From 4a402b01cfdd8ffe8947e384471bd7a31faa6d86 Mon Sep 17 00:00:00 2001
From: mickeypash
Date: Mon, 23 Oct 2017 19:12:49 +0100
Subject: [PATCH 001/658] Correct response when trying to delete a volume that
 is attached to an EC2 instance.

Created a VolumeInUse error and did a simple check on the delete_volume
method.
---
 moto/ec2/exceptions.py | 9 +++++++++
 moto/ec2/models.py     | 5 +++++
 2 files changed, 14 insertions(+)

diff --git a/moto/ec2/exceptions.py b/moto/ec2/exceptions.py
index e5432baf7bbf..ae279d5b2557 100644
--- a/moto/ec2/exceptions.py
+++ b/moto/ec2/exceptions.py
@@ -244,6 +244,15 @@ def __init__(self, volume_id, instance_id):
             .format(volume_id, instance_id))
 
 
+class VolumeInUseError(EC2ClientError):
+
+    def __init__(self, volume_id, instance_id):
+        super(VolumeInUseError, self).__init__(
+            "VolumeInUse",
+            "Volume {0} is currently attached to {1}"
+            .format(volume_id, instance_id))
+
+
 class InvalidDomainError(EC2ClientError):
 
     def __init__(self, domain):
diff --git a/moto/ec2/models.py b/moto/ec2/models.py
index 7fa7e1009895..011258520591 100755
--- a/moto/ec2/models.py
+++ b/moto/ec2/models.py
@@ -45,6 +45,7 @@
     InvalidAMIAttributeItemValueError,
     InvalidSnapshotIdError,
     InvalidVolumeIdError,
+    VolumeInUseError,
     InvalidVolumeAttachmentError,
     InvalidDomainError,
     InvalidAddressError,
@@ -1813,6 +1814,10 @@ def get_volume(self, volume_id):
 
     def delete_volume(self, volume_id):
         if volume_id in self.volumes:
+            volume = self.volumes[volume_id]
+            instance_id = volume.attachment.instance.id
+            if volume.attachment is not None:
+                raise VolumeInUseError(volume_id, instance_id)
             return self.volumes.pop(volume_id)
         raise InvalidVolumeIdError(volume_id)
 

From d5b841fb6c4e8fdb4c94bd9becc74441a96ec6da Mon Sep 17 00:00:00 2001
From: mickeypash
Date: Mon, 13 Nov 2017 19:58:21 +0000
Subject: [PATCH 002/658] Fixing volume.attachment is None

---
 moto/ec2/models.py | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/moto/ec2/models.py b/moto/ec2/models.py
index bad32d6538aa..b9cbe04074d3 100755
--- a/moto/ec2/models.py
+++ b/moto/ec2/models.py
@@ -1812,9 +1812,8 @@ def get_volume(self, volume_id):
 
     def delete_volume(self, volume_id):
         if volume_id in self.volumes:
             volume = self.volumes[volume_id]
-            instance_id = volume.attachment.instance.id
             if volume.attachment is not None:
-                raise VolumeInUseError(volume_id, instance_id)
+                raise VolumeInUseError(volume_id, volume.attachment.instance.id)
             return self.volumes.pop(volume_id)
         raise InvalidVolumeIdError(volume_id)
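Taken together, patches 001 and 002 make moto's EBS mock match real EC2:
DeleteVolume on an attached volume must fail with a VolumeInUse error rather
than silently succeed. Patch 002 reorders the lookup so that
volume.attachment.instance.id is only dereferenced after the None check;
as committed in patch 001, deleting a *detached* volume would have raised
AttributeError. A minimal sketch of the resulting behaviour, assuming moto's
mock_ec2 decorator and a boto3 client (this test is illustrative, not part of
the series):

    import boto3
    from botocore.exceptions import ClientError
    from moto import mock_ec2


    @mock_ec2
    def test_delete_attached_volume_raises_volume_in_use():
        ec2 = boto3.client("ec2", region_name="us-east-1")
        reservation = ec2.run_instances(ImageId="ami-1234abcd", MinCount=1, MaxCount=1)
        instance_id = reservation["Instances"][0]["InstanceId"]
        volume_id = ec2.create_volume(Size=8, AvailabilityZone="us-east-1a")["VolumeId"]
        ec2.attach_volume(Device="/dev/sdh", InstanceId=instance_id, VolumeId=volume_id)

        try:
            ec2.delete_volume(VolumeId=volume_id)
            raise AssertionError("expected VolumeInUse")
        except ClientError as err:
            # Error code introduced by patch 001; the message names the instance.
            assert err.response["Error"]["Code"] == "VolumeInUse"

        # Detached volumes still delete cleanly (the path patch 002 repairs).
        ec2.detach_volume(Device="/dev/sdh", InstanceId=instance_id, VolumeId=volume_id)
        ec2.delete_volume(VolumeId=volume_id)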
From c49a8387bd54a64e1d8d1e7d26252fc58533ab90 Mon Sep 17 00:00:00 2001
From: Stephan Huber
Date: Mon, 24 Sep 2018 09:29:57 +0200
Subject: [PATCH 003/658] implemented `get_job_document` for AWS IoT

---
 IMPLEMENTATION_COVERAGE.md | 9199 +++++++++++++++++++-----------------
 moto/iot/models.py         |    3 +
 moto/iot/responses.py      |   10 +
 tests/test_iot/test_iot.py |   64 +-
 4 files changed, 4847 insertions(+), 4429 deletions(-)

diff --git a/IMPLEMENTATION_COVERAGE.md b/IMPLEMENTATION_COVERAGE.md
index 938cc3549933..7fbbbcbb0a3b 100644
--- a/IMPLEMENTATION_COVERAGE.md
+++ b/IMPLEMENTATION_COVERAGE.md
@@ -1,4428 +1,4771 @@
[Hunk elided: the commit regenerates the auto-generated coverage checklist
wholesale, deleting the old 4,428-line file (from "## acm - 41% implemented"
onward) and adding its 4,771-line replacement. The excerpt breaks off partway
through the deleted half, inside the "## iam - 48% implemented" section, so
the moto/iot and tests/test_iot hunks are not shown.]
get_open_id_connect_provider -- [X] get_policy -- [X] get_policy_version -- [X] get_role -- [X] get_role_policy -- [ ] get_saml_provider -- [X] get_server_certificate -- [ ] get_service_linked_role_deletion_status -- [ ] get_ssh_public_key -- [X] get_user -- [X] get_user_policy -- [ ] list_access_keys -- [X] list_account_aliases -- [X] list_attached_group_policies -- [X] list_attached_role_policies -- [X] list_attached_user_policies -- [ ] list_entities_for_policy -- [X] list_group_policies -- [X] list_groups -- [ ] list_groups_for_user -- [ ] list_instance_profiles -- [ ] list_instance_profiles_for_role -- [X] list_mfa_devices -- [ ] list_open_id_connect_providers -- [X] list_policies -- [X] list_policy_versions -- [X] list_role_policies -- [ ] list_roles -- [ ] list_saml_providers -- [ ] list_server_certificates -- [ ] list_service_specific_credentials -- [ ] list_signing_certificates -- [ ] list_ssh_public_keys -- [X] list_user_policies -- [X] list_users -- [ ] list_virtual_mfa_devices -- [X] put_group_policy -- [X] put_role_policy -- [X] put_user_policy -- [ ] remove_client_id_from_open_id_connect_provider -- [X] remove_role_from_instance_profile -- [X] remove_user_from_group -- [ ] reset_service_specific_credential -- [ ] resync_mfa_device -- [ ] set_default_policy_version -- [ ] simulate_custom_policy -- [ ] simulate_principal_policy -- [X] update_access_key -- [ ] update_account_password_policy -- [ ] update_assume_role_policy -- [ ] update_group -- [X] update_login_profile -- [ ] update_open_id_connect_provider_thumbprint -- [ ] update_role -- [ ] update_role_description -- [ ] update_saml_provider -- [ ] update_server_certificate -- [ ] update_service_specific_credential -- [ ] update_signing_certificate -- [ ] update_ssh_public_key -- [ ] update_user -- [ ] upload_server_certificate -- [ ] upload_signing_certificate -- [ ] upload_ssh_public_key - -## importexport - 0% implemented -- [ ] cancel_job -- [ ] create_job -- [ ] get_shipping_label -- [ ] get_status -- [ ] list_jobs -- [ ] update_job - -## inspector - 0% implemented -- [ ] add_attributes_to_findings -- [ ] create_assessment_target -- [ ] create_assessment_template -- [ ] create_resource_group -- [ ] delete_assessment_run -- [ ] delete_assessment_target -- [ ] delete_assessment_template -- [ ] describe_assessment_runs -- [ ] describe_assessment_targets -- [ ] describe_assessment_templates -- [ ] describe_cross_account_access_role -- [ ] describe_findings -- [ ] describe_resource_groups -- [ ] describe_rules_packages -- [ ] get_assessment_report -- [ ] get_telemetry_metadata -- [ ] list_assessment_run_agents -- [ ] list_assessment_runs -- [ ] list_assessment_targets -- [ ] list_assessment_templates -- [ ] list_event_subscriptions -- [ ] list_findings -- [ ] list_rules_packages -- [ ] list_tags_for_resource -- [ ] preview_agents -- [ ] register_cross_account_access_role -- [ ] remove_attributes_from_findings -- [ ] set_tags_for_resource -- [ ] start_assessment_run -- [ ] stop_assessment_run -- [ ] subscribe_to_event -- [ ] unsubscribe_from_event -- [ ] update_assessment_target - -## iot - 30% implemented -- [ ] accept_certificate_transfer -- [X] add_thing_to_thing_group -- [ ] associate_targets_with_job -- [ ] attach_policy -- [X] attach_principal_policy -- [X] attach_thing_principal -- [ ] cancel_certificate_transfer -- [ ] cancel_job -- [ ] clear_default_authorizer -- [ ] create_authorizer -- [ ] create_certificate_from_csr -- [X] create_job -- [X] create_keys_and_certificate -- [ ] create_ota_update -- [X] create_policy 
-- [ ] create_policy_version -- [ ] create_role_alias -- [ ] create_stream -- [X] create_thing -- [X] create_thing_group -- [X] create_thing_type -- [ ] create_topic_rule -- [ ] delete_authorizer -- [ ] delete_ca_certificate -- [X] delete_certificate -- [ ] delete_ota_update -- [X] delete_policy -- [ ] delete_policy_version -- [ ] delete_registration_code -- [ ] delete_role_alias -- [ ] delete_stream -- [X] delete_thing -- [X] delete_thing_group -- [X] delete_thing_type -- [ ] delete_topic_rule -- [ ] delete_v2_logging_level -- [ ] deprecate_thing_type -- [ ] describe_authorizer -- [ ] describe_ca_certificate -- [X] describe_certificate -- [ ] describe_default_authorizer -- [ ] describe_endpoint -- [ ] describe_event_configurations -- [ ] describe_index -- [X] describe_job -- [ ] describe_job_execution -- [ ] describe_role_alias -- [ ] describe_stream -- [X] describe_thing -- [X] describe_thing_group -- [ ] describe_thing_registration_task -- [X] describe_thing_type -- [ ] detach_policy -- [X] detach_principal_policy -- [X] detach_thing_principal -- [ ] disable_topic_rule -- [ ] enable_topic_rule -- [ ] get_effective_policies -- [ ] get_indexing_configuration -- [ ] get_job_document -- [ ] get_logging_options -- [ ] get_ota_update -- [X] get_policy -- [ ] get_policy_version -- [ ] get_registration_code -- [ ] get_topic_rule -- [ ] get_v2_logging_options -- [ ] list_attached_policies -- [ ] list_authorizers -- [ ] list_ca_certificates -- [X] list_certificates -- [ ] list_certificates_by_ca -- [ ] list_indices -- [ ] list_job_executions_for_job -- [ ] list_job_executions_for_thing -- [ ] list_jobs -- [ ] list_ota_updates -- [ ] list_outgoing_certificates -- [X] list_policies -- [X] list_policy_principals -- [ ] list_policy_versions -- [X] list_principal_policies -- [X] list_principal_things -- [ ] list_role_aliases -- [ ] list_streams -- [ ] list_targets_for_policy -- [X] list_thing_groups -- [X] list_thing_groups_for_thing -- [X] list_thing_principals -- [ ] list_thing_registration_task_reports -- [ ] list_thing_registration_tasks -- [X] list_thing_types -- [X] list_things -- [X] list_things_in_thing_group -- [ ] list_topic_rules -- [ ] list_v2_logging_levels -- [ ] register_ca_certificate -- [ ] register_certificate -- [ ] register_thing -- [ ] reject_certificate_transfer -- [X] remove_thing_from_thing_group -- [ ] replace_topic_rule -- [ ] search_index -- [ ] set_default_authorizer -- [ ] set_default_policy_version -- [ ] set_logging_options -- [ ] set_v2_logging_level -- [ ] set_v2_logging_options -- [ ] start_thing_registration_task -- [ ] stop_thing_registration_task -- [ ] test_authorization -- [ ] test_invoke_authorizer -- [ ] transfer_certificate -- [ ] update_authorizer -- [ ] update_ca_certificate -- [X] update_certificate -- [ ] update_event_configurations -- [ ] update_indexing_configuration -- [ ] update_role_alias -- [ ] update_stream -- [X] update_thing -- [X] update_thing_group -- [X] update_thing_groups_for_thing - -## iot-data - 0% implemented -- [ ] delete_thing_shadow -- [ ] get_thing_shadow -- [ ] publish -- [ ] update_thing_shadow - -## iot-jobs-data - 0% implemented -- [ ] describe_job_execution -- [ ] get_pending_job_executions -- [ ] start_next_pending_job_execution -- [ ] update_job_execution - -## kinesis - 56% implemented -- [X] add_tags_to_stream -- [X] create_stream -- [ ] decrease_stream_retention_period -- [X] delete_stream -- [ ] describe_limits -- [X] describe_stream -- [ ] describe_stream_summary -- [ ] disable_enhanced_monitoring -- [ ] 
enable_enhanced_monitoring -- [X] get_records -- [X] get_shard_iterator -- [ ] increase_stream_retention_period -- [ ] list_shards -- [X] list_streams -- [X] list_tags_for_stream -- [X] merge_shards -- [X] put_record -- [X] put_records -- [X] remove_tags_from_stream -- [X] split_shard -- [ ] start_stream_encryption -- [ ] stop_stream_encryption -- [ ] update_shard_count - -## kinesis-video-archived-media - 0% implemented -- [ ] get_media_for_fragment_list -- [ ] list_fragments - -## kinesis-video-media - 0% implemented -- [ ] get_media - -## kinesisanalytics - 0% implemented -- [ ] add_application_cloud_watch_logging_option -- [ ] add_application_input -- [ ] add_application_input_processing_configuration -- [ ] add_application_output -- [ ] add_application_reference_data_source -- [ ] create_application -- [ ] delete_application -- [ ] delete_application_cloud_watch_logging_option -- [ ] delete_application_input_processing_configuration -- [ ] delete_application_output -- [ ] delete_application_reference_data_source -- [ ] describe_application -- [ ] discover_input_schema -- [ ] list_applications -- [ ] start_application -- [ ] stop_application -- [ ] update_application - -## kinesisvideo - 0% implemented -- [ ] create_stream -- [ ] delete_stream -- [ ] describe_stream -- [ ] get_data_endpoint -- [ ] list_streams -- [ ] list_tags_for_stream -- [ ] tag_stream -- [ ] untag_stream -- [ ] update_data_retention -- [ ] update_stream - -## kms - 25% implemented -- [ ] cancel_key_deletion -- [ ] create_alias -- [ ] create_grant -- [X] create_key -- [ ] decrypt -- [X] delete_alias -- [ ] delete_imported_key_material -- [X] describe_key -- [ ] disable_key -- [X] disable_key_rotation -- [ ] enable_key -- [X] enable_key_rotation -- [ ] encrypt -- [ ] generate_data_key -- [ ] generate_data_key_without_plaintext -- [ ] generate_random -- [X] get_key_policy -- [X] get_key_rotation_status -- [ ] get_parameters_for_import -- [ ] import_key_material -- [ ] list_aliases -- [ ] list_grants -- [ ] list_key_policies -- [X] list_keys -- [ ] list_resource_tags -- [ ] list_retirable_grants -- [X] put_key_policy -- [ ] re_encrypt -- [ ] retire_grant -- [ ] revoke_grant -- [ ] schedule_key_deletion -- [ ] tag_resource -- [ ] untag_resource -- [ ] update_alias -- [ ] update_key_description - -## lambda - 0% implemented -- [ ] add_permission -- [ ] create_alias -- [ ] create_event_source_mapping -- [ ] create_function -- [ ] delete_alias -- [ ] delete_event_source_mapping -- [ ] delete_function -- [ ] delete_function_concurrency -- [ ] get_account_settings -- [ ] get_alias -- [ ] get_event_source_mapping -- [ ] get_function -- [ ] get_function_configuration -- [ ] get_policy -- [ ] invoke -- [ ] invoke_async -- [ ] list_aliases -- [ ] list_event_source_mappings -- [ ] list_functions -- [ ] list_tags -- [ ] list_versions_by_function -- [ ] publish_version -- [ ] put_function_concurrency -- [ ] remove_permission -- [ ] tag_resource -- [ ] untag_resource -- [ ] update_alias -- [ ] update_event_source_mapping -- [ ] update_function_code -- [ ] update_function_configuration - -## lex-models - 0% implemented -- [ ] create_bot_version -- [ ] create_intent_version -- [ ] create_slot_type_version -- [ ] delete_bot -- [ ] delete_bot_alias -- [ ] delete_bot_channel_association -- [ ] delete_bot_version -- [ ] delete_intent -- [ ] delete_intent_version -- [ ] delete_slot_type -- [ ] delete_slot_type_version -- [ ] delete_utterances -- [ ] get_bot -- [ ] get_bot_alias -- [ ] get_bot_aliases -- [ ] get_bot_channel_association -- [ 
] get_bot_channel_associations -- [ ] get_bot_versions -- [ ] get_bots -- [ ] get_builtin_intent -- [ ] get_builtin_intents -- [ ] get_builtin_slot_types -- [ ] get_export -- [ ] get_import -- [ ] get_intent -- [ ] get_intent_versions -- [ ] get_intents -- [ ] get_slot_type -- [ ] get_slot_type_versions -- [ ] get_slot_types -- [ ] get_utterances_view -- [ ] put_bot -- [ ] put_bot_alias -- [ ] put_intent -- [ ] put_slot_type -- [ ] start_import - -## lex-runtime - 0% implemented -- [ ] post_content -- [ ] post_text - -## lightsail - 0% implemented -- [ ] allocate_static_ip -- [ ] attach_disk -- [ ] attach_instances_to_load_balancer -- [ ] attach_load_balancer_tls_certificate -- [ ] attach_static_ip -- [ ] close_instance_public_ports -- [ ] create_disk -- [ ] create_disk_from_snapshot -- [ ] create_disk_snapshot -- [ ] create_domain -- [ ] create_domain_entry -- [ ] create_instance_snapshot -- [ ] create_instances -- [ ] create_instances_from_snapshot -- [ ] create_key_pair -- [ ] create_load_balancer -- [ ] create_load_balancer_tls_certificate -- [ ] delete_disk -- [ ] delete_disk_snapshot -- [ ] delete_domain -- [ ] delete_domain_entry -- [ ] delete_instance -- [ ] delete_instance_snapshot -- [ ] delete_key_pair -- [ ] delete_load_balancer -- [ ] delete_load_balancer_tls_certificate -- [ ] detach_disk -- [ ] detach_instances_from_load_balancer -- [ ] detach_static_ip -- [ ] download_default_key_pair -- [ ] get_active_names -- [ ] get_blueprints -- [ ] get_bundles -- [ ] get_disk -- [ ] get_disk_snapshot -- [ ] get_disk_snapshots -- [ ] get_disks -- [ ] get_domain -- [ ] get_domains -- [ ] get_instance -- [ ] get_instance_access_details -- [ ] get_instance_metric_data -- [ ] get_instance_port_states -- [ ] get_instance_snapshot -- [ ] get_instance_snapshots -- [ ] get_instance_state -- [ ] get_instances -- [ ] get_key_pair -- [ ] get_key_pairs -- [ ] get_load_balancer -- [ ] get_load_balancer_metric_data -- [ ] get_load_balancer_tls_certificates -- [ ] get_load_balancers -- [ ] get_operation -- [ ] get_operations -- [ ] get_operations_for_resource -- [ ] get_regions -- [ ] get_static_ip -- [ ] get_static_ips -- [ ] import_key_pair -- [ ] is_vpc_peered -- [ ] open_instance_public_ports -- [ ] peer_vpc -- [ ] put_instance_public_ports -- [ ] reboot_instance -- [ ] release_static_ip -- [ ] start_instance -- [ ] stop_instance -- [ ] unpeer_vpc -- [ ] update_domain_entry -- [ ] update_load_balancer_attribute - -## logs - 27% implemented -- [ ] associate_kms_key -- [ ] cancel_export_task -- [ ] create_export_task -- [X] create_log_group -- [X] create_log_stream -- [ ] delete_destination -- [X] delete_log_group -- [X] delete_log_stream -- [ ] delete_metric_filter -- [ ] delete_resource_policy -- [ ] delete_retention_policy -- [ ] delete_subscription_filter -- [ ] describe_destinations -- [ ] describe_export_tasks -- [X] describe_log_groups -- [X] describe_log_streams -- [ ] describe_metric_filters -- [ ] describe_resource_policies -- [ ] describe_subscription_filters -- [ ] disassociate_kms_key -- [X] filter_log_events -- [X] get_log_events -- [ ] list_tags_log_group -- [ ] put_destination -- [ ] put_destination_policy -- [X] put_log_events -- [ ] put_metric_filter -- [ ] put_resource_policy -- [ ] put_retention_policy -- [ ] put_subscription_filter -- [ ] tag_log_group -- [ ] test_metric_filter -- [ ] untag_log_group - -## machinelearning - 0% implemented -- [ ] add_tags -- [ ] create_batch_prediction -- [ ] create_data_source_from_rds -- [ ] create_data_source_from_redshift -- [ ] 
create_data_source_from_s3 -- [ ] create_evaluation -- [ ] create_ml_model -- [ ] create_realtime_endpoint -- [ ] delete_batch_prediction -- [ ] delete_data_source -- [ ] delete_evaluation -- [ ] delete_ml_model -- [ ] delete_realtime_endpoint -- [ ] delete_tags -- [ ] describe_batch_predictions -- [ ] describe_data_sources -- [ ] describe_evaluations -- [ ] describe_ml_models -- [ ] describe_tags -- [ ] get_batch_prediction -- [ ] get_data_source -- [ ] get_evaluation -- [ ] get_ml_model -- [ ] predict -- [ ] update_batch_prediction -- [ ] update_data_source -- [ ] update_evaluation -- [ ] update_ml_model - -## marketplace-entitlement - 0% implemented -- [ ] get_entitlements - -## marketplacecommerceanalytics - 0% implemented -- [ ] generate_data_set -- [ ] start_support_data_export - -## mediaconvert - 0% implemented -- [ ] cancel_job -- [ ] create_job -- [ ] create_job_template -- [ ] create_preset -- [ ] create_queue -- [ ] delete_job_template -- [ ] delete_preset -- [ ] delete_queue -- [ ] describe_endpoints -- [ ] get_job -- [ ] get_job_template -- [ ] get_preset -- [ ] get_queue -- [ ] list_job_templates -- [ ] list_jobs -- [ ] list_presets -- [ ] list_queues -- [ ] update_job_template -- [ ] update_preset -- [ ] update_queue - -## medialive - 0% implemented -- [ ] create_channel -- [ ] create_input -- [ ] create_input_security_group -- [ ] delete_channel -- [ ] delete_input -- [ ] delete_input_security_group -- [ ] describe_channel -- [ ] describe_input -- [ ] describe_input_security_group -- [ ] list_channels -- [ ] list_input_security_groups -- [ ] list_inputs -- [ ] start_channel -- [ ] stop_channel -- [ ] update_channel -- [ ] update_input -- [ ] update_input_security_group - -## mediapackage - 0% implemented -- [ ] create_channel -- [ ] create_origin_endpoint -- [ ] delete_channel -- [ ] delete_origin_endpoint -- [ ] describe_channel -- [ ] describe_origin_endpoint -- [ ] list_channels -- [ ] list_origin_endpoints -- [ ] rotate_channel_credentials -- [ ] update_channel -- [ ] update_origin_endpoint - -## mediastore - 0% implemented -- [ ] create_container -- [ ] delete_container -- [ ] delete_container_policy -- [ ] delete_cors_policy -- [ ] describe_container -- [ ] get_container_policy -- [ ] get_cors_policy -- [ ] list_containers -- [ ] put_container_policy -- [ ] put_cors_policy - -## mediastore-data - 0% implemented -- [ ] delete_object -- [ ] describe_object -- [ ] get_object -- [ ] list_items -- [ ] put_object - -## meteringmarketplace - 0% implemented -- [ ] batch_meter_usage -- [ ] meter_usage -- [ ] resolve_customer - -## mgh - 0% implemented -- [ ] associate_created_artifact -- [ ] associate_discovered_resource -- [ ] create_progress_update_stream -- [ ] delete_progress_update_stream -- [ ] describe_application_state -- [ ] describe_migration_task -- [ ] disassociate_created_artifact -- [ ] disassociate_discovered_resource -- [ ] import_migration_task -- [ ] list_created_artifacts -- [ ] list_discovered_resources -- [ ] list_migration_tasks -- [ ] list_progress_update_streams -- [ ] notify_application_state -- [ ] notify_migration_task_state -- [ ] put_resource_attributes - -## mobile - 0% implemented -- [ ] create_project -- [ ] delete_project -- [ ] describe_bundle -- [ ] describe_project -- [ ] export_bundle -- [ ] export_project -- [ ] list_bundles -- [ ] list_projects -- [ ] update_project - -## mq - 0% implemented -- [ ] create_broker -- [ ] create_configuration -- [ ] create_user -- [ ] delete_broker -- [ ] delete_user -- [ ] describe_broker -- [ ] 
describe_configuration -- [ ] describe_configuration_revision -- [ ] describe_user -- [ ] list_brokers -- [ ] list_configuration_revisions -- [ ] list_configurations -- [ ] list_users -- [ ] reboot_broker -- [ ] update_broker -- [ ] update_configuration -- [ ] update_user - -## mturk - 0% implemented -- [ ] accept_qualification_request -- [ ] approve_assignment -- [ ] associate_qualification_with_worker -- [ ] create_additional_assignments_for_hit -- [ ] create_hit -- [ ] create_hit_type -- [ ] create_hit_with_hit_type -- [ ] create_qualification_type -- [ ] create_worker_block -- [ ] delete_hit -- [ ] delete_qualification_type -- [ ] delete_worker_block -- [ ] disassociate_qualification_from_worker -- [ ] get_account_balance -- [ ] get_assignment -- [ ] get_file_upload_url -- [ ] get_hit -- [ ] get_qualification_score -- [ ] get_qualification_type -- [ ] list_assignments_for_hit -- [ ] list_bonus_payments -- [ ] list_hits -- [ ] list_hits_for_qualification_type -- [ ] list_qualification_requests -- [ ] list_qualification_types -- [ ] list_review_policy_results_for_hit -- [ ] list_reviewable_hits -- [ ] list_worker_blocks -- [ ] list_workers_with_qualification_type -- [ ] notify_workers -- [ ] reject_assignment -- [ ] reject_qualification_request -- [ ] send_bonus -- [ ] send_test_event_notification -- [ ] update_expiration_for_hit -- [ ] update_hit_review_status -- [ ] update_hit_type_of_hit -- [ ] update_notification_settings -- [ ] update_qualification_type - -## opsworks - 12% implemented -- [ ] assign_instance -- [ ] assign_volume -- [ ] associate_elastic_ip -- [ ] attach_elastic_load_balancer -- [ ] clone_stack -- [X] create_app -- [ ] create_deployment -- [X] create_instance -- [X] create_layer -- [X] create_stack -- [ ] create_user_profile -- [ ] delete_app -- [ ] delete_instance -- [ ] delete_layer -- [ ] delete_stack -- [ ] delete_user_profile -- [ ] deregister_ecs_cluster -- [ ] deregister_elastic_ip -- [ ] deregister_instance -- [ ] deregister_rds_db_instance -- [ ] deregister_volume -- [ ] describe_agent_versions -- [X] describe_apps -- [ ] describe_commands -- [ ] describe_deployments -- [ ] describe_ecs_clusters -- [ ] describe_elastic_ips -- [ ] describe_elastic_load_balancers -- [X] describe_instances -- [X] describe_layers -- [ ] describe_load_based_auto_scaling -- [ ] describe_my_user_profile -- [ ] describe_operating_systems -- [ ] describe_permissions -- [ ] describe_raid_arrays -- [ ] describe_rds_db_instances -- [ ] describe_service_errors -- [ ] describe_stack_provisioning_parameters -- [ ] describe_stack_summary -- [X] describe_stacks -- [ ] describe_time_based_auto_scaling -- [ ] describe_user_profiles -- [ ] describe_volumes -- [ ] detach_elastic_load_balancer -- [ ] disassociate_elastic_ip -- [ ] get_hostname_suggestion -- [ ] grant_access -- [ ] list_tags -- [ ] reboot_instance -- [ ] register_ecs_cluster -- [ ] register_elastic_ip -- [ ] register_instance -- [ ] register_rds_db_instance -- [ ] register_volume -- [ ] set_load_based_auto_scaling -- [ ] set_permission -- [ ] set_time_based_auto_scaling -- [X] start_instance -- [ ] start_stack -- [ ] stop_instance -- [ ] stop_stack -- [ ] tag_resource -- [ ] unassign_instance -- [ ] unassign_volume -- [ ] untag_resource -- [ ] update_app -- [ ] update_elastic_ip -- [ ] update_instance -- [ ] update_layer -- [ ] update_my_user_profile -- [ ] update_rds_db_instance -- [ ] update_stack -- [ ] update_user_profile -- [ ] update_volume - -## opsworkscm - 0% implemented -- [ ] associate_node -- [ ] create_backup -- [ ] 
create_server -- [ ] delete_backup -- [ ] delete_server -- [ ] describe_account_attributes -- [ ] describe_backups -- [ ] describe_events -- [ ] describe_node_association_status -- [ ] describe_servers -- [ ] disassociate_node -- [ ] restore_server -- [ ] start_maintenance -- [ ] update_server -- [ ] update_server_engine_attributes - -## organizations - 0% implemented -- [ ] accept_handshake -- [ ] attach_policy -- [ ] cancel_handshake -- [ ] create_account -- [ ] create_organization -- [ ] create_organizational_unit -- [ ] create_policy -- [ ] decline_handshake -- [ ] delete_organization -- [ ] delete_organizational_unit -- [ ] delete_policy -- [ ] describe_account -- [ ] describe_create_account_status -- [ ] describe_handshake -- [ ] describe_organization -- [ ] describe_organizational_unit -- [ ] describe_policy -- [ ] detach_policy -- [ ] disable_aws_service_access -- [ ] disable_policy_type -- [ ] enable_all_features -- [ ] enable_aws_service_access -- [ ] enable_policy_type -- [ ] invite_account_to_organization -- [ ] leave_organization -- [ ] list_accounts -- [ ] list_accounts_for_parent -- [ ] list_aws_service_access_for_organization -- [ ] list_children -- [ ] list_create_account_status -- [ ] list_handshakes_for_account -- [ ] list_handshakes_for_organization -- [ ] list_organizational_units_for_parent -- [ ] list_parents -- [ ] list_policies -- [ ] list_policies_for_target -- [ ] list_roots -- [ ] list_targets_for_policy -- [ ] move_account -- [ ] remove_account_from_organization -- [ ] update_organizational_unit -- [ ] update_policy - -## pinpoint - 0% implemented -- [ ] create_app -- [ ] create_campaign -- [ ] create_export_job -- [ ] create_import_job -- [ ] create_segment -- [ ] delete_adm_channel -- [ ] delete_apns_channel -- [ ] delete_apns_sandbox_channel -- [ ] delete_apns_voip_channel -- [ ] delete_apns_voip_sandbox_channel -- [ ] delete_app -- [ ] delete_baidu_channel -- [ ] delete_campaign -- [ ] delete_email_channel -- [ ] delete_endpoint -- [ ] delete_event_stream -- [ ] delete_gcm_channel -- [ ] delete_segment -- [ ] delete_sms_channel -- [ ] get_adm_channel -- [ ] get_apns_channel -- [ ] get_apns_sandbox_channel -- [ ] get_apns_voip_channel -- [ ] get_apns_voip_sandbox_channel -- [ ] get_app -- [ ] get_application_settings -- [ ] get_apps -- [ ] get_baidu_channel -- [ ] get_campaign -- [ ] get_campaign_activities -- [ ] get_campaign_version -- [ ] get_campaign_versions -- [ ] get_campaigns -- [ ] get_email_channel -- [ ] get_endpoint -- [ ] get_event_stream -- [ ] get_export_job -- [ ] get_export_jobs -- [ ] get_gcm_channel -- [ ] get_import_job -- [ ] get_import_jobs -- [ ] get_segment -- [ ] get_segment_export_jobs -- [ ] get_segment_import_jobs -- [ ] get_segment_version -- [ ] get_segment_versions -- [ ] get_segments -- [ ] get_sms_channel -- [ ] put_event_stream -- [ ] send_messages -- [ ] send_users_messages -- [ ] update_adm_channel -- [ ] update_apns_channel -- [ ] update_apns_sandbox_channel -- [ ] update_apns_voip_channel -- [ ] update_apns_voip_sandbox_channel -- [ ] update_application_settings -- [ ] update_baidu_channel -- [ ] update_campaign -- [ ] update_email_channel -- [ ] update_endpoint -- [ ] update_endpoints_batch -- [ ] update_gcm_channel -- [ ] update_segment -- [ ] update_sms_channel - -## polly - 83% implemented -- [X] delete_lexicon -- [X] describe_voices -- [X] get_lexicon -- [X] list_lexicons -- [X] put_lexicon -- [ ] synthesize_speech - -## pricing - 0% implemented -- [ ] describe_services -- [ ] get_attribute_values -- [ ] 
get_products - -## rds - 0% implemented -- [ ] add_role_to_db_cluster -- [ ] add_source_identifier_to_subscription -- [ ] add_tags_to_resource -- [ ] apply_pending_maintenance_action -- [ ] authorize_db_security_group_ingress -- [ ] copy_db_cluster_parameter_group -- [ ] copy_db_cluster_snapshot -- [ ] copy_db_parameter_group -- [ ] copy_db_snapshot -- [ ] copy_option_group -- [ ] create_db_cluster -- [ ] create_db_cluster_parameter_group -- [ ] create_db_cluster_snapshot -- [ ] create_db_instance -- [ ] create_db_instance_read_replica -- [ ] create_db_parameter_group -- [ ] create_db_security_group -- [ ] create_db_snapshot -- [ ] create_db_subnet_group -- [ ] create_event_subscription -- [ ] create_option_group -- [ ] delete_db_cluster -- [ ] delete_db_cluster_parameter_group -- [ ] delete_db_cluster_snapshot -- [ ] delete_db_instance -- [ ] delete_db_parameter_group -- [ ] delete_db_security_group -- [ ] delete_db_snapshot -- [ ] delete_db_subnet_group -- [ ] delete_event_subscription -- [ ] delete_option_group -- [ ] describe_account_attributes -- [ ] describe_certificates -- [ ] describe_db_cluster_parameter_groups -- [ ] describe_db_cluster_parameters -- [ ] describe_db_cluster_snapshot_attributes -- [ ] describe_db_cluster_snapshots -- [ ] describe_db_clusters -- [ ] describe_db_engine_versions -- [ ] describe_db_instances -- [ ] describe_db_log_files -- [ ] describe_db_parameter_groups -- [ ] describe_db_parameters -- [ ] describe_db_security_groups -- [ ] describe_db_snapshot_attributes -- [ ] describe_db_snapshots -- [ ] describe_db_subnet_groups -- [ ] describe_engine_default_cluster_parameters -- [ ] describe_engine_default_parameters -- [ ] describe_event_categories -- [ ] describe_event_subscriptions -- [ ] describe_events -- [ ] describe_option_group_options -- [ ] describe_option_groups -- [ ] describe_orderable_db_instance_options -- [ ] describe_pending_maintenance_actions -- [ ] describe_reserved_db_instances -- [ ] describe_reserved_db_instances_offerings -- [ ] describe_source_regions -- [ ] describe_valid_db_instance_modifications -- [ ] download_db_log_file_portion -- [ ] failover_db_cluster -- [ ] list_tags_for_resource -- [ ] modify_db_cluster -- [ ] modify_db_cluster_parameter_group -- [ ] modify_db_cluster_snapshot_attribute -- [ ] modify_db_instance -- [ ] modify_db_parameter_group -- [ ] modify_db_snapshot -- [ ] modify_db_snapshot_attribute -- [ ] modify_db_subnet_group -- [ ] modify_event_subscription -- [ ] modify_option_group -- [ ] promote_read_replica -- [ ] promote_read_replica_db_cluster -- [ ] purchase_reserved_db_instances_offering -- [ ] reboot_db_instance -- [ ] remove_role_from_db_cluster -- [ ] remove_source_identifier_from_subscription -- [ ] remove_tags_from_resource -- [ ] reset_db_cluster_parameter_group -- [ ] reset_db_parameter_group -- [ ] restore_db_cluster_from_s3 -- [ ] restore_db_cluster_from_snapshot -- [ ] restore_db_cluster_to_point_in_time -- [ ] restore_db_instance_from_db_snapshot -- [ ] restore_db_instance_from_s3 -- [ ] restore_db_instance_to_point_in_time -- [ ] revoke_db_security_group_ingress -- [ ] start_db_instance -- [ ] stop_db_instance - -## redshift - 41% implemented -- [ ] authorize_cluster_security_group_ingress -- [ ] authorize_snapshot_access -- [ ] copy_cluster_snapshot -- [X] create_cluster -- [X] create_cluster_parameter_group -- [X] create_cluster_security_group -- [X] create_cluster_snapshot -- [X] create_cluster_subnet_group -- [ ] create_event_subscription -- [ ] create_hsm_client_certificate -- [ ] 
create_hsm_configuration -- [X] create_snapshot_copy_grant -- [X] create_tags -- [X] delete_cluster -- [X] delete_cluster_parameter_group -- [X] delete_cluster_security_group -- [X] delete_cluster_snapshot -- [X] delete_cluster_subnet_group -- [ ] delete_event_subscription -- [ ] delete_hsm_client_certificate -- [ ] delete_hsm_configuration -- [X] delete_snapshot_copy_grant -- [X] delete_tags -- [X] describe_cluster_parameter_groups -- [ ] describe_cluster_parameters -- [X] describe_cluster_security_groups -- [X] describe_cluster_snapshots -- [X] describe_cluster_subnet_groups -- [ ] describe_cluster_versions -- [X] describe_clusters -- [ ] describe_default_cluster_parameters -- [ ] describe_event_categories -- [ ] describe_event_subscriptions -- [ ] describe_events -- [ ] describe_hsm_client_certificates -- [ ] describe_hsm_configurations -- [ ] describe_logging_status -- [ ] describe_orderable_cluster_options -- [ ] describe_reserved_node_offerings -- [ ] describe_reserved_nodes -- [ ] describe_resize -- [X] describe_snapshot_copy_grants -- [ ] describe_table_restore_status -- [X] describe_tags -- [ ] disable_logging -- [X] disable_snapshot_copy -- [ ] enable_logging -- [X] enable_snapshot_copy -- [ ] get_cluster_credentials -- [X] modify_cluster -- [ ] modify_cluster_iam_roles -- [ ] modify_cluster_parameter_group -- [ ] modify_cluster_subnet_group -- [ ] modify_event_subscription -- [X] modify_snapshot_copy_retention_period -- [ ] purchase_reserved_node_offering -- [ ] reboot_cluster -- [ ] reset_cluster_parameter_group -- [X] restore_from_cluster_snapshot -- [ ] restore_table_from_cluster_snapshot -- [ ] revoke_cluster_security_group_ingress -- [ ] revoke_snapshot_access -- [ ] rotate_encryption_key - -## rekognition - 0% implemented -- [ ] compare_faces -- [ ] create_collection -- [ ] create_stream_processor -- [ ] delete_collection -- [ ] delete_faces -- [ ] delete_stream_processor -- [ ] describe_stream_processor -- [ ] detect_faces -- [ ] detect_labels -- [ ] detect_moderation_labels -- [ ] detect_text -- [ ] get_celebrity_info -- [ ] get_celebrity_recognition -- [ ] get_content_moderation -- [ ] get_face_detection -- [ ] get_face_search -- [ ] get_label_detection -- [ ] get_person_tracking -- [ ] index_faces -- [ ] list_collections -- [ ] list_faces -- [ ] list_stream_processors -- [ ] recognize_celebrities -- [ ] search_faces -- [ ] search_faces_by_image -- [ ] start_celebrity_recognition -- [ ] start_content_moderation -- [ ] start_face_detection -- [ ] start_face_search -- [ ] start_label_detection -- [ ] start_person_tracking -- [ ] start_stream_processor -- [ ] stop_stream_processor - -## resource-groups - 0% implemented -- [ ] create_group -- [ ] delete_group -- [ ] get_group -- [ ] get_group_query -- [ ] get_tags -- [ ] list_group_resources -- [ ] list_groups -- [ ] search_resources -- [ ] tag -- [ ] untag -- [ ] update_group -- [ ] update_group_query - -## resourcegroupstaggingapi - 60% implemented -- [X] get_resources -- [X] get_tag_keys -- [X] get_tag_values -- [ ] tag_resources -- [ ] untag_resources - -## route53 - 12% implemented -- [ ] associate_vpc_with_hosted_zone -- [ ] change_resource_record_sets -- [X] change_tags_for_resource -- [X] create_health_check -- [X] create_hosted_zone -- [ ] create_query_logging_config -- [ ] create_reusable_delegation_set -- [ ] create_traffic_policy -- [ ] create_traffic_policy_instance -- [ ] create_traffic_policy_version -- [ ] create_vpc_association_authorization -- [X] delete_health_check -- [X] delete_hosted_zone -- [ ] 
delete_query_logging_config -- [ ] delete_reusable_delegation_set -- [ ] delete_traffic_policy -- [ ] delete_traffic_policy_instance -- [ ] delete_vpc_association_authorization -- [ ] disassociate_vpc_from_hosted_zone -- [ ] get_account_limit -- [ ] get_change -- [ ] get_checker_ip_ranges -- [ ] get_geo_location -- [ ] get_health_check -- [ ] get_health_check_count -- [ ] get_health_check_last_failure_reason -- [ ] get_health_check_status -- [X] get_hosted_zone -- [ ] get_hosted_zone_count -- [ ] get_hosted_zone_limit -- [ ] get_query_logging_config -- [ ] get_reusable_delegation_set -- [ ] get_reusable_delegation_set_limit -- [ ] get_traffic_policy -- [ ] get_traffic_policy_instance -- [ ] get_traffic_policy_instance_count -- [ ] list_geo_locations -- [ ] list_health_checks -- [ ] list_hosted_zones -- [ ] list_hosted_zones_by_name -- [ ] list_query_logging_configs -- [ ] list_resource_record_sets -- [ ] list_reusable_delegation_sets -- [X] list_tags_for_resource -- [ ] list_tags_for_resources -- [ ] list_traffic_policies -- [ ] list_traffic_policy_instances -- [ ] list_traffic_policy_instances_by_hosted_zone -- [ ] list_traffic_policy_instances_by_policy -- [ ] list_traffic_policy_versions -- [ ] list_vpc_association_authorizations -- [ ] test_dns_answer -- [ ] update_health_check -- [ ] update_hosted_zone_comment -- [ ] update_traffic_policy_comment -- [ ] update_traffic_policy_instance - -## route53domains - 0% implemented -- [ ] check_domain_availability -- [ ] check_domain_transferability -- [ ] delete_tags_for_domain -- [ ] disable_domain_auto_renew -- [ ] disable_domain_transfer_lock -- [ ] enable_domain_auto_renew -- [ ] enable_domain_transfer_lock -- [ ] get_contact_reachability_status -- [ ] get_domain_detail -- [ ] get_domain_suggestions -- [ ] get_operation_detail -- [ ] list_domains -- [ ] list_operations -- [ ] list_tags_for_domain -- [ ] register_domain -- [ ] renew_domain -- [ ] resend_contact_reachability_email -- [ ] retrieve_domain_auth_code -- [ ] transfer_domain -- [ ] update_domain_contact -- [ ] update_domain_contact_privacy -- [ ] update_domain_nameservers -- [ ] update_tags_for_domain -- [ ] view_billing - -## s3 - 15% implemented -- [ ] abort_multipart_upload -- [ ] complete_multipart_upload -- [ ] copy_object -- [X] create_bucket -- [ ] create_multipart_upload -- [X] delete_bucket -- [ ] delete_bucket_analytics_configuration -- [X] delete_bucket_cors -- [ ] delete_bucket_encryption -- [ ] delete_bucket_inventory_configuration -- [ ] delete_bucket_lifecycle -- [ ] delete_bucket_metrics_configuration -- [X] delete_bucket_policy -- [ ] delete_bucket_replication -- [X] delete_bucket_tagging -- [ ] delete_bucket_website -- [ ] delete_object -- [ ] delete_object_tagging -- [ ] delete_objects -- [ ] get_bucket_accelerate_configuration -- [X] get_bucket_acl -- [ ] get_bucket_analytics_configuration -- [ ] get_bucket_cors -- [ ] get_bucket_encryption -- [ ] get_bucket_inventory_configuration -- [ ] get_bucket_lifecycle -- [ ] get_bucket_lifecycle_configuration -- [ ] get_bucket_location -- [ ] get_bucket_logging -- [ ] get_bucket_metrics_configuration -- [ ] get_bucket_notification -- [ ] get_bucket_notification_configuration -- [X] get_bucket_policy -- [ ] get_bucket_replication -- [ ] get_bucket_request_payment -- [ ] get_bucket_tagging -- [X] get_bucket_versioning -- [ ] get_bucket_website -- [ ] get_object -- [ ] get_object_acl -- [ ] get_object_tagging -- [ ] get_object_torrent -- [ ] head_bucket -- [ ] head_object -- [ ] list_bucket_analytics_configurations -- [ ] 
list_bucket_inventory_configurations -- [ ] list_bucket_metrics_configurations -- [ ] list_buckets -- [ ] list_multipart_uploads -- [ ] list_object_versions -- [ ] list_objects -- [ ] list_objects_v2 -- [ ] list_parts -- [ ] put_bucket_accelerate_configuration -- [ ] put_bucket_acl -- [ ] put_bucket_analytics_configuration -- [X] put_bucket_cors -- [ ] put_bucket_encryption -- [ ] put_bucket_inventory_configuration -- [ ] put_bucket_lifecycle -- [ ] put_bucket_lifecycle_configuration -- [X] put_bucket_logging -- [ ] put_bucket_metrics_configuration -- [ ] put_bucket_notification -- [X] put_bucket_notification_configuration -- [ ] put_bucket_policy -- [ ] put_bucket_replication -- [ ] put_bucket_request_payment -- [X] put_bucket_tagging -- [ ] put_bucket_versioning -- [ ] put_bucket_website -- [ ] put_object -- [ ] put_object_acl -- [ ] put_object_tagging -- [ ] restore_object -- [ ] select_object_content -- [ ] upload_part -- [ ] upload_part_copy - -## sagemaker - 0% implemented -- [ ] add_tags -- [ ] create_endpoint -- [ ] create_endpoint_config -- [ ] create_model -- [ ] create_notebook_instance -- [ ] create_notebook_instance_lifecycle_config -- [ ] create_presigned_notebook_instance_url -- [ ] create_training_job -- [ ] delete_endpoint -- [ ] delete_endpoint_config -- [ ] delete_model -- [ ] delete_notebook_instance -- [ ] delete_notebook_instance_lifecycle_config -- [ ] delete_tags -- [ ] describe_endpoint -- [ ] describe_endpoint_config -- [ ] describe_model -- [ ] describe_notebook_instance -- [ ] describe_notebook_instance_lifecycle_config -- [ ] describe_training_job -- [ ] list_endpoint_configs -- [ ] list_endpoints -- [ ] list_models -- [ ] list_notebook_instance_lifecycle_configs -- [ ] list_notebook_instances -- [ ] list_tags -- [ ] list_training_jobs -- [ ] start_notebook_instance -- [ ] stop_notebook_instance -- [ ] stop_training_job -- [ ] update_endpoint -- [ ] update_endpoint_weights_and_capacities -- [ ] update_notebook_instance -- [ ] update_notebook_instance_lifecycle_config - -## sagemaker-runtime - 0% implemented -- [ ] invoke_endpoint - -## sdb - 0% implemented -- [ ] batch_delete_attributes -- [ ] batch_put_attributes -- [ ] create_domain -- [ ] delete_attributes -- [ ] delete_domain -- [ ] domain_metadata -- [ ] get_attributes -- [ ] list_domains -- [ ] put_attributes -- [ ] select - -## secretsmanager - 27% implemented -- [ ] cancel_rotate_secret -- [X] create_secret -- [ ] delete_secret -- [X] describe_secret -- [X] get_random_password -- [X] get_secret_value -- [ ] list_secret_version_ids -- [ ] list_secrets -- [ ] put_secret_value -- [ ] restore_secret -- [ ] rotate_secret -- [ ] tag_resource -- [ ] untag_resource -- [ ] update_secret -- [ ] update_secret_version_stage - -## serverlessrepo - 0% implemented -- [ ] create_application -- [ ] create_application_version -- [ ] create_cloud_formation_change_set -- [ ] delete_application -- [ ] get_application -- [ ] get_application_policy -- [ ] list_application_versions -- [ ] list_applications -- [ ] put_application_policy -- [ ] update_application - -## servicecatalog - 0% implemented -- [ ] accept_portfolio_share -- [ ] associate_principal_with_portfolio -- [ ] associate_product_with_portfolio -- [ ] associate_tag_option_with_resource -- [ ] copy_product -- [ ] create_constraint -- [ ] create_portfolio -- [ ] create_portfolio_share -- [ ] create_product -- [ ] create_provisioned_product_plan -- [ ] create_provisioning_artifact -- [ ] create_tag_option -- [ ] delete_constraint -- [ ] delete_portfolio -- [ ] 
delete_portfolio_share -- [ ] delete_product -- [ ] delete_provisioned_product_plan -- [ ] delete_provisioning_artifact -- [ ] delete_tag_option -- [ ] describe_constraint -- [ ] describe_copy_product_status -- [ ] describe_portfolio -- [ ] describe_product -- [ ] describe_product_as_admin -- [ ] describe_product_view -- [ ] describe_provisioned_product -- [ ] describe_provisioned_product_plan -- [ ] describe_provisioning_artifact -- [ ] describe_provisioning_parameters -- [ ] describe_record -- [ ] describe_tag_option -- [ ] disassociate_principal_from_portfolio -- [ ] disassociate_product_from_portfolio -- [ ] disassociate_tag_option_from_resource -- [ ] execute_provisioned_product_plan -- [ ] list_accepted_portfolio_shares -- [ ] list_constraints_for_portfolio -- [ ] list_launch_paths -- [ ] list_portfolio_access -- [ ] list_portfolios -- [ ] list_portfolios_for_product -- [ ] list_principals_for_portfolio -- [ ] list_provisioned_product_plans -- [ ] list_provisioning_artifacts -- [ ] list_record_history -- [ ] list_resources_for_tag_option -- [ ] list_tag_options -- [ ] provision_product -- [ ] reject_portfolio_share -- [ ] scan_provisioned_products -- [ ] search_products -- [ ] search_products_as_admin -- [ ] search_provisioned_products -- [ ] terminate_provisioned_product -- [ ] update_constraint -- [ ] update_portfolio -- [ ] update_product -- [ ] update_provisioned_product -- [ ] update_provisioning_artifact -- [ ] update_tag_option - -## servicediscovery - 0% implemented -- [ ] create_private_dns_namespace -- [ ] create_public_dns_namespace -- [ ] create_service -- [ ] delete_namespace -- [ ] delete_service -- [ ] deregister_instance -- [ ] get_instance -- [ ] get_instances_health_status -- [ ] get_namespace -- [ ] get_operation -- [ ] get_service -- [ ] list_instances -- [ ] list_namespaces -- [ ] list_operations -- [ ] list_services -- [ ] register_instance -- [ ] update_instance_custom_health_status -- [ ] update_service - -## ses - 11% implemented -- [ ] clone_receipt_rule_set -- [ ] create_configuration_set -- [ ] create_configuration_set_event_destination -- [ ] create_configuration_set_tracking_options -- [ ] create_custom_verification_email_template -- [ ] create_receipt_filter -- [ ] create_receipt_rule -- [ ] create_receipt_rule_set -- [ ] create_template -- [ ] delete_configuration_set -- [ ] delete_configuration_set_event_destination -- [ ] delete_configuration_set_tracking_options -- [ ] delete_custom_verification_email_template -- [X] delete_identity -- [ ] delete_identity_policy -- [ ] delete_receipt_filter -- [ ] delete_receipt_rule -- [ ] delete_receipt_rule_set -- [ ] delete_template -- [ ] delete_verified_email_address -- [ ] describe_active_receipt_rule_set -- [ ] describe_configuration_set -- [ ] describe_receipt_rule -- [ ] describe_receipt_rule_set -- [ ] get_account_sending_enabled -- [ ] get_custom_verification_email_template -- [ ] get_identity_dkim_attributes -- [ ] get_identity_mail_from_domain_attributes -- [ ] get_identity_notification_attributes -- [ ] get_identity_policies -- [ ] get_identity_verification_attributes -- [X] get_send_quota -- [ ] get_send_statistics -- [ ] get_template -- [ ] list_configuration_sets -- [ ] list_custom_verification_email_templates -- [X] list_identities -- [ ] list_identity_policies -- [ ] list_receipt_filters -- [ ] list_receipt_rule_sets -- [ ] list_templates -- [X] list_verified_email_addresses -- [ ] put_identity_policy -- [ ] reorder_receipt_rule_set -- [ ] send_bounce -- [ ] send_bulk_templated_email -- [ ] 
send_custom_verification_email -- [X] send_email -- [X] send_raw_email -- [ ] send_templated_email -- [ ] set_active_receipt_rule_set -- [ ] set_identity_dkim_enabled -- [ ] set_identity_feedback_forwarding_enabled -- [ ] set_identity_headers_in_notifications_enabled -- [ ] set_identity_mail_from_domain -- [ ] set_identity_notification_topic -- [ ] set_receipt_rule_position -- [ ] test_render_template -- [ ] update_account_sending_enabled -- [ ] update_configuration_set_event_destination -- [ ] update_configuration_set_reputation_metrics_enabled -- [ ] update_configuration_set_sending_enabled -- [ ] update_configuration_set_tracking_options -- [ ] update_custom_verification_email_template -- [ ] update_receipt_rule -- [ ] update_template -- [ ] verify_domain_dkim -- [ ] verify_domain_identity -- [X] verify_email_address -- [X] verify_email_identity - -## shield - 0% implemented -- [ ] create_protection -- [ ] create_subscription -- [ ] delete_protection -- [ ] delete_subscription -- [ ] describe_attack -- [ ] describe_protection -- [ ] describe_subscription -- [ ] get_subscription_state -- [ ] list_attacks -- [ ] list_protections - -## sms - 0% implemented -- [ ] create_replication_job -- [ ] delete_replication_job -- [ ] delete_server_catalog -- [ ] disassociate_connector -- [ ] get_connectors -- [ ] get_replication_jobs -- [ ] get_replication_runs -- [ ] get_servers -- [ ] import_server_catalog -- [ ] start_on_demand_replication_run -- [ ] update_replication_job - -## snowball - 0% implemented -- [ ] cancel_cluster -- [ ] cancel_job -- [ ] create_address -- [ ] create_cluster -- [ ] create_job -- [ ] describe_address -- [ ] describe_addresses -- [ ] describe_cluster -- [ ] describe_job -- [ ] get_job_manifest -- [ ] get_job_unlock_code -- [ ] get_snowball_usage -- [ ] list_cluster_jobs -- [ ] list_clusters -- [ ] list_jobs -- [ ] update_cluster -- [ ] update_job - -## sns - 53% implemented -- [ ] add_permission -- [ ] check_if_phone_number_is_opted_out -- [ ] confirm_subscription -- [X] create_platform_application -- [X] create_platform_endpoint -- [X] create_topic -- [X] delete_endpoint -- [X] delete_platform_application -- [X] delete_topic -- [ ] get_endpoint_attributes -- [ ] get_platform_application_attributes -- [ ] get_sms_attributes -- [X] get_subscription_attributes -- [ ] get_topic_attributes -- [X] list_endpoints_by_platform_application -- [ ] list_phone_numbers_opted_out -- [X] list_platform_applications -- [X] list_subscriptions -- [ ] list_subscriptions_by_topic -- [X] list_topics -- [ ] opt_in_phone_number -- [X] publish -- [ ] remove_permission -- [X] set_endpoint_attributes -- [ ] set_platform_application_attributes -- [ ] set_sms_attributes -- [X] set_subscription_attributes -- [ ] set_topic_attributes -- [X] subscribe -- [X] unsubscribe - -## sqs - 65% implemented -- [X] add_permission -- [X] change_message_visibility -- [ ] change_message_visibility_batch -- [X] create_queue -- [X] delete_message -- [ ] delete_message_batch -- [X] delete_queue -- [ ] get_queue_attributes -- [ ] get_queue_url -- [X] list_dead_letter_source_queues -- [ ] list_queue_tags -- [X] list_queues -- [X] purge_queue -- [ ] receive_message -- [X] remove_permission -- [X] send_message -- [ ] send_message_batch -- [X] set_queue_attributes -- [X] tag_queue -- [X] untag_queue - -## ssm - 11% implemented -- [X] add_tags_to_resource -- [ ] cancel_command -- [ ] create_activation -- [ ] create_association -- [ ] create_association_batch -- [ ] create_document -- [ ] create_maintenance_window -- [ ] 
create_patch_baseline -- [ ] create_resource_data_sync -- [ ] delete_activation -- [ ] delete_association -- [ ] delete_document -- [ ] delete_maintenance_window -- [X] delete_parameter -- [X] delete_parameters -- [ ] delete_patch_baseline -- [ ] delete_resource_data_sync -- [ ] deregister_managed_instance -- [ ] deregister_patch_baseline_for_patch_group -- [ ] deregister_target_from_maintenance_window -- [ ] deregister_task_from_maintenance_window -- [ ] describe_activations -- [ ] describe_association -- [ ] describe_automation_executions -- [ ] describe_automation_step_executions -- [ ] describe_available_patches -- [ ] describe_document -- [ ] describe_document_permission -- [ ] describe_effective_instance_associations -- [ ] describe_effective_patches_for_patch_baseline -- [ ] describe_instance_associations_status -- [ ] describe_instance_information -- [ ] describe_instance_patch_states -- [ ] describe_instance_patch_states_for_patch_group -- [ ] describe_instance_patches -- [ ] describe_maintenance_window_execution_task_invocations -- [ ] describe_maintenance_window_execution_tasks -- [ ] describe_maintenance_window_executions -- [ ] describe_maintenance_window_targets -- [ ] describe_maintenance_window_tasks -- [ ] describe_maintenance_windows -- [ ] describe_parameters -- [ ] describe_patch_baselines -- [ ] describe_patch_group_state -- [ ] describe_patch_groups -- [ ] get_automation_execution -- [ ] get_command_invocation -- [ ] get_default_patch_baseline -- [ ] get_deployable_patch_snapshot_for_instance -- [ ] get_document -- [ ] get_inventory -- [ ] get_inventory_schema -- [ ] get_maintenance_window -- [ ] get_maintenance_window_execution -- [ ] get_maintenance_window_execution_task -- [ ] get_maintenance_window_execution_task_invocation -- [ ] get_maintenance_window_task -- [X] get_parameter -- [ ] get_parameter_history -- [X] get_parameters -- [X] get_parameters_by_path -- [ ] get_patch_baseline -- [ ] get_patch_baseline_for_patch_group -- [ ] list_association_versions -- [ ] list_associations -- [ ] list_command_invocations -- [X] list_commands -- [ ] list_compliance_items -- [ ] list_compliance_summaries -- [ ] list_document_versions -- [ ] list_documents -- [ ] list_inventory_entries -- [ ] list_resource_compliance_summaries -- [ ] list_resource_data_sync -- [X] list_tags_for_resource -- [ ] modify_document_permission -- [ ] put_compliance_items -- [ ] put_inventory -- [X] put_parameter -- [ ] register_default_patch_baseline -- [ ] register_patch_baseline_for_patch_group -- [ ] register_target_with_maintenance_window -- [ ] register_task_with_maintenance_window -- [X] remove_tags_from_resource -- [ ] send_automation_signal -- [X] send_command -- [ ] start_automation_execution -- [ ] stop_automation_execution -- [ ] update_association -- [ ] update_association_status -- [ ] update_document -- [ ] update_document_default_version -- [ ] update_maintenance_window -- [ ] update_maintenance_window_target -- [ ] update_maintenance_window_task -- [ ] update_managed_instance_role -- [ ] update_patch_baseline - -## stepfunctions - 0% implemented -- [ ] create_activity -- [ ] create_state_machine -- [ ] delete_activity -- [ ] delete_state_machine -- [ ] describe_activity -- [ ] describe_execution -- [ ] describe_state_machine -- [ ] describe_state_machine_for_execution -- [ ] get_activity_task -- [ ] get_execution_history -- [ ] list_activities -- [ ] list_executions -- [ ] list_state_machines -- [ ] send_task_failure -- [ ] send_task_heartbeat -- [ ] send_task_success -- [ ] 
start_execution -- [ ] stop_execution -- [ ] update_state_machine - -## storagegateway - 0% implemented -- [ ] activate_gateway -- [ ] add_cache -- [ ] add_tags_to_resource -- [ ] add_upload_buffer -- [ ] add_working_storage -- [ ] cancel_archival -- [ ] cancel_retrieval -- [ ] create_cached_iscsi_volume -- [ ] create_nfs_file_share -- [ ] create_snapshot -- [ ] create_snapshot_from_volume_recovery_point -- [ ] create_stored_iscsi_volume -- [ ] create_tape_with_barcode -- [ ] create_tapes -- [ ] delete_bandwidth_rate_limit -- [ ] delete_chap_credentials -- [ ] delete_file_share -- [ ] delete_gateway -- [ ] delete_snapshot_schedule -- [ ] delete_tape -- [ ] delete_tape_archive -- [ ] delete_volume -- [ ] describe_bandwidth_rate_limit -- [ ] describe_cache -- [ ] describe_cached_iscsi_volumes -- [ ] describe_chap_credentials -- [ ] describe_gateway_information -- [ ] describe_maintenance_start_time -- [ ] describe_nfs_file_shares -- [ ] describe_snapshot_schedule -- [ ] describe_stored_iscsi_volumes -- [ ] describe_tape_archives -- [ ] describe_tape_recovery_points -- [ ] describe_tapes -- [ ] describe_upload_buffer -- [ ] describe_vtl_devices -- [ ] describe_working_storage -- [ ] disable_gateway -- [ ] list_file_shares -- [ ] list_gateways -- [ ] list_local_disks -- [ ] list_tags_for_resource -- [ ] list_tapes -- [ ] list_volume_initiators -- [ ] list_volume_recovery_points -- [ ] list_volumes -- [ ] notify_when_uploaded -- [ ] refresh_cache -- [ ] remove_tags_from_resource -- [ ] reset_cache -- [ ] retrieve_tape_archive -- [ ] retrieve_tape_recovery_point -- [ ] set_local_console_password -- [ ] shutdown_gateway -- [ ] start_gateway -- [ ] update_bandwidth_rate_limit -- [ ] update_chap_credentials -- [ ] update_gateway_information -- [ ] update_gateway_software_now -- [ ] update_maintenance_start_time -- [ ] update_nfs_file_share -- [ ] update_snapshot_schedule -- [ ] update_vtl_device_type - -## sts - 42% implemented -- [X] assume_role -- [ ] assume_role_with_saml -- [ ] assume_role_with_web_identity -- [ ] decode_authorization_message -- [ ] get_caller_identity -- [X] get_federation_token -- [X] get_session_token - -## support - 0% implemented -- [ ] add_attachments_to_set -- [ ] add_communication_to_case -- [ ] create_case -- [ ] describe_attachment -- [ ] describe_cases -- [ ] describe_communications -- [ ] describe_services -- [ ] describe_severity_levels -- [ ] describe_trusted_advisor_check_refresh_statuses -- [ ] describe_trusted_advisor_check_result -- [ ] describe_trusted_advisor_check_summaries -- [ ] describe_trusted_advisor_checks -- [ ] refresh_trusted_advisor_check -- [ ] resolve_case - -## swf - 58% implemented -- [ ] count_closed_workflow_executions -- [ ] count_open_workflow_executions -- [X] count_pending_activity_tasks -- [X] count_pending_decision_tasks -- [ ] deprecate_activity_type -- [X] deprecate_domain -- [ ] deprecate_workflow_type -- [ ] describe_activity_type -- [X] describe_domain -- [X] describe_workflow_execution -- [ ] describe_workflow_type -- [ ] get_workflow_execution_history -- [ ] list_activity_types -- [X] list_closed_workflow_executions -- [X] list_domains -- [X] list_open_workflow_executions -- [ ] list_workflow_types -- [X] poll_for_activity_task -- [X] poll_for_decision_task -- [X] record_activity_task_heartbeat -- [ ] register_activity_type -- [X] register_domain -- [ ] register_workflow_type -- [ ] request_cancel_workflow_execution -- [ ] respond_activity_task_canceled -- [X] respond_activity_task_completed -- [X] respond_activity_task_failed 
-- [X] respond_decision_task_completed -- [X] signal_workflow_execution -- [X] start_workflow_execution -- [X] terminate_workflow_execution - -## transcribe - 0% implemented -- [ ] create_vocabulary -- [ ] delete_vocabulary -- [ ] get_transcription_job -- [ ] get_vocabulary -- [ ] list_transcription_jobs -- [ ] list_vocabularies -- [ ] start_transcription_job -- [ ] update_vocabulary - -## translate - 0% implemented -- [ ] translate_text - -## waf - 0% implemented -- [ ] create_byte_match_set -- [ ] create_geo_match_set -- [ ] create_ip_set -- [ ] create_rate_based_rule -- [ ] create_regex_match_set -- [ ] create_regex_pattern_set -- [ ] create_rule -- [ ] create_rule_group -- [ ] create_size_constraint_set -- [ ] create_sql_injection_match_set -- [ ] create_web_acl -- [ ] create_xss_match_set -- [ ] delete_byte_match_set -- [ ] delete_geo_match_set -- [ ] delete_ip_set -- [ ] delete_permission_policy -- [ ] delete_rate_based_rule -- [ ] delete_regex_match_set -- [ ] delete_regex_pattern_set -- [ ] delete_rule -- [ ] delete_rule_group -- [ ] delete_size_constraint_set -- [ ] delete_sql_injection_match_set -- [ ] delete_web_acl -- [ ] delete_xss_match_set -- [ ] get_byte_match_set -- [ ] get_change_token -- [ ] get_change_token_status -- [ ] get_geo_match_set -- [ ] get_ip_set -- [ ] get_permission_policy -- [ ] get_rate_based_rule -- [ ] get_rate_based_rule_managed_keys -- [ ] get_regex_match_set -- [ ] get_regex_pattern_set -- [ ] get_rule -- [ ] get_rule_group -- [ ] get_sampled_requests -- [ ] get_size_constraint_set -- [ ] get_sql_injection_match_set -- [ ] get_web_acl -- [ ] get_xss_match_set -- [ ] list_activated_rules_in_rule_group -- [ ] list_byte_match_sets -- [ ] list_geo_match_sets -- [ ] list_ip_sets -- [ ] list_rate_based_rules -- [ ] list_regex_match_sets -- [ ] list_regex_pattern_sets -- [ ] list_rule_groups -- [ ] list_rules -- [ ] list_size_constraint_sets -- [ ] list_sql_injection_match_sets -- [ ] list_subscribed_rule_groups -- [ ] list_web_acls -- [ ] list_xss_match_sets -- [ ] put_permission_policy -- [ ] update_byte_match_set -- [ ] update_geo_match_set -- [ ] update_ip_set -- [ ] update_rate_based_rule -- [ ] update_regex_match_set -- [ ] update_regex_pattern_set -- [ ] update_rule -- [ ] update_rule_group -- [ ] update_size_constraint_set -- [ ] update_sql_injection_match_set -- [ ] update_web_acl -- [ ] update_xss_match_set - -## waf-regional - 0% implemented -- [ ] associate_web_acl -- [ ] create_byte_match_set -- [ ] create_geo_match_set -- [ ] create_ip_set -- [ ] create_rate_based_rule -- [ ] create_regex_match_set -- [ ] create_regex_pattern_set -- [ ] create_rule -- [ ] create_rule_group -- [ ] create_size_constraint_set -- [ ] create_sql_injection_match_set -- [ ] create_web_acl -- [ ] create_xss_match_set -- [ ] delete_byte_match_set -- [ ] delete_geo_match_set -- [ ] delete_ip_set -- [ ] delete_permission_policy -- [ ] delete_rate_based_rule -- [ ] delete_regex_match_set -- [ ] delete_regex_pattern_set -- [ ] delete_rule -- [ ] delete_rule_group -- [ ] delete_size_constraint_set -- [ ] delete_sql_injection_match_set -- [ ] delete_web_acl -- [ ] delete_xss_match_set -- [ ] disassociate_web_acl -- [ ] get_byte_match_set -- [ ] get_change_token -- [ ] get_change_token_status -- [ ] get_geo_match_set -- [ ] get_ip_set -- [ ] get_permission_policy -- [ ] get_rate_based_rule -- [ ] get_rate_based_rule_managed_keys -- [ ] get_regex_match_set -- [ ] get_regex_pattern_set -- [ ] get_rule -- [ ] get_rule_group -- [ ] get_sampled_requests -- [ ] 
get_size_constraint_set -- [ ] get_sql_injection_match_set -- [ ] get_web_acl -- [ ] get_web_acl_for_resource -- [ ] get_xss_match_set -- [ ] list_activated_rules_in_rule_group -- [ ] list_byte_match_sets -- [ ] list_geo_match_sets -- [ ] list_ip_sets -- [ ] list_rate_based_rules -- [ ] list_regex_match_sets -- [ ] list_regex_pattern_sets -- [ ] list_resources_for_web_acl -- [ ] list_rule_groups -- [ ] list_rules -- [ ] list_size_constraint_sets -- [ ] list_sql_injection_match_sets -- [ ] list_subscribed_rule_groups -- [ ] list_web_acls -- [ ] list_xss_match_sets -- [ ] put_permission_policy -- [ ] update_byte_match_set -- [ ] update_geo_match_set -- [ ] update_ip_set -- [ ] update_rate_based_rule -- [ ] update_regex_match_set -- [ ] update_regex_pattern_set -- [ ] update_rule -- [ ] update_rule_group -- [ ] update_size_constraint_set -- [ ] update_sql_injection_match_set -- [ ] update_web_acl -- [ ] update_xss_match_set - -## workdocs - 0% implemented -- [ ] abort_document_version_upload -- [ ] activate_user -- [ ] add_resource_permissions -- [ ] create_comment -- [ ] create_custom_metadata -- [ ] create_folder -- [ ] create_labels -- [ ] create_notification_subscription -- [ ] create_user -- [ ] deactivate_user -- [ ] delete_comment -- [ ] delete_custom_metadata -- [ ] delete_document -- [ ] delete_folder -- [ ] delete_folder_contents -- [ ] delete_labels -- [ ] delete_notification_subscription -- [ ] delete_user -- [ ] describe_activities -- [ ] describe_comments -- [ ] describe_document_versions -- [ ] describe_folder_contents -- [ ] describe_groups -- [ ] describe_notification_subscriptions -- [ ] describe_resource_permissions -- [ ] describe_root_folders -- [ ] describe_users -- [ ] get_current_user -- [ ] get_document -- [ ] get_document_path -- [ ] get_document_version -- [ ] get_folder -- [ ] get_folder_path -- [ ] initiate_document_version_upload -- [ ] remove_all_resource_permissions -- [ ] remove_resource_permission -- [ ] update_document -- [ ] update_document_version -- [ ] update_folder -- [ ] update_user - -## workmail - 0% implemented -- [ ] associate_delegate_to_resource -- [ ] associate_member_to_group -- [ ] create_alias -- [ ] create_group -- [ ] create_resource -- [ ] create_user -- [ ] delete_alias -- [ ] delete_group -- [ ] delete_mailbox_permissions -- [ ] delete_resource -- [ ] delete_user -- [ ] deregister_from_work_mail -- [ ] describe_group -- [ ] describe_organization -- [ ] describe_resource -- [ ] describe_user -- [ ] disassociate_delegate_from_resource -- [ ] disassociate_member_from_group -- [ ] list_aliases -- [ ] list_group_members -- [ ] list_groups -- [ ] list_mailbox_permissions -- [ ] list_organizations -- [ ] list_resource_delegates -- [ ] list_resources -- [ ] list_users -- [ ] put_mailbox_permissions -- [ ] register_to_work_mail -- [ ] reset_password -- [ ] update_primary_email_address -- [ ] update_resource - -## workspaces - 0% implemented -- [ ] create_tags -- [ ] create_workspaces -- [ ] delete_tags -- [ ] describe_tags -- [ ] describe_workspace_bundles -- [ ] describe_workspace_directories -- [ ] describe_workspaces -- [ ] describe_workspaces_connection_status -- [ ] modify_workspace_properties -- [ ] reboot_workspaces -- [ ] rebuild_workspaces -- [ ] start_workspaces -- [ ] stop_workspaces -- [ ] terminate_workspaces - -## xray - 0% implemented -- [ ] batch_get_traces -- [ ] get_service_graph -- [ ] get_trace_graph -- [ ] get_trace_summaries -- [ ] put_telemetry_records -- [ ] put_trace_segments + +## acm - 41% implemented +- [X] 
add_tags_to_certificate +- [X] delete_certificate +- [ ] describe_certificate +- [ ] export_certificate +- [X] get_certificate +- [ ] import_certificate +- [ ] list_certificates +- [ ] list_tags_for_certificate +- [X] remove_tags_from_certificate +- [X] request_certificate +- [ ] resend_validation_email +- [ ] update_certificate_options + +## acm-pca - 0% implemented +- [ ] create_certificate_authority +- [ ] create_certificate_authority_audit_report +- [ ] delete_certificate_authority +- [ ] describe_certificate_authority +- [ ] describe_certificate_authority_audit_report +- [ ] get_certificate +- [ ] get_certificate_authority_certificate +- [ ] get_certificate_authority_csr +- [ ] import_certificate_authority_certificate +- [ ] issue_certificate +- [ ] list_certificate_authorities +- [ ] list_tags +- [ ] restore_certificate_authority +- [ ] revoke_certificate +- [ ] tag_certificate_authority +- [ ] untag_certificate_authority +- [ ] update_certificate_authority + +## alexaforbusiness - 0% implemented +- [ ] associate_contact_with_address_book +- [ ] associate_device_with_room +- [ ] associate_skill_group_with_room +- [ ] create_address_book +- [ ] create_contact +- [ ] create_profile +- [ ] create_room +- [ ] create_skill_group +- [ ] create_user +- [ ] delete_address_book +- [ ] delete_contact +- [ ] delete_profile +- [ ] delete_room +- [ ] delete_room_skill_parameter +- [ ] delete_skill_group +- [ ] delete_user +- [ ] disassociate_contact_from_address_book +- [ ] disassociate_device_from_room +- [ ] disassociate_skill_group_from_room +- [ ] get_address_book +- [ ] get_contact +- [ ] get_device +- [ ] get_profile +- [ ] get_room +- [ ] get_room_skill_parameter +- [ ] get_skill_group +- [ ] list_device_events +- [ ] list_skills +- [ ] list_tags +- [ ] put_room_skill_parameter +- [ ] resolve_room +- [ ] revoke_invitation +- [ ] search_address_books +- [ ] search_contacts +- [ ] search_devices +- [ ] search_profiles +- [ ] search_rooms +- [ ] search_skill_groups +- [ ] search_users +- [ ] send_invitation +- [ ] start_device_sync +- [ ] tag_resource +- [ ] untag_resource +- [ ] update_address_book +- [ ] update_contact +- [ ] update_device +- [ ] update_profile +- [ ] update_room +- [ ] update_skill_group + +## apigateway - 24% implemented +- [ ] create_api_key +- [ ] create_authorizer +- [ ] create_base_path_mapping +- [X] create_deployment +- [ ] create_documentation_part +- [ ] create_documentation_version +- [ ] create_domain_name +- [ ] create_model +- [ ] create_request_validator +- [X] create_resource +- [X] create_rest_api +- [X] create_stage +- [X] create_usage_plan +- [X] create_usage_plan_key +- [ ] create_vpc_link +- [ ] delete_api_key +- [ ] delete_authorizer +- [ ] delete_base_path_mapping +- [ ] delete_client_certificate +- [X] delete_deployment +- [ ] delete_documentation_part +- [ ] delete_documentation_version +- [ ] delete_domain_name +- [ ] delete_gateway_response +- [X] delete_integration +- [X] delete_integration_response +- [ ] delete_method +- [X] delete_method_response +- [ ] delete_model +- [ ] delete_request_validator +- [X] delete_resource +- [X] delete_rest_api +- [ ] delete_stage +- [X] delete_usage_plan +- [X] delete_usage_plan_key +- [ ] delete_vpc_link +- [ ] flush_stage_authorizers_cache +- [ ] flush_stage_cache +- [ ] generate_client_certificate +- [ ] get_account +- [ ] get_api_key +- [ ] get_api_keys +- [ ] get_authorizer +- [ ] get_authorizers +- [ ] get_base_path_mapping +- [ ] get_base_path_mappings +- [ ] get_client_certificate +- [ ] 
get_client_certificates +- [X] get_deployment +- [X] get_deployments +- [ ] get_documentation_part +- [ ] get_documentation_parts +- [ ] get_documentation_version +- [ ] get_documentation_versions +- [ ] get_domain_name +- [ ] get_domain_names +- [ ] get_export +- [ ] get_gateway_response +- [ ] get_gateway_responses +- [X] get_integration +- [X] get_integration_response +- [X] get_method +- [X] get_method_response +- [ ] get_model +- [ ] get_model_template +- [ ] get_models +- [ ] get_request_validator +- [ ] get_request_validators +- [X] get_resource +- [ ] get_resources +- [X] get_rest_api +- [ ] get_rest_apis +- [ ] get_sdk +- [ ] get_sdk_type +- [ ] get_sdk_types +- [X] get_stage +- [X] get_stages +- [ ] get_tags +- [ ] get_usage +- [X] get_usage_plan +- [X] get_usage_plan_key +- [X] get_usage_plan_keys +- [X] get_usage_plans +- [ ] get_vpc_link +- [ ] get_vpc_links +- [ ] import_api_keys +- [ ] import_documentation_parts +- [ ] import_rest_api +- [ ] put_gateway_response +- [ ] put_integration +- [ ] put_integration_response +- [ ] put_method +- [ ] put_method_response +- [ ] put_rest_api +- [ ] tag_resource +- [ ] test_invoke_authorizer +- [ ] test_invoke_method +- [ ] untag_resource +- [ ] update_account +- [ ] update_api_key +- [ ] update_authorizer +- [ ] update_base_path_mapping +- [ ] update_client_certificate +- [ ] update_deployment +- [ ] update_documentation_part +- [ ] update_documentation_version +- [ ] update_domain_name +- [ ] update_gateway_response +- [ ] update_integration +- [ ] update_integration_response +- [ ] update_method +- [ ] update_method_response +- [ ] update_model +- [ ] update_request_validator +- [ ] update_resource +- [ ] update_rest_api +- [X] update_stage +- [ ] update_usage +- [ ] update_usage_plan +- [ ] update_vpc_link + +## application-autoscaling - 0% implemented +- [ ] delete_scaling_policy +- [ ] delete_scheduled_action +- [ ] deregister_scalable_target +- [ ] describe_scalable_targets +- [ ] describe_scaling_activities +- [ ] describe_scaling_policies +- [ ] describe_scheduled_actions +- [ ] put_scaling_policy +- [ ] put_scheduled_action +- [ ] register_scalable_target + +## appstream - 0% implemented +- [ ] associate_fleet +- [ ] copy_image +- [ ] create_directory_config +- [ ] create_fleet +- [ ] create_image_builder +- [ ] create_image_builder_streaming_url +- [ ] create_stack +- [ ] create_streaming_url +- [ ] delete_directory_config +- [ ] delete_fleet +- [ ] delete_image +- [ ] delete_image_builder +- [ ] delete_image_permissions +- [ ] delete_stack +- [ ] describe_directory_configs +- [ ] describe_fleets +- [ ] describe_image_builders +- [ ] describe_image_permissions +- [ ] describe_images +- [ ] describe_sessions +- [ ] describe_stacks +- [ ] disassociate_fleet +- [ ] expire_session +- [ ] list_associated_fleets +- [ ] list_associated_stacks +- [ ] list_tags_for_resource +- [ ] start_fleet +- [ ] start_image_builder +- [ ] stop_fleet +- [ ] stop_image_builder +- [ ] tag_resource +- [ ] untag_resource +- [ ] update_directory_config +- [ ] update_fleet +- [ ] update_image_permissions +- [ ] update_stack + +## appsync - 0% implemented +- [ ] create_api_key +- [ ] create_data_source +- [ ] create_graphql_api +- [ ] create_resolver +- [ ] create_type +- [ ] delete_api_key +- [ ] delete_data_source +- [ ] delete_graphql_api +- [ ] delete_resolver +- [ ] delete_type +- [ ] get_data_source +- [ ] get_graphql_api +- [ ] get_introspection_schema +- [ ] get_resolver +- [ ] get_schema_creation_status +- [ ] get_type +- [ ] list_api_keys +- [ ] 
list_data_sources +- [ ] list_graphql_apis +- [ ] list_resolvers +- [ ] list_types +- [ ] start_schema_creation +- [ ] update_api_key +- [ ] update_data_source +- [ ] update_graphql_api +- [ ] update_resolver +- [ ] update_type + +## athena - 0% implemented +- [ ] batch_get_named_query +- [ ] batch_get_query_execution +- [ ] create_named_query +- [ ] delete_named_query +- [ ] get_named_query +- [ ] get_query_execution +- [ ] get_query_results +- [ ] list_named_queries +- [ ] list_query_executions +- [ ] start_query_execution +- [ ] stop_query_execution + +## autoscaling - 44% implemented +- [X] attach_instances +- [X] attach_load_balancer_target_groups +- [X] attach_load_balancers +- [ ] complete_lifecycle_action +- [X] create_auto_scaling_group +- [X] create_launch_configuration +- [X] create_or_update_tags +- [X] delete_auto_scaling_group +- [X] delete_launch_configuration +- [ ] delete_lifecycle_hook +- [ ] delete_notification_configuration +- [X] delete_policy +- [ ] delete_scheduled_action +- [ ] delete_tags +- [ ] describe_account_limits +- [ ] describe_adjustment_types +- [X] describe_auto_scaling_groups +- [X] describe_auto_scaling_instances +- [ ] describe_auto_scaling_notification_types +- [X] describe_launch_configurations +- [ ] describe_lifecycle_hook_types +- [ ] describe_lifecycle_hooks +- [X] describe_load_balancer_target_groups +- [X] describe_load_balancers +- [ ] describe_metric_collection_types +- [ ] describe_notification_configurations +- [X] describe_policies +- [ ] describe_scaling_activities +- [ ] describe_scaling_process_types +- [ ] describe_scheduled_actions +- [ ] describe_tags +- [ ] describe_termination_policy_types +- [X] detach_instances +- [X] detach_load_balancer_target_groups +- [X] detach_load_balancers +- [ ] disable_metrics_collection +- [ ] enable_metrics_collection +- [ ] enter_standby +- [X] execute_policy +- [ ] exit_standby +- [ ] put_lifecycle_hook +- [ ] put_notification_configuration +- [ ] put_scaling_policy +- [ ] put_scheduled_update_group_action +- [ ] record_lifecycle_action_heartbeat +- [ ] resume_processes +- [X] set_desired_capacity +- [X] set_instance_health +- [ ] set_instance_protection +- [X] suspend_processes +- [ ] terminate_instance_in_auto_scaling_group +- [X] update_auto_scaling_group + +## autoscaling-plans - 0% implemented +- [ ] create_scaling_plan +- [ ] delete_scaling_plan +- [ ] describe_scaling_plan_resources +- [ ] describe_scaling_plans +- [ ] update_scaling_plan + +## batch - 93% implemented +- [ ] cancel_job +- [X] create_compute_environment +- [X] create_job_queue +- [X] delete_compute_environment +- [X] delete_job_queue +- [X] deregister_job_definition +- [X] describe_compute_environments +- [X] describe_job_definitions +- [X] describe_job_queues +- [X] describe_jobs +- [X] list_jobs +- [X] register_job_definition +- [X] submit_job +- [X] terminate_job +- [X] update_compute_environment +- [X] update_job_queue + +## budgets - 0% implemented +- [ ] create_budget +- [ ] create_notification +- [ ] create_subscriber +- [ ] delete_budget +- [ ] delete_notification +- [ ] delete_subscriber +- [ ] describe_budget +- [ ] describe_budgets +- [ ] describe_notifications_for_budget +- [ ] describe_subscribers_for_notification +- [ ] update_budget +- [ ] update_notification +- [ ] update_subscriber + +## ce - 0% implemented +- [ ] get_cost_and_usage +- [ ] get_dimension_values +- [ ] get_reservation_coverage +- [ ] get_reservation_purchase_recommendation +- [ ] get_reservation_utilization +- [ ] get_tags + +## cloud9 - 0% 
implemented +- [ ] create_environment_ec2 +- [ ] create_environment_membership +- [ ] delete_environment +- [ ] delete_environment_membership +- [ ] describe_environment_memberships +- [ ] describe_environment_status +- [ ] describe_environments +- [ ] list_environments +- [ ] update_environment +- [ ] update_environment_membership + +## clouddirectory - 0% implemented +- [ ] add_facet_to_object +- [ ] apply_schema +- [ ] attach_object +- [ ] attach_policy +- [ ] attach_to_index +- [ ] attach_typed_link +- [ ] batch_read +- [ ] batch_write +- [ ] create_directory +- [ ] create_facet +- [ ] create_index +- [ ] create_object +- [ ] create_schema +- [ ] create_typed_link_facet +- [ ] delete_directory +- [ ] delete_facet +- [ ] delete_object +- [ ] delete_schema +- [ ] delete_typed_link_facet +- [ ] detach_from_index +- [ ] detach_object +- [ ] detach_policy +- [ ] detach_typed_link +- [ ] disable_directory +- [ ] enable_directory +- [ ] get_applied_schema_version +- [ ] get_directory +- [ ] get_facet +- [ ] get_link_attributes +- [ ] get_object_attributes +- [ ] get_object_information +- [ ] get_schema_as_json +- [ ] get_typed_link_facet_information +- [ ] list_applied_schema_arns +- [ ] list_attached_indices +- [ ] list_development_schema_arns +- [ ] list_directories +- [ ] list_facet_attributes +- [ ] list_facet_names +- [ ] list_incoming_typed_links +- [ ] list_index +- [ ] list_managed_schema_arns +- [ ] list_object_attributes +- [ ] list_object_children +- [ ] list_object_parent_paths +- [ ] list_object_parents +- [ ] list_object_policies +- [ ] list_outgoing_typed_links +- [ ] list_policy_attachments +- [ ] list_published_schema_arns +- [ ] list_tags_for_resource +- [ ] list_typed_link_facet_attributes +- [ ] list_typed_link_facet_names +- [ ] lookup_policy +- [ ] publish_schema +- [ ] put_schema_from_json +- [ ] remove_facet_from_object +- [ ] tag_resource +- [ ] untag_resource +- [ ] update_facet +- [ ] update_link_attributes +- [ ] update_object_attributes +- [ ] update_schema +- [ ] update_typed_link_facet +- [ ] upgrade_applied_schema +- [ ] upgrade_published_schema + +## cloudformation - 21% implemented +- [ ] cancel_update_stack +- [ ] continue_update_rollback +- [X] create_change_set +- [X] create_stack +- [ ] create_stack_instances +- [ ] create_stack_set +- [ ] delete_change_set +- [X] delete_stack +- [ ] delete_stack_instances +- [ ] delete_stack_set +- [ ] describe_account_limits +- [ ] describe_change_set +- [ ] describe_stack_events +- [ ] describe_stack_instance +- [ ] describe_stack_resource +- [ ] describe_stack_resources +- [ ] describe_stack_set +- [ ] describe_stack_set_operation +- [X] describe_stacks +- [ ] estimate_template_cost +- [X] execute_change_set +- [ ] get_stack_policy +- [ ] get_template +- [ ] get_template_summary +- [ ] list_change_sets +- [X] list_exports +- [ ] list_imports +- [ ] list_stack_instances +- [X] list_stack_resources +- [ ] list_stack_set_operation_results +- [ ] list_stack_set_operations +- [ ] list_stack_sets +- [X] list_stacks +- [ ] set_stack_policy +- [ ] signal_resource +- [ ] stop_stack_set_operation +- [X] update_stack +- [ ] update_stack_instances +- [ ] update_stack_set +- [ ] update_termination_protection +- [ ] validate_template + +## cloudfront - 0% implemented +- [ ] create_cloud_front_origin_access_identity +- [ ] create_distribution +- [ ] create_distribution_with_tags +- [ ] create_field_level_encryption_config +- [ ] create_field_level_encryption_profile +- [ ] create_invalidation +- [ ] create_public_key +- [ ] 
create_streaming_distribution +- [ ] create_streaming_distribution_with_tags +- [ ] delete_cloud_front_origin_access_identity +- [ ] delete_distribution +- [ ] delete_field_level_encryption_config +- [ ] delete_field_level_encryption_profile +- [ ] delete_public_key +- [ ] delete_streaming_distribution +- [ ] get_cloud_front_origin_access_identity +- [ ] get_cloud_front_origin_access_identity_config +- [ ] get_distribution +- [ ] get_distribution_config +- [ ] get_field_level_encryption +- [ ] get_field_level_encryption_config +- [ ] get_field_level_encryption_profile +- [ ] get_field_level_encryption_profile_config +- [ ] get_invalidation +- [ ] get_public_key +- [ ] get_public_key_config +- [ ] get_streaming_distribution +- [ ] get_streaming_distribution_config +- [ ] list_cloud_front_origin_access_identities +- [ ] list_distributions +- [ ] list_distributions_by_web_acl_id +- [ ] list_field_level_encryption_configs +- [ ] list_field_level_encryption_profiles +- [ ] list_invalidations +- [ ] list_public_keys +- [ ] list_streaming_distributions +- [ ] list_tags_for_resource +- [ ] tag_resource +- [ ] untag_resource +- [ ] update_cloud_front_origin_access_identity +- [ ] update_distribution +- [ ] update_field_level_encryption_config +- [ ] update_field_level_encryption_profile +- [ ] update_public_key +- [ ] update_streaming_distribution + +## cloudhsm - 0% implemented +- [ ] add_tags_to_resource +- [ ] create_hapg +- [ ] create_hsm +- [ ] create_luna_client +- [ ] delete_hapg +- [ ] delete_hsm +- [ ] delete_luna_client +- [ ] describe_hapg +- [ ] describe_hsm +- [ ] describe_luna_client +- [ ] get_config +- [ ] list_available_zones +- [ ] list_hapgs +- [ ] list_hsms +- [ ] list_luna_clients +- [ ] list_tags_for_resource +- [ ] modify_hapg +- [ ] modify_hsm +- [ ] modify_luna_client +- [ ] remove_tags_from_resource + +## cloudhsmv2 - 0% implemented +- [ ] copy_backup_to_region +- [ ] create_cluster +- [ ] create_hsm +- [ ] delete_cluster +- [ ] delete_hsm +- [ ] describe_backups +- [ ] describe_clusters +- [ ] initialize_cluster +- [ ] list_tags +- [ ] tag_resource +- [ ] untag_resource + +## cloudsearch - 0% implemented +- [ ] build_suggesters +- [ ] create_domain +- [ ] define_analysis_scheme +- [ ] define_expression +- [ ] define_index_field +- [ ] define_suggester +- [ ] delete_analysis_scheme +- [ ] delete_domain +- [ ] delete_expression +- [ ] delete_index_field +- [ ] delete_suggester +- [ ] describe_analysis_schemes +- [ ] describe_availability_options +- [ ] describe_domains +- [ ] describe_expressions +- [ ] describe_index_fields +- [ ] describe_scaling_parameters +- [ ] describe_service_access_policies +- [ ] describe_suggesters +- [ ] index_documents +- [ ] list_domain_names +- [ ] update_availability_options +- [ ] update_scaling_parameters +- [ ] update_service_access_policies + +## cloudsearchdomain - 0% implemented +- [ ] search +- [ ] suggest +- [ ] upload_documents + +## cloudtrail - 0% implemented +- [ ] add_tags +- [ ] create_trail +- [ ] delete_trail +- [ ] describe_trails +- [ ] get_event_selectors +- [ ] get_trail_status +- [ ] list_public_keys +- [ ] list_tags +- [ ] lookup_events +- [ ] put_event_selectors +- [ ] remove_tags +- [ ] start_logging +- [ ] stop_logging +- [ ] update_trail + +## cloudwatch - 56% implemented +- [X] delete_alarms +- [X] delete_dashboards +- [ ] describe_alarm_history +- [ ] describe_alarms +- [ ] describe_alarms_for_metric +- [ ] disable_alarm_actions +- [ ] enable_alarm_actions +- [X] get_dashboard +- [ ] get_metric_data +- [X] 
get_metric_statistics +- [X] list_dashboards +- [ ] list_metrics +- [X] put_dashboard +- [X] put_metric_alarm +- [X] put_metric_data +- [X] set_alarm_state + +## codebuild - 0% implemented +- [ ] batch_delete_builds +- [ ] batch_get_builds +- [ ] batch_get_projects +- [ ] create_project +- [ ] create_webhook +- [ ] delete_project +- [ ] delete_webhook +- [ ] invalidate_project_cache +- [ ] list_builds +- [ ] list_builds_for_project +- [ ] list_curated_environment_images +- [ ] list_projects +- [ ] start_build +- [ ] stop_build +- [ ] update_project +- [ ] update_webhook + +## codecommit - 0% implemented +- [ ] batch_get_repositories +- [ ] create_branch +- [ ] create_pull_request +- [ ] create_repository +- [ ] delete_branch +- [ ] delete_comment_content +- [ ] delete_repository +- [ ] describe_pull_request_events +- [ ] get_blob +- [ ] get_branch +- [ ] get_comment +- [ ] get_comments_for_compared_commit +- [ ] get_comments_for_pull_request +- [ ] get_commit +- [ ] get_differences +- [ ] get_merge_conflicts +- [ ] get_pull_request +- [ ] get_repository +- [ ] get_repository_triggers +- [ ] list_branches +- [ ] list_pull_requests +- [ ] list_repositories +- [ ] merge_pull_request_by_fast_forward +- [ ] post_comment_for_compared_commit +- [ ] post_comment_for_pull_request +- [ ] post_comment_reply +- [ ] put_file +- [ ] put_repository_triggers +- [ ] test_repository_triggers +- [ ] update_comment +- [ ] update_default_branch +- [ ] update_pull_request_description +- [ ] update_pull_request_status +- [ ] update_pull_request_title +- [ ] update_repository_description +- [ ] update_repository_name + +## codedeploy - 0% implemented +- [ ] add_tags_to_on_premises_instances +- [ ] batch_get_application_revisions +- [ ] batch_get_applications +- [ ] batch_get_deployment_groups +- [ ] batch_get_deployment_instances +- [ ] batch_get_deployments +- [ ] batch_get_on_premises_instances +- [ ] continue_deployment +- [ ] create_application +- [ ] create_deployment +- [ ] create_deployment_config +- [ ] create_deployment_group +- [ ] delete_application +- [ ] delete_deployment_config +- [ ] delete_deployment_group +- [ ] delete_git_hub_account_token +- [ ] deregister_on_premises_instance +- [ ] get_application +- [ ] get_application_revision +- [ ] get_deployment +- [ ] get_deployment_config +- [ ] get_deployment_group +- [ ] get_deployment_instance +- [ ] get_on_premises_instance +- [ ] list_application_revisions +- [ ] list_applications +- [ ] list_deployment_configs +- [ ] list_deployment_groups +- [ ] list_deployment_instances +- [ ] list_deployments +- [ ] list_git_hub_account_token_names +- [ ] list_on_premises_instances +- [ ] put_lifecycle_event_hook_execution_status +- [ ] register_application_revision +- [ ] register_on_premises_instance +- [ ] remove_tags_from_on_premises_instances +- [ ] skip_wait_time_for_instance_termination +- [ ] stop_deployment +- [ ] update_application +- [ ] update_deployment_group + +## codepipeline - 0% implemented +- [ ] acknowledge_job +- [ ] acknowledge_third_party_job +- [ ] create_custom_action_type +- [ ] create_pipeline +- [ ] delete_custom_action_type +- [ ] delete_pipeline +- [ ] delete_webhook +- [ ] deregister_webhook_with_third_party +- [ ] disable_stage_transition +- [ ] enable_stage_transition +- [ ] get_job_details +- [ ] get_pipeline +- [ ] get_pipeline_execution +- [ ] get_pipeline_state +- [ ] get_third_party_job_details +- [ ] list_action_types +- [ ] list_pipeline_executions +- [ ] list_pipelines +- [ ] list_webhooks +- [ ] poll_for_jobs +- [ ] 
poll_for_third_party_jobs +- [ ] put_action_revision +- [ ] put_approval_result +- [ ] put_job_failure_result +- [ ] put_job_success_result +- [ ] put_third_party_job_failure_result +- [ ] put_third_party_job_success_result +- [ ] put_webhook +- [ ] register_webhook_with_third_party +- [ ] retry_stage_execution +- [ ] start_pipeline_execution +- [ ] update_pipeline + +## codestar - 0% implemented +- [ ] associate_team_member +- [ ] create_project +- [ ] create_user_profile +- [ ] delete_project +- [ ] delete_user_profile +- [ ] describe_project +- [ ] describe_user_profile +- [ ] disassociate_team_member +- [ ] list_projects +- [ ] list_resources +- [ ] list_tags_for_project +- [ ] list_team_members +- [ ] list_user_profiles +- [ ] tag_project +- [ ] untag_project +- [ ] update_project +- [ ] update_team_member +- [ ] update_user_profile + +## cognito-identity - 22% implemented +- [X] create_identity_pool +- [ ] delete_identities +- [ ] delete_identity_pool +- [ ] describe_identity +- [ ] describe_identity_pool +- [X] get_credentials_for_identity +- [X] get_id +- [ ] get_identity_pool_roles +- [ ] get_open_id_token +- [X] get_open_id_token_for_developer_identity +- [ ] list_identities +- [ ] list_identity_pools +- [ ] lookup_developer_identity +- [ ] merge_developer_identities +- [ ] set_identity_pool_roles +- [ ] unlink_developer_identity +- [ ] unlink_identity +- [ ] update_identity_pool + +## cognito-idp - 25% implemented +- [ ] add_custom_attributes +- [ ] admin_add_user_to_group +- [ ] admin_confirm_sign_up +- [X] admin_create_user +- [X] admin_delete_user +- [ ] admin_delete_user_attributes +- [ ] admin_disable_provider_for_user +- [ ] admin_disable_user +- [ ] admin_enable_user +- [ ] admin_forget_device +- [ ] admin_get_device +- [X] admin_get_user +- [X] admin_initiate_auth +- [ ] admin_link_provider_for_user +- [ ] admin_list_devices +- [ ] admin_list_groups_for_user +- [ ] admin_list_user_auth_events +- [ ] admin_remove_user_from_group +- [ ] admin_reset_user_password +- [ ] admin_respond_to_auth_challenge +- [ ] admin_set_user_mfa_preference +- [ ] admin_set_user_settings +- [ ] admin_update_auth_event_feedback +- [ ] admin_update_device_status +- [ ] admin_update_user_attributes +- [ ] admin_user_global_sign_out +- [ ] associate_software_token +- [X] change_password +- [ ] confirm_device +- [X] confirm_forgot_password +- [ ] confirm_sign_up +- [ ] create_group +- [X] create_identity_provider +- [ ] create_resource_server +- [ ] create_user_import_job +- [X] create_user_pool +- [X] create_user_pool_client +- [X] create_user_pool_domain +- [ ] delete_group +- [X] delete_identity_provider +- [ ] delete_resource_server +- [ ] delete_user +- [ ] delete_user_attributes +- [X] delete_user_pool +- [X] delete_user_pool_client +- [X] delete_user_pool_domain +- [X] describe_identity_provider +- [ ] describe_resource_server +- [ ] describe_risk_configuration +- [ ] describe_user_import_job +- [X] describe_user_pool +- [X] describe_user_pool_client +- [X] describe_user_pool_domain +- [ ] forget_device +- [ ] forgot_password +- [ ] get_csv_header +- [ ] get_device +- [ ] get_group +- [ ] get_identity_provider_by_identifier +- [ ] get_signing_certificate +- [ ] get_ui_customization +- [ ] get_user +- [ ] get_user_attribute_verification_code +- [ ] get_user_pool_mfa_config +- [ ] global_sign_out +- [ ] initiate_auth +- [ ] list_devices +- [ ] list_groups +- [X] list_identity_providers +- [ ] list_resource_servers +- [ ] list_user_import_jobs +- [X] list_user_pool_clients +- [X] 
list_user_pools +- [X] list_users +- [ ] list_users_in_group +- [ ] resend_confirmation_code +- [X] respond_to_auth_challenge +- [ ] set_risk_configuration +- [ ] set_ui_customization +- [ ] set_user_mfa_preference +- [ ] set_user_pool_mfa_config +- [ ] set_user_settings +- [ ] sign_up +- [ ] start_user_import_job +- [ ] stop_user_import_job +- [ ] update_auth_event_feedback +- [ ] update_device_status +- [ ] update_group +- [ ] update_identity_provider +- [ ] update_resource_server +- [ ] update_user_attributes +- [ ] update_user_pool +- [X] update_user_pool_client +- [ ] verify_software_token +- [ ] verify_user_attribute + +## cognito-sync - 0% implemented +- [ ] bulk_publish +- [ ] delete_dataset +- [ ] describe_dataset +- [ ] describe_identity_pool_usage +- [ ] describe_identity_usage +- [ ] get_bulk_publish_details +- [ ] get_cognito_events +- [ ] get_identity_pool_configuration +- [ ] list_datasets +- [ ] list_identity_pool_usage +- [ ] list_records +- [ ] register_device +- [ ] set_cognito_events +- [ ] set_identity_pool_configuration +- [ ] subscribe_to_dataset +- [ ] unsubscribe_from_dataset +- [ ] update_records + +## comprehend - 0% implemented +- [ ] batch_detect_dominant_language +- [ ] batch_detect_entities +- [ ] batch_detect_key_phrases +- [ ] batch_detect_sentiment +- [ ] batch_detect_syntax +- [ ] describe_dominant_language_detection_job +- [ ] describe_entities_detection_job +- [ ] describe_key_phrases_detection_job +- [ ] describe_sentiment_detection_job +- [ ] describe_topics_detection_job +- [ ] detect_dominant_language +- [ ] detect_entities +- [ ] detect_key_phrases +- [ ] detect_sentiment +- [ ] detect_syntax +- [ ] list_dominant_language_detection_jobs +- [ ] list_entities_detection_jobs +- [ ] list_key_phrases_detection_jobs +- [ ] list_sentiment_detection_jobs +- [ ] list_topics_detection_jobs +- [ ] start_dominant_language_detection_job +- [ ] start_entities_detection_job +- [ ] start_key_phrases_detection_job +- [ ] start_sentiment_detection_job +- [ ] start_topics_detection_job +- [ ] stop_dominant_language_detection_job +- [ ] stop_entities_detection_job +- [ ] stop_key_phrases_detection_job +- [ ] stop_sentiment_detection_job + +## config - 0% implemented +- [ ] batch_get_resource_config +- [ ] delete_aggregation_authorization +- [ ] delete_config_rule +- [ ] delete_configuration_aggregator +- [ ] delete_configuration_recorder +- [ ] delete_delivery_channel +- [ ] delete_evaluation_results +- [ ] delete_pending_aggregation_request +- [ ] delete_retention_configuration +- [ ] deliver_config_snapshot +- [ ] describe_aggregate_compliance_by_config_rules +- [ ] describe_aggregation_authorizations +- [ ] describe_compliance_by_config_rule +- [ ] describe_compliance_by_resource +- [ ] describe_config_rule_evaluation_status +- [ ] describe_config_rules +- [ ] describe_configuration_aggregator_sources_status +- [ ] describe_configuration_aggregators +- [ ] describe_configuration_recorder_status +- [ ] describe_configuration_recorders +- [ ] describe_delivery_channel_status +- [ ] describe_delivery_channels +- [ ] describe_pending_aggregation_requests +- [ ] describe_retention_configurations +- [ ] get_aggregate_compliance_details_by_config_rule +- [ ] get_aggregate_config_rule_compliance_summary +- [ ] get_compliance_details_by_config_rule +- [ ] get_compliance_details_by_resource +- [ ] get_compliance_summary_by_config_rule +- [ ] get_compliance_summary_by_resource_type +- [ ] get_discovered_resource_counts +- [ ] get_resource_config_history +- [ ] 
list_discovered_resources +- [ ] put_aggregation_authorization +- [ ] put_config_rule +- [ ] put_configuration_aggregator +- [ ] put_configuration_recorder +- [ ] put_delivery_channel +- [ ] put_evaluations +- [ ] put_retention_configuration +- [ ] start_config_rules_evaluation +- [ ] start_configuration_recorder +- [ ] stop_configuration_recorder + +## connect - 0% implemented +- [ ] create_user +- [ ] delete_user +- [ ] describe_user +- [ ] describe_user_hierarchy_group +- [ ] describe_user_hierarchy_structure +- [ ] get_federation_token +- [ ] list_routing_profiles +- [ ] list_security_profiles +- [ ] list_user_hierarchy_groups +- [ ] list_users +- [ ] start_outbound_voice_contact +- [ ] stop_contact +- [ ] update_user_hierarchy +- [ ] update_user_identity_info +- [ ] update_user_phone_config +- [ ] update_user_routing_profile +- [ ] update_user_security_profiles + +## cur - 0% implemented +- [ ] delete_report_definition +- [ ] describe_report_definitions +- [ ] put_report_definition + +## datapipeline - 42% implemented +- [X] activate_pipeline +- [ ] add_tags +- [X] create_pipeline +- [ ] deactivate_pipeline +- [X] delete_pipeline +- [X] describe_objects +- [X] describe_pipelines +- [ ] evaluate_expression +- [X] get_pipeline_definition +- [X] list_pipelines +- [ ] poll_for_task +- [X] put_pipeline_definition +- [ ] query_objects +- [ ] remove_tags +- [ ] report_task_progress +- [ ] report_task_runner_heartbeat +- [ ] set_status +- [ ] set_task_status +- [ ] validate_pipeline_definition + +## dax - 0% implemented +- [ ] create_cluster +- [ ] create_parameter_group +- [ ] create_subnet_group +- [ ] decrease_replication_factor +- [ ] delete_cluster +- [ ] delete_parameter_group +- [ ] delete_subnet_group +- [ ] describe_clusters +- [ ] describe_default_parameters +- [ ] describe_events +- [ ] describe_parameter_groups +- [ ] describe_parameters +- [ ] describe_subnet_groups +- [ ] increase_replication_factor +- [ ] list_tags +- [ ] reboot_node +- [ ] tag_resource +- [ ] untag_resource +- [ ] update_cluster +- [ ] update_parameter_group +- [ ] update_subnet_group + +## devicefarm - 0% implemented +- [ ] create_device_pool +- [ ] create_instance_profile +- [ ] create_network_profile +- [ ] create_project +- [ ] create_remote_access_session +- [ ] create_upload +- [ ] create_vpce_configuration +- [ ] delete_device_pool +- [ ] delete_instance_profile +- [ ] delete_network_profile +- [ ] delete_project +- [ ] delete_remote_access_session +- [ ] delete_run +- [ ] delete_upload +- [ ] delete_vpce_configuration +- [ ] get_account_settings +- [ ] get_device +- [ ] get_device_instance +- [ ] get_device_pool +- [ ] get_device_pool_compatibility +- [ ] get_instance_profile +- [ ] get_job +- [ ] get_network_profile +- [ ] get_offering_status +- [ ] get_project +- [ ] get_remote_access_session +- [ ] get_run +- [ ] get_suite +- [ ] get_test +- [ ] get_upload +- [ ] get_vpce_configuration +- [ ] install_to_remote_access_session +- [ ] list_artifacts +- [ ] list_device_instances +- [ ] list_device_pools +- [ ] list_devices +- [ ] list_instance_profiles +- [ ] list_jobs +- [ ] list_network_profiles +- [ ] list_offering_promotions +- [ ] list_offering_transactions +- [ ] list_offerings +- [ ] list_projects +- [ ] list_remote_access_sessions +- [ ] list_runs +- [ ] list_samples +- [ ] list_suites +- [ ] list_tests +- [ ] list_unique_problems +- [ ] list_uploads +- [ ] list_vpce_configurations +- [ ] purchase_offering +- [ ] renew_offering +- [ ] schedule_run +- [ ] stop_remote_access_session +- [ ] 
stop_run +- [ ] update_device_instance +- [ ] update_device_pool +- [ ] update_instance_profile +- [ ] update_network_profile +- [ ] update_project +- [ ] update_vpce_configuration + +## directconnect - 0% implemented +- [ ] allocate_connection_on_interconnect +- [ ] allocate_hosted_connection +- [ ] allocate_private_virtual_interface +- [ ] allocate_public_virtual_interface +- [ ] associate_connection_with_lag +- [ ] associate_hosted_connection +- [ ] associate_virtual_interface +- [ ] confirm_connection +- [ ] confirm_private_virtual_interface +- [ ] confirm_public_virtual_interface +- [ ] create_bgp_peer +- [ ] create_connection +- [ ] create_direct_connect_gateway +- [ ] create_direct_connect_gateway_association +- [ ] create_interconnect +- [ ] create_lag +- [ ] create_private_virtual_interface +- [ ] create_public_virtual_interface +- [ ] delete_bgp_peer +- [ ] delete_connection +- [ ] delete_direct_connect_gateway +- [ ] delete_direct_connect_gateway_association +- [ ] delete_interconnect +- [ ] delete_lag +- [ ] delete_virtual_interface +- [ ] describe_connection_loa +- [ ] describe_connections +- [ ] describe_connections_on_interconnect +- [ ] describe_direct_connect_gateway_associations +- [ ] describe_direct_connect_gateway_attachments +- [ ] describe_direct_connect_gateways +- [ ] describe_hosted_connections +- [ ] describe_interconnect_loa +- [ ] describe_interconnects +- [ ] describe_lags +- [ ] describe_loa +- [ ] describe_locations +- [ ] describe_tags +- [ ] describe_virtual_gateways +- [ ] describe_virtual_interfaces +- [ ] disassociate_connection_from_lag +- [ ] tag_resource +- [ ] untag_resource +- [ ] update_lag + +## discovery - 0% implemented +- [ ] associate_configuration_items_to_application +- [ ] create_application +- [ ] create_tags +- [ ] delete_applications +- [ ] delete_tags +- [ ] describe_agents +- [ ] describe_configurations +- [ ] describe_export_configurations +- [ ] describe_export_tasks +- [ ] describe_tags +- [ ] disassociate_configuration_items_from_application +- [ ] export_configurations +- [ ] get_discovery_summary +- [ ] list_configurations +- [ ] list_server_neighbors +- [ ] start_data_collection_by_agent_ids +- [ ] start_export_task +- [ ] stop_data_collection_by_agent_ids +- [ ] update_application + +## dlm - 0% implemented +- [ ] create_lifecycle_policy +- [ ] delete_lifecycle_policy +- [ ] get_lifecycle_policies +- [ ] get_lifecycle_policy +- [ ] update_lifecycle_policy + +## dms - 0% implemented +- [ ] add_tags_to_resource +- [ ] create_endpoint +- [ ] create_event_subscription +- [ ] create_replication_instance +- [ ] create_replication_subnet_group +- [ ] create_replication_task +- [ ] delete_certificate +- [ ] delete_endpoint +- [ ] delete_event_subscription +- [ ] delete_replication_instance +- [ ] delete_replication_subnet_group +- [ ] delete_replication_task +- [ ] describe_account_attributes +- [ ] describe_certificates +- [ ] describe_connections +- [ ] describe_endpoint_types +- [ ] describe_endpoints +- [ ] describe_event_categories +- [ ] describe_event_subscriptions +- [ ] describe_events +- [ ] describe_orderable_replication_instances +- [ ] describe_refresh_schemas_status +- [ ] describe_replication_instance_task_logs +- [ ] describe_replication_instances +- [ ] describe_replication_subnet_groups +- [ ] describe_replication_task_assessment_results +- [ ] describe_replication_tasks +- [ ] describe_schemas +- [ ] describe_table_statistics +- [ ] import_certificate +- [ ] list_tags_for_resource +- [ ] modify_endpoint +- [ ] 
modify_event_subscription +- [ ] modify_replication_instance +- [ ] modify_replication_subnet_group +- [ ] modify_replication_task +- [ ] reboot_replication_instance +- [ ] refresh_schemas +- [ ] reload_tables +- [ ] remove_tags_from_resource +- [ ] start_replication_task +- [ ] start_replication_task_assessment +- [ ] stop_replication_task +- [ ] test_connection + +## ds - 0% implemented +- [ ] add_ip_routes +- [ ] add_tags_to_resource +- [ ] cancel_schema_extension +- [ ] connect_directory +- [ ] create_alias +- [ ] create_computer +- [ ] create_conditional_forwarder +- [ ] create_directory +- [ ] create_microsoft_ad +- [ ] create_snapshot +- [ ] create_trust +- [ ] delete_conditional_forwarder +- [ ] delete_directory +- [ ] delete_snapshot +- [ ] delete_trust +- [ ] deregister_event_topic +- [ ] describe_conditional_forwarders +- [ ] describe_directories +- [ ] describe_domain_controllers +- [ ] describe_event_topics +- [ ] describe_snapshots +- [ ] describe_trusts +- [ ] disable_radius +- [ ] disable_sso +- [ ] enable_radius +- [ ] enable_sso +- [ ] get_directory_limits +- [ ] get_snapshot_limits +- [ ] list_ip_routes +- [ ] list_schema_extensions +- [ ] list_tags_for_resource +- [ ] register_event_topic +- [ ] remove_ip_routes +- [ ] remove_tags_from_resource +- [ ] reset_user_password +- [ ] restore_from_snapshot +- [ ] start_schema_extension +- [ ] update_conditional_forwarder +- [ ] update_number_of_domain_controllers +- [ ] update_radius +- [ ] verify_trust + +## dynamodb - 21% implemented +- [ ] batch_get_item +- [ ] batch_write_item +- [ ] create_backup +- [ ] create_global_table +- [X] create_table +- [ ] delete_backup +- [X] delete_item +- [X] delete_table +- [ ] describe_backup +- [ ] describe_continuous_backups +- [ ] describe_global_table +- [ ] describe_global_table_settings +- [ ] describe_limits +- [ ] describe_table +- [ ] describe_time_to_live +- [X] get_item +- [ ] list_backups +- [ ] list_global_tables +- [ ] list_tables +- [ ] list_tags_of_resource +- [X] put_item +- [X] query +- [ ] restore_table_from_backup +- [ ] restore_table_to_point_in_time +- [X] scan +- [ ] tag_resource +- [ ] untag_resource +- [ ] update_continuous_backups +- [ ] update_global_table +- [ ] update_global_table_settings +- [ ] update_item +- [ ] update_table +- [ ] update_time_to_live + +## dynamodbstreams - 0% implemented +- [ ] describe_stream +- [ ] get_records +- [ ] get_shard_iterator +- [ ] list_streams + +## ec2 - 36% implemented +- [ ] accept_reserved_instances_exchange_quote +- [ ] accept_vpc_endpoint_connections +- [X] accept_vpc_peering_connection +- [X] allocate_address +- [ ] allocate_hosts +- [ ] assign_ipv6_addresses +- [ ] assign_private_ip_addresses +- [X] associate_address +- [X] associate_dhcp_options +- [ ] associate_iam_instance_profile +- [X] associate_route_table +- [ ] associate_subnet_cidr_block +- [X] associate_vpc_cidr_block +- [ ] attach_classic_link_vpc +- [X] attach_internet_gateway +- [X] attach_network_interface +- [X] attach_volume +- [X] attach_vpn_gateway +- [X] authorize_security_group_egress +- [X] authorize_security_group_ingress +- [ ] bundle_instance +- [ ] cancel_bundle_task +- [ ] cancel_conversion_task +- [ ] cancel_export_task +- [ ] cancel_import_task +- [ ] cancel_reserved_instances_listing +- [X] cancel_spot_fleet_requests +- [X] cancel_spot_instance_requests +- [ ] confirm_product_instance +- [ ] copy_fpga_image +- [X] copy_image +- [X] copy_snapshot +- [X] create_customer_gateway +- [ ] create_default_subnet +- [ ] create_default_vpc +- [X] 
create_dhcp_options +- [ ] create_egress_only_internet_gateway +- [ ] create_fleet +- [ ] create_flow_logs +- [ ] create_fpga_image +- [X] create_image +- [ ] create_instance_export_task +- [X] create_internet_gateway +- [X] create_key_pair +- [ ] create_launch_template +- [ ] create_launch_template_version +- [X] create_nat_gateway +- [X] create_network_acl +- [X] create_network_acl_entry +- [X] create_network_interface +- [ ] create_network_interface_permission +- [ ] create_placement_group +- [ ] create_reserved_instances_listing +- [X] create_route +- [X] create_route_table +- [X] create_security_group +- [X] create_snapshot +- [ ] create_spot_datafeed_subscription +- [X] create_subnet +- [X] create_tags +- [X] create_volume +- [X] create_vpc +- [ ] create_vpc_endpoint +- [ ] create_vpc_endpoint_connection_notification +- [ ] create_vpc_endpoint_service_configuration +- [X] create_vpc_peering_connection +- [X] create_vpn_connection +- [ ] create_vpn_connection_route +- [X] create_vpn_gateway +- [X] delete_customer_gateway +- [ ] delete_dhcp_options +- [ ] delete_egress_only_internet_gateway +- [ ] delete_fleets +- [ ] delete_flow_logs +- [ ] delete_fpga_image +- [X] delete_internet_gateway +- [X] delete_key_pair +- [ ] delete_launch_template +- [ ] delete_launch_template_versions +- [X] delete_nat_gateway +- [X] delete_network_acl +- [X] delete_network_acl_entry +- [X] delete_network_interface +- [ ] delete_network_interface_permission +- [ ] delete_placement_group +- [X] delete_route +- [X] delete_route_table +- [X] delete_security_group +- [X] delete_snapshot +- [ ] delete_spot_datafeed_subscription +- [X] delete_subnet +- [X] delete_tags +- [X] delete_volume +- [X] delete_vpc +- [ ] delete_vpc_endpoint_connection_notifications +- [ ] delete_vpc_endpoint_service_configurations +- [ ] delete_vpc_endpoints +- [X] delete_vpc_peering_connection +- [X] delete_vpn_connection +- [ ] delete_vpn_connection_route +- [X] delete_vpn_gateway +- [X] deregister_image +- [ ] describe_account_attributes +- [X] describe_addresses +- [ ] describe_aggregate_id_format +- [X] describe_availability_zones +- [ ] describe_bundle_tasks +- [ ] describe_classic_link_instances +- [ ] describe_conversion_tasks +- [ ] describe_customer_gateways +- [X] describe_dhcp_options +- [ ] describe_egress_only_internet_gateways +- [ ] describe_elastic_gpus +- [ ] describe_export_tasks +- [ ] describe_fleet_history +- [ ] describe_fleet_instances +- [ ] describe_fleets +- [ ] describe_flow_logs +- [ ] describe_fpga_image_attribute +- [ ] describe_fpga_images +- [ ] describe_host_reservation_offerings +- [ ] describe_host_reservations +- [ ] describe_hosts +- [ ] describe_iam_instance_profile_associations +- [ ] describe_id_format +- [ ] describe_identity_id_format +- [ ] describe_image_attribute +- [X] describe_images +- [ ] describe_import_image_tasks +- [ ] describe_import_snapshot_tasks +- [X] describe_instance_attribute +- [ ] describe_instance_credit_specifications +- [ ] describe_instance_status +- [ ] describe_instances +- [X] describe_internet_gateways +- [X] describe_key_pairs +- [ ] describe_launch_template_versions +- [ ] describe_launch_templates +- [ ] describe_moving_addresses +- [ ] describe_nat_gateways +- [ ] describe_network_acls +- [ ] describe_network_interface_attribute +- [ ] describe_network_interface_permissions +- [X] describe_network_interfaces +- [ ] describe_placement_groups +- [ ] describe_prefix_lists +- [ ] describe_principal_id_format +- [X] describe_regions +- [ ] describe_reserved_instances 
+- [ ] describe_reserved_instances_listings +- [ ] describe_reserved_instances_modifications +- [ ] describe_reserved_instances_offerings +- [ ] describe_route_tables +- [ ] describe_scheduled_instance_availability +- [ ] describe_scheduled_instances +- [ ] describe_security_group_references +- [X] describe_security_groups +- [ ] describe_snapshot_attribute +- [X] describe_snapshots +- [ ] describe_spot_datafeed_subscription +- [X] describe_spot_fleet_instances +- [ ] describe_spot_fleet_request_history +- [X] describe_spot_fleet_requests +- [X] describe_spot_instance_requests +- [ ] describe_spot_price_history +- [ ] describe_stale_security_groups +- [ ] describe_subnets +- [X] describe_tags +- [ ] describe_volume_attribute +- [ ] describe_volume_status +- [X] describe_volumes +- [ ] describe_volumes_modifications +- [X] describe_vpc_attribute +- [ ] describe_vpc_classic_link +- [ ] describe_vpc_classic_link_dns_support +- [ ] describe_vpc_endpoint_connection_notifications +- [ ] describe_vpc_endpoint_connections +- [ ] describe_vpc_endpoint_service_configurations +- [ ] describe_vpc_endpoint_service_permissions +- [ ] describe_vpc_endpoint_services +- [ ] describe_vpc_endpoints +- [ ] describe_vpc_peering_connections +- [ ] describe_vpcs +- [X] describe_vpn_connections +- [ ] describe_vpn_gateways +- [ ] detach_classic_link_vpc +- [X] detach_internet_gateway +- [X] detach_network_interface +- [X] detach_volume +- [X] detach_vpn_gateway +- [ ] disable_vgw_route_propagation +- [ ] disable_vpc_classic_link +- [ ] disable_vpc_classic_link_dns_support +- [X] disassociate_address +- [ ] disassociate_iam_instance_profile +- [X] disassociate_route_table +- [ ] disassociate_subnet_cidr_block +- [X] disassociate_vpc_cidr_block +- [ ] enable_vgw_route_propagation +- [ ] enable_volume_io +- [ ] enable_vpc_classic_link +- [ ] enable_vpc_classic_link_dns_support +- [ ] get_console_output +- [ ] get_console_screenshot +- [ ] get_host_reservation_purchase_preview +- [ ] get_launch_template_data +- [ ] get_password_data +- [ ] get_reserved_instances_exchange_quote +- [ ] import_image +- [ ] import_instance +- [X] import_key_pair +- [ ] import_snapshot +- [ ] import_volume +- [ ] modify_fleet +- [ ] modify_fpga_image_attribute +- [ ] modify_hosts +- [ ] modify_id_format +- [ ] modify_identity_id_format +- [ ] modify_image_attribute +- [X] modify_instance_attribute +- [ ] modify_instance_credit_specification +- [ ] modify_instance_placement +- [ ] modify_launch_template +- [X] modify_network_interface_attribute +- [ ] modify_reserved_instances +- [ ] modify_snapshot_attribute +- [X] modify_spot_fleet_request +- [X] modify_subnet_attribute +- [ ] modify_volume +- [ ] modify_volume_attribute +- [X] modify_vpc_attribute +- [ ] modify_vpc_endpoint +- [ ] modify_vpc_endpoint_connection_notification +- [ ] modify_vpc_endpoint_service_configuration +- [ ] modify_vpc_endpoint_service_permissions +- [ ] modify_vpc_peering_connection_options +- [ ] modify_vpc_tenancy +- [ ] monitor_instances +- [ ] move_address_to_vpc +- [ ] purchase_host_reservation +- [ ] purchase_reserved_instances_offering +- [ ] purchase_scheduled_instances +- [X] reboot_instances +- [ ] register_image +- [ ] reject_vpc_endpoint_connections +- [X] reject_vpc_peering_connection +- [X] release_address +- [ ] release_hosts +- [ ] replace_iam_instance_profile_association +- [X] replace_network_acl_association +- [X] replace_network_acl_entry +- [X] replace_route +- [X] replace_route_table_association +- [ ] report_instance_status +- [X] 
request_spot_fleet +- [X] request_spot_instances +- [ ] reset_fpga_image_attribute +- [ ] reset_image_attribute +- [ ] reset_instance_attribute +- [ ] reset_network_interface_attribute +- [ ] reset_snapshot_attribute +- [ ] restore_address_to_classic +- [X] revoke_security_group_egress +- [X] revoke_security_group_ingress +- [ ] run_instances +- [ ] run_scheduled_instances +- [X] start_instances +- [X] stop_instances +- [X] terminate_instances +- [ ] unassign_ipv6_addresses +- [ ] unassign_private_ip_addresses +- [ ] unmonitor_instances +- [ ] update_security_group_rule_descriptions_egress +- [ ] update_security_group_rule_descriptions_ingress + +## ecr - 31% implemented +- [ ] batch_check_layer_availability +- [ ] batch_delete_image +- [X] batch_get_image +- [ ] complete_layer_upload +- [X] create_repository +- [ ] delete_lifecycle_policy +- [X] delete_repository +- [ ] delete_repository_policy +- [X] describe_images +- [X] describe_repositories +- [ ] get_authorization_token +- [ ] get_download_url_for_layer +- [ ] get_lifecycle_policy +- [ ] get_lifecycle_policy_preview +- [ ] get_repository_policy +- [ ] initiate_layer_upload +- [X] list_images +- [X] put_image +- [ ] put_lifecycle_policy +- [ ] set_repository_policy +- [ ] start_lifecycle_policy_preview +- [ ] upload_layer_part + +## ecs - 87% implemented +- [X] create_cluster +- [X] create_service +- [X] delete_attributes +- [X] delete_cluster +- [X] delete_service +- [X] deregister_container_instance +- [X] deregister_task_definition +- [X] describe_clusters +- [X] describe_container_instances +- [X] describe_services +- [X] describe_task_definition +- [X] describe_tasks +- [ ] discover_poll_endpoint +- [X] list_attributes +- [X] list_clusters +- [X] list_container_instances +- [X] list_services +- [X] list_task_definition_families +- [X] list_task_definitions +- [X] list_tasks +- [X] put_attributes +- [X] register_container_instance +- [X] register_task_definition +- [X] run_task +- [X] start_task +- [X] stop_task +- [ ] submit_container_state_change +- [ ] submit_task_state_change +- [ ] update_container_agent +- [X] update_container_instances_state +- [X] update_service + +## efs - 0% implemented +- [ ] create_file_system +- [ ] create_mount_target +- [ ] create_tags +- [ ] delete_file_system +- [ ] delete_mount_target +- [ ] delete_tags +- [ ] describe_file_systems +- [ ] describe_mount_target_security_groups +- [ ] describe_mount_targets +- [ ] describe_tags +- [ ] modify_mount_target_security_groups +- [ ] update_file_system + +## eks - 0% implemented +- [ ] create_cluster +- [ ] delete_cluster +- [ ] describe_cluster +- [ ] list_clusters + +## elasticache - 0% implemented +- [ ] add_tags_to_resource +- [ ] authorize_cache_security_group_ingress +- [ ] copy_snapshot +- [ ] create_cache_cluster +- [ ] create_cache_parameter_group +- [ ] create_cache_security_group +- [ ] create_cache_subnet_group +- [ ] create_replication_group +- [ ] create_snapshot +- [ ] delete_cache_cluster +- [ ] delete_cache_parameter_group +- [ ] delete_cache_security_group +- [ ] delete_cache_subnet_group +- [ ] delete_replication_group +- [ ] delete_snapshot +- [ ] describe_cache_clusters +- [ ] describe_cache_engine_versions +- [ ] describe_cache_parameter_groups +- [ ] describe_cache_parameters +- [ ] describe_cache_security_groups +- [ ] describe_cache_subnet_groups +- [ ] describe_engine_default_parameters +- [ ] describe_events +- [ ] describe_replication_groups +- [ ] describe_reserved_cache_nodes +- [ ] describe_reserved_cache_nodes_offerings +- 
[ ] describe_snapshots +- [ ] list_allowed_node_type_modifications +- [ ] list_tags_for_resource +- [ ] modify_cache_cluster +- [ ] modify_cache_parameter_group +- [ ] modify_cache_subnet_group +- [ ] modify_replication_group +- [ ] modify_replication_group_shard_configuration +- [ ] purchase_reserved_cache_nodes_offering +- [ ] reboot_cache_cluster +- [ ] remove_tags_from_resource +- [ ] reset_cache_parameter_group +- [ ] revoke_cache_security_group_ingress +- [ ] test_failover + +## elasticbeanstalk - 0% implemented +- [ ] abort_environment_update +- [ ] apply_environment_managed_action +- [ ] check_dns_availability +- [ ] compose_environments +- [ ] create_application +- [ ] create_application_version +- [ ] create_configuration_template +- [ ] create_environment +- [ ] create_platform_version +- [ ] create_storage_location +- [ ] delete_application +- [ ] delete_application_version +- [ ] delete_configuration_template +- [ ] delete_environment_configuration +- [ ] delete_platform_version +- [ ] describe_account_attributes +- [ ] describe_application_versions +- [ ] describe_applications +- [ ] describe_configuration_options +- [ ] describe_configuration_settings +- [ ] describe_environment_health +- [ ] describe_environment_managed_action_history +- [ ] describe_environment_managed_actions +- [ ] describe_environment_resources +- [ ] describe_environments +- [ ] describe_events +- [ ] describe_instances_health +- [ ] describe_platform_version +- [ ] list_available_solution_stacks +- [ ] list_platform_versions +- [ ] list_tags_for_resource +- [ ] rebuild_environment +- [ ] request_environment_info +- [ ] restart_app_server +- [ ] retrieve_environment_info +- [ ] swap_environment_cnames +- [ ] terminate_environment +- [ ] update_application +- [ ] update_application_resource_lifecycle +- [ ] update_application_version +- [ ] update_configuration_template +- [ ] update_environment +- [ ] update_tags_for_resource +- [ ] validate_configuration_settings + +## elastictranscoder - 0% implemented +- [ ] cancel_job +- [ ] create_job +- [ ] create_pipeline +- [ ] create_preset +- [ ] delete_pipeline +- [ ] delete_preset +- [ ] list_jobs_by_pipeline +- [ ] list_jobs_by_status +- [ ] list_pipelines +- [ ] list_presets +- [ ] read_job +- [ ] read_pipeline +- [ ] read_preset +- [ ] test_role +- [ ] update_pipeline +- [ ] update_pipeline_notifications +- [ ] update_pipeline_status + +## elb - 34% implemented +- [ ] add_tags +- [X] apply_security_groups_to_load_balancer +- [ ] attach_load_balancer_to_subnets +- [X] configure_health_check +- [X] create_app_cookie_stickiness_policy +- [X] create_lb_cookie_stickiness_policy +- [X] create_load_balancer +- [X] create_load_balancer_listeners +- [ ] create_load_balancer_policy +- [X] delete_load_balancer +- [X] delete_load_balancer_listeners +- [ ] delete_load_balancer_policy +- [ ] deregister_instances_from_load_balancer +- [ ] describe_account_limits +- [ ] describe_instance_health +- [ ] describe_load_balancer_attributes +- [ ] describe_load_balancer_policies +- [ ] describe_load_balancer_policy_types +- [X] describe_load_balancers +- [ ] describe_tags +- [ ] detach_load_balancer_from_subnets +- [ ] disable_availability_zones_for_load_balancer +- [ ] enable_availability_zones_for_load_balancer +- [ ] modify_load_balancer_attributes +- [ ] register_instances_with_load_balancer +- [ ] remove_tags +- [ ] set_load_balancer_listener_ssl_certificate +- [ ] set_load_balancer_policies_for_backend_server +- [X] set_load_balancer_policies_of_listener + +## elbv2 - 
70% implemented +- [ ] add_listener_certificates +- [ ] add_tags +- [X] create_listener +- [X] create_load_balancer +- [X] create_rule +- [X] create_target_group +- [X] delete_listener +- [X] delete_load_balancer +- [X] delete_rule +- [X] delete_target_group +- [X] deregister_targets +- [ ] describe_account_limits +- [ ] describe_listener_certificates +- [X] describe_listeners +- [X] describe_load_balancer_attributes +- [X] describe_load_balancers +- [X] describe_rules +- [ ] describe_ssl_policies +- [ ] describe_tags +- [ ] describe_target_group_attributes +- [X] describe_target_groups +- [X] describe_target_health +- [X] modify_listener +- [X] modify_load_balancer_attributes +- [X] modify_rule +- [X] modify_target_group +- [ ] modify_target_group_attributes +- [X] register_targets +- [ ] remove_listener_certificates +- [ ] remove_tags +- [X] set_ip_address_type +- [X] set_rule_priorities +- [X] set_security_groups +- [X] set_subnets + +## emr - 55% implemented +- [ ] add_instance_fleet +- [X] add_instance_groups +- [X] add_job_flow_steps +- [X] add_tags +- [ ] cancel_steps +- [ ] create_security_configuration +- [ ] delete_security_configuration +- [ ] describe_cluster +- [X] describe_job_flows +- [ ] describe_security_configuration +- [X] describe_step +- [X] list_bootstrap_actions +- [X] list_clusters +- [ ] list_instance_fleets +- [X] list_instance_groups +- [ ] list_instances +- [ ] list_security_configurations +- [X] list_steps +- [ ] modify_instance_fleet +- [X] modify_instance_groups +- [ ] put_auto_scaling_policy +- [ ] remove_auto_scaling_policy +- [X] remove_tags +- [X] run_job_flow +- [X] set_termination_protection +- [X] set_visible_to_all_users +- [X] terminate_job_flows + +## es - 0% implemented +- [ ] add_tags +- [ ] create_elasticsearch_domain +- [ ] delete_elasticsearch_domain +- [ ] delete_elasticsearch_service_role +- [ ] describe_elasticsearch_domain +- [ ] describe_elasticsearch_domain_config +- [ ] describe_elasticsearch_domains +- [ ] describe_elasticsearch_instance_type_limits +- [ ] describe_reserved_elasticsearch_instance_offerings +- [ ] describe_reserved_elasticsearch_instances +- [ ] list_domain_names +- [ ] list_elasticsearch_instance_types +- [ ] list_elasticsearch_versions +- [ ] list_tags +- [ ] purchase_reserved_elasticsearch_instance_offering +- [ ] remove_tags +- [ ] update_elasticsearch_domain_config + +## events - 100% implemented +- [X] delete_rule +- [X] describe_event_bus +- [X] describe_rule +- [X] disable_rule +- [X] enable_rule +- [X] list_rule_names_by_target +- [X] list_rules +- [X] list_targets_by_rule +- [X] put_events +- [X] put_permission +- [X] put_rule +- [X] put_targets +- [X] remove_permission +- [X] remove_targets +- [X] test_event_pattern + +## firehose - 0% implemented +- [ ] create_delivery_stream +- [ ] delete_delivery_stream +- [ ] describe_delivery_stream +- [ ] list_delivery_streams +- [ ] list_tags_for_delivery_stream +- [ ] put_record +- [ ] put_record_batch +- [ ] tag_delivery_stream +- [ ] untag_delivery_stream +- [ ] update_destination + +## fms - 0% implemented +- [ ] associate_admin_account +- [ ] delete_notification_channel +- [ ] delete_policy +- [ ] disassociate_admin_account +- [ ] get_admin_account +- [ ] get_compliance_detail +- [ ] get_notification_channel +- [ ] get_policy +- [ ] list_compliance_status +- [ ] list_policies +- [ ] put_notification_channel +- [ ] put_policy + +## gamelift - 0% implemented +- [ ] accept_match +- [ ] create_alias +- [ ] create_build +- [ ] create_fleet +- [ ] create_game_session +- 
[ ] create_game_session_queue +- [ ] create_matchmaking_configuration +- [ ] create_matchmaking_rule_set +- [ ] create_player_session +- [ ] create_player_sessions +- [ ] create_vpc_peering_authorization +- [ ] create_vpc_peering_connection +- [ ] delete_alias +- [ ] delete_build +- [ ] delete_fleet +- [ ] delete_game_session_queue +- [ ] delete_matchmaking_configuration +- [ ] delete_scaling_policy +- [ ] delete_vpc_peering_authorization +- [ ] delete_vpc_peering_connection +- [ ] describe_alias +- [ ] describe_build +- [ ] describe_ec2_instance_limits +- [ ] describe_fleet_attributes +- [ ] describe_fleet_capacity +- [ ] describe_fleet_events +- [ ] describe_fleet_port_settings +- [ ] describe_fleet_utilization +- [ ] describe_game_session_details +- [ ] describe_game_session_placement +- [ ] describe_game_session_queues +- [ ] describe_game_sessions +- [ ] describe_instances +- [ ] describe_matchmaking +- [ ] describe_matchmaking_configurations +- [ ] describe_matchmaking_rule_sets +- [ ] describe_player_sessions +- [ ] describe_runtime_configuration +- [ ] describe_scaling_policies +- [ ] describe_vpc_peering_authorizations +- [ ] describe_vpc_peering_connections +- [ ] get_game_session_log_url +- [ ] get_instance_access +- [ ] list_aliases +- [ ] list_builds +- [ ] list_fleets +- [ ] put_scaling_policy +- [ ] request_upload_credentials +- [ ] resolve_alias +- [ ] search_game_sessions +- [ ] start_fleet_actions +- [ ] start_game_session_placement +- [ ] start_match_backfill +- [ ] start_matchmaking +- [ ] stop_fleet_actions +- [ ] stop_game_session_placement +- [ ] stop_matchmaking +- [ ] update_alias +- [ ] update_build +- [ ] update_fleet_attributes +- [ ] update_fleet_capacity +- [ ] update_fleet_port_settings +- [ ] update_game_session +- [ ] update_game_session_queue +- [ ] update_matchmaking_configuration +- [ ] update_runtime_configuration +- [ ] validate_matchmaking_rule_set + +## glacier - 12% implemented +- [ ] abort_multipart_upload +- [ ] abort_vault_lock +- [ ] add_tags_to_vault +- [ ] complete_multipart_upload +- [ ] complete_vault_lock +- [X] create_vault +- [ ] delete_archive +- [X] delete_vault +- [ ] delete_vault_access_policy +- [ ] delete_vault_notifications +- [ ] describe_job +- [ ] describe_vault +- [ ] get_data_retrieval_policy +- [ ] get_job_output +- [ ] get_vault_access_policy +- [ ] get_vault_lock +- [ ] get_vault_notifications +- [X] initiate_job +- [ ] initiate_multipart_upload +- [ ] initiate_vault_lock +- [X] list_jobs +- [ ] list_multipart_uploads +- [ ] list_parts +- [ ] list_provisioned_capacity +- [ ] list_tags_for_vault +- [ ] list_vaults +- [ ] purchase_provisioned_capacity +- [ ] remove_tags_from_vault +- [ ] set_data_retrieval_policy +- [ ] set_vault_access_policy +- [ ] set_vault_notifications +- [ ] upload_archive +- [ ] upload_multipart_part + +## glue - 6% implemented +- [ ] batch_create_partition +- [ ] batch_delete_connection +- [ ] batch_delete_partition +- [ ] batch_delete_table +- [ ] batch_delete_table_version +- [ ] batch_get_partition +- [ ] batch_stop_job_run +- [ ] create_classifier +- [ ] create_connection +- [ ] create_crawler +- [X] create_database +- [ ] create_dev_endpoint +- [ ] create_job +- [ ] create_partition +- [ ] create_script +- [X] create_table +- [ ] create_trigger +- [ ] create_user_defined_function +- [ ] delete_classifier +- [ ] delete_connection +- [ ] delete_crawler +- [ ] delete_database +- [ ] delete_dev_endpoint +- [ ] delete_job +- [ ] delete_partition +- [ ] delete_table +- [ ] delete_table_version +- [ ] 
delete_trigger +- [ ] delete_user_defined_function +- [ ] get_catalog_import_status +- [ ] get_classifier +- [ ] get_classifiers +- [ ] get_connection +- [ ] get_connections +- [ ] get_crawler +- [ ] get_crawler_metrics +- [ ] get_crawlers +- [X] get_database +- [ ] get_databases +- [ ] get_dataflow_graph +- [ ] get_dev_endpoint +- [ ] get_dev_endpoints +- [ ] get_job +- [ ] get_job_run +- [ ] get_job_runs +- [ ] get_jobs +- [ ] get_mapping +- [ ] get_partition +- [ ] get_partitions +- [ ] get_plan +- [X] get_table +- [ ] get_table_version +- [ ] get_table_versions +- [X] get_tables +- [ ] get_trigger +- [ ] get_triggers +- [ ] get_user_defined_function +- [ ] get_user_defined_functions +- [ ] import_catalog_to_glue +- [ ] reset_job_bookmark +- [ ] start_crawler +- [ ] start_crawler_schedule +- [ ] start_job_run +- [ ] start_trigger +- [ ] stop_crawler +- [ ] stop_crawler_schedule +- [ ] stop_trigger +- [ ] update_classifier +- [ ] update_connection +- [ ] update_crawler +- [ ] update_crawler_schedule +- [ ] update_database +- [ ] update_dev_endpoint +- [ ] update_job +- [ ] update_partition +- [ ] update_table +- [ ] update_trigger +- [ ] update_user_defined_function + +## greengrass - 0% implemented +- [ ] associate_role_to_group +- [ ] associate_service_role_to_account +- [ ] create_core_definition +- [ ] create_core_definition_version +- [ ] create_deployment +- [ ] create_device_definition +- [ ] create_device_definition_version +- [ ] create_function_definition +- [ ] create_function_definition_version +- [ ] create_group +- [ ] create_group_certificate_authority +- [ ] create_group_version +- [ ] create_logger_definition +- [ ] create_logger_definition_version +- [ ] create_resource_definition +- [ ] create_resource_definition_version +- [ ] create_software_update_job +- [ ] create_subscription_definition +- [ ] create_subscription_definition_version +- [ ] delete_core_definition +- [ ] delete_device_definition +- [ ] delete_function_definition +- [ ] delete_group +- [ ] delete_logger_definition +- [ ] delete_resource_definition +- [ ] delete_subscription_definition +- [ ] disassociate_role_from_group +- [ ] disassociate_service_role_from_account +- [ ] get_associated_role +- [ ] get_connectivity_info +- [ ] get_core_definition +- [ ] get_core_definition_version +- [ ] get_deployment_status +- [ ] get_device_definition +- [ ] get_device_definition_version +- [ ] get_function_definition +- [ ] get_function_definition_version +- [ ] get_group +- [ ] get_group_certificate_authority +- [ ] get_group_certificate_configuration +- [ ] get_group_version +- [ ] get_logger_definition +- [ ] get_logger_definition_version +- [ ] get_resource_definition +- [ ] get_resource_definition_version +- [ ] get_service_role_for_account +- [ ] get_subscription_definition +- [ ] get_subscription_definition_version +- [ ] list_core_definition_versions +- [ ] list_core_definitions +- [ ] list_deployments +- [ ] list_device_definition_versions +- [ ] list_device_definitions +- [ ] list_function_definition_versions +- [ ] list_function_definitions +- [ ] list_group_certificate_authorities +- [ ] list_group_versions +- [ ] list_groups +- [ ] list_logger_definition_versions +- [ ] list_logger_definitions +- [ ] list_resource_definition_versions +- [ ] list_resource_definitions +- [ ] list_subscription_definition_versions +- [ ] list_subscription_definitions +- [ ] reset_deployments +- [ ] update_connectivity_info +- [ ] update_core_definition +- [ ] update_device_definition +- [ ] update_function_definition +- [ 
] update_group +- [ ] update_group_certificate_configuration +- [ ] update_logger_definition +- [ ] update_resource_definition +- [ ] update_subscription_definition + +## guardduty - 0% implemented +- [ ] accept_invitation +- [ ] archive_findings +- [ ] create_detector +- [ ] create_filter +- [ ] create_ip_set +- [ ] create_members +- [ ] create_sample_findings +- [ ] create_threat_intel_set +- [ ] decline_invitations +- [ ] delete_detector +- [ ] delete_filter +- [ ] delete_invitations +- [ ] delete_ip_set +- [ ] delete_members +- [ ] delete_threat_intel_set +- [ ] disassociate_from_master_account +- [ ] disassociate_members +- [ ] get_detector +- [ ] get_filter +- [ ] get_findings +- [ ] get_findings_statistics +- [ ] get_invitations_count +- [ ] get_ip_set +- [ ] get_master_account +- [ ] get_members +- [ ] get_threat_intel_set +- [ ] invite_members +- [ ] list_detectors +- [ ] list_filters +- [ ] list_findings +- [ ] list_invitations +- [ ] list_ip_sets +- [ ] list_members +- [ ] list_threat_intel_sets +- [ ] start_monitoring_members +- [ ] stop_monitoring_members +- [ ] unarchive_findings +- [ ] update_detector +- [ ] update_filter +- [ ] update_findings_feedback +- [ ] update_ip_set +- [ ] update_threat_intel_set + +## health - 0% implemented +- [ ] describe_affected_entities +- [ ] describe_entity_aggregates +- [ ] describe_event_aggregates +- [ ] describe_event_details +- [ ] describe_event_types +- [ ] describe_events + +## iam - 47% implemented +- [ ] add_client_id_to_open_id_connect_provider +- [X] add_role_to_instance_profile +- [X] add_user_to_group +- [X] attach_group_policy +- [X] attach_role_policy +- [X] attach_user_policy +- [ ] change_password +- [X] create_access_key +- [X] create_account_alias +- [X] create_group +- [X] create_instance_profile +- [X] create_login_profile +- [ ] create_open_id_connect_provider +- [X] create_policy +- [X] create_policy_version +- [X] create_role +- [ ] create_saml_provider +- [ ] create_service_linked_role +- [ ] create_service_specific_credential +- [X] create_user +- [ ] create_virtual_mfa_device +- [X] deactivate_mfa_device +- [X] delete_access_key +- [X] delete_account_alias +- [ ] delete_account_password_policy +- [ ] delete_group +- [ ] delete_group_policy +- [ ] delete_instance_profile +- [X] delete_login_profile +- [ ] delete_open_id_connect_provider +- [ ] delete_policy +- [X] delete_policy_version +- [X] delete_role +- [ ] delete_role_permissions_boundary +- [X] delete_role_policy +- [ ] delete_saml_provider +- [X] delete_server_certificate +- [ ] delete_service_linked_role +- [ ] delete_service_specific_credential +- [ ] delete_signing_certificate +- [ ] delete_ssh_public_key +- [X] delete_user +- [ ] delete_user_permissions_boundary +- [X] delete_user_policy +- [ ] delete_virtual_mfa_device +- [X] detach_group_policy +- [X] detach_role_policy +- [X] detach_user_policy +- [X] enable_mfa_device +- [ ] generate_credential_report +- [ ] get_access_key_last_used +- [X] get_account_authorization_details +- [ ] get_account_password_policy +- [ ] get_account_summary +- [ ] get_context_keys_for_custom_policy +- [ ] get_context_keys_for_principal_policy +- [X] get_credential_report +- [X] get_group +- [X] get_group_policy +- [X] get_instance_profile +- [X] get_login_profile +- [ ] get_open_id_connect_provider +- [X] get_policy +- [X] get_policy_version +- [X] get_role +- [X] get_role_policy +- [ ] get_saml_provider +- [X] get_server_certificate +- [ ] get_service_linked_role_deletion_status +- [ ] get_ssh_public_key +- [X] get_user +- 
[X] get_user_policy +- [ ] list_access_keys +- [X] list_account_aliases +- [X] list_attached_group_policies +- [X] list_attached_role_policies +- [X] list_attached_user_policies +- [ ] list_entities_for_policy +- [X] list_group_policies +- [X] list_groups +- [ ] list_groups_for_user +- [ ] list_instance_profiles +- [ ] list_instance_profiles_for_role +- [X] list_mfa_devices +- [ ] list_open_id_connect_providers +- [X] list_policies +- [X] list_policy_versions +- [X] list_role_policies +- [ ] list_roles +- [ ] list_saml_providers +- [ ] list_server_certificates +- [ ] list_service_specific_credentials +- [ ] list_signing_certificates +- [ ] list_ssh_public_keys +- [X] list_user_policies +- [X] list_users +- [ ] list_virtual_mfa_devices +- [X] put_group_policy +- [ ] put_role_permissions_boundary +- [X] put_role_policy +- [ ] put_user_permissions_boundary +- [X] put_user_policy +- [ ] remove_client_id_from_open_id_connect_provider +- [X] remove_role_from_instance_profile +- [X] remove_user_from_group +- [ ] reset_service_specific_credential +- [ ] resync_mfa_device +- [ ] set_default_policy_version +- [ ] simulate_custom_policy +- [ ] simulate_principal_policy +- [X] update_access_key +- [ ] update_account_password_policy +- [ ] update_assume_role_policy +- [ ] update_group +- [X] update_login_profile +- [ ] update_open_id_connect_provider_thumbprint +- [ ] update_role +- [ ] update_role_description +- [ ] update_saml_provider +- [ ] update_server_certificate +- [ ] update_service_specific_credential +- [ ] update_signing_certificate +- [ ] update_ssh_public_key +- [ ] update_user +- [ ] upload_server_certificate +- [ ] upload_signing_certificate +- [ ] upload_ssh_public_key + +## importexport - 0% implemented +- [ ] cancel_job +- [ ] create_job +- [ ] get_shipping_label +- [ ] get_status +- [ ] list_jobs +- [ ] update_job + +## inspector - 0% implemented +- [ ] add_attributes_to_findings +- [ ] create_assessment_target +- [ ] create_assessment_template +- [ ] create_exclusions_preview +- [ ] create_resource_group +- [ ] delete_assessment_run +- [ ] delete_assessment_target +- [ ] delete_assessment_template +- [ ] describe_assessment_runs +- [ ] describe_assessment_targets +- [ ] describe_assessment_templates +- [ ] describe_cross_account_access_role +- [ ] describe_exclusions +- [ ] describe_findings +- [ ] describe_resource_groups +- [ ] describe_rules_packages +- [ ] get_assessment_report +- [ ] get_exclusions_preview +- [ ] get_telemetry_metadata +- [ ] list_assessment_run_agents +- [ ] list_assessment_runs +- [ ] list_assessment_targets +- [ ] list_assessment_templates +- [ ] list_event_subscriptions +- [ ] list_exclusions +- [ ] list_findings +- [ ] list_rules_packages +- [ ] list_tags_for_resource +- [ ] preview_agents +- [ ] register_cross_account_access_role +- [ ] remove_attributes_from_findings +- [ ] set_tags_for_resource +- [ ] start_assessment_run +- [ ] stop_assessment_run +- [ ] subscribe_to_event +- [ ] unsubscribe_from_event +- [ ] update_assessment_target + +## iot - 25% implemented +- [ ] accept_certificate_transfer +- [X] add_thing_to_thing_group +- [ ] associate_targets_with_job +- [ ] attach_policy +- [X] attach_principal_policy +- [ ] attach_security_profile +- [X] attach_thing_principal +- [ ] cancel_audit_task +- [ ] cancel_certificate_transfer +- [ ] cancel_job +- [ ] cancel_job_execution +- [ ] clear_default_authorizer +- [ ] create_authorizer +- [ ] create_certificate_from_csr +- [X] create_job +- [X] create_keys_and_certificate +- [ ] create_ota_update +- [X] 
create_policy +- [ ] create_policy_version +- [ ] create_role_alias +- [ ] create_scheduled_audit +- [ ] create_security_profile +- [ ] create_stream +- [X] create_thing +- [X] create_thing_group +- [X] create_thing_type +- [ ] create_topic_rule +- [ ] delete_account_audit_configuration +- [ ] delete_authorizer +- [ ] delete_ca_certificate +- [X] delete_certificate +- [ ] delete_job +- [ ] delete_job_execution +- [ ] delete_ota_update +- [X] delete_policy +- [ ] delete_policy_version +- [ ] delete_registration_code +- [ ] delete_role_alias +- [ ] delete_scheduled_audit +- [ ] delete_security_profile +- [ ] delete_stream +- [X] delete_thing +- [X] delete_thing_group +- [X] delete_thing_type +- [ ] delete_topic_rule +- [ ] delete_v2_logging_level +- [ ] deprecate_thing_type +- [ ] describe_account_audit_configuration +- [ ] describe_audit_task +- [ ] describe_authorizer +- [ ] describe_ca_certificate +- [X] describe_certificate +- [ ] describe_default_authorizer +- [ ] describe_endpoint +- [ ] describe_event_configurations +- [ ] describe_index +- [X] describe_job +- [ ] describe_job_execution +- [ ] describe_role_alias +- [ ] describe_scheduled_audit +- [ ] describe_security_profile +- [ ] describe_stream +- [X] describe_thing +- [X] describe_thing_group +- [ ] describe_thing_registration_task +- [X] describe_thing_type +- [ ] detach_policy +- [X] detach_principal_policy +- [ ] detach_security_profile +- [X] detach_thing_principal +- [ ] disable_topic_rule +- [ ] enable_topic_rule +- [ ] get_effective_policies +- [ ] get_indexing_configuration +- [X] get_job_document +- [ ] get_logging_options +- [ ] get_ota_update +- [X] get_policy +- [ ] get_policy_version +- [ ] get_registration_code +- [ ] get_topic_rule +- [ ] get_v2_logging_options +- [ ] list_active_violations +- [ ] list_attached_policies +- [ ] list_audit_findings +- [ ] list_audit_tasks +- [ ] list_authorizers +- [ ] list_ca_certificates +- [X] list_certificates +- [ ] list_certificates_by_ca +- [ ] list_indices +- [ ] list_job_executions_for_job +- [ ] list_job_executions_for_thing +- [ ] list_jobs +- [ ] list_ota_updates +- [ ] list_outgoing_certificates +- [X] list_policies +- [X] list_policy_principals +- [ ] list_policy_versions +- [X] list_principal_policies +- [X] list_principal_things +- [ ] list_role_aliases +- [ ] list_scheduled_audits +- [ ] list_security_profiles +- [ ] list_security_profiles_for_target +- [ ] list_streams +- [ ] list_targets_for_policy +- [ ] list_targets_for_security_profile +- [X] list_thing_groups +- [X] list_thing_groups_for_thing +- [X] list_thing_principals +- [ ] list_thing_registration_task_reports +- [ ] list_thing_registration_tasks +- [X] list_thing_types +- [X] list_things +- [X] list_things_in_thing_group +- [ ] list_topic_rules +- [ ] list_v2_logging_levels +- [ ] list_violation_events +- [ ] register_ca_certificate +- [ ] register_certificate +- [ ] register_thing +- [ ] reject_certificate_transfer +- [X] remove_thing_from_thing_group +- [ ] replace_topic_rule +- [ ] search_index +- [ ] set_default_authorizer +- [ ] set_default_policy_version +- [ ] set_logging_options +- [ ] set_v2_logging_level +- [ ] set_v2_logging_options +- [ ] start_on_demand_audit_task +- [ ] start_thing_registration_task +- [ ] stop_thing_registration_task +- [ ] test_authorization +- [ ] test_invoke_authorizer +- [ ] transfer_certificate +- [ ] update_account_audit_configuration +- [ ] update_authorizer +- [ ] update_ca_certificate +- [X] update_certificate +- [ ] update_event_configurations +- [ ] 
update_indexing_configuration +- [ ] update_role_alias +- [ ] update_scheduled_audit +- [ ] update_security_profile +- [ ] update_stream +- [X] update_thing +- [X] update_thing_group +- [X] update_thing_groups_for_thing +- [ ] validate_security_profile_behaviors + +## iot-data - 100% implemented +- [X] delete_thing_shadow +- [X] get_thing_shadow +- [X] publish +- [X] update_thing_shadow + +## iot-jobs-data - 0% implemented +- [ ] describe_job_execution +- [ ] get_pending_job_executions +- [ ] start_next_pending_job_execution +- [ ] update_job_execution + +## iot1click-devices - 0% implemented +- [ ] claim_devices_by_claim_code +- [ ] describe_device +- [ ] finalize_device_claim +- [ ] get_device_methods +- [ ] initiate_device_claim +- [ ] invoke_device_method +- [ ] list_device_events +- [ ] list_devices +- [ ] unclaim_device +- [ ] update_device_state + +## iot1click-projects - 0% implemented +- [ ] associate_device_with_placement +- [ ] create_placement +- [ ] create_project +- [ ] delete_placement +- [ ] delete_project +- [ ] describe_placement +- [ ] describe_project +- [ ] disassociate_device_from_placement +- [ ] get_devices_in_placement +- [ ] list_placements +- [ ] list_projects +- [ ] update_placement +- [ ] update_project + +## iotanalytics - 0% implemented +- [ ] batch_put_message +- [ ] cancel_pipeline_reprocessing +- [ ] create_channel +- [ ] create_dataset +- [ ] create_dataset_content +- [ ] create_datastore +- [ ] create_pipeline +- [ ] delete_channel +- [ ] delete_dataset +- [ ] delete_dataset_content +- [ ] delete_datastore +- [ ] delete_pipeline +- [ ] describe_channel +- [ ] describe_dataset +- [ ] describe_datastore +- [ ] describe_logging_options +- [ ] describe_pipeline +- [ ] get_dataset_content +- [ ] list_channels +- [ ] list_datasets +- [ ] list_datastores +- [ ] list_pipelines +- [ ] list_tags_for_resource +- [ ] put_logging_options +- [ ] run_pipeline_activity +- [ ] sample_channel_data +- [ ] start_pipeline_reprocessing +- [ ] tag_resource +- [ ] untag_resource +- [ ] update_channel +- [ ] update_dataset +- [ ] update_datastore +- [ ] update_pipeline + +## kinesis - 46% implemented +- [X] add_tags_to_stream +- [X] create_stream +- [ ] decrease_stream_retention_period +- [X] delete_stream +- [ ] deregister_stream_consumer +- [ ] describe_limits +- [X] describe_stream +- [ ] describe_stream_consumer +- [ ] describe_stream_summary +- [ ] disable_enhanced_monitoring +- [ ] enable_enhanced_monitoring +- [X] get_records +- [X] get_shard_iterator +- [ ] increase_stream_retention_period +- [ ] list_shards +- [ ] list_stream_consumers +- [X] list_streams +- [X] list_tags_for_stream +- [X] merge_shards +- [X] put_record +- [X] put_records +- [ ] register_stream_consumer +- [X] remove_tags_from_stream +- [X] split_shard +- [ ] start_stream_encryption +- [ ] stop_stream_encryption +- [ ] subscribe_to_shard +- [ ] update_shard_count + +## kinesis-video-archived-media - 0% implemented +- [ ] get_hls_streaming_session_url +- [ ] get_media_for_fragment_list +- [ ] list_fragments + +## kinesis-video-media - 0% implemented +- [ ] get_media + +## kinesisanalytics - 0% implemented +- [ ] add_application_cloud_watch_logging_option +- [ ] add_application_input +- [ ] add_application_input_processing_configuration +- [ ] add_application_output +- [ ] add_application_reference_data_source +- [ ] create_application +- [ ] delete_application +- [ ] delete_application_cloud_watch_logging_option +- [ ] delete_application_input_processing_configuration +- [ ] delete_application_output +- 
[ ] delete_application_reference_data_source +- [ ] describe_application +- [ ] discover_input_schema +- [ ] list_applications +- [ ] start_application +- [ ] stop_application +- [ ] update_application + +## kinesisvideo - 0% implemented +- [ ] create_stream +- [ ] delete_stream +- [ ] describe_stream +- [ ] get_data_endpoint +- [ ] list_streams +- [ ] list_tags_for_stream +- [ ] tag_stream +- [ ] untag_stream +- [ ] update_data_retention +- [ ] update_stream + +## kms - 25% implemented +- [ ] cancel_key_deletion +- [ ] create_alias +- [ ] create_grant +- [X] create_key +- [ ] decrypt +- [X] delete_alias +- [ ] delete_imported_key_material +- [X] describe_key +- [ ] disable_key +- [X] disable_key_rotation +- [ ] enable_key +- [X] enable_key_rotation +- [ ] encrypt +- [ ] generate_data_key +- [ ] generate_data_key_without_plaintext +- [ ] generate_random +- [X] get_key_policy +- [X] get_key_rotation_status +- [ ] get_parameters_for_import +- [ ] import_key_material +- [ ] list_aliases +- [ ] list_grants +- [ ] list_key_policies +- [X] list_keys +- [ ] list_resource_tags +- [ ] list_retirable_grants +- [X] put_key_policy +- [ ] re_encrypt +- [ ] retire_grant +- [ ] revoke_grant +- [ ] schedule_key_deletion +- [ ] tag_resource +- [ ] untag_resource +- [ ] update_alias +- [ ] update_key_description + +## lambda - 0% implemented +- [ ] add_permission +- [ ] create_alias +- [ ] create_event_source_mapping +- [ ] create_function +- [ ] delete_alias +- [ ] delete_event_source_mapping +- [ ] delete_function +- [ ] delete_function_concurrency +- [ ] get_account_settings +- [ ] get_alias +- [ ] get_event_source_mapping +- [ ] get_function +- [ ] get_function_configuration +- [ ] get_policy +- [ ] invoke +- [ ] invoke_async +- [ ] list_aliases +- [ ] list_event_source_mappings +- [ ] list_functions +- [ ] list_tags +- [ ] list_versions_by_function +- [ ] publish_version +- [ ] put_function_concurrency +- [ ] remove_permission +- [ ] tag_resource +- [ ] untag_resource +- [ ] update_alias +- [ ] update_event_source_mapping +- [ ] update_function_code +- [ ] update_function_configuration + +## lex-models - 0% implemented +- [ ] create_bot_version +- [ ] create_intent_version +- [ ] create_slot_type_version +- [ ] delete_bot +- [ ] delete_bot_alias +- [ ] delete_bot_channel_association +- [ ] delete_bot_version +- [ ] delete_intent +- [ ] delete_intent_version +- [ ] delete_slot_type +- [ ] delete_slot_type_version +- [ ] delete_utterances +- [ ] get_bot +- [ ] get_bot_alias +- [ ] get_bot_aliases +- [ ] get_bot_channel_association +- [ ] get_bot_channel_associations +- [ ] get_bot_versions +- [ ] get_bots +- [ ] get_builtin_intent +- [ ] get_builtin_intents +- [ ] get_builtin_slot_types +- [ ] get_export +- [ ] get_import +- [ ] get_intent +- [ ] get_intent_versions +- [ ] get_intents +- [ ] get_slot_type +- [ ] get_slot_type_versions +- [ ] get_slot_types +- [ ] get_utterances_view +- [ ] put_bot +- [ ] put_bot_alias +- [ ] put_intent +- [ ] put_slot_type +- [ ] start_import + +## lex-runtime - 0% implemented +- [ ] post_content +- [ ] post_text + +## lightsail - 0% implemented +- [ ] allocate_static_ip +- [ ] attach_disk +- [ ] attach_instances_to_load_balancer +- [ ] attach_load_balancer_tls_certificate +- [ ] attach_static_ip +- [ ] close_instance_public_ports +- [ ] create_disk +- [ ] create_disk_from_snapshot +- [ ] create_disk_snapshot +- [ ] create_domain +- [ ] create_domain_entry +- [ ] create_instance_snapshot +- [ ] create_instances +- [ ] create_instances_from_snapshot +- [ ] create_key_pair 
+- [ ] create_load_balancer +- [ ] create_load_balancer_tls_certificate +- [ ] delete_disk +- [ ] delete_disk_snapshot +- [ ] delete_domain +- [ ] delete_domain_entry +- [ ] delete_instance +- [ ] delete_instance_snapshot +- [ ] delete_key_pair +- [ ] delete_load_balancer +- [ ] delete_load_balancer_tls_certificate +- [ ] detach_disk +- [ ] detach_instances_from_load_balancer +- [ ] detach_static_ip +- [ ] download_default_key_pair +- [ ] get_active_names +- [ ] get_blueprints +- [ ] get_bundles +- [ ] get_disk +- [ ] get_disk_snapshot +- [ ] get_disk_snapshots +- [ ] get_disks +- [ ] get_domain +- [ ] get_domains +- [ ] get_instance +- [ ] get_instance_access_details +- [ ] get_instance_metric_data +- [ ] get_instance_port_states +- [ ] get_instance_snapshot +- [ ] get_instance_snapshots +- [ ] get_instance_state +- [ ] get_instances +- [ ] get_key_pair +- [ ] get_key_pairs +- [ ] get_load_balancer +- [ ] get_load_balancer_metric_data +- [ ] get_load_balancer_tls_certificates +- [ ] get_load_balancers +- [ ] get_operation +- [ ] get_operations +- [ ] get_operations_for_resource +- [ ] get_regions +- [ ] get_static_ip +- [ ] get_static_ips +- [ ] import_key_pair +- [ ] is_vpc_peered +- [ ] open_instance_public_ports +- [ ] peer_vpc +- [ ] put_instance_public_ports +- [ ] reboot_instance +- [ ] release_static_ip +- [ ] start_instance +- [ ] stop_instance +- [ ] unpeer_vpc +- [ ] update_domain_entry +- [ ] update_load_balancer_attribute + +## logs - 27% implemented +- [ ] associate_kms_key +- [ ] cancel_export_task +- [ ] create_export_task +- [X] create_log_group +- [X] create_log_stream +- [ ] delete_destination +- [X] delete_log_group +- [X] delete_log_stream +- [ ] delete_metric_filter +- [ ] delete_resource_policy +- [ ] delete_retention_policy +- [ ] delete_subscription_filter +- [ ] describe_destinations +- [ ] describe_export_tasks +- [X] describe_log_groups +- [X] describe_log_streams +- [ ] describe_metric_filters +- [ ] describe_resource_policies +- [ ] describe_subscription_filters +- [ ] disassociate_kms_key +- [X] filter_log_events +- [X] get_log_events +- [ ] list_tags_log_group +- [ ] put_destination +- [ ] put_destination_policy +- [X] put_log_events +- [ ] put_metric_filter +- [ ] put_resource_policy +- [ ] put_retention_policy +- [ ] put_subscription_filter +- [ ] tag_log_group +- [ ] test_metric_filter +- [ ] untag_log_group + +## machinelearning - 0% implemented +- [ ] add_tags +- [ ] create_batch_prediction +- [ ] create_data_source_from_rds +- [ ] create_data_source_from_redshift +- [ ] create_data_source_from_s3 +- [ ] create_evaluation +- [ ] create_ml_model +- [ ] create_realtime_endpoint +- [ ] delete_batch_prediction +- [ ] delete_data_source +- [ ] delete_evaluation +- [ ] delete_ml_model +- [ ] delete_realtime_endpoint +- [ ] delete_tags +- [ ] describe_batch_predictions +- [ ] describe_data_sources +- [ ] describe_evaluations +- [ ] describe_ml_models +- [ ] describe_tags +- [ ] get_batch_prediction +- [ ] get_data_source +- [ ] get_evaluation +- [ ] get_ml_model +- [ ] predict +- [ ] update_batch_prediction +- [ ] update_data_source +- [ ] update_evaluation +- [ ] update_ml_model + +## macie - 0% implemented +- [ ] associate_member_account +- [ ] associate_s3_resources +- [ ] disassociate_member_account +- [ ] disassociate_s3_resources +- [ ] list_member_accounts +- [ ] list_s3_resources +- [ ] update_s3_resources + +## marketplace-entitlement - 0% implemented +- [ ] get_entitlements + +## marketplacecommerceanalytics - 0% implemented +- [ ] generate_data_set 
+- [ ] start_support_data_export + +## mediaconvert - 0% implemented +- [ ] cancel_job +- [ ] create_job +- [ ] create_job_template +- [ ] create_preset +- [ ] create_queue +- [ ] delete_job_template +- [ ] delete_preset +- [ ] delete_queue +- [ ] describe_endpoints +- [ ] get_job +- [ ] get_job_template +- [ ] get_preset +- [ ] get_queue +- [ ] list_job_templates +- [ ] list_jobs +- [ ] list_presets +- [ ] list_queues +- [ ] list_tags_for_resource +- [ ] tag_resource +- [ ] untag_resource +- [ ] update_job_template +- [ ] update_preset +- [ ] update_queue + +## medialive - 0% implemented +- [ ] create_channel +- [ ] create_input +- [ ] create_input_security_group +- [ ] delete_channel +- [ ] delete_input +- [ ] delete_input_security_group +- [ ] delete_reservation +- [ ] describe_channel +- [ ] describe_input +- [ ] describe_input_security_group +- [ ] describe_offering +- [ ] describe_reservation +- [ ] list_channels +- [ ] list_input_security_groups +- [ ] list_inputs +- [ ] list_offerings +- [ ] list_reservations +- [ ] purchase_offering +- [ ] start_channel +- [ ] stop_channel +- [ ] update_channel +- [ ] update_input +- [ ] update_input_security_group + +## mediapackage - 0% implemented +- [ ] create_channel +- [ ] create_origin_endpoint +- [ ] delete_channel +- [ ] delete_origin_endpoint +- [ ] describe_channel +- [ ] describe_origin_endpoint +- [ ] list_channels +- [ ] list_origin_endpoints +- [ ] rotate_channel_credentials +- [ ] update_channel +- [ ] update_origin_endpoint + +## mediastore - 0% implemented +- [ ] create_container +- [ ] delete_container +- [ ] delete_container_policy +- [ ] delete_cors_policy +- [ ] describe_container +- [ ] get_container_policy +- [ ] get_cors_policy +- [ ] list_containers +- [ ] put_container_policy +- [ ] put_cors_policy + +## mediastore-data - 0% implemented +- [ ] delete_object +- [ ] describe_object +- [ ] get_object +- [ ] list_items +- [ ] put_object + +## mediatailor - 0% implemented +- [ ] delete_playback_configuration +- [ ] get_playback_configuration +- [ ] list_playback_configurations +- [ ] put_playback_configuration + +## meteringmarketplace - 0% implemented +- [ ] batch_meter_usage +- [ ] meter_usage +- [ ] resolve_customer + +## mgh - 0% implemented +- [ ] associate_created_artifact +- [ ] associate_discovered_resource +- [ ] create_progress_update_stream +- [ ] delete_progress_update_stream +- [ ] describe_application_state +- [ ] describe_migration_task +- [ ] disassociate_created_artifact +- [ ] disassociate_discovered_resource +- [ ] import_migration_task +- [ ] list_created_artifacts +- [ ] list_discovered_resources +- [ ] list_migration_tasks +- [ ] list_progress_update_streams +- [ ] notify_application_state +- [ ] notify_migration_task_state +- [ ] put_resource_attributes + +## mobile - 0% implemented +- [ ] create_project +- [ ] delete_project +- [ ] describe_bundle +- [ ] describe_project +- [ ] export_bundle +- [ ] export_project +- [ ] list_bundles +- [ ] list_projects +- [ ] update_project + +## mq - 0% implemented +- [ ] create_broker +- [ ] create_configuration +- [ ] create_user +- [ ] delete_broker +- [ ] delete_user +- [ ] describe_broker +- [ ] describe_configuration +- [ ] describe_configuration_revision +- [ ] describe_user +- [ ] list_brokers +- [ ] list_configuration_revisions +- [ ] list_configurations +- [ ] list_users +- [ ] reboot_broker +- [ ] update_broker +- [ ] update_configuration +- [ ] update_user + +## mturk - 0% implemented +- [ ] accept_qualification_request +- [ ] approve_assignment +- [ ] 
associate_qualification_with_worker +- [ ] create_additional_assignments_for_hit +- [ ] create_hit +- [ ] create_hit_type +- [ ] create_hit_with_hit_type +- [ ] create_qualification_type +- [ ] create_worker_block +- [ ] delete_hit +- [ ] delete_qualification_type +- [ ] delete_worker_block +- [ ] disassociate_qualification_from_worker +- [ ] get_account_balance +- [ ] get_assignment +- [ ] get_file_upload_url +- [ ] get_hit +- [ ] get_qualification_score +- [ ] get_qualification_type +- [ ] list_assignments_for_hit +- [ ] list_bonus_payments +- [ ] list_hits +- [ ] list_hits_for_qualification_type +- [ ] list_qualification_requests +- [ ] list_qualification_types +- [ ] list_review_policy_results_for_hit +- [ ] list_reviewable_hits +- [ ] list_worker_blocks +- [ ] list_workers_with_qualification_type +- [ ] notify_workers +- [ ] reject_assignment +- [ ] reject_qualification_request +- [ ] send_bonus +- [ ] send_test_event_notification +- [ ] update_expiration_for_hit +- [ ] update_hit_review_status +- [ ] update_hit_type_of_hit +- [ ] update_notification_settings +- [ ] update_qualification_type + +## neptune - 0% implemented +- [ ] add_role_to_db_cluster +- [ ] add_source_identifier_to_subscription +- [ ] add_tags_to_resource +- [ ] apply_pending_maintenance_action +- [ ] copy_db_cluster_parameter_group +- [ ] copy_db_cluster_snapshot +- [ ] copy_db_parameter_group +- [ ] create_db_cluster +- [ ] create_db_cluster_parameter_group +- [ ] create_db_cluster_snapshot +- [ ] create_db_instance +- [ ] create_db_parameter_group +- [ ] create_db_subnet_group +- [ ] create_event_subscription +- [ ] delete_db_cluster +- [ ] delete_db_cluster_parameter_group +- [ ] delete_db_cluster_snapshot +- [ ] delete_db_instance +- [ ] delete_db_parameter_group +- [ ] delete_db_subnet_group +- [ ] delete_event_subscription +- [ ] describe_db_cluster_parameter_groups +- [ ] describe_db_cluster_parameters +- [ ] describe_db_cluster_snapshot_attributes +- [ ] describe_db_cluster_snapshots +- [ ] describe_db_clusters +- [ ] describe_db_engine_versions +- [ ] describe_db_instances +- [ ] describe_db_parameter_groups +- [ ] describe_db_parameters +- [ ] describe_db_subnet_groups +- [ ] describe_engine_default_cluster_parameters +- [ ] describe_engine_default_parameters +- [ ] describe_event_categories +- [ ] describe_event_subscriptions +- [ ] describe_events +- [ ] describe_orderable_db_instance_options +- [ ] describe_pending_maintenance_actions +- [ ] describe_valid_db_instance_modifications +- [ ] failover_db_cluster +- [ ] list_tags_for_resource +- [ ] modify_db_cluster +- [ ] modify_db_cluster_parameter_group +- [ ] modify_db_cluster_snapshot_attribute +- [ ] modify_db_instance +- [ ] modify_db_parameter_group +- [ ] modify_db_subnet_group +- [ ] modify_event_subscription +- [ ] promote_read_replica_db_cluster +- [ ] reboot_db_instance +- [ ] remove_role_from_db_cluster +- [ ] remove_source_identifier_from_subscription +- [ ] remove_tags_from_resource +- [ ] reset_db_cluster_parameter_group +- [ ] reset_db_parameter_group +- [ ] restore_db_cluster_from_snapshot +- [ ] restore_db_cluster_to_point_in_time + +## opsworks - 12% implemented +- [ ] assign_instance +- [ ] assign_volume +- [ ] associate_elastic_ip +- [ ] attach_elastic_load_balancer +- [ ] clone_stack +- [X] create_app +- [ ] create_deployment +- [X] create_instance +- [X] create_layer +- [X] create_stack +- [ ] create_user_profile +- [ ] delete_app +- [ ] delete_instance +- [ ] delete_layer +- [ ] delete_stack +- [ ] delete_user_profile +- [ ] 
deregister_ecs_cluster +- [ ] deregister_elastic_ip +- [ ] deregister_instance +- [ ] deregister_rds_db_instance +- [ ] deregister_volume +- [ ] describe_agent_versions +- [X] describe_apps +- [ ] describe_commands +- [ ] describe_deployments +- [ ] describe_ecs_clusters +- [ ] describe_elastic_ips +- [ ] describe_elastic_load_balancers +- [X] describe_instances +- [X] describe_layers +- [ ] describe_load_based_auto_scaling +- [ ] describe_my_user_profile +- [ ] describe_operating_systems +- [ ] describe_permissions +- [ ] describe_raid_arrays +- [ ] describe_rds_db_instances +- [ ] describe_service_errors +- [ ] describe_stack_provisioning_parameters +- [ ] describe_stack_summary +- [X] describe_stacks +- [ ] describe_time_based_auto_scaling +- [ ] describe_user_profiles +- [ ] describe_volumes +- [ ] detach_elastic_load_balancer +- [ ] disassociate_elastic_ip +- [ ] get_hostname_suggestion +- [ ] grant_access +- [ ] list_tags +- [ ] reboot_instance +- [ ] register_ecs_cluster +- [ ] register_elastic_ip +- [ ] register_instance +- [ ] register_rds_db_instance +- [ ] register_volume +- [ ] set_load_based_auto_scaling +- [ ] set_permission +- [ ] set_time_based_auto_scaling +- [X] start_instance +- [ ] start_stack +- [ ] stop_instance +- [ ] stop_stack +- [ ] tag_resource +- [ ] unassign_instance +- [ ] unassign_volume +- [ ] untag_resource +- [ ] update_app +- [ ] update_elastic_ip +- [ ] update_instance +- [ ] update_layer +- [ ] update_my_user_profile +- [ ] update_rds_db_instance +- [ ] update_stack +- [ ] update_user_profile +- [ ] update_volume + +## opsworkscm - 0% implemented +- [ ] associate_node +- [ ] create_backup +- [ ] create_server +- [ ] delete_backup +- [ ] delete_server +- [ ] describe_account_attributes +- [ ] describe_backups +- [ ] describe_events +- [ ] describe_node_association_status +- [ ] describe_servers +- [ ] disassociate_node +- [ ] restore_server +- [ ] start_maintenance +- [ ] update_server +- [ ] update_server_engine_attributes + +## organizations - 0% implemented +- [ ] accept_handshake +- [ ] attach_policy +- [ ] cancel_handshake +- [ ] create_account +- [ ] create_organization +- [ ] create_organizational_unit +- [ ] create_policy +- [ ] decline_handshake +- [ ] delete_organization +- [ ] delete_organizational_unit +- [ ] delete_policy +- [ ] describe_account +- [ ] describe_create_account_status +- [ ] describe_handshake +- [ ] describe_organization +- [ ] describe_organizational_unit +- [ ] describe_policy +- [ ] detach_policy +- [ ] disable_aws_service_access +- [ ] disable_policy_type +- [ ] enable_all_features +- [ ] enable_aws_service_access +- [ ] enable_policy_type +- [ ] invite_account_to_organization +- [ ] leave_organization +- [ ] list_accounts +- [ ] list_accounts_for_parent +- [ ] list_aws_service_access_for_organization +- [ ] list_children +- [ ] list_create_account_status +- [ ] list_handshakes_for_account +- [ ] list_handshakes_for_organization +- [ ] list_organizational_units_for_parent +- [ ] list_parents +- [ ] list_policies +- [ ] list_policies_for_target +- [ ] list_roots +- [ ] list_targets_for_policy +- [ ] move_account +- [ ] remove_account_from_organization +- [ ] update_organizational_unit +- [ ] update_policy + +## pi - 0% implemented +- [ ] describe_dimension_keys +- [ ] get_resource_metrics + +## pinpoint - 0% implemented +- [ ] create_app +- [ ] create_campaign +- [ ] create_export_job +- [ ] create_import_job +- [ ] create_segment +- [ ] delete_adm_channel +- [ ] delete_apns_channel +- [ ] delete_apns_sandbox_channel +- [ 
] delete_apns_voip_channel +- [ ] delete_apns_voip_sandbox_channel +- [ ] delete_app +- [ ] delete_baidu_channel +- [ ] delete_campaign +- [ ] delete_email_channel +- [ ] delete_endpoint +- [ ] delete_event_stream +- [ ] delete_gcm_channel +- [ ] delete_segment +- [ ] delete_sms_channel +- [ ] delete_user_endpoints +- [ ] get_adm_channel +- [ ] get_apns_channel +- [ ] get_apns_sandbox_channel +- [ ] get_apns_voip_channel +- [ ] get_apns_voip_sandbox_channel +- [ ] get_app +- [ ] get_application_settings +- [ ] get_apps +- [ ] get_baidu_channel +- [ ] get_campaign +- [ ] get_campaign_activities +- [ ] get_campaign_version +- [ ] get_campaign_versions +- [ ] get_campaigns +- [ ] get_channels +- [ ] get_email_channel +- [ ] get_endpoint +- [ ] get_event_stream +- [ ] get_export_job +- [ ] get_export_jobs +- [ ] get_gcm_channel +- [ ] get_import_job +- [ ] get_import_jobs +- [ ] get_segment +- [ ] get_segment_export_jobs +- [ ] get_segment_import_jobs +- [ ] get_segment_version +- [ ] get_segment_versions +- [ ] get_segments +- [ ] get_sms_channel +- [ ] get_user_endpoints +- [ ] phone_number_validate +- [ ] put_event_stream +- [ ] put_events +- [ ] remove_attributes +- [ ] send_messages +- [ ] send_users_messages +- [ ] update_adm_channel +- [ ] update_apns_channel +- [ ] update_apns_sandbox_channel +- [ ] update_apns_voip_channel +- [ ] update_apns_voip_sandbox_channel +- [ ] update_application_settings +- [ ] update_baidu_channel +- [ ] update_campaign +- [ ] update_email_channel +- [ ] update_endpoint +- [ ] update_endpoints_batch +- [ ] update_gcm_channel +- [ ] update_segment +- [ ] update_sms_channel + +## polly - 55% implemented +- [X] delete_lexicon +- [X] describe_voices +- [X] get_lexicon +- [ ] get_speech_synthesis_task +- [X] list_lexicons +- [ ] list_speech_synthesis_tasks +- [X] put_lexicon +- [ ] start_speech_synthesis_task +- [ ] synthesize_speech + +## pricing - 0% implemented +- [ ] describe_services +- [ ] get_attribute_values +- [ ] get_products + +## rds - 0% implemented +- [ ] add_role_to_db_cluster +- [ ] add_source_identifier_to_subscription +- [ ] add_tags_to_resource +- [ ] apply_pending_maintenance_action +- [ ] authorize_db_security_group_ingress +- [ ] backtrack_db_cluster +- [ ] copy_db_cluster_parameter_group +- [ ] copy_db_cluster_snapshot +- [ ] copy_db_parameter_group +- [ ] copy_db_snapshot +- [ ] copy_option_group +- [ ] create_db_cluster +- [ ] create_db_cluster_parameter_group +- [ ] create_db_cluster_snapshot +- [ ] create_db_instance +- [ ] create_db_instance_read_replica +- [ ] create_db_parameter_group +- [ ] create_db_security_group +- [ ] create_db_snapshot +- [ ] create_db_subnet_group +- [ ] create_event_subscription +- [ ] create_option_group +- [ ] delete_db_cluster +- [ ] delete_db_cluster_parameter_group +- [ ] delete_db_cluster_snapshot +- [ ] delete_db_instance +- [ ] delete_db_parameter_group +- [ ] delete_db_security_group +- [ ] delete_db_snapshot +- [ ] delete_db_subnet_group +- [ ] delete_event_subscription +- [ ] delete_option_group +- [ ] describe_account_attributes +- [ ] describe_certificates +- [ ] describe_db_cluster_backtracks +- [ ] describe_db_cluster_parameter_groups +- [ ] describe_db_cluster_parameters +- [ ] describe_db_cluster_snapshot_attributes +- [ ] describe_db_cluster_snapshots +- [ ] describe_db_clusters +- [ ] describe_db_engine_versions +- [ ] describe_db_instances +- [ ] describe_db_log_files +- [ ] describe_db_parameter_groups +- [ ] describe_db_parameters +- [ ] describe_db_security_groups +- [ ] 
describe_db_snapshot_attributes +- [ ] describe_db_snapshots +- [ ] describe_db_subnet_groups +- [ ] describe_engine_default_cluster_parameters +- [ ] describe_engine_default_parameters +- [ ] describe_event_categories +- [ ] describe_event_subscriptions +- [ ] describe_events +- [ ] describe_option_group_options +- [ ] describe_option_groups +- [ ] describe_orderable_db_instance_options +- [ ] describe_pending_maintenance_actions +- [ ] describe_reserved_db_instances +- [ ] describe_reserved_db_instances_offerings +- [ ] describe_source_regions +- [ ] describe_valid_db_instance_modifications +- [ ] download_db_log_file_portion +- [ ] failover_db_cluster +- [ ] list_tags_for_resource +- [ ] modify_current_db_cluster_capacity +- [ ] modify_db_cluster +- [ ] modify_db_cluster_parameter_group +- [ ] modify_db_cluster_snapshot_attribute +- [ ] modify_db_instance +- [ ] modify_db_parameter_group +- [ ] modify_db_snapshot +- [ ] modify_db_snapshot_attribute +- [ ] modify_db_subnet_group +- [ ] modify_event_subscription +- [ ] modify_option_group +- [ ] promote_read_replica +- [ ] promote_read_replica_db_cluster +- [ ] purchase_reserved_db_instances_offering +- [ ] reboot_db_instance +- [ ] remove_role_from_db_cluster +- [ ] remove_source_identifier_from_subscription +- [ ] remove_tags_from_resource +- [ ] reset_db_cluster_parameter_group +- [ ] reset_db_parameter_group +- [ ] restore_db_cluster_from_s3 +- [ ] restore_db_cluster_from_snapshot +- [ ] restore_db_cluster_to_point_in_time +- [ ] restore_db_instance_from_db_snapshot +- [ ] restore_db_instance_from_s3 +- [ ] restore_db_instance_to_point_in_time +- [ ] revoke_db_security_group_ingress +- [ ] start_db_instance +- [ ] stop_db_instance + +## redshift - 38% implemented +- [ ] accept_reserved_node_exchange +- [ ] authorize_cluster_security_group_ingress +- [ ] authorize_snapshot_access +- [ ] copy_cluster_snapshot +- [X] create_cluster +- [X] create_cluster_parameter_group +- [X] create_cluster_security_group +- [X] create_cluster_snapshot +- [X] create_cluster_subnet_group +- [ ] create_event_subscription +- [ ] create_hsm_client_certificate +- [ ] create_hsm_configuration +- [X] create_snapshot_copy_grant +- [X] create_tags +- [X] delete_cluster +- [X] delete_cluster_parameter_group +- [X] delete_cluster_security_group +- [X] delete_cluster_snapshot +- [X] delete_cluster_subnet_group +- [ ] delete_event_subscription +- [ ] delete_hsm_client_certificate +- [ ] delete_hsm_configuration +- [X] delete_snapshot_copy_grant +- [X] delete_tags +- [ ] describe_cluster_db_revisions +- [X] describe_cluster_parameter_groups +- [ ] describe_cluster_parameters +- [X] describe_cluster_security_groups +- [X] describe_cluster_snapshots +- [X] describe_cluster_subnet_groups +- [ ] describe_cluster_tracks +- [ ] describe_cluster_versions +- [X] describe_clusters +- [ ] describe_default_cluster_parameters +- [ ] describe_event_categories +- [ ] describe_event_subscriptions +- [ ] describe_events +- [ ] describe_hsm_client_certificates +- [ ] describe_hsm_configurations +- [ ] describe_logging_status +- [ ] describe_orderable_cluster_options +- [ ] describe_reserved_node_offerings +- [ ] describe_reserved_nodes +- [ ] describe_resize +- [X] describe_snapshot_copy_grants +- [ ] describe_table_restore_status +- [X] describe_tags +- [ ] disable_logging +- [X] disable_snapshot_copy +- [ ] enable_logging +- [X] enable_snapshot_copy +- [ ] get_cluster_credentials +- [ ] get_reserved_node_exchange_offerings +- [X] modify_cluster +- [ ] modify_cluster_db_revision +- [ 
] modify_cluster_iam_roles +- [ ] modify_cluster_parameter_group +- [ ] modify_cluster_subnet_group +- [ ] modify_event_subscription +- [X] modify_snapshot_copy_retention_period +- [ ] purchase_reserved_node_offering +- [ ] reboot_cluster +- [ ] reset_cluster_parameter_group +- [X] restore_from_cluster_snapshot +- [ ] restore_table_from_cluster_snapshot +- [ ] revoke_cluster_security_group_ingress +- [ ] revoke_snapshot_access +- [ ] rotate_encryption_key + +## rekognition - 0% implemented +- [ ] compare_faces +- [ ] create_collection +- [ ] create_stream_processor +- [ ] delete_collection +- [ ] delete_faces +- [ ] delete_stream_processor +- [ ] describe_stream_processor +- [ ] detect_faces +- [ ] detect_labels +- [ ] detect_moderation_labels +- [ ] detect_text +- [ ] get_celebrity_info +- [ ] get_celebrity_recognition +- [ ] get_content_moderation +- [ ] get_face_detection +- [ ] get_face_search +- [ ] get_label_detection +- [ ] get_person_tracking +- [ ] index_faces +- [ ] list_collections +- [ ] list_faces +- [ ] list_stream_processors +- [ ] recognize_celebrities +- [ ] search_faces +- [ ] search_faces_by_image +- [ ] start_celebrity_recognition +- [ ] start_content_moderation +- [ ] start_face_detection +- [ ] start_face_search +- [ ] start_label_detection +- [ ] start_person_tracking +- [ ] start_stream_processor +- [ ] stop_stream_processor + +## resource-groups - 0% implemented +- [ ] create_group +- [ ] delete_group +- [ ] get_group +- [ ] get_group_query +- [ ] get_tags +- [ ] list_group_resources +- [ ] list_groups +- [ ] search_resources +- [ ] tag +- [ ] untag +- [ ] update_group +- [ ] update_group_query + +## resourcegroupstaggingapi - 60% implemented +- [X] get_resources +- [X] get_tag_keys +- [X] get_tag_values +- [ ] tag_resources +- [ ] untag_resources + +## route53 - 12% implemented +- [ ] associate_vpc_with_hosted_zone +- [ ] change_resource_record_sets +- [X] change_tags_for_resource +- [X] create_health_check +- [X] create_hosted_zone +- [ ] create_query_logging_config +- [ ] create_reusable_delegation_set +- [ ] create_traffic_policy +- [ ] create_traffic_policy_instance +- [ ] create_traffic_policy_version +- [ ] create_vpc_association_authorization +- [X] delete_health_check +- [X] delete_hosted_zone +- [ ] delete_query_logging_config +- [ ] delete_reusable_delegation_set +- [ ] delete_traffic_policy +- [ ] delete_traffic_policy_instance +- [ ] delete_vpc_association_authorization +- [ ] disassociate_vpc_from_hosted_zone +- [ ] get_account_limit +- [ ] get_change +- [ ] get_checker_ip_ranges +- [ ] get_geo_location +- [ ] get_health_check +- [ ] get_health_check_count +- [ ] get_health_check_last_failure_reason +- [ ] get_health_check_status +- [X] get_hosted_zone +- [ ] get_hosted_zone_count +- [ ] get_hosted_zone_limit +- [ ] get_query_logging_config +- [ ] get_reusable_delegation_set +- [ ] get_reusable_delegation_set_limit +- [ ] get_traffic_policy +- [ ] get_traffic_policy_instance +- [ ] get_traffic_policy_instance_count +- [ ] list_geo_locations +- [ ] list_health_checks +- [ ] list_hosted_zones +- [ ] list_hosted_zones_by_name +- [ ] list_query_logging_configs +- [ ] list_resource_record_sets +- [ ] list_reusable_delegation_sets +- [X] list_tags_for_resource +- [ ] list_tags_for_resources +- [ ] list_traffic_policies +- [ ] list_traffic_policy_instances +- [ ] list_traffic_policy_instances_by_hosted_zone +- [ ] list_traffic_policy_instances_by_policy +- [ ] list_traffic_policy_versions +- [ ] list_vpc_association_authorizations +- [ ] test_dns_answer +- [ 
] update_health_check +- [ ] update_hosted_zone_comment +- [ ] update_traffic_policy_comment +- [ ] update_traffic_policy_instance + +## route53domains - 0% implemented +- [ ] check_domain_availability +- [ ] check_domain_transferability +- [ ] delete_tags_for_domain +- [ ] disable_domain_auto_renew +- [ ] disable_domain_transfer_lock +- [ ] enable_domain_auto_renew +- [ ] enable_domain_transfer_lock +- [ ] get_contact_reachability_status +- [ ] get_domain_detail +- [ ] get_domain_suggestions +- [ ] get_operation_detail +- [ ] list_domains +- [ ] list_operations +- [ ] list_tags_for_domain +- [ ] register_domain +- [ ] renew_domain +- [ ] resend_contact_reachability_email +- [ ] retrieve_domain_auth_code +- [ ] transfer_domain +- [ ] update_domain_contact +- [ ] update_domain_contact_privacy +- [ ] update_domain_nameservers +- [ ] update_tags_for_domain +- [ ] view_billing + +## s3 - 15% implemented +- [ ] abort_multipart_upload +- [ ] complete_multipart_upload +- [ ] copy_object +- [X] create_bucket +- [ ] create_multipart_upload +- [X] delete_bucket +- [ ] delete_bucket_analytics_configuration +- [X] delete_bucket_cors +- [ ] delete_bucket_encryption +- [ ] delete_bucket_inventory_configuration +- [ ] delete_bucket_lifecycle +- [ ] delete_bucket_metrics_configuration +- [X] delete_bucket_policy +- [ ] delete_bucket_replication +- [X] delete_bucket_tagging +- [ ] delete_bucket_website +- [ ] delete_object +- [ ] delete_object_tagging +- [ ] delete_objects +- [ ] get_bucket_accelerate_configuration +- [X] get_bucket_acl +- [ ] get_bucket_analytics_configuration +- [ ] get_bucket_cors +- [ ] get_bucket_encryption +- [ ] get_bucket_inventory_configuration +- [ ] get_bucket_lifecycle +- [ ] get_bucket_lifecycle_configuration +- [ ] get_bucket_location +- [ ] get_bucket_logging +- [ ] get_bucket_metrics_configuration +- [ ] get_bucket_notification +- [ ] get_bucket_notification_configuration +- [X] get_bucket_policy +- [ ] get_bucket_replication +- [ ] get_bucket_request_payment +- [ ] get_bucket_tagging +- [X] get_bucket_versioning +- [ ] get_bucket_website +- [ ] get_object +- [ ] get_object_acl +- [ ] get_object_tagging +- [ ] get_object_torrent +- [ ] head_bucket +- [ ] head_object +- [ ] list_bucket_analytics_configurations +- [ ] list_bucket_inventory_configurations +- [ ] list_bucket_metrics_configurations +- [ ] list_buckets +- [ ] list_multipart_uploads +- [ ] list_object_versions +- [ ] list_objects +- [ ] list_objects_v2 +- [ ] list_parts +- [ ] put_bucket_accelerate_configuration +- [ ] put_bucket_acl +- [ ] put_bucket_analytics_configuration +- [X] put_bucket_cors +- [ ] put_bucket_encryption +- [ ] put_bucket_inventory_configuration +- [ ] put_bucket_lifecycle +- [ ] put_bucket_lifecycle_configuration +- [X] put_bucket_logging +- [ ] put_bucket_metrics_configuration +- [ ] put_bucket_notification +- [X] put_bucket_notification_configuration +- [ ] put_bucket_policy +- [ ] put_bucket_replication +- [ ] put_bucket_request_payment +- [X] put_bucket_tagging +- [ ] put_bucket_versioning +- [ ] put_bucket_website +- [ ] put_object +- [ ] put_object_acl +- [ ] put_object_tagging +- [ ] restore_object +- [ ] select_object_content +- [ ] upload_part +- [ ] upload_part_copy + +## sagemaker - 0% implemented +- [ ] add_tags +- [ ] create_endpoint +- [ ] create_endpoint_config +- [ ] create_hyper_parameter_tuning_job +- [ ] create_model +- [ ] create_notebook_instance +- [ ] create_notebook_instance_lifecycle_config +- [ ] create_presigned_notebook_instance_url +- [ ] create_training_job +- [ 
] create_transform_job +- [ ] delete_endpoint +- [ ] delete_endpoint_config +- [ ] delete_model +- [ ] delete_notebook_instance +- [ ] delete_notebook_instance_lifecycle_config +- [ ] delete_tags +- [ ] describe_endpoint +- [ ] describe_endpoint_config +- [ ] describe_hyper_parameter_tuning_job +- [ ] describe_model +- [ ] describe_notebook_instance +- [ ] describe_notebook_instance_lifecycle_config +- [ ] describe_training_job +- [ ] describe_transform_job +- [ ] list_endpoint_configs +- [ ] list_endpoints +- [ ] list_hyper_parameter_tuning_jobs +- [ ] list_models +- [ ] list_notebook_instance_lifecycle_configs +- [ ] list_notebook_instances +- [ ] list_tags +- [ ] list_training_jobs +- [ ] list_training_jobs_for_hyper_parameter_tuning_job +- [ ] list_transform_jobs +- [ ] start_notebook_instance +- [ ] stop_hyper_parameter_tuning_job +- [ ] stop_notebook_instance +- [ ] stop_training_job +- [ ] stop_transform_job +- [ ] update_endpoint +- [ ] update_endpoint_weights_and_capacities +- [ ] update_notebook_instance +- [ ] update_notebook_instance_lifecycle_config + +## sagemaker-runtime - 0% implemented +- [ ] invoke_endpoint + +## sdb - 0% implemented +- [ ] batch_delete_attributes +- [ ] batch_put_attributes +- [ ] create_domain +- [ ] delete_attributes +- [ ] delete_domain +- [ ] domain_metadata +- [ ] get_attributes +- [ ] list_domains +- [ ] put_attributes +- [ ] select + +## secretsmanager - 22% implemented +- [ ] cancel_rotate_secret +- [X] create_secret +- [ ] delete_resource_policy +- [ ] delete_secret +- [X] describe_secret +- [X] get_random_password +- [ ] get_resource_policy +- [X] get_secret_value +- [ ] list_secret_version_ids +- [ ] list_secrets +- [ ] put_resource_policy +- [ ] put_secret_value +- [ ] restore_secret +- [ ] rotate_secret +- [ ] tag_resource +- [ ] untag_resource +- [ ] update_secret +- [ ] update_secret_version_stage + +## serverlessrepo - 0% implemented +- [ ] create_application +- [ ] create_application_version +- [ ] create_cloud_formation_change_set +- [ ] delete_application +- [ ] get_application +- [ ] get_application_policy +- [ ] list_application_versions +- [ ] list_applications +- [ ] put_application_policy +- [ ] update_application + +## servicecatalog - 0% implemented +- [ ] accept_portfolio_share +- [ ] associate_principal_with_portfolio +- [ ] associate_product_with_portfolio +- [ ] associate_tag_option_with_resource +- [ ] copy_product +- [ ] create_constraint +- [ ] create_portfolio +- [ ] create_portfolio_share +- [ ] create_product +- [ ] create_provisioned_product_plan +- [ ] create_provisioning_artifact +- [ ] create_tag_option +- [ ] delete_constraint +- [ ] delete_portfolio +- [ ] delete_portfolio_share +- [ ] delete_product +- [ ] delete_provisioned_product_plan +- [ ] delete_provisioning_artifact +- [ ] delete_tag_option +- [ ] describe_constraint +- [ ] describe_copy_product_status +- [ ] describe_portfolio +- [ ] describe_product +- [ ] describe_product_as_admin +- [ ] describe_product_view +- [ ] describe_provisioned_product +- [ ] describe_provisioned_product_plan +- [ ] describe_provisioning_artifact +- [ ] describe_provisioning_parameters +- [ ] describe_record +- [ ] describe_tag_option +- [ ] disassociate_principal_from_portfolio +- [ ] disassociate_product_from_portfolio +- [ ] disassociate_tag_option_from_resource +- [ ] execute_provisioned_product_plan +- [ ] list_accepted_portfolio_shares +- [ ] list_constraints_for_portfolio +- [ ] list_launch_paths +- [ ] list_portfolio_access +- [ ] list_portfolios +- [ ] 
list_portfolios_for_product +- [ ] list_principals_for_portfolio +- [ ] list_provisioned_product_plans +- [ ] list_provisioning_artifacts +- [ ] list_record_history +- [ ] list_resources_for_tag_option +- [ ] list_tag_options +- [ ] provision_product +- [ ] reject_portfolio_share +- [ ] scan_provisioned_products +- [ ] search_products +- [ ] search_products_as_admin +- [ ] search_provisioned_products +- [ ] terminate_provisioned_product +- [ ] update_constraint +- [ ] update_portfolio +- [ ] update_product +- [ ] update_provisioned_product +- [ ] update_provisioning_artifact +- [ ] update_tag_option + +## servicediscovery - 0% implemented +- [ ] create_private_dns_namespace +- [ ] create_public_dns_namespace +- [ ] create_service +- [ ] delete_namespace +- [ ] delete_service +- [ ] deregister_instance +- [ ] get_instance +- [ ] get_instances_health_status +- [ ] get_namespace +- [ ] get_operation +- [ ] get_service +- [ ] list_instances +- [ ] list_namespaces +- [ ] list_operations +- [ ] list_services +- [ ] register_instance +- [ ] update_instance_custom_health_status +- [ ] update_service + +## ses - 11% implemented +- [ ] clone_receipt_rule_set +- [ ] create_configuration_set +- [ ] create_configuration_set_event_destination +- [ ] create_configuration_set_tracking_options +- [ ] create_custom_verification_email_template +- [ ] create_receipt_filter +- [ ] create_receipt_rule +- [ ] create_receipt_rule_set +- [ ] create_template +- [ ] delete_configuration_set +- [ ] delete_configuration_set_event_destination +- [ ] delete_configuration_set_tracking_options +- [ ] delete_custom_verification_email_template +- [X] delete_identity +- [ ] delete_identity_policy +- [ ] delete_receipt_filter +- [ ] delete_receipt_rule +- [ ] delete_receipt_rule_set +- [ ] delete_template +- [ ] delete_verified_email_address +- [ ] describe_active_receipt_rule_set +- [ ] describe_configuration_set +- [ ] describe_receipt_rule +- [ ] describe_receipt_rule_set +- [ ] get_account_sending_enabled +- [ ] get_custom_verification_email_template +- [ ] get_identity_dkim_attributes +- [ ] get_identity_mail_from_domain_attributes +- [ ] get_identity_notification_attributes +- [ ] get_identity_policies +- [ ] get_identity_verification_attributes +- [X] get_send_quota +- [ ] get_send_statistics +- [ ] get_template +- [ ] list_configuration_sets +- [ ] list_custom_verification_email_templates +- [X] list_identities +- [ ] list_identity_policies +- [ ] list_receipt_filters +- [ ] list_receipt_rule_sets +- [ ] list_templates +- [X] list_verified_email_addresses +- [ ] put_identity_policy +- [ ] reorder_receipt_rule_set +- [ ] send_bounce +- [ ] send_bulk_templated_email +- [ ] send_custom_verification_email +- [X] send_email +- [X] send_raw_email +- [ ] send_templated_email +- [ ] set_active_receipt_rule_set +- [ ] set_identity_dkim_enabled +- [ ] set_identity_feedback_forwarding_enabled +- [ ] set_identity_headers_in_notifications_enabled +- [ ] set_identity_mail_from_domain +- [ ] set_identity_notification_topic +- [ ] set_receipt_rule_position +- [ ] test_render_template +- [ ] update_account_sending_enabled +- [ ] update_configuration_set_event_destination +- [ ] update_configuration_set_reputation_metrics_enabled +- [ ] update_configuration_set_sending_enabled +- [ ] update_configuration_set_tracking_options +- [ ] update_custom_verification_email_template +- [ ] update_receipt_rule +- [ ] update_template +- [ ] verify_domain_dkim +- [ ] verify_domain_identity +- [X] verify_email_address +- [X] verify_email_identity + 
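(Editor's note: the `## ses` checklist above only records which operations are mocked. As a quick orientation, here is a minimal sketch of exercising a few of the `[X]` entries, assuming moto's `mock_ses` decorator and the boto3/`sure` test style used for `mock_iot` later in this series; the test name and addresses are illustrative, not part of any patch.)

```python
import boto3
import sure  # noqa

from moto import mock_ses


@mock_ses
def test_ses_implemented_subset():
    client = boto3.client('ses', region_name='us-east-1')

    # verify_email_identity is marked [X] above; the mocked backend,
    # like real SES, rejects send_email from unverified senders
    client.verify_email_identity(EmailAddress='sender@example.com')

    client.send_email(
        Source='sender@example.com',
        Destination={'ToAddresses': ['recipient@example.com']},
        Message={
            'Subject': {'Data': 'test subject'},
            'Body': {'Text': {'Data': 'test body'}}
        }
    )

    # list_identities and get_send_quota are also marked [X] above
    identities = client.list_identities()
    identities.should.have.key('Identities').which.should.contain('sender@example.com')

    quota = client.get_send_quota()
    quota.should.have.key('SentLast24Hours')
```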
+## shield - 0% implemented +- [ ] associate_drt_log_bucket +- [ ] associate_drt_role +- [ ] create_protection +- [ ] create_subscription +- [ ] delete_protection +- [ ] delete_subscription +- [ ] describe_attack +- [ ] describe_drt_access +- [ ] describe_emergency_contact_settings +- [ ] describe_protection +- [ ] describe_subscription +- [ ] disassociate_drt_log_bucket +- [ ] disassociate_drt_role +- [ ] get_subscription_state +- [ ] list_attacks +- [ ] list_protections +- [ ] update_emergency_contact_settings +- [ ] update_subscription + +## sms - 0% implemented +- [ ] create_replication_job +- [ ] delete_replication_job +- [ ] delete_server_catalog +- [ ] disassociate_connector +- [ ] get_connectors +- [ ] get_replication_jobs +- [ ] get_replication_runs +- [ ] get_servers +- [ ] import_server_catalog +- [ ] start_on_demand_replication_run +- [ ] update_replication_job + +## snowball - 0% implemented +- [ ] cancel_cluster +- [ ] cancel_job +- [ ] create_address +- [ ] create_cluster +- [ ] create_job +- [ ] describe_address +- [ ] describe_addresses +- [ ] describe_cluster +- [ ] describe_job +- [ ] get_job_manifest +- [ ] get_job_unlock_code +- [ ] get_snowball_usage +- [ ] list_cluster_jobs +- [ ] list_clusters +- [ ] list_compatible_images +- [ ] list_jobs +- [ ] update_cluster +- [ ] update_job + +## sns - 53% implemented +- [ ] add_permission +- [ ] check_if_phone_number_is_opted_out +- [ ] confirm_subscription +- [X] create_platform_application +- [X] create_platform_endpoint +- [X] create_topic +- [X] delete_endpoint +- [X] delete_platform_application +- [X] delete_topic +- [ ] get_endpoint_attributes +- [ ] get_platform_application_attributes +- [ ] get_sms_attributes +- [X] get_subscription_attributes +- [ ] get_topic_attributes +- [X] list_endpoints_by_platform_application +- [ ] list_phone_numbers_opted_out +- [X] list_platform_applications +- [X] list_subscriptions +- [ ] list_subscriptions_by_topic +- [X] list_topics +- [ ] opt_in_phone_number +- [X] publish +- [ ] remove_permission +- [X] set_endpoint_attributes +- [ ] set_platform_application_attributes +- [ ] set_sms_attributes +- [X] set_subscription_attributes +- [ ] set_topic_attributes +- [X] subscribe +- [X] unsubscribe + +## sqs - 65% implemented +- [X] add_permission +- [X] change_message_visibility +- [ ] change_message_visibility_batch +- [X] create_queue +- [X] delete_message +- [ ] delete_message_batch +- [X] delete_queue +- [ ] get_queue_attributes +- [ ] get_queue_url +- [X] list_dead_letter_source_queues +- [ ] list_queue_tags +- [X] list_queues +- [X] purge_queue +- [ ] receive_message +- [X] remove_permission +- [X] send_message +- [ ] send_message_batch +- [X] set_queue_attributes +- [X] tag_queue +- [X] untag_queue + +## ssm - 10% implemented +- [X] add_tags_to_resource +- [ ] cancel_command +- [ ] create_activation +- [ ] create_association +- [ ] create_association_batch +- [ ] create_document +- [ ] create_maintenance_window +- [ ] create_patch_baseline +- [ ] create_resource_data_sync +- [ ] delete_activation +- [ ] delete_association +- [ ] delete_document +- [ ] delete_inventory +- [ ] delete_maintenance_window +- [X] delete_parameter +- [X] delete_parameters +- [ ] delete_patch_baseline +- [ ] delete_resource_data_sync +- [ ] deregister_managed_instance +- [ ] deregister_patch_baseline_for_patch_group +- [ ] deregister_target_from_maintenance_window +- [ ] deregister_task_from_maintenance_window +- [ ] describe_activations +- [ ] describe_association +- [ ] describe_association_execution_targets 
+- [ ] describe_association_executions +- [ ] describe_automation_executions +- [ ] describe_automation_step_executions +- [ ] describe_available_patches +- [ ] describe_document +- [ ] describe_document_permission +- [ ] describe_effective_instance_associations +- [ ] describe_effective_patches_for_patch_baseline +- [ ] describe_instance_associations_status +- [ ] describe_instance_information +- [ ] describe_instance_patch_states +- [ ] describe_instance_patch_states_for_patch_group +- [ ] describe_instance_patches +- [ ] describe_inventory_deletions +- [ ] describe_maintenance_window_execution_task_invocations +- [ ] describe_maintenance_window_execution_tasks +- [ ] describe_maintenance_window_executions +- [ ] describe_maintenance_window_targets +- [ ] describe_maintenance_window_tasks +- [ ] describe_maintenance_windows +- [ ] describe_parameters +- [ ] describe_patch_baselines +- [ ] describe_patch_group_state +- [ ] describe_patch_groups +- [ ] get_automation_execution +- [ ] get_command_invocation +- [ ] get_default_patch_baseline +- [ ] get_deployable_patch_snapshot_for_instance +- [ ] get_document +- [ ] get_inventory +- [ ] get_inventory_schema +- [ ] get_maintenance_window +- [ ] get_maintenance_window_execution +- [ ] get_maintenance_window_execution_task +- [ ] get_maintenance_window_execution_task_invocation +- [ ] get_maintenance_window_task +- [X] get_parameter +- [ ] get_parameter_history +- [X] get_parameters +- [X] get_parameters_by_path +- [ ] get_patch_baseline +- [ ] get_patch_baseline_for_patch_group +- [ ] label_parameter_version +- [ ] list_association_versions +- [ ] list_associations +- [ ] list_command_invocations +- [X] list_commands +- [ ] list_compliance_items +- [ ] list_compliance_summaries +- [ ] list_document_versions +- [ ] list_documents +- [ ] list_inventory_entries +- [ ] list_resource_compliance_summaries +- [ ] list_resource_data_sync +- [X] list_tags_for_resource +- [ ] modify_document_permission +- [ ] put_compliance_items +- [ ] put_inventory +- [X] put_parameter +- [ ] register_default_patch_baseline +- [ ] register_patch_baseline_for_patch_group +- [ ] register_target_with_maintenance_window +- [ ] register_task_with_maintenance_window +- [X] remove_tags_from_resource +- [ ] send_automation_signal +- [X] send_command +- [ ] start_associations_once +- [ ] start_automation_execution +- [ ] stop_automation_execution +- [ ] update_association +- [ ] update_association_status +- [ ] update_document +- [ ] update_document_default_version +- [ ] update_maintenance_window +- [ ] update_maintenance_window_target +- [ ] update_maintenance_window_task +- [ ] update_managed_instance_role +- [ ] update_patch_baseline + +## stepfunctions - 0% implemented +- [ ] create_activity +- [ ] create_state_machine +- [ ] delete_activity +- [ ] delete_state_machine +- [ ] describe_activity +- [ ] describe_execution +- [ ] describe_state_machine +- [ ] describe_state_machine_for_execution +- [ ] get_activity_task +- [ ] get_execution_history +- [ ] list_activities +- [ ] list_executions +- [ ] list_state_machines +- [ ] send_task_failure +- [ ] send_task_heartbeat +- [ ] send_task_success +- [ ] start_execution +- [ ] stop_execution +- [ ] update_state_machine + +## storagegateway - 0% implemented +- [ ] activate_gateway +- [ ] add_cache +- [ ] add_tags_to_resource +- [ ] add_upload_buffer +- [ ] add_working_storage +- [ ] cancel_archival +- [ ] cancel_retrieval +- [ ] create_cached_iscsi_volume +- [ ] create_nfs_file_share +- [ ] create_smb_file_share +- [ ] 
create_snapshot +- [ ] create_snapshot_from_volume_recovery_point +- [ ] create_stored_iscsi_volume +- [ ] create_tape_with_barcode +- [ ] create_tapes +- [ ] delete_bandwidth_rate_limit +- [ ] delete_chap_credentials +- [ ] delete_file_share +- [ ] delete_gateway +- [ ] delete_snapshot_schedule +- [ ] delete_tape +- [ ] delete_tape_archive +- [ ] delete_volume +- [ ] describe_bandwidth_rate_limit +- [ ] describe_cache +- [ ] describe_cached_iscsi_volumes +- [ ] describe_chap_credentials +- [ ] describe_gateway_information +- [ ] describe_maintenance_start_time +- [ ] describe_nfs_file_shares +- [ ] describe_smb_file_shares +- [ ] describe_smb_settings +- [ ] describe_snapshot_schedule +- [ ] describe_stored_iscsi_volumes +- [ ] describe_tape_archives +- [ ] describe_tape_recovery_points +- [ ] describe_tapes +- [ ] describe_upload_buffer +- [ ] describe_vtl_devices +- [ ] describe_working_storage +- [ ] disable_gateway +- [ ] join_domain +- [ ] list_file_shares +- [ ] list_gateways +- [ ] list_local_disks +- [ ] list_tags_for_resource +- [ ] list_tapes +- [ ] list_volume_initiators +- [ ] list_volume_recovery_points +- [ ] list_volumes +- [ ] notify_when_uploaded +- [ ] refresh_cache +- [ ] remove_tags_from_resource +- [ ] reset_cache +- [ ] retrieve_tape_archive +- [ ] retrieve_tape_recovery_point +- [ ] set_local_console_password +- [ ] set_smb_guest_password +- [ ] shutdown_gateway +- [ ] start_gateway +- [ ] update_bandwidth_rate_limit +- [ ] update_chap_credentials +- [ ] update_gateway_information +- [ ] update_gateway_software_now +- [ ] update_maintenance_start_time +- [ ] update_nfs_file_share +- [ ] update_smb_file_share +- [ ] update_snapshot_schedule +- [ ] update_vtl_device_type + +## sts - 42% implemented +- [X] assume_role +- [ ] assume_role_with_saml +- [ ] assume_role_with_web_identity +- [ ] decode_authorization_message +- [ ] get_caller_identity +- [X] get_federation_token +- [X] get_session_token + +## support - 0% implemented +- [ ] add_attachments_to_set +- [ ] add_communication_to_case +- [ ] create_case +- [ ] describe_attachment +- [ ] describe_cases +- [ ] describe_communications +- [ ] describe_services +- [ ] describe_severity_levels +- [ ] describe_trusted_advisor_check_refresh_statuses +- [ ] describe_trusted_advisor_check_result +- [ ] describe_trusted_advisor_check_summaries +- [ ] describe_trusted_advisor_checks +- [ ] refresh_trusted_advisor_check +- [ ] resolve_case + +## swf - 58% implemented +- [ ] count_closed_workflow_executions +- [ ] count_open_workflow_executions +- [X] count_pending_activity_tasks +- [X] count_pending_decision_tasks +- [ ] deprecate_activity_type +- [X] deprecate_domain +- [ ] deprecate_workflow_type +- [ ] describe_activity_type +- [X] describe_domain +- [X] describe_workflow_execution +- [ ] describe_workflow_type +- [ ] get_workflow_execution_history +- [ ] list_activity_types +- [X] list_closed_workflow_executions +- [X] list_domains +- [X] list_open_workflow_executions +- [ ] list_workflow_types +- [X] poll_for_activity_task +- [X] poll_for_decision_task +- [X] record_activity_task_heartbeat +- [ ] register_activity_type +- [X] register_domain +- [ ] register_workflow_type +- [ ] request_cancel_workflow_execution +- [ ] respond_activity_task_canceled +- [X] respond_activity_task_completed +- [X] respond_activity_task_failed +- [X] respond_decision_task_completed +- [X] signal_workflow_execution +- [X] start_workflow_execution +- [X] terminate_workflow_execution + +## transcribe - 0% implemented +- [ ] create_vocabulary +- [ 
] delete_vocabulary +- [ ] get_transcription_job +- [ ] get_vocabulary +- [ ] list_transcription_jobs +- [ ] list_vocabularies +- [ ] start_transcription_job +- [ ] update_vocabulary + +## translate - 0% implemented +- [ ] translate_text + +## waf - 0% implemented +- [ ] create_byte_match_set +- [ ] create_geo_match_set +- [ ] create_ip_set +- [ ] create_rate_based_rule +- [ ] create_regex_match_set +- [ ] create_regex_pattern_set +- [ ] create_rule +- [ ] create_rule_group +- [ ] create_size_constraint_set +- [ ] create_sql_injection_match_set +- [ ] create_web_acl +- [ ] create_xss_match_set +- [ ] delete_byte_match_set +- [ ] delete_geo_match_set +- [ ] delete_ip_set +- [ ] delete_permission_policy +- [ ] delete_rate_based_rule +- [ ] delete_regex_match_set +- [ ] delete_regex_pattern_set +- [ ] delete_rule +- [ ] delete_rule_group +- [ ] delete_size_constraint_set +- [ ] delete_sql_injection_match_set +- [ ] delete_web_acl +- [ ] delete_xss_match_set +- [ ] get_byte_match_set +- [ ] get_change_token +- [ ] get_change_token_status +- [ ] get_geo_match_set +- [ ] get_ip_set +- [ ] get_permission_policy +- [ ] get_rate_based_rule +- [ ] get_rate_based_rule_managed_keys +- [ ] get_regex_match_set +- [ ] get_regex_pattern_set +- [ ] get_rule +- [ ] get_rule_group +- [ ] get_sampled_requests +- [ ] get_size_constraint_set +- [ ] get_sql_injection_match_set +- [ ] get_web_acl +- [ ] get_xss_match_set +- [ ] list_activated_rules_in_rule_group +- [ ] list_byte_match_sets +- [ ] list_geo_match_sets +- [ ] list_ip_sets +- [ ] list_rate_based_rules +- [ ] list_regex_match_sets +- [ ] list_regex_pattern_sets +- [ ] list_rule_groups +- [ ] list_rules +- [ ] list_size_constraint_sets +- [ ] list_sql_injection_match_sets +- [ ] list_subscribed_rule_groups +- [ ] list_web_acls +- [ ] list_xss_match_sets +- [ ] put_permission_policy +- [ ] update_byte_match_set +- [ ] update_geo_match_set +- [ ] update_ip_set +- [ ] update_rate_based_rule +- [ ] update_regex_match_set +- [ ] update_regex_pattern_set +- [ ] update_rule +- [ ] update_rule_group +- [ ] update_size_constraint_set +- [ ] update_sql_injection_match_set +- [ ] update_web_acl +- [ ] update_xss_match_set + +## waf-regional - 0% implemented +- [ ] associate_web_acl +- [ ] create_byte_match_set +- [ ] create_geo_match_set +- [ ] create_ip_set +- [ ] create_rate_based_rule +- [ ] create_regex_match_set +- [ ] create_regex_pattern_set +- [ ] create_rule +- [ ] create_rule_group +- [ ] create_size_constraint_set +- [ ] create_sql_injection_match_set +- [ ] create_web_acl +- [ ] create_xss_match_set +- [ ] delete_byte_match_set +- [ ] delete_geo_match_set +- [ ] delete_ip_set +- [ ] delete_permission_policy +- [ ] delete_rate_based_rule +- [ ] delete_regex_match_set +- [ ] delete_regex_pattern_set +- [ ] delete_rule +- [ ] delete_rule_group +- [ ] delete_size_constraint_set +- [ ] delete_sql_injection_match_set +- [ ] delete_web_acl +- [ ] delete_xss_match_set +- [ ] disassociate_web_acl +- [ ] get_byte_match_set +- [ ] get_change_token +- [ ] get_change_token_status +- [ ] get_geo_match_set +- [ ] get_ip_set +- [ ] get_permission_policy +- [ ] get_rate_based_rule +- [ ] get_rate_based_rule_managed_keys +- [ ] get_regex_match_set +- [ ] get_regex_pattern_set +- [ ] get_rule +- [ ] get_rule_group +- [ ] get_sampled_requests +- [ ] get_size_constraint_set +- [ ] get_sql_injection_match_set +- [ ] get_web_acl +- [ ] get_web_acl_for_resource +- [ ] get_xss_match_set +- [ ] list_activated_rules_in_rule_group +- [ ] list_byte_match_sets +- [ ] 
list_geo_match_sets +- [ ] list_ip_sets +- [ ] list_rate_based_rules +- [ ] list_regex_match_sets +- [ ] list_regex_pattern_sets +- [ ] list_resources_for_web_acl +- [ ] list_rule_groups +- [ ] list_rules +- [ ] list_size_constraint_sets +- [ ] list_sql_injection_match_sets +- [ ] list_subscribed_rule_groups +- [ ] list_web_acls +- [ ] list_xss_match_sets +- [ ] put_permission_policy +- [ ] update_byte_match_set +- [ ] update_geo_match_set +- [ ] update_ip_set +- [ ] update_rate_based_rule +- [ ] update_regex_match_set +- [ ] update_regex_pattern_set +- [ ] update_rule +- [ ] update_rule_group +- [ ] update_size_constraint_set +- [ ] update_sql_injection_match_set +- [ ] update_web_acl +- [ ] update_xss_match_set + +## workdocs - 0% implemented +- [ ] abort_document_version_upload +- [ ] activate_user +- [ ] add_resource_permissions +- [ ] create_comment +- [ ] create_custom_metadata +- [ ] create_folder +- [ ] create_labels +- [ ] create_notification_subscription +- [ ] create_user +- [ ] deactivate_user +- [ ] delete_comment +- [ ] delete_custom_metadata +- [ ] delete_document +- [ ] delete_folder +- [ ] delete_folder_contents +- [ ] delete_labels +- [ ] delete_notification_subscription +- [ ] delete_user +- [ ] describe_activities +- [ ] describe_comments +- [ ] describe_document_versions +- [ ] describe_folder_contents +- [ ] describe_groups +- [ ] describe_notification_subscriptions +- [ ] describe_resource_permissions +- [ ] describe_root_folders +- [ ] describe_users +- [ ] get_current_user +- [ ] get_document +- [ ] get_document_path +- [ ] get_document_version +- [ ] get_folder +- [ ] get_folder_path +- [ ] initiate_document_version_upload +- [ ] remove_all_resource_permissions +- [ ] remove_resource_permission +- [ ] update_document +- [ ] update_document_version +- [ ] update_folder +- [ ] update_user + +## workmail - 0% implemented +- [ ] associate_delegate_to_resource +- [ ] associate_member_to_group +- [ ] create_alias +- [ ] create_group +- [ ] create_resource +- [ ] create_user +- [ ] delete_alias +- [ ] delete_group +- [ ] delete_mailbox_permissions +- [ ] delete_resource +- [ ] delete_user +- [ ] deregister_from_work_mail +- [ ] describe_group +- [ ] describe_organization +- [ ] describe_resource +- [ ] describe_user +- [ ] disassociate_delegate_from_resource +- [ ] disassociate_member_from_group +- [ ] list_aliases +- [ ] list_group_members +- [ ] list_groups +- [ ] list_mailbox_permissions +- [ ] list_organizations +- [ ] list_resource_delegates +- [ ] list_resources +- [ ] list_users +- [ ] put_mailbox_permissions +- [ ] register_to_work_mail +- [ ] reset_password +- [ ] update_primary_email_address +- [ ] update_resource + +## workspaces - 0% implemented +- [ ] associate_ip_groups +- [ ] authorize_ip_rules +- [ ] create_ip_group +- [ ] create_tags +- [ ] create_workspaces +- [ ] delete_ip_group +- [ ] delete_tags +- [ ] describe_ip_groups +- [ ] describe_tags +- [ ] describe_workspace_bundles +- [ ] describe_workspace_directories +- [ ] describe_workspaces +- [ ] describe_workspaces_connection_status +- [ ] disassociate_ip_groups +- [ ] modify_workspace_properties +- [ ] modify_workspace_state +- [ ] reboot_workspaces +- [ ] rebuild_workspaces +- [ ] revoke_ip_rules +- [ ] start_workspaces +- [ ] stop_workspaces +- [ ] terminate_workspaces +- [ ] update_rules_of_ip_group + +## xray - 0% implemented +- [ ] batch_get_traces +- [ ] get_encryption_config +- [ ] get_service_graph +- [ ] get_trace_graph +- [ ] get_trace_summaries +- [ ] put_encryption_config +- [ ] 
put_telemetry_records +- [ ] put_trace_segments diff --git a/moto/iot/models.py b/moto/iot/models.py index c36bb985f50d..931af192a1fa 100644 --- a/moto/iot/models.py +++ b/moto/iot/models.py @@ -610,6 +610,9 @@ def create_job(self, job_id, targets, document_source, document, description, pr def describe_job(self, job_id): return self.jobs[job_id] + def get_job_document(self, job_id): + return self.jobs[job_id] + available_regions = boto3.session.Session().get_available_regions("iot") iot_backends = {region: IoTBackend(region) for region in available_regions} diff --git a/moto/iot/responses.py b/moto/iot/responses.py index 006c4c4cc741..0d367792562a 100644 --- a/moto/iot/responses.py +++ b/moto/iot/responses.py @@ -149,6 +149,16 @@ def describe_job(self): targetSelection=job.target_selection ))) + def get_job_document(self): + job = self.iot_backend.get_job_document(job_id=self._get_param("jobId")) + + if job.document is not None: + json.dumps({'document': job.document}) + else: + # job.document_source is not None: + # TODO: needs to be implemented to get document_source's content from S3 + return json.dumps({'document': ''}) + def create_keys_and_certificate(self): set_as_active = self._get_bool_param("setAsActive") cert, key_pair = self.iot_backend.create_keys_and_certificate( diff --git a/tests/test_iot/test_iot.py b/tests/test_iot/test_iot.py index 5c6effd7a2de..1f2305360615 100644 --- a/tests/test_iot/test_iot.py +++ b/tests/test_iot/test_iot.py @@ -1,7 +1,7 @@ from __future__ import unicode_literals import json -import sure # noqa + import boto3 from moto import mock_iot @@ -681,3 +681,65 @@ def test_describe_job_1(): "expiresInSec").which.should.equal(123) job.should.have.key('job').which.should.have.key("jobExecutionsRolloutConfig").which.should.have.key( "maximumPerMinute").which.should.equal(10) + + +@mock_iot +def test_get_job_document_with_document_source(): + client = boto3.client('iot', region_name='eu-west-1') + name = "my-thing" + job_id = "TestJob" + # thing + thing = client.create_thing(thingName=name) + thing.should.have.key('thingName').which.should.equal(name) + thing.should.have.key('thingArn') + + job = client.create_job( + jobId=job_id, + targets=[thing["thingArn"]], + documentSource="https://s3-eu-west-1.amazonaws.com/bucket-name/job_document.json", + presignedUrlConfig={ + 'roleArn': 'arn:aws:iam::1:role/service-role/iot_job_role', + 'expiresInSec': 123 + }, + targetSelection="CONTINUOUS", + jobExecutionsRolloutConfig={ + 'maximumPerMinute': 10 + } + ) + + job.should.have.key('jobId').which.should.equal(job_id) + job.should.have.key('jobArn') + + job_document = client.get_job_document(jobId=job_id) + job_document.should.have.key('document').which.should.equal('') + + +@mock_iot +def test_get_job_document_with_document(): + client = boto3.client('iot', region_name='eu-west-1') + name = "my-thing" + job_id = "TestJob1" + # thing + thing = client.create_thing(thingName=name) + thing.should.have.key('thingName').which.should.equal(name) + thing.should.have.key('thingArn') + + job = client.create_job( + jobId=job_id, + targets=[thing["thingArn"]], + document=json.dumps({'foo': 'bar'}), + presignedUrlConfig={ + 'roleArn': 'arn:aws:iam::1:role/service-role/iot_job_role', + 'expiresInSec': 123 + }, + targetSelection="CONTINUOUS", + jobExecutionsRolloutConfig={ + 'maximumPerMinute': 10 + } + ) + + job.should.have.key('jobId').which.should.equal(job_id) + job.should.have.key('jobArn') + + job_document = client.get_job_document(jobId=job_id) + 
job_document.should.have.key('document').which.should.equal('') From 1c5c5036e364140fe979660b499cce73a344edb7 Mon Sep 17 00:00:00 2001 From: Stephan Huber Date: Mon, 24 Sep 2018 13:04:39 +0200 Subject: [PATCH 004/658] fixing errors on get_job_document --- moto/iot/responses.py | 2 +- tests/test_iot/test_iot.py | 17 +++++++++++++---- 2 files changed, 14 insertions(+), 5 deletions(-) diff --git a/moto/iot/responses.py b/moto/iot/responses.py index 0d367792562a..c71d4942a9f3 100644 --- a/moto/iot/responses.py +++ b/moto/iot/responses.py @@ -153,7 +153,7 @@ def get_job_document(self): job = self.iot_backend.get_job_document(job_id=self._get_param("jobId")) if job.document is not None: - json.dumps({'document': job.document}) + return json.dumps({'document': job.document}) else: # job.document_source is not None: # TODO: needs to be implemented to get document_source's content from S3 diff --git a/tests/test_iot/test_iot.py b/tests/test_iot/test_iot.py index 1f2305360615..759c7d3c71f3 100644 --- a/tests/test_iot/test_iot.py +++ b/tests/test_iot/test_iot.py @@ -1,6 +1,7 @@ from __future__ import unicode_literals import json +import sure # noqa import boto3 @@ -555,7 +556,10 @@ def test_create_job(): client = boto3.client('iot', region_name='eu-west-1') name = "my-thing" job_id = "TestJob" - # thing + # thing# job document + # job_document = { + # "field": "value" + # } thing = client.create_thing(thingName=name) thing.should.have.key('thingName').which.should.equal(name) thing.should.have.key('thingArn') @@ -718,16 +722,21 @@ def test_get_job_document_with_document_source(): def test_get_job_document_with_document(): client = boto3.client('iot', region_name='eu-west-1') name = "my-thing" - job_id = "TestJob1" + job_id = "TestJob" # thing thing = client.create_thing(thingName=name) thing.should.have.key('thingName').which.should.equal(name) thing.should.have.key('thingArn') + # job document + job_document = { + "field": "value" + } + job = client.create_job( jobId=job_id, targets=[thing["thingArn"]], - document=json.dumps({'foo': 'bar'}), + document=json.dumps(job_document), presignedUrlConfig={ 'roleArn': 'arn:aws:iam::1:role/service-role/iot_job_role', 'expiresInSec': 123 }, targetSelection="CONTINUOUS", jobExecutionsRolloutConfig={ 'maximumPerMinute': 10 } ) job.should.have.key('jobId').which.should.equal(job_id) job.should.have.key('jobArn') job_document = client.get_job_document(jobId=job_id) - job_document.should.have.key('document').which.should.equal('') + job_document.should.have.key('document').which.should.equal("{\"field\": \"value\"}") From 0ba213ffcca58ddb595156161363a6aa294ba71b Mon Sep 17 00:00:00 2001 From: Stephan Huber Date: Tue, 16 Oct 2018 15:29:56 +0200 Subject: [PATCH 005/658] Merge branch 'master' of https://github.com/spulec/moto into spulec-master --- .travis.yml | 13 + AUTHORS.md | 1 + CHANGELOG.md | 5 + IMPLEMENTATION_COVERAGE.md | 9199 ++++++++--------- README.md | 2 + docs/index.rst | 10 +- moto/__init__.py | 3 +- moto/backends.py | 2 + moto/cloudformation/parsing.py | 1 + moto/cognitoidentity/responses.py | 5 +- moto/cognitoidentity/utils.py | 2 +- moto/cognitoidp/models.py | 26 +- moto/core/models.py | 11 + moto/dynamodb2/models.py | 13 +- moto/dynamodb2/responses.py | 32 +- moto/ec2/models.py | 18 + moto/ec2/responses/vpc_peering_connections.py | 31 +- moto/ecs/models.py | 8 +- moto/ecs/responses.py | 3 +- moto/elb/responses.py | 26 +- moto/glue/exceptions.py | 59 +- moto/glue/models.py | 104 +- moto/glue/responses.py | 103 +- moto/iam/models.py | 35 +- moto/iam/responses.py | 25 + moto/kms/models.py | 33 +- moto/kms/responses.py | 50 +
moto/logs/exceptions.py | 2 +- moto/logs/models.py | 4 +- moto/organizations/__init__.py | 6 + moto/organizations/models.py | 296 + moto/organizations/responses.py | 87 + moto/organizations/urls.py | 10 + moto/organizations/utils.py | 59 + moto/packages/httpretty/core.py | 21 +- moto/rds/models.py | 6 + moto/rds2/models.py | 42 +- moto/rds2/responses.py | 3 +- moto/redshift/models.py | 2 + moto/s3/models.py | 54 +- moto/s3/responses.py | 24 +- moto/secretsmanager/models.py | 51 + moto/secretsmanager/responses.py | 12 + moto/server.py | 3 + moto/ses/models.py | 3 +- moto/sqs/models.py | 20 +- moto/sqs/responses.py | 2 +- moto/ssm/models.py | 102 +- moto/ssm/responses.py | 5 + requirements-dev.txt | 4 +- setup.py | 7 +- .../test_cognitoidentity.py | 14 + tests/test_cognitoidp/test_cognitoidp.py | 14 + tests/test_core/test_decorator_calls.py | 11 + tests/test_dynamodb2/test_dynamodb.py | 52 +- tests/test_ec2/test_elastic_block_store.py | 4 +- tests/test_ec2/test_vpc_peering.py | 39 +- tests/test_ecs/test_ecs_boto3.py | 115 + tests/test_elb/test_elb.py | 34 + tests/test_glue/fixtures/datacatalog.py | 25 + tests/test_glue/helpers.py | 81 +- tests/test_glue/test_datacatalog.py | 362 +- tests/test_iam/test_iam.py | 31 +- tests/test_kms/test_kms.py | 102 +- tests/test_logs/test_logs.py | 14 +- tests/test_organizations/__init__.py | 0 .../organizations_test_utils.py | 136 + .../test_organizations_boto3.py | 322 + tests/test_rds2/test_rds2.py | 155 + tests/test_redshift/test_redshift.py | 4 + tests/test_s3/test_s3.py | 66 + tests/test_s3/test_s3_lifecycle.py | 121 + .../test_secretsmanager.py | 109 +- tests/test_secretsmanager/test_server.py | 286 + tests/test_sqs/test_sqs.py | 40 + tests/test_ssm/test_ssm_boto3.py | 118 +- 76 files changed, 7929 insertions(+), 4971 deletions(-) create mode 100644 moto/organizations/__init__.py create mode 100644 moto/organizations/models.py create mode 100644 moto/organizations/responses.py create mode 100644 moto/organizations/urls.py create mode 100644 moto/organizations/utils.py create mode 100644 tests/test_organizations/__init__.py create mode 100644 tests/test_organizations/organizations_test_utils.py create mode 100644 tests/test_organizations/test_organizations_boto3.py diff --git a/.travis.yml b/.travis.yml index f1b7ac40dde2..de22818b895d 100644 --- a/.travis.yml +++ b/.travis.yml @@ -8,6 +8,19 @@ python: env: - TEST_SERVER_MODE=false - TEST_SERVER_MODE=true +# Due to incomplete Python 3.7 support on Travis CI ( +# https://github.com/travis-ci/travis-ci/issues/9815), +# using a matrix is necessary +matrix: + include: + - python: 3.7 + env: TEST_SERVER_MODE=false + dist: xenial + sudo: true + - python: 3.7 + env: TEST_SERVER_MODE=true + dist: xenial + sudo: true before_install: - export BOTO_CONFIG=/dev/null install: diff --git a/AUTHORS.md b/AUTHORS.md index 6b7c96291e29..0a152505a921 100644 --- a/AUTHORS.md +++ b/AUTHORS.md @@ -53,3 +53,4 @@ Moto is written by Steve Pulec with contributions from: * [Jim Shields](https://github.com/jimjshields) * [William Richard](https://github.com/william-richard) * [Alex Casalboni](https://github.com/alexcasalboni) +* [Jon Beilke](https://github.com/jrbeilke) diff --git a/CHANGELOG.md b/CHANGELOG.md index 202da6ce6366..7f7ee44487cb 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,6 +1,11 @@ Moto Changelog =================== +1.3.6 +----- + + * Fix boto3 pinning.
+ 1.3.5 ----- diff --git a/IMPLEMENTATION_COVERAGE.md b/IMPLEMENTATION_COVERAGE.md index 7fbbbcbb0a3b..17b864dc3a0c 100644 --- a/IMPLEMENTATION_COVERAGE.md +++ b/IMPLEMENTATION_COVERAGE.md @@ -1,4771 +1,4428 @@ - -## acm - 41% implemented -- [X] add_tags_to_certificate -- [X] delete_certificate -- [ ] describe_certificate -- [ ] export_certificate -- [X] get_certificate -- [ ] import_certificate -- [ ] list_certificates -- [ ] list_tags_for_certificate -- [X] remove_tags_from_certificate -- [X] request_certificate -- [ ] resend_validation_email -- [ ] update_certificate_options - -## acm-pca - 0% implemented -- [ ] create_certificate_authority -- [ ] create_certificate_authority_audit_report -- [ ] delete_certificate_authority -- [ ] describe_certificate_authority -- [ ] describe_certificate_authority_audit_report -- [ ] get_certificate -- [ ] get_certificate_authority_certificate -- [ ] get_certificate_authority_csr -- [ ] import_certificate_authority_certificate -- [ ] issue_certificate -- [ ] list_certificate_authorities -- [ ] list_tags -- [ ] restore_certificate_authority -- [ ] revoke_certificate -- [ ] tag_certificate_authority -- [ ] untag_certificate_authority -- [ ] update_certificate_authority - -## alexaforbusiness - 0% implemented -- [ ] associate_contact_with_address_book -- [ ] associate_device_with_room -- [ ] associate_skill_group_with_room -- [ ] create_address_book -- [ ] create_contact -- [ ] create_profile -- [ ] create_room -- [ ] create_skill_group -- [ ] create_user -- [ ] delete_address_book -- [ ] delete_contact -- [ ] delete_profile -- [ ] delete_room -- [ ] delete_room_skill_parameter -- [ ] delete_skill_group -- [ ] delete_user -- [ ] disassociate_contact_from_address_book -- [ ] disassociate_device_from_room -- [ ] disassociate_skill_group_from_room -- [ ] get_address_book -- [ ] get_contact -- [ ] get_device -- [ ] get_profile -- [ ] get_room -- [ ] get_room_skill_parameter -- [ ] get_skill_group -- [ ] list_device_events -- [ ] list_skills -- [ ] list_tags -- [ ] put_room_skill_parameter -- [ ] resolve_room -- [ ] revoke_invitation -- [ ] search_address_books -- [ ] search_contacts -- [ ] search_devices -- [ ] search_profiles -- [ ] search_rooms -- [ ] search_skill_groups -- [ ] search_users -- [ ] send_invitation -- [ ] start_device_sync -- [ ] tag_resource -- [ ] untag_resource -- [ ] update_address_book -- [ ] update_contact -- [ ] update_device -- [ ] update_profile -- [ ] update_room -- [ ] update_skill_group - -## apigateway - 24% implemented -- [ ] create_api_key -- [ ] create_authorizer -- [ ] create_base_path_mapping -- [X] create_deployment -- [ ] create_documentation_part -- [ ] create_documentation_version -- [ ] create_domain_name -- [ ] create_model -- [ ] create_request_validator -- [X] create_resource -- [X] create_rest_api -- [X] create_stage -- [X] create_usage_plan -- [X] create_usage_plan_key -- [ ] create_vpc_link -- [ ] delete_api_key -- [ ] delete_authorizer -- [ ] delete_base_path_mapping -- [ ] delete_client_certificate -- [X] delete_deployment -- [ ] delete_documentation_part -- [ ] delete_documentation_version -- [ ] delete_domain_name -- [ ] delete_gateway_response -- [X] delete_integration -- [X] delete_integration_response -- [ ] delete_method -- [X] delete_method_response -- [ ] delete_model -- [ ] delete_request_validator -- [X] delete_resource -- [X] delete_rest_api -- [ ] delete_stage -- [X] delete_usage_plan -- [X] delete_usage_plan_key -- [ ] delete_vpc_link -- [ ] flush_stage_authorizers_cache -- [ ] flush_stage_cache -- 
[ ] generate_client_certificate -- [ ] get_account -- [ ] get_api_key -- [ ] get_api_keys -- [ ] get_authorizer -- [ ] get_authorizers -- [ ] get_base_path_mapping -- [ ] get_base_path_mappings -- [ ] get_client_certificate -- [ ] get_client_certificates -- [X] get_deployment -- [X] get_deployments -- [ ] get_documentation_part -- [ ] get_documentation_parts -- [ ] get_documentation_version -- [ ] get_documentation_versions -- [ ] get_domain_name -- [ ] get_domain_names -- [ ] get_export -- [ ] get_gateway_response -- [ ] get_gateway_responses -- [X] get_integration -- [X] get_integration_response -- [X] get_method -- [X] get_method_response -- [ ] get_model -- [ ] get_model_template -- [ ] get_models -- [ ] get_request_validator -- [ ] get_request_validators -- [X] get_resource -- [ ] get_resources -- [X] get_rest_api -- [ ] get_rest_apis -- [ ] get_sdk -- [ ] get_sdk_type -- [ ] get_sdk_types -- [X] get_stage -- [X] get_stages -- [ ] get_tags -- [ ] get_usage -- [X] get_usage_plan -- [X] get_usage_plan_key -- [X] get_usage_plan_keys -- [X] get_usage_plans -- [ ] get_vpc_link -- [ ] get_vpc_links -- [ ] import_api_keys -- [ ] import_documentation_parts -- [ ] import_rest_api -- [ ] put_gateway_response -- [ ] put_integration -- [ ] put_integration_response -- [ ] put_method -- [ ] put_method_response -- [ ] put_rest_api -- [ ] tag_resource -- [ ] test_invoke_authorizer -- [ ] test_invoke_method -- [ ] untag_resource -- [ ] update_account -- [ ] update_api_key -- [ ] update_authorizer -- [ ] update_base_path_mapping -- [ ] update_client_certificate -- [ ] update_deployment -- [ ] update_documentation_part -- [ ] update_documentation_version -- [ ] update_domain_name -- [ ] update_gateway_response -- [ ] update_integration -- [ ] update_integration_response -- [ ] update_method -- [ ] update_method_response -- [ ] update_model -- [ ] update_request_validator -- [ ] update_resource -- [ ] update_rest_api -- [X] update_stage -- [ ] update_usage -- [ ] update_usage_plan -- [ ] update_vpc_link - -## application-autoscaling - 0% implemented -- [ ] delete_scaling_policy -- [ ] delete_scheduled_action -- [ ] deregister_scalable_target -- [ ] describe_scalable_targets -- [ ] describe_scaling_activities -- [ ] describe_scaling_policies -- [ ] describe_scheduled_actions -- [ ] put_scaling_policy -- [ ] put_scheduled_action -- [ ] register_scalable_target - -## appstream - 0% implemented -- [ ] associate_fleet -- [ ] copy_image -- [ ] create_directory_config -- [ ] create_fleet -- [ ] create_image_builder -- [ ] create_image_builder_streaming_url -- [ ] create_stack -- [ ] create_streaming_url -- [ ] delete_directory_config -- [ ] delete_fleet -- [ ] delete_image -- [ ] delete_image_builder -- [ ] delete_image_permissions -- [ ] delete_stack -- [ ] describe_directory_configs -- [ ] describe_fleets -- [ ] describe_image_builders -- [ ] describe_image_permissions -- [ ] describe_images -- [ ] describe_sessions -- [ ] describe_stacks -- [ ] disassociate_fleet -- [ ] expire_session -- [ ] list_associated_fleets -- [ ] list_associated_stacks -- [ ] list_tags_for_resource -- [ ] start_fleet -- [ ] start_image_builder -- [ ] stop_fleet -- [ ] stop_image_builder -- [ ] tag_resource -- [ ] untag_resource -- [ ] update_directory_config -- [ ] update_fleet -- [ ] update_image_permissions -- [ ] update_stack - -## appsync - 0% implemented -- [ ] create_api_key -- [ ] create_data_source -- [ ] create_graphql_api -- [ ] create_resolver -- [ ] create_type -- [ ] delete_api_key -- [ ] delete_data_source -- [ ] 
delete_graphql_api -- [ ] delete_resolver -- [ ] delete_type -- [ ] get_data_source -- [ ] get_graphql_api -- [ ] get_introspection_schema -- [ ] get_resolver -- [ ] get_schema_creation_status -- [ ] get_type -- [ ] list_api_keys -- [ ] list_data_sources -- [ ] list_graphql_apis -- [ ] list_resolvers -- [ ] list_types -- [ ] start_schema_creation -- [ ] update_api_key -- [ ] update_data_source -- [ ] update_graphql_api -- [ ] update_resolver -- [ ] update_type - -## athena - 0% implemented -- [ ] batch_get_named_query -- [ ] batch_get_query_execution -- [ ] create_named_query -- [ ] delete_named_query -- [ ] get_named_query -- [ ] get_query_execution -- [ ] get_query_results -- [ ] list_named_queries -- [ ] list_query_executions -- [ ] start_query_execution -- [ ] stop_query_execution - -## autoscaling - 44% implemented -- [X] attach_instances -- [X] attach_load_balancer_target_groups -- [X] attach_load_balancers -- [ ] complete_lifecycle_action -- [X] create_auto_scaling_group -- [X] create_launch_configuration -- [X] create_or_update_tags -- [X] delete_auto_scaling_group -- [X] delete_launch_configuration -- [ ] delete_lifecycle_hook -- [ ] delete_notification_configuration -- [X] delete_policy -- [ ] delete_scheduled_action -- [ ] delete_tags -- [ ] describe_account_limits -- [ ] describe_adjustment_types -- [X] describe_auto_scaling_groups -- [X] describe_auto_scaling_instances -- [ ] describe_auto_scaling_notification_types -- [X] describe_launch_configurations -- [ ] describe_lifecycle_hook_types -- [ ] describe_lifecycle_hooks -- [X] describe_load_balancer_target_groups -- [X] describe_load_balancers -- [ ] describe_metric_collection_types -- [ ] describe_notification_configurations -- [X] describe_policies -- [ ] describe_scaling_activities -- [ ] describe_scaling_process_types -- [ ] describe_scheduled_actions -- [ ] describe_tags -- [ ] describe_termination_policy_types -- [X] detach_instances -- [X] detach_load_balancer_target_groups -- [X] detach_load_balancers -- [ ] disable_metrics_collection -- [ ] enable_metrics_collection -- [ ] enter_standby -- [X] execute_policy -- [ ] exit_standby -- [ ] put_lifecycle_hook -- [ ] put_notification_configuration -- [ ] put_scaling_policy -- [ ] put_scheduled_update_group_action -- [ ] record_lifecycle_action_heartbeat -- [ ] resume_processes -- [X] set_desired_capacity -- [X] set_instance_health -- [ ] set_instance_protection -- [X] suspend_processes -- [ ] terminate_instance_in_auto_scaling_group -- [X] update_auto_scaling_group - -## autoscaling-plans - 0% implemented -- [ ] create_scaling_plan -- [ ] delete_scaling_plan -- [ ] describe_scaling_plan_resources -- [ ] describe_scaling_plans -- [ ] update_scaling_plan - -## batch - 93% implemented -- [ ] cancel_job -- [X] create_compute_environment -- [X] create_job_queue -- [X] delete_compute_environment -- [X] delete_job_queue -- [X] deregister_job_definition -- [X] describe_compute_environments -- [X] describe_job_definitions -- [X] describe_job_queues -- [X] describe_jobs -- [X] list_jobs -- [X] register_job_definition -- [X] submit_job -- [X] terminate_job -- [X] update_compute_environment -- [X] update_job_queue - -## budgets - 0% implemented -- [ ] create_budget -- [ ] create_notification -- [ ] create_subscriber -- [ ] delete_budget -- [ ] delete_notification -- [ ] delete_subscriber -- [ ] describe_budget -- [ ] describe_budgets -- [ ] describe_notifications_for_budget -- [ ] describe_subscribers_for_notification -- [ ] update_budget -- [ ] update_notification -- [ ] 
update_subscriber - -## ce - 0% implemented -- [ ] get_cost_and_usage -- [ ] get_dimension_values -- [ ] get_reservation_coverage -- [ ] get_reservation_purchase_recommendation -- [ ] get_reservation_utilization -- [ ] get_tags - -## cloud9 - 0% implemented -- [ ] create_environment_ec2 -- [ ] create_environment_membership -- [ ] delete_environment -- [ ] delete_environment_membership -- [ ] describe_environment_memberships -- [ ] describe_environment_status -- [ ] describe_environments -- [ ] list_environments -- [ ] update_environment -- [ ] update_environment_membership - -## clouddirectory - 0% implemented -- [ ] add_facet_to_object -- [ ] apply_schema -- [ ] attach_object -- [ ] attach_policy -- [ ] attach_to_index -- [ ] attach_typed_link -- [ ] batch_read -- [ ] batch_write -- [ ] create_directory -- [ ] create_facet -- [ ] create_index -- [ ] create_object -- [ ] create_schema -- [ ] create_typed_link_facet -- [ ] delete_directory -- [ ] delete_facet -- [ ] delete_object -- [ ] delete_schema -- [ ] delete_typed_link_facet -- [ ] detach_from_index -- [ ] detach_object -- [ ] detach_policy -- [ ] detach_typed_link -- [ ] disable_directory -- [ ] enable_directory -- [ ] get_applied_schema_version -- [ ] get_directory -- [ ] get_facet -- [ ] get_link_attributes -- [ ] get_object_attributes -- [ ] get_object_information -- [ ] get_schema_as_json -- [ ] get_typed_link_facet_information -- [ ] list_applied_schema_arns -- [ ] list_attached_indices -- [ ] list_development_schema_arns -- [ ] list_directories -- [ ] list_facet_attributes -- [ ] list_facet_names -- [ ] list_incoming_typed_links -- [ ] list_index -- [ ] list_managed_schema_arns -- [ ] list_object_attributes -- [ ] list_object_children -- [ ] list_object_parent_paths -- [ ] list_object_parents -- [ ] list_object_policies -- [ ] list_outgoing_typed_links -- [ ] list_policy_attachments -- [ ] list_published_schema_arns -- [ ] list_tags_for_resource -- [ ] list_typed_link_facet_attributes -- [ ] list_typed_link_facet_names -- [ ] lookup_policy -- [ ] publish_schema -- [ ] put_schema_from_json -- [ ] remove_facet_from_object -- [ ] tag_resource -- [ ] untag_resource -- [ ] update_facet -- [ ] update_link_attributes -- [ ] update_object_attributes -- [ ] update_schema -- [ ] update_typed_link_facet -- [ ] upgrade_applied_schema -- [ ] upgrade_published_schema - -## cloudformation - 21% implemented -- [ ] cancel_update_stack -- [ ] continue_update_rollback -- [X] create_change_set -- [X] create_stack -- [ ] create_stack_instances -- [ ] create_stack_set -- [ ] delete_change_set -- [X] delete_stack -- [ ] delete_stack_instances -- [ ] delete_stack_set -- [ ] describe_account_limits -- [ ] describe_change_set -- [ ] describe_stack_events -- [ ] describe_stack_instance -- [ ] describe_stack_resource -- [ ] describe_stack_resources -- [ ] describe_stack_set -- [ ] describe_stack_set_operation -- [X] describe_stacks -- [ ] estimate_template_cost -- [X] execute_change_set -- [ ] get_stack_policy -- [ ] get_template -- [ ] get_template_summary -- [ ] list_change_sets -- [X] list_exports -- [ ] list_imports -- [ ] list_stack_instances -- [X] list_stack_resources -- [ ] list_stack_set_operation_results -- [ ] list_stack_set_operations -- [ ] list_stack_sets -- [X] list_stacks -- [ ] set_stack_policy -- [ ] signal_resource -- [ ] stop_stack_set_operation -- [X] update_stack -- [ ] update_stack_instances -- [ ] update_stack_set -- [ ] update_termination_protection -- [ ] validate_template - -## cloudfront - 0% implemented -- [ ] 
create_cloud_front_origin_access_identity -- [ ] create_distribution -- [ ] create_distribution_with_tags -- [ ] create_field_level_encryption_config -- [ ] create_field_level_encryption_profile -- [ ] create_invalidation -- [ ] create_public_key -- [ ] create_streaming_distribution -- [ ] create_streaming_distribution_with_tags -- [ ] delete_cloud_front_origin_access_identity -- [ ] delete_distribution -- [ ] delete_field_level_encryption_config -- [ ] delete_field_level_encryption_profile -- [ ] delete_public_key -- [ ] delete_streaming_distribution -- [ ] get_cloud_front_origin_access_identity -- [ ] get_cloud_front_origin_access_identity_config -- [ ] get_distribution -- [ ] get_distribution_config -- [ ] get_field_level_encryption -- [ ] get_field_level_encryption_config -- [ ] get_field_level_encryption_profile -- [ ] get_field_level_encryption_profile_config -- [ ] get_invalidation -- [ ] get_public_key -- [ ] get_public_key_config -- [ ] get_streaming_distribution -- [ ] get_streaming_distribution_config -- [ ] list_cloud_front_origin_access_identities -- [ ] list_distributions -- [ ] list_distributions_by_web_acl_id -- [ ] list_field_level_encryption_configs -- [ ] list_field_level_encryption_profiles -- [ ] list_invalidations -- [ ] list_public_keys -- [ ] list_streaming_distributions -- [ ] list_tags_for_resource -- [ ] tag_resource -- [ ] untag_resource -- [ ] update_cloud_front_origin_access_identity -- [ ] update_distribution -- [ ] update_field_level_encryption_config -- [ ] update_field_level_encryption_profile -- [ ] update_public_key -- [ ] update_streaming_distribution - -## cloudhsm - 0% implemented -- [ ] add_tags_to_resource -- [ ] create_hapg -- [ ] create_hsm -- [ ] create_luna_client -- [ ] delete_hapg -- [ ] delete_hsm -- [ ] delete_luna_client -- [ ] describe_hapg -- [ ] describe_hsm -- [ ] describe_luna_client -- [ ] get_config -- [ ] list_available_zones -- [ ] list_hapgs -- [ ] list_hsms -- [ ] list_luna_clients -- [ ] list_tags_for_resource -- [ ] modify_hapg -- [ ] modify_hsm -- [ ] modify_luna_client -- [ ] remove_tags_from_resource - -## cloudhsmv2 - 0% implemented -- [ ] copy_backup_to_region -- [ ] create_cluster -- [ ] create_hsm -- [ ] delete_cluster -- [ ] delete_hsm -- [ ] describe_backups -- [ ] describe_clusters -- [ ] initialize_cluster -- [ ] list_tags -- [ ] tag_resource -- [ ] untag_resource - -## cloudsearch - 0% implemented -- [ ] build_suggesters -- [ ] create_domain -- [ ] define_analysis_scheme -- [ ] define_expression -- [ ] define_index_field -- [ ] define_suggester -- [ ] delete_analysis_scheme -- [ ] delete_domain -- [ ] delete_expression -- [ ] delete_index_field -- [ ] delete_suggester -- [ ] describe_analysis_schemes -- [ ] describe_availability_options -- [ ] describe_domains -- [ ] describe_expressions -- [ ] describe_index_fields -- [ ] describe_scaling_parameters -- [ ] describe_service_access_policies -- [ ] describe_suggesters -- [ ] index_documents -- [ ] list_domain_names -- [ ] update_availability_options -- [ ] update_scaling_parameters -- [ ] update_service_access_policies - -## cloudsearchdomain - 0% implemented -- [ ] search -- [ ] suggest -- [ ] upload_documents - -## cloudtrail - 0% implemented -- [ ] add_tags -- [ ] create_trail -- [ ] delete_trail -- [ ] describe_trails -- [ ] get_event_selectors -- [ ] get_trail_status -- [ ] list_public_keys -- [ ] list_tags -- [ ] lookup_events -- [ ] put_event_selectors -- [ ] remove_tags -- [ ] start_logging -- [ ] stop_logging -- [ ] update_trail - -## cloudwatch - 56% 
implemented -- [X] delete_alarms -- [X] delete_dashboards -- [ ] describe_alarm_history -- [ ] describe_alarms -- [ ] describe_alarms_for_metric -- [ ] disable_alarm_actions -- [ ] enable_alarm_actions -- [X] get_dashboard -- [ ] get_metric_data -- [X] get_metric_statistics -- [X] list_dashboards -- [ ] list_metrics -- [X] put_dashboard -- [X] put_metric_alarm -- [X] put_metric_data -- [X] set_alarm_state - -## codebuild - 0% implemented -- [ ] batch_delete_builds -- [ ] batch_get_builds -- [ ] batch_get_projects -- [ ] create_project -- [ ] create_webhook -- [ ] delete_project -- [ ] delete_webhook -- [ ] invalidate_project_cache -- [ ] list_builds -- [ ] list_builds_for_project -- [ ] list_curated_environment_images -- [ ] list_projects -- [ ] start_build -- [ ] stop_build -- [ ] update_project -- [ ] update_webhook - -## codecommit - 0% implemented -- [ ] batch_get_repositories -- [ ] create_branch -- [ ] create_pull_request -- [ ] create_repository -- [ ] delete_branch -- [ ] delete_comment_content -- [ ] delete_repository -- [ ] describe_pull_request_events -- [ ] get_blob -- [ ] get_branch -- [ ] get_comment -- [ ] get_comments_for_compared_commit -- [ ] get_comments_for_pull_request -- [ ] get_commit -- [ ] get_differences -- [ ] get_merge_conflicts -- [ ] get_pull_request -- [ ] get_repository -- [ ] get_repository_triggers -- [ ] list_branches -- [ ] list_pull_requests -- [ ] list_repositories -- [ ] merge_pull_request_by_fast_forward -- [ ] post_comment_for_compared_commit -- [ ] post_comment_for_pull_request -- [ ] post_comment_reply -- [ ] put_file -- [ ] put_repository_triggers -- [ ] test_repository_triggers -- [ ] update_comment -- [ ] update_default_branch -- [ ] update_pull_request_description -- [ ] update_pull_request_status -- [ ] update_pull_request_title -- [ ] update_repository_description -- [ ] update_repository_name - -## codedeploy - 0% implemented -- [ ] add_tags_to_on_premises_instances -- [ ] batch_get_application_revisions -- [ ] batch_get_applications -- [ ] batch_get_deployment_groups -- [ ] batch_get_deployment_instances -- [ ] batch_get_deployments -- [ ] batch_get_on_premises_instances -- [ ] continue_deployment -- [ ] create_application -- [ ] create_deployment -- [ ] create_deployment_config -- [ ] create_deployment_group -- [ ] delete_application -- [ ] delete_deployment_config -- [ ] delete_deployment_group -- [ ] delete_git_hub_account_token -- [ ] deregister_on_premises_instance -- [ ] get_application -- [ ] get_application_revision -- [ ] get_deployment -- [ ] get_deployment_config -- [ ] get_deployment_group -- [ ] get_deployment_instance -- [ ] get_on_premises_instance -- [ ] list_application_revisions -- [ ] list_applications -- [ ] list_deployment_configs -- [ ] list_deployment_groups -- [ ] list_deployment_instances -- [ ] list_deployments -- [ ] list_git_hub_account_token_names -- [ ] list_on_premises_instances -- [ ] put_lifecycle_event_hook_execution_status -- [ ] register_application_revision -- [ ] register_on_premises_instance -- [ ] remove_tags_from_on_premises_instances -- [ ] skip_wait_time_for_instance_termination -- [ ] stop_deployment -- [ ] update_application -- [ ] update_deployment_group - -## codepipeline - 0% implemented -- [ ] acknowledge_job -- [ ] acknowledge_third_party_job -- [ ] create_custom_action_type -- [ ] create_pipeline -- [ ] delete_custom_action_type -- [ ] delete_pipeline -- [ ] delete_webhook -- [ ] deregister_webhook_with_third_party -- [ ] disable_stage_transition -- [ ] enable_stage_transition -- [ ] 
get_job_details -- [ ] get_pipeline -- [ ] get_pipeline_execution -- [ ] get_pipeline_state -- [ ] get_third_party_job_details -- [ ] list_action_types -- [ ] list_pipeline_executions -- [ ] list_pipelines -- [ ] list_webhooks -- [ ] poll_for_jobs -- [ ] poll_for_third_party_jobs -- [ ] put_action_revision -- [ ] put_approval_result -- [ ] put_job_failure_result -- [ ] put_job_success_result -- [ ] put_third_party_job_failure_result -- [ ] put_third_party_job_success_result -- [ ] put_webhook -- [ ] register_webhook_with_third_party -- [ ] retry_stage_execution -- [ ] start_pipeline_execution -- [ ] update_pipeline - -## codestar - 0% implemented -- [ ] associate_team_member -- [ ] create_project -- [ ] create_user_profile -- [ ] delete_project -- [ ] delete_user_profile -- [ ] describe_project -- [ ] describe_user_profile -- [ ] disassociate_team_member -- [ ] list_projects -- [ ] list_resources -- [ ] list_tags_for_project -- [ ] list_team_members -- [ ] list_user_profiles -- [ ] tag_project -- [ ] untag_project -- [ ] update_project -- [ ] update_team_member -- [ ] update_user_profile - -## cognito-identity - 22% implemented -- [X] create_identity_pool -- [ ] delete_identities -- [ ] delete_identity_pool -- [ ] describe_identity -- [ ] describe_identity_pool -- [X] get_credentials_for_identity -- [X] get_id -- [ ] get_identity_pool_roles -- [ ] get_open_id_token -- [X] get_open_id_token_for_developer_identity -- [ ] list_identities -- [ ] list_identity_pools -- [ ] lookup_developer_identity -- [ ] merge_developer_identities -- [ ] set_identity_pool_roles -- [ ] unlink_developer_identity -- [ ] unlink_identity -- [ ] update_identity_pool - -## cognito-idp - 25% implemented -- [ ] add_custom_attributes -- [ ] admin_add_user_to_group -- [ ] admin_confirm_sign_up -- [X] admin_create_user -- [X] admin_delete_user -- [ ] admin_delete_user_attributes -- [ ] admin_disable_provider_for_user -- [ ] admin_disable_user -- [ ] admin_enable_user -- [ ] admin_forget_device -- [ ] admin_get_device -- [X] admin_get_user -- [X] admin_initiate_auth -- [ ] admin_link_provider_for_user -- [ ] admin_list_devices -- [ ] admin_list_groups_for_user -- [ ] admin_list_user_auth_events -- [ ] admin_remove_user_from_group -- [ ] admin_reset_user_password -- [ ] admin_respond_to_auth_challenge -- [ ] admin_set_user_mfa_preference -- [ ] admin_set_user_settings -- [ ] admin_update_auth_event_feedback -- [ ] admin_update_device_status -- [ ] admin_update_user_attributes -- [ ] admin_user_global_sign_out -- [ ] associate_software_token -- [X] change_password -- [ ] confirm_device -- [X] confirm_forgot_password -- [ ] confirm_sign_up -- [ ] create_group -- [X] create_identity_provider -- [ ] create_resource_server -- [ ] create_user_import_job -- [X] create_user_pool -- [X] create_user_pool_client -- [X] create_user_pool_domain -- [ ] delete_group -- [X] delete_identity_provider -- [ ] delete_resource_server -- [ ] delete_user -- [ ] delete_user_attributes -- [X] delete_user_pool -- [X] delete_user_pool_client -- [X] delete_user_pool_domain -- [X] describe_identity_provider -- [ ] describe_resource_server -- [ ] describe_risk_configuration -- [ ] describe_user_import_job -- [X] describe_user_pool -- [X] describe_user_pool_client -- [X] describe_user_pool_domain -- [ ] forget_device -- [ ] forgot_password -- [ ] get_csv_header -- [ ] get_device -- [ ] get_group -- [ ] get_identity_provider_by_identifier -- [ ] get_signing_certificate -- [ ] get_ui_customization -- [ ] get_user -- [ ] get_user_attribute_verification_code 
-- [ ] get_user_pool_mfa_config -- [ ] global_sign_out -- [ ] initiate_auth -- [ ] list_devices -- [ ] list_groups -- [X] list_identity_providers -- [ ] list_resource_servers -- [ ] list_user_import_jobs -- [X] list_user_pool_clients -- [X] list_user_pools -- [X] list_users -- [ ] list_users_in_group -- [ ] resend_confirmation_code -- [X] respond_to_auth_challenge -- [ ] set_risk_configuration -- [ ] set_ui_customization -- [ ] set_user_mfa_preference -- [ ] set_user_pool_mfa_config -- [ ] set_user_settings -- [ ] sign_up -- [ ] start_user_import_job -- [ ] stop_user_import_job -- [ ] update_auth_event_feedback -- [ ] update_device_status -- [ ] update_group -- [ ] update_identity_provider -- [ ] update_resource_server -- [ ] update_user_attributes -- [ ] update_user_pool -- [X] update_user_pool_client -- [ ] verify_software_token -- [ ] verify_user_attribute - -## cognito-sync - 0% implemented -- [ ] bulk_publish -- [ ] delete_dataset -- [ ] describe_dataset -- [ ] describe_identity_pool_usage -- [ ] describe_identity_usage -- [ ] get_bulk_publish_details -- [ ] get_cognito_events -- [ ] get_identity_pool_configuration -- [ ] list_datasets -- [ ] list_identity_pool_usage -- [ ] list_records -- [ ] register_device -- [ ] set_cognito_events -- [ ] set_identity_pool_configuration -- [ ] subscribe_to_dataset -- [ ] unsubscribe_from_dataset -- [ ] update_records - -## comprehend - 0% implemented -- [ ] batch_detect_dominant_language -- [ ] batch_detect_entities -- [ ] batch_detect_key_phrases -- [ ] batch_detect_sentiment -- [ ] batch_detect_syntax -- [ ] describe_dominant_language_detection_job -- [ ] describe_entities_detection_job -- [ ] describe_key_phrases_detection_job -- [ ] describe_sentiment_detection_job -- [ ] describe_topics_detection_job -- [ ] detect_dominant_language -- [ ] detect_entities -- [ ] detect_key_phrases -- [ ] detect_sentiment -- [ ] detect_syntax -- [ ] list_dominant_language_detection_jobs -- [ ] list_entities_detection_jobs -- [ ] list_key_phrases_detection_jobs -- [ ] list_sentiment_detection_jobs -- [ ] list_topics_detection_jobs -- [ ] start_dominant_language_detection_job -- [ ] start_entities_detection_job -- [ ] start_key_phrases_detection_job -- [ ] start_sentiment_detection_job -- [ ] start_topics_detection_job -- [ ] stop_dominant_language_detection_job -- [ ] stop_entities_detection_job -- [ ] stop_key_phrases_detection_job -- [ ] stop_sentiment_detection_job - -## config - 0% implemented -- [ ] batch_get_resource_config -- [ ] delete_aggregation_authorization -- [ ] delete_config_rule -- [ ] delete_configuration_aggregator -- [ ] delete_configuration_recorder -- [ ] delete_delivery_channel -- [ ] delete_evaluation_results -- [ ] delete_pending_aggregation_request -- [ ] delete_retention_configuration -- [ ] deliver_config_snapshot -- [ ] describe_aggregate_compliance_by_config_rules -- [ ] describe_aggregation_authorizations -- [ ] describe_compliance_by_config_rule -- [ ] describe_compliance_by_resource -- [ ] describe_config_rule_evaluation_status -- [ ] describe_config_rules -- [ ] describe_configuration_aggregator_sources_status -- [ ] describe_configuration_aggregators -- [ ] describe_configuration_recorder_status -- [ ] describe_configuration_recorders -- [ ] describe_delivery_channel_status -- [ ] describe_delivery_channels -- [ ] describe_pending_aggregation_requests -- [ ] describe_retention_configurations -- [ ] get_aggregate_compliance_details_by_config_rule -- [ ] get_aggregate_config_rule_compliance_summary -- [ ] 
-- [ ] get_compliance_details_by_resource
-- [ ] get_compliance_summary_by_config_rule
-- [ ] get_compliance_summary_by_resource_type
-- [ ] get_discovered_resource_counts
-- [ ] get_resource_config_history
-- [ ] list_discovered_resources
-- [ ] put_aggregation_authorization
-- [ ] put_config_rule
-- [ ] put_configuration_aggregator
-- [ ] put_configuration_recorder
-- [ ] put_delivery_channel
-- [ ] put_evaluations
-- [ ] put_retention_configuration
-- [ ] start_config_rules_evaluation
-- [ ] start_configuration_recorder
-- [ ] stop_configuration_recorder
-
-## connect - 0% implemented
-- [ ] create_user
-- [ ] delete_user
-- [ ] describe_user
-- [ ] describe_user_hierarchy_group
-- [ ] describe_user_hierarchy_structure
-- [ ] get_federation_token
-- [ ] list_routing_profiles
-- [ ] list_security_profiles
-- [ ] list_user_hierarchy_groups
-- [ ] list_users
-- [ ] start_outbound_voice_contact
-- [ ] stop_contact
-- [ ] update_user_hierarchy
-- [ ] update_user_identity_info
-- [ ] update_user_phone_config
-- [ ] update_user_routing_profile
-- [ ] update_user_security_profiles
-
-## cur - 0% implemented
-- [ ] delete_report_definition
-- [ ] describe_report_definitions
-- [ ] put_report_definition
-
-## datapipeline - 42% implemented
-- [X] activate_pipeline
-- [ ] add_tags
-- [X] create_pipeline
-- [ ] deactivate_pipeline
-- [X] delete_pipeline
-- [X] describe_objects
-- [X] describe_pipelines
-- [ ] evaluate_expression
-- [X] get_pipeline_definition
-- [X] list_pipelines
-- [ ] poll_for_task
-- [X] put_pipeline_definition
-- [ ] query_objects
-- [ ] remove_tags
-- [ ] report_task_progress
-- [ ] report_task_runner_heartbeat
-- [ ] set_status
-- [ ] set_task_status
-- [ ] validate_pipeline_definition
-
-## dax - 0% implemented
-- [ ] create_cluster
-- [ ] create_parameter_group
-- [ ] create_subnet_group
-- [ ] decrease_replication_factor
-- [ ] delete_cluster
-- [ ] delete_parameter_group
-- [ ] delete_subnet_group
-- [ ] describe_clusters
-- [ ] describe_default_parameters
-- [ ] describe_events
-- [ ] describe_parameter_groups
-- [ ] describe_parameters
-- [ ] describe_subnet_groups
-- [ ] increase_replication_factor
-- [ ] list_tags
-- [ ] reboot_node
-- [ ] tag_resource
-- [ ] untag_resource
-- [ ] update_cluster
-- [ ] update_parameter_group
-- [ ] update_subnet_group
-
-## devicefarm - 0% implemented
-- [ ] create_device_pool
-- [ ] create_instance_profile
-- [ ] create_network_profile
-- [ ] create_project
-- [ ] create_remote_access_session
-- [ ] create_upload
-- [ ] create_vpce_configuration
-- [ ] delete_device_pool
-- [ ] delete_instance_profile
-- [ ] delete_network_profile
-- [ ] delete_project
-- [ ] delete_remote_access_session
-- [ ] delete_run
-- [ ] delete_upload
-- [ ] delete_vpce_configuration
-- [ ] get_account_settings
-- [ ] get_device
-- [ ] get_device_instance
-- [ ] get_device_pool
-- [ ] get_device_pool_compatibility
-- [ ] get_instance_profile
-- [ ] get_job
-- [ ] get_network_profile
-- [ ] get_offering_status
-- [ ] get_project
-- [ ] get_remote_access_session
-- [ ] get_run
-- [ ] get_suite
-- [ ] get_test
-- [ ] get_upload
-- [ ] get_vpce_configuration
-- [ ] install_to_remote_access_session
-- [ ] list_artifacts
-- [ ] list_device_instances
-- [ ] list_device_pools
-- [ ] list_devices
-- [ ] list_instance_profiles
-- [ ] list_jobs
-- [ ] list_network_profiles
-- [ ] list_offering_promotions
-- [ ] list_offering_transactions
-- [ ] list_offerings
-- [ ] list_projects
-- [ ] list_remote_access_sessions
-- [ ] list_runs
-- [ ] list_samples
-- [ ] list_suites
-- [ ] list_tests
-- [ ] list_unique_problems
-- [ ] list_uploads
-- [ ] list_vpce_configurations
-- [ ] purchase_offering
-- [ ] renew_offering
-- [ ] schedule_run
-- [ ] stop_remote_access_session
-- [ ] stop_run
-- [ ] update_device_instance
-- [ ] update_device_pool
-- [ ] update_instance_profile
-- [ ] update_network_profile
-- [ ] update_project
-- [ ] update_vpce_configuration
-
-## directconnect - 0% implemented
-- [ ] allocate_connection_on_interconnect
-- [ ] allocate_hosted_connection
-- [ ] allocate_private_virtual_interface
-- [ ] allocate_public_virtual_interface
-- [ ] associate_connection_with_lag
-- [ ] associate_hosted_connection
-- [ ] associate_virtual_interface
-- [ ] confirm_connection
-- [ ] confirm_private_virtual_interface
-- [ ] confirm_public_virtual_interface
-- [ ] create_bgp_peer
-- [ ] create_connection
-- [ ] create_direct_connect_gateway
-- [ ] create_direct_connect_gateway_association
-- [ ] create_interconnect
-- [ ] create_lag
-- [ ] create_private_virtual_interface
-- [ ] create_public_virtual_interface
-- [ ] delete_bgp_peer
-- [ ] delete_connection
-- [ ] delete_direct_connect_gateway
-- [ ] delete_direct_connect_gateway_association
-- [ ] delete_interconnect
-- [ ] delete_lag
-- [ ] delete_virtual_interface
-- [ ] describe_connection_loa
-- [ ] describe_connections
-- [ ] describe_connections_on_interconnect
-- [ ] describe_direct_connect_gateway_associations
-- [ ] describe_direct_connect_gateway_attachments
-- [ ] describe_direct_connect_gateways
-- [ ] describe_hosted_connections
-- [ ] describe_interconnect_loa
-- [ ] describe_interconnects
-- [ ] describe_lags
-- [ ] describe_loa
-- [ ] describe_locations
-- [ ] describe_tags
-- [ ] describe_virtual_gateways
-- [ ] describe_virtual_interfaces
-- [ ] disassociate_connection_from_lag
-- [ ] tag_resource
-- [ ] untag_resource
-- [ ] update_lag
-
-## discovery - 0% implemented
-- [ ] associate_configuration_items_to_application
-- [ ] create_application
-- [ ] create_tags
-- [ ] delete_applications
-- [ ] delete_tags
-- [ ] describe_agents
-- [ ] describe_configurations
-- [ ] describe_export_configurations
-- [ ] describe_export_tasks
-- [ ] describe_tags
-- [ ] disassociate_configuration_items_from_application
-- [ ] export_configurations
-- [ ] get_discovery_summary
-- [ ] list_configurations
-- [ ] list_server_neighbors
-- [ ] start_data_collection_by_agent_ids
-- [ ] start_export_task
-- [ ] stop_data_collection_by_agent_ids
-- [ ] update_application
-
-## dlm - 0% implemented
-- [ ] create_lifecycle_policy
-- [ ] delete_lifecycle_policy
-- [ ] get_lifecycle_policies
-- [ ] get_lifecycle_policy
-- [ ] update_lifecycle_policy
-
-## dms - 0% implemented
-- [ ] add_tags_to_resource
-- [ ] create_endpoint
-- [ ] create_event_subscription
-- [ ] create_replication_instance
-- [ ] create_replication_subnet_group
-- [ ] create_replication_task
-- [ ] delete_certificate
-- [ ] delete_endpoint
-- [ ] delete_event_subscription
-- [ ] delete_replication_instance
-- [ ] delete_replication_subnet_group
-- [ ] delete_replication_task
-- [ ] describe_account_attributes
-- [ ] describe_certificates
-- [ ] describe_connections
-- [ ] describe_endpoint_types
-- [ ] describe_endpoints
-- [ ] describe_event_categories
-- [ ] describe_event_subscriptions
-- [ ] describe_events
-- [ ] describe_orderable_replication_instances
-- [ ] describe_refresh_schemas_status
-- [ ] describe_replication_instance_task_logs
-- [ ] describe_replication_instances
-- [ ] describe_replication_subnet_groups
-- [ ] describe_replication_task_assessment_results
-- [ ] describe_replication_tasks
-- [ ] describe_schemas
-- [ ] describe_table_statistics
-- [ ] import_certificate
-- [ ] list_tags_for_resource
-- [ ] modify_endpoint
-- [ ] modify_event_subscription
-- [ ] modify_replication_instance
-- [ ] modify_replication_subnet_group
-- [ ] modify_replication_task
-- [ ] reboot_replication_instance
-- [ ] refresh_schemas
-- [ ] reload_tables
-- [ ] remove_tags_from_resource
-- [ ] start_replication_task
-- [ ] start_replication_task_assessment
-- [ ] stop_replication_task
-- [ ] test_connection
-
-## ds - 0% implemented
-- [ ] add_ip_routes
-- [ ] add_tags_to_resource
-- [ ] cancel_schema_extension
-- [ ] connect_directory
-- [ ] create_alias
-- [ ] create_computer
-- [ ] create_conditional_forwarder
-- [ ] create_directory
-- [ ] create_microsoft_ad
-- [ ] create_snapshot
-- [ ] create_trust
-- [ ] delete_conditional_forwarder
-- [ ] delete_directory
-- [ ] delete_snapshot
-- [ ] delete_trust
-- [ ] deregister_event_topic
-- [ ] describe_conditional_forwarders
-- [ ] describe_directories
-- [ ] describe_domain_controllers
-- [ ] describe_event_topics
-- [ ] describe_snapshots
-- [ ] describe_trusts
-- [ ] disable_radius
-- [ ] disable_sso
-- [ ] enable_radius
-- [ ] enable_sso
-- [ ] get_directory_limits
-- [ ] get_snapshot_limits
-- [ ] list_ip_routes
-- [ ] list_schema_extensions
-- [ ] list_tags_for_resource
-- [ ] register_event_topic
-- [ ] remove_ip_routes
-- [ ] remove_tags_from_resource
-- [ ] reset_user_password
-- [ ] restore_from_snapshot
-- [ ] start_schema_extension
-- [ ] update_conditional_forwarder
-- [ ] update_number_of_domain_controllers
-- [ ] update_radius
-- [ ] verify_trust
-
-## dynamodb - 21% implemented
-- [ ] batch_get_item
-- [ ] batch_write_item
-- [ ] create_backup
-- [ ] create_global_table
-- [X] create_table
-- [ ] delete_backup
-- [X] delete_item
-- [X] delete_table
-- [ ] describe_backup
-- [ ] describe_continuous_backups
-- [ ] describe_global_table
-- [ ] describe_global_table_settings
-- [ ] describe_limits
-- [ ] describe_table
-- [ ] describe_time_to_live
-- [X] get_item
-- [ ] list_backups
-- [ ] list_global_tables
-- [ ] list_tables
-- [ ] list_tags_of_resource
-- [X] put_item
-- [X] query
-- [ ] restore_table_from_backup
-- [ ] restore_table_to_point_in_time
-- [X] scan
-- [ ] tag_resource
-- [ ] untag_resource
-- [ ] update_continuous_backups
-- [ ] update_global_table
-- [ ] update_global_table_settings
-- [ ] update_item
-- [ ] update_table
-- [ ] update_time_to_live
-
-## dynamodbstreams - 0% implemented
-- [ ] describe_stream
-- [ ] get_records
-- [ ] get_shard_iterator
-- [ ] list_streams
-
-## ec2 - 36% implemented
-- [ ] accept_reserved_instances_exchange_quote
-- [ ] accept_vpc_endpoint_connections
-- [X] accept_vpc_peering_connection
-- [X] allocate_address
-- [ ] allocate_hosts
-- [ ] assign_ipv6_addresses
-- [ ] assign_private_ip_addresses
-- [X] associate_address
-- [X] associate_dhcp_options
-- [ ] associate_iam_instance_profile
-- [X] associate_route_table
-- [ ] associate_subnet_cidr_block
-- [X] associate_vpc_cidr_block
-- [ ] attach_classic_link_vpc
-- [X] attach_internet_gateway
-- [X] attach_network_interface
-- [X] attach_volume
-- [X] attach_vpn_gateway
-- [X] authorize_security_group_egress
-- [X] authorize_security_group_ingress
-- [ ] bundle_instance
-- [ ] cancel_bundle_task
-- [ ] cancel_conversion_task
-- [ ] cancel_export_task
-- [ ] cancel_import_task
-- [ ] cancel_reserved_instances_listing
-- [X] cancel_spot_fleet_requests
-- [X] cancel_spot_instance_requests
-- [ ] confirm_product_instance
-- [ ] copy_fpga_image
-- [X] copy_image
-- [X] copy_snapshot
-- [X] create_customer_gateway
-- [ ] create_default_subnet
-- [ ] create_default_vpc
-- [X] create_dhcp_options
-- [ ] create_egress_only_internet_gateway
-- [ ] create_fleet
-- [ ] create_flow_logs
-- [ ] create_fpga_image
-- [X] create_image
-- [ ] create_instance_export_task
-- [X] create_internet_gateway
-- [X] create_key_pair
-- [ ] create_launch_template
-- [ ] create_launch_template_version
-- [X] create_nat_gateway
-- [X] create_network_acl
-- [X] create_network_acl_entry
-- [X] create_network_interface
-- [ ] create_network_interface_permission
-- [ ] create_placement_group
-- [ ] create_reserved_instances_listing
-- [X] create_route
-- [X] create_route_table
-- [X] create_security_group
-- [X] create_snapshot
-- [ ] create_spot_datafeed_subscription
-- [X] create_subnet
-- [X] create_tags
-- [X] create_volume
-- [X] create_vpc
-- [ ] create_vpc_endpoint
-- [ ] create_vpc_endpoint_connection_notification
-- [ ] create_vpc_endpoint_service_configuration
-- [X] create_vpc_peering_connection
-- [X] create_vpn_connection
-- [ ] create_vpn_connection_route
-- [X] create_vpn_gateway
-- [X] delete_customer_gateway
-- [ ] delete_dhcp_options
-- [ ] delete_egress_only_internet_gateway
-- [ ] delete_fleets
-- [ ] delete_flow_logs
-- [ ] delete_fpga_image
-- [X] delete_internet_gateway
-- [X] delete_key_pair
-- [ ] delete_launch_template
-- [ ] delete_launch_template_versions
-- [X] delete_nat_gateway
-- [X] delete_network_acl
-- [X] delete_network_acl_entry
-- [X] delete_network_interface
-- [ ] delete_network_interface_permission
-- [ ] delete_placement_group
-- [X] delete_route
-- [X] delete_route_table
-- [X] delete_security_group
-- [X] delete_snapshot
-- [ ] delete_spot_datafeed_subscription
-- [X] delete_subnet
-- [X] delete_tags
-- [X] delete_volume
-- [X] delete_vpc
-- [ ] delete_vpc_endpoint_connection_notifications
-- [ ] delete_vpc_endpoint_service_configurations
-- [ ] delete_vpc_endpoints
-- [X] delete_vpc_peering_connection
-- [X] delete_vpn_connection
-- [ ] delete_vpn_connection_route
-- [X] delete_vpn_gateway
-- [X] deregister_image
-- [ ] describe_account_attributes
-- [X] describe_addresses
-- [ ] describe_aggregate_id_format
-- [X] describe_availability_zones
-- [ ] describe_bundle_tasks
-- [ ] describe_classic_link_instances
-- [ ] describe_conversion_tasks
-- [ ] describe_customer_gateways
-- [X] describe_dhcp_options
-- [ ] describe_egress_only_internet_gateways
-- [ ] describe_elastic_gpus
-- [ ] describe_export_tasks
-- [ ] describe_fleet_history
-- [ ] describe_fleet_instances
-- [ ] describe_fleets
-- [ ] describe_flow_logs
-- [ ] describe_fpga_image_attribute
-- [ ] describe_fpga_images
-- [ ] describe_host_reservation_offerings
-- [ ] describe_host_reservations
-- [ ] describe_hosts
-- [ ] describe_iam_instance_profile_associations
-- [ ] describe_id_format
-- [ ] describe_identity_id_format
-- [ ] describe_image_attribute
-- [X] describe_images
-- [ ] describe_import_image_tasks
-- [ ] describe_import_snapshot_tasks
-- [X] describe_instance_attribute
-- [ ] describe_instance_credit_specifications
-- [ ] describe_instance_status
-- [ ] describe_instances
-- [X] describe_internet_gateways
-- [X] describe_key_pairs
-- [ ] describe_launch_template_versions
-- [ ] describe_launch_templates
-- [ ] describe_moving_addresses
-- [ ] describe_nat_gateways
-- [ ] describe_network_acls
-- [ ] describe_network_interface_attribute
-- [ ] describe_network_interface_permissions
-- [X] describe_network_interfaces
-- [ ] describe_placement_groups
-- [ ] describe_prefix_lists
-- [ ] describe_principal_id_format
-- [X] describe_regions
-- [ ] describe_reserved_instances
-- [ ] describe_reserved_instances_listings
-- [ ] describe_reserved_instances_modifications
-- [ ] describe_reserved_instances_offerings
-- [ ] describe_route_tables
-- [ ] describe_scheduled_instance_availability
-- [ ] describe_scheduled_instances
-- [ ] describe_security_group_references
-- [X] describe_security_groups
-- [ ] describe_snapshot_attribute
-- [X] describe_snapshots
-- [ ] describe_spot_datafeed_subscription
-- [X] describe_spot_fleet_instances
-- [ ] describe_spot_fleet_request_history
-- [X] describe_spot_fleet_requests
-- [X] describe_spot_instance_requests
-- [ ] describe_spot_price_history
-- [ ] describe_stale_security_groups
-- [ ] describe_subnets
-- [X] describe_tags
-- [ ] describe_volume_attribute
-- [ ] describe_volume_status
-- [X] describe_volumes
-- [ ] describe_volumes_modifications
-- [X] describe_vpc_attribute
-- [ ] describe_vpc_classic_link
-- [ ] describe_vpc_classic_link_dns_support
-- [ ] describe_vpc_endpoint_connection_notifications
-- [ ] describe_vpc_endpoint_connections
-- [ ] describe_vpc_endpoint_service_configurations
-- [ ] describe_vpc_endpoint_service_permissions
-- [ ] describe_vpc_endpoint_services
-- [ ] describe_vpc_endpoints
-- [ ] describe_vpc_peering_connections
-- [ ] describe_vpcs
-- [X] describe_vpn_connections
-- [ ] describe_vpn_gateways
-- [ ] detach_classic_link_vpc
-- [X] detach_internet_gateway
-- [X] detach_network_interface
-- [X] detach_volume
-- [X] detach_vpn_gateway
-- [ ] disable_vgw_route_propagation
-- [ ] disable_vpc_classic_link
-- [ ] disable_vpc_classic_link_dns_support
-- [X] disassociate_address
-- [ ] disassociate_iam_instance_profile
-- [X] disassociate_route_table
-- [ ] disassociate_subnet_cidr_block
-- [X] disassociate_vpc_cidr_block
-- [ ] enable_vgw_route_propagation
-- [ ] enable_volume_io
-- [ ] enable_vpc_classic_link
-- [ ] enable_vpc_classic_link_dns_support
-- [ ] get_console_output
-- [ ] get_console_screenshot
-- [ ] get_host_reservation_purchase_preview
-- [ ] get_launch_template_data
-- [ ] get_password_data
-- [ ] get_reserved_instances_exchange_quote
-- [ ] import_image
-- [ ] import_instance
-- [X] import_key_pair
-- [ ] import_snapshot
-- [ ] import_volume
-- [ ] modify_fleet
-- [ ] modify_fpga_image_attribute
-- [ ] modify_hosts
-- [ ] modify_id_format
-- [ ] modify_identity_id_format
-- [ ] modify_image_attribute
-- [X] modify_instance_attribute
-- [ ] modify_instance_credit_specification
-- [ ] modify_instance_placement
-- [ ] modify_launch_template
-- [X] modify_network_interface_attribute
-- [ ] modify_reserved_instances
-- [ ] modify_snapshot_attribute
-- [X] modify_spot_fleet_request
-- [X] modify_subnet_attribute
-- [ ] modify_volume
-- [ ] modify_volume_attribute
-- [X] modify_vpc_attribute
-- [ ] modify_vpc_endpoint
-- [ ] modify_vpc_endpoint_connection_notification
-- [ ] modify_vpc_endpoint_service_configuration
-- [ ] modify_vpc_endpoint_service_permissions
-- [ ] modify_vpc_peering_connection_options
-- [ ] modify_vpc_tenancy
-- [ ] monitor_instances
-- [ ] move_address_to_vpc
-- [ ] purchase_host_reservation
-- [ ] purchase_reserved_instances_offering
-- [ ] purchase_scheduled_instances
-- [X] reboot_instances
-- [ ] register_image
-- [ ] reject_vpc_endpoint_connections
-- [X] reject_vpc_peering_connection
-- [X] release_address
-- [ ] release_hosts
-- [ ] replace_iam_instance_profile_association
-- [X] replace_network_acl_association
-- [X] replace_network_acl_entry
-- [X] replace_route
-- [X] replace_route_table_association
-- [ ] report_instance_status
-- [X] request_spot_fleet
-- [X] request_spot_instances
-- [ ] reset_fpga_image_attribute
-- [ ] reset_image_attribute
-- [ ] reset_instance_attribute
-- [ ] reset_network_interface_attribute
-- [ ] reset_snapshot_attribute
-- [ ] restore_address_to_classic
-- [X] revoke_security_group_egress
-- [X] revoke_security_group_ingress
-- [ ] run_instances
-- [ ] run_scheduled_instances
-- [X] start_instances
-- [X] stop_instances
-- [X] terminate_instances
-- [ ] unassign_ipv6_addresses
-- [ ] unassign_private_ip_addresses
-- [ ] unmonitor_instances
-- [ ] update_security_group_rule_descriptions_egress
-- [ ] update_security_group_rule_descriptions_ingress
-
-## ecr - 31% implemented
-- [ ] batch_check_layer_availability
-- [ ] batch_delete_image
-- [X] batch_get_image
-- [ ] complete_layer_upload
-- [X] create_repository
-- [ ] delete_lifecycle_policy
-- [X] delete_repository
-- [ ] delete_repository_policy
-- [X] describe_images
-- [X] describe_repositories
-- [ ] get_authorization_token
-- [ ] get_download_url_for_layer
-- [ ] get_lifecycle_policy
-- [ ] get_lifecycle_policy_preview
-- [ ] get_repository_policy
-- [ ] initiate_layer_upload
-- [X] list_images
-- [X] put_image
-- [ ] put_lifecycle_policy
-- [ ] set_repository_policy
-- [ ] start_lifecycle_policy_preview
-- [ ] upload_layer_part
-
-## ecs - 87% implemented
-- [X] create_cluster
-- [X] create_service
-- [X] delete_attributes
-- [X] delete_cluster
-- [X] delete_service
-- [X] deregister_container_instance
-- [X] deregister_task_definition
-- [X] describe_clusters
-- [X] describe_container_instances
-- [X] describe_services
-- [X] describe_task_definition
-- [X] describe_tasks
-- [ ] discover_poll_endpoint
-- [X] list_attributes
-- [X] list_clusters
-- [X] list_container_instances
-- [X] list_services
-- [X] list_task_definition_families
-- [X] list_task_definitions
-- [X] list_tasks
-- [X] put_attributes
-- [X] register_container_instance
-- [X] register_task_definition
-- [X] run_task
-- [X] start_task
-- [X] stop_task
-- [ ] submit_container_state_change
-- [ ] submit_task_state_change
-- [ ] update_container_agent
-- [X] update_container_instances_state
-- [X] update_service
-
-## efs - 0% implemented
-- [ ] create_file_system
-- [ ] create_mount_target
-- [ ] create_tags
-- [ ] delete_file_system
-- [ ] delete_mount_target
-- [ ] delete_tags
-- [ ] describe_file_systems
-- [ ] describe_mount_target_security_groups
-- [ ] describe_mount_targets
-- [ ] describe_tags
-- [ ] modify_mount_target_security_groups
-- [ ] update_file_system
-
-## eks - 0% implemented
-- [ ] create_cluster
-- [ ] delete_cluster
-- [ ] describe_cluster
-- [ ] list_clusters
-
-## elasticache - 0% implemented
-- [ ] add_tags_to_resource
-- [ ] authorize_cache_security_group_ingress
-- [ ] copy_snapshot
-- [ ] create_cache_cluster
-- [ ] create_cache_parameter_group
-- [ ] create_cache_security_group
-- [ ] create_cache_subnet_group
-- [ ] create_replication_group
-- [ ] create_snapshot
-- [ ] delete_cache_cluster
-- [ ] delete_cache_parameter_group
-- [ ] delete_cache_security_group
-- [ ] delete_cache_subnet_group
-- [ ] delete_replication_group
-- [ ] delete_snapshot
-- [ ] describe_cache_clusters
-- [ ] describe_cache_engine_versions
-- [ ] describe_cache_parameter_groups
-- [ ] describe_cache_parameters
-- [ ] describe_cache_security_groups
-- [ ] describe_cache_subnet_groups
-- [ ] describe_engine_default_parameters
-- [ ] describe_events
-- [ ] describe_replication_groups
-- [ ] describe_reserved_cache_nodes
-- [ ] describe_reserved_cache_nodes_offerings
-- [ ] describe_snapshots
-- [ ] list_allowed_node_type_modifications
-- [ ] list_tags_for_resource
-- [ ] modify_cache_cluster
-- [ ] modify_cache_parameter_group
-- [ ] modify_cache_subnet_group
-- [ ] modify_replication_group
-- [ ] modify_replication_group_shard_configuration
-- [ ] purchase_reserved_cache_nodes_offering
-- [ ] reboot_cache_cluster
-- [ ] remove_tags_from_resource
-- [ ] reset_cache_parameter_group
-- [ ] revoke_cache_security_group_ingress
-- [ ] test_failover
-
-## elasticbeanstalk - 0% implemented
-- [ ] abort_environment_update
-- [ ] apply_environment_managed_action
-- [ ] check_dns_availability
-- [ ] compose_environments
-- [ ] create_application
-- [ ] create_application_version
-- [ ] create_configuration_template
-- [ ] create_environment
-- [ ] create_platform_version
-- [ ] create_storage_location
-- [ ] delete_application
-- [ ] delete_application_version
-- [ ] delete_configuration_template
-- [ ] delete_environment_configuration
-- [ ] delete_platform_version
-- [ ] describe_account_attributes
-- [ ] describe_application_versions
-- [ ] describe_applications
-- [ ] describe_configuration_options
-- [ ] describe_configuration_settings
-- [ ] describe_environment_health
-- [ ] describe_environment_managed_action_history
-- [ ] describe_environment_managed_actions
-- [ ] describe_environment_resources
-- [ ] describe_environments
-- [ ] describe_events
-- [ ] describe_instances_health
-- [ ] describe_platform_version
-- [ ] list_available_solution_stacks
-- [ ] list_platform_versions
-- [ ] list_tags_for_resource
-- [ ] rebuild_environment
-- [ ] request_environment_info
-- [ ] restart_app_server
-- [ ] retrieve_environment_info
-- [ ] swap_environment_cnames
-- [ ] terminate_environment
-- [ ] update_application
-- [ ] update_application_resource_lifecycle
-- [ ] update_application_version
-- [ ] update_configuration_template
-- [ ] update_environment
-- [ ] update_tags_for_resource
-- [ ] validate_configuration_settings
-
-## elastictranscoder - 0% implemented
-- [ ] cancel_job
-- [ ] create_job
-- [ ] create_pipeline
-- [ ] create_preset
-- [ ] delete_pipeline
-- [ ] delete_preset
-- [ ] list_jobs_by_pipeline
-- [ ] list_jobs_by_status
-- [ ] list_pipelines
-- [ ] list_presets
-- [ ] read_job
-- [ ] read_pipeline
-- [ ] read_preset
-- [ ] test_role
-- [ ] update_pipeline
-- [ ] update_pipeline_notifications
-- [ ] update_pipeline_status
-
-## elb - 34% implemented
-- [ ] add_tags
-- [X] apply_security_groups_to_load_balancer
-- [ ] attach_load_balancer_to_subnets
-- [X] configure_health_check
-- [X] create_app_cookie_stickiness_policy
-- [X] create_lb_cookie_stickiness_policy
-- [X] create_load_balancer
-- [X] create_load_balancer_listeners
-- [ ] create_load_balancer_policy
-- [X] delete_load_balancer
-- [X] delete_load_balancer_listeners
-- [ ] delete_load_balancer_policy
-- [ ] deregister_instances_from_load_balancer
-- [ ] describe_account_limits
-- [ ] describe_instance_health
-- [ ] describe_load_balancer_attributes
-- [ ] describe_load_balancer_policies
-- [ ] describe_load_balancer_policy_types
-- [X] describe_load_balancers
-- [ ] describe_tags
-- [ ] detach_load_balancer_from_subnets
-- [ ] disable_availability_zones_for_load_balancer
-- [ ] enable_availability_zones_for_load_balancer
-- [ ] modify_load_balancer_attributes
-- [ ] register_instances_with_load_balancer
-- [ ] remove_tags
-- [ ] set_load_balancer_listener_ssl_certificate
-- [ ] set_load_balancer_policies_for_backend_server
-- [X] set_load_balancer_policies_of_listener
-
-## elbv2 - 70% implemented
-- [ ] add_listener_certificates
-- [ ] add_tags
-- [X] create_listener
-- [X] create_load_balancer
-- [X] create_rule
-- [X] create_target_group
-- [X] delete_listener
-- [X] delete_load_balancer
-- [X] delete_rule
-- [X] delete_target_group
-- [X] deregister_targets
-- [ ] describe_account_limits
-- [ ] describe_listener_certificates
-- [X] describe_listeners
-- [X] describe_load_balancer_attributes
-- [X] describe_load_balancers
-- [X] describe_rules
-- [ ] describe_ssl_policies
-- [ ] describe_tags
-- [ ] describe_target_group_attributes
-- [X] describe_target_groups
-- [X] describe_target_health
-- [X] modify_listener
-- [X] modify_load_balancer_attributes
-- [X] modify_rule
-- [X] modify_target_group
-- [ ] modify_target_group_attributes
-- [X] register_targets
-- [ ] remove_listener_certificates
-- [ ] remove_tags
-- [X] set_ip_address_type
-- [X] set_rule_priorities
-- [X] set_security_groups
-- [X] set_subnets
-
-## emr - 55% implemented
-- [ ] add_instance_fleet
-- [X] add_instance_groups
-- [X] add_job_flow_steps
-- [X] add_tags
-- [ ] cancel_steps
-- [ ] create_security_configuration
-- [ ] delete_security_configuration
-- [ ] describe_cluster
-- [X] describe_job_flows
-- [ ] describe_security_configuration
-- [X] describe_step
-- [X] list_bootstrap_actions
-- [X] list_clusters
-- [ ] list_instance_fleets
-- [X] list_instance_groups
-- [ ] list_instances
-- [ ] list_security_configurations
-- [X] list_steps
-- [ ] modify_instance_fleet
-- [X] modify_instance_groups
-- [ ] put_auto_scaling_policy
-- [ ] remove_auto_scaling_policy
-- [X] remove_tags
-- [X] run_job_flow
-- [X] set_termination_protection
-- [X] set_visible_to_all_users
-- [X] terminate_job_flows
-
-## es - 0% implemented
-- [ ] add_tags
-- [ ] create_elasticsearch_domain
-- [ ] delete_elasticsearch_domain
-- [ ] delete_elasticsearch_service_role
-- [ ] describe_elasticsearch_domain
-- [ ] describe_elasticsearch_domain_config
-- [ ] describe_elasticsearch_domains
-- [ ] describe_elasticsearch_instance_type_limits
-- [ ] describe_reserved_elasticsearch_instance_offerings
-- [ ] describe_reserved_elasticsearch_instances
-- [ ] list_domain_names
-- [ ] list_elasticsearch_instance_types
-- [ ] list_elasticsearch_versions
-- [ ] list_tags
-- [ ] purchase_reserved_elasticsearch_instance_offering
-- [ ] remove_tags
-- [ ] update_elasticsearch_domain_config
-
-## events - 100% implemented
-- [X] delete_rule
-- [X] describe_event_bus
-- [X] describe_rule
-- [X] disable_rule
-- [X] enable_rule
-- [X] list_rule_names_by_target
-- [X] list_rules
-- [X] list_targets_by_rule
-- [X] put_events
-- [X] put_permission
-- [X] put_rule
-- [X] put_targets
-- [X] remove_permission
-- [X] remove_targets
-- [X] test_event_pattern
-
-## firehose - 0% implemented
-- [ ] create_delivery_stream
-- [ ] delete_delivery_stream
-- [ ] describe_delivery_stream
-- [ ] list_delivery_streams
-- [ ] list_tags_for_delivery_stream
-- [ ] put_record
-- [ ] put_record_batch
-- [ ] tag_delivery_stream
-- [ ] untag_delivery_stream
-- [ ] update_destination
-
-## fms - 0% implemented
-- [ ] associate_admin_account
-- [ ] delete_notification_channel
-- [ ] delete_policy
-- [ ] disassociate_admin_account
-- [ ] get_admin_account
-- [ ] get_compliance_detail
-- [ ] get_notification_channel
-- [ ] get_policy
-- [ ] list_compliance_status
-- [ ] list_policies
-- [ ] put_notification_channel
-- [ ] put_policy
-
-## gamelift - 0% implemented
-- [ ] accept_match
-- [ ] create_alias
-- [ ] create_build
-- [ ] create_fleet
-- [ ] create_game_session
-- [ ] create_game_session_queue
-- [ ] create_matchmaking_configuration
-- [ ] create_matchmaking_rule_set
-- [ ] create_player_session
-- [ ] create_player_sessions
-- [ ] create_vpc_peering_authorization
-- [ ] create_vpc_peering_connection
-- [ ] delete_alias
-- [ ] delete_build
-- [ ] delete_fleet
-- [ ] delete_game_session_queue
-- [ ] delete_matchmaking_configuration
-- [ ] delete_scaling_policy
-- [ ] delete_vpc_peering_authorization
-- [ ] delete_vpc_peering_connection
-- [ ] describe_alias
-- [ ] describe_build
-- [ ] describe_ec2_instance_limits
-- [ ] describe_fleet_attributes
-- [ ] describe_fleet_capacity
-- [ ] describe_fleet_events
-- [ ] describe_fleet_port_settings
-- [ ] describe_fleet_utilization
-- [ ] describe_game_session_details
-- [ ] describe_game_session_placement
-- [ ] describe_game_session_queues
-- [ ] describe_game_sessions
-- [ ] describe_instances
-- [ ] describe_matchmaking
-- [ ] describe_matchmaking_configurations
-- [ ] describe_matchmaking_rule_sets
-- [ ] describe_player_sessions
-- [ ] describe_runtime_configuration
-- [ ] describe_scaling_policies
-- [ ] describe_vpc_peering_authorizations
-- [ ] describe_vpc_peering_connections
-- [ ] get_game_session_log_url
-- [ ] get_instance_access
-- [ ] list_aliases
-- [ ] list_builds
-- [ ] list_fleets
-- [ ] put_scaling_policy
-- [ ] request_upload_credentials
-- [ ] resolve_alias
-- [ ] search_game_sessions
-- [ ] start_fleet_actions
-- [ ] start_game_session_placement
-- [ ] start_match_backfill
-- [ ] start_matchmaking
-- [ ] stop_fleet_actions
-- [ ] stop_game_session_placement
-- [ ] stop_matchmaking
-- [ ] update_alias
-- [ ] update_build
-- [ ] update_fleet_attributes
-- [ ] update_fleet_capacity
-- [ ] update_fleet_port_settings
-- [ ] update_game_session
-- [ ] update_game_session_queue
-- [ ] update_matchmaking_configuration
-- [ ] update_runtime_configuration
-- [ ] validate_matchmaking_rule_set
-
-## glacier - 12% implemented
-- [ ] abort_multipart_upload
-- [ ] abort_vault_lock
-- [ ] add_tags_to_vault
-- [ ] complete_multipart_upload
-- [ ] complete_vault_lock
-- [X] create_vault
-- [ ] delete_archive
-- [X] delete_vault
-- [ ] delete_vault_access_policy
-- [ ] delete_vault_notifications
-- [ ] describe_job
-- [ ] describe_vault
-- [ ] get_data_retrieval_policy
-- [ ] get_job_output
-- [ ] get_vault_access_policy
-- [ ] get_vault_lock
-- [ ] get_vault_notifications
-- [X] initiate_job
-- [ ] initiate_multipart_upload
-- [ ] initiate_vault_lock
-- [X] list_jobs
-- [ ] list_multipart_uploads
-- [ ] list_parts
-- [ ] list_provisioned_capacity
-- [ ] list_tags_for_vault
-- [ ] list_vaults
-- [ ] purchase_provisioned_capacity
-- [ ] remove_tags_from_vault
-- [ ] set_data_retrieval_policy
-- [ ] set_vault_access_policy
-- [ ] set_vault_notifications
-- [ ] upload_archive
-- [ ] upload_multipart_part
-
-## glue - 6% implemented
-- [ ] batch_create_partition
-- [ ] batch_delete_connection
-- [ ] batch_delete_partition
-- [ ] batch_delete_table
-- [ ] batch_delete_table_version
-- [ ] batch_get_partition
-- [ ] batch_stop_job_run
-- [ ] create_classifier
-- [ ] create_connection
-- [ ] create_crawler
-- [X] create_database
-- [ ] create_dev_endpoint
-- [ ] create_job
-- [ ] create_partition
-- [ ] create_script
-- [X] create_table
-- [ ] create_trigger
-- [ ] create_user_defined_function
-- [ ] delete_classifier
-- [ ] delete_connection
-- [ ] delete_crawler
-- [ ] delete_database
-- [ ] delete_dev_endpoint
-- [ ] delete_job
-- [ ] delete_partition
-- [ ] delete_table
-- [ ] delete_table_version
-- [ ] delete_trigger
-- [ ] delete_user_defined_function
-- [ ] get_catalog_import_status
-- [ ] get_classifier
-- [ ] get_classifiers
-- [ ] get_connection
-- [ ] get_connections
-- [ ] get_crawler
-- [ ] get_crawler_metrics
-- [ ] get_crawlers
-- [X] get_database
-- [ ] get_databases
-- [ ] get_dataflow_graph
-- [ ] get_dev_endpoint
-- [ ] get_dev_endpoints
-- [ ] get_job
-- [ ] get_job_run
-- [ ] get_job_runs
-- [ ] get_jobs
-- [ ] get_mapping
-- [ ] get_partition
-- [ ] get_partitions
-- [ ] get_plan
-- [X] get_table
-- [ ] get_table_version
-- [ ] get_table_versions
-- [X] get_tables
-- [ ] get_trigger
-- [ ] get_triggers
-- [ ] get_user_defined_function
-- [ ] get_user_defined_functions
-- [ ] import_catalog_to_glue
-- [ ] reset_job_bookmark
-- [ ] start_crawler
-- [ ] start_crawler_schedule
-- [ ] start_job_run
-- [ ] start_trigger
-- [ ] stop_crawler
-- [ ] stop_crawler_schedule
-- [ ] stop_trigger
-- [ ] update_classifier
-- [ ] update_connection
-- [ ] update_crawler
-- [ ] update_crawler_schedule
-- [ ] update_database
-- [ ] update_dev_endpoint
-- [ ] update_job
-- [ ] update_partition
-- [ ] update_table
-- [ ] update_trigger
-- [ ] update_user_defined_function
-
-## greengrass - 0% implemented
-- [ ] associate_role_to_group
-- [ ] associate_service_role_to_account
-- [ ] create_core_definition
-- [ ] create_core_definition_version
-- [ ] create_deployment
-- [ ] create_device_definition
-- [ ] create_device_definition_version
-- [ ] create_function_definition
-- [ ] create_function_definition_version
-- [ ] create_group
-- [ ] create_group_certificate_authority
-- [ ] create_group_version
-- [ ] create_logger_definition
-- [ ] create_logger_definition_version
-- [ ] create_resource_definition
-- [ ] create_resource_definition_version
-- [ ] create_software_update_job
-- [ ] create_subscription_definition
-- [ ] create_subscription_definition_version
-- [ ] delete_core_definition
-- [ ] delete_device_definition
-- [ ] delete_function_definition
-- [ ] delete_group
-- [ ] delete_logger_definition
-- [ ] delete_resource_definition
-- [ ] delete_subscription_definition
-- [ ] disassociate_role_from_group
-- [ ] disassociate_service_role_from_account
-- [ ] get_associated_role
-- [ ] get_connectivity_info
-- [ ] get_core_definition
-- [ ] get_core_definition_version
-- [ ] get_deployment_status
-- [ ] get_device_definition
-- [ ] get_device_definition_version
-- [ ] get_function_definition
-- [ ] get_function_definition_version
-- [ ] get_group
-- [ ] get_group_certificate_authority
-- [ ] get_group_certificate_configuration
-- [ ] get_group_version
-- [ ] get_logger_definition
-- [ ] get_logger_definition_version
-- [ ] get_resource_definition
-- [ ] get_resource_definition_version
-- [ ] get_service_role_for_account
-- [ ] get_subscription_definition
-- [ ] get_subscription_definition_version
-- [ ] list_core_definition_versions
-- [ ] list_core_definitions
-- [ ] list_deployments
-- [ ] list_device_definition_versions
-- [ ] list_device_definitions
-- [ ] list_function_definition_versions
-- [ ] list_function_definitions
-- [ ] list_group_certificate_authorities
-- [ ] list_group_versions
-- [ ] list_groups
-- [ ] list_logger_definition_versions
-- [ ] list_logger_definitions
-- [ ] list_resource_definition_versions
-- [ ] list_resource_definitions
-- [ ] list_subscription_definition_versions
-- [ ] list_subscription_definitions
-- [ ] reset_deployments
-- [ ] update_connectivity_info
-- [ ] update_core_definition
-- [ ] update_device_definition
-- [ ] update_function_definition
-- [ ] update_group
-- [ ] update_group_certificate_configuration
-- [ ] update_logger_definition
-- [ ] update_resource_definition
-- [ ] update_subscription_definition
-
-## guardduty - 0% implemented
-- [ ] accept_invitation
-- [ ] archive_findings
-- [ ] create_detector
-- [ ] create_filter
-- [ ] create_ip_set
-- [ ] create_members
-- [ ] create_sample_findings
-- [ ] create_threat_intel_set
-- [ ] decline_invitations
-- [ ] delete_detector
-- [ ] delete_filter
-- [ ] delete_invitations
-- [ ] delete_ip_set
-- [ ] delete_members
-- [ ] delete_threat_intel_set
-- [ ] disassociate_from_master_account
-- [ ] disassociate_members
-- [ ] get_detector
-- [ ] get_filter
-- [ ] get_findings
-- [ ] get_findings_statistics
-- [ ] get_invitations_count
-- [ ] get_ip_set
-- [ ] get_master_account
-- [ ] get_members
-- [ ] get_threat_intel_set
-- [ ] invite_members
-- [ ] list_detectors
-- [ ] list_filters
-- [ ] list_findings
-- [ ] list_invitations
-- [ ] list_ip_sets
-- [ ] list_members
-- [ ] list_threat_intel_sets
-- [ ] start_monitoring_members
-- [ ] stop_monitoring_members
-- [ ] unarchive_findings
-- [ ] update_detector
-- [ ] update_filter
-- [ ] update_findings_feedback
-- [ ] update_ip_set
-- [ ] update_threat_intel_set
-
-## health - 0% implemented
-- [ ] describe_affected_entities
-- [ ] describe_entity_aggregates
-- [ ] describe_event_aggregates
-- [ ] describe_event_details
-- [ ] describe_event_types
-- [ ] describe_events
-
-## iam - 47% implemented
-- [ ] add_client_id_to_open_id_connect_provider
-- [X] add_role_to_instance_profile
-- [X] add_user_to_group
-- [X] attach_group_policy
-- [X] attach_role_policy
-- [X] attach_user_policy
-- [ ] change_password
-- [X] create_access_key
-- [X] create_account_alias
-- [X] create_group
-- [X] create_instance_profile
-- [X] create_login_profile
-- [ ] create_open_id_connect_provider
-- [X] create_policy
-- [X] create_policy_version
-- [X] create_role
-- [ ] create_saml_provider
-- [ ] create_service_linked_role
-- [ ] create_service_specific_credential
-- [X] create_user
-- [ ] create_virtual_mfa_device
-- [X] deactivate_mfa_device
-- [X] delete_access_key
-- [X] delete_account_alias
-- [ ] delete_account_password_policy
-- [ ] delete_group
-- [ ] delete_group_policy
-- [ ] delete_instance_profile
-- [X] delete_login_profile
-- [ ] delete_open_id_connect_provider
-- [ ] delete_policy
-- [X] delete_policy_version
-- [X] delete_role
-- [ ] delete_role_permissions_boundary
-- [X] delete_role_policy
-- [ ] delete_saml_provider
-- [X] delete_server_certificate
-- [ ] delete_service_linked_role
-- [ ] delete_service_specific_credential
-- [ ] delete_signing_certificate
-- [ ] delete_ssh_public_key
-- [X] delete_user
-- [ ] delete_user_permissions_boundary
-- [X] delete_user_policy
-- [ ] delete_virtual_mfa_device
-- [X] detach_group_policy
-- [X] detach_role_policy
-- [X] detach_user_policy
-- [X] enable_mfa_device
-- [ ] generate_credential_report
-- [ ] get_access_key_last_used
-- [X] get_account_authorization_details
-- [ ] get_account_password_policy
-- [ ] get_account_summary
-- [ ] get_context_keys_for_custom_policy
-- [ ] get_context_keys_for_principal_policy
-- [X] get_credential_report
-- [X] get_group
-- [X] get_group_policy
-- [X] get_instance_profile
-- [X] get_login_profile
-- [ ] get_open_id_connect_provider
-- [X] get_policy
-- [X] get_policy_version
-- [X] get_role
-- [X] get_role_policy
-- [ ] get_saml_provider
-- [X] get_server_certificate
-- [ ] get_service_linked_role_deletion_status
-- [ ] get_ssh_public_key
-- [X] get_user
-- [X] get_user_policy
-- [ ] list_access_keys
-- [X] list_account_aliases
-- [X] list_attached_group_policies
-- [X] list_attached_role_policies
-- [X] list_attached_user_policies
-- [ ] list_entities_for_policy
-- [X] list_group_policies
-- [X] list_groups
-- [ ] list_groups_for_user
-- [ ] list_instance_profiles
-- [ ] list_instance_profiles_for_role
-- [X] list_mfa_devices
-- [ ] list_open_id_connect_providers
-- [X] list_policies
-- [X] list_policy_versions
-- [X] list_role_policies
-- [ ] list_roles
-- [ ] list_saml_providers
-- [ ] list_server_certificates
-- [ ] list_service_specific_credentials
-- [ ] list_signing_certificates
-- [ ] list_ssh_public_keys
-- [X] list_user_policies
-- [X] list_users
-- [ ] list_virtual_mfa_devices
-- [X] put_group_policy
-- [ ] put_role_permissions_boundary
-- [X] put_role_policy
-- [ ] put_user_permissions_boundary
-- [X] put_user_policy
-- [ ] remove_client_id_from_open_id_connect_provider
-- [X] remove_role_from_instance_profile
-- [X] remove_user_from_group
-- [ ] reset_service_specific_credential
-- [ ] resync_mfa_device
-- [ ] set_default_policy_version
-- [ ] simulate_custom_policy
-- [ ] simulate_principal_policy
-- [X] update_access_key
-- [ ] update_account_password_policy
-- [ ] update_assume_role_policy
-- [ ] update_group
-- [X] update_login_profile
-- [ ] update_open_id_connect_provider_thumbprint
-- [ ] update_role
-- [ ] update_role_description
-- [ ] update_saml_provider
-- [ ] update_server_certificate
-- [ ] update_service_specific_credential
-- [ ] update_signing_certificate
-- [ ] update_ssh_public_key
-- [ ] update_user
-- [ ] upload_server_certificate
-- [ ] upload_signing_certificate
-- [ ] upload_ssh_public_key
-
-## importexport - 0% implemented
-- [ ] cancel_job
-- [ ] create_job
-- [ ] get_shipping_label
-- [ ] get_status
-- [ ] list_jobs
-- [ ] update_job
-
-## inspector - 0% implemented
-- [ ] add_attributes_to_findings
-- [ ] create_assessment_target
-- [ ] create_assessment_template
-- [ ] create_exclusions_preview
-- [ ] create_resource_group
-- [ ] delete_assessment_run
-- [ ] delete_assessment_target
-- [ ] delete_assessment_template
-- [ ] describe_assessment_runs
-- [ ] describe_assessment_targets
-- [ ] describe_assessment_templates
-- [ ] describe_cross_account_access_role
-- [ ] describe_exclusions
-- [ ] describe_findings
-- [ ] describe_resource_groups
-- [ ] describe_rules_packages
-- [ ] get_assessment_report
-- [ ] get_exclusions_preview
-- [ ] get_telemetry_metadata
-- [ ] list_assessment_run_agents
-- [ ] list_assessment_runs
-- [ ] list_assessment_targets
-- [ ] list_assessment_templates
-- [ ] list_event_subscriptions
-- [ ] list_exclusions
-- [ ] list_findings
-- [ ] list_rules_packages
-- [ ] list_tags_for_resource
-- [ ] preview_agents
-- [ ] register_cross_account_access_role
-- [ ] remove_attributes_from_findings
-- [ ] set_tags_for_resource
-- [ ] start_assessment_run
-- [ ] stop_assessment_run
-- [ ] subscribe_to_event
-- [ ] unsubscribe_from_event
-- [ ] update_assessment_target
-
-## iot - 25% implemented
-- [ ] accept_certificate_transfer
-- [X] add_thing_to_thing_group
-- [ ] associate_targets_with_job
-- [ ] attach_policy
-- [X] attach_principal_policy
-- [ ] attach_security_profile
-- [X] attach_thing_principal
-- [ ] cancel_audit_task
-- [ ] cancel_certificate_transfer
-- [ ] cancel_job
-- [ ] cancel_job_execution
-- [ ] clear_default_authorizer
-- [ ] create_authorizer
-- [ ] create_certificate_from_csr
-- [X] create_job
-- [X] create_keys_and_certificate
-- [ ] create_ota_update
-- [X] create_policy
-- [ ] create_policy_version
-- [ ] create_role_alias
-- [ ] create_scheduled_audit
-- [ ] create_security_profile
-- [ ] create_stream
-- [X] create_thing
-- [X] create_thing_group
-- [X] create_thing_type
-- [ ] create_topic_rule
-- [ ] delete_account_audit_configuration
-- [ ] delete_authorizer
-- [ ] delete_ca_certificate
-- [X] delete_certificate
-- [ ] delete_job
-- [ ] delete_job_execution
-- [ ] delete_ota_update
-- [X] delete_policy
-- [ ] delete_policy_version
-- [ ] delete_registration_code
-- [ ] delete_role_alias
-- [ ] delete_scheduled_audit
-- [ ] delete_security_profile
-- [ ] delete_stream
-- [X] delete_thing
-- [X] delete_thing_group
-- [X] delete_thing_type
-- [ ] delete_topic_rule
-- [ ] delete_v2_logging_level
-- [ ] deprecate_thing_type
-- [ ] describe_account_audit_configuration
-- [ ] describe_audit_task
-- [ ] describe_authorizer
-- [ ] describe_ca_certificate
-- [X] describe_certificate
-- [ ] describe_default_authorizer
-- [ ] describe_endpoint
-- [ ] describe_event_configurations
-- [ ] describe_index
-- [X] describe_job
-- [ ] describe_job_execution
-- [ ] describe_role_alias
-- [ ] describe_scheduled_audit
-- [ ] describe_security_profile
-- [ ] describe_stream
-- [X] describe_thing
-- [X] describe_thing_group
-- [ ] describe_thing_registration_task
-- [X] describe_thing_type
-- [ ] detach_policy
-- [X] detach_principal_policy
-- [ ] detach_security_profile
-- [X] detach_thing_principal
-- [ ] disable_topic_rule
-- [ ] enable_topic_rule
-- [ ] get_effective_policies
-- [ ] get_indexing_configuration
-- [X] get_job_document
-- [ ] get_logging_options
-- [ ] get_ota_update
-- [X] get_policy
-- [ ] get_policy_version
-- [ ] get_registration_code
-- [ ] get_topic_rule
-- [ ] get_v2_logging_options
-- [ ] list_active_violations
-- [ ] list_attached_policies
-- [ ] list_audit_findings
-- [ ] list_audit_tasks
-- [ ] list_authorizers
-- [ ] list_ca_certificates
-- [X] list_certificates
-- [ ] list_certificates_by_ca
-- [ ] list_indices
-- [ ] list_job_executions_for_job
-- [ ] list_job_executions_for_thing
-- [ ] list_jobs
-- [ ] list_ota_updates
-- [ ] list_outgoing_certificates
-- [X] list_policies
-- [X] list_policy_principals
-- [ ] list_policy_versions
-- [X] list_principal_policies
-- [X] list_principal_things
-- [ ] list_role_aliases
-- [ ] list_scheduled_audits
-- [ ] list_security_profiles
-- [ ] list_security_profiles_for_target
-- [ ] list_streams
-- [ ] list_targets_for_policy
-- [ ] list_targets_for_security_profile
-- [X] list_thing_groups
-- [X] list_thing_groups_for_thing
-- [X] list_thing_principals
-- [ ] list_thing_registration_task_reports
-- [ ] list_thing_registration_tasks
-- [X] list_thing_types
-- [X] list_things
-- [X] list_things_in_thing_group
-- [ ] list_topic_rules
-- [ ] list_v2_logging_levels
-- [ ] list_violation_events
-- [ ] register_ca_certificate
-- [ ] register_certificate
-- [ ] register_thing
-- [ ] reject_certificate_transfer
-- [X] remove_thing_from_thing_group
-- [ ] replace_topic_rule
-- [ ] search_index
-- [ ] set_default_authorizer
-- [ ] set_default_policy_version
-- [ ] set_logging_options
-- [ ] set_v2_logging_level
-- [ ] set_v2_logging_options
-- [ ] start_on_demand_audit_task
-- [ ] start_thing_registration_task
-- [ ] stop_thing_registration_task
-- [ ] test_authorization
-- [ ] test_invoke_authorizer
-- [ ] transfer_certificate
-- [ ] update_account_audit_configuration
-- [ ] update_authorizer
-- [ ] update_ca_certificate
-- [X] update_certificate
-- [ ] update_event_configurations
-- [ ] update_indexing_configuration
-- [ ] update_role_alias
-- [ ] update_scheduled_audit
-- [ ] update_security_profile
-- [ ] update_stream
-- [X] update_thing
-- [X] update_thing_group
-- [X] update_thing_groups_for_thing
-- [ ] validate_security_profile_behaviors
-
-## iot-data - 100% implemented
-- [X] delete_thing_shadow
-- [X] get_thing_shadow
-- [X] publish
-- [X] update_thing_shadow
-
-## iot-jobs-data - 0% implemented
-- [ ] describe_job_execution
-- [ ] get_pending_job_executions
-- [ ] start_next_pending_job_execution
-- [ ] update_job_execution
-
-## iot1click-devices - 0% implemented
-- [ ] claim_devices_by_claim_code
-- [ ] describe_device
-- [ ] finalize_device_claim
-- [ ] get_device_methods
-- [ ] initiate_device_claim
-- [ ] invoke_device_method
-- [ ] list_device_events
-- [ ] list_devices
-- [ ] unclaim_device
-- [ ] update_device_state
-
-## iot1click-projects - 0% implemented
-- [ ] associate_device_with_placement
-- [ ] create_placement
-- [ ] create_project
-- [ ] delete_placement
-- [ ] delete_project
-- [ ] describe_placement
-- [ ] describe_project
-- [ ] disassociate_device_from_placement
-- [ ] get_devices_in_placement
-- [ ] list_placements
-- [ ] list_projects
-- [ ] update_placement
-- [ ] update_project
-
-## iotanalytics - 0% implemented
-- [ ] batch_put_message
-- [ ] cancel_pipeline_reprocessing
-- [ ] create_channel
-- [ ] create_dataset
-- [ ] create_dataset_content
-- [ ] create_datastore
-- [ ] create_pipeline
-- [ ] delete_channel
-- [ ] delete_dataset
-- [ ] delete_dataset_content
-- [ ] delete_datastore
-- [ ] delete_pipeline
-- [ ] describe_channel
-- [ ] describe_dataset
-- [ ] describe_datastore
-- [ ] describe_logging_options
-- [ ] describe_pipeline
-- [ ] get_dataset_content
-- [ ] list_channels
-- [ ] list_datasets
-- [ ] list_datastores
-- [ ] list_pipelines
-- [ ] list_tags_for_resource
-- [ ] put_logging_options
-- [ ] run_pipeline_activity
-- [ ] sample_channel_data
-- [ ] start_pipeline_reprocessing
-- [ ] tag_resource
-- [ ] untag_resource
-- [ ] update_channel
-- [ ] update_dataset
-- [ ] update_datastore
-- [ ] update_pipeline
-
-## kinesis - 46% implemented
-- [X] add_tags_to_stream
-- [X] create_stream
-- [ ] decrease_stream_retention_period
-- [X] delete_stream
-- [ ] deregister_stream_consumer
-- [ ] describe_limits
-- [X] describe_stream
-- [ ] describe_stream_consumer
-- [ ] describe_stream_summary
-- [ ] disable_enhanced_monitoring
-- [ ] enable_enhanced_monitoring
-- [X] get_records
-- [X] get_shard_iterator
-- [ ] increase_stream_retention_period
-- [ ] list_shards
-- [ ] list_stream_consumers
-- [X] list_streams
-- [X] list_tags_for_stream
-- [X] merge_shards
-- [X] put_record
-- [X] put_records
-- [ ] register_stream_consumer
-- [X] remove_tags_from_stream
-- [X] split_shard
-- [ ] start_stream_encryption
-- [ ] stop_stream_encryption
-- [ ] subscribe_to_shard
-- [ ] update_shard_count
-
-## kinesis-video-archived-media - 0% implemented
-- [ ] get_hls_streaming_session_url
-- [ ] get_media_for_fragment_list
-- [ ] list_fragments
-
-## kinesis-video-media - 0% implemented
-- [ ] get_media
-
-## kinesisanalytics - 0% implemented
-- [ ] add_application_cloud_watch_logging_option
-- [ ] add_application_input
-- [ ] add_application_input_processing_configuration
-- [ ] add_application_output
-- [ ] add_application_reference_data_source
-- [ ] create_application
-- [ ] delete_application
-- [ ] delete_application_cloud_watch_logging_option
-- [ ] delete_application_input_processing_configuration
-- [ ] delete_application_output
-- [ ] delete_application_reference_data_source
-- [ ] describe_application
-- [ ] discover_input_schema
-- [ ] list_applications
-- [ ] start_application
-- [ ] stop_application
-- [ ] update_application
-
-## kinesisvideo - 0% implemented
-- [ ] create_stream
-- [ ] delete_stream
-- [ ] describe_stream
-- [ ] get_data_endpoint
-- [ ] list_streams
-- [ ] list_tags_for_stream
-- [ ] tag_stream
-- [ ] untag_stream
-- [ ] update_data_retention
-- [ ] update_stream
-
-## kms - 25% implemented
-- [ ] cancel_key_deletion
-- [ ] create_alias
-- [ ] create_grant
-- [X] create_key
-- [ ] decrypt
-- [X] delete_alias
-- [ ] delete_imported_key_material
-- [X] describe_key
-- [ ] disable_key
-- [X] disable_key_rotation
-- [ ] enable_key
-- [X] enable_key_rotation
-- [ ] encrypt
-- [ ] generate_data_key
-- [ ] generate_data_key_without_plaintext
-- [ ] generate_random
-- [X] get_key_policy
-- [X] get_key_rotation_status
-- [ ] get_parameters_for_import
-- [ ] import_key_material
-- [ ] list_aliases
-- [ ] list_grants
-- [ ] list_key_policies
-- [X] list_keys
-- [ ] list_resource_tags
-- [ ] list_retirable_grants
-- [X] put_key_policy
-- [ ] re_encrypt
-- [ ] retire_grant
-- [ ] revoke_grant
-- [ ] schedule_key_deletion
-- [ ] tag_resource
-- [ ] untag_resource
-- [ ] update_alias
-- [ ] update_key_description
-
-## lambda - 0% implemented
-- [ ] add_permission
-- [ ] create_alias
-- [ ] create_event_source_mapping
-- [ ] create_function
-- [ ] delete_alias
-- [ ] delete_event_source_mapping
-- [ ] delete_function
-- [ ] delete_function_concurrency
-- [ ] get_account_settings
-- [ ] get_alias
-- [ ] get_event_source_mapping
-- [ ] get_function
-- [ ] get_function_configuration
-- [ ] get_policy
-- [ ] invoke
-- [ ] invoke_async
-- [ ] list_aliases
-- [ ] list_event_source_mappings
-- [ ] list_functions
-- [ ] list_tags
-- [ ] list_versions_by_function
-- [ ] publish_version
-- [ ] put_function_concurrency
-- [ ] remove_permission
-- [ ] tag_resource
-- [ ] untag_resource
-- [ ] update_alias
-- [ ] update_event_source_mapping
-- [ ] update_function_code
-- [ ] update_function_configuration
-
-## lex-models - 0% implemented
-- [ ] create_bot_version
-- [ ] create_intent_version
-- [ ] create_slot_type_version
-- [ ] delete_bot
-- [ ] delete_bot_alias
-- [ ] delete_bot_channel_association
-- [ ] delete_bot_version
-- [ ] delete_intent
-- [ ] delete_intent_version
-- [ ] delete_slot_type
-- [ ] delete_slot_type_version
-- [ ] delete_utterances
-- [ ] get_bot
-- [ ] get_bot_alias
-- [ ] get_bot_aliases
-- [ ] get_bot_channel_association
-- [ ] get_bot_channel_associations
-- [ ] get_bot_versions
-- [ ] get_bots
-- [ ] get_builtin_intent
-- [ ] get_builtin_intents
-- [ ] get_builtin_slot_types
-- [ ] get_export
-- [ ] get_import
-- [ ] get_intent
-- [ ] get_intent_versions
-- [ ] get_intents
-- [ ] get_slot_type
-- [ ] get_slot_type_versions
-- [ ] get_slot_types
-- [ ] get_utterances_view
-- [ ] put_bot
-- [ ] put_bot_alias
-- [ ] put_intent
-- [ ] put_slot_type
-- [ ] start_import
-
-## lex-runtime - 0% implemented
-- [ ] post_content
-- [ ] post_text
-
-## lightsail - 0% implemented
-- [ ] allocate_static_ip
-- [ ] attach_disk
-- [ ] attach_instances_to_load_balancer
-- [ ] attach_load_balancer_tls_certificate
-- [ ] attach_static_ip
-- [ ] close_instance_public_ports
-- [ ] create_disk
-- [ ] create_disk_from_snapshot
-- [ ] create_disk_snapshot
-- [ ] create_domain
-- [ ] create_domain_entry
-- [ ] create_instance_snapshot
-- [ ] create_instances
-- [ ] create_instances_from_snapshot
-- [ ] create_key_pair
-- [ ] create_load_balancer
-- [ ] create_load_balancer_tls_certificate
-- [ ] delete_disk
-- [ ] delete_disk_snapshot
-- [ ] delete_domain
-- [ ] delete_domain_entry
-- [ ] delete_instance
-- [ ] delete_instance_snapshot
-- [ ] delete_key_pair
-- [ ] delete_load_balancer
-- [ ] delete_load_balancer_tls_certificate
-- [ ] detach_disk
-- [ ] detach_instances_from_load_balancer
-- [ ] detach_static_ip
-- [ ] download_default_key_pair
-- [ ] get_active_names
-- [ ] get_blueprints
-- [ ] get_bundles
-- [ ] get_disk
-- [ ] get_disk_snapshot
-- [ ] get_disk_snapshots
-- [ ] get_disks
-- [ ] get_domain
-- [ ] get_domains
-- [ ] get_instance
-- [ ] get_instance_access_details
-- [ ] get_instance_metric_data
-- [ ] get_instance_port_states
-- [ ] get_instance_snapshot
-- [ ] get_instance_snapshots
-- [ ] get_instance_state
-- [ ] get_instances
-- [ ] get_key_pair
-- [ ] get_key_pairs
-- [ ] get_load_balancer
-- [ ] get_load_balancer_metric_data
-- [ ] get_load_balancer_tls_certificates
-- [ ] get_load_balancers
-- [ ] get_operation
-- [ ] get_operations
-- [ ] get_operations_for_resource
-- [ ] get_regions
-- [ ] get_static_ip
-- [ ] get_static_ips
-- [ ] import_key_pair
-- [ ] is_vpc_peered
-- [ ] open_instance_public_ports
-- [ ] peer_vpc
-- [ ] put_instance_public_ports
-- [ ] reboot_instance
-- [ ] release_static_ip
-- [ ] start_instance
-- [ ] stop_instance
-- [ ] unpeer_vpc
-- [ ] update_domain_entry
-- [ ] update_load_balancer_attribute
-
-## logs - 27% implemented
-- [ ] associate_kms_key
-- [ ] cancel_export_task
-- [ ] create_export_task
-- [X] create_log_group
-- [X] create_log_stream
-- [ ] delete_destination
-- [X] delete_log_group
-- [X] delete_log_stream
-- [ ] delete_metric_filter
-- [ ] delete_resource_policy
-- [ ] delete_retention_policy
-- [ ] delete_subscription_filter
-- [ ] describe_destinations
-- [ ] describe_export_tasks
-- [X] describe_log_groups
-- [X] describe_log_streams
-- [ ] describe_metric_filters
-- [ ] describe_resource_policies
-- [ ] describe_subscription_filters
-- [ ] disassociate_kms_key
-- [X] filter_log_events
-- [X] get_log_events
-- [ ] list_tags_log_group
-- [ ] put_destination
-- [ ] put_destination_policy
-- [X] put_log_events
-- [ ] put_metric_filter
-- [ ] put_resource_policy
-- [ ] put_retention_policy
-- [ ] put_subscription_filter
-- [ ] tag_log_group
-- [ ] test_metric_filter
-- [ ] untag_log_group
-
-## machinelearning - 0% implemented
-- [ ] add_tags
-- [ ] create_batch_prediction
-- [ ] create_data_source_from_rds
-- [ ] create_data_source_from_redshift
-- [ ] create_data_source_from_s3
-- [ ] create_evaluation
-- [ ] create_ml_model
-- [ ] create_realtime_endpoint
-- [ ] delete_batch_prediction
-- [ ] delete_data_source
-- [ ] delete_evaluation
-- [ ] delete_ml_model
-- [ ] delete_realtime_endpoint
-- [ ] delete_tags
-- [ ] describe_batch_predictions
-- [ ] describe_data_sources
-- [ ] describe_evaluations
-- [ ] describe_ml_models
-- [ ] describe_tags
-- [ ] get_batch_prediction
-- [ ] get_data_source
-- [ ] get_evaluation
-- [ ] get_ml_model
-- [ ] predict
-- [ ] update_batch_prediction
-- [ ] update_data_source
-- [ ] update_evaluation
-- [ ] update_ml_model
-
-## macie - 0% implemented
-- [ ] associate_member_account
-- [ ] associate_s3_resources
-- [ ] disassociate_member_account
-- [ ] disassociate_s3_resources
-- [ ] list_member_accounts
-- [ ] list_s3_resources
-- [ ] update_s3_resources
-
-## marketplace-entitlement - 0% implemented
-- [ ] get_entitlements
-
-## marketplacecommerceanalytics - 0% implemented
-- [ ] generate_data_set
-- [ ] start_support_data_export
-
-## mediaconvert - 0% implemented
-- [ ] cancel_job
-- [ ] create_job
-- [ ] create_job_template
-- [ ] create_preset
-- [ ] create_queue
-- [ ] delete_job_template
-- [ ] delete_preset
-- [ ] delete_queue
-- [ ] describe_endpoints
-- [ ] get_job
-- [ ] get_job_template
-- [ ] get_preset
-- [ ] get_queue
-- [ ] list_job_templates
-- [ ] list_jobs
-- [ ] list_presets
-- [ ] list_queues
-- [ ] list_tags_for_resource
-- [ ] tag_resource
-- [ ] untag_resource
-- [ ] update_job_template
-- [ ] update_preset
-- [ ] update_queue
-
-## medialive - 0% implemented
-- [ ] create_channel
-- [ ] create_input
-- [ ] create_input_security_group
-- [ ] delete_channel
-- [ ] delete_input
-- [ ] delete_input_security_group
-- [ ] delete_reservation
-- [ ] describe_channel
-- [ ] describe_input
-- [ ] describe_input_security_group
-- [ ] describe_offering
-- [ ] describe_reservation
-- [ ] list_channels
-- [ ] list_input_security_groups
-- [ ] list_inputs
-- [ ] list_offerings
-- [ ] list_reservations
-- [ ] purchase_offering
-- [ ] start_channel
-- [ ] stop_channel
-- [ ] update_channel
-- [ ] update_input
-- [ ] update_input_security_group
-
-## mediapackage - 0% implemented
-- [ ] create_channel
-- [ ] create_origin_endpoint
-- [ ] delete_channel
-- [ ] delete_origin_endpoint
-- [ ] describe_channel
-- [ ] describe_origin_endpoint
-- [ ] list_channels
-- [ ] list_origin_endpoints
-- [ ] rotate_channel_credentials
-- [ ] update_channel
-- [ ] update_origin_endpoint
-
-## mediastore - 0% implemented
-- [ ] create_container
-- [ ] delete_container
-- [ ] delete_container_policy
-- [ ] delete_cors_policy
-- [ ] describe_container
-- [ ] get_container_policy
-- [ ] get_cors_policy
-- [ ] list_containers
-- [ ] put_container_policy
-- [ ] put_cors_policy
-
-## mediastore-data - 0% implemented
-- [ ] delete_object
-- [ ] describe_object
-- [ ] get_object
-- [ ] list_items
-- [ ] put_object
-
-## mediatailor - 0% implemented
-- [ ] delete_playback_configuration
-- [ ] get_playback_configuration
-- [ ] list_playback_configurations
-- [ ] put_playback_configuration
-
-## meteringmarketplace - 0% implemented
-- [ ] batch_meter_usage
-- [ ] meter_usage
-- [ ] resolve_customer
-
-## mgh - 0% implemented
-- [ ] associate_created_artifact
-- [ ] associate_discovered_resource
-- [ ] create_progress_update_stream
-- [ ] delete_progress_update_stream
-- [ ] describe_application_state
-- [ ] describe_migration_task
-- [ ] disassociate_created_artifact
-- [ ] disassociate_discovered_resource
-- [ ] import_migration_task
-- [ ] list_created_artifacts
-- [ ] list_discovered_resources
-- [ ] list_migration_tasks
-- [ ] list_progress_update_streams
-- [ ] notify_application_state
-- [ ] notify_migration_task_state
-- [ ] put_resource_attributes
-
-## mobile - 0% implemented
-- [ ] create_project
-- [ ] delete_project
-- [ ] describe_bundle
-- [ ] describe_project
-- [ ] export_bundle
-- [ ] export_project
-- [ ] list_bundles
-- [ ] list_projects
-- [ ] update_project
-
-## mq - 0% implemented
-- [ ] create_broker
-- [ ] create_configuration
-- [ ] create_user
-- [ ] delete_broker
-- [ ] delete_user
-- [ ] describe_broker
-- [ ] describe_configuration
-- [ ] describe_configuration_revision
-- [ ] describe_user
-- [ ] list_brokers
-- [ ] list_configuration_revisions
-- [ ] list_configurations
-- [ ] list_users
-- [ ] reboot_broker
-- [ ] update_broker
-- [ ] update_configuration
-- [ ] update_user
-
-## mturk - 0% implemented
-- [ ] accept_qualification_request
-- [ ] approve_assignment
-- [ ] associate_qualification_with_worker
-- [ ] create_additional_assignments_for_hit
-- [ ] create_hit
-- [ ] create_hit_type
-- [ ] create_hit_with_hit_type
-- [ ] create_qualification_type
-- [ ] create_worker_block
-- [ ] delete_hit
-- [ ] delete_qualification_type
-- [ ] delete_worker_block
-- [ ] disassociate_qualification_from_worker
-- [ ] get_account_balance
-- [ ] get_assignment
-- [ ] get_file_upload_url
-- [ ] get_hit
-- [ ] get_qualification_score
-- [ ] get_qualification_type
-- [ ] list_assignments_for_hit
-- [ ] list_bonus_payments
-- [ ] list_hits
-- [ ] list_hits_for_qualification_type
-- [ ] list_qualification_requests
-- [ ] list_qualification_types
-- [ ] list_review_policy_results_for_hit
-- [ ] list_reviewable_hits
-- [ ] list_worker_blocks
-- [ ] list_workers_with_qualification_type
-- [ ] notify_workers
-- [ ] reject_assignment
-- [ ] reject_qualification_request
-- [ ] send_bonus
-- [ ] send_test_event_notification
-- [ ] update_expiration_for_hit
-- [ ] update_hit_review_status
-- [ ] update_hit_type_of_hit
-- [ ] update_notification_settings
-- [ ] update_qualification_type
-
-## neptune - 0% implemented
-- [ ] add_role_to_db_cluster
-- [ ] add_source_identifier_to_subscription
-- [ ] add_tags_to_resource
-- [ ] apply_pending_maintenance_action
-- [ ] copy_db_cluster_parameter_group
-- [ ] copy_db_cluster_snapshot
-- [ ] copy_db_parameter_group
-- [ ] create_db_cluster
-- [ ] create_db_cluster_parameter_group
-- [ ] create_db_cluster_snapshot
-- [ ] create_db_instance
-- [ ] create_db_parameter_group
-- [ ] create_db_subnet_group
-- [ ] create_event_subscription
-- [ ] delete_db_cluster
-- [ ] delete_db_cluster_parameter_group
-- [ ] delete_db_cluster_snapshot
-- [ ] delete_db_instance
-- [ ] delete_db_parameter_group
-- [ ] delete_db_subnet_group
-- [ ] delete_event_subscription
-- [ ] describe_db_cluster_parameter_groups
-- [ ] describe_db_cluster_parameters
-- [ ] describe_db_cluster_snapshot_attributes
-- [ ] describe_db_cluster_snapshots
-- [ ] describe_db_clusters
-- [ ] describe_db_engine_versions
-- [ ] describe_db_instances
-- [ ] describe_db_parameter_groups
-- [ ] describe_db_parameters
-- [ ] describe_db_subnet_groups
-- [ ] describe_engine_default_cluster_parameters
-- [ ] describe_engine_default_parameters
-- [ ] describe_event_categories
-- [ ] describe_event_subscriptions
-- [ ] describe_events
-- [ ] describe_orderable_db_instance_options
-- [ ] describe_pending_maintenance_actions
-- [ ] describe_valid_db_instance_modifications
-- [ ] failover_db_cluster
-- [ ] list_tags_for_resource
-- [ ] modify_db_cluster
-- [ ] modify_db_cluster_parameter_group
-- [ ] modify_db_cluster_snapshot_attribute
-- [ ] modify_db_instance
-- [ ] modify_db_parameter_group
-- [ ] modify_db_subnet_group
-- [ ] modify_event_subscription
-- [ ] promote_read_replica_db_cluster
-- [ ] reboot_db_instance
-- [ ] remove_role_from_db_cluster
-- [ ] remove_source_identifier_from_subscription
-- [ ] remove_tags_from_resource
-- [ ] reset_db_cluster_parameter_group
-- [ ] reset_db_parameter_group
-- [ ] restore_db_cluster_from_snapshot
-- [ ] restore_db_cluster_to_point_in_time
-
-## opsworks - 12% implemented
-- [ ] assign_instance
-- [ ] assign_volume
-- [ ] associate_elastic_ip
-- [ ] attach_elastic_load_balancer
-- [ ] clone_stack
-- [X] create_app
-- [ ] create_deployment
-- [X] create_instance
-- [X] create_layer
-- [X] create_stack
-- [ ] create_user_profile
-- [ ] delete_app
-- [ ] delete_instance
-- [ ] delete_layer
-- [ ] delete_stack
-- [ ] delete_user_profile
-- [ ] deregister_ecs_cluster
-- [ ] deregister_elastic_ip
-- [ ] deregister_instance
-- [ ] deregister_rds_db_instance
-- [ ] deregister_volume
-- [ ] describe_agent_versions
-- [X] describe_apps
-- [ ] describe_commands
-- [ ] describe_deployments
-- [ ] describe_ecs_clusters
-- [ ] describe_elastic_ips
-- [ ] describe_elastic_load_balancers
-- [X] describe_instances
-- [X] describe_layers
-- [ ] describe_load_based_auto_scaling
-- [ ] describe_my_user_profile
-- [ ] describe_operating_systems
-- [ ] describe_permissions
-- [ ] describe_raid_arrays
-- [ ] describe_rds_db_instances
-- [ ] describe_service_errors
-- [ ] describe_stack_provisioning_parameters
-- [ ] describe_stack_summary
-- [X] describe_stacks
-- [ ] describe_time_based_auto_scaling
-- [ ] describe_user_profiles
-- [ ] describe_volumes
-- [ ] detach_elastic_load_balancer
-- [ ] disassociate_elastic_ip
-- [ ] get_hostname_suggestion
-- [ ] grant_access
-- [ ] list_tags
-- [ ] reboot_instance
-- [ ] register_ecs_cluster
-- [ ] register_elastic_ip
-- [ ] register_instance
-- [ ] register_rds_db_instance
-- [ ] register_volume
-- [ ] set_load_based_auto_scaling
-- [ ] set_permission
-- [ ] set_time_based_auto_scaling
-- [X] start_instance
-- [ ] start_stack
-- [ ] stop_instance
-- [ ] stop_stack
-- [ ] tag_resource
-- [ ] unassign_instance
-- [ ] unassign_volume
-- [ ] untag_resource
-- [ ] update_app
-- [ ] update_elastic_ip
-- [ ] update_instance
-- [ ] update_layer
-- [ ] update_my_user_profile
-- [ ] update_rds_db_instance
-- [ ] update_stack
-- [ ] update_user_profile
-- [ ] update_volume
-
-## opsworkscm - 0% implemented
-- [ ] associate_node
-- [ ] create_backup
-- [ ] create_server
-- [ ] delete_backup
-- [ ] delete_server
-- [ ] describe_account_attributes
-- [ ] describe_backups
-- [ ] describe_events
-- [ ] describe_node_association_status
-- [ ] describe_servers
-- [ ] disassociate_node
-- [ ] restore_server
-- [ ] start_maintenance
-- [ ] update_server
-- [ ] update_server_engine_attributes
-
-## organizations - 0% implemented
-- [ ] accept_handshake
-- [ ] attach_policy
-- [ ] cancel_handshake
-- [ ] create_account
-- [ ] create_organization
-- [ ] create_organizational_unit
-- [ ] create_policy
-- [ ] decline_handshake
-- [ ] delete_organization
-- [ ] delete_organizational_unit
-- [ ] delete_policy
-- [ ] describe_account
-- [ ] describe_create_account_status
-- [ ] describe_handshake
-- [ ] describe_organization
-- [ ] describe_organizational_unit
-- [ ] describe_policy
-- [ ] detach_policy
-- [ ] disable_aws_service_access
-- [ ] disable_policy_type
-- [ ] enable_all_features
-- [ ] enable_aws_service_access
-- [ ] enable_policy_type
-- [ ] invite_account_to_organization
-- [ ] leave_organization
-- [ ] list_accounts
-- [ ] list_accounts_for_parent
-- [ ] list_aws_service_access_for_organization
-- [ ] list_children
-- [ ] list_create_account_status
-- [ ] list_handshakes_for_account
-- [ ] list_handshakes_for_organization
-- [ ] list_organizational_units_for_parent
-- [ ] list_parents
-- [ ] list_policies
-- [ ] list_policies_for_target
-- [ ] list_roots
-- [ ] list_targets_for_policy
-- [ ] move_account
-- [ ] remove_account_from_organization
-- [ ] update_organizational_unit
-- [ ] update_policy
-
-## pi - 0% implemented
-- [ ] describe_dimension_keys
-- [ ] get_resource_metrics
-
-## pinpoint - 0% implemented
-- [ ] create_app
-- [ ] create_campaign
-- [ ] create_export_job
-- [ ] create_import_job
-- [ ] create_segment
-- [ ] delete_adm_channel
-- [ ] delete_apns_channel
-- [ ] delete_apns_sandbox_channel
-- [ ] delete_apns_voip_channel
-- [ ] delete_apns_voip_sandbox_channel
-- [ ] delete_app
-- [ ] delete_baidu_channel
-- [ ] delete_campaign
-- [ ] delete_email_channel
-- [ ] delete_endpoint
-- [ ] delete_event_stream
-- [ ] delete_gcm_channel
-- [ ] delete_segment
-- [ ] delete_sms_channel
-- [ ] delete_user_endpoints
-- [ ] get_adm_channel
-- [ ] get_apns_channel
-- [ ] get_apns_sandbox_channel
-- [ ] get_apns_voip_channel
-- [ ] get_apns_voip_sandbox_channel
-- [ ] get_app
-- [ ] get_application_settings
-- [ ] get_apps
-- [ ] get_baidu_channel
-- [ ] get_campaign
-- [ ] get_campaign_activities
-- [ ] get_campaign_version
-- [ ] get_campaign_versions
-- [ ] get_campaigns
-- [ ] get_channels
-- [ ] get_email_channel
-- [ ] get_endpoint
-- [ ] get_event_stream
-- [ ] get_export_job
-- [ ] get_export_jobs
-- [ ] get_gcm_channel
-- [ ] get_import_job
-- [ ] get_import_jobs
-- [ ] get_segment
-- [ ] get_segment_export_jobs
-- [ ] get_segment_import_jobs
-- [ ] get_segment_version
-- [ ] get_segment_versions
-- [ ] get_segments
-- [ ] get_sms_channel
-- [ ] get_user_endpoints
-- [ ] phone_number_validate
-- [ ] put_event_stream
-- [ ] put_events
-- [ ] remove_attributes
-- [ ] send_messages
-- [ ] send_users_messages
-- [ ] update_adm_channel
-- [ ] update_apns_channel
-- [ ] update_apns_sandbox_channel
-- [ ] update_apns_voip_channel
-- [ ] update_apns_voip_sandbox_channel
-- [ ] update_application_settings
-- [ ] update_baidu_channel
-- [ ] update_campaign
-- [ ] update_email_channel
-- [ ] update_endpoint
-- [ ] update_endpoints_batch
-- [ ] update_gcm_channel
-- [ ] update_segment
-- [ ] update_sms_channel
-
-## polly - 55% implemented
-- [X] delete_lexicon
-- [X] describe_voices
-- [X] get_lexicon
-- [ ] get_speech_synthesis_task
-- [X] list_lexicons
-- [ ] list_speech_synthesis_tasks
-- [X] put_lexicon
-- [ ] start_speech_synthesis_task
-- [ ] synthesize_speech
-
-## pricing - 0% implemented
-- [ ] describe_services
-- [ ] get_attribute_values
-- [ ] get_products
-
-## rds - 0% implemented
-- [ ] add_role_to_db_cluster
-- [ ] add_source_identifier_to_subscription
-- [ ] add_tags_to_resource
-- [ ] apply_pending_maintenance_action
-- [ ] authorize_db_security_group_ingress
-- [ ] backtrack_db_cluster
-- [ ] copy_db_cluster_parameter_group
-- [ ] copy_db_cluster_snapshot
-- [ ] copy_db_parameter_group
-- [ ] copy_db_snapshot
-- [ ] copy_option_group
-- [ ] create_db_cluster
-- [ ] create_db_cluster_parameter_group
-- [ ] create_db_cluster_snapshot
-- [ ] create_db_instance
-- [ ] create_db_instance_read_replica
-- [ ] create_db_parameter_group
-- [ ] create_db_security_group
-- [ ] create_db_snapshot
-- [ ] create_db_subnet_group
-- [ ] create_event_subscription
-- [ ] create_option_group
-- [ ] delete_db_cluster
-- [ ] delete_db_cluster_parameter_group
-- [ ] delete_db_cluster_snapshot
-- [ ] delete_db_instance
-- [ ] delete_db_parameter_group
-- [ ] delete_db_security_group
-- [ ] delete_db_snapshot
-- [ ] delete_db_subnet_group
-- [ ] delete_event_subscription
-- [ ] delete_option_group
-- [ ] describe_account_attributes
-- [ ] describe_certificates
-- [ ] describe_db_cluster_backtracks
-- [ ] describe_db_cluster_parameter_groups
-- [ ] describe_db_cluster_parameters
-- [ ] describe_db_cluster_snapshot_attributes
-- [ ] describe_db_cluster_snapshots
-- [ ] describe_db_clusters
-- [ ] describe_db_engine_versions
-- [ ] describe_db_instances
-- [ ] describe_db_log_files
-- [ ] describe_db_parameter_groups
-- [ ] describe_db_parameters
-- [ ] describe_db_security_groups
-- [ ] describe_db_snapshot_attributes
-- [ ] describe_db_snapshots
-- [ ] describe_db_subnet_groups
-- [ ] describe_engine_default_cluster_parameters
-- [ ] describe_engine_default_parameters
-- [ ] describe_event_categories
-- [ ] describe_event_subscriptions
-- [ ] describe_events
-- [ ] describe_option_group_options
-- [ ] describe_option_groups
-- [ ] describe_orderable_db_instance_options
-- [ ] describe_pending_maintenance_actions
-- [ ] describe_reserved_db_instances
-- [ ] describe_reserved_db_instances_offerings
-- [ ] describe_source_regions
-- [ ] describe_valid_db_instance_modifications
-- [ ] download_db_log_file_portion
-- [ ] failover_db_cluster
-- [ ] list_tags_for_resource
-- [ ] modify_current_db_cluster_capacity
-- [ ] modify_db_cluster
-- [ ] modify_db_cluster_parameter_group
-- [ ] modify_db_cluster_snapshot_attribute
-- [ ] modify_db_instance
-- [ ] modify_db_parameter_group
-- [ ] modify_db_snapshot
-- [ ] modify_db_snapshot_attribute
-- [ ] modify_db_subnet_group
-- [ ] modify_event_subscription
-- [ ] modify_option_group
-- [ ] promote_read_replica
-- [ ] promote_read_replica_db_cluster
-- [ ] purchase_reserved_db_instances_offering
-- [ ] reboot_db_instance
-- [ ] remove_role_from_db_cluster
-- [ ] remove_source_identifier_from_subscription
-- [ ] remove_tags_from_resource
-- [ ] reset_db_cluster_parameter_group
-- [ ] reset_db_parameter_group
-- [ ] restore_db_cluster_from_s3
-- [ ] restore_db_cluster_from_snapshot
-- [ ] restore_db_cluster_to_point_in_time
-- [ ] restore_db_instance_from_db_snapshot
-- [ ] restore_db_instance_from_s3
-- [ ] restore_db_instance_to_point_in_time
-- [ ] revoke_db_security_group_ingress
-- [ ] start_db_instance
-- [ ] stop_db_instance
-
-## redshift - 38% implemented
-- [ ] accept_reserved_node_exchange
-- [ ] authorize_cluster_security_group_ingress
-- [ ] authorize_snapshot_access
-- [ ] copy_cluster_snapshot
-- [X] create_cluster
-- [X] create_cluster_parameter_group
-- [X] create_cluster_security_group
-- [X] create_cluster_snapshot
-- [X] create_cluster_subnet_group
-- [ ] create_event_subscription
-- [ ] create_hsm_client_certificate
-- [ ] create_hsm_configuration
-- [X] create_snapshot_copy_grant
-- [X] create_tags
-- [X] delete_cluster
-- [X] delete_cluster_parameter_group
-- [X] delete_cluster_security_group
-- [X] delete_cluster_snapshot
-- [X] delete_cluster_subnet_group
-- [ ] delete_event_subscription
-- [ ] delete_hsm_client_certificate
-- [ ] delete_hsm_configuration
-- [X] delete_snapshot_copy_grant
-- [X] delete_tags
-- [ ] describe_cluster_db_revisions
-- [X] describe_cluster_parameter_groups
-- [ ] describe_cluster_parameters
-- [X] describe_cluster_security_groups
-- [X] describe_cluster_snapshots
-- [X] describe_cluster_subnet_groups
-- [ ] describe_cluster_tracks
-- [ ] describe_cluster_versions
-- [X] describe_clusters
-- [ ] describe_default_cluster_parameters
-- [ ] describe_event_categories
-- [ ] describe_event_subscriptions
-- [ ] describe_events
-- [ ] describe_hsm_client_certificates
-- [ ] describe_hsm_configurations
-- [ ] describe_logging_status
-- [ ] describe_orderable_cluster_options
-- [ ] describe_reserved_node_offerings
-- [ ] describe_reserved_nodes
-- [ ] describe_resize
-- [X] describe_snapshot_copy_grants
-- [ ] describe_table_restore_status
-- [X] describe_tags
-- [ ] disable_logging
-- [X] disable_snapshot_copy
-- [ ] enable_logging
-- [X] enable_snapshot_copy
-- [ ] get_cluster_credentials
-- [ ] get_reserved_node_exchange_offerings
-- [X] modify_cluster
-- [ ] modify_cluster_db_revision
-- [ ] modify_cluster_iam_roles
-- [ ] modify_cluster_parameter_group
-- [ ] modify_cluster_subnet_group
-- [ ] modify_event_subscription
-- [X] modify_snapshot_copy_retention_period
-- [ ] purchase_reserved_node_offering
-- [ ] reboot_cluster
-- [ ] reset_cluster_parameter_group
-- [X] restore_from_cluster_snapshot
-- [ ] restore_table_from_cluster_snapshot
-- [ ] revoke_cluster_security_group_ingress
-- [ ] revoke_snapshot_access
-- [ ] rotate_encryption_key
-
-## rekognition - 0% implemented
-- [ ] compare_faces
-- [ ] create_collection
-- [ ] create_stream_processor
-- [ ] delete_collection
-- [ ] delete_faces
-- [ ] delete_stream_processor
-- [ ] describe_stream_processor
-- [ ] detect_faces
-- [ ] detect_labels
-- [ ] detect_moderation_labels
-- [ ] detect_text
-- [ ] get_celebrity_info
-- [ ] get_celebrity_recognition
-- [ ] get_content_moderation
-- [ ] get_face_detection
-- [ ] get_face_search
-- [ ] get_label_detection
-- [ ] get_person_tracking
-- [ ] index_faces
-- [ ] list_collections
-- [ ] list_faces
-- [ ] list_stream_processors
-- [ ] recognize_celebrities
-- [ ] search_faces
-- [ ] search_faces_by_image
-- [ ] start_celebrity_recognition
-- [ ] start_content_moderation
-- [ ] start_face_detection
-- [ ] start_face_search
-- [ ] start_label_detection
-- [ ] start_person_tracking
-- [ ] start_stream_processor
-- [ ] stop_stream_processor
-
-## resource-groups - 0% implemented
-- [ ] create_group
-- [ ] delete_group
-- [ ] get_group
-- [ ] get_group_query
-- [ ] get_tags
-- [ ] list_group_resources
-- [ ] list_groups
-- [ ] search_resources
-- [ ] tag
-- [ ] untag
-- [ ] update_group
-- [ ] update_group_query
-
-## resourcegroupstaggingapi - 60% implemented
-- [X] get_resources
-- [X] get_tag_keys
-- [X] get_tag_values
-- [ ] tag_resources
-- [ ] untag_resources
-
-## route53 - 12% implemented
-- [ ] associate_vpc_with_hosted_zone
-- [ ] change_resource_record_sets
-- [X] change_tags_for_resource
-- [X] create_health_check
-- [X] create_hosted_zone
-- [ ] create_query_logging_config
-- [ ] create_reusable_delegation_set
-- [ ] create_traffic_policy
-- [ ] create_traffic_policy_instance
-- [ ] create_traffic_policy_version
-- [ ] create_vpc_association_authorization
-- [X] delete_health_check
-- [X] delete_hosted_zone
-- [ ] delete_query_logging_config
-- [ ] delete_reusable_delegation_set
-- [ ] delete_traffic_policy
-- [ ] delete_traffic_policy_instance
-- [ ] delete_vpc_association_authorization
-- [ ] disassociate_vpc_from_hosted_zone
-- [ ] get_account_limit
-- [ ] get_change
-- [ ] get_checker_ip_ranges
-- [ ] get_geo_location
-- [ ] get_health_check
-- [ ] get_health_check_count
-- [ ] get_health_check_last_failure_reason
-- [ ] get_health_check_status
-- [X] get_hosted_zone
-- [ ] get_hosted_zone_count
-- [ ] get_hosted_zone_limit
-- [ ] get_query_logging_config
-- [ ] get_reusable_delegation_set
-- [ ] get_reusable_delegation_set_limit
-- [ ] get_traffic_policy
-- [ ] get_traffic_policy_instance
-- [ ] get_traffic_policy_instance_count
-- [ ] list_geo_locations
-- [ ] list_health_checks
-- [ ] list_hosted_zones
-- [ ] list_hosted_zones_by_name
-- [ ] list_query_logging_configs
-- [ ] list_resource_record_sets
-- [ ] list_reusable_delegation_sets
-- [X] list_tags_for_resource
-- [ ] list_tags_for_resources
-- [ ] list_traffic_policies
-- [ ] list_traffic_policy_instances
-- [ ] list_traffic_policy_instances_by_hosted_zone
-- [ ] list_traffic_policy_instances_by_policy
-- [ ] list_traffic_policy_versions
-- [ ] list_vpc_association_authorizations
-- [ ] test_dns_answer
-- [ ] update_health_check
-- [ ] update_hosted_zone_comment
-- [ ] update_traffic_policy_comment
-- [ ] update_traffic_policy_instance
-
-## route53domains - 0% implemented
-- [ ] check_domain_availability
-- [ ] check_domain_transferability
-- [ ] delete_tags_for_domain
-- [ ] disable_domain_auto_renew
-- [ ] disable_domain_transfer_lock
-- [ ] enable_domain_auto_renew
-- [ ] enable_domain_transfer_lock
-- [ ] get_contact_reachability_status
-- [ ] get_domain_detail
-- [ ] get_domain_suggestions
-- [ ] get_operation_detail
-- [ ] list_domains
-- [ ] list_operations
-- [ ] list_tags_for_domain
-- [ ] register_domain
-- [ ] renew_domain
-- [ ] resend_contact_reachability_email
-- [ ] retrieve_domain_auth_code
-- [ ] transfer_domain
-- [ ] update_domain_contact
-- [ ] update_domain_contact_privacy
-- [ ] update_domain_nameservers
-- [ ] update_tags_for_domain
-- [ ] view_billing
-
-## s3 - 15% implemented
-- [ ] abort_multipart_upload
-- [ ] complete_multipart_upload
-- [ ] copy_object
-- [X] create_bucket
-- [ ] create_multipart_upload
-- [X] delete_bucket
-- [ ] delete_bucket_analytics_configuration
-- [X] delete_bucket_cors
-- [ ] delete_bucket_encryption
-- [ ] delete_bucket_inventory_configuration
-- [ ] delete_bucket_lifecycle
-- [ ] delete_bucket_metrics_configuration
-- [X] delete_bucket_policy
-- [ ] delete_bucket_replication
-- [X] delete_bucket_tagging
-- [ ] delete_bucket_website
-- [ ] delete_object
-- [ ] delete_object_tagging
-- [ ] delete_objects
-- [ ] get_bucket_accelerate_configuration
-- [X] get_bucket_acl
-- [ ] get_bucket_analytics_configuration
-- [ ] get_bucket_cors
-- [ ] get_bucket_encryption
-- [ ] get_bucket_inventory_configuration
-- [ ] get_bucket_lifecycle
-- [ ] get_bucket_lifecycle_configuration
-- [ ] get_bucket_location
-- [ ] get_bucket_logging
-- [ ] get_bucket_metrics_configuration
-- [ ] get_bucket_notification
-- [ ] get_bucket_notification_configuration
-- [X] get_bucket_policy
-- [ ] get_bucket_replication
-- [ ] get_bucket_request_payment
-- [ ] get_bucket_tagging
-- [X] get_bucket_versioning
-- [ ] get_bucket_website
-- [ ] get_object
-- [ ] get_object_acl
-- [ ] get_object_tagging
-- [ ] get_object_torrent
-- [ ] head_bucket
-- [ ] head_object
-- [ ] list_bucket_analytics_configurations
-- [ ] list_bucket_inventory_configurations
-- [ ] list_bucket_metrics_configurations
-- [ ] list_buckets
-- [ ] list_multipart_uploads
-- [ ] list_object_versions
-- [ ] list_objects
-- [ ] list_objects_v2
-- [ ] list_parts
-- [ ] put_bucket_accelerate_configuration
-- [ ] put_bucket_acl
-- [ ] put_bucket_analytics_configuration
-- [X] put_bucket_cors
-- [ ] put_bucket_encryption
-- [ ] put_bucket_inventory_configuration
-- [ ] put_bucket_lifecycle
-- [ ] put_bucket_lifecycle_configuration
-- [X] put_bucket_logging
-- [ ] put_bucket_metrics_configuration
-- [ ] put_bucket_notification
-- [X] put_bucket_notification_configuration
-- [ ] put_bucket_policy
-- [ ] put_bucket_replication
-- [ ] put_bucket_request_payment
-- [X] put_bucket_tagging
-- [ ] put_bucket_versioning
-- [ ] put_bucket_website
-- [ ] put_object
-- [ ] put_object_acl
-- [ ] put_object_tagging
-- [ ] restore_object
-- [ ] select_object_content
-- [ ] upload_part
-- [ ] upload_part_copy
-
-## sagemaker - 0% implemented
-- [ ] add_tags
-- [ ] create_endpoint
-- [ ] create_endpoint_config
-- [ ] create_hyper_parameter_tuning_job
-- [ ] create_model
-- [ ] create_notebook_instance
-- [ ] create_notebook_instance_lifecycle_config
-- [ ] create_presigned_notebook_instance_url
-- [ ] create_training_job
-- [ ] create_transform_job
-- [ ] delete_endpoint
-- [ ] delete_endpoint_config
-- [ ] delete_model
-- [ ] delete_notebook_instance
-- [ ] delete_notebook_instance_lifecycle_config
-- [ ] delete_tags
-- [ ] describe_endpoint
-- [ ] describe_endpoint_config
-- [ ] describe_hyper_parameter_tuning_job
-- [ ] describe_model
-- [ ] describe_notebook_instance
-- [ ] describe_notebook_instance_lifecycle_config
-- [ ] describe_training_job
-- [ ] describe_transform_job
-- [ ] list_endpoint_configs
-- [ ] list_endpoints
-- [ ] list_hyper_parameter_tuning_jobs
-- [ ] list_models
-- [ ] list_notebook_instance_lifecycle_configs
-- [ ] list_notebook_instances
-- [ ] list_tags
-- [ ] list_training_jobs
-- [ ] list_training_jobs_for_hyper_parameter_tuning_job
-- [ ] list_transform_jobs
-- [ ] start_notebook_instance
-- [ ] stop_hyper_parameter_tuning_job
-- [ ] stop_notebook_instance
-- [ ] stop_training_job
-- [ ] stop_transform_job
-- [ ] update_endpoint
-- [ ] update_endpoint_weights_and_capacities
-- [ ] update_notebook_instance
-- [ ] update_notebook_instance_lifecycle_config
-
-## sagemaker-runtime - 0% implemented
-- [ ] invoke_endpoint
-
-## sdb - 0% implemented
-- [ ] batch_delete_attributes
-- [ ] batch_put_attributes
-- [ ] create_domain
-- [ ] delete_attributes
-- [ ] delete_domain
-- [ ] domain_metadata
-- [ ] get_attributes
-- [ ] list_domains
-- [ ] put_attributes
-- [ ] select
-
-## secretsmanager - 22% implemented
-- [ ] cancel_rotate_secret
-- [X] create_secret
-- [ ] delete_resource_policy
-- [ ] delete_secret
-- [X] describe_secret
-- [X] get_random_password
-- [ ] get_resource_policy
-- [X] get_secret_value
-- [ ] list_secret_version_ids
-- [ ] list_secrets
-- [ ] put_resource_policy
-- [ ] put_secret_value
-- [ ] restore_secret
-- [ ] rotate_secret
-- [ ] tag_resource
-- [ ] untag_resource
-- [ ] update_secret
-- [ ] update_secret_version_stage
-
-## serverlessrepo - 0% implemented
-- [ ] create_application
-- [ ] create_application_version
-- [ ] create_cloud_formation_change_set
-- [ ] delete_application
-- [ ] get_application
-- [ ] get_application_policy
-- [ ] list_application_versions
-- [ ] list_applications
-- [ ] put_application_policy
-- [ ] update_application
-
-## servicecatalog - 0% implemented
-- [ ] accept_portfolio_share
-- [ ] associate_principal_with_portfolio
-- [ ] associate_product_with_portfolio
-- [ ] associate_tag_option_with_resource
-- [ ] copy_product
-- [ ] create_constraint
-- [ ] create_portfolio
-- [ ] create_portfolio_share
-- [ ] create_product
-- [ ] create_provisioned_product_plan
-- [ ] create_provisioning_artifact
-- [ ] create_tag_option
-- [ ] delete_constraint
-- [ ] delete_portfolio
-- [ ] delete_portfolio_share
-- [ ] delete_product
-- [ ] delete_provisioned_product_plan
-- [ ] delete_provisioning_artifact
-- [ ] delete_tag_option
-- [ ] describe_constraint
-- [ ] describe_copy_product_status
-- [ ] describe_portfolio
-- [ ] describe_product
-- [ ] describe_product_as_admin
-- [ ] describe_product_view
-- [ ] describe_provisioned_product
-- [ ] describe_provisioned_product_plan
-- [ ] describe_provisioning_artifact
-- [ ] describe_provisioning_parameters
-- [ ] describe_record
-- [ ] describe_tag_option
-- [ ] disassociate_principal_from_portfolio
-- [ ] disassociate_product_from_portfolio
-- [ ] disassociate_tag_option_from_resource
-- [ ] execute_provisioned_product_plan
-- [ ] list_accepted_portfolio_shares
-- [ ] list_constraints_for_portfolio
-- [ ] list_launch_paths
-- [ ] list_portfolio_access
-- [ ] list_portfolios
-- [ ] list_portfolios_for_product
-- [ ] list_principals_for_portfolio
-- [ ] list_provisioned_product_plans
-- [ ] list_provisioning_artifacts
-- [ ] list_record_history
-- [ ] list_resources_for_tag_option
-- [ ] list_tag_options
-- [ ] provision_product
-- [ ] reject_portfolio_share
-- [ ] scan_provisioned_products
-- [ ] search_products
-- [ ] search_products_as_admin
-- [ ] search_provisioned_products
-- [ ] terminate_provisioned_product
-- [ ] update_constraint
-- [ ] update_portfolio
-- [ ] update_product
-- [ ] update_provisioned_product
-- [ ] update_provisioning_artifact
-- [ ] update_tag_option
-
-## servicediscovery - 0% implemented
-- [ ] create_private_dns_namespace
-- [ ] create_public_dns_namespace
-- [ ] create_service
-- [ ] delete_namespace
-- [ ] delete_service
-- [ ] deregister_instance
-- [ ] get_instance
-- [ ] get_instances_health_status
-- [ ] get_namespace
-- [ ] get_operation
-- [ ] get_service
-- [ ] list_instances
-- [ ] list_namespaces
-- [ ] list_operations
-- [ ] list_services
-- [ ] register_instance
-- [ ] update_instance_custom_health_status
-- [ ] update_service
-
-## ses - 11% implemented
-- [ ] clone_receipt_rule_set
-- [ ] create_configuration_set
-- [ ] create_configuration_set_event_destination
-- [ ] create_configuration_set_tracking_options
-- [ ] create_custom_verification_email_template
-- [ ] create_receipt_filter
-- [ ] create_receipt_rule
-- [ ] create_receipt_rule_set
-- [ ] create_template
-- [ ] delete_configuration_set
-- [ ] delete_configuration_set_event_destination
-- [ ] delete_configuration_set_tracking_options
-- [ ] delete_custom_verification_email_template
-- [X] delete_identity
-- [ ] delete_identity_policy
-- [ ] delete_receipt_filter
-- [ ] delete_receipt_rule
-- [ ] delete_receipt_rule_set
-- [ ] delete_template
-- [ ] delete_verified_email_address
-- [ ] describe_active_receipt_rule_set
-- [ ] describe_configuration_set
-- [ ] describe_receipt_rule
-- [ ] describe_receipt_rule_set
-- [ ] get_account_sending_enabled
-- [ ] get_custom_verification_email_template
-- [ ] get_identity_dkim_attributes
-- [ ] get_identity_mail_from_domain_attributes
-- [ ] get_identity_notification_attributes
-- [ ] get_identity_policies
-- [ ] get_identity_verification_attributes
-- [X] get_send_quota
-- [ ] get_send_statistics
-- [ ] get_template
-- [ ] list_configuration_sets
-- [ ] list_custom_verification_email_templates
-- [X] list_identities
-- [ ] list_identity_policies
-- [ ] list_receipt_filters
-- [ ] list_receipt_rule_sets
-- [ ] list_templates
-- [X] list_verified_email_addresses
-- [ ] put_identity_policy
-- [ ] reorder_receipt_rule_set
-- [ ] send_bounce
-- [ ] send_bulk_templated_email
-- [ ] send_custom_verification_email
-- [X] send_email
-- [X] send_raw_email
-- [ ] send_templated_email
-- [ ] set_active_receipt_rule_set
-- [ ] set_identity_dkim_enabled
-- [ ] set_identity_feedback_forwarding_enabled
-- [ ] set_identity_headers_in_notifications_enabled
-- [ ] set_identity_mail_from_domain
-- [ ] set_identity_notification_topic
-- [ ] set_receipt_rule_position
-- [ ] test_render_template
-- [ ] update_account_sending_enabled
-- [ ] update_configuration_set_event_destination
-- [ ] update_configuration_set_reputation_metrics_enabled
-- [ ] update_configuration_set_sending_enabled
-- [ ] update_configuration_set_tracking_options
-- [ ] update_custom_verification_email_template
-- [ ] update_receipt_rule
-- [ ] update_template
-- [ ] verify_domain_dkim
-- [ ] verify_domain_identity
-- [X] verify_email_address
-- [X] verify_email_identity
-
-## shield - 0% implemented
-- [ ] associate_drt_log_bucket
-- [ ] associate_drt_role
-- [ ] create_protection
-- [ ] create_subscription
-- [ ] delete_protection
-- [ ] delete_subscription
-- [ ] describe_attack
-- [ ] describe_drt_access
-- [ ] describe_emergency_contact_settings
-- [ ] describe_protection
-- [ ] describe_subscription
-- [ ] disassociate_drt_log_bucket
-- [ ] disassociate_drt_role
-- [ ] get_subscription_state
-- [ ] list_attacks
-- [ ] list_protections
-- [ ] update_emergency_contact_settings
-- [ ] update_subscription
-
-## sms - 0% implemented
-- [ ] create_replication_job
-- [ ] delete_replication_job
-- [ ] delete_server_catalog
-- [ ] disassociate_connector
-- [ ] get_connectors
-- [ ] get_replication_jobs
-- [ ] get_replication_runs
-- [ ] get_servers
-- [ ] import_server_catalog
-- [ ] start_on_demand_replication_run
-- [ ] update_replication_job
-
-## snowball - 0% implemented
-- [ ] cancel_cluster
-- [ ] cancel_job
-- [ ] create_address
-- [ ] create_cluster
-- [ ] create_job
-- [ ] describe_address
-- [ ] describe_addresses
-- [ ] describe_cluster
-- [ ] describe_job
-- [ ] get_job_manifest
-- [ ] get_job_unlock_code
-- [ ] get_snowball_usage
-- [ ] list_cluster_jobs
-- [ ] list_clusters
-- [ ] list_compatible_images
-- [ ] list_jobs
-- [ ] update_cluster
-- [ ] update_job
-
-## sns - 53% implemented
-- [ ] add_permission
-- [ ] check_if_phone_number_is_opted_out
-- [ ] confirm_subscription
-- [X] create_platform_application
-- [X] create_platform_endpoint
-- [X] create_topic
-- [X] delete_endpoint
-- [X] delete_platform_application
-- [X] delete_topic
-- [ ] get_endpoint_attributes
-- [ ] get_platform_application_attributes
-- [ ] get_sms_attributes
-- [X] get_subscription_attributes
-- [ ] get_topic_attributes
-- [X] list_endpoints_by_platform_application
-- [ ] list_phone_numbers_opted_out
-- [X] list_platform_applications
-- [X] list_subscriptions
-- [ ] list_subscriptions_by_topic
-- [X] list_topics
-- [ ] opt_in_phone_number
-- [X] publish
-- [ ] remove_permission
-- [X] set_endpoint_attributes
-- [ ] set_platform_application_attributes
-- [ ] set_sms_attributes
-- [X] set_subscription_attributes
-- [ ] set_topic_attributes
-- [X] subscribe
-- [X] unsubscribe
-
-## sqs - 65% implemented
-- [X] add_permission
-- [X] change_message_visibility
-- [ ] change_message_visibility_batch
-- [X] create_queue
-- [X] delete_message
-- [ ] delete_message_batch
-- [X] delete_queue
-- [ ] get_queue_attributes
-- [ ] get_queue_url
-- [X] list_dead_letter_source_queues
-- [ ] list_queue_tags
-- [X] list_queues
-- [X] purge_queue
-- [ ] receive_message
-- [X] remove_permission
-- [X] send_message
-- [ ] send_message_batch
-- [X] set_queue_attributes
-- [X] tag_queue
-- [X] untag_queue
-
-## ssm - 10% implemented
-- [X] add_tags_to_resource
-- [ ] cancel_command
-- [ ] create_activation
-- [ ] create_association
-- [ ] create_association_batch
-- [ ] create_document
-- [ ] create_maintenance_window
-- [ ] create_patch_baseline
-- [ ] create_resource_data_sync
-- [ ] delete_activation
-- [ ] delete_association
-- [ ] delete_document
-- [ ] delete_inventory
-- [ ] delete_maintenance_window
-- [X] delete_parameter
-- [X] delete_parameters
-- [ ] delete_patch_baseline
-- [ ] delete_resource_data_sync
-- [ ] deregister_managed_instance
-- [ ] deregister_patch_baseline_for_patch_group
-- [ ] deregister_target_from_maintenance_window
-- [ ] deregister_task_from_maintenance_window
-- [ ] describe_activations
-- [ ] describe_association
-- [ ] describe_association_execution_targets
-- [ ] describe_association_executions
-- [ ] describe_automation_executions
-- [ ] describe_automation_step_executions
-- [ ] describe_available_patches
-- [ ] describe_document
-- [ ] describe_document_permission
-- [ ] describe_effective_instance_associations
-- [ ] describe_effective_patches_for_patch_baseline
-- [ ] describe_instance_associations_status
-- [ ] describe_instance_information
-- [ ] describe_instance_patch_states
-- [ ] describe_instance_patch_states_for_patch_group
-- [ ] describe_instance_patches
-- [ ] describe_inventory_deletions
-- [ ] describe_maintenance_window_execution_task_invocations
-- [ ] describe_maintenance_window_execution_tasks
-- [ ] describe_maintenance_window_executions
-- [ ] describe_maintenance_window_targets
-- [ ] describe_maintenance_window_tasks
-- [ ] describe_maintenance_windows
-- [ ] describe_parameters
-- [ ] describe_patch_baselines
-- [ ] describe_patch_group_state
-- [ ] describe_patch_groups
-- [ ] get_automation_execution
-- [ ] get_command_invocation
-- [ ] get_default_patch_baseline
-- [ ] get_deployable_patch_snapshot_for_instance
-- [ ] get_document
-- [ ] get_inventory
-- [ ] get_inventory_schema
-- [ ] get_maintenance_window
-- [ ] get_maintenance_window_execution
-- [ ] get_maintenance_window_execution_task
-- [ ] get_maintenance_window_execution_task_invocation
-- [ ] get_maintenance_window_task
-- [X] get_parameter
-- [ ] get_parameter_history
-- [X] get_parameters
-- [X] get_parameters_by_path
-- [ ] get_patch_baseline
-- [ ] get_patch_baseline_for_patch_group
-- [ ] label_parameter_version
-- [ ] list_association_versions
-- [ ] list_associations
-- [ ] list_command_invocations
-- [X] list_commands
-- [ ] list_compliance_items
-- [ ] list_compliance_summaries
-- [ ] list_document_versions
-- [ ] list_documents
-- [ ] list_inventory_entries
-- [ ] list_resource_compliance_summaries
-- [ ] list_resource_data_sync
-- [X] list_tags_for_resource
-- [ ] modify_document_permission
-- [ ] put_compliance_items
-- [ ] put_inventory
-- [X] put_parameter
-- [ ] register_default_patch_baseline
-- [ ] register_patch_baseline_for_patch_group
-- [ ] register_target_with_maintenance_window
-- [ ] register_task_with_maintenance_window
-- [X] remove_tags_from_resource
-- [ ] send_automation_signal
-- [X] send_command
-- [ ] start_associations_once
-- [ ] start_automation_execution
-- [ ] stop_automation_execution
-- [ ] update_association
-- [ ] update_association_status
-- [ ] update_document
-- [ ] update_document_default_version
-- [ ] update_maintenance_window
-- [ ] update_maintenance_window_target
-- [ ] update_maintenance_window_task
-- [ ] update_managed_instance_role
-- [ ] update_patch_baseline
-
-## stepfunctions - 0% implemented
-- [ ] create_activity
-- [ ] create_state_machine
-- [ ] delete_activity
-- [ ] delete_state_machine
-- [ ] describe_activity
-- [ ] describe_execution
-- [ ] describe_state_machine
-- [ ] describe_state_machine_for_execution
-- [ ] get_activity_task
-- [ ] get_execution_history
-- [ ] list_activities
-- [ ] list_executions
-- [ ] list_state_machines
-- [ ] send_task_failure
-- [ ] send_task_heartbeat
-- [ ] send_task_success
-- [ ] start_execution
-- [ ] stop_execution
-- [ ] update_state_machine
-
-## storagegateway - 0% implemented
-- [ ] activate_gateway
-- [ ] add_cache
-- [ ] add_tags_to_resource
-- [ ] add_upload_buffer
-- [ ] add_working_storage
-- [ ] cancel_archival
-- [ ] cancel_retrieval
-- [ ] create_cached_iscsi_volume
-- [ ] create_nfs_file_share
-- [ ] create_smb_file_share
-- [ ] create_snapshot
-- [ ] create_snapshot_from_volume_recovery_point
-- [ ] create_stored_iscsi_volume
-- [ ] create_tape_with_barcode
-- [ ] create_tapes
-- [ ] delete_bandwidth_rate_limit
-- [ ] delete_chap_credentials
-- [ ] delete_file_share
-- [ ] delete_gateway
-- [ ] delete_snapshot_schedule
-- [ ] delete_tape
-- [ ] delete_tape_archive
-- [ ] delete_volume
-- [ ] describe_bandwidth_rate_limit
-- [ ] describe_cache
-- [ ] describe_cached_iscsi_volumes
-- [ ] describe_chap_credentials
-- [ ] describe_gateway_information
-- [ ] describe_maintenance_start_time
-- [ ] describe_nfs_file_shares
-- [ ] describe_smb_file_shares
-- [ ] describe_smb_settings
-- [ ] describe_snapshot_schedule
-- [ ] describe_stored_iscsi_volumes
-- [ ] describe_tape_archives
-- [ ] describe_tape_recovery_points
-- [ ] describe_tapes
-- [ ] describe_upload_buffer
-- [ ] describe_vtl_devices
-- [ ] describe_working_storage
-- [ ] disable_gateway
-- [ ] join_domain
-- [ ] list_file_shares
-- [ ] list_gateways
-- [ ] list_local_disks
-- [ ] list_tags_for_resource
-- [ ] list_tapes
-- [ ] list_volume_initiators
-- [ ] list_volume_recovery_points
-- [ ] list_volumes
-- [ ] notify_when_uploaded
-- [ ] refresh_cache
-- [ ] remove_tags_from_resource
-- [ ] reset_cache
-- [ ] retrieve_tape_archive
-- [ ] retrieve_tape_recovery_point
-- [ ] set_local_console_password
-- [ ] set_smb_guest_password
-- [ ] shutdown_gateway
-- [ ] start_gateway
-- [ ] update_bandwidth_rate_limit
-- [ ] update_chap_credentials
-- [ ] update_gateway_information
-- [ ] update_gateway_software_now
-- [ ] update_maintenance_start_time
-- [ ] update_nfs_file_share
-- [ ] update_smb_file_share
-- [ ] update_snapshot_schedule
-- [ ] update_vtl_device_type
-
-## sts - 42% implemented
-- [X] assume_role
-- [ ] assume_role_with_saml
-- [ ] assume_role_with_web_identity
-- [ ] decode_authorization_message
-- [ ] get_caller_identity
-- [X] get_federation_token
-- [X] get_session_token
-
-## support - 0% implemented
-- [ ] add_attachments_to_set
-- [ ] add_communication_to_case
-- [ ] create_case
-- [ ] describe_attachment
-- [ ] describe_cases
-- [ ] describe_communications
-- [ ] describe_services
-- [ ] describe_severity_levels
-- [ ] describe_trusted_advisor_check_refresh_statuses
-- [ ] describe_trusted_advisor_check_result
-- [ ] describe_trusted_advisor_check_summaries
-- [ ] describe_trusted_advisor_checks
-- [ ] refresh_trusted_advisor_check
-- [ ] resolve_case
-
-## swf - 58% implemented
-- [ ] count_closed_workflow_executions
-- [ ] count_open_workflow_executions
-- [X] count_pending_activity_tasks
-- [X] count_pending_decision_tasks
-- [ ] deprecate_activity_type
-- [X] deprecate_domain
-- [ ] deprecate_workflow_type
-- [ ] describe_activity_type
-- [X] describe_domain
-- [X] describe_workflow_execution
-- [ ] describe_workflow_type
-- [ ] get_workflow_execution_history
-- [ ] list_activity_types
-- [X] list_closed_workflow_executions
-- [X] list_domains
-- [X] list_open_workflow_executions
-- [ ] list_workflow_types
-- [X] poll_for_activity_task
-- [X] poll_for_decision_task
-- [X] record_activity_task_heartbeat
-- [ ] register_activity_type
-- [X] register_domain
-- [ ] register_workflow_type
-- [ ] request_cancel_workflow_execution
-- [ ] respond_activity_task_canceled
-- [X] respond_activity_task_completed
-- [X] respond_activity_task_failed
-- [X] respond_decision_task_completed
-- [X] signal_workflow_execution
-- [X] start_workflow_execution
-- [X] terminate_workflow_execution
-
-## transcribe - 0% implemented
-- [ ] create_vocabulary
-- [ ] delete_vocabulary
-- [ ] get_transcription_job
-- [ ] get_vocabulary
-- [ ] list_transcription_jobs
-- [ ] list_vocabularies
-- [ ] start_transcription_job
-- [ ] update_vocabulary
-
-## translate - 0% implemented
-- [ ] translate_text
-
-## waf - 0% implemented
-- [ ] create_byte_match_set
-- [ ] create_geo_match_set
-- [ ] create_ip_set
-- [ ] create_rate_based_rule
-- [ ] create_regex_match_set
-- [ ] create_regex_pattern_set
-- [ ] create_rule
-- [ ] create_rule_group
-- [ ] create_size_constraint_set
-- [ ] create_sql_injection_match_set
-- [ ] create_web_acl
-- [ ] create_xss_match_set
-- [ ] delete_byte_match_set
-- [ ] delete_geo_match_set
-- [ ] delete_ip_set
-- [ ] delete_permission_policy
-- [ ] delete_rate_based_rule
-- [ ] delete_regex_match_set
-- [ ] delete_regex_pattern_set
-- [ ] delete_rule
-- [ ] delete_rule_group
-- [ ] delete_size_constraint_set
-- [ ] delete_sql_injection_match_set
-- [ ] delete_web_acl
-- [ ] delete_xss_match_set
-- [ ] get_byte_match_set
-- [ ] get_change_token
-- [ ] get_change_token_status
-- [ ] get_geo_match_set
-- [ ] get_ip_set
-- [ ] get_permission_policy
-- [ ] get_rate_based_rule
-- [ ] get_rate_based_rule_managed_keys
-- [ ] get_regex_match_set
-- [ ] get_regex_pattern_set
-- [ ] get_rule
-- [ ] get_rule_group
-- [ ] get_sampled_requests
-- [ ] get_size_constraint_set
-- [ ] get_sql_injection_match_set
-- [ ] get_web_acl
-- [ ] get_xss_match_set
-- [ ] list_activated_rules_in_rule_group
-- [ ] list_byte_match_sets
-- [ ] list_geo_match_sets
-- [ ] list_ip_sets
-- [ ] list_rate_based_rules
-- [ ] list_regex_match_sets
-- [ ] list_regex_pattern_sets
-- [ ] list_rule_groups
-- [ ] list_rules
-- [ ] list_size_constraint_sets
-- [ ] list_sql_injection_match_sets
-- [ ] list_subscribed_rule_groups
-- [ ] list_web_acls
-- [ ] list_xss_match_sets
-- [ ] put_permission_policy
-- [ ] update_byte_match_set
-- [ ] update_geo_match_set
-- [ ] update_ip_set
-- [ ] update_rate_based_rule
-- [ ] update_regex_match_set
-- [ ] update_regex_pattern_set
-- [ ] update_rule
-- [ ] update_rule_group
-- [ ] update_size_constraint_set
-- [ ] update_sql_injection_match_set
-- [ ] update_web_acl
-- [ ] update_xss_match_set
-
-## waf-regional - 0% implemented
-- [ ] associate_web_acl
-- [ ] create_byte_match_set
-- [ ] create_geo_match_set
-- [ ] create_ip_set
-- [ ] create_rate_based_rule
-- [ ] create_regex_match_set
-- [ ] create_regex_pattern_set
-- [ ] create_rule
-- [ ] create_rule_group
-- [ ] create_size_constraint_set
-- [ ] create_sql_injection_match_set
-- [ ] create_web_acl
-- [ ] create_xss_match_set
-- [ ] delete_byte_match_set
-- [ ] delete_geo_match_set
-- [ ] delete_ip_set
-- [ ] delete_permission_policy
-- [ ] delete_rate_based_rule
-- [ ] delete_regex_match_set
-- [ ] delete_regex_pattern_set
-- [ ] delete_rule
-- [ ] delete_rule_group
-- [ ] delete_size_constraint_set
-- [ ] delete_sql_injection_match_set
-- [ ] delete_web_acl
-- [ ] delete_xss_match_set
-- [ ] disassociate_web_acl
-- [ ] get_byte_match_set
-- [ ] get_change_token
-- [ ] get_change_token_status
-- [ ] get_geo_match_set
-- [ ] get_ip_set
-- [ ] get_permission_policy
-- [ ] get_rate_based_rule
-- [ ] get_rate_based_rule_managed_keys
-- [ ] get_regex_match_set
-- [ ] get_regex_pattern_set
-- [ ] get_rule
-- [ ] get_rule_group
-- [ ] get_sampled_requests
-- [ ] get_size_constraint_set
-- [ ] get_sql_injection_match_set
-- [ ] get_web_acl
-- [ ] get_web_acl_for_resource
-- [ ] get_xss_match_set
-- [ ] list_activated_rules_in_rule_group
-- [ ] list_byte_match_sets
-- [ ] list_geo_match_sets
-- [ ] list_ip_sets
-- [ ] list_rate_based_rules
-- [ ] list_regex_match_sets
-- [ ] list_regex_pattern_sets
-- [ ] list_resources_for_web_acl
-- [ ] list_rule_groups
-- [ ] list_rules
-- [ ] list_size_constraint_sets
-- [ ] list_sql_injection_match_sets
-- [ ] list_subscribed_rule_groups
-- [ ] list_web_acls
-- [ ] list_xss_match_sets
-- [ ] put_permission_policy
-- [ ] update_byte_match_set
-- [ ] update_geo_match_set
-- [ ] update_ip_set
-- [ ] update_rate_based_rule
-- [ ] update_regex_match_set
-- [ ] update_regex_pattern_set
-- [ ] update_rule
-- [ ] update_rule_group
-- [ ] update_size_constraint_set
-- [ ] update_sql_injection_match_set
-- [ ] update_web_acl
-- [ ] update_xss_match_set
-
-## workdocs - 0% implemented
-- [ ] abort_document_version_upload
-- [ ] activate_user
-- [ ] add_resource_permissions
-- [ ] create_comment
-- [ ] create_custom_metadata
-- [ ] create_folder
-- [ ] create_labels
-- [ ] create_notification_subscription
-- [ ] create_user
-- [ ] deactivate_user
-- [ ] delete_comment
-- [ ] delete_custom_metadata
-- [ ] delete_document
-- [ ] delete_folder
-- [ ] delete_folder_contents
-- [ ] delete_labels
-- [ ] delete_notification_subscription
-- [ ] delete_user
-- [ ] describe_activities
-- [ ] describe_comments
-- [ ] describe_document_versions
-- [ ] describe_folder_contents
-- [ ] describe_groups
-- [ ] describe_notification_subscriptions
-- [ ] describe_resource_permissions
-- [ ] describe_root_folders
-- [ ] describe_users
-- [ ] get_current_user
-- [ ] get_document
-- [ ] get_document_path
-- [ ] get_document_version
-- [ ] get_folder
-- [ ] get_folder_path
-- [ ] initiate_document_version_upload
-- [ ] remove_all_resource_permissions
-- [ ] remove_resource_permission
-- [ ] update_document
-- [ ] update_document_version
-- [ ] update_folder
-- [ ] update_user
-
-## workmail - 0% implemented
-- [ ] associate_delegate_to_resource
-- [ ] associate_member_to_group
-- [ ] create_alias
-- [ ] create_group
-- [ ] create_resource
-- [ ] create_user
-- [ ] delete_alias
-- [ ] delete_group
-- [ ] delete_mailbox_permissions
-- [ ] delete_resource
-- [ ] delete_user
-- [ ] deregister_from_work_mail
-- [ ] describe_group
-- [ ] describe_organization
-- [ ] describe_resource
-- [ ] describe_user
-- [ ] disassociate_delegate_from_resource
-- [ ] disassociate_member_from_group
-- [ ] list_aliases
-- [ ] list_group_members
-- [ ] list_groups
-- [ ] list_mailbox_permissions
-- [ ] list_organizations
-- [ ] list_resource_delegates
-- [ ] list_resources
-- [ ] list_users
-- [ ] put_mailbox_permissions
-- [ ] register_to_work_mail
-- [ ] reset_password
-- [ ] update_primary_email_address
-- [ ] update_resource
-
-## workspaces - 0% implemented
-- [ ] associate_ip_groups
-- [ ] authorize_ip_rules
-- [ ] create_ip_group
-- [ ] create_tags
-- [ ] create_workspaces
-- [ ] delete_ip_group
-- [ ] delete_tags
-- [ ] describe_ip_groups
-- [ ] describe_tags
-- [ ] describe_workspace_bundles
-- [ ] describe_workspace_directories
-- [ ] describe_workspaces
-- [ ] describe_workspaces_connection_status
-- [ ] disassociate_ip_groups
-- [ ] modify_workspace_properties
-- [ ] modify_workspace_state
-- [ ] reboot_workspaces
-- [ ] rebuild_workspaces
-- [ ] revoke_ip_rules
-- [ ] start_workspaces
-- [ ] stop_workspaces
-- [ ] terminate_workspaces
-- [ ] update_rules_of_ip_group
-
-## xray - 0% implemented
-- [ ] batch_get_traces
-- [ ] get_encryption_config
-- [ ] get_service_graph
-- [ ] get_trace_graph
-- [ ] get_trace_summaries
-- [ ] put_encryption_config
-- [ ] put_telemetry_records
-- [ ] put_trace_segments
+
+## acm - 41% implemented
+- [X] add_tags_to_certificate
+- [X] delete_certificate
+- [ ] describe_certificate
+- [ ] export_certificate
+- [X] get_certificate
+- [ ] import_certificate
+- [ ] list_certificates
+- [ ] list_tags_for_certificate
+- [X] remove_tags_from_certificate
+- [X] request_certificate
+- [ ] resend_validation_email
+- [ ] update_certificate_options
+
+## acm-pca - 0% implemented
+- [ ] create_certificate_authority
+- [ ] create_certificate_authority_audit_report
+- [ ] delete_certificate_authority
+- [ ] describe_certificate_authority
+- [ ] describe_certificate_authority_audit_report
+- [ ] get_certificate
+- [ ] get_certificate_authority_certificate
+- [ ] get_certificate_authority_csr
+- [ ] import_certificate_authority_certificate
+- [ ] issue_certificate
+- [ ] list_certificate_authorities
+- [ ] list_tags
+- [ ] revoke_certificate
+- [ ] tag_certificate_authority
+- [ ] untag_certificate_authority
+- [ ] update_certificate_authority
+
+## alexaforbusiness - 0% implemented
+- [ ] associate_contact_with_address_book
+- [ ] associate_device_with_room
+- [ ] associate_skill_group_with_room
+- [ ] create_address_book
+- [ ] create_contact
+- [ ] create_profile
+- [ ] create_room
+- [ ] create_skill_group
+- [ ] create_user
+- [ ] delete_address_book
+- [ ] delete_contact
+- [ ] delete_profile
+- [ ] delete_room
+- [ ] delete_room_skill_parameter
+- [ ] delete_skill_group
+- [ ] delete_user
+- [ ] disassociate_contact_from_address_book
+- [ ] disassociate_device_from_room
+- [ ] disassociate_skill_group_from_room
+- [ ] get_address_book
+- [ ] get_contact
+- [ ] get_device
+- [ ] get_profile
+- [ ] get_room
+- [ ] get_room_skill_parameter
+- [ ] get_skill_group
+- [ ] list_skills
+- [ ] list_tags
+- [ ] put_room_skill_parameter
+- [ ] resolve_room
+- [ ] revoke_invitation
+- [ ] search_address_books
+- [ ] search_contacts
+- [ ] search_devices
+- [ ] search_profiles
+- [ ] search_rooms
+- [ ] search_skill_groups
+- [ ] search_users
+- [ ] send_invitation
+- [ ] start_device_sync
+- [ ] tag_resource
+- [ ] untag_resource
+- [ ] update_address_book
+- [ ] update_contact
+- [ ] update_device
+- [ ] update_profile
+- [ ] update_room
+- [ ] update_skill_group
+
+## apigateway - 24% implemented
+- [ ] create_api_key
+- [ ] create_authorizer
+- [ ] create_base_path_mapping
+- [X] create_deployment
+- [ ] create_documentation_part
+- [ ] create_documentation_version
+- [ ] create_domain_name
+- [ ] create_model
+- [ ] create_request_validator
+- [X] create_resource
+- [X] create_rest_api
+- [X] create_stage
+- [X] create_usage_plan
+- [X] create_usage_plan_key
+- [ ] create_vpc_link
+- [ ] delete_api_key
+- [ ] delete_authorizer
+- [ ] delete_base_path_mapping
+- [ ] delete_client_certificate
+- [X] delete_deployment
+- [ ] delete_documentation_part
+- [ ] delete_documentation_version
+- [ ] delete_domain_name
+- [ ] delete_gateway_response
+- [X] delete_integration
+- [X] delete_integration_response
+- [ ] delete_method
+- [X] delete_method_response
+- [ ] delete_model
+- [ ] delete_request_validator
+- [X] delete_resource
+- [X] delete_rest_api
+- [ ] delete_stage
+- [X] delete_usage_plan
+- [X] delete_usage_plan_key
+- [ ] delete_vpc_link
+- [ ] flush_stage_authorizers_cache
+- [ ] flush_stage_cache
+- [ ] generate_client_certificate
+- [ ] get_account
+- [ ] get_api_key
+- [ ] get_api_keys
+- [ ] get_authorizer
+- [ ] get_authorizers
+- [ ] get_base_path_mapping
+- [ ] get_base_path_mappings
+- [ ] get_client_certificate
+- [ ] get_client_certificates
+- [X] get_deployment
+- [X] get_deployments
+- [ ] get_documentation_part
+- [ ] get_documentation_parts
+- [ ] get_documentation_version
+- [ ] get_documentation_versions
+- [ ] get_domain_name
+- [ ] get_domain_names
+- [ ] get_export
+- [ ] get_gateway_response
+- [ ] get_gateway_responses
+- [X] get_integration
+- [X] get_integration_response
+- [X] get_method
+- [X] get_method_response
+- [ ] get_model
+- [ ] get_model_template
+- [ ] get_models
+- [ ] get_request_validator
+- [ ] get_request_validators
+- [X] get_resource
+- [ ] get_resources
+- [X] get_rest_api
+- [ ] get_rest_apis
+- [ ] get_sdk
+- [ ] get_sdk_type
+- [ ] get_sdk_types
+- [X] get_stage
+- [X] get_stages
+- [ ] get_tags
+- [ ] get_usage
+- [X] get_usage_plan
+- [X] get_usage_plan_key
+- [X] get_usage_plan_keys
+- [X] get_usage_plans
+- [ ] get_vpc_link
+- [ ] get_vpc_links
+- [ ] import_api_keys
+- [ ] import_documentation_parts
+- [ ] import_rest_api
+- [ ] put_gateway_response
+- [ ] put_integration
+- [ ] put_integration_response
+- [ ] put_method
+- [ ] put_method_response
+- [ ] put_rest_api
+- [ ] tag_resource
+- [ ] test_invoke_authorizer
+- [ ] test_invoke_method
+- [ ] untag_resource
+- [ ] update_account
+- [ ] update_api_key
+- [ ] update_authorizer
+- [ ] update_base_path_mapping
+- [ ] update_client_certificate
+- [ ] update_deployment
+- [ ] update_documentation_part
+- [ ] update_documentation_version
+- [ ] update_domain_name
+- [ ] update_gateway_response
+- [ ] update_integration
+- [ ] update_integration_response
+- [ ] update_method
+- [ ] update_method_response
+- [ ] update_model
+- [ ] update_request_validator
+- [ ] update_resource
+- [ ] update_rest_api
+- [X] update_stage
+- [ ] update_usage
+- [ ] update_usage_plan
+- [ ] update_vpc_link
+
+## application-autoscaling - 0% implemented
+- [ ] delete_scaling_policy
+- [ ] delete_scheduled_action
+- [ ] deregister_scalable_target
+- [ ] describe_scalable_targets
+- [ ] describe_scaling_activities
+- [ ] describe_scaling_policies
+- [ ] describe_scheduled_actions
+- [ ] put_scaling_policy
+- [ ] put_scheduled_action
+- [ ] register_scalable_target
+
+## appstream - 0% implemented
+- [ ] associate_fleet
+- [ ] copy_image
+- [ ] create_directory_config
+- [ ] create_fleet
+- [ ] create_image_builder
+- [ ] create_image_builder_streaming_url
+- [ ] create_stack
+- [ ] create_streaming_url
+- [ ] delete_directory_config
+- [ ] delete_fleet
+- [ ] delete_image
+- [ ] delete_image_builder
+- [ ] delete_stack
+- [ ] describe_directory_configs
+- [ ] describe_fleets
+- [ ] describe_image_builders
+- [ ] describe_images
+- [ ] describe_sessions
+- [ ] describe_stacks
+- [ ] disassociate_fleet
+- [ ] expire_session
+- [ ] list_associated_fleets
+- [ ] list_associated_stacks
+- [ ] list_tags_for_resource
+- [ ] start_fleet
+- [ ] start_image_builder
+- [ ] stop_fleet
+- [ ] stop_image_builder
+- [ ] tag_resource
+- [ ] untag_resource
+- [ ] update_directory_config
+- [ ] update_fleet
+- [ ] update_stack
+
+## appsync - 0% implemented
+- [ ] create_api_key
+- [ ] create_data_source
+- [ ] create_graphql_api
+- [ ] create_resolver
+- [ ] create_type
+- [ ] delete_api_key
+- [ ] delete_data_source
+- [ ] delete_graphql_api
+- [ ] delete_resolver
+- [ ] delete_type
+- [ ] get_data_source
+- [ ] get_graphql_api
+- [ ] get_introspection_schema
+- [ ] get_resolver
+- [ ] get_schema_creation_status
+- [ ] get_type
+- [ ] list_api_keys
+- [ ] list_data_sources
+- [ ] list_graphql_apis
+- [ ] list_resolvers
+- [ ] list_types
+- [ ] start_schema_creation
+- [ ] update_api_key
+- [ ] update_data_source
+- [ ] update_graphql_api
+- [ ] update_resolver
+- [ ] update_type
+
+## athena - 0% implemented
+- [ ] batch_get_named_query
+- [ ] batch_get_query_execution
+- [ ] create_named_query
+- [ ] delete_named_query
+- [ ] get_named_query
+- [ ] get_query_execution
+- [ ] get_query_results
+- [ ] list_named_queries
+- [ ] list_query_executions
+- [ ] start_query_execution
+- [ ] stop_query_execution
+
+## autoscaling - 44% implemented
+- [X] attach_instances
+- [X] attach_load_balancer_target_groups
+- [X] attach_load_balancers
+- [ ] complete_lifecycle_action
+- [X] create_auto_scaling_group
+- [X] create_launch_configuration
+- [X] create_or_update_tags
+- [X] delete_auto_scaling_group
+- [X] delete_launch_configuration
+- [ ] delete_lifecycle_hook
+- [ ] delete_notification_configuration
+- [X] delete_policy
+- [ ] delete_scheduled_action
+- [ ] delete_tags
+- [ ] describe_account_limits
+- [ ] describe_adjustment_types
+- [X] describe_auto_scaling_groups
+- [X] describe_auto_scaling_instances
+- [ ] describe_auto_scaling_notification_types
+- [X] describe_launch_configurations
+- [ ] describe_lifecycle_hook_types
+- [ ] describe_lifecycle_hooks
+- [X] describe_load_balancer_target_groups
+- [X] describe_load_balancers
+- [ ] describe_metric_collection_types
+- [ ] describe_notification_configurations
+- [X] describe_policies
+- [ ] describe_scaling_activities
+- [ ] describe_scaling_process_types
+- [ ] describe_scheduled_actions
+- [ ] describe_tags
+- [ ] describe_termination_policy_types
+- [X] detach_instances
+- [X] detach_load_balancer_target_groups
+- [X] detach_load_balancers
+- [ ] disable_metrics_collection
+- [ ] enable_metrics_collection
+- [ ] enter_standby
+- [X] execute_policy
+- [ ] exit_standby
+- [ ] put_lifecycle_hook
+- [ ] put_notification_configuration
+- [ ] put_scaling_policy
+- [ ] put_scheduled_update_group_action
+- [ ] record_lifecycle_action_heartbeat
+- [ ] resume_processes
+- [X] set_desired_capacity
+- [X] set_instance_health
+- [ ] set_instance_protection
+- [X] suspend_processes
+- [ ] terminate_instance_in_auto_scaling_group
+- [X] update_auto_scaling_group
+
+## autoscaling-plans - 0% implemented
+- [ ] create_scaling_plan
+- [ ] delete_scaling_plan
+- [ ] describe_scaling_plan_resources
+- [ ] describe_scaling_plans
+
+## batch - 93% implemented
+- [ ] cancel_job
+- [X] create_compute_environment
+- [X] create_job_queue
+- [X] delete_compute_environment
+- [X] delete_job_queue
+- [X] deregister_job_definition
+- [X] describe_compute_environments
+- [X] describe_job_definitions
+- [X] describe_job_queues
+- [X] describe_jobs
+- [X] list_jobs
+- [X] register_job_definition
+- [X] submit_job
+- [X] terminate_job
+- [X] update_compute_environment
+- [X] update_job_queue
+
+## budgets - 0% implemented
+- [ ] create_budget
+- [ ] create_notification
+- [ ] create_subscriber
+- [ ] delete_budget
+- [ ] delete_notification
+- [ ] delete_subscriber
+- [ ] describe_budget
+- [ ] describe_budgets
+- [ ] describe_notifications_for_budget
+- [ ] describe_subscribers_for_notification
+- [ ] update_budget
+- [ ] update_notification
+- [ ] update_subscriber
+
+## ce - 0% implemented
+- [ ] get_cost_and_usage
+- [ ] get_dimension_values
+- [ ] get_reservation_coverage
+- [ ] get_reservation_purchase_recommendation
+- [ ] get_reservation_utilization
+- [ ] get_tags
+
+## cloud9 - 0% implemented
+- [ ] create_environment_ec2
+- [ ] create_environment_membership
+- [ ] delete_environment
+- [ ] delete_environment_membership
+- [ ] describe_environment_memberships
+- [ ] describe_environment_status
+- [ ] describe_environments
+- [ ] list_environments
+- [ ] update_environment
+- [ ] update_environment_membership
+
+## clouddirectory - 0% implemented
+- [ ] add_facet_to_object
+- [ ] apply_schema
+- [ ] attach_object
+- [ ] attach_policy
+- [ ] attach_to_index
+- [ ] attach_typed_link
+- [ ] batch_read
+- [ ] batch_write
+- [ ] create_directory
+- [ ] create_facet
+- [ ] create_index
+- [ ] create_object
+- [ ] create_schema
+- [ ] create_typed_link_facet
+- [ ] delete_directory
+- [ ] delete_facet
+- [ ] delete_object
+- [ ] delete_schema
+- [ ] delete_typed_link_facet
+- [ ] detach_from_index
+- [ ] detach_object
+- [ ] detach_policy
+- [ ] detach_typed_link
+- [ ] disable_directory
+- [ ] enable_directory
+- [ ] get_applied_schema_version
+- [ ] get_directory
+- [ ] get_facet
+- [ ] get_object_attributes
+- [ ] get_object_information
+- [ ] get_schema_as_json
+- [ ] get_typed_link_facet_information
+- [ ] list_applied_schema_arns
+- [ ] list_attached_indices
+- [ ] list_development_schema_arns
+- [ ] list_directories
+- [ ] list_facet_attributes
+- [ ] list_facet_names
+- [ ] list_incoming_typed_links
+- [ ] list_index
+- [ ] list_object_attributes
+- [ ] list_object_children
+- [ ] list_object_parent_paths
+- [ ] list_object_parents
+- [ ] list_object_policies
+- [ ] list_outgoing_typed_links
+- [ ] list_policy_attachments
+- [ ] list_published_schema_arns
+- [ ] list_tags_for_resource
+- [ ] list_typed_link_facet_attributes
+- [ ] list_typed_link_facet_names
+- [ ] lookup_policy
+- [ ] publish_schema
+- [ ] put_schema_from_json
+- [ ] remove_facet_from_object
+- [ ] tag_resource
+- [ ] untag_resource
+- [ ] update_facet
+- [ ] update_object_attributes
+- [ ] update_schema
+- [ ] update_typed_link_facet
+- [ ] upgrade_applied_schema
+- [ ] upgrade_published_schema
+
+## cloudformation - 21% implemented
+- [ ] cancel_update_stack
+- [ ] continue_update_rollback
+- [X] create_change_set
+- [X] create_stack
+- [ ] create_stack_instances
+- [ ] create_stack_set
+- [ ] delete_change_set
+- [X] delete_stack
+- [ ] delete_stack_instances
+- [ ] delete_stack_set
+- [ ] describe_account_limits
+- [ ] describe_change_set
+- [ ] describe_stack_events
+- [ ] describe_stack_instance
+- [ ] describe_stack_resource
+- [ ] describe_stack_resources
+- [ ] describe_stack_set
+- [ ] describe_stack_set_operation
+- [X] describe_stacks
+- [ ] estimate_template_cost
+- [X] execute_change_set
+- [ ] get_stack_policy
+- [ ] get_template
+- [ ] get_template_summary
+- [ ] list_change_sets
+- [X] list_exports
+- [ ] list_imports
+- [ ] list_stack_instances
+- [X] list_stack_resources
+- [ ] list_stack_set_operation_results
+- [ ] list_stack_set_operations
+- [ ] list_stack_sets
+- [X] list_stacks
+- [ ] set_stack_policy
+- [ ] signal_resource
+- [ ] stop_stack_set_operation
+- [X] update_stack
+- [ ] update_stack_instances
+- [ ] update_stack_set
+- [ ] update_termination_protection
+- [ ] validate_template
+
+## cloudfront - 0% implemented
+- [ ] create_cloud_front_origin_access_identity
+- [ ] create_distribution
+- [ ] create_distribution_with_tags
+- [ ] create_field_level_encryption_config
+- [ ] create_field_level_encryption_profile
+- [ ] create_invalidation
+- [ ] create_public_key
+- [ ] create_streaming_distribution
+- [ ] create_streaming_distribution_with_tags
+- [ ] delete_cloud_front_origin_access_identity
+- [ ] delete_distribution
+- [ ] delete_field_level_encryption_config
+- [ ] delete_field_level_encryption_profile
+- [ ] delete_public_key
+- [ ] delete_service_linked_role
+- [ ] delete_streaming_distribution
+- [ ] get_cloud_front_origin_access_identity
+- [ ] get_cloud_front_origin_access_identity_config
+- [ ] get_distribution
+- [ ] get_distribution_config
+- [ ] get_field_level_encryption
+- [ ] get_field_level_encryption_config
+- [ ] get_field_level_encryption_profile
+- [ ] get_field_level_encryption_profile_config
+- [ ] get_invalidation
+- [ ] get_public_key
+- [ ] get_public_key_config
+- [ ] get_streaming_distribution
+- [ ] get_streaming_distribution_config
+- [ ] list_cloud_front_origin_access_identities
+- [ ] list_distributions
+- [ ] list_distributions_by_web_acl_id
+- [ ] list_field_level_encryption_configs
+- [ ] list_field_level_encryption_profiles
+- [ ] list_invalidations
+- [ ] list_public_keys
+- [ ] list_streaming_distributions
+- [ ] list_tags_for_resource
+- [ ] tag_resource
+- [ ] untag_resource
+- [ ] update_cloud_front_origin_access_identity
+- [ ] update_distribution
+- [ ] update_field_level_encryption_config
+- [ ] update_field_level_encryption_profile
+- [ ] update_public_key
+- [ ] update_streaming_distribution
+
+## cloudhsm - 0% implemented
+- [ ] add_tags_to_resource
+- [ ] create_hapg
+- [ ] create_hsm
+- [ ] create_luna_client
+- [ ] delete_hapg
+- [ ] delete_hsm
+- [ ] delete_luna_client
+- [ ] describe_hapg
+- [ ] describe_hsm
+- [ ] describe_luna_client
+- [ ] get_config
+- [ ] list_available_zones
+- [ ] list_hapgs
+- [ ] list_hsms
+- [ ] list_luna_clients
+- [ ] list_tags_for_resource
+- [ ] modify_hapg
+- [ ] modify_hsm
+- [ ] modify_luna_client
+- [ ] remove_tags_from_resource
+
+## cloudhsmv2 - 0% implemented
+- [ ] create_cluster
+- [ ] create_hsm
+- [ ] delete_cluster
+- [ ] delete_hsm
+- [ ] describe_backups
+- [ ] describe_clusters
+- [ ] initialize_cluster
+- [ ] list_tags
+- [ ] tag_resource
+- [ ] untag_resource
+
+## cloudsearch - 0% implemented
+- [ ] build_suggesters
+- [ ] create_domain
+- [ ] define_analysis_scheme
+- [ ] define_expression
+- [ ] define_index_field
+- [ ] define_suggester
+- [ ] delete_analysis_scheme
+- [ ] delete_domain
+- [ ] delete_expression
+- [ ] delete_index_field
+- [ ] delete_suggester
+- [ ] describe_analysis_schemes
+- [ ] describe_availability_options
+- [ ] describe_domains
+- [ ] describe_expressions
+- [ ] describe_index_fields
+- [ ] describe_scaling_parameters
+- [ ] describe_service_access_policies
+- [ ] describe_suggesters
+- [ ] index_documents
+- [ ] list_domain_names
+- [ ] update_availability_options
+- [ ] update_scaling_parameters
+- [ ] update_service_access_policies
+
+## cloudsearchdomain - 0% implemented
+- [ ] search
+- [ ] suggest
+- [ ] upload_documents
+
+## cloudtrail - 0% implemented
+- [ ] add_tags
+- [ ] create_trail
+- [ ] delete_trail
+- [ ] describe_trails
+- [ ] get_event_selectors
+- [ ] get_trail_status
+- [ ] list_public_keys
+- [ ] list_tags
+- [ ] lookup_events
+- [ ] put_event_selectors
+- [ ] remove_tags
+- [ ] start_logging
+- [ ] stop_logging
+- [ ] update_trail
+
+## cloudwatch - 56% implemented
+- [X] delete_alarms
+- [X] delete_dashboards
+- [ ] describe_alarm_history
+- [ ] describe_alarms
+- [ ] describe_alarms_for_metric
+- [ ] disable_alarm_actions
+- [ ] enable_alarm_actions
+- [X] get_dashboard
+- [ ] get_metric_data
+- [X] get_metric_statistics
+- [X] list_dashboards
+- [ ] list_metrics
+- [X] put_dashboard
+- [X] put_metric_alarm
+- [X] put_metric_data
+- [X] set_alarm_state
+
+## codebuild - 0% implemented
+- [ ] batch_delete_builds
+- [ ] batch_get_builds
+- [ ] batch_get_projects
+- [ ] create_project
+- [ ] create_webhook
+- [ ] delete_project
+- [ ] delete_webhook
+- [ ] invalidate_project_cache
+- [ ] list_builds
+- [ ] list_builds_for_project
+- [ ] list_curated_environment_images
+- [ ] list_projects
+- [ ] start_build
+- [ ] stop_build
+- [ ] update_project
+- [ ] update_webhook
+
+## codecommit - 0% implemented
+- [ ] batch_get_repositories
+- [ ] create_branch
+- [ ] create_pull_request
+- [ ] create_repository
+- [ ] delete_branch
+- [ ] delete_comment_content
+- [ ] delete_repository
+- [ ] describe_pull_request_events
+- [ ] get_blob
+- [ ] get_branch
+- [ ] get_comment
+- [ ] get_comments_for_compared_commit
+- [ ] get_comments_for_pull_request
+- [ ] get_commit
+- [ ] get_differences
+- [ ] get_merge_conflicts
+- [ ] get_pull_request
+- [ ] get_repository
+- [ ] get_repository_triggers
+- [ ] list_branches
+- [ ] list_pull_requests
+- [ ] list_repositories
+- [ ] merge_pull_request_by_fast_forward
+- [ ] post_comment_for_compared_commit
+- [ ] post_comment_for_pull_request
+- [ ] post_comment_reply
+- [ ] put_file
+- [ ] put_repository_triggers
+- [ ] test_repository_triggers
+- [ ] update_comment
+- [ ] update_default_branch
+- [ ] update_pull_request_description
+- [ ] update_pull_request_status
+- [ ] update_pull_request_title
+- [ ] update_repository_description
+- [ ] update_repository_name
+
+## codedeploy - 0% implemented
+- [ ] add_tags_to_on_premises_instances
+- [ ] batch_get_application_revisions
+- [ ] batch_get_applications
+- [ ] batch_get_deployment_groups
+- [ ] batch_get_deployment_instances
+- [ ] batch_get_deployments
+- [ ] batch_get_on_premises_instances
+- [ ] continue_deployment
+- [ ] create_application
+- [ ] create_deployment
+- [ ] create_deployment_config
+- [ ] create_deployment_group
+- [ ] delete_application
+- [ ] delete_deployment_config
+- [ ] delete_deployment_group
+- [ ] delete_git_hub_account_token
+- [ ] deregister_on_premises_instance
+- [ ] get_application
+- [ ] get_application_revision
+- [ ] get_deployment
+- [ ] get_deployment_config
+- [ ] get_deployment_group
+- [ ] get_deployment_instance
+- [ ] get_on_premises_instance
+- [ ] list_application_revisions
+- [ ] list_applications
+- [ ] list_deployment_configs
+- [ ] list_deployment_groups
+- [ ] list_deployment_instances
+- [ ] list_deployments
+- [ ] list_git_hub_account_token_names
+- [ ] list_on_premises_instances
+- [ ] put_lifecycle_event_hook_execution_status
+- [ ] register_application_revision
+- [ ] register_on_premises_instance
+- [ ] remove_tags_from_on_premises_instances
+- [ ] skip_wait_time_for_instance_termination
+- [ ] stop_deployment
+- [ ] update_application
+- [ ] update_deployment_group
+
+## codepipeline - 0% implemented
+- [ ] acknowledge_job
+- [ ] acknowledge_third_party_job
+- [ ] create_custom_action_type
+- [ ] create_pipeline
+- [ ] delete_custom_action_type
+- [ ] delete_pipeline
+- [ ] disable_stage_transition
+- [ ] enable_stage_transition
+- [ ] get_job_details
+- [ ] get_pipeline
+- [ ] get_pipeline_execution
+- [
] get_pipeline_state +- [ ] get_third_party_job_details +- [ ] list_action_types +- [ ] list_pipeline_executions +- [ ] list_pipelines +- [ ] poll_for_jobs +- [ ] poll_for_third_party_jobs +- [ ] put_action_revision +- [ ] put_approval_result +- [ ] put_job_failure_result +- [ ] put_job_success_result +- [ ] put_third_party_job_failure_result +- [ ] put_third_party_job_success_result +- [ ] retry_stage_execution +- [ ] start_pipeline_execution +- [ ] update_pipeline + +## codestar - 0% implemented +- [ ] associate_team_member +- [ ] create_project +- [ ] create_user_profile +- [ ] delete_project +- [ ] delete_user_profile +- [ ] describe_project +- [ ] describe_user_profile +- [ ] disassociate_team_member +- [ ] list_projects +- [ ] list_resources +- [ ] list_tags_for_project +- [ ] list_team_members +- [ ] list_user_profiles +- [ ] tag_project +- [ ] untag_project +- [ ] update_project +- [ ] update_team_member +- [ ] update_user_profile + +## cognito-identity - 0% implemented +- [ ] create_identity_pool +- [ ] delete_identities +- [ ] delete_identity_pool +- [ ] describe_identity +- [ ] describe_identity_pool +- [ ] get_credentials_for_identity +- [ ] get_id +- [ ] get_identity_pool_roles +- [ ] get_open_id_token +- [ ] get_open_id_token_for_developer_identity +- [ ] list_identities +- [ ] list_identity_pools +- [ ] lookup_developer_identity +- [ ] merge_developer_identities +- [ ] set_identity_pool_roles +- [ ] unlink_developer_identity +- [ ] unlink_identity +- [ ] update_identity_pool + +## cognito-idp - 0% implemented +- [ ] add_custom_attributes +- [ ] admin_add_user_to_group +- [ ] admin_confirm_sign_up +- [ ] admin_create_user +- [ ] admin_delete_user +- [ ] admin_delete_user_attributes +- [ ] admin_disable_provider_for_user +- [ ] admin_disable_user +- [ ] admin_enable_user +- [ ] admin_forget_device +- [ ] admin_get_device +- [ ] admin_get_user +- [ ] admin_initiate_auth +- [ ] admin_link_provider_for_user +- [ ] admin_list_devices +- [ ] admin_list_groups_for_user +- [ ] admin_list_user_auth_events +- [ ] admin_remove_user_from_group +- [ ] admin_reset_user_password +- [ ] admin_respond_to_auth_challenge +- [ ] admin_set_user_mfa_preference +- [ ] admin_set_user_settings +- [ ] admin_update_auth_event_feedback +- [ ] admin_update_device_status +- [ ] admin_update_user_attributes +- [ ] admin_user_global_sign_out +- [ ] associate_software_token +- [ ] change_password +- [ ] confirm_device +- [ ] confirm_forgot_password +- [ ] confirm_sign_up +- [ ] create_group +- [ ] create_identity_provider +- [ ] create_resource_server +- [ ] create_user_import_job +- [ ] create_user_pool +- [ ] create_user_pool_client +- [ ] create_user_pool_domain +- [ ] delete_group +- [ ] delete_identity_provider +- [ ] delete_resource_server +- [ ] delete_user +- [ ] delete_user_attributes +- [ ] delete_user_pool +- [ ] delete_user_pool_client +- [ ] delete_user_pool_domain +- [ ] describe_identity_provider +- [ ] describe_resource_server +- [ ] describe_risk_configuration +- [ ] describe_user_import_job +- [ ] describe_user_pool +- [ ] describe_user_pool_client +- [ ] describe_user_pool_domain +- [ ] forget_device +- [ ] forgot_password +- [ ] get_csv_header +- [ ] get_device +- [ ] get_group +- [ ] get_identity_provider_by_identifier +- [ ] get_signing_certificate +- [ ] get_ui_customization +- [ ] get_user +- [ ] get_user_attribute_verification_code +- [ ] get_user_pool_mfa_config +- [ ] global_sign_out +- [ ] initiate_auth +- [ ] list_devices +- [ ] list_groups +- [ ] list_identity_providers +- [ ] 
list_resource_servers +- [ ] list_user_import_jobs +- [ ] list_user_pool_clients +- [ ] list_user_pools +- [ ] list_users +- [ ] list_users_in_group +- [ ] resend_confirmation_code +- [ ] respond_to_auth_challenge +- [ ] set_risk_configuration +- [ ] set_ui_customization +- [ ] set_user_mfa_preference +- [ ] set_user_pool_mfa_config +- [ ] set_user_settings +- [ ] sign_up +- [ ] start_user_import_job +- [ ] stop_user_import_job +- [ ] update_auth_event_feedback +- [ ] update_device_status +- [ ] update_group +- [ ] update_identity_provider +- [ ] update_resource_server +- [ ] update_user_attributes +- [ ] update_user_pool +- [ ] update_user_pool_client +- [ ] verify_software_token +- [ ] verify_user_attribute + +## cognito-sync - 0% implemented +- [ ] bulk_publish +- [ ] delete_dataset +- [ ] describe_dataset +- [ ] describe_identity_pool_usage +- [ ] describe_identity_usage +- [ ] get_bulk_publish_details +- [ ] get_cognito_events +- [ ] get_identity_pool_configuration +- [ ] list_datasets +- [ ] list_identity_pool_usage +- [ ] list_records +- [ ] register_device +- [ ] set_cognito_events +- [ ] set_identity_pool_configuration +- [ ] subscribe_to_dataset +- [ ] unsubscribe_from_dataset +- [ ] update_records + +## comprehend - 0% implemented +- [ ] batch_detect_dominant_language +- [ ] batch_detect_entities +- [ ] batch_detect_key_phrases +- [ ] batch_detect_sentiment +- [ ] describe_topics_detection_job +- [ ] detect_dominant_language +- [ ] detect_entities +- [ ] detect_key_phrases +- [ ] detect_sentiment +- [ ] list_topics_detection_jobs +- [ ] start_topics_detection_job + +## config - 0% implemented +- [ ] batch_get_resource_config +- [ ] delete_aggregation_authorization +- [ ] delete_config_rule +- [ ] delete_configuration_aggregator +- [ ] delete_configuration_recorder +- [ ] delete_delivery_channel +- [ ] delete_evaluation_results +- [ ] delete_pending_aggregation_request +- [ ] deliver_config_snapshot +- [ ] describe_aggregate_compliance_by_config_rules +- [ ] describe_aggregation_authorizations +- [ ] describe_compliance_by_config_rule +- [ ] describe_compliance_by_resource +- [ ] describe_config_rule_evaluation_status +- [ ] describe_config_rules +- [ ] describe_configuration_aggregator_sources_status +- [ ] describe_configuration_aggregators +- [ ] describe_configuration_recorder_status +- [ ] describe_configuration_recorders +- [ ] describe_delivery_channel_status +- [ ] describe_delivery_channels +- [ ] describe_pending_aggregation_requests +- [ ] get_aggregate_compliance_details_by_config_rule +- [ ] get_aggregate_config_rule_compliance_summary +- [ ] get_compliance_details_by_config_rule +- [ ] get_compliance_details_by_resource +- [ ] get_compliance_summary_by_config_rule +- [ ] get_compliance_summary_by_resource_type +- [ ] get_discovered_resource_counts +- [ ] get_resource_config_history +- [ ] list_discovered_resources +- [ ] put_aggregation_authorization +- [ ] put_config_rule +- [ ] put_configuration_aggregator +- [ ] put_configuration_recorder +- [ ] put_delivery_channel +- [ ] put_evaluations +- [ ] start_config_rules_evaluation +- [ ] start_configuration_recorder +- [ ] stop_configuration_recorder + +## connect - 0% implemented +- [ ] start_outbound_voice_contact +- [ ] stop_contact + +## cur - 0% implemented +- [ ] delete_report_definition +- [ ] describe_report_definitions +- [ ] put_report_definition + +## datapipeline - 42% implemented +- [X] activate_pipeline +- [ ] add_tags +- [X] create_pipeline +- [ ] deactivate_pipeline +- [X] delete_pipeline +- [X] 
describe_objects +- [X] describe_pipelines +- [ ] evaluate_expression +- [X] get_pipeline_definition +- [X] list_pipelines +- [ ] poll_for_task +- [X] put_pipeline_definition +- [ ] query_objects +- [ ] remove_tags +- [ ] report_task_progress +- [ ] report_task_runner_heartbeat +- [ ] set_status +- [ ] set_task_status +- [ ] validate_pipeline_definition + +## dax - 0% implemented +- [ ] create_cluster +- [ ] create_parameter_group +- [ ] create_subnet_group +- [ ] decrease_replication_factor +- [ ] delete_cluster +- [ ] delete_parameter_group +- [ ] delete_subnet_group +- [ ] describe_clusters +- [ ] describe_default_parameters +- [ ] describe_events +- [ ] describe_parameter_groups +- [ ] describe_parameters +- [ ] describe_subnet_groups +- [ ] increase_replication_factor +- [ ] list_tags +- [ ] reboot_node +- [ ] tag_resource +- [ ] untag_resource +- [ ] update_cluster +- [ ] update_parameter_group +- [ ] update_subnet_group + +## devicefarm - 0% implemented +- [ ] create_device_pool +- [ ] create_instance_profile +- [ ] create_network_profile +- [ ] create_project +- [ ] create_remote_access_session +- [ ] create_upload +- [ ] delete_device_pool +- [ ] delete_instance_profile +- [ ] delete_network_profile +- [ ] delete_project +- [ ] delete_remote_access_session +- [ ] delete_run +- [ ] delete_upload +- [ ] get_account_settings +- [ ] get_device +- [ ] get_device_instance +- [ ] get_device_pool +- [ ] get_device_pool_compatibility +- [ ] get_instance_profile +- [ ] get_job +- [ ] get_network_profile +- [ ] get_offering_status +- [ ] get_project +- [ ] get_remote_access_session +- [ ] get_run +- [ ] get_suite +- [ ] get_test +- [ ] get_upload +- [ ] install_to_remote_access_session +- [ ] list_artifacts +- [ ] list_device_instances +- [ ] list_device_pools +- [ ] list_devices +- [ ] list_instance_profiles +- [ ] list_jobs +- [ ] list_network_profiles +- [ ] list_offering_promotions +- [ ] list_offering_transactions +- [ ] list_offerings +- [ ] list_projects +- [ ] list_remote_access_sessions +- [ ] list_runs +- [ ] list_samples +- [ ] list_suites +- [ ] list_tests +- [ ] list_unique_problems +- [ ] list_uploads +- [ ] purchase_offering +- [ ] renew_offering +- [ ] schedule_run +- [ ] stop_remote_access_session +- [ ] stop_run +- [ ] update_device_instance +- [ ] update_device_pool +- [ ] update_instance_profile +- [ ] update_network_profile +- [ ] update_project + +## directconnect - 0% implemented +- [ ] allocate_connection_on_interconnect +- [ ] allocate_hosted_connection +- [ ] allocate_private_virtual_interface +- [ ] allocate_public_virtual_interface +- [ ] associate_connection_with_lag +- [ ] associate_hosted_connection +- [ ] associate_virtual_interface +- [ ] confirm_connection +- [ ] confirm_private_virtual_interface +- [ ] confirm_public_virtual_interface +- [ ] create_bgp_peer +- [ ] create_connection +- [ ] create_direct_connect_gateway +- [ ] create_direct_connect_gateway_association +- [ ] create_interconnect +- [ ] create_lag +- [ ] create_private_virtual_interface +- [ ] create_public_virtual_interface +- [ ] delete_bgp_peer +- [ ] delete_connection +- [ ] delete_direct_connect_gateway +- [ ] delete_direct_connect_gateway_association +- [ ] delete_interconnect +- [ ] delete_lag +- [ ] delete_virtual_interface +- [ ] describe_connection_loa +- [ ] describe_connections +- [ ] describe_connections_on_interconnect +- [ ] describe_direct_connect_gateway_associations +- [ ] describe_direct_connect_gateway_attachments +- [ ] describe_direct_connect_gateways +- [ ] 
describe_hosted_connections +- [ ] describe_interconnect_loa +- [ ] describe_interconnects +- [ ] describe_lags +- [ ] describe_loa +- [ ] describe_locations +- [ ] describe_tags +- [ ] describe_virtual_gateways +- [ ] describe_virtual_interfaces +- [ ] disassociate_connection_from_lag +- [ ] tag_resource +- [ ] untag_resource +- [ ] update_lag + +## discovery - 0% implemented +- [ ] associate_configuration_items_to_application +- [ ] create_application +- [ ] create_tags +- [ ] delete_applications +- [ ] delete_tags +- [ ] describe_agents +- [ ] describe_configurations +- [ ] describe_export_configurations +- [ ] describe_export_tasks +- [ ] describe_tags +- [ ] disassociate_configuration_items_from_application +- [ ] export_configurations +- [ ] get_discovery_summary +- [ ] list_configurations +- [ ] list_server_neighbors +- [ ] start_data_collection_by_agent_ids +- [ ] start_export_task +- [ ] stop_data_collection_by_agent_ids +- [ ] update_application + +## dms - 0% implemented +- [ ] add_tags_to_resource +- [ ] create_endpoint +- [ ] create_event_subscription +- [ ] create_replication_instance +- [ ] create_replication_subnet_group +- [ ] create_replication_task +- [ ] delete_certificate +- [ ] delete_endpoint +- [ ] delete_event_subscription +- [ ] delete_replication_instance +- [ ] delete_replication_subnet_group +- [ ] delete_replication_task +- [ ] describe_account_attributes +- [ ] describe_certificates +- [ ] describe_connections +- [ ] describe_endpoint_types +- [ ] describe_endpoints +- [ ] describe_event_categories +- [ ] describe_event_subscriptions +- [ ] describe_events +- [ ] describe_orderable_replication_instances +- [ ] describe_refresh_schemas_status +- [ ] describe_replication_instance_task_logs +- [ ] describe_replication_instances +- [ ] describe_replication_subnet_groups +- [ ] describe_replication_task_assessment_results +- [ ] describe_replication_tasks +- [ ] describe_schemas +- [ ] describe_table_statistics +- [ ] import_certificate +- [ ] list_tags_for_resource +- [ ] modify_endpoint +- [ ] modify_event_subscription +- [ ] modify_replication_instance +- [ ] modify_replication_subnet_group +- [ ] modify_replication_task +- [ ] reboot_replication_instance +- [ ] refresh_schemas +- [ ] reload_tables +- [ ] remove_tags_from_resource +- [ ] start_replication_task +- [ ] start_replication_task_assessment +- [ ] stop_replication_task +- [ ] test_connection + +## ds - 0% implemented +- [ ] add_ip_routes +- [ ] add_tags_to_resource +- [ ] cancel_schema_extension +- [ ] connect_directory +- [ ] create_alias +- [ ] create_computer +- [ ] create_conditional_forwarder +- [ ] create_directory +- [ ] create_microsoft_ad +- [ ] create_snapshot +- [ ] create_trust +- [ ] delete_conditional_forwarder +- [ ] delete_directory +- [ ] delete_snapshot +- [ ] delete_trust +- [ ] deregister_event_topic +- [ ] describe_conditional_forwarders +- [ ] describe_directories +- [ ] describe_domain_controllers +- [ ] describe_event_topics +- [ ] describe_snapshots +- [ ] describe_trusts +- [ ] disable_radius +- [ ] disable_sso +- [ ] enable_radius +- [ ] enable_sso +- [ ] get_directory_limits +- [ ] get_snapshot_limits +- [ ] list_ip_routes +- [ ] list_schema_extensions +- [ ] list_tags_for_resource +- [ ] register_event_topic +- [ ] remove_ip_routes +- [ ] remove_tags_from_resource +- [ ] restore_from_snapshot +- [ ] start_schema_extension +- [ ] update_conditional_forwarder +- [ ] update_number_of_domain_controllers +- [ ] update_radius +- [ ] verify_trust + +## dynamodb - 22% implemented 
+- [ ] batch_get_item +- [ ] batch_write_item +- [ ] create_backup +- [ ] create_global_table +- [X] create_table +- [ ] delete_backup +- [X] delete_item +- [X] delete_table +- [ ] describe_backup +- [ ] describe_continuous_backups +- [ ] describe_global_table +- [ ] describe_limits +- [ ] describe_table +- [ ] describe_time_to_live +- [X] get_item +- [ ] list_backups +- [ ] list_global_tables +- [ ] list_tables +- [ ] list_tags_of_resource +- [X] put_item +- [X] query +- [ ] restore_table_from_backup +- [ ] restore_table_to_point_in_time +- [X] scan +- [ ] tag_resource +- [ ] untag_resource +- [ ] update_continuous_backups +- [ ] update_global_table +- [ ] update_item +- [ ] update_table +- [ ] update_time_to_live + +## dynamodbstreams - 0% implemented +- [ ] describe_stream +- [ ] get_records +- [ ] get_shard_iterator +- [ ] list_streams + +## ec2 - 37% implemented +- [ ] accept_reserved_instances_exchange_quote +- [ ] accept_vpc_endpoint_connections +- [X] accept_vpc_peering_connection +- [X] allocate_address +- [ ] allocate_hosts +- [ ] assign_ipv6_addresses +- [ ] assign_private_ip_addresses +- [X] associate_address +- [X] associate_dhcp_options +- [ ] associate_iam_instance_profile +- [X] associate_route_table +- [ ] associate_subnet_cidr_block +- [X] associate_vpc_cidr_block +- [ ] attach_classic_link_vpc +- [X] attach_internet_gateway +- [X] attach_network_interface +- [X] attach_volume +- [X] attach_vpn_gateway +- [X] authorize_security_group_egress +- [X] authorize_security_group_ingress +- [ ] bundle_instance +- [ ] cancel_bundle_task +- [ ] cancel_conversion_task +- [ ] cancel_export_task +- [ ] cancel_import_task +- [ ] cancel_reserved_instances_listing +- [X] cancel_spot_fleet_requests +- [X] cancel_spot_instance_requests +- [ ] confirm_product_instance +- [ ] copy_fpga_image +- [X] copy_image +- [X] copy_snapshot +- [X] create_customer_gateway +- [ ] create_default_subnet +- [ ] create_default_vpc +- [X] create_dhcp_options +- [ ] create_egress_only_internet_gateway +- [ ] create_flow_logs +- [ ] create_fpga_image +- [X] create_image +- [ ] create_instance_export_task +- [X] create_internet_gateway +- [X] create_key_pair +- [ ] create_launch_template +- [ ] create_launch_template_version +- [X] create_nat_gateway +- [X] create_network_acl +- [X] create_network_acl_entry +- [X] create_network_interface +- [ ] create_network_interface_permission +- [ ] create_placement_group +- [ ] create_reserved_instances_listing +- [X] create_route +- [X] create_route_table +- [X] create_security_group +- [X] create_snapshot +- [ ] create_spot_datafeed_subscription +- [X] create_subnet +- [X] create_tags +- [X] create_volume +- [X] create_vpc +- [ ] create_vpc_endpoint +- [ ] create_vpc_endpoint_connection_notification +- [ ] create_vpc_endpoint_service_configuration +- [X] create_vpc_peering_connection +- [X] create_vpn_connection +- [ ] create_vpn_connection_route +- [X] create_vpn_gateway +- [X] delete_customer_gateway +- [ ] delete_dhcp_options +- [ ] delete_egress_only_internet_gateway +- [ ] delete_flow_logs +- [ ] delete_fpga_image +- [X] delete_internet_gateway +- [X] delete_key_pair +- [ ] delete_launch_template +- [ ] delete_launch_template_versions +- [X] delete_nat_gateway +- [X] delete_network_acl +- [X] delete_network_acl_entry +- [X] delete_network_interface +- [ ] delete_network_interface_permission +- [ ] delete_placement_group +- [X] delete_route +- [X] delete_route_table +- [X] delete_security_group +- [X] delete_snapshot +- [ ] delete_spot_datafeed_subscription +- [X] 
delete_subnet +- [X] delete_tags +- [X] delete_volume +- [X] delete_vpc +- [ ] delete_vpc_endpoint_connection_notifications +- [ ] delete_vpc_endpoint_service_configurations +- [ ] delete_vpc_endpoints +- [X] delete_vpc_peering_connection +- [X] delete_vpn_connection +- [ ] delete_vpn_connection_route +- [X] delete_vpn_gateway +- [X] deregister_image +- [ ] describe_account_attributes +- [X] describe_addresses +- [ ] describe_aggregate_id_format +- [X] describe_availability_zones +- [ ] describe_bundle_tasks +- [ ] describe_classic_link_instances +- [ ] describe_conversion_tasks +- [ ] describe_customer_gateways +- [X] describe_dhcp_options +- [ ] describe_egress_only_internet_gateways +- [ ] describe_elastic_gpus +- [ ] describe_export_tasks +- [ ] describe_flow_logs +- [ ] describe_fpga_image_attribute +- [ ] describe_fpga_images +- [ ] describe_host_reservation_offerings +- [ ] describe_host_reservations +- [ ] describe_hosts +- [ ] describe_iam_instance_profile_associations +- [ ] describe_id_format +- [ ] describe_identity_id_format +- [ ] describe_image_attribute +- [X] describe_images +- [ ] describe_import_image_tasks +- [ ] describe_import_snapshot_tasks +- [X] describe_instance_attribute +- [ ] describe_instance_credit_specifications +- [ ] describe_instance_status +- [ ] describe_instances +- [X] describe_internet_gateways +- [X] describe_key_pairs +- [ ] describe_launch_template_versions +- [ ] describe_launch_templates +- [ ] describe_moving_addresses +- [ ] describe_nat_gateways +- [ ] describe_network_acls +- [ ] describe_network_interface_attribute +- [ ] describe_network_interface_permissions +- [X] describe_network_interfaces +- [ ] describe_placement_groups +- [ ] describe_prefix_lists +- [ ] describe_principal_id_format +- [X] describe_regions +- [ ] describe_reserved_instances +- [ ] describe_reserved_instances_listings +- [ ] describe_reserved_instances_modifications +- [ ] describe_reserved_instances_offerings +- [ ] describe_route_tables +- [ ] describe_scheduled_instance_availability +- [ ] describe_scheduled_instances +- [ ] describe_security_group_references +- [X] describe_security_groups +- [ ] describe_snapshot_attribute +- [X] describe_snapshots +- [ ] describe_spot_datafeed_subscription +- [X] describe_spot_fleet_instances +- [ ] describe_spot_fleet_request_history +- [X] describe_spot_fleet_requests +- [X] describe_spot_instance_requests +- [ ] describe_spot_price_history +- [ ] describe_stale_security_groups +- [ ] describe_subnets +- [X] describe_tags +- [ ] describe_volume_attribute +- [ ] describe_volume_status +- [X] describe_volumes +- [ ] describe_volumes_modifications +- [X] describe_vpc_attribute +- [ ] describe_vpc_classic_link +- [ ] describe_vpc_classic_link_dns_support +- [ ] describe_vpc_endpoint_connection_notifications +- [ ] describe_vpc_endpoint_connections +- [ ] describe_vpc_endpoint_service_configurations +- [ ] describe_vpc_endpoint_service_permissions +- [ ] describe_vpc_endpoint_services +- [ ] describe_vpc_endpoints +- [ ] describe_vpc_peering_connections +- [ ] describe_vpcs +- [X] describe_vpn_connections +- [ ] describe_vpn_gateways +- [ ] detach_classic_link_vpc +- [X] detach_internet_gateway +- [X] detach_network_interface +- [X] detach_volume +- [X] detach_vpn_gateway +- [ ] disable_vgw_route_propagation +- [ ] disable_vpc_classic_link +- [ ] disable_vpc_classic_link_dns_support +- [X] disassociate_address +- [ ] disassociate_iam_instance_profile +- [X] disassociate_route_table +- [ ] disassociate_subnet_cidr_block +- [X] 
disassociate_vpc_cidr_block +- [ ] enable_vgw_route_propagation +- [ ] enable_volume_io +- [ ] enable_vpc_classic_link +- [ ] enable_vpc_classic_link_dns_support +- [ ] get_console_output +- [ ] get_console_screenshot +- [ ] get_host_reservation_purchase_preview +- [ ] get_launch_template_data +- [ ] get_password_data +- [ ] get_reserved_instances_exchange_quote +- [ ] import_image +- [ ] import_instance +- [X] import_key_pair +- [ ] import_snapshot +- [ ] import_volume +- [ ] modify_fpga_image_attribute +- [ ] modify_hosts +- [ ] modify_id_format +- [ ] modify_identity_id_format +- [ ] modify_image_attribute +- [X] modify_instance_attribute +- [ ] modify_instance_credit_specification +- [ ] modify_instance_placement +- [ ] modify_launch_template +- [X] modify_network_interface_attribute +- [ ] modify_reserved_instances +- [ ] modify_snapshot_attribute +- [X] modify_spot_fleet_request +- [X] modify_subnet_attribute +- [ ] modify_volume +- [ ] modify_volume_attribute +- [X] modify_vpc_attribute +- [ ] modify_vpc_endpoint +- [ ] modify_vpc_endpoint_connection_notification +- [ ] modify_vpc_endpoint_service_configuration +- [ ] modify_vpc_endpoint_service_permissions +- [ ] modify_vpc_peering_connection_options +- [ ] modify_vpc_tenancy +- [ ] monitor_instances +- [ ] move_address_to_vpc +- [ ] purchase_host_reservation +- [ ] purchase_reserved_instances_offering +- [ ] purchase_scheduled_instances +- [X] reboot_instances +- [ ] register_image +- [ ] reject_vpc_endpoint_connections +- [X] reject_vpc_peering_connection +- [X] release_address +- [ ] release_hosts +- [ ] replace_iam_instance_profile_association +- [X] replace_network_acl_association +- [X] replace_network_acl_entry +- [X] replace_route +- [X] replace_route_table_association +- [ ] report_instance_status +- [X] request_spot_fleet +- [X] request_spot_instances +- [ ] reset_fpga_image_attribute +- [ ] reset_image_attribute +- [ ] reset_instance_attribute +- [ ] reset_network_interface_attribute +- [ ] reset_snapshot_attribute +- [ ] restore_address_to_classic +- [X] revoke_security_group_egress +- [X] revoke_security_group_ingress +- [ ] run_instances +- [ ] run_scheduled_instances +- [X] start_instances +- [X] stop_instances +- [X] terminate_instances +- [ ] unassign_ipv6_addresses +- [ ] unassign_private_ip_addresses +- [ ] unmonitor_instances +- [ ] update_security_group_rule_descriptions_egress +- [ ] update_security_group_rule_descriptions_ingress + +## ecr - 31% implemented +- [ ] batch_check_layer_availability +- [ ] batch_delete_image +- [X] batch_get_image +- [ ] complete_layer_upload +- [X] create_repository +- [ ] delete_lifecycle_policy +- [X] delete_repository +- [ ] delete_repository_policy +- [X] describe_images +- [X] describe_repositories +- [ ] get_authorization_token +- [ ] get_download_url_for_layer +- [ ] get_lifecycle_policy +- [ ] get_lifecycle_policy_preview +- [ ] get_repository_policy +- [ ] initiate_layer_upload +- [X] list_images +- [X] put_image +- [ ] put_lifecycle_policy +- [ ] set_repository_policy +- [ ] start_lifecycle_policy_preview +- [ ] upload_layer_part + +## ecs - 87% implemented +- [X] create_cluster +- [X] create_service +- [X] delete_attributes +- [X] delete_cluster +- [X] delete_service +- [X] deregister_container_instance +- [X] deregister_task_definition +- [X] describe_clusters +- [X] describe_container_instances +- [X] describe_services +- [X] describe_task_definition +- [X] describe_tasks +- [ ] discover_poll_endpoint +- [X] list_attributes +- [X] list_clusters +- [X] 
list_container_instances +- [X] list_services +- [X] list_task_definition_families +- [X] list_task_definitions +- [X] list_tasks +- [X] put_attributes +- [X] register_container_instance +- [X] register_task_definition +- [X] run_task +- [X] start_task +- [X] stop_task +- [ ] submit_container_state_change +- [ ] submit_task_state_change +- [ ] update_container_agent +- [X] update_container_instances_state +- [X] update_service + +## efs - 0% implemented +- [ ] create_file_system +- [ ] create_mount_target +- [ ] create_tags +- [ ] delete_file_system +- [ ] delete_mount_target +- [ ] delete_tags +- [ ] describe_file_systems +- [ ] describe_mount_target_security_groups +- [ ] describe_mount_targets +- [ ] describe_tags +- [ ] modify_mount_target_security_groups + +## elasticache - 0% implemented +- [ ] add_tags_to_resource +- [ ] authorize_cache_security_group_ingress +- [ ] copy_snapshot +- [ ] create_cache_cluster +- [ ] create_cache_parameter_group +- [ ] create_cache_security_group +- [ ] create_cache_subnet_group +- [ ] create_replication_group +- [ ] create_snapshot +- [ ] delete_cache_cluster +- [ ] delete_cache_parameter_group +- [ ] delete_cache_security_group +- [ ] delete_cache_subnet_group +- [ ] delete_replication_group +- [ ] delete_snapshot +- [ ] describe_cache_clusters +- [ ] describe_cache_engine_versions +- [ ] describe_cache_parameter_groups +- [ ] describe_cache_parameters +- [ ] describe_cache_security_groups +- [ ] describe_cache_subnet_groups +- [ ] describe_engine_default_parameters +- [ ] describe_events +- [ ] describe_replication_groups +- [ ] describe_reserved_cache_nodes +- [ ] describe_reserved_cache_nodes_offerings +- [ ] describe_snapshots +- [ ] list_allowed_node_type_modifications +- [ ] list_tags_for_resource +- [ ] modify_cache_cluster +- [ ] modify_cache_parameter_group +- [ ] modify_cache_subnet_group +- [ ] modify_replication_group +- [ ] modify_replication_group_shard_configuration +- [ ] purchase_reserved_cache_nodes_offering +- [ ] reboot_cache_cluster +- [ ] remove_tags_from_resource +- [ ] reset_cache_parameter_group +- [ ] revoke_cache_security_group_ingress +- [ ] test_failover + +## elasticbeanstalk - 0% implemented +- [ ] abort_environment_update +- [ ] apply_environment_managed_action +- [ ] check_dns_availability +- [ ] compose_environments +- [ ] create_application +- [ ] create_application_version +- [ ] create_configuration_template +- [ ] create_environment +- [ ] create_platform_version +- [ ] create_storage_location +- [ ] delete_application +- [ ] delete_application_version +- [ ] delete_configuration_template +- [ ] delete_environment_configuration +- [ ] delete_platform_version +- [ ] describe_account_attributes +- [ ] describe_application_versions +- [ ] describe_applications +- [ ] describe_configuration_options +- [ ] describe_configuration_settings +- [ ] describe_environment_health +- [ ] describe_environment_managed_action_history +- [ ] describe_environment_managed_actions +- [ ] describe_environment_resources +- [ ] describe_environments +- [ ] describe_events +- [ ] describe_instances_health +- [ ] describe_platform_version +- [ ] list_available_solution_stacks +- [ ] list_platform_versions +- [ ] list_tags_for_resource +- [ ] rebuild_environment +- [ ] request_environment_info +- [ ] restart_app_server +- [ ] retrieve_environment_info +- [ ] swap_environment_cnames +- [ ] terminate_environment +- [ ] update_application +- [ ] update_application_resource_lifecycle +- [ ] update_application_version +- [ ] 
update_configuration_template +- [ ] update_environment +- [ ] update_tags_for_resource +- [ ] validate_configuration_settings + +## elastictranscoder - 0% implemented +- [ ] cancel_job +- [ ] create_job +- [ ] create_pipeline +- [ ] create_preset +- [ ] delete_pipeline +- [ ] delete_preset +- [ ] list_jobs_by_pipeline +- [ ] list_jobs_by_status +- [ ] list_pipelines +- [ ] list_presets +- [ ] read_job +- [ ] read_pipeline +- [ ] read_preset +- [ ] test_role +- [ ] update_pipeline +- [ ] update_pipeline_notifications +- [ ] update_pipeline_status + +## elb - 34% implemented +- [ ] add_tags +- [X] apply_security_groups_to_load_balancer +- [ ] attach_load_balancer_to_subnets +- [X] configure_health_check +- [X] create_app_cookie_stickiness_policy +- [X] create_lb_cookie_stickiness_policy +- [X] create_load_balancer +- [X] create_load_balancer_listeners +- [ ] create_load_balancer_policy +- [X] delete_load_balancer +- [X] delete_load_balancer_listeners +- [ ] delete_load_balancer_policy +- [ ] deregister_instances_from_load_balancer +- [ ] describe_account_limits +- [ ] describe_instance_health +- [ ] describe_load_balancer_attributes +- [ ] describe_load_balancer_policies +- [ ] describe_load_balancer_policy_types +- [X] describe_load_balancers +- [ ] describe_tags +- [ ] detach_load_balancer_from_subnets +- [ ] disable_availability_zones_for_load_balancer +- [ ] enable_availability_zones_for_load_balancer +- [ ] modify_load_balancer_attributes +- [ ] register_instances_with_load_balancer +- [ ] remove_tags +- [ ] set_load_balancer_listener_ssl_certificate +- [ ] set_load_balancer_policies_for_backend_server +- [X] set_load_balancer_policies_of_listener + +## elbv2 - 70% implemented +- [ ] add_listener_certificates +- [ ] add_tags +- [X] create_listener +- [X] create_load_balancer +- [X] create_rule +- [X] create_target_group +- [X] delete_listener +- [X] delete_load_balancer +- [X] delete_rule +- [X] delete_target_group +- [X] deregister_targets +- [ ] describe_account_limits +- [ ] describe_listener_certificates +- [X] describe_listeners +- [X] describe_load_balancer_attributes +- [X] describe_load_balancers +- [X] describe_rules +- [ ] describe_ssl_policies +- [ ] describe_tags +- [ ] describe_target_group_attributes +- [X] describe_target_groups +- [X] describe_target_health +- [X] modify_listener +- [X] modify_load_balancer_attributes +- [X] modify_rule +- [X] modify_target_group +- [ ] modify_target_group_attributes +- [X] register_targets +- [ ] remove_listener_certificates +- [ ] remove_tags +- [X] set_ip_address_type +- [X] set_rule_priorities +- [X] set_security_groups +- [X] set_subnets + +## emr - 55% implemented +- [ ] add_instance_fleet +- [X] add_instance_groups +- [X] add_job_flow_steps +- [X] add_tags +- [ ] cancel_steps +- [ ] create_security_configuration +- [ ] delete_security_configuration +- [ ] describe_cluster +- [X] describe_job_flows +- [ ] describe_security_configuration +- [X] describe_step +- [X] list_bootstrap_actions +- [X] list_clusters +- [ ] list_instance_fleets +- [X] list_instance_groups +- [ ] list_instances +- [ ] list_security_configurations +- [X] list_steps +- [ ] modify_instance_fleet +- [X] modify_instance_groups +- [ ] put_auto_scaling_policy +- [ ] remove_auto_scaling_policy +- [X] remove_tags +- [X] run_job_flow +- [X] set_termination_protection +- [X] set_visible_to_all_users +- [X] terminate_job_flows + +## es - 0% implemented +- [ ] add_tags +- [ ] create_elasticsearch_domain +- [ ] delete_elasticsearch_domain +- [ ] 
delete_elasticsearch_service_role +- [ ] describe_elasticsearch_domain +- [ ] describe_elasticsearch_domain_config +- [ ] describe_elasticsearch_domains +- [ ] describe_elasticsearch_instance_type_limits +- [ ] list_domain_names +- [ ] list_elasticsearch_instance_types +- [ ] list_elasticsearch_versions +- [ ] list_tags +- [ ] remove_tags +- [ ] update_elasticsearch_domain_config + +## events - 100% implemented +- [X] delete_rule +- [X] describe_event_bus +- [X] describe_rule +- [X] disable_rule +- [X] enable_rule +- [X] list_rule_names_by_target +- [X] list_rules +- [X] list_targets_by_rule +- [X] put_events +- [X] put_permission +- [X] put_rule +- [X] put_targets +- [X] remove_permission +- [X] remove_targets +- [X] test_event_pattern + +## firehose - 0% implemented +- [ ] create_delivery_stream +- [ ] delete_delivery_stream +- [ ] describe_delivery_stream +- [ ] list_delivery_streams +- [ ] put_record +- [ ] put_record_batch +- [ ] update_destination + +## fms - 0% implemented +- [ ] associate_admin_account +- [ ] delete_notification_channel +- [ ] delete_policy +- [ ] disassociate_admin_account +- [ ] get_admin_account +- [ ] get_compliance_detail +- [ ] get_notification_channel +- [ ] get_policy +- [ ] list_compliance_status +- [ ] list_policies +- [ ] put_notification_channel +- [ ] put_policy + +## gamelift - 0% implemented +- [ ] accept_match +- [ ] create_alias +- [ ] create_build +- [ ] create_fleet +- [ ] create_game_session +- [ ] create_game_session_queue +- [ ] create_matchmaking_configuration +- [ ] create_matchmaking_rule_set +- [ ] create_player_session +- [ ] create_player_sessions +- [ ] create_vpc_peering_authorization +- [ ] create_vpc_peering_connection +- [ ] delete_alias +- [ ] delete_build +- [ ] delete_fleet +- [ ] delete_game_session_queue +- [ ] delete_matchmaking_configuration +- [ ] delete_scaling_policy +- [ ] delete_vpc_peering_authorization +- [ ] delete_vpc_peering_connection +- [ ] describe_alias +- [ ] describe_build +- [ ] describe_ec2_instance_limits +- [ ] describe_fleet_attributes +- [ ] describe_fleet_capacity +- [ ] describe_fleet_events +- [ ] describe_fleet_port_settings +- [ ] describe_fleet_utilization +- [ ] describe_game_session_details +- [ ] describe_game_session_placement +- [ ] describe_game_session_queues +- [ ] describe_game_sessions +- [ ] describe_instances +- [ ] describe_matchmaking +- [ ] describe_matchmaking_configurations +- [ ] describe_matchmaking_rule_sets +- [ ] describe_player_sessions +- [ ] describe_runtime_configuration +- [ ] describe_scaling_policies +- [ ] describe_vpc_peering_authorizations +- [ ] describe_vpc_peering_connections +- [ ] get_game_session_log_url +- [ ] get_instance_access +- [ ] list_aliases +- [ ] list_builds +- [ ] list_fleets +- [ ] put_scaling_policy +- [ ] request_upload_credentials +- [ ] resolve_alias +- [ ] search_game_sessions +- [ ] start_game_session_placement +- [ ] start_match_backfill +- [ ] start_matchmaking +- [ ] stop_game_session_placement +- [ ] stop_matchmaking +- [ ] update_alias +- [ ] update_build +- [ ] update_fleet_attributes +- [ ] update_fleet_capacity +- [ ] update_fleet_port_settings +- [ ] update_game_session +- [ ] update_game_session_queue +- [ ] update_matchmaking_configuration +- [ ] update_runtime_configuration +- [ ] validate_matchmaking_rule_set + +## glacier - 12% implemented +- [ ] abort_multipart_upload +- [ ] abort_vault_lock +- [ ] add_tags_to_vault +- [ ] complete_multipart_upload +- [ ] complete_vault_lock +- [X] create_vault +- [ ] delete_archive +- [X] 
delete_vault +- [ ] delete_vault_access_policy +- [ ] delete_vault_notifications +- [ ] describe_job +- [ ] describe_vault +- [ ] get_data_retrieval_policy +- [ ] get_job_output +- [ ] get_vault_access_policy +- [ ] get_vault_lock +- [ ] get_vault_notifications +- [X] initiate_job +- [ ] initiate_multipart_upload +- [ ] initiate_vault_lock +- [X] list_jobs +- [ ] list_multipart_uploads +- [ ] list_parts +- [ ] list_provisioned_capacity +- [ ] list_tags_for_vault +- [ ] list_vaults +- [ ] purchase_provisioned_capacity +- [ ] remove_tags_from_vault +- [ ] set_data_retrieval_policy +- [ ] set_vault_access_policy +- [ ] set_vault_notifications +- [ ] upload_archive +- [ ] upload_multipart_part + +## glue - 0% implemented +- [ ] batch_create_partition +- [ ] batch_delete_connection +- [ ] batch_delete_partition +- [ ] batch_delete_table +- [ ] batch_delete_table_version +- [ ] batch_get_partition +- [ ] batch_stop_job_run +- [ ] create_classifier +- [ ] create_connection +- [ ] create_crawler +- [ ] create_database +- [ ] create_dev_endpoint +- [ ] create_job +- [ ] create_partition +- [ ] create_script +- [ ] create_table +- [ ] create_trigger +- [ ] create_user_defined_function +- [ ] delete_classifier +- [ ] delete_connection +- [ ] delete_crawler +- [ ] delete_database +- [ ] delete_dev_endpoint +- [ ] delete_job +- [ ] delete_partition +- [ ] delete_table +- [ ] delete_table_version +- [ ] delete_trigger +- [ ] delete_user_defined_function +- [ ] get_catalog_import_status +- [ ] get_classifier +- [ ] get_classifiers +- [ ] get_connection +- [ ] get_connections +- [ ] get_crawler +- [ ] get_crawler_metrics +- [ ] get_crawlers +- [ ] get_database +- [ ] get_databases +- [ ] get_dataflow_graph +- [ ] get_dev_endpoint +- [ ] get_dev_endpoints +- [ ] get_job +- [ ] get_job_run +- [ ] get_job_runs +- [ ] get_jobs +- [ ] get_mapping +- [ ] get_partition +- [ ] get_partitions +- [ ] get_plan +- [ ] get_table +- [ ] get_table_version +- [ ] get_table_versions +- [ ] get_tables +- [ ] get_trigger +- [ ] get_triggers +- [ ] get_user_defined_function +- [ ] get_user_defined_functions +- [ ] import_catalog_to_glue +- [ ] reset_job_bookmark +- [ ] start_crawler +- [ ] start_crawler_schedule +- [ ] start_job_run +- [ ] start_trigger +- [ ] stop_crawler +- [ ] stop_crawler_schedule +- [ ] stop_trigger +- [ ] update_classifier +- [ ] update_connection +- [ ] update_crawler +- [ ] update_crawler_schedule +- [ ] update_database +- [ ] update_dev_endpoint +- [ ] update_job +- [ ] update_partition +- [ ] update_table +- [ ] update_trigger +- [ ] update_user_defined_function + +## greengrass - 0% implemented +- [ ] associate_role_to_group +- [ ] associate_service_role_to_account +- [ ] create_core_definition +- [ ] create_core_definition_version +- [ ] create_deployment +- [ ] create_device_definition +- [ ] create_device_definition_version +- [ ] create_function_definition +- [ ] create_function_definition_version +- [ ] create_group +- [ ] create_group_certificate_authority +- [ ] create_group_version +- [ ] create_logger_definition +- [ ] create_logger_definition_version +- [ ] create_resource_definition +- [ ] create_resource_definition_version +- [ ] create_software_update_job +- [ ] create_subscription_definition +- [ ] create_subscription_definition_version +- [ ] delete_core_definition +- [ ] delete_device_definition +- [ ] delete_function_definition +- [ ] delete_group +- [ ] delete_logger_definition +- [ ] delete_resource_definition +- [ ] delete_subscription_definition +- [ ] 
disassociate_role_from_group +- [ ] disassociate_service_role_from_account +- [ ] get_associated_role +- [ ] get_connectivity_info +- [ ] get_core_definition +- [ ] get_core_definition_version +- [ ] get_deployment_status +- [ ] get_device_definition +- [ ] get_device_definition_version +- [ ] get_function_definition +- [ ] get_function_definition_version +- [ ] get_group +- [ ] get_group_certificate_authority +- [ ] get_group_certificate_configuration +- [ ] get_group_version +- [ ] get_logger_definition +- [ ] get_logger_definition_version +- [ ] get_resource_definition +- [ ] get_resource_definition_version +- [ ] get_service_role_for_account +- [ ] get_subscription_definition +- [ ] get_subscription_definition_version +- [ ] list_core_definition_versions +- [ ] list_core_definitions +- [ ] list_deployments +- [ ] list_device_definition_versions +- [ ] list_device_definitions +- [ ] list_function_definition_versions +- [ ] list_function_definitions +- [ ] list_group_certificate_authorities +- [ ] list_group_versions +- [ ] list_groups +- [ ] list_logger_definition_versions +- [ ] list_logger_definitions +- [ ] list_resource_definition_versions +- [ ] list_resource_definitions +- [ ] list_subscription_definition_versions +- [ ] list_subscription_definitions +- [ ] reset_deployments +- [ ] update_connectivity_info +- [ ] update_core_definition +- [ ] update_device_definition +- [ ] update_function_definition +- [ ] update_group +- [ ] update_group_certificate_configuration +- [ ] update_logger_definition +- [ ] update_resource_definition +- [ ] update_subscription_definition + +## guardduty - 0% implemented +- [ ] accept_invitation +- [ ] archive_findings +- [ ] create_detector +- [ ] create_ip_set +- [ ] create_members +- [ ] create_sample_findings +- [ ] create_threat_intel_set +- [ ] decline_invitations +- [ ] delete_detector +- [ ] delete_invitations +- [ ] delete_ip_set +- [ ] delete_members +- [ ] delete_threat_intel_set +- [ ] disassociate_from_master_account +- [ ] disassociate_members +- [ ] get_detector +- [ ] get_findings +- [ ] get_findings_statistics +- [ ] get_invitations_count +- [ ] get_ip_set +- [ ] get_master_account +- [ ] get_members +- [ ] get_threat_intel_set +- [ ] invite_members +- [ ] list_detectors +- [ ] list_findings +- [ ] list_invitations +- [ ] list_ip_sets +- [ ] list_members +- [ ] list_threat_intel_sets +- [ ] start_monitoring_members +- [ ] stop_monitoring_members +- [ ] unarchive_findings +- [ ] update_detector +- [ ] update_findings_feedback +- [ ] update_ip_set +- [ ] update_threat_intel_set + +## health - 0% implemented +- [ ] describe_affected_entities +- [ ] describe_entity_aggregates +- [ ] describe_event_aggregates +- [ ] describe_event_details +- [ ] describe_event_types +- [ ] describe_events + +## iam - 48% implemented +- [ ] add_client_id_to_open_id_connect_provider +- [X] add_role_to_instance_profile +- [X] add_user_to_group +- [X] attach_group_policy +- [X] attach_role_policy +- [X] attach_user_policy +- [ ] change_password +- [X] create_access_key +- [X] create_account_alias +- [X] create_group +- [X] create_instance_profile +- [X] create_login_profile +- [ ] create_open_id_connect_provider +- [X] create_policy +- [X] create_policy_version +- [X] create_role +- [ ] create_saml_provider +- [ ] create_service_linked_role +- [ ] create_service_specific_credential +- [X] create_user +- [ ] create_virtual_mfa_device +- [X] deactivate_mfa_device +- [X] delete_access_key +- [X] delete_account_alias +- [ ] delete_account_password_policy +- [ ] 
delete_group +- [ ] delete_group_policy +- [ ] delete_instance_profile +- [X] delete_login_profile +- [ ] delete_open_id_connect_provider +- [ ] delete_policy +- [X] delete_policy_version +- [X] delete_role +- [X] delete_role_policy +- [ ] delete_saml_provider +- [X] delete_server_certificate +- [ ] delete_service_linked_role +- [ ] delete_service_specific_credential +- [ ] delete_signing_certificate +- [ ] delete_ssh_public_key +- [X] delete_user +- [X] delete_user_policy +- [ ] delete_virtual_mfa_device +- [X] detach_group_policy +- [X] detach_role_policy +- [X] detach_user_policy +- [X] enable_mfa_device +- [ ] generate_credential_report +- [ ] get_access_key_last_used +- [X] get_account_authorization_details +- [ ] get_account_password_policy +- [ ] get_account_summary +- [ ] get_context_keys_for_custom_policy +- [ ] get_context_keys_for_principal_policy +- [X] get_credential_report +- [X] get_group +- [X] get_group_policy +- [X] get_instance_profile +- [X] get_login_profile +- [ ] get_open_id_connect_provider +- [X] get_policy +- [X] get_policy_version +- [X] get_role +- [X] get_role_policy +- [ ] get_saml_provider +- [X] get_server_certificate +- [ ] get_service_linked_role_deletion_status +- [ ] get_ssh_public_key +- [X] get_user +- [X] get_user_policy +- [ ] list_access_keys +- [X] list_account_aliases +- [X] list_attached_group_policies +- [X] list_attached_role_policies +- [X] list_attached_user_policies +- [ ] list_entities_for_policy +- [X] list_group_policies +- [X] list_groups +- [ ] list_groups_for_user +- [ ] list_instance_profiles +- [ ] list_instance_profiles_for_role +- [X] list_mfa_devices +- [ ] list_open_id_connect_providers +- [X] list_policies +- [X] list_policy_versions +- [X] list_role_policies +- [ ] list_roles +- [ ] list_saml_providers +- [ ] list_server_certificates +- [ ] list_service_specific_credentials +- [ ] list_signing_certificates +- [ ] list_ssh_public_keys +- [X] list_user_policies +- [X] list_users +- [ ] list_virtual_mfa_devices +- [X] put_group_policy +- [X] put_role_policy +- [X] put_user_policy +- [ ] remove_client_id_from_open_id_connect_provider +- [X] remove_role_from_instance_profile +- [X] remove_user_from_group +- [ ] reset_service_specific_credential +- [ ] resync_mfa_device +- [ ] set_default_policy_version +- [ ] simulate_custom_policy +- [ ] simulate_principal_policy +- [X] update_access_key +- [ ] update_account_password_policy +- [ ] update_assume_role_policy +- [ ] update_group +- [X] update_login_profile +- [ ] update_open_id_connect_provider_thumbprint +- [ ] update_role +- [ ] update_role_description +- [ ] update_saml_provider +- [ ] update_server_certificate +- [ ] update_service_specific_credential +- [ ] update_signing_certificate +- [ ] update_ssh_public_key +- [ ] update_user +- [ ] upload_server_certificate +- [ ] upload_signing_certificate +- [ ] upload_ssh_public_key + +## importexport - 0% implemented +- [ ] cancel_job +- [ ] create_job +- [ ] get_shipping_label +- [ ] get_status +- [ ] list_jobs +- [ ] update_job + +## inspector - 0% implemented +- [ ] add_attributes_to_findings +- [ ] create_assessment_target +- [ ] create_assessment_template +- [ ] create_resource_group +- [ ] delete_assessment_run +- [ ] delete_assessment_target +- [ ] delete_assessment_template +- [ ] describe_assessment_runs +- [ ] describe_assessment_targets +- [ ] describe_assessment_templates +- [ ] describe_cross_account_access_role +- [ ] describe_findings +- [ ] describe_resource_groups +- [ ] describe_rules_packages +- [ ] 
get_assessment_report +- [ ] get_telemetry_metadata +- [ ] list_assessment_run_agents +- [ ] list_assessment_runs +- [ ] list_assessment_targets +- [ ] list_assessment_templates +- [ ] list_event_subscriptions +- [ ] list_findings +- [ ] list_rules_packages +- [ ] list_tags_for_resource +- [ ] preview_agents +- [ ] register_cross_account_access_role +- [ ] remove_attributes_from_findings +- [ ] set_tags_for_resource +- [ ] start_assessment_run +- [ ] stop_assessment_run +- [ ] subscribe_to_event +- [ ] unsubscribe_from_event +- [ ] update_assessment_target + +## iot - 31% implemented +- [ ] accept_certificate_transfer +- [X] add_thing_to_thing_group +- [ ] associate_targets_with_job +- [ ] attach_policy +- [X] attach_principal_policy +- [X] attach_thing_principal +- [ ] cancel_certificate_transfer +- [ ] cancel_job +- [ ] clear_default_authorizer +- [ ] create_authorizer +- [ ] create_certificate_from_csr +- [X] create_job +- [X] create_keys_and_certificate +- [ ] create_ota_update +- [X] create_policy +- [ ] create_policy_version +- [ ] create_role_alias +- [ ] create_stream +- [X] create_thing +- [X] create_thing_group +- [X] create_thing_type +- [ ] create_topic_rule +- [ ] delete_authorizer +- [ ] delete_ca_certificate +- [X] delete_certificate +- [ ] delete_ota_update +- [X] delete_policy +- [ ] delete_policy_version +- [ ] delete_registration_code +- [ ] delete_role_alias +- [ ] delete_stream +- [X] delete_thing +- [X] delete_thing_group +- [X] delete_thing_type +- [ ] delete_topic_rule +- [ ] delete_v2_logging_level +- [ ] deprecate_thing_type +- [ ] describe_authorizer +- [ ] describe_ca_certificate +- [X] describe_certificate +- [ ] describe_default_authorizer +- [ ] describe_endpoint +- [ ] describe_event_configurations +- [ ] describe_index +- [X] describe_job +- [ ] describe_job_execution +- [ ] describe_role_alias +- [ ] describe_stream +- [X] describe_thing +- [X] describe_thing_group +- [ ] describe_thing_registration_task +- [X] describe_thing_type +- [ ] detach_policy +- [X] detach_principal_policy +- [X] detach_thing_principal +- [ ] disable_topic_rule +- [ ] enable_topic_rule +- [ ] get_effective_policies +- [ ] get_indexing_configuration +- [X] get_job_document +- [ ] get_logging_options +- [ ] get_ota_update +- [X] get_policy +- [ ] get_policy_version +- [ ] get_registration_code +- [ ] get_topic_rule +- [ ] get_v2_logging_options +- [ ] list_attached_policies +- [ ] list_authorizers +- [ ] list_ca_certificates +- [X] list_certificates +- [ ] list_certificates_by_ca +- [ ] list_indices +- [ ] list_job_executions_for_job +- [ ] list_job_executions_for_thing +- [ ] list_jobs +- [ ] list_ota_updates +- [ ] list_outgoing_certificates +- [X] list_policies +- [X] list_policy_principals +- [ ] list_policy_versions +- [X] list_principal_policies +- [X] list_principal_things +- [ ] list_role_aliases +- [ ] list_streams +- [ ] list_targets_for_policy +- [X] list_thing_groups +- [X] list_thing_groups_for_thing +- [X] list_thing_principals +- [ ] list_thing_registration_task_reports +- [ ] list_thing_registration_tasks +- [X] list_thing_types +- [X] list_things +- [X] list_things_in_thing_group +- [ ] list_topic_rules +- [ ] list_v2_logging_levels +- [ ] register_ca_certificate +- [ ] register_certificate +- [ ] register_thing +- [ ] reject_certificate_transfer +- [X] remove_thing_from_thing_group +- [ ] replace_topic_rule +- [ ] search_index +- [ ] set_default_authorizer +- [ ] set_default_policy_version +- [ ] set_logging_options +- [ ] set_v2_logging_level +- [ ] 
set_v2_logging_options +- [ ] start_thing_registration_task +- [ ] stop_thing_registration_task +- [ ] test_authorization +- [ ] test_invoke_authorizer +- [ ] transfer_certificate +- [ ] update_authorizer +- [ ] update_ca_certificate +- [X] update_certificate +- [ ] update_event_configurations +- [ ] update_indexing_configuration +- [ ] update_role_alias +- [ ] update_stream +- [X] update_thing +- [X] update_thing_group +- [X] update_thing_groups_for_thing + +## iot-data - 0% implemented +- [ ] delete_thing_shadow +- [ ] get_thing_shadow +- [ ] publish +- [ ] update_thing_shadow + +## iot-jobs-data - 0% implemented +- [ ] describe_job_execution +- [ ] get_pending_job_executions +- [ ] start_next_pending_job_execution +- [ ] update_job_execution + +## kinesis - 56% implemented +- [X] add_tags_to_stream +- [X] create_stream +- [ ] decrease_stream_retention_period +- [X] delete_stream +- [ ] describe_limits +- [X] describe_stream +- [ ] describe_stream_summary +- [ ] disable_enhanced_monitoring +- [ ] enable_enhanced_monitoring +- [X] get_records +- [X] get_shard_iterator +- [ ] increase_stream_retention_period +- [ ] list_shards +- [X] list_streams +- [X] list_tags_for_stream +- [X] merge_shards +- [X] put_record +- [X] put_records +- [X] remove_tags_from_stream +- [X] split_shard +- [ ] start_stream_encryption +- [ ] stop_stream_encryption +- [ ] update_shard_count + +## kinesis-video-archived-media - 0% implemented +- [ ] get_media_for_fragment_list +- [ ] list_fragments + +## kinesis-video-media - 0% implemented +- [ ] get_media + +## kinesisanalytics - 0% implemented +- [ ] add_application_cloud_watch_logging_option +- [ ] add_application_input +- [ ] add_application_input_processing_configuration +- [ ] add_application_output +- [ ] add_application_reference_data_source +- [ ] create_application +- [ ] delete_application +- [ ] delete_application_cloud_watch_logging_option +- [ ] delete_application_input_processing_configuration +- [ ] delete_application_output +- [ ] delete_application_reference_data_source +- [ ] describe_application +- [ ] discover_input_schema +- [ ] list_applications +- [ ] start_application +- [ ] stop_application +- [ ] update_application + +## kinesisvideo - 0% implemented +- [ ] create_stream +- [ ] delete_stream +- [ ] describe_stream +- [ ] get_data_endpoint +- [ ] list_streams +- [ ] list_tags_for_stream +- [ ] tag_stream +- [ ] untag_stream +- [ ] update_data_retention +- [ ] update_stream + +## kms - 25% implemented +- [ ] cancel_key_deletion +- [ ] create_alias +- [ ] create_grant +- [X] create_key +- [ ] decrypt +- [X] delete_alias +- [ ] delete_imported_key_material +- [X] describe_key +- [ ] disable_key +- [X] disable_key_rotation +- [ ] enable_key +- [X] enable_key_rotation +- [ ] encrypt +- [ ] generate_data_key +- [ ] generate_data_key_without_plaintext +- [ ] generate_random +- [X] get_key_policy +- [X] get_key_rotation_status +- [ ] get_parameters_for_import +- [ ] import_key_material +- [ ] list_aliases +- [ ] list_grants +- [ ] list_key_policies +- [X] list_keys +- [ ] list_resource_tags +- [ ] list_retirable_grants +- [X] put_key_policy +- [ ] re_encrypt +- [ ] retire_grant +- [ ] revoke_grant +- [ ] schedule_key_deletion +- [ ] tag_resource +- [ ] untag_resource +- [ ] update_alias +- [ ] update_key_description + +## lambda - 0% implemented +- [ ] add_permission +- [ ] create_alias +- [ ] create_event_source_mapping +- [ ] create_function +- [ ] delete_alias +- [ ] delete_event_source_mapping +- [ ] delete_function +- [ ] 
delete_function_concurrency +- [ ] get_account_settings +- [ ] get_alias +- [ ] get_event_source_mapping +- [ ] get_function +- [ ] get_function_configuration +- [ ] get_policy +- [ ] invoke +- [ ] invoke_async +- [ ] list_aliases +- [ ] list_event_source_mappings +- [ ] list_functions +- [ ] list_tags +- [ ] list_versions_by_function +- [ ] publish_version +- [ ] put_function_concurrency +- [ ] remove_permission +- [ ] tag_resource +- [ ] untag_resource +- [ ] update_alias +- [ ] update_event_source_mapping +- [ ] update_function_code +- [ ] update_function_configuration + +## lex-models - 0% implemented +- [ ] create_bot_version +- [ ] create_intent_version +- [ ] create_slot_type_version +- [ ] delete_bot +- [ ] delete_bot_alias +- [ ] delete_bot_channel_association +- [ ] delete_bot_version +- [ ] delete_intent +- [ ] delete_intent_version +- [ ] delete_slot_type +- [ ] delete_slot_type_version +- [ ] delete_utterances +- [ ] get_bot +- [ ] get_bot_alias +- [ ] get_bot_aliases +- [ ] get_bot_channel_association +- [ ] get_bot_channel_associations +- [ ] get_bot_versions +- [ ] get_bots +- [ ] get_builtin_intent +- [ ] get_builtin_intents +- [ ] get_builtin_slot_types +- [ ] get_export +- [ ] get_import +- [ ] get_intent +- [ ] get_intent_versions +- [ ] get_intents +- [ ] get_slot_type +- [ ] get_slot_type_versions +- [ ] get_slot_types +- [ ] get_utterances_view +- [ ] put_bot +- [ ] put_bot_alias +- [ ] put_intent +- [ ] put_slot_type +- [ ] start_import + +## lex-runtime - 0% implemented +- [ ] post_content +- [ ] post_text + +## lightsail - 0% implemented +- [ ] allocate_static_ip +- [ ] attach_disk +- [ ] attach_instances_to_load_balancer +- [ ] attach_load_balancer_tls_certificate +- [ ] attach_static_ip +- [ ] close_instance_public_ports +- [ ] create_disk +- [ ] create_disk_from_snapshot +- [ ] create_disk_snapshot +- [ ] create_domain +- [ ] create_domain_entry +- [ ] create_instance_snapshot +- [ ] create_instances +- [ ] create_instances_from_snapshot +- [ ] create_key_pair +- [ ] create_load_balancer +- [ ] create_load_balancer_tls_certificate +- [ ] delete_disk +- [ ] delete_disk_snapshot +- [ ] delete_domain +- [ ] delete_domain_entry +- [ ] delete_instance +- [ ] delete_instance_snapshot +- [ ] delete_key_pair +- [ ] delete_load_balancer +- [ ] delete_load_balancer_tls_certificate +- [ ] detach_disk +- [ ] detach_instances_from_load_balancer +- [ ] detach_static_ip +- [ ] download_default_key_pair +- [ ] get_active_names +- [ ] get_blueprints +- [ ] get_bundles +- [ ] get_disk +- [ ] get_disk_snapshot +- [ ] get_disk_snapshots +- [ ] get_disks +- [ ] get_domain +- [ ] get_domains +- [ ] get_instance +- [ ] get_instance_access_details +- [ ] get_instance_metric_data +- [ ] get_instance_port_states +- [ ] get_instance_snapshot +- [ ] get_instance_snapshots +- [ ] get_instance_state +- [ ] get_instances +- [ ] get_key_pair +- [ ] get_key_pairs +- [ ] get_load_balancer +- [ ] get_load_balancer_metric_data +- [ ] get_load_balancer_tls_certificates +- [ ] get_load_balancers +- [ ] get_operation +- [ ] get_operations +- [ ] get_operations_for_resource +- [ ] get_regions +- [ ] get_static_ip +- [ ] get_static_ips +- [ ] import_key_pair +- [ ] is_vpc_peered +- [ ] open_instance_public_ports +- [ ] peer_vpc +- [ ] put_instance_public_ports +- [ ] reboot_instance +- [ ] release_static_ip +- [ ] start_instance +- [ ] stop_instance +- [ ] unpeer_vpc +- [ ] update_domain_entry +- [ ] update_load_balancer_attribute + +## logs - 27% implemented +- [ ] associate_kms_key +- [ ] 
cancel_export_task +- [ ] create_export_task +- [X] create_log_group +- [X] create_log_stream +- [ ] delete_destination +- [X] delete_log_group +- [X] delete_log_stream +- [ ] delete_metric_filter +- [ ] delete_resource_policy +- [ ] delete_retention_policy +- [ ] delete_subscription_filter +- [ ] describe_destinations +- [ ] describe_export_tasks +- [X] describe_log_groups +- [X] describe_log_streams +- [ ] describe_metric_filters +- [ ] describe_resource_policies +- [ ] describe_subscription_filters +- [ ] disassociate_kms_key +- [X] filter_log_events +- [X] get_log_events +- [ ] list_tags_log_group +- [ ] put_destination +- [ ] put_destination_policy +- [X] put_log_events +- [ ] put_metric_filter +- [ ] put_resource_policy +- [ ] put_retention_policy +- [ ] put_subscription_filter +- [ ] tag_log_group +- [ ] test_metric_filter +- [ ] untag_log_group + +## machinelearning - 0% implemented +- [ ] add_tags +- [ ] create_batch_prediction +- [ ] create_data_source_from_rds +- [ ] create_data_source_from_redshift +- [ ] create_data_source_from_s3 +- [ ] create_evaluation +- [ ] create_ml_model +- [ ] create_realtime_endpoint +- [ ] delete_batch_prediction +- [ ] delete_data_source +- [ ] delete_evaluation +- [ ] delete_ml_model +- [ ] delete_realtime_endpoint +- [ ] delete_tags +- [ ] describe_batch_predictions +- [ ] describe_data_sources +- [ ] describe_evaluations +- [ ] describe_ml_models +- [ ] describe_tags +- [ ] get_batch_prediction +- [ ] get_data_source +- [ ] get_evaluation +- [ ] get_ml_model +- [ ] predict +- [ ] update_batch_prediction +- [ ] update_data_source +- [ ] update_evaluation +- [ ] update_ml_model + +## marketplace-entitlement - 0% implemented +- [ ] get_entitlements + +## marketplacecommerceanalytics - 0% implemented +- [ ] generate_data_set +- [ ] start_support_data_export + +## mediaconvert - 0% implemented +- [ ] cancel_job +- [ ] create_job +- [ ] create_job_template +- [ ] create_preset +- [ ] create_queue +- [ ] delete_job_template +- [ ] delete_preset +- [ ] delete_queue +- [ ] describe_endpoints +- [ ] get_job +- [ ] get_job_template +- [ ] get_preset +- [ ] get_queue +- [ ] list_job_templates +- [ ] list_jobs +- [ ] list_presets +- [ ] list_queues +- [ ] update_job_template +- [ ] update_preset +- [ ] update_queue + +## medialive - 0% implemented +- [ ] create_channel +- [ ] create_input +- [ ] create_input_security_group +- [ ] delete_channel +- [ ] delete_input +- [ ] delete_input_security_group +- [ ] describe_channel +- [ ] describe_input +- [ ] describe_input_security_group +- [ ] list_channels +- [ ] list_input_security_groups +- [ ] list_inputs +- [ ] start_channel +- [ ] stop_channel +- [ ] update_channel +- [ ] update_input +- [ ] update_input_security_group + +## mediapackage - 0% implemented +- [ ] create_channel +- [ ] create_origin_endpoint +- [ ] delete_channel +- [ ] delete_origin_endpoint +- [ ] describe_channel +- [ ] describe_origin_endpoint +- [ ] list_channels +- [ ] list_origin_endpoints +- [ ] rotate_channel_credentials +- [ ] update_channel +- [ ] update_origin_endpoint + +## mediastore - 0% implemented +- [ ] create_container +- [ ] delete_container +- [ ] delete_container_policy +- [ ] delete_cors_policy +- [ ] describe_container +- [ ] get_container_policy +- [ ] get_cors_policy +- [ ] list_containers +- [ ] put_container_policy +- [ ] put_cors_policy + +## mediastore-data - 0% implemented +- [ ] delete_object +- [ ] describe_object +- [ ] get_object +- [ ] list_items +- [ ] put_object + +## meteringmarketplace - 0% implemented +- [ 
] batch_meter_usage +- [ ] meter_usage +- [ ] resolve_customer + +## mgh - 0% implemented +- [ ] associate_created_artifact +- [ ] associate_discovered_resource +- [ ] create_progress_update_stream +- [ ] delete_progress_update_stream +- [ ] describe_application_state +- [ ] describe_migration_task +- [ ] disassociate_created_artifact +- [ ] disassociate_discovered_resource +- [ ] import_migration_task +- [ ] list_created_artifacts +- [ ] list_discovered_resources +- [ ] list_migration_tasks +- [ ] list_progress_update_streams +- [ ] notify_application_state +- [ ] notify_migration_task_state +- [ ] put_resource_attributes + +## mobile - 0% implemented +- [ ] create_project +- [ ] delete_project +- [ ] describe_bundle +- [ ] describe_project +- [ ] export_bundle +- [ ] export_project +- [ ] list_bundles +- [ ] list_projects +- [ ] update_project + +## mq - 0% implemented +- [ ] create_broker +- [ ] create_configuration +- [ ] create_user +- [ ] delete_broker +- [ ] delete_user +- [ ] describe_broker +- [ ] describe_configuration +- [ ] describe_configuration_revision +- [ ] describe_user +- [ ] list_brokers +- [ ] list_configuration_revisions +- [ ] list_configurations +- [ ] list_users +- [ ] reboot_broker +- [ ] update_broker +- [ ] update_configuration +- [ ] update_user + +## mturk - 0% implemented +- [ ] accept_qualification_request +- [ ] approve_assignment +- [ ] associate_qualification_with_worker +- [ ] create_additional_assignments_for_hit +- [ ] create_hit +- [ ] create_hit_type +- [ ] create_hit_with_hit_type +- [ ] create_qualification_type +- [ ] create_worker_block +- [ ] delete_hit +- [ ] delete_qualification_type +- [ ] delete_worker_block +- [ ] disassociate_qualification_from_worker +- [ ] get_account_balance +- [ ] get_assignment +- [ ] get_file_upload_url +- [ ] get_hit +- [ ] get_qualification_score +- [ ] get_qualification_type +- [ ] list_assignments_for_hit +- [ ] list_bonus_payments +- [ ] list_hits +- [ ] list_hits_for_qualification_type +- [ ] list_qualification_requests +- [ ] list_qualification_types +- [ ] list_review_policy_results_for_hit +- [ ] list_reviewable_hits +- [ ] list_worker_blocks +- [ ] list_workers_with_qualification_type +- [ ] notify_workers +- [ ] reject_assignment +- [ ] reject_qualification_request +- [ ] send_bonus +- [ ] send_test_event_notification +- [ ] update_expiration_for_hit +- [ ] update_hit_review_status +- [ ] update_hit_type_of_hit +- [ ] update_notification_settings +- [ ] update_qualification_type + +## opsworks - 12% implemented +- [ ] assign_instance +- [ ] assign_volume +- [ ] associate_elastic_ip +- [ ] attach_elastic_load_balancer +- [ ] clone_stack +- [X] create_app +- [ ] create_deployment +- [X] create_instance +- [X] create_layer +- [X] create_stack +- [ ] create_user_profile +- [ ] delete_app +- [ ] delete_instance +- [ ] delete_layer +- [ ] delete_stack +- [ ] delete_user_profile +- [ ] deregister_ecs_cluster +- [ ] deregister_elastic_ip +- [ ] deregister_instance +- [ ] deregister_rds_db_instance +- [ ] deregister_volume +- [ ] describe_agent_versions +- [X] describe_apps +- [ ] describe_commands +- [ ] describe_deployments +- [ ] describe_ecs_clusters +- [ ] describe_elastic_ips +- [ ] describe_elastic_load_balancers +- [X] describe_instances +- [X] describe_layers +- [ ] describe_load_based_auto_scaling +- [ ] describe_my_user_profile +- [ ] describe_operating_systems +- [ ] describe_permissions +- [ ] describe_raid_arrays +- [ ] describe_rds_db_instances +- [ ] describe_service_errors +- [ ] 
describe_stack_provisioning_parameters +- [ ] describe_stack_summary +- [X] describe_stacks +- [ ] describe_time_based_auto_scaling +- [ ] describe_user_profiles +- [ ] describe_volumes +- [ ] detach_elastic_load_balancer +- [ ] disassociate_elastic_ip +- [ ] get_hostname_suggestion +- [ ] grant_access +- [ ] list_tags +- [ ] reboot_instance +- [ ] register_ecs_cluster +- [ ] register_elastic_ip +- [ ] register_instance +- [ ] register_rds_db_instance +- [ ] register_volume +- [ ] set_load_based_auto_scaling +- [ ] set_permission +- [ ] set_time_based_auto_scaling +- [X] start_instance +- [ ] start_stack +- [ ] stop_instance +- [ ] stop_stack +- [ ] tag_resource +- [ ] unassign_instance +- [ ] unassign_volume +- [ ] untag_resource +- [ ] update_app +- [ ] update_elastic_ip +- [ ] update_instance +- [ ] update_layer +- [ ] update_my_user_profile +- [ ] update_rds_db_instance +- [ ] update_stack +- [ ] update_user_profile +- [ ] update_volume + +## opsworkscm - 0% implemented +- [ ] associate_node +- [ ] create_backup +- [ ] create_server +- [ ] delete_backup +- [ ] delete_server +- [ ] describe_account_attributes +- [ ] describe_backups +- [ ] describe_events +- [ ] describe_node_association_status +- [ ] describe_servers +- [ ] disassociate_node +- [ ] restore_server +- [ ] start_maintenance +- [ ] update_server +- [ ] update_server_engine_attributes + +## organizations - 30% implemented +- [ ] accept_handshake +- [ ] attach_policy +- [ ] cancel_handshake +- [X] create_account +- [X] create_organization +- [X] create_organizational_unit +- [ ] create_policy +- [ ] decline_handshake +- [ ] delete_organization +- [ ] delete_organizational_unit +- [ ] delete_policy +- [X] describe_account +- [ ] describe_create_account_status +- [ ] describe_handshake +- [X] describe_organization +- [X] describe_organizational_unit +- [ ] describe_policy +- [ ] detach_policy +- [ ] disable_aws_service_access +- [ ] disable_policy_type +- [ ] enable_all_features +- [ ] enable_aws_service_access +- [ ] enable_policy_type +- [ ] invite_account_to_organization +- [ ] leave_organization +- [X] list_accounts +- [X] list_accounts_for_parent +- [ ] list_aws_service_access_for_organization +- [X] list_children +- [ ] list_create_account_status +- [ ] list_handshakes_for_account +- [ ] list_handshakes_for_organization +- [X] list_organizational_units_for_parent +- [X] list_parents +- [ ] list_policies +- [ ] list_policies_for_target +- [X] list_roots +- [ ] list_targets_for_policy +- [X] move_account +- [ ] remove_account_from_organization +- [ ] update_organizational_unit +- [ ] update_policy + +## pinpoint - 0% implemented +- [ ] create_app +- [ ] create_campaign +- [ ] create_export_job +- [ ] create_import_job +- [ ] create_segment +- [ ] delete_adm_channel +- [ ] delete_apns_channel +- [ ] delete_apns_sandbox_channel +- [ ] delete_apns_voip_channel +- [ ] delete_apns_voip_sandbox_channel +- [ ] delete_app +- [ ] delete_baidu_channel +- [ ] delete_campaign +- [ ] delete_email_channel +- [ ] delete_endpoint +- [ ] delete_event_stream +- [ ] delete_gcm_channel +- [ ] delete_segment +- [ ] delete_sms_channel +- [ ] get_adm_channel +- [ ] get_apns_channel +- [ ] get_apns_sandbox_channel +- [ ] get_apns_voip_channel +- [ ] get_apns_voip_sandbox_channel +- [ ] get_app +- [ ] get_application_settings +- [ ] get_apps +- [ ] get_baidu_channel +- [ ] get_campaign +- [ ] get_campaign_activities +- [ ] get_campaign_version +- [ ] get_campaign_versions +- [ ] get_campaigns +- [ ] get_email_channel +- [ ] get_endpoint +- [ ] 
get_event_stream +- [ ] get_export_job +- [ ] get_export_jobs +- [ ] get_gcm_channel +- [ ] get_import_job +- [ ] get_import_jobs +- [ ] get_segment +- [ ] get_segment_export_jobs +- [ ] get_segment_import_jobs +- [ ] get_segment_version +- [ ] get_segment_versions +- [ ] get_segments +- [ ] get_sms_channel +- [ ] put_event_stream +- [ ] send_messages +- [ ] send_users_messages +- [ ] update_adm_channel +- [ ] update_apns_channel +- [ ] update_apns_sandbox_channel +- [ ] update_apns_voip_channel +- [ ] update_apns_voip_sandbox_channel +- [ ] update_application_settings +- [ ] update_baidu_channel +- [ ] update_campaign +- [ ] update_email_channel +- [ ] update_endpoint +- [ ] update_endpoints_batch +- [ ] update_gcm_channel +- [ ] update_segment +- [ ] update_sms_channel + +## polly - 83% implemented +- [X] delete_lexicon +- [X] describe_voices +- [X] get_lexicon +- [X] list_lexicons +- [X] put_lexicon +- [ ] synthesize_speech + +## pricing - 0% implemented +- [ ] describe_services +- [ ] get_attribute_values +- [ ] get_products + +## rds - 0% implemented +- [ ] add_role_to_db_cluster +- [ ] add_source_identifier_to_subscription +- [ ] add_tags_to_resource +- [ ] apply_pending_maintenance_action +- [ ] authorize_db_security_group_ingress +- [ ] copy_db_cluster_parameter_group +- [ ] copy_db_cluster_snapshot +- [ ] copy_db_parameter_group +- [ ] copy_db_snapshot +- [ ] copy_option_group +- [ ] create_db_cluster +- [ ] create_db_cluster_parameter_group +- [ ] create_db_cluster_snapshot +- [ ] create_db_instance +- [ ] create_db_instance_read_replica +- [ ] create_db_parameter_group +- [ ] create_db_security_group +- [ ] create_db_snapshot +- [ ] create_db_subnet_group +- [ ] create_event_subscription +- [ ] create_option_group +- [ ] delete_db_cluster +- [ ] delete_db_cluster_parameter_group +- [ ] delete_db_cluster_snapshot +- [ ] delete_db_instance +- [ ] delete_db_parameter_group +- [ ] delete_db_security_group +- [ ] delete_db_snapshot +- [ ] delete_db_subnet_group +- [ ] delete_event_subscription +- [ ] delete_option_group +- [ ] describe_account_attributes +- [ ] describe_certificates +- [ ] describe_db_cluster_parameter_groups +- [ ] describe_db_cluster_parameters +- [ ] describe_db_cluster_snapshot_attributes +- [ ] describe_db_cluster_snapshots +- [ ] describe_db_clusters +- [ ] describe_db_engine_versions +- [ ] describe_db_instances +- [ ] describe_db_log_files +- [ ] describe_db_parameter_groups +- [ ] describe_db_parameters +- [ ] describe_db_security_groups +- [ ] describe_db_snapshot_attributes +- [ ] describe_db_snapshots +- [ ] describe_db_subnet_groups +- [ ] describe_engine_default_cluster_parameters +- [ ] describe_engine_default_parameters +- [ ] describe_event_categories +- [ ] describe_event_subscriptions +- [ ] describe_events +- [ ] describe_option_group_options +- [ ] describe_option_groups +- [ ] describe_orderable_db_instance_options +- [ ] describe_pending_maintenance_actions +- [ ] describe_reserved_db_instances +- [ ] describe_reserved_db_instances_offerings +- [ ] describe_source_regions +- [ ] describe_valid_db_instance_modifications +- [ ] download_db_log_file_portion +- [ ] failover_db_cluster +- [ ] list_tags_for_resource +- [ ] modify_db_cluster +- [ ] modify_db_cluster_parameter_group +- [ ] modify_db_cluster_snapshot_attribute +- [ ] modify_db_instance +- [ ] modify_db_parameter_group +- [ ] modify_db_snapshot +- [ ] modify_db_snapshot_attribute +- [ ] modify_db_subnet_group +- [ ] modify_event_subscription +- [ ] modify_option_group +- [ ] 
promote_read_replica +- [ ] promote_read_replica_db_cluster +- [ ] purchase_reserved_db_instances_offering +- [ ] reboot_db_instance +- [ ] remove_role_from_db_cluster +- [ ] remove_source_identifier_from_subscription +- [ ] remove_tags_from_resource +- [ ] reset_db_cluster_parameter_group +- [ ] reset_db_parameter_group +- [ ] restore_db_cluster_from_s3 +- [ ] restore_db_cluster_from_snapshot +- [ ] restore_db_cluster_to_point_in_time +- [ ] restore_db_instance_from_db_snapshot +- [ ] restore_db_instance_from_s3 +- [ ] restore_db_instance_to_point_in_time +- [ ] revoke_db_security_group_ingress +- [ ] start_db_instance +- [ ] stop_db_instance + +## redshift - 41% implemented +- [ ] authorize_cluster_security_group_ingress +- [ ] authorize_snapshot_access +- [ ] copy_cluster_snapshot +- [X] create_cluster +- [X] create_cluster_parameter_group +- [X] create_cluster_security_group +- [X] create_cluster_snapshot +- [X] create_cluster_subnet_group +- [ ] create_event_subscription +- [ ] create_hsm_client_certificate +- [ ] create_hsm_configuration +- [X] create_snapshot_copy_grant +- [X] create_tags +- [X] delete_cluster +- [X] delete_cluster_parameter_group +- [X] delete_cluster_security_group +- [X] delete_cluster_snapshot +- [X] delete_cluster_subnet_group +- [ ] delete_event_subscription +- [ ] delete_hsm_client_certificate +- [ ] delete_hsm_configuration +- [X] delete_snapshot_copy_grant +- [X] delete_tags +- [X] describe_cluster_parameter_groups +- [ ] describe_cluster_parameters +- [X] describe_cluster_security_groups +- [X] describe_cluster_snapshots +- [X] describe_cluster_subnet_groups +- [ ] describe_cluster_versions +- [X] describe_clusters +- [ ] describe_default_cluster_parameters +- [ ] describe_event_categories +- [ ] describe_event_subscriptions +- [ ] describe_events +- [ ] describe_hsm_client_certificates +- [ ] describe_hsm_configurations +- [ ] describe_logging_status +- [ ] describe_orderable_cluster_options +- [ ] describe_reserved_node_offerings +- [ ] describe_reserved_nodes +- [ ] describe_resize +- [X] describe_snapshot_copy_grants +- [ ] describe_table_restore_status +- [X] describe_tags +- [ ] disable_logging +- [X] disable_snapshot_copy +- [ ] enable_logging +- [X] enable_snapshot_copy +- [ ] get_cluster_credentials +- [X] modify_cluster +- [ ] modify_cluster_iam_roles +- [ ] modify_cluster_parameter_group +- [ ] modify_cluster_subnet_group +- [ ] modify_event_subscription +- [X] modify_snapshot_copy_retention_period +- [ ] purchase_reserved_node_offering +- [ ] reboot_cluster +- [ ] reset_cluster_parameter_group +- [X] restore_from_cluster_snapshot +- [ ] restore_table_from_cluster_snapshot +- [ ] revoke_cluster_security_group_ingress +- [ ] revoke_snapshot_access +- [ ] rotate_encryption_key + +## rekognition - 0% implemented +- [ ] compare_faces +- [ ] create_collection +- [ ] create_stream_processor +- [ ] delete_collection +- [ ] delete_faces +- [ ] delete_stream_processor +- [ ] describe_stream_processor +- [ ] detect_faces +- [ ] detect_labels +- [ ] detect_moderation_labels +- [ ] detect_text +- [ ] get_celebrity_info +- [ ] get_celebrity_recognition +- [ ] get_content_moderation +- [ ] get_face_detection +- [ ] get_face_search +- [ ] get_label_detection +- [ ] get_person_tracking +- [ ] index_faces +- [ ] list_collections +- [ ] list_faces +- [ ] list_stream_processors +- [ ] recognize_celebrities +- [ ] search_faces +- [ ] search_faces_by_image +- [ ] start_celebrity_recognition +- [ ] start_content_moderation +- [ ] start_face_detection +- [ ] 
start_face_search +- [ ] start_label_detection +- [ ] start_person_tracking +- [ ] start_stream_processor +- [ ] stop_stream_processor + +## resource-groups - 0% implemented +- [ ] create_group +- [ ] delete_group +- [ ] get_group +- [ ] get_group_query +- [ ] get_tags +- [ ] list_group_resources +- [ ] list_groups +- [ ] search_resources +- [ ] tag +- [ ] untag +- [ ] update_group +- [ ] update_group_query + +## resourcegroupstaggingapi - 60% implemented +- [X] get_resources +- [X] get_tag_keys +- [X] get_tag_values +- [ ] tag_resources +- [ ] untag_resources + +## route53 - 12% implemented +- [ ] associate_vpc_with_hosted_zone +- [ ] change_resource_record_sets +- [X] change_tags_for_resource +- [X] create_health_check +- [X] create_hosted_zone +- [ ] create_query_logging_config +- [ ] create_reusable_delegation_set +- [ ] create_traffic_policy +- [ ] create_traffic_policy_instance +- [ ] create_traffic_policy_version +- [ ] create_vpc_association_authorization +- [X] delete_health_check +- [X] delete_hosted_zone +- [ ] delete_query_logging_config +- [ ] delete_reusable_delegation_set +- [ ] delete_traffic_policy +- [ ] delete_traffic_policy_instance +- [ ] delete_vpc_association_authorization +- [ ] disassociate_vpc_from_hosted_zone +- [ ] get_account_limit +- [ ] get_change +- [ ] get_checker_ip_ranges +- [ ] get_geo_location +- [ ] get_health_check +- [ ] get_health_check_count +- [ ] get_health_check_last_failure_reason +- [ ] get_health_check_status +- [X] get_hosted_zone +- [ ] get_hosted_zone_count +- [ ] get_hosted_zone_limit +- [ ] get_query_logging_config +- [ ] get_reusable_delegation_set +- [ ] get_reusable_delegation_set_limit +- [ ] get_traffic_policy +- [ ] get_traffic_policy_instance +- [ ] get_traffic_policy_instance_count +- [ ] list_geo_locations +- [ ] list_health_checks +- [ ] list_hosted_zones +- [ ] list_hosted_zones_by_name +- [ ] list_query_logging_configs +- [ ] list_resource_record_sets +- [ ] list_reusable_delegation_sets +- [X] list_tags_for_resource +- [ ] list_tags_for_resources +- [ ] list_traffic_policies +- [ ] list_traffic_policy_instances +- [ ] list_traffic_policy_instances_by_hosted_zone +- [ ] list_traffic_policy_instances_by_policy +- [ ] list_traffic_policy_versions +- [ ] list_vpc_association_authorizations +- [ ] test_dns_answer +- [ ] update_health_check +- [ ] update_hosted_zone_comment +- [ ] update_traffic_policy_comment +- [ ] update_traffic_policy_instance + +## route53domains - 0% implemented +- [ ] check_domain_availability +- [ ] check_domain_transferability +- [ ] delete_tags_for_domain +- [ ] disable_domain_auto_renew +- [ ] disable_domain_transfer_lock +- [ ] enable_domain_auto_renew +- [ ] enable_domain_transfer_lock +- [ ] get_contact_reachability_status +- [ ] get_domain_detail +- [ ] get_domain_suggestions +- [ ] get_operation_detail +- [ ] list_domains +- [ ] list_operations +- [ ] list_tags_for_domain +- [ ] register_domain +- [ ] renew_domain +- [ ] resend_contact_reachability_email +- [ ] retrieve_domain_auth_code +- [ ] transfer_domain +- [ ] update_domain_contact +- [ ] update_domain_contact_privacy +- [ ] update_domain_nameservers +- [ ] update_tags_for_domain +- [ ] view_billing + +## s3 - 15% implemented +- [ ] abort_multipart_upload +- [ ] complete_multipart_upload +- [ ] copy_object +- [X] create_bucket +- [ ] create_multipart_upload +- [X] delete_bucket +- [ ] delete_bucket_analytics_configuration +- [X] delete_bucket_cors +- [ ] delete_bucket_encryption +- [ ] delete_bucket_inventory_configuration +- [ ] 
delete_bucket_lifecycle +- [ ] delete_bucket_metrics_configuration +- [X] delete_bucket_policy +- [ ] delete_bucket_replication +- [X] delete_bucket_tagging +- [ ] delete_bucket_website +- [ ] delete_object +- [ ] delete_object_tagging +- [ ] delete_objects +- [ ] get_bucket_accelerate_configuration +- [X] get_bucket_acl +- [ ] get_bucket_analytics_configuration +- [ ] get_bucket_cors +- [ ] get_bucket_encryption +- [ ] get_bucket_inventory_configuration +- [ ] get_bucket_lifecycle +- [ ] get_bucket_lifecycle_configuration +- [ ] get_bucket_location +- [ ] get_bucket_logging +- [ ] get_bucket_metrics_configuration +- [ ] get_bucket_notification +- [ ] get_bucket_notification_configuration +- [X] get_bucket_policy +- [ ] get_bucket_replication +- [ ] get_bucket_request_payment +- [ ] get_bucket_tagging +- [X] get_bucket_versioning +- [ ] get_bucket_website +- [ ] get_object +- [ ] get_object_acl +- [ ] get_object_tagging +- [ ] get_object_torrent +- [ ] head_bucket +- [ ] head_object +- [ ] list_bucket_analytics_configurations +- [ ] list_bucket_inventory_configurations +- [ ] list_bucket_metrics_configurations +- [ ] list_buckets +- [ ] list_multipart_uploads +- [ ] list_object_versions +- [ ] list_objects +- [ ] list_objects_v2 +- [ ] list_parts +- [ ] put_bucket_accelerate_configuration +- [ ] put_bucket_acl +- [ ] put_bucket_analytics_configuration +- [X] put_bucket_cors +- [ ] put_bucket_encryption +- [ ] put_bucket_inventory_configuration +- [ ] put_bucket_lifecycle +- [ ] put_bucket_lifecycle_configuration +- [X] put_bucket_logging +- [ ] put_bucket_metrics_configuration +- [ ] put_bucket_notification +- [X] put_bucket_notification_configuration +- [ ] put_bucket_policy +- [ ] put_bucket_replication +- [ ] put_bucket_request_payment +- [X] put_bucket_tagging +- [ ] put_bucket_versioning +- [ ] put_bucket_website +- [ ] put_object +- [ ] put_object_acl +- [ ] put_object_tagging +- [ ] restore_object +- [ ] select_object_content +- [ ] upload_part +- [ ] upload_part_copy + +## sagemaker - 0% implemented +- [ ] add_tags +- [ ] create_endpoint +- [ ] create_endpoint_config +- [ ] create_model +- [ ] create_notebook_instance +- [ ] create_notebook_instance_lifecycle_config +- [ ] create_presigned_notebook_instance_url +- [ ] create_training_job +- [ ] delete_endpoint +- [ ] delete_endpoint_config +- [ ] delete_model +- [ ] delete_notebook_instance +- [ ] delete_notebook_instance_lifecycle_config +- [ ] delete_tags +- [ ] describe_endpoint +- [ ] describe_endpoint_config +- [ ] describe_model +- [ ] describe_notebook_instance +- [ ] describe_notebook_instance_lifecycle_config +- [ ] describe_training_job +- [ ] list_endpoint_configs +- [ ] list_endpoints +- [ ] list_models +- [ ] list_notebook_instance_lifecycle_configs +- [ ] list_notebook_instances +- [ ] list_tags +- [ ] list_training_jobs +- [ ] start_notebook_instance +- [ ] stop_notebook_instance +- [ ] stop_training_job +- [ ] update_endpoint +- [ ] update_endpoint_weights_and_capacities +- [ ] update_notebook_instance +- [ ] update_notebook_instance_lifecycle_config + +## sagemaker-runtime - 0% implemented +- [ ] invoke_endpoint + +## sdb - 0% implemented +- [ ] batch_delete_attributes +- [ ] batch_put_attributes +- [ ] create_domain +- [ ] delete_attributes +- [ ] delete_domain +- [ ] domain_metadata +- [ ] get_attributes +- [ ] list_domains +- [ ] put_attributes +- [ ] select + +## secretsmanager - 33% implemented +- [ ] cancel_rotate_secret +- [X] create_secret +- [ ] delete_secret +- [X] describe_secret +- [X] 
get_random_password +- [X] get_secret_value +- [ ] list_secret_version_ids +- [ ] list_secrets +- [ ] put_secret_value +- [ ] restore_secret +- [X] rotate_secret +- [ ] tag_resource +- [ ] untag_resource +- [ ] update_secret +- [ ] update_secret_version_stage + +## serverlessrepo - 0% implemented +- [ ] create_application +- [ ] create_application_version +- [ ] create_cloud_formation_change_set +- [ ] delete_application +- [ ] get_application +- [ ] get_application_policy +- [ ] list_application_versions +- [ ] list_applications +- [ ] put_application_policy +- [ ] update_application + +## servicecatalog - 0% implemented +- [ ] accept_portfolio_share +- [ ] associate_principal_with_portfolio +- [ ] associate_product_with_portfolio +- [ ] associate_tag_option_with_resource +- [ ] copy_product +- [ ] create_constraint +- [ ] create_portfolio +- [ ] create_portfolio_share +- [ ] create_product +- [ ] create_provisioned_product_plan +- [ ] create_provisioning_artifact +- [ ] create_tag_option +- [ ] delete_constraint +- [ ] delete_portfolio +- [ ] delete_portfolio_share +- [ ] delete_product +- [ ] delete_provisioned_product_plan +- [ ] delete_provisioning_artifact +- [ ] delete_tag_option +- [ ] describe_constraint +- [ ] describe_copy_product_status +- [ ] describe_portfolio +- [ ] describe_product +- [ ] describe_product_as_admin +- [ ] describe_product_view +- [ ] describe_provisioned_product +- [ ] describe_provisioned_product_plan +- [ ] describe_provisioning_artifact +- [ ] describe_provisioning_parameters +- [ ] describe_record +- [ ] describe_tag_option +- [ ] disassociate_principal_from_portfolio +- [ ] disassociate_product_from_portfolio +- [ ] disassociate_tag_option_from_resource +- [ ] execute_provisioned_product_plan +- [ ] list_accepted_portfolio_shares +- [ ] list_constraints_for_portfolio +- [ ] list_launch_paths +- [ ] list_portfolio_access +- [ ] list_portfolios +- [ ] list_portfolios_for_product +- [ ] list_principals_for_portfolio +- [ ] list_provisioned_product_plans +- [ ] list_provisioning_artifacts +- [ ] list_record_history +- [ ] list_resources_for_tag_option +- [ ] list_tag_options +- [ ] provision_product +- [ ] reject_portfolio_share +- [ ] scan_provisioned_products +- [ ] search_products +- [ ] search_products_as_admin +- [ ] search_provisioned_products +- [ ] terminate_provisioned_product +- [ ] update_constraint +- [ ] update_portfolio +- [ ] update_product +- [ ] update_provisioned_product +- [ ] update_provisioning_artifact +- [ ] update_tag_option + +## servicediscovery - 0% implemented +- [ ] create_private_dns_namespace +- [ ] create_public_dns_namespace +- [ ] create_service +- [ ] delete_namespace +- [ ] delete_service +- [ ] deregister_instance +- [ ] get_instance +- [ ] get_instances_health_status +- [ ] get_namespace +- [ ] get_operation +- [ ] get_service +- [ ] list_instances +- [ ] list_namespaces +- [ ] list_operations +- [ ] list_services +- [ ] register_instance +- [ ] update_instance_custom_health_status +- [ ] update_service + +## ses - 11% implemented +- [ ] clone_receipt_rule_set +- [ ] create_configuration_set +- [ ] create_configuration_set_event_destination +- [ ] create_configuration_set_tracking_options +- [ ] create_custom_verification_email_template +- [ ] create_receipt_filter +- [ ] create_receipt_rule +- [ ] create_receipt_rule_set +- [ ] create_template +- [ ] delete_configuration_set +- [ ] delete_configuration_set_event_destination +- [ ] delete_configuration_set_tracking_options +- [ ] 
delete_custom_verification_email_template +- [X] delete_identity +- [ ] delete_identity_policy +- [ ] delete_receipt_filter +- [ ] delete_receipt_rule +- [ ] delete_receipt_rule_set +- [ ] delete_template +- [ ] delete_verified_email_address +- [ ] describe_active_receipt_rule_set +- [ ] describe_configuration_set +- [ ] describe_receipt_rule +- [ ] describe_receipt_rule_set +- [ ] get_account_sending_enabled +- [ ] get_custom_verification_email_template +- [ ] get_identity_dkim_attributes +- [ ] get_identity_mail_from_domain_attributes +- [ ] get_identity_notification_attributes +- [ ] get_identity_policies +- [ ] get_identity_verification_attributes +- [X] get_send_quota +- [ ] get_send_statistics +- [ ] get_template +- [ ] list_configuration_sets +- [ ] list_custom_verification_email_templates +- [X] list_identities +- [ ] list_identity_policies +- [ ] list_receipt_filters +- [ ] list_receipt_rule_sets +- [ ] list_templates +- [X] list_verified_email_addresses +- [ ] put_identity_policy +- [ ] reorder_receipt_rule_set +- [ ] send_bounce +- [ ] send_bulk_templated_email +- [ ] send_custom_verification_email +- [X] send_email +- [X] send_raw_email +- [ ] send_templated_email +- [ ] set_active_receipt_rule_set +- [ ] set_identity_dkim_enabled +- [ ] set_identity_feedback_forwarding_enabled +- [ ] set_identity_headers_in_notifications_enabled +- [ ] set_identity_mail_from_domain +- [ ] set_identity_notification_topic +- [ ] set_receipt_rule_position +- [ ] test_render_template +- [ ] update_account_sending_enabled +- [ ] update_configuration_set_event_destination +- [ ] update_configuration_set_reputation_metrics_enabled +- [ ] update_configuration_set_sending_enabled +- [ ] update_configuration_set_tracking_options +- [ ] update_custom_verification_email_template +- [ ] update_receipt_rule +- [ ] update_template +- [ ] verify_domain_dkim +- [ ] verify_domain_identity +- [X] verify_email_address +- [X] verify_email_identity + +## shield - 0% implemented +- [ ] create_protection +- [ ] create_subscription +- [ ] delete_protection +- [ ] delete_subscription +- [ ] describe_attack +- [ ] describe_protection +- [ ] describe_subscription +- [ ] get_subscription_state +- [ ] list_attacks +- [ ] list_protections + +## sms - 0% implemented +- [ ] create_replication_job +- [ ] delete_replication_job +- [ ] delete_server_catalog +- [ ] disassociate_connector +- [ ] get_connectors +- [ ] get_replication_jobs +- [ ] get_replication_runs +- [ ] get_servers +- [ ] import_server_catalog +- [ ] start_on_demand_replication_run +- [ ] update_replication_job + +## snowball - 0% implemented +- [ ] cancel_cluster +- [ ] cancel_job +- [ ] create_address +- [ ] create_cluster +- [ ] create_job +- [ ] describe_address +- [ ] describe_addresses +- [ ] describe_cluster +- [ ] describe_job +- [ ] get_job_manifest +- [ ] get_job_unlock_code +- [ ] get_snowball_usage +- [ ] list_cluster_jobs +- [ ] list_clusters +- [ ] list_jobs +- [ ] update_cluster +- [ ] update_job + +## sns - 53% implemented +- [ ] add_permission +- [ ] check_if_phone_number_is_opted_out +- [ ] confirm_subscription +- [X] create_platform_application +- [X] create_platform_endpoint +- [X] create_topic +- [X] delete_endpoint +- [X] delete_platform_application +- [X] delete_topic +- [ ] get_endpoint_attributes +- [ ] get_platform_application_attributes +- [ ] get_sms_attributes +- [X] get_subscription_attributes +- [ ] get_topic_attributes +- [X] list_endpoints_by_platform_application +- [ ] list_phone_numbers_opted_out +- [X] 
list_platform_applications +- [X] list_subscriptions +- [ ] list_subscriptions_by_topic +- [X] list_topics +- [ ] opt_in_phone_number +- [X] publish +- [ ] remove_permission +- [X] set_endpoint_attributes +- [ ] set_platform_application_attributes +- [ ] set_sms_attributes +- [X] set_subscription_attributes +- [ ] set_topic_attributes +- [X] subscribe +- [X] unsubscribe + +## sqs - 65% implemented +- [X] add_permission +- [X] change_message_visibility +- [ ] change_message_visibility_batch +- [X] create_queue +- [X] delete_message +- [ ] delete_message_batch +- [X] delete_queue +- [ ] get_queue_attributes +- [ ] get_queue_url +- [X] list_dead_letter_source_queues +- [ ] list_queue_tags +- [X] list_queues +- [X] purge_queue +- [ ] receive_message +- [X] remove_permission +- [X] send_message +- [ ] send_message_batch +- [X] set_queue_attributes +- [X] tag_queue +- [X] untag_queue + +## ssm - 11% implemented +- [X] add_tags_to_resource +- [ ] cancel_command +- [ ] create_activation +- [ ] create_association +- [ ] create_association_batch +- [ ] create_document +- [ ] create_maintenance_window +- [ ] create_patch_baseline +- [ ] create_resource_data_sync +- [ ] delete_activation +- [ ] delete_association +- [ ] delete_document +- [ ] delete_maintenance_window +- [X] delete_parameter +- [X] delete_parameters +- [ ] delete_patch_baseline +- [ ] delete_resource_data_sync +- [ ] deregister_managed_instance +- [ ] deregister_patch_baseline_for_patch_group +- [ ] deregister_target_from_maintenance_window +- [ ] deregister_task_from_maintenance_window +- [ ] describe_activations +- [ ] describe_association +- [ ] describe_automation_executions +- [ ] describe_automation_step_executions +- [ ] describe_available_patches +- [ ] describe_document +- [ ] describe_document_permission +- [ ] describe_effective_instance_associations +- [ ] describe_effective_patches_for_patch_baseline +- [ ] describe_instance_associations_status +- [ ] describe_instance_information +- [ ] describe_instance_patch_states +- [ ] describe_instance_patch_states_for_patch_group +- [ ] describe_instance_patches +- [ ] describe_maintenance_window_execution_task_invocations +- [ ] describe_maintenance_window_execution_tasks +- [ ] describe_maintenance_window_executions +- [ ] describe_maintenance_window_targets +- [ ] describe_maintenance_window_tasks +- [ ] describe_maintenance_windows +- [ ] describe_parameters +- [ ] describe_patch_baselines +- [ ] describe_patch_group_state +- [ ] describe_patch_groups +- [ ] get_automation_execution +- [ ] get_command_invocation +- [ ] get_default_patch_baseline +- [ ] get_deployable_patch_snapshot_for_instance +- [ ] get_document +- [ ] get_inventory +- [ ] get_inventory_schema +- [ ] get_maintenance_window +- [ ] get_maintenance_window_execution +- [ ] get_maintenance_window_execution_task +- [ ] get_maintenance_window_execution_task_invocation +- [ ] get_maintenance_window_task +- [X] get_parameter +- [ ] get_parameter_history +- [X] get_parameters +- [X] get_parameters_by_path +- [ ] get_patch_baseline +- [ ] get_patch_baseline_for_patch_group +- [ ] list_association_versions +- [ ] list_associations +- [ ] list_command_invocations +- [X] list_commands +- [ ] list_compliance_items +- [ ] list_compliance_summaries +- [ ] list_document_versions +- [ ] list_documents +- [ ] list_inventory_entries +- [ ] list_resource_compliance_summaries +- [ ] list_resource_data_sync +- [X] list_tags_for_resource +- [ ] modify_document_permission +- [ ] put_compliance_items +- [ ] put_inventory +- [X] 
put_parameter +- [ ] register_default_patch_baseline +- [ ] register_patch_baseline_for_patch_group +- [ ] register_target_with_maintenance_window +- [ ] register_task_with_maintenance_window +- [X] remove_tags_from_resource +- [ ] send_automation_signal +- [X] send_command +- [ ] start_automation_execution +- [ ] stop_automation_execution +- [ ] update_association +- [ ] update_association_status +- [ ] update_document +- [ ] update_document_default_version +- [ ] update_maintenance_window +- [ ] update_maintenance_window_target +- [ ] update_maintenance_window_task +- [ ] update_managed_instance_role +- [ ] update_patch_baseline + +## stepfunctions - 0% implemented +- [ ] create_activity +- [ ] create_state_machine +- [ ] delete_activity +- [ ] delete_state_machine +- [ ] describe_activity +- [ ] describe_execution +- [ ] describe_state_machine +- [ ] describe_state_machine_for_execution +- [ ] get_activity_task +- [ ] get_execution_history +- [ ] list_activities +- [ ] list_executions +- [ ] list_state_machines +- [ ] send_task_failure +- [ ] send_task_heartbeat +- [ ] send_task_success +- [ ] start_execution +- [ ] stop_execution +- [ ] update_state_machine + +## storagegateway - 0% implemented +- [ ] activate_gateway +- [ ] add_cache +- [ ] add_tags_to_resource +- [ ] add_upload_buffer +- [ ] add_working_storage +- [ ] cancel_archival +- [ ] cancel_retrieval +- [ ] create_cached_iscsi_volume +- [ ] create_nfs_file_share +- [ ] create_snapshot +- [ ] create_snapshot_from_volume_recovery_point +- [ ] create_stored_iscsi_volume +- [ ] create_tape_with_barcode +- [ ] create_tapes +- [ ] delete_bandwidth_rate_limit +- [ ] delete_chap_credentials +- [ ] delete_file_share +- [ ] delete_gateway +- [ ] delete_snapshot_schedule +- [ ] delete_tape +- [ ] delete_tape_archive +- [ ] delete_volume +- [ ] describe_bandwidth_rate_limit +- [ ] describe_cache +- [ ] describe_cached_iscsi_volumes +- [ ] describe_chap_credentials +- [ ] describe_gateway_information +- [ ] describe_maintenance_start_time +- [ ] describe_nfs_file_shares +- [ ] describe_snapshot_schedule +- [ ] describe_stored_iscsi_volumes +- [ ] describe_tape_archives +- [ ] describe_tape_recovery_points +- [ ] describe_tapes +- [ ] describe_upload_buffer +- [ ] describe_vtl_devices +- [ ] describe_working_storage +- [ ] disable_gateway +- [ ] list_file_shares +- [ ] list_gateways +- [ ] list_local_disks +- [ ] list_tags_for_resource +- [ ] list_tapes +- [ ] list_volume_initiators +- [ ] list_volume_recovery_points +- [ ] list_volumes +- [ ] notify_when_uploaded +- [ ] refresh_cache +- [ ] remove_tags_from_resource +- [ ] reset_cache +- [ ] retrieve_tape_archive +- [ ] retrieve_tape_recovery_point +- [ ] set_local_console_password +- [ ] shutdown_gateway +- [ ] start_gateway +- [ ] update_bandwidth_rate_limit +- [ ] update_chap_credentials +- [ ] update_gateway_information +- [ ] update_gateway_software_now +- [ ] update_maintenance_start_time +- [ ] update_nfs_file_share +- [ ] update_snapshot_schedule +- [ ] update_vtl_device_type + +## sts - 42% implemented +- [X] assume_role +- [ ] assume_role_with_saml +- [ ] assume_role_with_web_identity +- [ ] decode_authorization_message +- [ ] get_caller_identity +- [X] get_federation_token +- [X] get_session_token + +## support - 0% implemented +- [ ] add_attachments_to_set +- [ ] add_communication_to_case +- [ ] create_case +- [ ] describe_attachment +- [ ] describe_cases +- [ ] describe_communications +- [ ] describe_services +- [ ] describe_severity_levels +- [ ] 
describe_trusted_advisor_check_refresh_statuses +- [ ] describe_trusted_advisor_check_result +- [ ] describe_trusted_advisor_check_summaries +- [ ] describe_trusted_advisor_checks +- [ ] refresh_trusted_advisor_check +- [ ] resolve_case + +## swf - 58% implemented +- [ ] count_closed_workflow_executions +- [ ] count_open_workflow_executions +- [X] count_pending_activity_tasks +- [X] count_pending_decision_tasks +- [ ] deprecate_activity_type +- [X] deprecate_domain +- [ ] deprecate_workflow_type +- [ ] describe_activity_type +- [X] describe_domain +- [X] describe_workflow_execution +- [ ] describe_workflow_type +- [ ] get_workflow_execution_history +- [ ] list_activity_types +- [X] list_closed_workflow_executions +- [X] list_domains +- [X] list_open_workflow_executions +- [ ] list_workflow_types +- [X] poll_for_activity_task +- [X] poll_for_decision_task +- [X] record_activity_task_heartbeat +- [ ] register_activity_type +- [X] register_domain +- [ ] register_workflow_type +- [ ] request_cancel_workflow_execution +- [ ] respond_activity_task_canceled +- [X] respond_activity_task_completed +- [X] respond_activity_task_failed +- [X] respond_decision_task_completed +- [X] signal_workflow_execution +- [X] start_workflow_execution +- [X] terminate_workflow_execution + +## transcribe - 0% implemented +- [ ] create_vocabulary +- [ ] delete_vocabulary +- [ ] get_transcription_job +- [ ] get_vocabulary +- [ ] list_transcription_jobs +- [ ] list_vocabularies +- [ ] start_transcription_job +- [ ] update_vocabulary + +## translate - 0% implemented +- [ ] translate_text + +## waf - 0% implemented +- [ ] create_byte_match_set +- [ ] create_geo_match_set +- [ ] create_ip_set +- [ ] create_rate_based_rule +- [ ] create_regex_match_set +- [ ] create_regex_pattern_set +- [ ] create_rule +- [ ] create_rule_group +- [ ] create_size_constraint_set +- [ ] create_sql_injection_match_set +- [ ] create_web_acl +- [ ] create_xss_match_set +- [ ] delete_byte_match_set +- [ ] delete_geo_match_set +- [ ] delete_ip_set +- [ ] delete_permission_policy +- [ ] delete_rate_based_rule +- [ ] delete_regex_match_set +- [ ] delete_regex_pattern_set +- [ ] delete_rule +- [ ] delete_rule_group +- [ ] delete_size_constraint_set +- [ ] delete_sql_injection_match_set +- [ ] delete_web_acl +- [ ] delete_xss_match_set +- [ ] get_byte_match_set +- [ ] get_change_token +- [ ] get_change_token_status +- [ ] get_geo_match_set +- [ ] get_ip_set +- [ ] get_permission_policy +- [ ] get_rate_based_rule +- [ ] get_rate_based_rule_managed_keys +- [ ] get_regex_match_set +- [ ] get_regex_pattern_set +- [ ] get_rule +- [ ] get_rule_group +- [ ] get_sampled_requests +- [ ] get_size_constraint_set +- [ ] get_sql_injection_match_set +- [ ] get_web_acl +- [ ] get_xss_match_set +- [ ] list_activated_rules_in_rule_group +- [ ] list_byte_match_sets +- [ ] list_geo_match_sets +- [ ] list_ip_sets +- [ ] list_rate_based_rules +- [ ] list_regex_match_sets +- [ ] list_regex_pattern_sets +- [ ] list_rule_groups +- [ ] list_rules +- [ ] list_size_constraint_sets +- [ ] list_sql_injection_match_sets +- [ ] list_subscribed_rule_groups +- [ ] list_web_acls +- [ ] list_xss_match_sets +- [ ] put_permission_policy +- [ ] update_byte_match_set +- [ ] update_geo_match_set +- [ ] update_ip_set +- [ ] update_rate_based_rule +- [ ] update_regex_match_set +- [ ] update_regex_pattern_set +- [ ] update_rule +- [ ] update_rule_group +- [ ] update_size_constraint_set +- [ ] update_sql_injection_match_set +- [ ] update_web_acl +- [ ] update_xss_match_set + +## waf-regional - 
0% implemented +- [ ] associate_web_acl +- [ ] create_byte_match_set +- [ ] create_geo_match_set +- [ ] create_ip_set +- [ ] create_rate_based_rule +- [ ] create_regex_match_set +- [ ] create_regex_pattern_set +- [ ] create_rule +- [ ] create_rule_group +- [ ] create_size_constraint_set +- [ ] create_sql_injection_match_set +- [ ] create_web_acl +- [ ] create_xss_match_set +- [ ] delete_byte_match_set +- [ ] delete_geo_match_set +- [ ] delete_ip_set +- [ ] delete_permission_policy +- [ ] delete_rate_based_rule +- [ ] delete_regex_match_set +- [ ] delete_regex_pattern_set +- [ ] delete_rule +- [ ] delete_rule_group +- [ ] delete_size_constraint_set +- [ ] delete_sql_injection_match_set +- [ ] delete_web_acl +- [ ] delete_xss_match_set +- [ ] disassociate_web_acl +- [ ] get_byte_match_set +- [ ] get_change_token +- [ ] get_change_token_status +- [ ] get_geo_match_set +- [ ] get_ip_set +- [ ] get_permission_policy +- [ ] get_rate_based_rule +- [ ] get_rate_based_rule_managed_keys +- [ ] get_regex_match_set +- [ ] get_regex_pattern_set +- [ ] get_rule +- [ ] get_rule_group +- [ ] get_sampled_requests +- [ ] get_size_constraint_set +- [ ] get_sql_injection_match_set +- [ ] get_web_acl +- [ ] get_web_acl_for_resource +- [ ] get_xss_match_set +- [ ] list_activated_rules_in_rule_group +- [ ] list_byte_match_sets +- [ ] list_geo_match_sets +- [ ] list_ip_sets +- [ ] list_rate_based_rules +- [ ] list_regex_match_sets +- [ ] list_regex_pattern_sets +- [ ] list_resources_for_web_acl +- [ ] list_rule_groups +- [ ] list_rules +- [ ] list_size_constraint_sets +- [ ] list_sql_injection_match_sets +- [ ] list_subscribed_rule_groups +- [ ] list_web_acls +- [ ] list_xss_match_sets +- [ ] put_permission_policy +- [ ] update_byte_match_set +- [ ] update_geo_match_set +- [ ] update_ip_set +- [ ] update_rate_based_rule +- [ ] update_regex_match_set +- [ ] update_regex_pattern_set +- [ ] update_rule +- [ ] update_rule_group +- [ ] update_size_constraint_set +- [ ] update_sql_injection_match_set +- [ ] update_web_acl +- [ ] update_xss_match_set + +## workdocs - 0% implemented +- [ ] abort_document_version_upload +- [ ] activate_user +- [ ] add_resource_permissions +- [ ] create_comment +- [ ] create_custom_metadata +- [ ] create_folder +- [ ] create_labels +- [ ] create_notification_subscription +- [ ] create_user +- [ ] deactivate_user +- [ ] delete_comment +- [ ] delete_custom_metadata +- [ ] delete_document +- [ ] delete_folder +- [ ] delete_folder_contents +- [ ] delete_labels +- [ ] delete_notification_subscription +- [ ] delete_user +- [ ] describe_activities +- [ ] describe_comments +- [ ] describe_document_versions +- [ ] describe_folder_contents +- [ ] describe_groups +- [ ] describe_notification_subscriptions +- [ ] describe_resource_permissions +- [ ] describe_root_folders +- [ ] describe_users +- [ ] get_current_user +- [ ] get_document +- [ ] get_document_path +- [ ] get_document_version +- [ ] get_folder +- [ ] get_folder_path +- [ ] initiate_document_version_upload +- [ ] remove_all_resource_permissions +- [ ] remove_resource_permission +- [ ] update_document +- [ ] update_document_version +- [ ] update_folder +- [ ] update_user + +## workmail - 0% implemented +- [ ] associate_delegate_to_resource +- [ ] associate_member_to_group +- [ ] create_alias +- [ ] create_group +- [ ] create_resource +- [ ] create_user +- [ ] delete_alias +- [ ] delete_group +- [ ] delete_mailbox_permissions +- [ ] delete_resource +- [ ] delete_user +- [ ] deregister_from_work_mail +- [ ] describe_group +- [ ] 
describe_organization
+- [ ] describe_resource
+- [ ] describe_user
+- [ ] disassociate_delegate_from_resource
+- [ ] disassociate_member_from_group
+- [ ] list_aliases
+- [ ] list_group_members
+- [ ] list_groups
+- [ ] list_mailbox_permissions
+- [ ] list_organizations
+- [ ] list_resource_delegates
+- [ ] list_resources
+- [ ] list_users
+- [ ] put_mailbox_permissions
+- [ ] register_to_work_mail
+- [ ] reset_password
+- [ ] update_primary_email_address
+- [ ] update_resource
+
+## workspaces - 0% implemented
+- [ ] create_tags
+- [ ] create_workspaces
+- [ ] delete_tags
+- [ ] describe_tags
+- [ ] describe_workspace_bundles
+- [ ] describe_workspace_directories
+- [ ] describe_workspaces
+- [ ] describe_workspaces_connection_status
+- [ ] modify_workspace_properties
+- [ ] reboot_workspaces
+- [ ] rebuild_workspaces
+- [ ] start_workspaces
+- [ ] stop_workspaces
+- [ ] terminate_workspaces
+
+## xray - 0% implemented
+- [ ] batch_get_traces
+- [ ] get_service_graph
+- [ ] get_trace_graph
+- [ ] get_trace_summaries
+- [ ] put_telemetry_records
+- [ ] put_trace_segments
diff --git a/README.md b/README.md
index 8618b40423af..791226d6b50b 100644
--- a/README.md
+++ b/README.md
@@ -112,6 +112,8 @@ It gets even better! Moto isn't just for Python code and it isn't just for S3.
 |------------------------------------------------------------------------------|
 | KMS                     | @mock_kms            | basic endpoints done         |
 |------------------------------------------------------------------------------|
+| Organizations           | @mock_organizations  | some core endpoints done     |
+|------------------------------------------------------------------------------|
 | Polly                   | @mock_polly          | all endpoints done           |
 |------------------------------------------------------------------------------|
 | RDS                     | @mock_rds            | core endpoints done          |
diff --git a/docs/index.rst b/docs/index.rst
index 321342401fb9..66e12e4bda86 100644
--- a/docs/index.rst
+++ b/docs/index.rst
@@ -34,11 +34,11 @@ Currently implemented Services:
 | - DynamoDB2           | - @mock_dynamodb2   | - core endpoints + partial indexes|
 +-----------------------+---------------------+-----------------------------------+
 | EC2                   | @mock_ec2           | core endpoints done               |
-| - AMI                 |                     | core endpoints done               |
-| - EBS                 |                     | core endpoints done               |
-| - Instances           |                     | all endpoints done                |
-| - Security Groups     |                     | core endpoints done               |
-| - Tags                |                     | all endpoints done                |
+| - AMI                 |                     | - core endpoints done             |
+| - EBS                 |                     | - core endpoints done             |
+| - Instances           |                     | - all endpoints done              |
+| - Security Groups     |                     | - core endpoints done             |
+| - Tags                |                     | - all endpoints done              |
 +-----------------------+---------------------+-----------------------------------+
 | ECS                   | @mock_ecs           | basic endpoints done              |
 +-----------------------+---------------------+-----------------------------------+
diff --git a/moto/__init__.py b/moto/__init__.py
index b7b65320096e..6992c535e1da 100644
--- a/moto/__init__.py
+++ b/moto/__init__.py
@@ -3,7 +3,7 @@
 # logging.getLogger('boto').setLevel(logging.CRITICAL)
 
 __title__ = 'moto'
-__version__ = '1.3.5'
+__version__ = '1.3.6'
 
 from .acm import mock_acm  # flake8: noqa
 from .apigateway import mock_apigateway, mock_apigateway_deprecated  # flake8: noqa
@@ -28,6 +28,7 @@
 from .iam import mock_iam, mock_iam_deprecated  # flake8: noqa
 from .kinesis import mock_kinesis, mock_kinesis_deprecated  # flake8: noqa
 from .kms import mock_kms, mock_kms_deprecated  # flake8: noqa
+from .organizations import mock_organizations  # flake8: noqa
 from .opsworks import mock_opsworks, mock_opsworks_deprecated  # flake8: noqa
 from .polly import mock_polly  # flake8: noqa
 from .rds import mock_rds, mock_rds_deprecated  # flake8: noqa
diff --git a/moto/backends.py b/moto/backends.py
index 8d707373f049..d95424385d27 100644
--- a/moto/backends.py
+++ b/moto/backends.py
@@ -27,6 +27,7 @@
 from moto.kms import kms_backends
 from moto.logs import logs_backends
 from moto.opsworks import opsworks_backends
+from moto.organizations import organizations_backends
 from moto.polly import polly_backends
 from moto.rds2 import rds2_backends
 from moto.redshift import redshift_backends
@@ -74,6 +75,7 @@
     'kinesis': kinesis_backends,
     'kms': kms_backends,
     'opsworks': opsworks_backends,
+    'organizations': organizations_backends,
     'polly': polly_backends,
     'redshift': redshift_backends,
     'rds': rds2_backends,
diff --git a/moto/cloudformation/parsing.py b/moto/cloudformation/parsing.py
index c4059a06bc0a..35b05d1013bc 100644
--- a/moto/cloudformation/parsing.py
+++ b/moto/cloudformation/parsing.py
@@ -387,6 +387,7 @@ def __init__(self, stack_id, stack_name, parameters, tags, region_name, template
             "AWS::StackName": stack_name,
             "AWS::URLSuffix": "amazonaws.com",
             "AWS::NoValue": None,
+            "AWS::Partition": "aws",
         }
 
     def __getitem__(self, key):
diff --git a/moto/cognitoidentity/responses.py b/moto/cognitoidentity/responses.py
index ea54b2cff1f2..e7b428329408 100644
--- a/moto/cognitoidentity/responses.py
+++ b/moto/cognitoidentity/responses.py
@@ -3,6 +3,7 @@
 
 from moto.core.responses import BaseResponse
 from .models import cognitoidentity_backends
+from .utils import get_random_identity_id
 
 
 class CognitoIdentityResponse(BaseResponse):
@@ -31,4 +32,6 @@ def get_credentials_for_identity(self):
         return cognitoidentity_backends[self.region].get_credentials_for_identity(self._get_param('IdentityId'))
 
     def get_open_id_token_for_developer_identity(self):
-        return cognitoidentity_backends[self.region].get_open_id_token_for_developer_identity(self._get_param('IdentityId'))
+        return cognitoidentity_backends[self.region].get_open_id_token_for_developer_identity(
+            self._get_param('IdentityId') or get_random_identity_id(self.region)
+        )
diff --git a/moto/cognitoidentity/utils.py b/moto/cognitoidentity/utils.py
index 359631763ac1..6143d5121f0d 100644
--- a/moto/cognitoidentity/utils.py
+++ b/moto/cognitoidentity/utils.py
@@ -2,4 +2,4 @@
 
 
 def get_random_identity_id(region):
-    return "{0}:{0}".format(region, get_random_hex(length=19))
+    return "{0}:{1}".format(region, get_random_hex(length=19))
diff --git a/moto/cognitoidp/models.py b/moto/cognitoidp/models.py
index 52a73f89f09e..10da0c6ff591 100644
--- a/moto/cognitoidp/models.py
+++ b/moto/cognitoidp/models.py
@@ -24,7 +24,7 @@ class CognitoIdpUserPool(BaseModel):
 
     def __init__(self, region, name, extended_config):
         self.region = region
-        self.id = str(uuid.uuid4())
+        self.id = "{}_{}".format(self.region, str(uuid.uuid4().hex))
         self.name = name
         self.status = None
         self.extended_config = extended_config or {}
@@ -84,7 +84,11 @@ def create_refresh_token(self, client_id, username):
         return refresh_token
 
     def create_access_token(self, client_id, username):
-        access_token, expires_in = self.create_jwt(client_id, username)
+        extra_data = self.get_user_extra_data_by_client_id(
+            client_id, username
+        )
+        access_token, expires_in = self.create_jwt(client_id, username,
+                                                   extra_data=extra_data)
         self.access_tokens[access_token] = (client_id, username)
         return access_token, expires_in
 
@@ -97,6 +101,21 @@ def create_tokens_from_refresh_token(self, refresh_token):
         id_token, _ = self.create_id_token(client_id, username)
         return access_token, id_token, expires_in
 
+    def get_user_extra_data_by_client_id(self, client_id, username):
+        extra_data = {}
+        current_client = self.clients.get(client_id, None)
+        if current_client:
+            for readable_field in current_client.get_readable_fields():
+                attribute = list(filter(
+                    lambda f: f['Name'] == readable_field,
+                    self.users.get(username).attributes
+                ))
+                if len(attribute) > 0:
+                    extra_data.update({
+                        attribute[0]['Name']: attribute[0]['Value']
+                    })
+        return extra_data
+
 
 class CognitoIdpUserPoolDomain(BaseModel):
 
@@ -138,6 +157,9 @@ def to_json(self, extended=False):
 
         return user_pool_client_json
 
+    def get_readable_fields(self):
+        return self.extended_config.get('ReadAttributes', [])
+
 
 class CognitoIdpIdentityProvider(BaseModel):
 
diff --git a/moto/core/models.py b/moto/core/models.py
index 92dc2a98096a..adc06a9c0701 100644
--- a/moto/core/models.py
+++ b/moto/core/models.py
@@ -89,6 +89,17 @@ def decorate_class(self, klass):
             if inspect.ismethod(attr_value) and attr_value.__self__ is klass:
                 continue
 
+            # Check if this is a staticmethod. If so, skip patching
+            for cls in inspect.getmro(klass):
+                if attr_value.__name__ not in cls.__dict__:
+                    continue
+                bound_attr_value = cls.__dict__[attr_value.__name__]
+                if not isinstance(bound_attr_value, staticmethod):
+                    break
+            else:
+                # It is a staticmethod, skip patching
+                continue
+
             try:
                 setattr(klass, attr, self(attr_value, reset=False))
             except TypeError:
diff --git a/moto/dynamodb2/models.py b/moto/dynamodb2/models.py
index b327c7a4be93..63ad20df6a99 100644
--- a/moto/dynamodb2/models.py
+++ b/moto/dynamodb2/models.py
@@ -154,7 +154,7 @@ def update(self, update_expression, expression_attribute_names, expression_attri
                 # If not exists, changes value to a default if needed, else its the same as it was
                 if value.startswith('if_not_exists'):
                     # Function signature
-                    match = re.match(r'.*if_not_exists\((?P<path>.+),\s*(?P<default>.+)\).*', value)
+                    match = re.match(r'.*if_not_exists\s*\((?P<path>.+),\s*(?P<default>.+)\).*', value)
                     if not match:
                         raise TypeError
 
@@ -162,12 +162,13 @@ def update(self, update_expression, expression_attribute_names, expression_attri
                     # If it already exists, get its value so we dont overwrite it
                     if path in self.attrs:
-                        value = self.attrs[path].cast_value
+                        value = self.attrs[path]
 
-            if value in expression_attribute_values:
-                value = DynamoType(expression_attribute_values[value])
-            else:
-                value = DynamoType({"S": value})
+            if type(value) != DynamoType:
+                if value in expression_attribute_values:
+                    value = DynamoType(expression_attribute_values[value])
+                else:
+                    value = DynamoType({"S": value})
 
             if '.' not in key:
not in key: self.attrs[key] = value diff --git a/moto/dynamodb2/responses.py b/moto/dynamodb2/responses.py index 3c7e7ffc2dc3..e2f1ef1cc5bb 100644 --- a/moto/dynamodb2/responses.py +++ b/moto/dynamodb2/responses.py @@ -20,6 +20,17 @@ def has_empty_keys_or_values(_dict): ) +def get_empty_str_error(): + er = 'com.amazonaws.dynamodb.v20111205#ValidationException' + return (400, + {'server': 'amazon.com'}, + dynamo_json_dump({'__type': er, + 'message': ('One or more parameter values were ' + 'invalid: An AttributeValue may not ' + 'contain an empty string')} + )) + + class DynamoHandler(BaseResponse): def get_endpoint_name(self, headers): @@ -174,14 +185,7 @@ def put_item(self): item = self.body['Item'] if has_empty_keys_or_values(item): - er = 'com.amazonaws.dynamodb.v20111205#ValidationException' - return (400, - {'server': 'amazon.com'}, - dynamo_json_dump({'__type': er, - 'message': ('One or more parameter values were ' - 'invalid: An AttributeValue may not ' - 'contain an empty string')} - )) + return get_empty_str_error() overwrite = 'Expected' not in self.body if not overwrite: @@ -200,9 +204,9 @@ def put_item(self): if cond_items: expected = {} overwrite = False - exists_re = re.compile('^attribute_exists\((.*)\)$') + exists_re = re.compile('^attribute_exists\s*\((.*)\)$') not_exists_re = re.compile( - '^attribute_not_exists\((.*)\)$') + '^attribute_not_exists\s*\((.*)\)$') for cond in cond_items: exists_m = exists_re.match(cond) @@ -523,6 +527,7 @@ def delete_item(self): return dynamo_json_dump(item_dict) def update_item(self): + name = self.body['TableName'] key = self.body['Key'] update_expression = self.body.get('UpdateExpression') @@ -533,6 +538,9 @@ def update_item(self): 'ExpressionAttributeValues', {}) existing_item = self.dynamodb_backend.get_item(name, key) + if has_empty_keys_or_values(expression_attribute_values): + return get_empty_str_error() + if 'Expected' in self.body: expected = self.body['Expected'] else: @@ -548,9 +556,9 @@ def update_item(self): if cond_items: expected = {} - exists_re = re.compile('^attribute_exists\((.*)\)$') + exists_re = re.compile('^attribute_exists\s*\((.*)\)$') not_exists_re = re.compile( - '^attribute_not_exists\((.*)\)$') + '^attribute_not_exists\s*\((.*)\)$') for cond in cond_items: exists_m = exists_re.match(cond) diff --git a/moto/ec2/models.py b/moto/ec2/models.py index 4e26f0f6562c..b94cac4794b1 100755 --- a/moto/ec2/models.py +++ b/moto/ec2/models.py @@ -13,6 +13,7 @@ import boto.ec2 from collections import defaultdict +import weakref from datetime import datetime from boto.ec2.instance import Instance as BotoInstance, Reservation from boto.ec2.blockdevicemapping import BlockDeviceMapping, BlockDeviceType @@ -2115,10 +2116,20 @@ def get_cidr_block_association_set(self, ipv6=False): class VPCBackend(object): + __refs__ = defaultdict(list) + def __init__(self): self.vpcs = {} + self.__refs__[self.__class__].append(weakref.ref(self)) super(VPCBackend, self).__init__() + @classmethod + def get_instances(cls): + for inst_ref in cls.__refs__[cls]: + inst = inst_ref() + if inst is not None: + yield inst + def create_vpc(self, cidr_block, instance_tenancy='default', amazon_provided_ipv6_cidr_block=False): vpc_id = random_vpc_id() vpc = VPC(self, vpc_id, cidr_block, len(self.vpcs) == 0, instance_tenancy, amazon_provided_ipv6_cidr_block) @@ -2142,6 +2153,13 @@ def get_vpc(self, vpc_id): raise InvalidVPCIdError(vpc_id) return self.vpcs.get(vpc_id) + # get vpc by vpc id and aws region + def get_cross_vpc(self, vpc_id, peer_region): + for vpcs in 
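
get_empty_str_error above centralises the ValidationException that both put_item and (now) update_item return when an AttributeValue is an empty string. A sketch of the client-visible effect, assuming moto's mock_dynamodb2 decorator; the table and key names are arbitrary:

import boto3
from botocore.exceptions import ClientError
from moto import mock_dynamodb2

@mock_dynamodb2
def empty_string_is_rejected():
    ddb = boto3.client("dynamodb", region_name="us-east-1")
    ddb.create_table(
        TableName="messages",
        KeySchema=[{"AttributeName": "id", "KeyType": "HASH"}],
        AttributeDefinitions=[{"AttributeName": "id", "AttributeType": "S"}],
        ProvisionedThroughput={"ReadCapacityUnits": 1, "WriteCapacityUnits": 1},
    )
    try:
        ddb.put_item(TableName="messages", Item={"id": {"S": ""}})
    except ClientError as err:
        assert err.response["Error"]["Code"] == "ValidationException"
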
self.get_instances(): + if vpcs.region_name == peer_region: + match_vpc = vpcs.get_vpc(vpc_id) + return match_vpc + def get_all_vpcs(self, vpc_ids=None, filters=None): matches = self.vpcs.values() if vpc_ids: diff --git a/moto/ec2/responses/vpc_peering_connections.py b/moto/ec2/responses/vpc_peering_connections.py index 1bccce4f6cae..49d752893319 100644 --- a/moto/ec2/responses/vpc_peering_connections.py +++ b/moto/ec2/responses/vpc_peering_connections.py @@ -5,8 +5,12 @@ class VPCPeeringConnections(BaseResponse): def create_vpc_peering_connection(self): + peer_region = self._get_param('PeerRegion') + if peer_region == self.region or peer_region is None: + peer_vpc = self.ec2_backend.get_vpc(self._get_param('PeerVpcId')) + else: + peer_vpc = self.ec2_backend.get_cross_vpc(self._get_param('PeerVpcId'), peer_region) vpc = self.ec2_backend.get_vpc(self._get_param('VpcId')) - peer_vpc = self.ec2_backend.get_vpc(self._get_param('PeerVpcId')) vpc_pcx = self.ec2_backend.create_vpc_peering_connection(vpc, peer_vpc) template = self.response_template( CREATE_VPC_PEERING_CONNECTION_RESPONSE) @@ -41,26 +45,31 @@ def reject_vpc_peering_connection(self): CREATE_VPC_PEERING_CONNECTION_RESPONSE = """ - - 7a62c49f-347e-4fc4-9331-6e8eEXAMPLE - - {{ vpc_pcx.id }} + + 7a62c49f-347e-4fc4-9331-6e8eEXAMPLE + + {{ vpc_pcx.id }} - 777788889999 - {{ vpc_pcx.vpc.id }} - {{ vpc_pcx.vpc.cidr_block }} + 777788889999 + {{ vpc_pcx.vpc.id }} + {{ vpc_pcx.vpc.cidr_block }} + + false + false + false + 123456789012 {{ vpc_pcx.peer_vpc.id }} - initiating-request - Initiating request to {accepter ID}. + initiating-request + Initiating Request to {accepter ID} 2014-02-18T14:37:25.000Z - + """ diff --git a/moto/ecs/models.py b/moto/ecs/models.py index 55fb4d4d931b..d00853843c0f 100644 --- a/moto/ecs/models.py +++ b/moto/ecs/models.py @@ -179,7 +179,7 @@ def response_object(self): class Service(BaseObject): - def __init__(self, cluster, service_name, task_definition, desired_count, load_balancers=None): + def __init__(self, cluster, service_name, task_definition, desired_count, load_balancers=None, scheduling_strategy=None): self.cluster_arn = cluster.arn self.arn = 'arn:aws:ecs:us-east-1:012345678910:service/{0}'.format( service_name) @@ -202,6 +202,7 @@ def __init__(self, cluster, service_name, task_definition, desired_count, load_b } ] self.load_balancers = load_balancers if load_balancers is not None else [] + self.scheduling_strategy = scheduling_strategy if scheduling_strategy is not None else 'REPLICA' self.pending_count = 0 @property @@ -214,6 +215,7 @@ def response_object(self): del response_object['name'], response_object['arn'] response_object['serviceName'] = self.name response_object['serviceArn'] = self.arn + response_object['schedulingStrategy'] = self.scheduling_strategy for deployment in response_object['deployments']: if isinstance(deployment['createdAt'], datetime): @@ -655,7 +657,7 @@ def stop_task(self, cluster_str, task_str, reason): raise Exception("Could not find task {} on cluster {}".format( task_str, cluster_name)) - def create_service(self, cluster_str, service_name, task_definition_str, desired_count, load_balancers=None): + def create_service(self, cluster_str, service_name, task_definition_str, desired_count, load_balancers=None, scheduling_strategy=None): cluster_name = cluster_str.split('/')[-1] if cluster_name in self.clusters: cluster = self.clusters[cluster_name] @@ -665,7 +667,7 @@ def create_service(self, cluster_str, service_name, task_definition_str, desired desired_count = desired_count if 
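
The weakref registry above lets one region's VPCBackend resolve a VPC owned by another region's backend, which is what the new PeerRegion parameter needs. A sketch of the call path, assuming mock_ec2; the CIDR blocks are arbitrary:

import boto3
from moto import mock_ec2

@mock_ec2
def cross_region_peering():
    use1 = boto3.client("ec2", region_name="us-east-1")
    usw2 = boto3.client("ec2", region_name="us-west-2")
    vpc = use1.create_vpc(CidrBlock="10.0.0.0/16")["Vpc"]
    peer = usw2.create_vpc(CidrBlock="10.1.0.0/16")["Vpc"]
    # A PeerRegion different from the caller's region routes
    # the lookup through get_cross_vpc().
    pcx = use1.create_vpc_peering_connection(
        VpcId=vpc["VpcId"],
        PeerVpcId=peer["VpcId"],
        PeerRegion="us-west-2",
    )["VpcPeeringConnection"]
    assert pcx["VpcPeeringConnectionId"].startswith("pcx-")
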
desired_count is not None else 0 service = Service(cluster, service_name, - task_definition, desired_count, load_balancers) + task_definition, desired_count, load_balancers, scheduling_strategy) cluster_service_pair = '{0}:{1}'.format(cluster_name, service_name) self.services[cluster_service_pair] = service diff --git a/moto/ecs/responses.py b/moto/ecs/responses.py index 9455d7a2820f..e0bfefc02c56 100644 --- a/moto/ecs/responses.py +++ b/moto/ecs/responses.py @@ -154,8 +154,9 @@ def create_service(self): task_definition_str = self._get_param('taskDefinition') desired_count = self._get_int_param('desiredCount') load_balancers = self._get_param('loadBalancers') + scheduling_strategy = self._get_param('schedulingStrategy') service = self.ecs_backend.create_service( - cluster_str, service_name, task_definition_str, desired_count, load_balancers) + cluster_str, service_name, task_definition_str, desired_count, load_balancers, scheduling_strategy) return json.dumps({ 'service': service.response_object }) diff --git a/moto/elb/responses.py b/moto/elb/responses.py index 40d6ec2f9e92..b512f56e9dfc 100644 --- a/moto/elb/responses.py +++ b/moto/elb/responses.py @@ -259,12 +259,22 @@ def set_load_balancer_policies_for_backend_server(self): def describe_instance_health(self): load_balancer_name = self._get_param('LoadBalancerName') - instance_ids = [list(param.values())[0] for param in self._get_list_prefix('Instances.member')] - if len(instance_ids) == 0: - instance_ids = self.elb_backend.get_load_balancer( - load_balancer_name).instance_ids + provided_instance_ids = [ + list(param.values())[0] + for param in self._get_list_prefix('Instances.member') + ] + registered_instances_id = self.elb_backend.get_load_balancer( + load_balancer_name).instance_ids + if len(provided_instance_ids) == 0: + provided_instance_ids = registered_instances_id template = self.response_template(DESCRIBE_INSTANCE_HEALTH_TEMPLATE) - return template.render(instance_ids=instance_ids) + instances = [] + for instance_id in provided_instance_ids: + state = "InService" \ + if instance_id in registered_instances_id\ + else "Unknown" + instances.append({"InstanceId": instance_id, "State": state}) + return template.render(instances=instances) def add_tags(self): @@ -689,11 +699,11 @@ def _add_tags(self, elb): DESCRIBE_INSTANCE_HEALTH_TEMPLATE = """ - {% for instance_id in instance_ids %} + {% for instance in instances %} N/A - {{ instance_id }} - InService + {{ instance['InstanceId'] }} + {{ instance['State'] }} N/A {% endfor %} diff --git a/moto/glue/exceptions.py b/moto/glue/exceptions.py index 62ea1525c8b6..8972adb35e36 100644 --- a/moto/glue/exceptions.py +++ b/moto/glue/exceptions.py @@ -6,19 +6,56 @@ class GlueClientError(JsonRESTError): code = 400 -class DatabaseAlreadyExistsException(GlueClientError): - def __init__(self): - self.code = 400 - super(DatabaseAlreadyExistsException, self).__init__( - 'DatabaseAlreadyExistsException', - 'Database already exists.' +class AlreadyExistsException(GlueClientError): + def __init__(self, typ): + super(GlueClientError, self).__init__( + 'AlreadyExistsException', + '%s already exists.' 
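
schedulingStrategy is threaded from the response handler into Service and echoed back in the service object, defaulting to REPLICA when omitted. A sketch, assuming mock_ecs; the cluster and task definition are throwaways:

import boto3
from moto import mock_ecs

@mock_ecs
def daemon_service():
    ecs = boto3.client("ecs", region_name="us-east-1")
    ecs.create_cluster(clusterName="default")
    ecs.register_task_definition(
        family="busybox",
        containerDefinitions=[
            {"name": "main", "image": "busybox", "memory": 128}
        ],
    )
    svc = ecs.create_service(
        cluster="default",
        serviceName="logger",
        taskDefinition="busybox:1",
        desiredCount=1,
        schedulingStrategy="DAEMON",
    )["service"]
    assert svc["schedulingStrategy"] == "DAEMON"
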
% (typ), ) -class TableAlreadyExistsException(GlueClientError): +class DatabaseAlreadyExistsException(AlreadyExistsException): + def __init__(self): + super(DatabaseAlreadyExistsException, self).__init__('Database') + + +class TableAlreadyExistsException(AlreadyExistsException): def __init__(self): - self.code = 400 - super(TableAlreadyExistsException, self).__init__( - 'TableAlreadyExistsException', - 'Table already exists.' + super(TableAlreadyExistsException, self).__init__('Table') + + +class PartitionAlreadyExistsException(AlreadyExistsException): + def __init__(self): + super(PartitionAlreadyExistsException, self).__init__('Partition') + + +class EntityNotFoundException(GlueClientError): + def __init__(self, msg): + super(GlueClientError, self).__init__( + 'EntityNotFoundException', + msg, ) + + +class DatabaseNotFoundException(EntityNotFoundException): + def __init__(self, db): + super(DatabaseNotFoundException, self).__init__( + 'Database %s not found.' % db, + ) + + +class TableNotFoundException(EntityNotFoundException): + def __init__(self, tbl): + super(TableNotFoundException, self).__init__( + 'Table %s not found.' % tbl, + ) + + +class PartitionNotFoundException(EntityNotFoundException): + def __init__(self): + super(PartitionNotFoundException, self).__init__("Cannot find partition.") + + +class VersionNotFoundException(EntityNotFoundException): + def __init__(self): + super(VersionNotFoundException, self).__init__("Version not found.") diff --git a/moto/glue/models.py b/moto/glue/models.py index 09b7d60ed9b1..bcf2ec4bf151 100644 --- a/moto/glue/models.py +++ b/moto/glue/models.py @@ -1,8 +1,19 @@ from __future__ import unicode_literals +import time + from moto.core import BaseBackend, BaseModel from moto.compat import OrderedDict -from.exceptions import DatabaseAlreadyExistsException, TableAlreadyExistsException +from.exceptions import ( + JsonRESTError, + DatabaseAlreadyExistsException, + DatabaseNotFoundException, + TableAlreadyExistsException, + TableNotFoundException, + PartitionAlreadyExistsException, + PartitionNotFoundException, + VersionNotFoundException, +) class GlueBackend(BaseBackend): @@ -19,7 +30,10 @@ def create_database(self, database_name): return database def get_database(self, database_name): - return self.databases[database_name] + try: + return self.databases[database_name] + except KeyError: + raise DatabaseNotFoundException(database_name) def create_table(self, database_name, table_name, table_input): database = self.get_database(database_name) @@ -33,7 +47,10 @@ def create_table(self, database_name, table_name, table_input): def get_table(self, database_name, table_name): database = self.get_database(database_name) - return database.tables[table_name] + try: + return database.tables[table_name] + except KeyError: + raise TableNotFoundException(table_name) def get_tables(self, database_name): database = self.get_database(database_name) @@ -52,9 +69,84 @@ class FakeTable(BaseModel): def __init__(self, database_name, table_name, table_input): self.database_name = database_name self.name = table_name - self.table_input = table_input - self.storage_descriptor = self.table_input.get('StorageDescriptor', {}) - self.partition_keys = self.table_input.get('PartitionKeys', []) + self.partitions = OrderedDict() + self.versions = [] + self.update(table_input) + + def update(self, table_input): + self.versions.append(table_input) + + def get_version(self, ver): + try: + if not isinstance(ver, int): + # "1" goes to [0] + ver = int(ver) - 1 + except ValueError as e: + 
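
With the hierarchy above, every missing entity surfaces to the client as an EntityNotFoundException carrying a specific message, mirroring the real Glue API. A sketch of the client-visible effect (the database name is arbitrary):

import boto3
from botocore.exceptions import ClientError
from moto import mock_glue

@mock_glue
def missing_database():
    glue = boto3.client("glue", region_name="us-east-1")
    try:
        glue.get_table(DatabaseName="nosuchdb", Name="any")
    except ClientError as err:
        assert err.response["Error"]["Code"] == "EntityNotFoundException"
        assert "nosuchdb" in err.response["Error"]["Message"]
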
raise JsonRESTError("InvalidInputException", str(e)) + + try: + return self.versions[ver] + except IndexError: + raise VersionNotFoundException() + + def as_dict(self, version=-1): + obj = { + 'DatabaseName': self.database_name, + 'Name': self.name, + } + obj.update(self.get_version(version)) + return obj + + def create_partition(self, partition_input): + partition = FakePartition(self.database_name, self.name, partition_input) + key = str(partition.values) + if key in self.partitions: + raise PartitionAlreadyExistsException() + self.partitions[str(partition.values)] = partition + + def get_partitions(self): + return [p for str_part_values, p in self.partitions.items()] + + def get_partition(self, values): + try: + return self.partitions[str(values)] + except KeyError: + raise PartitionNotFoundException() + + def update_partition(self, old_values, partition_input): + partition = FakePartition(self.database_name, self.name, partition_input) + key = str(partition.values) + if old_values == partition_input['Values']: + # Altering a partition in place. Don't remove it so the order of + # returned partitions doesn't change + if key not in self.partitions: + raise PartitionNotFoundException() + else: + removed = self.partitions.pop(str(old_values), None) + if removed is None: + raise PartitionNotFoundException() + if key in self.partitions: + # Trying to update to overwrite a partition that exists + raise PartitionAlreadyExistsException() + self.partitions[key] = partition + + +class FakePartition(BaseModel): + def __init__(self, database_name, table_name, partition_input): + self.creation_time = time.time() + self.database_name = database_name + self.table_name = table_name + self.partition_input = partition_input + self.values = self.partition_input.get('Values', []) + + def as_dict(self): + obj = { + 'DatabaseName': self.database_name, + 'TableName': self.table_name, + 'CreationTime': self.creation_time, + } + obj.update(self.partition_input) + return obj glue_backend = GlueBackend() diff --git a/moto/glue/responses.py b/moto/glue/responses.py index bb64c40d4cde..84cc6f901e17 100644 --- a/moto/glue/responses.py +++ b/moto/glue/responses.py @@ -37,27 +37,94 @@ def get_table(self): database_name = self.parameters.get('DatabaseName') table_name = self.parameters.get('Name') table = self.glue_backend.get_table(database_name, table_name) + + return json.dumps({'Table': table.as_dict()}) + + def update_table(self): + database_name = self.parameters.get('DatabaseName') + table_input = self.parameters.get('TableInput') + table_name = table_input.get('Name') + table = self.glue_backend.get_table(database_name, table_name) + table.update(table_input) + return "" + + def get_table_versions(self): + database_name = self.parameters.get('DatabaseName') + table_name = self.parameters.get('TableName') + table = self.glue_backend.get_table(database_name, table_name) + return json.dumps({ "TableVersions": [ { "Table": table.as_dict(version=n), "VersionId": str(n + 1), } for n in range(len(table.versions)) ], }) + + def get_table_version(self): + database_name = self.parameters.get('DatabaseName') + table_name = self.parameters.get('TableName') + table = self.glue_backend.get_table(database_name, table_name) + ver_id = self.parameters.get('VersionId') + + return json.dumps({ "TableVersion": { "Table": table.as_dict(version=ver_id),
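
FakeTable now keeps every TableInput in self.versions, so update_table is a plain append and version ids are 1-based indexes into that list. A sketch of the versioning round-trip, assuming mock_glue; names are illustrative:

import boto3
from moto import mock_glue

@mock_glue
def table_versions():
    glue = boto3.client("glue", region_name="us-east-1")
    glue.create_database(DatabaseInput={"Name": "db"})
    glue.create_table(DatabaseName="db", TableInput={"Name": "events"})
    glue.update_table(
        DatabaseName="db",
        TableInput={"Name": "events", "Description": "second revision"},
    )
    versions = glue.get_table_versions(
        DatabaseName="db", TableName="events"
    )["TableVersions"]
    assert [v["VersionId"] for v in versions] == ["1", "2"]

The partition handlers key each partition by its stringified Values list, which is what makes the in-place update case above need special handling. A companion sketch of create/get, again assuming mock_glue; the partition value is an arbitrary date string:

import boto3
from moto import mock_glue

@mock_glue
def partition_round_trip():
    glue = boto3.client("glue", region_name="us-east-1")
    glue.create_database(DatabaseInput={"Name": "db"})
    glue.create_table(DatabaseName="db", TableInput={"Name": "events"})
    glue.create_partition(
        DatabaseName="db",
        TableName="events",
        PartitionInput={"Values": ["2018-09-24"]},
    )
    part = glue.get_partition(
        DatabaseName="db", TableName="events", PartitionValues=["2018-09-24"]
    )["Partition"]
    assert part["Values"] == ["2018-09-24"]
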
"VersionId": ver_id, + }, }) def get_tables(self): database_name = self.parameters.get('DatabaseName') tables = self.glue_backend.get_tables(database_name) - return json.dumps( - { - 'TableList': [ - { - 'DatabaseName': table.database_name, - 'Name': table.name, - 'PartitionKeys': table.partition_keys, - 'StorageDescriptor': table.storage_descriptor - } for table in tables - ] - } - ) + return json.dumps({ + 'TableList': [ + table.as_dict() for table in tables + ] + }) + + def get_partitions(self): + database_name = self.parameters.get('DatabaseName') + table_name = self.parameters.get('TableName') + if 'Expression' in self.parameters: + raise NotImplementedError("Expression filtering in get_partitions is not implemented in moto") + table = self.glue_backend.get_table(database_name, table_name) + + return json.dumps({ + 'Partitions': [ + p.as_dict() for p in table.get_partitions() + ] + }) + + def get_partition(self): + database_name = self.parameters.get('DatabaseName') + table_name = self.parameters.get('TableName') + values = self.parameters.get('PartitionValues') + + table = self.glue_backend.get_table(database_name, table_name) + + p = table.get_partition(values) + + return json.dumps({'Partition': p.as_dict()}) + + def create_partition(self): + database_name = self.parameters.get('DatabaseName') + table_name = self.parameters.get('TableName') + part_input = self.parameters.get('PartitionInput') + + table = self.glue_backend.get_table(database_name, table_name) + table.create_partition(part_input) + + return "" + + def update_partition(self): + database_name = self.parameters.get('DatabaseName') + table_name = self.parameters.get('TableName') + part_input = self.parameters.get('PartitionInput') + part_to_update = self.parameters.get('PartitionValueList') + + table = self.glue_backend.get_table(database_name, table_name) + table.update_partition(part_to_update, part_input) + + return "" diff --git a/moto/iam/models.py b/moto/iam/models.py index 697be798884e..4d884fa2f18a 100644 --- a/moto/iam/models.py +++ b/moto/iam/models.py @@ -37,7 +37,6 @@ def __init__(self, description=None, document=None, path=None): - self.document = document or {} self.name = name self.attachment_count = 0 @@ -45,7 +44,7 @@ def __init__(self, self.id = random_policy_id() self.path = path or '/' self.default_version_id = default_version_id or 'v1' - self.versions = [] + self.versions = [PolicyVersion(self.arn, document, True)] self.create_datetime = datetime.now(pytz.utc) self.update_datetime = datetime.now(pytz.utc) @@ -72,11 +71,11 @@ class ManagedPolicy(Policy): def attach_to(self, obj): self.attachment_count += 1 - obj.managed_policies[self.name] = self + obj.managed_policies[self.arn] = self def detach_from(self, obj): self.attachment_count -= 1 - del obj.managed_policies[self.name] + del obj.managed_policies[self.arn] @property def arn(self): @@ -477,11 +476,13 @@ def create_policy(self, description, path, policy_document, policy_name): document=policy_document, path=path, ) - self.managed_policies[policy.name] = policy + self.managed_policies[policy.arn] = policy return policy - def get_policy(self, policy_name): - return self.managed_policies.get(policy_name) + def get_policy(self, policy_arn): + if policy_arn not in self.managed_policies: + raise IAMNotFoundException("Policy {0} not found".format(policy_arn)) + return self.managed_policies.get(policy_arn) def list_attached_role_policies(self, role_name, marker=None, max_items=100, path_prefix='/'): policies = 
self.get_role(role_name).managed_policies.values() @@ -575,21 +576,18 @@ def list_role_policies(self, role_name): return role.policies.keys() def create_policy_version(self, policy_arn, policy_document, set_as_default): - policy_name = policy_arn.split(':')[-1] - policy_name = policy_name.split('/')[1] - policy = self.get_policy(policy_name) + policy = self.get_policy(policy_arn) if not policy: raise IAMNotFoundException("Policy not found") version = PolicyVersion(policy_arn, policy_document, set_as_default) policy.versions.append(version) + version.version_id = 'v{0}'.format(len(policy.versions)) if set_as_default: policy.default_version_id = version.version_id return version def get_policy_version(self, policy_arn, version_id): - policy_name = policy_arn.split(':')[-1] - policy_name = policy_name.split('/')[1] - policy = self.get_policy(policy_name) + policy = self.get_policy(policy_arn) if not policy: raise IAMNotFoundException("Policy not found") for version in policy.versions: @@ -598,19 +596,18 @@ def get_policy_version(self, policy_arn, version_id): raise IAMNotFoundException("Policy version not found") def list_policy_versions(self, policy_arn): - policy_name = policy_arn.split(':')[-1] - policy_name = policy_name.split('/')[1] - policy = self.get_policy(policy_name) + policy = self.get_policy(policy_arn) if not policy: raise IAMNotFoundException("Policy not found") return policy.versions def delete_policy_version(self, policy_arn, version_id): - policy_name = policy_arn.split(':')[-1] - policy_name = policy_name.split('/')[1] - policy = self.get_policy(policy_name) + policy = self.get_policy(policy_arn) if not policy: raise IAMNotFoundException("Policy not found") + if version_id == policy.default_version_id: + raise IAMConflictException( + "Cannot delete the default version of a policy") for i, v in enumerate(policy.versions): if v.version_id == version_id: del policy.versions[i] diff --git a/moto/iam/responses.py b/moto/iam/responses.py index 9c1241c365cd..9e8d21396f8d 100644 --- a/moto/iam/responses.py +++ b/moto/iam/responses.py @@ -58,6 +58,12 @@ def create_policy(self): template = self.response_template(CREATE_POLICY_TEMPLATE) return template.render(policy=policy) + def get_policy(self): + policy_arn = self._get_param('PolicyArn') + policy = iam_backend.get_policy(policy_arn) + template = self.response_template(GET_POLICY_TEMPLATE) + return template.render(policy=policy) + def list_attached_role_policies(self): marker = self._get_param('Marker') max_items = self._get_int_param('MaxItems', 100) @@ -601,6 +607,25 @@ def get_account_authorization_details(self): """ +GET_POLICY_TEMPLATE = """ + + + {{ policy.name }} + {{ policy.description }} + {{ policy.default_version_id }} + {{ policy.id }} + {{ policy.path }} + {{ policy.arn }} + {{ policy.attachment_count }} + {{ policy.create_datetime.isoformat() }} + {{ policy.update_datetime.isoformat() }} + + + + 684f0917-3d22-11e4-a4a0-cffb9EXAMPLE + +""" + LIST_ATTACHED_ROLE_POLICIES_TEMPLATE = """ {% if marker is none %} diff --git a/moto/kms/models.py b/moto/kms/models.py index 89ebf00821ed..bb39d1b24182 100644 --- a/moto/kms/models.py +++ b/moto/kms/models.py @@ -2,8 +2,10 @@ import boto.kms from moto.core import BaseBackend, BaseModel +from moto.core.utils import iso_8601_datetime_without_milliseconds from .utils import generate_key_id from collections import defaultdict +from datetime import datetime, timedelta class Key(BaseModel): @@ -12,11 +14,13 @@ def __init__(self, policy, key_usage, description, region): self.id = 
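
Keying managed_policies by ARN removes the fragile name parsing the old code repeated in every *_policy_version method, and delete_policy_version now refuses to drop the default version, as IAM itself does. A sketch, assuming mock_iam; the empty Statement document is only for illustration:

import boto3
from botocore.exceptions import ClientError
from moto import mock_iam

@mock_iam
def default_version_is_protected():
    iam = boto3.client("iam", region_name="us-east-1")
    doc = '{"Version": "2012-10-17", "Statement": []}'
    arn = iam.create_policy(PolicyName="p", PolicyDocument=doc)["Policy"]["Arn"]
    iam.create_policy_version(PolicyArn=arn, PolicyDocument=doc, SetAsDefault=False)
    iam.delete_policy_version(PolicyArn=arn, VersionId="v2")  # non-default: fine
    try:
        iam.delete_policy_version(PolicyArn=arn, VersionId="v1")
    except ClientError:
        pass  # v1 is the default version and may not be deleted
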
generate_key_id() self.policy = policy self.key_usage = key_usage + self.key_state = "Enabled" self.description = description self.enabled = True self.region = region self.account_id = "0123456789012" self.key_rotation_status = False + self.deletion_date = None @property def physical_resource_id(self): @@ -27,7 +31,7 @@ def arn(self): return "arn:aws:kms:{0}:{1}:key/{2}".format(self.region, self.account_id, self.id) def to_dict(self): - return { + key_dict = { "KeyMetadata": { "AWSAccountId": self.account_id, "Arn": self.arn, @@ -36,8 +40,12 @@ def to_dict(self): "Enabled": self.enabled, "KeyId": self.id, "KeyUsage": self.key_usage, + "KeyState": self.key_state, } } + if self.key_state == 'PendingDeletion': + key_dict['KeyMetadata']['DeletionDate'] = iso_8601_datetime_without_milliseconds(self.deletion_date) + return key_dict def delete(self, region_name): kms_backends[region_name].delete_key(self.id) @@ -138,6 +146,29 @@ def put_key_policy(self, key_id, policy): def get_key_policy(self, key_id): return self.keys[self.get_key_id(key_id)].policy + def disable_key(self, key_id): + if key_id in self.keys: + self.keys[key_id].enabled = False + self.keys[key_id].key_state = 'Disabled' + + def enable_key(self, key_id): + if key_id in self.keys: + self.keys[key_id].enabled = True + self.keys[key_id].key_state = 'Enabled' + + def cancel_key_deletion(self, key_id): + if key_id in self.keys: + self.keys[key_id].key_state = 'Disabled' + self.keys[key_id].deletion_date = None + + def schedule_key_deletion(self, key_id, pending_window_in_days): + if key_id in self.keys: + if 7 <= pending_window_in_days <= 30: + self.keys[key_id].enabled = False + self.keys[key_id].key_state = 'PendingDeletion' + self.keys[key_id].deletion_date = datetime.now() + timedelta(days=pending_window_in_days) + return iso_8601_datetime_without_milliseconds(self.keys[key_id].deletion_date) + kms_backends = {} for region in boto.kms.regions(): diff --git a/moto/kms/responses.py b/moto/kms/responses.py index 0f544e954908..5883f51eca17 100644 --- a/moto/kms/responses.py +++ b/moto/kms/responses.py @@ -233,6 +233,56 @@ def decrypt(self): value = self.parameters.get("CiphertextBlob") return json.dumps({"Plaintext": base64.b64decode(value).decode("utf-8")}) + def disable_key(self): + key_id = self.parameters.get('KeyId') + _assert_valid_key_id(self.kms_backend.get_key_id(key_id)) + try: + self.kms_backend.disable_key(key_id) + except KeyError: + raise JSONResponseError(404, 'Not Found', body={ + 'message': "Key 'arn:aws:kms:{region}:012345678912:key/{key_id}' does not exist".format(region=self.region, key_id=key_id), + '__type': 'NotFoundException'}) + return json.dumps(None) + + def enable_key(self): + key_id = self.parameters.get('KeyId') + _assert_valid_key_id(self.kms_backend.get_key_id(key_id)) + try: + self.kms_backend.enable_key(key_id) + except KeyError: + raise JSONResponseError(404, 'Not Found', body={ + 'message': "Key 'arn:aws:kms:{region}:012345678912:key/{key_id}' does not exist".format(region=self.region, key_id=key_id), + '__type': 'NotFoundException'}) + return json.dumps(None) + + def cancel_key_deletion(self): + key_id = self.parameters.get('KeyId') + _assert_valid_key_id(self.kms_backend.get_key_id(key_id)) + try: + self.kms_backend.cancel_key_deletion(key_id) + except KeyError: + raise JSONResponseError(404, 'Not Found', body={ + 'message': "Key 'arn:aws:kms:{region}:012345678912:key/{key_id}' does not exist".format(region=self.region, key_id=key_id), + '__type': 'NotFoundException'}) + return json.dumps({'KeyId': 
key_id}) + + def schedule_key_deletion(self): + key_id = self.parameters.get('KeyId') + if self.parameters.get('PendingWindowInDays') is None: + pending_window_in_days = 30 + else: + pending_window_in_days = self.parameters.get('PendingWindowInDays') + _assert_valid_key_id(self.kms_backend.get_key_id(key_id)) + try: + return json.dumps({ + 'KeyId': key_id, + 'DeletionDate': self.kms_backend.schedule_key_deletion(key_id, pending_window_in_days) + }) + except KeyError: + raise JSONResponseError(404, 'Not Found', body={ + 'message': "Key 'arn:aws:kms:{region}:012345678912:key/{key_id}' does not exist".format(region=self.region, key_id=key_id), + '__type': 'NotFoundException'}) + def _assert_valid_key_id(key_id): if not re.match(r'^[A-F0-9]{8}-[A-F0-9]{4}-[A-F0-9]{4}-[A-F0-9]{4}-[A-F0-9]{12}$', key_id, re.IGNORECASE): diff --git a/moto/logs/exceptions.py b/moto/logs/exceptions.py index cc83452ea292..bb02eced3337 100644 --- a/moto/logs/exceptions.py +++ b/moto/logs/exceptions.py @@ -29,5 +29,5 @@ def __init__(self): self.code = 400 super(ResourceAlreadyExistsException, self).__init__( 'ResourceAlreadyExistsException', - 'The specified resource already exists.' + 'The specified log group already exists' ) diff --git a/moto/logs/models.py b/moto/logs/models.py index 3e1c7b955b55..ca1fdc4ad635 100644 --- a/moto/logs/models.py +++ b/moto/logs/models.py @@ -19,7 +19,7 @@ def __init__(self, ingestion_time, log_event): def to_filter_dict(self): return { - "eventId": self.eventId, + "eventId": str(self.eventId), "ingestionTime": self.ingestionTime, # "logStreamName": "message": self.message, @@ -86,7 +86,7 @@ def put_log_events(self, log_group_name, log_stream_name, log_events, sequence_t self.events += [LogEvent(self.lastIngestionTime, log_event) for log_event in log_events] self.uploadSequenceToken += 1 - return self.uploadSequenceToken + return '{:056d}'.format(self.uploadSequenceToken) def get_log_events(self, log_group_name, log_stream_name, start_time, end_time, limit, next_token, start_from_head): def filter_func(event): diff --git a/moto/organizations/__init__.py b/moto/organizations/__init__.py new file mode 100644 index 000000000000..372782dd3574 --- /dev/null +++ b/moto/organizations/__init__.py @@ -0,0 +1,6 @@ +from __future__ import unicode_literals +from .models import organizations_backend +from ..core.models import base_decorator + +organizations_backends = {"global": organizations_backend} +mock_organizations = base_decorator(organizations_backends) diff --git a/moto/organizations/models.py b/moto/organizations/models.py new file mode 100644 index 000000000000..9d5fe388606e --- /dev/null +++ b/moto/organizations/models.py @@ -0,0 +1,296 @@ +from __future__ import unicode_literals + +import datetime +import re + +from moto.core import BaseBackend, BaseModel +from moto.core.exceptions import RESTError +from moto.core.utils import unix_time +from moto.organizations import utils + + +class FakeOrganization(BaseModel): + + def __init__(self, feature_set): + self.id = utils.make_random_org_id() + self.root_id = utils.make_random_root_id() + self.feature_set = feature_set + self.master_account_id = utils.MASTER_ACCOUNT_ID + self.master_account_email = utils.MASTER_ACCOUNT_EMAIL + self.available_policy_types = [{ + 'Type': 'SERVICE_CONTROL_POLICY', + 'Status': 'ENABLED' + }] + + @property + def arn(self): + return utils.ORGANIZATION_ARN_FORMAT.format(self.master_account_id, self.id) + + @property + def master_account_arn(self): + return 
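
The deletion window mirrors the real KMS API: 7 to 30 days, the key is disabled while PendingDeletion, and cancelling drops it back to Disabled rather than Enabled. A sketch, assuming mock_kms:

import boto3
from moto import mock_kms

@mock_kms
def deletion_lifecycle():
    kms = boto3.client("kms", region_name="us-east-1")
    key_id = kms.create_key()["KeyMetadata"]["KeyId"]
    resp = kms.schedule_key_deletion(KeyId=key_id, PendingWindowInDays=7)
    assert "DeletionDate" in resp
    kms.cancel_key_deletion(KeyId=key_id)
    state = kms.describe_key(KeyId=key_id)["KeyMetadata"]["KeyState"]
    assert state == "Disabled"  # cancellation does not re-enable the key
    kms.enable_key(KeyId=key_id)
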
utils.MASTER_ACCOUNT_ARN_FORMAT.format(self.master_account_id, self.id) + + def describe(self): + return { + 'Organization': { + 'Id': self.id, + 'Arn': self.arn, + 'FeatureSet': self.feature_set, + 'MasterAccountArn': self.master_account_arn, + 'MasterAccountId': self.master_account_id, + 'MasterAccountEmail': self.master_account_email, + 'AvailablePolicyTypes': self.available_policy_types, + } + } + + +class FakeAccount(BaseModel): + + def __init__(self, organization, **kwargs): + self.organization_id = organization.id + self.master_account_id = organization.master_account_id + self.create_account_status_id = utils.make_random_create_account_status_id() + self.id = utils.make_random_account_id() + self.name = kwargs['AccountName'] + self.email = kwargs['Email'] + self.create_time = datetime.datetime.utcnow() + self.status = 'ACTIVE' + self.joined_method = 'CREATED' + self.parent_id = organization.root_id + + @property + def arn(self): + return utils.ACCOUNT_ARN_FORMAT.format( + self.master_account_id, + self.organization_id, + self.id + ) + + @property + def create_account_status(self): + return { + 'CreateAccountStatus': { + 'Id': self.create_account_status_id, + 'AccountName': self.name, + 'State': 'SUCCEEDED', + 'RequestedTimestamp': unix_time(self.create_time), + 'CompletedTimestamp': unix_time(self.create_time), + 'AccountId': self.id, + } + } + + def describe(self): + return { + 'Account': { + 'Id': self.id, + 'Arn': self.arn, + 'Email': self.email, + 'Name': self.name, + 'Status': self.status, + 'JoinedMethod': self.joined_method, + 'JoinedTimestamp': unix_time(self.create_time), + } + } + + +class FakeOrganizationalUnit(BaseModel): + + def __init__(self, organization, **kwargs): + self.type = 'ORGANIZATIONAL_UNIT' + self.organization_id = organization.id + self.master_account_id = organization.master_account_id + self.id = utils.make_random_ou_id(organization.root_id) + self.name = kwargs.get('Name') + self.parent_id = kwargs.get('ParentId') + self._arn_format = utils.OU_ARN_FORMAT + + @property + def arn(self): + return self._arn_format.format( + self.master_account_id, + self.organization_id, + self.id + ) + + def describe(self): + return { + 'OrganizationalUnit': { + 'Id': self.id, + 'Arn': self.arn, + 'Name': self.name, + } + } + + +class FakeRoot(FakeOrganizationalUnit): + + def __init__(self, organization, **kwargs): + super(FakeRoot, self).__init__(organization, **kwargs) + self.type = 'ROOT' + self.id = organization.root_id + self.name = 'Root' + self.policy_types = [{ + 'Type': 'SERVICE_CONTROL_POLICY', + 'Status': 'ENABLED' + }] + self._arn_format = utils.ROOT_ARN_FORMAT + + def describe(self): + return { + 'Id': self.id, + 'Arn': self.arn, + 'Name': self.name, + 'PolicyTypes': self.policy_types + } + + +class OrganizationsBackend(BaseBackend): + + def __init__(self): + self.org = None + self.accounts = [] + self.ou = [] + + def create_organization(self, **kwargs): + self.org = FakeOrganization(kwargs['FeatureSet']) + self.ou.append(FakeRoot(self.org)) + return self.org.describe() + + def describe_organization(self): + if not self.org: + raise RESTError( + 'AWSOrganizationsNotInUseException', + "Your account is not a member of an organization." 
+ ) + return self.org.describe() + + def list_roots(self): + return dict( + Roots=[ou.describe() for ou in self.ou if isinstance(ou, FakeRoot)] + ) + + def create_organizational_unit(self, **kwargs): + new_ou = FakeOrganizationalUnit(self.org, **kwargs) + self.ou.append(new_ou) + return new_ou.describe() + + def get_organizational_unit_by_id(self, ou_id): + ou = next((ou for ou in self.ou if ou.id == ou_id), None) + if ou is None: + raise RESTError( + 'OrganizationalUnitNotFoundException', + "You specified an organizational unit that doesn't exist." + ) + return ou + + def validate_parent_id(self, parent_id): + try: + self.get_organizational_unit_by_id(parent_id) + except RESTError: + raise RESTError( + 'ParentNotFoundException', + "You specified parent that doesn't exist." + ) + return parent_id + + def describe_organizational_unit(self, **kwargs): + ou = self.get_organizational_unit_by_id(kwargs['OrganizationalUnitId']) + return ou.describe() + + def list_organizational_units_for_parent(self, **kwargs): + parent_id = self.validate_parent_id(kwargs['ParentId']) + return dict( + OrganizationalUnits=[ + { + 'Id': ou.id, + 'Arn': ou.arn, + 'Name': ou.name, + } + for ou in self.ou + if ou.parent_id == parent_id + ] + ) + + def create_account(self, **kwargs): + new_account = FakeAccount(self.org, **kwargs) + self.accounts.append(new_account) + return new_account.create_account_status + + def get_account_by_id(self, account_id): + account = next(( + account for account in self.accounts + if account.id == account_id + ), None) + if account is None: + raise RESTError( + 'AccountNotFoundException', + "You specified an account that doesn't exist." + ) + return account + + def describe_account(self, **kwargs): + account = self.get_account_by_id(kwargs['AccountId']) + return account.describe() + + def list_accounts(self): + return dict( + Accounts=[account.describe()['Account'] for account in self.accounts] + ) + + def list_accounts_for_parent(self, **kwargs): + parent_id = self.validate_parent_id(kwargs['ParentId']) + return dict( + Accounts=[ + account.describe()['Account'] + for account in self.accounts + if account.parent_id == parent_id + ] + ) + + def move_account(self, **kwargs): + new_parent_id = self.validate_parent_id(kwargs['DestinationParentId']) + self.validate_parent_id(kwargs['SourceParentId']) + account = self.get_account_by_id(kwargs['AccountId']) + index = self.accounts.index(account) + self.accounts[index].parent_id = new_parent_id + + def list_parents(self, **kwargs): + if re.compile(r'[0-9]{12}').match(kwargs['ChildId']): + child_object = self.get_account_by_id(kwargs['ChildId']) + else: + child_object = self.get_organizational_unit_by_id(kwargs['ChildId']) + return dict( + Parents=[ + { + 'Id': ou.id, + 'Type': ou.type, + } + for ou in self.ou + if ou.id == child_object.parent_id + ] + ) + + def list_children(self, **kwargs): + parent_id = self.validate_parent_id(kwargs['ParentId']) + if kwargs['ChildType'] == 'ACCOUNT': + obj_list = self.accounts + elif kwargs['ChildType'] == 'ORGANIZATIONAL_UNIT': + obj_list = self.ou + else: + raise RESTError( + 'InvalidInputException', + 'You specified an invalid value.' 
+ ) + return dict( + Children=[ + { + 'Id': obj.id, + 'Type': kwargs['ChildType'], + } + for obj in obj_list + if obj.parent_id == parent_id + ] + ) + + +organizations_backend = OrganizationsBackend() diff --git a/moto/organizations/responses.py b/moto/organizations/responses.py new file mode 100644 index 000000000000..966c3fbf3bd8 --- /dev/null +++ b/moto/organizations/responses.py @@ -0,0 +1,87 @@ +from __future__ import unicode_literals +import json + +from moto.core.responses import BaseResponse +from .models import organizations_backend + + +class OrganizationsResponse(BaseResponse): + + @property + def organizations_backend(self): + return organizations_backend + + @property + def request_params(self): + try: + return json.loads(self.body) + except ValueError: + return {} + + def _get_param(self, param, default=None): + return self.request_params.get(param, default) + + def create_organization(self): + return json.dumps( + self.organizations_backend.create_organization(**self.request_params) + ) + + def describe_organization(self): + return json.dumps( + self.organizations_backend.describe_organization() + ) + + def list_roots(self): + return json.dumps( + self.organizations_backend.list_roots() + ) + + def create_organizational_unit(self): + return json.dumps( + self.organizations_backend.create_organizational_unit(**self.request_params) + ) + + def describe_organizational_unit(self): + return json.dumps( + self.organizations_backend.describe_organizational_unit(**self.request_params) + ) + + def list_organizational_units_for_parent(self): + return json.dumps( + self.organizations_backend.list_organizational_units_for_parent(**self.request_params) + ) + + def list_parents(self): + return json.dumps( + self.organizations_backend.list_parents(**self.request_params) + ) + + def create_account(self): + return json.dumps( + self.organizations_backend.create_account(**self.request_params) + ) + + def describe_account(self): + return json.dumps( + self.organizations_backend.describe_account(**self.request_params) + ) + + def list_accounts(self): + return json.dumps( + self.organizations_backend.list_accounts() + ) + + def list_accounts_for_parent(self): + return json.dumps( + self.organizations_backend.list_accounts_for_parent(**self.request_params) + ) + + def move_account(self): + return json.dumps( + self.organizations_backend.move_account(**self.request_params) + ) + + def list_children(self): + return json.dumps( + self.organizations_backend.list_children(**self.request_params) + ) diff --git a/moto/organizations/urls.py b/moto/organizations/urls.py new file mode 100644 index 000000000000..7911f5b53c10 --- /dev/null +++ b/moto/organizations/urls.py @@ -0,0 +1,10 @@ +from __future__ import unicode_literals +from .responses import OrganizationsResponse + +url_bases = [ + "https?://organizations.(.+).amazonaws.com", +] + +url_paths = { + '{0}/$': OrganizationsResponse.dispatch, +} diff --git a/moto/organizations/utils.py b/moto/organizations/utils.py new file mode 100644 index 000000000000..007afa6edd78 --- /dev/null +++ b/moto/organizations/utils.py @@ -0,0 +1,59 @@ +from __future__ import unicode_literals + +import random +import string + +MASTER_ACCOUNT_ID = '123456789012' +MASTER_ACCOUNT_EMAIL = 'fakeorg@moto-example.com' +ORGANIZATION_ARN_FORMAT = 'arn:aws:organizations::{0}:organization/{1}' +MASTER_ACCOUNT_ARN_FORMAT = 'arn:aws:organizations::{0}:account/{1}/{0}' +ACCOUNT_ARN_FORMAT = 'arn:aws:organizations::{0}:account/{1}/{2}' +ROOT_ARN_FORMAT = 
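
End to end, the backend above supports the basic organization tree: a root, OUs, accounts, and moves between parents. A sketch using the new mock_organizations decorator; the OU name, account name, and email are illustrative:

import boto3
from moto import mock_organizations

@mock_organizations
def move_account_into_ou():
    org = boto3.client("organizations", region_name="us-east-1")
    org.create_organization(FeatureSet="ALL")
    root_id = org.list_roots()["Roots"][0]["Id"]
    ou_id = org.create_organizational_unit(
        ParentId=root_id, Name="engineering"
    )["OrganizationalUnit"]["Id"]
    account_id = org.create_account(
        Email="dev@moto-example.com", AccountName="dev"
    )["CreateAccountStatus"]["AccountId"]
    org.move_account(
        AccountId=account_id,
        SourceParentId=root_id,
        DestinationParentId=ou_id,
    )
    accounts = org.list_accounts_for_parent(ParentId=ou_id)["Accounts"]
    assert [a["Id"] for a in accounts] == [account_id]
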
'arn:aws:organizations::{0}:root/{1}/{2}' +OU_ARN_FORMAT = 'arn:aws:organizations::{0}:ou/{1}/{2}' + +CHARSET = string.ascii_lowercase + string.digits +ORG_ID_SIZE = 10 +ROOT_ID_SIZE = 4 +ACCOUNT_ID_SIZE = 12 +OU_ID_SUFFIX_SIZE = 8 +CREATE_ACCOUNT_STATUS_ID_SIZE = 8 + + +def make_random_org_id(): + # The regex pattern for an organization ID string requires "o-" + # followed by from 10 to 32 lower-case letters or digits. + # e.g. 'o-vipjnq5z86' + return 'o-' + ''.join(random.choice(CHARSET) for x in range(ORG_ID_SIZE)) + + +def make_random_root_id(): + # The regex pattern for a root ID string requires "r-" followed by + # from 4 to 32 lower-case letters or digits. + # e.g. 'r-3zwx' + return 'r-' + ''.join(random.choice(CHARSET) for x in range(ROOT_ID_SIZE)) + + +def make_random_ou_id(root_id): + # The regex pattern for an organizational unit ID string requires "ou-" + # followed by from 4 to 32 lower-case letters or digits (the ID of the root + # that contains the OU) followed by a second "-" dash and from 8 to 32 + # additional lower-case letters or digits. + # e.g. ou-g8sd-5oe3bjaw + return '-'.join([ + 'ou', + root_id.partition('-')[2], + ''.join(random.choice(CHARSET) for x in range(OU_ID_SUFFIX_SIZE)), + ]) + + +def make_random_account_id(): + # The regex pattern for an account ID string requires exactly 12 digits. + # e.g. '488633172133' + return ''.join([random.choice(string.digits) for n in range(ACCOUNT_ID_SIZE)]) + + +def make_random_create_account_status_id(): + # The regex pattern for an create account request ID string requires + # "car-" followed by from 8 to 32 lower-case letters or digits. + # e.g. 'car-35gxzwrp' + return 'car-' + ''.join(random.choice(CHARSET) for x in range(CREATE_ACCOUNT_STATUS_ID_SIZE)) diff --git a/moto/packages/httpretty/core.py b/moto/packages/httpretty/core.py index e0f3a7e696f9..8ad9168a5a66 100644 --- a/moto/packages/httpretty/core.py +++ b/moto/packages/httpretty/core.py @@ -85,6 +85,7 @@ old_ssl_wrap_socket = None old_sslwrap_simple = None old_sslsocket = None +old_sslcontext_wrap_socket = None if PY3: # pragma: no cover basestring = (bytes, str) @@ -100,6 +101,10 @@ if not PY3: old_sslwrap_simple = ssl.sslwrap_simple old_sslsocket = ssl.SSLSocket + try: + old_sslcontext_wrap_socket = ssl.SSLContext.wrap_socket + except AttributeError: + pass except ImportError: # pragma: no cover ssl = None @@ -281,7 +286,7 @@ def getpeercert(self, *a, **kw): return { 'notAfter': shift.strftime('%b %d %H:%M:%S GMT'), 'subjectAltName': ( - ('DNS', '*%s' % self._host), + ('DNS', '*.%s' % self._host), ('DNS', self._host), ('DNS', '*'), ), @@ -772,7 +777,7 @@ class URIMatcher(object): def __init__(self, uri, entries, match_querystring=False): self._match_querystring = match_querystring - if type(uri).__name__ == 'SRE_Pattern': + if type(uri).__name__ in ('SRE_Pattern', 'Pattern'): self.regex = uri result = urlsplit(uri.pattern) if result.scheme == 'https': @@ -1012,6 +1017,10 @@ def disable(cls): if ssl: ssl.wrap_socket = old_ssl_wrap_socket ssl.SSLSocket = old_sslsocket + try: + ssl.SSLContext.wrap_socket = old_sslcontext_wrap_socket + except AttributeError: + pass ssl.__dict__['wrap_socket'] = old_ssl_wrap_socket ssl.__dict__['SSLSocket'] = old_sslsocket @@ -1058,6 +1067,14 @@ def enable(cls): ssl.wrap_socket = fake_wrap_socket ssl.SSLSocket = FakeSSLSocket + try: + def fake_sslcontext_wrap_socket(cls, *args, **kwargs): + return fake_wrap_socket(*args, **kwargs) + + ssl.SSLContext.wrap_socket = fake_sslcontext_wrap_socket + except AttributeError: + pass + 
ssl.__dict__['wrap_socket'] = fake_wrap_socket ssl.__dict__['SSLSocket'] = FakeSSLSocket diff --git a/moto/rds/models.py b/moto/rds/models.py index 77deff09d9ea..feecefe0ce79 100644 --- a/moto/rds/models.py +++ b/moto/rds/models.py @@ -48,6 +48,10 @@ def __init__(self, **kwargs): if self.publicly_accessible is None: self.publicly_accessible = True + self.copy_tags_to_snapshot = kwargs.get("copy_tags_to_snapshot") + if self.copy_tags_to_snapshot is None: + self.copy_tags_to_snapshot = False + self.backup_retention_period = kwargs.get("backup_retention_period") if self.backup_retention_period is None: self.backup_retention_period = 1 @@ -137,6 +141,7 @@ def create_from_cloudformation_json(cls, resource_name, cloudformation_json, reg "multi_az": properties.get("MultiAZ"), "port": properties.get('Port', 3306), "publicly_accessible": properties.get("PubliclyAccessible"), + "copy_tags_to_snapshot": properties.get("CopyTagsToSnapshot"), "region": region_name, "security_groups": security_groups, "storage_encrypted": properties.get("StorageEncrypted"), @@ -217,6 +222,7 @@ def to_xml(self): {% endif %} {{ database.publicly_accessible }} + {{ database.copy_tags_to_snapshot }} {{ database.auto_minor_version_upgrade }} {{ database.allocated_storage }} {{ database.storage_encrypted }} diff --git a/moto/rds2/models.py b/moto/rds2/models.py index 3fc4b6d659d8..fee004f7698c 100644 --- a/moto/rds2/models.py +++ b/moto/rds2/models.py @@ -73,6 +73,9 @@ def __init__(self, **kwargs): self.publicly_accessible = kwargs.get("publicly_accessible") if self.publicly_accessible is None: self.publicly_accessible = True + self.copy_tags_to_snapshot = kwargs.get("copy_tags_to_snapshot") + if self.copy_tags_to_snapshot is None: + self.copy_tags_to_snapshot = False self.backup_retention_period = kwargs.get("backup_retention_period") if self.backup_retention_period is None: self.backup_retention_period = 1 @@ -208,6 +211,7 @@ def to_xml(self): {% endif %} {{ database.publicly_accessible }} + {{ database.copy_tags_to_snapshot }} {{ database.auto_minor_version_upgrade }} {{ database.allocated_storage }} {{ database.storage_encrypted }} @@ -304,6 +308,7 @@ def create_from_cloudformation_json(cls, resource_name, cloudformation_json, reg "db_parameter_group_name": properties.get('DBParameterGroupName'), "port": properties.get('Port', 3306), "publicly_accessible": properties.get("PubliclyAccessible"), + "copy_tags_to_snapshot": properties.get("CopyTagsToSnapshot"), "region": region_name, "security_groups": security_groups, "storage_encrypted": properties.get("StorageEncrypted"), @@ -362,6 +367,7 @@ def to_json(self): "PreferredBackupWindow": "{{ database.preferred_backup_window }}", "PreferredMaintenanceWindow": "{{ database.preferred_maintenance_window }}", "PubliclyAccessible": "{{ database.publicly_accessible }}", + "CopyTagsToSnapshot": "{{ database.copy_tags_to_snapshot }}", "AllocatedStorage": "{{ database.allocated_storage }}", "Endpoint": { "Address": "{{ database.address }}", @@ -411,10 +417,10 @@ def delete(self, region_name): class Snapshot(BaseModel): - def __init__(self, database, snapshot_id, tags=None): + def __init__(self, database, snapshot_id, tags): self.database = database self.snapshot_id = snapshot_id - self.tags = tags or [] + self.tags = tags self.created_at = iso_8601_datetime_with_milliseconds(datetime.datetime.now()) @property @@ -456,6 +462,20 @@ def to_xml(self): """) return template.render(snapshot=self, database=self.database) + def get_tags(self): + return self.tags + + def add_tags(self, tags): + 
new_keys = [tag_set['Key'] for tag_set in tags] + self.tags = [tag_set for tag_set in self.tags if tag_set[ + 'Key'] not in new_keys] + self.tags.extend(tags) + return self.tags + + def remove_tags(self, tag_keys): + self.tags = [tag_set for tag_set in self.tags if tag_set[ + 'Key'] not in tag_keys] + class SecurityGroup(BaseModel): @@ -691,6 +711,10 @@ def create_snapshot(self, db_instance_identifier, db_snapshot_identifier, tags=N raise DBSnapshotAlreadyExistsError(db_snapshot_identifier) if len(self.snapshots) >= int(os.environ.get('MOTO_RDS_SNAPSHOT_LIMIT', '100')): raise SnapshotQuotaExceededError() + if tags is None: + tags = list() + if database.copy_tags_to_snapshot and not tags: + tags = database.get_tags() snapshot = Snapshot(database, db_snapshot_identifier, tags) self.snapshots[db_snapshot_identifier] = snapshot return snapshot @@ -787,13 +811,13 @@ def find_db_from_id(self, db_id): def delete_database(self, db_instance_identifier, db_snapshot_name=None): if db_instance_identifier in self.databases: + if db_snapshot_name: + self.create_snapshot(db_instance_identifier, db_snapshot_name) database = self.databases.pop(db_instance_identifier) if database.is_replica: primary = self.find_db_from_id(database.source_db_identifier) primary.remove_replica(database) database.status = 'deleting' - if db_snapshot_name: - self.snapshots[db_snapshot_name] = Snapshot(database, db_snapshot_name) return database else: raise DBInstanceNotFoundError(db_instance_identifier) @@ -1028,8 +1052,8 @@ def list_tags_for_resource(self, arn): if resource_name in self.security_groups: return self.security_groups[resource_name].get_tags() elif resource_type == 'snapshot': # DB Snapshot - # TODO: Complete call to tags on resource type DB Snapshot - return [] + if resource_name in self.snapshots: + return self.snapshots[resource_name].get_tags() elif resource_type == 'subgrp': # DB subnet group if resource_name in self.subnet_groups: return self.subnet_groups[resource_name].get_tags() @@ -1059,7 +1083,8 @@ def remove_tags_from_resource(self, arn, tag_keys): if resource_name in self.security_groups: return self.security_groups[resource_name].remove_tags(tag_keys) elif resource_type == 'snapshot': # DB Snapshot - return None + if resource_name in self.snapshots: + return self.snapshots[resource_name].remove_tags(tag_keys) elif resource_type == 'subgrp': # DB subnet group if resource_name in self.subnet_groups: return self.subnet_groups[resource_name].remove_tags(tag_keys) @@ -1088,7 +1113,8 @@ def add_tags_to_resource(self, arn, tags): if resource_name in self.security_groups: return self.security_groups[resource_name].add_tags(tags) elif resource_type == 'snapshot': # DB Snapshot - return [] + if resource_name in self.snapshots: + return self.snapshots[resource_name].add_tags(tags) elif resource_type == 'subgrp': # DB subnet group if resource_name in self.subnet_groups: return self.subnet_groups[resource_name].add_tags(tags) diff --git a/moto/rds2/responses.py b/moto/rds2/responses.py index eddb0042b825..66d4e0c52286 100644 --- a/moto/rds2/responses.py +++ b/moto/rds2/responses.py @@ -19,6 +19,7 @@ def _get_db_kwargs(self): "allocated_storage": self._get_int_param('AllocatedStorage'), "availability_zone": self._get_param("AvailabilityZone"), "backup_retention_period": self._get_param("BackupRetentionPeriod"), + "copy_tags_to_snapshot": self._get_param("CopyTagsToSnapshot"), "db_instance_class": self._get_param('DBInstanceClass'), "db_instance_identifier": self._get_param('DBInstanceIdentifier'), "db_name": 
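
CopyTagsToSnapshot only kicks in when no explicit tags are passed to create_snapshot, matching RDS behaviour. A sketch of the propagation; the snapshot ARN is assembled by hand here and assumes moto's usual arn:aws:rds:region:account:snapshot:id layout:

import boto3
from moto import mock_rds2

@mock_rds2
def tags_copied_to_snapshot():
    rds = boto3.client("rds", region_name="us-west-2")
    rds.create_db_instance(
        DBInstanceIdentifier="primary",
        DBInstanceClass="db.m1.small",
        Engine="postgres",
        AllocatedStorage=10,
        CopyTagsToSnapshot=True,
        Tags=[{"Key": "team", "Value": "data"}],
    )
    rds.create_db_snapshot(
        DBInstanceIdentifier="primary", DBSnapshotIdentifier="snap-1"
    )
    # Illustrative ARN; only the trailing snapshot:snap-1 segments matter
    # to the mock's tag lookup.
    arn = "arn:aws:rds:us-west-2:1234567890:snapshot:snap-1"
    tags = rds.list_tags_for_resource(ResourceName=arn)["TagList"]
    assert tags == [{"Key": "team", "Value": "data"}]
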
self._get_param("DBName"), @@ -159,7 +160,7 @@ def reboot_db_instance(self): def create_db_snapshot(self): db_instance_identifier = self._get_param('DBInstanceIdentifier') db_snapshot_identifier = self._get_param('DBSnapshotIdentifier') - tags = self._get_param('Tags', []) + tags = self.unpack_complex_list_params('Tags.Tag', ('Key', 'Value')) snapshot = self.backend.create_snapshot(db_instance_identifier, db_snapshot_identifier, tags) template = self.response_template(CREATE_SNAPSHOT_TEMPLATE) return template.render(snapshot=snapshot) diff --git a/moto/redshift/models.py b/moto/redshift/models.py index 4eafcfc7992d..70cbb95cb7cd 100644 --- a/moto/redshift/models.py +++ b/moto/redshift/models.py @@ -78,6 +78,7 @@ def __init__(self, redshift_backend, cluster_identifier, node_type, master_usern super(Cluster, self).__init__(region_name, tags) self.redshift_backend = redshift_backend self.cluster_identifier = cluster_identifier + self.create_time = iso_8601_datetime_with_milliseconds(datetime.datetime.now()) self.status = 'available' self.node_type = node_type self.master_username = master_username @@ -237,6 +238,7 @@ def to_json(self): "Address": self.endpoint, "Port": self.port }, + 'ClusterCreateTime': self.create_time, "PendingModifiedValues": [], "Tags": self.tags, "IamRoles": [{ diff --git a/moto/s3/models.py b/moto/s3/models.py index cf5628141350..bb4d7848c1ac 100644 --- a/moto/s3/models.py +++ b/moto/s3/models.py @@ -27,8 +27,14 @@ class FakeDeleteMarker(BaseModel): def __init__(self, key): self.key = key + self.name = key.name + self.last_modified = datetime.datetime.utcnow() self._version_id = key.version_id + 1 + @property + def last_modified_ISO8601(self): + return iso_8601_datetime_with_milliseconds(self.last_modified) + @property def version_id(self): return self._version_id @@ -335,8 +341,9 @@ def __init__(self, prefix=None, tags=None): class LifecycleRule(BaseModel): def __init__(self, id=None, prefix=None, lc_filter=None, status=None, expiration_days=None, - expiration_date=None, transition_days=None, expired_object_delete_marker=None, - transition_date=None, storage_class=None): + expiration_date=None, transition_days=None, transition_date=None, storage_class=None, + expired_object_delete_marker=None, nve_noncurrent_days=None, nvt_noncurrent_days=None, + nvt_storage_class=None, aimu_days=None): self.id = id self.prefix = prefix self.filter = lc_filter @@ -345,8 +352,12 @@ def __init__(self, id=None, prefix=None, lc_filter=None, status=None, expiration self.expiration_date = expiration_date self.transition_days = transition_days self.transition_date = transition_date - self.expired_object_delete_marker = expired_object_delete_marker self.storage_class = storage_class + self.expired_object_delete_marker = expired_object_delete_marker + self.nve_noncurrent_days = nve_noncurrent_days + self.nvt_noncurrent_days = nvt_noncurrent_days + self.nvt_storage_class = nvt_storage_class + self.aimu_days = aimu_days class CorsRule(BaseModel): @@ -408,9 +419,32 @@ def is_versioned(self): def set_lifecycle(self, rules): self.rules = [] for rule in rules: + # Extract and validate actions from Lifecycle rule expiration = rule.get('Expiration') transition = rule.get('Transition') + nve_noncurrent_days = None + if rule.get('NoncurrentVersionExpiration') is not None: + if rule["NoncurrentVersionExpiration"].get('NoncurrentDays') is None: + raise MalformedXML() + nve_noncurrent_days = rule["NoncurrentVersionExpiration"]["NoncurrentDays"] + + nvt_noncurrent_days = None + nvt_storage_class = None + if 
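
Giving FakeDeleteMarker its own name and last_modified lets the list-versions template treat markers and real keys uniformly (both now expose .name). A sketch of the visible result, assuming mock_s3:

import boto3
from moto import mock_s3

@mock_s3
def delete_markers_are_listed():
    s3 = boto3.client("s3", region_name="us-east-1")
    s3.create_bucket(Bucket="logs")
    s3.put_bucket_versioning(
        Bucket="logs", VersioningConfiguration={"Status": "Enabled"}
    )
    s3.put_object(Bucket="logs", Key="app.log", Body=b"v1")
    s3.delete_object(Bucket="logs", Key="app.log")
    markers = s3.list_object_versions(Bucket="logs")["DeleteMarkers"]
    assert markers[0]["Key"] == "app.log"
    assert markers[0]["IsLatest"] is True
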
rule.get('NoncurrentVersionTransition') is not None: + if rule["NoncurrentVersionTransition"].get('NoncurrentDays') is None: + raise MalformedXML() + if rule["NoncurrentVersionTransition"].get('StorageClass') is None: + raise MalformedXML() + nvt_noncurrent_days = rule["NoncurrentVersionTransition"]["NoncurrentDays"] + nvt_storage_class = rule["NoncurrentVersionTransition"]["StorageClass"] + + aimu_days = None + if rule.get('AbortIncompleteMultipartUpload') is not None: + if rule["AbortIncompleteMultipartUpload"].get('DaysAfterInitiation') is None: + raise MalformedXML() + aimu_days = rule["AbortIncompleteMultipartUpload"]["DaysAfterInitiation"] + eodm = None if expiration and expiration.get("ExpiredObjectDeleteMarker") is not None: # This cannot be set if Date or Days is set: @@ -453,11 +487,14 @@ def set_lifecycle(self, rules): status=rule['Status'], expiration_days=expiration.get('Days') if expiration else None, expiration_date=expiration.get('Date') if expiration else None, - expired_object_delete_marker=eodm, transition_days=transition.get('Days') if transition else None, transition_date=transition.get('Date') if transition else None, - storage_class=transition[ - 'StorageClass'] if transition else None, + storage_class=transition.get('StorageClass') if transition else None, + expired_object_delete_marker=eodm, + nve_noncurrent_days=nve_noncurrent_days, + nvt_noncurrent_days=nvt_noncurrent_days, + nvt_storage_class=nvt_storage_class, + aimu_days=aimu_days, )) def delete_lifecycle(self): @@ -630,10 +667,7 @@ def get_bucket_latest_versions(self, bucket_name): latest_versions = {} for version in versions: - if isinstance(version, FakeDeleteMarker): - name = version.key.name - else: - name = version.name + name = version.name version_id = version.version_id maximum_version_per_key[name] = max( version_id, diff --git a/moto/s3/responses.py b/moto/s3/responses.py index 5e7cf0fe5af9..962025cb13e3 100755 --- a/moto/s3/responses.py +++ b/moto/s3/responses.py @@ -1228,6 +1228,22 @@ def _key_response_post(self, request, body, bucket_name, query, key_name, header {% endif %} {% endif %} + {% if rule.nvt_noncurrent_days and rule.nvt_storage_class %} + + {{ rule.nvt_noncurrent_days }} + {{ rule.nvt_storage_class }} + + {% endif %} + {% if rule.nve_noncurrent_days %} + + {{ rule.nve_noncurrent_days }} + + {% endif %} + {% if rule.aimu_days %} + + {{ rule.aimu_days }} + + {% endif %} {% endfor %} @@ -1273,10 +1289,10 @@ def _key_response_post(self, request, body, bucket_name, query, key_name, header {% endfor %} {% for marker in delete_marker_list %} - {{ marker.key.name }} + {{ marker.name }} {{ marker.version_id }} - {% if latest_versions[marker.key.name] == marker.version_id %}true{% else %}false{% endif %} - {{ marker.key.last_modified_ISO8601 }} + {% if latest_versions[marker.name] == marker.version_id %}true{% else %}false{% endif %} + {{ marker.last_modified_ISO8601 }} 75aa57f09aa0c8caeab4f8c24e99d10f8e7faeebf76c078efc7c6caea54ba06a webfile @@ -1433,7 +1449,7 @@ def _key_response_post(self, request, body, bucket_name, query, key_name, header STANDARD 1 - {{ count }} + {{ count }} {{ count }} false {% for part in parts %} diff --git a/moto/secretsmanager/models.py b/moto/secretsmanager/models.py index c60feb530c07..1404a0ec81c2 100644 --- a/moto/secretsmanager/models.py +++ b/moto/secretsmanager/models.py @@ -36,6 +36,7 @@ def __init__(self, region_name=None, **kwargs): self.rotation_enabled = False self.rotation_lambda_arn = '' self.auto_rotate_after_days = 0 + self.version_id = '' def 
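
The three new lifecycle actions are parsed strictly (a missing required child raises MalformedXML) and rendered back by the template above. A sketch of a rule exercising all three, assuming mock_s3; note boto3's put_bucket_lifecycle_configuration takes NoncurrentVersionTransitions as a list even though the XML element is singular:

import boto3
from moto import mock_s3

@mock_s3
def noncurrent_version_rules():
    s3 = boto3.client("s3", region_name="us-east-1")
    s3.create_bucket(Bucket="archive")
    s3.put_bucket_lifecycle_configuration(
        Bucket="archive",
        LifecycleConfiguration={"Rules": [{
            "ID": "tidy-old-versions",
            "Prefix": "logs/",
            "Status": "Enabled",
            "NoncurrentVersionTransitions": [
                {"NoncurrentDays": 30, "StorageClass": "GLACIER"}
            ],
            "NoncurrentVersionExpiration": {"NoncurrentDays": 365},
            "AbortIncompleteMultipartUpload": {"DaysAfterInitiation": 7},
        }]},
    )
    rule = s3.get_bucket_lifecycle_configuration(Bucket="archive")["Rules"][0]
    assert rule["NoncurrentVersionExpiration"]["NoncurrentDays"] == 365
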
reset(self): region_name = self.region @@ -105,6 +106,56 @@ def describe_secret(self, secret_id): return response + + def rotate_secret(self, secret_id, client_request_token=None, + rotation_lambda_arn=None, rotation_rules=None): + + rotation_days = 'AutomaticallyAfterDays' + + if not self._is_valid_identifier(secret_id): + raise ResourceNotFoundException + + if client_request_token: + token_length = len(client_request_token) + if token_length < 32 or token_length > 64: + msg = ( + 'ClientRequestToken ' + 'must be 32-64 characters long.' + ) + raise InvalidParameterException(msg) + + if rotation_lambda_arn: + if len(rotation_lambda_arn) > 2048: + msg = ( + 'RotationLambdaARN ' + 'must be <= 2048 characters long.' + ) + raise InvalidParameterException(msg) + + if rotation_rules: + if rotation_days in rotation_rules: + rotation_period = rotation_rules[rotation_days] + if rotation_period < 1 or rotation_period > 1000: + msg = ( + 'RotationRules.AutomaticallyAfterDays ' + 'must be within 1-1000.' + ) + raise InvalidParameterException(msg) + + self.version_id = client_request_token or '' + self.rotation_lambda_arn = rotation_lambda_arn or '' + if rotation_rules: + self.auto_rotate_after_days = rotation_rules.get(rotation_days, 0) + if self.auto_rotate_after_days > 0: + self.rotation_enabled = True + + response = json.dumps({ + "ARN": secret_arn(self.region, self.secret_id), + "Name": self.name, + "VersionId": self.version_id + }) + + return response + def get_random_password(self, password_length, exclude_characters, exclude_numbers, exclude_punctuation, exclude_uppercase, diff --git a/moto/secretsmanager/responses.py b/moto/secretsmanager/responses.py index c50c6a6e1422..b8b6872a8fbc 100644 --- a/moto/secretsmanager/responses.py +++ b/moto/secretsmanager/responses.py @@ -50,3 +50,15 @@ def describe_secret(self): return secretsmanager_backends[self.region].describe_secret( secret_id=secret_id ) + + def rotate_secret(self): + client_request_token = self._get_param('ClientRequestToken') + rotation_lambda_arn = self._get_param('RotationLambdaARN') + rotation_rules = self._get_param('RotationRules') + secret_id = self._get_param('SecretId') + return secretsmanager_backends[self.region].rotate_secret( + secret_id=secret_id, + client_request_token=client_request_token, + rotation_lambda_arn=rotation_lambda_arn, + rotation_rules=rotation_rules + ) diff --git a/moto/server.py b/moto/server.py index aad47757afac..ba247047845a 100644 --- a/moto/server.py +++ b/moto/server.py @@ -34,6 +34,9 @@ def __init__(self, create_app, service=None): self.service = service def get_backend_for_host(self, host): + if host == 'moto_api': + return host + if self.service: return self.service diff --git a/moto/ses/models.py b/moto/ses/models.py index 3dced60f2fa8..71fe9d9a14e9 100644 --- a/moto/ses/models.py +++ b/moto/ses/models.py @@ -49,7 +49,8 @@ def __init__(self): self.sent_messages = [] self.sent_message_count = 0 - def _is_verified_address(self, address): + def _is_verified_address(self, source): + _, address = parseaddr(source) if address in self.addresses: return True user, host = address.split('@', 1) diff --git a/moto/sqs/models.py b/moto/sqs/models.py index b8db356e9f01..f3262a988054 100644 --- a/moto/sqs/models.py +++ b/moto/sqs/models.py @@ -385,10 +385,22 @@ def reset(self): def create_queue(self, name, **kwargs): queue = self.queues.get(name) if queue: - # Queue already exist. 
If attributes don't match, throw error - for key, value in kwargs.items(): - if getattr(queue, camelcase_to_underscores(key)) != value: - raise QueueAlreadyExists("The specified queue already exists.") + try: + kwargs.pop('region') + except KeyError: + pass + + new_queue = Queue(name, region=self.region_name, **kwargs) + + queue_attributes = queue.attributes + new_queue_attributes = new_queue.attributes + + for key in ['CreatedTimestamp', 'LastModifiedTimestamp']: + queue_attributes.pop(key) + new_queue_attributes.pop(key) + + if queue_attributes != new_queue_attributes: + raise QueueAlreadyExists("The specified queue already exists.") else: try: kwargs.pop('region') diff --git a/moto/sqs/responses.py b/moto/sqs/responses.py index c489d7118270..b4f64b14e439 100644 --- a/moto/sqs/responses.py +++ b/moto/sqs/responses.py @@ -336,7 +336,7 @@ def receive_message(self): try: wait_time = int(self.querystring.get("WaitTimeSeconds")[0]) except TypeError: - wait_time = queue.receive_message_wait_time_seconds + wait_time = int(queue.receive_message_wait_time_seconds) if wait_time < 0 or wait_time > 20: return self._error( diff --git a/moto/ssm/models.py b/moto/ssm/models.py index 656a14839829..f16a7d981fab 100644 --- a/moto/ssm/models.py +++ b/moto/ssm/models.py @@ -5,10 +5,12 @@ from moto.core import BaseBackend, BaseModel from moto.core.exceptions import RESTError from moto.ec2 import ec2_backends +from moto.cloudformation import cloudformation_backends import datetime import time import uuid +import itertools class Parameter(BaseModel): @@ -67,7 +69,7 @@ def __init__(self, comment='', document_name='', timeout_seconds=MAX_TIMEOUT_SEC instance_ids=None, max_concurrency='', max_errors='', notification_config=None, output_s3_bucket_name='', output_s3_key_prefix='', output_s3_region='', parameters=None, - service_role_arn='', targets=None): + service_role_arn='', targets=None, backend_region='us-east-1'): if instance_ids is None: instance_ids = [] @@ -88,9 +90,9 @@ def __init__(self, comment='', document_name='', timeout_seconds=MAX_TIMEOUT_SEC self.status = 'Success' self.status_details = 'Details placeholder' - now = datetime.datetime.now() - self.requested_date_time = now.isoformat() - expires_after = now + datetime.timedelta(0, timeout_seconds) + self.requested_date_time = datetime.datetime.now() + self.requested_date_time_iso = self.requested_date_time.isoformat() + expires_after = self.requested_date_time + datetime.timedelta(0, timeout_seconds) self.expires_after = expires_after.isoformat() self.comment = comment @@ -105,6 +107,32 @@ def __init__(self, comment='', document_name='', timeout_seconds=MAX_TIMEOUT_SEC self.parameters = parameters self.service_role_arn = service_role_arn self.targets = targets + self.backend_region = backend_region + + # Get instance ids from a cloud formation stack target. + stack_instance_ids = [self.get_instance_ids_by_stack_ids(target['Values']) for + target in self.targets if + target['Key'] == 'tag:aws:cloudformation:stack-name'] + + self.instance_ids += list(itertools.chain.from_iterable(stack_instance_ids)) + + # Create invocations with a single run command plugin. 
+ self.invocations = [] + for instance_id in self.instance_ids: + self.invocations.append( + self.invocation_response(instance_id, "aws:runShellScript")) + + def get_instance_ids_by_stack_ids(self, stack_ids): + instance_ids = [] + cloudformation_backend = cloudformation_backends[self.backend_region] + for stack_id in stack_ids: + stack_resources = cloudformation_backend.list_stack_resources(stack_id) + instance_resources = [ + instance.id for instance in stack_resources + if instance.type == "AWS::EC2::Instance"] + instance_ids.extend(instance_resources) + + return instance_ids def response_object(self): r = { @@ -122,7 +150,7 @@ def response_object(self): 'OutputS3BucketName': self.output_s3_bucket_name, 'OutputS3KeyPrefix': self.output_s3_key_prefix, 'Parameters': self.parameters, - 'RequestedDateTime': self.requested_date_time, + 'RequestedDateTime': self.requested_date_time_iso, 'ServiceRole': self.service_role_arn, 'Status': self.status, 'StatusDetails': self.status_details, @@ -132,6 +160,50 @@ def response_object(self): return r + def invocation_response(self, instance_id, plugin_name): + # Calculate elapsed time from requested time and now. Use a hardcoded + # elapsed time since there is no easy way to convert a timedelta to + # an ISO 8601 duration string. + elapsed_time_iso = "PT5M" + elapsed_time_delta = datetime.timedelta(minutes=5) + end_time = self.requested_date_time + elapsed_time_delta + + r = { + 'CommandId': self.command_id, + 'InstanceId': instance_id, + 'Comment': self.comment, + 'DocumentName': self.document_name, + 'PluginName': plugin_name, + 'ResponseCode': 0, + 'ExecutionStartDateTime': self.requested_date_time_iso, + 'ExecutionElapsedTime': elapsed_time_iso, + 'ExecutionEndDateTime': end_time.isoformat(), + 'Status': 'Success', + 'StatusDetails': 'Success', + 'StandardOutputContent': '', + 'StandardOutputUrl': '', + 'StandardErrorContent': '', + } + + return r + + def get_invocation(self, instance_id, plugin_name): + invocation = next( + (invocation for invocation in self.invocations + if invocation['InstanceId'] == instance_id), None) + + if invocation is None: + raise RESTError( + 'InvocationDoesNotExist', + 'An error occurred (InvocationDoesNotExist) when calling the GetCommandInvocation operation') + + if plugin_name is not None and invocation['PluginName'] != plugin_name: + raise RESTError( + 'InvocationDoesNotExist', + 'An error occurred (InvocationDoesNotExist) when calling the GetCommandInvocation operation') + + return invocation + class SimpleSystemManagerBackend(BaseBackend): @@ -140,6 +212,11 @@ def __init__(self): self._resource_tags = defaultdict(lambda: defaultdict(dict)) self._commands = [] + # figure out what region we're in + for region, backend in ssm_backends.items(): + if backend == self: + self._region = region + def delete_parameter(self, name): try: del self._parameters[name] @@ -260,7 +337,8 @@ def send_command(self, **kwargs): output_s3_region=kwargs.get('OutputS3Region', ''), parameters=kwargs.get('Parameters', {}), service_role_arn=kwargs.get('ServiceRoleArn', ''), - targets=kwargs.get('Targets', [])) + targets=kwargs.get('Targets', []), + backend_region=self._region) self._commands.append(command) return { @@ -298,6 +376,18 @@ def get_commands_by_instance_id(self, instance_id): command for command in self._commands if instance_id in command.instance_ids] + def get_command_invocation(self, **kwargs): + """ + https://docs.aws.amazon.com/systems-manager/latest/APIReference/API_GetCommandInvocation.html + """ + + command_id = 
kwargs.get('CommandId') + instance_id = kwargs.get('InstanceId') + plugin_name = kwargs.get('PluginName', None) + + command = self.get_command_by_id(command_id) + return command.get_invocation(instance_id, plugin_name) + ssm_backends = {} for region, ec2_backend in ec2_backends.items(): diff --git a/moto/ssm/responses.py b/moto/ssm/responses.py index fd0d8b630448..eb05e51b6374 100644 --- a/moto/ssm/responses.py +++ b/moto/ssm/responses.py @@ -210,3 +210,8 @@ def list_commands(self): return json.dumps( self.ssm_backend.list_commands(**self.request_params) ) + + def get_command_invocation(self): + return json.dumps( + self.ssm_backend.get_command_invocation(**self.request_params) + ) diff --git a/requirements-dev.txt b/requirements-dev.txt index 655be0616d1d..111cd5f3ff84 100644 --- a/requirements-dev.txt +++ b/requirements-dev.txt @@ -1,7 +1,7 @@ -r requirements.txt mock nose -sure==1.2.24 +sure==1.4.11 coverage flake8==3.5.0 freezegun @@ -13,5 +13,5 @@ six>=1.9 prompt-toolkit==1.0.14 click==6.7 inflection==0.3.1 -lxml==4.0.0 +lxml==4.2.3 beautifulsoup4==4.6.0 diff --git a/setup.py b/setup.py index 16aaf145294f..98780dd5a2e2 100755 --- a/setup.py +++ b/setup.py @@ -8,10 +8,9 @@ install_requires = [ "Jinja2>=2.7.3", "boto>=2.36.0", - "boto3>=1.6.16", + "boto3>=1.6.16,<1.8", "botocore>=1.9.16,<1.11", - "cookies", - "cryptography>=2.0.0", + "cryptography>=2.3.0", "requests>=2.5", "xmltodict", "six>1.9", @@ -41,7 +40,7 @@ setup( name='moto', - version='1.3.5', + version='1.3.6', description='A library that allows your python tests to easily' ' mock out the boto library', author='Steve Pulec', diff --git a/tests/test_cognitoidentity/test_cognitoidentity.py b/tests/test_cognitoidentity/test_cognitoidentity.py index a38107b998ec..ac79fa2239e8 100644 --- a/tests/test_cognitoidentity/test_cognitoidentity.py +++ b/tests/test_cognitoidentity/test_cognitoidentity.py @@ -31,6 +31,7 @@ def test_create_identity_pool(): # testing a helper function def test_get_random_identity_id(): assert len(get_random_identity_id('us-west-2')) > 0 + assert len(get_random_identity_id('us-west-2').split(':')[1]) == 19 @mock_cognitoidentity @@ -69,3 +70,16 @@ def test_get_open_id_token_for_developer_identity(): ) assert len(result['Token']) assert result['IdentityId'] == '12345' + +@mock_cognitoidentity +def test_get_open_id_token_for_developer_identity_when_no_explicit_identity_id(): + conn = boto3.client('cognito-identity', 'us-west-2') + result = conn.get_open_id_token_for_developer_identity( + IdentityPoolId='us-west-2:12345', + Logins={ + 'someurl': '12345' + }, + TokenDuration=123 + ) + assert len(result['Token']) > 0 + assert len(result['IdentityId']) > 0 diff --git a/tests/test_cognitoidp/test_cognitoidp.py b/tests/test_cognitoidp/test_cognitoidp.py index b2bd469ce22a..56d7c08a88e8 100644 --- a/tests/test_cognitoidp/test_cognitoidp.py +++ b/tests/test_cognitoidp/test_cognitoidp.py @@ -6,6 +6,7 @@ import uuid from jose import jws + from moto import mock_cognitoidp import sure # noqa @@ -24,6 +25,7 @@ def test_create_user_pool(): ) result["UserPool"]["Id"].should_not.be.none + result["UserPool"]["Id"].should.match(r'[\w-]+_[0-9a-zA-Z]+') result["UserPool"]["Name"].should.equal(name) result["UserPool"]["LambdaConfig"]["PreSignUp"].should.equal(value) @@ -399,15 +401,22 @@ def authentication_flow(conn): username = str(uuid.uuid4()) temporary_password = str(uuid.uuid4()) user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] + user_attribute_name = str(uuid.uuid4()) + user_attribute_value = 
str(uuid.uuid4()) client_id = conn.create_user_pool_client( UserPoolId=user_pool_id, ClientName=str(uuid.uuid4()), + ReadAttributes=[user_attribute_name] )["UserPoolClient"]["ClientId"] conn.admin_create_user( UserPoolId=user_pool_id, Username=username, TemporaryPassword=temporary_password, + UserAttributes=[{ + 'Name': user_attribute_name, + 'Value': user_attribute_value + }] ) result = conn.admin_initiate_auth( @@ -446,6 +455,9 @@ def authentication_flow(conn): "access_token": result["AuthenticationResult"]["AccessToken"], "username": username, "password": new_password, + "additional_fields": { + user_attribute_name: user_attribute_value + } } @@ -475,6 +487,8 @@ def test_token_legitimacy(): access_claims = json.loads(jws.verify(access_token, json_web_key, "RS256")) access_claims["iss"].should.equal(issuer) access_claims["aud"].should.equal(client_id) + for k, v in outputs["additional_fields"].items(): + access_claims[k].should.equal(v) @mock_cognitoidp diff --git a/tests/test_core/test_decorator_calls.py b/tests/test_core/test_decorator_calls.py index 9e3638cc25a8..5d2f6a4ef007 100644 --- a/tests/test_core/test_decorator_calls.py +++ b/tests/test_core/test_decorator_calls.py @@ -85,3 +85,14 @@ def setUp(self): def test_still_the_same(self): bucket = self.conn.get_bucket('mybucket') bucket.name.should.equal("mybucket") + + +@mock_s3_deprecated +class TesterWithStaticmethod(object): + + @staticmethod + def static(*args): + assert not args or not isinstance(args[0], TesterWithStaticmethod) + + def test_no_instance_sent_to_staticmethod(self): + self.static() diff --git a/tests/test_dynamodb2/test_dynamodb.py b/tests/test_dynamodb2/test_dynamodb.py index ab8f258566c6..afc919dd7c0c 100644 --- a/tests/test_dynamodb2/test_dynamodb.py +++ b/tests/test_dynamodb2/test_dynamodb.py @@ -201,6 +201,48 @@ def test_item_add_empty_string_exception(): ) +@requires_boto_gte("2.9") +@mock_dynamodb2 +def test_update_item_with_empty_string_exception(): + name = 'TestTable' + conn = boto3.client('dynamodb', + region_name='us-west-2', + aws_access_key_id="ak", + aws_secret_access_key="sk") + conn.create_table(TableName=name, + KeySchema=[{'AttributeName':'forum_name','KeyType':'HASH'}], + AttributeDefinitions=[{'AttributeName':'forum_name','AttributeType':'S'}], + ProvisionedThroughput={'ReadCapacityUnits':5,'WriteCapacityUnits':5}) + + conn.put_item( + TableName=name, + Item={ + 'forum_name': { 'S': 'LOLCat Forum' }, + 'subject': { 'S': 'Check this out!' 
}, + 'Body': { 'S': 'http://url_to_lolcat.gif'}, + 'SentBy': { 'S': "test" }, + 'ReceivedTime': { 'S': '12/9/2011 11:36:03 PM'}, + } + ) + + with assert_raises(ClientError) as ex: + conn.update_item( + TableName=name, + Key={ + 'forum_name': { 'S': 'LOLCat Forum'}, + }, + UpdateExpression='set Body=:Body', + ExpressionAttributeValues={ + ':Body': {'S': ''} + }) + + ex.exception.response['Error']['Code'].should.equal('ValidationException') + ex.exception.response['ResponseMetadata']['HTTPStatusCode'].should.equal(400) + ex.exception.response['Error']['Message'].should.equal( + 'One or more parameter values were invalid: An AttributeValue may not contain an empty string' + ) + + @requires_boto_gte("2.9") @mock_dynamodb2 def test_query_invalid_table(): @@ -658,8 +700,8 @@ def test_filter_expression(): filter_expr = moto.dynamodb2.comparisons.get_filter_expression('Id IN :v0', {}, {':v0': {'NS': [7, 8, 9]}}) filter_expr.expr(row1).should.be(True) - # attribute function tests - filter_expr = moto.dynamodb2.comparisons.get_filter_expression('attribute_exists(Id) AND attribute_not_exists(User)', {}, {}) + # attribute function tests (with extra spaces) + filter_expr = moto.dynamodb2.comparisons.get_filter_expression('attribute_exists(Id) AND attribute_not_exists (User)', {}, {}) filter_expr.expr(row1).should.be(True) filter_expr = moto.dynamodb2.comparisons.get_filter_expression('attribute_type(Id, N)', {}, {}) @@ -1178,7 +1220,8 @@ def test_update_if_not_exists(): 'forum_name': 'the-key', 'subject': '123' }, - UpdateExpression='SET created_at = if_not_exists(created_at, :created_at)', + # if_not_exists without space + UpdateExpression='SET created_at=if_not_exists(created_at,:created_at)', ExpressionAttributeValues={ ':created_at': 123 } @@ -1191,7 +1234,8 @@ def test_update_if_not_exists(): 'forum_name': 'the-key', 'subject': '123' }, - UpdateExpression='SET created_at = if_not_exists(created_at, :created_at)', + # if_not_exists with space + UpdateExpression='SET created_at = if_not_exists (created_at, :created_at)', ExpressionAttributeValues={ ':created_at': 456 } diff --git a/tests/test_ec2/test_elastic_block_store.py b/tests/test_ec2/test_elastic_block_store.py index 8930838c6e32..442e41dde0c9 100644 --- a/tests/test_ec2/test_elastic_block_store.py +++ b/tests/test_ec2/test_elastic_block_store.py @@ -615,8 +615,8 @@ def test_copy_snapshot(): dest = dest_ec2.Snapshot(copy_snapshot_response['SnapshotId']) attribs = ['data_encryption_key_id', 'encrypted', - 'kms_key_id', 'owner_alias', 'owner_id', 'progress', - 'start_time', 'state', 'state_message', + 'kms_key_id', 'owner_alias', 'owner_id', + 'progress', 'state', 'state_message', 'tags', 'volume_id', 'volume_size'] for attrib in attribs: diff --git a/tests/test_ec2/test_vpc_peering.py b/tests/test_ec2/test_vpc_peering.py index 6722eed60316..1f98791b333e 100644 --- a/tests/test_ec2/test_vpc_peering.py +++ b/tests/test_ec2/test_vpc_peering.py @@ -2,12 +2,15 @@ # Ensure 'assert_raises' context manager support for Python 2.6 import tests.backport_assert_raises from nose.tools import assert_raises +from moto.ec2.exceptions import EC2ClientError +from botocore.exceptions import ClientError +import boto3 import boto from boto.exception import EC2ResponseError import sure # noqa -from moto import mock_ec2_deprecated +from moto import mock_ec2, mock_ec2_deprecated from tests.helpers import requires_boto_gte @@ -93,3 +96,37 @@ def test_vpc_peering_connections_delete(): cm.exception.code.should.equal('InvalidVpcPeeringConnectionId.NotFound') 
cm.exception.status.should.equal(400) cm.exception.request_id.should_not.be.none + + +@mock_ec2 +def test_vpc_peering_connections_cross_region(): + # create vpc in us-west-1 and ap-northeast-1 + ec2_usw1 = boto3.resource('ec2', region_name='us-west-1') + vpc_usw1 = ec2_usw1.create_vpc(CidrBlock='10.90.0.0/16') + ec2_apn1 = boto3.resource('ec2', region_name='ap-northeast-1') + vpc_apn1 = ec2_apn1.create_vpc(CidrBlock='10.20.0.0/16') + # create peering + vpc_pcx = ec2_usw1.create_vpc_peering_connection( + VpcId=vpc_usw1.id, + PeerVpcId=vpc_apn1.id, + PeerRegion='ap-northeast-1', + ) + vpc_pcx.status['Code'].should.equal('initiating-request') + vpc_pcx.requester_vpc.id.should.equal(vpc_usw1.id) + vpc_pcx.accepter_vpc.id.should.equal(vpc_apn1.id) + + +@mock_ec2 +def test_vpc_peering_connections_cross_region_fail(): + # create vpc in us-west-1 and ap-northeast-1 + ec2_usw1 = boto3.resource('ec2', region_name='us-west-1') + vpc_usw1 = ec2_usw1.create_vpc(CidrBlock='10.90.0.0/16') + ec2_apn1 = boto3.resource('ec2', region_name='ap-northeast-1') + vpc_apn1 = ec2_apn1.create_vpc(CidrBlock='10.20.0.0/16') + # create peering wrong region with no vpc + with assert_raises(ClientError) as cm: + ec2_usw1.create_vpc_peering_connection( + VpcId=vpc_usw1.id, + PeerVpcId=vpc_apn1.id, + PeerRegion='ap-northeast-2') + cm.exception.response['Error']['Code'].should.equal('InvalidVpcID.NotFound') diff --git a/tests/test_ecs/test_ecs_boto3.py b/tests/test_ecs/test_ecs_boto3.py index bf72dc230fa6..70c1463ee215 100644 --- a/tests/test_ecs/test_ecs_boto3.py +++ b/tests/test_ecs/test_ecs_boto3.py @@ -304,6 +304,52 @@ def test_create_service(): response['service']['status'].should.equal('ACTIVE') response['service']['taskDefinition'].should.equal( 'arn:aws:ecs:us-east-1:012345678910:task-definition/test_ecs_task:1') + response['service']['schedulingStrategy'].should.equal('REPLICA') + +@mock_ecs +def test_create_service_scheduling_strategy(): + client = boto3.client('ecs', region_name='us-east-1') + _ = client.create_cluster( + clusterName='test_ecs_cluster' + ) + _ = client.register_task_definition( + family='test_ecs_task', + containerDefinitions=[ + { + 'name': 'hello_world', + 'image': 'docker/hello-world:latest', + 'cpu': 1024, + 'memory': 400, + 'essential': True, + 'environment': [{ + 'name': 'AWS_ACCESS_KEY_ID', + 'value': 'SOME_ACCESS_KEY' + }], + 'logConfiguration': {'logDriver': 'json-file'} + } + ] + ) + response = client.create_service( + cluster='test_ecs_cluster', + serviceName='test_ecs_service', + taskDefinition='test_ecs_task', + desiredCount=2, + schedulingStrategy='DAEMON', + ) + response['service']['clusterArn'].should.equal( + 'arn:aws:ecs:us-east-1:012345678910:cluster/test_ecs_cluster') + response['service']['desiredCount'].should.equal(2) + len(response['service']['events']).should.equal(0) + len(response['service']['loadBalancers']).should.equal(0) + response['service']['pendingCount'].should.equal(0) + response['service']['runningCount'].should.equal(0) + response['service']['serviceArn'].should.equal( + 'arn:aws:ecs:us-east-1:012345678910:service/test_ecs_service') + response['service']['serviceName'].should.equal('test_ecs_service') + response['service']['status'].should.equal('ACTIVE') + response['service']['taskDefinition'].should.equal( + 'arn:aws:ecs:us-east-1:012345678910:task-definition/test_ecs_task:1') + response['service']['schedulingStrategy'].should.equal('DAEMON') @mock_ecs @@ -411,6 +457,72 @@ def test_describe_services(): 
response['services'][0]['deployments'][0]['status'].should.equal('PRIMARY') +@mock_ecs +def test_describe_services_scheduling_strategy(): + client = boto3.client('ecs', region_name='us-east-1') + _ = client.create_cluster( + clusterName='test_ecs_cluster' + ) + _ = client.register_task_definition( + family='test_ecs_task', + containerDefinitions=[ + { + 'name': 'hello_world', + 'image': 'docker/hello-world:latest', + 'cpu': 1024, + 'memory': 400, + 'essential': True, + 'environment': [{ + 'name': 'AWS_ACCESS_KEY_ID', + 'value': 'SOME_ACCESS_KEY' + }], + 'logConfiguration': {'logDriver': 'json-file'} + } + ] + ) + _ = client.create_service( + cluster='test_ecs_cluster', + serviceName='test_ecs_service1', + taskDefinition='test_ecs_task', + desiredCount=2 + ) + _ = client.create_service( + cluster='test_ecs_cluster', + serviceName='test_ecs_service2', + taskDefinition='test_ecs_task', + desiredCount=2, + schedulingStrategy='DAEMON' + ) + _ = client.create_service( + cluster='test_ecs_cluster', + serviceName='test_ecs_service3', + taskDefinition='test_ecs_task', + desiredCount=2 + ) + response = client.describe_services( + cluster='test_ecs_cluster', + services=['test_ecs_service1', + 'arn:aws:ecs:us-east-1:012345678910:service/test_ecs_service2', + 'test_ecs_service3'] + ) + len(response['services']).should.equal(3) + response['services'][0]['serviceArn'].should.equal( + 'arn:aws:ecs:us-east-1:012345678910:service/test_ecs_service1') + response['services'][0]['serviceName'].should.equal('test_ecs_service1') + response['services'][1]['serviceArn'].should.equal( + 'arn:aws:ecs:us-east-1:012345678910:service/test_ecs_service2') + response['services'][1]['serviceName'].should.equal('test_ecs_service2') + + response['services'][0]['deployments'][0]['desiredCount'].should.equal(2) + response['services'][0]['deployments'][0]['pendingCount'].should.equal(2) + response['services'][0]['deployments'][0]['runningCount'].should.equal(0) + response['services'][0]['deployments'][0]['status'].should.equal('PRIMARY') + + response['services'][0]['schedulingStrategy'].should.equal('REPLICA') + response['services'][1]['schedulingStrategy'].should.equal('DAEMON') + response['services'][2]['schedulingStrategy'].should.equal('REPLICA') + + @mock_ecs def test_update_service(): client = boto3.client('ecs', region_name='us-east-1') @@ -449,6 +561,7 @@ def test_update_service(): desiredCount=0 ) response['service']['desiredCount'].should.equal(0) + response['service']['schedulingStrategy'].should.equal('REPLICA') @mock_ecs @@ -515,8 +628,10 @@ def test_delete_service(): 'arn:aws:ecs:us-east-1:012345678910:service/test_ecs_service') response['service']['serviceName'].should.equal('test_ecs_service') response['service']['status'].should.equal('ACTIVE') + response['service']['schedulingStrategy'].should.equal('REPLICA') response['service']['taskDefinition'].should.equal( 'arn:aws:ecs:us-east-1:012345678910:task-definition/test_ecs_task:1') + @mock_ec2 diff --git a/tests/test_elb/test_elb.py b/tests/test_elb/test_elb.py index 5827e70c79ab..a67508430d7a 100644 --- a/tests/test_elb/test_elb.py +++ b/tests/test_elb/test_elb.py @@ -723,6 +723,40 @@ def test_describe_instance_health(): instances_health[0].state.should.equal('InService') +@mock_ec2 +@mock_elb +def test_describe_instance_health_boto3(): + elb = boto3.client('elb', region_name="us-east-1") + ec2 = boto3.client('ec2', region_name="us-east-1") + instances = ec2.run_instances(MinCount=2, MaxCount=2)['Instances'] + lb_name = "my_load_balancer" + 
elb.create_load_balancer( + Listeners=[{ + 'InstancePort': 80, + 'LoadBalancerPort': 8080, + 'Protocol': 'HTTP' + }], + LoadBalancerName=lb_name, + ) + elb.register_instances_with_load_balancer( + LoadBalancerName=lb_name, + Instances=[{'InstanceId': instances[0]['InstanceId']}] + ) + instances_health = elb.describe_instance_health( + LoadBalancerName=lb_name, + Instances=[{'InstanceId': instance['InstanceId']} for instance in instances] + ) + instances_health['InstanceStates'].should.have.length_of(2) + instances_health['InstanceStates'][0]['InstanceId'].\ + should.equal(instances[0]['InstanceId']) + instances_health['InstanceStates'][0]['State'].\ + should.equal('InService') + instances_health['InstanceStates'][1]['InstanceId'].\ + should.equal(instances[1]['InstanceId']) + instances_health['InstanceStates'][1]['State'].\ + should.equal('Unknown') + + @mock_elb def test_add_remove_tags(): client = boto3.client('elb', region_name='us-east-1') diff --git a/tests/test_glue/fixtures/datacatalog.py b/tests/test_glue/fixtures/datacatalog.py index b2efe4154a95..edad2f0f4c26 100644 --- a/tests/test_glue/fixtures/datacatalog.py +++ b/tests/test_glue/fixtures/datacatalog.py @@ -29,3 +29,28 @@ }, 'TableType': 'EXTERNAL_TABLE', } + + +PARTITION_INPUT = { + # 'DatabaseName': 'dbname', + 'StorageDescriptor': { + 'BucketColumns': [], + 'Columns': [], + 'Compressed': False, + 'InputFormat': 'org.apache.hadoop.hive.ql.io.parquet.MapredParquetInputFormat', + 'Location': 's3://.../partition=value', + 'NumberOfBuckets': -1, + 'OutputFormat': 'org.apache.hadoop.hive.ql.io.parquet.MapredParquetOutputFormat', + 'Parameters': {}, + 'SerdeInfo': { + 'Parameters': {'path': 's3://...', 'serialization.format': '1'}, + 'SerializationLibrary': 'org.apache.hadoop.hive.ql.io.parquet.serde.ParquetHiveSerDe'}, + 'SkewedInfo': {'SkewedColumnNames': [], + 'SkewedColumnValueLocationMaps': {}, + 'SkewedColumnValues': []}, + 'SortColumns': [], + 'StoredAsSubDirectories': False, + }, + # 'TableName': 'source_table', + # 'Values': ['2018-06-26'], +} diff --git a/tests/test_glue/helpers.py b/tests/test_glue/helpers.py index 4a51f9117ee4..331b99867280 100644 --- a/tests/test_glue/helpers.py +++ b/tests/test_glue/helpers.py @@ -2,7 +2,7 @@ import copy -from .fixtures.datacatalog import TABLE_INPUT +from .fixtures.datacatalog import TABLE_INPUT, PARTITION_INPUT def create_database(client, database_name): @@ -17,22 +17,38 @@ def get_database(client, database_name): return client.get_database(Name=database_name) -def create_table_input(table_name, s3_location, columns=[], partition_keys=[]): +def create_table_input(database_name, table_name, columns=[], partition_keys=[]): table_input = copy.deepcopy(TABLE_INPUT) table_input['Name'] = table_name table_input['PartitionKeys'] = partition_keys table_input['StorageDescriptor']['Columns'] = columns - table_input['StorageDescriptor']['Location'] = s3_location + table_input['StorageDescriptor']['Location'] = 's3://my-bucket/{database_name}/{table_name}'.format( + database_name=database_name, + table_name=table_name + ) return table_input -def create_table(client, database_name, table_name, table_input): +def create_table(client, database_name, table_name, table_input=None, **kwargs): + if table_input is None: + table_input = create_table_input(database_name, table_name, **kwargs) + return client.create_table( DatabaseName=database_name, TableInput=table_input ) +def update_table(client, database_name, table_name, table_input=None, **kwargs): + if table_input is None: + table_input = 
create_table_input(database_name, table_name, **kwargs) + + return client.update_table( + DatabaseName=database_name, + TableInput=table_input, + ) + + def get_table(client, database_name, table_name): return client.get_table( DatabaseName=database_name, @@ -44,3 +60,60 @@ def get_tables(client, database_name): return client.get_tables( DatabaseName=database_name ) + + +def get_table_versions(client, database_name, table_name): + return client.get_table_versions( + DatabaseName=database_name, + TableName=table_name + ) + + +def get_table_version(client, database_name, table_name, version_id): + return client.get_table_version( + DatabaseName=database_name, + TableName=table_name, + VersionId=version_id, + ) + + +def create_partition_input(database_name, table_name, values=[], columns=[]): + root_path = 's3://my-bucket/{database_name}/{table_name}'.format( + database_name=database_name, + table_name=table_name + ) + + part_input = copy.deepcopy(PARTITION_INPUT) + part_input['Values'] = values + part_input['StorageDescriptor']['Columns'] = columns + part_input['StorageDescriptor']['SerdeInfo']['Parameters']['path'] = root_path + return part_input + + +def create_partition(client, database_name, table_name, partiton_input=None, **kwargs): + if partiton_input is None: + partiton_input = create_partition_input(database_name, table_name, **kwargs) + return client.create_partition( + DatabaseName=database_name, + TableName=table_name, + PartitionInput=partiton_input + ) + + +def update_partition(client, database_name, table_name, old_values=[], partiton_input=None, **kwargs): + if partiton_input is None: + partiton_input = create_partition_input(database_name, table_name, **kwargs) + return client.update_partition( + DatabaseName=database_name, + TableName=table_name, + PartitionInput=partiton_input, + PartitionValueList=old_values, + ) + + +def get_partition(client, database_name, table_name, values): + return client.get_partition( + DatabaseName=database_name, + TableName=table_name, + PartitionValues=values, + ) diff --git a/tests/test_glue/test_datacatalog.py b/tests/test_glue/test_datacatalog.py index 7dabeb1f313e..a457d5127e30 100644 --- a/tests/test_glue/test_datacatalog.py +++ b/tests/test_glue/test_datacatalog.py @@ -1,10 +1,15 @@ from __future__ import unicode_literals import sure # noqa +import re from nose.tools import assert_raises import boto3 from botocore.client import ClientError + +from datetime import datetime +import pytz + from moto import mock_glue from . 
import helpers @@ -30,7 +35,19 @@ def test_create_database_already_exists(): with assert_raises(ClientError) as exc: helpers.create_database(client, database_name) - exc.exception.response['Error']['Code'].should.equal('DatabaseAlreadyExistsException') + exc.exception.response['Error']['Code'].should.equal('AlreadyExistsException') + + +@mock_glue +def test_get_database_not_exits(): + client = boto3.client('glue', region_name='us-east-1') + database_name = 'nosuchdatabase' + + with assert_raises(ClientError) as exc: + helpers.get_database(client, database_name) + + exc.exception.response['Error']['Code'].should.equal('EntityNotFoundException') + exc.exception.response['Error']['Message'].should.match('Database nosuchdatabase not found') @mock_glue @@ -40,12 +57,7 @@ def test_create_table(): helpers.create_database(client, database_name) table_name = 'myspecialtable' - s3_location = 's3://my-bucket/{database_name}/{table_name}'.format( - database_name=database_name, - table_name=table_name - ) - - table_input = helpers.create_table_input(table_name, s3_location) + table_input = helpers.create_table_input(database_name, table_name) helpers.create_table(client, database_name, table_name, table_input) response = helpers.get_table(client, database_name, table_name) @@ -63,18 +75,12 @@ def test_create_table_already_exists(): helpers.create_database(client, database_name) table_name = 'cantcreatethistabletwice' - s3_location = 's3://my-bucket/{database_name}/{table_name}'.format( - database_name=database_name, - table_name=table_name - ) - - table_input = helpers.create_table_input(table_name, s3_location) - helpers.create_table(client, database_name, table_name, table_input) + helpers.create_table(client, database_name, table_name) with assert_raises(ClientError) as exc: - helpers.create_table(client, database_name, table_name, table_input) + helpers.create_table(client, database_name, table_name) - exc.exception.response['Error']['Code'].should.equal('TableAlreadyExistsException') + exc.exception.response['Error']['Code'].should.equal('AlreadyExistsException') @mock_glue @@ -87,11 +93,7 @@ def test_get_tables(): table_inputs = {} for table_name in table_names: - s3_location = 's3://my-bucket/{database_name}/{table_name}'.format( - database_name=database_name, - table_name=table_name - ) - table_input = helpers.create_table_input(table_name, s3_location) + table_input = helpers.create_table_input(database_name, table_name) table_inputs[table_name] = table_input helpers.create_table(client, database_name, table_name, table_input) @@ -99,10 +101,326 @@ def test_get_tables(): tables = response['TableList'] - assert len(tables) == 3 + tables.should.have.length_of(3) for table in tables: table_name = table['Name'] table_name.should.equal(table_inputs[table_name]['Name']) table['StorageDescriptor'].should.equal(table_inputs[table_name]['StorageDescriptor']) table['PartitionKeys'].should.equal(table_inputs[table_name]['PartitionKeys']) + + +@mock_glue +def test_get_table_versions(): + client = boto3.client('glue', region_name='us-east-1') + database_name = 'myspecialdatabase' + helpers.create_database(client, database_name) + + table_name = 'myfirsttable' + version_inputs = {} + + table_input = helpers.create_table_input(database_name, table_name) + helpers.create_table(client, database_name, table_name, table_input) + version_inputs["1"] = table_input + + columns = [{'Name': 'country', 'Type': 'string'}] + table_input = helpers.create_table_input(database_name, table_name, columns=columns) + 
helpers.update_table(client, database_name, table_name, table_input) + version_inputs["2"] = table_input + + # Updating with an identical input should still create a new version + helpers.update_table(client, database_name, table_name, table_input) + version_inputs["3"] = table_input + + response = helpers.get_table_versions(client, database_name, table_name) + + vers = response['TableVersions'] + + vers.should.have.length_of(3) + vers[0]['Table']['StorageDescriptor']['Columns'].should.equal([]) + vers[-1]['Table']['StorageDescriptor']['Columns'].should.equal(columns) + + for n, ver in enumerate(vers): + n = str(n + 1) + ver['VersionId'].should.equal(n) + ver['Table']['Name'].should.equal(table_name) + ver['Table']['StorageDescriptor'].should.equal(version_inputs[n]['StorageDescriptor']) + ver['Table']['PartitionKeys'].should.equal(version_inputs[n]['PartitionKeys']) + + response = helpers.get_table_version(client, database_name, table_name, "3") + ver = response['TableVersion'] + + ver['VersionId'].should.equal("3") + ver['Table']['Name'].should.equal(table_name) + ver['Table']['StorageDescriptor']['Columns'].should.equal(columns) + + +@mock_glue +def test_get_table_version_not_found(): + client = boto3.client('glue', region_name='us-east-1') + database_name = 'myspecialdatabase' + table_name = 'myfirsttable' + helpers.create_database(client, database_name) + helpers.create_table(client, database_name, table_name) + + with assert_raises(ClientError) as exc: + helpers.get_table_version(client, database_name, 'myfirsttable', "20") + + exc.exception.response['Error']['Code'].should.equal('EntityNotFoundException') + exc.exception.response['Error']['Message'].should.match('version', re.I) + + +@mock_glue +def test_get_table_version_invalid_input(): + client = boto3.client('glue', region_name='us-east-1') + database_name = 'myspecialdatabase' + table_name = 'myfirsttable' + helpers.create_database(client, database_name) + helpers.create_table(client, database_name, table_name) + + with assert_raises(ClientError) as exc: + helpers.get_table_version(client, database_name, 'myfirsttable', "10not-an-int") + + exc.exception.response['Error']['Code'].should.equal('InvalidInputException') + + +@mock_glue +def test_get_table_not_exits(): + client = boto3.client('glue', region_name='us-east-1') + database_name = 'myspecialdatabase' + helpers.create_database(client, database_name) + + with assert_raises(ClientError) as exc: + helpers.get_table(client, database_name, 'myfirsttable') + + exc.exception.response['Error']['Code'].should.equal('EntityNotFoundException') + exc.exception.response['Error']['Message'].should.match('Table myfirsttable not found') + + +@mock_glue +def test_get_table_when_database_not_exits(): + client = boto3.client('glue', region_name='us-east-1') + database_name = 'nosuchdatabase' + + with assert_raises(ClientError) as exc: + helpers.get_table(client, database_name, 'myfirsttable') + + exc.exception.response['Error']['Code'].should.equal('EntityNotFoundException') + exc.exception.response['Error']['Message'].should.match('Database nosuchdatabase not found') + + +@mock_glue +def test_get_partitions_empty(): + client = boto3.client('glue', region_name='us-east-1') + database_name = 'myspecialdatabase' + table_name = 'myfirsttable' + helpers.create_database(client, database_name) + + helpers.create_table(client, database_name, table_name) + + response = client.get_partitions(DatabaseName=database_name, TableName=table_name) + + response['Partitions'].should.have.length_of(0) + + 
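For orientation before the partition tests that follow: the round trip they exercise is a create followed by a read. A minimal sketch against the mock backend; demo_db, demo_table and the S3 location are illustrative names, not values taken from the suite:

    import boto3
    from moto import mock_glue

    @mock_glue
    def partition_round_trip():
        client = boto3.client('glue', region_name='us-east-1')
        client.create_database(DatabaseInput={'Name': 'demo_db'})
        client.create_table(DatabaseName='demo_db', TableInput={'Name': 'demo_table'})
        # Values identify the partition; the StorageDescriptor points at its data.
        client.create_partition(
            DatabaseName='demo_db',
            TableName='demo_table',
            PartitionInput={
                'Values': ['2018-10-01'],
                'StorageDescriptor': {'Location': 's3://demo-bucket/2018-10-01'},
            },
        )
        partition = client.get_partition(
            DatabaseName='demo_db',
            TableName='demo_table',
            PartitionValues=['2018-10-01'],
        )['Partition']
        assert partition['Values'] == ['2018-10-01']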
+@mock_glue +def test_create_partition(): + client = boto3.client('glue', region_name='us-east-1') + database_name = 'myspecialdatabase' + table_name = 'myfirsttable' + values = ['2018-10-01'] + helpers.create_database(client, database_name) + + helpers.create_table(client, database_name, table_name) + + before = datetime.now(pytz.utc) + + part_input = helpers.create_partition_input(database_name, table_name, values=values) + helpers.create_partition(client, database_name, table_name, part_input) + + after = datetime.now(pytz.utc) + + response = client.get_partitions(DatabaseName=database_name, TableName=table_name) + + partitions = response['Partitions'] + + partitions.should.have.length_of(1) + + partition = partitions[0] + + partition['TableName'].should.equal(table_name) + partition['StorageDescriptor'].should.equal(part_input['StorageDescriptor']) + partition['Values'].should.equal(values) + partition['CreationTime'].should.be.greater_than(before) + partition['CreationTime'].should.be.lower_than(after) + + +@mock_glue +def test_create_partition_already_exist(): + client = boto3.client('glue', region_name='us-east-1') + database_name = 'myspecialdatabase' + table_name = 'myfirsttable' + values = ['2018-10-01'] + helpers.create_database(client, database_name) + + helpers.create_table(client, database_name, table_name) + + helpers.create_partition(client, database_name, table_name, values=values) + + with assert_raises(ClientError) as exc: + helpers.create_partition(client, database_name, table_name, values=values) + + exc.exception.response['Error']['Code'].should.equal('AlreadyExistsException') + + +@mock_glue +def test_get_partition_not_found(): + client = boto3.client('glue', region_name='us-east-1') + database_name = 'myspecialdatabase' + table_name = 'myfirsttable' + values = ['2018-10-01'] + helpers.create_database(client, database_name) + + helpers.create_table(client, database_name, table_name) + + with assert_raises(ClientError) as exc: + helpers.get_partition(client, database_name, table_name, values) + + exc.exception.response['Error']['Code'].should.equal('EntityNotFoundException') + exc.exception.response['Error']['Message'].should.match('partition') + + +@mock_glue +def test_get_partition(): + client = boto3.client('glue', region_name='us-east-1') + database_name = 'myspecialdatabase' + table_name = 'myfirsttable' + helpers.create_database(client, database_name) + + helpers.create_table(client, database_name, table_name) + + values = [['2018-10-01'], ['2018-09-01']] + + helpers.create_partition(client, database_name, table_name, values=values[0]) + helpers.create_partition(client, database_name, table_name, values=values[1]) + + response = client.get_partition(DatabaseName=database_name, TableName=table_name, PartitionValues=values[1]) + + partition = response['Partition'] + + partition['TableName'].should.equal(table_name) + partition['Values'].should.equal(values[1]) + + +@mock_glue +def test_update_partition_not_found_moving(): + client = boto3.client('glue', region_name='us-east-1') + database_name = 'myspecialdatabase' + table_name = 'myfirsttable' + + helpers.create_database(client, database_name) + helpers.create_table(client, database_name, table_name) + + with assert_raises(ClientError) as exc: + helpers.update_partition(client, database_name, table_name, old_values=['0000-00-00'], values=['2018-10-02']) + + exc.exception.response['Error']['Code'].should.equal('EntityNotFoundException') + exc.exception.response['Error']['Message'].should.match('partition') + + 
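The update_partition tests that follow hinge on one detail: PartitionValueList names the partition as it exists now, while PartitionInput supplies its replacement, so giving the two different Values moves the partition. A short sketch continuing the hypothetical names from the example above:

    # Move the partition created above from 2018-10-01 to 2018-09-01.
    client.update_partition(
        DatabaseName='demo_db',
        TableName='demo_table',
        PartitionValueList=['2018-10-01'],  # the partition as it exists now
        PartitionInput={
            'Values': ['2018-09-01'],       # the partition as it should exist afterwards
            'StorageDescriptor': {'Location': 's3://demo-bucket/2018-09-01'},
        },
    )
    # Afterwards, get_partition with the old values raises EntityNotFoundException.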
+@mock_glue +def test_update_partition_not_found_change_in_place(): + client = boto3.client('glue', region_name='us-east-1') + database_name = 'myspecialdatabase' + table_name = 'myfirsttable' + values = ['2018-10-01'] + + helpers.create_database(client, database_name) + helpers.create_table(client, database_name, table_name) + + with assert_raises(ClientError) as exc: + helpers.update_partition(client, database_name, table_name, old_values=values, values=values) + + exc.exception.response['Error']['Code'].should.equal('EntityNotFoundException') + exc.exception.response['Error']['Message'].should.match('partition') + + +@mock_glue +def test_update_partition_cannot_overwrite(): + client = boto3.client('glue', region_name='us-east-1') + database_name = 'myspecialdatabase' + table_name = 'myfirsttable' + helpers.create_database(client, database_name) + + helpers.create_table(client, database_name, table_name) + + values = [['2018-10-01'], ['2018-09-01']] + + helpers.create_partition(client, database_name, table_name, values=values[0]) + helpers.create_partition(client, database_name, table_name, values=values[1]) + + with assert_raises(ClientError) as exc: + helpers.update_partition(client, database_name, table_name, old_values=values[0], values=values[1]) + + exc.exception.response['Error']['Code'].should.equal('AlreadyExistsException') + + +@mock_glue +def test_update_partition(): + client = boto3.client('glue', region_name='us-east-1') + database_name = 'myspecialdatabase' + table_name = 'myfirsttable' + values = ['2018-10-01'] + + helpers.create_database(client, database_name) + helpers.create_table(client, database_name, table_name) + helpers.create_partition(client, database_name, table_name, values=values) + + response = helpers.update_partition( + client, + database_name, + table_name, + old_values=values, + values=values, + columns=[{'Name': 'country', 'Type': 'string'}], + ) + + response = client.get_partition(DatabaseName=database_name, TableName=table_name, PartitionValues=values) + partition = response['Partition'] + + partition['TableName'].should.equal(table_name) + partition['StorageDescriptor']['Columns'].should.equal([{'Name': 'country', 'Type': 'string'}]) + + +@mock_glue +def test_update_partition_move(): + client = boto3.client('glue', region_name='us-east-1') + database_name = 'myspecialdatabase' + table_name = 'myfirsttable' + values = ['2018-10-01'] + new_values = ['2018-09-01'] + + helpers.create_database(client, database_name) + helpers.create_table(client, database_name, table_name) + helpers.create_partition(client, database_name, table_name, values=values) + + response = helpers.update_partition( + client, + database_name, + table_name, + old_values=values, + values=new_values, + columns=[{'Name': 'country', 'Type': 'string'}], + ) + + with assert_raises(ClientError) as exc: + helpers.get_partition(client, database_name, table_name, values) + + # Old partition shouldn't exist anymore + exc.exception.response['Error']['Code'].should.equal('EntityNotFoundException') + + response = client.get_partition(DatabaseName=database_name, TableName=table_name, PartitionValues=new_values) + partition = response['Partition'] + + partition['TableName'].should.equal(table_name) + partition['StorageDescriptor']['Columns'].should.equal([{'Name': 'country', 'Type': 'string'}]) diff --git a/tests/test_iam/test_iam.py b/tests/test_iam/test_iam.py index 2225f0644604..bc23ff7126dd 100644 --- a/tests/test_iam/test_iam.py +++ b/tests/test_iam/test_iam.py @@ -286,6 +286,16 @@ def 
test_create_policy_versions(): PolicyDocument='{"some":"policy"}') version.get('PolicyVersion').get('Document').should.equal({'some': 'policy'}) +@mock_iam +def test_get_policy(): + conn = boto3.client('iam', region_name='us-east-1') + response = conn.create_policy( + PolicyName="TestGetPolicy", + PolicyDocument='{"some":"policy"}') + policy = conn.get_policy( + PolicyArn="arn:aws:iam::123456789012:policy/TestGetPolicy") + response['Policy']['Arn'].should.equal("arn:aws:iam::123456789012:policy/TestGetPolicy") + @mock_iam def test_get_policy_version(): @@ -314,17 +324,22 @@ def test_list_policy_versions(): PolicyArn="arn:aws:iam::123456789012:policy/TestListPolicyVersions") conn.create_policy( PolicyName="TestListPolicyVersions", - PolicyDocument='{"some":"policy"}') - conn.create_policy_version( - PolicyArn="arn:aws:iam::123456789012:policy/TestListPolicyVersions", PolicyDocument='{"first":"policy"}') + versions = conn.list_policy_versions( + PolicyArn="arn:aws:iam::123456789012:policy/TestListPolicyVersions") + versions.get('Versions')[0].get('VersionId').should.equal('v1') + conn.create_policy_version( PolicyArn="arn:aws:iam::123456789012:policy/TestListPolicyVersions", PolicyDocument='{"second":"policy"}') + conn.create_policy_version( + PolicyArn="arn:aws:iam::123456789012:policy/TestListPolicyVersions", + PolicyDocument='{"third":"policy"}') versions = conn.list_policy_versions( PolicyArn="arn:aws:iam::123456789012:policy/TestListPolicyVersions") - versions.get('Versions')[0].get('Document').should.equal({'first': 'policy'}) + print(versions.get('Versions')) versions.get('Versions')[1].get('Document').should.equal({'second': 'policy'}) + versions.get('Versions')[2].get('Document').should.equal({'third': 'policy'}) @mock_iam @@ -332,20 +347,20 @@ def test_delete_policy_version(): conn = boto3.client('iam', region_name='us-east-1') conn.create_policy( PolicyName="TestDeletePolicyVersion", - PolicyDocument='{"some":"policy"}') + PolicyDocument='{"first":"policy"}') conn.create_policy_version( PolicyArn="arn:aws:iam::123456789012:policy/TestDeletePolicyVersion", - PolicyDocument='{"first":"policy"}') + PolicyDocument='{"second":"policy"}') with assert_raises(ClientError): conn.delete_policy_version( PolicyArn="arn:aws:iam::123456789012:policy/TestDeletePolicyVersion", VersionId='v2-nope-this-does-not-exist') conn.delete_policy_version( PolicyArn="arn:aws:iam::123456789012:policy/TestDeletePolicyVersion", - VersionId='v1') + VersionId='v2') versions = conn.list_policy_versions( PolicyArn="arn:aws:iam::123456789012:policy/TestDeletePolicyVersion") - len(versions.get('Versions')).should.equal(0) + len(versions.get('Versions')).should.equal(1) @mock_iam_deprecated() diff --git a/tests/test_kms/test_kms.py b/tests/test_kms/test_kms.py index 96715de7166e..8bccae27a02f 100644 --- a/tests/test_kms/test_kms.py +++ b/tests/test_kms/test_kms.py @@ -1,5 +1,5 @@ from __future__ import unicode_literals -import re +import os, re import boto3 import boto.kms @@ -8,6 +8,9 @@ import sure # noqa from moto import mock_kms, mock_kms_deprecated from nose.tools import assert_raises +from freezegun import freeze_time +from datetime import datetime, timedelta +from dateutil.tz import tzlocal @mock_kms_deprecated @@ -617,3 +620,100 @@ def test_kms_encrypt_boto3(): response = client.decrypt(CiphertextBlob=response['CiphertextBlob']) response['Plaintext'].should.equal(b'bar') + + +@mock_kms +def test_disable_key(): + client = boto3.client('kms', region_name='us-east-1') + key = 
client.create_key(Description='disable-key') + client.disable_key( + KeyId=key['KeyMetadata']['KeyId'] + ) + + result = client.describe_key(KeyId=key['KeyMetadata']['KeyId']) + assert result["KeyMetadata"]["Enabled"] == False + assert result["KeyMetadata"]["KeyState"] == 'Disabled' + + +@mock_kms +def test_enable_key(): + client = boto3.client('kms', region_name='us-east-1') + key = client.create_key(Description='enable-key') + client.disable_key( + KeyId=key['KeyMetadata']['KeyId'] + ) + client.enable_key( + KeyId=key['KeyMetadata']['KeyId'] + ) + + result = client.describe_key(KeyId=key['KeyMetadata']['KeyId']) + assert result["KeyMetadata"]["Enabled"] == True + assert result["KeyMetadata"]["KeyState"] == 'Enabled' + + +@mock_kms +def test_schedule_key_deletion(): + client = boto3.client('kms', region_name='us-east-1') + key = client.create_key(Description='schedule-key-deletion') + if os.environ.get('TEST_SERVER_MODE', 'false').lower() == 'false': + with freeze_time("2015-01-01 12:00:00"): + response = client.schedule_key_deletion( + KeyId=key['KeyMetadata']['KeyId'] + ) + assert response['KeyId'] == key['KeyMetadata']['KeyId'] + assert response['DeletionDate'] == datetime(2015, 1, 31, 12, 0, tzinfo=tzlocal()) + else: + # Can't manipulate time in server mode + response = client.schedule_key_deletion( + KeyId=key['KeyMetadata']['KeyId'] + ) + assert response['KeyId'] == key['KeyMetadata']['KeyId'] + + result = client.describe_key(KeyId=key['KeyMetadata']['KeyId']) + assert result["KeyMetadata"]["Enabled"] == False + assert result["KeyMetadata"]["KeyState"] == 'PendingDeletion' + assert 'DeletionDate' in result["KeyMetadata"] + + +@mock_kms +def test_schedule_key_deletion_custom(): + client = boto3.client('kms', region_name='us-east-1') + key = client.create_key(Description='schedule-key-deletion') + if os.environ.get('TEST_SERVER_MODE', 'false').lower() == 'false': + with freeze_time("2015-01-01 12:00:00"): + response = client.schedule_key_deletion( + KeyId=key['KeyMetadata']['KeyId'], + PendingWindowInDays=7 + ) + assert response['KeyId'] == key['KeyMetadata']['KeyId'] + assert response['DeletionDate'] == datetime(2015, 1, 8, 12, 0, tzinfo=tzlocal()) + else: + # Can't manipulate time in server mode + response = client.schedule_key_deletion( + KeyId=key['KeyMetadata']['KeyId'], + PendingWindowInDays=7 + ) + assert response['KeyId'] == key['KeyMetadata']['KeyId'] + + result = client.describe_key(KeyId=key['KeyMetadata']['KeyId']) + assert result["KeyMetadata"]["Enabled"] == False + assert result["KeyMetadata"]["KeyState"] == 'PendingDeletion' + assert 'DeletionDate' in result["KeyMetadata"] + + +@mock_kms +def test_cancel_key_deletion(): + client = boto3.client('kms', region_name='us-east-1') + key = client.create_key(Description='cancel-key-deletion') + client.schedule_key_deletion( + KeyId=key['KeyMetadata']['KeyId'] + ) + response = client.cancel_key_deletion( + KeyId=key['KeyMetadata']['KeyId'] + ) + assert response['KeyId'] == key['KeyMetadata']['KeyId'] + + result = client.describe_key(KeyId=key['KeyMetadata']['KeyId']) + assert result["KeyMetadata"]["Enabled"] == False + assert result["KeyMetadata"]["KeyState"] == 'Disabled' + assert 'DeletionDate' not in result["KeyMetadata"] diff --git a/tests/test_logs/test_logs.py b/tests/test_logs/test_logs.py index 3f924cc5503f..e3d46fd87af2 100644 --- a/tests/test_logs/test_logs.py +++ b/tests/test_logs/test_logs.py @@ -1,5 +1,6 @@ import boto3 import sure # noqa +import six from botocore.exceptions import ClientError from moto import 
mock_logs, settings @@ -47,7 +48,7 @@ def test_exceptions(): logEvents=[ { 'timestamp': 0, - 'message': 'line' + 'message': 'line' }, ], ) @@ -79,7 +80,7 @@ def test_put_logs(): {'timestamp': 0, 'message': 'hello'}, {'timestamp': 0, 'message': 'world'} ] - conn.put_log_events( + putRes = conn.put_log_events( logGroupName=log_group_name, logStreamName=log_stream_name, logEvents=messages @@ -89,6 +90,9 @@ def test_put_logs(): logStreamName=log_stream_name ) events = res['events'] + nextSequenceToken = putRes['nextSequenceToken'] + assert isinstance(nextSequenceToken, six.string_types) == True + assert len(nextSequenceToken) == 56 events.should.have.length_of(2) @@ -117,4 +121,8 @@ def test_filter_logs_interleaved(): interleaved=True, ) events = res['events'] - events.should.have.length_of(2) + for original_message, resulting_event in zip(messages, events): + resulting_event['eventId'].should.equal(str(resulting_event['eventId'])) + resulting_event['timestamp'].should.equal(original_message['timestamp']) + resulting_event['message'].should.equal(original_message['message']) + diff --git a/tests/test_organizations/__init__.py b/tests/test_organizations/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/tests/test_organizations/organizations_test_utils.py b/tests/test_organizations/organizations_test_utils.py new file mode 100644 index 000000000000..6548b1830c4a --- /dev/null +++ b/tests/test_organizations/organizations_test_utils.py @@ -0,0 +1,136 @@ +from __future__ import unicode_literals + +import six +import sure # noqa +import datetime +from moto.organizations import utils + +EMAIL_REGEX = "^.+@[a-zA-Z0-9-.]+.[a-zA-Z]{2,3}|[0-9]{1,3}$" +ORG_ID_REGEX = r'o-[a-z0-9]{%s}' % utils.ORG_ID_SIZE +ROOT_ID_REGEX = r'r-[a-z0-9]{%s}' % utils.ROOT_ID_SIZE +OU_ID_REGEX = r'ou-[a-z0-9]{%s}-[a-z0-9]{%s}' % (utils.ROOT_ID_SIZE, utils.OU_ID_SUFFIX_SIZE) +ACCOUNT_ID_REGEX = r'[0-9]{%s}' % utils.ACCOUNT_ID_SIZE +CREATE_ACCOUNT_STATUS_ID_REGEX = r'car-[a-z0-9]{%s}' % utils.CREATE_ACCOUNT_STATUS_ID_SIZE + + +def test_make_random_org_id(): + org_id = utils.make_random_org_id() + org_id.should.match(ORG_ID_REGEX) + + +def test_make_random_root_id(): + root_id = utils.make_random_root_id() + root_id.should.match(ROOT_ID_REGEX) + + +def test_make_random_ou_id(): + root_id = utils.make_random_root_id() + ou_id = utils.make_random_ou_id(root_id) + ou_id.should.match(OU_ID_REGEX) + + +def test_make_random_account_id(): + account_id = utils.make_random_account_id() + account_id.should.match(ACCOUNT_ID_REGEX) + + +def test_make_random_create_account_status_id(): + create_account_status_id = utils.make_random_create_account_status_id() + create_account_status_id.should.match(CREATE_ACCOUNT_STATUS_ID_REGEX) + + +def validate_organization(response): + org = response['Organization'] + sorted(org.keys()).should.equal([ + 'Arn', + 'AvailablePolicyTypes', + 'FeatureSet', + 'Id', + 'MasterAccountArn', + 'MasterAccountEmail', + 'MasterAccountId', + ]) + org['Id'].should.match(ORG_ID_REGEX) + org['MasterAccountId'].should.equal(utils.MASTER_ACCOUNT_ID) + org['MasterAccountArn'].should.equal(utils.MASTER_ACCOUNT_ARN_FORMAT.format( + org['MasterAccountId'], + org['Id'], + )) + org['Arn'].should.equal(utils.ORGANIZATION_ARN_FORMAT.format( + org['MasterAccountId'], + org['Id'], + )) + org['MasterAccountEmail'].should.equal(utils.MASTER_ACCOUNT_EMAIL) + org['FeatureSet'].should.be.within(['ALL', 'CONSOLIDATED_BILLING']) + org['AvailablePolicyTypes'].should.equal([{ + 'Type': 'SERVICE_CONTROL_POLICY', + 
'Status': 'ENABLED' + }]) + + +def validate_roots(org, response): + response.should.have.key('Roots').should.be.a(list) + response['Roots'].should_not.be.empty + root = response['Roots'][0] + root.should.have.key('Id').should.match(ROOT_ID_REGEX) + root.should.have.key('Arn').should.equal(utils.ROOT_ARN_FORMAT.format( + org['MasterAccountId'], + org['Id'], + root['Id'], + )) + root.should.have.key('Name').should.be.a(six.string_types) + root.should.have.key('PolicyTypes').should.be.a(list) + root['PolicyTypes'][0].should.have.key('Type').should.equal('SERVICE_CONTROL_POLICY') + root['PolicyTypes'][0].should.have.key('Status').should.equal('ENABLED') + + +def validate_organizational_unit(org, response): + response.should.have.key('OrganizationalUnit').should.be.a(dict) + ou = response['OrganizationalUnit'] + ou.should.have.key('Id').should.match(OU_ID_REGEX) + ou.should.have.key('Arn').should.equal(utils.OU_ARN_FORMAT.format( + org['MasterAccountId'], + org['Id'], + ou['Id'], + )) + ou.should.have.key('Name').should.be.a(six.string_types) + + +def validate_account(org, account): + sorted(account.keys()).should.equal([ + 'Arn', + 'Email', + 'Id', + 'JoinedMethod', + 'JoinedTimestamp', + 'Name', + 'Status', + ]) + account['Id'].should.match(ACCOUNT_ID_REGEX) + account['Arn'].should.equal(utils.ACCOUNT_ARN_FORMAT.format( + org['MasterAccountId'], + org['Id'], + account['Id'], + )) + account['Email'].should.match(EMAIL_REGEX) + account['JoinedMethod'].should.be.within(['INVITED', 'CREATED']) + account['Status'].should.be.within(['ACTIVE', 'SUSPENDED']) + account['Name'].should.be.a(six.string_types) + account['JoinedTimestamp'].should.be.a(datetime.datetime) + + +def validate_create_account_status(create_status): + sorted(create_status.keys()).should.equal([ + 'AccountId', + 'AccountName', + 'CompletedTimestamp', + 'Id', + 'RequestedTimestamp', + 'State', + ]) + create_status['Id'].should.match(CREATE_ACCOUNT_STATUS_ID_REGEX) + create_status['AccountId'].should.match(ACCOUNT_ID_REGEX) + create_status['AccountName'].should.be.a(six.string_types) + create_status['State'].should.equal('SUCCEEDED') + create_status['RequestedTimestamp'].should.be.a(datetime.datetime) + create_status['CompletedTimestamp'].should.be.a(datetime.datetime) diff --git a/tests/test_organizations/test_organizations_boto3.py b/tests/test_organizations/test_organizations_boto3.py new file mode 100644 index 000000000000..dfac5feeb294 --- /dev/null +++ b/tests/test_organizations/test_organizations_boto3.py @@ -0,0 +1,322 @@ +from __future__ import unicode_literals + +import boto3 +import sure # noqa +from botocore.exceptions import ClientError +from nose.tools import assert_raises + +from moto import mock_organizations +from moto.organizations import utils +from .organizations_test_utils import ( + validate_organization, + validate_roots, + validate_organizational_unit, + validate_account, + validate_create_account_status, +) + + +@mock_organizations +def test_create_organization(): + client = boto3.client('organizations', region_name='us-east-1') + response = client.create_organization(FeatureSet='ALL') + validate_organization(response) + response['Organization']['FeatureSet'].should.equal('ALL') + + +@mock_organizations +def test_describe_organization(): + client = boto3.client('organizations', region_name='us-east-1') + client.create_organization(FeatureSet='ALL') + response = client.describe_organization() + validate_organization(response) + + +@mock_organizations +def test_describe_organization_exception(): + client = 
boto3.client('organizations', region_name='us-east-1') + with assert_raises(ClientError) as e: + response = client.describe_organization() + ex = e.exception + ex.operation_name.should.equal('DescribeOrganization') + ex.response['Error']['Code'].should.equal('400') + ex.response['Error']['Message'].should.contain('AWSOrganizationsNotInUseException') + + +# Organizational Units + +@mock_organizations +def test_list_roots(): + client = boto3.client('organizations', region_name='us-east-1') + org = client.create_organization(FeatureSet='ALL')['Organization'] + response = client.list_roots() + validate_roots(org, response) + + +@mock_organizations +def test_create_organizational_unit(): + client = boto3.client('organizations', region_name='us-east-1') + org = client.create_organization(FeatureSet='ALL')['Organization'] + root_id = client.list_roots()['Roots'][0]['Id'] + ou_name = 'ou01' + response = client.create_organizational_unit( + ParentId=root_id, + Name=ou_name, + ) + validate_organizational_unit(org, response) + response['OrganizationalUnit']['Name'].should.equal(ou_name) + + +@mock_organizations +def test_describe_organizational_unit(): + client = boto3.client('organizations', region_name='us-east-1') + org = client.create_organization(FeatureSet='ALL')['Organization'] + root_id = client.list_roots()['Roots'][0]['Id'] + ou_id = client.create_organizational_unit( + ParentId=root_id, + Name='ou01', + )['OrganizationalUnit']['Id'] + response = client.describe_organizational_unit(OrganizationalUnitId=ou_id) + validate_organizational_unit(org, response) + + +@mock_organizations +def test_describe_organizational_unit_exception(): + client = boto3.client('organizations', region_name='us-east-1') + org = client.create_organization(FeatureSet='ALL')['Organization'] + with assert_raises(ClientError) as e: + response = client.describe_organizational_unit( + OrganizationalUnitId=utils.make_random_root_id() + ) + ex = e.exception + ex.operation_name.should.equal('DescribeOrganizationalUnit') + ex.response['Error']['Code'].should.equal('400') + ex.response['Error']['Message'].should.contain('OrganizationalUnitNotFoundException') + + +@mock_organizations +def test_list_organizational_units_for_parent(): + client = boto3.client('organizations', region_name='us-east-1') + org = client.create_organization(FeatureSet='ALL')['Organization'] + root_id = client.list_roots()['Roots'][0]['Id'] + client.create_organizational_unit(ParentId=root_id, Name='ou01') + client.create_organizational_unit(ParentId=root_id, Name='ou02') + client.create_organizational_unit(ParentId=root_id, Name='ou03') + response = client.list_organizational_units_for_parent(ParentId=root_id) + response.should.have.key('OrganizationalUnits').should.be.a(list) + for ou in response['OrganizationalUnits']: + validate_organizational_unit(org, dict(OrganizationalUnit=ou)) + + +@mock_organizations +def test_list_organizational_units_for_parent_exception(): + client = boto3.client('organizations', region_name='us-east-1') + with assert_raises(ClientError) as e: + response = client.list_organizational_units_for_parent( + ParentId=utils.make_random_root_id() + ) + ex = e.exception + ex.operation_name.should.equal('ListOrganizationalUnitsForParent') + ex.response['Error']['Code'].should.equal('400') + ex.response['Error']['Message'].should.contain('ParentNotFoundException') + + +# Accounts +mockname = 'mock-account' +mockdomain = 'moto-example.org' +mockemail = '@'.join([mockname, mockdomain]) + + +@mock_organizations +def test_create_account(): 
+ client = boto3.client('organizations', region_name='us-east-1') + client.create_organization(FeatureSet='ALL') + create_status = client.create_account( + AccountName=mockname, Email=mockemail + )['CreateAccountStatus'] + validate_create_account_status(create_status) + create_status['AccountName'].should.equal(mockname) + + +@mock_organizations +def test_describe_account(): + client = boto3.client('organizations', region_name='us-east-1') + org = client.create_organization(FeatureSet='ALL')['Organization'] + account_id = client.create_account( + AccountName=mockname, Email=mockemail + )['CreateAccountStatus']['AccountId'] + response = client.describe_account(AccountId=account_id) + validate_account(org, response['Account']) + response['Account']['Name'].should.equal(mockname) + response['Account']['Email'].should.equal(mockemail) + + +@mock_organizations +def test_describe_account_exception(): + client = boto3.client('organizations', region_name='us-east-1') + with assert_raises(ClientError) as e: + response = client.describe_account(AccountId=utils.make_random_account_id()) + ex = e.exception + ex.operation_name.should.equal('DescribeAccount') + ex.response['Error']['Code'].should.equal('400') + ex.response['Error']['Message'].should.contain('AccountNotFoundException') + + +@mock_organizations +def test_list_accounts(): + client = boto3.client('organizations', region_name='us-east-1') + org = client.create_organization(FeatureSet='ALL')['Organization'] + for i in range(5): + name = mockname + str(i) + email = name + '@' + mockdomain + client.create_account(AccountName=name, Email=email) + response = client.list_accounts() + response.should.have.key('Accounts') + accounts = response['Accounts'] + len(accounts).should.equal(5) + for account in accounts: + validate_account(org, account) + accounts[3]['Name'].should.equal(mockname + '3') + accounts[2]['Email'].should.equal(mockname + '2' + '@' + mockdomain) + + +@mock_organizations +def test_list_accounts_for_parent(): + client = boto3.client('organizations', region_name='us-east-1') + org = client.create_organization(FeatureSet='ALL')['Organization'] + root_id = client.list_roots()['Roots'][0]['Id'] + account_id = client.create_account( + AccountName=mockname, + Email=mockemail, + )['CreateAccountStatus']['AccountId'] + response = client.list_accounts_for_parent(ParentId=root_id) + account_id.should.be.within([account['Id'] for account in response['Accounts']]) + + +@mock_organizations +def test_move_account(): + client = boto3.client('organizations', region_name='us-east-1') + org = client.create_organization(FeatureSet='ALL')['Organization'] + root_id = client.list_roots()['Roots'][0]['Id'] + account_id = client.create_account( + AccountName=mockname, Email=mockemail + )['CreateAccountStatus']['AccountId'] + ou01 = client.create_organizational_unit(ParentId=root_id, Name='ou01') + ou01_id = ou01['OrganizationalUnit']['Id'] + client.move_account( + AccountId=account_id, + SourceParentId=root_id, + DestinationParentId=ou01_id, + ) + response = client.list_accounts_for_parent(ParentId=ou01_id) + account_id.should.be.within([account['Id'] for account in response['Accounts']]) + + +@mock_organizations +def test_list_parents_for_ou(): + client = boto3.client('organizations', region_name='us-east-1') + org = client.create_organization(FeatureSet='ALL')['Organization'] + root_id = client.list_roots()['Roots'][0]['Id'] + ou01 = client.create_organizational_unit(ParentId=root_id, Name='ou01') + ou01_id = ou01['OrganizationalUnit']['Id'] + 
response01 = client.list_parents(ChildId=ou01_id) + response01.should.have.key('Parents').should.be.a(list) + response01['Parents'][0].should.have.key('Id').should.equal(root_id) + response01['Parents'][0].should.have.key('Type').should.equal('ROOT') + ou02 = client.create_organizational_unit(ParentId=ou01_id, Name='ou02') + ou02_id = ou02['OrganizationalUnit']['Id'] + response02 = client.list_parents(ChildId=ou02_id) + response02.should.have.key('Parents').should.be.a(list) + response02['Parents'][0].should.have.key('Id').should.equal(ou01_id) + response02['Parents'][0].should.have.key('Type').should.equal('ORGANIZATIONAL_UNIT') + + +@mock_organizations +def test_list_parents_for_accounts(): + client = boto3.client('organizations', region_name='us-east-1') + org = client.create_organization(FeatureSet='ALL')['Organization'] + root_id = client.list_roots()['Roots'][0]['Id'] + ou01 = client.create_organizational_unit(ParentId=root_id, Name='ou01') + ou01_id = ou01['OrganizationalUnit']['Id'] + account01_id = client.create_account( + AccountName='account01', + Email='account01@moto-example.org' + )['CreateAccountStatus']['AccountId'] + account02_id = client.create_account( + AccountName='account02', + Email='account02@moto-example.org' + )['CreateAccountStatus']['AccountId'] + client.move_account( + AccountId=account02_id, + SourceParentId=root_id, + DestinationParentId=ou01_id, + ) + response01 = client.list_parents(ChildId=account01_id) + response01.should.have.key('Parents').should.be.a(list) + response01['Parents'][0].should.have.key('Id').should.equal(root_id) + response01['Parents'][0].should.have.key('Type').should.equal('ROOT') + response02 = client.list_parents(ChildId=account02_id) + response02.should.have.key('Parents').should.be.a(list) + response02['Parents'][0].should.have.key('Id').should.equal(ou01_id) + response02['Parents'][0].should.have.key('Type').should.equal('ORGANIZATIONAL_UNIT') + + +@mock_organizations +def test_list_children(): + client = boto3.client('organizations', region_name='us-east-1') + org = client.create_organization(FeatureSet='ALL')['Organization'] + root_id = client.list_roots()['Roots'][0]['Id'] + ou01 = client.create_organizational_unit(ParentId=root_id, Name='ou01') + ou01_id = ou01['OrganizationalUnit']['Id'] + ou02 = client.create_organizational_unit(ParentId=ou01_id, Name='ou02') + ou02_id = ou02['OrganizationalUnit']['Id'] + account01_id = client.create_account( + AccountName='account01', + Email='account01@moto-example.org' + )['CreateAccountStatus']['AccountId'] + account02_id = client.create_account( + AccountName='account02', + Email='account02@moto-example.org' + )['CreateAccountStatus']['AccountId'] + client.move_account( + AccountId=account02_id, + SourceParentId=root_id, + DestinationParentId=ou01_id, + ) + response01 = client.list_children(ParentId=root_id, ChildType='ACCOUNT') + response02 = client.list_children(ParentId=root_id, ChildType='ORGANIZATIONAL_UNIT') + response03 = client.list_children(ParentId=ou01_id, ChildType='ACCOUNT') + response04 = client.list_children(ParentId=ou01_id, ChildType='ORGANIZATIONAL_UNIT') + response01['Children'][0]['Id'].should.equal(account01_id) + response01['Children'][0]['Type'].should.equal('ACCOUNT') + response02['Children'][0]['Id'].should.equal(ou01_id) + response02['Children'][0]['Type'].should.equal('ORGANIZATIONAL_UNIT') + response03['Children'][0]['Id'].should.equal(account02_id) + response03['Children'][0]['Type'].should.equal('ACCOUNT') + 
response04['Children'][0]['Id'].should.equal(ou02_id) + response04['Children'][0]['Type'].should.equal('ORGANIZATIONAL_UNIT') + + +@mock_organizations +def test_list_children_exception(): + client = boto3.client('organizations', region_name='us-east-1') + org = client.create_organization(FeatureSet='ALL')['Organization'] + root_id = client.list_roots()['Roots'][0]['Id'] + with assert_raises(ClientError) as e: + response = client.list_children( + ParentId=utils.make_random_root_id(), + ChildType='ACCOUNT' + ) + ex = e.exception + ex.operation_name.should.equal('ListChildren') + ex.response['Error']['Code'].should.equal('400') + ex.response['Error']['Message'].should.contain('ParentNotFoundException') + with assert_raises(ClientError) as e: + response = client.list_children( + ParentId=root_id, + ChildType='BLEE' + ) + ex = e.exception + ex.operation_name.should.equal('ListChildren') + ex.response['Error']['Code'].should.equal('400') + ex.response['Error']['Message'].should.contain('InvalidInputException') diff --git a/tests/test_rds2/test_rds2.py b/tests/test_rds2/test_rds2.py index 80dcd4f53e3b..cf9805444825 100644 --- a/tests/test_rds2/test_rds2.py +++ b/tests/test_rds2/test_rds2.py @@ -33,6 +33,7 @@ def test_create_database(): db_instance['DBInstanceIdentifier'].should.equal("db-master-1") db_instance['IAMDatabaseAuthenticationEnabled'].should.equal(False) db_instance['DbiResourceId'].should.contain("db-") + db_instance['CopyTagsToSnapshot'].should.equal(False) @mock_rds2 @@ -339,6 +340,49 @@ def test_create_db_snapshots(): snapshot.get('Engine').should.equal('postgres') snapshot.get('DBInstanceIdentifier').should.equal('db-primary-1') snapshot.get('DBSnapshotIdentifier').should.equal('g-1') + result = conn.list_tags_for_resource(ResourceName=snapshot['DBSnapshotArn']) + result['TagList'].should.equal([]) + + +@mock_rds2 +def test_create_db_snapshots_copy_tags(): + conn = boto3.client('rds', region_name='us-west-2') + conn.create_db_snapshot.when.called_with( + DBInstanceIdentifier='db-primary-1', + DBSnapshotIdentifier='snapshot-1').should.throw(ClientError) + + conn.create_db_instance(DBInstanceIdentifier='db-primary-1', + AllocatedStorage=10, + Engine='postgres', + DBName='staging-postgres', + DBInstanceClass='db.m1.small', + MasterUsername='root', + MasterUserPassword='hunter2', + Port=1234, + DBSecurityGroups=["my_sg"], + CopyTagsToSnapshot=True, + Tags=[ + { + 'Key': 'foo', + 'Value': 'bar', + }, + { + 'Key': 'foo1', + 'Value': 'bar1', + }, + ]) + + snapshot = conn.create_db_snapshot(DBInstanceIdentifier='db-primary-1', + DBSnapshotIdentifier='g-1').get('DBSnapshot') + + snapshot.get('Engine').should.equal('postgres') + snapshot.get('DBInstanceIdentifier').should.equal('db-primary-1') + snapshot.get('DBSnapshotIdentifier').should.equal('g-1') + result = conn.list_tags_for_resource(ResourceName=snapshot['DBSnapshotArn']) + result['TagList'].should.equal([{'Value': 'bar', + 'Key': 'foo'}, + {'Value': 'bar1', + 'Key': 'foo1'}]) @mock_rds2 @@ -656,6 +700,117 @@ def test_remove_tags_db(): len(result['TagList']).should.equal(1) +@mock_rds2 +def test_list_tags_snapshot(): + conn = boto3.client('rds', region_name='us-west-2') + result = conn.list_tags_for_resource( + ResourceName='arn:aws:rds:us-west-2:1234567890:snapshot:foo') + result['TagList'].should.equal([]) + conn.create_db_instance(DBInstanceIdentifier='db-primary-1', + AllocatedStorage=10, + Engine='postgres', + DBName='staging-postgres', + DBInstanceClass='db.m1.small', + MasterUsername='root', + MasterUserPassword='hunter2', + 
Port=1234, + DBSecurityGroups=["my_sg"]) + snapshot = conn.create_db_snapshot(DBInstanceIdentifier='db-primary-1', + DBSnapshotIdentifier='snapshot-with-tags', + Tags=[ + { + 'Key': 'foo', + 'Value': 'bar', + }, + { + 'Key': 'foo1', + 'Value': 'bar1', + }, + ]) + result = conn.list_tags_for_resource(ResourceName=snapshot['DBSnapshot']['DBSnapshotArn']) + result['TagList'].should.equal([{'Value': 'bar', + 'Key': 'foo'}, + {'Value': 'bar1', + 'Key': 'foo1'}]) + + +@mock_rds2 +def test_add_tags_snapshot(): + conn = boto3.client('rds', region_name='us-west-2') + conn.create_db_instance(DBInstanceIdentifier='db-primary-1', + AllocatedStorage=10, + Engine='postgres', + DBName='staging-postgres', + DBInstanceClass='db.m1.small', + MasterUsername='root', + MasterUserPassword='hunter2', + Port=1234, + DBSecurityGroups=["my_sg"]) + snapshot = conn.create_db_snapshot(DBInstanceIdentifier='db-primary-1', + DBSnapshotIdentifier='snapshot-without-tags', + Tags=[ + { + 'Key': 'foo', + 'Value': 'bar', + }, + { + 'Key': 'foo1', + 'Value': 'bar1', + }, + ]) + result = conn.list_tags_for_resource( + ResourceName='arn:aws:rds:us-west-2:1234567890:snapshot:snapshot-without-tags') + list(result['TagList']).should.have.length_of(2) + conn.add_tags_to_resource(ResourceName='arn:aws:rds:us-west-2:1234567890:snapshot:snapshot-without-tags', + Tags=[ + { + 'Key': 'foo', + 'Value': 'fish', + }, + { + 'Key': 'foo2', + 'Value': 'bar2', + }, + ]) + result = conn.list_tags_for_resource( + ResourceName='arn:aws:rds:us-west-2:1234567890:snapshot:snapshot-without-tags') + list(result['TagList']).should.have.length_of(3) + + +@mock_rds2 +def test_remove_tags_snapshot(): + conn = boto3.client('rds', region_name='us-west-2') + conn.create_db_instance(DBInstanceIdentifier='db-primary-1', + AllocatedStorage=10, + Engine='postgres', + DBName='staging-postgres', + DBInstanceClass='db.m1.small', + MasterUsername='root', + MasterUserPassword='hunter2', + Port=1234, + DBSecurityGroups=["my_sg"]) + snapshot = conn.create_db_snapshot(DBInstanceIdentifier='db-primary-1', + DBSnapshotIdentifier='snapshot-with-tags', + Tags=[ + { + 'Key': 'foo', + 'Value': 'bar', + }, + { + 'Key': 'foo1', + 'Value': 'bar1', + }, + ]) + result = conn.list_tags_for_resource( + ResourceName='arn:aws:rds:us-west-2:1234567890:snapshot:snapshot-with-tags') + list(result['TagList']).should.have.length_of(2) + conn.remove_tags_from_resource( + ResourceName='arn:aws:rds:us-west-2:1234567890:snapshot:snapshot-with-tags', TagKeys=['foo']) + result = conn.list_tags_for_resource( + ResourceName='arn:aws:rds:us-west-2:1234567890:snapshot:snapshot-with-tags') + len(result['TagList']).should.equal(1) + + @mock_rds2 def test_add_tags_option_group(): conn = boto3.client('rds', region_name='us-west-2') diff --git a/tests/test_redshift/test_redshift.py b/tests/test_redshift/test_redshift.py index 6e027b86cde5..9208c92dd7d1 100644 --- a/tests/test_redshift/test_redshift.py +++ b/tests/test_redshift/test_redshift.py @@ -1,5 +1,7 @@ from __future__ import unicode_literals +import datetime + import boto import boto3 from boto.redshift.exceptions import ( @@ -32,6 +34,8 @@ def test_create_cluster_boto3(): MasterUserPassword='password', ) response['Cluster']['NodeType'].should.equal('ds2.xlarge') + create_time = response['Cluster']['ClusterCreateTime'] + create_time.should.be.lower_than(datetime.datetime.now(create_time.tzinfo)) @mock_redshift diff --git a/tests/test_s3/test_s3.py b/tests/test_s3/test_s3.py index 9a68d1bbb3e8..6e339abb6699 100644 --- a/tests/test_s3/test_s3.py +++ 
b/tests/test_s3/test_s3.py
@@ -2471,6 +2471,72 @@ def test_boto3_delete_markers():
     oldest['Key'].should.equal('key-with-versions-and-unicode-ó')
 
 
+@mock_s3
+def test_boto3_multiple_delete_markers():
+    s3 = boto3.client('s3', region_name='us-east-1')
+    bucket_name = 'mybucket'
+    key = u'key-with-versions-and-unicode-ó'
+    s3.create_bucket(Bucket=bucket_name)
+    s3.put_bucket_versioning(
+        Bucket=bucket_name,
+        VersioningConfiguration={
+            'Status': 'Enabled'
+        }
+    )
+    items = (six.b('v1'), six.b('v2'))
+    for body in items:
+        s3.put_object(
+            Bucket=bucket_name,
+            Key=key,
+            Body=body
+        )
+
+    # Delete the object twice to add multiple delete markers
+    s3.delete_object(Bucket=bucket_name, Key=key)
+    s3.delete_object(Bucket=bucket_name, Key=key)
+
+    response = s3.list_object_versions(Bucket=bucket_name)
+    response['DeleteMarkers'].should.have.length_of(2)
+
+    with assert_raises(ClientError) as e:
+        s3.get_object(
+            Bucket=bucket_name,
+            Key=key
+        )
+    e.exception.response['Error']['Code'].should.equal('404')
+
+    # Remove both delete markers to restore the object
+    s3.delete_object(
+        Bucket=bucket_name,
+        Key=key,
+        VersionId='2'
+    )
+    s3.delete_object(
+        Bucket=bucket_name,
+        Key=key,
+        VersionId='3'
+    )
+
+    response = s3.get_object(
+        Bucket=bucket_name,
+        Key=key
+    )
+    response['Body'].read().should.equal(items[-1])
+    response = s3.list_object_versions(Bucket=bucket_name)
+    response['Versions'].should.have.length_of(2)
+
+    # We've asserted there are only two records, so one is newest and one is oldest
+    latest = list(filter(lambda item: item['IsLatest'], response['Versions']))[0]
+    oldest = list(filter(lambda item: not item['IsLatest'], response['Versions']))[0]
+
+    # Double check ordering of version IDs
+    latest['VersionId'].should.equal('1')
+    oldest['VersionId'].should.equal('0')
+
+    # Double check the name is still unicode
+    latest['Key'].should.equal('key-with-versions-and-unicode-ó')
+    oldest['Key'].should.equal('key-with-versions-and-unicode-ó')
+
+
 @mock_s3
 def test_get_stream_gzipped():
     payload = b"this is some stuff here"
diff --git a/tests/test_s3/test_s3_lifecycle.py b/tests/test_s3/test_s3_lifecycle.py
index d176e95c6bac..3d533a641048 100644
--- a/tests/test_s3/test_s3_lifecycle.py
+++ b/tests/test_s3/test_s3_lifecycle.py
@@ -191,6 +191,127 @@ def test_lifecycle_with_eodm():
     assert err.exception.response["Error"]["Code"] == "MalformedXML"
 
 
+@mock_s3
+def test_lifecycle_with_nve():
+    client = boto3.client("s3")
+    client.create_bucket(Bucket="bucket")
+
+    lfc = {
+        "Rules": [
+            {
+                "NoncurrentVersionExpiration": {
+                    "NoncurrentDays": 30
+                },
+                "ID": "wholebucket",
+                "Filter": {
+                    "Prefix": ""
+                },
+                "Status": "Enabled"
+            }
+        ]
+    }
+    client.put_bucket_lifecycle_configuration(Bucket="bucket", LifecycleConfiguration=lfc)
+    result = client.get_bucket_lifecycle_configuration(Bucket="bucket")
+    assert len(result["Rules"]) == 1
+    assert result["Rules"][0]["NoncurrentVersionExpiration"]["NoncurrentDays"] == 30
+
+    # Change NoncurrentDays:
+    lfc["Rules"][0]["NoncurrentVersionExpiration"]["NoncurrentDays"] = 10
+    client.put_bucket_lifecycle_configuration(Bucket="bucket", LifecycleConfiguration=lfc)
+    result = client.get_bucket_lifecycle_configuration(Bucket="bucket")
+    assert len(result["Rules"]) == 1
+    assert result["Rules"][0]["NoncurrentVersionExpiration"]["NoncurrentDays"] == 10
+
+    # TODO: Add test for failures due to missing children
+
+
+@mock_s3
+def test_lifecycle_with_nvt():
+    client = boto3.client("s3")
+    client.create_bucket(Bucket="bucket")
+
+    lfc = {
+        "Rules": [
+            {
+
"NoncurrentVersionTransitions": [{ + "NoncurrentDays": 30, + "StorageClass": "ONEZONE_IA" + }], + "ID": "wholebucket", + "Filter": { + "Prefix": "" + }, + "Status": "Enabled" + } + ] + } + client.put_bucket_lifecycle_configuration(Bucket="bucket", LifecycleConfiguration=lfc) + result = client.get_bucket_lifecycle_configuration(Bucket="bucket") + assert len(result["Rules"]) == 1 + assert result["Rules"][0]["NoncurrentVersionTransitions"][0]["NoncurrentDays"] == 30 + assert result["Rules"][0]["NoncurrentVersionTransitions"][0]["StorageClass"] == "ONEZONE_IA" + + # Change NoncurrentDays: + lfc["Rules"][0]["NoncurrentVersionTransitions"][0]["NoncurrentDays"] = 10 + client.put_bucket_lifecycle_configuration(Bucket="bucket", LifecycleConfiguration=lfc) + result = client.get_bucket_lifecycle_configuration(Bucket="bucket") + assert len(result["Rules"]) == 1 + assert result["Rules"][0]["NoncurrentVersionTransitions"][0]["NoncurrentDays"] == 10 + + # Change StorageClass: + lfc["Rules"][0]["NoncurrentVersionTransitions"][0]["StorageClass"] = "GLACIER" + client.put_bucket_lifecycle_configuration(Bucket="bucket", LifecycleConfiguration=lfc) + result = client.get_bucket_lifecycle_configuration(Bucket="bucket") + assert len(result["Rules"]) == 1 + assert result["Rules"][0]["NoncurrentVersionTransitions"][0]["StorageClass"] == "GLACIER" + + # With failures for missing children: + del lfc["Rules"][0]["NoncurrentVersionTransitions"][0]["NoncurrentDays"] + with assert_raises(ClientError) as err: + client.put_bucket_lifecycle_configuration(Bucket="bucket", LifecycleConfiguration=lfc) + assert err.exception.response["Error"]["Code"] == "MalformedXML" + lfc["Rules"][0]["NoncurrentVersionTransitions"][0]["NoncurrentDays"] = 30 + + del lfc["Rules"][0]["NoncurrentVersionTransitions"][0]["StorageClass"] + with assert_raises(ClientError) as err: + client.put_bucket_lifecycle_configuration(Bucket="bucket", LifecycleConfiguration=lfc) + assert err.exception.response["Error"]["Code"] == "MalformedXML" + + +@mock_s3 +def test_lifecycle_with_aimu(): + client = boto3.client("s3") + client.create_bucket(Bucket="bucket") + + lfc = { + "Rules": [ + { + "AbortIncompleteMultipartUpload": { + "DaysAfterInitiation": 7 + }, + "ID": "wholebucket", + "Filter": { + "Prefix": "" + }, + "Status": "Enabled" + } + ] + } + client.put_bucket_lifecycle_configuration(Bucket="bucket", LifecycleConfiguration=lfc) + result = client.get_bucket_lifecycle_configuration(Bucket="bucket") + assert len(result["Rules"]) == 1 + assert result["Rules"][0]["AbortIncompleteMultipartUpload"]["DaysAfterInitiation"] == 7 + + # Change DaysAfterInitiation: + lfc["Rules"][0]["AbortIncompleteMultipartUpload"]["DaysAfterInitiation"] = 30 + client.put_bucket_lifecycle_configuration(Bucket="bucket", LifecycleConfiguration=lfc) + result = client.get_bucket_lifecycle_configuration(Bucket="bucket") + assert len(result["Rules"]) == 1 + assert result["Rules"][0]["AbortIncompleteMultipartUpload"]["DaysAfterInitiation"] == 30 + + # TODO: Add test for failures due to missing children + + @mock_s3_deprecated def test_lifecycle_with_glacier_transition(): conn = boto.s3.connect_to_region("us-west-1") diff --git a/tests/test_secretsmanager/test_secretsmanager.py b/tests/test_secretsmanager/test_secretsmanager.py index c631fabb069c..ec384a6601c6 100644 --- a/tests/test_secretsmanager/test_secretsmanager.py +++ b/tests/test_secretsmanager/test_secretsmanager.py @@ -26,13 +26,13 @@ def test_get_secret_that_does_not_exist(): result = conn.get_secret_value(SecretId='i-dont-exist') 
@mock_secretsmanager -def test_get_secret_with_mismatched_id(): +def test_get_secret_that_does_not_match(): conn = boto3.client('secretsmanager', region_name='us-west-2') create_secret = conn.create_secret(Name='java-util-test-password', SecretString="foosecret") with assert_raises(ClientError): - result = conn.get_secret_value(SecretId='i-dont-exist') + result = conn.get_secret_value(SecretId='i-dont-match') @mock_secretsmanager def test_create_secret(): @@ -179,3 +179,108 @@ def test_describe_secret_that_does_not_match(): with assert_raises(ClientError): result = conn.get_secret_value(SecretId='i-dont-match') + +@mock_secretsmanager +def test_rotate_secret(): + secret_name = 'test-secret' + conn = boto3.client('secretsmanager', region_name='us-west-2') + conn.create_secret(Name=secret_name, + SecretString='foosecret') + + rotated_secret = conn.rotate_secret(SecretId=secret_name) + + assert rotated_secret + assert rotated_secret['ARN'] == ( + 'arn:aws:secretsmanager:us-west-2:1234567890:secret:test-secret-rIjad' + ) + assert rotated_secret['Name'] == secret_name + assert rotated_secret['VersionId'] != '' + +@mock_secretsmanager +def test_rotate_secret_enable_rotation(): + secret_name = 'test-secret' + conn = boto3.client('secretsmanager', region_name='us-west-2') + conn.create_secret(Name=secret_name, + SecretString='foosecret') + + initial_description = conn.describe_secret(SecretId=secret_name) + assert initial_description + assert initial_description['RotationEnabled'] is False + assert initial_description['RotationRules']['AutomaticallyAfterDays'] == 0 + + conn.rotate_secret(SecretId=secret_name, + RotationRules={'AutomaticallyAfterDays': 42}) + + rotated_description = conn.describe_secret(SecretId=secret_name) + assert rotated_description + assert rotated_description['RotationEnabled'] is True + assert rotated_description['RotationRules']['AutomaticallyAfterDays'] == 42 + +@mock_secretsmanager +def test_rotate_secret_that_does_not_exist(): + conn = boto3.client('secretsmanager', 'us-west-2') + + with assert_raises(ClientError): + result = conn.rotate_secret(SecretId='i-dont-exist') + +@mock_secretsmanager +def test_rotate_secret_that_does_not_match(): + conn = boto3.client('secretsmanager', region_name='us-west-2') + conn.create_secret(Name='test-secret', + SecretString='foosecret') + + with assert_raises(ClientError): + result = conn.rotate_secret(SecretId='i-dont-match') + +@mock_secretsmanager +def test_rotate_secret_client_request_token_too_short(): + # Test is intentionally empty. Boto3 catches too short ClientRequestToken + # and raises ParamValidationError before Moto can see it. + # test_server actually handles this error. 
+ assert True + +@mock_secretsmanager +def test_rotate_secret_client_request_token_too_long(): + secret_name = 'test-secret' + conn = boto3.client('secretsmanager', region_name='us-west-2') + conn.create_secret(Name=secret_name, + SecretString='foosecret') + + client_request_token = ( + 'ED9F8B6C-85B7-446A-B7E4-38F2A3BEB13C-' + 'ED9F8B6C-85B7-446A-B7E4-38F2A3BEB13C' + ) + with assert_raises(ClientError): + result = conn.rotate_secret(SecretId=secret_name, + ClientRequestToken=client_request_token) + +@mock_secretsmanager +def test_rotate_secret_rotation_lambda_arn_too_long(): + secret_name = 'test-secret' + conn = boto3.client('secretsmanager', region_name='us-west-2') + conn.create_secret(Name=secret_name, + SecretString='foosecret') + + rotation_lambda_arn = '85B7-446A-B7E4' * 147 # == 2058 characters + with assert_raises(ClientError): + result = conn.rotate_secret(SecretId=secret_name, + RotationLambdaARN=rotation_lambda_arn) + +@mock_secretsmanager +def test_rotate_secret_rotation_period_zero(): + # Test is intentionally empty. Boto3 catches zero day rotation period + # and raises ParamValidationError before Moto can see it. + # test_server actually handles this error. + assert True + +@mock_secretsmanager +def test_rotate_secret_rotation_period_too_long(): + secret_name = 'test-secret' + conn = boto3.client('secretsmanager', region_name='us-west-2') + conn.create_secret(Name=secret_name, + SecretString='foosecret') + + rotation_rules = {'AutomaticallyAfterDays': 1001} + with assert_raises(ClientError): + result = conn.rotate_secret(SecretId=secret_name, + RotationRules=rotation_rules) diff --git a/tests/test_secretsmanager/test_server.py b/tests/test_secretsmanager/test_server.py index 8c6f7b970ac6..e573f9b6719b 100644 --- a/tests/test_secretsmanager/test_server.py +++ b/tests/test_secretsmanager/test_server.py @@ -49,6 +49,27 @@ def test_get_secret_that_does_not_exist(): assert json_data['message'] == "Secrets Manager can't find the specified secret" assert json_data['__type'] == 'ResourceNotFoundException' +@mock_secretsmanager +def test_get_secret_that_does_not_match(): + backend = server.create_backend_app("secretsmanager") + test_client = backend.test_client() + + create_secret = test_client.post('/', + data={"Name": "test-secret", + "SecretString": "foo-secret"}, + headers={ + "X-Amz-Target": "secretsmanager.CreateSecret"}, + ) + get_secret = test_client.post('/', + data={"SecretId": "i-dont-match", + "VersionStage": "AWSCURRENT"}, + headers={ + "X-Amz-Target": "secretsmanager.GetSecretValue"}, + ) + json_data = json.loads(get_secret.data.decode("utf-8")) + assert json_data['message'] == "Secrets Manager can't find the specified secret" + assert json_data['__type'] == 'ResourceNotFoundException' + @mock_secretsmanager def test_create_secret(): @@ -133,3 +154,268 @@ def test_describe_secret_that_does_not_match(): json_data = json.loads(describe_secret.data.decode("utf-8")) assert json_data['message'] == "Secrets Manager can't find the specified secret" assert json_data['__type'] == 'ResourceNotFoundException' + +@mock_secretsmanager +def test_rotate_secret(): + backend = server.create_backend_app('secretsmanager') + test_client = backend.test_client() + + create_secret = test_client.post('/', + data={"Name": "test-secret", + "SecretString": "foosecret"}, + headers={ + "X-Amz-Target": "secretsmanager.CreateSecret" + }, + ) + + client_request_token = "EXAMPLE2-90ab-cdef-fedc-ba987SECRET2" + rotate_secret = test_client.post('/', + data={"SecretId": "test-secret", + 
"ClientRequestToken": client_request_token}, + headers={ + "X-Amz-Target": "secretsmanager.RotateSecret" + }, + ) + + json_data = json.loads(rotate_secret.data.decode("utf-8")) + assert json_data # Returned dict is not empty + assert json_data['ARN'] == ( + 'arn:aws:secretsmanager:us-east-1:1234567890:secret:test-secret-rIjad' + ) + assert json_data['Name'] == 'test-secret' + assert json_data['VersionId'] == client_request_token + +# @mock_secretsmanager +# def test_rotate_secret_enable_rotation(): +# backend = server.create_backend_app('secretsmanager') +# test_client = backend.test_client() + +# create_secret = test_client.post( +# '/', +# data={ +# "Name": "test-secret", +# "SecretString": "foosecret" +# }, +# headers={ +# "X-Amz-Target": "secretsmanager.CreateSecret" +# }, +# ) + +# initial_description = test_client.post( +# '/', +# data={ +# "SecretId": "test-secret" +# }, +# headers={ +# "X-Amz-Target": "secretsmanager.DescribeSecret" +# }, +# ) + +# json_data = json.loads(initial_description.data.decode("utf-8")) +# assert json_data # Returned dict is not empty +# assert json_data['RotationEnabled'] is False +# assert json_data['RotationRules']['AutomaticallyAfterDays'] == 0 + +# rotate_secret = test_client.post( +# '/', +# data={ +# "SecretId": "test-secret", +# "RotationRules": {"AutomaticallyAfterDays": 42} +# }, +# headers={ +# "X-Amz-Target": "secretsmanager.RotateSecret" +# }, +# ) + +# rotated_description = test_client.post( +# '/', +# data={ +# "SecretId": "test-secret" +# }, +# headers={ +# "X-Amz-Target": "secretsmanager.DescribeSecret" +# }, +# ) + +# json_data = json.loads(rotated_description.data.decode("utf-8")) +# assert json_data # Returned dict is not empty +# assert json_data['RotationEnabled'] is True +# assert json_data['RotationRules']['AutomaticallyAfterDays'] == 42 + +@mock_secretsmanager +def test_rotate_secret_that_does_not_exist(): + backend = server.create_backend_app('secretsmanager') + test_client = backend.test_client() + + rotate_secret = test_client.post('/', + data={"SecretId": "i-dont-exist"}, + headers={ + "X-Amz-Target": "secretsmanager.RotateSecret" + }, + ) + + json_data = json.loads(rotate_secret.data.decode("utf-8")) + assert json_data['message'] == "Secrets Manager can't find the specified secret" + assert json_data['__type'] == 'ResourceNotFoundException' + +@mock_secretsmanager +def test_rotate_secret_that_does_not_match(): + backend = server.create_backend_app('secretsmanager') + test_client = backend.test_client() + + create_secret = test_client.post('/', + data={"Name": "test-secret", + "SecretString": "foosecret"}, + headers={ + "X-Amz-Target": "secretsmanager.CreateSecret" + }, + ) + + rotate_secret = test_client.post('/', + data={"SecretId": "i-dont-match"}, + headers={ + "X-Amz-Target": "secretsmanager.RotateSecret" + }, + ) + + json_data = json.loads(rotate_secret.data.decode("utf-8")) + assert json_data['message'] == "Secrets Manager can't find the specified secret" + assert json_data['__type'] == 'ResourceNotFoundException' + +@mock_secretsmanager +def test_rotate_secret_client_request_token_too_short(): + backend = server.create_backend_app('secretsmanager') + test_client = backend.test_client() + + create_secret = test_client.post('/', + data={"Name": "test-secret", + "SecretString": "foosecret"}, + headers={ + "X-Amz-Target": "secretsmanager.CreateSecret" + }, + ) + + client_request_token = "ED9F8B6C-85B7-B7E4-38F2A3BEB13C" + rotate_secret = test_client.post('/', + data={"SecretId": "test-secret", + "ClientRequestToken": 
client_request_token}, + headers={ + "X-Amz-Target": "secretsmanager.RotateSecret" + }, + ) + + json_data = json.loads(rotate_secret.data.decode("utf-8")) + assert json_data['message'] == "ClientRequestToken must be 32-64 characters long." + assert json_data['__type'] == 'InvalidParameterException' + +@mock_secretsmanager +def test_rotate_secret_client_request_token_too_long(): + backend = server.create_backend_app('secretsmanager') + test_client = backend.test_client() + + create_secret = test_client.post('/', + data={"Name": "test-secret", + "SecretString": "foosecret"}, + headers={ + "X-Amz-Target": "secretsmanager.CreateSecret" + }, + ) + + client_request_token = ( + 'ED9F8B6C-85B7-446A-B7E4-38F2A3BEB13C-' + 'ED9F8B6C-85B7-446A-B7E4-38F2A3BEB13C' + ) + rotate_secret = test_client.post('/', + data={"SecretId": "test-secret", + "ClientRequestToken": client_request_token}, + headers={ + "X-Amz-Target": "secretsmanager.RotateSecret" + }, + ) + + json_data = json.loads(rotate_secret.data.decode("utf-8")) + assert json_data['message'] == "ClientRequestToken must be 32-64 characters long." + assert json_data['__type'] == 'InvalidParameterException' + +@mock_secretsmanager +def test_rotate_secret_rotation_lambda_arn_too_long(): + backend = server.create_backend_app('secretsmanager') + test_client = backend.test_client() + + create_secret = test_client.post('/', + data={"Name": "test-secret", + "SecretString": "foosecret"}, + headers={ + "X-Amz-Target": "secretsmanager.CreateSecret" + }, + ) + + rotation_lambda_arn = '85B7-446A-B7E4' * 147 # == 2058 characters + rotate_secret = test_client.post('/', + data={"SecretId": "test-secret", + "RotationLambdaARN": rotation_lambda_arn}, + headers={ + "X-Amz-Target": "secretsmanager.RotateSecret" + }, + ) + + json_data = json.loads(rotate_secret.data.decode("utf-8")) + assert json_data['message'] == "RotationLambdaARN must <= 2048 characters long." + assert json_data['__type'] == 'InvalidParameterException' + + +# +# The following tests should work, but fail on the embedded dict in +# RotationRules. The error message suggests a problem deeper in the code, which +# needs further investigation. +# + +# @mock_secretsmanager +# def test_rotate_secret_rotation_period_zero(): +# backend = server.create_backend_app('secretsmanager') +# test_client = backend.test_client() + +# create_secret = test_client.post('/', +# data={"Name": "test-secret", +# "SecretString": "foosecret"}, +# headers={ +# "X-Amz-Target": "secretsmanager.CreateSecret" +# }, +# ) + +# rotate_secret = test_client.post('/', +# data={"SecretId": "test-secret", +# "RotationRules": {"AutomaticallyAfterDays": 0}}, +# headers={ +# "X-Amz-Target": "secretsmanager.RotateSecret" +# }, +# ) + +# json_data = json.loads(rotate_secret.data.decode("utf-8")) +# assert json_data['message'] == "RotationRules.AutomaticallyAfterDays must be within 1-1000." 
+# assert json_data['__type'] == 'InvalidParameterException' + +# @mock_secretsmanager +# def test_rotate_secret_rotation_period_too_long(): +# backend = server.create_backend_app('secretsmanager') +# test_client = backend.test_client() + +# create_secret = test_client.post('/', +# data={"Name": "test-secret", +# "SecretString": "foosecret"}, +# headers={ +# "X-Amz-Target": "secretsmanager.CreateSecret" +# }, +# ) + +# rotate_secret = test_client.post('/', +# data={"SecretId": "test-secret", +# "RotationRules": {"AutomaticallyAfterDays": 1001}}, +# headers={ +# "X-Amz-Target": "secretsmanager.RotateSecret" +# }, +# ) + +# json_data = json.loads(rotate_secret.data.decode("utf-8")) +# assert json_data['message'] == "RotationRules.AutomaticallyAfterDays must be within 1-1000." +# assert json_data['__type'] == 'InvalidParameterException' diff --git a/tests/test_sqs/test_sqs.py b/tests/test_sqs/test_sqs.py index d3e4ca917696..9beb9a3faef0 100644 --- a/tests/test_sqs/test_sqs.py +++ b/tests/test_sqs/test_sqs.py @@ -40,6 +40,33 @@ def test_create_fifo_queue_fail(): raise RuntimeError('Should of raised InvalidParameterValue Exception') +@mock_sqs +def test_create_queue_with_same_attributes(): + sqs = boto3.client('sqs', region_name='us-east-1') + + dlq_url = sqs.create_queue(QueueName='test-queue-dlq')['QueueUrl'] + dlq_arn = sqs.get_queue_attributes(QueueUrl=dlq_url)['Attributes']['QueueArn'] + + attributes = { + 'DelaySeconds': '900', + 'MaximumMessageSize': '262144', + 'MessageRetentionPeriod': '1209600', + 'ReceiveMessageWaitTimeSeconds': '20', + 'RedrivePolicy': '{"deadLetterTargetArn": "%s", "maxReceiveCount": 100}' % (dlq_arn), + 'VisibilityTimeout': '43200' + } + + sqs.create_queue( + QueueName='test-queue', + Attributes=attributes + ) + + sqs.create_queue( + QueueName='test-queue', + Attributes=attributes + ) + + @mock_sqs def test_create_queue_with_different_attributes_fail(): sqs = boto3.client('sqs', region_name='us-east-1') @@ -1195,3 +1222,16 @@ def test_receive_messages_with_message_group_id_on_visibility_timeout(): messages = queue.receive_messages() messages.should.have.length_of(1) messages[0].message_id.should.equal(message.message_id) + +@mock_sqs +def test_receive_message_for_queue_with_receive_message_wait_time_seconds_set(): + sqs = boto3.resource('sqs', region_name='us-east-1') + + queue = sqs.create_queue( + QueueName='test-queue', + Attributes={ + 'ReceiveMessageWaitTimeSeconds': '2', + } + ) + + queue.receive_messages() diff --git a/tests/test_ssm/test_ssm_boto3.py b/tests/test_ssm/test_ssm_boto3.py index 7a0685d56ae0..f8ef3a237825 100644 --- a/tests/test_ssm/test_ssm_boto3.py +++ b/tests/test_ssm/test_ssm_boto3.py @@ -5,11 +5,12 @@ import sure # noqa import datetime import uuid +import json from botocore.exceptions import ClientError from nose.tools import assert_raises -from moto import mock_ssm +from moto import mock_ssm, mock_cloudformation @mock_ssm @@ -668,3 +669,118 @@ def test_list_commands(): with assert_raises(ClientError): response = client.list_commands( CommandId=str(uuid.uuid4())) + +@mock_ssm +def test_get_command_invocation(): + client = boto3.client('ssm', region_name='us-east-1') + + ssm_document = 'AWS-RunShellScript' + params = {'commands': ['#!/bin/bash\necho \'hello world\'']} + + response = client.send_command( + InstanceIds=['i-123456', 'i-234567', 'i-345678'], + DocumentName=ssm_document, + Parameters=params, + OutputS3Region='us-east-2', + OutputS3BucketName='the-bucket', + OutputS3KeyPrefix='pref') + + cmd = response['Command'] + cmd_id = 
cmd['CommandId']
+
+    instance_id = 'i-345678'
+    invocation_response = client.get_command_invocation(
+        CommandId=cmd_id,
+        InstanceId=instance_id,
+        PluginName='aws:runShellScript')
+
+    invocation_response['CommandId'].should.equal(cmd_id)
+    invocation_response['InstanceId'].should.equal(instance_id)
+
+    # test the error case for an invalid instance id
+    with assert_raises(ClientError):
+        invocation_response = client.get_command_invocation(
+            CommandId=cmd_id,
+            InstanceId='i-FAKE')
+
+    # test the error case for an invalid plugin name
+    with assert_raises(ClientError):
+        invocation_response = client.get_command_invocation(
+            CommandId=cmd_id,
+            InstanceId=instance_id,
+            PluginName='FAKE')
+
+@mock_ssm
+@mock_cloudformation
+def test_get_command_invocations_from_stack():
+    stack_template = {
+        "AWSTemplateFormatVersion": "2010-09-09",
+        "Description": "Test Stack",
+        "Resources": {
+            "EC2Instance1": {
+                "Type": "AWS::EC2::Instance",
+                "Properties": {
+                    "ImageId": "ami-test-image-id",
+                    "KeyName": "test",
+                    "InstanceType": "t2.micro",
+                    "Tags": [
+                        {
+                            "Key": "Test Description",
+                            "Value": "Test tag"
+                        },
+                        {
+                            "Key": "Test Name",
+                            "Value": "Name tag for tests"
+                        }
+                    ]
+                }
+            }
+        },
+        "Outputs": {
+            "test": {
+                "Description": "Test Output",
+                "Value": "Test output value",
+                "Export": {
+                    "Name": "Test value to export"
+                }
+            },
+            "PublicIP": {
+                "Value": "Test public ip"
+            }
+        }
+    }
+
+    cloudformation_client = boto3.client(
+        'cloudformation',
+        region_name='us-east-1')
+
+    stack_template_str = json.dumps(stack_template)
+
+    response = cloudformation_client.create_stack(
+        StackName='test_stack',
+        TemplateBody=stack_template_str,
+        Capabilities=('CAPABILITY_IAM', ))
+
+    client = boto3.client('ssm', region_name='us-east-1')
+
+    ssm_document = 'AWS-RunShellScript'
+    params = {'commands': ['#!/bin/bash\necho \'hello world\'']}
+
+    response = client.send_command(
+        Targets=[{
+            'Key': 'tag:aws:cloudformation:stack-name',
+            'Values': ('test_stack', )}],
+        DocumentName=ssm_document,
+        Parameters=params,
+        OutputS3Region='us-east-2',
+        OutputS3BucketName='the-bucket',
+        OutputS3KeyPrefix='pref')
+
+    cmd = response['Command']
+    cmd_id = cmd['CommandId']
+    instance_ids = cmd['InstanceIds']
+
+    invocation_response = client.get_command_invocation(
+        CommandId=cmd_id,
+        InstanceId=instance_ids[0],
+        PluginName='aws:runShellScript')

From 36d8f118e36a7cbdb1dd99667f599fd91aa26bcc Mon Sep 17 00:00:00 2001
From: Stephan Huber
Date: Wed, 24 Oct 2018 14:53:08 +0200
Subject: [PATCH 006/658] implement `attach_policy`, `detach_policy` and
 `list_attached_policies`

---
 moto/iot/models.py         | 22 +++++++++++++++++++
 moto/iot/responses.py      | 30 ++++++++++++++++++++++++++
 tests/test_iot/test_iot.py | 44 ++++++++++++++++++++++++++++++++++++++
 3 files changed, 96 insertions(+)

diff --git a/moto/iot/models.py b/moto/iot/models.py
index 931af192a1fa..4789e045fe72 100644
--- a/moto/iot/models.py
+++ b/moto/iot/models.py
@@ -400,6 +400,28 @@ def create_policy(self, policy_name, policy_document):
         self.policies[policy.name] = policy
         return policy
 
+    def attach_policy(self, policy_name, target):
+        principal = self._get_principal(target)
+        policy = self.get_policy(policy_name)
+        k = (target, policy_name)
+        if k in self.principal_policies:
+            return
+        self.principal_policies[k] = (principal, policy)
+
+    def detach_policy(self, policy_name, target):
+        # this may raise a ResourceNotFoundException
+        self._get_principal(target)
+        self.get_policy(policy_name)
+
+        k = (target, policy_name)
+        if k not in self.principal_policies:
+            raise 
ResourceNotFoundException() + del self.principal_policies[k] + + def list_attached_policies(self, target): + policies = [v[1] for k, v in self.principal_policies.items() if k[0] == target] + return policies + def list_policies(self): policies = self.policies.values() return policies diff --git a/moto/iot/responses.py b/moto/iot/responses.py index c71d4942a9f3..f5e25fdbc0da 100644 --- a/moto/iot/responses.py +++ b/moto/iot/responses.py @@ -1,6 +1,7 @@ from __future__ import unicode_literals import json +from urllib.parse import unquote from moto.core.responses import BaseResponse from .models import iot_backends @@ -234,6 +235,35 @@ def delete_policy(self): ) return json.dumps(dict()) + def attach_policy(self): + policy_name = self._get_param("policyName") + principal = self._get_param('target') + self.iot_backend.attach_policy( + policy_name=policy_name, + target=principal, + ) + return json.dumps(dict()) + + def detach_policy(self): + policy_name = self._get_param("policyName") + principal = self._get_param('target') + self.iot_backend.detach_policy( + policy_name=policy_name, + target=principal, + ) + return json.dumps(dict()) + + def list_attached_policies(self): + principal = unquote(self._get_param('target')) + # marker = self._get_param("marker") + # page_size = self._get_int_param("pageSize") + policies = self.iot_backend.list_attached_policies( + target=principal + ) + # TODO: implement pagination in the future + next_marker = None + return json.dumps(dict(policies=[_.to_dict() for _ in policies], nextMarker=next_marker)) + def attach_principal_policy(self): policy_name = self._get_param("policyName") principal = self.headers.get('x-amzn-iot-principal') diff --git a/tests/test_iot/test_iot.py b/tests/test_iot/test_iot.py index 759c7d3c71f3..7fbd66963472 100644 --- a/tests/test_iot/test_iot.py +++ b/tests/test_iot/test_iot.py @@ -8,6 +8,50 @@ from moto import mock_iot +@mock_iot +def test_attach_policy(): + client = boto3.client('iot', region_name='ap-northeast-1') + policy_name = 'my-policy' + doc = '{}' + + cert = client.create_keys_and_certificate(setAsActive=True) + cert_arn = cert['certificateArn'] + client.create_policy(policyName=policy_name, policyDocument=doc) + client.attach_policy(policyName=policy_name, target=cert_arn) + + res = client.list_attached_policies(target=cert_arn) + res.should.have.key('policies').which.should.have.length_of(1) + res['policies'][0]['policyName'].should.equal('my-policy') + + +@mock_iot +def test_detach_policy(): + client = boto3.client('iot', region_name='ap-northeast-1') + policy_name = 'my-policy' + doc = '{}' + + cert = client.create_keys_and_certificate(setAsActive=True) + cert_arn = cert['certificateArn'] + client.create_policy(policyName=policy_name, policyDocument=doc) + client.attach_policy(policyName=policy_name, target=cert_arn) + + res = client.list_attached_policies(target=cert_arn) + res.should.have.key('policies').which.should.have.length_of(1) + res['policies'][0]['policyName'].should.equal('my-policy') + + client.detach_policy(policyName=policy_name, target=cert_arn) + res = client.list_attached_policies(target=cert_arn) + res.should.have.key('policies').which.should.be.empty + + +@mock_iot +def test_list_attached_policies(): + client = boto3.client('iot', region_name='ap-northeast-1') + cert = client.create_keys_and_certificate(setAsActive=True) + policies = client.list_attached_policies(target=cert['certificateArn']) + policies['policies'].should.be.empty + + @mock_iot def test_things(): client = boto3.client('iot', 
region_name='ap-northeast-1')

From bb7e1197bc9474eb87dfa520f60fa8de57fbda0d Mon Sep 17 00:00:00 2001
From: Stephan Huber
Date: Thu, 25 Oct 2018 12:13:56 +0200
Subject: [PATCH 007/658] adding AWS IoT policy version handling

[+] `list_policy_versions`
[+] `get_policy_version`
[+] `create_policy_version`
[+] `delete_policy_version`
[+] `set_default_policy_version`
---
 moto/iot/models.py         | 104 +++++++++++++++++++++++++++++++++++--
 moto/iot/responses.py      |  36 ++++++++++++-
 tests/test_iot/test_iot.py |  82 ++++++++++++++++++++++++++++-
 3 files changed, 215 insertions(+), 7 deletions(-)

diff --git a/moto/iot/models.py b/moto/iot/models.py
index 4789e045fe72..4bcab26ebc4b 100644
--- a/moto/iot/models.py
+++ b/moto/iot/models.py
@@ -136,18 +136,19 @@ def to_description_dict(self):
 
 class FakePolicy(BaseModel):
 
-    def __init__(self, name, document, region_name):
+    def __init__(self, name, document, region_name, default_version_id='1'):
         self.name = name
         self.document = document
         self.arn = 'arn:aws:iot:%s:1:policy/%s' % (region_name, name)
-        self.version = '1'  # TODO: handle version
+        self.default_version_id = default_version_id
+        self.versions = [FakePolicyVersion(self.name, document, True, region_name)]
 
     def to_get_dict(self):
         return {
             'policyName': self.name,
             'policyArn': self.arn,
             'policyDocument': self.document,
-            'defaultVersionId': self.version
+            'defaultVersionId': self.default_version_id
         }
 
     def to_dict_at_creation(self):
@@ -155,7 +156,7 @@ def to_dict_at_creation(self):
             'policyName': self.name,
             'policyArn': self.arn,
             'policyDocument': self.document,
-            'policyVersionId': self.version
+            'policyVersionId': self.default_version_id
         }
 
     def to_dict(self):
@@ -165,6 +166,50 @@ def to_dict(self):
         }
 
 
+class FakePolicyVersion(object):
+
+    def __init__(self,
+                 policy_name,
+                 document,
+                 is_default,
+                 region_name):
+        self.name = policy_name
+        self.arn = 'arn:aws:iot:%s:1:policy/%s' % (region_name, policy_name)
+        self.document = document or {}
+        self.is_default = is_default
+        self.version_id = '1'
+
+        self.create_datetime = time.mktime(datetime(2015, 1, 1).timetuple())
+        self.last_modified_datetime = time.mktime(datetime(2015, 1, 2).timetuple())
+
+    def to_get_dict(self):
+        return {
+            'policyName': self.name,
+            'policyArn': self.arn,
+            'policyDocument': self.document,
+            'policyVersionId': self.version_id,
+            'isDefaultVersion': self.is_default,
+            'creationDate': self.create_datetime,
+            'lastModifiedDate': self.last_modified_datetime,
+            'generationId': self.version_id
+        }
+
+    def to_dict_at_creation(self):
+        return {
+            'policyArn': self.arn,
+            'policyDocument': self.document,
+            'policyVersionId': self.version_id,
+            'isDefaultVersion': self.is_default
+        }
+
+    def to_dict(self):
+        return {
+            'versionId': self.version_id,
+            'isDefaultVersion': self.is_default,
+            'createDate': self.create_datetime,
+        }
+
+
 class FakeJob(BaseModel):
     JOB_ID_REGEX_PATTERN = "[a-zA-Z0-9_-]"
     JOB_ID_REGEX = re.compile(JOB_ID_REGEX_PATTERN)
@@ -436,6 +481,57 @@ def delete_policy(self, policy_name):
         policy = self.get_policy(policy_name)
         del self.policies[policy.name]
 
+    def create_policy_version(self, policy_name, policy_document, set_as_default):
+        policy = self.get_policy(policy_name)
+        if not policy:
+            raise ResourceNotFoundException()
+        version = FakePolicyVersion(policy_name, policy_document, set_as_default, self.region_name)
+        policy.versions.append(version)
+        version.version_id = '{0}'.format(len(policy.versions))
+        if set_as_default:
+            self.set_default_policy_version(policy_name, version.version_id)
+        return version
+
+    def set_default_policy_version(self, policy_name, version_id):
+        policy = self.get_policy(policy_name)
+        if not policy:
+            raise ResourceNotFoundException()
+        for version in policy.versions:
+            if version.version_id == version_id:
+                version.is_default = True
+                policy.default_version_id = version.version_id
+                policy.document = version.document
+            else:
+                version.is_default = False
+
+    def get_policy_version(self, policy_name, version_id):
+        policy = self.get_policy(policy_name)
+        if not policy:
+            raise ResourceNotFoundException()
+        for version in policy.versions:
+            if version.version_id == version_id:
+                return version
+        raise ResourceNotFoundException()
+
+    def list_policy_versions(self, policy_name):
+        policy = self.get_policy(policy_name)
+        if not policy:
+            raise ResourceNotFoundException()
+        return policy.versions
+
+    def delete_policy_version(self, policy_name, version_id):
+        policy = self.get_policy(policy_name)
+        if not policy:
+            raise ResourceNotFoundException()
+        if version_id == policy.default_version_id:
+            raise InvalidRequestException(
+                "Cannot delete the default version of a policy")
+        for i, v in enumerate(policy.versions):
+            if v.version_id == version_id:
+                del policy.versions[i]
+                return
+        raise ResourceNotFoundException()
+
     def _get_principal(self, principal_arn):
         """
         raise ResourceNotFoundException
diff --git a/moto/iot/responses.py b/moto/iot/responses.py
index f5e25fdbc0da..66d5ddfd6f14 100644
--- a/moto/iot/responses.py
+++ b/moto/iot/responses.py
@@ -235,6 +235,40 @@ def delete_policy(self):
         )
         return json.dumps(dict())
 
+    def create_policy_version(self):
+        policy_name = self._get_param('policyName')
+        policy_document = self._get_param('policyDocument')
+        set_as_default = self._get_bool_param('setAsDefault')
+        policy_version = self.iot_backend.create_policy_version(policy_name, policy_document, set_as_default)
+
+        return json.dumps(dict(policy_version.to_dict_at_creation()))
+
+    def set_default_policy_version(self):
+        policy_name = self._get_param('policyName')
+        version_id = self._get_param('policyVersionId')
+        self.iot_backend.set_default_policy_version(policy_name, version_id)
+
+        return json.dumps(dict())
+
+    def get_policy_version(self):
+        policy_name = self._get_param('policyName')
+        version_id = self._get_param('policyVersionId')
+        policy_version = self.iot_backend.get_policy_version(policy_name, version_id)
+        return json.dumps(dict(policy_version.to_get_dict()))
+
+    def list_policy_versions(self):
+        policy_name = self._get_param('policyName')
+        policy_versions = self.iot_backend.list_policy_versions(policy_name=policy_name)
+
+        return json.dumps(dict(policyVersions=[_.to_dict() for _ in policy_versions]))
+
+    def delete_policy_version(self):
+        policy_name = self._get_param('policyName')
+        version_id = self._get_param('policyVersionId')
+        self.iot_backend.delete_policy_version(policy_name, version_id)
+
+        return json.dumps(dict())
+
     def attach_policy(self):
         policy_name = self._get_param("policyName")
         principal = self._get_param('target')
diff --git a/tests/test_iot/test_iot.py b/tests/test_iot/test_iot.py
index 7fbd66963472..758ff8940378 100644
--- a/tests/test_iot/test_iot.py
+++ b/tests/test_iot/test_iot.py
@@ -1,8 +1,7 @@
 from __future__ import unicode_literals
 
 import json
-import sure # noqa
-
+import sure #noqa
 import boto3
 from moto import mock_iot
@@ -52,6 +51,85 @@ def test_list_attached_policies():
     policies['policies'].should.be.empty
 
+
+@mock_iot
+def test_policy_versions():
+    client = boto3.client('iot', region_name='ap-northeast-1')
+    policy_name = 'my-policy'
+    doc = '{}'
+
+    policy = client.create_policy(policyName=policy_name, policyDocument=doc)
+    policy.should.have.key('policyName').which.should.equal(policy_name)
+    policy.should.have.key('policyArn').which.should_not.be.none
+    policy.should.have.key('policyDocument').which.should.equal(json.dumps({}))
+    policy.should.have.key('policyVersionId').which.should.equal('1')
+
+    policy = client.get_policy(policyName=policy_name)
+    policy.should.have.key('policyName').which.should.equal(policy_name)
+    policy.should.have.key('policyArn').which.should_not.be.none
+    policy.should.have.key('policyDocument').which.should.equal(json.dumps({}))
+    policy.should.have.key('defaultVersionId').which.should.equal('1')
+
+    policy1 = client.create_policy_version(policyName=policy_name, policyDocument=json.dumps({'version': 'version_1'}),
+                                           setAsDefault=True)
+    policy1.should.have.key('policyArn').which.should_not.be.none
+    policy1.should.have.key('policyDocument').which.should.equal(json.dumps({'version': 'version_1'}))
+    policy1.should.have.key('policyVersionId').which.should.equal('2')
+    policy1.should.have.key('isDefaultVersion').which.should.equal(True)
+
+    policy2 = client.create_policy_version(policyName=policy_name, policyDocument=json.dumps({'version': 'version_2'}),
+                                           setAsDefault=False)
+    policy2.should.have.key('policyArn').which.should_not.be.none
+    policy2.should.have.key('policyDocument').which.should.equal(json.dumps({'version': 'version_2'}))
+    policy2.should.have.key('policyVersionId').which.should.equal('3')
+    policy2.should.have.key('isDefaultVersion').which.should.equal(False)
+
+    policy = client.get_policy(policyName=policy_name)
+    policy.should.have.key('policyName').which.should.equal(policy_name)
+    policy.should.have.key('policyArn').which.should_not.be.none
+    policy.should.have.key('policyDocument').which.should.equal(json.dumps({'version': 'version_1'}))
+    policy.should.have.key('defaultVersionId').which.should.equal(policy1['policyVersionId'])
+
+    policy_versions = client.list_policy_versions(policyName=policy_name)
+    policy_versions.should.have.key('policyVersions').which.should.have.length_of(3)
+    list(map(lambda item: item['isDefaultVersion'], policy_versions['policyVersions'])).count(True).should.equal(1)
+    default_policy = list(filter(lambda item: item['isDefaultVersion'], policy_versions['policyVersions']))
+    default_policy[0].should.have.key('versionId').which.should.equal(policy1['policyVersionId'])
+
+    policy = client.get_policy(policyName=policy_name)
+    policy.should.have.key('policyName').which.should.equal(policy_name)
+    policy.should.have.key('policyArn').which.should_not.be.none
+    policy.should.have.key('policyDocument').which.should.equal(json.dumps({'version': 'version_1'}))
+    policy.should.have.key('defaultVersionId').which.should.equal(policy1['policyVersionId'])
+
+    client.set_default_policy_version(policyName=policy_name, policyVersionId=policy2['policyVersionId'])
+    policy_versions = client.list_policy_versions(policyName=policy_name)
+    policy_versions.should.have.key('policyVersions').which.should.have.length_of(3)
+    list(map(lambda item: item['isDefaultVersion'], policy_versions['policyVersions'])).count(True).should.equal(1)
+    default_policy = list(filter(lambda item: item['isDefaultVersion'], policy_versions['policyVersions']))
+    default_policy[0].should.have.key('versionId').which.should.equal(policy2['policyVersionId'])
+
+    policy = client.get_policy(policyName=policy_name)
+    policy.should.have.key('policyName').which.should.equal(policy_name)
+    policy.should.have.key('policyArn').which.should_not.be.none
+    policy.should.have.key('policyDocument').which.should.equal(json.dumps({'version': 'version_2'}))
+    policy.should.have.key('defaultVersionId').which.should.equal(policy2['policyVersionId'])
+
+    client.delete_policy_version(policyName=policy_name, policyVersionId='1')
+    policy_versions = client.list_policy_versions(policyName=policy_name)
+    policy_versions.should.have.key('policyVersions').which.should.have.length_of(2)
+
+    client.delete_policy_version(policyName=policy_name, policyVersionId=policy1['policyVersionId'])
+    policy_versions = client.list_policy_versions(policyName=policy_name)
+    policy_versions.should.have.key('policyVersions').which.should.have.length_of(1)
+
+    # should fail, as this is the default version; use delete_policy instead
+    try:
+        client.delete_policy_version(policyName=policy_name, policyVersionId=policy2['policyVersionId'])
+        assert False, 'Should have failed in previous call'
+    except Exception as exception:
+        exception.response['Error']['Message'].should.equal('Cannot delete the default version of a policy')
+
+
 @mock_iot
 def test_things():
     client = boto3.client('iot', region_name='ap-northeast-1')

From 1c7becb4f69b8ae171f8e72dedd997d2d31e6d73 Mon Sep 17 00:00:00 2001
From: Stephan Huber
Date: Thu, 25 Oct 2018 12:19:35 +0200
Subject: [PATCH 008/658] :rotating_light: linting error

---
 moto/iot/responses.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/moto/iot/responses.py b/moto/iot/responses.py
index 66d5ddfd6f14..3ef5bc93ee87 100644
--- a/moto/iot/responses.py
+++ b/moto/iot/responses.py
@@ -1,7 +1,7 @@
 from __future__ import unicode_literals
 
 import json
-from six.moves.urllib.parse import parse_qs, urlparse, unquote
+from six.moves.urllib.parse import unquote
 
 from moto.core.responses import BaseResponse
 from .models import iot_backends

From 181b5539f670ae50b60d92a3e694cba62e73ce90 Mon Sep 17 00:00:00 2001
From: Stephan Huber
Date: Thu, 25 Oct 2018 12:45:35 +0200
Subject: [PATCH 009/658] :memo: update coverage to reflect changes made

---
 IMPLEMENTATION_COVERAGE.md | 9214 +++++++++++++++++++-----------------
 1 file changed, 4786 insertions(+), 4428 deletions(-)

diff --git a/IMPLEMENTATION_COVERAGE.md b/IMPLEMENTATION_COVERAGE.md
index 7c68c0e31cb8..a153b92fcdb5 100644
--- a/IMPLEMENTATION_COVERAGE.md
+++ b/IMPLEMENTATION_COVERAGE.md
@@ -1,4428 +1,4786 @@
-
-## acm - 41% implemented
-- [X] add_tags_to_certificate
-- [X] delete_certificate
-- [ ] describe_certificate
-- [ ] export_certificate
-- [X] get_certificate
-- [ ] import_certificate
-- [ ] list_certificates
-- [ ] list_tags_for_certificate
-- [X] remove_tags_from_certificate
-- [X] request_certificate
-- [ ] resend_validation_email
-- [ ] update_certificate_options
-
-## acm-pca - 0% implemented
-- [ ] create_certificate_authority
-- [ ] create_certificate_authority_audit_report
-- [ ] delete_certificate_authority
-- [ ] describe_certificate_authority
-- [ ] describe_certificate_authority_audit_report
-- [ ] get_certificate
-- [ ] get_certificate_authority_certificate
-- [ ] get_certificate_authority_csr
-- [ ] import_certificate_authority_certificate
-- [ ] issue_certificate
-- [ ] list_certificate_authorities
-- [ ] list_tags
-- [ ] revoke_certificate
-- [ ] 
tag_certificate_authority -- [ ] untag_certificate_authority -- [ ] update_certificate_authority - -## alexaforbusiness - 0% implemented -- [ ] associate_contact_with_address_book -- [ ] associate_device_with_room -- [ ] associate_skill_group_with_room -- [ ] create_address_book -- [ ] create_contact -- [ ] create_profile -- [ ] create_room -- [ ] create_skill_group -- [ ] create_user -- [ ] delete_address_book -- [ ] delete_contact -- [ ] delete_profile -- [ ] delete_room -- [ ] delete_room_skill_parameter -- [ ] delete_skill_group -- [ ] delete_user -- [ ] disassociate_contact_from_address_book -- [ ] disassociate_device_from_room -- [ ] disassociate_skill_group_from_room -- [ ] get_address_book -- [ ] get_contact -- [ ] get_device -- [ ] get_profile -- [ ] get_room -- [ ] get_room_skill_parameter -- [ ] get_skill_group -- [ ] list_skills -- [ ] list_tags -- [ ] put_room_skill_parameter -- [ ] resolve_room -- [ ] revoke_invitation -- [ ] search_address_books -- [ ] search_contacts -- [ ] search_devices -- [ ] search_profiles -- [ ] search_rooms -- [ ] search_skill_groups -- [ ] search_users -- [ ] send_invitation -- [ ] start_device_sync -- [ ] tag_resource -- [ ] untag_resource -- [ ] update_address_book -- [ ] update_contact -- [ ] update_device -- [ ] update_profile -- [ ] update_room -- [ ] update_skill_group - -## apigateway - 24% implemented -- [ ] create_api_key -- [ ] create_authorizer -- [ ] create_base_path_mapping -- [X] create_deployment -- [ ] create_documentation_part -- [ ] create_documentation_version -- [ ] create_domain_name -- [ ] create_model -- [ ] create_request_validator -- [X] create_resource -- [X] create_rest_api -- [X] create_stage -- [X] create_usage_plan -- [X] create_usage_plan_key -- [ ] create_vpc_link -- [ ] delete_api_key -- [ ] delete_authorizer -- [ ] delete_base_path_mapping -- [ ] delete_client_certificate -- [X] delete_deployment -- [ ] delete_documentation_part -- [ ] delete_documentation_version -- [ ] delete_domain_name -- [ ] delete_gateway_response -- [X] delete_integration -- [X] delete_integration_response -- [ ] delete_method -- [X] delete_method_response -- [ ] delete_model -- [ ] delete_request_validator -- [X] delete_resource -- [X] delete_rest_api -- [ ] delete_stage -- [X] delete_usage_plan -- [X] delete_usage_plan_key -- [ ] delete_vpc_link -- [ ] flush_stage_authorizers_cache -- [ ] flush_stage_cache -- [ ] generate_client_certificate -- [ ] get_account -- [ ] get_api_key -- [ ] get_api_keys -- [ ] get_authorizer -- [ ] get_authorizers -- [ ] get_base_path_mapping -- [ ] get_base_path_mappings -- [ ] get_client_certificate -- [ ] get_client_certificates -- [X] get_deployment -- [X] get_deployments -- [ ] get_documentation_part -- [ ] get_documentation_parts -- [ ] get_documentation_version -- [ ] get_documentation_versions -- [ ] get_domain_name -- [ ] get_domain_names -- [ ] get_export -- [ ] get_gateway_response -- [ ] get_gateway_responses -- [X] get_integration -- [X] get_integration_response -- [X] get_method -- [X] get_method_response -- [ ] get_model -- [ ] get_model_template -- [ ] get_models -- [ ] get_request_validator -- [ ] get_request_validators -- [X] get_resource -- [ ] get_resources -- [X] get_rest_api -- [ ] get_rest_apis -- [ ] get_sdk -- [ ] get_sdk_type -- [ ] get_sdk_types -- [X] get_stage -- [X] get_stages -- [ ] get_tags -- [ ] get_usage -- [X] get_usage_plan -- [X] get_usage_plan_key -- [X] get_usage_plan_keys -- [X] get_usage_plans -- [ ] get_vpc_link -- [ ] get_vpc_links -- [ ] import_api_keys -- [ ] 
import_documentation_parts -- [ ] import_rest_api -- [ ] put_gateway_response -- [ ] put_integration -- [ ] put_integration_response -- [ ] put_method -- [ ] put_method_response -- [ ] put_rest_api -- [ ] tag_resource -- [ ] test_invoke_authorizer -- [ ] test_invoke_method -- [ ] untag_resource -- [ ] update_account -- [ ] update_api_key -- [ ] update_authorizer -- [ ] update_base_path_mapping -- [ ] update_client_certificate -- [ ] update_deployment -- [ ] update_documentation_part -- [ ] update_documentation_version -- [ ] update_domain_name -- [ ] update_gateway_response -- [ ] update_integration -- [ ] update_integration_response -- [ ] update_method -- [ ] update_method_response -- [ ] update_model -- [ ] update_request_validator -- [ ] update_resource -- [ ] update_rest_api -- [X] update_stage -- [ ] update_usage -- [ ] update_usage_plan -- [ ] update_vpc_link - -## application-autoscaling - 0% implemented -- [ ] delete_scaling_policy -- [ ] delete_scheduled_action -- [ ] deregister_scalable_target -- [ ] describe_scalable_targets -- [ ] describe_scaling_activities -- [ ] describe_scaling_policies -- [ ] describe_scheduled_actions -- [ ] put_scaling_policy -- [ ] put_scheduled_action -- [ ] register_scalable_target - -## appstream - 0% implemented -- [ ] associate_fleet -- [ ] copy_image -- [ ] create_directory_config -- [ ] create_fleet -- [ ] create_image_builder -- [ ] create_image_builder_streaming_url -- [ ] create_stack -- [ ] create_streaming_url -- [ ] delete_directory_config -- [ ] delete_fleet -- [ ] delete_image -- [ ] delete_image_builder -- [ ] delete_stack -- [ ] describe_directory_configs -- [ ] describe_fleets -- [ ] describe_image_builders -- [ ] describe_images -- [ ] describe_sessions -- [ ] describe_stacks -- [ ] disassociate_fleet -- [ ] expire_session -- [ ] list_associated_fleets -- [ ] list_associated_stacks -- [ ] list_tags_for_resource -- [ ] start_fleet -- [ ] start_image_builder -- [ ] stop_fleet -- [ ] stop_image_builder -- [ ] tag_resource -- [ ] untag_resource -- [ ] update_directory_config -- [ ] update_fleet -- [ ] update_stack - -## appsync - 0% implemented -- [ ] create_api_key -- [ ] create_data_source -- [ ] create_graphql_api -- [ ] create_resolver -- [ ] create_type -- [ ] delete_api_key -- [ ] delete_data_source -- [ ] delete_graphql_api -- [ ] delete_resolver -- [ ] delete_type -- [ ] get_data_source -- [ ] get_graphql_api -- [ ] get_introspection_schema -- [ ] get_resolver -- [ ] get_schema_creation_status -- [ ] get_type -- [ ] list_api_keys -- [ ] list_data_sources -- [ ] list_graphql_apis -- [ ] list_resolvers -- [ ] list_types -- [ ] start_schema_creation -- [ ] update_api_key -- [ ] update_data_source -- [ ] update_graphql_api -- [ ] update_resolver -- [ ] update_type - -## athena - 0% implemented -- [ ] batch_get_named_query -- [ ] batch_get_query_execution -- [ ] create_named_query -- [ ] delete_named_query -- [ ] get_named_query -- [ ] get_query_execution -- [ ] get_query_results -- [ ] list_named_queries -- [ ] list_query_executions -- [ ] start_query_execution -- [ ] stop_query_execution - -## autoscaling - 44% implemented -- [X] attach_instances -- [X] attach_load_balancer_target_groups -- [X] attach_load_balancers -- [ ] complete_lifecycle_action -- [X] create_auto_scaling_group -- [X] create_launch_configuration -- [X] create_or_update_tags -- [X] delete_auto_scaling_group -- [X] delete_launch_configuration -- [ ] delete_lifecycle_hook -- [ ] delete_notification_configuration -- [X] delete_policy -- [ ] delete_scheduled_action -- 
[ ] delete_tags -- [ ] describe_account_limits -- [ ] describe_adjustment_types -- [X] describe_auto_scaling_groups -- [X] describe_auto_scaling_instances -- [ ] describe_auto_scaling_notification_types -- [X] describe_launch_configurations -- [ ] describe_lifecycle_hook_types -- [ ] describe_lifecycle_hooks -- [X] describe_load_balancer_target_groups -- [X] describe_load_balancers -- [ ] describe_metric_collection_types -- [ ] describe_notification_configurations -- [X] describe_policies -- [ ] describe_scaling_activities -- [ ] describe_scaling_process_types -- [ ] describe_scheduled_actions -- [ ] describe_tags -- [ ] describe_termination_policy_types -- [X] detach_instances -- [X] detach_load_balancer_target_groups -- [X] detach_load_balancers -- [ ] disable_metrics_collection -- [ ] enable_metrics_collection -- [ ] enter_standby -- [X] execute_policy -- [ ] exit_standby -- [ ] put_lifecycle_hook -- [ ] put_notification_configuration -- [ ] put_scaling_policy -- [ ] put_scheduled_update_group_action -- [ ] record_lifecycle_action_heartbeat -- [ ] resume_processes -- [X] set_desired_capacity -- [X] set_instance_health -- [ ] set_instance_protection -- [X] suspend_processes -- [ ] terminate_instance_in_auto_scaling_group -- [X] update_auto_scaling_group - -## autoscaling-plans - 0% implemented -- [ ] create_scaling_plan -- [ ] delete_scaling_plan -- [ ] describe_scaling_plan_resources -- [ ] describe_scaling_plans - -## batch - 93% implemented -- [ ] cancel_job -- [X] create_compute_environment -- [X] create_job_queue -- [X] delete_compute_environment -- [X] delete_job_queue -- [X] deregister_job_definition -- [X] describe_compute_environments -- [X] describe_job_definitions -- [X] describe_job_queues -- [X] describe_jobs -- [X] list_jobs -- [X] register_job_definition -- [X] submit_job -- [X] terminate_job -- [X] update_compute_environment -- [X] update_job_queue - -## budgets - 0% implemented -- [ ] create_budget -- [ ] create_notification -- [ ] create_subscriber -- [ ] delete_budget -- [ ] delete_notification -- [ ] delete_subscriber -- [ ] describe_budget -- [ ] describe_budgets -- [ ] describe_notifications_for_budget -- [ ] describe_subscribers_for_notification -- [ ] update_budget -- [ ] update_notification -- [ ] update_subscriber - -## ce - 0% implemented -- [ ] get_cost_and_usage -- [ ] get_dimension_values -- [ ] get_reservation_coverage -- [ ] get_reservation_purchase_recommendation -- [ ] get_reservation_utilization -- [ ] get_tags - -## cloud9 - 0% implemented -- [ ] create_environment_ec2 -- [ ] create_environment_membership -- [ ] delete_environment -- [ ] delete_environment_membership -- [ ] describe_environment_memberships -- [ ] describe_environment_status -- [ ] describe_environments -- [ ] list_environments -- [ ] update_environment -- [ ] update_environment_membership - -## clouddirectory - 0% implemented -- [ ] add_facet_to_object -- [ ] apply_schema -- [ ] attach_object -- [ ] attach_policy -- [ ] attach_to_index -- [ ] attach_typed_link -- [ ] batch_read -- [ ] batch_write -- [ ] create_directory -- [ ] create_facet -- [ ] create_index -- [ ] create_object -- [ ] create_schema -- [ ] create_typed_link_facet -- [ ] delete_directory -- [ ] delete_facet -- [ ] delete_object -- [ ] delete_schema -- [ ] delete_typed_link_facet -- [ ] detach_from_index -- [ ] detach_object -- [ ] detach_policy -- [ ] detach_typed_link -- [ ] disable_directory -- [ ] enable_directory -- [ ] get_applied_schema_version -- [ ] get_directory -- [ ] get_facet -- [ ] get_object_attributes -- 
[ ] get_object_information -- [ ] get_schema_as_json -- [ ] get_typed_link_facet_information -- [ ] list_applied_schema_arns -- [ ] list_attached_indices -- [ ] list_development_schema_arns -- [ ] list_directories -- [ ] list_facet_attributes -- [ ] list_facet_names -- [ ] list_incoming_typed_links -- [ ] list_index -- [ ] list_object_attributes -- [ ] list_object_children -- [ ] list_object_parent_paths -- [ ] list_object_parents -- [ ] list_object_policies -- [ ] list_outgoing_typed_links -- [ ] list_policy_attachments -- [ ] list_published_schema_arns -- [ ] list_tags_for_resource -- [ ] list_typed_link_facet_attributes -- [ ] list_typed_link_facet_names -- [ ] lookup_policy -- [ ] publish_schema -- [ ] put_schema_from_json -- [ ] remove_facet_from_object -- [ ] tag_resource -- [ ] untag_resource -- [ ] update_facet -- [ ] update_object_attributes -- [ ] update_schema -- [ ] update_typed_link_facet -- [ ] upgrade_applied_schema -- [ ] upgrade_published_schema - -## cloudformation - 21% implemented -- [ ] cancel_update_stack -- [ ] continue_update_rollback -- [X] create_change_set -- [X] create_stack -- [ ] create_stack_instances -- [ ] create_stack_set -- [ ] delete_change_set -- [X] delete_stack -- [ ] delete_stack_instances -- [ ] delete_stack_set -- [ ] describe_account_limits -- [ ] describe_change_set -- [ ] describe_stack_events -- [ ] describe_stack_instance -- [ ] describe_stack_resource -- [ ] describe_stack_resources -- [ ] describe_stack_set -- [ ] describe_stack_set_operation -- [X] describe_stacks -- [ ] estimate_template_cost -- [X] execute_change_set -- [ ] get_stack_policy -- [ ] get_template -- [ ] get_template_summary -- [ ] list_change_sets -- [X] list_exports -- [ ] list_imports -- [ ] list_stack_instances -- [X] list_stack_resources -- [ ] list_stack_set_operation_results -- [ ] list_stack_set_operations -- [ ] list_stack_sets -- [X] list_stacks -- [ ] set_stack_policy -- [ ] signal_resource -- [ ] stop_stack_set_operation -- [X] update_stack -- [ ] update_stack_instances -- [ ] update_stack_set -- [ ] update_termination_protection -- [ ] validate_template - -## cloudfront - 0% implemented -- [ ] create_cloud_front_origin_access_identity -- [ ] create_distribution -- [ ] create_distribution_with_tags -- [ ] create_field_level_encryption_config -- [ ] create_field_level_encryption_profile -- [ ] create_invalidation -- [ ] create_public_key -- [ ] create_streaming_distribution -- [ ] create_streaming_distribution_with_tags -- [ ] delete_cloud_front_origin_access_identity -- [ ] delete_distribution -- [ ] delete_field_level_encryption_config -- [ ] delete_field_level_encryption_profile -- [ ] delete_public_key -- [ ] delete_service_linked_role -- [ ] delete_streaming_distribution -- [ ] get_cloud_front_origin_access_identity -- [ ] get_cloud_front_origin_access_identity_config -- [ ] get_distribution -- [ ] get_distribution_config -- [ ] get_field_level_encryption -- [ ] get_field_level_encryption_config -- [ ] get_field_level_encryption_profile -- [ ] get_field_level_encryption_profile_config -- [ ] get_invalidation -- [ ] get_public_key -- [ ] get_public_key_config -- [ ] get_streaming_distribution -- [ ] get_streaming_distribution_config -- [ ] list_cloud_front_origin_access_identities -- [ ] list_distributions -- [ ] list_distributions_by_web_acl_id -- [ ] list_field_level_encryption_configs -- [ ] list_field_level_encryption_profiles -- [ ] list_invalidations -- [ ] list_public_keys -- [ ] list_streaming_distributions -- [ ] list_tags_for_resource -- [ ] 
tag_resource -- [ ] untag_resource -- [ ] update_cloud_front_origin_access_identity -- [ ] update_distribution -- [ ] update_field_level_encryption_config -- [ ] update_field_level_encryption_profile -- [ ] update_public_key -- [ ] update_streaming_distribution - -## cloudhsm - 0% implemented -- [ ] add_tags_to_resource -- [ ] create_hapg -- [ ] create_hsm -- [ ] create_luna_client -- [ ] delete_hapg -- [ ] delete_hsm -- [ ] delete_luna_client -- [ ] describe_hapg -- [ ] describe_hsm -- [ ] describe_luna_client -- [ ] get_config -- [ ] list_available_zones -- [ ] list_hapgs -- [ ] list_hsms -- [ ] list_luna_clients -- [ ] list_tags_for_resource -- [ ] modify_hapg -- [ ] modify_hsm -- [ ] modify_luna_client -- [ ] remove_tags_from_resource - -## cloudhsmv2 - 0% implemented -- [ ] create_cluster -- [ ] create_hsm -- [ ] delete_cluster -- [ ] delete_hsm -- [ ] describe_backups -- [ ] describe_clusters -- [ ] initialize_cluster -- [ ] list_tags -- [ ] tag_resource -- [ ] untag_resource - -## cloudsearch - 0% implemented -- [ ] build_suggesters -- [ ] create_domain -- [ ] define_analysis_scheme -- [ ] define_expression -- [ ] define_index_field -- [ ] define_suggester -- [ ] delete_analysis_scheme -- [ ] delete_domain -- [ ] delete_expression -- [ ] delete_index_field -- [ ] delete_suggester -- [ ] describe_analysis_schemes -- [ ] describe_availability_options -- [ ] describe_domains -- [ ] describe_expressions -- [ ] describe_index_fields -- [ ] describe_scaling_parameters -- [ ] describe_service_access_policies -- [ ] describe_suggesters -- [ ] index_documents -- [ ] list_domain_names -- [ ] update_availability_options -- [ ] update_scaling_parameters -- [ ] update_service_access_policies - -## cloudsearchdomain - 0% implemented -- [ ] search -- [ ] suggest -- [ ] upload_documents - -## cloudtrail - 0% implemented -- [ ] add_tags -- [ ] create_trail -- [ ] delete_trail -- [ ] describe_trails -- [ ] get_event_selectors -- [ ] get_trail_status -- [ ] list_public_keys -- [ ] list_tags -- [ ] lookup_events -- [ ] put_event_selectors -- [ ] remove_tags -- [ ] start_logging -- [ ] stop_logging -- [ ] update_trail - -## cloudwatch - 56% implemented -- [X] delete_alarms -- [X] delete_dashboards -- [ ] describe_alarm_history -- [ ] describe_alarms -- [ ] describe_alarms_for_metric -- [ ] disable_alarm_actions -- [ ] enable_alarm_actions -- [X] get_dashboard -- [ ] get_metric_data -- [X] get_metric_statistics -- [X] list_dashboards -- [ ] list_metrics -- [X] put_dashboard -- [X] put_metric_alarm -- [X] put_metric_data -- [X] set_alarm_state - -## codebuild - 0% implemented -- [ ] batch_delete_builds -- [ ] batch_get_builds -- [ ] batch_get_projects -- [ ] create_project -- [ ] create_webhook -- [ ] delete_project -- [ ] delete_webhook -- [ ] invalidate_project_cache -- [ ] list_builds -- [ ] list_builds_for_project -- [ ] list_curated_environment_images -- [ ] list_projects -- [ ] start_build -- [ ] stop_build -- [ ] update_project -- [ ] update_webhook - -## codecommit - 0% implemented -- [ ] batch_get_repositories -- [ ] create_branch -- [ ] create_pull_request -- [ ] create_repository -- [ ] delete_branch -- [ ] delete_comment_content -- [ ] delete_repository -- [ ] describe_pull_request_events -- [ ] get_blob -- [ ] get_branch -- [ ] get_comment -- [ ] get_comments_for_compared_commit -- [ ] get_comments_for_pull_request -- [ ] get_commit -- [ ] get_differences -- [ ] get_merge_conflicts -- [ ] get_pull_request -- [ ] get_repository -- [ ] get_repository_triggers -- [ ] list_branches -- [ ] 
list_pull_requests -- [ ] list_repositories -- [ ] merge_pull_request_by_fast_forward -- [ ] post_comment_for_compared_commit -- [ ] post_comment_for_pull_request -- [ ] post_comment_reply -- [ ] put_file -- [ ] put_repository_triggers -- [ ] test_repository_triggers -- [ ] update_comment -- [ ] update_default_branch -- [ ] update_pull_request_description -- [ ] update_pull_request_status -- [ ] update_pull_request_title -- [ ] update_repository_description -- [ ] update_repository_name - -## codedeploy - 0% implemented -- [ ] add_tags_to_on_premises_instances -- [ ] batch_get_application_revisions -- [ ] batch_get_applications -- [ ] batch_get_deployment_groups -- [ ] batch_get_deployment_instances -- [ ] batch_get_deployments -- [ ] batch_get_on_premises_instances -- [ ] continue_deployment -- [ ] create_application -- [ ] create_deployment -- [ ] create_deployment_config -- [ ] create_deployment_group -- [ ] delete_application -- [ ] delete_deployment_config -- [ ] delete_deployment_group -- [ ] delete_git_hub_account_token -- [ ] deregister_on_premises_instance -- [ ] get_application -- [ ] get_application_revision -- [ ] get_deployment -- [ ] get_deployment_config -- [ ] get_deployment_group -- [ ] get_deployment_instance -- [ ] get_on_premises_instance -- [ ] list_application_revisions -- [ ] list_applications -- [ ] list_deployment_configs -- [ ] list_deployment_groups -- [ ] list_deployment_instances -- [ ] list_deployments -- [ ] list_git_hub_account_token_names -- [ ] list_on_premises_instances -- [ ] put_lifecycle_event_hook_execution_status -- [ ] register_application_revision -- [ ] register_on_premises_instance -- [ ] remove_tags_from_on_premises_instances -- [ ] skip_wait_time_for_instance_termination -- [ ] stop_deployment -- [ ] update_application -- [ ] update_deployment_group - -## codepipeline - 0% implemented -- [ ] acknowledge_job -- [ ] acknowledge_third_party_job -- [ ] create_custom_action_type -- [ ] create_pipeline -- [ ] delete_custom_action_type -- [ ] delete_pipeline -- [ ] disable_stage_transition -- [ ] enable_stage_transition -- [ ] get_job_details -- [ ] get_pipeline -- [ ] get_pipeline_execution -- [ ] get_pipeline_state -- [ ] get_third_party_job_details -- [ ] list_action_types -- [ ] list_pipeline_executions -- [ ] list_pipelines -- [ ] poll_for_jobs -- [ ] poll_for_third_party_jobs -- [ ] put_action_revision -- [ ] put_approval_result -- [ ] put_job_failure_result -- [ ] put_job_success_result -- [ ] put_third_party_job_failure_result -- [ ] put_third_party_job_success_result -- [ ] retry_stage_execution -- [ ] start_pipeline_execution -- [ ] update_pipeline - -## codestar - 0% implemented -- [ ] associate_team_member -- [ ] create_project -- [ ] create_user_profile -- [ ] delete_project -- [ ] delete_user_profile -- [ ] describe_project -- [ ] describe_user_profile -- [ ] disassociate_team_member -- [ ] list_projects -- [ ] list_resources -- [ ] list_tags_for_project -- [ ] list_team_members -- [ ] list_user_profiles -- [ ] tag_project -- [ ] untag_project -- [ ] update_project -- [ ] update_team_member -- [ ] update_user_profile - -## cognito-identity - 0% implemented -- [ ] create_identity_pool -- [ ] delete_identities -- [ ] delete_identity_pool -- [ ] describe_identity -- [ ] describe_identity_pool -- [ ] get_credentials_for_identity -- [ ] get_id -- [ ] get_identity_pool_roles -- [ ] get_open_id_token -- [ ] get_open_id_token_for_developer_identity -- [ ] list_identities -- [ ] list_identity_pools -- [ ] lookup_developer_identity -- [ ] 
merge_developer_identities -- [ ] set_identity_pool_roles -- [ ] unlink_developer_identity -- [ ] unlink_identity -- [ ] update_identity_pool - -## cognito-idp - 0% implemented -- [ ] add_custom_attributes -- [ ] admin_add_user_to_group -- [ ] admin_confirm_sign_up -- [ ] admin_create_user -- [ ] admin_delete_user -- [ ] admin_delete_user_attributes -- [ ] admin_disable_provider_for_user -- [X] admin_disable_user -- [X] admin_enable_user -- [ ] admin_forget_device -- [ ] admin_get_device -- [ ] admin_get_user -- [ ] admin_initiate_auth -- [ ] admin_link_provider_for_user -- [ ] admin_list_devices -- [ ] admin_list_groups_for_user -- [ ] admin_list_user_auth_events -- [ ] admin_remove_user_from_group -- [ ] admin_reset_user_password -- [ ] admin_respond_to_auth_challenge -- [ ] admin_set_user_mfa_preference -- [ ] admin_set_user_settings -- [ ] admin_update_auth_event_feedback -- [ ] admin_update_device_status -- [ ] admin_update_user_attributes -- [ ] admin_user_global_sign_out -- [ ] associate_software_token -- [ ] change_password -- [ ] confirm_device -- [ ] confirm_forgot_password -- [ ] confirm_sign_up -- [ ] create_group -- [ ] create_identity_provider -- [ ] create_resource_server -- [ ] create_user_import_job -- [ ] create_user_pool -- [ ] create_user_pool_client -- [ ] create_user_pool_domain -- [ ] delete_group -- [ ] delete_identity_provider -- [ ] delete_resource_server -- [ ] delete_user -- [ ] delete_user_attributes -- [ ] delete_user_pool -- [ ] delete_user_pool_client -- [ ] delete_user_pool_domain -- [ ] describe_identity_provider -- [ ] describe_resource_server -- [ ] describe_risk_configuration -- [ ] describe_user_import_job -- [ ] describe_user_pool -- [ ] describe_user_pool_client -- [ ] describe_user_pool_domain -- [ ] forget_device -- [ ] forgot_password -- [ ] get_csv_header -- [ ] get_device -- [ ] get_group -- [ ] get_identity_provider_by_identifier -- [ ] get_signing_certificate -- [ ] get_ui_customization -- [ ] get_user -- [ ] get_user_attribute_verification_code -- [ ] get_user_pool_mfa_config -- [ ] global_sign_out -- [ ] initiate_auth -- [ ] list_devices -- [ ] list_groups -- [ ] list_identity_providers -- [ ] list_resource_servers -- [ ] list_user_import_jobs -- [ ] list_user_pool_clients -- [ ] list_user_pools -- [ ] list_users -- [ ] list_users_in_group -- [ ] resend_confirmation_code -- [ ] respond_to_auth_challenge -- [ ] set_risk_configuration -- [ ] set_ui_customization -- [ ] set_user_mfa_preference -- [ ] set_user_pool_mfa_config -- [ ] set_user_settings -- [ ] sign_up -- [ ] start_user_import_job -- [ ] stop_user_import_job -- [ ] update_auth_event_feedback -- [ ] update_device_status -- [ ] update_group -- [ ] update_identity_provider -- [ ] update_resource_server -- [ ] update_user_attributes -- [ ] update_user_pool -- [ ] update_user_pool_client -- [ ] verify_software_token -- [ ] verify_user_attribute - -## cognito-sync - 0% implemented -- [ ] bulk_publish -- [ ] delete_dataset -- [ ] describe_dataset -- [ ] describe_identity_pool_usage -- [ ] describe_identity_usage -- [ ] get_bulk_publish_details -- [ ] get_cognito_events -- [ ] get_identity_pool_configuration -- [ ] list_datasets -- [ ] list_identity_pool_usage -- [ ] list_records -- [ ] register_device -- [ ] set_cognito_events -- [ ] set_identity_pool_configuration -- [ ] subscribe_to_dataset -- [ ] unsubscribe_from_dataset -- [ ] update_records - -## comprehend - 0% implemented -- [ ] batch_detect_dominant_language -- [ ] batch_detect_entities -- [ ] batch_detect_key_phrases -- [ ] 
batch_detect_sentiment -- [ ] describe_topics_detection_job -- [ ] detect_dominant_language -- [ ] detect_entities -- [ ] detect_key_phrases -- [ ] detect_sentiment -- [ ] list_topics_detection_jobs -- [ ] start_topics_detection_job - -## config - 0% implemented -- [ ] batch_get_resource_config -- [ ] delete_aggregation_authorization -- [ ] delete_config_rule -- [ ] delete_configuration_aggregator -- [ ] delete_configuration_recorder -- [ ] delete_delivery_channel -- [ ] delete_evaluation_results -- [ ] delete_pending_aggregation_request -- [ ] deliver_config_snapshot -- [ ] describe_aggregate_compliance_by_config_rules -- [ ] describe_aggregation_authorizations -- [ ] describe_compliance_by_config_rule -- [ ] describe_compliance_by_resource -- [ ] describe_config_rule_evaluation_status -- [ ] describe_config_rules -- [ ] describe_configuration_aggregator_sources_status -- [ ] describe_configuration_aggregators -- [ ] describe_configuration_recorder_status -- [ ] describe_configuration_recorders -- [ ] describe_delivery_channel_status -- [ ] describe_delivery_channels -- [ ] describe_pending_aggregation_requests -- [ ] get_aggregate_compliance_details_by_config_rule -- [ ] get_aggregate_config_rule_compliance_summary -- [ ] get_compliance_details_by_config_rule -- [ ] get_compliance_details_by_resource -- [ ] get_compliance_summary_by_config_rule -- [ ] get_compliance_summary_by_resource_type -- [ ] get_discovered_resource_counts -- [ ] get_resource_config_history -- [ ] list_discovered_resources -- [ ] put_aggregation_authorization -- [ ] put_config_rule -- [ ] put_configuration_aggregator -- [ ] put_configuration_recorder -- [ ] put_delivery_channel -- [ ] put_evaluations -- [ ] start_config_rules_evaluation -- [ ] start_configuration_recorder -- [ ] stop_configuration_recorder - -## connect - 0% implemented -- [ ] start_outbound_voice_contact -- [ ] stop_contact - -## cur - 0% implemented -- [ ] delete_report_definition -- [ ] describe_report_definitions -- [ ] put_report_definition - -## datapipeline - 42% implemented -- [X] activate_pipeline -- [ ] add_tags -- [X] create_pipeline -- [ ] deactivate_pipeline -- [X] delete_pipeline -- [X] describe_objects -- [X] describe_pipelines -- [ ] evaluate_expression -- [X] get_pipeline_definition -- [X] list_pipelines -- [ ] poll_for_task -- [X] put_pipeline_definition -- [ ] query_objects -- [ ] remove_tags -- [ ] report_task_progress -- [ ] report_task_runner_heartbeat -- [ ] set_status -- [ ] set_task_status -- [ ] validate_pipeline_definition - -## dax - 0% implemented -- [ ] create_cluster -- [ ] create_parameter_group -- [ ] create_subnet_group -- [ ] decrease_replication_factor -- [ ] delete_cluster -- [ ] delete_parameter_group -- [ ] delete_subnet_group -- [ ] describe_clusters -- [ ] describe_default_parameters -- [ ] describe_events -- [ ] describe_parameter_groups -- [ ] describe_parameters -- [ ] describe_subnet_groups -- [ ] increase_replication_factor -- [ ] list_tags -- [ ] reboot_node -- [ ] tag_resource -- [ ] untag_resource -- [ ] update_cluster -- [ ] update_parameter_group -- [ ] update_subnet_group - -## devicefarm - 0% implemented -- [ ] create_device_pool -- [ ] create_instance_profile -- [ ] create_network_profile -- [ ] create_project -- [ ] create_remote_access_session -- [ ] create_upload -- [ ] delete_device_pool -- [ ] delete_instance_profile -- [ ] delete_network_profile -- [ ] delete_project -- [ ] delete_remote_access_session -- [ ] delete_run -- [ ] delete_upload -- [ ] get_account_settings -- [ ] get_device -- [ 
] get_device_instance -- [ ] get_device_pool -- [ ] get_device_pool_compatibility -- [ ] get_instance_profile -- [ ] get_job -- [ ] get_network_profile -- [ ] get_offering_status -- [ ] get_project -- [ ] get_remote_access_session -- [ ] get_run -- [ ] get_suite -- [ ] get_test -- [ ] get_upload -- [ ] install_to_remote_access_session -- [ ] list_artifacts -- [ ] list_device_instances -- [ ] list_device_pools -- [ ] list_devices -- [ ] list_instance_profiles -- [ ] list_jobs -- [ ] list_network_profiles -- [ ] list_offering_promotions -- [ ] list_offering_transactions -- [ ] list_offerings -- [ ] list_projects -- [ ] list_remote_access_sessions -- [ ] list_runs -- [ ] list_samples -- [ ] list_suites -- [ ] list_tests -- [ ] list_unique_problems -- [ ] list_uploads -- [ ] purchase_offering -- [ ] renew_offering -- [ ] schedule_run -- [ ] stop_remote_access_session -- [ ] stop_run -- [ ] update_device_instance -- [ ] update_device_pool -- [ ] update_instance_profile -- [ ] update_network_profile -- [ ] update_project - -## directconnect - 0% implemented -- [ ] allocate_connection_on_interconnect -- [ ] allocate_hosted_connection -- [ ] allocate_private_virtual_interface -- [ ] allocate_public_virtual_interface -- [ ] associate_connection_with_lag -- [ ] associate_hosted_connection -- [ ] associate_virtual_interface -- [ ] confirm_connection -- [ ] confirm_private_virtual_interface -- [ ] confirm_public_virtual_interface -- [ ] create_bgp_peer -- [ ] create_connection -- [ ] create_direct_connect_gateway -- [ ] create_direct_connect_gateway_association -- [ ] create_interconnect -- [ ] create_lag -- [ ] create_private_virtual_interface -- [ ] create_public_virtual_interface -- [ ] delete_bgp_peer -- [ ] delete_connection -- [ ] delete_direct_connect_gateway -- [ ] delete_direct_connect_gateway_association -- [ ] delete_interconnect -- [ ] delete_lag -- [ ] delete_virtual_interface -- [ ] describe_connection_loa -- [ ] describe_connections -- [ ] describe_connections_on_interconnect -- [ ] describe_direct_connect_gateway_associations -- [ ] describe_direct_connect_gateway_attachments -- [ ] describe_direct_connect_gateways -- [ ] describe_hosted_connections -- [ ] describe_interconnect_loa -- [ ] describe_interconnects -- [ ] describe_lags -- [ ] describe_loa -- [ ] describe_locations -- [ ] describe_tags -- [ ] describe_virtual_gateways -- [ ] describe_virtual_interfaces -- [ ] disassociate_connection_from_lag -- [ ] tag_resource -- [ ] untag_resource -- [ ] update_lag - -## discovery - 0% implemented -- [ ] associate_configuration_items_to_application -- [ ] create_application -- [ ] create_tags -- [ ] delete_applications -- [ ] delete_tags -- [ ] describe_agents -- [ ] describe_configurations -- [ ] describe_export_configurations -- [ ] describe_export_tasks -- [ ] describe_tags -- [ ] disassociate_configuration_items_from_application -- [ ] export_configurations -- [ ] get_discovery_summary -- [ ] list_configurations -- [ ] list_server_neighbors -- [ ] start_data_collection_by_agent_ids -- [ ] start_export_task -- [ ] stop_data_collection_by_agent_ids -- [ ] update_application - -## dms - 0% implemented -- [ ] add_tags_to_resource -- [ ] create_endpoint -- [ ] create_event_subscription -- [ ] create_replication_instance -- [ ] create_replication_subnet_group -- [ ] create_replication_task -- [ ] delete_certificate -- [ ] delete_endpoint -- [ ] delete_event_subscription -- [ ] delete_replication_instance -- [ ] delete_replication_subnet_group -- [ ] delete_replication_task -- [ ] 
describe_account_attributes -- [ ] describe_certificates -- [ ] describe_connections -- [ ] describe_endpoint_types -- [ ] describe_endpoints -- [ ] describe_event_categories -- [ ] describe_event_subscriptions -- [ ] describe_events -- [ ] describe_orderable_replication_instances -- [ ] describe_refresh_schemas_status -- [ ] describe_replication_instance_task_logs -- [ ] describe_replication_instances -- [ ] describe_replication_subnet_groups -- [ ] describe_replication_task_assessment_results -- [ ] describe_replication_tasks -- [ ] describe_schemas -- [ ] describe_table_statistics -- [ ] import_certificate -- [ ] list_tags_for_resource -- [ ] modify_endpoint -- [ ] modify_event_subscription -- [ ] modify_replication_instance -- [ ] modify_replication_subnet_group -- [ ] modify_replication_task -- [ ] reboot_replication_instance -- [ ] refresh_schemas -- [ ] reload_tables -- [ ] remove_tags_from_resource -- [ ] start_replication_task -- [ ] start_replication_task_assessment -- [ ] stop_replication_task -- [ ] test_connection - -## ds - 0% implemented -- [ ] add_ip_routes -- [ ] add_tags_to_resource -- [ ] cancel_schema_extension -- [ ] connect_directory -- [ ] create_alias -- [ ] create_computer -- [ ] create_conditional_forwarder -- [ ] create_directory -- [ ] create_microsoft_ad -- [ ] create_snapshot -- [ ] create_trust -- [ ] delete_conditional_forwarder -- [ ] delete_directory -- [ ] delete_snapshot -- [ ] delete_trust -- [ ] deregister_event_topic -- [ ] describe_conditional_forwarders -- [ ] describe_directories -- [ ] describe_domain_controllers -- [ ] describe_event_topics -- [ ] describe_snapshots -- [ ] describe_trusts -- [ ] disable_radius -- [ ] disable_sso -- [ ] enable_radius -- [ ] enable_sso -- [ ] get_directory_limits -- [ ] get_snapshot_limits -- [ ] list_ip_routes -- [ ] list_schema_extensions -- [ ] list_tags_for_resource -- [ ] register_event_topic -- [ ] remove_ip_routes -- [ ] remove_tags_from_resource -- [ ] restore_from_snapshot -- [ ] start_schema_extension -- [ ] update_conditional_forwarder -- [ ] update_number_of_domain_controllers -- [ ] update_radius -- [ ] verify_trust - -## dynamodb - 22% implemented -- [ ] batch_get_item -- [ ] batch_write_item -- [ ] create_backup -- [ ] create_global_table -- [X] create_table -- [ ] delete_backup -- [X] delete_item -- [X] delete_table -- [ ] describe_backup -- [ ] describe_continuous_backups -- [ ] describe_global_table -- [ ] describe_limits -- [ ] describe_table -- [ ] describe_time_to_live -- [X] get_item -- [ ] list_backups -- [ ] list_global_tables -- [ ] list_tables -- [ ] list_tags_of_resource -- [X] put_item -- [X] query -- [ ] restore_table_from_backup -- [ ] restore_table_to_point_in_time -- [X] scan -- [ ] tag_resource -- [ ] untag_resource -- [ ] update_continuous_backups -- [ ] update_global_table -- [ ] update_item -- [ ] update_table -- [ ] update_time_to_live - -## dynamodbstreams - 0% implemented -- [ ] describe_stream -- [ ] get_records -- [ ] get_shard_iterator -- [ ] list_streams - -## ec2 - 37% implemented -- [ ] accept_reserved_instances_exchange_quote -- [ ] accept_vpc_endpoint_connections -- [X] accept_vpc_peering_connection -- [X] allocate_address -- [ ] allocate_hosts -- [ ] assign_ipv6_addresses -- [ ] assign_private_ip_addresses -- [X] associate_address -- [X] associate_dhcp_options -- [ ] associate_iam_instance_profile -- [X] associate_route_table -- [ ] associate_subnet_cidr_block -- [X] associate_vpc_cidr_block -- [ ] attach_classic_link_vpc -- [X] attach_internet_gateway -- [X] 
attach_network_interface -- [X] attach_volume -- [X] attach_vpn_gateway -- [X] authorize_security_group_egress -- [X] authorize_security_group_ingress -- [ ] bundle_instance -- [ ] cancel_bundle_task -- [ ] cancel_conversion_task -- [ ] cancel_export_task -- [ ] cancel_import_task -- [ ] cancel_reserved_instances_listing -- [X] cancel_spot_fleet_requests -- [X] cancel_spot_instance_requests -- [ ] confirm_product_instance -- [ ] copy_fpga_image -- [X] copy_image -- [X] copy_snapshot -- [X] create_customer_gateway -- [ ] create_default_subnet -- [ ] create_default_vpc -- [X] create_dhcp_options -- [ ] create_egress_only_internet_gateway -- [ ] create_flow_logs -- [ ] create_fpga_image -- [X] create_image -- [ ] create_instance_export_task -- [X] create_internet_gateway -- [X] create_key_pair -- [ ] create_launch_template -- [ ] create_launch_template_version -- [X] create_nat_gateway -- [X] create_network_acl -- [X] create_network_acl_entry -- [X] create_network_interface -- [ ] create_network_interface_permission -- [ ] create_placement_group -- [ ] create_reserved_instances_listing -- [X] create_route -- [X] create_route_table -- [X] create_security_group -- [X] create_snapshot -- [ ] create_spot_datafeed_subscription -- [X] create_subnet -- [X] create_tags -- [X] create_volume -- [X] create_vpc -- [ ] create_vpc_endpoint -- [ ] create_vpc_endpoint_connection_notification -- [ ] create_vpc_endpoint_service_configuration -- [X] create_vpc_peering_connection -- [X] create_vpn_connection -- [ ] create_vpn_connection_route -- [X] create_vpn_gateway -- [X] delete_customer_gateway -- [ ] delete_dhcp_options -- [ ] delete_egress_only_internet_gateway -- [ ] delete_flow_logs -- [ ] delete_fpga_image -- [X] delete_internet_gateway -- [X] delete_key_pair -- [ ] delete_launch_template -- [ ] delete_launch_template_versions -- [X] delete_nat_gateway -- [X] delete_network_acl -- [X] delete_network_acl_entry -- [X] delete_network_interface -- [ ] delete_network_interface_permission -- [ ] delete_placement_group -- [X] delete_route -- [X] delete_route_table -- [X] delete_security_group -- [X] delete_snapshot -- [ ] delete_spot_datafeed_subscription -- [X] delete_subnet -- [X] delete_tags -- [X] delete_volume -- [X] delete_vpc -- [ ] delete_vpc_endpoint_connection_notifications -- [ ] delete_vpc_endpoint_service_configurations -- [ ] delete_vpc_endpoints -- [X] delete_vpc_peering_connection -- [X] delete_vpn_connection -- [ ] delete_vpn_connection_route -- [X] delete_vpn_gateway -- [X] deregister_image -- [ ] describe_account_attributes -- [X] describe_addresses -- [ ] describe_aggregate_id_format -- [X] describe_availability_zones -- [ ] describe_bundle_tasks -- [ ] describe_classic_link_instances -- [ ] describe_conversion_tasks -- [ ] describe_customer_gateways -- [X] describe_dhcp_options -- [ ] describe_egress_only_internet_gateways -- [ ] describe_elastic_gpus -- [ ] describe_export_tasks -- [ ] describe_flow_logs -- [ ] describe_fpga_image_attribute -- [ ] describe_fpga_images -- [ ] describe_host_reservation_offerings -- [ ] describe_host_reservations -- [ ] describe_hosts -- [ ] describe_iam_instance_profile_associations -- [ ] describe_id_format -- [ ] describe_identity_id_format -- [ ] describe_image_attribute -- [X] describe_images -- [ ] describe_import_image_tasks -- [ ] describe_import_snapshot_tasks -- [X] describe_instance_attribute -- [ ] describe_instance_credit_specifications -- [ ] describe_instance_status -- [ ] describe_instances -- [X] describe_internet_gateways -- [X] 
describe_key_pairs -- [ ] describe_launch_template_versions -- [ ] describe_launch_templates -- [ ] describe_moving_addresses -- [ ] describe_nat_gateways -- [ ] describe_network_acls -- [ ] describe_network_interface_attribute -- [ ] describe_network_interface_permissions -- [X] describe_network_interfaces -- [ ] describe_placement_groups -- [ ] describe_prefix_lists -- [ ] describe_principal_id_format -- [X] describe_regions -- [ ] describe_reserved_instances -- [ ] describe_reserved_instances_listings -- [ ] describe_reserved_instances_modifications -- [ ] describe_reserved_instances_offerings -- [ ] describe_route_tables -- [ ] describe_scheduled_instance_availability -- [ ] describe_scheduled_instances -- [ ] describe_security_group_references -- [X] describe_security_groups -- [ ] describe_snapshot_attribute -- [X] describe_snapshots -- [ ] describe_spot_datafeed_subscription -- [X] describe_spot_fleet_instances -- [ ] describe_spot_fleet_request_history -- [X] describe_spot_fleet_requests -- [X] describe_spot_instance_requests -- [ ] describe_spot_price_history -- [ ] describe_stale_security_groups -- [ ] describe_subnets -- [X] describe_tags -- [ ] describe_volume_attribute -- [ ] describe_volume_status -- [X] describe_volumes -- [ ] describe_volumes_modifications -- [X] describe_vpc_attribute -- [ ] describe_vpc_classic_link -- [ ] describe_vpc_classic_link_dns_support -- [ ] describe_vpc_endpoint_connection_notifications -- [ ] describe_vpc_endpoint_connections -- [ ] describe_vpc_endpoint_service_configurations -- [ ] describe_vpc_endpoint_service_permissions -- [ ] describe_vpc_endpoint_services -- [ ] describe_vpc_endpoints -- [ ] describe_vpc_peering_connections -- [ ] describe_vpcs -- [X] describe_vpn_connections -- [ ] describe_vpn_gateways -- [ ] detach_classic_link_vpc -- [X] detach_internet_gateway -- [X] detach_network_interface -- [X] detach_volume -- [X] detach_vpn_gateway -- [ ] disable_vgw_route_propagation -- [ ] disable_vpc_classic_link -- [ ] disable_vpc_classic_link_dns_support -- [X] disassociate_address -- [ ] disassociate_iam_instance_profile -- [X] disassociate_route_table -- [ ] disassociate_subnet_cidr_block -- [X] disassociate_vpc_cidr_block -- [ ] enable_vgw_route_propagation -- [ ] enable_volume_io -- [ ] enable_vpc_classic_link -- [ ] enable_vpc_classic_link_dns_support -- [ ] get_console_output -- [ ] get_console_screenshot -- [ ] get_host_reservation_purchase_preview -- [ ] get_launch_template_data -- [ ] get_password_data -- [ ] get_reserved_instances_exchange_quote -- [ ] import_image -- [ ] import_instance -- [X] import_key_pair -- [ ] import_snapshot -- [ ] import_volume -- [ ] modify_fpga_image_attribute -- [ ] modify_hosts -- [ ] modify_id_format -- [ ] modify_identity_id_format -- [ ] modify_image_attribute -- [X] modify_instance_attribute -- [ ] modify_instance_credit_specification -- [ ] modify_instance_placement -- [ ] modify_launch_template -- [X] modify_network_interface_attribute -- [ ] modify_reserved_instances -- [ ] modify_snapshot_attribute -- [X] modify_spot_fleet_request -- [X] modify_subnet_attribute -- [ ] modify_volume -- [ ] modify_volume_attribute -- [X] modify_vpc_attribute -- [ ] modify_vpc_endpoint -- [ ] modify_vpc_endpoint_connection_notification -- [ ] modify_vpc_endpoint_service_configuration -- [ ] modify_vpc_endpoint_service_permissions -- [ ] modify_vpc_peering_connection_options -- [ ] modify_vpc_tenancy -- [ ] monitor_instances -- [ ] move_address_to_vpc -- [ ] purchase_host_reservation -- [ ] 
purchase_reserved_instances_offering -- [ ] purchase_scheduled_instances -- [X] reboot_instances -- [ ] register_image -- [ ] reject_vpc_endpoint_connections -- [X] reject_vpc_peering_connection -- [X] release_address -- [ ] release_hosts -- [ ] replace_iam_instance_profile_association -- [X] replace_network_acl_association -- [X] replace_network_acl_entry -- [X] replace_route -- [X] replace_route_table_association -- [ ] report_instance_status -- [X] request_spot_fleet -- [X] request_spot_instances -- [ ] reset_fpga_image_attribute -- [ ] reset_image_attribute -- [ ] reset_instance_attribute -- [ ] reset_network_interface_attribute -- [ ] reset_snapshot_attribute -- [ ] restore_address_to_classic -- [X] revoke_security_group_egress -- [X] revoke_security_group_ingress -- [ ] run_instances -- [ ] run_scheduled_instances -- [X] start_instances -- [X] stop_instances -- [X] terminate_instances -- [ ] unassign_ipv6_addresses -- [ ] unassign_private_ip_addresses -- [ ] unmonitor_instances -- [ ] update_security_group_rule_descriptions_egress -- [ ] update_security_group_rule_descriptions_ingress - -## ecr - 31% implemented -- [ ] batch_check_layer_availability -- [ ] batch_delete_image -- [X] batch_get_image -- [ ] complete_layer_upload -- [X] create_repository -- [ ] delete_lifecycle_policy -- [X] delete_repository -- [ ] delete_repository_policy -- [X] describe_images -- [X] describe_repositories -- [ ] get_authorization_token -- [ ] get_download_url_for_layer -- [ ] get_lifecycle_policy -- [ ] get_lifecycle_policy_preview -- [ ] get_repository_policy -- [ ] initiate_layer_upload -- [X] list_images -- [X] put_image -- [ ] put_lifecycle_policy -- [ ] set_repository_policy -- [ ] start_lifecycle_policy_preview -- [ ] upload_layer_part - -## ecs - 87% implemented -- [X] create_cluster -- [X] create_service -- [X] delete_attributes -- [X] delete_cluster -- [X] delete_service -- [X] deregister_container_instance -- [X] deregister_task_definition -- [X] describe_clusters -- [X] describe_container_instances -- [X] describe_services -- [X] describe_task_definition -- [X] describe_tasks -- [ ] discover_poll_endpoint -- [X] list_attributes -- [X] list_clusters -- [X] list_container_instances -- [X] list_services -- [X] list_task_definition_families -- [X] list_task_definitions -- [X] list_tasks -- [X] put_attributes -- [X] register_container_instance -- [X] register_task_definition -- [X] run_task -- [X] start_task -- [X] stop_task -- [ ] submit_container_state_change -- [ ] submit_task_state_change -- [ ] update_container_agent -- [X] update_container_instances_state -- [X] update_service - -## efs - 0% implemented -- [ ] create_file_system -- [ ] create_mount_target -- [ ] create_tags -- [ ] delete_file_system -- [ ] delete_mount_target -- [ ] delete_tags -- [ ] describe_file_systems -- [ ] describe_mount_target_security_groups -- [ ] describe_mount_targets -- [ ] describe_tags -- [ ] modify_mount_target_security_groups - -## elasticache - 0% implemented -- [ ] add_tags_to_resource -- [ ] authorize_cache_security_group_ingress -- [ ] copy_snapshot -- [ ] create_cache_cluster -- [ ] create_cache_parameter_group -- [ ] create_cache_security_group -- [ ] create_cache_subnet_group -- [ ] create_replication_group -- [ ] create_snapshot -- [ ] delete_cache_cluster -- [ ] delete_cache_parameter_group -- [ ] delete_cache_security_group -- [ ] delete_cache_subnet_group -- [ ] delete_replication_group -- [ ] delete_snapshot -- [ ] describe_cache_clusters -- [ ] describe_cache_engine_versions -- [ ] 
describe_cache_parameter_groups -- [ ] describe_cache_parameters -- [ ] describe_cache_security_groups -- [ ] describe_cache_subnet_groups -- [ ] describe_engine_default_parameters -- [ ] describe_events -- [ ] describe_replication_groups -- [ ] describe_reserved_cache_nodes -- [ ] describe_reserved_cache_nodes_offerings -- [ ] describe_snapshots -- [ ] list_allowed_node_type_modifications -- [ ] list_tags_for_resource -- [ ] modify_cache_cluster -- [ ] modify_cache_parameter_group -- [ ] modify_cache_subnet_group -- [ ] modify_replication_group -- [ ] modify_replication_group_shard_configuration -- [ ] purchase_reserved_cache_nodes_offering -- [ ] reboot_cache_cluster -- [ ] remove_tags_from_resource -- [ ] reset_cache_parameter_group -- [ ] revoke_cache_security_group_ingress -- [ ] test_failover - -## elasticbeanstalk - 0% implemented -- [ ] abort_environment_update -- [ ] apply_environment_managed_action -- [ ] check_dns_availability -- [ ] compose_environments -- [ ] create_application -- [ ] create_application_version -- [ ] create_configuration_template -- [ ] create_environment -- [ ] create_platform_version -- [ ] create_storage_location -- [ ] delete_application -- [ ] delete_application_version -- [ ] delete_configuration_template -- [ ] delete_environment_configuration -- [ ] delete_platform_version -- [ ] describe_account_attributes -- [ ] describe_application_versions -- [ ] describe_applications -- [ ] describe_configuration_options -- [ ] describe_configuration_settings -- [ ] describe_environment_health -- [ ] describe_environment_managed_action_history -- [ ] describe_environment_managed_actions -- [ ] describe_environment_resources -- [ ] describe_environments -- [ ] describe_events -- [ ] describe_instances_health -- [ ] describe_platform_version -- [ ] list_available_solution_stacks -- [ ] list_platform_versions -- [ ] list_tags_for_resource -- [ ] rebuild_environment -- [ ] request_environment_info -- [ ] restart_app_server -- [ ] retrieve_environment_info -- [ ] swap_environment_cnames -- [ ] terminate_environment -- [ ] update_application -- [ ] update_application_resource_lifecycle -- [ ] update_application_version -- [ ] update_configuration_template -- [ ] update_environment -- [ ] update_tags_for_resource -- [ ] validate_configuration_settings - -## elastictranscoder - 0% implemented -- [ ] cancel_job -- [ ] create_job -- [ ] create_pipeline -- [ ] create_preset -- [ ] delete_pipeline -- [ ] delete_preset -- [ ] list_jobs_by_pipeline -- [ ] list_jobs_by_status -- [ ] list_pipelines -- [ ] list_presets -- [ ] read_job -- [ ] read_pipeline -- [ ] read_preset -- [ ] test_role -- [ ] update_pipeline -- [ ] update_pipeline_notifications -- [ ] update_pipeline_status - -## elb - 34% implemented -- [ ] add_tags -- [X] apply_security_groups_to_load_balancer -- [ ] attach_load_balancer_to_subnets -- [X] configure_health_check -- [X] create_app_cookie_stickiness_policy -- [X] create_lb_cookie_stickiness_policy -- [X] create_load_balancer -- [X] create_load_balancer_listeners -- [ ] create_load_balancer_policy -- [X] delete_load_balancer -- [X] delete_load_balancer_listeners -- [ ] delete_load_balancer_policy -- [ ] deregister_instances_from_load_balancer -- [ ] describe_account_limits -- [ ] describe_instance_health -- [ ] describe_load_balancer_attributes -- [ ] describe_load_balancer_policies -- [ ] describe_load_balancer_policy_types -- [X] describe_load_balancers -- [ ] describe_tags -- [ ] detach_load_balancer_from_subnets -- [ ] 
disable_availability_zones_for_load_balancer -- [ ] enable_availability_zones_for_load_balancer -- [ ] modify_load_balancer_attributes -- [ ] register_instances_with_load_balancer -- [ ] remove_tags -- [ ] set_load_balancer_listener_ssl_certificate -- [ ] set_load_balancer_policies_for_backend_server -- [X] set_load_balancer_policies_of_listener - -## elbv2 - 70% implemented -- [ ] add_listener_certificates -- [ ] add_tags -- [X] create_listener -- [X] create_load_balancer -- [X] create_rule -- [X] create_target_group -- [X] delete_listener -- [X] delete_load_balancer -- [X] delete_rule -- [X] delete_target_group -- [X] deregister_targets -- [ ] describe_account_limits -- [ ] describe_listener_certificates -- [X] describe_listeners -- [X] describe_load_balancer_attributes -- [X] describe_load_balancers -- [X] describe_rules -- [ ] describe_ssl_policies -- [ ] describe_tags -- [ ] describe_target_group_attributes -- [X] describe_target_groups -- [X] describe_target_health -- [X] modify_listener -- [X] modify_load_balancer_attributes -- [X] modify_rule -- [X] modify_target_group -- [ ] modify_target_group_attributes -- [X] register_targets -- [ ] remove_listener_certificates -- [ ] remove_tags -- [X] set_ip_address_type -- [X] set_rule_priorities -- [X] set_security_groups -- [X] set_subnets - -## emr - 55% implemented -- [ ] add_instance_fleet -- [X] add_instance_groups -- [X] add_job_flow_steps -- [X] add_tags -- [ ] cancel_steps -- [ ] create_security_configuration -- [ ] delete_security_configuration -- [ ] describe_cluster -- [X] describe_job_flows -- [ ] describe_security_configuration -- [X] describe_step -- [X] list_bootstrap_actions -- [X] list_clusters -- [ ] list_instance_fleets -- [X] list_instance_groups -- [ ] list_instances -- [ ] list_security_configurations -- [X] list_steps -- [ ] modify_instance_fleet -- [X] modify_instance_groups -- [ ] put_auto_scaling_policy -- [ ] remove_auto_scaling_policy -- [X] remove_tags -- [X] run_job_flow -- [X] set_termination_protection -- [X] set_visible_to_all_users -- [X] terminate_job_flows - -## es - 0% implemented -- [ ] add_tags -- [ ] create_elasticsearch_domain -- [ ] delete_elasticsearch_domain -- [ ] delete_elasticsearch_service_role -- [ ] describe_elasticsearch_domain -- [ ] describe_elasticsearch_domain_config -- [ ] describe_elasticsearch_domains -- [ ] describe_elasticsearch_instance_type_limits -- [ ] list_domain_names -- [ ] list_elasticsearch_instance_types -- [ ] list_elasticsearch_versions -- [ ] list_tags -- [ ] remove_tags -- [ ] update_elasticsearch_domain_config - -## events - 100% implemented -- [X] delete_rule -- [X] describe_event_bus -- [X] describe_rule -- [X] disable_rule -- [X] enable_rule -- [X] list_rule_names_by_target -- [X] list_rules -- [X] list_targets_by_rule -- [X] put_events -- [X] put_permission -- [X] put_rule -- [X] put_targets -- [X] remove_permission -- [X] remove_targets -- [X] test_event_pattern - -## firehose - 0% implemented -- [ ] create_delivery_stream -- [ ] delete_delivery_stream -- [ ] describe_delivery_stream -- [ ] list_delivery_streams -- [ ] put_record -- [ ] put_record_batch -- [ ] update_destination - -## fms - 0% implemented -- [ ] associate_admin_account -- [ ] delete_notification_channel -- [ ] delete_policy -- [ ] disassociate_admin_account -- [ ] get_admin_account -- [ ] get_compliance_detail -- [ ] get_notification_channel -- [ ] get_policy -- [ ] list_compliance_status -- [ ] list_policies -- [ ] put_notification_channel -- [ ] put_policy - -## gamelift - 0% implemented -- [ 
-
-## firehose - 0% implemented
-- [ ] create_delivery_stream
-- [ ] delete_delivery_stream
-- [ ] describe_delivery_stream
-- [ ] list_delivery_streams
-- [ ] put_record
-- [ ] put_record_batch
-- [ ] update_destination
-
-## fms - 0% implemented
-- [ ] associate_admin_account
-- [ ] delete_notification_channel
-- [ ] delete_policy
-- [ ] disassociate_admin_account
-- [ ] get_admin_account
-- [ ] get_compliance_detail
-- [ ] get_notification_channel
-- [ ] get_policy
-- [ ] list_compliance_status
-- [ ] list_policies
-- [ ] put_notification_channel
-- [ ] put_policy
-
-## gamelift - 0% implemented
-- [ ] accept_match
-- [ ] create_alias
-- [ ] create_build
-- [ ] create_fleet
-- [ ] create_game_session
-- [ ] create_game_session_queue
-- [ ] create_matchmaking_configuration
-- [ ] create_matchmaking_rule_set
-- [ ] create_player_session
-- [ ] create_player_sessions
-- [ ] create_vpc_peering_authorization
-- [ ] create_vpc_peering_connection
-- [ ] delete_alias
-- [ ] delete_build
-- [ ] delete_fleet
-- [ ] delete_game_session_queue
-- [ ] delete_matchmaking_configuration
-- [ ] delete_scaling_policy
-- [ ] delete_vpc_peering_authorization
-- [ ] delete_vpc_peering_connection
-- [ ] describe_alias
-- [ ] describe_build
-- [ ] describe_ec2_instance_limits
-- [ ] describe_fleet_attributes
-- [ ] describe_fleet_capacity
-- [ ] describe_fleet_events
-- [ ] describe_fleet_port_settings
-- [ ] describe_fleet_utilization
-- [ ] describe_game_session_details
-- [ ] describe_game_session_placement
-- [ ] describe_game_session_queues
-- [ ] describe_game_sessions
-- [ ] describe_instances
-- [ ] describe_matchmaking
-- [ ] describe_matchmaking_configurations
-- [ ] describe_matchmaking_rule_sets
-- [ ] describe_player_sessions
-- [ ] describe_runtime_configuration
-- [ ] describe_scaling_policies
-- [ ] describe_vpc_peering_authorizations
-- [ ] describe_vpc_peering_connections
-- [ ] get_game_session_log_url
-- [ ] get_instance_access
-- [ ] list_aliases
-- [ ] list_builds
-- [ ] list_fleets
-- [ ] put_scaling_policy
-- [ ] request_upload_credentials
-- [ ] resolve_alias
-- [ ] search_game_sessions
-- [ ] start_game_session_placement
-- [ ] start_match_backfill
-- [ ] start_matchmaking
-- [ ] stop_game_session_placement
-- [ ] stop_matchmaking
-- [ ] update_alias
-- [ ] update_build
-- [ ] update_fleet_attributes
-- [ ] update_fleet_capacity
-- [ ] update_fleet_port_settings
-- [ ] update_game_session
-- [ ] update_game_session_queue
-- [ ] update_matchmaking_configuration
-- [ ] update_runtime_configuration
-- [ ] validate_matchmaking_rule_set
-
-## glacier - 12% implemented
-- [ ] abort_multipart_upload
-- [ ] abort_vault_lock
-- [ ] add_tags_to_vault
-- [ ] complete_multipart_upload
-- [ ] complete_vault_lock
-- [X] create_vault
-- [ ] delete_archive
-- [X] delete_vault
-- [ ] delete_vault_access_policy
-- [ ] delete_vault_notifications
-- [ ] describe_job
-- [ ] describe_vault
-- [ ] get_data_retrieval_policy
-- [ ] get_job_output
-- [ ] get_vault_access_policy
-- [ ] get_vault_lock
-- [ ] get_vault_notifications
-- [X] initiate_job
-- [ ] initiate_multipart_upload
-- [ ] initiate_vault_lock
-- [X] list_jobs
-- [ ] list_multipart_uploads
-- [ ] list_parts
-- [ ] list_provisioned_capacity
-- [ ] list_tags_for_vault
-- [ ] list_vaults
-- [ ] purchase_provisioned_capacity
-- [ ] remove_tags_from_vault
-- [ ] set_data_retrieval_policy
-- [ ] set_vault_access_policy
-- [ ] set_vault_notifications
-- [ ] upload_archive
-- [ ] upload_multipart_part
-
-## glue - 0% implemented
-- [ ] batch_create_partition
-- [ ] batch_delete_connection
-- [ ] batch_delete_partition
-- [ ] batch_delete_table
-- [ ] batch_delete_table_version
-- [ ] batch_get_partition
-- [ ] batch_stop_job_run
-- [ ] create_classifier
-- [ ] create_connection
-- [ ] create_crawler
-- [ ] create_database
-- [ ] create_dev_endpoint
-- [ ] create_job
-- [ ] create_partition
-- [ ] create_script
-- [ ] create_table
-- [ ] create_trigger
-- [ ] create_user_defined_function
-- [ ] delete_classifier
-- [ ] delete_connection
-- [ ] delete_crawler
-- [ ] delete_database
-- [ ] delete_dev_endpoint
-- [ ] delete_job
-- [ ] delete_partition
-- [ ] delete_table
-- [ ] delete_table_version
-- [ ] delete_trigger
-- [ ] delete_user_defined_function
-- [ ] get_catalog_import_status
-- [ ] get_classifier
-- [ ] get_classifiers
-- [ ] get_connection
-- [ ] get_connections
-- [ ] get_crawler
-- [ ] get_crawler_metrics
-- [ ] get_crawlers
-- [ ] get_database
-- [ ] get_databases
-- [ ] get_dataflow_graph
-- [ ] get_dev_endpoint
-- [ ] get_dev_endpoints
-- [ ] get_job
-- [ ] get_job_run
-- [ ] get_job_runs
-- [ ] get_jobs
-- [ ] get_mapping
-- [ ] get_partition
-- [ ] get_partitions
-- [ ] get_plan
-- [ ] get_table
-- [ ] get_table_version
-- [ ] get_table_versions
-- [ ] get_tables
-- [ ] get_trigger
-- [ ] get_triggers
-- [ ] get_user_defined_function
-- [ ] get_user_defined_functions
-- [ ] import_catalog_to_glue
-- [ ] reset_job_bookmark
-- [ ] start_crawler
-- [ ] start_crawler_schedule
-- [ ] start_job_run
-- [ ] start_trigger
-- [ ] stop_crawler
-- [ ] stop_crawler_schedule
-- [ ] stop_trigger
-- [ ] update_classifier
-- [ ] update_connection
-- [ ] update_crawler
-- [ ] update_crawler_schedule
-- [ ] update_database
-- [ ] update_dev_endpoint
-- [ ] update_job
-- [ ] update_partition
-- [ ] update_table
-- [ ] update_trigger
-- [ ] update_user_defined_function
-
-## greengrass - 0% implemented
-- [ ] associate_role_to_group
-- [ ] associate_service_role_to_account
-- [ ] create_core_definition
-- [ ] create_core_definition_version
-- [ ] create_deployment
-- [ ] create_device_definition
-- [ ] create_device_definition_version
-- [ ] create_function_definition
-- [ ] create_function_definition_version
-- [ ] create_group
-- [ ] create_group_certificate_authority
-- [ ] create_group_version
-- [ ] create_logger_definition
-- [ ] create_logger_definition_version
-- [ ] create_resource_definition
-- [ ] create_resource_definition_version
-- [ ] create_software_update_job
-- [ ] create_subscription_definition
-- [ ] create_subscription_definition_version
-- [ ] delete_core_definition
-- [ ] delete_device_definition
-- [ ] delete_function_definition
-- [ ] delete_group
-- [ ] delete_logger_definition
-- [ ] delete_resource_definition
-- [ ] delete_subscription_definition
-- [ ] disassociate_role_from_group
-- [ ] disassociate_service_role_from_account
-- [ ] get_associated_role
-- [ ] get_connectivity_info
-- [ ] get_core_definition
-- [ ] get_core_definition_version
-- [ ] get_deployment_status
-- [ ] get_device_definition
-- [ ] get_device_definition_version
-- [ ] get_function_definition
-- [ ] get_function_definition_version
-- [ ] get_group
-- [ ] get_group_certificate_authority
-- [ ] get_group_certificate_configuration
-- [ ] get_group_version
-- [ ] get_logger_definition
-- [ ] get_logger_definition_version
-- [ ] get_resource_definition
-- [ ] get_resource_definition_version
-- [ ] get_service_role_for_account
-- [ ] get_subscription_definition
-- [ ] get_subscription_definition_version
-- [ ] list_core_definition_versions
-- [ ] list_core_definitions
-- [ ] list_deployments
-- [ ] list_device_definition_versions
-- [ ] list_device_definitions
-- [ ] list_function_definition_versions
-- [ ] list_function_definitions
-- [ ] list_group_certificate_authorities
-- [ ] list_group_versions
-- [ ] list_groups
-- [ ] list_logger_definition_versions
-- [ ] list_logger_definitions
-- [ ] list_resource_definition_versions
-- [ ] list_resource_definitions
-- [ ] list_subscription_definition_versions
-- [ ] list_subscription_definitions
-- [ ] reset_deployments
-- [ ] update_connectivity_info
-- [ ] update_core_definition
-- [ ] update_device_definition
-- [ ] update_function_definition
-- [ ] update_group
-- [ ] update_group_certificate_configuration
-- [ ] update_logger_definition
-- [ ] update_resource_definition
-- [ ] update_subscription_definition
-
-## guardduty - 0% implemented
-- [ ] accept_invitation
-- [ ] archive_findings
-- [ ] create_detector
-- [ ] create_ip_set
-- [ ] create_members
-- [ ] create_sample_findings
-- [ ] create_threat_intel_set
-- [ ] decline_invitations
-- [ ] delete_detector
-- [ ] delete_invitations
-- [ ] delete_ip_set
-- [ ] delete_members
-- [ ] delete_threat_intel_set
-- [ ] disassociate_from_master_account
-- [ ] disassociate_members
-- [ ] get_detector
-- [ ] get_findings
-- [ ] get_findings_statistics
-- [ ] get_invitations_count
-- [ ] get_ip_set
-- [ ] get_master_account
-- [ ] get_members
-- [ ] get_threat_intel_set
-- [ ] invite_members
-- [ ] list_detectors
-- [ ] list_findings
-- [ ] list_invitations
-- [ ] list_ip_sets
-- [ ] list_members
-- [ ] list_threat_intel_sets
-- [ ] start_monitoring_members
-- [ ] stop_monitoring_members
-- [ ] unarchive_findings
-- [ ] update_detector
-- [ ] update_findings_feedback
-- [ ] update_ip_set
-- [ ] update_threat_intel_set
-
-## health - 0% implemented
-- [ ] describe_affected_entities
-- [ ] describe_entity_aggregates
-- [ ] describe_event_aggregates
-- [ ] describe_event_details
-- [ ] describe_event_types
-- [ ] describe_events
-
-## iam - 48% implemented
-- [ ] add_client_id_to_open_id_connect_provider
-- [X] add_role_to_instance_profile
-- [X] add_user_to_group
-- [X] attach_group_policy
-- [X] attach_role_policy
-- [X] attach_user_policy
-- [ ] change_password
-- [X] create_access_key
-- [X] create_account_alias
-- [X] create_group
-- [X] create_instance_profile
-- [X] create_login_profile
-- [ ] create_open_id_connect_provider
-- [X] create_policy
-- [X] create_policy_version
-- [X] create_role
-- [ ] create_saml_provider
-- [ ] create_service_linked_role
-- [ ] create_service_specific_credential
-- [X] create_user
-- [ ] create_virtual_mfa_device
-- [X] deactivate_mfa_device
-- [X] delete_access_key
-- [X] delete_account_alias
-- [ ] delete_account_password_policy
-- [ ] delete_group
-- [ ] delete_group_policy
-- [ ] delete_instance_profile
-- [X] delete_login_profile
-- [ ] delete_open_id_connect_provider
-- [ ] delete_policy
-- [X] delete_policy_version
-- [X] delete_role
-- [X] delete_role_policy
-- [ ] delete_saml_provider
-- [X] delete_server_certificate
-- [ ] delete_service_linked_role
-- [ ] delete_service_specific_credential
-- [ ] delete_signing_certificate
-- [ ] delete_ssh_public_key
-- [X] delete_user
-- [X] delete_user_policy
-- [ ] delete_virtual_mfa_device
-- [X] detach_group_policy
-- [X] detach_role_policy
-- [X] detach_user_policy
-- [X] enable_mfa_device
-- [ ] generate_credential_report
-- [ ] get_access_key_last_used
-- [X] get_account_authorization_details
-- [ ] get_account_password_policy
-- [ ] get_account_summary
-- [ ] get_context_keys_for_custom_policy
-- [ ] get_context_keys_for_principal_policy
-- [X] get_credential_report
-- [X] get_group
-- [X] get_group_policy
-- [X] get_instance_profile
-- [X] get_login_profile
-- [ ] get_open_id_connect_provider
-- [X] get_policy
-- [X] get_policy_version
-- [X] get_role
-- [X] get_role_policy
-- [ ] get_saml_provider
-- [X] get_server_certificate
-- [ ] get_service_linked_role_deletion_status
-- [ ] get_ssh_public_key
-- [X] get_user
-- [X] get_user_policy
-- [ ] list_access_keys
-- [X] list_account_aliases
-- [X] list_attached_group_policies
-- [X] list_attached_role_policies
-- [X] list_attached_user_policies
-- [ ] list_entities_for_policy
-- [X] list_group_policies
-- [X] list_groups
-- [ ] list_groups_for_user
-- [ ] list_instance_profiles
-- [ ] list_instance_profiles_for_role
-- [X] list_mfa_devices
-- [ ] list_open_id_connect_providers
-- [X] list_policies
-- [X] list_policy_versions
-- [X] list_role_policies
-- [ ] list_roles
-- [ ] list_saml_providers
-- [ ] list_server_certificates
-- [ ] list_service_specific_credentials
-- [ ] list_signing_certificates
-- [ ] list_ssh_public_keys
-- [X] list_user_policies
-- [X] list_users
-- [ ] list_virtual_mfa_devices
-- [X] put_group_policy
-- [X] put_role_policy
-- [X] put_user_policy
-- [ ] remove_client_id_from_open_id_connect_provider
-- [X] remove_role_from_instance_profile
-- [X] remove_user_from_group
-- [ ] reset_service_specific_credential
-- [ ] resync_mfa_device
-- [ ] set_default_policy_version
-- [ ] simulate_custom_policy
-- [ ] simulate_principal_policy
-- [X] update_access_key
-- [ ] update_account_password_policy
-- [ ] update_assume_role_policy
-- [ ] update_group
-- [X] update_login_profile
-- [ ] update_open_id_connect_provider_thumbprint
-- [ ] update_role
-- [ ] update_role_description
-- [ ] update_saml_provider
-- [ ] update_server_certificate
-- [ ] update_service_specific_credential
-- [ ] update_signing_certificate
-- [ ] update_ssh_public_key
-- [ ] update_user
-- [ ] upload_server_certificate
-- [ ] upload_signing_certificate
-- [ ] upload_ssh_public_key
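Roughly half of IAM is covered, and the checked role CRUD calls are enough for a create/fetch/delete cycle. A minimal sketch, assuming moto's `mock_iam` decorator (the role name and the empty policy document are hypothetical):

```python
import boto3
from moto import mock_iam

@mock_iam
def test_role_lifecycle():
    client = boto3.client("iam", region_name="us-east-1")
    # create_role, get_role and delete_role are all marked [X] above
    client.create_role(RoleName="my-role", AssumeRolePolicyDocument="{}")
    assert client.get_role(RoleName="my-role")["Role"]["RoleName"] == "my-role"
    client.delete_role(RoleName="my-role")
```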
-
-## importexport - 0% implemented
-- [ ] cancel_job
-- [ ] create_job
-- [ ] get_shipping_label
-- [ ] get_status
-- [ ] list_jobs
-- [ ] update_job
-
-## inspector - 0% implemented
-- [ ] add_attributes_to_findings
-- [ ] create_assessment_target
-- [ ] create_assessment_template
-- [ ] create_resource_group
-- [ ] delete_assessment_run
-- [ ] delete_assessment_target
-- [ ] delete_assessment_template
-- [ ] describe_assessment_runs
-- [ ] describe_assessment_targets
-- [ ] describe_assessment_templates
-- [ ] describe_cross_account_access_role
-- [ ] describe_findings
-- [ ] describe_resource_groups
-- [ ] describe_rules_packages
-- [ ] get_assessment_report
-- [ ] get_telemetry_metadata
-- [ ] list_assessment_run_agents
-- [ ] list_assessment_runs
-- [ ] list_assessment_targets
-- [ ] list_assessment_templates
-- [ ] list_event_subscriptions
-- [ ] list_findings
-- [ ] list_rules_packages
-- [ ] list_tags_for_resource
-- [ ] preview_agents
-- [ ] register_cross_account_access_role
-- [ ] remove_attributes_from_findings
-- [ ] set_tags_for_resource
-- [ ] start_assessment_run
-- [ ] stop_assessment_run
-- [ ] subscribe_to_event
-- [ ] unsubscribe_from_event
-- [ ] update_assessment_target
-
-## iot - 30% implemented
-- [ ] accept_certificate_transfer
-- [X] add_thing_to_thing_group
-- [ ] associate_targets_with_job
-- [ ] attach_policy
-- [X] attach_principal_policy
-- [X] attach_thing_principal
-- [ ] cancel_certificate_transfer
-- [ ] cancel_job
-- [ ] clear_default_authorizer
-- [ ] create_authorizer
-- [ ] create_certificate_from_csr
-- [X] create_job
-- [X] create_keys_and_certificate
-- [ ] create_ota_update
-- [X] create_policy
-- [ ] create_policy_version
-- [ ] create_role_alias
-- [ ] create_stream
-- [X] create_thing
-- [X] create_thing_group
-- [X] create_thing_type
-- [ ] create_topic_rule
-- [ ] delete_authorizer
-- [ ] delete_ca_certificate
-- [X] delete_certificate
-- [ ] delete_ota_update
-- [X] delete_policy
-- [ ] delete_policy_version
-- [ ] delete_registration_code
-- [ ] delete_role_alias
-- [ ] delete_stream
-- [X] delete_thing
-- [X] delete_thing_group
-- [X] delete_thing_type
-- [ ] delete_topic_rule
-- [ ] delete_v2_logging_level
-- [ ] deprecate_thing_type
-- [ ] describe_authorizer
-- [ ] describe_ca_certificate
-- [X] describe_certificate
-- [ ] describe_default_authorizer
-- [ ] describe_endpoint
-- [ ] describe_event_configurations
-- [ ] describe_index
-- [X] describe_job
-- [ ] describe_job_execution
-- [ ] describe_role_alias
-- [ ] describe_stream
-- [X] describe_thing
-- [X] describe_thing_group
-- [ ] describe_thing_registration_task
-- [X] describe_thing_type
-- [ ] detach_policy
-- [X] detach_principal_policy
-- [X] detach_thing_principal
-- [ ] disable_topic_rule
-- [ ] enable_topic_rule
-- [ ] get_effective_policies
-- [ ] get_indexing_configuration
-- [ ] get_job_document
-- [ ] get_logging_options
-- [ ] get_ota_update
-- [X] get_policy
-- [ ] get_policy_version
-- [ ] get_registration_code
-- [ ] get_topic_rule
-- [ ] get_v2_logging_options
-- [ ] list_attached_policies
-- [ ] list_authorizers
-- [ ] list_ca_certificates
-- [X] list_certificates
-- [ ] list_certificates_by_ca
-- [ ] list_indices
-- [ ] list_job_executions_for_job
-- [ ] list_job_executions_for_thing
-- [ ] list_jobs
-- [ ] list_ota_updates
-- [ ] list_outgoing_certificates
-- [X] list_policies
-- [X] list_policy_principals
-- [ ] list_policy_versions
-- [X] list_principal_policies
-- [X] list_principal_things
-- [ ] list_role_aliases
-- [ ] list_streams
-- [ ] list_targets_for_policy
-- [X] list_thing_groups
-- [X] list_thing_groups_for_thing
-- [X] list_thing_principals
-- [ ] list_thing_registration_task_reports
-- [ ] list_thing_registration_tasks
-- [X] list_thing_types
-- [X] list_things
-- [X] list_things_in_thing_group
-- [ ] list_topic_rules
-- [ ] list_v2_logging_levels
-- [ ] register_ca_certificate
-- [ ] register_certificate
-- [ ] register_thing
-- [ ] reject_certificate_transfer
-- [X] remove_thing_from_thing_group
-- [ ] replace_topic_rule
-- [ ] search_index
-- [ ] set_default_authorizer
-- [ ] set_default_policy_version
-- [ ] set_logging_options
-- [ ] set_v2_logging_level
-- [ ] set_v2_logging_options
-- [ ] start_thing_registration_task
-- [ ] stop_thing_registration_task
-- [ ] test_authorization
-- [ ] test_invoke_authorizer
-- [ ] transfer_certificate
-- [ ] update_authorizer
-- [ ] update_ca_certificate
-- [X] update_certificate
-- [ ] update_event_configurations
-- [ ] update_indexing_configuration
-- [ ] update_role_alias
-- [ ] update_stream
-- [X] update_thing
-- [X] update_thing_group
-- [X] update_thing_groups_for_thing
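Note that `get_job_document` is still unchecked here because this is the pre-patch listing that the diff deletes; this patch in the series is what implements it. A minimal sketch of the behavior its new test exercises, assuming moto's `mock_iot` decorator (the thing name, job id, and inline document are hypothetical):

```python
import boto3
from moto import mock_iot

@mock_iot
def test_get_job_document():
    client = boto3.client("iot", region_name="eu-west-1")
    thing = client.create_thing(thingName="my-thing")
    # create_job is marked [X] above and accepts an inline job document
    client.create_job(
        jobId="my-job",
        targets=[thing["thingArn"]],
        document='{"field": "value"}',
    )
    # with this patch applied, the mock returns the inline document verbatim
    assert client.get_job_document(jobId="my-job")["document"] == '{"field": "value"}'
```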
-
-## iot-data - 0% implemented
-- [ ] delete_thing_shadow
-- [ ] get_thing_shadow
-- [ ] publish
-- [ ] update_thing_shadow
-
-## iot-jobs-data - 0% implemented
-- [ ] describe_job_execution
-- [ ] get_pending_job_executions
-- [ ] start_next_pending_job_execution
-- [ ] update_job_execution
-
-## kinesis - 56% implemented
-- [X] add_tags_to_stream
-- [X] create_stream
-- [ ] decrease_stream_retention_period
-- [X] delete_stream
-- [ ] describe_limits
-- [X] describe_stream
-- [ ] describe_stream_summary
-- [ ] disable_enhanced_monitoring
-- [ ] enable_enhanced_monitoring
-- [X] get_records
-- [X] get_shard_iterator
-- [ ] increase_stream_retention_period
-- [ ] list_shards
-- [X] list_streams
-- [X] list_tags_for_stream
-- [X] merge_shards
-- [X] put_record
-- [X] put_records
-- [X] remove_tags_from_stream
-- [X] split_shard
-- [ ] start_stream_encryption
-- [ ] stop_stream_encryption
-- [ ] update_shard_count
-
-## kinesis-video-archived-media - 0% implemented
-- [ ] get_media_for_fragment_list
-- [ ] list_fragments
-
-## kinesis-video-media - 0% implemented
-- [ ] get_media
-
-## kinesisanalytics - 0% implemented
-- [ ] add_application_cloud_watch_logging_option
-- [ ] add_application_input
-- [ ] add_application_input_processing_configuration
-- [ ] add_application_output
-- [ ] add_application_reference_data_source
-- [ ] create_application
-- [ ] delete_application
-- [ ] delete_application_cloud_watch_logging_option
-- [ ] delete_application_input_processing_configuration
-- [ ] delete_application_output
-- [ ] delete_application_reference_data_source
-- [ ] describe_application
-- [ ] discover_input_schema
-- [ ] list_applications
-- [ ] start_application
-- [ ] stop_application
-- [ ] update_application
-
-## kinesisvideo - 0% implemented
-- [ ] create_stream
-- [ ] delete_stream
-- [ ] describe_stream
-- [ ] get_data_endpoint
-- [ ] list_streams
-- [ ] list_tags_for_stream
-- [ ] tag_stream
-- [ ] untag_stream
-- [ ] update_data_retention
-- [ ] update_stream
-
-## kms - 25% implemented
-- [ ] cancel_key_deletion
-- [ ] create_alias
-- [ ] create_grant
-- [X] create_key
-- [ ] decrypt
-- [X] delete_alias
-- [ ] delete_imported_key_material
-- [X] describe_key
-- [ ] disable_key
-- [X] disable_key_rotation
-- [ ] enable_key
-- [X] enable_key_rotation
-- [ ] encrypt
-- [ ] generate_data_key
-- [ ] generate_data_key_without_plaintext
-- [ ] generate_random
-- [X] get_key_policy
-- [X] get_key_rotation_status
-- [ ] get_parameters_for_import
-- [ ] import_key_material
-- [ ] list_aliases
-- [ ] list_grants
-- [ ] list_key_policies
-- [X] list_keys
-- [ ] list_resource_tags
-- [ ] list_retirable_grants
-- [X] put_key_policy
-- [ ] re_encrypt
-- [ ] retire_grant
-- [ ] revoke_grant
-- [ ] schedule_key_deletion
-- [ ] tag_resource
-- [ ] untag_resource
-- [ ] update_alias
-- [ ] update_key_description
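The checked KMS calls cover key creation and the rotation flags, so a rotation round trip can be tested. A minimal sketch, assuming moto's `mock_kms` decorator (the key description is hypothetical):

```python
import boto3
from moto import mock_kms

@mock_kms
def test_key_rotation():
    client = boto3.client("kms", region_name="us-east-1")
    # create_key, enable_key_rotation and get_key_rotation_status are marked [X] above
    key_id = client.create_key(Description="test key")["KeyMetadata"]["KeyId"]
    client.enable_key_rotation(KeyId=key_id)
    assert client.get_key_rotation_status(KeyId=key_id)["KeyRotationEnabled"] is True
```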
-
-## lambda - 0% implemented
-- [ ] add_permission
-- [ ] create_alias
-- [ ] create_event_source_mapping
-- [ ] create_function
-- [ ] delete_alias
-- [ ] delete_event_source_mapping
-- [ ] delete_function
-- [ ] delete_function_concurrency
-- [ ] get_account_settings
-- [ ] get_alias
-- [ ] get_event_source_mapping
-- [ ] get_function
-- [ ] get_function_configuration
-- [ ] get_policy
-- [ ] invoke
-- [ ] invoke_async
-- [ ] list_aliases
-- [ ] list_event_source_mappings
-- [ ] list_functions
-- [ ] list_tags
-- [ ] list_versions_by_function
-- [ ] publish_version
-- [ ] put_function_concurrency
-- [ ] remove_permission
-- [ ] tag_resource
-- [ ] untag_resource
-- [ ] update_alias
-- [ ] update_event_source_mapping
-- [ ] update_function_code
-- [ ] update_function_configuration
-
-## lex-models - 0% implemented
-- [ ] create_bot_version
-- [ ] create_intent_version
-- [ ] create_slot_type_version
-- [ ] delete_bot
-- [ ] delete_bot_alias
-- [ ] delete_bot_channel_association
-- [ ] delete_bot_version
-- [ ] delete_intent
-- [ ] delete_intent_version
-- [ ] delete_slot_type
-- [ ] delete_slot_type_version
-- [ ] delete_utterances
-- [ ] get_bot
-- [ ] get_bot_alias
-- [ ] get_bot_aliases
-- [ ] get_bot_channel_association
-- [ ] get_bot_channel_associations
-- [ ] get_bot_versions
-- [ ] get_bots
-- [ ] get_builtin_intent
-- [ ] get_builtin_intents
-- [ ] get_builtin_slot_types
-- [ ] get_export
-- [ ] get_import
-- [ ] get_intent
-- [ ] get_intent_versions
-- [ ] get_intents
-- [ ] get_slot_type
-- [ ] get_slot_type_versions
-- [ ] get_slot_types
-- [ ] get_utterances_view
-- [ ] put_bot
-- [ ] put_bot_alias
-- [ ] put_intent
-- [ ] put_slot_type
-- [ ] start_import
-
-## lex-runtime - 0% implemented
-- [ ] post_content
-- [ ] post_text
-
-## lightsail - 0% implemented
-- [ ] allocate_static_ip
-- [ ] attach_disk
-- [ ] attach_instances_to_load_balancer
-- [ ] attach_load_balancer_tls_certificate
-- [ ] attach_static_ip
-- [ ] close_instance_public_ports
-- [ ] create_disk
-- [ ] create_disk_from_snapshot
-- [ ] create_disk_snapshot
-- [ ] create_domain
-- [ ] create_domain_entry
-- [ ] create_instance_snapshot
-- [ ] create_instances
-- [ ] create_instances_from_snapshot
-- [ ] create_key_pair
-- [ ] create_load_balancer
-- [ ] create_load_balancer_tls_certificate
-- [ ] delete_disk
-- [ ] delete_disk_snapshot
-- [ ] delete_domain
-- [ ] delete_domain_entry
-- [ ] delete_instance
-- [ ] delete_instance_snapshot
-- [ ] delete_key_pair
-- [ ] delete_load_balancer
-- [ ] delete_load_balancer_tls_certificate
-- [ ] detach_disk
-- [ ] detach_instances_from_load_balancer
-- [ ] detach_static_ip
-- [ ] download_default_key_pair
-- [ ] get_active_names
-- [ ] get_blueprints
-- [ ] get_bundles
-- [ ] get_disk
-- [ ] get_disk_snapshot
-- [ ] get_disk_snapshots
-- [ ] get_disks
-- [ ] get_domain
-- [ ] get_domains
-- [ ] get_instance
-- [ ] get_instance_access_details
-- [ ] get_instance_metric_data
-- [ ] get_instance_port_states
-- [ ] get_instance_snapshot
-- [ ] get_instance_snapshots
-- [ ] get_instance_state
-- [ ] get_instances
-- [ ] get_key_pair
-- [ ] get_key_pairs
-- [ ] get_load_balancer
-- [ ] get_load_balancer_metric_data
-- [ ] get_load_balancer_tls_certificates
-- [ ] get_load_balancers
-- [ ] get_operation
-- [ ] get_operations
-- [ ] get_operations_for_resource
-- [ ] get_regions
-- [ ] get_static_ip
-- [ ] get_static_ips
-- [ ] import_key_pair
-- [ ] is_vpc_peered
-- [ ] open_instance_public_ports
-- [ ] peer_vpc
-- [ ] put_instance_public_ports
-- [ ] reboot_instance
-- [ ] release_static_ip
-- [ ] start_instance
-- [ ] stop_instance
-- [ ] unpeer_vpc
-- [ ] update_domain_entry
-- [ ] update_load_balancer_attribute
-
-## logs - 27% implemented
-- [ ] associate_kms_key
-- [ ] cancel_export_task
-- [ ] create_export_task
-- [X] create_log_group
-- [X] create_log_stream
-- [ ] delete_destination
-- [X] delete_log_group
-- [X] delete_log_stream
-- [ ] delete_metric_filter
-- [ ] delete_resource_policy
-- [ ] delete_retention_policy
-- [ ] delete_subscription_filter
-- [ ] describe_destinations
-- [ ] describe_export_tasks
-- [X] describe_log_groups
-- [X] describe_log_streams
-- [ ] describe_metric_filters
-- [ ] describe_resource_policies
-- [ ] describe_subscription_filters
-- [ ] disassociate_kms_key
-- [X] filter_log_events
-- [X] get_log_events
-- [ ] list_tags_log_group
-- [ ] put_destination
-- [ ] put_destination_policy
-- [X] put_log_events
-- [ ] put_metric_filter
-- [ ] put_resource_policy
-- [ ] put_retention_policy
-- [ ] put_subscription_filter
-- [ ] tag_log_group
-- [ ] test_metric_filter
-- [ ] untag_log_group
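The checked CloudWatch Logs calls form a complete write/read path. A minimal sketch, assuming moto's `mock_logs` decorator (group and stream names are hypothetical):

```python
import boto3
from moto import mock_logs

@mock_logs
def test_log_round_trip():
    client = boto3.client("logs", region_name="us-east-1")
    # create_log_group, create_log_stream, put_log_events and get_log_events
    # are all marked [X] above
    client.create_log_group(logGroupName="my-group")
    client.create_log_stream(logGroupName="my-group", logStreamName="my-stream")
    client.put_log_events(
        logGroupName="my-group",
        logStreamName="my-stream",
        logEvents=[{"timestamp": 0, "message": "hello"}],
    )
    events = client.get_log_events(
        logGroupName="my-group", logStreamName="my-stream"
    )["events"]
    assert events[0]["message"] == "hello"
```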
-
-## machinelearning - 0% implemented
-- [ ] add_tags
-- [ ] create_batch_prediction
-- [ ] create_data_source_from_rds
-- [ ] create_data_source_from_redshift
-- [ ] create_data_source_from_s3
-- [ ] create_evaluation
-- [ ] create_ml_model
-- [ ] create_realtime_endpoint
-- [ ] delete_batch_prediction
-- [ ] delete_data_source
-- [ ] delete_evaluation
-- [ ] delete_ml_model
-- [ ] delete_realtime_endpoint
-- [ ] delete_tags
-- [ ] describe_batch_predictions
-- [ ] describe_data_sources
-- [ ] describe_evaluations
-- [ ] describe_ml_models
-- [ ] describe_tags
-- [ ] get_batch_prediction
-- [ ] get_data_source
-- [ ] get_evaluation
-- [ ] get_ml_model
-- [ ] predict
-- [ ] update_batch_prediction
-- [ ] update_data_source
-- [ ] update_evaluation
-- [ ] update_ml_model
-
-## marketplace-entitlement - 0% implemented
-- [ ] get_entitlements
-
-## marketplacecommerceanalytics - 0% implemented
-- [ ] generate_data_set
-- [ ] start_support_data_export
-
-## mediaconvert - 0% implemented
-- [ ] cancel_job
-- [ ] create_job
-- [ ] create_job_template
-- [ ] create_preset
-- [ ] create_queue
-- [ ] delete_job_template
-- [ ] delete_preset
-- [ ] delete_queue
-- [ ] describe_endpoints
-- [ ] get_job
-- [ ] get_job_template
-- [ ] get_preset
-- [ ] get_queue
-- [ ] list_job_templates
-- [ ] list_jobs
-- [ ] list_presets
-- [ ] list_queues
-- [ ] update_job_template
-- [ ] update_preset
-- [ ] update_queue
-
-## medialive - 0% implemented
-- [ ] create_channel
-- [ ] create_input
-- [ ] create_input_security_group
-- [ ] delete_channel
-- [ ] delete_input
-- [ ] delete_input_security_group
-- [ ] describe_channel
-- [ ] describe_input
-- [ ] describe_input_security_group
-- [ ] list_channels
-- [ ] list_input_security_groups
-- [ ] list_inputs
-- [ ] start_channel
-- [ ] stop_channel
-- [ ] update_channel
-- [ ] update_input
-- [ ] update_input_security_group
-
-## mediapackage - 0% implemented
-- [ ] create_channel
-- [ ] create_origin_endpoint
-- [ ] delete_channel
-- [ ] delete_origin_endpoint
-- [ ] describe_channel
-- [ ] describe_origin_endpoint
-- [ ] list_channels
-- [ ] list_origin_endpoints
-- [ ] rotate_channel_credentials
-- [ ] update_channel
-- [ ] update_origin_endpoint
-
-## mediastore - 0% implemented
-- [ ] create_container
-- [ ] delete_container
-- [ ] delete_container_policy
-- [ ] delete_cors_policy
-- [ ] describe_container
-- [ ] get_container_policy
-- [ ] get_cors_policy
-- [ ] list_containers
-- [ ] put_container_policy
-- [ ] put_cors_policy
-
-## mediastore-data - 0% implemented
-- [ ] delete_object
-- [ ] describe_object
-- [ ] get_object
-- [ ] list_items
-- [ ] put_object
-
-## meteringmarketplace - 0% implemented
-- [ ] batch_meter_usage
-- [ ] meter_usage
-- [ ] resolve_customer
-
-## mgh - 0% implemented
-- [ ] associate_created_artifact
-- [ ] associate_discovered_resource
-- [ ] create_progress_update_stream
-- [ ] delete_progress_update_stream
-- [ ] describe_application_state
-- [ ] describe_migration_task
-- [ ] disassociate_created_artifact
-- [ ] disassociate_discovered_resource
-- [ ] import_migration_task
-- [ ] list_created_artifacts
-- [ ] list_discovered_resources
-- [ ] list_migration_tasks
-- [ ] list_progress_update_streams
-- [ ] notify_application_state
-- [ ] notify_migration_task_state
-- [ ] put_resource_attributes
-
-## mobile - 0% implemented
-- [ ] create_project
-- [ ] delete_project
-- [ ] describe_bundle
-- [ ] describe_project
-- [ ] export_bundle
-- [ ] export_project
-- [ ] list_bundles
-- [ ] list_projects
-- [ ] update_project
-
-## mq - 0% implemented
-- [ ] create_broker
-- [ ] create_configuration
-- [ ] create_user
-- [ ] delete_broker
-- [ ] delete_user
-- [ ] describe_broker
-- [ ] describe_configuration
-- [ ] describe_configuration_revision
-- [ ] describe_user
-- [ ] list_brokers
-- [ ] list_configuration_revisions
-- [ ] list_configurations
-- [ ] list_users
-- [ ] reboot_broker
-- [ ] update_broker
-- [ ] update_configuration
-- [ ] update_user
-
-## mturk - 0% implemented
-- [ ] accept_qualification_request
-- [ ] approve_assignment
-- [ ] associate_qualification_with_worker
-- [ ] create_additional_assignments_for_hit
-- [ ] create_hit
-- [ ] create_hit_type
-- [ ] create_hit_with_hit_type
-- [ ] create_qualification_type
-- [ ] create_worker_block
-- [ ] delete_hit
-- [ ] delete_qualification_type
-- [ ] delete_worker_block
-- [ ] disassociate_qualification_from_worker
-- [ ] get_account_balance
-- [ ] get_assignment
-- [ ] get_file_upload_url
-- [ ] get_hit
-- [ ] get_qualification_score
-- [ ] get_qualification_type
-- [ ] list_assignments_for_hit
-- [ ] list_bonus_payments
-- [ ] list_hits
-- [ ] list_hits_for_qualification_type
-- [ ] list_qualification_requests
-- [ ] list_qualification_types
-- [ ] list_review_policy_results_for_hit
-- [ ] list_reviewable_hits
-- [ ] list_worker_blocks
-- [ ] list_workers_with_qualification_type
-- [ ] notify_workers
-- [ ] reject_assignment
-- [ ] reject_qualification_request
-- [ ] send_bonus
-- [ ] send_test_event_notification
-- [ ] update_expiration_for_hit
-- [ ] update_hit_review_status
-- [ ] update_hit_type_of_hit
-- [ ] update_notification_settings
-- [ ] update_qualification_type
-
-## opsworks - 12% implemented
-- [ ] assign_instance
-- [ ] assign_volume
-- [ ] associate_elastic_ip
-- [ ] attach_elastic_load_balancer
-- [ ] clone_stack
-- [X] create_app
-- [ ] create_deployment
-- [X] create_instance
-- [X] create_layer
-- [X] create_stack
-- [ ] create_user_profile
-- [ ] delete_app
-- [ ] delete_instance
-- [ ] delete_layer
-- [ ] delete_stack
-- [ ] delete_user_profile
-- [ ] deregister_ecs_cluster
-- [ ] deregister_elastic_ip
-- [ ] deregister_instance
-- [ ] deregister_rds_db_instance
-- [ ] deregister_volume
-- [ ] describe_agent_versions
-- [X] describe_apps
-- [ ] describe_commands
-- [ ] describe_deployments
-- [ ] describe_ecs_clusters
-- [ ] describe_elastic_ips
-- [ ] describe_elastic_load_balancers
-- [X] describe_instances
-- [X] describe_layers
-- [ ] describe_load_based_auto_scaling
-- [ ] describe_my_user_profile
-- [ ] describe_operating_systems
-- [ ] describe_permissions
-- [ ] describe_raid_arrays
-- [ ] describe_rds_db_instances
-- [ ] describe_service_errors
-- [ ] describe_stack_provisioning_parameters
-- [ ] describe_stack_summary
-- [X] describe_stacks
-- [ ] describe_time_based_auto_scaling
-- [ ] describe_user_profiles
-- [ ] describe_volumes
-- [ ] detach_elastic_load_balancer
-- [ ] disassociate_elastic_ip
-- [ ] get_hostname_suggestion
-- [ ] grant_access
-- [ ] list_tags
-- [ ] reboot_instance
-- [ ] register_ecs_cluster
-- [ ] register_elastic_ip
-- [ ] register_instance
-- [ ] register_rds_db_instance
-- [ ] register_volume
-- [ ] set_load_based_auto_scaling
-- [ ] set_permission
-- [ ] set_time_based_auto_scaling
-- [X] start_instance
-- [ ] start_stack
-- [ ] stop_instance
-- [ ] stop_stack
-- [ ] tag_resource
-- [ ] unassign_instance
-- [ ] unassign_volume
-- [ ] untag_resource
-- [ ] update_app
-- [ ] update_elastic_ip
-- [ ] update_instance
-- [ ] update_layer
-- [ ] update_my_user_profile
-- [ ] update_rds_db_instance
-- [ ] update_stack
-- [ ] update_user_profile
-- [ ] update_volume
-
-## opsworkscm - 0% implemented
-- [ ] associate_node
-- [ ] create_backup
-- [ ] create_server
-- [ ] delete_backup
-- [ ] delete_server
-- [ ] describe_account_attributes
-- [ ] describe_backups
-- [ ] describe_events
-- [ ] describe_node_association_status
-- [ ] describe_servers
-- [ ] disassociate_node
-- [ ] restore_server
-- [ ] start_maintenance
-- [ ] update_server
-- [ ] update_server_engine_attributes
-
-## organizations - 30% implemented
-- [ ] accept_handshake
-- [ ] attach_policy
-- [ ] cancel_handshake
-- [X] create_account
-- [X] create_organization
-- [X] create_organizational_unit
-- [ ] create_policy
-- [ ] decline_handshake
-- [ ] delete_organization
-- [ ] delete_organizational_unit
-- [ ] delete_policy
-- [X] describe_account
-- [ ] describe_create_account_status
-- [ ] describe_handshake
-- [X] describe_organization
-- [X] describe_organizational_unit
-- [ ] describe_policy
-- [ ] detach_policy
-- [ ] disable_aws_service_access
-- [ ] disable_policy_type
-- [ ] enable_all_features
-- [ ] enable_aws_service_access
-- [ ] enable_policy_type
-- [ ] invite_account_to_organization
-- [ ] leave_organization
-- [X] list_accounts
-- [X] list_accounts_for_parent
-- [ ] list_aws_service_access_for_organization
-- [X] list_children
-- [ ] list_create_account_status
-- [ ] list_handshakes_for_account
-- [ ] list_handshakes_for_organization
-- [X] list_organizational_units_for_parent
-- [X] list_parents
-- [ ] list_policies
-- [ ] list_policies_for_target
-- [X] list_roots
-- [ ] list_targets_for_policy
-- [X] move_account
-- [ ] remove_account_from_organization
-- [ ] update_organizational_unit
-- [ ] update_policy
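The checked Organizations calls are enough to build and walk a small org tree. A minimal sketch, assuming moto's `mock_organizations` decorator (the OU name is hypothetical):

```python
import boto3
from moto import mock_organizations

@mock_organizations
def test_org_tree():
    client = boto3.client("organizations", region_name="us-east-1")
    # create_organization, list_roots, create_organizational_unit and
    # list_parents are all marked [X] above
    client.create_organization(FeatureSet="ALL")
    root_id = client.list_roots()["Roots"][0]["Id"]
    ou = client.create_organizational_unit(ParentId=root_id, Name="my-ou")
    parents = client.list_parents(ChildId=ou["OrganizationalUnit"]["Id"])["Parents"]
    assert parents[0]["Id"] == root_id
```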
-
-## pinpoint - 0% implemented
-- [ ] create_app
-- [ ] create_campaign
-- [ ] create_export_job
-- [ ] create_import_job
-- [ ] create_segment
-- [ ] delete_adm_channel
-- [ ] delete_apns_channel
-- [ ] delete_apns_sandbox_channel
-- [ ] delete_apns_voip_channel
-- [ ] delete_apns_voip_sandbox_channel
-- [ ] delete_app
-- [ ] delete_baidu_channel
-- [ ] delete_campaign
-- [ ] delete_email_channel
-- [ ] delete_endpoint
-- [ ] delete_event_stream
-- [ ] delete_gcm_channel
-- [ ] delete_segment
-- [ ] delete_sms_channel
-- [ ] get_adm_channel
-- [ ] get_apns_channel
-- [ ] get_apns_sandbox_channel
-- [ ] get_apns_voip_channel
-- [ ] get_apns_voip_sandbox_channel
-- [ ] get_app
-- [ ] get_application_settings
-- [ ] get_apps
-- [ ] get_baidu_channel
-- [ ] get_campaign
-- [ ] get_campaign_activities
-- [ ] get_campaign_version
-- [ ] get_campaign_versions
-- [ ] get_campaigns
-- [ ] get_email_channel
-- [ ] get_endpoint
-- [ ] get_event_stream
-- [ ] get_export_job
-- [ ] get_export_jobs
-- [ ] get_gcm_channel
-- [ ] get_import_job
-- [ ] get_import_jobs
-- [ ] get_segment
-- [ ] get_segment_export_jobs
-- [ ] get_segment_import_jobs
-- [ ] get_segment_version
-- [ ] get_segment_versions
-- [ ] get_segments
-- [ ] get_sms_channel
-- [ ] put_event_stream
-- [ ] send_messages
-- [ ] send_users_messages
-- [ ] update_adm_channel
-- [ ] update_apns_channel
-- [ ] update_apns_sandbox_channel
-- [ ] update_apns_voip_channel
-- [ ] update_apns_voip_sandbox_channel
-- [ ] update_application_settings
-- [ ] update_baidu_channel
-- [ ] update_campaign
-- [ ] update_email_channel
-- [ ] update_endpoint
-- [ ] update_endpoints_batch
-- [ ] update_gcm_channel
-- [ ] update_segment
-- [ ] update_sms_channel
-
-## polly - 83% implemented
-- [X] delete_lexicon
-- [X] describe_voices
-- [X] get_lexicon
-- [X] list_lexicons
-- [X] put_lexicon
-- [ ] synthesize_speech
-
-## pricing - 0% implemented
-- [ ] describe_services
-- [ ] get_attribute_values
-- [ ] get_products
-
-## rds - 0% implemented
-- [ ] add_role_to_db_cluster
-- [ ] add_source_identifier_to_subscription
-- [ ] add_tags_to_resource
-- [ ] apply_pending_maintenance_action
-- [ ] authorize_db_security_group_ingress
-- [ ] copy_db_cluster_parameter_group
-- [ ] copy_db_cluster_snapshot
-- [ ] copy_db_parameter_group
-- [ ] copy_db_snapshot
-- [ ] copy_option_group
-- [ ] create_db_cluster
-- [ ] create_db_cluster_parameter_group
-- [ ] create_db_cluster_snapshot
-- [ ] create_db_instance
-- [ ] create_db_instance_read_replica
-- [ ] create_db_parameter_group
-- [ ] create_db_security_group
-- [ ] create_db_snapshot
-- [ ] create_db_subnet_group
-- [ ] create_event_subscription
-- [ ] create_option_group
-- [ ] delete_db_cluster
-- [ ] delete_db_cluster_parameter_group
-- [ ] delete_db_cluster_snapshot
-- [ ] delete_db_instance
-- [ ] delete_db_parameter_group
-- [ ] delete_db_security_group
-- [ ] delete_db_snapshot
-- [ ] delete_db_subnet_group
-- [ ] delete_event_subscription
-- [ ] delete_option_group
-- [ ] describe_account_attributes
-- [ ] describe_certificates
-- [ ] describe_db_cluster_parameter_groups
-- [ ] describe_db_cluster_parameters
-- [ ] describe_db_cluster_snapshot_attributes
-- [ ] describe_db_cluster_snapshots
-- [ ] describe_db_clusters
-- [ ] describe_db_engine_versions
-- [ ] describe_db_instances
-- [ ] describe_db_log_files
-- [ ] describe_db_parameter_groups
-- [ ] describe_db_parameters
-- [ ] describe_db_security_groups
-- [ ] describe_db_snapshot_attributes
-- [ ] describe_db_snapshots
-- [ ] describe_db_subnet_groups
-- [ ] describe_engine_default_cluster_parameters
-- [ ] describe_engine_default_parameters
-- [ ] describe_event_categories
-- [ ] describe_event_subscriptions
-- [ ] describe_events
-- [ ] describe_option_group_options
-- [ ] describe_option_groups
-- [ ] describe_orderable_db_instance_options
-- [ ] describe_pending_maintenance_actions
-- [ ] describe_reserved_db_instances
-- [ ] describe_reserved_db_instances_offerings
-- [ ] describe_source_regions
-- [ ] describe_valid_db_instance_modifications
-- [ ] download_db_log_file_portion
-- [ ] failover_db_cluster
-- [ ] list_tags_for_resource
-- [ ] modify_db_cluster
-- [ ] modify_db_cluster_parameter_group
-- [ ] modify_db_cluster_snapshot_attribute
-- [ ] modify_db_instance
-- [ ] modify_db_parameter_group
-- [ ] modify_db_snapshot
-- [ ] modify_db_snapshot_attribute
-- [ ] modify_db_subnet_group
-- [ ] modify_event_subscription
-- [ ] modify_option_group
-- [ ] promote_read_replica
-- [ ] promote_read_replica_db_cluster
-- [ ] purchase_reserved_db_instances_offering
-- [ ] reboot_db_instance
-- [ ] remove_role_from_db_cluster
-- [ ] remove_source_identifier_from_subscription
-- [ ] remove_tags_from_resource
-- [ ] reset_db_cluster_parameter_group
-- [ ] reset_db_parameter_group
-- [ ] restore_db_cluster_from_s3
-- [ ] restore_db_cluster_from_snapshot
-- [ ] restore_db_cluster_to_point_in_time
-- [ ] restore_db_instance_from_db_snapshot
-- [ ] restore_db_instance_from_s3
-- [ ] restore_db_instance_to_point_in_time
-- [ ] revoke_db_security_group_ingress
-- [ ] start_db_instance
-- [ ] stop_db_instance
-
-## redshift - 41% implemented
-- [ ] authorize_cluster_security_group_ingress
-- [ ] authorize_snapshot_access
-- [ ] copy_cluster_snapshot
-- [X] create_cluster
-- [X] create_cluster_parameter_group
-- [X] create_cluster_security_group
-- [X] create_cluster_snapshot
-- [X] create_cluster_subnet_group
-- [ ] create_event_subscription
-- [ ] create_hsm_client_certificate
-- [ ] create_hsm_configuration
-- [X] create_snapshot_copy_grant
-- [X] create_tags
-- [X] delete_cluster
-- [X] delete_cluster_parameter_group
-- [X] delete_cluster_security_group
-- [X] delete_cluster_snapshot
-- [X] delete_cluster_subnet_group
-- [ ] delete_event_subscription
-- [ ] delete_hsm_client_certificate
-- [ ] delete_hsm_configuration
-- [X] delete_snapshot_copy_grant
-- [X] delete_tags
-- [X] describe_cluster_parameter_groups
-- [ ] describe_cluster_parameters
-- [X] describe_cluster_security_groups
-- [X] describe_cluster_snapshots
-- [X] describe_cluster_subnet_groups
-- [ ] describe_cluster_versions
-- [X] describe_clusters
-- [ ] describe_default_cluster_parameters
-- [ ] describe_event_categories
-- [ ] describe_event_subscriptions
-- [ ] describe_events
-- [ ] describe_hsm_client_certificates
-- [ ] describe_hsm_configurations
-- [ ] describe_logging_status
-- [ ] describe_orderable_cluster_options
-- [ ] describe_reserved_node_offerings
-- [ ] describe_reserved_nodes
-- [ ] describe_resize
-- [X] describe_snapshot_copy_grants
-- [ ] describe_table_restore_status
-- [X] describe_tags
-- [ ] disable_logging
-- [X] disable_snapshot_copy
-- [ ] enable_logging
-- [X] enable_snapshot_copy
-- [ ] get_cluster_credentials
-- [X] modify_cluster
-- [ ] modify_cluster_iam_roles
-- [ ] modify_cluster_parameter_group
-- [ ] modify_cluster_subnet_group
-- [ ] modify_event_subscription
-- [X] modify_snapshot_copy_retention_period
-- [ ] purchase_reserved_node_offering
-- [ ] reboot_cluster
-- [ ] reset_cluster_parameter_group
-- [X] restore_from_cluster_snapshot
-- [ ] restore_table_from_cluster_snapshot
-- [ ] revoke_cluster_security_group_ingress
-- [ ] revoke_snapshot_access
-- [ ] rotate_encryption_key
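The checked Redshift cluster CRUD calls support a basic create/describe check. A minimal sketch, assuming moto's `mock_redshift` decorator (identifier, node type, and credentials are hypothetical):

```python
import boto3
from moto import mock_redshift

@mock_redshift
def test_cluster():
    client = boto3.client("redshift", region_name="us-east-1")
    # create_cluster and describe_clusters are marked [X] above
    client.create_cluster(
        ClusterIdentifier="my-cluster",
        NodeType="dc1.large",
        MasterUsername="admin",
        MasterUserPassword="Password1",
    )
    clusters = client.describe_clusters(ClusterIdentifier="my-cluster")["Clusters"]
    assert clusters[0]["ClusterIdentifier"] == "my-cluster"
```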
-
-## rekognition - 0% implemented
-- [ ] compare_faces
-- [ ] create_collection
-- [ ] create_stream_processor
-- [ ] delete_collection
-- [ ] delete_faces
-- [ ] delete_stream_processor
-- [ ] describe_stream_processor
-- [ ] detect_faces
-- [ ] detect_labels
-- [ ] detect_moderation_labels
-- [ ] detect_text
-- [ ] get_celebrity_info
-- [ ] get_celebrity_recognition
-- [ ] get_content_moderation
-- [ ] get_face_detection
-- [ ] get_face_search
-- [ ] get_label_detection
-- [ ] get_person_tracking
-- [ ] index_faces
-- [ ] list_collections
-- [ ] list_faces
-- [ ] list_stream_processors
-- [ ] recognize_celebrities
-- [ ] search_faces
-- [ ] search_faces_by_image
-- [ ] start_celebrity_recognition
-- [ ] start_content_moderation
-- [ ] start_face_detection
-- [ ] start_face_search
-- [ ] start_label_detection
-- [ ] start_person_tracking
-- [ ] start_stream_processor
-- [ ] stop_stream_processor
-
-## resource-groups - 0% implemented
-- [ ] create_group
-- [ ] delete_group
-- [ ] get_group
-- [ ] get_group_query
-- [ ] get_tags
-- [ ] list_group_resources
-- [ ] list_groups
-- [ ] search_resources
-- [ ] tag
-- [ ] untag
-- [ ] update_group
-- [ ] update_group_query
-
-## resourcegroupstaggingapi - 60% implemented
-- [X] get_resources
-- [X] get_tag_keys
-- [X] get_tag_values
-- [ ] tag_resources
-- [ ] untag_resources
-
-## route53 - 12% implemented
-- [ ] associate_vpc_with_hosted_zone
-- [ ] change_resource_record_sets
-- [X] change_tags_for_resource
-- [X] create_health_check
-- [X] create_hosted_zone
-- [ ] create_query_logging_config
-- [ ] create_reusable_delegation_set
-- [ ] create_traffic_policy
-- [ ] create_traffic_policy_instance
-- [ ] create_traffic_policy_version
-- [ ] create_vpc_association_authorization
-- [X] delete_health_check
-- [X] delete_hosted_zone
-- [ ] delete_query_logging_config
-- [ ] delete_reusable_delegation_set
-- [ ] delete_traffic_policy
-- [ ] delete_traffic_policy_instance
-- [ ] delete_vpc_association_authorization
-- [ ] disassociate_vpc_from_hosted_zone
-- [ ] get_account_limit
-- [ ] get_change
-- [ ] get_checker_ip_ranges
-- [ ] get_geo_location
-- [ ] get_health_check
-- [ ] get_health_check_count
-- [ ] get_health_check_last_failure_reason
-- [ ] get_health_check_status
-- [X] get_hosted_zone
-- [ ] get_hosted_zone_count
-- [ ] get_hosted_zone_limit
-- [ ] get_query_logging_config
-- [ ] get_reusable_delegation_set
-- [ ] get_reusable_delegation_set_limit
-- [ ] get_traffic_policy
-- [ ] get_traffic_policy_instance
-- [ ] get_traffic_policy_instance_count
-- [ ] list_geo_locations
-- [ ] list_health_checks
-- [ ] list_hosted_zones
-- [ ] list_hosted_zones_by_name
-- [ ] list_query_logging_configs
-- [ ] list_resource_record_sets
-- [ ] list_reusable_delegation_sets
-- [X] list_tags_for_resource
-- [ ] list_tags_for_resources
-- [ ] list_traffic_policies
-- [ ] list_traffic_policy_instances
-- [ ] list_traffic_policy_instances_by_hosted_zone
-- [ ] list_traffic_policy_instances_by_policy
-- [ ] list_traffic_policy_versions
-- [ ] list_vpc_association_authorizations
-- [ ] test_dns_answer
-- [ ] update_health_check
-- [ ] update_hosted_zone_comment
-- [ ] update_traffic_policy_comment
-- [ ] update_traffic_policy_instance
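Route 53 coverage is thin, but hosted zone create/get/delete is checked. A minimal sketch, assuming moto's `mock_route53` decorator (the zone name and caller reference are hypothetical):

```python
import boto3
from moto import mock_route53

@mock_route53
def test_hosted_zone():
    client = boto3.client("route53")
    # create_hosted_zone, get_hosted_zone and delete_hosted_zone are marked [X] above
    zone = client.create_hosted_zone(
        Name="example.com", CallerReference="unique-ref-1"
    )["HostedZone"]
    fetched = client.get_hosted_zone(Id=zone["Id"])
    assert fetched["HostedZone"]["Id"] == zone["Id"]
    client.delete_hosted_zone(Id=zone["Id"])
```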
-
-## route53domains - 0% implemented
-- [ ] check_domain_availability
-- [ ] check_domain_transferability
-- [ ] delete_tags_for_domain
-- [ ] disable_domain_auto_renew
-- [ ] disable_domain_transfer_lock
-- [ ] enable_domain_auto_renew
-- [ ] enable_domain_transfer_lock
-- [ ] get_contact_reachability_status
-- [ ] get_domain_detail
-- [ ] get_domain_suggestions
-- [ ] get_operation_detail
-- [ ] list_domains
-- [ ] list_operations
-- [ ] list_tags_for_domain
-- [ ] register_domain
-- [ ] renew_domain
-- [ ] resend_contact_reachability_email
-- [ ] retrieve_domain_auth_code
-- [ ] transfer_domain
-- [ ] update_domain_contact
-- [ ] update_domain_contact_privacy
-- [ ] update_domain_nameservers
-- [ ] update_tags_for_domain
-- [ ] view_billing
-
-## s3 - 15% implemented
-- [ ] abort_multipart_upload
-- [ ] complete_multipart_upload
-- [ ] copy_object
-- [X] create_bucket
-- [ ] create_multipart_upload
-- [X] delete_bucket
-- [ ] delete_bucket_analytics_configuration
-- [X] delete_bucket_cors
-- [ ] delete_bucket_encryption
-- [ ] delete_bucket_inventory_configuration
-- [ ] delete_bucket_lifecycle
-- [ ] delete_bucket_metrics_configuration
-- [X] delete_bucket_policy
-- [ ] delete_bucket_replication
-- [X] delete_bucket_tagging
-- [ ] delete_bucket_website
-- [ ] delete_object
-- [ ] delete_object_tagging
-- [ ] delete_objects
-- [ ] get_bucket_accelerate_configuration
-- [X] get_bucket_acl
-- [ ] get_bucket_analytics_configuration
-- [ ] get_bucket_cors
-- [ ] get_bucket_encryption
-- [ ] get_bucket_inventory_configuration
-- [ ] get_bucket_lifecycle
-- [ ] get_bucket_lifecycle_configuration
-- [ ] get_bucket_location
-- [ ] get_bucket_logging
-- [ ] get_bucket_metrics_configuration
-- [ ] get_bucket_notification
-- [ ] get_bucket_notification_configuration
-- [X] get_bucket_policy
-- [ ] get_bucket_replication
-- [ ] get_bucket_request_payment
-- [ ] get_bucket_tagging
-- [X] get_bucket_versioning
-- [ ] get_bucket_website
-- [ ] get_object
-- [ ] get_object_acl
-- [ ] get_object_tagging
-- [ ] get_object_torrent
-- [ ] head_bucket
-- [ ] head_object
-- [ ] list_bucket_analytics_configurations
-- [ ] list_bucket_inventory_configurations
-- [ ] list_bucket_metrics_configurations
-- [ ] list_buckets
-- [ ] list_multipart_uploads
-- [ ] list_object_versions
-- [ ] list_objects
-- [ ] list_objects_v2
-- [ ] list_parts
-- [ ] put_bucket_accelerate_configuration
-- [ ] put_bucket_acl
-- [ ] put_bucket_analytics_configuration
-- [X] put_bucket_cors
-- [ ] put_bucket_encryption
-- [ ] put_bucket_inventory_configuration
-- [ ] put_bucket_lifecycle
-- [ ] put_bucket_lifecycle_configuration
-- [X] put_bucket_logging
-- [ ] put_bucket_metrics_configuration
-- [ ] put_bucket_notification
-- [X] put_bucket_notification_configuration
-- [ ] put_bucket_policy
-- [ ] put_bucket_replication
-- [ ] put_bucket_request_payment
-- [X] put_bucket_tagging
-- [ ] put_bucket_versioning
-- [ ] put_bucket_website
-- [ ] put_object
-- [ ] put_object_acl
-- [ ] put_object_tagging
-- [ ] restore_object
-- [ ] select_object_content
-- [ ] upload_part
-- [ ] upload_part_copy
-
-## sagemaker - 0% implemented
-- [ ] add_tags
-- [ ] create_endpoint
-- [ ] create_endpoint_config
-- [ ] create_model
-- [ ] create_notebook_instance
-- [ ] create_notebook_instance_lifecycle_config
-- [ ] create_presigned_notebook_instance_url
-- [ ] create_training_job
-- [ ] delete_endpoint
-- [ ] delete_endpoint_config
-- [ ] delete_model
-- [ ] delete_notebook_instance
-- [ ] delete_notebook_instance_lifecycle_config
-- [ ] delete_tags
-- [ ] describe_endpoint
-- [ ] describe_endpoint_config
-- [ ] describe_model
-- [ ] describe_notebook_instance
-- [ ] describe_notebook_instance_lifecycle_config
-- [ ] describe_training_job
-- [ ] list_endpoint_configs
-- [ ] list_endpoints
-- [ ] list_models
-- [ ] list_notebook_instance_lifecycle_configs
-- [ ] list_notebook_instances
-- [ ] list_tags
-- [ ] list_training_jobs
-- [ ] start_notebook_instance
-- [ ] stop_notebook_instance
-- [ ] stop_training_job
-- [ ] update_endpoint
-- [ ] update_endpoint_weights_and_capacities
-- [ ] update_notebook_instance
-- [ ] update_notebook_instance_lifecycle_config
-
-## sagemaker-runtime - 0% implemented
-- [ ] invoke_endpoint
-
-## sdb - 0% implemented
-- [ ] batch_delete_attributes
-- [ ] batch_put_attributes
-- [ ] create_domain
-- [ ] delete_attributes
-- [ ] delete_domain
-- [ ] domain_metadata
-- [ ] get_attributes
-- [ ] list_domains
-- [ ] put_attributes
-- [ ] select
-
-## secretsmanager - 33% implemented
-- [ ] cancel_rotate_secret
-- [X] create_secret
-- [ ] delete_secret
-- [X] describe_secret
-- [X] get_random_password
-- [X] get_secret_value
-- [ ] list_secret_version_ids
-- [ ] list_secrets
-- [ ] put_secret_value
-- [ ] restore_secret
-- [X] rotate_secret
-- [ ] tag_resource
-- [ ] untag_resource
-- [ ] update_secret
-- [ ] update_secret_version_stage
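The checked Secrets Manager calls cover the common store/fetch cycle. A minimal sketch, assuming moto's `mock_secretsmanager` decorator (the secret name and value are hypothetical):

```python
import boto3
from moto import mock_secretsmanager

@mock_secretsmanager
def test_secret_round_trip():
    client = boto3.client("secretsmanager", region_name="us-east-1")
    # create_secret and get_secret_value are marked [X] above
    client.create_secret(Name="my-secret", SecretString="s3cr3t")
    value = client.get_secret_value(SecretId="my-secret")
    assert value["SecretString"] == "s3cr3t"
```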
-
-## serverlessrepo - 0% implemented
-- [ ] create_application
-- [ ] create_application_version
-- [ ] create_cloud_formation_change_set
-- [ ] delete_application
-- [ ] get_application
-- [ ] get_application_policy
-- [ ] list_application_versions
-- [ ] list_applications
-- [ ] put_application_policy
-- [ ] update_application
-
-## servicecatalog - 0% implemented
-- [ ] accept_portfolio_share
-- [ ] associate_principal_with_portfolio
-- [ ] associate_product_with_portfolio
-- [ ] associate_tag_option_with_resource
-- [ ] copy_product
-- [ ] create_constraint
-- [ ] create_portfolio
-- [ ] create_portfolio_share
-- [ ] create_product
-- [ ] create_provisioned_product_plan
-- [ ] create_provisioning_artifact
-- [ ] create_tag_option
-- [ ] delete_constraint
-- [ ] delete_portfolio
-- [ ] delete_portfolio_share
-- [ ] delete_product
-- [ ] delete_provisioned_product_plan
-- [ ] delete_provisioning_artifact
-- [ ] delete_tag_option
-- [ ] describe_constraint
-- [ ] describe_copy_product_status
-- [ ] describe_portfolio
-- [ ] describe_product
-- [ ] describe_product_as_admin
-- [ ] describe_product_view
-- [ ] describe_provisioned_product
-- [ ] describe_provisioned_product_plan
-- [ ] describe_provisioning_artifact
-- [ ] describe_provisioning_parameters
-- [ ] describe_record
-- [ ] describe_tag_option
-- [ ] disassociate_principal_from_portfolio
-- [ ] disassociate_product_from_portfolio
-- [ ] disassociate_tag_option_from_resource
-- [ ] execute_provisioned_product_plan
-- [ ] list_accepted_portfolio_shares
-- [ ] list_constraints_for_portfolio
-- [ ] list_launch_paths
-- [ ] list_portfolio_access
-- [ ] list_portfolios
-- [ ] list_portfolios_for_product
-- [ ] list_principals_for_portfolio
-- [ ] list_provisioned_product_plans
-- [ ] list_provisioning_artifacts
-- [ ] list_record_history
-- [ ] list_resources_for_tag_option
-- [ ] list_tag_options
-- [ ] provision_product
-- [ ] reject_portfolio_share
-- [ ] scan_provisioned_products
-- [ ] search_products
-- [ ] search_products_as_admin
-- [ ] search_provisioned_products
-- [ ] terminate_provisioned_product
-- [ ] update_constraint
-- [ ] update_portfolio
-- [ ] update_product
-- [ ] update_provisioned_product
-- [ ] update_provisioning_artifact
-- [ ] update_tag_option
-
-## servicediscovery - 0% implemented
-- [ ] create_private_dns_namespace
-- [ ] create_public_dns_namespace
-- [ ] create_service
-- [ ] delete_namespace
-- [ ] delete_service
-- [ ] deregister_instance
-- [ ] get_instance
-- [ ] get_instances_health_status
-- [ ] get_namespace
-- [ ] get_operation
-- [ ] get_service
-- [ ] list_instances
-- [ ] list_namespaces
-- [ ] list_operations
-- [ ] list_services
-- [ ] register_instance
-- [ ] update_instance_custom_health_status
-- [ ] update_service
-
-## ses - 11% implemented
-- [ ] clone_receipt_rule_set
-- [ ] create_configuration_set
-- [ ] create_configuration_set_event_destination
-- [ ] create_configuration_set_tracking_options
-- [ ] create_custom_verification_email_template
-- [ ] create_receipt_filter
-- [ ] create_receipt_rule
-- [ ] create_receipt_rule_set
-- [ ] create_template
-- [ ] delete_configuration_set
-- [ ] delete_configuration_set_event_destination
-- [ ] delete_configuration_set_tracking_options
-- [ ] delete_custom_verification_email_template
-- [X] delete_identity
-- [ ] delete_identity_policy
-- [ ] delete_receipt_filter
-- [ ] delete_receipt_rule
-- [ ] delete_receipt_rule_set
-- [ ] delete_template
-- [ ] delete_verified_email_address
-- [ ] describe_active_receipt_rule_set
-- [ ] describe_configuration_set
-- [ ] describe_receipt_rule
-- [ ] describe_receipt_rule_set
-- [ ] get_account_sending_enabled
-- [ ] get_custom_verification_email_template
-- [ ] get_identity_dkim_attributes
-- [ ] get_identity_mail_from_domain_attributes
-- [ ] get_identity_notification_attributes
-- [ ] get_identity_policies
-- [ ] get_identity_verification_attributes
-- [X] get_send_quota
-- [ ] get_send_statistics
-- [ ] get_template
-- [ ] list_configuration_sets
-- [ ] list_custom_verification_email_templates
-- [X] list_identities
-- [ ] list_identity_policies
-- [ ] list_receipt_filters
-- [ ] list_receipt_rule_sets
-- [ ] list_templates
-- [X] list_verified_email_addresses
-- [ ] put_identity_policy
-- [ ] reorder_receipt_rule_set
-- [ ] send_bounce
-- [ ] send_bulk_templated_email
-- [ ] send_custom_verification_email
-- [X] send_email
-- [X] send_raw_email
-- [ ] send_templated_email
-- [ ] set_active_receipt_rule_set
-- [ ] set_identity_dkim_enabled
-- [ ] set_identity_feedback_forwarding_enabled
-- [ ] set_identity_headers_in_notifications_enabled
-- [ ] set_identity_mail_from_domain
-- [ ] set_identity_notification_topic
-- [ ] set_receipt_rule_position
-- [ ] test_render_template
-- [ ] update_account_sending_enabled
-- [ ] update_configuration_set_event_destination
-- [ ] update_configuration_set_reputation_metrics_enabled
-- [ ] update_configuration_set_sending_enabled
-- [ ] update_configuration_set_tracking_options
-- [ ] update_custom_verification_email_template
-- [ ] update_receipt_rule
-- [ ] update_template
-- [ ] verify_domain_dkim
-- [ ] verify_domain_identity
-- [X] verify_email_address
-- [X] verify_email_identity
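SES coverage is small but includes the verify-then-send path. A minimal sketch, assuming moto's `mock_ses` decorator and that, like the real service, the mock rejects mail from unverified identities (addresses are hypothetical):

```python
import boto3
from moto import mock_ses

@mock_ses
def test_send_email():
    client = boto3.client("ses", region_name="us-east-1")
    # verify_email_identity and send_email are marked [X] above; sending
    # without verifying first should raise a MessageRejected error
    client.verify_email_identity(EmailAddress="sender@example.com")
    client.send_email(
        Source="sender@example.com",
        Destination={"ToAddresses": ["recipient@example.com"]},
        Message={
            "Subject": {"Data": "subject"},
            "Body": {"Text": {"Data": "body"}},
        },
    )
    # get_send_quota is also marked [X]; the sent counter should reflect the send
    assert int(client.get_send_quota()["SentLast24Hours"]) == 1
```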
-
-## shield - 0% implemented
-- [ ] create_protection
-- [ ] create_subscription
-- [ ] delete_protection
-- [ ] delete_subscription
-- [ ] describe_attack
-- [ ] describe_protection
-- [ ] describe_subscription
-- [ ] get_subscription_state
-- [ ] list_attacks
-- [ ] list_protections
-
-## sms - 0% implemented
-- [ ] create_replication_job
-- [ ] delete_replication_job
-- [ ] delete_server_catalog
-- [ ] disassociate_connector
-- [ ] get_connectors
-- [ ] get_replication_jobs
-- [ ] get_replication_runs
-- [ ] get_servers
-- [ ] import_server_catalog
-- [ ] start_on_demand_replication_run
-- [ ] update_replication_job
-
-## snowball - 0% implemented
-- [ ] cancel_cluster
-- [ ] cancel_job
-- [ ] create_address
-- [ ] create_cluster
-- [ ] create_job
-- [ ] describe_address
-- [ ] describe_addresses
-- [ ] describe_cluster
-- [ ] describe_job
-- [ ] get_job_manifest
-- [ ] get_job_unlock_code
-- [ ] get_snowball_usage
-- [ ] list_cluster_jobs
-- [ ] list_clusters
-- [ ] list_jobs
-- [ ] update_cluster
-- [ ] update_job
-
-## sns - 53% implemented
-- [ ] add_permission
-- [ ] check_if_phone_number_is_opted_out
-- [ ] confirm_subscription
-- [X] create_platform_application
-- [X] create_platform_endpoint
-- [X] create_topic
-- [X] delete_endpoint
-- [X] delete_platform_application
-- [X] delete_topic
-- [ ] get_endpoint_attributes
-- [ ] get_platform_application_attributes
-- [ ] get_sms_attributes
-- [X] get_subscription_attributes
-- [ ] get_topic_attributes
-- [X] list_endpoints_by_platform_application
-- [ ] list_phone_numbers_opted_out
-- [X] list_platform_applications
-- [X] list_subscriptions
-- [ ] list_subscriptions_by_topic
-- [X] list_topics
-- [ ] opt_in_phone_number
-- [X] publish
-- [ ] remove_permission
-- [X] set_endpoint_attributes
-- [ ] set_platform_application_attributes
-- [ ] set_sms_attributes
-- [X] set_subscription_attributes
-- [ ] set_topic_attributes
-- [X] subscribe
-- [X] unsubscribe
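The checked SNS calls cover topic creation and publishing. A minimal sketch, assuming moto's `mock_sns` decorator (the topic name and message are hypothetical):

```python
import boto3
from moto import mock_sns

@mock_sns
def test_topic_publish():
    client = boto3.client("sns", region_name="us-east-1")
    # create_topic, list_topics and publish are marked [X] above
    topic_arn = client.create_topic(Name="my-topic")["TopicArn"]
    assert client.list_topics()["Topics"][0]["TopicArn"] == topic_arn
    client.publish(TopicArn=topic_arn, Message="hello")
```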
-
-## sqs - 65% implemented
-- [X] add_permission
-- [X] change_message_visibility
-- [ ] change_message_visibility_batch
-- [X] create_queue
-- [X] delete_message
-- [ ] delete_message_batch
-- [X] delete_queue
-- [ ] get_queue_attributes
-- [ ] get_queue_url
-- [X] list_dead_letter_source_queues
-- [ ] list_queue_tags
-- [X] list_queues
-- [X] purge_queue
-- [ ] receive_message
-- [X] remove_permission
-- [X] send_message
-- [ ] send_message_batch
-- [X] set_queue_attributes
-- [X] tag_queue
-- [X] untag_queue
-
-## ssm - 11% implemented
-- [X] add_tags_to_resource
-- [ ] cancel_command
-- [ ] create_activation
-- [ ] create_association
-- [ ] create_association_batch
-- [ ] create_document
-- [ ] create_maintenance_window
-- [ ] create_patch_baseline
-- [ ] create_resource_data_sync
-- [ ] delete_activation
-- [ ] delete_association
-- [ ] delete_document
-- [ ] delete_maintenance_window
-- [X] delete_parameter
-- [X] delete_parameters
-- [ ] delete_patch_baseline
-- [ ] delete_resource_data_sync
-- [ ] deregister_managed_instance
-- [ ] deregister_patch_baseline_for_patch_group
-- [ ] deregister_target_from_maintenance_window
-- [ ] deregister_task_from_maintenance_window
-- [ ] describe_activations
-- [ ] describe_association
-- [ ] describe_automation_executions
-- [ ] describe_automation_step_executions
-- [ ] describe_available_patches
-- [ ] describe_document
-- [ ] describe_document_permission
-- [ ] describe_effective_instance_associations
-- [ ] describe_effective_patches_for_patch_baseline
-- [ ] describe_instance_associations_status
-- [ ] describe_instance_information
-- [ ] describe_instance_patch_states
-- [ ] describe_instance_patch_states_for_patch_group
-- [ ] describe_instance_patches
-- [ ] describe_maintenance_window_execution_task_invocations
-- [ ] describe_maintenance_window_execution_tasks
-- [ ] describe_maintenance_window_executions
-- [ ] describe_maintenance_window_targets
-- [ ] describe_maintenance_window_tasks
-- [ ] describe_maintenance_windows
-- [ ] describe_parameters
-- [ ] describe_patch_baselines
-- [ ] describe_patch_group_state
-- [ ] describe_patch_groups
-- [ ] get_automation_execution
-- [ ] get_command_invocation
-- [ ] get_default_patch_baseline
-- [ ] get_deployable_patch_snapshot_for_instance
-- [ ] get_document
-- [ ] get_inventory
-- [ ] get_inventory_schema
-- [ ] get_maintenance_window
-- [ ] get_maintenance_window_execution
-- [ ] get_maintenance_window_execution_task
-- [ ] get_maintenance_window_execution_task_invocation
-- [ ] get_maintenance_window_task
-- [X] get_parameter
-- [ ] get_parameter_history
-- [X] get_parameters
-- [X] get_parameters_by_path
-- [ ] get_patch_baseline
-- [ ] get_patch_baseline_for_patch_group
-- [ ] list_association_versions
-- [ ] list_associations
-- [ ] list_command_invocations
-- [X] list_commands
-- [ ] list_compliance_items
-- [ ] list_compliance_summaries
-- [ ] list_document_versions
-- [ ] list_documents
-- [ ] list_inventory_entries
-- [ ] list_resource_compliance_summaries
-- [ ] list_resource_data_sync
-- [X] list_tags_for_resource
-- [ ] modify_document_permission
-- [ ] put_compliance_items
-- [ ] put_inventory
-- [X] put_parameter
-- [ ] register_default_patch_baseline
-- [ ] register_patch_baseline_for_patch_group
-- [ ] register_target_with_maintenance_window
-- [ ] register_task_with_maintenance_window
-- [X] remove_tags_from_resource
-- [ ] send_automation_signal
-- [X] send_command
-- [ ] start_automation_execution
-- [ ] stop_automation_execution
-- [ ] update_association
-- [ ] update_association_status
-- [ ] update_document
-- [ ] update_document_default_version
-- [ ] update_maintenance_window
-- [ ] update_maintenance_window_target
-- [ ] update_maintenance_window_task
-- [ ] update_managed_instance_role
-- [ ] update_patch_baseline
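SSM's Parameter Store calls are among the checked endpoints. A minimal sketch of a put/get round trip, assuming moto's `mock_ssm` decorator (parameter name and value are hypothetical):

```python
import boto3
from moto import mock_ssm

@mock_ssm
def test_parameter_round_trip():
    client = boto3.client("ssm", region_name="us-east-1")
    # put_parameter and get_parameter are marked [X] above
    client.put_parameter(Name="/app/db-password", Value="hunter2", Type="SecureString")
    param = client.get_parameter(Name="/app/db-password", WithDecryption=True)
    assert param["Parameter"]["Value"] == "hunter2"
```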
create_snapshot_from_volume_recovery_point -- [ ] create_stored_iscsi_volume -- [ ] create_tape_with_barcode -- [ ] create_tapes -- [ ] delete_bandwidth_rate_limit -- [ ] delete_chap_credentials -- [ ] delete_file_share -- [ ] delete_gateway -- [ ] delete_snapshot_schedule -- [ ] delete_tape -- [ ] delete_tape_archive -- [ ] delete_volume -- [ ] describe_bandwidth_rate_limit -- [ ] describe_cache -- [ ] describe_cached_iscsi_volumes -- [ ] describe_chap_credentials -- [ ] describe_gateway_information -- [ ] describe_maintenance_start_time -- [ ] describe_nfs_file_shares -- [ ] describe_snapshot_schedule -- [ ] describe_stored_iscsi_volumes -- [ ] describe_tape_archives -- [ ] describe_tape_recovery_points -- [ ] describe_tapes -- [ ] describe_upload_buffer -- [ ] describe_vtl_devices -- [ ] describe_working_storage -- [ ] disable_gateway -- [ ] list_file_shares -- [ ] list_gateways -- [ ] list_local_disks -- [ ] list_tags_for_resource -- [ ] list_tapes -- [ ] list_volume_initiators -- [ ] list_volume_recovery_points -- [ ] list_volumes -- [ ] notify_when_uploaded -- [ ] refresh_cache -- [ ] remove_tags_from_resource -- [ ] reset_cache -- [ ] retrieve_tape_archive -- [ ] retrieve_tape_recovery_point -- [ ] set_local_console_password -- [ ] shutdown_gateway -- [ ] start_gateway -- [ ] update_bandwidth_rate_limit -- [ ] update_chap_credentials -- [ ] update_gateway_information -- [ ] update_gateway_software_now -- [ ] update_maintenance_start_time -- [ ] update_nfs_file_share -- [ ] update_snapshot_schedule -- [ ] update_vtl_device_type - -## sts - 42% implemented -- [X] assume_role -- [ ] assume_role_with_saml -- [ ] assume_role_with_web_identity -- [ ] decode_authorization_message -- [ ] get_caller_identity -- [X] get_federation_token -- [X] get_session_token - -## support - 0% implemented -- [ ] add_attachments_to_set -- [ ] add_communication_to_case -- [ ] create_case -- [ ] describe_attachment -- [ ] describe_cases -- [ ] describe_communications -- [ ] describe_services -- [ ] describe_severity_levels -- [ ] describe_trusted_advisor_check_refresh_statuses -- [ ] describe_trusted_advisor_check_result -- [ ] describe_trusted_advisor_check_summaries -- [ ] describe_trusted_advisor_checks -- [ ] refresh_trusted_advisor_check -- [ ] resolve_case - -## swf - 58% implemented -- [ ] count_closed_workflow_executions -- [ ] count_open_workflow_executions -- [X] count_pending_activity_tasks -- [X] count_pending_decision_tasks -- [ ] deprecate_activity_type -- [X] deprecate_domain -- [ ] deprecate_workflow_type -- [ ] describe_activity_type -- [X] describe_domain -- [X] describe_workflow_execution -- [ ] describe_workflow_type -- [ ] get_workflow_execution_history -- [ ] list_activity_types -- [X] list_closed_workflow_executions -- [X] list_domains -- [X] list_open_workflow_executions -- [ ] list_workflow_types -- [X] poll_for_activity_task -- [X] poll_for_decision_task -- [X] record_activity_task_heartbeat -- [ ] register_activity_type -- [X] register_domain -- [ ] register_workflow_type -- [ ] request_cancel_workflow_execution -- [ ] respond_activity_task_canceled -- [X] respond_activity_task_completed -- [X] respond_activity_task_failed -- [X] respond_decision_task_completed -- [X] signal_workflow_execution -- [X] start_workflow_execution -- [X] terminate_workflow_execution - -## transcribe - 0% implemented -- [ ] create_vocabulary -- [ ] delete_vocabulary -- [ ] get_transcription_job -- [ ] get_vocabulary -- [ ] list_transcription_jobs -- [ ] list_vocabularies -- [ ] start_transcription_job -- [ 
] update_vocabulary - -## translate - 0% implemented -- [ ] translate_text - -## waf - 0% implemented -- [ ] create_byte_match_set -- [ ] create_geo_match_set -- [ ] create_ip_set -- [ ] create_rate_based_rule -- [ ] create_regex_match_set -- [ ] create_regex_pattern_set -- [ ] create_rule -- [ ] create_rule_group -- [ ] create_size_constraint_set -- [ ] create_sql_injection_match_set -- [ ] create_web_acl -- [ ] create_xss_match_set -- [ ] delete_byte_match_set -- [ ] delete_geo_match_set -- [ ] delete_ip_set -- [ ] delete_permission_policy -- [ ] delete_rate_based_rule -- [ ] delete_regex_match_set -- [ ] delete_regex_pattern_set -- [ ] delete_rule -- [ ] delete_rule_group -- [ ] delete_size_constraint_set -- [ ] delete_sql_injection_match_set -- [ ] delete_web_acl -- [ ] delete_xss_match_set -- [ ] get_byte_match_set -- [ ] get_change_token -- [ ] get_change_token_status -- [ ] get_geo_match_set -- [ ] get_ip_set -- [ ] get_permission_policy -- [ ] get_rate_based_rule -- [ ] get_rate_based_rule_managed_keys -- [ ] get_regex_match_set -- [ ] get_regex_pattern_set -- [ ] get_rule -- [ ] get_rule_group -- [ ] get_sampled_requests -- [ ] get_size_constraint_set -- [ ] get_sql_injection_match_set -- [ ] get_web_acl -- [ ] get_xss_match_set -- [ ] list_activated_rules_in_rule_group -- [ ] list_byte_match_sets -- [ ] list_geo_match_sets -- [ ] list_ip_sets -- [ ] list_rate_based_rules -- [ ] list_regex_match_sets -- [ ] list_regex_pattern_sets -- [ ] list_rule_groups -- [ ] list_rules -- [ ] list_size_constraint_sets -- [ ] list_sql_injection_match_sets -- [ ] list_subscribed_rule_groups -- [ ] list_web_acls -- [ ] list_xss_match_sets -- [ ] put_permission_policy -- [ ] update_byte_match_set -- [ ] update_geo_match_set -- [ ] update_ip_set -- [ ] update_rate_based_rule -- [ ] update_regex_match_set -- [ ] update_regex_pattern_set -- [ ] update_rule -- [ ] update_rule_group -- [ ] update_size_constraint_set -- [ ] update_sql_injection_match_set -- [ ] update_web_acl -- [ ] update_xss_match_set - -## waf-regional - 0% implemented -- [ ] associate_web_acl -- [ ] create_byte_match_set -- [ ] create_geo_match_set -- [ ] create_ip_set -- [ ] create_rate_based_rule -- [ ] create_regex_match_set -- [ ] create_regex_pattern_set -- [ ] create_rule -- [ ] create_rule_group -- [ ] create_size_constraint_set -- [ ] create_sql_injection_match_set -- [ ] create_web_acl -- [ ] create_xss_match_set -- [ ] delete_byte_match_set -- [ ] delete_geo_match_set -- [ ] delete_ip_set -- [ ] delete_permission_policy -- [ ] delete_rate_based_rule -- [ ] delete_regex_match_set -- [ ] delete_regex_pattern_set -- [ ] delete_rule -- [ ] delete_rule_group -- [ ] delete_size_constraint_set -- [ ] delete_sql_injection_match_set -- [ ] delete_web_acl -- [ ] delete_xss_match_set -- [ ] disassociate_web_acl -- [ ] get_byte_match_set -- [ ] get_change_token -- [ ] get_change_token_status -- [ ] get_geo_match_set -- [ ] get_ip_set -- [ ] get_permission_policy -- [ ] get_rate_based_rule -- [ ] get_rate_based_rule_managed_keys -- [ ] get_regex_match_set -- [ ] get_regex_pattern_set -- [ ] get_rule -- [ ] get_rule_group -- [ ] get_sampled_requests -- [ ] get_size_constraint_set -- [ ] get_sql_injection_match_set -- [ ] get_web_acl -- [ ] get_web_acl_for_resource -- [ ] get_xss_match_set -- [ ] list_activated_rules_in_rule_group -- [ ] list_byte_match_sets -- [ ] list_geo_match_sets -- [ ] list_ip_sets -- [ ] list_rate_based_rules -- [ ] list_regex_match_sets -- [ ] list_regex_pattern_sets -- [ ] list_resources_for_web_acl -- [ ] 
list_rule_groups -- [ ] list_rules -- [ ] list_size_constraint_sets -- [ ] list_sql_injection_match_sets -- [ ] list_subscribed_rule_groups -- [ ] list_web_acls -- [ ] list_xss_match_sets -- [ ] put_permission_policy -- [ ] update_byte_match_set -- [ ] update_geo_match_set -- [ ] update_ip_set -- [ ] update_rate_based_rule -- [ ] update_regex_match_set -- [ ] update_regex_pattern_set -- [ ] update_rule -- [ ] update_rule_group -- [ ] update_size_constraint_set -- [ ] update_sql_injection_match_set -- [ ] update_web_acl -- [ ] update_xss_match_set - -## workdocs - 0% implemented -- [ ] abort_document_version_upload -- [ ] activate_user -- [ ] add_resource_permissions -- [ ] create_comment -- [ ] create_custom_metadata -- [ ] create_folder -- [ ] create_labels -- [ ] create_notification_subscription -- [ ] create_user -- [ ] deactivate_user -- [ ] delete_comment -- [ ] delete_custom_metadata -- [ ] delete_document -- [ ] delete_folder -- [ ] delete_folder_contents -- [ ] delete_labels -- [ ] delete_notification_subscription -- [ ] delete_user -- [ ] describe_activities -- [ ] describe_comments -- [ ] describe_document_versions -- [ ] describe_folder_contents -- [ ] describe_groups -- [ ] describe_notification_subscriptions -- [ ] describe_resource_permissions -- [ ] describe_root_folders -- [ ] describe_users -- [ ] get_current_user -- [ ] get_document -- [ ] get_document_path -- [ ] get_document_version -- [ ] get_folder -- [ ] get_folder_path -- [ ] initiate_document_version_upload -- [ ] remove_all_resource_permissions -- [ ] remove_resource_permission -- [ ] update_document -- [ ] update_document_version -- [ ] update_folder -- [ ] update_user - -## workmail - 0% implemented -- [ ] associate_delegate_to_resource -- [ ] associate_member_to_group -- [ ] create_alias -- [ ] create_group -- [ ] create_resource -- [ ] create_user -- [ ] delete_alias -- [ ] delete_group -- [ ] delete_mailbox_permissions -- [ ] delete_resource -- [ ] delete_user -- [ ] deregister_from_work_mail -- [ ] describe_group -- [ ] describe_organization -- [ ] describe_resource -- [ ] describe_user -- [ ] disassociate_delegate_from_resource -- [ ] disassociate_member_from_group -- [ ] list_aliases -- [ ] list_group_members -- [ ] list_groups -- [ ] list_mailbox_permissions -- [ ] list_organizations -- [ ] list_resource_delegates -- [ ] list_resources -- [ ] list_users -- [ ] put_mailbox_permissions -- [ ] register_to_work_mail -- [ ] reset_password -- [ ] update_primary_email_address -- [ ] update_resource - -## workspaces - 0% implemented -- [ ] create_tags -- [ ] create_workspaces -- [ ] delete_tags -- [ ] describe_tags -- [ ] describe_workspace_bundles -- [ ] describe_workspace_directories -- [ ] describe_workspaces -- [ ] describe_workspaces_connection_status -- [ ] modify_workspace_properties -- [ ] reboot_workspaces -- [ ] rebuild_workspaces -- [ ] start_workspaces -- [ ] stop_workspaces -- [ ] terminate_workspaces - -## xray - 0% implemented -- [ ] batch_get_traces -- [ ] get_service_graph -- [ ] get_trace_graph -- [ ] get_trace_summaries -- [ ] put_telemetry_records -- [ ] put_trace_segments + +## acm - 41% implemented +- [X] add_tags_to_certificate +- [X] delete_certificate +- [ ] describe_certificate +- [ ] export_certificate +- [X] get_certificate +- [ ] import_certificate +- [ ] list_certificates +- [ ] list_tags_for_certificate +- [X] remove_tags_from_certificate +- [X] request_certificate +- [ ] resend_validation_email +- [ ] update_certificate_options + +## acm-pca - 0% implemented +- [ ] 
create_certificate_authority +- [ ] create_certificate_authority_audit_report +- [ ] delete_certificate_authority +- [ ] describe_certificate_authority +- [ ] describe_certificate_authority_audit_report +- [ ] get_certificate +- [ ] get_certificate_authority_certificate +- [ ] get_certificate_authority_csr +- [ ] import_certificate_authority_certificate +- [ ] issue_certificate +- [ ] list_certificate_authorities +- [ ] list_tags +- [ ] restore_certificate_authority +- [ ] revoke_certificate +- [ ] tag_certificate_authority +- [ ] untag_certificate_authority +- [ ] update_certificate_authority + +## alexaforbusiness - 0% implemented +- [ ] associate_contact_with_address_book +- [ ] associate_device_with_room +- [ ] associate_skill_group_with_room +- [ ] create_address_book +- [ ] create_contact +- [ ] create_profile +- [ ] create_room +- [ ] create_skill_group +- [ ] create_user +- [ ] delete_address_book +- [ ] delete_contact +- [ ] delete_profile +- [ ] delete_room +- [ ] delete_room_skill_parameter +- [ ] delete_skill_group +- [ ] delete_user +- [ ] disassociate_contact_from_address_book +- [ ] disassociate_device_from_room +- [ ] disassociate_skill_group_from_room +- [ ] get_address_book +- [ ] get_contact +- [ ] get_device +- [ ] get_profile +- [ ] get_room +- [ ] get_room_skill_parameter +- [ ] get_skill_group +- [ ] list_device_events +- [ ] list_skills +- [ ] list_tags +- [ ] put_room_skill_parameter +- [ ] resolve_room +- [ ] revoke_invitation +- [ ] search_address_books +- [ ] search_contacts +- [ ] search_devices +- [ ] search_profiles +- [ ] search_rooms +- [ ] search_skill_groups +- [ ] search_users +- [ ] send_invitation +- [ ] start_device_sync +- [ ] tag_resource +- [ ] untag_resource +- [ ] update_address_book +- [ ] update_contact +- [ ] update_device +- [ ] update_profile +- [ ] update_room +- [ ] update_skill_group + +## apigateway - 24% implemented +- [ ] create_api_key +- [ ] create_authorizer +- [ ] create_base_path_mapping +- [X] create_deployment +- [ ] create_documentation_part +- [ ] create_documentation_version +- [ ] create_domain_name +- [ ] create_model +- [ ] create_request_validator +- [X] create_resource +- [X] create_rest_api +- [X] create_stage +- [X] create_usage_plan +- [X] create_usage_plan_key +- [ ] create_vpc_link +- [ ] delete_api_key +- [ ] delete_authorizer +- [ ] delete_base_path_mapping +- [ ] delete_client_certificate +- [X] delete_deployment +- [ ] delete_documentation_part +- [ ] delete_documentation_version +- [ ] delete_domain_name +- [ ] delete_gateway_response +- [X] delete_integration +- [X] delete_integration_response +- [ ] delete_method +- [X] delete_method_response +- [ ] delete_model +- [ ] delete_request_validator +- [X] delete_resource +- [X] delete_rest_api +- [ ] delete_stage +- [X] delete_usage_plan +- [X] delete_usage_plan_key +- [ ] delete_vpc_link +- [ ] flush_stage_authorizers_cache +- [ ] flush_stage_cache +- [ ] generate_client_certificate +- [ ] get_account +- [ ] get_api_key +- [ ] get_api_keys +- [ ] get_authorizer +- [ ] get_authorizers +- [ ] get_base_path_mapping +- [ ] get_base_path_mappings +- [ ] get_client_certificate +- [ ] get_client_certificates +- [X] get_deployment +- [X] get_deployments +- [ ] get_documentation_part +- [ ] get_documentation_parts +- [ ] get_documentation_version +- [ ] get_documentation_versions +- [ ] get_domain_name +- [ ] get_domain_names +- [ ] get_export +- [ ] get_gateway_response +- [ ] get_gateway_responses +- [X] get_integration +- [X] get_integration_response +- [X] get_method 
+- [X] get_method_response +- [ ] get_model +- [ ] get_model_template +- [ ] get_models +- [ ] get_request_validator +- [ ] get_request_validators +- [X] get_resource +- [ ] get_resources +- [X] get_rest_api +- [ ] get_rest_apis +- [ ] get_sdk +- [ ] get_sdk_type +- [ ] get_sdk_types +- [X] get_stage +- [X] get_stages +- [ ] get_tags +- [ ] get_usage +- [X] get_usage_plan +- [X] get_usage_plan_key +- [X] get_usage_plan_keys +- [X] get_usage_plans +- [ ] get_vpc_link +- [ ] get_vpc_links +- [ ] import_api_keys +- [ ] import_documentation_parts +- [ ] import_rest_api +- [ ] put_gateway_response +- [ ] put_integration +- [ ] put_integration_response +- [ ] put_method +- [ ] put_method_response +- [ ] put_rest_api +- [ ] tag_resource +- [ ] test_invoke_authorizer +- [ ] test_invoke_method +- [ ] untag_resource +- [ ] update_account +- [ ] update_api_key +- [ ] update_authorizer +- [ ] update_base_path_mapping +- [ ] update_client_certificate +- [ ] update_deployment +- [ ] update_documentation_part +- [ ] update_documentation_version +- [ ] update_domain_name +- [ ] update_gateway_response +- [ ] update_integration +- [ ] update_integration_response +- [ ] update_method +- [ ] update_method_response +- [ ] update_model +- [ ] update_request_validator +- [ ] update_resource +- [ ] update_rest_api +- [X] update_stage +- [ ] update_usage +- [ ] update_usage_plan +- [ ] update_vpc_link + +## application-autoscaling - 0% implemented +- [ ] delete_scaling_policy +- [ ] delete_scheduled_action +- [ ] deregister_scalable_target +- [ ] describe_scalable_targets +- [ ] describe_scaling_activities +- [ ] describe_scaling_policies +- [ ] describe_scheduled_actions +- [ ] put_scaling_policy +- [ ] put_scheduled_action +- [ ] register_scalable_target + +## appstream - 0% implemented +- [ ] associate_fleet +- [ ] copy_image +- [ ] create_directory_config +- [ ] create_fleet +- [ ] create_image_builder +- [ ] create_image_builder_streaming_url +- [ ] create_stack +- [ ] create_streaming_url +- [ ] delete_directory_config +- [ ] delete_fleet +- [ ] delete_image +- [ ] delete_image_builder +- [ ] delete_image_permissions +- [ ] delete_stack +- [ ] describe_directory_configs +- [ ] describe_fleets +- [ ] describe_image_builders +- [ ] describe_image_permissions +- [ ] describe_images +- [ ] describe_sessions +- [ ] describe_stacks +- [ ] disassociate_fleet +- [ ] expire_session +- [ ] list_associated_fleets +- [ ] list_associated_stacks +- [ ] list_tags_for_resource +- [ ] start_fleet +- [ ] start_image_builder +- [ ] stop_fleet +- [ ] stop_image_builder +- [ ] tag_resource +- [ ] untag_resource +- [ ] update_directory_config +- [ ] update_fleet +- [ ] update_image_permissions +- [ ] update_stack + +## appsync - 0% implemented +- [ ] create_api_key +- [ ] create_data_source +- [ ] create_graphql_api +- [ ] create_resolver +- [ ] create_type +- [ ] delete_api_key +- [ ] delete_data_source +- [ ] delete_graphql_api +- [ ] delete_resolver +- [ ] delete_type +- [ ] get_data_source +- [ ] get_graphql_api +- [ ] get_introspection_schema +- [ ] get_resolver +- [ ] get_schema_creation_status +- [ ] get_type +- [ ] list_api_keys +- [ ] list_data_sources +- [ ] list_graphql_apis +- [ ] list_resolvers +- [ ] list_types +- [ ] start_schema_creation +- [ ] update_api_key +- [ ] update_data_source +- [ ] update_graphql_api +- [ ] update_resolver +- [ ] update_type + +## athena - 0% implemented +- [ ] batch_get_named_query +- [ ] batch_get_query_execution +- [ ] create_named_query +- [ ] delete_named_query +- [ ] 
get_named_query +- [ ] get_query_execution +- [ ] get_query_results +- [ ] list_named_queries +- [ ] list_query_executions +- [ ] start_query_execution +- [ ] stop_query_execution + +## autoscaling - 42% implemented +- [X] attach_instances +- [X] attach_load_balancer_target_groups +- [X] attach_load_balancers +- [ ] batch_delete_scheduled_action +- [ ] batch_put_scheduled_update_group_action +- [ ] complete_lifecycle_action +- [X] create_auto_scaling_group +- [X] create_launch_configuration +- [X] create_or_update_tags +- [X] delete_auto_scaling_group +- [X] delete_launch_configuration +- [ ] delete_lifecycle_hook +- [ ] delete_notification_configuration +- [X] delete_policy +- [ ] delete_scheduled_action +- [ ] delete_tags +- [ ] describe_account_limits +- [ ] describe_adjustment_types +- [X] describe_auto_scaling_groups +- [X] describe_auto_scaling_instances +- [ ] describe_auto_scaling_notification_types +- [X] describe_launch_configurations +- [ ] describe_lifecycle_hook_types +- [ ] describe_lifecycle_hooks +- [X] describe_load_balancer_target_groups +- [X] describe_load_balancers +- [ ] describe_metric_collection_types +- [ ] describe_notification_configurations +- [X] describe_policies +- [ ] describe_scaling_activities +- [ ] describe_scaling_process_types +- [ ] describe_scheduled_actions +- [ ] describe_tags +- [ ] describe_termination_policy_types +- [X] detach_instances +- [X] detach_load_balancer_target_groups +- [X] detach_load_balancers +- [ ] disable_metrics_collection +- [ ] enable_metrics_collection +- [ ] enter_standby +- [X] execute_policy +- [ ] exit_standby +- [ ] put_lifecycle_hook +- [ ] put_notification_configuration +- [ ] put_scaling_policy +- [ ] put_scheduled_update_group_action +- [ ] record_lifecycle_action_heartbeat +- [ ] resume_processes +- [X] set_desired_capacity +- [X] set_instance_health +- [ ] set_instance_protection +- [X] suspend_processes +- [ ] terminate_instance_in_auto_scaling_group +- [X] update_auto_scaling_group + +## autoscaling-plans - 0% implemented +- [ ] create_scaling_plan +- [ ] delete_scaling_plan +- [ ] describe_scaling_plan_resources +- [ ] describe_scaling_plans +- [ ] update_scaling_plan + +## batch - 93% implemented +- [ ] cancel_job +- [X] create_compute_environment +- [X] create_job_queue +- [X] delete_compute_environment +- [X] delete_job_queue +- [X] deregister_job_definition +- [X] describe_compute_environments +- [X] describe_job_definitions +- [X] describe_job_queues +- [X] describe_jobs +- [X] list_jobs +- [X] register_job_definition +- [X] submit_job +- [X] terminate_job +- [X] update_compute_environment +- [X] update_job_queue + +## budgets - 0% implemented +- [ ] create_budget +- [ ] create_notification +- [ ] create_subscriber +- [ ] delete_budget +- [ ] delete_notification +- [ ] delete_subscriber +- [ ] describe_budget +- [ ] describe_budgets +- [ ] describe_notifications_for_budget +- [ ] describe_subscribers_for_notification +- [ ] update_budget +- [ ] update_notification +- [ ] update_subscriber + +## ce - 0% implemented +- [ ] get_cost_and_usage +- [ ] get_dimension_values +- [ ] get_reservation_coverage +- [ ] get_reservation_purchase_recommendation +- [ ] get_reservation_utilization +- [ ] get_tags + +## cloud9 - 0% implemented +- [ ] create_environment_ec2 +- [ ] create_environment_membership +- [ ] delete_environment +- [ ] delete_environment_membership +- [ ] describe_environment_memberships +- [ ] describe_environment_status +- [ ] describe_environments +- [ ] list_environments +- [ ] update_environment +- 
[ ] update_environment_membership + +## clouddirectory - 0% implemented +- [ ] add_facet_to_object +- [ ] apply_schema +- [ ] attach_object +- [ ] attach_policy +- [ ] attach_to_index +- [ ] attach_typed_link +- [ ] batch_read +- [ ] batch_write +- [ ] create_directory +- [ ] create_facet +- [ ] create_index +- [ ] create_object +- [ ] create_schema +- [ ] create_typed_link_facet +- [ ] delete_directory +- [ ] delete_facet +- [ ] delete_object +- [ ] delete_schema +- [ ] delete_typed_link_facet +- [ ] detach_from_index +- [ ] detach_object +- [ ] detach_policy +- [ ] detach_typed_link +- [ ] disable_directory +- [ ] enable_directory +- [ ] get_applied_schema_version +- [ ] get_directory +- [ ] get_facet +- [ ] get_link_attributes +- [ ] get_object_attributes +- [ ] get_object_information +- [ ] get_schema_as_json +- [ ] get_typed_link_facet_information +- [ ] list_applied_schema_arns +- [ ] list_attached_indices +- [ ] list_development_schema_arns +- [ ] list_directories +- [ ] list_facet_attributes +- [ ] list_facet_names +- [ ] list_incoming_typed_links +- [ ] list_index +- [ ] list_managed_schema_arns +- [ ] list_object_attributes +- [ ] list_object_children +- [ ] list_object_parent_paths +- [ ] list_object_parents +- [ ] list_object_policies +- [ ] list_outgoing_typed_links +- [ ] list_policy_attachments +- [ ] list_published_schema_arns +- [ ] list_tags_for_resource +- [ ] list_typed_link_facet_attributes +- [ ] list_typed_link_facet_names +- [ ] lookup_policy +- [ ] publish_schema +- [ ] put_schema_from_json +- [ ] remove_facet_from_object +- [ ] tag_resource +- [ ] untag_resource +- [ ] update_facet +- [ ] update_link_attributes +- [ ] update_object_attributes +- [ ] update_schema +- [ ] update_typed_link_facet +- [ ] upgrade_applied_schema +- [ ] upgrade_published_schema + +## cloudformation - 21% implemented +- [ ] cancel_update_stack +- [ ] continue_update_rollback +- [X] create_change_set +- [X] create_stack +- [ ] create_stack_instances +- [ ] create_stack_set +- [ ] delete_change_set +- [X] delete_stack +- [ ] delete_stack_instances +- [ ] delete_stack_set +- [ ] describe_account_limits +- [ ] describe_change_set +- [ ] describe_stack_events +- [ ] describe_stack_instance +- [ ] describe_stack_resource +- [ ] describe_stack_resources +- [ ] describe_stack_set +- [ ] describe_stack_set_operation +- [X] describe_stacks +- [ ] estimate_template_cost +- [X] execute_change_set +- [ ] get_stack_policy +- [ ] get_template +- [ ] get_template_summary +- [ ] list_change_sets +- [X] list_exports +- [ ] list_imports +- [ ] list_stack_instances +- [X] list_stack_resources +- [ ] list_stack_set_operation_results +- [ ] list_stack_set_operations +- [ ] list_stack_sets +- [X] list_stacks +- [ ] set_stack_policy +- [ ] signal_resource +- [ ] stop_stack_set_operation +- [X] update_stack +- [ ] update_stack_instances +- [ ] update_stack_set +- [ ] update_termination_protection +- [ ] validate_template + +## cloudfront - 0% implemented +- [ ] create_cloud_front_origin_access_identity +- [ ] create_distribution +- [ ] create_distribution_with_tags +- [ ] create_field_level_encryption_config +- [ ] create_field_level_encryption_profile +- [ ] create_invalidation +- [ ] create_public_key +- [ ] create_streaming_distribution +- [ ] create_streaming_distribution_with_tags +- [ ] delete_cloud_front_origin_access_identity +- [ ] delete_distribution +- [ ] delete_field_level_encryption_config +- [ ] delete_field_level_encryption_profile +- [ ] delete_public_key +- [ ] delete_streaming_distribution +- [ 
] get_cloud_front_origin_access_identity +- [ ] get_cloud_front_origin_access_identity_config +- [ ] get_distribution +- [ ] get_distribution_config +- [ ] get_field_level_encryption +- [ ] get_field_level_encryption_config +- [ ] get_field_level_encryption_profile +- [ ] get_field_level_encryption_profile_config +- [ ] get_invalidation +- [ ] get_public_key +- [ ] get_public_key_config +- [ ] get_streaming_distribution +- [ ] get_streaming_distribution_config +- [ ] list_cloud_front_origin_access_identities +- [ ] list_distributions +- [ ] list_distributions_by_web_acl_id +- [ ] list_field_level_encryption_configs +- [ ] list_field_level_encryption_profiles +- [ ] list_invalidations +- [ ] list_public_keys +- [ ] list_streaming_distributions +- [ ] list_tags_for_resource +- [ ] tag_resource +- [ ] untag_resource +- [ ] update_cloud_front_origin_access_identity +- [ ] update_distribution +- [ ] update_field_level_encryption_config +- [ ] update_field_level_encryption_profile +- [ ] update_public_key +- [ ] update_streaming_distribution + +## cloudhsm - 0% implemented +- [ ] add_tags_to_resource +- [ ] create_hapg +- [ ] create_hsm +- [ ] create_luna_client +- [ ] delete_hapg +- [ ] delete_hsm +- [ ] delete_luna_client +- [ ] describe_hapg +- [ ] describe_hsm +- [ ] describe_luna_client +- [ ] get_config +- [ ] list_available_zones +- [ ] list_hapgs +- [ ] list_hsms +- [ ] list_luna_clients +- [ ] list_tags_for_resource +- [ ] modify_hapg +- [ ] modify_hsm +- [ ] modify_luna_client +- [ ] remove_tags_from_resource + +## cloudhsmv2 - 0% implemented +- [ ] copy_backup_to_region +- [ ] create_cluster +- [ ] create_hsm +- [ ] delete_cluster +- [ ] delete_hsm +- [ ] describe_backups +- [ ] describe_clusters +- [ ] initialize_cluster +- [ ] list_tags +- [ ] tag_resource +- [ ] untag_resource + +## cloudsearch - 0% implemented +- [ ] build_suggesters +- [ ] create_domain +- [ ] define_analysis_scheme +- [ ] define_expression +- [ ] define_index_field +- [ ] define_suggester +- [ ] delete_analysis_scheme +- [ ] delete_domain +- [ ] delete_expression +- [ ] delete_index_field +- [ ] delete_suggester +- [ ] describe_analysis_schemes +- [ ] describe_availability_options +- [ ] describe_domains +- [ ] describe_expressions +- [ ] describe_index_fields +- [ ] describe_scaling_parameters +- [ ] describe_service_access_policies +- [ ] describe_suggesters +- [ ] index_documents +- [ ] list_domain_names +- [ ] update_availability_options +- [ ] update_scaling_parameters +- [ ] update_service_access_policies + +## cloudsearchdomain - 0% implemented +- [ ] search +- [ ] suggest +- [ ] upload_documents + +## cloudtrail - 0% implemented +- [ ] add_tags +- [ ] create_trail +- [ ] delete_trail +- [ ] describe_trails +- [ ] get_event_selectors +- [ ] get_trail_status +- [ ] list_public_keys +- [ ] list_tags +- [ ] lookup_events +- [ ] put_event_selectors +- [ ] remove_tags +- [ ] start_logging +- [ ] stop_logging +- [ ] update_trail + +## cloudwatch - 56% implemented +- [X] delete_alarms +- [X] delete_dashboards +- [ ] describe_alarm_history +- [ ] describe_alarms +- [ ] describe_alarms_for_metric +- [ ] disable_alarm_actions +- [ ] enable_alarm_actions +- [X] get_dashboard +- [ ] get_metric_data +- [X] get_metric_statistics +- [X] list_dashboards +- [ ] list_metrics +- [X] put_dashboard +- [X] put_metric_alarm +- [X] put_metric_data +- [X] set_alarm_state + +## codebuild - 0% implemented +- [ ] batch_delete_builds +- [ ] batch_get_builds +- [ ] batch_get_projects +- [ ] create_project +- [ ] create_webhook +- [ ] 
delete_project +- [ ] delete_webhook +- [ ] invalidate_project_cache +- [ ] list_builds +- [ ] list_builds_for_project +- [ ] list_curated_environment_images +- [ ] list_projects +- [ ] start_build +- [ ] stop_build +- [ ] update_project +- [ ] update_webhook + +## codecommit - 0% implemented +- [ ] batch_get_repositories +- [ ] create_branch +- [ ] create_pull_request +- [ ] create_repository +- [ ] delete_branch +- [ ] delete_comment_content +- [ ] delete_repository +- [ ] describe_pull_request_events +- [ ] get_blob +- [ ] get_branch +- [ ] get_comment +- [ ] get_comments_for_compared_commit +- [ ] get_comments_for_pull_request +- [ ] get_commit +- [ ] get_differences +- [ ] get_merge_conflicts +- [ ] get_pull_request +- [ ] get_repository +- [ ] get_repository_triggers +- [ ] list_branches +- [ ] list_pull_requests +- [ ] list_repositories +- [ ] merge_pull_request_by_fast_forward +- [ ] post_comment_for_compared_commit +- [ ] post_comment_for_pull_request +- [ ] post_comment_reply +- [ ] put_file +- [ ] put_repository_triggers +- [ ] test_repository_triggers +- [ ] update_comment +- [ ] update_default_branch +- [ ] update_pull_request_description +- [ ] update_pull_request_status +- [ ] update_pull_request_title +- [ ] update_repository_description +- [ ] update_repository_name + +## codedeploy - 0% implemented +- [ ] add_tags_to_on_premises_instances +- [ ] batch_get_application_revisions +- [ ] batch_get_applications +- [ ] batch_get_deployment_groups +- [ ] batch_get_deployment_instances +- [ ] batch_get_deployments +- [ ] batch_get_on_premises_instances +- [ ] continue_deployment +- [ ] create_application +- [ ] create_deployment +- [ ] create_deployment_config +- [ ] create_deployment_group +- [ ] delete_application +- [ ] delete_deployment_config +- [ ] delete_deployment_group +- [ ] delete_git_hub_account_token +- [ ] deregister_on_premises_instance +- [ ] get_application +- [ ] get_application_revision +- [ ] get_deployment +- [ ] get_deployment_config +- [ ] get_deployment_group +- [ ] get_deployment_instance +- [ ] get_on_premises_instance +- [ ] list_application_revisions +- [ ] list_applications +- [ ] list_deployment_configs +- [ ] list_deployment_groups +- [ ] list_deployment_instances +- [ ] list_deployments +- [ ] list_git_hub_account_token_names +- [ ] list_on_premises_instances +- [ ] put_lifecycle_event_hook_execution_status +- [ ] register_application_revision +- [ ] register_on_premises_instance +- [ ] remove_tags_from_on_premises_instances +- [ ] skip_wait_time_for_instance_termination +- [ ] stop_deployment +- [ ] update_application +- [ ] update_deployment_group + +## codepipeline - 0% implemented +- [ ] acknowledge_job +- [ ] acknowledge_third_party_job +- [ ] create_custom_action_type +- [ ] create_pipeline +- [ ] delete_custom_action_type +- [ ] delete_pipeline +- [ ] delete_webhook +- [ ] deregister_webhook_with_third_party +- [ ] disable_stage_transition +- [ ] enable_stage_transition +- [ ] get_job_details +- [ ] get_pipeline +- [ ] get_pipeline_execution +- [ ] get_pipeline_state +- [ ] get_third_party_job_details +- [ ] list_action_types +- [ ] list_pipeline_executions +- [ ] list_pipelines +- [ ] list_webhooks +- [ ] poll_for_jobs +- [ ] poll_for_third_party_jobs +- [ ] put_action_revision +- [ ] put_approval_result +- [ ] put_job_failure_result +- [ ] put_job_success_result +- [ ] put_third_party_job_failure_result +- [ ] put_third_party_job_success_result +- [ ] put_webhook +- [ ] register_webhook_with_third_party +- [ ] retry_stage_execution +- [ ] 
start_pipeline_execution +- [ ] update_pipeline + +## codestar - 0% implemented +- [ ] associate_team_member +- [ ] create_project +- [ ] create_user_profile +- [ ] delete_project +- [ ] delete_user_profile +- [ ] describe_project +- [ ] describe_user_profile +- [ ] disassociate_team_member +- [ ] list_projects +- [ ] list_resources +- [ ] list_tags_for_project +- [ ] list_team_members +- [ ] list_user_profiles +- [ ] tag_project +- [ ] untag_project +- [ ] update_project +- [ ] update_team_member +- [ ] update_user_profile + +## cognito-identity - 22% implemented +- [X] create_identity_pool +- [ ] delete_identities +- [ ] delete_identity_pool +- [ ] describe_identity +- [ ] describe_identity_pool +- [X] get_credentials_for_identity +- [X] get_id +- [ ] get_identity_pool_roles +- [ ] get_open_id_token +- [X] get_open_id_token_for_developer_identity +- [ ] list_identities +- [ ] list_identity_pools +- [ ] lookup_developer_identity +- [ ] merge_developer_identities +- [ ] set_identity_pool_roles +- [ ] unlink_developer_identity +- [ ] unlink_identity +- [ ] update_identity_pool + +## cognito-idp - 27% implemented +- [ ] add_custom_attributes +- [ ] admin_add_user_to_group +- [ ] admin_confirm_sign_up +- [X] admin_create_user +- [X] admin_delete_user +- [ ] admin_delete_user_attributes +- [ ] admin_disable_provider_for_user +- [X] admin_disable_user +- [X] admin_enable_user +- [ ] admin_forget_device +- [ ] admin_get_device +- [X] admin_get_user +- [X] admin_initiate_auth +- [ ] admin_link_provider_for_user +- [ ] admin_list_devices +- [ ] admin_list_groups_for_user +- [ ] admin_list_user_auth_events +- [ ] admin_remove_user_from_group +- [ ] admin_reset_user_password +- [ ] admin_respond_to_auth_challenge +- [ ] admin_set_user_mfa_preference +- [ ] admin_set_user_settings +- [ ] admin_update_auth_event_feedback +- [ ] admin_update_device_status +- [ ] admin_update_user_attributes +- [ ] admin_user_global_sign_out +- [ ] associate_software_token +- [X] change_password +- [ ] confirm_device +- [X] confirm_forgot_password +- [ ] confirm_sign_up +- [ ] create_group +- [X] create_identity_provider +- [ ] create_resource_server +- [ ] create_user_import_job +- [X] create_user_pool +- [X] create_user_pool_client +- [X] create_user_pool_domain +- [ ] delete_group +- [X] delete_identity_provider +- [ ] delete_resource_server +- [ ] delete_user +- [ ] delete_user_attributes +- [X] delete_user_pool +- [X] delete_user_pool_client +- [X] delete_user_pool_domain +- [X] describe_identity_provider +- [ ] describe_resource_server +- [ ] describe_risk_configuration +- [ ] describe_user_import_job +- [X] describe_user_pool +- [X] describe_user_pool_client +- [X] describe_user_pool_domain +- [ ] forget_device +- [ ] forgot_password +- [ ] get_csv_header +- [ ] get_device +- [ ] get_group +- [ ] get_identity_provider_by_identifier +- [ ] get_signing_certificate +- [ ] get_ui_customization +- [ ] get_user +- [ ] get_user_attribute_verification_code +- [ ] get_user_pool_mfa_config +- [ ] global_sign_out +- [ ] initiate_auth +- [ ] list_devices +- [ ] list_groups +- [X] list_identity_providers +- [ ] list_resource_servers +- [ ] list_user_import_jobs +- [X] list_user_pool_clients +- [X] list_user_pools +- [X] list_users +- [ ] list_users_in_group +- [ ] resend_confirmation_code +- [X] respond_to_auth_challenge +- [ ] set_risk_configuration +- [ ] set_ui_customization +- [ ] set_user_mfa_preference +- [ ] set_user_pool_mfa_config +- [ ] set_user_settings +- [ ] sign_up +- [ ] start_user_import_job +- [ ] 
stop_user_import_job +- [ ] update_auth_event_feedback +- [ ] update_device_status +- [ ] update_group +- [ ] update_identity_provider +- [ ] update_resource_server +- [ ] update_user_attributes +- [ ] update_user_pool +- [X] update_user_pool_client +- [ ] verify_software_token +- [ ] verify_user_attribute + +## cognito-sync - 0% implemented +- [ ] bulk_publish +- [ ] delete_dataset +- [ ] describe_dataset +- [ ] describe_identity_pool_usage +- [ ] describe_identity_usage +- [ ] get_bulk_publish_details +- [ ] get_cognito_events +- [ ] get_identity_pool_configuration +- [ ] list_datasets +- [ ] list_identity_pool_usage +- [ ] list_records +- [ ] register_device +- [ ] set_cognito_events +- [ ] set_identity_pool_configuration +- [ ] subscribe_to_dataset +- [ ] unsubscribe_from_dataset +- [ ] update_records + +## comprehend - 0% implemented +- [ ] batch_detect_dominant_language +- [ ] batch_detect_entities +- [ ] batch_detect_key_phrases +- [ ] batch_detect_sentiment +- [ ] batch_detect_syntax +- [ ] describe_dominant_language_detection_job +- [ ] describe_entities_detection_job +- [ ] describe_key_phrases_detection_job +- [ ] describe_sentiment_detection_job +- [ ] describe_topics_detection_job +- [ ] detect_dominant_language +- [ ] detect_entities +- [ ] detect_key_phrases +- [ ] detect_sentiment +- [ ] detect_syntax +- [ ] list_dominant_language_detection_jobs +- [ ] list_entities_detection_jobs +- [ ] list_key_phrases_detection_jobs +- [ ] list_sentiment_detection_jobs +- [ ] list_topics_detection_jobs +- [ ] start_dominant_language_detection_job +- [ ] start_entities_detection_job +- [ ] start_key_phrases_detection_job +- [ ] start_sentiment_detection_job +- [ ] start_topics_detection_job +- [ ] stop_dominant_language_detection_job +- [ ] stop_entities_detection_job +- [ ] stop_key_phrases_detection_job +- [ ] stop_sentiment_detection_job + +## config - 0% implemented +- [ ] batch_get_resource_config +- [ ] delete_aggregation_authorization +- [ ] delete_config_rule +- [ ] delete_configuration_aggregator +- [ ] delete_configuration_recorder +- [ ] delete_delivery_channel +- [ ] delete_evaluation_results +- [ ] delete_pending_aggregation_request +- [ ] delete_retention_configuration +- [ ] deliver_config_snapshot +- [ ] describe_aggregate_compliance_by_config_rules +- [ ] describe_aggregation_authorizations +- [ ] describe_compliance_by_config_rule +- [ ] describe_compliance_by_resource +- [ ] describe_config_rule_evaluation_status +- [ ] describe_config_rules +- [ ] describe_configuration_aggregator_sources_status +- [ ] describe_configuration_aggregators +- [ ] describe_configuration_recorder_status +- [ ] describe_configuration_recorders +- [ ] describe_delivery_channel_status +- [ ] describe_delivery_channels +- [ ] describe_pending_aggregation_requests +- [ ] describe_retention_configurations +- [ ] get_aggregate_compliance_details_by_config_rule +- [ ] get_aggregate_config_rule_compliance_summary +- [ ] get_compliance_details_by_config_rule +- [ ] get_compliance_details_by_resource +- [ ] get_compliance_summary_by_config_rule +- [ ] get_compliance_summary_by_resource_type +- [ ] get_discovered_resource_counts +- [ ] get_resource_config_history +- [ ] list_discovered_resources +- [ ] put_aggregation_authorization +- [ ] put_config_rule +- [ ] put_configuration_aggregator +- [ ] put_configuration_recorder +- [ ] put_delivery_channel +- [ ] put_evaluations +- [ ] put_retention_configuration +- [ ] start_config_rules_evaluation +- [ ] start_configuration_recorder +- [ ] 
stop_configuration_recorder + +## connect - 0% implemented +- [ ] create_user +- [ ] delete_user +- [ ] describe_user +- [ ] describe_user_hierarchy_group +- [ ] describe_user_hierarchy_structure +- [ ] get_federation_token +- [ ] list_routing_profiles +- [ ] list_security_profiles +- [ ] list_user_hierarchy_groups +- [ ] list_users +- [ ] start_outbound_voice_contact +- [ ] stop_contact +- [ ] update_user_hierarchy +- [ ] update_user_identity_info +- [ ] update_user_phone_config +- [ ] update_user_routing_profile +- [ ] update_user_security_profiles + +## cur - 0% implemented +- [ ] delete_report_definition +- [ ] describe_report_definitions +- [ ] put_report_definition + +## datapipeline - 42% implemented +- [X] activate_pipeline +- [ ] add_tags +- [X] create_pipeline +- [ ] deactivate_pipeline +- [X] delete_pipeline +- [X] describe_objects +- [X] describe_pipelines +- [ ] evaluate_expression +- [X] get_pipeline_definition +- [X] list_pipelines +- [ ] poll_for_task +- [X] put_pipeline_definition +- [ ] query_objects +- [ ] remove_tags +- [ ] report_task_progress +- [ ] report_task_runner_heartbeat +- [ ] set_status +- [ ] set_task_status +- [ ] validate_pipeline_definition + +## dax - 0% implemented +- [ ] create_cluster +- [ ] create_parameter_group +- [ ] create_subnet_group +- [ ] decrease_replication_factor +- [ ] delete_cluster +- [ ] delete_parameter_group +- [ ] delete_subnet_group +- [ ] describe_clusters +- [ ] describe_default_parameters +- [ ] describe_events +- [ ] describe_parameter_groups +- [ ] describe_parameters +- [ ] describe_subnet_groups +- [ ] increase_replication_factor +- [ ] list_tags +- [ ] reboot_node +- [ ] tag_resource +- [ ] untag_resource +- [ ] update_cluster +- [ ] update_parameter_group +- [ ] update_subnet_group + +## devicefarm - 0% implemented +- [ ] create_device_pool +- [ ] create_instance_profile +- [ ] create_network_profile +- [ ] create_project +- [ ] create_remote_access_session +- [ ] create_upload +- [ ] create_vpce_configuration +- [ ] delete_device_pool +- [ ] delete_instance_profile +- [ ] delete_network_profile +- [ ] delete_project +- [ ] delete_remote_access_session +- [ ] delete_run +- [ ] delete_upload +- [ ] delete_vpce_configuration +- [ ] get_account_settings +- [ ] get_device +- [ ] get_device_instance +- [ ] get_device_pool +- [ ] get_device_pool_compatibility +- [ ] get_instance_profile +- [ ] get_job +- [ ] get_network_profile +- [ ] get_offering_status +- [ ] get_project +- [ ] get_remote_access_session +- [ ] get_run +- [ ] get_suite +- [ ] get_test +- [ ] get_upload +- [ ] get_vpce_configuration +- [ ] install_to_remote_access_session +- [ ] list_artifacts +- [ ] list_device_instances +- [ ] list_device_pools +- [ ] list_devices +- [ ] list_instance_profiles +- [ ] list_jobs +- [ ] list_network_profiles +- [ ] list_offering_promotions +- [ ] list_offering_transactions +- [ ] list_offerings +- [ ] list_projects +- [ ] list_remote_access_sessions +- [ ] list_runs +- [ ] list_samples +- [ ] list_suites +- [ ] list_tests +- [ ] list_unique_problems +- [ ] list_uploads +- [ ] list_vpce_configurations +- [ ] purchase_offering +- [ ] renew_offering +- [ ] schedule_run +- [ ] stop_job +- [ ] stop_remote_access_session +- [ ] stop_run +- [ ] update_device_instance +- [ ] update_device_pool +- [ ] update_instance_profile +- [ ] update_network_profile +- [ ] update_project +- [ ] update_upload +- [ ] update_vpce_configuration + +## directconnect - 0% implemented +- [ ] allocate_connection_on_interconnect +- [ ] 
allocate_hosted_connection +- [ ] allocate_private_virtual_interface +- [ ] allocate_public_virtual_interface +- [ ] associate_connection_with_lag +- [ ] associate_hosted_connection +- [ ] associate_virtual_interface +- [ ] confirm_connection +- [ ] confirm_private_virtual_interface +- [ ] confirm_public_virtual_interface +- [ ] create_bgp_peer +- [ ] create_connection +- [ ] create_direct_connect_gateway +- [ ] create_direct_connect_gateway_association +- [ ] create_interconnect +- [ ] create_lag +- [ ] create_private_virtual_interface +- [ ] create_public_virtual_interface +- [ ] delete_bgp_peer +- [ ] delete_connection +- [ ] delete_direct_connect_gateway +- [ ] delete_direct_connect_gateway_association +- [ ] delete_interconnect +- [ ] delete_lag +- [ ] delete_virtual_interface +- [ ] describe_connection_loa +- [ ] describe_connections +- [ ] describe_connections_on_interconnect +- [ ] describe_direct_connect_gateway_associations +- [ ] describe_direct_connect_gateway_attachments +- [ ] describe_direct_connect_gateways +- [ ] describe_hosted_connections +- [ ] describe_interconnect_loa +- [ ] describe_interconnects +- [ ] describe_lags +- [ ] describe_loa +- [ ] describe_locations +- [ ] describe_tags +- [ ] describe_virtual_gateways +- [ ] describe_virtual_interfaces +- [ ] disassociate_connection_from_lag +- [ ] tag_resource +- [ ] untag_resource +- [ ] update_lag + +## discovery - 0% implemented +- [ ] associate_configuration_items_to_application +- [ ] create_application +- [ ] create_tags +- [ ] delete_applications +- [ ] delete_tags +- [ ] describe_agents +- [ ] describe_configurations +- [ ] describe_continuous_exports +- [ ] describe_export_configurations +- [ ] describe_export_tasks +- [ ] describe_tags +- [ ] disassociate_configuration_items_from_application +- [ ] export_configurations +- [ ] get_discovery_summary +- [ ] list_configurations +- [ ] list_server_neighbors +- [ ] start_continuous_export +- [ ] start_data_collection_by_agent_ids +- [ ] start_export_task +- [ ] stop_continuous_export +- [ ] stop_data_collection_by_agent_ids +- [ ] update_application + +## dlm - 0% implemented +- [ ] create_lifecycle_policy +- [ ] delete_lifecycle_policy +- [ ] get_lifecycle_policies +- [ ] get_lifecycle_policy +- [ ] update_lifecycle_policy + +## dms - 0% implemented +- [ ] add_tags_to_resource +- [ ] create_endpoint +- [ ] create_event_subscription +- [ ] create_replication_instance +- [ ] create_replication_subnet_group +- [ ] create_replication_task +- [ ] delete_certificate +- [ ] delete_endpoint +- [ ] delete_event_subscription +- [ ] delete_replication_instance +- [ ] delete_replication_subnet_group +- [ ] delete_replication_task +- [ ] describe_account_attributes +- [ ] describe_certificates +- [ ] describe_connections +- [ ] describe_endpoint_types +- [ ] describe_endpoints +- [ ] describe_event_categories +- [ ] describe_event_subscriptions +- [ ] describe_events +- [ ] describe_orderable_replication_instances +- [ ] describe_refresh_schemas_status +- [ ] describe_replication_instance_task_logs +- [ ] describe_replication_instances +- [ ] describe_replication_subnet_groups +- [ ] describe_replication_task_assessment_results +- [ ] describe_replication_tasks +- [ ] describe_schemas +- [ ] describe_table_statistics +- [ ] import_certificate +- [ ] list_tags_for_resource +- [ ] modify_endpoint +- [ ] modify_event_subscription +- [ ] modify_replication_instance +- [ ] modify_replication_subnet_group +- [ ] modify_replication_task +- [ ] reboot_replication_instance +- [ ] 
refresh_schemas +- [ ] reload_tables +- [ ] remove_tags_from_resource +- [ ] start_replication_task +- [ ] start_replication_task_assessment +- [ ] stop_replication_task +- [ ] test_connection + +## ds - 0% implemented +- [ ] add_ip_routes +- [ ] add_tags_to_resource +- [ ] cancel_schema_extension +- [ ] connect_directory +- [ ] create_alias +- [ ] create_computer +- [ ] create_conditional_forwarder +- [ ] create_directory +- [ ] create_microsoft_ad +- [ ] create_snapshot +- [ ] create_trust +- [ ] delete_conditional_forwarder +- [ ] delete_directory +- [ ] delete_snapshot +- [ ] delete_trust +- [ ] deregister_event_topic +- [ ] describe_conditional_forwarders +- [ ] describe_directories +- [ ] describe_domain_controllers +- [ ] describe_event_topics +- [ ] describe_snapshots +- [ ] describe_trusts +- [ ] disable_radius +- [ ] disable_sso +- [ ] enable_radius +- [ ] enable_sso +- [ ] get_directory_limits +- [ ] get_snapshot_limits +- [ ] list_ip_routes +- [ ] list_schema_extensions +- [ ] list_tags_for_resource +- [ ] register_event_topic +- [ ] remove_ip_routes +- [ ] remove_tags_from_resource +- [ ] reset_user_password +- [ ] restore_from_snapshot +- [ ] start_schema_extension +- [ ] update_conditional_forwarder +- [ ] update_number_of_domain_controllers +- [ ] update_radius +- [ ] verify_trust + +## dynamodb - 21% implemented +- [ ] batch_get_item +- [ ] batch_write_item +- [ ] create_backup +- [ ] create_global_table +- [X] create_table +- [ ] delete_backup +- [X] delete_item +- [X] delete_table +- [ ] describe_backup +- [ ] describe_continuous_backups +- [ ] describe_global_table +- [ ] describe_global_table_settings +- [ ] describe_limits +- [ ] describe_table +- [ ] describe_time_to_live +- [X] get_item +- [ ] list_backups +- [ ] list_global_tables +- [ ] list_tables +- [ ] list_tags_of_resource +- [X] put_item +- [X] query +- [ ] restore_table_from_backup +- [ ] restore_table_to_point_in_time +- [X] scan +- [ ] tag_resource +- [ ] untag_resource +- [ ] update_continuous_backups +- [ ] update_global_table +- [ ] update_global_table_settings +- [ ] update_item +- [ ] update_table +- [ ] update_time_to_live + +## dynamodbstreams - 0% implemented +- [ ] describe_stream +- [ ] get_records +- [ ] get_shard_iterator +- [ ] list_streams + +## ec2 - 36% implemented +- [ ] accept_reserved_instances_exchange_quote +- [ ] accept_vpc_endpoint_connections +- [X] accept_vpc_peering_connection +- [X] allocate_address +- [ ] allocate_hosts +- [ ] assign_ipv6_addresses +- [ ] assign_private_ip_addresses +- [X] associate_address +- [X] associate_dhcp_options +- [ ] associate_iam_instance_profile +- [X] associate_route_table +- [ ] associate_subnet_cidr_block +- [X] associate_vpc_cidr_block +- [ ] attach_classic_link_vpc +- [X] attach_internet_gateway +- [X] attach_network_interface +- [X] attach_volume +- [X] attach_vpn_gateway +- [X] authorize_security_group_egress +- [X] authorize_security_group_ingress +- [ ] bundle_instance +- [ ] cancel_bundle_task +- [ ] cancel_conversion_task +- [ ] cancel_export_task +- [ ] cancel_import_task +- [ ] cancel_reserved_instances_listing +- [X] cancel_spot_fleet_requests +- [X] cancel_spot_instance_requests +- [ ] confirm_product_instance +- [ ] copy_fpga_image +- [X] copy_image +- [X] copy_snapshot +- [X] create_customer_gateway +- [ ] create_default_subnet +- [ ] create_default_vpc +- [X] create_dhcp_options +- [ ] create_egress_only_internet_gateway +- [ ] create_fleet +- [ ] create_flow_logs +- [ ] create_fpga_image +- [X] create_image +- [ ] 
create_instance_export_task +- [X] create_internet_gateway +- [X] create_key_pair +- [ ] create_launch_template +- [ ] create_launch_template_version +- [X] create_nat_gateway +- [X] create_network_acl +- [X] create_network_acl_entry +- [X] create_network_interface +- [ ] create_network_interface_permission +- [ ] create_placement_group +- [ ] create_reserved_instances_listing +- [X] create_route +- [X] create_route_table +- [X] create_security_group +- [X] create_snapshot +- [ ] create_spot_datafeed_subscription +- [X] create_subnet +- [X] create_tags +- [X] create_volume +- [X] create_vpc +- [ ] create_vpc_endpoint +- [ ] create_vpc_endpoint_connection_notification +- [ ] create_vpc_endpoint_service_configuration +- [X] create_vpc_peering_connection +- [X] create_vpn_connection +- [ ] create_vpn_connection_route +- [X] create_vpn_gateway +- [X] delete_customer_gateway +- [ ] delete_dhcp_options +- [ ] delete_egress_only_internet_gateway +- [ ] delete_fleets +- [ ] delete_flow_logs +- [ ] delete_fpga_image +- [X] delete_internet_gateway +- [X] delete_key_pair +- [ ] delete_launch_template +- [ ] delete_launch_template_versions +- [X] delete_nat_gateway +- [X] delete_network_acl +- [X] delete_network_acl_entry +- [X] delete_network_interface +- [ ] delete_network_interface_permission +- [ ] delete_placement_group +- [X] delete_route +- [X] delete_route_table +- [X] delete_security_group +- [X] delete_snapshot +- [ ] delete_spot_datafeed_subscription +- [X] delete_subnet +- [X] delete_tags +- [X] delete_volume +- [X] delete_vpc +- [ ] delete_vpc_endpoint_connection_notifications +- [ ] delete_vpc_endpoint_service_configurations +- [ ] delete_vpc_endpoints +- [X] delete_vpc_peering_connection +- [X] delete_vpn_connection +- [ ] delete_vpn_connection_route +- [X] delete_vpn_gateway +- [X] deregister_image +- [ ] describe_account_attributes +- [X] describe_addresses +- [ ] describe_aggregate_id_format +- [X] describe_availability_zones +- [ ] describe_bundle_tasks +- [ ] describe_classic_link_instances +- [ ] describe_conversion_tasks +- [ ] describe_customer_gateways +- [X] describe_dhcp_options +- [ ] describe_egress_only_internet_gateways +- [ ] describe_elastic_gpus +- [ ] describe_export_tasks +- [ ] describe_fleet_history +- [ ] describe_fleet_instances +- [ ] describe_fleets +- [ ] describe_flow_logs +- [ ] describe_fpga_image_attribute +- [ ] describe_fpga_images +- [ ] describe_host_reservation_offerings +- [ ] describe_host_reservations +- [ ] describe_hosts +- [ ] describe_iam_instance_profile_associations +- [ ] describe_id_format +- [ ] describe_identity_id_format +- [ ] describe_image_attribute +- [X] describe_images +- [ ] describe_import_image_tasks +- [ ] describe_import_snapshot_tasks +- [X] describe_instance_attribute +- [ ] describe_instance_credit_specifications +- [ ] describe_instance_status +- [ ] describe_instances +- [X] describe_internet_gateways +- [X] describe_key_pairs +- [ ] describe_launch_template_versions +- [ ] describe_launch_templates +- [ ] describe_moving_addresses +- [ ] describe_nat_gateways +- [ ] describe_network_acls +- [ ] describe_network_interface_attribute +- [ ] describe_network_interface_permissions +- [X] describe_network_interfaces +- [ ] describe_placement_groups +- [ ] describe_prefix_lists +- [ ] describe_principal_id_format +- [X] describe_regions +- [ ] describe_reserved_instances +- [ ] describe_reserved_instances_listings +- [ ] describe_reserved_instances_modifications +- [ ] describe_reserved_instances_offerings +- [ ] 
describe_route_tables +- [ ] describe_scheduled_instance_availability +- [ ] describe_scheduled_instances +- [ ] describe_security_group_references +- [X] describe_security_groups +- [ ] describe_snapshot_attribute +- [X] describe_snapshots +- [ ] describe_spot_datafeed_subscription +- [X] describe_spot_fleet_instances +- [ ] describe_spot_fleet_request_history +- [X] describe_spot_fleet_requests +- [X] describe_spot_instance_requests +- [ ] describe_spot_price_history +- [ ] describe_stale_security_groups +- [ ] describe_subnets +- [X] describe_tags +- [ ] describe_volume_attribute +- [ ] describe_volume_status +- [X] describe_volumes +- [ ] describe_volumes_modifications +- [X] describe_vpc_attribute +- [ ] describe_vpc_classic_link +- [ ] describe_vpc_classic_link_dns_support +- [ ] describe_vpc_endpoint_connection_notifications +- [ ] describe_vpc_endpoint_connections +- [ ] describe_vpc_endpoint_service_configurations +- [ ] describe_vpc_endpoint_service_permissions +- [ ] describe_vpc_endpoint_services +- [ ] describe_vpc_endpoints +- [ ] describe_vpc_peering_connections +- [ ] describe_vpcs +- [X] describe_vpn_connections +- [ ] describe_vpn_gateways +- [ ] detach_classic_link_vpc +- [X] detach_internet_gateway +- [X] detach_network_interface +- [X] detach_volume +- [X] detach_vpn_gateway +- [ ] disable_vgw_route_propagation +- [ ] disable_vpc_classic_link +- [ ] disable_vpc_classic_link_dns_support +- [X] disassociate_address +- [ ] disassociate_iam_instance_profile +- [X] disassociate_route_table +- [ ] disassociate_subnet_cidr_block +- [X] disassociate_vpc_cidr_block +- [ ] enable_vgw_route_propagation +- [ ] enable_volume_io +- [ ] enable_vpc_classic_link +- [ ] enable_vpc_classic_link_dns_support +- [ ] get_console_output +- [ ] get_console_screenshot +- [ ] get_host_reservation_purchase_preview +- [ ] get_launch_template_data +- [ ] get_password_data +- [ ] get_reserved_instances_exchange_quote +- [ ] import_image +- [ ] import_instance +- [X] import_key_pair +- [ ] import_snapshot +- [ ] import_volume +- [ ] modify_fleet +- [ ] modify_fpga_image_attribute +- [ ] modify_hosts +- [ ] modify_id_format +- [ ] modify_identity_id_format +- [ ] modify_image_attribute +- [X] modify_instance_attribute +- [ ] modify_instance_credit_specification +- [ ] modify_instance_placement +- [ ] modify_launch_template +- [X] modify_network_interface_attribute +- [ ] modify_reserved_instances +- [ ] modify_snapshot_attribute +- [X] modify_spot_fleet_request +- [X] modify_subnet_attribute +- [ ] modify_volume +- [ ] modify_volume_attribute +- [X] modify_vpc_attribute +- [ ] modify_vpc_endpoint +- [ ] modify_vpc_endpoint_connection_notification +- [ ] modify_vpc_endpoint_service_configuration +- [ ] modify_vpc_endpoint_service_permissions +- [ ] modify_vpc_peering_connection_options +- [ ] modify_vpc_tenancy +- [ ] monitor_instances +- [ ] move_address_to_vpc +- [ ] purchase_host_reservation +- [ ] purchase_reserved_instances_offering +- [ ] purchase_scheduled_instances +- [X] reboot_instances +- [ ] register_image +- [ ] reject_vpc_endpoint_connections +- [X] reject_vpc_peering_connection +- [X] release_address +- [ ] release_hosts +- [ ] replace_iam_instance_profile_association +- [X] replace_network_acl_association +- [X] replace_network_acl_entry +- [X] replace_route +- [X] replace_route_table_association +- [ ] report_instance_status +- [X] request_spot_fleet +- [X] request_spot_instances +- [ ] reset_fpga_image_attribute +- [ ] reset_image_attribute +- [ ] reset_instance_attribute +- [ ] 
reset_network_interface_attribute +- [ ] reset_snapshot_attribute +- [ ] restore_address_to_classic +- [X] revoke_security_group_egress +- [X] revoke_security_group_ingress +- [ ] run_instances +- [ ] run_scheduled_instances +- [X] start_instances +- [X] stop_instances +- [X] terminate_instances +- [ ] unassign_ipv6_addresses +- [ ] unassign_private_ip_addresses +- [ ] unmonitor_instances +- [ ] update_security_group_rule_descriptions_egress +- [ ] update_security_group_rule_descriptions_ingress + +## ecr - 31% implemented +- [ ] batch_check_layer_availability +- [ ] batch_delete_image +- [X] batch_get_image +- [ ] complete_layer_upload +- [X] create_repository +- [ ] delete_lifecycle_policy +- [X] delete_repository +- [ ] delete_repository_policy +- [X] describe_images +- [X] describe_repositories +- [ ] get_authorization_token +- [ ] get_download_url_for_layer +- [ ] get_lifecycle_policy +- [ ] get_lifecycle_policy_preview +- [ ] get_repository_policy +- [ ] initiate_layer_upload +- [X] list_images +- [X] put_image +- [ ] put_lifecycle_policy +- [ ] set_repository_policy +- [ ] start_lifecycle_policy_preview +- [ ] upload_layer_part + +## ecs - 87% implemented +- [X] create_cluster +- [X] create_service +- [X] delete_attributes +- [X] delete_cluster +- [X] delete_service +- [X] deregister_container_instance +- [X] deregister_task_definition +- [X] describe_clusters +- [X] describe_container_instances +- [X] describe_services +- [X] describe_task_definition +- [X] describe_tasks +- [ ] discover_poll_endpoint +- [X] list_attributes +- [X] list_clusters +- [X] list_container_instances +- [X] list_services +- [X] list_task_definition_families +- [X] list_task_definitions +- [X] list_tasks +- [X] put_attributes +- [X] register_container_instance +- [X] register_task_definition +- [X] run_task +- [X] start_task +- [X] stop_task +- [ ] submit_container_state_change +- [ ] submit_task_state_change +- [ ] update_container_agent +- [X] update_container_instances_state +- [X] update_service + +## efs - 0% implemented +- [ ] create_file_system +- [ ] create_mount_target +- [ ] create_tags +- [ ] delete_file_system +- [ ] delete_mount_target +- [ ] delete_tags +- [ ] describe_file_systems +- [ ] describe_mount_target_security_groups +- [ ] describe_mount_targets +- [ ] describe_tags +- [ ] modify_mount_target_security_groups +- [ ] update_file_system + +## eks - 0% implemented +- [ ] create_cluster +- [ ] delete_cluster +- [ ] describe_cluster +- [ ] list_clusters + +## elasticache - 0% implemented +- [ ] add_tags_to_resource +- [ ] authorize_cache_security_group_ingress +- [ ] copy_snapshot +- [ ] create_cache_cluster +- [ ] create_cache_parameter_group +- [ ] create_cache_security_group +- [ ] create_cache_subnet_group +- [ ] create_replication_group +- [ ] create_snapshot +- [ ] delete_cache_cluster +- [ ] delete_cache_parameter_group +- [ ] delete_cache_security_group +- [ ] delete_cache_subnet_group +- [ ] delete_replication_group +- [ ] delete_snapshot +- [ ] describe_cache_clusters +- [ ] describe_cache_engine_versions +- [ ] describe_cache_parameter_groups +- [ ] describe_cache_parameters +- [ ] describe_cache_security_groups +- [ ] describe_cache_subnet_groups +- [ ] describe_engine_default_parameters +- [ ] describe_events +- [ ] describe_replication_groups +- [ ] describe_reserved_cache_nodes +- [ ] describe_reserved_cache_nodes_offerings +- [ ] describe_snapshots +- [ ] list_allowed_node_type_modifications +- [ ] list_tags_for_resource +- [ ] modify_cache_cluster +- [ ] 
modify_cache_parameter_group +- [ ] modify_cache_subnet_group +- [ ] modify_replication_group +- [ ] modify_replication_group_shard_configuration +- [ ] purchase_reserved_cache_nodes_offering +- [ ] reboot_cache_cluster +- [ ] remove_tags_from_resource +- [ ] reset_cache_parameter_group +- [ ] revoke_cache_security_group_ingress +- [ ] test_failover + +## elasticbeanstalk - 0% implemented +- [ ] abort_environment_update +- [ ] apply_environment_managed_action +- [ ] check_dns_availability +- [ ] compose_environments +- [ ] create_application +- [ ] create_application_version +- [ ] create_configuration_template +- [ ] create_environment +- [ ] create_platform_version +- [ ] create_storage_location +- [ ] delete_application +- [ ] delete_application_version +- [ ] delete_configuration_template +- [ ] delete_environment_configuration +- [ ] delete_platform_version +- [ ] describe_account_attributes +- [ ] describe_application_versions +- [ ] describe_applications +- [ ] describe_configuration_options +- [ ] describe_configuration_settings +- [ ] describe_environment_health +- [ ] describe_environment_managed_action_history +- [ ] describe_environment_managed_actions +- [ ] describe_environment_resources +- [ ] describe_environments +- [ ] describe_events +- [ ] describe_instances_health +- [ ] describe_platform_version +- [ ] list_available_solution_stacks +- [ ] list_platform_versions +- [ ] list_tags_for_resource +- [ ] rebuild_environment +- [ ] request_environment_info +- [ ] restart_app_server +- [ ] retrieve_environment_info +- [ ] swap_environment_cnames +- [ ] terminate_environment +- [ ] update_application +- [ ] update_application_resource_lifecycle +- [ ] update_application_version +- [ ] update_configuration_template +- [ ] update_environment +- [ ] update_tags_for_resource +- [ ] validate_configuration_settings + +## elastictranscoder - 0% implemented +- [ ] cancel_job +- [ ] create_job +- [ ] create_pipeline +- [ ] create_preset +- [ ] delete_pipeline +- [ ] delete_preset +- [ ] list_jobs_by_pipeline +- [ ] list_jobs_by_status +- [ ] list_pipelines +- [ ] list_presets +- [ ] read_job +- [ ] read_pipeline +- [ ] read_preset +- [ ] test_role +- [ ] update_pipeline +- [ ] update_pipeline_notifications +- [ ] update_pipeline_status + +## elb - 34% implemented +- [ ] add_tags +- [X] apply_security_groups_to_load_balancer +- [ ] attach_load_balancer_to_subnets +- [X] configure_health_check +- [X] create_app_cookie_stickiness_policy +- [X] create_lb_cookie_stickiness_policy +- [X] create_load_balancer +- [X] create_load_balancer_listeners +- [ ] create_load_balancer_policy +- [X] delete_load_balancer +- [X] delete_load_balancer_listeners +- [ ] delete_load_balancer_policy +- [ ] deregister_instances_from_load_balancer +- [ ] describe_account_limits +- [ ] describe_instance_health +- [ ] describe_load_balancer_attributes +- [ ] describe_load_balancer_policies +- [ ] describe_load_balancer_policy_types +- [X] describe_load_balancers +- [ ] describe_tags +- [ ] detach_load_balancer_from_subnets +- [ ] disable_availability_zones_for_load_balancer +- [ ] enable_availability_zones_for_load_balancer +- [ ] modify_load_balancer_attributes +- [ ] register_instances_with_load_balancer +- [ ] remove_tags +- [ ] set_load_balancer_listener_ssl_certificate +- [ ] set_load_balancer_policies_for_backend_server +- [X] set_load_balancer_policies_of_listener + +## elbv2 - 70% implemented +- [ ] add_listener_certificates +- [ ] add_tags +- [X] create_listener +- [X] create_load_balancer +- [X] 
create_rule +- [X] create_target_group +- [X] delete_listener +- [X] delete_load_balancer +- [X] delete_rule +- [X] delete_target_group +- [X] deregister_targets +- [ ] describe_account_limits +- [ ] describe_listener_certificates +- [X] describe_listeners +- [X] describe_load_balancer_attributes +- [X] describe_load_balancers +- [X] describe_rules +- [ ] describe_ssl_policies +- [ ] describe_tags +- [ ] describe_target_group_attributes +- [X] describe_target_groups +- [X] describe_target_health +- [X] modify_listener +- [X] modify_load_balancer_attributes +- [X] modify_rule +- [X] modify_target_group +- [ ] modify_target_group_attributes +- [X] register_targets +- [ ] remove_listener_certificates +- [ ] remove_tags +- [X] set_ip_address_type +- [X] set_rule_priorities +- [X] set_security_groups +- [X] set_subnets + +## emr - 55% implemented +- [ ] add_instance_fleet +- [X] add_instance_groups +- [X] add_job_flow_steps +- [X] add_tags +- [ ] cancel_steps +- [ ] create_security_configuration +- [ ] delete_security_configuration +- [ ] describe_cluster +- [X] describe_job_flows +- [ ] describe_security_configuration +- [X] describe_step +- [X] list_bootstrap_actions +- [X] list_clusters +- [ ] list_instance_fleets +- [X] list_instance_groups +- [ ] list_instances +- [ ] list_security_configurations +- [X] list_steps +- [ ] modify_instance_fleet +- [X] modify_instance_groups +- [ ] put_auto_scaling_policy +- [ ] remove_auto_scaling_policy +- [X] remove_tags +- [X] run_job_flow +- [X] set_termination_protection +- [X] set_visible_to_all_users +- [X] terminate_job_flows + +## es - 0% implemented +- [ ] add_tags +- [ ] create_elasticsearch_domain +- [ ] delete_elasticsearch_domain +- [ ] delete_elasticsearch_service_role +- [ ] describe_elasticsearch_domain +- [ ] describe_elasticsearch_domain_config +- [ ] describe_elasticsearch_domains +- [ ] describe_elasticsearch_instance_type_limits +- [ ] describe_reserved_elasticsearch_instance_offerings +- [ ] describe_reserved_elasticsearch_instances +- [ ] get_compatible_elasticsearch_versions +- [ ] get_upgrade_history +- [ ] get_upgrade_status +- [ ] list_domain_names +- [ ] list_elasticsearch_instance_types +- [ ] list_elasticsearch_versions +- [ ] list_tags +- [ ] purchase_reserved_elasticsearch_instance_offering +- [ ] remove_tags +- [ ] update_elasticsearch_domain_config +- [ ] upgrade_elasticsearch_domain + +## events - 100% implemented +- [X] delete_rule +- [X] describe_event_bus +- [X] describe_rule +- [X] disable_rule +- [X] enable_rule +- [X] list_rule_names_by_target +- [X] list_rules +- [X] list_targets_by_rule +- [X] put_events +- [X] put_permission +- [X] put_rule +- [X] put_targets +- [X] remove_permission +- [X] remove_targets +- [X] test_event_pattern + +## firehose - 0% implemented +- [ ] create_delivery_stream +- [ ] delete_delivery_stream +- [ ] describe_delivery_stream +- [ ] list_delivery_streams +- [ ] list_tags_for_delivery_stream +- [ ] put_record +- [ ] put_record_batch +- [ ] tag_delivery_stream +- [ ] untag_delivery_stream +- [ ] update_destination + +## fms - 0% implemented +- [ ] associate_admin_account +- [ ] delete_notification_channel +- [ ] delete_policy +- [ ] disassociate_admin_account +- [ ] get_admin_account +- [ ] get_compliance_detail +- [ ] get_notification_channel +- [ ] get_policy +- [ ] list_compliance_status +- [ ] list_policies +- [ ] put_notification_channel +- [ ] put_policy + +## gamelift - 0% implemented +- [ ] accept_match +- [ ] create_alias +- [ ] create_build +- [ ] create_fleet +- [ ] 
create_game_session +- [ ] create_game_session_queue +- [ ] create_matchmaking_configuration +- [ ] create_matchmaking_rule_set +- [ ] create_player_session +- [ ] create_player_sessions +- [ ] create_vpc_peering_authorization +- [ ] create_vpc_peering_connection +- [ ] delete_alias +- [ ] delete_build +- [ ] delete_fleet +- [ ] delete_game_session_queue +- [ ] delete_matchmaking_configuration +- [ ] delete_scaling_policy +- [ ] delete_vpc_peering_authorization +- [ ] delete_vpc_peering_connection +- [ ] describe_alias +- [ ] describe_build +- [ ] describe_ec2_instance_limits +- [ ] describe_fleet_attributes +- [ ] describe_fleet_capacity +- [ ] describe_fleet_events +- [ ] describe_fleet_port_settings +- [ ] describe_fleet_utilization +- [ ] describe_game_session_details +- [ ] describe_game_session_placement +- [ ] describe_game_session_queues +- [ ] describe_game_sessions +- [ ] describe_instances +- [ ] describe_matchmaking +- [ ] describe_matchmaking_configurations +- [ ] describe_matchmaking_rule_sets +- [ ] describe_player_sessions +- [ ] describe_runtime_configuration +- [ ] describe_scaling_policies +- [ ] describe_vpc_peering_authorizations +- [ ] describe_vpc_peering_connections +- [ ] get_game_session_log_url +- [ ] get_instance_access +- [ ] list_aliases +- [ ] list_builds +- [ ] list_fleets +- [ ] put_scaling_policy +- [ ] request_upload_credentials +- [ ] resolve_alias +- [ ] search_game_sessions +- [ ] start_fleet_actions +- [ ] start_game_session_placement +- [ ] start_match_backfill +- [ ] start_matchmaking +- [ ] stop_fleet_actions +- [ ] stop_game_session_placement +- [ ] stop_matchmaking +- [ ] update_alias +- [ ] update_build +- [ ] update_fleet_attributes +- [ ] update_fleet_capacity +- [ ] update_fleet_port_settings +- [ ] update_game_session +- [ ] update_game_session_queue +- [ ] update_matchmaking_configuration +- [ ] update_runtime_configuration +- [ ] validate_matchmaking_rule_set + +## glacier - 12% implemented +- [ ] abort_multipart_upload +- [ ] abort_vault_lock +- [ ] add_tags_to_vault +- [ ] complete_multipart_upload +- [ ] complete_vault_lock +- [X] create_vault +- [ ] delete_archive +- [X] delete_vault +- [ ] delete_vault_access_policy +- [ ] delete_vault_notifications +- [ ] describe_job +- [ ] describe_vault +- [ ] get_data_retrieval_policy +- [ ] get_job_output +- [ ] get_vault_access_policy +- [ ] get_vault_lock +- [ ] get_vault_notifications +- [X] initiate_job +- [ ] initiate_multipart_upload +- [ ] initiate_vault_lock +- [X] list_jobs +- [ ] list_multipart_uploads +- [ ] list_parts +- [ ] list_provisioned_capacity +- [ ] list_tags_for_vault +- [ ] list_vaults +- [ ] purchase_provisioned_capacity +- [ ] remove_tags_from_vault +- [ ] set_data_retrieval_policy +- [ ] set_vault_access_policy +- [ ] set_vault_notifications +- [ ] upload_archive +- [ ] upload_multipart_part + +## glue - 6% implemented +- [ ] batch_create_partition +- [ ] batch_delete_connection +- [ ] batch_delete_partition +- [ ] batch_delete_table +- [ ] batch_delete_table_version +- [ ] batch_get_partition +- [ ] batch_stop_job_run +- [ ] create_classifier +- [ ] create_connection +- [ ] create_crawler +- [X] create_database +- [ ] create_dev_endpoint +- [ ] create_job +- [ ] create_partition +- [ ] create_script +- [X] create_table +- [ ] create_trigger +- [ ] create_user_defined_function +- [ ] delete_classifier +- [ ] delete_connection +- [ ] delete_crawler +- [ ] delete_database +- [ ] delete_dev_endpoint +- [ ] delete_job +- [ ] delete_partition +- [ ] delete_table +- [ ] 
delete_table_version +- [ ] delete_trigger +- [ ] delete_user_defined_function +- [ ] get_catalog_import_status +- [ ] get_classifier +- [ ] get_classifiers +- [ ] get_connection +- [ ] get_connections +- [ ] get_crawler +- [ ] get_crawler_metrics +- [ ] get_crawlers +- [X] get_database +- [ ] get_databases +- [ ] get_dataflow_graph +- [ ] get_dev_endpoint +- [ ] get_dev_endpoints +- [ ] get_job +- [ ] get_job_run +- [ ] get_job_runs +- [ ] get_jobs +- [ ] get_mapping +- [ ] get_partition +- [ ] get_partitions +- [ ] get_plan +- [X] get_table +- [ ] get_table_version +- [ ] get_table_versions +- [X] get_tables +- [ ] get_trigger +- [ ] get_triggers +- [ ] get_user_defined_function +- [ ] get_user_defined_functions +- [ ] import_catalog_to_glue +- [ ] reset_job_bookmark +- [ ] start_crawler +- [ ] start_crawler_schedule +- [ ] start_job_run +- [ ] start_trigger +- [ ] stop_crawler +- [ ] stop_crawler_schedule +- [ ] stop_trigger +- [ ] update_classifier +- [ ] update_connection +- [ ] update_crawler +- [ ] update_crawler_schedule +- [ ] update_database +- [ ] update_dev_endpoint +- [ ] update_job +- [ ] update_partition +- [ ] update_table +- [ ] update_trigger +- [ ] update_user_defined_function + +## greengrass - 0% implemented +- [ ] associate_role_to_group +- [ ] associate_service_role_to_account +- [ ] create_core_definition +- [ ] create_core_definition_version +- [ ] create_deployment +- [ ] create_device_definition +- [ ] create_device_definition_version +- [ ] create_function_definition +- [ ] create_function_definition_version +- [ ] create_group +- [ ] create_group_certificate_authority +- [ ] create_group_version +- [ ] create_logger_definition +- [ ] create_logger_definition_version +- [ ] create_resource_definition +- [ ] create_resource_definition_version +- [ ] create_software_update_job +- [ ] create_subscription_definition +- [ ] create_subscription_definition_version +- [ ] delete_core_definition +- [ ] delete_device_definition +- [ ] delete_function_definition +- [ ] delete_group +- [ ] delete_logger_definition +- [ ] delete_resource_definition +- [ ] delete_subscription_definition +- [ ] disassociate_role_from_group +- [ ] disassociate_service_role_from_account +- [ ] get_associated_role +- [ ] get_connectivity_info +- [ ] get_core_definition +- [ ] get_core_definition_version +- [ ] get_deployment_status +- [ ] get_device_definition +- [ ] get_device_definition_version +- [ ] get_function_definition +- [ ] get_function_definition_version +- [ ] get_group +- [ ] get_group_certificate_authority +- [ ] get_group_certificate_configuration +- [ ] get_group_version +- [ ] get_logger_definition +- [ ] get_logger_definition_version +- [ ] get_resource_definition +- [ ] get_resource_definition_version +- [ ] get_service_role_for_account +- [ ] get_subscription_definition +- [ ] get_subscription_definition_version +- [ ] list_core_definition_versions +- [ ] list_core_definitions +- [ ] list_deployments +- [ ] list_device_definition_versions +- [ ] list_device_definitions +- [ ] list_function_definition_versions +- [ ] list_function_definitions +- [ ] list_group_certificate_authorities +- [ ] list_group_versions +- [ ] list_groups +- [ ] list_logger_definition_versions +- [ ] list_logger_definitions +- [ ] list_resource_definition_versions +- [ ] list_resource_definitions +- [ ] list_subscription_definition_versions +- [ ] list_subscription_definitions +- [ ] reset_deployments +- [ ] update_connectivity_info +- [ ] update_core_definition +- [ ] update_device_definition +- [ ] 
update_function_definition +- [ ] update_group +- [ ] update_group_certificate_configuration +- [ ] update_logger_definition +- [ ] update_resource_definition +- [ ] update_subscription_definition + +## guardduty - 0% implemented +- [ ] accept_invitation +- [ ] archive_findings +- [ ] create_detector +- [ ] create_filter +- [ ] create_ip_set +- [ ] create_members +- [ ] create_sample_findings +- [ ] create_threat_intel_set +- [ ] decline_invitations +- [ ] delete_detector +- [ ] delete_filter +- [ ] delete_invitations +- [ ] delete_ip_set +- [ ] delete_members +- [ ] delete_threat_intel_set +- [ ] disassociate_from_master_account +- [ ] disassociate_members +- [ ] get_detector +- [ ] get_filter +- [ ] get_findings +- [ ] get_findings_statistics +- [ ] get_invitations_count +- [ ] get_ip_set +- [ ] get_master_account +- [ ] get_members +- [ ] get_threat_intel_set +- [ ] invite_members +- [ ] list_detectors +- [ ] list_filters +- [ ] list_findings +- [ ] list_invitations +- [ ] list_ip_sets +- [ ] list_members +- [ ] list_threat_intel_sets +- [ ] start_monitoring_members +- [ ] stop_monitoring_members +- [ ] unarchive_findings +- [ ] update_detector +- [ ] update_filter +- [ ] update_findings_feedback +- [ ] update_ip_set +- [ ] update_threat_intel_set + +## health - 0% implemented +- [ ] describe_affected_entities +- [ ] describe_entity_aggregates +- [ ] describe_event_aggregates +- [ ] describe_event_details +- [ ] describe_event_types +- [ ] describe_events + +## iam - 47% implemented +- [ ] add_client_id_to_open_id_connect_provider +- [X] add_role_to_instance_profile +- [X] add_user_to_group +- [X] attach_group_policy +- [X] attach_role_policy +- [X] attach_user_policy +- [ ] change_password +- [X] create_access_key +- [X] create_account_alias +- [X] create_group +- [X] create_instance_profile +- [X] create_login_profile +- [ ] create_open_id_connect_provider +- [X] create_policy +- [X] create_policy_version +- [X] create_role +- [ ] create_saml_provider +- [ ] create_service_linked_role +- [ ] create_service_specific_credential +- [X] create_user +- [ ] create_virtual_mfa_device +- [X] deactivate_mfa_device +- [X] delete_access_key +- [X] delete_account_alias +- [ ] delete_account_password_policy +- [ ] delete_group +- [ ] delete_group_policy +- [ ] delete_instance_profile +- [X] delete_login_profile +- [ ] delete_open_id_connect_provider +- [ ] delete_policy +- [X] delete_policy_version +- [X] delete_role +- [ ] delete_role_permissions_boundary +- [X] delete_role_policy +- [ ] delete_saml_provider +- [X] delete_server_certificate +- [ ] delete_service_linked_role +- [ ] delete_service_specific_credential +- [ ] delete_signing_certificate +- [ ] delete_ssh_public_key +- [X] delete_user +- [ ] delete_user_permissions_boundary +- [X] delete_user_policy +- [ ] delete_virtual_mfa_device +- [X] detach_group_policy +- [X] detach_role_policy +- [X] detach_user_policy +- [X] enable_mfa_device +- [ ] generate_credential_report +- [ ] get_access_key_last_used +- [X] get_account_authorization_details +- [ ] get_account_password_policy +- [ ] get_account_summary +- [ ] get_context_keys_for_custom_policy +- [ ] get_context_keys_for_principal_policy +- [X] get_credential_report +- [X] get_group +- [X] get_group_policy +- [X] get_instance_profile +- [X] get_login_profile +- [ ] get_open_id_connect_provider +- [X] get_policy +- [X] get_policy_version +- [X] get_role +- [X] get_role_policy +- [ ] get_saml_provider +- [X] get_server_certificate +- [ ] get_service_linked_role_deletion_status +- [ ] 
get_ssh_public_key +- [X] get_user +- [X] get_user_policy +- [ ] list_access_keys +- [X] list_account_aliases +- [X] list_attached_group_policies +- [X] list_attached_role_policies +- [X] list_attached_user_policies +- [ ] list_entities_for_policy +- [X] list_group_policies +- [X] list_groups +- [ ] list_groups_for_user +- [ ] list_instance_profiles +- [ ] list_instance_profiles_for_role +- [X] list_mfa_devices +- [ ] list_open_id_connect_providers +- [X] list_policies +- [X] list_policy_versions +- [X] list_role_policies +- [ ] list_roles +- [ ] list_saml_providers +- [ ] list_server_certificates +- [ ] list_service_specific_credentials +- [ ] list_signing_certificates +- [ ] list_ssh_public_keys +- [X] list_user_policies +- [X] list_users +- [ ] list_virtual_mfa_devices +- [X] put_group_policy +- [ ] put_role_permissions_boundary +- [X] put_role_policy +- [ ] put_user_permissions_boundary +- [X] put_user_policy +- [ ] remove_client_id_from_open_id_connect_provider +- [X] remove_role_from_instance_profile +- [X] remove_user_from_group +- [ ] reset_service_specific_credential +- [ ] resync_mfa_device +- [ ] set_default_policy_version +- [ ] simulate_custom_policy +- [ ] simulate_principal_policy +- [X] update_access_key +- [ ] update_account_password_policy +- [ ] update_assume_role_policy +- [ ] update_group +- [X] update_login_profile +- [ ] update_open_id_connect_provider_thumbprint +- [ ] update_role +- [ ] update_role_description +- [ ] update_saml_provider +- [ ] update_server_certificate +- [ ] update_service_specific_credential +- [ ] update_signing_certificate +- [ ] update_ssh_public_key +- [ ] update_user +- [ ] upload_server_certificate +- [ ] upload_signing_certificate +- [ ] upload_ssh_public_key + +## importexport - 0% implemented +- [ ] cancel_job +- [ ] create_job +- [ ] get_shipping_label +- [ ] get_status +- [ ] list_jobs +- [ ] update_job + +## inspector - 0% implemented +- [ ] add_attributes_to_findings +- [ ] create_assessment_target +- [ ] create_assessment_template +- [ ] create_exclusions_preview +- [ ] create_resource_group +- [ ] delete_assessment_run +- [ ] delete_assessment_target +- [ ] delete_assessment_template +- [ ] describe_assessment_runs +- [ ] describe_assessment_targets +- [ ] describe_assessment_templates +- [ ] describe_cross_account_access_role +- [ ] describe_exclusions +- [ ] describe_findings +- [ ] describe_resource_groups +- [ ] describe_rules_packages +- [ ] get_assessment_report +- [ ] get_exclusions_preview +- [ ] get_telemetry_metadata +- [ ] list_assessment_run_agents +- [ ] list_assessment_runs +- [ ] list_assessment_targets +- [ ] list_assessment_templates +- [ ] list_event_subscriptions +- [ ] list_exclusions +- [ ] list_findings +- [ ] list_rules_packages +- [ ] list_tags_for_resource +- [ ] preview_agents +- [ ] register_cross_account_access_role +- [ ] remove_attributes_from_findings +- [ ] set_tags_for_resource +- [ ] start_assessment_run +- [ ] stop_assessment_run +- [ ] subscribe_to_event +- [ ] unsubscribe_from_event +- [ ] update_assessment_target + +## iot - 31% implemented +- [ ] accept_certificate_transfer +- [X] add_thing_to_thing_group +- [ ] associate_targets_with_job +- [X] attach_policy +- [X] attach_principal_policy +- [ ] attach_security_profile +- [X] attach_thing_principal +- [ ] cancel_audit_task +- [ ] cancel_certificate_transfer +- [ ] cancel_job +- [ ] cancel_job_execution +- [ ] clear_default_authorizer +- [ ] create_authorizer +- [ ] create_certificate_from_csr +- [X] create_job +- [X] 
create_keys_and_certificate +- [ ] create_ota_update +- [X] create_policy +- [X] create_policy_version +- [ ] create_role_alias +- [ ] create_scheduled_audit +- [ ] create_security_profile +- [ ] create_stream +- [X] create_thing +- [X] create_thing_group +- [X] create_thing_type +- [ ] create_topic_rule +- [ ] delete_account_audit_configuration +- [ ] delete_authorizer +- [ ] delete_ca_certificate +- [X] delete_certificate +- [ ] delete_job +- [ ] delete_job_execution +- [ ] delete_ota_update +- [X] delete_policy +- [X] delete_policy_version +- [ ] delete_registration_code +- [ ] delete_role_alias +- [ ] delete_scheduled_audit +- [ ] delete_security_profile +- [ ] delete_stream +- [X] delete_thing +- [X] delete_thing_group +- [X] delete_thing_type +- [ ] delete_topic_rule +- [ ] delete_v2_logging_level +- [ ] deprecate_thing_type +- [ ] describe_account_audit_configuration +- [ ] describe_audit_task +- [ ] describe_authorizer +- [ ] describe_ca_certificate +- [X] describe_certificate +- [ ] describe_default_authorizer +- [ ] describe_endpoint +- [ ] describe_event_configurations +- [ ] describe_index +- [X] describe_job +- [ ] describe_job_execution +- [ ] describe_role_alias +- [ ] describe_scheduled_audit +- [ ] describe_security_profile +- [ ] describe_stream +- [X] describe_thing +- [X] describe_thing_group +- [ ] describe_thing_registration_task +- [X] describe_thing_type +- [X] detach_policy +- [X] detach_principal_policy +- [ ] detach_security_profile +- [X] detach_thing_principal +- [ ] disable_topic_rule +- [ ] enable_topic_rule +- [ ] get_effective_policies +- [ ] get_indexing_configuration +- [X] get_job_document +- [ ] get_logging_options +- [ ] get_ota_update +- [X] get_policy +- [X] get_policy_version +- [ ] get_registration_code +- [ ] get_topic_rule +- [ ] get_v2_logging_options +- [ ] list_active_violations +- [X] list_attached_policies +- [ ] list_audit_findings +- [ ] list_audit_tasks +- [ ] list_authorizers +- [ ] list_ca_certificates +- [X] list_certificates +- [ ] list_certificates_by_ca +- [ ] list_indices +- [ ] list_job_executions_for_job +- [ ] list_job_executions_for_thing +- [ ] list_jobs +- [ ] list_ota_updates +- [ ] list_outgoing_certificates +- [X] list_policies +- [X] list_policy_principals +- [X] list_policy_versions +- [X] list_principal_policies +- [X] list_principal_things +- [ ] list_role_aliases +- [ ] list_scheduled_audits +- [ ] list_security_profiles +- [ ] list_security_profiles_for_target +- [ ] list_streams +- [ ] list_targets_for_policy +- [ ] list_targets_for_security_profile +- [X] list_thing_groups +- [X] list_thing_groups_for_thing +- [X] list_thing_principals +- [ ] list_thing_registration_task_reports +- [ ] list_thing_registration_tasks +- [X] list_thing_types +- [X] list_things +- [X] list_things_in_thing_group +- [ ] list_topic_rules +- [ ] list_v2_logging_levels +- [ ] list_violation_events +- [ ] register_ca_certificate +- [ ] register_certificate +- [ ] register_thing +- [ ] reject_certificate_transfer +- [X] remove_thing_from_thing_group +- [ ] replace_topic_rule +- [ ] search_index +- [ ] set_default_authorizer +- [X] set_default_policy_version +- [ ] set_logging_options +- [ ] set_v2_logging_level +- [ ] set_v2_logging_options +- [ ] start_on_demand_audit_task +- [ ] start_thing_registration_task +- [ ] stop_thing_registration_task +- [ ] test_authorization +- [ ] test_invoke_authorizer +- [ ] transfer_certificate +- [ ] update_account_audit_configuration +- [ ] update_authorizer +- [ ] update_ca_certificate +- [X] 
update_certificate +- [ ] update_event_configurations +- [ ] update_indexing_configuration +- [ ] update_role_alias +- [ ] update_scheduled_audit +- [ ] update_security_profile +- [ ] update_stream +- [X] update_thing +- [X] update_thing_group +- [X] update_thing_groups_for_thing +- [ ] validate_security_profile_behaviors + +## iot-data - 100% implemented +- [X] delete_thing_shadow +- [X] get_thing_shadow +- [X] publish +- [X] update_thing_shadow + +## iot-jobs-data - 0% implemented +- [ ] describe_job_execution +- [ ] get_pending_job_executions +- [ ] start_next_pending_job_execution +- [ ] update_job_execution + +## iot1click-devices - 0% implemented +- [ ] claim_devices_by_claim_code +- [ ] describe_device +- [ ] finalize_device_claim +- [ ] get_device_methods +- [ ] initiate_device_claim +- [ ] invoke_device_method +- [ ] list_device_events +- [ ] list_devices +- [ ] unclaim_device +- [ ] update_device_state + +## iot1click-projects - 0% implemented +- [ ] associate_device_with_placement +- [ ] create_placement +- [ ] create_project +- [ ] delete_placement +- [ ] delete_project +- [ ] describe_placement +- [ ] describe_project +- [ ] disassociate_device_from_placement +- [ ] get_devices_in_placement +- [ ] list_placements +- [ ] list_projects +- [ ] update_placement +- [ ] update_project + +## iotanalytics - 0% implemented +- [ ] batch_put_message +- [ ] cancel_pipeline_reprocessing +- [ ] create_channel +- [ ] create_dataset +- [ ] create_dataset_content +- [ ] create_datastore +- [ ] create_pipeline +- [ ] delete_channel +- [ ] delete_dataset +- [ ] delete_dataset_content +- [ ] delete_datastore +- [ ] delete_pipeline +- [ ] describe_channel +- [ ] describe_dataset +- [ ] describe_datastore +- [ ] describe_logging_options +- [ ] describe_pipeline +- [ ] get_dataset_content +- [ ] list_channels +- [ ] list_datasets +- [ ] list_datastores +- [ ] list_pipelines +- [ ] list_tags_for_resource +- [ ] put_logging_options +- [ ] run_pipeline_activity +- [ ] sample_channel_data +- [ ] start_pipeline_reprocessing +- [ ] tag_resource +- [ ] untag_resource +- [ ] update_channel +- [ ] update_dataset +- [ ] update_datastore +- [ ] update_pipeline + +## kinesis - 46% implemented +- [X] add_tags_to_stream +- [X] create_stream +- [ ] decrease_stream_retention_period +- [X] delete_stream +- [ ] deregister_stream_consumer +- [ ] describe_limits +- [X] describe_stream +- [ ] describe_stream_consumer +- [ ] describe_stream_summary +- [ ] disable_enhanced_monitoring +- [ ] enable_enhanced_monitoring +- [X] get_records +- [X] get_shard_iterator +- [ ] increase_stream_retention_period +- [ ] list_shards +- [ ] list_stream_consumers +- [X] list_streams +- [X] list_tags_for_stream +- [X] merge_shards +- [X] put_record +- [X] put_records +- [ ] register_stream_consumer +- [X] remove_tags_from_stream +- [X] split_shard +- [ ] start_stream_encryption +- [ ] stop_stream_encryption +- [ ] subscribe_to_shard +- [ ] update_shard_count + +## kinesis-video-archived-media - 0% implemented +- [ ] get_hls_streaming_session_url +- [ ] get_media_for_fragment_list +- [ ] list_fragments + +## kinesis-video-media - 0% implemented +- [ ] get_media + +## kinesisanalytics - 0% implemented +- [ ] add_application_cloud_watch_logging_option +- [ ] add_application_input +- [ ] add_application_input_processing_configuration +- [ ] add_application_output +- [ ] add_application_reference_data_source +- [ ] create_application +- [ ] delete_application +- [ ] delete_application_cloud_watch_logging_option +- [ ] 
delete_application_input_processing_configuration +- [ ] delete_application_output +- [ ] delete_application_reference_data_source +- [ ] describe_application +- [ ] discover_input_schema +- [ ] list_applications +- [ ] start_application +- [ ] stop_application +- [ ] update_application + +## kinesisvideo - 0% implemented +- [ ] create_stream +- [ ] delete_stream +- [ ] describe_stream +- [ ] get_data_endpoint +- [ ] list_streams +- [ ] list_tags_for_stream +- [ ] tag_stream +- [ ] untag_stream +- [ ] update_data_retention +- [ ] update_stream + +## kms - 37% implemented +- [X] cancel_key_deletion +- [ ] create_alias +- [ ] create_grant +- [X] create_key +- [ ] decrypt +- [X] delete_alias +- [ ] delete_imported_key_material +- [X] describe_key +- [X] disable_key +- [X] disable_key_rotation +- [X] enable_key +- [X] enable_key_rotation +- [ ] encrypt +- [ ] generate_data_key +- [ ] generate_data_key_without_plaintext +- [ ] generate_random +- [X] get_key_policy +- [X] get_key_rotation_status +- [ ] get_parameters_for_import +- [ ] import_key_material +- [ ] list_aliases +- [ ] list_grants +- [ ] list_key_policies +- [X] list_keys +- [ ] list_resource_tags +- [ ] list_retirable_grants +- [X] put_key_policy +- [ ] re_encrypt +- [ ] retire_grant +- [ ] revoke_grant +- [X] schedule_key_deletion +- [ ] tag_resource +- [ ] untag_resource +- [ ] update_alias +- [ ] update_key_description + +## lambda - 0% implemented +- [ ] add_permission +- [ ] create_alias +- [ ] create_event_source_mapping +- [ ] create_function +- [ ] delete_alias +- [ ] delete_event_source_mapping +- [ ] delete_function +- [ ] delete_function_concurrency +- [ ] get_account_settings +- [ ] get_alias +- [ ] get_event_source_mapping +- [ ] get_function +- [ ] get_function_configuration +- [ ] get_policy +- [ ] invoke +- [ ] invoke_async +- [ ] list_aliases +- [ ] list_event_source_mappings +- [ ] list_functions +- [ ] list_tags +- [ ] list_versions_by_function +- [ ] publish_version +- [ ] put_function_concurrency +- [ ] remove_permission +- [ ] tag_resource +- [ ] untag_resource +- [ ] update_alias +- [ ] update_event_source_mapping +- [ ] update_function_code +- [ ] update_function_configuration + +## lex-models - 0% implemented +- [ ] create_bot_version +- [ ] create_intent_version +- [ ] create_slot_type_version +- [ ] delete_bot +- [ ] delete_bot_alias +- [ ] delete_bot_channel_association +- [ ] delete_bot_version +- [ ] delete_intent +- [ ] delete_intent_version +- [ ] delete_slot_type +- [ ] delete_slot_type_version +- [ ] delete_utterances +- [ ] get_bot +- [ ] get_bot_alias +- [ ] get_bot_aliases +- [ ] get_bot_channel_association +- [ ] get_bot_channel_associations +- [ ] get_bot_versions +- [ ] get_bots +- [ ] get_builtin_intent +- [ ] get_builtin_intents +- [ ] get_builtin_slot_types +- [ ] get_export +- [ ] get_import +- [ ] get_intent +- [ ] get_intent_versions +- [ ] get_intents +- [ ] get_slot_type +- [ ] get_slot_type_versions +- [ ] get_slot_types +- [ ] get_utterances_view +- [ ] put_bot +- [ ] put_bot_alias +- [ ] put_intent +- [ ] put_slot_type +- [ ] start_import + +## lex-runtime - 0% implemented +- [ ] post_content +- [ ] post_text + +## lightsail - 0% implemented +- [ ] allocate_static_ip +- [ ] attach_disk +- [ ] attach_instances_to_load_balancer +- [ ] attach_load_balancer_tls_certificate +- [ ] attach_static_ip +- [ ] close_instance_public_ports +- [ ] create_disk +- [ ] create_disk_from_snapshot +- [ ] create_disk_snapshot +- [ ] create_domain +- [ ] create_domain_entry +- [ ] 
create_instance_snapshot +- [ ] create_instances +- [ ] create_instances_from_snapshot +- [ ] create_key_pair +- [ ] create_load_balancer +- [ ] create_load_balancer_tls_certificate +- [ ] delete_disk +- [ ] delete_disk_snapshot +- [ ] delete_domain +- [ ] delete_domain_entry +- [ ] delete_instance +- [ ] delete_instance_snapshot +- [ ] delete_key_pair +- [ ] delete_load_balancer +- [ ] delete_load_balancer_tls_certificate +- [ ] detach_disk +- [ ] detach_instances_from_load_balancer +- [ ] detach_static_ip +- [ ] download_default_key_pair +- [ ] get_active_names +- [ ] get_blueprints +- [ ] get_bundles +- [ ] get_disk +- [ ] get_disk_snapshot +- [ ] get_disk_snapshots +- [ ] get_disks +- [ ] get_domain +- [ ] get_domains +- [ ] get_instance +- [ ] get_instance_access_details +- [ ] get_instance_metric_data +- [ ] get_instance_port_states +- [ ] get_instance_snapshot +- [ ] get_instance_snapshots +- [ ] get_instance_state +- [ ] get_instances +- [ ] get_key_pair +- [ ] get_key_pairs +- [ ] get_load_balancer +- [ ] get_load_balancer_metric_data +- [ ] get_load_balancer_tls_certificates +- [ ] get_load_balancers +- [ ] get_operation +- [ ] get_operations +- [ ] get_operations_for_resource +- [ ] get_regions +- [ ] get_static_ip +- [ ] get_static_ips +- [ ] import_key_pair +- [ ] is_vpc_peered +- [ ] open_instance_public_ports +- [ ] peer_vpc +- [ ] put_instance_public_ports +- [ ] reboot_instance +- [ ] release_static_ip +- [ ] start_instance +- [ ] stop_instance +- [ ] unpeer_vpc +- [ ] update_domain_entry +- [ ] update_load_balancer_attribute + +## logs - 27% implemented +- [ ] associate_kms_key +- [ ] cancel_export_task +- [ ] create_export_task +- [X] create_log_group +- [X] create_log_stream +- [ ] delete_destination +- [X] delete_log_group +- [X] delete_log_stream +- [ ] delete_metric_filter +- [ ] delete_resource_policy +- [ ] delete_retention_policy +- [ ] delete_subscription_filter +- [ ] describe_destinations +- [ ] describe_export_tasks +- [X] describe_log_groups +- [X] describe_log_streams +- [ ] describe_metric_filters +- [ ] describe_resource_policies +- [ ] describe_subscription_filters +- [ ] disassociate_kms_key +- [X] filter_log_events +- [X] get_log_events +- [ ] list_tags_log_group +- [ ] put_destination +- [ ] put_destination_policy +- [X] put_log_events +- [ ] put_metric_filter +- [ ] put_resource_policy +- [ ] put_retention_policy +- [ ] put_subscription_filter +- [ ] tag_log_group +- [ ] test_metric_filter +- [ ] untag_log_group + +## machinelearning - 0% implemented +- [ ] add_tags +- [ ] create_batch_prediction +- [ ] create_data_source_from_rds +- [ ] create_data_source_from_redshift +- [ ] create_data_source_from_s3 +- [ ] create_evaluation +- [ ] create_ml_model +- [ ] create_realtime_endpoint +- [ ] delete_batch_prediction +- [ ] delete_data_source +- [ ] delete_evaluation +- [ ] delete_ml_model +- [ ] delete_realtime_endpoint +- [ ] delete_tags +- [ ] describe_batch_predictions +- [ ] describe_data_sources +- [ ] describe_evaluations +- [ ] describe_ml_models +- [ ] describe_tags +- [ ] get_batch_prediction +- [ ] get_data_source +- [ ] get_evaluation +- [ ] get_ml_model +- [ ] predict +- [ ] update_batch_prediction +- [ ] update_data_source +- [ ] update_evaluation +- [ ] update_ml_model + +## macie - 0% implemented +- [ ] associate_member_account +- [ ] associate_s3_resources +- [ ] disassociate_member_account +- [ ] disassociate_s3_resources +- [ ] list_member_accounts +- [ ] list_s3_resources +- [ ] update_s3_resources + +## marketplace-entitlement - 0% 
implemented +- [ ] get_entitlements + +## marketplacecommerceanalytics - 0% implemented +- [ ] generate_data_set +- [ ] start_support_data_export + +## mediaconvert - 0% implemented +- [ ] cancel_job +- [ ] create_job +- [ ] create_job_template +- [ ] create_preset +- [ ] create_queue +- [ ] delete_job_template +- [ ] delete_preset +- [ ] delete_queue +- [ ] describe_endpoints +- [ ] get_job +- [ ] get_job_template +- [ ] get_preset +- [ ] get_queue +- [ ] list_job_templates +- [ ] list_jobs +- [ ] list_presets +- [ ] list_queues +- [ ] list_tags_for_resource +- [ ] tag_resource +- [ ] untag_resource +- [ ] update_job_template +- [ ] update_preset +- [ ] update_queue + +## medialive - 0% implemented +- [ ] batch_update_schedule +- [ ] create_channel +- [ ] create_input +- [ ] create_input_security_group +- [ ] delete_channel +- [ ] delete_input +- [ ] delete_input_security_group +- [ ] delete_reservation +- [ ] describe_channel +- [ ] describe_input +- [ ] describe_input_security_group +- [ ] describe_offering +- [ ] describe_reservation +- [ ] describe_schedule +- [ ] list_channels +- [ ] list_input_security_groups +- [ ] list_inputs +- [ ] list_offerings +- [ ] list_reservations +- [ ] purchase_offering +- [ ] start_channel +- [ ] stop_channel +- [ ] update_channel +- [ ] update_input +- [ ] update_input_security_group + +## mediapackage - 0% implemented +- [ ] create_channel +- [ ] create_origin_endpoint +- [ ] delete_channel +- [ ] delete_origin_endpoint +- [ ] describe_channel +- [ ] describe_origin_endpoint +- [ ] list_channels +- [ ] list_origin_endpoints +- [ ] rotate_channel_credentials +- [ ] update_channel +- [ ] update_origin_endpoint + +## mediastore - 0% implemented +- [ ] create_container +- [ ] delete_container +- [ ] delete_container_policy +- [ ] delete_cors_policy +- [ ] describe_container +- [ ] get_container_policy +- [ ] get_cors_policy +- [ ] list_containers +- [ ] put_container_policy +- [ ] put_cors_policy + +## mediastore-data - 0% implemented +- [ ] delete_object +- [ ] describe_object +- [ ] get_object +- [ ] list_items +- [ ] put_object + +## mediatailor - 0% implemented +- [ ] delete_playback_configuration +- [ ] get_playback_configuration +- [ ] list_playback_configurations +- [ ] put_playback_configuration + +## meteringmarketplace - 0% implemented +- [ ] batch_meter_usage +- [ ] meter_usage +- [ ] resolve_customer + +## mgh - 0% implemented +- [ ] associate_created_artifact +- [ ] associate_discovered_resource +- [ ] create_progress_update_stream +- [ ] delete_progress_update_stream +- [ ] describe_application_state +- [ ] describe_migration_task +- [ ] disassociate_created_artifact +- [ ] disassociate_discovered_resource +- [ ] import_migration_task +- [ ] list_created_artifacts +- [ ] list_discovered_resources +- [ ] list_migration_tasks +- [ ] list_progress_update_streams +- [ ] notify_application_state +- [ ] notify_migration_task_state +- [ ] put_resource_attributes + +## mobile - 0% implemented +- [ ] create_project +- [ ] delete_project +- [ ] describe_bundle +- [ ] describe_project +- [ ] export_bundle +- [ ] export_project +- [ ] list_bundles +- [ ] list_projects +- [ ] update_project + +## mq - 0% implemented +- [ ] create_broker +- [ ] create_configuration +- [ ] create_user +- [ ] delete_broker +- [ ] delete_user +- [ ] describe_broker +- [ ] describe_configuration +- [ ] describe_configuration_revision +- [ ] describe_user +- [ ] list_brokers +- [ ] list_configuration_revisions +- [ ] list_configurations +- [ ] list_users +- [ ] reboot_broker 
+- [ ] update_broker +- [ ] update_configuration +- [ ] update_user + +## mturk - 0% implemented +- [ ] accept_qualification_request +- [ ] approve_assignment +- [ ] associate_qualification_with_worker +- [ ] create_additional_assignments_for_hit +- [ ] create_hit +- [ ] create_hit_type +- [ ] create_hit_with_hit_type +- [ ] create_qualification_type +- [ ] create_worker_block +- [ ] delete_hit +- [ ] delete_qualification_type +- [ ] delete_worker_block +- [ ] disassociate_qualification_from_worker +- [ ] get_account_balance +- [ ] get_assignment +- [ ] get_file_upload_url +- [ ] get_hit +- [ ] get_qualification_score +- [ ] get_qualification_type +- [ ] list_assignments_for_hit +- [ ] list_bonus_payments +- [ ] list_hits +- [ ] list_hits_for_qualification_type +- [ ] list_qualification_requests +- [ ] list_qualification_types +- [ ] list_review_policy_results_for_hit +- [ ] list_reviewable_hits +- [ ] list_worker_blocks +- [ ] list_workers_with_qualification_type +- [ ] notify_workers +- [ ] reject_assignment +- [ ] reject_qualification_request +- [ ] send_bonus +- [ ] send_test_event_notification +- [ ] update_expiration_for_hit +- [ ] update_hit_review_status +- [ ] update_hit_type_of_hit +- [ ] update_notification_settings +- [ ] update_qualification_type + +## neptune - 0% implemented +- [ ] add_role_to_db_cluster +- [ ] add_source_identifier_to_subscription +- [ ] add_tags_to_resource +- [ ] apply_pending_maintenance_action +- [ ] copy_db_cluster_parameter_group +- [ ] copy_db_cluster_snapshot +- [ ] copy_db_parameter_group +- [ ] create_db_cluster +- [ ] create_db_cluster_parameter_group +- [ ] create_db_cluster_snapshot +- [ ] create_db_instance +- [ ] create_db_parameter_group +- [ ] create_db_subnet_group +- [ ] create_event_subscription +- [ ] delete_db_cluster +- [ ] delete_db_cluster_parameter_group +- [ ] delete_db_cluster_snapshot +- [ ] delete_db_instance +- [ ] delete_db_parameter_group +- [ ] delete_db_subnet_group +- [ ] delete_event_subscription +- [ ] describe_db_cluster_parameter_groups +- [ ] describe_db_cluster_parameters +- [ ] describe_db_cluster_snapshot_attributes +- [ ] describe_db_cluster_snapshots +- [ ] describe_db_clusters +- [ ] describe_db_engine_versions +- [ ] describe_db_instances +- [ ] describe_db_parameter_groups +- [ ] describe_db_parameters +- [ ] describe_db_subnet_groups +- [ ] describe_engine_default_cluster_parameters +- [ ] describe_engine_default_parameters +- [ ] describe_event_categories +- [ ] describe_event_subscriptions +- [ ] describe_events +- [ ] describe_orderable_db_instance_options +- [ ] describe_pending_maintenance_actions +- [ ] describe_valid_db_instance_modifications +- [ ] failover_db_cluster +- [ ] list_tags_for_resource +- [ ] modify_db_cluster +- [ ] modify_db_cluster_parameter_group +- [ ] modify_db_cluster_snapshot_attribute +- [ ] modify_db_instance +- [ ] modify_db_parameter_group +- [ ] modify_db_subnet_group +- [ ] modify_event_subscription +- [ ] promote_read_replica_db_cluster +- [ ] reboot_db_instance +- [ ] remove_role_from_db_cluster +- [ ] remove_source_identifier_from_subscription +- [ ] remove_tags_from_resource +- [ ] reset_db_cluster_parameter_group +- [ ] reset_db_parameter_group +- [ ] restore_db_cluster_from_snapshot +- [ ] restore_db_cluster_to_point_in_time + +## opsworks - 12% implemented +- [ ] assign_instance +- [ ] assign_volume +- [ ] associate_elastic_ip +- [ ] attach_elastic_load_balancer +- [ ] clone_stack +- [X] create_app +- [ ] create_deployment +- [X] create_instance +- [X] create_layer +- 
[X] create_stack +- [ ] create_user_profile +- [ ] delete_app +- [ ] delete_instance +- [ ] delete_layer +- [ ] delete_stack +- [ ] delete_user_profile +- [ ] deregister_ecs_cluster +- [ ] deregister_elastic_ip +- [ ] deregister_instance +- [ ] deregister_rds_db_instance +- [ ] deregister_volume +- [ ] describe_agent_versions +- [X] describe_apps +- [ ] describe_commands +- [ ] describe_deployments +- [ ] describe_ecs_clusters +- [ ] describe_elastic_ips +- [ ] describe_elastic_load_balancers +- [X] describe_instances +- [X] describe_layers +- [ ] describe_load_based_auto_scaling +- [ ] describe_my_user_profile +- [ ] describe_operating_systems +- [ ] describe_permissions +- [ ] describe_raid_arrays +- [ ] describe_rds_db_instances +- [ ] describe_service_errors +- [ ] describe_stack_provisioning_parameters +- [ ] describe_stack_summary +- [X] describe_stacks +- [ ] describe_time_based_auto_scaling +- [ ] describe_user_profiles +- [ ] describe_volumes +- [ ] detach_elastic_load_balancer +- [ ] disassociate_elastic_ip +- [ ] get_hostname_suggestion +- [ ] grant_access +- [ ] list_tags +- [ ] reboot_instance +- [ ] register_ecs_cluster +- [ ] register_elastic_ip +- [ ] register_instance +- [ ] register_rds_db_instance +- [ ] register_volume +- [ ] set_load_based_auto_scaling +- [ ] set_permission +- [ ] set_time_based_auto_scaling +- [X] start_instance +- [ ] start_stack +- [ ] stop_instance +- [ ] stop_stack +- [ ] tag_resource +- [ ] unassign_instance +- [ ] unassign_volume +- [ ] untag_resource +- [ ] update_app +- [ ] update_elastic_ip +- [ ] update_instance +- [ ] update_layer +- [ ] update_my_user_profile +- [ ] update_rds_db_instance +- [ ] update_stack +- [ ] update_user_profile +- [ ] update_volume + +## opsworkscm - 0% implemented +- [ ] associate_node +- [ ] create_backup +- [ ] create_server +- [ ] delete_backup +- [ ] delete_server +- [ ] describe_account_attributes +- [ ] describe_backups +- [ ] describe_events +- [ ] describe_node_association_status +- [ ] describe_servers +- [ ] disassociate_node +- [ ] restore_server +- [ ] start_maintenance +- [ ] update_server +- [ ] update_server_engine_attributes + +## organizations - 30% implemented +- [ ] accept_handshake +- [ ] attach_policy +- [ ] cancel_handshake +- [X] create_account +- [X] create_organization +- [X] create_organizational_unit +- [ ] create_policy +- [ ] decline_handshake +- [ ] delete_organization +- [ ] delete_organizational_unit +- [ ] delete_policy +- [X] describe_account +- [ ] describe_create_account_status +- [ ] describe_handshake +- [X] describe_organization +- [X] describe_organizational_unit +- [ ] describe_policy +- [ ] detach_policy +- [ ] disable_aws_service_access +- [ ] disable_policy_type +- [ ] enable_all_features +- [ ] enable_aws_service_access +- [ ] enable_policy_type +- [ ] invite_account_to_organization +- [ ] leave_organization +- [X] list_accounts +- [X] list_accounts_for_parent +- [ ] list_aws_service_access_for_organization +- [X] list_children +- [ ] list_create_account_status +- [ ] list_handshakes_for_account +- [ ] list_handshakes_for_organization +- [X] list_organizational_units_for_parent +- [X] list_parents +- [ ] list_policies +- [ ] list_policies_for_target +- [X] list_roots +- [ ] list_targets_for_policy +- [X] move_account +- [ ] remove_account_from_organization +- [ ] update_organizational_unit +- [ ] update_policy + +## pi - 0% implemented +- [ ] describe_dimension_keys +- [ ] get_resource_metrics + +## pinpoint - 0% implemented +- [ ] create_app +- [ ] create_campaign +- [ 
] create_export_job +- [ ] create_import_job +- [ ] create_segment +- [ ] delete_adm_channel +- [ ] delete_apns_channel +- [ ] delete_apns_sandbox_channel +- [ ] delete_apns_voip_channel +- [ ] delete_apns_voip_sandbox_channel +- [ ] delete_app +- [ ] delete_baidu_channel +- [ ] delete_campaign +- [ ] delete_email_channel +- [ ] delete_endpoint +- [ ] delete_event_stream +- [ ] delete_gcm_channel +- [ ] delete_segment +- [ ] delete_sms_channel +- [ ] delete_user_endpoints +- [ ] get_adm_channel +- [ ] get_apns_channel +- [ ] get_apns_sandbox_channel +- [ ] get_apns_voip_channel +- [ ] get_apns_voip_sandbox_channel +- [ ] get_app +- [ ] get_application_settings +- [ ] get_apps +- [ ] get_baidu_channel +- [ ] get_campaign +- [ ] get_campaign_activities +- [ ] get_campaign_version +- [ ] get_campaign_versions +- [ ] get_campaigns +- [ ] get_channels +- [ ] get_email_channel +- [ ] get_endpoint +- [ ] get_event_stream +- [ ] get_export_job +- [ ] get_export_jobs +- [ ] get_gcm_channel +- [ ] get_import_job +- [ ] get_import_jobs +- [ ] get_segment +- [ ] get_segment_export_jobs +- [ ] get_segment_import_jobs +- [ ] get_segment_version +- [ ] get_segment_versions +- [ ] get_segments +- [ ] get_sms_channel +- [ ] get_user_endpoints +- [ ] phone_number_validate +- [ ] put_event_stream +- [ ] put_events +- [ ] remove_attributes +- [ ] send_messages +- [ ] send_users_messages +- [ ] update_adm_channel +- [ ] update_apns_channel +- [ ] update_apns_sandbox_channel +- [ ] update_apns_voip_channel +- [ ] update_apns_voip_sandbox_channel +- [ ] update_application_settings +- [ ] update_baidu_channel +- [ ] update_campaign +- [ ] update_email_channel +- [ ] update_endpoint +- [ ] update_endpoints_batch +- [ ] update_gcm_channel +- [ ] update_segment +- [ ] update_sms_channel + +## polly - 55% implemented +- [X] delete_lexicon +- [X] describe_voices +- [X] get_lexicon +- [ ] get_speech_synthesis_task +- [X] list_lexicons +- [ ] list_speech_synthesis_tasks +- [X] put_lexicon +- [ ] start_speech_synthesis_task +- [ ] synthesize_speech + +## pricing - 0% implemented +- [ ] describe_services +- [ ] get_attribute_values +- [ ] get_products + +## rds - 0% implemented +- [ ] add_role_to_db_cluster +- [ ] add_source_identifier_to_subscription +- [ ] add_tags_to_resource +- [ ] apply_pending_maintenance_action +- [ ] authorize_db_security_group_ingress +- [ ] backtrack_db_cluster +- [ ] copy_db_cluster_parameter_group +- [ ] copy_db_cluster_snapshot +- [ ] copy_db_parameter_group +- [ ] copy_db_snapshot +- [ ] copy_option_group +- [ ] create_db_cluster +- [ ] create_db_cluster_parameter_group +- [ ] create_db_cluster_snapshot +- [ ] create_db_instance +- [ ] create_db_instance_read_replica +- [ ] create_db_parameter_group +- [ ] create_db_security_group +- [ ] create_db_snapshot +- [ ] create_db_subnet_group +- [ ] create_event_subscription +- [ ] create_option_group +- [ ] delete_db_cluster +- [ ] delete_db_cluster_parameter_group +- [ ] delete_db_cluster_snapshot +- [ ] delete_db_instance +- [ ] delete_db_parameter_group +- [ ] delete_db_security_group +- [ ] delete_db_snapshot +- [ ] delete_db_subnet_group +- [ ] delete_event_subscription +- [ ] delete_option_group +- [ ] describe_account_attributes +- [ ] describe_certificates +- [ ] describe_db_cluster_backtracks +- [ ] describe_db_cluster_parameter_groups +- [ ] describe_db_cluster_parameters +- [ ] describe_db_cluster_snapshot_attributes +- [ ] describe_db_cluster_snapshots +- [ ] describe_db_clusters +- [ ] describe_db_engine_versions +- [ ] 
describe_db_instances +- [ ] describe_db_log_files +- [ ] describe_db_parameter_groups +- [ ] describe_db_parameters +- [ ] describe_db_security_groups +- [ ] describe_db_snapshot_attributes +- [ ] describe_db_snapshots +- [ ] describe_db_subnet_groups +- [ ] describe_engine_default_cluster_parameters +- [ ] describe_engine_default_parameters +- [ ] describe_event_categories +- [ ] describe_event_subscriptions +- [ ] describe_events +- [ ] describe_option_group_options +- [ ] describe_option_groups +- [ ] describe_orderable_db_instance_options +- [ ] describe_pending_maintenance_actions +- [ ] describe_reserved_db_instances +- [ ] describe_reserved_db_instances_offerings +- [ ] describe_source_regions +- [ ] describe_valid_db_instance_modifications +- [ ] download_db_log_file_portion +- [ ] failover_db_cluster +- [ ] list_tags_for_resource +- [ ] modify_current_db_cluster_capacity +- [ ] modify_db_cluster +- [ ] modify_db_cluster_parameter_group +- [ ] modify_db_cluster_snapshot_attribute +- [ ] modify_db_instance +- [ ] modify_db_parameter_group +- [ ] modify_db_snapshot +- [ ] modify_db_snapshot_attribute +- [ ] modify_db_subnet_group +- [ ] modify_event_subscription +- [ ] modify_option_group +- [ ] promote_read_replica +- [ ] promote_read_replica_db_cluster +- [ ] purchase_reserved_db_instances_offering +- [ ] reboot_db_instance +- [ ] remove_role_from_db_cluster +- [ ] remove_source_identifier_from_subscription +- [ ] remove_tags_from_resource +- [ ] reset_db_cluster_parameter_group +- [ ] reset_db_parameter_group +- [ ] restore_db_cluster_from_s3 +- [ ] restore_db_cluster_from_snapshot +- [ ] restore_db_cluster_to_point_in_time +- [ ] restore_db_instance_from_db_snapshot +- [ ] restore_db_instance_from_s3 +- [ ] restore_db_instance_to_point_in_time +- [ ] revoke_db_security_group_ingress +- [ ] start_db_instance +- [ ] stop_db_instance + +## redshift - 37% implemented +- [ ] accept_reserved_node_exchange +- [ ] authorize_cluster_security_group_ingress +- [ ] authorize_snapshot_access +- [ ] copy_cluster_snapshot +- [X] create_cluster +- [X] create_cluster_parameter_group +- [X] create_cluster_security_group +- [X] create_cluster_snapshot +- [X] create_cluster_subnet_group +- [ ] create_event_subscription +- [ ] create_hsm_client_certificate +- [ ] create_hsm_configuration +- [X] create_snapshot_copy_grant +- [X] create_tags +- [X] delete_cluster +- [X] delete_cluster_parameter_group +- [X] delete_cluster_security_group +- [X] delete_cluster_snapshot +- [X] delete_cluster_subnet_group +- [ ] delete_event_subscription +- [ ] delete_hsm_client_certificate +- [ ] delete_hsm_configuration +- [X] delete_snapshot_copy_grant +- [X] delete_tags +- [ ] describe_cluster_db_revisions +- [X] describe_cluster_parameter_groups +- [ ] describe_cluster_parameters +- [X] describe_cluster_security_groups +- [X] describe_cluster_snapshots +- [X] describe_cluster_subnet_groups +- [ ] describe_cluster_tracks +- [ ] describe_cluster_versions +- [X] describe_clusters +- [ ] describe_default_cluster_parameters +- [ ] describe_event_categories +- [ ] describe_event_subscriptions +- [ ] describe_events +- [ ] describe_hsm_client_certificates +- [ ] describe_hsm_configurations +- [ ] describe_logging_status +- [ ] describe_orderable_cluster_options +- [ ] describe_reserved_node_offerings +- [ ] describe_reserved_nodes +- [ ] describe_resize +- [X] describe_snapshot_copy_grants +- [ ] describe_table_restore_status +- [X] describe_tags +- [ ] disable_logging +- [X] disable_snapshot_copy +- [ ] enable_logging +- 
[X] enable_snapshot_copy +- [ ] get_cluster_credentials +- [ ] get_reserved_node_exchange_offerings +- [X] modify_cluster +- [ ] modify_cluster_db_revision +- [ ] modify_cluster_iam_roles +- [ ] modify_cluster_parameter_group +- [ ] modify_cluster_subnet_group +- [ ] modify_event_subscription +- [X] modify_snapshot_copy_retention_period +- [ ] purchase_reserved_node_offering +- [ ] reboot_cluster +- [ ] reset_cluster_parameter_group +- [ ] resize_cluster +- [X] restore_from_cluster_snapshot +- [ ] restore_table_from_cluster_snapshot +- [ ] revoke_cluster_security_group_ingress +- [ ] revoke_snapshot_access +- [ ] rotate_encryption_key + +## rekognition - 0% implemented +- [ ] compare_faces +- [ ] create_collection +- [ ] create_stream_processor +- [ ] delete_collection +- [ ] delete_faces +- [ ] delete_stream_processor +- [ ] describe_collection +- [ ] describe_stream_processor +- [ ] detect_faces +- [ ] detect_labels +- [ ] detect_moderation_labels +- [ ] detect_text +- [ ] get_celebrity_info +- [ ] get_celebrity_recognition +- [ ] get_content_moderation +- [ ] get_face_detection +- [ ] get_face_search +- [ ] get_label_detection +- [ ] get_person_tracking +- [ ] index_faces +- [ ] list_collections +- [ ] list_faces +- [ ] list_stream_processors +- [ ] recognize_celebrities +- [ ] search_faces +- [ ] search_faces_by_image +- [ ] start_celebrity_recognition +- [ ] start_content_moderation +- [ ] start_face_detection +- [ ] start_face_search +- [ ] start_label_detection +- [ ] start_person_tracking +- [ ] start_stream_processor +- [ ] stop_stream_processor + +## resource-groups - 0% implemented +- [ ] create_group +- [ ] delete_group +- [ ] get_group +- [ ] get_group_query +- [ ] get_tags +- [ ] list_group_resources +- [ ] list_groups +- [ ] search_resources +- [ ] tag +- [ ] untag +- [ ] update_group +- [ ] update_group_query + +## resourcegroupstaggingapi - 60% implemented +- [X] get_resources +- [X] get_tag_keys +- [X] get_tag_values +- [ ] tag_resources +- [ ] untag_resources + +## route53 - 12% implemented +- [ ] associate_vpc_with_hosted_zone +- [ ] change_resource_record_sets +- [X] change_tags_for_resource +- [X] create_health_check +- [X] create_hosted_zone +- [ ] create_query_logging_config +- [ ] create_reusable_delegation_set +- [ ] create_traffic_policy +- [ ] create_traffic_policy_instance +- [ ] create_traffic_policy_version +- [ ] create_vpc_association_authorization +- [X] delete_health_check +- [X] delete_hosted_zone +- [ ] delete_query_logging_config +- [ ] delete_reusable_delegation_set +- [ ] delete_traffic_policy +- [ ] delete_traffic_policy_instance +- [ ] delete_vpc_association_authorization +- [ ] disassociate_vpc_from_hosted_zone +- [ ] get_account_limit +- [ ] get_change +- [ ] get_checker_ip_ranges +- [ ] get_geo_location +- [ ] get_health_check +- [ ] get_health_check_count +- [ ] get_health_check_last_failure_reason +- [ ] get_health_check_status +- [X] get_hosted_zone +- [ ] get_hosted_zone_count +- [ ] get_hosted_zone_limit +- [ ] get_query_logging_config +- [ ] get_reusable_delegation_set +- [ ] get_reusable_delegation_set_limit +- [ ] get_traffic_policy +- [ ] get_traffic_policy_instance +- [ ] get_traffic_policy_instance_count +- [ ] list_geo_locations +- [ ] list_health_checks +- [ ] list_hosted_zones +- [ ] list_hosted_zones_by_name +- [ ] list_query_logging_configs +- [ ] list_resource_record_sets +- [ ] list_reusable_delegation_sets +- [X] list_tags_for_resource +- [ ] list_tags_for_resources +- [ ] list_traffic_policies +- [ ] 
list_traffic_policy_instances +- [ ] list_traffic_policy_instances_by_hosted_zone +- [ ] list_traffic_policy_instances_by_policy +- [ ] list_traffic_policy_versions +- [ ] list_vpc_association_authorizations +- [ ] test_dns_answer +- [ ] update_health_check +- [ ] update_hosted_zone_comment +- [ ] update_traffic_policy_comment +- [ ] update_traffic_policy_instance + +## route53domains - 0% implemented +- [ ] check_domain_availability +- [ ] check_domain_transferability +- [ ] delete_tags_for_domain +- [ ] disable_domain_auto_renew +- [ ] disable_domain_transfer_lock +- [ ] enable_domain_auto_renew +- [ ] enable_domain_transfer_lock +- [ ] get_contact_reachability_status +- [ ] get_domain_detail +- [ ] get_domain_suggestions +- [ ] get_operation_detail +- [ ] list_domains +- [ ] list_operations +- [ ] list_tags_for_domain +- [ ] register_domain +- [ ] renew_domain +- [ ] resend_contact_reachability_email +- [ ] retrieve_domain_auth_code +- [ ] transfer_domain +- [ ] update_domain_contact +- [ ] update_domain_contact_privacy +- [ ] update_domain_nameservers +- [ ] update_tags_for_domain +- [ ] view_billing + +## s3 - 15% implemented +- [ ] abort_multipart_upload +- [ ] complete_multipart_upload +- [ ] copy_object +- [X] create_bucket +- [ ] create_multipart_upload +- [X] delete_bucket +- [ ] delete_bucket_analytics_configuration +- [X] delete_bucket_cors +- [ ] delete_bucket_encryption +- [ ] delete_bucket_inventory_configuration +- [ ] delete_bucket_lifecycle +- [ ] delete_bucket_metrics_configuration +- [X] delete_bucket_policy +- [ ] delete_bucket_replication +- [X] delete_bucket_tagging +- [ ] delete_bucket_website +- [ ] delete_object +- [ ] delete_object_tagging +- [ ] delete_objects +- [ ] get_bucket_accelerate_configuration +- [X] get_bucket_acl +- [ ] get_bucket_analytics_configuration +- [ ] get_bucket_cors +- [ ] get_bucket_encryption +- [ ] get_bucket_inventory_configuration +- [ ] get_bucket_lifecycle +- [ ] get_bucket_lifecycle_configuration +- [ ] get_bucket_location +- [ ] get_bucket_logging +- [ ] get_bucket_metrics_configuration +- [ ] get_bucket_notification +- [ ] get_bucket_notification_configuration +- [X] get_bucket_policy +- [ ] get_bucket_replication +- [ ] get_bucket_request_payment +- [ ] get_bucket_tagging +- [X] get_bucket_versioning +- [ ] get_bucket_website +- [ ] get_object +- [ ] get_object_acl +- [ ] get_object_tagging +- [ ] get_object_torrent +- [ ] head_bucket +- [ ] head_object +- [ ] list_bucket_analytics_configurations +- [ ] list_bucket_inventory_configurations +- [ ] list_bucket_metrics_configurations +- [ ] list_buckets +- [ ] list_multipart_uploads +- [ ] list_object_versions +- [ ] list_objects +- [ ] list_objects_v2 +- [ ] list_parts +- [ ] put_bucket_accelerate_configuration +- [ ] put_bucket_acl +- [ ] put_bucket_analytics_configuration +- [X] put_bucket_cors +- [ ] put_bucket_encryption +- [ ] put_bucket_inventory_configuration +- [ ] put_bucket_lifecycle +- [ ] put_bucket_lifecycle_configuration +- [X] put_bucket_logging +- [ ] put_bucket_metrics_configuration +- [ ] put_bucket_notification +- [X] put_bucket_notification_configuration +- [ ] put_bucket_policy +- [ ] put_bucket_replication +- [ ] put_bucket_request_payment +- [X] put_bucket_tagging +- [ ] put_bucket_versioning +- [ ] put_bucket_website +- [ ] put_object +- [ ] put_object_acl +- [ ] put_object_tagging +- [ ] restore_object +- [ ] select_object_content +- [ ] upload_part +- [ ] upload_part_copy + +## sagemaker - 0% implemented +- [ ] add_tags +- [ ] create_endpoint +- [ ] 
create_endpoint_config +- [ ] create_hyper_parameter_tuning_job +- [ ] create_model +- [ ] create_notebook_instance +- [ ] create_notebook_instance_lifecycle_config +- [ ] create_presigned_notebook_instance_url +- [ ] create_training_job +- [ ] create_transform_job +- [ ] delete_endpoint +- [ ] delete_endpoint_config +- [ ] delete_model +- [ ] delete_notebook_instance +- [ ] delete_notebook_instance_lifecycle_config +- [ ] delete_tags +- [ ] describe_endpoint +- [ ] describe_endpoint_config +- [ ] describe_hyper_parameter_tuning_job +- [ ] describe_model +- [ ] describe_notebook_instance +- [ ] describe_notebook_instance_lifecycle_config +- [ ] describe_training_job +- [ ] describe_transform_job +- [ ] list_endpoint_configs +- [ ] list_endpoints +- [ ] list_hyper_parameter_tuning_jobs +- [ ] list_models +- [ ] list_notebook_instance_lifecycle_configs +- [ ] list_notebook_instances +- [ ] list_tags +- [ ] list_training_jobs +- [ ] list_training_jobs_for_hyper_parameter_tuning_job +- [ ] list_transform_jobs +- [ ] start_notebook_instance +- [ ] stop_hyper_parameter_tuning_job +- [ ] stop_notebook_instance +- [ ] stop_training_job +- [ ] stop_transform_job +- [ ] update_endpoint +- [ ] update_endpoint_weights_and_capacities +- [ ] update_notebook_instance +- [ ] update_notebook_instance_lifecycle_config + +## sagemaker-runtime - 0% implemented +- [ ] invoke_endpoint + +## sdb - 0% implemented +- [ ] batch_delete_attributes +- [ ] batch_put_attributes +- [ ] create_domain +- [ ] delete_attributes +- [ ] delete_domain +- [ ] domain_metadata +- [ ] get_attributes +- [ ] list_domains +- [ ] put_attributes +- [ ] select + +## secretsmanager - 27% implemented +- [ ] cancel_rotate_secret +- [X] create_secret +- [ ] delete_resource_policy +- [ ] delete_secret +- [X] describe_secret +- [X] get_random_password +- [ ] get_resource_policy +- [X] get_secret_value +- [ ] list_secret_version_ids +- [ ] list_secrets +- [ ] put_resource_policy +- [ ] put_secret_value +- [ ] restore_secret +- [X] rotate_secret +- [ ] tag_resource +- [ ] untag_resource +- [ ] update_secret +- [ ] update_secret_version_stage + +## serverlessrepo - 0% implemented +- [ ] create_application +- [ ] create_application_version +- [ ] create_cloud_formation_change_set +- [ ] delete_application +- [ ] get_application +- [ ] get_application_policy +- [ ] list_application_versions +- [ ] list_applications +- [ ] put_application_policy +- [ ] update_application + +## servicecatalog - 0% implemented +- [ ] accept_portfolio_share +- [ ] associate_principal_with_portfolio +- [ ] associate_product_with_portfolio +- [ ] associate_tag_option_with_resource +- [ ] copy_product +- [ ] create_constraint +- [ ] create_portfolio +- [ ] create_portfolio_share +- [ ] create_product +- [ ] create_provisioned_product_plan +- [ ] create_provisioning_artifact +- [ ] create_tag_option +- [ ] delete_constraint +- [ ] delete_portfolio +- [ ] delete_portfolio_share +- [ ] delete_product +- [ ] delete_provisioned_product_plan +- [ ] delete_provisioning_artifact +- [ ] delete_tag_option +- [ ] describe_constraint +- [ ] describe_copy_product_status +- [ ] describe_portfolio +- [ ] describe_product +- [ ] describe_product_as_admin +- [ ] describe_product_view +- [ ] describe_provisioned_product +- [ ] describe_provisioned_product_plan +- [ ] describe_provisioning_artifact +- [ ] describe_provisioning_parameters +- [ ] describe_record +- [ ] describe_tag_option +- [ ] disassociate_principal_from_portfolio +- [ ] disassociate_product_from_portfolio +- [ ] 
disassociate_tag_option_from_resource +- [ ] execute_provisioned_product_plan +- [ ] list_accepted_portfolio_shares +- [ ] list_constraints_for_portfolio +- [ ] list_launch_paths +- [ ] list_portfolio_access +- [ ] list_portfolios +- [ ] list_portfolios_for_product +- [ ] list_principals_for_portfolio +- [ ] list_provisioned_product_plans +- [ ] list_provisioning_artifacts +- [ ] list_record_history +- [ ] list_resources_for_tag_option +- [ ] list_tag_options +- [ ] provision_product +- [ ] reject_portfolio_share +- [ ] scan_provisioned_products +- [ ] search_products +- [ ] search_products_as_admin +- [ ] search_provisioned_products +- [ ] terminate_provisioned_product +- [ ] update_constraint +- [ ] update_portfolio +- [ ] update_product +- [ ] update_provisioned_product +- [ ] update_provisioning_artifact +- [ ] update_tag_option + +## servicediscovery - 0% implemented +- [ ] create_private_dns_namespace +- [ ] create_public_dns_namespace +- [ ] create_service +- [ ] delete_namespace +- [ ] delete_service +- [ ] deregister_instance +- [ ] get_instance +- [ ] get_instances_health_status +- [ ] get_namespace +- [ ] get_operation +- [ ] get_service +- [ ] list_instances +- [ ] list_namespaces +- [ ] list_operations +- [ ] list_services +- [ ] register_instance +- [ ] update_instance_custom_health_status +- [ ] update_service + +## ses - 11% implemented +- [ ] clone_receipt_rule_set +- [ ] create_configuration_set +- [ ] create_configuration_set_event_destination +- [ ] create_configuration_set_tracking_options +- [ ] create_custom_verification_email_template +- [ ] create_receipt_filter +- [ ] create_receipt_rule +- [ ] create_receipt_rule_set +- [ ] create_template +- [ ] delete_configuration_set +- [ ] delete_configuration_set_event_destination +- [ ] delete_configuration_set_tracking_options +- [ ] delete_custom_verification_email_template +- [X] delete_identity +- [ ] delete_identity_policy +- [ ] delete_receipt_filter +- [ ] delete_receipt_rule +- [ ] delete_receipt_rule_set +- [ ] delete_template +- [ ] delete_verified_email_address +- [ ] describe_active_receipt_rule_set +- [ ] describe_configuration_set +- [ ] describe_receipt_rule +- [ ] describe_receipt_rule_set +- [ ] get_account_sending_enabled +- [ ] get_custom_verification_email_template +- [ ] get_identity_dkim_attributes +- [ ] get_identity_mail_from_domain_attributes +- [ ] get_identity_notification_attributes +- [ ] get_identity_policies +- [ ] get_identity_verification_attributes +- [X] get_send_quota +- [ ] get_send_statistics +- [ ] get_template +- [ ] list_configuration_sets +- [ ] list_custom_verification_email_templates +- [X] list_identities +- [ ] list_identity_policies +- [ ] list_receipt_filters +- [ ] list_receipt_rule_sets +- [ ] list_templates +- [X] list_verified_email_addresses +- [ ] put_identity_policy +- [ ] reorder_receipt_rule_set +- [ ] send_bounce +- [ ] send_bulk_templated_email +- [ ] send_custom_verification_email +- [X] send_email +- [X] send_raw_email +- [ ] send_templated_email +- [ ] set_active_receipt_rule_set +- [ ] set_identity_dkim_enabled +- [ ] set_identity_feedback_forwarding_enabled +- [ ] set_identity_headers_in_notifications_enabled +- [ ] set_identity_mail_from_domain +- [ ] set_identity_notification_topic +- [ ] set_receipt_rule_position +- [ ] test_render_template +- [ ] update_account_sending_enabled +- [ ] update_configuration_set_event_destination +- [ ] update_configuration_set_reputation_metrics_enabled +- [ ] update_configuration_set_sending_enabled +- [ ] 
update_configuration_set_tracking_options +- [ ] update_custom_verification_email_template +- [ ] update_receipt_rule +- [ ] update_template +- [ ] verify_domain_dkim +- [ ] verify_domain_identity +- [X] verify_email_address +- [X] verify_email_identity + +## shield - 0% implemented +- [ ] associate_drt_log_bucket +- [ ] associate_drt_role +- [ ] create_protection +- [ ] create_subscription +- [ ] delete_protection +- [ ] delete_subscription +- [ ] describe_attack +- [ ] describe_drt_access +- [ ] describe_emergency_contact_settings +- [ ] describe_protection +- [ ] describe_subscription +- [ ] disassociate_drt_log_bucket +- [ ] disassociate_drt_role +- [ ] get_subscription_state +- [ ] list_attacks +- [ ] list_protections +- [ ] update_emergency_contact_settings +- [ ] update_subscription + +## sms - 0% implemented +- [ ] create_replication_job +- [ ] delete_replication_job +- [ ] delete_server_catalog +- [ ] disassociate_connector +- [ ] get_connectors +- [ ] get_replication_jobs +- [ ] get_replication_runs +- [ ] get_servers +- [ ] import_server_catalog +- [ ] start_on_demand_replication_run +- [ ] update_replication_job + +## snowball - 0% implemented +- [ ] cancel_cluster +- [ ] cancel_job +- [ ] create_address +- [ ] create_cluster +- [ ] create_job +- [ ] describe_address +- [ ] describe_addresses +- [ ] describe_cluster +- [ ] describe_job +- [ ] get_job_manifest +- [ ] get_job_unlock_code +- [ ] get_snowball_usage +- [ ] list_cluster_jobs +- [ ] list_clusters +- [ ] list_compatible_images +- [ ] list_jobs +- [ ] update_cluster +- [ ] update_job + +## sns - 53% implemented +- [ ] add_permission +- [ ] check_if_phone_number_is_opted_out +- [ ] confirm_subscription +- [X] create_platform_application +- [X] create_platform_endpoint +- [X] create_topic +- [X] delete_endpoint +- [X] delete_platform_application +- [X] delete_topic +- [ ] get_endpoint_attributes +- [ ] get_platform_application_attributes +- [ ] get_sms_attributes +- [X] get_subscription_attributes +- [ ] get_topic_attributes +- [X] list_endpoints_by_platform_application +- [ ] list_phone_numbers_opted_out +- [X] list_platform_applications +- [X] list_subscriptions +- [ ] list_subscriptions_by_topic +- [X] list_topics +- [ ] opt_in_phone_number +- [X] publish +- [ ] remove_permission +- [X] set_endpoint_attributes +- [ ] set_platform_application_attributes +- [ ] set_sms_attributes +- [X] set_subscription_attributes +- [ ] set_topic_attributes +- [X] subscribe +- [X] unsubscribe + +## sqs - 65% implemented +- [X] add_permission +- [X] change_message_visibility +- [ ] change_message_visibility_batch +- [X] create_queue +- [X] delete_message +- [ ] delete_message_batch +- [X] delete_queue +- [ ] get_queue_attributes +- [ ] get_queue_url +- [X] list_dead_letter_source_queues +- [ ] list_queue_tags +- [X] list_queues +- [X] purge_queue +- [ ] receive_message +- [X] remove_permission +- [X] send_message +- [ ] send_message_batch +- [X] set_queue_attributes +- [X] tag_queue +- [X] untag_queue + +## ssm - 11% implemented +- [X] add_tags_to_resource +- [ ] cancel_command +- [ ] create_activation +- [ ] create_association +- [ ] create_association_batch +- [ ] create_document +- [ ] create_maintenance_window +- [ ] create_patch_baseline +- [ ] create_resource_data_sync +- [ ] delete_activation +- [ ] delete_association +- [ ] delete_document +- [ ] delete_inventory +- [ ] delete_maintenance_window +- [X] delete_parameter +- [X] delete_parameters +- [ ] delete_patch_baseline +- [ ] delete_resource_data_sync +- [ ] 
deregister_managed_instance +- [ ] deregister_patch_baseline_for_patch_group +- [ ] deregister_target_from_maintenance_window +- [ ] deregister_task_from_maintenance_window +- [ ] describe_activations +- [ ] describe_association +- [ ] describe_association_execution_targets +- [ ] describe_association_executions +- [ ] describe_automation_executions +- [ ] describe_automation_step_executions +- [ ] describe_available_patches +- [ ] describe_document +- [ ] describe_document_permission +- [ ] describe_effective_instance_associations +- [ ] describe_effective_patches_for_patch_baseline +- [ ] describe_instance_associations_status +- [ ] describe_instance_information +- [ ] describe_instance_patch_states +- [ ] describe_instance_patch_states_for_patch_group +- [ ] describe_instance_patches +- [ ] describe_inventory_deletions +- [ ] describe_maintenance_window_execution_task_invocations +- [ ] describe_maintenance_window_execution_tasks +- [ ] describe_maintenance_window_executions +- [ ] describe_maintenance_window_targets +- [ ] describe_maintenance_window_tasks +- [ ] describe_maintenance_windows +- [ ] describe_parameters +- [ ] describe_patch_baselines +- [ ] describe_patch_group_state +- [ ] describe_patch_groups +- [ ] get_automation_execution +- [X] get_command_invocation +- [ ] get_default_patch_baseline +- [ ] get_deployable_patch_snapshot_for_instance +- [ ] get_document +- [ ] get_inventory +- [ ] get_inventory_schema +- [ ] get_maintenance_window +- [ ] get_maintenance_window_execution +- [ ] get_maintenance_window_execution_task +- [ ] get_maintenance_window_execution_task_invocation +- [ ] get_maintenance_window_task +- [X] get_parameter +- [ ] get_parameter_history +- [X] get_parameters +- [X] get_parameters_by_path +- [ ] get_patch_baseline +- [ ] get_patch_baseline_for_patch_group +- [ ] label_parameter_version +- [ ] list_association_versions +- [ ] list_associations +- [ ] list_command_invocations +- [X] list_commands +- [ ] list_compliance_items +- [ ] list_compliance_summaries +- [ ] list_document_versions +- [ ] list_documents +- [ ] list_inventory_entries +- [ ] list_resource_compliance_summaries +- [ ] list_resource_data_sync +- [X] list_tags_for_resource +- [ ] modify_document_permission +- [ ] put_compliance_items +- [ ] put_inventory +- [X] put_parameter +- [ ] register_default_patch_baseline +- [ ] register_patch_baseline_for_patch_group +- [ ] register_target_with_maintenance_window +- [ ] register_task_with_maintenance_window +- [X] remove_tags_from_resource +- [ ] send_automation_signal +- [X] send_command +- [ ] start_associations_once +- [ ] start_automation_execution +- [ ] stop_automation_execution +- [ ] update_association +- [ ] update_association_status +- [ ] update_document +- [ ] update_document_default_version +- [ ] update_maintenance_window +- [ ] update_maintenance_window_target +- [ ] update_maintenance_window_task +- [ ] update_managed_instance_role +- [ ] update_patch_baseline + +## stepfunctions - 0% implemented +- [ ] create_activity +- [ ] create_state_machine +- [ ] delete_activity +- [ ] delete_state_machine +- [ ] describe_activity +- [ ] describe_execution +- [ ] describe_state_machine +- [ ] describe_state_machine_for_execution +- [ ] get_activity_task +- [ ] get_execution_history +- [ ] list_activities +- [ ] list_executions +- [ ] list_state_machines +- [ ] send_task_failure +- [ ] send_task_heartbeat +- [ ] send_task_success +- [ ] start_execution +- [ ] stop_execution +- [ ] update_state_machine + +## storagegateway - 0% implemented 
+- [ ] activate_gateway +- [ ] add_cache +- [ ] add_tags_to_resource +- [ ] add_upload_buffer +- [ ] add_working_storage +- [ ] cancel_archival +- [ ] cancel_retrieval +- [ ] create_cached_iscsi_volume +- [ ] create_nfs_file_share +- [ ] create_smb_file_share +- [ ] create_snapshot +- [ ] create_snapshot_from_volume_recovery_point +- [ ] create_stored_iscsi_volume +- [ ] create_tape_with_barcode +- [ ] create_tapes +- [ ] delete_bandwidth_rate_limit +- [ ] delete_chap_credentials +- [ ] delete_file_share +- [ ] delete_gateway +- [ ] delete_snapshot_schedule +- [ ] delete_tape +- [ ] delete_tape_archive +- [ ] delete_volume +- [ ] describe_bandwidth_rate_limit +- [ ] describe_cache +- [ ] describe_cached_iscsi_volumes +- [ ] describe_chap_credentials +- [ ] describe_gateway_information +- [ ] describe_maintenance_start_time +- [ ] describe_nfs_file_shares +- [ ] describe_smb_file_shares +- [ ] describe_smb_settings +- [ ] describe_snapshot_schedule +- [ ] describe_stored_iscsi_volumes +- [ ] describe_tape_archives +- [ ] describe_tape_recovery_points +- [ ] describe_tapes +- [ ] describe_upload_buffer +- [ ] describe_vtl_devices +- [ ] describe_working_storage +- [ ] disable_gateway +- [ ] join_domain +- [ ] list_file_shares +- [ ] list_gateways +- [ ] list_local_disks +- [ ] list_tags_for_resource +- [ ] list_tapes +- [ ] list_volume_initiators +- [ ] list_volume_recovery_points +- [ ] list_volumes +- [ ] notify_when_uploaded +- [ ] refresh_cache +- [ ] remove_tags_from_resource +- [ ] reset_cache +- [ ] retrieve_tape_archive +- [ ] retrieve_tape_recovery_point +- [ ] set_local_console_password +- [ ] set_smb_guest_password +- [ ] shutdown_gateway +- [ ] start_gateway +- [ ] update_bandwidth_rate_limit +- [ ] update_chap_credentials +- [ ] update_gateway_information +- [ ] update_gateway_software_now +- [ ] update_maintenance_start_time +- [ ] update_nfs_file_share +- [ ] update_smb_file_share +- [ ] update_snapshot_schedule +- [ ] update_vtl_device_type + +## sts - 42% implemented +- [X] assume_role +- [ ] assume_role_with_saml +- [ ] assume_role_with_web_identity +- [ ] decode_authorization_message +- [ ] get_caller_identity +- [X] get_federation_token +- [X] get_session_token + +## support - 0% implemented +- [ ] add_attachments_to_set +- [ ] add_communication_to_case +- [ ] create_case +- [ ] describe_attachment +- [ ] describe_cases +- [ ] describe_communications +- [ ] describe_services +- [ ] describe_severity_levels +- [ ] describe_trusted_advisor_check_refresh_statuses +- [ ] describe_trusted_advisor_check_result +- [ ] describe_trusted_advisor_check_summaries +- [ ] describe_trusted_advisor_checks +- [ ] refresh_trusted_advisor_check +- [ ] resolve_case + +## swf - 58% implemented +- [ ] count_closed_workflow_executions +- [ ] count_open_workflow_executions +- [X] count_pending_activity_tasks +- [X] count_pending_decision_tasks +- [ ] deprecate_activity_type +- [X] deprecate_domain +- [ ] deprecate_workflow_type +- [ ] describe_activity_type +- [X] describe_domain +- [X] describe_workflow_execution +- [ ] describe_workflow_type +- [ ] get_workflow_execution_history +- [ ] list_activity_types +- [X] list_closed_workflow_executions +- [X] list_domains +- [X] list_open_workflow_executions +- [ ] list_workflow_types +- [X] poll_for_activity_task +- [X] poll_for_decision_task +- [X] record_activity_task_heartbeat +- [ ] register_activity_type +- [X] register_domain +- [ ] register_workflow_type +- [ ] request_cancel_workflow_execution +- [ ] respond_activity_task_canceled +- [X] 
respond_activity_task_completed +- [X] respond_activity_task_failed +- [X] respond_decision_task_completed +- [X] signal_workflow_execution +- [X] start_workflow_execution +- [X] terminate_workflow_execution + +## transcribe - 0% implemented +- [ ] create_vocabulary +- [ ] delete_vocabulary +- [ ] get_transcription_job +- [ ] get_vocabulary +- [ ] list_transcription_jobs +- [ ] list_vocabularies +- [ ] start_transcription_job +- [ ] update_vocabulary + +## translate - 0% implemented +- [ ] translate_text + +## waf - 0% implemented +- [ ] create_byte_match_set +- [ ] create_geo_match_set +- [ ] create_ip_set +- [ ] create_rate_based_rule +- [ ] create_regex_match_set +- [ ] create_regex_pattern_set +- [ ] create_rule +- [ ] create_rule_group +- [ ] create_size_constraint_set +- [ ] create_sql_injection_match_set +- [ ] create_web_acl +- [ ] create_xss_match_set +- [ ] delete_byte_match_set +- [ ] delete_geo_match_set +- [ ] delete_ip_set +- [ ] delete_permission_policy +- [ ] delete_rate_based_rule +- [ ] delete_regex_match_set +- [ ] delete_regex_pattern_set +- [ ] delete_rule +- [ ] delete_rule_group +- [ ] delete_size_constraint_set +- [ ] delete_sql_injection_match_set +- [ ] delete_web_acl +- [ ] delete_xss_match_set +- [ ] get_byte_match_set +- [ ] get_change_token +- [ ] get_change_token_status +- [ ] get_geo_match_set +- [ ] get_ip_set +- [ ] get_permission_policy +- [ ] get_rate_based_rule +- [ ] get_rate_based_rule_managed_keys +- [ ] get_regex_match_set +- [ ] get_regex_pattern_set +- [ ] get_rule +- [ ] get_rule_group +- [ ] get_sampled_requests +- [ ] get_size_constraint_set +- [ ] get_sql_injection_match_set +- [ ] get_web_acl +- [ ] get_xss_match_set +- [ ] list_activated_rules_in_rule_group +- [ ] list_byte_match_sets +- [ ] list_geo_match_sets +- [ ] list_ip_sets +- [ ] list_rate_based_rules +- [ ] list_regex_match_sets +- [ ] list_regex_pattern_sets +- [ ] list_rule_groups +- [ ] list_rules +- [ ] list_size_constraint_sets +- [ ] list_sql_injection_match_sets +- [ ] list_subscribed_rule_groups +- [ ] list_web_acls +- [ ] list_xss_match_sets +- [ ] put_permission_policy +- [ ] update_byte_match_set +- [ ] update_geo_match_set +- [ ] update_ip_set +- [ ] update_rate_based_rule +- [ ] update_regex_match_set +- [ ] update_regex_pattern_set +- [ ] update_rule +- [ ] update_rule_group +- [ ] update_size_constraint_set +- [ ] update_sql_injection_match_set +- [ ] update_web_acl +- [ ] update_xss_match_set + +## waf-regional - 0% implemented +- [ ] associate_web_acl +- [ ] create_byte_match_set +- [ ] create_geo_match_set +- [ ] create_ip_set +- [ ] create_rate_based_rule +- [ ] create_regex_match_set +- [ ] create_regex_pattern_set +- [ ] create_rule +- [ ] create_rule_group +- [ ] create_size_constraint_set +- [ ] create_sql_injection_match_set +- [ ] create_web_acl +- [ ] create_xss_match_set +- [ ] delete_byte_match_set +- [ ] delete_geo_match_set +- [ ] delete_ip_set +- [ ] delete_permission_policy +- [ ] delete_rate_based_rule +- [ ] delete_regex_match_set +- [ ] delete_regex_pattern_set +- [ ] delete_rule +- [ ] delete_rule_group +- [ ] delete_size_constraint_set +- [ ] delete_sql_injection_match_set +- [ ] delete_web_acl +- [ ] delete_xss_match_set +- [ ] disassociate_web_acl +- [ ] get_byte_match_set +- [ ] get_change_token +- [ ] get_change_token_status +- [ ] get_geo_match_set +- [ ] get_ip_set +- [ ] get_permission_policy +- [ ] get_rate_based_rule +- [ ] get_rate_based_rule_managed_keys +- [ ] get_regex_match_set +- [ ] get_regex_pattern_set +- [ ] get_rule +- [ ] 
get_rule_group +- [ ] get_sampled_requests +- [ ] get_size_constraint_set +- [ ] get_sql_injection_match_set +- [ ] get_web_acl +- [ ] get_web_acl_for_resource +- [ ] get_xss_match_set +- [ ] list_activated_rules_in_rule_group +- [ ] list_byte_match_sets +- [ ] list_geo_match_sets +- [ ] list_ip_sets +- [ ] list_rate_based_rules +- [ ] list_regex_match_sets +- [ ] list_regex_pattern_sets +- [ ] list_resources_for_web_acl +- [ ] list_rule_groups +- [ ] list_rules +- [ ] list_size_constraint_sets +- [ ] list_sql_injection_match_sets +- [ ] list_subscribed_rule_groups +- [ ] list_web_acls +- [ ] list_xss_match_sets +- [ ] put_permission_policy +- [ ] update_byte_match_set +- [ ] update_geo_match_set +- [ ] update_ip_set +- [ ] update_rate_based_rule +- [ ] update_regex_match_set +- [ ] update_regex_pattern_set +- [ ] update_rule +- [ ] update_rule_group +- [ ] update_size_constraint_set +- [ ] update_sql_injection_match_set +- [ ] update_web_acl +- [ ] update_xss_match_set + +## workdocs - 0% implemented +- [ ] abort_document_version_upload +- [ ] activate_user +- [ ] add_resource_permissions +- [ ] create_comment +- [ ] create_custom_metadata +- [ ] create_folder +- [ ] create_labels +- [ ] create_notification_subscription +- [ ] create_user +- [ ] deactivate_user +- [ ] delete_comment +- [ ] delete_custom_metadata +- [ ] delete_document +- [ ] delete_folder +- [ ] delete_folder_contents +- [ ] delete_labels +- [ ] delete_notification_subscription +- [ ] delete_user +- [ ] describe_activities +- [ ] describe_comments +- [ ] describe_document_versions +- [ ] describe_folder_contents +- [ ] describe_groups +- [ ] describe_notification_subscriptions +- [ ] describe_resource_permissions +- [ ] describe_root_folders +- [ ] describe_users +- [ ] get_current_user +- [ ] get_document +- [ ] get_document_path +- [ ] get_document_version +- [ ] get_folder +- [ ] get_folder_path +- [ ] initiate_document_version_upload +- [ ] remove_all_resource_permissions +- [ ] remove_resource_permission +- [ ] update_document +- [ ] update_document_version +- [ ] update_folder +- [ ] update_user + +## workmail - 0% implemented +- [ ] associate_delegate_to_resource +- [ ] associate_member_to_group +- [ ] create_alias +- [ ] create_group +- [ ] create_resource +- [ ] create_user +- [ ] delete_alias +- [ ] delete_group +- [ ] delete_mailbox_permissions +- [ ] delete_resource +- [ ] delete_user +- [ ] deregister_from_work_mail +- [ ] describe_group +- [ ] describe_organization +- [ ] describe_resource +- [ ] describe_user +- [ ] disassociate_delegate_from_resource +- [ ] disassociate_member_from_group +- [ ] list_aliases +- [ ] list_group_members +- [ ] list_groups +- [ ] list_mailbox_permissions +- [ ] list_organizations +- [ ] list_resource_delegates +- [ ] list_resources +- [ ] list_users +- [ ] put_mailbox_permissions +- [ ] register_to_work_mail +- [ ] reset_password +- [ ] update_primary_email_address +- [ ] update_resource + +## workspaces - 0% implemented +- [ ] associate_ip_groups +- [ ] authorize_ip_rules +- [ ] create_ip_group +- [ ] create_tags +- [ ] create_workspaces +- [ ] delete_ip_group +- [ ] delete_tags +- [ ] describe_ip_groups +- [ ] describe_tags +- [ ] describe_workspace_bundles +- [ ] describe_workspace_directories +- [ ] describe_workspaces +- [ ] describe_workspaces_connection_status +- [ ] disassociate_ip_groups +- [ ] modify_workspace_properties +- [ ] modify_workspace_state +- [ ] reboot_workspaces +- [ ] rebuild_workspaces +- [ ] revoke_ip_rules +- [ ] start_workspaces +- [ ] 
stop_workspaces +- [ ] terminate_workspaces +- [ ] update_rules_of_ip_group + +## xray - 0% implemented +- [ ] batch_get_traces +- [ ] get_encryption_config +- [ ] get_service_graph +- [ ] get_trace_graph +- [ ] get_trace_summaries +- [ ] put_encryption_config +- [ ] put_telemetry_records +- [ ] put_trace_segments From e51d1bfade08dc11bde27990e118ff38aa654476 Mon Sep 17 00:00:00 2001 From: Stephan Date: Fri, 21 Dec 2018 12:28:56 +0100 Subject: [PATCH 010/658] merge --- .travis.yml | 100 +- requirements-dev.txt | 34 +- setup.py | 142 +- .../single_instance_with_ebs_volume.py | 690 +-- tests/test_cloudformation/fixtures/vpc_eip.py | 24 +- tests/test_cloudformation/fixtures/vpc_eni.py | 68 +- .../fixtures/vpc_single_instance_in_subnet.py | 816 +-- .../test_cloudformation_stack_crud.py | 1344 ++--- .../test_cloudformation_stack_crud_boto3.py | 1590 ++--- .../test_cloudformation_stack_integration.py | 4854 ++++++++-------- .../test_cloudformation/test_import_value.py | 174 +- tests/test_cloudformation/test_server.py | 66 +- .../test_cloudformation/test_stack_parsing.py | 942 +-- tests/test_cloudwatch/test_cloudwatch.py | 246 +- .../test_cloudwatch/test_cloudwatch_boto3.py | 448 +- .../test_cognitoidentity.py | 170 +- tests/test_cognitoidentity/test_server.py | 90 +- tests/test_cognitoidp/test_cognitoidp.py | 1202 ++-- tests/test_core/test_decorator_calls.py | 196 +- tests/test_core/test_instance_metadata.py | 92 +- tests/test_core/test_moto_api.py | 66 +- tests/test_core/test_nested.py | 58 +- tests/test_core/test_responses.py | 162 +- tests/test_core/test_server.py | 106 +- tests/test_core/test_url_mapping.py | 44 +- tests/test_core/test_utils.py | 60 +- tests/test_datapipeline/test_datapipeline.py | 408 +- tests/test_datapipeline/test_server.py | 56 +- tests/test_dynamodb/test_dynamodb.py | 108 +- .../test_dynamodb_table_with_range_key.py | 1052 ++-- .../test_dynamodb_table_without_range_key.py | 860 +-- tests/test_dynamodb/test_server.py | 40 +- tests/test_dynamodb2/test_dynamodb.py | 2676 ++++----- .../test_dynamodb_table_with_range_key.py | 3926 ++++++------- .../test_dynamodb_table_without_range_key.py | 1580 ++--- tests/test_dynamodb2/test_server.py | 38 +- tests/test_ec2/test_account_attributes.py | 88 +- tests/test_ec2/test_amazon_dev_pay.py | 20 +- tests/test_ec2/test_amis.py | 1552 ++--- .../test_availability_zones_and_regions.py | 108 +- tests/test_ec2/test_customer_gateways.py | 104 +- tests/test_ec2/test_dhcp_options.py | 666 +-- tests/test_ec2/test_ec2_core.py | 2 +- tests/test_ec2/test_elastic_block_store.py | 1330 ++--- tests/test_ec2/test_elastic_ip_addresses.py | 1028 ++-- .../test_elastic_network_interfaces.py | 724 +-- tests/test_ec2/test_general.py | 84 +- tests/test_ec2/test_instances.py | 2512 ++++---- tests/test_ec2/test_internet_gateways.py | 538 +- tests/test_ec2/test_ip_addresses.py | 20 +- tests/test_ec2/test_key_pairs.py | 302 +- tests/test_ec2/test_monitoring.py | 20 +- tests/test_ec2/test_nat_gateway.py | 218 +- tests/test_ec2/test_network_acls.py | 350 +- tests/test_ec2/test_placement_groups.py | 20 +- tests/test_ec2/test_regions.py | 296 +- tests/test_ec2/test_reserved_instances.py | 20 +- tests/test_ec2/test_route_tables.py | 1060 ++-- tests/test_ec2/test_security_groups.py | 1474 ++--- tests/test_ec2/test_server.py | 52 +- tests/test_ec2/test_spot_fleet.py | 690 +-- tests/test_ec2/test_spot_instances.py | 536 +- tests/test_ec2/test_subnets.py | 582 +- tests/test_ec2/test_tags.py | 906 +-- tests/test_ec2/test_utils.py | 16 +- 
.../test_ec2/test_virtual_private_gateways.py | 210 +- tests/test_ec2/test_vm_export.py | 20 +- tests/test_ec2/test_vm_import.py | 20 +- tests/test_ec2/test_vpc_peering.py | 264 +- tests/test_ec2/test_vpcs.py | 1082 ++-- tests/test_ec2/test_vpn_connections.py | 102 +- tests/test_ec2/test_windows.py | 20 +- tests/test_ecr/test_ecr_boto3.py | 1394 ++--- tests/test_ecs/test_ecs_boto3.py | 4428 +++++++------- tests/test_elb/test_elb.py | 1964 +++---- tests/test_elb/test_server.py | 34 +- tests/test_elbv2/test_elbv2.py | 3176 +++++----- tests/test_elbv2/test_server.py | 34 +- tests/test_emr/test_emr.py | 1316 ++--- tests/test_emr/test_emr_boto3.py | 1440 ++--- tests/test_emr/test_server.py | 36 +- tests/test_events/test_events.py | 422 +- tests/test_glacier/test_glacier_archives.py | 42 +- tests/test_glacier/test_glacier_jobs.py | 180 +- tests/test_glacier/test_glacier_server.py | 44 +- tests/test_glacier/test_glacier_vaults.py | 62 +- tests/test_glue/__init__.py | 2 +- tests/test_glue/fixtures/__init__.py | 2 +- tests/test_glue/fixtures/datacatalog.py | 112 +- tests/test_glue/helpers.py | 238 +- tests/test_glue/test_datacatalog.py | 852 +-- tests/test_iam/test_iam.py | 1520 ++--- tests/test_iam/test_iam_account_aliases.py | 40 +- tests/test_iam/test_iam_groups.py | 310 +- tests/test_iam/test_server.py | 52 +- tests/test_iot/test_iot.py | 1752 +++--- tests/test_iot/test_server.py | 38 +- tests/test_iotdata/test_iotdata.py | 186 +- tests/test_iotdata/test_server.py | 40 +- tests/test_kinesis/test_firehose.py | 376 +- tests/test_kinesis/test_kinesis.py | 1248 ++-- tests/test_kinesis/test_server.py | 50 +- tests/test_kms/test_kms.py | 1438 ++--- tests/test_kms/test_server.py | 50 +- tests/test_logs/test_logs.py | 256 +- tests/test_opsworks/test_apps.py | 204 +- tests/test_opsworks/test_instances.py | 448 +- tests/test_opsworks/test_layers.py | 234 +- tests/test_opsworks/test_stack.py | 92 +- .../organizations_test_utils.py | 272 +- .../test_organizations_boto3.py | 644 +- tests/test_polly/test_polly.py | 550 +- tests/test_polly/test_server.py | 38 +- tests/test_rds/test_rds.py | 648 +-- tests/test_rds/test_server.py | 40 +- tests/test_rds2/test_rds2.py | 2944 +++++----- tests/test_rds2/test_server.py | 40 +- tests/test_redshift/test_redshift.py | 2484 ++++---- tests/test_redshift/test_server.py | 44 +- .../test_resourcegroupstaggingapi.py | 570 +- .../test_server.py | 48 +- tests/test_route53/test_route53.py | 1422 ++--- tests/test_s3/test_s3.py | 5166 ++++++++--------- tests/test_s3/test_s3_lifecycle.py | 774 +-- tests/test_s3/test_s3_storageclass.py | 212 +- tests/test_s3/test_s3_utils.py | 160 +- tests/test_s3/test_server.py | 210 +- .../test_bucket_path_server.py | 226 +- .../test_s3bucket_path/test_s3bucket_path.py | 642 +- .../test_s3bucket_path_combo.py | 50 +- .../test_s3bucket_path_utils.py | 32 +- .../test_secretsmanager.py | 572 +- tests/test_secretsmanager/test_server.py | 842 +-- tests/test_ses/test_server.py | 32 +- tests/test_ses/test_ses.py | 232 +- tests/test_ses/test_ses_boto3.py | 388 +- tests/test_sns/test_application.py | 616 +- tests/test_sns/test_application_boto3.py | 700 +-- tests/test_sns/test_publishing.py | 138 +- tests/test_sns/test_publishing_boto3.py | 978 ++-- tests/test_sns/test_server.py | 48 +- tests/test_sns/test_subscriptions.py | 270 +- tests/test_sns/test_subscriptions_boto3.py | 792 +-- tests/test_sns/test_topics.py | 266 +- tests/test_sns/test_topics_boto3.py | 380 +- tests/test_sqs/test_server.py | 170 +- tests/test_sqs/test_sqs.py | 2474 ++++---- 
 tests/test_ssm/test_ssm_boto3.py | 1572 ++---
 tests/test_sts/test_server.py | 78 +-
 tests/test_sts/test_sts.py | 168 +-
 tests/test_swf/models/test_activity_task.py | 308 +-
 tests/test_swf/models/test_decision_task.py | 160 +-
 tests/test_swf/models/test_domain.py | 238 +-
 tests/test_swf/models/test_generic_type.py | 116 +-
 tests/test_swf/models/test_history_event.py | 62 +-
 tests/test_swf/models/test_timeout.py | 38 +-
 .../models/test_workflow_execution.py | 1002 ++--
 .../test_swf/responses/test_activity_tasks.py | 456 +-
 .../test_swf/responses/test_activity_types.py | 268 +-
 .../test_swf/responses/test_decision_tasks.py | 684 +--
 tests/test_swf/responses/test_domains.py | 238 +-
 tests/test_swf/responses/test_timeouts.py | 220 +-
 .../responses/test_workflow_executions.py | 524 +-
 .../test_swf/responses/test_workflow_types.py | 274 +-
 tests/test_swf/test_exceptions.py | 316 +-
 tests/test_swf/test_utils.py | 26 +-
 tests/test_swf/utils.py | 200 +-
 tests/test_xray/test_xray_boto3.py | 278 +-
 tests/test_xray/test_xray_client.py | 144 +-
 tox.ini | 28 +-
 travis_moto_server.sh | 8 +-
 wait_for.py | 62 +-
 172 files changed, 49629 insertions(+), 49629 deletions(-)

diff --git a/.travis.yml b/.travis.yml
index de22818b895d..9f3106ad21b3 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -1,50 +1,50 @@
-language: python
-sudo: false
-services:
-  - docker
-python:
-  - 2.7
-  - 3.6
-env:
-  - TEST_SERVER_MODE=false
-  - TEST_SERVER_MODE=true
-# Due to incomplete Python 3.7 support on Travis CI (
-# https://github.com/travis-ci/travis-ci/issues/9815),
-# using a matrix is necessary
-matrix:
-  include:
-    - python: 3.7
-      env: TEST_SERVER_MODE=false
-      dist: xenial
-      sudo: true
-    - python: 3.7
-      env: TEST_SERVER_MODE=true
-      dist: xenial
-      sudo: true
-before_install:
-  - export BOTO_CONFIG=/dev/null
-install:
-  # We build moto first so the docker container doesn't try to compile it as well, also note we don't use
-  # -d for docker run so the logs show up in travis
-  # Python images come from here: https://hub.docker.com/_/python/
-  - |
-    python setup.py sdist
-
-    if [ "$TEST_SERVER_MODE" = "true" ]; then
-      docker run --rm -t --name motoserver -e TEST_SERVER_MODE=true -e AWS_SECRET_ACCESS_KEY=server_secret -e AWS_ACCESS_KEY_ID=server_key -v `pwd`:/moto -p 5000:5000 -v /var/run/docker.sock:/var/run/docker.sock python:${TRAVIS_PYTHON_VERSION}-stretch /moto/travis_moto_server.sh &
-      export AWS_SECRET_ACCESS_KEY=foobar_secret
-      export AWS_ACCESS_KEY_ID=foobar_key
-    fi
-    travis_retry pip install boto==2.45.0
-    travis_retry pip install boto3
-    travis_retry pip install dist/moto*.gz
-    travis_retry pip install coveralls==1.1
-    travis_retry pip install -r requirements-dev.txt
-
-    if [ "$TEST_SERVER_MODE" = "true" ]; then
-      python wait_for.py
-    fi
-script:
-  - make test
-after_success:
-  - coveralls
+language: python
+sudo: false
+services:
+  - docker
+python:
+  - 2.7
+  - 3.6
+env:
+  - TEST_SERVER_MODE=false
+  - TEST_SERVER_MODE=true
+# Due to incomplete Python 3.7 support on Travis CI (
+# https://github.com/travis-ci/travis-ci/issues/9815),
+# using a matrix is necessary
+matrix:
+  include:
+    - python: 3.7
+      env: TEST_SERVER_MODE=false
+      dist: xenial
+      sudo: true
+    - python: 3.7
+      env: TEST_SERVER_MODE=true
+      dist: xenial
+      sudo: true
+before_install:
+  - export BOTO_CONFIG=/dev/null
+install:
+  # We build moto first so the docker container doesn't try to compile it as well, also note we don't use
+  # -d for docker run so the logs show up in travis
+  # Python images come from here: https://hub.docker.com/_/python/
+  - |
+ python setup.py sdist + + if [ "$TEST_SERVER_MODE" = "true" ]; then + docker run --rm -t --name motoserver -e TEST_SERVER_MODE=true -e AWS_SECRET_ACCESS_KEY=server_secret -e AWS_ACCESS_KEY_ID=server_key -v `pwd`:/moto -p 5000:5000 -v /var/run/docker.sock:/var/run/docker.sock python:${TRAVIS_PYTHON_VERSION}-stretch /moto/travis_moto_server.sh & + export AWS_SECRET_ACCESS_KEY=foobar_secret + export AWS_ACCESS_KEY_ID=foobar_key + fi + travis_retry pip install boto==2.45.0 + travis_retry pip install boto3 + travis_retry pip install dist/moto*.gz + travis_retry pip install coveralls==1.1 + travis_retry pip install -r requirements-dev.txt + + if [ "$TEST_SERVER_MODE" = "true" ]; then + python wait_for.py + fi +script: + - make test +after_success: + - coveralls diff --git a/requirements-dev.txt b/requirements-dev.txt index 111cd5f3ff84..5470815ee286 100644 --- a/requirements-dev.txt +++ b/requirements-dev.txt @@ -1,17 +1,17 @@ --r requirements.txt -mock -nose -sure==1.4.11 -coverage -flake8==3.5.0 -freezegun -flask -boto>=2.45.0 -boto3>=1.4.4 -botocore>=1.8.36 -six>=1.9 -prompt-toolkit==1.0.14 -click==6.7 -inflection==0.3.1 -lxml==4.2.3 -beautifulsoup4==4.6.0 +-r requirements.txt +mock +nose +sure==1.4.11 +coverage +flake8==3.5.0 +freezegun +flask +boto>=2.45.0 +boto3>=1.4.4 +botocore>=1.12.13 +six>=1.9 +prompt-toolkit==1.0.14 +click==6.7 +inflection==0.3.1 +lxml==4.2.3 +beautifulsoup4==4.6.0 \ No newline at end of file diff --git a/setup.py b/setup.py index 98780dd5a2e2..f547f7b43a52 100755 --- a/setup.py +++ b/setup.py @@ -1,71 +1,71 @@ -#!/usr/bin/env python -from __future__ import unicode_literals -import setuptools -from setuptools import setup, find_packages -import sys - - -install_requires = [ - "Jinja2>=2.7.3", - "boto>=2.36.0", - "boto3>=1.6.16,<1.8", - "botocore>=1.9.16,<1.11", - "cryptography>=2.3.0", - "requests>=2.5", - "xmltodict", - "six>1.9", - "werkzeug", - "pyaml", - "pytz", - "python-dateutil<3.0.0,>=2.1", - "python-jose<3.0.0", - "mock", - "docker>=2.5.1", - "jsondiff==1.1.1", - "aws-xray-sdk<0.96,>=0.93", - "responses>=0.9.0", -] - -extras_require = { - 'server': ['flask'], -} - -# https://hynek.me/articles/conditional-python-dependencies/ -if int(setuptools.__version__.split(".", 1)[0]) < 18: - if sys.version_info[0:2] < (3, 3): - install_requires.append("backports.tempfile") -else: - extras_require[":python_version<'3.3'"] = ["backports.tempfile"] - - -setup( - name='moto', - version='1.3.6', - description='A library that allows your python tests to easily' - ' mock out the boto library', - author='Steve Pulec', - author_email='spulec@gmail.com', - url='https://github.com/spulec/moto', - entry_points={ - 'console_scripts': [ - 'moto_server = moto.server:main', - ], - }, - packages=find_packages(exclude=("tests", "tests.*")), - install_requires=install_requires, - extras_require=extras_require, - include_package_data=True, - license="Apache", - test_suite="tests", - classifiers=[ - "Programming Language :: Python :: 2", - "Programming Language :: Python :: 2.7", - "Programming Language :: Python :: 3", - "Programming Language :: Python :: 3.3", - "Programming Language :: Python :: 3.4", - "Programming Language :: Python :: 3.5", - "Programming Language :: Python :: 3.6", - "License :: OSI Approved :: Apache Software License", - "Topic :: Software Development :: Testing", - ], -) +#!/usr/bin/env python +from __future__ import unicode_literals +import setuptools +from setuptools import setup, find_packages +import sys + + +install_requires = [ + "Jinja2>=2.7.3", + 
"boto>=2.36.0", + "boto3>=1.6.16", + "botocore>=1.12.13", + "cryptography>=2.3.0", + "requests>=2.5", + "xmltodict", + "six>1.9", + "werkzeug", + "pyaml", + "pytz", + "python-dateutil<3.0.0,>=2.1", + "python-jose<3.0.0", + "mock", + "docker>=2.5.1", + "jsondiff==1.1.1", + "aws-xray-sdk!=0.96,>=0.93", + "responses>=0.9.0", +] + +extras_require = { + 'server': ['flask'], +} + +# https://hynek.me/articles/conditional-python-dependencies/ +if int(setuptools.__version__.split(".", 1)[0]) < 18: + if sys.version_info[0:2] < (3, 3): + install_requires.append("backports.tempfile") +else: + extras_require[":python_version<'3.3'"] = ["backports.tempfile"] + + +setup( + name='moto', + version='1.3.7', + description='A library that allows your python tests to easily' + ' mock out the boto library', + author='Steve Pulec', + author_email='spulec@gmail.com', + url='https://github.com/spulec/moto', + entry_points={ + 'console_scripts': [ + 'moto_server = moto.server:main', + ], + }, + packages=find_packages(exclude=("tests", "tests.*")), + install_requires=install_requires, + extras_require=extras_require, + include_package_data=True, + license="Apache", + test_suite="tests", + classifiers=[ + "Programming Language :: Python :: 2", + "Programming Language :: Python :: 2.7", + "Programming Language :: Python :: 3", + "Programming Language :: Python :: 3.3", + "Programming Language :: Python :: 3.4", + "Programming Language :: Python :: 3.5", + "Programming Language :: Python :: 3.6", + "License :: OSI Approved :: Apache Software License", + "Topic :: Software Development :: Testing", + ], +) \ No newline at end of file diff --git a/tests/test_cloudformation/fixtures/single_instance_with_ebs_volume.py b/tests/test_cloudformation/fixtures/single_instance_with_ebs_volume.py index 37c7ca4f3790..189cc36cde9c 100644 --- a/tests/test_cloudformation/fixtures/single_instance_with_ebs_volume.py +++ b/tests/test_cloudformation/fixtures/single_instance_with_ebs_volume.py @@ -1,345 +1,345 @@ -from __future__ import unicode_literals - -template = { - "Description": "AWS CloudFormation Sample Template Gollum_Single_Instance_With_EBS_Volume: Gollum is a simple wiki system built on top of Git that powers GitHub Wikis. This template installs a Gollum Wiki stack on a single EC2 instance with an EBS volume for storage and demonstrates using the AWS CloudFormation bootstrap scripts to install the packages and files necessary at instance launch time. **WARNING** This template creates an Amazon EC2 instance and an EBS volume. You will be billed for the AWS resources used if you create a stack from this template.", - "Parameters": { - "SSHLocation": { - "ConstraintDescription": "must be a valid IP CIDR range of the form x.x.x.x/x.", - "Description": "The IP address range that can be used to SSH to the EC2 instances", - "Default": "0.0.0.0/0", - "MinLength": "9", - "AllowedPattern": "(\\d{1,3})\\.(\\d{1,3})\\.(\\d{1,3})\\.(\\d{1,3})/(\\d{1,2})", - "MaxLength": "18", - "Type": "String" - }, - "KeyName": { - "Type": "String", - "Description": "Name of an existing EC2 KeyPair to enable SSH access to the instances", - "MinLength": "1", - "AllowedPattern": "[\\x20-\\x7E]*", - "MaxLength": "255", - "ConstraintDescription": "can contain only ASCII characters." 
- }, - "InstanceType": { - "Default": "m1.small", - "ConstraintDescription": "must be a valid EC2 instance type.", - "Type": "String", - "Description": "WebServer EC2 instance type", - "AllowedValues": [ - "t1.micro", - "m1.small", - "m1.medium", - "m1.large", - "m1.xlarge", - "m2.xlarge", - "m2.2xlarge", - "m2.4xlarge", - "m3.xlarge", - "m3.2xlarge", - "c1.medium", - "c1.xlarge", - "cc1.4xlarge", - "cc2.8xlarge", - "cg1.4xlarge" - ] - }, - "VolumeSize": { - "Description": "WebServer EC2 instance type", - "Default": "5", - "Type": "Number", - "MaxValue": "1024", - "MinValue": "5", - "ConstraintDescription": "must be between 5 and 1024 Gb." - } - }, - "AWSTemplateFormatVersion": "2010-09-09", - "Outputs": { - "WebsiteURL": { - "Description": "URL for Gollum wiki", - "Value": { - "Fn::Join": [ - "", - [ - "http://", - { - "Fn::GetAtt": [ - "WebServer", - "PublicDnsName" - ] - } - ] - ] - } - } - }, - "Resources": { - "WebServerSecurityGroup": { - "Type": "AWS::EC2::SecurityGroup", - "Properties": { - "SecurityGroupIngress": [ - { - "ToPort": "80", - "IpProtocol": "tcp", - "CidrIp": "0.0.0.0/0", - "FromPort": "80" - }, - { - "ToPort": "22", - "IpProtocol": "tcp", - "CidrIp": { - "Ref": "SSHLocation" - }, - "FromPort": "22" - } - ], - "GroupDescription": "Enable SSH access and HTTP access on the inbound port" - } - }, - "WebServer": { - "Type": "AWS::EC2::Instance", - "Properties": { - "UserData": { - "Fn::Base64": { - "Fn::Join": [ - "", - [ - "#!/bin/bash -v\n", - "yum update -y aws-cfn-bootstrap\n", - "# Helper function\n", - "function error_exit\n", - "{\n", - " /opt/aws/bin/cfn-signal -e 1 -r \"$1\" '", - { - "Ref": "WaitHandle" - }, - "'\n", - " exit 1\n", - "}\n", - "# Install Rails packages\n", - "/opt/aws/bin/cfn-init -s ", - { - "Ref": "AWS::StackId" - }, - " -r WebServer ", - " --region ", - { - "Ref": "AWS::Region" - }, - " || error_exit 'Failed to run cfn-init'\n", - "# Wait for the EBS volume to show up\n", - "while [ ! -e /dev/sdh ]; do echo Waiting for EBS volume to attach; sleep 5; done\n", - "# Format the EBS volume and mount it\n", - "mkdir /var/wikidata\n", - "/sbin/mkfs -t ext3 /dev/sdh1\n", - "mount /dev/sdh1 /var/wikidata\n", - "# Initialize the wiki and fire up the server\n", - "cd /var/wikidata\n", - "git init\n", - "gollum --port 80 --host 0.0.0.0 &\n", - "# If all is well so signal success\n", - "/opt/aws/bin/cfn-signal -e $? 
-r \"Rails application setup complete\" '", - { - "Ref": "WaitHandle" - }, - "'\n" - ] - ] - } - }, - "KeyName": { - "Ref": "KeyName" - }, - "SecurityGroups": [ - { - "Ref": "WebServerSecurityGroup" - } - ], - "InstanceType": { - "Ref": "InstanceType" - }, - "ImageId": { - "Fn::FindInMap": [ - "AWSRegionArch2AMI", - { - "Ref": "AWS::Region" - }, - { - "Fn::FindInMap": [ - "AWSInstanceType2Arch", - { - "Ref": "InstanceType" - }, - "Arch" - ] - } - ] - } - }, - "Metadata": { - "AWS::CloudFormation::Init": { - "config": { - "packages": { - "rubygems": { - "nokogiri": [ - "1.5.10" - ], - "rdiscount": [], - "gollum": [ - "1.1.1" - ] - }, - "yum": { - "libxslt-devel": [], - "gcc": [], - "git": [], - "rubygems": [], - "ruby-devel": [], - "ruby-rdoc": [], - "make": [], - "libxml2-devel": [] - } - } - } - } - } - }, - "DataVolume": { - "Type": "AWS::EC2::Volume", - "Properties": { - "Tags": [ - { - "Value": "Gollum Data Volume", - "Key": "Usage" - } - ], - "AvailabilityZone": { - "Fn::GetAtt": [ - "WebServer", - "AvailabilityZone" - ] - }, - "Size": "100", - } - }, - "MountPoint": { - "Type": "AWS::EC2::VolumeAttachment", - "Properties": { - "InstanceId": { - "Ref": "WebServer" - }, - "Device": "/dev/sdh", - "VolumeId": { - "Ref": "DataVolume" - } - } - }, - "WaitCondition": { - "DependsOn": "MountPoint", - "Type": "AWS::CloudFormation::WaitCondition", - "Properties": { - "Handle": { - "Ref": "WaitHandle" - }, - "Timeout": "300" - }, - "Metadata": { - "Comment1": "Note that the WaitCondition is dependent on the volume mount point allowing the volume to be created and attached to the EC2 instance", - "Comment2": "The instance bootstrap script waits for the volume to be attached to the instance prior to installing Gollum and signalling completion" - } - }, - "WaitHandle": { - "Type": "AWS::CloudFormation::WaitConditionHandle" - } - }, - "Mappings": { - "AWSInstanceType2Arch": { - "m3.2xlarge": { - "Arch": "64" - }, - "m2.2xlarge": { - "Arch": "64" - }, - "m1.small": { - "Arch": "64" - }, - "c1.medium": { - "Arch": "64" - }, - "cg1.4xlarge": { - "Arch": "64HVM" - }, - "m2.xlarge": { - "Arch": "64" - }, - "t1.micro": { - "Arch": "64" - }, - "cc1.4xlarge": { - "Arch": "64HVM" - }, - "m1.medium": { - "Arch": "64" - }, - "cc2.8xlarge": { - "Arch": "64HVM" - }, - "m1.large": { - "Arch": "64" - }, - "m1.xlarge": { - "Arch": "64" - }, - "m2.4xlarge": { - "Arch": "64" - }, - "c1.xlarge": { - "Arch": "64" - }, - "m3.xlarge": { - "Arch": "64" - } - }, - "AWSRegionArch2AMI": { - "ap-southeast-1": { - "64HVM": "NOT_YET_SUPPORTED", - "32": "ami-b4b0cae6", - "64": "ami-beb0caec" - }, - "ap-southeast-2": { - "64HVM": "NOT_YET_SUPPORTED", - "32": "ami-b3990e89", - "64": "ami-bd990e87" - }, - "us-west-2": { - "64HVM": "NOT_YET_SUPPORTED", - "32": "ami-38fe7308", - "64": "ami-30fe7300" - }, - "us-east-1": { - "64HVM": "ami-0da96764", - "32": "ami-31814f58", - "64": "ami-1b814f72" - }, - "ap-northeast-1": { - "64HVM": "NOT_YET_SUPPORTED", - "32": "ami-0644f007", - "64": "ami-0a44f00b" - }, - "us-west-1": { - "64HVM": "NOT_YET_SUPPORTED", - "32": "ami-11d68a54", - "64": "ami-1bd68a5e" - }, - "eu-west-1": { - "64HVM": "NOT_YET_SUPPORTED", - "32": "ami-973b06e3", - "64": "ami-953b06e1" - }, - "sa-east-1": { - "64HVM": "NOT_YET_SUPPORTED", - "32": "ami-3e3be423", - "64": "ami-3c3be421" - } - } - } -} +from __future__ import unicode_literals + +template = { + "Description": "AWS CloudFormation Sample Template Gollum_Single_Instance_With_EBS_Volume: Gollum is a simple wiki system built on top of Git that powers GitHub Wikis. 
This template installs a Gollum Wiki stack on a single EC2 instance with an EBS volume for storage and demonstrates using the AWS CloudFormation bootstrap scripts to install the packages and files necessary at instance launch time. **WARNING** This template creates an Amazon EC2 instance and an EBS volume. You will be billed for the AWS resources used if you create a stack from this template.", + "Parameters": { + "SSHLocation": { + "ConstraintDescription": "must be a valid IP CIDR range of the form x.x.x.x/x.", + "Description": "The IP address range that can be used to SSH to the EC2 instances", + "Default": "0.0.0.0/0", + "MinLength": "9", + "AllowedPattern": "(\\d{1,3})\\.(\\d{1,3})\\.(\\d{1,3})\\.(\\d{1,3})/(\\d{1,2})", + "MaxLength": "18", + "Type": "String" + }, + "KeyName": { + "Type": "String", + "Description": "Name of an existing EC2 KeyPair to enable SSH access to the instances", + "MinLength": "1", + "AllowedPattern": "[\\x20-\\x7E]*", + "MaxLength": "255", + "ConstraintDescription": "can contain only ASCII characters." + }, + "InstanceType": { + "Default": "m1.small", + "ConstraintDescription": "must be a valid EC2 instance type.", + "Type": "String", + "Description": "WebServer EC2 instance type", + "AllowedValues": [ + "t1.micro", + "m1.small", + "m1.medium", + "m1.large", + "m1.xlarge", + "m2.xlarge", + "m2.2xlarge", + "m2.4xlarge", + "m3.xlarge", + "m3.2xlarge", + "c1.medium", + "c1.xlarge", + "cc1.4xlarge", + "cc2.8xlarge", + "cg1.4xlarge" + ] + }, + "VolumeSize": { + "Description": "WebServer EC2 instance type", + "Default": "5", + "Type": "Number", + "MaxValue": "1024", + "MinValue": "5", + "ConstraintDescription": "must be between 5 and 1024 Gb." + } + }, + "AWSTemplateFormatVersion": "2010-09-09", + "Outputs": { + "WebsiteURL": { + "Description": "URL for Gollum wiki", + "Value": { + "Fn::Join": [ + "", + [ + "http://", + { + "Fn::GetAtt": [ + "WebServer", + "PublicDnsName" + ] + } + ] + ] + } + } + }, + "Resources": { + "WebServerSecurityGroup": { + "Type": "AWS::EC2::SecurityGroup", + "Properties": { + "SecurityGroupIngress": [ + { + "ToPort": "80", + "IpProtocol": "tcp", + "CidrIp": "0.0.0.0/0", + "FromPort": "80" + }, + { + "ToPort": "22", + "IpProtocol": "tcp", + "CidrIp": { + "Ref": "SSHLocation" + }, + "FromPort": "22" + } + ], + "GroupDescription": "Enable SSH access and HTTP access on the inbound port" + } + }, + "WebServer": { + "Type": "AWS::EC2::Instance", + "Properties": { + "UserData": { + "Fn::Base64": { + "Fn::Join": [ + "", + [ + "#!/bin/bash -v\n", + "yum update -y aws-cfn-bootstrap\n", + "# Helper function\n", + "function error_exit\n", + "{\n", + " /opt/aws/bin/cfn-signal -e 1 -r \"$1\" '", + { + "Ref": "WaitHandle" + }, + "'\n", + " exit 1\n", + "}\n", + "# Install Rails packages\n", + "/opt/aws/bin/cfn-init -s ", + { + "Ref": "AWS::StackId" + }, + " -r WebServer ", + " --region ", + { + "Ref": "AWS::Region" + }, + " || error_exit 'Failed to run cfn-init'\n", + "# Wait for the EBS volume to show up\n", + "while [ ! -e /dev/sdh ]; do echo Waiting for EBS volume to attach; sleep 5; done\n", + "# Format the EBS volume and mount it\n", + "mkdir /var/wikidata\n", + "/sbin/mkfs -t ext3 /dev/sdh1\n", + "mount /dev/sdh1 /var/wikidata\n", + "# Initialize the wiki and fire up the server\n", + "cd /var/wikidata\n", + "git init\n", + "gollum --port 80 --host 0.0.0.0 &\n", + "# If all is well so signal success\n", + "/opt/aws/bin/cfn-signal -e $? 
-r \"Rails application setup complete\" '", + { + "Ref": "WaitHandle" + }, + "'\n" + ] + ] + } + }, + "KeyName": { + "Ref": "KeyName" + }, + "SecurityGroups": [ + { + "Ref": "WebServerSecurityGroup" + } + ], + "InstanceType": { + "Ref": "InstanceType" + }, + "ImageId": { + "Fn::FindInMap": [ + "AWSRegionArch2AMI", + { + "Ref": "AWS::Region" + }, + { + "Fn::FindInMap": [ + "AWSInstanceType2Arch", + { + "Ref": "InstanceType" + }, + "Arch" + ] + } + ] + } + }, + "Metadata": { + "AWS::CloudFormation::Init": { + "config": { + "packages": { + "rubygems": { + "nokogiri": [ + "1.5.10" + ], + "rdiscount": [], + "gollum": [ + "1.1.1" + ] + }, + "yum": { + "libxslt-devel": [], + "gcc": [], + "git": [], + "rubygems": [], + "ruby-devel": [], + "ruby-rdoc": [], + "make": [], + "libxml2-devel": [] + } + } + } + } + } + }, + "DataVolume": { + "Type": "AWS::EC2::Volume", + "Properties": { + "Tags": [ + { + "Value": "Gollum Data Volume", + "Key": "Usage" + } + ], + "AvailabilityZone": { + "Fn::GetAtt": [ + "WebServer", + "AvailabilityZone" + ] + }, + "Size": "100", + } + }, + "MountPoint": { + "Type": "AWS::EC2::VolumeAttachment", + "Properties": { + "InstanceId": { + "Ref": "WebServer" + }, + "Device": "/dev/sdh", + "VolumeId": { + "Ref": "DataVolume" + } + } + }, + "WaitCondition": { + "DependsOn": "MountPoint", + "Type": "AWS::CloudFormation::WaitCondition", + "Properties": { + "Handle": { + "Ref": "WaitHandle" + }, + "Timeout": "300" + }, + "Metadata": { + "Comment1": "Note that the WaitCondition is dependent on the volume mount point allowing the volume to be created and attached to the EC2 instance", + "Comment2": "The instance bootstrap script waits for the volume to be attached to the instance prior to installing Gollum and signalling completion" + } + }, + "WaitHandle": { + "Type": "AWS::CloudFormation::WaitConditionHandle" + } + }, + "Mappings": { + "AWSInstanceType2Arch": { + "m3.2xlarge": { + "Arch": "64" + }, + "m2.2xlarge": { + "Arch": "64" + }, + "m1.small": { + "Arch": "64" + }, + "c1.medium": { + "Arch": "64" + }, + "cg1.4xlarge": { + "Arch": "64HVM" + }, + "m2.xlarge": { + "Arch": "64" + }, + "t1.micro": { + "Arch": "64" + }, + "cc1.4xlarge": { + "Arch": "64HVM" + }, + "m1.medium": { + "Arch": "64" + }, + "cc2.8xlarge": { + "Arch": "64HVM" + }, + "m1.large": { + "Arch": "64" + }, + "m1.xlarge": { + "Arch": "64" + }, + "m2.4xlarge": { + "Arch": "64" + }, + "c1.xlarge": { + "Arch": "64" + }, + "m3.xlarge": { + "Arch": "64" + } + }, + "AWSRegionArch2AMI": { + "ap-southeast-1": { + "64HVM": "NOT_YET_SUPPORTED", + "32": "ami-b4b0cae6", + "64": "ami-beb0caec" + }, + "ap-southeast-2": { + "64HVM": "NOT_YET_SUPPORTED", + "32": "ami-b3990e89", + "64": "ami-bd990e87" + }, + "us-west-2": { + "64HVM": "NOT_YET_SUPPORTED", + "32": "ami-38fe7308", + "64": "ami-30fe7300" + }, + "us-east-1": { + "64HVM": "ami-0da96764", + "32": "ami-31814f58", + "64": "ami-1b814f72" + }, + "ap-northeast-1": { + "64HVM": "NOT_YET_SUPPORTED", + "32": "ami-0644f007", + "64": "ami-0a44f00b" + }, + "us-west-1": { + "64HVM": "NOT_YET_SUPPORTED", + "32": "ami-11d68a54", + "64": "ami-1bd68a5e" + }, + "eu-west-1": { + "64HVM": "NOT_YET_SUPPORTED", + "32": "ami-973b06e3", + "64": "ami-953b06e1" + }, + "sa-east-1": { + "64HVM": "NOT_YET_SUPPORTED", + "32": "ami-3e3be423", + "64": "ami-3c3be421" + } + } + } +} diff --git a/tests/test_cloudformation/fixtures/vpc_eip.py b/tests/test_cloudformation/fixtures/vpc_eip.py index c7a46c83071b..2d6872f64bd5 100644 --- a/tests/test_cloudformation/fixtures/vpc_eip.py +++ 
b/tests/test_cloudformation/fixtures/vpc_eip.py @@ -1,12 +1,12 @@ -from __future__ import unicode_literals - -template = { - "Resources": { - "VPCEIP": { - "Type": "AWS::EC2::EIP", - "Properties": { - "Domain": "vpc" - } - } - } -} +from __future__ import unicode_literals + +template = { + "Resources": { + "VPCEIP": { + "Type": "AWS::EC2::EIP", + "Properties": { + "Domain": "vpc" + } + } + } +} diff --git a/tests/test_cloudformation/fixtures/vpc_eni.py b/tests/test_cloudformation/fixtures/vpc_eni.py index ef9eb1d089e5..bc13e691f248 100644 --- a/tests/test_cloudformation/fixtures/vpc_eni.py +++ b/tests/test_cloudformation/fixtures/vpc_eni.py @@ -1,34 +1,34 @@ -from __future__ import unicode_literals - -template = { - "AWSTemplateFormatVersion": "2010-09-09", - "Description": "VPC ENI Test CloudFormation", - "Resources": { - "ENI": { - "Type": "AWS::EC2::NetworkInterface", - "Properties": { - "SubnetId": {"Ref": "Subnet"} - } - }, - "Subnet": { - "Type": "AWS::EC2::Subnet", - "Properties": { - "AvailabilityZone": "us-east-1a", - "VpcId": {"Ref": "VPC"}, - "CidrBlock": "10.0.0.0/24" - } - }, - "VPC": { - "Type": "AWS::EC2::VPC", - "Properties": { - "CidrBlock": "10.0.0.0/16" - } - } - }, - "Outputs": { - "NinjaENI": { - "Description": "Elastic IP mapping to Auto-Scaling Group", - "Value": {"Ref": "ENI"} - } - } -} +from __future__ import unicode_literals + +template = { + "AWSTemplateFormatVersion": "2010-09-09", + "Description": "VPC ENI Test CloudFormation", + "Resources": { + "ENI": { + "Type": "AWS::EC2::NetworkInterface", + "Properties": { + "SubnetId": {"Ref": "Subnet"} + } + }, + "Subnet": { + "Type": "AWS::EC2::Subnet", + "Properties": { + "AvailabilityZone": "us-east-1a", + "VpcId": {"Ref": "VPC"}, + "CidrBlock": "10.0.0.0/24" + } + }, + "VPC": { + "Type": "AWS::EC2::VPC", + "Properties": { + "CidrBlock": "10.0.0.0/16" + } + } + }, + "Outputs": { + "NinjaENI": { + "Description": "Elastic IP mapping to Auto-Scaling Group", + "Value": {"Ref": "ENI"} + } + } +} diff --git a/tests/test_cloudformation/fixtures/vpc_single_instance_in_subnet.py b/tests/test_cloudformation/fixtures/vpc_single_instance_in_subnet.py index 177da884ebfc..39f02462ed5e 100644 --- a/tests/test_cloudformation/fixtures/vpc_single_instance_in_subnet.py +++ b/tests/test_cloudformation/fixtures/vpc_single_instance_in_subnet.py @@ -1,408 +1,408 @@ -from __future__ import unicode_literals - -template = { - "Description": "AWS CloudFormation Sample Template vpc_single_instance_in_subnet.template: Sample template showing how to create a VPC and add an EC2 instance with an Elastic IP address and a security group. **WARNING** This template creates an Amazon EC2 instance. You will be billed for the AWS resources used if you create a stack from this template.", - "Parameters": { - "SSHLocation": { - "ConstraintDescription": "must be a valid IP CIDR range of the form x.x.x.x/x.", - "Description": " The IP address range that can be used to SSH to the EC2 instances", - "Default": "0.0.0.0/0", - "MinLength": "9", - "AllowedPattern": "(\\d{1,3})\\.(\\d{1,3})\\.(\\d{1,3})\\.(\\d{1,3})/(\\d{1,2})", - "MaxLength": "18", - "Type": "String" - }, - "KeyName": { - "Type": "String", - "Description": "Name of an existing EC2 KeyPair to enable SSH access to the instance", - "MinLength": "1", - "AllowedPattern": "[\\x20-\\x7E]*", - "MaxLength": "255", - "ConstraintDescription": "can contain only ASCII characters." 
- }, - "InstanceType": { - "Default": "m1.small", - "ConstraintDescription": "must be a valid EC2 instance type.", - "Type": "String", - "Description": "WebServer EC2 instance type", - "AllowedValues": [ - "t1.micro", - "m1.small", - "m1.medium", - "m1.large", - "m1.xlarge", - "m2.xlarge", - "m2.2xlarge", - "m2.4xlarge", - "m3.xlarge", - "m3.2xlarge", - "c1.medium", - "c1.xlarge", - "cc1.4xlarge", - "cc2.8xlarge", - "cg1.4xlarge" - ] - } - }, - "AWSTemplateFormatVersion": "2010-09-09", - "Outputs": { - "URL": { - "Description": "Newly created application URL", - "Value": { - "Fn::Join": [ - "", - [ - "http://", - { - "Fn::GetAtt": [ - "WebServerInstance", - "PublicIp" - ] - } - ] - ] - } - } - }, - "Resources": { - "Subnet": { - "Type": "AWS::EC2::Subnet", - "Properties": { - "VpcId": { - "Ref": "VPC" - }, - "CidrBlock": "10.0.0.0/24", - "Tags": [ - { - "Value": { - "Ref": "AWS::StackId" - }, - "Key": "Application" - } - ] - } - }, - "WebServerWaitHandle": { - "Type": "AWS::CloudFormation::WaitConditionHandle" - }, - "Route": { - "Type": "AWS::EC2::Route", - "Properties": { - "GatewayId": { - "Ref": "InternetGateway" - }, - "DestinationCidrBlock": "0.0.0.0/0", - "RouteTableId": { - "Ref": "RouteTable" - } - }, - "DependsOn": "AttachGateway" - }, - "SubnetRouteTableAssociation": { - "Type": "AWS::EC2::SubnetRouteTableAssociation", - "Properties": { - "SubnetId": { - "Ref": "Subnet" - }, - "RouteTableId": { - "Ref": "RouteTable" - } - } - }, - "InternetGateway": { - "Type": "AWS::EC2::InternetGateway", - "Properties": { - "Tags": [ - { - "Value": { - "Ref": "AWS::StackId" - }, - "Key": "Application" - } - ] - } - }, - "RouteTable": { - "Type": "AWS::EC2::RouteTable", - "Properties": { - "VpcId": { - "Ref": "VPC" - }, - "Tags": [ - { - "Value": { - "Ref": "AWS::StackId" - }, - "Key": "Application" - } - ] - } - }, - "WebServerWaitCondition": { - "Type": "AWS::CloudFormation::WaitCondition", - "Properties": { - "Handle": { - "Ref": "WebServerWaitHandle" - }, - "Timeout": "300" - }, - "DependsOn": "WebServerInstance" - }, - "VPC": { - "Type": "AWS::EC2::VPC", - "Properties": { - "CidrBlock": "10.0.0.0/16", - "Tags": [ - { - "Value": { - "Ref": "AWS::StackId" - }, - "Key": "Application" - } - ] - } - }, - "InstanceSecurityGroup": { - "Type": "AWS::EC2::SecurityGroup", - "Properties": { - "SecurityGroupIngress": [ - { - "ToPort": "22", - "IpProtocol": "tcp", - "CidrIp": { - "Ref": "SSHLocation" - }, - "FromPort": "22" - }, - { - "ToPort": "80", - "IpProtocol": "tcp", - "CidrIp": "0.0.0.0/0", - "FromPort": "80" - } - ], - "VpcId": { - "Ref": "VPC" - }, - "GroupDescription": "Enable SSH access via port 22" - } - }, - "WebServerInstance": { - "Type": "AWS::EC2::Instance", - "Properties": { - "UserData": { - "Fn::Base64": { - "Fn::Join": [ - "", - [ - "#!/bin/bash\n", - "yum update -y aws-cfn-bootstrap\n", - "# Helper function\n", - "function error_exit\n", - "{\n", - " /opt/aws/bin/cfn-signal -e 1 -r \"$1\" '", - { - "Ref": "WebServerWaitHandle" - }, - "'\n", - " exit 1\n", - "}\n", - "# Install the simple web page\n", - "/opt/aws/bin/cfn-init -s ", - { - "Ref": "AWS::StackId" - }, - " -r WebServerInstance ", - " --region ", - { - "Ref": "AWS::Region" - }, - " || error_exit 'Failed to run cfn-init'\n", - "# Start up the cfn-hup daemon to listen for changes to the Web Server metadata\n", - "/opt/aws/bin/cfn-hup || error_exit 'Failed to start cfn-hup'\n", - "# All done so signal success\n", - "/opt/aws/bin/cfn-signal -e 0 -r \"WebServer setup complete\" '", - { - "Ref": "WebServerWaitHandle" - },
"'\n" - ] - ] - } - }, - "Tags": [ - { - "Value": { - "Ref": "AWS::StackId" - }, - "Key": "Application" - }, - { - "Value": "Bar", - "Key": "Foo" - } - ], - "SecurityGroupIds": [ - { - "Ref": "InstanceSecurityGroup" - } - ], - "KeyName": { - "Ref": "KeyName" - }, - "SubnetId": { - "Ref": "Subnet" - }, - "ImageId": { - "Fn::FindInMap": [ - "RegionMap", - { - "Ref": "AWS::Region" - }, - "AMI" - ] - }, - "InstanceType": { - "Ref": "InstanceType" - } - }, - "Metadata": { - "Comment": "Install a simple PHP application", - "AWS::CloudFormation::Init": { - "config": { - "files": { - "/etc/cfn/cfn-hup.conf": { - "content": { - "Fn::Join": [ - "", - [ - "[main]\n", - "stack=", - { - "Ref": "AWS::StackId" - }, - "\n", - "region=", - { - "Ref": "AWS::Region" - }, - "\n" - ] - ] - }, - "owner": "root", - "group": "root", - "mode": "000400" - }, - "/etc/cfn/hooks.d/cfn-auto-reloader.conf": { - "content": { - "Fn::Join": [ - "", - [ - "[cfn-auto-reloader-hook]\n", - "triggers=post.update\n", - "path=Resources.WebServerInstance.Metadata.AWS::CloudFormation::Init\n", - "action=/opt/aws/bin/cfn-init -s ", - { - "Ref": "AWS::StackId" - }, - " -r WebServerInstance ", - " --region ", - { - "Ref": "AWS::Region" - }, - "\n", - "runas=root\n" - ] - ] - } - }, - "/var/www/html/index.php": { - "content": { - "Fn::Join": [ - "", - [ - "<?php\n", - "echo '<h1>AWS CloudFormation sample PHP application</h1>';\n", - "?>\n" - ] - ] - }, - "owner": "apache", - "group": "apache", - "mode": "000644" - } - }, - "services": { - "sysvinit": { - "httpd": { - "ensureRunning": "true", - "enabled": "true" - }, - "sendmail": { - "ensureRunning": "false", - "enabled": "false" - } - } - }, - "packages": { - "yum": { - "httpd": [], - "php": [] - } - } - } - } - } - }, - "IPAddress": { - "Type": "AWS::EC2::EIP", - "Properties": { - "InstanceId": { - "Ref": "WebServerInstance" - }, - "Domain": "vpc" - }, - "DependsOn": "AttachGateway" - }, - "AttachGateway": { - "Type": "AWS::EC2::VPCGatewayAttachment", - "Properties": { - "VpcId": { - "Ref": "VPC" - }, - "InternetGatewayId": { - "Ref": "InternetGateway" - } - } - } - }, - "Mappings": { - "RegionMap": { - "ap-southeast-1": { - "AMI": "ami-74dda626" - }, - "ap-southeast-2": { - "AMI": "ami-b3990e89" - }, - "us-west-2": { - "AMI": "ami-16fd7026" - }, - "us-east-1": { - "AMI": "ami-7f418316" - }, - "ap-northeast-1": { - "AMI": "ami-dcfa4edd" - }, - "us-west-1": { - "AMI": "ami-951945d0" - }, - "eu-west-1": { - "AMI": "ami-24506250" - }, - "sa-east-1": { - "AMI": "ami-3e3be423" - } - } - } -} +from __future__ import unicode_literals + +template = { + "Description": "AWS CloudFormation Sample Template vpc_single_instance_in_subnet.template: Sample template showing how to create a VPC and add an EC2 instance with an Elastic IP address and a security group. **WARNING** This template creates an Amazon EC2 instance.
You will be billed for the AWS resources used if you create a stack from this template.", + "Parameters": { + "SSHLocation": { + "ConstraintDescription": "must be a valid IP CIDR range of the form x.x.x.x/x.", + "Description": " The IP address range that can be used to SSH to the EC2 instances", + "Default": "0.0.0.0/0", + "MinLength": "9", + "AllowedPattern": "(\\d{1,3})\\.(\\d{1,3})\\.(\\d{1,3})\\.(\\d{1,3})/(\\d{1,2})", + "MaxLength": "18", + "Type": "String" + }, + "KeyName": { + "Type": "String", + "Description": "Name of an existing EC2 KeyPair to enable SSH access to the instance", + "MinLength": "1", + "AllowedPattern": "[\\x20-\\x7E]*", + "MaxLength": "255", + "ConstraintDescription": "can contain only ASCII characters." + }, + "InstanceType": { + "Default": "m1.small", + "ConstraintDescription": "must be a valid EC2 instance type.", + "Type": "String", + "Description": "WebServer EC2 instance type", + "AllowedValues": [ + "t1.micro", + "m1.small", + "m1.medium", + "m1.large", + "m1.xlarge", + "m2.xlarge", + "m2.2xlarge", + "m2.4xlarge", + "m3.xlarge", + "m3.2xlarge", + "c1.medium", + "c1.xlarge", + "cc1.4xlarge", + "cc2.8xlarge", + "cg1.4xlarge" + ] + } + }, + "AWSTemplateFormatVersion": "2010-09-09", + "Outputs": { + "URL": { + "Description": "Newly created application URL", + "Value": { + "Fn::Join": [ + "", + [ + "http://", + { + "Fn::GetAtt": [ + "WebServerInstance", + "PublicIp" + ] + } + ] + ] + } + } + }, + "Resources": { + "Subnet": { + "Type": "AWS::EC2::Subnet", + "Properties": { + "VpcId": { + "Ref": "VPC" + }, + "CidrBlock": "10.0.0.0/24", + "Tags": [ + { + "Value": { + "Ref": "AWS::StackId" + }, + "Key": "Application" + } + ] + } + }, + "WebServerWaitHandle": { + "Type": "AWS::CloudFormation::WaitConditionHandle" + }, + "Route": { + "Type": "AWS::EC2::Route", + "Properties": { + "GatewayId": { + "Ref": "InternetGateway" + }, + "DestinationCidrBlock": "0.0.0.0/0", + "RouteTableId": { + "Ref": "RouteTable" + } + }, + "DependsOn": "AttachGateway" + }, + "SubnetRouteTableAssociation": { + "Type": "AWS::EC2::SubnetRouteTableAssociation", + "Properties": { + "SubnetId": { + "Ref": "Subnet" + }, + "RouteTableId": { + "Ref": "RouteTable" + } + } + }, + "InternetGateway": { + "Type": "AWS::EC2::InternetGateway", + "Properties": { + "Tags": [ + { + "Value": { + "Ref": "AWS::StackId" + }, + "Key": "Application" + } + ] + } + }, + "RouteTable": { + "Type": "AWS::EC2::RouteTable", + "Properties": { + "VpcId": { + "Ref": "VPC" + }, + "Tags": [ + { + "Value": { + "Ref": "AWS::StackId" + }, + "Key": "Application" + } + ] + } + }, + "WebServerWaitCondition": { + "Type": "AWS::CloudFormation::WaitCondition", + "Properties": { + "Handle": { + "Ref": "WebServerWaitHandle" + }, + "Timeout": "300" + }, + "DependsOn": "WebServerInstance" + }, + "VPC": { + "Type": "AWS::EC2::VPC", + "Properties": { + "CidrBlock": "10.0.0.0/16", + "Tags": [ + { + "Value": { + "Ref": "AWS::StackId" + }, + "Key": "Application" + } + ] + } + }, + "InstanceSecurityGroup": { + "Type": "AWS::EC2::SecurityGroup", + "Properties": { + "SecurityGroupIngress": [ + { + "ToPort": "22", + "IpProtocol": "tcp", + "CidrIp": { + "Ref": "SSHLocation" + }, + "FromPort": "22" + }, + { + "ToPort": "80", + "IpProtocol": "tcp", + "CidrIp": "0.0.0.0/0", + "FromPort": "80" + } + ], + "VpcId": { + "Ref": "VPC" + }, + "GroupDescription": "Enable SSH access via port 22" + } + }, + "WebServerInstance": { + "Type": "AWS::EC2::Instance", + "Properties": { + "UserData": { + "Fn::Base64": { + "Fn::Join": [ + "", + [ + "#!/bin/bash\n", + "yum 
update -y aws-cfn-bootstrap\n", + "# Helper function\n", + "function error_exit\n", + "{\n", + " /opt/aws/bin/cfn-signal -e 1 -r \"$1\" '", + { + "Ref": "WebServerWaitHandle" + }, + "'\n", + " exit 1\n", + "}\n", + "# Install the simple web page\n", + "/opt/aws/bin/cfn-init -s ", + { + "Ref": "AWS::StackId" + }, + " -r WebServerInstance ", + " --region ", + { + "Ref": "AWS::Region" + }, + " || error_exit 'Failed to run cfn-init'\n", + "# Start up the cfn-hup daemon to listen for changes to the Web Server metadata\n", + "/opt/aws/bin/cfn-hup || error_exit 'Failed to start cfn-hup'\n", + "# All done so signal success\n", + "/opt/aws/bin/cfn-signal -e 0 -r \"WebServer setup complete\" '", + { + "Ref": "WebServerWaitHandle" + }, + "'\n" + ] + ] + } + }, + "Tags": [ + { + "Value": { + "Ref": "AWS::StackId" + }, + "Key": "Application" + }, + { + "Value": "Bar", + "Key": "Foo" + } + ], + "SecurityGroupIds": [ + { + "Ref": "InstanceSecurityGroup" + } + ], + "KeyName": { + "Ref": "KeyName" + }, + "SubnetId": { + "Ref": "Subnet" + }, + "ImageId": { + "Fn::FindInMap": [ + "RegionMap", + { + "Ref": "AWS::Region" + }, + "AMI" + ] + }, + "InstanceType": { + "Ref": "InstanceType" + } + }, + "Metadata": { + "Comment": "Install a simple PHP application", + "AWS::CloudFormation::Init": { + "config": { + "files": { + "/etc/cfn/cfn-hup.conf": { + "content": { + "Fn::Join": [ + "", + [ + "[main]\n", + "stack=", + { + "Ref": "AWS::StackId" + }, + "\n", + "region=", + { + "Ref": "AWS::Region" + }, + "\n" + ] + ] + }, + "owner": "root", + "group": "root", + "mode": "000400" + }, + "/etc/cfn/hooks.d/cfn-auto-reloader.conf": { + "content": { + "Fn::Join": [ + "", + [ + "[cfn-auto-reloader-hook]\n", + "triggers=post.update\n", + "path=Resources.WebServerInstance.Metadata.AWS::CloudFormation::Init\n", + "action=/opt/aws/bin/cfn-init -s ", + { + "Ref": "AWS::StackId" + }, + " -r WebServerInstance ", + " --region ", + { + "Ref": "AWS::Region" + }, + "\n", + "runas=root\n" + ] + ] + } + }, + "/var/www/html/index.php": { + "content": { + "Fn::Join": [ + "", + [ + "<?php\n", + "echo '<h1>AWS CloudFormation sample PHP application</h1>';\n", + "?>\n" + ] + ] + }, + "owner": "apache", + "group": "apache", + "mode": "000644" + } + }, + "services": { + "sysvinit": { + "httpd": { + "ensureRunning": "true", + "enabled": "true" + }, + "sendmail": { + "ensureRunning": "false", + "enabled": "false" + } + } + }, + "packages": { + "yum": { + "httpd": [], + "php": [] + } + } + } + } + } + }, + "IPAddress": { + "Type": "AWS::EC2::EIP", + "Properties": { + "InstanceId": { + "Ref": "WebServerInstance" + }, + "Domain": "vpc" + }, + "DependsOn": "AttachGateway" + }, + "AttachGateway": { + "Type": "AWS::EC2::VPCGatewayAttachment", + "Properties": { + "VpcId": { + "Ref": "VPC" + }, + "InternetGatewayId": { + "Ref": "InternetGateway" + } + } + } + }, + "Mappings": { + "RegionMap": { + "ap-southeast-1": { + "AMI": "ami-74dda626" + }, + "ap-southeast-2": { + "AMI": "ami-b3990e89" + }, + "us-west-2": { + "AMI": "ami-16fd7026" + }, + "us-east-1": { + "AMI": "ami-7f418316" + }, + "ap-northeast-1": { + "AMI": "ami-dcfa4edd" + }, + "us-west-1": { + "AMI": "ami-951945d0" + }, + "eu-west-1": { + "AMI": "ami-24506250" + }, + "sa-east-1": { + "AMI": "ami-3e3be423" + } + } + } +} diff --git a/tests/test_cloudformation/test_cloudformation_stack_crud.py b/tests/test_cloudformation/test_cloudformation_stack_crud.py index 801faf8a106f..a61aa157ace8 100644 --- a/tests/test_cloudformation/test_cloudformation_stack_crud.py +++ b/tests/test_cloudformation/test_cloudformation_stack_crud.py
@@ -1,672 +1,672 @@ -from __future__ import unicode_literals - -import os -import json - -import boto -import boto.s3 -import boto.s3.key -import boto.cloudformation -from boto.exception import BotoServerError -import sure # noqa -# Ensure 'assert_raises' context manager support for Python 2.6 -import tests.backport_assert_raises # noqa -from nose.tools import assert_raises - -from moto import mock_cloudformation_deprecated, mock_s3_deprecated, mock_route53_deprecated -from moto.cloudformation import cloudformation_backends - -dummy_template = { - "AWSTemplateFormatVersion": "2010-09-09", - "Description": "Stack 1", - "Resources": {}, -} - -dummy_template2 = { - "AWSTemplateFormatVersion": "2010-09-09", - "Description": "Stack 2", - "Resources": {}, -} - -# template with resource which has no delete attribute defined -dummy_template3 = { - "AWSTemplateFormatVersion": "2010-09-09", - "Description": "Stack 3", - "Resources": { - "VPC": { - "Properties": { - "CidrBlock": "192.168.0.0/16", - }, - "Type": "AWS::EC2::VPC" - } - }, -} - -dummy_template_json = json.dumps(dummy_template) -dummy_template_json2 = json.dumps(dummy_template2) -dummy_template_json3 = json.dumps(dummy_template3) - - -@mock_cloudformation_deprecated -def test_create_stack(): - conn = boto.connect_cloudformation() - conn.create_stack( - "test_stack", - template_body=dummy_template_json, - ) - - stack = conn.describe_stacks()[0] - stack.stack_name.should.equal('test_stack') - stack.get_template().should.equal({ - 'GetTemplateResponse': { - 'GetTemplateResult': { - 'TemplateBody': dummy_template_json, - 'ResponseMetadata': { - 'RequestId': '2d06e36c-ac1d-11e0-a958-f9382b6eb86bEXAMPLE' - } - } - } - - }) - - -@mock_cloudformation_deprecated -@mock_route53_deprecated -def test_create_stack_hosted_zone_by_id(): - conn = boto.connect_cloudformation() - dummy_template = { - "AWSTemplateFormatVersion": "2010-09-09", - "Description": "Stack 1", - "Parameters": { - }, - "Resources": { - "Bar": { - "Type" : "AWS::Route53::HostedZone", - "Properties" : { - "Name" : "foo.bar.baz", - } - }, - }, - } - dummy_template2 = { - "AWSTemplateFormatVersion": "2010-09-09", - "Description": "Stack 2", - "Parameters": { - "ZoneId": { "Type": "String" } - }, - "Resources": { - "Foo": { - "Properties": { - "HostedZoneId": {"Ref": "ZoneId"}, - "RecordSets": [] - }, - "Type": "AWS::Route53::RecordSetGroup" - } - }, - } - conn.create_stack( - "test_stack", - template_body=json.dumps(dummy_template), - parameters={}.items() - ) - r53_conn = boto.connect_route53() - zone_id = r53_conn.get_zones()[0].id - conn.create_stack( - "test_stack", - template_body=json.dumps(dummy_template2), - parameters={"ZoneId": zone_id}.items() - ) - - stack = conn.describe_stacks()[0] - assert stack.list_resources() - - -@mock_cloudformation_deprecated -def test_creating_stacks_across_regions(): - west1_conn = boto.cloudformation.connect_to_region("us-west-1") - west1_conn.create_stack("test_stack", template_body=dummy_template_json) - - west2_conn = boto.cloudformation.connect_to_region("us-west-2") - west2_conn.create_stack("test_stack", template_body=dummy_template_json) - - list(west1_conn.describe_stacks()).should.have.length_of(1) - list(west2_conn.describe_stacks()).should.have.length_of(1) - - -@mock_cloudformation_deprecated -def test_create_stack_with_notification_arn(): - conn = boto.connect_cloudformation() - conn.create_stack( - "test_stack_with_notifications", - template_body=dummy_template_json, - 
notification_arns='arn:aws:sns:us-east-1:123456789012:fake-queue' - ) - - stack = conn.describe_stacks()[0] - [n.value for n in stack.notification_arns].should.contain( - 'arn:aws:sns:us-east-1:123456789012:fake-queue') - - -@mock_cloudformation_deprecated -@mock_s3_deprecated -def test_create_stack_from_s3_url(): - s3_conn = boto.s3.connect_to_region('us-west-1') - bucket = s3_conn.create_bucket("foobar") - key = boto.s3.key.Key(bucket) - key.key = "template-key" - key.set_contents_from_string(dummy_template_json) - key_url = key.generate_url(expires_in=0, query_auth=False) - - conn = boto.cloudformation.connect_to_region('us-west-1') - conn.create_stack('new-stack', template_url=key_url) - - stack = conn.describe_stacks()[0] - stack.stack_name.should.equal('new-stack') - stack.get_template().should.equal( - { - 'GetTemplateResponse': { - 'GetTemplateResult': { - 'TemplateBody': dummy_template_json, - 'ResponseMetadata': { - 'RequestId': '2d06e36c-ac1d-11e0-a958-f9382b6eb86bEXAMPLE' - } - } - } - - }) - - -@mock_cloudformation_deprecated -def test_describe_stack_by_name(): - conn = boto.connect_cloudformation() - conn.create_stack( - "test_stack", - template_body=dummy_template_json, - ) - - stack = conn.describe_stacks("test_stack")[0] - stack.stack_name.should.equal('test_stack') - - -@mock_cloudformation_deprecated -def test_describe_stack_by_stack_id(): - conn = boto.connect_cloudformation() - conn.create_stack( - "test_stack", - template_body=dummy_template_json, - ) - - stack = conn.describe_stacks("test_stack")[0] - stack_by_id = conn.describe_stacks(stack.stack_id)[0] - stack_by_id.stack_id.should.equal(stack.stack_id) - stack_by_id.stack_name.should.equal("test_stack") - - -@mock_cloudformation_deprecated -def test_describe_deleted_stack(): - conn = boto.connect_cloudformation() - conn.create_stack( - "test_stack", - template_body=dummy_template_json, - ) - - stack = conn.describe_stacks("test_stack")[0] - stack_id = stack.stack_id - conn.delete_stack(stack.stack_id) - stack_by_id = conn.describe_stacks(stack_id)[0] - stack_by_id.stack_id.should.equal(stack.stack_id) - stack_by_id.stack_name.should.equal("test_stack") - stack_by_id.stack_status.should.equal("DELETE_COMPLETE") - - -@mock_cloudformation_deprecated -def test_get_template_by_name(): - conn = boto.connect_cloudformation() - conn.create_stack( - "test_stack", - template_body=dummy_template_json, - ) - - template = conn.get_template("test_stack") - template.should.equal({ - 'GetTemplateResponse': { - 'GetTemplateResult': { - 'TemplateBody': dummy_template_json, - 'ResponseMetadata': { - 'RequestId': '2d06e36c-ac1d-11e0-a958-f9382b6eb86bEXAMPLE' - } - } - } - - }) - - -@mock_cloudformation_deprecated -def test_list_stacks(): - conn = boto.connect_cloudformation() - conn.create_stack( - "test_stack", - template_body=dummy_template_json, - ) - conn.create_stack( - "test_stack2", - template_body=dummy_template_json, - ) - - stacks = conn.list_stacks() - stacks.should.have.length_of(2) - stacks[0].template_description.should.equal("Stack 1") - - -@mock_cloudformation_deprecated -def test_delete_stack_by_name(): - conn = boto.connect_cloudformation() - conn.create_stack( - "test_stack", - template_body=dummy_template_json, - ) - - conn.list_stacks().should.have.length_of(1) - conn.delete_stack("test_stack") - conn.list_stacks().should.have.length_of(0) - - -@mock_cloudformation_deprecated -def test_delete_stack_by_id(): - conn = boto.connect_cloudformation() - stack_id = conn.create_stack( - "test_stack", - 
template_body=dummy_template_json, - ) - - conn.list_stacks().should.have.length_of(1) - conn.delete_stack(stack_id) - conn.list_stacks().should.have.length_of(0) - with assert_raises(BotoServerError): - conn.describe_stacks("test_stack") - - conn.describe_stacks(stack_id).should.have.length_of(1) - - -@mock_cloudformation_deprecated -def test_delete_stack_with_resource_missing_delete_attr(): - conn = boto.connect_cloudformation() - conn.create_stack( - "test_stack", - template_body=dummy_template_json3, - ) - - conn.list_stacks().should.have.length_of(1) - conn.delete_stack("test_stack") - conn.list_stacks().should.have.length_of(0) - - -@mock_cloudformation_deprecated -def test_bad_describe_stack(): - conn = boto.connect_cloudformation() - with assert_raises(BotoServerError): - conn.describe_stacks("bad_stack") - - -@mock_cloudformation_deprecated() -def test_cloudformation_params(): - dummy_template = { - "AWSTemplateFormatVersion": "2010-09-09", - "Description": "Stack 1", - "Resources": {}, - "Parameters": { - "APPNAME": { - "Default": "app-name", - "Description": "The name of the app", - "Type": "String" - } - } - } - dummy_template_json = json.dumps(dummy_template) - cfn = boto.connect_cloudformation() - cfn.create_stack('test_stack1', template_body=dummy_template_json, parameters=[ - ('APPNAME', 'testing123')]) - stack = cfn.describe_stacks('test_stack1')[0] - stack.parameters.should.have.length_of(1) - param = stack.parameters[0] - param.key.should.equal('APPNAME') - param.value.should.equal('testing123') - - -@mock_cloudformation_deprecated -def test_cloudformation_params_conditions_and_resources_are_distinct(): - dummy_template = { - "AWSTemplateFormatVersion": "2010-09-09", - "Description": "Stack 1", - "Conditions": { - "FooEnabled": { - "Fn::Equals": [ - { - "Ref": "FooEnabled" - }, - "true" - ] - }, - "FooDisabled": { - "Fn::Not": [ - { - "Fn::Equals": [ - { - "Ref": "FooEnabled" - }, - "true" - ] - } - ] - } - }, - "Parameters": { - "FooEnabled": { - "Type": "String", - "AllowedValues": [ - "true", - "false" - ] - } - }, - "Resources": { - "Bar": { - "Properties": { - "CidrBlock": "192.168.0.0/16", - }, - "Condition": "FooDisabled", - "Type": "AWS::EC2::VPC" - } - } - } - dummy_template_json = json.dumps(dummy_template) - cfn = boto.connect_cloudformation() - cfn.create_stack('test_stack1', template_body=dummy_template_json, parameters=[('FooEnabled', 'true')]) - stack = cfn.describe_stacks('test_stack1')[0] - resources = stack.list_resources() - assert not [resource for resource in resources if resource.logical_resource_id == 'Bar'] - - -@mock_cloudformation_deprecated -def test_stack_tags(): - conn = boto.connect_cloudformation() - conn.create_stack( - "test_stack", - template_body=dummy_template_json, - tags={"foo": "bar", "baz": "bleh"}, - ) - - stack = conn.describe_stacks()[0] - dict(stack.tags).should.equal({"foo": "bar", "baz": "bleh"}) - - -@mock_cloudformation_deprecated -def test_update_stack(): - conn = boto.connect_cloudformation() - conn.create_stack( - "test_stack", - template_body=dummy_template_json, - ) - - conn.update_stack("test_stack", dummy_template_json2) - - stack = conn.describe_stacks()[0] - stack.stack_status.should.equal("UPDATE_COMPLETE") - stack.get_template().should.equal({ - 'GetTemplateResponse': { - 'GetTemplateResult': { - 'TemplateBody': dummy_template_json2, - 'ResponseMetadata': { - 'RequestId': '2d06e36c-ac1d-11e0-a958-f9382b6eb86bEXAMPLE' - } - } - } - }) - - -@mock_cloudformation_deprecated -def 
test_update_stack_with_previous_template(): - conn = boto.connect_cloudformation() - conn.create_stack( - "test_stack", - template_body=dummy_template_json, - ) - conn.update_stack("test_stack", use_previous_template=True) - - stack = conn.describe_stacks()[0] - stack.stack_status.should.equal("UPDATE_COMPLETE") - stack.get_template().should.equal({ - 'GetTemplateResponse': { - 'GetTemplateResult': { - 'TemplateBody': dummy_template_json, - 'ResponseMetadata': { - 'RequestId': '2d06e36c-ac1d-11e0-a958-f9382b6eb86bEXAMPLE' - } - } - } - }) - - -@mock_cloudformation_deprecated -def test_update_stack_with_parameters(): - dummy_template = { - "AWSTemplateFormatVersion": "2010-09-09", - "Description": "Stack", - "Resources": { - "VPC": { - "Properties": { - "CidrBlock": {"Ref": "Bar"} - }, - "Type": "AWS::EC2::VPC" - } - }, - "Parameters": { - "Bar": { - "Type": "String" - } - } - } - dummy_template_json = json.dumps(dummy_template) - conn = boto.connect_cloudformation() - conn.create_stack( - "test_stack", - template_body=dummy_template_json, - parameters=[("Bar", "192.168.0.0/16")] - ) - conn.update_stack( - "test_stack", - template_body=dummy_template_json, - parameters=[("Bar", "192.168.0.1/16")] - ) - - stack = conn.describe_stacks()[0] - assert stack.parameters[0].value == "192.168.0.1/16" - - -@mock_cloudformation_deprecated -def test_update_stack_replace_tags(): - conn = boto.connect_cloudformation() - conn.create_stack( - "test_stack", - template_body=dummy_template_json, - tags={"foo": "bar"}, - ) - conn.update_stack( - "test_stack", - template_body=dummy_template_json, - tags={"foo": "baz"}, - ) - - stack = conn.describe_stacks()[0] - stack.stack_status.should.equal("UPDATE_COMPLETE") - # since there is one tag it doesn't come out as a list - dict(stack.tags).should.equal({"foo": "baz"}) - - -@mock_cloudformation_deprecated -def test_update_stack_when_rolled_back(): - conn = boto.connect_cloudformation() - stack_id = conn.create_stack( - "test_stack", template_body=dummy_template_json) - - cloudformation_backends[conn.region.name].stacks[ - stack_id].status = 'ROLLBACK_COMPLETE' - - with assert_raises(BotoServerError) as err: - conn.update_stack("test_stack", dummy_template_json) - - ex = err.exception - ex.body.should.match( - r'is in ROLLBACK_COMPLETE state and can not be updated') - ex.error_code.should.equal('ValidationError') - ex.reason.should.equal('Bad Request') - ex.status.should.equal(400) - - -@mock_cloudformation_deprecated -def test_describe_stack_events_shows_create_update_and_delete(): - conn = boto.connect_cloudformation() - stack_id = conn.create_stack( - "test_stack", template_body=dummy_template_json) - conn.update_stack(stack_id, template_body=dummy_template_json2) - conn.delete_stack(stack_id) - - # assert begins and ends with stack events - events = conn.describe_stack_events(stack_id) - events[0].resource_type.should.equal("AWS::CloudFormation::Stack") - events[-1].resource_type.should.equal("AWS::CloudFormation::Stack") - - # testing ordering of stack events without assuming resource events will not exist - # the AWS API returns events in reverse chronological order - stack_events_to_look_for = iter([ - ("DELETE_COMPLETE", None), - ("DELETE_IN_PROGRESS", "User Initiated"), - ("UPDATE_COMPLETE", None), - ("UPDATE_IN_PROGRESS", "User Initiated"), - ("CREATE_COMPLETE", None), - ("CREATE_IN_PROGRESS", "User Initiated"), - ]) - try: - for event in events: - event.stack_id.should.equal(stack_id) - event.stack_name.should.equal("test_stack") - 
event.event_id.should.match(r"[0-9a-f]{8}-([0-9a-f]{4}-){3}[0-9a-f]{12}") - - if event.resource_type == "AWS::CloudFormation::Stack": - event.logical_resource_id.should.equal("test_stack") - event.physical_resource_id.should.equal(stack_id) - - status_to_look_for, reason_to_look_for = next( - stack_events_to_look_for) - event.resource_status.should.equal(status_to_look_for) - if reason_to_look_for is not None: - event.resource_status_reason.should.equal( - reason_to_look_for) - except StopIteration: - assert False, "Too many stack events" - - list(stack_events_to_look_for).should.be.empty - - -@mock_cloudformation_deprecated -def test_create_stack_lambda_and_dynamodb(): - conn = boto.connect_cloudformation() - dummy_template = { - "AWSTemplateFormatVersion": "2010-09-09", - "Description": "Stack Lambda Test 1", - "Parameters": { - }, - "Resources": { - "func1": { - "Type" : "AWS::Lambda::Function", - "Properties" : { - "Code": { - "S3Bucket": "bucket_123", - "S3Key": "key_123" - }, - "FunctionName": "func1", - "Handler": "handler.handler", - "Role": "role1", - "Runtime": "python2.7", - "Description": "descr", - "MemorySize": 12345, - } - }, - "func1version": { - "Type": "AWS::Lambda::LambdaVersion", - "Properties" : { - "Version": "v1.2.3" - } - }, - "tab1": { - "Type" : "AWS::DynamoDB::Table", - "Properties" : { - "TableName": "tab1", - "KeySchema": [{ - "AttributeName": "attr1", - "KeyType": "HASH" - }], - "AttributeDefinitions": [{ - "AttributeName": "attr1", - "AttributeType": "string" - }], - "ProvisionedThroughput": { - "ReadCapacityUnits": 10, - "WriteCapacityUnits": 10 - } - } - }, - "func1mapping": { - "Type": "AWS::Lambda::EventSourceMapping", - "Properties" : { - "FunctionName": "v1.2.3", - "EventSourceArn": "arn:aws:dynamodb:region:XXXXXX:table/tab1/stream/2000T00:00:00.000", - "StartingPosition": "0", - "BatchSize": 100, - "Enabled": True - } - } - }, - } - validate_s3_before = os.environ.get('VALIDATE_LAMBDA_S3', '') - try: - os.environ['VALIDATE_LAMBDA_S3'] = 'false' - conn.create_stack( - "test_stack_lambda_1", - template_body=json.dumps(dummy_template), - parameters={}.items() - ) - finally: - os.environ['VALIDATE_LAMBDA_S3'] = validate_s3_before - - stack = conn.describe_stacks()[0] - resources = stack.list_resources() - assert len(resources) == 4 - - -@mock_cloudformation_deprecated -def test_create_stack_kinesis(): - conn = boto.connect_cloudformation() - dummy_template = { - "AWSTemplateFormatVersion": "2010-09-09", - "Description": "Stack Kinesis Test 1", - "Parameters": {}, - "Resources": { - "stream1": { - "Type" : "AWS::Kinesis::Stream", - "Properties" : { - "Name": "stream1", - "ShardCount": 2 - } - } - } - } - conn.create_stack( - "test_stack_kinesis_1", - template_body=json.dumps(dummy_template), - parameters={}.items() - ) - - stack = conn.describe_stacks()[0] - resources = stack.list_resources() - assert len(resources) == 1 +from __future__ import unicode_literals + +import os +import json + +import boto +import boto.s3 +import boto.s3.key +import boto.cloudformation +from boto.exception import BotoServerError +import sure # noqa +# Ensure 'assert_raises' context manager support for Python 2.6 +import tests.backport_assert_raises # noqa +from nose.tools import assert_raises + +from moto import mock_cloudformation_deprecated, mock_s3_deprecated, mock_route53_deprecated +from moto.cloudformation import cloudformation_backends + +dummy_template = { + "AWSTemplateFormatVersion": "2010-09-09", + "Description": "Stack 1", + "Resources": {}, +} + +dummy_template2 = { 
+ "AWSTemplateFormatVersion": "2010-09-09", + "Description": "Stack 2", + "Resources": {}, +} + +# template with resource which has no delete attribute defined +dummy_template3 = { + "AWSTemplateFormatVersion": "2010-09-09", + "Description": "Stack 3", + "Resources": { + "VPC": { + "Properties": { + "CidrBlock": "192.168.0.0/16", + }, + "Type": "AWS::EC2::VPC" + } + }, +} + +dummy_template_json = json.dumps(dummy_template) +dummy_template_json2 = json.dumps(dummy_template2) +dummy_template_json3 = json.dumps(dummy_template3) + + +@mock_cloudformation_deprecated +def test_create_stack(): + conn = boto.connect_cloudformation() + conn.create_stack( + "test_stack", + template_body=dummy_template_json, + ) + + stack = conn.describe_stacks()[0] + stack.stack_name.should.equal('test_stack') + stack.get_template().should.equal({ + 'GetTemplateResponse': { + 'GetTemplateResult': { + 'TemplateBody': dummy_template_json, + 'ResponseMetadata': { + 'RequestId': '2d06e36c-ac1d-11e0-a958-f9382b6eb86bEXAMPLE' + } + } + } + + }) + + +@mock_cloudformation_deprecated +@mock_route53_deprecated +def test_create_stack_hosted_zone_by_id(): + conn = boto.connect_cloudformation() + dummy_template = { + "AWSTemplateFormatVersion": "2010-09-09", + "Description": "Stack 1", + "Parameters": { + }, + "Resources": { + "Bar": { + "Type" : "AWS::Route53::HostedZone", + "Properties" : { + "Name" : "foo.bar.baz", + } + }, + }, + } + dummy_template2 = { + "AWSTemplateFormatVersion": "2010-09-09", + "Description": "Stack 2", + "Parameters": { + "ZoneId": { "Type": "String" } + }, + "Resources": { + "Foo": { + "Properties": { + "HostedZoneId": {"Ref": "ZoneId"}, + "RecordSets": [] + }, + "Type": "AWS::Route53::RecordSetGroup" + } + }, + } + conn.create_stack( + "test_stack", + template_body=json.dumps(dummy_template), + parameters={}.items() + ) + r53_conn = boto.connect_route53() + zone_id = r53_conn.get_zones()[0].id + conn.create_stack( + "test_stack", + template_body=json.dumps(dummy_template2), + parameters={"ZoneId": zone_id}.items() + ) + + stack = conn.describe_stacks()[0] + assert stack.list_resources() + + +@mock_cloudformation_deprecated +def test_creating_stacks_across_regions(): + west1_conn = boto.cloudformation.connect_to_region("us-west-1") + west1_conn.create_stack("test_stack", template_body=dummy_template_json) + + west2_conn = boto.cloudformation.connect_to_region("us-west-2") + west2_conn.create_stack("test_stack", template_body=dummy_template_json) + + list(west1_conn.describe_stacks()).should.have.length_of(1) + list(west2_conn.describe_stacks()).should.have.length_of(1) + + +@mock_cloudformation_deprecated +def test_create_stack_with_notification_arn(): + conn = boto.connect_cloudformation() + conn.create_stack( + "test_stack_with_notifications", + template_body=dummy_template_json, + notification_arns='arn:aws:sns:us-east-1:123456789012:fake-queue' + ) + + stack = conn.describe_stacks()[0] + [n.value for n in stack.notification_arns].should.contain( + 'arn:aws:sns:us-east-1:123456789012:fake-queue') + + +@mock_cloudformation_deprecated +@mock_s3_deprecated +def test_create_stack_from_s3_url(): + s3_conn = boto.s3.connect_to_region('us-west-1') + bucket = s3_conn.create_bucket("foobar") + key = boto.s3.key.Key(bucket) + key.key = "template-key" + key.set_contents_from_string(dummy_template_json) + key_url = key.generate_url(expires_in=0, query_auth=False) + + conn = boto.cloudformation.connect_to_region('us-west-1') + conn.create_stack('new-stack', template_url=key_url) + + stack = 
conn.describe_stacks()[0] + stack.stack_name.should.equal('new-stack') + stack.get_template().should.equal( + { + 'GetTemplateResponse': { + 'GetTemplateResult': { + 'TemplateBody': dummy_template_json, + 'ResponseMetadata': { + 'RequestId': '2d06e36c-ac1d-11e0-a958-f9382b6eb86bEXAMPLE' + } + } + } + + }) + + +@mock_cloudformation_deprecated +def test_describe_stack_by_name(): + conn = boto.connect_cloudformation() + conn.create_stack( + "test_stack", + template_body=dummy_template_json, + ) + + stack = conn.describe_stacks("test_stack")[0] + stack.stack_name.should.equal('test_stack') + + +@mock_cloudformation_deprecated +def test_describe_stack_by_stack_id(): + conn = boto.connect_cloudformation() + conn.create_stack( + "test_stack", + template_body=dummy_template_json, + ) + + stack = conn.describe_stacks("test_stack")[0] + stack_by_id = conn.describe_stacks(stack.stack_id)[0] + stack_by_id.stack_id.should.equal(stack.stack_id) + stack_by_id.stack_name.should.equal("test_stack") + + +@mock_cloudformation_deprecated +def test_describe_deleted_stack(): + conn = boto.connect_cloudformation() + conn.create_stack( + "test_stack", + template_body=dummy_template_json, + ) + + stack = conn.describe_stacks("test_stack")[0] + stack_id = stack.stack_id + conn.delete_stack(stack.stack_id) + stack_by_id = conn.describe_stacks(stack_id)[0] + stack_by_id.stack_id.should.equal(stack.stack_id) + stack_by_id.stack_name.should.equal("test_stack") + stack_by_id.stack_status.should.equal("DELETE_COMPLETE") + + +@mock_cloudformation_deprecated +def test_get_template_by_name(): + conn = boto.connect_cloudformation() + conn.create_stack( + "test_stack", + template_body=dummy_template_json, + ) + + template = conn.get_template("test_stack") + template.should.equal({ + 'GetTemplateResponse': { + 'GetTemplateResult': { + 'TemplateBody': dummy_template_json, + 'ResponseMetadata': { + 'RequestId': '2d06e36c-ac1d-11e0-a958-f9382b6eb86bEXAMPLE' + } + } + } + + }) + + +@mock_cloudformation_deprecated +def test_list_stacks(): + conn = boto.connect_cloudformation() + conn.create_stack( + "test_stack", + template_body=dummy_template_json, + ) + conn.create_stack( + "test_stack2", + template_body=dummy_template_json, + ) + + stacks = conn.list_stacks() + stacks.should.have.length_of(2) + stacks[0].template_description.should.equal("Stack 1") + + +@mock_cloudformation_deprecated +def test_delete_stack_by_name(): + conn = boto.connect_cloudformation() + conn.create_stack( + "test_stack", + template_body=dummy_template_json, + ) + + conn.list_stacks().should.have.length_of(1) + conn.delete_stack("test_stack") + conn.list_stacks().should.have.length_of(0) + + +@mock_cloudformation_deprecated +def test_delete_stack_by_id(): + conn = boto.connect_cloudformation() + stack_id = conn.create_stack( + "test_stack", + template_body=dummy_template_json, + ) + + conn.list_stacks().should.have.length_of(1) + conn.delete_stack(stack_id) + conn.list_stacks().should.have.length_of(0) + with assert_raises(BotoServerError): + conn.describe_stacks("test_stack") + + conn.describe_stacks(stack_id).should.have.length_of(1) + + +@mock_cloudformation_deprecated +def test_delete_stack_with_resource_missing_delete_attr(): + conn = boto.connect_cloudformation() + conn.create_stack( + "test_stack", + template_body=dummy_template_json3, + ) + + conn.list_stacks().should.have.length_of(1) + conn.delete_stack("test_stack") + conn.list_stacks().should.have.length_of(0) + + +@mock_cloudformation_deprecated +def test_bad_describe_stack(): + conn = 
boto.connect_cloudformation() + with assert_raises(BotoServerError): + conn.describe_stacks("bad_stack") + + +@mock_cloudformation_deprecated() +def test_cloudformation_params(): + dummy_template = { + "AWSTemplateFormatVersion": "2010-09-09", + "Description": "Stack 1", + "Resources": {}, + "Parameters": { + "APPNAME": { + "Default": "app-name", + "Description": "The name of the app", + "Type": "String" + } + } + } + dummy_template_json = json.dumps(dummy_template) + cfn = boto.connect_cloudformation() + cfn.create_stack('test_stack1', template_body=dummy_template_json, parameters=[ + ('APPNAME', 'testing123')]) + stack = cfn.describe_stacks('test_stack1')[0] + stack.parameters.should.have.length_of(1) + param = stack.parameters[0] + param.key.should.equal('APPNAME') + param.value.should.equal('testing123') + + +@mock_cloudformation_deprecated +def test_cloudformation_params_conditions_and_resources_are_distinct(): + dummy_template = { + "AWSTemplateFormatVersion": "2010-09-09", + "Description": "Stack 1", + "Conditions": { + "FooEnabled": { + "Fn::Equals": [ + { + "Ref": "FooEnabled" + }, + "true" + ] + }, + "FooDisabled": { + "Fn::Not": [ + { + "Fn::Equals": [ + { + "Ref": "FooEnabled" + }, + "true" + ] + } + ] + } + }, + "Parameters": { + "FooEnabled": { + "Type": "String", + "AllowedValues": [ + "true", + "false" + ] + } + }, + "Resources": { + "Bar": { + "Properties": { + "CidrBlock": "192.168.0.0/16", + }, + "Condition": "FooDisabled", + "Type": "AWS::EC2::VPC" + } + } + } + dummy_template_json = json.dumps(dummy_template) + cfn = boto.connect_cloudformation() + cfn.create_stack('test_stack1', template_body=dummy_template_json, parameters=[('FooEnabled', 'true')]) + stack = cfn.describe_stacks('test_stack1')[0] + resources = stack.list_resources() + assert not [resource for resource in resources if resource.logical_resource_id == 'Bar'] + + +@mock_cloudformation_deprecated +def test_stack_tags(): + conn = boto.connect_cloudformation() + conn.create_stack( + "test_stack", + template_body=dummy_template_json, + tags={"foo": "bar", "baz": "bleh"}, + ) + + stack = conn.describe_stacks()[0] + dict(stack.tags).should.equal({"foo": "bar", "baz": "bleh"}) + + +@mock_cloudformation_deprecated +def test_update_stack(): + conn = boto.connect_cloudformation() + conn.create_stack( + "test_stack", + template_body=dummy_template_json, + ) + + conn.update_stack("test_stack", dummy_template_json2) + + stack = conn.describe_stacks()[0] + stack.stack_status.should.equal("UPDATE_COMPLETE") + stack.get_template().should.equal({ + 'GetTemplateResponse': { + 'GetTemplateResult': { + 'TemplateBody': dummy_template_json2, + 'ResponseMetadata': { + 'RequestId': '2d06e36c-ac1d-11e0-a958-f9382b6eb86bEXAMPLE' + } + } + } + }) + + +@mock_cloudformation_deprecated +def test_update_stack_with_previous_template(): + conn = boto.connect_cloudformation() + conn.create_stack( + "test_stack", + template_body=dummy_template_json, + ) + conn.update_stack("test_stack", use_previous_template=True) + + stack = conn.describe_stacks()[0] + stack.stack_status.should.equal("UPDATE_COMPLETE") + stack.get_template().should.equal({ + 'GetTemplateResponse': { + 'GetTemplateResult': { + 'TemplateBody': dummy_template_json, + 'ResponseMetadata': { + 'RequestId': '2d06e36c-ac1d-11e0-a958-f9382b6eb86bEXAMPLE' + } + } + } + }) + + +@mock_cloudformation_deprecated +def test_update_stack_with_parameters(): + dummy_template = { + "AWSTemplateFormatVersion": "2010-09-09", + "Description": "Stack", + "Resources": { + "VPC": { + "Properties": 
{ + "CidrBlock": {"Ref": "Bar"} + }, + "Type": "AWS::EC2::VPC" + } + }, + "Parameters": { + "Bar": { + "Type": "String" + } + } + } + dummy_template_json = json.dumps(dummy_template) + conn = boto.connect_cloudformation() + conn.create_stack( + "test_stack", + template_body=dummy_template_json, + parameters=[("Bar", "192.168.0.0/16")] + ) + conn.update_stack( + "test_stack", + template_body=dummy_template_json, + parameters=[("Bar", "192.168.0.1/16")] + ) + + stack = conn.describe_stacks()[0] + assert stack.parameters[0].value == "192.168.0.1/16" + + +@mock_cloudformation_deprecated +def test_update_stack_replace_tags(): + conn = boto.connect_cloudformation() + conn.create_stack( + "test_stack", + template_body=dummy_template_json, + tags={"foo": "bar"}, + ) + conn.update_stack( + "test_stack", + template_body=dummy_template_json, + tags={"foo": "baz"}, + ) + + stack = conn.describe_stacks()[0] + stack.stack_status.should.equal("UPDATE_COMPLETE") + # since there is one tag it doesn't come out as a list + dict(stack.tags).should.equal({"foo": "baz"}) + + +@mock_cloudformation_deprecated +def test_update_stack_when_rolled_back(): + conn = boto.connect_cloudformation() + stack_id = conn.create_stack( + "test_stack", template_body=dummy_template_json) + + cloudformation_backends[conn.region.name].stacks[ + stack_id].status = 'ROLLBACK_COMPLETE' + + with assert_raises(BotoServerError) as err: + conn.update_stack("test_stack", dummy_template_json) + + ex = err.exception + ex.body.should.match( + r'is in ROLLBACK_COMPLETE state and can not be updated') + ex.error_code.should.equal('ValidationError') + ex.reason.should.equal('Bad Request') + ex.status.should.equal(400) + + +@mock_cloudformation_deprecated +def test_describe_stack_events_shows_create_update_and_delete(): + conn = boto.connect_cloudformation() + stack_id = conn.create_stack( + "test_stack", template_body=dummy_template_json) + conn.update_stack(stack_id, template_body=dummy_template_json2) + conn.delete_stack(stack_id) + + # assert begins and ends with stack events + events = conn.describe_stack_events(stack_id) + events[0].resource_type.should.equal("AWS::CloudFormation::Stack") + events[-1].resource_type.should.equal("AWS::CloudFormation::Stack") + + # testing ordering of stack events without assuming resource events will not exist + # the AWS API returns events in reverse chronological order + stack_events_to_look_for = iter([ + ("DELETE_COMPLETE", None), + ("DELETE_IN_PROGRESS", "User Initiated"), + ("UPDATE_COMPLETE", None), + ("UPDATE_IN_PROGRESS", "User Initiated"), + ("CREATE_COMPLETE", None), + ("CREATE_IN_PROGRESS", "User Initiated"), + ]) + try: + for event in events: + event.stack_id.should.equal(stack_id) + event.stack_name.should.equal("test_stack") + event.event_id.should.match(r"[0-9a-f]{8}-([0-9a-f]{4}-){3}[0-9a-f]{12}") + + if event.resource_type == "AWS::CloudFormation::Stack": + event.logical_resource_id.should.equal("test_stack") + event.physical_resource_id.should.equal(stack_id) + + status_to_look_for, reason_to_look_for = next( + stack_events_to_look_for) + event.resource_status.should.equal(status_to_look_for) + if reason_to_look_for is not None: + event.resource_status_reason.should.equal( + reason_to_look_for) + except StopIteration: + assert False, "Too many stack events" + + list(stack_events_to_look_for).should.be.empty + + +@mock_cloudformation_deprecated +def test_create_stack_lambda_and_dynamodb(): + conn = boto.connect_cloudformation() + dummy_template = { + "AWSTemplateFormatVersion": "2010-09-09", 
+ "Description": "Stack Lambda Test 1", + "Parameters": { + }, + "Resources": { + "func1": { + "Type" : "AWS::Lambda::Function", + "Properties" : { + "Code": { + "S3Bucket": "bucket_123", + "S3Key": "key_123" + }, + "FunctionName": "func1", + "Handler": "handler.handler", + "Role": "role1", + "Runtime": "python2.7", + "Description": "descr", + "MemorySize": 12345, + } + }, + "func1version": { + "Type": "AWS::Lambda::LambdaVersion", + "Properties" : { + "Version": "v1.2.3" + } + }, + "tab1": { + "Type" : "AWS::DynamoDB::Table", + "Properties" : { + "TableName": "tab1", + "KeySchema": [{ + "AttributeName": "attr1", + "KeyType": "HASH" + }], + "AttributeDefinitions": [{ + "AttributeName": "attr1", + "AttributeType": "string" + }], + "ProvisionedThroughput": { + "ReadCapacityUnits": 10, + "WriteCapacityUnits": 10 + } + } + }, + "func1mapping": { + "Type": "AWS::Lambda::EventSourceMapping", + "Properties" : { + "FunctionName": "v1.2.3", + "EventSourceArn": "arn:aws:dynamodb:region:XXXXXX:table/tab1/stream/2000T00:00:00.000", + "StartingPosition": "0", + "BatchSize": 100, + "Enabled": True + } + } + }, + } + validate_s3_before = os.environ.get('VALIDATE_LAMBDA_S3', '') + try: + os.environ['VALIDATE_LAMBDA_S3'] = 'false' + conn.create_stack( + "test_stack_lambda_1", + template_body=json.dumps(dummy_template), + parameters={}.items() + ) + finally: + os.environ['VALIDATE_LAMBDA_S3'] = validate_s3_before + + stack = conn.describe_stacks()[0] + resources = stack.list_resources() + assert len(resources) == 4 + + +@mock_cloudformation_deprecated +def test_create_stack_kinesis(): + conn = boto.connect_cloudformation() + dummy_template = { + "AWSTemplateFormatVersion": "2010-09-09", + "Description": "Stack Kinesis Test 1", + "Parameters": {}, + "Resources": { + "stream1": { + "Type" : "AWS::Kinesis::Stream", + "Properties" : { + "Name": "stream1", + "ShardCount": 2 + } + } + } + } + conn.create_stack( + "test_stack_kinesis_1", + template_body=json.dumps(dummy_template), + parameters={}.items() + ) + + stack = conn.describe_stacks()[0] + resources = stack.list_resources() + assert len(resources) == 1 diff --git a/tests/test_cloudformation/test_cloudformation_stack_crud_boto3.py b/tests/test_cloudformation/test_cloudformation_stack_crud_boto3.py index 9bfae6174b13..152b359e3d10 100644 --- a/tests/test_cloudformation/test_cloudformation_stack_crud_boto3.py +++ b/tests/test_cloudformation/test_cloudformation_stack_crud_boto3.py @@ -1,795 +1,795 @@ -from __future__ import unicode_literals - -import json -from collections import OrderedDict - -import boto3 -from botocore.exceptions import ClientError -import sure # noqa -# Ensure 'assert_raises' context manager support for Python 2.6 -from nose.tools import assert_raises - -from moto import mock_cloudformation, mock_s3, mock_sqs, mock_ec2 - -dummy_template = { - "AWSTemplateFormatVersion": "2010-09-09", - "Description": "Stack 1", - "Resources": { - "EC2Instance1": { - "Type": "AWS::EC2::Instance", - "Properties": { - "ImageId": "ami-d3adb33f", - "KeyName": "dummy", - "InstanceType": "t2.micro", - "Tags": [ - { - "Key": "Description", - "Value": "Test tag" - }, - { - "Key": "Name", - "Value": "Name tag for tests" - } - ] - } - } - } -} - -dummy_template_yaml = """--- -AWSTemplateFormatVersion: 2010-09-09 -Description: Stack1 with yaml template -Resources: - EC2Instance1: - Type: AWS::EC2::Instance - Properties: - ImageId: ami-d3adb33f - KeyName: dummy - InstanceType: t2.micro - Tags: - - Key: Description - Value: Test tag - - Key: Name - Value: Name tag for 
tests -""" - -dummy_template_yaml_with_short_form_func = """--- -AWSTemplateFormatVersion: 2010-09-09 -Description: Stack1 with yaml template -Resources: - EC2Instance1: - Type: AWS::EC2::Instance - Properties: - ImageId: ami-d3adb33f - KeyName: !Join [ ":", [ du, m, my ] ] - InstanceType: t2.micro - Tags: - - Key: Description - Value: Test tag - - Key: Name - Value: Name tag for tests -""" - -dummy_template_yaml_with_ref = """--- -AWSTemplateFormatVersion: 2010-09-09 -Description: Stack1 with yaml template -Parameters: - TagDescription: - Type: String - TagName: - Type: String - -Resources: - EC2Instance1: - Type: AWS::EC2::Instance - Properties: - ImageId: ami-d3adb33f - KeyName: dummy - InstanceType: t2.micro - Tags: - - Key: Description - Value: - Ref: TagDescription - - Key: Name - Value: !Ref TagName -""" - -dummy_update_template = { - "AWSTemplateFormatVersion": "2010-09-09", - "Parameters": { - "KeyName": { - "Description": "Name of an existing EC2 KeyPair", - "Type": "AWS::EC2::KeyPair::KeyName", - "ConstraintDescription": "must be the name of an existing EC2 KeyPair." - } - }, - "Resources": { - "Instance": { - "Type": "AWS::EC2::Instance", - "Properties": { - "ImageId": "ami-08111162" - } - } - } -} - -dummy_output_template = { - "AWSTemplateFormatVersion": "2010-09-09", - "Description": "Stack 1", - "Resources": { - "Instance": { - "Type": "AWS::EC2::Instance", - "Properties": { - "ImageId": "ami-08111162" - } - } - }, - "Outputs": { - "StackVPC": { - "Description": "The ID of the VPC", - "Value": "VPCID", - "Export": { - "Name": "My VPC ID" - } - } - } -} - -dummy_import_template = { - "AWSTemplateFormatVersion": "2010-09-09", - "Resources": { - "Queue": { - "Type": "AWS::SQS::Queue", - "Properties": { - "QueueName": {"Fn::ImportValue": 'My VPC ID'}, - "VisibilityTimeout": 60, - } - } - } -} - -dummy_redrive_template = { - "AWSTemplateFormatVersion": "2010-09-09", - "Resources": { - "MainQueue": { - "Type": "AWS::SQS::Queue", - "Properties": { - "QueueName": "mainqueue.fifo", - "FifoQueue": True, - "ContentBasedDeduplication": False, - "RedrivePolicy": { - "deadLetterTargetArn": { - "Fn::GetAtt": [ - "DeadLetterQueue", - "Arn" - ] - }, - "maxReceiveCount": 5 - } - } - }, - "DeadLetterQueue": { - "Type": "AWS::SQS::Queue", - "Properties": { - "FifoQueue": True - } - }, - } -} - -dummy_template_json = json.dumps(dummy_template) -dummy_update_template_json = json.dumps(dummy_update_template) -dummy_output_template_json = json.dumps(dummy_output_template) -dummy_import_template_json = json.dumps(dummy_import_template) -dummy_redrive_template_json = json.dumps(dummy_redrive_template) - - - -@mock_cloudformation -def test_boto3_create_stack(): - cf_conn = boto3.client('cloudformation', region_name='us-east-1') - cf_conn.create_stack( - StackName="test_stack", - TemplateBody=dummy_template_json, - ) - - cf_conn.get_template(StackName="test_stack")['TemplateBody'].should.equal( - json.loads(dummy_template_json, object_pairs_hook=OrderedDict)) - - -@mock_cloudformation -def test_boto3_create_stack_with_yaml(): - cf_conn = boto3.client('cloudformation', region_name='us-east-1') - cf_conn.create_stack( - StackName="test_stack", - TemplateBody=dummy_template_yaml, - ) - - cf_conn.get_template(StackName="test_stack")['TemplateBody'].should.equal( - dummy_template_yaml) - - -@mock_cloudformation -def test_boto3_create_stack_with_short_form_func_yaml(): - cf_conn = boto3.client('cloudformation', region_name='us-east-1') - cf_conn.create_stack( - StackName="test_stack", - 
TemplateBody=dummy_template_yaml_with_short_form_func, - ) - - cf_conn.get_template(StackName="test_stack")['TemplateBody'].should.equal( - dummy_template_yaml_with_short_form_func) - - -@mock_cloudformation -def test_boto3_create_stack_with_ref_yaml(): - cf_conn = boto3.client('cloudformation', region_name='us-east-1') - params = [ - {'ParameterKey': 'TagDescription', 'ParameterValue': 'desc_ref'}, - {'ParameterKey': 'TagName', 'ParameterValue': 'name_ref'}, - ] - cf_conn.create_stack( - StackName="test_stack", - TemplateBody=dummy_template_yaml_with_ref, - Parameters=params - ) - - cf_conn.get_template(StackName="test_stack")['TemplateBody'].should.equal( - dummy_template_yaml_with_ref) - - -@mock_cloudformation -def test_creating_stacks_across_regions(): - west1_cf = boto3.resource('cloudformation', region_name='us-west-1') - west2_cf = boto3.resource('cloudformation', region_name='us-west-2') - west1_cf.create_stack( - StackName="test_stack", - TemplateBody=dummy_template_json, - ) - west2_cf.create_stack( - StackName="test_stack", - TemplateBody=dummy_template_json, - ) - - list(west1_cf.stacks.all()).should.have.length_of(1) - list(west2_cf.stacks.all()).should.have.length_of(1) - - -@mock_cloudformation -def test_create_stack_with_notification_arn(): - cf = boto3.resource('cloudformation', region_name='us-east-1') - cf.create_stack( - StackName="test_stack_with_notifications", - TemplateBody=dummy_template_json, - NotificationARNs=['arn:aws:sns:us-east-1:123456789012:fake-queue'], - ) - - stack = list(cf.stacks.all())[0] - stack.notification_arns.should.contain( - 'arn:aws:sns:us-east-1:123456789012:fake-queue') - - -@mock_cloudformation -def test_create_stack_with_role_arn(): - cf = boto3.resource('cloudformation', region_name='us-east-1') - cf.create_stack( - StackName="test_stack_with_notifications", - TemplateBody=dummy_template_json, - RoleARN='arn:aws:iam::123456789012:role/moto', - ) - stack = list(cf.stacks.all())[0] - stack.role_arn.should.equal('arn:aws:iam::123456789012:role/moto') - - -@mock_cloudformation -@mock_s3 -def test_create_stack_from_s3_url(): - s3 = boto3.client('s3') - s3_conn = boto3.resource('s3') - bucket = s3_conn.create_bucket(Bucket="foobar") - - key = s3_conn.Object( - 'foobar', 'template-key').put(Body=dummy_template_json) - key_url = s3.generate_presigned_url( - ClientMethod='get_object', - Params={ - 'Bucket': 'foobar', - 'Key': 'template-key' - } - ) - - cf_conn = boto3.client('cloudformation', region_name='us-west-1') - cf_conn.create_stack( - StackName='stack_from_url', - TemplateURL=key_url, - ) - cf_conn.get_template(StackName="stack_from_url")['TemplateBody'].should.equal( - json.loads(dummy_template_json, object_pairs_hook=OrderedDict)) - - -@mock_cloudformation -def test_update_stack_with_previous_value(): - name = 'update_stack_with_previous_value' - cf_conn = boto3.client('cloudformation', region_name='us-east-1') - cf_conn.create_stack( - StackName=name, TemplateBody=dummy_template_yaml_with_ref, - Parameters=[ - {'ParameterKey': 'TagName', 'ParameterValue': 'foo'}, - {'ParameterKey': 'TagDescription', 'ParameterValue': 'bar'}, - ] - ) - cf_conn.update_stack( - StackName=name, UsePreviousTemplate=True, - Parameters=[ - {'ParameterKey': 'TagName', 'UsePreviousValue': True}, - {'ParameterKey': 'TagDescription', 'ParameterValue': 'not bar'}, - ] - ) - stack = cf_conn.describe_stacks(StackName=name)['Stacks'][0] - tag_name = [x['ParameterValue'] for x in stack['Parameters'] - if x['ParameterKey'] == 'TagName'][0] - tag_desc = 
-@mock_cloudformation
-def test_update_stack_with_previous_value():
-    name = 'update_stack_with_previous_value'
-    cf_conn = boto3.client('cloudformation', region_name='us-east-1')
-    cf_conn.create_stack(
-        StackName=name, TemplateBody=dummy_template_yaml_with_ref,
-        Parameters=[
-            {'ParameterKey': 'TagName', 'ParameterValue': 'foo'},
-            {'ParameterKey': 'TagDescription', 'ParameterValue': 'bar'},
-        ]
-    )
-    cf_conn.update_stack(
-        StackName=name, UsePreviousTemplate=True,
-        Parameters=[
-            {'ParameterKey': 'TagName', 'UsePreviousValue': True},
-            {'ParameterKey': 'TagDescription', 'ParameterValue': 'not bar'},
-        ]
-    )
-    stack = cf_conn.describe_stacks(StackName=name)['Stacks'][0]
-    tag_name = [x['ParameterValue'] for x in stack['Parameters']
-                if x['ParameterKey'] == 'TagName'][0]
-    tag_desc = [x['ParameterValue'] for x in stack['Parameters']
-                if x['ParameterKey'] == 'TagDescription'][0]
-    assert tag_name == 'foo'
-    assert tag_desc == 'not bar'
-
-
-@mock_cloudformation
-@mock_s3
-@mock_ec2
-def test_update_stack_from_s3_url():
-    s3 = boto3.client('s3')
-    s3_conn = boto3.resource('s3')
-
-    cf_conn = boto3.client('cloudformation', region_name='us-east-1')
-    cf_conn.create_stack(
-        StackName="update_stack_from_url",
-        TemplateBody=dummy_template_json,
-        Tags=[{'Key': 'foo', 'Value': 'bar'}],
-    )
-
-    s3_conn.create_bucket(Bucket="foobar")
-
-    s3_conn.Object(
-        'foobar', 'template-key').put(Body=dummy_update_template_json)
-    key_url = s3.generate_presigned_url(
-        ClientMethod='get_object',
-        Params={
-            'Bucket': 'foobar',
-            'Key': 'template-key'
-        }
-    )
-
-    cf_conn.update_stack(
-        StackName="update_stack_from_url",
-        TemplateURL=key_url,
-    )
-
-    cf_conn.get_template(StackName="update_stack_from_url")[
-        'TemplateBody'].should.equal(
-        json.loads(dummy_update_template_json, object_pairs_hook=OrderedDict))
-
-
-@mock_cloudformation
-@mock_s3
-def test_create_change_set_from_s3_url():
-    s3 = boto3.client('s3')
-    s3_conn = boto3.resource('s3')
-    bucket = s3_conn.create_bucket(Bucket="foobar")
-
-    key = s3_conn.Object(
-        'foobar', 'template-key').put(Body=dummy_template_json)
-    key_url = s3.generate_presigned_url(
-        ClientMethod='get_object',
-        Params={
-            'Bucket': 'foobar',
-            'Key': 'template-key'
-        }
-    )
-    cf_conn = boto3.client('cloudformation', region_name='us-west-1')
-    response = cf_conn.create_change_set(
-        StackName='NewStack',
-        TemplateURL=key_url,
-        ChangeSetName='NewChangeSet',
-        ChangeSetType='CREATE',
-    )
-    assert 'arn:aws:cloudformation:us-west-1:123456789:changeSet/NewChangeSet/' in response['Id']
-    assert 'arn:aws:cloudformation:us-east-1:123456789:stack/NewStack' in response['StackId']
-
-
-@mock_cloudformation
-def test_execute_change_set_w_arn():
-    cf_conn = boto3.client('cloudformation', region_name='us-east-1')
-    change_set = cf_conn.create_change_set(
-        StackName='NewStack',
-        TemplateBody=dummy_template_json,
-        ChangeSetName='NewChangeSet',
-        ChangeSetType='CREATE',
-    )
-    cf_conn.execute_change_set(ChangeSetName=change_set['Id'])
-
-
-@mock_cloudformation
-def test_execute_change_set_w_name():
-    cf_conn = boto3.client('cloudformation', region_name='us-east-1')
-    change_set = cf_conn.create_change_set(
-        StackName='NewStack',
-        TemplateBody=dummy_template_json,
-        ChangeSetName='NewChangeSet',
-        ChangeSetType='CREATE',
-    )
-    cf_conn.execute_change_set(ChangeSetName='NewStack', StackName='NewStack')
-
-
-@mock_cloudformation
-def test_describe_stack_pagination():
-    conn = boto3.client('cloudformation', region_name='us-east-1')
-    for i in range(100):
-        conn.create_stack(
-            StackName="test_stack",
-            TemplateBody=dummy_template_json,
-        )
-
-    resp = conn.describe_stacks()
-    stacks = resp['Stacks']
-    stacks.should.have.length_of(50)
-    next_token = resp['NextToken']
-    next_token.should_not.be.none
-    resp2 = conn.describe_stacks(NextToken=next_token)
-    stacks.extend(resp2['Stacks'])
-    stacks.should.have.length_of(100)
-    assert 'NextToken' not in resp2.keys()
-
-
-@mock_cloudformation
-def test_describe_stack_resources():
-    cf_conn = boto3.client('cloudformation', region_name='us-east-1')
-    cf_conn.create_stack(
-        StackName="test_stack",
-        TemplateBody=dummy_template_json,
-    )
-
-    stack = cf_conn.describe_stacks(StackName="test_stack")['Stacks'][0]
-
-    response = cf_conn.describe_stack_resources(StackName=stack['StackName'])
-    resource = response['StackResources'][0]
-    resource['LogicalResourceId'].should.equal('EC2Instance1')
-    resource['ResourceStatus'].should.equal('CREATE_COMPLETE')
-    resource['ResourceType'].should.equal('AWS::EC2::Instance')
-    resource['StackId'].should.equal(stack['StackId'])
-
-
-@mock_cloudformation
-def test_describe_stack_by_name():
-    cf_conn = boto3.client('cloudformation', region_name='us-east-1')
-    cf_conn.create_stack(
-        StackName="test_stack",
-        TemplateBody=dummy_template_json,
-    )
-
-    stack = cf_conn.describe_stacks(StackName="test_stack")['Stacks'][0]
-    stack['StackName'].should.equal('test_stack')
-
-
-@mock_cloudformation
-def test_describe_stack_by_stack_id():
-    cf_conn = boto3.client('cloudformation', region_name='us-east-1')
-    cf_conn.create_stack(
-        StackName="test_stack",
-        TemplateBody=dummy_template_json,
-    )
-
-    stack = cf_conn.describe_stacks(StackName="test_stack")['Stacks'][0]
-    stack_by_id = cf_conn.describe_stacks(StackName=stack['StackId'])['Stacks'][
-        0]
-
-    stack_by_id['StackId'].should.equal(stack['StackId'])
-    stack_by_id['StackName'].should.equal("test_stack")
-
-
-@mock_cloudformation
-def test_list_stacks():
-    cf = boto3.resource('cloudformation', region_name='us-east-1')
-    cf.create_stack(
-        StackName="test_stack",
-        TemplateBody=dummy_template_json,
-    )
-    cf.create_stack(
-        StackName="test_stack2",
-        TemplateBody=dummy_template_json,
-    )
-
-    stacks = list(cf.stacks.all())
-    stacks.should.have.length_of(2)
-    stack_names = [stack.stack_name for stack in stacks]
-    stack_names.should.contain("test_stack")
-    stack_names.should.contain("test_stack2")
-
-
-@mock_cloudformation
-def test_delete_stack_from_resource():
-    cf = boto3.resource('cloudformation', region_name='us-east-1')
-    stack = cf.create_stack(
-        StackName="test_stack",
-        TemplateBody=dummy_template_json,
-    )
-
-    list(cf.stacks.all()).should.have.length_of(1)
-    stack.delete()
-    list(cf.stacks.all()).should.have.length_of(0)
-
-
-@mock_cloudformation
-@mock_ec2
-def test_delete_stack_by_name():
-    cf_conn = boto3.client('cloudformation', region_name='us-east-1')
-    cf_conn.create_stack(
-        StackName="test_stack",
-        TemplateBody=dummy_template_json,
-    )
-
-    cf_conn.describe_stacks()['Stacks'].should.have.length_of(1)
-    cf_conn.delete_stack(StackName="test_stack")
-    cf_conn.describe_stacks()['Stacks'].should.have.length_of(0)
-
-
-@mock_cloudformation
-def test_describe_deleted_stack():
-    cf_conn = boto3.client('cloudformation', region_name='us-east-1')
-    cf_conn.create_stack(
-        StackName="test_stack",
-        TemplateBody=dummy_template_json,
-    )
-
-    stack = cf_conn.describe_stacks(StackName="test_stack")['Stacks'][0]
-    stack_id = stack['StackId']
-    cf_conn.delete_stack(StackName=stack['StackId'])
-    stack_by_id = cf_conn.describe_stacks(StackName=stack_id)['Stacks'][0]
-    stack_by_id['StackId'].should.equal(stack['StackId'])
-    stack_by_id['StackName'].should.equal("test_stack")
-    stack_by_id['StackStatus'].should.equal("DELETE_COMPLETE")
-
-
-@mock_cloudformation
-@mock_ec2
-def test_describe_updated_stack():
-    cf_conn = boto3.client('cloudformation', region_name='us-east-1')
-    cf_conn.create_stack(
-        StackName="test_stack",
-        TemplateBody=dummy_template_json,
-        Tags=[{'Key': 'foo', 'Value': 'bar'}],
-    )
-
-    cf_conn.update_stack(
-        StackName="test_stack",
-        RoleARN='arn:aws:iam::123456789012:role/moto',
-        TemplateBody=dummy_update_template_json,
-        Tags=[{'Key': 'foo', 'Value': 'baz'}],
-    )
-
-    stack = cf_conn.describe_stacks(StackName="test_stack")['Stacks'][0]
-    stack_id = stack['StackId']
-    stack_by_id = cf_conn.describe_stacks(StackName=stack_id)['Stacks'][0]
-    stack_by_id['StackId'].should.equal(stack['StackId'])
-    stack_by_id['StackName'].should.equal("test_stack")
-    stack_by_id['StackStatus'].should.equal("UPDATE_COMPLETE")
-    stack_by_id['RoleARN'].should.equal('arn:aws:iam::123456789012:role/moto')
-    stack_by_id['Tags'].should.equal([{'Key': 'foo', 'Value': 'baz'}])
-
-
-@mock_cloudformation
-def test_bad_describe_stack():
-    cf_conn = boto3.client('cloudformation', region_name='us-east-1')
-    with assert_raises(ClientError):
-        cf_conn.describe_stacks(StackName="non_existent_stack")
-
-
-@mock_cloudformation()
-def test_cloudformation_params():
-    dummy_template_with_params = {
-        "AWSTemplateFormatVersion": "2010-09-09",
-        "Description": "Stack 1",
-        "Resources": {},
-        "Parameters": {
-            "APPNAME": {
-                "Default": "app-name",
-                "Description": "The name of the app",
-                "Type": "String"
-            }
-        }
-    }
-    dummy_template_with_params_json = json.dumps(dummy_template_with_params)
-
-    cf = boto3.resource('cloudformation', region_name='us-east-1')
-    stack = cf.create_stack(
-        StackName='test_stack',
-        TemplateBody=dummy_template_with_params_json,
-        Parameters=[{
-            "ParameterKey": "APPNAME",
-            "ParameterValue": "testing123",
-        }],
-    )
-
-    stack.parameters.should.have.length_of(1)
-    param = stack.parameters[0]
-    param['ParameterKey'].should.equal('APPNAME')
-    param['ParameterValue'].should.equal('testing123')
-
-
-@mock_cloudformation
-def test_stack_tags():
-    tags = [
-        {
-            "Key": "foo",
-            "Value": "bar"
-        },
-        {
-            "Key": "baz",
-            "Value": "bleh"
-        }
-    ]
-    cf = boto3.resource('cloudformation', region_name='us-east-1')
-    stack = cf.create_stack(
-        StackName="test_stack",
-        TemplateBody=dummy_template_json,
-        Tags=tags,
-    )
-    observed_tag_items = set(
-        item for items in [tag.items() for tag in stack.tags] for item in items)
-    expected_tag_items = set(
-        item for items in [tag.items() for tag in tags] for item in items)
-    observed_tag_items.should.equal(expected_tag_items)
-
-
-@mock_cloudformation
-@mock_ec2
-def test_stack_events():
-    cf = boto3.resource('cloudformation', region_name='us-east-1')
-    stack = cf.create_stack(
-        StackName="test_stack",
-        TemplateBody=dummy_template_json,
-    )
-    stack.update(TemplateBody=dummy_update_template_json)
-    stack = cf.Stack(stack.stack_id)
-    stack.delete()
-
-    # assert begins and ends with stack events
-    events = list(stack.events.all())
-    events[0].resource_type.should.equal("AWS::CloudFormation::Stack")
-    events[-1].resource_type.should.equal("AWS::CloudFormation::Stack")
-
-    # testing ordering of stack events without assuming resource events will not exist
-    # the AWS API returns events in reverse chronological order
-    stack_events_to_look_for = iter([
-        ("DELETE_COMPLETE", None),
-        ("DELETE_IN_PROGRESS", "User Initiated"),
-        ("UPDATE_COMPLETE", None),
-        ("UPDATE_IN_PROGRESS", "User Initiated"),
-        ("CREATE_COMPLETE", None),
-        ("CREATE_IN_PROGRESS", "User Initiated"),
-    ])
-    try:
-        for event in events:
-            event.stack_id.should.equal(stack.stack_id)
-            event.stack_name.should.equal("test_stack")
-            event.event_id.should.match(r"[0-9a-f]{8}-([0-9a-f]{4}-){3}[0-9a-f]{12}")
-
-            if event.resource_type == "AWS::CloudFormation::Stack":
-                event.logical_resource_id.should.equal("test_stack")
-                event.physical_resource_id.should.equal(stack.stack_id)
-
-                status_to_look_for, reason_to_look_for = next(
-                    stack_events_to_look_for)
-                event.resource_status.should.equal(status_to_look_for)
-                if reason_to_look_for is not None:
-                    event.resource_status_reason.should.equal(
-                        reason_to_look_for)
-    except StopIteration:
-        assert False, "Too many stack events"
-
-    list(stack_events_to_look_for).should.be.empty
-
-
-@mock_cloudformation
-def test_list_exports():
-    cf_client = boto3.client('cloudformation', region_name='us-east-1')
-    cf_resource = boto3.resource('cloudformation', region_name='us-east-1')
-    stack = cf_resource.create_stack(
-        StackName="test_stack",
-        TemplateBody=dummy_output_template_json,
-    )
-    output_value = 'VPCID'
-    exports = cf_client.list_exports()['Exports']
-
-    stack.outputs.should.have.length_of(1)
-    stack.outputs[0]['OutputValue'].should.equal(output_value)
-
-    exports.should.have.length_of(1)
-    exports[0]['ExportingStackId'].should.equal(stack.stack_id)
-    exports[0]['Name'].should.equal('My VPC ID')
-    exports[0]['Value'].should.equal(output_value)
-
-
-@mock_cloudformation
-def test_list_exports_with_token():
-    cf = boto3.client('cloudformation', region_name='us-east-1')
-    for i in range(101):
-        # Add index to ensure name is unique
-        dummy_output_template['Outputs']['StackVPC']['Export']['Name'] += str(i)
-        cf.create_stack(
-            StackName="test_stack",
-            TemplateBody=json.dumps(dummy_output_template),
-        )
-    exports = cf.list_exports()
-    exports['Exports'].should.have.length_of(100)
-    exports.get('NextToken').should_not.be.none
-
-    more_exports = cf.list_exports(NextToken=exports['NextToken'])
-    more_exports['Exports'].should.have.length_of(1)
-    more_exports.get('NextToken').should.be.none
-
-
-@mock_cloudformation
-def test_delete_stack_with_export():
-    cf = boto3.client('cloudformation', region_name='us-east-1')
-    stack = cf.create_stack(
-        StackName="test_stack",
-        TemplateBody=dummy_output_template_json,
-    )
-
-    stack_id = stack['StackId']
-    exports = cf.list_exports()['Exports']
-    exports.should.have.length_of(1)
-
-    cf.delete_stack(StackName=stack_id)
-    cf.list_exports()['Exports'].should.have.length_of(0)
-
-
-@mock_cloudformation
-def test_export_names_must_be_unique():
-    cf = boto3.resource('cloudformation', region_name='us-east-1')
-    first_stack = cf.create_stack(
-        StackName="test_stack",
-        TemplateBody=dummy_output_template_json,
-    )
-    with assert_raises(ClientError):
-        cf.create_stack(
-            StackName="test_stack",
-            TemplateBody=dummy_output_template_json,
-        )
-
-
-@mock_sqs
-@mock_cloudformation
-def test_stack_with_imports():
-    cf = boto3.resource('cloudformation', region_name='us-east-1')
-    ec2_resource = boto3.resource('sqs', region_name='us-east-1')
-
-    output_stack = cf.create_stack(
-        StackName="test_stack1",
-        TemplateBody=dummy_output_template_json,
-    )
-    import_stack = cf.create_stack(
-        StackName="test_stack2",
-        TemplateBody=dummy_import_template_json
-    )
-
-    output_stack.outputs.should.have.length_of(1)
-    output = output_stack.outputs[0]['OutputValue']
-    queue = ec2_resource.get_queue_by_name(QueueName=output)
-    queue.should_not.be.none
-
-
-@mock_sqs
-@mock_cloudformation
-def test_non_json_redrive_policy():
-    cf = boto3.resource('cloudformation', region_name='us-east-1')
-
-    stack = cf.create_stack(
-        StackName="test_stack1",
-        TemplateBody=dummy_redrive_template_json
-    )
-
-    stack.Resource('MainQueue').resource_status\
-        .should.equal("CREATE_COMPLETE")
-    stack.Resource('DeadLetterQueue').resource_status\
-        .should.equal("CREATE_COMPLETE")
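The test_non_json_redrive_policy case above exists because a template may declare RedrivePolicy as a plain mapping, while SQS itself only accepts it as a JSON string. A sketch of the serialization step, assuming illustrative queue URL and ARN values:

import json

import boto3

sqs = boto3.client('sqs', region_name='us-east-1')

# SQS expects the whole redrive policy as one JSON-encoded attribute value.
sqs.set_queue_attributes(
    QueueUrl='https://queue.amazonaws.com/123456789012/mainqueue.fifo',
    Attributes={
        'RedrivePolicy': json.dumps({
            'deadLetterTargetArn': 'arn:aws:sqs:us-east-1:123456789012:dlq.fifo',
            'maxReceiveCount': 5,
        }),
    },
)
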
+from __future__ import unicode_literals
+
+import json
+from collections import OrderedDict
+
+import boto3
+from botocore.exceptions import ClientError
+import sure  # noqa
+# Ensure 'assert_raises' context manager support for Python 2.6
+from nose.tools import assert_raises
+
+from moto import mock_cloudformation, mock_s3, mock_sqs, mock_ec2
+
+dummy_template = {
+    "AWSTemplateFormatVersion": "2010-09-09",
+    "Description": "Stack 1",
+    "Resources": {
+        "EC2Instance1": {
+            "Type": "AWS::EC2::Instance",
+            "Properties": {
+                "ImageId": "ami-d3adb33f",
+                "KeyName": "dummy",
+                "InstanceType": "t2.micro",
+                "Tags": [
+                    {
+                        "Key": "Description",
+                        "Value": "Test tag"
+                    },
+                    {
+                        "Key": "Name",
+                        "Value": "Name tag for tests"
+                    }
+                ]
+            }
+        }
+    }
+}
+
+dummy_template_yaml = """---
+AWSTemplateFormatVersion: 2010-09-09
+Description: Stack1 with yaml template
+Resources:
+  EC2Instance1:
+    Type: AWS::EC2::Instance
+    Properties:
+      ImageId: ami-d3adb33f
+      KeyName: dummy
+      InstanceType: t2.micro
+      Tags:
+        - Key: Description
+          Value: Test tag
+        - Key: Name
+          Value: Name tag for tests
+"""
+
+dummy_template_yaml_with_short_form_func = """---
+AWSTemplateFormatVersion: 2010-09-09
+Description: Stack1 with yaml template
+Resources:
+  EC2Instance1:
+    Type: AWS::EC2::Instance
+    Properties:
+      ImageId: ami-d3adb33f
+      KeyName: !Join [ ":", [ du, m, my ] ]
+      InstanceType: t2.micro
+      Tags:
+        - Key: Description
+          Value: Test tag
+        - Key: Name
+          Value: Name tag for tests
+"""
+
+dummy_template_yaml_with_ref = """---
+AWSTemplateFormatVersion: 2010-09-09
+Description: Stack1 with yaml template
+Parameters:
+  TagDescription:
+    Type: String
+  TagName:
+    Type: String
+
+Resources:
+  EC2Instance1:
+    Type: AWS::EC2::Instance
+    Properties:
+      ImageId: ami-d3adb33f
+      KeyName: dummy
+      InstanceType: t2.micro
+      Tags:
+        - Key: Description
+          Value:
+            Ref: TagDescription
+        - Key: Name
+          Value: !Ref TagName
+"""
+
+dummy_update_template = {
+    "AWSTemplateFormatVersion": "2010-09-09",
+    "Parameters": {
+        "KeyName": {
+            "Description": "Name of an existing EC2 KeyPair",
+            "Type": "AWS::EC2::KeyPair::KeyName",
+            "ConstraintDescription": "must be the name of an existing EC2 KeyPair."
+        }
+    },
+    "Resources": {
+        "Instance": {
+            "Type": "AWS::EC2::Instance",
+            "Properties": {
+                "ImageId": "ami-08111162"
+            }
+        }
+    }
+}
+
+dummy_output_template = {
+    "AWSTemplateFormatVersion": "2010-09-09",
+    "Description": "Stack 1",
+    "Resources": {
+        "Instance": {
+            "Type": "AWS::EC2::Instance",
+            "Properties": {
+                "ImageId": "ami-08111162"
+            }
+        }
+    },
+    "Outputs": {
+        "StackVPC": {
+            "Description": "The ID of the VPC",
+            "Value": "VPCID",
+            "Export": {
+                "Name": "My VPC ID"
+            }
+        }
+    }
+}
+
+dummy_import_template = {
+    "AWSTemplateFormatVersion": "2010-09-09",
+    "Resources": {
+        "Queue": {
+            "Type": "AWS::SQS::Queue",
+            "Properties": {
+                "QueueName": {"Fn::ImportValue": 'My VPC ID'},
+                "VisibilityTimeout": 60,
+            }
+        }
+    }
+}
+
+dummy_redrive_template = {
+    "AWSTemplateFormatVersion": "2010-09-09",
+    "Resources": {
+        "MainQueue": {
+            "Type": "AWS::SQS::Queue",
+            "Properties": {
+                "QueueName": "mainqueue.fifo",
+                "FifoQueue": True,
+                "ContentBasedDeduplication": False,
+                "RedrivePolicy": {
+                    "deadLetterTargetArn": {
+                        "Fn::GetAtt": [
+                            "DeadLetterQueue",
+                            "Arn"
+                        ]
+                    },
+                    "maxReceiveCount": 5
+                }
+            }
+        },
+        "DeadLetterQueue": {
+            "Type": "AWS::SQS::Queue",
+            "Properties": {
+                "FifoQueue": True
+            }
+        },
+    }
+}
+
+dummy_template_json = json.dumps(dummy_template)
+dummy_update_template_json = json.dumps(dummy_update_template)
+dummy_output_template_json = json.dumps(dummy_output_template)
+dummy_import_template_json = json.dumps(dummy_import_template)
+dummy_redrive_template_json = json.dumps(dummy_redrive_template)
+
+
+
+@mock_cloudformation
+def test_boto3_create_stack():
+    cf_conn = boto3.client('cloudformation', region_name='us-east-1')
+    cf_conn.create_stack(
+        StackName="test_stack",
+        TemplateBody=dummy_template_json,
+    )
+
+    cf_conn.get_template(StackName="test_stack")['TemplateBody'].should.equal(
+        json.loads(dummy_template_json, object_pairs_hook=OrderedDict))
+
+
+@mock_cloudformation
+def test_boto3_create_stack_with_yaml():
+    cf_conn = boto3.client('cloudformation', region_name='us-east-1')
+    cf_conn.create_stack(
+        StackName="test_stack",
+        TemplateBody=dummy_template_yaml,
+    )
+
+    cf_conn.get_template(StackName="test_stack")['TemplateBody'].should.equal(
+        dummy_template_yaml)
+
+
+@mock_cloudformation
+def test_boto3_create_stack_with_short_form_func_yaml():
+    cf_conn = boto3.client('cloudformation', region_name='us-east-1')
+    cf_conn.create_stack(
+        StackName="test_stack",
+        TemplateBody=dummy_template_yaml_with_short_form_func,
+    )
+
+    cf_conn.get_template(StackName="test_stack")['TemplateBody'].should.equal(
+        dummy_template_yaml_with_short_form_func)
+
+
+@mock_cloudformation
+def test_boto3_create_stack_with_ref_yaml():
+    cf_conn = boto3.client('cloudformation', region_name='us-east-1')
+    params = [
+        {'ParameterKey': 'TagDescription', 'ParameterValue': 'desc_ref'},
+        {'ParameterKey': 'TagName', 'ParameterValue': 'name_ref'},
+    ]
+    cf_conn.create_stack(
+        StackName="test_stack",
+        TemplateBody=dummy_template_yaml_with_ref,
+        Parameters=params
+    )
+
+    cf_conn.get_template(StackName="test_stack")['TemplateBody'].should.equal(
+        dummy_template_yaml_with_ref)
+
+
+@mock_cloudformation
+def test_creating_stacks_across_regions():
+    west1_cf = boto3.resource('cloudformation', region_name='us-west-1')
+    west2_cf = boto3.resource('cloudformation', region_name='us-west-2')
+    west1_cf.create_stack(
+        StackName="test_stack",
+        TemplateBody=dummy_template_json,
+    )
+    west2_cf.create_stack(
+        StackName="test_stack",
+        TemplateBody=dummy_template_json,
+    )
+
+    list(west1_cf.stacks.all()).should.have.length_of(1)
+    list(west2_cf.stacks.all()).should.have.length_of(1)
+
+
+@mock_cloudformation
+def test_create_stack_with_notification_arn():
+    cf = boto3.resource('cloudformation', region_name='us-east-1')
+    cf.create_stack(
+        StackName="test_stack_with_notifications",
+        TemplateBody=dummy_template_json,
+        NotificationARNs=['arn:aws:sns:us-east-1:123456789012:fake-queue'],
+    )
+
+    stack = list(cf.stacks.all())[0]
+    stack.notification_arns.should.contain(
+        'arn:aws:sns:us-east-1:123456789012:fake-queue')
+
+
+@mock_cloudformation
+def test_create_stack_with_role_arn():
+    cf = boto3.resource('cloudformation', region_name='us-east-1')
+    cf.create_stack(
+        StackName="test_stack_with_notifications",
+        TemplateBody=dummy_template_json,
+        RoleARN='arn:aws:iam::123456789012:role/moto',
+    )
+    stack = list(cf.stacks.all())[0]
+    stack.role_arn.should.equal('arn:aws:iam::123456789012:role/moto')
+
+
+@mock_cloudformation
+@mock_s3
+def test_create_stack_from_s3_url():
+    s3 = boto3.client('s3')
+    s3_conn = boto3.resource('s3')
+    bucket = s3_conn.create_bucket(Bucket="foobar")
+
+    key = s3_conn.Object(
+        'foobar', 'template-key').put(Body=dummy_template_json)
+    key_url = s3.generate_presigned_url(
+        ClientMethod='get_object',
+        Params={
+            'Bucket': 'foobar',
+            'Key': 'template-key'
+        }
+    )
+
+    cf_conn = boto3.client('cloudformation', region_name='us-west-1')
+    cf_conn.create_stack(
+        StackName='stack_from_url',
+        TemplateURL=key_url,
+    )
+    cf_conn.get_template(StackName="stack_from_url")['TemplateBody'].should.equal(
+        json.loads(dummy_template_json, object_pairs_hook=OrderedDict))
+
+
+@mock_cloudformation
+def test_update_stack_with_previous_value():
+    name = 'update_stack_with_previous_value'
+    cf_conn = boto3.client('cloudformation', region_name='us-east-1')
+    cf_conn.create_stack(
+        StackName=name, TemplateBody=dummy_template_yaml_with_ref,
+        Parameters=[
+            {'ParameterKey': 'TagName', 'ParameterValue': 'foo'},
+            {'ParameterKey': 'TagDescription', 'ParameterValue': 'bar'},
+        ]
+    )
+    cf_conn.update_stack(
+        StackName=name, UsePreviousTemplate=True,
+        Parameters=[
+            {'ParameterKey': 'TagName', 'UsePreviousValue': True},
+            {'ParameterKey': 'TagDescription', 'ParameterValue': 'not bar'},
+        ]
+    )
+    stack = cf_conn.describe_stacks(StackName=name)['Stacks'][0]
+    tag_name = [x['ParameterValue'] for x in stack['Parameters']
+                if x['ParameterKey'] == 'TagName'][0]
+    tag_desc = [x['ParameterValue'] for x in stack['Parameters']
+                if x['ParameterKey'] == 'TagDescription'][0]
+    assert tag_name == 'foo'
+    assert tag_desc == 'not bar'
+
+
+@mock_cloudformation
+@mock_s3
+@mock_ec2
+def test_update_stack_from_s3_url():
+    s3 = boto3.client('s3')
+    s3_conn = boto3.resource('s3')
+
+    cf_conn = boto3.client('cloudformation', region_name='us-east-1')
+    cf_conn.create_stack(
+        StackName="update_stack_from_url",
+        TemplateBody=dummy_template_json,
+        Tags=[{'Key': 'foo', 'Value': 'bar'}],
+    )
+
+    s3_conn.create_bucket(Bucket="foobar")
+
+    s3_conn.Object(
+        'foobar', 'template-key').put(Body=dummy_update_template_json)
+    key_url = s3.generate_presigned_url(
+        ClientMethod='get_object',
+        Params={
+            'Bucket': 'foobar',
+            'Key': 'template-key'
+        }
+    )
+
+    cf_conn.update_stack(
+        StackName="update_stack_from_url",
+        TemplateURL=key_url,
+    )
+
+    cf_conn.get_template(StackName="update_stack_from_url")[
+        'TemplateBody'].should.equal(
+        json.loads(dummy_update_template_json, object_pairs_hook=OrderedDict))
+
+
+@mock_cloudformation
+@mock_s3
+def test_create_change_set_from_s3_url():
+    s3 = boto3.client('s3')
+    s3_conn = boto3.resource('s3')
+    bucket = s3_conn.create_bucket(Bucket="foobar")
+
+    key = s3_conn.Object(
+        'foobar', 'template-key').put(Body=dummy_template_json)
+    key_url = s3.generate_presigned_url(
+        ClientMethod='get_object',
+        Params={
+            'Bucket': 'foobar',
+            'Key': 'template-key'
+        }
+    )
+    cf_conn = boto3.client('cloudformation', region_name='us-west-1')
+    response = cf_conn.create_change_set(
+        StackName='NewStack',
+        TemplateURL=key_url,
+        ChangeSetName='NewChangeSet',
+        ChangeSetType='CREATE',
+    )
+    assert 'arn:aws:cloudformation:us-west-1:123456789:changeSet/NewChangeSet/' in response['Id']
+    assert 'arn:aws:cloudformation:us-east-1:123456789:stack/NewStack' in response['StackId']
+
+
+@mock_cloudformation
+def test_execute_change_set_w_arn():
+    cf_conn = boto3.client('cloudformation', region_name='us-east-1')
+    change_set = cf_conn.create_change_set(
+        StackName='NewStack',
+        TemplateBody=dummy_template_json,
+        ChangeSetName='NewChangeSet',
+        ChangeSetType='CREATE',
+    )
+    cf_conn.execute_change_set(ChangeSetName=change_set['Id'])
+
+
+@mock_cloudformation
+def test_execute_change_set_w_name():
+    cf_conn = boto3.client('cloudformation', region_name='us-east-1')
+    change_set = cf_conn.create_change_set(
+        StackName='NewStack',
+        TemplateBody=dummy_template_json,
+        ChangeSetName='NewChangeSet',
+        ChangeSetType='CREATE',
+    )
+    cf_conn.execute_change_set(ChangeSetName='NewStack', StackName='NewStack')
+
+
+@mock_cloudformation
+def test_describe_stack_pagination():
+    conn = boto3.client('cloudformation', region_name='us-east-1')
+    for i in range(100):
+        conn.create_stack(
+            StackName="test_stack",
+            TemplateBody=dummy_template_json,
+        )
+
+    resp = conn.describe_stacks()
+    stacks = resp['Stacks']
+    stacks.should.have.length_of(50)
+    next_token = resp['NextToken']
+    next_token.should_not.be.none
+    resp2 = conn.describe_stacks(NextToken=next_token)
+    stacks.extend(resp2['Stacks'])
+    stacks.should.have.length_of(100)
+    assert 'NextToken' not in resp2.keys()
+
+
+@mock_cloudformation
+def test_describe_stack_resources():
+    cf_conn = boto3.client('cloudformation', region_name='us-east-1')
+    cf_conn.create_stack(
+        StackName="test_stack",
+        TemplateBody=dummy_template_json,
+    )
+
+    stack = cf_conn.describe_stacks(StackName="test_stack")['Stacks'][0]
+
+    response = cf_conn.describe_stack_resources(StackName=stack['StackName'])
+    resource = response['StackResources'][0]
+    resource['LogicalResourceId'].should.equal('EC2Instance1')
+    resource['ResourceStatus'].should.equal('CREATE_COMPLETE')
+    resource['ResourceType'].should.equal('AWS::EC2::Instance')
+    resource['StackId'].should.equal(stack['StackId'])
+
+
+@mock_cloudformation
+def test_describe_stack_by_name():
+    cf_conn = boto3.client('cloudformation', region_name='us-east-1')
+    cf_conn.create_stack(
+        StackName="test_stack",
+        TemplateBody=dummy_template_json,
+    )
+
+    stack = cf_conn.describe_stacks(StackName="test_stack")['Stacks'][0]
+    stack['StackName'].should.equal('test_stack')
+
+
+@mock_cloudformation
+def test_describe_stack_by_stack_id():
+    cf_conn = boto3.client('cloudformation', region_name='us-east-1')
+    cf_conn.create_stack(
+        StackName="test_stack",
+        TemplateBody=dummy_template_json,
+    )
+
+    stack = cf_conn.describe_stacks(StackName="test_stack")['Stacks'][0]
+    stack_by_id = cf_conn.describe_stacks(StackName=stack['StackId'])['Stacks'][
+        0]
+
+    stack_by_id['StackId'].should.equal(stack['StackId'])
+    stack_by_id['StackName'].should.equal("test_stack")
+
+
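test_describe_stack_pagination above checks a single NextToken hop; the general client-side pattern is to keep following the token until it is absent. A small sketch:

import boto3


def all_stacks(region_name='us-east-1'):
    # describe_stacks returns at most a page of stacks per call;
    # accumulate pages until no NextToken comes back.
    cf = boto3.client('cloudformation', region_name=region_name)
    stacks, token = [], None
    while True:
        kwargs = {'NextToken': token} if token else {}
        resp = cf.describe_stacks(**kwargs)
        stacks.extend(resp['Stacks'])
        token = resp.get('NextToken')
        if token is None:
            return stacks
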
+@mock_cloudformation
+def test_list_stacks():
+    cf = boto3.resource('cloudformation', region_name='us-east-1')
+    cf.create_stack(
+        StackName="test_stack",
+        TemplateBody=dummy_template_json,
+    )
+    cf.create_stack(
+        StackName="test_stack2",
+        TemplateBody=dummy_template_json,
+    )
+
+    stacks = list(cf.stacks.all())
+    stacks.should.have.length_of(2)
+    stack_names = [stack.stack_name for stack in stacks]
+    stack_names.should.contain("test_stack")
+    stack_names.should.contain("test_stack2")
+
+
+@mock_cloudformation
+def test_delete_stack_from_resource():
+    cf = boto3.resource('cloudformation', region_name='us-east-1')
+    stack = cf.create_stack(
+        StackName="test_stack",
+        TemplateBody=dummy_template_json,
+    )
+
+    list(cf.stacks.all()).should.have.length_of(1)
+    stack.delete()
+    list(cf.stacks.all()).should.have.length_of(0)
+
+
+@mock_cloudformation
+@mock_ec2
+def test_delete_stack_by_name():
+    cf_conn = boto3.client('cloudformation', region_name='us-east-1')
+    cf_conn.create_stack(
+        StackName="test_stack",
+        TemplateBody=dummy_template_json,
+    )
+
+    cf_conn.describe_stacks()['Stacks'].should.have.length_of(1)
+    cf_conn.delete_stack(StackName="test_stack")
+    cf_conn.describe_stacks()['Stacks'].should.have.length_of(0)
+
+
+@mock_cloudformation
+def test_describe_deleted_stack():
+    cf_conn = boto3.client('cloudformation', region_name='us-east-1')
+    cf_conn.create_stack(
+        StackName="test_stack",
+        TemplateBody=dummy_template_json,
+    )
+
+    stack = cf_conn.describe_stacks(StackName="test_stack")['Stacks'][0]
+    stack_id = stack['StackId']
+    cf_conn.delete_stack(StackName=stack['StackId'])
+    stack_by_id = cf_conn.describe_stacks(StackName=stack_id)['Stacks'][0]
+    stack_by_id['StackId'].should.equal(stack['StackId'])
+    stack_by_id['StackName'].should.equal("test_stack")
+    stack_by_id['StackStatus'].should.equal("DELETE_COMPLETE")
+
+
+@mock_cloudformation
+@mock_ec2
+def test_describe_updated_stack():
+    cf_conn = boto3.client('cloudformation', region_name='us-east-1')
+    cf_conn.create_stack(
+        StackName="test_stack",
+        TemplateBody=dummy_template_json,
+        Tags=[{'Key': 'foo', 'Value': 'bar'}],
+    )
+
+    cf_conn.update_stack(
+        StackName="test_stack",
+        RoleARN='arn:aws:iam::123456789012:role/moto',
+        TemplateBody=dummy_update_template_json,
+        Tags=[{'Key': 'foo', 'Value': 'baz'}],
+    )
+
+    stack = cf_conn.describe_stacks(StackName="test_stack")['Stacks'][0]
+    stack_id = stack['StackId']
+    stack_by_id = cf_conn.describe_stacks(StackName=stack_id)['Stacks'][0]
+    stack_by_id['StackId'].should.equal(stack['StackId'])
+    stack_by_id['StackName'].should.equal("test_stack")
+    stack_by_id['StackStatus'].should.equal("UPDATE_COMPLETE")
+    stack_by_id['RoleARN'].should.equal('arn:aws:iam::123456789012:role/moto')
+    stack_by_id['Tags'].should.equal([{'Key': 'foo', 'Value': 'baz'}])
+
+
+@mock_cloudformation
+def test_bad_describe_stack():
+    cf_conn = boto3.client('cloudformation', region_name='us-east-1')
+    with assert_raises(ClientError):
+        cf_conn.describe_stacks(StackName="non_existent_stack")
+
+
+@mock_cloudformation()
+def test_cloudformation_params():
+    dummy_template_with_params = {
+        "AWSTemplateFormatVersion": "2010-09-09",
+        "Description": "Stack 1",
+        "Resources": {},
+        "Parameters": {
+            "APPNAME": {
+                "Default": "app-name",
+                "Description": "The name of the app",
+                "Type": "String"
+            }
+        }
+    }
+    dummy_template_with_params_json = json.dumps(dummy_template_with_params)
+
+    cf = boto3.resource('cloudformation', region_name='us-east-1')
+    stack = cf.create_stack(
+        StackName='test_stack',
+        TemplateBody=dummy_template_with_params_json,
+        Parameters=[{
+            "ParameterKey": "APPNAME",
+            "ParameterValue": "testing123",
+        }],
+    )
+
+    stack.parameters.should.have.length_of(1)
+    param = stack.parameters[0]
+    param['ParameterKey'].should.equal('APPNAME')
+    param['ParameterValue'].should.equal('testing123')
+
+
+@mock_cloudformation
+def test_stack_tags():
+    tags = [
+        {
+            "Key": "foo",
+            "Value": "bar"
+        },
+        {
+            "Key": "baz",
+            "Value": "bleh"
+        }
+    ]
+    cf = boto3.resource('cloudformation', region_name='us-east-1')
+    stack = cf.create_stack(
+        StackName="test_stack",
+        TemplateBody=dummy_template_json,
+        Tags=tags,
+    )
+    observed_tag_items = set(
+        item for items in [tag.items() for tag in stack.tags] for item in items)
+    expected_tag_items = set(
+        item for items in [tag.items() for tag in tags] for item in items)
+    observed_tag_items.should.equal(expected_tag_items)
+
+
+@mock_cloudformation
+@mock_ec2
+def test_stack_events():
+    cf = boto3.resource('cloudformation', region_name='us-east-1')
+    stack = cf.create_stack(
+        StackName="test_stack",
+        TemplateBody=dummy_template_json,
+    )
+    stack.update(TemplateBody=dummy_update_template_json)
+    stack = cf.Stack(stack.stack_id)
+    stack.delete()
+
+    # assert begins and ends with stack events
+    events = list(stack.events.all())
+    events[0].resource_type.should.equal("AWS::CloudFormation::Stack")
+    events[-1].resource_type.should.equal("AWS::CloudFormation::Stack")
+
+    # testing ordering of stack events without assuming resource events will not exist
+    # the AWS API returns events in reverse chronological order
+    stack_events_to_look_for = iter([
+        ("DELETE_COMPLETE", None),
+        ("DELETE_IN_PROGRESS", "User Initiated"),
+        ("UPDATE_COMPLETE", None),
+        ("UPDATE_IN_PROGRESS", "User Initiated"),
+        ("CREATE_COMPLETE", None),
+        ("CREATE_IN_PROGRESS", "User Initiated"),
+    ])
+    try:
+        for event in events:
+            event.stack_id.should.equal(stack.stack_id)
+            event.stack_name.should.equal("test_stack")
+            event.event_id.should.match(r"[0-9a-f]{8}-([0-9a-f]{4}-){3}[0-9a-f]{12}")
+
+            if event.resource_type == "AWS::CloudFormation::Stack":
+                event.logical_resource_id.should.equal("test_stack")
+                event.physical_resource_id.should.equal(stack.stack_id)
+
+                status_to_look_for, reason_to_look_for = next(
+                    stack_events_to_look_for)
+                event.resource_status.should.equal(status_to_look_for)
+                if reason_to_look_for is not None:
+                    event.resource_status_reason.should.equal(
+                        reason_to_look_for)
+    except StopIteration:
+        assert False, "Too many stack events"
+
+    list(stack_events_to_look_for).should.be.empty
+
+
+@mock_cloudformation
+def test_list_exports():
+    cf_client = boto3.client('cloudformation', region_name='us-east-1')
+    cf_resource = boto3.resource('cloudformation', region_name='us-east-1')
+    stack = cf_resource.create_stack(
+        StackName="test_stack",
+        TemplateBody=dummy_output_template_json,
+    )
+    output_value = 'VPCID'
+    exports = cf_client.list_exports()['Exports']
+
+    stack.outputs.should.have.length_of(1)
+    stack.outputs[0]['OutputValue'].should.equal(output_value)
+
+    exports.should.have.length_of(1)
+    exports[0]['ExportingStackId'].should.equal(stack.stack_id)
+    exports[0]['Name'].should.equal('My VPC ID')
+    exports[0]['Value'].should.equal(output_value)
+
+
+@mock_cloudformation
+def test_list_exports_with_token():
+    cf = boto3.client('cloudformation', region_name='us-east-1')
+    for i in range(101):
+        # Add index to ensure name is unique
+        dummy_output_template['Outputs']['StackVPC']['Export']['Name'] += str(i)
+        cf.create_stack(
+            StackName="test_stack",
+            TemplateBody=json.dumps(dummy_output_template),
+        )
+    exports = cf.list_exports()
+    exports['Exports'].should.have.length_of(100)
+    exports.get('NextToken').should_not.be.none
+
+    more_exports = cf.list_exports(NextToken=exports['NextToken'])
+    more_exports['Exports'].should.have.length_of(1)
+    more_exports.get('NextToken').should.be.none
+
+
+@mock_cloudformation
+def test_delete_stack_with_export():
+    cf = boto3.client('cloudformation', region_name='us-east-1')
+    stack = cf.create_stack(
+        StackName="test_stack",
+        TemplateBody=dummy_output_template_json,
+    )
+
+    stack_id = stack['StackId']
+    exports = cf.list_exports()['Exports']
+    exports.should.have.length_of(1)
+
+    cf.delete_stack(StackName=stack_id)
+    cf.list_exports()['Exports'].should.have.length_of(0)
+
+
+@mock_cloudformation
+def test_export_names_must_be_unique():
+    cf = boto3.resource('cloudformation', region_name='us-east-1')
+    first_stack = cf.create_stack(
+        StackName="test_stack",
+        TemplateBody=dummy_output_template_json,
+    )
+    with assert_raises(ClientError):
+        cf.create_stack(
+            StackName="test_stack",
+            TemplateBody=dummy_output_template_json,
+        )
+
+
+@mock_sqs
+@mock_cloudformation
+def test_stack_with_imports():
+    cf = boto3.resource('cloudformation', region_name='us-east-1')
+    ec2_resource = boto3.resource('sqs', region_name='us-east-1')
+
+    output_stack = cf.create_stack(
+        StackName="test_stack1",
+        TemplateBody=dummy_output_template_json,
+    )
+    import_stack = cf.create_stack(
+        StackName="test_stack2",
+        TemplateBody=dummy_import_template_json
+    )
+
+    output_stack.outputs.should.have.length_of(1)
+    output = output_stack.outputs[0]['OutputValue']
+    queue = ec2_resource.get_queue_by_name(QueueName=output)
+    queue.should_not.be.none
+
+
+@mock_sqs
+@mock_cloudformation
+def test_non_json_redrive_policy():
+    cf = boto3.resource('cloudformation', region_name='us-east-1')
+
+    stack = cf.create_stack(
+        StackName="test_stack1",
+        TemplateBody=dummy_redrive_template_json
+    )
+
+    stack.Resource('MainQueue').resource_status\
+        .should.equal("CREATE_COMPLETE")
+    stack.Resource('DeadLetterQueue').resource_status\
+        .should.equal("CREATE_COMPLETE")
diff --git a/tests/test_cloudformation/test_cloudformation_stack_integration.py b/tests/test_cloudformation/test_cloudformation_stack_integration.py
index 2c808726fdd3..936f7c2a1799 100644
--- a/tests/test_cloudformation/test_cloudformation_stack_integration.py
+++ b/tests/test_cloudformation/test_cloudformation_stack_integration.py
@@ -1,2427 +1,2427 @@
-from __future__ import unicode_literals
-import json
-
-import base64
-import boto
-import boto.cloudformation
-import boto.datapipeline
-import boto.ec2
-import boto.ec2.autoscale
-import boto.ec2.elb
-from boto.exception import BotoServerError
-import boto.iam
-import boto.redshift
-import boto.sns
-import boto.sqs
-import boto.vpc
-import boto3
-import sure  # noqa
-
-from moto import (
-    mock_autoscaling_deprecated,
-    mock_cloudformation,
-    mock_cloudformation_deprecated,
-    mock_datapipeline_deprecated,
-    mock_ec2,
-    mock_ec2_deprecated,
-    mock_elb,
-    mock_elb_deprecated,
-    mock_iam_deprecated,
-    mock_kms,
-    mock_lambda,
-    mock_rds_deprecated,
-    mock_rds2,
-    mock_rds2_deprecated,
-    mock_redshift,
-    mock_redshift_deprecated,
-    mock_route53_deprecated,
-    mock_sns_deprecated,
-    mock_sqs,
-    mock_sqs_deprecated,
-    mock_elbv2)
-
-from .fixtures import (
-    ec2_classic_eip,
-    fn_join,
-    rds_mysql_with_db_parameter_group,
-    rds_mysql_with_read_replica,
-    redshift,
-    route53_ec2_instance_with_public_ip,
-    route53_health_check,
-    route53_roundrobin,
-    single_instance_with_ebs_volume,
-    vpc_eip,
-    vpc_single_instance_in_subnet,
-)
-
-
-@mock_cloudformation_deprecated()
-def test_stack_sqs_integration():
-    sqs_template = {
-        "AWSTemplateFormatVersion": "2010-09-09",
-        "Resources": {
-            "QueueGroup": {
-
-                "Type": "AWS::SQS::Queue",
-                "Properties": {
-                    "QueueName": "my-queue",
-                    "VisibilityTimeout": 60,
-                }
-            },
-        },
-    }
-    sqs_template_json = json.dumps(sqs_template)
-
-    conn = boto.cloudformation.connect_to_region("us-west-1")
-    conn.create_stack(
-        "test_stack",
-        template_body=sqs_template_json,
-    )
-
-    stack = conn.describe_stacks()[0]
-    queue = stack.describe_resources()[0]
-    queue.resource_type.should.equal('AWS::SQS::Queue')
-    queue.logical_resource_id.should.equal("QueueGroup")
-    queue.physical_resource_id.should.equal("my-queue")
-
-
-@mock_cloudformation_deprecated()
-def test_stack_list_resources():
-    sqs_template = {
-        "AWSTemplateFormatVersion": "2010-09-09",
-        "Resources": {
-            "QueueGroup": {
-
-                "Type": "AWS::SQS::Queue",
-                "Properties": {
-                    "QueueName": "my-queue",
-                    "VisibilityTimeout": 60,
-                }
-            },
-        },
-    }
-    sqs_template_json = json.dumps(sqs_template)
-
-    conn = boto.cloudformation.connect_to_region("us-west-1")
-    conn.create_stack(
-        "test_stack",
-        template_body=sqs_template_json,
-    )
-
-    resources = conn.list_stack_resources("test_stack")
-    assert len(resources) == 1
-    queue = resources[0]
-    queue.resource_type.should.equal('AWS::SQS::Queue')
-    queue.logical_resource_id.should.equal("QueueGroup")
-    queue.physical_resource_id.should.equal("my-queue")
-
-
-@mock_cloudformation_deprecated()
-@mock_sqs_deprecated()
-def test_update_stack():
-    sqs_template = {
-        "AWSTemplateFormatVersion": "2010-09-09",
-        "Resources": {
-            "QueueGroup": {
-
-                "Type": "AWS::SQS::Queue",
-                "Properties": {
-                    "QueueName": "my-queue",
-                    "VisibilityTimeout": 60,
-                }
-            },
-        },
-    }
-    sqs_template_json = json.dumps(sqs_template)
-
-    conn = boto.cloudformation.connect_to_region("us-west-1")
-    conn.create_stack(
-        "test_stack",
-        template_body=sqs_template_json,
-    )
-
-    sqs_conn = boto.sqs.connect_to_region("us-west-1")
-    queues = sqs_conn.get_all_queues()
-    queues.should.have.length_of(1)
-    queues[0].get_attributes('VisibilityTimeout')[
-        'VisibilityTimeout'].should.equal('60')
-
-    sqs_template['Resources']['QueueGroup'][
-        'Properties']['VisibilityTimeout'] = 100
-    sqs_template_json = json.dumps(sqs_template)
-    conn.update_stack("test_stack", sqs_template_json)
-
-    queues = sqs_conn.get_all_queues()
-    queues.should.have.length_of(1)
-    queues[0].get_attributes('VisibilityTimeout')[
-        'VisibilityTimeout'].should.equal('100')
-
-
-@mock_cloudformation_deprecated()
-@mock_sqs_deprecated()
-def test_update_stack_and_remove_resource():
-    sqs_template = {
-        "AWSTemplateFormatVersion": "2010-09-09",
-        "Resources": {
-            "QueueGroup": {
-
-                "Type": "AWS::SQS::Queue",
-                "Properties": {
-                    "QueueName": "my-queue",
-                    "VisibilityTimeout": 60,
-                }
-            },
-        },
-    }
-    sqs_template_json = json.dumps(sqs_template)
-
-    conn = boto.cloudformation.connect_to_region("us-west-1")
-    conn.create_stack(
-        "test_stack",
-        template_body=sqs_template_json,
-    )
-
-    sqs_conn = boto.sqs.connect_to_region("us-west-1")
-    queues = sqs_conn.get_all_queues()
-    queues.should.have.length_of(1)
-
-    sqs_template['Resources'].pop('QueueGroup')
-    sqs_template_json = json.dumps(sqs_template)
-    conn.update_stack("test_stack", sqs_template_json)
-
-    queues = sqs_conn.get_all_queues()
-    queues.should.have.length_of(0)
-
-
-@mock_cloudformation_deprecated()
-@mock_sqs_deprecated()
-def test_update_stack_and_add_resource():
-    sqs_template = {
-        "AWSTemplateFormatVersion": "2010-09-09",
-        "Resources": {},
-    }
-    sqs_template_json = json.dumps(sqs_template)
-
-    conn = boto.cloudformation.connect_to_region("us-west-1")
-    conn.create_stack(
-        "test_stack",
-        template_body=sqs_template_json,
-    )
-
-    sqs_conn = boto.sqs.connect_to_region("us-west-1")
-    queues = sqs_conn.get_all_queues()
-    queues.should.have.length_of(0)
-
-    sqs_template = {
-        "AWSTemplateFormatVersion": "2010-09-09",
-        "Resources": {
-            "QueueGroup": {
-
-                "Type": "AWS::SQS::Queue",
-                "Properties": {
-                    "QueueName": "my-queue",
-                    "VisibilityTimeout": 60,
-                }
-            },
-        },
-    }
-    sqs_template_json = json.dumps(sqs_template)
-    conn.update_stack("test_stack", sqs_template_json)
-
-    queues = sqs_conn.get_all_queues()
-    queues.should.have.length_of(1)
-
-
-@mock_ec2_deprecated()
-@mock_cloudformation_deprecated()
-def test_stack_ec2_integration():
-    ec2_template = {
-        "AWSTemplateFormatVersion": "2010-09-09",
-        "Resources": {
-            "WebServerGroup": {
-                "Type": "AWS::EC2::Instance",
-                "Properties": {
-                    "ImageId": "ami-1234abcd",
-                    "UserData": "some user data",
-                }
-            },
-        },
-    }
-    ec2_template_json = json.dumps(ec2_template)
-
-    conn = boto.cloudformation.connect_to_region("us-west-1")
-    conn.create_stack(
-        "ec2_stack",
-        template_body=ec2_template_json,
-    )
-
-    ec2_conn = boto.ec2.connect_to_region("us-west-1")
-    reservation = ec2_conn.get_all_instances()[0]
-    ec2_instance = reservation.instances[0]
-
-    stack = conn.describe_stacks()[0]
-    instance = stack.describe_resources()[0]
-    instance.resource_type.should.equal('AWS::EC2::Instance')
-    instance.logical_resource_id.should.contain("WebServerGroup")
-    instance.physical_resource_id.should.equal(ec2_instance.id)
-
-
-@mock_ec2_deprecated()
-@mock_elb_deprecated()
-@mock_cloudformation_deprecated()
-def test_stack_elb_integration_with_attached_ec2_instances():
-    elb_template = {
-        "AWSTemplateFormatVersion": "2010-09-09",
-        "Resources": {
-            "MyELB": {
-                "Type": "AWS::ElasticLoadBalancing::LoadBalancer",
-                "Properties": {
-                    "Instances": [{"Ref": "Ec2Instance1"}],
-                    "LoadBalancerName": "test-elb",
-                    "AvailabilityZones": ['us-east-1'],
-                    "Listeners": [
-                        {
-                            "InstancePort": "80",
-                            "LoadBalancerPort": "80",
-                            "Protocol": "HTTP",
-                        }
-                    ],
-                }
-            },
-            "Ec2Instance1": {
-                "Type": "AWS::EC2::Instance",
-                "Properties": {
-                    "ImageId": "ami-1234abcd",
-                    "UserData": "some user data",
-                }
-            },
-        },
-    }
-    elb_template_json = json.dumps(elb_template)
-
-    conn = boto.cloudformation.connect_to_region("us-west-1")
-    conn.create_stack(
-        "elb_stack",
-        template_body=elb_template_json,
-    )
-
-    elb_conn = boto.ec2.elb.connect_to_region("us-west-1")
-    load_balancer = elb_conn.get_all_load_balancers()[0]
-
-    ec2_conn = boto.ec2.connect_to_region("us-west-1")
-    reservation = ec2_conn.get_all_instances()[0]
-    ec2_instance = reservation.instances[0]
-
-    load_balancer.instances[0].id.should.equal(ec2_instance.id)
-    list(load_balancer.availability_zones).should.equal(['us-east-1'])
-
-
-@mock_elb_deprecated()
-@mock_cloudformation_deprecated()
-def test_stack_elb_integration_with_health_check():
-    elb_template = {
-        "AWSTemplateFormatVersion": "2010-09-09",
-        "Resources": {
-            "MyELB": {
-                "Type": "AWS::ElasticLoadBalancing::LoadBalancer",
-                "Properties": {
-                    "LoadBalancerName": "test-elb",
-                    "AvailabilityZones": ['us-west-1'],
-                    "HealthCheck": {
-                        "HealthyThreshold": "3",
-                        "Interval": "5",
-                        "Target": "HTTP:80/healthcheck",
-                        "Timeout": "4",
-                        "UnhealthyThreshold": "2",
-                    },
-                    "Listeners": [
-                        {
-                            "InstancePort": "80",
-                            "LoadBalancerPort": "80",
-                            "Protocol": "HTTP",
-                        }
-                    ],
-                }
-            },
-        },
-    }
-    elb_template_json = json.dumps(elb_template)
-
-    conn = boto.cloudformation.connect_to_region("us-west-1")
-    conn.create_stack(
-        "elb_stack",
-        template_body=elb_template_json,
-    )
-
-    elb_conn = boto.ec2.elb.connect_to_region("us-west-1")
-    load_balancer = elb_conn.get_all_load_balancers()[0]
-    health_check = load_balancer.health_check
-
-    health_check.healthy_threshold.should.equal(3)
-    health_check.interval.should.equal(5)
-    health_check.target.should.equal("HTTP:80/healthcheck")
-    health_check.timeout.should.equal(4)
-    health_check.unhealthy_threshold.should.equal(2)
-
-
-@mock_elb_deprecated()
-@mock_cloudformation_deprecated()
-def test_stack_elb_integration_with_update():
-    elb_template = {
-        "AWSTemplateFormatVersion": "2010-09-09",
-        "Resources": {
-            "MyELB": {
-                "Type": "AWS::ElasticLoadBalancing::LoadBalancer",
-                "Properties": {
-                    "LoadBalancerName": "test-elb",
-                    "AvailabilityZones": ['us-west-1a'],
-                    "Listeners": [
-                        {
-                            "InstancePort": "80",
-                            "LoadBalancerPort": "80",
-                            "Protocol": "HTTP",
-                        }
-                    ],
-                    "Policies": {"Ref": "AWS::NoValue"},
-                }
-            },
-        },
-    }
-    elb_template_json = json.dumps(elb_template)
-
-    conn = boto.cloudformation.connect_to_region("us-west-1")
-    conn.create_stack(
-        "elb_stack",
-        template_body=elb_template_json,
-    )
-
-    elb_conn = boto.ec2.elb.connect_to_region("us-west-1")
-    load_balancer = elb_conn.get_all_load_balancers()[0]
-    load_balancer.availability_zones[0].should.equal('us-west-1a')
-
-    elb_template['Resources']['MyELB']['Properties'][
-        'AvailabilityZones'] = ['us-west-1b']
-    elb_template_json = json.dumps(elb_template)
-    conn.update_stack(
-        "elb_stack",
-        template_body=elb_template_json,
-    )
-    load_balancer = elb_conn.get_all_load_balancers()[0]
-    load_balancer.availability_zones[0].should.equal('us-west-1b')
-
-
-@mock_ec2_deprecated()
-@mock_redshift_deprecated()
-@mock_cloudformation_deprecated()
-def test_redshift_stack():
-    redshift_template_json = json.dumps(redshift.template)
-
-    vpc_conn = boto.vpc.connect_to_region("us-west-2")
-    conn = boto.cloudformation.connect_to_region("us-west-2")
-    conn.create_stack(
-        "redshift_stack",
-        template_body=redshift_template_json,
-        parameters=[
-            ("DatabaseName", "mydb"),
-            ("ClusterType", "multi-node"),
-            ("NumberOfNodes", 2),
-            ("NodeType", "dw1.xlarge"),
-            ("MasterUsername", "myuser"),
-            ("MasterUserPassword", "mypass"),
-            ("InboundTraffic", "10.0.0.1/16"),
-            ("PortNumber", 5439),
-        ]
-    )
-
-    redshift_conn = boto.redshift.connect_to_region("us-west-2")
-
-    cluster_res = redshift_conn.describe_clusters()
-    clusters = cluster_res['DescribeClustersResponse'][
-        'DescribeClustersResult']['Clusters']
-    clusters.should.have.length_of(1)
-    cluster = clusters[0]
-    cluster['DBName'].should.equal("mydb")
-    cluster['NumberOfNodes'].should.equal(2)
-    cluster['NodeType'].should.equal("dw1.xlarge")
-    cluster['MasterUsername'].should.equal("myuser")
-    cluster['Port'].should.equal(5439)
-    cluster['VpcSecurityGroups'].should.have.length_of(1)
-    security_group_id = cluster['VpcSecurityGroups'][0]['VpcSecurityGroupId']
-
-    groups = vpc_conn.get_all_security_groups(group_ids=[security_group_id])
-    groups.should.have.length_of(1)
-    group = groups[0]
-    group.rules.should.have.length_of(1)
-    group.rules[0].grants[0].cidr_ip.should.equal("10.0.0.1/16")
-
-
-@mock_ec2_deprecated()
-@mock_cloudformation_deprecated()
-def test_stack_security_groups():
-    security_group_template = {
-        "AWSTemplateFormatVersion": "2010-09-09",
-        "Resources": {
-            "my-security-group": {
-                "Type": "AWS::EC2::SecurityGroup",
-                "Properties": {
-                    "GroupDescription": "My other group",
-                },
-            },
-            "Ec2Instance2": {
-                "Type": "AWS::EC2::Instance",
-                "Properties": {
-                    "SecurityGroups": [{"Ref": "InstanceSecurityGroup"}],
-                    "ImageId": "ami-1234abcd",
-                }
-            },
-            "InstanceSecurityGroup": {
-                "Type": "AWS::EC2::SecurityGroup",
-                "Properties": {
-                    "GroupDescription": "My security group",
-                    "Tags": [
-                        {
-                            "Key": "bar",
-                            "Value": "baz"
-                        }
-                    ],
-                    "SecurityGroupIngress": [{
-                        "IpProtocol": "tcp",
-                        "FromPort": "22",
-                        "ToPort": "22",
-                        "CidrIp": "123.123.123.123/32",
-                    }, {
-                        "IpProtocol": "tcp",
-                        "FromPort": "80",
-                        "ToPort": "8000",
-                        "SourceSecurityGroupId": {"Ref": "my-security-group"},
-                    }]
-                }
-            }
-        },
-    }
-    security_group_template_json = json.dumps(security_group_template)
-
-    conn = boto.cloudformation.connect_to_region("us-west-1")
-    conn.create_stack(
-        "security_group_stack",
-        template_body=security_group_template_json,
-        tags={"foo": "bar"}
-    )
-
-    ec2_conn = boto.ec2.connect_to_region("us-west-1")
-    instance_group = ec2_conn.get_all_security_groups(
-        filters={'description': ['My security group']})[0]
-    other_group = ec2_conn.get_all_security_groups(
-        filters={'description': ['My other group']})[0]
-
-    reservation = ec2_conn.get_all_instances()[0]
-    ec2_instance = reservation.instances[0]
-
-    ec2_instance.groups[0].id.should.equal(instance_group.id)
-    instance_group.description.should.equal("My security group")
-    instance_group.tags.should.have.key('foo').which.should.equal('bar')
-    instance_group.tags.should.have.key('bar').which.should.equal('baz')
-    rule1, rule2 = instance_group.rules
-    int(rule1.to_port).should.equal(22)
-    int(rule1.from_port).should.equal(22)
-    rule1.grants[0].cidr_ip.should.equal("123.123.123.123/32")
-    rule1.ip_protocol.should.equal('tcp')
-
-    int(rule2.to_port).should.equal(8000)
-    int(rule2.from_port).should.equal(80)
-    rule2.ip_protocol.should.equal('tcp')
-    rule2.grants[0].group_id.should.equal(other_group.id)
-
-
boto.cloudformation.connect_to_region("us-west-1") - conn.create_stack( - "web_stack", - template_body=web_setup_template_json, - ) - - autoscale_conn = boto.ec2.autoscale.connect_to_region("us-west-1") - autoscale_group = autoscale_conn.get_all_groups()[0] - autoscale_group.launch_config_name.should.contain("my-launch-config") - autoscale_group.load_balancers[0].should.equal('my-elb') - - # Confirm the Launch config was actually created - autoscale_conn.get_all_launch_configurations().should.have.length_of(1) - - # Confirm the ELB was actually created - elb_conn = boto.ec2.elb.connect_to_region("us-west-1") - elb_conn.get_all_load_balancers().should.have.length_of(1) - - stack = conn.describe_stacks()[0] - resources = stack.describe_resources() - as_group_resource = [resource for resource in resources if resource.resource_type == - 'AWS::AutoScaling::AutoScalingGroup'][0] - as_group_resource.physical_resource_id.should.contain("my-as-group") - - launch_config_resource = [ - resource for resource in resources if - resource.resource_type == 'AWS::AutoScaling::LaunchConfiguration'][0] - launch_config_resource.physical_resource_id.should.contain( - "my-launch-config") - - elb_resource = [resource for resource in resources if resource.resource_type == - 'AWS::ElasticLoadBalancing::LoadBalancer'][0] - elb_resource.physical_resource_id.should.contain("my-elb") - - # confirm the instances were created with the right tags - ec2_conn = boto.ec2.connect_to_region('us-west-1') - reservations = ec2_conn.get_all_reservations() - len(reservations).should.equal(1) - reservation = reservations[0] - len(reservation.instances).should.equal(2) - for instance in reservation.instances: - instance.tags['propagated-test-tag'].should.equal('propagated-test-tag-value') - instance.tags.keys().should_not.contain('not-propagated-test-tag') - - -@mock_autoscaling_deprecated() -@mock_cloudformation_deprecated() -@mock_ec2_deprecated() -def test_autoscaling_group_update(): - asg_template = { - "AWSTemplateFormatVersion": "2010-09-09", - "Resources": { - "my-as-group": { - "Type": "AWS::AutoScaling::AutoScalingGroup", - "Properties": { - "AvailabilityZones": ['us-west-1'], - "LaunchConfigurationName": {"Ref": "my-launch-config"}, - "MinSize": "2", - "MaxSize": "2", - "DesiredCapacity": "2" - }, - }, - - "my-launch-config": { - "Type": "AWS::AutoScaling::LaunchConfiguration", - "Properties": { - "ImageId": "ami-1234abcd", - "UserData": "some user data", - } - }, - }, - } - asg_template_json = json.dumps(asg_template) - - conn = boto.cloudformation.connect_to_region("us-west-1") - conn.create_stack( - "asg_stack", - template_body=asg_template_json, - ) - - autoscale_conn = boto.ec2.autoscale.connect_to_region("us-west-1") - asg = autoscale_conn.get_all_groups()[0] - asg.min_size.should.equal(2) - asg.max_size.should.equal(2) - asg.desired_capacity.should.equal(2) - - asg_template['Resources']['my-as-group']['Properties']['MaxSize'] = 3 - asg_template['Resources']['my-as-group']['Properties']['Tags'] = [ - { - "Key": "propagated-test-tag", "Value": "propagated-test-tag-value", - "PropagateAtLaunch": True}, - { - "Key": "not-propagated-test-tag", - "Value": "not-propagated-test-tag-value", - "PropagateAtLaunch": False - } - ] - asg_template_json = json.dumps(asg_template) - conn.update_stack( - "asg_stack", - template_body=asg_template_json, - ) - asg = autoscale_conn.get_all_groups()[0] - asg.min_size.should.equal(2) - asg.max_size.should.equal(3) - asg.desired_capacity.should.equal(2) - - # confirm the instances were 
-@mock_autoscaling_deprecated()
-@mock_cloudformation_deprecated()
-@mock_ec2_deprecated()
-def test_autoscaling_group_update():
-    asg_template = {
-        "AWSTemplateFormatVersion": "2010-09-09",
-        "Resources": {
-            "my-as-group": {
-                "Type": "AWS::AutoScaling::AutoScalingGroup",
-                "Properties": {
-                    "AvailabilityZones": ['us-west-1'],
-                    "LaunchConfigurationName": {"Ref": "my-launch-config"},
-                    "MinSize": "2",
-                    "MaxSize": "2",
-                    "DesiredCapacity": "2"
-                },
-            },
-
-            "my-launch-config": {
-                "Type": "AWS::AutoScaling::LaunchConfiguration",
-                "Properties": {
-                    "ImageId": "ami-1234abcd",
-                    "UserData": "some user data",
-                }
-            },
-        },
-    }
-    asg_template_json = json.dumps(asg_template)
-
-    conn = boto.cloudformation.connect_to_region("us-west-1")
-    conn.create_stack(
-        "asg_stack",
-        template_body=asg_template_json,
-    )
-
-    autoscale_conn = boto.ec2.autoscale.connect_to_region("us-west-1")
-    asg = autoscale_conn.get_all_groups()[0]
-    asg.min_size.should.equal(2)
-    asg.max_size.should.equal(2)
-    asg.desired_capacity.should.equal(2)
-
-    asg_template['Resources']['my-as-group']['Properties']['MaxSize'] = 3
-    asg_template['Resources']['my-as-group']['Properties']['Tags'] = [
-        {
-            "Key": "propagated-test-tag", "Value": "propagated-test-tag-value",
-            "PropagateAtLaunch": True},
-        {
-            "Key": "not-propagated-test-tag",
-            "Value": "not-propagated-test-tag-value",
-            "PropagateAtLaunch": False
-        }
-    ]
-    asg_template_json = json.dumps(asg_template)
-    conn.update_stack(
-        "asg_stack",
-        template_body=asg_template_json,
-    )
-    asg = autoscale_conn.get_all_groups()[0]
-    asg.min_size.should.equal(2)
-    asg.max_size.should.equal(3)
-    asg.desired_capacity.should.equal(2)
-
-    # confirm the instances were created with the right tags
-    ec2_conn = boto.ec2.connect_to_region('us-west-1')
-    reservations = ec2_conn.get_all_reservations()
-    running_instance_count = 0
-    for res in reservations:
-        for instance in res.instances:
-            if instance.state == 'running':
-                running_instance_count += 1
-                instance.tags['propagated-test-tag'].should.equal('propagated-test-tag-value')
-                instance.tags.keys().should_not.contain('not-propagated-test-tag')
-    running_instance_count.should.equal(2)
-
-
-@mock_ec2_deprecated()
-@mock_cloudformation_deprecated()
-def test_vpc_single_instance_in_subnet():
-    template_json = json.dumps(vpc_single_instance_in_subnet.template)
-    conn = boto.cloudformation.connect_to_region("us-west-1")
-    conn.create_stack(
-        "test_stack",
-        template_body=template_json,
-        parameters=[("KeyName", "my_key")],
-    )
-
-    vpc_conn = boto.vpc.connect_to_region("us-west-1")
-
-    vpc = vpc_conn.get_all_vpcs(filters={'cidrBlock': '10.0.0.0/16'})[0]
-    vpc.cidr_block.should.equal("10.0.0.0/16")
-
-    # Add this once we implement the endpoint
-    # vpc_conn.get_all_internet_gateways().should.have.length_of(1)
-
-    subnet = vpc_conn.get_all_subnets(filters={'vpcId': vpc.id})[0]
-    subnet.vpc_id.should.equal(vpc.id)
-
-    ec2_conn = boto.ec2.connect_to_region("us-west-1")
-    reservation = ec2_conn.get_all_instances()[0]
-    instance = reservation.instances[0]
-    instance.tags["Foo"].should.equal("Bar")
-    # Check that the EIP is attached to the EC2 instance
-    eip = ec2_conn.get_all_addresses()[0]
-    eip.domain.should.equal('vpc')
-    eip.instance_id.should.equal(instance.id)
-
-    security_group = ec2_conn.get_all_security_groups(
-        filters={'vpc_id': [vpc.id]})[0]
-    security_group.vpc_id.should.equal(vpc.id)
-
-    stack = conn.describe_stacks()[0]
-
-    vpc.tags.should.have.key('Application').which.should.equal(stack.stack_id)
-
-    resources = stack.describe_resources()
-    vpc_resource = [
-        resource for resource in resources if resource.resource_type == 'AWS::EC2::VPC'][0]
-    vpc_resource.physical_resource_id.should.equal(vpc.id)
-
-    subnet_resource = [
-        resource for resource in resources if resource.resource_type == 'AWS::EC2::Subnet'][0]
-    subnet_resource.physical_resource_id.should.equal(subnet.id)
-
-    eip_resource = [
-        resource for resource in resources if resource.resource_type == 'AWS::EC2::EIP'][0]
-    eip_resource.physical_resource_id.should.equal(eip.public_ip)
-
-
-@mock_cloudformation()
-@mock_ec2()
-@mock_rds2()
-def test_rds_db_parameter_groups():
-    ec2_conn = boto3.client("ec2", region_name="us-west-1")
-    ec2_conn.create_security_group(
-        GroupName='application', Description='Our Application Group')
-
-    template_json = json.dumps(rds_mysql_with_db_parameter_group.template)
-    cf_conn = boto3.client('cloudformation', 'us-west-1')
-    cf_conn.create_stack(
-        StackName="test_stack",
-        TemplateBody=template_json,
-        Parameters=[{'ParameterKey': key, 'ParameterValue': value} for
-                    key, value in [
-                        ("DBInstanceIdentifier", "master_db"),
-                        ("DBName", "my_db"),
-                        ("DBUser", "my_user"),
-                        ("DBPassword", "my_password"),
-                        ("DBAllocatedStorage", "20"),
-                        ("DBInstanceClass", "db.m1.medium"),
-                        ("EC2SecurityGroup", "application"),
-                        ("MultiAZ", "true"),
-                    ]
-                    ],
-    )
-
-    rds_conn = boto3.client('rds', region_name="us-west-1")
-
-    db_parameter_groups = rds_conn.describe_db_parameter_groups()
-    len(db_parameter_groups['DBParameterGroups']).should.equal(1)
-    db_parameter_group_name = db_parameter_groups[
-        'DBParameterGroups'][0]['DBParameterGroupName']
-
-    found_cloudformation_set_parameter = False
-    for db_parameter in rds_conn.describe_db_parameters(DBParameterGroupName=db_parameter_group_name)[
-            'Parameters']:
-        if db_parameter['ParameterName'] == 'BACKLOG_QUEUE_LIMIT' and db_parameter[
-                'ParameterValue'] == '2048':
-            found_cloudformation_set_parameter = True
-
-    found_cloudformation_set_parameter.should.equal(True)
-
-
-@mock_cloudformation_deprecated()
-@mock_ec2_deprecated()
-@mock_rds_deprecated()
-def test_rds_mysql_with_read_replica():
-    ec2_conn = boto.ec2.connect_to_region("us-west-1")
-    ec2_conn.create_security_group('application', 'Our Application Group')
-
-    template_json = json.dumps(rds_mysql_with_read_replica.template)
-    conn = boto.cloudformation.connect_to_region("us-west-1")
-    conn.create_stack(
-        "test_stack",
-        template_body=template_json,
-        parameters=[
-            ("DBInstanceIdentifier", "master_db"),
-            ("DBName", "my_db"),
-            ("DBUser", "my_user"),
-            ("DBPassword", "my_password"),
-            ("DBAllocatedStorage", "20"),
-            ("DBInstanceClass", "db.m1.medium"),
-            ("EC2SecurityGroup", "application"),
-            ("MultiAZ", "true"),
-        ],
-    )
-
-    rds_conn = boto.rds.connect_to_region("us-west-1")
-
-    primary = rds_conn.get_all_dbinstances("master_db")[0]
-    primary.master_username.should.equal("my_user")
-    primary.allocated_storage.should.equal(20)
-    primary.instance_class.should.equal("db.m1.medium")
-    primary.multi_az.should.equal(True)
-    list(primary.read_replica_dbinstance_identifiers).should.have.length_of(1)
-    replica_id = primary.read_replica_dbinstance_identifiers[0]
-
-    replica = rds_conn.get_all_dbinstances(replica_id)[0]
-    replica.instance_class.should.equal("db.m1.medium")
-
-    security_group_name = primary.security_groups[0].name
-    security_group = rds_conn.get_all_dbsecurity_groups(security_group_name)[0]
-    security_group.ec2_groups[0].name.should.equal("application")
-
-
-@mock_cloudformation_deprecated()
-@mock_ec2_deprecated()
-@mock_rds_deprecated()
-def test_rds_mysql_with_read_replica_in_vpc():
-    template_json = json.dumps(rds_mysql_with_read_replica.template)
-    conn = boto.cloudformation.connect_to_region("eu-central-1")
-    conn.create_stack(
-        "test_stack",
-        template_body=template_json,
-        parameters=[
-            ("DBInstanceIdentifier", "master_db"),
-            ("DBName", "my_db"),
-            ("DBUser", "my_user"),
-            ("DBPassword", "my_password"),
-            ("DBAllocatedStorage", "20"),
-            ("DBInstanceClass", "db.m1.medium"),
-            ("MultiAZ", "true"),
-        ],
-    )
-
-    rds_conn = boto.rds.connect_to_region("eu-central-1")
-    primary = rds_conn.get_all_dbinstances("master_db")[0]
-
-    subnet_group_name = primary.subnet_group.name
-    subnet_group = rds_conn.get_all_db_subnet_groups(subnet_group_name)[0]
-    subnet_group.description.should.equal("my db subnet group")
-
-
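test_rds_db_parameter_groups builds its Parameters argument from key/value pairs inline; the transform is worth naming, since every boto3 create_stack call wants this shape. A tiny helper, assuming string values:

def to_cfn_parameters(mapping):
    # boto3 expects Parameters as a list of ParameterKey/ParameterValue
    # records rather than a plain dict.
    return [
        {'ParameterKey': key, 'ParameterValue': value}
        for key, value in mapping.items()
    ]


# e.g. to_cfn_parameters({'DBName': 'my_db', 'MultiAZ': 'true'})
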
[ - "ec2.amazonaws.com" - ] - } - } - ] - }, - "Path": "my-path", - "Policies": [ - { - "PolicyDocument": { - "Statement": [ - { - "Action": [ - "ec2:CreateTags", - "ec2:DescribeInstances", - "ec2:DescribeTags" - ], - "Effect": "Allow", - "Resource": [ - "*" - ] - } - ], - "Version": "2012-10-17" - }, - "PolicyName": "EC2_Tags" - }, - { - "PolicyDocument": { - "Statement": [ - { - "Action": [ - "sqs:*" - ], - "Effect": "Allow", - "Resource": [ - "*" - ] - } - ], - "Version": "2012-10-17" - }, - "PolicyName": "SQS" - }, - ] - }, - "Type": "AWS::IAM::Role" - }, - "my-role-no-path": { - "Properties": { - "AssumeRolePolicyDocument": { - "Statement": [ - { - "Action": [ - "sts:AssumeRole" - ], - "Effect": "Allow", - "Principal": { - "Service": [ - "ec2.amazonaws.com" - ] - } - } - ] - }, - }, - "Type": "AWS::IAM::Role" - } - } - } - - iam_template_json = json.dumps(iam_template) - conn = boto.cloudformation.connect_to_region("us-west-1") - conn.create_stack( - "test_stack", - template_body=iam_template_json, - ) - - iam_conn = boto.iam.connect_to_region("us-west-1") - - role_results = iam_conn.list_roles()['list_roles_response'][ - 'list_roles_result']['roles'] - role_name_to_id = {} - for role_result in role_results: - role = iam_conn.get_role(role_result.role_name) - role.role_name.should.contain("my-role") - if 'with-path' in role.role_name: - role_name_to_id['with-path'] = role.role_id - role.path.should.equal("my-path") - else: - role_name_to_id['no-path'] = role.role_id - role.role_name.should.contain('no-path') - role.path.should.equal('/') - - instance_profile_responses = iam_conn.list_instance_profiles()[ - 'list_instance_profiles_response']['list_instance_profiles_result']['instance_profiles'] - instance_profile_responses.should.have.length_of(2) - instance_profile_names = [] - - for instance_profile_response in instance_profile_responses: - instance_profile = iam_conn.get_instance_profile(instance_profile_response.instance_profile_name) - instance_profile_names.append(instance_profile.instance_profile_name) - instance_profile.instance_profile_name.should.contain( - "my-instance-profile") - if "with-path" in instance_profile.instance_profile_name: - instance_profile.path.should.equal("my-path") - instance_profile.role_id.should.equal(role_name_to_id['with-path']) - else: - instance_profile.instance_profile_name.should.contain('no-path') - instance_profile.role_id.should.equal(role_name_to_id['no-path']) - instance_profile.path.should.equal('/') - - autoscale_conn = boto.ec2.autoscale.connect_to_region("us-west-1") - launch_config = autoscale_conn.get_all_launch_configurations()[0] - launch_config.instance_profile_name.should.contain("my-instance-profile-with-path") - - stack = conn.describe_stacks()[0] - resources = stack.describe_resources() - instance_profile_resources = [ - resource for resource in resources if resource.resource_type == 'AWS::IAM::InstanceProfile'] - {ip.physical_resource_id for ip in instance_profile_resources}.should.equal(set(instance_profile_names)) - - role_resources = [ - resource for resource in resources if resource.resource_type == 'AWS::IAM::Role'] - {r.physical_resource_id for r in role_resources}.should.equal(set(role_name_to_id.values())) - - -@mock_ec2_deprecated() -@mock_cloudformation_deprecated() -def test_single_instance_with_ebs_volume(): - template_json = json.dumps(single_instance_with_ebs_volume.template) - conn = boto.cloudformation.connect_to_region("us-west-1") - conn.create_stack( - "test_stack", - template_body=template_json, - 
parameters=[("KeyName", "key_name")] - ) - - ec2_conn = boto.ec2.connect_to_region("us-west-1") - reservation = ec2_conn.get_all_instances()[0] - ec2_instance = reservation.instances[0] - - volumes = ec2_conn.get_all_volumes() - # Grab the mounted drive - volume = [ - volume for volume in volumes if volume.attach_data.device == '/dev/sdh'][0] - volume.volume_state().should.equal('in-use') - volume.attach_data.instance_id.should.equal(ec2_instance.id) - - stack = conn.describe_stacks()[0] - resources = stack.describe_resources() - ebs_volumes = [ - resource for resource in resources if resource.resource_type == 'AWS::EC2::Volume'] - ebs_volumes[0].physical_resource_id.should.equal(volume.id) - - -@mock_cloudformation_deprecated() -def test_create_template_without_required_param(): - template_json = json.dumps(single_instance_with_ebs_volume.template) - conn = boto.cloudformation.connect_to_region("us-west-1") - conn.create_stack.when.called_with( - "test_stack", - template_body=template_json, - ).should.throw(BotoServerError) - - -@mock_ec2_deprecated() -@mock_cloudformation_deprecated() -def test_classic_eip(): - template_json = json.dumps(ec2_classic_eip.template) - conn = boto.cloudformation.connect_to_region("us-west-1") - conn.create_stack("test_stack", template_body=template_json) - ec2_conn = boto.ec2.connect_to_region("us-west-1") - eip = ec2_conn.get_all_addresses()[0] - - stack = conn.describe_stacks()[0] - resources = stack.describe_resources() - cfn_eip = [ - resource for resource in resources if resource.resource_type == 'AWS::EC2::EIP'][0] - cfn_eip.physical_resource_id.should.equal(eip.public_ip) - - -@mock_ec2_deprecated() -@mock_cloudformation_deprecated() -def test_vpc_eip(): - template_json = json.dumps(vpc_eip.template) - conn = boto.cloudformation.connect_to_region("us-west-1") - conn.create_stack("test_stack", template_body=template_json) - ec2_conn = boto.ec2.connect_to_region("us-west-1") - eip = ec2_conn.get_all_addresses()[0] - - stack = conn.describe_stacks()[0] - resources = stack.describe_resources() - cfn_eip = [ - resource for resource in resources if resource.resource_type == 'AWS::EC2::EIP'][0] - cfn_eip.physical_resource_id.should.equal(eip.public_ip) - - -@mock_ec2_deprecated() -@mock_cloudformation_deprecated() -def test_fn_join(): - template_json = json.dumps(fn_join.template) - conn = boto.cloudformation.connect_to_region("us-west-1") - conn.create_stack("test_stack", template_body=template_json) - ec2_conn = boto.ec2.connect_to_region("us-west-1") - eip = ec2_conn.get_all_addresses()[0] - - stack = conn.describe_stacks()[0] - fn_join_output = stack.outputs[0] - fn_join_output.value.should.equal('test eip:{0}'.format(eip.public_ip)) - - -@mock_cloudformation_deprecated() -@mock_sqs_deprecated() -def test_conditional_resources(): - sqs_template = { - "AWSTemplateFormatVersion": "2010-09-09", - "Parameters": { - "EnvType": { - "Description": "Environment type.", - "Type": "String", - } - }, - "Conditions": { - "CreateQueue": {"Fn::Equals": [{"Ref": "EnvType"}, "prod"]} - }, - "Resources": { - "QueueGroup": { - "Condition": "CreateQueue", - "Type": "AWS::SQS::Queue", - "Properties": { - "QueueName": "my-queue", - "VisibilityTimeout": 60, - } - }, - }, - } - sqs_template_json = json.dumps(sqs_template) - - conn = boto.cloudformation.connect_to_region("us-west-1") - conn.create_stack( - "test_stack_without_queue", - template_body=sqs_template_json, - parameters=[("EnvType", "staging")], - ) - sqs_conn = boto.sqs.connect_to_region("us-west-1") - 
list(sqs_conn.get_all_queues()).should.have.length_of(0)
-
-    conn = boto.cloudformation.connect_to_region("us-west-1")
-    conn.create_stack(
-        "test_stack_with_queue",
-        template_body=sqs_template_json,
-        parameters=[("EnvType", "prod")],
-    )
-    sqs_conn = boto.sqs.connect_to_region("us-west-1")
-    list(sqs_conn.get_all_queues()).should.have.length_of(1)
-
-
-@mock_cloudformation_deprecated()
-@mock_ec2_deprecated()
-def test_conditional_if_handling():
-    dummy_template = {
-        "AWSTemplateFormatVersion": "2010-09-09",
-        "Conditions": {
-            "EnvEqualsPrd": {
-                "Fn::Equals": [
-                    {
-                        "Ref": "ENV"
-                    },
-                    "prd"
-                ]
-            }
-        },
-        "Parameters": {
-            "ENV": {
-                "Default": "dev",
-                "Description": "Deployment environment for the stack (dev/prd)",
-                "Type": "String"
-            },
-        },
-        "Description": "Stack 1",
-        "Resources": {
-            "App1": {
-                "Properties": {
-                    "ImageId": {
-                        "Fn::If": [
-                            "EnvEqualsPrd",
-                            "ami-00000000",
-                            "ami-ffffffff"
-                        ]
-                    },
-                },
-                "Type": "AWS::EC2::Instance"
-            },
-        }
-    }
-    dummy_template_json = json.dumps(dummy_template)
-
-    conn = boto.cloudformation.connect_to_region("us-west-1")
-    conn.create_stack('test_stack1', template_body=dummy_template_json)
-    ec2_conn = boto.ec2.connect_to_region("us-west-1")
-    reservation = ec2_conn.get_all_instances()[0]
-    ec2_instance = reservation.instances[0]
-    ec2_instance.image_id.should.equal("ami-ffffffff")
-    ec2_instance.terminate()
-
-    conn = boto.cloudformation.connect_to_region("us-west-2")
-    conn.create_stack(
-        'test_stack1', template_body=dummy_template_json, parameters=[("ENV", "prd")])
-    ec2_conn = boto.ec2.connect_to_region("us-west-2")
-    reservation = ec2_conn.get_all_instances()[0]
-    ec2_instance = reservation.instances[0]
-    ec2_instance.image_id.should.equal("ami-00000000")
-
-
-@mock_cloudformation_deprecated()
-@mock_ec2_deprecated()
-def test_cloudformation_mapping():
-    dummy_template = {
-        "AWSTemplateFormatVersion": "2010-09-09",
-        "Mappings": {
-            "RegionMap": {
-                "us-east-1": {"32": "ami-6411e20d", "64": "ami-7a11e213"},
-                "us-west-1": {"32": "ami-c9c7978c", "64": "ami-cfc7978a"},
-                "eu-west-1": {"32": "ami-37c2f643", "64": "ami-31c2f645"},
-                "ap-southeast-1": {"32": "ami-66f28c34", "64": "ami-60f28c32"},
-                "ap-northeast-1": {"32": "ami-9c03a89d", "64": "ami-a003a8a1"}
-            }
-        },
-        "Resources": {
-            "WebServer": {
-                "Type": "AWS::EC2::Instance",
-                "Properties": {
-                    "ImageId": {
-                        "Fn::FindInMap": ["RegionMap", {"Ref": "AWS::Region"}, "32"]
-                    },
-                    "InstanceType": "m1.small"
-                },
-            },
-        },
-    }
-
-    dummy_template_json = json.dumps(dummy_template)
-
-    conn = boto.cloudformation.connect_to_region("us-east-1")
-    conn.create_stack('test_stack1', template_body=dummy_template_json)
-    ec2_conn = boto.ec2.connect_to_region("us-east-1")
-    reservation = ec2_conn.get_all_instances()[0]
-    ec2_instance = reservation.instances[0]
-    ec2_instance.image_id.should.equal("ami-6411e20d")
-
-    conn = boto.cloudformation.connect_to_region("us-west-1")
-    conn.create_stack('test_stack1', template_body=dummy_template_json)
-    ec2_conn = boto.ec2.connect_to_region("us-west-1")
-    reservation = ec2_conn.get_all_instances()[0]
-    ec2_instance = reservation.instances[0]
-    ec2_instance.image_id.should.equal("ami-c9c7978c")
-
-
-@mock_cloudformation_deprecated()
-@mock_route53_deprecated()
-def test_route53_roundrobin():
-    route53_conn = boto.connect_route53()
-
-    template_json = json.dumps(route53_roundrobin.template)
-    conn = boto.cloudformation.connect_to_region("us-west-1")
-    stack = conn.create_stack(
-        "test_stack",
-        
template_body=template_json, - ) - - zones = route53_conn.get_all_hosted_zones()['ListHostedZonesResponse'][ - 'HostedZones'] - list(zones).should.have.length_of(1) - zone_id = zones[0]['Id'] - zone_id = zone_id.split('/') - zone_id = zone_id[2] - - rrsets = route53_conn.get_all_rrsets(zone_id) - rrsets.hosted_zone_id.should.equal(zone_id) - rrsets.should.have.length_of(2) - record_set1 = rrsets[0] - record_set1.name.should.equal('test_stack.us-west-1.my_zone.') - record_set1.identifier.should.equal("test_stack AWS") - record_set1.type.should.equal('CNAME') - record_set1.ttl.should.equal('900') - record_set1.weight.should.equal('3') - record_set1.resource_records[0].should.equal("aws.amazon.com") - - record_set2 = rrsets[1] - record_set2.name.should.equal('test_stack.us-west-1.my_zone.') - record_set2.identifier.should.equal("test_stack Amazon") - record_set2.type.should.equal('CNAME') - record_set2.ttl.should.equal('900') - record_set2.weight.should.equal('1') - record_set2.resource_records[0].should.equal("www.amazon.com") - - stack = conn.describe_stacks()[0] - output = stack.outputs[0] - output.key.should.equal('DomainName') - output.value.should.equal( - 'arn:aws:route53:::hostedzone/{0}'.format(zone_id)) - - -@mock_cloudformation_deprecated() -@mock_ec2_deprecated() -@mock_route53_deprecated() -def test_route53_ec2_instance_with_public_ip(): - route53_conn = boto.connect_route53() - ec2_conn = boto.ec2.connect_to_region("us-west-1") - - template_json = json.dumps(route53_ec2_instance_with_public_ip.template) - conn = boto.cloudformation.connect_to_region("us-west-1") - conn.create_stack( - "test_stack", - template_body=template_json, - ) - - instance_id = ec2_conn.get_all_reservations()[0].instances[0].id - - zones = route53_conn.get_all_hosted_zones()['ListHostedZonesResponse'][ - 'HostedZones'] - list(zones).should.have.length_of(1) - zone_id = zones[0]['Id'] - zone_id = zone_id.split('/') - zone_id = zone_id[2] - - rrsets = route53_conn.get_all_rrsets(zone_id) - rrsets.should.have.length_of(1) - - record_set1 = rrsets[0] - record_set1.name.should.equal('{0}.us-west-1.my_zone.'.format(instance_id)) - record_set1.identifier.should.equal(None) - record_set1.type.should.equal('A') - record_set1.ttl.should.equal('900') - record_set1.weight.should.equal(None) - record_set1.resource_records[0].should.equal("10.0.0.25") - - -@mock_cloudformation_deprecated() -@mock_route53_deprecated() -def test_route53_associate_health_check(): - route53_conn = boto.connect_route53() - - template_json = json.dumps(route53_health_check.template) - conn = boto.cloudformation.connect_to_region("us-west-1") - conn.create_stack( - "test_stack", - template_body=template_json, - ) - - checks = route53_conn.get_list_health_checks()['ListHealthChecksResponse'][ - 'HealthChecks'] - list(checks).should.have.length_of(1) - check = checks[0] - health_check_id = check['Id'] - config = check['HealthCheckConfig'] - config["FailureThreshold"].should.equal("3") - config["IPAddress"].should.equal("10.0.0.4") - config["Port"].should.equal("80") - config["RequestInterval"].should.equal("10") - config["ResourcePath"].should.equal("/") - config["Type"].should.equal("HTTP") - - zones = route53_conn.get_all_hosted_zones()['ListHostedZonesResponse'][ - 'HostedZones'] - list(zones).should.have.length_of(1) - zone_id = zones[0]['Id'] - zone_id = zone_id.split('/') - zone_id = zone_id[2] - - rrsets = route53_conn.get_all_rrsets(zone_id) - rrsets.should.have.length_of(1) - - record_set = rrsets[0] - 
record_set.health_check.should.equal(health_check_id) - - -@mock_cloudformation_deprecated() -@mock_route53_deprecated() -def test_route53_with_update(): - route53_conn = boto.connect_route53() - - template_json = json.dumps(route53_health_check.template) - cf_conn = boto.cloudformation.connect_to_region("us-west-1") - cf_conn.create_stack( - "test_stack", - template_body=template_json, - ) - - zones = route53_conn.get_all_hosted_zones()['ListHostedZonesResponse'][ - 'HostedZones'] - list(zones).should.have.length_of(1) - zone_id = zones[0]['Id'] - zone_id = zone_id.split('/') - zone_id = zone_id[2] - - rrsets = route53_conn.get_all_rrsets(zone_id) - rrsets.should.have.length_of(1) - - record_set = rrsets[0] - record_set.resource_records.should.equal(["my.example.com"]) - - route53_health_check.template['Resources']['myDNSRecord'][ - 'Properties']['ResourceRecords'] = ["my_other.example.com"] - template_json = json.dumps(route53_health_check.template) - cf_conn.update_stack( - "test_stack", - template_body=template_json, - ) - - zones = route53_conn.get_all_hosted_zones()['ListHostedZonesResponse'][ - 'HostedZones'] - list(zones).should.have.length_of(1) - zone_id = zones[0]['Id'] - zone_id = zone_id.split('/') - zone_id = zone_id[2] - - rrsets = route53_conn.get_all_rrsets(zone_id) - rrsets.should.have.length_of(1) - - record_set = rrsets[0] - record_set.resource_records.should.equal(["my_other.example.com"]) - - -@mock_cloudformation_deprecated() -@mock_sns_deprecated() -def test_sns_topic(): - dummy_template = { - "AWSTemplateFormatVersion": "2010-09-09", - "Resources": { - "MySNSTopic": { - "Type": "AWS::SNS::Topic", - "Properties": { - "Subscription": [ - {"Endpoint": "https://example.com", "Protocol": "https"}, - ], - "TopicName": "my_topics", - } - } - }, - "Outputs": { - "topic_name": { - "Value": {"Fn::GetAtt": ["MySNSTopic", "TopicName"]} - }, - "topic_arn": { - "Value": {"Ref": "MySNSTopic"} - }, - } - } - template_json = json.dumps(dummy_template) - conn = boto.cloudformation.connect_to_region("us-west-1") - stack = conn.create_stack( - "test_stack", - template_body=template_json, - ) - - sns_conn = boto.sns.connect_to_region("us-west-1") - topics = sns_conn.get_all_topics()["ListTopicsResponse"][ - "ListTopicsResult"]["Topics"] - topics.should.have.length_of(1) - topic_arn = topics[0]['TopicArn'] - topic_arn.should.contain("my_topics") - - subscriptions = sns_conn.get_all_subscriptions()["ListSubscriptionsResponse"][ - "ListSubscriptionsResult"]["Subscriptions"] - subscriptions.should.have.length_of(1) - subscription = subscriptions[0] - subscription["TopicArn"].should.equal(topic_arn) - subscription["Protocol"].should.equal("https") - subscription["SubscriptionArn"].should.contain(topic_arn) - subscription["Endpoint"].should.equal("https://example.com") - - stack = conn.describe_stacks()[0] - topic_name_output = [x for x in stack.outputs if x.key == 'topic_name'][0] - topic_name_output.value.should.equal("my_topics") - topic_arn_output = [x for x in stack.outputs if x.key == 'topic_arn'][0] - topic_arn_output.value.should.equal(topic_arn) - - -@mock_cloudformation_deprecated -@mock_ec2_deprecated -def test_vpc_gateway_attachment_creation_should_attach_itself_to_vpc(): - template = { - "AWSTemplateFormatVersion": "2010-09-09", - "Resources": { - "internetgateway": { - "Type": "AWS::EC2::InternetGateway" - }, - "testvpc": { - "Type": "AWS::EC2::VPC", - "Properties": { - "CidrBlock": "10.0.0.0/16", - "EnableDnsHostnames": "true", - "EnableDnsSupport": "true", - 
"InstanceTenancy": "default" - }, - }, - "vpcgatewayattachment": { - "Type": "AWS::EC2::VPCGatewayAttachment", - "Properties": { - "InternetGatewayId": { - "Ref": "internetgateway" - }, - "VpcId": { - "Ref": "testvpc" - } - }, - }, - } - } - - template_json = json.dumps(template) - cf_conn = boto.cloudformation.connect_to_region("us-west-1") - cf_conn.create_stack( - "test_stack", - template_body=template_json, - ) - - vpc_conn = boto.vpc.connect_to_region("us-west-1") - vpc = vpc_conn.get_all_vpcs(filters={'cidrBlock': '10.0.0.0/16'})[0] - igws = vpc_conn.get_all_internet_gateways( - filters={'attachment.vpc-id': vpc.id} - ) - - igws.should.have.length_of(1) - - -@mock_cloudformation_deprecated -@mock_ec2_deprecated -def test_vpc_peering_creation(): - vpc_conn = boto.vpc.connect_to_region("us-west-1") - vpc_source = vpc_conn.create_vpc("10.0.0.0/16") - peer_vpc = vpc_conn.create_vpc("10.1.0.0/16") - template = { - "AWSTemplateFormatVersion": "2010-09-09", - "Resources": { - "vpcpeeringconnection": { - "Type": "AWS::EC2::VPCPeeringConnection", - "Properties": { - "PeerVpcId": peer_vpc.id, - "VpcId": vpc_source.id, - } - }, - } - } - - template_json = json.dumps(template) - cf_conn = boto.cloudformation.connect_to_region("us-west-1") - cf_conn.create_stack( - "test_stack", - template_body=template_json, - ) - - peering_connections = vpc_conn.get_all_vpc_peering_connections() - peering_connections.should.have.length_of(1) - - -@mock_cloudformation_deprecated -@mock_ec2_deprecated -def test_multiple_security_group_ingress_separate_from_security_group_by_id(): - template = { - "AWSTemplateFormatVersion": "2010-09-09", - "Resources": { - "test-security-group1": { - "Type": "AWS::EC2::SecurityGroup", - "Properties": { - "GroupDescription": "test security group", - "Tags": [ - { - "Key": "sg-name", - "Value": "sg1" - } - ] - }, - }, - "test-security-group2": { - "Type": "AWS::EC2::SecurityGroup", - "Properties": { - "GroupDescription": "test security group", - "Tags": [ - { - "Key": "sg-name", - "Value": "sg2" - } - ] - }, - }, - "test-sg-ingress": { - "Type": "AWS::EC2::SecurityGroupIngress", - "Properties": { - "GroupId": {"Ref": "test-security-group1"}, - "IpProtocol": "tcp", - "FromPort": "80", - "ToPort": "8080", - "SourceSecurityGroupId": {"Ref": "test-security-group2"}, - } - } - } - } - - template_json = json.dumps(template) - cf_conn = boto.cloudformation.connect_to_region("us-west-1") - cf_conn.create_stack( - "test_stack", - template_body=template_json, - ) - ec2_conn = boto.ec2.connect_to_region("us-west-1") - - security_group1 = ec2_conn.get_all_security_groups( - filters={"tag:sg-name": "sg1"})[0] - security_group2 = ec2_conn.get_all_security_groups( - filters={"tag:sg-name": "sg2"})[0] - - security_group1.rules.should.have.length_of(1) - security_group1.rules[0].grants.should.have.length_of(1) - security_group1.rules[0].grants[ - 0].group_id.should.equal(security_group2.id) - security_group1.rules[0].ip_protocol.should.equal('tcp') - security_group1.rules[0].from_port.should.equal('80') - security_group1.rules[0].to_port.should.equal('8080') - - -@mock_cloudformation_deprecated -@mock_ec2_deprecated -def test_security_group_ingress_separate_from_security_group_by_id(): - ec2_conn = boto.ec2.connect_to_region("us-west-1") - ec2_conn.create_security_group( - "test-security-group1", "test security group") - - template = { - "AWSTemplateFormatVersion": "2010-09-09", - "Resources": { - "test-security-group2": { - "Type": "AWS::EC2::SecurityGroup", - "Properties": { - "GroupDescription": 
"test security group", - "Tags": [ - { - "Key": "sg-name", - "Value": "sg2" - } - ] - }, - }, - "test-sg-ingress": { - "Type": "AWS::EC2::SecurityGroupIngress", - "Properties": { - "GroupName": "test-security-group1", - "IpProtocol": "tcp", - "FromPort": "80", - "ToPort": "8080", - "SourceSecurityGroupId": {"Ref": "test-security-group2"}, - } - } - } - } - - template_json = json.dumps(template) - cf_conn = boto.cloudformation.connect_to_region("us-west-1") - cf_conn.create_stack( - "test_stack", - template_body=template_json, - ) - security_group1 = ec2_conn.get_all_security_groups( - groupnames=["test-security-group1"])[0] - security_group2 = ec2_conn.get_all_security_groups( - filters={"tag:sg-name": "sg2"})[0] - - security_group1.rules.should.have.length_of(1) - security_group1.rules[0].grants.should.have.length_of(1) - security_group1.rules[0].grants[ - 0].group_id.should.equal(security_group2.id) - security_group1.rules[0].ip_protocol.should.equal('tcp') - security_group1.rules[0].from_port.should.equal('80') - security_group1.rules[0].to_port.should.equal('8080') - - -@mock_cloudformation_deprecated -@mock_ec2_deprecated -def test_security_group_ingress_separate_from_security_group_by_id_using_vpc(): - vpc_conn = boto.vpc.connect_to_region("us-west-1") - vpc = vpc_conn.create_vpc("10.0.0.0/16") - - template = { - "AWSTemplateFormatVersion": "2010-09-09", - "Resources": { - "test-security-group1": { - "Type": "AWS::EC2::SecurityGroup", - "Properties": { - "GroupDescription": "test security group", - "VpcId": vpc.id, - "Tags": [ - { - "Key": "sg-name", - "Value": "sg1" - } - ] - }, - }, - "test-security-group2": { - "Type": "AWS::EC2::SecurityGroup", - "Properties": { - "GroupDescription": "test security group", - "VpcId": vpc.id, - "Tags": [ - { - "Key": "sg-name", - "Value": "sg2" - } - ] - }, - }, - "test-sg-ingress": { - "Type": "AWS::EC2::SecurityGroupIngress", - "Properties": { - "GroupId": {"Ref": "test-security-group1"}, - "VpcId": vpc.id, - "IpProtocol": "tcp", - "FromPort": "80", - "ToPort": "8080", - "SourceSecurityGroupId": {"Ref": "test-security-group2"}, - } - } - } - } - - template_json = json.dumps(template) - cf_conn = boto.cloudformation.connect_to_region("us-west-1") - cf_conn.create_stack( - "test_stack", - template_body=template_json, - ) - security_group1 = vpc_conn.get_all_security_groups( - filters={"tag:sg-name": "sg1"})[0] - security_group2 = vpc_conn.get_all_security_groups( - filters={"tag:sg-name": "sg2"})[0] - - security_group1.rules.should.have.length_of(1) - security_group1.rules[0].grants.should.have.length_of(1) - security_group1.rules[0].grants[ - 0].group_id.should.equal(security_group2.id) - security_group1.rules[0].ip_protocol.should.equal('tcp') - security_group1.rules[0].from_port.should.equal('80') - security_group1.rules[0].to_port.should.equal('8080') - - -@mock_cloudformation_deprecated -@mock_ec2_deprecated -def test_security_group_with_update(): - vpc_conn = boto.vpc.connect_to_region("us-west-1") - vpc1 = vpc_conn.create_vpc("10.0.0.0/16") - - template = { - "AWSTemplateFormatVersion": "2010-09-09", - "Resources": { - "test-security-group": { - "Type": "AWS::EC2::SecurityGroup", - "Properties": { - "GroupDescription": "test security group", - "VpcId": vpc1.id, - "Tags": [ - { - "Key": "sg-name", - "Value": "sg" - } - ] - }, - }, - } - } - - template_json = json.dumps(template) - cf_conn = boto.cloudformation.connect_to_region("us-west-1") - cf_conn.create_stack( - "test_stack", - template_body=template_json, - ) - security_group = 
vpc_conn.get_all_security_groups( - filters={"tag:sg-name": "sg"})[0] - security_group.vpc_id.should.equal(vpc1.id) - - vpc2 = vpc_conn.create_vpc("10.1.0.0/16") - template['Resources'][ - 'test-security-group']['Properties']['VpcId'] = vpc2.id - template_json = json.dumps(template) - cf_conn.update_stack( - "test_stack", - template_body=template_json, - ) - security_group = vpc_conn.get_all_security_groups( - filters={"tag:sg-name": "sg"})[0] - security_group.vpc_id.should.equal(vpc2.id) - - -@mock_cloudformation_deprecated -@mock_ec2_deprecated -def test_subnets_should_be_created_with_availability_zone(): - vpc_conn = boto.vpc.connect_to_region('us-west-1') - vpc = vpc_conn.create_vpc("10.0.0.0/16") - - subnet_template = { - "AWSTemplateFormatVersion": "2010-09-09", - "Resources": { - "testSubnet": { - "Type": "AWS::EC2::Subnet", - "Properties": { - "VpcId": vpc.id, - "CidrBlock": "10.0.0.0/24", - "AvailabilityZone": "us-west-1b", - } - } - } - } - cf_conn = boto.cloudformation.connect_to_region("us-west-1") - template_json = json.dumps(subnet_template) - cf_conn.create_stack( - "test_stack", - template_body=template_json, - ) - subnet = vpc_conn.get_all_subnets(filters={'cidrBlock': '10.0.0.0/24'})[0] - subnet.availability_zone.should.equal('us-west-1b') - - -@mock_cloudformation_deprecated -@mock_datapipeline_deprecated -def test_datapipeline(): - dp_template = { - "AWSTemplateFormatVersion": "2010-09-09", - "Resources": { - "dataPipeline": { - "Properties": { - "Activate": "true", - "Name": "testDataPipeline", - "PipelineObjects": [ - { - "Fields": [ - { - "Key": "failureAndRerunMode", - "StringValue": "CASCADE" - }, - { - "Key": "scheduleType", - "StringValue": "cron" - }, - { - "Key": "schedule", - "RefValue": "DefaultSchedule" - }, - { - "Key": "pipelineLogUri", - "StringValue": "s3://bucket/logs" - }, - { - "Key": "type", - "StringValue": "Default" - }, - ], - "Id": "Default", - "Name": "Default" - }, - { - "Fields": [ - { - "Key": "startDateTime", - "StringValue": "1970-01-01T01:00:00" - }, - { - "Key": "period", - "StringValue": "1 Day" - }, - { - "Key": "type", - "StringValue": "Schedule" - } - ], - "Id": "DefaultSchedule", - "Name": "RunOnce" - } - ], - "PipelineTags": [] - }, - "Type": "AWS::DataPipeline::Pipeline" - } - } - } - cf_conn = boto.cloudformation.connect_to_region("us-east-1") - template_json = json.dumps(dp_template) - stack_id = cf_conn.create_stack( - "test_stack", - template_body=template_json, - ) - - dp_conn = boto.datapipeline.connect_to_region('us-east-1') - data_pipelines = dp_conn.list_pipelines() - - data_pipelines['pipelineIdList'].should.have.length_of(1) - data_pipelines['pipelineIdList'][0][ - 'name'].should.equal('testDataPipeline') - - stack_resources = cf_conn.list_stack_resources(stack_id) - stack_resources.should.have.length_of(1) - stack_resources[0].physical_resource_id.should.equal( - data_pipelines['pipelineIdList'][0]['id']) - - -@mock_cloudformation -@mock_lambda -def test_lambda_function(): - # switch this to python as backend lambda only supports python execution. 
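-    # The stack below only stores the function definition; the assertions then
-    # read its configuration back via list_functions() without invoking it.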
- lambda_code = """ -def lambda_handler(event, context): - return (event, context) -""" - template = { - "AWSTemplateFormatVersion": "2010-09-09", - "Resources": { - "lambdaTest": { - "Type": "AWS::Lambda::Function", - "Properties": { - "Code": { - # CloudFormation expects a string as ZipFile, not a ZIP file base64-encoded - "ZipFile": {"Fn::Join": ["\n", lambda_code.splitlines()]} - }, - "Handler": "lambda_function.handler", - "Description": "Test function", - "MemorySize": 128, - "Role": "test-role", - "Runtime": "python2.7" - } - } - } - } - - template_json = json.dumps(template) - cf_conn = boto3.client('cloudformation', 'us-east-1') - cf_conn.create_stack( - StackName="test_stack", - TemplateBody=template_json, - ) - - conn = boto3.client('lambda', 'us-east-1') - result = conn.list_functions() - result['Functions'].should.have.length_of(1) - result['Functions'][0]['Description'].should.equal('Test function') - result['Functions'][0]['Handler'].should.equal('lambda_function.handler') - result['Functions'][0]['MemorySize'].should.equal(128) - result['Functions'][0]['Role'].should.equal('test-role') - result['Functions'][0]['Runtime'].should.equal('python2.7') - - -@mock_cloudformation -@mock_ec2 -def test_nat_gateway(): - ec2_conn = boto3.client('ec2', 'us-east-1') - vpc_id = ec2_conn.create_vpc(CidrBlock="10.0.0.0/16")['Vpc']['VpcId'] - subnet_id = ec2_conn.create_subnet( - CidrBlock='10.0.1.0/24', VpcId=vpc_id)['Subnet']['SubnetId'] - route_table_id = ec2_conn.create_route_table( - VpcId=vpc_id)['RouteTable']['RouteTableId'] - - template = { - "AWSTemplateFormatVersion": "2010-09-09", - "Resources": { - "NAT": { - "DependsOn": "vpcgatewayattachment", - "Type": "AWS::EC2::NatGateway", - "Properties": { - "AllocationId": {"Fn::GetAtt": ["EIP", "AllocationId"]}, - "SubnetId": subnet_id - } - }, - "EIP": { - "Type": "AWS::EC2::EIP", - "Properties": { - "Domain": "vpc" - } - }, - "Route": { - "Type": "AWS::EC2::Route", - "Properties": { - "RouteTableId": route_table_id, - "DestinationCidrBlock": "0.0.0.0/0", - "NatGatewayId": {"Ref": "NAT"} - } - }, - "internetgateway": { - "Type": "AWS::EC2::InternetGateway" - }, - "vpcgatewayattachment": { - "Type": "AWS::EC2::VPCGatewayAttachment", - "Properties": { - "InternetGatewayId": { - "Ref": "internetgateway" - }, - "VpcId": vpc_id, - }, - } - } - } - - cf_conn = boto3.client('cloudformation', 'us-east-1') - cf_conn.create_stack( - StackName="test_stack", - TemplateBody=json.dumps(template), - ) - - result = ec2_conn.describe_nat_gateways() - - result['NatGateways'].should.have.length_of(1) - result['NatGateways'][0]['VpcId'].should.equal(vpc_id) - result['NatGateways'][0]['SubnetId'].should.equal(subnet_id) - result['NatGateways'][0]['State'].should.equal('available') - - -@mock_cloudformation() -@mock_kms() -def test_stack_kms(): - kms_key_template = { - 'Resources': { - 'kmskey': { - 'Properties': { - 'Description': 'A kms key', - 'EnableKeyRotation': True, - 'Enabled': True, - 'KeyPolicy': 'a policy', - }, - 'Type': 'AWS::KMS::Key' - } - } - } - kms_key_template_json = json.dumps(kms_key_template) - - cf_conn = boto3.client('cloudformation', 'us-east-1') - cf_conn.create_stack( - StackName='test_stack', - TemplateBody=kms_key_template_json, - ) - - kms_conn = boto3.client('kms', 'us-east-1') - keys = kms_conn.list_keys()['Keys'] - len(keys).should.equal(1) - result = kms_conn.describe_key(KeyId=keys[0]['KeyId']) - - result['KeyMetadata']['Enabled'].should.equal(True) - result['KeyMetadata']['KeyUsage'].should.equal('ENCRYPT_DECRYPT') - - 
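-
-# The two tests below exercise AWS::EC2::SpotFleet via CloudFormation. Both
-# build a SpotFleetRequestConfigData with an IamFleetRole, a TargetCapacity of
-# 6 and two LaunchSpecifications; the first also pins an explicit SpotPrice
-# ("0.12") and checks that it round-trips, while the second omits SpotPrice
-# and asserts that the mock does not invent one.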
-@mock_cloudformation() -@mock_ec2() -def test_stack_spot_fleet(): - conn = boto3.client('ec2', 'us-east-1') - - vpc = conn.create_vpc(CidrBlock="10.0.0.0/8")['Vpc'] - subnet = conn.create_subnet( - VpcId=vpc['VpcId'], CidrBlock='10.0.0.0/16', AvailabilityZone='us-east-1a')['Subnet'] - subnet_id = subnet['SubnetId'] - - spot_fleet_template = { - 'Resources': { - "SpotFleet": { - "Type": "AWS::EC2::SpotFleet", - "Properties": { - "SpotFleetRequestConfigData": { - "IamFleetRole": "arn:aws:iam::123456789012:role/fleet", - "SpotPrice": "0.12", - "TargetCapacity": 6, - "AllocationStrategy": "diversified", - "LaunchSpecifications": [ - { - "EbsOptimized": "false", - "InstanceType": 't2.small', - "ImageId": "ami-1234", - "SubnetId": subnet_id, - "WeightedCapacity": "2", - "SpotPrice": "0.13", - }, - { - "EbsOptimized": "true", - "InstanceType": 't2.large', - "ImageId": "ami-1234", - "Monitoring": {"Enabled": "true"}, - "SecurityGroups": [{"GroupId": "sg-123"}], - "SubnetId": subnet_id, - "IamInstanceProfile": {"Arn": "arn:aws:iam::123456789012:role/fleet"}, - "WeightedCapacity": "4", - "SpotPrice": "10.00", - } - ] - } - } - } - } - } - spot_fleet_template_json = json.dumps(spot_fleet_template) - - cf_conn = boto3.client('cloudformation', 'us-east-1') - stack_id = cf_conn.create_stack( - StackName='test_stack', - TemplateBody=spot_fleet_template_json, - )['StackId'] - - stack_resources = cf_conn.list_stack_resources(StackName=stack_id) - stack_resources['StackResourceSummaries'].should.have.length_of(1) - spot_fleet_id = stack_resources[ - 'StackResourceSummaries'][0]['PhysicalResourceId'] - - spot_fleet_requests = conn.describe_spot_fleet_requests( - SpotFleetRequestIds=[spot_fleet_id])['SpotFleetRequestConfigs'] - len(spot_fleet_requests).should.equal(1) - spot_fleet_request = spot_fleet_requests[0] - spot_fleet_request['SpotFleetRequestState'].should.equal("active") - spot_fleet_config = spot_fleet_request['SpotFleetRequestConfig'] - - spot_fleet_config['SpotPrice'].should.equal('0.12') - spot_fleet_config['TargetCapacity'].should.equal(6) - spot_fleet_config['IamFleetRole'].should.equal( - 'arn:aws:iam::123456789012:role/fleet') - spot_fleet_config['AllocationStrategy'].should.equal('diversified') - spot_fleet_config['FulfilledCapacity'].should.equal(6.0) - - len(spot_fleet_config['LaunchSpecifications']).should.equal(2) - launch_spec = spot_fleet_config['LaunchSpecifications'][0] - - launch_spec['EbsOptimized'].should.equal(False) - launch_spec['ImageId'].should.equal("ami-1234") - launch_spec['InstanceType'].should.equal("t2.small") - launch_spec['SubnetId'].should.equal(subnet_id) - launch_spec['SpotPrice'].should.equal("0.13") - launch_spec['WeightedCapacity'].should.equal(2.0) - - -@mock_cloudformation() -@mock_ec2() -def test_stack_spot_fleet_should_figure_out_default_price(): - conn = boto3.client('ec2', 'us-east-1') - - vpc = conn.create_vpc(CidrBlock="10.0.0.0/8")['Vpc'] - subnet = conn.create_subnet( - VpcId=vpc['VpcId'], CidrBlock='10.0.0.0/16', AvailabilityZone='us-east-1a')['Subnet'] - subnet_id = subnet['SubnetId'] - - spot_fleet_template = { - 'Resources': { - "SpotFleet1": { - "Type": "AWS::EC2::SpotFleet", - "Properties": { - "SpotFleetRequestConfigData": { - "IamFleetRole": "arn:aws:iam::123456789012:role/fleet", - "TargetCapacity": 6, - "AllocationStrategy": "diversified", - "LaunchSpecifications": [ - { - "EbsOptimized": "false", - "InstanceType": 't2.small', - "ImageId": "ami-1234", - "SubnetId": subnet_id, - "WeightedCapacity": "2", - }, - { - "EbsOptimized": "true", - 
"InstanceType": 't2.large', - "ImageId": "ami-1234", - "Monitoring": {"Enabled": "true"}, - "SecurityGroups": [{"GroupId": "sg-123"}], - "SubnetId": subnet_id, - "IamInstanceProfile": {"Arn": "arn:aws:iam::123456789012:role/fleet"}, - "WeightedCapacity": "4", - } - ] - } - } - } - } - } - spot_fleet_template_json = json.dumps(spot_fleet_template) - - cf_conn = boto3.client('cloudformation', 'us-east-1') - stack_id = cf_conn.create_stack( - StackName='test_stack', - TemplateBody=spot_fleet_template_json, - )['StackId'] - - stack_resources = cf_conn.list_stack_resources(StackName=stack_id) - stack_resources['StackResourceSummaries'].should.have.length_of(1) - spot_fleet_id = stack_resources[ - 'StackResourceSummaries'][0]['PhysicalResourceId'] - - spot_fleet_requests = conn.describe_spot_fleet_requests( - SpotFleetRequestIds=[spot_fleet_id])['SpotFleetRequestConfigs'] - len(spot_fleet_requests).should.equal(1) - spot_fleet_request = spot_fleet_requests[0] - spot_fleet_request['SpotFleetRequestState'].should.equal("active") - spot_fleet_config = spot_fleet_request['SpotFleetRequestConfig'] - - assert 'SpotPrice' not in spot_fleet_config - len(spot_fleet_config['LaunchSpecifications']).should.equal(2) - launch_spec1 = spot_fleet_config['LaunchSpecifications'][0] - launch_spec2 = spot_fleet_config['LaunchSpecifications'][1] - - assert 'SpotPrice' not in launch_spec1 - assert 'SpotPrice' not in launch_spec2 - - -@mock_ec2 -@mock_elbv2 -@mock_cloudformation -def test_stack_elbv2_resources_integration(): - alb_template = { - "AWSTemplateFormatVersion": "2010-09-09", - "Outputs": { - "albdns": { - "Description": "Load balanacer DNS", - "Value": {"Fn::GetAtt": ["alb", "DNSName"]}, - }, - "albname": { - "Description": "Load balancer name", - "Value": {"Fn::GetAtt": ["alb", "LoadBalancerName"]}, - }, - "canonicalhostedzoneid": { - "Description": "Load balancer canonical hosted zone ID", - "Value": {"Fn::GetAtt": ["alb", "CanonicalHostedZoneID"]}, - }, - }, - "Resources": { - "alb": { - "Type": "AWS::ElasticLoadBalancingV2::LoadBalancer", - "Properties": { - "Name": "myelbv2", - "Scheme": "internet-facing", - "Subnets": [{ - "Ref": "mysubnet", - }], - "SecurityGroups": [{ - "Ref": "mysg", - }], - "Type": "application", - "IpAddressType": "ipv4", - } - }, - "mytargetgroup1": { - "Type": "AWS::ElasticLoadBalancingV2::TargetGroup", - "Properties": { - "HealthCheckIntervalSeconds": 30, - "HealthCheckPath": "/status", - "HealthCheckPort": 80, - "HealthCheckProtocol": "HTTP", - "HealthCheckTimeoutSeconds": 5, - "HealthyThresholdCount": 30, - "UnhealthyThresholdCount": 5, - "Matcher": { - "HttpCode": "200,201" - }, - "Name": "mytargetgroup1", - "Port": 80, - "Protocol": "HTTP", - "TargetType": "instance", - "Targets": [{ - "Id": { - "Ref": "ec2instance", - "Port": 80, - }, - }], - "VpcId": { - "Ref": "myvpc", - } - } - }, - "mytargetgroup2": { - "Type": "AWS::ElasticLoadBalancingV2::TargetGroup", - "Properties": { - "HealthCheckIntervalSeconds": 30, - "HealthCheckPath": "/status", - "HealthCheckPort": 8080, - "HealthCheckProtocol": "HTTP", - "HealthCheckTimeoutSeconds": 5, - "HealthyThresholdCount": 30, - "UnhealthyThresholdCount": 5, - "Name": "mytargetgroup2", - "Port": 8080, - "Protocol": "HTTP", - "TargetType": "instance", - "Targets": [{ - "Id": { - "Ref": "ec2instance", - "Port": 8080, - }, - }], - "VpcId": { - "Ref": "myvpc", - } - } - }, - "listener": { - "Type": "AWS::ElasticLoadBalancingV2::Listener", - "Properties": { - "DefaultActions": [{ - "Type": "forward", - "TargetGroupArn": {"Ref": 
"mytargetgroup1"} - }], - "LoadBalancerArn": {"Ref": "alb"}, - "Port": "80", - "Protocol": "HTTP" - } - }, - "myvpc": { - "Type": "AWS::EC2::VPC", - "Properties": { - "CidrBlock": "10.0.0.0/16", - } - }, - "mysubnet": { - "Type": "AWS::EC2::Subnet", - "Properties": { - "CidrBlock": "10.0.0.0/27", - "VpcId": {"Ref": "myvpc"}, - } - }, - "mysg": { - "Type": "AWS::EC2::SecurityGroup", - "Properties": { - "GroupName": "mysg", - "GroupDescription": "test security group", - "VpcId": {"Ref": "myvpc"} - } - }, - "ec2instance": { - "Type": "AWS::EC2::Instance", - "Properties": { - "ImageId": "ami-1234abcd", - "UserData": "some user data", - } - }, - }, - } - alb_template_json = json.dumps(alb_template) - - cfn_conn = boto3.client("cloudformation", "us-west-1") - cfn_conn.create_stack( - StackName="elb_stack", - TemplateBody=alb_template_json, - ) - - elbv2_conn = boto3.client("elbv2", "us-west-1") - - load_balancers = elbv2_conn.describe_load_balancers()['LoadBalancers'] - len(load_balancers).should.equal(1) - load_balancers[0]['LoadBalancerName'].should.equal('myelbv2') - load_balancers[0]['Scheme'].should.equal('internet-facing') - load_balancers[0]['Type'].should.equal('application') - load_balancers[0]['IpAddressType'].should.equal('ipv4') - - target_groups = sorted( - elbv2_conn.describe_target_groups()['TargetGroups'], - key=lambda tg: tg['TargetGroupName']) # sort to do comparison with indexes - len(target_groups).should.equal(2) - target_groups[0]['HealthCheckIntervalSeconds'].should.equal(30) - target_groups[0]['HealthCheckPath'].should.equal('/status') - target_groups[0]['HealthCheckPort'].should.equal('80') - target_groups[0]['HealthCheckProtocol'].should.equal('HTTP') - target_groups[0]['HealthCheckTimeoutSeconds'].should.equal(5) - target_groups[0]['HealthyThresholdCount'].should.equal(30) - target_groups[0]['UnhealthyThresholdCount'].should.equal(5) - target_groups[0]['Matcher'].should.equal({'HttpCode': '200,201'}) - target_groups[0]['TargetGroupName'].should.equal('mytargetgroup1') - target_groups[0]['Port'].should.equal(80) - target_groups[0]['Protocol'].should.equal('HTTP') - target_groups[0]['TargetType'].should.equal('instance') - - target_groups[1]['HealthCheckIntervalSeconds'].should.equal(30) - target_groups[1]['HealthCheckPath'].should.equal('/status') - target_groups[1]['HealthCheckPort'].should.equal('8080') - target_groups[1]['HealthCheckProtocol'].should.equal('HTTP') - target_groups[1]['HealthCheckTimeoutSeconds'].should.equal(5) - target_groups[1]['HealthyThresholdCount'].should.equal(30) - target_groups[1]['UnhealthyThresholdCount'].should.equal(5) - target_groups[1]['Matcher'].should.equal({'HttpCode': '200'}) - target_groups[1]['TargetGroupName'].should.equal('mytargetgroup2') - target_groups[1]['Port'].should.equal(8080) - target_groups[1]['Protocol'].should.equal('HTTP') - target_groups[1]['TargetType'].should.equal('instance') - - listeners = elbv2_conn.describe_listeners(LoadBalancerArn=load_balancers[0]['LoadBalancerArn'])['Listeners'] - len(listeners).should.equal(1) - listeners[0]['LoadBalancerArn'].should.equal(load_balancers[0]['LoadBalancerArn']) - listeners[0]['Port'].should.equal(80) - listeners[0]['Protocol'].should.equal('HTTP') - listeners[0]['DefaultActions'].should.equal([{ - "Type": "forward", - "TargetGroupArn": target_groups[0]['TargetGroupArn'] - }]) - - # test outputs - stacks = cfn_conn.describe_stacks(StackName='elb_stack')['Stacks'] - len(stacks).should.equal(1) - - dns = list(filter(lambda item: item['OutputKey'] == 'albdns', 
stacks[0]['Outputs']))[0] - name = list(filter(lambda item: item['OutputKey'] == 'albname', stacks[0]['Outputs']))[0] - - dns['OutputValue'].should.equal(load_balancers[0]['DNSName']) - name['OutputValue'].should.equal(load_balancers[0]['LoadBalancerName']) +from __future__ import unicode_literals +import json + +import base64 +import boto +import boto.cloudformation +import boto.datapipeline +import boto.ec2 +import boto.ec2.autoscale +import boto.ec2.elb +from boto.exception import BotoServerError +import boto.iam +import boto.redshift +import boto.sns +import boto.sqs +import boto.vpc +import boto3 +import sure # noqa + +from moto import ( + mock_autoscaling_deprecated, + mock_cloudformation, + mock_cloudformation_deprecated, + mock_datapipeline_deprecated, + mock_ec2, + mock_ec2_deprecated, + mock_elb, + mock_elb_deprecated, + mock_iam_deprecated, + mock_kms, + mock_lambda, + mock_rds_deprecated, + mock_rds2, + mock_rds2_deprecated, + mock_redshift, + mock_redshift_deprecated, + mock_route53_deprecated, + mock_sns_deprecated, + mock_sqs, + mock_sqs_deprecated, + mock_elbv2) + +from .fixtures import ( + ec2_classic_eip, + fn_join, + rds_mysql_with_db_parameter_group, + rds_mysql_with_read_replica, + redshift, + route53_ec2_instance_with_public_ip, + route53_health_check, + route53_roundrobin, + single_instance_with_ebs_volume, + vpc_eip, + vpc_single_instance_in_subnet, +) + + +@mock_cloudformation_deprecated() +def test_stack_sqs_integration(): + sqs_template = { + "AWSTemplateFormatVersion": "2010-09-09", + "Resources": { + "QueueGroup": { + + "Type": "AWS::SQS::Queue", + "Properties": { + "QueueName": "my-queue", + "VisibilityTimeout": 60, + } + }, + }, + } + sqs_template_json = json.dumps(sqs_template) + + conn = boto.cloudformation.connect_to_region("us-west-1") + conn.create_stack( + "test_stack", + template_body=sqs_template_json, + ) + + stack = conn.describe_stacks()[0] + queue = stack.describe_resources()[0] + queue.resource_type.should.equal('AWS::SQS::Queue') + queue.logical_resource_id.should.equal("QueueGroup") + queue.physical_resource_id.should.equal("my-queue") + + +@mock_cloudformation_deprecated() +def test_stack_list_resources(): + sqs_template = { + "AWSTemplateFormatVersion": "2010-09-09", + "Resources": { + "QueueGroup": { + + "Type": "AWS::SQS::Queue", + "Properties": { + "QueueName": "my-queue", + "VisibilityTimeout": 60, + } + }, + }, + } + sqs_template_json = json.dumps(sqs_template) + + conn = boto.cloudformation.connect_to_region("us-west-1") + conn.create_stack( + "test_stack", + template_body=sqs_template_json, + ) + + resources = conn.list_stack_resources("test_stack") + assert len(resources) == 1 + queue = resources[0] + queue.resource_type.should.equal('AWS::SQS::Queue') + queue.logical_resource_id.should.equal("QueueGroup") + queue.physical_resource_id.should.equal("my-queue") + + +@mock_cloudformation_deprecated() +@mock_sqs_deprecated() +def test_update_stack(): + sqs_template = { + "AWSTemplateFormatVersion": "2010-09-09", + "Resources": { + "QueueGroup": { + + "Type": "AWS::SQS::Queue", + "Properties": { + "QueueName": "my-queue", + "VisibilityTimeout": 60, + } + }, + }, + } + sqs_template_json = json.dumps(sqs_template) + + conn = boto.cloudformation.connect_to_region("us-west-1") + conn.create_stack( + "test_stack", + template_body=sqs_template_json, + ) + + sqs_conn = boto.sqs.connect_to_region("us-west-1") + queues = sqs_conn.get_all_queues() + queues.should.have.length_of(1) + queues[0].get_attributes('VisibilityTimeout')[ + 
'VisibilityTimeout'].should.equal('60') + + sqs_template['Resources']['QueueGroup'][ + 'Properties']['VisibilityTimeout'] = 100 + sqs_template_json = json.dumps(sqs_template) + conn.update_stack("test_stack", sqs_template_json) + + queues = sqs_conn.get_all_queues() + queues.should.have.length_of(1) + queues[0].get_attributes('VisibilityTimeout')[ + 'VisibilityTimeout'].should.equal('100') + + +@mock_cloudformation_deprecated() +@mock_sqs_deprecated() +def test_update_stack_and_remove_resource(): + sqs_template = { + "AWSTemplateFormatVersion": "2010-09-09", + "Resources": { + "QueueGroup": { + + "Type": "AWS::SQS::Queue", + "Properties": { + "QueueName": "my-queue", + "VisibilityTimeout": 60, + } + }, + }, + } + sqs_template_json = json.dumps(sqs_template) + + conn = boto.cloudformation.connect_to_region("us-west-1") + conn.create_stack( + "test_stack", + template_body=sqs_template_json, + ) + + sqs_conn = boto.sqs.connect_to_region("us-west-1") + queues = sqs_conn.get_all_queues() + queues.should.have.length_of(1) + + sqs_template['Resources'].pop('QueueGroup') + sqs_template_json = json.dumps(sqs_template) + conn.update_stack("test_stack", sqs_template_json) + + queues = sqs_conn.get_all_queues() + queues.should.have.length_of(0) + + +@mock_cloudformation_deprecated() +@mock_sqs_deprecated() +def test_update_stack_and_add_resource(): + sqs_template = { + "AWSTemplateFormatVersion": "2010-09-09", + "Resources": {}, + } + sqs_template_json = json.dumps(sqs_template) + + conn = boto.cloudformation.connect_to_region("us-west-1") + conn.create_stack( + "test_stack", + template_body=sqs_template_json, + ) + + sqs_conn = boto.sqs.connect_to_region("us-west-1") + queues = sqs_conn.get_all_queues() + queues.should.have.length_of(0) + + sqs_template = { + "AWSTemplateFormatVersion": "2010-09-09", + "Resources": { + "QueueGroup": { + + "Type": "AWS::SQS::Queue", + "Properties": { + "QueueName": "my-queue", + "VisibilityTimeout": 60, + } + }, + }, + } + sqs_template_json = json.dumps(sqs_template) + conn.update_stack("test_stack", sqs_template_json) + + queues = sqs_conn.get_all_queues() + queues.should.have.length_of(1) + + +@mock_ec2_deprecated() +@mock_cloudformation_deprecated() +def test_stack_ec2_integration(): + ec2_template = { + "AWSTemplateFormatVersion": "2010-09-09", + "Resources": { + "WebServerGroup": { + "Type": "AWS::EC2::Instance", + "Properties": { + "ImageId": "ami-1234abcd", + "UserData": "some user data", + } + }, + }, + } + ec2_template_json = json.dumps(ec2_template) + + conn = boto.cloudformation.connect_to_region("us-west-1") + conn.create_stack( + "ec2_stack", + template_body=ec2_template_json, + ) + + ec2_conn = boto.ec2.connect_to_region("us-west-1") + reservation = ec2_conn.get_all_instances()[0] + ec2_instance = reservation.instances[0] + + stack = conn.describe_stacks()[0] + instance = stack.describe_resources()[0] + instance.resource_type.should.equal('AWS::EC2::Instance') + instance.logical_resource_id.should.contain("WebServerGroup") + instance.physical_resource_id.should.equal(ec2_instance.id) + + +@mock_ec2_deprecated() +@mock_elb_deprecated() +@mock_cloudformation_deprecated() +def test_stack_elb_integration_with_attached_ec2_instances(): + elb_template = { + "AWSTemplateFormatVersion": "2010-09-09", + "Resources": { + "MyELB": { + "Type": "AWS::ElasticLoadBalancing::LoadBalancer", + "Properties": { + "Instances": [{"Ref": "Ec2Instance1"}], + "LoadBalancerName": "test-elb", + "AvailabilityZones": ['us-east-1'], + "Listeners": [ + { + "InstancePort": "80", + 
"LoadBalancerPort": "80", + "Protocol": "HTTP", + } + ], + } + }, + "Ec2Instance1": { + "Type": "AWS::EC2::Instance", + "Properties": { + "ImageId": "ami-1234abcd", + "UserData": "some user data", + } + }, + }, + } + elb_template_json = json.dumps(elb_template) + + conn = boto.cloudformation.connect_to_region("us-west-1") + conn.create_stack( + "elb_stack", + template_body=elb_template_json, + ) + + elb_conn = boto.ec2.elb.connect_to_region("us-west-1") + load_balancer = elb_conn.get_all_load_balancers()[0] + + ec2_conn = boto.ec2.connect_to_region("us-west-1") + reservation = ec2_conn.get_all_instances()[0] + ec2_instance = reservation.instances[0] + + load_balancer.instances[0].id.should.equal(ec2_instance.id) + list(load_balancer.availability_zones).should.equal(['us-east-1']) + + +@mock_elb_deprecated() +@mock_cloudformation_deprecated() +def test_stack_elb_integration_with_health_check(): + elb_template = { + "AWSTemplateFormatVersion": "2010-09-09", + "Resources": { + "MyELB": { + "Type": "AWS::ElasticLoadBalancing::LoadBalancer", + "Properties": { + "LoadBalancerName": "test-elb", + "AvailabilityZones": ['us-west-1'], + "HealthCheck": { + "HealthyThreshold": "3", + "Interval": "5", + "Target": "HTTP:80/healthcheck", + "Timeout": "4", + "UnhealthyThreshold": "2", + }, + "Listeners": [ + { + "InstancePort": "80", + "LoadBalancerPort": "80", + "Protocol": "HTTP", + } + ], + } + }, + }, + } + elb_template_json = json.dumps(elb_template) + + conn = boto.cloudformation.connect_to_region("us-west-1") + conn.create_stack( + "elb_stack", + template_body=elb_template_json, + ) + + elb_conn = boto.ec2.elb.connect_to_region("us-west-1") + load_balancer = elb_conn.get_all_load_balancers()[0] + health_check = load_balancer.health_check + + health_check.healthy_threshold.should.equal(3) + health_check.interval.should.equal(5) + health_check.target.should.equal("HTTP:80/healthcheck") + health_check.timeout.should.equal(4) + health_check.unhealthy_threshold.should.equal(2) + + +@mock_elb_deprecated() +@mock_cloudformation_deprecated() +def test_stack_elb_integration_with_update(): + elb_template = { + "AWSTemplateFormatVersion": "2010-09-09", + "Resources": { + "MyELB": { + "Type": "AWS::ElasticLoadBalancing::LoadBalancer", + "Properties": { + "LoadBalancerName": "test-elb", + "AvailabilityZones": ['us-west-1a'], + "Listeners": [ + { + "InstancePort": "80", + "LoadBalancerPort": "80", + "Protocol": "HTTP", + } + ], + "Policies": {"Ref": "AWS::NoValue"}, + } + }, + }, + } + elb_template_json = json.dumps(elb_template) + + conn = boto.cloudformation.connect_to_region("us-west-1") + conn.create_stack( + "elb_stack", + template_body=elb_template_json, + ) + + elb_conn = boto.ec2.elb.connect_to_region("us-west-1") + load_balancer = elb_conn.get_all_load_balancers()[0] + load_balancer.availability_zones[0].should.equal('us-west-1a') + + elb_template['Resources']['MyELB']['Properties'][ + 'AvailabilityZones'] = ['us-west-1b'] + elb_template_json = json.dumps(elb_template) + conn.update_stack( + "elb_stack", + template_body=elb_template_json, + ) + load_balancer = elb_conn.get_all_load_balancers()[0] + load_balancer.availability_zones[0].should.equal('us-west-1b') + + +@mock_ec2_deprecated() +@mock_redshift_deprecated() +@mock_cloudformation_deprecated() +def test_redshift_stack(): + redshift_template_json = json.dumps(redshift.template) + + vpc_conn = boto.vpc.connect_to_region("us-west-2") + conn = boto.cloudformation.connect_to_region("us-west-2") + conn.create_stack( + "redshift_stack", + 
template_body=redshift_template_json, + parameters=[ + ("DatabaseName", "mydb"), + ("ClusterType", "multi-node"), + ("NumberOfNodes", 2), + ("NodeType", "dw1.xlarge"), + ("MasterUsername", "myuser"), + ("MasterUserPassword", "mypass"), + ("InboundTraffic", "10.0.0.1/16"), + ("PortNumber", 5439), + ] + ) + + redshift_conn = boto.redshift.connect_to_region("us-west-2") + + cluster_res = redshift_conn.describe_clusters() + clusters = cluster_res['DescribeClustersResponse'][ + 'DescribeClustersResult']['Clusters'] + clusters.should.have.length_of(1) + cluster = clusters[0] + cluster['DBName'].should.equal("mydb") + cluster['NumberOfNodes'].should.equal(2) + cluster['NodeType'].should.equal("dw1.xlarge") + cluster['MasterUsername'].should.equal("myuser") + cluster['Port'].should.equal(5439) + cluster['VpcSecurityGroups'].should.have.length_of(1) + security_group_id = cluster['VpcSecurityGroups'][0]['VpcSecurityGroupId'] + + groups = vpc_conn.get_all_security_groups(group_ids=[security_group_id]) + groups.should.have.length_of(1) + group = groups[0] + group.rules.should.have.length_of(1) + group.rules[0].grants[0].cidr_ip.should.equal("10.0.0.1/16") + + +@mock_ec2_deprecated() +@mock_cloudformation_deprecated() +def test_stack_security_groups(): + security_group_template = { + "AWSTemplateFormatVersion": "2010-09-09", + "Resources": { + "my-security-group": { + "Type": "AWS::EC2::SecurityGroup", + "Properties": { + "GroupDescription": "My other group", + }, + }, + "Ec2Instance2": { + "Type": "AWS::EC2::Instance", + "Properties": { + "SecurityGroups": [{"Ref": "InstanceSecurityGroup"}], + "ImageId": "ami-1234abcd", + } + }, + "InstanceSecurityGroup": { + "Type": "AWS::EC2::SecurityGroup", + "Properties": { + "GroupDescription": "My security group", + "Tags": [ + { + "Key": "bar", + "Value": "baz" + } + ], + "SecurityGroupIngress": [{ + "IpProtocol": "tcp", + "FromPort": "22", + "ToPort": "22", + "CidrIp": "123.123.123.123/32", + }, { + "IpProtocol": "tcp", + "FromPort": "80", + "ToPort": "8000", + "SourceSecurityGroupId": {"Ref": "my-security-group"}, + }] + } + } + }, + } + security_group_template_json = json.dumps(security_group_template) + + conn = boto.cloudformation.connect_to_region("us-west-1") + conn.create_stack( + "security_group_stack", + template_body=security_group_template_json, + tags={"foo": "bar"} + ) + + ec2_conn = boto.ec2.connect_to_region("us-west-1") + instance_group = ec2_conn.get_all_security_groups( + filters={'description': ['My security group']})[0] + other_group = ec2_conn.get_all_security_groups( + filters={'description': ['My other group']})[0] + + reservation = ec2_conn.get_all_instances()[0] + ec2_instance = reservation.instances[0] + + ec2_instance.groups[0].id.should.equal(instance_group.id) + instance_group.description.should.equal("My security group") + instance_group.tags.should.have.key('foo').which.should.equal('bar') + instance_group.tags.should.have.key('bar').which.should.equal('baz') + rule1, rule2 = instance_group.rules + int(rule1.to_port).should.equal(22) + int(rule1.from_port).should.equal(22) + rule1.grants[0].cidr_ip.should.equal("123.123.123.123/32") + rule1.ip_protocol.should.equal('tcp') + + int(rule2.to_port).should.equal(8000) + int(rule2.from_port).should.equal(80) + rule2.ip_protocol.should.equal('tcp') + rule2.grants[0].group_id.should.equal(other_group.id) + + +@mock_autoscaling_deprecated() +@mock_elb_deprecated() +@mock_cloudformation_deprecated() +@mock_ec2_deprecated() +def test_autoscaling_group_with_elb(): + web_setup_template = { + 
"AWSTemplateFormatVersion": "2010-09-09", + + "Resources": { + "my-as-group": { + "Type": "AWS::AutoScaling::AutoScalingGroup", + "Properties": { + "AvailabilityZones": ['us-east1'], + "LaunchConfigurationName": {"Ref": "my-launch-config"}, + "MinSize": "2", + "MaxSize": "2", + "DesiredCapacity": "2", + "LoadBalancerNames": [{"Ref": "my-elb"}], + "Tags": [ + { + "Key": "propagated-test-tag", "Value": "propagated-test-tag-value", + "PropagateAtLaunch": True}, + { + "Key": "not-propagated-test-tag", + "Value": "not-propagated-test-tag-value", + "PropagateAtLaunch": False + } + ] + }, + }, + + "my-launch-config": { + "Type": "AWS::AutoScaling::LaunchConfiguration", + "Properties": { + "ImageId": "ami-1234abcd", + "UserData": "some user data", + } + }, + + "my-elb": { + "Type": "AWS::ElasticLoadBalancing::LoadBalancer", + "Properties": { + "AvailabilityZones": ['us-east1'], + "Listeners": [{ + "LoadBalancerPort": "80", + "InstancePort": "80", + "Protocol": "HTTP", + }], + "LoadBalancerName": "my-elb", + "HealthCheck": { + "Target": "HTTP:80", + "HealthyThreshold": "3", + "UnhealthyThreshold": "5", + "Interval": "30", + "Timeout": "5", + }, + }, + }, + } + } + + web_setup_template_json = json.dumps(web_setup_template) + + conn = boto.cloudformation.connect_to_region("us-west-1") + conn.create_stack( + "web_stack", + template_body=web_setup_template_json, + ) + + autoscale_conn = boto.ec2.autoscale.connect_to_region("us-west-1") + autoscale_group = autoscale_conn.get_all_groups()[0] + autoscale_group.launch_config_name.should.contain("my-launch-config") + autoscale_group.load_balancers[0].should.equal('my-elb') + + # Confirm the Launch config was actually created + autoscale_conn.get_all_launch_configurations().should.have.length_of(1) + + # Confirm the ELB was actually created + elb_conn = boto.ec2.elb.connect_to_region("us-west-1") + elb_conn.get_all_load_balancers().should.have.length_of(1) + + stack = conn.describe_stacks()[0] + resources = stack.describe_resources() + as_group_resource = [resource for resource in resources if resource.resource_type == + 'AWS::AutoScaling::AutoScalingGroup'][0] + as_group_resource.physical_resource_id.should.contain("my-as-group") + + launch_config_resource = [ + resource for resource in resources if + resource.resource_type == 'AWS::AutoScaling::LaunchConfiguration'][0] + launch_config_resource.physical_resource_id.should.contain( + "my-launch-config") + + elb_resource = [resource for resource in resources if resource.resource_type == + 'AWS::ElasticLoadBalancing::LoadBalancer'][0] + elb_resource.physical_resource_id.should.contain("my-elb") + + # confirm the instances were created with the right tags + ec2_conn = boto.ec2.connect_to_region('us-west-1') + reservations = ec2_conn.get_all_reservations() + len(reservations).should.equal(1) + reservation = reservations[0] + len(reservation.instances).should.equal(2) + for instance in reservation.instances: + instance.tags['propagated-test-tag'].should.equal('propagated-test-tag-value') + instance.tags.keys().should_not.contain('not-propagated-test-tag') + + +@mock_autoscaling_deprecated() +@mock_cloudformation_deprecated() +@mock_ec2_deprecated() +def test_autoscaling_group_update(): + asg_template = { + "AWSTemplateFormatVersion": "2010-09-09", + "Resources": { + "my-as-group": { + "Type": "AWS::AutoScaling::AutoScalingGroup", + "Properties": { + "AvailabilityZones": ['us-west-1'], + "LaunchConfigurationName": {"Ref": "my-launch-config"}, + "MinSize": "2", + "MaxSize": "2", + "DesiredCapacity": "2" + }, + }, + 
+
+            "my-launch-config": {
+                "Type": "AWS::AutoScaling::LaunchConfiguration",
+                "Properties": {
+                    "ImageId": "ami-1234abcd",
+                    "UserData": "some user data",
+                }
+            },
+        },
+    }
+    asg_template_json = json.dumps(asg_template)
+
+    conn = boto.cloudformation.connect_to_region("us-west-1")
+    conn.create_stack(
+        "asg_stack",
+        template_body=asg_template_json,
+    )
+
+    autoscale_conn = boto.ec2.autoscale.connect_to_region("us-west-1")
+    asg = autoscale_conn.get_all_groups()[0]
+    asg.min_size.should.equal(2)
+    asg.max_size.should.equal(2)
+    asg.desired_capacity.should.equal(2)
+
+    asg_template['Resources']['my-as-group']['Properties']['MaxSize'] = 3
+    asg_template['Resources']['my-as-group']['Properties']['Tags'] = [
+        {
+            "Key": "propagated-test-tag", "Value": "propagated-test-tag-value",
+            "PropagateAtLaunch": True},
+        {
+            "Key": "not-propagated-test-tag",
+            "Value": "not-propagated-test-tag-value",
+            "PropagateAtLaunch": False
+        }
+    ]
+    asg_template_json = json.dumps(asg_template)
+    conn.update_stack(
+        "asg_stack",
+        template_body=asg_template_json,
+    )
+    asg = autoscale_conn.get_all_groups()[0]
+    asg.min_size.should.equal(2)
+    asg.max_size.should.equal(3)
+    asg.desired_capacity.should.equal(2)
+
+    # confirm the instances were created with the right tags
+    ec2_conn = boto.ec2.connect_to_region('us-west-1')
+    reservations = ec2_conn.get_all_reservations()
+    running_instance_count = 0
+    for res in reservations:
+        for instance in res.instances:
+            if instance.state == 'running':
+                running_instance_count += 1
+                instance.tags['propagated-test-tag'].should.equal('propagated-test-tag-value')
+                instance.tags.keys().should_not.contain('not-propagated-test-tag')
+    running_instance_count.should.equal(2)
+
+
+@mock_ec2_deprecated()
+@mock_cloudformation_deprecated()
+def test_vpc_single_instance_in_subnet():
+    template_json = json.dumps(vpc_single_instance_in_subnet.template)
+    conn = boto.cloudformation.connect_to_region("us-west-1")
+    conn.create_stack(
+        "test_stack",
+        template_body=template_json,
+        parameters=[("KeyName", "my_key")],
+    )
+
+    vpc_conn = boto.vpc.connect_to_region("us-west-1")
+
+    vpc = vpc_conn.get_all_vpcs(filters={'cidrBlock': '10.0.0.0/16'})[0]
+    vpc.cidr_block.should.equal("10.0.0.0/16")
+
+    # Add this once we implement the endpoint
+    # vpc_conn.get_all_internet_gateways().should.have.length_of(1)
+
+    subnet = vpc_conn.get_all_subnets(filters={'vpcId': vpc.id})[0]
+    subnet.vpc_id.should.equal(vpc.id)
+
+    ec2_conn = boto.ec2.connect_to_region("us-west-1")
+    reservation = ec2_conn.get_all_instances()[0]
+    instance = reservation.instances[0]
+    instance.tags["Foo"].should.equal("Bar")
+    # Check that the EIP is attached to the EC2 instance
+    eip = ec2_conn.get_all_addresses()[0]
+    eip.domain.should.equal('vpc')
+    eip.instance_id.should.equal(instance.id)
+
+    security_group = ec2_conn.get_all_security_groups(
+        filters={'vpc_id': [vpc.id]})[0]
+    security_group.vpc_id.should.equal(vpc.id)
+
+    stack = conn.describe_stacks()[0]
+
+    vpc.tags.should.have.key('Application').which.should.equal(stack.stack_id)
+
+    resources = stack.describe_resources()
+    vpc_resource = [
+        resource for resource in resources if resource.resource_type == 'AWS::EC2::VPC'][0]
+    vpc_resource.physical_resource_id.should.equal(vpc.id)
+
+    subnet_resource = [
+        resource for resource in resources if resource.resource_type == 'AWS::EC2::Subnet'][0]
+    subnet_resource.physical_resource_id.should.equal(subnet.id)
+
+    eip_resource = [
+        resource for resource in resources if resource.resource_type == 'AWS::EC2::EIP'][0]
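+    # For an EIP, the physical resource id CloudFormation reports is the
+    # allocated public IP itself, so the two values should match exactly.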
eip_resource.physical_resource_id.should.equal(eip.public_ip) + + +@mock_cloudformation() +@mock_ec2() +@mock_rds2() +def test_rds_db_parameter_groups(): + ec2_conn = boto3.client("ec2", region_name="us-west-1") + ec2_conn.create_security_group( + GroupName='application', Description='Our Application Group') + + template_json = json.dumps(rds_mysql_with_db_parameter_group.template) + cf_conn = boto3.client('cloudformation', 'us-west-1') + cf_conn.create_stack( + StackName="test_stack", + TemplateBody=template_json, + Parameters=[{'ParameterKey': key, 'ParameterValue': value} for + key, value in [ + ("DBInstanceIdentifier", "master_db"), + ("DBName", "my_db"), + ("DBUser", "my_user"), + ("DBPassword", "my_password"), + ("DBAllocatedStorage", "20"), + ("DBInstanceClass", "db.m1.medium"), + ("EC2SecurityGroup", "application"), + ("MultiAZ", "true"), + ] + ], + ) + + rds_conn = boto3.client('rds', region_name="us-west-1") + + db_parameter_groups = rds_conn.describe_db_parameter_groups() + len(db_parameter_groups['DBParameterGroups']).should.equal(1) + db_parameter_group_name = db_parameter_groups[ + 'DBParameterGroups'][0]['DBParameterGroupName'] + + found_cloudformation_set_parameter = False + for db_parameter in rds_conn.describe_db_parameters(DBParameterGroupName=db_parameter_group_name)[ + 'Parameters']: + if db_parameter['ParameterName'] == 'BACKLOG_QUEUE_LIMIT' and db_parameter[ + 'ParameterValue'] == '2048': + found_cloudformation_set_parameter = True + + found_cloudformation_set_parameter.should.equal(True) + + +@mock_cloudformation_deprecated() +@mock_ec2_deprecated() +@mock_rds_deprecated() +def test_rds_mysql_with_read_replica(): + ec2_conn = boto.ec2.connect_to_region("us-west-1") + ec2_conn.create_security_group('application', 'Our Application Group') + + template_json = json.dumps(rds_mysql_with_read_replica.template) + conn = boto.cloudformation.connect_to_region("us-west-1") + conn.create_stack( + "test_stack", + template_body=template_json, + parameters=[ + ("DBInstanceIdentifier", "master_db"), + ("DBName", "my_db"), + ("DBUser", "my_user"), + ("DBPassword", "my_password"), + ("DBAllocatedStorage", "20"), + ("DBInstanceClass", "db.m1.medium"), + ("EC2SecurityGroup", "application"), + ("MultiAZ", "true"), + ], + ) + + rds_conn = boto.rds.connect_to_region("us-west-1") + + primary = rds_conn.get_all_dbinstances("master_db")[0] + primary.master_username.should.equal("my_user") + primary.allocated_storage.should.equal(20) + primary.instance_class.should.equal("db.m1.medium") + primary.multi_az.should.equal(True) + list(primary.read_replica_dbinstance_identifiers).should.have.length_of(1) + replica_id = primary.read_replica_dbinstance_identifiers[0] + + replica = rds_conn.get_all_dbinstances(replica_id)[0] + replica.instance_class.should.equal("db.m1.medium") + + security_group_name = primary.security_groups[0].name + security_group = rds_conn.get_all_dbsecurity_groups(security_group_name)[0] + security_group.ec2_groups[0].name.should.equal("application") + + +@mock_cloudformation_deprecated() +@mock_ec2_deprecated() +@mock_rds_deprecated() +def test_rds_mysql_with_read_replica_in_vpc(): + template_json = json.dumps(rds_mysql_with_read_replica.template) + conn = boto.cloudformation.connect_to_region("eu-central-1") + conn.create_stack( + "test_stack", + template_body=template_json, + parameters=[ + ("DBInstanceIdentifier", "master_db"), + ("DBName", "my_db"), + ("DBUser", "my_user"), + ("DBPassword", "my_password"), + ("DBAllocatedStorage", "20"), + ("DBInstanceClass", 
"db.m1.medium"), + ("MultiAZ", "true"), + ], + ) + + rds_conn = boto.rds.connect_to_region("eu-central-1") + primary = rds_conn.get_all_dbinstances("master_db")[0] + + subnet_group_name = primary.subnet_group.name + subnet_group = rds_conn.get_all_db_subnet_groups(subnet_group_name)[0] + subnet_group.description.should.equal("my db subnet group") + + +@mock_autoscaling_deprecated() +@mock_iam_deprecated() +@mock_cloudformation_deprecated() +def test_iam_roles(): + iam_template = { + "AWSTemplateFormatVersion": "2010-09-09", + + "Resources": { + + "my-launch-config": { + "Properties": { + "IamInstanceProfile": {"Ref": "my-instance-profile-with-path"}, + "ImageId": "ami-1234abcd", + }, + "Type": "AWS::AutoScaling::LaunchConfiguration" + }, + "my-instance-profile-with-path": { + "Properties": { + "Path": "my-path", + "Roles": [{"Ref": "my-role-with-path"}], + }, + "Type": "AWS::IAM::InstanceProfile" + }, + "my-instance-profile-no-path": { + "Properties": { + "Roles": [{"Ref": "my-role-no-path"}], + }, + "Type": "AWS::IAM::InstanceProfile" + }, + "my-role-with-path": { + "Properties": { + "AssumeRolePolicyDocument": { + "Statement": [ + { + "Action": [ + "sts:AssumeRole" + ], + "Effect": "Allow", + "Principal": { + "Service": [ + "ec2.amazonaws.com" + ] + } + } + ] + }, + "Path": "my-path", + "Policies": [ + { + "PolicyDocument": { + "Statement": [ + { + "Action": [ + "ec2:CreateTags", + "ec2:DescribeInstances", + "ec2:DescribeTags" + ], + "Effect": "Allow", + "Resource": [ + "*" + ] + } + ], + "Version": "2012-10-17" + }, + "PolicyName": "EC2_Tags" + }, + { + "PolicyDocument": { + "Statement": [ + { + "Action": [ + "sqs:*" + ], + "Effect": "Allow", + "Resource": [ + "*" + ] + } + ], + "Version": "2012-10-17" + }, + "PolicyName": "SQS" + }, + ] + }, + "Type": "AWS::IAM::Role" + }, + "my-role-no-path": { + "Properties": { + "AssumeRolePolicyDocument": { + "Statement": [ + { + "Action": [ + "sts:AssumeRole" + ], + "Effect": "Allow", + "Principal": { + "Service": [ + "ec2.amazonaws.com" + ] + } + } + ] + }, + }, + "Type": "AWS::IAM::Role" + } + } + } + + iam_template_json = json.dumps(iam_template) + conn = boto.cloudformation.connect_to_region("us-west-1") + conn.create_stack( + "test_stack", + template_body=iam_template_json, + ) + + iam_conn = boto.iam.connect_to_region("us-west-1") + + role_results = iam_conn.list_roles()['list_roles_response'][ + 'list_roles_result']['roles'] + role_name_to_id = {} + for role_result in role_results: + role = iam_conn.get_role(role_result.role_name) + role.role_name.should.contain("my-role") + if 'with-path' in role.role_name: + role_name_to_id['with-path'] = role.role_id + role.path.should.equal("my-path") + else: + role_name_to_id['no-path'] = role.role_id + role.role_name.should.contain('no-path') + role.path.should.equal('/') + + instance_profile_responses = iam_conn.list_instance_profiles()[ + 'list_instance_profiles_response']['list_instance_profiles_result']['instance_profiles'] + instance_profile_responses.should.have.length_of(2) + instance_profile_names = [] + + for instance_profile_response in instance_profile_responses: + instance_profile = iam_conn.get_instance_profile(instance_profile_response.instance_profile_name) + instance_profile_names.append(instance_profile.instance_profile_name) + instance_profile.instance_profile_name.should.contain( + "my-instance-profile") + if "with-path" in instance_profile.instance_profile_name: + instance_profile.path.should.equal("my-path") + instance_profile.role_id.should.equal(role_name_to_id['with-path']) + 
else: + instance_profile.instance_profile_name.should.contain('no-path') + instance_profile.role_id.should.equal(role_name_to_id['no-path']) + instance_profile.path.should.equal('/') + + autoscale_conn = boto.ec2.autoscale.connect_to_region("us-west-1") + launch_config = autoscale_conn.get_all_launch_configurations()[0] + launch_config.instance_profile_name.should.contain("my-instance-profile-with-path") + + stack = conn.describe_stacks()[0] + resources = stack.describe_resources() + instance_profile_resources = [ + resource for resource in resources if resource.resource_type == 'AWS::IAM::InstanceProfile'] + {ip.physical_resource_id for ip in instance_profile_resources}.should.equal(set(instance_profile_names)) + + role_resources = [ + resource for resource in resources if resource.resource_type == 'AWS::IAM::Role'] + {r.physical_resource_id for r in role_resources}.should.equal(set(role_name_to_id.values())) + + +@mock_ec2_deprecated() +@mock_cloudformation_deprecated() +def test_single_instance_with_ebs_volume(): + template_json = json.dumps(single_instance_with_ebs_volume.template) + conn = boto.cloudformation.connect_to_region("us-west-1") + conn.create_stack( + "test_stack", + template_body=template_json, + parameters=[("KeyName", "key_name")] + ) + + ec2_conn = boto.ec2.connect_to_region("us-west-1") + reservation = ec2_conn.get_all_instances()[0] + ec2_instance = reservation.instances[0] + + volumes = ec2_conn.get_all_volumes() + # Grab the mounted drive + volume = [ + volume for volume in volumes if volume.attach_data.device == '/dev/sdh'][0] + volume.volume_state().should.equal('in-use') + volume.attach_data.instance_id.should.equal(ec2_instance.id) + + stack = conn.describe_stacks()[0] + resources = stack.describe_resources() + ebs_volumes = [ + resource for resource in resources if resource.resource_type == 'AWS::EC2::Volume'] + ebs_volumes[0].physical_resource_id.should.equal(volume.id) + + +@mock_cloudformation_deprecated() +def test_create_template_without_required_param(): + template_json = json.dumps(single_instance_with_ebs_volume.template) + conn = boto.cloudformation.connect_to_region("us-west-1") + conn.create_stack.when.called_with( + "test_stack", + template_body=template_json, + ).should.throw(BotoServerError) + + +@mock_ec2_deprecated() +@mock_cloudformation_deprecated() +def test_classic_eip(): + template_json = json.dumps(ec2_classic_eip.template) + conn = boto.cloudformation.connect_to_region("us-west-1") + conn.create_stack("test_stack", template_body=template_json) + ec2_conn = boto.ec2.connect_to_region("us-west-1") + eip = ec2_conn.get_all_addresses()[0] + + stack = conn.describe_stacks()[0] + resources = stack.describe_resources() + cfn_eip = [ + resource for resource in resources if resource.resource_type == 'AWS::EC2::EIP'][0] + cfn_eip.physical_resource_id.should.equal(eip.public_ip) + + +@mock_ec2_deprecated() +@mock_cloudformation_deprecated() +def test_vpc_eip(): + template_json = json.dumps(vpc_eip.template) + conn = boto.cloudformation.connect_to_region("us-west-1") + conn.create_stack("test_stack", template_body=template_json) + ec2_conn = boto.ec2.connect_to_region("us-west-1") + eip = ec2_conn.get_all_addresses()[0] + + stack = conn.describe_stacks()[0] + resources = stack.describe_resources() + cfn_eip = [ + resource for resource in resources if resource.resource_type == 'AWS::EC2::EIP'][0] + cfn_eip.physical_resource_id.should.equal(eip.public_ip) + + +@mock_ec2_deprecated() +@mock_cloudformation_deprecated() +def test_fn_join(): + 
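+    # fn_join's template defines an output via Fn::Join, so the stack output
+    # should be the 'test eip:' prefix joined with the allocated address.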
template_json = json.dumps(fn_join.template) + conn = boto.cloudformation.connect_to_region("us-west-1") + conn.create_stack("test_stack", template_body=template_json) + ec2_conn = boto.ec2.connect_to_region("us-west-1") + eip = ec2_conn.get_all_addresses()[0] + + stack = conn.describe_stacks()[0] + fn_join_output = stack.outputs[0] + fn_join_output.value.should.equal('test eip:{0}'.format(eip.public_ip)) + + +@mock_cloudformation_deprecated() +@mock_sqs_deprecated() +def test_conditional_resources(): + sqs_template = { + "AWSTemplateFormatVersion": "2010-09-09", + "Parameters": { + "EnvType": { + "Description": "Environment type.", + "Type": "String", + } + }, + "Conditions": { + "CreateQueue": {"Fn::Equals": [{"Ref": "EnvType"}, "prod"]} + }, + "Resources": { + "QueueGroup": { + "Condition": "CreateQueue", + "Type": "AWS::SQS::Queue", + "Properties": { + "QueueName": "my-queue", + "VisibilityTimeout": 60, + } + }, + }, + } + sqs_template_json = json.dumps(sqs_template) + + conn = boto.cloudformation.connect_to_region("us-west-1") + conn.create_stack( + "test_stack_without_queue", + template_body=sqs_template_json, + parameters=[("EnvType", "staging")], + ) + sqs_conn = boto.sqs.connect_to_region("us-west-1") + list(sqs_conn.get_all_queues()).should.have.length_of(0) + + conn = boto.cloudformation.connect_to_region("us-west-1") + conn.create_stack( + "test_stack_with_queue", + template_body=sqs_template_json, + parameters=[("EnvType", "prod")], + ) + sqs_conn = boto.sqs.connect_to_region("us-west-1") + list(sqs_conn.get_all_queues()).should.have.length_of(1) + + +@mock_cloudformation_deprecated() +@mock_ec2_deprecated() +def test_conditional_if_handling(): + dummy_template = { + "AWSTemplateFormatVersion": "2010-09-09", + "Conditions": { + "EnvEqualsPrd": { + "Fn::Equals": [ + { + "Ref": "ENV" + }, + "prd" + ] + } + }, + "Parameters": { + "ENV": { + "Default": "dev", + "Description": "Deployment environment for the stack (dev/prd)", + "Type": "String" + }, + }, + "Description": "Stack 1", + "Resources": { + "App1": { + "Properties": { + "ImageId": { + "Fn::If": [ + "EnvEqualsPrd", + "ami-00000000", + "ami-ffffffff" + ] + }, + }, + "Type": "AWS::EC2::Instance" + }, + } + } + dummy_template_json = json.dumps(dummy_template) + + conn = boto.cloudformation.connect_to_region("us-west-1") + conn.create_stack('test_stack1', template_body=dummy_template_json) + ec2_conn = boto.ec2.connect_to_region("us-west-1") + reservation = ec2_conn.get_all_instances()[0] + ec2_instance = reservation.instances[0] + ec2_instance.image_id.should.equal("ami-ffffffff") + ec2_instance.terminate() + + conn = boto.cloudformation.connect_to_region("us-west-2") + conn.create_stack( + 'test_stack1', template_body=dummy_template_json, parameters=[("ENV", "prd")]) + ec2_conn = boto.ec2.connect_to_region("us-west-2") + reservation = ec2_conn.get_all_instances()[0] + ec2_instance = reservation.instances[0] + ec2_instance.image_id.should.equal("ami-00000000") + + +@mock_cloudformation_deprecated() +@mock_ec2_deprecated() +def test_cloudformation_mapping(): + dummy_template = { + "AWSTemplateFormatVersion": "2010-09-09", + "Mappings": { + "RegionMap": { + "us-east-1": {"32": "ami-6411e20d", "64": "ami-7a11e213"}, + "us-west-1": {"32": "ami-c9c7978c", "64": "ami-cfc7978a"}, + "eu-west-1": {"32": "ami-37c2f643", "64": "ami-31c2f645"}, + "ap-southeast-1": {"32": "ami-66f28c34", "64": "ami-60f28c32"}, + "ap-northeast-1": {"32": "ami-9c03a89d", "64": "ami-a003a8a1"} + } + }, + "Resources": { + "WebServer": { + "Type": 
"AWS::EC2::Instance", + "Properties": { + "ImageId": { + "Fn::FindInMap": ["RegionMap", {"Ref": "AWS::Region"}, "32"] + }, + "InstanceType": "m1.small" + }, + "Type": "AWS::EC2::Instance", + }, + }, + } + + dummy_template_json = json.dumps(dummy_template) + + conn = boto.cloudformation.connect_to_region("us-east-1") + conn.create_stack('test_stack1', template_body=dummy_template_json) + ec2_conn = boto.ec2.connect_to_region("us-east-1") + reservation = ec2_conn.get_all_instances()[0] + ec2_instance = reservation.instances[0] + ec2_instance.image_id.should.equal("ami-6411e20d") + + conn = boto.cloudformation.connect_to_region("us-west-1") + conn.create_stack('test_stack1', template_body=dummy_template_json) + ec2_conn = boto.ec2.connect_to_region("us-west-1") + reservation = ec2_conn.get_all_instances()[0] + ec2_instance = reservation.instances[0] + ec2_instance.image_id.should.equal("ami-c9c7978c") + + +@mock_cloudformation_deprecated() +@mock_route53_deprecated() +def test_route53_roundrobin(): + route53_conn = boto.connect_route53() + + template_json = json.dumps(route53_roundrobin.template) + conn = boto.cloudformation.connect_to_region("us-west-1") + stack = conn.create_stack( + "test_stack", + template_body=template_json, + ) + + zones = route53_conn.get_all_hosted_zones()['ListHostedZonesResponse'][ + 'HostedZones'] + list(zones).should.have.length_of(1) + zone_id = zones[0]['Id'] + zone_id = zone_id.split('/') + zone_id = zone_id[2] + + rrsets = route53_conn.get_all_rrsets(zone_id) + rrsets.hosted_zone_id.should.equal(zone_id) + rrsets.should.have.length_of(2) + record_set1 = rrsets[0] + record_set1.name.should.equal('test_stack.us-west-1.my_zone.') + record_set1.identifier.should.equal("test_stack AWS") + record_set1.type.should.equal('CNAME') + record_set1.ttl.should.equal('900') + record_set1.weight.should.equal('3') + record_set1.resource_records[0].should.equal("aws.amazon.com") + + record_set2 = rrsets[1] + record_set2.name.should.equal('test_stack.us-west-1.my_zone.') + record_set2.identifier.should.equal("test_stack Amazon") + record_set2.type.should.equal('CNAME') + record_set2.ttl.should.equal('900') + record_set2.weight.should.equal('1') + record_set2.resource_records[0].should.equal("www.amazon.com") + + stack = conn.describe_stacks()[0] + output = stack.outputs[0] + output.key.should.equal('DomainName') + output.value.should.equal( + 'arn:aws:route53:::hostedzone/{0}'.format(zone_id)) + + +@mock_cloudformation_deprecated() +@mock_ec2_deprecated() +@mock_route53_deprecated() +def test_route53_ec2_instance_with_public_ip(): + route53_conn = boto.connect_route53() + ec2_conn = boto.ec2.connect_to_region("us-west-1") + + template_json = json.dumps(route53_ec2_instance_with_public_ip.template) + conn = boto.cloudformation.connect_to_region("us-west-1") + conn.create_stack( + "test_stack", + template_body=template_json, + ) + + instance_id = ec2_conn.get_all_reservations()[0].instances[0].id + + zones = route53_conn.get_all_hosted_zones()['ListHostedZonesResponse'][ + 'HostedZones'] + list(zones).should.have.length_of(1) + zone_id = zones[0]['Id'] + zone_id = zone_id.split('/') + zone_id = zone_id[2] + + rrsets = route53_conn.get_all_rrsets(zone_id) + rrsets.should.have.length_of(1) + + record_set1 = rrsets[0] + record_set1.name.should.equal('{0}.us-west-1.my_zone.'.format(instance_id)) + record_set1.identifier.should.equal(None) + record_set1.type.should.equal('A') + record_set1.ttl.should.equal('900') + record_set1.weight.should.equal(None) + 
record_set1.resource_records[0].should.equal("10.0.0.25") + + +@mock_cloudformation_deprecated() +@mock_route53_deprecated() +def test_route53_associate_health_check(): + route53_conn = boto.connect_route53() + + template_json = json.dumps(route53_health_check.template) + conn = boto.cloudformation.connect_to_region("us-west-1") + conn.create_stack( + "test_stack", + template_body=template_json, + ) + + checks = route53_conn.get_list_health_checks()['ListHealthChecksResponse'][ + 'HealthChecks'] + list(checks).should.have.length_of(1) + check = checks[0] + health_check_id = check['Id'] + config = check['HealthCheckConfig'] + config["FailureThreshold"].should.equal("3") + config["IPAddress"].should.equal("10.0.0.4") + config["Port"].should.equal("80") + config["RequestInterval"].should.equal("10") + config["ResourcePath"].should.equal("/") + config["Type"].should.equal("HTTP") + + zones = route53_conn.get_all_hosted_zones()['ListHostedZonesResponse'][ + 'HostedZones'] + list(zones).should.have.length_of(1) + zone_id = zones[0]['Id'] + zone_id = zone_id.split('/') + zone_id = zone_id[2] + + rrsets = route53_conn.get_all_rrsets(zone_id) + rrsets.should.have.length_of(1) + + record_set = rrsets[0] + record_set.health_check.should.equal(health_check_id) + + +@mock_cloudformation_deprecated() +@mock_route53_deprecated() +def test_route53_with_update(): + route53_conn = boto.connect_route53() + + template_json = json.dumps(route53_health_check.template) + cf_conn = boto.cloudformation.connect_to_region("us-west-1") + cf_conn.create_stack( + "test_stack", + template_body=template_json, + ) + + zones = route53_conn.get_all_hosted_zones()['ListHostedZonesResponse'][ + 'HostedZones'] + list(zones).should.have.length_of(1) + zone_id = zones[0]['Id'] + zone_id = zone_id.split('/') + zone_id = zone_id[2] + + rrsets = route53_conn.get_all_rrsets(zone_id) + rrsets.should.have.length_of(1) + + record_set = rrsets[0] + record_set.resource_records.should.equal(["my.example.com"]) + + route53_health_check.template['Resources']['myDNSRecord'][ + 'Properties']['ResourceRecords'] = ["my_other.example.com"] + template_json = json.dumps(route53_health_check.template) + cf_conn.update_stack( + "test_stack", + template_body=template_json, + ) + + zones = route53_conn.get_all_hosted_zones()['ListHostedZonesResponse'][ + 'HostedZones'] + list(zones).should.have.length_of(1) + zone_id = zones[0]['Id'] + zone_id = zone_id.split('/') + zone_id = zone_id[2] + + rrsets = route53_conn.get_all_rrsets(zone_id) + rrsets.should.have.length_of(1) + + record_set = rrsets[0] + record_set.resource_records.should.equal(["my_other.example.com"]) + + +@mock_cloudformation_deprecated() +@mock_sns_deprecated() +def test_sns_topic(): + dummy_template = { + "AWSTemplateFormatVersion": "2010-09-09", + "Resources": { + "MySNSTopic": { + "Type": "AWS::SNS::Topic", + "Properties": { + "Subscription": [ + {"Endpoint": "https://example.com", "Protocol": "https"}, + ], + "TopicName": "my_topics", + } + } + }, + "Outputs": { + "topic_name": { + "Value": {"Fn::GetAtt": ["MySNSTopic", "TopicName"]} + }, + "topic_arn": { + "Value": {"Ref": "MySNSTopic"} + }, + } + } + template_json = json.dumps(dummy_template) + conn = boto.cloudformation.connect_to_region("us-west-1") + stack = conn.create_stack( + "test_stack", + template_body=template_json, + ) + + sns_conn = boto.sns.connect_to_region("us-west-1") + topics = sns_conn.get_all_topics()["ListTopicsResponse"][ + "ListTopicsResult"]["Topics"] + topics.should.have.length_of(1) + topic_arn = 
topics[0]['TopicArn'] + topic_arn.should.contain("my_topics") + + subscriptions = sns_conn.get_all_subscriptions()["ListSubscriptionsResponse"][ + "ListSubscriptionsResult"]["Subscriptions"] + subscriptions.should.have.length_of(1) + subscription = subscriptions[0] + subscription["TopicArn"].should.equal(topic_arn) + subscription["Protocol"].should.equal("https") + subscription["SubscriptionArn"].should.contain(topic_arn) + subscription["Endpoint"].should.equal("https://example.com") + + stack = conn.describe_stacks()[0] + topic_name_output = [x for x in stack.outputs if x.key == 'topic_name'][0] + topic_name_output.value.should.equal("my_topics") + topic_arn_output = [x for x in stack.outputs if x.key == 'topic_arn'][0] + topic_arn_output.value.should.equal(topic_arn) + + +@mock_cloudformation_deprecated +@mock_ec2_deprecated +def test_vpc_gateway_attachment_creation_should_attach_itself_to_vpc(): + template = { + "AWSTemplateFormatVersion": "2010-09-09", + "Resources": { + "internetgateway": { + "Type": "AWS::EC2::InternetGateway" + }, + "testvpc": { + "Type": "AWS::EC2::VPC", + "Properties": { + "CidrBlock": "10.0.0.0/16", + "EnableDnsHostnames": "true", + "EnableDnsSupport": "true", + "InstanceTenancy": "default" + }, + }, + "vpcgatewayattachment": { + "Type": "AWS::EC2::VPCGatewayAttachment", + "Properties": { + "InternetGatewayId": { + "Ref": "internetgateway" + }, + "VpcId": { + "Ref": "testvpc" + } + }, + }, + } + } + + template_json = json.dumps(template) + cf_conn = boto.cloudformation.connect_to_region("us-west-1") + cf_conn.create_stack( + "test_stack", + template_body=template_json, + ) + + vpc_conn = boto.vpc.connect_to_region("us-west-1") + vpc = vpc_conn.get_all_vpcs(filters={'cidrBlock': '10.0.0.0/16'})[0] + igws = vpc_conn.get_all_internet_gateways( + filters={'attachment.vpc-id': vpc.id} + ) + + igws.should.have.length_of(1) + + +@mock_cloudformation_deprecated +@mock_ec2_deprecated +def test_vpc_peering_creation(): + vpc_conn = boto.vpc.connect_to_region("us-west-1") + vpc_source = vpc_conn.create_vpc("10.0.0.0/16") + peer_vpc = vpc_conn.create_vpc("10.1.0.0/16") + template = { + "AWSTemplateFormatVersion": "2010-09-09", + "Resources": { + "vpcpeeringconnection": { + "Type": "AWS::EC2::VPCPeeringConnection", + "Properties": { + "PeerVpcId": peer_vpc.id, + "VpcId": vpc_source.id, + } + }, + } + } + + template_json = json.dumps(template) + cf_conn = boto.cloudformation.connect_to_region("us-west-1") + cf_conn.create_stack( + "test_stack", + template_body=template_json, + ) + + peering_connections = vpc_conn.get_all_vpc_peering_connections() + peering_connections.should.have.length_of(1) + + +@mock_cloudformation_deprecated +@mock_ec2_deprecated +def test_multiple_security_group_ingress_separate_from_security_group_by_id(): + template = { + "AWSTemplateFormatVersion": "2010-09-09", + "Resources": { + "test-security-group1": { + "Type": "AWS::EC2::SecurityGroup", + "Properties": { + "GroupDescription": "test security group", + "Tags": [ + { + "Key": "sg-name", + "Value": "sg1" + } + ] + }, + }, + "test-security-group2": { + "Type": "AWS::EC2::SecurityGroup", + "Properties": { + "GroupDescription": "test security group", + "Tags": [ + { + "Key": "sg-name", + "Value": "sg2" + } + ] + }, + }, + "test-sg-ingress": { + "Type": "AWS::EC2::SecurityGroupIngress", + "Properties": { + "GroupId": {"Ref": "test-security-group1"}, + "IpProtocol": "tcp", + "FromPort": "80", + "ToPort": "8080", + "SourceSecurityGroupId": {"Ref": "test-security-group2"}, + } + } + } + } + + template_json = 
json.dumps(template) + cf_conn = boto.cloudformation.connect_to_region("us-west-1") + cf_conn.create_stack( + "test_stack", + template_body=template_json, + ) + ec2_conn = boto.ec2.connect_to_region("us-west-1") + + security_group1 = ec2_conn.get_all_security_groups( + filters={"tag:sg-name": "sg1"})[0] + security_group2 = ec2_conn.get_all_security_groups( + filters={"tag:sg-name": "sg2"})[0] + + security_group1.rules.should.have.length_of(1) + security_group1.rules[0].grants.should.have.length_of(1) + security_group1.rules[0].grants[ + 0].group_id.should.equal(security_group2.id) + security_group1.rules[0].ip_protocol.should.equal('tcp') + security_group1.rules[0].from_port.should.equal('80') + security_group1.rules[0].to_port.should.equal('8080') + + +@mock_cloudformation_deprecated +@mock_ec2_deprecated +def test_security_group_ingress_separate_from_security_group_by_id(): + ec2_conn = boto.ec2.connect_to_region("us-west-1") + ec2_conn.create_security_group( + "test-security-group1", "test security group") + + template = { + "AWSTemplateFormatVersion": "2010-09-09", + "Resources": { + "test-security-group2": { + "Type": "AWS::EC2::SecurityGroup", + "Properties": { + "GroupDescription": "test security group", + "Tags": [ + { + "Key": "sg-name", + "Value": "sg2" + } + ] + }, + }, + "test-sg-ingress": { + "Type": "AWS::EC2::SecurityGroupIngress", + "Properties": { + "GroupName": "test-security-group1", + "IpProtocol": "tcp", + "FromPort": "80", + "ToPort": "8080", + "SourceSecurityGroupId": {"Ref": "test-security-group2"}, + } + } + } + } + + template_json = json.dumps(template) + cf_conn = boto.cloudformation.connect_to_region("us-west-1") + cf_conn.create_stack( + "test_stack", + template_body=template_json, + ) + security_group1 = ec2_conn.get_all_security_groups( + groupnames=["test-security-group1"])[0] + security_group2 = ec2_conn.get_all_security_groups( + filters={"tag:sg-name": "sg2"})[0] + + security_group1.rules.should.have.length_of(1) + security_group1.rules[0].grants.should.have.length_of(1) + security_group1.rules[0].grants[ + 0].group_id.should.equal(security_group2.id) + security_group1.rules[0].ip_protocol.should.equal('tcp') + security_group1.rules[0].from_port.should.equal('80') + security_group1.rules[0].to_port.should.equal('8080') + + +@mock_cloudformation_deprecated +@mock_ec2_deprecated +def test_security_group_ingress_separate_from_security_group_by_id_using_vpc(): + vpc_conn = boto.vpc.connect_to_region("us-west-1") + vpc = vpc_conn.create_vpc("10.0.0.0/16") + + template = { + "AWSTemplateFormatVersion": "2010-09-09", + "Resources": { + "test-security-group1": { + "Type": "AWS::EC2::SecurityGroup", + "Properties": { + "GroupDescription": "test security group", + "VpcId": vpc.id, + "Tags": [ + { + "Key": "sg-name", + "Value": "sg1" + } + ] + }, + }, + "test-security-group2": { + "Type": "AWS::EC2::SecurityGroup", + "Properties": { + "GroupDescription": "test security group", + "VpcId": vpc.id, + "Tags": [ + { + "Key": "sg-name", + "Value": "sg2" + } + ] + }, + }, + "test-sg-ingress": { + "Type": "AWS::EC2::SecurityGroupIngress", + "Properties": { + "GroupId": {"Ref": "test-security-group1"}, + "VpcId": vpc.id, + "IpProtocol": "tcp", + "FromPort": "80", + "ToPort": "8080", + "SourceSecurityGroupId": {"Ref": "test-security-group2"}, + } + } + } + } + + template_json = json.dumps(template) + cf_conn = boto.cloudformation.connect_to_region("us-west-1") + cf_conn.create_stack( + "test_stack", + template_body=template_json, + ) + security_group1 = 
vpc_conn.get_all_security_groups( + filters={"tag:sg-name": "sg1"})[0] + security_group2 = vpc_conn.get_all_security_groups( + filters={"tag:sg-name": "sg2"})[0] + + security_group1.rules.should.have.length_of(1) + security_group1.rules[0].grants.should.have.length_of(1) + security_group1.rules[0].grants[ + 0].group_id.should.equal(security_group2.id) + security_group1.rules[0].ip_protocol.should.equal('tcp') + security_group1.rules[0].from_port.should.equal('80') + security_group1.rules[0].to_port.should.equal('8080') + + +@mock_cloudformation_deprecated +@mock_ec2_deprecated +def test_security_group_with_update(): + vpc_conn = boto.vpc.connect_to_region("us-west-1") + vpc1 = vpc_conn.create_vpc("10.0.0.0/16") + + template = { + "AWSTemplateFormatVersion": "2010-09-09", + "Resources": { + "test-security-group": { + "Type": "AWS::EC2::SecurityGroup", + "Properties": { + "GroupDescription": "test security group", + "VpcId": vpc1.id, + "Tags": [ + { + "Key": "sg-name", + "Value": "sg" + } + ] + }, + }, + } + } + + template_json = json.dumps(template) + cf_conn = boto.cloudformation.connect_to_region("us-west-1") + cf_conn.create_stack( + "test_stack", + template_body=template_json, + ) + security_group = vpc_conn.get_all_security_groups( + filters={"tag:sg-name": "sg"})[0] + security_group.vpc_id.should.equal(vpc1.id) + + vpc2 = vpc_conn.create_vpc("10.1.0.0/16") + template['Resources'][ + 'test-security-group']['Properties']['VpcId'] = vpc2.id + template_json = json.dumps(template) + cf_conn.update_stack( + "test_stack", + template_body=template_json, + ) + security_group = vpc_conn.get_all_security_groups( + filters={"tag:sg-name": "sg"})[0] + security_group.vpc_id.should.equal(vpc2.id) + + +@mock_cloudformation_deprecated +@mock_ec2_deprecated +def test_subnets_should_be_created_with_availability_zone(): + vpc_conn = boto.vpc.connect_to_region('us-west-1') + vpc = vpc_conn.create_vpc("10.0.0.0/16") + + subnet_template = { + "AWSTemplateFormatVersion": "2010-09-09", + "Resources": { + "testSubnet": { + "Type": "AWS::EC2::Subnet", + "Properties": { + "VpcId": vpc.id, + "CidrBlock": "10.0.0.0/24", + "AvailabilityZone": "us-west-1b", + } + } + } + } + cf_conn = boto.cloudformation.connect_to_region("us-west-1") + template_json = json.dumps(subnet_template) + cf_conn.create_stack( + "test_stack", + template_body=template_json, + ) + subnet = vpc_conn.get_all_subnets(filters={'cidrBlock': '10.0.0.0/24'})[0] + subnet.availability_zone.should.equal('us-west-1b') + + +@mock_cloudformation_deprecated +@mock_datapipeline_deprecated +def test_datapipeline(): + dp_template = { + "AWSTemplateFormatVersion": "2010-09-09", + "Resources": { + "dataPipeline": { + "Properties": { + "Activate": "true", + "Name": "testDataPipeline", + "PipelineObjects": [ + { + "Fields": [ + { + "Key": "failureAndRerunMode", + "StringValue": "CASCADE" + }, + { + "Key": "scheduleType", + "StringValue": "cron" + }, + { + "Key": "schedule", + "RefValue": "DefaultSchedule" + }, + { + "Key": "pipelineLogUri", + "StringValue": "s3://bucket/logs" + }, + { + "Key": "type", + "StringValue": "Default" + }, + ], + "Id": "Default", + "Name": "Default" + }, + { + "Fields": [ + { + "Key": "startDateTime", + "StringValue": "1970-01-01T01:00:00" + }, + { + "Key": "period", + "StringValue": "1 Day" + }, + { + "Key": "type", + "StringValue": "Schedule" + } + ], + "Id": "DefaultSchedule", + "Name": "RunOnce" + } + ], + "PipelineTags": [] + }, + "Type": "AWS::DataPipeline::Pipeline" + } + } + } + cf_conn = 
boto.cloudformation.connect_to_region("us-east-1") + template_json = json.dumps(dp_template) + stack_id = cf_conn.create_stack( + "test_stack", + template_body=template_json, + ) + + dp_conn = boto.datapipeline.connect_to_region('us-east-1') + data_pipelines = dp_conn.list_pipelines() + + data_pipelines['pipelineIdList'].should.have.length_of(1) + data_pipelines['pipelineIdList'][0][ + 'name'].should.equal('testDataPipeline') + + stack_resources = cf_conn.list_stack_resources(stack_id) + stack_resources.should.have.length_of(1) + stack_resources[0].physical_resource_id.should.equal( + data_pipelines['pipelineIdList'][0]['id']) + + +@mock_cloudformation +@mock_lambda +def test_lambda_function(): + # switch this to python as backend lambda only supports python execution. + lambda_code = """ +def lambda_handler(event, context): + return (event, context) +""" + template = { + "AWSTemplateFormatVersion": "2010-09-09", + "Resources": { + "lambdaTest": { + "Type": "AWS::Lambda::Function", + "Properties": { + "Code": { + # CloudFormation expects a string as ZipFile, not a ZIP file base64-encoded + "ZipFile": {"Fn::Join": ["\n", lambda_code.splitlines()]} + }, + "Handler": "lambda_function.handler", + "Description": "Test function", + "MemorySize": 128, + "Role": "test-role", + "Runtime": "python2.7" + } + } + } + } + + template_json = json.dumps(template) + cf_conn = boto3.client('cloudformation', 'us-east-1') + cf_conn.create_stack( + StackName="test_stack", + TemplateBody=template_json, + ) + + conn = boto3.client('lambda', 'us-east-1') + result = conn.list_functions() + result['Functions'].should.have.length_of(1) + result['Functions'][0]['Description'].should.equal('Test function') + result['Functions'][0]['Handler'].should.equal('lambda_function.handler') + result['Functions'][0]['MemorySize'].should.equal(128) + result['Functions'][0]['Role'].should.equal('test-role') + result['Functions'][0]['Runtime'].should.equal('python2.7') + + +@mock_cloudformation +@mock_ec2 +def test_nat_gateway(): + ec2_conn = boto3.client('ec2', 'us-east-1') + vpc_id = ec2_conn.create_vpc(CidrBlock="10.0.0.0/16")['Vpc']['VpcId'] + subnet_id = ec2_conn.create_subnet( + CidrBlock='10.0.1.0/24', VpcId=vpc_id)['Subnet']['SubnetId'] + route_table_id = ec2_conn.create_route_table( + VpcId=vpc_id)['RouteTable']['RouteTableId'] + + template = { + "AWSTemplateFormatVersion": "2010-09-09", + "Resources": { + "NAT": { + "DependsOn": "vpcgatewayattachment", + "Type": "AWS::EC2::NatGateway", + "Properties": { + "AllocationId": {"Fn::GetAtt": ["EIP", "AllocationId"]}, + "SubnetId": subnet_id + } + }, + "EIP": { + "Type": "AWS::EC2::EIP", + "Properties": { + "Domain": "vpc" + } + }, + "Route": { + "Type": "AWS::EC2::Route", + "Properties": { + "RouteTableId": route_table_id, + "DestinationCidrBlock": "0.0.0.0/0", + "NatGatewayId": {"Ref": "NAT"} + } + }, + "internetgateway": { + "Type": "AWS::EC2::InternetGateway" + }, + "vpcgatewayattachment": { + "Type": "AWS::EC2::VPCGatewayAttachment", + "Properties": { + "InternetGatewayId": { + "Ref": "internetgateway" + }, + "VpcId": vpc_id, + }, + } + } + } + + cf_conn = boto3.client('cloudformation', 'us-east-1') + cf_conn.create_stack( + StackName="test_stack", + TemplateBody=json.dumps(template), + ) + + result = ec2_conn.describe_nat_gateways() + + result['NatGateways'].should.have.length_of(1) + result['NatGateways'][0]['VpcId'].should.equal(vpc_id) + result['NatGateways'][0]['SubnetId'].should.equal(subnet_id) + result['NatGateways'][0]['State'].should.equal('available') + + 
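+# If route creation is also of interest, a follow-up check for the test above
+# could look roughly like this sketch (not part of the original test; it
+# assumes moto records the NAT route on the route table created above):
+#
+#     routes = ec2_conn.describe_route_tables(
+#         RouteTableIds=[route_table_id])['RouteTables'][0]['Routes']
+#     any(route.get('NatGatewayId') for route in routes).should.equal(True)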
+@mock_cloudformation() +@mock_kms() +def test_stack_kms(): + kms_key_template = { + 'Resources': { + 'kmskey': { + 'Properties': { + 'Description': 'A kms key', + 'EnableKeyRotation': True, + 'Enabled': True, + 'KeyPolicy': 'a policy', + }, + 'Type': 'AWS::KMS::Key' + } + } + } + kms_key_template_json = json.dumps(kms_key_template) + + cf_conn = boto3.client('cloudformation', 'us-east-1') + cf_conn.create_stack( + StackName='test_stack', + TemplateBody=kms_key_template_json, + ) + + kms_conn = boto3.client('kms', 'us-east-1') + keys = kms_conn.list_keys()['Keys'] + len(keys).should.equal(1) + result = kms_conn.describe_key(KeyId=keys[0]['KeyId']) + + result['KeyMetadata']['Enabled'].should.equal(True) + result['KeyMetadata']['KeyUsage'].should.equal('ENCRYPT_DECRYPT') + + +@mock_cloudformation() +@mock_ec2() +def test_stack_spot_fleet(): + conn = boto3.client('ec2', 'us-east-1') + + vpc = conn.create_vpc(CidrBlock="10.0.0.0/8")['Vpc'] + subnet = conn.create_subnet( + VpcId=vpc['VpcId'], CidrBlock='10.0.0.0/16', AvailabilityZone='us-east-1a')['Subnet'] + subnet_id = subnet['SubnetId'] + + spot_fleet_template = { + 'Resources': { + "SpotFleet": { + "Type": "AWS::EC2::SpotFleet", + "Properties": { + "SpotFleetRequestConfigData": { + "IamFleetRole": "arn:aws:iam::123456789012:role/fleet", + "SpotPrice": "0.12", + "TargetCapacity": 6, + "AllocationStrategy": "diversified", + "LaunchSpecifications": [ + { + "EbsOptimized": "false", + "InstanceType": 't2.small', + "ImageId": "ami-1234", + "SubnetId": subnet_id, + "WeightedCapacity": "2", + "SpotPrice": "0.13", + }, + { + "EbsOptimized": "true", + "InstanceType": 't2.large', + "ImageId": "ami-1234", + "Monitoring": {"Enabled": "true"}, + "SecurityGroups": [{"GroupId": "sg-123"}], + "SubnetId": subnet_id, + "IamInstanceProfile": {"Arn": "arn:aws:iam::123456789012:role/fleet"}, + "WeightedCapacity": "4", + "SpotPrice": "10.00", + } + ] + } + } + } + } + } + spot_fleet_template_json = json.dumps(spot_fleet_template) + + cf_conn = boto3.client('cloudformation', 'us-east-1') + stack_id = cf_conn.create_stack( + StackName='test_stack', + TemplateBody=spot_fleet_template_json, + )['StackId'] + + stack_resources = cf_conn.list_stack_resources(StackName=stack_id) + stack_resources['StackResourceSummaries'].should.have.length_of(1) + spot_fleet_id = stack_resources[ + 'StackResourceSummaries'][0]['PhysicalResourceId'] + + spot_fleet_requests = conn.describe_spot_fleet_requests( + SpotFleetRequestIds=[spot_fleet_id])['SpotFleetRequestConfigs'] + len(spot_fleet_requests).should.equal(1) + spot_fleet_request = spot_fleet_requests[0] + spot_fleet_request['SpotFleetRequestState'].should.equal("active") + spot_fleet_config = spot_fleet_request['SpotFleetRequestConfig'] + + spot_fleet_config['SpotPrice'].should.equal('0.12') + spot_fleet_config['TargetCapacity'].should.equal(6) + spot_fleet_config['IamFleetRole'].should.equal( + 'arn:aws:iam::123456789012:role/fleet') + spot_fleet_config['AllocationStrategy'].should.equal('diversified') + spot_fleet_config['FulfilledCapacity'].should.equal(6.0) + + len(spot_fleet_config['LaunchSpecifications']).should.equal(2) + launch_spec = spot_fleet_config['LaunchSpecifications'][0] + + launch_spec['EbsOptimized'].should.equal(False) + launch_spec['ImageId'].should.equal("ami-1234") + launch_spec['InstanceType'].should.equal("t2.small") + launch_spec['SubnetId'].should.equal(subnet_id) + launch_spec['SpotPrice'].should.equal("0.13") + launch_spec['WeightedCapacity'].should.equal(2.0) + + +@mock_cloudformation() +@mock_ec2() 
+def test_stack_spot_fleet_should_figure_out_default_price():
+    conn = boto3.client('ec2', 'us-east-1')
+
+    vpc = conn.create_vpc(CidrBlock="10.0.0.0/8")['Vpc']
+    subnet = conn.create_subnet(
+        VpcId=vpc['VpcId'], CidrBlock='10.0.0.0/16', AvailabilityZone='us-east-1a')['Subnet']
+    subnet_id = subnet['SubnetId']
+
+    spot_fleet_template = {
+        'Resources': {
+            "SpotFleet1": {
+                "Type": "AWS::EC2::SpotFleet",
+                "Properties": {
+                    "SpotFleetRequestConfigData": {
+                        "IamFleetRole": "arn:aws:iam::123456789012:role/fleet",
+                        "TargetCapacity": 6,
+                        "AllocationStrategy": "diversified",
+                        "LaunchSpecifications": [
+                            {
+                                "EbsOptimized": "false",
+                                "InstanceType": 't2.small',
+                                "ImageId": "ami-1234",
+                                "SubnetId": subnet_id,
+                                "WeightedCapacity": "2",
+                            },
+                            {
+                                "EbsOptimized": "true",
+                                "InstanceType": 't2.large',
+                                "ImageId": "ami-1234",
+                                "Monitoring": {"Enabled": "true"},
+                                "SecurityGroups": [{"GroupId": "sg-123"}],
+                                "SubnetId": subnet_id,
+                                "IamInstanceProfile": {"Arn": "arn:aws:iam::123456789012:role/fleet"},
+                                "WeightedCapacity": "4",
+                            }
+                        ]
+                    }
+                }
+            }
+        }
+    }
+    spot_fleet_template_json = json.dumps(spot_fleet_template)
+
+    cf_conn = boto3.client('cloudformation', 'us-east-1')
+    stack_id = cf_conn.create_stack(
+        StackName='test_stack',
+        TemplateBody=spot_fleet_template_json,
+    )['StackId']
+
+    stack_resources = cf_conn.list_stack_resources(StackName=stack_id)
+    stack_resources['StackResourceSummaries'].should.have.length_of(1)
+    spot_fleet_id = stack_resources[
+        'StackResourceSummaries'][0]['PhysicalResourceId']
+
+    spot_fleet_requests = conn.describe_spot_fleet_requests(
+        SpotFleetRequestIds=[spot_fleet_id])['SpotFleetRequestConfigs']
+    len(spot_fleet_requests).should.equal(1)
+    spot_fleet_request = spot_fleet_requests[0]
+    spot_fleet_request['SpotFleetRequestState'].should.equal("active")
+    spot_fleet_config = spot_fleet_request['SpotFleetRequestConfig']
+
+    assert 'SpotPrice' not in spot_fleet_config
+    len(spot_fleet_config['LaunchSpecifications']).should.equal(2)
+    launch_spec1 = spot_fleet_config['LaunchSpecifications'][0]
+    launch_spec2 = spot_fleet_config['LaunchSpecifications'][1]
+
+    assert 'SpotPrice' not in launch_spec1
+    assert 'SpotPrice' not in launch_spec2
+
+
+@mock_ec2
+@mock_elbv2
+@mock_cloudformation
+def test_stack_elbv2_resources_integration():
+    alb_template = {
+        "AWSTemplateFormatVersion": "2010-09-09",
+        "Outputs": {
+            "albdns": {
+                "Description": "Load balancer DNS",
+                "Value": {"Fn::GetAtt": ["alb", "DNSName"]},
+            },
+            "albname": {
+                "Description": "Load balancer name",
+                "Value": {"Fn::GetAtt": ["alb", "LoadBalancerName"]},
+            },
+            "canonicalhostedzoneid": {
+                "Description": "Load balancer canonical hosted zone ID",
+                "Value": {"Fn::GetAtt": ["alb", "CanonicalHostedZoneID"]},
+            },
+        },
+        "Resources": {
+            "alb": {
+                "Type": "AWS::ElasticLoadBalancingV2::LoadBalancer",
+                "Properties": {
+                    "Name": "myelbv2",
+                    "Scheme": "internet-facing",
+                    "Subnets": [{
+                        "Ref": "mysubnet",
+                    }],
+                    "SecurityGroups": [{
+                        "Ref": "mysg",
+                    }],
+                    "Type": "application",
+                    "IpAddressType": "ipv4",
+                }
+            },
+            "mytargetgroup1": {
+                "Type": "AWS::ElasticLoadBalancingV2::TargetGroup",
+                "Properties": {
+                    "HealthCheckIntervalSeconds": 30,
+                    "HealthCheckPath": "/status",
+                    "HealthCheckPort": 80,
+                    "HealthCheckProtocol": "HTTP",
+                    "HealthCheckTimeoutSeconds": 5,
+                    "HealthyThresholdCount": 30,
+                    "UnhealthyThresholdCount": 5,
+                    "Matcher": {
+                        "HttpCode": "200,201"
+                    },
+                    "Name": "mytargetgroup1",
+                    "Port": 80,
+                    "Protocol": "HTTP",
+                    "TargetType": "instance",
+                    "Targets": [{
+                        "Id": {
+                            "Ref": 
"ec2instance", + "Port": 80, + }, + }], + "VpcId": { + "Ref": "myvpc", + } + } + }, + "mytargetgroup2": { + "Type": "AWS::ElasticLoadBalancingV2::TargetGroup", + "Properties": { + "HealthCheckIntervalSeconds": 30, + "HealthCheckPath": "/status", + "HealthCheckPort": 8080, + "HealthCheckProtocol": "HTTP", + "HealthCheckTimeoutSeconds": 5, + "HealthyThresholdCount": 30, + "UnhealthyThresholdCount": 5, + "Name": "mytargetgroup2", + "Port": 8080, + "Protocol": "HTTP", + "TargetType": "instance", + "Targets": [{ + "Id": { + "Ref": "ec2instance", + "Port": 8080, + }, + }], + "VpcId": { + "Ref": "myvpc", + } + } + }, + "listener": { + "Type": "AWS::ElasticLoadBalancingV2::Listener", + "Properties": { + "DefaultActions": [{ + "Type": "forward", + "TargetGroupArn": {"Ref": "mytargetgroup1"} + }], + "LoadBalancerArn": {"Ref": "alb"}, + "Port": "80", + "Protocol": "HTTP" + } + }, + "myvpc": { + "Type": "AWS::EC2::VPC", + "Properties": { + "CidrBlock": "10.0.0.0/16", + } + }, + "mysubnet": { + "Type": "AWS::EC2::Subnet", + "Properties": { + "CidrBlock": "10.0.0.0/27", + "VpcId": {"Ref": "myvpc"}, + } + }, + "mysg": { + "Type": "AWS::EC2::SecurityGroup", + "Properties": { + "GroupName": "mysg", + "GroupDescription": "test security group", + "VpcId": {"Ref": "myvpc"} + } + }, + "ec2instance": { + "Type": "AWS::EC2::Instance", + "Properties": { + "ImageId": "ami-1234abcd", + "UserData": "some user data", + } + }, + }, + } + alb_template_json = json.dumps(alb_template) + + cfn_conn = boto3.client("cloudformation", "us-west-1") + cfn_conn.create_stack( + StackName="elb_stack", + TemplateBody=alb_template_json, + ) + + elbv2_conn = boto3.client("elbv2", "us-west-1") + + load_balancers = elbv2_conn.describe_load_balancers()['LoadBalancers'] + len(load_balancers).should.equal(1) + load_balancers[0]['LoadBalancerName'].should.equal('myelbv2') + load_balancers[0]['Scheme'].should.equal('internet-facing') + load_balancers[0]['Type'].should.equal('application') + load_balancers[0]['IpAddressType'].should.equal('ipv4') + + target_groups = sorted( + elbv2_conn.describe_target_groups()['TargetGroups'], + key=lambda tg: tg['TargetGroupName']) # sort to do comparison with indexes + len(target_groups).should.equal(2) + target_groups[0]['HealthCheckIntervalSeconds'].should.equal(30) + target_groups[0]['HealthCheckPath'].should.equal('/status') + target_groups[0]['HealthCheckPort'].should.equal('80') + target_groups[0]['HealthCheckProtocol'].should.equal('HTTP') + target_groups[0]['HealthCheckTimeoutSeconds'].should.equal(5) + target_groups[0]['HealthyThresholdCount'].should.equal(30) + target_groups[0]['UnhealthyThresholdCount'].should.equal(5) + target_groups[0]['Matcher'].should.equal({'HttpCode': '200,201'}) + target_groups[0]['TargetGroupName'].should.equal('mytargetgroup1') + target_groups[0]['Port'].should.equal(80) + target_groups[0]['Protocol'].should.equal('HTTP') + target_groups[0]['TargetType'].should.equal('instance') + + target_groups[1]['HealthCheckIntervalSeconds'].should.equal(30) + target_groups[1]['HealthCheckPath'].should.equal('/status') + target_groups[1]['HealthCheckPort'].should.equal('8080') + target_groups[1]['HealthCheckProtocol'].should.equal('HTTP') + target_groups[1]['HealthCheckTimeoutSeconds'].should.equal(5) + target_groups[1]['HealthyThresholdCount'].should.equal(30) + target_groups[1]['UnhealthyThresholdCount'].should.equal(5) + target_groups[1]['Matcher'].should.equal({'HttpCode': '200'}) + target_groups[1]['TargetGroupName'].should.equal('mytargetgroup2') + 
target_groups[1]['Port'].should.equal(8080) + target_groups[1]['Protocol'].should.equal('HTTP') + target_groups[1]['TargetType'].should.equal('instance') + + listeners = elbv2_conn.describe_listeners(LoadBalancerArn=load_balancers[0]['LoadBalancerArn'])['Listeners'] + len(listeners).should.equal(1) + listeners[0]['LoadBalancerArn'].should.equal(load_balancers[0]['LoadBalancerArn']) + listeners[0]['Port'].should.equal(80) + listeners[0]['Protocol'].should.equal('HTTP') + listeners[0]['DefaultActions'].should.equal([{ + "Type": "forward", + "TargetGroupArn": target_groups[0]['TargetGroupArn'] + }]) + + # test outputs + stacks = cfn_conn.describe_stacks(StackName='elb_stack')['Stacks'] + len(stacks).should.equal(1) + + dns = list(filter(lambda item: item['OutputKey'] == 'albdns', stacks[0]['Outputs']))[0] + name = list(filter(lambda item: item['OutputKey'] == 'albname', stacks[0]['Outputs']))[0] + + dns['OutputValue'].should.equal(load_balancers[0]['DNSName']) + name['OutputValue'].should.equal(load_balancers[0]['LoadBalancerName']) diff --git a/tests/test_cloudformation/test_import_value.py b/tests/test_cloudformation/test_import_value.py index 04c2b580110e..d702753a65ca 100644 --- a/tests/test_cloudformation/test_import_value.py +++ b/tests/test_cloudformation/test_import_value.py @@ -1,87 +1,87 @@ -# -*- coding: utf-8 -*- -from __future__ import absolute_import, division, print_function, unicode_literals - -# Standard library modules -import unittest - -# Third-party modules -import boto3 -from botocore.exceptions import ClientError - -# Package modules -from moto import mock_cloudformation - -AWS_REGION = 'us-west-1' - -SG_STACK_NAME = 'simple-sg-stack' -SG_TEMPLATE = """ -AWSTemplateFormatVersion: 2010-09-09 -Description: Simple test CF template for moto_cloudformation - - -Resources: - SimpleSecurityGroup: - Type: AWS::EC2::SecurityGroup - Description: "A simple security group" - Properties: - GroupName: simple-security-group - GroupDescription: "A simple security group" - SecurityGroupEgress: - - - Description: "Egress to remote HTTPS servers" - CidrIp: 0.0.0.0/0 - IpProtocol: tcp - FromPort: 443 - ToPort: 443 - -Outputs: - SimpleSecurityGroupName: - Value: !GetAtt SimpleSecurityGroup.GroupId - Export: - Name: "SimpleSecurityGroup" - -""" - -EC2_STACK_NAME = 'simple-ec2-stack' -EC2_TEMPLATE = """ ---- -# The latest template format version is "2010-09-09" and as of 2018-04-09 -# is currently the only valid value. 
-AWSTemplateFormatVersion: 2010-09-09 -Description: Simple test CF template for moto_cloudformation - - -Resources: - SimpleInstance: - Type: AWS::EC2::Instance - Properties: - ImageId: ami-03cf127a - InstanceType: t2.micro - SecurityGroups: !Split [',', !ImportValue SimpleSecurityGroup] -""" - - -class TestSimpleInstance(unittest.TestCase): - def test_simple_instance(self): - """Test that we can create a simple CloudFormation stack that imports values from an existing CloudFormation stack""" - with mock_cloudformation(): - client = boto3.client('cloudformation', region_name=AWS_REGION) - client.create_stack(StackName=SG_STACK_NAME, TemplateBody=SG_TEMPLATE) - response = client.create_stack(StackName=EC2_STACK_NAME, TemplateBody=EC2_TEMPLATE) - self.assertIn('StackId', response) - response = client.describe_stacks(StackName=response['StackId']) - self.assertIn('Stacks', response) - stack_info = response['Stacks'] - self.assertEqual(1, len(stack_info)) - self.assertIn('StackName', stack_info[0]) - self.assertEqual(EC2_STACK_NAME, stack_info[0]['StackName']) - - def test_simple_instance_missing_export(self): - """Test that we get an exception if a CloudFormation stack tries to imports a non-existent export value""" - with mock_cloudformation(): - client = boto3.client('cloudformation', region_name=AWS_REGION) - with self.assertRaises(ClientError) as e: - client.create_stack(StackName=EC2_STACK_NAME, TemplateBody=EC2_TEMPLATE) - self.assertIn('Error', e.exception.response) - self.assertIn('Code', e.exception.response['Error']) - self.assertEqual('ExportNotFound', e.exception.response['Error']['Code']) +# -*- coding: utf-8 -*- +from __future__ import absolute_import, division, print_function, unicode_literals + +# Standard library modules +import unittest + +# Third-party modules +import boto3 +from botocore.exceptions import ClientError + +# Package modules +from moto import mock_cloudformation + +AWS_REGION = 'us-west-1' + +SG_STACK_NAME = 'simple-sg-stack' +SG_TEMPLATE = """ +AWSTemplateFormatVersion: 2010-09-09 +Description: Simple test CF template for moto_cloudformation + + +Resources: + SimpleSecurityGroup: + Type: AWS::EC2::SecurityGroup + Description: "A simple security group" + Properties: + GroupName: simple-security-group + GroupDescription: "A simple security group" + SecurityGroupEgress: + - + Description: "Egress to remote HTTPS servers" + CidrIp: 0.0.0.0/0 + IpProtocol: tcp + FromPort: 443 + ToPort: 443 + +Outputs: + SimpleSecurityGroupName: + Value: !GetAtt SimpleSecurityGroup.GroupId + Export: + Name: "SimpleSecurityGroup" + +""" + +EC2_STACK_NAME = 'simple-ec2-stack' +EC2_TEMPLATE = """ +--- +# The latest template format version is "2010-09-09" and as of 2018-04-09 +# is currently the only valid value. 
+AWSTemplateFormatVersion: 2010-09-09
+Description: Simple test CF template for moto_cloudformation
+
+
+Resources:
+  SimpleInstance:
+    Type: AWS::EC2::Instance
+    Properties:
+      ImageId: ami-03cf127a
+      InstanceType: t2.micro
+      SecurityGroups: !Split [',', !ImportValue SimpleSecurityGroup]
+"""
+
+
+class TestSimpleInstance(unittest.TestCase):
+    def test_simple_instance(self):
+        """Test that we can create a simple CloudFormation stack that imports values from an existing CloudFormation stack"""
+        with mock_cloudformation():
+            client = boto3.client('cloudformation', region_name=AWS_REGION)
+            client.create_stack(StackName=SG_STACK_NAME, TemplateBody=SG_TEMPLATE)
+            response = client.create_stack(StackName=EC2_STACK_NAME, TemplateBody=EC2_TEMPLATE)
+            self.assertIn('StackId', response)
+            response = client.describe_stacks(StackName=response['StackId'])
+            self.assertIn('Stacks', response)
+            stack_info = response['Stacks']
+            self.assertEqual(1, len(stack_info))
+            self.assertIn('StackName', stack_info[0])
+            self.assertEqual(EC2_STACK_NAME, stack_info[0]['StackName'])
+
+    def test_simple_instance_missing_export(self):
+        """Test that we get an exception if a CloudFormation stack tries to import a non-existent export value"""
+        with mock_cloudformation():
+            client = boto3.client('cloudformation', region_name=AWS_REGION)
+            with self.assertRaises(ClientError) as e:
+                client.create_stack(StackName=EC2_STACK_NAME, TemplateBody=EC2_TEMPLATE)
+            self.assertIn('Error', e.exception.response)
+            self.assertIn('Code', e.exception.response['Error'])
+            self.assertEqual('ExportNotFound', e.exception.response['Error']['Code'])
diff --git a/tests/test_cloudformation/test_server.py b/tests/test_cloudformation/test_server.py
index de3ab77b5783..11f810357ff2 100644
--- a/tests/test_cloudformation/test_server.py
+++ b/tests/test_cloudformation/test_server.py
@@ -1,33 +1,33 @@
-from __future__ import unicode_literals
-
-import json
-from six.moves.urllib.parse import urlencode
-import re
-import sure  # noqa
-
-import moto.server as server
-
-'''
-Test the different server responses
-'''
-
-
-def test_cloudformation_server_get():
-    backend = server.create_backend_app("cloudformation")
-    stack_name = 'test stack'
-    test_client = backend.test_client()
-    template_body = {
-        "Resources": {},
-    }
-    create_stack_resp = test_client.action_data("CreateStack", StackName=stack_name,
-                                                TemplateBody=json.dumps(template_body))
-    create_stack_resp.should.match(
-        r".*<CreateStackResponse>.*<StackId>.*</StackId>.*</CreateStackResponse>.*", re.DOTALL)
-    stack_id_from_create_response = re.search(
-        "<StackId>(.*)</StackId>", create_stack_resp).groups()[0]
-
-    list_stacks_resp = test_client.action_data("ListStacks")
-    stack_id_from_list_response = re.search(
-        "<StackId>(.*)</StackId>", list_stacks_resp).groups()[0]
-
-    stack_id_from_create_response.should.equal(stack_id_from_list_response)
+from __future__ import unicode_literals
+
+import json
+from six.moves.urllib.parse import urlencode
+import re
+import sure  # noqa
+
+import moto.server as server
+
+'''
+Test the different server responses
+'''
+
+
+def test_cloudformation_server_get():
+    backend = server.create_backend_app("cloudformation")
+    stack_name = 'test stack'
+    test_client = backend.test_client()
+    template_body = {
+        "Resources": {},
+    }
+    create_stack_resp = test_client.action_data("CreateStack", StackName=stack_name,
+                                                TemplateBody=json.dumps(template_body))
+    create_stack_resp.should.match(
+        r".*<CreateStackResponse>.*<StackId>.*</StackId>.*</CreateStackResponse>.*", re.DOTALL)
+    stack_id_from_create_response = re.search(
+        "<StackId>(.*)</StackId>", create_stack_resp).groups()[0]
+
+    list_stacks_resp = test_client.action_data("ListStacks")
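+    # ListStacks should echo back the same StackId that CreateStack returned.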
+    stack_id_from_list_response = re.search(
+        "<StackId>(.*)</StackId>", list_stacks_resp).groups()[0]
+
+    stack_id_from_create_response.should.equal(stack_id_from_list_response)
diff --git a/tests/test_cloudformation/test_stack_parsing.py b/tests/test_cloudformation/test_stack_parsing.py
index d25c69cf1f8c..9aea55f54e98 100644
--- a/tests/test_cloudformation/test_stack_parsing.py
+++ b/tests/test_cloudformation/test_stack_parsing.py
@@ -1,471 +1,471 @@
-from __future__ import unicode_literals
-import json
-import yaml
-
-from mock import patch
-import sure  # noqa
-
-from moto.cloudformation.exceptions import ValidationError
-from moto.cloudformation.models import FakeStack
-from moto.cloudformation.parsing import resource_class_from_type, parse_condition, Export
-from moto.sqs.models import Queue
-from moto.s3.models import FakeBucket
-from moto.cloudformation.utils import yaml_tag_constructor
-from boto.cloudformation.stack import Output
-
-
-
-dummy_template = {
-    "AWSTemplateFormatVersion": "2010-09-09",
-
-    "Description": "Create a multi-az, load balanced, Auto Scaled sample web site. The Auto Scaling trigger is based on the CPU utilization of the web servers. The AMI is chosen based on the region in which the stack is run. This example creates a web service running across all availability zones in a region. The instances are load balanced with a simple health check. The web site is available on port 80, however, the instances can be configured to listen on any port (8888 by default). **WARNING** This template creates one or more Amazon EC2 instances. You will be billed for the AWS resources used if you create a stack from this template.",
-
-    "Resources": {
-        "Queue": {
-            "Type": "AWS::SQS::Queue",
-            "Properties": {
-                "QueueName": "my-queue",
-                "VisibilityTimeout": 60,
-            }
-        },
-        "S3Bucket": {
-            "Type": "AWS::S3::Bucket",
-            "DeletionPolicy": "Retain"
-        },
-    },
-}
-
-name_type_template = {
-    "AWSTemplateFormatVersion": "2010-09-09",
-
-    "Description": "Create a multi-az, load balanced, Auto Scaled sample web site. The Auto Scaling trigger is based on the CPU utilization of the web servers. The AMI is chosen based on the region in which the stack is run. This example creates a web service running across all availability zones in a region. The instances are load balanced with a simple health check. The web site is available on port 80, however, the instances can be configured to listen on any port (8888 by default). **WARNING** This template creates one or more Amazon EC2 instances. You will be billed for the AWS resources used if you create a stack from this template.",
-
-    "Resources": {
-        "Queue": {
-            "Type": "AWS::SQS::Queue",
-            "Properties": {
-                "VisibilityTimeout": 60,
-            }
-        },
-    },
-}
-
-output_dict = {
-    "Outputs": {
-        "Output1": {
-            "Value": {"Ref": "Queue"},
-            "Description": "This is a description."
- } - } -} - -bad_output = { - "Outputs": { - "Output1": { - "Value": {"Fn::GetAtt": ["Queue", "InvalidAttribute"]} - } - } -} - -get_attribute_output = { - "Outputs": { - "Output1": { - "Value": {"Fn::GetAtt": ["Queue", "QueueName"]} - } - } -} - -get_availability_zones_output = { - "Outputs": { - "Output1": { - "Value": {"Fn::GetAZs": ""} - } - } -} - -split_select_template = { - "AWSTemplateFormatVersion": "2010-09-09", - "Resources": { - "Queue": { - "Type": "AWS::SQS::Queue", - "Properties": { - "QueueName": {"Fn::Select": [ "1", {"Fn::Split": [ "-", "123-myqueue" ] } ] }, - "VisibilityTimeout": 60, - } - } - } -} - -sub_template = { - "AWSTemplateFormatVersion": "2010-09-09", - "Resources": { - "Queue1": { - "Type": "AWS::SQS::Queue", - "Properties": { - "QueueName": {"Fn::Sub": '${AWS::StackName}-queue-${!Literal}'}, - "VisibilityTimeout": 60, - } - }, - "Queue2": { - "Type": "AWS::SQS::Queue", - "Properties": { - "QueueName": {"Fn::Sub": '${Queue1.QueueName}'}, - "VisibilityTimeout": 60, - } - }, - } -} - -export_value_template = { - "AWSTemplateFormatVersion": "2010-09-09", - "Resources": { - "Queue": { - "Type": "AWS::SQS::Queue", - "Properties": { - "QueueName": {"Fn::Sub": '${AWS::StackName}-queue'}, - "VisibilityTimeout": 60, - } - } - }, - "Outputs": { - "Output1": { - "Value": "value", - "Export": {"Name": 'queue-us-west-1'} - } - } -} - -import_value_template = { - "AWSTemplateFormatVersion": "2010-09-09", - "Resources": { - "Queue": { - "Type": "AWS::SQS::Queue", - "Properties": { - "QueueName": {"Fn::ImportValue": 'queue-us-west-1'}, - "VisibilityTimeout": 60, - } - } - } -} - -outputs_template = dict(list(dummy_template.items()) + - list(output_dict.items())) -bad_outputs_template = dict( - list(dummy_template.items()) + list(bad_output.items())) -get_attribute_outputs_template = dict( - list(dummy_template.items()) + list(get_attribute_output.items())) -get_availability_zones_template = dict( - list(dummy_template.items()) + list(get_availability_zones_output.items())) - -dummy_template_json = json.dumps(dummy_template) -name_type_template_json = json.dumps(name_type_template) -output_type_template_json = json.dumps(outputs_template) -bad_output_template_json = json.dumps(bad_outputs_template) -get_attribute_outputs_template_json = json.dumps( - get_attribute_outputs_template) -get_availability_zones_template_json = json.dumps( - get_availability_zones_template) -split_select_template_json = json.dumps(split_select_template) -sub_template_json = json.dumps(sub_template) -export_value_template_json = json.dumps(export_value_template) -import_value_template_json = json.dumps(import_value_template) - - -def test_parse_stack_resources(): - stack = FakeStack( - stack_id="test_id", - name="test_stack", - template=dummy_template_json, - parameters={}, - region_name='us-west-1') - - stack.resource_map.should.have.length_of(2) - - queue = stack.resource_map['Queue'] - queue.should.be.a(Queue) - queue.name.should.equal("my-queue") - - bucket = stack.resource_map['S3Bucket'] - bucket.should.be.a(FakeBucket) - bucket.physical_resource_id.should.equal(bucket.name) - - -@patch("moto.cloudformation.parsing.logger") -def test_missing_resource_logs(logger): - resource_class_from_type("foobar") - logger.warning.assert_called_with( - 'No Moto CloudFormation support for %s', 'foobar') - - -def test_parse_stack_with_name_type_resource(): - stack = FakeStack( - stack_id="test_id", - name="test_stack", - template=name_type_template_json, - parameters={}, - region_name='us-west-1') - - 
stack.resource_map.should.have.length_of(1) - list(stack.resource_map.keys())[0].should.equal('Queue') - queue = list(stack.resource_map.values())[0] - queue.should.be.a(Queue) - - -def test_parse_stack_with_yaml_template(): - stack = FakeStack( - stack_id="test_id", - name="test_stack", - template=yaml.dump(name_type_template), - parameters={}, - region_name='us-west-1') - - stack.resource_map.should.have.length_of(1) - list(stack.resource_map.keys())[0].should.equal('Queue') - queue = list(stack.resource_map.values())[0] - queue.should.be.a(Queue) - - -def test_parse_stack_with_outputs(): - stack = FakeStack( - stack_id="test_id", - name="test_stack", - template=output_type_template_json, - parameters={}, - region_name='us-west-1') - - stack.output_map.should.have.length_of(1) - list(stack.output_map.keys())[0].should.equal('Output1') - output = list(stack.output_map.values())[0] - output.should.be.a(Output) - output.description.should.equal("This is a description.") - - -def test_parse_stack_with_get_attribute_outputs(): - stack = FakeStack( - stack_id="test_id", - name="test_stack", - template=get_attribute_outputs_template_json, - parameters={}, - region_name='us-west-1') - - stack.output_map.should.have.length_of(1) - list(stack.output_map.keys())[0].should.equal('Output1') - output = list(stack.output_map.values())[0] - output.should.be.a(Output) - output.value.should.equal("my-queue") - -def test_parse_stack_with_get_attribute_kms(): - from .fixtures.kms_key import template - - template_json = json.dumps(template) - stack = FakeStack( - stack_id="test_id", - name="test_stack", - template=template_json, - parameters={}, - region_name='us-west-1') - - stack.output_map.should.have.length_of(1) - list(stack.output_map.keys())[0].should.equal('KeyArn') - output = list(stack.output_map.values())[0] - output.should.be.a(Output) - -def test_parse_stack_with_get_availability_zones(): - stack = FakeStack( - stack_id="test_id", - name="test_stack", - template=get_availability_zones_template_json, - parameters={}, - region_name='us-east-1') - - stack.output_map.should.have.length_of(1) - list(stack.output_map.keys())[0].should.equal('Output1') - output = list(stack.output_map.values())[0] - output.should.be.a(Output) - output.value.should.equal([ "us-east-1a", "us-east-1b", "us-east-1c", "us-east-1d" ]) - - -def test_parse_stack_with_bad_get_attribute_outputs(): - FakeStack.when.called_with( - "test_id", "test_stack", bad_output_template_json, {}, "us-west-1").should.throw(ValidationError) - - -def test_parse_equals_condition(): - parse_condition( - condition={"Fn::Equals": [{"Ref": "EnvType"}, "prod"]}, - resources_map={"EnvType": "prod"}, - condition_map={}, - ).should.equal(True) - - parse_condition( - condition={"Fn::Equals": [{"Ref": "EnvType"}, "prod"]}, - resources_map={"EnvType": "staging"}, - condition_map={}, - ).should.equal(False) - - -def test_parse_not_condition(): - parse_condition( - condition={ - "Fn::Not": [{ - "Fn::Equals": [{"Ref": "EnvType"}, "prod"] - }] - }, - resources_map={"EnvType": "prod"}, - condition_map={}, - ).should.equal(False) - - parse_condition( - condition={ - "Fn::Not": [{ - "Fn::Equals": [{"Ref": "EnvType"}, "prod"] - }] - }, - resources_map={"EnvType": "staging"}, - condition_map={}, - ).should.equal(True) - - -def test_parse_and_condition(): - parse_condition( - condition={ - "Fn::And": [ - {"Fn::Equals": [{"Ref": "EnvType"}, "prod"]}, - {"Fn::Equals": [{"Ref": "EnvType"}, "staging"]}, - ] - }, - resources_map={"EnvType": "prod"}, - condition_map={}, - 
).should.equal(False) - - parse_condition( - condition={ - "Fn::And": [ - {"Fn::Equals": [{"Ref": "EnvType"}, "prod"]}, - {"Fn::Equals": [{"Ref": "EnvType"}, "prod"]}, - ] - }, - resources_map={"EnvType": "prod"}, - condition_map={}, - ).should.equal(True) - - -def test_parse_or_condition(): - parse_condition( - condition={ - "Fn::Or": [ - {"Fn::Equals": [{"Ref": "EnvType"}, "prod"]}, - {"Fn::Equals": [{"Ref": "EnvType"}, "staging"]}, - ] - }, - resources_map={"EnvType": "prod"}, - condition_map={}, - ).should.equal(True) - - parse_condition( - condition={ - "Fn::Or": [ - {"Fn::Equals": [{"Ref": "EnvType"}, "staging"]}, - {"Fn::Equals": [{"Ref": "EnvType"}, "staging"]}, - ] - }, - resources_map={"EnvType": "prod"}, - condition_map={}, - ).should.equal(False) - - -def test_reference_other_conditions(): - parse_condition( - condition={"Fn::Not": [{"Condition": "OtherCondition"}]}, - resources_map={}, - condition_map={"OtherCondition": True}, - ).should.equal(False) - - -def test_parse_split_and_select(): - stack = FakeStack( - stack_id="test_id", - name="test_stack", - template=split_select_template_json, - parameters={}, - region_name='us-west-1') - - stack.resource_map.should.have.length_of(1) - queue = stack.resource_map['Queue'] - queue.name.should.equal("myqueue") - - -def test_sub(): - stack = FakeStack( - stack_id="test_id", - name="test_stack", - template=sub_template_json, - parameters={}, - region_name='us-west-1') - - queue1 = stack.resource_map['Queue1'] - queue2 = stack.resource_map['Queue2'] - queue2.name.should.equal(queue1.name) - - -def test_import(): - export_stack = FakeStack( - stack_id="test_id", - name="test_stack", - template=export_value_template_json, - parameters={}, - region_name='us-west-1') - import_stack = FakeStack( - stack_id="test_id", - name="test_stack", - template=import_value_template_json, - parameters={}, - region_name='us-west-1', - cross_stack_resources={export_stack.exports[0].value: export_stack.exports[0]}) - - queue = import_stack.resource_map['Queue'] - queue.name.should.equal("value") - - - -def test_short_form_func_in_yaml_teamplate(): - template = """--- - KeyB64: !Base64 valueToEncode - KeyRef: !Ref foo - KeyAnd: !And - - A - - B - KeyEquals: !Equals [A, B] - KeyIf: !If [A, B, C] - KeyNot: !Not [A] - KeyOr: !Or [A, B] - KeyFindInMap: !FindInMap [A, B, C] - KeyGetAtt: !GetAtt A.B - KeyGetAZs: !GetAZs A - KeyImportValue: !ImportValue A - KeyJoin: !Join [ ":", [A, B, C] ] - KeySelect: !Select [A, B] - KeySplit: !Split [A, B] - KeySub: !Sub A - """ - yaml.add_multi_constructor('', yaml_tag_constructor) - template_dict = yaml.load(template) - key_and_expects = [ - ['KeyRef', {'Ref': 'foo'}], - ['KeyB64', {'Fn::Base64': 'valueToEncode'}], - ['KeyAnd', {'Fn::And': ['A', 'B']}], - ['KeyEquals', {'Fn::Equals': ['A', 'B']}], - ['KeyIf', {'Fn::If': ['A', 'B', 'C']}], - ['KeyNot', {'Fn::Not': ['A']}], - ['KeyOr', {'Fn::Or': ['A', 'B']}], - ['KeyFindInMap', {'Fn::FindInMap': ['A', 'B', 'C']}], - ['KeyGetAtt', {'Fn::GetAtt': ['A', 'B']}], - ['KeyGetAZs', {'Fn::GetAZs': 'A'}], - ['KeyImportValue', {'Fn::ImportValue': 'A'}], - ['KeyJoin', {'Fn::Join': [ ":", [ 'A', 'B', 'C' ] ]}], - ['KeySelect', {'Fn::Select': ['A', 'B']}], - ['KeySplit', {'Fn::Split': ['A', 'B']}], - ['KeySub', {'Fn::Sub': 'A'}], - ] - for k, v in key_and_expects: - template_dict.should.have.key(k).which.should.be.equal(v) +from __future__ import unicode_literals +import json +import yaml + +from mock import patch +import sure # noqa + +from moto.cloudformation.exceptions import 
ValidationError +from moto.cloudformation.models import FakeStack +from moto.cloudformation.parsing import resource_class_from_type, parse_condition, Export +from moto.sqs.models import Queue +from moto.s3.models import FakeBucket +from moto.cloudformation.utils import yaml_tag_constructor +from boto.cloudformation.stack import Output + + + +dummy_template = { + "AWSTemplateFormatVersion": "2010-09-09", + + "Description": "Create a multi-az, load balanced, Auto Scaled sample web site. The Auto Scaling trigger is based on the CPU utilization of the web servers. The AMI is chosen based on the region in which the stack is run. This example creates a web service running across all availability zones in a region. The instances are load balanced with a simple health check. The web site is available on port 80, however, the instances can be configured to listen on any port (8888 by default). **WARNING** This template creates one or more Amazon EC2 instances. You will be billed for the AWS resources used if you create a stack from this template.", + + "Resources": { + "Queue": { + "Type": "AWS::SQS::Queue", + "Properties": { + "QueueName": "my-queue", + "VisibilityTimeout": 60, + } + }, + "S3Bucket": { + "Type": "AWS::S3::Bucket", + "DeletionPolicy": "Retain" + }, + }, +} + +name_type_template = { + "AWSTemplateFormatVersion": "2010-09-09", + + "Description": "Create a multi-az, load balanced, Auto Scaled sample web site. The Auto Scaling trigger is based on the CPU utilization of the web servers. The AMI is chosen based on the region in which the stack is run. This example creates a web service running across all availability zones in a region. The instances are load balanced with a simple health check. The web site is available on port 80, however, the instances can be configured to listen on any port (8888 by default). **WARNING** This template creates one or more Amazon EC2 instances. You will be billed for the AWS resources used if you create a stack from this template.", + + "Resources": { + "Queue": { + "Type": "AWS::SQS::Queue", + "Properties": { + "VisibilityTimeout": 60, + } + }, + }, +} + +output_dict = { + "Outputs": { + "Output1": { + "Value": {"Ref": "Queue"}, + "Description": "This is a description." 
+ } + } +} + +bad_output = { + "Outputs": { + "Output1": { + "Value": {"Fn::GetAtt": ["Queue", "InvalidAttribute"]} + } + } +} + +get_attribute_output = { + "Outputs": { + "Output1": { + "Value": {"Fn::GetAtt": ["Queue", "QueueName"]} + } + } +} + +get_availability_zones_output = { + "Outputs": { + "Output1": { + "Value": {"Fn::GetAZs": ""} + } + } +} + +split_select_template = { + "AWSTemplateFormatVersion": "2010-09-09", + "Resources": { + "Queue": { + "Type": "AWS::SQS::Queue", + "Properties": { + "QueueName": {"Fn::Select": [ "1", {"Fn::Split": [ "-", "123-myqueue" ] } ] }, + "VisibilityTimeout": 60, + } + } + } +} + +sub_template = { + "AWSTemplateFormatVersion": "2010-09-09", + "Resources": { + "Queue1": { + "Type": "AWS::SQS::Queue", + "Properties": { + "QueueName": {"Fn::Sub": '${AWS::StackName}-queue-${!Literal}'}, + "VisibilityTimeout": 60, + } + }, + "Queue2": { + "Type": "AWS::SQS::Queue", + "Properties": { + "QueueName": {"Fn::Sub": '${Queue1.QueueName}'}, + "VisibilityTimeout": 60, + } + }, + } +} + +export_value_template = { + "AWSTemplateFormatVersion": "2010-09-09", + "Resources": { + "Queue": { + "Type": "AWS::SQS::Queue", + "Properties": { + "QueueName": {"Fn::Sub": '${AWS::StackName}-queue'}, + "VisibilityTimeout": 60, + } + } + }, + "Outputs": { + "Output1": { + "Value": "value", + "Export": {"Name": 'queue-us-west-1'} + } + } +} + +import_value_template = { + "AWSTemplateFormatVersion": "2010-09-09", + "Resources": { + "Queue": { + "Type": "AWS::SQS::Queue", + "Properties": { + "QueueName": {"Fn::ImportValue": 'queue-us-west-1'}, + "VisibilityTimeout": 60, + } + } + } +} + +outputs_template = dict(list(dummy_template.items()) + + list(output_dict.items())) +bad_outputs_template = dict( + list(dummy_template.items()) + list(bad_output.items())) +get_attribute_outputs_template = dict( + list(dummy_template.items()) + list(get_attribute_output.items())) +get_availability_zones_template = dict( + list(dummy_template.items()) + list(get_availability_zones_output.items())) + +dummy_template_json = json.dumps(dummy_template) +name_type_template_json = json.dumps(name_type_template) +output_type_template_json = json.dumps(outputs_template) +bad_output_template_json = json.dumps(bad_outputs_template) +get_attribute_outputs_template_json = json.dumps( + get_attribute_outputs_template) +get_availability_zones_template_json = json.dumps( + get_availability_zones_template) +split_select_template_json = json.dumps(split_select_template) +sub_template_json = json.dumps(sub_template) +export_value_template_json = json.dumps(export_value_template) +import_value_template_json = json.dumps(import_value_template) + + +def test_parse_stack_resources(): + stack = FakeStack( + stack_id="test_id", + name="test_stack", + template=dummy_template_json, + parameters={}, + region_name='us-west-1') + + stack.resource_map.should.have.length_of(2) + + queue = stack.resource_map['Queue'] + queue.should.be.a(Queue) + queue.name.should.equal("my-queue") + + bucket = stack.resource_map['S3Bucket'] + bucket.should.be.a(FakeBucket) + bucket.physical_resource_id.should.equal(bucket.name) + + +@patch("moto.cloudformation.parsing.logger") +def test_missing_resource_logs(logger): + resource_class_from_type("foobar") + logger.warning.assert_called_with( + 'No Moto CloudFormation support for %s', 'foobar') + + +def test_parse_stack_with_name_type_resource(): + stack = FakeStack( + stack_id="test_id", + name="test_stack", + template=name_type_template_json, + parameters={}, + region_name='us-west-1') + + 
stack.resource_map.should.have.length_of(1) + list(stack.resource_map.keys())[0].should.equal('Queue') + queue = list(stack.resource_map.values())[0] + queue.should.be.a(Queue) + + +def test_parse_stack_with_yaml_template(): + stack = FakeStack( + stack_id="test_id", + name="test_stack", + template=yaml.dump(name_type_template), + parameters={}, + region_name='us-west-1') + + stack.resource_map.should.have.length_of(1) + list(stack.resource_map.keys())[0].should.equal('Queue') + queue = list(stack.resource_map.values())[0] + queue.should.be.a(Queue) + + +def test_parse_stack_with_outputs(): + stack = FakeStack( + stack_id="test_id", + name="test_stack", + template=output_type_template_json, + parameters={}, + region_name='us-west-1') + + stack.output_map.should.have.length_of(1) + list(stack.output_map.keys())[0].should.equal('Output1') + output = list(stack.output_map.values())[0] + output.should.be.a(Output) + output.description.should.equal("This is a description.") + + +def test_parse_stack_with_get_attribute_outputs(): + stack = FakeStack( + stack_id="test_id", + name="test_stack", + template=get_attribute_outputs_template_json, + parameters={}, + region_name='us-west-1') + + stack.output_map.should.have.length_of(1) + list(stack.output_map.keys())[0].should.equal('Output1') + output = list(stack.output_map.values())[0] + output.should.be.a(Output) + output.value.should.equal("my-queue") + +def test_parse_stack_with_get_attribute_kms(): + from .fixtures.kms_key import template + + template_json = json.dumps(template) + stack = FakeStack( + stack_id="test_id", + name="test_stack", + template=template_json, + parameters={}, + region_name='us-west-1') + + stack.output_map.should.have.length_of(1) + list(stack.output_map.keys())[0].should.equal('KeyArn') + output = list(stack.output_map.values())[0] + output.should.be.a(Output) + +def test_parse_stack_with_get_availability_zones(): + stack = FakeStack( + stack_id="test_id", + name="test_stack", + template=get_availability_zones_template_json, + parameters={}, + region_name='us-east-1') + + stack.output_map.should.have.length_of(1) + list(stack.output_map.keys())[0].should.equal('Output1') + output = list(stack.output_map.values())[0] + output.should.be.a(Output) + output.value.should.equal([ "us-east-1a", "us-east-1b", "us-east-1c", "us-east-1d" ]) + + +def test_parse_stack_with_bad_get_attribute_outputs(): + FakeStack.when.called_with( + "test_id", "test_stack", bad_output_template_json, {}, "us-west-1").should.throw(ValidationError) + + +def test_parse_equals_condition(): + parse_condition( + condition={"Fn::Equals": [{"Ref": "EnvType"}, "prod"]}, + resources_map={"EnvType": "prod"}, + condition_map={}, + ).should.equal(True) + + parse_condition( + condition={"Fn::Equals": [{"Ref": "EnvType"}, "prod"]}, + resources_map={"EnvType": "staging"}, + condition_map={}, + ).should.equal(False) + + +def test_parse_not_condition(): + parse_condition( + condition={ + "Fn::Not": [{ + "Fn::Equals": [{"Ref": "EnvType"}, "prod"] + }] + }, + resources_map={"EnvType": "prod"}, + condition_map={}, + ).should.equal(False) + + parse_condition( + condition={ + "Fn::Not": [{ + "Fn::Equals": [{"Ref": "EnvType"}, "prod"] + }] + }, + resources_map={"EnvType": "staging"}, + condition_map={}, + ).should.equal(True) + + +def test_parse_and_condition(): + parse_condition( + condition={ + "Fn::And": [ + {"Fn::Equals": [{"Ref": "EnvType"}, "prod"]}, + {"Fn::Equals": [{"Ref": "EnvType"}, "staging"]}, + ] + }, + resources_map={"EnvType": "prod"}, + condition_map={}, + 
).should.equal(False) + + parse_condition( + condition={ + "Fn::And": [ + {"Fn::Equals": [{"Ref": "EnvType"}, "prod"]}, + {"Fn::Equals": [{"Ref": "EnvType"}, "prod"]}, + ] + }, + resources_map={"EnvType": "prod"}, + condition_map={}, + ).should.equal(True) + + +def test_parse_or_condition(): + parse_condition( + condition={ + "Fn::Or": [ + {"Fn::Equals": [{"Ref": "EnvType"}, "prod"]}, + {"Fn::Equals": [{"Ref": "EnvType"}, "staging"]}, + ] + }, + resources_map={"EnvType": "prod"}, + condition_map={}, + ).should.equal(True) + + parse_condition( + condition={ + "Fn::Or": [ + {"Fn::Equals": [{"Ref": "EnvType"}, "staging"]}, + {"Fn::Equals": [{"Ref": "EnvType"}, "staging"]}, + ] + }, + resources_map={"EnvType": "prod"}, + condition_map={}, + ).should.equal(False) + + +def test_reference_other_conditions(): + parse_condition( + condition={"Fn::Not": [{"Condition": "OtherCondition"}]}, + resources_map={}, + condition_map={"OtherCondition": True}, + ).should.equal(False) + + +def test_parse_split_and_select(): + stack = FakeStack( + stack_id="test_id", + name="test_stack", + template=split_select_template_json, + parameters={}, + region_name='us-west-1') + + stack.resource_map.should.have.length_of(1) + queue = stack.resource_map['Queue'] + queue.name.should.equal("myqueue") + + +def test_sub(): + stack = FakeStack( + stack_id="test_id", + name="test_stack", + template=sub_template_json, + parameters={}, + region_name='us-west-1') + + queue1 = stack.resource_map['Queue1'] + queue2 = stack.resource_map['Queue2'] + queue2.name.should.equal(queue1.name) + + +def test_import(): + export_stack = FakeStack( + stack_id="test_id", + name="test_stack", + template=export_value_template_json, + parameters={}, + region_name='us-west-1') + import_stack = FakeStack( + stack_id="test_id", + name="test_stack", + template=import_value_template_json, + parameters={}, + region_name='us-west-1', + cross_stack_resources={export_stack.exports[0].value: export_stack.exports[0]}) + + queue = import_stack.resource_map['Queue'] + queue.name.should.equal("value") + + + +def test_short_form_func_in_yaml_teamplate(): + template = """--- + KeyB64: !Base64 valueToEncode + KeyRef: !Ref foo + KeyAnd: !And + - A + - B + KeyEquals: !Equals [A, B] + KeyIf: !If [A, B, C] + KeyNot: !Not [A] + KeyOr: !Or [A, B] + KeyFindInMap: !FindInMap [A, B, C] + KeyGetAtt: !GetAtt A.B + KeyGetAZs: !GetAZs A + KeyImportValue: !ImportValue A + KeyJoin: !Join [ ":", [A, B, C] ] + KeySelect: !Select [A, B] + KeySplit: !Split [A, B] + KeySub: !Sub A + """ + yaml.add_multi_constructor('', yaml_tag_constructor) + template_dict = yaml.load(template) + key_and_expects = [ + ['KeyRef', {'Ref': 'foo'}], + ['KeyB64', {'Fn::Base64': 'valueToEncode'}], + ['KeyAnd', {'Fn::And': ['A', 'B']}], + ['KeyEquals', {'Fn::Equals': ['A', 'B']}], + ['KeyIf', {'Fn::If': ['A', 'B', 'C']}], + ['KeyNot', {'Fn::Not': ['A']}], + ['KeyOr', {'Fn::Or': ['A', 'B']}], + ['KeyFindInMap', {'Fn::FindInMap': ['A', 'B', 'C']}], + ['KeyGetAtt', {'Fn::GetAtt': ['A', 'B']}], + ['KeyGetAZs', {'Fn::GetAZs': 'A'}], + ['KeyImportValue', {'Fn::ImportValue': 'A'}], + ['KeyJoin', {'Fn::Join': [ ":", [ 'A', 'B', 'C' ] ]}], + ['KeySelect', {'Fn::Select': ['A', 'B']}], + ['KeySplit', {'Fn::Split': ['A', 'B']}], + ['KeySub', {'Fn::Sub': 'A'}], + ] + for k, v in key_and_expects: + template_dict.should.have.key(k).which.should.be.equal(v) diff --git a/tests/test_cloudwatch/test_cloudwatch.py b/tests/test_cloudwatch/test_cloudwatch.py index a0f3871c065c..2ba2337355cf 100644 --- 
a/tests/test_cloudwatch/test_cloudwatch.py +++ b/tests/test_cloudwatch/test_cloudwatch.py @@ -1,123 +1,123 @@ -import boto -from boto.ec2.cloudwatch.alarm import MetricAlarm -import boto3 -from datetime import datetime, timedelta -import pytz -import sure # noqa - -from moto import mock_cloudwatch_deprecated - - -def alarm_fixture(name="tester", action=None): - action = action or ['arn:alarm'] - return MetricAlarm( - name=name, - namespace="{0}_namespace".format(name), - metric="{0}_metric".format(name), - comparison='>=', - threshold=2.0, - period=60, - evaluation_periods=5, - statistic='Average', - description='A test', - dimensions={'InstanceId': ['i-0123456,i-0123457']}, - alarm_actions=action, - ok_actions=['arn:ok'], - insufficient_data_actions=['arn:insufficient'], - unit='Seconds', - ) - - -@mock_cloudwatch_deprecated -def test_create_alarm(): - conn = boto.connect_cloudwatch() - - alarm = alarm_fixture() - conn.create_alarm(alarm) - - alarms = conn.describe_alarms() - alarms.should.have.length_of(1) - alarm = alarms[0] - alarm.name.should.equal('tester') - alarm.namespace.should.equal('tester_namespace') - alarm.metric.should.equal('tester_metric') - alarm.comparison.should.equal('>=') - alarm.threshold.should.equal(2.0) - alarm.period.should.equal(60) - alarm.evaluation_periods.should.equal(5) - alarm.statistic.should.equal('Average') - alarm.description.should.equal('A test') - dict(alarm.dimensions).should.equal( - {'InstanceId': ['i-0123456,i-0123457']}) - list(alarm.alarm_actions).should.equal(['arn:alarm']) - list(alarm.ok_actions).should.equal(['arn:ok']) - list(alarm.insufficient_data_actions).should.equal(['arn:insufficient']) - alarm.unit.should.equal('Seconds') - - -@mock_cloudwatch_deprecated -def test_delete_alarm(): - conn = boto.connect_cloudwatch() - - alarms = conn.describe_alarms() - alarms.should.have.length_of(0) - - alarm = alarm_fixture() - conn.create_alarm(alarm) - - alarms = conn.describe_alarms() - alarms.should.have.length_of(1) - - alarms[0].delete() - - alarms = conn.describe_alarms() - alarms.should.have.length_of(0) - - -@mock_cloudwatch_deprecated -def test_put_metric_data(): - conn = boto.connect_cloudwatch() - - conn.put_metric_data( - namespace='tester', - name='metric', - value=1.5, - dimensions={'InstanceId': ['i-0123456,i-0123457']}, - ) - - metrics = conn.list_metrics() - metrics.should.have.length_of(1) - metric = metrics[0] - metric.namespace.should.equal('tester') - metric.name.should.equal('metric') - dict(metric.dimensions).should.equal( - {'InstanceId': ['i-0123456,i-0123457']}) - - -@mock_cloudwatch_deprecated -def test_describe_alarms(): - conn = boto.connect_cloudwatch() - - alarms = conn.describe_alarms() - alarms.should.have.length_of(0) - - conn.create_alarm(alarm_fixture(name="nfoobar", action="afoobar")) - conn.create_alarm(alarm_fixture(name="nfoobaz", action="afoobaz")) - conn.create_alarm(alarm_fixture(name="nbarfoo", action="abarfoo")) - conn.create_alarm(alarm_fixture(name="nbazfoo", action="abazfoo")) - - alarms = conn.describe_alarms() - alarms.should.have.length_of(4) - alarms = conn.describe_alarms(alarm_name_prefix="nfoo") - alarms.should.have.length_of(2) - alarms = conn.describe_alarms( - alarm_names=["nfoobar", "nbarfoo", "nbazfoo"]) - alarms.should.have.length_of(3) - alarms = conn.describe_alarms(action_prefix="afoo") - alarms.should.have.length_of(2) - - for alarm in conn.describe_alarms(): - alarm.delete() - - alarms = conn.describe_alarms() - alarms.should.have.length_of(0) +import boto +from 
boto.ec2.cloudwatch.alarm import MetricAlarm +import boto3 +from datetime import datetime, timedelta +import pytz +import sure # noqa + +from moto import mock_cloudwatch_deprecated + + +def alarm_fixture(name="tester", action=None): + action = action or ['arn:alarm'] + return MetricAlarm( + name=name, + namespace="{0}_namespace".format(name), + metric="{0}_metric".format(name), + comparison='>=', + threshold=2.0, + period=60, + evaluation_periods=5, + statistic='Average', + description='A test', + dimensions={'InstanceId': ['i-0123456,i-0123457']}, + alarm_actions=action, + ok_actions=['arn:ok'], + insufficient_data_actions=['arn:insufficient'], + unit='Seconds', + ) + + +@mock_cloudwatch_deprecated +def test_create_alarm(): + conn = boto.connect_cloudwatch() + + alarm = alarm_fixture() + conn.create_alarm(alarm) + + alarms = conn.describe_alarms() + alarms.should.have.length_of(1) + alarm = alarms[0] + alarm.name.should.equal('tester') + alarm.namespace.should.equal('tester_namespace') + alarm.metric.should.equal('tester_metric') + alarm.comparison.should.equal('>=') + alarm.threshold.should.equal(2.0) + alarm.period.should.equal(60) + alarm.evaluation_periods.should.equal(5) + alarm.statistic.should.equal('Average') + alarm.description.should.equal('A test') + dict(alarm.dimensions).should.equal( + {'InstanceId': ['i-0123456,i-0123457']}) + list(alarm.alarm_actions).should.equal(['arn:alarm']) + list(alarm.ok_actions).should.equal(['arn:ok']) + list(alarm.insufficient_data_actions).should.equal(['arn:insufficient']) + alarm.unit.should.equal('Seconds') + + +@mock_cloudwatch_deprecated +def test_delete_alarm(): + conn = boto.connect_cloudwatch() + + alarms = conn.describe_alarms() + alarms.should.have.length_of(0) + + alarm = alarm_fixture() + conn.create_alarm(alarm) + + alarms = conn.describe_alarms() + alarms.should.have.length_of(1) + + alarms[0].delete() + + alarms = conn.describe_alarms() + alarms.should.have.length_of(0) + + +@mock_cloudwatch_deprecated +def test_put_metric_data(): + conn = boto.connect_cloudwatch() + + conn.put_metric_data( + namespace='tester', + name='metric', + value=1.5, + dimensions={'InstanceId': ['i-0123456,i-0123457']}, + ) + + metrics = conn.list_metrics() + metrics.should.have.length_of(1) + metric = metrics[0] + metric.namespace.should.equal('tester') + metric.name.should.equal('metric') + dict(metric.dimensions).should.equal( + {'InstanceId': ['i-0123456,i-0123457']}) + + +@mock_cloudwatch_deprecated +def test_describe_alarms(): + conn = boto.connect_cloudwatch() + + alarms = conn.describe_alarms() + alarms.should.have.length_of(0) + + conn.create_alarm(alarm_fixture(name="nfoobar", action="afoobar")) + conn.create_alarm(alarm_fixture(name="nfoobaz", action="afoobaz")) + conn.create_alarm(alarm_fixture(name="nbarfoo", action="abarfoo")) + conn.create_alarm(alarm_fixture(name="nbazfoo", action="abazfoo")) + + alarms = conn.describe_alarms() + alarms.should.have.length_of(4) + alarms = conn.describe_alarms(alarm_name_prefix="nfoo") + alarms.should.have.length_of(2) + alarms = conn.describe_alarms( + alarm_names=["nfoobar", "nbarfoo", "nbazfoo"]) + alarms.should.have.length_of(3) + alarms = conn.describe_alarms(action_prefix="afoo") + alarms.should.have.length_of(2) + + for alarm in conn.describe_alarms(): + alarm.delete() + + alarms = conn.describe_alarms() + alarms.should.have.length_of(0) diff --git a/tests/test_cloudwatch/test_cloudwatch_boto3.py b/tests/test_cloudwatch/test_cloudwatch_boto3.py index 40b5eee08064..3c205f400ed7 100755 --- 
a/tests/test_cloudwatch/test_cloudwatch_boto3.py
+++ b/tests/test_cloudwatch/test_cloudwatch_boto3.py
@@ -1,224 +1,224 @@
-from __future__ import unicode_literals
-
-import boto3
-from botocore.exceptions import ClientError
-from datetime import datetime, timedelta
-import pytz
-import sure  # noqa
-
-from moto import mock_cloudwatch
-
-
-@mock_cloudwatch
-def test_put_list_dashboard():
-    client = boto3.client('cloudwatch', region_name='eu-central-1')
-    widget = '{"widgets": [{"type": "text", "x": 0, "y": 7, "width": 3, "height": 3, "properties": {"markdown": "Hello world"}}]}'
-
-    client.put_dashboard(DashboardName='test1', DashboardBody=widget)
-    resp = client.list_dashboards()
-
-    len(resp['DashboardEntries']).should.equal(1)
-
-
-@mock_cloudwatch
-def test_put_list_prefix_nomatch_dashboard():
-    client = boto3.client('cloudwatch', region_name='eu-central-1')
-    widget = '{"widgets": [{"type": "text", "x": 0, "y": 7, "width": 3, "height": 3, "properties": {"markdown": "Hello world"}}]}'
-
-    client.put_dashboard(DashboardName='test1', DashboardBody=widget)
-    resp = client.list_dashboards(DashboardNamePrefix='nomatch')
-
-    len(resp['DashboardEntries']).should.equal(0)
-
-
-@mock_cloudwatch
-def test_delete_dashboard():
-    client = boto3.client('cloudwatch', region_name='eu-central-1')
-    widget = '{"widgets": [{"type": "text", "x": 0, "y": 7, "width": 3, "height": 3, "properties": {"markdown": "Hello world"}}]}'
-
-    client.put_dashboard(DashboardName='test1', DashboardBody=widget)
-    client.put_dashboard(DashboardName='test2', DashboardBody=widget)
-    client.put_dashboard(DashboardName='test3', DashboardBody=widget)
-    client.delete_dashboards(DashboardNames=['test2', 'test1'])
-
-    resp = client.list_dashboards(DashboardNamePrefix='test3')
-    len(resp['DashboardEntries']).should.equal(1)
-
-
-@mock_cloudwatch
-def test_delete_dashboard_fail():
-    client = boto3.client('cloudwatch', region_name='eu-central-1')
-    widget = '{"widgets": [{"type": "text", "x": 0, "y": 7, "width": 3, "height": 3, "properties": {"markdown": "Hello world"}}]}'
-
-    client.put_dashboard(DashboardName='test1', DashboardBody=widget)
-    client.put_dashboard(DashboardName='test2', DashboardBody=widget)
-    client.put_dashboard(DashboardName='test3', DashboardBody=widget)
-    # Doesn't delete anything if any dashboard in the list doesn't exist
-    try:
-        client.delete_dashboards(DashboardNames=['test2', 'test1', 'test_no_match'])
-    except ClientError as err:
-        err.response['Error']['Code'].should.equal('ResourceNotFound')
-    else:
-        raise RuntimeError('Should have raised an error')
-
-    resp = client.list_dashboards()
-    len(resp['DashboardEntries']).should.equal(3)
-
-
-@mock_cloudwatch
-def test_get_dashboard():
-    client = boto3.client('cloudwatch', region_name='eu-central-1')
-    widget = '{"widgets": [{"type": "text", "x": 0, "y": 7, "width": 3, "height": 3, "properties": {"markdown": "Hello world"}}]}'
-    client.put_dashboard(DashboardName='test1', DashboardBody=widget)
-
-    resp = client.get_dashboard(DashboardName='test1')
-    resp.should.contain('DashboardArn')
-    resp.should.contain('DashboardBody')
-    resp['DashboardName'].should.equal('test1')
-
-
-@mock_cloudwatch
-def test_get_dashboard_fail():
-    client = boto3.client('cloudwatch', region_name='eu-central-1')
-
-    try:
-        client.get_dashboard(DashboardName='test1')
-    except ClientError as err:
-        err.response['Error']['Code'].should.equal('ResourceNotFound')
-    else:
-        raise RuntimeError('Should have raised an error')
-
-
-@mock_cloudwatch
-def test_alarm_state():
-    client = boto3.client('cloudwatch', region_name='eu-central-1')
-
-    client.put_metric_alarm(
-        AlarmName='testalarm1',
-        MetricName='cpu',
-        Namespace='blah',
-        Period=10,
-        EvaluationPeriods=5,
-        Statistic='Average',
-        Threshold=2,
-        ComparisonOperator='GreaterThanThreshold',
-    )
-    client.put_metric_alarm(
-        AlarmName='testalarm2',
-        MetricName='cpu',
-        Namespace='blah',
-        Period=10,
-        EvaluationPeriods=5,
-        Statistic='Average',
-        Threshold=2,
-        ComparisonOperator='GreaterThanThreshold',
-    )
-
-    # This is tested implicitly: if it doesn't work, the rest will die
-    client.set_alarm_state(
-        AlarmName='testalarm1',
-        StateValue='ALARM',
-        StateReason='testreason',
-        StateReasonData='{"some": "json_data"}'
-    )
-
-    resp = client.describe_alarms(
-        StateValue='ALARM'
-    )
-    len(resp['MetricAlarms']).should.equal(1)
-    resp['MetricAlarms'][0]['AlarmName'].should.equal('testalarm1')
-    resp['MetricAlarms'][0]['StateValue'].should.equal('ALARM')
-
-    resp = client.describe_alarms(
-        StateValue='OK'
-    )
-    len(resp['MetricAlarms']).should.equal(1)
-    resp['MetricAlarms'][0]['AlarmName'].should.equal('testalarm2')
-    resp['MetricAlarms'][0]['StateValue'].should.equal('OK')
-
-    # Just for sanity
-    resp = client.describe_alarms()
-    len(resp['MetricAlarms']).should.equal(2)
-
-
-@mock_cloudwatch
-def test_put_metric_data_no_dimensions():
-    conn = boto3.client('cloudwatch', region_name='us-east-1')
-
-    conn.put_metric_data(
-        Namespace='tester',
-        MetricData=[
-            dict(
-                MetricName='metric',
-                Value=1.5,
-            )
-        ]
-    )
-
-    metrics = conn.list_metrics()['Metrics']
-    metrics.should.have.length_of(1)
-    metric = metrics[0]
-    metric['Namespace'].should.equal('tester')
-    metric['MetricName'].should.equal('metric')
-
-
-
-@mock_cloudwatch
-def test_put_metric_data_with_statistics():
-    conn = boto3.client('cloudwatch', region_name='us-east-1')
-
-    conn.put_metric_data(
-        Namespace='tester',
-        MetricData=[
-            dict(
-                MetricName='statmetric',
-                Timestamp=datetime(2015, 1, 1),
-                # no Value to test https://github.com/spulec/moto/issues/1615
-                StatisticValues=dict(
-                    SampleCount=123.0,
-                    Sum=123.0,
-                    Minimum=123.0,
-                    Maximum=123.0
-                ),
-                Unit='Milliseconds',
-                StorageResolution=123
-            )
-        ]
-    )
-
-    metrics = conn.list_metrics()['Metrics']
-    metrics.should.have.length_of(1)
-    metric = metrics[0]
-    metric['Namespace'].should.equal('tester')
-    metric['MetricName'].should.equal('statmetric')
-    # TODO: test statistics - https://github.com/spulec/moto/issues/1615
-
-@mock_cloudwatch
-def test_get_metric_statistics():
-    conn = boto3.client('cloudwatch', region_name='us-east-1')
-    utc_now = datetime.now(tz=pytz.utc)
-
-    conn.put_metric_data(
-        Namespace='tester',
-        MetricData=[
-            dict(
-                MetricName='metric',
-                Value=1.5,
-                Timestamp=utc_now
-            )
-        ]
-    )
-
-    stats = conn.get_metric_statistics(
-        Namespace='tester',
-        MetricName='metric',
-        StartTime=utc_now - timedelta(seconds=60),
-        EndTime=utc_now + timedelta(seconds=60),
-        Period=60,
-        Statistics=['SampleCount', 'Sum']
-    )
-
-    stats['Datapoints'].should.have.length_of(1)
-    datapoint = stats['Datapoints'][0]
-    datapoint['SampleCount'].should.equal(1.0)
-    datapoint['Sum'].should.equal(1.5)
+from __future__ import unicode_literals
+
+import boto3
+from botocore.exceptions import ClientError
+from datetime import datetime, timedelta
+import pytz
+import sure  # noqa
+
+from moto import mock_cloudwatch
+
+
+@mock_cloudwatch
+def test_put_list_dashboard():
+    client = boto3.client('cloudwatch', region_name='eu-central-1')
"height": 3, "properties": {"markdown": "Hello world"}}]}' + + client.put_dashboard(DashboardName='test1', DashboardBody=widget) + resp = client.list_dashboards() + + len(resp['DashboardEntries']).should.equal(1) + + +@mock_cloudwatch +def test_put_list_prefix_nomatch_dashboard(): + client = boto3.client('cloudwatch', region_name='eu-central-1') + widget = '{"widgets": [{"type": "text", "x": 0, "y": 7, "width": 3, "height": 3, "properties": {"markdown": "Hello world"}}]}' + + client.put_dashboard(DashboardName='test1', DashboardBody=widget) + resp = client.list_dashboards(DashboardNamePrefix='nomatch') + + len(resp['DashboardEntries']).should.equal(0) + + +@mock_cloudwatch +def test_delete_dashboard(): + client = boto3.client('cloudwatch', region_name='eu-central-1') + widget = '{"widgets": [{"type": "text", "x": 0, "y": 7, "width": 3, "height": 3, "properties": {"markdown": "Hello world"}}]}' + + client.put_dashboard(DashboardName='test1', DashboardBody=widget) + client.put_dashboard(DashboardName='test2', DashboardBody=widget) + client.put_dashboard(DashboardName='test3', DashboardBody=widget) + client.delete_dashboards(DashboardNames=['test2', 'test1']) + + resp = client.list_dashboards(DashboardNamePrefix='test3') + len(resp['DashboardEntries']).should.equal(1) + + +@mock_cloudwatch +def test_delete_dashboard_fail(): + client = boto3.client('cloudwatch', region_name='eu-central-1') + widget = '{"widgets": [{"type": "text", "x": 0, "y": 7, "width": 3, "height": 3, "properties": {"markdown": "Hello world"}}]}' + + client.put_dashboard(DashboardName='test1', DashboardBody=widget) + client.put_dashboard(DashboardName='test2', DashboardBody=widget) + client.put_dashboard(DashboardName='test3', DashboardBody=widget) + # Doesnt delete anything if all dashboards to be deleted do not exist + try: + client.delete_dashboards(DashboardNames=['test2', 'test1', 'test_no_match']) + except ClientError as err: + err.response['Error']['Code'].should.equal('ResourceNotFound') + else: + raise RuntimeError('Should of raised error') + + resp = client.list_dashboards() + len(resp['DashboardEntries']).should.equal(3) + + +@mock_cloudwatch +def test_get_dashboard(): + client = boto3.client('cloudwatch', region_name='eu-central-1') + widget = '{"widgets": [{"type": "text", "x": 0, "y": 7, "width": 3, "height": 3, "properties": {"markdown": "Hello world"}}]}' + client.put_dashboard(DashboardName='test1', DashboardBody=widget) + + resp = client.get_dashboard(DashboardName='test1') + resp.should.contain('DashboardArn') + resp.should.contain('DashboardBody') + resp['DashboardName'].should.equal('test1') + + +@mock_cloudwatch +def test_get_dashboard_fail(): + client = boto3.client('cloudwatch', region_name='eu-central-1') + + try: + client.get_dashboard(DashboardName='test1') + except ClientError as err: + err.response['Error']['Code'].should.equal('ResourceNotFound') + else: + raise RuntimeError('Should of raised error') + + +@mock_cloudwatch +def test_alarm_state(): + client = boto3.client('cloudwatch', region_name='eu-central-1') + + client.put_metric_alarm( + AlarmName='testalarm1', + MetricName='cpu', + Namespace='blah', + Period=10, + EvaluationPeriods=5, + Statistic='Average', + Threshold=2, + ComparisonOperator='GreaterThanThreshold', + ) + client.put_metric_alarm( + AlarmName='testalarm2', + MetricName='cpu', + Namespace='blah', + Period=10, + EvaluationPeriods=5, + Statistic='Average', + Threshold=2, + ComparisonOperator='GreaterThanThreshold', + ) + + # This is tested implicitly as if it doesnt work the 
rest will die + client.set_alarm_state( + AlarmName='testalarm1', + StateValue='ALARM', + StateReason='testreason', + StateReasonData='{"some": "json_data"}' + ) + + resp = client.describe_alarms( + StateValue='ALARM' + ) + len(resp['MetricAlarms']).should.equal(1) + resp['MetricAlarms'][0]['AlarmName'].should.equal('testalarm1') + resp['MetricAlarms'][0]['StateValue'].should.equal('ALARM') + + resp = client.describe_alarms( + StateValue='OK' + ) + len(resp['MetricAlarms']).should.equal(1) + resp['MetricAlarms'][0]['AlarmName'].should.equal('testalarm2') + resp['MetricAlarms'][0]['StateValue'].should.equal('OK') + + # Just for sanity + resp = client.describe_alarms() + len(resp['MetricAlarms']).should.equal(2) + + +@mock_cloudwatch +def test_put_metric_data_no_dimensions(): + conn = boto3.client('cloudwatch', region_name='us-east-1') + + conn.put_metric_data( + Namespace='tester', + MetricData=[ + dict( + MetricName='metric', + Value=1.5, + ) + ] + ) + + metrics = conn.list_metrics()['Metrics'] + metrics.should.have.length_of(1) + metric = metrics[0] + metric['Namespace'].should.equal('tester') + metric['MetricName'].should.equal('metric') + + + +@mock_cloudwatch +def test_put_metric_data_with_statistics(): + conn = boto3.client('cloudwatch', region_name='us-east-1') + + conn.put_metric_data( + Namespace='tester', + MetricData=[ + dict( + MetricName='statmetric', + Timestamp=datetime(2015, 1, 1), + # no Value to test https://github.com/spulec/moto/issues/1615 + StatisticValues=dict( + SampleCount=123.0, + Sum=123.0, + Minimum=123.0, + Maximum=123.0 + ), + Unit='Milliseconds', + StorageResolution=123 + ) + ] + ) + + metrics = conn.list_metrics()['Metrics'] + metrics.should.have.length_of(1) + metric = metrics[0] + metric['Namespace'].should.equal('tester') + metric['MetricName'].should.equal('statmetric') + # TODO: test statistics - https://github.com/spulec/moto/issues/1615 + +@mock_cloudwatch +def test_get_metric_statistics(): + conn = boto3.client('cloudwatch', region_name='us-east-1') + utc_now = datetime.now(tz=pytz.utc) + + conn.put_metric_data( + Namespace='tester', + MetricData=[ + dict( + MetricName='metric', + Value=1.5, + Timestamp=utc_now + ) + ] + ) + + stats = conn.get_metric_statistics( + Namespace='tester', + MetricName='metric', + StartTime=utc_now - timedelta(seconds=60), + EndTime=utc_now + timedelta(seconds=60), + Period=60, + Statistics=['SampleCount', 'Sum'] + ) + + stats['Datapoints'].should.have.length_of(1) + datapoint = stats['Datapoints'][0] + datapoint['SampleCount'].should.equal(1.0) + datapoint['Sum'].should.equal(1.5) diff --git a/tests/test_cognitoidentity/test_cognitoidentity.py b/tests/test_cognitoidentity/test_cognitoidentity.py index ac79fa2239e8..278c20660fc6 100644 --- a/tests/test_cognitoidentity/test_cognitoidentity.py +++ b/tests/test_cognitoidentity/test_cognitoidentity.py @@ -1,85 +1,85 @@ -from __future__ import unicode_literals - -import boto3 - -from moto import mock_cognitoidentity -import sure # noqa - -from moto.cognitoidentity.utils import get_random_identity_id - - -@mock_cognitoidentity -def test_create_identity_pool(): - conn = boto3.client('cognito-identity', 'us-west-2') - - result = conn.create_identity_pool(IdentityPoolName='TestPool', - AllowUnauthenticatedIdentities=False, - SupportedLoginProviders={'graph.facebook.com': '123456789012345'}, - DeveloperProviderName='devname', - OpenIdConnectProviderARNs=['arn:aws:rds:eu-west-2:123456789012:db:mysql-db'], - CognitoIdentityProviders=[ - { - 'ProviderName': 'testprovider', - 
'ClientId': 'CLIENT12345', - 'ServerSideTokenCheck': True - }, - ], - SamlProviderARNs=['arn:aws:rds:eu-west-2:123456789012:db:mysql-db']) - assert result['IdentityPoolId'] != '' - - -# testing a helper function -def test_get_random_identity_id(): - assert len(get_random_identity_id('us-west-2')) > 0 - assert len(get_random_identity_id('us-west-2').split(':')[1]) == 19 - - -@mock_cognitoidentity -def test_get_id(): - # These two do NOT work in server mode. They just don't return the data from the model. - conn = boto3.client('cognito-identity', 'us-west-2') - result = conn.get_id(AccountId='someaccount', - IdentityPoolId='us-west-2:12345', - Logins={ - 'someurl': '12345' - }) - print(result) - assert result.get('IdentityId', "").startswith('us-west-2') or result.get('ResponseMetadata').get('HTTPStatusCode') == 200 - - -@mock_cognitoidentity -def test_get_credentials_for_identity(): - # These two do NOT work in server mode. They just don't return the data from the model. - conn = boto3.client('cognito-identity', 'us-west-2') - result = conn.get_credentials_for_identity(IdentityId='12345') - - assert result.get('Expiration', 0) > 0 or result.get('ResponseMetadata').get('HTTPStatusCode') == 200 - assert result.get('IdentityId') == '12345' or result.get('ResponseMetadata').get('HTTPStatusCode') == 200 - - -@mock_cognitoidentity -def test_get_open_id_token_for_developer_identity(): - conn = boto3.client('cognito-identity', 'us-west-2') - result = conn.get_open_id_token_for_developer_identity( - IdentityPoolId='us-west-2:12345', - IdentityId='12345', - Logins={ - 'someurl': '12345' - }, - TokenDuration=123 - ) - assert len(result['Token']) - assert result['IdentityId'] == '12345' - -@mock_cognitoidentity -def test_get_open_id_token_for_developer_identity_when_no_explicit_identity_id(): - conn = boto3.client('cognito-identity', 'us-west-2') - result = conn.get_open_id_token_for_developer_identity( - IdentityPoolId='us-west-2:12345', - Logins={ - 'someurl': '12345' - }, - TokenDuration=123 - ) - assert len(result['Token']) > 0 - assert len(result['IdentityId']) > 0 +from __future__ import unicode_literals + +import boto3 + +from moto import mock_cognitoidentity +import sure # noqa + +from moto.cognitoidentity.utils import get_random_identity_id + + +@mock_cognitoidentity +def test_create_identity_pool(): + conn = boto3.client('cognito-identity', 'us-west-2') + + result = conn.create_identity_pool(IdentityPoolName='TestPool', + AllowUnauthenticatedIdentities=False, + SupportedLoginProviders={'graph.facebook.com': '123456789012345'}, + DeveloperProviderName='devname', + OpenIdConnectProviderARNs=['arn:aws:rds:eu-west-2:123456789012:db:mysql-db'], + CognitoIdentityProviders=[ + { + 'ProviderName': 'testprovider', + 'ClientId': 'CLIENT12345', + 'ServerSideTokenCheck': True + }, + ], + SamlProviderARNs=['arn:aws:rds:eu-west-2:123456789012:db:mysql-db']) + assert result['IdentityPoolId'] != '' + + +# testing a helper function +def test_get_random_identity_id(): + assert len(get_random_identity_id('us-west-2')) > 0 + assert len(get_random_identity_id('us-west-2').split(':')[1]) == 19 + + +@mock_cognitoidentity +def test_get_id(): + # These two do NOT work in server mode. They just don't return the data from the model. 
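+    # (Presumably because @mock_cognitoidentity patches the in-process backend,
+    # while a standalone moto server keeps its own state, hence the bare
+    # HTTP-200 fallback accepted by the assertions below.)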
+ conn = boto3.client('cognito-identity', 'us-west-2') + result = conn.get_id(AccountId='someaccount', + IdentityPoolId='us-west-2:12345', + Logins={ + 'someurl': '12345' + }) + print(result) + assert result.get('IdentityId', "").startswith('us-west-2') or result.get('ResponseMetadata').get('HTTPStatusCode') == 200 + + +@mock_cognitoidentity +def test_get_credentials_for_identity(): + # These two do NOT work in server mode. They just don't return the data from the model. + conn = boto3.client('cognito-identity', 'us-west-2') + result = conn.get_credentials_for_identity(IdentityId='12345') + + assert result.get('Expiration', 0) > 0 or result.get('ResponseMetadata').get('HTTPStatusCode') == 200 + assert result.get('IdentityId') == '12345' or result.get('ResponseMetadata').get('HTTPStatusCode') == 200 + + +@mock_cognitoidentity +def test_get_open_id_token_for_developer_identity(): + conn = boto3.client('cognito-identity', 'us-west-2') + result = conn.get_open_id_token_for_developer_identity( + IdentityPoolId='us-west-2:12345', + IdentityId='12345', + Logins={ + 'someurl': '12345' + }, + TokenDuration=123 + ) + assert len(result['Token']) + assert result['IdentityId'] == '12345' + +@mock_cognitoidentity +def test_get_open_id_token_for_developer_identity_when_no_explicit_identity_id(): + conn = boto3.client('cognito-identity', 'us-west-2') + result = conn.get_open_id_token_for_developer_identity( + IdentityPoolId='us-west-2:12345', + Logins={ + 'someurl': '12345' + }, + TokenDuration=123 + ) + assert len(result['Token']) > 0 + assert len(result['IdentityId']) > 0 diff --git a/tests/test_cognitoidentity/test_server.py b/tests/test_cognitoidentity/test_server.py index b63d42bc0102..d093158c58a9 100644 --- a/tests/test_cognitoidentity/test_server.py +++ b/tests/test_cognitoidentity/test_server.py @@ -1,45 +1,45 @@ -from __future__ import unicode_literals - -import json -import sure # noqa - -import moto.server as server -from moto import mock_cognitoidentity - -''' -Test the different server responses -''' - - -@mock_cognitoidentity -def test_create_identity_pool(): - - backend = server.create_backend_app("cognito-identity") - test_client = backend.test_client() - - res = test_client.post('/', - data={"IdentityPoolName": "test", "AllowUnauthenticatedIdentities": True}, - headers={ - "X-Amz-Target": "com.amazonaws.cognito.identity.model.AWSCognitoIdentityService.CreateIdentityPool"}, - ) - - json_data = json.loads(res.data.decode("utf-8")) - assert json_data['IdentityPoolName'] == "test" - - -@mock_cognitoidentity -def test_get_id(): - backend = server.create_backend_app("cognito-identity") - test_client = backend.test_client() - - res = test_client.post('/', - data=json.dumps({'AccountId': 'someaccount', - 'IdentityPoolId': 'us-west-2:12345', - 'Logins': {'someurl': '12345'}}), - headers={ - "X-Amz-Target": "com.amazonaws.cognito.identity.model.AWSCognitoIdentityService.GetId"}, - ) - - print(res.data) - json_data = json.loads(res.data.decode("utf-8")) - assert ':' in json_data['IdentityId'] +from __future__ import unicode_literals + +import json +import sure # noqa + +import moto.server as server +from moto import mock_cognitoidentity + +''' +Test the different server responses +''' + + +@mock_cognitoidentity +def test_create_identity_pool(): + + backend = server.create_backend_app("cognito-identity") + test_client = backend.test_client() + + res = test_client.post('/', + data={"IdentityPoolName": "test", "AllowUnauthenticatedIdentities": True}, + headers={ + "X-Amz-Target": 
"com.amazonaws.cognito.identity.model.AWSCognitoIdentityService.CreateIdentityPool"}, + ) + + json_data = json.loads(res.data.decode("utf-8")) + assert json_data['IdentityPoolName'] == "test" + + +@mock_cognitoidentity +def test_get_id(): + backend = server.create_backend_app("cognito-identity") + test_client = backend.test_client() + + res = test_client.post('/', + data=json.dumps({'AccountId': 'someaccount', + 'IdentityPoolId': 'us-west-2:12345', + 'Logins': {'someurl': '12345'}}), + headers={ + "X-Amz-Target": "com.amazonaws.cognito.identity.model.AWSCognitoIdentityService.GetId"}, + ) + + print(res.data) + json_data = json.loads(res.data.decode("utf-8")) + assert ':' in json_data['IdentityId'] diff --git a/tests/test_cognitoidp/test_cognitoidp.py b/tests/test_cognitoidp/test_cognitoidp.py index f72a4476249a..6c0ad131b096 100644 --- a/tests/test_cognitoidp/test_cognitoidp.py +++ b/tests/test_cognitoidp/test_cognitoidp.py @@ -1,601 +1,601 @@ -from __future__ import unicode_literals - -import boto3 -import json -import os -import uuid - -from jose import jws - -from moto import mock_cognitoidp -import sure # noqa - - -@mock_cognitoidp -def test_create_user_pool(): - conn = boto3.client("cognito-idp", "us-west-2") - - name = str(uuid.uuid4()) - value = str(uuid.uuid4()) - result = conn.create_user_pool( - PoolName=name, - LambdaConfig={ - "PreSignUp": value - } - ) - - result["UserPool"]["Id"].should_not.be.none - result["UserPool"]["Id"].should.match(r'[\w-]+_[0-9a-zA-Z]+') - result["UserPool"]["Name"].should.equal(name) - result["UserPool"]["LambdaConfig"]["PreSignUp"].should.equal(value) - - -@mock_cognitoidp -def test_list_user_pools(): - conn = boto3.client("cognito-idp", "us-west-2") - - name = str(uuid.uuid4()) - conn.create_user_pool(PoolName=name) - result = conn.list_user_pools(MaxResults=10) - result["UserPools"].should.have.length_of(1) - result["UserPools"][0]["Name"].should.equal(name) - - -@mock_cognitoidp -def test_describe_user_pool(): - conn = boto3.client("cognito-idp", "us-west-2") - - name = str(uuid.uuid4()) - value = str(uuid.uuid4()) - user_pool_details = conn.create_user_pool( - PoolName=name, - LambdaConfig={ - "PreSignUp": value - } - ) - - result = conn.describe_user_pool(UserPoolId=user_pool_details["UserPool"]["Id"]) - result["UserPool"]["Name"].should.equal(name) - result["UserPool"]["LambdaConfig"]["PreSignUp"].should.equal(value) - - -@mock_cognitoidp -def test_delete_user_pool(): - conn = boto3.client("cognito-idp", "us-west-2") - - user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] - conn.list_user_pools(MaxResults=10)["UserPools"].should.have.length_of(1) - conn.delete_user_pool(UserPoolId=user_pool_id) - conn.list_user_pools(MaxResults=10)["UserPools"].should.have.length_of(0) - - -@mock_cognitoidp -def test_create_user_pool_domain(): - conn = boto3.client("cognito-idp", "us-west-2") - - domain = str(uuid.uuid4()) - user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] - result = conn.create_user_pool_domain(UserPoolId=user_pool_id, Domain=domain) - result["ResponseMetadata"]["HTTPStatusCode"].should.equal(200) - - -@mock_cognitoidp -def test_describe_user_pool_domain(): - conn = boto3.client("cognito-idp", "us-west-2") - - domain = str(uuid.uuid4()) - user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] - conn.create_user_pool_domain(UserPoolId=user_pool_id, Domain=domain) - result = conn.describe_user_pool_domain(Domain=domain) - 
result["DomainDescription"]["Domain"].should.equal(domain) - result["DomainDescription"]["UserPoolId"].should.equal(user_pool_id) - result["DomainDescription"]["AWSAccountId"].should_not.be.none - - -@mock_cognitoidp -def test_delete_user_pool_domain(): - conn = boto3.client("cognito-idp", "us-west-2") - - domain = str(uuid.uuid4()) - user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] - conn.create_user_pool_domain(UserPoolId=user_pool_id, Domain=domain) - result = conn.delete_user_pool_domain(UserPoolId=user_pool_id, Domain=domain) - result["ResponseMetadata"]["HTTPStatusCode"].should.equal(200) - result = conn.describe_user_pool_domain(Domain=domain) - # This is a surprising behavior of the real service: describing a missing domain comes - # back with status 200 and a DomainDescription of {} - result["ResponseMetadata"]["HTTPStatusCode"].should.equal(200) - result["DomainDescription"].keys().should.have.length_of(0) - - -@mock_cognitoidp -def test_create_user_pool_client(): - conn = boto3.client("cognito-idp", "us-west-2") - - client_name = str(uuid.uuid4()) - value = str(uuid.uuid4()) - user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] - result = conn.create_user_pool_client( - UserPoolId=user_pool_id, - ClientName=client_name, - CallbackURLs=[value], - ) - - result["UserPoolClient"]["UserPoolId"].should.equal(user_pool_id) - result["UserPoolClient"]["ClientId"].should_not.be.none - result["UserPoolClient"]["ClientName"].should.equal(client_name) - result["UserPoolClient"]["CallbackURLs"].should.have.length_of(1) - result["UserPoolClient"]["CallbackURLs"][0].should.equal(value) - - -@mock_cognitoidp -def test_list_user_pool_clients(): - conn = boto3.client("cognito-idp", "us-west-2") - - client_name = str(uuid.uuid4()) - user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] - conn.create_user_pool_client(UserPoolId=user_pool_id, ClientName=client_name) - result = conn.list_user_pool_clients(UserPoolId=user_pool_id, MaxResults=10) - result["UserPoolClients"].should.have.length_of(1) - result["UserPoolClients"][0]["ClientName"].should.equal(client_name) - - -@mock_cognitoidp -def test_describe_user_pool_client(): - conn = boto3.client("cognito-idp", "us-west-2") - - client_name = str(uuid.uuid4()) - value = str(uuid.uuid4()) - user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] - client_details = conn.create_user_pool_client( - UserPoolId=user_pool_id, - ClientName=client_name, - CallbackURLs=[value], - ) - - result = conn.describe_user_pool_client( - UserPoolId=user_pool_id, - ClientId=client_details["UserPoolClient"]["ClientId"], - ) - - result["UserPoolClient"]["ClientName"].should.equal(client_name) - result["UserPoolClient"]["CallbackURLs"].should.have.length_of(1) - result["UserPoolClient"]["CallbackURLs"][0].should.equal(value) - - -@mock_cognitoidp -def test_update_user_pool_client(): - conn = boto3.client("cognito-idp", "us-west-2") - - old_client_name = str(uuid.uuid4()) - new_client_name = str(uuid.uuid4()) - old_value = str(uuid.uuid4()) - new_value = str(uuid.uuid4()) - user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] - client_details = conn.create_user_pool_client( - UserPoolId=user_pool_id, - ClientName=old_client_name, - CallbackURLs=[old_value], - ) - - result = conn.update_user_pool_client( - UserPoolId=user_pool_id, - ClientId=client_details["UserPoolClient"]["ClientId"], - ClientName=new_client_name, - 
CallbackURLs=[new_value], - ) - - result["UserPoolClient"]["ClientName"].should.equal(new_client_name) - result["UserPoolClient"]["CallbackURLs"].should.have.length_of(1) - result["UserPoolClient"]["CallbackURLs"][0].should.equal(new_value) - - -@mock_cognitoidp -def test_delete_user_pool_client(): - conn = boto3.client("cognito-idp", "us-west-2") - - user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] - client_details = conn.create_user_pool_client( - UserPoolId=user_pool_id, - ClientName=str(uuid.uuid4()), - ) - - conn.delete_user_pool_client( - UserPoolId=user_pool_id, - ClientId=client_details["UserPoolClient"]["ClientId"], - ) - - caught = False - try: - conn.describe_user_pool_client( - UserPoolId=user_pool_id, - ClientId=client_details["UserPoolClient"]["ClientId"], - ) - except conn.exceptions.ResourceNotFoundException: - caught = True - - caught.should.be.true - - -@mock_cognitoidp -def test_create_identity_provider(): - conn = boto3.client("cognito-idp", "us-west-2") - - provider_name = str(uuid.uuid4()) - provider_type = "Facebook" - value = str(uuid.uuid4()) - user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] - result = conn.create_identity_provider( - UserPoolId=user_pool_id, - ProviderName=provider_name, - ProviderType=provider_type, - ProviderDetails={ - "thing": value - }, - ) - - result["IdentityProvider"]["UserPoolId"].should.equal(user_pool_id) - result["IdentityProvider"]["ProviderName"].should.equal(provider_name) - result["IdentityProvider"]["ProviderType"].should.equal(provider_type) - result["IdentityProvider"]["ProviderDetails"]["thing"].should.equal(value) - - -@mock_cognitoidp -def test_list_identity_providers(): - conn = boto3.client("cognito-idp", "us-west-2") - - provider_name = str(uuid.uuid4()) - provider_type = "Facebook" - user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] - conn.create_identity_provider( - UserPoolId=user_pool_id, - ProviderName=provider_name, - ProviderType=provider_type, - ProviderDetails={}, - ) - - result = conn.list_identity_providers( - UserPoolId=user_pool_id, - MaxResults=10, - ) - - result["Providers"].should.have.length_of(1) - result["Providers"][0]["ProviderName"].should.equal(provider_name) - result["Providers"][0]["ProviderType"].should.equal(provider_type) - - -@mock_cognitoidp -def test_describe_identity_providers(): - conn = boto3.client("cognito-idp", "us-west-2") - - provider_name = str(uuid.uuid4()) - provider_type = "Facebook" - value = str(uuid.uuid4()) - user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] - conn.create_identity_provider( - UserPoolId=user_pool_id, - ProviderName=provider_name, - ProviderType=provider_type, - ProviderDetails={ - "thing": value - }, - ) - - result = conn.describe_identity_provider( - UserPoolId=user_pool_id, - ProviderName=provider_name, - ) - - result["IdentityProvider"]["UserPoolId"].should.equal(user_pool_id) - result["IdentityProvider"]["ProviderName"].should.equal(provider_name) - result["IdentityProvider"]["ProviderType"].should.equal(provider_type) - result["IdentityProvider"]["ProviderDetails"]["thing"].should.equal(value) - - -@mock_cognitoidp -def test_delete_identity_providers(): - conn = boto3.client("cognito-idp", "us-west-2") - - provider_name = str(uuid.uuid4()) - provider_type = "Facebook" - value = str(uuid.uuid4()) - user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] - conn.create_identity_provider( - 
UserPoolId=user_pool_id, - ProviderName=provider_name, - ProviderType=provider_type, - ProviderDetails={ - "thing": value - }, - ) - - conn.delete_identity_provider(UserPoolId=user_pool_id, ProviderName=provider_name) - - caught = False - try: - conn.describe_identity_provider( - UserPoolId=user_pool_id, - ProviderName=provider_name, - ) - except conn.exceptions.ResourceNotFoundException: - caught = True - - caught.should.be.true - - -@mock_cognitoidp -def test_admin_create_user(): - conn = boto3.client("cognito-idp", "us-west-2") - - username = str(uuid.uuid4()) - value = str(uuid.uuid4()) - user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] - result = conn.admin_create_user( - UserPoolId=user_pool_id, - Username=username, - UserAttributes=[ - {"Name": "thing", "Value": value} - ], - ) - - result["User"]["Username"].should.equal(username) - result["User"]["UserStatus"].should.equal("FORCE_CHANGE_PASSWORD") - result["User"]["Attributes"].should.have.length_of(1) - result["User"]["Attributes"][0]["Name"].should.equal("thing") - result["User"]["Attributes"][0]["Value"].should.equal(value) - result["User"]["Enabled"].should.equal(True) - - -@mock_cognitoidp -def test_admin_get_user(): - conn = boto3.client("cognito-idp", "us-west-2") - - username = str(uuid.uuid4()) - value = str(uuid.uuid4()) - user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] - conn.admin_create_user( - UserPoolId=user_pool_id, - Username=username, - UserAttributes=[ - {"Name": "thing", "Value": value} - ], - ) - - result = conn.admin_get_user(UserPoolId=user_pool_id, Username=username) - result["Username"].should.equal(username) - result["UserAttributes"].should.have.length_of(1) - result["UserAttributes"][0]["Name"].should.equal("thing") - result["UserAttributes"][0]["Value"].should.equal(value) - - -@mock_cognitoidp -def test_admin_get_missing_user(): - conn = boto3.client("cognito-idp", "us-west-2") - - username = str(uuid.uuid4()) - user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] - - caught = False - try: - conn.admin_get_user(UserPoolId=user_pool_id, Username=username) - except conn.exceptions.UserNotFoundException: - caught = True - - caught.should.be.true - - -@mock_cognitoidp -def test_list_users(): - conn = boto3.client("cognito-idp", "us-west-2") - - username = str(uuid.uuid4()) - user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] - conn.admin_create_user(UserPoolId=user_pool_id, Username=username) - result = conn.list_users(UserPoolId=user_pool_id) - result["Users"].should.have.length_of(1) - result["Users"][0]["Username"].should.equal(username) - - -@mock_cognitoidp -def test_admin_disable_user(): - conn = boto3.client("cognito-idp", "us-west-2") - - username = str(uuid.uuid4()) - user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] - conn.admin_create_user(UserPoolId=user_pool_id, Username=username) - - result = conn.admin_disable_user(UserPoolId=user_pool_id, Username=username) - list(result.keys()).should.equal(["ResponseMetadata"]) # No response expected - - conn.admin_get_user(UserPoolId=user_pool_id, Username=username) \ - ["Enabled"].should.equal(False) - - -@mock_cognitoidp -def test_admin_enable_user(): - conn = boto3.client("cognito-idp", "us-west-2") - - username = str(uuid.uuid4()) - user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] - conn.admin_create_user(UserPoolId=user_pool_id, Username=username) - 
conn.admin_disable_user(UserPoolId=user_pool_id, Username=username) - - result = conn.admin_enable_user(UserPoolId=user_pool_id, Username=username) - list(result.keys()).should.equal(["ResponseMetadata"]) # No response expected - - conn.admin_get_user(UserPoolId=user_pool_id, Username=username) \ - ["Enabled"].should.equal(True) - - -@mock_cognitoidp -def test_admin_delete_user(): - conn = boto3.client("cognito-idp", "us-west-2") - - username = str(uuid.uuid4()) - user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] - conn.admin_create_user(UserPoolId=user_pool_id, Username=username) - conn.admin_delete_user(UserPoolId=user_pool_id, Username=username) - - caught = False - try: - conn.admin_get_user(UserPoolId=user_pool_id, Username=username) - except conn.exceptions.UserNotFoundException: - caught = True - - caught.should.be.true - - -def authentication_flow(conn): - username = str(uuid.uuid4()) - temporary_password = str(uuid.uuid4()) - user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] - user_attribute_name = str(uuid.uuid4()) - user_attribute_value = str(uuid.uuid4()) - client_id = conn.create_user_pool_client( - UserPoolId=user_pool_id, - ClientName=str(uuid.uuid4()), - ReadAttributes=[user_attribute_name] - )["UserPoolClient"]["ClientId"] - - conn.admin_create_user( - UserPoolId=user_pool_id, - Username=username, - TemporaryPassword=temporary_password, - UserAttributes=[{ - 'Name': user_attribute_name, - 'Value': user_attribute_value - }] - ) - - result = conn.admin_initiate_auth( - UserPoolId=user_pool_id, - ClientId=client_id, - AuthFlow="ADMIN_NO_SRP_AUTH", - AuthParameters={ - "USERNAME": username, - "PASSWORD": temporary_password - }, - ) - - # A newly created user is forced to set a new password - result["ChallengeName"].should.equal("NEW_PASSWORD_REQUIRED") - result["Session"].should_not.be.none - - # This sets a new password and logs the user in (creates tokens) - new_password = str(uuid.uuid4()) - result = conn.respond_to_auth_challenge( - Session=result["Session"], - ClientId=client_id, - ChallengeName="NEW_PASSWORD_REQUIRED", - ChallengeResponses={ - "USERNAME": username, - "NEW_PASSWORD": new_password - } - ) - - result["AuthenticationResult"]["IdToken"].should_not.be.none - result["AuthenticationResult"]["AccessToken"].should_not.be.none - - return { - "user_pool_id": user_pool_id, - "client_id": client_id, - "id_token": result["AuthenticationResult"]["IdToken"], - "access_token": result["AuthenticationResult"]["AccessToken"], - "username": username, - "password": new_password, - "additional_fields": { - user_attribute_name: user_attribute_value - } - } - - -@mock_cognitoidp -def test_authentication_flow(): - conn = boto3.client("cognito-idp", "us-west-2") - - authentication_flow(conn) - - -@mock_cognitoidp -def test_token_legitimacy(): - conn = boto3.client("cognito-idp", "us-west-2") - - path = "../../moto/cognitoidp/resources/jwks-public.json" - with open(os.path.join(os.path.dirname(__file__), path)) as f: - json_web_key = json.loads(f.read())["keys"][0] - - outputs = authentication_flow(conn) - id_token = outputs["id_token"] - access_token = outputs["access_token"] - client_id = outputs["client_id"] - issuer = "https://cognito-idp.us-west-2.amazonaws.com/{}".format(outputs["user_pool_id"]) - id_claims = json.loads(jws.verify(id_token, json_web_key, "RS256")) - id_claims["iss"].should.equal(issuer) - id_claims["aud"].should.equal(client_id) - access_claims = json.loads(jws.verify(access_token, json_web_key, 
"RS256")) - access_claims["iss"].should.equal(issuer) - access_claims["aud"].should.equal(client_id) - for k, v in outputs["additional_fields"].items(): - access_claims[k].should.equal(v) - - -@mock_cognitoidp -def test_change_password(): - conn = boto3.client("cognito-idp", "us-west-2") - - outputs = authentication_flow(conn) - - # Take this opportunity to test change_password, which requires an access token. - newer_password = str(uuid.uuid4()) - conn.change_password( - AccessToken=outputs["access_token"], - PreviousPassword=outputs["password"], - ProposedPassword=newer_password, - ) - - # Log in again, which should succeed without a challenge because the user is no - # longer in the force-new-password state. - result = conn.admin_initiate_auth( - UserPoolId=outputs["user_pool_id"], - ClientId=outputs["client_id"], - AuthFlow="ADMIN_NO_SRP_AUTH", - AuthParameters={ - "USERNAME": outputs["username"], - "PASSWORD": newer_password, - }, - ) - - result["AuthenticationResult"].should_not.be.none - - -@mock_cognitoidp -def test_forgot_password(): - conn = boto3.client("cognito-idp", "us-west-2") - - result = conn.forgot_password(ClientId=str(uuid.uuid4()), Username=str(uuid.uuid4())) - result["CodeDeliveryDetails"].should_not.be.none - - -@mock_cognitoidp -def test_confirm_forgot_password(): - conn = boto3.client("cognito-idp", "us-west-2") - - username = str(uuid.uuid4()) - user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] - client_id = conn.create_user_pool_client( - UserPoolId=user_pool_id, - ClientName=str(uuid.uuid4()), - )["UserPoolClient"]["ClientId"] - - conn.admin_create_user( - UserPoolId=user_pool_id, - Username=username, - TemporaryPassword=str(uuid.uuid4()), - ) - - conn.confirm_forgot_password( - ClientId=client_id, - Username=username, - ConfirmationCode=str(uuid.uuid4()), - Password=str(uuid.uuid4()), - ) +from __future__ import unicode_literals + +import boto3 +import json +import os +import uuid + +from jose import jws + +from moto import mock_cognitoidp +import sure # noqa + + +@mock_cognitoidp +def test_create_user_pool(): + conn = boto3.client("cognito-idp", "us-west-2") + + name = str(uuid.uuid4()) + value = str(uuid.uuid4()) + result = conn.create_user_pool( + PoolName=name, + LambdaConfig={ + "PreSignUp": value + } + ) + + result["UserPool"]["Id"].should_not.be.none + result["UserPool"]["Id"].should.match(r'[\w-]+_[0-9a-zA-Z]+') + result["UserPool"]["Name"].should.equal(name) + result["UserPool"]["LambdaConfig"]["PreSignUp"].should.equal(value) + + +@mock_cognitoidp +def test_list_user_pools(): + conn = boto3.client("cognito-idp", "us-west-2") + + name = str(uuid.uuid4()) + conn.create_user_pool(PoolName=name) + result = conn.list_user_pools(MaxResults=10) + result["UserPools"].should.have.length_of(1) + result["UserPools"][0]["Name"].should.equal(name) + + +@mock_cognitoidp +def test_describe_user_pool(): + conn = boto3.client("cognito-idp", "us-west-2") + + name = str(uuid.uuid4()) + value = str(uuid.uuid4()) + user_pool_details = conn.create_user_pool( + PoolName=name, + LambdaConfig={ + "PreSignUp": value + } + ) + + result = conn.describe_user_pool(UserPoolId=user_pool_details["UserPool"]["Id"]) + result["UserPool"]["Name"].should.equal(name) + result["UserPool"]["LambdaConfig"]["PreSignUp"].should.equal(value) + + +@mock_cognitoidp +def test_delete_user_pool(): + conn = boto3.client("cognito-idp", "us-west-2") + + user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] + 
conn.list_user_pools(MaxResults=10)["UserPools"].should.have.length_of(1) + conn.delete_user_pool(UserPoolId=user_pool_id) + conn.list_user_pools(MaxResults=10)["UserPools"].should.have.length_of(0) + + +@mock_cognitoidp +def test_create_user_pool_domain(): + conn = boto3.client("cognito-idp", "us-west-2") + + domain = str(uuid.uuid4()) + user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] + result = conn.create_user_pool_domain(UserPoolId=user_pool_id, Domain=domain) + result["ResponseMetadata"]["HTTPStatusCode"].should.equal(200) + + +@mock_cognitoidp +def test_describe_user_pool_domain(): + conn = boto3.client("cognito-idp", "us-west-2") + + domain = str(uuid.uuid4()) + user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] + conn.create_user_pool_domain(UserPoolId=user_pool_id, Domain=domain) + result = conn.describe_user_pool_domain(Domain=domain) + result["DomainDescription"]["Domain"].should.equal(domain) + result["DomainDescription"]["UserPoolId"].should.equal(user_pool_id) + result["DomainDescription"]["AWSAccountId"].should_not.be.none + + +@mock_cognitoidp +def test_delete_user_pool_domain(): + conn = boto3.client("cognito-idp", "us-west-2") + + domain = str(uuid.uuid4()) + user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] + conn.create_user_pool_domain(UserPoolId=user_pool_id, Domain=domain) + result = conn.delete_user_pool_domain(UserPoolId=user_pool_id, Domain=domain) + result["ResponseMetadata"]["HTTPStatusCode"].should.equal(200) + result = conn.describe_user_pool_domain(Domain=domain) + # This is a surprising behavior of the real service: describing a missing domain comes + # back with status 200 and a DomainDescription of {} + result["ResponseMetadata"]["HTTPStatusCode"].should.equal(200) + result["DomainDescription"].keys().should.have.length_of(0) + + +@mock_cognitoidp +def test_create_user_pool_client(): + conn = boto3.client("cognito-idp", "us-west-2") + + client_name = str(uuid.uuid4()) + value = str(uuid.uuid4()) + user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] + result = conn.create_user_pool_client( + UserPoolId=user_pool_id, + ClientName=client_name, + CallbackURLs=[value], + ) + + result["UserPoolClient"]["UserPoolId"].should.equal(user_pool_id) + result["UserPoolClient"]["ClientId"].should_not.be.none + result["UserPoolClient"]["ClientName"].should.equal(client_name) + result["UserPoolClient"]["CallbackURLs"].should.have.length_of(1) + result["UserPoolClient"]["CallbackURLs"][0].should.equal(value) + + +@mock_cognitoidp +def test_list_user_pool_clients(): + conn = boto3.client("cognito-idp", "us-west-2") + + client_name = str(uuid.uuid4()) + user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] + conn.create_user_pool_client(UserPoolId=user_pool_id, ClientName=client_name) + result = conn.list_user_pool_clients(UserPoolId=user_pool_id, MaxResults=10) + result["UserPoolClients"].should.have.length_of(1) + result["UserPoolClients"][0]["ClientName"].should.equal(client_name) + + +@mock_cognitoidp +def test_describe_user_pool_client(): + conn = boto3.client("cognito-idp", "us-west-2") + + client_name = str(uuid.uuid4()) + value = str(uuid.uuid4()) + user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] + client_details = conn.create_user_pool_client( + UserPoolId=user_pool_id, + ClientName=client_name, + CallbackURLs=[value], + ) + + result = conn.describe_user_pool_client( + 
UserPoolId=user_pool_id, + ClientId=client_details["UserPoolClient"]["ClientId"], + ) + + result["UserPoolClient"]["ClientName"].should.equal(client_name) + result["UserPoolClient"]["CallbackURLs"].should.have.length_of(1) + result["UserPoolClient"]["CallbackURLs"][0].should.equal(value) + + +@mock_cognitoidp +def test_update_user_pool_client(): + conn = boto3.client("cognito-idp", "us-west-2") + + old_client_name = str(uuid.uuid4()) + new_client_name = str(uuid.uuid4()) + old_value = str(uuid.uuid4()) + new_value = str(uuid.uuid4()) + user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] + client_details = conn.create_user_pool_client( + UserPoolId=user_pool_id, + ClientName=old_client_name, + CallbackURLs=[old_value], + ) + + result = conn.update_user_pool_client( + UserPoolId=user_pool_id, + ClientId=client_details["UserPoolClient"]["ClientId"], + ClientName=new_client_name, + CallbackURLs=[new_value], + ) + + result["UserPoolClient"]["ClientName"].should.equal(new_client_name) + result["UserPoolClient"]["CallbackURLs"].should.have.length_of(1) + result["UserPoolClient"]["CallbackURLs"][0].should.equal(new_value) + + +@mock_cognitoidp +def test_delete_user_pool_client(): + conn = boto3.client("cognito-idp", "us-west-2") + + user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] + client_details = conn.create_user_pool_client( + UserPoolId=user_pool_id, + ClientName=str(uuid.uuid4()), + ) + + conn.delete_user_pool_client( + UserPoolId=user_pool_id, + ClientId=client_details["UserPoolClient"]["ClientId"], + ) + + caught = False + try: + conn.describe_user_pool_client( + UserPoolId=user_pool_id, + ClientId=client_details["UserPoolClient"]["ClientId"], + ) + except conn.exceptions.ResourceNotFoundException: + caught = True + + caught.should.be.true + + +@mock_cognitoidp +def test_create_identity_provider(): + conn = boto3.client("cognito-idp", "us-west-2") + + provider_name = str(uuid.uuid4()) + provider_type = "Facebook" + value = str(uuid.uuid4()) + user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] + result = conn.create_identity_provider( + UserPoolId=user_pool_id, + ProviderName=provider_name, + ProviderType=provider_type, + ProviderDetails={ + "thing": value + }, + ) + + result["IdentityProvider"]["UserPoolId"].should.equal(user_pool_id) + result["IdentityProvider"]["ProviderName"].should.equal(provider_name) + result["IdentityProvider"]["ProviderType"].should.equal(provider_type) + result["IdentityProvider"]["ProviderDetails"]["thing"].should.equal(value) + + +@mock_cognitoidp +def test_list_identity_providers(): + conn = boto3.client("cognito-idp", "us-west-2") + + provider_name = str(uuid.uuid4()) + provider_type = "Facebook" + user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] + conn.create_identity_provider( + UserPoolId=user_pool_id, + ProviderName=provider_name, + ProviderType=provider_type, + ProviderDetails={}, + ) + + result = conn.list_identity_providers( + UserPoolId=user_pool_id, + MaxResults=10, + ) + + result["Providers"].should.have.length_of(1) + result["Providers"][0]["ProviderName"].should.equal(provider_name) + result["Providers"][0]["ProviderType"].should.equal(provider_type) + + +@mock_cognitoidp +def test_describe_identity_providers(): + conn = boto3.client("cognito-idp", "us-west-2") + + provider_name = str(uuid.uuid4()) + provider_type = "Facebook" + value = str(uuid.uuid4()) + user_pool_id = 
conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] + conn.create_identity_provider( + UserPoolId=user_pool_id, + ProviderName=provider_name, + ProviderType=provider_type, + ProviderDetails={ + "thing": value + }, + ) + + result = conn.describe_identity_provider( + UserPoolId=user_pool_id, + ProviderName=provider_name, + ) + + result["IdentityProvider"]["UserPoolId"].should.equal(user_pool_id) + result["IdentityProvider"]["ProviderName"].should.equal(provider_name) + result["IdentityProvider"]["ProviderType"].should.equal(provider_type) + result["IdentityProvider"]["ProviderDetails"]["thing"].should.equal(value) + + +@mock_cognitoidp +def test_delete_identity_providers(): + conn = boto3.client("cognito-idp", "us-west-2") + + provider_name = str(uuid.uuid4()) + provider_type = "Facebook" + value = str(uuid.uuid4()) + user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] + conn.create_identity_provider( + UserPoolId=user_pool_id, + ProviderName=provider_name, + ProviderType=provider_type, + ProviderDetails={ + "thing": value + }, + ) + + conn.delete_identity_provider(UserPoolId=user_pool_id, ProviderName=provider_name) + + caught = False + try: + conn.describe_identity_provider( + UserPoolId=user_pool_id, + ProviderName=provider_name, + ) + except conn.exceptions.ResourceNotFoundException: + caught = True + + caught.should.be.true + + +@mock_cognitoidp +def test_admin_create_user(): + conn = boto3.client("cognito-idp", "us-west-2") + + username = str(uuid.uuid4()) + value = str(uuid.uuid4()) + user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] + result = conn.admin_create_user( + UserPoolId=user_pool_id, + Username=username, + UserAttributes=[ + {"Name": "thing", "Value": value} + ], + ) + + result["User"]["Username"].should.equal(username) + result["User"]["UserStatus"].should.equal("FORCE_CHANGE_PASSWORD") + result["User"]["Attributes"].should.have.length_of(1) + result["User"]["Attributes"][0]["Name"].should.equal("thing") + result["User"]["Attributes"][0]["Value"].should.equal(value) + result["User"]["Enabled"].should.equal(True) + + +@mock_cognitoidp +def test_admin_get_user(): + conn = boto3.client("cognito-idp", "us-west-2") + + username = str(uuid.uuid4()) + value = str(uuid.uuid4()) + user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] + conn.admin_create_user( + UserPoolId=user_pool_id, + Username=username, + UserAttributes=[ + {"Name": "thing", "Value": value} + ], + ) + + result = conn.admin_get_user(UserPoolId=user_pool_id, Username=username) + result["Username"].should.equal(username) + result["UserAttributes"].should.have.length_of(1) + result["UserAttributes"][0]["Name"].should.equal("thing") + result["UserAttributes"][0]["Value"].should.equal(value) + + +@mock_cognitoidp +def test_admin_get_missing_user(): + conn = boto3.client("cognito-idp", "us-west-2") + + username = str(uuid.uuid4()) + user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] + + caught = False + try: + conn.admin_get_user(UserPoolId=user_pool_id, Username=username) + except conn.exceptions.UserNotFoundException: + caught = True + + caught.should.be.true + + +@mock_cognitoidp +def test_list_users(): + conn = boto3.client("cognito-idp", "us-west-2") + + username = str(uuid.uuid4()) + user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] + conn.admin_create_user(UserPoolId=user_pool_id, Username=username) + result = 
conn.list_users(UserPoolId=user_pool_id) + result["Users"].should.have.length_of(1) + result["Users"][0]["Username"].should.equal(username) + + +@mock_cognitoidp +def test_admin_disable_user(): + conn = boto3.client("cognito-idp", "us-west-2") + + username = str(uuid.uuid4()) + user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] + conn.admin_create_user(UserPoolId=user_pool_id, Username=username) + + result = conn.admin_disable_user(UserPoolId=user_pool_id, Username=username) + list(result.keys()).should.equal(["ResponseMetadata"]) # No response expected + + conn.admin_get_user(UserPoolId=user_pool_id, Username=username) \ + ["Enabled"].should.equal(False) + + +@mock_cognitoidp +def test_admin_enable_user(): + conn = boto3.client("cognito-idp", "us-west-2") + + username = str(uuid.uuid4()) + user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] + conn.admin_create_user(UserPoolId=user_pool_id, Username=username) + conn.admin_disable_user(UserPoolId=user_pool_id, Username=username) + + result = conn.admin_enable_user(UserPoolId=user_pool_id, Username=username) + list(result.keys()).should.equal(["ResponseMetadata"]) # No response expected + + conn.admin_get_user(UserPoolId=user_pool_id, Username=username) \ + ["Enabled"].should.equal(True) + + +@mock_cognitoidp +def test_admin_delete_user(): + conn = boto3.client("cognito-idp", "us-west-2") + + username = str(uuid.uuid4()) + user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] + conn.admin_create_user(UserPoolId=user_pool_id, Username=username) + conn.admin_delete_user(UserPoolId=user_pool_id, Username=username) + + caught = False + try: + conn.admin_get_user(UserPoolId=user_pool_id, Username=username) + except conn.exceptions.UserNotFoundException: + caught = True + + caught.should.be.true + + +def authentication_flow(conn): + username = str(uuid.uuid4()) + temporary_password = str(uuid.uuid4()) + user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] + user_attribute_name = str(uuid.uuid4()) + user_attribute_value = str(uuid.uuid4()) + client_id = conn.create_user_pool_client( + UserPoolId=user_pool_id, + ClientName=str(uuid.uuid4()), + ReadAttributes=[user_attribute_name] + )["UserPoolClient"]["ClientId"] + + conn.admin_create_user( + UserPoolId=user_pool_id, + Username=username, + TemporaryPassword=temporary_password, + UserAttributes=[{ + 'Name': user_attribute_name, + 'Value': user_attribute_value + }] + ) + + result = conn.admin_initiate_auth( + UserPoolId=user_pool_id, + ClientId=client_id, + AuthFlow="ADMIN_NO_SRP_AUTH", + AuthParameters={ + "USERNAME": username, + "PASSWORD": temporary_password + }, + ) + + # A newly created user is forced to set a new password + result["ChallengeName"].should.equal("NEW_PASSWORD_REQUIRED") + result["Session"].should_not.be.none + + # This sets a new password and logs the user in (creates tokens) + new_password = str(uuid.uuid4()) + result = conn.respond_to_auth_challenge( + Session=result["Session"], + ClientId=client_id, + ChallengeName="NEW_PASSWORD_REQUIRED", + ChallengeResponses={ + "USERNAME": username, + "NEW_PASSWORD": new_password + } + ) + + result["AuthenticationResult"]["IdToken"].should_not.be.none + result["AuthenticationResult"]["AccessToken"].should_not.be.none + + return { + "user_pool_id": user_pool_id, + "client_id": client_id, + "id_token": result["AuthenticationResult"]["IdToken"], + "access_token": result["AuthenticationResult"]["AccessToken"], + "username": 
username, + "password": new_password, + "additional_fields": { + user_attribute_name: user_attribute_value + } + } + + +@mock_cognitoidp +def test_authentication_flow(): + conn = boto3.client("cognito-idp", "us-west-2") + + authentication_flow(conn) + + +@mock_cognitoidp +def test_token_legitimacy(): + conn = boto3.client("cognito-idp", "us-west-2") + + path = "../../moto/cognitoidp/resources/jwks-public.json" + with open(os.path.join(os.path.dirname(__file__), path)) as f: + json_web_key = json.loads(f.read())["keys"][0] + + outputs = authentication_flow(conn) + id_token = outputs["id_token"] + access_token = outputs["access_token"] + client_id = outputs["client_id"] + issuer = "https://cognito-idp.us-west-2.amazonaws.com/{}".format(outputs["user_pool_id"]) + id_claims = json.loads(jws.verify(id_token, json_web_key, "RS256")) + id_claims["iss"].should.equal(issuer) + id_claims["aud"].should.equal(client_id) + access_claims = json.loads(jws.verify(access_token, json_web_key, "RS256")) + access_claims["iss"].should.equal(issuer) + access_claims["aud"].should.equal(client_id) + for k, v in outputs["additional_fields"].items(): + access_claims[k].should.equal(v) + + +@mock_cognitoidp +def test_change_password(): + conn = boto3.client("cognito-idp", "us-west-2") + + outputs = authentication_flow(conn) + + # Take this opportunity to test change_password, which requires an access token. + newer_password = str(uuid.uuid4()) + conn.change_password( + AccessToken=outputs["access_token"], + PreviousPassword=outputs["password"], + ProposedPassword=newer_password, + ) + + # Log in again, which should succeed without a challenge because the user is no + # longer in the force-new-password state. + result = conn.admin_initiate_auth( + UserPoolId=outputs["user_pool_id"], + ClientId=outputs["client_id"], + AuthFlow="ADMIN_NO_SRP_AUTH", + AuthParameters={ + "USERNAME": outputs["username"], + "PASSWORD": newer_password, + }, + ) + + result["AuthenticationResult"].should_not.be.none + + +@mock_cognitoidp +def test_forgot_password(): + conn = boto3.client("cognito-idp", "us-west-2") + + result = conn.forgot_password(ClientId=str(uuid.uuid4()), Username=str(uuid.uuid4())) + result["CodeDeliveryDetails"].should_not.be.none + + +@mock_cognitoidp +def test_confirm_forgot_password(): + conn = boto3.client("cognito-idp", "us-west-2") + + username = str(uuid.uuid4()) + user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] + client_id = conn.create_user_pool_client( + UserPoolId=user_pool_id, + ClientName=str(uuid.uuid4()), + )["UserPoolClient"]["ClientId"] + + conn.admin_create_user( + UserPoolId=user_pool_id, + Username=username, + TemporaryPassword=str(uuid.uuid4()), + ) + + conn.confirm_forgot_password( + ClientId=client_id, + Username=username, + ConfirmationCode=str(uuid.uuid4()), + Password=str(uuid.uuid4()), + ) diff --git a/tests/test_core/test_decorator_calls.py b/tests/test_core/test_decorator_calls.py index 5d2f6a4ef007..b7e5f7448101 100644 --- a/tests/test_core/test_decorator_calls.py +++ b/tests/test_core/test_decorator_calls.py @@ -1,98 +1,98 @@ -from __future__ import unicode_literals -import boto -from boto.exception import EC2ResponseError -import sure # noqa -import unittest - -import tests.backport_assert_raises # noqa -from nose.tools import assert_raises - -from moto import mock_ec2_deprecated, mock_s3_deprecated - -''' -Test the different ways that the decorator can be used -''' - - -@mock_ec2_deprecated -def test_basic_connect(): - boto.connect_ec2() - - 
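The decorator-call tests that follow exercise the three ways a moto mock can be activated: as a function decorator, as a context manager, and via explicit start()/stop() calls. For reference, a minimal sketch of the three styles side by side, assuming only the boto and mock_ec2_deprecated imports already present in this file:

    @mock_ec2_deprecated
    def as_decorator():
        # The mock is active for the duration of the decorated call.
        boto.connect_ec2('the_key', 'the_secret')

    def as_context_manager():
        # The mock is active only inside the with-block.
        with mock_ec2_deprecated():
            boto.connect_ec2('the_key', 'the_secret')

    def as_start_stop():
        # Manual control; pair every start() with a stop().
        mock = mock_ec2_deprecated()
        mock.start()
        try:
            boto.connect_ec2('the_key', 'the_secret')
        finally:
            mock.stop()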
-@mock_ec2_deprecated -def test_basic_decorator(): - conn = boto.connect_ec2('the_key', 'the_secret') - list(conn.get_all_instances()).should.equal([]) - - -def test_context_manager(): - conn = boto.connect_ec2('the_key', 'the_secret') - with assert_raises(EC2ResponseError): - conn.get_all_instances() - - with mock_ec2_deprecated(): - conn = boto.connect_ec2('the_key', 'the_secret') - list(conn.get_all_instances()).should.equal([]) - - with assert_raises(EC2ResponseError): - conn = boto.connect_ec2('the_key', 'the_secret') - conn.get_all_instances() - - -def test_decorator_start_and_stop(): - conn = boto.connect_ec2('the_key', 'the_secret') - with assert_raises(EC2ResponseError): - conn.get_all_instances() - - mock = mock_ec2_deprecated() - mock.start() - conn = boto.connect_ec2('the_key', 'the_secret') - list(conn.get_all_instances()).should.equal([]) - mock.stop() - - with assert_raises(EC2ResponseError): - conn.get_all_instances() - - -@mock_ec2_deprecated -def test_decorater_wrapped_gets_set(): - """ - Moto decorator's __wrapped__ should get set to the tests function - """ - test_decorater_wrapped_gets_set.__wrapped__.__name__.should.equal( - 'test_decorater_wrapped_gets_set') - - -@mock_ec2_deprecated -class Tester(object): - - def test_the_class(self): - conn = boto.connect_ec2() - list(conn.get_all_instances()).should.have.length_of(0) - - def test_still_the_same(self): - conn = boto.connect_ec2() - list(conn.get_all_instances()).should.have.length_of(0) - - -@mock_s3_deprecated -class TesterWithSetup(unittest.TestCase): - - def setUp(self): - self.conn = boto.connect_s3() - self.conn.create_bucket('mybucket') - - def test_still_the_same(self): - bucket = self.conn.get_bucket('mybucket') - bucket.name.should.equal("mybucket") - - -@mock_s3_deprecated -class TesterWithStaticmethod(object): - - @staticmethod - def static(*args): - assert not args or not isinstance(args[0], TesterWithStaticmethod) - - def test_no_instance_sent_to_staticmethod(self): - self.static() +from __future__ import unicode_literals +import boto +from boto.exception import EC2ResponseError +import sure # noqa +import unittest + +import tests.backport_assert_raises # noqa +from nose.tools import assert_raises + +from moto import mock_ec2_deprecated, mock_s3_deprecated + +''' +Test the different ways that the decorator can be used +''' + + +@mock_ec2_deprecated +def test_basic_connect(): + boto.connect_ec2() + + +@mock_ec2_deprecated +def test_basic_decorator(): + conn = boto.connect_ec2('the_key', 'the_secret') + list(conn.get_all_instances()).should.equal([]) + + +def test_context_manager(): + conn = boto.connect_ec2('the_key', 'the_secret') + with assert_raises(EC2ResponseError): + conn.get_all_instances() + + with mock_ec2_deprecated(): + conn = boto.connect_ec2('the_key', 'the_secret') + list(conn.get_all_instances()).should.equal([]) + + with assert_raises(EC2ResponseError): + conn = boto.connect_ec2('the_key', 'the_secret') + conn.get_all_instances() + + +def test_decorator_start_and_stop(): + conn = boto.connect_ec2('the_key', 'the_secret') + with assert_raises(EC2ResponseError): + conn.get_all_instances() + + mock = mock_ec2_deprecated() + mock.start() + conn = boto.connect_ec2('the_key', 'the_secret') + list(conn.get_all_instances()).should.equal([]) + mock.stop() + + with assert_raises(EC2ResponseError): + conn.get_all_instances() + + +@mock_ec2_deprecated +def test_decorater_wrapped_gets_set(): + """ + Moto decorator's __wrapped__ should get set to the tests function + """ + 
test_decorater_wrapped_gets_set.__wrapped__.__name__.should.equal( + 'test_decorater_wrapped_gets_set') + + +@mock_ec2_deprecated +class Tester(object): + + def test_the_class(self): + conn = boto.connect_ec2() + list(conn.get_all_instances()).should.have.length_of(0) + + def test_still_the_same(self): + conn = boto.connect_ec2() + list(conn.get_all_instances()).should.have.length_of(0) + + +@mock_s3_deprecated +class TesterWithSetup(unittest.TestCase): + + def setUp(self): + self.conn = boto.connect_s3() + self.conn.create_bucket('mybucket') + + def test_still_the_same(self): + bucket = self.conn.get_bucket('mybucket') + bucket.name.should.equal("mybucket") + + +@mock_s3_deprecated +class TesterWithStaticmethod(object): + + @staticmethod + def static(*args): + assert not args or not isinstance(args[0], TesterWithStaticmethod) + + def test_no_instance_sent_to_staticmethod(self): + self.static() diff --git a/tests/test_core/test_instance_metadata.py b/tests/test_core/test_instance_metadata.py index f8bf2481470b..b66f9637eb52 100644 --- a/tests/test_core/test_instance_metadata.py +++ b/tests/test_core/test_instance_metadata.py @@ -1,46 +1,46 @@ -from __future__ import unicode_literals -import sure # noqa -from nose.tools import assert_raises -import requests - -from moto import mock_ec2, settings - -if settings.TEST_SERVER_MODE: - BASE_URL = 'http://localhost:5000' -else: - BASE_URL = 'http://169.254.169.254' - - -@mock_ec2 -def test_latest_meta_data(): - res = requests.get("{0}/latest/meta-data/".format(BASE_URL)) - res.content.should.equal(b"iam") - - -@mock_ec2 -def test_meta_data_iam(): - res = requests.get("{0}/latest/meta-data/iam".format(BASE_URL)) - json_response = res.json() - default_role = json_response['security-credentials']['default-role'] - default_role.should.contain('AccessKeyId') - default_role.should.contain('SecretAccessKey') - default_role.should.contain('Token') - default_role.should.contain('Expiration') - - -@mock_ec2 -def test_meta_data_security_credentials(): - res = requests.get( - "{0}/latest/meta-data/iam/security-credentials/".format(BASE_URL)) - res.content.should.equal(b"default-role") - - -@mock_ec2 -def test_meta_data_default_role(): - res = requests.get( - "{0}/latest/meta-data/iam/security-credentials/default-role".format(BASE_URL)) - json_response = res.json() - json_response.should.contain('AccessKeyId') - json_response.should.contain('SecretAccessKey') - json_response.should.contain('Token') - json_response.should.contain('Expiration') +from __future__ import unicode_literals +import sure # noqa +from nose.tools import assert_raises +import requests + +from moto import mock_ec2, settings + +if settings.TEST_SERVER_MODE: + BASE_URL = 'http://localhost:5000' +else: + BASE_URL = 'http://169.254.169.254' + + +@mock_ec2 +def test_latest_meta_data(): + res = requests.get("{0}/latest/meta-data/".format(BASE_URL)) + res.content.should.equal(b"iam") + + +@mock_ec2 +def test_meta_data_iam(): + res = requests.get("{0}/latest/meta-data/iam".format(BASE_URL)) + json_response = res.json() + default_role = json_response['security-credentials']['default-role'] + default_role.should.contain('AccessKeyId') + default_role.should.contain('SecretAccessKey') + default_role.should.contain('Token') + default_role.should.contain('Expiration') + + +@mock_ec2 +def test_meta_data_security_credentials(): + res = requests.get( + "{0}/latest/meta-data/iam/security-credentials/".format(BASE_URL)) + res.content.should.equal(b"default-role") + + +@mock_ec2 +def 
test_meta_data_default_role(): + res = requests.get( + "{0}/latest/meta-data/iam/security-credentials/default-role".format(BASE_URL)) + json_response = res.json() + json_response.should.contain('AccessKeyId') + json_response.should.contain('SecretAccessKey') + json_response.should.contain('Token') + json_response.should.contain('Expiration') diff --git a/tests/test_core/test_moto_api.py b/tests/test_core/test_moto_api.py index cb0ca89394f9..47dbe5a4adfa 100644 --- a/tests/test_core/test_moto_api.py +++ b/tests/test_core/test_moto_api.py @@ -1,33 +1,33 @@ -from __future__ import unicode_literals -import sure # noqa -from nose.tools import assert_raises -import requests - -import boto3 -from moto import mock_sqs, settings - -base_url = "http://localhost:5000" if settings.TEST_SERVER_MODE else "http://motoapi.amazonaws.com" - - -@mock_sqs -def test_reset_api(): - conn = boto3.client("sqs", region_name='us-west-1') - conn.create_queue(QueueName="queue1") - conn.list_queues()['QueueUrls'].should.have.length_of(1) - - res = requests.post("{base_url}/moto-api/reset".format(base_url=base_url)) - res.content.should.equal(b'{"status": "ok"}') - - conn.list_queues().shouldnt.contain('QueueUrls') # No more queues - - -@mock_sqs -def test_data_api(): - conn = boto3.client("sqs", region_name='us-west-1') - conn.create_queue(QueueName="queue1") - - res = requests.post("{base_url}/moto-api/data.json".format(base_url=base_url)) - queues = res.json()['sqs']['Queue'] - len(queues).should.equal(1) - queue = queues[0] - queue['name'].should.equal("queue1") +from __future__ import unicode_literals +import sure # noqa +from nose.tools import assert_raises +import requests + +import boto3 +from moto import mock_sqs, settings + +base_url = "http://localhost:5000" if settings.TEST_SERVER_MODE else "http://motoapi.amazonaws.com" + + +@mock_sqs +def test_reset_api(): + conn = boto3.client("sqs", region_name='us-west-1') + conn.create_queue(QueueName="queue1") + conn.list_queues()['QueueUrls'].should.have.length_of(1) + + res = requests.post("{base_url}/moto-api/reset".format(base_url=base_url)) + res.content.should.equal(b'{"status": "ok"}') + + conn.list_queues().shouldnt.contain('QueueUrls') # No more queues + + +@mock_sqs +def test_data_api(): + conn = boto3.client("sqs", region_name='us-west-1') + conn.create_queue(QueueName="queue1") + + res = requests.post("{base_url}/moto-api/data.json".format(base_url=base_url)) + queues = res.json()['sqs']['Queue'] + len(queues).should.equal(1) + queue = queues[0] + queue['name'].should.equal("queue1") diff --git a/tests/test_core/test_nested.py b/tests/test_core/test_nested.py index 7c0b8f687681..ec10a69b90aa 100644 --- a/tests/test_core/test_nested.py +++ b/tests/test_core/test_nested.py @@ -1,29 +1,29 @@ -from __future__ import unicode_literals -import unittest - -from boto.sqs.connection import SQSConnection -from boto.sqs.message import Message -from boto.ec2 import EC2Connection - -from moto import mock_sqs_deprecated, mock_ec2_deprecated - - -class TestNestedDecorators(unittest.TestCase): - - @mock_sqs_deprecated - def setup_sqs_queue(self): - conn = SQSConnection() - q = conn.create_queue('some-queue') - - m = Message() - m.set_body('This is my first message.') - q.write(m) - - self.assertEqual(q.count(), 1) - - @mock_ec2_deprecated - def test_nested(self): - self.setup_sqs_queue() - - conn = EC2Connection() - conn.run_instances('ami-123456') +from __future__ import unicode_literals +import unittest + +from boto.sqs.connection import SQSConnection +from 
boto.sqs.message import Message +from boto.ec2 import EC2Connection + +from moto import mock_sqs_deprecated, mock_ec2_deprecated + + +class TestNestedDecorators(unittest.TestCase): + + @mock_sqs_deprecated + def setup_sqs_queue(self): + conn = SQSConnection() + q = conn.create_queue('some-queue') + + m = Message() + m.set_body('This is my first message.') + q.write(m) + + self.assertEqual(q.count(), 1) + + @mock_ec2_deprecated + def test_nested(self): + self.setup_sqs_queue() + + conn = EC2Connection() + conn.run_instances('ami-123456') diff --git a/tests/test_core/test_responses.py b/tests/test_core/test_responses.py index c3cc27aefdbe..f3f369ff3e84 100644 --- a/tests/test_core/test_responses.py +++ b/tests/test_core/test_responses.py @@ -1,81 +1,81 @@ -from __future__ import unicode_literals - -import sure # noqa - -from moto.core.responses import AWSServiceSpec -from moto.core.responses import flatten_json_request_body - - -def test_flatten_json_request_body(): - spec = AWSServiceSpec( - 'data/emr/2009-03-31/service-2.json').input_spec('RunJobFlow') - - body = { - 'Name': 'cluster', - 'Instances': { - 'Ec2KeyName': 'ec2key', - 'InstanceGroups': [ - {'InstanceRole': 'MASTER', - 'InstanceType': 'm1.small'}, - {'InstanceRole': 'CORE', - 'InstanceType': 'm1.medium'}, - ], - 'Placement': {'AvailabilityZone': 'us-east-1'}, - }, - 'Steps': [ - {'HadoopJarStep': { - 'Properties': [ - {'Key': 'k1', 'Value': 'v1'}, - {'Key': 'k2', 'Value': 'v2'} - ], - 'Args': ['arg1', 'arg2']}}, - ], - 'Configurations': [ - {'Classification': 'class', - 'Properties': {'propkey1': 'propkey1', - 'propkey2': 'propkey2'}}, - {'Classification': 'anotherclass', - 'Properties': {'propkey3': 'propkey3'}}, - ] - } - - flat = flatten_json_request_body('', body, spec) - flat['Name'].should.equal(body['Name']) - flat['Instances.Ec2KeyName'].should.equal(body['Instances']['Ec2KeyName']) - for idx in range(2): - flat['Instances.InstanceGroups.member.' + str(idx + 1) + '.InstanceRole'].should.equal( - body['Instances']['InstanceGroups'][idx]['InstanceRole']) - flat['Instances.InstanceGroups.member.' + str(idx + 1) + '.InstanceType'].should.equal( - body['Instances']['InstanceGroups'][idx]['InstanceType']) - flat['Instances.Placement.AvailabilityZone'].should.equal( - body['Instances']['Placement']['AvailabilityZone']) - - for idx in range(1): - prefix = 'Steps.member.' + str(idx + 1) + '.HadoopJarStep' - step = body['Steps'][idx]['HadoopJarStep'] - i = 0 - while prefix + '.Properties.member.' + str(i + 1) + '.Key' in flat: - flat[prefix + '.Properties.member.' + - str(i + 1) + '.Key'].should.equal(step['Properties'][i]['Key']) - flat[prefix + '.Properties.member.' + - str(i + 1) + '.Value'].should.equal(step['Properties'][i]['Value']) - i += 1 - i = 0 - while prefix + '.Args.member.' + str(i + 1) in flat: - flat[prefix + '.Args.member.' + - str(i + 1)].should.equal(step['Args'][i]) - i += 1 - - for idx in range(2): - flat['Configurations.member.' 
+ str(idx + 1) + '.Classification'].should.equal( - body['Configurations'][idx]['Classification']) - - props = {} - i = 1 - keyfmt = 'Configurations.member.{0}.Properties.entry.{1}' - key = keyfmt.format(idx + 1, i) - while key + '.key' in flat: - props[flat[key + '.key']] = flat[key + '.value'] - i += 1 - key = keyfmt.format(idx + 1, i) - props.should.equal(body['Configurations'][idx]['Properties']) +from __future__ import unicode_literals + +import sure # noqa + +from moto.core.responses import AWSServiceSpec +from moto.core.responses import flatten_json_request_body + + +def test_flatten_json_request_body(): + spec = AWSServiceSpec( + 'data/emr/2009-03-31/service-2.json').input_spec('RunJobFlow') + + body = { + 'Name': 'cluster', + 'Instances': { + 'Ec2KeyName': 'ec2key', + 'InstanceGroups': [ + {'InstanceRole': 'MASTER', + 'InstanceType': 'm1.small'}, + {'InstanceRole': 'CORE', + 'InstanceType': 'm1.medium'}, + ], + 'Placement': {'AvailabilityZone': 'us-east-1'}, + }, + 'Steps': [ + {'HadoopJarStep': { + 'Properties': [ + {'Key': 'k1', 'Value': 'v1'}, + {'Key': 'k2', 'Value': 'v2'} + ], + 'Args': ['arg1', 'arg2']}}, + ], + 'Configurations': [ + {'Classification': 'class', + 'Properties': {'propkey1': 'propkey1', + 'propkey2': 'propkey2'}}, + {'Classification': 'anotherclass', + 'Properties': {'propkey3': 'propkey3'}}, + ] + } + + flat = flatten_json_request_body('', body, spec) + flat['Name'].should.equal(body['Name']) + flat['Instances.Ec2KeyName'].should.equal(body['Instances']['Ec2KeyName']) + for idx in range(2): + flat['Instances.InstanceGroups.member.' + str(idx + 1) + '.InstanceRole'].should.equal( + body['Instances']['InstanceGroups'][idx]['InstanceRole']) + flat['Instances.InstanceGroups.member.' + str(idx + 1) + '.InstanceType'].should.equal( + body['Instances']['InstanceGroups'][idx]['InstanceType']) + flat['Instances.Placement.AvailabilityZone'].should.equal( + body['Instances']['Placement']['AvailabilityZone']) + + for idx in range(1): + prefix = 'Steps.member.' + str(idx + 1) + '.HadoopJarStep' + step = body['Steps'][idx]['HadoopJarStep'] + i = 0 + while prefix + '.Properties.member.' + str(i + 1) + '.Key' in flat: + flat[prefix + '.Properties.member.' + + str(i + 1) + '.Key'].should.equal(step['Properties'][i]['Key']) + flat[prefix + '.Properties.member.' + + str(i + 1) + '.Value'].should.equal(step['Properties'][i]['Value']) + i += 1 + i = 0 + while prefix + '.Args.member.' + str(i + 1) in flat: + flat[prefix + '.Args.member.' + + str(i + 1)].should.equal(step['Args'][i]) + i += 1 + + for idx in range(2): + flat['Configurations.member.' 
+ str(idx + 1) + '.Classification'].should.equal( + body['Configurations'][idx]['Classification']) + + props = {} + i = 1 + keyfmt = 'Configurations.member.{0}.Properties.entry.{1}' + key = keyfmt.format(idx + 1, i) + while key + '.key' in flat: + props[flat[key + '.key']] = flat[key + '.value'] + i += 1 + key = keyfmt.format(idx + 1, i) + props.should.equal(body['Configurations'][idx]['Properties']) diff --git a/tests/test_core/test_server.py b/tests/test_core/test_server.py index b7290e3514be..d1261a49a483 100644 --- a/tests/test_core/test_server.py +++ b/tests/test_core/test_server.py @@ -1,53 +1,53 @@ -from __future__ import unicode_literals -from mock import patch -import sure # noqa - -from moto.server import main, create_backend_app, DomainDispatcherApplication - - -def test_wrong_arguments(): - try: - main(["name", "test1", "test2", "test3"]) - assert False, ("main() when called with the incorrect number of args" - " should raise a system exit") - except SystemExit: - pass - - -@patch('moto.server.run_simple') -def test_right_arguments(run_simple): - main(["s3"]) - func_call = run_simple.call_args[0] - func_call[0].should.equal("127.0.0.1") - func_call[1].should.equal(5000) - - -@patch('moto.server.run_simple') -def test_port_argument(run_simple): - main(["s3", "--port", "8080"]) - func_call = run_simple.call_args[0] - func_call[0].should.equal("127.0.0.1") - func_call[1].should.equal(8080) - - -def test_domain_dispatched(): - dispatcher = DomainDispatcherApplication(create_backend_app) - backend_app = dispatcher.get_application( - {"HTTP_HOST": "email.us-east1.amazonaws.com"}) - keys = list(backend_app.view_functions.keys()) - keys[0].should.equal('EmailResponse.dispatch') - - -def test_domain_without_matches(): - dispatcher = DomainDispatcherApplication(create_backend_app) - dispatcher.get_application.when.called_with( - {"HTTP_HOST": "not-matching-anything.com"}).should.throw(RuntimeError) - - -def test_domain_dispatched_with_service(): - # If we pass a particular service, always return that. 
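As the comment in this test notes, DomainDispatcherApplication resolves a backend either from the HTTP Host header or from a fixed service when one is given, and main() hands the resulting WSGI app to run_simple (the callable patched as moto.server.run_simple in the tests above). A minimal sketch of wiring this up by hand, assuming run_simple comes from werkzeug.serving as that patch target suggests:

    from werkzeug.serving import run_simple
    from moto.server import DomainDispatcherApplication, create_backend_app

    # Serve a single mocked service on one port, as main(["s3"]) would.
    app = DomainDispatcherApplication(create_backend_app, service="s3")
    run_simple("127.0.0.1", 5000, app)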
-    dispatcher = DomainDispatcherApplication(create_backend_app, service="s3")
-    backend_app = dispatcher.get_application(
-        {"HTTP_HOST": "s3.us-east1.amazonaws.com"})
-    keys = set(backend_app.view_functions.keys())
-    keys.should.contain('ResponseObject.key_response')
+from __future__ import unicode_literals
+from mock import patch
+import sure  # noqa
+
+from moto.server import main, create_backend_app, DomainDispatcherApplication
+
+
+def test_wrong_arguments():
+    try:
+        main(["name", "test1", "test2", "test3"])
+        assert False, ("main() when called with the incorrect number of args"
+                       " should raise a system exit")
+    except SystemExit:
+        pass
+
+
+@patch('moto.server.run_simple')
+def test_right_arguments(run_simple):
+    main(["s3"])
+    func_call = run_simple.call_args[0]
+    func_call[0].should.equal("127.0.0.1")
+    func_call[1].should.equal(5000)
+
+
+@patch('moto.server.run_simple')
+def test_port_argument(run_simple):
+    main(["s3", "--port", "8080"])
+    func_call = run_simple.call_args[0]
+    func_call[0].should.equal("127.0.0.1")
+    func_call[1].should.equal(8080)
+
+
+def test_domain_dispatched():
+    dispatcher = DomainDispatcherApplication(create_backend_app)
+    backend_app = dispatcher.get_application(
+        {"HTTP_HOST": "email.us-east1.amazonaws.com"})
+    keys = list(backend_app.view_functions.keys())
+    keys[0].should.equal('EmailResponse.dispatch')
+
+
+def test_domain_without_matches():
+    dispatcher = DomainDispatcherApplication(create_backend_app)
+    dispatcher.get_application.when.called_with(
+        {"HTTP_HOST": "not-matching-anything.com"}).should.throw(RuntimeError)
+
+
+def test_domain_dispatched_with_service():
+    # If we pass a particular service, always return that.
+    dispatcher = DomainDispatcherApplication(create_backend_app, service="s3")
+    backend_app = dispatcher.get_application(
+        {"HTTP_HOST": "s3.us-east1.amazonaws.com"})
+    keys = set(backend_app.view_functions.keys())
+    keys.should.contain('ResponseObject.key_response')
diff --git a/tests/test_core/test_url_mapping.py b/tests/test_core/test_url_mapping.py
index 8f7921a5ab52..b58e991c4be3 100644
--- a/tests/test_core/test_url_mapping.py
+++ b/tests/test_core/test_url_mapping.py
@@ -1,22 +1,22 @@
-from __future__ import unicode_literals
-import sure  # noqa
-
-from moto.core.utils import convert_regex_to_flask_path
-
-
-def test_flask_path_converting_simple():
-    convert_regex_to_flask_path("/").should.equal("/")
-    convert_regex_to_flask_path("/$").should.equal("/")
-
-    convert_regex_to_flask_path("/foo").should.equal("/foo")
-
-    convert_regex_to_flask_path("/foo/bar/").should.equal("/foo/bar/")
-
-
-def test_flask_path_converting_regex():
-    convert_regex_to_flask_path(
-        "/(?P<id>[a-zA-Z0-9\-_]+)").should.equal('/<regex("[a-zA-Z0-9\-_]+"):id>')
-
-    convert_regex_to_flask_path("(?P<account_id>\d+)/(?P<queue_name>.*)$").should.equal(
-        '/<regex("\d+"):account_id>/<regex(".*"):queue_name>'
-    )
+from __future__ import unicode_literals
+import sure  # noqa
+
+from moto.core.utils import convert_regex_to_flask_path
+
+
+def test_flask_path_converting_simple():
+    convert_regex_to_flask_path("/").should.equal("/")
+    convert_regex_to_flask_path("/$").should.equal("/")
+
+    convert_regex_to_flask_path("/foo").should.equal("/foo")
+
+    convert_regex_to_flask_path("/foo/bar/").should.equal("/foo/bar/")
+
+
+def test_flask_path_converting_regex():
+    convert_regex_to_flask_path(
+        "/(?P<id>[a-zA-Z0-9\-_]+)").should.equal('/<regex("[a-zA-Z0-9\-_]+"):id>')
+
+    convert_regex_to_flask_path("(?P<account_id>\d+)/(?P<queue_name>.*)$").should.equal(
+        '/<regex("\d+"):account_id>/<regex(".*"):queue_name>'
+    )
diff --git a/tests/test_core/test_utils.py b/tests/test_core/test_utils.py
index 8dbf21716fe3..22449a910c72 100644
--- a/tests/test_core/test_utils.py
+++ 
b/tests/test_core/test_utils.py @@ -1,30 +1,30 @@ -from __future__ import unicode_literals - -import sure # noqa -from freezegun import freeze_time - -from moto.core.utils import camelcase_to_underscores, underscores_to_camelcase, unix_time - - -def test_camelcase_to_underscores(): - cases = { - "theNewAttribute": "the_new_attribute", - "attri bute With Space": "attribute_with_space", - "FirstLetterCapital": "first_letter_capital", - "ListMFADevices": "list_mfa_devices", - } - for arg, expected in cases.items(): - camelcase_to_underscores(arg).should.equal(expected) - - -def test_underscores_to_camelcase(): - cases = { - "the_new_attribute": "theNewAttribute", - } - for arg, expected in cases.items(): - underscores_to_camelcase(arg).should.equal(expected) - - -@freeze_time("2015-01-01 12:00:00") -def test_unix_time(): - unix_time().should.equal(1420113600.0) +from __future__ import unicode_literals + +import sure # noqa +from freezegun import freeze_time + +from moto.core.utils import camelcase_to_underscores, underscores_to_camelcase, unix_time + + +def test_camelcase_to_underscores(): + cases = { + "theNewAttribute": "the_new_attribute", + "attri bute With Space": "attribute_with_space", + "FirstLetterCapital": "first_letter_capital", + "ListMFADevices": "list_mfa_devices", + } + for arg, expected in cases.items(): + camelcase_to_underscores(arg).should.equal(expected) + + +def test_underscores_to_camelcase(): + cases = { + "the_new_attribute": "theNewAttribute", + } + for arg, expected in cases.items(): + underscores_to_camelcase(arg).should.equal(expected) + + +@freeze_time("2015-01-01 12:00:00") +def test_unix_time(): + unix_time().should.equal(1420113600.0) diff --git a/tests/test_datapipeline/test_datapipeline.py b/tests/test_datapipeline/test_datapipeline.py index ce190c7e44af..7cf76f5d7855 100644 --- a/tests/test_datapipeline/test_datapipeline.py +++ b/tests/test_datapipeline/test_datapipeline.py @@ -1,204 +1,204 @@ -from __future__ import unicode_literals - -import boto.datapipeline -import sure # noqa - -from moto import mock_datapipeline_deprecated -from moto.datapipeline.utils import remove_capitalization_of_dict_keys - - -def get_value_from_fields(key, fields): - for field in fields: - if field['key'] == key: - return field['stringValue'] - - -@mock_datapipeline_deprecated -def test_create_pipeline(): - conn = boto.datapipeline.connect_to_region("us-west-2") - - res = conn.create_pipeline("mypipeline", "some-unique-id") - - pipeline_id = res["pipelineId"] - pipeline_descriptions = conn.describe_pipelines( - [pipeline_id])["pipelineDescriptionList"] - pipeline_descriptions.should.have.length_of(1) - - pipeline_description = pipeline_descriptions[0] - pipeline_description['name'].should.equal("mypipeline") - pipeline_description["pipelineId"].should.equal(pipeline_id) - fields = pipeline_description['fields'] - - get_value_from_fields('@pipelineState', fields).should.equal("PENDING") - get_value_from_fields('uniqueId', fields).should.equal("some-unique-id") - - -PIPELINE_OBJECTS = [ - { - "id": "Default", - "name": "Default", - "fields": [{ - "key": "workerGroup", - "stringValue": "workerGroup" - }] - }, - { - "id": "Schedule", - "name": "Schedule", - "fields": [{ - "key": "startDateTime", - "stringValue": "2012-12-12T00:00:00" - }, { - "key": "type", - "stringValue": "Schedule" - }, { - "key": "period", - "stringValue": "1 hour" - }, { - "key": "endDateTime", - "stringValue": "2012-12-21T18:00:00" - }] - }, - { - "id": "SayHello", - "name": "SayHello", - "fields": [{ - "key": 
"type", - "stringValue": "ShellCommandActivity" - }, { - "key": "command", - "stringValue": "echo hello" - }, { - "key": "parent", - "refValue": "Default" - }, { - "key": "schedule", - "refValue": "Schedule" - }] - } -] - - -@mock_datapipeline_deprecated -def test_creating_pipeline_definition(): - conn = boto.datapipeline.connect_to_region("us-west-2") - res = conn.create_pipeline("mypipeline", "some-unique-id") - pipeline_id = res["pipelineId"] - - conn.put_pipeline_definition(PIPELINE_OBJECTS, pipeline_id) - - pipeline_definition = conn.get_pipeline_definition(pipeline_id) - pipeline_definition['pipelineObjects'].should.have.length_of(3) - default_object = pipeline_definition['pipelineObjects'][0] - default_object['name'].should.equal("Default") - default_object['id'].should.equal("Default") - default_object['fields'].should.equal([{ - "key": "workerGroup", - "stringValue": "workerGroup" - }]) - - -@mock_datapipeline_deprecated -def test_describing_pipeline_objects(): - conn = boto.datapipeline.connect_to_region("us-west-2") - res = conn.create_pipeline("mypipeline", "some-unique-id") - pipeline_id = res["pipelineId"] - - conn.put_pipeline_definition(PIPELINE_OBJECTS, pipeline_id) - - objects = conn.describe_objects(["Schedule", "Default"], pipeline_id)[ - 'pipelineObjects'] - - objects.should.have.length_of(2) - default_object = [x for x in objects if x['id'] == 'Default'][0] - default_object['name'].should.equal("Default") - default_object['fields'].should.equal([{ - "key": "workerGroup", - "stringValue": "workerGroup" - }]) - - -@mock_datapipeline_deprecated -def test_activate_pipeline(): - conn = boto.datapipeline.connect_to_region("us-west-2") - - res = conn.create_pipeline("mypipeline", "some-unique-id") - - pipeline_id = res["pipelineId"] - conn.activate_pipeline(pipeline_id) - - pipeline_descriptions = conn.describe_pipelines( - [pipeline_id])["pipelineDescriptionList"] - pipeline_descriptions.should.have.length_of(1) - pipeline_description = pipeline_descriptions[0] - fields = pipeline_description['fields'] - - get_value_from_fields('@pipelineState', fields).should.equal("SCHEDULED") - - -@mock_datapipeline_deprecated -def test_delete_pipeline(): - conn = boto.datapipeline.connect_to_region("us-west-2") - res = conn.create_pipeline("mypipeline", "some-unique-id") - pipeline_id = res["pipelineId"] - - conn.delete_pipeline(pipeline_id) - - response = conn.list_pipelines() - - response["pipelineIdList"].should.have.length_of(0) - - -@mock_datapipeline_deprecated -def test_listing_pipelines(): - conn = boto.datapipeline.connect_to_region("us-west-2") - res1 = conn.create_pipeline("mypipeline1", "some-unique-id1") - res2 = conn.create_pipeline("mypipeline2", "some-unique-id2") - - response = conn.list_pipelines() - - response["hasMoreResults"].should.be(False) - response["marker"].should.be.none - response["pipelineIdList"].should.have.length_of(2) - response["pipelineIdList"].should.contain({ - "id": res1["pipelineId"], - "name": "mypipeline1", - }) - response["pipelineIdList"].should.contain({ - "id": res2["pipelineId"], - "name": "mypipeline2" - }) - - -@mock_datapipeline_deprecated -def test_listing_paginated_pipelines(): - conn = boto.datapipeline.connect_to_region("us-west-2") - for i in range(100): - conn.create_pipeline("mypipeline%d" % i, "some-unique-id%d" % i) - - response = conn.list_pipelines() - - response["hasMoreResults"].should.be(True) - response["marker"].should.equal(response["pipelineIdList"][-1]['id']) - response["pipelineIdList"].should.have.length_of(50) - - 
-# testing a helper function -def test_remove_capitalization_of_dict_keys(): - result = remove_capitalization_of_dict_keys( - { - "Id": "IdValue", - "Fields": [{ - "Key": "KeyValue", - "StringValue": "StringValueValue" - }] - } - ) - - result.should.equal({ - "id": "IdValue", - "fields": [{ - "key": "KeyValue", - "stringValue": "StringValueValue" - }], - }) +from __future__ import unicode_literals + +import boto.datapipeline +import sure # noqa + +from moto import mock_datapipeline_deprecated +from moto.datapipeline.utils import remove_capitalization_of_dict_keys + + +def get_value_from_fields(key, fields): + for field in fields: + if field['key'] == key: + return field['stringValue'] + + +@mock_datapipeline_deprecated +def test_create_pipeline(): + conn = boto.datapipeline.connect_to_region("us-west-2") + + res = conn.create_pipeline("mypipeline", "some-unique-id") + + pipeline_id = res["pipelineId"] + pipeline_descriptions = conn.describe_pipelines( + [pipeline_id])["pipelineDescriptionList"] + pipeline_descriptions.should.have.length_of(1) + + pipeline_description = pipeline_descriptions[0] + pipeline_description['name'].should.equal("mypipeline") + pipeline_description["pipelineId"].should.equal(pipeline_id) + fields = pipeline_description['fields'] + + get_value_from_fields('@pipelineState', fields).should.equal("PENDING") + get_value_from_fields('uniqueId', fields).should.equal("some-unique-id") + + +PIPELINE_OBJECTS = [ + { + "id": "Default", + "name": "Default", + "fields": [{ + "key": "workerGroup", + "stringValue": "workerGroup" + }] + }, + { + "id": "Schedule", + "name": "Schedule", + "fields": [{ + "key": "startDateTime", + "stringValue": "2012-12-12T00:00:00" + }, { + "key": "type", + "stringValue": "Schedule" + }, { + "key": "period", + "stringValue": "1 hour" + }, { + "key": "endDateTime", + "stringValue": "2012-12-21T18:00:00" + }] + }, + { + "id": "SayHello", + "name": "SayHello", + "fields": [{ + "key": "type", + "stringValue": "ShellCommandActivity" + }, { + "key": "command", + "stringValue": "echo hello" + }, { + "key": "parent", + "refValue": "Default" + }, { + "key": "schedule", + "refValue": "Schedule" + }] + } +] + + +@mock_datapipeline_deprecated +def test_creating_pipeline_definition(): + conn = boto.datapipeline.connect_to_region("us-west-2") + res = conn.create_pipeline("mypipeline", "some-unique-id") + pipeline_id = res["pipelineId"] + + conn.put_pipeline_definition(PIPELINE_OBJECTS, pipeline_id) + + pipeline_definition = conn.get_pipeline_definition(pipeline_id) + pipeline_definition['pipelineObjects'].should.have.length_of(3) + default_object = pipeline_definition['pipelineObjects'][0] + default_object['name'].should.equal("Default") + default_object['id'].should.equal("Default") + default_object['fields'].should.equal([{ + "key": "workerGroup", + "stringValue": "workerGroup" + }]) + + +@mock_datapipeline_deprecated +def test_describing_pipeline_objects(): + conn = boto.datapipeline.connect_to_region("us-west-2") + res = conn.create_pipeline("mypipeline", "some-unique-id") + pipeline_id = res["pipelineId"] + + conn.put_pipeline_definition(PIPELINE_OBJECTS, pipeline_id) + + objects = conn.describe_objects(["Schedule", "Default"], pipeline_id)[ + 'pipelineObjects'] + + objects.should.have.length_of(2) + default_object = [x for x in objects if x['id'] == 'Default'][0] + default_object['name'].should.equal("Default") + default_object['fields'].should.equal([{ + "key": "workerGroup", + "stringValue": "workerGroup" + }]) + + +@mock_datapipeline_deprecated +def 
test_activate_pipeline(): + conn = boto.datapipeline.connect_to_region("us-west-2") + + res = conn.create_pipeline("mypipeline", "some-unique-id") + + pipeline_id = res["pipelineId"] + conn.activate_pipeline(pipeline_id) + + pipeline_descriptions = conn.describe_pipelines( + [pipeline_id])["pipelineDescriptionList"] + pipeline_descriptions.should.have.length_of(1) + pipeline_description = pipeline_descriptions[0] + fields = pipeline_description['fields'] + + get_value_from_fields('@pipelineState', fields).should.equal("SCHEDULED") + + +@mock_datapipeline_deprecated +def test_delete_pipeline(): + conn = boto.datapipeline.connect_to_region("us-west-2") + res = conn.create_pipeline("mypipeline", "some-unique-id") + pipeline_id = res["pipelineId"] + + conn.delete_pipeline(pipeline_id) + + response = conn.list_pipelines() + + response["pipelineIdList"].should.have.length_of(0) + + +@mock_datapipeline_deprecated +def test_listing_pipelines(): + conn = boto.datapipeline.connect_to_region("us-west-2") + res1 = conn.create_pipeline("mypipeline1", "some-unique-id1") + res2 = conn.create_pipeline("mypipeline2", "some-unique-id2") + + response = conn.list_pipelines() + + response["hasMoreResults"].should.be(False) + response["marker"].should.be.none + response["pipelineIdList"].should.have.length_of(2) + response["pipelineIdList"].should.contain({ + "id": res1["pipelineId"], + "name": "mypipeline1", + }) + response["pipelineIdList"].should.contain({ + "id": res2["pipelineId"], + "name": "mypipeline2" + }) + + +@mock_datapipeline_deprecated +def test_listing_paginated_pipelines(): + conn = boto.datapipeline.connect_to_region("us-west-2") + for i in range(100): + conn.create_pipeline("mypipeline%d" % i, "some-unique-id%d" % i) + + response = conn.list_pipelines() + + response["hasMoreResults"].should.be(True) + response["marker"].should.equal(response["pipelineIdList"][-1]['id']) + response["pipelineIdList"].should.have.length_of(50) + + +# testing a helper function +def test_remove_capitalization_of_dict_keys(): + result = remove_capitalization_of_dict_keys( + { + "Id": "IdValue", + "Fields": [{ + "Key": "KeyValue", + "StringValue": "StringValueValue" + }] + } + ) + + result.should.equal({ + "id": "IdValue", + "fields": [{ + "key": "KeyValue", + "stringValue": "StringValueValue" + }], + }) diff --git a/tests/test_datapipeline/test_server.py b/tests/test_datapipeline/test_server.py index 03c77b0344bc..7cb2657da379 100644 --- a/tests/test_datapipeline/test_server.py +++ b/tests/test_datapipeline/test_server.py @@ -1,28 +1,28 @@ -from __future__ import unicode_literals - -import json -import sure # noqa - -import moto.server as server -from moto import mock_datapipeline - -''' -Test the different server responses -''' - - -@mock_datapipeline -def test_list_streams(): - backend = server.create_backend_app("datapipeline") - test_client = backend.test_client() - - res = test_client.post('/', - data={"pipelineIds": ["ASdf"]}, - headers={ - "X-Amz-Target": "DataPipeline.DescribePipelines"}, - ) - - json_data = json.loads(res.data.decode("utf-8")) - json_data.should.equal({ - 'pipelineDescriptionList': [] - }) +from __future__ import unicode_literals + +import json +import sure # noqa + +import moto.server as server +from moto import mock_datapipeline + +''' +Test the different server responses +''' + + +@mock_datapipeline +def test_list_streams(): + backend = server.create_backend_app("datapipeline") + test_client = backend.test_client() + + res = test_client.post('/', + data={"pipelineIds": ["ASdf"]}, + 
headers={ + "X-Amz-Target": "DataPipeline.DescribePipelines"}, + ) + + json_data = json.loads(res.data.decode("utf-8")) + json_data.should.equal({ + 'pipelineDescriptionList': [] + }) diff --git a/tests/test_dynamodb/test_dynamodb.py b/tests/test_dynamodb/test_dynamodb.py index d48519755c83..2c675756f5a1 100644 --- a/tests/test_dynamodb/test_dynamodb.py +++ b/tests/test_dynamodb/test_dynamodb.py @@ -1,54 +1,54 @@ -from __future__ import unicode_literals -import six -import boto -import boto.dynamodb -import sure # noqa -import requests -import tests.backport_assert_raises -from nose.tools import assert_raises - -from moto import mock_dynamodb, mock_dynamodb_deprecated -from moto.dynamodb import dynamodb_backend - -from boto.exception import DynamoDBResponseError - - -@mock_dynamodb_deprecated -def test_list_tables(): - name = 'TestTable' - dynamodb_backend.create_table( - name, hash_key_attr="name", hash_key_type="S") - conn = boto.connect_dynamodb('the_key', 'the_secret') - assert conn.list_tables() == ['TestTable'] - - -@mock_dynamodb_deprecated -def test_list_tables_layer_1(): - dynamodb_backend.create_table( - "test_1", hash_key_attr="name", hash_key_type="S") - dynamodb_backend.create_table( - "test_2", hash_key_attr="name", hash_key_type="S") - conn = boto.connect_dynamodb('the_key', 'the_secret') - res = conn.layer1.list_tables(limit=1) - expected = {"TableNames": ["test_1"], "LastEvaluatedTableName": "test_1"} - res.should.equal(expected) - - res = conn.layer1.list_tables(limit=1, start_table="test_1") - expected = {"TableNames": ["test_2"]} - res.should.equal(expected) - - -@mock_dynamodb_deprecated -def test_describe_missing_table(): - conn = boto.connect_dynamodb('the_key', 'the_secret') - with assert_raises(DynamoDBResponseError): - conn.describe_table('messages') - - -@mock_dynamodb_deprecated -def test_dynamodb_with_connect_to_region(): - # this will work if connected with boto.connect_dynamodb() - dynamodb = boto.dynamodb.connect_to_region('us-west-2') - - schema = dynamodb.create_schema('column1', str(), 'column2', int()) - dynamodb.create_table('table1', schema, 200, 200) +from __future__ import unicode_literals +import six +import boto +import boto.dynamodb +import sure # noqa +import requests +import tests.backport_assert_raises +from nose.tools import assert_raises + +from moto import mock_dynamodb, mock_dynamodb_deprecated +from moto.dynamodb import dynamodb_backend + +from boto.exception import DynamoDBResponseError + + +@mock_dynamodb_deprecated +def test_list_tables(): + name = 'TestTable' + dynamodb_backend.create_table( + name, hash_key_attr="name", hash_key_type="S") + conn = boto.connect_dynamodb('the_key', 'the_secret') + assert conn.list_tables() == ['TestTable'] + + +@mock_dynamodb_deprecated +def test_list_tables_layer_1(): + dynamodb_backend.create_table( + "test_1", hash_key_attr="name", hash_key_type="S") + dynamodb_backend.create_table( + "test_2", hash_key_attr="name", hash_key_type="S") + conn = boto.connect_dynamodb('the_key', 'the_secret') + res = conn.layer1.list_tables(limit=1) + expected = {"TableNames": ["test_1"], "LastEvaluatedTableName": "test_1"} + res.should.equal(expected) + + res = conn.layer1.list_tables(limit=1, start_table="test_1") + expected = {"TableNames": ["test_2"]} + res.should.equal(expected) + + +@mock_dynamodb_deprecated +def test_describe_missing_table(): + conn = boto.connect_dynamodb('the_key', 'the_secret') + with assert_raises(DynamoDBResponseError): + conn.describe_table('messages') + + +@mock_dynamodb_deprecated +def 
test_dynamodb_with_connect_to_region(): + # this will work if connected with boto.connect_dynamodb() + dynamodb = boto.dynamodb.connect_to_region('us-west-2') + + schema = dynamodb.create_schema('column1', str(), 'column2', int()) + dynamodb.create_table('table1', schema, 200, 200) diff --git a/tests/test_dynamodb/test_dynamodb_table_with_range_key.py b/tests/test_dynamodb/test_dynamodb_table_with_range_key.py index 2a482b31ec1f..ee6738934c54 100644 --- a/tests/test_dynamodb/test_dynamodb_table_with_range_key.py +++ b/tests/test_dynamodb/test_dynamodb_table_with_range_key.py @@ -1,526 +1,526 @@ -from __future__ import unicode_literals - -import boto -import sure # noqa -from freezegun import freeze_time - -from moto import mock_dynamodb_deprecated - -from boto.dynamodb import condition -from boto.dynamodb.exceptions import DynamoDBKeyNotFoundError, DynamoDBValidationError -from boto.exception import DynamoDBResponseError - - -def create_table(conn): - message_table_schema = conn.create_schema( - hash_key_name='forum_name', - hash_key_proto_value=str, - range_key_name='subject', - range_key_proto_value=str - ) - - table = conn.create_table( - name='messages', - schema=message_table_schema, - read_units=10, - write_units=10 - ) - return table - - -@freeze_time("2012-01-14") -@mock_dynamodb_deprecated -def test_create_table(): - conn = boto.connect_dynamodb() - create_table(conn) - - expected = { - 'Table': { - 'CreationDateTime': 1326499200.0, - 'ItemCount': 0, - 'KeySchema': { - 'HashKeyElement': { - 'AttributeName': 'forum_name', - 'AttributeType': 'S' - }, - 'RangeKeyElement': { - 'AttributeName': 'subject', - 'AttributeType': 'S' - } - }, - 'ProvisionedThroughput': { - 'ReadCapacityUnits': 10, - 'WriteCapacityUnits': 10 - }, - 'TableName': 'messages', - 'TableSizeBytes': 0, - 'TableStatus': 'ACTIVE' - } - } - conn.describe_table('messages').should.equal(expected) - - -@mock_dynamodb_deprecated -def test_delete_table(): - conn = boto.connect_dynamodb() - create_table(conn) - conn.list_tables().should.have.length_of(1) - - conn.layer1.delete_table('messages') - conn.list_tables().should.have.length_of(0) - - conn.layer1.delete_table.when.called_with( - 'messages').should.throw(DynamoDBResponseError) - - -@mock_dynamodb_deprecated -def test_update_table_throughput(): - conn = boto.connect_dynamodb() - table = create_table(conn) - table.read_units.should.equal(10) - table.write_units.should.equal(10) - - table.update_throughput(5, 6) - table.refresh() - - table.read_units.should.equal(5) - table.write_units.should.equal(6) - - -@mock_dynamodb_deprecated -def test_item_add_and_describe_and_update(): - conn = boto.connect_dynamodb() - table = create_table(conn) - - item_data = { - 'Body': 'http://url_to_lolcat.gif', - 'SentBy': 'User A', - 'ReceivedTime': '12/9/2011 11:36:03 PM', - } - item = table.new_item( - hash_key='LOLCat Forum', - range_key='Check this out!', - attrs=item_data, - ) - item.put() - - table.has_item("LOLCat Forum", "Check this out!").should.equal(True) - - returned_item = table.get_item( - hash_key='LOLCat Forum', - range_key='Check this out!', - attributes_to_get=['Body', 'SentBy'] - ) - dict(returned_item).should.equal({ - 'forum_name': 'LOLCat Forum', - 'subject': 'Check this out!', - 'Body': 'http://url_to_lolcat.gif', - 'SentBy': 'User A', - }) - - item['SentBy'] = 'User B' - item.put() - - returned_item = table.get_item( - hash_key='LOLCat Forum', - range_key='Check this out!', - attributes_to_get=['Body', 'SentBy'] - ) - dict(returned_item).should.equal({ - 
'forum_name': 'LOLCat Forum', - 'subject': 'Check this out!', - 'Body': 'http://url_to_lolcat.gif', - 'SentBy': 'User B', - }) - - -@mock_dynamodb_deprecated -def test_item_put_without_table(): - conn = boto.connect_dynamodb() - - conn.layer1.put_item.when.called_with( - table_name='undeclared-table', - item=dict( - hash_key='LOLCat Forum', - range_key='Check this out!', - ), - ).should.throw(DynamoDBResponseError) - - -@mock_dynamodb_deprecated -def test_get_missing_item(): - conn = boto.connect_dynamodb() - table = create_table(conn) - - table.get_item.when.called_with( - hash_key='tester', - range_key='other', - ).should.throw(DynamoDBKeyNotFoundError) - table.has_item("foobar", "more").should.equal(False) - - -@mock_dynamodb_deprecated -def test_get_item_with_undeclared_table(): - conn = boto.connect_dynamodb() - - conn.layer1.get_item.when.called_with( - table_name='undeclared-table', - key={ - 'HashKeyElement': {'S': 'tester'}, - 'RangeKeyElement': {'S': 'test-range'}, - }, - ).should.throw(DynamoDBKeyNotFoundError) - - -@mock_dynamodb_deprecated -def test_get_item_without_range_key(): - conn = boto.connect_dynamodb() - message_table_schema = conn.create_schema( - hash_key_name="test_hash", - hash_key_proto_value=int, - range_key_name="test_range", - range_key_proto_value=int, - ) - table = conn.create_table( - name='messages', - schema=message_table_schema, - read_units=10, - write_units=10 - ) - - hash_key = 3241526475 - range_key = 1234567890987 - new_item = table.new_item(hash_key=hash_key, range_key=range_key) - new_item.put() - - table.get_item.when.called_with( - hash_key=hash_key).should.throw(DynamoDBValidationError) - - -@mock_dynamodb_deprecated -def test_delete_item(): - conn = boto.connect_dynamodb() - table = create_table(conn) - - item_data = { - 'Body': 'http://url_to_lolcat.gif', - 'SentBy': 'User A', - 'ReceivedTime': '12/9/2011 11:36:03 PM', - } - item = table.new_item( - hash_key='LOLCat Forum', - range_key='Check this out!', - attrs=item_data, - ) - item.put() - - table.refresh() - table.item_count.should.equal(1) - - response = item.delete() - response.should.equal({u'Attributes': [], u'ConsumedCapacityUnits': 0.5}) - table.refresh() - table.item_count.should.equal(0) - - item.delete.when.called_with().should.throw(DynamoDBResponseError) - - -@mock_dynamodb_deprecated -def test_delete_item_with_attribute_response(): - conn = boto.connect_dynamodb() - table = create_table(conn) - - item_data = { - 'Body': 'http://url_to_lolcat.gif', - 'SentBy': 'User A', - 'ReceivedTime': '12/9/2011 11:36:03 PM', - } - item = table.new_item( - hash_key='LOLCat Forum', - range_key='Check this out!', - attrs=item_data, - ) - item.put() - - table.refresh() - table.item_count.should.equal(1) - - response = item.delete(return_values='ALL_OLD') - response.should.equal({ - 'Attributes': { - 'Body': 'http://url_to_lolcat.gif', - 'forum_name': 'LOLCat Forum', - 'ReceivedTime': '12/9/2011 11:36:03 PM', - 'SentBy': 'User A', - 'subject': 'Check this out!' 
- }, - 'ConsumedCapacityUnits': 0.5 - }) - table.refresh() - table.item_count.should.equal(0) - - item.delete.when.called_with().should.throw(DynamoDBResponseError) - - -@mock_dynamodb_deprecated -def test_delete_item_with_undeclared_table(): - conn = boto.connect_dynamodb() - - conn.layer1.delete_item.when.called_with( - table_name='undeclared-table', - key={ - 'HashKeyElement': {'S': 'tester'}, - 'RangeKeyElement': {'S': 'test-range'}, - }, - ).should.throw(DynamoDBResponseError) - - -@mock_dynamodb_deprecated -def test_query(): - conn = boto.connect_dynamodb() - table = create_table(conn) - - item_data = { - 'Body': 'http://url_to_lolcat.gif', - 'SentBy': 'User A', - 'ReceivedTime': '12/9/2011 11:36:03 PM', - } - item = table.new_item( - hash_key='the-key', - range_key='456', - attrs=item_data, - ) - item.put() - - item = table.new_item( - hash_key='the-key', - range_key='123', - attrs=item_data, - ) - item.put() - - item = table.new_item( - hash_key='the-key', - range_key='789', - attrs=item_data, - ) - item.put() - - results = table.query(hash_key='the-key', - range_key_condition=condition.GT('1')) - results.response['Items'].should.have.length_of(3) - - results = table.query(hash_key='the-key', - range_key_condition=condition.GT('234')) - results.response['Items'].should.have.length_of(2) - - results = table.query(hash_key='the-key', - range_key_condition=condition.GT('9999')) - results.response['Items'].should.have.length_of(0) - - results = table.query(hash_key='the-key', - range_key_condition=condition.CONTAINS('12')) - results.response['Items'].should.have.length_of(1) - - results = table.query(hash_key='the-key', - range_key_condition=condition.BEGINS_WITH('7')) - results.response['Items'].should.have.length_of(1) - - results = table.query(hash_key='the-key', - range_key_condition=condition.BETWEEN('567', '890')) - results.response['Items'].should.have.length_of(1) - - -@mock_dynamodb_deprecated -def test_query_with_undeclared_table(): - conn = boto.connect_dynamodb() - - conn.layer1.query.when.called_with( - table_name='undeclared-table', - hash_key_value={'S': 'the-key'}, - range_key_conditions={ - "AttributeValueList": [{ - "S": "User B" - }], - "ComparisonOperator": "EQ", - }, - ).should.throw(DynamoDBResponseError) - - -@mock_dynamodb_deprecated -def test_scan(): - conn = boto.connect_dynamodb() - table = create_table(conn) - - item_data = { - 'Body': 'http://url_to_lolcat.gif', - 'SentBy': 'User A', - 'ReceivedTime': '12/9/2011 11:36:03 PM', - } - item = table.new_item( - hash_key='the-key', - range_key='456', - attrs=item_data, - ) - item.put() - - item = table.new_item( - hash_key='the-key', - range_key='123', - attrs=item_data, - ) - item.put() - - item_data = { - 'Body': 'http://url_to_lolcat.gif', - 'SentBy': 'User B', - 'ReceivedTime': '12/9/2011 11:36:03 PM', - 'Ids': set([1, 2, 3]), - 'PK': 7, - } - item = table.new_item( - hash_key='the-key', - range_key='789', - attrs=item_data, - ) - item.put() - - results = table.scan() - results.response['Items'].should.have.length_of(3) - - results = table.scan(scan_filter={'SentBy': condition.EQ('User B')}) - results.response['Items'].should.have.length_of(1) - - results = table.scan(scan_filter={'Body': condition.BEGINS_WITH('http')}) - results.response['Items'].should.have.length_of(3) - - results = table.scan(scan_filter={'Ids': condition.CONTAINS(2)}) - results.response['Items'].should.have.length_of(1) - - results = table.scan(scan_filter={'Ids': condition.NOT_NULL()}) - 
results.response['Items'].should.have.length_of(1) - - results = table.scan(scan_filter={'Ids': condition.NULL()}) - results.response['Items'].should.have.length_of(2) - - results = table.scan(scan_filter={'PK': condition.BETWEEN(8, 9)}) - results.response['Items'].should.have.length_of(0) - - results = table.scan(scan_filter={'PK': condition.BETWEEN(5, 8)}) - results.response['Items'].should.have.length_of(1) - - -@mock_dynamodb_deprecated -def test_scan_with_undeclared_table(): - conn = boto.connect_dynamodb() - - conn.layer1.scan.when.called_with( - table_name='undeclared-table', - scan_filter={ - "SentBy": { - "AttributeValueList": [{ - "S": "User B"} - ], - "ComparisonOperator": "EQ" - } - }, - ).should.throw(DynamoDBResponseError) - - -@mock_dynamodb_deprecated -def test_scan_after_has_item(): - conn = boto.connect_dynamodb() - table = create_table(conn) - list(table.scan()).should.equal([]) - - table.has_item(hash_key='the-key', range_key='123') - - list(table.scan()).should.equal([]) - - -@mock_dynamodb_deprecated -def test_write_batch(): - conn = boto.connect_dynamodb() - table = create_table(conn) - - batch_list = conn.new_batch_write_list() - - items = [] - items.append(table.new_item( - hash_key='the-key', - range_key='123', - attrs={ - 'Body': 'http://url_to_lolcat.gif', - 'SentBy': 'User A', - 'ReceivedTime': '12/9/2011 11:36:03 PM', - }, - )) - - items.append(table.new_item( - hash_key='the-key', - range_key='789', - attrs={ - 'Body': 'http://url_to_lolcat.gif', - 'SentBy': 'User B', - 'ReceivedTime': '12/9/2011 11:36:03 PM', - 'Ids': set([1, 2, 3]), - 'PK': 7, - }, - )) - - batch_list.add_batch(table, puts=items) - conn.batch_write_item(batch_list) - - table.refresh() - table.item_count.should.equal(2) - - batch_list = conn.new_batch_write_list() - batch_list.add_batch(table, deletes=[('the-key', '789')]) - conn.batch_write_item(batch_list) - - table.refresh() - table.item_count.should.equal(1) - - -@mock_dynamodb_deprecated -def test_batch_read(): - conn = boto.connect_dynamodb() - table = create_table(conn) - - item_data = { - 'Body': 'http://url_to_lolcat.gif', - 'SentBy': 'User A', - 'ReceivedTime': '12/9/2011 11:36:03 PM', - } - item = table.new_item( - hash_key='the-key', - range_key='456', - attrs=item_data, - ) - item.put() - - item = table.new_item( - hash_key='the-key', - range_key='123', - attrs=item_data, - ) - item.put() - - item_data = { - 'Body': 'http://url_to_lolcat.gif', - 'SentBy': 'User B', - 'ReceivedTime': '12/9/2011 11:36:03 PM', - 'Ids': set([1, 2, 3]), - 'PK': 7, - } - item = table.new_item( - hash_key='another-key', - range_key='789', - attrs=item_data, - ) - item.put() - - items = table.batch_get_item([('the-key', '123'), ('another-key', '789')]) - # Iterate through so that batch_item gets called - count = len([x for x in items]) - count.should.equal(2) +from __future__ import unicode_literals + +import boto +import sure # noqa +from freezegun import freeze_time + +from moto import mock_dynamodb_deprecated + +from boto.dynamodb import condition +from boto.dynamodb.exceptions import DynamoDBKeyNotFoundError, DynamoDBValidationError +from boto.exception import DynamoDBResponseError + + +def create_table(conn): + message_table_schema = conn.create_schema( + hash_key_name='forum_name', + hash_key_proto_value=str, + range_key_name='subject', + range_key_proto_value=str + ) + + table = conn.create_table( + name='messages', + schema=message_table_schema, + read_units=10, + write_units=10 + ) + return table + + +@freeze_time("2012-01-14") 
+@mock_dynamodb_deprecated +def test_create_table(): + conn = boto.connect_dynamodb() + create_table(conn) + + expected = { + 'Table': { + 'CreationDateTime': 1326499200.0, + 'ItemCount': 0, + 'KeySchema': { + 'HashKeyElement': { + 'AttributeName': 'forum_name', + 'AttributeType': 'S' + }, + 'RangeKeyElement': { + 'AttributeName': 'subject', + 'AttributeType': 'S' + } + }, + 'ProvisionedThroughput': { + 'ReadCapacityUnits': 10, + 'WriteCapacityUnits': 10 + }, + 'TableName': 'messages', + 'TableSizeBytes': 0, + 'TableStatus': 'ACTIVE' + } + } + conn.describe_table('messages').should.equal(expected) + + +@mock_dynamodb_deprecated +def test_delete_table(): + conn = boto.connect_dynamodb() + create_table(conn) + conn.list_tables().should.have.length_of(1) + + conn.layer1.delete_table('messages') + conn.list_tables().should.have.length_of(0) + + conn.layer1.delete_table.when.called_with( + 'messages').should.throw(DynamoDBResponseError) + + +@mock_dynamodb_deprecated +def test_update_table_throughput(): + conn = boto.connect_dynamodb() + table = create_table(conn) + table.read_units.should.equal(10) + table.write_units.should.equal(10) + + table.update_throughput(5, 6) + table.refresh() + + table.read_units.should.equal(5) + table.write_units.should.equal(6) + + +@mock_dynamodb_deprecated +def test_item_add_and_describe_and_update(): + conn = boto.connect_dynamodb() + table = create_table(conn) + + item_data = { + 'Body': 'http://url_to_lolcat.gif', + 'SentBy': 'User A', + 'ReceivedTime': '12/9/2011 11:36:03 PM', + } + item = table.new_item( + hash_key='LOLCat Forum', + range_key='Check this out!', + attrs=item_data, + ) + item.put() + + table.has_item("LOLCat Forum", "Check this out!").should.equal(True) + + returned_item = table.get_item( + hash_key='LOLCat Forum', + range_key='Check this out!', + attributes_to_get=['Body', 'SentBy'] + ) + dict(returned_item).should.equal({ + 'forum_name': 'LOLCat Forum', + 'subject': 'Check this out!', + 'Body': 'http://url_to_lolcat.gif', + 'SentBy': 'User A', + }) + + item['SentBy'] = 'User B' + item.put() + + returned_item = table.get_item( + hash_key='LOLCat Forum', + range_key='Check this out!', + attributes_to_get=['Body', 'SentBy'] + ) + dict(returned_item).should.equal({ + 'forum_name': 'LOLCat Forum', + 'subject': 'Check this out!', + 'Body': 'http://url_to_lolcat.gif', + 'SentBy': 'User B', + }) + + +@mock_dynamodb_deprecated +def test_item_put_without_table(): + conn = boto.connect_dynamodb() + + conn.layer1.put_item.when.called_with( + table_name='undeclared-table', + item=dict( + hash_key='LOLCat Forum', + range_key='Check this out!', + ), + ).should.throw(DynamoDBResponseError) + + +@mock_dynamodb_deprecated +def test_get_missing_item(): + conn = boto.connect_dynamodb() + table = create_table(conn) + + table.get_item.when.called_with( + hash_key='tester', + range_key='other', + ).should.throw(DynamoDBKeyNotFoundError) + table.has_item("foobar", "more").should.equal(False) + + +@mock_dynamodb_deprecated +def test_get_item_with_undeclared_table(): + conn = boto.connect_dynamodb() + + conn.layer1.get_item.when.called_with( + table_name='undeclared-table', + key={ + 'HashKeyElement': {'S': 'tester'}, + 'RangeKeyElement': {'S': 'test-range'}, + }, + ).should.throw(DynamoDBKeyNotFoundError) + + +@mock_dynamodb_deprecated +def test_get_item_without_range_key(): + conn = boto.connect_dynamodb() + message_table_schema = conn.create_schema( + hash_key_name="test_hash", + hash_key_proto_value=int, + range_key_name="test_range", + 
range_key_proto_value=int, + ) + table = conn.create_table( + name='messages', + schema=message_table_schema, + read_units=10, + write_units=10 + ) + + hash_key = 3241526475 + range_key = 1234567890987 + new_item = table.new_item(hash_key=hash_key, range_key=range_key) + new_item.put() + + table.get_item.when.called_with( + hash_key=hash_key).should.throw(DynamoDBValidationError) + + +@mock_dynamodb_deprecated +def test_delete_item(): + conn = boto.connect_dynamodb() + table = create_table(conn) + + item_data = { + 'Body': 'http://url_to_lolcat.gif', + 'SentBy': 'User A', + 'ReceivedTime': '12/9/2011 11:36:03 PM', + } + item = table.new_item( + hash_key='LOLCat Forum', + range_key='Check this out!', + attrs=item_data, + ) + item.put() + + table.refresh() + table.item_count.should.equal(1) + + response = item.delete() + response.should.equal({u'Attributes': [], u'ConsumedCapacityUnits': 0.5}) + table.refresh() + table.item_count.should.equal(0) + + item.delete.when.called_with().should.throw(DynamoDBResponseError) + + +@mock_dynamodb_deprecated +def test_delete_item_with_attribute_response(): + conn = boto.connect_dynamodb() + table = create_table(conn) + + item_data = { + 'Body': 'http://url_to_lolcat.gif', + 'SentBy': 'User A', + 'ReceivedTime': '12/9/2011 11:36:03 PM', + } + item = table.new_item( + hash_key='LOLCat Forum', + range_key='Check this out!', + attrs=item_data, + ) + item.put() + + table.refresh() + table.item_count.should.equal(1) + + response = item.delete(return_values='ALL_OLD') + response.should.equal({ + 'Attributes': { + 'Body': 'http://url_to_lolcat.gif', + 'forum_name': 'LOLCat Forum', + 'ReceivedTime': '12/9/2011 11:36:03 PM', + 'SentBy': 'User A', + 'subject': 'Check this out!' + }, + 'ConsumedCapacityUnits': 0.5 + }) + table.refresh() + table.item_count.should.equal(0) + + item.delete.when.called_with().should.throw(DynamoDBResponseError) + + +@mock_dynamodb_deprecated +def test_delete_item_with_undeclared_table(): + conn = boto.connect_dynamodb() + + conn.layer1.delete_item.when.called_with( + table_name='undeclared-table', + key={ + 'HashKeyElement': {'S': 'tester'}, + 'RangeKeyElement': {'S': 'test-range'}, + }, + ).should.throw(DynamoDBResponseError) + + +@mock_dynamodb_deprecated +def test_query(): + conn = boto.connect_dynamodb() + table = create_table(conn) + + item_data = { + 'Body': 'http://url_to_lolcat.gif', + 'SentBy': 'User A', + 'ReceivedTime': '12/9/2011 11:36:03 PM', + } + item = table.new_item( + hash_key='the-key', + range_key='456', + attrs=item_data, + ) + item.put() + + item = table.new_item( + hash_key='the-key', + range_key='123', + attrs=item_data, + ) + item.put() + + item = table.new_item( + hash_key='the-key', + range_key='789', + attrs=item_data, + ) + item.put() + + results = table.query(hash_key='the-key', + range_key_condition=condition.GT('1')) + results.response['Items'].should.have.length_of(3) + + results = table.query(hash_key='the-key', + range_key_condition=condition.GT('234')) + results.response['Items'].should.have.length_of(2) + + results = table.query(hash_key='the-key', + range_key_condition=condition.GT('9999')) + results.response['Items'].should.have.length_of(0) + + results = table.query(hash_key='the-key', + range_key_condition=condition.CONTAINS('12')) + results.response['Items'].should.have.length_of(1) + + results = table.query(hash_key='the-key', + range_key_condition=condition.BEGINS_WITH('7')) + results.response['Items'].should.have.length_of(1) + + results = table.query(hash_key='the-key', + 
range_key_condition=condition.BETWEEN('567', '890')) + results.response['Items'].should.have.length_of(1) + + +@mock_dynamodb_deprecated +def test_query_with_undeclared_table(): + conn = boto.connect_dynamodb() + + conn.layer1.query.when.called_with( + table_name='undeclared-table', + hash_key_value={'S': 'the-key'}, + range_key_conditions={ + "AttributeValueList": [{ + "S": "User B" + }], + "ComparisonOperator": "EQ", + }, + ).should.throw(DynamoDBResponseError) + + +@mock_dynamodb_deprecated +def test_scan(): + conn = boto.connect_dynamodb() + table = create_table(conn) + + item_data = { + 'Body': 'http://url_to_lolcat.gif', + 'SentBy': 'User A', + 'ReceivedTime': '12/9/2011 11:36:03 PM', + } + item = table.new_item( + hash_key='the-key', + range_key='456', + attrs=item_data, + ) + item.put() + + item = table.new_item( + hash_key='the-key', + range_key='123', + attrs=item_data, + ) + item.put() + + item_data = { + 'Body': 'http://url_to_lolcat.gif', + 'SentBy': 'User B', + 'ReceivedTime': '12/9/2011 11:36:03 PM', + 'Ids': set([1, 2, 3]), + 'PK': 7, + } + item = table.new_item( + hash_key='the-key', + range_key='789', + attrs=item_data, + ) + item.put() + + results = table.scan() + results.response['Items'].should.have.length_of(3) + + results = table.scan(scan_filter={'SentBy': condition.EQ('User B')}) + results.response['Items'].should.have.length_of(1) + + results = table.scan(scan_filter={'Body': condition.BEGINS_WITH('http')}) + results.response['Items'].should.have.length_of(3) + + results = table.scan(scan_filter={'Ids': condition.CONTAINS(2)}) + results.response['Items'].should.have.length_of(1) + + results = table.scan(scan_filter={'Ids': condition.NOT_NULL()}) + results.response['Items'].should.have.length_of(1) + + results = table.scan(scan_filter={'Ids': condition.NULL()}) + results.response['Items'].should.have.length_of(2) + + results = table.scan(scan_filter={'PK': condition.BETWEEN(8, 9)}) + results.response['Items'].should.have.length_of(0) + + results = table.scan(scan_filter={'PK': condition.BETWEEN(5, 8)}) + results.response['Items'].should.have.length_of(1) + + +@mock_dynamodb_deprecated +def test_scan_with_undeclared_table(): + conn = boto.connect_dynamodb() + + conn.layer1.scan.when.called_with( + table_name='undeclared-table', + scan_filter={ + "SentBy": { + "AttributeValueList": [{ + "S": "User B"} + ], + "ComparisonOperator": "EQ" + } + }, + ).should.throw(DynamoDBResponseError) + + +@mock_dynamodb_deprecated +def test_scan_after_has_item(): + conn = boto.connect_dynamodb() + table = create_table(conn) + list(table.scan()).should.equal([]) + + table.has_item(hash_key='the-key', range_key='123') + + list(table.scan()).should.equal([]) + + +@mock_dynamodb_deprecated +def test_write_batch(): + conn = boto.connect_dynamodb() + table = create_table(conn) + + batch_list = conn.new_batch_write_list() + + items = [] + items.append(table.new_item( + hash_key='the-key', + range_key='123', + attrs={ + 'Body': 'http://url_to_lolcat.gif', + 'SentBy': 'User A', + 'ReceivedTime': '12/9/2011 11:36:03 PM', + }, + )) + + items.append(table.new_item( + hash_key='the-key', + range_key='789', + attrs={ + 'Body': 'http://url_to_lolcat.gif', + 'SentBy': 'User B', + 'ReceivedTime': '12/9/2011 11:36:03 PM', + 'Ids': set([1, 2, 3]), + 'PK': 7, + }, + )) + + batch_list.add_batch(table, puts=items) + conn.batch_write_item(batch_list) + + table.refresh() + table.item_count.should.equal(2) + + batch_list = conn.new_batch_write_list() + batch_list.add_batch(table, deletes=[('the-key', '789')]) 
+ conn.batch_write_item(batch_list) + + table.refresh() + table.item_count.should.equal(1) + + +@mock_dynamodb_deprecated +def test_batch_read(): + conn = boto.connect_dynamodb() + table = create_table(conn) + + item_data = { + 'Body': 'http://url_to_lolcat.gif', + 'SentBy': 'User A', + 'ReceivedTime': '12/9/2011 11:36:03 PM', + } + item = table.new_item( + hash_key='the-key', + range_key='456', + attrs=item_data, + ) + item.put() + + item = table.new_item( + hash_key='the-key', + range_key='123', + attrs=item_data, + ) + item.put() + + item_data = { + 'Body': 'http://url_to_lolcat.gif', + 'SentBy': 'User B', + 'ReceivedTime': '12/9/2011 11:36:03 PM', + 'Ids': set([1, 2, 3]), + 'PK': 7, + } + item = table.new_item( + hash_key='another-key', + range_key='789', + attrs=item_data, + ) + item.put() + + items = table.batch_get_item([('the-key', '123'), ('another-key', '789')]) + # Iterate through so that batch_item gets called + count = len([x for x in items]) + count.should.equal(2) diff --git a/tests/test_dynamodb/test_dynamodb_table_without_range_key.py b/tests/test_dynamodb/test_dynamodb_table_without_range_key.py index ebd0c2051af3..c31b1994d5aa 100644 --- a/tests/test_dynamodb/test_dynamodb_table_without_range_key.py +++ b/tests/test_dynamodb/test_dynamodb_table_without_range_key.py @@ -1,430 +1,430 @@ -from __future__ import unicode_literals - -import boto -import sure # noqa -from freezegun import freeze_time - -from moto import mock_dynamodb_deprecated - -from boto.dynamodb import condition -from boto.dynamodb.exceptions import DynamoDBKeyNotFoundError -from boto.exception import DynamoDBResponseError - - -def create_table(conn): - message_table_schema = conn.create_schema( - hash_key_name='forum_name', - hash_key_proto_value=str, - ) - - table = conn.create_table( - name='messages', - schema=message_table_schema, - read_units=10, - write_units=10 - ) - return table - - -@freeze_time("2012-01-14") -@mock_dynamodb_deprecated -def test_create_table(): - conn = boto.connect_dynamodb() - create_table(conn) - - expected = { - 'Table': { - 'CreationDateTime': 1326499200.0, - 'ItemCount': 0, - 'KeySchema': { - 'HashKeyElement': { - 'AttributeName': 'forum_name', - 'AttributeType': 'S' - }, - }, - 'ProvisionedThroughput': { - 'ReadCapacityUnits': 10, - 'WriteCapacityUnits': 10 - }, - 'TableName': 'messages', - 'TableSizeBytes': 0, - 'TableStatus': 'ACTIVE', - } - } - conn.describe_table('messages').should.equal(expected) - - -@mock_dynamodb_deprecated -def test_delete_table(): - conn = boto.connect_dynamodb() - create_table(conn) - conn.list_tables().should.have.length_of(1) - - conn.layer1.delete_table('messages') - conn.list_tables().should.have.length_of(0) - - conn.layer1.delete_table.when.called_with( - 'messages').should.throw(DynamoDBResponseError) - - -@mock_dynamodb_deprecated -def test_update_table_throughput(): - conn = boto.connect_dynamodb() - table = create_table(conn) - table.read_units.should.equal(10) - table.write_units.should.equal(10) - - table.update_throughput(5, 6) - table.refresh() - - table.read_units.should.equal(5) - table.write_units.should.equal(6) - - -@mock_dynamodb_deprecated -def test_item_add_and_describe_and_update(): - conn = boto.connect_dynamodb() - table = create_table(conn) - - item_data = { - 'Body': 'http://url_to_lolcat.gif', - 'SentBy': 'User A', - 'ReceivedTime': '12/9/2011 11:36:03 PM', - } - item = table.new_item( - hash_key='LOLCat Forum', - attrs=item_data, - ) - item.put() - - returned_item = table.get_item( - hash_key='LOLCat Forum', - 
attributes_to_get=['Body', 'SentBy'] - ) - dict(returned_item).should.equal({ - 'forum_name': 'LOLCat Forum', - 'Body': 'http://url_to_lolcat.gif', - 'SentBy': 'User A', - }) - - item['SentBy'] = 'User B' - item.put() - - returned_item = table.get_item( - hash_key='LOLCat Forum', - attributes_to_get=['Body', 'SentBy'] - ) - dict(returned_item).should.equal({ - 'forum_name': 'LOLCat Forum', - 'Body': 'http://url_to_lolcat.gif', - 'SentBy': 'User B', - }) - - -@mock_dynamodb_deprecated -def test_item_put_without_table(): - conn = boto.connect_dynamodb() - - conn.layer1.put_item.when.called_with( - table_name='undeclared-table', - item=dict( - hash_key='LOLCat Forum', - ), - ).should.throw(DynamoDBResponseError) - - -@mock_dynamodb_deprecated -def test_get_missing_item(): - conn = boto.connect_dynamodb() - table = create_table(conn) - - table.get_item.when.called_with( - hash_key='tester', - ).should.throw(DynamoDBKeyNotFoundError) - - -@mock_dynamodb_deprecated -def test_get_item_with_undeclared_table(): - conn = boto.connect_dynamodb() - - conn.layer1.get_item.when.called_with( - table_name='undeclared-table', - key={ - 'HashKeyElement': {'S': 'tester'}, - }, - ).should.throw(DynamoDBKeyNotFoundError) - - -@mock_dynamodb_deprecated -def test_delete_item(): - conn = boto.connect_dynamodb() - table = create_table(conn) - - item_data = { - 'Body': 'http://url_to_lolcat.gif', - 'SentBy': 'User A', - 'ReceivedTime': '12/9/2011 11:36:03 PM', - } - item = table.new_item( - hash_key='LOLCat Forum', - attrs=item_data, - ) - item.put() - - table.refresh() - table.item_count.should.equal(1) - - response = item.delete() - response.should.equal({u'Attributes': [], u'ConsumedCapacityUnits': 0.5}) - table.refresh() - table.item_count.should.equal(0) - - item.delete.when.called_with().should.throw(DynamoDBResponseError) - - -@mock_dynamodb_deprecated -def test_delete_item_with_attribute_response(): - conn = boto.connect_dynamodb() - table = create_table(conn) - - item_data = { - 'Body': 'http://url_to_lolcat.gif', - 'SentBy': 'User A', - 'ReceivedTime': '12/9/2011 11:36:03 PM', - } - item = table.new_item( - hash_key='LOLCat Forum', - attrs=item_data, - ) - item.put() - - table.refresh() - table.item_count.should.equal(1) - - response = item.delete(return_values='ALL_OLD') - response.should.equal({ - u'Attributes': { - u'Body': u'http://url_to_lolcat.gif', - u'forum_name': u'LOLCat Forum', - u'ReceivedTime': u'12/9/2011 11:36:03 PM', - u'SentBy': u'User A', - }, - u'ConsumedCapacityUnits': 0.5 - }) - table.refresh() - table.item_count.should.equal(0) - - item.delete.when.called_with().should.throw(DynamoDBResponseError) - - -@mock_dynamodb_deprecated -def test_delete_item_with_undeclared_table(): - conn = boto.connect_dynamodb() - - conn.layer1.delete_item.when.called_with( - table_name='undeclared-table', - key={ - 'HashKeyElement': {'S': 'tester'}, - }, - ).should.throw(DynamoDBResponseError) - - -@mock_dynamodb_deprecated -def test_query(): - conn = boto.connect_dynamodb() - table = create_table(conn) - - item_data = { - 'Body': 'http://url_to_lolcat.gif', - 'SentBy': 'User A', - 'ReceivedTime': '12/9/2011 11:36:03 PM', - } - item = table.new_item( - hash_key='the-key', - attrs=item_data, - ) - item.put() - - results = table.query(hash_key='the-key') - results.response['Items'].should.have.length_of(1) - - -@mock_dynamodb_deprecated -def test_query_with_undeclared_table(): - conn = boto.connect_dynamodb() - - conn.layer1.query.when.called_with( - table_name='undeclared-table', - hash_key_value={'S': 
'the-key'}, - ).should.throw(DynamoDBResponseError) - - -@mock_dynamodb_deprecated -def test_scan(): - conn = boto.connect_dynamodb() - table = create_table(conn) - - item_data = { - 'Body': 'http://url_to_lolcat.gif', - 'SentBy': 'User A', - 'ReceivedTime': '12/9/2011 11:36:03 PM', - } - item = table.new_item( - hash_key='the-key', - attrs=item_data, - ) - item.put() - - item = table.new_item( - hash_key='the-key2', - attrs=item_data, - ) - item.put() - - item_data = { - 'Body': 'http://url_to_lolcat.gif', - 'SentBy': 'User B', - 'ReceivedTime': '12/9/2011 11:36:03 PM', - 'Ids': set([1, 2, 3]), - 'PK': 7, - } - item = table.new_item( - hash_key='the-key3', - attrs=item_data, - ) - item.put() - - results = table.scan() - results.response['Items'].should.have.length_of(3) - - results = table.scan(scan_filter={'SentBy': condition.EQ('User B')}) - results.response['Items'].should.have.length_of(1) - - results = table.scan(scan_filter={'Body': condition.BEGINS_WITH('http')}) - results.response['Items'].should.have.length_of(3) - - results = table.scan(scan_filter={'Ids': condition.CONTAINS(2)}) - results.response['Items'].should.have.length_of(1) - - results = table.scan(scan_filter={'Ids': condition.NOT_NULL()}) - results.response['Items'].should.have.length_of(1) - - results = table.scan(scan_filter={'Ids': condition.NULL()}) - results.response['Items'].should.have.length_of(2) - - results = table.scan(scan_filter={'PK': condition.BETWEEN(8, 9)}) - results.response['Items'].should.have.length_of(0) - - results = table.scan(scan_filter={'PK': condition.BETWEEN(5, 8)}) - results.response['Items'].should.have.length_of(1) - - -@mock_dynamodb_deprecated -def test_scan_with_undeclared_table(): - conn = boto.connect_dynamodb() - - conn.layer1.scan.when.called_with( - table_name='undeclared-table', - scan_filter={ - "SentBy": { - "AttributeValueList": [{ - "S": "User B"} - ], - "ComparisonOperator": "EQ" - } - }, - ).should.throw(DynamoDBResponseError) - - -@mock_dynamodb_deprecated -def test_scan_after_has_item(): - conn = boto.connect_dynamodb() - table = create_table(conn) - list(table.scan()).should.equal([]) - - table.has_item('the-key') - - list(table.scan()).should.equal([]) - - -@mock_dynamodb_deprecated -def test_write_batch(): - conn = boto.connect_dynamodb() - table = create_table(conn) - - batch_list = conn.new_batch_write_list() - - items = [] - items.append(table.new_item( - hash_key='the-key', - attrs={ - 'Body': 'http://url_to_lolcat.gif', - 'SentBy': 'User A', - 'ReceivedTime': '12/9/2011 11:36:03 PM', - }, - )) - - items.append(table.new_item( - hash_key='the-key2', - attrs={ - 'Body': 'http://url_to_lolcat.gif', - 'SentBy': 'User B', - 'ReceivedTime': '12/9/2011 11:36:03 PM', - 'Ids': set([1, 2, 3]), - 'PK': 7, - }, - )) - - batch_list.add_batch(table, puts=items) - conn.batch_write_item(batch_list) - - table.refresh() - table.item_count.should.equal(2) - - batch_list = conn.new_batch_write_list() - batch_list.add_batch(table, deletes=[('the-key')]) - conn.batch_write_item(batch_list) - - table.refresh() - table.item_count.should.equal(1) - - -@mock_dynamodb_deprecated -def test_batch_read(): - conn = boto.connect_dynamodb() - table = create_table(conn) - - item_data = { - 'Body': 'http://url_to_lolcat.gif', - 'SentBy': 'User A', - 'ReceivedTime': '12/9/2011 11:36:03 PM', - } - item = table.new_item( - hash_key='the-key1', - attrs=item_data, - ) - item.put() - - item = table.new_item( - hash_key='the-key2', - attrs=item_data, - ) - item.put() - - item_data = { - 'Body': 
'http://url_to_lolcat.gif', - 'SentBy': 'User B', - 'ReceivedTime': '12/9/2011 11:36:03 PM', - 'Ids': set([1, 2, 3]), - 'PK': 7, - } - item = table.new_item( - hash_key='another-key', - attrs=item_data, - ) - item.put() - - items = table.batch_get_item([('the-key1'), ('another-key')]) - # Iterate through so that batch_item gets called - count = len([x for x in items]) - count.should.have.equal(2) +from __future__ import unicode_literals + +import boto +import sure # noqa +from freezegun import freeze_time + +from moto import mock_dynamodb_deprecated + +from boto.dynamodb import condition +from boto.dynamodb.exceptions import DynamoDBKeyNotFoundError +from boto.exception import DynamoDBResponseError + + +def create_table(conn): + message_table_schema = conn.create_schema( + hash_key_name='forum_name', + hash_key_proto_value=str, + ) + + table = conn.create_table( + name='messages', + schema=message_table_schema, + read_units=10, + write_units=10 + ) + return table + + +@freeze_time("2012-01-14") +@mock_dynamodb_deprecated +def test_create_table(): + conn = boto.connect_dynamodb() + create_table(conn) + + expected = { + 'Table': { + 'CreationDateTime': 1326499200.0, + 'ItemCount': 0, + 'KeySchema': { + 'HashKeyElement': { + 'AttributeName': 'forum_name', + 'AttributeType': 'S' + }, + }, + 'ProvisionedThroughput': { + 'ReadCapacityUnits': 10, + 'WriteCapacityUnits': 10 + }, + 'TableName': 'messages', + 'TableSizeBytes': 0, + 'TableStatus': 'ACTIVE', + } + } + conn.describe_table('messages').should.equal(expected) + + +@mock_dynamodb_deprecated +def test_delete_table(): + conn = boto.connect_dynamodb() + create_table(conn) + conn.list_tables().should.have.length_of(1) + + conn.layer1.delete_table('messages') + conn.list_tables().should.have.length_of(0) + + conn.layer1.delete_table.when.called_with( + 'messages').should.throw(DynamoDBResponseError) + + +@mock_dynamodb_deprecated +def test_update_table_throughput(): + conn = boto.connect_dynamodb() + table = create_table(conn) + table.read_units.should.equal(10) + table.write_units.should.equal(10) + + table.update_throughput(5, 6) + table.refresh() + + table.read_units.should.equal(5) + table.write_units.should.equal(6) + + +@mock_dynamodb_deprecated +def test_item_add_and_describe_and_update(): + conn = boto.connect_dynamodb() + table = create_table(conn) + + item_data = { + 'Body': 'http://url_to_lolcat.gif', + 'SentBy': 'User A', + 'ReceivedTime': '12/9/2011 11:36:03 PM', + } + item = table.new_item( + hash_key='LOLCat Forum', + attrs=item_data, + ) + item.put() + + returned_item = table.get_item( + hash_key='LOLCat Forum', + attributes_to_get=['Body', 'SentBy'] + ) + dict(returned_item).should.equal({ + 'forum_name': 'LOLCat Forum', + 'Body': 'http://url_to_lolcat.gif', + 'SentBy': 'User A', + }) + + item['SentBy'] = 'User B' + item.put() + + returned_item = table.get_item( + hash_key='LOLCat Forum', + attributes_to_get=['Body', 'SentBy'] + ) + dict(returned_item).should.equal({ + 'forum_name': 'LOLCat Forum', + 'Body': 'http://url_to_lolcat.gif', + 'SentBy': 'User B', + }) + + +@mock_dynamodb_deprecated +def test_item_put_without_table(): + conn = boto.connect_dynamodb() + + conn.layer1.put_item.when.called_with( + table_name='undeclared-table', + item=dict( + hash_key='LOLCat Forum', + ), + ).should.throw(DynamoDBResponseError) + + +@mock_dynamodb_deprecated +def test_get_missing_item(): + conn = boto.connect_dynamodb() + table = create_table(conn) + + table.get_item.when.called_with( + hash_key='tester', + 
).should.throw(DynamoDBKeyNotFoundError) + + +@mock_dynamodb_deprecated +def test_get_item_with_undeclared_table(): + conn = boto.connect_dynamodb() + + conn.layer1.get_item.when.called_with( + table_name='undeclared-table', + key={ + 'HashKeyElement': {'S': 'tester'}, + }, + ).should.throw(DynamoDBKeyNotFoundError) + + +@mock_dynamodb_deprecated +def test_delete_item(): + conn = boto.connect_dynamodb() + table = create_table(conn) + + item_data = { + 'Body': 'http://url_to_lolcat.gif', + 'SentBy': 'User A', + 'ReceivedTime': '12/9/2011 11:36:03 PM', + } + item = table.new_item( + hash_key='LOLCat Forum', + attrs=item_data, + ) + item.put() + + table.refresh() + table.item_count.should.equal(1) + + response = item.delete() + response.should.equal({u'Attributes': [], u'ConsumedCapacityUnits': 0.5}) + table.refresh() + table.item_count.should.equal(0) + + item.delete.when.called_with().should.throw(DynamoDBResponseError) + + +@mock_dynamodb_deprecated +def test_delete_item_with_attribute_response(): + conn = boto.connect_dynamodb() + table = create_table(conn) + + item_data = { + 'Body': 'http://url_to_lolcat.gif', + 'SentBy': 'User A', + 'ReceivedTime': '12/9/2011 11:36:03 PM', + } + item = table.new_item( + hash_key='LOLCat Forum', + attrs=item_data, + ) + item.put() + + table.refresh() + table.item_count.should.equal(1) + + response = item.delete(return_values='ALL_OLD') + response.should.equal({ + u'Attributes': { + u'Body': u'http://url_to_lolcat.gif', + u'forum_name': u'LOLCat Forum', + u'ReceivedTime': u'12/9/2011 11:36:03 PM', + u'SentBy': u'User A', + }, + u'ConsumedCapacityUnits': 0.5 + }) + table.refresh() + table.item_count.should.equal(0) + + item.delete.when.called_with().should.throw(DynamoDBResponseError) + + +@mock_dynamodb_deprecated +def test_delete_item_with_undeclared_table(): + conn = boto.connect_dynamodb() + + conn.layer1.delete_item.when.called_with( + table_name='undeclared-table', + key={ + 'HashKeyElement': {'S': 'tester'}, + }, + ).should.throw(DynamoDBResponseError) + + +@mock_dynamodb_deprecated +def test_query(): + conn = boto.connect_dynamodb() + table = create_table(conn) + + item_data = { + 'Body': 'http://url_to_lolcat.gif', + 'SentBy': 'User A', + 'ReceivedTime': '12/9/2011 11:36:03 PM', + } + item = table.new_item( + hash_key='the-key', + attrs=item_data, + ) + item.put() + + results = table.query(hash_key='the-key') + results.response['Items'].should.have.length_of(1) + + +@mock_dynamodb_deprecated +def test_query_with_undeclared_table(): + conn = boto.connect_dynamodb() + + conn.layer1.query.when.called_with( + table_name='undeclared-table', + hash_key_value={'S': 'the-key'}, + ).should.throw(DynamoDBResponseError) + + +@mock_dynamodb_deprecated +def test_scan(): + conn = boto.connect_dynamodb() + table = create_table(conn) + + item_data = { + 'Body': 'http://url_to_lolcat.gif', + 'SentBy': 'User A', + 'ReceivedTime': '12/9/2011 11:36:03 PM', + } + item = table.new_item( + hash_key='the-key', + attrs=item_data, + ) + item.put() + + item = table.new_item( + hash_key='the-key2', + attrs=item_data, + ) + item.put() + + item_data = { + 'Body': 'http://url_to_lolcat.gif', + 'SentBy': 'User B', + 'ReceivedTime': '12/9/2011 11:36:03 PM', + 'Ids': set([1, 2, 3]), + 'PK': 7, + } + item = table.new_item( + hash_key='the-key3', + attrs=item_data, + ) + item.put() + + results = table.scan() + results.response['Items'].should.have.length_of(3) + + results = table.scan(scan_filter={'SentBy': condition.EQ('User B')}) + 
results.response['Items'].should.have.length_of(1) + + results = table.scan(scan_filter={'Body': condition.BEGINS_WITH('http')}) + results.response['Items'].should.have.length_of(3) + + results = table.scan(scan_filter={'Ids': condition.CONTAINS(2)}) + results.response['Items'].should.have.length_of(1) + + results = table.scan(scan_filter={'Ids': condition.NOT_NULL()}) + results.response['Items'].should.have.length_of(1) + + results = table.scan(scan_filter={'Ids': condition.NULL()}) + results.response['Items'].should.have.length_of(2) + + results = table.scan(scan_filter={'PK': condition.BETWEEN(8, 9)}) + results.response['Items'].should.have.length_of(0) + + results = table.scan(scan_filter={'PK': condition.BETWEEN(5, 8)}) + results.response['Items'].should.have.length_of(1) + + +@mock_dynamodb_deprecated +def test_scan_with_undeclared_table(): + conn = boto.connect_dynamodb() + + conn.layer1.scan.when.called_with( + table_name='undeclared-table', + scan_filter={ + "SentBy": { + "AttributeValueList": [{ + "S": "User B"} + ], + "ComparisonOperator": "EQ" + } + }, + ).should.throw(DynamoDBResponseError) + + +@mock_dynamodb_deprecated +def test_scan_after_has_item(): + conn = boto.connect_dynamodb() + table = create_table(conn) + list(table.scan()).should.equal([]) + + table.has_item('the-key') + + list(table.scan()).should.equal([]) + + +@mock_dynamodb_deprecated +def test_write_batch(): + conn = boto.connect_dynamodb() + table = create_table(conn) + + batch_list = conn.new_batch_write_list() + + items = [] + items.append(table.new_item( + hash_key='the-key', + attrs={ + 'Body': 'http://url_to_lolcat.gif', + 'SentBy': 'User A', + 'ReceivedTime': '12/9/2011 11:36:03 PM', + }, + )) + + items.append(table.new_item( + hash_key='the-key2', + attrs={ + 'Body': 'http://url_to_lolcat.gif', + 'SentBy': 'User B', + 'ReceivedTime': '12/9/2011 11:36:03 PM', + 'Ids': set([1, 2, 3]), + 'PK': 7, + }, + )) + + batch_list.add_batch(table, puts=items) + conn.batch_write_item(batch_list) + + table.refresh() + table.item_count.should.equal(2) + + batch_list = conn.new_batch_write_list() + batch_list.add_batch(table, deletes=[('the-key')]) + conn.batch_write_item(batch_list) + + table.refresh() + table.item_count.should.equal(1) + + +@mock_dynamodb_deprecated +def test_batch_read(): + conn = boto.connect_dynamodb() + table = create_table(conn) + + item_data = { + 'Body': 'http://url_to_lolcat.gif', + 'SentBy': 'User A', + 'ReceivedTime': '12/9/2011 11:36:03 PM', + } + item = table.new_item( + hash_key='the-key1', + attrs=item_data, + ) + item.put() + + item = table.new_item( + hash_key='the-key2', + attrs=item_data, + ) + item.put() + + item_data = { + 'Body': 'http://url_to_lolcat.gif', + 'SentBy': 'User B', + 'ReceivedTime': '12/9/2011 11:36:03 PM', + 'Ids': set([1, 2, 3]), + 'PK': 7, + } + item = table.new_item( + hash_key='another-key', + attrs=item_data, + ) + item.put() + + items = table.batch_get_item([('the-key1'), ('another-key')]) + # Iterate through so that batch_item gets called + count = len([x for x in items]) + count.should.have.equal(2) diff --git a/tests/test_dynamodb/test_server.py b/tests/test_dynamodb/test_server.py index 66004bbe1ad1..a9fb7607e6da 100644 --- a/tests/test_dynamodb/test_server.py +++ b/tests/test_dynamodb/test_server.py @@ -1,20 +1,20 @@ -from __future__ import unicode_literals -import sure # noqa - -import moto.server as server - -''' -Test the different server responses -''' - - -def test_table_list(): - backend = server.create_backend_app("dynamodb") - test_client = 
backend.test_client() - - res = test_client.get('/') - res.status_code.should.equal(404) - - headers = {'X-Amz-Target': 'TestTable.ListTables'} - res = test_client.get('/', headers=headers) - res.data.should.contain(b'TableNames') +from __future__ import unicode_literals +import sure # noqa + +import moto.server as server + +''' +Test the different server responses +''' + + +def test_table_list(): + backend = server.create_backend_app("dynamodb") + test_client = backend.test_client() + + res = test_client.get('/') + res.status_code.should.equal(404) + + headers = {'X-Amz-Target': 'TestTable.ListTables'} + res = test_client.get('/', headers=headers) + res.data.should.contain(b'TableNames') diff --git a/tests/test_dynamodb2/test_dynamodb.py b/tests/test_dynamodb2/test_dynamodb.py index afc919dd7c0c..8cef24cda4cf 100644 --- a/tests/test_dynamodb2/test_dynamodb.py +++ b/tests/test_dynamodb2/test_dynamodb.py @@ -1,1338 +1,1338 @@ -from __future__ import unicode_literals, print_function - -from decimal import Decimal - -import six -import boto -import boto3 -from boto3.dynamodb.conditions import Attr, Key -import sure # noqa -import requests -from moto import mock_dynamodb2, mock_dynamodb2_deprecated -from moto.dynamodb2 import dynamodb_backend2 -from boto.exception import JSONResponseError -from botocore.exceptions import ClientError -from tests.helpers import requires_boto_gte -import tests.backport_assert_raises - -import moto.dynamodb2.comparisons -import moto.dynamodb2.models - -from nose.tools import assert_raises -try: - import boto.dynamodb2 -except ImportError: - print("This boto version is not supported") - - -@requires_boto_gte("2.9") -@mock_dynamodb2_deprecated -def test_list_tables(): - name = 'TestTable' - # Should make tables properly with boto - dynamodb_backend2.create_table(name, schema=[ - {u'KeyType': u'HASH', u'AttributeName': u'forum_name'}, - {u'KeyType': u'RANGE', u'AttributeName': u'subject'} - ]) - conn = boto.dynamodb2.connect_to_region( - 'us-east-1', - aws_access_key_id="ak", - aws_secret_access_key="sk") - assert conn.list_tables()["TableNames"] == [name] - - -@requires_boto_gte("2.9") -@mock_dynamodb2_deprecated -def test_list_tables_layer_1(): - # Should make tables properly with boto - dynamodb_backend2.create_table("test_1", schema=[ - {u'KeyType': u'HASH', u'AttributeName': u'name'} - ]) - dynamodb_backend2.create_table("test_2", schema=[ - {u'KeyType': u'HASH', u'AttributeName': u'name'} - ]) - conn = boto.dynamodb2.connect_to_region( - 'us-east-1', - aws_access_key_id="ak", - aws_secret_access_key="sk") - - res = conn.list_tables(limit=1) - expected = {"TableNames": ["test_1"], "LastEvaluatedTableName": "test_1"} - res.should.equal(expected) - - res = conn.list_tables(limit=1, exclusive_start_table_name="test_1") - expected = {"TableNames": ["test_2"]} - res.should.equal(expected) - - -@requires_boto_gte("2.9") -@mock_dynamodb2_deprecated -def test_describe_missing_table(): - conn = boto.dynamodb2.connect_to_region( - 'us-west-2', - aws_access_key_id="ak", - aws_secret_access_key="sk") - with assert_raises(JSONResponseError): - conn.describe_table('messages') - - -@requires_boto_gte("2.9") -@mock_dynamodb2 -def test_list_table_tags(): - name = 'TestTable' - conn = boto3.client('dynamodb', - region_name='us-west-2', - aws_access_key_id="ak", - aws_secret_access_key="sk") - conn.create_table(TableName=name, - KeySchema=[{'AttributeName':'id','KeyType':'HASH'}], - AttributeDefinitions=[{'AttributeName':'id','AttributeType':'S'}], - 
ProvisionedThroughput={'ReadCapacityUnits':5,'WriteCapacityUnits':5}) - table_description = conn.describe_table(TableName=name) - arn = table_description['Table']['TableArn'] - - # Tag table - tags = [{'Key': 'TestTag', 'Value': 'TestValue'}, {'Key': 'TestTag2', 'Value': 'TestValue2'}] - conn.tag_resource(ResourceArn=arn, Tags=tags) - - # Check tags - resp = conn.list_tags_of_resource(ResourceArn=arn) - assert resp["Tags"] == tags - - # Remove 1 tag - conn.untag_resource(ResourceArn=arn, TagKeys=['TestTag']) - - # Check tags - resp = conn.list_tags_of_resource(ResourceArn=arn) - assert resp["Tags"] == [{'Key': 'TestTag2', 'Value': 'TestValue2'}] - - -@requires_boto_gte("2.9") -@mock_dynamodb2 -def test_list_table_tags_empty(): - name = 'TestTable' - conn = boto3.client('dynamodb', - region_name='us-west-2', - aws_access_key_id="ak", - aws_secret_access_key="sk") - conn.create_table(TableName=name, - KeySchema=[{'AttributeName':'id','KeyType':'HASH'}], - AttributeDefinitions=[{'AttributeName':'id','AttributeType':'S'}], - ProvisionedThroughput={'ReadCapacityUnits':5,'WriteCapacityUnits':5}) - table_description = conn.describe_table(TableName=name) - arn = table_description['Table']['TableArn'] - tags = [{'Key':'TestTag', 'Value': 'TestValue'}] - # conn.tag_resource(ResourceArn=arn, - # Tags=tags) - resp = conn.list_tags_of_resource(ResourceArn=arn) - assert resp["Tags"] == [] - - -@requires_boto_gte("2.9") -@mock_dynamodb2 -def test_list_table_tags_paginated(): - name = 'TestTable' - conn = boto3.client('dynamodb', - region_name='us-west-2', - aws_access_key_id="ak", - aws_secret_access_key="sk") - conn.create_table(TableName=name, - KeySchema=[{'AttributeName':'id','KeyType':'HASH'}], - AttributeDefinitions=[{'AttributeName':'id','AttributeType':'S'}], - ProvisionedThroughput={'ReadCapacityUnits':5,'WriteCapacityUnits':5}) - table_description = conn.describe_table(TableName=name) - arn = table_description['Table']['TableArn'] - for i in range(11): - tags = [{'Key':'TestTag%d' % i, 'Value': 'TestValue'}] - conn.tag_resource(ResourceArn=arn, - Tags=tags) - resp = conn.list_tags_of_resource(ResourceArn=arn) - assert len(resp["Tags"]) == 10 - assert 'NextToken' in resp.keys() - resp2 = conn.list_tags_of_resource(ResourceArn=arn, - NextToken=resp['NextToken']) - assert len(resp2["Tags"]) == 1 - assert 'NextToken' not in resp2.keys() - - -@requires_boto_gte("2.9") -@mock_dynamodb2 -def test_list_not_found_table_tags(): - conn = boto3.client('dynamodb', - region_name='us-west-2', - aws_access_key_id="ak", - aws_secret_access_key="sk") - arn = 'DymmyArn' - try: - conn.list_tags_of_resource(ResourceArn=arn) - except ClientError as exception: - assert exception.response['Error']['Code'] == "ResourceNotFoundException" - - -@requires_boto_gte("2.9") -@mock_dynamodb2 -def test_item_add_empty_string_exception(): - name = 'TestTable' - conn = boto3.client('dynamodb', - region_name='us-west-2', - aws_access_key_id="ak", - aws_secret_access_key="sk") - conn.create_table(TableName=name, - KeySchema=[{'AttributeName':'forum_name','KeyType':'HASH'}], - AttributeDefinitions=[{'AttributeName':'forum_name','AttributeType':'S'}], - ProvisionedThroughput={'ReadCapacityUnits':5,'WriteCapacityUnits':5}) - - with assert_raises(ClientError) as ex: - conn.put_item( - TableName=name, - Item={ - 'forum_name': { 'S': 'LOLCat Forum' }, - 'subject': { 'S': 'Check this out!' 
}, - 'Body': { 'S': 'http://url_to_lolcat.gif'}, - 'SentBy': { 'S': "" }, - 'ReceivedTime': { 'S': '12/9/2011 11:36:03 PM'}, - } - ) - - ex.exception.response['Error']['Code'].should.equal('ValidationException') - ex.exception.response['ResponseMetadata']['HTTPStatusCode'].should.equal(400) - ex.exception.response['Error']['Message'].should.equal( - 'One or more parameter values were invalid: An AttributeValue may not contain an empty string' - ) - - -@requires_boto_gte("2.9") -@mock_dynamodb2 -def test_update_item_with_empty_string_exception(): - name = 'TestTable' - conn = boto3.client('dynamodb', - region_name='us-west-2', - aws_access_key_id="ak", - aws_secret_access_key="sk") - conn.create_table(TableName=name, - KeySchema=[{'AttributeName':'forum_name','KeyType':'HASH'}], - AttributeDefinitions=[{'AttributeName':'forum_name','AttributeType':'S'}], - ProvisionedThroughput={'ReadCapacityUnits':5,'WriteCapacityUnits':5}) - - conn.put_item( - TableName=name, - Item={ - 'forum_name': { 'S': 'LOLCat Forum' }, - 'subject': { 'S': 'Check this out!' }, - 'Body': { 'S': 'http://url_to_lolcat.gif'}, - 'SentBy': { 'S': "test" }, - 'ReceivedTime': { 'S': '12/9/2011 11:36:03 PM'}, - } - ) - - with assert_raises(ClientError) as ex: - conn.update_item( - TableName=name, - Key={ - 'forum_name': { 'S': 'LOLCat Forum'}, - }, - UpdateExpression='set Body=:Body', - ExpressionAttributeValues={ - ':Body': {'S': ''} - }) - - ex.exception.response['Error']['Code'].should.equal('ValidationException') - ex.exception.response['ResponseMetadata']['HTTPStatusCode'].should.equal(400) - ex.exception.response['Error']['Message'].should.equal( - 'One or more parameter values were invalid: An AttributeValue may not contain an empty string' - ) - - -@requires_boto_gte("2.9") -@mock_dynamodb2 -def test_query_invalid_table(): - conn = boto3.client('dynamodb', - region_name='us-west-2', - aws_access_key_id="ak", - aws_secret_access_key="sk") - try: - conn.query(TableName='invalid_table', KeyConditionExpression='index1 = :partitionkeyval', ExpressionAttributeValues={':partitionkeyval': {'S':'test'}}) - except ClientError as exception: - assert exception.response['Error']['Code'] == "ResourceNotFoundException" - - -@requires_boto_gte("2.9") -@mock_dynamodb2 -def test_scan_returns_consumed_capacity(): - name = 'TestTable' - conn = boto3.client('dynamodb', - region_name='us-west-2', - aws_access_key_id="ak", - aws_secret_access_key="sk") - - conn.create_table(TableName=name, - KeySchema=[{'AttributeName':'forum_name','KeyType':'HASH'}], - AttributeDefinitions=[{'AttributeName':'forum_name','AttributeType':'S'}], - ProvisionedThroughput={'ReadCapacityUnits':5,'WriteCapacityUnits':5}) - - conn.put_item( - TableName=name, - Item={ - 'forum_name': { 'S': 'LOLCat Forum' }, - 'subject': { 'S': 'Check this out!' 
}, - 'Body': { 'S': 'http://url_to_lolcat.gif'}, - 'SentBy': { 'S': "test" }, - 'ReceivedTime': { 'S': '12/9/2011 11:36:03 PM'}, - } - ) - - response = conn.scan( - TableName=name, - ) - - assert 'ConsumedCapacity' in response - assert 'CapacityUnits' in response['ConsumedCapacity'] - assert response['ConsumedCapacity']['TableName'] == name - - -@requires_boto_gte("2.9") -@mock_dynamodb2 -def test_put_item_with_special_chars(): - name = 'TestTable' - conn = boto3.client('dynamodb', - region_name='us-west-2', - aws_access_key_id="ak", - aws_secret_access_key="sk") - - conn.create_table(TableName=name, - KeySchema=[{'AttributeName':'forum_name','KeyType':'HASH'}], - AttributeDefinitions=[{'AttributeName':'forum_name','AttributeType':'S'}], - ProvisionedThroughput={'ReadCapacityUnits':5,'WriteCapacityUnits':5}) - - conn.put_item( - TableName=name, - Item={ - 'forum_name': { 'S': 'LOLCat Forum' }, - 'subject': { 'S': 'Check this out!' }, - 'Body': { 'S': 'http://url_to_lolcat.gif'}, - 'SentBy': { 'S': "test" }, - 'ReceivedTime': { 'S': '12/9/2011 11:36:03 PM'}, - '"': {"S": "foo"}, - } - ) - - -@requires_boto_gte("2.9") -@mock_dynamodb2 -def test_query_returns_consumed_capacity(): - dynamodb = boto3.resource('dynamodb', region_name='us-east-1') - - # Create the DynamoDB table. - table = dynamodb.create_table( - TableName='users', - KeySchema=[ - { - 'AttributeName': 'forum_name', - 'KeyType': 'HASH' - }, - { - 'AttributeName': 'subject', - 'KeyType': 'RANGE' - }, - ], - AttributeDefinitions=[ - { - 'AttributeName': 'forum_name', - 'AttributeType': 'S' - }, - { - 'AttributeName': 'subject', - 'AttributeType': 'S' - }, - ], - ProvisionedThroughput={ - 'ReadCapacityUnits': 5, - 'WriteCapacityUnits': 5 - } - ) - table = dynamodb.Table('users') - - table.put_item(Item={ - 'forum_name': 'the-key', - 'subject': '123', - 'body': 'some test message' - }) - - results = table.query( - KeyConditionExpression=Key('forum_name').eq( - 'the-key') - ) - - assert 'ConsumedCapacity' in results - assert 'CapacityUnits' in results['ConsumedCapacity'] - assert results['ConsumedCapacity']['CapacityUnits'] == 1 - - -@mock_dynamodb2 -def test_basic_projection_expressions(): - dynamodb = boto3.resource('dynamodb', region_name='us-east-1') - - # Create the DynamoDB table. 
- table = dynamodb.create_table( - TableName='users', - KeySchema=[ - { - 'AttributeName': 'forum_name', - 'KeyType': 'HASH' - }, - { - 'AttributeName': 'subject', - 'KeyType': 'RANGE' - }, - ], - AttributeDefinitions=[ - { - 'AttributeName': 'forum_name', - 'AttributeType': 'S' - }, - { - 'AttributeName': 'subject', - 'AttributeType': 'S' - }, - ], - ProvisionedThroughput={ - 'ReadCapacityUnits': 5, - 'WriteCapacityUnits': 5 - } - ) - table = dynamodb.Table('users') - - table.put_item(Item={ - 'forum_name': 'the-key', - 'subject': '123', - 'body': 'some test message' - }) - - table.put_item(Item={ - 'forum_name': 'not-the-key', - 'subject': '123', - 'body': 'some other test message' - }) - # Test a query returning all items - results = table.query( - KeyConditionExpression=Key('forum_name').eq( - 'the-key'), - ProjectionExpression='body, subject' - ) - - assert 'body' in results['Items'][0] - assert results['Items'][0]['body'] == 'some test message' - assert 'subject' in results['Items'][0] - - table.put_item(Item={ - 'forum_name': 'the-key', - 'subject': '1234', - 'body': 'yet another test message' - }) - - results = table.query( - KeyConditionExpression=Key('forum_name').eq( - 'the-key'), - ProjectionExpression='body' - ) - - assert 'body' in results['Items'][0] - assert 'subject' not in results['Items'][0] - assert results['Items'][0]['body'] == 'some test message' - assert 'body' in results['Items'][1] - assert 'subject' not in results['Items'][1] - assert results['Items'][1]['body'] == 'yet another test message' - - # The projection expression should not remove data from storage - results = table.query( - KeyConditionExpression=Key('forum_name').eq( - 'the-key'), - ) - assert 'subject' in results['Items'][0] - assert 'body' in results['Items'][1] - assert 'forum_name' in results['Items'][1] - - -@mock_dynamodb2 -def test_basic_projection_expressions_with_attr_expression_names(): - dynamodb = boto3.resource('dynamodb', region_name='us-east-1') - - # Create the DynamoDB table. - table = dynamodb.create_table( - TableName='users', - KeySchema=[ - { - 'AttributeName': 'forum_name', - 'KeyType': 'HASH' - }, - { - 'AttributeName': 'subject', - 'KeyType': 'RANGE' - }, - ], - AttributeDefinitions=[ - { - 'AttributeName': 'forum_name', - 'AttributeType': 'S' - }, - { - 'AttributeName': 'subject', - 'AttributeType': 'S' - }, - ], - ProvisionedThroughput={ - 'ReadCapacityUnits': 5, - 'WriteCapacityUnits': 5 - } - ) - table = dynamodb.Table('users') - - table.put_item(Item={ - 'forum_name': 'the-key', - 'subject': '123', - 'body': 'some test message', - 'attachment': 'something' - }) - - table.put_item(Item={ - 'forum_name': 'not-the-key', - 'subject': '123', - 'body': 'some other test message', - 'attachment': 'something' - }) - # Test a query returning all items - - results = table.query( - KeyConditionExpression=Key('forum_name').eq( - 'the-key'), - ProjectionExpression='#rl, #rt, subject', - ExpressionAttributeNames={ - '#rl': 'body', - '#rt': 'attachment' - }, - ) - - assert 'body' in results['Items'][0] - assert results['Items'][0]['body'] == 'some test message' - assert 'subject' in results['Items'][0] - assert results['Items'][0]['subject'] == '123' - assert 'attachment' in results['Items'][0] - assert results['Items'][0]['attachment'] == 'something' - - -@mock_dynamodb2 -def test_put_item_returns_consumed_capacity(): - dynamodb = boto3.resource('dynamodb', region_name='us-east-1') - - # Create the DynamoDB table. 
- table = dynamodb.create_table( - TableName='users', - KeySchema=[ - { - 'AttributeName': 'forum_name', - 'KeyType': 'HASH' - }, - { - 'AttributeName': 'subject', - 'KeyType': 'RANGE' - }, - ], - AttributeDefinitions=[ - { - 'AttributeName': 'forum_name', - 'AttributeType': 'S' - }, - { - 'AttributeName': 'subject', - 'AttributeType': 'S' - }, - ], - ProvisionedThroughput={ - 'ReadCapacityUnits': 5, - 'WriteCapacityUnits': 5 - } - ) - table = dynamodb.Table('users') - - response = table.put_item(Item={ - 'forum_name': 'the-key', - 'subject': '123', - 'body': 'some test message', - }) - - assert 'ConsumedCapacity' in response - - -@mock_dynamodb2 -def test_update_item_returns_consumed_capacity(): - dynamodb = boto3.resource('dynamodb', region_name='us-east-1') - - # Create the DynamoDB table. - table = dynamodb.create_table( - TableName='users', - KeySchema=[ - { - 'AttributeName': 'forum_name', - 'KeyType': 'HASH' - }, - { - 'AttributeName': 'subject', - 'KeyType': 'RANGE' - }, - ], - AttributeDefinitions=[ - { - 'AttributeName': 'forum_name', - 'AttributeType': 'S' - }, - { - 'AttributeName': 'subject', - 'AttributeType': 'S' - }, - ], - ProvisionedThroughput={ - 'ReadCapacityUnits': 5, - 'WriteCapacityUnits': 5 - } - ) - table = dynamodb.Table('users') - - table.put_item(Item={ - 'forum_name': 'the-key', - 'subject': '123', - 'body': 'some test message', - }) - - response = table.update_item(Key={ - 'forum_name': 'the-key', - 'subject': '123' - }, - UpdateExpression='set body=:tb', - ExpressionAttributeValues={ - ':tb': 'a new message' - }) - - assert 'ConsumedCapacity' in response - assert 'CapacityUnits' in response['ConsumedCapacity'] - assert 'TableName' in response['ConsumedCapacity'] - - -@mock_dynamodb2 -def test_get_item_returns_consumed_capacity(): - dynamodb = boto3.resource('dynamodb', region_name='us-east-1') - - # Create the DynamoDB table. 
- table = dynamodb.create_table( - TableName='users', - KeySchema=[ - { - 'AttributeName': 'forum_name', - 'KeyType': 'HASH' - }, - { - 'AttributeName': 'subject', - 'KeyType': 'RANGE' - }, - ], - AttributeDefinitions=[ - { - 'AttributeName': 'forum_name', - 'AttributeType': 'S' - }, - { - 'AttributeName': 'subject', - 'AttributeType': 'S' - }, - ], - ProvisionedThroughput={ - 'ReadCapacityUnits': 5, - 'WriteCapacityUnits': 5 - } - ) - table = dynamodb.Table('users') - - table.put_item(Item={ - 'forum_name': 'the-key', - 'subject': '123', - 'body': 'some test message', - }) - - response = table.get_item(Key={ - 'forum_name': 'the-key', - 'subject': '123' - }) - - assert 'ConsumedCapacity' in response - assert 'CapacityUnits' in response['ConsumedCapacity'] - assert 'TableName' in response['ConsumedCapacity'] - - -def test_filter_expression(): - row1 = moto.dynamodb2.models.Item(None, None, None, None, {'Id': {'N': '8'}, 'Subs': {'N': '5'}, 'Desc': {'S': 'Some description'}, 'KV': {'SS': ['test1', 'test2']}}) - row2 = moto.dynamodb2.models.Item(None, None, None, None, {'Id': {'N': '8'}, 'Subs': {'N': '10'}, 'Desc': {'S': 'A description'}, 'KV': {'SS': ['test3', 'test4']}}) - - # NOT test 1 - filter_expr = moto.dynamodb2.comparisons.get_filter_expression('NOT attribute_not_exists(Id)', {}, {}) - filter_expr.expr(row1).should.be(True) - - # NOT test 2 - filter_expr = moto.dynamodb2.comparisons.get_filter_expression('NOT (Id = :v0)', {}, {':v0': {'N': 8}}) - filter_expr.expr(row1).should.be(False) # Id = 8 so should be false - - # AND test - filter_expr = moto.dynamodb2.comparisons.get_filter_expression('Id > :v0 AND Subs < :v1', {}, {':v0': {'N': 5}, ':v1': {'N': 7}}) - filter_expr.expr(row1).should.be(True) - filter_expr.expr(row2).should.be(False) - - # OR test - filter_expr = moto.dynamodb2.comparisons.get_filter_expression('Id = :v0 OR Id=:v1', {}, {':v0': {'N': 5}, ':v1': {'N': 8}}) - filter_expr.expr(row1).should.be(True) - - # BETWEEN test - filter_expr = moto.dynamodb2.comparisons.get_filter_expression('Id BETWEEN :v0 AND :v1', {}, {':v0': {'N': 5}, ':v1': {'N': 10}}) - filter_expr.expr(row1).should.be(True) - - # PAREN test - filter_expr = moto.dynamodb2.comparisons.get_filter_expression('Id = :v0 AND (Subs = :v0 OR Subs = :v1)', {}, {':v0': {'N': 8}, ':v1': {'N': 5}}) - filter_expr.expr(row1).should.be(True) - - # IN test - filter_expr = moto.dynamodb2.comparisons.get_filter_expression('Id IN :v0', {}, {':v0': {'NS': [7, 8, 9]}}) - filter_expr.expr(row1).should.be(True) - - # attribute function tests (with extra spaces) - filter_expr = moto.dynamodb2.comparisons.get_filter_expression('attribute_exists(Id) AND attribute_not_exists (User)', {}, {}) - filter_expr.expr(row1).should.be(True) - - filter_expr = moto.dynamodb2.comparisons.get_filter_expression('attribute_type(Id, N)', {}, {}) - filter_expr.expr(row1).should.be(True) - - # beginswith function test - filter_expr = moto.dynamodb2.comparisons.get_filter_expression('begins_with(Desc, Some)', {}, {}) - filter_expr.expr(row1).should.be(True) - filter_expr.expr(row2).should.be(False) - - # contains function test - filter_expr = moto.dynamodb2.comparisons.get_filter_expression('contains(KV, test1)', {}, {}) - filter_expr.expr(row1).should.be(True) - filter_expr.expr(row2).should.be(False) - - # size function test - filter_expr = moto.dynamodb2.comparisons.get_filter_expression('size(Desc) > size(KV)', {}, {}) - filter_expr.expr(row1).should.be(True) - - # Expression from @batkuip - filter_expr = 
moto.dynamodb2.comparisons.get_filter_expression( - '(#n0 < :v0 AND attribute_not_exists(#n1))', - {'#n0': 'Subs', '#n1': 'fanout_ts'}, - {':v0': {'N': '7'}} - ) - filter_expr.expr(row1).should.be(True) - # Expression from to check contains on string value - filter_expr = moto.dynamodb2.comparisons.get_filter_expression( - 'contains(#n0, :v0)', - {'#n0': 'Desc'}, - {':v0': {'S': 'Some'}} - ) - filter_expr.expr(row1).should.be(True) - filter_expr.expr(row2).should.be(False) - - -@mock_dynamodb2 -def test_query_filter(): - client = boto3.client('dynamodb', region_name='us-east-1') - dynamodb = boto3.resource('dynamodb', region_name='us-east-1') - - # Create the DynamoDB table. - client.create_table( - TableName='test1', - AttributeDefinitions=[{'AttributeName': 'client', 'AttributeType': 'S'}, {'AttributeName': 'app', 'AttributeType': 'S'}], - KeySchema=[{'AttributeName': 'client', 'KeyType': 'HASH'}, {'AttributeName': 'app', 'KeyType': 'RANGE'}], - ProvisionedThroughput={'ReadCapacityUnits': 123, 'WriteCapacityUnits': 123} - ) - client.put_item( - TableName='test1', - Item={ - 'client': {'S': 'client1'}, - 'app': {'S': 'app1'} - } - ) - client.put_item( - TableName='test1', - Item={ - 'client': {'S': 'client1'}, - 'app': {'S': 'app2'} - } - ) - - table = dynamodb.Table('test1') - response = table.query( - KeyConditionExpression=Key('client').eq('client1') - ) - assert response['Count'] == 2 - - response = table.query( - KeyConditionExpression=Key('client').eq('client1'), - FilterExpression=Attr('app').eq('app2') - ) - assert response['Count'] == 1 - assert response['Items'][0]['app'] == 'app2' - response = table.query( - KeyConditionExpression=Key('client').eq('client1'), - FilterExpression=Attr('app').contains('app') - ) - assert response['Count'] == 2 - - -@mock_dynamodb2 -def test_scan_filter(): - client = boto3.client('dynamodb', region_name='us-east-1') - dynamodb = boto3.resource('dynamodb', region_name='us-east-1') - - # Create the DynamoDB table. - client.create_table( - TableName='test1', - AttributeDefinitions=[{'AttributeName': 'client', 'AttributeType': 'S'}, {'AttributeName': 'app', 'AttributeType': 'S'}], - KeySchema=[{'AttributeName': 'client', 'KeyType': 'HASH'}, {'AttributeName': 'app', 'KeyType': 'RANGE'}], - ProvisionedThroughput={'ReadCapacityUnits': 123, 'WriteCapacityUnits': 123} - ) - client.put_item( - TableName='test1', - Item={ - 'client': {'S': 'client1'}, - 'app': {'S': 'app1'} - } - ) - - table = dynamodb.Table('test1') - response = table.scan( - FilterExpression=Attr('app').eq('app2') - ) - assert response['Count'] == 0 - - response = table.scan( - FilterExpression=Attr('app').eq('app1') - ) - assert response['Count'] == 1 - - -@mock_dynamodb2 -def test_scan_filter2(): - client = boto3.client('dynamodb', region_name='us-east-1') - - # Create the DynamoDB table. 
- client.create_table( - TableName='test1', - AttributeDefinitions=[{'AttributeName': 'client', 'AttributeType': 'S'}, {'AttributeName': 'app', 'AttributeType': 'N'}], - KeySchema=[{'AttributeName': 'client', 'KeyType': 'HASH'}, {'AttributeName': 'app', 'KeyType': 'RANGE'}], - ProvisionedThroughput={'ReadCapacityUnits': 123, 'WriteCapacityUnits': 123} - ) - client.put_item( - TableName='test1', - Item={ - 'client': {'S': 'client1'}, - 'app': {'N': '1'} - } - ) - - response = client.scan( - TableName='test1', - Select='ALL_ATTRIBUTES', - FilterExpression='#tb >= :dt', - ExpressionAttributeNames={"#tb": "app"}, - ExpressionAttributeValues={":dt": {"N": str(1)}} - ) - assert response['Count'] == 1 - - -@mock_dynamodb2 -def test_scan_filter3(): - client = boto3.client('dynamodb', region_name='us-east-1') - dynamodb = boto3.resource('dynamodb', region_name='us-east-1') - - # Create the DynamoDB table. - client.create_table( - TableName='test1', - AttributeDefinitions=[{'AttributeName': 'client', 'AttributeType': 'S'}, {'AttributeName': 'app', 'AttributeType': 'N'}], - KeySchema=[{'AttributeName': 'client', 'KeyType': 'HASH'}, {'AttributeName': 'app', 'KeyType': 'RANGE'}], - ProvisionedThroughput={'ReadCapacityUnits': 123, 'WriteCapacityUnits': 123} - ) - client.put_item( - TableName='test1', - Item={ - 'client': {'S': 'client1'}, - 'app': {'N': '1'}, - 'active': {'BOOL': True} - } - ) - - table = dynamodb.Table('test1') - response = table.scan( - FilterExpression=Attr('active').eq(True) - ) - assert response['Count'] == 1 - - -@mock_dynamodb2 -def test_scan_filter4(): - client = boto3.client('dynamodb', region_name='us-east-1') - dynamodb = boto3.resource('dynamodb', region_name='us-east-1') - - # Create the DynamoDB table. - client.create_table( - TableName='test1', - AttributeDefinitions=[{'AttributeName': 'client', 'AttributeType': 'S'}, {'AttributeName': 'app', 'AttributeType': 'N'}], - KeySchema=[{'AttributeName': 'client', 'KeyType': 'HASH'}, {'AttributeName': 'app', 'KeyType': 'RANGE'}], - ProvisionedThroughput={'ReadCapacityUnits': 123, 'WriteCapacityUnits': 123} - ) - - table = dynamodb.Table('test1') - response = table.scan( - FilterExpression=Attr('epoch_ts').lt(7) & Attr('fanout_ts').not_exists() - ) - # Just testing - assert response['Count'] == 0 - - -@mock_dynamodb2 -def test_bad_scan_filter(): - client = boto3.client('dynamodb', region_name='us-east-1') - dynamodb = boto3.resource('dynamodb', region_name='us-east-1') - - # Create the DynamoDB table. - client.create_table( - TableName='test1', - AttributeDefinitions=[{'AttributeName': 'client', 'AttributeType': 'S'}, {'AttributeName': 'app', 'AttributeType': 'S'}], - KeySchema=[{'AttributeName': 'client', 'KeyType': 'HASH'}, {'AttributeName': 'app', 'KeyType': 'RANGE'}], - ProvisionedThroughput={'ReadCapacityUnits': 123, 'WriteCapacityUnits': 123} - ) - table = dynamodb.Table('test1') - - # Bad expression - try: - table.scan( - FilterExpression='client test' - ) - except ClientError as err: - err.response['Error']['Code'].should.equal('ValidationError') - else: - raise RuntimeError('Should of raised ResourceInUseException') - - -@mock_dynamodb2 -def test_duplicate_create(): - client = boto3.client('dynamodb', region_name='us-east-1') - - # Create the DynamoDB table. 
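- # Creating 'test1' twice should fail: the first create_table call succeeds
- # and the second, identical call is expected to raise ResourceInUseException.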
- client.create_table( - TableName='test1', - AttributeDefinitions=[{'AttributeName': 'client', 'AttributeType': 'S'}, {'AttributeName': 'app', 'AttributeType': 'S'}], - KeySchema=[{'AttributeName': 'client', 'KeyType': 'HASH'}, {'AttributeName': 'app', 'KeyType': 'RANGE'}], - ProvisionedThroughput={'ReadCapacityUnits': 123, 'WriteCapacityUnits': 123} - ) - - try: - client.create_table( - TableName='test1', - AttributeDefinitions=[{'AttributeName': 'client', 'AttributeType': 'S'}, {'AttributeName': 'app', 'AttributeType': 'S'}], - KeySchema=[{'AttributeName': 'client', 'KeyType': 'HASH'}, {'AttributeName': 'app', 'KeyType': 'RANGE'}], - ProvisionedThroughput={'ReadCapacityUnits': 123, 'WriteCapacityUnits': 123} - ) - except ClientError as err: - err.response['Error']['Code'].should.equal('ResourceInUseException') - else: - raise RuntimeError('Should of raised ResourceInUseException') - - -@mock_dynamodb2 -def test_delete_table(): - client = boto3.client('dynamodb', region_name='us-east-1') - - # Create the DynamoDB table. - client.create_table( - TableName='test1', - AttributeDefinitions=[{'AttributeName': 'client', 'AttributeType': 'S'}, {'AttributeName': 'app', 'AttributeType': 'S'}], - KeySchema=[{'AttributeName': 'client', 'KeyType': 'HASH'}, {'AttributeName': 'app', 'KeyType': 'RANGE'}], - ProvisionedThroughput={'ReadCapacityUnits': 123, 'WriteCapacityUnits': 123} - ) - - client.delete_table(TableName='test1') - - resp = client.list_tables() - len(resp['TableNames']).should.equal(0) - - try: - client.delete_table(TableName='test1') - except ClientError as err: - err.response['Error']['Code'].should.equal('ResourceNotFoundException') - else: - raise RuntimeError('Should of raised ResourceNotFoundException') - - -@mock_dynamodb2 -def test_delete_item(): - client = boto3.client('dynamodb', region_name='us-east-1') - dynamodb = boto3.resource('dynamodb', region_name='us-east-1') - - # Create the DynamoDB table. 
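- # Two items are written and then deleted one at a time: deleting with
- # ReturnValues='ALL_OLD' returns the removed item's attributes, while a
- # plain delete_item returns an empty Attributes map.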
- client.create_table( - TableName='test1', - AttributeDefinitions=[{'AttributeName': 'client', 'AttributeType': 'S'}, {'AttributeName': 'app', 'AttributeType': 'S'}], - KeySchema=[{'AttributeName': 'client', 'KeyType': 'HASH'}, {'AttributeName': 'app', 'KeyType': 'RANGE'}], - ProvisionedThroughput={'ReadCapacityUnits': 123, 'WriteCapacityUnits': 123} - ) - client.put_item( - TableName='test1', - Item={ - 'client': {'S': 'client1'}, - 'app': {'S': 'app1'} - } - ) - client.put_item( - TableName='test1', - Item={ - 'client': {'S': 'client1'}, - 'app': {'S': 'app2'} - } - ) - - table = dynamodb.Table('test1') - response = table.scan() - assert response['Count'] == 2 - - # Test deletion and returning old value - response = table.delete_item(Key={'client': 'client1', 'app': 'app1'}, ReturnValues='ALL_OLD') - response['Attributes'].should.contain('client') - response['Attributes'].should.contain('app') - - response = table.scan() - assert response['Count'] == 1 - - # Test deletion returning nothing - response = table.delete_item(Key={'client': 'client1', 'app': 'app2'}) - len(response['Attributes']).should.equal(0) - - response = table.scan() - assert response['Count'] == 0 - - -@mock_dynamodb2 -def test_describe_limits(): - client = boto3.client('dynamodb', region_name='eu-central-1') - resp = client.describe_limits() - - resp['AccountMaxReadCapacityUnits'].should.equal(20000) - resp['AccountMaxWriteCapacityUnits'].should.equal(20000) - resp['TableMaxWriteCapacityUnits'].should.equal(10000) - resp['TableMaxReadCapacityUnits'].should.equal(10000) - - -@mock_dynamodb2 -def test_set_ttl(): - client = boto3.client('dynamodb', region_name='us-east-1') - - # Create the DynamoDB table. - client.create_table( - TableName='test1', - AttributeDefinitions=[{'AttributeName': 'client', 'AttributeType': 'S'}, {'AttributeName': 'app', 'AttributeType': 'S'}], - KeySchema=[{'AttributeName': 'client', 'KeyType': 'HASH'}, {'AttributeName': 'app', 'KeyType': 'RANGE'}], - ProvisionedThroughput={'ReadCapacityUnits': 123, 'WriteCapacityUnits': 123} - ) - - client.update_time_to_live( - TableName='test1', - TimeToLiveSpecification={ - 'Enabled': True, - 'AttributeName': 'expire' - } - ) - - resp = client.describe_time_to_live(TableName='test1') - resp['TimeToLiveDescription']['TimeToLiveStatus'].should.equal('ENABLED') - resp['TimeToLiveDescription']['AttributeName'].should.equal('expire') - - client.update_time_to_live( - TableName='test1', - TimeToLiveSpecification={ - 'Enabled': False, - 'AttributeName': 'expire' - } - ) - - resp = client.describe_time_to_live(TableName='test1') - resp['TimeToLiveDescription']['TimeToLiveStatus'].should.equal('DISABLED') - - -# https://github.com/spulec/moto/issues/1043 -@mock_dynamodb2 -def test_query_missing_expr_names(): - client = boto3.client('dynamodb', region_name='us-east-1') - - # Create the DynamoDB table. 
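- # Both query styles below should work: a literal attribute name on the
- # left-hand side ('client=:client') and an ExpressionAttributeNames
- # placeholder there instead (':name=test2' with ':name' mapped to 'client').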
- client.create_table( - TableName='test1', - AttributeDefinitions=[{'AttributeName': 'client', 'AttributeType': 'S'}, {'AttributeName': 'app', 'AttributeType': 'S'}], - KeySchema=[{'AttributeName': 'client', 'KeyType': 'HASH'}, {'AttributeName': 'app', 'KeyType': 'RANGE'}], - ProvisionedThroughput={'ReadCapacityUnits': 123, 'WriteCapacityUnits': 123} - ) - client.put_item(TableName='test1', Item={'client': {'S': 'test1'}, 'app': {'S': 'test1'}}) - client.put_item(TableName='test1', Item={'client': {'S': 'test2'}, 'app': {'S': 'test2'}}) - - resp = client.query(TableName='test1', KeyConditionExpression='client=:client', - ExpressionAttributeValues={':client': {'S': 'test1'}}) - - resp['Count'].should.equal(1) - resp['Items'][0]['client']['S'].should.equal('test1') - - resp = client.query(TableName='test1', KeyConditionExpression=':name=test2', - ExpressionAttributeNames={':name': 'client'}) - - resp['Count'].should.equal(1) - resp['Items'][0]['client']['S'].should.equal('test2') - - -# https://github.com/spulec/moto/issues/1342 -@mock_dynamodb2 -def test_update_item_on_map(): - dynamodb = boto3.resource('dynamodb', region_name='us-east-1') - client = boto3.client('dynamodb', region_name='us-east-1') - - # Create the DynamoDB table. - dynamodb.create_table( - TableName='users', - KeySchema=[ - { - 'AttributeName': 'forum_name', - 'KeyType': 'HASH' - }, - { - 'AttributeName': 'subject', - 'KeyType': 'RANGE' - }, - ], - AttributeDefinitions=[ - { - 'AttributeName': 'forum_name', - 'AttributeType': 'S' - }, - { - 'AttributeName': 'subject', - 'AttributeType': 'S' - }, - ], - ProvisionedThroughput={ - 'ReadCapacityUnits': 5, - 'WriteCapacityUnits': 5 - } - ) - table = dynamodb.Table('users') - - table.put_item(Item={ - 'forum_name': 'the-key', - 'subject': '123', - 'body': {'nested': {'data': 'test'}}, - }) - - resp = table.scan() - resp['Items'][0]['body'].should.equal({'nested': {'data': 'test'}}) - - # Nonexistent nested attributes are supported for existing top-level attributes. - table.update_item(Key={ - 'forum_name': 'the-key', - 'subject': '123' - }, - UpdateExpression='SET body.#nested.#data = :tb, body.nested.#nonexistentnested.#data = :tb2', - ExpressionAttributeNames={ - '#nested': 'nested', - '#nonexistentnested': 'nonexistentnested', - '#data': 'data' - }, - ExpressionAttributeValues={ - ':tb': 'new_value', - ':tb2': 'other_value' - }) - - resp = table.scan() - resp['Items'][0]['body'].should.equal({ - 'nested': { - 'data': 'new_value', - 'nonexistentnested': {'data': 'other_value'} - } - }) - - # Test nested value for a nonexistent attribute. - with assert_raises(client.exceptions.ConditionalCheckFailedException): - table.update_item(Key={ - 'forum_name': 'the-key', - 'subject': '123' - }, - UpdateExpression='SET nonexistent.#nested = :tb', - ExpressionAttributeNames={ - '#nested': 'nested' - }, - ExpressionAttributeValues={ - ':tb': 'new_value' - }) - - - -# https://github.com/spulec/moto/issues/1358 -@mock_dynamodb2 -def test_update_if_not_exists(): - dynamodb = boto3.resource('dynamodb', region_name='us-east-1') - - # Create the DynamoDB table. 
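- # if_not_exists(attr, value) assigns only when the attribute is absent:
- # the first update sets created_at to 123, and the second update (written
- # with a space before the parenthesis) must leave that value unchanged.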
- dynamodb.create_table( - TableName='users', - KeySchema=[ - { - 'AttributeName': 'forum_name', - 'KeyType': 'HASH' - }, - { - 'AttributeName': 'subject', - 'KeyType': 'RANGE' - }, - ], - AttributeDefinitions=[ - { - 'AttributeName': 'forum_name', - 'AttributeType': 'S' - }, - { - 'AttributeName': 'subject', - 'AttributeType': 'S' - }, - ], - ProvisionedThroughput={ - 'ReadCapacityUnits': 5, - 'WriteCapacityUnits': 5 - } - ) - table = dynamodb.Table('users') - - table.put_item(Item={ - 'forum_name': 'the-key', - 'subject': '123' - }) - - table.update_item(Key={ - 'forum_name': 'the-key', - 'subject': '123' - }, - # if_not_exists without space - UpdateExpression='SET created_at=if_not_exists(created_at,:created_at)', - ExpressionAttributeValues={ - ':created_at': 123 - } - ) - - resp = table.scan() - assert resp['Items'][0]['created_at'] == 123 - - table.update_item(Key={ - 'forum_name': 'the-key', - 'subject': '123' - }, - # if_not_exists with space - UpdateExpression='SET created_at = if_not_exists (created_at, :created_at)', - ExpressionAttributeValues={ - ':created_at': 456 - } - ) - - resp = table.scan() - # Still the original value - assert resp['Items'][0]['created_at'] == 123 - - -@mock_dynamodb2 -def test_query_global_secondary_index_when_created_via_update_table_resource(): - dynamodb = boto3.resource('dynamodb', region_name='us-east-1') - - # Create the DynamoDB table. - dynamodb.create_table( - TableName='users', - KeySchema=[ - { - 'AttributeName': 'user_id', - 'KeyType': 'HASH' - }, - ], - AttributeDefinitions=[ - { - 'AttributeName': 'user_id', - 'AttributeType': 'N', - }, - { - 'AttributeName': 'forum_name', - 'AttributeType': 'S' - }, - { - 'AttributeName': 'subject', - 'AttributeType': 'S' - }, - ], - ProvisionedThroughput={ - 'ReadCapacityUnits': 5, - 'WriteCapacityUnits': 5 - }, - ) - table = dynamodb.Table('users') - table.update( - AttributeDefinitions=[ - { - 'AttributeName': 'forum_name', - 'AttributeType': 'S' - }, - ], - GlobalSecondaryIndexUpdates=[ - {'Create': - { - 'IndexName': 'forum_name_index', - 'KeySchema': [ - { - 'AttributeName': 'forum_name', - 'KeyType': 'HASH', - }, - ], - 'Projection': { - 'ProjectionType': 'ALL', - }, - 'ProvisionedThroughput': { - 'ReadCapacityUnits': 5, - 'WriteCapacityUnits': 5 - }, - } - } - ] - ) - - next_user_id = 1 - for my_forum_name in ['cats', 'dogs']: - for my_subject in ['my pet is the cutest', 'wow look at what my pet did', "don't you love my pet?"]: - table.put_item(Item={'user_id': next_user_id, 'forum_name': my_forum_name, 'subject': my_subject}) - next_user_id += 1 - - # get all the cat users - forum_only_query_response = table.query( - IndexName='forum_name_index', - Select='ALL_ATTRIBUTES', - KeyConditionExpression=Key('forum_name').eq('cats'), - ) - forum_only_items = forum_only_query_response['Items'] - assert len(forum_only_items) == 3 - for item in forum_only_items: - assert item['forum_name'] == 'cats' - - # query all cat users with a particular subject - forum_and_subject_query_results = table.query( - IndexName='forum_name_index', - Select='ALL_ATTRIBUTES', - KeyConditionExpression=Key('forum_name').eq('cats'), - FilterExpression=Attr('subject').eq('my pet is the cutest'), - ) - forum_and_subject_items = forum_and_subject_query_results['Items'] - assert len(forum_and_subject_items) == 1 - assert forum_and_subject_items[0] == {'user_id': Decimal('1'), 'forum_name': 'cats', - 'subject': 'my pet is the cutest'} +from __future__ import unicode_literals, print_function + +from decimal import Decimal + +import 
six +import boto +import boto3 +from boto3.dynamodb.conditions import Attr, Key +import sure # noqa +import requests +from moto import mock_dynamodb2, mock_dynamodb2_deprecated +from moto.dynamodb2 import dynamodb_backend2 +from boto.exception import JSONResponseError +from botocore.exceptions import ClientError +from tests.helpers import requires_boto_gte +import tests.backport_assert_raises + +import moto.dynamodb2.comparisons +import moto.dynamodb2.models + +from nose.tools import assert_raises +try: + import boto.dynamodb2 +except ImportError: + print("This boto version is not supported") + + +@requires_boto_gte("2.9") +@mock_dynamodb2_deprecated +def test_list_tables(): + name = 'TestTable' + # Should make tables properly with boto + dynamodb_backend2.create_table(name, schema=[ + {u'KeyType': u'HASH', u'AttributeName': u'forum_name'}, + {u'KeyType': u'RANGE', u'AttributeName': u'subject'} + ]) + conn = boto.dynamodb2.connect_to_region( + 'us-east-1', + aws_access_key_id="ak", + aws_secret_access_key="sk") + assert conn.list_tables()["TableNames"] == [name] + + +@requires_boto_gte("2.9") +@mock_dynamodb2_deprecated +def test_list_tables_layer_1(): + # Should make tables properly with boto + dynamodb_backend2.create_table("test_1", schema=[ + {u'KeyType': u'HASH', u'AttributeName': u'name'} + ]) + dynamodb_backend2.create_table("test_2", schema=[ + {u'KeyType': u'HASH', u'AttributeName': u'name'} + ]) + conn = boto.dynamodb2.connect_to_region( + 'us-east-1', + aws_access_key_id="ak", + aws_secret_access_key="sk") + + res = conn.list_tables(limit=1) + expected = {"TableNames": ["test_1"], "LastEvaluatedTableName": "test_1"} + res.should.equal(expected) + + res = conn.list_tables(limit=1, exclusive_start_table_name="test_1") + expected = {"TableNames": ["test_2"]} + res.should.equal(expected) + + +@requires_boto_gte("2.9") +@mock_dynamodb2_deprecated +def test_describe_missing_table(): + conn = boto.dynamodb2.connect_to_region( + 'us-west-2', + aws_access_key_id="ak", + aws_secret_access_key="sk") + with assert_raises(JSONResponseError): + conn.describe_table('messages') + + +@requires_boto_gte("2.9") +@mock_dynamodb2 +def test_list_table_tags(): + name = 'TestTable' + conn = boto3.client('dynamodb', + region_name='us-west-2', + aws_access_key_id="ak", + aws_secret_access_key="sk") + conn.create_table(TableName=name, + KeySchema=[{'AttributeName':'id','KeyType':'HASH'}], + AttributeDefinitions=[{'AttributeName':'id','AttributeType':'S'}], + ProvisionedThroughput={'ReadCapacityUnits':5,'WriteCapacityUnits':5}) + table_description = conn.describe_table(TableName=name) + arn = table_description['Table']['TableArn'] + + # Tag table + tags = [{'Key': 'TestTag', 'Value': 'TestValue'}, {'Key': 'TestTag2', 'Value': 'TestValue2'}] + conn.tag_resource(ResourceArn=arn, Tags=tags) + + # Check tags + resp = conn.list_tags_of_resource(ResourceArn=arn) + assert resp["Tags"] == tags + + # Remove 1 tag + conn.untag_resource(ResourceArn=arn, TagKeys=['TestTag']) + + # Check tags + resp = conn.list_tags_of_resource(ResourceArn=arn) + assert resp["Tags"] == [{'Key': 'TestTag2', 'Value': 'TestValue2'}] + + +@requires_boto_gte("2.9") +@mock_dynamodb2 +def test_list_table_tags_empty(): + name = 'TestTable' + conn = boto3.client('dynamodb', + region_name='us-west-2', + aws_access_key_id="ak", + aws_secret_access_key="sk") + conn.create_table(TableName=name, + KeySchema=[{'AttributeName':'id','KeyType':'HASH'}], + AttributeDefinitions=[{'AttributeName':'id','AttributeType':'S'}], + 
ProvisionedThroughput={'ReadCapacityUnits':5,'WriteCapacityUnits':5}) + table_description = conn.describe_table(TableName=name) + arn = table_description['Table']['TableArn'] + tags = [{'Key':'TestTag', 'Value': 'TestValue'}] + # conn.tag_resource(ResourceArn=arn, + # Tags=tags) + resp = conn.list_tags_of_resource(ResourceArn=arn) + assert resp["Tags"] == [] + + +@requires_boto_gte("2.9") +@mock_dynamodb2 +def test_list_table_tags_paginated(): + name = 'TestTable' + conn = boto3.client('dynamodb', + region_name='us-west-2', + aws_access_key_id="ak", + aws_secret_access_key="sk") + conn.create_table(TableName=name, + KeySchema=[{'AttributeName':'id','KeyType':'HASH'}], + AttributeDefinitions=[{'AttributeName':'id','AttributeType':'S'}], + ProvisionedThroughput={'ReadCapacityUnits':5,'WriteCapacityUnits':5}) + table_description = conn.describe_table(TableName=name) + arn = table_description['Table']['TableArn'] + for i in range(11): + tags = [{'Key':'TestTag%d' % i, 'Value': 'TestValue'}] + conn.tag_resource(ResourceArn=arn, + Tags=tags) + resp = conn.list_tags_of_resource(ResourceArn=arn) + assert len(resp["Tags"]) == 10 + assert 'NextToken' in resp.keys() + resp2 = conn.list_tags_of_resource(ResourceArn=arn, + NextToken=resp['NextToken']) + assert len(resp2["Tags"]) == 1 + assert 'NextToken' not in resp2.keys() + + +@requires_boto_gte("2.9") +@mock_dynamodb2 +def test_list_not_found_table_tags(): + conn = boto3.client('dynamodb', + region_name='us-west-2', + aws_access_key_id="ak", + aws_secret_access_key="sk") + arn = 'DymmyArn' + try: + conn.list_tags_of_resource(ResourceArn=arn) + except ClientError as exception: + assert exception.response['Error']['Code'] == "ResourceNotFoundException" + + +@requires_boto_gte("2.9") +@mock_dynamodb2 +def test_item_add_empty_string_exception(): + name = 'TestTable' + conn = boto3.client('dynamodb', + region_name='us-west-2', + aws_access_key_id="ak", + aws_secret_access_key="sk") + conn.create_table(TableName=name, + KeySchema=[{'AttributeName':'forum_name','KeyType':'HASH'}], + AttributeDefinitions=[{'AttributeName':'forum_name','AttributeType':'S'}], + ProvisionedThroughput={'ReadCapacityUnits':5,'WriteCapacityUnits':5}) + + with assert_raises(ClientError) as ex: + conn.put_item( + TableName=name, + Item={ + 'forum_name': { 'S': 'LOLCat Forum' }, + 'subject': { 'S': 'Check this out!' }, + 'Body': { 'S': 'http://url_to_lolcat.gif'}, + 'SentBy': { 'S': "" }, + 'ReceivedTime': { 'S': '12/9/2011 11:36:03 PM'}, + } + ) + + ex.exception.response['Error']['Code'].should.equal('ValidationException') + ex.exception.response['ResponseMetadata']['HTTPStatusCode'].should.equal(400) + ex.exception.response['Error']['Message'].should.equal( + 'One or more parameter values were invalid: An AttributeValue may not contain an empty string' + ) + + +@requires_boto_gte("2.9") +@mock_dynamodb2 +def test_update_item_with_empty_string_exception(): + name = 'TestTable' + conn = boto3.client('dynamodb', + region_name='us-west-2', + aws_access_key_id="ak", + aws_secret_access_key="sk") + conn.create_table(TableName=name, + KeySchema=[{'AttributeName':'forum_name','KeyType':'HASH'}], + AttributeDefinitions=[{'AttributeName':'forum_name','AttributeType':'S'}], + ProvisionedThroughput={'ReadCapacityUnits':5,'WriteCapacityUnits':5}) + + conn.put_item( + TableName=name, + Item={ + 'forum_name': { 'S': 'LOLCat Forum' }, + 'subject': { 'S': 'Check this out!' 
}, + 'Body': { 'S': 'http://url_to_lolcat.gif'}, + 'SentBy': { 'S': "test" }, + 'ReceivedTime': { 'S': '12/9/2011 11:36:03 PM'}, + } + ) + + with assert_raises(ClientError) as ex: + conn.update_item( + TableName=name, + Key={ + 'forum_name': { 'S': 'LOLCat Forum'}, + }, + UpdateExpression='set Body=:Body', + ExpressionAttributeValues={ + ':Body': {'S': ''} + }) + + ex.exception.response['Error']['Code'].should.equal('ValidationException') + ex.exception.response['ResponseMetadata']['HTTPStatusCode'].should.equal(400) + ex.exception.response['Error']['Message'].should.equal( + 'One or more parameter values were invalid: An AttributeValue may not contain an empty string' + ) + + +@requires_boto_gte("2.9") +@mock_dynamodb2 +def test_query_invalid_table(): + conn = boto3.client('dynamodb', + region_name='us-west-2', + aws_access_key_id="ak", + aws_secret_access_key="sk") + try: + conn.query(TableName='invalid_table', KeyConditionExpression='index1 = :partitionkeyval', ExpressionAttributeValues={':partitionkeyval': {'S':'test'}}) + except ClientError as exception: + assert exception.response['Error']['Code'] == "ResourceNotFoundException" + + +@requires_boto_gte("2.9") +@mock_dynamodb2 +def test_scan_returns_consumed_capacity(): + name = 'TestTable' + conn = boto3.client('dynamodb', + region_name='us-west-2', + aws_access_key_id="ak", + aws_secret_access_key="sk") + + conn.create_table(TableName=name, + KeySchema=[{'AttributeName':'forum_name','KeyType':'HASH'}], + AttributeDefinitions=[{'AttributeName':'forum_name','AttributeType':'S'}], + ProvisionedThroughput={'ReadCapacityUnits':5,'WriteCapacityUnits':5}) + + conn.put_item( + TableName=name, + Item={ + 'forum_name': { 'S': 'LOLCat Forum' }, + 'subject': { 'S': 'Check this out!' }, + 'Body': { 'S': 'http://url_to_lolcat.gif'}, + 'SentBy': { 'S': "test" }, + 'ReceivedTime': { 'S': '12/9/2011 11:36:03 PM'}, + } + ) + + response = conn.scan( + TableName=name, + ) + + assert 'ConsumedCapacity' in response + assert 'CapacityUnits' in response['ConsumedCapacity'] + assert response['ConsumedCapacity']['TableName'] == name + + +@requires_boto_gte("2.9") +@mock_dynamodb2 +def test_put_item_with_special_chars(): + name = 'TestTable' + conn = boto3.client('dynamodb', + region_name='us-west-2', + aws_access_key_id="ak", + aws_secret_access_key="sk") + + conn.create_table(TableName=name, + KeySchema=[{'AttributeName':'forum_name','KeyType':'HASH'}], + AttributeDefinitions=[{'AttributeName':'forum_name','AttributeType':'S'}], + ProvisionedThroughput={'ReadCapacityUnits':5,'WriteCapacityUnits':5}) + + conn.put_item( + TableName=name, + Item={ + 'forum_name': { 'S': 'LOLCat Forum' }, + 'subject': { 'S': 'Check this out!' }, + 'Body': { 'S': 'http://url_to_lolcat.gif'}, + 'SentBy': { 'S': "test" }, + 'ReceivedTime': { 'S': '12/9/2011 11:36:03 PM'}, + '"': {"S": "foo"}, + } + ) + + +@requires_boto_gte("2.9") +@mock_dynamodb2 +def test_query_returns_consumed_capacity(): + dynamodb = boto3.resource('dynamodb', region_name='us-east-1') + + # Create the DynamoDB table. 
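+ # The table uses a composite primary key: 'forum_name' as the hash key and
+ # 'subject' as the range key, both declared as string ('S') attributes.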
+ table = dynamodb.create_table( + TableName='users', + KeySchema=[ + { + 'AttributeName': 'forum_name', + 'KeyType': 'HASH' + }, + { + 'AttributeName': 'subject', + 'KeyType': 'RANGE' + }, + ], + AttributeDefinitions=[ + { + 'AttributeName': 'forum_name', + 'AttributeType': 'S' + }, + { + 'AttributeName': 'subject', + 'AttributeType': 'S' + }, + ], + ProvisionedThroughput={ + 'ReadCapacityUnits': 5, + 'WriteCapacityUnits': 5 + } + ) + table = dynamodb.Table('users') + + table.put_item(Item={ + 'forum_name': 'the-key', + 'subject': '123', + 'body': 'some test message' + }) + + results = table.query( + KeyConditionExpression=Key('forum_name').eq( + 'the-key') + ) + + assert 'ConsumedCapacity' in results + assert 'CapacityUnits' in results['ConsumedCapacity'] + assert results['ConsumedCapacity']['CapacityUnits'] == 1 + + +@mock_dynamodb2 +def test_basic_projection_expressions(): + dynamodb = boto3.resource('dynamodb', region_name='us-east-1') + + # Create the DynamoDB table. + table = dynamodb.create_table( + TableName='users', + KeySchema=[ + { + 'AttributeName': 'forum_name', + 'KeyType': 'HASH' + }, + { + 'AttributeName': 'subject', + 'KeyType': 'RANGE' + }, + ], + AttributeDefinitions=[ + { + 'AttributeName': 'forum_name', + 'AttributeType': 'S' + }, + { + 'AttributeName': 'subject', + 'AttributeType': 'S' + }, + ], + ProvisionedThroughput={ + 'ReadCapacityUnits': 5, + 'WriteCapacityUnits': 5 + } + ) + table = dynamodb.Table('users') + + table.put_item(Item={ + 'forum_name': 'the-key', + 'subject': '123', + 'body': 'some test message' + }) + + table.put_item(Item={ + 'forum_name': 'not-the-key', + 'subject': '123', + 'body': 'some other test message' + }) + # Test a query returning all items + results = table.query( + KeyConditionExpression=Key('forum_name').eq( + 'the-key'), + ProjectionExpression='body, subject' + ) + + assert 'body' in results['Items'][0] + assert results['Items'][0]['body'] == 'some test message' + assert 'subject' in results['Items'][0] + + table.put_item(Item={ + 'forum_name': 'the-key', + 'subject': '1234', + 'body': 'yet another test message' + }) + + results = table.query( + KeyConditionExpression=Key('forum_name').eq( + 'the-key'), + ProjectionExpression='body' + ) + + assert 'body' in results['Items'][0] + assert 'subject' not in results['Items'][0] + assert results['Items'][0]['body'] == 'some test message' + assert 'body' in results['Items'][1] + assert 'subject' not in results['Items'][1] + assert results['Items'][1]['body'] == 'yet another test message' + + # The projection expression should not remove data from storage + results = table.query( + KeyConditionExpression=Key('forum_name').eq( + 'the-key'), + ) + assert 'subject' in results['Items'][0] + assert 'body' in results['Items'][1] + assert 'forum_name' in results['Items'][1] + + +@mock_dynamodb2 +def test_basic_projection_expressions_with_attr_expression_names(): + dynamodb = boto3.resource('dynamodb', region_name='us-east-1') + + # Create the DynamoDB table. 
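+ # ExpressionAttributeNames maps '#'-prefixed placeholders to real attribute
+ # names, so the projection '#rl, #rt, subject' below resolves to 'body',
+ # 'attachment' and 'subject' without spelling those names out directly.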
+ table = dynamodb.create_table( + TableName='users', + KeySchema=[ + { + 'AttributeName': 'forum_name', + 'KeyType': 'HASH' + }, + { + 'AttributeName': 'subject', + 'KeyType': 'RANGE' + }, + ], + AttributeDefinitions=[ + { + 'AttributeName': 'forum_name', + 'AttributeType': 'S' + }, + { + 'AttributeName': 'subject', + 'AttributeType': 'S' + }, + ], + ProvisionedThroughput={ + 'ReadCapacityUnits': 5, + 'WriteCapacityUnits': 5 + } + ) + table = dynamodb.Table('users') + + table.put_item(Item={ + 'forum_name': 'the-key', + 'subject': '123', + 'body': 'some test message', + 'attachment': 'something' + }) + + table.put_item(Item={ + 'forum_name': 'not-the-key', + 'subject': '123', + 'body': 'some other test message', + 'attachment': 'something' + }) + # Test a query returning all items + + results = table.query( + KeyConditionExpression=Key('forum_name').eq( + 'the-key'), + ProjectionExpression='#rl, #rt, subject', + ExpressionAttributeNames={ + '#rl': 'body', + '#rt': 'attachment' + }, + ) + + assert 'body' in results['Items'][0] + assert results['Items'][0]['body'] == 'some test message' + assert 'subject' in results['Items'][0] + assert results['Items'][0]['subject'] == '123' + assert 'attachment' in results['Items'][0] + assert results['Items'][0]['attachment'] == 'something' + + +@mock_dynamodb2 +def test_put_item_returns_consumed_capacity(): + dynamodb = boto3.resource('dynamodb', region_name='us-east-1') + + # Create the DynamoDB table. + table = dynamodb.create_table( + TableName='users', + KeySchema=[ + { + 'AttributeName': 'forum_name', + 'KeyType': 'HASH' + }, + { + 'AttributeName': 'subject', + 'KeyType': 'RANGE' + }, + ], + AttributeDefinitions=[ + { + 'AttributeName': 'forum_name', + 'AttributeType': 'S' + }, + { + 'AttributeName': 'subject', + 'AttributeType': 'S' + }, + ], + ProvisionedThroughput={ + 'ReadCapacityUnits': 5, + 'WriteCapacityUnits': 5 + } + ) + table = dynamodb.Table('users') + + response = table.put_item(Item={ + 'forum_name': 'the-key', + 'subject': '123', + 'body': 'some test message', + }) + + assert 'ConsumedCapacity' in response + + +@mock_dynamodb2 +def test_update_item_returns_consumed_capacity(): + dynamodb = boto3.resource('dynamodb', region_name='us-east-1') + + # Create the DynamoDB table. + table = dynamodb.create_table( + TableName='users', + KeySchema=[ + { + 'AttributeName': 'forum_name', + 'KeyType': 'HASH' + }, + { + 'AttributeName': 'subject', + 'KeyType': 'RANGE' + }, + ], + AttributeDefinitions=[ + { + 'AttributeName': 'forum_name', + 'AttributeType': 'S' + }, + { + 'AttributeName': 'subject', + 'AttributeType': 'S' + }, + ], + ProvisionedThroughput={ + 'ReadCapacityUnits': 5, + 'WriteCapacityUnits': 5 + } + ) + table = dynamodb.Table('users') + + table.put_item(Item={ + 'forum_name': 'the-key', + 'subject': '123', + 'body': 'some test message', + }) + + response = table.update_item(Key={ + 'forum_name': 'the-key', + 'subject': '123' + }, + UpdateExpression='set body=:tb', + ExpressionAttributeValues={ + ':tb': 'a new message' + }) + + assert 'ConsumedCapacity' in response + assert 'CapacityUnits' in response['ConsumedCapacity'] + assert 'TableName' in response['ConsumedCapacity'] + + +@mock_dynamodb2 +def test_get_item_returns_consumed_capacity(): + dynamodb = boto3.resource('dynamodb', region_name='us-east-1') + + # Create the DynamoDB table. 
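+ # As with the put_item and update_item variants above, the response is
+ # expected to carry a ConsumedCapacity map with 'CapacityUnits' and
+ # 'TableName' entries.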
+ table = dynamodb.create_table( + TableName='users', + KeySchema=[ + { + 'AttributeName': 'forum_name', + 'KeyType': 'HASH' + }, + { + 'AttributeName': 'subject', + 'KeyType': 'RANGE' + }, + ], + AttributeDefinitions=[ + { + 'AttributeName': 'forum_name', + 'AttributeType': 'S' + }, + { + 'AttributeName': 'subject', + 'AttributeType': 'S' + }, + ], + ProvisionedThroughput={ + 'ReadCapacityUnits': 5, + 'WriteCapacityUnits': 5 + } + ) + table = dynamodb.Table('users') + + table.put_item(Item={ + 'forum_name': 'the-key', + 'subject': '123', + 'body': 'some test message', + }) + + response = table.get_item(Key={ + 'forum_name': 'the-key', + 'subject': '123' + }) + + assert 'ConsumedCapacity' in response + assert 'CapacityUnits' in response['ConsumedCapacity'] + assert 'TableName' in response['ConsumedCapacity'] + + +def test_filter_expression(): + row1 = moto.dynamodb2.models.Item(None, None, None, None, {'Id': {'N': '8'}, 'Subs': {'N': '5'}, 'Desc': {'S': 'Some description'}, 'KV': {'SS': ['test1', 'test2']}}) + row2 = moto.dynamodb2.models.Item(None, None, None, None, {'Id': {'N': '8'}, 'Subs': {'N': '10'}, 'Desc': {'S': 'A description'}, 'KV': {'SS': ['test3', 'test4']}}) + + # NOT test 1 + filter_expr = moto.dynamodb2.comparisons.get_filter_expression('NOT attribute_not_exists(Id)', {}, {}) + filter_expr.expr(row1).should.be(True) + + # NOT test 2 + filter_expr = moto.dynamodb2.comparisons.get_filter_expression('NOT (Id = :v0)', {}, {':v0': {'N': 8}}) + filter_expr.expr(row1).should.be(False) # Id = 8 so should be false + + # AND test + filter_expr = moto.dynamodb2.comparisons.get_filter_expression('Id > :v0 AND Subs < :v1', {}, {':v0': {'N': 5}, ':v1': {'N': 7}}) + filter_expr.expr(row1).should.be(True) + filter_expr.expr(row2).should.be(False) + + # OR test + filter_expr = moto.dynamodb2.comparisons.get_filter_expression('Id = :v0 OR Id=:v1', {}, {':v0': {'N': 5}, ':v1': {'N': 8}}) + filter_expr.expr(row1).should.be(True) + + # BETWEEN test + filter_expr = moto.dynamodb2.comparisons.get_filter_expression('Id BETWEEN :v0 AND :v1', {}, {':v0': {'N': 5}, ':v1': {'N': 10}}) + filter_expr.expr(row1).should.be(True) + + # PAREN test + filter_expr = moto.dynamodb2.comparisons.get_filter_expression('Id = :v0 AND (Subs = :v0 OR Subs = :v1)', {}, {':v0': {'N': 8}, ':v1': {'N': 5}}) + filter_expr.expr(row1).should.be(True) + + # IN test + filter_expr = moto.dynamodb2.comparisons.get_filter_expression('Id IN :v0', {}, {':v0': {'NS': [7, 8, 9]}}) + filter_expr.expr(row1).should.be(True) + + # attribute function tests (with extra spaces) + filter_expr = moto.dynamodb2.comparisons.get_filter_expression('attribute_exists(Id) AND attribute_not_exists (User)', {}, {}) + filter_expr.expr(row1).should.be(True) + + filter_expr = moto.dynamodb2.comparisons.get_filter_expression('attribute_type(Id, N)', {}, {}) + filter_expr.expr(row1).should.be(True) + + # beginswith function test + filter_expr = moto.dynamodb2.comparisons.get_filter_expression('begins_with(Desc, Some)', {}, {}) + filter_expr.expr(row1).should.be(True) + filter_expr.expr(row2).should.be(False) + + # contains function test + filter_expr = moto.dynamodb2.comparisons.get_filter_expression('contains(KV, test1)', {}, {}) + filter_expr.expr(row1).should.be(True) + filter_expr.expr(row2).should.be(False) + + # size function test + filter_expr = moto.dynamodb2.comparisons.get_filter_expression('size(Desc) > size(KV)', {}, {}) + filter_expr.expr(row1).should.be(True) + + # Expression from @batkuip + filter_expr = 
moto.dynamodb2.comparisons.get_filter_expression(
+        '(#n0 < :v0 AND attribute_not_exists(#n1))',
+        {'#n0': 'Subs', '#n1': 'fanout_ts'},
+        {':v0': {'N': '7'}}
+    )
+    filter_expr.expr(row1).should.be(True)
+    # Expression to check contains() on a string value
+    filter_expr = moto.dynamodb2.comparisons.get_filter_expression(
+        'contains(#n0, :v0)',
+        {'#n0': 'Desc'},
+        {':v0': {'S': 'Some'}}
+    )
+    filter_expr.expr(row1).should.be(True)
+    filter_expr.expr(row2).should.be(False)
+
+
+@mock_dynamodb2
+def test_query_filter():
+    client = boto3.client('dynamodb', region_name='us-east-1')
+    dynamodb = boto3.resource('dynamodb', region_name='us-east-1')
+
+    # Create the DynamoDB table.
+    client.create_table(
+        TableName='test1',
+        AttributeDefinitions=[{'AttributeName': 'client', 'AttributeType': 'S'}, {'AttributeName': 'app', 'AttributeType': 'S'}],
+        KeySchema=[{'AttributeName': 'client', 'KeyType': 'HASH'}, {'AttributeName': 'app', 'KeyType': 'RANGE'}],
+        ProvisionedThroughput={'ReadCapacityUnits': 123, 'WriteCapacityUnits': 123}
+    )
+    client.put_item(
+        TableName='test1',
+        Item={
+            'client': {'S': 'client1'},
+            'app': {'S': 'app1'}
+        }
+    )
+    client.put_item(
+        TableName='test1',
+        Item={
+            'client': {'S': 'client1'},
+            'app': {'S': 'app2'}
+        }
+    )
+
+    table = dynamodb.Table('test1')
+    response = table.query(
+        KeyConditionExpression=Key('client').eq('client1')
+    )
+    assert response['Count'] == 2
+
+    response = table.query(
+        KeyConditionExpression=Key('client').eq('client1'),
+        FilterExpression=Attr('app').eq('app2')
+    )
+    assert response['Count'] == 1
+    assert response['Items'][0]['app'] == 'app2'
+    response = table.query(
+        KeyConditionExpression=Key('client').eq('client1'),
+        FilterExpression=Attr('app').contains('app')
+    )
+    assert response['Count'] == 2
+
+
+@mock_dynamodb2
+def test_scan_filter():
+    client = boto3.client('dynamodb', region_name='us-east-1')
+    dynamodb = boto3.resource('dynamodb', region_name='us-east-1')
+
+    # Create the DynamoDB table.
+    client.create_table(
+        TableName='test1',
+        AttributeDefinitions=[{'AttributeName': 'client', 'AttributeType': 'S'}, {'AttributeName': 'app', 'AttributeType': 'S'}],
+        KeySchema=[{'AttributeName': 'client', 'KeyType': 'HASH'}, {'AttributeName': 'app', 'KeyType': 'RANGE'}],
+        ProvisionedThroughput={'ReadCapacityUnits': 123, 'WriteCapacityUnits': 123}
+    )
+    client.put_item(
+        TableName='test1',
+        Item={
+            'client': {'S': 'client1'},
+            'app': {'S': 'app1'}
+        }
+    )
+
+    table = dynamodb.Table('test1')
+    response = table.scan(
+        FilterExpression=Attr('app').eq('app2')
+    )
+    assert response['Count'] == 0
+
+    response = table.scan(
+        FilterExpression=Attr('app').eq('app1')
+    )
+    assert response['Count'] == 1
+
+
+@mock_dynamodb2
+def test_scan_filter2():
+    client = boto3.client('dynamodb', region_name='us-east-1')
+
+    # Create the DynamoDB table.
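+    # This variant exercises Scan through the low-level client, so the filter
+    # is a raw expression string plus ExpressionAttributeNames/-Values rather
+    # than boto3's condition builder. A roughly equivalent resource-level
+    # call (sketch, not part of the test) would be:
+    #     table.scan(FilterExpression=Attr('app').gte(1))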
+    client.create_table(
+        TableName='test1',
+        AttributeDefinitions=[{'AttributeName': 'client', 'AttributeType': 'S'}, {'AttributeName': 'app', 'AttributeType': 'N'}],
+        KeySchema=[{'AttributeName': 'client', 'KeyType': 'HASH'}, {'AttributeName': 'app', 'KeyType': 'RANGE'}],
+        ProvisionedThroughput={'ReadCapacityUnits': 123, 'WriteCapacityUnits': 123}
+    )
+    client.put_item(
+        TableName='test1',
+        Item={
+            'client': {'S': 'client1'},
+            'app': {'N': '1'}
+        }
+    )
+
+    response = client.scan(
+        TableName='test1',
+        Select='ALL_ATTRIBUTES',
+        FilterExpression='#tb >= :dt',
+        ExpressionAttributeNames={"#tb": "app"},
+        ExpressionAttributeValues={":dt": {"N": str(1)}}
+    )
+    assert response['Count'] == 1
+
+
+@mock_dynamodb2
+def test_scan_filter3():
+    client = boto3.client('dynamodb', region_name='us-east-1')
+    dynamodb = boto3.resource('dynamodb', region_name='us-east-1')
+
+    # Create the DynamoDB table.
+    client.create_table(
+        TableName='test1',
+        AttributeDefinitions=[{'AttributeName': 'client', 'AttributeType': 'S'}, {'AttributeName': 'app', 'AttributeType': 'N'}],
+        KeySchema=[{'AttributeName': 'client', 'KeyType': 'HASH'}, {'AttributeName': 'app', 'KeyType': 'RANGE'}],
+        ProvisionedThroughput={'ReadCapacityUnits': 123, 'WriteCapacityUnits': 123}
+    )
+    client.put_item(
+        TableName='test1',
+        Item={
+            'client': {'S': 'client1'},
+            'app': {'N': '1'},
+            'active': {'BOOL': True}
+        }
+    )
+
+    table = dynamodb.Table('test1')
+    response = table.scan(
+        FilterExpression=Attr('active').eq(True)
+    )
+    assert response['Count'] == 1
+
+
+@mock_dynamodb2
+def test_scan_filter4():
+    client = boto3.client('dynamodb', region_name='us-east-1')
+    dynamodb = boto3.resource('dynamodb', region_name='us-east-1')
+
+    # Create the DynamoDB table.
+    client.create_table(
+        TableName='test1',
+        AttributeDefinitions=[{'AttributeName': 'client', 'AttributeType': 'S'}, {'AttributeName': 'app', 'AttributeType': 'N'}],
+        KeySchema=[{'AttributeName': 'client', 'KeyType': 'HASH'}, {'AttributeName': 'app', 'KeyType': 'RANGE'}],
+        ProvisionedThroughput={'ReadCapacityUnits': 123, 'WriteCapacityUnits': 123}
+    )
+
+    table = dynamodb.Table('test1')
+    response = table.scan(
+        FilterExpression=Attr('epoch_ts').lt(7) & Attr('fanout_ts').not_exists()
+    )
+    # Just testing that the combined filter parses; the table is empty, so nothing matches
+    assert response['Count'] == 0
+
+
+@mock_dynamodb2
+def test_bad_scan_filter():
+    client = boto3.client('dynamodb', region_name='us-east-1')
+    dynamodb = boto3.resource('dynamodb', region_name='us-east-1')
+
+    # Create the DynamoDB table.
+    client.create_table(
+        TableName='test1',
+        AttributeDefinitions=[{'AttributeName': 'client', 'AttributeType': 'S'}, {'AttributeName': 'app', 'AttributeType': 'S'}],
+        KeySchema=[{'AttributeName': 'client', 'KeyType': 'HASH'}, {'AttributeName': 'app', 'KeyType': 'RANGE'}],
+        ProvisionedThroughput={'ReadCapacityUnits': 123, 'WriteCapacityUnits': 123}
+    )
+    table = dynamodb.Table('test1')
+
+    # Bad expression
+    try:
+        table.scan(
+            FilterExpression='client test'
+        )
+    except ClientError as err:
+        err.response['Error']['Code'].should.equal('ValidationError')
+    else:
+        raise RuntimeError('Should have raised ValidationError')
+
+
+@mock_dynamodb2
+def test_duplicate_create():
+    client = boto3.client('dynamodb', region_name='us-east-1')
+
+    # Create the DynamoDB table.
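+    # CreateTable is not idempotent: a second CreateTable with the same
+    # TableName should fail with ResourceInUseException, which is the
+    # behaviour this test pins down for the mock.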
+    client.create_table(
+        TableName='test1',
+        AttributeDefinitions=[{'AttributeName': 'client', 'AttributeType': 'S'}, {'AttributeName': 'app', 'AttributeType': 'S'}],
+        KeySchema=[{'AttributeName': 'client', 'KeyType': 'HASH'}, {'AttributeName': 'app', 'KeyType': 'RANGE'}],
+        ProvisionedThroughput={'ReadCapacityUnits': 123, 'WriteCapacityUnits': 123}
+    )
+
+    try:
+        client.create_table(
+            TableName='test1',
+            AttributeDefinitions=[{'AttributeName': 'client', 'AttributeType': 'S'}, {'AttributeName': 'app', 'AttributeType': 'S'}],
+            KeySchema=[{'AttributeName': 'client', 'KeyType': 'HASH'}, {'AttributeName': 'app', 'KeyType': 'RANGE'}],
+            ProvisionedThroughput={'ReadCapacityUnits': 123, 'WriteCapacityUnits': 123}
+        )
+    except ClientError as err:
+        err.response['Error']['Code'].should.equal('ResourceInUseException')
+    else:
+        raise RuntimeError('Should have raised ResourceInUseException')
+
+
+@mock_dynamodb2
+def test_delete_table():
+    client = boto3.client('dynamodb', region_name='us-east-1')
+
+    # Create the DynamoDB table.
+    client.create_table(
+        TableName='test1',
+        AttributeDefinitions=[{'AttributeName': 'client', 'AttributeType': 'S'}, {'AttributeName': 'app', 'AttributeType': 'S'}],
+        KeySchema=[{'AttributeName': 'client', 'KeyType': 'HASH'}, {'AttributeName': 'app', 'KeyType': 'RANGE'}],
+        ProvisionedThroughput={'ReadCapacityUnits': 123, 'WriteCapacityUnits': 123}
+    )
+
+    client.delete_table(TableName='test1')
+
+    resp = client.list_tables()
+    len(resp['TableNames']).should.equal(0)
+
+    try:
+        client.delete_table(TableName='test1')
+    except ClientError as err:
+        err.response['Error']['Code'].should.equal('ResourceNotFoundException')
+    else:
+        raise RuntimeError('Should have raised ResourceNotFoundException')
+
+
+@mock_dynamodb2
+def test_delete_item():
+    client = boto3.client('dynamodb', region_name='us-east-1')
+    dynamodb = boto3.resource('dynamodb', region_name='us-east-1')
+
+    # Create the DynamoDB table.
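+    # DeleteItem accepts ReturnValues of 'NONE' (the default) or 'ALL_OLD';
+    # with 'ALL_OLD' the deleted item's attributes are echoed back, e.g.
+    # (sketch, not part of this test):
+    #     old = table.delete_item(Key={...}, ReturnValues='ALL_OLD')['Attributes']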
+    client.create_table(
+        TableName='test1',
+        AttributeDefinitions=[{'AttributeName': 'client', 'AttributeType': 'S'}, {'AttributeName': 'app', 'AttributeType': 'S'}],
+        KeySchema=[{'AttributeName': 'client', 'KeyType': 'HASH'}, {'AttributeName': 'app', 'KeyType': 'RANGE'}],
+        ProvisionedThroughput={'ReadCapacityUnits': 123, 'WriteCapacityUnits': 123}
+    )
+    client.put_item(
+        TableName='test1',
+        Item={
+            'client': {'S': 'client1'},
+            'app': {'S': 'app1'}
+        }
+    )
+    client.put_item(
+        TableName='test1',
+        Item={
+            'client': {'S': 'client1'},
+            'app': {'S': 'app2'}
+        }
+    )
+
+    table = dynamodb.Table('test1')
+    response = table.scan()
+    assert response['Count'] == 2
+
+    # Test deletion and returning old value
+    response = table.delete_item(Key={'client': 'client1', 'app': 'app1'}, ReturnValues='ALL_OLD')
+    response['Attributes'].should.contain('client')
+    response['Attributes'].should.contain('app')
+
+    response = table.scan()
+    assert response['Count'] == 1
+
+    # Test deletion returning nothing
+    response = table.delete_item(Key={'client': 'client1', 'app': 'app2'})
+    len(response['Attributes']).should.equal(0)
+
+    response = table.scan()
+    assert response['Count'] == 0
+
+
+@mock_dynamodb2
+def test_describe_limits():
+    client = boto3.client('dynamodb', region_name='eu-central-1')
+    resp = client.describe_limits()
+
+    resp['AccountMaxReadCapacityUnits'].should.equal(20000)
+    resp['AccountMaxWriteCapacityUnits'].should.equal(20000)
+    resp['TableMaxWriteCapacityUnits'].should.equal(10000)
+    resp['TableMaxReadCapacityUnits'].should.equal(10000)
+
+
+@mock_dynamodb2
+def test_set_ttl():
+    client = boto3.client('dynamodb', region_name='us-east-1')
+
+    # Create the DynamoDB table.
+    client.create_table(
+        TableName='test1',
+        AttributeDefinitions=[{'AttributeName': 'client', 'AttributeType': 'S'}, {'AttributeName': 'app', 'AttributeType': 'S'}],
+        KeySchema=[{'AttributeName': 'client', 'KeyType': 'HASH'}, {'AttributeName': 'app', 'KeyType': 'RANGE'}],
+        ProvisionedThroughput={'ReadCapacityUnits': 123, 'WriteCapacityUnits': 123}
+    )
+
+    client.update_time_to_live(
+        TableName='test1',
+        TimeToLiveSpecification={
+            'Enabled': True,
+            'AttributeName': 'expire'
+        }
+    )
+
+    resp = client.describe_time_to_live(TableName='test1')
+    resp['TimeToLiveDescription']['TimeToLiveStatus'].should.equal('ENABLED')
+    resp['TimeToLiveDescription']['AttributeName'].should.equal('expire')
+
+    client.update_time_to_live(
+        TableName='test1',
+        TimeToLiveSpecification={
+            'Enabled': False,
+            'AttributeName': 'expire'
+        }
+    )
+
+    resp = client.describe_time_to_live(TableName='test1')
+    resp['TimeToLiveDescription']['TimeToLiveStatus'].should.equal('DISABLED')
+
+
+# https://github.com/spulec/moto/issues/1043
+@mock_dynamodb2
+def test_query_missing_expr_names():
+    client = boto3.client('dynamodb', region_name='us-east-1')
+
+    # Create the DynamoDB table.
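+    # Regression test for the issue linked above: a query must succeed when
+    # only ExpressionAttributeValues is supplied (names written literally in
+    # the KeyConditionExpression) and when only ExpressionAttributeNames is
+    # supplied, as in the two calls below.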
+    client.create_table(
+        TableName='test1',
+        AttributeDefinitions=[{'AttributeName': 'client', 'AttributeType': 'S'}, {'AttributeName': 'app', 'AttributeType': 'S'}],
+        KeySchema=[{'AttributeName': 'client', 'KeyType': 'HASH'}, {'AttributeName': 'app', 'KeyType': 'RANGE'}],
+        ProvisionedThroughput={'ReadCapacityUnits': 123, 'WriteCapacityUnits': 123}
+    )
+    client.put_item(TableName='test1', Item={'client': {'S': 'test1'}, 'app': {'S': 'test1'}})
+    client.put_item(TableName='test1', Item={'client': {'S': 'test2'}, 'app': {'S': 'test2'}})
+
+    resp = client.query(TableName='test1', KeyConditionExpression='client=:client',
+                        ExpressionAttributeValues={':client': {'S': 'test1'}})
+
+    resp['Count'].should.equal(1)
+    resp['Items'][0]['client']['S'].should.equal('test1')
+
+    resp = client.query(TableName='test1', KeyConditionExpression=':name=test2',
+                        ExpressionAttributeNames={':name': 'client'})
+
+    resp['Count'].should.equal(1)
+    resp['Items'][0]['client']['S'].should.equal('test2')
+
+
+# https://github.com/spulec/moto/issues/1342
+@mock_dynamodb2
+def test_update_item_on_map():
+    dynamodb = boto3.resource('dynamodb', region_name='us-east-1')
+    client = boto3.client('dynamodb', region_name='us-east-1')
+
+    # Create the DynamoDB table.
+    dynamodb.create_table(
+        TableName='users',
+        KeySchema=[
+            {
+                'AttributeName': 'forum_name',
+                'KeyType': 'HASH'
+            },
+            {
+                'AttributeName': 'subject',
+                'KeyType': 'RANGE'
+            },
+        ],
+        AttributeDefinitions=[
+            {
+                'AttributeName': 'forum_name',
+                'AttributeType': 'S'
+            },
+            {
+                'AttributeName': 'subject',
+                'AttributeType': 'S'
+            },
+        ],
+        ProvisionedThroughput={
+            'ReadCapacityUnits': 5,
+            'WriteCapacityUnits': 5
+        }
+    )
+    table = dynamodb.Table('users')
+
+    table.put_item(Item={
+        'forum_name': 'the-key',
+        'subject': '123',
+        'body': {'nested': {'data': 'test'}},
+    })
+
+    resp = table.scan()
+    resp['Items'][0]['body'].should.equal({'nested': {'data': 'test'}})
+
+    # Nonexistent nested attributes are supported for existing top-level attributes.
+    table.update_item(Key={
+        'forum_name': 'the-key',
+        'subject': '123'
+    },
+        UpdateExpression='SET body.#nested.#data = :tb, body.nested.#nonexistentnested.#data = :tb2',
+        ExpressionAttributeNames={
+            '#nested': 'nested',
+            '#nonexistentnested': 'nonexistentnested',
+            '#data': 'data'
+        },
+        ExpressionAttributeValues={
+            ':tb': 'new_value',
+            ':tb2': 'other_value'
+        })
+
+    resp = table.scan()
+    resp['Items'][0]['body'].should.equal({
+        'nested': {
+            'data': 'new_value',
+            'nonexistentnested': {'data': 'other_value'}
+        }
+    })
+
+    # Test nested value for a nonexistent attribute.
+    with assert_raises(client.exceptions.ConditionalCheckFailedException):
+        table.update_item(Key={
+            'forum_name': 'the-key',
+            'subject': '123'
+        },
+            UpdateExpression='SET nonexistent.#nested = :tb',
+            ExpressionAttributeNames={
+                '#nested': 'nested'
+            },
+            ExpressionAttributeValues={
+                ':tb': 'new_value'
+            })
+
+
+# https://github.com/spulec/moto/issues/1358
+@mock_dynamodb2
+def test_update_if_not_exists():
+    dynamodb = boto3.resource('dynamodb', region_name='us-east-1')
+
+    # Create the DynamoDB table.
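+    # if_not_exists(path, operand) evaluates to the attribute's current value
+    # when it exists and to the operand otherwise, making write-once fields
+    # easy to express, e.g. (sketch, arbitrary placeholder):
+    #     UpdateExpression='SET created_at = if_not_exists(created_at, :now)'
+    # The test below checks that both the spaced and unspaced spellings parse.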
+ dynamodb.create_table( + TableName='users', + KeySchema=[ + { + 'AttributeName': 'forum_name', + 'KeyType': 'HASH' + }, + { + 'AttributeName': 'subject', + 'KeyType': 'RANGE' + }, + ], + AttributeDefinitions=[ + { + 'AttributeName': 'forum_name', + 'AttributeType': 'S' + }, + { + 'AttributeName': 'subject', + 'AttributeType': 'S' + }, + ], + ProvisionedThroughput={ + 'ReadCapacityUnits': 5, + 'WriteCapacityUnits': 5 + } + ) + table = dynamodb.Table('users') + + table.put_item(Item={ + 'forum_name': 'the-key', + 'subject': '123' + }) + + table.update_item(Key={ + 'forum_name': 'the-key', + 'subject': '123' + }, + # if_not_exists without space + UpdateExpression='SET created_at=if_not_exists(created_at,:created_at)', + ExpressionAttributeValues={ + ':created_at': 123 + } + ) + + resp = table.scan() + assert resp['Items'][0]['created_at'] == 123 + + table.update_item(Key={ + 'forum_name': 'the-key', + 'subject': '123' + }, + # if_not_exists with space + UpdateExpression='SET created_at = if_not_exists (created_at, :created_at)', + ExpressionAttributeValues={ + ':created_at': 456 + } + ) + + resp = table.scan() + # Still the original value + assert resp['Items'][0]['created_at'] == 123 + + +@mock_dynamodb2 +def test_query_global_secondary_index_when_created_via_update_table_resource(): + dynamodb = boto3.resource('dynamodb', region_name='us-east-1') + + # Create the DynamoDB table. + dynamodb.create_table( + TableName='users', + KeySchema=[ + { + 'AttributeName': 'user_id', + 'KeyType': 'HASH' + }, + ], + AttributeDefinitions=[ + { + 'AttributeName': 'user_id', + 'AttributeType': 'N', + }, + { + 'AttributeName': 'forum_name', + 'AttributeType': 'S' + }, + { + 'AttributeName': 'subject', + 'AttributeType': 'S' + }, + ], + ProvisionedThroughput={ + 'ReadCapacityUnits': 5, + 'WriteCapacityUnits': 5 + }, + ) + table = dynamodb.Table('users') + table.update( + AttributeDefinitions=[ + { + 'AttributeName': 'forum_name', + 'AttributeType': 'S' + }, + ], + GlobalSecondaryIndexUpdates=[ + {'Create': + { + 'IndexName': 'forum_name_index', + 'KeySchema': [ + { + 'AttributeName': 'forum_name', + 'KeyType': 'HASH', + }, + ], + 'Projection': { + 'ProjectionType': 'ALL', + }, + 'ProvisionedThroughput': { + 'ReadCapacityUnits': 5, + 'WriteCapacityUnits': 5 + }, + } + } + ] + ) + + next_user_id = 1 + for my_forum_name in ['cats', 'dogs']: + for my_subject in ['my pet is the cutest', 'wow look at what my pet did', "don't you love my pet?"]: + table.put_item(Item={'user_id': next_user_id, 'forum_name': my_forum_name, 'subject': my_subject}) + next_user_id += 1 + + # get all the cat users + forum_only_query_response = table.query( + IndexName='forum_name_index', + Select='ALL_ATTRIBUTES', + KeyConditionExpression=Key('forum_name').eq('cats'), + ) + forum_only_items = forum_only_query_response['Items'] + assert len(forum_only_items) == 3 + for item in forum_only_items: + assert item['forum_name'] == 'cats' + + # query all cat users with a particular subject + forum_and_subject_query_results = table.query( + IndexName='forum_name_index', + Select='ALL_ATTRIBUTES', + KeyConditionExpression=Key('forum_name').eq('cats'), + FilterExpression=Attr('subject').eq('my pet is the cutest'), + ) + forum_and_subject_items = forum_and_subject_query_results['Items'] + assert len(forum_and_subject_items) == 1 + assert forum_and_subject_items[0] == {'user_id': Decimal('1'), 'forum_name': 'cats', + 'subject': 'my pet is the cutest'} diff --git a/tests/test_dynamodb2/test_dynamodb_table_with_range_key.py 
b/tests/test_dynamodb2/test_dynamodb_table_with_range_key.py index a9ab298b7fbe..cc7fca11e552 100644 --- a/tests/test_dynamodb2/test_dynamodb_table_with_range_key.py +++ b/tests/test_dynamodb2/test_dynamodb_table_with_range_key.py @@ -1,1963 +1,1963 @@ -from __future__ import unicode_literals - -from decimal import Decimal - -import boto -import boto3 -from boto3.dynamodb.conditions import Key -from botocore.exceptions import ClientError -import sure # noqa -from freezegun import freeze_time -from moto import mock_dynamodb2, mock_dynamodb2_deprecated -from boto.exception import JSONResponseError -from tests.helpers import requires_boto_gte -try: - from boto.dynamodb2.fields import GlobalAllIndex, HashKey, RangeKey, AllIndex - from boto.dynamodb2.table import Item, Table - from boto.dynamodb2.types import STRING, NUMBER - from boto.dynamodb2.exceptions import ValidationException - from boto.dynamodb2.exceptions import ConditionalCheckFailedException -except ImportError: - pass - - -def create_table(): - table = Table.create('messages', schema=[ - HashKey('forum_name'), - RangeKey('subject'), - ], throughput={ - 'read': 10, - 'write': 10, - }) - return table - - -def create_table_with_local_indexes(): - table = Table.create( - 'messages', - schema=[ - HashKey('forum_name'), - RangeKey('subject'), - ], - throughput={ - 'read': 10, - 'write': 10, - }, - indexes=[ - AllIndex( - 'threads_index', - parts=[ - HashKey('forum_name', data_type=STRING), - RangeKey('threads', data_type=NUMBER), - ] - ) - ] - ) - return table - - -def iterate_results(res): - for i in res: - pass - - -@requires_boto_gte("2.9") -@mock_dynamodb2_deprecated -@freeze_time("2012-01-14") -def test_create_table(): - table = create_table() - expected = { - 'Table': { - 'AttributeDefinitions': [ - {'AttributeName': 'forum_name', 'AttributeType': 'S'}, - {'AttributeName': 'subject', 'AttributeType': 'S'} - ], - 'ProvisionedThroughput': { - 'NumberOfDecreasesToday': 0, 'WriteCapacityUnits': 10, 'ReadCapacityUnits': 10 - }, - 'TableSizeBytes': 0, - 'TableName': 'messages', - 'TableStatus': 'ACTIVE', - 'TableArn': 'arn:aws:dynamodb:us-east-1:123456789011:table/messages', - 'KeySchema': [ - {'KeyType': 'HASH', 'AttributeName': 'forum_name'}, - {'KeyType': 'RANGE', 'AttributeName': 'subject'} - ], - 'LocalSecondaryIndexes': [], - 'ItemCount': 0, 'CreationDateTime': 1326499200.0, - 'GlobalSecondaryIndexes': [] - } - } - table.describe().should.equal(expected) - - -@requires_boto_gte("2.9") -@mock_dynamodb2_deprecated -@freeze_time("2012-01-14") -def test_create_table_with_local_index(): - table = create_table_with_local_indexes() - expected = { - 'Table': { - 'AttributeDefinitions': [ - {'AttributeName': 'forum_name', 'AttributeType': 'S'}, - {'AttributeName': 'subject', 'AttributeType': 'S'}, - {'AttributeName': 'threads', 'AttributeType': 'N'} - ], - 'ProvisionedThroughput': { - 'NumberOfDecreasesToday': 0, - 'WriteCapacityUnits': 10, - 'ReadCapacityUnits': 10, - }, - 'TableSizeBytes': 0, - 'TableName': 'messages', - 'TableStatus': 'ACTIVE', - 'TableArn': 'arn:aws:dynamodb:us-east-1:123456789011:table/messages', - 'KeySchema': [ - {'KeyType': 'HASH', 'AttributeName': 'forum_name'}, - {'KeyType': 'RANGE', 'AttributeName': 'subject'} - ], - 'LocalSecondaryIndexes': [ - { - 'IndexName': 'threads_index', - 'KeySchema': [ - {'AttributeName': 'forum_name', 'KeyType': 'HASH'}, - {'AttributeName': 'threads', 'KeyType': 'RANGE'} - ], - 'Projection': {'ProjectionType': 'ALL'} - } - ], - 'ItemCount': 0, - 'CreationDateTime': 1326499200.0, - 
'GlobalSecondaryIndexes': [] - } - } - table.describe().should.equal(expected) - - -@requires_boto_gte("2.9") -@mock_dynamodb2_deprecated -def test_delete_table(): - conn = boto.dynamodb2.layer1.DynamoDBConnection() - table = create_table() - conn.list_tables()["TableNames"].should.have.length_of(1) - - table.delete() - conn.list_tables()["TableNames"].should.have.length_of(0) - conn.delete_table.when.called_with( - 'messages').should.throw(JSONResponseError) - - -@requires_boto_gte("2.9") -@mock_dynamodb2_deprecated -def test_update_table_throughput(): - table = create_table() - table.throughput["read"].should.equal(10) - table.throughput["write"].should.equal(10) - table.update(throughput={ - 'read': 5, - 'write': 15, - }) - - table.throughput["read"].should.equal(5) - table.throughput["write"].should.equal(15) - - table.update(throughput={ - 'read': 5, - 'write': 6, - }) - - table.describe() - - table.throughput["read"].should.equal(5) - table.throughput["write"].should.equal(6) - - -@requires_boto_gte("2.9") -@mock_dynamodb2_deprecated -def test_item_add_and_describe_and_update(): - table = create_table() - ok = table.put_item(data={ - 'forum_name': 'LOLCat Forum', - 'subject': 'Check this out!', - 'Body': 'http://url_to_lolcat.gif', - 'SentBy': 'User A', - 'ReceivedTime': '12/9/2011 11:36:03 PM', - }) - ok.should.equal(True) - - table.get_item(forum_name="LOLCat Forum", - subject='Check this out!').should_not.be.none - - returned_item = table.get_item( - forum_name='LOLCat Forum', - subject='Check this out!' - ) - dict(returned_item).should.equal({ - 'forum_name': 'LOLCat Forum', - 'subject': 'Check this out!', - 'Body': 'http://url_to_lolcat.gif', - 'SentBy': 'User A', - 'ReceivedTime': '12/9/2011 11:36:03 PM', - }) - - returned_item['SentBy'] = 'User B' - returned_item.save(overwrite=True) - - returned_item = table.get_item( - forum_name='LOLCat Forum', - subject='Check this out!' 
- ) - dict(returned_item).should.equal({ - 'forum_name': 'LOLCat Forum', - 'subject': 'Check this out!', - 'Body': 'http://url_to_lolcat.gif', - 'SentBy': 'User B', - 'ReceivedTime': '12/9/2011 11:36:03 PM', - }) - - -@requires_boto_gte("2.9") -@mock_dynamodb2_deprecated -def test_item_partial_save(): - table = create_table() - - data = { - 'forum_name': 'LOLCat Forum', - 'subject': 'The LOLz', - 'Body': 'http://url_to_lolcat.gif', - 'SentBy': 'User A', - } - - table.put_item(data=data) - returned_item = table.get_item( - forum_name="LOLCat Forum", subject='The LOLz') - - returned_item['SentBy'] = 'User B' - returned_item.partial_save() - - returned_item = table.get_item( - forum_name='LOLCat Forum', - subject='The LOLz' - ) - dict(returned_item).should.equal({ - 'forum_name': 'LOLCat Forum', - 'subject': 'The LOLz', - 'Body': 'http://url_to_lolcat.gif', - 'SentBy': 'User B', - }) - - -@requires_boto_gte("2.9") -@mock_dynamodb2_deprecated -def test_item_put_without_table(): - table = Table('undeclared-table') - item_data = { - 'forum_name': 'LOLCat Forum', - 'Body': 'http://url_to_lolcat.gif', - 'SentBy': 'User A', - 'ReceivedTime': '12/9/2011 11:36:03 PM', - } - item = Item(table, item_data) - item.save.when.called_with().should.throw(JSONResponseError) - - -@requires_boto_gte("2.9") -@mock_dynamodb2_deprecated -def test_get_missing_item(): - table = create_table() - - table.get_item.when.called_with( - hash_key='tester', - range_key='other', - ).should.throw(ValidationException) - - -@requires_boto_gte("2.9") -@mock_dynamodb2_deprecated -def test_get_item_with_undeclared_table(): - table = Table('undeclared-table') - table.get_item.when.called_with( - test_hash=3241526475).should.throw(JSONResponseError) - - -@requires_boto_gte("2.9") -@mock_dynamodb2_deprecated -def test_get_item_without_range_key(): - table = Table.create('messages', schema=[ - HashKey('test_hash'), - RangeKey('test_range'), - ], throughput={ - 'read': 10, - 'write': 10, - }) - - hash_key = 3241526475 - range_key = 1234567890987 - table.put_item(data={'test_hash': hash_key, 'test_range': range_key}) - table.get_item.when.called_with( - test_hash=hash_key).should.throw(ValidationException) - - -@requires_boto_gte("2.30.0") -@mock_dynamodb2_deprecated -def test_delete_item(): - table = create_table() - item_data = { - 'forum_name': 'LOLCat Forum', - 'Body': 'http://url_to_lolcat.gif', - 'SentBy': 'User A', - 'ReceivedTime': '12/9/2011 11:36:03 PM', - } - item = Item(table, item_data) - item['subject'] = 'Check this out!' - item.save() - table.count().should.equal(1) - - response = item.delete() - response.should.equal(True) - - table.count().should.equal(0) - # Deletes are idempotent - item.delete().should.equal(True) - - -@requires_boto_gte("2.9") -@mock_dynamodb2_deprecated -def test_delete_item_with_undeclared_table(): - table = Table("undeclared-table") - item_data = { - 'forum_name': 'LOLCat Forum', - 'Body': 'http://url_to_lolcat.gif', - 'SentBy': 'User A', - 'ReceivedTime': '12/9/2011 11:36:03 PM', - } - item = Item(table, item_data) - item.delete.when.called_with().should.throw(JSONResponseError) - - -@requires_boto_gte("2.9") -@mock_dynamodb2_deprecated -def test_query(): - table = create_table() - - item_data = { - 'forum_name': 'LOLCat Forum', - 'Body': 'http://url_to_lolcat.gif', - 'SentBy': 'User A', - 'ReceivedTime': '12/9/2011 11:36:03 PM', - 'subject': 'Check this out!' 
- } - item = Item(table, item_data) - item.save(overwrite=True) - - item['forum_name'] = 'the-key' - item['subject'] = '456' - item.save(overwrite=True) - - item['forum_name'] = 'the-key' - item['subject'] = '123' - item.save(overwrite=True) - - item['forum_name'] = 'the-key' - item['subject'] = '789' - item.save(overwrite=True) - - table.count().should.equal(4) - - results = table.query_2(forum_name__eq='the-key', - subject__gt='1', consistent=True) - expected = ["123", "456", "789"] - for index, item in enumerate(results): - item["subject"].should.equal(expected[index]) - - results = table.query_2(forum_name__eq="the-key", - subject__gt='1', reverse=True) - for index, item in enumerate(results): - item["subject"].should.equal(expected[len(expected) - 1 - index]) - - results = table.query_2(forum_name__eq='the-key', - subject__gt='1', consistent=True) - sum(1 for _ in results).should.equal(3) - - results = table.query_2(forum_name__eq='the-key', - subject__gt='234', consistent=True) - sum(1 for _ in results).should.equal(2) - - results = table.query_2(forum_name__eq='the-key', subject__gt='9999') - sum(1 for _ in results).should.equal(0) - - results = table.query_2(forum_name__eq='the-key', subject__beginswith='12') - sum(1 for _ in results).should.equal(1) - - results = table.query_2(forum_name__eq='the-key', subject__beginswith='7') - sum(1 for _ in results).should.equal(1) - - results = table.query_2(forum_name__eq='the-key', - subject__between=['567', '890']) - sum(1 for _ in results).should.equal(1) - - -@requires_boto_gte("2.9") -@mock_dynamodb2_deprecated -def test_query_with_undeclared_table(): - table = Table('undeclared') - results = table.query( - forum_name__eq='Amazon DynamoDB', - subject__beginswith='DynamoDB', - limit=1 - ) - iterate_results.when.called_with(results).should.throw(JSONResponseError) - - -@requires_boto_gte("2.9") -@mock_dynamodb2_deprecated -def test_scan(): - table = create_table() - item_data = { - 'Body': 'http://url_to_lolcat.gif', - 'SentBy': 'User A', - 'ReceivedTime': '12/9/2011 11:36:03 PM', - } - item_data['forum_name'] = 'the-key' - item_data['subject'] = '456' - - item = Item(table, item_data) - item.save() - - item['forum_name'] = 'the-key' - item['subject'] = '123' - item.save() - - item_data = { - 'Body': 'http://url_to_lolcat.gif', - 'SentBy': 'User B', - 'ReceivedTime': '12/9/2011 11:36:09 PM', - 'Ids': set([1, 2, 3]), - 'PK': 7, - } - - item_data['forum_name'] = 'the-key' - item_data['subject'] = '789' - - item = Item(table, item_data) - item.save() - - results = table.scan() - sum(1 for _ in results).should.equal(3) - - results = table.scan(SentBy__eq='User B') - sum(1 for _ in results).should.equal(1) - - results = table.scan(Body__beginswith='http') - sum(1 for _ in results).should.equal(3) - - results = table.scan(Ids__null=False) - sum(1 for _ in results).should.equal(1) - - results = table.scan(Ids__null=True) - sum(1 for _ in results).should.equal(2) - - results = table.scan(PK__between=[8, 9]) - sum(1 for _ in results).should.equal(0) - - results = table.scan(PK__between=[5, 8]) - sum(1 for _ in results).should.equal(1) - - -@requires_boto_gte("2.9") -@mock_dynamodb2_deprecated -def test_scan_with_undeclared_table(): - conn = boto.dynamodb2.layer1.DynamoDBConnection() - conn.scan.when.called_with( - table_name='undeclared-table', - scan_filter={ - "SentBy": { - "AttributeValueList": [{ - "S": "User B"} - ], - "ComparisonOperator": "EQ" - } - }, - ).should.throw(JSONResponseError) - - -@requires_boto_gte("2.9") 
-@mock_dynamodb2_deprecated -def test_write_batch(): - table = create_table() - with table.batch_write() as batch: - batch.put_item(data={ - 'forum_name': 'the-key', - 'subject': '123', - 'Body': 'http://url_to_lolcat.gif', - 'SentBy': 'User A', - 'ReceivedTime': '12/9/2011 11:36:03 PM', - }) - batch.put_item(data={ - 'forum_name': 'the-key', - 'subject': '789', - 'Body': 'http://url_to_lolcat.gif', - 'SentBy': 'User B', - 'ReceivedTime': '12/9/2011 11:36:03 PM', - }) - - table.count().should.equal(2) - with table.batch_write() as batch: - batch.delete_item( - forum_name='the-key', - subject='789' - ) - - table.count().should.equal(1) - - -@requires_boto_gte("2.9") -@mock_dynamodb2_deprecated -def test_batch_read(): - table = create_table() - item_data = { - 'Body': 'http://url_to_lolcat.gif', - 'SentBy': 'User A', - 'ReceivedTime': '12/9/2011 11:36:03 PM', - } - - item_data['forum_name'] = 'the-key' - item_data['subject'] = '456' - - item = Item(table, item_data) - item.save() - - item = Item(table, item_data) - item_data['forum_name'] = 'the-key' - item_data['subject'] = '123' - item.save() - - item_data = { - 'Body': 'http://url_to_lolcat.gif', - 'SentBy': 'User B', - 'ReceivedTime': '12/9/2011 11:36:03 PM', - 'Ids': set([1, 2, 3]), - 'PK': 7, - } - item = Item(table, item_data) - item_data['forum_name'] = 'another-key' - item_data['subject'] = '789' - item.save() - results = table.batch_get( - keys=[ - {'forum_name': 'the-key', 'subject': '123'}, - {'forum_name': 'another-key', 'subject': '789'}, - ] - ) - - # Iterate through so that batch_item gets called - count = len([x for x in results]) - count.should.equal(2) - - -@requires_boto_gte("2.9") -@mock_dynamodb2_deprecated -def test_get_key_fields(): - table = create_table() - kf = table.get_key_fields() - kf.should.equal(['forum_name', 'subject']) - - -@mock_dynamodb2_deprecated -def test_create_with_global_indexes(): - conn = boto.dynamodb2.layer1.DynamoDBConnection() - - Table.create('messages', schema=[ - HashKey('subject'), - RangeKey('version'), - ], global_indexes=[ - GlobalAllIndex('topic-created_at-index', - parts=[ - HashKey('topic'), - RangeKey('created_at', data_type='N') - ], - throughput={ - 'read': 6, - 'write': 1 - } - ), - ]) - - table_description = conn.describe_table("messages") - table_description['Table']["GlobalSecondaryIndexes"].should.equal([ - { - "IndexName": "topic-created_at-index", - "KeySchema": [ - { - "AttributeName": "topic", - "KeyType": "HASH" - }, - { - "AttributeName": "created_at", - "KeyType": "RANGE" - }, - ], - "Projection": { - "ProjectionType": "ALL" - }, - "ProvisionedThroughput": { - "ReadCapacityUnits": 6, - "WriteCapacityUnits": 1, - } - } - ]) - - -@mock_dynamodb2_deprecated -def test_query_with_global_indexes(): - table = Table.create('messages', schema=[ - HashKey('subject'), - RangeKey('version'), - ], global_indexes=[ - GlobalAllIndex('topic-created_at-index', - parts=[ - HashKey('topic'), - RangeKey('created_at', data_type='N') - ], - throughput={ - 'read': 6, - 'write': 1 - } - ), - GlobalAllIndex('status-created_at-index', - parts=[ - HashKey('status'), - RangeKey('created_at', data_type='N') - ], - throughput={ - 'read': 2, - 'write': 1 - } - ) - ]) - - item_data = { - 'subject': 'Check this out!', - 'version': '1', - 'created_at': 0, - 'status': 'inactive' - } - item = Item(table, item_data) - item.save(overwrite=True) - - item['version'] = '2' - item.save(overwrite=True) - - results = table.query(status__eq='active') - list(results).should.have.length_of(0) - - 
-@mock_dynamodb2_deprecated -def test_query_with_local_indexes(): - table = create_table_with_local_indexes() - item_data = { - 'forum_name': 'Cool Forum', - 'subject': 'Check this out!', - 'version': '1', - 'threads': 1, - 'status': 'inactive' - } - item = Item(table, item_data) - item.save(overwrite=True) - - item['version'] = '2' - item.save(overwrite=True) - results = table.query(forum_name__eq='Cool Forum', - index='threads_index', threads__eq=1) - list(results).should.have.length_of(1) - - -@requires_boto_gte("2.9") -@mock_dynamodb2_deprecated -def test_query_filter_eq(): - table = create_table_with_local_indexes() - item_data = [ - { - 'forum_name': 'Cool Forum', - 'subject': 'Check this out!', - 'version': '1', - 'threads': 1, - }, - { - 'forum_name': 'Cool Forum', - 'subject': 'Read this now!', - 'version': '1', - 'threads': 5, - }, - { - 'forum_name': 'Cool Forum', - 'subject': 'Please read this... please', - 'version': '1', - 'threads': 0, - } - ] - for data in item_data: - item = Item(table, data) - item.save(overwrite=True) - results = table.query_2( - forum_name__eq='Cool Forum', index='threads_index', threads__eq=5 - ) - list(results).should.have.length_of(1) - - -@requires_boto_gte("2.9") -@mock_dynamodb2_deprecated -def test_query_filter_lt(): - table = create_table_with_local_indexes() - item_data = [ - { - 'forum_name': 'Cool Forum', - 'subject': 'Check this out!', - 'version': '1', - 'threads': 1, - }, - { - 'forum_name': 'Cool Forum', - 'subject': 'Read this now!', - 'version': '1', - 'threads': 5, - }, - { - 'forum_name': 'Cool Forum', - 'subject': 'Please read this... please', - 'version': '1', - 'threads': 0, - } - ] - for data in item_data: - item = Item(table, data) - item.save(overwrite=True) - - results = table.query( - forum_name__eq='Cool Forum', index='threads_index', threads__lt=5 - ) - results = list(results) - results.should.have.length_of(2) - - -@requires_boto_gte("2.9") -@mock_dynamodb2_deprecated -def test_query_filter_gt(): - table = create_table_with_local_indexes() - item_data = [ - { - 'forum_name': 'Cool Forum', - 'subject': 'Check this out!', - 'version': '1', - 'threads': 1, - }, - { - 'forum_name': 'Cool Forum', - 'subject': 'Read this now!', - 'version': '1', - 'threads': 5, - }, - { - 'forum_name': 'Cool Forum', - 'subject': 'Please read this... please', - 'version': '1', - 'threads': 0, - } - ] - for data in item_data: - item = Item(table, data) - item.save(overwrite=True) - - results = table.query( - forum_name__eq='Cool Forum', index='threads_index', threads__gt=1 - ) - list(results).should.have.length_of(1) - - -@requires_boto_gte("2.9") -@mock_dynamodb2_deprecated -def test_query_filter_lte(): - table = create_table_with_local_indexes() - item_data = [ - { - 'forum_name': 'Cool Forum', - 'subject': 'Check this out!', - 'version': '1', - 'threads': 1, - }, - { - 'forum_name': 'Cool Forum', - 'subject': 'Read this now!', - 'version': '1', - 'threads': 5, - }, - { - 'forum_name': 'Cool Forum', - 'subject': 'Please read this... 
please', - 'version': '1', - 'threads': 0, - } - ] - for data in item_data: - item = Item(table, data) - item.save(overwrite=True) - - results = table.query( - forum_name__eq='Cool Forum', index='threads_index', threads__lte=5 - ) - list(results).should.have.length_of(3) - - -@requires_boto_gte("2.9") -@mock_dynamodb2_deprecated -def test_query_filter_gte(): - table = create_table_with_local_indexes() - item_data = [ - { - 'forum_name': 'Cool Forum', - 'subject': 'Check this out!', - 'version': '1', - 'threads': 1, - }, - { - 'forum_name': 'Cool Forum', - 'subject': 'Read this now!', - 'version': '1', - 'threads': 5, - }, - { - 'forum_name': 'Cool Forum', - 'subject': 'Please read this... please', - 'version': '1', - 'threads': 0, - } - ] - for data in item_data: - item = Item(table, data) - item.save(overwrite=True) - - results = table.query( - forum_name__eq='Cool Forum', index='threads_index', threads__gte=1 - ) - list(results).should.have.length_of(2) - - -@requires_boto_gte("2.9") -@mock_dynamodb2_deprecated -def test_query_non_hash_range_key(): - table = create_table_with_local_indexes() - item_data = [ - { - 'forum_name': 'Cool Forum', - 'subject': 'Check this out!', - 'version': '1', - 'threads': 1, - }, - { - 'forum_name': 'Cool Forum', - 'subject': 'Read this now!', - 'version': '3', - 'threads': 5, - }, - { - 'forum_name': 'Cool Forum', - 'subject': 'Please read this... please', - 'version': '2', - 'threads': 0, - } - ] - for data in item_data: - item = Item(table, data) - item.save(overwrite=True) - - results = table.query( - forum_name__eq='Cool Forum', version__gt="2" - ) - results = list(results) - results.should.have.length_of(1) - - results = table.query( - forum_name__eq='Cool Forum', version__lt="3" - ) - results = list(results) - results.should.have.length_of(2) - - -@mock_dynamodb2_deprecated -def test_reverse_query(): - conn = boto.dynamodb2.layer1.DynamoDBConnection() - - table = Table.create('messages', schema=[ - HashKey('subject'), - RangeKey('created_at', data_type='N') - ]) - - for i in range(10): - table.put_item({ - 'subject': "Hi", - 'created_at': i - }) - - results = table.query_2(subject__eq="Hi", - created_at__lt=6, - limit=4, - reverse=True) - - expected = [Decimal(5), Decimal(4), Decimal(3), Decimal(2)] - [r['created_at'] for r in results].should.equal(expected) - - -@mock_dynamodb2_deprecated -def test_lookup(): - from decimal import Decimal - table = Table.create('messages', schema=[ - HashKey('test_hash'), - RangeKey('test_range'), - ], throughput={ - 'read': 10, - 'write': 10, - }) - - hash_key = 3241526475 - range_key = 1234567890987 - data = {'test_hash': hash_key, 'test_range': range_key} - table.put_item(data=data) - message = table.lookup(hash_key, range_key) - message.get('test_hash').should.equal(Decimal(hash_key)) - message.get('test_range').should.equal(Decimal(range_key)) - - -@mock_dynamodb2_deprecated -def test_failed_overwrite(): - table = Table.create('messages', schema=[ - HashKey('id'), - RangeKey('range'), - ], throughput={ - 'read': 7, - 'write': 3, - }) - - data1 = {'id': '123', 'range': 'abc', 'data': '678'} - table.put_item(data=data1) - - data2 = {'id': '123', 'range': 'abc', 'data': '345'} - table.put_item(data=data2, overwrite=True) - - data3 = {'id': '123', 'range': 'abc', 'data': '812'} - table.put_item.when.called_with(data=data3).should.throw( - ConditionalCheckFailedException) - - returned_item = table.lookup('123', 'abc') - dict(returned_item).should.equal(data2) - - data4 = {'id': '123', 'range': 'ghi', 'data': 812} - 
table.put_item(data=data4) - - returned_item = table.lookup('123', 'ghi') - dict(returned_item).should.equal(data4) - - -@mock_dynamodb2_deprecated -def test_conflicting_writes(): - table = Table.create('messages', schema=[ - HashKey('id'), - RangeKey('range'), - ]) - - item_data = {'id': '123', 'range': 'abc', 'data': '678'} - item1 = Item(table, item_data) - item2 = Item(table, item_data) - item1.save() - - item1['data'] = '579' - item2['data'] = '912' - - item1.save() - item2.save.when.called_with().should.throw(ConditionalCheckFailedException) - -""" -boto3 -""" - - -@mock_dynamodb2 -def test_boto3_conditions(): - dynamodb = boto3.resource('dynamodb', region_name='us-east-1') - - # Create the DynamoDB table. - table = dynamodb.create_table( - TableName='users', - KeySchema=[ - { - 'AttributeName': 'forum_name', - 'KeyType': 'HASH' - }, - { - 'AttributeName': 'subject', - 'KeyType': 'RANGE' - }, - ], - AttributeDefinitions=[ - { - 'AttributeName': 'forum_name', - 'AttributeType': 'S' - }, - { - 'AttributeName': 'subject', - 'AttributeType': 'S' - }, - ], - ProvisionedThroughput={ - 'ReadCapacityUnits': 5, - 'WriteCapacityUnits': 5 - } - ) - table = dynamodb.Table('users') - - table.put_item(Item={ - 'forum_name': 'the-key', - 'subject': '123' - }) - table.put_item(Item={ - 'forum_name': 'the-key', - 'subject': '456' - }) - table.put_item(Item={ - 'forum_name': 'the-key', - 'subject': '789' - }) - - # Test a query returning all items - results = table.query( - KeyConditionExpression=Key('forum_name').eq( - 'the-key') & Key("subject").gt('1'), - ScanIndexForward=True, - ) - expected = ["123", "456", "789"] - for index, item in enumerate(results['Items']): - item["subject"].should.equal(expected[index]) - - # Return all items again, but in reverse - results = table.query( - KeyConditionExpression=Key('forum_name').eq( - 'the-key') & Key("subject").gt('1'), - ScanIndexForward=False, - ) - for index, item in enumerate(reversed(results['Items'])): - item["subject"].should.equal(expected[index]) - - # Filter the subjects to only return some of the results - results = table.query( - KeyConditionExpression=Key('forum_name').eq( - 'the-key') & Key("subject").gt('234'), - ConsistentRead=True, - ) - results['Count'].should.equal(2) - - # Filter to return no results - results = table.query( - KeyConditionExpression=Key('forum_name').eq( - 'the-key') & Key("subject").gt('9999') - ) - results['Count'].should.equal(0) - - results = table.query( - KeyConditionExpression=Key('forum_name').eq( - 'the-key') & Key("subject").begins_with('12') - ) - results['Count'].should.equal(1) - - results = table.query( - KeyConditionExpression=Key("subject").begins_with( - '7') & Key('forum_name').eq('the-key') - ) - results['Count'].should.equal(1) - - results = table.query( - KeyConditionExpression=Key('forum_name').eq( - 'the-key') & Key("subject").between('567', '890') - ) - results['Count'].should.equal(1) - - -@mock_dynamodb2 -def test_boto3_put_item_with_conditions(): - import botocore - dynamodb = boto3.resource('dynamodb', region_name='us-east-1') - - # Create the DynamoDB table. 
- table = dynamodb.create_table( - TableName='users', - KeySchema=[ - { - 'AttributeName': 'forum_name', - 'KeyType': 'HASH' - }, - { - 'AttributeName': 'subject', - 'KeyType': 'RANGE' - }, - ], - AttributeDefinitions=[ - { - 'AttributeName': 'forum_name', - 'AttributeType': 'S' - }, - { - 'AttributeName': 'subject', - 'AttributeType': 'S' - }, - ], - ProvisionedThroughput={ - 'ReadCapacityUnits': 5, - 'WriteCapacityUnits': 5 - } - ) - table = dynamodb.Table('users') - - table.put_item(Item={ - 'forum_name': 'the-key', - 'subject': '123' - }) - - table.put_item( - Item={ - 'forum_name': 'the-key-2', - 'subject': '1234', - }, - ConditionExpression='attribute_not_exists(forum_name) AND attribute_not_exists(subject)' - ) - - table.put_item.when.called_with( - Item={ - 'forum_name': 'the-key', - 'subject': '123' - }, - ConditionExpression='attribute_not_exists(forum_name) AND attribute_not_exists(subject)' - ).should.throw(botocore.exceptions.ClientError) - - table.put_item.when.called_with( - Item={ - 'forum_name': 'bogus-key', - 'subject': 'bogus', - 'test': '123' - }, - ConditionExpression='attribute_exists(forum_name) AND attribute_exists(subject)' - ).should.throw(botocore.exceptions.ClientError) - - -def _create_table_with_range_key(): - dynamodb = boto3.resource('dynamodb', region_name='us-east-1') - - # Create the DynamoDB table. - table = dynamodb.create_table( - TableName='users', - KeySchema=[ - { - 'AttributeName': 'forum_name', - 'KeyType': 'HASH' - }, - { - 'AttributeName': 'subject', - 'KeyType': 'RANGE' - }, - ], - GlobalSecondaryIndexes=[{ - 'IndexName': 'TestGSI', - 'KeySchema': [ - { - 'AttributeName': 'username', - 'KeyType': 'HASH', - }, - { - 'AttributeName': 'created', - 'KeyType': 'RANGE', - } - ], - 'Projection': { - 'ProjectionType': 'ALL', - }, - 'ProvisionedThroughput': { - 'ReadCapacityUnits': 5, - 'WriteCapacityUnits': 5 - } - }], - AttributeDefinitions=[ - { - 'AttributeName': 'forum_name', - 'AttributeType': 'S' - }, - { - 'AttributeName': 'subject', - 'AttributeType': 'S' - }, - { - 'AttributeName': 'username', - 'AttributeType': 'S' - }, - { - 'AttributeName': 'created', - 'AttributeType': 'N' - } - ], - ProvisionedThroughput={ - 'ReadCapacityUnits': 5, - 'WriteCapacityUnits': 5 - } - ) - return dynamodb.Table('users') - - -@mock_dynamodb2 -def test_update_item_range_key_set(): - table = _create_table_with_range_key() - table.put_item(Item={ - 'forum_name': 'the-key', - 'subject': '123', - 'username': 'johndoe', - 'created': Decimal('3'), - }) - - item_key = {'forum_name': 'the-key', 'subject': '123'} - table.update_item( - Key=item_key, - AttributeUpdates={ - 'username': { - 'Action': u'PUT', - 'Value': 'johndoe2' - }, - 'created': { - 'Action': u'PUT', - 'Value': Decimal('4'), - }, - 'mapfield': { - 'Action': u'PUT', - 'Value': {'key': 'value'}, - } - }, - ) - - returned_item = dict((k, str(v) if isinstance(v, Decimal) else v) - for k, v in table.get_item(Key=item_key)['Item'].items()) - dict(returned_item).should.equal({ - 'username': "johndoe2", - 'forum_name': 'the-key', - 'subject': '123', - 'created': '4', - 'mapfield': {'key': 'value'}, - }) - - -@mock_dynamodb2 -def test_update_item_does_not_exist_is_created(): - table = _create_table_with_range_key() - - item_key = {'forum_name': 'the-key', 'subject': '123'} - result = table.update_item( - Key=item_key, - AttributeUpdates={ - 'username': { - 'Action': u'PUT', - 'Value': 'johndoe2' - }, - 'created': { - 'Action': u'PUT', - 'Value': Decimal('4'), - }, - 'mapfield': { - 'Action': u'PUT', - 'Value': 
{'key': 'value'}, - } - }, - ReturnValues='ALL_OLD', - ) - - assert not result.get('Attributes') - - returned_item = dict((k, str(v) if isinstance(v, Decimal) else v) - for k, v in table.get_item(Key=item_key)['Item'].items()) - dict(returned_item).should.equal({ - 'username': "johndoe2", - 'forum_name': 'the-key', - 'subject': '123', - 'created': '4', - 'mapfield': {'key': 'value'}, - }) - - -@mock_dynamodb2 -def test_update_item_add_value(): - table = _create_table_with_range_key() - - table.put_item(Item={ - 'forum_name': 'the-key', - 'subject': '123', - 'numeric_field': Decimal('-1'), - }) - - item_key = {'forum_name': 'the-key', 'subject': '123'} - table.update_item( - Key=item_key, - AttributeUpdates={ - 'numeric_field': { - 'Action': u'ADD', - 'Value': Decimal('2'), - }, - }, - ) - - returned_item = dict((k, str(v) if isinstance(v, Decimal) else v) - for k, v in table.get_item(Key=item_key)['Item'].items()) - dict(returned_item).should.equal({ - 'numeric_field': '1', - 'forum_name': 'the-key', - 'subject': '123', - }) - - -@mock_dynamodb2 -def test_update_item_add_value_string_set(): - table = _create_table_with_range_key() - - table.put_item(Item={ - 'forum_name': 'the-key', - 'subject': '123', - 'string_set': set(['str1', 'str2']), - }) - - item_key = {'forum_name': 'the-key', 'subject': '123'} - table.update_item( - Key=item_key, - AttributeUpdates={ - 'string_set': { - 'Action': u'ADD', - 'Value': set(['str3']), - }, - }, - ) - - returned_item = dict((k, str(v) if isinstance(v, Decimal) else v) - for k, v in table.get_item(Key=item_key)['Item'].items()) - dict(returned_item).should.equal({ - 'string_set': set(['str1', 'str2', 'str3']), - 'forum_name': 'the-key', - 'subject': '123', - }) - - -@mock_dynamodb2 -def test_update_item_add_value_does_not_exist_is_created(): - table = _create_table_with_range_key() - - item_key = {'forum_name': 'the-key', 'subject': '123'} - table.update_item( - Key=item_key, - AttributeUpdates={ - 'numeric_field': { - 'Action': u'ADD', - 'Value': Decimal('2'), - }, - }, - ) - - returned_item = dict((k, str(v) if isinstance(v, Decimal) else v) - for k, v in table.get_item(Key=item_key)['Item'].items()) - dict(returned_item).should.equal({ - 'numeric_field': '2', - 'forum_name': 'the-key', - 'subject': '123', - }) - - -@mock_dynamodb2 -def test_update_item_with_expression(): - table = _create_table_with_range_key() - - table.put_item(Item={ - 'forum_name': 'the-key', - 'subject': '123', - 'field': '1' - }) - - item_key = {'forum_name': 'the-key', 'subject': '123'} - - table.update_item( - Key=item_key, - UpdateExpression='SET field=2', - ) - dict(table.get_item(Key=item_key)['Item']).should.equal({ - 'field': '2', - 'forum_name': 'the-key', - 'subject': '123', - }) - - table.update_item( - Key=item_key, - UpdateExpression='SET field = 3', - ) - dict(table.get_item(Key=item_key)['Item']).should.equal({ - 'field': '3', - 'forum_name': 'the-key', - 'subject': '123', - }) - -@mock_dynamodb2 -def test_update_item_add_with_expression(): - table = _create_table_with_range_key() - - item_key = {'forum_name': 'the-key', 'subject': '123'} - current_item = { - 'forum_name': 'the-key', - 'subject': '123', - 'str_set': {'item1', 'item2', 'item3'}, - 'num_set': {1, 2, 3}, - 'num_val': 6 - } - - # Put an entry in the DB to play with - table.put_item(Item=current_item) - - # Update item to add a string value to a string set - table.update_item( - Key=item_key, - UpdateExpression='ADD str_set :v', - ExpressionAttributeValues={ - ':v': {'item4'} - } - ) - 
current_item['str_set'] = current_item['str_set'].union({'item4'}) - dict(table.get_item(Key=item_key)['Item']).should.equal(current_item) - - # Update item to add a num value to a num set - table.update_item( - Key=item_key, - UpdateExpression='ADD num_set :v', - ExpressionAttributeValues={ - ':v': {6} - } - ) - current_item['num_set'] = current_item['num_set'].union({6}) - dict(table.get_item(Key=item_key)['Item']).should.equal(current_item) - - # Update item to add a value to a number value - table.update_item( - Key=item_key, - UpdateExpression='ADD num_val :v', - ExpressionAttributeValues={ - ':v': 20 - } - ) - current_item['num_val'] = current_item['num_val'] + 20 - dict(table.get_item(Key=item_key)['Item']).should.equal(current_item) - - # Attempt to add a number value to a string set, should raise Client Error - table.update_item.when.called_with( - Key=item_key, - UpdateExpression='ADD str_set :v', - ExpressionAttributeValues={ - ':v': 20 - } - ).should.have.raised(ClientError) - dict(table.get_item(Key=item_key)['Item']).should.equal(current_item) - - # Attempt to add a number set to the string set, should raise a ClientError - table.update_item.when.called_with( - Key=item_key, - UpdateExpression='ADD str_set :v', - ExpressionAttributeValues={ - ':v': { 20 } - } - ).should.have.raised(ClientError) - dict(table.get_item(Key=item_key)['Item']).should.equal(current_item) - - # Attempt to update with a bad expression - table.update_item.when.called_with( - Key=item_key, - UpdateExpression='ADD str_set bad_value' - ).should.have.raised(ClientError) - - # Attempt to add a string value instead of a string set - table.update_item.when.called_with( - Key=item_key, - UpdateExpression='ADD str_set :v', - ExpressionAttributeValues={ - ':v': 'new_string' - } - ).should.have.raised(ClientError) - - -@mock_dynamodb2 -def test_update_item_delete_with_expression(): - table = _create_table_with_range_key() - - item_key = {'forum_name': 'the-key', 'subject': '123'} - current_item = { - 'forum_name': 'the-key', - 'subject': '123', - 'str_set': {'item1', 'item2', 'item3'}, - 'num_set': {1, 2, 3}, - 'num_val': 6 - } - - # Put an entry in the DB to play with - table.put_item(Item=current_item) - - # Update item to delete a string value from a string set - table.update_item( - Key=item_key, - UpdateExpression='DELETE str_set :v', - ExpressionAttributeValues={ - ':v': {'item2'} - } - ) - current_item['str_set'] = current_item['str_set'].difference({'item2'}) - dict(table.get_item(Key=item_key)['Item']).should.equal(current_item) - - # Update item to delete a num value from a num set - table.update_item( - Key=item_key, - UpdateExpression='DELETE num_set :v', - ExpressionAttributeValues={ - ':v': {2} - } - ) - current_item['num_set'] = current_item['num_set'].difference({2}) - dict(table.get_item(Key=item_key)['Item']).should.equal(current_item) - - # Try to delete on a number, this should fail - table.update_item.when.called_with( - Key=item_key, - UpdateExpression='DELETE num_val :v', - ExpressionAttributeValues={ - ':v': 20 - } - ).should.have.raised(ClientError) - dict(table.get_item(Key=item_key)['Item']).should.equal(current_item) - - # Try to delete a string set from a number set - table.update_item.when.called_with( - Key=item_key, - UpdateExpression='DELETE num_set :v', - ExpressionAttributeValues={ - ':v': {'del_str'} - } - ).should.have.raised(ClientError) - dict(table.get_item(Key=item_key)['Item']).should.equal(current_item) - - # Attempt to update with a bad expression - 
table.update_item.when.called_with( - Key=item_key, - UpdateExpression='DELETE num_val badvalue' - ).should.have.raised(ClientError) - - -@mock_dynamodb2 -def test_boto3_query_gsi_range_comparison(): - table = _create_table_with_range_key() - - table.put_item(Item={ - 'forum_name': 'the-key', - 'subject': '123', - 'username': 'johndoe', - 'created': 3, - }) - table.put_item(Item={ - 'forum_name': 'the-key', - 'subject': '456', - 'username': 'johndoe', - 'created': 1, - }) - table.put_item(Item={ - 'forum_name': 'the-key', - 'subject': '789', - 'username': 'johndoe', - 'created': 2, - }) - table.put_item(Item={ - 'forum_name': 'the-key', - 'subject': '159', - 'username': 'janedoe', - 'created': 2, - }) - table.put_item(Item={ - 'forum_name': 'the-key', - 'subject': '601', - 'username': 'janedoe', - 'created': 5, - }) - - # Test a query returning all johndoe items - results = table.query( - KeyConditionExpression=Key('username').eq( - 'johndoe') & Key("created").gt(0), - ScanIndexForward=True, - IndexName='TestGSI', - ) - expected = ["456", "789", "123"] - for index, item in enumerate(results['Items']): - item["subject"].should.equal(expected[index]) - - # Return all johndoe items again, but in reverse - results = table.query( - KeyConditionExpression=Key('username').eq( - 'johndoe') & Key("created").gt(0), - ScanIndexForward=False, - IndexName='TestGSI', - ) - for index, item in enumerate(reversed(results['Items'])): - item["subject"].should.equal(expected[index]) - - # Filter the creation to only return some of the results - # And reverse order of hash + range key - results = table.query( - KeyConditionExpression=Key("created").gt( - 1) & Key('username').eq('johndoe'), - ConsistentRead=True, - IndexName='TestGSI', - ) - results['Count'].should.equal(2) - - # Filter to return no results - results = table.query( - KeyConditionExpression=Key('username').eq( - 'janedoe') & Key("created").gt(9), - IndexName='TestGSI', - ) - results['Count'].should.equal(0) - - results = table.query( - KeyConditionExpression=Key('username').eq( - 'janedoe') & Key("created").eq(5), - IndexName='TestGSI', - ) - results['Count'].should.equal(1) - - # Test range key sorting - results = table.query( - KeyConditionExpression=Key('username').eq( - 'johndoe') & Key("created").gt(0), - IndexName='TestGSI', - ) - expected = [Decimal('1'), Decimal('2'), Decimal('3')] - for index, item in enumerate(results['Items']): - item["created"].should.equal(expected[index]) - - -@mock_dynamodb2 -def test_boto3_update_table_throughput(): - dynamodb = boto3.resource('dynamodb', region_name='us-east-1') - - # Create the DynamoDB table. 
- table = dynamodb.create_table( - TableName='users', - KeySchema=[ - { - 'AttributeName': 'forum_name', - 'KeyType': 'HASH' - }, - { - 'AttributeName': 'subject', - 'KeyType': 'RANGE' - }, - ], - AttributeDefinitions=[ - { - 'AttributeName': 'forum_name', - 'AttributeType': 'S' - }, - { - 'AttributeName': 'subject', - 'AttributeType': 'S' - }, - ], - ProvisionedThroughput={ - 'ReadCapacityUnits': 5, - 'WriteCapacityUnits': 6 - } - ) - table = dynamodb.Table('users') - - table.provisioned_throughput['ReadCapacityUnits'].should.equal(5) - table.provisioned_throughput['WriteCapacityUnits'].should.equal(6) - - table.update(ProvisionedThroughput={ - 'ReadCapacityUnits': 10, - 'WriteCapacityUnits': 11, - }) - - table = dynamodb.Table('users') - - table.provisioned_throughput['ReadCapacityUnits'].should.equal(10) - table.provisioned_throughput['WriteCapacityUnits'].should.equal(11) - - -@mock_dynamodb2 -def test_boto3_update_table_gsi_throughput(): - dynamodb = boto3.resource('dynamodb', region_name='us-east-1') - - # Create the DynamoDB table. - table = dynamodb.create_table( - TableName='users', - KeySchema=[ - { - 'AttributeName': 'forum_name', - 'KeyType': 'HASH' - }, - { - 'AttributeName': 'subject', - 'KeyType': 'RANGE' - }, - ], - GlobalSecondaryIndexes=[{ - 'IndexName': 'TestGSI', - 'KeySchema': [ - { - 'AttributeName': 'username', - 'KeyType': 'HASH', - }, - { - 'AttributeName': 'created', - 'KeyType': 'RANGE', - } - ], - 'Projection': { - 'ProjectionType': 'ALL', - }, - 'ProvisionedThroughput': { - 'ReadCapacityUnits': 3, - 'WriteCapacityUnits': 4 - } - }], - AttributeDefinitions=[ - { - 'AttributeName': 'forum_name', - 'AttributeType': 'S' - }, - { - 'AttributeName': 'subject', - 'AttributeType': 'S' - }, - ], - ProvisionedThroughput={ - 'ReadCapacityUnits': 5, - 'WriteCapacityUnits': 6 - } - ) - table = dynamodb.Table('users') - - gsi_throughput = table.global_secondary_indexes[0]['ProvisionedThroughput'] - gsi_throughput['ReadCapacityUnits'].should.equal(3) - gsi_throughput['WriteCapacityUnits'].should.equal(4) - - table.provisioned_throughput['ReadCapacityUnits'].should.equal(5) - table.provisioned_throughput['WriteCapacityUnits'].should.equal(6) - - table.update(GlobalSecondaryIndexUpdates=[{ - 'Update': { - 'IndexName': 'TestGSI', - 'ProvisionedThroughput': { - 'ReadCapacityUnits': 10, - 'WriteCapacityUnits': 11, - } - }, - }]) - - table = dynamodb.Table('users') - - # Primary throughput has not changed - table.provisioned_throughput['ReadCapacityUnits'].should.equal(5) - table.provisioned_throughput['WriteCapacityUnits'].should.equal(6) - - gsi_throughput = table.global_secondary_indexes[0]['ProvisionedThroughput'] - gsi_throughput['ReadCapacityUnits'].should.equal(10) - gsi_throughput['WriteCapacityUnits'].should.equal(11) - - -@mock_dynamodb2 -def test_update_table_gsi_create(): - dynamodb = boto3.resource('dynamodb', region_name='us-east-1') - - # Create the DynamoDB table. 
-    table = dynamodb.create_table(
-        TableName='users',
-        KeySchema=[
-            {
-                'AttributeName': 'forum_name',
-                'KeyType': 'HASH'
-            },
-            {
-                'AttributeName': 'subject',
-                'KeyType': 'RANGE'
-            },
-        ],
-        AttributeDefinitions=[
-            {
-                'AttributeName': 'forum_name',
-                'AttributeType': 'S'
-            },
-            {
-                'AttributeName': 'subject',
-                'AttributeType': 'S'
-            },
-        ],
-        ProvisionedThroughput={
-            'ReadCapacityUnits': 5,
-            'WriteCapacityUnits': 6
-        }
-    )
-    table = dynamodb.Table('users')
-
-    table.global_secondary_indexes.should.have.length_of(0)
-
-    table.update(GlobalSecondaryIndexUpdates=[{
-        'Create': {
-            'IndexName': 'TestGSI',
-            'KeySchema': [
-                {
-                    'AttributeName': 'username',
-                    'KeyType': 'HASH',
-                },
-                {
-                    'AttributeName': 'created',
-                    'KeyType': 'RANGE',
-                }
-            ],
-            'Projection': {
-                'ProjectionType': 'ALL',
-            },
-            'ProvisionedThroughput': {
-                'ReadCapacityUnits': 3,
-                'WriteCapacityUnits': 4
-            }
-        },
-    }])
-
-    table = dynamodb.Table('users')
-    table.global_secondary_indexes.should.have.length_of(1)
-
-    gsi_throughput = table.global_secondary_indexes[0]['ProvisionedThroughput']
-    gsi_throughput['ReadCapacityUnits'].should.equal(3)
-    gsi_throughput['WriteCapacityUnits'].should.equal(4)
-
-    # Check update works
-    table.update(GlobalSecondaryIndexUpdates=[{
-        'Update': {
-            'IndexName': 'TestGSI',
-            'ProvisionedThroughput': {
-                'ReadCapacityUnits': 10,
-                'WriteCapacityUnits': 11,
-            }
-        },
-    }])
-    table = dynamodb.Table('users')
-
-    gsi_throughput = table.global_secondary_indexes[0]['ProvisionedThroughput']
-    gsi_throughput['ReadCapacityUnits'].should.equal(10)
-    gsi_throughput['WriteCapacityUnits'].should.equal(11)
-
-    table.update(GlobalSecondaryIndexUpdates=[{
-        'Delete': {
-            'IndexName': 'TestGSI',
-        },
-    }])
-
-    table = dynamodb.Table('users')
-    table.global_secondary_indexes.should.have.length_of(0)
-
-
-@mock_dynamodb2
-def test_update_table_gsi_throughput():
-    dynamodb = boto3.resource('dynamodb', region_name='us-east-1')
-
-    # Create the DynamoDB table.
-    table = dynamodb.create_table(
-        TableName='users',
-        KeySchema=[
-            {
-                'AttributeName': 'forum_name',
-                'KeyType': 'HASH'
-            },
-            {
-                'AttributeName': 'subject',
-                'KeyType': 'RANGE'
-            },
-        ],
-        GlobalSecondaryIndexes=[{
-            'IndexName': 'TestGSI',
-            'KeySchema': [
-                {
-                    'AttributeName': 'username',
-                    'KeyType': 'HASH',
-                },
-                {
-                    'AttributeName': 'created',
-                    'KeyType': 'RANGE',
-                }
-            ],
-            'Projection': {
-                'ProjectionType': 'ALL',
-            },
-            'ProvisionedThroughput': {
-                'ReadCapacityUnits': 3,
-                'WriteCapacityUnits': 4
-            }
-        }],
-        AttributeDefinitions=[
-            {
-                'AttributeName': 'forum_name',
-                'AttributeType': 'S'
-            },
-            {
-                'AttributeName': 'subject',
-                'AttributeType': 'S'
-            },
-        ],
-        ProvisionedThroughput={
-            'ReadCapacityUnits': 5,
-            'WriteCapacityUnits': 6
-        }
-    )
-    table = dynamodb.Table('users')
-    table.global_secondary_indexes.should.have.length_of(1)
-
-    table.update(GlobalSecondaryIndexUpdates=[{
-        'Delete': {
-            'IndexName': 'TestGSI',
-        },
-    }])
-
-    table = dynamodb.Table('users')
-    table.global_secondary_indexes.should.have.length_of(0)
-
-
-@mock_dynamodb2
-def test_query_pagination():
-    table = _create_table_with_range_key()
-    for i in range(10):
-        table.put_item(Item={
-            'forum_name': 'the-key',
-            'subject': '{0}'.format(i),
-            'username': 'johndoe',
-            'created': Decimal('3'),
-        })
-
-    page1 = table.query(
-        KeyConditionExpression=Key('forum_name').eq('the-key'),
-        Limit=6
-    )
-    page1['Count'].should.equal(6)
-    page1['Items'].should.have.length_of(6)
-    page1.should.have.key('LastEvaluatedKey')
-
-    page2 = table.query(
-        KeyConditionExpression=Key('forum_name').eq('the-key'),
-        Limit=6,
-        ExclusiveStartKey=page1['LastEvaluatedKey']
-    )
-    page2['Count'].should.equal(4)
-    page2['Items'].should.have.length_of(4)
-    page2.should_not.have.key('LastEvaluatedKey')
-
-    results = page1['Items'] + page2['Items']
-    subjects = set([int(r['subject']) for r in results])
-    subjects.should.equal(set(range(10)))
+from __future__ import unicode_literals
+
+from decimal import Decimal
+
+import boto
+import boto3
+from boto3.dynamodb.conditions import Key
+from botocore.exceptions import ClientError
+import sure # noqa
+from freezegun import freeze_time
+from moto import mock_dynamodb2, mock_dynamodb2_deprecated
+from boto.exception import JSONResponseError
+from tests.helpers import requires_boto_gte
+try:
+    from boto.dynamodb2.fields import GlobalAllIndex, HashKey, RangeKey, AllIndex
+    from boto.dynamodb2.table import Item, Table
+    from boto.dynamodb2.types import STRING, NUMBER
+    from boto.dynamodb2.exceptions import ValidationException
+    from boto.dynamodb2.exceptions import ConditionalCheckFailedException
+except ImportError:
+    pass
+
+
+def create_table():
+    table = Table.create('messages', schema=[
+        HashKey('forum_name'),
+        RangeKey('subject'),
+    ], throughput={
+        'read': 10,
+        'write': 10,
+    })
+    return table
+
+
+def create_table_with_local_indexes():
+    table = Table.create(
+        'messages',
+        schema=[
+            HashKey('forum_name'),
+            RangeKey('subject'),
+        ],
+        throughput={
+            'read': 10,
+            'write': 10,
+        },
+        indexes=[
+            AllIndex(
+                'threads_index',
+                parts=[
+                    HashKey('forum_name', data_type=STRING),
+                    RangeKey('threads', data_type=NUMBER),
+                ]
+            )
+        ]
+    )
+    return table
+
+
+def iterate_results(res):
+    for i in res:
+        pass
+
+
+@requires_boto_gte("2.9")
+@mock_dynamodb2_deprecated
+@freeze_time("2012-01-14")
+def test_create_table():
+    table = create_table()
+    expected = {
+        'Table': {
+            'AttributeDefinitions': [
+                {'AttributeName': 'forum_name', 'AttributeType': 'S'},
+                {'AttributeName': 
'subject', 'AttributeType': 'S'} + ], + 'ProvisionedThroughput': { + 'NumberOfDecreasesToday': 0, 'WriteCapacityUnits': 10, 'ReadCapacityUnits': 10 + }, + 'TableSizeBytes': 0, + 'TableName': 'messages', + 'TableStatus': 'ACTIVE', + 'TableArn': 'arn:aws:dynamodb:us-east-1:123456789011:table/messages', + 'KeySchema': [ + {'KeyType': 'HASH', 'AttributeName': 'forum_name'}, + {'KeyType': 'RANGE', 'AttributeName': 'subject'} + ], + 'LocalSecondaryIndexes': [], + 'ItemCount': 0, 'CreationDateTime': 1326499200.0, + 'GlobalSecondaryIndexes': [] + } + } + table.describe().should.equal(expected) + + +@requires_boto_gte("2.9") +@mock_dynamodb2_deprecated +@freeze_time("2012-01-14") +def test_create_table_with_local_index(): + table = create_table_with_local_indexes() + expected = { + 'Table': { + 'AttributeDefinitions': [ + {'AttributeName': 'forum_name', 'AttributeType': 'S'}, + {'AttributeName': 'subject', 'AttributeType': 'S'}, + {'AttributeName': 'threads', 'AttributeType': 'N'} + ], + 'ProvisionedThroughput': { + 'NumberOfDecreasesToday': 0, + 'WriteCapacityUnits': 10, + 'ReadCapacityUnits': 10, + }, + 'TableSizeBytes': 0, + 'TableName': 'messages', + 'TableStatus': 'ACTIVE', + 'TableArn': 'arn:aws:dynamodb:us-east-1:123456789011:table/messages', + 'KeySchema': [ + {'KeyType': 'HASH', 'AttributeName': 'forum_name'}, + {'KeyType': 'RANGE', 'AttributeName': 'subject'} + ], + 'LocalSecondaryIndexes': [ + { + 'IndexName': 'threads_index', + 'KeySchema': [ + {'AttributeName': 'forum_name', 'KeyType': 'HASH'}, + {'AttributeName': 'threads', 'KeyType': 'RANGE'} + ], + 'Projection': {'ProjectionType': 'ALL'} + } + ], + 'ItemCount': 0, + 'CreationDateTime': 1326499200.0, + 'GlobalSecondaryIndexes': [] + } + } + table.describe().should.equal(expected) + + +@requires_boto_gte("2.9") +@mock_dynamodb2_deprecated +def test_delete_table(): + conn = boto.dynamodb2.layer1.DynamoDBConnection() + table = create_table() + conn.list_tables()["TableNames"].should.have.length_of(1) + + table.delete() + conn.list_tables()["TableNames"].should.have.length_of(0) + conn.delete_table.when.called_with( + 'messages').should.throw(JSONResponseError) + + +@requires_boto_gte("2.9") +@mock_dynamodb2_deprecated +def test_update_table_throughput(): + table = create_table() + table.throughput["read"].should.equal(10) + table.throughput["write"].should.equal(10) + table.update(throughput={ + 'read': 5, + 'write': 15, + }) + + table.throughput["read"].should.equal(5) + table.throughput["write"].should.equal(15) + + table.update(throughput={ + 'read': 5, + 'write': 6, + }) + + table.describe() + + table.throughput["read"].should.equal(5) + table.throughput["write"].should.equal(6) + + +@requires_boto_gte("2.9") +@mock_dynamodb2_deprecated +def test_item_add_and_describe_and_update(): + table = create_table() + ok = table.put_item(data={ + 'forum_name': 'LOLCat Forum', + 'subject': 'Check this out!', + 'Body': 'http://url_to_lolcat.gif', + 'SentBy': 'User A', + 'ReceivedTime': '12/9/2011 11:36:03 PM', + }) + ok.should.equal(True) + + table.get_item(forum_name="LOLCat Forum", + subject='Check this out!').should_not.be.none + + returned_item = table.get_item( + forum_name='LOLCat Forum', + subject='Check this out!' 
+ ) + dict(returned_item).should.equal({ + 'forum_name': 'LOLCat Forum', + 'subject': 'Check this out!', + 'Body': 'http://url_to_lolcat.gif', + 'SentBy': 'User A', + 'ReceivedTime': '12/9/2011 11:36:03 PM', + }) + + returned_item['SentBy'] = 'User B' + returned_item.save(overwrite=True) + + returned_item = table.get_item( + forum_name='LOLCat Forum', + subject='Check this out!' + ) + dict(returned_item).should.equal({ + 'forum_name': 'LOLCat Forum', + 'subject': 'Check this out!', + 'Body': 'http://url_to_lolcat.gif', + 'SentBy': 'User B', + 'ReceivedTime': '12/9/2011 11:36:03 PM', + }) + + +@requires_boto_gte("2.9") +@mock_dynamodb2_deprecated +def test_item_partial_save(): + table = create_table() + + data = { + 'forum_name': 'LOLCat Forum', + 'subject': 'The LOLz', + 'Body': 'http://url_to_lolcat.gif', + 'SentBy': 'User A', + } + + table.put_item(data=data) + returned_item = table.get_item( + forum_name="LOLCat Forum", subject='The LOLz') + + returned_item['SentBy'] = 'User B' + returned_item.partial_save() + + returned_item = table.get_item( + forum_name='LOLCat Forum', + subject='The LOLz' + ) + dict(returned_item).should.equal({ + 'forum_name': 'LOLCat Forum', + 'subject': 'The LOLz', + 'Body': 'http://url_to_lolcat.gif', + 'SentBy': 'User B', + }) + + +@requires_boto_gte("2.9") +@mock_dynamodb2_deprecated +def test_item_put_without_table(): + table = Table('undeclared-table') + item_data = { + 'forum_name': 'LOLCat Forum', + 'Body': 'http://url_to_lolcat.gif', + 'SentBy': 'User A', + 'ReceivedTime': '12/9/2011 11:36:03 PM', + } + item = Item(table, item_data) + item.save.when.called_with().should.throw(JSONResponseError) + + +@requires_boto_gte("2.9") +@mock_dynamodb2_deprecated +def test_get_missing_item(): + table = create_table() + + table.get_item.when.called_with( + hash_key='tester', + range_key='other', + ).should.throw(ValidationException) + + +@requires_boto_gte("2.9") +@mock_dynamodb2_deprecated +def test_get_item_with_undeclared_table(): + table = Table('undeclared-table') + table.get_item.when.called_with( + test_hash=3241526475).should.throw(JSONResponseError) + + +@requires_boto_gte("2.9") +@mock_dynamodb2_deprecated +def test_get_item_without_range_key(): + table = Table.create('messages', schema=[ + HashKey('test_hash'), + RangeKey('test_range'), + ], throughput={ + 'read': 10, + 'write': 10, + }) + + hash_key = 3241526475 + range_key = 1234567890987 + table.put_item(data={'test_hash': hash_key, 'test_range': range_key}) + table.get_item.when.called_with( + test_hash=hash_key).should.throw(ValidationException) + + +@requires_boto_gte("2.30.0") +@mock_dynamodb2_deprecated +def test_delete_item(): + table = create_table() + item_data = { + 'forum_name': 'LOLCat Forum', + 'Body': 'http://url_to_lolcat.gif', + 'SentBy': 'User A', + 'ReceivedTime': '12/9/2011 11:36:03 PM', + } + item = Item(table, item_data) + item['subject'] = 'Check this out!' 
+ item.save() + table.count().should.equal(1) + + response = item.delete() + response.should.equal(True) + + table.count().should.equal(0) + # Deletes are idempotent + item.delete().should.equal(True) + + +@requires_boto_gte("2.9") +@mock_dynamodb2_deprecated +def test_delete_item_with_undeclared_table(): + table = Table("undeclared-table") + item_data = { + 'forum_name': 'LOLCat Forum', + 'Body': 'http://url_to_lolcat.gif', + 'SentBy': 'User A', + 'ReceivedTime': '12/9/2011 11:36:03 PM', + } + item = Item(table, item_data) + item.delete.when.called_with().should.throw(JSONResponseError) + + +@requires_boto_gte("2.9") +@mock_dynamodb2_deprecated +def test_query(): + table = create_table() + + item_data = { + 'forum_name': 'LOLCat Forum', + 'Body': 'http://url_to_lolcat.gif', + 'SentBy': 'User A', + 'ReceivedTime': '12/9/2011 11:36:03 PM', + 'subject': 'Check this out!' + } + item = Item(table, item_data) + item.save(overwrite=True) + + item['forum_name'] = 'the-key' + item['subject'] = '456' + item.save(overwrite=True) + + item['forum_name'] = 'the-key' + item['subject'] = '123' + item.save(overwrite=True) + + item['forum_name'] = 'the-key' + item['subject'] = '789' + item.save(overwrite=True) + + table.count().should.equal(4) + + results = table.query_2(forum_name__eq='the-key', + subject__gt='1', consistent=True) + expected = ["123", "456", "789"] + for index, item in enumerate(results): + item["subject"].should.equal(expected[index]) + + results = table.query_2(forum_name__eq="the-key", + subject__gt='1', reverse=True) + for index, item in enumerate(results): + item["subject"].should.equal(expected[len(expected) - 1 - index]) + + results = table.query_2(forum_name__eq='the-key', + subject__gt='1', consistent=True) + sum(1 for _ in results).should.equal(3) + + results = table.query_2(forum_name__eq='the-key', + subject__gt='234', consistent=True) + sum(1 for _ in results).should.equal(2) + + results = table.query_2(forum_name__eq='the-key', subject__gt='9999') + sum(1 for _ in results).should.equal(0) + + results = table.query_2(forum_name__eq='the-key', subject__beginswith='12') + sum(1 for _ in results).should.equal(1) + + results = table.query_2(forum_name__eq='the-key', subject__beginswith='7') + sum(1 for _ in results).should.equal(1) + + results = table.query_2(forum_name__eq='the-key', + subject__between=['567', '890']) + sum(1 for _ in results).should.equal(1) + + +@requires_boto_gte("2.9") +@mock_dynamodb2_deprecated +def test_query_with_undeclared_table(): + table = Table('undeclared') + results = table.query( + forum_name__eq='Amazon DynamoDB', + subject__beginswith='DynamoDB', + limit=1 + ) + iterate_results.when.called_with(results).should.throw(JSONResponseError) + + +@requires_boto_gte("2.9") +@mock_dynamodb2_deprecated +def test_scan(): + table = create_table() + item_data = { + 'Body': 'http://url_to_lolcat.gif', + 'SentBy': 'User A', + 'ReceivedTime': '12/9/2011 11:36:03 PM', + } + item_data['forum_name'] = 'the-key' + item_data['subject'] = '456' + + item = Item(table, item_data) + item.save() + + item['forum_name'] = 'the-key' + item['subject'] = '123' + item.save() + + item_data = { + 'Body': 'http://url_to_lolcat.gif', + 'SentBy': 'User B', + 'ReceivedTime': '12/9/2011 11:36:09 PM', + 'Ids': set([1, 2, 3]), + 'PK': 7, + } + + item_data['forum_name'] = 'the-key' + item_data['subject'] = '789' + + item = Item(table, item_data) + item.save() + + results = table.scan() + sum(1 for _ in results).should.equal(3) + + results = table.scan(SentBy__eq='User B') + sum(1 for _ 
in results).should.equal(1) + + results = table.scan(Body__beginswith='http') + sum(1 for _ in results).should.equal(3) + + results = table.scan(Ids__null=False) + sum(1 for _ in results).should.equal(1) + + results = table.scan(Ids__null=True) + sum(1 for _ in results).should.equal(2) + + results = table.scan(PK__between=[8, 9]) + sum(1 for _ in results).should.equal(0) + + results = table.scan(PK__between=[5, 8]) + sum(1 for _ in results).should.equal(1) + + +@requires_boto_gte("2.9") +@mock_dynamodb2_deprecated +def test_scan_with_undeclared_table(): + conn = boto.dynamodb2.layer1.DynamoDBConnection() + conn.scan.when.called_with( + table_name='undeclared-table', + scan_filter={ + "SentBy": { + "AttributeValueList": [{ + "S": "User B"} + ], + "ComparisonOperator": "EQ" + } + }, + ).should.throw(JSONResponseError) + + +@requires_boto_gte("2.9") +@mock_dynamodb2_deprecated +def test_write_batch(): + table = create_table() + with table.batch_write() as batch: + batch.put_item(data={ + 'forum_name': 'the-key', + 'subject': '123', + 'Body': 'http://url_to_lolcat.gif', + 'SentBy': 'User A', + 'ReceivedTime': '12/9/2011 11:36:03 PM', + }) + batch.put_item(data={ + 'forum_name': 'the-key', + 'subject': '789', + 'Body': 'http://url_to_lolcat.gif', + 'SentBy': 'User B', + 'ReceivedTime': '12/9/2011 11:36:03 PM', + }) + + table.count().should.equal(2) + with table.batch_write() as batch: + batch.delete_item( + forum_name='the-key', + subject='789' + ) + + table.count().should.equal(1) + + +@requires_boto_gte("2.9") +@mock_dynamodb2_deprecated +def test_batch_read(): + table = create_table() + item_data = { + 'Body': 'http://url_to_lolcat.gif', + 'SentBy': 'User A', + 'ReceivedTime': '12/9/2011 11:36:03 PM', + } + + item_data['forum_name'] = 'the-key' + item_data['subject'] = '456' + + item = Item(table, item_data) + item.save() + + item = Item(table, item_data) + item_data['forum_name'] = 'the-key' + item_data['subject'] = '123' + item.save() + + item_data = { + 'Body': 'http://url_to_lolcat.gif', + 'SentBy': 'User B', + 'ReceivedTime': '12/9/2011 11:36:03 PM', + 'Ids': set([1, 2, 3]), + 'PK': 7, + } + item = Item(table, item_data) + item_data['forum_name'] = 'another-key' + item_data['subject'] = '789' + item.save() + results = table.batch_get( + keys=[ + {'forum_name': 'the-key', 'subject': '123'}, + {'forum_name': 'another-key', 'subject': '789'}, + ] + ) + + # Iterate through so that batch_item gets called + count = len([x for x in results]) + count.should.equal(2) + + +@requires_boto_gte("2.9") +@mock_dynamodb2_deprecated +def test_get_key_fields(): + table = create_table() + kf = table.get_key_fields() + kf.should.equal(['forum_name', 'subject']) + + +@mock_dynamodb2_deprecated +def test_create_with_global_indexes(): + conn = boto.dynamodb2.layer1.DynamoDBConnection() + + Table.create('messages', schema=[ + HashKey('subject'), + RangeKey('version'), + ], global_indexes=[ + GlobalAllIndex('topic-created_at-index', + parts=[ + HashKey('topic'), + RangeKey('created_at', data_type='N') + ], + throughput={ + 'read': 6, + 'write': 1 + } + ), + ]) + + table_description = conn.describe_table("messages") + table_description['Table']["GlobalSecondaryIndexes"].should.equal([ + { + "IndexName": "topic-created_at-index", + "KeySchema": [ + { + "AttributeName": "topic", + "KeyType": "HASH" + }, + { + "AttributeName": "created_at", + "KeyType": "RANGE" + }, + ], + "Projection": { + "ProjectionType": "ALL" + }, + "ProvisionedThroughput": { + "ReadCapacityUnits": 6, + "WriteCapacityUnits": 1, + } + } + ]) + 
+ +@mock_dynamodb2_deprecated +def test_query_with_global_indexes(): + table = Table.create('messages', schema=[ + HashKey('subject'), + RangeKey('version'), + ], global_indexes=[ + GlobalAllIndex('topic-created_at-index', + parts=[ + HashKey('topic'), + RangeKey('created_at', data_type='N') + ], + throughput={ + 'read': 6, + 'write': 1 + } + ), + GlobalAllIndex('status-created_at-index', + parts=[ + HashKey('status'), + RangeKey('created_at', data_type='N') + ], + throughput={ + 'read': 2, + 'write': 1 + } + ) + ]) + + item_data = { + 'subject': 'Check this out!', + 'version': '1', + 'created_at': 0, + 'status': 'inactive' + } + item = Item(table, item_data) + item.save(overwrite=True) + + item['version'] = '2' + item.save(overwrite=True) + + results = table.query(status__eq='active') + list(results).should.have.length_of(0) + + +@mock_dynamodb2_deprecated +def test_query_with_local_indexes(): + table = create_table_with_local_indexes() + item_data = { + 'forum_name': 'Cool Forum', + 'subject': 'Check this out!', + 'version': '1', + 'threads': 1, + 'status': 'inactive' + } + item = Item(table, item_data) + item.save(overwrite=True) + + item['version'] = '2' + item.save(overwrite=True) + results = table.query(forum_name__eq='Cool Forum', + index='threads_index', threads__eq=1) + list(results).should.have.length_of(1) + + +@requires_boto_gte("2.9") +@mock_dynamodb2_deprecated +def test_query_filter_eq(): + table = create_table_with_local_indexes() + item_data = [ + { + 'forum_name': 'Cool Forum', + 'subject': 'Check this out!', + 'version': '1', + 'threads': 1, + }, + { + 'forum_name': 'Cool Forum', + 'subject': 'Read this now!', + 'version': '1', + 'threads': 5, + }, + { + 'forum_name': 'Cool Forum', + 'subject': 'Please read this... please', + 'version': '1', + 'threads': 0, + } + ] + for data in item_data: + item = Item(table, data) + item.save(overwrite=True) + results = table.query_2( + forum_name__eq='Cool Forum', index='threads_index', threads__eq=5 + ) + list(results).should.have.length_of(1) + + +@requires_boto_gte("2.9") +@mock_dynamodb2_deprecated +def test_query_filter_lt(): + table = create_table_with_local_indexes() + item_data = [ + { + 'forum_name': 'Cool Forum', + 'subject': 'Check this out!', + 'version': '1', + 'threads': 1, + }, + { + 'forum_name': 'Cool Forum', + 'subject': 'Read this now!', + 'version': '1', + 'threads': 5, + }, + { + 'forum_name': 'Cool Forum', + 'subject': 'Please read this... please', + 'version': '1', + 'threads': 0, + } + ] + for data in item_data: + item = Item(table, data) + item.save(overwrite=True) + + results = table.query( + forum_name__eq='Cool Forum', index='threads_index', threads__lt=5 + ) + results = list(results) + results.should.have.length_of(2) + + +@requires_boto_gte("2.9") +@mock_dynamodb2_deprecated +def test_query_filter_gt(): + table = create_table_with_local_indexes() + item_data = [ + { + 'forum_name': 'Cool Forum', + 'subject': 'Check this out!', + 'version': '1', + 'threads': 1, + }, + { + 'forum_name': 'Cool Forum', + 'subject': 'Read this now!', + 'version': '1', + 'threads': 5, + }, + { + 'forum_name': 'Cool Forum', + 'subject': 'Please read this... 
please', + 'version': '1', + 'threads': 0, + } + ] + for data in item_data: + item = Item(table, data) + item.save(overwrite=True) + + results = table.query( + forum_name__eq='Cool Forum', index='threads_index', threads__gt=1 + ) + list(results).should.have.length_of(1) + + +@requires_boto_gte("2.9") +@mock_dynamodb2_deprecated +def test_query_filter_lte(): + table = create_table_with_local_indexes() + item_data = [ + { + 'forum_name': 'Cool Forum', + 'subject': 'Check this out!', + 'version': '1', + 'threads': 1, + }, + { + 'forum_name': 'Cool Forum', + 'subject': 'Read this now!', + 'version': '1', + 'threads': 5, + }, + { + 'forum_name': 'Cool Forum', + 'subject': 'Please read this... please', + 'version': '1', + 'threads': 0, + } + ] + for data in item_data: + item = Item(table, data) + item.save(overwrite=True) + + results = table.query( + forum_name__eq='Cool Forum', index='threads_index', threads__lte=5 + ) + list(results).should.have.length_of(3) + + +@requires_boto_gte("2.9") +@mock_dynamodb2_deprecated +def test_query_filter_gte(): + table = create_table_with_local_indexes() + item_data = [ + { + 'forum_name': 'Cool Forum', + 'subject': 'Check this out!', + 'version': '1', + 'threads': 1, + }, + { + 'forum_name': 'Cool Forum', + 'subject': 'Read this now!', + 'version': '1', + 'threads': 5, + }, + { + 'forum_name': 'Cool Forum', + 'subject': 'Please read this... please', + 'version': '1', + 'threads': 0, + } + ] + for data in item_data: + item = Item(table, data) + item.save(overwrite=True) + + results = table.query( + forum_name__eq='Cool Forum', index='threads_index', threads__gte=1 + ) + list(results).should.have.length_of(2) + + +@requires_boto_gte("2.9") +@mock_dynamodb2_deprecated +def test_query_non_hash_range_key(): + table = create_table_with_local_indexes() + item_data = [ + { + 'forum_name': 'Cool Forum', + 'subject': 'Check this out!', + 'version': '1', + 'threads': 1, + }, + { + 'forum_name': 'Cool Forum', + 'subject': 'Read this now!', + 'version': '3', + 'threads': 5, + }, + { + 'forum_name': 'Cool Forum', + 'subject': 'Please read this... 
please', + 'version': '2', + 'threads': 0, + } + ] + for data in item_data: + item = Item(table, data) + item.save(overwrite=True) + + results = table.query( + forum_name__eq='Cool Forum', version__gt="2" + ) + results = list(results) + results.should.have.length_of(1) + + results = table.query( + forum_name__eq='Cool Forum', version__lt="3" + ) + results = list(results) + results.should.have.length_of(2) + + +@mock_dynamodb2_deprecated +def test_reverse_query(): + conn = boto.dynamodb2.layer1.DynamoDBConnection() + + table = Table.create('messages', schema=[ + HashKey('subject'), + RangeKey('created_at', data_type='N') + ]) + + for i in range(10): + table.put_item({ + 'subject': "Hi", + 'created_at': i + }) + + results = table.query_2(subject__eq="Hi", + created_at__lt=6, + limit=4, + reverse=True) + + expected = [Decimal(5), Decimal(4), Decimal(3), Decimal(2)] + [r['created_at'] for r in results].should.equal(expected) + + +@mock_dynamodb2_deprecated +def test_lookup(): + from decimal import Decimal + table = Table.create('messages', schema=[ + HashKey('test_hash'), + RangeKey('test_range'), + ], throughput={ + 'read': 10, + 'write': 10, + }) + + hash_key = 3241526475 + range_key = 1234567890987 + data = {'test_hash': hash_key, 'test_range': range_key} + table.put_item(data=data) + message = table.lookup(hash_key, range_key) + message.get('test_hash').should.equal(Decimal(hash_key)) + message.get('test_range').should.equal(Decimal(range_key)) + + +@mock_dynamodb2_deprecated +def test_failed_overwrite(): + table = Table.create('messages', schema=[ + HashKey('id'), + RangeKey('range'), + ], throughput={ + 'read': 7, + 'write': 3, + }) + + data1 = {'id': '123', 'range': 'abc', 'data': '678'} + table.put_item(data=data1) + + data2 = {'id': '123', 'range': 'abc', 'data': '345'} + table.put_item(data=data2, overwrite=True) + + data3 = {'id': '123', 'range': 'abc', 'data': '812'} + table.put_item.when.called_with(data=data3).should.throw( + ConditionalCheckFailedException) + + returned_item = table.lookup('123', 'abc') + dict(returned_item).should.equal(data2) + + data4 = {'id': '123', 'range': 'ghi', 'data': 812} + table.put_item(data=data4) + + returned_item = table.lookup('123', 'ghi') + dict(returned_item).should.equal(data4) + + +@mock_dynamodb2_deprecated +def test_conflicting_writes(): + table = Table.create('messages', schema=[ + HashKey('id'), + RangeKey('range'), + ]) + + item_data = {'id': '123', 'range': 'abc', 'data': '678'} + item1 = Item(table, item_data) + item2 = Item(table, item_data) + item1.save() + + item1['data'] = '579' + item2['data'] = '912' + + item1.save() + item2.save.when.called_with().should.throw(ConditionalCheckFailedException) + +""" +boto3 +""" + + +@mock_dynamodb2 +def test_boto3_conditions(): + dynamodb = boto3.resource('dynamodb', region_name='us-east-1') + + # Create the DynamoDB table. 
+ table = dynamodb.create_table( + TableName='users', + KeySchema=[ + { + 'AttributeName': 'forum_name', + 'KeyType': 'HASH' + }, + { + 'AttributeName': 'subject', + 'KeyType': 'RANGE' + }, + ], + AttributeDefinitions=[ + { + 'AttributeName': 'forum_name', + 'AttributeType': 'S' + }, + { + 'AttributeName': 'subject', + 'AttributeType': 'S' + }, + ], + ProvisionedThroughput={ + 'ReadCapacityUnits': 5, + 'WriteCapacityUnits': 5 + } + ) + table = dynamodb.Table('users') + + table.put_item(Item={ + 'forum_name': 'the-key', + 'subject': '123' + }) + table.put_item(Item={ + 'forum_name': 'the-key', + 'subject': '456' + }) + table.put_item(Item={ + 'forum_name': 'the-key', + 'subject': '789' + }) + + # Test a query returning all items + results = table.query( + KeyConditionExpression=Key('forum_name').eq( + 'the-key') & Key("subject").gt('1'), + ScanIndexForward=True, + ) + expected = ["123", "456", "789"] + for index, item in enumerate(results['Items']): + item["subject"].should.equal(expected[index]) + + # Return all items again, but in reverse + results = table.query( + KeyConditionExpression=Key('forum_name').eq( + 'the-key') & Key("subject").gt('1'), + ScanIndexForward=False, + ) + for index, item in enumerate(reversed(results['Items'])): + item["subject"].should.equal(expected[index]) + + # Filter the subjects to only return some of the results + results = table.query( + KeyConditionExpression=Key('forum_name').eq( + 'the-key') & Key("subject").gt('234'), + ConsistentRead=True, + ) + results['Count'].should.equal(2) + + # Filter to return no results + results = table.query( + KeyConditionExpression=Key('forum_name').eq( + 'the-key') & Key("subject").gt('9999') + ) + results['Count'].should.equal(0) + + results = table.query( + KeyConditionExpression=Key('forum_name').eq( + 'the-key') & Key("subject").begins_with('12') + ) + results['Count'].should.equal(1) + + results = table.query( + KeyConditionExpression=Key("subject").begins_with( + '7') & Key('forum_name').eq('the-key') + ) + results['Count'].should.equal(1) + + results = table.query( + KeyConditionExpression=Key('forum_name').eq( + 'the-key') & Key("subject").between('567', '890') + ) + results['Count'].should.equal(1) + + +@mock_dynamodb2 +def test_boto3_put_item_with_conditions(): + import botocore + dynamodb = boto3.resource('dynamodb', region_name='us-east-1') + + # Create the DynamoDB table. 
+ table = dynamodb.create_table( + TableName='users', + KeySchema=[ + { + 'AttributeName': 'forum_name', + 'KeyType': 'HASH' + }, + { + 'AttributeName': 'subject', + 'KeyType': 'RANGE' + }, + ], + AttributeDefinitions=[ + { + 'AttributeName': 'forum_name', + 'AttributeType': 'S' + }, + { + 'AttributeName': 'subject', + 'AttributeType': 'S' + }, + ], + ProvisionedThroughput={ + 'ReadCapacityUnits': 5, + 'WriteCapacityUnits': 5 + } + ) + table = dynamodb.Table('users') + + table.put_item(Item={ + 'forum_name': 'the-key', + 'subject': '123' + }) + + table.put_item( + Item={ + 'forum_name': 'the-key-2', + 'subject': '1234', + }, + ConditionExpression='attribute_not_exists(forum_name) AND attribute_not_exists(subject)' + ) + + table.put_item.when.called_with( + Item={ + 'forum_name': 'the-key', + 'subject': '123' + }, + ConditionExpression='attribute_not_exists(forum_name) AND attribute_not_exists(subject)' + ).should.throw(botocore.exceptions.ClientError) + + table.put_item.when.called_with( + Item={ + 'forum_name': 'bogus-key', + 'subject': 'bogus', + 'test': '123' + }, + ConditionExpression='attribute_exists(forum_name) AND attribute_exists(subject)' + ).should.throw(botocore.exceptions.ClientError) + + +def _create_table_with_range_key(): + dynamodb = boto3.resource('dynamodb', region_name='us-east-1') + + # Create the DynamoDB table. + table = dynamodb.create_table( + TableName='users', + KeySchema=[ + { + 'AttributeName': 'forum_name', + 'KeyType': 'HASH' + }, + { + 'AttributeName': 'subject', + 'KeyType': 'RANGE' + }, + ], + GlobalSecondaryIndexes=[{ + 'IndexName': 'TestGSI', + 'KeySchema': [ + { + 'AttributeName': 'username', + 'KeyType': 'HASH', + }, + { + 'AttributeName': 'created', + 'KeyType': 'RANGE', + } + ], + 'Projection': { + 'ProjectionType': 'ALL', + }, + 'ProvisionedThroughput': { + 'ReadCapacityUnits': 5, + 'WriteCapacityUnits': 5 + } + }], + AttributeDefinitions=[ + { + 'AttributeName': 'forum_name', + 'AttributeType': 'S' + }, + { + 'AttributeName': 'subject', + 'AttributeType': 'S' + }, + { + 'AttributeName': 'username', + 'AttributeType': 'S' + }, + { + 'AttributeName': 'created', + 'AttributeType': 'N' + } + ], + ProvisionedThroughput={ + 'ReadCapacityUnits': 5, + 'WriteCapacityUnits': 5 + } + ) + return dynamodb.Table('users') + + +@mock_dynamodb2 +def test_update_item_range_key_set(): + table = _create_table_with_range_key() + table.put_item(Item={ + 'forum_name': 'the-key', + 'subject': '123', + 'username': 'johndoe', + 'created': Decimal('3'), + }) + + item_key = {'forum_name': 'the-key', 'subject': '123'} + table.update_item( + Key=item_key, + AttributeUpdates={ + 'username': { + 'Action': u'PUT', + 'Value': 'johndoe2' + }, + 'created': { + 'Action': u'PUT', + 'Value': Decimal('4'), + }, + 'mapfield': { + 'Action': u'PUT', + 'Value': {'key': 'value'}, + } + }, + ) + + returned_item = dict((k, str(v) if isinstance(v, Decimal) else v) + for k, v in table.get_item(Key=item_key)['Item'].items()) + dict(returned_item).should.equal({ + 'username': "johndoe2", + 'forum_name': 'the-key', + 'subject': '123', + 'created': '4', + 'mapfield': {'key': 'value'}, + }) + + +@mock_dynamodb2 +def test_update_item_does_not_exist_is_created(): + table = _create_table_with_range_key() + + item_key = {'forum_name': 'the-key', 'subject': '123'} + result = table.update_item( + Key=item_key, + AttributeUpdates={ + 'username': { + 'Action': u'PUT', + 'Value': 'johndoe2' + }, + 'created': { + 'Action': u'PUT', + 'Value': Decimal('4'), + }, + 'mapfield': { + 'Action': u'PUT', + 'Value': 
{'key': 'value'}, + } + }, + ReturnValues='ALL_OLD', + ) + + assert not result.get('Attributes') + + returned_item = dict((k, str(v) if isinstance(v, Decimal) else v) + for k, v in table.get_item(Key=item_key)['Item'].items()) + dict(returned_item).should.equal({ + 'username': "johndoe2", + 'forum_name': 'the-key', + 'subject': '123', + 'created': '4', + 'mapfield': {'key': 'value'}, + }) + + +@mock_dynamodb2 +def test_update_item_add_value(): + table = _create_table_with_range_key() + + table.put_item(Item={ + 'forum_name': 'the-key', + 'subject': '123', + 'numeric_field': Decimal('-1'), + }) + + item_key = {'forum_name': 'the-key', 'subject': '123'} + table.update_item( + Key=item_key, + AttributeUpdates={ + 'numeric_field': { + 'Action': u'ADD', + 'Value': Decimal('2'), + }, + }, + ) + + returned_item = dict((k, str(v) if isinstance(v, Decimal) else v) + for k, v in table.get_item(Key=item_key)['Item'].items()) + dict(returned_item).should.equal({ + 'numeric_field': '1', + 'forum_name': 'the-key', + 'subject': '123', + }) + + +@mock_dynamodb2 +def test_update_item_add_value_string_set(): + table = _create_table_with_range_key() + + table.put_item(Item={ + 'forum_name': 'the-key', + 'subject': '123', + 'string_set': set(['str1', 'str2']), + }) + + item_key = {'forum_name': 'the-key', 'subject': '123'} + table.update_item( + Key=item_key, + AttributeUpdates={ + 'string_set': { + 'Action': u'ADD', + 'Value': set(['str3']), + }, + }, + ) + + returned_item = dict((k, str(v) if isinstance(v, Decimal) else v) + for k, v in table.get_item(Key=item_key)['Item'].items()) + dict(returned_item).should.equal({ + 'string_set': set(['str1', 'str2', 'str3']), + 'forum_name': 'the-key', + 'subject': '123', + }) + + +@mock_dynamodb2 +def test_update_item_add_value_does_not_exist_is_created(): + table = _create_table_with_range_key() + + item_key = {'forum_name': 'the-key', 'subject': '123'} + table.update_item( + Key=item_key, + AttributeUpdates={ + 'numeric_field': { + 'Action': u'ADD', + 'Value': Decimal('2'), + }, + }, + ) + + returned_item = dict((k, str(v) if isinstance(v, Decimal) else v) + for k, v in table.get_item(Key=item_key)['Item'].items()) + dict(returned_item).should.equal({ + 'numeric_field': '2', + 'forum_name': 'the-key', + 'subject': '123', + }) + + +@mock_dynamodb2 +def test_update_item_with_expression(): + table = _create_table_with_range_key() + + table.put_item(Item={ + 'forum_name': 'the-key', + 'subject': '123', + 'field': '1' + }) + + item_key = {'forum_name': 'the-key', 'subject': '123'} + + table.update_item( + Key=item_key, + UpdateExpression='SET field=2', + ) + dict(table.get_item(Key=item_key)['Item']).should.equal({ + 'field': '2', + 'forum_name': 'the-key', + 'subject': '123', + }) + + table.update_item( + Key=item_key, + UpdateExpression='SET field = 3', + ) + dict(table.get_item(Key=item_key)['Item']).should.equal({ + 'field': '3', + 'forum_name': 'the-key', + 'subject': '123', + }) + +@mock_dynamodb2 +def test_update_item_add_with_expression(): + table = _create_table_with_range_key() + + item_key = {'forum_name': 'the-key', 'subject': '123'} + current_item = { + 'forum_name': 'the-key', + 'subject': '123', + 'str_set': {'item1', 'item2', 'item3'}, + 'num_set': {1, 2, 3}, + 'num_val': 6 + } + + # Put an entry in the DB to play with + table.put_item(Item=current_item) + + # Update item to add a string value to a string set + table.update_item( + Key=item_key, + UpdateExpression='ADD str_set :v', + ExpressionAttributeValues={ + ':v': {'item4'} + } + ) + 
current_item['str_set'] = current_item['str_set'].union({'item4'}) + dict(table.get_item(Key=item_key)['Item']).should.equal(current_item) + + # Update item to add a num value to a num set + table.update_item( + Key=item_key, + UpdateExpression='ADD num_set :v', + ExpressionAttributeValues={ + ':v': {6} + } + ) + current_item['num_set'] = current_item['num_set'].union({6}) + dict(table.get_item(Key=item_key)['Item']).should.equal(current_item) + + # Update item to add a value to a number value + table.update_item( + Key=item_key, + UpdateExpression='ADD num_val :v', + ExpressionAttributeValues={ + ':v': 20 + } + ) + current_item['num_val'] = current_item['num_val'] + 20 + dict(table.get_item(Key=item_key)['Item']).should.equal(current_item) + + # Attempt to add a number value to a string set, should raise Client Error + table.update_item.when.called_with( + Key=item_key, + UpdateExpression='ADD str_set :v', + ExpressionAttributeValues={ + ':v': 20 + } + ).should.have.raised(ClientError) + dict(table.get_item(Key=item_key)['Item']).should.equal(current_item) + + # Attempt to add a number set to the string set, should raise a ClientError + table.update_item.when.called_with( + Key=item_key, + UpdateExpression='ADD str_set :v', + ExpressionAttributeValues={ + ':v': { 20 } + } + ).should.have.raised(ClientError) + dict(table.get_item(Key=item_key)['Item']).should.equal(current_item) + + # Attempt to update with a bad expression + table.update_item.when.called_with( + Key=item_key, + UpdateExpression='ADD str_set bad_value' + ).should.have.raised(ClientError) + + # Attempt to add a string value instead of a string set + table.update_item.when.called_with( + Key=item_key, + UpdateExpression='ADD str_set :v', + ExpressionAttributeValues={ + ':v': 'new_string' + } + ).should.have.raised(ClientError) + + +@mock_dynamodb2 +def test_update_item_delete_with_expression(): + table = _create_table_with_range_key() + + item_key = {'forum_name': 'the-key', 'subject': '123'} + current_item = { + 'forum_name': 'the-key', + 'subject': '123', + 'str_set': {'item1', 'item2', 'item3'}, + 'num_set': {1, 2, 3}, + 'num_val': 6 + } + + # Put an entry in the DB to play with + table.put_item(Item=current_item) + + # Update item to delete a string value from a string set + table.update_item( + Key=item_key, + UpdateExpression='DELETE str_set :v', + ExpressionAttributeValues={ + ':v': {'item2'} + } + ) + current_item['str_set'] = current_item['str_set'].difference({'item2'}) + dict(table.get_item(Key=item_key)['Item']).should.equal(current_item) + + # Update item to delete a num value from a num set + table.update_item( + Key=item_key, + UpdateExpression='DELETE num_set :v', + ExpressionAttributeValues={ + ':v': {2} + } + ) + current_item['num_set'] = current_item['num_set'].difference({2}) + dict(table.get_item(Key=item_key)['Item']).should.equal(current_item) + + # Try to delete on a number, this should fail + table.update_item.when.called_with( + Key=item_key, + UpdateExpression='DELETE num_val :v', + ExpressionAttributeValues={ + ':v': 20 + } + ).should.have.raised(ClientError) + dict(table.get_item(Key=item_key)['Item']).should.equal(current_item) + + # Try to delete a string set from a number set + table.update_item.when.called_with( + Key=item_key, + UpdateExpression='DELETE num_set :v', + ExpressionAttributeValues={ + ':v': {'del_str'} + } + ).should.have.raised(ClientError) + dict(table.get_item(Key=item_key)['Item']).should.equal(current_item) + + # Attempt to update with a bad expression + 
table.update_item.when.called_with( + Key=item_key, + UpdateExpression='DELETE num_val badvalue' + ).should.have.raised(ClientError) + + +@mock_dynamodb2 +def test_boto3_query_gsi_range_comparison(): + table = _create_table_with_range_key() + + table.put_item(Item={ + 'forum_name': 'the-key', + 'subject': '123', + 'username': 'johndoe', + 'created': 3, + }) + table.put_item(Item={ + 'forum_name': 'the-key', + 'subject': '456', + 'username': 'johndoe', + 'created': 1, + }) + table.put_item(Item={ + 'forum_name': 'the-key', + 'subject': '789', + 'username': 'johndoe', + 'created': 2, + }) + table.put_item(Item={ + 'forum_name': 'the-key', + 'subject': '159', + 'username': 'janedoe', + 'created': 2, + }) + table.put_item(Item={ + 'forum_name': 'the-key', + 'subject': '601', + 'username': 'janedoe', + 'created': 5, + }) + + # Test a query returning all johndoe items + results = table.query( + KeyConditionExpression=Key('username').eq( + 'johndoe') & Key("created").gt(0), + ScanIndexForward=True, + IndexName='TestGSI', + ) + expected = ["456", "789", "123"] + for index, item in enumerate(results['Items']): + item["subject"].should.equal(expected[index]) + + # Return all johndoe items again, but in reverse + results = table.query( + KeyConditionExpression=Key('username').eq( + 'johndoe') & Key("created").gt(0), + ScanIndexForward=False, + IndexName='TestGSI', + ) + for index, item in enumerate(reversed(results['Items'])): + item["subject"].should.equal(expected[index]) + + # Filter the creation to only return some of the results + # And reverse order of hash + range key + results = table.query( + KeyConditionExpression=Key("created").gt( + 1) & Key('username').eq('johndoe'), + ConsistentRead=True, + IndexName='TestGSI', + ) + results['Count'].should.equal(2) + + # Filter to return no results + results = table.query( + KeyConditionExpression=Key('username').eq( + 'janedoe') & Key("created").gt(9), + IndexName='TestGSI', + ) + results['Count'].should.equal(0) + + results = table.query( + KeyConditionExpression=Key('username').eq( + 'janedoe') & Key("created").eq(5), + IndexName='TestGSI', + ) + results['Count'].should.equal(1) + + # Test range key sorting + results = table.query( + KeyConditionExpression=Key('username').eq( + 'johndoe') & Key("created").gt(0), + IndexName='TestGSI', + ) + expected = [Decimal('1'), Decimal('2'), Decimal('3')] + for index, item in enumerate(results['Items']): + item["created"].should.equal(expected[index]) + + +@mock_dynamodb2 +def test_boto3_update_table_throughput(): + dynamodb = boto3.resource('dynamodb', region_name='us-east-1') + + # Create the DynamoDB table. 
+ table = dynamodb.create_table( + TableName='users', + KeySchema=[ + { + 'AttributeName': 'forum_name', + 'KeyType': 'HASH' + }, + { + 'AttributeName': 'subject', + 'KeyType': 'RANGE' + }, + ], + AttributeDefinitions=[ + { + 'AttributeName': 'forum_name', + 'AttributeType': 'S' + }, + { + 'AttributeName': 'subject', + 'AttributeType': 'S' + }, + ], + ProvisionedThroughput={ + 'ReadCapacityUnits': 5, + 'WriteCapacityUnits': 6 + } + ) + table = dynamodb.Table('users') + + table.provisioned_throughput['ReadCapacityUnits'].should.equal(5) + table.provisioned_throughput['WriteCapacityUnits'].should.equal(6) + + table.update(ProvisionedThroughput={ + 'ReadCapacityUnits': 10, + 'WriteCapacityUnits': 11, + }) + + table = dynamodb.Table('users') + + table.provisioned_throughput['ReadCapacityUnits'].should.equal(10) + table.provisioned_throughput['WriteCapacityUnits'].should.equal(11) + + +@mock_dynamodb2 +def test_boto3_update_table_gsi_throughput(): + dynamodb = boto3.resource('dynamodb', region_name='us-east-1') + + # Create the DynamoDB table. + table = dynamodb.create_table( + TableName='users', + KeySchema=[ + { + 'AttributeName': 'forum_name', + 'KeyType': 'HASH' + }, + { + 'AttributeName': 'subject', + 'KeyType': 'RANGE' + }, + ], + GlobalSecondaryIndexes=[{ + 'IndexName': 'TestGSI', + 'KeySchema': [ + { + 'AttributeName': 'username', + 'KeyType': 'HASH', + }, + { + 'AttributeName': 'created', + 'KeyType': 'RANGE', + } + ], + 'Projection': { + 'ProjectionType': 'ALL', + }, + 'ProvisionedThroughput': { + 'ReadCapacityUnits': 3, + 'WriteCapacityUnits': 4 + } + }], + AttributeDefinitions=[ + { + 'AttributeName': 'forum_name', + 'AttributeType': 'S' + }, + { + 'AttributeName': 'subject', + 'AttributeType': 'S' + }, + ], + ProvisionedThroughput={ + 'ReadCapacityUnits': 5, + 'WriteCapacityUnits': 6 + } + ) + table = dynamodb.Table('users') + + gsi_throughput = table.global_secondary_indexes[0]['ProvisionedThroughput'] + gsi_throughput['ReadCapacityUnits'].should.equal(3) + gsi_throughput['WriteCapacityUnits'].should.equal(4) + + table.provisioned_throughput['ReadCapacityUnits'].should.equal(5) + table.provisioned_throughput['WriteCapacityUnits'].should.equal(6) + + table.update(GlobalSecondaryIndexUpdates=[{ + 'Update': { + 'IndexName': 'TestGSI', + 'ProvisionedThroughput': { + 'ReadCapacityUnits': 10, + 'WriteCapacityUnits': 11, + } + }, + }]) + + table = dynamodb.Table('users') + + # Primary throughput has not changed + table.provisioned_throughput['ReadCapacityUnits'].should.equal(5) + table.provisioned_throughput['WriteCapacityUnits'].should.equal(6) + + gsi_throughput = table.global_secondary_indexes[0]['ProvisionedThroughput'] + gsi_throughput['ReadCapacityUnits'].should.equal(10) + gsi_throughput['WriteCapacityUnits'].should.equal(11) + + +@mock_dynamodb2 +def test_update_table_gsi_create(): + dynamodb = boto3.resource('dynamodb', region_name='us-east-1') + + # Create the DynamoDB table. 
+    table = dynamodb.create_table(
+        TableName='users',
+        KeySchema=[
+            {
+                'AttributeName': 'forum_name',
+                'KeyType': 'HASH'
+            },
+            {
+                'AttributeName': 'subject',
+                'KeyType': 'RANGE'
+            },
+        ],
+        AttributeDefinitions=[
+            {
+                'AttributeName': 'forum_name',
+                'AttributeType': 'S'
+            },
+            {
+                'AttributeName': 'subject',
+                'AttributeType': 'S'
+            },
+        ],
+        ProvisionedThroughput={
+            'ReadCapacityUnits': 5,
+            'WriteCapacityUnits': 6
+        }
+    )
+    table = dynamodb.Table('users')
+
+    table.global_secondary_indexes.should.have.length_of(0)
+
+    table.update(GlobalSecondaryIndexUpdates=[{
+        'Create': {
+            'IndexName': 'TestGSI',
+            'KeySchema': [
+                {
+                    'AttributeName': 'username',
+                    'KeyType': 'HASH',
+                },
+                {
+                    'AttributeName': 'created',
+                    'KeyType': 'RANGE',
+                }
+            ],
+            'Projection': {
+                'ProjectionType': 'ALL',
+            },
+            'ProvisionedThroughput': {
+                'ReadCapacityUnits': 3,
+                'WriteCapacityUnits': 4
+            }
+        },
+    }])
+
+    table = dynamodb.Table('users')
+    table.global_secondary_indexes.should.have.length_of(1)
+
+    gsi_throughput = table.global_secondary_indexes[0]['ProvisionedThroughput']
+    gsi_throughput['ReadCapacityUnits'].should.equal(3)
+    gsi_throughput['WriteCapacityUnits'].should.equal(4)
+
+    # Check update works
+    table.update(GlobalSecondaryIndexUpdates=[{
+        'Update': {
+            'IndexName': 'TestGSI',
+            'ProvisionedThroughput': {
+                'ReadCapacityUnits': 10,
+                'WriteCapacityUnits': 11,
+            }
+        },
+    }])
+    table = dynamodb.Table('users')
+
+    gsi_throughput = table.global_secondary_indexes[0]['ProvisionedThroughput']
+    gsi_throughput['ReadCapacityUnits'].should.equal(10)
+    gsi_throughput['WriteCapacityUnits'].should.equal(11)
+
+    table.update(GlobalSecondaryIndexUpdates=[{
+        'Delete': {
+            'IndexName': 'TestGSI',
+        },
+    }])
+
+    table = dynamodb.Table('users')
+    table.global_secondary_indexes.should.have.length_of(0)
+
+
+@mock_dynamodb2
+def test_update_table_gsi_throughput():
+    dynamodb = boto3.resource('dynamodb', region_name='us-east-1')
+
+    # Create the DynamoDB table.
+ table = dynamodb.create_table( + TableName='users', + KeySchema=[ + { + 'AttributeName': 'forum_name', + 'KeyType': 'HASH' + }, + { + 'AttributeName': 'subject', + 'KeyType': 'RANGE' + }, + ], + GlobalSecondaryIndexes=[{ + 'IndexName': 'TestGSI', + 'KeySchema': [ + { + 'AttributeName': 'username', + 'KeyType': 'HASH', + }, + { + 'AttributeName': 'created', + 'KeyType': 'RANGE', + } + ], + 'Projection': { + 'ProjectionType': 'ALL', + }, + 'ProvisionedThroughput': { + 'ReadCapacityUnits': 3, + 'WriteCapacityUnits': 4 + } + }], + AttributeDefinitions=[ + { + 'AttributeName': 'forum_name', + 'AttributeType': 'S' + }, + { + 'AttributeName': 'subject', + 'AttributeType': 'S' + }, + ], + ProvisionedThroughput={ + 'ReadCapacityUnits': 5, + 'WriteCapacityUnits': 6 + } + ) + table = dynamodb.Table('users') + table.global_secondary_indexes.should.have.length_of(1) + + table.update(GlobalSecondaryIndexUpdates=[{ + 'Delete': { + 'IndexName': 'TestGSI', + }, + }]) + + table = dynamodb.Table('users') + table.global_secondary_indexes.should.have.length_of(0) + + +@mock_dynamodb2 +def test_query_pagination(): + table = _create_table_with_range_key() + for i in range(10): + table.put_item(Item={ + 'forum_name': 'the-key', + 'subject': '{0}'.format(i), + 'username': 'johndoe', + 'created': Decimal('3'), + }) + + page1 = table.query( + KeyConditionExpression=Key('forum_name').eq('the-key'), + Limit=6 + ) + page1['Count'].should.equal(6) + page1['Items'].should.have.length_of(6) + page1.should.have.key('LastEvaluatedKey') + + page2 = table.query( + KeyConditionExpression=Key('forum_name').eq('the-key'), + Limit=6, + ExclusiveStartKey=page1['LastEvaluatedKey'] + ) + page2['Count'].should.equal(4) + page2['Items'].should.have.length_of(4) + page2.should_not.have.key('LastEvaluatedKey') + + results = page1['Items'] + page2['Items'] + subjects = set([int(r['subject']) for r in results]) + subjects.should.equal(set(range(10))) diff --git a/tests/test_dynamodb2/test_dynamodb_table_without_range_key.py b/tests/test_dynamodb2/test_dynamodb_table_without_range_key.py index 15e5284b7bf4..faa826fb08ec 100644 --- a/tests/test_dynamodb2/test_dynamodb_table_without_range_key.py +++ b/tests/test_dynamodb2/test_dynamodb_table_without_range_key.py @@ -1,790 +1,790 @@ -from __future__ import unicode_literals - -import boto -import boto3 -from boto3.dynamodb.conditions import Key -import sure # noqa -from freezegun import freeze_time -from boto.exception import JSONResponseError -from moto import mock_dynamodb2, mock_dynamodb2_deprecated -from tests.helpers import requires_boto_gte -import botocore -try: - from boto.dynamodb2.fields import HashKey - from boto.dynamodb2.table import Table - from boto.dynamodb2.table import Item - from boto.dynamodb2.exceptions import ConditionalCheckFailedException, ItemNotFound -except ImportError: - pass - - -def create_table(): - table = Table.create('messages', schema=[ - HashKey('forum_name') - ], throughput={ - 'read': 10, - 'write': 10, - }) - return table - - -@requires_boto_gte("2.9") -@mock_dynamodb2_deprecated -@freeze_time("2012-01-14") -def test_create_table(): - create_table() - expected = { - 'Table': { - 'AttributeDefinitions': [ - {'AttributeName': 'forum_name', 'AttributeType': 'S'} - ], - 'ProvisionedThroughput': { - 'NumberOfDecreasesToday': 0, 'WriteCapacityUnits': 10, 'ReadCapacityUnits': 10 - }, - 'TableSizeBytes': 0, - 'TableName': 'messages', - 'TableStatus': 'ACTIVE', - 'TableArn': 'arn:aws:dynamodb:us-east-1:123456789011:table/messages', - 'KeySchema': [ - {'KeyType': 
'HASH', 'AttributeName': 'forum_name'} - ], - 'ItemCount': 0, 'CreationDateTime': 1326499200.0, - 'GlobalSecondaryIndexes': [], - 'LocalSecondaryIndexes': [] - } - } - conn = boto.dynamodb2.connect_to_region( - 'us-east-1', - aws_access_key_id="ak", - aws_secret_access_key="sk" - ) - - conn.describe_table('messages').should.equal(expected) - - -@requires_boto_gte("2.9") -@mock_dynamodb2_deprecated -def test_delete_table(): - create_table() - conn = boto.dynamodb2.layer1.DynamoDBConnection() - conn.list_tables()["TableNames"].should.have.length_of(1) - - conn.delete_table('messages') - conn.list_tables()["TableNames"].should.have.length_of(0) - - conn.delete_table.when.called_with( - 'messages').should.throw(JSONResponseError) - - -@requires_boto_gte("2.9") -@mock_dynamodb2_deprecated -def test_update_table_throughput(): - table = create_table() - table.throughput["read"].should.equal(10) - table.throughput["write"].should.equal(10) - - table.update(throughput={ - 'read': 5, - 'write': 6, - }) - - table.throughput["read"].should.equal(5) - table.throughput["write"].should.equal(6) - - -@requires_boto_gte("2.9") -@mock_dynamodb2_deprecated -def test_item_add_and_describe_and_update(): - table = create_table() - - data = { - 'forum_name': 'LOLCat Forum', - 'Body': 'http://url_to_lolcat.gif', - 'SentBy': 'User A', - } - - table.put_item(data=data) - returned_item = table.get_item(forum_name="LOLCat Forum") - returned_item.should_not.be.none - - dict(returned_item).should.equal({ - 'forum_name': 'LOLCat Forum', - 'Body': 'http://url_to_lolcat.gif', - 'SentBy': 'User A', - }) - - returned_item['SentBy'] = 'User B' - returned_item.save(overwrite=True) - - returned_item = table.get_item( - forum_name='LOLCat Forum' - ) - dict(returned_item).should.equal({ - 'forum_name': 'LOLCat Forum', - 'Body': 'http://url_to_lolcat.gif', - 'SentBy': 'User B', - }) - - -@requires_boto_gte("2.9") -@mock_dynamodb2_deprecated -def test_item_partial_save(): - table = create_table() - - data = { - 'forum_name': 'LOLCat Forum', - 'Body': 'http://url_to_lolcat.gif', - 'SentBy': 'User A', - } - - table.put_item(data=data) - returned_item = table.get_item(forum_name="LOLCat Forum") - - returned_item['SentBy'] = 'User B' - returned_item.partial_save() - - returned_item = table.get_item( - forum_name='LOLCat Forum' - ) - dict(returned_item).should.equal({ - 'forum_name': 'LOLCat Forum', - 'Body': 'http://url_to_lolcat.gif', - 'SentBy': 'User B', - }) - - -@requires_boto_gte("2.9") -@mock_dynamodb2_deprecated -def test_item_put_without_table(): - conn = boto.dynamodb2.layer1.DynamoDBConnection() - - conn.put_item.when.called_with( - table_name='undeclared-table', - item={ - 'forum_name': 'LOLCat Forum', - 'Body': 'http://url_to_lolcat.gif', - 'SentBy': 'User A', - } - ).should.throw(JSONResponseError) - - -@requires_boto_gte("2.9") -@mock_dynamodb2_deprecated -def test_get_item_with_undeclared_table(): - conn = boto.dynamodb2.layer1.DynamoDBConnection() - - conn.get_item.when.called_with( - table_name='undeclared-table', - key={"forum_name": {"S": "LOLCat Forum"}}, - ).should.throw(JSONResponseError) - - -@requires_boto_gte("2.30.0") -@mock_dynamodb2_deprecated -def test_delete_item(): - table = create_table() - - item_data = { - 'forum_name': 'LOLCat Forum', - 'Body': 'http://url_to_lolcat.gif', - 'SentBy': 'User A', - 'ReceivedTime': '12/9/2011 11:36:03 PM', - } - item = Item(table, item_data) - item.save() - table.count().should.equal(1) - - response = item.delete() - - response.should.equal(True) - - 
table.count().should.equal(0) - - # Deletes are idempotent and 'False' here would imply an error condition - item.delete().should.equal(True) - - -@requires_boto_gte("2.9") -@mock_dynamodb2_deprecated -def test_delete_item_with_undeclared_table(): - conn = boto.dynamodb2.layer1.DynamoDBConnection() - - conn.delete_item.when.called_with( - table_name='undeclared-table', - key={"forum_name": {"S": "LOLCat Forum"}}, - ).should.throw(JSONResponseError) - - -@requires_boto_gte("2.9") -@mock_dynamodb2_deprecated -def test_query(): - table = create_table() - - item_data = { - 'forum_name': 'the-key', - 'Body': 'http://url_to_lolcat.gif', - 'SentBy': 'User A', - 'ReceivedTime': '12/9/2011 11:36:03 PM', - } - item = Item(table, item_data) - item.save(overwrite=True) - table.count().should.equal(1) - table = Table("messages") - - results = table.query(forum_name__eq='the-key') - sum(1 for _ in results).should.equal(1) - - -@requires_boto_gte("2.9") -@mock_dynamodb2_deprecated -def test_query_with_undeclared_table(): - conn = boto.dynamodb2.layer1.DynamoDBConnection() - - conn.query.when.called_with( - table_name='undeclared-table', - key_conditions={"forum_name": { - "ComparisonOperator": "EQ", "AttributeValueList": [{"S": "the-key"}]}} - ).should.throw(JSONResponseError) - - -@requires_boto_gte("2.9") -@mock_dynamodb2_deprecated -def test_scan(): - table = create_table() - - item_data = { - 'Body': 'http://url_to_lolcat.gif', - 'SentBy': 'User A', - 'ReceivedTime': '12/9/2011 11:36:03 PM', - } - item_data['forum_name'] = 'the-key' - - item = Item(table, item_data) - item.save() - - item['forum_name'] = 'the-key2' - item.save(overwrite=True) - - item_data = { - 'Body': 'http://url_to_lolcat.gif', - 'SentBy': 'User B', - 'ReceivedTime': '12/9/2011 11:36:03 PM', - 'Ids': set([1, 2, 3]), - 'PK': 7, - } - item_data['forum_name'] = 'the-key3' - item = Item(table, item_data) - item.save() - - results = table.scan() - sum(1 for _ in results).should.equal(3) - - results = table.scan(SentBy__eq='User B') - sum(1 for _ in results).should.equal(1) - - results = table.scan(Body__beginswith='http') - sum(1 for _ in results).should.equal(3) - - results = table.scan(Ids__null=False) - sum(1 for _ in results).should.equal(1) - - results = table.scan(Ids__null=True) - sum(1 for _ in results).should.equal(2) - - results = table.scan(PK__between=[8, 9]) - sum(1 for _ in results).should.equal(0) - - results = table.scan(PK__between=[5, 8]) - sum(1 for _ in results).should.equal(1) - - -@requires_boto_gte("2.9") -@mock_dynamodb2_deprecated -def test_scan_with_undeclared_table(): - conn = boto.dynamodb2.layer1.DynamoDBConnection() - - conn.scan.when.called_with( - table_name='undeclared-table', - scan_filter={ - "SentBy": { - "AttributeValueList": [{ - "S": "User B"} - ], - "ComparisonOperator": "EQ" - } - }, - ).should.throw(JSONResponseError) - - -@requires_boto_gte("2.9") -@mock_dynamodb2_deprecated -def test_write_batch(): - table = create_table() - - with table.batch_write() as batch: - batch.put_item(data={ - 'forum_name': 'the-key', - 'subject': '123', - 'Body': 'http://url_to_lolcat.gif', - 'SentBy': 'User A', - 'ReceivedTime': '12/9/2011 11:36:03 PM', - }) - batch.put_item(data={ - 'forum_name': 'the-key2', - 'subject': '789', - 'Body': 'http://url_to_lolcat.gif', - 'SentBy': 'User B', - 'ReceivedTime': '12/9/2011 11:36:03 PM', - }) - - table.count().should.equal(2) - with table.batch_write() as batch: - batch.delete_item( - forum_name='the-key', - subject='789' - ) - - table.count().should.equal(1) - - 
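# Editor's note -- illustrative sketch only, not part of the patch above or
# below. boto3's counterpart of boto2's table.batch_write() (exercised in
# test_write_batch) is batch_writer(), which buffers puts/deletes and flushes
# them as BatchWriteItem calls of up to 25 items each. The 'messages' table
# name and 'forum_name' hash key are assumptions carried over from the
# surrounding tests.
import boto3

def _batch_write_sketch():
    table = boto3.resource('dynamodb', region_name='us-east-1').Table('messages')
    # First batch: buffered puts, flushed as a single BatchWriteItem call.
    with table.batch_writer() as batch:
        batch.put_item(Item={'forum_name': 'the-key', 'SentBy': 'User A'})
        batch.put_item(Item={'forum_name': 'the-key2', 'SentBy': 'User B'})
    # Second batch: the delete is kept in its own writer so the same key
    # never appears twice within one BatchWriteItem request.
    with table.batch_writer() as batch:
        batch.delete_item(Key={'forum_name': 'the-key'})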
-@requires_boto_gte("2.9") -@mock_dynamodb2_deprecated -def test_batch_read(): - table = create_table() - - item_data = { - 'Body': 'http://url_to_lolcat.gif', - 'SentBy': 'User A', - 'ReceivedTime': '12/9/2011 11:36:03 PM', - } - item_data['forum_name'] = 'the-key1' - item = Item(table, item_data) - item.save() - - item = Item(table, item_data) - item_data['forum_name'] = 'the-key2' - item.save(overwrite=True) - - item_data = { - 'Body': 'http://url_to_lolcat.gif', - 'SentBy': 'User B', - 'ReceivedTime': '12/9/2011 11:36:03 PM', - 'Ids': set([1, 2, 3]), - 'PK': 7, - } - item = Item(table, item_data) - item_data['forum_name'] = 'another-key' - item.save(overwrite=True) - - results = table.batch_get( - keys=[ - {'forum_name': 'the-key1'}, - {'forum_name': 'another-key'}, - ] - ) - - # Iterate through so that batch_item gets called - count = len([x for x in results]) - count.should.equal(2) - - -@requires_boto_gte("2.9") -@mock_dynamodb2_deprecated -def test_get_key_fields(): - table = create_table() - kf = table.get_key_fields() - kf[0].should.equal('forum_name') - - -@requires_boto_gte("2.9") -@mock_dynamodb2_deprecated -def test_get_missing_item(): - table = create_table() - table.get_item.when.called_with( - forum_name='missing').should.throw(ItemNotFound) - - -@requires_boto_gte("2.9") -@mock_dynamodb2_deprecated -def test_get_special_item(): - table = Table.create('messages', schema=[ - HashKey('date-joined') - ], throughput={ - 'read': 10, - 'write': 10, - }) - - data = { - 'date-joined': 127549192, - 'SentBy': 'User A', - } - table.put_item(data=data) - returned_item = table.get_item(**{'date-joined': 127549192}) - dict(returned_item).should.equal(data) - - -@mock_dynamodb2_deprecated -def test_update_item_remove(): - conn = boto.dynamodb2.connect_to_region("us-east-1") - table = Table.create('messages', schema=[ - HashKey('username') - ]) - - data = { - 'username': "steve", - 'SentBy': 'User A', - 'SentTo': 'User B', - } - table.put_item(data=data) - key_map = { - 'username': {"S": "steve"} - } - - # Then remove the SentBy field - conn.update_item("messages", key_map, - update_expression="REMOVE SentBy, SentTo") - - returned_item = table.get_item(username="steve") - dict(returned_item).should.equal({ - 'username': "steve", - }) - - -@mock_dynamodb2_deprecated -def test_update_item_set(): - conn = boto.dynamodb2.connect_to_region("us-east-1") - table = Table.create('messages', schema=[ - HashKey('username') - ]) - - data = { - 'username': "steve", - 'SentBy': 'User A', - } - table.put_item(data=data) - key_map = { - 'username': {"S": "steve"} - } - - conn.update_item("messages", key_map, - update_expression="SET foo=bar, blah=baz REMOVE SentBy") - - returned_item = table.get_item(username="steve") - dict(returned_item).should.equal({ - 'username': "steve", - 'foo': 'bar', - 'blah': 'baz', - }) - - -@mock_dynamodb2_deprecated -def test_failed_overwrite(): - table = Table.create('messages', schema=[ - HashKey('id'), - ], throughput={ - 'read': 7, - 'write': 3, - }) - - data1 = {'id': '123', 'data': '678'} - table.put_item(data=data1) - - data2 = {'id': '123', 'data': '345'} - table.put_item(data=data2, overwrite=True) - - data3 = {'id': '123', 'data': '812'} - table.put_item.when.called_with(data=data3).should.throw( - ConditionalCheckFailedException) - - returned_item = table.lookup('123') - dict(returned_item).should.equal(data2) - - data4 = {'id': '124', 'data': 812} - table.put_item(data=data4) - - returned_item = table.lookup('124') - dict(returned_item).should.equal(data4) - - 
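# Editor's note -- illustrative sketch only, not part of the patch. The
# overwrite and conflicting-write failures asserted in test_failed_overwrite
# and test_conflicting_writes surface in boto3 as a ClientError whose code is
# 'ConditionalCheckFailedException' when a ConditionExpression is not met;
# the 'messages' table with hash key 'id' is assumed from those tests.
import boto3
import botocore

def _conditional_put_sketch():
    table = boto3.resource('dynamodb', region_name='us-east-1').Table('messages')
    table.put_item(Item={'id': '123', 'data': '678'})
    try:
        # Refused because an item with this key already exists, mirroring
        # boto2's put_item(..., overwrite=False) behaviour.
        table.put_item(Item={'id': '123', 'data': '812'},
                       ConditionExpression='attribute_not_exists(id)')
    except botocore.exceptions.ClientError as err:
        assert err.response['Error']['Code'] == 'ConditionalCheckFailedException'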
-@mock_dynamodb2_deprecated -def test_conflicting_writes(): - table = Table.create('messages', schema=[ - HashKey('id'), - ]) - - item_data = {'id': '123', 'data': '678'} - item1 = Item(table, item_data) - item2 = Item(table, item_data) - item1.save() - - item1['data'] = '579' - item2['data'] = '912' - - item1.save() - item2.save.when.called_with().should.throw(ConditionalCheckFailedException) - - -""" -boto3 -""" - - -@mock_dynamodb2 -def test_boto3_create_table(): - dynamodb = boto3.resource('dynamodb', region_name='us-east-1') - - table = dynamodb.create_table( - TableName='users', - KeySchema=[ - { - 'AttributeName': 'username', - 'KeyType': 'HASH' - }, - ], - AttributeDefinitions=[ - { - 'AttributeName': 'username', - 'AttributeType': 'S' - }, - ], - ProvisionedThroughput={ - 'ReadCapacityUnits': 5, - 'WriteCapacityUnits': 5 - } - ) - table.name.should.equal('users') - - -def _create_user_table(): - dynamodb = boto3.resource('dynamodb', region_name='us-east-1') - - table = dynamodb.create_table( - TableName='users', - KeySchema=[ - { - 'AttributeName': 'username', - 'KeyType': 'HASH' - }, - ], - AttributeDefinitions=[ - { - 'AttributeName': 'username', - 'AttributeType': 'S' - }, - ], - ProvisionedThroughput={ - 'ReadCapacityUnits': 5, - 'WriteCapacityUnits': 5 - } - ) - return dynamodb.Table('users') - - -@mock_dynamodb2 -def test_boto3_conditions(): - table = _create_user_table() - - table.put_item(Item={'username': 'johndoe'}) - table.put_item(Item={'username': 'janedoe'}) - - response = table.query( - KeyConditionExpression=Key('username').eq('johndoe') - ) - response['Count'].should.equal(1) - response['Items'].should.have.length_of(1) - response['Items'][0].should.equal({"username": "johndoe"}) - - -@mock_dynamodb2 -def test_boto3_put_item_conditions_pass(): - table = _create_user_table() - table.put_item(Item={'username': 'johndoe', 'foo': 'bar'}) - table.put_item( - Item={'username': 'johndoe', 'foo': 'baz'}, - Expected={ - 'foo': { - 'ComparisonOperator': 'EQ', - 'AttributeValueList': ['bar'] - } - }) - final_item = table.get_item(Key={'username': 'johndoe'}) - assert dict(final_item)['Item']['foo'].should.equal("baz") - -@mock_dynamodb2 -def test_boto3_put_item_conditions_pass_because_expect_not_exists_by_compare_to_null(): - table = _create_user_table() - table.put_item(Item={'username': 'johndoe', 'foo': 'bar'}) - table.put_item( - Item={'username': 'johndoe', 'foo': 'baz'}, - Expected={ - 'whatever': { - 'ComparisonOperator': 'NULL', - } - }) - final_item = table.get_item(Key={'username': 'johndoe'}) - assert dict(final_item)['Item']['foo'].should.equal("baz") - -@mock_dynamodb2 -def test_boto3_put_item_conditions_pass_because_expect_exists_by_compare_to_not_null(): - table = _create_user_table() - table.put_item(Item={'username': 'johndoe', 'foo': 'bar'}) - table.put_item( - Item={'username': 'johndoe', 'foo': 'baz'}, - Expected={ - 'foo': { - 'ComparisonOperator': 'NOT_NULL', - } - }) - final_item = table.get_item(Key={'username': 'johndoe'}) - assert dict(final_item)['Item']['foo'].should.equal("baz") - -@mock_dynamodb2 -def test_boto3_put_item_conditions_fail(): - table = _create_user_table() - table.put_item(Item={'username': 'johndoe', 'foo': 'bar'}) - table.put_item.when.called_with( - Item={'username': 'johndoe', 'foo': 'baz'}, - Expected={ - 'foo': { - 'ComparisonOperator': 'NE', - 'AttributeValueList': ['bar'] - } - }).should.throw(botocore.client.ClientError) - -@mock_dynamodb2 -def test_boto3_update_item_conditions_fail(): - table = _create_user_table() - 
table.put_item(Item={'username': 'johndoe', 'foo': 'baz'}) - table.update_item.when.called_with( - Key={'username': 'johndoe'}, - UpdateExpression='SET foo=bar', - Expected={ - 'foo': { - 'Value': 'bar', - } - }).should.throw(botocore.client.ClientError) - -@mock_dynamodb2 -def test_boto3_update_item_conditions_fail_because_expect_not_exists(): - table = _create_user_table() - table.put_item(Item={'username': 'johndoe', 'foo': 'baz'}) - table.update_item.when.called_with( - Key={'username': 'johndoe'}, - UpdateExpression='SET foo=bar', - Expected={ - 'foo': { - 'Exists': False - } - }).should.throw(botocore.client.ClientError) - -@mock_dynamodb2 -def test_boto3_update_item_conditions_fail_because_expect_not_exists_by_compare_to_null(): - table = _create_user_table() - table.put_item(Item={'username': 'johndoe', 'foo': 'baz'}) - table.update_item.when.called_with( - Key={'username': 'johndoe'}, - UpdateExpression='SET foo=bar', - Expected={ - 'foo': { - 'ComparisonOperator': 'NULL', - } - }).should.throw(botocore.client.ClientError) - -@mock_dynamodb2 -def test_boto3_update_item_conditions_pass(): - table = _create_user_table() - table.put_item(Item={'username': 'johndoe', 'foo': 'bar'}) - table.update_item( - Key={'username': 'johndoe'}, - UpdateExpression='SET foo=baz', - Expected={ - 'foo': { - 'Value': 'bar', - } - }) - returned_item = table.get_item(Key={'username': 'johndoe'}) - assert dict(returned_item)['Item']['foo'].should.equal("baz") - -@mock_dynamodb2 -def test_boto3_update_item_conditions_pass_because_expect_not_exists(): - table = _create_user_table() - table.put_item(Item={'username': 'johndoe', 'foo': 'bar'}) - table.update_item( - Key={'username': 'johndoe'}, - UpdateExpression='SET foo=baz', - Expected={ - 'whatever': { - 'Exists': False, - } - }) - returned_item = table.get_item(Key={'username': 'johndoe'}) - assert dict(returned_item)['Item']['foo'].should.equal("baz") - -@mock_dynamodb2 -def test_boto3_update_item_conditions_pass_because_expect_not_exists_by_compare_to_null(): - table = _create_user_table() - table.put_item(Item={'username': 'johndoe', 'foo': 'bar'}) - table.update_item( - Key={'username': 'johndoe'}, - UpdateExpression='SET foo=baz', - Expected={ - 'whatever': { - 'ComparisonOperator': 'NULL', - } - }) - returned_item = table.get_item(Key={'username': 'johndoe'}) - assert dict(returned_item)['Item']['foo'].should.equal("baz") - -@mock_dynamodb2 -def test_boto3_update_item_conditions_pass_because_expect_exists_by_compare_to_not_null(): - table = _create_user_table() - table.put_item(Item={'username': 'johndoe', 'foo': 'bar'}) - table.update_item( - Key={'username': 'johndoe'}, - UpdateExpression='SET foo=baz', - Expected={ - 'foo': { - 'ComparisonOperator': 'NOT_NULL', - } - }) - returned_item = table.get_item(Key={'username': 'johndoe'}) - assert dict(returned_item)['Item']['foo'].should.equal("baz") - -@mock_dynamodb2 -def test_boto3_put_item_conditions_pass(): - table = _create_user_table() - table.put_item(Item={'username': 'johndoe', 'foo': 'bar'}) - table.put_item( - Item={'username': 'johndoe', 'foo': 'baz'}, - Expected={ - 'foo': { - 'ComparisonOperator': 'EQ', - 'AttributeValueList': ['bar'] - } - }) - returned_item = table.get_item(Key={'username': 'johndoe'}) - assert dict(returned_item)['Item']['foo'].should.equal("baz") - - -@mock_dynamodb2 -def test_scan_pagination(): - table = _create_user_table() - - expected_usernames = ['user{0}'.format(i) for i in range(10)] - for u in expected_usernames: - table.put_item(Item={'username': u}) - - 
page1 = table.scan(Limit=6) - page1['Count'].should.equal(6) - page1['Items'].should.have.length_of(6) - page1.should.have.key('LastEvaluatedKey') - - page2 = table.scan(Limit=6, - ExclusiveStartKey=page1['LastEvaluatedKey']) - page2['Count'].should.equal(4) - page2['Items'].should.have.length_of(4) - page2.should_not.have.key('LastEvaluatedKey') - - results = page1['Items'] + page2['Items'] - usernames = set([r['username'] for r in results]) - usernames.should.equal(set(expected_usernames)) +from __future__ import unicode_literals + +import boto +import boto3 +from boto3.dynamodb.conditions import Key +import sure # noqa +from freezegun import freeze_time +from boto.exception import JSONResponseError +from moto import mock_dynamodb2, mock_dynamodb2_deprecated +from tests.helpers import requires_boto_gte +import botocore +try: + from boto.dynamodb2.fields import HashKey + from boto.dynamodb2.table import Table + from boto.dynamodb2.table import Item + from boto.dynamodb2.exceptions import ConditionalCheckFailedException, ItemNotFound +except ImportError: + pass + + +def create_table(): + table = Table.create('messages', schema=[ + HashKey('forum_name') + ], throughput={ + 'read': 10, + 'write': 10, + }) + return table + + +@requires_boto_gte("2.9") +@mock_dynamodb2_deprecated +@freeze_time("2012-01-14") +def test_create_table(): + create_table() + expected = { + 'Table': { + 'AttributeDefinitions': [ + {'AttributeName': 'forum_name', 'AttributeType': 'S'} + ], + 'ProvisionedThroughput': { + 'NumberOfDecreasesToday': 0, 'WriteCapacityUnits': 10, 'ReadCapacityUnits': 10 + }, + 'TableSizeBytes': 0, + 'TableName': 'messages', + 'TableStatus': 'ACTIVE', + 'TableArn': 'arn:aws:dynamodb:us-east-1:123456789011:table/messages', + 'KeySchema': [ + {'KeyType': 'HASH', 'AttributeName': 'forum_name'} + ], + 'ItemCount': 0, 'CreationDateTime': 1326499200.0, + 'GlobalSecondaryIndexes': [], + 'LocalSecondaryIndexes': [] + } + } + conn = boto.dynamodb2.connect_to_region( + 'us-east-1', + aws_access_key_id="ak", + aws_secret_access_key="sk" + ) + + conn.describe_table('messages').should.equal(expected) + + +@requires_boto_gte("2.9") +@mock_dynamodb2_deprecated +def test_delete_table(): + create_table() + conn = boto.dynamodb2.layer1.DynamoDBConnection() + conn.list_tables()["TableNames"].should.have.length_of(1) + + conn.delete_table('messages') + conn.list_tables()["TableNames"].should.have.length_of(0) + + conn.delete_table.when.called_with( + 'messages').should.throw(JSONResponseError) + + +@requires_boto_gte("2.9") +@mock_dynamodb2_deprecated +def test_update_table_throughput(): + table = create_table() + table.throughput["read"].should.equal(10) + table.throughput["write"].should.equal(10) + + table.update(throughput={ + 'read': 5, + 'write': 6, + }) + + table.throughput["read"].should.equal(5) + table.throughput["write"].should.equal(6) + + +@requires_boto_gte("2.9") +@mock_dynamodb2_deprecated +def test_item_add_and_describe_and_update(): + table = create_table() + + data = { + 'forum_name': 'LOLCat Forum', + 'Body': 'http://url_to_lolcat.gif', + 'SentBy': 'User A', + } + + table.put_item(data=data) + returned_item = table.get_item(forum_name="LOLCat Forum") + returned_item.should_not.be.none + + dict(returned_item).should.equal({ + 'forum_name': 'LOLCat Forum', + 'Body': 'http://url_to_lolcat.gif', + 'SentBy': 'User A', + }) + + returned_item['SentBy'] = 'User B' + returned_item.save(overwrite=True) + + returned_item = table.get_item( + forum_name='LOLCat Forum' + ) + 
dict(returned_item).should.equal({ + 'forum_name': 'LOLCat Forum', + 'Body': 'http://url_to_lolcat.gif', + 'SentBy': 'User B', + }) + + +@requires_boto_gte("2.9") +@mock_dynamodb2_deprecated +def test_item_partial_save(): + table = create_table() + + data = { + 'forum_name': 'LOLCat Forum', + 'Body': 'http://url_to_lolcat.gif', + 'SentBy': 'User A', + } + + table.put_item(data=data) + returned_item = table.get_item(forum_name="LOLCat Forum") + + returned_item['SentBy'] = 'User B' + returned_item.partial_save() + + returned_item = table.get_item( + forum_name='LOLCat Forum' + ) + dict(returned_item).should.equal({ + 'forum_name': 'LOLCat Forum', + 'Body': 'http://url_to_lolcat.gif', + 'SentBy': 'User B', + }) + + +@requires_boto_gte("2.9") +@mock_dynamodb2_deprecated +def test_item_put_without_table(): + conn = boto.dynamodb2.layer1.DynamoDBConnection() + + conn.put_item.when.called_with( + table_name='undeclared-table', + item={ + 'forum_name': 'LOLCat Forum', + 'Body': 'http://url_to_lolcat.gif', + 'SentBy': 'User A', + } + ).should.throw(JSONResponseError) + + +@requires_boto_gte("2.9") +@mock_dynamodb2_deprecated +def test_get_item_with_undeclared_table(): + conn = boto.dynamodb2.layer1.DynamoDBConnection() + + conn.get_item.when.called_with( + table_name='undeclared-table', + key={"forum_name": {"S": "LOLCat Forum"}}, + ).should.throw(JSONResponseError) + + +@requires_boto_gte("2.30.0") +@mock_dynamodb2_deprecated +def test_delete_item(): + table = create_table() + + item_data = { + 'forum_name': 'LOLCat Forum', + 'Body': 'http://url_to_lolcat.gif', + 'SentBy': 'User A', + 'ReceivedTime': '12/9/2011 11:36:03 PM', + } + item = Item(table, item_data) + item.save() + table.count().should.equal(1) + + response = item.delete() + + response.should.equal(True) + + table.count().should.equal(0) + + # Deletes are idempotent and 'False' here would imply an error condition + item.delete().should.equal(True) + + +@requires_boto_gte("2.9") +@mock_dynamodb2_deprecated +def test_delete_item_with_undeclared_table(): + conn = boto.dynamodb2.layer1.DynamoDBConnection() + + conn.delete_item.when.called_with( + table_name='undeclared-table', + key={"forum_name": {"S": "LOLCat Forum"}}, + ).should.throw(JSONResponseError) + + +@requires_boto_gte("2.9") +@mock_dynamodb2_deprecated +def test_query(): + table = create_table() + + item_data = { + 'forum_name': 'the-key', + 'Body': 'http://url_to_lolcat.gif', + 'SentBy': 'User A', + 'ReceivedTime': '12/9/2011 11:36:03 PM', + } + item = Item(table, item_data) + item.save(overwrite=True) + table.count().should.equal(1) + table = Table("messages") + + results = table.query(forum_name__eq='the-key') + sum(1 for _ in results).should.equal(1) + + +@requires_boto_gte("2.9") +@mock_dynamodb2_deprecated +def test_query_with_undeclared_table(): + conn = boto.dynamodb2.layer1.DynamoDBConnection() + + conn.query.when.called_with( + table_name='undeclared-table', + key_conditions={"forum_name": { + "ComparisonOperator": "EQ", "AttributeValueList": [{"S": "the-key"}]}} + ).should.throw(JSONResponseError) + + +@requires_boto_gte("2.9") +@mock_dynamodb2_deprecated +def test_scan(): + table = create_table() + + item_data = { + 'Body': 'http://url_to_lolcat.gif', + 'SentBy': 'User A', + 'ReceivedTime': '12/9/2011 11:36:03 PM', + } + item_data['forum_name'] = 'the-key' + + item = Item(table, item_data) + item.save() + + item['forum_name'] = 'the-key2' + item.save(overwrite=True) + + item_data = { + 'Body': 'http://url_to_lolcat.gif', + 'SentBy': 'User B', + 'ReceivedTime': 
'12/9/2011 11:36:03 PM', + 'Ids': set([1, 2, 3]), + 'PK': 7, + } + item_data['forum_name'] = 'the-key3' + item = Item(table, item_data) + item.save() + + results = table.scan() + sum(1 for _ in results).should.equal(3) + + results = table.scan(SentBy__eq='User B') + sum(1 for _ in results).should.equal(1) + + results = table.scan(Body__beginswith='http') + sum(1 for _ in results).should.equal(3) + + results = table.scan(Ids__null=False) + sum(1 for _ in results).should.equal(1) + + results = table.scan(Ids__null=True) + sum(1 for _ in results).should.equal(2) + + results = table.scan(PK__between=[8, 9]) + sum(1 for _ in results).should.equal(0) + + results = table.scan(PK__between=[5, 8]) + sum(1 for _ in results).should.equal(1) + + +@requires_boto_gte("2.9") +@mock_dynamodb2_deprecated +def test_scan_with_undeclared_table(): + conn = boto.dynamodb2.layer1.DynamoDBConnection() + + conn.scan.when.called_with( + table_name='undeclared-table', + scan_filter={ + "SentBy": { + "AttributeValueList": [{ + "S": "User B"} + ], + "ComparisonOperator": "EQ" + } + }, + ).should.throw(JSONResponseError) + + +@requires_boto_gte("2.9") +@mock_dynamodb2_deprecated +def test_write_batch(): + table = create_table() + + with table.batch_write() as batch: + batch.put_item(data={ + 'forum_name': 'the-key', + 'subject': '123', + 'Body': 'http://url_to_lolcat.gif', + 'SentBy': 'User A', + 'ReceivedTime': '12/9/2011 11:36:03 PM', + }) + batch.put_item(data={ + 'forum_name': 'the-key2', + 'subject': '789', + 'Body': 'http://url_to_lolcat.gif', + 'SentBy': 'User B', + 'ReceivedTime': '12/9/2011 11:36:03 PM', + }) + + table.count().should.equal(2) + with table.batch_write() as batch: + batch.delete_item( + forum_name='the-key', + subject='789' + ) + + table.count().should.equal(1) + + +@requires_boto_gte("2.9") +@mock_dynamodb2_deprecated +def test_batch_read(): + table = create_table() + + item_data = { + 'Body': 'http://url_to_lolcat.gif', + 'SentBy': 'User A', + 'ReceivedTime': '12/9/2011 11:36:03 PM', + } + item_data['forum_name'] = 'the-key1' + item = Item(table, item_data) + item.save() + + item = Item(table, item_data) + item_data['forum_name'] = 'the-key2' + item.save(overwrite=True) + + item_data = { + 'Body': 'http://url_to_lolcat.gif', + 'SentBy': 'User B', + 'ReceivedTime': '12/9/2011 11:36:03 PM', + 'Ids': set([1, 2, 3]), + 'PK': 7, + } + item = Item(table, item_data) + item_data['forum_name'] = 'another-key' + item.save(overwrite=True) + + results = table.batch_get( + keys=[ + {'forum_name': 'the-key1'}, + {'forum_name': 'another-key'}, + ] + ) + + # Iterate through so that batch_item gets called + count = len([x for x in results]) + count.should.equal(2) + + +@requires_boto_gte("2.9") +@mock_dynamodb2_deprecated +def test_get_key_fields(): + table = create_table() + kf = table.get_key_fields() + kf[0].should.equal('forum_name') + + +@requires_boto_gte("2.9") +@mock_dynamodb2_deprecated +def test_get_missing_item(): + table = create_table() + table.get_item.when.called_with( + forum_name='missing').should.throw(ItemNotFound) + + +@requires_boto_gte("2.9") +@mock_dynamodb2_deprecated +def test_get_special_item(): + table = Table.create('messages', schema=[ + HashKey('date-joined') + ], throughput={ + 'read': 10, + 'write': 10, + }) + + data = { + 'date-joined': 127549192, + 'SentBy': 'User A', + } + table.put_item(data=data) + returned_item = table.get_item(**{'date-joined': 127549192}) + dict(returned_item).should.equal(data) + + +@mock_dynamodb2_deprecated +def test_update_item_remove(): + conn = 
boto.dynamodb2.connect_to_region("us-east-1") + table = Table.create('messages', schema=[ + HashKey('username') + ]) + + data = { + 'username': "steve", + 'SentBy': 'User A', + 'SentTo': 'User B', + } + table.put_item(data=data) + key_map = { + 'username': {"S": "steve"} + } + + # Then remove the SentBy field + conn.update_item("messages", key_map, + update_expression="REMOVE SentBy, SentTo") + + returned_item = table.get_item(username="steve") + dict(returned_item).should.equal({ + 'username': "steve", + }) + + +@mock_dynamodb2_deprecated +def test_update_item_set(): + conn = boto.dynamodb2.connect_to_region("us-east-1") + table = Table.create('messages', schema=[ + HashKey('username') + ]) + + data = { + 'username': "steve", + 'SentBy': 'User A', + } + table.put_item(data=data) + key_map = { + 'username': {"S": "steve"} + } + + conn.update_item("messages", key_map, + update_expression="SET foo=bar, blah=baz REMOVE SentBy") + + returned_item = table.get_item(username="steve") + dict(returned_item).should.equal({ + 'username': "steve", + 'foo': 'bar', + 'blah': 'baz', + }) + + +@mock_dynamodb2_deprecated +def test_failed_overwrite(): + table = Table.create('messages', schema=[ + HashKey('id'), + ], throughput={ + 'read': 7, + 'write': 3, + }) + + data1 = {'id': '123', 'data': '678'} + table.put_item(data=data1) + + data2 = {'id': '123', 'data': '345'} + table.put_item(data=data2, overwrite=True) + + data3 = {'id': '123', 'data': '812'} + table.put_item.when.called_with(data=data3).should.throw( + ConditionalCheckFailedException) + + returned_item = table.lookup('123') + dict(returned_item).should.equal(data2) + + data4 = {'id': '124', 'data': 812} + table.put_item(data=data4) + + returned_item = table.lookup('124') + dict(returned_item).should.equal(data4) + + +@mock_dynamodb2_deprecated +def test_conflicting_writes(): + table = Table.create('messages', schema=[ + HashKey('id'), + ]) + + item_data = {'id': '123', 'data': '678'} + item1 = Item(table, item_data) + item2 = Item(table, item_data) + item1.save() + + item1['data'] = '579' + item2['data'] = '912' + + item1.save() + item2.save.when.called_with().should.throw(ConditionalCheckFailedException) + + +""" +boto3 +""" + + +@mock_dynamodb2 +def test_boto3_create_table(): + dynamodb = boto3.resource('dynamodb', region_name='us-east-1') + + table = dynamodb.create_table( + TableName='users', + KeySchema=[ + { + 'AttributeName': 'username', + 'KeyType': 'HASH' + }, + ], + AttributeDefinitions=[ + { + 'AttributeName': 'username', + 'AttributeType': 'S' + }, + ], + ProvisionedThroughput={ + 'ReadCapacityUnits': 5, + 'WriteCapacityUnits': 5 + } + ) + table.name.should.equal('users') + + +def _create_user_table(): + dynamodb = boto3.resource('dynamodb', region_name='us-east-1') + + table = dynamodb.create_table( + TableName='users', + KeySchema=[ + { + 'AttributeName': 'username', + 'KeyType': 'HASH' + }, + ], + AttributeDefinitions=[ + { + 'AttributeName': 'username', + 'AttributeType': 'S' + }, + ], + ProvisionedThroughput={ + 'ReadCapacityUnits': 5, + 'WriteCapacityUnits': 5 + } + ) + return dynamodb.Table('users') + + +@mock_dynamodb2 +def test_boto3_conditions(): + table = _create_user_table() + + table.put_item(Item={'username': 'johndoe'}) + table.put_item(Item={'username': 'janedoe'}) + + response = table.query( + KeyConditionExpression=Key('username').eq('johndoe') + ) + response['Count'].should.equal(1) + response['Items'].should.have.length_of(1) + response['Items'][0].should.equal({"username": "johndoe"}) + + +@mock_dynamodb2 +def 
test_boto3_put_item_conditions_pass(): + table = _create_user_table() + table.put_item(Item={'username': 'johndoe', 'foo': 'bar'}) + table.put_item( + Item={'username': 'johndoe', 'foo': 'baz'}, + Expected={ + 'foo': { + 'ComparisonOperator': 'EQ', + 'AttributeValueList': ['bar'] + } + }) + final_item = table.get_item(Key={'username': 'johndoe'}) + assert dict(final_item)['Item']['foo'].should.equal("baz") + +@mock_dynamodb2 +def test_boto3_put_item_conditions_pass_because_expect_not_exists_by_compare_to_null(): + table = _create_user_table() + table.put_item(Item={'username': 'johndoe', 'foo': 'bar'}) + table.put_item( + Item={'username': 'johndoe', 'foo': 'baz'}, + Expected={ + 'whatever': { + 'ComparisonOperator': 'NULL', + } + }) + final_item = table.get_item(Key={'username': 'johndoe'}) + assert dict(final_item)['Item']['foo'].should.equal("baz") + +@mock_dynamodb2 +def test_boto3_put_item_conditions_pass_because_expect_exists_by_compare_to_not_null(): + table = _create_user_table() + table.put_item(Item={'username': 'johndoe', 'foo': 'bar'}) + table.put_item( + Item={'username': 'johndoe', 'foo': 'baz'}, + Expected={ + 'foo': { + 'ComparisonOperator': 'NOT_NULL', + } + }) + final_item = table.get_item(Key={'username': 'johndoe'}) + assert dict(final_item)['Item']['foo'].should.equal("baz") + +@mock_dynamodb2 +def test_boto3_put_item_conditions_fail(): + table = _create_user_table() + table.put_item(Item={'username': 'johndoe', 'foo': 'bar'}) + table.put_item.when.called_with( + Item={'username': 'johndoe', 'foo': 'baz'}, + Expected={ + 'foo': { + 'ComparisonOperator': 'NE', + 'AttributeValueList': ['bar'] + } + }).should.throw(botocore.client.ClientError) + +@mock_dynamodb2 +def test_boto3_update_item_conditions_fail(): + table = _create_user_table() + table.put_item(Item={'username': 'johndoe', 'foo': 'baz'}) + table.update_item.when.called_with( + Key={'username': 'johndoe'}, + UpdateExpression='SET foo=bar', + Expected={ + 'foo': { + 'Value': 'bar', + } + }).should.throw(botocore.client.ClientError) + +@mock_dynamodb2 +def test_boto3_update_item_conditions_fail_because_expect_not_exists(): + table = _create_user_table() + table.put_item(Item={'username': 'johndoe', 'foo': 'baz'}) + table.update_item.when.called_with( + Key={'username': 'johndoe'}, + UpdateExpression='SET foo=bar', + Expected={ + 'foo': { + 'Exists': False + } + }).should.throw(botocore.client.ClientError) + +@mock_dynamodb2 +def test_boto3_update_item_conditions_fail_because_expect_not_exists_by_compare_to_null(): + table = _create_user_table() + table.put_item(Item={'username': 'johndoe', 'foo': 'baz'}) + table.update_item.when.called_with( + Key={'username': 'johndoe'}, + UpdateExpression='SET foo=bar', + Expected={ + 'foo': { + 'ComparisonOperator': 'NULL', + } + }).should.throw(botocore.client.ClientError) + +@mock_dynamodb2 +def test_boto3_update_item_conditions_pass(): + table = _create_user_table() + table.put_item(Item={'username': 'johndoe', 'foo': 'bar'}) + table.update_item( + Key={'username': 'johndoe'}, + UpdateExpression='SET foo=baz', + Expected={ + 'foo': { + 'Value': 'bar', + } + }) + returned_item = table.get_item(Key={'username': 'johndoe'}) + assert dict(returned_item)['Item']['foo'].should.equal("baz") + +@mock_dynamodb2 +def test_boto3_update_item_conditions_pass_because_expect_not_exists(): + table = _create_user_table() + table.put_item(Item={'username': 'johndoe', 'foo': 'bar'}) + table.update_item( + Key={'username': 'johndoe'}, + UpdateExpression='SET foo=baz', + Expected={ + 'whatever': 
{ + 'Exists': False, + } + }) + returned_item = table.get_item(Key={'username': 'johndoe'}) + assert dict(returned_item)['Item']['foo'].should.equal("baz") + +@mock_dynamodb2 +def test_boto3_update_item_conditions_pass_because_expect_not_exists_by_compare_to_null(): + table = _create_user_table() + table.put_item(Item={'username': 'johndoe', 'foo': 'bar'}) + table.update_item( + Key={'username': 'johndoe'}, + UpdateExpression='SET foo=baz', + Expected={ + 'whatever': { + 'ComparisonOperator': 'NULL', + } + }) + returned_item = table.get_item(Key={'username': 'johndoe'}) + assert dict(returned_item)['Item']['foo'].should.equal("baz") + +@mock_dynamodb2 +def test_boto3_update_item_conditions_pass_because_expect_exists_by_compare_to_not_null(): + table = _create_user_table() + table.put_item(Item={'username': 'johndoe', 'foo': 'bar'}) + table.update_item( + Key={'username': 'johndoe'}, + UpdateExpression='SET foo=baz', + Expected={ + 'foo': { + 'ComparisonOperator': 'NOT_NULL', + } + }) + returned_item = table.get_item(Key={'username': 'johndoe'}) + assert dict(returned_item)['Item']['foo'].should.equal("baz") + +@mock_dynamodb2 +def test_boto3_put_item_conditions_pass(): + table = _create_user_table() + table.put_item(Item={'username': 'johndoe', 'foo': 'bar'}) + table.put_item( + Item={'username': 'johndoe', 'foo': 'baz'}, + Expected={ + 'foo': { + 'ComparisonOperator': 'EQ', + 'AttributeValueList': ['bar'] + } + }) + returned_item = table.get_item(Key={'username': 'johndoe'}) + assert dict(returned_item)['Item']['foo'].should.equal("baz") + + +@mock_dynamodb2 +def test_scan_pagination(): + table = _create_user_table() + + expected_usernames = ['user{0}'.format(i) for i in range(10)] + for u in expected_usernames: + table.put_item(Item={'username': u}) + + page1 = table.scan(Limit=6) + page1['Count'].should.equal(6) + page1['Items'].should.have.length_of(6) + page1.should.have.key('LastEvaluatedKey') + + page2 = table.scan(Limit=6, + ExclusiveStartKey=page1['LastEvaluatedKey']) + page2['Count'].should.equal(4) + page2['Items'].should.have.length_of(4) + page2.should_not.have.key('LastEvaluatedKey') + + results = page1['Items'] + page2['Items'] + usernames = set([r['username'] for r in results]) + usernames.should.equal(set(expected_usernames)) diff --git a/tests/test_dynamodb2/test_server.py b/tests/test_dynamodb2/test_server.py index af820beafae5..be94df0f430a 100644 --- a/tests/test_dynamodb2/test_server.py +++ b/tests/test_dynamodb2/test_server.py @@ -1,19 +1,19 @@ -from __future__ import unicode_literals -import sure # noqa - -import moto.server as server - -''' -Test the different server responses -''' - - -def test_table_list(): - backend = server.create_backend_app("dynamodb2") - test_client = backend.test_client() - res = test_client.get('/') - res.status_code.should.equal(404) - - headers = {'X-Amz-Target': 'TestTable.ListTables'} - res = test_client.get('/', headers=headers) - res.data.should.contain(b'TableNames') +from __future__ import unicode_literals +import sure # noqa + +import moto.server as server + +''' +Test the different server responses +''' + + +def test_table_list(): + backend = server.create_backend_app("dynamodb2") + test_client = backend.test_client() + res = test_client.get('/') + res.status_code.should.equal(404) + + headers = {'X-Amz-Target': 'TestTable.ListTables'} + res = test_client.get('/', headers=headers) + res.data.should.contain(b'TableNames') diff --git a/tests/test_ec2/test_account_attributes.py b/tests/test_ec2/test_account_attributes.py index 
30309bec8ada..45ae09419ca5 100644 --- a/tests/test_ec2/test_account_attributes.py +++ b/tests/test_ec2/test_account_attributes.py @@ -1,44 +1,44 @@ -from __future__ import unicode_literals -import boto3 -from moto import mock_ec2 -import sure # noqa - - -@mock_ec2 -def test_describe_account_attributes(): - conn = boto3.client('ec2', region_name='us-east-1') - response = conn.describe_account_attributes() - expected_attribute_values = [{ - 'AttributeValues': [{ - 'AttributeValue': '5' - }], - 'AttributeName': 'vpc-max-security-groups-per-interface' - }, { - 'AttributeValues': [{ - 'AttributeValue': '20' - }], - 'AttributeName': 'max-instances' - }, { - 'AttributeValues': [{ - 'AttributeValue': 'EC2' - }, { - 'AttributeValue': 'VPC' - }], - 'AttributeName': 'supported-platforms' - }, { - 'AttributeValues': [{ - 'AttributeValue': 'none' - }], - 'AttributeName': 'default-vpc' - }, { - 'AttributeValues': [{ - 'AttributeValue': '5' - }], - 'AttributeName': 'max-elastic-ips' - }, { - 'AttributeValues': [{ - 'AttributeValue': '5' - }], - 'AttributeName': 'vpc-max-elastic-ips' - }] - response['AccountAttributes'].should.equal(expected_attribute_values) +from __future__ import unicode_literals +import boto3 +from moto import mock_ec2 +import sure # noqa + + +@mock_ec2 +def test_describe_account_attributes(): + conn = boto3.client('ec2', region_name='us-east-1') + response = conn.describe_account_attributes() + expected_attribute_values = [{ + 'AttributeValues': [{ + 'AttributeValue': '5' + }], + 'AttributeName': 'vpc-max-security-groups-per-interface' + }, { + 'AttributeValues': [{ + 'AttributeValue': '20' + }], + 'AttributeName': 'max-instances' + }, { + 'AttributeValues': [{ + 'AttributeValue': 'EC2' + }, { + 'AttributeValue': 'VPC' + }], + 'AttributeName': 'supported-platforms' + }, { + 'AttributeValues': [{ + 'AttributeValue': 'none' + }], + 'AttributeName': 'default-vpc' + }, { + 'AttributeValues': [{ + 'AttributeValue': '5' + }], + 'AttributeName': 'max-elastic-ips' + }, { + 'AttributeValues': [{ + 'AttributeValue': '5' + }], + 'AttributeName': 'vpc-max-elastic-ips' + }] + response['AccountAttributes'].should.equal(expected_attribute_values) diff --git a/tests/test_ec2/test_amazon_dev_pay.py b/tests/test_ec2/test_amazon_dev_pay.py index 38e1eb75197d..1dd9cc74e311 100644 --- a/tests/test_ec2/test_amazon_dev_pay.py +++ b/tests/test_ec2/test_amazon_dev_pay.py @@ -1,10 +1,10 @@ -from __future__ import unicode_literals -import boto -import sure # noqa - -from moto import mock_ec2 - - -@mock_ec2 -def test_amazon_dev_pay(): - pass +from __future__ import unicode_literals +import boto +import sure # noqa + +from moto import mock_ec2 + + +@mock_ec2 +def test_amazon_dev_pay(): + pass diff --git a/tests/test_ec2/test_amis.py b/tests/test_ec2/test_amis.py index a8d4d1b67b53..bb5fb3facf69 100644 --- a/tests/test_ec2/test_amis.py +++ b/tests/test_ec2/test_amis.py @@ -1,776 +1,776 @@ -from __future__ import unicode_literals - -import boto -import boto.ec2 -import boto3 -from boto.exception import EC2ResponseError -from botocore.exceptions import ClientError -# Ensure 'assert_raises' context manager support for Python 2.6 -from nose.tools import assert_raises -import sure # noqa - -from moto import mock_ec2_deprecated, mock_ec2 -from moto.ec2.models import AMIS -from tests.helpers import requires_boto_gte - - -@mock_ec2_deprecated -def test_ami_create_and_delete(): - conn = boto.connect_ec2('the_key', 'the_secret') - - initial_ami_count = len(AMIS) - conn.get_all_volumes().should.have.length_of(0) - 
conn.get_all_snapshots().should.have.length_of(initial_ami_count) - - reservation = conn.run_instances('ami-1234abcd') - instance = reservation.instances[0] - - with assert_raises(EC2ResponseError) as ex: - image_id = conn.create_image( - instance.id, "test-ami", "this is a test ami", dry_run=True) - ex.exception.error_code.should.equal('DryRunOperation') - ex.exception.status.should.equal(400) - ex.exception.message.should.equal( - 'An error occurred (DryRunOperation) when calling the CreateImage operation: Request would have succeeded, but DryRun flag is set') - - image_id = conn.create_image(instance.id, "test-ami", "this is a test ami") - - all_images = conn.get_all_images() - set([i.id for i in all_images]).should.contain(image_id) - - retrieved_image = [i for i in all_images if i.id == image_id][0] - - retrieved_image.id.should.equal(image_id) - retrieved_image.virtualization_type.should.equal(instance.virtualization_type) - retrieved_image.architecture.should.equal(instance.architecture) - retrieved_image.kernel_id.should.equal(instance.kernel) - retrieved_image.platform.should.equal(instance.platform) - retrieved_image.creationDate.should_not.be.none - instance.terminate() - - # Ensure we're no longer creating a volume - volumes = conn.get_all_volumes() - volumes.should.have.length_of(0) - - # Validate auto-created snapshot - snapshots = conn.get_all_snapshots() - snapshots.should.have.length_of(initial_ami_count + 1) - - retrieved_image_snapshot_id = retrieved_image.block_device_mapping.current_value.snapshot_id - [s.id for s in snapshots].should.contain(retrieved_image_snapshot_id) - snapshot = [s for s in snapshots if s.id == retrieved_image_snapshot_id][0] - snapshot.description.should.equal( - "Auto-created snapshot for AMI {0}".format(retrieved_image.id)) - - # root device should be in AMI's block device mappings - root_mapping = retrieved_image.block_device_mapping.get(retrieved_image.root_device_name) - root_mapping.should_not.be.none - - # Deregister - with assert_raises(EC2ResponseError) as ex: - success = conn.deregister_image(image_id, dry_run=True) - ex.exception.error_code.should.equal('DryRunOperation') - ex.exception.status.should.equal(400) - ex.exception.message.should.equal( - 'An error occurred (DryRunOperation) when calling the DeregisterImage operation: Request would have succeeded, but DryRun flag is set') - - success = conn.deregister_image(image_id) - success.should.be.true - - with assert_raises(EC2ResponseError) as cm: - conn.deregister_image(image_id) - cm.exception.code.should.equal('InvalidAMIID.NotFound') - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none - - -@requires_boto_gte("2.14.0") -@mock_ec2_deprecated -def test_ami_copy(): - conn = boto.ec2.connect_to_region("us-west-1") - - initial_ami_count = len(AMIS) - conn.get_all_volumes().should.have.length_of(0) - conn.get_all_snapshots().should.have.length_of(initial_ami_count) - - reservation = conn.run_instances('ami-1234abcd') - instance = reservation.instances[0] - - source_image_id = conn.create_image( - instance.id, "test-ami", "this is a test ami") - instance.terminate() - source_image = conn.get_all_images(image_ids=[source_image_id])[0] - - # Boto returns a 'CopyImage' object with an image_id attribute here. Use - # the image_id to fetch the full info. 
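# (Editor's note, illustrative only: copy_image returns a CopyImage wrapper
# rather than an Image, so the code below reads copy_image_ref.image_id and
# then calls get_all_images to fetch the full copied-AMI object.)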
- with assert_raises(EC2ResponseError) as ex: - copy_image_ref = conn.copy_image( - source_image.region.name, source_image.id, "test-copy-ami", "this is a test copy ami", - dry_run=True) - ex.exception.error_code.should.equal('DryRunOperation') - ex.exception.status.should.equal(400) - ex.exception.message.should.equal( - 'An error occurred (DryRunOperation) when calling the CopyImage operation: Request would have succeeded, but DryRun flag is set') - - copy_image_ref = conn.copy_image( - source_image.region.name, source_image.id, "test-copy-ami", "this is a test copy ami") - copy_image_id = copy_image_ref.image_id - copy_image = conn.get_all_images(image_ids=[copy_image_id])[0] - - copy_image.id.should.equal(copy_image_id) - copy_image.virtualization_type.should.equal( - source_image.virtualization_type) - copy_image.architecture.should.equal(source_image.architecture) - copy_image.kernel_id.should.equal(source_image.kernel_id) - copy_image.platform.should.equal(source_image.platform) - - # Ensure we're no longer creating a volume - conn.get_all_volumes().should.have.length_of(0) - - # Validate auto-created snapshot - conn.get_all_snapshots().should.have.length_of(initial_ami_count + 2) - - copy_image.block_device_mapping.current_value.snapshot_id.should_not.equal( - source_image.block_device_mapping.current_value.snapshot_id) - - # Copy from non-existent source ID. - with assert_raises(EC2ResponseError) as cm: - conn.copy_image(source_image.region.name, 'ami-abcd1234', - "test-copy-ami", "this is a test copy ami") - cm.exception.code.should.equal('InvalidAMIID.NotFound') - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none - - # Copy from non-existent source region. - with assert_raises(EC2ResponseError) as cm: - invalid_region = 'us-east-1' if (source_image.region.name != - 'us-east-1') else 'us-west-1' - conn.copy_image(invalid_region, source_image.id, - "test-copy-ami", "this is a test copy ami") - cm.exception.code.should.equal('InvalidAMIID.NotFound') - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none - - -@mock_ec2_deprecated -def test_ami_tagging(): - conn = boto.connect_vpc('the_key', 'the_secret') - reservation = conn.run_instances('ami-1234abcd') - instance = reservation.instances[0] - conn.create_image(instance.id, "test-ami", "this is a test ami") - image = conn.get_all_images()[0] - - with assert_raises(EC2ResponseError) as ex: - image.add_tag("a key", "some value", dry_run=True) - ex.exception.error_code.should.equal('DryRunOperation') - ex.exception.status.should.equal(400) - ex.exception.message.should.equal( - 'An error occurred (DryRunOperation) when calling the CreateTags operation: Request would have succeeded, but DryRun flag is set') - - image.add_tag("a key", "some value") - - tag = conn.get_all_tags()[0] - tag.name.should.equal("a key") - tag.value.should.equal("some value") - - # Refresh the DHCP options - image = conn.get_all_images()[0] - image.tags.should.have.length_of(1) - image.tags["a key"].should.equal("some value") - - -@mock_ec2_deprecated -def test_ami_create_from_missing_instance(): - conn = boto.connect_ec2('the_key', 'the_secret') - args = ["i-abcdefg", "test-ami", "this is a test ami"] - - with assert_raises(EC2ResponseError) as cm: - conn.create_image(*args) - cm.exception.code.should.equal('InvalidInstanceID.NotFound') - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none - - -@mock_ec2_deprecated -def test_ami_pulls_attributes_from_instance(): - conn 
= boto.connect_ec2('the_key', 'the_secret') - reservation = conn.run_instances('ami-1234abcd') - instance = reservation.instances[0] - instance.modify_attribute("kernel", "test-kernel") - - image_id = conn.create_image(instance.id, "test-ami", "this is a test ami") - image = conn.get_image(image_id) - image.kernel_id.should.equal('test-kernel') - - -@mock_ec2_deprecated -def test_ami_filters(): - conn = boto.connect_ec2('the_key', 'the_secret') - - reservationA = conn.run_instances('ami-1234abcd') - instanceA = reservationA.instances[0] - instanceA.modify_attribute("architecture", "i386") - instanceA.modify_attribute("kernel", "k-1234abcd") - instanceA.modify_attribute("platform", "windows") - instanceA.modify_attribute("virtualization_type", "hvm") - imageA_id = conn.create_image( - instanceA.id, "test-ami-A", "this is a test ami") - imageA = conn.get_image(imageA_id) - - reservationB = conn.run_instances('ami-abcd1234') - instanceB = reservationB.instances[0] - instanceB.modify_attribute("architecture", "x86_64") - instanceB.modify_attribute("kernel", "k-abcd1234") - instanceB.modify_attribute("platform", "linux") - instanceB.modify_attribute("virtualization_type", "paravirtual") - imageB_id = conn.create_image( - instanceB.id, "test-ami-B", "this is a test ami") - imageB = conn.get_image(imageB_id) - imageB.set_launch_permissions(group_names=("all")) - - amis_by_architecture = conn.get_all_images( - filters={'architecture': 'x86_64'}) - set([ami.id for ami in amis_by_architecture]).should.contain(imageB.id) - len(amis_by_architecture).should.equal(35) - - amis_by_kernel = conn.get_all_images(filters={'kernel-id': 'k-abcd1234'}) - set([ami.id for ami in amis_by_kernel]).should.equal(set([imageB.id])) - - amis_by_virtualization = conn.get_all_images( - filters={'virtualization-type': 'paravirtual'}) - set([ami.id for ami in amis_by_virtualization] - ).should.contain(imageB.id) - len(amis_by_virtualization).should.equal(3) - - amis_by_platform = conn.get_all_images(filters={'platform': 'windows'}) - set([ami.id for ami in amis_by_platform]).should.contain(imageA.id) - len(amis_by_platform).should.equal(24) - - amis_by_id = conn.get_all_images(filters={'image-id': imageA.id}) - set([ami.id for ami in amis_by_id]).should.equal(set([imageA.id])) - - amis_by_state = conn.get_all_images(filters={'state': 'available'}) - ami_ids_by_state = [ami.id for ami in amis_by_state] - ami_ids_by_state.should.contain(imageA.id) - ami_ids_by_state.should.contain(imageB.id) - len(amis_by_state).should.equal(36) - - amis_by_name = conn.get_all_images(filters={'name': imageA.name}) - set([ami.id for ami in amis_by_name]).should.equal(set([imageA.id])) - - amis_by_public = conn.get_all_images(filters={'is-public': True}) - set([ami.id for ami in amis_by_public]).should.contain(imageB.id) - len(amis_by_public).should.equal(35) - - amis_by_nonpublic = conn.get_all_images(filters={'is-public': False}) - set([ami.id for ami in amis_by_nonpublic]).should.contain(imageA.id) - len(amis_by_nonpublic).should.equal(1) - - -@mock_ec2_deprecated -def test_ami_filtering_via_tag(): - conn = boto.connect_vpc('the_key', 'the_secret') - - reservationA = conn.run_instances('ami-1234abcd') - instanceA = reservationA.instances[0] - imageA_id = conn.create_image( - instanceA.id, "test-ami-A", "this is a test ami") - imageA = conn.get_image(imageA_id) - imageA.add_tag("a key", "some value") - - reservationB = conn.run_instances('ami-abcd1234') - instanceB = reservationB.instances[0] - imageB_id = conn.create_image( - instanceB.id, 
"test-ami-B", "this is a test ami") - imageB = conn.get_image(imageB_id) - imageB.add_tag("another key", "some other value") - - amis_by_tagA = conn.get_all_images(filters={'tag:a key': 'some value'}) - set([ami.id for ami in amis_by_tagA]).should.equal(set([imageA.id])) - - amis_by_tagB = conn.get_all_images( - filters={'tag:another key': 'some other value'}) - set([ami.id for ami in amis_by_tagB]).should.equal(set([imageB.id])) - - -@mock_ec2_deprecated -def test_getting_missing_ami(): - conn = boto.connect_ec2('the_key', 'the_secret') - - with assert_raises(EC2ResponseError) as cm: - conn.get_image('ami-missing') - cm.exception.code.should.equal('InvalidAMIID.NotFound') - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none - - -@mock_ec2_deprecated -def test_getting_malformed_ami(): - conn = boto.connect_ec2('the_key', 'the_secret') - - with assert_raises(EC2ResponseError) as cm: - conn.get_image('foo-missing') - cm.exception.code.should.equal('InvalidAMIID.Malformed') - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none - - -@mock_ec2_deprecated -def test_ami_attribute_group_permissions(): - conn = boto.connect_ec2('the_key', 'the_secret') - reservation = conn.run_instances('ami-1234abcd') - instance = reservation.instances[0] - image_id = conn.create_image(instance.id, "test-ami", "this is a test ami") - image = conn.get_image(image_id) - - # Baseline - attributes = conn.get_image_attribute( - image.id, attribute='launchPermission') - attributes.name.should.equal('launch_permission') - attributes.attrs.should.have.length_of(0) - - ADD_GROUP_ARGS = {'image_id': image.id, - 'attribute': 'launchPermission', - 'operation': 'add', - 'groups': 'all'} - - REMOVE_GROUP_ARGS = {'image_id': image.id, - 'attribute': 'launchPermission', - 'operation': 'remove', - 'groups': 'all'} - - # Add 'all' group and confirm - with assert_raises(EC2ResponseError) as ex: - conn.modify_image_attribute( - **dict(ADD_GROUP_ARGS, **{'dry_run': True})) - ex.exception.error_code.should.equal('DryRunOperation') - ex.exception.status.should.equal(400) - ex.exception.message.should.equal( - 'An error occurred (DryRunOperation) when calling the ModifyImageAttribute operation: Request would have succeeded, but DryRun flag is set') - - conn.modify_image_attribute(**ADD_GROUP_ARGS) - - attributes = conn.get_image_attribute( - image.id, attribute='launchPermission') - attributes.attrs['groups'].should.have.length_of(1) - attributes.attrs['groups'].should.equal(['all']) - image = conn.get_image(image_id) - image.is_public.should.equal(True) - - # Add is idempotent - conn.modify_image_attribute.when.called_with( - **ADD_GROUP_ARGS).should_not.throw(EC2ResponseError) - - # Remove 'all' group and confirm - conn.modify_image_attribute(**REMOVE_GROUP_ARGS) - - attributes = conn.get_image_attribute( - image.id, attribute='launchPermission') - attributes.attrs.should.have.length_of(0) - image = conn.get_image(image_id) - image.is_public.should.equal(False) - - # Remove is idempotent - conn.modify_image_attribute.when.called_with( - **REMOVE_GROUP_ARGS).should_not.throw(EC2ResponseError) - - -@mock_ec2_deprecated -def test_ami_attribute_user_permissions(): - conn = boto.connect_ec2('the_key', 'the_secret') - reservation = conn.run_instances('ami-1234abcd') - instance = reservation.instances[0] - image_id = conn.create_image(instance.id, "test-ami", "this is a test ami") - image = conn.get_image(image_id) - - # Baseline - attributes = conn.get_image_attribute( - 
image.id, attribute='launchPermission') - attributes.name.should.equal('launch_permission') - attributes.attrs.should.have.length_of(0) - - # Both str and int values should work. - USER1 = '123456789011' - USER2 = 123456789022 - - ADD_USERS_ARGS = {'image_id': image.id, - 'attribute': 'launchPermission', - 'operation': 'add', - 'user_ids': [USER1, USER2]} - - REMOVE_USERS_ARGS = {'image_id': image.id, - 'attribute': 'launchPermission', - 'operation': 'remove', - 'user_ids': [USER1, USER2]} - - REMOVE_SINGLE_USER_ARGS = {'image_id': image.id, - 'attribute': 'launchPermission', - 'operation': 'remove', - 'user_ids': [USER1]} - - # Add multiple users and confirm - conn.modify_image_attribute(**ADD_USERS_ARGS) - - attributes = conn.get_image_attribute( - image.id, attribute='launchPermission') - attributes.attrs['user_ids'].should.have.length_of(2) - set(attributes.attrs['user_ids']).should.equal( - set([str(USER1), str(USER2)])) - image = conn.get_image(image_id) - image.is_public.should.equal(False) - - # Add is idempotent - conn.modify_image_attribute.when.called_with( - **ADD_USERS_ARGS).should_not.throw(EC2ResponseError) - - # Remove single user and confirm - conn.modify_image_attribute(**REMOVE_SINGLE_USER_ARGS) - - attributes = conn.get_image_attribute( - image.id, attribute='launchPermission') - attributes.attrs['user_ids'].should.have.length_of(1) - set(attributes.attrs['user_ids']).should.equal(set([str(USER2)])) - image = conn.get_image(image_id) - image.is_public.should.equal(False) - - # Remove multiple users and confirm - conn.modify_image_attribute(**REMOVE_USERS_ARGS) - - attributes = conn.get_image_attribute( - image.id, attribute='launchPermission') - attributes.attrs.should.have.length_of(0) - image = conn.get_image(image_id) - image.is_public.should.equal(False) - - # Remove is idempotent - conn.modify_image_attribute.when.called_with( - **REMOVE_USERS_ARGS).should_not.throw(EC2ResponseError) - - -@mock_ec2 -def test_ami_describe_executable_users(): - conn = boto3.client('ec2', region_name='us-east-1') - ec2 = boto3.resource('ec2', 'us-east-1') - ec2.create_instances(ImageId='', - MinCount=1, - MaxCount=1) - response = conn.describe_instances(Filters=[{'Name': 'instance-state-name', 'Values': ['running']}]) - instance_id = response['Reservations'][0]['Instances'][0]['InstanceId'] - image_id = conn.create_image(InstanceId=instance_id, - Name='TestImage', )['ImageId'] - - USER1 = '123456789011' - - ADD_USER_ARGS = {'ImageId': image_id, - 'Attribute': 'launchPermission', - 'OperationType': 'add', - 'UserIds': [USER1]} - - # Add users and get no images - conn.modify_image_attribute(**ADD_USER_ARGS) - - attributes = conn.describe_image_attribute(ImageId=image_id, - Attribute='LaunchPermissions', - DryRun=False) - attributes['LaunchPermissions'].should.have.length_of(1) - attributes['LaunchPermissions'][0]['UserId'].should.equal(USER1) - images = conn.describe_images(ExecutableUsers=[USER1])['Images'] - images.should.have.length_of(1) - images[0]['ImageId'].should.equal(image_id) - - -@mock_ec2 -def test_ami_describe_executable_users_negative(): - conn = boto3.client('ec2', region_name='us-east-1') - ec2 = boto3.resource('ec2', 'us-east-1') - ec2.create_instances(ImageId='', - MinCount=1, - MaxCount=1) - response = conn.describe_instances(Filters=[{'Name': 'instance-state-name', 'Values': ['running']}]) - instance_id = response['Reservations'][0]['Instances'][0]['InstanceId'] - image_id = conn.create_image(InstanceId=instance_id, - Name='TestImage')['ImageId'] - - USER1 = 
'123456789011' - USER2 = '113355789012' - - ADD_USER_ARGS = {'ImageId': image_id, - 'Attribute': 'launchPermission', - 'OperationType': 'add', - 'UserIds': [USER1]} - - # Add users and get no images - # Add users and get no images - conn.modify_image_attribute(**ADD_USER_ARGS) - - attributes = conn.describe_image_attribute(ImageId=image_id, - Attribute='LaunchPermissions', - DryRun=False) - attributes['LaunchPermissions'].should.have.length_of(1) - attributes['LaunchPermissions'][0]['UserId'].should.equal(USER1) - images = conn.describe_images(ExecutableUsers=[USER2])['Images'] - images.should.have.length_of(0) - - -@mock_ec2 -def test_ami_describe_executable_users_and_filter(): - conn = boto3.client('ec2', region_name='us-east-1') - ec2 = boto3.resource('ec2', 'us-east-1') - ec2.create_instances(ImageId='', - MinCount=1, - MaxCount=1) - response = conn.describe_instances(Filters=[{'Name': 'instance-state-name', 'Values': ['running']}]) - instance_id = response['Reservations'][0]['Instances'][0]['InstanceId'] - image_id = conn.create_image(InstanceId=instance_id, - Name='ImageToDelete', )['ImageId'] - - USER1 = '123456789011' - - ADD_USER_ARGS = {'ImageId': image_id, - 'Attribute': 'launchPermission', - 'OperationType': 'add', - 'UserIds': [USER1]} - - # Add users and get no images - conn.modify_image_attribute(**ADD_USER_ARGS) - - attributes = conn.describe_image_attribute(ImageId=image_id, - Attribute='LaunchPermissions', - DryRun=False) - attributes['LaunchPermissions'].should.have.length_of(1) - attributes['LaunchPermissions'][0]['UserId'].should.equal(USER1) - images = conn.describe_images(ExecutableUsers=[USER1], - Filters=[{'Name': 'state', 'Values': ['available']}])['Images'] - images.should.have.length_of(1) - images[0]['ImageId'].should.equal(image_id) - - -@mock_ec2_deprecated -def test_ami_attribute_user_and_group_permissions(): - """ - Boto supports adding/removing both users and groups at the same time. - Just spot-check this -- input variations, idempotency, etc are validated - via user-specific and group-specific tests above. 
- """ - conn = boto.connect_ec2('the_key', 'the_secret') - reservation = conn.run_instances('ami-1234abcd') - instance = reservation.instances[0] - image_id = conn.create_image(instance.id, "test-ami", "this is a test ami") - image = conn.get_image(image_id) - - # Baseline - attributes = conn.get_image_attribute( - image.id, attribute='launchPermission') - attributes.name.should.equal('launch_permission') - attributes.attrs.should.have.length_of(0) - - USER1 = '123456789011' - USER2 = '123456789022' - - ADD_ARGS = {'image_id': image.id, - 'attribute': 'launchPermission', - 'operation': 'add', - 'groups': ['all'], - 'user_ids': [USER1, USER2]} - - REMOVE_ARGS = {'image_id': image.id, - 'attribute': 'launchPermission', - 'operation': 'remove', - 'groups': ['all'], - 'user_ids': [USER1, USER2]} - - # Add and confirm - conn.modify_image_attribute(**ADD_ARGS) - - attributes = conn.get_image_attribute( - image.id, attribute='launchPermission') - attributes.attrs['user_ids'].should.have.length_of(2) - set(attributes.attrs['user_ids']).should.equal(set([USER1, USER2])) - set(attributes.attrs['groups']).should.equal(set(['all'])) - image = conn.get_image(image_id) - image.is_public.should.equal(True) - - # Remove and confirm - conn.modify_image_attribute(**REMOVE_ARGS) - - attributes = conn.get_image_attribute( - image.id, attribute='launchPermission') - attributes.attrs.should.have.length_of(0) - image = conn.get_image(image_id) - image.is_public.should.equal(False) - - -@mock_ec2_deprecated -def test_ami_attribute_error_cases(): - conn = boto.connect_ec2('the_key', 'the_secret') - reservation = conn.run_instances('ami-1234abcd') - instance = reservation.instances[0] - image_id = conn.create_image(instance.id, "test-ami", "this is a test ami") - image = conn.get_image(image_id) - - # Error: Add with group != 'all' - with assert_raises(EC2ResponseError) as cm: - conn.modify_image_attribute(image.id, - attribute='launchPermission', - operation='add', - groups='everyone') - cm.exception.code.should.equal('InvalidAMIAttributeItemValue') - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none - - # Error: Add with user ID that isn't an integer. - with assert_raises(EC2ResponseError) as cm: - conn.modify_image_attribute(image.id, - attribute='launchPermission', - operation='add', - user_ids='12345678901A') - cm.exception.code.should.equal('InvalidAMIAttributeItemValue') - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none - - # Error: Add with user ID that is > length 12. - with assert_raises(EC2ResponseError) as cm: - conn.modify_image_attribute(image.id, - attribute='launchPermission', - operation='add', - user_ids='1234567890123') - cm.exception.code.should.equal('InvalidAMIAttributeItemValue') - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none - - # Error: Add with user ID that is < length 12. - with assert_raises(EC2ResponseError) as cm: - conn.modify_image_attribute(image.id, - attribute='launchPermission', - operation='add', - user_ids='12345678901') - cm.exception.code.should.equal('InvalidAMIAttributeItemValue') - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none - - # Error: Add with one invalid user ID among other valid IDs, ensure no - # partial changes. 
- with assert_raises(EC2ResponseError) as cm: - conn.modify_image_attribute(image.id, - attribute='launchPermission', - operation='add', - user_ids=['123456789011', 'foo', '123456789022']) - cm.exception.code.should.equal('InvalidAMIAttributeItemValue') - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none - - attributes = conn.get_image_attribute( - image.id, attribute='launchPermission') - attributes.attrs.should.have.length_of(0) - - # Error: Add with invalid image ID - with assert_raises(EC2ResponseError) as cm: - conn.modify_image_attribute("ami-abcd1234", - attribute='launchPermission', - operation='add', - groups='all') - cm.exception.code.should.equal('InvalidAMIID.NotFound') - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none - - # Error: Remove with invalid image ID - with assert_raises(EC2ResponseError) as cm: - conn.modify_image_attribute("ami-abcd1234", - attribute='launchPermission', - operation='remove', - groups='all') - cm.exception.code.should.equal('InvalidAMIID.NotFound') - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none - - -@mock_ec2 -def test_ami_describe_non_existent(): - ec2 = boto3.resource('ec2', region_name='us-west-1') - # Valid pattern but non-existent id - img = ec2.Image('ami-abcd1234') - with assert_raises(ClientError): - img.load() - # Invalid ami pattern - img = ec2.Image('not_an_ami_id') - with assert_raises(ClientError): - img.load() - - -@mock_ec2 -def test_ami_filter_wildcard(): - ec2_resource = boto3.resource('ec2', region_name='us-west-1') - ec2_client = boto3.client('ec2', region_name='us-west-1') - - instance = ec2_resource.create_instances(ImageId='ami-1234abcd', MinCount=1, MaxCount=1)[0] - instance.create_image(Name='test-image') - - # create an image with the same owner but will not match the filter - instance.create_image(Name='not-matching-image') - - my_images = ec2_client.describe_images( - Owners=['111122223333'], - Filters=[{'Name': 'name', 'Values': ['test*']}] - )['Images'] - my_images.should.have.length_of(1) - - -@mock_ec2 -def test_ami_filter_by_owner_id(): - client = boto3.client('ec2', region_name='us-east-1') - - ubuntu_id = '099720109477' - - ubuntu_images = client.describe_images(Owners=[ubuntu_id]) - all_images = client.describe_images() - - ubuntu_ids = [ami['OwnerId'] for ami in ubuntu_images['Images']] - all_ids = [ami['OwnerId'] for ami in all_images['Images']] - - # Assert all ubuntu_ids are the same and one equals ubuntu_id - assert all(ubuntu_ids) and ubuntu_ids[0] == ubuntu_id - # Check we actually have a subset of images - assert len(ubuntu_ids) < len(all_ids) - - -@mock_ec2 -def test_ami_filter_by_self(): - ec2_resource = boto3.resource('ec2', region_name='us-west-1') - ec2_client = boto3.client('ec2', region_name='us-west-1') - - my_images = ec2_client.describe_images(Owners=['self'])['Images'] - my_images.should.have.length_of(0) - - # Create a new image - instance = ec2_resource.create_instances(ImageId='ami-1234abcd', MinCount=1, MaxCount=1)[0] - instance.create_image(Name='test-image') - - my_images = ec2_client.describe_images(Owners=['self'])['Images'] - my_images.should.have.length_of(1) - - -@mock_ec2 -def test_ami_snapshots_have_correct_owner(): - ec2_client = boto3.client('ec2', region_name='us-west-1') - - images_response = ec2_client.describe_images() - - owner_id_to_snapshot_ids = {} - for image in images_response['Images']: - owner_id = image['OwnerId'] - snapshot_ids = [ - 
block_device_mapping['Ebs']['SnapshotId'] - for block_device_mapping in image['BlockDeviceMappings'] - ] - existing_snapshot_ids = owner_id_to_snapshot_ids.get(owner_id, []) - owner_id_to_snapshot_ids[owner_id] = ( - existing_snapshot_ids + snapshot_ids - ) - - for owner_id in owner_id_to_snapshot_ids: - snapshots_rseponse = ec2_client.describe_snapshots( - SnapshotIds=owner_id_to_snapshot_ids[owner_id] - ) - - for snapshot in snapshots_rseponse['Snapshots']: - assert owner_id == snapshot['OwnerId'] +from __future__ import unicode_literals + +import boto +import boto.ec2 +import boto3 +from boto.exception import EC2ResponseError +from botocore.exceptions import ClientError +# Ensure 'assert_raises' context manager support for Python 2.6 +from nose.tools import assert_raises +import sure # noqa + +from moto import mock_ec2_deprecated, mock_ec2 +from moto.ec2.models import AMIS +from tests.helpers import requires_boto_gte + + +@mock_ec2_deprecated +def test_ami_create_and_delete(): + conn = boto.connect_ec2('the_key', 'the_secret') + + initial_ami_count = len(AMIS) + conn.get_all_volumes().should.have.length_of(0) + conn.get_all_snapshots().should.have.length_of(initial_ami_count) + + reservation = conn.run_instances('ami-1234abcd') + instance = reservation.instances[0] + + with assert_raises(EC2ResponseError) as ex: + image_id = conn.create_image( + instance.id, "test-ami", "this is a test ami", dry_run=True) + ex.exception.error_code.should.equal('DryRunOperation') + ex.exception.status.should.equal(400) + ex.exception.message.should.equal( + 'An error occurred (DryRunOperation) when calling the CreateImage operation: Request would have succeeded, but DryRun flag is set') + + image_id = conn.create_image(instance.id, "test-ami", "this is a test ami") + + all_images = conn.get_all_images() + set([i.id for i in all_images]).should.contain(image_id) + + retrieved_image = [i for i in all_images if i.id == image_id][0] + + retrieved_image.id.should.equal(image_id) + retrieved_image.virtualization_type.should.equal(instance.virtualization_type) + retrieved_image.architecture.should.equal(instance.architecture) + retrieved_image.kernel_id.should.equal(instance.kernel) + retrieved_image.platform.should.equal(instance.platform) + retrieved_image.creationDate.should_not.be.none + instance.terminate() + + # Ensure we're no longer creating a volume + volumes = conn.get_all_volumes() + volumes.should.have.length_of(0) + + # Validate auto-created snapshot + snapshots = conn.get_all_snapshots() + snapshots.should.have.length_of(initial_ami_count + 1) + + retrieved_image_snapshot_id = retrieved_image.block_device_mapping.current_value.snapshot_id + [s.id for s in snapshots].should.contain(retrieved_image_snapshot_id) + snapshot = [s for s in snapshots if s.id == retrieved_image_snapshot_id][0] + snapshot.description.should.equal( + "Auto-created snapshot for AMI {0}".format(retrieved_image.id)) + + # root device should be in AMI's block device mappings + root_mapping = retrieved_image.block_device_mapping.get(retrieved_image.root_device_name) + root_mapping.should_not.be.none + + # Deregister + with assert_raises(EC2ResponseError) as ex: + success = conn.deregister_image(image_id, dry_run=True) + ex.exception.error_code.should.equal('DryRunOperation') + ex.exception.status.should.equal(400) + ex.exception.message.should.equal( + 'An error occurred (DryRunOperation) when calling the DeregisterImage operation: Request would have succeeded, but DryRun flag is set') + + success = 
conn.deregister_image(image_id) + success.should.be.true + + with assert_raises(EC2ResponseError) as cm: + conn.deregister_image(image_id) + cm.exception.code.should.equal('InvalidAMIID.NotFound') + cm.exception.status.should.equal(400) + cm.exception.request_id.should_not.be.none + + +@requires_boto_gte("2.14.0") +@mock_ec2_deprecated +def test_ami_copy(): + conn = boto.ec2.connect_to_region("us-west-1") + + initial_ami_count = len(AMIS) + conn.get_all_volumes().should.have.length_of(0) + conn.get_all_snapshots().should.have.length_of(initial_ami_count) + + reservation = conn.run_instances('ami-1234abcd') + instance = reservation.instances[0] + + source_image_id = conn.create_image( + instance.id, "test-ami", "this is a test ami") + instance.terminate() + source_image = conn.get_all_images(image_ids=[source_image_id])[0] + + # Boto returns a 'CopyImage' object with an image_id attribute here. Use + # the image_id to fetch the full info. + with assert_raises(EC2ResponseError) as ex: + copy_image_ref = conn.copy_image( + source_image.region.name, source_image.id, "test-copy-ami", "this is a test copy ami", + dry_run=True) + ex.exception.error_code.should.equal('DryRunOperation') + ex.exception.status.should.equal(400) + ex.exception.message.should.equal( + 'An error occurred (DryRunOperation) when calling the CopyImage operation: Request would have succeeded, but DryRun flag is set') + + copy_image_ref = conn.copy_image( + source_image.region.name, source_image.id, "test-copy-ami", "this is a test copy ami") + copy_image_id = copy_image_ref.image_id + copy_image = conn.get_all_images(image_ids=[copy_image_id])[0] + + copy_image.id.should.equal(copy_image_id) + copy_image.virtualization_type.should.equal( + source_image.virtualization_type) + copy_image.architecture.should.equal(source_image.architecture) + copy_image.kernel_id.should.equal(source_image.kernel_id) + copy_image.platform.should.equal(source_image.platform) + + # Ensure we're no longer creating a volume + conn.get_all_volumes().should.have.length_of(0) + + # Validate auto-created snapshot + conn.get_all_snapshots().should.have.length_of(initial_ami_count + 2) + + copy_image.block_device_mapping.current_value.snapshot_id.should_not.equal( + source_image.block_device_mapping.current_value.snapshot_id) + + # Copy from non-existent source ID. + with assert_raises(EC2ResponseError) as cm: + conn.copy_image(source_image.region.name, 'ami-abcd1234', + "test-copy-ami", "this is a test copy ami") + cm.exception.code.should.equal('InvalidAMIID.NotFound') + cm.exception.status.should.equal(400) + cm.exception.request_id.should_not.be.none + + # Copy from non-existent source region. 
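+ # (Editorial note, inferred from the code below: the region chosen is a real + # region, just not the one holding the source AMI, so the lookup fails with + # InvalidAMIID.NotFound rather than a region-validation error.)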
+ with assert_raises(EC2ResponseError) as cm: + invalid_region = 'us-east-1' if (source_image.region.name != + 'us-east-1') else 'us-west-1' + conn.copy_image(invalid_region, source_image.id, + "test-copy-ami", "this is a test copy ami") + cm.exception.code.should.equal('InvalidAMIID.NotFound') + cm.exception.status.should.equal(400) + cm.exception.request_id.should_not.be.none + + +@mock_ec2_deprecated +def test_ami_tagging(): + conn = boto.connect_vpc('the_key', 'the_secret') + reservation = conn.run_instances('ami-1234abcd') + instance = reservation.instances[0] + conn.create_image(instance.id, "test-ami", "this is a test ami") + image = conn.get_all_images()[0] + + with assert_raises(EC2ResponseError) as ex: + image.add_tag("a key", "some value", dry_run=True) + ex.exception.error_code.should.equal('DryRunOperation') + ex.exception.status.should.equal(400) + ex.exception.message.should.equal( + 'An error occurred (DryRunOperation) when calling the CreateTags operation: Request would have succeeded, but DryRun flag is set') + + image.add_tag("a key", "some value") + + tag = conn.get_all_tags()[0] + tag.name.should.equal("a key") + tag.value.should.equal("some value") + + # Refresh the image + image = conn.get_all_images()[0] + image.tags.should.have.length_of(1) + image.tags["a key"].should.equal("some value") + + +@mock_ec2_deprecated +def test_ami_create_from_missing_instance(): + conn = boto.connect_ec2('the_key', 'the_secret') + args = ["i-abcdefg", "test-ami", "this is a test ami"] + + with assert_raises(EC2ResponseError) as cm: + conn.create_image(*args) + cm.exception.code.should.equal('InvalidInstanceID.NotFound') + cm.exception.status.should.equal(400) + cm.exception.request_id.should_not.be.none + + +@mock_ec2_deprecated +def test_ami_pulls_attributes_from_instance(): + conn = boto.connect_ec2('the_key', 'the_secret') + reservation = conn.run_instances('ami-1234abcd') + instance = reservation.instances[0] + instance.modify_attribute("kernel", "test-kernel") + + image_id = conn.create_image(instance.id, "test-ami", "this is a test ami") + image = conn.get_image(image_id) + image.kernel_id.should.equal('test-kernel') + + +@mock_ec2_deprecated +def test_ami_filters(): + conn = boto.connect_ec2('the_key', 'the_secret') + + reservationA = conn.run_instances('ami-1234abcd') + instanceA = reservationA.instances[0] + instanceA.modify_attribute("architecture", "i386") + instanceA.modify_attribute("kernel", "k-1234abcd") + instanceA.modify_attribute("platform", "windows") + instanceA.modify_attribute("virtualization_type", "hvm") + imageA_id = conn.create_image( + instanceA.id, "test-ami-A", "this is a test ami") + imageA = conn.get_image(imageA_id) + + reservationB = conn.run_instances('ami-abcd1234') + instanceB = reservationB.instances[0] + instanceB.modify_attribute("architecture", "x86_64") + instanceB.modify_attribute("kernel", "k-abcd1234") + instanceB.modify_attribute("platform", "linux") + instanceB.modify_attribute("virtualization_type", "paravirtual") + imageB_id = conn.create_image( + instanceB.id, "test-ami-B", "this is a test ami") + imageB = conn.get_image(imageB_id) + imageB.set_launch_permissions(group_names=("all")) + + amis_by_architecture = conn.get_all_images( + filters={'architecture': 'x86_64'}) + set([ami.id for ami in amis_by_architecture]).should.contain(imageB.id) + len(amis_by_architecture).should.equal(35) + + amis_by_kernel = conn.get_all_images(filters={'kernel-id': 'k-abcd1234'}) + set([ami.id for ami in
amis_by_kernel]).should.equal(set([imageB.id])) + + amis_by_virtualization = conn.get_all_images( + filters={'virtualization-type': 'paravirtual'}) + set([ami.id for ami in amis_by_virtualization] + ).should.contain(imageB.id) + len(amis_by_virtualization).should.equal(3) + + amis_by_platform = conn.get_all_images(filters={'platform': 'windows'}) + set([ami.id for ami in amis_by_platform]).should.contain(imageA.id) + len(amis_by_platform).should.equal(24) + + amis_by_id = conn.get_all_images(filters={'image-id': imageA.id}) + set([ami.id for ami in amis_by_id]).should.equal(set([imageA.id])) + + amis_by_state = conn.get_all_images(filters={'state': 'available'}) + ami_ids_by_state = [ami.id for ami in amis_by_state] + ami_ids_by_state.should.contain(imageA.id) + ami_ids_by_state.should.contain(imageB.id) + len(amis_by_state).should.equal(36) + + amis_by_name = conn.get_all_images(filters={'name': imageA.name}) + set([ami.id for ami in amis_by_name]).should.equal(set([imageA.id])) + + amis_by_public = conn.get_all_images(filters={'is-public': True}) + set([ami.id for ami in amis_by_public]).should.contain(imageB.id) + len(amis_by_public).should.equal(35) + + amis_by_nonpublic = conn.get_all_images(filters={'is-public': False}) + set([ami.id for ami in amis_by_nonpublic]).should.contain(imageA.id) + len(amis_by_nonpublic).should.equal(1) + + +@mock_ec2_deprecated +def test_ami_filtering_via_tag(): + conn = boto.connect_vpc('the_key', 'the_secret') + + reservationA = conn.run_instances('ami-1234abcd') + instanceA = reservationA.instances[0] + imageA_id = conn.create_image( + instanceA.id, "test-ami-A", "this is a test ami") + imageA = conn.get_image(imageA_id) + imageA.add_tag("a key", "some value") + + reservationB = conn.run_instances('ami-abcd1234') + instanceB = reservationB.instances[0] + imageB_id = conn.create_image( + instanceB.id, "test-ami-B", "this is a test ami") + imageB = conn.get_image(imageB_id) + imageB.add_tag("another key", "some other value") + + amis_by_tagA = conn.get_all_images(filters={'tag:a key': 'some value'}) + set([ami.id for ami in amis_by_tagA]).should.equal(set([imageA.id])) + + amis_by_tagB = conn.get_all_images( + filters={'tag:another key': 'some other value'}) + set([ami.id for ami in amis_by_tagB]).should.equal(set([imageB.id])) + + +@mock_ec2_deprecated +def test_getting_missing_ami(): + conn = boto.connect_ec2('the_key', 'the_secret') + + with assert_raises(EC2ResponseError) as cm: + conn.get_image('ami-missing') + cm.exception.code.should.equal('InvalidAMIID.NotFound') + cm.exception.status.should.equal(400) + cm.exception.request_id.should_not.be.none + + +@mock_ec2_deprecated +def test_getting_malformed_ami(): + conn = boto.connect_ec2('the_key', 'the_secret') + + with assert_raises(EC2ResponseError) as cm: + conn.get_image('foo-missing') + cm.exception.code.should.equal('InvalidAMIID.Malformed') + cm.exception.status.should.equal(400) + cm.exception.request_id.should_not.be.none + + +@mock_ec2_deprecated +def test_ami_attribute_group_permissions(): + conn = boto.connect_ec2('the_key', 'the_secret') + reservation = conn.run_instances('ami-1234abcd') + instance = reservation.instances[0] + image_id = conn.create_image(instance.id, "test-ami", "this is a test ami") + image = conn.get_image(image_id) + + # Baseline + attributes = conn.get_image_attribute( + image.id, attribute='launchPermission') + attributes.name.should.equal('launch_permission') + attributes.attrs.should.have.length_of(0) + + ADD_GROUP_ARGS = {'image_id': image.id, + 'attribute': 
'launchPermission', + 'operation': 'add', + 'groups': 'all'} + + REMOVE_GROUP_ARGS = {'image_id': image.id, + 'attribute': 'launchPermission', + 'operation': 'remove', + 'groups': 'all'} + + # Add 'all' group and confirm + with assert_raises(EC2ResponseError) as ex: + conn.modify_image_attribute( + **dict(ADD_GROUP_ARGS, **{'dry_run': True})) + ex.exception.error_code.should.equal('DryRunOperation') + ex.exception.status.should.equal(400) + ex.exception.message.should.equal( + 'An error occurred (DryRunOperation) when calling the ModifyImageAttribute operation: Request would have succeeded, but DryRun flag is set') + + conn.modify_image_attribute(**ADD_GROUP_ARGS) + + attributes = conn.get_image_attribute( + image.id, attribute='launchPermission') + attributes.attrs['groups'].should.have.length_of(1) + attributes.attrs['groups'].should.equal(['all']) + image = conn.get_image(image_id) + image.is_public.should.equal(True) + + # Add is idempotent + conn.modify_image_attribute.when.called_with( + **ADD_GROUP_ARGS).should_not.throw(EC2ResponseError) + + # Remove 'all' group and confirm + conn.modify_image_attribute(**REMOVE_GROUP_ARGS) + + attributes = conn.get_image_attribute( + image.id, attribute='launchPermission') + attributes.attrs.should.have.length_of(0) + image = conn.get_image(image_id) + image.is_public.should.equal(False) + + # Remove is idempotent + conn.modify_image_attribute.when.called_with( + **REMOVE_GROUP_ARGS).should_not.throw(EC2ResponseError) + + +@mock_ec2_deprecated +def test_ami_attribute_user_permissions(): + conn = boto.connect_ec2('the_key', 'the_secret') + reservation = conn.run_instances('ami-1234abcd') + instance = reservation.instances[0] + image_id = conn.create_image(instance.id, "test-ami", "this is a test ami") + image = conn.get_image(image_id) + + # Baseline + attributes = conn.get_image_attribute( + image.id, attribute='launchPermission') + attributes.name.should.equal('launch_permission') + attributes.attrs.should.have.length_of(0) + + # Both str and int values should work. 
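+ # (The assertions below compare against str() of each ID, since the launch + # permissions come back as strings regardless of the input type.)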
+ USER1 = '123456789011' + USER2 = 123456789022 + + ADD_USERS_ARGS = {'image_id': image.id, + 'attribute': 'launchPermission', + 'operation': 'add', + 'user_ids': [USER1, USER2]} + + REMOVE_USERS_ARGS = {'image_id': image.id, + 'attribute': 'launchPermission', + 'operation': 'remove', + 'user_ids': [USER1, USER2]} + + REMOVE_SINGLE_USER_ARGS = {'image_id': image.id, + 'attribute': 'launchPermission', + 'operation': 'remove', + 'user_ids': [USER1]} + + # Add multiple users and confirm + conn.modify_image_attribute(**ADD_USERS_ARGS) + + attributes = conn.get_image_attribute( + image.id, attribute='launchPermission') + attributes.attrs['user_ids'].should.have.length_of(2) + set(attributes.attrs['user_ids']).should.equal( + set([str(USER1), str(USER2)])) + image = conn.get_image(image_id) + image.is_public.should.equal(False) + + # Add is idempotent + conn.modify_image_attribute.when.called_with( + **ADD_USERS_ARGS).should_not.throw(EC2ResponseError) + + # Remove single user and confirm + conn.modify_image_attribute(**REMOVE_SINGLE_USER_ARGS) + + attributes = conn.get_image_attribute( + image.id, attribute='launchPermission') + attributes.attrs['user_ids'].should.have.length_of(1) + set(attributes.attrs['user_ids']).should.equal(set([str(USER2)])) + image = conn.get_image(image_id) + image.is_public.should.equal(False) + + # Remove multiple users and confirm + conn.modify_image_attribute(**REMOVE_USERS_ARGS) + + attributes = conn.get_image_attribute( + image.id, attribute='launchPermission') + attributes.attrs.should.have.length_of(0) + image = conn.get_image(image_id) + image.is_public.should.equal(False) + + # Remove is idempotent + conn.modify_image_attribute.when.called_with( + **REMOVE_USERS_ARGS).should_not.throw(EC2ResponseError) + + +@mock_ec2 +def test_ami_describe_executable_users(): + conn = boto3.client('ec2', region_name='us-east-1') + ec2 = boto3.resource('ec2', 'us-east-1') + ec2.create_instances(ImageId='', + MinCount=1, + MaxCount=1) + response = conn.describe_instances(Filters=[{'Name': 'instance-state-name', 'Values': ['running']}]) + instance_id = response['Reservations'][0]['Instances'][0]['InstanceId'] + image_id = conn.create_image(InstanceId=instance_id, + Name='TestImage', )['ImageId'] + + USER1 = '123456789011' + + ADD_USER_ARGS = {'ImageId': image_id, + 'Attribute': 'launchPermission', + 'OperationType': 'add', + 'UserIds': [USER1]} + + # Add the user and confirm the image is now returned for that user + conn.modify_image_attribute(**ADD_USER_ARGS) + + attributes = conn.describe_image_attribute(ImageId=image_id, + Attribute='LaunchPermissions', + DryRun=False) + attributes['LaunchPermissions'].should.have.length_of(1) + attributes['LaunchPermissions'][0]['UserId'].should.equal(USER1) + images = conn.describe_images(ExecutableUsers=[USER1])['Images'] + images.should.have.length_of(1) + images[0]['ImageId'].should.equal(image_id) + + +@mock_ec2 +def test_ami_describe_executable_users_negative(): + conn = boto3.client('ec2', region_name='us-east-1') + ec2 = boto3.resource('ec2', 'us-east-1') + ec2.create_instances(ImageId='', + MinCount=1, + MaxCount=1) + response = conn.describe_instances(Filters=[{'Name': 'instance-state-name', 'Values': ['running']}]) + instance_id = response['Reservations'][0]['Instances'][0]['InstanceId'] + image_id = conn.create_image(InstanceId=instance_id, + Name='TestImage')['ImageId'] + + USER1 = '123456789011' + USER2 = '113355789012' + + ADD_USER_ARGS = {'ImageId': image_id, + 'Attribute': 'launchPermission', + 'OperationType': 'add', + 'UserIds': [USER1]} + + # Add USER1 and confirm no images are visible to USER2 + conn.modify_image_attribute(**ADD_USER_ARGS) + + attributes = conn.describe_image_attribute(ImageId=image_id, + Attribute='LaunchPermissions', + DryRun=False) + attributes['LaunchPermissions'].should.have.length_of(1) + attributes['LaunchPermissions'][0]['UserId'].should.equal(USER1) + images = conn.describe_images(ExecutableUsers=[USER2])['Images'] + images.should.have.length_of(0) + + +@mock_ec2 +def test_ami_describe_executable_users_and_filter(): + conn = boto3.client('ec2', region_name='us-east-1') + ec2 = boto3.resource('ec2', 'us-east-1') + ec2.create_instances(ImageId='', + MinCount=1, + MaxCount=1) + response = conn.describe_instances(Filters=[{'Name': 'instance-state-name', 'Values': ['running']}]) + instance_id = response['Reservations'][0]['Instances'][0]['InstanceId'] + image_id = conn.create_image(InstanceId=instance_id, + Name='ImageToDelete', )['ImageId'] + + USER1 = '123456789011' + + ADD_USER_ARGS = {'ImageId': image_id, + 'Attribute': 'launchPermission', + 'OperationType': 'add', + 'UserIds': [USER1]} + + # Add the user and confirm the image is returned when filtered by state + conn.modify_image_attribute(**ADD_USER_ARGS) + + attributes = conn.describe_image_attribute(ImageId=image_id, + Attribute='LaunchPermissions', + DryRun=False) + attributes['LaunchPermissions'].should.have.length_of(1) + attributes['LaunchPermissions'][0]['UserId'].should.equal(USER1) + images = conn.describe_images(ExecutableUsers=[USER1], + Filters=[{'Name': 'state', 'Values': ['available']}])['Images'] + images.should.have.length_of(1) + images[0]['ImageId'].should.equal(image_id) + + +@mock_ec2_deprecated +def test_ami_attribute_user_and_group_permissions(): + """ + Boto supports adding/removing both users and groups at the same time. + Just spot-check this -- input variations, idempotency, etc. are validated + via user-specific and group-specific tests above. 
+ """ + conn = boto.connect_ec2('the_key', 'the_secret') + reservation = conn.run_instances('ami-1234abcd') + instance = reservation.instances[0] + image_id = conn.create_image(instance.id, "test-ami", "this is a test ami") + image = conn.get_image(image_id) + + # Baseline + attributes = conn.get_image_attribute( + image.id, attribute='launchPermission') + attributes.name.should.equal('launch_permission') + attributes.attrs.should.have.length_of(0) + + USER1 = '123456789011' + USER2 = '123456789022' + + ADD_ARGS = {'image_id': image.id, + 'attribute': 'launchPermission', + 'operation': 'add', + 'groups': ['all'], + 'user_ids': [USER1, USER2]} + + REMOVE_ARGS = {'image_id': image.id, + 'attribute': 'launchPermission', + 'operation': 'remove', + 'groups': ['all'], + 'user_ids': [USER1, USER2]} + + # Add and confirm + conn.modify_image_attribute(**ADD_ARGS) + + attributes = conn.get_image_attribute( + image.id, attribute='launchPermission') + attributes.attrs['user_ids'].should.have.length_of(2) + set(attributes.attrs['user_ids']).should.equal(set([USER1, USER2])) + set(attributes.attrs['groups']).should.equal(set(['all'])) + image = conn.get_image(image_id) + image.is_public.should.equal(True) + + # Remove and confirm + conn.modify_image_attribute(**REMOVE_ARGS) + + attributes = conn.get_image_attribute( + image.id, attribute='launchPermission') + attributes.attrs.should.have.length_of(0) + image = conn.get_image(image_id) + image.is_public.should.equal(False) + + +@mock_ec2_deprecated +def test_ami_attribute_error_cases(): + conn = boto.connect_ec2('the_key', 'the_secret') + reservation = conn.run_instances('ami-1234abcd') + instance = reservation.instances[0] + image_id = conn.create_image(instance.id, "test-ami", "this is a test ami") + image = conn.get_image(image_id) + + # Error: Add with group != 'all' + with assert_raises(EC2ResponseError) as cm: + conn.modify_image_attribute(image.id, + attribute='launchPermission', + operation='add', + groups='everyone') + cm.exception.code.should.equal('InvalidAMIAttributeItemValue') + cm.exception.status.should.equal(400) + cm.exception.request_id.should_not.be.none + + # Error: Add with user ID that isn't an integer. + with assert_raises(EC2ResponseError) as cm: + conn.modify_image_attribute(image.id, + attribute='launchPermission', + operation='add', + user_ids='12345678901A') + cm.exception.code.should.equal('InvalidAMIAttributeItemValue') + cm.exception.status.should.equal(400) + cm.exception.request_id.should_not.be.none + + # Error: Add with user ID that is > length 12. + with assert_raises(EC2ResponseError) as cm: + conn.modify_image_attribute(image.id, + attribute='launchPermission', + operation='add', + user_ids='1234567890123') + cm.exception.code.should.equal('InvalidAMIAttributeItemValue') + cm.exception.status.should.equal(400) + cm.exception.request_id.should_not.be.none + + # Error: Add with user ID that is < length 12. + with assert_raises(EC2ResponseError) as cm: + conn.modify_image_attribute(image.id, + attribute='launchPermission', + operation='add', + user_ids='12345678901') + cm.exception.code.should.equal('InvalidAMIAttributeItemValue') + cm.exception.status.should.equal(400) + cm.exception.request_id.should_not.be.none + + # Error: Add with one invalid user ID among other valid IDs, ensure no + # partial changes. 
+ with assert_raises(EC2ResponseError) as cm: + conn.modify_image_attribute(image.id, + attribute='launchPermission', + operation='add', + user_ids=['123456789011', 'foo', '123456789022']) + cm.exception.code.should.equal('InvalidAMIAttributeItemValue') + cm.exception.status.should.equal(400) + cm.exception.request_id.should_not.be.none + + attributes = conn.get_image_attribute( + image.id, attribute='launchPermission') + attributes.attrs.should.have.length_of(0) + + # Error: Add with invalid image ID + with assert_raises(EC2ResponseError) as cm: + conn.modify_image_attribute("ami-abcd1234", + attribute='launchPermission', + operation='add', + groups='all') + cm.exception.code.should.equal('InvalidAMIID.NotFound') + cm.exception.status.should.equal(400) + cm.exception.request_id.should_not.be.none + + # Error: Remove with invalid image ID + with assert_raises(EC2ResponseError) as cm: + conn.modify_image_attribute("ami-abcd1234", + attribute='launchPermission', + operation='remove', + groups='all') + cm.exception.code.should.equal('InvalidAMIID.NotFound') + cm.exception.status.should.equal(400) + cm.exception.request_id.should_not.be.none + + +@mock_ec2 +def test_ami_describe_non_existent(): + ec2 = boto3.resource('ec2', region_name='us-west-1') + # Valid pattern but non-existent id + img = ec2.Image('ami-abcd1234') + with assert_raises(ClientError): + img.load() + # Invalid ami pattern + img = ec2.Image('not_an_ami_id') + with assert_raises(ClientError): + img.load() + + +@mock_ec2 +def test_ami_filter_wildcard(): + ec2_resource = boto3.resource('ec2', region_name='us-west-1') + ec2_client = boto3.client('ec2', region_name='us-west-1') + + instance = ec2_resource.create_instances(ImageId='ami-1234abcd', MinCount=1, MaxCount=1)[0] + instance.create_image(Name='test-image') + + # Create a second image with the same owner that will not match the filter + instance.create_image(Name='not-matching-image') + + my_images = ec2_client.describe_images( + Owners=['111122223333'], + Filters=[{'Name': 'name', 'Values': ['test*']}] + )['Images'] + my_images.should.have.length_of(1) + + +@mock_ec2 +def test_ami_filter_by_owner_id(): + client = boto3.client('ec2', region_name='us-east-1') + + ubuntu_id = '099720109477' + + ubuntu_images = client.describe_images(Owners=[ubuntu_id]) + all_images = client.describe_images() + + ubuntu_ids = [ami['OwnerId'] for ami in ubuntu_images['Images']] + all_ids = [ami['OwnerId'] for ami in all_images['Images']] + + # Assert owner IDs were returned and the first one equals ubuntu_id + assert all(ubuntu_ids) and ubuntu_ids[0] == ubuntu_id + # Check we actually have a subset of images + assert len(ubuntu_ids) < len(all_ids) + + +@mock_ec2 +def test_ami_filter_by_self(): + ec2_resource = boto3.resource('ec2', region_name='us-west-1') + ec2_client = boto3.client('ec2', region_name='us-west-1') + + my_images = ec2_client.describe_images(Owners=['self'])['Images'] + my_images.should.have.length_of(0) + + # Create a new image + instance = ec2_resource.create_instances(ImageId='ami-1234abcd', MinCount=1, MaxCount=1)[0] + instance.create_image(Name='test-image') + + my_images = ec2_client.describe_images(Owners=['self'])['Images'] + my_images.should.have.length_of(1) + + +@mock_ec2 +def test_ami_snapshots_have_correct_owner(): + ec2_client = boto3.client('ec2', region_name='us-west-1') + + images_response = ec2_client.describe_images() + + owner_id_to_snapshot_ids = {} + for image in images_response['Images']: + owner_id = image['OwnerId'] + snapshot_ids = [ + 
block_device_mapping['Ebs']['SnapshotId'] + for block_device_mapping in image['BlockDeviceMappings'] + ] + existing_snapshot_ids = owner_id_to_snapshot_ids.get(owner_id, []) + owner_id_to_snapshot_ids[owner_id] = ( + existing_snapshot_ids + snapshot_ids + ) + + for owner_id in owner_id_to_snapshot_ids: + snapshots_response = ec2_client.describe_snapshots( + SnapshotIds=owner_id_to_snapshot_ids[owner_id] + ) + + for snapshot in snapshots_response['Snapshots']: + assert owner_id == snapshot['OwnerId'] diff --git a/tests/test_ec2/test_availability_zones_and_regions.py b/tests/test_ec2/test_availability_zones_and_regions.py index c64f075ca42a..0c94687fa1a0 100644 --- a/tests/test_ec2/test_availability_zones_and_regions.py +++ b/tests/test_ec2/test_availability_zones_and_regions.py @@ -1,54 +1,54 @@ -from __future__ import unicode_literals -import boto -import boto.ec2 -import boto3 -import sure # noqa - -from moto import mock_ec2, mock_ec2_deprecated - - -@mock_ec2_deprecated -def test_describe_regions(): - conn = boto.connect_ec2('the_key', 'the_secret') - regions = conn.get_all_regions() - regions.should.have.length_of(16) - for region in regions: - region.endpoint.should.contain(region.name) - - -@mock_ec2_deprecated -def test_availability_zones(): - conn = boto.connect_ec2('the_key', 'the_secret') - regions = conn.get_all_regions() - for region in regions: - conn = boto.ec2.connect_to_region(region.name) - if conn is None: - continue - for zone in conn.get_all_zones(): - zone.name.should.contain(region.name) - - -@mock_ec2 -def test_boto3_describe_regions(): - ec2 = boto3.client('ec2', 'us-east-1') - resp = ec2.describe_regions() - resp['Regions'].should.have.length_of(16) - for rec in resp['Regions']: - rec['Endpoint'].should.contain(rec['RegionName']) - - test_region = 'us-east-1' - resp = ec2.describe_regions(RegionNames=[test_region]) - resp['Regions'].should.have.length_of(1) - resp['Regions'][0].should.have.key('RegionName').which.should.equal(test_region) - - -@mock_ec2 -def test_boto3_availability_zones(): - ec2 = boto3.client('ec2', 'us-east-1') - resp = ec2.describe_regions() - regions = [r['RegionName'] for r in resp['Regions']] - for region in regions: - conn = boto3.client('ec2', region) - resp = conn.describe_availability_zones() - for rec in resp['AvailabilityZones']: - rec['ZoneName'].should.contain(region) +from __future__ import unicode_literals +import boto +import boto.ec2 +import boto3 +import sure # noqa + +from moto import mock_ec2, mock_ec2_deprecated + + +@mock_ec2_deprecated +def test_describe_regions(): + conn = boto.connect_ec2('the_key', 'the_secret') + regions = conn.get_all_regions() + regions.should.have.length_of(16) + for region in regions: + region.endpoint.should.contain(region.name) + + +@mock_ec2_deprecated +def test_availability_zones(): + conn = boto.connect_ec2('the_key', 'the_secret') + regions = conn.get_all_regions() + for region in regions: + conn = boto.ec2.connect_to_region(region.name) + if conn is None: + continue + for zone in conn.get_all_zones(): + zone.name.should.contain(region.name) + + +@mock_ec2 +def test_boto3_describe_regions(): + ec2 = boto3.client('ec2', 'us-east-1') + resp = ec2.describe_regions() + resp['Regions'].should.have.length_of(16) + for rec in resp['Regions']: + rec['Endpoint'].should.contain(rec['RegionName']) + + test_region = 'us-east-1' + resp = ec2.describe_regions(RegionNames=[test_region]) + resp['Regions'].should.have.length_of(1) + 
resp['Regions'][0].should.have.key('RegionName').which.should.equal(test_region) + + +@mock_ec2 +def test_boto3_availability_zones(): + ec2 = boto3.client('ec2', 'us-east-1') + resp = ec2.describe_regions() + regions = [r['RegionName'] for r in resp['Regions']] + for region in regions: + conn = boto3.client('ec2', region) + resp = conn.describe_availability_zones() + for rec in resp['AvailabilityZones']: + rec['ZoneName'].should.contain(region) diff --git a/tests/test_ec2/test_customer_gateways.py b/tests/test_ec2/test_customer_gateways.py index 589f887f6f40..82e31672304d 100644 --- a/tests/test_ec2/test_customer_gateways.py +++ b/tests/test_ec2/test_customer_gateways.py @@ -1,52 +1,52 @@ -from __future__ import unicode_literals -import boto -import sure # noqa -from nose.tools import assert_raises -from nose.tools import assert_false -from boto.exception import EC2ResponseError - -from moto import mock_ec2_deprecated - - -@mock_ec2_deprecated -def test_create_customer_gateways(): - conn = boto.connect_vpc('the_key', 'the_secret') - - customer_gateway = conn.create_customer_gateway( - 'ipsec.1', '205.251.242.54', 65534) - customer_gateway.should_not.be.none - customer_gateway.id.should.match(r'cgw-\w+') - customer_gateway.type.should.equal('ipsec.1') - customer_gateway.bgp_asn.should.equal(65534) - customer_gateway.ip_address.should.equal('205.251.242.54') - - -@mock_ec2_deprecated -def test_describe_customer_gateways(): - conn = boto.connect_vpc('the_key', 'the_secret') - customer_gateway = conn.create_customer_gateway( - 'ipsec.1', '205.251.242.54', 65534) - cgws = conn.get_all_customer_gateways() - cgws.should.have.length_of(1) - cgws[0].id.should.match(customer_gateway.id) - - -@mock_ec2_deprecated -def test_delete_customer_gateways(): - conn = boto.connect_vpc('the_key', 'the_secret') - - customer_gateway = conn.create_customer_gateway( - 'ipsec.1', '205.251.242.54', 65534) - customer_gateway.should_not.be.none - cgws = conn.get_all_customer_gateways() - cgws[0].id.should.match(customer_gateway.id) - deleted = conn.delete_customer_gateway(customer_gateway.id) - cgws = conn.get_all_customer_gateways() - cgws.should.have.length_of(0) - - -@mock_ec2_deprecated -def test_delete_customer_gateways_bad_id(): - conn = boto.connect_vpc('the_key', 'the_secret') - with assert_raises(EC2ResponseError) as cm: - conn.delete_customer_gateway('cgw-0123abcd') +from __future__ import unicode_literals +import boto +import sure # noqa +from nose.tools import assert_raises +from nose.tools import assert_false +from boto.exception import EC2ResponseError + +from moto import mock_ec2_deprecated + + +@mock_ec2_deprecated +def test_create_customer_gateways(): + conn = boto.connect_vpc('the_key', 'the_secret') + + customer_gateway = conn.create_customer_gateway( + 'ipsec.1', '205.251.242.54', 65534) + customer_gateway.should_not.be.none + customer_gateway.id.should.match(r'cgw-\w+') + customer_gateway.type.should.equal('ipsec.1') + customer_gateway.bgp_asn.should.equal(65534) + customer_gateway.ip_address.should.equal('205.251.242.54') + + +@mock_ec2_deprecated +def test_describe_customer_gateways(): + conn = boto.connect_vpc('the_key', 'the_secret') + customer_gateway = conn.create_customer_gateway( + 'ipsec.1', '205.251.242.54', 65534) + cgws = conn.get_all_customer_gateways() + cgws.should.have.length_of(1) + cgws[0].id.should.match(customer_gateway.id) + + +@mock_ec2_deprecated +def test_delete_customer_gateways(): + conn = boto.connect_vpc('the_key', 'the_secret') + + customer_gateway = 
conn.create_customer_gateway( + 'ipsec.1', '205.251.242.54', 65534) + customer_gateway.should_not.be.none + cgws = conn.get_all_customer_gateways() + cgws[0].id.should.match(customer_gateway.id) + deleted = conn.delete_customer_gateway(customer_gateway.id) + cgws = conn.get_all_customer_gateways() + cgws.should.have.length_of(0) + + +@mock_ec2_deprecated +def test_delete_customer_gateways_bad_id(): + conn = boto.connect_vpc('the_key', 'the_secret') + with assert_raises(EC2ResponseError) as cm: + conn.delete_customer_gateway('cgw-0123abcd') diff --git a/tests/test_ec2/test_dhcp_options.py b/tests/test_ec2/test_dhcp_options.py index 4e25202412e3..2aff803aea7d 100644 --- a/tests/test_ec2/test_dhcp_options.py +++ b/tests/test_ec2/test_dhcp_options.py @@ -1,333 +1,333 @@ -from __future__ import unicode_literals -# Ensure 'assert_raises' context manager support for Python 2.6 -import tests.backport_assert_raises -from nose.tools import assert_raises - -import boto3 -import boto -from boto.exception import EC2ResponseError - -import sure # noqa - -from moto import mock_ec2, mock_ec2_deprecated - -SAMPLE_DOMAIN_NAME = u'example.com' -SAMPLE_NAME_SERVERS = [u'10.0.0.6', u'10.0.0.7'] - - -@mock_ec2_deprecated -def test_dhcp_options_associate(): - """ associate dhcp option """ - conn = boto.connect_vpc('the_key', 'the_secret') - dhcp_options = conn.create_dhcp_options( - SAMPLE_DOMAIN_NAME, SAMPLE_NAME_SERVERS) - vpc = conn.create_vpc("10.0.0.0/16") - - rval = conn.associate_dhcp_options(dhcp_options.id, vpc.id) - rval.should.be.equal(True) - - -@mock_ec2_deprecated -def test_dhcp_options_associate_invalid_dhcp_id(): - """ associate dhcp option bad dhcp options id """ - conn = boto.connect_vpc('the_key', 'the_secret') - vpc = conn.create_vpc("10.0.0.0/16") - - with assert_raises(EC2ResponseError) as cm: - conn.associate_dhcp_options("foo", vpc.id) - cm.exception.code.should.equal('InvalidDhcpOptionID.NotFound') - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none - - -@mock_ec2_deprecated -def test_dhcp_options_associate_invalid_vpc_id(): - """ associate dhcp option invalid vpc id """ - conn = boto.connect_vpc('the_key', 'the_secret') - dhcp_options = conn.create_dhcp_options( - SAMPLE_DOMAIN_NAME, SAMPLE_NAME_SERVERS) - - with assert_raises(EC2ResponseError) as cm: - conn.associate_dhcp_options(dhcp_options.id, "foo") - cm.exception.code.should.equal('InvalidVpcID.NotFound') - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none - - -@mock_ec2_deprecated -def test_dhcp_options_delete_with_vpc(): - """Test deletion of dhcp options with vpc""" - conn = boto.connect_vpc('the_key', 'the_secret') - dhcp_options = conn.create_dhcp_options( - SAMPLE_DOMAIN_NAME, SAMPLE_NAME_SERVERS) - dhcp_options_id = dhcp_options.id - vpc = conn.create_vpc("10.0.0.0/16") - - rval = conn.associate_dhcp_options(dhcp_options_id, vpc.id) - rval.should.be.equal(True) - - with assert_raises(EC2ResponseError) as cm: - conn.delete_dhcp_options(dhcp_options_id) - cm.exception.code.should.equal('DependencyViolation') - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none - - vpc.delete() - - with assert_raises(EC2ResponseError) as cm: - conn.get_all_dhcp_options([dhcp_options_id]) - cm.exception.code.should.equal('InvalidDhcpOptionID.NotFound') - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none - - -@mock_ec2_deprecated -def test_create_dhcp_options(): - """Create most basic dhcp option""" - conn = 
boto.connect_vpc('the_key', 'the_secret') - - dhcp_option = conn.create_dhcp_options( - SAMPLE_DOMAIN_NAME, SAMPLE_NAME_SERVERS) - dhcp_option.options[u'domain-name'][0].should.be.equal(SAMPLE_DOMAIN_NAME) - dhcp_option.options[ - u'domain-name-servers'][0].should.be.equal(SAMPLE_NAME_SERVERS[0]) - dhcp_option.options[ - u'domain-name-servers'][1].should.be.equal(SAMPLE_NAME_SERVERS[1]) - - -@mock_ec2_deprecated -def test_create_dhcp_options_invalid_options(): - """Create invalid dhcp options""" - conn = boto.connect_vpc('the_key', 'the_secret') - servers = ["f", "f", "f", "f", "f"] - - with assert_raises(EC2ResponseError) as cm: - conn.create_dhcp_options(ntp_servers=servers) - cm.exception.code.should.equal('InvalidParameterValue') - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none - - with assert_raises(EC2ResponseError) as cm: - conn.create_dhcp_options(netbios_node_type="0") - cm.exception.code.should.equal('InvalidParameterValue') - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none - - -@mock_ec2_deprecated -def test_describe_dhcp_options(): - """Test dhcp options lookup by id""" - conn = boto.connect_vpc('the_key', 'the_secret') - - dhcp_option = conn.create_dhcp_options() - dhcp_options = conn.get_all_dhcp_options([dhcp_option.id]) - dhcp_options.should.be.length_of(1) - - dhcp_options = conn.get_all_dhcp_options() - dhcp_options.should.be.length_of(1) - - -@mock_ec2_deprecated -def test_describe_dhcp_options_invalid_id(): - """get error on invalid dhcp_option_id lookup""" - conn = boto.connect_vpc('the_key', 'the_secret') - - with assert_raises(EC2ResponseError) as cm: - conn.get_all_dhcp_options(["1"]) - cm.exception.code.should.equal('InvalidDhcpOptionID.NotFound') - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none - - -@mock_ec2_deprecated -def test_delete_dhcp_options(): - """delete dhcp option""" - conn = boto.connect_vpc('the_key', 'the_secret') - - dhcp_option = conn.create_dhcp_options() - dhcp_options = conn.get_all_dhcp_options([dhcp_option.id]) - dhcp_options.should.be.length_of(1) - - conn.delete_dhcp_options(dhcp_option.id) # .should.be.equal(True) - - with assert_raises(EC2ResponseError) as cm: - conn.get_all_dhcp_options([dhcp_option.id]) - cm.exception.code.should.equal('InvalidDhcpOptionID.NotFound') - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none - - -@mock_ec2_deprecated -def test_delete_dhcp_options_invalid_id(): - conn = boto.connect_vpc('the_key', 'the_secret') - - conn.create_dhcp_options() - - with assert_raises(EC2ResponseError) as cm: - conn.delete_dhcp_options("dopt-abcd1234") - cm.exception.code.should.equal('InvalidDhcpOptionID.NotFound') - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none - - -@mock_ec2_deprecated -def test_delete_dhcp_options_malformed_id(): - conn = boto.connect_vpc('the_key', 'the_secret') - - conn.create_dhcp_options() - - with assert_raises(EC2ResponseError) as cm: - conn.delete_dhcp_options("foo-abcd1234") - cm.exception.code.should.equal('InvalidDhcpOptionsId.Malformed') - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none - - -@mock_ec2_deprecated -def test_dhcp_tagging(): - conn = boto.connect_vpc('the_key', 'the_secret') - dhcp_option = conn.create_dhcp_options() - - dhcp_option.add_tag("a key", "some value") - - tag = conn.get_all_tags()[0] - tag.name.should.equal("a key") - tag.value.should.equal("some value") - - # Refresh 
the DHCP options - dhcp_option = conn.get_all_dhcp_options()[0] - dhcp_option.tags.should.have.length_of(1) - dhcp_option.tags["a key"].should.equal("some value") - - -@mock_ec2_deprecated -def test_dhcp_options_get_by_tag(): - conn = boto.connect_vpc('the_key', 'the_secret') - - dhcp1 = conn.create_dhcp_options('example.com', ['10.0.10.2']) - dhcp1.add_tag('Name', 'TestDhcpOptions1') - dhcp1.add_tag('test-tag', 'test-value') - - dhcp2 = conn.create_dhcp_options('example.com', ['10.0.20.2']) - dhcp2.add_tag('Name', 'TestDhcpOptions2') - dhcp2.add_tag('test-tag', 'test-value') - - filters = {'tag:Name': 'TestDhcpOptions1', 'tag:test-tag': 'test-value'} - dhcp_options_sets = conn.get_all_dhcp_options(filters=filters) - - dhcp_options_sets.should.have.length_of(1) - dhcp_options_sets[0].options[ - 'domain-name'][0].should.be.equal('example.com') - dhcp_options_sets[0].options[ - 'domain-name-servers'][0].should.be.equal('10.0.10.2') - dhcp_options_sets[0].tags['Name'].should.equal('TestDhcpOptions1') - dhcp_options_sets[0].tags['test-tag'].should.equal('test-value') - - filters = {'tag:Name': 'TestDhcpOptions2', 'tag:test-tag': 'test-value'} - dhcp_options_sets = conn.get_all_dhcp_options(filters=filters) - - dhcp_options_sets.should.have.length_of(1) - dhcp_options_sets[0].options[ - 'domain-name'][0].should.be.equal('example.com') - dhcp_options_sets[0].options[ - 'domain-name-servers'][0].should.be.equal('10.0.20.2') - dhcp_options_sets[0].tags['Name'].should.equal('TestDhcpOptions2') - dhcp_options_sets[0].tags['test-tag'].should.equal('test-value') - - filters = {'tag:test-tag': 'test-value'} - dhcp_options_sets = conn.get_all_dhcp_options(filters=filters) - - dhcp_options_sets.should.have.length_of(2) - - -@mock_ec2_deprecated -def test_dhcp_options_get_by_id(): - conn = boto.connect_vpc('the_key', 'the_secret') - - dhcp1 = conn.create_dhcp_options('test1.com', ['10.0.10.2']) - dhcp1.add_tag('Name', 'TestDhcpOptions1') - dhcp1.add_tag('test-tag', 'test-value') - dhcp1_id = dhcp1.id - - dhcp2 = conn.create_dhcp_options('test2.com', ['10.0.20.2']) - dhcp2.add_tag('Name', 'TestDhcpOptions2') - dhcp2.add_tag('test-tag', 'test-value') - dhcp2_id = dhcp2.id - - dhcp_options_sets = conn.get_all_dhcp_options() - dhcp_options_sets.should.have.length_of(2) - - dhcp_options_sets = conn.get_all_dhcp_options( - filters={'dhcp-options-id': dhcp1_id}) - - dhcp_options_sets.should.have.length_of(1) - dhcp_options_sets[0].options['domain-name'][0].should.be.equal('test1.com') - dhcp_options_sets[0].options[ - 'domain-name-servers'][0].should.be.equal('10.0.10.2') - - dhcp_options_sets = conn.get_all_dhcp_options( - filters={'dhcp-options-id': dhcp2_id}) - - dhcp_options_sets.should.have.length_of(1) - dhcp_options_sets[0].options['domain-name'][0].should.be.equal('test2.com') - dhcp_options_sets[0].options[ - 'domain-name-servers'][0].should.be.equal('10.0.20.2') - - -@mock_ec2 -def test_dhcp_options_get_by_value_filter(): - ec2 = boto3.resource('ec2', region_name='us-west-1') - - ec2.create_dhcp_options(DhcpConfigurations=[ - {'Key': 'domain-name', 'Values': ['example.com']}, - {'Key': 'domain-name-servers', 'Values': ['10.0.10.2']} - ]) - - ec2.create_dhcp_options(DhcpConfigurations=[ - {'Key': 'domain-name', 'Values': ['example.com']}, - {'Key': 'domain-name-servers', 'Values': ['10.0.20.2']} - ]) - - ec2.create_dhcp_options(DhcpConfigurations=[ - {'Key': 'domain-name', 'Values': ['example.com']}, - {'Key': 'domain-name-servers', 'Values': ['10.0.30.2']} - ]) - - filters = [{'Name': 'value', 
'Values': ['10.0.10.2']}] - dhcp_options_sets = list(ec2.dhcp_options_sets.filter(Filters=filters)) - dhcp_options_sets.should.have.length_of(1) - - -@mock_ec2 -def test_dhcp_options_get_by_key_filter(): - ec2 = boto3.resource('ec2', region_name='us-west-1') - - ec2.create_dhcp_options(DhcpConfigurations=[ - {'Key': 'domain-name', 'Values': ['example.com']}, - {'Key': 'domain-name-servers', 'Values': ['10.0.10.2']} - ]) - - ec2.create_dhcp_options(DhcpConfigurations=[ - {'Key': 'domain-name', 'Values': ['example.com']}, - {'Key': 'domain-name-servers', 'Values': ['10.0.20.2']} - ]) - - ec2.create_dhcp_options(DhcpConfigurations=[ - {'Key': 'domain-name', 'Values': ['example.com']}, - {'Key': 'domain-name-servers', 'Values': ['10.0.30.2']} - ]) - - filters = [{'Name': 'key', 'Values': ['domain-name']}] - dhcp_options_sets = list(ec2.dhcp_options_sets.filter(Filters=filters)) - dhcp_options_sets.should.have.length_of(3) - - -@mock_ec2_deprecated -def test_dhcp_options_get_by_invalid_filter(): - conn = boto.connect_vpc('the_key', 'the_secret') - - conn.create_dhcp_options(SAMPLE_DOMAIN_NAME, SAMPLE_NAME_SERVERS) - filters = {'invalid-filter': 'invalid-value'} - - conn.get_all_dhcp_options.when.called_with( - filters=filters).should.throw(NotImplementedError) +from __future__ import unicode_literals +# Ensure 'assert_raises' context manager support for Python 2.6 +import tests.backport_assert_raises +from nose.tools import assert_raises + +import boto3 +import boto +from boto.exception import EC2ResponseError + +import sure # noqa + +from moto import mock_ec2, mock_ec2_deprecated + +SAMPLE_DOMAIN_NAME = u'example.com' +SAMPLE_NAME_SERVERS = [u'10.0.0.6', u'10.0.0.7'] + + +@mock_ec2_deprecated +def test_dhcp_options_associate(): + """ associate dhcp option """ + conn = boto.connect_vpc('the_key', 'the_secret') + dhcp_options = conn.create_dhcp_options( + SAMPLE_DOMAIN_NAME, SAMPLE_NAME_SERVERS) + vpc = conn.create_vpc("10.0.0.0/16") + + rval = conn.associate_dhcp_options(dhcp_options.id, vpc.id) + rval.should.be.equal(True) + + +@mock_ec2_deprecated +def test_dhcp_options_associate_invalid_dhcp_id(): + """ associate dhcp option bad dhcp options id """ + conn = boto.connect_vpc('the_key', 'the_secret') + vpc = conn.create_vpc("10.0.0.0/16") + + with assert_raises(EC2ResponseError) as cm: + conn.associate_dhcp_options("foo", vpc.id) + cm.exception.code.should.equal('InvalidDhcpOptionID.NotFound') + cm.exception.status.should.equal(400) + cm.exception.request_id.should_not.be.none + + +@mock_ec2_deprecated +def test_dhcp_options_associate_invalid_vpc_id(): + """ associate dhcp option invalid vpc id """ + conn = boto.connect_vpc('the_key', 'the_secret') + dhcp_options = conn.create_dhcp_options( + SAMPLE_DOMAIN_NAME, SAMPLE_NAME_SERVERS) + + with assert_raises(EC2ResponseError) as cm: + conn.associate_dhcp_options(dhcp_options.id, "foo") + cm.exception.code.should.equal('InvalidVpcID.NotFound') + cm.exception.status.should.equal(400) + cm.exception.request_id.should_not.be.none + + +@mock_ec2_deprecated +def test_dhcp_options_delete_with_vpc(): + """Test deletion of dhcp options with vpc""" + conn = boto.connect_vpc('the_key', 'the_secret') + dhcp_options = conn.create_dhcp_options( + SAMPLE_DOMAIN_NAME, SAMPLE_NAME_SERVERS) + dhcp_options_id = dhcp_options.id + vpc = conn.create_vpc("10.0.0.0/16") + + rval = conn.associate_dhcp_options(dhcp_options_id, vpc.id) + rval.should.be.equal(True) + + with assert_raises(EC2ResponseError) as cm: + conn.delete_dhcp_options(dhcp_options_id) + 
cm.exception.code.should.equal('DependencyViolation') + cm.exception.status.should.equal(400) + cm.exception.request_id.should_not.be.none + + vpc.delete() + + with assert_raises(EC2ResponseError) as cm: + conn.get_all_dhcp_options([dhcp_options_id]) + cm.exception.code.should.equal('InvalidDhcpOptionID.NotFound') + cm.exception.status.should.equal(400) + cm.exception.request_id.should_not.be.none + + +@mock_ec2_deprecated +def test_create_dhcp_options(): + """Create most basic dhcp option""" + conn = boto.connect_vpc('the_key', 'the_secret') + + dhcp_option = conn.create_dhcp_options( + SAMPLE_DOMAIN_NAME, SAMPLE_NAME_SERVERS) + dhcp_option.options[u'domain-name'][0].should.be.equal(SAMPLE_DOMAIN_NAME) + dhcp_option.options[ + u'domain-name-servers'][0].should.be.equal(SAMPLE_NAME_SERVERS[0]) + dhcp_option.options[ + u'domain-name-servers'][1].should.be.equal(SAMPLE_NAME_SERVERS[1]) + + +@mock_ec2_deprecated +def test_create_dhcp_options_invalid_options(): + """Create invalid dhcp options""" + conn = boto.connect_vpc('the_key', 'the_secret') + servers = ["f", "f", "f", "f", "f"] + + with assert_raises(EC2ResponseError) as cm: + conn.create_dhcp_options(ntp_servers=servers) + cm.exception.code.should.equal('InvalidParameterValue') + cm.exception.status.should.equal(400) + cm.exception.request_id.should_not.be.none + + with assert_raises(EC2ResponseError) as cm: + conn.create_dhcp_options(netbios_node_type="0") + cm.exception.code.should.equal('InvalidParameterValue') + cm.exception.status.should.equal(400) + cm.exception.request_id.should_not.be.none + + +@mock_ec2_deprecated +def test_describe_dhcp_options(): + """Test dhcp options lookup by id""" + conn = boto.connect_vpc('the_key', 'the_secret') + + dhcp_option = conn.create_dhcp_options() + dhcp_options = conn.get_all_dhcp_options([dhcp_option.id]) + dhcp_options.should.be.length_of(1) + + dhcp_options = conn.get_all_dhcp_options() + dhcp_options.should.be.length_of(1) + + +@mock_ec2_deprecated +def test_describe_dhcp_options_invalid_id(): + """get error on invalid dhcp_option_id lookup""" + conn = boto.connect_vpc('the_key', 'the_secret') + + with assert_raises(EC2ResponseError) as cm: + conn.get_all_dhcp_options(["1"]) + cm.exception.code.should.equal('InvalidDhcpOptionID.NotFound') + cm.exception.status.should.equal(400) + cm.exception.request_id.should_not.be.none + + +@mock_ec2_deprecated +def test_delete_dhcp_options(): + """delete dhcp option""" + conn = boto.connect_vpc('the_key', 'the_secret') + + dhcp_option = conn.create_dhcp_options() + dhcp_options = conn.get_all_dhcp_options([dhcp_option.id]) + dhcp_options.should.be.length_of(1) + + conn.delete_dhcp_options(dhcp_option.id) # .should.be.equal(True) + + with assert_raises(EC2ResponseError) as cm: + conn.get_all_dhcp_options([dhcp_option.id]) + cm.exception.code.should.equal('InvalidDhcpOptionID.NotFound') + cm.exception.status.should.equal(400) + cm.exception.request_id.should_not.be.none + + +@mock_ec2_deprecated +def test_delete_dhcp_options_invalid_id(): + conn = boto.connect_vpc('the_key', 'the_secret') + + conn.create_dhcp_options() + + with assert_raises(EC2ResponseError) as cm: + conn.delete_dhcp_options("dopt-abcd1234") + cm.exception.code.should.equal('InvalidDhcpOptionID.NotFound') + cm.exception.status.should.equal(400) + cm.exception.request_id.should_not.be.none + + +@mock_ec2_deprecated +def test_delete_dhcp_options_malformed_id(): + conn = boto.connect_vpc('the_key', 'the_secret') + + conn.create_dhcp_options() + + with assert_raises(EC2ResponseError) as cm: 
+ conn.delete_dhcp_options("foo-abcd1234") + cm.exception.code.should.equal('InvalidDhcpOptionsId.Malformed') + cm.exception.status.should.equal(400) + cm.exception.request_id.should_not.be.none + + +@mock_ec2_deprecated +def test_dhcp_tagging(): + conn = boto.connect_vpc('the_key', 'the_secret') + dhcp_option = conn.create_dhcp_options() + + dhcp_option.add_tag("a key", "some value") + + tag = conn.get_all_tags()[0] + tag.name.should.equal("a key") + tag.value.should.equal("some value") + + # Refresh the DHCP options + dhcp_option = conn.get_all_dhcp_options()[0] + dhcp_option.tags.should.have.length_of(1) + dhcp_option.tags["a key"].should.equal("some value") + + +@mock_ec2_deprecated +def test_dhcp_options_get_by_tag(): + conn = boto.connect_vpc('the_key', 'the_secret') + + dhcp1 = conn.create_dhcp_options('example.com', ['10.0.10.2']) + dhcp1.add_tag('Name', 'TestDhcpOptions1') + dhcp1.add_tag('test-tag', 'test-value') + + dhcp2 = conn.create_dhcp_options('example.com', ['10.0.20.2']) + dhcp2.add_tag('Name', 'TestDhcpOptions2') + dhcp2.add_tag('test-tag', 'test-value') + + filters = {'tag:Name': 'TestDhcpOptions1', 'tag:test-tag': 'test-value'} + dhcp_options_sets = conn.get_all_dhcp_options(filters=filters) + + dhcp_options_sets.should.have.length_of(1) + dhcp_options_sets[0].options[ + 'domain-name'][0].should.be.equal('example.com') + dhcp_options_sets[0].options[ + 'domain-name-servers'][0].should.be.equal('10.0.10.2') + dhcp_options_sets[0].tags['Name'].should.equal('TestDhcpOptions1') + dhcp_options_sets[0].tags['test-tag'].should.equal('test-value') + + filters = {'tag:Name': 'TestDhcpOptions2', 'tag:test-tag': 'test-value'} + dhcp_options_sets = conn.get_all_dhcp_options(filters=filters) + + dhcp_options_sets.should.have.length_of(1) + dhcp_options_sets[0].options[ + 'domain-name'][0].should.be.equal('example.com') + dhcp_options_sets[0].options[ + 'domain-name-servers'][0].should.be.equal('10.0.20.2') + dhcp_options_sets[0].tags['Name'].should.equal('TestDhcpOptions2') + dhcp_options_sets[0].tags['test-tag'].should.equal('test-value') + + filters = {'tag:test-tag': 'test-value'} + dhcp_options_sets = conn.get_all_dhcp_options(filters=filters) + + dhcp_options_sets.should.have.length_of(2) + + +@mock_ec2_deprecated +def test_dhcp_options_get_by_id(): + conn = boto.connect_vpc('the_key', 'the_secret') + + dhcp1 = conn.create_dhcp_options('test1.com', ['10.0.10.2']) + dhcp1.add_tag('Name', 'TestDhcpOptions1') + dhcp1.add_tag('test-tag', 'test-value') + dhcp1_id = dhcp1.id + + dhcp2 = conn.create_dhcp_options('test2.com', ['10.0.20.2']) + dhcp2.add_tag('Name', 'TestDhcpOptions2') + dhcp2.add_tag('test-tag', 'test-value') + dhcp2_id = dhcp2.id + + dhcp_options_sets = conn.get_all_dhcp_options() + dhcp_options_sets.should.have.length_of(2) + + dhcp_options_sets = conn.get_all_dhcp_options( + filters={'dhcp-options-id': dhcp1_id}) + + dhcp_options_sets.should.have.length_of(1) + dhcp_options_sets[0].options['domain-name'][0].should.be.equal('test1.com') + dhcp_options_sets[0].options[ + 'domain-name-servers'][0].should.be.equal('10.0.10.2') + + dhcp_options_sets = conn.get_all_dhcp_options( + filters={'dhcp-options-id': dhcp2_id}) + + dhcp_options_sets.should.have.length_of(1) + dhcp_options_sets[0].options['domain-name'][0].should.be.equal('test2.com') + dhcp_options_sets[0].options[ + 'domain-name-servers'][0].should.be.equal('10.0.20.2') + + +@mock_ec2 +def test_dhcp_options_get_by_value_filter(): + ec2 = boto3.resource('ec2', region_name='us-west-1') + + 
ec2.create_dhcp_options(DhcpConfigurations=[ + {'Key': 'domain-name', 'Values': ['example.com']}, + {'Key': 'domain-name-servers', 'Values': ['10.0.10.2']} + ]) + + ec2.create_dhcp_options(DhcpConfigurations=[ + {'Key': 'domain-name', 'Values': ['example.com']}, + {'Key': 'domain-name-servers', 'Values': ['10.0.20.2']} + ]) + + ec2.create_dhcp_options(DhcpConfigurations=[ + {'Key': 'domain-name', 'Values': ['example.com']}, + {'Key': 'domain-name-servers', 'Values': ['10.0.30.2']} + ]) + + filters = [{'Name': 'value', 'Values': ['10.0.10.2']}] + dhcp_options_sets = list(ec2.dhcp_options_sets.filter(Filters=filters)) + dhcp_options_sets.should.have.length_of(1) + + +@mock_ec2 +def test_dhcp_options_get_by_key_filter(): + ec2 = boto3.resource('ec2', region_name='us-west-1') + + ec2.create_dhcp_options(DhcpConfigurations=[ + {'Key': 'domain-name', 'Values': ['example.com']}, + {'Key': 'domain-name-servers', 'Values': ['10.0.10.2']} + ]) + + ec2.create_dhcp_options(DhcpConfigurations=[ + {'Key': 'domain-name', 'Values': ['example.com']}, + {'Key': 'domain-name-servers', 'Values': ['10.0.20.2']} + ]) + + ec2.create_dhcp_options(DhcpConfigurations=[ + {'Key': 'domain-name', 'Values': ['example.com']}, + {'Key': 'domain-name-servers', 'Values': ['10.0.30.2']} + ]) + + filters = [{'Name': 'key', 'Values': ['domain-name']}] + dhcp_options_sets = list(ec2.dhcp_options_sets.filter(Filters=filters)) + dhcp_options_sets.should.have.length_of(3) + + +@mock_ec2_deprecated +def test_dhcp_options_get_by_invalid_filter(): + conn = boto.connect_vpc('the_key', 'the_secret') + + conn.create_dhcp_options(SAMPLE_DOMAIN_NAME, SAMPLE_NAME_SERVERS) + filters = {'invalid-filter': 'invalid-value'} + + conn.get_all_dhcp_options.when.called_with( + filters=filters).should.throw(NotImplementedError) diff --git a/tests/test_ec2/test_ec2_core.py b/tests/test_ec2/test_ec2_core.py index baffc4882521..78b780d97562 100644 --- a/tests/test_ec2/test_ec2_core.py +++ b/tests/test_ec2/test_ec2_core.py @@ -1 +1 @@ -from __future__ import unicode_literals +from __future__ import unicode_literals diff --git a/tests/test_ec2/test_elastic_block_store.py b/tests/test_ec2/test_elastic_block_store.py index 442e41dde0c9..a5583f44b643 100644 --- a/tests/test_ec2/test_elastic_block_store.py +++ b/tests/test_ec2/test_elastic_block_store.py @@ -1,665 +1,665 @@ -from __future__ import unicode_literals -# Ensure 'assert_raises' context manager support for Python 2.6 -import tests.backport_assert_raises -from nose.tools import assert_raises - -from moto.ec2 import ec2_backends -import boto -import boto3 -from botocore.exceptions import ClientError -from boto.exception import EC2ResponseError -from freezegun import freeze_time -import sure # noqa - -from moto import mock_ec2_deprecated, mock_ec2 - - -@mock_ec2_deprecated -def test_create_and_delete_volume(): - conn = boto.connect_ec2('the_key', 'the_secret') - volume = conn.create_volume(80, "us-east-1a") - - all_volumes = conn.get_all_volumes() - - current_volume = [item for item in all_volumes if item.id == volume.id] - current_volume.should.have.length_of(1) - current_volume[0].size.should.equal(80) - current_volume[0].zone.should.equal("us-east-1a") - current_volume[0].encrypted.should.be(False) - - volume = current_volume[0] - - with assert_raises(EC2ResponseError) as ex: - volume.delete(dry_run=True) - ex.exception.error_code.should.equal('DryRunOperation') - ex.exception.status.should.equal(400) - ex.exception.message.should.equal( - 'An error occurred (DryRunOperation) when calling the 
DeleteVolume operation: Request would have succeeded, but DryRun flag is set') - - volume.delete() - - all_volumes = conn.get_all_volumes() - my_volume = [item for item in all_volumes if item.id == volume.id] - my_volume.should.have.length_of(0) - - # Deleting something that was already deleted should throw an error - with assert_raises(EC2ResponseError) as cm: - volume.delete() - cm.exception.code.should.equal('InvalidVolume.NotFound') - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none - - -@mock_ec2_deprecated -def test_create_encrypted_volume_dryrun(): - conn = boto.connect_ec2('the_key', 'the_secret') - with assert_raises(EC2ResponseError) as ex: - conn.create_volume(80, "us-east-1a", encrypted=True, dry_run=True) - ex.exception.error_code.should.equal('DryRunOperation') - ex.exception.status.should.equal(400) - ex.exception.message.should.equal( - 'An error occurred (DryRunOperation) when calling the CreateVolume operation: Request would have succeeded, but DryRun flag is set') - - -@mock_ec2_deprecated -def test_create_encrypted_volume(): - conn = boto.connect_ec2('the_key', 'the_secret') - volume = conn.create_volume(80, "us-east-1a", encrypted=True) - - with assert_raises(EC2ResponseError) as ex: - conn.create_volume(80, "us-east-1a", encrypted=True, dry_run=True) - ex.exception.error_code.should.equal('DryRunOperation') - ex.exception.status.should.equal(400) - ex.exception.message.should.equal( - 'An error occurred (DryRunOperation) when calling the CreateVolume operation: Request would have succeeded, but DryRun flag is set') - - all_volumes = [vol for vol in conn.get_all_volumes() if vol.id == volume.id] - all_volumes[0].encrypted.should.be(True) - - -@mock_ec2_deprecated -def test_filter_volume_by_id(): - conn = boto.connect_ec2('the_key', 'the_secret') - volume1 = conn.create_volume(80, "us-east-1a") - volume2 = conn.create_volume(36, "us-east-1b") - volume3 = conn.create_volume(20, "us-east-1c") - vol1 = conn.get_all_volumes(volume_ids=volume3.id) - vol1.should.have.length_of(1) - vol1[0].size.should.equal(20) - vol1[0].zone.should.equal('us-east-1c') - vol2 = conn.get_all_volumes(volume_ids=[volume1.id, volume2.id]) - vol2.should.have.length_of(2) - - with assert_raises(EC2ResponseError) as cm: - conn.get_all_volumes(volume_ids=['vol-does_not_exist']) - cm.exception.code.should.equal('InvalidVolume.NotFound') - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none - - -@mock_ec2_deprecated -def test_volume_filters(): - conn = boto.connect_ec2('the_key', 'the_secret') - - reservation = conn.run_instances('ami-1234abcd') - instance = reservation.instances[0] - - instance.update() - - volume1 = conn.create_volume(80, "us-east-1a", encrypted=True) - volume2 = conn.create_volume(36, "us-east-1b", encrypted=False) - volume3 = conn.create_volume(20, "us-east-1c", encrypted=True) - - snapshot = volume3.create_snapshot(description='testsnap') - volume4 = conn.create_volume(25, "us-east-1a", snapshot=snapshot) - - conn.create_tags([volume1.id], {'testkey1': 'testvalue1'}) - conn.create_tags([volume2.id], {'testkey2': 'testvalue2'}) - - volume1.update() - volume2.update() - volume3.update() - volume4.update() - - block_mapping = instance.block_device_mapping['/dev/sda1'] - - volume_ids = (volume1.id, volume2.id, volume3.id, volume4.id, block_mapping.volume_id) - - volumes_by_attach_time = conn.get_all_volumes( - filters={'attachment.attach-time': block_mapping.attach_time}) - set([vol.id for vol in volumes_by_attach_time] 
- ).should.equal({block_mapping.volume_id}) - - volumes_by_attach_device = conn.get_all_volumes( - filters={'attachment.device': '/dev/sda1'}) - set([vol.id for vol in volumes_by_attach_device] - ).should.equal({block_mapping.volume_id}) - - volumes_by_attach_instance_id = conn.get_all_volumes( - filters={'attachment.instance-id': instance.id}) - set([vol.id for vol in volumes_by_attach_instance_id] - ).should.equal({block_mapping.volume_id}) - - volumes_by_attach_status = conn.get_all_volumes( - filters={'attachment.status': 'attached'}) - set([vol.id for vol in volumes_by_attach_status] - ).should.equal({block_mapping.volume_id}) - - volumes_by_create_time = conn.get_all_volumes( - filters={'create-time': volume4.create_time}) - set([vol.create_time for vol in volumes_by_create_time] - ).should.equal({volume4.create_time}) - - volumes_by_size = conn.get_all_volumes(filters={'size': volume2.size}) - set([vol.id for vol in volumes_by_size]).should.equal({volume2.id}) - - volumes_by_snapshot_id = conn.get_all_volumes( - filters={'snapshot-id': snapshot.id}) - set([vol.id for vol in volumes_by_snapshot_id] - ).should.equal({volume4.id}) - - volumes_by_status = conn.get_all_volumes(filters={'status': 'in-use'}) - set([vol.id for vol in volumes_by_status]).should.equal( - {block_mapping.volume_id}) - - volumes_by_id = conn.get_all_volumes(filters={'volume-id': volume1.id}) - set([vol.id for vol in volumes_by_id]).should.equal({volume1.id}) - - volumes_by_tag_key = conn.get_all_volumes(filters={'tag-key': 'testkey1'}) - set([vol.id for vol in volumes_by_tag_key]).should.equal({volume1.id}) - - volumes_by_tag_value = conn.get_all_volumes( - filters={'tag-value': 'testvalue1'}) - set([vol.id for vol in volumes_by_tag_value] - ).should.equal({volume1.id}) - - volumes_by_tag = conn.get_all_volumes( - filters={'tag:testkey1': 'testvalue1'}) - set([vol.id for vol in volumes_by_tag]).should.equal({volume1.id}) - - volumes_by_unencrypted = conn.get_all_volumes( - filters={'encrypted': 'false'}) - set([vol.id for vol in volumes_by_unencrypted if vol.id in volume_ids]).should.equal( - {block_mapping.volume_id, volume2.id} - ) - - volumes_by_encrypted = conn.get_all_volumes(filters={'encrypted': 'true'}) - set([vol.id for vol in volumes_by_encrypted if vol.id in volume_ids]).should.equal( - {volume1.id, volume3.id, volume4.id} - ) - - volumes_by_availability_zone = conn.get_all_volumes(filters={'availability-zone': 'us-east-1b'}) - set([vol.id for vol in volumes_by_availability_zone if vol.id in volume_ids]).should.equal( - {volume2.id} - ) - - -@mock_ec2_deprecated -def test_volume_attach_and_detach(): - conn = boto.connect_ec2('the_key', 'the_secret') - reservation = conn.run_instances('ami-1234abcd') - instance = reservation.instances[0] - volume = conn.create_volume(80, "us-east-1a") - - volume.update() - volume.volume_state().should.equal('available') - - with assert_raises(EC2ResponseError) as ex: - volume.attach(instance.id, "/dev/sdh", dry_run=True) - ex.exception.error_code.should.equal('DryRunOperation') - ex.exception.status.should.equal(400) - ex.exception.message.should.equal( - 'An error occurred (DryRunOperation) when calling the AttachVolume operation: Request would have succeeded, but DryRun flag is set') - - volume.attach(instance.id, "/dev/sdh") - - volume.update() - volume.volume_state().should.equal('in-use') - volume.attachment_state().should.equal('attached') - - volume.attach_data.instance_id.should.equal(instance.id) - - with assert_raises(EC2ResponseError) as ex: - 
volume.detach(dry_run=True) - ex.exception.error_code.should.equal('DryRunOperation') - ex.exception.status.should.equal(400) - ex.exception.message.should.equal( - 'An error occurred (DryRunOperation) when calling the DetachVolume operation: Request would have succeeded, but DryRun flag is set') - - volume.detach() - - volume.update() - volume.volume_state().should.equal('available') - - with assert_raises(EC2ResponseError) as cm1: - volume.attach('i-1234abcd', "/dev/sdh") - cm1.exception.code.should.equal('InvalidInstanceID.NotFound') - cm1.exception.status.should.equal(400) - cm1.exception.request_id.should_not.be.none - - with assert_raises(EC2ResponseError) as cm2: - conn.detach_volume(volume.id, instance.id, "/dev/sdh") - cm2.exception.code.should.equal('InvalidAttachment.NotFound') - cm2.exception.status.should.equal(400) - cm2.exception.request_id.should_not.be.none - - with assert_raises(EC2ResponseError) as cm3: - conn.detach_volume(volume.id, 'i-1234abcd', "/dev/sdh") - cm3.exception.code.should.equal('InvalidInstanceID.NotFound') - cm3.exception.status.should.equal(400) - cm3.exception.request_id.should_not.be.none - - -@mock_ec2_deprecated -def test_create_snapshot(): - conn = boto.connect_ec2('the_key', 'the_secret') - volume = conn.create_volume(80, "us-east-1a") - - with assert_raises(EC2ResponseError) as ex: - snapshot = volume.create_snapshot('a dryrun snapshot', dry_run=True) - ex.exception.error_code.should.equal('DryRunOperation') - ex.exception.status.should.equal(400) - ex.exception.message.should.equal( - 'An error occurred (DryRunOperation) when calling the CreateSnapshot operation: Request would have succeeded, but DryRun flag is set') - - snapshot = volume.create_snapshot('a test snapshot') - snapshot.update() - snapshot.status.should.equal('completed') - - snapshots = [snap for snap in conn.get_all_snapshots() if snap.id == snapshot.id] - snapshots.should.have.length_of(1) - snapshots[0].description.should.equal('a test snapshot') - snapshots[0].start_time.should_not.be.none - snapshots[0].encrypted.should.be(False) - - # Create snapshot without description - num_snapshots = len(conn.get_all_snapshots()) - - snapshot = volume.create_snapshot() - conn.get_all_snapshots().should.have.length_of(num_snapshots + 1) - - snapshot.delete() - conn.get_all_snapshots().should.have.length_of(num_snapshots) - - # Deleting something that was already deleted should throw an error - with assert_raises(EC2ResponseError) as cm: - snapshot.delete() - cm.exception.code.should.equal('InvalidSnapshot.NotFound') - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none - - -@mock_ec2_deprecated -def test_create_encrypted_snapshot(): - conn = boto.connect_ec2('the_key', 'the_secret') - volume = conn.create_volume(80, "us-east-1a", encrypted=True) - snapshot = volume.create_snapshot('a test snapshot') - snapshot.update() - snapshot.status.should.equal('completed') - - snapshots = [snap for snap in conn.get_all_snapshots() if snap.id == snapshot.id] - snapshots.should.have.length_of(1) - snapshots[0].description.should.equal('a test snapshot') - snapshots[0].start_time.should_not.be.none - snapshots[0].encrypted.should.be(True) - - -@mock_ec2_deprecated -def test_filter_snapshot_by_id(): - conn = boto.connect_ec2('the_key', 'the_secret') - volume1 = conn.create_volume(36, "us-east-1a") - snap1 = volume1.create_snapshot('a test snapshot 1') - volume2 = conn.create_volume(42, 'us-east-1a') - snap2 = volume2.create_snapshot('a test snapshot 2') - volume3 = 
conn.create_volume(84, 'us-east-1a') - snap3 = volume3.create_snapshot('a test snapshot 3') - snapshots1 = conn.get_all_snapshots(snapshot_ids=snap2.id) - snapshots1.should.have.length_of(1) - snapshots1[0].volume_id.should.equal(volume2.id) - snapshots1[0].region.name.should.equal(conn.region.name) - snapshots2 = conn.get_all_snapshots(snapshot_ids=[snap2.id, snap3.id]) - snapshots2.should.have.length_of(2) - for s in snapshots2: - s.start_time.should_not.be.none - s.volume_id.should.be.within([volume2.id, volume3.id]) - s.region.name.should.equal(conn.region.name) - - with assert_raises(EC2ResponseError) as cm: - conn.get_all_snapshots(snapshot_ids=['snap-does_not_exist']) - cm.exception.code.should.equal('InvalidSnapshot.NotFound') - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none - - -@mock_ec2_deprecated -def test_snapshot_filters(): - conn = boto.connect_ec2('the_key', 'the_secret') - volume1 = conn.create_volume(20, "us-east-1a", encrypted=False) - volume2 = conn.create_volume(25, "us-east-1a", encrypted=True) - - snapshot1 = volume1.create_snapshot(description='testsnapshot1') - snapshot2 = volume1.create_snapshot(description='testsnapshot2') - snapshot3 = volume2.create_snapshot(description='testsnapshot3') - - conn.create_tags([snapshot1.id], {'testkey1': 'testvalue1'}) - conn.create_tags([snapshot2.id], {'testkey2': 'testvalue2'}) - - snapshots_by_description = conn.get_all_snapshots( - filters={'description': 'testsnapshot1'}) - set([snap.id for snap in snapshots_by_description] - ).should.equal({snapshot1.id}) - - snapshots_by_id = conn.get_all_snapshots( - filters={'snapshot-id': snapshot1.id}) - set([snap.id for snap in snapshots_by_id] - ).should.equal({snapshot1.id}) - - snapshots_by_start_time = conn.get_all_snapshots( - filters={'start-time': snapshot1.start_time}) - set([snap.start_time for snap in snapshots_by_start_time] - ).should.equal({snapshot1.start_time}) - - snapshots_by_volume_id = conn.get_all_snapshots( - filters={'volume-id': volume1.id}) - set([snap.id for snap in snapshots_by_volume_id] - ).should.equal({snapshot1.id, snapshot2.id}) - - snapshots_by_status = conn.get_all_snapshots( - filters={'status': 'completed'}) - ({snapshot1.id, snapshot2.id, snapshot3.id} - - {snap.id for snap in snapshots_by_status}).should.have.length_of(0) - - snapshots_by_volume_size = conn.get_all_snapshots( - filters={'volume-size': volume1.size}) - set([snap.id for snap in snapshots_by_volume_size] - ).should.equal({snapshot1.id, snapshot2.id}) - - snapshots_by_tag_key = conn.get_all_snapshots( - filters={'tag-key': 'testkey1'}) - set([snap.id for snap in snapshots_by_tag_key] - ).should.equal({snapshot1.id}) - - snapshots_by_tag_value = conn.get_all_snapshots( - filters={'tag-value': 'testvalue1'}) - set([snap.id for snap in snapshots_by_tag_value] - ).should.equal({snapshot1.id}) - - snapshots_by_tag = conn.get_all_snapshots( - filters={'tag:testkey1': 'testvalue1'}) - set([snap.id for snap in snapshots_by_tag] - ).should.equal({snapshot1.id}) - - snapshots_by_encrypted = conn.get_all_snapshots( - filters={'encrypted': 'true'}) - set([snap.id for snap in snapshots_by_encrypted] - ).should.equal({snapshot3.id}) - - -@mock_ec2_deprecated -def test_snapshot_attribute(): - import copy - - conn = boto.connect_ec2('the_key', 'the_secret') - volume = conn.create_volume(80, "us-east-1a") - snapshot = volume.create_snapshot() - - # Baseline - attributes = conn.get_snapshot_attribute( - snapshot.id, attribute='createVolumePermission') - 
attributes.name.should.equal('create_volume_permission') - attributes.attrs.should.have.length_of(0) - - ADD_GROUP_ARGS = {'snapshot_id': snapshot.id, - 'attribute': 'createVolumePermission', - 'operation': 'add', - 'groups': 'all'} - - REMOVE_GROUP_ARGS = {'snapshot_id': snapshot.id, - 'attribute': 'createVolumePermission', - 'operation': 'remove', - 'groups': 'all'} - - # Add 'all' group and confirm - - with assert_raises(EC2ResponseError) as ex: - conn.modify_snapshot_attribute( - **dict(ADD_GROUP_ARGS, **{'dry_run': True})) - ex.exception.error_code.should.equal('DryRunOperation') - ex.exception.status.should.equal(400) - ex.exception.message.should.equal( - 'An error occurred (DryRunOperation) when calling the ModifySnapshotAttribute operation: Request would have succeeded, but DryRun flag is set') - - conn.modify_snapshot_attribute(**ADD_GROUP_ARGS) - - attributes = conn.get_snapshot_attribute( - snapshot.id, attribute='createVolumePermission') - attributes.attrs['groups'].should.have.length_of(1) - attributes.attrs['groups'].should.equal(['all']) - - # Add is idempotent - conn.modify_snapshot_attribute.when.called_with( - **ADD_GROUP_ARGS).should_not.throw(EC2ResponseError) - - # Remove 'all' group and confirm - with assert_raises(EC2ResponseError) as ex: - conn.modify_snapshot_attribute( - **dict(REMOVE_GROUP_ARGS, **{'dry_run': True})) - ex.exception.error_code.should.equal('DryRunOperation') - ex.exception.status.should.equal(400) - ex.exception.message.should.equal( - 'An error occurred (DryRunOperation) when calling the ModifySnapshotAttribute operation: Request would have succeeded, but DryRun flag is set') - - conn.modify_snapshot_attribute(**REMOVE_GROUP_ARGS) - - attributes = conn.get_snapshot_attribute( - snapshot.id, attribute='createVolumePermission') - attributes.attrs.should.have.length_of(0) - - # Remove is idempotent - conn.modify_snapshot_attribute.when.called_with( - **REMOVE_GROUP_ARGS).should_not.throw(EC2ResponseError) - - # Error: Add with group != 'all' - with assert_raises(EC2ResponseError) as cm: - conn.modify_snapshot_attribute(snapshot.id, - attribute='createVolumePermission', - operation='add', - groups='everyone') - cm.exception.code.should.equal('InvalidAMIAttributeItemValue') - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none - - # Error: Add with invalid snapshot ID - with assert_raises(EC2ResponseError) as cm: - conn.modify_snapshot_attribute("snapshot-abcd1234", - attribute='createVolumePermission', - operation='add', - groups='all') - cm.exception.code.should.equal('InvalidSnapshot.NotFound') - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none - - # Error: Remove with invalid snapshot ID - with assert_raises(EC2ResponseError) as cm: - conn.modify_snapshot_attribute("snapshot-abcd1234", - attribute='createVolumePermission', - operation='remove', - groups='all') - cm.exception.code.should.equal('InvalidSnapshot.NotFound') - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none - - # Error: Add or remove with user ID instead of group - conn.modify_snapshot_attribute.when.called_with(snapshot.id, - attribute='createVolumePermission', - operation='add', - user_ids=['user']).should.throw(NotImplementedError) - conn.modify_snapshot_attribute.when.called_with(snapshot.id, - attribute='createVolumePermission', - operation='remove', - user_ids=['user']).should.throw(NotImplementedError) - - -@mock_ec2_deprecated -def test_create_volume_from_snapshot(): - conn = 
boto.connect_ec2('the_key', 'the_secret')
-    volume = conn.create_volume(80, "us-east-1a")
-    snapshot = volume.create_snapshot('a test snapshot')
-
-    with assert_raises(EC2ResponseError) as ex:
-        snapshot = volume.create_snapshot('a test snapshot', dry_run=True)
-    ex.exception.error_code.should.equal('DryRunOperation')
-    ex.exception.status.should.equal(400)
-    ex.exception.message.should.equal(
-        'An error occurred (DryRunOperation) when calling the CreateSnapshot operation: Request would have succeeded, but DryRun flag is set')
-
-    snapshot = volume.create_snapshot('a test snapshot')
-    snapshot.update()
-    snapshot.status.should.equal('completed')
-
-    new_volume = snapshot.create_volume('us-east-1a')
-    new_volume.size.should.equal(80)
-    new_volume.snapshot_id.should.equal(snapshot.id)
-
-
-@mock_ec2_deprecated
-def test_create_volume_from_encrypted_snapshot():
-    conn = boto.connect_ec2('the_key', 'the_secret')
-    volume = conn.create_volume(80, "us-east-1a", encrypted=True)
-
-    snapshot = volume.create_snapshot('a test snapshot')
-    snapshot.update()
-    snapshot.status.should.equal('completed')
-
-    new_volume = snapshot.create_volume('us-east-1a')
-    new_volume.size.should.equal(80)
-    new_volume.snapshot_id.should.equal(snapshot.id)
-    new_volume.encrypted.should.be(True)
-
-
-@mock_ec2_deprecated
-def test_modify_attribute_blockDeviceMapping():
-    """
-    Reproduces the missing feature explained at [0], where we want to mock a
-    call to modify an instance attribute of type: blockDeviceMapping.
-
-    [0] https://github.com/spulec/moto/issues/160
-    """
-    conn = boto.ec2.connect_to_region("us-east-1")
-
-    reservation = conn.run_instances('ami-1234abcd')
-
-    instance = reservation.instances[0]
-
-    with assert_raises(EC2ResponseError) as ex:
-        instance.modify_attribute('blockDeviceMapping', {
-            '/dev/sda1': True}, dry_run=True)
-    ex.exception.error_code.should.equal('DryRunOperation')
-    ex.exception.status.should.equal(400)
-    ex.exception.message.should.equal(
-        'An error occurred (DryRunOperation) when calling the ModifyInstanceAttribute operation: Request would have succeeded, but DryRun flag is set')
-
-    instance.modify_attribute('blockDeviceMapping', {'/dev/sda1': True})
-
-    instance = ec2_backends[conn.region.name].get_instance(instance.id)
-    instance.block_device_mapping.should.have.key('/dev/sda1')
-    instance.block_device_mapping[
-        '/dev/sda1'].delete_on_termination.should.be(True)
-
-
-@mock_ec2_deprecated
-def test_volume_tag_escaping():
-    conn = boto.connect_ec2('the_key', 'the_secret')
-    vol = conn.create_volume(10, 'us-east-1a')
-    snapshot = conn.create_snapshot(vol.id, 'Desc')
-
-    with assert_raises(EC2ResponseError) as ex:
-        snapshot.add_tags({'key': '</closed>'}, dry_run=True)
-    ex.exception.error_code.should.equal('DryRunOperation')
-    ex.exception.status.should.equal(400)
-    ex.exception.message.should.equal(
-        'An error occurred (DryRunOperation) when calling the CreateTags operation: Request would have succeeded, but DryRun flag is set')
-    snaps = [snap for snap in conn.get_all_snapshots() if snap.id == snapshot.id]
-    dict(snaps[0].tags).should_not.be.equal(
-        {'key': '</closed>'})
-
-    snapshot.add_tags({'key': '</closed>'})
-
-    snaps = [snap for snap in conn.get_all_snapshots() if snap.id == snapshot.id]
-    dict(snaps[0].tags).should.equal({'key': '</closed>'})
-
-
-@freeze_time("2015-01-01")  # arbitrary frozen time; a bare @freeze_time would invoke the test at import time
-@mock_ec2
-def test_copy_snapshot():
-    ec2_client = boto3.client('ec2', region_name='eu-west-1')
-    dest_ec2_client = boto3.client('ec2', region_name='eu-west-2')
-
-    volume_response = ec2_client.create_volume(
-        AvailabilityZone='eu-west-1a', 
Size=10 - ) - - create_snapshot_response = ec2_client.create_snapshot( - VolumeId=volume_response['VolumeId'] - ) - - copy_snapshot_response = dest_ec2_client.copy_snapshot( - SourceSnapshotId=create_snapshot_response['SnapshotId'], - SourceRegion="eu-west-1" - ) - - ec2 = boto3.resource('ec2', region_name='eu-west-1') - dest_ec2 = boto3.resource('ec2', region_name='eu-west-2') - - source = ec2.Snapshot(create_snapshot_response['SnapshotId']) - dest = dest_ec2.Snapshot(copy_snapshot_response['SnapshotId']) - - attribs = ['data_encryption_key_id', 'encrypted', - 'kms_key_id', 'owner_alias', 'owner_id', - 'progress', 'state', 'state_message', - 'tags', 'volume_id', 'volume_size'] - - for attrib in attribs: - getattr(source, attrib).should.equal(getattr(dest, attrib)) - - # Copy from non-existent source ID. - with assert_raises(ClientError) as cm: - create_snapshot_error = ec2_client.create_snapshot( - VolumeId='vol-abcd1234' - ) - cm.exception.response['Error']['Code'].should.equal('InvalidVolume.NotFound') - cm.exception.response['Error']['Message'].should.equal("The volume 'vol-abcd1234' does not exist.") - cm.exception.response['ResponseMetadata']['RequestId'].should_not.be.none - cm.exception.response['ResponseMetadata']['HTTPStatusCode'].should.equal(400) - - # Copy from non-existent source region. - with assert_raises(ClientError) as cm: - copy_snapshot_response = dest_ec2_client.copy_snapshot( - SourceSnapshotId=create_snapshot_response['SnapshotId'], - SourceRegion="eu-west-2" - ) - cm.exception.response['Error']['Code'].should.equal('InvalidSnapshot.NotFound') - cm.exception.response['Error']['Message'].should.be.none - cm.exception.response['ResponseMetadata']['RequestId'].should_not.be.none - cm.exception.response['ResponseMetadata']['HTTPStatusCode'].should.equal(400) - -@mock_ec2 -def test_search_for_many_snapshots(): - ec2_client = boto3.client('ec2', region_name='eu-west-1') - - volume_response = ec2_client.create_volume( - AvailabilityZone='eu-west-1a', Size=10 - ) - - snapshot_ids = [] - for i in range(1, 20): - create_snapshot_response = ec2_client.create_snapshot( - VolumeId=volume_response['VolumeId'] - ) - snapshot_ids.append(create_snapshot_response['SnapshotId']) - - snapshots_response = ec2_client.describe_snapshots( - SnapshotIds=snapshot_ids - ) - - assert len(snapshots_response['Snapshots']) == len(snapshot_ids) +from __future__ import unicode_literals +# Ensure 'assert_raises' context manager support for Python 2.6 +import tests.backport_assert_raises +from nose.tools import assert_raises + +from moto.ec2 import ec2_backends +import boto +import boto3 +from botocore.exceptions import ClientError +from boto.exception import EC2ResponseError +from freezegun import freeze_time +import sure # noqa + +from moto import mock_ec2_deprecated, mock_ec2 + + +@mock_ec2_deprecated +def test_create_and_delete_volume(): + conn = boto.connect_ec2('the_key', 'the_secret') + volume = conn.create_volume(80, "us-east-1a") + + all_volumes = conn.get_all_volumes() + + current_volume = [item for item in all_volumes if item.id == volume.id] + current_volume.should.have.length_of(1) + current_volume[0].size.should.equal(80) + current_volume[0].zone.should.equal("us-east-1a") + current_volume[0].encrypted.should.be(False) + + volume = current_volume[0] + + with assert_raises(EC2ResponseError) as ex: + volume.delete(dry_run=True) + ex.exception.error_code.should.equal('DryRunOperation') + ex.exception.status.should.equal(400) + ex.exception.message.should.equal( + 'An error occurred 
(DryRunOperation) when calling the DeleteVolume operation: Request would have succeeded, but DryRun flag is set') + + volume.delete() + + all_volumes = conn.get_all_volumes() + my_volume = [item for item in all_volumes if item.id == volume.id] + my_volume.should.have.length_of(0) + + # Deleting something that was already deleted should throw an error + with assert_raises(EC2ResponseError) as cm: + volume.delete() + cm.exception.code.should.equal('InvalidVolume.NotFound') + cm.exception.status.should.equal(400) + cm.exception.request_id.should_not.be.none + + +@mock_ec2_deprecated +def test_create_encrypted_volume_dryrun(): + conn = boto.connect_ec2('the_key', 'the_secret') + with assert_raises(EC2ResponseError) as ex: + conn.create_volume(80, "us-east-1a", encrypted=True, dry_run=True) + ex.exception.error_code.should.equal('DryRunOperation') + ex.exception.status.should.equal(400) + ex.exception.message.should.equal( + 'An error occurred (DryRunOperation) when calling the CreateVolume operation: Request would have succeeded, but DryRun flag is set') + + +@mock_ec2_deprecated +def test_create_encrypted_volume(): + conn = boto.connect_ec2('the_key', 'the_secret') + volume = conn.create_volume(80, "us-east-1a", encrypted=True) + + with assert_raises(EC2ResponseError) as ex: + conn.create_volume(80, "us-east-1a", encrypted=True, dry_run=True) + ex.exception.error_code.should.equal('DryRunOperation') + ex.exception.status.should.equal(400) + ex.exception.message.should.equal( + 'An error occurred (DryRunOperation) when calling the CreateVolume operation: Request would have succeeded, but DryRun flag is set') + + all_volumes = [vol for vol in conn.get_all_volumes() if vol.id == volume.id] + all_volumes[0].encrypted.should.be(True) + + +@mock_ec2_deprecated +def test_filter_volume_by_id(): + conn = boto.connect_ec2('the_key', 'the_secret') + volume1 = conn.create_volume(80, "us-east-1a") + volume2 = conn.create_volume(36, "us-east-1b") + volume3 = conn.create_volume(20, "us-east-1c") + vol1 = conn.get_all_volumes(volume_ids=volume3.id) + vol1.should.have.length_of(1) + vol1[0].size.should.equal(20) + vol1[0].zone.should.equal('us-east-1c') + vol2 = conn.get_all_volumes(volume_ids=[volume1.id, volume2.id]) + vol2.should.have.length_of(2) + + with assert_raises(EC2ResponseError) as cm: + conn.get_all_volumes(volume_ids=['vol-does_not_exist']) + cm.exception.code.should.equal('InvalidVolume.NotFound') + cm.exception.status.should.equal(400) + cm.exception.request_id.should_not.be.none + + +@mock_ec2_deprecated +def test_volume_filters(): + conn = boto.connect_ec2('the_key', 'the_secret') + + reservation = conn.run_instances('ami-1234abcd') + instance = reservation.instances[0] + + instance.update() + + volume1 = conn.create_volume(80, "us-east-1a", encrypted=True) + volume2 = conn.create_volume(36, "us-east-1b", encrypted=False) + volume3 = conn.create_volume(20, "us-east-1c", encrypted=True) + + snapshot = volume3.create_snapshot(description='testsnap') + volume4 = conn.create_volume(25, "us-east-1a", snapshot=snapshot) + + conn.create_tags([volume1.id], {'testkey1': 'testvalue1'}) + conn.create_tags([volume2.id], {'testkey2': 'testvalue2'}) + + volume1.update() + volume2.update() + volume3.update() + volume4.update() + + block_mapping = instance.block_device_mapping['/dev/sda1'] + + volume_ids = (volume1.id, volume2.id, volume3.id, volume4.id, block_mapping.volume_id) + + volumes_by_attach_time = conn.get_all_volumes( + filters={'attachment.attach-time': block_mapping.attach_time}) + set([vol.id 
for vol in volumes_by_attach_time] + ).should.equal({block_mapping.volume_id}) + + volumes_by_attach_device = conn.get_all_volumes( + filters={'attachment.device': '/dev/sda1'}) + set([vol.id for vol in volumes_by_attach_device] + ).should.equal({block_mapping.volume_id}) + + volumes_by_attach_instance_id = conn.get_all_volumes( + filters={'attachment.instance-id': instance.id}) + set([vol.id for vol in volumes_by_attach_instance_id] + ).should.equal({block_mapping.volume_id}) + + volumes_by_attach_status = conn.get_all_volumes( + filters={'attachment.status': 'attached'}) + set([vol.id for vol in volumes_by_attach_status] + ).should.equal({block_mapping.volume_id}) + + volumes_by_create_time = conn.get_all_volumes( + filters={'create-time': volume4.create_time}) + set([vol.create_time for vol in volumes_by_create_time] + ).should.equal({volume4.create_time}) + + volumes_by_size = conn.get_all_volumes(filters={'size': volume2.size}) + set([vol.id for vol in volumes_by_size]).should.equal({volume2.id}) + + volumes_by_snapshot_id = conn.get_all_volumes( + filters={'snapshot-id': snapshot.id}) + set([vol.id for vol in volumes_by_snapshot_id] + ).should.equal({volume4.id}) + + volumes_by_status = conn.get_all_volumes(filters={'status': 'in-use'}) + set([vol.id for vol in volumes_by_status]).should.equal( + {block_mapping.volume_id}) + + volumes_by_id = conn.get_all_volumes(filters={'volume-id': volume1.id}) + set([vol.id for vol in volumes_by_id]).should.equal({volume1.id}) + + volumes_by_tag_key = conn.get_all_volumes(filters={'tag-key': 'testkey1'}) + set([vol.id for vol in volumes_by_tag_key]).should.equal({volume1.id}) + + volumes_by_tag_value = conn.get_all_volumes( + filters={'tag-value': 'testvalue1'}) + set([vol.id for vol in volumes_by_tag_value] + ).should.equal({volume1.id}) + + volumes_by_tag = conn.get_all_volumes( + filters={'tag:testkey1': 'testvalue1'}) + set([vol.id for vol in volumes_by_tag]).should.equal({volume1.id}) + + volumes_by_unencrypted = conn.get_all_volumes( + filters={'encrypted': 'false'}) + set([vol.id for vol in volumes_by_unencrypted if vol.id in volume_ids]).should.equal( + {block_mapping.volume_id, volume2.id} + ) + + volumes_by_encrypted = conn.get_all_volumes(filters={'encrypted': 'true'}) + set([vol.id for vol in volumes_by_encrypted if vol.id in volume_ids]).should.equal( + {volume1.id, volume3.id, volume4.id} + ) + + volumes_by_availability_zone = conn.get_all_volumes(filters={'availability-zone': 'us-east-1b'}) + set([vol.id for vol in volumes_by_availability_zone if vol.id in volume_ids]).should.equal( + {volume2.id} + ) + + +@mock_ec2_deprecated +def test_volume_attach_and_detach(): + conn = boto.connect_ec2('the_key', 'the_secret') + reservation = conn.run_instances('ami-1234abcd') + instance = reservation.instances[0] + volume = conn.create_volume(80, "us-east-1a") + + volume.update() + volume.volume_state().should.equal('available') + + with assert_raises(EC2ResponseError) as ex: + volume.attach(instance.id, "/dev/sdh", dry_run=True) + ex.exception.error_code.should.equal('DryRunOperation') + ex.exception.status.should.equal(400) + ex.exception.message.should.equal( + 'An error occurred (DryRunOperation) when calling the AttachVolume operation: Request would have succeeded, but DryRun flag is set') + + volume.attach(instance.id, "/dev/sdh") + + volume.update() + volume.volume_state().should.equal('in-use') + volume.attachment_state().should.equal('attached') + + volume.attach_data.instance_id.should.equal(instance.id) + + with 
assert_raises(EC2ResponseError) as ex: + volume.detach(dry_run=True) + ex.exception.error_code.should.equal('DryRunOperation') + ex.exception.status.should.equal(400) + ex.exception.message.should.equal( + 'An error occurred (DryRunOperation) when calling the DetachVolume operation: Request would have succeeded, but DryRun flag is set') + + volume.detach() + + volume.update() + volume.volume_state().should.equal('available') + + with assert_raises(EC2ResponseError) as cm1: + volume.attach('i-1234abcd', "/dev/sdh") + cm1.exception.code.should.equal('InvalidInstanceID.NotFound') + cm1.exception.status.should.equal(400) + cm1.exception.request_id.should_not.be.none + + with assert_raises(EC2ResponseError) as cm2: + conn.detach_volume(volume.id, instance.id, "/dev/sdh") + cm2.exception.code.should.equal('InvalidAttachment.NotFound') + cm2.exception.status.should.equal(400) + cm2.exception.request_id.should_not.be.none + + with assert_raises(EC2ResponseError) as cm3: + conn.detach_volume(volume.id, 'i-1234abcd', "/dev/sdh") + cm3.exception.code.should.equal('InvalidInstanceID.NotFound') + cm3.exception.status.should.equal(400) + cm3.exception.request_id.should_not.be.none + + +@mock_ec2_deprecated +def test_create_snapshot(): + conn = boto.connect_ec2('the_key', 'the_secret') + volume = conn.create_volume(80, "us-east-1a") + + with assert_raises(EC2ResponseError) as ex: + snapshot = volume.create_snapshot('a dryrun snapshot', dry_run=True) + ex.exception.error_code.should.equal('DryRunOperation') + ex.exception.status.should.equal(400) + ex.exception.message.should.equal( + 'An error occurred (DryRunOperation) when calling the CreateSnapshot operation: Request would have succeeded, but DryRun flag is set') + + snapshot = volume.create_snapshot('a test snapshot') + snapshot.update() + snapshot.status.should.equal('completed') + + snapshots = [snap for snap in conn.get_all_snapshots() if snap.id == snapshot.id] + snapshots.should.have.length_of(1) + snapshots[0].description.should.equal('a test snapshot') + snapshots[0].start_time.should_not.be.none + snapshots[0].encrypted.should.be(False) + + # Create snapshot without description + num_snapshots = len(conn.get_all_snapshots()) + + snapshot = volume.create_snapshot() + conn.get_all_snapshots().should.have.length_of(num_snapshots + 1) + + snapshot.delete() + conn.get_all_snapshots().should.have.length_of(num_snapshots) + + # Deleting something that was already deleted should throw an error + with assert_raises(EC2ResponseError) as cm: + snapshot.delete() + cm.exception.code.should.equal('InvalidSnapshot.NotFound') + cm.exception.status.should.equal(400) + cm.exception.request_id.should_not.be.none + + +@mock_ec2_deprecated +def test_create_encrypted_snapshot(): + conn = boto.connect_ec2('the_key', 'the_secret') + volume = conn.create_volume(80, "us-east-1a", encrypted=True) + snapshot = volume.create_snapshot('a test snapshot') + snapshot.update() + snapshot.status.should.equal('completed') + + snapshots = [snap for snap in conn.get_all_snapshots() if snap.id == snapshot.id] + snapshots.should.have.length_of(1) + snapshots[0].description.should.equal('a test snapshot') + snapshots[0].start_time.should_not.be.none + snapshots[0].encrypted.should.be(True) + + +@mock_ec2_deprecated +def test_filter_snapshot_by_id(): + conn = boto.connect_ec2('the_key', 'the_secret') + volume1 = conn.create_volume(36, "us-east-1a") + snap1 = volume1.create_snapshot('a test snapshot 1') + volume2 = conn.create_volume(42, 'us-east-1a') + snap2 = 
volume2.create_snapshot('a test snapshot 2') + volume3 = conn.create_volume(84, 'us-east-1a') + snap3 = volume3.create_snapshot('a test snapshot 3') + snapshots1 = conn.get_all_snapshots(snapshot_ids=snap2.id) + snapshots1.should.have.length_of(1) + snapshots1[0].volume_id.should.equal(volume2.id) + snapshots1[0].region.name.should.equal(conn.region.name) + snapshots2 = conn.get_all_snapshots(snapshot_ids=[snap2.id, snap3.id]) + snapshots2.should.have.length_of(2) + for s in snapshots2: + s.start_time.should_not.be.none + s.volume_id.should.be.within([volume2.id, volume3.id]) + s.region.name.should.equal(conn.region.name) + + with assert_raises(EC2ResponseError) as cm: + conn.get_all_snapshots(snapshot_ids=['snap-does_not_exist']) + cm.exception.code.should.equal('InvalidSnapshot.NotFound') + cm.exception.status.should.equal(400) + cm.exception.request_id.should_not.be.none + + +@mock_ec2_deprecated +def test_snapshot_filters(): + conn = boto.connect_ec2('the_key', 'the_secret') + volume1 = conn.create_volume(20, "us-east-1a", encrypted=False) + volume2 = conn.create_volume(25, "us-east-1a", encrypted=True) + + snapshot1 = volume1.create_snapshot(description='testsnapshot1') + snapshot2 = volume1.create_snapshot(description='testsnapshot2') + snapshot3 = volume2.create_snapshot(description='testsnapshot3') + + conn.create_tags([snapshot1.id], {'testkey1': 'testvalue1'}) + conn.create_tags([snapshot2.id], {'testkey2': 'testvalue2'}) + + snapshots_by_description = conn.get_all_snapshots( + filters={'description': 'testsnapshot1'}) + set([snap.id for snap in snapshots_by_description] + ).should.equal({snapshot1.id}) + + snapshots_by_id = conn.get_all_snapshots( + filters={'snapshot-id': snapshot1.id}) + set([snap.id for snap in snapshots_by_id] + ).should.equal({snapshot1.id}) + + snapshots_by_start_time = conn.get_all_snapshots( + filters={'start-time': snapshot1.start_time}) + set([snap.start_time for snap in snapshots_by_start_time] + ).should.equal({snapshot1.start_time}) + + snapshots_by_volume_id = conn.get_all_snapshots( + filters={'volume-id': volume1.id}) + set([snap.id for snap in snapshots_by_volume_id] + ).should.equal({snapshot1.id, snapshot2.id}) + + snapshots_by_status = conn.get_all_snapshots( + filters={'status': 'completed'}) + ({snapshot1.id, snapshot2.id, snapshot3.id} - + {snap.id for snap in snapshots_by_status}).should.have.length_of(0) + + snapshots_by_volume_size = conn.get_all_snapshots( + filters={'volume-size': volume1.size}) + set([snap.id for snap in snapshots_by_volume_size] + ).should.equal({snapshot1.id, snapshot2.id}) + + snapshots_by_tag_key = conn.get_all_snapshots( + filters={'tag-key': 'testkey1'}) + set([snap.id for snap in snapshots_by_tag_key] + ).should.equal({snapshot1.id}) + + snapshots_by_tag_value = conn.get_all_snapshots( + filters={'tag-value': 'testvalue1'}) + set([snap.id for snap in snapshots_by_tag_value] + ).should.equal({snapshot1.id}) + + snapshots_by_tag = conn.get_all_snapshots( + filters={'tag:testkey1': 'testvalue1'}) + set([snap.id for snap in snapshots_by_tag] + ).should.equal({snapshot1.id}) + + snapshots_by_encrypted = conn.get_all_snapshots( + filters={'encrypted': 'true'}) + set([snap.id for snap in snapshots_by_encrypted] + ).should.equal({snapshot3.id}) + + +@mock_ec2_deprecated +def test_snapshot_attribute(): + import copy + + conn = boto.connect_ec2('the_key', 'the_secret') + volume = conn.create_volume(80, "us-east-1a") + snapshot = volume.create_snapshot() + + # Baseline + attributes = conn.get_snapshot_attribute( + 
snapshot.id, attribute='createVolumePermission') + attributes.name.should.equal('create_volume_permission') + attributes.attrs.should.have.length_of(0) + + ADD_GROUP_ARGS = {'snapshot_id': snapshot.id, + 'attribute': 'createVolumePermission', + 'operation': 'add', + 'groups': 'all'} + + REMOVE_GROUP_ARGS = {'snapshot_id': snapshot.id, + 'attribute': 'createVolumePermission', + 'operation': 'remove', + 'groups': 'all'} + + # Add 'all' group and confirm + + with assert_raises(EC2ResponseError) as ex: + conn.modify_snapshot_attribute( + **dict(ADD_GROUP_ARGS, **{'dry_run': True})) + ex.exception.error_code.should.equal('DryRunOperation') + ex.exception.status.should.equal(400) + ex.exception.message.should.equal( + 'An error occurred (DryRunOperation) when calling the ModifySnapshotAttribute operation: Request would have succeeded, but DryRun flag is set') + + conn.modify_snapshot_attribute(**ADD_GROUP_ARGS) + + attributes = conn.get_snapshot_attribute( + snapshot.id, attribute='createVolumePermission') + attributes.attrs['groups'].should.have.length_of(1) + attributes.attrs['groups'].should.equal(['all']) + + # Add is idempotent + conn.modify_snapshot_attribute.when.called_with( + **ADD_GROUP_ARGS).should_not.throw(EC2ResponseError) + + # Remove 'all' group and confirm + with assert_raises(EC2ResponseError) as ex: + conn.modify_snapshot_attribute( + **dict(REMOVE_GROUP_ARGS, **{'dry_run': True})) + ex.exception.error_code.should.equal('DryRunOperation') + ex.exception.status.should.equal(400) + ex.exception.message.should.equal( + 'An error occurred (DryRunOperation) when calling the ModifySnapshotAttribute operation: Request would have succeeded, but DryRun flag is set') + + conn.modify_snapshot_attribute(**REMOVE_GROUP_ARGS) + + attributes = conn.get_snapshot_attribute( + snapshot.id, attribute='createVolumePermission') + attributes.attrs.should.have.length_of(0) + + # Remove is idempotent + conn.modify_snapshot_attribute.when.called_with( + **REMOVE_GROUP_ARGS).should_not.throw(EC2ResponseError) + + # Error: Add with group != 'all' + with assert_raises(EC2ResponseError) as cm: + conn.modify_snapshot_attribute(snapshot.id, + attribute='createVolumePermission', + operation='add', + groups='everyone') + cm.exception.code.should.equal('InvalidAMIAttributeItemValue') + cm.exception.status.should.equal(400) + cm.exception.request_id.should_not.be.none + + # Error: Add with invalid snapshot ID + with assert_raises(EC2ResponseError) as cm: + conn.modify_snapshot_attribute("snapshot-abcd1234", + attribute='createVolumePermission', + operation='add', + groups='all') + cm.exception.code.should.equal('InvalidSnapshot.NotFound') + cm.exception.status.should.equal(400) + cm.exception.request_id.should_not.be.none + + # Error: Remove with invalid snapshot ID + with assert_raises(EC2ResponseError) as cm: + conn.modify_snapshot_attribute("snapshot-abcd1234", + attribute='createVolumePermission', + operation='remove', + groups='all') + cm.exception.code.should.equal('InvalidSnapshot.NotFound') + cm.exception.status.should.equal(400) + cm.exception.request_id.should_not.be.none + + # Error: Add or remove with user ID instead of group + conn.modify_snapshot_attribute.when.called_with(snapshot.id, + attribute='createVolumePermission', + operation='add', + user_ids=['user']).should.throw(NotImplementedError) + conn.modify_snapshot_attribute.when.called_with(snapshot.id, + attribute='createVolumePermission', + operation='remove', + user_ids=['user']).should.throw(NotImplementedError) + + +@mock_ec2_deprecated 
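+# A volume created from a snapshot should inherit the snapshot's size and record its snapshot ID.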
+def test_create_volume_from_snapshot():
+    conn = boto.connect_ec2('the_key', 'the_secret')
+    volume = conn.create_volume(80, "us-east-1a")
+    snapshot = volume.create_snapshot('a test snapshot')
+
+    with assert_raises(EC2ResponseError) as ex:
+        snapshot = volume.create_snapshot('a test snapshot', dry_run=True)
+    ex.exception.error_code.should.equal('DryRunOperation')
+    ex.exception.status.should.equal(400)
+    ex.exception.message.should.equal(
+        'An error occurred (DryRunOperation) when calling the CreateSnapshot operation: Request would have succeeded, but DryRun flag is set')
+
+    snapshot = volume.create_snapshot('a test snapshot')
+    snapshot.update()
+    snapshot.status.should.equal('completed')
+
+    new_volume = snapshot.create_volume('us-east-1a')
+    new_volume.size.should.equal(80)
+    new_volume.snapshot_id.should.equal(snapshot.id)
+
+
+@mock_ec2_deprecated
+def test_create_volume_from_encrypted_snapshot():
+    conn = boto.connect_ec2('the_key', 'the_secret')
+    volume = conn.create_volume(80, "us-east-1a", encrypted=True)
+
+    snapshot = volume.create_snapshot('a test snapshot')
+    snapshot.update()
+    snapshot.status.should.equal('completed')
+
+    new_volume = snapshot.create_volume('us-east-1a')
+    new_volume.size.should.equal(80)
+    new_volume.snapshot_id.should.equal(snapshot.id)
+    new_volume.encrypted.should.be(True)
+
+
+@mock_ec2_deprecated
+def test_modify_attribute_blockDeviceMapping():
+    """
+    Reproduces the missing feature explained at [0], where we want to mock a
+    call to modify an instance attribute of type: blockDeviceMapping.
+
+    [0] https://github.com/spulec/moto/issues/160
+    """
+    conn = boto.ec2.connect_to_region("us-east-1")
+
+    reservation = conn.run_instances('ami-1234abcd')
+
+    instance = reservation.instances[0]
+
+    with assert_raises(EC2ResponseError) as ex:
+        instance.modify_attribute('blockDeviceMapping', {
+            '/dev/sda1': True}, dry_run=True)
+    ex.exception.error_code.should.equal('DryRunOperation')
+    ex.exception.status.should.equal(400)
+    ex.exception.message.should.equal(
+        'An error occurred (DryRunOperation) when calling the ModifyInstanceAttribute operation: Request would have succeeded, but DryRun flag is set')
+
+    instance.modify_attribute('blockDeviceMapping', {'/dev/sda1': True})
+
+    instance = ec2_backends[conn.region.name].get_instance(instance.id)
+    instance.block_device_mapping.should.have.key('/dev/sda1')
+    instance.block_device_mapping[
+        '/dev/sda1'].delete_on_termination.should.be(True)
+
+
+@mock_ec2_deprecated
+def test_volume_tag_escaping():
+    conn = boto.connect_ec2('the_key', 'the_secret')
+    vol = conn.create_volume(10, 'us-east-1a')
+    snapshot = conn.create_snapshot(vol.id, 'Desc')
+
+    with assert_raises(EC2ResponseError) as ex:
+        snapshot.add_tags({'key': '</closed>'}, dry_run=True)
+    ex.exception.error_code.should.equal('DryRunOperation')
+    ex.exception.status.should.equal(400)
+    ex.exception.message.should.equal(
+        'An error occurred (DryRunOperation) when calling the CreateTags operation: Request would have succeeded, but DryRun flag is set')
+    snaps = [snap for snap in conn.get_all_snapshots() if snap.id == snapshot.id]
+    dict(snaps[0].tags).should_not.be.equal(
+        {'key': '</closed>'})
+
+    snapshot.add_tags({'key': '</closed>'})
+
+    snaps = [snap for snap in conn.get_all_snapshots() if snap.id == snapshot.id]
+    dict(snaps[0].tags).should.equal({'key': '</closed>'})
+
+
+@freeze_time("2015-01-01")  # arbitrary frozen time; a bare @freeze_time would invoke the test at import time
+@mock_ec2
+def test_copy_snapshot():
+    ec2_client = boto3.client('ec2', region_name='eu-west-1')
+    dest_ec2_client = boto3.client('ec2', region_name='eu-west-2')
+
+    volume_response = 
ec2_client.create_volume( + AvailabilityZone='eu-west-1a', Size=10 + ) + + create_snapshot_response = ec2_client.create_snapshot( + VolumeId=volume_response['VolumeId'] + ) + + copy_snapshot_response = dest_ec2_client.copy_snapshot( + SourceSnapshotId=create_snapshot_response['SnapshotId'], + SourceRegion="eu-west-1" + ) + + ec2 = boto3.resource('ec2', region_name='eu-west-1') + dest_ec2 = boto3.resource('ec2', region_name='eu-west-2') + + source = ec2.Snapshot(create_snapshot_response['SnapshotId']) + dest = dest_ec2.Snapshot(copy_snapshot_response['SnapshotId']) + + attribs = ['data_encryption_key_id', 'encrypted', + 'kms_key_id', 'owner_alias', 'owner_id', + 'progress', 'state', 'state_message', + 'tags', 'volume_id', 'volume_size'] + + for attrib in attribs: + getattr(source, attrib).should.equal(getattr(dest, attrib)) + + # Copy from non-existent source ID. + with assert_raises(ClientError) as cm: + create_snapshot_error = ec2_client.create_snapshot( + VolumeId='vol-abcd1234' + ) + cm.exception.response['Error']['Code'].should.equal('InvalidVolume.NotFound') + cm.exception.response['Error']['Message'].should.equal("The volume 'vol-abcd1234' does not exist.") + cm.exception.response['ResponseMetadata']['RequestId'].should_not.be.none + cm.exception.response['ResponseMetadata']['HTTPStatusCode'].should.equal(400) + + # Copy from non-existent source region. + with assert_raises(ClientError) as cm: + copy_snapshot_response = dest_ec2_client.copy_snapshot( + SourceSnapshotId=create_snapshot_response['SnapshotId'], + SourceRegion="eu-west-2" + ) + cm.exception.response['Error']['Code'].should.equal('InvalidSnapshot.NotFound') + cm.exception.response['Error']['Message'].should.be.none + cm.exception.response['ResponseMetadata']['RequestId'].should_not.be.none + cm.exception.response['ResponseMetadata']['HTTPStatusCode'].should.equal(400) + +@mock_ec2 +def test_search_for_many_snapshots(): + ec2_client = boto3.client('ec2', region_name='eu-west-1') + + volume_response = ec2_client.create_volume( + AvailabilityZone='eu-west-1a', Size=10 + ) + + snapshot_ids = [] + for i in range(1, 20): + create_snapshot_response = ec2_client.create_snapshot( + VolumeId=volume_response['VolumeId'] + ) + snapshot_ids.append(create_snapshot_response['SnapshotId']) + + snapshots_response = ec2_client.describe_snapshots( + SnapshotIds=snapshot_ids + ) + + assert len(snapshots_response['Snapshots']) == len(snapshot_ids) diff --git a/tests/test_ec2/test_elastic_ip_addresses.py b/tests/test_ec2/test_elastic_ip_addresses.py index ca6637b18dc6..3fad7fd3cd57 100644 --- a/tests/test_ec2/test_elastic_ip_addresses.py +++ b/tests/test_ec2/test_elastic_ip_addresses.py @@ -1,514 +1,514 @@ -from __future__ import unicode_literals -# Ensure 'assert_raises' context manager support for Python 2.6 -import tests.backport_assert_raises -from nose.tools import assert_raises - -import boto -import boto3 -from boto.exception import EC2ResponseError -import six - -import sure # noqa - -from moto import mock_ec2, mock_ec2_deprecated - -import logging - - -@mock_ec2_deprecated -def test_eip_allocate_classic(): - """Allocate/release Classic EIP""" - conn = boto.connect_ec2('the_key', 'the_secret') - - with assert_raises(EC2ResponseError) as ex: - standard = conn.allocate_address(dry_run=True) - ex.exception.error_code.should.equal('DryRunOperation') - ex.exception.status.should.equal(400) - ex.exception.message.should.equal( - 'An error occurred (DryRunOperation) when calling the AllocateAddress operation: Request would have succeeded, but 
DryRun flag is set') - - standard = conn.allocate_address() - standard.should.be.a(boto.ec2.address.Address) - standard.public_ip.should.be.a(six.text_type) - standard.instance_id.should.be.none - standard.domain.should.be.equal("standard") - - with assert_raises(EC2ResponseError) as ex: - standard.release(dry_run=True) - ex.exception.error_code.should.equal('DryRunOperation') - ex.exception.status.should.equal(400) - ex.exception.message.should.equal( - 'An error occurred (DryRunOperation) when calling the ReleaseAddress operation: Request would have succeeded, but DryRun flag is set') - - standard.release() - standard.should_not.be.within(conn.get_all_addresses()) - - -@mock_ec2_deprecated -def test_eip_allocate_vpc(): - """Allocate/release VPC EIP""" - conn = boto.connect_ec2('the_key', 'the_secret') - - with assert_raises(EC2ResponseError) as ex: - vpc = conn.allocate_address(domain="vpc", dry_run=True) - ex.exception.error_code.should.equal('DryRunOperation') - ex.exception.status.should.equal(400) - ex.exception.message.should.equal( - 'An error occurred (DryRunOperation) when calling the AllocateAddress operation: Request would have succeeded, but DryRun flag is set') - - vpc = conn.allocate_address(domain="vpc") - vpc.should.be.a(boto.ec2.address.Address) - vpc.domain.should.be.equal("vpc") - logging.debug("vpc alloc_id:".format(vpc.allocation_id)) - vpc.release() - -@mock_ec2 -def test_specific_eip_allocate_vpc(): - """Allocate VPC EIP with specific address""" - service = boto3.resource('ec2', region_name='us-west-1') - client = boto3.client('ec2', region_name='us-west-1') - - vpc = client.allocate_address(Domain="vpc", Address="127.38.43.222") - vpc['Domain'].should.be.equal("vpc") - vpc['PublicIp'].should.be.equal("127.38.43.222") - logging.debug("vpc alloc_id:".format(vpc['AllocationId'])) - - -@mock_ec2_deprecated -def test_eip_allocate_invalid_domain(): - """Allocate EIP invalid domain""" - conn = boto.connect_ec2('the_key', 'the_secret') - - with assert_raises(EC2ResponseError) as cm: - conn.allocate_address(domain="bogus") - cm.exception.code.should.equal('InvalidParameterValue') - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none - - -@mock_ec2_deprecated -def test_eip_associate_classic(): - """Associate/Disassociate EIP to classic instance""" - conn = boto.connect_ec2('the_key', 'the_secret') - - reservation = conn.run_instances('ami-1234abcd') - instance = reservation.instances[0] - - eip = conn.allocate_address() - eip.instance_id.should.be.none - - with assert_raises(EC2ResponseError) as cm: - conn.associate_address(public_ip=eip.public_ip) - cm.exception.code.should.equal('MissingParameter') - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none - - with assert_raises(EC2ResponseError) as ex: - conn.associate_address(instance_id=instance.id, - public_ip=eip.public_ip, dry_run=True) - ex.exception.error_code.should.equal('DryRunOperation') - ex.exception.status.should.equal(400) - ex.exception.message.should.equal( - 'An error occurred (DryRunOperation) when calling the AssociateAddress operation: Request would have succeeded, but DryRun flag is set') - - conn.associate_address(instance_id=instance.id, public_ip=eip.public_ip) - # no .update() on address ): - eip = conn.get_all_addresses(addresses=[eip.public_ip])[0] - eip.instance_id.should.be.equal(instance.id) - - with assert_raises(EC2ResponseError) as ex: - conn.disassociate_address(public_ip=eip.public_ip, dry_run=True) - 
ex.exception.error_code.should.equal('DryRunOperation') - ex.exception.status.should.equal(400) - ex.exception.message.should.equal( - 'An error occurred (DryRunOperation) when calling the DisAssociateAddress operation: Request would have succeeded, but DryRun flag is set') - - conn.disassociate_address(public_ip=eip.public_ip) - # no .update() on address ): - eip = conn.get_all_addresses(addresses=[eip.public_ip])[0] - eip.instance_id.should.be.equal(u'') - eip.release() - eip.should_not.be.within(conn.get_all_addresses()) - eip = None - - instance.terminate() - - -@mock_ec2_deprecated -def test_eip_associate_vpc(): - """Associate/Disassociate EIP to VPC instance""" - conn = boto.connect_ec2('the_key', 'the_secret') - - reservation = conn.run_instances('ami-1234abcd') - instance = reservation.instances[0] - - eip = conn.allocate_address(domain='vpc') - eip.instance_id.should.be.none - - with assert_raises(EC2ResponseError) as cm: - conn.associate_address(allocation_id=eip.allocation_id) - cm.exception.code.should.equal('MissingParameter') - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none - - conn.associate_address(instance_id=instance.id, - allocation_id=eip.allocation_id) - # no .update() on address ): - eip = conn.get_all_addresses(addresses=[eip.public_ip])[0] - eip.instance_id.should.be.equal(instance.id) - conn.disassociate_address(association_id=eip.association_id) - # no .update() on address ): - eip = conn.get_all_addresses(addresses=[eip.public_ip])[0] - eip.instance_id.should.be.equal(u'') - eip.association_id.should.be.none - - with assert_raises(EC2ResponseError) as ex: - eip.release(dry_run=True) - ex.exception.error_code.should.equal('DryRunOperation') - ex.exception.status.should.equal(400) - ex.exception.message.should.equal( - 'An error occurred (DryRunOperation) when calling the ReleaseAddress operation: Request would have succeeded, but DryRun flag is set') - - eip.release() - eip = None - - instance.terminate() - - -@mock_ec2 -def test_eip_boto3_vpc_association(): - """Associate EIP to VPC instance in a new subnet with boto3""" - service = boto3.resource('ec2', region_name='us-west-1') - client = boto3.client('ec2', region_name='us-west-1') - vpc_res = client.create_vpc(CidrBlock='10.0.0.0/24') - subnet_res = client.create_subnet( - VpcId=vpc_res['Vpc']['VpcId'], CidrBlock='10.0.0.0/24') - instance = service.create_instances(**{ - 'InstanceType': 't2.micro', - 'ImageId': 'ami-test', - 'MinCount': 1, - 'MaxCount': 1, - 'SubnetId': subnet_res['Subnet']['SubnetId'] - })[0] - allocation_id = client.allocate_address(Domain='vpc')['AllocationId'] - address = service.VpcAddress(allocation_id) - address.load() - address.association_id.should.be.none - address.instance_id.should.be.empty - address.network_interface_id.should.be.empty - association_id = client.associate_address( - InstanceId=instance.id, - AllocationId=allocation_id, - AllowReassociation=False) - instance.load() - address.reload() - address.association_id.should_not.be.none - instance.public_ip_address.should_not.be.none - instance.public_dns_name.should_not.be.none - address.network_interface_id.should.equal(instance.network_interfaces_attribute[0].get('NetworkInterfaceId')) - address.public_ip.should.equal(instance.public_ip_address) - address.instance_id.should.equal(instance.id) - - client.disassociate_address(AssociationId=address.association_id) - instance.reload() - address.reload() - instance.public_ip_address.should.be.none - 
address.network_interface_id.should.be.empty - address.association_id.should.be.none - address.instance_id.should.be.empty - - -@mock_ec2_deprecated -def test_eip_associate_network_interface(): - """Associate/Disassociate EIP to NIC""" - conn = boto.connect_vpc('the_key', 'the_secret') - vpc = conn.create_vpc("10.0.0.0/16") - subnet = conn.create_subnet(vpc.id, "10.0.0.0/18") - eni = conn.create_network_interface(subnet.id) - - eip = conn.allocate_address(domain='vpc') - eip.network_interface_id.should.be.none - - with assert_raises(EC2ResponseError) as cm: - conn.associate_address(network_interface_id=eni.id) - cm.exception.code.should.equal('MissingParameter') - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none - - conn.associate_address(network_interface_id=eni.id, - allocation_id=eip.allocation_id) - # no .update() on address ): - eip = conn.get_all_addresses(addresses=[eip.public_ip])[0] - eip.network_interface_id.should.be.equal(eni.id) - - conn.disassociate_address(association_id=eip.association_id) - # no .update() on address ): - eip = conn.get_all_addresses(addresses=[eip.public_ip])[0] - eip.network_interface_id.should.be.equal(u'') - eip.association_id.should.be.none - eip.release() - eip = None - - -@mock_ec2_deprecated -def test_eip_reassociate(): - """reassociate EIP""" - conn = boto.connect_ec2('the_key', 'the_secret') - - reservation = conn.run_instances('ami-1234abcd', min_count=2) - instance1, instance2 = reservation.instances - - eip = conn.allocate_address() - conn.associate_address(instance_id=instance1.id, public_ip=eip.public_ip) - - # Same ID is idempotent - conn.associate_address(instance_id=instance1.id, public_ip=eip.public_ip) - - # Different ID detects resource association - with assert_raises(EC2ResponseError) as cm: - conn.associate_address( - instance_id=instance2.id, public_ip=eip.public_ip, allow_reassociation=False) - cm.exception.code.should.equal('Resource.AlreadyAssociated') - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none - - conn.associate_address.when.called_with( - instance_id=instance2.id, public_ip=eip.public_ip, allow_reassociation=True).should_not.throw(EC2ResponseError) - - eip.release() - eip = None - - instance1.terminate() - instance2.terminate() - - -@mock_ec2_deprecated -def test_eip_reassociate_nic(): - """reassociate EIP""" - conn = boto.connect_vpc('the_key', 'the_secret') - - vpc = conn.create_vpc("10.0.0.0/16") - subnet = conn.create_subnet(vpc.id, "10.0.0.0/18") - eni1 = conn.create_network_interface(subnet.id) - eni2 = conn.create_network_interface(subnet.id) - - eip = conn.allocate_address() - conn.associate_address(network_interface_id=eni1.id, - public_ip=eip.public_ip) - - # Same ID is idempotent - conn.associate_address(network_interface_id=eni1.id, - public_ip=eip.public_ip) - - # Different ID detects resource association - with assert_raises(EC2ResponseError) as cm: - conn.associate_address( - network_interface_id=eni2.id, public_ip=eip.public_ip) - cm.exception.code.should.equal('Resource.AlreadyAssociated') - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none - - conn.associate_address.when.called_with( - network_interface_id=eni2.id, public_ip=eip.public_ip, allow_reassociation=True).should_not.throw(EC2ResponseError) - - eip.release() - eip = None - - -@mock_ec2_deprecated -def test_eip_associate_invalid_args(): - """Associate EIP, invalid args """ - conn = boto.connect_ec2('the_key', 'the_secret') - - reservation = 
conn.run_instances('ami-1234abcd') - instance = reservation.instances[0] - - eip = conn.allocate_address() - - with assert_raises(EC2ResponseError) as cm: - conn.associate_address(instance_id=instance.id) - cm.exception.code.should.equal('MissingParameter') - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none - - instance.terminate() - - -@mock_ec2_deprecated -def test_eip_disassociate_bogus_association(): - """Disassociate bogus EIP""" - conn = boto.connect_ec2('the_key', 'the_secret') - - with assert_raises(EC2ResponseError) as cm: - conn.disassociate_address(association_id="bogus") - cm.exception.code.should.equal('InvalidAssociationID.NotFound') - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none - - -@mock_ec2_deprecated -def test_eip_release_bogus_eip(): - """Release bogus EIP""" - conn = boto.connect_ec2('the_key', 'the_secret') - - with assert_raises(EC2ResponseError) as cm: - conn.release_address(allocation_id="bogus") - cm.exception.code.should.equal('InvalidAllocationID.NotFound') - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none - - -@mock_ec2_deprecated -def test_eip_disassociate_arg_error(): - """Invalid arguments disassociate address""" - conn = boto.connect_ec2('the_key', 'the_secret') - - with assert_raises(EC2ResponseError) as cm: - conn.disassociate_address() - cm.exception.code.should.equal('MissingParameter') - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none - - -@mock_ec2_deprecated -def test_eip_release_arg_error(): - """Invalid arguments release address""" - conn = boto.connect_ec2('the_key', 'the_secret') - - with assert_raises(EC2ResponseError) as cm: - conn.release_address() - cm.exception.code.should.equal('MissingParameter') - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none - - -@mock_ec2_deprecated -def test_eip_describe(): - """Listing of allocated Elastic IP Addresses.""" - conn = boto.connect_ec2('the_key', 'the_secret') - eips = [] - number_of_classic_ips = 2 - number_of_vpc_ips = 2 - - # allocate some IPs - for _ in range(number_of_classic_ips): - eips.append(conn.allocate_address()) - for _ in range(number_of_vpc_ips): - eips.append(conn.allocate_address(domain='vpc')) - len(eips).should.be.equal(number_of_classic_ips + number_of_vpc_ips) - - # Can we find each one individually? - for eip in eips: - if eip.allocation_id: - lookup_addresses = conn.get_all_addresses( - allocation_ids=[eip.allocation_id]) - else: - lookup_addresses = conn.get_all_addresses( - addresses=[eip.public_ip]) - len(lookup_addresses).should.be.equal(1) - lookup_addresses[0].public_ip.should.be.equal(eip.public_ip) - - # Can we find first two when we search for them? 
- lookup_addresses = conn.get_all_addresses( - addresses=[eips[0].public_ip, eips[1].public_ip]) - len(lookup_addresses).should.be.equal(2) - lookup_addresses[0].public_ip.should.be.equal(eips[0].public_ip) - lookup_addresses[1].public_ip.should.be.equal(eips[1].public_ip) - - # Release all IPs - for eip in eips: - eip.release() - len(conn.get_all_addresses()).should.be.equal(0) - - -@mock_ec2_deprecated -def test_eip_describe_none(): - """Error when search for bogus IP""" - conn = boto.connect_ec2('the_key', 'the_secret') - - with assert_raises(EC2ResponseError) as cm: - conn.get_all_addresses(addresses=["256.256.256.256"]) - cm.exception.code.should.equal('InvalidAddress.NotFound') - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none - - -@mock_ec2 -def test_eip_filters(): - service = boto3.resource('ec2', region_name='us-west-1') - client = boto3.client('ec2', region_name='us-west-1') - vpc_res = client.create_vpc(CidrBlock='10.0.0.0/24') - subnet_res = client.create_subnet( - VpcId=vpc_res['Vpc']['VpcId'], CidrBlock='10.0.0.0/24') - - def create_inst_with_eip(): - instance = service.create_instances(**{ - 'InstanceType': 't2.micro', - 'ImageId': 'ami-test', - 'MinCount': 1, - 'MaxCount': 1, - 'SubnetId': subnet_res['Subnet']['SubnetId'] - })[0] - allocation_id = client.allocate_address(Domain='vpc')['AllocationId'] - _ = client.associate_address( - InstanceId=instance.id, - AllocationId=allocation_id, - AllowReassociation=False) - instance.load() - address = service.VpcAddress(allocation_id) - address.load() - return instance, address - - inst1, eip1 = create_inst_with_eip() - inst2, eip2 = create_inst_with_eip() - inst3, eip3 = create_inst_with_eip() - - # Param search by AllocationId - addresses = list(service.vpc_addresses.filter(AllocationIds=[eip2.allocation_id])) - len(addresses).should.be.equal(1) - addresses[0].public_ip.should.equal(eip2.public_ip) - inst2.public_ip_address.should.equal(addresses[0].public_ip) - - # Param search by PublicIp - addresses = list(service.vpc_addresses.filter(PublicIps=[eip3.public_ip])) - len(addresses).should.be.equal(1) - addresses[0].public_ip.should.equal(eip3.public_ip) - inst3.public_ip_address.should.equal(addresses[0].public_ip) - - # Param search by Filter - def check_vpc_filter_valid(filter_name, filter_values): - addresses = list(service.vpc_addresses.filter( - Filters=[{'Name': filter_name, - 'Values': filter_values}])) - len(addresses).should.equal(2) - ips = [addr.public_ip for addr in addresses] - set(ips).should.equal(set([eip1.public_ip, eip2.public_ip])) - ips.should.contain(inst1.public_ip_address) - - def check_vpc_filter_invalid(filter_name): - addresses = list(service.vpc_addresses.filter( - Filters=[{'Name': filter_name, - 'Values': ['dummy1', 'dummy2']}])) - len(addresses).should.equal(0) - - def check_vpc_filter(filter_name, filter_values): - check_vpc_filter_valid(filter_name, filter_values) - check_vpc_filter_invalid(filter_name) - - check_vpc_filter('allocation-id', [eip1.allocation_id, eip2.allocation_id]) - check_vpc_filter('association-id', [eip1.association_id, eip2.association_id]) - check_vpc_filter('instance-id', [inst1.id, inst2.id]) - check_vpc_filter( - 'network-interface-id', - [inst1.network_interfaces_attribute[0].get('NetworkInterfaceId'), - inst2.network_interfaces_attribute[0].get('NetworkInterfaceId')]) - check_vpc_filter( - 'private-ip-address', - [inst1.network_interfaces_attribute[0].get('PrivateIpAddress'), - 
inst2.network_interfaces_attribute[0].get('PrivateIpAddress')]) - check_vpc_filter('public-ip', [inst1.public_ip_address, inst2.public_ip_address]) - - # all the ips are in a VPC - addresses = list(service.vpc_addresses.filter( - Filters=[{'Name': 'domain', 'Values': ['vpc']}])) - len(addresses).should.equal(3) +from __future__ import unicode_literals +# Ensure 'assert_raises' context manager support for Python 2.6 +import tests.backport_assert_raises +from nose.tools import assert_raises + +import boto +import boto3 +from boto.exception import EC2ResponseError +import six + +import sure # noqa + +from moto import mock_ec2, mock_ec2_deprecated + +import logging + + +@mock_ec2_deprecated +def test_eip_allocate_classic(): + """Allocate/release Classic EIP""" + conn = boto.connect_ec2('the_key', 'the_secret') + + with assert_raises(EC2ResponseError) as ex: + standard = conn.allocate_address(dry_run=True) + ex.exception.error_code.should.equal('DryRunOperation') + ex.exception.status.should.equal(400) + ex.exception.message.should.equal( + 'An error occurred (DryRunOperation) when calling the AllocateAddress operation: Request would have succeeded, but DryRun flag is set') + + standard = conn.allocate_address() + standard.should.be.a(boto.ec2.address.Address) + standard.public_ip.should.be.a(six.text_type) + standard.instance_id.should.be.none + standard.domain.should.be.equal("standard") + + with assert_raises(EC2ResponseError) as ex: + standard.release(dry_run=True) + ex.exception.error_code.should.equal('DryRunOperation') + ex.exception.status.should.equal(400) + ex.exception.message.should.equal( + 'An error occurred (DryRunOperation) when calling the ReleaseAddress operation: Request would have succeeded, but DryRun flag is set') + + standard.release() + standard.should_not.be.within(conn.get_all_addresses()) + + +@mock_ec2_deprecated +def test_eip_allocate_vpc(): + """Allocate/release VPC EIP""" + conn = boto.connect_ec2('the_key', 'the_secret') + + with assert_raises(EC2ResponseError) as ex: + vpc = conn.allocate_address(domain="vpc", dry_run=True) + ex.exception.error_code.should.equal('DryRunOperation') + ex.exception.status.should.equal(400) + ex.exception.message.should.equal( + 'An error occurred (DryRunOperation) when calling the AllocateAddress operation: Request would have succeeded, but DryRun flag is set') + + vpc = conn.allocate_address(domain="vpc") + vpc.should.be.a(boto.ec2.address.Address) + vpc.domain.should.be.equal("vpc") + logging.debug("vpc alloc_id: {0}".format(vpc.allocation_id)) + vpc.release() + +@mock_ec2 +def test_specific_eip_allocate_vpc(): + """Allocate VPC EIP with specific address""" + service = boto3.resource('ec2', region_name='us-west-1') + client = boto3.client('ec2', region_name='us-west-1') + + vpc = client.allocate_address(Domain="vpc", Address="127.38.43.222") + vpc['Domain'].should.be.equal("vpc") + vpc['PublicIp'].should.be.equal("127.38.43.222") + logging.debug("vpc alloc_id: {0}".format(vpc['AllocationId'])) + + +@mock_ec2_deprecated +def test_eip_allocate_invalid_domain(): + """Allocate EIP invalid domain""" + conn = boto.connect_ec2('the_key', 'the_secret') + + with assert_raises(EC2ResponseError) as cm: + conn.allocate_address(domain="bogus") + cm.exception.code.should.equal('InvalidParameterValue') + cm.exception.status.should.equal(400) + cm.exception.request_id.should_not.be.none + + +@mock_ec2_deprecated +def test_eip_associate_classic(): + """Associate/Disassociate EIP to classic instance""" + conn = boto.connect_ec2('the_key', 'the_secret') + 
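+ # EC2-Classic addresses carry no allocation ID, so the associate and + # disassociate calls below identify the address by its public IP.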
+ reservation = conn.run_instances('ami-1234abcd') + instance = reservation.instances[0] + + eip = conn.allocate_address() + eip.instance_id.should.be.none + + with assert_raises(EC2ResponseError) as cm: + conn.associate_address(public_ip=eip.public_ip) + cm.exception.code.should.equal('MissingParameter') + cm.exception.status.should.equal(400) + cm.exception.request_id.should_not.be.none + + with assert_raises(EC2ResponseError) as ex: + conn.associate_address(instance_id=instance.id, + public_ip=eip.public_ip, dry_run=True) + ex.exception.error_code.should.equal('DryRunOperation') + ex.exception.status.should.equal(400) + ex.exception.message.should.equal( + 'An error occurred (DryRunOperation) when calling the AssociateAddress operation: Request would have succeeded, but DryRun flag is set') + + conn.associate_address(instance_id=instance.id, public_ip=eip.public_ip) + # no .update() on address ): + eip = conn.get_all_addresses(addresses=[eip.public_ip])[0] + eip.instance_id.should.be.equal(instance.id) + + with assert_raises(EC2ResponseError) as ex: + conn.disassociate_address(public_ip=eip.public_ip, dry_run=True) + ex.exception.error_code.should.equal('DryRunOperation') + ex.exception.status.should.equal(400) + ex.exception.message.should.equal( + 'An error occurred (DryRunOperation) when calling the DisAssociateAddress operation: Request would have succeeded, but DryRun flag is set') + + conn.disassociate_address(public_ip=eip.public_ip) + # no .update() on address ): + eip = conn.get_all_addresses(addresses=[eip.public_ip])[0] + eip.instance_id.should.be.equal(u'') + eip.release() + eip.should_not.be.within(conn.get_all_addresses()) + eip = None + + instance.terminate() + + +@mock_ec2_deprecated +def test_eip_associate_vpc(): + """Associate/Disassociate EIP to VPC instance""" + conn = boto.connect_ec2('the_key', 'the_secret') + + reservation = conn.run_instances('ami-1234abcd') + instance = reservation.instances[0] + + eip = conn.allocate_address(domain='vpc') + eip.instance_id.should.be.none + + with assert_raises(EC2ResponseError) as cm: + conn.associate_address(allocation_id=eip.allocation_id) + cm.exception.code.should.equal('MissingParameter') + cm.exception.status.should.equal(400) + cm.exception.request_id.should_not.be.none + + conn.associate_address(instance_id=instance.id, + allocation_id=eip.allocation_id) + # no .update() on address ): + eip = conn.get_all_addresses(addresses=[eip.public_ip])[0] + eip.instance_id.should.be.equal(instance.id) + conn.disassociate_address(association_id=eip.association_id) + # no .update() on address ): + eip = conn.get_all_addresses(addresses=[eip.public_ip])[0] + eip.instance_id.should.be.equal(u'') + eip.association_id.should.be.none + + with assert_raises(EC2ResponseError) as ex: + eip.release(dry_run=True) + ex.exception.error_code.should.equal('DryRunOperation') + ex.exception.status.should.equal(400) + ex.exception.message.should.equal( + 'An error occurred (DryRunOperation) when calling the ReleaseAddress operation: Request would have succeeded, but DryRun flag is set') + + eip.release() + eip = None + + instance.terminate() + + +@mock_ec2 +def test_eip_boto3_vpc_association(): + """Associate EIP to VPC instance in a new subnet with boto3""" + service = boto3.resource('ec2', region_name='us-west-1') + client = boto3.client('ec2', region_name='us-west-1') + vpc_res = client.create_vpc(CidrBlock='10.0.0.0/24') + subnet_res = client.create_subnet( + VpcId=vpc_res['Vpc']['VpcId'], CidrBlock='10.0.0.0/24') + instance = 
service.create_instances(**{ + 'InstanceType': 't2.micro', + 'ImageId': 'ami-test', + 'MinCount': 1, + 'MaxCount': 1, + 'SubnetId': subnet_res['Subnet']['SubnetId'] + })[0] + allocation_id = client.allocate_address(Domain='vpc')['AllocationId'] + address = service.VpcAddress(allocation_id) + address.load() + address.association_id.should.be.none + address.instance_id.should.be.empty + address.network_interface_id.should.be.empty + association_id = client.associate_address( + InstanceId=instance.id, + AllocationId=allocation_id, + AllowReassociation=False) + instance.load() + address.reload() + address.association_id.should_not.be.none + instance.public_ip_address.should_not.be.none + instance.public_dns_name.should_not.be.none + address.network_interface_id.should.equal(instance.network_interfaces_attribute[0].get('NetworkInterfaceId')) + address.public_ip.should.equal(instance.public_ip_address) + address.instance_id.should.equal(instance.id) + + client.disassociate_address(AssociationId=address.association_id) + instance.reload() + address.reload() + instance.public_ip_address.should.be.none + address.network_interface_id.should.be.empty + address.association_id.should.be.none + address.instance_id.should.be.empty + + +@mock_ec2_deprecated +def test_eip_associate_network_interface(): + """Associate/Disassociate EIP to NIC""" + conn = boto.connect_vpc('the_key', 'the_secret') + vpc = conn.create_vpc("10.0.0.0/16") + subnet = conn.create_subnet(vpc.id, "10.0.0.0/18") + eni = conn.create_network_interface(subnet.id) + + eip = conn.allocate_address(domain='vpc') + eip.network_interface_id.should.be.none + + with assert_raises(EC2ResponseError) as cm: + conn.associate_address(network_interface_id=eni.id) + cm.exception.code.should.equal('MissingParameter') + cm.exception.status.should.equal(400) + cm.exception.request_id.should_not.be.none + + conn.associate_address(network_interface_id=eni.id, + allocation_id=eip.allocation_id) + # no .update() on address ): + eip = conn.get_all_addresses(addresses=[eip.public_ip])[0] + eip.network_interface_id.should.be.equal(eni.id) + + conn.disassociate_address(association_id=eip.association_id) + # no .update() on address ): + eip = conn.get_all_addresses(addresses=[eip.public_ip])[0] + eip.network_interface_id.should.be.equal(u'') + eip.association_id.should.be.none + eip.release() + eip = None + + +@mock_ec2_deprecated +def test_eip_reassociate(): + """reassociate EIP""" + conn = boto.connect_ec2('the_key', 'the_secret') + + reservation = conn.run_instances('ami-1234abcd', min_count=2) + instance1, instance2 = reservation.instances + + eip = conn.allocate_address() + conn.associate_address(instance_id=instance1.id, public_ip=eip.public_ip) + + # Same ID is idempotent + conn.associate_address(instance_id=instance1.id, public_ip=eip.public_ip) + + # Different ID detects resource association + with assert_raises(EC2ResponseError) as cm: + conn.associate_address( + instance_id=instance2.id, public_ip=eip.public_ip, allow_reassociation=False) + cm.exception.code.should.equal('Resource.AlreadyAssociated') + cm.exception.status.should.equal(400) + cm.exception.request_id.should_not.be.none + + conn.associate_address.when.called_with( + instance_id=instance2.id, public_ip=eip.public_ip, allow_reassociation=True).should_not.throw(EC2ResponseError) + + eip.release() + eip = None + + instance1.terminate() + instance2.terminate() + + +@mock_ec2_deprecated +def test_eip_reassociate_nic(): + """reassociate EIP""" + conn = boto.connect_vpc('the_key', 'the_secret') + 
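+ # Two ENIs in the same subnet: re-associating the EIP to the second ENI + # must raise Resource.AlreadyAssociated unless allow_reassociation=True.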
+ vpc = conn.create_vpc("10.0.0.0/16") + subnet = conn.create_subnet(vpc.id, "10.0.0.0/18") + eni1 = conn.create_network_interface(subnet.id) + eni2 = conn.create_network_interface(subnet.id) + + eip = conn.allocate_address() + conn.associate_address(network_interface_id=eni1.id, + public_ip=eip.public_ip) + + # Same ID is idempotent + conn.associate_address(network_interface_id=eni1.id, + public_ip=eip.public_ip) + + # Different ID detects resource association + with assert_raises(EC2ResponseError) as cm: + conn.associate_address( + network_interface_id=eni2.id, public_ip=eip.public_ip) + cm.exception.code.should.equal('Resource.AlreadyAssociated') + cm.exception.status.should.equal(400) + cm.exception.request_id.should_not.be.none + + conn.associate_address.when.called_with( + network_interface_id=eni2.id, public_ip=eip.public_ip, allow_reassociation=True).should_not.throw(EC2ResponseError) + + eip.release() + eip = None + + +@mock_ec2_deprecated +def test_eip_associate_invalid_args(): + """Associate EIP, invalid args """ + conn = boto.connect_ec2('the_key', 'the_secret') + + reservation = conn.run_instances('ami-1234abcd') + instance = reservation.instances[0] + + eip = conn.allocate_address() + + with assert_raises(EC2ResponseError) as cm: + conn.associate_address(instance_id=instance.id) + cm.exception.code.should.equal('MissingParameter') + cm.exception.status.should.equal(400) + cm.exception.request_id.should_not.be.none + + instance.terminate() + + +@mock_ec2_deprecated +def test_eip_disassociate_bogus_association(): + """Disassociate bogus EIP""" + conn = boto.connect_ec2('the_key', 'the_secret') + + with assert_raises(EC2ResponseError) as cm: + conn.disassociate_address(association_id="bogus") + cm.exception.code.should.equal('InvalidAssociationID.NotFound') + cm.exception.status.should.equal(400) + cm.exception.request_id.should_not.be.none + + +@mock_ec2_deprecated +def test_eip_release_bogus_eip(): + """Release bogus EIP""" + conn = boto.connect_ec2('the_key', 'the_secret') + + with assert_raises(EC2ResponseError) as cm: + conn.release_address(allocation_id="bogus") + cm.exception.code.should.equal('InvalidAllocationID.NotFound') + cm.exception.status.should.equal(400) + cm.exception.request_id.should_not.be.none + + +@mock_ec2_deprecated +def test_eip_disassociate_arg_error(): + """Invalid arguments disassociate address""" + conn = boto.connect_ec2('the_key', 'the_secret') + + with assert_raises(EC2ResponseError) as cm: + conn.disassociate_address() + cm.exception.code.should.equal('MissingParameter') + cm.exception.status.should.equal(400) + cm.exception.request_id.should_not.be.none + + +@mock_ec2_deprecated +def test_eip_release_arg_error(): + """Invalid arguments release address""" + conn = boto.connect_ec2('the_key', 'the_secret') + + with assert_raises(EC2ResponseError) as cm: + conn.release_address() + cm.exception.code.should.equal('MissingParameter') + cm.exception.status.should.equal(400) + cm.exception.request_id.should_not.be.none + + +@mock_ec2_deprecated +def test_eip_describe(): + """Listing of allocated Elastic IP Addresses.""" + conn = boto.connect_ec2('the_key', 'the_secret') + eips = [] + number_of_classic_ips = 2 + number_of_vpc_ips = 2 + + # allocate some IPs + for _ in range(number_of_classic_ips): + eips.append(conn.allocate_address()) + for _ in range(number_of_vpc_ips): + eips.append(conn.allocate_address(domain='vpc')) + len(eips).should.be.equal(number_of_classic_ips + number_of_vpc_ips) + + # Can we find each one individually? 
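+ # VPC addresses are keyed by allocation_id and classic addresses by + # public_ip, so the lookup below picks whichever identifier is set.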
+ for eip in eips: + if eip.allocation_id: + lookup_addresses = conn.get_all_addresses( + allocation_ids=[eip.allocation_id]) + else: + lookup_addresses = conn.get_all_addresses( + addresses=[eip.public_ip]) + len(lookup_addresses).should.be.equal(1) + lookup_addresses[0].public_ip.should.be.equal(eip.public_ip) + + # Can we find first two when we search for them? + lookup_addresses = conn.get_all_addresses( + addresses=[eips[0].public_ip, eips[1].public_ip]) + len(lookup_addresses).should.be.equal(2) + lookup_addresses[0].public_ip.should.be.equal(eips[0].public_ip) + lookup_addresses[1].public_ip.should.be.equal(eips[1].public_ip) + + # Release all IPs + for eip in eips: + eip.release() + len(conn.get_all_addresses()).should.be.equal(0) + + +@mock_ec2_deprecated +def test_eip_describe_none(): + """Error when search for bogus IP""" + conn = boto.connect_ec2('the_key', 'the_secret') + + with assert_raises(EC2ResponseError) as cm: + conn.get_all_addresses(addresses=["256.256.256.256"]) + cm.exception.code.should.equal('InvalidAddress.NotFound') + cm.exception.status.should.equal(400) + cm.exception.request_id.should_not.be.none + + +@mock_ec2 +def test_eip_filters(): + service = boto3.resource('ec2', region_name='us-west-1') + client = boto3.client('ec2', region_name='us-west-1') + vpc_res = client.create_vpc(CidrBlock='10.0.0.0/24') + subnet_res = client.create_subnet( + VpcId=vpc_res['Vpc']['VpcId'], CidrBlock='10.0.0.0/24') + + def create_inst_with_eip(): + instance = service.create_instances(**{ + 'InstanceType': 't2.micro', + 'ImageId': 'ami-test', + 'MinCount': 1, + 'MaxCount': 1, + 'SubnetId': subnet_res['Subnet']['SubnetId'] + })[0] + allocation_id = client.allocate_address(Domain='vpc')['AllocationId'] + _ = client.associate_address( + InstanceId=instance.id, + AllocationId=allocation_id, + AllowReassociation=False) + instance.load() + address = service.VpcAddress(allocation_id) + address.load() + return instance, address + + inst1, eip1 = create_inst_with_eip() + inst2, eip2 = create_inst_with_eip() + inst3, eip3 = create_inst_with_eip() + + # Param search by AllocationId + addresses = list(service.vpc_addresses.filter(AllocationIds=[eip2.allocation_id])) + len(addresses).should.be.equal(1) + addresses[0].public_ip.should.equal(eip2.public_ip) + inst2.public_ip_address.should.equal(addresses[0].public_ip) + + # Param search by PublicIp + addresses = list(service.vpc_addresses.filter(PublicIps=[eip3.public_ip])) + len(addresses).should.be.equal(1) + addresses[0].public_ip.should.equal(eip3.public_ip) + inst3.public_ip_address.should.equal(addresses[0].public_ip) + + # Param search by Filter + def check_vpc_filter_valid(filter_name, filter_values): + addresses = list(service.vpc_addresses.filter( + Filters=[{'Name': filter_name, + 'Values': filter_values}])) + len(addresses).should.equal(2) + ips = [addr.public_ip for addr in addresses] + set(ips).should.equal(set([eip1.public_ip, eip2.public_ip])) + ips.should.contain(inst1.public_ip_address) + + def check_vpc_filter_invalid(filter_name): + addresses = list(service.vpc_addresses.filter( + Filters=[{'Name': filter_name, + 'Values': ['dummy1', 'dummy2']}])) + len(addresses).should.equal(0) + + def check_vpc_filter(filter_name, filter_values): + check_vpc_filter_valid(filter_name, filter_values) + check_vpc_filter_invalid(filter_name) + + check_vpc_filter('allocation-id', [eip1.allocation_id, eip2.allocation_id]) + check_vpc_filter('association-id', [eip1.association_id, eip2.association_id]) + check_vpc_filter('instance-id', 
[inst1.id, inst2.id]) + check_vpc_filter( + 'network-interface-id', + [inst1.network_interfaces_attribute[0].get('NetworkInterfaceId'), + inst2.network_interfaces_attribute[0].get('NetworkInterfaceId')]) + check_vpc_filter( + 'private-ip-address', + [inst1.network_interfaces_attribute[0].get('PrivateIpAddress'), + inst2.network_interfaces_attribute[0].get('PrivateIpAddress')]) + check_vpc_filter('public-ip', [inst1.public_ip_address, inst2.public_ip_address]) + + # all the ips are in a VPC + addresses = list(service.vpc_addresses.filter( + Filters=[{'Name': 'domain', 'Values': ['vpc']}])) + len(addresses).should.equal(3) diff --git a/tests/test_ec2/test_elastic_network_interfaces.py b/tests/test_ec2/test_elastic_network_interfaces.py index 828f9d917bd4..56959e484389 100644 --- a/tests/test_ec2/test_elastic_network_interfaces.py +++ b/tests/test_ec2/test_elastic_network_interfaces.py @@ -1,362 +1,362 @@ -from __future__ import unicode_literals -# Ensure 'assert_raises' context manager support for Python 2.6 -import tests.backport_assert_raises -from nose.tools import assert_raises - -import boto3 -from botocore.exceptions import ClientError -import boto -import boto.cloudformation -import boto.ec2 -from boto.exception import EC2ResponseError -import sure # noqa - -from moto import mock_ec2, mock_cloudformation_deprecated, mock_ec2_deprecated -from tests.helpers import requires_boto_gte -from tests.test_cloudformation.fixtures import vpc_eni -import json - - -@mock_ec2_deprecated -def test_elastic_network_interfaces(): - conn = boto.connect_vpc('the_key', 'the_secret') - vpc = conn.create_vpc("10.0.0.0/16") - subnet = conn.create_subnet(vpc.id, "10.0.0.0/18") - - with assert_raises(EC2ResponseError) as ex: - eni = conn.create_network_interface(subnet.id, dry_run=True) - ex.exception.error_code.should.equal('DryRunOperation') - ex.exception.status.should.equal(400) - ex.exception.message.should.equal( - 'An error occurred (DryRunOperation) when calling the CreateNetworkInterface operation: Request would have succeeded, but DryRun flag is set') - - eni = conn.create_network_interface(subnet.id) - - all_enis = conn.get_all_network_interfaces() - all_enis.should.have.length_of(1) - eni = all_enis[0] - eni.groups.should.have.length_of(0) - eni.private_ip_addresses.should.have.length_of(0) - - with assert_raises(EC2ResponseError) as ex: - conn.delete_network_interface(eni.id, dry_run=True) - ex.exception.error_code.should.equal('DryRunOperation') - ex.exception.status.should.equal(400) - ex.exception.message.should.equal( - 'An error occurred (DryRunOperation) when calling the DeleteNetworkInterface operation: Request would have succeeded, but DryRun flag is set') - - conn.delete_network_interface(eni.id) - - all_enis = conn.get_all_network_interfaces() - all_enis.should.have.length_of(0) - - with assert_raises(EC2ResponseError) as cm: - conn.delete_network_interface(eni.id) - cm.exception.error_code.should.equal('InvalidNetworkInterfaceID.NotFound') - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none - - -@mock_ec2_deprecated -def test_elastic_network_interfaces_subnet_validation(): - conn = boto.connect_vpc('the_key', 'the_secret') - - with assert_raises(EC2ResponseError) as cm: - conn.create_network_interface("subnet-abcd1234") - cm.exception.error_code.should.equal('InvalidSubnetID.NotFound') - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none - - -@mock_ec2_deprecated -def test_elastic_network_interfaces_with_private_ip(): - 
conn = boto.connect_vpc('the_key', 'the_secret') - vpc = conn.create_vpc("10.0.0.0/16") - subnet = conn.create_subnet(vpc.id, "10.0.0.0/18") - private_ip = "54.0.0.1" - eni = conn.create_network_interface(subnet.id, private_ip) - - all_enis = conn.get_all_network_interfaces() - all_enis.should.have.length_of(1) - - eni = all_enis[0] - eni.groups.should.have.length_of(0) - - eni.private_ip_addresses.should.have.length_of(1) - eni.private_ip_addresses[0].private_ip_address.should.equal(private_ip) - - -@mock_ec2_deprecated -def test_elastic_network_interfaces_with_groups(): - conn = boto.connect_vpc('the_key', 'the_secret') - vpc = conn.create_vpc("10.0.0.0/16") - subnet = conn.create_subnet(vpc.id, "10.0.0.0/18") - security_group1 = conn.create_security_group( - 'test security group #1', 'this is a test security group') - security_group2 = conn.create_security_group( - 'test security group #2', 'this is a test security group') - conn.create_network_interface( - subnet.id, groups=[security_group1.id, security_group2.id]) - - all_enis = conn.get_all_network_interfaces() - all_enis.should.have.length_of(1) - - eni = all_enis[0] - eni.groups.should.have.length_of(2) - set([group.id for group in eni.groups]).should.equal( - set([security_group1.id, security_group2.id])) - - -@requires_boto_gte("2.12.0") -@mock_ec2_deprecated -def test_elastic_network_interfaces_modify_attribute(): - conn = boto.connect_vpc('the_key', 'the_secret') - vpc = conn.create_vpc("10.0.0.0/16") - subnet = conn.create_subnet(vpc.id, "10.0.0.0/18") - security_group1 = conn.create_security_group( - 'test security group #1', 'this is a test security group') - security_group2 = conn.create_security_group( - 'test security group #2', 'this is a test security group') - conn.create_network_interface(subnet.id, groups=[security_group1.id]) - - all_enis = conn.get_all_network_interfaces() - all_enis.should.have.length_of(1) - - eni = all_enis[0] - eni.groups.should.have.length_of(1) - eni.groups[0].id.should.equal(security_group1.id) - - with assert_raises(EC2ResponseError) as ex: - conn.modify_network_interface_attribute( - eni.id, 'groupset', [security_group2.id], dry_run=True) - ex.exception.error_code.should.equal('DryRunOperation') - ex.exception.status.should.equal(400) - ex.exception.message.should.equal( - 'An error occurred (DryRunOperation) when calling the ModifyNetworkInterface operation: Request would have succeeded, but DryRun flag is set') - - conn.modify_network_interface_attribute( - eni.id, 'groupset', [security_group2.id]) - - all_enis = conn.get_all_network_interfaces() - all_enis.should.have.length_of(1) - - eni = all_enis[0] - eni.groups.should.have.length_of(1) - eni.groups[0].id.should.equal(security_group2.id) - - -@mock_ec2_deprecated -def test_elastic_network_interfaces_filtering(): - conn = boto.connect_vpc('the_key', 'the_secret') - vpc = conn.create_vpc("10.0.0.0/16") - subnet = conn.create_subnet(vpc.id, "10.0.0.0/18") - - security_group1 = conn.create_security_group( - 'test security group #1', 'this is a test security group') - security_group2 = conn.create_security_group( - 'test security group #2', 'this is a test security group') - - eni1 = conn.create_network_interface( - subnet.id, groups=[security_group1.id, security_group2.id]) - eni2 = conn.create_network_interface( - subnet.id, groups=[security_group1.id]) - eni3 = conn.create_network_interface(subnet.id) - - all_enis = conn.get_all_network_interfaces() - all_enis.should.have.length_of(3) - - # Filter by NetworkInterfaceId - enis_by_id = 
conn.get_all_network_interfaces([eni1.id]) - enis_by_id.should.have.length_of(1) - set([eni.id for eni in enis_by_id]).should.equal(set([eni1.id])) - - # Filter by ENI ID - enis_by_id = conn.get_all_network_interfaces( - filters={'network-interface-id': eni1.id}) - enis_by_id.should.have.length_of(1) - set([eni.id for eni in enis_by_id]).should.equal(set([eni1.id])) - - # Filter by Security Group - enis_by_group = conn.get_all_network_interfaces( - filters={'group-id': security_group1.id}) - enis_by_group.should.have.length_of(2) - set([eni.id for eni in enis_by_group]).should.equal(set([eni1.id, eni2.id])) - - # Filter by ENI ID and Security Group - enis_by_group = conn.get_all_network_interfaces( - filters={'network-interface-id': eni1.id, 'group-id': security_group1.id}) - enis_by_group.should.have.length_of(1) - set([eni.id for eni in enis_by_group]).should.equal(set([eni1.id])) - - # Unsupported filter - conn.get_all_network_interfaces.when.called_with( - filters={'not-implemented-filter': 'foobar'}).should.throw(NotImplementedError) - - -@mock_ec2 -def test_elastic_network_interfaces_get_by_tag_name(): - ec2 = boto3.resource('ec2', region_name='us-west-2') - ec2_client = boto3.client('ec2', region_name='us-west-2') - - vpc = ec2.create_vpc(CidrBlock='10.0.0.0/16') - subnet = ec2.create_subnet( - VpcId=vpc.id, CidrBlock='10.0.0.0/24', AvailabilityZone='us-west-2a') - - eni1 = ec2.create_network_interface( - SubnetId=subnet.id, PrivateIpAddress='10.0.10.5') - - with assert_raises(ClientError) as ex: - eni1.create_tags(Tags=[{'Key': 'Name', 'Value': 'eni1'}], DryRun=True) - ex.exception.response['Error']['Code'].should.equal('DryRunOperation') - ex.exception.response['ResponseMetadata'][ - 'HTTPStatusCode'].should.equal(400) - ex.exception.response['Error']['Message'].should.equal( - 'An error occurred (DryRunOperation) when calling the CreateTags operation: Request would have succeeded, but DryRun flag is set') - - eni1.create_tags(Tags=[{'Key': 'Name', 'Value': 'eni1'}]) - - # The status of the new interface should be 'available' - waiter = ec2_client.get_waiter('network_interface_available') - waiter.wait(NetworkInterfaceIds=[eni1.id]) - - filters = [{'Name': 'tag:Name', 'Values': ['eni1']}] - enis = list(ec2.network_interfaces.filter(Filters=filters)) - enis.should.have.length_of(1) - - filters = [{'Name': 'tag:Name', 'Values': ['wrong-name']}] - enis = list(ec2.network_interfaces.filter(Filters=filters)) - enis.should.have.length_of(0) - - -@mock_ec2 -def test_elastic_network_interfaces_get_by_availability_zone(): - ec2 = boto3.resource('ec2', region_name='us-west-2') - ec2_client = boto3.client('ec2', region_name='us-west-2') - - vpc = ec2.create_vpc(CidrBlock='10.0.0.0/16') - subnet1 = ec2.create_subnet( - VpcId=vpc.id, CidrBlock='10.0.0.0/24', AvailabilityZone='us-west-2a') - - subnet2 = ec2.create_subnet( - VpcId=vpc.id, CidrBlock='10.0.1.0/24', AvailabilityZone='us-west-2b') - - eni1 = ec2.create_network_interface( - SubnetId=subnet1.id, PrivateIpAddress='10.0.0.15') - - eni2 = ec2.create_network_interface( - SubnetId=subnet2.id, PrivateIpAddress='10.0.1.15') - - # The status of the new interface should be 'available' - waiter = ec2_client.get_waiter('network_interface_available') - waiter.wait(NetworkInterfaceIds=[eni1.id, eni2.id]) - - filters = [{'Name': 'availability-zone', 'Values': ['us-west-2a']}] - enis = list(ec2.network_interfaces.filter(Filters=filters)) - enis.should.have.length_of(1) - - filters = [{'Name': 'availability-zone', 'Values': ['us-west-2c']}] - enis = 
list(ec2.network_interfaces.filter(Filters=filters)) - enis.should.have.length_of(0) - - -@mock_ec2 -def test_elastic_network_interfaces_get_by_private_ip(): - ec2 = boto3.resource('ec2', region_name='us-west-2') - ec2_client = boto3.client('ec2', region_name='us-west-2') - - vpc = ec2.create_vpc(CidrBlock='10.0.0.0/16') - subnet = ec2.create_subnet( - VpcId=vpc.id, CidrBlock='10.0.0.0/24', AvailabilityZone='us-west-2a') - - eni1 = ec2.create_network_interface( - SubnetId=subnet.id, PrivateIpAddress='10.0.10.5') - - # The status of the new interface should be 'available' - waiter = ec2_client.get_waiter('network_interface_available') - waiter.wait(NetworkInterfaceIds=[eni1.id]) - - filters = [{'Name': 'private-ip-address', 'Values': ['10.0.10.5']}] - enis = list(ec2.network_interfaces.filter(Filters=filters)) - enis.should.have.length_of(1) - - filters = [{'Name': 'private-ip-address', 'Values': ['10.0.10.10']}] - enis = list(ec2.network_interfaces.filter(Filters=filters)) - enis.should.have.length_of(0) - - filters = [{'Name': 'addresses.private-ip-address', 'Values': ['10.0.10.5']}] - enis = list(ec2.network_interfaces.filter(Filters=filters)) - enis.should.have.length_of(1) - - filters = [{'Name': 'addresses.private-ip-address', 'Values': ['10.0.10.10']}] - enis = list(ec2.network_interfaces.filter(Filters=filters)) - enis.should.have.length_of(0) - - -@mock_ec2 -def test_elastic_network_interfaces_get_by_vpc_id(): - ec2 = boto3.resource('ec2', region_name='us-west-2') - ec2_client = boto3.client('ec2', region_name='us-west-2') - - vpc = ec2.create_vpc(CidrBlock='10.0.0.0/16') - subnet = ec2.create_subnet( - VpcId=vpc.id, CidrBlock='10.0.0.0/24', AvailabilityZone='us-west-2a') - - eni1 = ec2.create_network_interface( - SubnetId=subnet.id, PrivateIpAddress='10.0.10.5') - - # The status of the new interface should be 'available' - waiter = ec2_client.get_waiter('network_interface_available') - waiter.wait(NetworkInterfaceIds=[eni1.id]) - - filters = [{'Name': 'vpc-id', 'Values': [subnet.vpc_id]}] - enis = list(ec2.network_interfaces.filter(Filters=filters)) - enis.should.have.length_of(1) - - filters = [{'Name': 'vpc-id', 'Values': ['vpc-aaaa1111']}] - enis = list(ec2.network_interfaces.filter(Filters=filters)) - enis.should.have.length_of(0) - - -@mock_ec2 -def test_elastic_network_interfaces_get_by_subnet_id(): - ec2 = boto3.resource('ec2', region_name='us-west-2') - ec2_client = boto3.client('ec2', region_name='us-west-2') - - vpc = ec2.create_vpc(CidrBlock='10.0.0.0/16') - subnet = ec2.create_subnet( - VpcId=vpc.id, CidrBlock='10.0.0.0/24', AvailabilityZone='us-west-2a') - - eni1 = ec2.create_network_interface( - SubnetId=subnet.id, PrivateIpAddress='10.0.10.5') - - # The status of the new interface should be 'available' - waiter = ec2_client.get_waiter('network_interface_available') - waiter.wait(NetworkInterfaceIds=[eni1.id]) - - filters = [{'Name': 'subnet-id', 'Values': [subnet.id]}] - enis = list(ec2.network_interfaces.filter(Filters=filters)) - enis.should.have.length_of(1) - - filters = [{'Name': 'subnet-id', 'Values': ['subnet-aaaa1111']}] - enis = list(ec2.network_interfaces.filter(Filters=filters)) - enis.should.have.length_of(0) - - -@mock_ec2_deprecated -@mock_cloudformation_deprecated -def test_elastic_network_interfaces_cloudformation(): - template = vpc_eni.template - template_json = json.dumps(template) - conn = boto.cloudformation.connect_to_region("us-west-1") - conn.create_stack( - "test_stack", - template_body=template_json, - ) - ec2_conn = 
boto.ec2.connect_to_region("us-west-1") - eni = ec2_conn.get_all_network_interfaces()[0] - - stack = conn.describe_stacks()[0] - resources = stack.describe_resources() - cfn_eni = [resource for resource in resources if resource.resource_type == - 'AWS::EC2::NetworkInterface'][0] - cfn_eni.physical_resource_id.should.equal(eni.id) +from __future__ import unicode_literals +# Ensure 'assert_raises' context manager support for Python 2.6 +import tests.backport_assert_raises +from nose.tools import assert_raises + +import boto3 +from botocore.exceptions import ClientError +import boto +import boto.cloudformation +import boto.ec2 +from boto.exception import EC2ResponseError +import sure # noqa + +from moto import mock_ec2, mock_cloudformation_deprecated, mock_ec2_deprecated +from tests.helpers import requires_boto_gte +from tests.test_cloudformation.fixtures import vpc_eni +import json + + +@mock_ec2_deprecated +def test_elastic_network_interfaces(): + conn = boto.connect_vpc('the_key', 'the_secret') + vpc = conn.create_vpc("10.0.0.0/16") + subnet = conn.create_subnet(vpc.id, "10.0.0.0/18") + + with assert_raises(EC2ResponseError) as ex: + eni = conn.create_network_interface(subnet.id, dry_run=True) + ex.exception.error_code.should.equal('DryRunOperation') + ex.exception.status.should.equal(400) + ex.exception.message.should.equal( + 'An error occurred (DryRunOperation) when calling the CreateNetworkInterface operation: Request would have succeeded, but DryRun flag is set') + + eni = conn.create_network_interface(subnet.id) + + all_enis = conn.get_all_network_interfaces() + all_enis.should.have.length_of(1) + eni = all_enis[0] + eni.groups.should.have.length_of(0) + eni.private_ip_addresses.should.have.length_of(0) + + with assert_raises(EC2ResponseError) as ex: + conn.delete_network_interface(eni.id, dry_run=True) + ex.exception.error_code.should.equal('DryRunOperation') + ex.exception.status.should.equal(400) + ex.exception.message.should.equal( + 'An error occurred (DryRunOperation) when calling the DeleteNetworkInterface operation: Request would have succeeded, but DryRun flag is set') + + conn.delete_network_interface(eni.id) + + all_enis = conn.get_all_network_interfaces() + all_enis.should.have.length_of(0) + + with assert_raises(EC2ResponseError) as cm: + conn.delete_network_interface(eni.id) + cm.exception.error_code.should.equal('InvalidNetworkInterfaceID.NotFound') + cm.exception.status.should.equal(400) + cm.exception.request_id.should_not.be.none + + +@mock_ec2_deprecated +def test_elastic_network_interfaces_subnet_validation(): + conn = boto.connect_vpc('the_key', 'the_secret') + + with assert_raises(EC2ResponseError) as cm: + conn.create_network_interface("subnet-abcd1234") + cm.exception.error_code.should.equal('InvalidSubnetID.NotFound') + cm.exception.status.should.equal(400) + cm.exception.request_id.should_not.be.none + + +@mock_ec2_deprecated +def test_elastic_network_interfaces_with_private_ip(): + conn = boto.connect_vpc('the_key', 'the_secret') + vpc = conn.create_vpc("10.0.0.0/16") + subnet = conn.create_subnet(vpc.id, "10.0.0.0/18") + private_ip = "54.0.0.1" + eni = conn.create_network_interface(subnet.id, private_ip) + + all_enis = conn.get_all_network_interfaces() + all_enis.should.have.length_of(1) + + eni = all_enis[0] + eni.groups.should.have.length_of(0) + + eni.private_ip_addresses.should.have.length_of(1) + eni.private_ip_addresses[0].private_ip_address.should.equal(private_ip) + + +@mock_ec2_deprecated +def test_elastic_network_interfaces_with_groups(): + conn 
= boto.connect_vpc('the_key', 'the_secret') + vpc = conn.create_vpc("10.0.0.0/16") + subnet = conn.create_subnet(vpc.id, "10.0.0.0/18") + security_group1 = conn.create_security_group( + 'test security group #1', 'this is a test security group') + security_group2 = conn.create_security_group( + 'test security group #2', 'this is a test security group') + conn.create_network_interface( + subnet.id, groups=[security_group1.id, security_group2.id]) + + all_enis = conn.get_all_network_interfaces() + all_enis.should.have.length_of(1) + + eni = all_enis[0] + eni.groups.should.have.length_of(2) + set([group.id for group in eni.groups]).should.equal( + set([security_group1.id, security_group2.id])) + + +@requires_boto_gte("2.12.0") +@mock_ec2_deprecated +def test_elastic_network_interfaces_modify_attribute(): + conn = boto.connect_vpc('the_key', 'the_secret') + vpc = conn.create_vpc("10.0.0.0/16") + subnet = conn.create_subnet(vpc.id, "10.0.0.0/18") + security_group1 = conn.create_security_group( + 'test security group #1', 'this is a test security group') + security_group2 = conn.create_security_group( + 'test security group #2', 'this is a test security group') + conn.create_network_interface(subnet.id, groups=[security_group1.id]) + + all_enis = conn.get_all_network_interfaces() + all_enis.should.have.length_of(1) + + eni = all_enis[0] + eni.groups.should.have.length_of(1) + eni.groups[0].id.should.equal(security_group1.id) + + with assert_raises(EC2ResponseError) as ex: + conn.modify_network_interface_attribute( + eni.id, 'groupset', [security_group2.id], dry_run=True) + ex.exception.error_code.should.equal('DryRunOperation') + ex.exception.status.should.equal(400) + ex.exception.message.should.equal( + 'An error occurred (DryRunOperation) when calling the ModifyNetworkInterface operation: Request would have succeeded, but DryRun flag is set') + + conn.modify_network_interface_attribute( + eni.id, 'groupset', [security_group2.id]) + + all_enis = conn.get_all_network_interfaces() + all_enis.should.have.length_of(1) + + eni = all_enis[0] + eni.groups.should.have.length_of(1) + eni.groups[0].id.should.equal(security_group2.id) + + +@mock_ec2_deprecated +def test_elastic_network_interfaces_filtering(): + conn = boto.connect_vpc('the_key', 'the_secret') + vpc = conn.create_vpc("10.0.0.0/16") + subnet = conn.create_subnet(vpc.id, "10.0.0.0/18") + + security_group1 = conn.create_security_group( + 'test security group #1', 'this is a test security group') + security_group2 = conn.create_security_group( + 'test security group #2', 'this is a test security group') + + eni1 = conn.create_network_interface( + subnet.id, groups=[security_group1.id, security_group2.id]) + eni2 = conn.create_network_interface( + subnet.id, groups=[security_group1.id]) + eni3 = conn.create_network_interface(subnet.id) + + all_enis = conn.get_all_network_interfaces() + all_enis.should.have.length_of(3) + + # Filter by NetworkInterfaceId + enis_by_id = conn.get_all_network_interfaces([eni1.id]) + enis_by_id.should.have.length_of(1) + set([eni.id for eni in enis_by_id]).should.equal(set([eni1.id])) + + # Filter by ENI ID + enis_by_id = conn.get_all_network_interfaces( + filters={'network-interface-id': eni1.id}) + enis_by_id.should.have.length_of(1) + set([eni.id for eni in enis_by_id]).should.equal(set([eni1.id])) + + # Filter by Security Group + enis_by_group = conn.get_all_network_interfaces( + filters={'group-id': security_group1.id}) + enis_by_group.should.have.length_of(2) + set([eni.id for eni in 
enis_by_group]).should.equal(set([eni1.id, eni2.id])) + + # Filter by ENI ID and Security Group + enis_by_group = conn.get_all_network_interfaces( + filters={'network-interface-id': eni1.id, 'group-id': security_group1.id}) + enis_by_group.should.have.length_of(1) + set([eni.id for eni in enis_by_group]).should.equal(set([eni1.id])) + + # Unsupported filter + conn.get_all_network_interfaces.when.called_with( + filters={'not-implemented-filter': 'foobar'}).should.throw(NotImplementedError) + + +@mock_ec2 +def test_elastic_network_interfaces_get_by_tag_name(): + ec2 = boto3.resource('ec2', region_name='us-west-2') + ec2_client = boto3.client('ec2', region_name='us-west-2') + + vpc = ec2.create_vpc(CidrBlock='10.0.0.0/16') + subnet = ec2.create_subnet( + VpcId=vpc.id, CidrBlock='10.0.0.0/24', AvailabilityZone='us-west-2a') + + eni1 = ec2.create_network_interface( + SubnetId=subnet.id, PrivateIpAddress='10.0.10.5') + + with assert_raises(ClientError) as ex: + eni1.create_tags(Tags=[{'Key': 'Name', 'Value': 'eni1'}], DryRun=True) + ex.exception.response['Error']['Code'].should.equal('DryRunOperation') + ex.exception.response['ResponseMetadata'][ + 'HTTPStatusCode'].should.equal(400) + ex.exception.response['Error']['Message'].should.equal( + 'An error occurred (DryRunOperation) when calling the CreateTags operation: Request would have succeeded, but DryRun flag is set') + + eni1.create_tags(Tags=[{'Key': 'Name', 'Value': 'eni1'}]) + + # The status of the new interface should be 'available' + waiter = ec2_client.get_waiter('network_interface_available') + waiter.wait(NetworkInterfaceIds=[eni1.id]) + + filters = [{'Name': 'tag:Name', 'Values': ['eni1']}] + enis = list(ec2.network_interfaces.filter(Filters=filters)) + enis.should.have.length_of(1) + + filters = [{'Name': 'tag:Name', 'Values': ['wrong-name']}] + enis = list(ec2.network_interfaces.filter(Filters=filters)) + enis.should.have.length_of(0) + + +@mock_ec2 +def test_elastic_network_interfaces_get_by_availability_zone(): + ec2 = boto3.resource('ec2', region_name='us-west-2') + ec2_client = boto3.client('ec2', region_name='us-west-2') + + vpc = ec2.create_vpc(CidrBlock='10.0.0.0/16') + subnet1 = ec2.create_subnet( + VpcId=vpc.id, CidrBlock='10.0.0.0/24', AvailabilityZone='us-west-2a') + + subnet2 = ec2.create_subnet( + VpcId=vpc.id, CidrBlock='10.0.1.0/24', AvailabilityZone='us-west-2b') + + eni1 = ec2.create_network_interface( + SubnetId=subnet1.id, PrivateIpAddress='10.0.0.15') + + eni2 = ec2.create_network_interface( + SubnetId=subnet2.id, PrivateIpAddress='10.0.1.15') + + # The status of the new interface should be 'available' + waiter = ec2_client.get_waiter('network_interface_available') + waiter.wait(NetworkInterfaceIds=[eni1.id, eni2.id]) + + filters = [{'Name': 'availability-zone', 'Values': ['us-west-2a']}] + enis = list(ec2.network_interfaces.filter(Filters=filters)) + enis.should.have.length_of(1) + + filters = [{'Name': 'availability-zone', 'Values': ['us-west-2c']}] + enis = list(ec2.network_interfaces.filter(Filters=filters)) + enis.should.have.length_of(0) + + +@mock_ec2 +def test_elastic_network_interfaces_get_by_private_ip(): + ec2 = boto3.resource('ec2', region_name='us-west-2') + ec2_client = boto3.client('ec2', region_name='us-west-2') + + vpc = ec2.create_vpc(CidrBlock='10.0.0.0/16') + subnet = ec2.create_subnet( + VpcId=vpc.id, CidrBlock='10.0.0.0/24', AvailabilityZone='us-west-2a') + + eni1 = ec2.create_network_interface( + SubnetId=subnet.id, PrivateIpAddress='10.0.10.5') + + # The status of the new interface 
should be 'available' + waiter = ec2_client.get_waiter('network_interface_available') + waiter.wait(NetworkInterfaceIds=[eni1.id]) + + filters = [{'Name': 'private-ip-address', 'Values': ['10.0.10.5']}] + enis = list(ec2.network_interfaces.filter(Filters=filters)) + enis.should.have.length_of(1) + + filters = [{'Name': 'private-ip-address', 'Values': ['10.0.10.10']}] + enis = list(ec2.network_interfaces.filter(Filters=filters)) + enis.should.have.length_of(0) + + filters = [{'Name': 'addresses.private-ip-address', 'Values': ['10.0.10.5']}] + enis = list(ec2.network_interfaces.filter(Filters=filters)) + enis.should.have.length_of(1) + + filters = [{'Name': 'addresses.private-ip-address', 'Values': ['10.0.10.10']}] + enis = list(ec2.network_interfaces.filter(Filters=filters)) + enis.should.have.length_of(0) + + +@mock_ec2 +def test_elastic_network_interfaces_get_by_vpc_id(): + ec2 = boto3.resource('ec2', region_name='us-west-2') + ec2_client = boto3.client('ec2', region_name='us-west-2') + + vpc = ec2.create_vpc(CidrBlock='10.0.0.0/16') + subnet = ec2.create_subnet( + VpcId=vpc.id, CidrBlock='10.0.0.0/24', AvailabilityZone='us-west-2a') + + eni1 = ec2.create_network_interface( + SubnetId=subnet.id, PrivateIpAddress='10.0.10.5') + + # The status of the new interface should be 'available' + waiter = ec2_client.get_waiter('network_interface_available') + waiter.wait(NetworkInterfaceIds=[eni1.id]) + + filters = [{'Name': 'vpc-id', 'Values': [subnet.vpc_id]}] + enis = list(ec2.network_interfaces.filter(Filters=filters)) + enis.should.have.length_of(1) + + filters = [{'Name': 'vpc-id', 'Values': ['vpc-aaaa1111']}] + enis = list(ec2.network_interfaces.filter(Filters=filters)) + enis.should.have.length_of(0) + + +@mock_ec2 +def test_elastic_network_interfaces_get_by_subnet_id(): + ec2 = boto3.resource('ec2', region_name='us-west-2') + ec2_client = boto3.client('ec2', region_name='us-west-2') + + vpc = ec2.create_vpc(CidrBlock='10.0.0.0/16') + subnet = ec2.create_subnet( + VpcId=vpc.id, CidrBlock='10.0.0.0/24', AvailabilityZone='us-west-2a') + + eni1 = ec2.create_network_interface( + SubnetId=subnet.id, PrivateIpAddress='10.0.10.5') + + # The status of the new interface should be 'available' + waiter = ec2_client.get_waiter('network_interface_available') + waiter.wait(NetworkInterfaceIds=[eni1.id]) + + filters = [{'Name': 'subnet-id', 'Values': [subnet.id]}] + enis = list(ec2.network_interfaces.filter(Filters=filters)) + enis.should.have.length_of(1) + + filters = [{'Name': 'subnet-id', 'Values': ['subnet-aaaa1111']}] + enis = list(ec2.network_interfaces.filter(Filters=filters)) + enis.should.have.length_of(0) + + +@mock_ec2_deprecated +@mock_cloudformation_deprecated +def test_elastic_network_interfaces_cloudformation(): + template = vpc_eni.template + template_json = json.dumps(template) + conn = boto.cloudformation.connect_to_region("us-west-1") + conn.create_stack( + "test_stack", + template_body=template_json, + ) + ec2_conn = boto.ec2.connect_to_region("us-west-1") + eni = ec2_conn.get_all_network_interfaces()[0] + + stack = conn.describe_stacks()[0] + resources = stack.describe_resources() + cfn_eni = [resource for resource in resources if resource.resource_type == + 'AWS::EC2::NetworkInterface'][0] + cfn_eni.physical_resource_id.should.equal(eni.id) diff --git a/tests/test_ec2/test_general.py b/tests/test_ec2/test_general.py index 4c319d30d2ff..7249af6a2f02 100644 --- a/tests/test_ec2/test_general.py +++ b/tests/test_ec2/test_general.py @@ -1,42 +1,42 @@ -from __future__ import 
unicode_literals -# Ensure 'assert_raises' context manager support for Python 2.6 -import tests.backport_assert_raises -from nose.tools import assert_raises - -import boto -import boto3 -from boto.exception import EC2ResponseError -import sure # noqa - -from moto import mock_ec2_deprecated, mock_ec2 - - -@mock_ec2_deprecated -def test_console_output(): - conn = boto.connect_ec2('the_key', 'the_secret') - reservation = conn.run_instances('ami-1234abcd') - instance_id = reservation.instances[0].id - output = conn.get_console_output(instance_id) - output.output.should_not.equal(None) - - -@mock_ec2_deprecated -def test_console_output_without_instance(): - conn = boto.connect_ec2('the_key', 'the_secret') - - with assert_raises(EC2ResponseError) as cm: - conn.get_console_output('i-1234abcd') - cm.exception.code.should.equal('InvalidInstanceID.NotFound') - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none - - -@mock_ec2 -def test_console_output_boto3(): - conn = boto3.resource('ec2', 'us-east-1') - instances = conn.create_instances(ImageId='ami-1234abcd', - MinCount=1, - MaxCount=1) - - output = instances[0].console_output() - output.get('Output').should_not.equal(None) +from __future__ import unicode_literals +# Ensure 'assert_raises' context manager support for Python 2.6 +import tests.backport_assert_raises +from nose.tools import assert_raises + +import boto +import boto3 +from boto.exception import EC2ResponseError +import sure # noqa + +from moto import mock_ec2_deprecated, mock_ec2 + + +@mock_ec2_deprecated +def test_console_output(): + conn = boto.connect_ec2('the_key', 'the_secret') + reservation = conn.run_instances('ami-1234abcd') + instance_id = reservation.instances[0].id + output = conn.get_console_output(instance_id) + output.output.should_not.equal(None) + + +@mock_ec2_deprecated +def test_console_output_without_instance(): + conn = boto.connect_ec2('the_key', 'the_secret') + + with assert_raises(EC2ResponseError) as cm: + conn.get_console_output('i-1234abcd') + cm.exception.code.should.equal('InvalidInstanceID.NotFound') + cm.exception.status.should.equal(400) + cm.exception.request_id.should_not.be.none + + +@mock_ec2 +def test_console_output_boto3(): + conn = boto3.resource('ec2', 'us-east-1') + instances = conn.create_instances(ImageId='ami-1234abcd', + MinCount=1, + MaxCount=1) + + output = instances[0].console_output() + output.get('Output').should_not.equal(None) diff --git a/tests/test_ec2/test_instances.py b/tests/test_ec2/test_instances.py index 84b4fbd7dddd..109017b3c6ee 100644 --- a/tests/test_ec2/test_instances.py +++ b/tests/test_ec2/test_instances.py @@ -1,1256 +1,1256 @@ -from __future__ import unicode_literals -# Ensure 'assert_raises' context manager support for Python 2.6 -import tests.backport_assert_raises -from nose.tools import assert_raises - -import base64 -import datetime -import ipaddress - -import six -import boto -import boto3 -from boto.ec2.instance import Reservation, InstanceAttribute -from boto.exception import EC2ResponseError, EC2ResponseError -from freezegun import freeze_time -import sure # noqa - -from moto import mock_ec2_deprecated, mock_ec2 -from tests.helpers import requires_boto_gte - - -################ Test Readme ############### -def add_servers(ami_id, count): - conn = boto.connect_ec2() - for index in range(count): - conn.run_instances(ami_id) - - -@mock_ec2_deprecated -def test_add_servers(): - add_servers('ami-1234abcd', 2) - - conn = boto.connect_ec2() - reservations = conn.get_all_instances() - 
assert len(reservations) == 2 - instance1 = reservations[0].instances[0] - assert instance1.image_id == 'ami-1234abcd' - -############################################ - - -@freeze_time("2014-01-01 05:00:00") -@mock_ec2_deprecated -def test_instance_launch_and_terminate(): - conn = boto.connect_ec2('the_key', 'the_secret') - - with assert_raises(EC2ResponseError) as ex: - reservation = conn.run_instances('ami-1234abcd', dry_run=True) - ex.exception.error_code.should.equal('DryRunOperation') - ex.exception.status.should.equal(400) - ex.exception.message.should.equal( - 'An error occurred (DryRunOperation) when calling the RunInstance operation: Request would have succeeded, but DryRun flag is set') - - reservation = conn.run_instances('ami-1234abcd') - reservation.should.be.a(Reservation) - reservation.instances.should.have.length_of(1) - instance = reservation.instances[0] - instance.state.should.equal('pending') - - reservations = conn.get_all_instances() - reservations.should.have.length_of(1) - reservations[0].id.should.equal(reservation.id) - instances = reservations[0].instances - instances.should.have.length_of(1) - instance = instances[0] - instance.id.should.equal(instance.id) - instance.state.should.equal('running') - instance.launch_time.should.equal("2014-01-01T05:00:00.000Z") - instance.vpc_id.should.equal(None) - instance.placement.should.equal('us-east-1a') - - root_device_name = instance.root_device_name - instance.block_device_mapping[ - root_device_name].status.should.equal('in-use') - volume_id = instance.block_device_mapping[root_device_name].volume_id - volume_id.should.match(r'vol-\w+') - - volume = conn.get_all_volumes(volume_ids=[volume_id])[0] - volume.attach_data.instance_id.should.equal(instance.id) - volume.status.should.equal('in-use') - - with assert_raises(EC2ResponseError) as ex: - conn.terminate_instances([instance.id], dry_run=True) - ex.exception.error_code.should.equal('DryRunOperation') - ex.exception.status.should.equal(400) - ex.exception.message.should.equal( - 'An error occurred (DryRunOperation) when calling the TerminateInstance operation: Request would have succeeded, but DryRun flag is set') - - conn.terminate_instances([instance.id]) - - reservations = conn.get_all_instances() - instance = reservations[0].instances[0] - instance.state.should.equal('terminated') - - -@mock_ec2_deprecated -def test_terminate_empty_instances(): - conn = boto.connect_ec2('the_key', 'the_secret') - conn.terminate_instances.when.called_with( - []).should.throw(EC2ResponseError) - - -@freeze_time("2014-01-01 05:00:00") -@mock_ec2_deprecated -def test_instance_attach_volume(): - conn = boto.connect_ec2('the_key', 'the_secret') - reservation = conn.run_instances('ami-1234abcd') - instance = reservation.instances[0] - - vol1 = conn.create_volume(size=36, zone=conn.region.name) - vol1.attach(instance.id, "/dev/sda1") - vol1.update() - vol2 = conn.create_volume(size=65, zone=conn.region.name) - vol2.attach(instance.id, "/dev/sdb1") - vol2.update() - vol3 = conn.create_volume(size=130, zone=conn.region.name) - vol3.attach(instance.id, "/dev/sdc1") - vol3.update() - - reservations = conn.get_all_instances() - instance = reservations[0].instances[0] - - instance.block_device_mapping.should.have.length_of(3) - - for v in conn.get_all_volumes(volume_ids=[instance.block_device_mapping['/dev/sdc1'].volume_id]): - v.attach_data.instance_id.should.equal(instance.id) - # can do due to freeze_time decorator. 
- v.attach_data.attach_time.should.equal(instance.launch_time) - # can do due to freeze_time decorator. - v.create_time.should.equal(instance.launch_time) - v.region.name.should.equal(instance.region.name) - v.status.should.equal('in-use') - - -@mock_ec2_deprecated -def test_get_instances_by_id(): - conn = boto.connect_ec2() - reservation = conn.run_instances('ami-1234abcd', min_count=2) - instance1, instance2 = reservation.instances - - reservations = conn.get_all_instances(instance_ids=[instance1.id]) - reservations.should.have.length_of(1) - reservation = reservations[0] - reservation.instances.should.have.length_of(1) - reservation.instances[0].id.should.equal(instance1.id) - - reservations = conn.get_all_instances( - instance_ids=[instance1.id, instance2.id]) - reservations.should.have.length_of(1) - reservation = reservations[0] - reservation.instances.should.have.length_of(2) - instance_ids = [instance.id for instance in reservation.instances] - instance_ids.should.equal([instance1.id, instance2.id]) - - # Call get_all_instances with a bad id should raise an error - with assert_raises(EC2ResponseError) as cm: - conn.get_all_instances(instance_ids=[instance1.id, "i-1234abcd"]) - cm.exception.code.should.equal('InvalidInstanceID.NotFound') - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none - - -@mock_ec2 -def test_get_paginated_instances(): - image_id = 'ami-1234abcd' - client = boto3.client('ec2', region_name='us-east-1') - conn = boto3.resource('ec2', 'us-east-1') - for i in range(100): - conn.create_instances(ImageId=image_id, - MinCount=1, - MaxCount=1) - resp = client.describe_instances(MaxResults=50) - reservations = resp['Reservations'] - reservations.should.have.length_of(50) - next_token = resp['NextToken'] - next_token.should_not.be.none - resp2 = client.describe_instances(NextToken=next_token) - reservations.extend(resp2['Reservations']) - reservations.should.have.length_of(100) - assert 'NextToken' not in resp2.keys() - - -@mock_ec2 -def test_create_with_tags(): - ec2 = boto3.client('ec2', region_name='us-west-2') - instances = ec2.run_instances( - ImageId='ami-123', - MinCount=1, - MaxCount=1, - InstanceType='t2.micro', - TagSpecifications=[ - { - 'ResourceType': 'instance', - 'Tags': [ - { - 'Key': 'MY_TAG1', - 'Value': 'MY_VALUE1', - }, - { - 'Key': 'MY_TAG2', - 'Value': 'MY_VALUE2', - }, - ], - }, - { - 'ResourceType': 'instance', - 'Tags': [ - { - 'Key': 'MY_TAG3', - 'Value': 'MY_VALUE3', - }, - ] - }, - ], - ) - assert 'Tags' in instances['Instances'][0] - len(instances['Instances'][0]['Tags']).should.equal(3) - - -@mock_ec2_deprecated -def test_get_instances_filtering_by_state(): - conn = boto.connect_ec2() - reservation = conn.run_instances('ami-1234abcd', min_count=3) - instance1, instance2, instance3 = reservation.instances - - conn.terminate_instances([instance1.id]) - - reservations = conn.get_all_instances( - filters={'instance-state-name': 'running'}) - reservations.should.have.length_of(1) - # Since we terminated instance1, only instance2 and instance3 should be - # returned - instance_ids = [instance.id for instance in reservations[0].instances] - set(instance_ids).should.equal(set([instance2.id, instance3.id])) - - reservations = conn.get_all_instances( - [instance2.id], filters={'instance-state-name': 'running'}) - reservations.should.have.length_of(1) - instance_ids = [instance.id for instance in reservations[0].instances] - instance_ids.should.equal([instance2.id]) - - reservations = conn.get_all_instances( - 
[instance2.id], filters={'instance-state-name': 'terminated'}) - list(reservations).should.equal([]) - - # get_all_instances should still return all 3 - reservations = conn.get_all_instances() - reservations[0].instances.should.have.length_of(3) - - conn.get_all_instances.when.called_with( - filters={'not-implemented-filter': 'foobar'}).should.throw(NotImplementedError) - - -@mock_ec2_deprecated -def test_get_instances_filtering_by_instance_id(): - conn = boto.connect_ec2() - reservation = conn.run_instances('ami-1234abcd', min_count=3) - instance1, instance2, instance3 = reservation.instances - - reservations = conn.get_all_instances( - filters={'instance-id': instance1.id}) - # get_all_instances should return just instance1 - reservations[0].instances.should.have.length_of(1) - reservations[0].instances[0].id.should.equal(instance1.id) - - reservations = conn.get_all_instances( - filters={'instance-id': [instance1.id, instance2.id]}) - # get_all_instances should return two - reservations[0].instances.should.have.length_of(2) - - reservations = conn.get_all_instances( - filters={'instance-id': 'non-existing-id'}) - reservations.should.have.length_of(0) - - -@mock_ec2_deprecated -def test_get_instances_filtering_by_instance_type(): - conn = boto.connect_ec2() - reservation1 = conn.run_instances('ami-1234abcd', instance_type='m1.small') - instance1 = reservation1.instances[0] - reservation2 = conn.run_instances('ami-1234abcd', instance_type='m1.small') - instance2 = reservation2.instances[0] - reservation3 = conn.run_instances('ami-1234abcd', instance_type='t1.micro') - instance3 = reservation3.instances[0] - - reservations = conn.get_all_instances( - filters={'instance-type': 'm1.small'}) - # get_all_instances should return instance1,2 - reservations.should.have.length_of(2) - reservations[0].instances.should.have.length_of(1) - reservations[1].instances.should.have.length_of(1) - instance_ids = [reservations[0].instances[0].id, - reservations[1].instances[0].id] - set(instance_ids).should.equal(set([instance1.id, instance2.id])) - - reservations = conn.get_all_instances( - filters={'instance-type': 't1.micro'}) - # get_all_instances should return one - reservations.should.have.length_of(1) - reservations[0].instances.should.have.length_of(1) - reservations[0].instances[0].id.should.equal(instance3.id) - - reservations = conn.get_all_instances( - filters={'instance-type': ['t1.micro', 'm1.small']}) - reservations.should.have.length_of(3) - reservations[0].instances.should.have.length_of(1) - reservations[1].instances.should.have.length_of(1) - reservations[2].instances.should.have.length_of(1) - instance_ids = [ - reservations[0].instances[0].id, - reservations[1].instances[0].id, - reservations[2].instances[0].id, - ] - set(instance_ids).should.equal( - set([instance1.id, instance2.id, instance3.id])) - - reservations = conn.get_all_instances(filters={'instance-type': 'bogus'}) - # bogus instance-type should return none - reservations.should.have.length_of(0) - - -@mock_ec2_deprecated -def test_get_instances_filtering_by_reason_code(): - conn = boto.connect_ec2() - reservation = conn.run_instances('ami-1234abcd', min_count=3) - instance1, instance2, instance3 = reservation.instances - instance1.stop() - instance2.terminate() - - reservations = conn.get_all_instances( - filters={'state-reason-code': 'Client.UserInitiatedShutdown'}) - # get_all_instances should return instance1 and instance2 - reservations[0].instances.should.have.length_of(2) - set([instance1.id, 
instance2.id]).should.equal( - set([i.id for i in reservations[0].instances])) - - reservations = conn.get_all_instances(filters={'state-reason-code': ''}) - # get_all_instances should return instance 3 - reservations[0].instances.should.have.length_of(1) - reservations[0].instances[0].id.should.equal(instance3.id) - - -@mock_ec2_deprecated -def test_get_instances_filtering_by_source_dest_check(): - conn = boto.connect_ec2() - reservation = conn.run_instances('ami-1234abcd', min_count=2) - instance1, instance2 = reservation.instances - conn.modify_instance_attribute( - instance1.id, attribute='sourceDestCheck', value=False) - - source_dest_check_false = conn.get_all_instances( - filters={'source-dest-check': 'false'}) - source_dest_check_true = conn.get_all_instances( - filters={'source-dest-check': 'true'}) - - source_dest_check_false[0].instances.should.have.length_of(1) - source_dest_check_false[0].instances[0].id.should.equal(instance1.id) - - source_dest_check_true[0].instances.should.have.length_of(1) - source_dest_check_true[0].instances[0].id.should.equal(instance2.id) - - -@mock_ec2_deprecated -def test_get_instances_filtering_by_vpc_id(): - conn = boto.connect_vpc('the_key', 'the_secret') - vpc1 = conn.create_vpc("10.0.0.0/16") - subnet1 = conn.create_subnet(vpc1.id, "10.0.0.0/27") - reservation1 = conn.run_instances( - 'ami-1234abcd', min_count=1, subnet_id=subnet1.id) - instance1 = reservation1.instances[0] - - vpc2 = conn.create_vpc("10.1.0.0/16") - subnet2 = conn.create_subnet(vpc2.id, "10.1.0.0/27") - reservation2 = conn.run_instances( - 'ami-1234abcd', min_count=1, subnet_id=subnet2.id) - instance2 = reservation2.instances[0] - - reservations1 = conn.get_all_instances(filters={'vpc-id': vpc1.id}) - reservations1.should.have.length_of(1) - reservations1[0].instances.should.have.length_of(1) - reservations1[0].instances[0].id.should.equal(instance1.id) - reservations1[0].instances[0].vpc_id.should.equal(vpc1.id) - reservations1[0].instances[0].subnet_id.should.equal(subnet1.id) - - reservations2 = conn.get_all_instances(filters={'vpc-id': vpc2.id}) - reservations2.should.have.length_of(1) - reservations2[0].instances.should.have.length_of(1) - reservations2[0].instances[0].id.should.equal(instance2.id) - reservations2[0].instances[0].vpc_id.should.equal(vpc2.id) - reservations2[0].instances[0].subnet_id.should.equal(subnet2.id) - - -@mock_ec2_deprecated -def test_get_instances_filtering_by_architecture(): - conn = boto.connect_ec2() - reservation = conn.run_instances('ami-1234abcd', min_count=1) - instance = reservation.instances - - reservations = conn.get_all_instances(filters={'architecture': 'x86_64'}) - # get_all_instances should return the instance - reservations[0].instances.should.have.length_of(1) - - -@mock_ec2 -def test_get_instances_filtering_by_image_id(): - image_id = 'ami-1234abcd' - client = boto3.client('ec2', region_name='us-east-1') - conn = boto3.resource('ec2', 'us-east-1') - conn.create_instances(ImageId=image_id, - MinCount=1, - MaxCount=1) - - reservations = client.describe_instances(Filters=[{'Name': 'image-id', - 'Values': [image_id]}])['Reservations'] - reservations[0]['Instances'].should.have.length_of(1) - - -@mock_ec2 -def test_get_instances_filtering_by_private_dns(): - image_id = 'ami-1234abcd' - client = boto3.client('ec2', region_name='us-east-1') - conn = boto3.resource('ec2', 'us-east-1') - conn.create_instances(ImageId=image_id, - MinCount=1, - MaxCount=1, - PrivateIpAddress='10.0.0.1') - reservations = client.describe_instances(Filters=[ 
- {'Name': 'private-dns-name', 'Values': ['ip-10-0-0-1.ec2.internal']} - ])['Reservations'] - reservations[0]['Instances'].should.have.length_of(1) - - -@mock_ec2 -def test_get_instances_filtering_by_ni_private_dns(): - image_id = 'ami-1234abcd' - client = boto3.client('ec2', region_name='us-west-2') - conn = boto3.resource('ec2', 'us-west-2') - conn.create_instances(ImageId=image_id, - MinCount=1, - MaxCount=1, - PrivateIpAddress='10.0.0.1') - reservations = client.describe_instances(Filters=[ - {'Name': 'network-interface.private-dns-name', 'Values': ['ip-10-0-0-1.us-west-2.compute.internal']} - ])['Reservations'] - reservations[0]['Instances'].should.have.length_of(1) - - -@mock_ec2 -def test_get_instances_filtering_by_instance_group_name(): - image_id = 'ami-1234abcd' - client = boto3.client('ec2', region_name='us-east-1') - client.create_security_group( - Description='test', - GroupName='test_sg' - ) - client.run_instances(ImageId=image_id, - MinCount=1, - MaxCount=1, - SecurityGroups=['test_sg']) - reservations = client.describe_instances(Filters=[ - {'Name': 'instance.group-name', 'Values': ['test_sg']} - ])['Reservations'] - reservations[0]['Instances'].should.have.length_of(1) - - -@mock_ec2 -def test_get_instances_filtering_by_instance_group_id(): - image_id = 'ami-1234abcd' - client = boto3.client('ec2', region_name='us-east-1') - create_sg = client.create_security_group( - Description='test', - GroupName='test_sg' - ) - group_id = create_sg['GroupId'] - client.run_instances(ImageId=image_id, - MinCount=1, - MaxCount=1, - SecurityGroups=['test_sg']) - reservations = client.describe_instances(Filters=[ - {'Name': 'instance.group-id', 'Values': [group_id]} - ])['Reservations'] - reservations[0]['Instances'].should.have.length_of(1) - - -@mock_ec2_deprecated -def test_get_instances_filtering_by_tag(): - conn = boto.connect_ec2() - reservation = conn.run_instances('ami-1234abcd', min_count=3) - instance1, instance2, instance3 = reservation.instances - instance1.add_tag('tag1', 'value1') - instance1.add_tag('tag2', 'value2') - instance2.add_tag('tag1', 'value1') - instance2.add_tag('tag2', 'wrong value') - instance3.add_tag('tag2', 'value2') - - reservations = conn.get_all_instances(filters={'tag:tag0': 'value0'}) - # get_all_instances should return no instances - reservations.should.have.length_of(0) - - reservations = conn.get_all_instances(filters={'tag:tag1': 'value1'}) - # get_all_instances should return both instances with this tag value - reservations.should.have.length_of(1) - reservations[0].instances.should.have.length_of(2) - reservations[0].instances[0].id.should.equal(instance1.id) - reservations[0].instances[1].id.should.equal(instance2.id) - - reservations = conn.get_all_instances( - filters={'tag:tag1': 'value1', 'tag:tag2': 'value2'}) - # get_all_instances should return the instance with both tag values - reservations.should.have.length_of(1) - reservations[0].instances.should.have.length_of(1) - reservations[0].instances[0].id.should.equal(instance1.id) - - reservations = conn.get_all_instances( - filters={'tag:tag1': 'value1', 'tag:tag2': 'value2'}) - # get_all_instances should return the instance with both tag values - reservations.should.have.length_of(1) - reservations[0].instances.should.have.length_of(1) - reservations[0].instances[0].id.should.equal(instance1.id) - - reservations = conn.get_all_instances( - filters={'tag:tag2': ['value2', 'bogus']}) - # get_all_instances should return both instances with one of the - # acceptable tag values - 
reservations.should.have.length_of(1) - reservations[0].instances.should.have.length_of(2) - reservations[0].instances[0].id.should.equal(instance1.id) - reservations[0].instances[1].id.should.equal(instance3.id) - - -@mock_ec2_deprecated -def test_get_instances_filtering_by_tag_value(): - conn = boto.connect_ec2() - reservation = conn.run_instances('ami-1234abcd', min_count=3) - instance1, instance2, instance3 = reservation.instances - instance1.add_tag('tag1', 'value1') - instance1.add_tag('tag2', 'value2') - instance2.add_tag('tag1', 'value1') - instance2.add_tag('tag2', 'wrong value') - instance3.add_tag('tag2', 'value2') - - reservations = conn.get_all_instances(filters={'tag-value': 'value0'}) - # get_all_instances should return no instances - reservations.should.have.length_of(0) - - reservations = conn.get_all_instances(filters={'tag-value': 'value1'}) - # get_all_instances should return both instances with this tag value - reservations.should.have.length_of(1) - reservations[0].instances.should.have.length_of(2) - reservations[0].instances[0].id.should.equal(instance1.id) - reservations[0].instances[1].id.should.equal(instance2.id) - - reservations = conn.get_all_instances( - filters={'tag-value': ['value2', 'value1']}) - # get_all_instances should return both instances with one of the - # acceptable tag values - reservations.should.have.length_of(1) - reservations[0].instances.should.have.length_of(3) - reservations[0].instances[0].id.should.equal(instance1.id) - reservations[0].instances[1].id.should.equal(instance2.id) - reservations[0].instances[2].id.should.equal(instance3.id) - - reservations = conn.get_all_instances( - filters={'tag-value': ['value2', 'bogus']}) - # get_all_instances should return both instances with one of the - # acceptable tag values - reservations.should.have.length_of(1) - reservations[0].instances.should.have.length_of(2) - reservations[0].instances[0].id.should.equal(instance1.id) - reservations[0].instances[1].id.should.equal(instance3.id) - - -@mock_ec2_deprecated -def test_get_instances_filtering_by_tag_name(): - conn = boto.connect_ec2() - reservation = conn.run_instances('ami-1234abcd', min_count=3) - instance1, instance2, instance3 = reservation.instances - instance1.add_tag('tag1') - instance1.add_tag('tag2') - instance2.add_tag('tag1') - instance2.add_tag('tag2X') - instance3.add_tag('tag3') - - reservations = conn.get_all_instances(filters={'tag-key': 'tagX'}) - # get_all_instances should return no instances - reservations.should.have.length_of(0) - - reservations = conn.get_all_instances(filters={'tag-key': 'tag1'}) - # get_all_instances should return both instances with this tag value - reservations.should.have.length_of(1) - reservations[0].instances.should.have.length_of(2) - reservations[0].instances[0].id.should.equal(instance1.id) - reservations[0].instances[1].id.should.equal(instance2.id) - - reservations = conn.get_all_instances( - filters={'tag-key': ['tag1', 'tag3']}) - # get_all_instances should return both instances with one of the - # acceptable tag values - reservations.should.have.length_of(1) - reservations[0].instances.should.have.length_of(3) - reservations[0].instances[0].id.should.equal(instance1.id) - reservations[0].instances[1].id.should.equal(instance2.id) - reservations[0].instances[2].id.should.equal(instance3.id) - - -@mock_ec2_deprecated -def test_instance_start_and_stop(): - conn = boto.connect_ec2('the_key', 'the_secret') - reservation = conn.run_instances('ami-1234abcd', min_count=2) - instances = 
reservation.instances - instances.should.have.length_of(2) - - instance_ids = [instance.id for instance in instances] - - with assert_raises(EC2ResponseError) as ex: - stopped_instances = conn.stop_instances(instance_ids, dry_run=True) - ex.exception.error_code.should.equal('DryRunOperation') - ex.exception.status.should.equal(400) - ex.exception.message.should.equal( - 'An error occurred (DryRunOperation) when calling the StopInstance operation: Request would have succeeded, but DryRun flag is set') - - stopped_instances = conn.stop_instances(instance_ids) - - for instance in stopped_instances: - instance.state.should.equal('stopping') - - with assert_raises(EC2ResponseError) as ex: - started_instances = conn.start_instances( - [instances[0].id], dry_run=True) - ex.exception.error_code.should.equal('DryRunOperation') - ex.exception.status.should.equal(400) - ex.exception.message.should.equal( - 'An error occurred (DryRunOperation) when calling the StartInstance operation: Request would have succeeded, but DryRun flag is set') - - started_instances = conn.start_instances([instances[0].id]) - started_instances[0].state.should.equal('pending') - - -@mock_ec2_deprecated -def test_instance_reboot(): - conn = boto.connect_ec2('the_key', 'the_secret') - reservation = conn.run_instances('ami-1234abcd') - instance = reservation.instances[0] - - with assert_raises(EC2ResponseError) as ex: - instance.reboot(dry_run=True) - ex.exception.error_code.should.equal('DryRunOperation') - ex.exception.status.should.equal(400) - ex.exception.message.should.equal( - 'An error occurred (DryRunOperation) when calling the RebootInstance operation: Request would have succeeded, but DryRun flag is set') - - instance.reboot() - instance.state.should.equal('pending') - - -@mock_ec2_deprecated -def test_instance_attribute_instance_type(): - conn = boto.connect_ec2('the_key', 'the_secret') - reservation = conn.run_instances('ami-1234abcd') - instance = reservation.instances[0] - - with assert_raises(EC2ResponseError) as ex: - instance.modify_attribute("instanceType", "m1.small", dry_run=True) - ex.exception.error_code.should.equal('DryRunOperation') - ex.exception.status.should.equal(400) - ex.exception.message.should.equal( - 'An error occurred (DryRunOperation) when calling the ModifyInstanceType operation: Request would have succeeded, but DryRun flag is set') - - instance.modify_attribute("instanceType", "m1.small") - - instance_attribute = instance.get_attribute("instanceType") - instance_attribute.should.be.a(InstanceAttribute) - instance_attribute.get('instanceType').should.equal("m1.small") - - -@mock_ec2_deprecated -def test_modify_instance_attribute_security_groups(): - conn = boto.connect_ec2('the_key', 'the_secret') - reservation = conn.run_instances('ami-1234abcd') - instance = reservation.instances[0] - - sg_id = 'sg-1234abcd' - sg_id2 = 'sg-abcd4321' - - with assert_raises(EC2ResponseError) as ex: - instance.modify_attribute("groupSet", [sg_id, sg_id2], dry_run=True) - ex.exception.error_code.should.equal('DryRunOperation') - ex.exception.status.should.equal(400) - ex.exception.message.should.equal( - 'An error occurred (DryRunOperation) when calling the ModifyInstanceSecurityGroups operation: Request would have succeeded, but DryRun flag is set') - - instance.modify_attribute("groupSet", [sg_id, sg_id2]) - - instance_attribute = instance.get_attribute("groupSet") - instance_attribute.should.be.a(InstanceAttribute) - group_list = instance_attribute.get('groupSet') - any(g.id == sg_id for g in 
group_list).should.be.ok - any(g.id == sg_id2 for g in group_list).should.be.ok - - -@mock_ec2_deprecated -def test_instance_attribute_user_data(): - conn = boto.connect_ec2('the_key', 'the_secret') - reservation = conn.run_instances('ami-1234abcd') - instance = reservation.instances[0] - - with assert_raises(EC2ResponseError) as ex: - instance.modify_attribute( - "userData", "this is my user data", dry_run=True) - ex.exception.error_code.should.equal('DryRunOperation') - ex.exception.status.should.equal(400) - ex.exception.message.should.equal( - 'An error occurred (DryRunOperation) when calling the ModifyUserData operation: Request would have succeeded, but DryRun flag is set') - - instance.modify_attribute("userData", "this is my user data") - - instance_attribute = instance.get_attribute("userData") - instance_attribute.should.be.a(InstanceAttribute) - instance_attribute.get("userData").should.equal("this is my user data") - - -@mock_ec2_deprecated -def test_instance_attribute_source_dest_check(): - conn = boto.connect_ec2('the_key', 'the_secret') - reservation = conn.run_instances('ami-1234abcd') - instance = reservation.instances[0] - - # Default value is true - instance.sourceDestCheck.should.equal('true') - - instance_attribute = instance.get_attribute("sourceDestCheck") - instance_attribute.should.be.a(InstanceAttribute) - instance_attribute.get("sourceDestCheck").should.equal(True) - - # Set to false (note: Boto converts bool to string, eg 'false') - - with assert_raises(EC2ResponseError) as ex: - instance.modify_attribute("sourceDestCheck", False, dry_run=True) - ex.exception.error_code.should.equal('DryRunOperation') - ex.exception.status.should.equal(400) - ex.exception.message.should.equal( - 'An error occurred (DryRunOperation) when calling the ModifySourceDestCheck operation: Request would have succeeded, but DryRun flag is set') - - instance.modify_attribute("sourceDestCheck", False) - - instance.update() - instance.sourceDestCheck.should.equal('false') - - instance_attribute = instance.get_attribute("sourceDestCheck") - instance_attribute.should.be.a(InstanceAttribute) - instance_attribute.get("sourceDestCheck").should.equal(False) - - # Set back to true - instance.modify_attribute("sourceDestCheck", True) - - instance.update() - instance.sourceDestCheck.should.equal('true') - - instance_attribute = instance.get_attribute("sourceDestCheck") - instance_attribute.should.be.a(InstanceAttribute) - instance_attribute.get("sourceDestCheck").should.equal(True) - - -@mock_ec2_deprecated -def test_user_data_with_run_instance(): - user_data = b"some user data" - conn = boto.connect_ec2('the_key', 'the_secret') - reservation = conn.run_instances('ami-1234abcd', user_data=user_data) - instance = reservation.instances[0] - - instance_attribute = instance.get_attribute("userData") - instance_attribute.should.be.a(InstanceAttribute) - retrieved_user_data = instance_attribute.get("userData").encode('utf-8') - decoded_user_data = base64.decodestring(retrieved_user_data) - decoded_user_data.should.equal(b"some user data") - - -@mock_ec2_deprecated -def test_run_instance_with_security_group_name(): - conn = boto.connect_ec2('the_key', 'the_secret') - - with assert_raises(EC2ResponseError) as ex: - group = conn.create_security_group( - 'group1', "some description", dry_run=True) - ex.exception.error_code.should.equal('DryRunOperation') - ex.exception.status.should.equal(400) - ex.exception.message.should.equal( - 'An error occurred (DryRunOperation) when calling the CreateSecurityGroup 
operation: Request would have succeeded, but DryRun flag is set') - - group = conn.create_security_group('group1', "some description") - - reservation = conn.run_instances('ami-1234abcd', - security_groups=['group1']) - instance = reservation.instances[0] - - instance.groups[0].id.should.equal(group.id) - instance.groups[0].name.should.equal("group1") - - -@mock_ec2_deprecated -def test_run_instance_with_security_group_id(): - conn = boto.connect_ec2('the_key', 'the_secret') - group = conn.create_security_group('group1', "some description") - reservation = conn.run_instances('ami-1234abcd', - security_group_ids=[group.id]) - instance = reservation.instances[0] - - instance.groups[0].id.should.equal(group.id) - instance.groups[0].name.should.equal("group1") - - -@mock_ec2_deprecated -def test_run_instance_with_instance_type(): - conn = boto.connect_ec2('the_key', 'the_secret') - reservation = conn.run_instances('ami-1234abcd', instance_type="t1.micro") - instance = reservation.instances[0] - - instance.instance_type.should.equal("t1.micro") - - -@mock_ec2_deprecated -def test_run_instance_with_default_placement(): - conn = boto.connect_ec2('the_key', 'the_secret') - reservation = conn.run_instances('ami-1234abcd') - instance = reservation.instances[0] - - instance.placement.should.equal("us-east-1a") - - -@mock_ec2_deprecated -def test_run_instance_with_placement(): - conn = boto.connect_ec2('the_key', 'the_secret') - reservation = conn.run_instances('ami-1234abcd', placement="us-east-1b") - instance = reservation.instances[0] - - instance.placement.should.equal("us-east-1b") - - -@mock_ec2 -def test_run_instance_with_subnet_boto3(): - client = boto3.client('ec2', region_name='eu-central-1') - - ip_networks = [ - (ipaddress.ip_network('10.0.0.0/16'), ipaddress.ip_network('10.0.99.0/24')), - (ipaddress.ip_network('192.168.42.0/24'), ipaddress.ip_network('192.168.42.0/25')) - ] - - # Tests instances are created with the correct IPs - for vpc_cidr, subnet_cidr in ip_networks: - resp = client.create_vpc( - CidrBlock=str(vpc_cidr), - AmazonProvidedIpv6CidrBlock=False, - DryRun=False, - InstanceTenancy='default' - ) - vpc_id = resp['Vpc']['VpcId'] - - resp = client.create_subnet( - CidrBlock=str(subnet_cidr), - VpcId=vpc_id - ) - subnet_id = resp['Subnet']['SubnetId'] - - resp = client.run_instances( - ImageId='ami-1234abcd', - MaxCount=1, - MinCount=1, - SubnetId=subnet_id - ) - instance = resp['Instances'][0] - instance['SubnetId'].should.equal(subnet_id) - - priv_ipv4 = ipaddress.ip_address(six.text_type(instance['PrivateIpAddress'])) - subnet_cidr.should.contain(priv_ipv4) - - -@mock_ec2 -def test_run_instance_with_specified_private_ipv4(): - client = boto3.client('ec2', region_name='eu-central-1') - - vpc_cidr = ipaddress.ip_network('192.168.42.0/24') - subnet_cidr = ipaddress.ip_network('192.168.42.0/25') - - resp = client.create_vpc( - CidrBlock=str(vpc_cidr), - AmazonProvidedIpv6CidrBlock=False, - DryRun=False, - InstanceTenancy='default' - ) - vpc_id = resp['Vpc']['VpcId'] - - resp = client.create_subnet( - CidrBlock=str(subnet_cidr), - VpcId=vpc_id - ) - subnet_id = resp['Subnet']['SubnetId'] - - resp = client.run_instances( - ImageId='ami-1234abcd', - MaxCount=1, - MinCount=1, - SubnetId=subnet_id, - PrivateIpAddress='192.168.42.5' - ) - instance = resp['Instances'][0] - instance['SubnetId'].should.equal(subnet_id) - instance['PrivateIpAddress'].should.equal('192.168.42.5') - - -@mock_ec2 -def test_run_instance_mapped_public_ipv4(): - client = boto3.client('ec2', 
region_name='eu-central-1') - - vpc_cidr = ipaddress.ip_network('192.168.42.0/24') - subnet_cidr = ipaddress.ip_network('192.168.42.0/25') - - resp = client.create_vpc( - CidrBlock=str(vpc_cidr), - AmazonProvidedIpv6CidrBlock=False, - DryRun=False, - InstanceTenancy='default' - ) - vpc_id = resp['Vpc']['VpcId'] - - resp = client.create_subnet( - CidrBlock=str(subnet_cidr), - VpcId=vpc_id - ) - subnet_id = resp['Subnet']['SubnetId'] - client.modify_subnet_attribute( - SubnetId=subnet_id, - MapPublicIpOnLaunch={'Value': True} - ) - - resp = client.run_instances( - ImageId='ami-1234abcd', - MaxCount=1, - MinCount=1, - SubnetId=subnet_id - ) - instance = resp['Instances'][0] - instance.should.contain('PublicDnsName') - instance.should.contain('PublicIpAddress') - len(instance['PublicDnsName']).should.be.greater_than(0) - len(instance['PublicIpAddress']).should.be.greater_than(0) - - -@mock_ec2_deprecated -def test_run_instance_with_nic_autocreated(): - conn = boto.connect_vpc('the_key', 'the_secret') - vpc = conn.create_vpc("10.0.0.0/16") - subnet = conn.create_subnet(vpc.id, "10.0.0.0/18") - security_group1 = conn.create_security_group( - 'test security group #1', 'this is a test security group') - security_group2 = conn.create_security_group( - 'test security group #2', 'this is a test security group') - private_ip = "10.0.0.1" - - reservation = conn.run_instances('ami-1234abcd', subnet_id=subnet.id, - security_groups=[security_group1.name], - security_group_ids=[security_group2.id], - private_ip_address=private_ip) - instance = reservation.instances[0] - - all_enis = conn.get_all_network_interfaces() - all_enis.should.have.length_of(1) - eni = all_enis[0] - - instance.interfaces.should.have.length_of(1) - instance.interfaces[0].id.should.equal(eni.id) - - instance.subnet_id.should.equal(subnet.id) - instance.groups.should.have.length_of(2) - set([group.id for group in instance.groups]).should.equal( - set([security_group1.id, security_group2.id])) - - eni.subnet_id.should.equal(subnet.id) - eni.groups.should.have.length_of(2) - set([group.id for group in eni.groups]).should.equal( - set([security_group1.id, security_group2.id])) - eni.private_ip_addresses.should.have.length_of(1) - eni.private_ip_addresses[0].private_ip_address.should.equal(private_ip) - - -@mock_ec2_deprecated -def test_run_instance_with_nic_preexisting(): - conn = boto.connect_vpc('the_key', 'the_secret') - vpc = conn.create_vpc("10.0.0.0/16") - subnet = conn.create_subnet(vpc.id, "10.0.0.0/18") - security_group1 = conn.create_security_group( - 'test security group #1', 'this is a test security group') - security_group2 = conn.create_security_group( - 'test security group #2', 'this is a test security group') - private_ip = "54.0.0.1" - eni = conn.create_network_interface( - subnet.id, private_ip, groups=[security_group1.id]) - - # Boto requires NetworkInterfaceCollection of NetworkInterfaceSpecifications... - # annoying, but generates the desired querystring. 
- from boto.ec2.networkinterface import NetworkInterfaceSpecification, NetworkInterfaceCollection - interface = NetworkInterfaceSpecification( - network_interface_id=eni.id, device_index=0) - interfaces = NetworkInterfaceCollection(interface) - # end Boto objects - - reservation = conn.run_instances('ami-1234abcd', network_interfaces=interfaces, - security_group_ids=[security_group2.id]) - instance = reservation.instances[0] - - instance.subnet_id.should.equal(subnet.id) - - all_enis = conn.get_all_network_interfaces() - all_enis.should.have.length_of(1) - - instance.interfaces.should.have.length_of(1) - instance_eni = instance.interfaces[0] - instance_eni.id.should.equal(eni.id) - - instance_eni.subnet_id.should.equal(subnet.id) - instance_eni.groups.should.have.length_of(2) - set([group.id for group in instance_eni.groups]).should.equal( - set([security_group1.id, security_group2.id])) - instance_eni.private_ip_addresses.should.have.length_of(1) - instance_eni.private_ip_addresses[ - 0].private_ip_address.should.equal(private_ip) - - -@requires_boto_gte("2.32.0") -@mock_ec2_deprecated -def test_instance_with_nic_attach_detach(): - conn = boto.connect_vpc('the_key', 'the_secret') - vpc = conn.create_vpc("10.0.0.0/16") - subnet = conn.create_subnet(vpc.id, "10.0.0.0/18") - - security_group1 = conn.create_security_group( - 'test security group #1', 'this is a test security group') - security_group2 = conn.create_security_group( - 'test security group #2', 'this is a test security group') - - reservation = conn.run_instances( - 'ami-1234abcd', security_group_ids=[security_group1.id]) - instance = reservation.instances[0] - - eni = conn.create_network_interface(subnet.id, groups=[security_group2.id]) - - # Check initial instance and ENI data - instance.interfaces.should.have.length_of(1) - - eni.groups.should.have.length_of(1) - set([group.id for group in eni.groups]).should.equal( - set([security_group2.id])) - - # Attach - with assert_raises(EC2ResponseError) as ex: - conn.attach_network_interface( - eni.id, instance.id, device_index=1, dry_run=True) - ex.exception.error_code.should.equal('DryRunOperation') - ex.exception.status.should.equal(400) - ex.exception.message.should.equal( - 'An error occurred (DryRunOperation) when calling the AttachNetworkInterface operation: Request would have succeeded, but DryRun flag is set') - - conn.attach_network_interface(eni.id, instance.id, device_index=1) - - # Check attached instance and ENI data - instance.update() - instance.interfaces.should.have.length_of(2) - instance_eni = instance.interfaces[1] - instance_eni.id.should.equal(eni.id) - instance_eni.groups.should.have.length_of(2) - set([group.id for group in instance_eni.groups]).should.equal( - set([security_group1.id, security_group2.id])) - - eni = conn.get_all_network_interfaces( - filters={'network-interface-id': eni.id})[0] - eni.groups.should.have.length_of(2) - set([group.id for group in eni.groups]).should.equal( - set([security_group1.id, security_group2.id])) - - # Detach - with assert_raises(EC2ResponseError) as ex: - conn.detach_network_interface(instance_eni.attachment.id, dry_run=True) - ex.exception.error_code.should.equal('DryRunOperation') - ex.exception.status.should.equal(400) - ex.exception.message.should.equal( - 'An error occurred (DryRunOperation) when calling the DetachNetworkInterface operation: Request would have succeeded, but DryRun flag is set') - - conn.detach_network_interface(instance_eni.attachment.id) - - # Check detached instance and ENI data - 
instance.update() - instance.interfaces.should.have.length_of(1) - - eni = conn.get_all_network_interfaces( - filters={'network-interface-id': eni.id})[0] - eni.groups.should.have.length_of(1) - set([group.id for group in eni.groups]).should.equal( - set([security_group2.id])) - - # Detach with invalid attachment ID - with assert_raises(EC2ResponseError) as cm: - conn.detach_network_interface('eni-attach-1234abcd') - cm.exception.code.should.equal('InvalidAttachmentID.NotFound') - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none - - -@mock_ec2_deprecated -def test_ec2_classic_has_public_ip_address(): - conn = boto.connect_ec2('the_key', 'the_secret') - reservation = conn.run_instances('ami-1234abcd', key_name="keypair_name") - instance = reservation.instances[0] - instance.ip_address.should_not.equal(None) - instance.public_dns_name.should.contain(instance.ip_address.replace('.', '-')) - instance.private_ip_address.should_not.equal(None) - instance.private_dns_name.should.contain(instance.private_ip_address.replace('.', '-')) - - -@mock_ec2_deprecated -def test_run_instance_with_keypair(): - conn = boto.connect_ec2('the_key', 'the_secret') - reservation = conn.run_instances('ami-1234abcd', key_name="keypair_name") - instance = reservation.instances[0] - - instance.key_name.should.equal("keypair_name") - - -@mock_ec2_deprecated -def test_describe_instance_status_no_instances(): - conn = boto.connect_ec2('the_key', 'the_secret') - all_status = conn.get_all_instance_status() - len(all_status).should.equal(0) - - -@mock_ec2_deprecated -def test_describe_instance_status_with_instances(): - conn = boto.connect_ec2('the_key', 'the_secret') - conn.run_instances('ami-1234abcd', key_name="keypair_name") - - all_status = conn.get_all_instance_status() - len(all_status).should.equal(1) - all_status[0].instance_status.status.should.equal('ok') - all_status[0].system_status.status.should.equal('ok') - - -@mock_ec2_deprecated -def test_describe_instance_status_with_instance_filter(): - conn = boto.connect_ec2('the_key', 'the_secret') - - # We want to filter based on this one - reservation = conn.run_instances('ami-1234abcd', key_name="keypair_name") - instance = reservation.instances[0] - - # This is just to setup the test - conn.run_instances('ami-1234abcd', key_name="keypair_name") - - all_status = conn.get_all_instance_status(instance_ids=[instance.id]) - len(all_status).should.equal(1) - all_status[0].id.should.equal(instance.id) - - # Call get_all_instance_status with a bad id should raise an error - with assert_raises(EC2ResponseError) as cm: - conn.get_all_instance_status(instance_ids=[instance.id, "i-1234abcd"]) - cm.exception.code.should.equal('InvalidInstanceID.NotFound') - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none - - -@requires_boto_gte("2.32.0") -@mock_ec2_deprecated -def test_describe_instance_status_with_non_running_instances(): - conn = boto.connect_ec2('the_key', 'the_secret') - reservation = conn.run_instances('ami-1234abcd', min_count=3) - instance1, instance2, instance3 = reservation.instances - instance1.stop() - instance2.terminate() - - all_running_status = conn.get_all_instance_status() - all_running_status.should.have.length_of(1) - all_running_status[0].id.should.equal(instance3.id) - all_running_status[0].state_name.should.equal('running') - - all_status = conn.get_all_instance_status(include_all_instances=True) - all_status.should.have.length_of(3) - - status1 = next((s for s in all_status if s.id == 
instance1.id), None) - status1.state_name.should.equal('stopped') - - status2 = next((s for s in all_status if s.id == instance2.id), None) - status2.state_name.should.equal('terminated') - - status3 = next((s for s in all_status if s.id == instance3.id), None) - status3.state_name.should.equal('running') - - -@mock_ec2_deprecated -def test_get_instance_by_security_group(): - conn = boto.connect_ec2('the_key', 'the_secret') - - conn.run_instances('ami-1234abcd') - instance = conn.get_only_instances()[0] - - security_group = conn.create_security_group('test', 'test') - - with assert_raises(EC2ResponseError) as ex: - conn.modify_instance_attribute(instance.id, "groupSet", [ - security_group.id], dry_run=True) - ex.exception.error_code.should.equal('DryRunOperation') - ex.exception.status.should.equal(400) - ex.exception.message.should.equal( - 'An error occurred (DryRunOperation) when calling the ModifyInstanceSecurityGroups operation: Request would have succeeded, but DryRun flag is set') - - conn.modify_instance_attribute( - instance.id, "groupSet", [security_group.id]) - - security_group_instances = security_group.instances() - - assert len(security_group_instances) == 1 - assert security_group_instances[0].id == instance.id - - -@mock_ec2 -def test_modify_delete_on_termination(): - ec2_client = boto3.resource('ec2', region_name='us-west-1') - result = ec2_client.create_instances(ImageId='ami-12345678', MinCount=1, MaxCount=1) - instance = result[0] - instance.load() - instance.block_device_mappings[0]['Ebs']['DeleteOnTermination'].should.be(False) - instance.modify_attribute( - BlockDeviceMappings=[{ - 'DeviceName': '/dev/sda1', - 'Ebs': {'DeleteOnTermination': True} - }] - ) - instance.load() - instance.block_device_mappings[0]['Ebs']['DeleteOnTermination'].should.be(True) - -@mock_ec2 -def test_create_instance_ebs_optimized(): - ec2_resource = boto3.resource('ec2', region_name='eu-west-1') - - instance = ec2_resource.create_instances( - ImageId = 'ami-12345678', - MaxCount = 1, - MinCount = 1, - EbsOptimized = True, - )[0] - instance.load() - instance.ebs_optimized.should.be(True) - - instance.modify_attribute( - EbsOptimized={ - 'Value': False - } - ) - instance.load() - instance.ebs_optimized.should.be(False) +from __future__ import unicode_literals +# Ensure 'assert_raises' context manager support for Python 2.6 +import tests.backport_assert_raises +from nose.tools import assert_raises + +import base64 +import datetime +import ipaddress + +import six +import boto +import boto3 +from boto.ec2.instance import Reservation, InstanceAttribute +from boto.exception import EC2ResponseError, EC2ResponseError +from freezegun import freeze_time +import sure # noqa + +from moto import mock_ec2_deprecated, mock_ec2 +from tests.helpers import requires_boto_gte + + +################ Test Readme ############### +def add_servers(ami_id, count): + conn = boto.connect_ec2() + for index in range(count): + conn.run_instances(ami_id) + + +@mock_ec2_deprecated +def test_add_servers(): + add_servers('ami-1234abcd', 2) + + conn = boto.connect_ec2() + reservations = conn.get_all_instances() + assert len(reservations) == 2 + instance1 = reservations[0].instances[0] + assert instance1.image_id == 'ami-1234abcd' + +############################################ + + +@freeze_time("2014-01-01 05:00:00") +@mock_ec2_deprecated +def test_instance_launch_and_terminate(): + conn = boto.connect_ec2('the_key', 'the_secret') + + with assert_raises(EC2ResponseError) as ex: + reservation = conn.run_instances('ami-1234abcd', 
dry_run=True) + ex.exception.error_code.should.equal('DryRunOperation') + ex.exception.status.should.equal(400) + ex.exception.message.should.equal( + 'An error occurred (DryRunOperation) when calling the RunInstance operation: Request would have succeeded, but DryRun flag is set') + + reservation = conn.run_instances('ami-1234abcd') + reservation.should.be.a(Reservation) + reservation.instances.should.have.length_of(1) + instance = reservation.instances[0] + instance.state.should.equal('pending') + + reservations = conn.get_all_instances() + reservations.should.have.length_of(1) + reservations[0].id.should.equal(reservation.id) + instances = reservations[0].instances + instances.should.have.length_of(1) + instance = instances[0] + instance.id.should.equal(instance.id) + instance.state.should.equal('running') + instance.launch_time.should.equal("2014-01-01T05:00:00.000Z") + instance.vpc_id.should.equal(None) + instance.placement.should.equal('us-east-1a') + + root_device_name = instance.root_device_name + instance.block_device_mapping[ + root_device_name].status.should.equal('in-use') + volume_id = instance.block_device_mapping[root_device_name].volume_id + volume_id.should.match(r'vol-\w+') + + volume = conn.get_all_volumes(volume_ids=[volume_id])[0] + volume.attach_data.instance_id.should.equal(instance.id) + volume.status.should.equal('in-use') + + with assert_raises(EC2ResponseError) as ex: + conn.terminate_instances([instance.id], dry_run=True) + ex.exception.error_code.should.equal('DryRunOperation') + ex.exception.status.should.equal(400) + ex.exception.message.should.equal( + 'An error occurred (DryRunOperation) when calling the TerminateInstance operation: Request would have succeeded, but DryRun flag is set') + + conn.terminate_instances([instance.id]) + + reservations = conn.get_all_instances() + instance = reservations[0].instances[0] + instance.state.should.equal('terminated') + + +@mock_ec2_deprecated +def test_terminate_empty_instances(): + conn = boto.connect_ec2('the_key', 'the_secret') + conn.terminate_instances.when.called_with( + []).should.throw(EC2ResponseError) + + +@freeze_time("2014-01-01 05:00:00") +@mock_ec2_deprecated +def test_instance_attach_volume(): + conn = boto.connect_ec2('the_key', 'the_secret') + reservation = conn.run_instances('ami-1234abcd') + instance = reservation.instances[0] + + vol1 = conn.create_volume(size=36, zone=conn.region.name) + vol1.attach(instance.id, "/dev/sda1") + vol1.update() + vol2 = conn.create_volume(size=65, zone=conn.region.name) + vol2.attach(instance.id, "/dev/sdb1") + vol2.update() + vol3 = conn.create_volume(size=130, zone=conn.region.name) + vol3.attach(instance.id, "/dev/sdc1") + vol3.update() + + reservations = conn.get_all_instances() + instance = reservations[0].instances[0] + + instance.block_device_mapping.should.have.length_of(3) + + for v in conn.get_all_volumes(volume_ids=[instance.block_device_mapping['/dev/sdc1'].volume_id]): + v.attach_data.instance_id.should.equal(instance.id) + # can do due to freeze_time decorator. + v.attach_data.attach_time.should.equal(instance.launch_time) + # can do due to freeze_time decorator. 
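+        # (freeze_time pins the mocked clock at 2014-01-01 05:00:00 for the
+        # whole test, so moto records the volume's create_time at the same
+        # frozen instant as the instance's launch_time; that is what makes
+        # the equality assertion below valid.)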
+ v.create_time.should.equal(instance.launch_time) + v.region.name.should.equal(instance.region.name) + v.status.should.equal('in-use') + + +@mock_ec2_deprecated +def test_get_instances_by_id(): + conn = boto.connect_ec2() + reservation = conn.run_instances('ami-1234abcd', min_count=2) + instance1, instance2 = reservation.instances + + reservations = conn.get_all_instances(instance_ids=[instance1.id]) + reservations.should.have.length_of(1) + reservation = reservations[0] + reservation.instances.should.have.length_of(1) + reservation.instances[0].id.should.equal(instance1.id) + + reservations = conn.get_all_instances( + instance_ids=[instance1.id, instance2.id]) + reservations.should.have.length_of(1) + reservation = reservations[0] + reservation.instances.should.have.length_of(2) + instance_ids = [instance.id for instance in reservation.instances] + instance_ids.should.equal([instance1.id, instance2.id]) + + # Call get_all_instances with a bad id should raise an error + with assert_raises(EC2ResponseError) as cm: + conn.get_all_instances(instance_ids=[instance1.id, "i-1234abcd"]) + cm.exception.code.should.equal('InvalidInstanceID.NotFound') + cm.exception.status.should.equal(400) + cm.exception.request_id.should_not.be.none + + +@mock_ec2 +def test_get_paginated_instances(): + image_id = 'ami-1234abcd' + client = boto3.client('ec2', region_name='us-east-1') + conn = boto3.resource('ec2', 'us-east-1') + for i in range(100): + conn.create_instances(ImageId=image_id, + MinCount=1, + MaxCount=1) + resp = client.describe_instances(MaxResults=50) + reservations = resp['Reservations'] + reservations.should.have.length_of(50) + next_token = resp['NextToken'] + next_token.should_not.be.none + resp2 = client.describe_instances(NextToken=next_token) + reservations.extend(resp2['Reservations']) + reservations.should.have.length_of(100) + assert 'NextToken' not in resp2.keys() + + +@mock_ec2 +def test_create_with_tags(): + ec2 = boto3.client('ec2', region_name='us-west-2') + instances = ec2.run_instances( + ImageId='ami-123', + MinCount=1, + MaxCount=1, + InstanceType='t2.micro', + TagSpecifications=[ + { + 'ResourceType': 'instance', + 'Tags': [ + { + 'Key': 'MY_TAG1', + 'Value': 'MY_VALUE1', + }, + { + 'Key': 'MY_TAG2', + 'Value': 'MY_VALUE2', + }, + ], + }, + { + 'ResourceType': 'instance', + 'Tags': [ + { + 'Key': 'MY_TAG3', + 'Value': 'MY_VALUE3', + }, + ] + }, + ], + ) + assert 'Tags' in instances['Instances'][0] + len(instances['Instances'][0]['Tags']).should.equal(3) + + +@mock_ec2_deprecated +def test_get_instances_filtering_by_state(): + conn = boto.connect_ec2() + reservation = conn.run_instances('ami-1234abcd', min_count=3) + instance1, instance2, instance3 = reservation.instances + + conn.terminate_instances([instance1.id]) + + reservations = conn.get_all_instances( + filters={'instance-state-name': 'running'}) + reservations.should.have.length_of(1) + # Since we terminated instance1, only instance2 and instance3 should be + # returned + instance_ids = [instance.id for instance in reservations[0].instances] + set(instance_ids).should.equal(set([instance2.id, instance3.id])) + + reservations = conn.get_all_instances( + [instance2.id], filters={'instance-state-name': 'running'}) + reservations.should.have.length_of(1) + instance_ids = [instance.id for instance in reservations[0].instances] + instance_ids.should.equal([instance2.id]) + + reservations = conn.get_all_instances( + [instance2.id], filters={'instance-state-name': 'terminated'}) + list(reservations).should.equal([]) + + # 
get_all_instances should still return all 3 + reservations = conn.get_all_instances() + reservations[0].instances.should.have.length_of(3) + + conn.get_all_instances.when.called_with( + filters={'not-implemented-filter': 'foobar'}).should.throw(NotImplementedError) + + +@mock_ec2_deprecated +def test_get_instances_filtering_by_instance_id(): + conn = boto.connect_ec2() + reservation = conn.run_instances('ami-1234abcd', min_count=3) + instance1, instance2, instance3 = reservation.instances + + reservations = conn.get_all_instances( + filters={'instance-id': instance1.id}) + # get_all_instances should return just instance1 + reservations[0].instances.should.have.length_of(1) + reservations[0].instances[0].id.should.equal(instance1.id) + + reservations = conn.get_all_instances( + filters={'instance-id': [instance1.id, instance2.id]}) + # get_all_instances should return two + reservations[0].instances.should.have.length_of(2) + + reservations = conn.get_all_instances( + filters={'instance-id': 'non-existing-id'}) + reservations.should.have.length_of(0) + + +@mock_ec2_deprecated +def test_get_instances_filtering_by_instance_type(): + conn = boto.connect_ec2() + reservation1 = conn.run_instances('ami-1234abcd', instance_type='m1.small') + instance1 = reservation1.instances[0] + reservation2 = conn.run_instances('ami-1234abcd', instance_type='m1.small') + instance2 = reservation2.instances[0] + reservation3 = conn.run_instances('ami-1234abcd', instance_type='t1.micro') + instance3 = reservation3.instances[0] + + reservations = conn.get_all_instances( + filters={'instance-type': 'm1.small'}) + # get_all_instances should return instance1,2 + reservations.should.have.length_of(2) + reservations[0].instances.should.have.length_of(1) + reservations[1].instances.should.have.length_of(1) + instance_ids = [reservations[0].instances[0].id, + reservations[1].instances[0].id] + set(instance_ids).should.equal(set([instance1.id, instance2.id])) + + reservations = conn.get_all_instances( + filters={'instance-type': 't1.micro'}) + # get_all_instances should return one + reservations.should.have.length_of(1) + reservations[0].instances.should.have.length_of(1) + reservations[0].instances[0].id.should.equal(instance3.id) + + reservations = conn.get_all_instances( + filters={'instance-type': ['t1.micro', 'm1.small']}) + reservations.should.have.length_of(3) + reservations[0].instances.should.have.length_of(1) + reservations[1].instances.should.have.length_of(1) + reservations[2].instances.should.have.length_of(1) + instance_ids = [ + reservations[0].instances[0].id, + reservations[1].instances[0].id, + reservations[2].instances[0].id, + ] + set(instance_ids).should.equal( + set([instance1.id, instance2.id, instance3.id])) + + reservations = conn.get_all_instances(filters={'instance-type': 'bogus'}) + # bogus instance-type should return none + reservations.should.have.length_of(0) + + +@mock_ec2_deprecated +def test_get_instances_filtering_by_reason_code(): + conn = boto.connect_ec2() + reservation = conn.run_instances('ami-1234abcd', min_count=3) + instance1, instance2, instance3 = reservation.instances + instance1.stop() + instance2.terminate() + + reservations = conn.get_all_instances( + filters={'state-reason-code': 'Client.UserInitiatedShutdown'}) + # get_all_instances should return instance1 and instance2 + reservations[0].instances.should.have.length_of(2) + set([instance1.id, instance2.id]).should.equal( + set([i.id for i in reservations[0].instances])) + + reservations = 
conn.get_all_instances(filters={'state-reason-code': ''})
+    # get_all_instances should return instance 3
+    reservations[0].instances.should.have.length_of(1)
+    reservations[0].instances[0].id.should.equal(instance3.id)
+
+
+@mock_ec2_deprecated
+def test_get_instances_filtering_by_source_dest_check():
+    conn = boto.connect_ec2()
+    reservation = conn.run_instances('ami-1234abcd', min_count=2)
+    instance1, instance2 = reservation.instances
+    conn.modify_instance_attribute(
+        instance1.id, attribute='sourceDestCheck', value=False)
+
+    source_dest_check_false = conn.get_all_instances(
+        filters={'source-dest-check': 'false'})
+    source_dest_check_true = conn.get_all_instances(
+        filters={'source-dest-check': 'true'})
+
+    source_dest_check_false[0].instances.should.have.length_of(1)
+    source_dest_check_false[0].instances[0].id.should.equal(instance1.id)
+
+    source_dest_check_true[0].instances.should.have.length_of(1)
+    source_dest_check_true[0].instances[0].id.should.equal(instance2.id)
+
+
+@mock_ec2_deprecated
+def test_get_instances_filtering_by_vpc_id():
+    conn = boto.connect_vpc('the_key', 'the_secret')
+    vpc1 = conn.create_vpc("10.0.0.0/16")
+    subnet1 = conn.create_subnet(vpc1.id, "10.0.0.0/27")
+    reservation1 = conn.run_instances(
+        'ami-1234abcd', min_count=1, subnet_id=subnet1.id)
+    instance1 = reservation1.instances[0]
+
+    vpc2 = conn.create_vpc("10.1.0.0/16")
+    subnet2 = conn.create_subnet(vpc2.id, "10.1.0.0/27")
+    reservation2 = conn.run_instances(
+        'ami-1234abcd', min_count=1, subnet_id=subnet2.id)
+    instance2 = reservation2.instances[0]
+
+    reservations1 = conn.get_all_instances(filters={'vpc-id': vpc1.id})
+    reservations1.should.have.length_of(1)
+    reservations1[0].instances.should.have.length_of(1)
+    reservations1[0].instances[0].id.should.equal(instance1.id)
+    reservations1[0].instances[0].vpc_id.should.equal(vpc1.id)
+    reservations1[0].instances[0].subnet_id.should.equal(subnet1.id)
+
+    reservations2 = conn.get_all_instances(filters={'vpc-id': vpc2.id})
+    reservations2.should.have.length_of(1)
+    reservations2[0].instances.should.have.length_of(1)
+    reservations2[0].instances[0].id.should.equal(instance2.id)
+    reservations2[0].instances[0].vpc_id.should.equal(vpc2.id)
+    reservations2[0].instances[0].subnet_id.should.equal(subnet2.id)
+
+
+@mock_ec2_deprecated
+def test_get_instances_filtering_by_architecture():
+    conn = boto.connect_ec2()
+    reservation = conn.run_instances('ami-1234abcd', min_count=1)
+    instance = reservation.instances[0]
+
+    reservations = conn.get_all_instances(filters={'architecture': 'x86_64'})
+    # get_all_instances should return the instance
+    reservations[0].instances.should.have.length_of(1)
+
+
+@mock_ec2
+def test_get_instances_filtering_by_image_id():
+    image_id = 'ami-1234abcd'
+    client = boto3.client('ec2', region_name='us-east-1')
+    conn = boto3.resource('ec2', 'us-east-1')
+    conn.create_instances(ImageId=image_id,
+                          MinCount=1,
+                          MaxCount=1)
+
+    reservations = client.describe_instances(Filters=[{'Name': 'image-id',
+                                                       'Values': [image_id]}])['Reservations']
+    reservations[0]['Instances'].should.have.length_of(1)
+
+
+@mock_ec2
+def test_get_instances_filtering_by_private_dns():
+    image_id = 'ami-1234abcd'
+    client = boto3.client('ec2', region_name='us-east-1')
+    conn = boto3.resource('ec2', 'us-east-1')
+    conn.create_instances(ImageId=image_id,
+                          MinCount=1,
+                          MaxCount=1,
+                          PrivateIpAddress='10.0.0.1')
+    reservations = client.describe_instances(Filters=[
+        {'Name': 'private-dns-name', 'Values': ['ip-10-0-0-1.ec2.internal']}
+    ])['Reservations']
+    
reservations[0]['Instances'].should.have.length_of(1)
+
+
+@mock_ec2
+def test_get_instances_filtering_by_ni_private_dns():
+    image_id = 'ami-1234abcd'
+    client = boto3.client('ec2', region_name='us-west-2')
+    conn = boto3.resource('ec2', 'us-west-2')
+    conn.create_instances(ImageId=image_id,
+                          MinCount=1,
+                          MaxCount=1,
+                          PrivateIpAddress='10.0.0.1')
+    reservations = client.describe_instances(Filters=[
+        {'Name': 'network-interface.private-dns-name', 'Values': ['ip-10-0-0-1.us-west-2.compute.internal']}
+    ])['Reservations']
+    reservations[0]['Instances'].should.have.length_of(1)
+
+
+@mock_ec2
+def test_get_instances_filtering_by_instance_group_name():
+    image_id = 'ami-1234abcd'
+    client = boto3.client('ec2', region_name='us-east-1')
+    client.create_security_group(
+        Description='test',
+        GroupName='test_sg'
+    )
+    client.run_instances(ImageId=image_id,
+                         MinCount=1,
+                         MaxCount=1,
+                         SecurityGroups=['test_sg'])
+    reservations = client.describe_instances(Filters=[
+        {'Name': 'instance.group-name', 'Values': ['test_sg']}
+    ])['Reservations']
+    reservations[0]['Instances'].should.have.length_of(1)
+
+
+@mock_ec2
+def test_get_instances_filtering_by_instance_group_id():
+    image_id = 'ami-1234abcd'
+    client = boto3.client('ec2', region_name='us-east-1')
+    create_sg = client.create_security_group(
+        Description='test',
+        GroupName='test_sg'
+    )
+    group_id = create_sg['GroupId']
+    client.run_instances(ImageId=image_id,
+                         MinCount=1,
+                         MaxCount=1,
+                         SecurityGroups=['test_sg'])
+    reservations = client.describe_instances(Filters=[
+        {'Name': 'instance.group-id', 'Values': [group_id]}
+    ])['Reservations']
+    reservations[0]['Instances'].should.have.length_of(1)
+
+
+@mock_ec2_deprecated
+def test_get_instances_filtering_by_tag():
+    conn = boto.connect_ec2()
+    reservation = conn.run_instances('ami-1234abcd', min_count=3)
+    instance1, instance2, instance3 = reservation.instances
+    instance1.add_tag('tag1', 'value1')
+    instance1.add_tag('tag2', 'value2')
+    instance2.add_tag('tag1', 'value1')
+    instance2.add_tag('tag2', 'wrong value')
+    instance3.add_tag('tag2', 'value2')
+
+    reservations = conn.get_all_instances(filters={'tag:tag0': 'value0'})
+    # get_all_instances should return no instances
+    reservations.should.have.length_of(0)
+
+    reservations = conn.get_all_instances(filters={'tag:tag1': 'value1'})
+    # get_all_instances should return both instances with this tag value
+    reservations.should.have.length_of(1)
+    reservations[0].instances.should.have.length_of(2)
+    reservations[0].instances[0].id.should.equal(instance1.id)
+    reservations[0].instances[1].id.should.equal(instance2.id)
+
+    reservations = conn.get_all_instances(
+        filters={'tag:tag1': 'value1', 'tag:tag2': 'value2'})
+    # get_all_instances should return the instance with both tag values
+    reservations.should.have.length_of(1)
+    reservations[0].instances.should.have.length_of(1)
+    reservations[0].instances[0].id.should.equal(instance1.id)
+
+    reservations = conn.get_all_instances(
+        filters={'tag:tag2': ['value2', 'bogus']})
+    # get_all_instances should return both instances with one of the
+    # acceptable tag values
+    reservations.should.have.length_of(1)
+    reservations[0].instances.should.have.length_of(2)
+    
reservations[0].instances[0].id.should.equal(instance1.id)
+    reservations[0].instances[1].id.should.equal(instance3.id)
+
+
+@mock_ec2_deprecated
+def test_get_instances_filtering_by_tag_value():
+    conn = boto.connect_ec2()
+    reservation = conn.run_instances('ami-1234abcd', min_count=3)
+    instance1, instance2, instance3 = reservation.instances
+    instance1.add_tag('tag1', 'value1')
+    instance1.add_tag('tag2', 'value2')
+    instance2.add_tag('tag1', 'value1')
+    instance2.add_tag('tag2', 'wrong value')
+    instance3.add_tag('tag2', 'value2')
+
+    reservations = conn.get_all_instances(filters={'tag-value': 'value0'})
+    # get_all_instances should return no instances
+    reservations.should.have.length_of(0)
+
+    reservations = conn.get_all_instances(filters={'tag-value': 'value1'})
+    # get_all_instances should return both instances with this tag value
+    reservations.should.have.length_of(1)
+    reservations[0].instances.should.have.length_of(2)
+    reservations[0].instances[0].id.should.equal(instance1.id)
+    reservations[0].instances[1].id.should.equal(instance2.id)
+
+    reservations = conn.get_all_instances(
+        filters={'tag-value': ['value2', 'value1']})
+    # get_all_instances should return all three instances, each matching
+    # one of the acceptable tag values
+    reservations.should.have.length_of(1)
+    reservations[0].instances.should.have.length_of(3)
+    reservations[0].instances[0].id.should.equal(instance1.id)
+    reservations[0].instances[1].id.should.equal(instance2.id)
+    reservations[0].instances[2].id.should.equal(instance3.id)
+
+    reservations = conn.get_all_instances(
+        filters={'tag-value': ['value2', 'bogus']})
+    # get_all_instances should return both instances with one of the
+    # acceptable tag values
+    reservations.should.have.length_of(1)
+    reservations[0].instances.should.have.length_of(2)
+    reservations[0].instances[0].id.should.equal(instance1.id)
+    reservations[0].instances[1].id.should.equal(instance3.id)
+
+
+@mock_ec2_deprecated
+def test_get_instances_filtering_by_tag_name():
+    conn = boto.connect_ec2()
+    reservation = conn.run_instances('ami-1234abcd', min_count=3)
+    instance1, instance2, instance3 = reservation.instances
+    instance1.add_tag('tag1')
+    instance1.add_tag('tag2')
+    instance2.add_tag('tag1')
+    instance2.add_tag('tag2X')
+    instance3.add_tag('tag3')
+
+    reservations = conn.get_all_instances(filters={'tag-key': 'tagX'})
+    # get_all_instances should return no instances
+    reservations.should.have.length_of(0)
+
+    reservations = conn.get_all_instances(filters={'tag-key': 'tag1'})
+    # get_all_instances should return both instances with this tag key
+    reservations.should.have.length_of(1)
+    reservations[0].instances.should.have.length_of(2)
+    reservations[0].instances[0].id.should.equal(instance1.id)
+    reservations[0].instances[1].id.should.equal(instance2.id)
+
+    reservations = conn.get_all_instances(
+        filters={'tag-key': ['tag1', 'tag3']})
+    # get_all_instances should return all three instances, each matching
+    # one of the acceptable tag keys
+    reservations.should.have.length_of(1)
+    reservations[0].instances.should.have.length_of(3)
+    reservations[0].instances[0].id.should.equal(instance1.id)
+    reservations[0].instances[1].id.should.equal(instance2.id)
+    reservations[0].instances[2].id.should.equal(instance3.id)
+
+
+@mock_ec2_deprecated
+def test_instance_start_and_stop():
+    conn = boto.connect_ec2('the_key', 'the_secret')
+    reservation = conn.run_instances('ami-1234abcd', min_count=2)
+    instances = reservation.instances
+    instances.should.have.length_of(2)
+
+    instance_ids = [instance.id for instance 
in instances] + + with assert_raises(EC2ResponseError) as ex: + stopped_instances = conn.stop_instances(instance_ids, dry_run=True) + ex.exception.error_code.should.equal('DryRunOperation') + ex.exception.status.should.equal(400) + ex.exception.message.should.equal( + 'An error occurred (DryRunOperation) when calling the StopInstance operation: Request would have succeeded, but DryRun flag is set') + + stopped_instances = conn.stop_instances(instance_ids) + + for instance in stopped_instances: + instance.state.should.equal('stopping') + + with assert_raises(EC2ResponseError) as ex: + started_instances = conn.start_instances( + [instances[0].id], dry_run=True) + ex.exception.error_code.should.equal('DryRunOperation') + ex.exception.status.should.equal(400) + ex.exception.message.should.equal( + 'An error occurred (DryRunOperation) when calling the StartInstance operation: Request would have succeeded, but DryRun flag is set') + + started_instances = conn.start_instances([instances[0].id]) + started_instances[0].state.should.equal('pending') + + +@mock_ec2_deprecated +def test_instance_reboot(): + conn = boto.connect_ec2('the_key', 'the_secret') + reservation = conn.run_instances('ami-1234abcd') + instance = reservation.instances[0] + + with assert_raises(EC2ResponseError) as ex: + instance.reboot(dry_run=True) + ex.exception.error_code.should.equal('DryRunOperation') + ex.exception.status.should.equal(400) + ex.exception.message.should.equal( + 'An error occurred (DryRunOperation) when calling the RebootInstance operation: Request would have succeeded, but DryRun flag is set') + + instance.reboot() + instance.state.should.equal('pending') + + +@mock_ec2_deprecated +def test_instance_attribute_instance_type(): + conn = boto.connect_ec2('the_key', 'the_secret') + reservation = conn.run_instances('ami-1234abcd') + instance = reservation.instances[0] + + with assert_raises(EC2ResponseError) as ex: + instance.modify_attribute("instanceType", "m1.small", dry_run=True) + ex.exception.error_code.should.equal('DryRunOperation') + ex.exception.status.should.equal(400) + ex.exception.message.should.equal( + 'An error occurred (DryRunOperation) when calling the ModifyInstanceType operation: Request would have succeeded, but DryRun flag is set') + + instance.modify_attribute("instanceType", "m1.small") + + instance_attribute = instance.get_attribute("instanceType") + instance_attribute.should.be.a(InstanceAttribute) + instance_attribute.get('instanceType').should.equal("m1.small") + + +@mock_ec2_deprecated +def test_modify_instance_attribute_security_groups(): + conn = boto.connect_ec2('the_key', 'the_secret') + reservation = conn.run_instances('ami-1234abcd') + instance = reservation.instances[0] + + sg_id = 'sg-1234abcd' + sg_id2 = 'sg-abcd4321' + + with assert_raises(EC2ResponseError) as ex: + instance.modify_attribute("groupSet", [sg_id, sg_id2], dry_run=True) + ex.exception.error_code.should.equal('DryRunOperation') + ex.exception.status.should.equal(400) + ex.exception.message.should.equal( + 'An error occurred (DryRunOperation) when calling the ModifyInstanceSecurityGroups operation: Request would have succeeded, but DryRun flag is set') + + instance.modify_attribute("groupSet", [sg_id, sg_id2]) + + instance_attribute = instance.get_attribute("groupSet") + instance_attribute.should.be.a(InstanceAttribute) + group_list = instance_attribute.get('groupSet') + any(g.id == sg_id for g in group_list).should.be.ok + any(g.id == sg_id2 for g in group_list).should.be.ok + + +@mock_ec2_deprecated +def 
test_instance_attribute_user_data(): + conn = boto.connect_ec2('the_key', 'the_secret') + reservation = conn.run_instances('ami-1234abcd') + instance = reservation.instances[0] + + with assert_raises(EC2ResponseError) as ex: + instance.modify_attribute( + "userData", "this is my user data", dry_run=True) + ex.exception.error_code.should.equal('DryRunOperation') + ex.exception.status.should.equal(400) + ex.exception.message.should.equal( + 'An error occurred (DryRunOperation) when calling the ModifyUserData operation: Request would have succeeded, but DryRun flag is set') + + instance.modify_attribute("userData", "this is my user data") + + instance_attribute = instance.get_attribute("userData") + instance_attribute.should.be.a(InstanceAttribute) + instance_attribute.get("userData").should.equal("this is my user data") + + +@mock_ec2_deprecated +def test_instance_attribute_source_dest_check(): + conn = boto.connect_ec2('the_key', 'the_secret') + reservation = conn.run_instances('ami-1234abcd') + instance = reservation.instances[0] + + # Default value is true + instance.sourceDestCheck.should.equal('true') + + instance_attribute = instance.get_attribute("sourceDestCheck") + instance_attribute.should.be.a(InstanceAttribute) + instance_attribute.get("sourceDestCheck").should.equal(True) + + # Set to false (note: Boto converts bool to string, eg 'false') + + with assert_raises(EC2ResponseError) as ex: + instance.modify_attribute("sourceDestCheck", False, dry_run=True) + ex.exception.error_code.should.equal('DryRunOperation') + ex.exception.status.should.equal(400) + ex.exception.message.should.equal( + 'An error occurred (DryRunOperation) when calling the ModifySourceDestCheck operation: Request would have succeeded, but DryRun flag is set') + + instance.modify_attribute("sourceDestCheck", False) + + instance.update() + instance.sourceDestCheck.should.equal('false') + + instance_attribute = instance.get_attribute("sourceDestCheck") + instance_attribute.should.be.a(InstanceAttribute) + instance_attribute.get("sourceDestCheck").should.equal(False) + + # Set back to true + instance.modify_attribute("sourceDestCheck", True) + + instance.update() + instance.sourceDestCheck.should.equal('true') + + instance_attribute = instance.get_attribute("sourceDestCheck") + instance_attribute.should.be.a(InstanceAttribute) + instance_attribute.get("sourceDestCheck").should.equal(True) + + +@mock_ec2_deprecated +def test_user_data_with_run_instance(): + user_data = b"some user data" + conn = boto.connect_ec2('the_key', 'the_secret') + reservation = conn.run_instances('ami-1234abcd', user_data=user_data) + instance = reservation.instances[0] + + instance_attribute = instance.get_attribute("userData") + instance_attribute.should.be.a(InstanceAttribute) + retrieved_user_data = instance_attribute.get("userData").encode('utf-8') + decoded_user_data = base64.decodestring(retrieved_user_data) + decoded_user_data.should.equal(b"some user data") + + +@mock_ec2_deprecated +def test_run_instance_with_security_group_name(): + conn = boto.connect_ec2('the_key', 'the_secret') + + with assert_raises(EC2ResponseError) as ex: + group = conn.create_security_group( + 'group1', "some description", dry_run=True) + ex.exception.error_code.should.equal('DryRunOperation') + ex.exception.status.should.equal(400) + ex.exception.message.should.equal( + 'An error occurred (DryRunOperation) when calling the CreateSecurityGroup operation: Request would have succeeded, but DryRun flag is set') + + group = conn.create_security_group('group1', 
"some description") + + reservation = conn.run_instances('ami-1234abcd', + security_groups=['group1']) + instance = reservation.instances[0] + + instance.groups[0].id.should.equal(group.id) + instance.groups[0].name.should.equal("group1") + + +@mock_ec2_deprecated +def test_run_instance_with_security_group_id(): + conn = boto.connect_ec2('the_key', 'the_secret') + group = conn.create_security_group('group1', "some description") + reservation = conn.run_instances('ami-1234abcd', + security_group_ids=[group.id]) + instance = reservation.instances[0] + + instance.groups[0].id.should.equal(group.id) + instance.groups[0].name.should.equal("group1") + + +@mock_ec2_deprecated +def test_run_instance_with_instance_type(): + conn = boto.connect_ec2('the_key', 'the_secret') + reservation = conn.run_instances('ami-1234abcd', instance_type="t1.micro") + instance = reservation.instances[0] + + instance.instance_type.should.equal("t1.micro") + + +@mock_ec2_deprecated +def test_run_instance_with_default_placement(): + conn = boto.connect_ec2('the_key', 'the_secret') + reservation = conn.run_instances('ami-1234abcd') + instance = reservation.instances[0] + + instance.placement.should.equal("us-east-1a") + + +@mock_ec2_deprecated +def test_run_instance_with_placement(): + conn = boto.connect_ec2('the_key', 'the_secret') + reservation = conn.run_instances('ami-1234abcd', placement="us-east-1b") + instance = reservation.instances[0] + + instance.placement.should.equal("us-east-1b") + + +@mock_ec2 +def test_run_instance_with_subnet_boto3(): + client = boto3.client('ec2', region_name='eu-central-1') + + ip_networks = [ + (ipaddress.ip_network('10.0.0.0/16'), ipaddress.ip_network('10.0.99.0/24')), + (ipaddress.ip_network('192.168.42.0/24'), ipaddress.ip_network('192.168.42.0/25')) + ] + + # Tests instances are created with the correct IPs + for vpc_cidr, subnet_cidr in ip_networks: + resp = client.create_vpc( + CidrBlock=str(vpc_cidr), + AmazonProvidedIpv6CidrBlock=False, + DryRun=False, + InstanceTenancy='default' + ) + vpc_id = resp['Vpc']['VpcId'] + + resp = client.create_subnet( + CidrBlock=str(subnet_cidr), + VpcId=vpc_id + ) + subnet_id = resp['Subnet']['SubnetId'] + + resp = client.run_instances( + ImageId='ami-1234abcd', + MaxCount=1, + MinCount=1, + SubnetId=subnet_id + ) + instance = resp['Instances'][0] + instance['SubnetId'].should.equal(subnet_id) + + priv_ipv4 = ipaddress.ip_address(six.text_type(instance['PrivateIpAddress'])) + subnet_cidr.should.contain(priv_ipv4) + + +@mock_ec2 +def test_run_instance_with_specified_private_ipv4(): + client = boto3.client('ec2', region_name='eu-central-1') + + vpc_cidr = ipaddress.ip_network('192.168.42.0/24') + subnet_cidr = ipaddress.ip_network('192.168.42.0/25') + + resp = client.create_vpc( + CidrBlock=str(vpc_cidr), + AmazonProvidedIpv6CidrBlock=False, + DryRun=False, + InstanceTenancy='default' + ) + vpc_id = resp['Vpc']['VpcId'] + + resp = client.create_subnet( + CidrBlock=str(subnet_cidr), + VpcId=vpc_id + ) + subnet_id = resp['Subnet']['SubnetId'] + + resp = client.run_instances( + ImageId='ami-1234abcd', + MaxCount=1, + MinCount=1, + SubnetId=subnet_id, + PrivateIpAddress='192.168.42.5' + ) + instance = resp['Instances'][0] + instance['SubnetId'].should.equal(subnet_id) + instance['PrivateIpAddress'].should.equal('192.168.42.5') + + +@mock_ec2 +def test_run_instance_mapped_public_ipv4(): + client = boto3.client('ec2', region_name='eu-central-1') + + vpc_cidr = ipaddress.ip_network('192.168.42.0/24') + subnet_cidr = 
ipaddress.ip_network('192.168.42.0/25') + + resp = client.create_vpc( + CidrBlock=str(vpc_cidr), + AmazonProvidedIpv6CidrBlock=False, + DryRun=False, + InstanceTenancy='default' + ) + vpc_id = resp['Vpc']['VpcId'] + + resp = client.create_subnet( + CidrBlock=str(subnet_cidr), + VpcId=vpc_id + ) + subnet_id = resp['Subnet']['SubnetId'] + client.modify_subnet_attribute( + SubnetId=subnet_id, + MapPublicIpOnLaunch={'Value': True} + ) + + resp = client.run_instances( + ImageId='ami-1234abcd', + MaxCount=1, + MinCount=1, + SubnetId=subnet_id + ) + instance = resp['Instances'][0] + instance.should.contain('PublicDnsName') + instance.should.contain('PublicIpAddress') + len(instance['PublicDnsName']).should.be.greater_than(0) + len(instance['PublicIpAddress']).should.be.greater_than(0) + + +@mock_ec2_deprecated +def test_run_instance_with_nic_autocreated(): + conn = boto.connect_vpc('the_key', 'the_secret') + vpc = conn.create_vpc("10.0.0.0/16") + subnet = conn.create_subnet(vpc.id, "10.0.0.0/18") + security_group1 = conn.create_security_group( + 'test security group #1', 'this is a test security group') + security_group2 = conn.create_security_group( + 'test security group #2', 'this is a test security group') + private_ip = "10.0.0.1" + + reservation = conn.run_instances('ami-1234abcd', subnet_id=subnet.id, + security_groups=[security_group1.name], + security_group_ids=[security_group2.id], + private_ip_address=private_ip) + instance = reservation.instances[0] + + all_enis = conn.get_all_network_interfaces() + all_enis.should.have.length_of(1) + eni = all_enis[0] + + instance.interfaces.should.have.length_of(1) + instance.interfaces[0].id.should.equal(eni.id) + + instance.subnet_id.should.equal(subnet.id) + instance.groups.should.have.length_of(2) + set([group.id for group in instance.groups]).should.equal( + set([security_group1.id, security_group2.id])) + + eni.subnet_id.should.equal(subnet.id) + eni.groups.should.have.length_of(2) + set([group.id for group in eni.groups]).should.equal( + set([security_group1.id, security_group2.id])) + eni.private_ip_addresses.should.have.length_of(1) + eni.private_ip_addresses[0].private_ip_address.should.equal(private_ip) + + +@mock_ec2_deprecated +def test_run_instance_with_nic_preexisting(): + conn = boto.connect_vpc('the_key', 'the_secret') + vpc = conn.create_vpc("10.0.0.0/16") + subnet = conn.create_subnet(vpc.id, "10.0.0.0/18") + security_group1 = conn.create_security_group( + 'test security group #1', 'this is a test security group') + security_group2 = conn.create_security_group( + 'test security group #2', 'this is a test security group') + private_ip = "54.0.0.1" + eni = conn.create_network_interface( + subnet.id, private_ip, groups=[security_group1.id]) + + # Boto requires NetworkInterfaceCollection of NetworkInterfaceSpecifications... + # annoying, but generates the desired querystring. 
+ from boto.ec2.networkinterface import NetworkInterfaceSpecification, NetworkInterfaceCollection + interface = NetworkInterfaceSpecification( + network_interface_id=eni.id, device_index=0) + interfaces = NetworkInterfaceCollection(interface) + # end Boto objects + + reservation = conn.run_instances('ami-1234abcd', network_interfaces=interfaces, + security_group_ids=[security_group2.id]) + instance = reservation.instances[0] + + instance.subnet_id.should.equal(subnet.id) + + all_enis = conn.get_all_network_interfaces() + all_enis.should.have.length_of(1) + + instance.interfaces.should.have.length_of(1) + instance_eni = instance.interfaces[0] + instance_eni.id.should.equal(eni.id) + + instance_eni.subnet_id.should.equal(subnet.id) + instance_eni.groups.should.have.length_of(2) + set([group.id for group in instance_eni.groups]).should.equal( + set([security_group1.id, security_group2.id])) + instance_eni.private_ip_addresses.should.have.length_of(1) + instance_eni.private_ip_addresses[ + 0].private_ip_address.should.equal(private_ip) + + +@requires_boto_gte("2.32.0") +@mock_ec2_deprecated +def test_instance_with_nic_attach_detach(): + conn = boto.connect_vpc('the_key', 'the_secret') + vpc = conn.create_vpc("10.0.0.0/16") + subnet = conn.create_subnet(vpc.id, "10.0.0.0/18") + + security_group1 = conn.create_security_group( + 'test security group #1', 'this is a test security group') + security_group2 = conn.create_security_group( + 'test security group #2', 'this is a test security group') + + reservation = conn.run_instances( + 'ami-1234abcd', security_group_ids=[security_group1.id]) + instance = reservation.instances[0] + + eni = conn.create_network_interface(subnet.id, groups=[security_group2.id]) + + # Check initial instance and ENI data + instance.interfaces.should.have.length_of(1) + + eni.groups.should.have.length_of(1) + set([group.id for group in eni.groups]).should.equal( + set([security_group2.id])) + + # Attach + with assert_raises(EC2ResponseError) as ex: + conn.attach_network_interface( + eni.id, instance.id, device_index=1, dry_run=True) + ex.exception.error_code.should.equal('DryRunOperation') + ex.exception.status.should.equal(400) + ex.exception.message.should.equal( + 'An error occurred (DryRunOperation) when calling the AttachNetworkInterface operation: Request would have succeeded, but DryRun flag is set') + + conn.attach_network_interface(eni.id, instance.id, device_index=1) + + # Check attached instance and ENI data + instance.update() + instance.interfaces.should.have.length_of(2) + instance_eni = instance.interfaces[1] + instance_eni.id.should.equal(eni.id) + instance_eni.groups.should.have.length_of(2) + set([group.id for group in instance_eni.groups]).should.equal( + set([security_group1.id, security_group2.id])) + + eni = conn.get_all_network_interfaces( + filters={'network-interface-id': eni.id})[0] + eni.groups.should.have.length_of(2) + set([group.id for group in eni.groups]).should.equal( + set([security_group1.id, security_group2.id])) + + # Detach + with assert_raises(EC2ResponseError) as ex: + conn.detach_network_interface(instance_eni.attachment.id, dry_run=True) + ex.exception.error_code.should.equal('DryRunOperation') + ex.exception.status.should.equal(400) + ex.exception.message.should.equal( + 'An error occurred (DryRunOperation) when calling the DetachNetworkInterface operation: Request would have succeeded, but DryRun flag is set') + + conn.detach_network_interface(instance_eni.attachment.id) + + # Check detached instance and ENI data + 
instance.update() + instance.interfaces.should.have.length_of(1) + + eni = conn.get_all_network_interfaces( + filters={'network-interface-id': eni.id})[0] + eni.groups.should.have.length_of(1) + set([group.id for group in eni.groups]).should.equal( + set([security_group2.id])) + + # Detach with invalid attachment ID + with assert_raises(EC2ResponseError) as cm: + conn.detach_network_interface('eni-attach-1234abcd') + cm.exception.code.should.equal('InvalidAttachmentID.NotFound') + cm.exception.status.should.equal(400) + cm.exception.request_id.should_not.be.none + + +@mock_ec2_deprecated +def test_ec2_classic_has_public_ip_address(): + conn = boto.connect_ec2('the_key', 'the_secret') + reservation = conn.run_instances('ami-1234abcd', key_name="keypair_name") + instance = reservation.instances[0] + instance.ip_address.should_not.equal(None) + instance.public_dns_name.should.contain(instance.ip_address.replace('.', '-')) + instance.private_ip_address.should_not.equal(None) + instance.private_dns_name.should.contain(instance.private_ip_address.replace('.', '-')) + + +@mock_ec2_deprecated +def test_run_instance_with_keypair(): + conn = boto.connect_ec2('the_key', 'the_secret') + reservation = conn.run_instances('ami-1234abcd', key_name="keypair_name") + instance = reservation.instances[0] + + instance.key_name.should.equal("keypair_name") + + +@mock_ec2_deprecated +def test_describe_instance_status_no_instances(): + conn = boto.connect_ec2('the_key', 'the_secret') + all_status = conn.get_all_instance_status() + len(all_status).should.equal(0) + + +@mock_ec2_deprecated +def test_describe_instance_status_with_instances(): + conn = boto.connect_ec2('the_key', 'the_secret') + conn.run_instances('ami-1234abcd', key_name="keypair_name") + + all_status = conn.get_all_instance_status() + len(all_status).should.equal(1) + all_status[0].instance_status.status.should.equal('ok') + all_status[0].system_status.status.should.equal('ok') + + +@mock_ec2_deprecated +def test_describe_instance_status_with_instance_filter(): + conn = boto.connect_ec2('the_key', 'the_secret') + + # We want to filter based on this one + reservation = conn.run_instances('ami-1234abcd', key_name="keypair_name") + instance = reservation.instances[0] + + # This is just to setup the test + conn.run_instances('ami-1234abcd', key_name="keypair_name") + + all_status = conn.get_all_instance_status(instance_ids=[instance.id]) + len(all_status).should.equal(1) + all_status[0].id.should.equal(instance.id) + + # Call get_all_instance_status with a bad id should raise an error + with assert_raises(EC2ResponseError) as cm: + conn.get_all_instance_status(instance_ids=[instance.id, "i-1234abcd"]) + cm.exception.code.should.equal('InvalidInstanceID.NotFound') + cm.exception.status.should.equal(400) + cm.exception.request_id.should_not.be.none + + +@requires_boto_gte("2.32.0") +@mock_ec2_deprecated +def test_describe_instance_status_with_non_running_instances(): + conn = boto.connect_ec2('the_key', 'the_secret') + reservation = conn.run_instances('ami-1234abcd', min_count=3) + instance1, instance2, instance3 = reservation.instances + instance1.stop() + instance2.terminate() + + all_running_status = conn.get_all_instance_status() + all_running_status.should.have.length_of(1) + all_running_status[0].id.should.equal(instance3.id) + all_running_status[0].state_name.should.equal('running') + + all_status = conn.get_all_instance_status(include_all_instances=True) + all_status.should.have.length_of(3) + + status1 = next((s for s in all_status if s.id == 
instance1.id), None)
+    status1.state_name.should.equal('stopped')
+
+    status2 = next((s for s in all_status if s.id == instance2.id), None)
+    status2.state_name.should.equal('terminated')
+
+    status3 = next((s for s in all_status if s.id == instance3.id), None)
+    status3.state_name.should.equal('running')
+
+
+@mock_ec2_deprecated
+def test_get_instance_by_security_group():
+    conn = boto.connect_ec2('the_key', 'the_secret')
+
+    conn.run_instances('ami-1234abcd')
+    instance = conn.get_only_instances()[0]
+
+    security_group = conn.create_security_group('test', 'test')
+
+    with assert_raises(EC2ResponseError) as ex:
+        conn.modify_instance_attribute(instance.id, "groupSet", [
+            security_group.id], dry_run=True)
+    ex.exception.error_code.should.equal('DryRunOperation')
+    ex.exception.status.should.equal(400)
+    ex.exception.message.should.equal(
+        'An error occurred (DryRunOperation) when calling the ModifyInstanceSecurityGroups operation: Request would have succeeded, but DryRun flag is set')
+
+    conn.modify_instance_attribute(
+        instance.id, "groupSet", [security_group.id])
+
+    security_group_instances = security_group.instances()
+
+    assert len(security_group_instances) == 1
+    assert security_group_instances[0].id == instance.id
+
+
+@mock_ec2
+def test_modify_delete_on_termination():
+    ec2_resource = boto3.resource('ec2', region_name='us-west-1')
+    result = ec2_resource.create_instances(ImageId='ami-12345678', MinCount=1, MaxCount=1)
+    instance = result[0]
+    instance.load()
+    instance.block_device_mappings[0]['Ebs']['DeleteOnTermination'].should.be(False)
+    instance.modify_attribute(
+        BlockDeviceMappings=[{
+            'DeviceName': '/dev/sda1',
+            'Ebs': {'DeleteOnTermination': True}
+        }]
+    )
+    instance.load()
+    instance.block_device_mappings[0]['Ebs']['DeleteOnTermination'].should.be(True)
+
+
+@mock_ec2
+def test_create_instance_ebs_optimized():
+    ec2_resource = boto3.resource('ec2', region_name='eu-west-1')
+
+    instance = ec2_resource.create_instances(
+        ImageId='ami-12345678',
+        MaxCount=1,
+        MinCount=1,
+        EbsOptimized=True,
+    )[0]
+    instance.load()
+    instance.ebs_optimized.should.be(True)
+
+    instance.modify_attribute(
+        EbsOptimized={
+            'Value': False
+        }
+    )
+    instance.load()
+    instance.ebs_optimized.should.be(False)
diff --git a/tests/test_ec2/test_internet_gateways.py b/tests/test_ec2/test_internet_gateways.py
index 3a1d0fda9cef..1f010223c0f5 100644
--- a/tests/test_ec2/test_internet_gateways.py
+++ b/tests/test_ec2/test_internet_gateways.py
@@ -1,269 +1,269 @@
-from __future__ import unicode_literals
-# Ensure 'assert_raises' context manager support for Python 2.6
-import tests.backport_assert_raises
-from nose.tools import assert_raises
-
-import re
-
-import boto
-from boto.exception import EC2ResponseError
-
-import sure  # noqa
-
-from moto import mock_ec2_deprecated
-
-
-VPC_CIDR = "10.0.0.0/16"
-BAD_VPC = "vpc-deadbeef"
-BAD_IGW = "igw-deadbeef"
-
-
-@mock_ec2_deprecated
-def test_igw_create():
-    """ internet gateway create """
-    conn = boto.connect_vpc('the_key', 'the_secret')
-
-    conn.get_all_internet_gateways().should.have.length_of(0)
-
-    with assert_raises(EC2ResponseError) as ex:
-        igw = conn.create_internet_gateway(dry_run=True)
-    ex.exception.error_code.should.equal('DryRunOperation')
-    ex.exception.status.should.equal(400)
-    ex.exception.message.should.equal(
-        'An error occurred (DryRunOperation) when calling the CreateInternetGateway operation: Request would have succeeded, but DryRun flag is set')
-
-    igw = conn.create_internet_gateway()
-    
conn.get_all_internet_gateways().should.have.length_of(1) - igw.id.should.match(r'igw-[0-9a-f]+') - - igw = conn.get_all_internet_gateways()[0] - igw.attachments.should.have.length_of(0) - - -@mock_ec2_deprecated -def test_igw_attach(): - """ internet gateway attach """ - conn = boto.connect_vpc('the_key', 'the_secret') - igw = conn.create_internet_gateway() - vpc = conn.create_vpc(VPC_CIDR) - - with assert_raises(EC2ResponseError) as ex: - conn.attach_internet_gateway(igw.id, vpc.id, dry_run=True) - ex.exception.error_code.should.equal('DryRunOperation') - ex.exception.status.should.equal(400) - ex.exception.message.should.equal( - 'An error occurred (DryRunOperation) when calling the AttachInternetGateway operation: Request would have succeeded, but DryRun flag is set') - - conn.attach_internet_gateway(igw.id, vpc.id) - - igw = conn.get_all_internet_gateways()[0] - igw.attachments[0].vpc_id.should.be.equal(vpc.id) - - -@mock_ec2_deprecated -def test_igw_attach_bad_vpc(): - """ internet gateway fail to attach w/ bad vpc """ - conn = boto.connect_vpc('the_key', 'the_secret') - igw = conn.create_internet_gateway() - - with assert_raises(EC2ResponseError) as cm: - conn.attach_internet_gateway(igw.id, BAD_VPC) - cm.exception.code.should.equal('InvalidVpcID.NotFound') - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none - - -@mock_ec2_deprecated -def test_igw_attach_twice(): - """ internet gateway fail to attach twice """ - conn = boto.connect_vpc('the_key', 'the_secret') - igw = conn.create_internet_gateway() - vpc1 = conn.create_vpc(VPC_CIDR) - vpc2 = conn.create_vpc(VPC_CIDR) - conn.attach_internet_gateway(igw.id, vpc1.id) - - with assert_raises(EC2ResponseError) as cm: - conn.attach_internet_gateway(igw.id, vpc2.id) - cm.exception.code.should.equal('Resource.AlreadyAssociated') - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none - - -@mock_ec2_deprecated -def test_igw_detach(): - """ internet gateway detach""" - conn = boto.connect_vpc('the_key', 'the_secret') - igw = conn.create_internet_gateway() - vpc = conn.create_vpc(VPC_CIDR) - conn.attach_internet_gateway(igw.id, vpc.id) - - with assert_raises(EC2ResponseError) as ex: - conn.detach_internet_gateway(igw.id, vpc.id, dry_run=True) - ex.exception.error_code.should.equal('DryRunOperation') - ex.exception.status.should.equal(400) - ex.exception.message.should.equal( - 'An error occurred (DryRunOperation) when calling the DetachInternetGateway operation: Request would have succeeded, but DryRun flag is set') - - conn.detach_internet_gateway(igw.id, vpc.id) - igw = conn.get_all_internet_gateways()[0] - igw.attachments.should.have.length_of(0) - - -@mock_ec2_deprecated -def test_igw_detach_wrong_vpc(): - """ internet gateway fail to detach w/ wrong vpc """ - conn = boto.connect_vpc('the_key', 'the_secret') - igw = conn.create_internet_gateway() - vpc1 = conn.create_vpc(VPC_CIDR) - vpc2 = conn.create_vpc(VPC_CIDR) - conn.attach_internet_gateway(igw.id, vpc1.id) - - with assert_raises(EC2ResponseError) as cm: - conn.detach_internet_gateway(igw.id, vpc2.id) - cm.exception.code.should.equal('Gateway.NotAttached') - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none - - -@mock_ec2_deprecated -def test_igw_detach_invalid_vpc(): - """ internet gateway fail to detach w/ invalid vpc """ - conn = boto.connect_vpc('the_key', 'the_secret') - igw = conn.create_internet_gateway() - vpc = conn.create_vpc(VPC_CIDR) - conn.attach_internet_gateway(igw.id, vpc.id) 
- - with assert_raises(EC2ResponseError) as cm: - conn.detach_internet_gateway(igw.id, BAD_VPC) - cm.exception.code.should.equal('Gateway.NotAttached') - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none - - -@mock_ec2_deprecated -def test_igw_detach_unattached(): - """ internet gateway fail to detach unattached """ - conn = boto.connect_vpc('the_key', 'the_secret') - igw = conn.create_internet_gateway() - vpc = conn.create_vpc(VPC_CIDR) - - with assert_raises(EC2ResponseError) as cm: - conn.detach_internet_gateway(igw.id, vpc.id) - cm.exception.code.should.equal('Gateway.NotAttached') - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none - - -@mock_ec2_deprecated -def test_igw_delete(): - """ internet gateway delete""" - conn = boto.connect_vpc('the_key', 'the_secret') - vpc = conn.create_vpc(VPC_CIDR) - conn.get_all_internet_gateways().should.have.length_of(0) - igw = conn.create_internet_gateway() - conn.get_all_internet_gateways().should.have.length_of(1) - - with assert_raises(EC2ResponseError) as ex: - conn.delete_internet_gateway(igw.id, dry_run=True) - ex.exception.error_code.should.equal('DryRunOperation') - ex.exception.status.should.equal(400) - ex.exception.message.should.equal( - 'An error occurred (DryRunOperation) when calling the DeleteInternetGateway operation: Request would have succeeded, but DryRun flag is set') - - conn.delete_internet_gateway(igw.id) - conn.get_all_internet_gateways().should.have.length_of(0) - - -@mock_ec2_deprecated -def test_igw_delete_attached(): - """ internet gateway fail to delete attached """ - conn = boto.connect_vpc('the_key', 'the_secret') - igw = conn.create_internet_gateway() - vpc = conn.create_vpc(VPC_CIDR) - conn.attach_internet_gateway(igw.id, vpc.id) - - with assert_raises(EC2ResponseError) as cm: - conn.delete_internet_gateway(igw.id) - cm.exception.code.should.equal('DependencyViolation') - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none - - -@mock_ec2_deprecated -def test_igw_desribe(): - """ internet gateway fetch by id """ - conn = boto.connect_vpc('the_key', 'the_secret') - igw = conn.create_internet_gateway() - igw_by_search = conn.get_all_internet_gateways([igw.id])[0] - igw.id.should.equal(igw_by_search.id) - - -@mock_ec2_deprecated -def test_igw_describe_bad_id(): - """ internet gateway fail to fetch by bad id """ - conn = boto.connect_vpc('the_key', 'the_secret') - with assert_raises(EC2ResponseError) as cm: - conn.get_all_internet_gateways([BAD_IGW]) - cm.exception.code.should.equal('InvalidInternetGatewayID.NotFound') - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none - - -@mock_ec2_deprecated -def test_igw_filter_by_vpc_id(): - """ internet gateway filter by vpc id """ - conn = boto.connect_vpc('the_key', 'the_secret') - - igw1 = conn.create_internet_gateway() - igw2 = conn.create_internet_gateway() - vpc = conn.create_vpc(VPC_CIDR) - conn.attach_internet_gateway(igw1.id, vpc.id) - - result = conn.get_all_internet_gateways( - filters={"attachment.vpc-id": vpc.id}) - result.should.have.length_of(1) - result[0].id.should.equal(igw1.id) - - -@mock_ec2_deprecated -def test_igw_filter_by_tags(): - """ internet gateway filter by vpc id """ - conn = boto.connect_vpc('the_key', 'the_secret') - - igw1 = conn.create_internet_gateway() - igw2 = conn.create_internet_gateway() - igw1.add_tag("tests", "yes") - - result = conn.get_all_internet_gateways(filters={"tag:tests": "yes"}) - 
result.should.have.length_of(1) - result[0].id.should.equal(igw1.id) - - -@mock_ec2_deprecated -def test_igw_filter_by_internet_gateway_id(): - """ internet gateway filter by internet gateway id """ - conn = boto.connect_vpc('the_key', 'the_secret') - - igw1 = conn.create_internet_gateway() - igw2 = conn.create_internet_gateway() - - result = conn.get_all_internet_gateways( - filters={"internet-gateway-id": igw1.id}) - result.should.have.length_of(1) - result[0].id.should.equal(igw1.id) - - -@mock_ec2_deprecated -def test_igw_filter_by_attachment_state(): - """ internet gateway filter by attachment state """ - conn = boto.connect_vpc('the_key', 'the_secret') - - igw1 = conn.create_internet_gateway() - igw2 = conn.create_internet_gateway() - vpc = conn.create_vpc(VPC_CIDR) - conn.attach_internet_gateway(igw1.id, vpc.id) - - result = conn.get_all_internet_gateways( - filters={"attachment.state": "available"}) - result.should.have.length_of(1) - result[0].id.should.equal(igw1.id) +from __future__ import unicode_literals +# Ensure 'assert_raises' context manager support for Python 2.6 +import tests.backport_assert_raises +from nose.tools import assert_raises + +import re + +import boto +from boto.exception import EC2ResponseError + +import sure # noqa + +from moto import mock_ec2_deprecated + + +VPC_CIDR = "10.0.0.0/16" +BAD_VPC = "vpc-deadbeef" +BAD_IGW = "igw-deadbeef" + + +@mock_ec2_deprecated +def test_igw_create(): + """ internet gateway create """ + conn = boto.connect_vpc('the_key', 'the_secret') + + conn.get_all_internet_gateways().should.have.length_of(0) + + with assert_raises(EC2ResponseError) as ex: + igw = conn.create_internet_gateway(dry_run=True) + ex.exception.error_code.should.equal('DryRunOperation') + ex.exception.status.should.equal(400) + ex.exception.message.should.equal( + 'An error occurred (DryRunOperation) when calling the CreateInternetGateway operation: Request would have succeeded, but DryRun flag is set') + + igw = conn.create_internet_gateway() + conn.get_all_internet_gateways().should.have.length_of(1) + igw.id.should.match(r'igw-[0-9a-f]+') + + igw = conn.get_all_internet_gateways()[0] + igw.attachments.should.have.length_of(0) + + +@mock_ec2_deprecated +def test_igw_attach(): + """ internet gateway attach """ + conn = boto.connect_vpc('the_key', 'the_secret') + igw = conn.create_internet_gateway() + vpc = conn.create_vpc(VPC_CIDR) + + with assert_raises(EC2ResponseError) as ex: + conn.attach_internet_gateway(igw.id, vpc.id, dry_run=True) + ex.exception.error_code.should.equal('DryRunOperation') + ex.exception.status.should.equal(400) + ex.exception.message.should.equal( + 'An error occurred (DryRunOperation) when calling the AttachInternetGateway operation: Request would have succeeded, but DryRun flag is set') + + conn.attach_internet_gateway(igw.id, vpc.id) + + igw = conn.get_all_internet_gateways()[0] + igw.attachments[0].vpc_id.should.be.equal(vpc.id) + + +@mock_ec2_deprecated +def test_igw_attach_bad_vpc(): + """ internet gateway fail to attach w/ bad vpc """ + conn = boto.connect_vpc('the_key', 'the_secret') + igw = conn.create_internet_gateway() + + with assert_raises(EC2ResponseError) as cm: + conn.attach_internet_gateway(igw.id, BAD_VPC) + cm.exception.code.should.equal('InvalidVpcID.NotFound') + cm.exception.status.should.equal(400) + cm.exception.request_id.should_not.be.none + + +@mock_ec2_deprecated +def test_igw_attach_twice(): + """ internet gateway fail to attach twice """ + conn = boto.connect_vpc('the_key', 'the_secret') + igw = 
conn.create_internet_gateway() + vpc1 = conn.create_vpc(VPC_CIDR) + vpc2 = conn.create_vpc(VPC_CIDR) + conn.attach_internet_gateway(igw.id, vpc1.id) + + with assert_raises(EC2ResponseError) as cm: + conn.attach_internet_gateway(igw.id, vpc2.id) + cm.exception.code.should.equal('Resource.AlreadyAssociated') + cm.exception.status.should.equal(400) + cm.exception.request_id.should_not.be.none + + +@mock_ec2_deprecated +def test_igw_detach(): + """ internet gateway detach""" + conn = boto.connect_vpc('the_key', 'the_secret') + igw = conn.create_internet_gateway() + vpc = conn.create_vpc(VPC_CIDR) + conn.attach_internet_gateway(igw.id, vpc.id) + + with assert_raises(EC2ResponseError) as ex: + conn.detach_internet_gateway(igw.id, vpc.id, dry_run=True) + ex.exception.error_code.should.equal('DryRunOperation') + ex.exception.status.should.equal(400) + ex.exception.message.should.equal( + 'An error occurred (DryRunOperation) when calling the DetachInternetGateway operation: Request would have succeeded, but DryRun flag is set') + + conn.detach_internet_gateway(igw.id, vpc.id) + igw = conn.get_all_internet_gateways()[0] + igw.attachments.should.have.length_of(0) + + +@mock_ec2_deprecated +def test_igw_detach_wrong_vpc(): + """ internet gateway fail to detach w/ wrong vpc """ + conn = boto.connect_vpc('the_key', 'the_secret') + igw = conn.create_internet_gateway() + vpc1 = conn.create_vpc(VPC_CIDR) + vpc2 = conn.create_vpc(VPC_CIDR) + conn.attach_internet_gateway(igw.id, vpc1.id) + + with assert_raises(EC2ResponseError) as cm: + conn.detach_internet_gateway(igw.id, vpc2.id) + cm.exception.code.should.equal('Gateway.NotAttached') + cm.exception.status.should.equal(400) + cm.exception.request_id.should_not.be.none + + +@mock_ec2_deprecated +def test_igw_detach_invalid_vpc(): + """ internet gateway fail to detach w/ invalid vpc """ + conn = boto.connect_vpc('the_key', 'the_secret') + igw = conn.create_internet_gateway() + vpc = conn.create_vpc(VPC_CIDR) + conn.attach_internet_gateway(igw.id, vpc.id) + + with assert_raises(EC2ResponseError) as cm: + conn.detach_internet_gateway(igw.id, BAD_VPC) + cm.exception.code.should.equal('Gateway.NotAttached') + cm.exception.status.should.equal(400) + cm.exception.request_id.should_not.be.none + + +@mock_ec2_deprecated +def test_igw_detach_unattached(): + """ internet gateway fail to detach unattached """ + conn = boto.connect_vpc('the_key', 'the_secret') + igw = conn.create_internet_gateway() + vpc = conn.create_vpc(VPC_CIDR) + + with assert_raises(EC2ResponseError) as cm: + conn.detach_internet_gateway(igw.id, vpc.id) + cm.exception.code.should.equal('Gateway.NotAttached') + cm.exception.status.should.equal(400) + cm.exception.request_id.should_not.be.none + + +@mock_ec2_deprecated +def test_igw_delete(): + """ internet gateway delete""" + conn = boto.connect_vpc('the_key', 'the_secret') + vpc = conn.create_vpc(VPC_CIDR) + conn.get_all_internet_gateways().should.have.length_of(0) + igw = conn.create_internet_gateway() + conn.get_all_internet_gateways().should.have.length_of(1) + + with assert_raises(EC2ResponseError) as ex: + conn.delete_internet_gateway(igw.id, dry_run=True) + ex.exception.error_code.should.equal('DryRunOperation') + ex.exception.status.should.equal(400) + ex.exception.message.should.equal( + 'An error occurred (DryRunOperation) when calling the DeleteInternetGateway operation: Request would have succeeded, but DryRun flag is set') + + conn.delete_internet_gateway(igw.id) + conn.get_all_internet_gateways().should.have.length_of(0) + + 
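+# An internet gateway that is still attached to a VPC cannot be deleted;
+# the delete call below must fail with DependencyViolation until the
+# gateway is detached, mirroring real EC2 behaviour.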
+@mock_ec2_deprecated
+def test_igw_delete_attached():
+    """ internet gateway fail to delete attached """
+    conn = boto.connect_vpc('the_key', 'the_secret')
+    igw = conn.create_internet_gateway()
+    vpc = conn.create_vpc(VPC_CIDR)
+    conn.attach_internet_gateway(igw.id, vpc.id)
+
+    with assert_raises(EC2ResponseError) as cm:
+        conn.delete_internet_gateway(igw.id)
+    cm.exception.code.should.equal('DependencyViolation')
+    cm.exception.status.should.equal(400)
+    cm.exception.request_id.should_not.be.none
+
+
+@mock_ec2_deprecated
+def test_igw_describe():
+    """ internet gateway fetch by id """
+    conn = boto.connect_vpc('the_key', 'the_secret')
+    igw = conn.create_internet_gateway()
+    igw_by_search = conn.get_all_internet_gateways([igw.id])[0]
+    igw.id.should.equal(igw_by_search.id)
+
+
+@mock_ec2_deprecated
+def test_igw_describe_bad_id():
+    """ internet gateway fail to fetch by bad id """
+    conn = boto.connect_vpc('the_key', 'the_secret')
+    with assert_raises(EC2ResponseError) as cm:
+        conn.get_all_internet_gateways([BAD_IGW])
+    cm.exception.code.should.equal('InvalidInternetGatewayID.NotFound')
+    cm.exception.status.should.equal(400)
+    cm.exception.request_id.should_not.be.none
+
+
+@mock_ec2_deprecated
+def test_igw_filter_by_vpc_id():
+    """ internet gateway filter by vpc id """
+    conn = boto.connect_vpc('the_key', 'the_secret')
+
+    igw1 = conn.create_internet_gateway()
+    igw2 = conn.create_internet_gateway()
+    vpc = conn.create_vpc(VPC_CIDR)
+    conn.attach_internet_gateway(igw1.id, vpc.id)
+
+    result = conn.get_all_internet_gateways(
+        filters={"attachment.vpc-id": vpc.id})
+    result.should.have.length_of(1)
+    result[0].id.should.equal(igw1.id)
+
+
+@mock_ec2_deprecated
+def test_igw_filter_by_tags():
+    """ internet gateway filter by tags """
+    conn = boto.connect_vpc('the_key', 'the_secret')
+
+    igw1 = conn.create_internet_gateway()
+    igw2 = conn.create_internet_gateway()
+    igw1.add_tag("tests", "yes")
+
+    result = conn.get_all_internet_gateways(filters={"tag:tests": "yes"})
+    result.should.have.length_of(1)
+    result[0].id.should.equal(igw1.id)
+
+
+@mock_ec2_deprecated
+def test_igw_filter_by_internet_gateway_id():
+    """ internet gateway filter by internet gateway id """
+    conn = boto.connect_vpc('the_key', 'the_secret')
+
+    igw1 = conn.create_internet_gateway()
+    igw2 = conn.create_internet_gateway()
+
+    result = conn.get_all_internet_gateways(
+        filters={"internet-gateway-id": igw1.id})
+    result.should.have.length_of(1)
+    result[0].id.should.equal(igw1.id)
+
+
+@mock_ec2_deprecated
+def test_igw_filter_by_attachment_state():
+    """ internet gateway filter by attachment state """
+    conn = boto.connect_vpc('the_key', 'the_secret')
+
+    igw1 = conn.create_internet_gateway()
+    igw2 = conn.create_internet_gateway()
+    vpc = conn.create_vpc(VPC_CIDR)
+    conn.attach_internet_gateway(igw1.id, vpc.id)
+
+    result = conn.get_all_internet_gateways(
+        filters={"attachment.state": "available"})
+    result.should.have.length_of(1)
+    result[0].id.should.equal(igw1.id)
diff --git a/tests/test_ec2/test_ip_addresses.py b/tests/test_ec2/test_ip_addresses.py
index a8e927b004bc..60cf1cfc664e 100644
--- a/tests/test_ec2/test_ip_addresses.py
+++ b/tests/test_ec2/test_ip_addresses.py
@@ -1,10 +1,10 @@
-from __future__ import unicode_literals
-import boto
-import sure  # noqa
-
-from moto import mock_ec2
-
-
-@mock_ec2
-def test_ip_addresses():
-    pass
+from __future__ import unicode_literals
+import boto
+import sure  # noqa
+
+from moto import mock_ec2
+
+
+@mock_ec2
+def test_ip_addresses():
+    pass
diff --git 
a/tests/test_ec2/test_key_pairs.py b/tests/test_ec2/test_key_pairs.py index 0a7fb9f7620b..75c1aa73f711 100644 --- a/tests/test_ec2/test_key_pairs.py +++ b/tests/test_ec2/test_key_pairs.py @@ -1,151 +1,151 @@ -from __future__ import unicode_literals -# Ensure 'assert_raises' context manager support for Python 2.6 -import tests.backport_assert_raises -from nose.tools import assert_raises - -import boto -import six -import sure # noqa - -from boto.exception import EC2ResponseError -from moto import mock_ec2_deprecated - - -@mock_ec2_deprecated -def test_key_pairs_empty(): - conn = boto.connect_ec2('the_key', 'the_secret') - assert len(conn.get_all_key_pairs()) == 0 - - -@mock_ec2_deprecated -def test_key_pairs_invalid_id(): - conn = boto.connect_ec2('the_key', 'the_secret') - - with assert_raises(EC2ResponseError) as cm: - conn.get_all_key_pairs('foo') - cm.exception.code.should.equal('InvalidKeyPair.NotFound') - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none - - -@mock_ec2_deprecated -def test_key_pairs_create(): - conn = boto.connect_ec2('the_key', 'the_secret') - - with assert_raises(EC2ResponseError) as ex: - kp = conn.create_key_pair('foo', dry_run=True) - ex.exception.error_code.should.equal('DryRunOperation') - ex.exception.status.should.equal(400) - ex.exception.message.should.equal( - 'An error occurred (DryRunOperation) when calling the CreateKeyPair operation: Request would have succeeded, but DryRun flag is set') - - kp = conn.create_key_pair('foo') - assert kp.material.startswith('---- BEGIN RSA PRIVATE KEY ----') - kps = conn.get_all_key_pairs() - assert len(kps) == 1 - assert kps[0].name == 'foo' - - -@mock_ec2_deprecated -def test_key_pairs_create_two(): - conn = boto.connect_ec2('the_key', 'the_secret') - kp = conn.create_key_pair('foo') - kp = conn.create_key_pair('bar') - assert kp.material.startswith('---- BEGIN RSA PRIVATE KEY ----') - kps = conn.get_all_key_pairs() - kps.should.have.length_of(2) - [i.name for i in kps].should.contain('foo') - [i.name for i in kps].should.contain('bar') - kps = conn.get_all_key_pairs('foo') - kps.should.have.length_of(1) - kps[0].name.should.equal('foo') - - -@mock_ec2_deprecated -def test_key_pairs_create_exist(): - conn = boto.connect_ec2('the_key', 'the_secret') - kp = conn.create_key_pair('foo') - assert kp.material.startswith('---- BEGIN RSA PRIVATE KEY ----') - assert len(conn.get_all_key_pairs()) == 1 - - with assert_raises(EC2ResponseError) as cm: - conn.create_key_pair('foo') - cm.exception.code.should.equal('InvalidKeyPair.Duplicate') - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none - - -@mock_ec2_deprecated -def test_key_pairs_delete_no_exist(): - conn = boto.connect_ec2('the_key', 'the_secret') - assert len(conn.get_all_key_pairs()) == 0 - r = conn.delete_key_pair('foo') - r.should.be.ok - - -@mock_ec2_deprecated -def test_key_pairs_delete_exist(): - conn = boto.connect_ec2('the_key', 'the_secret') - conn.create_key_pair('foo') - - with assert_raises(EC2ResponseError) as ex: - r = conn.delete_key_pair('foo', dry_run=True) - ex.exception.error_code.should.equal('DryRunOperation') - ex.exception.status.should.equal(400) - ex.exception.message.should.equal( - 'An error occurred (DryRunOperation) when calling the DeleteKeyPair operation: Request would have succeeded, but DryRun flag is set') - - r = conn.delete_key_pair('foo') - r.should.be.ok - assert len(conn.get_all_key_pairs()) == 0 - - -@mock_ec2_deprecated -def test_key_pairs_import(): - conn = 
boto.connect_ec2('the_key', 'the_secret') - - with assert_raises(EC2ResponseError) as ex: - kp = conn.import_key_pair('foo', b'content', dry_run=True) - ex.exception.error_code.should.equal('DryRunOperation') - ex.exception.status.should.equal(400) - ex.exception.message.should.equal( - 'An error occurred (DryRunOperation) when calling the ImportKeyPair operation: Request would have succeeded, but DryRun flag is set') - - kp = conn.import_key_pair('foo', b'content') - assert kp.name == 'foo' - kps = conn.get_all_key_pairs() - assert len(kps) == 1 - assert kps[0].name == 'foo' - - -@mock_ec2_deprecated -def test_key_pairs_import_exist(): - conn = boto.connect_ec2('the_key', 'the_secret') - kp = conn.import_key_pair('foo', b'content') - assert kp.name == 'foo' - assert len(conn.get_all_key_pairs()) == 1 - - with assert_raises(EC2ResponseError) as cm: - conn.create_key_pair('foo') - cm.exception.code.should.equal('InvalidKeyPair.Duplicate') - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none - - -@mock_ec2_deprecated -def test_key_pair_filters(): - conn = boto.connect_ec2('the_key', 'the_secret') - - _ = conn.create_key_pair('kpfltr1') - kp2 = conn.create_key_pair('kpfltr2') - kp3 = conn.create_key_pair('kpfltr3') - - kp_by_name = conn.get_all_key_pairs( - filters={'key-name': 'kpfltr2'}) - set([kp.name for kp in kp_by_name] - ).should.equal(set([kp2.name])) - - kp_by_name = conn.get_all_key_pairs( - filters={'fingerprint': kp3.fingerprint}) - set([kp.name for kp in kp_by_name] - ).should.equal(set([kp3.name])) +from __future__ import unicode_literals +# Ensure 'assert_raises' context manager support for Python 2.6 +import tests.backport_assert_raises +from nose.tools import assert_raises + +import boto +import six +import sure # noqa + +from boto.exception import EC2ResponseError +from moto import mock_ec2_deprecated + + +@mock_ec2_deprecated +def test_key_pairs_empty(): + conn = boto.connect_ec2('the_key', 'the_secret') + assert len(conn.get_all_key_pairs()) == 0 + + +@mock_ec2_deprecated +def test_key_pairs_invalid_id(): + conn = boto.connect_ec2('the_key', 'the_secret') + + with assert_raises(EC2ResponseError) as cm: + conn.get_all_key_pairs('foo') + cm.exception.code.should.equal('InvalidKeyPair.NotFound') + cm.exception.status.should.equal(400) + cm.exception.request_id.should_not.be.none + + +@mock_ec2_deprecated +def test_key_pairs_create(): + conn = boto.connect_ec2('the_key', 'the_secret') + + with assert_raises(EC2ResponseError) as ex: + kp = conn.create_key_pair('foo', dry_run=True) + ex.exception.error_code.should.equal('DryRunOperation') + ex.exception.status.should.equal(400) + ex.exception.message.should.equal( + 'An error occurred (DryRunOperation) when calling the CreateKeyPair operation: Request would have succeeded, but DryRun flag is set') + + kp = conn.create_key_pair('foo') + assert kp.material.startswith('---- BEGIN RSA PRIVATE KEY ----') + kps = conn.get_all_key_pairs() + assert len(kps) == 1 + assert kps[0].name == 'foo' + + +@mock_ec2_deprecated +def test_key_pairs_create_two(): + conn = boto.connect_ec2('the_key', 'the_secret') + kp = conn.create_key_pair('foo') + kp = conn.create_key_pair('bar') + assert kp.material.startswith('---- BEGIN RSA PRIVATE KEY ----') + kps = conn.get_all_key_pairs() + kps.should.have.length_of(2) + [i.name for i in kps].should.contain('foo') + [i.name for i in kps].should.contain('bar') + kps = conn.get_all_key_pairs('foo') + kps.should.have.length_of(1) + kps[0].name.should.equal('foo') + + 
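+# Key pair names are unique per account and region: creating 'foo' a second
+# time below must fail with InvalidKeyPair.Duplicate rather than silently
+# replacing the existing key material.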
+@mock_ec2_deprecated +def test_key_pairs_create_exist(): + conn = boto.connect_ec2('the_key', 'the_secret') + kp = conn.create_key_pair('foo') + assert kp.material.startswith('---- BEGIN RSA PRIVATE KEY ----') + assert len(conn.get_all_key_pairs()) == 1 + + with assert_raises(EC2ResponseError) as cm: + conn.create_key_pair('foo') + cm.exception.code.should.equal('InvalidKeyPair.Duplicate') + cm.exception.status.should.equal(400) + cm.exception.request_id.should_not.be.none + + +@mock_ec2_deprecated +def test_key_pairs_delete_no_exist(): + conn = boto.connect_ec2('the_key', 'the_secret') + assert len(conn.get_all_key_pairs()) == 0 + r = conn.delete_key_pair('foo') + r.should.be.ok + + +@mock_ec2_deprecated +def test_key_pairs_delete_exist(): + conn = boto.connect_ec2('the_key', 'the_secret') + conn.create_key_pair('foo') + + with assert_raises(EC2ResponseError) as ex: + r = conn.delete_key_pair('foo', dry_run=True) + ex.exception.error_code.should.equal('DryRunOperation') + ex.exception.status.should.equal(400) + ex.exception.message.should.equal( + 'An error occurred (DryRunOperation) when calling the DeleteKeyPair operation: Request would have succeeded, but DryRun flag is set') + + r = conn.delete_key_pair('foo') + r.should.be.ok + assert len(conn.get_all_key_pairs()) == 0 + + +@mock_ec2_deprecated +def test_key_pairs_import(): + conn = boto.connect_ec2('the_key', 'the_secret') + + with assert_raises(EC2ResponseError) as ex: + kp = conn.import_key_pair('foo', b'content', dry_run=True) + ex.exception.error_code.should.equal('DryRunOperation') + ex.exception.status.should.equal(400) + ex.exception.message.should.equal( + 'An error occurred (DryRunOperation) when calling the ImportKeyPair operation: Request would have succeeded, but DryRun flag is set') + + kp = conn.import_key_pair('foo', b'content') + assert kp.name == 'foo' + kps = conn.get_all_key_pairs() + assert len(kps) == 1 + assert kps[0].name == 'foo' + + +@mock_ec2_deprecated +def test_key_pairs_import_exist(): + conn = boto.connect_ec2('the_key', 'the_secret') + kp = conn.import_key_pair('foo', b'content') + assert kp.name == 'foo' + assert len(conn.get_all_key_pairs()) == 1 + + with assert_raises(EC2ResponseError) as cm: + conn.create_key_pair('foo') + cm.exception.code.should.equal('InvalidKeyPair.Duplicate') + cm.exception.status.should.equal(400) + cm.exception.request_id.should_not.be.none + + +@mock_ec2_deprecated +def test_key_pair_filters(): + conn = boto.connect_ec2('the_key', 'the_secret') + + _ = conn.create_key_pair('kpfltr1') + kp2 = conn.create_key_pair('kpfltr2') + kp3 = conn.create_key_pair('kpfltr3') + + kp_by_name = conn.get_all_key_pairs( + filters={'key-name': 'kpfltr2'}) + set([kp.name for kp in kp_by_name] + ).should.equal(set([kp2.name])) + + kp_by_name = conn.get_all_key_pairs( + filters={'fingerprint': kp3.fingerprint}) + set([kp.name for kp in kp_by_name] + ).should.equal(set([kp3.name])) diff --git a/tests/test_ec2/test_monitoring.py b/tests/test_ec2/test_monitoring.py index 03be93adf2d1..95bd36e6ac2c 100644 --- a/tests/test_ec2/test_monitoring.py +++ b/tests/test_ec2/test_monitoring.py @@ -1,10 +1,10 @@ -from __future__ import unicode_literals -import boto -import sure # noqa - -from moto import mock_ec2 - - -@mock_ec2 -def test_monitoring(): - pass +from __future__ import unicode_literals +import boto +import sure # noqa + +from moto import mock_ec2 + + +@mock_ec2 +def test_monitoring(): + pass diff --git a/tests/test_ec2/test_nat_gateway.py b/tests/test_ec2/test_nat_gateway.py index 
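Taken together, the key-pair tests above all exercise the same round trip: create or import a key inside the mocked backend, then assert on what a describe call returns. For orientation, that round trip condenses to the sketch below. This is a minimal sketch, not part of the patch; it assumes moto and boto3 are importable, and the region and key name are illustrative.

import boto3
from moto import mock_ec2


@mock_ec2
def demo_key_pair_roundtrip():
    # Every call below is served by moto's in-memory backend, not AWS.
    client = boto3.client('ec2', region_name='us-east-1')
    client.create_key_pair(KeyName='demo-key')  # 'demo-key' is an illustrative name
    names = [kp['KeyName'] for kp in client.describe_key_pairs()['KeyPairs']]
    assert names == ['demo-key']


if __name__ == '__main__':
    demo_key_pair_roundtrip()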
diff --git a/tests/test_ec2/test_nat_gateway.py b/tests/test_ec2/test_nat_gateway.py
index 27e8753be1ad..310ae2c3a5a2 100644
--- a/tests/test_ec2/test_nat_gateway.py
+++ b/tests/test_ec2/test_nat_gateway.py
@@ -1,109 +1,109 @@
-from __future__ import unicode_literals
-import boto3
-import sure  # noqa
-from moto import mock_ec2
-
-
-@mock_ec2
-def test_describe_nat_gateways():
-    conn = boto3.client('ec2', 'us-east-1')
-
-    response = conn.describe_nat_gateways()
-
-    response['NatGateways'].should.have.length_of(0)
-
-
-@mock_ec2
-def test_create_nat_gateway():
-    conn = boto3.client('ec2', 'us-east-1')
-    vpc = conn.create_vpc(CidrBlock='10.0.0.0/16')
-    vpc_id = vpc['Vpc']['VpcId']
-    subnet = conn.create_subnet(
-        VpcId=vpc_id,
-        CidrBlock='10.0.1.0/27',
-        AvailabilityZone='us-east-1a',
-    )
-    allocation_id = conn.allocate_address(Domain='vpc')['AllocationId']
-    subnet_id = subnet['Subnet']['SubnetId']
-
-    response = conn.create_nat_gateway(
-        SubnetId=subnet_id,
-        AllocationId=allocation_id,
-    )
-
-    response['NatGateway']['VpcId'].should.equal(vpc_id)
-    response['NatGateway']['SubnetId'].should.equal(subnet_id)
-    response['NatGateway']['State'].should.equal('available')
-
-
-@mock_ec2
-def test_delete_nat_gateway():
-    conn = boto3.client('ec2', 'us-east-1')
-    vpc = conn.create_vpc(CidrBlock='10.0.0.0/16')
-    vpc_id = vpc['Vpc']['VpcId']
-    subnet = conn.create_subnet(
-        VpcId=vpc_id,
-        CidrBlock='10.0.1.0/27',
-        AvailabilityZone='us-east-1a',
-    )
-    allocation_id = conn.allocate_address(Domain='vpc')['AllocationId']
-    subnet_id = subnet['Subnet']['SubnetId']
-
-    nat_gateway = conn.create_nat_gateway(
-        SubnetId=subnet_id,
-        AllocationId=allocation_id,
-    )
-    nat_gateway_id = nat_gateway['NatGateway']['NatGatewayId']
-    response = conn.delete_nat_gateway(NatGatewayId=nat_gateway_id)
-
-    # this is hard to match against, so remove it
-    response['ResponseMetadata'].pop('HTTPHeaders', None)
-    response['ResponseMetadata'].pop('RetryAttempts', None)
-    response.should.equal({
-        'NatGatewayId': nat_gateway_id,
-        'ResponseMetadata': {
-            'HTTPStatusCode': 200,
-            'RequestId': '741fc8ab-6ebe-452b-b92b-example'
-        }
-    })
-
-
-@mock_ec2
-def test_create_and_describe_nat_gateway():
-    conn = boto3.client('ec2', 'us-east-1')
-    vpc = conn.create_vpc(CidrBlock='10.0.0.0/16')
-    vpc_id = vpc['Vpc']['VpcId']
-    subnet = conn.create_subnet(
-        VpcId=vpc_id,
-        CidrBlock='10.0.1.0/27',
-        AvailabilityZone='us-east-1a',
-    )
-    allocation_id = conn.allocate_address(Domain='vpc')['AllocationId']
-    subnet_id = subnet['Subnet']['SubnetId']
-
-    create_response = conn.create_nat_gateway(
-        SubnetId=subnet_id,
-        AllocationId=allocation_id,
-    )
-    nat_gateway_id = create_response['NatGateway']['NatGatewayId']
-    describe_response = conn.describe_nat_gateways()
-
-    enis = conn.describe_network_interfaces()['NetworkInterfaces']
-    eni_id = enis[0]['NetworkInterfaceId']
-    public_ip = conn.describe_addresses(AllocationIds=[allocation_id])[
-        'Addresses'][0]['PublicIp']
-
-    describe_response['NatGateways'].should.have.length_of(1)
-    describe_response['NatGateways'][0][
-        'NatGatewayId'].should.equal(nat_gateway_id)
-    describe_response['NatGateways'][0]['State'].should.equal('available')
-    describe_response['NatGateways'][0]['SubnetId'].should.equal(subnet_id)
-    describe_response['NatGateways'][0]['VpcId'].should.equal(vpc_id)
-    describe_response['NatGateways'][0]['NatGatewayAddresses'][
-        0]['AllocationId'].should.equal(allocation_id)
-    describe_response['NatGateways'][0]['NatGatewayAddresses'][
-        0]['NetworkInterfaceId'].should.equal(eni_id)
-    assert describe_response['NatGateways'][0][
-        'NatGatewayAddresses'][0]['PrivateIp'].startswith('10.')
-    describe_response['NatGateways'][0]['NatGatewayAddresses'][
-        0]['PublicIp'].should.equal(public_ip)
+from __future__ import unicode_literals
+import boto3
+import sure  # noqa
+from moto import mock_ec2
+
+
+@mock_ec2
+def test_describe_nat_gateways():
+    conn = boto3.client('ec2', 'us-east-1')
+
+    response = conn.describe_nat_gateways()
+
+    response['NatGateways'].should.have.length_of(0)
+
+
+@mock_ec2
+def test_create_nat_gateway():
+    conn = boto3.client('ec2', 'us-east-1')
+    vpc = conn.create_vpc(CidrBlock='10.0.0.0/16')
+    vpc_id = vpc['Vpc']['VpcId']
+    subnet = conn.create_subnet(
+        VpcId=vpc_id,
+        CidrBlock='10.0.1.0/27',
+        AvailabilityZone='us-east-1a',
+    )
+    allocation_id = conn.allocate_address(Domain='vpc')['AllocationId']
+    subnet_id = subnet['Subnet']['SubnetId']
+
+    response = conn.create_nat_gateway(
+        SubnetId=subnet_id,
+        AllocationId=allocation_id,
+    )
+
+    response['NatGateway']['VpcId'].should.equal(vpc_id)
+    response['NatGateway']['SubnetId'].should.equal(subnet_id)
+    response['NatGateway']['State'].should.equal('available')
+
+
+@mock_ec2
+def test_delete_nat_gateway():
+    conn = boto3.client('ec2', 'us-east-1')
+    vpc = conn.create_vpc(CidrBlock='10.0.0.0/16')
+    vpc_id = vpc['Vpc']['VpcId']
+    subnet = conn.create_subnet(
+        VpcId=vpc_id,
+        CidrBlock='10.0.1.0/27',
+        AvailabilityZone='us-east-1a',
+    )
+    allocation_id = conn.allocate_address(Domain='vpc')['AllocationId']
+    subnet_id = subnet['Subnet']['SubnetId']
+
+    nat_gateway = conn.create_nat_gateway(
+        SubnetId=subnet_id,
+        AllocationId=allocation_id,
+    )
+    nat_gateway_id = nat_gateway['NatGateway']['NatGatewayId']
+    response = conn.delete_nat_gateway(NatGatewayId=nat_gateway_id)
+
+    # this is hard to match against, so remove it
+    response['ResponseMetadata'].pop('HTTPHeaders', None)
+    response['ResponseMetadata'].pop('RetryAttempts', None)
+    response.should.equal({
+        'NatGatewayId': nat_gateway_id,
+        'ResponseMetadata': {
+            'HTTPStatusCode': 200,
+            'RequestId': '741fc8ab-6ebe-452b-b92b-example'
+        }
+    })
+
+
+@mock_ec2
+def test_create_and_describe_nat_gateway():
+    conn = boto3.client('ec2', 'us-east-1')
+    vpc = conn.create_vpc(CidrBlock='10.0.0.0/16')
+    vpc_id = vpc['Vpc']['VpcId']
+    subnet = conn.create_subnet(
+        VpcId=vpc_id,
+        CidrBlock='10.0.1.0/27',
+        AvailabilityZone='us-east-1a',
+    )
+    allocation_id = conn.allocate_address(Domain='vpc')['AllocationId']
+    subnet_id = subnet['Subnet']['SubnetId']
+
+    create_response = conn.create_nat_gateway(
+        SubnetId=subnet_id,
+        AllocationId=allocation_id,
+    )
+    nat_gateway_id = create_response['NatGateway']['NatGatewayId']
+    describe_response = conn.describe_nat_gateways()
+
+    enis = conn.describe_network_interfaces()['NetworkInterfaces']
+    eni_id = enis[0]['NetworkInterfaceId']
+    public_ip = conn.describe_addresses(AllocationIds=[allocation_id])[
+        'Addresses'][0]['PublicIp']
+
+    describe_response['NatGateways'].should.have.length_of(1)
+    describe_response['NatGateways'][0][
+        'NatGatewayId'].should.equal(nat_gateway_id)
+    describe_response['NatGateways'][0]['State'].should.equal('available')
+    describe_response['NatGateways'][0]['SubnetId'].should.equal(subnet_id)
+    describe_response['NatGateways'][0]['VpcId'].should.equal(vpc_id)
+    describe_response['NatGateways'][0]['NatGatewayAddresses'][
+        0]['AllocationId'].should.equal(allocation_id)
+    describe_response['NatGateways'][0]['NatGatewayAddresses'][
+        0]['NetworkInterfaceId'].should.equal(eni_id)
+    assert describe_response['NatGateways'][0][
+        'NatGatewayAddresses'][0]['PrivateIp'].startswith('10.')
+    describe_response['NatGateways'][0]['NatGatewayAddresses'][
+        0]['PublicIp'].should.equal(public_ip)
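The NAT gateway tests above walk a full create/describe/delete lifecycle against the mocked backend. The moving parts reduce to the sketch below, a minimal illustration rather than part of the patch; it assumes moto and boto3 are available, and the CIDRs and availability zone are placeholders.

import boto3
from moto import mock_ec2


@mock_ec2
def demo_nat_gateway_lifecycle():
    client = boto3.client('ec2', region_name='us-east-1')
    vpc_id = client.create_vpc(CidrBlock='10.0.0.0/16')['Vpc']['VpcId']
    subnet_id = client.create_subnet(
        VpcId=vpc_id, CidrBlock='10.0.1.0/27',
        AvailabilityZone='us-east-1a')['Subnet']['SubnetId']
    # A NAT gateway needs a subnet plus an elastic IP allocation.
    allocation_id = client.allocate_address(Domain='vpc')['AllocationId']

    nat_id = client.create_nat_gateway(
        SubnetId=subnet_id,
        AllocationId=allocation_id)['NatGateway']['NatGatewayId']
    assert len(client.describe_nat_gateways()['NatGateways']) == 1

    client.delete_nat_gateway(NatGatewayId=nat_id)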
diff --git a/tests/test_ec2/test_network_acls.py b/tests/test_ec2/test_network_acls.py
index fd2ec105ee23..ad3222b8af98 100644
--- a/tests/test_ec2/test_network_acls.py
+++ b/tests/test_ec2/test_network_acls.py
@@ -1,175 +1,175 @@
-from __future__ import unicode_literals
-import boto
-import sure  # noqa
-
-from moto import mock_ec2_deprecated
-
-
-@mock_ec2_deprecated
-def test_default_network_acl_created_with_vpc():
-    conn = boto.connect_vpc('the_key', 'the secret')
-    vpc = conn.create_vpc("10.0.0.0/16")
-    all_network_acls = conn.get_all_network_acls()
-    all_network_acls.should.have.length_of(2)
-
-
-@mock_ec2_deprecated
-def test_network_acls():
-    conn = boto.connect_vpc('the_key', 'the secret')
-    vpc = conn.create_vpc("10.0.0.0/16")
-    network_acl = conn.create_network_acl(vpc.id)
-    all_network_acls = conn.get_all_network_acls()
-    all_network_acls.should.have.length_of(3)
-
-
-@mock_ec2_deprecated
-def test_new_subnet_associates_with_default_network_acl():
-    conn = boto.connect_vpc('the_key', 'the secret')
-    vpc = conn.get_all_vpcs()[0]
-
-    subnet = conn.create_subnet(vpc.id, "10.0.0.0/18")
-    all_network_acls = conn.get_all_network_acls()
-    all_network_acls.should.have.length_of(1)
-
-    acl = all_network_acls[0]
-    acl.associations.should.have.length_of(4)
-    [a.subnet_id for a in acl.associations].should.contain(subnet.id)
-
-
-@mock_ec2_deprecated
-def test_network_acl_entries():
-    conn = boto.connect_vpc('the_key', 'the secret')
-    vpc = conn.create_vpc("10.0.0.0/16")
-
-    network_acl = conn.create_network_acl(vpc.id)
-
-    network_acl_entry = conn.create_network_acl_entry(
-        network_acl.id, 110, 6,
-        'ALLOW', '0.0.0.0/0', False,
-        port_range_from='443',
-        port_range_to='443'
-    )
-
-    all_network_acls = conn.get_all_network_acls()
-    all_network_acls.should.have.length_of(3)
-
-    test_network_acl = next(na for na in all_network_acls
-                            if na.id == network_acl.id)
-    entries = test_network_acl.network_acl_entries
-    entries.should.have.length_of(1)
-    entries[0].rule_number.should.equal('110')
-    entries[0].protocol.should.equal('6')
-    entries[0].rule_action.should.equal('ALLOW')
-
-
-@mock_ec2_deprecated
-def test_delete_network_acl_entry():
-    conn = boto.connect_vpc('the_key', 'the secret')
-    vpc = conn.create_vpc("10.0.0.0/16")
-
-    network_acl = conn.create_network_acl(vpc.id)
-
-    conn.create_network_acl_entry(
-        network_acl.id, 110, 6,
-        'ALLOW', '0.0.0.0/0', False,
-        port_range_from='443',
-        port_range_to='443'
-    )
-    conn.delete_network_acl_entry(
-        network_acl.id, 110, False
-    )
-
-    all_network_acls = conn.get_all_network_acls()
-
-    test_network_acl = next(na for na in all_network_acls
-                            if na.id == network_acl.id)
-    entries = test_network_acl.network_acl_entries
-    entries.should.have.length_of(0)
-
-
-@mock_ec2_deprecated
-def test_replace_network_acl_entry():
-    conn = boto.connect_vpc('the_key', 'the secret')
-    vpc = conn.create_vpc("10.0.0.0/16")
-
-    network_acl = conn.create_network_acl(vpc.id)
-
-    conn.create_network_acl_entry(
-        network_acl.id, 110, 6,
-        'ALLOW', '0.0.0.0/0', False,
-        port_range_from='443',
-        port_range_to='443'
-    )
-    conn.replace_network_acl_entry(
-        network_acl.id, 110, -1,
-        'DENY', '0.0.0.0/0', False,
-        port_range_from='22',
-        port_range_to='22'
-    )
-
-    all_network_acls = conn.get_all_network_acls()
-
-    test_network_acl = next(na for na in all_network_acls
-                            if na.id == network_acl.id)
-    entries = test_network_acl.network_acl_entries
-    entries.should.have.length_of(1)
-    entries[0].rule_number.should.equal('110')
-    entries[0].protocol.should.equal('-1')
-    entries[0].rule_action.should.equal('DENY')
-
-@mock_ec2_deprecated
-def test_associate_new_network_acl_with_subnet():
-    conn = boto.connect_vpc('the_key', 'the secret')
-    vpc = conn.create_vpc("10.0.0.0/16")
-    subnet = conn.create_subnet(vpc.id, "10.0.0.0/18")
-    network_acl = conn.create_network_acl(vpc.id)
-
-    conn.associate_network_acl(network_acl.id, subnet.id)
-
-    all_network_acls = conn.get_all_network_acls()
-    all_network_acls.should.have.length_of(3)
-
-    test_network_acl = next(na for na in all_network_acls
-                            if na.id == network_acl.id)
-
-    test_network_acl.associations.should.have.length_of(1)
-    test_network_acl.associations[0].subnet_id.should.equal(subnet.id)
-
-
-@mock_ec2_deprecated
-def test_delete_network_acl():
-    conn = boto.connect_vpc('the_key', 'the secret')
-    vpc = conn.create_vpc("10.0.0.0/16")
-    subnet = conn.create_subnet(vpc.id, "10.0.0.0/18")
-    network_acl = conn.create_network_acl(vpc.id)
-
-    all_network_acls = conn.get_all_network_acls()
-    all_network_acls.should.have.length_of(3)
-
-    any(acl.id == network_acl.id for acl in all_network_acls).should.be.ok
-
-    conn.delete_network_acl(network_acl.id)
-
-    updated_network_acls = conn.get_all_network_acls()
-    updated_network_acls.should.have.length_of(2)
-
-    any(acl.id == network_acl.id for acl in updated_network_acls).shouldnt.be.ok
-
-
-@mock_ec2_deprecated
-def test_network_acl_tagging():
-    conn = boto.connect_vpc('the_key', 'the secret')
-    vpc = conn.create_vpc("10.0.0.0/16")
-    network_acl = conn.create_network_acl(vpc.id)
-
-    network_acl.add_tag("a key", "some value")
-
-    tag = conn.get_all_tags()[0]
-    tag.name.should.equal("a key")
-    tag.value.should.equal("some value")
-
-    all_network_acls = conn.get_all_network_acls()
-    test_network_acl = next(na for na in all_network_acls
-                            if na.id == network_acl.id)
-    test_network_acl.tags.should.have.length_of(1)
-    test_network_acl.tags["a key"].should.equal("some value")
+from __future__ import unicode_literals
+import boto
+import sure  # noqa
+
+from moto import mock_ec2_deprecated
+
+
+@mock_ec2_deprecated
+def test_default_network_acl_created_with_vpc():
+    conn = boto.connect_vpc('the_key', 'the secret')
+    vpc = conn.create_vpc("10.0.0.0/16")
+    all_network_acls = conn.get_all_network_acls()
+    all_network_acls.should.have.length_of(2)
+
+
+@mock_ec2_deprecated
+def test_network_acls():
+    conn = boto.connect_vpc('the_key', 'the secret')
+    vpc = conn.create_vpc("10.0.0.0/16")
+    network_acl = conn.create_network_acl(vpc.id)
+    all_network_acls = conn.get_all_network_acls()
+    all_network_acls.should.have.length_of(3)
+
+
+@mock_ec2_deprecated
+def test_new_subnet_associates_with_default_network_acl():
+    conn = boto.connect_vpc('the_key', 'the secret')
+    vpc = conn.get_all_vpcs()[0]
+
+    subnet = conn.create_subnet(vpc.id, "10.0.0.0/18")
+    all_network_acls = conn.get_all_network_acls()
+    all_network_acls.should.have.length_of(1)
+
+    acl = all_network_acls[0]
+    acl.associations.should.have.length_of(4)
+    [a.subnet_id for a in acl.associations].should.contain(subnet.id)
+
+
+@mock_ec2_deprecated
+def test_network_acl_entries():
+    conn = boto.connect_vpc('the_key', 'the secret')
+    vpc = conn.create_vpc("10.0.0.0/16")
+
+    network_acl = conn.create_network_acl(vpc.id)
+
+    network_acl_entry = conn.create_network_acl_entry(
+        network_acl.id, 110, 6,
+        'ALLOW', '0.0.0.0/0', False,
+        port_range_from='443',
+        port_range_to='443'
+    )
+
+    all_network_acls = conn.get_all_network_acls()
+    all_network_acls.should.have.length_of(3)
+
+    test_network_acl = next(na for na in all_network_acls
+                            if na.id == network_acl.id)
+    entries = test_network_acl.network_acl_entries
+    entries.should.have.length_of(1)
+    entries[0].rule_number.should.equal('110')
+    entries[0].protocol.should.equal('6')
+    entries[0].rule_action.should.equal('ALLOW')
+
+
+@mock_ec2_deprecated
+def test_delete_network_acl_entry():
+    conn = boto.connect_vpc('the_key', 'the secret')
+    vpc = conn.create_vpc("10.0.0.0/16")
+
+    network_acl = conn.create_network_acl(vpc.id)
+
+    conn.create_network_acl_entry(
+        network_acl.id, 110, 6,
+        'ALLOW', '0.0.0.0/0', False,
+        port_range_from='443',
+        port_range_to='443'
+    )
+    conn.delete_network_acl_entry(
+        network_acl.id, 110, False
+    )
+
+    all_network_acls = conn.get_all_network_acls()
+
+    test_network_acl = next(na for na in all_network_acls
+                            if na.id == network_acl.id)
+    entries = test_network_acl.network_acl_entries
+    entries.should.have.length_of(0)
+
+
+@mock_ec2_deprecated
+def test_replace_network_acl_entry():
+    conn = boto.connect_vpc('the_key', 'the secret')
+    vpc = conn.create_vpc("10.0.0.0/16")
+
+    network_acl = conn.create_network_acl(vpc.id)
+
+    conn.create_network_acl_entry(
+        network_acl.id, 110, 6,
+        'ALLOW', '0.0.0.0/0', False,
+        port_range_from='443',
+        port_range_to='443'
+    )
+    conn.replace_network_acl_entry(
+        network_acl.id, 110, -1,
+        'DENY', '0.0.0.0/0', False,
+        port_range_from='22',
+        port_range_to='22'
+    )
+
+    all_network_acls = conn.get_all_network_acls()
+
+    test_network_acl = next(na for na in all_network_acls
+                            if na.id == network_acl.id)
+    entries = test_network_acl.network_acl_entries
+    entries.should.have.length_of(1)
+    entries[0].rule_number.should.equal('110')
+    entries[0].protocol.should.equal('-1')
+    entries[0].rule_action.should.equal('DENY')
+
+@mock_ec2_deprecated
+def test_associate_new_network_acl_with_subnet():
+    conn = boto.connect_vpc('the_key', 'the secret')
+    vpc = conn.create_vpc("10.0.0.0/16")
+    subnet = conn.create_subnet(vpc.id, "10.0.0.0/18")
+    network_acl = conn.create_network_acl(vpc.id)
+
+    conn.associate_network_acl(network_acl.id, subnet.id)
+
+    all_network_acls = conn.get_all_network_acls()
+    all_network_acls.should.have.length_of(3)
+
+    test_network_acl = next(na for na in all_network_acls
+                            if na.id == network_acl.id)
+
+    test_network_acl.associations.should.have.length_of(1)
+    test_network_acl.associations[0].subnet_id.should.equal(subnet.id)
+
+
+@mock_ec2_deprecated
+def test_delete_network_acl():
+    conn = boto.connect_vpc('the_key', 'the secret')
+    vpc = conn.create_vpc("10.0.0.0/16")
+    subnet = conn.create_subnet(vpc.id, "10.0.0.0/18")
+    network_acl = conn.create_network_acl(vpc.id)
+
+    all_network_acls = conn.get_all_network_acls()
+    all_network_acls.should.have.length_of(3)
+
+    any(acl.id == network_acl.id for acl in all_network_acls).should.be.ok
+
+    conn.delete_network_acl(network_acl.id)
+
+    updated_network_acls = conn.get_all_network_acls()
+    updated_network_acls.should.have.length_of(2)
+
+    any(acl.id == network_acl.id for acl in updated_network_acls).shouldnt.be.ok
+
+
+@mock_ec2_deprecated
+def test_network_acl_tagging():
+    conn = boto.connect_vpc('the_key', 'the secret')
+    vpc = conn.create_vpc("10.0.0.0/16")
+    network_acl = conn.create_network_acl(vpc.id)
+
+    network_acl.add_tag("a key", "some value")
+
+    tag = conn.get_all_tags()[0]
+    tag.name.should.equal("a key")
+    tag.value.should.equal("some value")
+
+    all_network_acls = conn.get_all_network_acls()
+    test_network_acl = next(na for na in all_network_acls
+                            if na.id == network_acl.id)
+    test_network_acl.tags.should.have.length_of(1)
+    test_network_acl.tags["a key"].should.equal("some value")
diff --git a/tests/test_ec2/test_placement_groups.py b/tests/test_ec2/test_placement_groups.py
index c7494228a7da..bc389488b66b 100644
--- a/tests/test_ec2/test_placement_groups.py
+++ b/tests/test_ec2/test_placement_groups.py
@@ -1,10 +1,10 @@
-from __future__ import unicode_literals
-import boto
-import sure  # noqa
-
-from moto import mock_ec2
-
-
-@mock_ec2
-def test_placement_groups():
-    pass
+from __future__ import unicode_literals
+import boto
+import sure  # noqa
+
+from moto import mock_ec2
+
+
+@mock_ec2
+def test_placement_groups():
+    pass
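The ACL tests above use boto's positional create_network_acl_entry signature, which is easy to misread. For readers more used to boto3, the same rule (rule 110, allow inbound TCP/443 from anywhere) spells out as below; this is a sketch against the mock, assuming moto and boto3, with illustrative names.

import boto3
from moto import mock_ec2


@mock_ec2
def demo_network_acl_entry():
    client = boto3.client('ec2', region_name='us-east-1')
    vpc_id = client.create_vpc(CidrBlock='10.0.0.0/16')['Vpc']['VpcId']
    acl_id = client.create_network_acl(VpcId=vpc_id)['NetworkAcl']['NetworkAclId']

    # Rule 110: allow inbound TCP (protocol number '6') on port 443 from anywhere.
    client.create_network_acl_entry(
        NetworkAclId=acl_id, RuleNumber=110, Protocol='6',
        RuleAction='allow', Egress=False, CidrBlock='0.0.0.0/0',
        PortRange={'From': 443, 'To': 443})

    acl = client.describe_network_acls(NetworkAclIds=[acl_id])['NetworkAcls'][0]
    assert any(entry.get('RuleNumber') == 110 for entry in acl['Entries'])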
diff --git a/tests/test_ec2/test_regions.py b/tests/test_ec2/test_regions.py
index 1e87b253ce36..7f0ea2f1893a 100644
--- a/tests/test_ec2/test_regions.py
+++ b/tests/test_ec2/test_regions.py
@@ -1,148 +1,148 @@
-from __future__ import unicode_literals
-import boto.ec2
-import boto.ec2.autoscale
-import boto.ec2.elb
-import sure
-from moto import mock_ec2_deprecated, mock_autoscaling_deprecated, mock_elb_deprecated
-
-from moto.ec2 import ec2_backends
-
-def test_use_boto_regions():
-    boto_regions = {r.name for r in boto.ec2.regions()}
-    moto_regions = set(ec2_backends)
-
-    moto_regions.should.equal(boto_regions)
-
-def add_servers_to_region(ami_id, count, region):
-    conn = boto.ec2.connect_to_region(region)
-    for index in range(count):
-        conn.run_instances(ami_id)
-
-@mock_ec2_deprecated
-def test_add_servers_to_a_single_region():
-    region = 'ap-northeast-1'
-    add_servers_to_region('ami-1234abcd', 1, region)
-    add_servers_to_region('ami-5678efgh', 1, region)
-
-    conn = boto.ec2.connect_to_region(region)
-    reservations = conn.get_all_instances()
-    len(reservations).should.equal(2)
-    reservations.sort(key=lambda x: x.instances[0].image_id)
-
-    reservations[0].instances[0].image_id.should.equal('ami-1234abcd')
-    reservations[1].instances[0].image_id.should.equal('ami-5678efgh')
-
-
-@mock_ec2_deprecated
-def test_add_servers_to_multiple_regions():
-    region1 = 'us-east-1'
-    region2 = 'ap-northeast-1'
-    add_servers_to_region('ami-1234abcd', 1, region1)
-    add_servers_to_region('ami-5678efgh', 1, region2)
-
-    us_conn = boto.ec2.connect_to_region(region1)
-    ap_conn = boto.ec2.connect_to_region(region2)
-    us_reservations = us_conn.get_all_instances()
-    ap_reservations = ap_conn.get_all_instances()
-
-    len(us_reservations).should.equal(1)
-    len(ap_reservations).should.equal(1)
-
-    us_reservations[0].instances[0].image_id.should.equal('ami-1234abcd')
-    ap_reservations[0].instances[0].image_id.should.equal('ami-5678efgh')
-
-
-@mock_autoscaling_deprecated
-@mock_elb_deprecated
-def test_create_autoscaling_group():
-    elb_conn = boto.ec2.elb.connect_to_region('us-east-1')
-    elb_conn.create_load_balancer(
-        'us_test_lb', zones=[], listeners=[(80, 8080, 'http')])
-    elb_conn = boto.ec2.elb.connect_to_region('ap-northeast-1')
-    elb_conn.create_load_balancer(
-        'ap_test_lb', zones=[], listeners=[(80, 8080, 'http')])
-
-    us_conn = boto.ec2.autoscale.connect_to_region('us-east-1')
-    config = boto.ec2.autoscale.LaunchConfiguration(
-        name='us_tester',
-        image_id='ami-abcd1234',
-        instance_type='m1.small',
-    )
-    us_conn.create_launch_configuration(config)
-
-    group = boto.ec2.autoscale.AutoScalingGroup(
-        name='us_tester_group',
-        availability_zones=['us-east-1c'],
-        default_cooldown=60,
-        desired_capacity=2,
-        health_check_period=100,
-        health_check_type="EC2",
-        max_size=2,
-        min_size=2,
-        launch_config=config,
-        load_balancers=["us_test_lb"],
-        placement_group="us_test_placement",
-        vpc_zone_identifier='subnet-1234abcd',
-        termination_policies=["OldestInstance", "NewestInstance"],
-    )
-    us_conn.create_auto_scaling_group(group)
-
-    ap_conn = boto.ec2.autoscale.connect_to_region('ap-northeast-1')
-    config = boto.ec2.autoscale.LaunchConfiguration(
-        name='ap_tester',
-        image_id='ami-efgh5678',
-        instance_type='m1.small',
-    )
-    ap_conn.create_launch_configuration(config)
-
-    group = boto.ec2.autoscale.AutoScalingGroup(
-        name='ap_tester_group',
-        availability_zones=['ap-northeast-1a'],
-        default_cooldown=60,
-        desired_capacity=2,
-        health_check_period=100,
-        health_check_type="EC2",
-        max_size=2,
-        min_size=2,
-        launch_config=config,
-        load_balancers=["ap_test_lb"],
-        placement_group="ap_test_placement",
-        vpc_zone_identifier='subnet-5678efgh',
-        termination_policies=["OldestInstance", "NewestInstance"],
-    )
-    ap_conn.create_auto_scaling_group(group)
-
-    len(us_conn.get_all_groups()).should.equal(1)
-    len(ap_conn.get_all_groups()).should.equal(1)
-
-    us_group = us_conn.get_all_groups()[0]
-    us_group.name.should.equal('us_tester_group')
-    list(us_group.availability_zones).should.equal(['us-east-1c'])
-    us_group.desired_capacity.should.equal(2)
-    us_group.max_size.should.equal(2)
-    us_group.min_size.should.equal(2)
-    us_group.vpc_zone_identifier.should.equal('subnet-1234abcd')
-    us_group.launch_config_name.should.equal('us_tester')
-    us_group.default_cooldown.should.equal(60)
-    us_group.health_check_period.should.equal(100)
-    us_group.health_check_type.should.equal("EC2")
-    list(us_group.load_balancers).should.equal(["us_test_lb"])
-    us_group.placement_group.should.equal("us_test_placement")
-    list(us_group.termination_policies).should.equal(
-        ["OldestInstance", "NewestInstance"])
-
-    ap_group = ap_conn.get_all_groups()[0]
-    ap_group.name.should.equal('ap_tester_group')
-    list(ap_group.availability_zones).should.equal(['ap-northeast-1a'])
-    ap_group.desired_capacity.should.equal(2)
-    ap_group.max_size.should.equal(2)
-    ap_group.min_size.should.equal(2)
-    ap_group.vpc_zone_identifier.should.equal('subnet-5678efgh')
-    ap_group.launch_config_name.should.equal('ap_tester')
-    ap_group.default_cooldown.should.equal(60)
-    ap_group.health_check_period.should.equal(100)
-    ap_group.health_check_type.should.equal("EC2")
-    list(ap_group.load_balancers).should.equal(["ap_test_lb"])
-    ap_group.placement_group.should.equal("ap_test_placement")
-    list(ap_group.termination_policies).should.equal(
-        ["OldestInstance", "NewestInstance"])
+from __future__ import unicode_literals
+import boto.ec2
+import boto.ec2.autoscale
+import boto.ec2.elb
+import sure
+from moto import mock_ec2_deprecated, mock_autoscaling_deprecated, mock_elb_deprecated
+
+from moto.ec2 import ec2_backends
+
+def test_use_boto_regions():
+    boto_regions = {r.name for r in boto.ec2.regions()}
+    moto_regions = set(ec2_backends)
+
+    moto_regions.should.equal(boto_regions)
+
+def add_servers_to_region(ami_id, count, region):
+    conn = boto.ec2.connect_to_region(region)
+    for index in range(count):
+        conn.run_instances(ami_id)
+
+@mock_ec2_deprecated
+def test_add_servers_to_a_single_region():
+    region = 'ap-northeast-1'
+    add_servers_to_region('ami-1234abcd', 1, region)
+    add_servers_to_region('ami-5678efgh', 1, region)
+
+    conn = boto.ec2.connect_to_region(region)
+    reservations = conn.get_all_instances()
+    len(reservations).should.equal(2)
+    reservations.sort(key=lambda x: x.instances[0].image_id)
+
+    reservations[0].instances[0].image_id.should.equal('ami-1234abcd')
+    reservations[1].instances[0].image_id.should.equal('ami-5678efgh')
+
+
+@mock_ec2_deprecated
+def test_add_servers_to_multiple_regions():
+    region1 = 'us-east-1'
+    region2 = 'ap-northeast-1'
+    add_servers_to_region('ami-1234abcd', 1, region1)
+    add_servers_to_region('ami-5678efgh', 1, region2)
+
+    us_conn = boto.ec2.connect_to_region(region1)
+    ap_conn = boto.ec2.connect_to_region(region2)
+    us_reservations = us_conn.get_all_instances()
+    ap_reservations = ap_conn.get_all_instances()
+
+    len(us_reservations).should.equal(1)
+    len(ap_reservations).should.equal(1)
+
+    us_reservations[0].instances[0].image_id.should.equal('ami-1234abcd')
+    ap_reservations[0].instances[0].image_id.should.equal('ami-5678efgh')
+
+
+@mock_autoscaling_deprecated
+@mock_elb_deprecated
+def test_create_autoscaling_group():
+    elb_conn = boto.ec2.elb.connect_to_region('us-east-1')
+    elb_conn.create_load_balancer(
+        'us_test_lb', zones=[], listeners=[(80, 8080, 'http')])
+    elb_conn = boto.ec2.elb.connect_to_region('ap-northeast-1')
+    elb_conn.create_load_balancer(
+        'ap_test_lb', zones=[], listeners=[(80, 8080, 'http')])
+
+    us_conn = boto.ec2.autoscale.connect_to_region('us-east-1')
+    config = boto.ec2.autoscale.LaunchConfiguration(
+        name='us_tester',
+        image_id='ami-abcd1234',
+        instance_type='m1.small',
+    )
+    us_conn.create_launch_configuration(config)
+
+    group = boto.ec2.autoscale.AutoScalingGroup(
+        name='us_tester_group',
+        availability_zones=['us-east-1c'],
+        default_cooldown=60,
+        desired_capacity=2,
+        health_check_period=100,
+        health_check_type="EC2",
+        max_size=2,
+        min_size=2,
+        launch_config=config,
+        load_balancers=["us_test_lb"],
+        placement_group="us_test_placement",
+        vpc_zone_identifier='subnet-1234abcd',
+        termination_policies=["OldestInstance", "NewestInstance"],
+    )
+    us_conn.create_auto_scaling_group(group)
+
+    ap_conn = boto.ec2.autoscale.connect_to_region('ap-northeast-1')
+    config = boto.ec2.autoscale.LaunchConfiguration(
+        name='ap_tester',
+        image_id='ami-efgh5678',
+        instance_type='m1.small',
+    )
+    ap_conn.create_launch_configuration(config)
+
+    group = boto.ec2.autoscale.AutoScalingGroup(
+        name='ap_tester_group',
+        availability_zones=['ap-northeast-1a'],
+        default_cooldown=60,
+        desired_capacity=2,
+        health_check_period=100,
+        health_check_type="EC2",
+        max_size=2,
+        min_size=2,
+        launch_config=config,
+        load_balancers=["ap_test_lb"],
+        placement_group="ap_test_placement",
+        vpc_zone_identifier='subnet-5678efgh',
+        termination_policies=["OldestInstance", "NewestInstance"],
+    )
+    ap_conn.create_auto_scaling_group(group)
+
+    len(us_conn.get_all_groups()).should.equal(1)
+    len(ap_conn.get_all_groups()).should.equal(1)
+
+    us_group = us_conn.get_all_groups()[0]
+    us_group.name.should.equal('us_tester_group')
+    list(us_group.availability_zones).should.equal(['us-east-1c'])
+    us_group.desired_capacity.should.equal(2)
+    us_group.max_size.should.equal(2)
+    us_group.min_size.should.equal(2)
+    us_group.vpc_zone_identifier.should.equal('subnet-1234abcd')
+    us_group.launch_config_name.should.equal('us_tester')
+    us_group.default_cooldown.should.equal(60)
+    us_group.health_check_period.should.equal(100)
+    us_group.health_check_type.should.equal("EC2")
+    list(us_group.load_balancers).should.equal(["us_test_lb"])
+    us_group.placement_group.should.equal("us_test_placement")
+    list(us_group.termination_policies).should.equal(
+        ["OldestInstance", "NewestInstance"])
+
+    ap_group = ap_conn.get_all_groups()[0]
+    ap_group.name.should.equal('ap_tester_group')
+    list(ap_group.availability_zones).should.equal(['ap-northeast-1a'])
+    ap_group.desired_capacity.should.equal(2)
+    ap_group.max_size.should.equal(2)
+    ap_group.min_size.should.equal(2)
+    ap_group.vpc_zone_identifier.should.equal('subnet-5678efgh')
+    ap_group.launch_config_name.should.equal('ap_tester')
+    ap_group.default_cooldown.should.equal(60)
+    ap_group.health_check_period.should.equal(100)
+    ap_group.health_check_type.should.equal("EC2")
+    list(ap_group.load_balancers).should.equal(["ap_test_lb"])
+    ap_group.placement_group.should.equal("ap_test_placement")
+    list(ap_group.termination_policies).should.equal(
+        ["OldestInstance", "NewestInstance"])
diff --git a/tests/test_ec2/test_reserved_instances.py b/tests/test_ec2/test_reserved_instances.py
index 437d3a95b648..47456bc035b9 100644
--- a/tests/test_ec2/test_reserved_instances.py
+++ b/tests/test_ec2/test_reserved_instances.py
@@ -1,10 +1,10 @@
-from __future__ import unicode_literals
-import boto
-import sure  # noqa
-
-from moto import mock_ec2
-
-
-@mock_ec2
-def test_reserved_instances():
-    pass
+from __future__ import unicode_literals
+import boto
+import sure  # noqa
+
+from moto import mock_ec2
+
+
+@mock_ec2
+def test_reserved_instances():
+    pass
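test_regions.py leans on the fact that moto keeps one independent in-memory backend per region, so a resource created in one region is invisible from every other. That isolation can be seen in a few lines; the sketch below is illustrative only, assumes moto and boto3, and reuses the same placeholder AMI ID the tests use.

import boto3
from moto import mock_ec2


@mock_ec2
def demo_region_isolation():
    tokyo = boto3.client('ec2', region_name='ap-northeast-1')
    virginia = boto3.client('ec2', region_name='us-east-1')

    tokyo.run_instances(ImageId='ami-1234abcd', MinCount=1, MaxCount=1)

    # The instance exists only in the region whose backend created it.
    assert len(tokyo.describe_instances()['Reservations']) == 1
    assert len(virginia.describe_instances()['Reservations']) == 0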
cm.exception.code.should.equal('DependencyViolation') - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none - - conn.delete_route_table(route_table.id) - - all_route_tables = conn.get_all_route_tables(filters={'vpc-id': vpc.id}) - all_route_tables.should.have.length_of(1) - - with assert_raises(EC2ResponseError) as cm: - conn.delete_route_table("rtb-1234abcd") - cm.exception.code.should.equal('InvalidRouteTableID.NotFound') - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none - - -@mock_ec2_deprecated -def test_route_tables_filters_standard(): - conn = boto.connect_vpc('the_key', 'the_secret') - - vpc1 = conn.create_vpc("10.0.0.0/16") - route_table1 = conn.create_route_table(vpc1.id) - - vpc2 = conn.create_vpc("10.0.0.0/16") - route_table2 = conn.create_route_table(vpc2.id) - - all_route_tables = conn.get_all_route_tables() - all_route_tables.should.have.length_of(5) - - # Filter by main route table - main_route_tables = conn.get_all_route_tables( - filters={'association.main': 'true'}) - main_route_tables.should.have.length_of(3) - main_route_table_ids = [ - route_table.id for route_table in main_route_tables] - main_route_table_ids.should_not.contain(route_table1.id) - main_route_table_ids.should_not.contain(route_table2.id) - - # Filter by VPC - vpc1_route_tables = conn.get_all_route_tables(filters={'vpc-id': vpc1.id}) - vpc1_route_tables.should.have.length_of(2) - vpc1_route_table_ids = [ - route_table.id for route_table in vpc1_route_tables] - vpc1_route_table_ids.should.contain(route_table1.id) - vpc1_route_table_ids.should_not.contain(route_table2.id) - - # Filter by VPC and main route table - vpc2_main_route_tables = conn.get_all_route_tables( - filters={'association.main': 'true', 'vpc-id': vpc2.id}) - vpc2_main_route_tables.should.have.length_of(1) - vpc2_main_route_table_ids = [ - route_table.id for route_table in vpc2_main_route_tables] - vpc2_main_route_table_ids.should_not.contain(route_table1.id) - vpc2_main_route_table_ids.should_not.contain(route_table2.id) - - # Unsupported filter - conn.get_all_route_tables.when.called_with( - filters={'not-implemented-filter': 'foobar'}).should.throw(NotImplementedError) - - -@mock_ec2_deprecated -def test_route_tables_filters_associations(): - conn = boto.connect_vpc('the_key', 'the_secret') - - vpc = conn.create_vpc("10.0.0.0/16") - subnet1 = conn.create_subnet(vpc.id, "10.0.0.0/24") - subnet2 = conn.create_subnet(vpc.id, "10.0.1.0/24") - subnet3 = conn.create_subnet(vpc.id, "10.0.2.0/24") - route_table1 = conn.create_route_table(vpc.id) - route_table2 = conn.create_route_table(vpc.id) - - association_id1 = conn.associate_route_table(route_table1.id, subnet1.id) - association_id2 = conn.associate_route_table(route_table1.id, subnet2.id) - association_id3 = conn.associate_route_table(route_table2.id, subnet3.id) - - all_route_tables = conn.get_all_route_tables() - all_route_tables.should.have.length_of(4) - - # Filter by association ID - association1_route_tables = conn.get_all_route_tables( - filters={'association.route-table-association-id': association_id1}) - association1_route_tables.should.have.length_of(1) - association1_route_tables[0].id.should.equal(route_table1.id) - association1_route_tables[0].associations.should.have.length_of(2) - - # Filter by route table ID - route_table2_route_tables = conn.get_all_route_tables( - filters={'association.route-table-id': route_table2.id}) - route_table2_route_tables.should.have.length_of(1) - 
route_table2_route_tables[0].id.should.equal(route_table2.id) - route_table2_route_tables[0].associations.should.have.length_of(1) - - # Filter by subnet ID - subnet_route_tables = conn.get_all_route_tables( - filters={'association.subnet-id': subnet1.id}) - subnet_route_tables.should.have.length_of(1) - subnet_route_tables[0].id.should.equal(route_table1.id) - association1_route_tables[0].associations.should.have.length_of(2) - - -@mock_ec2_deprecated -def test_route_table_associations(): - conn = boto.connect_vpc('the_key', 'the_secret') - vpc = conn.create_vpc("10.0.0.0/16") - subnet = conn.create_subnet(vpc.id, "10.0.0.0/18") - route_table = conn.create_route_table(vpc.id) - - all_route_tables = conn.get_all_route_tables() - all_route_tables.should.have.length_of(3) - - # Refresh - route_table = conn.get_all_route_tables(route_table.id)[0] - route_table.associations.should.have.length_of(0) - - # Associate - association_id = conn.associate_route_table(route_table.id, subnet.id) - - # Refresh - route_table = conn.get_all_route_tables(route_table.id)[0] - route_table.associations.should.have.length_of(1) - - route_table.associations[0].id.should.equal(association_id) - route_table.associations[0].main.should.equal(False) - route_table.associations[0].route_table_id.should.equal(route_table.id) - route_table.associations[0].subnet_id.should.equal(subnet.id) - - # Associate is idempotent - association_id_idempotent = conn.associate_route_table( - route_table.id, subnet.id) - association_id_idempotent.should.equal(association_id) - - # Error: Attempt delete associated route table. - with assert_raises(EC2ResponseError) as cm: - conn.delete_route_table(route_table.id) - cm.exception.code.should.equal('DependencyViolation') - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none - - # Disassociate - conn.disassociate_route_table(association_id) - - # Refresh - route_table = conn.get_all_route_tables(route_table.id)[0] - route_table.associations.should.have.length_of(0) - - # Error: Disassociate with invalid association ID - with assert_raises(EC2ResponseError) as cm: - conn.disassociate_route_table(association_id) - cm.exception.code.should.equal('InvalidAssociationID.NotFound') - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none - - # Error: Associate with invalid subnet ID - with assert_raises(EC2ResponseError) as cm: - conn.associate_route_table(route_table.id, "subnet-1234abcd") - cm.exception.code.should.equal('InvalidSubnetID.NotFound') - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none - - # Error: Associate with invalid route table ID - with assert_raises(EC2ResponseError) as cm: - conn.associate_route_table("rtb-1234abcd", subnet.id) - cm.exception.code.should.equal('InvalidRouteTableID.NotFound') - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none - - -@requires_boto_gte("2.16.0") -@mock_ec2_deprecated -def test_route_table_replace_route_table_association(): - """ - Note: Boto has deprecated replace_route_table_assocation (which returns status) - and now uses replace_route_table_assocation_with_assoc (which returns association ID). 
- """ - conn = boto.connect_vpc('the_key', 'the_secret') - vpc = conn.create_vpc("10.0.0.0/16") - subnet = conn.create_subnet(vpc.id, "10.0.0.0/18") - route_table1 = conn.create_route_table(vpc.id) - route_table2 = conn.create_route_table(vpc.id) - - all_route_tables = conn.get_all_route_tables() - all_route_tables.should.have.length_of(4) - - # Refresh - route_table1 = conn.get_all_route_tables(route_table1.id)[0] - route_table1.associations.should.have.length_of(0) - - # Associate - association_id1 = conn.associate_route_table(route_table1.id, subnet.id) - - # Refresh - route_table1 = conn.get_all_route_tables(route_table1.id)[0] - route_table2 = conn.get_all_route_tables(route_table2.id)[0] - - # Validate - route_table1.associations.should.have.length_of(1) - route_table2.associations.should.have.length_of(0) - - route_table1.associations[0].id.should.equal(association_id1) - route_table1.associations[0].main.should.equal(False) - route_table1.associations[0].route_table_id.should.equal(route_table1.id) - route_table1.associations[0].subnet_id.should.equal(subnet.id) - - # Replace Association - association_id2 = conn.replace_route_table_association_with_assoc( - association_id1, route_table2.id) - - # Refresh - route_table1 = conn.get_all_route_tables(route_table1.id)[0] - route_table2 = conn.get_all_route_tables(route_table2.id)[0] - - # Validate - route_table1.associations.should.have.length_of(0) - route_table2.associations.should.have.length_of(1) - - route_table2.associations[0].id.should.equal(association_id2) - route_table2.associations[0].main.should.equal(False) - route_table2.associations[0].route_table_id.should.equal(route_table2.id) - route_table2.associations[0].subnet_id.should.equal(subnet.id) - - # Replace Association is idempotent - association_id_idempotent = conn.replace_route_table_association_with_assoc( - association_id2, route_table2.id) - association_id_idempotent.should.equal(association_id2) - - # Error: Replace association with invalid association ID - with assert_raises(EC2ResponseError) as cm: - conn.replace_route_table_association_with_assoc( - "rtbassoc-1234abcd", route_table1.id) - cm.exception.code.should.equal('InvalidAssociationID.NotFound') - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none - - # Error: Replace association with invalid route table ID - with assert_raises(EC2ResponseError) as cm: - conn.replace_route_table_association_with_assoc( - association_id2, "rtb-1234abcd") - cm.exception.code.should.equal('InvalidRouteTableID.NotFound') - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none - - -@mock_ec2_deprecated -def test_route_table_get_by_tag(): - conn = boto.connect_vpc('the_key', 'the_secret') - - vpc = conn.create_vpc('10.0.0.0/16') - - route_table = conn.create_route_table(vpc.id) - route_table.add_tag('Name', 'TestRouteTable') - - route_tables = conn.get_all_route_tables( - filters={'tag:Name': 'TestRouteTable'}) - - route_tables.should.have.length_of(1) - route_tables[0].vpc_id.should.equal(vpc.id) - route_tables[0].id.should.equal(route_table.id) - route_tables[0].tags.should.have.length_of(1) - route_tables[0].tags['Name'].should.equal('TestRouteTable') - - -@mock_ec2 -def test_route_table_get_by_tag_boto3(): - ec2 = boto3.resource('ec2', region_name='eu-central-1') - - vpc = ec2.create_vpc(CidrBlock='10.0.0.0/16') - - route_table = ec2.create_route_table(VpcId=vpc.id) - route_table.create_tags(Tags=[{'Key': 'Name', 'Value': 'TestRouteTable'}]) - - filters = 
[{'Name': 'tag:Name', 'Values': ['TestRouteTable']}] - route_tables = list(ec2.route_tables.filter(Filters=filters)) - - route_tables.should.have.length_of(1) - route_tables[0].vpc_id.should.equal(vpc.id) - route_tables[0].id.should.equal(route_table.id) - route_tables[0].tags.should.have.length_of(1) - route_tables[0].tags[0].should.equal( - {'Key': 'Name', 'Value': 'TestRouteTable'}) - - -@mock_ec2_deprecated -def test_routes_additional(): - conn = boto.connect_vpc('the_key', 'the_secret') - vpc = conn.create_vpc("10.0.0.0/16") - main_route_table = conn.get_all_route_tables(filters={'vpc-id': vpc.id})[0] - local_route = main_route_table.routes[0] - igw = conn.create_internet_gateway() - ROUTE_CIDR = "10.0.0.4/24" - - conn.create_route(main_route_table.id, ROUTE_CIDR, gateway_id=igw.id) - - main_route_table = conn.get_all_route_tables( - filters={'vpc-id': vpc.id})[0] # Refresh route table - - main_route_table.routes.should.have.length_of(2) - new_routes = [ - route for route in main_route_table.routes if route.destination_cidr_block != vpc.cidr_block] - new_routes.should.have.length_of(1) - - new_route = new_routes[0] - new_route.gateway_id.should.equal(igw.id) - new_route.instance_id.should.be.none - new_route.state.should.equal('active') - new_route.destination_cidr_block.should.equal(ROUTE_CIDR) - - conn.delete_route(main_route_table.id, ROUTE_CIDR) - - main_route_table = conn.get_all_route_tables( - filters={'vpc-id': vpc.id})[0] # Refresh route table - - main_route_table.routes.should.have.length_of(1) - new_routes = [ - route for route in main_route_table.routes if route.destination_cidr_block != vpc.cidr_block] - new_routes.should.have.length_of(0) - - with assert_raises(EC2ResponseError) as cm: - conn.delete_route(main_route_table.id, ROUTE_CIDR) - cm.exception.code.should.equal('InvalidRoute.NotFound') - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none - - -@mock_ec2_deprecated -def test_routes_replace(): - conn = boto.connect_vpc('the_key', 'the_secret') - vpc = conn.create_vpc("10.0.0.0/16") - main_route_table = conn.get_all_route_tables( - filters={'association.main': 'true', 'vpc-id': vpc.id})[0] - local_route = main_route_table.routes[0] - ROUTE_CIDR = "10.0.0.4/24" - - # Various route targets - igw = conn.create_internet_gateway() - - reservation = conn.run_instances('ami-1234abcd') - instance = reservation.instances[0] - - # Create initial route - conn.create_route(main_route_table.id, ROUTE_CIDR, gateway_id=igw.id) - - # Replace... 
- def get_target_route(): - route_table = conn.get_all_route_tables(main_route_table.id)[0] - routes = [ - route for route in route_table.routes if route.destination_cidr_block != vpc.cidr_block] - routes.should.have.length_of(1) - return routes[0] - - conn.replace_route(main_route_table.id, ROUTE_CIDR, - instance_id=instance.id) - - target_route = get_target_route() - target_route.gateway_id.should.be.none - target_route.instance_id.should.equal(instance.id) - target_route.state.should.equal('active') - target_route.destination_cidr_block.should.equal(ROUTE_CIDR) - - conn.replace_route(main_route_table.id, ROUTE_CIDR, gateway_id=igw.id) - - target_route = get_target_route() - target_route.gateway_id.should.equal(igw.id) - target_route.instance_id.should.be.none - target_route.state.should.equal('active') - target_route.destination_cidr_block.should.equal(ROUTE_CIDR) - - with assert_raises(EC2ResponseError) as cm: - conn.replace_route('rtb-1234abcd', ROUTE_CIDR, gateway_id=igw.id) - cm.exception.code.should.equal('InvalidRouteTableID.NotFound') - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none - - -@requires_boto_gte("2.19.0") -@mock_ec2_deprecated -def test_routes_not_supported(): - conn = boto.connect_vpc('the_key', 'the_secret') - vpc = conn.create_vpc("10.0.0.0/16") - main_route_table = conn.get_all_route_tables()[0] - local_route = main_route_table.routes[0] - igw = conn.create_internet_gateway() - ROUTE_CIDR = "10.0.0.4/24" - - # Create - conn.create_route.when.called_with( - main_route_table.id, ROUTE_CIDR, interface_id='eni-1234abcd').should.throw(NotImplementedError) - - # Replace - igw = conn.create_internet_gateway() - conn.create_route(main_route_table.id, ROUTE_CIDR, gateway_id=igw.id) - conn.replace_route.when.called_with( - main_route_table.id, ROUTE_CIDR, interface_id='eni-1234abcd').should.throw(NotImplementedError) - - -@requires_boto_gte("2.34.0") -@mock_ec2_deprecated -def test_routes_vpc_peering_connection(): - conn = boto.connect_vpc('the_key', 'the_secret') - vpc = conn.create_vpc("10.0.0.0/16") - main_route_table = conn.get_all_route_tables( - filters={'association.main': 'true', 'vpc-id': vpc.id})[0] - local_route = main_route_table.routes[0] - ROUTE_CIDR = "10.0.0.4/24" - - peer_vpc = conn.create_vpc("11.0.0.0/16") - vpc_pcx = conn.create_vpc_peering_connection(vpc.id, peer_vpc.id) - - conn.create_route(main_route_table.id, ROUTE_CIDR, - vpc_peering_connection_id=vpc_pcx.id) - - # Refresh route table - main_route_table = conn.get_all_route_tables(main_route_table.id)[0] - new_routes = [ - route for route in main_route_table.routes if route.destination_cidr_block != vpc.cidr_block] - new_routes.should.have.length_of(1) - - new_route = new_routes[0] - new_route.gateway_id.should.be.none - new_route.instance_id.should.be.none - new_route.vpc_peering_connection_id.should.equal(vpc_pcx.id) - new_route.state.should.equal('blackhole') - new_route.destination_cidr_block.should.equal(ROUTE_CIDR) - - -@requires_boto_gte("2.34.0") -@mock_ec2_deprecated -def test_routes_vpn_gateway(): - - conn = boto.connect_vpc('the_key', 'the_secret') - vpc = conn.create_vpc("10.0.0.0/16") - main_route_table = conn.get_all_route_tables( - filters={'association.main': 'true', 'vpc-id': vpc.id})[0] - ROUTE_CIDR = "10.0.0.4/24" - - vpn_gw = conn.create_vpn_gateway(type="ipsec.1") - - conn.create_route(main_route_table.id, ROUTE_CIDR, gateway_id=vpn_gw.id) - - main_route_table = conn.get_all_route_tables(main_route_table.id)[0] - new_routes = [ - route for 
route in main_route_table.routes if route.destination_cidr_block != vpc.cidr_block] - new_routes.should.have.length_of(1) - - new_route = new_routes[0] - new_route.gateway_id.should.equal(vpn_gw.id) - new_route.instance_id.should.be.none - new_route.vpc_peering_connection_id.should.be.none - - -@mock_ec2_deprecated -def test_network_acl_tagging(): - - conn = boto.connect_vpc('the_key', 'the secret') - vpc = conn.create_vpc("10.0.0.0/16") - - route_table = conn.create_route_table(vpc.id) - route_table.add_tag("a key", "some value") - - tag = conn.get_all_tags()[0] - tag.name.should.equal("a key") - tag.value.should.equal("some value") - - all_route_tables = conn.get_all_route_tables() - test_route_table = next(na for na in all_route_tables - if na.id == route_table.id) - test_route_table.tags.should.have.length_of(1) - test_route_table.tags["a key"].should.equal("some value") +from __future__ import unicode_literals +# Ensure 'assert_raises' context manager support for Python 2.6 +import tests.backport_assert_raises +from nose.tools import assert_raises + +import boto +import boto3 +from boto.exception import EC2ResponseError +import sure # noqa + +from moto import mock_ec2, mock_ec2_deprecated +from tests.helpers import requires_boto_gte + + +@mock_ec2_deprecated +def test_route_tables_defaults(): + conn = boto.connect_vpc('the_key', 'the_secret') + vpc = conn.create_vpc("10.0.0.0/16") + + all_route_tables = conn.get_all_route_tables(filters={'vpc-id': vpc.id}) + all_route_tables.should.have.length_of(1) + + main_route_table = all_route_tables[0] + main_route_table.vpc_id.should.equal(vpc.id) + + routes = main_route_table.routes + routes.should.have.length_of(1) + + local_route = routes[0] + local_route.gateway_id.should.equal('local') + local_route.state.should.equal('active') + local_route.destination_cidr_block.should.equal(vpc.cidr_block) + + vpc.delete() + + all_route_tables = conn.get_all_route_tables(filters={'vpc-id': vpc.id}) + all_route_tables.should.have.length_of(0) + + +@mock_ec2_deprecated +def test_route_tables_additional(): + conn = boto.connect_vpc('the_key', 'the_secret') + vpc = conn.create_vpc("10.0.0.0/16") + route_table = conn.create_route_table(vpc.id) + + all_route_tables = conn.get_all_route_tables(filters={'vpc-id': vpc.id}) + all_route_tables.should.have.length_of(2) + all_route_tables[0].vpc_id.should.equal(vpc.id) + all_route_tables[1].vpc_id.should.equal(vpc.id) + + all_route_table_ids = [route_table.id for route_table in all_route_tables] + all_route_table_ids.should.contain(route_table.id) + + routes = route_table.routes + routes.should.have.length_of(1) + + local_route = routes[0] + local_route.gateway_id.should.equal('local') + local_route.state.should.equal('active') + local_route.destination_cidr_block.should.equal(vpc.cidr_block) + + with assert_raises(EC2ResponseError) as cm: + conn.delete_vpc(vpc.id) + cm.exception.code.should.equal('DependencyViolation') + cm.exception.status.should.equal(400) + cm.exception.request_id.should_not.be.none + + conn.delete_route_table(route_table.id) + + all_route_tables = conn.get_all_route_tables(filters={'vpc-id': vpc.id}) + all_route_tables.should.have.length_of(1) + + with assert_raises(EC2ResponseError) as cm: + conn.delete_route_table("rtb-1234abcd") + cm.exception.code.should.equal('InvalidRouteTableID.NotFound') + cm.exception.status.should.equal(400) + cm.exception.request_id.should_not.be.none + + +@mock_ec2_deprecated +def test_route_tables_filters_standard(): + conn = boto.connect_vpc('the_key', 
'the_secret') + + vpc1 = conn.create_vpc("10.0.0.0/16") + route_table1 = conn.create_route_table(vpc1.id) + + vpc2 = conn.create_vpc("10.0.0.0/16") + route_table2 = conn.create_route_table(vpc2.id) + + all_route_tables = conn.get_all_route_tables() + all_route_tables.should.have.length_of(5) + + # Filter by main route table + main_route_tables = conn.get_all_route_tables( + filters={'association.main': 'true'}) + main_route_tables.should.have.length_of(3) + main_route_table_ids = [ + route_table.id for route_table in main_route_tables] + main_route_table_ids.should_not.contain(route_table1.id) + main_route_table_ids.should_not.contain(route_table2.id) + + # Filter by VPC + vpc1_route_tables = conn.get_all_route_tables(filters={'vpc-id': vpc1.id}) + vpc1_route_tables.should.have.length_of(2) + vpc1_route_table_ids = [ + route_table.id for route_table in vpc1_route_tables] + vpc1_route_table_ids.should.contain(route_table1.id) + vpc1_route_table_ids.should_not.contain(route_table2.id) + + # Filter by VPC and main route table + vpc2_main_route_tables = conn.get_all_route_tables( + filters={'association.main': 'true', 'vpc-id': vpc2.id}) + vpc2_main_route_tables.should.have.length_of(1) + vpc2_main_route_table_ids = [ + route_table.id for route_table in vpc2_main_route_tables] + vpc2_main_route_table_ids.should_not.contain(route_table1.id) + vpc2_main_route_table_ids.should_not.contain(route_table2.id) + + # Unsupported filter + conn.get_all_route_tables.when.called_with( + filters={'not-implemented-filter': 'foobar'}).should.throw(NotImplementedError) + + +@mock_ec2_deprecated +def test_route_tables_filters_associations(): + conn = boto.connect_vpc('the_key', 'the_secret') + + vpc = conn.create_vpc("10.0.0.0/16") + subnet1 = conn.create_subnet(vpc.id, "10.0.0.0/24") + subnet2 = conn.create_subnet(vpc.id, "10.0.1.0/24") + subnet3 = conn.create_subnet(vpc.id, "10.0.2.0/24") + route_table1 = conn.create_route_table(vpc.id) + route_table2 = conn.create_route_table(vpc.id) + + association_id1 = conn.associate_route_table(route_table1.id, subnet1.id) + association_id2 = conn.associate_route_table(route_table1.id, subnet2.id) + association_id3 = conn.associate_route_table(route_table2.id, subnet3.id) + + all_route_tables = conn.get_all_route_tables() + all_route_tables.should.have.length_of(4) + + # Filter by association ID + association1_route_tables = conn.get_all_route_tables( + filters={'association.route-table-association-id': association_id1}) + association1_route_tables.should.have.length_of(1) + association1_route_tables[0].id.should.equal(route_table1.id) + association1_route_tables[0].associations.should.have.length_of(2) + + # Filter by route table ID + route_table2_route_tables = conn.get_all_route_tables( + filters={'association.route-table-id': route_table2.id}) + route_table2_route_tables.should.have.length_of(1) + route_table2_route_tables[0].id.should.equal(route_table2.id) + route_table2_route_tables[0].associations.should.have.length_of(1) + + # Filter by subnet ID + subnet_route_tables = conn.get_all_route_tables( + filters={'association.subnet-id': subnet1.id}) + subnet_route_tables.should.have.length_of(1) + subnet_route_tables[0].id.should.equal(route_table1.id) + association1_route_tables[0].associations.should.have.length_of(2) + + +@mock_ec2_deprecated +def test_route_table_associations(): + conn = boto.connect_vpc('the_key', 'the_secret') + vpc = conn.create_vpc("10.0.0.0/16") + subnet = conn.create_subnet(vpc.id, "10.0.0.0/18") + route_table = 
+    route_table = conn.create_route_table(vpc.id)
+
+    all_route_tables = conn.get_all_route_tables()
+    all_route_tables.should.have.length_of(3)
+
+    # Refresh
+    route_table = conn.get_all_route_tables(route_table.id)[0]
+    route_table.associations.should.have.length_of(0)
+
+    # Associate
+    association_id = conn.associate_route_table(route_table.id, subnet.id)
+
+    # Refresh
+    route_table = conn.get_all_route_tables(route_table.id)[0]
+    route_table.associations.should.have.length_of(1)
+
+    route_table.associations[0].id.should.equal(association_id)
+    route_table.associations[0].main.should.equal(False)
+    route_table.associations[0].route_table_id.should.equal(route_table.id)
+    route_table.associations[0].subnet_id.should.equal(subnet.id)
+
+    # Associate is idempotent
+    association_id_idempotent = conn.associate_route_table(
+        route_table.id, subnet.id)
+    association_id_idempotent.should.equal(association_id)
+
+    # Error: Attempt to delete an associated route table.
+    with assert_raises(EC2ResponseError) as cm:
+        conn.delete_route_table(route_table.id)
+    cm.exception.code.should.equal('DependencyViolation')
+    cm.exception.status.should.equal(400)
+    cm.exception.request_id.should_not.be.none
+
+    # Disassociate
+    conn.disassociate_route_table(association_id)
+
+    # Refresh
+    route_table = conn.get_all_route_tables(route_table.id)[0]
+    route_table.associations.should.have.length_of(0)
+
+    # Error: Disassociate with invalid association ID
+    with assert_raises(EC2ResponseError) as cm:
+        conn.disassociate_route_table(association_id)
+    cm.exception.code.should.equal('InvalidAssociationID.NotFound')
+    cm.exception.status.should.equal(400)
+    cm.exception.request_id.should_not.be.none
+
+    # Error: Associate with invalid subnet ID
+    with assert_raises(EC2ResponseError) as cm:
+        conn.associate_route_table(route_table.id, "subnet-1234abcd")
+    cm.exception.code.should.equal('InvalidSubnetID.NotFound')
+    cm.exception.status.should.equal(400)
+    cm.exception.request_id.should_not.be.none
+
+    # Error: Associate with invalid route table ID
+    with assert_raises(EC2ResponseError) as cm:
+        conn.associate_route_table("rtb-1234abcd", subnet.id)
+    cm.exception.code.should.equal('InvalidRouteTableID.NotFound')
+    cm.exception.status.should.equal(400)
+    cm.exception.request_id.should_not.be.none
+
+
+@requires_boto_gte("2.16.0")
+@mock_ec2_deprecated
+def test_route_table_replace_route_table_association():
+    """
+    Note: Boto has deprecated replace_route_table_assocation (which returns status)
+    and now uses replace_route_table_association_with_assoc (which returns association ID).
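+
+    A rough sketch of the two call styles; the names assoc_id and rtb_id are
+    illustrative placeholders, not values from this test:
+
+        status = conn.replace_route_table_assocation(assoc_id, rtb_id)
+        new_assoc_id = conn.replace_route_table_association_with_assoc(
+            assoc_id, rtb_id)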
+ """ + conn = boto.connect_vpc('the_key', 'the_secret') + vpc = conn.create_vpc("10.0.0.0/16") + subnet = conn.create_subnet(vpc.id, "10.0.0.0/18") + route_table1 = conn.create_route_table(vpc.id) + route_table2 = conn.create_route_table(vpc.id) + + all_route_tables = conn.get_all_route_tables() + all_route_tables.should.have.length_of(4) + + # Refresh + route_table1 = conn.get_all_route_tables(route_table1.id)[0] + route_table1.associations.should.have.length_of(0) + + # Associate + association_id1 = conn.associate_route_table(route_table1.id, subnet.id) + + # Refresh + route_table1 = conn.get_all_route_tables(route_table1.id)[0] + route_table2 = conn.get_all_route_tables(route_table2.id)[0] + + # Validate + route_table1.associations.should.have.length_of(1) + route_table2.associations.should.have.length_of(0) + + route_table1.associations[0].id.should.equal(association_id1) + route_table1.associations[0].main.should.equal(False) + route_table1.associations[0].route_table_id.should.equal(route_table1.id) + route_table1.associations[0].subnet_id.should.equal(subnet.id) + + # Replace Association + association_id2 = conn.replace_route_table_association_with_assoc( + association_id1, route_table2.id) + + # Refresh + route_table1 = conn.get_all_route_tables(route_table1.id)[0] + route_table2 = conn.get_all_route_tables(route_table2.id)[0] + + # Validate + route_table1.associations.should.have.length_of(0) + route_table2.associations.should.have.length_of(1) + + route_table2.associations[0].id.should.equal(association_id2) + route_table2.associations[0].main.should.equal(False) + route_table2.associations[0].route_table_id.should.equal(route_table2.id) + route_table2.associations[0].subnet_id.should.equal(subnet.id) + + # Replace Association is idempotent + association_id_idempotent = conn.replace_route_table_association_with_assoc( + association_id2, route_table2.id) + association_id_idempotent.should.equal(association_id2) + + # Error: Replace association with invalid association ID + with assert_raises(EC2ResponseError) as cm: + conn.replace_route_table_association_with_assoc( + "rtbassoc-1234abcd", route_table1.id) + cm.exception.code.should.equal('InvalidAssociationID.NotFound') + cm.exception.status.should.equal(400) + cm.exception.request_id.should_not.be.none + + # Error: Replace association with invalid route table ID + with assert_raises(EC2ResponseError) as cm: + conn.replace_route_table_association_with_assoc( + association_id2, "rtb-1234abcd") + cm.exception.code.should.equal('InvalidRouteTableID.NotFound') + cm.exception.status.should.equal(400) + cm.exception.request_id.should_not.be.none + + +@mock_ec2_deprecated +def test_route_table_get_by_tag(): + conn = boto.connect_vpc('the_key', 'the_secret') + + vpc = conn.create_vpc('10.0.0.0/16') + + route_table = conn.create_route_table(vpc.id) + route_table.add_tag('Name', 'TestRouteTable') + + route_tables = conn.get_all_route_tables( + filters={'tag:Name': 'TestRouteTable'}) + + route_tables.should.have.length_of(1) + route_tables[0].vpc_id.should.equal(vpc.id) + route_tables[0].id.should.equal(route_table.id) + route_tables[0].tags.should.have.length_of(1) + route_tables[0].tags['Name'].should.equal('TestRouteTable') + + +@mock_ec2 +def test_route_table_get_by_tag_boto3(): + ec2 = boto3.resource('ec2', region_name='eu-central-1') + + vpc = ec2.create_vpc(CidrBlock='10.0.0.0/16') + + route_table = ec2.create_route_table(VpcId=vpc.id) + route_table.create_tags(Tags=[{'Key': 'Name', 'Value': 'TestRouteTable'}]) + + filters = 
[{'Name': 'tag:Name', 'Values': ['TestRouteTable']}] + route_tables = list(ec2.route_tables.filter(Filters=filters)) + + route_tables.should.have.length_of(1) + route_tables[0].vpc_id.should.equal(vpc.id) + route_tables[0].id.should.equal(route_table.id) + route_tables[0].tags.should.have.length_of(1) + route_tables[0].tags[0].should.equal( + {'Key': 'Name', 'Value': 'TestRouteTable'}) + + +@mock_ec2_deprecated +def test_routes_additional(): + conn = boto.connect_vpc('the_key', 'the_secret') + vpc = conn.create_vpc("10.0.0.0/16") + main_route_table = conn.get_all_route_tables(filters={'vpc-id': vpc.id})[0] + local_route = main_route_table.routes[0] + igw = conn.create_internet_gateway() + ROUTE_CIDR = "10.0.0.4/24" + + conn.create_route(main_route_table.id, ROUTE_CIDR, gateway_id=igw.id) + + main_route_table = conn.get_all_route_tables( + filters={'vpc-id': vpc.id})[0] # Refresh route table + + main_route_table.routes.should.have.length_of(2) + new_routes = [ + route for route in main_route_table.routes if route.destination_cidr_block != vpc.cidr_block] + new_routes.should.have.length_of(1) + + new_route = new_routes[0] + new_route.gateway_id.should.equal(igw.id) + new_route.instance_id.should.be.none + new_route.state.should.equal('active') + new_route.destination_cidr_block.should.equal(ROUTE_CIDR) + + conn.delete_route(main_route_table.id, ROUTE_CIDR) + + main_route_table = conn.get_all_route_tables( + filters={'vpc-id': vpc.id})[0] # Refresh route table + + main_route_table.routes.should.have.length_of(1) + new_routes = [ + route for route in main_route_table.routes if route.destination_cidr_block != vpc.cidr_block] + new_routes.should.have.length_of(0) + + with assert_raises(EC2ResponseError) as cm: + conn.delete_route(main_route_table.id, ROUTE_CIDR) + cm.exception.code.should.equal('InvalidRoute.NotFound') + cm.exception.status.should.equal(400) + cm.exception.request_id.should_not.be.none + + +@mock_ec2_deprecated +def test_routes_replace(): + conn = boto.connect_vpc('the_key', 'the_secret') + vpc = conn.create_vpc("10.0.0.0/16") + main_route_table = conn.get_all_route_tables( + filters={'association.main': 'true', 'vpc-id': vpc.id})[0] + local_route = main_route_table.routes[0] + ROUTE_CIDR = "10.0.0.4/24" + + # Various route targets + igw = conn.create_internet_gateway() + + reservation = conn.run_instances('ami-1234abcd') + instance = reservation.instances[0] + + # Create initial route + conn.create_route(main_route_table.id, ROUTE_CIDR, gateway_id=igw.id) + + # Replace... 
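+    # Helper: re-fetch the route table and return the single route whose
+    # destination is not the VPC's local CIDR, i.e. the route under test.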
+ def get_target_route(): + route_table = conn.get_all_route_tables(main_route_table.id)[0] + routes = [ + route for route in route_table.routes if route.destination_cidr_block != vpc.cidr_block] + routes.should.have.length_of(1) + return routes[0] + + conn.replace_route(main_route_table.id, ROUTE_CIDR, + instance_id=instance.id) + + target_route = get_target_route() + target_route.gateway_id.should.be.none + target_route.instance_id.should.equal(instance.id) + target_route.state.should.equal('active') + target_route.destination_cidr_block.should.equal(ROUTE_CIDR) + + conn.replace_route(main_route_table.id, ROUTE_CIDR, gateway_id=igw.id) + + target_route = get_target_route() + target_route.gateway_id.should.equal(igw.id) + target_route.instance_id.should.be.none + target_route.state.should.equal('active') + target_route.destination_cidr_block.should.equal(ROUTE_CIDR) + + with assert_raises(EC2ResponseError) as cm: + conn.replace_route('rtb-1234abcd', ROUTE_CIDR, gateway_id=igw.id) + cm.exception.code.should.equal('InvalidRouteTableID.NotFound') + cm.exception.status.should.equal(400) + cm.exception.request_id.should_not.be.none + + +@requires_boto_gte("2.19.0") +@mock_ec2_deprecated +def test_routes_not_supported(): + conn = boto.connect_vpc('the_key', 'the_secret') + vpc = conn.create_vpc("10.0.0.0/16") + main_route_table = conn.get_all_route_tables()[0] + local_route = main_route_table.routes[0] + igw = conn.create_internet_gateway() + ROUTE_CIDR = "10.0.0.4/24" + + # Create + conn.create_route.when.called_with( + main_route_table.id, ROUTE_CIDR, interface_id='eni-1234abcd').should.throw(NotImplementedError) + + # Replace + igw = conn.create_internet_gateway() + conn.create_route(main_route_table.id, ROUTE_CIDR, gateway_id=igw.id) + conn.replace_route.when.called_with( + main_route_table.id, ROUTE_CIDR, interface_id='eni-1234abcd').should.throw(NotImplementedError) + + +@requires_boto_gte("2.34.0") +@mock_ec2_deprecated +def test_routes_vpc_peering_connection(): + conn = boto.connect_vpc('the_key', 'the_secret') + vpc = conn.create_vpc("10.0.0.0/16") + main_route_table = conn.get_all_route_tables( + filters={'association.main': 'true', 'vpc-id': vpc.id})[0] + local_route = main_route_table.routes[0] + ROUTE_CIDR = "10.0.0.4/24" + + peer_vpc = conn.create_vpc("11.0.0.0/16") + vpc_pcx = conn.create_vpc_peering_connection(vpc.id, peer_vpc.id) + + conn.create_route(main_route_table.id, ROUTE_CIDR, + vpc_peering_connection_id=vpc_pcx.id) + + # Refresh route table + main_route_table = conn.get_all_route_tables(main_route_table.id)[0] + new_routes = [ + route for route in main_route_table.routes if route.destination_cidr_block != vpc.cidr_block] + new_routes.should.have.length_of(1) + + new_route = new_routes[0] + new_route.gateway_id.should.be.none + new_route.instance_id.should.be.none + new_route.vpc_peering_connection_id.should.equal(vpc_pcx.id) + new_route.state.should.equal('blackhole') + new_route.destination_cidr_block.should.equal(ROUTE_CIDR) + + +@requires_boto_gte("2.34.0") +@mock_ec2_deprecated +def test_routes_vpn_gateway(): + + conn = boto.connect_vpc('the_key', 'the_secret') + vpc = conn.create_vpc("10.0.0.0/16") + main_route_table = conn.get_all_route_tables( + filters={'association.main': 'true', 'vpc-id': vpc.id})[0] + ROUTE_CIDR = "10.0.0.4/24" + + vpn_gw = conn.create_vpn_gateway(type="ipsec.1") + + conn.create_route(main_route_table.id, ROUTE_CIDR, gateway_id=vpn_gw.id) + + main_route_table = conn.get_all_route_tables(main_route_table.id)[0] + new_routes = [ + route for 
route in main_route_table.routes if route.destination_cidr_block != vpc.cidr_block] + new_routes.should.have.length_of(1) + + new_route = new_routes[0] + new_route.gateway_id.should.equal(vpn_gw.id) + new_route.instance_id.should.be.none + new_route.vpc_peering_connection_id.should.be.none + + +@mock_ec2_deprecated +def test_network_acl_tagging(): + + conn = boto.connect_vpc('the_key', 'the secret') + vpc = conn.create_vpc("10.0.0.0/16") + + route_table = conn.create_route_table(vpc.id) + route_table.add_tag("a key", "some value") + + tag = conn.get_all_tags()[0] + tag.name.should.equal("a key") + tag.value.should.equal("some value") + + all_route_tables = conn.get_all_route_tables() + test_route_table = next(na for na in all_route_tables + if na.id == route_table.id) + test_route_table.tags.should.have.length_of(1) + test_route_table.tags["a key"].should.equal("some value") diff --git a/tests/test_ec2/test_security_groups.py b/tests/test_ec2/test_security_groups.py index d843087a6282..15be94fbed18 100644 --- a/tests/test_ec2/test_security_groups.py +++ b/tests/test_ec2/test_security_groups.py @@ -1,737 +1,737 @@ -from __future__ import unicode_literals - -import copy - -# Ensure 'assert_raises' context manager support for Python 2.6 -import tests.backport_assert_raises # noqa -from nose.tools import assert_raises - -import boto3 -import boto -from botocore.exceptions import ClientError -from boto.exception import EC2ResponseError -import sure # noqa - -from moto import mock_ec2, mock_ec2_deprecated - - -@mock_ec2_deprecated -def test_create_and_describe_security_group(): - conn = boto.connect_ec2('the_key', 'the_secret') - - with assert_raises(EC2ResponseError) as ex: - security_group = conn.create_security_group( - 'test security group', 'this is a test security group', dry_run=True) - ex.exception.error_code.should.equal('DryRunOperation') - ex.exception.status.should.equal(400) - ex.exception.message.should.equal( - 'An error occurred (DryRunOperation) when calling the CreateSecurityGroup operation: Request would have succeeded, but DryRun flag is set') - - security_group = conn.create_security_group( - 'test security group', 'this is a test security group') - - security_group.name.should.equal('test security group') - security_group.description.should.equal('this is a test security group') - - # Trying to create another group with the same name should throw an error - with assert_raises(EC2ResponseError) as cm: - conn.create_security_group( - 'test security group', 'this is a test security group') - cm.exception.code.should.equal('InvalidGroup.Duplicate') - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none - - all_groups = conn.get_all_security_groups() - # The default group gets created automatically - all_groups.should.have.length_of(3) - group_names = [group.name for group in all_groups] - set(group_names).should.equal(set(["default", "test security group"])) - - -@mock_ec2_deprecated -def test_create_security_group_without_description_raises_error(): - conn = boto.connect_ec2('the_key', 'the_secret') - - with assert_raises(EC2ResponseError) as cm: - conn.create_security_group('test security group', '') - cm.exception.code.should.equal('MissingParameter') - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none - - -@mock_ec2_deprecated -def test_default_security_group(): - conn = boto.ec2.connect_to_region('us-east-1') - groups = conn.get_all_security_groups() - groups.should.have.length_of(2) - 
groups[0].name.should.equal("default") - - -@mock_ec2_deprecated -def test_create_and_describe_vpc_security_group(): - conn = boto.connect_ec2('the_key', 'the_secret') - vpc_id = 'vpc-5300000c' - security_group = conn.create_security_group( - 'test security group', 'this is a test security group', vpc_id=vpc_id) - - security_group.vpc_id.should.equal(vpc_id) - - security_group.name.should.equal('test security group') - security_group.description.should.equal('this is a test security group') - - # Trying to create another group with the same name in the same VPC should - # throw an error - with assert_raises(EC2ResponseError) as cm: - conn.create_security_group( - 'test security group', 'this is a test security group', vpc_id) - cm.exception.code.should.equal('InvalidGroup.Duplicate') - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none - - all_groups = conn.get_all_security_groups(filters={'vpc_id': [vpc_id]}) - - all_groups[0].vpc_id.should.equal(vpc_id) - - all_groups.should.have.length_of(1) - all_groups[0].name.should.equal('test security group') - - -@mock_ec2_deprecated -def test_create_two_security_groups_with_same_name_in_different_vpc(): - conn = boto.connect_ec2('the_key', 'the_secret') - vpc_id = 'vpc-5300000c' - vpc_id2 = 'vpc-5300000d' - - conn.create_security_group( - 'test security group', 'this is a test security group', vpc_id) - conn.create_security_group( - 'test security group', 'this is a test security group', vpc_id2) - - all_groups = conn.get_all_security_groups() - - all_groups.should.have.length_of(4) - group_names = [group.name for group in all_groups] - # The default group is created automatically - set(group_names).should.equal(set(["default", "test security group"])) - - -@mock_ec2_deprecated -def test_deleting_security_groups(): - conn = boto.connect_ec2('the_key', 'the_secret') - security_group1 = conn.create_security_group('test1', 'test1') - conn.create_security_group('test2', 'test2') - - conn.get_all_security_groups().should.have.length_of(4) - - # Deleting a group that doesn't exist should throw an error - with assert_raises(EC2ResponseError) as cm: - conn.delete_security_group('foobar') - cm.exception.code.should.equal('InvalidGroup.NotFound') - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none - - # Delete by name - with assert_raises(EC2ResponseError) as ex: - conn.delete_security_group('test2', dry_run=True) - ex.exception.error_code.should.equal('DryRunOperation') - ex.exception.status.should.equal(400) - ex.exception.message.should.equal( - 'An error occurred (DryRunOperation) when calling the DeleteSecurityGroup operation: Request would have succeeded, but DryRun flag is set') - - conn.delete_security_group('test2') - conn.get_all_security_groups().should.have.length_of(3) - - # Delete by group id - conn.delete_security_group(group_id=security_group1.id) - conn.get_all_security_groups().should.have.length_of(2) - - -@mock_ec2_deprecated -def test_delete_security_group_in_vpc(): - conn = boto.connect_ec2('the_key', 'the_secret') - vpc_id = "vpc-12345" - security_group1 = conn.create_security_group('test1', 'test1', vpc_id) - - # this should not throw an exception - conn.delete_security_group(group_id=security_group1.id) - - -@mock_ec2_deprecated -def test_authorize_ip_range_and_revoke(): - conn = boto.connect_ec2('the_key', 'the_secret') - security_group = conn.create_security_group('test', 'test') - - with assert_raises(EC2ResponseError) as ex: - success = security_group.authorize( - 
ip_protocol="tcp", from_port="22", to_port="2222", cidr_ip="123.123.123.123/32", dry_run=True) - ex.exception.error_code.should.equal('DryRunOperation') - ex.exception.status.should.equal(400) - ex.exception.message.should.equal( - 'An error occurred (DryRunOperation) when calling the GrantSecurityGroupIngress operation: Request would have succeeded, but DryRun flag is set') - - success = security_group.authorize( - ip_protocol="tcp", from_port="22", to_port="2222", cidr_ip="123.123.123.123/32") - assert success.should.be.true - - security_group = conn.get_all_security_groups(groupnames=['test'])[0] - int(security_group.rules[0].to_port).should.equal(2222) - security_group.rules[0].grants[ - 0].cidr_ip.should.equal("123.123.123.123/32") - - # Wrong Cidr should throw error - with assert_raises(EC2ResponseError) as cm: - security_group.revoke(ip_protocol="tcp", from_port="22", - to_port="2222", cidr_ip="123.123.123.122/32") - cm.exception.code.should.equal('InvalidPermission.NotFound') - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none - - # Actually revoke - with assert_raises(EC2ResponseError) as ex: - security_group.revoke(ip_protocol="tcp", from_port="22", - to_port="2222", cidr_ip="123.123.123.123/32", dry_run=True) - ex.exception.error_code.should.equal('DryRunOperation') - ex.exception.status.should.equal(400) - ex.exception.message.should.equal( - 'An error occurred (DryRunOperation) when calling the RevokeSecurityGroupIngress operation: Request would have succeeded, but DryRun flag is set') - - security_group.revoke(ip_protocol="tcp", from_port="22", - to_port="2222", cidr_ip="123.123.123.123/32") - - security_group = conn.get_all_security_groups()[0] - security_group.rules.should.have.length_of(0) - - # Test for egress as well - egress_security_group = conn.create_security_group( - 'testegress', 'testegress', vpc_id='vpc-3432589') - - with assert_raises(EC2ResponseError) as ex: - success = conn.authorize_security_group_egress( - egress_security_group.id, "tcp", from_port="22", to_port="2222", cidr_ip="123.123.123.123/32", dry_run=True) - ex.exception.error_code.should.equal('DryRunOperation') - ex.exception.status.should.equal(400) - ex.exception.message.should.equal( - 'An error occurred (DryRunOperation) when calling the GrantSecurityGroupEgress operation: Request would have succeeded, but DryRun flag is set') - - success = conn.authorize_security_group_egress( - egress_security_group.id, "tcp", from_port="22", to_port="2222", cidr_ip="123.123.123.123/32") - assert success.should.be.true - egress_security_group = conn.get_all_security_groups( - groupnames='testegress')[0] - # There are two egress rules associated with the security group: - # the default outbound rule and the new one - int(egress_security_group.rules_egress[1].to_port).should.equal(2222) - egress_security_group.rules_egress[1].grants[ - 0].cidr_ip.should.equal("123.123.123.123/32") - - # Wrong Cidr should throw error - egress_security_group.revoke.when.called_with( - ip_protocol="tcp", from_port="22", to_port="2222", cidr_ip="123.123.123.122/32").should.throw(EC2ResponseError) - - # Actually revoke - with assert_raises(EC2ResponseError) as ex: - conn.revoke_security_group_egress( - egress_security_group.id, "tcp", from_port="22", to_port="2222", cidr_ip="123.123.123.123/32", dry_run=True) - ex.exception.error_code.should.equal('DryRunOperation') - ex.exception.status.should.equal(400) - ex.exception.message.should.equal( - 'An error occurred (DryRunOperation) when calling the 
RevokeSecurityGroupEgress operation: Request would have succeeded, but DryRun flag is set') - - conn.revoke_security_group_egress( - egress_security_group.id, "tcp", from_port="22", to_port="2222", cidr_ip="123.123.123.123/32") - - egress_security_group = conn.get_all_security_groups()[0] - # There is still the default outbound rule - egress_security_group.rules_egress.should.have.length_of(1) - - -@mock_ec2_deprecated -def test_authorize_other_group_and_revoke(): - conn = boto.connect_ec2('the_key', 'the_secret') - security_group = conn.create_security_group('test', 'test') - other_security_group = conn.create_security_group('other', 'other') - wrong_group = conn.create_security_group('wrong', 'wrong') - - success = security_group.authorize( - ip_protocol="tcp", from_port="22", to_port="2222", src_group=other_security_group) - assert success.should.be.true - - security_group = [ - group for group in conn.get_all_security_groups() if group.name == 'test'][0] - int(security_group.rules[0].to_port).should.equal(2222) - security_group.rules[0].grants[ - 0].group_id.should.equal(other_security_group.id) - - # Wrong source group should throw error - with assert_raises(EC2ResponseError) as cm: - security_group.revoke(ip_protocol="tcp", from_port="22", - to_port="2222", src_group=wrong_group) - cm.exception.code.should.equal('InvalidPermission.NotFound') - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none - - # Actually revoke - security_group.revoke(ip_protocol="tcp", from_port="22", - to_port="2222", src_group=other_security_group) - - security_group = [ - group for group in conn.get_all_security_groups() if group.name == 'test'][0] - security_group.rules.should.have.length_of(0) - - -@mock_ec2 -def test_authorize_other_group_egress_and_revoke(): - ec2 = boto3.resource('ec2', region_name='us-west-1') - - vpc = ec2.create_vpc(CidrBlock='10.0.0.0/16') - - sg01 = ec2.create_security_group( - GroupName='sg01', Description='Test security group sg01', VpcId=vpc.id) - sg02 = ec2.create_security_group( - GroupName='sg02', Description='Test security group sg02', VpcId=vpc.id) - - ip_permission = { - 'IpProtocol': 'tcp', - 'FromPort': 27017, - 'ToPort': 27017, - 'UserIdGroupPairs': [{'GroupId': sg02.id, 'GroupName': 'sg02', 'UserId': sg02.owner_id}], - 'IpRanges': [] - } - - sg01.authorize_egress(IpPermissions=[ip_permission]) - sg01.ip_permissions_egress.should.have.length_of(2) - sg01.ip_permissions_egress.should.contain(ip_permission) - - sg01.revoke_egress(IpPermissions=[ip_permission]) - sg01.ip_permissions_egress.should.have.length_of(1) - - -@mock_ec2_deprecated -def test_authorize_group_in_vpc(): - conn = boto.connect_ec2('the_key', 'the_secret') - vpc_id = "vpc-12345" - - # create 2 groups in a vpc - security_group = conn.create_security_group('test1', 'test1', vpc_id) - other_security_group = conn.create_security_group('test2', 'test2', vpc_id) - - success = security_group.authorize( - ip_protocol="tcp", from_port="22", to_port="2222", src_group=other_security_group) - success.should.be.true - - # Check that the rule is accurate - security_group = [ - group for group in conn.get_all_security_groups() if group.name == 'test1'][0] - int(security_group.rules[0].to_port).should.equal(2222) - security_group.rules[0].grants[ - 0].group_id.should.equal(other_security_group.id) - - # Now remove the rule - success = security_group.revoke( - ip_protocol="tcp", from_port="22", to_port="2222", src_group=other_security_group) - success.should.be.true - - # And check that it 
gets revoked - security_group = [ - group for group in conn.get_all_security_groups() if group.name == 'test1'][0] - security_group.rules.should.have.length_of(0) - - -@mock_ec2_deprecated -def test_get_all_security_groups(): - conn = boto.connect_ec2() - sg1 = conn.create_security_group( - name='test1', description='test1', vpc_id='vpc-mjm05d27') - conn.create_security_group(name='test2', description='test2') - - resp = conn.get_all_security_groups(groupnames=['test1']) - resp.should.have.length_of(1) - resp[0].id.should.equal(sg1.id) - - with assert_raises(EC2ResponseError) as cm: - conn.get_all_security_groups(groupnames=['does_not_exist']) - cm.exception.code.should.equal('InvalidGroup.NotFound') - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none - - resp.should.have.length_of(1) - resp[0].id.should.equal(sg1.id) - - resp = conn.get_all_security_groups(filters={'vpc-id': ['vpc-mjm05d27']}) - resp.should.have.length_of(1) - resp[0].id.should.equal(sg1.id) - - resp = conn.get_all_security_groups(filters={'vpc_id': ['vpc-mjm05d27']}) - resp.should.have.length_of(1) - resp[0].id.should.equal(sg1.id) - - resp = conn.get_all_security_groups(filters={'description': ['test1']}) - resp.should.have.length_of(1) - resp[0].id.should.equal(sg1.id) - - resp = conn.get_all_security_groups() - resp.should.have.length_of(4) - - -@mock_ec2_deprecated -def test_authorize_bad_cidr_throws_invalid_parameter_value(): - conn = boto.connect_ec2('the_key', 'the_secret') - security_group = conn.create_security_group('test', 'test') - with assert_raises(EC2ResponseError) as cm: - security_group.authorize( - ip_protocol="tcp", from_port="22", to_port="2222", cidr_ip="123.123.123.123") - cm.exception.code.should.equal('InvalidParameterValue') - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none - - -@mock_ec2_deprecated -def test_security_group_tagging(): - conn = boto.connect_vpc() - vpc = conn.create_vpc("10.0.0.0/16") - - sg = conn.create_security_group("test-sg", "Test SG", vpc.id) - - with assert_raises(EC2ResponseError) as ex: - sg.add_tag("Test", "Tag", dry_run=True) - ex.exception.error_code.should.equal('DryRunOperation') - ex.exception.status.should.equal(400) - ex.exception.message.should.equal( - 'An error occurred (DryRunOperation) when calling the CreateTags operation: Request would have succeeded, but DryRun flag is set') - - sg.add_tag("Test", "Tag") - - tag = conn.get_all_tags()[0] - tag.name.should.equal("Test") - tag.value.should.equal("Tag") - - group = conn.get_all_security_groups("test-sg")[0] - group.tags.should.have.length_of(1) - group.tags["Test"].should.equal("Tag") - - -@mock_ec2_deprecated -def test_security_group_tag_filtering(): - conn = boto.connect_ec2() - sg = conn.create_security_group("test-sg", "Test SG") - sg.add_tag("test-tag", "test-value") - - groups = conn.get_all_security_groups( - filters={"tag:test-tag": "test-value"}) - groups.should.have.length_of(1) - - -@mock_ec2_deprecated -def test_authorize_all_protocols_with_no_port_specification(): - conn = boto.connect_ec2() - sg = conn.create_security_group('test', 'test') - - success = sg.authorize(ip_protocol='-1', cidr_ip='0.0.0.0/0') - success.should.be.true - - sg = conn.get_all_security_groups('test')[0] - sg.rules[0].from_port.should.equal(None) - sg.rules[0].to_port.should.equal(None) - - -@mock_ec2_deprecated -def test_sec_group_rule_limit(): - ec2_conn = boto.connect_ec2() - sg = ec2_conn.create_security_group('test', 'test') - other_sg = 
ec2_conn.create_security_group('test_2', 'test_other') - - # INGRESS - with assert_raises(EC2ResponseError) as cm: - ec2_conn.authorize_security_group( - group_id=sg.id, ip_protocol='-1', - cidr_ip=['{0}.0.0.0/0'.format(i) for i in range(110)]) - cm.exception.error_code.should.equal('RulesPerSecurityGroupLimitExceeded') - - sg.rules.should.be.empty - # authorize a rule targeting a different sec group (because this count too) - success = ec2_conn.authorize_security_group( - group_id=sg.id, ip_protocol='-1', - src_security_group_group_id=other_sg.id) - success.should.be.true - # fill the rules up the limit - success = ec2_conn.authorize_security_group( - group_id=sg.id, ip_protocol='-1', - cidr_ip=['{0}.0.0.0/0'.format(i) for i in range(99)]) - success.should.be.true - # verify that we cannot authorize past the limit for a CIDR IP - with assert_raises(EC2ResponseError) as cm: - ec2_conn.authorize_security_group( - group_id=sg.id, ip_protocol='-1', cidr_ip=['100.0.0.0/0']) - cm.exception.error_code.should.equal('RulesPerSecurityGroupLimitExceeded') - # verify that we cannot authorize past the limit for a different sec group - with assert_raises(EC2ResponseError) as cm: - ec2_conn.authorize_security_group( - group_id=sg.id, ip_protocol='-1', - src_security_group_group_id=other_sg.id) - cm.exception.error_code.should.equal('RulesPerSecurityGroupLimitExceeded') - - # EGRESS - # authorize a rule targeting a different sec group (because this count too) - ec2_conn.authorize_security_group_egress( - group_id=sg.id, ip_protocol='-1', - src_group_id=other_sg.id) - # fill the rules up the limit - # remember that by default, when created a sec group contains 1 egress rule - # so our other_sg rule + 98 CIDR IP rules + 1 by default == 100 the limit - for i in range(98): - ec2_conn.authorize_security_group_egress( - group_id=sg.id, ip_protocol='-1', - cidr_ip='{0}.0.0.0/0'.format(i)) - # verify that we cannot authorize past the limit for a CIDR IP - with assert_raises(EC2ResponseError) as cm: - ec2_conn.authorize_security_group_egress( - group_id=sg.id, ip_protocol='-1', - cidr_ip='101.0.0.0/0') - cm.exception.error_code.should.equal('RulesPerSecurityGroupLimitExceeded') - # verify that we cannot authorize past the limit for a different sec group - with assert_raises(EC2ResponseError) as cm: - ec2_conn.authorize_security_group_egress( - group_id=sg.id, ip_protocol='-1', - src_group_id=other_sg.id) - cm.exception.error_code.should.equal('RulesPerSecurityGroupLimitExceeded') - - -@mock_ec2_deprecated -def test_sec_group_rule_limit_vpc(): - ec2_conn = boto.connect_ec2() - vpc_conn = boto.connect_vpc() - - vpc = vpc_conn.create_vpc('10.0.0.0/8') - - sg = ec2_conn.create_security_group('test', 'test', vpc_id=vpc.id) - other_sg = ec2_conn.create_security_group('test_2', 'test', vpc_id=vpc.id) - - # INGRESS - with assert_raises(EC2ResponseError) as cm: - ec2_conn.authorize_security_group( - group_id=sg.id, ip_protocol='-1', - cidr_ip=['{0}.0.0.0/0'.format(i) for i in range(110)]) - cm.exception.error_code.should.equal('RulesPerSecurityGroupLimitExceeded') - - sg.rules.should.be.empty - # authorize a rule targeting a different sec group (because this count too) - success = ec2_conn.authorize_security_group( - group_id=sg.id, ip_protocol='-1', - src_security_group_group_id=other_sg.id) - success.should.be.true - # fill the rules up the limit - success = ec2_conn.authorize_security_group( - group_id=sg.id, ip_protocol='-1', - cidr_ip=['{0}.0.0.0/0'.format(i) for i in range(49)]) - # verify that we cannot authorize 
past the limit for a CIDR IP - success.should.be.true - with assert_raises(EC2ResponseError) as cm: - ec2_conn.authorize_security_group( - group_id=sg.id, ip_protocol='-1', cidr_ip=['100.0.0.0/0']) - cm.exception.error_code.should.equal('RulesPerSecurityGroupLimitExceeded') - # verify that we cannot authorize past the limit for a different sec group - with assert_raises(EC2ResponseError) as cm: - ec2_conn.authorize_security_group( - group_id=sg.id, ip_protocol='-1', - src_security_group_group_id=other_sg.id) - cm.exception.error_code.should.equal('RulesPerSecurityGroupLimitExceeded') - - # EGRESS - # authorize a rule targeting a different sec group (because this count too) - ec2_conn.authorize_security_group_egress( - group_id=sg.id, ip_protocol='-1', - src_group_id=other_sg.id) - # fill the rules up the limit - # remember that by default, when created a sec group contains 1 egress rule - # so our other_sg rule + 48 CIDR IP rules + 1 by default == 50 the limit - for i in range(48): - ec2_conn.authorize_security_group_egress( - group_id=sg.id, ip_protocol='-1', - cidr_ip='{0}.0.0.0/0'.format(i)) - # verify that we cannot authorize past the limit for a CIDR IP - with assert_raises(EC2ResponseError) as cm: - ec2_conn.authorize_security_group_egress( - group_id=sg.id, ip_protocol='-1', - cidr_ip='50.0.0.0/0') - cm.exception.error_code.should.equal('RulesPerSecurityGroupLimitExceeded') - # verify that we cannot authorize past the limit for a different sec group - with assert_raises(EC2ResponseError) as cm: - ec2_conn.authorize_security_group_egress( - group_id=sg.id, ip_protocol='-1', - src_group_id=other_sg.id) - cm.exception.error_code.should.equal('RulesPerSecurityGroupLimitExceeded') - - -''' -Boto3 -''' - - -@mock_ec2 -def test_add_same_rule_twice_throws_error(): - ec2 = boto3.resource('ec2', region_name='us-west-1') - - vpc = ec2.create_vpc(CidrBlock='10.0.0.0/16') - sg = ec2.create_security_group( - GroupName='sg1', Description='Test security group sg1', VpcId=vpc.id) - - ip_permissions = [ - { - 'IpProtocol': 'tcp', - 'FromPort': 27017, - 'ToPort': 27017, - 'IpRanges': [{"CidrIp": "1.2.3.4/32"}] - }, - ] - sg.authorize_ingress(IpPermissions=ip_permissions) - - with assert_raises(ClientError) as ex: - sg.authorize_ingress(IpPermissions=ip_permissions) - - -@mock_ec2 -def test_security_group_tagging_boto3(): - conn = boto3.client('ec2', region_name='us-east-1') - - sg = conn.create_security_group(GroupName="test-sg", Description="Test SG") - - with assert_raises(ClientError) as ex: - conn.create_tags(Resources=[sg['GroupId']], Tags=[ - {'Key': 'Test', 'Value': 'Tag'}], DryRun=True) - ex.exception.response['Error']['Code'].should.equal('DryRunOperation') - ex.exception.response['ResponseMetadata'][ - 'HTTPStatusCode'].should.equal(400) - ex.exception.response['Error']['Message'].should.equal( - 'An error occurred (DryRunOperation) when calling the CreateTags operation: Request would have succeeded, but DryRun flag is set') - - conn.create_tags(Resources=[sg['GroupId']], Tags=[ - {'Key': 'Test', 'Value': 'Tag'}]) - describe = conn.describe_security_groups( - Filters=[{'Name': 'tag-value', 'Values': ['Tag']}]) - tag = describe["SecurityGroups"][0]['Tags'][0] - tag['Value'].should.equal("Tag") - tag['Key'].should.equal("Test") - - -@mock_ec2 -def test_security_group_wildcard_tag_filter_boto3(): - conn = boto3.client('ec2', region_name='us-east-1') - sg = conn.create_security_group(GroupName="test-sg", Description="Test SG") - conn.create_tags(Resources=[sg['GroupId']], Tags=[ - {'Key': 
'Test', 'Value': 'Tag'}]) - describe = conn.describe_security_groups( - Filters=[{'Name': 'tag-value', 'Values': ['*']}]) - - tag = describe["SecurityGroups"][0]['Tags'][0] - tag['Value'].should.equal("Tag") - tag['Key'].should.equal("Test") - - -@mock_ec2 -def test_authorize_and_revoke_in_bulk(): - ec2 = boto3.resource('ec2', region_name='us-west-1') - - vpc = ec2.create_vpc(CidrBlock='10.0.0.0/16') - - sg01 = ec2.create_security_group( - GroupName='sg01', Description='Test security group sg01', VpcId=vpc.id) - sg02 = ec2.create_security_group( - GroupName='sg02', Description='Test security group sg02', VpcId=vpc.id) - sg03 = ec2.create_security_group( - GroupName='sg03', Description='Test security group sg03') - - ip_permissions = [ - { - 'IpProtocol': 'tcp', - 'FromPort': 27017, - 'ToPort': 27017, - 'UserIdGroupPairs': [{'GroupId': sg02.id, 'GroupName': 'sg02', - 'UserId': sg02.owner_id}], - 'IpRanges': [] - }, - { - 'IpProtocol': 'tcp', - 'FromPort': 27018, - 'ToPort': 27018, - 'UserIdGroupPairs': [{'GroupId': sg02.id, 'UserId': sg02.owner_id}], - 'IpRanges': [] - }, - { - 'IpProtocol': 'tcp', - 'FromPort': 27017, - 'ToPort': 27017, - 'UserIdGroupPairs': [{'GroupName': 'sg03', 'UserId': sg03.owner_id}], - 'IpRanges': [] - } - ] - expected_ip_permissions = copy.deepcopy(ip_permissions) - expected_ip_permissions[1]['UserIdGroupPairs'][0]['GroupName'] = 'sg02' - expected_ip_permissions[2]['UserIdGroupPairs'][0]['GroupId'] = sg03.id - - sg01.authorize_ingress(IpPermissions=ip_permissions) - sg01.ip_permissions.should.have.length_of(3) - for ip_permission in expected_ip_permissions: - sg01.ip_permissions.should.contain(ip_permission) - - sg01.revoke_ingress(IpPermissions=ip_permissions) - sg01.ip_permissions.should.be.empty - for ip_permission in expected_ip_permissions: - sg01.ip_permissions.shouldnt.contain(ip_permission) - - sg01.authorize_egress(IpPermissions=ip_permissions) - sg01.ip_permissions_egress.should.have.length_of(4) - for ip_permission in expected_ip_permissions: - sg01.ip_permissions_egress.should.contain(ip_permission) - - sg01.revoke_egress(IpPermissions=ip_permissions) - sg01.ip_permissions_egress.should.have.length_of(1) - for ip_permission in expected_ip_permissions: - sg01.ip_permissions_egress.shouldnt.contain(ip_permission) - - -@mock_ec2 -def test_security_group_ingress_without_multirule(): - ec2 = boto3.resource('ec2', 'ca-central-1') - sg = ec2.create_security_group(Description='Test SG', GroupName='test-sg') - - assert len(sg.ip_permissions) == 0 - sg.authorize_ingress(CidrIp='192.168.0.1/32', FromPort=22, ToPort=22, IpProtocol='tcp') - - # Fails - assert len(sg.ip_permissions) == 1 - - -@mock_ec2 -def test_security_group_ingress_without_multirule_after_reload(): - ec2 = boto3.resource('ec2', 'ca-central-1') - sg = ec2.create_security_group(Description='Test SG', GroupName='test-sg') - - assert len(sg.ip_permissions) == 0 - sg.authorize_ingress(CidrIp='192.168.0.1/32', FromPort=22, ToPort=22, IpProtocol='tcp') - - # Also Fails - sg_after = ec2.SecurityGroup(sg.id) - assert len(sg_after.ip_permissions) == 1 - - -@mock_ec2_deprecated -def test_get_all_security_groups_filter_with_same_vpc_id(): - conn = boto.connect_ec2('the_key', 'the_secret') - vpc_id = 'vpc-5300000c' - security_group = conn.create_security_group( - 'test1', 'test1', vpc_id=vpc_id) - security_group2 = conn.create_security_group( - 'test2', 'test2', vpc_id=vpc_id) - - security_group.vpc_id.should.equal(vpc_id) - security_group2.vpc_id.should.equal(vpc_id) - - security_groups = 
conn.get_all_security_groups( - group_ids=[security_group.id], filters={'vpc-id': [vpc_id]}) - security_groups.should.have.length_of(1) - - with assert_raises(EC2ResponseError) as cm: - conn.get_all_security_groups(group_ids=['does_not_exist']) - cm.exception.code.should.equal('InvalidGroup.NotFound') - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none +from __future__ import unicode_literals + +import copy + +# Ensure 'assert_raises' context manager support for Python 2.6 +import tests.backport_assert_raises # noqa +from nose.tools import assert_raises + +import boto3 +import boto +from botocore.exceptions import ClientError +from boto.exception import EC2ResponseError +import sure # noqa + +from moto import mock_ec2, mock_ec2_deprecated + + +@mock_ec2_deprecated +def test_create_and_describe_security_group(): + conn = boto.connect_ec2('the_key', 'the_secret') + + with assert_raises(EC2ResponseError) as ex: + security_group = conn.create_security_group( + 'test security group', 'this is a test security group', dry_run=True) + ex.exception.error_code.should.equal('DryRunOperation') + ex.exception.status.should.equal(400) + ex.exception.message.should.equal( + 'An error occurred (DryRunOperation) when calling the CreateSecurityGroup operation: Request would have succeeded, but DryRun flag is set') + + security_group = conn.create_security_group( + 'test security group', 'this is a test security group') + + security_group.name.should.equal('test security group') + security_group.description.should.equal('this is a test security group') + + # Trying to create another group with the same name should throw an error + with assert_raises(EC2ResponseError) as cm: + conn.create_security_group( + 'test security group', 'this is a test security group') + cm.exception.code.should.equal('InvalidGroup.Duplicate') + cm.exception.status.should.equal(400) + cm.exception.request_id.should_not.be.none + + all_groups = conn.get_all_security_groups() + # The default group gets created automatically + all_groups.should.have.length_of(3) + group_names = [group.name for group in all_groups] + set(group_names).should.equal(set(["default", "test security group"])) + + +@mock_ec2_deprecated +def test_create_security_group_without_description_raises_error(): + conn = boto.connect_ec2('the_key', 'the_secret') + + with assert_raises(EC2ResponseError) as cm: + conn.create_security_group('test security group', '') + cm.exception.code.should.equal('MissingParameter') + cm.exception.status.should.equal(400) + cm.exception.request_id.should_not.be.none + + +@mock_ec2_deprecated +def test_default_security_group(): + conn = boto.ec2.connect_to_region('us-east-1') + groups = conn.get_all_security_groups() + groups.should.have.length_of(2) + groups[0].name.should.equal("default") + + +@mock_ec2_deprecated +def test_create_and_describe_vpc_security_group(): + conn = boto.connect_ec2('the_key', 'the_secret') + vpc_id = 'vpc-5300000c' + security_group = conn.create_security_group( + 'test security group', 'this is a test security group', vpc_id=vpc_id) + + security_group.vpc_id.should.equal(vpc_id) + + security_group.name.should.equal('test security group') + security_group.description.should.equal('this is a test security group') + + # Trying to create another group with the same name in the same VPC should + # throw an error + with assert_raises(EC2ResponseError) as cm: + conn.create_security_group( + 'test security group', 'this is a test security group', vpc_id) + 
cm.exception.code.should.equal('InvalidGroup.Duplicate') + cm.exception.status.should.equal(400) + cm.exception.request_id.should_not.be.none + + all_groups = conn.get_all_security_groups(filters={'vpc_id': [vpc_id]}) + + all_groups[0].vpc_id.should.equal(vpc_id) + + all_groups.should.have.length_of(1) + all_groups[0].name.should.equal('test security group') + + +@mock_ec2_deprecated +def test_create_two_security_groups_with_same_name_in_different_vpc(): + conn = boto.connect_ec2('the_key', 'the_secret') + vpc_id = 'vpc-5300000c' + vpc_id2 = 'vpc-5300000d' + + conn.create_security_group( + 'test security group', 'this is a test security group', vpc_id) + conn.create_security_group( + 'test security group', 'this is a test security group', vpc_id2) + + all_groups = conn.get_all_security_groups() + + all_groups.should.have.length_of(4) + group_names = [group.name for group in all_groups] + # The default group is created automatically + set(group_names).should.equal(set(["default", "test security group"])) + + +@mock_ec2_deprecated +def test_deleting_security_groups(): + conn = boto.connect_ec2('the_key', 'the_secret') + security_group1 = conn.create_security_group('test1', 'test1') + conn.create_security_group('test2', 'test2') + + conn.get_all_security_groups().should.have.length_of(4) + + # Deleting a group that doesn't exist should throw an error + with assert_raises(EC2ResponseError) as cm: + conn.delete_security_group('foobar') + cm.exception.code.should.equal('InvalidGroup.NotFound') + cm.exception.status.should.equal(400) + cm.exception.request_id.should_not.be.none + + # Delete by name + with assert_raises(EC2ResponseError) as ex: + conn.delete_security_group('test2', dry_run=True) + ex.exception.error_code.should.equal('DryRunOperation') + ex.exception.status.should.equal(400) + ex.exception.message.should.equal( + 'An error occurred (DryRunOperation) when calling the DeleteSecurityGroup operation: Request would have succeeded, but DryRun flag is set') + + conn.delete_security_group('test2') + conn.get_all_security_groups().should.have.length_of(3) + + # Delete by group id + conn.delete_security_group(group_id=security_group1.id) + conn.get_all_security_groups().should.have.length_of(2) + + +@mock_ec2_deprecated +def test_delete_security_group_in_vpc(): + conn = boto.connect_ec2('the_key', 'the_secret') + vpc_id = "vpc-12345" + security_group1 = conn.create_security_group('test1', 'test1', vpc_id) + + # this should not throw an exception + conn.delete_security_group(group_id=security_group1.id) + + +@mock_ec2_deprecated +def test_authorize_ip_range_and_revoke(): + conn = boto.connect_ec2('the_key', 'the_secret') + security_group = conn.create_security_group('test', 'test') + + with assert_raises(EC2ResponseError) as ex: + success = security_group.authorize( + ip_protocol="tcp", from_port="22", to_port="2222", cidr_ip="123.123.123.123/32", dry_run=True) + ex.exception.error_code.should.equal('DryRunOperation') + ex.exception.status.should.equal(400) + ex.exception.message.should.equal( + 'An error occurred (DryRunOperation) when calling the GrantSecurityGroupIngress operation: Request would have succeeded, but DryRun flag is set') + + success = security_group.authorize( + ip_protocol="tcp", from_port="22", to_port="2222", cidr_ip="123.123.123.123/32") + assert success.should.be.true + + security_group = conn.get_all_security_groups(groupnames=['test'])[0] + int(security_group.rules[0].to_port).should.equal(2222) + security_group.rules[0].grants[ + 
0].cidr_ip.should.equal("123.123.123.123/32") + + # Wrong Cidr should throw error + with assert_raises(EC2ResponseError) as cm: + security_group.revoke(ip_protocol="tcp", from_port="22", + to_port="2222", cidr_ip="123.123.123.122/32") + cm.exception.code.should.equal('InvalidPermission.NotFound') + cm.exception.status.should.equal(400) + cm.exception.request_id.should_not.be.none + + # Actually revoke + with assert_raises(EC2ResponseError) as ex: + security_group.revoke(ip_protocol="tcp", from_port="22", + to_port="2222", cidr_ip="123.123.123.123/32", dry_run=True) + ex.exception.error_code.should.equal('DryRunOperation') + ex.exception.status.should.equal(400) + ex.exception.message.should.equal( + 'An error occurred (DryRunOperation) when calling the RevokeSecurityGroupIngress operation: Request would have succeeded, but DryRun flag is set') + + security_group.revoke(ip_protocol="tcp", from_port="22", + to_port="2222", cidr_ip="123.123.123.123/32") + + security_group = conn.get_all_security_groups()[0] + security_group.rules.should.have.length_of(0) + + # Test for egress as well + egress_security_group = conn.create_security_group( + 'testegress', 'testegress', vpc_id='vpc-3432589') + + with assert_raises(EC2ResponseError) as ex: + success = conn.authorize_security_group_egress( + egress_security_group.id, "tcp", from_port="22", to_port="2222", cidr_ip="123.123.123.123/32", dry_run=True) + ex.exception.error_code.should.equal('DryRunOperation') + ex.exception.status.should.equal(400) + ex.exception.message.should.equal( + 'An error occurred (DryRunOperation) when calling the GrantSecurityGroupEgress operation: Request would have succeeded, but DryRun flag is set') + + success = conn.authorize_security_group_egress( + egress_security_group.id, "tcp", from_port="22", to_port="2222", cidr_ip="123.123.123.123/32") + assert success.should.be.true + egress_security_group = conn.get_all_security_groups( + groupnames='testegress')[0] + # There are two egress rules associated with the security group: + # the default outbound rule and the new one + int(egress_security_group.rules_egress[1].to_port).should.equal(2222) + egress_security_group.rules_egress[1].grants[ + 0].cidr_ip.should.equal("123.123.123.123/32") + + # Wrong Cidr should throw error + egress_security_group.revoke.when.called_with( + ip_protocol="tcp", from_port="22", to_port="2222", cidr_ip="123.123.123.122/32").should.throw(EC2ResponseError) + + # Actually revoke + with assert_raises(EC2ResponseError) as ex: + conn.revoke_security_group_egress( + egress_security_group.id, "tcp", from_port="22", to_port="2222", cidr_ip="123.123.123.123/32", dry_run=True) + ex.exception.error_code.should.equal('DryRunOperation') + ex.exception.status.should.equal(400) + ex.exception.message.should.equal( + 'An error occurred (DryRunOperation) when calling the RevokeSecurityGroupEgress operation: Request would have succeeded, but DryRun flag is set') + + conn.revoke_security_group_egress( + egress_security_group.id, "tcp", from_port="22", to_port="2222", cidr_ip="123.123.123.123/32") + + egress_security_group = conn.get_all_security_groups()[0] + # There is still the default outbound rule + egress_security_group.rules_egress.should.have.length_of(1) + + +@mock_ec2_deprecated +def test_authorize_other_group_and_revoke(): + conn = boto.connect_ec2('the_key', 'the_secret') + security_group = conn.create_security_group('test', 'test') + other_security_group = conn.create_security_group('other', 'other') + wrong_group = conn.create_security_group('wrong', 
'wrong') + + success = security_group.authorize( + ip_protocol="tcp", from_port="22", to_port="2222", src_group=other_security_group) + assert success.should.be.true + + security_group = [ + group for group in conn.get_all_security_groups() if group.name == 'test'][0] + int(security_group.rules[0].to_port).should.equal(2222) + security_group.rules[0].grants[ + 0].group_id.should.equal(other_security_group.id) + + # Wrong source group should throw error + with assert_raises(EC2ResponseError) as cm: + security_group.revoke(ip_protocol="tcp", from_port="22", + to_port="2222", src_group=wrong_group) + cm.exception.code.should.equal('InvalidPermission.NotFound') + cm.exception.status.should.equal(400) + cm.exception.request_id.should_not.be.none + + # Actually revoke + security_group.revoke(ip_protocol="tcp", from_port="22", + to_port="2222", src_group=other_security_group) + + security_group = [ + group for group in conn.get_all_security_groups() if group.name == 'test'][0] + security_group.rules.should.have.length_of(0) + + +@mock_ec2 +def test_authorize_other_group_egress_and_revoke(): + ec2 = boto3.resource('ec2', region_name='us-west-1') + + vpc = ec2.create_vpc(CidrBlock='10.0.0.0/16') + + sg01 = ec2.create_security_group( + GroupName='sg01', Description='Test security group sg01', VpcId=vpc.id) + sg02 = ec2.create_security_group( + GroupName='sg02', Description='Test security group sg02', VpcId=vpc.id) + + ip_permission = { + 'IpProtocol': 'tcp', + 'FromPort': 27017, + 'ToPort': 27017, + 'UserIdGroupPairs': [{'GroupId': sg02.id, 'GroupName': 'sg02', 'UserId': sg02.owner_id}], + 'IpRanges': [] + } + + sg01.authorize_egress(IpPermissions=[ip_permission]) + sg01.ip_permissions_egress.should.have.length_of(2) + sg01.ip_permissions_egress.should.contain(ip_permission) + + sg01.revoke_egress(IpPermissions=[ip_permission]) + sg01.ip_permissions_egress.should.have.length_of(1) + + +@mock_ec2_deprecated +def test_authorize_group_in_vpc(): + conn = boto.connect_ec2('the_key', 'the_secret') + vpc_id = "vpc-12345" + + # create 2 groups in a vpc + security_group = conn.create_security_group('test1', 'test1', vpc_id) + other_security_group = conn.create_security_group('test2', 'test2', vpc_id) + + success = security_group.authorize( + ip_protocol="tcp", from_port="22", to_port="2222", src_group=other_security_group) + success.should.be.true + + # Check that the rule is accurate + security_group = [ + group for group in conn.get_all_security_groups() if group.name == 'test1'][0] + int(security_group.rules[0].to_port).should.equal(2222) + security_group.rules[0].grants[ + 0].group_id.should.equal(other_security_group.id) + + # Now remove the rule + success = security_group.revoke( + ip_protocol="tcp", from_port="22", to_port="2222", src_group=other_security_group) + success.should.be.true + + # And check that it gets revoked + security_group = [ + group for group in conn.get_all_security_groups() if group.name == 'test1'][0] + security_group.rules.should.have.length_of(0) + + +@mock_ec2_deprecated +def test_get_all_security_groups(): + conn = boto.connect_ec2() + sg1 = conn.create_security_group( + name='test1', description='test1', vpc_id='vpc-mjm05d27') + conn.create_security_group(name='test2', description='test2') + + resp = conn.get_all_security_groups(groupnames=['test1']) + resp.should.have.length_of(1) + resp[0].id.should.equal(sg1.id) + + with assert_raises(EC2ResponseError) as cm: + conn.get_all_security_groups(groupnames=['does_not_exist']) + 
cm.exception.code.should.equal('InvalidGroup.NotFound')
+    cm.exception.status.should.equal(400)
+    cm.exception.request_id.should_not.be.none
+
+    resp.should.have.length_of(1)
+    resp[0].id.should.equal(sg1.id)
+
+    resp = conn.get_all_security_groups(filters={'vpc-id': ['vpc-mjm05d27']})
+    resp.should.have.length_of(1)
+    resp[0].id.should.equal(sg1.id)
+
+    resp = conn.get_all_security_groups(filters={'vpc_id': ['vpc-mjm05d27']})
+    resp.should.have.length_of(1)
+    resp[0].id.should.equal(sg1.id)
+
+    resp = conn.get_all_security_groups(filters={'description': ['test1']})
+    resp.should.have.length_of(1)
+    resp[0].id.should.equal(sg1.id)
+
+    resp = conn.get_all_security_groups()
+    resp.should.have.length_of(4)
+
+
+@mock_ec2_deprecated
+def test_authorize_bad_cidr_throws_invalid_parameter_value():
+    conn = boto.connect_ec2('the_key', 'the_secret')
+    security_group = conn.create_security_group('test', 'test')
+    with assert_raises(EC2ResponseError) as cm:
+        security_group.authorize(
+            ip_protocol="tcp", from_port="22", to_port="2222", cidr_ip="123.123.123.123")
+    cm.exception.code.should.equal('InvalidParameterValue')
+    cm.exception.status.should.equal(400)
+    cm.exception.request_id.should_not.be.none
+
+
+@mock_ec2_deprecated
+def test_security_group_tagging():
+    conn = boto.connect_vpc()
+    vpc = conn.create_vpc("10.0.0.0/16")
+
+    sg = conn.create_security_group("test-sg", "Test SG", vpc.id)
+
+    with assert_raises(EC2ResponseError) as ex:
+        sg.add_tag("Test", "Tag", dry_run=True)
+    ex.exception.error_code.should.equal('DryRunOperation')
+    ex.exception.status.should.equal(400)
+    ex.exception.message.should.equal(
+        'An error occurred (DryRunOperation) when calling the CreateTags operation: Request would have succeeded, but DryRun flag is set')
+
+    sg.add_tag("Test", "Tag")
+
+    tag = conn.get_all_tags()[0]
+    tag.name.should.equal("Test")
+    tag.value.should.equal("Tag")
+
+    group = conn.get_all_security_groups("test-sg")[0]
+    group.tags.should.have.length_of(1)
+    group.tags["Test"].should.equal("Tag")
+
+
+@mock_ec2_deprecated
+def test_security_group_tag_filtering():
+    conn = boto.connect_ec2()
+    sg = conn.create_security_group("test-sg", "Test SG")
+    sg.add_tag("test-tag", "test-value")
+
+    groups = conn.get_all_security_groups(
+        filters={"tag:test-tag": "test-value"})
+    groups.should.have.length_of(1)
+
+
+@mock_ec2_deprecated
+def test_authorize_all_protocols_with_no_port_specification():
+    conn = boto.connect_ec2()
+    sg = conn.create_security_group('test', 'test')
+
+    success = sg.authorize(ip_protocol='-1', cidr_ip='0.0.0.0/0')
+    success.should.be.true
+
+    sg = conn.get_all_security_groups('test')[0]
+    sg.rules[0].from_port.should.equal(None)
+    sg.rules[0].to_port.should.equal(None)
+
+
+@mock_ec2_deprecated
+def test_sec_group_rule_limit():
+    ec2_conn = boto.connect_ec2()
+    sg = ec2_conn.create_security_group('test', 'test')
+    other_sg = ec2_conn.create_security_group('test_2', 'test_other')
+
+    # INGRESS
+    with assert_raises(EC2ResponseError) as cm:
+        ec2_conn.authorize_security_group(
+            group_id=sg.id, ip_protocol='-1',
+            cidr_ip=['{0}.0.0.0/0'.format(i) for i in range(110)])
+    cm.exception.error_code.should.equal('RulesPerSecurityGroupLimitExceeded')
+
+    sg.rules.should.be.empty
+    # authorize a rule targeting a different sec group (because this counts too)
+    success = ec2_conn.authorize_security_group(
+        group_id=sg.id, ip_protocol='-1',
+        src_security_group_group_id=other_sg.id)
+    success.should.be.true
+    # fill the rules up to the limit
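+    # (1 src-group rule + 99 CIDR rules == 100, the per-group rule limit)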
+        group_id=sg.id, ip_protocol='-1',
+        cidr_ip=['{0}.0.0.0/0'.format(i) for i in range(99)])
+    success.should.be.true
+    # verify that we cannot authorize past the limit for a CIDR IP
+    with assert_raises(EC2ResponseError) as cm:
+        ec2_conn.authorize_security_group(
+            group_id=sg.id, ip_protocol='-1', cidr_ip=['100.0.0.0/0'])
+    cm.exception.error_code.should.equal('RulesPerSecurityGroupLimitExceeded')
+    # verify that we cannot authorize past the limit for a different sec group
+    with assert_raises(EC2ResponseError) as cm:
+        ec2_conn.authorize_security_group(
+            group_id=sg.id, ip_protocol='-1',
+            src_security_group_group_id=other_sg.id)
+    cm.exception.error_code.should.equal('RulesPerSecurityGroupLimitExceeded')
+
+    # EGRESS
+    # authorize a rule targeting a different sec group (because this counts too)
+    ec2_conn.authorize_security_group_egress(
+        group_id=sg.id, ip_protocol='-1',
+        src_group_id=other_sg.id)
+    # fill the rules up to the limit
+    # remember that, by default, a newly created sec group contains 1 egress rule,
+    # so our other_sg rule + 98 CIDR IP rules + 1 by default == 100, the limit
+    for i in range(98):
+        ec2_conn.authorize_security_group_egress(
+            group_id=sg.id, ip_protocol='-1',
+            cidr_ip='{0}.0.0.0/0'.format(i))
+    # verify that we cannot authorize past the limit for a CIDR IP
+    with assert_raises(EC2ResponseError) as cm:
+        ec2_conn.authorize_security_group_egress(
+            group_id=sg.id, ip_protocol='-1',
+            cidr_ip='101.0.0.0/0')
+    cm.exception.error_code.should.equal('RulesPerSecurityGroupLimitExceeded')
+    # verify that we cannot authorize past the limit for a different sec group
+    with assert_raises(EC2ResponseError) as cm:
+        ec2_conn.authorize_security_group_egress(
+            group_id=sg.id, ip_protocol='-1',
+            src_group_id=other_sg.id)
+    cm.exception.error_code.should.equal('RulesPerSecurityGroupLimitExceeded')
+
+
+@mock_ec2_deprecated
+def test_sec_group_rule_limit_vpc():
+    ec2_conn = boto.connect_ec2()
+    vpc_conn = boto.connect_vpc()
+
+    vpc = vpc_conn.create_vpc('10.0.0.0/8')
+
+    sg = ec2_conn.create_security_group('test', 'test', vpc_id=vpc.id)
+    other_sg = ec2_conn.create_security_group('test_2', 'test', vpc_id=vpc.id)
+
+    # INGRESS
+    with assert_raises(EC2ResponseError) as cm:
+        ec2_conn.authorize_security_group(
+            group_id=sg.id, ip_protocol='-1',
+            cidr_ip=['{0}.0.0.0/0'.format(i) for i in range(110)])
+    cm.exception.error_code.should.equal('RulesPerSecurityGroupLimitExceeded')
+
+    sg.rules.should.be.empty
+    # authorize a rule targeting a different sec group (because this counts too)
+    success = ec2_conn.authorize_security_group(
+        group_id=sg.id, ip_protocol='-1',
+        src_security_group_group_id=other_sg.id)
+    success.should.be.true
+    # fill the rules up to the limit
+    success = ec2_conn.authorize_security_group(
+        group_id=sg.id, ip_protocol='-1',
+        cidr_ip=['{0}.0.0.0/0'.format(i) for i in range(49)])
+    success.should.be.true
+    # verify that we cannot authorize past the limit for a CIDR IP
+    with assert_raises(EC2ResponseError) as cm:
+        ec2_conn.authorize_security_group(
+            group_id=sg.id, ip_protocol='-1', cidr_ip=['100.0.0.0/0'])
+    cm.exception.error_code.should.equal('RulesPerSecurityGroupLimitExceeded')
+    # verify that we cannot authorize past the limit for a different sec group
+    with assert_raises(EC2ResponseError) as cm:
+        ec2_conn.authorize_security_group(
+            group_id=sg.id, ip_protocol='-1',
+            src_security_group_group_id=other_sg.id)
+    cm.exception.error_code.should.equal('RulesPerSecurityGroupLimitExceeded')
+
+    # EGRESS
+    # authorize a rule targeting a different sec group (because this counts too)
+    ec2_conn.authorize_security_group_egress(
+        group_id=sg.id, ip_protocol='-1',
+        src_group_id=other_sg.id)
+    # fill the rules up to the limit
+    # remember that, by default, a newly created sec group contains 1 egress rule,
+    # so our other_sg rule + 48 CIDR IP rules + 1 by default == 50, the limit
+    for i in range(48):
+        ec2_conn.authorize_security_group_egress(
+            group_id=sg.id, ip_protocol='-1',
+            cidr_ip='{0}.0.0.0/0'.format(i))
+    # verify that we cannot authorize past the limit for a CIDR IP
+    with assert_raises(EC2ResponseError) as cm:
+        ec2_conn.authorize_security_group_egress(
+            group_id=sg.id, ip_protocol='-1',
+            cidr_ip='50.0.0.0/0')
+    cm.exception.error_code.should.equal('RulesPerSecurityGroupLimitExceeded')
+    # verify that we cannot authorize past the limit for a different sec group
+    with assert_raises(EC2ResponseError) as cm:
+        ec2_conn.authorize_security_group_egress(
+            group_id=sg.id, ip_protocol='-1',
+            src_group_id=other_sg.id)
+    cm.exception.error_code.should.equal('RulesPerSecurityGroupLimitExceeded')
+
+
+'''
+Boto3
+'''
+
+
+@mock_ec2
+def test_add_same_rule_twice_throws_error():
+    ec2 = boto3.resource('ec2', region_name='us-west-1')
+
+    vpc = ec2.create_vpc(CidrBlock='10.0.0.0/16')
+    sg = ec2.create_security_group(
+        GroupName='sg1', Description='Test security group sg1', VpcId=vpc.id)
+
+    ip_permissions = [
+        {
+            'IpProtocol': 'tcp',
+            'FromPort': 27017,
+            'ToPort': 27017,
+            'IpRanges': [{"CidrIp": "1.2.3.4/32"}]
+        },
+    ]
+    sg.authorize_ingress(IpPermissions=ip_permissions)
+
+    with assert_raises(ClientError) as ex:
+        sg.authorize_ingress(IpPermissions=ip_permissions)
+
+
+@mock_ec2
+def test_security_group_tagging_boto3():
+    conn = boto3.client('ec2', region_name='us-east-1')
+
+    sg = conn.create_security_group(GroupName="test-sg", Description="Test SG")
+
+    with assert_raises(ClientError) as ex:
+        conn.create_tags(Resources=[sg['GroupId']], Tags=[
+            {'Key': 'Test', 'Value': 'Tag'}], DryRun=True)
+    ex.exception.response['Error']['Code'].should.equal('DryRunOperation')
+    ex.exception.response['ResponseMetadata'][
+        'HTTPStatusCode'].should.equal(400)
+    ex.exception.response['Error']['Message'].should.equal(
+        'An error occurred (DryRunOperation) when calling the CreateTags operation: Request would have succeeded, but DryRun flag is set')
+
+    conn.create_tags(Resources=[sg['GroupId']], Tags=[
+        {'Key': 'Test', 'Value': 'Tag'}])
+    describe = conn.describe_security_groups(
+        Filters=[{'Name': 'tag-value', 'Values': ['Tag']}])
+    tag = describe["SecurityGroups"][0]['Tags'][0]
+    tag['Value'].should.equal("Tag")
+    tag['Key'].should.equal("Test")
+
+
+@mock_ec2
+def test_security_group_wildcard_tag_filter_boto3():
+    conn = boto3.client('ec2', region_name='us-east-1')
+    sg = conn.create_security_group(GroupName="test-sg", Description="Test SG")
+    conn.create_tags(Resources=[sg['GroupId']], Tags=[
+        {'Key': 'Test', 'Value': 'Tag'}])
+    describe = conn.describe_security_groups(
+        Filters=[{'Name': 'tag-value', 'Values': ['*']}])
+
+    tag = describe["SecurityGroups"][0]['Tags'][0]
+    tag['Value'].should.equal("Tag")
+    tag['Key'].should.equal("Test")
+
+
+@mock_ec2
+def test_authorize_and_revoke_in_bulk():
+    ec2 = boto3.resource('ec2', region_name='us-west-1')
+
+    vpc = ec2.create_vpc(CidrBlock='10.0.0.0/16')
+
+    sg01 = ec2.create_security_group(
+        GroupName='sg01', Description='Test security group sg01', VpcId=vpc.id)
+    sg02 = ec2.create_security_group(
+        GroupName='sg02', Description='Test security group sg02', VpcId=vpc.id)
+    sg03 = ec2.create_security_group(
GroupName='sg03', Description='Test security group sg03') + + ip_permissions = [ + { + 'IpProtocol': 'tcp', + 'FromPort': 27017, + 'ToPort': 27017, + 'UserIdGroupPairs': [{'GroupId': sg02.id, 'GroupName': 'sg02', + 'UserId': sg02.owner_id}], + 'IpRanges': [] + }, + { + 'IpProtocol': 'tcp', + 'FromPort': 27018, + 'ToPort': 27018, + 'UserIdGroupPairs': [{'GroupId': sg02.id, 'UserId': sg02.owner_id}], + 'IpRanges': [] + }, + { + 'IpProtocol': 'tcp', + 'FromPort': 27017, + 'ToPort': 27017, + 'UserIdGroupPairs': [{'GroupName': 'sg03', 'UserId': sg03.owner_id}], + 'IpRanges': [] + } + ] + expected_ip_permissions = copy.deepcopy(ip_permissions) + expected_ip_permissions[1]['UserIdGroupPairs'][0]['GroupName'] = 'sg02' + expected_ip_permissions[2]['UserIdGroupPairs'][0]['GroupId'] = sg03.id + + sg01.authorize_ingress(IpPermissions=ip_permissions) + sg01.ip_permissions.should.have.length_of(3) + for ip_permission in expected_ip_permissions: + sg01.ip_permissions.should.contain(ip_permission) + + sg01.revoke_ingress(IpPermissions=ip_permissions) + sg01.ip_permissions.should.be.empty + for ip_permission in expected_ip_permissions: + sg01.ip_permissions.shouldnt.contain(ip_permission) + + sg01.authorize_egress(IpPermissions=ip_permissions) + sg01.ip_permissions_egress.should.have.length_of(4) + for ip_permission in expected_ip_permissions: + sg01.ip_permissions_egress.should.contain(ip_permission) + + sg01.revoke_egress(IpPermissions=ip_permissions) + sg01.ip_permissions_egress.should.have.length_of(1) + for ip_permission in expected_ip_permissions: + sg01.ip_permissions_egress.shouldnt.contain(ip_permission) + + +@mock_ec2 +def test_security_group_ingress_without_multirule(): + ec2 = boto3.resource('ec2', 'ca-central-1') + sg = ec2.create_security_group(Description='Test SG', GroupName='test-sg') + + assert len(sg.ip_permissions) == 0 + sg.authorize_ingress(CidrIp='192.168.0.1/32', FromPort=22, ToPort=22, IpProtocol='tcp') + + # Fails + assert len(sg.ip_permissions) == 1 + + +@mock_ec2 +def test_security_group_ingress_without_multirule_after_reload(): + ec2 = boto3.resource('ec2', 'ca-central-1') + sg = ec2.create_security_group(Description='Test SG', GroupName='test-sg') + + assert len(sg.ip_permissions) == 0 + sg.authorize_ingress(CidrIp='192.168.0.1/32', FromPort=22, ToPort=22, IpProtocol='tcp') + + # Also Fails + sg_after = ec2.SecurityGroup(sg.id) + assert len(sg_after.ip_permissions) == 1 + + +@mock_ec2_deprecated +def test_get_all_security_groups_filter_with_same_vpc_id(): + conn = boto.connect_ec2('the_key', 'the_secret') + vpc_id = 'vpc-5300000c' + security_group = conn.create_security_group( + 'test1', 'test1', vpc_id=vpc_id) + security_group2 = conn.create_security_group( + 'test2', 'test2', vpc_id=vpc_id) + + security_group.vpc_id.should.equal(vpc_id) + security_group2.vpc_id.should.equal(vpc_id) + + security_groups = conn.get_all_security_groups( + group_ids=[security_group.id], filters={'vpc-id': [vpc_id]}) + security_groups.should.have.length_of(1) + + with assert_raises(EC2ResponseError) as cm: + conn.get_all_security_groups(group_ids=['does_not_exist']) + cm.exception.code.should.equal('InvalidGroup.NotFound') + cm.exception.status.should.equal(400) + cm.exception.request_id.should_not.be.none diff --git a/tests/test_ec2/test_server.py b/tests/test_ec2/test_server.py index 00be6259384a..dc56571445a0 100644 --- a/tests/test_ec2/test_server.py +++ b/tests/test_ec2/test_server.py @@ -1,26 +1,26 @@ -from __future__ import unicode_literals -import re -import sure # noqa - -import 
moto.server as server
-
-'''
-Test the different server responses
-'''
-
-
-def test_ec2_server_get():
-    backend = server.create_backend_app("ec2")
-    test_client = backend.test_client()
-
-    res = test_client.get(
-        '/?Action=RunInstances&ImageId=ami-60a54009',
-        headers={"Host": "ec2.us-east-1.amazonaws.com"}
-    )
-
-    groups = re.search("<instanceId>(.*)</instanceId>",
-                       res.data.decode('utf-8'))
-    instance_id = groups.groups()[0]
-
-    res = test_client.get('/?Action=DescribeInstances')
-    res.data.decode('utf-8').should.contain(instance_id)
+from __future__ import unicode_literals
+import re
+import sure  # noqa
+
+import moto.server as server
+
+'''
+Test the different server responses
+'''
+
+
+def test_ec2_server_get():
+    backend = server.create_backend_app("ec2")
+    test_client = backend.test_client()
+
+    res = test_client.get(
+        '/?Action=RunInstances&ImageId=ami-60a54009',
+        headers={"Host": "ec2.us-east-1.amazonaws.com"}
+    )
+
+    groups = re.search("<instanceId>(.*)</instanceId>",
+                       res.data.decode('utf-8'))
+    instance_id = groups.groups()[0]
+
+    res = test_client.get('/?Action=DescribeInstances')
+    res.data.decode('utf-8').should.contain(instance_id)
diff --git a/tests/test_ec2/test_spot_fleet.py b/tests/test_ec2/test_spot_fleet.py
index a2bd1d061ab0..01b05566a8d7 100644
--- a/tests/test_ec2/test_spot_fleet.py
+++ b/tests/test_ec2/test_spot_fleet.py
@@ -1,345 +1,345 @@
-from __future__ import unicode_literals
-
-import boto3
-import sure  # noqa
-
-from moto import mock_ec2
-
-
-def get_subnet_id(conn):
-    vpc = conn.create_vpc(CidrBlock="10.0.0.0/8")['Vpc']
-    subnet = conn.create_subnet(
-        VpcId=vpc['VpcId'], CidrBlock='10.0.0.0/16', AvailabilityZone='us-east-1a')['Subnet']
-    subnet_id = subnet['SubnetId']
-    return subnet_id
-
-
-def spot_config(subnet_id, allocation_strategy="lowestPrice"):
-    return {
-        'ClientToken': 'string',
-        'SpotPrice': '0.12',
-        'TargetCapacity': 6,
-        'IamFleetRole': 'arn:aws:iam::123456789012:role/fleet',
-        'LaunchSpecifications': [{
-            'ImageId': 'ami-123',
-            'KeyName': 'my-key',
-            'SecurityGroups': [
-                {
-                    'GroupId': 'sg-123'
-                },
-            ],
-            'UserData': 'some user data',
-            'InstanceType': 't2.small',
-            'BlockDeviceMappings': [
-                {
-                    'VirtualName': 'string',
-                    'DeviceName': 'string',
-                    'Ebs': {
-                        'SnapshotId': 'string',
-                        'VolumeSize': 123,
-                        'DeleteOnTermination': True | False,
-                        'VolumeType': 'standard',
-                        'Iops': 123,
-                        'Encrypted': True | False
-                    },
-                    'NoDevice': 'string'
-                },
-            ],
-            'Monitoring': {
-                'Enabled': True
-            },
-            'SubnetId': subnet_id,
-            'IamInstanceProfile': {
-                'Arn': 'arn:aws:iam::123456789012:role/fleet'
-            },
-            'EbsOptimized': False,
-            'WeightedCapacity': 2.0,
-            'SpotPrice': '0.13'
-        }, {
-            'ImageId': 'ami-123',
-            'KeyName': 'my-key',
-            'SecurityGroups': [
-                {
-                    'GroupId': 'sg-123'
-                },
-            ],
-            'UserData': 'some user data',
-            'InstanceType': 't2.large',
-            'Monitoring': {
-                'Enabled': True
-            },
-            'SubnetId': subnet_id,
-            'IamInstanceProfile': {
-                'Arn': 'arn:aws:iam::123456789012:role/fleet'
-            },
-            'EbsOptimized': False,
-            'WeightedCapacity': 4.0,
-            'SpotPrice': '10.00',
-        }],
-        'AllocationStrategy': allocation_strategy,
-        'FulfilledCapacity': 6,
-    }
-
-
-@mock_ec2
-def test_create_spot_fleet_with_lowest_price():
-    conn = boto3.client("ec2", region_name='us-west-2')
-    subnet_id = get_subnet_id(conn)
-
-    spot_fleet_res = conn.request_spot_fleet(
-        SpotFleetRequestConfig=spot_config(subnet_id)
-    )
-    spot_fleet_id = spot_fleet_res['SpotFleetRequestId']
-
-    spot_fleet_requests = conn.describe_spot_fleet_requests(
-        SpotFleetRequestIds=[spot_fleet_id])['SpotFleetRequestConfigs']
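# Why the assertions that follow expect 3 active instances for a target
# capacity of 6: under lowestPrice the cheapest launch spec wins (the
# t2.small priced at 0.13), and that spec carries WeightedCapacity 2.0,
# so the fleet needs ceil(6 / 2.0) instances. A sketch of that
# arithmetic (not part of the patch itself):
import math

target_capacity = 6
winning_weight = 2.0  # WeightedCapacity of the t2.small spec above
assert int(math.ceil(target_capacity / winning_weight)) == 3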
len(spot_fleet_requests).should.equal(1) - spot_fleet_request = spot_fleet_requests[0] - spot_fleet_request['SpotFleetRequestState'].should.equal("active") - spot_fleet_config = spot_fleet_request['SpotFleetRequestConfig'] - - spot_fleet_config['SpotPrice'].should.equal('0.12') - spot_fleet_config['TargetCapacity'].should.equal(6) - spot_fleet_config['IamFleetRole'].should.equal( - 'arn:aws:iam::123456789012:role/fleet') - spot_fleet_config['AllocationStrategy'].should.equal('lowestPrice') - spot_fleet_config['FulfilledCapacity'].should.equal(6.0) - - len(spot_fleet_config['LaunchSpecifications']).should.equal(2) - launch_spec = spot_fleet_config['LaunchSpecifications'][0] - - launch_spec['EbsOptimized'].should.equal(False) - launch_spec['SecurityGroups'].should.equal([{"GroupId": "sg-123"}]) - launch_spec['IamInstanceProfile'].should.equal( - {"Arn": "arn:aws:iam::123456789012:role/fleet"}) - launch_spec['ImageId'].should.equal("ami-123") - launch_spec['InstanceType'].should.equal("t2.small") - launch_spec['KeyName'].should.equal("my-key") - launch_spec['Monitoring'].should.equal({"Enabled": True}) - launch_spec['SpotPrice'].should.equal("0.13") - launch_spec['SubnetId'].should.equal(subnet_id) - launch_spec['UserData'].should.equal("some user data") - launch_spec['WeightedCapacity'].should.equal(2.0) - - instance_res = conn.describe_spot_fleet_instances( - SpotFleetRequestId=spot_fleet_id) - instances = instance_res['ActiveInstances'] - len(instances).should.equal(3) - - -@mock_ec2 -def test_create_diversified_spot_fleet(): - conn = boto3.client("ec2", region_name='us-west-2') - subnet_id = get_subnet_id(conn) - diversified_config = spot_config( - subnet_id, allocation_strategy='diversified') - - spot_fleet_res = conn.request_spot_fleet( - SpotFleetRequestConfig=diversified_config - ) - spot_fleet_id = spot_fleet_res['SpotFleetRequestId'] - - instance_res = conn.describe_spot_fleet_instances( - SpotFleetRequestId=spot_fleet_id) - instances = instance_res['ActiveInstances'] - len(instances).should.equal(2) - instance_types = set([instance['InstanceType'] for instance in instances]) - instance_types.should.equal(set(["t2.small", "t2.large"])) - instances[0]['InstanceId'].should.contain("i-") - - -@mock_ec2 -def test_cancel_spot_fleet_request(): - conn = boto3.client("ec2", region_name='us-west-2') - subnet_id = get_subnet_id(conn) - - spot_fleet_res = conn.request_spot_fleet( - SpotFleetRequestConfig=spot_config(subnet_id), - ) - spot_fleet_id = spot_fleet_res['SpotFleetRequestId'] - - conn.cancel_spot_fleet_requests( - SpotFleetRequestIds=[spot_fleet_id], TerminateInstances=True) - - spot_fleet_requests = conn.describe_spot_fleet_requests( - SpotFleetRequestIds=[spot_fleet_id])['SpotFleetRequestConfigs'] - len(spot_fleet_requests).should.equal(0) - - -@mock_ec2 -def test_modify_spot_fleet_request_up(): - conn = boto3.client("ec2", region_name='us-west-2') - subnet_id = get_subnet_id(conn) - - spot_fleet_res = conn.request_spot_fleet( - SpotFleetRequestConfig=spot_config(subnet_id), - ) - spot_fleet_id = spot_fleet_res['SpotFleetRequestId'] - - conn.modify_spot_fleet_request( - SpotFleetRequestId=spot_fleet_id, TargetCapacity=20) - - instance_res = conn.describe_spot_fleet_instances( - SpotFleetRequestId=spot_fleet_id) - instances = instance_res['ActiveInstances'] - len(instances).should.equal(10) - - spot_fleet_config = conn.describe_spot_fleet_requests( - SpotFleetRequestIds=[spot_fleet_id])['SpotFleetRequestConfigs'][0]['SpotFleetRequestConfig'] - 
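# The scale-up below follows the same weight arithmetic: with the
# t2.small spec (WeightedCapacity 2.0) still winning under lowestPrice,
# raising TargetCapacity to 20 means 20 / 2.0 == 10 active instances,
# which is exactly what the assertions check.
assert int(20 / 2.0) == 10  # instances expected after the modify call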
spot_fleet_config['TargetCapacity'].should.equal(20) - spot_fleet_config['FulfilledCapacity'].should.equal(20.0) - - -@mock_ec2 -def test_modify_spot_fleet_request_up_diversified(): - conn = boto3.client("ec2", region_name='us-west-2') - subnet_id = get_subnet_id(conn) - - spot_fleet_res = conn.request_spot_fleet( - SpotFleetRequestConfig=spot_config( - subnet_id, allocation_strategy='diversified'), - ) - spot_fleet_id = spot_fleet_res['SpotFleetRequestId'] - - conn.modify_spot_fleet_request( - SpotFleetRequestId=spot_fleet_id, TargetCapacity=19) - - instance_res = conn.describe_spot_fleet_instances( - SpotFleetRequestId=spot_fleet_id) - instances = instance_res['ActiveInstances'] - len(instances).should.equal(7) - - spot_fleet_config = conn.describe_spot_fleet_requests( - SpotFleetRequestIds=[spot_fleet_id])['SpotFleetRequestConfigs'][0]['SpotFleetRequestConfig'] - spot_fleet_config['TargetCapacity'].should.equal(19) - spot_fleet_config['FulfilledCapacity'].should.equal(20.0) - - -@mock_ec2 -def test_modify_spot_fleet_request_down_no_terminate(): - conn = boto3.client("ec2", region_name='us-west-2') - subnet_id = get_subnet_id(conn) - - spot_fleet_res = conn.request_spot_fleet( - SpotFleetRequestConfig=spot_config(subnet_id), - ) - spot_fleet_id = spot_fleet_res['SpotFleetRequestId'] - - conn.modify_spot_fleet_request( - SpotFleetRequestId=spot_fleet_id, TargetCapacity=1, ExcessCapacityTerminationPolicy="noTermination") - - instance_res = conn.describe_spot_fleet_instances( - SpotFleetRequestId=spot_fleet_id) - instances = instance_res['ActiveInstances'] - len(instances).should.equal(3) - - spot_fleet_config = conn.describe_spot_fleet_requests( - SpotFleetRequestIds=[spot_fleet_id])['SpotFleetRequestConfigs'][0]['SpotFleetRequestConfig'] - spot_fleet_config['TargetCapacity'].should.equal(1) - spot_fleet_config['FulfilledCapacity'].should.equal(6.0) - - -@mock_ec2 -def test_modify_spot_fleet_request_down_odd(): - conn = boto3.client("ec2", region_name='us-west-2') - subnet_id = get_subnet_id(conn) - - spot_fleet_res = conn.request_spot_fleet( - SpotFleetRequestConfig=spot_config(subnet_id), - ) - spot_fleet_id = spot_fleet_res['SpotFleetRequestId'] - - conn.modify_spot_fleet_request( - SpotFleetRequestId=spot_fleet_id, TargetCapacity=7) - conn.modify_spot_fleet_request( - SpotFleetRequestId=spot_fleet_id, TargetCapacity=5) - - instance_res = conn.describe_spot_fleet_instances( - SpotFleetRequestId=spot_fleet_id) - instances = instance_res['ActiveInstances'] - len(instances).should.equal(3) - - spot_fleet_config = conn.describe_spot_fleet_requests( - SpotFleetRequestIds=[spot_fleet_id])['SpotFleetRequestConfigs'][0]['SpotFleetRequestConfig'] - spot_fleet_config['TargetCapacity'].should.equal(5) - spot_fleet_config['FulfilledCapacity'].should.equal(6.0) - - -@mock_ec2 -def test_modify_spot_fleet_request_down(): - conn = boto3.client("ec2", region_name='us-west-2') - subnet_id = get_subnet_id(conn) - - spot_fleet_res = conn.request_spot_fleet( - SpotFleetRequestConfig=spot_config(subnet_id), - ) - spot_fleet_id = spot_fleet_res['SpotFleetRequestId'] - - conn.modify_spot_fleet_request( - SpotFleetRequestId=spot_fleet_id, TargetCapacity=1) - - instance_res = conn.describe_spot_fleet_instances( - SpotFleetRequestId=spot_fleet_id) - instances = instance_res['ActiveInstances'] - len(instances).should.equal(1) - - spot_fleet_config = conn.describe_spot_fleet_requests( - SpotFleetRequestIds=[spot_fleet_id])['SpotFleetRequestConfigs'][0]['SpotFleetRequestConfig'] - 
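# Scaling down to TargetCapacity=1 cannot be met exactly because
# instance weights are indivisible: one t2.small (WeightedCapacity 2.0)
# has to stay up, so FulfilledCapacity lands on 2.0 even though only
# 1 unit of capacity was requested. A sketch, assuming whole instances:
import math

target, weight = 1, 2.0
instances_kept = int(math.ceil(target / weight))  # 1 instance survives
assert instances_kept * weight == 2.0             # fulfilled capacity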
spot_fleet_config['TargetCapacity'].should.equal(1) - spot_fleet_config['FulfilledCapacity'].should.equal(2.0) - - -@mock_ec2 -def test_modify_spot_fleet_request_down_no_terminate_after_custom_terminate(): - conn = boto3.client("ec2", region_name='us-west-2') - subnet_id = get_subnet_id(conn) - - spot_fleet_res = conn.request_spot_fleet( - SpotFleetRequestConfig=spot_config(subnet_id), - ) - spot_fleet_id = spot_fleet_res['SpotFleetRequestId'] - - instance_res = conn.describe_spot_fleet_instances( - SpotFleetRequestId=spot_fleet_id) - instances = instance_res['ActiveInstances'] - conn.terminate_instances(InstanceIds=[i['InstanceId'] for i in instances[1:]]) - - conn.modify_spot_fleet_request( - SpotFleetRequestId=spot_fleet_id, TargetCapacity=1, ExcessCapacityTerminationPolicy="noTermination") - - instance_res = conn.describe_spot_fleet_instances( - SpotFleetRequestId=spot_fleet_id) - instances = instance_res['ActiveInstances'] - len(instances).should.equal(1) - - spot_fleet_config = conn.describe_spot_fleet_requests( - SpotFleetRequestIds=[spot_fleet_id])['SpotFleetRequestConfigs'][0]['SpotFleetRequestConfig'] - spot_fleet_config['TargetCapacity'].should.equal(1) - spot_fleet_config['FulfilledCapacity'].should.equal(2.0) - - -@mock_ec2 -def test_create_spot_fleet_without_spot_price(): - conn = boto3.client("ec2", region_name='us-west-2') - subnet_id = get_subnet_id(conn) - - # remove prices to force a fallback to ondemand price - spot_config_without_price = spot_config(subnet_id) - del spot_config_without_price['SpotPrice'] - for spec in spot_config_without_price['LaunchSpecifications']: - del spec['SpotPrice'] - - spot_fleet_id = conn.request_spot_fleet(SpotFleetRequestConfig=spot_config_without_price)['SpotFleetRequestId'] - spot_fleet_requests = conn.describe_spot_fleet_requests( - SpotFleetRequestIds=[spot_fleet_id])['SpotFleetRequestConfigs'] - len(spot_fleet_requests).should.equal(1) - spot_fleet_request = spot_fleet_requests[0] - spot_fleet_config = spot_fleet_request['SpotFleetRequestConfig'] - - len(spot_fleet_config['LaunchSpecifications']).should.equal(2) - launch_spec1 = spot_fleet_config['LaunchSpecifications'][0] - launch_spec2 = spot_fleet_config['LaunchSpecifications'][1] - - # AWS will figure out the price - assert 'SpotPrice' not in launch_spec1 - assert 'SpotPrice' not in launch_spec2 +from __future__ import unicode_literals + +import boto3 +import sure # noqa + +from moto import mock_ec2 + + +def get_subnet_id(conn): + vpc = conn.create_vpc(CidrBlock="10.0.0.0/8")['Vpc'] + subnet = conn.create_subnet( + VpcId=vpc['VpcId'], CidrBlock='10.0.0.0/16', AvailabilityZone='us-east-1a')['Subnet'] + subnet_id = subnet['SubnetId'] + return subnet_id + + +def spot_config(subnet_id, allocation_strategy="lowestPrice"): + return { + 'ClientToken': 'string', + 'SpotPrice': '0.12', + 'TargetCapacity': 6, + 'IamFleetRole': 'arn:aws:iam::123456789012:role/fleet', + 'LaunchSpecifications': [{ + 'ImageId': 'ami-123', + 'KeyName': 'my-key', + 'SecurityGroups': [ + { + 'GroupId': 'sg-123' + }, + ], + 'UserData': 'some user data', + 'InstanceType': 't2.small', + 'BlockDeviceMappings': [ + { + 'VirtualName': 'string', + 'DeviceName': 'string', + 'Ebs': { + 'SnapshotId': 'string', + 'VolumeSize': 123, + 'DeleteOnTermination': True | False, + 'VolumeType': 'standard', + 'Iops': 123, + 'Encrypted': True | False + }, + 'NoDevice': 'string' + }, + ], + 'Monitoring': { + 'Enabled': True + }, + 'SubnetId': subnet_id, + 'IamInstanceProfile': { + 'Arn': 'arn:aws:iam::123456789012:role/fleet' + }, + 
'EbsOptimized': False, + 'WeightedCapacity': 2.0, + 'SpotPrice': '0.13' + }, { + 'ImageId': 'ami-123', + 'KeyName': 'my-key', + 'SecurityGroups': [ + { + 'GroupId': 'sg-123' + }, + ], + 'UserData': 'some user data', + 'InstanceType': 't2.large', + 'Monitoring': { + 'Enabled': True + }, + 'SubnetId': subnet_id, + 'IamInstanceProfile': { + 'Arn': 'arn:aws:iam::123456789012:role/fleet' + }, + 'EbsOptimized': False, + 'WeightedCapacity': 4.0, + 'SpotPrice': '10.00', + }], + 'AllocationStrategy': allocation_strategy, + 'FulfilledCapacity': 6, + } + + +@mock_ec2 +def test_create_spot_fleet_with_lowest_price(): + conn = boto3.client("ec2", region_name='us-west-2') + subnet_id = get_subnet_id(conn) + + spot_fleet_res = conn.request_spot_fleet( + SpotFleetRequestConfig=spot_config(subnet_id) + ) + spot_fleet_id = spot_fleet_res['SpotFleetRequestId'] + + spot_fleet_requests = conn.describe_spot_fleet_requests( + SpotFleetRequestIds=[spot_fleet_id])['SpotFleetRequestConfigs'] + len(spot_fleet_requests).should.equal(1) + spot_fleet_request = spot_fleet_requests[0] + spot_fleet_request['SpotFleetRequestState'].should.equal("active") + spot_fleet_config = spot_fleet_request['SpotFleetRequestConfig'] + + spot_fleet_config['SpotPrice'].should.equal('0.12') + spot_fleet_config['TargetCapacity'].should.equal(6) + spot_fleet_config['IamFleetRole'].should.equal( + 'arn:aws:iam::123456789012:role/fleet') + spot_fleet_config['AllocationStrategy'].should.equal('lowestPrice') + spot_fleet_config['FulfilledCapacity'].should.equal(6.0) + + len(spot_fleet_config['LaunchSpecifications']).should.equal(2) + launch_spec = spot_fleet_config['LaunchSpecifications'][0] + + launch_spec['EbsOptimized'].should.equal(False) + launch_spec['SecurityGroups'].should.equal([{"GroupId": "sg-123"}]) + launch_spec['IamInstanceProfile'].should.equal( + {"Arn": "arn:aws:iam::123456789012:role/fleet"}) + launch_spec['ImageId'].should.equal("ami-123") + launch_spec['InstanceType'].should.equal("t2.small") + launch_spec['KeyName'].should.equal("my-key") + launch_spec['Monitoring'].should.equal({"Enabled": True}) + launch_spec['SpotPrice'].should.equal("0.13") + launch_spec['SubnetId'].should.equal(subnet_id) + launch_spec['UserData'].should.equal("some user data") + launch_spec['WeightedCapacity'].should.equal(2.0) + + instance_res = conn.describe_spot_fleet_instances( + SpotFleetRequestId=spot_fleet_id) + instances = instance_res['ActiveInstances'] + len(instances).should.equal(3) + + +@mock_ec2 +def test_create_diversified_spot_fleet(): + conn = boto3.client("ec2", region_name='us-west-2') + subnet_id = get_subnet_id(conn) + diversified_config = spot_config( + subnet_id, allocation_strategy='diversified') + + spot_fleet_res = conn.request_spot_fleet( + SpotFleetRequestConfig=diversified_config + ) + spot_fleet_id = spot_fleet_res['SpotFleetRequestId'] + + instance_res = conn.describe_spot_fleet_instances( + SpotFleetRequestId=spot_fleet_id) + instances = instance_res['ActiveInstances'] + len(instances).should.equal(2) + instance_types = set([instance['InstanceType'] for instance in instances]) + instance_types.should.equal(set(["t2.small", "t2.large"])) + instances[0]['InstanceId'].should.contain("i-") + + +@mock_ec2 +def test_cancel_spot_fleet_request(): + conn = boto3.client("ec2", region_name='us-west-2') + subnet_id = get_subnet_id(conn) + + spot_fleet_res = conn.request_spot_fleet( + SpotFleetRequestConfig=spot_config(subnet_id), + ) + spot_fleet_id = spot_fleet_res['SpotFleetRequestId'] + + conn.cancel_spot_fleet_requests( + 
SpotFleetRequestIds=[spot_fleet_id], TerminateInstances=True) + + spot_fleet_requests = conn.describe_spot_fleet_requests( + SpotFleetRequestIds=[spot_fleet_id])['SpotFleetRequestConfigs'] + len(spot_fleet_requests).should.equal(0) + + +@mock_ec2 +def test_modify_spot_fleet_request_up(): + conn = boto3.client("ec2", region_name='us-west-2') + subnet_id = get_subnet_id(conn) + + spot_fleet_res = conn.request_spot_fleet( + SpotFleetRequestConfig=spot_config(subnet_id), + ) + spot_fleet_id = spot_fleet_res['SpotFleetRequestId'] + + conn.modify_spot_fleet_request( + SpotFleetRequestId=spot_fleet_id, TargetCapacity=20) + + instance_res = conn.describe_spot_fleet_instances( + SpotFleetRequestId=spot_fleet_id) + instances = instance_res['ActiveInstances'] + len(instances).should.equal(10) + + spot_fleet_config = conn.describe_spot_fleet_requests( + SpotFleetRequestIds=[spot_fleet_id])['SpotFleetRequestConfigs'][0]['SpotFleetRequestConfig'] + spot_fleet_config['TargetCapacity'].should.equal(20) + spot_fleet_config['FulfilledCapacity'].should.equal(20.0) + + +@mock_ec2 +def test_modify_spot_fleet_request_up_diversified(): + conn = boto3.client("ec2", region_name='us-west-2') + subnet_id = get_subnet_id(conn) + + spot_fleet_res = conn.request_spot_fleet( + SpotFleetRequestConfig=spot_config( + subnet_id, allocation_strategy='diversified'), + ) + spot_fleet_id = spot_fleet_res['SpotFleetRequestId'] + + conn.modify_spot_fleet_request( + SpotFleetRequestId=spot_fleet_id, TargetCapacity=19) + + instance_res = conn.describe_spot_fleet_instances( + SpotFleetRequestId=spot_fleet_id) + instances = instance_res['ActiveInstances'] + len(instances).should.equal(7) + + spot_fleet_config = conn.describe_spot_fleet_requests( + SpotFleetRequestIds=[spot_fleet_id])['SpotFleetRequestConfigs'][0]['SpotFleetRequestConfig'] + spot_fleet_config['TargetCapacity'].should.equal(19) + spot_fleet_config['FulfilledCapacity'].should.equal(20.0) + + +@mock_ec2 +def test_modify_spot_fleet_request_down_no_terminate(): + conn = boto3.client("ec2", region_name='us-west-2') + subnet_id = get_subnet_id(conn) + + spot_fleet_res = conn.request_spot_fleet( + SpotFleetRequestConfig=spot_config(subnet_id), + ) + spot_fleet_id = spot_fleet_res['SpotFleetRequestId'] + + conn.modify_spot_fleet_request( + SpotFleetRequestId=spot_fleet_id, TargetCapacity=1, ExcessCapacityTerminationPolicy="noTermination") + + instance_res = conn.describe_spot_fleet_instances( + SpotFleetRequestId=spot_fleet_id) + instances = instance_res['ActiveInstances'] + len(instances).should.equal(3) + + spot_fleet_config = conn.describe_spot_fleet_requests( + SpotFleetRequestIds=[spot_fleet_id])['SpotFleetRequestConfigs'][0]['SpotFleetRequestConfig'] + spot_fleet_config['TargetCapacity'].should.equal(1) + spot_fleet_config['FulfilledCapacity'].should.equal(6.0) + + +@mock_ec2 +def test_modify_spot_fleet_request_down_odd(): + conn = boto3.client("ec2", region_name='us-west-2') + subnet_id = get_subnet_id(conn) + + spot_fleet_res = conn.request_spot_fleet( + SpotFleetRequestConfig=spot_config(subnet_id), + ) + spot_fleet_id = spot_fleet_res['SpotFleetRequestId'] + + conn.modify_spot_fleet_request( + SpotFleetRequestId=spot_fleet_id, TargetCapacity=7) + conn.modify_spot_fleet_request( + SpotFleetRequestId=spot_fleet_id, TargetCapacity=5) + + instance_res = conn.describe_spot_fleet_instances( + SpotFleetRequestId=spot_fleet_id) + instances = instance_res['ActiveInstances'] + len(instances).should.equal(3) + + spot_fleet_config = conn.describe_spot_fleet_requests( + 
SpotFleetRequestIds=[spot_fleet_id])['SpotFleetRequestConfigs'][0]['SpotFleetRequestConfig'] + spot_fleet_config['TargetCapacity'].should.equal(5) + spot_fleet_config['FulfilledCapacity'].should.equal(6.0) + + +@mock_ec2 +def test_modify_spot_fleet_request_down(): + conn = boto3.client("ec2", region_name='us-west-2') + subnet_id = get_subnet_id(conn) + + spot_fleet_res = conn.request_spot_fleet( + SpotFleetRequestConfig=spot_config(subnet_id), + ) + spot_fleet_id = spot_fleet_res['SpotFleetRequestId'] + + conn.modify_spot_fleet_request( + SpotFleetRequestId=spot_fleet_id, TargetCapacity=1) + + instance_res = conn.describe_spot_fleet_instances( + SpotFleetRequestId=spot_fleet_id) + instances = instance_res['ActiveInstances'] + len(instances).should.equal(1) + + spot_fleet_config = conn.describe_spot_fleet_requests( + SpotFleetRequestIds=[spot_fleet_id])['SpotFleetRequestConfigs'][0]['SpotFleetRequestConfig'] + spot_fleet_config['TargetCapacity'].should.equal(1) + spot_fleet_config['FulfilledCapacity'].should.equal(2.0) + + +@mock_ec2 +def test_modify_spot_fleet_request_down_no_terminate_after_custom_terminate(): + conn = boto3.client("ec2", region_name='us-west-2') + subnet_id = get_subnet_id(conn) + + spot_fleet_res = conn.request_spot_fleet( + SpotFleetRequestConfig=spot_config(subnet_id), + ) + spot_fleet_id = spot_fleet_res['SpotFleetRequestId'] + + instance_res = conn.describe_spot_fleet_instances( + SpotFleetRequestId=spot_fleet_id) + instances = instance_res['ActiveInstances'] + conn.terminate_instances(InstanceIds=[i['InstanceId'] for i in instances[1:]]) + + conn.modify_spot_fleet_request( + SpotFleetRequestId=spot_fleet_id, TargetCapacity=1, ExcessCapacityTerminationPolicy="noTermination") + + instance_res = conn.describe_spot_fleet_instances( + SpotFleetRequestId=spot_fleet_id) + instances = instance_res['ActiveInstances'] + len(instances).should.equal(1) + + spot_fleet_config = conn.describe_spot_fleet_requests( + SpotFleetRequestIds=[spot_fleet_id])['SpotFleetRequestConfigs'][0]['SpotFleetRequestConfig'] + spot_fleet_config['TargetCapacity'].should.equal(1) + spot_fleet_config['FulfilledCapacity'].should.equal(2.0) + + +@mock_ec2 +def test_create_spot_fleet_without_spot_price(): + conn = boto3.client("ec2", region_name='us-west-2') + subnet_id = get_subnet_id(conn) + + # remove prices to force a fallback to ondemand price + spot_config_without_price = spot_config(subnet_id) + del spot_config_without_price['SpotPrice'] + for spec in spot_config_without_price['LaunchSpecifications']: + del spec['SpotPrice'] + + spot_fleet_id = conn.request_spot_fleet(SpotFleetRequestConfig=spot_config_without_price)['SpotFleetRequestId'] + spot_fleet_requests = conn.describe_spot_fleet_requests( + SpotFleetRequestIds=[spot_fleet_id])['SpotFleetRequestConfigs'] + len(spot_fleet_requests).should.equal(1) + spot_fleet_request = spot_fleet_requests[0] + spot_fleet_config = spot_fleet_request['SpotFleetRequestConfig'] + + len(spot_fleet_config['LaunchSpecifications']).should.equal(2) + launch_spec1 = spot_fleet_config['LaunchSpecifications'][0] + launch_spec2 = spot_fleet_config['LaunchSpecifications'][1] + + # AWS will figure out the price + assert 'SpotPrice' not in launch_spec1 + assert 'SpotPrice' not in launch_spec2 diff --git a/tests/test_ec2/test_spot_instances.py b/tests/test_ec2/test_spot_instances.py index 05f8ee88f48b..51590ed46150 100644 --- a/tests/test_ec2/test_spot_instances.py +++ b/tests/test_ec2/test_spot_instances.py @@ -1,268 +1,268 @@ -from __future__ import unicode_literals -from 
nose.tools import assert_raises -import datetime - -import boto -import boto3 -from boto.exception import EC2ResponseError -from botocore.exceptions import ClientError -import pytz -import sure # noqa - -from moto import mock_ec2, mock_ec2_deprecated -from moto.backends import get_model -from moto.core.utils import iso_8601_datetime_with_milliseconds - - -@mock_ec2 -def test_request_spot_instances(): - conn = boto3.client('ec2', 'us-east-1') - vpc = conn.create_vpc(CidrBlock="10.0.0.0/8")['Vpc'] - subnet = conn.create_subnet( - VpcId=vpc['VpcId'], CidrBlock='10.0.0.0/16', AvailabilityZone='us-east-1a')['Subnet'] - subnet_id = subnet['SubnetId'] - - conn.create_security_group(GroupName='group1', Description='description') - conn.create_security_group(GroupName='group2', Description='description') - - start_dt = datetime.datetime(2013, 1, 1).replace(tzinfo=pytz.utc) - end_dt = datetime.datetime(2013, 1, 2).replace(tzinfo=pytz.utc) - start = iso_8601_datetime_with_milliseconds(start_dt) - end = iso_8601_datetime_with_milliseconds(end_dt) - - with assert_raises(ClientError) as ex: - request = conn.request_spot_instances( - SpotPrice="0.5", InstanceCount=1, Type='one-time', - ValidFrom=start, ValidUntil=end, LaunchGroup="the-group", - AvailabilityZoneGroup='my-group', - LaunchSpecification={ - "ImageId": 'ami-abcd1234', - "KeyName": "test", - "SecurityGroups": ['group1', 'group2'], - "UserData": "some test data", - "InstanceType": 'm1.small', - "Placement": { - "AvailabilityZone": 'us-east-1c', - }, - "KernelId": "test-kernel", - "RamdiskId": "test-ramdisk", - "Monitoring": { - "Enabled": True, - }, - "SubnetId": subnet_id, - }, - DryRun=True, - ) - ex.exception.response['Error']['Code'].should.equal('DryRunOperation') - ex.exception.response['ResponseMetadata'][ - 'HTTPStatusCode'].should.equal(400) - ex.exception.response['Error']['Message'].should.equal( - 'An error occurred (DryRunOperation) when calling the RequestSpotInstance operation: Request would have succeeded, but DryRun flag is set') - - request = conn.request_spot_instances( - SpotPrice="0.5", InstanceCount=1, Type='one-time', - ValidFrom=start, ValidUntil=end, LaunchGroup="the-group", - AvailabilityZoneGroup='my-group', - LaunchSpecification={ - "ImageId": 'ami-abcd1234', - "KeyName": "test", - "SecurityGroups": ['group1', 'group2'], - "UserData": "some test data", - "InstanceType": 'm1.small', - "Placement": { - "AvailabilityZone": 'us-east-1c', - }, - "KernelId": "test-kernel", - "RamdiskId": "test-ramdisk", - "Monitoring": { - "Enabled": True, - }, - "SubnetId": subnet_id, - }, - ) - - requests = conn.describe_spot_instance_requests()['SpotInstanceRequests'] - requests.should.have.length_of(1) - request = requests[0] - - request['State'].should.equal("open") - request['SpotPrice'].should.equal("0.5") - request['Type'].should.equal('one-time') - request['ValidFrom'].should.equal(start_dt) - request['ValidUntil'].should.equal(end_dt) - request['LaunchGroup'].should.equal("the-group") - request['AvailabilityZoneGroup'].should.equal('my-group') - - launch_spec = request['LaunchSpecification'] - security_group_names = [group['GroupName'] - for group in launch_spec['SecurityGroups']] - set(security_group_names).should.equal(set(['group1', 'group2'])) - - launch_spec['ImageId'].should.equal('ami-abcd1234') - launch_spec['KeyName'].should.equal("test") - launch_spec['InstanceType'].should.equal('m1.small') - launch_spec['KernelId'].should.equal("test-kernel") - launch_spec['RamdiskId'].should.equal("test-ramdisk") - 
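# The ValidFrom/ValidUntil round trip above works because the request
# serializes each timezone-aware datetime through moto's
# iso_8601_datetime_with_milliseconds helper, and botocore parses the
# response field back into an aware datetime that compares equal to the
# original start_dt/end_dt. A pure-stdlib sketch of the expected wire
# format, mirroring the test's start_dt:
import datetime
import pytz

start_dt = datetime.datetime(2013, 1, 1, tzinfo=pytz.utc)
wire = start_dt.strftime('%Y-%m-%dT%H:%M:%S.%f')[:-3] + 'Z'
assert wire == '2013-01-01T00:00:00.000Z'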
launch_spec['SubnetId'].should.equal(subnet_id) - - -@mock_ec2 -def test_request_spot_instances_default_arguments(): - """ - Test that moto set the correct default arguments - """ - conn = boto3.client('ec2', 'us-east-1') - - request = conn.request_spot_instances( - SpotPrice="0.5", - LaunchSpecification={ - "ImageId": 'ami-abcd1234', - } - ) - - requests = conn.describe_spot_instance_requests()['SpotInstanceRequests'] - requests.should.have.length_of(1) - request = requests[0] - - request['State'].should.equal("open") - request['SpotPrice'].should.equal("0.5") - request['Type'].should.equal('one-time') - request.shouldnt.contain('ValidFrom') - request.shouldnt.contain('ValidUntil') - request.shouldnt.contain('LaunchGroup') - request.shouldnt.contain('AvailabilityZoneGroup') - - launch_spec = request['LaunchSpecification'] - - security_group_names = [group['GroupName'] - for group in launch_spec['SecurityGroups']] - security_group_names.should.equal(["default"]) - - launch_spec['ImageId'].should.equal('ami-abcd1234') - request.shouldnt.contain('KeyName') - launch_spec['InstanceType'].should.equal('m1.small') - request.shouldnt.contain('KernelId') - request.shouldnt.contain('RamdiskId') - request.shouldnt.contain('SubnetId') - - -@mock_ec2_deprecated -def test_cancel_spot_instance_request(): - conn = boto.connect_ec2() - - conn.request_spot_instances( - price=0.5, image_id='ami-abcd1234', - ) - - requests = conn.get_all_spot_instance_requests() - requests.should.have.length_of(1) - - with assert_raises(EC2ResponseError) as ex: - conn.cancel_spot_instance_requests([requests[0].id], dry_run=True) - ex.exception.error_code.should.equal('DryRunOperation') - ex.exception.status.should.equal(400) - ex.exception.message.should.equal( - 'An error occurred (DryRunOperation) when calling the CancelSpotInstance operation: Request would have succeeded, but DryRun flag is set') - - conn.cancel_spot_instance_requests([requests[0].id]) - - requests = conn.get_all_spot_instance_requests() - requests.should.have.length_of(0) - - -@mock_ec2_deprecated -def test_request_spot_instances_fulfilled(): - """ - Test that moto correctly fullfills a spot instance request - """ - conn = boto.ec2.connect_to_region("us-east-1") - - request = conn.request_spot_instances( - price=0.5, image_id='ami-abcd1234', - ) - - requests = conn.get_all_spot_instance_requests() - requests.should.have.length_of(1) - request = requests[0] - - request.state.should.equal("open") - - get_model('SpotInstanceRequest', 'us-east-1')[0].state = 'active' - - requests = conn.get_all_spot_instance_requests() - requests.should.have.length_of(1) - request = requests[0] - - request.state.should.equal("active") - - -@mock_ec2_deprecated -def test_tag_spot_instance_request(): - """ - Test that moto correctly tags a spot instance request - """ - conn = boto.connect_ec2() - - request = conn.request_spot_instances( - price=0.5, image_id='ami-abcd1234', - ) - request[0].add_tag('tag1', 'value1') - request[0].add_tag('tag2', 'value2') - - requests = conn.get_all_spot_instance_requests() - requests.should.have.length_of(1) - request = requests[0] - - tag_dict = dict(request.tags) - tag_dict.should.equal({'tag1': 'value1', 'tag2': 'value2'}) - - -@mock_ec2_deprecated -def test_get_all_spot_instance_requests_filtering(): - """ - Test that moto correctly filters spot instance requests - """ - conn = boto.connect_ec2() - - request1 = conn.request_spot_instances( - price=0.5, image_id='ami-abcd1234', - ) - request2 = conn.request_spot_instances( - price=0.5, 
image_id='ami-abcd1234', - ) - conn.request_spot_instances( - price=0.5, image_id='ami-abcd1234', - ) - request1[0].add_tag('tag1', 'value1') - request1[0].add_tag('tag2', 'value2') - request2[0].add_tag('tag1', 'value1') - request2[0].add_tag('tag2', 'wrong') - - requests = conn.get_all_spot_instance_requests(filters={'state': 'active'}) - requests.should.have.length_of(0) - - requests = conn.get_all_spot_instance_requests(filters={'state': 'open'}) - requests.should.have.length_of(3) - - requests = conn.get_all_spot_instance_requests( - filters={'tag:tag1': 'value1'}) - requests.should.have.length_of(2) - - requests = conn.get_all_spot_instance_requests( - filters={'tag:tag1': 'value1', 'tag:tag2': 'value2'}) - requests.should.have.length_of(1) - - -@mock_ec2_deprecated -def test_request_spot_instances_setting_instance_id(): - conn = boto.ec2.connect_to_region("us-east-1") - request = conn.request_spot_instances( - price=0.5, image_id='ami-abcd1234') - - req = get_model('SpotInstanceRequest', 'us-east-1')[0] - req.state = 'active' - req.instance_id = 'i-12345678' - - request = conn.get_all_spot_instance_requests()[0] - assert request.state == 'active' - assert request.instance_id == 'i-12345678' +from __future__ import unicode_literals +from nose.tools import assert_raises +import datetime + +import boto +import boto3 +from boto.exception import EC2ResponseError +from botocore.exceptions import ClientError +import pytz +import sure # noqa + +from moto import mock_ec2, mock_ec2_deprecated +from moto.backends import get_model +from moto.core.utils import iso_8601_datetime_with_milliseconds + + +@mock_ec2 +def test_request_spot_instances(): + conn = boto3.client('ec2', 'us-east-1') + vpc = conn.create_vpc(CidrBlock="10.0.0.0/8")['Vpc'] + subnet = conn.create_subnet( + VpcId=vpc['VpcId'], CidrBlock='10.0.0.0/16', AvailabilityZone='us-east-1a')['Subnet'] + subnet_id = subnet['SubnetId'] + + conn.create_security_group(GroupName='group1', Description='description') + conn.create_security_group(GroupName='group2', Description='description') + + start_dt = datetime.datetime(2013, 1, 1).replace(tzinfo=pytz.utc) + end_dt = datetime.datetime(2013, 1, 2).replace(tzinfo=pytz.utc) + start = iso_8601_datetime_with_milliseconds(start_dt) + end = iso_8601_datetime_with_milliseconds(end_dt) + + with assert_raises(ClientError) as ex: + request = conn.request_spot_instances( + SpotPrice="0.5", InstanceCount=1, Type='one-time', + ValidFrom=start, ValidUntil=end, LaunchGroup="the-group", + AvailabilityZoneGroup='my-group', + LaunchSpecification={ + "ImageId": 'ami-abcd1234', + "KeyName": "test", + "SecurityGroups": ['group1', 'group2'], + "UserData": "some test data", + "InstanceType": 'm1.small', + "Placement": { + "AvailabilityZone": 'us-east-1c', + }, + "KernelId": "test-kernel", + "RamdiskId": "test-ramdisk", + "Monitoring": { + "Enabled": True, + }, + "SubnetId": subnet_id, + }, + DryRun=True, + ) + ex.exception.response['Error']['Code'].should.equal('DryRunOperation') + ex.exception.response['ResponseMetadata'][ + 'HTTPStatusCode'].should.equal(400) + ex.exception.response['Error']['Message'].should.equal( + 'An error occurred (DryRunOperation) when calling the RequestSpotInstance operation: Request would have succeeded, but DryRun flag is set') + + request = conn.request_spot_instances( + SpotPrice="0.5", InstanceCount=1, Type='one-time', + ValidFrom=start, ValidUntil=end, LaunchGroup="the-group", + AvailabilityZoneGroup='my-group', + LaunchSpecification={ + "ImageId": 'ami-abcd1234', + "KeyName": 
"test", + "SecurityGroups": ['group1', 'group2'], + "UserData": "some test data", + "InstanceType": 'm1.small', + "Placement": { + "AvailabilityZone": 'us-east-1c', + }, + "KernelId": "test-kernel", + "RamdiskId": "test-ramdisk", + "Monitoring": { + "Enabled": True, + }, + "SubnetId": subnet_id, + }, + ) + + requests = conn.describe_spot_instance_requests()['SpotInstanceRequests'] + requests.should.have.length_of(1) + request = requests[0] + + request['State'].should.equal("open") + request['SpotPrice'].should.equal("0.5") + request['Type'].should.equal('one-time') + request['ValidFrom'].should.equal(start_dt) + request['ValidUntil'].should.equal(end_dt) + request['LaunchGroup'].should.equal("the-group") + request['AvailabilityZoneGroup'].should.equal('my-group') + + launch_spec = request['LaunchSpecification'] + security_group_names = [group['GroupName'] + for group in launch_spec['SecurityGroups']] + set(security_group_names).should.equal(set(['group1', 'group2'])) + + launch_spec['ImageId'].should.equal('ami-abcd1234') + launch_spec['KeyName'].should.equal("test") + launch_spec['InstanceType'].should.equal('m1.small') + launch_spec['KernelId'].should.equal("test-kernel") + launch_spec['RamdiskId'].should.equal("test-ramdisk") + launch_spec['SubnetId'].should.equal(subnet_id) + + +@mock_ec2 +def test_request_spot_instances_default_arguments(): + """ + Test that moto set the correct default arguments + """ + conn = boto3.client('ec2', 'us-east-1') + + request = conn.request_spot_instances( + SpotPrice="0.5", + LaunchSpecification={ + "ImageId": 'ami-abcd1234', + } + ) + + requests = conn.describe_spot_instance_requests()['SpotInstanceRequests'] + requests.should.have.length_of(1) + request = requests[0] + + request['State'].should.equal("open") + request['SpotPrice'].should.equal("0.5") + request['Type'].should.equal('one-time') + request.shouldnt.contain('ValidFrom') + request.shouldnt.contain('ValidUntil') + request.shouldnt.contain('LaunchGroup') + request.shouldnt.contain('AvailabilityZoneGroup') + + launch_spec = request['LaunchSpecification'] + + security_group_names = [group['GroupName'] + for group in launch_spec['SecurityGroups']] + security_group_names.should.equal(["default"]) + + launch_spec['ImageId'].should.equal('ami-abcd1234') + request.shouldnt.contain('KeyName') + launch_spec['InstanceType'].should.equal('m1.small') + request.shouldnt.contain('KernelId') + request.shouldnt.contain('RamdiskId') + request.shouldnt.contain('SubnetId') + + +@mock_ec2_deprecated +def test_cancel_spot_instance_request(): + conn = boto.connect_ec2() + + conn.request_spot_instances( + price=0.5, image_id='ami-abcd1234', + ) + + requests = conn.get_all_spot_instance_requests() + requests.should.have.length_of(1) + + with assert_raises(EC2ResponseError) as ex: + conn.cancel_spot_instance_requests([requests[0].id], dry_run=True) + ex.exception.error_code.should.equal('DryRunOperation') + ex.exception.status.should.equal(400) + ex.exception.message.should.equal( + 'An error occurred (DryRunOperation) when calling the CancelSpotInstance operation: Request would have succeeded, but DryRun flag is set') + + conn.cancel_spot_instance_requests([requests[0].id]) + + requests = conn.get_all_spot_instance_requests() + requests.should.have.length_of(0) + + +@mock_ec2_deprecated +def test_request_spot_instances_fulfilled(): + """ + Test that moto correctly fullfills a spot instance request + """ + conn = boto.ec2.connect_to_region("us-east-1") + + request = conn.request_spot_instances( + price=0.5, 
image_id='ami-abcd1234', + ) + + requests = conn.get_all_spot_instance_requests() + requests.should.have.length_of(1) + request = requests[0] + + request.state.should.equal("open") + + get_model('SpotInstanceRequest', 'us-east-1')[0].state = 'active' + + requests = conn.get_all_spot_instance_requests() + requests.should.have.length_of(1) + request = requests[0] + + request.state.should.equal("active") + + +@mock_ec2_deprecated +def test_tag_spot_instance_request(): + """ + Test that moto correctly tags a spot instance request + """ + conn = boto.connect_ec2() + + request = conn.request_spot_instances( + price=0.5, image_id='ami-abcd1234', + ) + request[0].add_tag('tag1', 'value1') + request[0].add_tag('tag2', 'value2') + + requests = conn.get_all_spot_instance_requests() + requests.should.have.length_of(1) + request = requests[0] + + tag_dict = dict(request.tags) + tag_dict.should.equal({'tag1': 'value1', 'tag2': 'value2'}) + + +@mock_ec2_deprecated +def test_get_all_spot_instance_requests_filtering(): + """ + Test that moto correctly filters spot instance requests + """ + conn = boto.connect_ec2() + + request1 = conn.request_spot_instances( + price=0.5, image_id='ami-abcd1234', + ) + request2 = conn.request_spot_instances( + price=0.5, image_id='ami-abcd1234', + ) + conn.request_spot_instances( + price=0.5, image_id='ami-abcd1234', + ) + request1[0].add_tag('tag1', 'value1') + request1[0].add_tag('tag2', 'value2') + request2[0].add_tag('tag1', 'value1') + request2[0].add_tag('tag2', 'wrong') + + requests = conn.get_all_spot_instance_requests(filters={'state': 'active'}) + requests.should.have.length_of(0) + + requests = conn.get_all_spot_instance_requests(filters={'state': 'open'}) + requests.should.have.length_of(3) + + requests = conn.get_all_spot_instance_requests( + filters={'tag:tag1': 'value1'}) + requests.should.have.length_of(2) + + requests = conn.get_all_spot_instance_requests( + filters={'tag:tag1': 'value1', 'tag:tag2': 'value2'}) + requests.should.have.length_of(1) + + +@mock_ec2_deprecated +def test_request_spot_instances_setting_instance_id(): + conn = boto.ec2.connect_to_region("us-east-1") + request = conn.request_spot_instances( + price=0.5, image_id='ami-abcd1234') + + req = get_model('SpotInstanceRequest', 'us-east-1')[0] + req.state = 'active' + req.instance_id = 'i-12345678' + + request = conn.get_all_spot_instance_requests()[0] + assert request.state == 'active' + assert request.instance_id == 'i-12345678' diff --git a/tests/test_ec2/test_subnets.py b/tests/test_ec2/test_subnets.py index 99e6d45d8993..3fb122807e7a 100644 --- a/tests/test_ec2/test_subnets.py +++ b/tests/test_ec2/test_subnets.py @@ -1,291 +1,291 @@ -from __future__ import unicode_literals -# Ensure 'assert_raises' context manager support for Python 2.6 -import tests.backport_assert_raises # noqa -from nose.tools import assert_raises - -import boto3 -import boto -import boto.vpc -from boto.exception import EC2ResponseError -from botocore.exceptions import ParamValidationError -import json -import sure # noqa - -from moto import mock_cloudformation_deprecated, mock_ec2, mock_ec2_deprecated - - -@mock_ec2_deprecated -def test_subnets(): - ec2 = boto.connect_ec2('the_key', 'the_secret') - conn = boto.connect_vpc('the_key', 'the_secret') - vpc = conn.create_vpc("10.0.0.0/16") - subnet = conn.create_subnet(vpc.id, "10.0.0.0/18") - - all_subnets = conn.get_all_subnets() - all_subnets.should.have.length_of(1 + len(ec2.get_all_zones())) - - conn.delete_subnet(subnet.id) - - all_subnets = conn.get_all_subnets() 
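# The len(ec2.get_all_zones()) offsets in this test exist because moto,
# like a real default VPC, pre-creates one default subnet per
# availability zone, so those defaults show up in every unfiltered
# listing. A boto3 sketch of isolating just the defaults (the client
# here is illustrative, not part of this patch):
import boto3

client = boto3.client('ec2', region_name='us-east-1')
default_subnets = client.describe_subnets(
    Filters=[{'Name': 'default-for-az', 'Values': ['true']}])['Subnets']
# one entry per availability zone in the region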
- all_subnets.should.have.length_of(0 + len(ec2.get_all_zones())) - - with assert_raises(EC2ResponseError) as cm: - conn.delete_subnet(subnet.id) - cm.exception.code.should.equal('InvalidSubnetID.NotFound') - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none - - -@mock_ec2_deprecated -def test_subnet_create_vpc_validation(): - conn = boto.connect_vpc('the_key', 'the_secret') - - with assert_raises(EC2ResponseError) as cm: - conn.create_subnet("vpc-abcd1234", "10.0.0.0/18") - cm.exception.code.should.equal('InvalidVpcID.NotFound') - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none - - -@mock_ec2_deprecated -def test_subnet_tagging(): - conn = boto.connect_vpc('the_key', 'the_secret') - vpc = conn.create_vpc("10.0.0.0/16") - subnet = conn.create_subnet(vpc.id, "10.0.0.0/18") - - subnet.add_tag("a key", "some value") - - tag = conn.get_all_tags()[0] - tag.name.should.equal("a key") - tag.value.should.equal("some value") - - # Refresh the subnet - subnet = conn.get_all_subnets(subnet_ids=[subnet.id])[0] - subnet.tags.should.have.length_of(1) - subnet.tags["a key"].should.equal("some value") - - -@mock_ec2_deprecated -def test_subnet_should_have_proper_availability_zone_set(): - conn = boto.vpc.connect_to_region('us-west-1') - vpcA = conn.create_vpc("10.0.0.0/16") - subnetA = conn.create_subnet( - vpcA.id, "10.0.0.0/24", availability_zone='us-west-1b') - subnetA.availability_zone.should.equal('us-west-1b') - - -@mock_ec2 -def test_default_subnet(): - ec2 = boto3.resource('ec2', region_name='us-west-1') - - default_vpc = list(ec2.vpcs.all())[0] - default_vpc.cidr_block.should.equal('172.31.0.0/16') - default_vpc.reload() - default_vpc.is_default.should.be.ok - - subnet = ec2.create_subnet( - VpcId=default_vpc.id, CidrBlock='172.31.0.0/20', AvailabilityZone='us-west-1a') - subnet.reload() - subnet.map_public_ip_on_launch.shouldnt.be.ok - - -@mock_ec2_deprecated -def test_non_default_subnet(): - vpc_cli = boto.vpc.connect_to_region('us-west-1') - - # Create the non default VPC - vpc = vpc_cli.create_vpc("10.0.0.0/16") - vpc.is_default.shouldnt.be.ok - - subnet = vpc_cli.create_subnet(vpc.id, "10.0.0.0/24") - subnet = vpc_cli.get_all_subnets(subnet_ids=[subnet.id])[0] - subnet.mapPublicIpOnLaunch.should.equal('false') - - -@mock_ec2 -def test_boto3_non_default_subnet(): - ec2 = boto3.resource('ec2', region_name='us-west-1') - - # Create the non default VPC - vpc = ec2.create_vpc(CidrBlock='10.0.0.0/16') - vpc.reload() - vpc.is_default.shouldnt.be.ok - - subnet = ec2.create_subnet( - VpcId=vpc.id, CidrBlock='10.0.0.0/24', AvailabilityZone='us-west-1a') - subnet.reload() - subnet.map_public_ip_on_launch.shouldnt.be.ok - - -@mock_ec2 -def test_modify_subnet_attribute(): - ec2 = boto3.resource('ec2', region_name='us-west-1') - client = boto3.client('ec2', region_name='us-west-1') - - # Get the default VPC - vpc = list(ec2.vpcs.all())[0] - - subnet = ec2.create_subnet( - VpcId=vpc.id, CidrBlock='10.0.0.0/24', AvailabilityZone='us-west-1a') - - # 'map_public_ip_on_launch' is set when calling 'DescribeSubnets' action - subnet.reload() - - # For non default subnet, attribute value should be 'False' - subnet.map_public_ip_on_launch.shouldnt.be.ok - - client.modify_subnet_attribute( - SubnetId=subnet.id, MapPublicIpOnLaunch={'Value': False}) - subnet.reload() - subnet.map_public_ip_on_launch.shouldnt.be.ok - - client.modify_subnet_attribute( - SubnetId=subnet.id, MapPublicIpOnLaunch={'Value': True}) - subnet.reload() - 
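# boto3 resource objects cache attributes from their last describe, so
# after modify_subnet_attribute the test has to reload() before the
# map_public_ip_on_launch assertion below can observe the change. An
# equivalent check through the client, which always re-describes
# (reusing the test's own client/subnet names):
fresh = client.describe_subnets(SubnetIds=[subnet.id])['Subnets'][0]
assert fresh['MapPublicIpOnLaunch'] is True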
subnet.map_public_ip_on_launch.should.be.ok - - -@mock_ec2 -def test_modify_subnet_attribute_validation(): - ec2 = boto3.resource('ec2', region_name='us-west-1') - client = boto3.client('ec2', region_name='us-west-1') - vpc = ec2.create_vpc(CidrBlock='10.0.0.0/16') - subnet = ec2.create_subnet( - VpcId=vpc.id, CidrBlock='10.0.0.0/24', AvailabilityZone='us-west-1a') - - with assert_raises(ParamValidationError): - client.modify_subnet_attribute( - SubnetId=subnet.id, MapPublicIpOnLaunch={'Value': 'invalid'}) - - -@mock_ec2_deprecated -def test_subnet_get_by_id(): - ec2 = boto.ec2.connect_to_region('us-west-1') - conn = boto.vpc.connect_to_region('us-west-1') - vpcA = conn.create_vpc("10.0.0.0/16") - subnetA = conn.create_subnet( - vpcA.id, "10.0.0.0/24", availability_zone='us-west-1a') - vpcB = conn.create_vpc("10.0.0.0/16") - subnetB1 = conn.create_subnet( - vpcB.id, "10.0.0.0/24", availability_zone='us-west-1a') - subnetB2 = conn.create_subnet( - vpcB.id, "10.0.1.0/24", availability_zone='us-west-1b') - - subnets_by_id = conn.get_all_subnets(subnet_ids=[subnetA.id, subnetB1.id]) - subnets_by_id.should.have.length_of(2) - subnets_by_id = tuple(map(lambda s: s.id, subnets_by_id)) - subnetA.id.should.be.within(subnets_by_id) - subnetB1.id.should.be.within(subnets_by_id) - - with assert_raises(EC2ResponseError) as cm: - conn.get_all_subnets(subnet_ids=['subnet-does_not_exist']) - cm.exception.code.should.equal('InvalidSubnetID.NotFound') - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none - - -@mock_ec2_deprecated -def test_get_subnets_filtering(): - ec2 = boto.ec2.connect_to_region('us-west-1') - conn = boto.vpc.connect_to_region('us-west-1') - vpcA = conn.create_vpc("10.0.0.0/16") - subnetA = conn.create_subnet( - vpcA.id, "10.0.0.0/24", availability_zone='us-west-1a') - vpcB = conn.create_vpc("10.0.0.0/16") - subnetB1 = conn.create_subnet( - vpcB.id, "10.0.0.0/24", availability_zone='us-west-1a') - subnetB2 = conn.create_subnet( - vpcB.id, "10.0.1.0/24", availability_zone='us-west-1b') - - all_subnets = conn.get_all_subnets() - all_subnets.should.have.length_of(3 + len(ec2.get_all_zones())) - - # Filter by VPC ID - subnets_by_vpc = conn.get_all_subnets(filters={'vpc-id': vpcB.id}) - subnets_by_vpc.should.have.length_of(2) - set([subnet.id for subnet in subnets_by_vpc]).should.equal( - set([subnetB1.id, subnetB2.id])) - - # Filter by CIDR variations - subnets_by_cidr1 = conn.get_all_subnets(filters={'cidr': "10.0.0.0/24"}) - subnets_by_cidr1.should.have.length_of(2) - set([subnet.id for subnet in subnets_by_cidr1] - ).should.equal(set([subnetA.id, subnetB1.id])) - - subnets_by_cidr2 = conn.get_all_subnets( - filters={'cidr-block': "10.0.0.0/24"}) - subnets_by_cidr2.should.have.length_of(2) - set([subnet.id for subnet in subnets_by_cidr2] - ).should.equal(set([subnetA.id, subnetB1.id])) - - subnets_by_cidr3 = conn.get_all_subnets( - filters={'cidrBlock': "10.0.0.0/24"}) - subnets_by_cidr3.should.have.length_of(2) - set([subnet.id for subnet in subnets_by_cidr3] - ).should.equal(set([subnetA.id, subnetB1.id])) - - # Filter by VPC ID and CIDR - subnets_by_vpc_and_cidr = conn.get_all_subnets( - filters={'vpc-id': vpcB.id, 'cidr': "10.0.0.0/24"}) - subnets_by_vpc_and_cidr.should.have.length_of(1) - set([subnet.id for subnet in subnets_by_vpc_and_cidr] - ).should.equal(set([subnetB1.id])) - - # Filter by subnet ID - subnets_by_id = conn.get_all_subnets(filters={'subnet-id': subnetA.id}) - subnets_by_id.should.have.length_of(1) - set([subnet.id for subnet in 
subnets_by_id]).should.equal(set([subnetA.id])) - - # Filter by availabilityZone - subnets_by_az = conn.get_all_subnets( - filters={'availabilityZone': 'us-west-1a', 'vpc-id': vpcB.id}) - subnets_by_az.should.have.length_of(1) - set([subnet.id for subnet in subnets_by_az] - ).should.equal(set([subnetB1.id])) - - # Filter by defaultForAz - - subnets_by_az = conn.get_all_subnets(filters={'defaultForAz': "true"}) - subnets_by_az.should.have.length_of(len(conn.get_all_zones())) - - # Unsupported filter - conn.get_all_subnets.when.called_with( - filters={'not-implemented-filter': 'foobar'}).should.throw(NotImplementedError) - - -@mock_ec2_deprecated -@mock_cloudformation_deprecated -def test_subnet_tags_through_cloudformation(): - vpc_conn = boto.vpc.connect_to_region('us-west-1') - vpc = vpc_conn.create_vpc("10.0.0.0/16") - - subnet_template = { - "AWSTemplateFormatVersion": "2010-09-09", - "Resources": { - "testSubnet": { - "Type": "AWS::EC2::Subnet", - "Properties": { - "VpcId": vpc.id, - "CidrBlock": "10.0.0.0/24", - "AvailabilityZone": "us-west-1b", - "Tags": [{ - "Key": "foo", - "Value": "bar", - }, { - "Key": "blah", - "Value": "baz", - }] - } - } - } - } - cf_conn = boto.cloudformation.connect_to_region("us-west-1") - template_json = json.dumps(subnet_template) - cf_conn.create_stack( - "test_stack", - template_body=template_json, - ) - - subnet = vpc_conn.get_all_subnets(filters={'cidrBlock': '10.0.0.0/24'})[0] - subnet.tags["foo"].should.equal("bar") - subnet.tags["blah"].should.equal("baz") +from __future__ import unicode_literals +# Ensure 'assert_raises' context manager support for Python 2.6 +import tests.backport_assert_raises # noqa +from nose.tools import assert_raises + +import boto3 +import boto +import boto.vpc +from boto.exception import EC2ResponseError +from botocore.exceptions import ParamValidationError +import json +import sure # noqa + +from moto import mock_cloudformation_deprecated, mock_ec2, mock_ec2_deprecated + + +@mock_ec2_deprecated +def test_subnets(): + ec2 = boto.connect_ec2('the_key', 'the_secret') + conn = boto.connect_vpc('the_key', 'the_secret') + vpc = conn.create_vpc("10.0.0.0/16") + subnet = conn.create_subnet(vpc.id, "10.0.0.0/18") + + all_subnets = conn.get_all_subnets() + all_subnets.should.have.length_of(1 + len(ec2.get_all_zones())) + + conn.delete_subnet(subnet.id) + + all_subnets = conn.get_all_subnets() + all_subnets.should.have.length_of(0 + len(ec2.get_all_zones())) + + with assert_raises(EC2ResponseError) as cm: + conn.delete_subnet(subnet.id) + cm.exception.code.should.equal('InvalidSubnetID.NotFound') + cm.exception.status.should.equal(400) + cm.exception.request_id.should_not.be.none + + +@mock_ec2_deprecated +def test_subnet_create_vpc_validation(): + conn = boto.connect_vpc('the_key', 'the_secret') + + with assert_raises(EC2ResponseError) as cm: + conn.create_subnet("vpc-abcd1234", "10.0.0.0/18") + cm.exception.code.should.equal('InvalidVpcID.NotFound') + cm.exception.status.should.equal(400) + cm.exception.request_id.should_not.be.none + + +@mock_ec2_deprecated +def test_subnet_tagging(): + conn = boto.connect_vpc('the_key', 'the_secret') + vpc = conn.create_vpc("10.0.0.0/16") + subnet = conn.create_subnet(vpc.id, "10.0.0.0/18") + + subnet.add_tag("a key", "some value") + + tag = conn.get_all_tags()[0] + tag.name.should.equal("a key") + tag.value.should.equal("some value") + + # Refresh the subnet + subnet = conn.get_all_subnets(subnet_ids=[subnet.id])[0] + subnet.tags.should.have.length_of(1) + subnet.tags["a key"].should.equal("some 
value") + + +@mock_ec2_deprecated +def test_subnet_should_have_proper_availability_zone_set(): + conn = boto.vpc.connect_to_region('us-west-1') + vpcA = conn.create_vpc("10.0.0.0/16") + subnetA = conn.create_subnet( + vpcA.id, "10.0.0.0/24", availability_zone='us-west-1b') + subnetA.availability_zone.should.equal('us-west-1b') + + +@mock_ec2 +def test_default_subnet(): + ec2 = boto3.resource('ec2', region_name='us-west-1') + + default_vpc = list(ec2.vpcs.all())[0] + default_vpc.cidr_block.should.equal('172.31.0.0/16') + default_vpc.reload() + default_vpc.is_default.should.be.ok + + subnet = ec2.create_subnet( + VpcId=default_vpc.id, CidrBlock='172.31.0.0/20', AvailabilityZone='us-west-1a') + subnet.reload() + subnet.map_public_ip_on_launch.shouldnt.be.ok + + +@mock_ec2_deprecated +def test_non_default_subnet(): + vpc_cli = boto.vpc.connect_to_region('us-west-1') + + # Create the non default VPC + vpc = vpc_cli.create_vpc("10.0.0.0/16") + vpc.is_default.shouldnt.be.ok + + subnet = vpc_cli.create_subnet(vpc.id, "10.0.0.0/24") + subnet = vpc_cli.get_all_subnets(subnet_ids=[subnet.id])[0] + subnet.mapPublicIpOnLaunch.should.equal('false') + + +@mock_ec2 +def test_boto3_non_default_subnet(): + ec2 = boto3.resource('ec2', region_name='us-west-1') + + # Create the non default VPC + vpc = ec2.create_vpc(CidrBlock='10.0.0.0/16') + vpc.reload() + vpc.is_default.shouldnt.be.ok + + subnet = ec2.create_subnet( + VpcId=vpc.id, CidrBlock='10.0.0.0/24', AvailabilityZone='us-west-1a') + subnet.reload() + subnet.map_public_ip_on_launch.shouldnt.be.ok + + +@mock_ec2 +def test_modify_subnet_attribute(): + ec2 = boto3.resource('ec2', region_name='us-west-1') + client = boto3.client('ec2', region_name='us-west-1') + + # Get the default VPC + vpc = list(ec2.vpcs.all())[0] + + subnet = ec2.create_subnet( + VpcId=vpc.id, CidrBlock='10.0.0.0/24', AvailabilityZone='us-west-1a') + + # 'map_public_ip_on_launch' is set when calling 'DescribeSubnets' action + subnet.reload() + + # For non default subnet, attribute value should be 'False' + subnet.map_public_ip_on_launch.shouldnt.be.ok + + client.modify_subnet_attribute( + SubnetId=subnet.id, MapPublicIpOnLaunch={'Value': False}) + subnet.reload() + subnet.map_public_ip_on_launch.shouldnt.be.ok + + client.modify_subnet_attribute( + SubnetId=subnet.id, MapPublicIpOnLaunch={'Value': True}) + subnet.reload() + subnet.map_public_ip_on_launch.should.be.ok + + +@mock_ec2 +def test_modify_subnet_attribute_validation(): + ec2 = boto3.resource('ec2', region_name='us-west-1') + client = boto3.client('ec2', region_name='us-west-1') + vpc = ec2.create_vpc(CidrBlock='10.0.0.0/16') + subnet = ec2.create_subnet( + VpcId=vpc.id, CidrBlock='10.0.0.0/24', AvailabilityZone='us-west-1a') + + with assert_raises(ParamValidationError): + client.modify_subnet_attribute( + SubnetId=subnet.id, MapPublicIpOnLaunch={'Value': 'invalid'}) + + +@mock_ec2_deprecated +def test_subnet_get_by_id(): + ec2 = boto.ec2.connect_to_region('us-west-1') + conn = boto.vpc.connect_to_region('us-west-1') + vpcA = conn.create_vpc("10.0.0.0/16") + subnetA = conn.create_subnet( + vpcA.id, "10.0.0.0/24", availability_zone='us-west-1a') + vpcB = conn.create_vpc("10.0.0.0/16") + subnetB1 = conn.create_subnet( + vpcB.id, "10.0.0.0/24", availability_zone='us-west-1a') + subnetB2 = conn.create_subnet( + vpcB.id, "10.0.1.0/24", availability_zone='us-west-1b') + + subnets_by_id = conn.get_all_subnets(subnet_ids=[subnetA.id, subnetB1.id]) + subnets_by_id.should.have.length_of(2) + subnets_by_id = tuple(map(lambda s: s.id, 
subnets_by_id)) + subnetA.id.should.be.within(subnets_by_id) + subnetB1.id.should.be.within(subnets_by_id) + + with assert_raises(EC2ResponseError) as cm: + conn.get_all_subnets(subnet_ids=['subnet-does_not_exist']) + cm.exception.code.should.equal('InvalidSubnetID.NotFound') + cm.exception.status.should.equal(400) + cm.exception.request_id.should_not.be.none + + +@mock_ec2_deprecated +def test_get_subnets_filtering(): + ec2 = boto.ec2.connect_to_region('us-west-1') + conn = boto.vpc.connect_to_region('us-west-1') + vpcA = conn.create_vpc("10.0.0.0/16") + subnetA = conn.create_subnet( + vpcA.id, "10.0.0.0/24", availability_zone='us-west-1a') + vpcB = conn.create_vpc("10.0.0.0/16") + subnetB1 = conn.create_subnet( + vpcB.id, "10.0.0.0/24", availability_zone='us-west-1a') + subnetB2 = conn.create_subnet( + vpcB.id, "10.0.1.0/24", availability_zone='us-west-1b') + + all_subnets = conn.get_all_subnets() + all_subnets.should.have.length_of(3 + len(ec2.get_all_zones())) + + # Filter by VPC ID + subnets_by_vpc = conn.get_all_subnets(filters={'vpc-id': vpcB.id}) + subnets_by_vpc.should.have.length_of(2) + set([subnet.id for subnet in subnets_by_vpc]).should.equal( + set([subnetB1.id, subnetB2.id])) + + # Filter by CIDR variations + subnets_by_cidr1 = conn.get_all_subnets(filters={'cidr': "10.0.0.0/24"}) + subnets_by_cidr1.should.have.length_of(2) + set([subnet.id for subnet in subnets_by_cidr1] + ).should.equal(set([subnetA.id, subnetB1.id])) + + subnets_by_cidr2 = conn.get_all_subnets( + filters={'cidr-block': "10.0.0.0/24"}) + subnets_by_cidr2.should.have.length_of(2) + set([subnet.id for subnet in subnets_by_cidr2] + ).should.equal(set([subnetA.id, subnetB1.id])) + + subnets_by_cidr3 = conn.get_all_subnets( + filters={'cidrBlock': "10.0.0.0/24"}) + subnets_by_cidr3.should.have.length_of(2) + set([subnet.id for subnet in subnets_by_cidr3] + ).should.equal(set([subnetA.id, subnetB1.id])) + + # Filter by VPC ID and CIDR + subnets_by_vpc_and_cidr = conn.get_all_subnets( + filters={'vpc-id': vpcB.id, 'cidr': "10.0.0.0/24"}) + subnets_by_vpc_and_cidr.should.have.length_of(1) + set([subnet.id for subnet in subnets_by_vpc_and_cidr] + ).should.equal(set([subnetB1.id])) + + # Filter by subnet ID + subnets_by_id = conn.get_all_subnets(filters={'subnet-id': subnetA.id}) + subnets_by_id.should.have.length_of(1) + set([subnet.id for subnet in subnets_by_id]).should.equal(set([subnetA.id])) + + # Filter by availabilityZone + subnets_by_az = conn.get_all_subnets( + filters={'availabilityZone': 'us-west-1a', 'vpc-id': vpcB.id}) + subnets_by_az.should.have.length_of(1) + set([subnet.id for subnet in subnets_by_az] + ).should.equal(set([subnetB1.id])) + + # Filter by defaultForAz + + subnets_by_az = conn.get_all_subnets(filters={'defaultForAz': "true"}) + subnets_by_az.should.have.length_of(len(conn.get_all_zones())) + + # Unsupported filter + conn.get_all_subnets.when.called_with( + filters={'not-implemented-filter': 'foobar'}).should.throw(NotImplementedError) + + +@mock_ec2_deprecated +@mock_cloudformation_deprecated +def test_subnet_tags_through_cloudformation(): + vpc_conn = boto.vpc.connect_to_region('us-west-1') + vpc = vpc_conn.create_vpc("10.0.0.0/16") + + subnet_template = { + "AWSTemplateFormatVersion": "2010-09-09", + "Resources": { + "testSubnet": { + "Type": "AWS::EC2::Subnet", + "Properties": { + "VpcId": vpc.id, + "CidrBlock": "10.0.0.0/24", + "AvailabilityZone": "us-west-1b", + "Tags": [{ + "Key": "foo", + "Value": "bar", + }, { + "Key": "blah", + "Value": "baz", + }] + } + } + } + } + cf_conn = 
boto.cloudformation.connect_to_region("us-west-1") + template_json = json.dumps(subnet_template) + cf_conn.create_stack( + "test_stack", + template_body=template_json, + ) + + subnet = vpc_conn.get_all_subnets(filters={'cidrBlock': '10.0.0.0/24'})[0] + subnet.tags["foo"].should.equal("bar") + subnet.tags["blah"].should.equal("baz") diff --git a/tests/test_ec2/test_tags.py b/tests/test_ec2/test_tags.py index c92a4f81f033..ac213857a8de 100644 --- a/tests/test_ec2/test_tags.py +++ b/tests/test_ec2/test_tags.py @@ -1,453 +1,453 @@ -from __future__ import unicode_literals -from nose.tools import assert_raises - -import itertools - -import boto -import boto3 -from boto.exception import EC2ResponseError -from boto.ec2.instance import Reservation -import sure # noqa - -from moto import mock_ec2_deprecated, mock_ec2 -from nose.tools import assert_raises - - -@mock_ec2_deprecated -def test_add_tag(): - conn = boto.connect_ec2('the_key', 'the_secret') - reservation = conn.run_instances('ami-1234abcd') - instance = reservation.instances[0] - - with assert_raises(EC2ResponseError) as ex: - instance.add_tag("a key", "some value", dry_run=True) - ex.exception.error_code.should.equal('DryRunOperation') - ex.exception.status.should.equal(400) - ex.exception.message.should.equal( - 'An error occurred (DryRunOperation) when calling the CreateTags operation: Request would have succeeded, but DryRun flag is set') - - instance.add_tag("a key", "some value") - chain = itertools.chain.from_iterable - existing_instances = list( - chain([res.instances for res in conn.get_all_instances()])) - existing_instances.should.have.length_of(1) - existing_instance = existing_instances[0] - existing_instance.tags["a key"].should.equal("some value") - - -@mock_ec2_deprecated -def test_remove_tag(): - conn = boto.connect_ec2('the_key', 'the_secret') - reservation = conn.run_instances('ami-1234abcd') - instance = reservation.instances[0] - - instance.add_tag("a key", "some value") - - tags = conn.get_all_tags() - tag = tags[0] - tag.name.should.equal("a key") - tag.value.should.equal("some value") - - with assert_raises(EC2ResponseError) as ex: - instance.remove_tag("a key", dry_run=True) - ex.exception.error_code.should.equal('DryRunOperation') - ex.exception.status.should.equal(400) - ex.exception.message.should.equal( - 'An error occurred (DryRunOperation) when calling the DeleteTags operation: Request would have succeeded, but DryRun flag is set') - - instance.remove_tag("a key") - conn.get_all_tags().should.have.length_of(0) - - instance.add_tag("a key", "some value") - conn.get_all_tags().should.have.length_of(1) - instance.remove_tag("a key", "some value") - - -@mock_ec2_deprecated -def test_get_all_tags(): - conn = boto.connect_ec2('the_key', 'the_secret') - reservation = conn.run_instances('ami-1234abcd') - instance = reservation.instances[0] - - instance.add_tag("a key", "some value") - - tags = conn.get_all_tags() - tag = tags[0] - tag.name.should.equal("a key") - tag.value.should.equal("some value") - - -@mock_ec2_deprecated -def test_get_all_tags_with_special_characters(): - conn = boto.connect_ec2('the_key', 'the_secret') - reservation = conn.run_instances('ami-1234abcd') - instance = reservation.instances[0] - - instance.add_tag("a key", "some<> value") - - tags = conn.get_all_tags() - tag = tags[0] - tag.name.should.equal("a key") - tag.value.should.equal("some<> value") - - -@mock_ec2_deprecated -def test_create_tags(): - conn = boto.connect_ec2('the_key', 'the_secret') - reservation = 
conn.run_instances('ami-1234abcd') - instance = reservation.instances[0] - tag_dict = {'a key': 'some value', - 'another key': 'some other value', - 'blank key': ''} - - with assert_raises(EC2ResponseError) as ex: - conn.create_tags(instance.id, tag_dict, dry_run=True) - ex.exception.error_code.should.equal('DryRunOperation') - ex.exception.status.should.equal(400) - ex.exception.message.should.equal( - 'An error occurred (DryRunOperation) when calling the CreateTags operation: Request would have succeeded, but DryRun flag is set') - - conn.create_tags(instance.id, tag_dict) - tags = conn.get_all_tags() - set([key for key in tag_dict]).should.equal( - set([tag.name for tag in tags])) - set([tag_dict[key] for key in tag_dict]).should.equal( - set([tag.value for tag in tags])) - - -@mock_ec2_deprecated -def test_tag_limit_exceeded(): - conn = boto.connect_ec2('the_key', 'the_secret') - reservation = conn.run_instances('ami-1234abcd') - instance = reservation.instances[0] - tag_dict = {} - for i in range(51): - tag_dict['{0:02d}'.format(i + 1)] = '' - - with assert_raises(EC2ResponseError) as cm: - conn.create_tags(instance.id, tag_dict) - cm.exception.code.should.equal('TagLimitExceeded') - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none - - instance.add_tag("a key", "a value") - with assert_raises(EC2ResponseError) as cm: - conn.create_tags(instance.id, tag_dict) - cm.exception.code.should.equal('TagLimitExceeded') - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none - - tags = conn.get_all_tags() - tag = tags[0] - tags.should.have.length_of(1) - tag.name.should.equal("a key") - tag.value.should.equal("a value") - - -@mock_ec2_deprecated -def test_invalid_parameter_tag_null(): - conn = boto.connect_ec2('the_key', 'the_secret') - reservation = conn.run_instances('ami-1234abcd') - instance = reservation.instances[0] - - with assert_raises(EC2ResponseError) as cm: - instance.add_tag("a key", None) - cm.exception.code.should.equal('InvalidParameterValue') - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none - - -@mock_ec2_deprecated -def test_invalid_id(): - conn = boto.connect_ec2('the_key', 'the_secret') - with assert_raises(EC2ResponseError) as cm: - conn.create_tags('ami-blah', {'key': 'tag'}) - cm.exception.code.should.equal('InvalidID') - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none - - with assert_raises(EC2ResponseError) as cm: - conn.create_tags('blah-blah', {'key': 'tag'}) - cm.exception.code.should.equal('InvalidID') - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none - - -@mock_ec2_deprecated -def test_get_all_tags_resource_id_filter(): - conn = boto.connect_ec2('the_key', 'the_secret') - reservation = conn.run_instances('ami-1234abcd') - instance = reservation.instances[0] - instance.add_tag("an instance key", "some value") - image_id = conn.create_image(instance.id, "test-ami", "this is a test ami") - image = conn.get_image(image_id) - image.add_tag("an image key", "some value") - - tags = conn.get_all_tags(filters={'resource-id': instance.id}) - tag = tags[0] - tags.should.have.length_of(1) - tag.res_id.should.equal(instance.id) - tag.res_type.should.equal('instance') - tag.name.should.equal("an instance key") - tag.value.should.equal("some value") - - tags = conn.get_all_tags(filters={'resource-id': image_id}) - tag = tags[0] - tags.should.have.length_of(1) - tag.res_id.should.equal(image_id) - 
tag.res_type.should.equal('image') - tag.name.should.equal("an image key") - tag.value.should.equal("some value") - - -@mock_ec2_deprecated -def test_get_all_tags_resource_type_filter(): - conn = boto.connect_ec2('the_key', 'the_secret') - reservation = conn.run_instances('ami-1234abcd') - instance = reservation.instances[0] - instance.add_tag("an instance key", "some value") - image_id = conn.create_image(instance.id, "test-ami", "this is a test ami") - image = conn.get_image(image_id) - image.add_tag("an image key", "some value") - - tags = conn.get_all_tags(filters={'resource-type': 'instance'}) - tag = tags[0] - tags.should.have.length_of(1) - tag.res_id.should.equal(instance.id) - tag.res_type.should.equal('instance') - tag.name.should.equal("an instance key") - tag.value.should.equal("some value") - - tags = conn.get_all_tags(filters={'resource-type': 'image'}) - tag = tags[0] - tags.should.have.length_of(1) - tag.res_id.should.equal(image_id) - tag.res_type.should.equal('image') - tag.name.should.equal("an image key") - tag.value.should.equal("some value") - - -@mock_ec2_deprecated -def test_get_all_tags_key_filter(): - conn = boto.connect_ec2('the_key', 'the_secret') - reservation = conn.run_instances('ami-1234abcd') - instance = reservation.instances[0] - instance.add_tag("an instance key", "some value") - image_id = conn.create_image(instance.id, "test-ami", "this is a test ami") - image = conn.get_image(image_id) - image.add_tag("an image key", "some value") - - tags = conn.get_all_tags(filters={'key': 'an instance key'}) - tag = tags[0] - tags.should.have.length_of(1) - tag.res_id.should.equal(instance.id) - tag.res_type.should.equal('instance') - tag.name.should.equal("an instance key") - tag.value.should.equal("some value") - - -@mock_ec2_deprecated -def test_get_all_tags_value_filter(): - conn = boto.connect_ec2('the_key', 'the_secret') - reservation = conn.run_instances('ami-1234abcd') - instance = reservation.instances[0] - instance.add_tag("an instance key", "some value") - reservation_b = conn.run_instances('ami-1234abcd') - instance_b = reservation_b.instances[0] - instance_b.add_tag("an instance key", "some other value") - reservation_c = conn.run_instances('ami-1234abcd') - instance_c = reservation_c.instances[0] - instance_c.add_tag("an instance key", "other value*") - reservation_d = conn.run_instances('ami-1234abcd') - instance_d = reservation_d.instances[0] - instance_d.add_tag("an instance key", "other value**") - reservation_e = conn.run_instances('ami-1234abcd') - instance_e = reservation_e.instances[0] - instance_e.add_tag("an instance key", "other value*?") - image_id = conn.create_image(instance.id, "test-ami", "this is a test ami") - image = conn.get_image(image_id) - image.add_tag("an image key", "some value") - - tags = conn.get_all_tags(filters={'value': 'some value'}) - tags.should.have.length_of(2) - - tags = conn.get_all_tags(filters={'value': 'some*value'}) - tags.should.have.length_of(3) - - tags = conn.get_all_tags(filters={'value': '*some*value'}) - tags.should.have.length_of(3) - - tags = conn.get_all_tags(filters={'value': '*some*value*'}) - tags.should.have.length_of(3) - - tags = conn.get_all_tags(filters={'value': '*value\*'}) - tags.should.have.length_of(1) - - tags = conn.get_all_tags(filters={'value': '*value\*\*'}) - tags.should.have.length_of(1) - - tags = conn.get_all_tags(filters={'value': '*value\*\?'}) - tags.should.have.length_of(1) - - -@mock_ec2_deprecated -def test_retrieved_instances_must_contain_their_tags(): - tag_key = 'Tag 
name' - tag_value = 'Tag value' - tags_to_be_set = {tag_key: tag_value} - - conn = boto.connect_ec2('the_key', 'the_secret') - reservation = conn.run_instances('ami-1234abcd') - reservation.should.be.a(Reservation) - reservation.instances.should.have.length_of(1) - instance = reservation.instances[0] - - reservations = conn.get_all_instances() - reservations.should.have.length_of(1) - reservations[0].id.should.equal(reservation.id) - instances = reservations[0].instances - instances.should.have.length_of(1) - instances[0].id.should.equal(instance.id) - - conn.create_tags([instance.id], tags_to_be_set) - reservations = conn.get_all_instances() - instance = reservations[0].instances[0] - retrieved_tags = instance.tags - - # Cleanup of instance - conn.terminate_instances([instances[0].id]) - - # Check whether tag is present with correct value - retrieved_tags[tag_key].should.equal(tag_value) - - -@mock_ec2_deprecated -def test_retrieved_volumes_must_contain_their_tags(): - tag_key = 'Tag name' - tag_value = 'Tag value' - tags_to_be_set = {tag_key: tag_value} - conn = boto.connect_ec2('the_key', 'the_secret') - volume = conn.create_volume(80, "us-east-1a") - - all_volumes = conn.get_all_volumes() - volume = all_volumes[0] - conn.create_tags([volume.id], tags_to_be_set) - - # Fetch the volume again - all_volumes = conn.get_all_volumes() - volume = all_volumes[0] - retrieved_tags = volume.tags - - volume.delete() - - # Check whether tag is present with correct value - retrieved_tags[tag_key].should.equal(tag_value) - - -@mock_ec2_deprecated -def test_retrieved_snapshots_must_contain_their_tags(): - tag_key = 'Tag name' - tag_value = 'Tag value' - tags_to_be_set = {tag_key: tag_value} - conn = boto.connect_ec2(aws_access_key_id='the_key', - aws_secret_access_key='the_secret') - volume = conn.create_volume(80, "eu-west-1a") - snapshot = conn.create_snapshot(volume.id) - conn.create_tags([snapshot.id], tags_to_be_set) - - # Fetch the snapshot again - all_snapshots = conn.get_all_snapshots() - snapshot = [item for item in all_snapshots if item.id == snapshot.id][0] - retrieved_tags = snapshot.tags - - conn.delete_snapshot(snapshot.id) - volume.delete() - - # Check whether tag is present with correct value - retrieved_tags[tag_key].should.equal(tag_value) - - -@mock_ec2_deprecated -def test_filter_instances_by_wildcard_tags(): - conn = boto.connect_ec2(aws_access_key_id='the_key', - aws_secret_access_key='the_secret') - reservation = conn.run_instances('ami-1234abcd') - instance_a = reservation.instances[0] - instance_a.add_tag("Key1", "Value1") - reservation_b = conn.run_instances('ami-1234abcd') - instance_b = reservation_b.instances[0] - instance_b.add_tag("Key1", "Value2") - - reservations = conn.get_all_instances(filters={'tag:Key1': 'Value*'}) - reservations.should.have.length_of(2) - - reservations = conn.get_all_instances(filters={'tag-key': 'Key*'}) - reservations.should.have.length_of(2) - - reservations = conn.get_all_instances(filters={'tag-value': 'Value*'}) - reservations.should.have.length_of(2) - - -@mock_ec2 -def test_create_volume_with_tags(): - client = boto3.client('ec2', 'us-west-2') - response = client.create_volume( - AvailabilityZone='us-west-2', - Encrypted=False, - Size=40, - TagSpecifications=[ - { - 'ResourceType': 'volume', - 'Tags': [ - { - 'Key': 'TEST_TAG', - 'Value': 'TEST_VALUE' - } - ], - } - ] - ) - - assert response['Tags'][0]['Key'] == 'TEST_TAG' - - -@mock_ec2 -def test_create_snapshot_with_tags(): - client = boto3.client('ec2', 'us-west-2') - volume_id = 
client.create_volume( - AvailabilityZone='us-west-2', - Encrypted=False, - Size=40, - TagSpecifications=[ - { - 'ResourceType': 'volume', - 'Tags': [ - { - 'Key': 'TEST_TAG', - 'Value': 'TEST_VALUE' - } - ], - } - ] - )['VolumeId'] - snapshot = client.create_snapshot( - VolumeId=volume_id, - TagSpecifications=[ - { - 'ResourceType': 'snapshot', - 'Tags': [ - { - 'Key': 'TEST_SNAPSHOT_TAG', - 'Value': 'TEST_SNAPSHOT_VALUE' - } - ], - } - ] - ) - - expected_tags = [{ - 'Key': 'TEST_SNAPSHOT_TAG', - 'Value': 'TEST_SNAPSHOT_VALUE' - }] - - assert snapshot['Tags'] == expected_tags +from __future__ import unicode_literals +from nose.tools import assert_raises + +import itertools + +import boto +import boto3 +from boto.exception import EC2ResponseError +from boto.ec2.instance import Reservation +import sure # noqa + +from moto import mock_ec2_deprecated, mock_ec2 +from nose.tools import assert_raises + + +@mock_ec2_deprecated +def test_add_tag(): + conn = boto.connect_ec2('the_key', 'the_secret') + reservation = conn.run_instances('ami-1234abcd') + instance = reservation.instances[0] + + with assert_raises(EC2ResponseError) as ex: + instance.add_tag("a key", "some value", dry_run=True) + ex.exception.error_code.should.equal('DryRunOperation') + ex.exception.status.should.equal(400) + ex.exception.message.should.equal( + 'An error occurred (DryRunOperation) when calling the CreateTags operation: Request would have succeeded, but DryRun flag is set') + + instance.add_tag("a key", "some value") + chain = itertools.chain.from_iterable + existing_instances = list( + chain([res.instances for res in conn.get_all_instances()])) + existing_instances.should.have.length_of(1) + existing_instance = existing_instances[0] + existing_instance.tags["a key"].should.equal("some value") + + +@mock_ec2_deprecated +def test_remove_tag(): + conn = boto.connect_ec2('the_key', 'the_secret') + reservation = conn.run_instances('ami-1234abcd') + instance = reservation.instances[0] + + instance.add_tag("a key", "some value") + + tags = conn.get_all_tags() + tag = tags[0] + tag.name.should.equal("a key") + tag.value.should.equal("some value") + + with assert_raises(EC2ResponseError) as ex: + instance.remove_tag("a key", dry_run=True) + ex.exception.error_code.should.equal('DryRunOperation') + ex.exception.status.should.equal(400) + ex.exception.message.should.equal( + 'An error occurred (DryRunOperation) when calling the DeleteTags operation: Request would have succeeded, but DryRun flag is set') + + instance.remove_tag("a key") + conn.get_all_tags().should.have.length_of(0) + + instance.add_tag("a key", "some value") + conn.get_all_tags().should.have.length_of(1) + instance.remove_tag("a key", "some value") + + +@mock_ec2_deprecated +def test_get_all_tags(): + conn = boto.connect_ec2('the_key', 'the_secret') + reservation = conn.run_instances('ami-1234abcd') + instance = reservation.instances[0] + + instance.add_tag("a key", "some value") + + tags = conn.get_all_tags() + tag = tags[0] + tag.name.should.equal("a key") + tag.value.should.equal("some value") + + +@mock_ec2_deprecated +def test_get_all_tags_with_special_characters(): + conn = boto.connect_ec2('the_key', 'the_secret') + reservation = conn.run_instances('ami-1234abcd') + instance = reservation.instances[0] + + instance.add_tag("a key", "some<> value") + + tags = conn.get_all_tags() + tag = tags[0] + tag.name.should.equal("a key") + tag.value.should.equal("some<> value") + + +@mock_ec2_deprecated +def test_create_tags(): + conn = boto.connect_ec2('the_key', 
'the_secret') + reservation = conn.run_instances('ami-1234abcd') + instance = reservation.instances[0] + tag_dict = {'a key': 'some value', + 'another key': 'some other value', + 'blank key': ''} + + with assert_raises(EC2ResponseError) as ex: + conn.create_tags(instance.id, tag_dict, dry_run=True) + ex.exception.error_code.should.equal('DryRunOperation') + ex.exception.status.should.equal(400) + ex.exception.message.should.equal( + 'An error occurred (DryRunOperation) when calling the CreateTags operation: Request would have succeeded, but DryRun flag is set') + + conn.create_tags(instance.id, tag_dict) + tags = conn.get_all_tags() + set([key for key in tag_dict]).should.equal( + set([tag.name for tag in tags])) + set([tag_dict[key] for key in tag_dict]).should.equal( + set([tag.value for tag in tags])) + + +@mock_ec2_deprecated +def test_tag_limit_exceeded(): + conn = boto.connect_ec2('the_key', 'the_secret') + reservation = conn.run_instances('ami-1234abcd') + instance = reservation.instances[0] + tag_dict = {} + for i in range(51): + tag_dict['{0:02d}'.format(i + 1)] = '' + + with assert_raises(EC2ResponseError) as cm: + conn.create_tags(instance.id, tag_dict) + cm.exception.code.should.equal('TagLimitExceeded') + cm.exception.status.should.equal(400) + cm.exception.request_id.should_not.be.none + + instance.add_tag("a key", "a value") + with assert_raises(EC2ResponseError) as cm: + conn.create_tags(instance.id, tag_dict) + cm.exception.code.should.equal('TagLimitExceeded') + cm.exception.status.should.equal(400) + cm.exception.request_id.should_not.be.none + + tags = conn.get_all_tags() + tag = tags[0] + tags.should.have.length_of(1) + tag.name.should.equal("a key") + tag.value.should.equal("a value") + + +@mock_ec2_deprecated +def test_invalid_parameter_tag_null(): + conn = boto.connect_ec2('the_key', 'the_secret') + reservation = conn.run_instances('ami-1234abcd') + instance = reservation.instances[0] + + with assert_raises(EC2ResponseError) as cm: + instance.add_tag("a key", None) + cm.exception.code.should.equal('InvalidParameterValue') + cm.exception.status.should.equal(400) + cm.exception.request_id.should_not.be.none + + +@mock_ec2_deprecated +def test_invalid_id(): + conn = boto.connect_ec2('the_key', 'the_secret') + with assert_raises(EC2ResponseError) as cm: + conn.create_tags('ami-blah', {'key': 'tag'}) + cm.exception.code.should.equal('InvalidID') + cm.exception.status.should.equal(400) + cm.exception.request_id.should_not.be.none + + with assert_raises(EC2ResponseError) as cm: + conn.create_tags('blah-blah', {'key': 'tag'}) + cm.exception.code.should.equal('InvalidID') + cm.exception.status.should.equal(400) + cm.exception.request_id.should_not.be.none + + +@mock_ec2_deprecated +def test_get_all_tags_resource_id_filter(): + conn = boto.connect_ec2('the_key', 'the_secret') + reservation = conn.run_instances('ami-1234abcd') + instance = reservation.instances[0] + instance.add_tag("an instance key", "some value") + image_id = conn.create_image(instance.id, "test-ami", "this is a test ami") + image = conn.get_image(image_id) + image.add_tag("an image key", "some value") + + tags = conn.get_all_tags(filters={'resource-id': instance.id}) + tag = tags[0] + tags.should.have.length_of(1) + tag.res_id.should.equal(instance.id) + tag.res_type.should.equal('instance') + tag.name.should.equal("an instance key") + tag.value.should.equal("some value") + + tags = conn.get_all_tags(filters={'resource-id': image_id}) + tag = tags[0] + tags.should.have.length_of(1) + 
tag.res_id.should.equal(image_id) + tag.res_type.should.equal('image') + tag.name.should.equal("an image key") + tag.value.should.equal("some value") + + +@mock_ec2_deprecated +def test_get_all_tags_resource_type_filter(): + conn = boto.connect_ec2('the_key', 'the_secret') + reservation = conn.run_instances('ami-1234abcd') + instance = reservation.instances[0] + instance.add_tag("an instance key", "some value") + image_id = conn.create_image(instance.id, "test-ami", "this is a test ami") + image = conn.get_image(image_id) + image.add_tag("an image key", "some value") + + tags = conn.get_all_tags(filters={'resource-type': 'instance'}) + tag = tags[0] + tags.should.have.length_of(1) + tag.res_id.should.equal(instance.id) + tag.res_type.should.equal('instance') + tag.name.should.equal("an instance key") + tag.value.should.equal("some value") + + tags = conn.get_all_tags(filters={'resource-type': 'image'}) + tag = tags[0] + tags.should.have.length_of(1) + tag.res_id.should.equal(image_id) + tag.res_type.should.equal('image') + tag.name.should.equal("an image key") + tag.value.should.equal("some value") + + +@mock_ec2_deprecated +def test_get_all_tags_key_filter(): + conn = boto.connect_ec2('the_key', 'the_secret') + reservation = conn.run_instances('ami-1234abcd') + instance = reservation.instances[0] + instance.add_tag("an instance key", "some value") + image_id = conn.create_image(instance.id, "test-ami", "this is a test ami") + image = conn.get_image(image_id) + image.add_tag("an image key", "some value") + + tags = conn.get_all_tags(filters={'key': 'an instance key'}) + tag = tags[0] + tags.should.have.length_of(1) + tag.res_id.should.equal(instance.id) + tag.res_type.should.equal('instance') + tag.name.should.equal("an instance key") + tag.value.should.equal("some value") + + +@mock_ec2_deprecated +def test_get_all_tags_value_filter(): + conn = boto.connect_ec2('the_key', 'the_secret') + reservation = conn.run_instances('ami-1234abcd') + instance = reservation.instances[0] + instance.add_tag("an instance key", "some value") + reservation_b = conn.run_instances('ami-1234abcd') + instance_b = reservation_b.instances[0] + instance_b.add_tag("an instance key", "some other value") + reservation_c = conn.run_instances('ami-1234abcd') + instance_c = reservation_c.instances[0] + instance_c.add_tag("an instance key", "other value*") + reservation_d = conn.run_instances('ami-1234abcd') + instance_d = reservation_d.instances[0] + instance_d.add_tag("an instance key", "other value**") + reservation_e = conn.run_instances('ami-1234abcd') + instance_e = reservation_e.instances[0] + instance_e.add_tag("an instance key", "other value*?") + image_id = conn.create_image(instance.id, "test-ami", "this is a test ami") + image = conn.get_image(image_id) + image.add_tag("an image key", "some value") + + tags = conn.get_all_tags(filters={'value': 'some value'}) + tags.should.have.length_of(2) + + tags = conn.get_all_tags(filters={'value': 'some*value'}) + tags.should.have.length_of(3) + + tags = conn.get_all_tags(filters={'value': '*some*value'}) + tags.should.have.length_of(3) + + tags = conn.get_all_tags(filters={'value': '*some*value*'}) + tags.should.have.length_of(3) + + tags = conn.get_all_tags(filters={'value': '*value\*'}) + tags.should.have.length_of(1) + + tags = conn.get_all_tags(filters={'value': '*value\*\*'}) + tags.should.have.length_of(1) + + tags = conn.get_all_tags(filters={'value': '*value\*\?'}) + tags.should.have.length_of(1) + + +@mock_ec2_deprecated +def 
test_retrieved_instances_must_contain_their_tags(): + tag_key = 'Tag name' + tag_value = 'Tag value' + tags_to_be_set = {tag_key: tag_value} + + conn = boto.connect_ec2('the_key', 'the_secret') + reservation = conn.run_instances('ami-1234abcd') + reservation.should.be.a(Reservation) + reservation.instances.should.have.length_of(1) + instance = reservation.instances[0] + + reservations = conn.get_all_instances() + reservations.should.have.length_of(1) + reservations[0].id.should.equal(reservation.id) + instances = reservations[0].instances + instances.should.have.length_of(1) + instances[0].id.should.equal(instance.id) + + conn.create_tags([instance.id], tags_to_be_set) + reservations = conn.get_all_instances() + instance = reservations[0].instances[0] + retrieved_tags = instance.tags + + # Cleanup of instance + conn.terminate_instances([instances[0].id]) + + # Check whether tag is present with correct value + retrieved_tags[tag_key].should.equal(tag_value) + + +@mock_ec2_deprecated +def test_retrieved_volumes_must_contain_their_tags(): + tag_key = 'Tag name' + tag_value = 'Tag value' + tags_to_be_set = {tag_key: tag_value} + conn = boto.connect_ec2('the_key', 'the_secret') + volume = conn.create_volume(80, "us-east-1a") + + all_volumes = conn.get_all_volumes() + volume = all_volumes[0] + conn.create_tags([volume.id], tags_to_be_set) + + # Fetch the volume again + all_volumes = conn.get_all_volumes() + volume = all_volumes[0] + retrieved_tags = volume.tags + + volume.delete() + + # Check whether tag is present with correct value + retrieved_tags[tag_key].should.equal(tag_value) + + +@mock_ec2_deprecated +def test_retrieved_snapshots_must_contain_their_tags(): + tag_key = 'Tag name' + tag_value = 'Tag value' + tags_to_be_set = {tag_key: tag_value} + conn = boto.connect_ec2(aws_access_key_id='the_key', + aws_secret_access_key='the_secret') + volume = conn.create_volume(80, "eu-west-1a") + snapshot = conn.create_snapshot(volume.id) + conn.create_tags([snapshot.id], tags_to_be_set) + + # Fetch the snapshot again + all_snapshots = conn.get_all_snapshots() + snapshot = [item for item in all_snapshots if item.id == snapshot.id][0] + retrieved_tags = snapshot.tags + + conn.delete_snapshot(snapshot.id) + volume.delete() + + # Check whether tag is present with correct value + retrieved_tags[tag_key].should.equal(tag_value) + + +@mock_ec2_deprecated +def test_filter_instances_by_wildcard_tags(): + conn = boto.connect_ec2(aws_access_key_id='the_key', + aws_secret_access_key='the_secret') + reservation = conn.run_instances('ami-1234abcd') + instance_a = reservation.instances[0] + instance_a.add_tag("Key1", "Value1") + reservation_b = conn.run_instances('ami-1234abcd') + instance_b = reservation_b.instances[0] + instance_b.add_tag("Key1", "Value2") + + reservations = conn.get_all_instances(filters={'tag:Key1': 'Value*'}) + reservations.should.have.length_of(2) + + reservations = conn.get_all_instances(filters={'tag-key': 'Key*'}) + reservations.should.have.length_of(2) + + reservations = conn.get_all_instances(filters={'tag-value': 'Value*'}) + reservations.should.have.length_of(2) + + +@mock_ec2 +def test_create_volume_with_tags(): + client = boto3.client('ec2', 'us-west-2') + response = client.create_volume( + AvailabilityZone='us-west-2', + Encrypted=False, + Size=40, + TagSpecifications=[ + { + 'ResourceType': 'volume', + 'Tags': [ + { + 'Key': 'TEST_TAG', + 'Value': 'TEST_VALUE' + } + ], + } + ] + ) + + assert response['Tags'][0]['Key'] == 'TEST_TAG' + + +@mock_ec2 +def 
test_create_snapshot_with_tags(): + client = boto3.client('ec2', 'us-west-2') + volume_id = client.create_volume( + AvailabilityZone='us-west-2', + Encrypted=False, + Size=40, + TagSpecifications=[ + { + 'ResourceType': 'volume', + 'Tags': [ + { + 'Key': 'TEST_TAG', + 'Value': 'TEST_VALUE' + } + ], + } + ] + )['VolumeId'] + snapshot = client.create_snapshot( + VolumeId=volume_id, + TagSpecifications=[ + { + 'ResourceType': 'snapshot', + 'Tags': [ + { + 'Key': 'TEST_SNAPSHOT_TAG', + 'Value': 'TEST_SNAPSHOT_VALUE' + } + ], + } + ] + ) + + expected_tags = [{ + 'Key': 'TEST_SNAPSHOT_TAG', + 'Value': 'TEST_SNAPSHOT_VALUE' + }] + + assert snapshot['Tags'] == expected_tags diff --git a/tests/test_ec2/test_utils.py b/tests/test_ec2/test_utils.py index ef540e193883..3e7a37a7a363 100644 --- a/tests/test_ec2/test_utils.py +++ b/tests/test_ec2/test_utils.py @@ -1,8 +1,8 @@ -from moto.ec2 import utils - - -def test_random_key_pair(): - key_pair = utils.random_key_pair() - assert len(key_pair['fingerprint']) == 59 - assert key_pair['material'].startswith('---- BEGIN RSA PRIVATE KEY ----') - assert key_pair['material'].endswith('-----END RSA PRIVATE KEY-----') +from moto.ec2 import utils + + +def test_random_key_pair(): + key_pair = utils.random_key_pair() + assert len(key_pair['fingerprint']) == 59 + assert key_pair['material'].startswith('---- BEGIN RSA PRIVATE KEY ----') + assert key_pair['material'].endswith('-----END RSA PRIVATE KEY-----') diff --git a/tests/test_ec2/test_virtual_private_gateways.py b/tests/test_ec2/test_virtual_private_gateways.py index d90e97b452f1..a57bdc59fc6a 100644 --- a/tests/test_ec2/test_virtual_private_gateways.py +++ b/tests/test_ec2/test_virtual_private_gateways.py @@ -1,105 +1,105 @@ -from __future__ import unicode_literals -import boto -import sure # noqa - -from moto import mock_ec2_deprecated - - -@mock_ec2_deprecated -def test_virtual_private_gateways(): - conn = boto.connect_vpc('the_key', 'the_secret') - - vpn_gateway = conn.create_vpn_gateway('ipsec.1', 'us-east-1a') - vpn_gateway.should_not.be.none - vpn_gateway.id.should.match(r'vgw-\w+') - vpn_gateway.type.should.equal('ipsec.1') - vpn_gateway.state.should.equal('available') - vpn_gateway.availability_zone.should.equal('us-east-1a') - - -@mock_ec2_deprecated -def test_describe_vpn_gateway(): - conn = boto.connect_vpc('the_key', 'the_secret') - vpn_gateway = conn.create_vpn_gateway('ipsec.1', 'us-east-1a') - - vgws = conn.get_all_vpn_gateways() - vgws.should.have.length_of(1) - - gateway = vgws[0] - gateway.id.should.match(r'vgw-\w+') - gateway.id.should.equal(vpn_gateway.id) - vpn_gateway.type.should.equal('ipsec.1') - vpn_gateway.state.should.equal('available') - vpn_gateway.availability_zone.should.equal('us-east-1a') - - -@mock_ec2_deprecated -def test_vpn_gateway_vpc_attachment(): - conn = boto.connect_vpc('the_key', 'the_secret') - vpc = conn.create_vpc("10.0.0.0/16") - vpn_gateway = conn.create_vpn_gateway('ipsec.1', 'us-east-1a') - - conn.attach_vpn_gateway( - vpn_gateway_id=vpn_gateway.id, - vpc_id=vpc.id - ) - - gateway = conn.get_all_vpn_gateways()[0] - attachments = gateway.attachments - attachments.should.have.length_of(1) - attachments[0].vpc_id.should.equal(vpc.id) - attachments[0].state.should.equal('attached') - - -@mock_ec2_deprecated -def test_delete_vpn_gateway(): - conn = boto.connect_vpc('the_key', 'the_secret') - vpn_gateway = conn.create_vpn_gateway('ipsec.1', 'us-east-1a') - - conn.delete_vpn_gateway(vpn_gateway.id) - vgws = conn.get_all_vpn_gateways() - vgws.should.have.length_of(0) - 
- -@mock_ec2_deprecated -def test_vpn_gateway_tagging(): - conn = boto.connect_vpc('the_key', 'the_secret') - vpn_gateway = conn.create_vpn_gateway('ipsec.1', 'us-east-1a') - vpn_gateway.add_tag("a key", "some value") - - tag = conn.get_all_tags()[0] - tag.name.should.equal("a key") - tag.value.should.equal("some value") - - # Refresh the subnet - vpn_gateway = conn.get_all_vpn_gateways()[0] - vpn_gateway.tags.should.have.length_of(1) - vpn_gateway.tags["a key"].should.equal("some value") - - -@mock_ec2_deprecated -def test_detach_vpn_gateway(): - - conn = boto.connect_vpc('the_key', 'the_secret') - vpc = conn.create_vpc("10.0.0.0/16") - vpn_gateway = conn.create_vpn_gateway('ipsec.1', 'us-east-1a') - - conn.attach_vpn_gateway( - vpn_gateway_id=vpn_gateway.id, - vpc_id=vpc.id - ) - - gateway = conn.get_all_vpn_gateways()[0] - attachments = gateway.attachments - attachments.should.have.length_of(1) - attachments[0].vpc_id.should.equal(vpc.id) - attachments[0].state.should.equal('attached') - - conn.detach_vpn_gateway( - vpn_gateway_id=vpn_gateway.id, - vpc_id=vpc.id - ) - - gateway = conn.get_all_vpn_gateways()[0] - attachments = gateway.attachments - attachments.should.have.length_of(0) +from __future__ import unicode_literals +import boto +import sure # noqa + +from moto import mock_ec2_deprecated + + +@mock_ec2_deprecated +def test_virtual_private_gateways(): + conn = boto.connect_vpc('the_key', 'the_secret') + + vpn_gateway = conn.create_vpn_gateway('ipsec.1', 'us-east-1a') + vpn_gateway.should_not.be.none + vpn_gateway.id.should.match(r'vgw-\w+') + vpn_gateway.type.should.equal('ipsec.1') + vpn_gateway.state.should.equal('available') + vpn_gateway.availability_zone.should.equal('us-east-1a') + + +@mock_ec2_deprecated +def test_describe_vpn_gateway(): + conn = boto.connect_vpc('the_key', 'the_secret') + vpn_gateway = conn.create_vpn_gateway('ipsec.1', 'us-east-1a') + + vgws = conn.get_all_vpn_gateways() + vgws.should.have.length_of(1) + + gateway = vgws[0] + gateway.id.should.match(r'vgw-\w+') + gateway.id.should.equal(vpn_gateway.id) + vpn_gateway.type.should.equal('ipsec.1') + vpn_gateway.state.should.equal('available') + vpn_gateway.availability_zone.should.equal('us-east-1a') + + +@mock_ec2_deprecated +def test_vpn_gateway_vpc_attachment(): + conn = boto.connect_vpc('the_key', 'the_secret') + vpc = conn.create_vpc("10.0.0.0/16") + vpn_gateway = conn.create_vpn_gateway('ipsec.1', 'us-east-1a') + + conn.attach_vpn_gateway( + vpn_gateway_id=vpn_gateway.id, + vpc_id=vpc.id + ) + + gateway = conn.get_all_vpn_gateways()[0] + attachments = gateway.attachments + attachments.should.have.length_of(1) + attachments[0].vpc_id.should.equal(vpc.id) + attachments[0].state.should.equal('attached') + + +@mock_ec2_deprecated +def test_delete_vpn_gateway(): + conn = boto.connect_vpc('the_key', 'the_secret') + vpn_gateway = conn.create_vpn_gateway('ipsec.1', 'us-east-1a') + + conn.delete_vpn_gateway(vpn_gateway.id) + vgws = conn.get_all_vpn_gateways() + vgws.should.have.length_of(0) + + +@mock_ec2_deprecated +def test_vpn_gateway_tagging(): + conn = boto.connect_vpc('the_key', 'the_secret') + vpn_gateway = conn.create_vpn_gateway('ipsec.1', 'us-east-1a') + vpn_gateway.add_tag("a key", "some value") + + tag = conn.get_all_tags()[0] + tag.name.should.equal("a key") + tag.value.should.equal("some value") + + # Refresh the subnet + vpn_gateway = conn.get_all_vpn_gateways()[0] + vpn_gateway.tags.should.have.length_of(1) + vpn_gateway.tags["a key"].should.equal("some value") + + +@mock_ec2_deprecated +def 
test_detach_vpn_gateway(): + + conn = boto.connect_vpc('the_key', 'the_secret') + vpc = conn.create_vpc("10.0.0.0/16") + vpn_gateway = conn.create_vpn_gateway('ipsec.1', 'us-east-1a') + + conn.attach_vpn_gateway( + vpn_gateway_id=vpn_gateway.id, + vpc_id=vpc.id + ) + + gateway = conn.get_all_vpn_gateways()[0] + attachments = gateway.attachments + attachments.should.have.length_of(1) + attachments[0].vpc_id.should.equal(vpc.id) + attachments[0].state.should.equal('attached') + + conn.detach_vpn_gateway( + vpn_gateway_id=vpn_gateway.id, + vpc_id=vpc.id + ) + + gateway = conn.get_all_vpn_gateways()[0] + attachments = gateway.attachments + attachments.should.have.length_of(0) diff --git a/tests/test_ec2/test_vm_export.py b/tests/test_ec2/test_vm_export.py index f8b24f6d452b..08215d067082 100644 --- a/tests/test_ec2/test_vm_export.py +++ b/tests/test_ec2/test_vm_export.py @@ -1,10 +1,10 @@ -from __future__ import unicode_literals -import boto -import sure # noqa - -from moto import mock_ec2 - - -@mock_ec2 -def test_vm_export(): - pass +from __future__ import unicode_literals +import boto +import sure # noqa + +from moto import mock_ec2 + + +@mock_ec2 +def test_vm_export(): + pass diff --git a/tests/test_ec2/test_vm_import.py b/tests/test_ec2/test_vm_import.py index 66c7561a70a0..0ebfaaa0c26b 100644 --- a/tests/test_ec2/test_vm_import.py +++ b/tests/test_ec2/test_vm_import.py @@ -1,10 +1,10 @@ -from __future__ import unicode_literals -import boto -import sure # noqa - -from moto import mock_ec2 - - -@mock_ec2 -def test_vm_import(): - pass +from __future__ import unicode_literals +import boto +import sure # noqa + +from moto import mock_ec2 + + +@mock_ec2 +def test_vm_import(): + pass diff --git a/tests/test_ec2/test_vpc_peering.py b/tests/test_ec2/test_vpc_peering.py index 1f98791b333e..4aab5f04164a 100644 --- a/tests/test_ec2/test_vpc_peering.py +++ b/tests/test_ec2/test_vpc_peering.py @@ -1,132 +1,132 @@ -from __future__ import unicode_literals -# Ensure 'assert_raises' context manager support for Python 2.6 -import tests.backport_assert_raises -from nose.tools import assert_raises -from moto.ec2.exceptions import EC2ClientError -from botocore.exceptions import ClientError - -import boto3 -import boto -from boto.exception import EC2ResponseError -import sure # noqa - -from moto import mock_ec2, mock_ec2_deprecated -from tests.helpers import requires_boto_gte - - -@requires_boto_gte("2.32.0") -@mock_ec2_deprecated -def test_vpc_peering_connections(): - conn = boto.connect_vpc('the_key', 'the_secret') - vpc = conn.create_vpc("10.0.0.0/16") - peer_vpc = conn.create_vpc("11.0.0.0/16") - - vpc_pcx = conn.create_vpc_peering_connection(vpc.id, peer_vpc.id) - vpc_pcx._status.code.should.equal('initiating-request') - - return vpc_pcx - - -@requires_boto_gte("2.32.0") -@mock_ec2_deprecated -def test_vpc_peering_connections_get_all(): - conn = boto.connect_vpc('the_key', 'the_secret') - vpc_pcx = test_vpc_peering_connections() - vpc_pcx._status.code.should.equal('initiating-request') - - all_vpc_pcxs = conn.get_all_vpc_peering_connections() - all_vpc_pcxs.should.have.length_of(1) - all_vpc_pcxs[0]._status.code.should.equal('pending-acceptance') - - -@requires_boto_gte("2.32.0") -@mock_ec2_deprecated -def test_vpc_peering_connections_accept(): - conn = boto.connect_vpc('the_key', 'the_secret') - vpc_pcx = test_vpc_peering_connections() - - vpc_pcx = conn.accept_vpc_peering_connection(vpc_pcx.id) - vpc_pcx._status.code.should.equal('active') - - with assert_raises(EC2ResponseError) as cm: - 
conn.reject_vpc_peering_connection(vpc_pcx.id) - cm.exception.code.should.equal('InvalidStateTransition') - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none - - all_vpc_pcxs = conn.get_all_vpc_peering_connections() - all_vpc_pcxs.should.have.length_of(1) - all_vpc_pcxs[0]._status.code.should.equal('active') - - -@requires_boto_gte("2.32.0") -@mock_ec2_deprecated -def test_vpc_peering_connections_reject(): - conn = boto.connect_vpc('the_key', 'the_secret') - vpc_pcx = test_vpc_peering_connections() - - verdict = conn.reject_vpc_peering_connection(vpc_pcx.id) - verdict.should.equal(True) - - with assert_raises(EC2ResponseError) as cm: - conn.accept_vpc_peering_connection(vpc_pcx.id) - cm.exception.code.should.equal('InvalidStateTransition') - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none - - all_vpc_pcxs = conn.get_all_vpc_peering_connections() - all_vpc_pcxs.should.have.length_of(1) - all_vpc_pcxs[0]._status.code.should.equal('rejected') - - -@requires_boto_gte("2.32.1") -@mock_ec2_deprecated -def test_vpc_peering_connections_delete(): - conn = boto.connect_vpc('the_key', 'the_secret') - vpc_pcx = test_vpc_peering_connections() - - verdict = vpc_pcx.delete() - verdict.should.equal(True) - - all_vpc_pcxs = conn.get_all_vpc_peering_connections() - all_vpc_pcxs.should.have.length_of(0) - - with assert_raises(EC2ResponseError) as cm: - conn.delete_vpc_peering_connection("pcx-1234abcd") - cm.exception.code.should.equal('InvalidVpcPeeringConnectionId.NotFound') - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none - - -@mock_ec2 -def test_vpc_peering_connections_cross_region(): - # create vpc in us-west-1 and ap-northeast-1 - ec2_usw1 = boto3.resource('ec2', region_name='us-west-1') - vpc_usw1 = ec2_usw1.create_vpc(CidrBlock='10.90.0.0/16') - ec2_apn1 = boto3.resource('ec2', region_name='ap-northeast-1') - vpc_apn1 = ec2_apn1.create_vpc(CidrBlock='10.20.0.0/16') - # create peering - vpc_pcx = ec2_usw1.create_vpc_peering_connection( - VpcId=vpc_usw1.id, - PeerVpcId=vpc_apn1.id, - PeerRegion='ap-northeast-1', - ) - vpc_pcx.status['Code'].should.equal('initiating-request') - vpc_pcx.requester_vpc.id.should.equal(vpc_usw1.id) - vpc_pcx.accepter_vpc.id.should.equal(vpc_apn1.id) - - -@mock_ec2 -def test_vpc_peering_connections_cross_region_fail(): - # create vpc in us-west-1 and ap-northeast-1 - ec2_usw1 = boto3.resource('ec2', region_name='us-west-1') - vpc_usw1 = ec2_usw1.create_vpc(CidrBlock='10.90.0.0/16') - ec2_apn1 = boto3.resource('ec2', region_name='ap-northeast-1') - vpc_apn1 = ec2_apn1.create_vpc(CidrBlock='10.20.0.0/16') - # create peering wrong region with no vpc - with assert_raises(ClientError) as cm: - ec2_usw1.create_vpc_peering_connection( - VpcId=vpc_usw1.id, - PeerVpcId=vpc_apn1.id, - PeerRegion='ap-northeast-2') - cm.exception.response['Error']['Code'].should.equal('InvalidVpcID.NotFound') +from __future__ import unicode_literals +# Ensure 'assert_raises' context manager support for Python 2.6 +import tests.backport_assert_raises +from nose.tools import assert_raises +from moto.ec2.exceptions import EC2ClientError +from botocore.exceptions import ClientError + +import boto3 +import boto +from boto.exception import EC2ResponseError +import sure # noqa + +from moto import mock_ec2, mock_ec2_deprecated +from tests.helpers import requires_boto_gte + + +@requires_boto_gte("2.32.0") +@mock_ec2_deprecated +def test_vpc_peering_connections(): + conn = boto.connect_vpc('the_key', 
'the_secret') + vpc = conn.create_vpc("10.0.0.0/16") + peer_vpc = conn.create_vpc("11.0.0.0/16") + + vpc_pcx = conn.create_vpc_peering_connection(vpc.id, peer_vpc.id) + vpc_pcx._status.code.should.equal('initiating-request') + + return vpc_pcx + + +@requires_boto_gte("2.32.0") +@mock_ec2_deprecated +def test_vpc_peering_connections_get_all(): + conn = boto.connect_vpc('the_key', 'the_secret') + vpc_pcx = test_vpc_peering_connections() + vpc_pcx._status.code.should.equal('initiating-request') + + all_vpc_pcxs = conn.get_all_vpc_peering_connections() + all_vpc_pcxs.should.have.length_of(1) + all_vpc_pcxs[0]._status.code.should.equal('pending-acceptance') + + +@requires_boto_gte("2.32.0") +@mock_ec2_deprecated +def test_vpc_peering_connections_accept(): + conn = boto.connect_vpc('the_key', 'the_secret') + vpc_pcx = test_vpc_peering_connections() + + vpc_pcx = conn.accept_vpc_peering_connection(vpc_pcx.id) + vpc_pcx._status.code.should.equal('active') + + with assert_raises(EC2ResponseError) as cm: + conn.reject_vpc_peering_connection(vpc_pcx.id) + cm.exception.code.should.equal('InvalidStateTransition') + cm.exception.status.should.equal(400) + cm.exception.request_id.should_not.be.none + + all_vpc_pcxs = conn.get_all_vpc_peering_connections() + all_vpc_pcxs.should.have.length_of(1) + all_vpc_pcxs[0]._status.code.should.equal('active') + + +@requires_boto_gte("2.32.0") +@mock_ec2_deprecated +def test_vpc_peering_connections_reject(): + conn = boto.connect_vpc('the_key', 'the_secret') + vpc_pcx = test_vpc_peering_connections() + + verdict = conn.reject_vpc_peering_connection(vpc_pcx.id) + verdict.should.equal(True) + + with assert_raises(EC2ResponseError) as cm: + conn.accept_vpc_peering_connection(vpc_pcx.id) + cm.exception.code.should.equal('InvalidStateTransition') + cm.exception.status.should.equal(400) + cm.exception.request_id.should_not.be.none + + all_vpc_pcxs = conn.get_all_vpc_peering_connections() + all_vpc_pcxs.should.have.length_of(1) + all_vpc_pcxs[0]._status.code.should.equal('rejected') + + +@requires_boto_gte("2.32.1") +@mock_ec2_deprecated +def test_vpc_peering_connections_delete(): + conn = boto.connect_vpc('the_key', 'the_secret') + vpc_pcx = test_vpc_peering_connections() + + verdict = vpc_pcx.delete() + verdict.should.equal(True) + + all_vpc_pcxs = conn.get_all_vpc_peering_connections() + all_vpc_pcxs.should.have.length_of(0) + + with assert_raises(EC2ResponseError) as cm: + conn.delete_vpc_peering_connection("pcx-1234abcd") + cm.exception.code.should.equal('InvalidVpcPeeringConnectionId.NotFound') + cm.exception.status.should.equal(400) + cm.exception.request_id.should_not.be.none + + +@mock_ec2 +def test_vpc_peering_connections_cross_region(): + # create vpc in us-west-1 and ap-northeast-1 + ec2_usw1 = boto3.resource('ec2', region_name='us-west-1') + vpc_usw1 = ec2_usw1.create_vpc(CidrBlock='10.90.0.0/16') + ec2_apn1 = boto3.resource('ec2', region_name='ap-northeast-1') + vpc_apn1 = ec2_apn1.create_vpc(CidrBlock='10.20.0.0/16') + # create peering + vpc_pcx = ec2_usw1.create_vpc_peering_connection( + VpcId=vpc_usw1.id, + PeerVpcId=vpc_apn1.id, + PeerRegion='ap-northeast-1', + ) + vpc_pcx.status['Code'].should.equal('initiating-request') + vpc_pcx.requester_vpc.id.should.equal(vpc_usw1.id) + vpc_pcx.accepter_vpc.id.should.equal(vpc_apn1.id) + + +@mock_ec2 +def test_vpc_peering_connections_cross_region_fail(): + # create vpc in us-west-1 and ap-northeast-1 + ec2_usw1 = boto3.resource('ec2', region_name='us-west-1') + vpc_usw1 = ec2_usw1.create_vpc(CidrBlock='10.90.0.0/16') 
+ ec2_apn1 = boto3.resource('ec2', region_name='ap-northeast-1') + vpc_apn1 = ec2_apn1.create_vpc(CidrBlock='10.20.0.0/16') + # create peering wrong region with no vpc + with assert_raises(ClientError) as cm: + ec2_usw1.create_vpc_peering_connection( + VpcId=vpc_usw1.id, + PeerVpcId=vpc_apn1.id, + PeerRegion='ap-northeast-2') + cm.exception.response['Error']['Code'].should.equal('InvalidVpcID.NotFound') diff --git a/tests/test_ec2/test_vpcs.py b/tests/test_ec2/test_vpcs.py index 318491b44b6a..4556e5ea0d21 100644 --- a/tests/test_ec2/test_vpcs.py +++ b/tests/test_ec2/test_vpcs.py @@ -1,541 +1,541 @@ -from __future__ import unicode_literals -# Ensure 'assert_raises' context manager support for Python 2.6 -import tests.backport_assert_raises # flake8: noqa -from nose.tools import assert_raises -from moto.ec2.exceptions import EC2ClientError -from botocore.exceptions import ClientError - -import boto3 -import boto -from boto.exception import EC2ResponseError -import sure # noqa - -from moto import mock_ec2, mock_ec2_deprecated - -SAMPLE_DOMAIN_NAME = u'example.com' -SAMPLE_NAME_SERVERS = [u'10.0.0.6', u'10.0.0.7'] - - -@mock_ec2_deprecated -def test_vpcs(): - conn = boto.connect_vpc('the_key', 'the_secret') - vpc = conn.create_vpc("10.0.0.0/16") - vpc.cidr_block.should.equal('10.0.0.0/16') - - all_vpcs = conn.get_all_vpcs() - all_vpcs.should.have.length_of(2) - - vpc.delete() - - all_vpcs = conn.get_all_vpcs() - all_vpcs.should.have.length_of(1) - - with assert_raises(EC2ResponseError) as cm: - conn.delete_vpc("vpc-1234abcd") - cm.exception.code.should.equal('InvalidVpcID.NotFound') - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none - - -@mock_ec2_deprecated -def test_vpc_defaults(): - conn = boto.connect_vpc('the_key', 'the_secret') - vpc = conn.create_vpc("10.0.0.0/16") - - conn.get_all_vpcs().should.have.length_of(2) - conn.get_all_route_tables().should.have.length_of(2) - conn.get_all_security_groups( - filters={'vpc-id': [vpc.id]}).should.have.length_of(1) - - vpc.delete() - - conn.get_all_vpcs().should.have.length_of(1) - conn.get_all_route_tables().should.have.length_of(1) - conn.get_all_security_groups( - filters={'vpc-id': [vpc.id]}).should.have.length_of(0) - - -@mock_ec2_deprecated -def test_vpc_isdefault_filter(): - conn = boto.connect_vpc('the_key', 'the_secret') - vpc = conn.create_vpc("10.0.0.0/16") - conn.get_all_vpcs(filters={'isDefault': 'true'}).should.have.length_of(1) - vpc.delete() - conn.get_all_vpcs(filters={'isDefault': 'true'}).should.have.length_of(1) - - -@mock_ec2_deprecated -def test_multiple_vpcs_default_filter(): - conn = boto.connect_vpc('the_key', 'the_secret') - conn.create_vpc("10.8.0.0/16") - conn.create_vpc("10.0.0.0/16") - conn.create_vpc("192.168.0.0/16") - conn.get_all_vpcs().should.have.length_of(4) - vpc = conn.get_all_vpcs(filters={'isDefault': 'true'}) - vpc.should.have.length_of(1) - vpc[0].cidr_block.should.equal('172.31.0.0/16') - - -@mock_ec2_deprecated -def test_vpc_state_available_filter(): - conn = boto.connect_vpc('the_key', 'the_secret') - vpc = conn.create_vpc("10.0.0.0/16") - conn.create_vpc("10.1.0.0/16") - conn.get_all_vpcs(filters={'state': 'available'}).should.have.length_of(3) - vpc.delete() - conn.get_all_vpcs(filters={'state': 'available'}).should.have.length_of(2) - - -@mock_ec2_deprecated -def test_vpc_tagging(): - conn = boto.connect_vpc() - vpc = conn.create_vpc("10.0.0.0/16") - - vpc.add_tag("a key", "some value") - tag = conn.get_all_tags()[0] - tag.name.should.equal("a key") - 
tag.value.should.equal("some value") - - # Refresh the vpc - vpc = conn.get_all_vpcs(vpc_ids=[vpc.id])[0] - vpc.tags.should.have.length_of(1) - vpc.tags["a key"].should.equal("some value") - - -@mock_ec2_deprecated -def test_vpc_get_by_id(): - conn = boto.connect_vpc() - vpc1 = conn.create_vpc("10.0.0.0/16") - vpc2 = conn.create_vpc("10.0.0.0/16") - conn.create_vpc("10.0.0.0/16") - - vpcs = conn.get_all_vpcs(vpc_ids=[vpc1.id, vpc2.id]) - vpcs.should.have.length_of(2) - vpc_ids = tuple(map(lambda v: v.id, vpcs)) - vpc1.id.should.be.within(vpc_ids) - vpc2.id.should.be.within(vpc_ids) - - with assert_raises(EC2ResponseError) as cm: - conn.get_all_vpcs(vpc_ids=['vpc-does_not_exist']) - cm.exception.code.should.equal('InvalidVpcID.NotFound') - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none - - -@mock_ec2_deprecated -def test_vpc_get_by_cidr_block(): - conn = boto.connect_vpc() - vpc1 = conn.create_vpc("10.0.0.0/16") - vpc2 = conn.create_vpc("10.0.0.0/16") - conn.create_vpc("10.0.0.0/24") - - vpcs = conn.get_all_vpcs(filters={'cidr': '10.0.0.0/16'}) - vpcs.should.have.length_of(2) - vpc_ids = tuple(map(lambda v: v.id, vpcs)) - vpc1.id.should.be.within(vpc_ids) - vpc2.id.should.be.within(vpc_ids) - - -@mock_ec2_deprecated -def test_vpc_get_by_dhcp_options_id(): - conn = boto.connect_vpc() - dhcp_options = conn.create_dhcp_options( - SAMPLE_DOMAIN_NAME, SAMPLE_NAME_SERVERS) - vpc1 = conn.create_vpc("10.0.0.0/16") - vpc2 = conn.create_vpc("10.0.0.0/16") - conn.create_vpc("10.0.0.0/24") - - conn.associate_dhcp_options(dhcp_options.id, vpc1.id) - conn.associate_dhcp_options(dhcp_options.id, vpc2.id) - - vpcs = conn.get_all_vpcs(filters={'dhcp-options-id': dhcp_options.id}) - vpcs.should.have.length_of(2) - vpc_ids = tuple(map(lambda v: v.id, vpcs)) - vpc1.id.should.be.within(vpc_ids) - vpc2.id.should.be.within(vpc_ids) - - -@mock_ec2_deprecated -def test_vpc_get_by_tag(): - conn = boto.connect_vpc() - vpc1 = conn.create_vpc("10.0.0.0/16") - vpc2 = conn.create_vpc("10.0.0.0/16") - vpc3 = conn.create_vpc("10.0.0.0/24") - - vpc1.add_tag('Name', 'TestVPC') - vpc2.add_tag('Name', 'TestVPC') - vpc3.add_tag('Name', 'TestVPC2') - - vpcs = conn.get_all_vpcs(filters={'tag:Name': 'TestVPC'}) - vpcs.should.have.length_of(2) - vpc_ids = tuple(map(lambda v: v.id, vpcs)) - vpc1.id.should.be.within(vpc_ids) - vpc2.id.should.be.within(vpc_ids) - - -@mock_ec2_deprecated -def test_vpc_get_by_tag_key_superset(): - conn = boto.connect_vpc() - vpc1 = conn.create_vpc("10.0.0.0/16") - vpc2 = conn.create_vpc("10.0.0.0/16") - vpc3 = conn.create_vpc("10.0.0.0/24") - - vpc1.add_tag('Name', 'TestVPC') - vpc1.add_tag('Key', 'TestVPC2') - vpc2.add_tag('Name', 'TestVPC') - vpc2.add_tag('Key', 'TestVPC2') - vpc3.add_tag('Key', 'TestVPC2') - - vpcs = conn.get_all_vpcs(filters={'tag-key': 'Name'}) - vpcs.should.have.length_of(2) - vpc_ids = tuple(map(lambda v: v.id, vpcs)) - vpc1.id.should.be.within(vpc_ids) - vpc2.id.should.be.within(vpc_ids) - - -@mock_ec2_deprecated -def test_vpc_get_by_tag_key_subset(): - conn = boto.connect_vpc() - vpc1 = conn.create_vpc("10.0.0.0/16") - vpc2 = conn.create_vpc("10.0.0.0/16") - vpc3 = conn.create_vpc("10.0.0.0/24") - - vpc1.add_tag('Name', 'TestVPC') - vpc1.add_tag('Key', 'TestVPC2') - vpc2.add_tag('Name', 'TestVPC') - vpc2.add_tag('Key', 'TestVPC2') - vpc3.add_tag('Test', 'TestVPC2') - - vpcs = conn.get_all_vpcs(filters={'tag-key': ['Name', 'Key']}) - vpcs.should.have.length_of(2) - vpc_ids = tuple(map(lambda v: v.id, vpcs)) - vpc1.id.should.be.within(vpc_ids) 
- vpc2.id.should.be.within(vpc_ids) - - -@mock_ec2_deprecated -def test_vpc_get_by_tag_value_superset(): - conn = boto.connect_vpc() - vpc1 = conn.create_vpc("10.0.0.0/16") - vpc2 = conn.create_vpc("10.0.0.0/16") - vpc3 = conn.create_vpc("10.0.0.0/24") - - vpc1.add_tag('Name', 'TestVPC') - vpc1.add_tag('Key', 'TestVPC2') - vpc2.add_tag('Name', 'TestVPC') - vpc2.add_tag('Key', 'TestVPC2') - vpc3.add_tag('Key', 'TestVPC2') - - vpcs = conn.get_all_vpcs(filters={'tag-value': 'TestVPC'}) - vpcs.should.have.length_of(2) - vpc_ids = tuple(map(lambda v: v.id, vpcs)) - vpc1.id.should.be.within(vpc_ids) - vpc2.id.should.be.within(vpc_ids) - - -@mock_ec2_deprecated -def test_vpc_get_by_tag_value_subset(): - conn = boto.connect_vpc() - vpc1 = conn.create_vpc("10.0.0.0/16") - vpc2 = conn.create_vpc("10.0.0.0/16") - conn.create_vpc("10.0.0.0/24") - - vpc1.add_tag('Name', 'TestVPC') - vpc1.add_tag('Key', 'TestVPC2') - vpc2.add_tag('Name', 'TestVPC') - vpc2.add_tag('Key', 'TestVPC2') - - vpcs = conn.get_all_vpcs(filters={'tag-value': ['TestVPC', 'TestVPC2']}) - vpcs.should.have.length_of(2) - vpc_ids = tuple(map(lambda v: v.id, vpcs)) - vpc1.id.should.be.within(vpc_ids) - vpc2.id.should.be.within(vpc_ids) - - -@mock_ec2 -def test_default_vpc(): - ec2 = boto3.resource('ec2', region_name='us-west-1') - - # Create the default VPC - default_vpc = list(ec2.vpcs.all())[0] - default_vpc.cidr_block.should.equal('172.31.0.0/16') - default_vpc.instance_tenancy.should.equal('default') - default_vpc.reload() - default_vpc.is_default.should.be.ok - - # Test default values for VPC attributes - response = default_vpc.describe_attribute(Attribute='enableDnsSupport') - attr = response.get('EnableDnsSupport') - attr.get('Value').should.be.ok - - response = default_vpc.describe_attribute(Attribute='enableDnsHostnames') - attr = response.get('EnableDnsHostnames') - attr.get('Value').should.be.ok - - -@mock_ec2 -def test_non_default_vpc(): - ec2 = boto3.resource('ec2', region_name='us-west-1') - - # Create the default VPC - this already exists when backend instantiated! 
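These assertions assume moto pre-creates a default VPC (172.31.0.0/16) in every region as soon as the backend is instantiated, which is why the VPC counts above run one higher than the number of VPCs the tests create, and why the create_vpc call just below stays commented out. A quick way to observe this, as a standalone sketch using the same decorators:

from moto import mock_ec2
import boto3

@mock_ec2
def show_default_vpc():
    ec2 = boto3.client('ec2', region_name='us-west-1')
    vpcs = ec2.describe_vpcs()['Vpcs']
    # Exactly one VPC exists before any are created: the default 172.31.0.0/16
    return [(v['VpcId'], v['CidrBlock'], v['IsDefault']) for v in vpcs]

show_default_vpc()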
- #ec2.create_vpc(CidrBlock='172.31.0.0/16') - - # Create the non default VPC - vpc = ec2.create_vpc(CidrBlock='10.0.0.0/16') - vpc.reload() - vpc.is_default.shouldnt.be.ok - - # Test default instance_tenancy - vpc.instance_tenancy.should.equal('default') - - # Test default values for VPC attributes - response = vpc.describe_attribute(Attribute='enableDnsSupport') - attr = response.get('EnableDnsSupport') - attr.get('Value').should.be.ok - - response = vpc.describe_attribute(Attribute='enableDnsHostnames') - attr = response.get('EnableDnsHostnames') - attr.get('Value').shouldnt.be.ok - - # Check Primary CIDR Block Associations - cidr_block_association_set = next(iter(vpc.cidr_block_association_set), None) - cidr_block_association_set['CidrBlockState']['State'].should.equal('associated') - cidr_block_association_set['CidrBlock'].should.equal(vpc.cidr_block) - cidr_block_association_set['AssociationId'].should.contain('vpc-cidr-assoc') - - -@mock_ec2 -def test_vpc_dedicated_tenancy(): - ec2 = boto3.resource('ec2', region_name='us-west-1') - - # Create the default VPC - ec2.create_vpc(CidrBlock='172.31.0.0/16') - - # Create the non default VPC - vpc = ec2.create_vpc(CidrBlock='10.0.0.0/16', InstanceTenancy='dedicated') - vpc.reload() - vpc.is_default.shouldnt.be.ok - - vpc.instance_tenancy.should.equal('dedicated') - - -@mock_ec2 -def test_vpc_modify_enable_dns_support(): - ec2 = boto3.resource('ec2', region_name='us-west-1') - - # Create the default VPC - ec2.create_vpc(CidrBlock='172.31.0.0/16') - - vpc = ec2.create_vpc(CidrBlock='10.0.0.0/16') - - # Test default values for VPC attributes - response = vpc.describe_attribute(Attribute='enableDnsSupport') - attr = response.get('EnableDnsSupport') - attr.get('Value').should.be.ok - - vpc.modify_attribute(EnableDnsSupport={'Value': False}) - - response = vpc.describe_attribute(Attribute='enableDnsSupport') - attr = response.get('EnableDnsSupport') - attr.get('Value').shouldnt.be.ok - - -@mock_ec2 -def test_vpc_modify_enable_dns_hostnames(): - ec2 = boto3.resource('ec2', region_name='us-west-1') - - # Create the default VPC - ec2.create_vpc(CidrBlock='172.31.0.0/16') - - vpc = ec2.create_vpc(CidrBlock='10.0.0.0/16') - # Test default values for VPC attributes - response = vpc.describe_attribute(Attribute='enableDnsHostnames') - attr = response.get('EnableDnsHostnames') - attr.get('Value').shouldnt.be.ok - - vpc.modify_attribute(EnableDnsHostnames={'Value': True}) - - response = vpc.describe_attribute(Attribute='enableDnsHostnames') - attr = response.get('EnableDnsHostnames') - attr.get('Value').should.be.ok - - -@mock_ec2_deprecated -def test_vpc_associate_dhcp_options(): - conn = boto.connect_vpc() - dhcp_options = conn.create_dhcp_options( - SAMPLE_DOMAIN_NAME, SAMPLE_NAME_SERVERS) - vpc = conn.create_vpc("10.0.0.0/16") - - conn.associate_dhcp_options(dhcp_options.id, vpc.id) - - vpc.update() - dhcp_options.id.should.equal(vpc.dhcp_options_id) - - -@mock_ec2 -def test_associate_vpc_ipv4_cidr_block(): - ec2 = boto3.resource('ec2', region_name='us-west-1') - - vpc = ec2.create_vpc(CidrBlock='10.10.42.0/24') - - # Associate/Extend vpc CIDR range up to 5 ciders - for i in range(43, 47): - response = ec2.meta.client.associate_vpc_cidr_block(VpcId=vpc.id, CidrBlock='10.10.{}.0/24'.format(i)) - response['CidrBlockAssociation']['CidrBlockState']['State'].should.equal('associating') - response['CidrBlockAssociation']['CidrBlock'].should.equal('10.10.{}.0/24'.format(i)) - response['CidrBlockAssociation']['AssociationId'].should.contain('vpc-cidr-assoc') 
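The association loop above stops at five CIDR blocks in total because AWS caps a VPC at its primary IPv4 CIDR plus, by default, four secondary associations, and moto mirrors that quota; the sixth attempt is expected to fail with CidrLimitExceeded, as asserted just below. The bookkeeping amounts to a simple guard, sketched here with hypothetical names rather than moto's actual model:

MAX_VPC_CIDR_BLOCKS = 5  # primary CIDR + 4 secondary associations (default AWS quota)

def associate_cidr(existing_cidrs, new_cidr):
    # existing_cidrs: list of CIDR strings already associated with the VPC
    if len(existing_cidrs) >= MAX_VPC_CIDR_BLOCKS:
        raise ValueError('CidrLimitExceeded: maximum number of allowed CIDRs: %d'
                         % MAX_VPC_CIDR_BLOCKS)
    existing_cidrs.append(new_cidr)
    return new_cidr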
- - # Check all associations exist - vpc = ec2.Vpc(vpc.id) - vpc.cidr_block_association_set.should.have.length_of(5) - vpc.cidr_block_association_set[2]['CidrBlockState']['State'].should.equal('associated') - vpc.cidr_block_association_set[4]['CidrBlockState']['State'].should.equal('associated') - - # Check error on adding 6th association. - with assert_raises(ClientError) as ex: - response = ec2.meta.client.associate_vpc_cidr_block(VpcId=vpc.id, CidrBlock='10.10.50.0/22') - str(ex.exception).should.equal( - "An error occurred (CidrLimitExceeded) when calling the AssociateVpcCidrBlock " - "operation: This network '{}' has met its maximum number of allowed CIDRs: 5".format(vpc.id)) - -@mock_ec2 -def test_disassociate_vpc_ipv4_cidr_block(): - ec2 = boto3.resource('ec2', region_name='us-west-1') - - vpc = ec2.create_vpc(CidrBlock='10.10.42.0/24') - ec2.meta.client.associate_vpc_cidr_block(VpcId=vpc.id, CidrBlock='10.10.43.0/24') - - # Remove an extended cidr block - vpc = ec2.Vpc(vpc.id) - non_default_assoc_cidr_block = next(iter([x for x in vpc.cidr_block_association_set if vpc.cidr_block != x['CidrBlock']]), None) - response = ec2.meta.client.disassociate_vpc_cidr_block(AssociationId=non_default_assoc_cidr_block['AssociationId']) - response['CidrBlockAssociation']['CidrBlockState']['State'].should.equal('disassociating') - response['CidrBlockAssociation']['CidrBlock'].should.equal(non_default_assoc_cidr_block['CidrBlock']) - response['CidrBlockAssociation']['AssociationId'].should.equal(non_default_assoc_cidr_block['AssociationId']) - - # Error attempting to delete a non-existent CIDR_BLOCK association - with assert_raises(ClientError) as ex: - response = ec2.meta.client.disassociate_vpc_cidr_block(AssociationId='vpc-cidr-assoc-BORING123') - str(ex.exception).should.equal( - "An error occurred (InvalidVpcCidrBlockAssociationIdError.NotFound) when calling the " - "DisassociateVpcCidrBlock operation: The vpc CIDR block association ID " - "'vpc-cidr-assoc-BORING123' does not exist") - - # Error attempting to delete Primary CIDR BLOCK association - vpc_base_cidr_assoc_id = next(iter([x for x in vpc.cidr_block_association_set - if vpc.cidr_block == x['CidrBlock']]), {})['AssociationId'] - - with assert_raises(ClientError) as ex: - response = ec2.meta.client.disassociate_vpc_cidr_block(AssociationId=vpc_base_cidr_assoc_id) - str(ex.exception).should.equal( - "An error occurred (OperationNotPermitted) when calling the DisassociateVpcCidrBlock operation: " - "The vpc CIDR block with association ID {} may not be disassociated. 
It is the primary " - "IPv4 CIDR block of the VPC".format(vpc_base_cidr_assoc_id)) - -@mock_ec2 -def test_cidr_block_association_filters(): - ec2 = boto3.resource('ec2', region_name='us-west-1') - vpc1 = ec2.create_vpc(CidrBlock='10.90.0.0/16') - vpc2 = ec2.create_vpc(CidrBlock='10.91.0.0/16') - ec2.meta.client.associate_vpc_cidr_block(VpcId=vpc2.id, CidrBlock='10.10.0.0/19') - vpc3 = ec2.create_vpc(CidrBlock='10.92.0.0/24') - ec2.meta.client.associate_vpc_cidr_block(VpcId=vpc3.id, CidrBlock='10.92.1.0/24') - ec2.meta.client.associate_vpc_cidr_block(VpcId=vpc3.id, CidrBlock='10.92.2.0/24') - vpc3_assoc_response = ec2.meta.client.associate_vpc_cidr_block(VpcId=vpc3.id, CidrBlock='10.92.3.0/24') - - # Test filters for a cidr-block in all VPCs cidr-block-associations - filtered_vpcs = list(ec2.vpcs.filter(Filters=[{'Name': 'cidr-block-association.cidr-block', - 'Values': ['10.10.0.0/19']}])) - filtered_vpcs.should.be.length_of(1) - filtered_vpcs[0].id.should.equal(vpc2.id) - - # Test filter for association id in VPCs - association_id = vpc3_assoc_response['CidrBlockAssociation']['AssociationId'] - filtered_vpcs = list(ec2.vpcs.filter(Filters=[{'Name': 'cidr-block-association.association-id', - 'Values': [association_id]}])) - filtered_vpcs.should.be.length_of(1) - filtered_vpcs[0].id.should.equal(vpc3.id) - - # Test filter for association state in VPC - this will never show anything in this test - filtered_vpcs = list(ec2.vpcs.filter(Filters=[{'Name': 'cidr-block-association.association-id', - 'Values': ['failing']}])) - filtered_vpcs.should.be.length_of(0) - -@mock_ec2 -def test_vpc_associate_ipv6_cidr_block(): - ec2 = boto3.resource('ec2', region_name='us-west-1') - - # Test create VPC with IPV6 cidr range - vpc = ec2.create_vpc(CidrBlock='10.10.42.0/24', AmazonProvidedIpv6CidrBlock=True) - ipv6_cidr_block_association_set = next(iter(vpc.ipv6_cidr_block_association_set), None) - ipv6_cidr_block_association_set['Ipv6CidrBlockState']['State'].should.equal('associated') - ipv6_cidr_block_association_set['Ipv6CidrBlock'].should.contain('::/56') - ipv6_cidr_block_association_set['AssociationId'].should.contain('vpc-cidr-assoc') - - # Test Fail on adding 2nd IPV6 association - AWS only allows 1 at this time! 
- with assert_raises(ClientError) as ex: - response = ec2.meta.client.associate_vpc_cidr_block(VpcId=vpc.id, AmazonProvidedIpv6CidrBlock=True) - str(ex.exception).should.equal( - "An error occurred (CidrLimitExceeded) when calling the AssociateVpcCidrBlock " - "operation: This network '{}' has met its maximum number of allowed CIDRs: 1".format(vpc.id)) - - # Test associate ipv6 cidr block after vpc created - vpc = ec2.create_vpc(CidrBlock='10.10.50.0/24') - response = ec2.meta.client.associate_vpc_cidr_block(VpcId=vpc.id, AmazonProvidedIpv6CidrBlock=True) - response['Ipv6CidrBlockAssociation']['Ipv6CidrBlockState']['State'].should.equal('associating') - response['Ipv6CidrBlockAssociation']['Ipv6CidrBlock'].should.contain('::/56') - response['Ipv6CidrBlockAssociation']['AssociationId'].should.contain('vpc-cidr-assoc-') - - # Check on describe vpc that has ipv6 cidr block association - vpc = ec2.Vpc(vpc.id) - vpc.ipv6_cidr_block_association_set.should.be.length_of(1) - - -@mock_ec2 -def test_vpc_disassociate_ipv6_cidr_block(): - ec2 = boto3.resource('ec2', region_name='us-west-1') - - # Test create VPC with IPV6 cidr range - vpc = ec2.create_vpc(CidrBlock='10.10.42.0/24', AmazonProvidedIpv6CidrBlock=True) - # Test disassociating the only IPV6 - assoc_id = vpc.ipv6_cidr_block_association_set[0]['AssociationId'] - response = ec2.meta.client.disassociate_vpc_cidr_block(AssociationId=assoc_id) - response['Ipv6CidrBlockAssociation']['Ipv6CidrBlockState']['State'].should.equal('disassociating') - response['Ipv6CidrBlockAssociation']['Ipv6CidrBlock'].should.contain('::/56') - response['Ipv6CidrBlockAssociation']['AssociationId'].should.equal(assoc_id) - - -@mock_ec2 -def test_ipv6_cidr_block_association_filters(): - ec2 = boto3.resource('ec2', region_name='us-west-1') - vpc1 = ec2.create_vpc(CidrBlock='10.90.0.0/16') - - vpc2 = ec2.create_vpc(CidrBlock='10.91.0.0/16', AmazonProvidedIpv6CidrBlock=True) - vpc2_assoc_ipv6_assoc_id = vpc2.ipv6_cidr_block_association_set[0]['AssociationId'] - ec2.meta.client.associate_vpc_cidr_block(VpcId=vpc2.id, CidrBlock='10.10.0.0/19') - - vpc3 = ec2.create_vpc(CidrBlock='10.92.0.0/24') - ec2.meta.client.associate_vpc_cidr_block(VpcId=vpc3.id, CidrBlock='10.92.1.0/24') - ec2.meta.client.associate_vpc_cidr_block(VpcId=vpc3.id, CidrBlock='10.92.2.0/24') - response = ec2.meta.client.associate_vpc_cidr_block(VpcId=vpc3.id, AmazonProvidedIpv6CidrBlock=True) - vpc3_ipv6_cidr_block = response['Ipv6CidrBlockAssociation']['Ipv6CidrBlock'] - - vpc4 = ec2.create_vpc(CidrBlock='10.95.0.0/16') # Here for its looks - - # Test filters for an ipv6 cidr-block in all VPCs cidr-block-associations - filtered_vpcs = list(ec2.vpcs.filter(Filters=[{'Name': 'ipv6-cidr-block-association.ipv6-cidr-block', - 'Values': [vpc3_ipv6_cidr_block]}])) - filtered_vpcs.should.be.length_of(1) - filtered_vpcs[0].id.should.equal(vpc3.id) - - # Test filter for association id in VPCs - filtered_vpcs = list(ec2.vpcs.filter(Filters=[{'Name': 'ipv6-cidr-block-association.association-id', - 'Values': [vpc2_assoc_ipv6_assoc_id]}])) - filtered_vpcs.should.be.length_of(1) - filtered_vpcs[0].id.should.equal(vpc2.id) - - # Test filter for association state in VPC - this will never show anything in this test - filtered_vpcs = list(ec2.vpcs.filter(Filters=[{'Name': 'ipv6-cidr-block-association.state', - 'Values': ['associated']}])) - filtered_vpcs.should.be.length_of(2) # 2 of 4 VPCs +from __future__ import unicode_literals +# Ensure 'assert_raises' context manager support for Python 2.6 +import 
tests.backport_assert_raises # flake8: noqa +from nose.tools import assert_raises +from moto.ec2.exceptions import EC2ClientError +from botocore.exceptions import ClientError + +import boto3 +import boto +from boto.exception import EC2ResponseError +import sure # noqa + +from moto import mock_ec2, mock_ec2_deprecated + +SAMPLE_DOMAIN_NAME = u'example.com' +SAMPLE_NAME_SERVERS = [u'10.0.0.6', u'10.0.0.7'] + + +@mock_ec2_deprecated +def test_vpcs(): + conn = boto.connect_vpc('the_key', 'the_secret') + vpc = conn.create_vpc("10.0.0.0/16") + vpc.cidr_block.should.equal('10.0.0.0/16') + + all_vpcs = conn.get_all_vpcs() + all_vpcs.should.have.length_of(2) + + vpc.delete() + + all_vpcs = conn.get_all_vpcs() + all_vpcs.should.have.length_of(1) + + with assert_raises(EC2ResponseError) as cm: + conn.delete_vpc("vpc-1234abcd") + cm.exception.code.should.equal('InvalidVpcID.NotFound') + cm.exception.status.should.equal(400) + cm.exception.request_id.should_not.be.none + + +@mock_ec2_deprecated +def test_vpc_defaults(): + conn = boto.connect_vpc('the_key', 'the_secret') + vpc = conn.create_vpc("10.0.0.0/16") + + conn.get_all_vpcs().should.have.length_of(2) + conn.get_all_route_tables().should.have.length_of(2) + conn.get_all_security_groups( + filters={'vpc-id': [vpc.id]}).should.have.length_of(1) + + vpc.delete() + + conn.get_all_vpcs().should.have.length_of(1) + conn.get_all_route_tables().should.have.length_of(1) + conn.get_all_security_groups( + filters={'vpc-id': [vpc.id]}).should.have.length_of(0) + + +@mock_ec2_deprecated +def test_vpc_isdefault_filter(): + conn = boto.connect_vpc('the_key', 'the_secret') + vpc = conn.create_vpc("10.0.0.0/16") + conn.get_all_vpcs(filters={'isDefault': 'true'}).should.have.length_of(1) + vpc.delete() + conn.get_all_vpcs(filters={'isDefault': 'true'}).should.have.length_of(1) + + +@mock_ec2_deprecated +def test_multiple_vpcs_default_filter(): + conn = boto.connect_vpc('the_key', 'the_secret') + conn.create_vpc("10.8.0.0/16") + conn.create_vpc("10.0.0.0/16") + conn.create_vpc("192.168.0.0/16") + conn.get_all_vpcs().should.have.length_of(4) + vpc = conn.get_all_vpcs(filters={'isDefault': 'true'}) + vpc.should.have.length_of(1) + vpc[0].cidr_block.should.equal('172.31.0.0/16') + + +@mock_ec2_deprecated +def test_vpc_state_available_filter(): + conn = boto.connect_vpc('the_key', 'the_secret') + vpc = conn.create_vpc("10.0.0.0/16") + conn.create_vpc("10.1.0.0/16") + conn.get_all_vpcs(filters={'state': 'available'}).should.have.length_of(3) + vpc.delete() + conn.get_all_vpcs(filters={'state': 'available'}).should.have.length_of(2) + + +@mock_ec2_deprecated +def test_vpc_tagging(): + conn = boto.connect_vpc() + vpc = conn.create_vpc("10.0.0.0/16") + + vpc.add_tag("a key", "some value") + tag = conn.get_all_tags()[0] + tag.name.should.equal("a key") + tag.value.should.equal("some value") + + # Refresh the vpc + vpc = conn.get_all_vpcs(vpc_ids=[vpc.id])[0] + vpc.tags.should.have.length_of(1) + vpc.tags["a key"].should.equal("some value") + + +@mock_ec2_deprecated +def test_vpc_get_by_id(): + conn = boto.connect_vpc() + vpc1 = conn.create_vpc("10.0.0.0/16") + vpc2 = conn.create_vpc("10.0.0.0/16") + conn.create_vpc("10.0.0.0/16") + + vpcs = conn.get_all_vpcs(vpc_ids=[vpc1.id, vpc2.id]) + vpcs.should.have.length_of(2) + vpc_ids = tuple(map(lambda v: v.id, vpcs)) + vpc1.id.should.be.within(vpc_ids) + vpc2.id.should.be.within(vpc_ids) + + with assert_raises(EC2ResponseError) as cm: + conn.get_all_vpcs(vpc_ids=['vpc-does_not_exist']) + 
cm.exception.code.should.equal('InvalidVpcID.NotFound') + cm.exception.status.should.equal(400) + cm.exception.request_id.should_not.be.none + + +@mock_ec2_deprecated +def test_vpc_get_by_cidr_block(): + conn = boto.connect_vpc() + vpc1 = conn.create_vpc("10.0.0.0/16") + vpc2 = conn.create_vpc("10.0.0.0/16") + conn.create_vpc("10.0.0.0/24") + + vpcs = conn.get_all_vpcs(filters={'cidr': '10.0.0.0/16'}) + vpcs.should.have.length_of(2) + vpc_ids = tuple(map(lambda v: v.id, vpcs)) + vpc1.id.should.be.within(vpc_ids) + vpc2.id.should.be.within(vpc_ids) + + +@mock_ec2_deprecated +def test_vpc_get_by_dhcp_options_id(): + conn = boto.connect_vpc() + dhcp_options = conn.create_dhcp_options( + SAMPLE_DOMAIN_NAME, SAMPLE_NAME_SERVERS) + vpc1 = conn.create_vpc("10.0.0.0/16") + vpc2 = conn.create_vpc("10.0.0.0/16") + conn.create_vpc("10.0.0.0/24") + + conn.associate_dhcp_options(dhcp_options.id, vpc1.id) + conn.associate_dhcp_options(dhcp_options.id, vpc2.id) + + vpcs = conn.get_all_vpcs(filters={'dhcp-options-id': dhcp_options.id}) + vpcs.should.have.length_of(2) + vpc_ids = tuple(map(lambda v: v.id, vpcs)) + vpc1.id.should.be.within(vpc_ids) + vpc2.id.should.be.within(vpc_ids) + + +@mock_ec2_deprecated +def test_vpc_get_by_tag(): + conn = boto.connect_vpc() + vpc1 = conn.create_vpc("10.0.0.0/16") + vpc2 = conn.create_vpc("10.0.0.0/16") + vpc3 = conn.create_vpc("10.0.0.0/24") + + vpc1.add_tag('Name', 'TestVPC') + vpc2.add_tag('Name', 'TestVPC') + vpc3.add_tag('Name', 'TestVPC2') + + vpcs = conn.get_all_vpcs(filters={'tag:Name': 'TestVPC'}) + vpcs.should.have.length_of(2) + vpc_ids = tuple(map(lambda v: v.id, vpcs)) + vpc1.id.should.be.within(vpc_ids) + vpc2.id.should.be.within(vpc_ids) + + +@mock_ec2_deprecated +def test_vpc_get_by_tag_key_superset(): + conn = boto.connect_vpc() + vpc1 = conn.create_vpc("10.0.0.0/16") + vpc2 = conn.create_vpc("10.0.0.0/16") + vpc3 = conn.create_vpc("10.0.0.0/24") + + vpc1.add_tag('Name', 'TestVPC') + vpc1.add_tag('Key', 'TestVPC2') + vpc2.add_tag('Name', 'TestVPC') + vpc2.add_tag('Key', 'TestVPC2') + vpc3.add_tag('Key', 'TestVPC2') + + vpcs = conn.get_all_vpcs(filters={'tag-key': 'Name'}) + vpcs.should.have.length_of(2) + vpc_ids = tuple(map(lambda v: v.id, vpcs)) + vpc1.id.should.be.within(vpc_ids) + vpc2.id.should.be.within(vpc_ids) + + +@mock_ec2_deprecated +def test_vpc_get_by_tag_key_subset(): + conn = boto.connect_vpc() + vpc1 = conn.create_vpc("10.0.0.0/16") + vpc2 = conn.create_vpc("10.0.0.0/16") + vpc3 = conn.create_vpc("10.0.0.0/24") + + vpc1.add_tag('Name', 'TestVPC') + vpc1.add_tag('Key', 'TestVPC2') + vpc2.add_tag('Name', 'TestVPC') + vpc2.add_tag('Key', 'TestVPC2') + vpc3.add_tag('Test', 'TestVPC2') + + vpcs = conn.get_all_vpcs(filters={'tag-key': ['Name', 'Key']}) + vpcs.should.have.length_of(2) + vpc_ids = tuple(map(lambda v: v.id, vpcs)) + vpc1.id.should.be.within(vpc_ids) + vpc2.id.should.be.within(vpc_ids) + + +@mock_ec2_deprecated +def test_vpc_get_by_tag_value_superset(): + conn = boto.connect_vpc() + vpc1 = conn.create_vpc("10.0.0.0/16") + vpc2 = conn.create_vpc("10.0.0.0/16") + vpc3 = conn.create_vpc("10.0.0.0/24") + + vpc1.add_tag('Name', 'TestVPC') + vpc1.add_tag('Key', 'TestVPC2') + vpc2.add_tag('Name', 'TestVPC') + vpc2.add_tag('Key', 'TestVPC2') + vpc3.add_tag('Key', 'TestVPC2') + + vpcs = conn.get_all_vpcs(filters={'tag-value': 'TestVPC'}) + vpcs.should.have.length_of(2) + vpc_ids = tuple(map(lambda v: v.id, vpcs)) + vpc1.id.should.be.within(vpc_ids) + vpc2.id.should.be.within(vpc_ids) + + +@mock_ec2_deprecated +def 
test_vpc_get_by_tag_value_subset(): + conn = boto.connect_vpc() + vpc1 = conn.create_vpc("10.0.0.0/16") + vpc2 = conn.create_vpc("10.0.0.0/16") + conn.create_vpc("10.0.0.0/24") + + vpc1.add_tag('Name', 'TestVPC') + vpc1.add_tag('Key', 'TestVPC2') + vpc2.add_tag('Name', 'TestVPC') + vpc2.add_tag('Key', 'TestVPC2') + + vpcs = conn.get_all_vpcs(filters={'tag-value': ['TestVPC', 'TestVPC2']}) + vpcs.should.have.length_of(2) + vpc_ids = tuple(map(lambda v: v.id, vpcs)) + vpc1.id.should.be.within(vpc_ids) + vpc2.id.should.be.within(vpc_ids) + + +@mock_ec2 +def test_default_vpc(): + ec2 = boto3.resource('ec2', region_name='us-west-1') + + # Create the default VPC + default_vpc = list(ec2.vpcs.all())[0] + default_vpc.cidr_block.should.equal('172.31.0.0/16') + default_vpc.instance_tenancy.should.equal('default') + default_vpc.reload() + default_vpc.is_default.should.be.ok + + # Test default values for VPC attributes + response = default_vpc.describe_attribute(Attribute='enableDnsSupport') + attr = response.get('EnableDnsSupport') + attr.get('Value').should.be.ok + + response = default_vpc.describe_attribute(Attribute='enableDnsHostnames') + attr = response.get('EnableDnsHostnames') + attr.get('Value').should.be.ok + + +@mock_ec2 +def test_non_default_vpc(): + ec2 = boto3.resource('ec2', region_name='us-west-1') + + # Create the default VPC - this already exists when backend instantiated! + #ec2.create_vpc(CidrBlock='172.31.0.0/16') + + # Create the non default VPC + vpc = ec2.create_vpc(CidrBlock='10.0.0.0/16') + vpc.reload() + vpc.is_default.shouldnt.be.ok + + # Test default instance_tenancy + vpc.instance_tenancy.should.equal('default') + + # Test default values for VPC attributes + response = vpc.describe_attribute(Attribute='enableDnsSupport') + attr = response.get('EnableDnsSupport') + attr.get('Value').should.be.ok + + response = vpc.describe_attribute(Attribute='enableDnsHostnames') + attr = response.get('EnableDnsHostnames') + attr.get('Value').shouldnt.be.ok + + # Check Primary CIDR Block Associations + cidr_block_association_set = next(iter(vpc.cidr_block_association_set), None) + cidr_block_association_set['CidrBlockState']['State'].should.equal('associated') + cidr_block_association_set['CidrBlock'].should.equal(vpc.cidr_block) + cidr_block_association_set['AssociationId'].should.contain('vpc-cidr-assoc') + + +@mock_ec2 +def test_vpc_dedicated_tenancy(): + ec2 = boto3.resource('ec2', region_name='us-west-1') + + # Create the default VPC + ec2.create_vpc(CidrBlock='172.31.0.0/16') + + # Create the non default VPC + vpc = ec2.create_vpc(CidrBlock='10.0.0.0/16', InstanceTenancy='dedicated') + vpc.reload() + vpc.is_default.shouldnt.be.ok + + vpc.instance_tenancy.should.equal('dedicated') + + +@mock_ec2 +def test_vpc_modify_enable_dns_support(): + ec2 = boto3.resource('ec2', region_name='us-west-1') + + # Create the default VPC + ec2.create_vpc(CidrBlock='172.31.0.0/16') + + vpc = ec2.create_vpc(CidrBlock='10.0.0.0/16') + + # Test default values for VPC attributes + response = vpc.describe_attribute(Attribute='enableDnsSupport') + attr = response.get('EnableDnsSupport') + attr.get('Value').should.be.ok + + vpc.modify_attribute(EnableDnsSupport={'Value': False}) + + response = vpc.describe_attribute(Attribute='enableDnsSupport') + attr = response.get('EnableDnsSupport') + attr.get('Value').shouldnt.be.ok + + +@mock_ec2 +def test_vpc_modify_enable_dns_hostnames(): + ec2 = boto3.resource('ec2', region_name='us-west-1') + + # Create the default VPC + ec2.create_vpc(CidrBlock='172.31.0.0/16') + + 
vpc = ec2.create_vpc(CidrBlock='10.0.0.0/16')
+    # Test default values for VPC attributes
+    response = vpc.describe_attribute(Attribute='enableDnsHostnames')
+    attr = response.get('EnableDnsHostnames')
+    attr.get('Value').shouldnt.be.ok
+
+    vpc.modify_attribute(EnableDnsHostnames={'Value': True})
+
+    response = vpc.describe_attribute(Attribute='enableDnsHostnames')
+    attr = response.get('EnableDnsHostnames')
+    attr.get('Value').should.be.ok
+
+
+@mock_ec2_deprecated
+def test_vpc_associate_dhcp_options():
+    conn = boto.connect_vpc()
+    dhcp_options = conn.create_dhcp_options(
+        SAMPLE_DOMAIN_NAME, SAMPLE_NAME_SERVERS)
+    vpc = conn.create_vpc("10.0.0.0/16")
+
+    conn.associate_dhcp_options(dhcp_options.id, vpc.id)
+
+    vpc.update()
+    dhcp_options.id.should.equal(vpc.dhcp_options_id)
+
+
+@mock_ec2
+def test_associate_vpc_ipv4_cidr_block():
+    ec2 = boto3.resource('ec2', region_name='us-west-1')
+
+    vpc = ec2.create_vpc(CidrBlock='10.10.42.0/24')
+
+    # Associate/extend the VPC CIDR range up to 5 CIDRs
+    for i in range(43, 47):
+        response = ec2.meta.client.associate_vpc_cidr_block(VpcId=vpc.id, CidrBlock='10.10.{}.0/24'.format(i))
+        response['CidrBlockAssociation']['CidrBlockState']['State'].should.equal('associating')
+        response['CidrBlockAssociation']['CidrBlock'].should.equal('10.10.{}.0/24'.format(i))
+        response['CidrBlockAssociation']['AssociationId'].should.contain('vpc-cidr-assoc')
+
+    # Check all associations exist
+    vpc = ec2.Vpc(vpc.id)
+    vpc.cidr_block_association_set.should.have.length_of(5)
+    vpc.cidr_block_association_set[2]['CidrBlockState']['State'].should.equal('associated')
+    vpc.cidr_block_association_set[4]['CidrBlockState']['State'].should.equal('associated')
+
+    # Check error on adding 6th association.
+    with assert_raises(ClientError) as ex:
+        response = ec2.meta.client.associate_vpc_cidr_block(VpcId=vpc.id, CidrBlock='10.10.50.0/22')
+    str(ex.exception).should.equal(
+        "An error occurred (CidrLimitExceeded) when calling the AssociateVpcCidrBlock "
+        "operation: This network '{}' has met its maximum number of allowed CIDRs: 5".format(vpc.id))
+
+@mock_ec2
+def test_disassociate_vpc_ipv4_cidr_block():
+    ec2 = boto3.resource('ec2', region_name='us-west-1')
+
+    vpc = ec2.create_vpc(CidrBlock='10.10.42.0/24')
+    ec2.meta.client.associate_vpc_cidr_block(VpcId=vpc.id, CidrBlock='10.10.43.0/24')
+
+    # Remove an extended cidr block
+    vpc = ec2.Vpc(vpc.id)
+    non_default_assoc_cidr_block = next(iter([x for x in vpc.cidr_block_association_set if vpc.cidr_block != x['CidrBlock']]), None)
+    response = ec2.meta.client.disassociate_vpc_cidr_block(AssociationId=non_default_assoc_cidr_block['AssociationId'])
+    response['CidrBlockAssociation']['CidrBlockState']['State'].should.equal('disassociating')
+    response['CidrBlockAssociation']['CidrBlock'].should.equal(non_default_assoc_cidr_block['CidrBlock'])
+    response['CidrBlockAssociation']['AssociationId'].should.equal(non_default_assoc_cidr_block['AssociationId'])
+
+    # Error attempting to delete a non-existent CIDR_BLOCK association
+    with assert_raises(ClientError) as ex:
+        response = ec2.meta.client.disassociate_vpc_cidr_block(AssociationId='vpc-cidr-assoc-BORING123')
+    str(ex.exception).should.equal(
+        "An error occurred (InvalidVpcCidrBlockAssociationIdError.NotFound) when calling the "
+        "DisassociateVpcCidrBlock operation: The vpc CIDR block association ID "
+        "'vpc-cidr-assoc-BORING123' does not exist")
+
+    # Error attempting to delete Primary CIDR BLOCK association
+    vpc_base_cidr_assoc_id = next(iter([x for x in
vpc.cidr_block_association_set + if vpc.cidr_block == x['CidrBlock']]), {})['AssociationId'] + + with assert_raises(ClientError) as ex: + response = ec2.meta.client.disassociate_vpc_cidr_block(AssociationId=vpc_base_cidr_assoc_id) + str(ex.exception).should.equal( + "An error occurred (OperationNotPermitted) when calling the DisassociateVpcCidrBlock operation: " + "The vpc CIDR block with association ID {} may not be disassociated. It is the primary " + "IPv4 CIDR block of the VPC".format(vpc_base_cidr_assoc_id)) + +@mock_ec2 +def test_cidr_block_association_filters(): + ec2 = boto3.resource('ec2', region_name='us-west-1') + vpc1 = ec2.create_vpc(CidrBlock='10.90.0.0/16') + vpc2 = ec2.create_vpc(CidrBlock='10.91.0.0/16') + ec2.meta.client.associate_vpc_cidr_block(VpcId=vpc2.id, CidrBlock='10.10.0.0/19') + vpc3 = ec2.create_vpc(CidrBlock='10.92.0.0/24') + ec2.meta.client.associate_vpc_cidr_block(VpcId=vpc3.id, CidrBlock='10.92.1.0/24') + ec2.meta.client.associate_vpc_cidr_block(VpcId=vpc3.id, CidrBlock='10.92.2.0/24') + vpc3_assoc_response = ec2.meta.client.associate_vpc_cidr_block(VpcId=vpc3.id, CidrBlock='10.92.3.0/24') + + # Test filters for a cidr-block in all VPCs cidr-block-associations + filtered_vpcs = list(ec2.vpcs.filter(Filters=[{'Name': 'cidr-block-association.cidr-block', + 'Values': ['10.10.0.0/19']}])) + filtered_vpcs.should.be.length_of(1) + filtered_vpcs[0].id.should.equal(vpc2.id) + + # Test filter for association id in VPCs + association_id = vpc3_assoc_response['CidrBlockAssociation']['AssociationId'] + filtered_vpcs = list(ec2.vpcs.filter(Filters=[{'Name': 'cidr-block-association.association-id', + 'Values': [association_id]}])) + filtered_vpcs.should.be.length_of(1) + filtered_vpcs[0].id.should.equal(vpc3.id) + + # Test filter for association state in VPC - this will never show anything in this test + filtered_vpcs = list(ec2.vpcs.filter(Filters=[{'Name': 'cidr-block-association.association-id', + 'Values': ['failing']}])) + filtered_vpcs.should.be.length_of(0) + +@mock_ec2 +def test_vpc_associate_ipv6_cidr_block(): + ec2 = boto3.resource('ec2', region_name='us-west-1') + + # Test create VPC with IPV6 cidr range + vpc = ec2.create_vpc(CidrBlock='10.10.42.0/24', AmazonProvidedIpv6CidrBlock=True) + ipv6_cidr_block_association_set = next(iter(vpc.ipv6_cidr_block_association_set), None) + ipv6_cidr_block_association_set['Ipv6CidrBlockState']['State'].should.equal('associated') + ipv6_cidr_block_association_set['Ipv6CidrBlock'].should.contain('::/56') + ipv6_cidr_block_association_set['AssociationId'].should.contain('vpc-cidr-assoc') + + # Test Fail on adding 2nd IPV6 association - AWS only allows 1 at this time! 
+    with assert_raises(ClientError) as ex:
+        response = ec2.meta.client.associate_vpc_cidr_block(VpcId=vpc.id, AmazonProvidedIpv6CidrBlock=True)
+    str(ex.exception).should.equal(
+        "An error occurred (CidrLimitExceeded) when calling the AssociateVpcCidrBlock "
+        "operation: This network '{}' has met its maximum number of allowed CIDRs: 1".format(vpc.id))
+
+    # Test associate ipv6 cidr block after vpc created
+    vpc = ec2.create_vpc(CidrBlock='10.10.50.0/24')
+    response = ec2.meta.client.associate_vpc_cidr_block(VpcId=vpc.id, AmazonProvidedIpv6CidrBlock=True)
+    response['Ipv6CidrBlockAssociation']['Ipv6CidrBlockState']['State'].should.equal('associating')
+    response['Ipv6CidrBlockAssociation']['Ipv6CidrBlock'].should.contain('::/56')
+    response['Ipv6CidrBlockAssociation']['AssociationId'].should.contain('vpc-cidr-assoc-')
+
+    # Check on describe vpc that has ipv6 cidr block association
+    vpc = ec2.Vpc(vpc.id)
+    vpc.ipv6_cidr_block_association_set.should.be.length_of(1)
+
+
+@mock_ec2
+def test_vpc_disassociate_ipv6_cidr_block():
+    ec2 = boto3.resource('ec2', region_name='us-west-1')
+
+    # Test create VPC with IPV6 cidr range
+    vpc = ec2.create_vpc(CidrBlock='10.10.42.0/24', AmazonProvidedIpv6CidrBlock=True)
+    # Test disassociating the only IPV6
+    assoc_id = vpc.ipv6_cidr_block_association_set[0]['AssociationId']
+    response = ec2.meta.client.disassociate_vpc_cidr_block(AssociationId=assoc_id)
+    response['Ipv6CidrBlockAssociation']['Ipv6CidrBlockState']['State'].should.equal('disassociating')
+    response['Ipv6CidrBlockAssociation']['Ipv6CidrBlock'].should.contain('::/56')
+    response['Ipv6CidrBlockAssociation']['AssociationId'].should.equal(assoc_id)
+
+
+@mock_ec2
+def test_ipv6_cidr_block_association_filters():
+    ec2 = boto3.resource('ec2', region_name='us-west-1')
+    vpc1 = ec2.create_vpc(CidrBlock='10.90.0.0/16')
+
+    vpc2 = ec2.create_vpc(CidrBlock='10.91.0.0/16', AmazonProvidedIpv6CidrBlock=True)
+    vpc2_assoc_ipv6_assoc_id = vpc2.ipv6_cidr_block_association_set[0]['AssociationId']
+    ec2.meta.client.associate_vpc_cidr_block(VpcId=vpc2.id, CidrBlock='10.10.0.0/19')
+
+    vpc3 = ec2.create_vpc(CidrBlock='10.92.0.0/24')
+    ec2.meta.client.associate_vpc_cidr_block(VpcId=vpc3.id, CidrBlock='10.92.1.0/24')
+    ec2.meta.client.associate_vpc_cidr_block(VpcId=vpc3.id, CidrBlock='10.92.2.0/24')
+    response = ec2.meta.client.associate_vpc_cidr_block(VpcId=vpc3.id, AmazonProvidedIpv6CidrBlock=True)
+    vpc3_ipv6_cidr_block = response['Ipv6CidrBlockAssociation']['Ipv6CidrBlock']
+
+    vpc4 = ec2.create_vpc(CidrBlock='10.95.0.0/16')  # Not used in any filter; ensures unrelated VPCs are excluded
+
+    # Test filters for an ipv6 cidr-block in all VPCs cidr-block-associations
+    filtered_vpcs = list(ec2.vpcs.filter(Filters=[{'Name': 'ipv6-cidr-block-association.ipv6-cidr-block',
+                                                   'Values': [vpc3_ipv6_cidr_block]}]))
+    filtered_vpcs.should.be.length_of(1)
+    filtered_vpcs[0].id.should.equal(vpc3.id)
+
+    # Test filter for association id in VPCs
+    filtered_vpcs = list(ec2.vpcs.filter(Filters=[{'Name': 'ipv6-cidr-block-association.association-id',
+                                                   'Values': [vpc2_assoc_ipv6_assoc_id]}]))
+    filtered_vpcs.should.be.length_of(1)
+    filtered_vpcs[0].id.should.equal(vpc2.id)
+
+    # Test filter for association state in VPC - only vpc2 and vpc3 carry an 'associated' IPv6 block
+    filtered_vpcs = list(ec2.vpcs.filter(Filters=[{'Name': 'ipv6-cidr-block-association.state',
+                                                   'Values': ['associated']}]))
+    filtered_vpcs.should.be.length_of(2)  # 2 of 4 VPCs
diff --git a/tests/test_ec2/test_vpn_connections.py b/tests/test_ec2/test_vpn_connections.py
index e95aa76ee65b..70c3f3e33327 100644
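The patch below rewrites the VPN connection tests, which drive the legacy boto API: create_vpn_connection takes a connection type (only 'ipsec.1' is defined), a virtual private gateway id (vgw-...) and a customer gateway id (cgw-...). For reference, the boto3 equivalent of those calls would look roughly like this untested sketch; the gateway ids are placeholders, as in the tests themselves:

import boto3
from moto import mock_ec2

@mock_ec2
def create_vpn_boto3():
    ec2 = boto3.client('ec2', region_name='us-east-1')
    # Type, VpnGatewayId and CustomerGatewayId mirror the positional
    # arguments passed to boto's create_vpn_connection below
    vpn = ec2.create_vpn_connection(
        Type='ipsec.1',
        CustomerGatewayId='cgw-0123abcd',
        VpnGatewayId='vgw-0123abcd',
    )
    return vpn['VpnConnection']['VpnConnectionId']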
--- a/tests/test_ec2/test_vpn_connections.py +++ b/tests/test_ec2/test_vpn_connections.py @@ -1,51 +1,51 @@ -from __future__ import unicode_literals -import boto -from nose.tools import assert_raises -import sure # noqa -from boto.exception import EC2ResponseError - -from moto import mock_ec2_deprecated - - -@mock_ec2_deprecated -def test_create_vpn_connections(): - conn = boto.connect_vpc('the_key', 'the_secret') - vpn_connection = conn.create_vpn_connection( - 'ipsec.1', 'vgw-0123abcd', 'cgw-0123abcd') - vpn_connection.should_not.be.none - vpn_connection.id.should.match(r'vpn-\w+') - vpn_connection.type.should.equal('ipsec.1') - - -@mock_ec2_deprecated -def test_delete_vpn_connections(): - conn = boto.connect_vpc('the_key', 'the_secret') - vpn_connection = conn.create_vpn_connection( - 'ipsec.1', 'vgw-0123abcd', 'cgw-0123abcd') - list_of_vpn_connections = conn.get_all_vpn_connections() - list_of_vpn_connections.should.have.length_of(1) - conn.delete_vpn_connection(vpn_connection.id) - list_of_vpn_connections = conn.get_all_vpn_connections() - list_of_vpn_connections.should.have.length_of(0) - - -@mock_ec2_deprecated -def test_delete_vpn_connections_bad_id(): - conn = boto.connect_vpc('the_key', 'the_secret') - with assert_raises(EC2ResponseError): - conn.delete_vpn_connection('vpn-0123abcd') - - -@mock_ec2_deprecated -def test_describe_vpn_connections(): - conn = boto.connect_vpc('the_key', 'the_secret') - list_of_vpn_connections = conn.get_all_vpn_connections() - list_of_vpn_connections.should.have.length_of(0) - conn.create_vpn_connection('ipsec.1', 'vgw-0123abcd', 'cgw-0123abcd') - list_of_vpn_connections = conn.get_all_vpn_connections() - list_of_vpn_connections.should.have.length_of(1) - vpn = conn.create_vpn_connection('ipsec.1', 'vgw-1234abcd', 'cgw-1234abcd') - list_of_vpn_connections = conn.get_all_vpn_connections() - list_of_vpn_connections.should.have.length_of(2) - list_of_vpn_connections = conn.get_all_vpn_connections(vpn.id) - list_of_vpn_connections.should.have.length_of(1) +from __future__ import unicode_literals +import boto +from nose.tools import assert_raises +import sure # noqa +from boto.exception import EC2ResponseError + +from moto import mock_ec2_deprecated + + +@mock_ec2_deprecated +def test_create_vpn_connections(): + conn = boto.connect_vpc('the_key', 'the_secret') + vpn_connection = conn.create_vpn_connection( + 'ipsec.1', 'vgw-0123abcd', 'cgw-0123abcd') + vpn_connection.should_not.be.none + vpn_connection.id.should.match(r'vpn-\w+') + vpn_connection.type.should.equal('ipsec.1') + + +@mock_ec2_deprecated +def test_delete_vpn_connections(): + conn = boto.connect_vpc('the_key', 'the_secret') + vpn_connection = conn.create_vpn_connection( + 'ipsec.1', 'vgw-0123abcd', 'cgw-0123abcd') + list_of_vpn_connections = conn.get_all_vpn_connections() + list_of_vpn_connections.should.have.length_of(1) + conn.delete_vpn_connection(vpn_connection.id) + list_of_vpn_connections = conn.get_all_vpn_connections() + list_of_vpn_connections.should.have.length_of(0) + + +@mock_ec2_deprecated +def test_delete_vpn_connections_bad_id(): + conn = boto.connect_vpc('the_key', 'the_secret') + with assert_raises(EC2ResponseError): + conn.delete_vpn_connection('vpn-0123abcd') + + +@mock_ec2_deprecated +def test_describe_vpn_connections(): + conn = boto.connect_vpc('the_key', 'the_secret') + list_of_vpn_connections = conn.get_all_vpn_connections() + list_of_vpn_connections.should.have.length_of(0) + conn.create_vpn_connection('ipsec.1', 'vgw-0123abcd', 'cgw-0123abcd') + 
list_of_vpn_connections = conn.get_all_vpn_connections() + list_of_vpn_connections.should.have.length_of(1) + vpn = conn.create_vpn_connection('ipsec.1', 'vgw-1234abcd', 'cgw-1234abcd') + list_of_vpn_connections = conn.get_all_vpn_connections() + list_of_vpn_connections.should.have.length_of(2) + list_of_vpn_connections = conn.get_all_vpn_connections(vpn.id) + list_of_vpn_connections.should.have.length_of(1) diff --git a/tests/test_ec2/test_windows.py b/tests/test_ec2/test_windows.py index 364ac2f8a023..ae2f7b29ad18 100644 --- a/tests/test_ec2/test_windows.py +++ b/tests/test_ec2/test_windows.py @@ -1,10 +1,10 @@ -from __future__ import unicode_literals -import boto -import sure # noqa - -from moto import mock_ec2 - - -@mock_ec2 -def test_windows(): - pass +from __future__ import unicode_literals +import boto +import sure # noqa + +from moto import mock_ec2 + + +@mock_ec2 +def test_windows(): + pass diff --git a/tests/test_ecr/test_ecr_boto3.py b/tests/test_ecr/test_ecr_boto3.py index c0cef81a9dc7..3ce48d87d011 100644 --- a/tests/test_ecr/test_ecr_boto3.py +++ b/tests/test_ecr/test_ecr_boto3.py @@ -1,697 +1,697 @@ -from __future__ import unicode_literals - -import hashlib -import json -from datetime import datetime -from random import random - -import re -import sure # noqa - -import boto3 -from botocore.exceptions import ClientError, ParamValidationError -from dateutil.tz import tzlocal - -from moto import mock_ecr - - -def _create_image_digest(contents=None): - if not contents: - contents = 'docker_image{0}'.format(int(random() * 10 ** 6)) - return "sha256:%s" % hashlib.sha256(contents.encode('utf-8')).hexdigest() - - -def _create_image_manifest(): - return { - "schemaVersion": 2, - "mediaType": "application/vnd.docker.distribution.manifest.v2+json", - "config": - { - "mediaType": "application/vnd.docker.container.image.v1+json", - "size": 7023, - "digest": _create_image_digest("config") - }, - "layers": [ - { - "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip", - "size": 32654, - "digest": _create_image_digest("layer1") - }, - { - "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip", - "size": 16724, - "digest": _create_image_digest("layer2") - }, - { - "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip", - "size": 73109, - # randomize image digest - "digest": _create_image_digest() - } - ] - } - - -@mock_ecr -def test_create_repository(): - client = boto3.client('ecr', region_name='us-east-1') - response = client.create_repository( - repositoryName='test_ecr_repository' - ) - response['repository']['repositoryName'].should.equal('test_ecr_repository') - response['repository']['repositoryArn'].should.equal( - 'arn:aws:ecr:us-east-1:012345678910:repository/test_ecr_repository') - response['repository']['registryId'].should.equal('012345678910') - response['repository']['repositoryUri'].should.equal( - '012345678910.dkr.ecr.us-east-1.amazonaws.com/test_ecr_repository') - # response['repository']['createdAt'].should.equal(0) - - -@mock_ecr -def test_describe_repositories(): - client = boto3.client('ecr', region_name='us-east-1') - _ = client.create_repository( - repositoryName='test_repository1' - ) - _ = client.create_repository( - repositoryName='test_repository0' - ) - response = client.describe_repositories() - len(response['repositories']).should.equal(2) - - respository_arns = ['arn:aws:ecr:us-east-1:012345678910:repository/test_repository1', - 'arn:aws:ecr:us-east-1:012345678910:repository/test_repository0'] - 
set([response['repositories'][0]['repositoryArn'], - response['repositories'][1]['repositoryArn']]).should.equal(set(respository_arns)) - - respository_uris = ['012345678910.dkr.ecr.us-east-1.amazonaws.com/test_repository1', - '012345678910.dkr.ecr.us-east-1.amazonaws.com/test_repository0'] - set([response['repositories'][0]['repositoryUri'], - response['repositories'][1]['repositoryUri']]).should.equal(set(respository_uris)) - - -@mock_ecr -def test_describe_repositories_1(): - client = boto3.client('ecr', region_name='us-east-1') - _ = client.create_repository( - repositoryName='test_repository1' - ) - _ = client.create_repository( - repositoryName='test_repository0' - ) - response = client.describe_repositories(registryId='012345678910') - len(response['repositories']).should.equal(2) - - respository_arns = ['arn:aws:ecr:us-east-1:012345678910:repository/test_repository1', - 'arn:aws:ecr:us-east-1:012345678910:repository/test_repository0'] - set([response['repositories'][0]['repositoryArn'], - response['repositories'][1]['repositoryArn']]).should.equal(set(respository_arns)) - - respository_uris = ['012345678910.dkr.ecr.us-east-1.amazonaws.com/test_repository1', - '012345678910.dkr.ecr.us-east-1.amazonaws.com/test_repository0'] - set([response['repositories'][0]['repositoryUri'], - response['repositories'][1]['repositoryUri']]).should.equal(set(respository_uris)) - - -@mock_ecr -def test_describe_repositories_2(): - client = boto3.client('ecr', region_name='us-east-1') - _ = client.create_repository( - repositoryName='test_repository1' - ) - _ = client.create_repository( - repositoryName='test_repository0' - ) - response = client.describe_repositories(registryId='109876543210') - len(response['repositories']).should.equal(0) - - -@mock_ecr -def test_describe_repositories_3(): - client = boto3.client('ecr', region_name='us-east-1') - _ = client.create_repository( - repositoryName='test_repository1' - ) - _ = client.create_repository( - repositoryName='test_repository0' - ) - response = client.describe_repositories(repositoryNames=['test_repository1']) - len(response['repositories']).should.equal(1) - respository_arn = 'arn:aws:ecr:us-east-1:012345678910:repository/test_repository1' - response['repositories'][0]['repositoryArn'].should.equal(respository_arn) - - respository_uri = '012345678910.dkr.ecr.us-east-1.amazonaws.com/test_repository1' - response['repositories'][0]['repositoryUri'].should.equal(respository_uri) - - -@mock_ecr -def test_describe_repositories_with_image(): - client = boto3.client('ecr', region_name='us-east-1') - _ = client.create_repository( - repositoryName='test_repository' - ) - - _ = client.put_image( - repositoryName='test_repository', - imageManifest=json.dumps(_create_image_manifest()), - imageTag='latest' - ) - - response = client.describe_repositories(repositoryNames=['test_repository']) - len(response['repositories']).should.equal(1) - - -@mock_ecr -def test_delete_repository(): - client = boto3.client('ecr', region_name='us-east-1') - _ = client.create_repository( - repositoryName='test_repository' - ) - response = client.delete_repository(repositoryName='test_repository') - response['repository']['repositoryName'].should.equal('test_repository') - response['repository']['repositoryArn'].should.equal( - 'arn:aws:ecr:us-east-1:012345678910:repository/test_repository') - response['repository']['registryId'].should.equal('012345678910') - response['repository']['repositoryUri'].should.equal( - '012345678910.dkr.ecr.us-east-1.amazonaws.com/test_repository') - 
# response['repository']['createdAt'].should.equal(0) - - response = client.describe_repositories() - len(response['repositories']).should.equal(0) - - -@mock_ecr -def test_put_image(): - client = boto3.client('ecr', region_name='us-east-1') - _ = client.create_repository( - repositoryName='test_repository' - ) - - response = client.put_image( - repositoryName='test_repository', - imageManifest=json.dumps(_create_image_manifest()), - imageTag='latest' - ) - - response['image']['imageId']['imageTag'].should.equal('latest') - response['image']['imageId']['imageDigest'].should.contain("sha") - response['image']['repositoryName'].should.equal('test_repository') - response['image']['registryId'].should.equal('012345678910') - -@mock_ecr -def test_put_image_with_multiple_tags(): - client = boto3.client('ecr', region_name='us-east-1') - _ = client.create_repository( - repositoryName='test_repository' - ) - manifest = _create_image_manifest() - response = client.put_image( - repositoryName='test_repository', - imageManifest=json.dumps(manifest), - imageTag='v1' - ) - - response['image']['imageId']['imageTag'].should.equal('v1') - response['image']['imageId']['imageDigest'].should.contain("sha") - response['image']['repositoryName'].should.equal('test_repository') - response['image']['registryId'].should.equal('012345678910') - - response1 = client.put_image( - repositoryName='test_repository', - imageManifest=json.dumps(manifest), - imageTag='latest' - ) - - response1['image']['imageId']['imageTag'].should.equal('latest') - response1['image']['imageId']['imageDigest'].should.contain("sha") - response1['image']['repositoryName'].should.equal('test_repository') - response1['image']['registryId'].should.equal('012345678910') - - response2 = client.describe_images(repositoryName='test_repository') - type(response2['imageDetails']).should.be(list) - len(response2['imageDetails']).should.be(1) - - response2['imageDetails'][0]['imageDigest'].should.contain("sha") - - response2['imageDetails'][0]['registryId'].should.equal("012345678910") - - response2['imageDetails'][0]['repositoryName'].should.equal("test_repository") - - len(response2['imageDetails'][0]['imageTags']).should.be(2) - response2['imageDetails'][0]['imageTags'].should.be.equal(['v1', 'latest']) - -@mock_ecr -def test_list_images(): - client = boto3.client('ecr', region_name='us-east-1') - _ = client.create_repository( - repositoryName='test_repository_1' - ) - - _ = client.create_repository( - repositoryName='test_repository_2' - ) - - _ = client.put_image( - repositoryName='test_repository_1', - imageManifest=json.dumps(_create_image_manifest()), - imageTag='latest' - ) - - _ = client.put_image( - repositoryName='test_repository_1', - imageManifest=json.dumps(_create_image_manifest()), - imageTag='v1' - ) - - _ = client.put_image( - repositoryName='test_repository_1', - imageManifest=json.dumps(_create_image_manifest()), - imageTag='v2' - ) - - _ = client.put_image( - repositoryName='test_repository_2', - imageManifest=json.dumps(_create_image_manifest()), - imageTag='oldest' - ) - - response = client.list_images(repositoryName='test_repository_1') - type(response['imageIds']).should.be(list) - len(response['imageIds']).should.be(3) - - image_tags = ['latest', 'v1', 'v2'] - set([response['imageIds'][0]['imageTag'], - response['imageIds'][1]['imageTag'], - response['imageIds'][2]['imageTag']]).should.equal(set(image_tags)) - - response = client.list_images(repositoryName='test_repository_2') - type(response['imageIds']).should.be(list) - 
len(response['imageIds']).should.be(1) - response['imageIds'][0]['imageTag'].should.equal('oldest') - - -@mock_ecr -def test_list_images_from_repository_that_doesnt_exist(): - client = boto3.client('ecr', region_name='us-east-1') - _ = client.create_repository( - repositoryName='test_repository_1' - ) - - # non existing repo - error_msg = re.compile( - r".*The repository with name 'repo-that-doesnt-exist' does not exist in the registry with id '123'.*", - re.MULTILINE) - client.list_images.when.called_with( - repositoryName='repo-that-doesnt-exist', - registryId='123', - ).should.throw(Exception, error_msg) - - # repo does not exist in specified registry - error_msg = re.compile( - r".*The repository with name 'test_repository_1' does not exist in the registry with id '222'.*", - re.MULTILINE) - client.list_images.when.called_with( - repositoryName='test_repository_1', - registryId='222', - ).should.throw(Exception, error_msg) - - -@mock_ecr -def test_describe_images(): - client = boto3.client('ecr', region_name='us-east-1') - _ = client.create_repository( - repositoryName='test_repository' - ) - - _ = client.put_image( - repositoryName='test_repository', - imageManifest=json.dumps(_create_image_manifest()) - ) - - _ = client.put_image( - repositoryName='test_repository', - imageManifest=json.dumps(_create_image_manifest()), - imageTag='latest' - ) - - _ = client.put_image( - repositoryName='test_repository', - imageManifest=json.dumps(_create_image_manifest()), - imageTag='v1' - ) - - _ = client.put_image( - repositoryName='test_repository', - imageManifest=json.dumps(_create_image_manifest()), - imageTag='v2' - ) - - response = client.describe_images(repositoryName='test_repository') - type(response['imageDetails']).should.be(list) - len(response['imageDetails']).should.be(4) - - response['imageDetails'][0]['imageDigest'].should.contain("sha") - response['imageDetails'][1]['imageDigest'].should.contain("sha") - response['imageDetails'][2]['imageDigest'].should.contain("sha") - response['imageDetails'][3]['imageDigest'].should.contain("sha") - - response['imageDetails'][0]['registryId'].should.equal("012345678910") - response['imageDetails'][1]['registryId'].should.equal("012345678910") - response['imageDetails'][2]['registryId'].should.equal("012345678910") - response['imageDetails'][3]['registryId'].should.equal("012345678910") - - response['imageDetails'][0]['repositoryName'].should.equal("test_repository") - response['imageDetails'][1]['repositoryName'].should.equal("test_repository") - response['imageDetails'][2]['repositoryName'].should.equal("test_repository") - response['imageDetails'][3]['repositoryName'].should.equal("test_repository") - - response['imageDetails'][0].should_not.have.key('imageTags') - len(response['imageDetails'][1]['imageTags']).should.be(1) - len(response['imageDetails'][2]['imageTags']).should.be(1) - len(response['imageDetails'][3]['imageTags']).should.be(1) - - image_tags = ['latest', 'v1', 'v2'] - set([response['imageDetails'][1]['imageTags'][0], - response['imageDetails'][2]['imageTags'][0], - response['imageDetails'][3]['imageTags'][0]]).should.equal(set(image_tags)) - - response['imageDetails'][0]['imageSizeInBytes'].should.equal(52428800) - response['imageDetails'][1]['imageSizeInBytes'].should.equal(52428800) - response['imageDetails'][2]['imageSizeInBytes'].should.equal(52428800) - response['imageDetails'][3]['imageSizeInBytes'].should.equal(52428800) - - -@mock_ecr -def test_describe_images_by_tag(): - client = boto3.client('ecr', 
region_name='us-east-1') - _ = client.create_repository( - repositoryName='test_repository' - ) - - tag_map = {} - for tag in ['latest', 'v1', 'v2']: - put_response = client.put_image( - repositoryName='test_repository', - imageManifest=json.dumps(_create_image_manifest()), - imageTag=tag - ) - tag_map[tag] = put_response['image'] - - for tag, put_response in tag_map.items(): - response = client.describe_images(repositoryName='test_repository', imageIds=[{'imageTag': tag}]) - len(response['imageDetails']).should.be(1) - image_detail = response['imageDetails'][0] - image_detail['registryId'].should.equal("012345678910") - image_detail['repositoryName'].should.equal("test_repository") - image_detail['imageTags'].should.equal([put_response['imageId']['imageTag']]) - image_detail['imageDigest'].should.equal(put_response['imageId']['imageDigest']) - - -@mock_ecr -def test_describe_images_tags_should_not_contain_empty_tag1(): - client = boto3.client('ecr', region_name='us-east-1') - _ = client.create_repository( - repositoryName='test_repository' - ) - - manifest = _create_image_manifest() - client.put_image( - repositoryName='test_repository', - imageManifest=json.dumps(manifest) - ) - - tags = ['v1', 'v2', 'latest'] - for tag in tags: - client.put_image( - repositoryName='test_repository', - imageManifest=json.dumps(manifest), - imageTag=tag - ) - - response = client.describe_images(repositoryName='test_repository', imageIds=[{'imageTag': tag}]) - len(response['imageDetails']).should.be(1) - image_detail = response['imageDetails'][0] - len(image_detail['imageTags']).should.equal(3) - image_detail['imageTags'].should.be.equal(tags) - - -@mock_ecr -def test_describe_images_tags_should_not_contain_empty_tag2(): - client = boto3.client('ecr', region_name='us-east-1') - _ = client.create_repository( - repositoryName='test_repository' - ) - - manifest = _create_image_manifest() - tags = ['v1', 'v2'] - for tag in tags: - client.put_image( - repositoryName='test_repository', - imageManifest=json.dumps(manifest), - imageTag=tag - ) - - client.put_image( - repositoryName='test_repository', - imageManifest=json.dumps(manifest) - ) - - client.put_image( - repositoryName='test_repository', - imageManifest=json.dumps(manifest), - imageTag='latest' - ) - - response = client.describe_images(repositoryName='test_repository', imageIds=[{'imageTag': tag}]) - len(response['imageDetails']).should.be(1) - image_detail = response['imageDetails'][0] - len(image_detail['imageTags']).should.equal(3) - image_detail['imageTags'].should.be.equal(['v1', 'v2', 'latest']) - - -@mock_ecr -def test_describe_repository_that_doesnt_exist(): - client = boto3.client('ecr', region_name='us-east-1') - - error_msg = re.compile( - r".*The repository with name 'repo-that-doesnt-exist' does not exist in the registry with id '123'.*", - re.MULTILINE) - client.describe_repositories.when.called_with( - repositoryNames=['repo-that-doesnt-exist'], - registryId='123', - ).should.throw(ClientError, error_msg) - -@mock_ecr -def test_describe_image_that_doesnt_exist(): - client = boto3.client('ecr', region_name='us-east-1') - client.create_repository(repositoryName='test_repository') - - error_msg1 = re.compile( - r".*The image with imageId {imageDigest:'null', imageTag:'testtag'} does not exist within " - r"the repository with name 'test_repository' in the registry with id '123'.*", - re.MULTILINE) - - client.describe_images.when.called_with( - repositoryName='test_repository', imageIds=[{'imageTag': 'testtag'}], registryId='123', - 
).should.throw(ClientError, error_msg1) - - error_msg2 = re.compile( - r".*The repository with name 'repo-that-doesnt-exist' does not exist in the registry with id '123'.*", - re.MULTILINE) - client.describe_images.when.called_with( - repositoryName='repo-that-doesnt-exist', imageIds=[{'imageTag': 'testtag'}], registryId='123', - ).should.throw(ClientError, error_msg2) - - -@mock_ecr -def test_delete_repository_that_doesnt_exist(): - client = boto3.client('ecr', region_name='us-east-1') - - error_msg = re.compile( - r".*The repository with name 'repo-that-doesnt-exist' does not exist in the registry with id '123'.*", - re.MULTILINE) - - client.delete_repository.when.called_with( - repositoryName='repo-that-doesnt-exist', - registryId='123').should.throw( - ClientError, error_msg) - - -@mock_ecr -def test_describe_images_by_digest(): - client = boto3.client('ecr', region_name='us-east-1') - _ = client.create_repository( - repositoryName='test_repository' - ) - - tags = ['latest', 'v1', 'v2'] - digest_map = {} - for tag in tags: - put_response = client.put_image( - repositoryName='test_repository', - imageManifest=json.dumps(_create_image_manifest()), - imageTag=tag - ) - digest_map[put_response['image']['imageId']['imageDigest']] = put_response['image'] - - for digest, put_response in digest_map.items(): - response = client.describe_images(repositoryName='test_repository', - imageIds=[{'imageDigest': digest}]) - len(response['imageDetails']).should.be(1) - image_detail = response['imageDetails'][0] - image_detail['registryId'].should.equal("012345678910") - image_detail['repositoryName'].should.equal("test_repository") - image_detail['imageTags'].should.equal([put_response['imageId']['imageTag']]) - image_detail['imageDigest'].should.equal(digest) - - -@mock_ecr -def test_get_authorization_token_assume_region(): - client = boto3.client('ecr', region_name='us-east-1') - auth_token_response = client.get_authorization_token() - - auth_token_response.should.contain('authorizationData') - auth_token_response.should.contain('ResponseMetadata') - auth_token_response['authorizationData'].should.equal([ - { - 'authorizationToken': 'QVdTOjAxMjM0NTY3ODkxMC1hdXRoLXRva2Vu', - 'proxyEndpoint': 'https://012345678910.dkr.ecr.us-east-1.amazonaws.com', - 'expiresAt': datetime(2015, 1, 1, tzinfo=tzlocal()) - }, - ]) - - -@mock_ecr -def test_get_authorization_token_explicit_regions(): - client = boto3.client('ecr', region_name='us-east-1') - auth_token_response = client.get_authorization_token(registryIds=['10987654321', '878787878787']) - - auth_token_response.should.contain('authorizationData') - auth_token_response.should.contain('ResponseMetadata') - auth_token_response['authorizationData'].should.equal([ - { - 'authorizationToken': 'QVdTOjEwOTg3NjU0MzIxLWF1dGgtdG9rZW4=', - 'proxyEndpoint': 'https://10987654321.dkr.ecr.us-east-1.amazonaws.com', - 'expiresAt': datetime(2015, 1, 1, tzinfo=tzlocal()), - }, - { - 'authorizationToken': 'QVdTOjg3ODc4Nzg3ODc4Ny1hdXRoLXRva2Vu', - 'proxyEndpoint': 'https://878787878787.dkr.ecr.us-east-1.amazonaws.com', - 'expiresAt': datetime(2015, 1, 1, tzinfo=tzlocal()) - - } - ]) - - -@mock_ecr -def test_batch_get_image(): - client = boto3.client('ecr', region_name='us-east-1') - _ = client.create_repository( - repositoryName='test_repository' - ) - - _ = client.put_image( - repositoryName='test_repository', - imageManifest=json.dumps(_create_image_manifest()), - imageTag='latest' - ) - - _ = client.put_image( - repositoryName='test_repository', - 
imageManifest=json.dumps(_create_image_manifest()), - imageTag='v1' - ) - - _ = client.put_image( - repositoryName='test_repository', - imageManifest=json.dumps(_create_image_manifest()), - imageTag='v2' - ) - - response = client.batch_get_image( - repositoryName='test_repository', - imageIds=[ - { - 'imageTag': 'v2' - }, - ], - ) - - type(response['images']).should.be(list) - len(response['images']).should.be(1) - - response['images'][0]['imageManifest'].should.contain("vnd.docker.distribution.manifest.v2+json") - response['images'][0]['registryId'].should.equal("012345678910") - response['images'][0]['repositoryName'].should.equal("test_repository") - - response['images'][0]['imageId']['imageTag'].should.equal("v2") - response['images'][0]['imageId']['imageDigest'].should.contain("sha") - - type(response['failures']).should.be(list) - len(response['failures']).should.be(0) - - -@mock_ecr -def test_batch_get_image_that_doesnt_exist(): - client = boto3.client('ecr', region_name='us-east-1') - _ = client.create_repository( - repositoryName='test_repository' - ) - - _ = client.put_image( - repositoryName='test_repository', - imageManifest=json.dumps(_create_image_manifest()), - imageTag='latest' - ) - - _ = client.put_image( - repositoryName='test_repository', - imageManifest=json.dumps(_create_image_manifest()), - imageTag='v1' - ) - - _ = client.put_image( - repositoryName='test_repository', - imageManifest=json.dumps(_create_image_manifest()), - imageTag='v2' - ) - - response = client.batch_get_image( - repositoryName='test_repository', - imageIds=[ - { - 'imageTag': 'v5' - }, - ], - ) - - type(response['images']).should.be(list) - len(response['images']).should.be(0) - - type(response['failures']).should.be(list) - len(response['failures']).should.be(1) - response['failures'][0]['failureReason'].should.equal("Requested image not found") - response['failures'][0]['failureCode'].should.equal("ImageNotFound") - response['failures'][0]['imageId']['imageTag'].should.equal("v5") - - -@mock_ecr -def test_batch_get_image_no_tags(): - client = boto3.client('ecr', region_name='us-east-1') - _ = client.create_repository( - repositoryName='test_repository' - ) - - _ = client.put_image( - repositoryName='test_repository', - imageManifest=json.dumps(_create_image_manifest()), - imageTag='latest' - ) - - error_msg = re.compile( - r".*Missing required parameter in input: \"imageIds\".*", - re.MULTILINE) - - client.batch_get_image.when.called_with( - repositoryName='test_repository').should.throw( - ParamValidationError, error_msg) +from __future__ import unicode_literals + +import hashlib +import json +from datetime import datetime +from random import random + +import re +import sure # noqa + +import boto3 +from botocore.exceptions import ClientError, ParamValidationError +from dateutil.tz import tzlocal + +from moto import mock_ecr + + +def _create_image_digest(contents=None): + if not contents: + contents = 'docker_image{0}'.format(int(random() * 10 ** 6)) + return "sha256:%s" % hashlib.sha256(contents.encode('utf-8')).hexdigest() + + +def _create_image_manifest(): + return { + "schemaVersion": 2, + "mediaType": "application/vnd.docker.distribution.manifest.v2+json", + "config": + { + "mediaType": "application/vnd.docker.container.image.v1+json", + "size": 7023, + "digest": _create_image_digest("config") + }, + "layers": [ + { + "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip", + "size": 32654, + "digest": _create_image_digest("layer1") + }, + { + "mediaType": 
"application/vnd.docker.image.rootfs.diff.tar.gzip", + "size": 16724, + "digest": _create_image_digest("layer2") + }, + { + "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip", + "size": 73109, + # randomize image digest + "digest": _create_image_digest() + } + ] + } + + +@mock_ecr +def test_create_repository(): + client = boto3.client('ecr', region_name='us-east-1') + response = client.create_repository( + repositoryName='test_ecr_repository' + ) + response['repository']['repositoryName'].should.equal('test_ecr_repository') + response['repository']['repositoryArn'].should.equal( + 'arn:aws:ecr:us-east-1:012345678910:repository/test_ecr_repository') + response['repository']['registryId'].should.equal('012345678910') + response['repository']['repositoryUri'].should.equal( + '012345678910.dkr.ecr.us-east-1.amazonaws.com/test_ecr_repository') + # response['repository']['createdAt'].should.equal(0) + + +@mock_ecr +def test_describe_repositories(): + client = boto3.client('ecr', region_name='us-east-1') + _ = client.create_repository( + repositoryName='test_repository1' + ) + _ = client.create_repository( + repositoryName='test_repository0' + ) + response = client.describe_repositories() + len(response['repositories']).should.equal(2) + + respository_arns = ['arn:aws:ecr:us-east-1:012345678910:repository/test_repository1', + 'arn:aws:ecr:us-east-1:012345678910:repository/test_repository0'] + set([response['repositories'][0]['repositoryArn'], + response['repositories'][1]['repositoryArn']]).should.equal(set(respository_arns)) + + respository_uris = ['012345678910.dkr.ecr.us-east-1.amazonaws.com/test_repository1', + '012345678910.dkr.ecr.us-east-1.amazonaws.com/test_repository0'] + set([response['repositories'][0]['repositoryUri'], + response['repositories'][1]['repositoryUri']]).should.equal(set(respository_uris)) + + +@mock_ecr +def test_describe_repositories_1(): + client = boto3.client('ecr', region_name='us-east-1') + _ = client.create_repository( + repositoryName='test_repository1' + ) + _ = client.create_repository( + repositoryName='test_repository0' + ) + response = client.describe_repositories(registryId='012345678910') + len(response['repositories']).should.equal(2) + + respository_arns = ['arn:aws:ecr:us-east-1:012345678910:repository/test_repository1', + 'arn:aws:ecr:us-east-1:012345678910:repository/test_repository0'] + set([response['repositories'][0]['repositoryArn'], + response['repositories'][1]['repositoryArn']]).should.equal(set(respository_arns)) + + respository_uris = ['012345678910.dkr.ecr.us-east-1.amazonaws.com/test_repository1', + '012345678910.dkr.ecr.us-east-1.amazonaws.com/test_repository0'] + set([response['repositories'][0]['repositoryUri'], + response['repositories'][1]['repositoryUri']]).should.equal(set(respository_uris)) + + +@mock_ecr +def test_describe_repositories_2(): + client = boto3.client('ecr', region_name='us-east-1') + _ = client.create_repository( + repositoryName='test_repository1' + ) + _ = client.create_repository( + repositoryName='test_repository0' + ) + response = client.describe_repositories(registryId='109876543210') + len(response['repositories']).should.equal(0) + + +@mock_ecr +def test_describe_repositories_3(): + client = boto3.client('ecr', region_name='us-east-1') + _ = client.create_repository( + repositoryName='test_repository1' + ) + _ = client.create_repository( + repositoryName='test_repository0' + ) + response = client.describe_repositories(repositoryNames=['test_repository1']) + 
+@mock_ecr
+def test_describe_repositories_3():
+    client = boto3.client('ecr', region_name='us-east-1')
+    _ = client.create_repository(
+        repositoryName='test_repository1'
+    )
+    _ = client.create_repository(
+        repositoryName='test_repository0'
+    )
+    response = client.describe_repositories(repositoryNames=['test_repository1'])
+    len(response['repositories']).should.equal(1)
+    repository_arn = 'arn:aws:ecr:us-east-1:012345678910:repository/test_repository1'
+    response['repositories'][0]['repositoryArn'].should.equal(repository_arn)
+
+    repository_uri = '012345678910.dkr.ecr.us-east-1.amazonaws.com/test_repository1'
+    response['repositories'][0]['repositoryUri'].should.equal(repository_uri)
+
+
+@mock_ecr
+def test_describe_repositories_with_image():
+    client = boto3.client('ecr', region_name='us-east-1')
+    _ = client.create_repository(
+        repositoryName='test_repository'
+    )
+
+    _ = client.put_image(
+        repositoryName='test_repository',
+        imageManifest=json.dumps(_create_image_manifest()),
+        imageTag='latest'
+    )
+
+    response = client.describe_repositories(repositoryNames=['test_repository'])
+    len(response['repositories']).should.equal(1)
+
+
+@mock_ecr
+def test_delete_repository():
+    client = boto3.client('ecr', region_name='us-east-1')
+    _ = client.create_repository(
+        repositoryName='test_repository'
+    )
+    response = client.delete_repository(repositoryName='test_repository')
+    response['repository']['repositoryName'].should.equal('test_repository')
+    response['repository']['repositoryArn'].should.equal(
+        'arn:aws:ecr:us-east-1:012345678910:repository/test_repository')
+    response['repository']['registryId'].should.equal('012345678910')
+    response['repository']['repositoryUri'].should.equal(
+        '012345678910.dkr.ecr.us-east-1.amazonaws.com/test_repository')
+    # response['repository']['createdAt'].should.equal(0)
+
+    response = client.describe_repositories()
+    len(response['repositories']).should.equal(0)
+
+
+@mock_ecr
+def test_put_image():
+    client = boto3.client('ecr', region_name='us-east-1')
+    _ = client.create_repository(
+        repositoryName='test_repository'
+    )
+
+    response = client.put_image(
+        repositoryName='test_repository',
+        imageManifest=json.dumps(_create_image_manifest()),
+        imageTag='latest'
+    )
+
+    response['image']['imageId']['imageTag'].should.equal('latest')
+    response['image']['imageId']['imageDigest'].should.contain("sha")
+    response['image']['repositoryName'].should.equal('test_repository')
+    response['image']['registryId'].should.equal('012345678910')
+
+@mock_ecr
+def test_put_image_with_multiple_tags():
+    client = boto3.client('ecr', region_name='us-east-1')
+    _ = client.create_repository(
+        repositoryName='test_repository'
+    )
+    manifest = _create_image_manifest()
+    response = client.put_image(
+        repositoryName='test_repository',
+        imageManifest=json.dumps(manifest),
+        imageTag='v1'
+    )
+
+    response['image']['imageId']['imageTag'].should.equal('v1')
+    response['image']['imageId']['imageDigest'].should.contain("sha")
+    response['image']['repositoryName'].should.equal('test_repository')
+    response['image']['registryId'].should.equal('012345678910')
+
+    response1 = client.put_image(
+        repositoryName='test_repository',
+        imageManifest=json.dumps(manifest),
+        imageTag='latest'
+    )
+
+    response1['image']['imageId']['imageTag'].should.equal('latest')
+    response1['image']['imageId']['imageDigest'].should.contain("sha")
+    response1['image']['repositoryName'].should.equal('test_repository')
+    response1['image']['registryId'].should.equal('012345678910')
+
+    response2 = client.describe_images(repositoryName='test_repository')
+    type(response2['imageDetails']).should.be(list)
+    len(response2['imageDetails']).should.be(1)
+
+    response2['imageDetails'][0]['imageDigest'].should.contain("sha")
+
+    response2['imageDetails'][0]['registryId'].should.equal("012345678910")
+
+
response2['imageDetails'][0]['repositoryName'].should.equal("test_repository") + + len(response2['imageDetails'][0]['imageTags']).should.be(2) + response2['imageDetails'][0]['imageTags'].should.be.equal(['v1', 'latest']) + +@mock_ecr +def test_list_images(): + client = boto3.client('ecr', region_name='us-east-1') + _ = client.create_repository( + repositoryName='test_repository_1' + ) + + _ = client.create_repository( + repositoryName='test_repository_2' + ) + + _ = client.put_image( + repositoryName='test_repository_1', + imageManifest=json.dumps(_create_image_manifest()), + imageTag='latest' + ) + + _ = client.put_image( + repositoryName='test_repository_1', + imageManifest=json.dumps(_create_image_manifest()), + imageTag='v1' + ) + + _ = client.put_image( + repositoryName='test_repository_1', + imageManifest=json.dumps(_create_image_manifest()), + imageTag='v2' + ) + + _ = client.put_image( + repositoryName='test_repository_2', + imageManifest=json.dumps(_create_image_manifest()), + imageTag='oldest' + ) + + response = client.list_images(repositoryName='test_repository_1') + type(response['imageIds']).should.be(list) + len(response['imageIds']).should.be(3) + + image_tags = ['latest', 'v1', 'v2'] + set([response['imageIds'][0]['imageTag'], + response['imageIds'][1]['imageTag'], + response['imageIds'][2]['imageTag']]).should.equal(set(image_tags)) + + response = client.list_images(repositoryName='test_repository_2') + type(response['imageIds']).should.be(list) + len(response['imageIds']).should.be(1) + response['imageIds'][0]['imageTag'].should.equal('oldest') + + +@mock_ecr +def test_list_images_from_repository_that_doesnt_exist(): + client = boto3.client('ecr', region_name='us-east-1') + _ = client.create_repository( + repositoryName='test_repository_1' + ) + + # non existing repo + error_msg = re.compile( + r".*The repository with name 'repo-that-doesnt-exist' does not exist in the registry with id '123'.*", + re.MULTILINE) + client.list_images.when.called_with( + repositoryName='repo-that-doesnt-exist', + registryId='123', + ).should.throw(Exception, error_msg) + + # repo does not exist in specified registry + error_msg = re.compile( + r".*The repository with name 'test_repository_1' does not exist in the registry with id '222'.*", + re.MULTILINE) + client.list_images.when.called_with( + repositoryName='test_repository_1', + registryId='222', + ).should.throw(Exception, error_msg) + + +@mock_ecr +def test_describe_images(): + client = boto3.client('ecr', region_name='us-east-1') + _ = client.create_repository( + repositoryName='test_repository' + ) + + _ = client.put_image( + repositoryName='test_repository', + imageManifest=json.dumps(_create_image_manifest()) + ) + + _ = client.put_image( + repositoryName='test_repository', + imageManifest=json.dumps(_create_image_manifest()), + imageTag='latest' + ) + + _ = client.put_image( + repositoryName='test_repository', + imageManifest=json.dumps(_create_image_manifest()), + imageTag='v1' + ) + + _ = client.put_image( + repositoryName='test_repository', + imageManifest=json.dumps(_create_image_manifest()), + imageTag='v2' + ) + + response = client.describe_images(repositoryName='test_repository') + type(response['imageDetails']).should.be(list) + len(response['imageDetails']).should.be(4) + + response['imageDetails'][0]['imageDigest'].should.contain("sha") + response['imageDetails'][1]['imageDigest'].should.contain("sha") + response['imageDetails'][2]['imageDigest'].should.contain("sha") + 
response['imageDetails'][3]['imageDigest'].should.contain("sha") + + response['imageDetails'][0]['registryId'].should.equal("012345678910") + response['imageDetails'][1]['registryId'].should.equal("012345678910") + response['imageDetails'][2]['registryId'].should.equal("012345678910") + response['imageDetails'][3]['registryId'].should.equal("012345678910") + + response['imageDetails'][0]['repositoryName'].should.equal("test_repository") + response['imageDetails'][1]['repositoryName'].should.equal("test_repository") + response['imageDetails'][2]['repositoryName'].should.equal("test_repository") + response['imageDetails'][3]['repositoryName'].should.equal("test_repository") + + response['imageDetails'][0].should_not.have.key('imageTags') + len(response['imageDetails'][1]['imageTags']).should.be(1) + len(response['imageDetails'][2]['imageTags']).should.be(1) + len(response['imageDetails'][3]['imageTags']).should.be(1) + + image_tags = ['latest', 'v1', 'v2'] + set([response['imageDetails'][1]['imageTags'][0], + response['imageDetails'][2]['imageTags'][0], + response['imageDetails'][3]['imageTags'][0]]).should.equal(set(image_tags)) + + response['imageDetails'][0]['imageSizeInBytes'].should.equal(52428800) + response['imageDetails'][1]['imageSizeInBytes'].should.equal(52428800) + response['imageDetails'][2]['imageSizeInBytes'].should.equal(52428800) + response['imageDetails'][3]['imageSizeInBytes'].should.equal(52428800) + + +@mock_ecr +def test_describe_images_by_tag(): + client = boto3.client('ecr', region_name='us-east-1') + _ = client.create_repository( + repositoryName='test_repository' + ) + + tag_map = {} + for tag in ['latest', 'v1', 'v2']: + put_response = client.put_image( + repositoryName='test_repository', + imageManifest=json.dumps(_create_image_manifest()), + imageTag=tag + ) + tag_map[tag] = put_response['image'] + + for tag, put_response in tag_map.items(): + response = client.describe_images(repositoryName='test_repository', imageIds=[{'imageTag': tag}]) + len(response['imageDetails']).should.be(1) + image_detail = response['imageDetails'][0] + image_detail['registryId'].should.equal("012345678910") + image_detail['repositoryName'].should.equal("test_repository") + image_detail['imageTags'].should.equal([put_response['imageId']['imageTag']]) + image_detail['imageDigest'].should.equal(put_response['imageId']['imageDigest']) + + +@mock_ecr +def test_describe_images_tags_should_not_contain_empty_tag1(): + client = boto3.client('ecr', region_name='us-east-1') + _ = client.create_repository( + repositoryName='test_repository' + ) + + manifest = _create_image_manifest() + client.put_image( + repositoryName='test_repository', + imageManifest=json.dumps(manifest) + ) + + tags = ['v1', 'v2', 'latest'] + for tag in tags: + client.put_image( + repositoryName='test_repository', + imageManifest=json.dumps(manifest), + imageTag=tag + ) + + response = client.describe_images(repositoryName='test_repository', imageIds=[{'imageTag': tag}]) + len(response['imageDetails']).should.be(1) + image_detail = response['imageDetails'][0] + len(image_detail['imageTags']).should.equal(3) + image_detail['imageTags'].should.be.equal(tags) + + +@mock_ecr +def test_describe_images_tags_should_not_contain_empty_tag2(): + client = boto3.client('ecr', region_name='us-east-1') + _ = client.create_repository( + repositoryName='test_repository' + ) + + manifest = _create_image_manifest() + tags = ['v1', 'v2'] + for tag in tags: + client.put_image( + repositoryName='test_repository', + 
imageManifest=json.dumps(manifest), + imageTag=tag + ) + + client.put_image( + repositoryName='test_repository', + imageManifest=json.dumps(manifest) + ) + + client.put_image( + repositoryName='test_repository', + imageManifest=json.dumps(manifest), + imageTag='latest' + ) + + response = client.describe_images(repositoryName='test_repository', imageIds=[{'imageTag': tag}]) + len(response['imageDetails']).should.be(1) + image_detail = response['imageDetails'][0] + len(image_detail['imageTags']).should.equal(3) + image_detail['imageTags'].should.be.equal(['v1', 'v2', 'latest']) + + +@mock_ecr +def test_describe_repository_that_doesnt_exist(): + client = boto3.client('ecr', region_name='us-east-1') + + error_msg = re.compile( + r".*The repository with name 'repo-that-doesnt-exist' does not exist in the registry with id '123'.*", + re.MULTILINE) + client.describe_repositories.when.called_with( + repositoryNames=['repo-that-doesnt-exist'], + registryId='123', + ).should.throw(ClientError, error_msg) + +@mock_ecr +def test_describe_image_that_doesnt_exist(): + client = boto3.client('ecr', region_name='us-east-1') + client.create_repository(repositoryName='test_repository') + + error_msg1 = re.compile( + r".*The image with imageId {imageDigest:'null', imageTag:'testtag'} does not exist within " + r"the repository with name 'test_repository' in the registry with id '123'.*", + re.MULTILINE) + + client.describe_images.when.called_with( + repositoryName='test_repository', imageIds=[{'imageTag': 'testtag'}], registryId='123', + ).should.throw(ClientError, error_msg1) + + error_msg2 = re.compile( + r".*The repository with name 'repo-that-doesnt-exist' does not exist in the registry with id '123'.*", + re.MULTILINE) + client.describe_images.when.called_with( + repositoryName='repo-that-doesnt-exist', imageIds=[{'imageTag': 'testtag'}], registryId='123', + ).should.throw(ClientError, error_msg2) + + +@mock_ecr +def test_delete_repository_that_doesnt_exist(): + client = boto3.client('ecr', region_name='us-east-1') + + error_msg = re.compile( + r".*The repository with name 'repo-that-doesnt-exist' does not exist in the registry with id '123'.*", + re.MULTILINE) + + client.delete_repository.when.called_with( + repositoryName='repo-that-doesnt-exist', + registryId='123').should.throw( + ClientError, error_msg) + + +@mock_ecr +def test_describe_images_by_digest(): + client = boto3.client('ecr', region_name='us-east-1') + _ = client.create_repository( + repositoryName='test_repository' + ) + + tags = ['latest', 'v1', 'v2'] + digest_map = {} + for tag in tags: + put_response = client.put_image( + repositoryName='test_repository', + imageManifest=json.dumps(_create_image_manifest()), + imageTag=tag + ) + digest_map[put_response['image']['imageId']['imageDigest']] = put_response['image'] + + for digest, put_response in digest_map.items(): + response = client.describe_images(repositoryName='test_repository', + imageIds=[{'imageDigest': digest}]) + len(response['imageDetails']).should.be(1) + image_detail = response['imageDetails'][0] + image_detail['registryId'].should.equal("012345678910") + image_detail['repositoryName'].should.equal("test_repository") + image_detail['imageTags'].should.equal([put_response['imageId']['imageTag']]) + image_detail['imageDigest'].should.equal(digest) + + +@mock_ecr +def test_get_authorization_token_assume_region(): + client = boto3.client('ecr', region_name='us-east-1') + auth_token_response = client.get_authorization_token() + + 
auth_token_response.should.contain('authorizationData') + auth_token_response.should.contain('ResponseMetadata') + auth_token_response['authorizationData'].should.equal([ + { + 'authorizationToken': 'QVdTOjAxMjM0NTY3ODkxMC1hdXRoLXRva2Vu', + 'proxyEndpoint': 'https://012345678910.dkr.ecr.us-east-1.amazonaws.com', + 'expiresAt': datetime(2015, 1, 1, tzinfo=tzlocal()) + }, + ]) + + +@mock_ecr +def test_get_authorization_token_explicit_regions(): + client = boto3.client('ecr', region_name='us-east-1') + auth_token_response = client.get_authorization_token(registryIds=['10987654321', '878787878787']) + + auth_token_response.should.contain('authorizationData') + auth_token_response.should.contain('ResponseMetadata') + auth_token_response['authorizationData'].should.equal([ + { + 'authorizationToken': 'QVdTOjEwOTg3NjU0MzIxLWF1dGgtdG9rZW4=', + 'proxyEndpoint': 'https://10987654321.dkr.ecr.us-east-1.amazonaws.com', + 'expiresAt': datetime(2015, 1, 1, tzinfo=tzlocal()), + }, + { + 'authorizationToken': 'QVdTOjg3ODc4Nzg3ODc4Ny1hdXRoLXRva2Vu', + 'proxyEndpoint': 'https://878787878787.dkr.ecr.us-east-1.amazonaws.com', + 'expiresAt': datetime(2015, 1, 1, tzinfo=tzlocal()) + + } + ]) + + +@mock_ecr +def test_batch_get_image(): + client = boto3.client('ecr', region_name='us-east-1') + _ = client.create_repository( + repositoryName='test_repository' + ) + + _ = client.put_image( + repositoryName='test_repository', + imageManifest=json.dumps(_create_image_manifest()), + imageTag='latest' + ) + + _ = client.put_image( + repositoryName='test_repository', + imageManifest=json.dumps(_create_image_manifest()), + imageTag='v1' + ) + + _ = client.put_image( + repositoryName='test_repository', + imageManifest=json.dumps(_create_image_manifest()), + imageTag='v2' + ) + + response = client.batch_get_image( + repositoryName='test_repository', + imageIds=[ + { + 'imageTag': 'v2' + }, + ], + ) + + type(response['images']).should.be(list) + len(response['images']).should.be(1) + + response['images'][0]['imageManifest'].should.contain("vnd.docker.distribution.manifest.v2+json") + response['images'][0]['registryId'].should.equal("012345678910") + response['images'][0]['repositoryName'].should.equal("test_repository") + + response['images'][0]['imageId']['imageTag'].should.equal("v2") + response['images'][0]['imageId']['imageDigest'].should.contain("sha") + + type(response['failures']).should.be(list) + len(response['failures']).should.be(0) + + +@mock_ecr +def test_batch_get_image_that_doesnt_exist(): + client = boto3.client('ecr', region_name='us-east-1') + _ = client.create_repository( + repositoryName='test_repository' + ) + + _ = client.put_image( + repositoryName='test_repository', + imageManifest=json.dumps(_create_image_manifest()), + imageTag='latest' + ) + + _ = client.put_image( + repositoryName='test_repository', + imageManifest=json.dumps(_create_image_manifest()), + imageTag='v1' + ) + + _ = client.put_image( + repositoryName='test_repository', + imageManifest=json.dumps(_create_image_manifest()), + imageTag='v2' + ) + + response = client.batch_get_image( + repositoryName='test_repository', + imageIds=[ + { + 'imageTag': 'v5' + }, + ], + ) + + type(response['images']).should.be(list) + len(response['images']).should.be(0) + + type(response['failures']).should.be(list) + len(response['failures']).should.be(1) + response['failures'][0]['failureReason'].should.equal("Requested image not found") + response['failures'][0]['failureCode'].should.equal("ImageNotFound") + 
response['failures'][0]['imageId']['imageTag'].should.equal("v5") + + +@mock_ecr +def test_batch_get_image_no_tags(): + client = boto3.client('ecr', region_name='us-east-1') + _ = client.create_repository( + repositoryName='test_repository' + ) + + _ = client.put_image( + repositoryName='test_repository', + imageManifest=json.dumps(_create_image_manifest()), + imageTag='latest' + ) + + error_msg = re.compile( + r".*Missing required parameter in input: \"imageIds\".*", + re.MULTILINE) + + client.batch_get_image.when.called_with( + repositoryName='test_repository').should.throw( + ParamValidationError, error_msg) diff --git a/tests/test_ecs/test_ecs_boto3.py b/tests/test_ecs/test_ecs_boto3.py index 70c1463ee215..4bdba40d0a31 100644 --- a/tests/test_ecs/test_ecs_boto3.py +++ b/tests/test_ecs/test_ecs_boto3.py @@ -1,2214 +1,2214 @@ -from __future__ import unicode_literals - -from copy import deepcopy - -from botocore.exceptions import ClientError -import boto3 -import sure # noqa -import json -from moto.ec2 import utils as ec2_utils -from uuid import UUID - -from moto import mock_cloudformation, mock_elbv2 -from moto import mock_ecs -from moto import mock_ec2 -from nose.tools import assert_raises - - -@mock_ecs -def test_create_cluster(): - client = boto3.client('ecs', region_name='us-east-1') - response = client.create_cluster( - clusterName='test_ecs_cluster' - ) - response['cluster']['clusterName'].should.equal('test_ecs_cluster') - response['cluster']['clusterArn'].should.equal( - 'arn:aws:ecs:us-east-1:012345678910:cluster/test_ecs_cluster') - response['cluster']['status'].should.equal('ACTIVE') - response['cluster']['registeredContainerInstancesCount'].should.equal(0) - response['cluster']['runningTasksCount'].should.equal(0) - response['cluster']['pendingTasksCount'].should.equal(0) - response['cluster']['activeServicesCount'].should.equal(0) - - -@mock_ecs -def test_list_clusters(): - client = boto3.client('ecs', region_name='us-east-1') - _ = client.create_cluster( - clusterName='test_cluster0' - ) - _ = client.create_cluster( - clusterName='test_cluster1' - ) - response = client.list_clusters() - response['clusterArns'].should.contain( - 'arn:aws:ecs:us-east-1:012345678910:cluster/test_cluster0') - response['clusterArns'].should.contain( - 'arn:aws:ecs:us-east-1:012345678910:cluster/test_cluster1') - - -@mock_ecs -def test_delete_cluster(): - client = boto3.client('ecs', region_name='us-east-1') - _ = client.create_cluster( - clusterName='test_ecs_cluster' - ) - response = client.delete_cluster(cluster='test_ecs_cluster') - response['cluster']['clusterName'].should.equal('test_ecs_cluster') - response['cluster']['clusterArn'].should.equal( - 'arn:aws:ecs:us-east-1:012345678910:cluster/test_ecs_cluster') - response['cluster']['status'].should.equal('ACTIVE') - response['cluster']['registeredContainerInstancesCount'].should.equal(0) - response['cluster']['runningTasksCount'].should.equal(0) - response['cluster']['pendingTasksCount'].should.equal(0) - response['cluster']['activeServicesCount'].should.equal(0) - - response = client.list_clusters() - len(response['clusterArns']).should.equal(0) - - -@mock_ecs -def test_register_task_definition(): - client = boto3.client('ecs', region_name='us-east-1') - response = client.register_task_definition( - family='test_ecs_task', - containerDefinitions=[ - { - 'name': 'hello_world', - 'image': 'docker/hello-world:latest', - 'cpu': 1024, - 'memory': 400, - 'essential': True, - 'environment': [{ - 'name': 'AWS_ACCESS_KEY_ID', - 'value': 
'SOME_ACCESS_KEY' - }], - 'logConfiguration': {'logDriver': 'json-file'} - } - ] - ) - type(response['taskDefinition']).should.be(dict) - response['taskDefinition']['revision'].should.equal(1) - response['taskDefinition']['taskDefinitionArn'].should.equal( - 'arn:aws:ecs:us-east-1:012345678910:task-definition/test_ecs_task:1') - response['taskDefinition']['containerDefinitions'][ - 0]['name'].should.equal('hello_world') - response['taskDefinition']['containerDefinitions'][0][ - 'image'].should.equal('docker/hello-world:latest') - response['taskDefinition']['containerDefinitions'][ - 0]['cpu'].should.equal(1024) - response['taskDefinition']['containerDefinitions'][ - 0]['memory'].should.equal(400) - response['taskDefinition']['containerDefinitions'][ - 0]['essential'].should.equal(True) - response['taskDefinition']['containerDefinitions'][0][ - 'environment'][0]['name'].should.equal('AWS_ACCESS_KEY_ID') - response['taskDefinition']['containerDefinitions'][0][ - 'environment'][0]['value'].should.equal('SOME_ACCESS_KEY') - response['taskDefinition']['containerDefinitions'][0][ - 'logConfiguration']['logDriver'].should.equal('json-file') - - -@mock_ecs -def test_list_task_definitions(): - client = boto3.client('ecs', region_name='us-east-1') - _ = client.register_task_definition( - family='test_ecs_task', - containerDefinitions=[ - { - 'name': 'hello_world', - 'image': 'docker/hello-world:latest', - 'cpu': 1024, - 'memory': 400, - 'essential': True, - 'environment': [{ - 'name': 'AWS_ACCESS_KEY_ID', - 'value': 'SOME_ACCESS_KEY' - }], - 'logConfiguration': {'logDriver': 'json-file'} - } - ] - ) - _ = client.register_task_definition( - family='test_ecs_task', - containerDefinitions=[ - { - 'name': 'hello_world2', - 'image': 'docker/hello-world2:latest', - 'cpu': 1024, - 'memory': 400, - 'essential': True, - 'environment': [{ - 'name': 'AWS_ACCESS_KEY_ID', - 'value': 'SOME_ACCESS_KEY2' - }], - 'logConfiguration': {'logDriver': 'json-file'} - } - ] - ) - response = client.list_task_definitions() - len(response['taskDefinitionArns']).should.equal(2) - response['taskDefinitionArns'][0].should.equal( - 'arn:aws:ecs:us-east-1:012345678910:task-definition/test_ecs_task:1') - response['taskDefinitionArns'][1].should.equal( - 'arn:aws:ecs:us-east-1:012345678910:task-definition/test_ecs_task:2') - - -@mock_ecs -def test_describe_task_definition(): - client = boto3.client('ecs', region_name='us-east-1') - _ = client.register_task_definition( - family='test_ecs_task', - containerDefinitions=[ - { - 'name': 'hello_world', - 'image': 'docker/hello-world:latest', - 'cpu': 1024, - 'memory': 400, - 'essential': True, - 'environment': [{ - 'name': 'AWS_ACCESS_KEY_ID', - 'value': 'SOME_ACCESS_KEY' - }], - 'logConfiguration': {'logDriver': 'json-file'} - } - ] - ) - _ = client.register_task_definition( - family='test_ecs_task', - containerDefinitions=[ - { - 'name': 'hello_world2', - 'image': 'docker/hello-world2:latest', - 'cpu': 1024, - 'memory': 400, - 'essential': True, - 'environment': [{ - 'name': 'AWS_ACCESS_KEY_ID', - 'value': 'SOME_ACCESS_KEY2' - }], - 'logConfiguration': {'logDriver': 'json-file'} - } - ] - ) - _ = client.register_task_definition( - family='test_ecs_task', - containerDefinitions=[ - { - 'name': 'hello_world3', - 'image': 'docker/hello-world3:latest', - 'cpu': 1024, - 'memory': 400, - 'essential': True, - 'environment': [{ - 'name': 'AWS_ACCESS_KEY_ID', - 'value': 'SOME_ACCESS_KEY3' - }], - 'logConfiguration': {'logDriver': 'json-file'} - } - ] - ) - response = 
client.describe_task_definition(taskDefinition='test_ecs_task') - response['taskDefinition']['taskDefinitionArn'].should.equal( - 'arn:aws:ecs:us-east-1:012345678910:task-definition/test_ecs_task:3') - - response = client.describe_task_definition( - taskDefinition='test_ecs_task:2') - response['taskDefinition']['taskDefinitionArn'].should.equal( - 'arn:aws:ecs:us-east-1:012345678910:task-definition/test_ecs_task:2') - - -@mock_ecs -def test_deregister_task_definition(): - client = boto3.client('ecs', region_name='us-east-1') - _ = client.register_task_definition( - family='test_ecs_task', - containerDefinitions=[ - { - 'name': 'hello_world', - 'image': 'docker/hello-world:latest', - 'cpu': 1024, - 'memory': 400, - 'essential': True, - 'environment': [{ - 'name': 'AWS_ACCESS_KEY_ID', - 'value': 'SOME_ACCESS_KEY' - }], - 'logConfiguration': {'logDriver': 'json-file'} - } - ] - ) - response = client.deregister_task_definition( - taskDefinition='test_ecs_task:1' - ) - type(response['taskDefinition']).should.be(dict) - response['taskDefinition']['taskDefinitionArn'].should.equal( - 'arn:aws:ecs:us-east-1:012345678910:task-definition/test_ecs_task:1') - response['taskDefinition']['containerDefinitions'][ - 0]['name'].should.equal('hello_world') - response['taskDefinition']['containerDefinitions'][0][ - 'image'].should.equal('docker/hello-world:latest') - response['taskDefinition']['containerDefinitions'][ - 0]['cpu'].should.equal(1024) - response['taskDefinition']['containerDefinitions'][ - 0]['memory'].should.equal(400) - response['taskDefinition']['containerDefinitions'][ - 0]['essential'].should.equal(True) - response['taskDefinition']['containerDefinitions'][0][ - 'environment'][0]['name'].should.equal('AWS_ACCESS_KEY_ID') - response['taskDefinition']['containerDefinitions'][0][ - 'environment'][0]['value'].should.equal('SOME_ACCESS_KEY') - response['taskDefinition']['containerDefinitions'][0][ - 'logConfiguration']['logDriver'].should.equal('json-file') - - -@mock_ecs -def test_create_service(): - client = boto3.client('ecs', region_name='us-east-1') - _ = client.create_cluster( - clusterName='test_ecs_cluster' - ) - _ = client.register_task_definition( - family='test_ecs_task', - containerDefinitions=[ - { - 'name': 'hello_world', - 'image': 'docker/hello-world:latest', - 'cpu': 1024, - 'memory': 400, - 'essential': True, - 'environment': [{ - 'name': 'AWS_ACCESS_KEY_ID', - 'value': 'SOME_ACCESS_KEY' - }], - 'logConfiguration': {'logDriver': 'json-file'} - } - ] - ) - response = client.create_service( - cluster='test_ecs_cluster', - serviceName='test_ecs_service', - taskDefinition='test_ecs_task', - desiredCount=2 - ) - response['service']['clusterArn'].should.equal( - 'arn:aws:ecs:us-east-1:012345678910:cluster/test_ecs_cluster') - response['service']['desiredCount'].should.equal(2) - len(response['service']['events']).should.equal(0) - len(response['service']['loadBalancers']).should.equal(0) - response['service']['pendingCount'].should.equal(0) - response['service']['runningCount'].should.equal(0) - response['service']['serviceArn'].should.equal( - 'arn:aws:ecs:us-east-1:012345678910:service/test_ecs_service') - response['service']['serviceName'].should.equal('test_ecs_service') - response['service']['status'].should.equal('ACTIVE') - response['service']['taskDefinition'].should.equal( - 'arn:aws:ecs:us-east-1:012345678910:task-definition/test_ecs_task:1') - response['service']['schedulingStrategy'].should.equal('REPLICA') - -@mock_ecs -def test_create_service_scheduling_strategy(): - 
client = boto3.client('ecs', region_name='us-east-1') - _ = client.create_cluster( - clusterName='test_ecs_cluster' - ) - _ = client.register_task_definition( - family='test_ecs_task', - containerDefinitions=[ - { - 'name': 'hello_world', - 'image': 'docker/hello-world:latest', - 'cpu': 1024, - 'memory': 400, - 'essential': True, - 'environment': [{ - 'name': 'AWS_ACCESS_KEY_ID', - 'value': 'SOME_ACCESS_KEY' - }], - 'logConfiguration': {'logDriver': 'json-file'} - } - ] - ) - response = client.create_service( - cluster='test_ecs_cluster', - serviceName='test_ecs_service', - taskDefinition='test_ecs_task', - desiredCount=2, - schedulingStrategy='DAEMON', - ) - response['service']['clusterArn'].should.equal( - 'arn:aws:ecs:us-east-1:012345678910:cluster/test_ecs_cluster') - response['service']['desiredCount'].should.equal(2) - len(response['service']['events']).should.equal(0) - len(response['service']['loadBalancers']).should.equal(0) - response['service']['pendingCount'].should.equal(0) - response['service']['runningCount'].should.equal(0) - response['service']['serviceArn'].should.equal( - 'arn:aws:ecs:us-east-1:012345678910:service/test_ecs_service') - response['service']['serviceName'].should.equal('test_ecs_service') - response['service']['status'].should.equal('ACTIVE') - response['service']['taskDefinition'].should.equal( - 'arn:aws:ecs:us-east-1:012345678910:task-definition/test_ecs_task:1') - response['service']['schedulingStrategy'].should.equal('DAEMON') - - -@mock_ecs -def test_list_services(): - client = boto3.client('ecs', region_name='us-east-1') - _ = client.create_cluster( - clusterName='test_ecs_cluster' - ) - _ = client.register_task_definition( - family='test_ecs_task', - containerDefinitions=[ - { - 'name': 'hello_world', - 'image': 'docker/hello-world:latest', - 'cpu': 1024, - 'memory': 400, - 'essential': True, - 'environment': [{ - 'name': 'AWS_ACCESS_KEY_ID', - 'value': 'SOME_ACCESS_KEY' - }], - 'logConfiguration': {'logDriver': 'json-file'} - } - ] - ) - _ = client.create_service( - cluster='test_ecs_cluster', - serviceName='test_ecs_service1', - taskDefinition='test_ecs_task', - desiredCount=2 - ) - _ = client.create_service( - cluster='test_ecs_cluster', - serviceName='test_ecs_service2', - taskDefinition='test_ecs_task', - desiredCount=2 - ) - response = client.list_services( - cluster='test_ecs_cluster' - ) - len(response['serviceArns']).should.equal(2) - response['serviceArns'][0].should.equal( - 'arn:aws:ecs:us-east-1:012345678910:service/test_ecs_service1') - response['serviceArns'][1].should.equal( - 'arn:aws:ecs:us-east-1:012345678910:service/test_ecs_service2') - - -@mock_ecs -def test_describe_services(): - client = boto3.client('ecs', region_name='us-east-1') - _ = client.create_cluster( - clusterName='test_ecs_cluster' - ) - _ = client.register_task_definition( - family='test_ecs_task', - containerDefinitions=[ - { - 'name': 'hello_world', - 'image': 'docker/hello-world:latest', - 'cpu': 1024, - 'memory': 400, - 'essential': True, - 'environment': [{ - 'name': 'AWS_ACCESS_KEY_ID', - 'value': 'SOME_ACCESS_KEY' - }], - 'logConfiguration': {'logDriver': 'json-file'} - } - ] - ) - _ = client.create_service( - cluster='test_ecs_cluster', - serviceName='test_ecs_service1', - taskDefinition='test_ecs_task', - desiredCount=2 - ) - _ = client.create_service( - cluster='test_ecs_cluster', - serviceName='test_ecs_service2', - taskDefinition='test_ecs_task', - desiredCount=2 - ) - _ = client.create_service( - cluster='test_ecs_cluster', - 
serviceName='test_ecs_service3', - taskDefinition='test_ecs_task', - desiredCount=2 - ) - response = client.describe_services( - cluster='test_ecs_cluster', - services=['test_ecs_service1', - 'arn:aws:ecs:us-east-1:012345678910:service/test_ecs_service2'] - ) - len(response['services']).should.equal(2) - response['services'][0]['serviceArn'].should.equal( - 'arn:aws:ecs:us-east-1:012345678910:service/test_ecs_service1') - response['services'][0]['serviceName'].should.equal('test_ecs_service1') - response['services'][1]['serviceArn'].should.equal( - 'arn:aws:ecs:us-east-1:012345678910:service/test_ecs_service2') - response['services'][1]['serviceName'].should.equal('test_ecs_service2') - - response['services'][0]['deployments'][0]['desiredCount'].should.equal(2) - response['services'][0]['deployments'][0]['pendingCount'].should.equal(2) - response['services'][0]['deployments'][0]['runningCount'].should.equal(0) - response['services'][0]['deployments'][0]['status'].should.equal('PRIMARY') - - -@mock_ecs -def test_describe_services_scheduling_strategy(): - client = boto3.client('ecs', region_name='us-east-1') - _ = client.create_cluster( - clusterName='test_ecs_cluster' - ) - _ = client.register_task_definition( - family='test_ecs_task', - containerDefinitions=[ - { - 'name': 'hello_world', - 'image': 'docker/hello-world:latest', - 'cpu': 1024, - 'memory': 400, - 'essential': True, - 'environment': [{ - 'name': 'AWS_ACCESS_KEY_ID', - 'value': 'SOME_ACCESS_KEY' - }], - 'logConfiguration': {'logDriver': 'json-file'} - } - ] - ) - _ = client.create_service( - cluster='test_ecs_cluster', - serviceName='test_ecs_service1', - taskDefinition='test_ecs_task', - desiredCount=2 - ) - _ = client.create_service( - cluster='test_ecs_cluster', - serviceName='test_ecs_service2', - taskDefinition='test_ecs_task', - desiredCount=2, - schedulingStrategy='DAEMON' - ) - _ = client.create_service( - cluster='test_ecs_cluster', - serviceName='test_ecs_service3', - taskDefinition='test_ecs_task', - desiredCount=2 - ) - response = client.describe_services( - cluster='test_ecs_cluster', - services=['test_ecs_service1', - 'arn:aws:ecs:us-east-1:012345678910:service/test_ecs_service2', - 'test_ecs_service3'] - ) - len(response['services']).should.equal(3) - response['services'][0]['serviceArn'].should.equal( - 'arn:aws:ecs:us-east-1:012345678910:service/test_ecs_service1') - response['services'][0]['serviceName'].should.equal('test_ecs_service1') - response['services'][1]['serviceArn'].should.equal( - 'arn:aws:ecs:us-east-1:012345678910:service/test_ecs_service2') - response['services'][1]['serviceName'].should.equal('test_ecs_service2') - - response['services'][0]['deployments'][0]['desiredCount'].should.equal(2) - response['services'][0]['deployments'][0]['pendingCount'].should.equal(2) - response['services'][0]['deployments'][0]['runningCount'].should.equal(0) - response['services'][0]['deployments'][0]['status'].should.equal('PRIMARY') - - response['services'][0]['schedulingStrategy'].should.equal('REPLICA') - response['services'][1]['schedulingStrategy'].should.equal('DAEMON') - response['services'][2]['schedulingStrategy'].should.equal('REPLICA') - - -@mock_ecs -def test_update_service(): - client = boto3.client('ecs', region_name='us-east-1') - _ = client.create_cluster( - clusterName='test_ecs_cluster' - ) - _ = client.register_task_definition( - family='test_ecs_task', - containerDefinitions=[ - { - 'name': 'hello_world', - 'image': 'docker/hello-world:latest', - 'cpu': 1024, - 'memory': 400, - 'essential': 
True, - 'environment': [{ - 'name': 'AWS_ACCESS_KEY_ID', - 'value': 'SOME_ACCESS_KEY' - }], - 'logConfiguration': {'logDriver': 'json-file'} - } - ] - ) - response = client.create_service( - cluster='test_ecs_cluster', - serviceName='test_ecs_service', - taskDefinition='test_ecs_task', - desiredCount=2 - ) - response['service']['desiredCount'].should.equal(2) - - response = client.update_service( - cluster='test_ecs_cluster', - service='test_ecs_service', - taskDefinition='test_ecs_task', - desiredCount=0 - ) - response['service']['desiredCount'].should.equal(0) - response['service']['schedulingStrategy'].should.equal('REPLICA') - - -@mock_ecs -def test_update_missing_service(): - client = boto3.client('ecs', region_name='us-east-1') - _ = client.create_cluster( - clusterName='test_ecs_cluster' - ) - - client.update_service.when.called_with( - cluster='test_ecs_cluster', - service='test_ecs_service', - taskDefinition='test_ecs_task', - desiredCount=0 - ).should.throw(ClientError) - - -@mock_ecs -def test_delete_service(): - client = boto3.client('ecs', region_name='us-east-1') - _ = client.create_cluster( - clusterName='test_ecs_cluster' - ) - _ = client.register_task_definition( - family='test_ecs_task', - containerDefinitions=[ - { - 'name': 'hello_world', - 'image': 'docker/hello-world:latest', - 'cpu': 1024, - 'memory': 400, - 'essential': True, - 'environment': [{ - 'name': 'AWS_ACCESS_KEY_ID', - 'value': 'SOME_ACCESS_KEY' - }], - 'logConfiguration': {'logDriver': 'json-file'} - } - ] - ) - _ = client.create_service( - cluster='test_ecs_cluster', - serviceName='test_ecs_service', - taskDefinition='test_ecs_task', - desiredCount=2 - ) - _ = client.update_service( - cluster='test_ecs_cluster', - service='test_ecs_service', - desiredCount=0 - ) - response = client.delete_service( - cluster='test_ecs_cluster', - service='test_ecs_service' - ) - response['service']['clusterArn'].should.equal( - 'arn:aws:ecs:us-east-1:012345678910:cluster/test_ecs_cluster') - response['service']['desiredCount'].should.equal(0) - len(response['service']['events']).should.equal(0) - len(response['service']['loadBalancers']).should.equal(0) - response['service']['pendingCount'].should.equal(0) - response['service']['runningCount'].should.equal(0) - response['service']['serviceArn'].should.equal( - 'arn:aws:ecs:us-east-1:012345678910:service/test_ecs_service') - response['service']['serviceName'].should.equal('test_ecs_service') - response['service']['status'].should.equal('ACTIVE') - response['service']['schedulingStrategy'].should.equal('REPLICA') - response['service']['taskDefinition'].should.equal( - 'arn:aws:ecs:us-east-1:012345678910:task-definition/test_ecs_task:1') - - - -@mock_ec2 -@mock_ecs -def test_register_container_instance(): - ecs_client = boto3.client('ecs', region_name='us-east-1') - ec2 = boto3.resource('ec2', region_name='us-east-1') - - test_cluster_name = 'test_ecs_cluster' - - _ = ecs_client.create_cluster( - clusterName=test_cluster_name - ) - - test_instance = ec2.create_instances( - ImageId="ami-1234abcd", - MinCount=1, - MaxCount=1, - )[0] - - instance_id_document = json.dumps( - ec2_utils.generate_instance_identity_document(test_instance) - ) - - response = ecs_client.register_container_instance( - cluster=test_cluster_name, - instanceIdentityDocument=instance_id_document - ) - - response['containerInstance'][ - 'ec2InstanceId'].should.equal(test_instance.id) - full_arn = response['containerInstance']['containerInstanceArn'] - arn_part = full_arn.split('/') - 
arn_part[0].should.equal( - 'arn:aws:ecs:us-east-1:012345678910:container-instance') - arn_part[1].should.equal(str(UUID(arn_part[1]))) - response['containerInstance']['status'].should.equal('ACTIVE') - len(response['containerInstance']['registeredResources']).should.equal(4) - len(response['containerInstance']['remainingResources']).should.equal(4) - response['containerInstance']['agentConnected'].should.equal(True) - response['containerInstance']['versionInfo'][ - 'agentVersion'].should.equal('1.0.0') - response['containerInstance']['versionInfo'][ - 'agentHash'].should.equal('4023248') - response['containerInstance']['versionInfo'][ - 'dockerVersion'].should.equal('DockerVersion: 1.5.0') - - -@mock_ec2 -@mock_ecs -def test_deregister_container_instance(): - ecs_client = boto3.client('ecs', region_name='us-east-1') - ec2 = boto3.resource('ec2', region_name='us-east-1') - - test_cluster_name = 'test_ecs_cluster' - - _ = ecs_client.create_cluster( - clusterName=test_cluster_name - ) - - test_instance = ec2.create_instances( - ImageId="ami-1234abcd", - MinCount=1, - MaxCount=1, - )[0] - - instance_id_document = json.dumps( - ec2_utils.generate_instance_identity_document(test_instance) - ) - - response = ecs_client.register_container_instance( - cluster=test_cluster_name, - instanceIdentityDocument=instance_id_document - ) - container_instance_id = response['containerInstance']['containerInstanceArn'] - response = ecs_client.deregister_container_instance( - cluster=test_cluster_name, - containerInstance=container_instance_id - ) - container_instances_response = ecs_client.list_container_instances( - cluster=test_cluster_name - ) - len(container_instances_response['containerInstanceArns']).should.equal(0) - - response = ecs_client.register_container_instance( - cluster=test_cluster_name, - instanceIdentityDocument=instance_id_document - ) - container_instance_id = response['containerInstance']['containerInstanceArn'] - _ = ecs_client.register_task_definition( - family='test_ecs_task', - containerDefinitions=[ - { - 'name': 'hello_world', - 'image': 'docker/hello-world:latest', - 'cpu': 1024, - 'memory': 400, - 'essential': True, - 'environment': [{ - 'name': 'AWS_ACCESS_KEY_ID', - 'value': 'SOME_ACCESS_KEY' - }], - 'logConfiguration': {'logDriver': 'json-file'} - } - ] - ) - - response = ecs_client.start_task( - cluster='test_ecs_cluster', - taskDefinition='test_ecs_task', - overrides={}, - containerInstances=[container_instance_id], - startedBy='moto' - ) - with assert_raises(Exception) as e: - ecs_client.deregister_container_instance( - cluster=test_cluster_name, - containerInstance=container_instance_id - ).should.have.raised(Exception) - container_instances_response = ecs_client.list_container_instances( - cluster=test_cluster_name - ) - len(container_instances_response['containerInstanceArns']).should.equal(1) - ecs_client.deregister_container_instance( - cluster=test_cluster_name, - containerInstance=container_instance_id, - force=True - ) - container_instances_response = ecs_client.list_container_instances( - cluster=test_cluster_name - ) - len(container_instances_response['containerInstanceArns']).should.equal(0) - - -@mock_ec2 -@mock_ecs -def test_list_container_instances(): - ecs_client = boto3.client('ecs', region_name='us-east-1') - ec2 = boto3.resource('ec2', region_name='us-east-1') - - test_cluster_name = 'test_ecs_cluster' - _ = ecs_client.create_cluster( - clusterName=test_cluster_name - ) - - instance_to_create = 3 - test_instance_arns = [] - for i in range(0, 
instance_to_create): - test_instance = ec2.create_instances( - ImageId="ami-1234abcd", - MinCount=1, - MaxCount=1, - )[0] - - instance_id_document = json.dumps( - ec2_utils.generate_instance_identity_document(test_instance) - ) - - response = ecs_client.register_container_instance( - cluster=test_cluster_name, - instanceIdentityDocument=instance_id_document) - - test_instance_arns.append(response['containerInstance'][ - 'containerInstanceArn']) - - response = ecs_client.list_container_instances(cluster=test_cluster_name) - - len(response['containerInstanceArns']).should.equal(instance_to_create) - for arn in test_instance_arns: - response['containerInstanceArns'].should.contain(arn) - - -@mock_ec2 -@mock_ecs -def test_describe_container_instances(): - ecs_client = boto3.client('ecs', region_name='us-east-1') - ec2 = boto3.resource('ec2', region_name='us-east-1') - - test_cluster_name = 'test_ecs_cluster' - _ = ecs_client.create_cluster( - clusterName=test_cluster_name - ) - - instance_to_create = 3 - test_instance_arns = [] - for i in range(0, instance_to_create): - test_instance = ec2.create_instances( - ImageId="ami-1234abcd", - MinCount=1, - MaxCount=1, - )[0] - - instance_id_document = json.dumps( - ec2_utils.generate_instance_identity_document(test_instance) - ) - - response = ecs_client.register_container_instance( - cluster=test_cluster_name, - instanceIdentityDocument=instance_id_document) - - test_instance_arns.append(response['containerInstance'][ - 'containerInstanceArn']) - - test_instance_ids = list( - map((lambda x: x.split('/')[1]), test_instance_arns)) - response = ecs_client.describe_container_instances( - cluster=test_cluster_name, containerInstances=test_instance_ids) - len(response['failures']).should.equal(0) - len(response['containerInstances']).should.equal(instance_to_create) - response_arns = [ci['containerInstanceArn'] - for ci in response['containerInstances']] - for arn in test_instance_arns: - response_arns.should.contain(arn) - for instance in response['containerInstances']: - instance.keys().should.contain('runningTasksCount') - instance.keys().should.contain('pendingTasksCount') - - -@mock_ec2 -@mock_ecs -def test_update_container_instances_state(): - ecs_client = boto3.client('ecs', region_name='us-east-1') - ec2 = boto3.resource('ec2', region_name='us-east-1') - - test_cluster_name = 'test_ecs_cluster' - _ = ecs_client.create_cluster( - clusterName=test_cluster_name - ) - - instance_to_create = 3 - test_instance_arns = [] - for i in range(0, instance_to_create): - test_instance = ec2.create_instances( - ImageId="ami-1234abcd", - MinCount=1, - MaxCount=1, - )[0] - - instance_id_document = json.dumps( - ec2_utils.generate_instance_identity_document(test_instance) - ) - - response = ecs_client.register_container_instance( - cluster=test_cluster_name, - instanceIdentityDocument=instance_id_document) - - test_instance_arns.append(response['containerInstance']['containerInstanceArn']) - - test_instance_ids = list(map((lambda x: x.split('/')[1]), test_instance_arns)) - response = ecs_client.update_container_instances_state(cluster=test_cluster_name, - containerInstances=test_instance_ids, - status='DRAINING') - len(response['failures']).should.equal(0) - len(response['containerInstances']).should.equal(instance_to_create) - response_statuses = [ci['status'] for ci in response['containerInstances']] - for status in response_statuses: - status.should.equal('DRAINING') - response = ecs_client.update_container_instances_state(cluster=test_cluster_name, - 
containerInstances=test_instance_ids, - status='DRAINING') - len(response['failures']).should.equal(0) - len(response['containerInstances']).should.equal(instance_to_create) - response_statuses = [ci['status'] for ci in response['containerInstances']] - for status in response_statuses: - status.should.equal('DRAINING') - response = ecs_client.update_container_instances_state(cluster=test_cluster_name, - containerInstances=test_instance_ids, - status='ACTIVE') - len(response['failures']).should.equal(0) - len(response['containerInstances']).should.equal(instance_to_create) - response_statuses = [ci['status'] for ci in response['containerInstances']] - for status in response_statuses: - status.should.equal('ACTIVE') - ecs_client.update_container_instances_state.when.called_with(cluster=test_cluster_name, - containerInstances=test_instance_ids, - status='test_status').should.throw(Exception) - - -@mock_ec2 -@mock_ecs -def test_run_task(): - client = boto3.client('ecs', region_name='us-east-1') - ec2 = boto3.resource('ec2', region_name='us-east-1') - - test_cluster_name = 'test_ecs_cluster' - - _ = client.create_cluster( - clusterName=test_cluster_name - ) - - test_instance = ec2.create_instances( - ImageId="ami-1234abcd", - MinCount=1, - MaxCount=1, - )[0] - - instance_id_document = json.dumps( - ec2_utils.generate_instance_identity_document(test_instance) - ) - - response = client.register_container_instance( - cluster=test_cluster_name, - instanceIdentityDocument=instance_id_document - ) - - _ = client.register_task_definition( - family='test_ecs_task', - containerDefinitions=[ - { - 'name': 'hello_world', - 'image': 'docker/hello-world:latest', - 'cpu': 1024, - 'memory': 400, - 'essential': True, - 'environment': [{ - 'name': 'AWS_ACCESS_KEY_ID', - 'value': 'SOME_ACCESS_KEY' - }], - 'logConfiguration': {'logDriver': 'json-file'} - } - ] - ) - response = client.run_task( - cluster='test_ecs_cluster', - overrides={}, - taskDefinition='test_ecs_task', - count=2, - startedBy='moto' - ) - len(response['tasks']).should.equal(2) - response['tasks'][0]['taskArn'].should.contain( - 'arn:aws:ecs:us-east-1:012345678910:task/') - response['tasks'][0]['clusterArn'].should.equal( - 'arn:aws:ecs:us-east-1:012345678910:cluster/test_ecs_cluster') - response['tasks'][0]['taskDefinitionArn'].should.equal( - 'arn:aws:ecs:us-east-1:012345678910:task-definition/test_ecs_task:1') - response['tasks'][0]['containerInstanceArn'].should.contain( - 'arn:aws:ecs:us-east-1:012345678910:container-instance/') - response['tasks'][0]['overrides'].should.equal({}) - response['tasks'][0]['lastStatus'].should.equal("RUNNING") - response['tasks'][0]['desiredStatus'].should.equal("RUNNING") - response['tasks'][0]['startedBy'].should.equal("moto") - response['tasks'][0]['stoppedReason'].should.equal("") - - -@mock_ec2 -@mock_ecs -def test_start_task(): - client = boto3.client('ecs', region_name='us-east-1') - ec2 = boto3.resource('ec2', region_name='us-east-1') - - test_cluster_name = 'test_ecs_cluster' - - _ = client.create_cluster( - clusterName=test_cluster_name - ) - - test_instance = ec2.create_instances( - ImageId="ami-1234abcd", - MinCount=1, - MaxCount=1, - )[0] - - instance_id_document = json.dumps( - ec2_utils.generate_instance_identity_document(test_instance) - ) - - response = client.register_container_instance( - cluster=test_cluster_name, - instanceIdentityDocument=instance_id_document - ) - - container_instances = client.list_container_instances( - cluster=test_cluster_name) - container_instance_id = 
container_instances[ - 'containerInstanceArns'][0].split('/')[-1] - - _ = client.register_task_definition( - family='test_ecs_task', - containerDefinitions=[ - { - 'name': 'hello_world', - 'image': 'docker/hello-world:latest', - 'cpu': 1024, - 'memory': 400, - 'essential': True, - 'environment': [{ - 'name': 'AWS_ACCESS_KEY_ID', - 'value': 'SOME_ACCESS_KEY' - }], - 'logConfiguration': {'logDriver': 'json-file'} - } - ] - ) - - response = client.start_task( - cluster='test_ecs_cluster', - taskDefinition='test_ecs_task', - overrides={}, - containerInstances=[container_instance_id], - startedBy='moto' - ) - - len(response['tasks']).should.equal(1) - response['tasks'][0]['taskArn'].should.contain( - 'arn:aws:ecs:us-east-1:012345678910:task/') - response['tasks'][0]['clusterArn'].should.equal( - 'arn:aws:ecs:us-east-1:012345678910:cluster/test_ecs_cluster') - response['tasks'][0]['taskDefinitionArn'].should.equal( - 'arn:aws:ecs:us-east-1:012345678910:task-definition/test_ecs_task:1') - response['tasks'][0]['containerInstanceArn'].should.equal( - 'arn:aws:ecs:us-east-1:012345678910:container-instance/{0}'.format(container_instance_id)) - response['tasks'][0]['overrides'].should.equal({}) - response['tasks'][0]['lastStatus'].should.equal("RUNNING") - response['tasks'][0]['desiredStatus'].should.equal("RUNNING") - response['tasks'][0]['startedBy'].should.equal("moto") - response['tasks'][0]['stoppedReason'].should.equal("") - - -@mock_ec2 -@mock_ecs -def test_list_tasks(): - client = boto3.client('ecs', region_name='us-east-1') - ec2 = boto3.resource('ec2', region_name='us-east-1') - - test_cluster_name = 'test_ecs_cluster' - - _ = client.create_cluster( - clusterName=test_cluster_name - ) - - test_instance = ec2.create_instances( - ImageId="ami-1234abcd", - MinCount=1, - MaxCount=1, - )[0] - - instance_id_document = json.dumps( - ec2_utils.generate_instance_identity_document(test_instance) - ) - - _ = client.register_container_instance( - cluster=test_cluster_name, - instanceIdentityDocument=instance_id_document - ) - - container_instances = client.list_container_instances( - cluster=test_cluster_name) - container_instance_id = container_instances[ - 'containerInstanceArns'][0].split('/')[-1] - - _ = client.register_task_definition( - family='test_ecs_task', - containerDefinitions=[ - { - 'name': 'hello_world', - 'image': 'docker/hello-world:latest', - 'cpu': 1024, - 'memory': 400, - 'essential': True, - 'environment': [{ - 'name': 'AWS_ACCESS_KEY_ID', - 'value': 'SOME_ACCESS_KEY' - }], - 'logConfiguration': {'logDriver': 'json-file'} - } - ] - ) - - _ = client.start_task( - cluster='test_ecs_cluster', - taskDefinition='test_ecs_task', - overrides={}, - containerInstances=[container_instance_id], - startedBy='foo' - ) - - _ = client.start_task( - cluster='test_ecs_cluster', - taskDefinition='test_ecs_task', - overrides={}, - containerInstances=[container_instance_id], - startedBy='bar' - ) - - assert len(client.list_tasks()['taskArns']).should.equal(2) - assert len(client.list_tasks(cluster='test_ecs_cluster') - ['taskArns']).should.equal(2) - assert len(client.list_tasks(startedBy='foo')['taskArns']).should.equal(1) - - -@mock_ec2 -@mock_ecs -def test_describe_tasks(): - client = boto3.client('ecs', region_name='us-east-1') - ec2 = boto3.resource('ec2', region_name='us-east-1') - - test_cluster_name = 'test_ecs_cluster' - - _ = client.create_cluster( - clusterName=test_cluster_name - ) - - test_instance = ec2.create_instances( - ImageId="ami-1234abcd", - MinCount=1, - MaxCount=1, - )[0] - - 
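- # Tasks and container instances can be addressed either by full ARN or by
- # the trailing id alone; a minimal sketch of the reduction this file uses
- # repeatedly (the sample ARN below is illustrative only):
- sample_arn = 'arn:aws:ecs:us-east-1:012345678910:task/8e7f6c0b-5b76-4f0e-9d0a-aaaaaaaaaaaa'
- sample_arn.split('/')[-1].should.equal('8e7f6c0b-5b76-4f0e-9d0a-aaaaaaaaaaaa')
-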
instance_id_document = json.dumps(
- ec2_utils.generate_instance_identity_document(test_instance)
- )
-
- response = client.register_container_instance(
- cluster=test_cluster_name,
- instanceIdentityDocument=instance_id_document
- )
-
- _ = client.register_task_definition(
- family='test_ecs_task',
- containerDefinitions=[
- {
- 'name': 'hello_world',
- 'image': 'docker/hello-world:latest',
- 'cpu': 1024,
- 'memory': 400,
- 'essential': True,
- 'environment': [{
- 'name': 'AWS_ACCESS_KEY_ID',
- 'value': 'SOME_ACCESS_KEY'
- }],
- 'logConfiguration': {'logDriver': 'json-file'}
- }
- ]
- )
- tasks_arns = [
- task['taskArn'] for task in client.run_task(
- cluster='test_ecs_cluster',
- overrides={},
- taskDefinition='test_ecs_task',
- count=2,
- startedBy='moto'
- )['tasks']
- ]
- response = client.describe_tasks(
- cluster='test_ecs_cluster',
- tasks=tasks_arns
- )
-
- len(response['tasks']).should.equal(2)
- set([response['tasks'][0]['taskArn'], response['tasks']
- [1]['taskArn']]).should.equal(set(tasks_arns))
-
- # Test that we can pass task ids instead of ARNs
- response = client.describe_tasks(
- cluster='test_ecs_cluster',
- tasks=[tasks_arns[0].split("/")[-1]]
- )
- len(response['tasks']).should.equal(1)
-
-
-@mock_ecs
-def describe_task_definition():
- # NOTE: without the test_ prefix this function is never collected by the
- # test runner; register_task_definition and describe_task_definition both
- # wrap their payload in a 'taskDefinition' key, hence the lookups below.
- client = boto3.client('ecs', region_name='us-east-1')
- container_definition = {
- 'name': 'hello_world',
- 'image': 'docker/hello-world:latest',
- 'cpu': 1024,
- 'memory': 400,
- 'essential': True,
- 'environment': [{
- 'name': 'AWS_ACCESS_KEY_ID',
- 'value': 'SOME_ACCESS_KEY'
- }],
- 'logConfiguration': {'logDriver': 'json-file'}
- }
- task_definition = client.register_task_definition(
- family='test_ecs_task',
- containerDefinitions=[container_definition]
- )
- family = task_definition['taskDefinition']['family']
- task = client.describe_task_definition(taskDefinition=family)['taskDefinition']
- task['containerDefinitions'][0].should.equal(container_definition)
- task['taskDefinitionArn'].should.equal(
- 'arn:aws:ecs:us-east-1:012345678910:task-definition/test_ecs_task:1')
- task['volumes'].should.equal([])
-
-
-@mock_ec2
-@mock_ecs
-def test_stop_task():
- client = boto3.client('ecs', region_name='us-east-1')
- ec2 = boto3.resource('ec2', region_name='us-east-1')
-
- test_cluster_name = 'test_ecs_cluster'
-
- _ = client.create_cluster(
- clusterName=test_cluster_name
- )
-
- test_instance = ec2.create_instances(
- ImageId="ami-1234abcd",
- MinCount=1,
- MaxCount=1,
- )[0]
-
- instance_id_document = json.dumps(
- ec2_utils.generate_instance_identity_document(test_instance)
- )
-
- _ = client.register_container_instance(
- cluster=test_cluster_name,
- instanceIdentityDocument=instance_id_document
- )
-
- _ = client.register_task_definition(
- family='test_ecs_task',
- containerDefinitions=[
- {
- 'name': 'hello_world',
- 'image': 'docker/hello-world:latest',
- 'cpu': 1024,
- 'memory': 400,
- 'essential': True,
- 'environment': [{
- 'name': 'AWS_ACCESS_KEY_ID',
- 'value': 'SOME_ACCESS_KEY'
- }],
- 'logConfiguration': {'logDriver': 'json-file'}
- }
- ]
- )
- run_response = client.run_task(
- cluster='test_ecs_cluster',
- overrides={},
- taskDefinition='test_ecs_task',
- count=1,
- startedBy='moto'
- )
- stop_response = client.stop_task(
- cluster='test_ecs_cluster',
- task=run_response['tasks'][0].get('taskArn'),
- reason='moto testing'
- )
-
- stop_response['task']['taskArn'].should.equal(
- run_response['tasks'][0].get('taskArn'))
- stop_response['task']['lastStatus'].should.equal('STOPPED')
- stop_response['task']['desiredStatus'].should.equal('STOPPED')
- 
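- # lastStatus reports the observed state and desiredStatus the scheduler's
- # target; the mock flips both to STOPPED in one step, whereas real ECS
- # surfaces intermediate states while containers drain.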
stop_response['task']['stoppedReason'].should.equal('moto testing') - - -@mock_ec2 -@mock_ecs -def test_resource_reservation_and_release(): - client = boto3.client('ecs', region_name='us-east-1') - ec2 = boto3.resource('ec2', region_name='us-east-1') - - test_cluster_name = 'test_ecs_cluster' - - _ = client.create_cluster( - clusterName=test_cluster_name - ) - - test_instance = ec2.create_instances( - ImageId="ami-1234abcd", - MinCount=1, - MaxCount=1, - )[0] - - instance_id_document = json.dumps( - ec2_utils.generate_instance_identity_document(test_instance) - ) - - _ = client.register_container_instance( - cluster=test_cluster_name, - instanceIdentityDocument=instance_id_document - ) - - _ = client.register_task_definition( - family='test_ecs_task', - containerDefinitions=[ - { - 'name': 'hello_world', - 'image': 'docker/hello-world:latest', - 'cpu': 1024, - 'memory': 400, - 'essential': True, - 'environment': [{ - 'name': 'AWS_ACCESS_KEY_ID', - 'value': 'SOME_ACCESS_KEY' - }], - 'logConfiguration': {'logDriver': 'json-file'}, - 'portMappings': [ - { - 'hostPort': 80, - 'containerPort': 8080 - } - ] - } - ] - ) - run_response = client.run_task( - cluster='test_ecs_cluster', - overrides={}, - taskDefinition='test_ecs_task', - count=1, - startedBy='moto' - ) - container_instance_arn = run_response['tasks'][0].get('containerInstanceArn') - container_instance_description = client.describe_container_instances( - cluster='test_ecs_cluster', - containerInstances=[container_instance_arn] - )['containerInstances'][0] - remaining_resources, registered_resources = _fetch_container_instance_resources( - container_instance_description) - remaining_resources['CPU'].should.equal(registered_resources['CPU'] - 1024) - remaining_resources['MEMORY'].should.equal(registered_resources['MEMORY'] - 400) - registered_resources['PORTS'].append('80') - remaining_resources['PORTS'].should.equal(registered_resources['PORTS']) - container_instance_description['runningTasksCount'].should.equal(1) - client.stop_task( - cluster='test_ecs_cluster', - task=run_response['tasks'][0].get('taskArn'), - reason='moto testing' - ) - container_instance_description = client.describe_container_instances( - cluster='test_ecs_cluster', - containerInstances=[container_instance_arn] - )['containerInstances'][0] - remaining_resources, registered_resources = _fetch_container_instance_resources( - container_instance_description) - remaining_resources['CPU'].should.equal(registered_resources['CPU']) - remaining_resources['MEMORY'].should.equal(registered_resources['MEMORY']) - remaining_resources['PORTS'].should.equal(registered_resources['PORTS']) - container_instance_description['runningTasksCount'].should.equal(0) - -@mock_ec2 -@mock_ecs -def test_resource_reservation_and_release_memory_reservation(): - client = boto3.client('ecs', region_name='us-east-1') - ec2 = boto3.resource('ec2', region_name='us-east-1') - - test_cluster_name = 'test_ecs_cluster' - - _ = client.create_cluster( - clusterName=test_cluster_name - ) - - test_instance = ec2.create_instances( - ImageId="ami-1234abcd", - MinCount=1, - MaxCount=1, - )[0] - - instance_id_document = json.dumps( - ec2_utils.generate_instance_identity_document(test_instance) - ) - - _ = client.register_container_instance( - cluster=test_cluster_name, - instanceIdentityDocument=instance_id_document - ) - - _ = client.register_task_definition( - family='test_ecs_task', - containerDefinitions=[ - { - 'name': 'hello_world', - 'image': 'docker/hello-world:latest', - 'memoryReservation': 400, - 
'essential': True, - 'environment': [{ - 'name': 'AWS_ACCESS_KEY_ID', - 'value': 'SOME_ACCESS_KEY' - }], - 'logConfiguration': {'logDriver': 'json-file'}, - 'portMappings': [ - { - 'containerPort': 8080 - } - ] - } - ] - ) - run_response = client.run_task( - cluster='test_ecs_cluster', - overrides={}, - taskDefinition='test_ecs_task', - count=1, - startedBy='moto' - ) - container_instance_arn = run_response['tasks'][0].get('containerInstanceArn') - container_instance_description = client.describe_container_instances( - cluster='test_ecs_cluster', - containerInstances=[container_instance_arn] - )['containerInstances'][0] - remaining_resources, registered_resources = _fetch_container_instance_resources(container_instance_description) - remaining_resources['CPU'].should.equal(registered_resources['CPU']) - remaining_resources['MEMORY'].should.equal(registered_resources['MEMORY'] - 400) - remaining_resources['PORTS'].should.equal(registered_resources['PORTS']) - container_instance_description['runningTasksCount'].should.equal(1) - client.stop_task( - cluster='test_ecs_cluster', - task=run_response['tasks'][0].get('taskArn'), - reason='moto testing' - ) - container_instance_description = client.describe_container_instances( - cluster='test_ecs_cluster', - containerInstances=[container_instance_arn] - )['containerInstances'][0] - remaining_resources, registered_resources = _fetch_container_instance_resources(container_instance_description) - remaining_resources['CPU'].should.equal(registered_resources['CPU']) - remaining_resources['MEMORY'].should.equal(registered_resources['MEMORY']) - remaining_resources['PORTS'].should.equal(registered_resources['PORTS']) - container_instance_description['runningTasksCount'].should.equal(0) - - - -@mock_ecs -@mock_cloudformation -def test_create_cluster_through_cloudformation(): - template = { - "AWSTemplateFormatVersion": "2010-09-09", - "Description": "ECS Cluster Test CloudFormation", - "Resources": { - "testCluster": { - "Type": "AWS::ECS::Cluster", - "Properties": { - "ClusterName": "testcluster" - } - } - } - } - template_json = json.dumps(template) - - ecs_conn = boto3.client('ecs', region_name='us-west-1') - resp = ecs_conn.list_clusters() - len(resp['clusterArns']).should.equal(0) - - cfn_conn = boto3.client('cloudformation', region_name='us-west-1') - cfn_conn.create_stack( - StackName="test_stack", - TemplateBody=template_json, - ) - - resp = ecs_conn.list_clusters() - len(resp['clusterArns']).should.equal(1) - - -@mock_ecs -@mock_cloudformation -def test_create_cluster_through_cloudformation_no_name(): - # cloudformation should create a cluster name for you if you do not provide it - # https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-ecs-cluster.html#cfn-ecs-cluster-clustername - template = { - "AWSTemplateFormatVersion": "2010-09-09", - "Description": "ECS Cluster Test CloudFormation", - "Resources": { - "testCluster": { - "Type": "AWS::ECS::Cluster", - } - } - } - template_json = json.dumps(template) - cfn_conn = boto3.client('cloudformation', region_name='us-west-1') - cfn_conn.create_stack( - StackName="test_stack", - TemplateBody=template_json, - ) - - ecs_conn = boto3.client('ecs', region_name='us-west-1') - resp = ecs_conn.list_clusters() - len(resp['clusterArns']).should.equal(1) - - -@mock_ecs -@mock_cloudformation -def test_update_cluster_name_through_cloudformation_should_trigger_a_replacement(): - template1 = { - "AWSTemplateFormatVersion": "2010-09-09", - "Description": "ECS Cluster Test CloudFormation", - 
"Resources": { - "testCluster": { - "Type": "AWS::ECS::Cluster", - "Properties": { - "ClusterName": "testcluster1" - } - } - } - } - template2 = deepcopy(template1) - template2['Resources']['testCluster'][ - 'Properties']['ClusterName'] = 'testcluster2' - template1_json = json.dumps(template1) - cfn_conn = boto3.client('cloudformation', region_name='us-west-1') - stack_resp = cfn_conn.create_stack( - StackName="test_stack", - TemplateBody=template1_json, - ) - - template2_json = json.dumps(template2) - cfn_conn.update_stack( - StackName=stack_resp['StackId'], - TemplateBody=template2_json - ) - ecs_conn = boto3.client('ecs', region_name='us-west-1') - resp = ecs_conn.list_clusters() - len(resp['clusterArns']).should.equal(1) - resp['clusterArns'][0].endswith('testcluster2').should.be.true - - -@mock_ecs -@mock_cloudformation -def test_create_task_definition_through_cloudformation(): - template = { - "AWSTemplateFormatVersion": "2010-09-09", - "Description": "ECS Cluster Test CloudFormation", - "Resources": { - "testTaskDefinition": { - "Type": "AWS::ECS::TaskDefinition", - "Properties": { - "ContainerDefinitions": [ - { - "Name": "ecs-sample", - "Image": "amazon/amazon-ecs-sample", - "Cpu": "200", - "Memory": "500", - "Essential": "true" - } - ], - "Volumes": [], - } - } - } - } - template_json = json.dumps(template) - cfn_conn = boto3.client('cloudformation', region_name='us-west-1') - stack_name = 'test_stack' - cfn_conn.create_stack( - StackName=stack_name, - TemplateBody=template_json, - ) - - ecs_conn = boto3.client('ecs', region_name='us-west-1') - resp = ecs_conn.list_task_definitions() - len(resp['taskDefinitionArns']).should.equal(1) - task_definition_arn = resp['taskDefinitionArns'][0] - - task_definition_details = cfn_conn.describe_stack_resource( - StackName=stack_name,LogicalResourceId='testTaskDefinition')['StackResourceDetail'] - task_definition_details['PhysicalResourceId'].should.equal(task_definition_arn) - -@mock_ec2 -@mock_ecs -def test_task_definitions_unable_to_be_placed(): - client = boto3.client('ecs', region_name='us-east-1') - ec2 = boto3.resource('ec2', region_name='us-east-1') - - test_cluster_name = 'test_ecs_cluster' - - _ = client.create_cluster( - clusterName=test_cluster_name - ) - - test_instance = ec2.create_instances( - ImageId="ami-1234abcd", - MinCount=1, - MaxCount=1, - )[0] - - instance_id_document = json.dumps( - ec2_utils.generate_instance_identity_document(test_instance) - ) - - response = client.register_container_instance( - cluster=test_cluster_name, - instanceIdentityDocument=instance_id_document - ) - - _ = client.register_task_definition( - family='test_ecs_task', - containerDefinitions=[ - { - 'name': 'hello_world', - 'image': 'docker/hello-world:latest', - 'cpu': 5000, - 'memory': 40000, - 'essential': True, - 'environment': [{ - 'name': 'AWS_ACCESS_KEY_ID', - 'value': 'SOME_ACCESS_KEY' - }], - 'logConfiguration': {'logDriver': 'json-file'} - } - ] - ) - response = client.run_task( - cluster='test_ecs_cluster', - overrides={}, - taskDefinition='test_ecs_task', - count=2, - startedBy='moto' - ) - len(response['tasks']).should.equal(0) - - -@mock_ec2 -@mock_ecs -def test_task_definitions_with_port_clash(): - client = boto3.client('ecs', region_name='us-east-1') - ec2 = boto3.resource('ec2', region_name='us-east-1') - - test_cluster_name = 'test_ecs_cluster' - - _ = client.create_cluster( - clusterName=test_cluster_name - ) - - test_instance = ec2.create_instances( - ImageId="ami-1234abcd", - MinCount=1, - MaxCount=1, - )[0] - - 
instance_id_document = json.dumps( - ec2_utils.generate_instance_identity_document(test_instance) - ) - - response = client.register_container_instance( - cluster=test_cluster_name, - instanceIdentityDocument=instance_id_document - ) - - _ = client.register_task_definition( - family='test_ecs_task', - containerDefinitions=[ - { - 'name': 'hello_world', - 'image': 'docker/hello-world:latest', - 'cpu': 256, - 'memory': 512, - 'essential': True, - 'environment': [{ - 'name': 'AWS_ACCESS_KEY_ID', - 'value': 'SOME_ACCESS_KEY' - }], - 'logConfiguration': {'logDriver': 'json-file'}, - 'portMappings': [ - { - 'hostPort': 80, - 'containerPort': 8080 - } - ] - } - ] - ) - response = client.run_task( - cluster='test_ecs_cluster', - overrides={}, - taskDefinition='test_ecs_task', - count=2, - startedBy='moto' - ) - len(response['tasks']).should.equal(1) - response['tasks'][0]['taskArn'].should.contain( - 'arn:aws:ecs:us-east-1:012345678910:task/') - response['tasks'][0]['clusterArn'].should.equal( - 'arn:aws:ecs:us-east-1:012345678910:cluster/test_ecs_cluster') - response['tasks'][0]['taskDefinitionArn'].should.equal( - 'arn:aws:ecs:us-east-1:012345678910:task-definition/test_ecs_task:1') - response['tasks'][0]['containerInstanceArn'].should.contain( - 'arn:aws:ecs:us-east-1:012345678910:container-instance/') - response['tasks'][0]['overrides'].should.equal({}) - response['tasks'][0]['lastStatus'].should.equal("RUNNING") - response['tasks'][0]['desiredStatus'].should.equal("RUNNING") - response['tasks'][0]['startedBy'].should.equal("moto") - response['tasks'][0]['stoppedReason'].should.equal("") - - -@mock_ecs -@mock_cloudformation -def test_update_task_definition_family_through_cloudformation_should_trigger_a_replacement(): - template1 = { - "AWSTemplateFormatVersion": "2010-09-09", - "Description": "ECS Cluster Test CloudFormation", - "Resources": { - "testTaskDefinition": { - "Type": "AWS::ECS::TaskDefinition", - "Properties": { - "Family": "testTaskDefinition1", - "ContainerDefinitions": [ - { - "Name": "ecs-sample", - "Image": "amazon/amazon-ecs-sample", - "Cpu": "200", - "Memory": "500", - "Essential": "true" - } - ], - "Volumes": [], - } - } - } - } - template1_json = json.dumps(template1) - cfn_conn = boto3.client('cloudformation', region_name='us-west-1') - cfn_conn.create_stack( - StackName="test_stack", - TemplateBody=template1_json, - ) - - template2 = deepcopy(template1) - template2['Resources']['testTaskDefinition'][ - 'Properties']['Family'] = 'testTaskDefinition2' - template2_json = json.dumps(template2) - cfn_conn.update_stack( - StackName="test_stack", - TemplateBody=template2_json, - ) - - ecs_conn = boto3.client('ecs', region_name='us-west-1') - resp = ecs_conn.list_task_definitions(familyPrefix='testTaskDefinition') - len(resp['taskDefinitionArns']).should.equal(1) - resp['taskDefinitionArns'][0].endswith( - 'testTaskDefinition2:1').should.be.true - - -@mock_ecs -@mock_cloudformation -def test_create_service_through_cloudformation(): - template = { - "AWSTemplateFormatVersion": "2010-09-09", - "Description": "ECS Cluster Test CloudFormation", - "Resources": { - "testCluster": { - "Type": "AWS::ECS::Cluster", - "Properties": { - "ClusterName": "testcluster" - } - }, - "testTaskDefinition": { - "Type": "AWS::ECS::TaskDefinition", - "Properties": { - "ContainerDefinitions": [ - { - "Name": "ecs-sample", - "Image": "amazon/amazon-ecs-sample", - "Cpu": "200", - "Memory": "500", - "Essential": "true" - } - ], - "Volumes": [], - } - }, - "testService": { - "Type": "AWS::ECS::Service", - 
"Properties": { - "Cluster": {"Ref": "testCluster"}, - "DesiredCount": 10, - "TaskDefinition": {"Ref": "testTaskDefinition"}, - } - } - } - } - template_json = json.dumps(template) - cfn_conn = boto3.client('cloudformation', region_name='us-west-1') - cfn_conn.create_stack( - StackName="test_stack", - TemplateBody=template_json, - ) - - ecs_conn = boto3.client('ecs', region_name='us-west-1') - resp = ecs_conn.list_services(cluster='testcluster') - len(resp['serviceArns']).should.equal(1) - - -@mock_ecs -@mock_cloudformation -def test_update_service_through_cloudformation_should_trigger_replacement(): - template1 = { - "AWSTemplateFormatVersion": "2010-09-09", - "Description": "ECS Cluster Test CloudFormation", - "Resources": { - "testCluster": { - "Type": "AWS::ECS::Cluster", - "Properties": { - "ClusterName": "testcluster" - } - }, - "testTaskDefinition": { - "Type": "AWS::ECS::TaskDefinition", - "Properties": { - "ContainerDefinitions": [ - { - "Name": "ecs-sample", - "Image": "amazon/amazon-ecs-sample", - "Cpu": "200", - "Memory": "500", - "Essential": "true" - } - ], - "Volumes": [], - } - }, - "testService": { - "Type": "AWS::ECS::Service", - "Properties": { - "Cluster": {"Ref": "testCluster"}, - "TaskDefinition": {"Ref": "testTaskDefinition"}, - "DesiredCount": 10, - } - } - } - } - template_json1 = json.dumps(template1) - cfn_conn = boto3.client('cloudformation', region_name='us-west-1') - cfn_conn.create_stack( - StackName="test_stack", - TemplateBody=template_json1, - ) - template2 = deepcopy(template1) - template2['Resources']['testService']['Properties']['DesiredCount'] = 5 - template2_json = json.dumps(template2) - cfn_conn.update_stack( - StackName="test_stack", - TemplateBody=template2_json, - ) - - ecs_conn = boto3.client('ecs', region_name='us-west-1') - resp = ecs_conn.list_services(cluster='testcluster') - len(resp['serviceArns']).should.equal(1) - - -@mock_ec2 -@mock_ecs -def test_attributes(): - # Combined put, list delete attributes into the same test due to the amount of setup - ecs_client = boto3.client('ecs', region_name='us-east-1') - ec2 = boto3.resource('ec2', region_name='us-east-1') - - test_cluster_name = 'test_ecs_cluster' - - _ = ecs_client.create_cluster( - clusterName=test_cluster_name - ) - - instances = [] - test_instance = ec2.create_instances( - ImageId="ami-1234abcd", - MinCount=1, - MaxCount=1, - )[0] - instances.append(test_instance) - - instance_id_document = json.dumps( - ec2_utils.generate_instance_identity_document(test_instance) - ) - - response = ecs_client.register_container_instance( - cluster=test_cluster_name, - instanceIdentityDocument=instance_id_document - ) - - response['containerInstance'][ - 'ec2InstanceId'].should.equal(test_instance.id) - full_arn1 = response['containerInstance']['containerInstanceArn'] - - test_instance = ec2.create_instances( - ImageId="ami-1234abcd", - MinCount=1, - MaxCount=1, - )[0] - instances.append(test_instance) - - instance_id_document = json.dumps( - ec2_utils.generate_instance_identity_document(test_instance) - ) - - response = ecs_client.register_container_instance( - cluster=test_cluster_name, - instanceIdentityDocument=instance_id_document - ) - - response['containerInstance'][ - 'ec2InstanceId'].should.equal(test_instance.id) - full_arn2 = response['containerInstance']['containerInstanceArn'] - partial_arn2 = full_arn2.rsplit('/', 1)[-1] - - full_arn2.should_not.equal(full_arn1) # uuid1 isnt unique enough when the pc is fast ;-) - - # Ok set instance 1 with 1 attribute, instance 2 with another, and 
all of them with a 3rd.
- ecs_client.put_attributes(
- cluster=test_cluster_name,
- attributes=[
- {'name': 'env', 'value': 'prod'},
- {'name': 'attr1', 'value': 'instance1', 'targetId': full_arn1},
- {'name': 'attr1', 'value': 'instance2', 'targetId': partial_arn2,
- 'targetType': 'container-instance'}
- ]
- )
-
- resp = ecs_client.list_attributes(
- cluster=test_cluster_name,
- targetType='container-instance'
- )
- attrs = resp['attributes']
-
- NUM_CUSTOM_ATTRIBUTES = 4 # 2 specific to individual machines and 1 global, going to both machines (2 + 1*2)
- NUM_DEFAULT_ATTRIBUTES = 4
- len(attrs).should.equal(NUM_CUSTOM_ATTRIBUTES + (NUM_DEFAULT_ATTRIBUTES * len(instances)))
-
- # Tests that the attrs have been set properly
- len(list(filter(lambda item: item['name'] == 'env', attrs))).should.equal(2)
- len(list(
- filter(lambda item: item['name'] == 'attr1' and item['value'] == 'instance1', attrs))).should.equal(1)
-
- ecs_client.delete_attributes(
- cluster=test_cluster_name,
- attributes=[
- {'name': 'attr1', 'value': 'instance2', 'targetId': partial_arn2,
- 'targetType': 'container-instance'}
- ]
- )
- NUM_CUSTOM_ATTRIBUTES -= 1
-
- resp = ecs_client.list_attributes(
- cluster=test_cluster_name,
- targetType='container-instance'
- )
- attrs = resp['attributes']
- len(attrs).should.equal(NUM_CUSTOM_ATTRIBUTES + (NUM_DEFAULT_ATTRIBUTES * len(instances)))
-
-
-@mock_ecs
-def test_poll_endpoint():
- ecs_client = boto3.client('ecs', region_name='us-east-1')
-
- # The endpoint currently returns placeholder data; this just checks that the call doesn't raise an exception
- resp = ecs_client.discover_poll_endpoint(cluster='blah', containerInstance='blah')
- resp.should.contain('endpoint')
- resp.should.contain('telemetryEndpoint')
-
-
-@mock_ecs
-def test_list_task_definition_families():
- client = boto3.client('ecs', region_name='us-east-1')
- client.register_task_definition(
- family='test_ecs_task',
- containerDefinitions=[
- {
- 'name': 'hello_world',
- 'image': 'docker/hello-world:latest',
- 'cpu': 1024,
- 'memory': 400,
- 'essential': True,
- 'environment': [{
- 'name': 'AWS_ACCESS_KEY_ID',
- 'value': 'SOME_ACCESS_KEY'
- }],
- 'logConfiguration': {'logDriver': 'json-file'}
- }
- ]
- )
- client.register_task_definition(
- family='alt_test_ecs_task',
- containerDefinitions=[
- {
- 'name': 'hello_world',
- 'image': 'docker/hello-world:latest',
- 'cpu': 1024,
- 'memory': 400,
- 'essential': True,
- 'environment': [{
- 'name': 'AWS_ACCESS_KEY_ID',
- 'value': 'SOME_ACCESS_KEY'
- }],
- 'logConfiguration': {'logDriver': 'json-file'}
- }
- ]
- )
-
- resp1 = client.list_task_definition_families()
- resp2 = client.list_task_definition_families(familyPrefix='alt')
-
- len(resp1['families']).should.equal(2)
- len(resp2['families']).should.equal(1)
-
-
-@mock_ec2
-@mock_ecs
-def test_default_container_instance_attributes():
- ecs_client = boto3.client('ecs', region_name='us-east-1')
- ec2 = boto3.resource('ec2', region_name='us-east-1')
-
- test_cluster_name = 'test_ecs_cluster'
-
- # Create cluster and EC2 instance
- _ = ecs_client.create_cluster(
- clusterName=test_cluster_name
- )
-
- test_instance = ec2.create_instances(
- ImageId="ami-1234abcd",
- MinCount=1,
- MaxCount=1,
- )[0]
-
- instance_id_document = json.dumps(
- ec2_utils.generate_instance_identity_document(test_instance)
- )
-
- # Register container instance
- response = ecs_client.register_container_instance(
- cluster=test_cluster_name,
- 
instanceIdentityDocument=instance_id_document - ) - - response['containerInstance'][ - 'ec2InstanceId'].should.equal(test_instance.id) - full_arn = response['containerInstance']['containerInstanceArn'] - container_instance_id = full_arn.rsplit('/', 1)[-1] - - default_attributes = response['containerInstance']['attributes'] - assert len(default_attributes) == 4 - expected_result = [ - {'name': 'ecs.availability-zone', 'value': test_instance.placement['AvailabilityZone']}, - {'name': 'ecs.ami-id', 'value': test_instance.image_id}, - {'name': 'ecs.instance-type', 'value': test_instance.instance_type}, - {'name': 'ecs.os-type', 'value': test_instance.platform or 'linux'} - ] - assert sorted(default_attributes, key=lambda item: item['name']) == sorted(expected_result, - key=lambda item: item['name']) - - -@mock_ec2 -@mock_ecs -def test_describe_container_instances_with_attributes(): - ecs_client = boto3.client('ecs', region_name='us-east-1') - ec2 = boto3.resource('ec2', region_name='us-east-1') - - test_cluster_name = 'test_ecs_cluster' - - # Create cluster and EC2 instance - _ = ecs_client.create_cluster( - clusterName=test_cluster_name - ) - - test_instance = ec2.create_instances( - ImageId="ami-1234abcd", - MinCount=1, - MaxCount=1, - )[0] - - instance_id_document = json.dumps( - ec2_utils.generate_instance_identity_document(test_instance) - ) - - # Register container instance - response = ecs_client.register_container_instance( - cluster=test_cluster_name, - instanceIdentityDocument=instance_id_document - ) - - response['containerInstance'][ - 'ec2InstanceId'].should.equal(test_instance.id) - full_arn = response['containerInstance']['containerInstanceArn'] - container_instance_id = full_arn.rsplit('/', 1)[-1] - default_attributes = response['containerInstance']['attributes'] - - # Set attributes on container instance, one without a value - attributes = [ - {'name': 'env', 'value': 'prod'}, - {'name': 'attr1', 'value': 'instance1', 'targetId': container_instance_id, - 'targetType': 'container-instance'}, - {'name': 'attr_without_value'} - ] - ecs_client.put_attributes( - cluster=test_cluster_name, - attributes=attributes - ) - - # Describe container instance, should have attributes previously set - described_instance = ecs_client.describe_container_instances(cluster=test_cluster_name, - containerInstances=[container_instance_id]) - - assert len(described_instance['containerInstances']) == 1 - assert isinstance(described_instance['containerInstances'][0]['attributes'], list) - - # Remove additional info passed to put_attributes - cleaned_attributes = [] - for attribute in attributes: - attribute.pop('targetId', None) - attribute.pop('targetType', None) - cleaned_attributes.append(attribute) - described_attributes = sorted(described_instance['containerInstances'][0]['attributes'], - key=lambda item: item['name']) - expected_attributes = sorted(default_attributes + cleaned_attributes, key=lambda item: item['name']) - assert described_attributes == expected_attributes - - -def _fetch_container_instance_resources(container_instance_description): - remaining_resources = {} - registered_resources = {} - remaining_resources_list = container_instance_description['remainingResources'] - registered_resources_list = container_instance_description['registeredResources'] - remaining_resources['CPU'] = [x['integerValue'] for x in remaining_resources_list if x['name'] == 'CPU'][ - 0] - remaining_resources['MEMORY'] = \ - [x['integerValue'] for x in remaining_resources_list if x['name'] == 'MEMORY'][0] - 
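- # Each entry in these resource lists is a dict such as
- # {'name': 'CPU', 'integerValue': 1024}; scalar resources carry their
- # value under 'integerValue' and PORTS under 'stringSetValue', hence the
- # field switch between the comprehensions here.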
remaining_resources['PORTS'] = \ - [x['stringSetValue'] for x in remaining_resources_list if x['name'] == 'PORTS'][0] - registered_resources['CPU'] = \ - [x['integerValue'] for x in registered_resources_list if x['name'] == 'CPU'][0] - registered_resources['MEMORY'] = \ - [x['integerValue'] for x in registered_resources_list if x['name'] == 'MEMORY'][0] - registered_resources['PORTS'] = \ - [x['stringSetValue'] for x in registered_resources_list if x['name'] == 'PORTS'][0] - return remaining_resources, registered_resources - - -@mock_ecs -def test_create_service_load_balancing(): - client = boto3.client('ecs', region_name='us-east-1') - client.create_cluster( - clusterName='test_ecs_cluster' - ) - client.register_task_definition( - family='test_ecs_task', - containerDefinitions=[ - { - 'name': 'hello_world', - 'image': 'docker/hello-world:latest', - 'cpu': 1024, - 'memory': 400, - 'essential': True, - 'environment': [{ - 'name': 'AWS_ACCESS_KEY_ID', - 'value': 'SOME_ACCESS_KEY' - }], - 'logConfiguration': {'logDriver': 'json-file'} - } - ] - ) - response = client.create_service( - cluster='test_ecs_cluster', - serviceName='test_ecs_service', - taskDefinition='test_ecs_task', - desiredCount=2, - loadBalancers=[ - { - 'targetGroupArn': 'test_target_group_arn', - 'loadBalancerName': 'test_load_balancer_name', - 'containerName': 'test_container_name', - 'containerPort': 123 - } - ] - ) - response['service']['clusterArn'].should.equal( - 'arn:aws:ecs:us-east-1:012345678910:cluster/test_ecs_cluster') - response['service']['desiredCount'].should.equal(2) - len(response['service']['events']).should.equal(0) - len(response['service']['loadBalancers']).should.equal(1) - response['service']['loadBalancers'][0]['targetGroupArn'].should.equal( - 'test_target_group_arn') - response['service']['loadBalancers'][0]['loadBalancerName'].should.equal( - 'test_load_balancer_name') - response['service']['loadBalancers'][0]['containerName'].should.equal( - 'test_container_name') - response['service']['loadBalancers'][0]['containerPort'].should.equal(123) - response['service']['pendingCount'].should.equal(0) - response['service']['runningCount'].should.equal(0) - response['service']['serviceArn'].should.equal( - 'arn:aws:ecs:us-east-1:012345678910:service/test_ecs_service') - response['service']['serviceName'].should.equal('test_ecs_service') - response['service']['status'].should.equal('ACTIVE') - response['service']['taskDefinition'].should.equal( - 'arn:aws:ecs:us-east-1:012345678910:task-definition/test_ecs_task:1') +from __future__ import unicode_literals + +from copy import deepcopy + +from botocore.exceptions import ClientError +import boto3 +import sure # noqa +import json +from moto.ec2 import utils as ec2_utils +from uuid import UUID + +from moto import mock_cloudformation, mock_elbv2 +from moto import mock_ecs +from moto import mock_ec2 +from nose.tools import assert_raises + + +@mock_ecs +def test_create_cluster(): + client = boto3.client('ecs', region_name='us-east-1') + response = client.create_cluster( + clusterName='test_ecs_cluster' + ) + response['cluster']['clusterName'].should.equal('test_ecs_cluster') + response['cluster']['clusterArn'].should.equal( + 'arn:aws:ecs:us-east-1:012345678910:cluster/test_ecs_cluster') + response['cluster']['status'].should.equal('ACTIVE') + response['cluster']['registeredContainerInstancesCount'].should.equal(0) + response['cluster']['runningTasksCount'].should.equal(0) + response['cluster']['pendingTasksCount'].should.equal(0) + 
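+ # A freshly created cluster reports every counter at zero; delete_cluster
+ # below returns the same final snapshot of the cluster rather than a
+ # tombstone, which is why the identical assertions recur there.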
response['cluster']['activeServicesCount'].should.equal(0) + + +@mock_ecs +def test_list_clusters(): + client = boto3.client('ecs', region_name='us-east-1') + _ = client.create_cluster( + clusterName='test_cluster0' + ) + _ = client.create_cluster( + clusterName='test_cluster1' + ) + response = client.list_clusters() + response['clusterArns'].should.contain( + 'arn:aws:ecs:us-east-1:012345678910:cluster/test_cluster0') + response['clusterArns'].should.contain( + 'arn:aws:ecs:us-east-1:012345678910:cluster/test_cluster1') + + +@mock_ecs +def test_delete_cluster(): + client = boto3.client('ecs', region_name='us-east-1') + _ = client.create_cluster( + clusterName='test_ecs_cluster' + ) + response = client.delete_cluster(cluster='test_ecs_cluster') + response['cluster']['clusterName'].should.equal('test_ecs_cluster') + response['cluster']['clusterArn'].should.equal( + 'arn:aws:ecs:us-east-1:012345678910:cluster/test_ecs_cluster') + response['cluster']['status'].should.equal('ACTIVE') + response['cluster']['registeredContainerInstancesCount'].should.equal(0) + response['cluster']['runningTasksCount'].should.equal(0) + response['cluster']['pendingTasksCount'].should.equal(0) + response['cluster']['activeServicesCount'].should.equal(0) + + response = client.list_clusters() + len(response['clusterArns']).should.equal(0) + + +@mock_ecs +def test_register_task_definition(): + client = boto3.client('ecs', region_name='us-east-1') + response = client.register_task_definition( + family='test_ecs_task', + containerDefinitions=[ + { + 'name': 'hello_world', + 'image': 'docker/hello-world:latest', + 'cpu': 1024, + 'memory': 400, + 'essential': True, + 'environment': [{ + 'name': 'AWS_ACCESS_KEY_ID', + 'value': 'SOME_ACCESS_KEY' + }], + 'logConfiguration': {'logDriver': 'json-file'} + } + ] + ) + type(response['taskDefinition']).should.be(dict) + response['taskDefinition']['revision'].should.equal(1) + response['taskDefinition']['taskDefinitionArn'].should.equal( + 'arn:aws:ecs:us-east-1:012345678910:task-definition/test_ecs_task:1') + response['taskDefinition']['containerDefinitions'][ + 0]['name'].should.equal('hello_world') + response['taskDefinition']['containerDefinitions'][0][ + 'image'].should.equal('docker/hello-world:latest') + response['taskDefinition']['containerDefinitions'][ + 0]['cpu'].should.equal(1024) + response['taskDefinition']['containerDefinitions'][ + 0]['memory'].should.equal(400) + response['taskDefinition']['containerDefinitions'][ + 0]['essential'].should.equal(True) + response['taskDefinition']['containerDefinitions'][0][ + 'environment'][0]['name'].should.equal('AWS_ACCESS_KEY_ID') + response['taskDefinition']['containerDefinitions'][0][ + 'environment'][0]['value'].should.equal('SOME_ACCESS_KEY') + response['taskDefinition']['containerDefinitions'][0][ + 'logConfiguration']['logDriver'].should.equal('json-file') + + +@mock_ecs +def test_list_task_definitions(): + client = boto3.client('ecs', region_name='us-east-1') + _ = client.register_task_definition( + family='test_ecs_task', + containerDefinitions=[ + { + 'name': 'hello_world', + 'image': 'docker/hello-world:latest', + 'cpu': 1024, + 'memory': 400, + 'essential': True, + 'environment': [{ + 'name': 'AWS_ACCESS_KEY_ID', + 'value': 'SOME_ACCESS_KEY' + }], + 'logConfiguration': {'logDriver': 'json-file'} + } + ] + ) + _ = client.register_task_definition( + family='test_ecs_task', + containerDefinitions=[ + { + 'name': 'hello_world2', + 'image': 'docker/hello-world2:latest', + 'cpu': 1024, + 'memory': 400, + 'essential': True, 
+ 'environment': [{ + 'name': 'AWS_ACCESS_KEY_ID', + 'value': 'SOME_ACCESS_KEY2' + }], + 'logConfiguration': {'logDriver': 'json-file'} + } + ] + ) + response = client.list_task_definitions() + len(response['taskDefinitionArns']).should.equal(2) + response['taskDefinitionArns'][0].should.equal( + 'arn:aws:ecs:us-east-1:012345678910:task-definition/test_ecs_task:1') + response['taskDefinitionArns'][1].should.equal( + 'arn:aws:ecs:us-east-1:012345678910:task-definition/test_ecs_task:2') + + +@mock_ecs +def test_describe_task_definition(): + client = boto3.client('ecs', region_name='us-east-1') + _ = client.register_task_definition( + family='test_ecs_task', + containerDefinitions=[ + { + 'name': 'hello_world', + 'image': 'docker/hello-world:latest', + 'cpu': 1024, + 'memory': 400, + 'essential': True, + 'environment': [{ + 'name': 'AWS_ACCESS_KEY_ID', + 'value': 'SOME_ACCESS_KEY' + }], + 'logConfiguration': {'logDriver': 'json-file'} + } + ] + ) + _ = client.register_task_definition( + family='test_ecs_task', + containerDefinitions=[ + { + 'name': 'hello_world2', + 'image': 'docker/hello-world2:latest', + 'cpu': 1024, + 'memory': 400, + 'essential': True, + 'environment': [{ + 'name': 'AWS_ACCESS_KEY_ID', + 'value': 'SOME_ACCESS_KEY2' + }], + 'logConfiguration': {'logDriver': 'json-file'} + } + ] + ) + _ = client.register_task_definition( + family='test_ecs_task', + containerDefinitions=[ + { + 'name': 'hello_world3', + 'image': 'docker/hello-world3:latest', + 'cpu': 1024, + 'memory': 400, + 'essential': True, + 'environment': [{ + 'name': 'AWS_ACCESS_KEY_ID', + 'value': 'SOME_ACCESS_KEY3' + }], + 'logConfiguration': {'logDriver': 'json-file'} + } + ] + ) + response = client.describe_task_definition(taskDefinition='test_ecs_task') + response['taskDefinition']['taskDefinitionArn'].should.equal( + 'arn:aws:ecs:us-east-1:012345678910:task-definition/test_ecs_task:3') + + response = client.describe_task_definition( + taskDefinition='test_ecs_task:2') + response['taskDefinition']['taskDefinitionArn'].should.equal( + 'arn:aws:ecs:us-east-1:012345678910:task-definition/test_ecs_task:2') + + +@mock_ecs +def test_deregister_task_definition(): + client = boto3.client('ecs', region_name='us-east-1') + _ = client.register_task_definition( + family='test_ecs_task', + containerDefinitions=[ + { + 'name': 'hello_world', + 'image': 'docker/hello-world:latest', + 'cpu': 1024, + 'memory': 400, + 'essential': True, + 'environment': [{ + 'name': 'AWS_ACCESS_KEY_ID', + 'value': 'SOME_ACCESS_KEY' + }], + 'logConfiguration': {'logDriver': 'json-file'} + } + ] + ) + response = client.deregister_task_definition( + taskDefinition='test_ecs_task:1' + ) + type(response['taskDefinition']).should.be(dict) + response['taskDefinition']['taskDefinitionArn'].should.equal( + 'arn:aws:ecs:us-east-1:012345678910:task-definition/test_ecs_task:1') + response['taskDefinition']['containerDefinitions'][ + 0]['name'].should.equal('hello_world') + response['taskDefinition']['containerDefinitions'][0][ + 'image'].should.equal('docker/hello-world:latest') + response['taskDefinition']['containerDefinitions'][ + 0]['cpu'].should.equal(1024) + response['taskDefinition']['containerDefinitions'][ + 0]['memory'].should.equal(400) + response['taskDefinition']['containerDefinitions'][ + 0]['essential'].should.equal(True) + response['taskDefinition']['containerDefinitions'][0][ + 'environment'][0]['name'].should.equal('AWS_ACCESS_KEY_ID') + response['taskDefinition']['containerDefinitions'][0][ + 
'environment'][0]['value'].should.equal('SOME_ACCESS_KEY') + response['taskDefinition']['containerDefinitions'][0][ + 'logConfiguration']['logDriver'].should.equal('json-file') + + +@mock_ecs +def test_create_service(): + client = boto3.client('ecs', region_name='us-east-1') + _ = client.create_cluster( + clusterName='test_ecs_cluster' + ) + _ = client.register_task_definition( + family='test_ecs_task', + containerDefinitions=[ + { + 'name': 'hello_world', + 'image': 'docker/hello-world:latest', + 'cpu': 1024, + 'memory': 400, + 'essential': True, + 'environment': [{ + 'name': 'AWS_ACCESS_KEY_ID', + 'value': 'SOME_ACCESS_KEY' + }], + 'logConfiguration': {'logDriver': 'json-file'} + } + ] + ) + response = client.create_service( + cluster='test_ecs_cluster', + serviceName='test_ecs_service', + taskDefinition='test_ecs_task', + desiredCount=2 + ) + response['service']['clusterArn'].should.equal( + 'arn:aws:ecs:us-east-1:012345678910:cluster/test_ecs_cluster') + response['service']['desiredCount'].should.equal(2) + len(response['service']['events']).should.equal(0) + len(response['service']['loadBalancers']).should.equal(0) + response['service']['pendingCount'].should.equal(0) + response['service']['runningCount'].should.equal(0) + response['service']['serviceArn'].should.equal( + 'arn:aws:ecs:us-east-1:012345678910:service/test_ecs_service') + response['service']['serviceName'].should.equal('test_ecs_service') + response['service']['status'].should.equal('ACTIVE') + response['service']['taskDefinition'].should.equal( + 'arn:aws:ecs:us-east-1:012345678910:task-definition/test_ecs_task:1') + response['service']['schedulingStrategy'].should.equal('REPLICA') + +@mock_ecs +def test_create_service_scheduling_strategy(): + client = boto3.client('ecs', region_name='us-east-1') + _ = client.create_cluster( + clusterName='test_ecs_cluster' + ) + _ = client.register_task_definition( + family='test_ecs_task', + containerDefinitions=[ + { + 'name': 'hello_world', + 'image': 'docker/hello-world:latest', + 'cpu': 1024, + 'memory': 400, + 'essential': True, + 'environment': [{ + 'name': 'AWS_ACCESS_KEY_ID', + 'value': 'SOME_ACCESS_KEY' + }], + 'logConfiguration': {'logDriver': 'json-file'} + } + ] + ) + response = client.create_service( + cluster='test_ecs_cluster', + serviceName='test_ecs_service', + taskDefinition='test_ecs_task', + desiredCount=2, + schedulingStrategy='DAEMON', + ) + response['service']['clusterArn'].should.equal( + 'arn:aws:ecs:us-east-1:012345678910:cluster/test_ecs_cluster') + response['service']['desiredCount'].should.equal(2) + len(response['service']['events']).should.equal(0) + len(response['service']['loadBalancers']).should.equal(0) + response['service']['pendingCount'].should.equal(0) + response['service']['runningCount'].should.equal(0) + response['service']['serviceArn'].should.equal( + 'arn:aws:ecs:us-east-1:012345678910:service/test_ecs_service') + response['service']['serviceName'].should.equal('test_ecs_service') + response['service']['status'].should.equal('ACTIVE') + response['service']['taskDefinition'].should.equal( + 'arn:aws:ecs:us-east-1:012345678910:task-definition/test_ecs_task:1') + response['service']['schedulingStrategy'].should.equal('DAEMON') + + +@mock_ecs +def test_list_services(): + client = boto3.client('ecs', region_name='us-east-1') + _ = client.create_cluster( + clusterName='test_ecs_cluster' + ) + _ = client.register_task_definition( + family='test_ecs_task', + containerDefinitions=[ + { + 'name': 'hello_world', + 'image': 'docker/hello-world:latest', 
+ 'cpu': 1024, + 'memory': 400, + 'essential': True, + 'environment': [{ + 'name': 'AWS_ACCESS_KEY_ID', + 'value': 'SOME_ACCESS_KEY' + }], + 'logConfiguration': {'logDriver': 'json-file'} + } + ] + ) + _ = client.create_service( + cluster='test_ecs_cluster', + serviceName='test_ecs_service1', + taskDefinition='test_ecs_task', + desiredCount=2 + ) + _ = client.create_service( + cluster='test_ecs_cluster', + serviceName='test_ecs_service2', + taskDefinition='test_ecs_task', + desiredCount=2 + ) + response = client.list_services( + cluster='test_ecs_cluster' + ) + len(response['serviceArns']).should.equal(2) + response['serviceArns'][0].should.equal( + 'arn:aws:ecs:us-east-1:012345678910:service/test_ecs_service1') + response['serviceArns'][1].should.equal( + 'arn:aws:ecs:us-east-1:012345678910:service/test_ecs_service2') + + +@mock_ecs +def test_describe_services(): + client = boto3.client('ecs', region_name='us-east-1') + _ = client.create_cluster( + clusterName='test_ecs_cluster' + ) + _ = client.register_task_definition( + family='test_ecs_task', + containerDefinitions=[ + { + 'name': 'hello_world', + 'image': 'docker/hello-world:latest', + 'cpu': 1024, + 'memory': 400, + 'essential': True, + 'environment': [{ + 'name': 'AWS_ACCESS_KEY_ID', + 'value': 'SOME_ACCESS_KEY' + }], + 'logConfiguration': {'logDriver': 'json-file'} + } + ] + ) + _ = client.create_service( + cluster='test_ecs_cluster', + serviceName='test_ecs_service1', + taskDefinition='test_ecs_task', + desiredCount=2 + ) + _ = client.create_service( + cluster='test_ecs_cluster', + serviceName='test_ecs_service2', + taskDefinition='test_ecs_task', + desiredCount=2 + ) + _ = client.create_service( + cluster='test_ecs_cluster', + serviceName='test_ecs_service3', + taskDefinition='test_ecs_task', + desiredCount=2 + ) + response = client.describe_services( + cluster='test_ecs_cluster', + services=['test_ecs_service1', + 'arn:aws:ecs:us-east-1:012345678910:service/test_ecs_service2'] + ) + len(response['services']).should.equal(2) + response['services'][0]['serviceArn'].should.equal( + 'arn:aws:ecs:us-east-1:012345678910:service/test_ecs_service1') + response['services'][0]['serviceName'].should.equal('test_ecs_service1') + response['services'][1]['serviceArn'].should.equal( + 'arn:aws:ecs:us-east-1:012345678910:service/test_ecs_service2') + response['services'][1]['serviceName'].should.equal('test_ecs_service2') + + response['services'][0]['deployments'][0]['desiredCount'].should.equal(2) + response['services'][0]['deployments'][0]['pendingCount'].should.equal(2) + response['services'][0]['deployments'][0]['runningCount'].should.equal(0) + response['services'][0]['deployments'][0]['status'].should.equal('PRIMARY') + + +@mock_ecs +def test_describe_services_scheduling_strategy(): + client = boto3.client('ecs', region_name='us-east-1') + _ = client.create_cluster( + clusterName='test_ecs_cluster' + ) + _ = client.register_task_definition( + family='test_ecs_task', + containerDefinitions=[ + { + 'name': 'hello_world', + 'image': 'docker/hello-world:latest', + 'cpu': 1024, + 'memory': 400, + 'essential': True, + 'environment': [{ + 'name': 'AWS_ACCESS_KEY_ID', + 'value': 'SOME_ACCESS_KEY' + }], + 'logConfiguration': {'logDriver': 'json-file'} + } + ] + ) + _ = client.create_service( + cluster='test_ecs_cluster', + serviceName='test_ecs_service1', + taskDefinition='test_ecs_task', + desiredCount=2 + ) + _ = client.create_service( + cluster='test_ecs_cluster', + serviceName='test_ecs_service2', + taskDefinition='test_ecs_task', + 
desiredCount=2, + schedulingStrategy='DAEMON' + ) + _ = client.create_service( + cluster='test_ecs_cluster', + serviceName='test_ecs_service3', + taskDefinition='test_ecs_task', + desiredCount=2 + ) + response = client.describe_services( + cluster='test_ecs_cluster', + services=['test_ecs_service1', + 'arn:aws:ecs:us-east-1:012345678910:service/test_ecs_service2', + 'test_ecs_service3'] + ) + len(response['services']).should.equal(3) + response['services'][0]['serviceArn'].should.equal( + 'arn:aws:ecs:us-east-1:012345678910:service/test_ecs_service1') + response['services'][0]['serviceName'].should.equal('test_ecs_service1') + response['services'][1]['serviceArn'].should.equal( + 'arn:aws:ecs:us-east-1:012345678910:service/test_ecs_service2') + response['services'][1]['serviceName'].should.equal('test_ecs_service2') + + response['services'][0]['deployments'][0]['desiredCount'].should.equal(2) + response['services'][0]['deployments'][0]['pendingCount'].should.equal(2) + response['services'][0]['deployments'][0]['runningCount'].should.equal(0) + response['services'][0]['deployments'][0]['status'].should.equal('PRIMARY') + + response['services'][0]['schedulingStrategy'].should.equal('REPLICA') + response['services'][1]['schedulingStrategy'].should.equal('DAEMON') + response['services'][2]['schedulingStrategy'].should.equal('REPLICA') + + +@mock_ecs +def test_update_service(): + client = boto3.client('ecs', region_name='us-east-1') + _ = client.create_cluster( + clusterName='test_ecs_cluster' + ) + _ = client.register_task_definition( + family='test_ecs_task', + containerDefinitions=[ + { + 'name': 'hello_world', + 'image': 'docker/hello-world:latest', + 'cpu': 1024, + 'memory': 400, + 'essential': True, + 'environment': [{ + 'name': 'AWS_ACCESS_KEY_ID', + 'value': 'SOME_ACCESS_KEY' + }], + 'logConfiguration': {'logDriver': 'json-file'} + } + ] + ) + response = client.create_service( + cluster='test_ecs_cluster', + serviceName='test_ecs_service', + taskDefinition='test_ecs_task', + desiredCount=2 + ) + response['service']['desiredCount'].should.equal(2) + + response = client.update_service( + cluster='test_ecs_cluster', + service='test_ecs_service', + taskDefinition='test_ecs_task', + desiredCount=0 + ) + response['service']['desiredCount'].should.equal(0) + response['service']['schedulingStrategy'].should.equal('REPLICA') + + +@mock_ecs +def test_update_missing_service(): + client = boto3.client('ecs', region_name='us-east-1') + _ = client.create_cluster( + clusterName='test_ecs_cluster' + ) + + client.update_service.when.called_with( + cluster='test_ecs_cluster', + service='test_ecs_service', + taskDefinition='test_ecs_task', + desiredCount=0 + ).should.throw(ClientError) + + +@mock_ecs +def test_delete_service(): + client = boto3.client('ecs', region_name='us-east-1') + _ = client.create_cluster( + clusterName='test_ecs_cluster' + ) + _ = client.register_task_definition( + family='test_ecs_task', + containerDefinitions=[ + { + 'name': 'hello_world', + 'image': 'docker/hello-world:latest', + 'cpu': 1024, + 'memory': 400, + 'essential': True, + 'environment': [{ + 'name': 'AWS_ACCESS_KEY_ID', + 'value': 'SOME_ACCESS_KEY' + }], + 'logConfiguration': {'logDriver': 'json-file'} + } + ] + ) + _ = client.create_service( + cluster='test_ecs_cluster', + serviceName='test_ecs_service', + taskDefinition='test_ecs_task', + desiredCount=2 + ) + _ = client.update_service( + cluster='test_ecs_cluster', + service='test_ecs_service', + desiredCount=0 + ) + response = client.delete_service( + 
cluster='test_ecs_cluster', + service='test_ecs_service' + ) + response['service']['clusterArn'].should.equal( + 'arn:aws:ecs:us-east-1:012345678910:cluster/test_ecs_cluster') + response['service']['desiredCount'].should.equal(0) + len(response['service']['events']).should.equal(0) + len(response['service']['loadBalancers']).should.equal(0) + response['service']['pendingCount'].should.equal(0) + response['service']['runningCount'].should.equal(0) + response['service']['serviceArn'].should.equal( + 'arn:aws:ecs:us-east-1:012345678910:service/test_ecs_service') + response['service']['serviceName'].should.equal('test_ecs_service') + response['service']['status'].should.equal('ACTIVE') + response['service']['schedulingStrategy'].should.equal('REPLICA') + response['service']['taskDefinition'].should.equal( + 'arn:aws:ecs:us-east-1:012345678910:task-definition/test_ecs_task:1') + + + +@mock_ec2 +@mock_ecs +def test_register_container_instance(): + ecs_client = boto3.client('ecs', region_name='us-east-1') + ec2 = boto3.resource('ec2', region_name='us-east-1') + + test_cluster_name = 'test_ecs_cluster' + + _ = ecs_client.create_cluster( + clusterName=test_cluster_name + ) + + test_instance = ec2.create_instances( + ImageId="ami-1234abcd", + MinCount=1, + MaxCount=1, + )[0] + + instance_id_document = json.dumps( + ec2_utils.generate_instance_identity_document(test_instance) + ) + + response = ecs_client.register_container_instance( + cluster=test_cluster_name, + instanceIdentityDocument=instance_id_document + ) + + response['containerInstance'][ + 'ec2InstanceId'].should.equal(test_instance.id) + full_arn = response['containerInstance']['containerInstanceArn'] + arn_part = full_arn.split('/') + arn_part[0].should.equal( + 'arn:aws:ecs:us-east-1:012345678910:container-instance') + arn_part[1].should.equal(str(UUID(arn_part[1]))) + response['containerInstance']['status'].should.equal('ACTIVE') + len(response['containerInstance']['registeredResources']).should.equal(4) + len(response['containerInstance']['remainingResources']).should.equal(4) + response['containerInstance']['agentConnected'].should.equal(True) + response['containerInstance']['versionInfo'][ + 'agentVersion'].should.equal('1.0.0') + response['containerInstance']['versionInfo'][ + 'agentHash'].should.equal('4023248') + response['containerInstance']['versionInfo'][ + 'dockerVersion'].should.equal('DockerVersion: 1.5.0') + + +@mock_ec2 +@mock_ecs +def test_deregister_container_instance(): + ecs_client = boto3.client('ecs', region_name='us-east-1') + ec2 = boto3.resource('ec2', region_name='us-east-1') + + test_cluster_name = 'test_ecs_cluster' + + _ = ecs_client.create_cluster( + clusterName=test_cluster_name + ) + + test_instance = ec2.create_instances( + ImageId="ami-1234abcd", + MinCount=1, + MaxCount=1, + )[0] + + instance_id_document = json.dumps( + ec2_utils.generate_instance_identity_document(test_instance) + ) + + response = ecs_client.register_container_instance( + cluster=test_cluster_name, + instanceIdentityDocument=instance_id_document + ) + container_instance_id = response['containerInstance']['containerInstanceArn'] + response = ecs_client.deregister_container_instance( + cluster=test_cluster_name, + containerInstance=container_instance_id + ) + container_instances_response = ecs_client.list_container_instances( + cluster=test_cluster_name + ) + len(container_instances_response['containerInstanceArns']).should.equal(0) + + response = ecs_client.register_container_instance( + cluster=test_cluster_name, + 
instanceIdentityDocument=instance_id_document
+ )
+ container_instance_id = response['containerInstance']['containerInstanceArn']
+ _ = ecs_client.register_task_definition(
+ family='test_ecs_task',
+ containerDefinitions=[
+ {
+ 'name': 'hello_world',
+ 'image': 'docker/hello-world:latest',
+ 'cpu': 1024,
+ 'memory': 400,
+ 'essential': True,
+ 'environment': [{
+ 'name': 'AWS_ACCESS_KEY_ID',
+ 'value': 'SOME_ACCESS_KEY'
+ }],
+ 'logConfiguration': {'logDriver': 'json-file'}
+ }
+ ]
+ )
+
+ response = ecs_client.start_task(
+ cluster='test_ecs_cluster',
+ taskDefinition='test_ecs_task',
+ overrides={},
+ containerInstances=[container_instance_id],
+ startedBy='moto'
+ )
+ # Deregistering an instance that still has a running task must fail
+ # unless force=True is passed:
+ with assert_raises(Exception):
+ ecs_client.deregister_container_instance(
+ cluster=test_cluster_name,
+ containerInstance=container_instance_id
+ )
+ container_instances_response = ecs_client.list_container_instances(
+ cluster=test_cluster_name
+ )
+ len(container_instances_response['containerInstanceArns']).should.equal(1)
+ ecs_client.deregister_container_instance(
+ cluster=test_cluster_name,
+ containerInstance=container_instance_id,
+ force=True
+ )
+ container_instances_response = ecs_client.list_container_instances(
+ cluster=test_cluster_name
+ )
+ len(container_instances_response['containerInstanceArns']).should.equal(0)
+
+
+@mock_ec2
+@mock_ecs
+def test_list_container_instances():
+ ecs_client = boto3.client('ecs', region_name='us-east-1')
+ ec2 = boto3.resource('ec2', region_name='us-east-1')
+
+ test_cluster_name = 'test_ecs_cluster'
+ _ = ecs_client.create_cluster(
+ clusterName=test_cluster_name
+ )
+
+ instance_to_create = 3
+ test_instance_arns = []
+ for i in range(0, instance_to_create):
+ test_instance = ec2.create_instances(
+ ImageId="ami-1234abcd",
+ MinCount=1,
+ MaxCount=1,
+ )[0]
+
+ instance_id_document = json.dumps(
+ ec2_utils.generate_instance_identity_document(test_instance)
+ )
+
+ response = ecs_client.register_container_instance(
+ cluster=test_cluster_name,
+ instanceIdentityDocument=instance_id_document)
+
+ test_instance_arns.append(response['containerInstance'][
+ 'containerInstanceArn'])
+
+ response = ecs_client.list_container_instances(cluster=test_cluster_name)
+
+ len(response['containerInstanceArns']).should.equal(instance_to_create)
+ for arn in test_instance_arns:
+ response['containerInstanceArns'].should.contain(arn)
+
+
+@mock_ec2
+@mock_ecs
+def test_describe_container_instances():
+ ecs_client = boto3.client('ecs', region_name='us-east-1')
+ ec2 = boto3.resource('ec2', region_name='us-east-1')
+
+ test_cluster_name = 'test_ecs_cluster'
+ _ = ecs_client.create_cluster(
+ clusterName=test_cluster_name
+ )
+
+ instance_to_create = 3
+ test_instance_arns = []
+ for i in range(0, instance_to_create):
+ test_instance = ec2.create_instances(
+ ImageId="ami-1234abcd",
+ MinCount=1,
+ MaxCount=1,
+ )[0]
+
+ instance_id_document = json.dumps(
+ ec2_utils.generate_instance_identity_document(test_instance)
+ )
+
+ response = ecs_client.register_container_instance(
+ cluster=test_cluster_name,
+ instanceIdentityDocument=instance_id_document)
+
+ test_instance_arns.append(response['containerInstance'][
+ 'containerInstanceArn'])
+
+ test_instance_ids = list(
+ map((lambda x: x.split('/')[1]), test_instance_arns))
+ response = ecs_client.describe_container_instances(
+ cluster=test_cluster_name, containerInstances=test_instance_ids)
+ len(response['failures']).should.equal(0)
+ len(response['containerInstances']).should.equal(instance_to_create)
+ 
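+ # An equivalent set-based form of the containment loop that follows
+ # (an illustrative alternative, not an additional requirement):
+ set(ci['containerInstanceArn']
+     for ci in response['containerInstances']).should.equal(set(test_instance_arns))
+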
response_arns = [ci['containerInstanceArn'] + for ci in response['containerInstances']] + for arn in test_instance_arns: + response_arns.should.contain(arn) + for instance in response['containerInstances']: + instance.keys().should.contain('runningTasksCount') + instance.keys().should.contain('pendingTasksCount') + + +@mock_ec2 +@mock_ecs +def test_update_container_instances_state(): + ecs_client = boto3.client('ecs', region_name='us-east-1') + ec2 = boto3.resource('ec2', region_name='us-east-1') + + test_cluster_name = 'test_ecs_cluster' + _ = ecs_client.create_cluster( + clusterName=test_cluster_name + ) + + instance_to_create = 3 + test_instance_arns = [] + for i in range(0, instance_to_create): + test_instance = ec2.create_instances( + ImageId="ami-1234abcd", + MinCount=1, + MaxCount=1, + )[0] + + instance_id_document = json.dumps( + ec2_utils.generate_instance_identity_document(test_instance) + ) + + response = ecs_client.register_container_instance( + cluster=test_cluster_name, + instanceIdentityDocument=instance_id_document) + + test_instance_arns.append(response['containerInstance']['containerInstanceArn']) + + test_instance_ids = list(map((lambda x: x.split('/')[1]), test_instance_arns)) + response = ecs_client.update_container_instances_state(cluster=test_cluster_name, + containerInstances=test_instance_ids, + status='DRAINING') + len(response['failures']).should.equal(0) + len(response['containerInstances']).should.equal(instance_to_create) + response_statuses = [ci['status'] for ci in response['containerInstances']] + for status in response_statuses: + status.should.equal('DRAINING') + response = ecs_client.update_container_instances_state(cluster=test_cluster_name, + containerInstances=test_instance_ids, + status='DRAINING') + len(response['failures']).should.equal(0) + len(response['containerInstances']).should.equal(instance_to_create) + response_statuses = [ci['status'] for ci in response['containerInstances']] + for status in response_statuses: + status.should.equal('DRAINING') + response = ecs_client.update_container_instances_state(cluster=test_cluster_name, + containerInstances=test_instance_ids, + status='ACTIVE') + len(response['failures']).should.equal(0) + len(response['containerInstances']).should.equal(instance_to_create) + response_statuses = [ci['status'] for ci in response['containerInstances']] + for status in response_statuses: + status.should.equal('ACTIVE') + ecs_client.update_container_instances_state.when.called_with(cluster=test_cluster_name, + containerInstances=test_instance_ids, + status='test_status').should.throw(Exception) + + +@mock_ec2 +@mock_ecs +def test_run_task(): + client = boto3.client('ecs', region_name='us-east-1') + ec2 = boto3.resource('ec2', region_name='us-east-1') + + test_cluster_name = 'test_ecs_cluster' + + _ = client.create_cluster( + clusterName=test_cluster_name + ) + + test_instance = ec2.create_instances( + ImageId="ami-1234abcd", + MinCount=1, + MaxCount=1, + )[0] + + instance_id_document = json.dumps( + ec2_utils.generate_instance_identity_document(test_instance) + ) + + response = client.register_container_instance( + cluster=test_cluster_name, + instanceIdentityDocument=instance_id_document + ) + + _ = client.register_task_definition( + family='test_ecs_task', + containerDefinitions=[ + { + 'name': 'hello_world', + 'image': 'docker/hello-world:latest', + 'cpu': 1024, + 'memory': 400, + 'essential': True, + 'environment': [{ + 'name': 'AWS_ACCESS_KEY_ID', + 'value': 'SOME_ACCESS_KEY' + }], + 'logConfiguration': 
{'logDriver': 'json-file'} + } + ] + ) + response = client.run_task( + cluster='test_ecs_cluster', + overrides={}, + taskDefinition='test_ecs_task', + count=2, + startedBy='moto' + ) + len(response['tasks']).should.equal(2) + response['tasks'][0]['taskArn'].should.contain( + 'arn:aws:ecs:us-east-1:012345678910:task/') + response['tasks'][0]['clusterArn'].should.equal( + 'arn:aws:ecs:us-east-1:012345678910:cluster/test_ecs_cluster') + response['tasks'][0]['taskDefinitionArn'].should.equal( + 'arn:aws:ecs:us-east-1:012345678910:task-definition/test_ecs_task:1') + response['tasks'][0]['containerInstanceArn'].should.contain( + 'arn:aws:ecs:us-east-1:012345678910:container-instance/') + response['tasks'][0]['overrides'].should.equal({}) + response['tasks'][0]['lastStatus'].should.equal("RUNNING") + response['tasks'][0]['desiredStatus'].should.equal("RUNNING") + response['tasks'][0]['startedBy'].should.equal("moto") + response['tasks'][0]['stoppedReason'].should.equal("") + + +@mock_ec2 +@mock_ecs +def test_start_task(): + client = boto3.client('ecs', region_name='us-east-1') + ec2 = boto3.resource('ec2', region_name='us-east-1') + + test_cluster_name = 'test_ecs_cluster' + + _ = client.create_cluster( + clusterName=test_cluster_name + ) + + test_instance = ec2.create_instances( + ImageId="ami-1234abcd", + MinCount=1, + MaxCount=1, + )[0] + + instance_id_document = json.dumps( + ec2_utils.generate_instance_identity_document(test_instance) + ) + + response = client.register_container_instance( + cluster=test_cluster_name, + instanceIdentityDocument=instance_id_document + ) + + container_instances = client.list_container_instances( + cluster=test_cluster_name) + container_instance_id = container_instances[ + 'containerInstanceArns'][0].split('/')[-1] + + _ = client.register_task_definition( + family='test_ecs_task', + containerDefinitions=[ + { + 'name': 'hello_world', + 'image': 'docker/hello-world:latest', + 'cpu': 1024, + 'memory': 400, + 'essential': True, + 'environment': [{ + 'name': 'AWS_ACCESS_KEY_ID', + 'value': 'SOME_ACCESS_KEY' + }], + 'logConfiguration': {'logDriver': 'json-file'} + } + ] + ) + + response = client.start_task( + cluster='test_ecs_cluster', + taskDefinition='test_ecs_task', + overrides={}, + containerInstances=[container_instance_id], + startedBy='moto' + ) + + len(response['tasks']).should.equal(1) + response['tasks'][0]['taskArn'].should.contain( + 'arn:aws:ecs:us-east-1:012345678910:task/') + response['tasks'][0]['clusterArn'].should.equal( + 'arn:aws:ecs:us-east-1:012345678910:cluster/test_ecs_cluster') + response['tasks'][0]['taskDefinitionArn'].should.equal( + 'arn:aws:ecs:us-east-1:012345678910:task-definition/test_ecs_task:1') + response['tasks'][0]['containerInstanceArn'].should.equal( + 'arn:aws:ecs:us-east-1:012345678910:container-instance/{0}'.format(container_instance_id)) + response['tasks'][0]['overrides'].should.equal({}) + response['tasks'][0]['lastStatus'].should.equal("RUNNING") + response['tasks'][0]['desiredStatus'].should.equal("RUNNING") + response['tasks'][0]['startedBy'].should.equal("moto") + response['tasks'][0]['stoppedReason'].should.equal("") + + +@mock_ec2 +@mock_ecs +def test_list_tasks(): + client = boto3.client('ecs', region_name='us-east-1') + ec2 = boto3.resource('ec2', region_name='us-east-1') + + test_cluster_name = 'test_ecs_cluster' + + _ = client.create_cluster( + clusterName=test_cluster_name + ) + + test_instance = ec2.create_instances( + ImageId="ami-1234abcd", + MinCount=1, + MaxCount=1, + )[0] + + instance_id_document = 
json.dumps(
+        ec2_utils.generate_instance_identity_document(test_instance)
+    )
+
+    _ = client.register_container_instance(
+        cluster=test_cluster_name,
+        instanceIdentityDocument=instance_id_document
+    )
+
+    container_instances = client.list_container_instances(
+        cluster=test_cluster_name)
+    container_instance_id = container_instances[
+        'containerInstanceArns'][0].split('/')[-1]
+
+    _ = client.register_task_definition(
+        family='test_ecs_task',
+        containerDefinitions=[
+            {
+                'name': 'hello_world',
+                'image': 'docker/hello-world:latest',
+                'cpu': 1024,
+                'memory': 400,
+                'essential': True,
+                'environment': [{
+                    'name': 'AWS_ACCESS_KEY_ID',
+                    'value': 'SOME_ACCESS_KEY'
+                }],
+                'logConfiguration': {'logDriver': 'json-file'}
+            }
+        ]
+    )
+
+    _ = client.start_task(
+        cluster='test_ecs_cluster',
+        taskDefinition='test_ecs_task',
+        overrides={},
+        containerInstances=[container_instance_id],
+        startedBy='foo'
+    )
+
+    _ = client.start_task(
+        cluster='test_ecs_cluster',
+        taskDefinition='test_ecs_task',
+        overrides={},
+        containerInstances=[container_instance_id],
+        startedBy='bar'
+    )
+
+    len(client.list_tasks()['taskArns']).should.equal(2)
+    len(client.list_tasks(cluster='test_ecs_cluster')
+        ['taskArns']).should.equal(2)
+    len(client.list_tasks(startedBy='foo')['taskArns']).should.equal(1)
+
+
+@mock_ec2
+@mock_ecs
+def test_describe_tasks():
+    client = boto3.client('ecs', region_name='us-east-1')
+    ec2 = boto3.resource('ec2', region_name='us-east-1')
+
+    test_cluster_name = 'test_ecs_cluster'
+
+    _ = client.create_cluster(
+        clusterName=test_cluster_name
+    )
+
+    test_instance = ec2.create_instances(
+        ImageId="ami-1234abcd",
+        MinCount=1,
+        MaxCount=1,
+    )[0]
+
+    instance_id_document = json.dumps(
+        ec2_utils.generate_instance_identity_document(test_instance)
+    )
+
+    response = client.register_container_instance(
+        cluster=test_cluster_name,
+        instanceIdentityDocument=instance_id_document
+    )
+
+    _ = client.register_task_definition(
+        family='test_ecs_task',
+        containerDefinitions=[
+            {
+                'name': 'hello_world',
+                'image': 'docker/hello-world:latest',
+                'cpu': 1024,
+                'memory': 400,
+                'essential': True,
+                'environment': [{
+                    'name': 'AWS_ACCESS_KEY_ID',
+                    'value': 'SOME_ACCESS_KEY'
+                }],
+                'logConfiguration': {'logDriver': 'json-file'}
+            }
+        ]
+    )
+    tasks_arns = [
+        task['taskArn'] for task in client.run_task(
+            cluster='test_ecs_cluster',
+            overrides={},
+            taskDefinition='test_ecs_task',
+            count=2,
+            startedBy='moto'
+        )['tasks']
+    ]
+    response = client.describe_tasks(
+        cluster='test_ecs_cluster',
+        tasks=tasks_arns
+    )
+
+    len(response['tasks']).should.equal(2)
+    set([response['tasks'][0]['taskArn'], response['tasks']
+         [1]['taskArn']]).should.equal(set(tasks_arns))
+
+    # Test we can pass task ids instead of ARNs
+    response = client.describe_tasks(
+        cluster='test_ecs_cluster',
+        tasks=[tasks_arns[0].split("/")[-1]]
+    )
+    len(response['tasks']).should.equal(1)
+
+
+@mock_ecs
+def test_describe_task_definition():
+    client = boto3.client('ecs', region_name='us-east-1')
+    container_definition = {
+        'name': 'hello_world',
+        'image': 'docker/hello-world:latest',
+        'cpu': 1024,
+        'memory': 400,
+        'essential': True,
+        'environment': [{
+            'name': 'AWS_ACCESS_KEY_ID',
+            'value': 'SOME_ACCESS_KEY'
+        }],
+        'logConfiguration': {'logDriver': 'json-file'}
+    }
+    task_definition = client.register_task_definition(
+        family='test_ecs_task',
+        containerDefinitions=[container_definition]
+    )
+    family = task_definition['taskDefinition']['family']
+    task = client.describe_task_definition(
+        taskDefinition=family)['taskDefinition']
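+    # Note: boto3 nests the definition under the 'taskDefinition' key in both
+    # the register_task_definition and describe_task_definition responses,
+    # hence the extra lookups above.
+    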
task['containerDefinitions'][0].should.equal(container_definition)
+    task['taskDefinitionArn'].should.equal(
+        'arn:aws:ecs:us-east-1:012345678910:task-definition/test_ecs_task:1')
+    task['volumes'].should.equal([])
+
+
+@mock_ec2
+@mock_ecs
+def test_stop_task():
+    client = boto3.client('ecs', region_name='us-east-1')
+    ec2 = boto3.resource('ec2', region_name='us-east-1')
+
+    test_cluster_name = 'test_ecs_cluster'
+
+    _ = client.create_cluster(
+        clusterName=test_cluster_name
+    )
+
+    test_instance = ec2.create_instances(
+        ImageId="ami-1234abcd",
+        MinCount=1,
+        MaxCount=1,
+    )[0]
+
+    instance_id_document = json.dumps(
+        ec2_utils.generate_instance_identity_document(test_instance)
+    )
+
+    _ = client.register_container_instance(
+        cluster=test_cluster_name,
+        instanceIdentityDocument=instance_id_document
+    )
+
+    _ = client.register_task_definition(
+        family='test_ecs_task',
+        containerDefinitions=[
+            {
+                'name': 'hello_world',
+                'image': 'docker/hello-world:latest',
+                'cpu': 1024,
+                'memory': 400,
+                'essential': True,
+                'environment': [{
+                    'name': 'AWS_ACCESS_KEY_ID',
+                    'value': 'SOME_ACCESS_KEY'
+                }],
+                'logConfiguration': {'logDriver': 'json-file'}
+            }
+        ]
+    )
+    run_response = client.run_task(
+        cluster='test_ecs_cluster',
+        overrides={},
+        taskDefinition='test_ecs_task',
+        count=1,
+        startedBy='moto'
+    )
+    stop_response = client.stop_task(
+        cluster='test_ecs_cluster',
+        task=run_response['tasks'][0].get('taskArn'),
+        reason='moto testing'
+    )
+
+    stop_response['task']['taskArn'].should.equal(
+        run_response['tasks'][0].get('taskArn'))
+    stop_response['task']['lastStatus'].should.equal('STOPPED')
+    stop_response['task']['desiredStatus'].should.equal('STOPPED')
+    stop_response['task']['stoppedReason'].should.equal('moto testing')
+
+
+@mock_ec2
+@mock_ecs
+def test_resource_reservation_and_release():
+    client = boto3.client('ecs', region_name='us-east-1')
+    ec2 = boto3.resource('ec2', region_name='us-east-1')
+
+    test_cluster_name = 'test_ecs_cluster'
+
+    _ = client.create_cluster(
+        clusterName=test_cluster_name
+    )
+
+    test_instance = ec2.create_instances(
+        ImageId="ami-1234abcd",
+        MinCount=1,
+        MaxCount=1,
+    )[0]
+
+    instance_id_document = json.dumps(
+        ec2_utils.generate_instance_identity_document(test_instance)
+    )
+
+    _ = client.register_container_instance(
+        cluster=test_cluster_name,
+        instanceIdentityDocument=instance_id_document
+    )
+
+    _ = client.register_task_definition(
+        family='test_ecs_task',
+        containerDefinitions=[
+            {
+                'name': 'hello_world',
+                'image': 'docker/hello-world:latest',
+                'cpu': 1024,
+                'memory': 400,
+                'essential': True,
+                'environment': [{
+                    'name': 'AWS_ACCESS_KEY_ID',
+                    'value': 'SOME_ACCESS_KEY'
+                }],
+                'logConfiguration': {'logDriver': 'json-file'},
+                'portMappings': [
+                    {
+                        'hostPort': 80,
+                        'containerPort': 8080
+                    }
+                ]
+            }
+        ]
+    )
+    run_response = client.run_task(
+        cluster='test_ecs_cluster',
+        overrides={},
+        taskDefinition='test_ecs_task',
+        count=1,
+        startedBy='moto'
+    )
+    container_instance_arn = run_response['tasks'][0].get('containerInstanceArn')
+    container_instance_description = client.describe_container_instances(
+        cluster='test_ecs_cluster',
+        containerInstances=[container_instance_arn]
+    )['containerInstances'][0]
+    remaining_resources, registered_resources = _fetch_container_instance_resources(
+        container_instance_description)
+    remaining_resources['CPU'].should.equal(registered_resources['CPU'] - 1024)
+    remaining_resources['MEMORY'].should.equal(registered_resources['MEMORY'] - 400)
+    registered_resources['PORTS'].append('80')
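+    # The running task reserves host port 80, so the expectation below is
+    # that '80' shows up in the instance's remaining PORTS resource on top of
+    # the ports registered by default.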
+ remaining_resources['PORTS'].should.equal(registered_resources['PORTS']) + container_instance_description['runningTasksCount'].should.equal(1) + client.stop_task( + cluster='test_ecs_cluster', + task=run_response['tasks'][0].get('taskArn'), + reason='moto testing' + ) + container_instance_description = client.describe_container_instances( + cluster='test_ecs_cluster', + containerInstances=[container_instance_arn] + )['containerInstances'][0] + remaining_resources, registered_resources = _fetch_container_instance_resources( + container_instance_description) + remaining_resources['CPU'].should.equal(registered_resources['CPU']) + remaining_resources['MEMORY'].should.equal(registered_resources['MEMORY']) + remaining_resources['PORTS'].should.equal(registered_resources['PORTS']) + container_instance_description['runningTasksCount'].should.equal(0) + +@mock_ec2 +@mock_ecs +def test_resource_reservation_and_release_memory_reservation(): + client = boto3.client('ecs', region_name='us-east-1') + ec2 = boto3.resource('ec2', region_name='us-east-1') + + test_cluster_name = 'test_ecs_cluster' + + _ = client.create_cluster( + clusterName=test_cluster_name + ) + + test_instance = ec2.create_instances( + ImageId="ami-1234abcd", + MinCount=1, + MaxCount=1, + )[0] + + instance_id_document = json.dumps( + ec2_utils.generate_instance_identity_document(test_instance) + ) + + _ = client.register_container_instance( + cluster=test_cluster_name, + instanceIdentityDocument=instance_id_document + ) + + _ = client.register_task_definition( + family='test_ecs_task', + containerDefinitions=[ + { + 'name': 'hello_world', + 'image': 'docker/hello-world:latest', + 'memoryReservation': 400, + 'essential': True, + 'environment': [{ + 'name': 'AWS_ACCESS_KEY_ID', + 'value': 'SOME_ACCESS_KEY' + }], + 'logConfiguration': {'logDriver': 'json-file'}, + 'portMappings': [ + { + 'containerPort': 8080 + } + ] + } + ] + ) + run_response = client.run_task( + cluster='test_ecs_cluster', + overrides={}, + taskDefinition='test_ecs_task', + count=1, + startedBy='moto' + ) + container_instance_arn = run_response['tasks'][0].get('containerInstanceArn') + container_instance_description = client.describe_container_instances( + cluster='test_ecs_cluster', + containerInstances=[container_instance_arn] + )['containerInstances'][0] + remaining_resources, registered_resources = _fetch_container_instance_resources(container_instance_description) + remaining_resources['CPU'].should.equal(registered_resources['CPU']) + remaining_resources['MEMORY'].should.equal(registered_resources['MEMORY'] - 400) + remaining_resources['PORTS'].should.equal(registered_resources['PORTS']) + container_instance_description['runningTasksCount'].should.equal(1) + client.stop_task( + cluster='test_ecs_cluster', + task=run_response['tasks'][0].get('taskArn'), + reason='moto testing' + ) + container_instance_description = client.describe_container_instances( + cluster='test_ecs_cluster', + containerInstances=[container_instance_arn] + )['containerInstances'][0] + remaining_resources, registered_resources = _fetch_container_instance_resources(container_instance_description) + remaining_resources['CPU'].should.equal(registered_resources['CPU']) + remaining_resources['MEMORY'].should.equal(registered_resources['MEMORY']) + remaining_resources['PORTS'].should.equal(registered_resources['PORTS']) + container_instance_description['runningTasksCount'].should.equal(0) + + + +@mock_ecs +@mock_cloudformation +def test_create_cluster_through_cloudformation(): + template = { + 
"AWSTemplateFormatVersion": "2010-09-09", + "Description": "ECS Cluster Test CloudFormation", + "Resources": { + "testCluster": { + "Type": "AWS::ECS::Cluster", + "Properties": { + "ClusterName": "testcluster" + } + } + } + } + template_json = json.dumps(template) + + ecs_conn = boto3.client('ecs', region_name='us-west-1') + resp = ecs_conn.list_clusters() + len(resp['clusterArns']).should.equal(0) + + cfn_conn = boto3.client('cloudformation', region_name='us-west-1') + cfn_conn.create_stack( + StackName="test_stack", + TemplateBody=template_json, + ) + + resp = ecs_conn.list_clusters() + len(resp['clusterArns']).should.equal(1) + + +@mock_ecs +@mock_cloudformation +def test_create_cluster_through_cloudformation_no_name(): + # cloudformation should create a cluster name for you if you do not provide it + # https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-ecs-cluster.html#cfn-ecs-cluster-clustername + template = { + "AWSTemplateFormatVersion": "2010-09-09", + "Description": "ECS Cluster Test CloudFormation", + "Resources": { + "testCluster": { + "Type": "AWS::ECS::Cluster", + } + } + } + template_json = json.dumps(template) + cfn_conn = boto3.client('cloudformation', region_name='us-west-1') + cfn_conn.create_stack( + StackName="test_stack", + TemplateBody=template_json, + ) + + ecs_conn = boto3.client('ecs', region_name='us-west-1') + resp = ecs_conn.list_clusters() + len(resp['clusterArns']).should.equal(1) + + +@mock_ecs +@mock_cloudformation +def test_update_cluster_name_through_cloudformation_should_trigger_a_replacement(): + template1 = { + "AWSTemplateFormatVersion": "2010-09-09", + "Description": "ECS Cluster Test CloudFormation", + "Resources": { + "testCluster": { + "Type": "AWS::ECS::Cluster", + "Properties": { + "ClusterName": "testcluster1" + } + } + } + } + template2 = deepcopy(template1) + template2['Resources']['testCluster'][ + 'Properties']['ClusterName'] = 'testcluster2' + template1_json = json.dumps(template1) + cfn_conn = boto3.client('cloudformation', region_name='us-west-1') + stack_resp = cfn_conn.create_stack( + StackName="test_stack", + TemplateBody=template1_json, + ) + + template2_json = json.dumps(template2) + cfn_conn.update_stack( + StackName=stack_resp['StackId'], + TemplateBody=template2_json + ) + ecs_conn = boto3.client('ecs', region_name='us-west-1') + resp = ecs_conn.list_clusters() + len(resp['clusterArns']).should.equal(1) + resp['clusterArns'][0].endswith('testcluster2').should.be.true + + +@mock_ecs +@mock_cloudformation +def test_create_task_definition_through_cloudformation(): + template = { + "AWSTemplateFormatVersion": "2010-09-09", + "Description": "ECS Cluster Test CloudFormation", + "Resources": { + "testTaskDefinition": { + "Type": "AWS::ECS::TaskDefinition", + "Properties": { + "ContainerDefinitions": [ + { + "Name": "ecs-sample", + "Image": "amazon/amazon-ecs-sample", + "Cpu": "200", + "Memory": "500", + "Essential": "true" + } + ], + "Volumes": [], + } + } + } + } + template_json = json.dumps(template) + cfn_conn = boto3.client('cloudformation', region_name='us-west-1') + stack_name = 'test_stack' + cfn_conn.create_stack( + StackName=stack_name, + TemplateBody=template_json, + ) + + ecs_conn = boto3.client('ecs', region_name='us-west-1') + resp = ecs_conn.list_task_definitions() + len(resp['taskDefinitionArns']).should.equal(1) + task_definition_arn = resp['taskDefinitionArns'][0] + + task_definition_details = cfn_conn.describe_stack_resource( + 
StackName=stack_name,LogicalResourceId='testTaskDefinition')['StackResourceDetail'] + task_definition_details['PhysicalResourceId'].should.equal(task_definition_arn) + +@mock_ec2 +@mock_ecs +def test_task_definitions_unable_to_be_placed(): + client = boto3.client('ecs', region_name='us-east-1') + ec2 = boto3.resource('ec2', region_name='us-east-1') + + test_cluster_name = 'test_ecs_cluster' + + _ = client.create_cluster( + clusterName=test_cluster_name + ) + + test_instance = ec2.create_instances( + ImageId="ami-1234abcd", + MinCount=1, + MaxCount=1, + )[0] + + instance_id_document = json.dumps( + ec2_utils.generate_instance_identity_document(test_instance) + ) + + response = client.register_container_instance( + cluster=test_cluster_name, + instanceIdentityDocument=instance_id_document + ) + + _ = client.register_task_definition( + family='test_ecs_task', + containerDefinitions=[ + { + 'name': 'hello_world', + 'image': 'docker/hello-world:latest', + 'cpu': 5000, + 'memory': 40000, + 'essential': True, + 'environment': [{ + 'name': 'AWS_ACCESS_KEY_ID', + 'value': 'SOME_ACCESS_KEY' + }], + 'logConfiguration': {'logDriver': 'json-file'} + } + ] + ) + response = client.run_task( + cluster='test_ecs_cluster', + overrides={}, + taskDefinition='test_ecs_task', + count=2, + startedBy='moto' + ) + len(response['tasks']).should.equal(0) + + +@mock_ec2 +@mock_ecs +def test_task_definitions_with_port_clash(): + client = boto3.client('ecs', region_name='us-east-1') + ec2 = boto3.resource('ec2', region_name='us-east-1') + + test_cluster_name = 'test_ecs_cluster' + + _ = client.create_cluster( + clusterName=test_cluster_name + ) + + test_instance = ec2.create_instances( + ImageId="ami-1234abcd", + MinCount=1, + MaxCount=1, + )[0] + + instance_id_document = json.dumps( + ec2_utils.generate_instance_identity_document(test_instance) + ) + + response = client.register_container_instance( + cluster=test_cluster_name, + instanceIdentityDocument=instance_id_document + ) + + _ = client.register_task_definition( + family='test_ecs_task', + containerDefinitions=[ + { + 'name': 'hello_world', + 'image': 'docker/hello-world:latest', + 'cpu': 256, + 'memory': 512, + 'essential': True, + 'environment': [{ + 'name': 'AWS_ACCESS_KEY_ID', + 'value': 'SOME_ACCESS_KEY' + }], + 'logConfiguration': {'logDriver': 'json-file'}, + 'portMappings': [ + { + 'hostPort': 80, + 'containerPort': 8080 + } + ] + } + ] + ) + response = client.run_task( + cluster='test_ecs_cluster', + overrides={}, + taskDefinition='test_ecs_task', + count=2, + startedBy='moto' + ) + len(response['tasks']).should.equal(1) + response['tasks'][0]['taskArn'].should.contain( + 'arn:aws:ecs:us-east-1:012345678910:task/') + response['tasks'][0]['clusterArn'].should.equal( + 'arn:aws:ecs:us-east-1:012345678910:cluster/test_ecs_cluster') + response['tasks'][0]['taskDefinitionArn'].should.equal( + 'arn:aws:ecs:us-east-1:012345678910:task-definition/test_ecs_task:1') + response['tasks'][0]['containerInstanceArn'].should.contain( + 'arn:aws:ecs:us-east-1:012345678910:container-instance/') + response['tasks'][0]['overrides'].should.equal({}) + response['tasks'][0]['lastStatus'].should.equal("RUNNING") + response['tasks'][0]['desiredStatus'].should.equal("RUNNING") + response['tasks'][0]['startedBy'].should.equal("moto") + response['tasks'][0]['stoppedReason'].should.equal("") + + +@mock_ecs +@mock_cloudformation +def test_update_task_definition_family_through_cloudformation_should_trigger_a_replacement(): + template1 = { + "AWSTemplateFormatVersion": "2010-09-09", + 
"Description": "ECS Cluster Test CloudFormation", + "Resources": { + "testTaskDefinition": { + "Type": "AWS::ECS::TaskDefinition", + "Properties": { + "Family": "testTaskDefinition1", + "ContainerDefinitions": [ + { + "Name": "ecs-sample", + "Image": "amazon/amazon-ecs-sample", + "Cpu": "200", + "Memory": "500", + "Essential": "true" + } + ], + "Volumes": [], + } + } + } + } + template1_json = json.dumps(template1) + cfn_conn = boto3.client('cloudformation', region_name='us-west-1') + cfn_conn.create_stack( + StackName="test_stack", + TemplateBody=template1_json, + ) + + template2 = deepcopy(template1) + template2['Resources']['testTaskDefinition'][ + 'Properties']['Family'] = 'testTaskDefinition2' + template2_json = json.dumps(template2) + cfn_conn.update_stack( + StackName="test_stack", + TemplateBody=template2_json, + ) + + ecs_conn = boto3.client('ecs', region_name='us-west-1') + resp = ecs_conn.list_task_definitions(familyPrefix='testTaskDefinition') + len(resp['taskDefinitionArns']).should.equal(1) + resp['taskDefinitionArns'][0].endswith( + 'testTaskDefinition2:1').should.be.true + + +@mock_ecs +@mock_cloudformation +def test_create_service_through_cloudformation(): + template = { + "AWSTemplateFormatVersion": "2010-09-09", + "Description": "ECS Cluster Test CloudFormation", + "Resources": { + "testCluster": { + "Type": "AWS::ECS::Cluster", + "Properties": { + "ClusterName": "testcluster" + } + }, + "testTaskDefinition": { + "Type": "AWS::ECS::TaskDefinition", + "Properties": { + "ContainerDefinitions": [ + { + "Name": "ecs-sample", + "Image": "amazon/amazon-ecs-sample", + "Cpu": "200", + "Memory": "500", + "Essential": "true" + } + ], + "Volumes": [], + } + }, + "testService": { + "Type": "AWS::ECS::Service", + "Properties": { + "Cluster": {"Ref": "testCluster"}, + "DesiredCount": 10, + "TaskDefinition": {"Ref": "testTaskDefinition"}, + } + } + } + } + template_json = json.dumps(template) + cfn_conn = boto3.client('cloudformation', region_name='us-west-1') + cfn_conn.create_stack( + StackName="test_stack", + TemplateBody=template_json, + ) + + ecs_conn = boto3.client('ecs', region_name='us-west-1') + resp = ecs_conn.list_services(cluster='testcluster') + len(resp['serviceArns']).should.equal(1) + + +@mock_ecs +@mock_cloudformation +def test_update_service_through_cloudformation_should_trigger_replacement(): + template1 = { + "AWSTemplateFormatVersion": "2010-09-09", + "Description": "ECS Cluster Test CloudFormation", + "Resources": { + "testCluster": { + "Type": "AWS::ECS::Cluster", + "Properties": { + "ClusterName": "testcluster" + } + }, + "testTaskDefinition": { + "Type": "AWS::ECS::TaskDefinition", + "Properties": { + "ContainerDefinitions": [ + { + "Name": "ecs-sample", + "Image": "amazon/amazon-ecs-sample", + "Cpu": "200", + "Memory": "500", + "Essential": "true" + } + ], + "Volumes": [], + } + }, + "testService": { + "Type": "AWS::ECS::Service", + "Properties": { + "Cluster": {"Ref": "testCluster"}, + "TaskDefinition": {"Ref": "testTaskDefinition"}, + "DesiredCount": 10, + } + } + } + } + template_json1 = json.dumps(template1) + cfn_conn = boto3.client('cloudformation', region_name='us-west-1') + cfn_conn.create_stack( + StackName="test_stack", + TemplateBody=template_json1, + ) + template2 = deepcopy(template1) + template2['Resources']['testService']['Properties']['DesiredCount'] = 5 + template2_json = json.dumps(template2) + cfn_conn.update_stack( + StackName="test_stack", + TemplateBody=template2_json, + ) + + ecs_conn = boto3.client('ecs', region_name='us-west-1') + resp 
= ecs_conn.list_services(cluster='testcluster')
+    len(resp['serviceArns']).should.equal(1)
+
+
+@mock_ec2
+@mock_ecs
+def test_attributes():
+    # Combined put, list and delete attributes into the same test due to the amount of setup
+    ecs_client = boto3.client('ecs', region_name='us-east-1')
+    ec2 = boto3.resource('ec2', region_name='us-east-1')
+
+    test_cluster_name = 'test_ecs_cluster'
+
+    _ = ecs_client.create_cluster(
+        clusterName=test_cluster_name
+    )
+
+    instances = []
+    test_instance = ec2.create_instances(
+        ImageId="ami-1234abcd",
+        MinCount=1,
+        MaxCount=1,
+    )[0]
+    instances.append(test_instance)
+
+    instance_id_document = json.dumps(
+        ec2_utils.generate_instance_identity_document(test_instance)
+    )
+
+    response = ecs_client.register_container_instance(
+        cluster=test_cluster_name,
+        instanceIdentityDocument=instance_id_document
+    )
+
+    response['containerInstance'][
+        'ec2InstanceId'].should.equal(test_instance.id)
+    full_arn1 = response['containerInstance']['containerInstanceArn']
+
+    test_instance = ec2.create_instances(
+        ImageId="ami-1234abcd",
+        MinCount=1,
+        MaxCount=1,
+    )[0]
+    instances.append(test_instance)
+
+    instance_id_document = json.dumps(
+        ec2_utils.generate_instance_identity_document(test_instance)
+    )
+
+    response = ecs_client.register_container_instance(
+        cluster=test_cluster_name,
+        instanceIdentityDocument=instance_id_document
+    )
+
+    response['containerInstance'][
+        'ec2InstanceId'].should.equal(test_instance.id)
+    full_arn2 = response['containerInstance']['containerInstanceArn']
+    partial_arn2 = full_arn2.rsplit('/', 1)[-1]
+
+    full_arn2.should_not.equal(full_arn1)  # uuid1 isn't unique enough when the pc is fast ;-)
+
+    # Ok set instance 1 with 1 attribute, instance 2 with another, and all of them with a 3rd.
+    ecs_client.put_attributes(
+        cluster=test_cluster_name,
+        attributes=[
+            {'name': 'env', 'value': 'prod'},
+            {'name': 'attr1', 'value': 'instance1', 'targetId': full_arn1},
+            {'name': 'attr1', 'value': 'instance2', 'targetId': partial_arn2,
+             'targetType': 'container-instance'}
+        ]
+    )
+
+    resp = ecs_client.list_attributes(
+        cluster=test_cluster_name,
+        targetType='container-instance'
+    )
+    attrs = resp['attributes']
+
+    NUM_CUSTOM_ATTRIBUTES = 4  # 2 specific to individual machines and 1 global, going to both machines (2 + 1*2)
+    NUM_DEFAULT_ATTRIBUTES = 4
+    len(attrs).should.equal(NUM_CUSTOM_ATTRIBUTES + (NUM_DEFAULT_ATTRIBUTES * len(instances)))
+
+    # Tests that the attrs have been set properly
+    len(list(filter(lambda item: item['name'] == 'env', attrs))).should.equal(2)
+    len(list(
+        filter(lambda item: item['name'] == 'attr1' and item['value'] == 'instance1', attrs))).should.equal(1)
+
+    ecs_client.delete_attributes(
+        cluster=test_cluster_name,
+        attributes=[
+            {'name': 'attr1', 'value': 'instance2', 'targetId': partial_arn2,
+             'targetType': 'container-instance'}
+        ]
+    )
+    NUM_CUSTOM_ATTRIBUTES -= 1
+
+    resp = ecs_client.list_attributes(
+        cluster=test_cluster_name,
+        targetType='container-instance'
+    )
+    attrs = resp['attributes']
+    len(attrs).should.equal(NUM_CUSTOM_ATTRIBUTES + (NUM_DEFAULT_ATTRIBUTES * len(instances)))
+
+
+@mock_ecs
+def test_poll_endpoint():
+    ecs_client = boto3.client('ecs', region_name='us-east-1')
+
+    # Just a placeholder until someone actually wants useless data; just
+    # testing that it doesn't raise an exception.
+    resp = ecs_client.discover_poll_endpoint(cluster='blah', containerInstance='blah')
+    resp.should.contain('endpoint')
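+    # discover_poll_endpoint is expected to return both the agent 'endpoint'
+    # and a 'telemetryEndpoint', which is checked next.
+    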
resp.should.contain('telemetryEndpoint') + + +@mock_ecs +def test_list_task_definition_families(): + client = boto3.client('ecs', region_name='us-east-1') + client.register_task_definition( + family='test_ecs_task', + containerDefinitions=[ + { + 'name': 'hello_world', + 'image': 'docker/hello-world:latest', + 'cpu': 1024, + 'memory': 400, + 'essential': True, + 'environment': [{ + 'name': 'AWS_ACCESS_KEY_ID', + 'value': 'SOME_ACCESS_KEY' + }], + 'logConfiguration': {'logDriver': 'json-file'} + } + ] + ) + client.register_task_definition( + family='alt_test_ecs_task', + containerDefinitions=[ + { + 'name': 'hello_world', + 'image': 'docker/hello-world:latest', + 'cpu': 1024, + 'memory': 400, + 'essential': True, + 'environment': [{ + 'name': 'AWS_ACCESS_KEY_ID', + 'value': 'SOME_ACCESS_KEY' + }], + 'logConfiguration': {'logDriver': 'json-file'} + } + ] + ) + + resp1 = client.list_task_definition_families() + resp2 = client.list_task_definition_families(familyPrefix='alt') + + len(resp1['families']).should.equal(2) + len(resp2['families']).should.equal(1) + + +@mock_ec2 +@mock_ecs +def test_default_container_instance_attributes(): + ecs_client = boto3.client('ecs', region_name='us-east-1') + ec2 = boto3.resource('ec2', region_name='us-east-1') + + test_cluster_name = 'test_ecs_cluster' + + # Create cluster and EC2 instance + _ = ecs_client.create_cluster( + clusterName=test_cluster_name + ) + + test_instance = ec2.create_instances( + ImageId="ami-1234abcd", + MinCount=1, + MaxCount=1, + )[0] + + instance_id_document = json.dumps( + ec2_utils.generate_instance_identity_document(test_instance) + ) + + # Register container instance + response = ecs_client.register_container_instance( + cluster=test_cluster_name, + instanceIdentityDocument=instance_id_document + ) + + response['containerInstance'][ + 'ec2InstanceId'].should.equal(test_instance.id) + full_arn = response['containerInstance']['containerInstanceArn'] + container_instance_id = full_arn.rsplit('/', 1)[-1] + + default_attributes = response['containerInstance']['attributes'] + assert len(default_attributes) == 4 + expected_result = [ + {'name': 'ecs.availability-zone', 'value': test_instance.placement['AvailabilityZone']}, + {'name': 'ecs.ami-id', 'value': test_instance.image_id}, + {'name': 'ecs.instance-type', 'value': test_instance.instance_type}, + {'name': 'ecs.os-type', 'value': test_instance.platform or 'linux'} + ] + assert sorted(default_attributes, key=lambda item: item['name']) == sorted(expected_result, + key=lambda item: item['name']) + + +@mock_ec2 +@mock_ecs +def test_describe_container_instances_with_attributes(): + ecs_client = boto3.client('ecs', region_name='us-east-1') + ec2 = boto3.resource('ec2', region_name='us-east-1') + + test_cluster_name = 'test_ecs_cluster' + + # Create cluster and EC2 instance + _ = ecs_client.create_cluster( + clusterName=test_cluster_name + ) + + test_instance = ec2.create_instances( + ImageId="ami-1234abcd", + MinCount=1, + MaxCount=1, + )[0] + + instance_id_document = json.dumps( + ec2_utils.generate_instance_identity_document(test_instance) + ) + + # Register container instance + response = ecs_client.register_container_instance( + cluster=test_cluster_name, + instanceIdentityDocument=instance_id_document + ) + + response['containerInstance'][ + 'ec2InstanceId'].should.equal(test_instance.id) + full_arn = response['containerInstance']['containerInstanceArn'] + container_instance_id = full_arn.rsplit('/', 1)[-1] + default_attributes = response['containerInstance']['attributes'] + + # 
Set attributes on container instance, one without a value + attributes = [ + {'name': 'env', 'value': 'prod'}, + {'name': 'attr1', 'value': 'instance1', 'targetId': container_instance_id, + 'targetType': 'container-instance'}, + {'name': 'attr_without_value'} + ] + ecs_client.put_attributes( + cluster=test_cluster_name, + attributes=attributes + ) + + # Describe container instance, should have attributes previously set + described_instance = ecs_client.describe_container_instances(cluster=test_cluster_name, + containerInstances=[container_instance_id]) + + assert len(described_instance['containerInstances']) == 1 + assert isinstance(described_instance['containerInstances'][0]['attributes'], list) + + # Remove additional info passed to put_attributes + cleaned_attributes = [] + for attribute in attributes: + attribute.pop('targetId', None) + attribute.pop('targetType', None) + cleaned_attributes.append(attribute) + described_attributes = sorted(described_instance['containerInstances'][0]['attributes'], + key=lambda item: item['name']) + expected_attributes = sorted(default_attributes + cleaned_attributes, key=lambda item: item['name']) + assert described_attributes == expected_attributes + + +def _fetch_container_instance_resources(container_instance_description): + remaining_resources = {} + registered_resources = {} + remaining_resources_list = container_instance_description['remainingResources'] + registered_resources_list = container_instance_description['registeredResources'] + remaining_resources['CPU'] = [x['integerValue'] for x in remaining_resources_list if x['name'] == 'CPU'][ + 0] + remaining_resources['MEMORY'] = \ + [x['integerValue'] for x in remaining_resources_list if x['name'] == 'MEMORY'][0] + remaining_resources['PORTS'] = \ + [x['stringSetValue'] for x in remaining_resources_list if x['name'] == 'PORTS'][0] + registered_resources['CPU'] = \ + [x['integerValue'] for x in registered_resources_list if x['name'] == 'CPU'][0] + registered_resources['MEMORY'] = \ + [x['integerValue'] for x in registered_resources_list if x['name'] == 'MEMORY'][0] + registered_resources['PORTS'] = \ + [x['stringSetValue'] for x in registered_resources_list if x['name'] == 'PORTS'][0] + return remaining_resources, registered_resources + + +@mock_ecs +def test_create_service_load_balancing(): + client = boto3.client('ecs', region_name='us-east-1') + client.create_cluster( + clusterName='test_ecs_cluster' + ) + client.register_task_definition( + family='test_ecs_task', + containerDefinitions=[ + { + 'name': 'hello_world', + 'image': 'docker/hello-world:latest', + 'cpu': 1024, + 'memory': 400, + 'essential': True, + 'environment': [{ + 'name': 'AWS_ACCESS_KEY_ID', + 'value': 'SOME_ACCESS_KEY' + }], + 'logConfiguration': {'logDriver': 'json-file'} + } + ] + ) + response = client.create_service( + cluster='test_ecs_cluster', + serviceName='test_ecs_service', + taskDefinition='test_ecs_task', + desiredCount=2, + loadBalancers=[ + { + 'targetGroupArn': 'test_target_group_arn', + 'loadBalancerName': 'test_load_balancer_name', + 'containerName': 'test_container_name', + 'containerPort': 123 + } + ] + ) + response['service']['clusterArn'].should.equal( + 'arn:aws:ecs:us-east-1:012345678910:cluster/test_ecs_cluster') + response['service']['desiredCount'].should.equal(2) + len(response['service']['events']).should.equal(0) + len(response['service']['loadBalancers']).should.equal(1) + response['service']['loadBalancers'][0]['targetGroupArn'].should.equal( + 'test_target_group_arn') + 
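+    # create_service is expected to echo the load balancer configuration back
+    # in the service description; the remaining fields are verified below.
+    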
response['service']['loadBalancers'][0]['loadBalancerName'].should.equal( + 'test_load_balancer_name') + response['service']['loadBalancers'][0]['containerName'].should.equal( + 'test_container_name') + response['service']['loadBalancers'][0]['containerPort'].should.equal(123) + response['service']['pendingCount'].should.equal(0) + response['service']['runningCount'].should.equal(0) + response['service']['serviceArn'].should.equal( + 'arn:aws:ecs:us-east-1:012345678910:service/test_ecs_service') + response['service']['serviceName'].should.equal('test_ecs_service') + response['service']['status'].should.equal('ACTIVE') + response['service']['taskDefinition'].should.equal( + 'arn:aws:ecs:us-east-1:012345678910:task-definition/test_ecs_task:1') diff --git a/tests/test_elb/test_elb.py b/tests/test_elb/test_elb.py index a67508430d7a..6c6492894e6b 100644 --- a/tests/test_elb/test_elb.py +++ b/tests/test_elb/test_elb.py @@ -1,982 +1,982 @@ -from __future__ import unicode_literals -import boto3 -import botocore -import boto -import boto.ec2.elb -from boto.ec2.elb import HealthCheck -from boto.ec2.elb.attributes import ( - ConnectionSettingAttribute, - ConnectionDrainingAttribute, - AccessLogAttribute, -) -from botocore.exceptions import ClientError -from boto.exception import BotoServerError -from nose.tools import assert_raises -import sure # noqa - -from moto import mock_elb, mock_ec2, mock_elb_deprecated, mock_ec2_deprecated - - -@mock_elb_deprecated -@mock_ec2_deprecated -def test_create_load_balancer(): - conn = boto.connect_elb() - ec2 = boto.connect_ec2('the_key', 'the_secret') - - security_group = ec2.create_security_group('sg-abc987', 'description') - - zones = ['us-east-1a', 'us-east-1b'] - ports = [(80, 8080, 'http'), (443, 8443, 'tcp')] - conn.create_load_balancer('my-lb', zones, ports, scheme='internal', security_groups=[security_group.id]) - - balancers = conn.get_all_load_balancers() - balancer = balancers[0] - balancer.name.should.equal("my-lb") - balancer.scheme.should.equal("internal") - list(balancer.security_groups).should.equal([security_group.id]) - set(balancer.availability_zones).should.equal( - set(['us-east-1a', 'us-east-1b'])) - listener1 = balancer.listeners[0] - listener1.load_balancer_port.should.equal(80) - listener1.instance_port.should.equal(8080) - listener1.protocol.should.equal("HTTP") - listener2 = balancer.listeners[1] - listener2.load_balancer_port.should.equal(443) - listener2.instance_port.should.equal(8443) - listener2.protocol.should.equal("TCP") - - -@mock_elb_deprecated -def test_getting_missing_elb(): - conn = boto.connect_elb() - conn.get_all_load_balancers.when.called_with( - load_balancer_names='aaa').should.throw(BotoServerError) - - -@mock_elb_deprecated -def test_create_elb_in_multiple_region(): - zones = ['us-east-1a', 'us-east-1b'] - ports = [(80, 8080, 'http'), (443, 8443, 'tcp')] - - west1_conn = boto.ec2.elb.connect_to_region("us-west-1") - west1_conn.create_load_balancer('my-lb', zones, ports) - - west2_conn = boto.ec2.elb.connect_to_region("us-west-2") - west2_conn.create_load_balancer('my-lb', zones, ports) - - list(west1_conn.get_all_load_balancers()).should.have.length_of(1) - list(west2_conn.get_all_load_balancers()).should.have.length_of(1) - - -@mock_elb_deprecated -def test_create_load_balancer_with_certificate(): - conn = boto.connect_elb() - - zones = ['us-east-1a'] - ports = [ - (443, 8443, 'https', 'arn:aws:iam:123456789012:server-certificate/test-cert')] - conn.create_load_balancer('my-lb', zones, ports) - - balancers = 
conn.get_all_load_balancers() - balancer = balancers[0] - balancer.name.should.equal("my-lb") - balancer.scheme.should.equal("internet-facing") - set(balancer.availability_zones).should.equal(set(['us-east-1a'])) - listener = balancer.listeners[0] - listener.load_balancer_port.should.equal(443) - listener.instance_port.should.equal(8443) - listener.protocol.should.equal("HTTPS") - listener.ssl_certificate_id.should.equal( - 'arn:aws:iam:123456789012:server-certificate/test-cert') - - -@mock_elb -def test_create_and_delete_boto3_support(): - client = boto3.client('elb', region_name='us-east-1') - - client.create_load_balancer( - LoadBalancerName='my-lb', - Listeners=[ - {'Protocol': 'tcp', 'LoadBalancerPort': 80, 'InstancePort': 8080}], - AvailabilityZones=['us-east-1a', 'us-east-1b'] - ) - list(client.describe_load_balancers()[ - 'LoadBalancerDescriptions']).should.have.length_of(1) - - client.delete_load_balancer( - LoadBalancerName='my-lb' - ) - list(client.describe_load_balancers()[ - 'LoadBalancerDescriptions']).should.have.length_of(0) - - -@mock_elb -def test_create_load_balancer_with_no_listeners_defined(): - client = boto3.client('elb', region_name='us-east-1') - - with assert_raises(ClientError): - client.create_load_balancer( - LoadBalancerName='my-lb', - Listeners=[], - AvailabilityZones=['us-east-1a', 'us-east-1b'] - ) - - -@mock_elb -def test_describe_paginated_balancers(): - client = boto3.client('elb', region_name='us-east-1') - - for i in range(51): - client.create_load_balancer( - LoadBalancerName='my-lb%d' % i, - Listeners=[ - {'Protocol': 'tcp', 'LoadBalancerPort': 80, 'InstancePort': 8080}], - AvailabilityZones=['us-east-1a', 'us-east-1b'] - ) - - resp = client.describe_load_balancers() - resp['LoadBalancerDescriptions'].should.have.length_of(50) - resp['NextMarker'].should.equal(resp['LoadBalancerDescriptions'][-1]['LoadBalancerName']) - resp2 = client.describe_load_balancers(Marker=resp['NextMarker']) - resp2['LoadBalancerDescriptions'].should.have.length_of(1) - assert 'NextToken' not in resp2.keys() - - -@mock_elb -@mock_ec2 -def test_apply_security_groups_to_load_balancer(): - client = boto3.client('elb', region_name='us-east-1') - ec2 = boto3.resource('ec2', region_name='us-east-1') - - vpc = ec2.create_vpc(CidrBlock='10.0.0.0/16') - security_group = ec2.create_security_group( - GroupName='sg01', Description='Test security group sg01', VpcId=vpc.id) - - client.create_load_balancer( - LoadBalancerName='my-lb', - Listeners=[ - {'Protocol': 'tcp', 'LoadBalancerPort': 80, 'InstancePort': 8080}], - AvailabilityZones=['us-east-1a', 'us-east-1b'] - ) - - response = client.apply_security_groups_to_load_balancer( - LoadBalancerName='my-lb', - SecurityGroups=[security_group.id]) - - assert response['SecurityGroups'] == [security_group.id] - balancer = client.describe_load_balancers()['LoadBalancerDescriptions'][0] - assert balancer['SecurityGroups'] == [security_group.id] - - # Using a not-real security group raises an error - with assert_raises(ClientError) as error: - response = client.apply_security_groups_to_load_balancer( - LoadBalancerName='my-lb', - SecurityGroups=['not-really-a-security-group']) - assert "One or more of the specified security groups do not exist." 
in str(error.exception) - - -@mock_elb_deprecated -def test_add_listener(): - conn = boto.connect_elb() - zones = ['us-east-1a', 'us-east-1b'] - ports = [(80, 8080, 'http')] - conn.create_load_balancer('my-lb', zones, ports) - new_listener = (443, 8443, 'tcp') - conn.create_load_balancer_listeners('my-lb', [new_listener]) - balancers = conn.get_all_load_balancers() - balancer = balancers[0] - listener1 = balancer.listeners[0] - listener1.load_balancer_port.should.equal(80) - listener1.instance_port.should.equal(8080) - listener1.protocol.should.equal("HTTP") - listener2 = balancer.listeners[1] - listener2.load_balancer_port.should.equal(443) - listener2.instance_port.should.equal(8443) - listener2.protocol.should.equal("TCP") - - -@mock_elb_deprecated -def test_delete_listener(): - conn = boto.connect_elb() - - zones = ['us-east-1a', 'us-east-1b'] - ports = [(80, 8080, 'http'), (443, 8443, 'tcp')] - conn.create_load_balancer('my-lb', zones, ports) - conn.delete_load_balancer_listeners('my-lb', [443]) - balancers = conn.get_all_load_balancers() - balancer = balancers[0] - listener1 = balancer.listeners[0] - listener1.load_balancer_port.should.equal(80) - listener1.instance_port.should.equal(8080) - listener1.protocol.should.equal("HTTP") - balancer.listeners.should.have.length_of(1) - - -@mock_elb -def test_create_and_delete_listener_boto3_support(): - client = boto3.client('elb', region_name='us-east-1') - - client.create_load_balancer( - LoadBalancerName='my-lb', - Listeners=[{'Protocol': 'http', - 'LoadBalancerPort': 80, 'InstancePort': 8080}], - AvailabilityZones=['us-east-1a', 'us-east-1b'] - ) - list(client.describe_load_balancers()[ - 'LoadBalancerDescriptions']).should.have.length_of(1) - - client.create_load_balancer_listeners( - LoadBalancerName='my-lb', - Listeners=[ - {'Protocol': 'tcp', 'LoadBalancerPort': 443, 'InstancePort': 8443}] - ) - balancer = client.describe_load_balancers()['LoadBalancerDescriptions'][0] - list(balancer['ListenerDescriptions']).should.have.length_of(2) - balancer['ListenerDescriptions'][0][ - 'Listener']['Protocol'].should.equal('HTTP') - balancer['ListenerDescriptions'][0]['Listener'][ - 'LoadBalancerPort'].should.equal(80) - balancer['ListenerDescriptions'][0]['Listener'][ - 'InstancePort'].should.equal(8080) - balancer['ListenerDescriptions'][1][ - 'Listener']['Protocol'].should.equal('TCP') - balancer['ListenerDescriptions'][1]['Listener'][ - 'LoadBalancerPort'].should.equal(443) - balancer['ListenerDescriptions'][1]['Listener'][ - 'InstancePort'].should.equal(8443) - - # Creating this listener with an conflicting definition throws error - with assert_raises(ClientError): - client.create_load_balancer_listeners( - LoadBalancerName='my-lb', - Listeners=[ - {'Protocol': 'tcp', 'LoadBalancerPort': 443, 'InstancePort': 1234}] - ) - - client.delete_load_balancer_listeners( - LoadBalancerName='my-lb', - LoadBalancerPorts=[443]) - - balancer = client.describe_load_balancers()['LoadBalancerDescriptions'][0] - list(balancer['ListenerDescriptions']).should.have.length_of(1) - - -@mock_elb_deprecated -def test_set_sslcertificate(): - conn = boto.connect_elb() - - zones = ['us-east-1a', 'us-east-1b'] - ports = [(443, 8443, 'tcp')] - conn.create_load_balancer('my-lb', zones, ports) - conn.set_lb_listener_SSL_certificate('my-lb', '443', 'arn:certificate') - balancers = conn.get_all_load_balancers() - balancer = balancers[0] - listener1 = balancer.listeners[0] - listener1.load_balancer_port.should.equal(443) - listener1.instance_port.should.equal(8443) - 
listener1.protocol.should.equal("TCP") - listener1.ssl_certificate_id.should.equal("arn:certificate") - - -@mock_elb_deprecated -def test_get_load_balancers_by_name(): - conn = boto.connect_elb() - - zones = ['us-east-1a', 'us-east-1b'] - ports = [(80, 8080, 'http'), (443, 8443, 'tcp')] - conn.create_load_balancer('my-lb1', zones, ports) - conn.create_load_balancer('my-lb2', zones, ports) - conn.create_load_balancer('my-lb3', zones, ports) - - conn.get_all_load_balancers().should.have.length_of(3) - conn.get_all_load_balancers( - load_balancer_names=['my-lb1']).should.have.length_of(1) - conn.get_all_load_balancers( - load_balancer_names=['my-lb1', 'my-lb2']).should.have.length_of(2) - - -@mock_elb_deprecated -def test_delete_load_balancer(): - conn = boto.connect_elb() - - zones = ['us-east-1a'] - ports = [(80, 8080, 'http'), (443, 8443, 'tcp')] - conn.create_load_balancer('my-lb', zones, ports) - - balancers = conn.get_all_load_balancers() - balancers.should.have.length_of(1) - - conn.delete_load_balancer("my-lb") - balancers = conn.get_all_load_balancers() - balancers.should.have.length_of(0) - - -@mock_elb_deprecated -def test_create_health_check(): - conn = boto.connect_elb() - - hc = HealthCheck( - interval=20, - healthy_threshold=3, - unhealthy_threshold=5, - target='HTTP:8080/health', - timeout=23, - ) - - ports = [(80, 8080, 'http'), (443, 8443, 'tcp')] - lb = conn.create_load_balancer('my-lb', [], ports) - lb.configure_health_check(hc) - - balancer = conn.get_all_load_balancers()[0] - health_check = balancer.health_check - health_check.interval.should.equal(20) - health_check.healthy_threshold.should.equal(3) - health_check.unhealthy_threshold.should.equal(5) - health_check.target.should.equal('HTTP:8080/health') - health_check.timeout.should.equal(23) - - -@mock_elb -def test_create_health_check_boto3(): - client = boto3.client('elb', region_name='us-east-1') - - client.create_load_balancer( - LoadBalancerName='my-lb', - Listeners=[{'Protocol': 'http', - 'LoadBalancerPort': 80, 'InstancePort': 8080}], - AvailabilityZones=['us-east-1a', 'us-east-1b'] - ) - client.configure_health_check( - LoadBalancerName='my-lb', - HealthCheck={ - 'Target': 'HTTP:8080/health', - 'Interval': 20, - 'Timeout': 23, - 'HealthyThreshold': 3, - 'UnhealthyThreshold': 5 - } - ) - - balancer = client.describe_load_balancers()['LoadBalancerDescriptions'][0] - balancer['HealthCheck']['Target'].should.equal('HTTP:8080/health') - balancer['HealthCheck']['Interval'].should.equal(20) - balancer['HealthCheck']['Timeout'].should.equal(23) - balancer['HealthCheck']['HealthyThreshold'].should.equal(3) - balancer['HealthCheck']['UnhealthyThreshold'].should.equal(5) - - -@mock_ec2_deprecated -@mock_elb_deprecated -def test_register_instances(): - ec2_conn = boto.connect_ec2() - reservation = ec2_conn.run_instances('ami-1234abcd', 2) - instance_id1 = reservation.instances[0].id - instance_id2 = reservation.instances[1].id - - conn = boto.connect_elb() - ports = [(80, 8080, 'http'), (443, 8443, 'tcp')] - lb = conn.create_load_balancer('my-lb', [], ports) - - lb.register_instances([instance_id1, instance_id2]) - - balancer = conn.get_all_load_balancers()[0] - instance_ids = [instance.id for instance in balancer.instances] - set(instance_ids).should.equal(set([instance_id1, instance_id2])) - - -@mock_ec2 -@mock_elb -def test_register_instances_boto3(): - ec2 = boto3.resource('ec2', region_name='us-east-1') - response = ec2.create_instances( - ImageId='ami-1234abcd', MinCount=2, MaxCount=2) - instance_id1 = response[0].id 
- instance_id2 = response[1].id - - client = boto3.client('elb', region_name='us-east-1') - client.create_load_balancer( - LoadBalancerName='my-lb', - Listeners=[{'Protocol': 'http', - 'LoadBalancerPort': 80, 'InstancePort': 8080}], - AvailabilityZones=['us-east-1a', 'us-east-1b'] - ) - client.register_instances_with_load_balancer( - LoadBalancerName='my-lb', - Instances=[ - {'InstanceId': instance_id1}, - {'InstanceId': instance_id2} - ] - ) - balancer = client.describe_load_balancers()['LoadBalancerDescriptions'][0] - instance_ids = [instance['InstanceId'] - for instance in balancer['Instances']] - set(instance_ids).should.equal(set([instance_id1, instance_id2])) - - -@mock_ec2_deprecated -@mock_elb_deprecated -def test_deregister_instances(): - ec2_conn = boto.connect_ec2() - reservation = ec2_conn.run_instances('ami-1234abcd', 2) - instance_id1 = reservation.instances[0].id - instance_id2 = reservation.instances[1].id - - conn = boto.connect_elb() - ports = [(80, 8080, 'http'), (443, 8443, 'tcp')] - lb = conn.create_load_balancer('my-lb', [], ports) - - lb.register_instances([instance_id1, instance_id2]) - - balancer = conn.get_all_load_balancers()[0] - balancer.instances.should.have.length_of(2) - balancer.deregister_instances([instance_id1]) - - balancer.instances.should.have.length_of(1) - balancer.instances[0].id.should.equal(instance_id2) - - -@mock_ec2 -@mock_elb -def test_deregister_instances_boto3(): - ec2 = boto3.resource('ec2', region_name='us-east-1') - response = ec2.create_instances( - ImageId='ami-1234abcd', MinCount=2, MaxCount=2) - instance_id1 = response[0].id - instance_id2 = response[1].id - - client = boto3.client('elb', region_name='us-east-1') - client.create_load_balancer( - LoadBalancerName='my-lb', - Listeners=[{'Protocol': 'http', - 'LoadBalancerPort': 80, 'InstancePort': 8080}], - AvailabilityZones=['us-east-1a', 'us-east-1b'] - ) - client.register_instances_with_load_balancer( - LoadBalancerName='my-lb', - Instances=[ - {'InstanceId': instance_id1}, - {'InstanceId': instance_id2} - ] - ) - - balancer = client.describe_load_balancers()['LoadBalancerDescriptions'][0] - balancer['Instances'].should.have.length_of(2) - - client.deregister_instances_from_load_balancer( - LoadBalancerName='my-lb', - Instances=[ - {'InstanceId': instance_id1} - ] - ) - - balancer = client.describe_load_balancers()['LoadBalancerDescriptions'][0] - balancer['Instances'].should.have.length_of(1) - balancer['Instances'][0]['InstanceId'].should.equal(instance_id2) - - -@mock_elb_deprecated -def test_default_attributes(): - conn = boto.connect_elb() - ports = [(80, 8080, 'http'), (443, 8443, 'tcp')] - lb = conn.create_load_balancer('my-lb', [], ports) - attributes = lb.get_attributes() - - attributes.cross_zone_load_balancing.enabled.should.be.false - attributes.connection_draining.enabled.should.be.false - attributes.access_log.enabled.should.be.false - attributes.connecting_settings.idle_timeout.should.equal(60) - - -@mock_elb_deprecated -def test_cross_zone_load_balancing_attribute(): - conn = boto.connect_elb() - ports = [(80, 8080, 'http'), (443, 8443, 'tcp')] - lb = conn.create_load_balancer('my-lb', [], ports) - - conn.modify_lb_attribute("my-lb", "CrossZoneLoadBalancing", True) - attributes = lb.get_attributes(force=True) - attributes.cross_zone_load_balancing.enabled.should.be.true - - conn.modify_lb_attribute("my-lb", "CrossZoneLoadBalancing", False) - attributes = lb.get_attributes(force=True) - attributes.cross_zone_load_balancing.enabled.should.be.false - - 
-@mock_elb_deprecated -def test_connection_draining_attribute(): - conn = boto.connect_elb() - ports = [(80, 8080, 'http'), (443, 8443, 'tcp')] - lb = conn.create_load_balancer('my-lb', [], ports) - - connection_draining = ConnectionDrainingAttribute() - connection_draining.enabled = True - connection_draining.timeout = 60 - - conn.modify_lb_attribute( - "my-lb", "ConnectionDraining", connection_draining) - attributes = lb.get_attributes(force=True) - attributes.connection_draining.enabled.should.be.true - attributes.connection_draining.timeout.should.equal(60) - - connection_draining.timeout = 30 - conn.modify_lb_attribute( - "my-lb", "ConnectionDraining", connection_draining) - attributes = lb.get_attributes(force=True) - attributes.connection_draining.timeout.should.equal(30) - - connection_draining.enabled = False - conn.modify_lb_attribute( - "my-lb", "ConnectionDraining", connection_draining) - attributes = lb.get_attributes(force=True) - attributes.connection_draining.enabled.should.be.false - - -@mock_elb_deprecated -def test_access_log_attribute(): - conn = boto.connect_elb() - ports = [(80, 8080, 'http'), (443, 8443, 'tcp')] - lb = conn.create_load_balancer('my-lb', [], ports) - - access_log = AccessLogAttribute() - access_log.enabled = True - access_log.s3_bucket_name = 'bucket' - access_log.s3_bucket_prefix = 'prefix' - access_log.emit_interval = 60 - - conn.modify_lb_attribute("my-lb", "AccessLog", access_log) - attributes = lb.get_attributes(force=True) - attributes.access_log.enabled.should.be.true - attributes.access_log.s3_bucket_name.should.equal("bucket") - attributes.access_log.s3_bucket_prefix.should.equal("prefix") - attributes.access_log.emit_interval.should.equal(60) - - access_log.enabled = False - conn.modify_lb_attribute("my-lb", "AccessLog", access_log) - attributes = lb.get_attributes(force=True) - attributes.access_log.enabled.should.be.false - - -@mock_elb_deprecated -def test_connection_settings_attribute(): - conn = boto.connect_elb() - ports = [(80, 8080, 'http'), (443, 8443, 'tcp')] - lb = conn.create_load_balancer('my-lb', [], ports) - - connection_settings = ConnectionSettingAttribute(conn) - connection_settings.idle_timeout = 120 - - conn.modify_lb_attribute( - "my-lb", "ConnectingSettings", connection_settings) - attributes = lb.get_attributes(force=True) - attributes.connecting_settings.idle_timeout.should.equal(120) - - connection_settings.idle_timeout = 60 - conn.modify_lb_attribute( - "my-lb", "ConnectingSettings", connection_settings) - attributes = lb.get_attributes(force=True) - attributes.connecting_settings.idle_timeout.should.equal(60) - - -@mock_elb_deprecated -def test_create_lb_cookie_stickiness_policy(): - conn = boto.connect_elb() - ports = [(80, 8080, 'http'), (443, 8443, 'tcp')] - lb = conn.create_load_balancer('my-lb', [], ports) - cookie_expiration_period = 60 - policy_name = "LBCookieStickinessPolicy" - - lb.create_cookie_stickiness_policy(cookie_expiration_period, policy_name) - - lb = conn.get_all_load_balancers()[0] - # There appears to be a quirk about boto, whereby it returns a unicode - # string for cookie_expiration_period, despite being stated in - # documentation to be a long numeric. - # - # To work around that, this value is converted to an int and checked. 
- cookie_expiration_period_response_str = lb.policies.lb_cookie_stickiness_policies[ - 0].cookie_expiration_period - int(cookie_expiration_period_response_str).should.equal( - cookie_expiration_period) - lb.policies.lb_cookie_stickiness_policies[ - 0].policy_name.should.equal(policy_name) - - -@mock_elb_deprecated -def test_create_lb_cookie_stickiness_policy_no_expiry(): - conn = boto.connect_elb() - ports = [(80, 8080, 'http'), (443, 8443, 'tcp')] - lb = conn.create_load_balancer('my-lb', [], ports) - policy_name = "LBCookieStickinessPolicy" - - lb.create_cookie_stickiness_policy(None, policy_name) - - lb = conn.get_all_load_balancers()[0] - lb.policies.lb_cookie_stickiness_policies[ - 0].cookie_expiration_period.should.be.none - lb.policies.lb_cookie_stickiness_policies[ - 0].policy_name.should.equal(policy_name) - - -@mock_elb_deprecated -def test_create_app_cookie_stickiness_policy(): - conn = boto.connect_elb() - ports = [(80, 8080, 'http'), (443, 8443, 'tcp')] - lb = conn.create_load_balancer('my-lb', [], ports) - cookie_name = "my-stickiness-policy" - policy_name = "AppCookieStickinessPolicy" - - lb.create_app_cookie_stickiness_policy(cookie_name, policy_name) - - lb = conn.get_all_load_balancers()[0] - lb.policies.app_cookie_stickiness_policies[ - 0].cookie_name.should.equal(cookie_name) - lb.policies.app_cookie_stickiness_policies[ - 0].policy_name.should.equal(policy_name) - - -@mock_elb_deprecated -def test_create_lb_policy(): - conn = boto.connect_elb() - ports = [(80, 8080, 'http'), (443, 8443, 'tcp')] - lb = conn.create_load_balancer('my-lb', [], ports) - policy_name = "ProxyPolicy" - - lb.create_lb_policy(policy_name, 'ProxyProtocolPolicyType', { - 'ProxyProtocol': True}) - - lb = conn.get_all_load_balancers()[0] - lb.policies.other_policies[0].policy_name.should.equal(policy_name) - - -@mock_elb_deprecated -def test_set_policies_of_listener(): - conn = boto.connect_elb() - ports = [(80, 8080, 'http'), (443, 8443, 'tcp')] - lb = conn.create_load_balancer('my-lb', [], ports) - listener_port = 80 - policy_name = "my-stickiness-policy" - - # boto docs currently state that zero or one policy may be associated - # with a given listener - - # in a real flow, it is necessary first to create a policy, - # then to set that policy to the listener - lb.create_cookie_stickiness_policy(None, policy_name) - lb.set_policies_of_listener(listener_port, [policy_name]) - - lb = conn.get_all_load_balancers()[0] - listener = lb.listeners[0] - listener.load_balancer_port.should.equal(listener_port) - # by contrast to a backend, a listener stores only policy name strings - listener.policy_names[0].should.equal(policy_name) - - -@mock_elb_deprecated -def test_set_policies_of_backend_server(): - conn = boto.connect_elb() - ports = [(80, 8080, 'http'), (443, 8443, 'tcp')] - lb = conn.create_load_balancer('my-lb', [], ports) - instance_port = 8080 - policy_name = "ProxyPolicy" - - # in a real flow, it is necessary first to create a policy, - # then to set that policy to the backend - lb.create_lb_policy(policy_name, 'ProxyProtocolPolicyType', { - 'ProxyProtocol': True}) - lb.set_policies_of_backend_server(instance_port, [policy_name]) - - lb = conn.get_all_load_balancers()[0] - backend = lb.backends[0] - backend.instance_port.should.equal(instance_port) - # by contrast to a listener, a backend stores OtherPolicy objects - backend.policies[0].policy_name.should.equal(policy_name) - - -@mock_ec2_deprecated -@mock_elb_deprecated -def test_describe_instance_health(): - ec2_conn = boto.connect_ec2() - 
reservation = ec2_conn.run_instances('ami-1234abcd', 2) - instance_id1 = reservation.instances[0].id - instance_id2 = reservation.instances[1].id - - conn = boto.connect_elb() - zones = ['us-east-1a', 'us-east-1b'] - ports = [(80, 8080, 'http'), (443, 8443, 'tcp')] - lb = conn.create_load_balancer('my-lb', zones, ports) - - instances_health = conn.describe_instance_health('my-lb') - instances_health.should.be.empty - - lb.register_instances([instance_id1, instance_id2]) - - instances_health = conn.describe_instance_health('my-lb') - instances_health.should.have.length_of(2) - for instance_health in instances_health: - instance_health.instance_id.should.be.within( - [instance_id1, instance_id2]) - instance_health.state.should.equal('InService') - - instances_health = conn.describe_instance_health('my-lb', [instance_id1]) - instances_health.should.have.length_of(1) - instances_health[0].instance_id.should.equal(instance_id1) - instances_health[0].state.should.equal('InService') - - -@mock_ec2 -@mock_elb -def test_describe_instance_health_boto3(): - elb = boto3.client('elb', region_name="us-east-1") - ec2 = boto3.client('ec2', region_name="us-east-1") - instances = ec2.run_instances(MinCount=2, MaxCount=2)['Instances'] - lb_name = "my_load_balancer" - elb.create_load_balancer( - Listeners=[{ - 'InstancePort': 80, - 'LoadBalancerPort': 8080, - 'Protocol': 'HTTP' - }], - LoadBalancerName=lb_name, - ) - elb.register_instances_with_load_balancer( - LoadBalancerName=lb_name, - Instances=[{'InstanceId': instances[0]['InstanceId']}] - ) - instances_health = elb.describe_instance_health( - LoadBalancerName=lb_name, - Instances=[{'InstanceId': instance['InstanceId']} for instance in instances] - ) - instances_health['InstanceStates'].should.have.length_of(2) - instances_health['InstanceStates'][0]['InstanceId'].\ - should.equal(instances[0]['InstanceId']) - instances_health['InstanceStates'][0]['State'].\ - should.equal('InService') - instances_health['InstanceStates'][1]['InstanceId'].\ - should.equal(instances[1]['InstanceId']) - instances_health['InstanceStates'][1]['State'].\ - should.equal('Unknown') - - -@mock_elb -def test_add_remove_tags(): - client = boto3.client('elb', region_name='us-east-1') - - client.add_tags.when.called_with(LoadBalancerNames=['my-lb'], - Tags=[{ - 'Key': 'a', - 'Value': 'b' - }]).should.throw(botocore.exceptions.ClientError) - - client.create_load_balancer( - LoadBalancerName='my-lb', - Listeners=[ - {'Protocol': 'tcp', 'LoadBalancerPort': 80, 'InstancePort': 8080}], - AvailabilityZones=['us-east-1a', 'us-east-1b'] - ) - - list(client.describe_load_balancers()[ - 'LoadBalancerDescriptions']).should.have.length_of(1) - - client.add_tags(LoadBalancerNames=['my-lb'], - Tags=[{ - 'Key': 'a', - 'Value': 'b' - }]) - - tags = dict([(d['Key'], d['Value']) for d in client.describe_tags( - LoadBalancerNames=['my-lb'])['TagDescriptions'][0]['Tags']]) - tags.should.have.key('a').which.should.equal('b') - - client.add_tags(LoadBalancerNames=['my-lb'], - Tags=[{ - 'Key': 'a', - 'Value': 'b' - }, { - 'Key': 'b', - 'Value': 'b' - }, { - 'Key': 'c', - 'Value': 'b' - }, { - 'Key': 'd', - 'Value': 'b' - }, { - 'Key': 'e', - 'Value': 'b' - }, { - 'Key': 'f', - 'Value': 'b' - }, { - 'Key': 'g', - 'Value': 'b' - }, { - 'Key': 'h', - 'Value': 'b' - }, { - 'Key': 'i', - 'Value': 'b' - }, { - 'Key': 'j', - 'Value': 'b' - }]) - - client.add_tags.when.called_with(LoadBalancerNames=['my-lb'], - Tags=[{ - 'Key': 'k', - 'Value': 'b' - }]).should.throw(botocore.exceptions.ClientError) - - 
client.add_tags(LoadBalancerNames=['my-lb'], - Tags=[{ - 'Key': 'j', - 'Value': 'c' - }]) - - tags = dict([(d['Key'], d['Value']) for d in client.describe_tags( - LoadBalancerNames=['my-lb'])['TagDescriptions'][0]['Tags']]) - - tags.should.have.key('a').which.should.equal('b') - tags.should.have.key('b').which.should.equal('b') - tags.should.have.key('c').which.should.equal('b') - tags.should.have.key('d').which.should.equal('b') - tags.should.have.key('e').which.should.equal('b') - tags.should.have.key('f').which.should.equal('b') - tags.should.have.key('g').which.should.equal('b') - tags.should.have.key('h').which.should.equal('b') - tags.should.have.key('i').which.should.equal('b') - tags.should.have.key('j').which.should.equal('c') - tags.shouldnt.have.key('k') - - client.remove_tags(LoadBalancerNames=['my-lb'], - Tags=[{ - 'Key': 'a' - }]) - - tags = dict([(d['Key'], d['Value']) for d in client.describe_tags( - LoadBalancerNames=['my-lb'])['TagDescriptions'][0]['Tags']]) - - tags.shouldnt.have.key('a') - tags.should.have.key('b').which.should.equal('b') - tags.should.have.key('c').which.should.equal('b') - tags.should.have.key('d').which.should.equal('b') - tags.should.have.key('e').which.should.equal('b') - tags.should.have.key('f').which.should.equal('b') - tags.should.have.key('g').which.should.equal('b') - tags.should.have.key('h').which.should.equal('b') - tags.should.have.key('i').which.should.equal('b') - tags.should.have.key('j').which.should.equal('c') - - client.create_load_balancer( - LoadBalancerName='other-lb', - Listeners=[ - {'Protocol': 'tcp', 'LoadBalancerPort': 433, 'InstancePort': 8433}], - AvailabilityZones=['us-east-1a', 'us-east-1b'] - ) - - client.add_tags(LoadBalancerNames=['other-lb'], - Tags=[{ - 'Key': 'other', - 'Value': 'something' - }]) - - lb_tags = dict([(l['LoadBalancerName'], dict([(d['Key'], d['Value']) for d in l['Tags']])) - for l in client.describe_tags(LoadBalancerNames=['my-lb', 'other-lb'])['TagDescriptions']]) - - lb_tags.should.have.key('my-lb') - lb_tags.should.have.key('other-lb') - - lb_tags['my-lb'].shouldnt.have.key('other') - lb_tags[ - 'other-lb'].should.have.key('other').which.should.equal('something') - - -@mock_elb -def test_create_with_tags(): - client = boto3.client('elb', region_name='us-east-1') - - client.create_load_balancer( - LoadBalancerName='my-lb', - Listeners=[ - {'Protocol': 'tcp', 'LoadBalancerPort': 80, 'InstancePort': 8080}], - AvailabilityZones=['us-east-1a', 'us-east-1b'], - Tags=[{ - 'Key': 'k', - 'Value': 'v' - }] - ) - - tags = dict((d['Key'], d['Value']) for d in client.describe_tags( - LoadBalancerNames=['my-lb'])['TagDescriptions'][0]['Tags']) - tags.should.have.key('k').which.should.equal('v') - - -@mock_elb -def test_modify_attributes(): - client = boto3.client('elb', region_name='us-east-1') - - client.create_load_balancer( - LoadBalancerName='my-lb', - Listeners=[{'Protocol': 'tcp', 'LoadBalancerPort': 80, 'InstancePort': 8080}], - AvailabilityZones=['us-east-1a', 'us-east-1b'] - ) - - # Default ConnectionDraining timeout of 300 seconds - client.modify_load_balancer_attributes( - LoadBalancerName='my-lb', - LoadBalancerAttributes={ - 'ConnectionDraining': {'Enabled': True}, - } - ) - lb_attrs = client.describe_load_balancer_attributes(LoadBalancerName='my-lb') - lb_attrs['LoadBalancerAttributes']['ConnectionDraining']['Enabled'].should.equal(True) - lb_attrs['LoadBalancerAttributes']['ConnectionDraining']['Timeout'].should.equal(300) - - # specify a custom ConnectionDraining timeout - 
client.modify_load_balancer_attributes( - LoadBalancerName='my-lb', - LoadBalancerAttributes={ - 'ConnectionDraining': { - 'Enabled': True, - 'Timeout': 45, - }, - } - ) - lb_attrs = client.describe_load_balancer_attributes(LoadBalancerName='my-lb') - lb_attrs['LoadBalancerAttributes']['ConnectionDraining']['Enabled'].should.equal(True) - lb_attrs['LoadBalancerAttributes']['ConnectionDraining']['Timeout'].should.equal(45) - - -@mock_ec2 -@mock_elb -def test_subnets(): - ec2 = boto3.resource('ec2', region_name='us-east-1') - vpc = ec2.create_vpc( - CidrBlock='172.28.7.0/24', - InstanceTenancy='default' - ) - subnet = ec2.create_subnet( - VpcId=vpc.id, - CidrBlock='172.28.7.192/26' - ) - client = boto3.client('elb', region_name='us-east-1') - client.create_load_balancer( - LoadBalancerName='my-lb', - Listeners=[ - {'Protocol': 'tcp', 'LoadBalancerPort': 80, 'InstancePort': 8080}], - Subnets=[subnet.id] - ) - - lb = client.describe_load_balancers()['LoadBalancerDescriptions'][0] - lb.should.have.key('Subnets').which.should.have.length_of(1) - lb['Subnets'][0].should.equal(subnet.id) - - lb.should.have.key('VPCId').which.should.equal(vpc.id) - - -@mock_elb_deprecated -def test_create_load_balancer_duplicate(): - conn = boto.connect_elb() - ports = [(80, 8080, 'http'), (443, 8443, 'tcp')] - conn.create_load_balancer('my-lb', [], ports) - conn.create_load_balancer.when.called_with( - 'my-lb', [], ports).should.throw(BotoServerError) +from __future__ import unicode_literals +import boto3 +import botocore +import boto +import boto.ec2.elb +from boto.ec2.elb import HealthCheck +from boto.ec2.elb.attributes import ( + ConnectionSettingAttribute, + ConnectionDrainingAttribute, + AccessLogAttribute, +) +from botocore.exceptions import ClientError +from boto.exception import BotoServerError +from nose.tools import assert_raises +import sure # noqa + +from moto import mock_elb, mock_ec2, mock_elb_deprecated, mock_ec2_deprecated + + +@mock_elb_deprecated +@mock_ec2_deprecated +def test_create_load_balancer(): + conn = boto.connect_elb() + ec2 = boto.connect_ec2('the_key', 'the_secret') + + security_group = ec2.create_security_group('sg-abc987', 'description') + + zones = ['us-east-1a', 'us-east-1b'] + ports = [(80, 8080, 'http'), (443, 8443, 'tcp')] + conn.create_load_balancer('my-lb', zones, ports, scheme='internal', security_groups=[security_group.id]) + + balancers = conn.get_all_load_balancers() + balancer = balancers[0] + balancer.name.should.equal("my-lb") + balancer.scheme.should.equal("internal") + list(balancer.security_groups).should.equal([security_group.id]) + set(balancer.availability_zones).should.equal( + set(['us-east-1a', 'us-east-1b'])) + listener1 = balancer.listeners[0] + listener1.load_balancer_port.should.equal(80) + listener1.instance_port.should.equal(8080) + listener1.protocol.should.equal("HTTP") + listener2 = balancer.listeners[1] + listener2.load_balancer_port.should.equal(443) + listener2.instance_port.should.equal(8443) + listener2.protocol.should.equal("TCP") + + +@mock_elb_deprecated +def test_getting_missing_elb(): + conn = boto.connect_elb() + conn.get_all_load_balancers.when.called_with( + load_balancer_names='aaa').should.throw(BotoServerError) + + +@mock_elb_deprecated +def test_create_elb_in_multiple_region(): + zones = ['us-east-1a', 'us-east-1b'] + ports = [(80, 8080, 'http'), (443, 8443, 'tcp')] + + west1_conn = boto.ec2.elb.connect_to_region("us-west-1") + west1_conn.create_load_balancer('my-lb', zones, ports) + + west2_conn = 
boto.ec2.elb.connect_to_region("us-west-2")
+    west2_conn.create_load_balancer('my-lb', zones, ports)
+
+    list(west1_conn.get_all_load_balancers()).should.have.length_of(1)
+    list(west2_conn.get_all_load_balancers()).should.have.length_of(1)
+
+
+@mock_elb_deprecated
+def test_create_load_balancer_with_certificate():
+    conn = boto.connect_elb()
+
+    zones = ['us-east-1a']
+    ports = [
+        (443, 8443, 'https', 'arn:aws:iam:123456789012:server-certificate/test-cert')]
+    conn.create_load_balancer('my-lb', zones, ports)
+
+    balancers = conn.get_all_load_balancers()
+    balancer = balancers[0]
+    balancer.name.should.equal("my-lb")
+    balancer.scheme.should.equal("internet-facing")
+    set(balancer.availability_zones).should.equal(set(['us-east-1a']))
+    listener = balancer.listeners[0]
+    listener.load_balancer_port.should.equal(443)
+    listener.instance_port.should.equal(8443)
+    listener.protocol.should.equal("HTTPS")
+    listener.ssl_certificate_id.should.equal(
+        'arn:aws:iam:123456789012:server-certificate/test-cert')
+
+
+@mock_elb
+def test_create_and_delete_boto3_support():
+    client = boto3.client('elb', region_name='us-east-1')
+
+    client.create_load_balancer(
+        LoadBalancerName='my-lb',
+        Listeners=[
+            {'Protocol': 'tcp', 'LoadBalancerPort': 80, 'InstancePort': 8080}],
+        AvailabilityZones=['us-east-1a', 'us-east-1b']
+    )
+    list(client.describe_load_balancers()[
+        'LoadBalancerDescriptions']).should.have.length_of(1)
+
+    client.delete_load_balancer(
+        LoadBalancerName='my-lb'
+    )
+    list(client.describe_load_balancers()[
+        'LoadBalancerDescriptions']).should.have.length_of(0)
+
+
+@mock_elb
+def test_create_load_balancer_with_no_listeners_defined():
+    client = boto3.client('elb', region_name='us-east-1')
+
+    with assert_raises(ClientError):
+        client.create_load_balancer(
+            LoadBalancerName='my-lb',
+            Listeners=[],
+            AvailabilityZones=['us-east-1a', 'us-east-1b']
+        )
+
+
+@mock_elb
+def test_describe_paginated_balancers():
+    client = boto3.client('elb', region_name='us-east-1')
+
+    for i in range(51):
+        client.create_load_balancer(
+            LoadBalancerName='my-lb%d' % i,
+            Listeners=[
+                {'Protocol': 'tcp', 'LoadBalancerPort': 80, 'InstancePort': 8080}],
+            AvailabilityZones=['us-east-1a', 'us-east-1b']
+        )
+
+    resp = client.describe_load_balancers()
+    resp['LoadBalancerDescriptions'].should.have.length_of(50)
+    resp['NextMarker'].should.equal(resp['LoadBalancerDescriptions'][-1]['LoadBalancerName'])
+    resp2 = client.describe_load_balancers(Marker=resp['NextMarker'])
+    resp2['LoadBalancerDescriptions'].should.have.length_of(1)
+    assert 'NextMarker' not in resp2.keys()
+
+
+@mock_elb
+@mock_ec2
+def test_apply_security_groups_to_load_balancer():
+    client = boto3.client('elb', region_name='us-east-1')
+    ec2 = boto3.resource('ec2', region_name='us-east-1')
+
+    vpc = ec2.create_vpc(CidrBlock='10.0.0.0/16')
+    security_group = ec2.create_security_group(
+        GroupName='sg01', Description='Test security group sg01', VpcId=vpc.id)
+
+    client.create_load_balancer(
+        LoadBalancerName='my-lb',
+        Listeners=[
+            {'Protocol': 'tcp', 'LoadBalancerPort': 80, 'InstancePort': 8080}],
+        AvailabilityZones=['us-east-1a', 'us-east-1b']
+    )
+
+    response = client.apply_security_groups_to_load_balancer(
+        LoadBalancerName='my-lb',
+        SecurityGroups=[security_group.id])
+
+    assert response['SecurityGroups'] == [security_group.id]
+    balancer = client.describe_load_balancers()['LoadBalancerDescriptions'][0]
+    assert balancer['SecurityGroups'] == [security_group.id]
+
+    # Using a not-real security group raises an error
+    with
assert_raises(ClientError) as error:
+        response = client.apply_security_groups_to_load_balancer(
+            LoadBalancerName='my-lb',
+            SecurityGroups=['not-really-a-security-group'])
+    assert "One or more of the specified security groups do not exist." in str(error.exception)
+
+
+@mock_elb_deprecated
+def test_add_listener():
+    conn = boto.connect_elb()
+    zones = ['us-east-1a', 'us-east-1b']
+    ports = [(80, 8080, 'http')]
+    conn.create_load_balancer('my-lb', zones, ports)
+    new_listener = (443, 8443, 'tcp')
+    conn.create_load_balancer_listeners('my-lb', [new_listener])
+    balancers = conn.get_all_load_balancers()
+    balancer = balancers[0]
+    listener1 = balancer.listeners[0]
+    listener1.load_balancer_port.should.equal(80)
+    listener1.instance_port.should.equal(8080)
+    listener1.protocol.should.equal("HTTP")
+    listener2 = balancer.listeners[1]
+    listener2.load_balancer_port.should.equal(443)
+    listener2.instance_port.should.equal(8443)
+    listener2.protocol.should.equal("TCP")
+
+
+@mock_elb_deprecated
+def test_delete_listener():
+    conn = boto.connect_elb()
+
+    zones = ['us-east-1a', 'us-east-1b']
+    ports = [(80, 8080, 'http'), (443, 8443, 'tcp')]
+    conn.create_load_balancer('my-lb', zones, ports)
+    conn.delete_load_balancer_listeners('my-lb', [443])
+    balancers = conn.get_all_load_balancers()
+    balancer = balancers[0]
+    listener1 = balancer.listeners[0]
+    listener1.load_balancer_port.should.equal(80)
+    listener1.instance_port.should.equal(8080)
+    listener1.protocol.should.equal("HTTP")
+    balancer.listeners.should.have.length_of(1)
+
+
+@mock_elb
+def test_create_and_delete_listener_boto3_support():
+    client = boto3.client('elb', region_name='us-east-1')
+
+    client.create_load_balancer(
+        LoadBalancerName='my-lb',
+        Listeners=[{'Protocol': 'http',
+                    'LoadBalancerPort': 80, 'InstancePort': 8080}],
+        AvailabilityZones=['us-east-1a', 'us-east-1b']
+    )
+    list(client.describe_load_balancers()[
+        'LoadBalancerDescriptions']).should.have.length_of(1)
+
+    client.create_load_balancer_listeners(
+        LoadBalancerName='my-lb',
+        Listeners=[
+            {'Protocol': 'tcp', 'LoadBalancerPort': 443, 'InstancePort': 8443}]
+    )
+    balancer = client.describe_load_balancers()['LoadBalancerDescriptions'][0]
+    list(balancer['ListenerDescriptions']).should.have.length_of(2)
+    balancer['ListenerDescriptions'][0][
+        'Listener']['Protocol'].should.equal('HTTP')
+    balancer['ListenerDescriptions'][0]['Listener'][
+        'LoadBalancerPort'].should.equal(80)
+    balancer['ListenerDescriptions'][0]['Listener'][
+        'InstancePort'].should.equal(8080)
+    balancer['ListenerDescriptions'][1][
+        'Listener']['Protocol'].should.equal('TCP')
+    balancer['ListenerDescriptions'][1]['Listener'][
+        'LoadBalancerPort'].should.equal(443)
+    balancer['ListenerDescriptions'][1]['Listener'][
+        'InstancePort'].should.equal(8443)
+
+    # Creating this listener with a conflicting definition throws an error
+    with assert_raises(ClientError):
+        client.create_load_balancer_listeners(
+            LoadBalancerName='my-lb',
+            Listeners=[
+                {'Protocol': 'tcp', 'LoadBalancerPort': 443, 'InstancePort': 1234}]
+        )
+
+    client.delete_load_balancer_listeners(
+        LoadBalancerName='my-lb',
+        LoadBalancerPorts=[443])
+
+    balancer = client.describe_load_balancers()['LoadBalancerDescriptions'][0]
+    list(balancer['ListenerDescriptions']).should.have.length_of(1)
+
+
+@mock_elb_deprecated
+def test_set_sslcertificate():
+    conn = boto.connect_elb()
+
+    zones = ['us-east-1a', 'us-east-1b']
+    ports = [(443, 8443, 'tcp')]
+    conn.create_load_balancer('my-lb', zones, ports)
+    
conn.set_lb_listener_SSL_certificate('my-lb', '443', 'arn:certificate') + balancers = conn.get_all_load_balancers() + balancer = balancers[0] + listener1 = balancer.listeners[0] + listener1.load_balancer_port.should.equal(443) + listener1.instance_port.should.equal(8443) + listener1.protocol.should.equal("TCP") + listener1.ssl_certificate_id.should.equal("arn:certificate") + + +@mock_elb_deprecated +def test_get_load_balancers_by_name(): + conn = boto.connect_elb() + + zones = ['us-east-1a', 'us-east-1b'] + ports = [(80, 8080, 'http'), (443, 8443, 'tcp')] + conn.create_load_balancer('my-lb1', zones, ports) + conn.create_load_balancer('my-lb2', zones, ports) + conn.create_load_balancer('my-lb3', zones, ports) + + conn.get_all_load_balancers().should.have.length_of(3) + conn.get_all_load_balancers( + load_balancer_names=['my-lb1']).should.have.length_of(1) + conn.get_all_load_balancers( + load_balancer_names=['my-lb1', 'my-lb2']).should.have.length_of(2) + + +@mock_elb_deprecated +def test_delete_load_balancer(): + conn = boto.connect_elb() + + zones = ['us-east-1a'] + ports = [(80, 8080, 'http'), (443, 8443, 'tcp')] + conn.create_load_balancer('my-lb', zones, ports) + + balancers = conn.get_all_load_balancers() + balancers.should.have.length_of(1) + + conn.delete_load_balancer("my-lb") + balancers = conn.get_all_load_balancers() + balancers.should.have.length_of(0) + + +@mock_elb_deprecated +def test_create_health_check(): + conn = boto.connect_elb() + + hc = HealthCheck( + interval=20, + healthy_threshold=3, + unhealthy_threshold=5, + target='HTTP:8080/health', + timeout=23, + ) + + ports = [(80, 8080, 'http'), (443, 8443, 'tcp')] + lb = conn.create_load_balancer('my-lb', [], ports) + lb.configure_health_check(hc) + + balancer = conn.get_all_load_balancers()[0] + health_check = balancer.health_check + health_check.interval.should.equal(20) + health_check.healthy_threshold.should.equal(3) + health_check.unhealthy_threshold.should.equal(5) + health_check.target.should.equal('HTTP:8080/health') + health_check.timeout.should.equal(23) + + +@mock_elb +def test_create_health_check_boto3(): + client = boto3.client('elb', region_name='us-east-1') + + client.create_load_balancer( + LoadBalancerName='my-lb', + Listeners=[{'Protocol': 'http', + 'LoadBalancerPort': 80, 'InstancePort': 8080}], + AvailabilityZones=['us-east-1a', 'us-east-1b'] + ) + client.configure_health_check( + LoadBalancerName='my-lb', + HealthCheck={ + 'Target': 'HTTP:8080/health', + 'Interval': 20, + 'Timeout': 23, + 'HealthyThreshold': 3, + 'UnhealthyThreshold': 5 + } + ) + + balancer = client.describe_load_balancers()['LoadBalancerDescriptions'][0] + balancer['HealthCheck']['Target'].should.equal('HTTP:8080/health') + balancer['HealthCheck']['Interval'].should.equal(20) + balancer['HealthCheck']['Timeout'].should.equal(23) + balancer['HealthCheck']['HealthyThreshold'].should.equal(3) + balancer['HealthCheck']['UnhealthyThreshold'].should.equal(5) + + +@mock_ec2_deprecated +@mock_elb_deprecated +def test_register_instances(): + ec2_conn = boto.connect_ec2() + reservation = ec2_conn.run_instances('ami-1234abcd', 2) + instance_id1 = reservation.instances[0].id + instance_id2 = reservation.instances[1].id + + conn = boto.connect_elb() + ports = [(80, 8080, 'http'), (443, 8443, 'tcp')] + lb = conn.create_load_balancer('my-lb', [], ports) + + lb.register_instances([instance_id1, instance_id2]) + + balancer = conn.get_all_load_balancers()[0] + instance_ids = [instance.id for instance in balancer.instances] + 
set(instance_ids).should.equal(set([instance_id1, instance_id2])) + + +@mock_ec2 +@mock_elb +def test_register_instances_boto3(): + ec2 = boto3.resource('ec2', region_name='us-east-1') + response = ec2.create_instances( + ImageId='ami-1234abcd', MinCount=2, MaxCount=2) + instance_id1 = response[0].id + instance_id2 = response[1].id + + client = boto3.client('elb', region_name='us-east-1') + client.create_load_balancer( + LoadBalancerName='my-lb', + Listeners=[{'Protocol': 'http', + 'LoadBalancerPort': 80, 'InstancePort': 8080}], + AvailabilityZones=['us-east-1a', 'us-east-1b'] + ) + client.register_instances_with_load_balancer( + LoadBalancerName='my-lb', + Instances=[ + {'InstanceId': instance_id1}, + {'InstanceId': instance_id2} + ] + ) + balancer = client.describe_load_balancers()['LoadBalancerDescriptions'][0] + instance_ids = [instance['InstanceId'] + for instance in balancer['Instances']] + set(instance_ids).should.equal(set([instance_id1, instance_id2])) + + +@mock_ec2_deprecated +@mock_elb_deprecated +def test_deregister_instances(): + ec2_conn = boto.connect_ec2() + reservation = ec2_conn.run_instances('ami-1234abcd', 2) + instance_id1 = reservation.instances[0].id + instance_id2 = reservation.instances[1].id + + conn = boto.connect_elb() + ports = [(80, 8080, 'http'), (443, 8443, 'tcp')] + lb = conn.create_load_balancer('my-lb', [], ports) + + lb.register_instances([instance_id1, instance_id2]) + + balancer = conn.get_all_load_balancers()[0] + balancer.instances.should.have.length_of(2) + balancer.deregister_instances([instance_id1]) + + balancer.instances.should.have.length_of(1) + balancer.instances[0].id.should.equal(instance_id2) + + +@mock_ec2 +@mock_elb +def test_deregister_instances_boto3(): + ec2 = boto3.resource('ec2', region_name='us-east-1') + response = ec2.create_instances( + ImageId='ami-1234abcd', MinCount=2, MaxCount=2) + instance_id1 = response[0].id + instance_id2 = response[1].id + + client = boto3.client('elb', region_name='us-east-1') + client.create_load_balancer( + LoadBalancerName='my-lb', + Listeners=[{'Protocol': 'http', + 'LoadBalancerPort': 80, 'InstancePort': 8080}], + AvailabilityZones=['us-east-1a', 'us-east-1b'] + ) + client.register_instances_with_load_balancer( + LoadBalancerName='my-lb', + Instances=[ + {'InstanceId': instance_id1}, + {'InstanceId': instance_id2} + ] + ) + + balancer = client.describe_load_balancers()['LoadBalancerDescriptions'][0] + balancer['Instances'].should.have.length_of(2) + + client.deregister_instances_from_load_balancer( + LoadBalancerName='my-lb', + Instances=[ + {'InstanceId': instance_id1} + ] + ) + + balancer = client.describe_load_balancers()['LoadBalancerDescriptions'][0] + balancer['Instances'].should.have.length_of(1) + balancer['Instances'][0]['InstanceId'].should.equal(instance_id2) + + +@mock_elb_deprecated +def test_default_attributes(): + conn = boto.connect_elb() + ports = [(80, 8080, 'http'), (443, 8443, 'tcp')] + lb = conn.create_load_balancer('my-lb', [], ports) + attributes = lb.get_attributes() + + attributes.cross_zone_load_balancing.enabled.should.be.false + attributes.connection_draining.enabled.should.be.false + attributes.access_log.enabled.should.be.false + attributes.connecting_settings.idle_timeout.should.equal(60) + + +@mock_elb_deprecated +def test_cross_zone_load_balancing_attribute(): + conn = boto.connect_elb() + ports = [(80, 8080, 'http'), (443, 8443, 'tcp')] + lb = conn.create_load_balancer('my-lb', [], ports) + + conn.modify_lb_attribute("my-lb", "CrossZoneLoadBalancing", True) + 
attributes = lb.get_attributes(force=True) + attributes.cross_zone_load_balancing.enabled.should.be.true + + conn.modify_lb_attribute("my-lb", "CrossZoneLoadBalancing", False) + attributes = lb.get_attributes(force=True) + attributes.cross_zone_load_balancing.enabled.should.be.false + + +@mock_elb_deprecated +def test_connection_draining_attribute(): + conn = boto.connect_elb() + ports = [(80, 8080, 'http'), (443, 8443, 'tcp')] + lb = conn.create_load_balancer('my-lb', [], ports) + + connection_draining = ConnectionDrainingAttribute() + connection_draining.enabled = True + connection_draining.timeout = 60 + + conn.modify_lb_attribute( + "my-lb", "ConnectionDraining", connection_draining) + attributes = lb.get_attributes(force=True) + attributes.connection_draining.enabled.should.be.true + attributes.connection_draining.timeout.should.equal(60) + + connection_draining.timeout = 30 + conn.modify_lb_attribute( + "my-lb", "ConnectionDraining", connection_draining) + attributes = lb.get_attributes(force=True) + attributes.connection_draining.timeout.should.equal(30) + + connection_draining.enabled = False + conn.modify_lb_attribute( + "my-lb", "ConnectionDraining", connection_draining) + attributes = lb.get_attributes(force=True) + attributes.connection_draining.enabled.should.be.false + + +@mock_elb_deprecated +def test_access_log_attribute(): + conn = boto.connect_elb() + ports = [(80, 8080, 'http'), (443, 8443, 'tcp')] + lb = conn.create_load_balancer('my-lb', [], ports) + + access_log = AccessLogAttribute() + access_log.enabled = True + access_log.s3_bucket_name = 'bucket' + access_log.s3_bucket_prefix = 'prefix' + access_log.emit_interval = 60 + + conn.modify_lb_attribute("my-lb", "AccessLog", access_log) + attributes = lb.get_attributes(force=True) + attributes.access_log.enabled.should.be.true + attributes.access_log.s3_bucket_name.should.equal("bucket") + attributes.access_log.s3_bucket_prefix.should.equal("prefix") + attributes.access_log.emit_interval.should.equal(60) + + access_log.enabled = False + conn.modify_lb_attribute("my-lb", "AccessLog", access_log) + attributes = lb.get_attributes(force=True) + attributes.access_log.enabled.should.be.false + + +@mock_elb_deprecated +def test_connection_settings_attribute(): + conn = boto.connect_elb() + ports = [(80, 8080, 'http'), (443, 8443, 'tcp')] + lb = conn.create_load_balancer('my-lb', [], ports) + + connection_settings = ConnectionSettingAttribute(conn) + connection_settings.idle_timeout = 120 + + conn.modify_lb_attribute( + "my-lb", "ConnectingSettings", connection_settings) + attributes = lb.get_attributes(force=True) + attributes.connecting_settings.idle_timeout.should.equal(120) + + connection_settings.idle_timeout = 60 + conn.modify_lb_attribute( + "my-lb", "ConnectingSettings", connection_settings) + attributes = lb.get_attributes(force=True) + attributes.connecting_settings.idle_timeout.should.equal(60) + + +@mock_elb_deprecated +def test_create_lb_cookie_stickiness_policy(): + conn = boto.connect_elb() + ports = [(80, 8080, 'http'), (443, 8443, 'tcp')] + lb = conn.create_load_balancer('my-lb', [], ports) + cookie_expiration_period = 60 + policy_name = "LBCookieStickinessPolicy" + + lb.create_cookie_stickiness_policy(cookie_expiration_period, policy_name) + + lb = conn.get_all_load_balancers()[0] + # There appears to be a quirk about boto, whereby it returns a unicode + # string for cookie_expiration_period, despite being stated in + # documentation to be a long numeric. 
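+    # (e.g. the 60 set above may come back as u'60' rather than the long 60).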
+ # + # To work around that, this value is converted to an int and checked. + cookie_expiration_period_response_str = lb.policies.lb_cookie_stickiness_policies[ + 0].cookie_expiration_period + int(cookie_expiration_period_response_str).should.equal( + cookie_expiration_period) + lb.policies.lb_cookie_stickiness_policies[ + 0].policy_name.should.equal(policy_name) + + +@mock_elb_deprecated +def test_create_lb_cookie_stickiness_policy_no_expiry(): + conn = boto.connect_elb() + ports = [(80, 8080, 'http'), (443, 8443, 'tcp')] + lb = conn.create_load_balancer('my-lb', [], ports) + policy_name = "LBCookieStickinessPolicy" + + lb.create_cookie_stickiness_policy(None, policy_name) + + lb = conn.get_all_load_balancers()[0] + lb.policies.lb_cookie_stickiness_policies[ + 0].cookie_expiration_period.should.be.none + lb.policies.lb_cookie_stickiness_policies[ + 0].policy_name.should.equal(policy_name) + + +@mock_elb_deprecated +def test_create_app_cookie_stickiness_policy(): + conn = boto.connect_elb() + ports = [(80, 8080, 'http'), (443, 8443, 'tcp')] + lb = conn.create_load_balancer('my-lb', [], ports) + cookie_name = "my-stickiness-policy" + policy_name = "AppCookieStickinessPolicy" + + lb.create_app_cookie_stickiness_policy(cookie_name, policy_name) + + lb = conn.get_all_load_balancers()[0] + lb.policies.app_cookie_stickiness_policies[ + 0].cookie_name.should.equal(cookie_name) + lb.policies.app_cookie_stickiness_policies[ + 0].policy_name.should.equal(policy_name) + + +@mock_elb_deprecated +def test_create_lb_policy(): + conn = boto.connect_elb() + ports = [(80, 8080, 'http'), (443, 8443, 'tcp')] + lb = conn.create_load_balancer('my-lb', [], ports) + policy_name = "ProxyPolicy" + + lb.create_lb_policy(policy_name, 'ProxyProtocolPolicyType', { + 'ProxyProtocol': True}) + + lb = conn.get_all_load_balancers()[0] + lb.policies.other_policies[0].policy_name.should.equal(policy_name) + + +@mock_elb_deprecated +def test_set_policies_of_listener(): + conn = boto.connect_elb() + ports = [(80, 8080, 'http'), (443, 8443, 'tcp')] + lb = conn.create_load_balancer('my-lb', [], ports) + listener_port = 80 + policy_name = "my-stickiness-policy" + + # boto docs currently state that zero or one policy may be associated + # with a given listener + + # in a real flow, it is necessary first to create a policy, + # then to set that policy to the listener + lb.create_cookie_stickiness_policy(None, policy_name) + lb.set_policies_of_listener(listener_port, [policy_name]) + + lb = conn.get_all_load_balancers()[0] + listener = lb.listeners[0] + listener.load_balancer_port.should.equal(listener_port) + # by contrast to a backend, a listener stores only policy name strings + listener.policy_names[0].should.equal(policy_name) + + +@mock_elb_deprecated +def test_set_policies_of_backend_server(): + conn = boto.connect_elb() + ports = [(80, 8080, 'http'), (443, 8443, 'tcp')] + lb = conn.create_load_balancer('my-lb', [], ports) + instance_port = 8080 + policy_name = "ProxyPolicy" + + # in a real flow, it is necessary first to create a policy, + # then to set that policy to the backend + lb.create_lb_policy(policy_name, 'ProxyProtocolPolicyType', { + 'ProxyProtocol': True}) + lb.set_policies_of_backend_server(instance_port, [policy_name]) + + lb = conn.get_all_load_balancers()[0] + backend = lb.backends[0] + backend.instance_port.should.equal(instance_port) + # by contrast to a listener, a backend stores OtherPolicy objects + backend.policies[0].policy_name.should.equal(policy_name) + + +@mock_ec2_deprecated +@mock_elb_deprecated 
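+# Note: the mock reports registered instances as InService; the boto3 variant
+# below additionally shows an unregistered instance coming back as Unknown.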
+def test_describe_instance_health(): + ec2_conn = boto.connect_ec2() + reservation = ec2_conn.run_instances('ami-1234abcd', 2) + instance_id1 = reservation.instances[0].id + instance_id2 = reservation.instances[1].id + + conn = boto.connect_elb() + zones = ['us-east-1a', 'us-east-1b'] + ports = [(80, 8080, 'http'), (443, 8443, 'tcp')] + lb = conn.create_load_balancer('my-lb', zones, ports) + + instances_health = conn.describe_instance_health('my-lb') + instances_health.should.be.empty + + lb.register_instances([instance_id1, instance_id2]) + + instances_health = conn.describe_instance_health('my-lb') + instances_health.should.have.length_of(2) + for instance_health in instances_health: + instance_health.instance_id.should.be.within( + [instance_id1, instance_id2]) + instance_health.state.should.equal('InService') + + instances_health = conn.describe_instance_health('my-lb', [instance_id1]) + instances_health.should.have.length_of(1) + instances_health[0].instance_id.should.equal(instance_id1) + instances_health[0].state.should.equal('InService') + + +@mock_ec2 +@mock_elb +def test_describe_instance_health_boto3(): + elb = boto3.client('elb', region_name="us-east-1") + ec2 = boto3.client('ec2', region_name="us-east-1") + instances = ec2.run_instances(MinCount=2, MaxCount=2)['Instances'] + lb_name = "my_load_balancer" + elb.create_load_balancer( + Listeners=[{ + 'InstancePort': 80, + 'LoadBalancerPort': 8080, + 'Protocol': 'HTTP' + }], + LoadBalancerName=lb_name, + ) + elb.register_instances_with_load_balancer( + LoadBalancerName=lb_name, + Instances=[{'InstanceId': instances[0]['InstanceId']}] + ) + instances_health = elb.describe_instance_health( + LoadBalancerName=lb_name, + Instances=[{'InstanceId': instance['InstanceId']} for instance in instances] + ) + instances_health['InstanceStates'].should.have.length_of(2) + instances_health['InstanceStates'][0]['InstanceId'].\ + should.equal(instances[0]['InstanceId']) + instances_health['InstanceStates'][0]['State'].\ + should.equal('InService') + instances_health['InstanceStates'][1]['InstanceId'].\ + should.equal(instances[1]['InstanceId']) + instances_health['InstanceStates'][1]['State'].\ + should.equal('Unknown') + + +@mock_elb +def test_add_remove_tags(): + client = boto3.client('elb', region_name='us-east-1') + + client.add_tags.when.called_with(LoadBalancerNames=['my-lb'], + Tags=[{ + 'Key': 'a', + 'Value': 'b' + }]).should.throw(botocore.exceptions.ClientError) + + client.create_load_balancer( + LoadBalancerName='my-lb', + Listeners=[ + {'Protocol': 'tcp', 'LoadBalancerPort': 80, 'InstancePort': 8080}], + AvailabilityZones=['us-east-1a', 'us-east-1b'] + ) + + list(client.describe_load_balancers()[ + 'LoadBalancerDescriptions']).should.have.length_of(1) + + client.add_tags(LoadBalancerNames=['my-lb'], + Tags=[{ + 'Key': 'a', + 'Value': 'b' + }]) + + tags = dict([(d['Key'], d['Value']) for d in client.describe_tags( + LoadBalancerNames=['my-lb'])['TagDescriptions'][0]['Tags']]) + tags.should.have.key('a').which.should.equal('b') + + client.add_tags(LoadBalancerNames=['my-lb'], + Tags=[{ + 'Key': 'a', + 'Value': 'b' + }, { + 'Key': 'b', + 'Value': 'b' + }, { + 'Key': 'c', + 'Value': 'b' + }, { + 'Key': 'd', + 'Value': 'b' + }, { + 'Key': 'e', + 'Value': 'b' + }, { + 'Key': 'f', + 'Value': 'b' + }, { + 'Key': 'g', + 'Value': 'b' + }, { + 'Key': 'h', + 'Value': 'b' + }, { + 'Key': 'i', + 'Value': 'b' + }, { + 'Key': 'j', + 'Value': 'b' + }]) + + client.add_tags.when.called_with(LoadBalancerNames=['my-lb'], + Tags=[{ + 'Key': 'k', + 
'Value': 'b' + }]).should.throw(botocore.exceptions.ClientError) + + client.add_tags(LoadBalancerNames=['my-lb'], + Tags=[{ + 'Key': 'j', + 'Value': 'c' + }]) + + tags = dict([(d['Key'], d['Value']) for d in client.describe_tags( + LoadBalancerNames=['my-lb'])['TagDescriptions'][0]['Tags']]) + + tags.should.have.key('a').which.should.equal('b') + tags.should.have.key('b').which.should.equal('b') + tags.should.have.key('c').which.should.equal('b') + tags.should.have.key('d').which.should.equal('b') + tags.should.have.key('e').which.should.equal('b') + tags.should.have.key('f').which.should.equal('b') + tags.should.have.key('g').which.should.equal('b') + tags.should.have.key('h').which.should.equal('b') + tags.should.have.key('i').which.should.equal('b') + tags.should.have.key('j').which.should.equal('c') + tags.shouldnt.have.key('k') + + client.remove_tags(LoadBalancerNames=['my-lb'], + Tags=[{ + 'Key': 'a' + }]) + + tags = dict([(d['Key'], d['Value']) for d in client.describe_tags( + LoadBalancerNames=['my-lb'])['TagDescriptions'][0]['Tags']]) + + tags.shouldnt.have.key('a') + tags.should.have.key('b').which.should.equal('b') + tags.should.have.key('c').which.should.equal('b') + tags.should.have.key('d').which.should.equal('b') + tags.should.have.key('e').which.should.equal('b') + tags.should.have.key('f').which.should.equal('b') + tags.should.have.key('g').which.should.equal('b') + tags.should.have.key('h').which.should.equal('b') + tags.should.have.key('i').which.should.equal('b') + tags.should.have.key('j').which.should.equal('c') + + client.create_load_balancer( + LoadBalancerName='other-lb', + Listeners=[ + {'Protocol': 'tcp', 'LoadBalancerPort': 433, 'InstancePort': 8433}], + AvailabilityZones=['us-east-1a', 'us-east-1b'] + ) + + client.add_tags(LoadBalancerNames=['other-lb'], + Tags=[{ + 'Key': 'other', + 'Value': 'something' + }]) + + lb_tags = dict([(l['LoadBalancerName'], dict([(d['Key'], d['Value']) for d in l['Tags']])) + for l in client.describe_tags(LoadBalancerNames=['my-lb', 'other-lb'])['TagDescriptions']]) + + lb_tags.should.have.key('my-lb') + lb_tags.should.have.key('other-lb') + + lb_tags['my-lb'].shouldnt.have.key('other') + lb_tags[ + 'other-lb'].should.have.key('other').which.should.equal('something') + + +@mock_elb +def test_create_with_tags(): + client = boto3.client('elb', region_name='us-east-1') + + client.create_load_balancer( + LoadBalancerName='my-lb', + Listeners=[ + {'Protocol': 'tcp', 'LoadBalancerPort': 80, 'InstancePort': 8080}], + AvailabilityZones=['us-east-1a', 'us-east-1b'], + Tags=[{ + 'Key': 'k', + 'Value': 'v' + }] + ) + + tags = dict((d['Key'], d['Value']) for d in client.describe_tags( + LoadBalancerNames=['my-lb'])['TagDescriptions'][0]['Tags']) + tags.should.have.key('k').which.should.equal('v') + + +@mock_elb +def test_modify_attributes(): + client = boto3.client('elb', region_name='us-east-1') + + client.create_load_balancer( + LoadBalancerName='my-lb', + Listeners=[{'Protocol': 'tcp', 'LoadBalancerPort': 80, 'InstancePort': 8080}], + AvailabilityZones=['us-east-1a', 'us-east-1b'] + ) + + # Default ConnectionDraining timeout of 300 seconds + client.modify_load_balancer_attributes( + LoadBalancerName='my-lb', + LoadBalancerAttributes={ + 'ConnectionDraining': {'Enabled': True}, + } + ) + lb_attrs = client.describe_load_balancer_attributes(LoadBalancerName='my-lb') + lb_attrs['LoadBalancerAttributes']['ConnectionDraining']['Enabled'].should.equal(True) + lb_attrs['LoadBalancerAttributes']['ConnectionDraining']['Timeout'].should.equal(300) + + 
# specify a custom ConnectionDraining timeout + client.modify_load_balancer_attributes( + LoadBalancerName='my-lb', + LoadBalancerAttributes={ + 'ConnectionDraining': { + 'Enabled': True, + 'Timeout': 45, + }, + } + ) + lb_attrs = client.describe_load_balancer_attributes(LoadBalancerName='my-lb') + lb_attrs['LoadBalancerAttributes']['ConnectionDraining']['Enabled'].should.equal(True) + lb_attrs['LoadBalancerAttributes']['ConnectionDraining']['Timeout'].should.equal(45) + + +@mock_ec2 +@mock_elb +def test_subnets(): + ec2 = boto3.resource('ec2', region_name='us-east-1') + vpc = ec2.create_vpc( + CidrBlock='172.28.7.0/24', + InstanceTenancy='default' + ) + subnet = ec2.create_subnet( + VpcId=vpc.id, + CidrBlock='172.28.7.192/26' + ) + client = boto3.client('elb', region_name='us-east-1') + client.create_load_balancer( + LoadBalancerName='my-lb', + Listeners=[ + {'Protocol': 'tcp', 'LoadBalancerPort': 80, 'InstancePort': 8080}], + Subnets=[subnet.id] + ) + + lb = client.describe_load_balancers()['LoadBalancerDescriptions'][0] + lb.should.have.key('Subnets').which.should.have.length_of(1) + lb['Subnets'][0].should.equal(subnet.id) + + lb.should.have.key('VPCId').which.should.equal(vpc.id) + + +@mock_elb_deprecated +def test_create_load_balancer_duplicate(): + conn = boto.connect_elb() + ports = [(80, 8080, 'http'), (443, 8443, 'tcp')] + conn.create_load_balancer('my-lb', [], ports) + conn.create_load_balancer.when.called_with( + 'my-lb', [], ports).should.throw(BotoServerError) diff --git a/tests/test_elb/test_server.py b/tests/test_elb/test_server.py index 0033284d703d..159da970d70e 100644 --- a/tests/test_elb/test_server.py +++ b/tests/test_elb/test_server.py @@ -1,17 +1,17 @@ -from __future__ import unicode_literals -import sure # noqa - -import moto.server as server - -''' -Test the different server responses -''' - - -def test_elb_describe_instances(): - backend = server.create_backend_app("elb") - test_client = backend.test_client() - - res = test_client.get('/?Action=DescribeLoadBalancers&Version=2015-12-01') - - res.data.should.contain(b'DescribeLoadBalancersResponse') +from __future__ import unicode_literals +import sure # noqa + +import moto.server as server + +''' +Test the different server responses +''' + + +def test_elb_describe_instances(): + backend = server.create_backend_app("elb") + test_client = backend.test_client() + + res = test_client.get('/?Action=DescribeLoadBalancers&Version=2015-12-01') + + res.data.should.contain(b'DescribeLoadBalancersResponse') diff --git a/tests/test_elbv2/test_elbv2.py b/tests/test_elbv2/test_elbv2.py index b58345fdbc15..cf0722bb2ba6 100644 --- a/tests/test_elbv2/test_elbv2.py +++ b/tests/test_elbv2/test_elbv2.py @@ -1,1588 +1,1588 @@ -from __future__ import unicode_literals - -import json -import os -import boto3 -import botocore -from botocore.exceptions import ClientError -from nose.tools import assert_raises -import sure # noqa - -from moto import mock_elbv2, mock_ec2, mock_acm, mock_cloudformation -from moto.elbv2 import elbv2_backends - - -@mock_elbv2 -@mock_ec2 -def test_create_load_balancer(): - conn = boto3.client('elbv2', region_name='us-east-1') - ec2 = boto3.resource('ec2', region_name='us-east-1') - - security_group = ec2.create_security_group( - GroupName='a-security-group', Description='First One') - vpc = ec2.create_vpc(CidrBlock='172.28.7.0/24', InstanceTenancy='default') - subnet1 = ec2.create_subnet( - VpcId=vpc.id, - CidrBlock='172.28.7.192/26', - AvailabilityZone='us-east-1a') - subnet2 = ec2.create_subnet( - VpcId=vpc.id, 
- CidrBlock='172.28.7.192/26', - AvailabilityZone='us-east-1b') - - response = conn.create_load_balancer( - Name='my-lb', - Subnets=[subnet1.id, subnet2.id], - SecurityGroups=[security_group.id], - Scheme='internal', - Tags=[{'Key': 'key_name', 'Value': 'a_value'}]) - - lb = response.get('LoadBalancers')[0] - - lb.get('DNSName').should.equal("my-lb-1.us-east-1.elb.amazonaws.com") - lb.get('LoadBalancerArn').should.equal( - 'arn:aws:elasticloadbalancing:us-east-1:1:loadbalancer/my-lb/50dc6c495c0c9188') - lb.get('SecurityGroups').should.equal([security_group.id]) - lb.get('AvailabilityZones').should.equal([ - {'SubnetId': subnet1.id, 'ZoneName': 'us-east-1a'}, - {'SubnetId': subnet2.id, 'ZoneName': 'us-east-1b'}]) - - # Ensure the tags persisted - response = conn.describe_tags(ResourceArns=[lb.get('LoadBalancerArn')]) - tags = {d['Key']: d['Value'] - for d in response['TagDescriptions'][0]['Tags']} - tags.should.equal({'key_name': 'a_value'}) - - -@mock_elbv2 -@mock_ec2 -def test_describe_load_balancers(): - conn = boto3.client('elbv2', region_name='us-east-1') - ec2 = boto3.resource('ec2', region_name='us-east-1') - - security_group = ec2.create_security_group( - GroupName='a-security-group', Description='First One') - vpc = ec2.create_vpc(CidrBlock='172.28.7.0/24', InstanceTenancy='default') - subnet1 = ec2.create_subnet( - VpcId=vpc.id, - CidrBlock='172.28.7.192/26', - AvailabilityZone='us-east-1a') - subnet2 = ec2.create_subnet( - VpcId=vpc.id, - CidrBlock='172.28.7.192/26', - AvailabilityZone='us-east-1b') - - conn.create_load_balancer( - Name='my-lb', - Subnets=[subnet1.id, subnet2.id], - SecurityGroups=[security_group.id], - Scheme='internal', - Tags=[{'Key': 'key_name', 'Value': 'a_value'}]) - - response = conn.describe_load_balancers() - - response.get('LoadBalancers').should.have.length_of(1) - lb = response.get('LoadBalancers')[0] - lb.get('LoadBalancerName').should.equal('my-lb') - - response = conn.describe_load_balancers( - LoadBalancerArns=[lb.get('LoadBalancerArn')]) - response.get('LoadBalancers')[0].get( - 'LoadBalancerName').should.equal('my-lb') - - response = conn.describe_load_balancers(Names=['my-lb']) - response.get('LoadBalancers')[0].get( - 'LoadBalancerName').should.equal('my-lb') - - with assert_raises(ClientError): - conn.describe_load_balancers(LoadBalancerArns=['not-a/real/arn']) - with assert_raises(ClientError): - conn.describe_load_balancers(Names=['nope']) - - -@mock_elbv2 -@mock_ec2 -def test_add_remove_tags(): - conn = boto3.client('elbv2', region_name='us-east-1') - - ec2 = boto3.resource('ec2', region_name='us-east-1') - - security_group = ec2.create_security_group( - GroupName='a-security-group', Description='First One') - vpc = ec2.create_vpc(CidrBlock='172.28.7.0/24', InstanceTenancy='default') - subnet1 = ec2.create_subnet( - VpcId=vpc.id, - CidrBlock='172.28.7.192/26', - AvailabilityZone='us-east-1a') - subnet2 = ec2.create_subnet( - VpcId=vpc.id, - CidrBlock='172.28.7.192/26', - AvailabilityZone='us-east-1b') - - conn.create_load_balancer( - Name='my-lb', - Subnets=[subnet1.id, subnet2.id], - SecurityGroups=[security_group.id], - Scheme='internal', - Tags=[{'Key': 'key_name', 'Value': 'a_value'}]) - - lbs = conn.describe_load_balancers()['LoadBalancers'] - lbs.should.have.length_of(1) - lb = lbs[0] - - with assert_raises(ClientError): - conn.add_tags(ResourceArns=['missing-arn'], - Tags=[{ - 'Key': 'a', - 'Value': 'b' - }]) - - conn.add_tags(ResourceArns=[lb.get('LoadBalancerArn')], - Tags=[{ - 'Key': 'a', - 'Value': 'b' - }]) - - tags = 
{d['Key']: d['Value'] for d in conn.describe_tags( - ResourceArns=[lb.get('LoadBalancerArn')])['TagDescriptions'][0]['Tags']} - tags.should.have.key('a').which.should.equal('b') - - conn.add_tags(ResourceArns=[lb.get('LoadBalancerArn')], - Tags=[{ - 'Key': 'a', - 'Value': 'b' - }, { - 'Key': 'b', - 'Value': 'b' - }, { - 'Key': 'c', - 'Value': 'b' - }, { - 'Key': 'd', - 'Value': 'b' - }, { - 'Key': 'e', - 'Value': 'b' - }, { - 'Key': 'f', - 'Value': 'b' - }, { - 'Key': 'g', - 'Value': 'b' - }, { - 'Key': 'h', - 'Value': 'b' - }, { - 'Key': 'j', - 'Value': 'b' - }]) - - conn.add_tags.when.called_with(ResourceArns=[lb.get('LoadBalancerArn')], - Tags=[{ - 'Key': 'k', - 'Value': 'b' - }]).should.throw(botocore.exceptions.ClientError) - - conn.add_tags(ResourceArns=[lb.get('LoadBalancerArn')], - Tags=[{ - 'Key': 'j', - 'Value': 'c' - }]) - - tags = {d['Key']: d['Value'] for d in conn.describe_tags( - ResourceArns=[lb.get('LoadBalancerArn')])['TagDescriptions'][0]['Tags']} - - tags.should.have.key('a').which.should.equal('b') - tags.should.have.key('b').which.should.equal('b') - tags.should.have.key('c').which.should.equal('b') - tags.should.have.key('d').which.should.equal('b') - tags.should.have.key('e').which.should.equal('b') - tags.should.have.key('f').which.should.equal('b') - tags.should.have.key('g').which.should.equal('b') - tags.should.have.key('h').which.should.equal('b') - tags.should.have.key('j').which.should.equal('c') - tags.shouldnt.have.key('k') - - conn.remove_tags(ResourceArns=[lb.get('LoadBalancerArn')], - TagKeys=['a']) - - tags = {d['Key']: d['Value'] for d in conn.describe_tags( - ResourceArns=[lb.get('LoadBalancerArn')])['TagDescriptions'][0]['Tags']} - - tags.shouldnt.have.key('a') - tags.should.have.key('b').which.should.equal('b') - tags.should.have.key('c').which.should.equal('b') - tags.should.have.key('d').which.should.equal('b') - tags.should.have.key('e').which.should.equal('b') - tags.should.have.key('f').which.should.equal('b') - tags.should.have.key('g').which.should.equal('b') - tags.should.have.key('h').which.should.equal('b') - tags.should.have.key('j').which.should.equal('c') - - -@mock_elbv2 -@mock_ec2 -def test_create_elb_in_multiple_region(): - for region in ['us-west-1', 'us-west-2']: - conn = boto3.client('elbv2', region_name=region) - ec2 = boto3.resource('ec2', region_name=region) - - security_group = ec2.create_security_group( - GroupName='a-security-group', Description='First One') - vpc = ec2.create_vpc( - CidrBlock='172.28.7.0/24', - InstanceTenancy='default') - subnet1 = ec2.create_subnet( - VpcId=vpc.id, - CidrBlock='172.28.7.192/26', - AvailabilityZone=region + 'a') - subnet2 = ec2.create_subnet( - VpcId=vpc.id, - CidrBlock='172.28.7.192/26', - AvailabilityZone=region + 'b') - - conn.create_load_balancer( - Name='my-lb', - Subnets=[subnet1.id, subnet2.id], - SecurityGroups=[security_group.id], - Scheme='internal', - Tags=[{'Key': 'key_name', 'Value': 'a_value'}]) - - list( - boto3.client( - 'elbv2', - region_name='us-west-1').describe_load_balancers().get('LoadBalancers') - ).should.have.length_of(1) - list( - boto3.client( - 'elbv2', - region_name='us-west-2').describe_load_balancers().get('LoadBalancers') - ).should.have.length_of(1) - - -@mock_elbv2 -@mock_ec2 -def test_create_target_group_and_listeners(): - conn = boto3.client('elbv2', region_name='us-east-1') - ec2 = boto3.resource('ec2', region_name='us-east-1') - - security_group = ec2.create_security_group( - GroupName='a-security-group', Description='First One') - vpc = 
ec2.create_vpc(CidrBlock='172.28.7.0/24', InstanceTenancy='default') - subnet1 = ec2.create_subnet( - VpcId=vpc.id, - CidrBlock='172.28.7.192/26', - AvailabilityZone='us-east-1a') - subnet2 = ec2.create_subnet( - VpcId=vpc.id, - CidrBlock='172.28.7.192/26', - AvailabilityZone='us-east-1b') - - response = conn.create_load_balancer( - Name='my-lb', - Subnets=[subnet1.id, subnet2.id], - SecurityGroups=[security_group.id], - Scheme='internal', - Tags=[{'Key': 'key_name', 'Value': 'a_value'}]) - - load_balancer_arn = response.get('LoadBalancers')[0].get('LoadBalancerArn') - - # Can't create a target group with an invalid protocol - with assert_raises(ClientError): - conn.create_target_group( - Name='a-target', - Protocol='HTTP', - Port=8080, - VpcId=vpc.id, - HealthCheckProtocol='/HTTP', - HealthCheckPort='8080', - HealthCheckPath='/', - HealthCheckIntervalSeconds=5, - HealthCheckTimeoutSeconds=5, - HealthyThresholdCount=5, - UnhealthyThresholdCount=2, - Matcher={'HttpCode': '200'}) - response = conn.create_target_group( - Name='a-target', - Protocol='HTTP', - Port=8080, - VpcId=vpc.id, - HealthCheckProtocol='HTTP', - HealthCheckPort='8080', - HealthCheckPath='/', - HealthCheckIntervalSeconds=5, - HealthCheckTimeoutSeconds=5, - HealthyThresholdCount=5, - UnhealthyThresholdCount=2, - Matcher={'HttpCode': '200'}) - target_group = response.get('TargetGroups')[0] - target_group_arn = target_group['TargetGroupArn'] - - # Add tags to the target group - conn.add_tags(ResourceArns=[target_group_arn], Tags=[ - {'Key': 'target', 'Value': 'group'}]) - conn.describe_tags(ResourceArns=[target_group_arn])['TagDescriptions'][0]['Tags'].should.equal( - [{'Key': 'target', 'Value': 'group'}]) - - # Check it's in the describe_target_groups response - response = conn.describe_target_groups() - response.get('TargetGroups').should.have.length_of(1) - - # Plain HTTP listener - response = conn.create_listener( - LoadBalancerArn=load_balancer_arn, - Protocol='HTTP', - Port=80, - DefaultActions=[{'Type': 'forward', 'TargetGroupArn': target_group.get('TargetGroupArn')}]) - listener = response.get('Listeners')[0] - listener.get('Port').should.equal(80) - listener.get('Protocol').should.equal('HTTP') - listener.get('DefaultActions').should.equal([{ - 'TargetGroupArn': target_group.get('TargetGroupArn'), - 'Type': 'forward'}]) - http_listener_arn = listener.get('ListenerArn') - - response = conn.describe_target_groups(LoadBalancerArn=load_balancer_arn, - Names=['a-target']) - response.get('TargetGroups').should.have.length_of(1) - - # And another with SSL - response = conn.create_listener( - LoadBalancerArn=load_balancer_arn, - Protocol='HTTPS', - Port=443, - Certificates=[ - {'CertificateArn': 'arn:aws:iam:123456789012:server-certificate/test-cert'}], - DefaultActions=[{'Type': 'forward', 'TargetGroupArn': target_group.get('TargetGroupArn')}]) - listener = response.get('Listeners')[0] - listener.get('Port').should.equal(443) - listener.get('Protocol').should.equal('HTTPS') - listener.get('Certificates').should.equal([{ - 'CertificateArn': 'arn:aws:iam:123456789012:server-certificate/test-cert', - }]) - listener.get('DefaultActions').should.equal([{ - 'TargetGroupArn': target_group.get('TargetGroupArn'), - 'Type': 'forward'}]) - - https_listener_arn = listener.get('ListenerArn') - - response = conn.describe_listeners(LoadBalancerArn=load_balancer_arn) - response.get('Listeners').should.have.length_of(2) - response = conn.describe_listeners(ListenerArns=[https_listener_arn]) - 
response.get('Listeners').should.have.length_of(1) - listener = response.get('Listeners')[0] - listener.get('Port').should.equal(443) - listener.get('Protocol').should.equal('HTTPS') - - response = conn.describe_listeners( - ListenerArns=[ - http_listener_arn, - https_listener_arn]) - response.get('Listeners').should.have.length_of(2) - - # Try to delete the target group and it fails because there's a - # listener referencing it - with assert_raises(ClientError) as e: - conn.delete_target_group( - TargetGroupArn=target_group.get('TargetGroupArn')) - e.exception.operation_name.should.equal('DeleteTargetGroup') - e.exception.args.should.equal(("An error occurred (ResourceInUse) when calling the DeleteTargetGroup operation: The target group 'arn:aws:elasticloadbalancing:us-east-1:1:targetgroup/a-target/50dc6c495c0c9188' is currently in use by a listener or a rule", )) # NOQA - - # Delete one listener - response = conn.describe_listeners(LoadBalancerArn=load_balancer_arn) - response.get('Listeners').should.have.length_of(2) - conn.delete_listener(ListenerArn=http_listener_arn) - response = conn.describe_listeners(LoadBalancerArn=load_balancer_arn) - response.get('Listeners').should.have.length_of(1) - - # Then delete the load balancer - conn.delete_load_balancer(LoadBalancerArn=load_balancer_arn) - - # It's gone - response = conn.describe_load_balancers() - response.get('LoadBalancers').should.have.length_of(0) - - # And it deleted the remaining listener - response = conn.describe_listeners( - ListenerArns=[ - http_listener_arn, - https_listener_arn]) - response.get('Listeners').should.have.length_of(0) - - # But not the target groups - response = conn.describe_target_groups() - response.get('TargetGroups').should.have.length_of(1) - - # Which we'll now delete - conn.delete_target_group(TargetGroupArn=target_group.get('TargetGroupArn')) - response = conn.describe_target_groups() - response.get('TargetGroups').should.have.length_of(0) - - -@mock_elbv2 -@mock_ec2 -def test_create_target_group_without_non_required_parameters(): - conn = boto3.client('elbv2', region_name='us-east-1') - ec2 = boto3.resource('ec2', region_name='us-east-1') - - security_group = ec2.create_security_group( - GroupName='a-security-group', Description='First One') - vpc = ec2.create_vpc(CidrBlock='172.28.7.0/24', InstanceTenancy='default') - subnet1 = ec2.create_subnet( - VpcId=vpc.id, - CidrBlock='172.28.7.192/26', - AvailabilityZone='us-east-1a') - subnet2 = ec2.create_subnet( - VpcId=vpc.id, - CidrBlock='172.28.7.192/26', - AvailabilityZone='us-east-1b') - - response = conn.create_load_balancer( - Name='my-lb', - Subnets=[subnet1.id, subnet2.id], - SecurityGroups=[security_group.id], - Scheme='internal', - Tags=[{'Key': 'key_name', 'Value': 'a_value'}]) - - # request without HealthCheckIntervalSeconds parameter - # which is default to 30 seconds - response = conn.create_target_group( - Name='a-target', - Protocol='HTTP', - Port=8080, - VpcId=vpc.id, - HealthCheckProtocol='HTTP', - HealthCheckPort='8080' - ) - target_group = response.get('TargetGroups')[0] - target_group.should_not.be.none - - -@mock_elbv2 -@mock_ec2 -def test_create_invalid_target_group(): - conn = boto3.client('elbv2', region_name='us-east-1') - ec2 = boto3.resource('ec2', region_name='us-east-1') - - vpc = ec2.create_vpc(CidrBlock='172.28.7.0/24', InstanceTenancy='default') - - # Fail to create target group with name which length is 33 - long_name = 'A' * 33 - with assert_raises(ClientError): - conn.create_target_group( - Name=long_name, - 
Protocol='HTTP', - Port=8080, - VpcId=vpc.id, - HealthCheckProtocol='HTTP', - HealthCheckPort='8080', - HealthCheckPath='/', - HealthCheckIntervalSeconds=5, - HealthCheckTimeoutSeconds=5, - HealthyThresholdCount=5, - UnhealthyThresholdCount=2, - Matcher={'HttpCode': '200'}) - - invalid_names = [ - '-name', - 'name-', - '-name-', - 'example.com', - 'test@test', - 'Na--me'] - for name in invalid_names: - with assert_raises(ClientError): - conn.create_target_group( - Name=name, - Protocol='HTTP', - Port=8080, - VpcId=vpc.id, - HealthCheckProtocol='HTTP', - HealthCheckPort='8080', - HealthCheckPath='/', - HealthCheckIntervalSeconds=5, - HealthCheckTimeoutSeconds=5, - HealthyThresholdCount=5, - UnhealthyThresholdCount=2, - Matcher={'HttpCode': '200'}) - - valid_names = ['name', 'Name', '000'] - for name in valid_names: - conn.create_target_group( - Name=name, - Protocol='HTTP', - Port=8080, - VpcId=vpc.id, - HealthCheckProtocol='HTTP', - HealthCheckPort='8080', - HealthCheckPath='/', - HealthCheckIntervalSeconds=5, - HealthCheckTimeoutSeconds=5, - HealthyThresholdCount=5, - UnhealthyThresholdCount=2, - Matcher={'HttpCode': '200'}) - - -@mock_elbv2 -@mock_ec2 -def test_describe_paginated_balancers(): - conn = boto3.client('elbv2', region_name='us-east-1') - ec2 = boto3.resource('ec2', region_name='us-east-1') - - security_group = ec2.create_security_group( - GroupName='a-security-group', Description='First One') - vpc = ec2.create_vpc(CidrBlock='172.28.7.0/24', InstanceTenancy='default') - subnet1 = ec2.create_subnet( - VpcId=vpc.id, - CidrBlock='172.28.7.192/26', - AvailabilityZone='us-east-1a') - subnet2 = ec2.create_subnet( - VpcId=vpc.id, - CidrBlock='172.28.7.192/26', - AvailabilityZone='us-east-1b') - - for i in range(51): - conn.create_load_balancer( - Name='my-lb%d' % i, - Subnets=[subnet1.id, subnet2.id], - SecurityGroups=[security_group.id], - Scheme='internal', - Tags=[{'Key': 'key_name', 'Value': 'a_value'}]) - - resp = conn.describe_load_balancers() - resp['LoadBalancers'].should.have.length_of(50) - resp['NextMarker'].should.equal( - resp['LoadBalancers'][-1]['LoadBalancerName']) - resp2 = conn.describe_load_balancers(Marker=resp['NextMarker']) - resp2['LoadBalancers'].should.have.length_of(1) - assert 'NextToken' not in resp2.keys() - - -@mock_elbv2 -@mock_ec2 -def test_delete_load_balancer(): - conn = boto3.client('elbv2', region_name='us-east-1') - ec2 = boto3.resource('ec2', region_name='us-east-1') - - security_group = ec2.create_security_group( - GroupName='a-security-group', Description='First One') - vpc = ec2.create_vpc(CidrBlock='172.28.7.0/24', InstanceTenancy='default') - subnet1 = ec2.create_subnet( - VpcId=vpc.id, - CidrBlock='172.28.7.192/26', - AvailabilityZone='us-east-1a') - subnet2 = ec2.create_subnet( - VpcId=vpc.id, - CidrBlock='172.28.7.192/26', - AvailabilityZone='us-east-1b') - - response = conn.create_load_balancer( - Name='my-lb', - Subnets=[subnet1.id, subnet2.id], - SecurityGroups=[security_group.id], - Scheme='internal', - Tags=[{'Key': 'key_name', 'Value': 'a_value'}]) - - response.get('LoadBalancers').should.have.length_of(1) - lb = response.get('LoadBalancers')[0] - - conn.delete_load_balancer(LoadBalancerArn=lb.get('LoadBalancerArn')) - balancers = conn.describe_load_balancers().get('LoadBalancers') - balancers.should.have.length_of(0) - - -@mock_ec2 -@mock_elbv2 -def test_register_targets(): - conn = boto3.client('elbv2', region_name='us-east-1') - ec2 = boto3.resource('ec2', region_name='us-east-1') - - security_group = ec2.create_security_group( 
- GroupName='a-security-group', Description='First One') - vpc = ec2.create_vpc(CidrBlock='172.28.7.0/24', InstanceTenancy='default') - subnet1 = ec2.create_subnet( - VpcId=vpc.id, - CidrBlock='172.28.7.192/26', - AvailabilityZone='us-east-1a') - subnet2 = ec2.create_subnet( - VpcId=vpc.id, - CidrBlock='172.28.7.192/26', - AvailabilityZone='us-east-1b') - - conn.create_load_balancer( - Name='my-lb', - Subnets=[subnet1.id, subnet2.id], - SecurityGroups=[security_group.id], - Scheme='internal', - Tags=[{'Key': 'key_name', 'Value': 'a_value'}]) - - response = conn.create_target_group( - Name='a-target', - Protocol='HTTP', - Port=8080, - VpcId=vpc.id, - HealthCheckProtocol='HTTP', - HealthCheckPort='8080', - HealthCheckPath='/', - HealthCheckIntervalSeconds=5, - HealthCheckTimeoutSeconds=5, - HealthyThresholdCount=5, - UnhealthyThresholdCount=2, - Matcher={'HttpCode': '200'}) - target_group = response.get('TargetGroups')[0] - - # No targets registered yet - response = conn.describe_target_health( - TargetGroupArn=target_group.get('TargetGroupArn')) - response.get('TargetHealthDescriptions').should.have.length_of(0) - - response = ec2.create_instances( - ImageId='ami-1234abcd', MinCount=2, MaxCount=2) - instance_id1 = response[0].id - instance_id2 = response[1].id - - response = conn.register_targets( - TargetGroupArn=target_group.get('TargetGroupArn'), - Targets=[ - { - 'Id': instance_id1, - 'Port': 5060, - }, - { - 'Id': instance_id2, - 'Port': 4030, - }, - ]) - - response = conn.describe_target_health( - TargetGroupArn=target_group.get('TargetGroupArn')) - response.get('TargetHealthDescriptions').should.have.length_of(2) - - response = conn.deregister_targets( - TargetGroupArn=target_group.get('TargetGroupArn'), - Targets=[{'Id': instance_id2}]) - - response = conn.describe_target_health( - TargetGroupArn=target_group.get('TargetGroupArn')) - response.get('TargetHealthDescriptions').should.have.length_of(1) - - -@mock_ec2 -@mock_elbv2 -def test_target_group_attributes(): - conn = boto3.client('elbv2', region_name='us-east-1') - ec2 = boto3.resource('ec2', region_name='us-east-1') - - security_group = ec2.create_security_group( - GroupName='a-security-group', Description='First One') - vpc = ec2.create_vpc(CidrBlock='172.28.7.0/24', InstanceTenancy='default') - subnet1 = ec2.create_subnet( - VpcId=vpc.id, - CidrBlock='172.28.7.192/26', - AvailabilityZone='us-east-1a') - subnet2 = ec2.create_subnet( - VpcId=vpc.id, - CidrBlock='172.28.7.192/26', - AvailabilityZone='us-east-1b') - - response = conn.create_load_balancer( - Name='my-lb', - Subnets=[subnet1.id, subnet2.id], - SecurityGroups=[security_group.id], - Scheme='internal', - Tags=[{'Key': 'key_name', 'Value': 'a_value'}]) - - response = conn.create_target_group( - Name='a-target', - Protocol='HTTP', - Port=8080, - VpcId=vpc.id, - HealthCheckProtocol='HTTP', - HealthCheckPort='8080', - HealthCheckPath='/', - HealthCheckIntervalSeconds=5, - HealthCheckTimeoutSeconds=5, - HealthyThresholdCount=5, - UnhealthyThresholdCount=2, - Matcher={'HttpCode': '200'}) - target_group = response.get('TargetGroups')[0] - - # Check it's in the describe_target_groups response - response = conn.describe_target_groups() - response.get('TargetGroups').should.have.length_of(1) - target_group_arn = target_group['TargetGroupArn'] - - # check if Names filter works - response = conn.describe_target_groups(Names=[]) - response = conn.describe_target_groups(Names=['a-target']) - response.get('TargetGroups').should.have.length_of(1) - target_group_arn = 
target_group['TargetGroupArn'] - - # The attributes should start with the two defaults - response = conn.describe_target_group_attributes( - TargetGroupArn=target_group_arn) - response['Attributes'].should.have.length_of(2) - attributes = {attr['Key']: attr['Value'] - for attr in response['Attributes']} - attributes['deregistration_delay.timeout_seconds'].should.equal('300') - attributes['stickiness.enabled'].should.equal('false') - - # Add cookie stickiness - response = conn.modify_target_group_attributes( - TargetGroupArn=target_group_arn, - Attributes=[ - { - 'Key': 'stickiness.enabled', - 'Value': 'true', - }, - { - 'Key': 'stickiness.type', - 'Value': 'lb_cookie', - }, - ]) - - # The response should have only the keys updated - response['Attributes'].should.have.length_of(2) - attributes = {attr['Key']: attr['Value'] - for attr in response['Attributes']} - attributes['stickiness.type'].should.equal('lb_cookie') - attributes['stickiness.enabled'].should.equal('true') - - # These new values should be in the full attribute list - response = conn.describe_target_group_attributes( - TargetGroupArn=target_group_arn) - response['Attributes'].should.have.length_of(3) - attributes = {attr['Key']: attr['Value'] - for attr in response['Attributes']} - attributes['stickiness.type'].should.equal('lb_cookie') - attributes['stickiness.enabled'].should.equal('true') - - -@mock_elbv2 -@mock_ec2 -def test_handle_listener_rules(): - conn = boto3.client('elbv2', region_name='us-east-1') - ec2 = boto3.resource('ec2', region_name='us-east-1') - - security_group = ec2.create_security_group( - GroupName='a-security-group', Description='First One') - vpc = ec2.create_vpc(CidrBlock='172.28.7.0/24', InstanceTenancy='default') - subnet1 = ec2.create_subnet( - VpcId=vpc.id, - CidrBlock='172.28.7.192/26', - AvailabilityZone='us-east-1a') - subnet2 = ec2.create_subnet( - VpcId=vpc.id, - CidrBlock='172.28.7.192/26', - AvailabilityZone='us-east-1b') - - response = conn.create_load_balancer( - Name='my-lb', - Subnets=[subnet1.id, subnet2.id], - SecurityGroups=[security_group.id], - Scheme='internal', - Tags=[{'Key': 'key_name', 'Value': 'a_value'}]) - - load_balancer_arn = response.get('LoadBalancers')[0].get('LoadBalancerArn') - - # Can't create a target group with an invalid protocol - with assert_raises(ClientError): - conn.create_target_group( - Name='a-target', - Protocol='HTTP', - Port=8080, - VpcId=vpc.id, - HealthCheckProtocol='/HTTP', - HealthCheckPort='8080', - HealthCheckPath='/', - HealthCheckIntervalSeconds=5, - HealthCheckTimeoutSeconds=5, - HealthyThresholdCount=5, - UnhealthyThresholdCount=2, - Matcher={'HttpCode': '200'}) - response = conn.create_target_group( - Name='a-target', - Protocol='HTTP', - Port=8080, - VpcId=vpc.id, - HealthCheckProtocol='HTTP', - HealthCheckPort='8080', - HealthCheckPath='/', - HealthCheckIntervalSeconds=5, - HealthCheckTimeoutSeconds=5, - HealthyThresholdCount=5, - UnhealthyThresholdCount=2, - Matcher={'HttpCode': '200'}) - target_group = response.get('TargetGroups')[0] - - # Plain HTTP listener - response = conn.create_listener( - LoadBalancerArn=load_balancer_arn, - Protocol='HTTP', - Port=80, - DefaultActions=[{'Type': 'forward', 'TargetGroupArn': target_group.get('TargetGroupArn')}]) - listener = response.get('Listeners')[0] - listener.get('Port').should.equal(80) - listener.get('Protocol').should.equal('HTTP') - listener.get('DefaultActions').should.equal([{ - 'TargetGroupArn': target_group.get('TargetGroupArn'), - 'Type': 'forward'}]) - http_listener_arn = 
listener.get('ListenerArn') - - # create first rule - priority = 100 - host = 'xxx.example.com' - path_pattern = 'foobar' - created_rule = conn.create_rule( - ListenerArn=http_listener_arn, - Priority=priority, - Conditions=[{ - 'Field': 'host-header', - 'Values': [host] - }, - { - 'Field': 'path-pattern', - 'Values': [path_pattern] - }], - Actions=[{ - 'TargetGroupArn': target_group.get('TargetGroupArn'), - 'Type': 'forward' - }] - )['Rules'][0] - created_rule['Priority'].should.equal('100') - - # check if rules is sorted by priority - priority = 50 - host = 'yyy.example.com' - path_pattern = 'foobar' - rules = conn.create_rule( - ListenerArn=http_listener_arn, - Priority=priority, - Conditions=[{ - 'Field': 'host-header', - 'Values': [host] - }, - { - 'Field': 'path-pattern', - 'Values': [path_pattern] - }], - Actions=[{ - 'TargetGroupArn': target_group.get('TargetGroupArn'), - 'Type': 'forward' - }] - ) - - # test for PriorityInUse - with assert_raises(ClientError): - conn.create_rule( - ListenerArn=http_listener_arn, - Priority=priority, - Conditions=[{ - 'Field': 'host-header', - 'Values': [host] - }, - { - 'Field': 'path-pattern', - 'Values': [path_pattern] - }], - Actions=[{ - 'TargetGroupArn': target_group.get('TargetGroupArn'), - 'Type': 'forward' - }] - ) - - # test for describe listeners - obtained_rules = conn.describe_rules(ListenerArn=http_listener_arn) - len(obtained_rules['Rules']).should.equal(3) - priorities = [rule['Priority'] for rule in obtained_rules['Rules']] - priorities.should.equal(['50', '100', 'default']) - - first_rule = obtained_rules['Rules'][0] - second_rule = obtained_rules['Rules'][1] - obtained_rules = conn.describe_rules(RuleArns=[first_rule['RuleArn']]) - obtained_rules['Rules'].should.equal([first_rule]) - - # test for pagination - obtained_rules = conn.describe_rules( - ListenerArn=http_listener_arn, PageSize=1) - len(obtained_rules['Rules']).should.equal(1) - obtained_rules.should.have.key('NextMarker') - next_marker = obtained_rules['NextMarker'] - - following_rules = conn.describe_rules( - ListenerArn=http_listener_arn, - PageSize=1, - Marker=next_marker) - len(following_rules['Rules']).should.equal(1) - following_rules.should.have.key('NextMarker') - following_rules['Rules'][0]['RuleArn'].should_not.equal( - obtained_rules['Rules'][0]['RuleArn']) - - # test for invalid describe rule request - with assert_raises(ClientError): - conn.describe_rules() - with assert_raises(ClientError): - conn.describe_rules(RuleArns=[]) - with assert_raises(ClientError): - conn.describe_rules( - ListenerArn=http_listener_arn, - RuleArns=[first_rule['RuleArn']] - ) - - # modify rule partially - new_host = 'new.example.com' - new_path_pattern = 'new_path' - modified_rule = conn.modify_rule( - RuleArn=first_rule['RuleArn'], - Conditions=[{ - 'Field': 'host-header', - 'Values': [new_host] - }, - { - 'Field': 'path-pattern', - 'Values': [new_path_pattern] - }] - )['Rules'][0] - - rules = conn.describe_rules(ListenerArn=http_listener_arn) - obtained_rule = rules['Rules'][0] - modified_rule.should.equal(obtained_rule) - obtained_rule['Conditions'][0]['Values'][0].should.equal(new_host) - obtained_rule['Conditions'][1]['Values'][0].should.equal(new_path_pattern) - obtained_rule['Actions'][0]['TargetGroupArn'].should.equal( - target_group.get('TargetGroupArn')) - - # modify priority - conn.set_rule_priorities( - RulePriorities=[ - {'RuleArn': first_rule['RuleArn'], - 'Priority': int(first_rule['Priority']) - 1} - ] - ) - with assert_raises(ClientError): - 
conn.set_rule_priorities( - RulePriorities=[ - {'RuleArn': first_rule['RuleArn'], 'Priority': 999}, - {'RuleArn': second_rule['RuleArn'], 'Priority': 999} - ] - ) - - # delete - arn = first_rule['RuleArn'] - conn.delete_rule(RuleArn=arn) - rules = conn.describe_rules(ListenerArn=http_listener_arn)['Rules'] - len(rules).should.equal(2) - - # test for invalid action type - safe_priority = 2 - with assert_raises(ClientError): - conn.create_rule( - ListenerArn=http_listener_arn, - Priority=safe_priority, - Conditions=[{ - 'Field': 'host-header', - 'Values': [host] - }, - { - 'Field': 'path-pattern', - 'Values': [path_pattern] - }], - Actions=[{ - 'TargetGroupArn': target_group.get('TargetGroupArn'), - 'Type': 'forward2' - }] - ) - - # test for invalid action type - safe_priority = 2 - invalid_target_group_arn = target_group.get('TargetGroupArn') + 'x' - with assert_raises(ClientError): - conn.create_rule( - ListenerArn=http_listener_arn, - Priority=safe_priority, - Conditions=[{ - 'Field': 'host-header', - 'Values': [host] - }, - { - 'Field': 'path-pattern', - 'Values': [path_pattern] - }], - Actions=[{ - 'TargetGroupArn': invalid_target_group_arn, - 'Type': 'forward' - }] - ) - - # test for invalid condition field_name - safe_priority = 2 - with assert_raises(ClientError): - conn.create_rule( - ListenerArn=http_listener_arn, - Priority=safe_priority, - Conditions=[{ - 'Field': 'xxxxxxx', - 'Values': [host] - }], - Actions=[{ - 'TargetGroupArn': target_group.get('TargetGroupArn'), - 'Type': 'forward' - }] - ) - - # test for emptry condition value - safe_priority = 2 - with assert_raises(ClientError): - conn.create_rule( - ListenerArn=http_listener_arn, - Priority=safe_priority, - Conditions=[{ - 'Field': 'host-header', - 'Values': [] - }], - Actions=[{ - 'TargetGroupArn': target_group.get('TargetGroupArn'), - 'Type': 'forward' - }] - ) - - # test for multiple condition value - safe_priority = 2 - with assert_raises(ClientError): - conn.create_rule( - ListenerArn=http_listener_arn, - Priority=safe_priority, - Conditions=[{ - 'Field': 'host-header', - 'Values': [host, host] - }], - Actions=[{ - 'TargetGroupArn': target_group.get('TargetGroupArn'), - 'Type': 'forward' - }] - ) - - -@mock_elbv2 -@mock_ec2 -def test_describe_invalid_target_group(): - conn = boto3.client('elbv2', region_name='us-east-1') - ec2 = boto3.resource('ec2', region_name='us-east-1') - - security_group = ec2.create_security_group( - GroupName='a-security-group', Description='First One') - vpc = ec2.create_vpc(CidrBlock='172.28.7.0/24', InstanceTenancy='default') - subnet1 = ec2.create_subnet( - VpcId=vpc.id, - CidrBlock='172.28.7.192/26', - AvailabilityZone='us-east-1a') - subnet2 = ec2.create_subnet( - VpcId=vpc.id, - CidrBlock='172.28.7.192/26', - AvailabilityZone='us-east-1b') - - response = conn.create_load_balancer( - Name='my-lb', - Subnets=[subnet1.id, subnet2.id], - SecurityGroups=[security_group.id], - Scheme='internal', - Tags=[{'Key': 'key_name', 'Value': 'a_value'}]) - - response.get('LoadBalancers')[0].get('LoadBalancerArn') - - response = conn.create_target_group( - Name='a-target', - Protocol='HTTP', - Port=8080, - VpcId=vpc.id, - HealthCheckProtocol='HTTP', - HealthCheckPort='8080', - HealthCheckPath='/', - HealthCheckIntervalSeconds=5, - HealthCheckTimeoutSeconds=5, - HealthyThresholdCount=5, - UnhealthyThresholdCount=2, - Matcher={'HttpCode': '200'}) - - # Check error raises correctly - with assert_raises(ClientError): - conn.describe_target_groups(Names=['invalid']) - - -@mock_elbv2 -@mock_ec2 -def 
test_describe_target_groups_no_arguments(): - conn = boto3.client('elbv2', region_name='us-east-1') - ec2 = boto3.resource('ec2', region_name='us-east-1') - - security_group = ec2.create_security_group( - GroupName='a-security-group', Description='First One') - vpc = ec2.create_vpc(CidrBlock='172.28.7.0/24', InstanceTenancy='default') - subnet1 = ec2.create_subnet( - VpcId=vpc.id, - CidrBlock='172.28.7.192/26', - AvailabilityZone='us-east-1a') - subnet2 = ec2.create_subnet( - VpcId=vpc.id, - CidrBlock='172.28.7.192/26', - AvailabilityZone='us-east-1b') - - response = conn.create_load_balancer( - Name='my-lb', - Subnets=[subnet1.id, subnet2.id], - SecurityGroups=[security_group.id], - Scheme='internal', - Tags=[{'Key': 'key_name', 'Value': 'a_value'}]) - - response.get('LoadBalancers')[0].get('LoadBalancerArn') - - conn.create_target_group( - Name='a-target', - Protocol='HTTP', - Port=8080, - VpcId=vpc.id, - HealthCheckProtocol='HTTP', - HealthCheckPort='8080', - HealthCheckPath='/', - HealthCheckIntervalSeconds=5, - HealthCheckTimeoutSeconds=5, - HealthyThresholdCount=5, - UnhealthyThresholdCount=2, - Matcher={'HttpCode': '200'}) - - assert len(conn.describe_target_groups()['TargetGroups']) == 1 - - -@mock_elbv2 -def test_describe_account_limits(): - client = boto3.client('elbv2', region_name='eu-central-1') - - resp = client.describe_account_limits() - resp['Limits'][0].should.contain('Name') - resp['Limits'][0].should.contain('Max') - - -@mock_elbv2 -def test_describe_ssl_policies(): - client = boto3.client('elbv2', region_name='eu-central-1') - - resp = client.describe_ssl_policies() - len(resp['SslPolicies']).should.equal(5) - - resp = client.describe_ssl_policies(Names=['ELBSecurityPolicy-TLS-1-2-2017-01', 'ELBSecurityPolicy-2016-08']) - len(resp['SslPolicies']).should.equal(2) - - -@mock_elbv2 -@mock_ec2 -def test_set_ip_address_type(): - client = boto3.client('elbv2', region_name='us-east-1') - ec2 = boto3.resource('ec2', region_name='us-east-1') - - security_group = ec2.create_security_group( - GroupName='a-security-group', Description='First One') - vpc = ec2.create_vpc(CidrBlock='172.28.7.0/24', InstanceTenancy='default') - subnet1 = ec2.create_subnet( - VpcId=vpc.id, - CidrBlock='172.28.7.192/26', - AvailabilityZone='us-east-1a') - subnet2 = ec2.create_subnet( - VpcId=vpc.id, - CidrBlock='172.28.7.192/26', - AvailabilityZone='us-east-1b') - - response = client.create_load_balancer( - Name='my-lb', - Subnets=[subnet1.id, subnet2.id], - SecurityGroups=[security_group.id], - Scheme='internal', - Tags=[{'Key': 'key_name', 'Value': 'a_value'}]) - arn = response['LoadBalancers'][0]['LoadBalancerArn'] - - # Internal LBs cant be dualstack yet - with assert_raises(ClientError): - client.set_ip_address_type( - LoadBalancerArn=arn, - IpAddressType='dualstack' - ) - - # Create internet facing one - response = client.create_load_balancer( - Name='my-lb2', - Subnets=[subnet1.id, subnet2.id], - SecurityGroups=[security_group.id], - Scheme='internet-facing', - Tags=[{'Key': 'key_name', 'Value': 'a_value'}]) - arn = response['LoadBalancers'][0]['LoadBalancerArn'] - - client.set_ip_address_type( - LoadBalancerArn=arn, - IpAddressType='dualstack' - ) - - -@mock_elbv2 -@mock_ec2 -def test_set_security_groups(): - client = boto3.client('elbv2', region_name='us-east-1') - ec2 = boto3.resource('ec2', region_name='us-east-1') - - security_group = ec2.create_security_group( - GroupName='a-security-group', Description='First One') - security_group2 = ec2.create_security_group( - 
GroupName='b-security-group', Description='Second One') - vpc = ec2.create_vpc(CidrBlock='172.28.7.0/24', InstanceTenancy='default') - subnet1 = ec2.create_subnet( - VpcId=vpc.id, - CidrBlock='172.28.7.192/26', - AvailabilityZone='us-east-1a') - subnet2 = ec2.create_subnet( - VpcId=vpc.id, - CidrBlock='172.28.7.192/26', - AvailabilityZone='us-east-1b') - - response = client.create_load_balancer( - Name='my-lb', - Subnets=[subnet1.id, subnet2.id], - SecurityGroups=[security_group.id], - Scheme='internal', - Tags=[{'Key': 'key_name', 'Value': 'a_value'}]) - arn = response['LoadBalancers'][0]['LoadBalancerArn'] - - client.set_security_groups( - LoadBalancerArn=arn, - SecurityGroups=[security_group.id, security_group2.id] - ) - - resp = client.describe_load_balancers(LoadBalancerArns=[arn]) - len(resp['LoadBalancers'][0]['SecurityGroups']).should.equal(2) - - with assert_raises(ClientError): - client.set_security_groups( - LoadBalancerArn=arn, - SecurityGroups=['non_existant'] - ) - - -@mock_elbv2 -@mock_ec2 -def test_set_subnets(): - client = boto3.client('elbv2', region_name='us-east-1') - ec2 = boto3.resource('ec2', region_name='us-east-1') - - security_group = ec2.create_security_group( - GroupName='a-security-group', Description='First One') - vpc = ec2.create_vpc(CidrBlock='172.28.7.0/24', InstanceTenancy='default') - subnet1 = ec2.create_subnet( - VpcId=vpc.id, - CidrBlock='172.28.7.192/26', - AvailabilityZone='us-east-1a') - subnet2 = ec2.create_subnet( - VpcId=vpc.id, - CidrBlock='172.28.7.192/26', - AvailabilityZone='us-east-1b') - subnet3 = ec2.create_subnet( - VpcId=vpc.id, - CidrBlock='172.28.7.192/26', - AvailabilityZone='us-east-1c') - - response = client.create_load_balancer( - Name='my-lb', - Subnets=[subnet1.id, subnet2.id], - SecurityGroups=[security_group.id], - Scheme='internal', - Tags=[{'Key': 'key_name', 'Value': 'a_value'}]) - arn = response['LoadBalancers'][0]['LoadBalancerArn'] - - client.set_subnets( - LoadBalancerArn=arn, - Subnets=[subnet1.id, subnet2.id, subnet3.id] - ) - - resp = client.describe_load_balancers(LoadBalancerArns=[arn]) - len(resp['LoadBalancers'][0]['AvailabilityZones']).should.equal(3) - - # Only 1 AZ - with assert_raises(ClientError): - client.set_subnets( - LoadBalancerArn=arn, - Subnets=[subnet1.id] - ) - - # Multiple subnets in same AZ - with assert_raises(ClientError): - client.set_subnets( - LoadBalancerArn=arn, - Subnets=[subnet1.id, subnet2.id, subnet2.id] - ) - - -@mock_elbv2 -@mock_ec2 -def test_set_subnets(): - client = boto3.client('elbv2', region_name='us-east-1') - ec2 = boto3.resource('ec2', region_name='us-east-1') - - security_group = ec2.create_security_group( - GroupName='a-security-group', Description='First One') - vpc = ec2.create_vpc(CidrBlock='172.28.7.0/24', InstanceTenancy='default') - subnet1 = ec2.create_subnet( - VpcId=vpc.id, - CidrBlock='172.28.7.192/26', - AvailabilityZone='us-east-1a') - subnet2 = ec2.create_subnet( - VpcId=vpc.id, - CidrBlock='172.28.7.192/26', - AvailabilityZone='us-east-1b') - - response = client.create_load_balancer( - Name='my-lb', - Subnets=[subnet1.id, subnet2.id], - SecurityGroups=[security_group.id], - Scheme='internal', - Tags=[{'Key': 'key_name', 'Value': 'a_value'}]) - arn = response['LoadBalancers'][0]['LoadBalancerArn'] - - client.modify_load_balancer_attributes( - LoadBalancerArn=arn, - Attributes=[{'Key': 'idle_timeout.timeout_seconds', 'Value': '600'}] - ) - - # Check its 600 not 60 - response = client.describe_load_balancer_attributes( - LoadBalancerArn=arn - ) - idle_timeout = 
list(filter(lambda item: item['Key'] == 'idle_timeout.timeout_seconds', response['Attributes']))[0] - idle_timeout['Value'].should.equal('600') - - -@mock_elbv2 -@mock_ec2 -def test_modify_target_group(): - client = boto3.client('elbv2', region_name='us-east-1') - ec2 = boto3.resource('ec2', region_name='us-east-1') - - vpc = ec2.create_vpc(CidrBlock='172.28.7.0/24', InstanceTenancy='default') - - response = client.create_target_group( - Name='a-target', - Protocol='HTTP', - Port=8080, - VpcId=vpc.id, - HealthCheckProtocol='HTTP', - HealthCheckPort='8080', - HealthCheckPath='/', - HealthCheckIntervalSeconds=5, - HealthCheckTimeoutSeconds=5, - HealthyThresholdCount=5, - UnhealthyThresholdCount=2, - Matcher={'HttpCode': '200'}) - arn = response.get('TargetGroups')[0]['TargetGroupArn'] - - client.modify_target_group( - TargetGroupArn=arn, - HealthCheckProtocol='HTTPS', - HealthCheckPort='8081', - HealthCheckPath='/status', - HealthCheckIntervalSeconds=10, - HealthCheckTimeoutSeconds=10, - HealthyThresholdCount=10, - UnhealthyThresholdCount=4, - Matcher={'HttpCode': '200-399'} - ) - - response = client.describe_target_groups( - TargetGroupArns=[arn] - ) - response['TargetGroups'][0]['Matcher']['HttpCode'].should.equal('200-399') - response['TargetGroups'][0]['HealthCheckIntervalSeconds'].should.equal(10) - response['TargetGroups'][0]['HealthCheckPath'].should.equal('/status') - response['TargetGroups'][0]['HealthCheckPort'].should.equal('8081') - response['TargetGroups'][0]['HealthCheckProtocol'].should.equal('HTTPS') - response['TargetGroups'][0]['HealthCheckTimeoutSeconds'].should.equal(10) - response['TargetGroups'][0]['HealthyThresholdCount'].should.equal(10) - response['TargetGroups'][0]['UnhealthyThresholdCount'].should.equal(4) - - -@mock_elbv2 -@mock_ec2 -@mock_acm -def test_modify_listener_http_to_https(): - client = boto3.client('elbv2', region_name='eu-central-1') - acm = boto3.client('acm', region_name='eu-central-1') - ec2 = boto3.resource('ec2', region_name='eu-central-1') - - security_group = ec2.create_security_group( - GroupName='a-security-group', Description='First One') - vpc = ec2.create_vpc(CidrBlock='172.28.7.0/24', InstanceTenancy='default') - subnet1 = ec2.create_subnet( - VpcId=vpc.id, - CidrBlock='172.28.7.192/26', - AvailabilityZone='eu-central-1a') - subnet2 = ec2.create_subnet( - VpcId=vpc.id, - CidrBlock='172.28.7.192/26', - AvailabilityZone='eu-central-1b') - - response = client.create_load_balancer( - Name='my-lb', - Subnets=[subnet1.id, subnet2.id], - SecurityGroups=[security_group.id], - Scheme='internal', - Tags=[{'Key': 'key_name', 'Value': 'a_value'}]) - - load_balancer_arn = response.get('LoadBalancers')[0].get('LoadBalancerArn') - - response = client.create_target_group( - Name='a-target', - Protocol='HTTP', - Port=8080, - VpcId=vpc.id, - HealthCheckProtocol='HTTP', - HealthCheckPort='8080', - HealthCheckPath='/', - HealthCheckIntervalSeconds=5, - HealthCheckTimeoutSeconds=5, - HealthyThresholdCount=5, - UnhealthyThresholdCount=2, - Matcher={'HttpCode': '200'}) - target_group = response.get('TargetGroups')[0] - target_group_arn = target_group['TargetGroupArn'] - - # Plain HTTP listener - response = client.create_listener( - LoadBalancerArn=load_balancer_arn, - Protocol='HTTP', - Port=80, - DefaultActions=[{'Type': 'forward', 'TargetGroupArn': target_group_arn}] - ) - listener_arn = response['Listeners'][0]['ListenerArn'] - - response = acm.request_certificate( - DomainName='google.com', - SubjectAlternativeNames=['google.com', 'www.google.com', 
'mail.google.com'], - ) - google_arn = response['CertificateArn'] - response = acm.request_certificate( - DomainName='yahoo.com', - SubjectAlternativeNames=['yahoo.com', 'www.yahoo.com', 'mail.yahoo.com'], - ) - yahoo_arn = response['CertificateArn'] - - response = client.modify_listener( - ListenerArn=listener_arn, - Port=443, - Protocol='HTTPS', - SslPolicy='ELBSecurityPolicy-TLS-1-2-2017-01', - Certificates=[ - {'CertificateArn': google_arn, 'IsDefault': False}, - {'CertificateArn': yahoo_arn, 'IsDefault': True} - ], - DefaultActions=[ - {'Type': 'forward', 'TargetGroupArn': target_group_arn} - ] - ) - response['Listeners'][0]['Port'].should.equal(443) - response['Listeners'][0]['Protocol'].should.equal('HTTPS') - response['Listeners'][0]['SslPolicy'].should.equal('ELBSecurityPolicy-TLS-1-2-2017-01') - len(response['Listeners'][0]['Certificates']).should.equal(2) - - # Check default cert, can't do this in server mode - if os.environ.get('TEST_SERVER_MODE', 'false').lower() == 'false': - listener = elbv2_backends['eu-central-1'].load_balancers[load_balancer_arn].listeners[listener_arn] - listener.certificate.should.equal(yahoo_arn) - - # No default cert - with assert_raises(ClientError): - client.modify_listener( - ListenerArn=listener_arn, - Port=443, - Protocol='HTTPS', - SslPolicy='ELBSecurityPolicy-TLS-1-2-2017-01', - Certificates=[ - {'CertificateArn': google_arn, 'IsDefault': False} - ], - DefaultActions=[ - {'Type': 'forward', 'TargetGroupArn': target_group_arn} - ] - ) - - # Bad cert - with assert_raises(ClientError): - client.modify_listener( - ListenerArn=listener_arn, - Port=443, - Protocol='HTTPS', - SslPolicy='ELBSecurityPolicy-TLS-1-2-2017-01', - Certificates=[ - {'CertificateArn': 'lalala', 'IsDefault': True} - ], - DefaultActions=[ - {'Type': 'forward', 'TargetGroupArn': target_group_arn} - ] - ) - - -@mock_ec2 -@mock_elbv2 -@mock_cloudformation -def test_create_target_groups_through_cloudformation(): - cfn_conn = boto3.client('cloudformation', region_name='us-east-1') - elbv2_client = boto3.client('elbv2', region_name='us-east-1') - - # test that setting a name manually as well as letting cloudformation create a name both work - # this is a special case because test groups have a name length limit of 22 characters, and must be unique - # https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-elasticloadbalancingv2-targetgroup.html#cfn-elasticloadbalancingv2-targetgroup-name - template = { - "AWSTemplateFormatVersion": "2010-09-09", - "Description": "ECS Cluster Test CloudFormation", - "Resources": { - "testVPC": { - "Type": "AWS::EC2::VPC", - "Properties": { - "CidrBlock": "10.0.0.0/16", - }, - }, - "testGroup1": { - "Type": "AWS::ElasticLoadBalancingV2::TargetGroup", - "Properties": { - "Port": 80, - "Protocol": "HTTP", - "VpcId": {"Ref": "testVPC"}, - }, - }, - "testGroup2": { - "Type": "AWS::ElasticLoadBalancingV2::TargetGroup", - "Properties": { - "Port": 90, - "Protocol": "HTTP", - "VpcId": {"Ref": "testVPC"}, - }, - }, - "testGroup3": { - "Type": "AWS::ElasticLoadBalancingV2::TargetGroup", - "Properties": { - "Name": "MyTargetGroup", - "Port": 70, - "Protocol": "HTTPS", - "VpcId": {"Ref": "testVPC"}, - }, - }, - } - } - template_json = json.dumps(template) - cfn_conn.create_stack( - StackName="test-stack", - TemplateBody=template_json, - ) - - describe_target_groups_response = elbv2_client.describe_target_groups() - target_group_dicts = describe_target_groups_response['TargetGroups'] - assert len(target_group_dicts) == 3 - - # there should be 2 
target groups with the same prefix of 10 characters (since the random suffix is 12) - # and one named MyTargetGroup - assert len([tg for tg in target_group_dicts if tg['TargetGroupName'] == 'MyTargetGroup']) == 1 - assert len( - [tg for tg in target_group_dicts if tg['TargetGroupName'].startswith('test-stack')] - ) == 2 +from __future__ import unicode_literals + +import json +import os +import boto3 +import botocore +from botocore.exceptions import ClientError +from nose.tools import assert_raises +import sure # noqa + +from moto import mock_elbv2, mock_ec2, mock_acm, mock_cloudformation +from moto.elbv2 import elbv2_backends + + +@mock_elbv2 +@mock_ec2 +def test_create_load_balancer(): + conn = boto3.client('elbv2', region_name='us-east-1') + ec2 = boto3.resource('ec2', region_name='us-east-1') + + security_group = ec2.create_security_group( + GroupName='a-security-group', Description='First One') + vpc = ec2.create_vpc(CidrBlock='172.28.7.0/24', InstanceTenancy='default') + subnet1 = ec2.create_subnet( + VpcId=vpc.id, + CidrBlock='172.28.7.192/26', + AvailabilityZone='us-east-1a') + subnet2 = ec2.create_subnet( + VpcId=vpc.id, + CidrBlock='172.28.7.192/26', + AvailabilityZone='us-east-1b') + + response = conn.create_load_balancer( + Name='my-lb', + Subnets=[subnet1.id, subnet2.id], + SecurityGroups=[security_group.id], + Scheme='internal', + Tags=[{'Key': 'key_name', 'Value': 'a_value'}]) + + lb = response.get('LoadBalancers')[0] + + lb.get('DNSName').should.equal("my-lb-1.us-east-1.elb.amazonaws.com") + lb.get('LoadBalancerArn').should.equal( + 'arn:aws:elasticloadbalancing:us-east-1:1:loadbalancer/my-lb/50dc6c495c0c9188') + lb.get('SecurityGroups').should.equal([security_group.id]) + lb.get('AvailabilityZones').should.equal([ + {'SubnetId': subnet1.id, 'ZoneName': 'us-east-1a'}, + {'SubnetId': subnet2.id, 'ZoneName': 'us-east-1b'}]) + + # Ensure the tags persisted + response = conn.describe_tags(ResourceArns=[lb.get('LoadBalancerArn')]) + tags = {d['Key']: d['Value'] + for d in response['TagDescriptions'][0]['Tags']} + tags.should.equal({'key_name': 'a_value'}) + + +@mock_elbv2 +@mock_ec2 +def test_describe_load_balancers(): + conn = boto3.client('elbv2', region_name='us-east-1') + ec2 = boto3.resource('ec2', region_name='us-east-1') + + security_group = ec2.create_security_group( + GroupName='a-security-group', Description='First One') + vpc = ec2.create_vpc(CidrBlock='172.28.7.0/24', InstanceTenancy='default') + subnet1 = ec2.create_subnet( + VpcId=vpc.id, + CidrBlock='172.28.7.192/26', + AvailabilityZone='us-east-1a') + subnet2 = ec2.create_subnet( + VpcId=vpc.id, + CidrBlock='172.28.7.192/26', + AvailabilityZone='us-east-1b') + + conn.create_load_balancer( + Name='my-lb', + Subnets=[subnet1.id, subnet2.id], + SecurityGroups=[security_group.id], + Scheme='internal', + Tags=[{'Key': 'key_name', 'Value': 'a_value'}]) + + response = conn.describe_load_balancers() + + response.get('LoadBalancers').should.have.length_of(1) + lb = response.get('LoadBalancers')[0] + lb.get('LoadBalancerName').should.equal('my-lb') + + response = conn.describe_load_balancers( + LoadBalancerArns=[lb.get('LoadBalancerArn')]) + response.get('LoadBalancers')[0].get( + 'LoadBalancerName').should.equal('my-lb') + + response = conn.describe_load_balancers(Names=['my-lb']) + response.get('LoadBalancers')[0].get( + 'LoadBalancerName').should.equal('my-lb') + + with assert_raises(ClientError): + conn.describe_load_balancers(LoadBalancerArns=['not-a/real/arn']) + with assert_raises(ClientError): + 
conn.describe_load_balancers(Names=['nope']) + + +@mock_elbv2 +@mock_ec2 +def test_add_remove_tags(): + conn = boto3.client('elbv2', region_name='us-east-1') + + ec2 = boto3.resource('ec2', region_name='us-east-1') + + security_group = ec2.create_security_group( + GroupName='a-security-group', Description='First One') + vpc = ec2.create_vpc(CidrBlock='172.28.7.0/24', InstanceTenancy='default') + subnet1 = ec2.create_subnet( + VpcId=vpc.id, + CidrBlock='172.28.7.192/26', + AvailabilityZone='us-east-1a') + subnet2 = ec2.create_subnet( + VpcId=vpc.id, + CidrBlock='172.28.7.192/26', + AvailabilityZone='us-east-1b') + + conn.create_load_balancer( + Name='my-lb', + Subnets=[subnet1.id, subnet2.id], + SecurityGroups=[security_group.id], + Scheme='internal', + Tags=[{'Key': 'key_name', 'Value': 'a_value'}]) + + lbs = conn.describe_load_balancers()['LoadBalancers'] + lbs.should.have.length_of(1) + lb = lbs[0] + + with assert_raises(ClientError): + conn.add_tags(ResourceArns=['missing-arn'], + Tags=[{ + 'Key': 'a', + 'Value': 'b' + }]) + + conn.add_tags(ResourceArns=[lb.get('LoadBalancerArn')], + Tags=[{ + 'Key': 'a', + 'Value': 'b' + }]) + + tags = {d['Key']: d['Value'] for d in conn.describe_tags( + ResourceArns=[lb.get('LoadBalancerArn')])['TagDescriptions'][0]['Tags']} + tags.should.have.key('a').which.should.equal('b') + + conn.add_tags(ResourceArns=[lb.get('LoadBalancerArn')], + Tags=[{ + 'Key': 'a', + 'Value': 'b' + }, { + 'Key': 'b', + 'Value': 'b' + }, { + 'Key': 'c', + 'Value': 'b' + }, { + 'Key': 'd', + 'Value': 'b' + }, { + 'Key': 'e', + 'Value': 'b' + }, { + 'Key': 'f', + 'Value': 'b' + }, { + 'Key': 'g', + 'Value': 'b' + }, { + 'Key': 'h', + 'Value': 'b' + }, { + 'Key': 'j', + 'Value': 'b' + }]) + + conn.add_tags.when.called_with(ResourceArns=[lb.get('LoadBalancerArn')], + Tags=[{ + 'Key': 'k', + 'Value': 'b' + }]).should.throw(botocore.exceptions.ClientError) + + conn.add_tags(ResourceArns=[lb.get('LoadBalancerArn')], + Tags=[{ + 'Key': 'j', + 'Value': 'c' + }]) + + tags = {d['Key']: d['Value'] for d in conn.describe_tags( + ResourceArns=[lb.get('LoadBalancerArn')])['TagDescriptions'][0]['Tags']} + + tags.should.have.key('a').which.should.equal('b') + tags.should.have.key('b').which.should.equal('b') + tags.should.have.key('c').which.should.equal('b') + tags.should.have.key('d').which.should.equal('b') + tags.should.have.key('e').which.should.equal('b') + tags.should.have.key('f').which.should.equal('b') + tags.should.have.key('g').which.should.equal('b') + tags.should.have.key('h').which.should.equal('b') + tags.should.have.key('j').which.should.equal('c') + tags.shouldnt.have.key('k') + + conn.remove_tags(ResourceArns=[lb.get('LoadBalancerArn')], + TagKeys=['a']) + + tags = {d['Key']: d['Value'] for d in conn.describe_tags( + ResourceArns=[lb.get('LoadBalancerArn')])['TagDescriptions'][0]['Tags']} + + tags.shouldnt.have.key('a') + tags.should.have.key('b').which.should.equal('b') + tags.should.have.key('c').which.should.equal('b') + tags.should.have.key('d').which.should.equal('b') + tags.should.have.key('e').which.should.equal('b') + tags.should.have.key('f').which.should.equal('b') + tags.should.have.key('g').which.should.equal('b') + tags.should.have.key('h').which.should.equal('b') + tags.should.have.key('j').which.should.equal('c') + + +@mock_elbv2 +@mock_ec2 +def test_create_elb_in_multiple_region(): + for region in ['us-west-1', 'us-west-2']: + conn = boto3.client('elbv2', region_name=region) + ec2 = boto3.resource('ec2', region_name=region) + + security_group = 
ec2.create_security_group( + GroupName='a-security-group', Description='First One') + vpc = ec2.create_vpc( + CidrBlock='172.28.7.0/24', + InstanceTenancy='default') + subnet1 = ec2.create_subnet( + VpcId=vpc.id, + CidrBlock='172.28.7.192/26', + AvailabilityZone=region + 'a') + subnet2 = ec2.create_subnet( + VpcId=vpc.id, + CidrBlock='172.28.7.192/26', + AvailabilityZone=region + 'b') + + conn.create_load_balancer( + Name='my-lb', + Subnets=[subnet1.id, subnet2.id], + SecurityGroups=[security_group.id], + Scheme='internal', + Tags=[{'Key': 'key_name', 'Value': 'a_value'}]) + + list( + boto3.client( + 'elbv2', + region_name='us-west-1').describe_load_balancers().get('LoadBalancers') + ).should.have.length_of(1) + list( + boto3.client( + 'elbv2', + region_name='us-west-2').describe_load_balancers().get('LoadBalancers') + ).should.have.length_of(1) + + +@mock_elbv2 +@mock_ec2 +def test_create_target_group_and_listeners(): + conn = boto3.client('elbv2', region_name='us-east-1') + ec2 = boto3.resource('ec2', region_name='us-east-1') + + security_group = ec2.create_security_group( + GroupName='a-security-group', Description='First One') + vpc = ec2.create_vpc(CidrBlock='172.28.7.0/24', InstanceTenancy='default') + subnet1 = ec2.create_subnet( + VpcId=vpc.id, + CidrBlock='172.28.7.192/26', + AvailabilityZone='us-east-1a') + subnet2 = ec2.create_subnet( + VpcId=vpc.id, + CidrBlock='172.28.7.192/26', + AvailabilityZone='us-east-1b') + + response = conn.create_load_balancer( + Name='my-lb', + Subnets=[subnet1.id, subnet2.id], + SecurityGroups=[security_group.id], + Scheme='internal', + Tags=[{'Key': 'key_name', 'Value': 'a_value'}]) + + load_balancer_arn = response.get('LoadBalancers')[0].get('LoadBalancerArn') + + # Can't create a target group with an invalid protocol + with assert_raises(ClientError): + conn.create_target_group( + Name='a-target', + Protocol='HTTP', + Port=8080, + VpcId=vpc.id, + HealthCheckProtocol='/HTTP', + HealthCheckPort='8080', + HealthCheckPath='/', + HealthCheckIntervalSeconds=5, + HealthCheckTimeoutSeconds=5, + HealthyThresholdCount=5, + UnhealthyThresholdCount=2, + Matcher={'HttpCode': '200'}) + response = conn.create_target_group( + Name='a-target', + Protocol='HTTP', + Port=8080, + VpcId=vpc.id, + HealthCheckProtocol='HTTP', + HealthCheckPort='8080', + HealthCheckPath='/', + HealthCheckIntervalSeconds=5, + HealthCheckTimeoutSeconds=5, + HealthyThresholdCount=5, + UnhealthyThresholdCount=2, + Matcher={'HttpCode': '200'}) + target_group = response.get('TargetGroups')[0] + target_group_arn = target_group['TargetGroupArn'] + + # Add tags to the target group + conn.add_tags(ResourceArns=[target_group_arn], Tags=[ + {'Key': 'target', 'Value': 'group'}]) + conn.describe_tags(ResourceArns=[target_group_arn])['TagDescriptions'][0]['Tags'].should.equal( + [{'Key': 'target', 'Value': 'group'}]) + + # Check it's in the describe_target_groups response + response = conn.describe_target_groups() + response.get('TargetGroups').should.have.length_of(1) + + # Plain HTTP listener + response = conn.create_listener( + LoadBalancerArn=load_balancer_arn, + Protocol='HTTP', + Port=80, + DefaultActions=[{'Type': 'forward', 'TargetGroupArn': target_group.get('TargetGroupArn')}]) + listener = response.get('Listeners')[0] + listener.get('Port').should.equal(80) + listener.get('Protocol').should.equal('HTTP') + listener.get('DefaultActions').should.equal([{ + 'TargetGroupArn': target_group.get('TargetGroupArn'), + 'Type': 'forward'}]) + http_listener_arn = listener.get('ListenerArn') + + response 
= conn.describe_target_groups(LoadBalancerArn=load_balancer_arn, + Names=['a-target']) + response.get('TargetGroups').should.have.length_of(1) + + # And another with SSL + response = conn.create_listener( + LoadBalancerArn=load_balancer_arn, + Protocol='HTTPS', + Port=443, + Certificates=[ + {'CertificateArn': 'arn:aws:iam:123456789012:server-certificate/test-cert'}], + DefaultActions=[{'Type': 'forward', 'TargetGroupArn': target_group.get('TargetGroupArn')}]) + listener = response.get('Listeners')[0] + listener.get('Port').should.equal(443) + listener.get('Protocol').should.equal('HTTPS') + listener.get('Certificates').should.equal([{ + 'CertificateArn': 'arn:aws:iam:123456789012:server-certificate/test-cert', + }]) + listener.get('DefaultActions').should.equal([{ + 'TargetGroupArn': target_group.get('TargetGroupArn'), + 'Type': 'forward'}]) + + https_listener_arn = listener.get('ListenerArn') + + response = conn.describe_listeners(LoadBalancerArn=load_balancer_arn) + response.get('Listeners').should.have.length_of(2) + response = conn.describe_listeners(ListenerArns=[https_listener_arn]) + response.get('Listeners').should.have.length_of(1) + listener = response.get('Listeners')[0] + listener.get('Port').should.equal(443) + listener.get('Protocol').should.equal('HTTPS') + + response = conn.describe_listeners( + ListenerArns=[ + http_listener_arn, + https_listener_arn]) + response.get('Listeners').should.have.length_of(2) + + # Try to delete the target group and it fails because there's a + # listener referencing it + with assert_raises(ClientError) as e: + conn.delete_target_group( + TargetGroupArn=target_group.get('TargetGroupArn')) + e.exception.operation_name.should.equal('DeleteTargetGroup') + e.exception.args.should.equal(("An error occurred (ResourceInUse) when calling the DeleteTargetGroup operation: The target group 'arn:aws:elasticloadbalancing:us-east-1:1:targetgroup/a-target/50dc6c495c0c9188' is currently in use by a listener or a rule", )) # NOQA + + # Delete one listener + response = conn.describe_listeners(LoadBalancerArn=load_balancer_arn) + response.get('Listeners').should.have.length_of(2) + conn.delete_listener(ListenerArn=http_listener_arn) + response = conn.describe_listeners(LoadBalancerArn=load_balancer_arn) + response.get('Listeners').should.have.length_of(1) + + # Then delete the load balancer + conn.delete_load_balancer(LoadBalancerArn=load_balancer_arn) + + # It's gone + response = conn.describe_load_balancers() + response.get('LoadBalancers').should.have.length_of(0) + + # And it deleted the remaining listener + response = conn.describe_listeners( + ListenerArns=[ + http_listener_arn, + https_listener_arn]) + response.get('Listeners').should.have.length_of(0) + + # But not the target groups + response = conn.describe_target_groups() + response.get('TargetGroups').should.have.length_of(1) + + # Which we'll now delete + conn.delete_target_group(TargetGroupArn=target_group.get('TargetGroupArn')) + response = conn.describe_target_groups() + response.get('TargetGroups').should.have.length_of(0) + + +@mock_elbv2 +@mock_ec2 +def test_create_target_group_without_non_required_parameters(): + conn = boto3.client('elbv2', region_name='us-east-1') + ec2 = boto3.resource('ec2', region_name='us-east-1') + + security_group = ec2.create_security_group( + GroupName='a-security-group', Description='First One') + vpc = ec2.create_vpc(CidrBlock='172.28.7.0/24', InstanceTenancy='default') + subnet1 = ec2.create_subnet( + VpcId=vpc.id, + CidrBlock='172.28.7.192/26', + 
AvailabilityZone='us-east-1a')
+    subnet2 = ec2.create_subnet(
+        VpcId=vpc.id,
+        CidrBlock='172.28.7.192/26',
+        AvailabilityZone='us-east-1b')
+
+    response = conn.create_load_balancer(
+        Name='my-lb',
+        Subnets=[subnet1.id, subnet2.id],
+        SecurityGroups=[security_group.id],
+        Scheme='internal',
+        Tags=[{'Key': 'key_name', 'Value': 'a_value'}])
+
+    # request without the HealthCheckIntervalSeconds parameter,
+    # which defaults to 30 seconds
+    response = conn.create_target_group(
+        Name='a-target',
+        Protocol='HTTP',
+        Port=8080,
+        VpcId=vpc.id,
+        HealthCheckProtocol='HTTP',
+        HealthCheckPort='8080'
+    )
+    target_group = response.get('TargetGroups')[0]
+    target_group.should_not.be.none
+
+
+@mock_elbv2
+@mock_ec2
+def test_create_invalid_target_group():
+    conn = boto3.client('elbv2', region_name='us-east-1')
+    ec2 = boto3.resource('ec2', region_name='us-east-1')
+
+    vpc = ec2.create_vpc(CidrBlock='172.28.7.0/24', InstanceTenancy='default')
+
+    # Fail to create a target group whose name is 33 characters long
+    long_name = 'A' * 33
+    with assert_raises(ClientError):
+        conn.create_target_group(
+            Name=long_name,
+            Protocol='HTTP',
+            Port=8080,
+            VpcId=vpc.id,
+            HealthCheckProtocol='HTTP',
+            HealthCheckPort='8080',
+            HealthCheckPath='/',
+            HealthCheckIntervalSeconds=5,
+            HealthCheckTimeoutSeconds=5,
+            HealthyThresholdCount=5,
+            UnhealthyThresholdCount=2,
+            Matcher={'HttpCode': '200'})
+
+    invalid_names = [
+        '-name',
+        'name-',
+        '-name-',
+        'example.com',
+        'test@test',
+        'Na--me']
+    for name in invalid_names:
+        with assert_raises(ClientError):
+            conn.create_target_group(
+                Name=name,
+                Protocol='HTTP',
+                Port=8080,
+                VpcId=vpc.id,
+                HealthCheckProtocol='HTTP',
+                HealthCheckPort='8080',
+                HealthCheckPath='/',
+                HealthCheckIntervalSeconds=5,
+                HealthCheckTimeoutSeconds=5,
+                HealthyThresholdCount=5,
+                UnhealthyThresholdCount=2,
+                Matcher={'HttpCode': '200'})
+
+    valid_names = ['name', 'Name', '000']
+    for name in valid_names:
+        conn.create_target_group(
+            Name=name,
+            Protocol='HTTP',
+            Port=8080,
+            VpcId=vpc.id,
+            HealthCheckProtocol='HTTP',
+            HealthCheckPort='8080',
+            HealthCheckPath='/',
+            HealthCheckIntervalSeconds=5,
+            HealthCheckTimeoutSeconds=5,
+            HealthyThresholdCount=5,
+            UnhealthyThresholdCount=2,
+            Matcher={'HttpCode': '200'})
+
+
+@mock_elbv2
+@mock_ec2
+def test_describe_paginated_balancers():
+    conn = boto3.client('elbv2', region_name='us-east-1')
+    ec2 = boto3.resource('ec2', region_name='us-east-1')
+
+    security_group = ec2.create_security_group(
+        GroupName='a-security-group', Description='First One')
+    vpc = ec2.create_vpc(CidrBlock='172.28.7.0/24', InstanceTenancy='default')
+    subnet1 = ec2.create_subnet(
+        VpcId=vpc.id,
+        CidrBlock='172.28.7.192/26',
+        AvailabilityZone='us-east-1a')
+    subnet2 = ec2.create_subnet(
+        VpcId=vpc.id,
+        CidrBlock='172.28.7.192/26',
+        AvailabilityZone='us-east-1b')
+
+    for i in range(51):
+        conn.create_load_balancer(
+            Name='my-lb%d' % i,
+            Subnets=[subnet1.id, subnet2.id],
+            SecurityGroups=[security_group.id],
+            Scheme='internal',
+            Tags=[{'Key': 'key_name', 'Value': 'a_value'}])
+
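+    # 51 load balancers were created above; the mock pages at 50 results
+    # here, so the first call returns one full page plus a NextMarker
+    resp = conn.describe_load_balancers()
+    resp['LoadBalancers'].should.have.length_of(50)
+    resp['NextMarker'].should.equal(
+        resp['LoadBalancers'][-1]['LoadBalancerName'])
+    resp2 = conn.describe_load_balancers(Marker=resp['NextMarker'])
+    resp2['LoadBalancers'].should.have.length_of(1)
+    assert 'NextToken' not in resp2.keys()
+
+
+@mock_elbv2
+@mock_ec2
+def test_delete_load_balancer():
+    conn = boto3.client('elbv2', region_name='us-east-1')
+    ec2 = boto3.resource('ec2', 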
region_name='us-east-1') + + security_group = ec2.create_security_group( + GroupName='a-security-group', Description='First One') + vpc = ec2.create_vpc(CidrBlock='172.28.7.0/24', InstanceTenancy='default') + subnet1 = ec2.create_subnet( + VpcId=vpc.id, + CidrBlock='172.28.7.192/26', + AvailabilityZone='us-east-1a') + subnet2 = ec2.create_subnet( + VpcId=vpc.id, + CidrBlock='172.28.7.192/26', + AvailabilityZone='us-east-1b') + + response = conn.create_load_balancer( + Name='my-lb', + Subnets=[subnet1.id, subnet2.id], + SecurityGroups=[security_group.id], + Scheme='internal', + Tags=[{'Key': 'key_name', 'Value': 'a_value'}]) + + response.get('LoadBalancers').should.have.length_of(1) + lb = response.get('LoadBalancers')[0] + + conn.delete_load_balancer(LoadBalancerArn=lb.get('LoadBalancerArn')) + balancers = conn.describe_load_balancers().get('LoadBalancers') + balancers.should.have.length_of(0) + + +@mock_ec2 +@mock_elbv2 +def test_register_targets(): + conn = boto3.client('elbv2', region_name='us-east-1') + ec2 = boto3.resource('ec2', region_name='us-east-1') + + security_group = ec2.create_security_group( + GroupName='a-security-group', Description='First One') + vpc = ec2.create_vpc(CidrBlock='172.28.7.0/24', InstanceTenancy='default') + subnet1 = ec2.create_subnet( + VpcId=vpc.id, + CidrBlock='172.28.7.192/26', + AvailabilityZone='us-east-1a') + subnet2 = ec2.create_subnet( + VpcId=vpc.id, + CidrBlock='172.28.7.192/26', + AvailabilityZone='us-east-1b') + + conn.create_load_balancer( + Name='my-lb', + Subnets=[subnet1.id, subnet2.id], + SecurityGroups=[security_group.id], + Scheme='internal', + Tags=[{'Key': 'key_name', 'Value': 'a_value'}]) + + response = conn.create_target_group( + Name='a-target', + Protocol='HTTP', + Port=8080, + VpcId=vpc.id, + HealthCheckProtocol='HTTP', + HealthCheckPort='8080', + HealthCheckPath='/', + HealthCheckIntervalSeconds=5, + HealthCheckTimeoutSeconds=5, + HealthyThresholdCount=5, + UnhealthyThresholdCount=2, + Matcher={'HttpCode': '200'}) + target_group = response.get('TargetGroups')[0] + + # No targets registered yet + response = conn.describe_target_health( + TargetGroupArn=target_group.get('TargetGroupArn')) + response.get('TargetHealthDescriptions').should.have.length_of(0) + + response = ec2.create_instances( + ImageId='ami-1234abcd', MinCount=2, MaxCount=2) + instance_id1 = response[0].id + instance_id2 = response[1].id + + response = conn.register_targets( + TargetGroupArn=target_group.get('TargetGroupArn'), + Targets=[ + { + 'Id': instance_id1, + 'Port': 5060, + }, + { + 'Id': instance_id2, + 'Port': 4030, + }, + ]) + + response = conn.describe_target_health( + TargetGroupArn=target_group.get('TargetGroupArn')) + response.get('TargetHealthDescriptions').should.have.length_of(2) + + response = conn.deregister_targets( + TargetGroupArn=target_group.get('TargetGroupArn'), + Targets=[{'Id': instance_id2}]) + + response = conn.describe_target_health( + TargetGroupArn=target_group.get('TargetGroupArn')) + response.get('TargetHealthDescriptions').should.have.length_of(1) + + +@mock_ec2 +@mock_elbv2 +def test_target_group_attributes(): + conn = boto3.client('elbv2', region_name='us-east-1') + ec2 = boto3.resource('ec2', region_name='us-east-1') + + security_group = ec2.create_security_group( + GroupName='a-security-group', Description='First One') + vpc = ec2.create_vpc(CidrBlock='172.28.7.0/24', InstanceTenancy='default') + subnet1 = ec2.create_subnet( + VpcId=vpc.id, + CidrBlock='172.28.7.192/26', + AvailabilityZone='us-east-1a') + subnet2 = 
ec2.create_subnet( + VpcId=vpc.id, + CidrBlock='172.28.7.192/26', + AvailabilityZone='us-east-1b') + + response = conn.create_load_balancer( + Name='my-lb', + Subnets=[subnet1.id, subnet2.id], + SecurityGroups=[security_group.id], + Scheme='internal', + Tags=[{'Key': 'key_name', 'Value': 'a_value'}]) + + response = conn.create_target_group( + Name='a-target', + Protocol='HTTP', + Port=8080, + VpcId=vpc.id, + HealthCheckProtocol='HTTP', + HealthCheckPort='8080', + HealthCheckPath='/', + HealthCheckIntervalSeconds=5, + HealthCheckTimeoutSeconds=5, + HealthyThresholdCount=5, + UnhealthyThresholdCount=2, + Matcher={'HttpCode': '200'}) + target_group = response.get('TargetGroups')[0] + + # Check it's in the describe_target_groups response + response = conn.describe_target_groups() + response.get('TargetGroups').should.have.length_of(1) + target_group_arn = target_group['TargetGroupArn'] + + # check if Names filter works + response = conn.describe_target_groups(Names=[]) + response = conn.describe_target_groups(Names=['a-target']) + response.get('TargetGroups').should.have.length_of(1) + target_group_arn = target_group['TargetGroupArn'] + + # The attributes should start with the two defaults + response = conn.describe_target_group_attributes( + TargetGroupArn=target_group_arn) + response['Attributes'].should.have.length_of(2) + attributes = {attr['Key']: attr['Value'] + for attr in response['Attributes']} + attributes['deregistration_delay.timeout_seconds'].should.equal('300') + attributes['stickiness.enabled'].should.equal('false') + + # Add cookie stickiness + response = conn.modify_target_group_attributes( + TargetGroupArn=target_group_arn, + Attributes=[ + { + 'Key': 'stickiness.enabled', + 'Value': 'true', + }, + { + 'Key': 'stickiness.type', + 'Value': 'lb_cookie', + }, + ]) + + # The response should have only the keys updated + response['Attributes'].should.have.length_of(2) + attributes = {attr['Key']: attr['Value'] + for attr in response['Attributes']} + attributes['stickiness.type'].should.equal('lb_cookie') + attributes['stickiness.enabled'].should.equal('true') + + # These new values should be in the full attribute list + response = conn.describe_target_group_attributes( + TargetGroupArn=target_group_arn) + response['Attributes'].should.have.length_of(3) + attributes = {attr['Key']: attr['Value'] + for attr in response['Attributes']} + attributes['stickiness.type'].should.equal('lb_cookie') + attributes['stickiness.enabled'].should.equal('true') + + +@mock_elbv2 +@mock_ec2 +def test_handle_listener_rules(): + conn = boto3.client('elbv2', region_name='us-east-1') + ec2 = boto3.resource('ec2', region_name='us-east-1') + + security_group = ec2.create_security_group( + GroupName='a-security-group', Description='First One') + vpc = ec2.create_vpc(CidrBlock='172.28.7.0/24', InstanceTenancy='default') + subnet1 = ec2.create_subnet( + VpcId=vpc.id, + CidrBlock='172.28.7.192/26', + AvailabilityZone='us-east-1a') + subnet2 = ec2.create_subnet( + VpcId=vpc.id, + CidrBlock='172.28.7.192/26', + AvailabilityZone='us-east-1b') + + response = conn.create_load_balancer( + Name='my-lb', + Subnets=[subnet1.id, subnet2.id], + SecurityGroups=[security_group.id], + Scheme='internal', + Tags=[{'Key': 'key_name', 'Value': 'a_value'}]) + + load_balancer_arn = response.get('LoadBalancers')[0].get('LoadBalancerArn') + + # Can't create a target group with an invalid protocol + with assert_raises(ClientError): + conn.create_target_group( + Name='a-target', + Protocol='HTTP', + Port=8080, + VpcId=vpc.id, + 
HealthCheckProtocol='/HTTP',
+            HealthCheckPort='8080',
+            HealthCheckPath='/',
+            HealthCheckIntervalSeconds=5,
+            HealthCheckTimeoutSeconds=5,
+            HealthyThresholdCount=5,
+            UnhealthyThresholdCount=2,
+            Matcher={'HttpCode': '200'})
+    response = conn.create_target_group(
+        Name='a-target',
+        Protocol='HTTP',
+        Port=8080,
+        VpcId=vpc.id,
+        HealthCheckProtocol='HTTP',
+        HealthCheckPort='8080',
+        HealthCheckPath='/',
+        HealthCheckIntervalSeconds=5,
+        HealthCheckTimeoutSeconds=5,
+        HealthyThresholdCount=5,
+        UnhealthyThresholdCount=2,
+        Matcher={'HttpCode': '200'})
+    target_group = response.get('TargetGroups')[0]
+
+    # Plain HTTP listener
+    response = conn.create_listener(
+        LoadBalancerArn=load_balancer_arn,
+        Protocol='HTTP',
+        Port=80,
+        DefaultActions=[{'Type': 'forward', 'TargetGroupArn': target_group.get('TargetGroupArn')}])
+    listener = response.get('Listeners')[0]
+    listener.get('Port').should.equal(80)
+    listener.get('Protocol').should.equal('HTTP')
+    listener.get('DefaultActions').should.equal([{
+        'TargetGroupArn': target_group.get('TargetGroupArn'),
+        'Type': 'forward'}])
+    http_listener_arn = listener.get('ListenerArn')
+
+    # create first rule
+    priority = 100
+    host = 'xxx.example.com'
+    path_pattern = 'foobar'
+    created_rule = conn.create_rule(
+        ListenerArn=http_listener_arn,
+        Priority=priority,
+        Conditions=[{
+            'Field': 'host-header',
+            'Values': [host]
+        },
+        {
+            'Field': 'path-pattern',
+            'Values': [path_pattern]
+        }],
+        Actions=[{
+            'TargetGroupArn': target_group.get('TargetGroupArn'),
+            'Type': 'forward'
+        }]
+    )['Rules'][0]
+    created_rule['Priority'].should.equal('100')
+
+    # check that rules are sorted by priority
+    priority = 50
+    host = 'yyy.example.com'
+    path_pattern = 'foobar'
+    rules = conn.create_rule(
+        ListenerArn=http_listener_arn,
+        Priority=priority,
+        Conditions=[{
+            'Field': 'host-header',
+            'Values': [host]
+        },
+        {
+            'Field': 'path-pattern',
+            'Values': [path_pattern]
+        }],
+        Actions=[{
+            'TargetGroupArn': target_group.get('TargetGroupArn'),
+            'Type': 'forward'
+        }]
+    )
+
+    # test for PriorityInUse
+    with assert_raises(ClientError):
+        conn.create_rule(
+            ListenerArn=http_listener_arn,
+            Priority=priority,
+            Conditions=[{
+                'Field': 'host-header',
+                'Values': [host]
+            },
+            {
+                'Field': 'path-pattern',
+                'Values': [path_pattern]
+            }],
+            Actions=[{
+                'TargetGroupArn': target_group.get('TargetGroupArn'),
+                'Type': 'forward'
+            }]
+        )
+
+    # test for describe rules
+    obtained_rules = conn.describe_rules(ListenerArn=http_listener_arn)
+    len(obtained_rules['Rules']).should.equal(3)
+    priorities = [rule['Priority'] for rule in obtained_rules['Rules']]
+    priorities.should.equal(['50', '100', 'default'])
+
+    first_rule = obtained_rules['Rules'][0]
+    second_rule = obtained_rules['Rules'][1]
+    obtained_rules = conn.describe_rules(RuleArns=[first_rule['RuleArn']])
+    obtained_rules['Rules'].should.equal([first_rule])
+
+    # test for pagination
+    obtained_rules = conn.describe_rules(
+        ListenerArn=http_listener_arn, PageSize=1)
+    len(obtained_rules['Rules']).should.equal(1)
+    obtained_rules.should.have.key('NextMarker')
+    next_marker = obtained_rules['NextMarker']
+
+    following_rules = conn.describe_rules(
+        ListenerArn=http_listener_arn,
+        PageSize=1,
+        Marker=next_marker)
+    len(following_rules['Rules']).should.equal(1)
+    following_rules.should.have.key('NextMarker')
+    following_rules['Rules'][0]['RuleArn'].should_not.equal(
+        obtained_rules['Rules'][0]['RuleArn'])
+
+    # test for invalid describe rule request
+    with assert_raises(ClientError):
+        conn.describe_rules()
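+    # describe_rules takes either a ListenerArn or a list of RuleArns, but
+    # not both at once, and RuleArns may not be empty -- each remaining
+    # invalid combination below should also raise a ClientError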
assert_raises(ClientError): + conn.describe_rules(RuleArns=[]) + with assert_raises(ClientError): + conn.describe_rules( + ListenerArn=http_listener_arn, + RuleArns=[first_rule['RuleArn']] + ) + + # modify rule partially + new_host = 'new.example.com' + new_path_pattern = 'new_path' + modified_rule = conn.modify_rule( + RuleArn=first_rule['RuleArn'], + Conditions=[{ + 'Field': 'host-header', + 'Values': [new_host] + }, + { + 'Field': 'path-pattern', + 'Values': [new_path_pattern] + }] + )['Rules'][0] + + rules = conn.describe_rules(ListenerArn=http_listener_arn) + obtained_rule = rules['Rules'][0] + modified_rule.should.equal(obtained_rule) + obtained_rule['Conditions'][0]['Values'][0].should.equal(new_host) + obtained_rule['Conditions'][1]['Values'][0].should.equal(new_path_pattern) + obtained_rule['Actions'][0]['TargetGroupArn'].should.equal( + target_group.get('TargetGroupArn')) + + # modify priority + conn.set_rule_priorities( + RulePriorities=[ + {'RuleArn': first_rule['RuleArn'], + 'Priority': int(first_rule['Priority']) - 1} + ] + ) + with assert_raises(ClientError): + conn.set_rule_priorities( + RulePriorities=[ + {'RuleArn': first_rule['RuleArn'], 'Priority': 999}, + {'RuleArn': second_rule['RuleArn'], 'Priority': 999} + ] + ) + + # delete + arn = first_rule['RuleArn'] + conn.delete_rule(RuleArn=arn) + rules = conn.describe_rules(ListenerArn=http_listener_arn)['Rules'] + len(rules).should.equal(2) + + # test for invalid action type + safe_priority = 2 + with assert_raises(ClientError): + conn.create_rule( + ListenerArn=http_listener_arn, + Priority=safe_priority, + Conditions=[{ + 'Field': 'host-header', + 'Values': [host] + }, + { + 'Field': 'path-pattern', + 'Values': [path_pattern] + }], + Actions=[{ + 'TargetGroupArn': target_group.get('TargetGroupArn'), + 'Type': 'forward2' + }] + ) + + # test for invalid action type + safe_priority = 2 + invalid_target_group_arn = target_group.get('TargetGroupArn') + 'x' + with assert_raises(ClientError): + conn.create_rule( + ListenerArn=http_listener_arn, + Priority=safe_priority, + Conditions=[{ + 'Field': 'host-header', + 'Values': [host] + }, + { + 'Field': 'path-pattern', + 'Values': [path_pattern] + }], + Actions=[{ + 'TargetGroupArn': invalid_target_group_arn, + 'Type': 'forward' + }] + ) + + # test for invalid condition field_name + safe_priority = 2 + with assert_raises(ClientError): + conn.create_rule( + ListenerArn=http_listener_arn, + Priority=safe_priority, + Conditions=[{ + 'Field': 'xxxxxxx', + 'Values': [host] + }], + Actions=[{ + 'TargetGroupArn': target_group.get('TargetGroupArn'), + 'Type': 'forward' + }] + ) + + # test for emptry condition value + safe_priority = 2 + with assert_raises(ClientError): + conn.create_rule( + ListenerArn=http_listener_arn, + Priority=safe_priority, + Conditions=[{ + 'Field': 'host-header', + 'Values': [] + }], + Actions=[{ + 'TargetGroupArn': target_group.get('TargetGroupArn'), + 'Type': 'forward' + }] + ) + + # test for multiple condition value + safe_priority = 2 + with assert_raises(ClientError): + conn.create_rule( + ListenerArn=http_listener_arn, + Priority=safe_priority, + Conditions=[{ + 'Field': 'host-header', + 'Values': [host, host] + }], + Actions=[{ + 'TargetGroupArn': target_group.get('TargetGroupArn'), + 'Type': 'forward' + }] + ) + + +@mock_elbv2 +@mock_ec2 +def test_describe_invalid_target_group(): + conn = boto3.client('elbv2', region_name='us-east-1') + ec2 = boto3.resource('ec2', region_name='us-east-1') + + security_group = ec2.create_security_group( + 
+        GroupName='a-security-group', Description='First One')
+    vpc = ec2.create_vpc(CidrBlock='172.28.7.0/24', InstanceTenancy='default')
+    subnet1 = ec2.create_subnet(
+        VpcId=vpc.id,
+        CidrBlock='172.28.7.192/26',
+        AvailabilityZone='us-east-1a')
+    subnet2 = ec2.create_subnet(
+        VpcId=vpc.id,
+        CidrBlock='172.28.7.192/26',
+        AvailabilityZone='us-east-1b')
+
+    response = conn.create_load_balancer(
+        Name='my-lb',
+        Subnets=[subnet1.id, subnet2.id],
+        SecurityGroups=[security_group.id],
+        Scheme='internal',
+        Tags=[{'Key': 'key_name', 'Value': 'a_value'}])
+
+    response.get('LoadBalancers')[0].get('LoadBalancerArn')
+
+    response = conn.create_target_group(
+        Name='a-target',
+        Protocol='HTTP',
+        Port=8080,
+        VpcId=vpc.id,
+        HealthCheckProtocol='HTTP',
+        HealthCheckPort='8080',
+        HealthCheckPath='/',
+        HealthCheckIntervalSeconds=5,
+        HealthCheckTimeoutSeconds=5,
+        HealthyThresholdCount=5,
+        UnhealthyThresholdCount=2,
+        Matcher={'HttpCode': '200'})
+
+    # Check the error is raised correctly
+    with assert_raises(ClientError):
+        conn.describe_target_groups(Names=['invalid'])
+
+
+@mock_elbv2
+@mock_ec2
+def test_describe_target_groups_no_arguments():
+    conn = boto3.client('elbv2', region_name='us-east-1')
+    ec2 = boto3.resource('ec2', region_name='us-east-1')
+
+    security_group = ec2.create_security_group(
+        GroupName='a-security-group', Description='First One')
+    vpc = ec2.create_vpc(CidrBlock='172.28.7.0/24', InstanceTenancy='default')
+    subnet1 = ec2.create_subnet(
+        VpcId=vpc.id,
+        CidrBlock='172.28.7.192/26',
+        AvailabilityZone='us-east-1a')
+    subnet2 = ec2.create_subnet(
+        VpcId=vpc.id,
+        CidrBlock='172.28.7.192/26',
+        AvailabilityZone='us-east-1b')
+
+    response = conn.create_load_balancer(
+        Name='my-lb',
+        Subnets=[subnet1.id, subnet2.id],
+        SecurityGroups=[security_group.id],
+        Scheme='internal',
+        Tags=[{'Key': 'key_name', 'Value': 'a_value'}])
+
+    response.get('LoadBalancers')[0].get('LoadBalancerArn')
+
+    conn.create_target_group(
+        Name='a-target',
+        Protocol='HTTP',
+        Port=8080,
+        VpcId=vpc.id,
+        HealthCheckProtocol='HTTP',
+        HealthCheckPort='8080',
+        HealthCheckPath='/',
+        HealthCheckIntervalSeconds=5,
+        HealthCheckTimeoutSeconds=5,
+        HealthyThresholdCount=5,
+        UnhealthyThresholdCount=2,
+        Matcher={'HttpCode': '200'})
+
+    assert len(conn.describe_target_groups()['TargetGroups']) == 1
+
+
+@mock_elbv2
+def test_describe_account_limits():
+    client = boto3.client('elbv2', region_name='eu-central-1')
+
+    resp = client.describe_account_limits()
+    resp['Limits'][0].should.contain('Name')
+    resp['Limits'][0].should.contain('Max')
+
+
+@mock_elbv2
+def test_describe_ssl_policies():
+    client = boto3.client('elbv2', region_name='eu-central-1')
+
+    resp = client.describe_ssl_policies()
+    len(resp['SslPolicies']).should.equal(5)
+
+    resp = client.describe_ssl_policies(Names=['ELBSecurityPolicy-TLS-1-2-2017-01', 'ELBSecurityPolicy-2016-08'])
+    len(resp['SslPolicies']).should.equal(2)
+
+
+@mock_elbv2
+@mock_ec2
+def test_set_ip_address_type():
+    client = boto3.client('elbv2', region_name='us-east-1')
+    ec2 = boto3.resource('ec2', region_name='us-east-1')
+
+    security_group = ec2.create_security_group(
+        GroupName='a-security-group', Description='First One')
+    vpc = ec2.create_vpc(CidrBlock='172.28.7.0/24', InstanceTenancy='default')
+    subnet1 = ec2.create_subnet(
+        VpcId=vpc.id,
+        CidrBlock='172.28.7.192/26',
+        AvailabilityZone='us-east-1a')
+    subnet2 = ec2.create_subnet(
+        VpcId=vpc.id,
+        CidrBlock='172.28.7.192/26',
+        AvailabilityZone='us-east-1b')
+
+    response = client.create_load_balancer(
+        Name='my-lb',
+        Subnets=[subnet1.id, subnet2.id],
+        SecurityGroups=[security_group.id],
+        Scheme='internal',
+        Tags=[{'Key': 'key_name', 'Value': 'a_value'}])
+    arn = response['LoadBalancers'][0]['LoadBalancerArn']
+
+    # Internal LBs can't be dualstack yet
+    with assert_raises(ClientError):
+        client.set_ip_address_type(
+            LoadBalancerArn=arn,
+            IpAddressType='dualstack'
+        )
+
+    # Create an internet-facing one
+    response = client.create_load_balancer(
+        Name='my-lb2',
+        Subnets=[subnet1.id, subnet2.id],
+        SecurityGroups=[security_group.id],
+        Scheme='internet-facing',
+        Tags=[{'Key': 'key_name', 'Value': 'a_value'}])
+    arn = response['LoadBalancers'][0]['LoadBalancerArn']
+
+    client.set_ip_address_type(
+        LoadBalancerArn=arn,
+        IpAddressType='dualstack'
+    )
+
+
+@mock_elbv2
+@mock_ec2
+def test_set_security_groups():
+    client = boto3.client('elbv2', region_name='us-east-1')
+    ec2 = boto3.resource('ec2', region_name='us-east-1')
+
+    security_group = ec2.create_security_group(
+        GroupName='a-security-group', Description='First One')
+    security_group2 = ec2.create_security_group(
+        GroupName='b-security-group', Description='Second One')
+    vpc = ec2.create_vpc(CidrBlock='172.28.7.0/24', InstanceTenancy='default')
+    subnet1 = ec2.create_subnet(
+        VpcId=vpc.id,
+        CidrBlock='172.28.7.192/26',
+        AvailabilityZone='us-east-1a')
+    subnet2 = ec2.create_subnet(
+        VpcId=vpc.id,
+        CidrBlock='172.28.7.192/26',
+        AvailabilityZone='us-east-1b')
+
+    response = client.create_load_balancer(
+        Name='my-lb',
+        Subnets=[subnet1.id, subnet2.id],
+        SecurityGroups=[security_group.id],
+        Scheme='internal',
+        Tags=[{'Key': 'key_name', 'Value': 'a_value'}])
+    arn = response['LoadBalancers'][0]['LoadBalancerArn']
+
+    client.set_security_groups(
+        LoadBalancerArn=arn,
+        SecurityGroups=[security_group.id, security_group2.id]
+    )
+
+    resp = client.describe_load_balancers(LoadBalancerArns=[arn])
+    len(resp['LoadBalancers'][0]['SecurityGroups']).should.equal(2)
+
+    with assert_raises(ClientError):
+        client.set_security_groups(
+            LoadBalancerArn=arn,
+            SecurityGroups=['non_existant']
+        )
+
+
+@mock_elbv2
+@mock_ec2
+def test_set_subnets():
+    client = boto3.client('elbv2', region_name='us-east-1')
+    ec2 = boto3.resource('ec2', region_name='us-east-1')
+
+    security_group = ec2.create_security_group(
+        GroupName='a-security-group', Description='First One')
+    vpc = ec2.create_vpc(CidrBlock='172.28.7.0/24', InstanceTenancy='default')
+    subnet1 = ec2.create_subnet(
+        VpcId=vpc.id,
+        CidrBlock='172.28.7.192/26',
+        AvailabilityZone='us-east-1a')
+    subnet2 = ec2.create_subnet(
+        VpcId=vpc.id,
+        CidrBlock='172.28.7.192/26',
+        AvailabilityZone='us-east-1b')
+    subnet3 = ec2.create_subnet(
+        VpcId=vpc.id,
+        CidrBlock='172.28.7.192/26',
+        AvailabilityZone='us-east-1c')
+
+    response = client.create_load_balancer(
+        Name='my-lb',
+        Subnets=[subnet1.id, subnet2.id],
+        SecurityGroups=[security_group.id],
+        Scheme='internal',
+        Tags=[{'Key': 'key_name', 'Value': 'a_value'}])
+    arn = response['LoadBalancers'][0]['LoadBalancerArn']
+
+    client.set_subnets(
+        LoadBalancerArn=arn,
+        Subnets=[subnet1.id, subnet2.id, subnet3.id]
+    )
+
+    resp = client.describe_load_balancers(LoadBalancerArns=[arn])
+    len(resp['LoadBalancers'][0]['AvailabilityZones']).should.equal(3)
+
+    # Only 1 AZ
+    with assert_raises(ClientError):
+        client.set_subnets(
+            LoadBalancerArn=arn,
+            Subnets=[subnet1.id]
+        )
+
+    # Multiple subnets in same AZ
+    with assert_raises(ClientError):
+        client.set_subnets(
+            LoadBalancerArn=arn,
+            Subnets=[subnet1.id, subnet2.id, subnet2.id]
+        )
+
+
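+# Note on the negative checks above: assert_raises(ClientError) passes for
+# *any* botocore client error. A minimal sketch of a stricter assertion,
+# assuming nose's assert_raises context manager (already used in this file)
+# and assuming the backend reports 'InvalidConfigurationRequest' for the
+# single-subnet case; that error code is an assumption, not taken from this
+# changeset:
+#
+#     with assert_raises(ClientError) as ex:
+#         client.set_subnets(LoadBalancerArn=arn, Subnets=[subnet1.id])
+#     ex.exception.response['Error']['Code'].should.equal(
+#         'InvalidConfigurationRequest')
+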
+@mock_elbv2
+@mock_ec2
+def test_modify_load_balancer_attributes_idle_timeout():
+    client = boto3.client('elbv2', region_name='us-east-1')
+    ec2 = boto3.resource('ec2', region_name='us-east-1')
+
+    security_group = ec2.create_security_group(
+        GroupName='a-security-group', Description='First One')
+    vpc = ec2.create_vpc(CidrBlock='172.28.7.0/24', InstanceTenancy='default')
+    subnet1 = ec2.create_subnet(
+        VpcId=vpc.id,
+        CidrBlock='172.28.7.192/26',
+        AvailabilityZone='us-east-1a')
+    subnet2 = ec2.create_subnet(
+        VpcId=vpc.id,
+        CidrBlock='172.28.7.192/26',
+        AvailabilityZone='us-east-1b')
+
+    response = client.create_load_balancer(
+        Name='my-lb',
+        Subnets=[subnet1.id, subnet2.id],
+        SecurityGroups=[security_group.id],
+        Scheme='internal',
+        Tags=[{'Key': 'key_name', 'Value': 'a_value'}])
+    arn = response['LoadBalancers'][0]['LoadBalancerArn']
+
+    client.modify_load_balancer_attributes(
+        LoadBalancerArn=arn,
+        Attributes=[{'Key': 'idle_timeout.timeout_seconds', 'Value': '600'}]
+    )
+
+    # Check it's 600, not 60
+    response = client.describe_load_balancer_attributes(
+        LoadBalancerArn=arn
+    )
+    idle_timeout = list(filter(lambda item: item['Key'] == 'idle_timeout.timeout_seconds', response['Attributes']))[0]
+    idle_timeout['Value'].should.equal('600')
+
+
+@mock_elbv2
+@mock_ec2
+def test_modify_target_group():
+    client = boto3.client('elbv2', region_name='us-east-1')
+    ec2 = boto3.resource('ec2', region_name='us-east-1')
+
+    vpc = ec2.create_vpc(CidrBlock='172.28.7.0/24', InstanceTenancy='default')
+
+    response = client.create_target_group(
+        Name='a-target',
+        Protocol='HTTP',
+        Port=8080,
+        VpcId=vpc.id,
+        HealthCheckProtocol='HTTP',
+        HealthCheckPort='8080',
+        HealthCheckPath='/',
+        HealthCheckIntervalSeconds=5,
+        HealthCheckTimeoutSeconds=5,
+        HealthyThresholdCount=5,
+        UnhealthyThresholdCount=2,
+        Matcher={'HttpCode': '200'})
+    arn = response.get('TargetGroups')[0]['TargetGroupArn']
+
+    client.modify_target_group(
+        TargetGroupArn=arn,
+        HealthCheckProtocol='HTTPS',
+        HealthCheckPort='8081',
+        HealthCheckPath='/status',
+        HealthCheckIntervalSeconds=10,
+        HealthCheckTimeoutSeconds=10,
+        HealthyThresholdCount=10,
+        UnhealthyThresholdCount=4,
+        Matcher={'HttpCode': '200-399'}
+    )
+
+    response = client.describe_target_groups(
+        TargetGroupArns=[arn]
+    )
+    response['TargetGroups'][0]['Matcher']['HttpCode'].should.equal('200-399')
+    response['TargetGroups'][0]['HealthCheckIntervalSeconds'].should.equal(10)
+    response['TargetGroups'][0]['HealthCheckPath'].should.equal('/status')
+    response['TargetGroups'][0]['HealthCheckPort'].should.equal('8081')
+    response['TargetGroups'][0]['HealthCheckProtocol'].should.equal('HTTPS')
+    response['TargetGroups'][0]['HealthCheckTimeoutSeconds'].should.equal(10)
+    response['TargetGroups'][0]['HealthyThresholdCount'].should.equal(10)
+    response['TargetGroups'][0]['UnhealthyThresholdCount'].should.equal(4)
+
+
+@mock_elbv2
+@mock_ec2
+@mock_acm
+def test_modify_listener_http_to_https():
+    client = boto3.client('elbv2', region_name='eu-central-1')
+    acm = boto3.client('acm', region_name='eu-central-1')
+    ec2 = boto3.resource('ec2', region_name='eu-central-1')
+
+    security_group = ec2.create_security_group(
+        GroupName='a-security-group', Description='First One')
+    vpc = ec2.create_vpc(CidrBlock='172.28.7.0/24', InstanceTenancy='default')
+    subnet1 = ec2.create_subnet(
+        VpcId=vpc.id,
+        CidrBlock='172.28.7.192/26',
+        AvailabilityZone='eu-central-1a')
+    subnet2 = ec2.create_subnet(
+        VpcId=vpc.id,
+        CidrBlock='172.28.7.192/26',
+        AvailabilityZone='eu-central-1b')
+
+    response =
client.create_load_balancer( + Name='my-lb', + Subnets=[subnet1.id, subnet2.id], + SecurityGroups=[security_group.id], + Scheme='internal', + Tags=[{'Key': 'key_name', 'Value': 'a_value'}]) + + load_balancer_arn = response.get('LoadBalancers')[0].get('LoadBalancerArn') + + response = client.create_target_group( + Name='a-target', + Protocol='HTTP', + Port=8080, + VpcId=vpc.id, + HealthCheckProtocol='HTTP', + HealthCheckPort='8080', + HealthCheckPath='/', + HealthCheckIntervalSeconds=5, + HealthCheckTimeoutSeconds=5, + HealthyThresholdCount=5, + UnhealthyThresholdCount=2, + Matcher={'HttpCode': '200'}) + target_group = response.get('TargetGroups')[0] + target_group_arn = target_group['TargetGroupArn'] + + # Plain HTTP listener + response = client.create_listener( + LoadBalancerArn=load_balancer_arn, + Protocol='HTTP', + Port=80, + DefaultActions=[{'Type': 'forward', 'TargetGroupArn': target_group_arn}] + ) + listener_arn = response['Listeners'][0]['ListenerArn'] + + response = acm.request_certificate( + DomainName='google.com', + SubjectAlternativeNames=['google.com', 'www.google.com', 'mail.google.com'], + ) + google_arn = response['CertificateArn'] + response = acm.request_certificate( + DomainName='yahoo.com', + SubjectAlternativeNames=['yahoo.com', 'www.yahoo.com', 'mail.yahoo.com'], + ) + yahoo_arn = response['CertificateArn'] + + response = client.modify_listener( + ListenerArn=listener_arn, + Port=443, + Protocol='HTTPS', + SslPolicy='ELBSecurityPolicy-TLS-1-2-2017-01', + Certificates=[ + {'CertificateArn': google_arn, 'IsDefault': False}, + {'CertificateArn': yahoo_arn, 'IsDefault': True} + ], + DefaultActions=[ + {'Type': 'forward', 'TargetGroupArn': target_group_arn} + ] + ) + response['Listeners'][0]['Port'].should.equal(443) + response['Listeners'][0]['Protocol'].should.equal('HTTPS') + response['Listeners'][0]['SslPolicy'].should.equal('ELBSecurityPolicy-TLS-1-2-2017-01') + len(response['Listeners'][0]['Certificates']).should.equal(2) + + # Check default cert, can't do this in server mode + if os.environ.get('TEST_SERVER_MODE', 'false').lower() == 'false': + listener = elbv2_backends['eu-central-1'].load_balancers[load_balancer_arn].listeners[listener_arn] + listener.certificate.should.equal(yahoo_arn) + + # No default cert + with assert_raises(ClientError): + client.modify_listener( + ListenerArn=listener_arn, + Port=443, + Protocol='HTTPS', + SslPolicy='ELBSecurityPolicy-TLS-1-2-2017-01', + Certificates=[ + {'CertificateArn': google_arn, 'IsDefault': False} + ], + DefaultActions=[ + {'Type': 'forward', 'TargetGroupArn': target_group_arn} + ] + ) + + # Bad cert + with assert_raises(ClientError): + client.modify_listener( + ListenerArn=listener_arn, + Port=443, + Protocol='HTTPS', + SslPolicy='ELBSecurityPolicy-TLS-1-2-2017-01', + Certificates=[ + {'CertificateArn': 'lalala', 'IsDefault': True} + ], + DefaultActions=[ + {'Type': 'forward', 'TargetGroupArn': target_group_arn} + ] + ) + + +@mock_ec2 +@mock_elbv2 +@mock_cloudformation +def test_create_target_groups_through_cloudformation(): + cfn_conn = boto3.client('cloudformation', region_name='us-east-1') + elbv2_client = boto3.client('elbv2', region_name='us-east-1') + + # test that setting a name manually as well as letting cloudformation create a name both work + # this is a special case because test groups have a name length limit of 22 characters, and must be unique + # 
https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-elasticloadbalancingv2-targetgroup.html#cfn-elasticloadbalancingv2-targetgroup-name + template = { + "AWSTemplateFormatVersion": "2010-09-09", + "Description": "ECS Cluster Test CloudFormation", + "Resources": { + "testVPC": { + "Type": "AWS::EC2::VPC", + "Properties": { + "CidrBlock": "10.0.0.0/16", + }, + }, + "testGroup1": { + "Type": "AWS::ElasticLoadBalancingV2::TargetGroup", + "Properties": { + "Port": 80, + "Protocol": "HTTP", + "VpcId": {"Ref": "testVPC"}, + }, + }, + "testGroup2": { + "Type": "AWS::ElasticLoadBalancingV2::TargetGroup", + "Properties": { + "Port": 90, + "Protocol": "HTTP", + "VpcId": {"Ref": "testVPC"}, + }, + }, + "testGroup3": { + "Type": "AWS::ElasticLoadBalancingV2::TargetGroup", + "Properties": { + "Name": "MyTargetGroup", + "Port": 70, + "Protocol": "HTTPS", + "VpcId": {"Ref": "testVPC"}, + }, + }, + } + } + template_json = json.dumps(template) + cfn_conn.create_stack( + StackName="test-stack", + TemplateBody=template_json, + ) + + describe_target_groups_response = elbv2_client.describe_target_groups() + target_group_dicts = describe_target_groups_response['TargetGroups'] + assert len(target_group_dicts) == 3 + + # there should be 2 target groups with the same prefix of 10 characters (since the random suffix is 12) + # and one named MyTargetGroup + assert len([tg for tg in target_group_dicts if tg['TargetGroupName'] == 'MyTargetGroup']) == 1 + assert len( + [tg for tg in target_group_dicts if tg['TargetGroupName'].startswith('test-stack')] + ) == 2 diff --git a/tests/test_elbv2/test_server.py b/tests/test_elbv2/test_server.py index ddd40a02d899..7d47d23ad0e9 100644 --- a/tests/test_elbv2/test_server.py +++ b/tests/test_elbv2/test_server.py @@ -1,17 +1,17 @@ -from __future__ import unicode_literals -import sure # noqa - -import moto.server as server - -''' -Test the different server responses -''' - - -def test_elbv2_describe_load_balancers(): - backend = server.create_backend_app("elbv2") - test_client = backend.test_client() - - res = test_client.get('/?Action=DescribeLoadBalancers&Version=2015-12-01') - - res.data.should.contain(b'DescribeLoadBalancersResponse') +from __future__ import unicode_literals +import sure # noqa + +import moto.server as server + +''' +Test the different server responses +''' + + +def test_elbv2_describe_load_balancers(): + backend = server.create_backend_app("elbv2") + test_client = backend.test_client() + + res = test_client.get('/?Action=DescribeLoadBalancers&Version=2015-12-01') + + res.data.should.contain(b'DescribeLoadBalancersResponse') diff --git a/tests/test_emr/test_emr.py b/tests/test_emr/test_emr.py index 505c69b11ac2..a1918ac30004 100644 --- a/tests/test_emr/test_emr.py +++ b/tests/test_emr/test_emr.py @@ -1,658 +1,658 @@ -from __future__ import unicode_literals -import time -from datetime import datetime - -import boto -import pytz -from boto.emr.bootstrap_action import BootstrapAction -from boto.emr.instance_group import InstanceGroup -from boto.emr.step import StreamingStep - -import six -import sure # noqa - -from moto import mock_emr_deprecated -from tests.helpers import requires_boto_gte - - -run_jobflow_args = dict( - job_flow_role='EMR_EC2_DefaultRole', - keep_alive=True, - log_uri='s3://some_bucket/jobflow_logs', - master_instance_type='c1.medium', - name='My jobflow', - num_instances=2, - service_role='EMR_DefaultRole', - slave_instance_type='c1.medium', -) - - -input_instance_groups = [ - InstanceGroup(1, 'MASTER', 'c1.medium', 
'ON_DEMAND', 'master'), - InstanceGroup(3, 'CORE', 'c1.medium', 'ON_DEMAND', 'core'), - InstanceGroup(6, 'TASK', 'c1.large', 'SPOT', 'task-1', '0.07'), - InstanceGroup(10, 'TASK', 'c1.xlarge', 'SPOT', 'task-2', '0.05'), -] - - -@mock_emr_deprecated -def test_describe_cluster(): - conn = boto.connect_emr() - args = run_jobflow_args.copy() - args.update(dict( - api_params={ - 'Applications.member.1.Name': 'Spark', - 'Applications.member.1.Version': '2.4.2', - 'Configurations.member.1.Classification': 'yarn-site', - 'Configurations.member.1.Properties.entry.1.key': 'someproperty', - 'Configurations.member.1.Properties.entry.1.value': 'somevalue', - 'Configurations.member.1.Properties.entry.2.key': 'someotherproperty', - 'Configurations.member.1.Properties.entry.2.value': 'someothervalue', - 'Instances.EmrManagedMasterSecurityGroup': 'master-security-group', - 'Instances.Ec2SubnetId': 'subnet-8be41cec', - }, - availability_zone='us-east-2b', - ec2_keyname='mykey', - job_flow_role='EMR_EC2_DefaultRole', - keep_alive=False, - log_uri='s3://some_bucket/jobflow_logs', - name='My jobflow', - service_role='EMR_DefaultRole', - visible_to_all_users=True, - )) - cluster_id = conn.run_jobflow(**args) - input_tags = {'tag1': 'val1', 'tag2': 'val2'} - conn.add_tags(cluster_id, input_tags) - - cluster = conn.describe_cluster(cluster_id) - cluster.applications[0].name.should.equal('Spark') - cluster.applications[0].version.should.equal('2.4.2') - cluster.autoterminate.should.equal('true') - - # configurations appear not be supplied as attributes? - - attrs = cluster.ec2instanceattributes - # AdditionalMasterSecurityGroups - # AdditionalSlaveSecurityGroups - attrs.ec2availabilityzone.should.equal(args['availability_zone']) - attrs.ec2keyname.should.equal(args['ec2_keyname']) - attrs.ec2subnetid.should.equal(args['api_params']['Instances.Ec2SubnetId']) - # EmrManagedMasterSecurityGroups - # EmrManagedSlaveSecurityGroups - attrs.iaminstanceprofile.should.equal(args['job_flow_role']) - # ServiceAccessSecurityGroup - - cluster.id.should.equal(cluster_id) - cluster.loguri.should.equal(args['log_uri']) - cluster.masterpublicdnsname.should.be.a(six.string_types) - cluster.name.should.equal(args['name']) - int(cluster.normalizedinstancehours).should.equal(0) - # cluster.release_label - cluster.shouldnt.have.property('requestedamiversion') - cluster.runningamiversion.should.equal('1.0.0') - # cluster.securityconfiguration - cluster.servicerole.should.equal(args['service_role']) - - cluster.status.state.should.equal('TERMINATED') - cluster.status.statechangereason.message.should.be.a(six.string_types) - cluster.status.statechangereason.code.should.be.a(six.string_types) - cluster.status.timeline.creationdatetime.should.be.a(six.string_types) - # cluster.status.timeline.enddatetime.should.be.a(six.string_types) - # cluster.status.timeline.readydatetime.should.be.a(six.string_types) - - dict((item.key, item.value) - for item in cluster.tags).should.equal(input_tags) - - cluster.terminationprotected.should.equal('false') - cluster.visibletoallusers.should.equal('true') - - -@mock_emr_deprecated -def test_describe_jobflows(): - conn = boto.connect_emr() - args = run_jobflow_args.copy() - expected = {} - - for idx in range(4): - cluster_name = 'cluster' + str(idx) - args['name'] = cluster_name - cluster_id = conn.run_jobflow(**args) - expected[cluster_id] = { - 'id': cluster_id, - 'name': cluster_name, - 'state': 'WAITING' - } - - # need sleep since it appears the timestamp is always rounded to - # the nearest second 
internally - time.sleep(1) - timestamp = datetime.now(pytz.utc) - time.sleep(1) - - for idx in range(4, 6): - cluster_name = 'cluster' + str(idx) - args['name'] = cluster_name - cluster_id = conn.run_jobflow(**args) - conn.terminate_jobflow(cluster_id) - expected[cluster_id] = { - 'id': cluster_id, - 'name': cluster_name, - 'state': 'TERMINATED' - } - jobs = conn.describe_jobflows() - jobs.should.have.length_of(6) - - for cluster_id, y in expected.items(): - resp = conn.describe_jobflows(jobflow_ids=[cluster_id]) - resp.should.have.length_of(1) - resp[0].jobflowid.should.equal(cluster_id) - - resp = conn.describe_jobflows(states=['WAITING']) - resp.should.have.length_of(4) - for x in resp: - x.state.should.equal('WAITING') - - resp = conn.describe_jobflows(created_before=timestamp) - resp.should.have.length_of(4) - - resp = conn.describe_jobflows(created_after=timestamp) - resp.should.have.length_of(2) - - -@mock_emr_deprecated -def test_describe_jobflow(): - conn = boto.connect_emr() - args = run_jobflow_args.copy() - args.update(dict( - ami_version='3.8.1', - api_params={ - #'Applications.member.1.Name': 'Spark', - #'Applications.member.1.Version': '2.4.2', - #'Configurations.member.1.Classification': 'yarn-site', - #'Configurations.member.1.Properties.entry.1.key': 'someproperty', - #'Configurations.member.1.Properties.entry.1.value': 'somevalue', - #'Instances.EmrManagedMasterSecurityGroup': 'master-security-group', - 'Instances.Ec2SubnetId': 'subnet-8be41cec', - }, - ec2_keyname='mykey', - hadoop_version='2.4.0', - - name='My jobflow', - log_uri='s3://some_bucket/jobflow_logs', - keep_alive=True, - master_instance_type='c1.medium', - slave_instance_type='c1.medium', - num_instances=2, - - availability_zone='us-west-2b', - - job_flow_role='EMR_EC2_DefaultRole', - service_role='EMR_DefaultRole', - visible_to_all_users=True, - )) - - cluster_id = conn.run_jobflow(**args) - jf = conn.describe_jobflow(cluster_id) - jf.amiversion.should.equal(args['ami_version']) - jf.bootstrapactions.should.equal(None) - jf.creationdatetime.should.be.a(six.string_types) - jf.should.have.property('laststatechangereason') - jf.readydatetime.should.be.a(six.string_types) - jf.startdatetime.should.be.a(six.string_types) - jf.state.should.equal('WAITING') - - jf.ec2keyname.should.equal(args['ec2_keyname']) - # Ec2SubnetId - jf.hadoopversion.should.equal(args['hadoop_version']) - int(jf.instancecount).should.equal(2) - - for ig in jf.instancegroups: - ig.creationdatetime.should.be.a(six.string_types) - # ig.enddatetime.should.be.a(six.string_types) - ig.should.have.property('instancegroupid').being.a(six.string_types) - int(ig.instancerequestcount).should.equal(1) - ig.instancerole.should.be.within(['MASTER', 'CORE']) - int(ig.instancerunningcount).should.equal(1) - ig.instancetype.should.equal('c1.medium') - ig.laststatechangereason.should.be.a(six.string_types) - ig.market.should.equal('ON_DEMAND') - ig.name.should.be.a(six.string_types) - ig.readydatetime.should.be.a(six.string_types) - ig.startdatetime.should.be.a(six.string_types) - ig.state.should.equal('RUNNING') - - jf.keepjobflowalivewhennosteps.should.equal('true') - jf.masterinstanceid.should.be.a(six.string_types) - jf.masterinstancetype.should.equal(args['master_instance_type']) - jf.masterpublicdnsname.should.be.a(six.string_types) - int(jf.normalizedinstancehours).should.equal(0) - jf.availabilityzone.should.equal(args['availability_zone']) - jf.slaveinstancetype.should.equal(args['slave_instance_type']) - 
jf.terminationprotected.should.equal('false') - - jf.jobflowid.should.equal(cluster_id) - # jf.jobflowrole.should.equal(args['job_flow_role']) - jf.loguri.should.equal(args['log_uri']) - jf.name.should.equal(args['name']) - # jf.servicerole.should.equal(args['service_role']) - - jf.steps.should.have.length_of(0) - - list(i.value for i in jf.supported_products).should.equal([]) - jf.visibletoallusers.should.equal('true') - - -@mock_emr_deprecated -def test_list_clusters(): - conn = boto.connect_emr() - args = run_jobflow_args.copy() - expected = {} - - for idx in range(40): - cluster_name = 'jobflow' + str(idx) - args['name'] = cluster_name - cluster_id = conn.run_jobflow(**args) - expected[cluster_id] = { - 'id': cluster_id, - 'name': cluster_name, - 'normalizedinstancehours': '0', - 'state': 'WAITING' - } - - # need sleep since it appears the timestamp is always rounded to - # the nearest second internally - time.sleep(1) - timestamp = datetime.now(pytz.utc) - time.sleep(1) - - for idx in range(40, 70): - cluster_name = 'jobflow' + str(idx) - args['name'] = cluster_name - cluster_id = conn.run_jobflow(**args) - conn.terminate_jobflow(cluster_id) - expected[cluster_id] = { - 'id': cluster_id, - 'name': cluster_name, - 'normalizedinstancehours': '0', - 'state': 'TERMINATED' - } - - args = {} - while 1: - resp = conn.list_clusters(**args) - clusters = resp.clusters - len(clusters).should.be.lower_than_or_equal_to(50) - for x in clusters: - y = expected[x.id] - x.id.should.equal(y['id']) - x.name.should.equal(y['name']) - x.normalizedinstancehours.should.equal( - y['normalizedinstancehours']) - x.status.state.should.equal(y['state']) - x.status.timeline.creationdatetime.should.be.a(six.string_types) - if y['state'] == 'TERMINATED': - x.status.timeline.enddatetime.should.be.a(six.string_types) - else: - x.status.timeline.shouldnt.have.property('enddatetime') - x.status.timeline.readydatetime.should.be.a(six.string_types) - if not hasattr(resp, 'marker'): - break - args = {'marker': resp.marker} - - resp = conn.list_clusters(cluster_states=['TERMINATED']) - resp.clusters.should.have.length_of(30) - for x in resp.clusters: - x.status.state.should.equal('TERMINATED') - - resp = conn.list_clusters(created_before=timestamp) - resp.clusters.should.have.length_of(40) - - resp = conn.list_clusters(created_after=timestamp) - resp.clusters.should.have.length_of(30) - - -@mock_emr_deprecated -def test_run_jobflow(): - conn = boto.connect_emr() - args = run_jobflow_args.copy() - job_id = conn.run_jobflow(**args) - job_flow = conn.describe_jobflow(job_id) - job_flow.state.should.equal('WAITING') - job_flow.jobflowid.should.equal(job_id) - job_flow.name.should.equal(args['name']) - job_flow.masterinstancetype.should.equal(args['master_instance_type']) - job_flow.slaveinstancetype.should.equal(args['slave_instance_type']) - job_flow.loguri.should.equal(args['log_uri']) - job_flow.visibletoallusers.should.equal('false') - int(job_flow.normalizedinstancehours).should.equal(0) - job_flow.steps.should.have.length_of(0) - - -@mock_emr_deprecated -def test_run_jobflow_in_multiple_regions(): - regions = {} - for region in ['us-east-1', 'eu-west-1']: - conn = boto.emr.connect_to_region(region) - args = run_jobflow_args.copy() - args['name'] = region - cluster_id = conn.run_jobflow(**args) - regions[region] = {'conn': conn, 'cluster_id': cluster_id} - - for region in regions.keys(): - conn = regions[region]['conn'] - jf = conn.describe_jobflow(regions[region]['cluster_id']) - jf.name.should.equal(region) - - 
-@requires_boto_gte("2.8") -@mock_emr_deprecated -def test_run_jobflow_with_new_params(): - # Test that run_jobflow works with newer params - conn = boto.connect_emr() - conn.run_jobflow(**run_jobflow_args) - - -@requires_boto_gte("2.8") -@mock_emr_deprecated -def test_run_jobflow_with_visible_to_all_users(): - conn = boto.connect_emr() - for expected in (True, False): - job_id = conn.run_jobflow( - visible_to_all_users=expected, - **run_jobflow_args - ) - job_flow = conn.describe_jobflow(job_id) - job_flow.visibletoallusers.should.equal(str(expected).lower()) - - -@requires_boto_gte("2.8") -@mock_emr_deprecated -def test_run_jobflow_with_instance_groups(): - input_groups = dict((g.name, g) for g in input_instance_groups) - conn = boto.connect_emr() - job_id = conn.run_jobflow(instance_groups=input_instance_groups, - **run_jobflow_args) - job_flow = conn.describe_jobflow(job_id) - int(job_flow.instancecount).should.equal( - sum(g.num_instances for g in input_instance_groups)) - for instance_group in job_flow.instancegroups: - expected = input_groups[instance_group.name] - instance_group.should.have.property('instancegroupid') - int(instance_group.instancerunningcount).should.equal( - expected.num_instances) - instance_group.instancerole.should.equal(expected.role) - instance_group.instancetype.should.equal(expected.type) - instance_group.market.should.equal(expected.market) - if hasattr(expected, 'bidprice'): - instance_group.bidprice.should.equal(expected.bidprice) - - -@requires_boto_gte("2.8") -@mock_emr_deprecated -def test_set_termination_protection(): - conn = boto.connect_emr() - job_id = conn.run_jobflow(**run_jobflow_args) - job_flow = conn.describe_jobflow(job_id) - job_flow.terminationprotected.should.equal('false') - - conn.set_termination_protection(job_id, True) - job_flow = conn.describe_jobflow(job_id) - job_flow.terminationprotected.should.equal('true') - - conn.set_termination_protection(job_id, False) - job_flow = conn.describe_jobflow(job_id) - job_flow.terminationprotected.should.equal('false') - - -@requires_boto_gte("2.8") -@mock_emr_deprecated -def test_set_visible_to_all_users(): - conn = boto.connect_emr() - args = run_jobflow_args.copy() - args['visible_to_all_users'] = False - job_id = conn.run_jobflow(**args) - job_flow = conn.describe_jobflow(job_id) - job_flow.visibletoallusers.should.equal('false') - - conn.set_visible_to_all_users(job_id, True) - job_flow = conn.describe_jobflow(job_id) - job_flow.visibletoallusers.should.equal('true') - - conn.set_visible_to_all_users(job_id, False) - job_flow = conn.describe_jobflow(job_id) - job_flow.visibletoallusers.should.equal('false') - - -@mock_emr_deprecated -def test_terminate_jobflow(): - conn = boto.connect_emr() - job_id = conn.run_jobflow(**run_jobflow_args) - flow = conn.describe_jobflows()[0] - flow.state.should.equal('WAITING') - - conn.terminate_jobflow(job_id) - flow = conn.describe_jobflows()[0] - flow.state.should.equal('TERMINATED') - - -# testing multiple end points for each feature - -@mock_emr_deprecated -def test_bootstrap_actions(): - bootstrap_actions = [ - BootstrapAction( - name='bs1', - path='path/to/script', - bootstrap_action_args=['arg1', 'arg2&arg3']), - BootstrapAction( - name='bs2', - path='path/to/anotherscript', - bootstrap_action_args=[]) - ] - - conn = boto.connect_emr() - cluster_id = conn.run_jobflow( - bootstrap_actions=bootstrap_actions, - **run_jobflow_args - ) - - jf = conn.describe_jobflow(cluster_id) - for x, y in zip(jf.bootstrapactions, bootstrap_actions): - 
x.name.should.equal(y.name) - x.path.should.equal(y.path) - list(o.value for o in x.args).should.equal(y.args()) - - resp = conn.list_bootstrap_actions(cluster_id) - for i, y in enumerate(bootstrap_actions): - x = resp.actions[i] - x.name.should.equal(y.name) - x.scriptpath.should.equal(y.path) - list(arg.value for arg in x.args).should.equal(y.args()) - - -@mock_emr_deprecated -def test_instance_groups(): - input_groups = dict((g.name, g) for g in input_instance_groups) - - conn = boto.connect_emr() - args = run_jobflow_args.copy() - for key in ['master_instance_type', 'slave_instance_type', 'num_instances']: - del args[key] - args['instance_groups'] = input_instance_groups[:2] - job_id = conn.run_jobflow(**args) - - jf = conn.describe_jobflow(job_id) - base_instance_count = int(jf.instancecount) - - conn.add_instance_groups(job_id, input_instance_groups[2:]) - - jf = conn.describe_jobflow(job_id) - int(jf.instancecount).should.equal( - sum(g.num_instances for g in input_instance_groups)) - for x in jf.instancegroups: - y = input_groups[x.name] - if hasattr(y, 'bidprice'): - x.bidprice.should.equal(y.bidprice) - x.creationdatetime.should.be.a(six.string_types) - # x.enddatetime.should.be.a(six.string_types) - x.should.have.property('instancegroupid') - int(x.instancerequestcount).should.equal(y.num_instances) - x.instancerole.should.equal(y.role) - int(x.instancerunningcount).should.equal(y.num_instances) - x.instancetype.should.equal(y.type) - x.laststatechangereason.should.be.a(six.string_types) - x.market.should.equal(y.market) - x.name.should.be.a(six.string_types) - x.readydatetime.should.be.a(six.string_types) - x.startdatetime.should.be.a(six.string_types) - x.state.should.equal('RUNNING') - - for x in conn.list_instance_groups(job_id).instancegroups: - y = input_groups[x.name] - if hasattr(y, 'bidprice'): - x.bidprice.should.equal(y.bidprice) - # Configurations - # EbsBlockDevices - # EbsOptimized - x.should.have.property('id') - x.instancegrouptype.should.equal(y.role) - x.instancetype.should.equal(y.type) - x.market.should.equal(y.market) - x.name.should.equal(y.name) - int(x.requestedinstancecount).should.equal(y.num_instances) - int(x.runninginstancecount).should.equal(y.num_instances) - # ShrinkPolicy - x.status.state.should.equal('RUNNING') - x.status.statechangereason.code.should.be.a(six.string_types) - x.status.statechangereason.message.should.be.a(six.string_types) - x.status.timeline.creationdatetime.should.be.a(six.string_types) - # x.status.timeline.enddatetime.should.be.a(six.string_types) - x.status.timeline.readydatetime.should.be.a(six.string_types) - - igs = dict((g.name, g) for g in jf.instancegroups) - - conn.modify_instance_groups( - [igs['task-1'].instancegroupid, igs['task-2'].instancegroupid], - [2, 3]) - jf = conn.describe_jobflow(job_id) - int(jf.instancecount).should.equal(base_instance_count + 5) - igs = dict((g.name, g) for g in jf.instancegroups) - int(igs['task-1'].instancerunningcount).should.equal(2) - int(igs['task-2'].instancerunningcount).should.equal(3) - - -@mock_emr_deprecated -def test_steps(): - input_steps = [ - StreamingStep( - name='My wordcount example', - mapper='s3n://elasticmapreduce/samples/wordcount/wordSplitter.py', - reducer='aggregate', - input='s3n://elasticmapreduce/samples/wordcount/input', - output='s3n://output_bucket/output/wordcount_output'), - StreamingStep( - name='My wordcount example & co.', - mapper='s3n://elasticmapreduce/samples/wordcount/wordSplitter2.py', - reducer='aggregate', - 
input='s3n://elasticmapreduce/samples/wordcount/input2', - output='s3n://output_bucket/output/wordcount_output2') - ] - - # TODO: implementation and test for cancel_steps - - conn = boto.connect_emr() - cluster_id = conn.run_jobflow( - steps=[input_steps[0]], - **run_jobflow_args) - - jf = conn.describe_jobflow(cluster_id) - jf.steps.should.have.length_of(1) - - conn.add_jobflow_steps(cluster_id, [input_steps[1]]) - - jf = conn.describe_jobflow(cluster_id) - jf.steps.should.have.length_of(2) - for step in jf.steps: - step.actiononfailure.should.equal('TERMINATE_JOB_FLOW') - list(arg.value for arg in step.args).should.have.length_of(8) - step.creationdatetime.should.be.a(six.string_types) - # step.enddatetime.should.be.a(six.string_types) - step.jar.should.equal( - '/home/hadoop/contrib/streaming/hadoop-streaming.jar') - step.laststatechangereason.should.be.a(six.string_types) - step.mainclass.should.equal('') - step.name.should.be.a(six.string_types) - # step.readydatetime.should.be.a(six.string_types) - # step.startdatetime.should.be.a(six.string_types) - step.state.should.be.within(['STARTING', 'PENDING']) - - expected = dict((s.name, s) for s in input_steps) - - steps = conn.list_steps(cluster_id).steps - for x in steps: - y = expected[x.name] - # actiononfailure - list(arg.value for arg in x.config.args).should.equal([ - '-mapper', y.mapper, - '-reducer', y.reducer, - '-input', y.input, - '-output', y.output, - ]) - x.config.jar.should.equal( - '/home/hadoop/contrib/streaming/hadoop-streaming.jar') - x.config.mainclass.should.equal('') - # properties - x.should.have.property('id').should.be.a(six.string_types) - x.name.should.equal(y.name) - x.status.state.should.be.within(['STARTING', 'PENDING']) - # x.status.statechangereason - x.status.timeline.creationdatetime.should.be.a(six.string_types) - # x.status.timeline.enddatetime.should.be.a(six.string_types) - # x.status.timeline.startdatetime.should.be.a(six.string_types) - - x = conn.describe_step(cluster_id, x.id) - list(arg.value for arg in x.config.args).should.equal([ - '-mapper', y.mapper, - '-reducer', y.reducer, - '-input', y.input, - '-output', y.output, - ]) - x.config.jar.should.equal( - '/home/hadoop/contrib/streaming/hadoop-streaming.jar') - x.config.mainclass.should.equal('') - # properties - x.should.have.property('id').should.be.a(six.string_types) - x.name.should.equal(y.name) - x.status.state.should.be.within(['STARTING', 'PENDING']) - # x.status.statechangereason - x.status.timeline.creationdatetime.should.be.a(six.string_types) - # x.status.timeline.enddatetime.should.be.a(six.string_types) - # x.status.timeline.startdatetime.should.be.a(six.string_types) - - @requires_boto_gte('2.39') - def test_list_steps_with_states(): - # boto's list_steps prior to 2.39 has a bug that ignores - # step_states argument. 
- steps = conn.list_steps(cluster_id).steps - step_id = steps[0].id - steps = conn.list_steps(cluster_id, step_states=['STARTING']).steps - steps.should.have.length_of(1) - steps[0].id.should.equal(step_id) - test_list_steps_with_states() - - -@mock_emr_deprecated -def test_tags(): - input_tags = {"tag1": "val1", "tag2": "val2"} - - conn = boto.connect_emr() - cluster_id = conn.run_jobflow(**run_jobflow_args) - - conn.add_tags(cluster_id, input_tags) - cluster = conn.describe_cluster(cluster_id) - cluster.tags.should.have.length_of(2) - dict((t.key, t.value) for t in cluster.tags).should.equal(input_tags) - - conn.remove_tags(cluster_id, list(input_tags.keys())) - cluster = conn.describe_cluster(cluster_id) - cluster.tags.should.have.length_of(0) +from __future__ import unicode_literals +import time +from datetime import datetime + +import boto +import pytz +from boto.emr.bootstrap_action import BootstrapAction +from boto.emr.instance_group import InstanceGroup +from boto.emr.step import StreamingStep + +import six +import sure # noqa + +from moto import mock_emr_deprecated +from tests.helpers import requires_boto_gte + + +run_jobflow_args = dict( + job_flow_role='EMR_EC2_DefaultRole', + keep_alive=True, + log_uri='s3://some_bucket/jobflow_logs', + master_instance_type='c1.medium', + name='My jobflow', + num_instances=2, + service_role='EMR_DefaultRole', + slave_instance_type='c1.medium', +) + + +input_instance_groups = [ + InstanceGroup(1, 'MASTER', 'c1.medium', 'ON_DEMAND', 'master'), + InstanceGroup(3, 'CORE', 'c1.medium', 'ON_DEMAND', 'core'), + InstanceGroup(6, 'TASK', 'c1.large', 'SPOT', 'task-1', '0.07'), + InstanceGroup(10, 'TASK', 'c1.xlarge', 'SPOT', 'task-2', '0.05'), +] + + +@mock_emr_deprecated +def test_describe_cluster(): + conn = boto.connect_emr() + args = run_jobflow_args.copy() + args.update(dict( + api_params={ + 'Applications.member.1.Name': 'Spark', + 'Applications.member.1.Version': '2.4.2', + 'Configurations.member.1.Classification': 'yarn-site', + 'Configurations.member.1.Properties.entry.1.key': 'someproperty', + 'Configurations.member.1.Properties.entry.1.value': 'somevalue', + 'Configurations.member.1.Properties.entry.2.key': 'someotherproperty', + 'Configurations.member.1.Properties.entry.2.value': 'someothervalue', + 'Instances.EmrManagedMasterSecurityGroup': 'master-security-group', + 'Instances.Ec2SubnetId': 'subnet-8be41cec', + }, + availability_zone='us-east-2b', + ec2_keyname='mykey', + job_flow_role='EMR_EC2_DefaultRole', + keep_alive=False, + log_uri='s3://some_bucket/jobflow_logs', + name='My jobflow', + service_role='EMR_DefaultRole', + visible_to_all_users=True, + )) + cluster_id = conn.run_jobflow(**args) + input_tags = {'tag1': 'val1', 'tag2': 'val2'} + conn.add_tags(cluster_id, input_tags) + + cluster = conn.describe_cluster(cluster_id) + cluster.applications[0].name.should.equal('Spark') + cluster.applications[0].version.should.equal('2.4.2') + cluster.autoterminate.should.equal('true') + + # configurations appear not be supplied as attributes? 
+ + attrs = cluster.ec2instanceattributes + # AdditionalMasterSecurityGroups + # AdditionalSlaveSecurityGroups + attrs.ec2availabilityzone.should.equal(args['availability_zone']) + attrs.ec2keyname.should.equal(args['ec2_keyname']) + attrs.ec2subnetid.should.equal(args['api_params']['Instances.Ec2SubnetId']) + # EmrManagedMasterSecurityGroups + # EmrManagedSlaveSecurityGroups + attrs.iaminstanceprofile.should.equal(args['job_flow_role']) + # ServiceAccessSecurityGroup + + cluster.id.should.equal(cluster_id) + cluster.loguri.should.equal(args['log_uri']) + cluster.masterpublicdnsname.should.be.a(six.string_types) + cluster.name.should.equal(args['name']) + int(cluster.normalizedinstancehours).should.equal(0) + # cluster.release_label + cluster.shouldnt.have.property('requestedamiversion') + cluster.runningamiversion.should.equal('1.0.0') + # cluster.securityconfiguration + cluster.servicerole.should.equal(args['service_role']) + + cluster.status.state.should.equal('TERMINATED') + cluster.status.statechangereason.message.should.be.a(six.string_types) + cluster.status.statechangereason.code.should.be.a(six.string_types) + cluster.status.timeline.creationdatetime.should.be.a(six.string_types) + # cluster.status.timeline.enddatetime.should.be.a(six.string_types) + # cluster.status.timeline.readydatetime.should.be.a(six.string_types) + + dict((item.key, item.value) + for item in cluster.tags).should.equal(input_tags) + + cluster.terminationprotected.should.equal('false') + cluster.visibletoallusers.should.equal('true') + + +@mock_emr_deprecated +def test_describe_jobflows(): + conn = boto.connect_emr() + args = run_jobflow_args.copy() + expected = {} + + for idx in range(4): + cluster_name = 'cluster' + str(idx) + args['name'] = cluster_name + cluster_id = conn.run_jobflow(**args) + expected[cluster_id] = { + 'id': cluster_id, + 'name': cluster_name, + 'state': 'WAITING' + } + + # need sleep since it appears the timestamp is always rounded to + # the nearest second internally + time.sleep(1) + timestamp = datetime.now(pytz.utc) + time.sleep(1) + + for idx in range(4, 6): + cluster_name = 'cluster' + str(idx) + args['name'] = cluster_name + cluster_id = conn.run_jobflow(**args) + conn.terminate_jobflow(cluster_id) + expected[cluster_id] = { + 'id': cluster_id, + 'name': cluster_name, + 'state': 'TERMINATED' + } + jobs = conn.describe_jobflows() + jobs.should.have.length_of(6) + + for cluster_id, y in expected.items(): + resp = conn.describe_jobflows(jobflow_ids=[cluster_id]) + resp.should.have.length_of(1) + resp[0].jobflowid.should.equal(cluster_id) + + resp = conn.describe_jobflows(states=['WAITING']) + resp.should.have.length_of(4) + for x in resp: + x.state.should.equal('WAITING') + + resp = conn.describe_jobflows(created_before=timestamp) + resp.should.have.length_of(4) + + resp = conn.describe_jobflows(created_after=timestamp) + resp.should.have.length_of(2) + + +@mock_emr_deprecated +def test_describe_jobflow(): + conn = boto.connect_emr() + args = run_jobflow_args.copy() + args.update(dict( + ami_version='3.8.1', + api_params={ + #'Applications.member.1.Name': 'Spark', + #'Applications.member.1.Version': '2.4.2', + #'Configurations.member.1.Classification': 'yarn-site', + #'Configurations.member.1.Properties.entry.1.key': 'someproperty', + #'Configurations.member.1.Properties.entry.1.value': 'somevalue', + #'Instances.EmrManagedMasterSecurityGroup': 'master-security-group', + 'Instances.Ec2SubnetId': 'subnet-8be41cec', + }, + ec2_keyname='mykey', + hadoop_version='2.4.0', + + name='My 
jobflow', + log_uri='s3://some_bucket/jobflow_logs', + keep_alive=True, + master_instance_type='c1.medium', + slave_instance_type='c1.medium', + num_instances=2, + + availability_zone='us-west-2b', + + job_flow_role='EMR_EC2_DefaultRole', + service_role='EMR_DefaultRole', + visible_to_all_users=True, + )) + + cluster_id = conn.run_jobflow(**args) + jf = conn.describe_jobflow(cluster_id) + jf.amiversion.should.equal(args['ami_version']) + jf.bootstrapactions.should.equal(None) + jf.creationdatetime.should.be.a(six.string_types) + jf.should.have.property('laststatechangereason') + jf.readydatetime.should.be.a(six.string_types) + jf.startdatetime.should.be.a(six.string_types) + jf.state.should.equal('WAITING') + + jf.ec2keyname.should.equal(args['ec2_keyname']) + # Ec2SubnetId + jf.hadoopversion.should.equal(args['hadoop_version']) + int(jf.instancecount).should.equal(2) + + for ig in jf.instancegroups: + ig.creationdatetime.should.be.a(six.string_types) + # ig.enddatetime.should.be.a(six.string_types) + ig.should.have.property('instancegroupid').being.a(six.string_types) + int(ig.instancerequestcount).should.equal(1) + ig.instancerole.should.be.within(['MASTER', 'CORE']) + int(ig.instancerunningcount).should.equal(1) + ig.instancetype.should.equal('c1.medium') + ig.laststatechangereason.should.be.a(six.string_types) + ig.market.should.equal('ON_DEMAND') + ig.name.should.be.a(six.string_types) + ig.readydatetime.should.be.a(six.string_types) + ig.startdatetime.should.be.a(six.string_types) + ig.state.should.equal('RUNNING') + + jf.keepjobflowalivewhennosteps.should.equal('true') + jf.masterinstanceid.should.be.a(six.string_types) + jf.masterinstancetype.should.equal(args['master_instance_type']) + jf.masterpublicdnsname.should.be.a(six.string_types) + int(jf.normalizedinstancehours).should.equal(0) + jf.availabilityzone.should.equal(args['availability_zone']) + jf.slaveinstancetype.should.equal(args['slave_instance_type']) + jf.terminationprotected.should.equal('false') + + jf.jobflowid.should.equal(cluster_id) + # jf.jobflowrole.should.equal(args['job_flow_role']) + jf.loguri.should.equal(args['log_uri']) + jf.name.should.equal(args['name']) + # jf.servicerole.should.equal(args['service_role']) + + jf.steps.should.have.length_of(0) + + list(i.value for i in jf.supported_products).should.equal([]) + jf.visibletoallusers.should.equal('true') + + +@mock_emr_deprecated +def test_list_clusters(): + conn = boto.connect_emr() + args = run_jobflow_args.copy() + expected = {} + + for idx in range(40): + cluster_name = 'jobflow' + str(idx) + args['name'] = cluster_name + cluster_id = conn.run_jobflow(**args) + expected[cluster_id] = { + 'id': cluster_id, + 'name': cluster_name, + 'normalizedinstancehours': '0', + 'state': 'WAITING' + } + + # need sleep since it appears the timestamp is always rounded to + # the nearest second internally + time.sleep(1) + timestamp = datetime.now(pytz.utc) + time.sleep(1) + + for idx in range(40, 70): + cluster_name = 'jobflow' + str(idx) + args['name'] = cluster_name + cluster_id = conn.run_jobflow(**args) + conn.terminate_jobflow(cluster_id) + expected[cluster_id] = { + 'id': cluster_id, + 'name': cluster_name, + 'normalizedinstancehours': '0', + 'state': 'TERMINATED' + } + + args = {} + while 1: + resp = conn.list_clusters(**args) + clusters = resp.clusters + len(clusters).should.be.lower_than_or_equal_to(50) + for x in clusters: + y = expected[x.id] + x.id.should.equal(y['id']) + x.name.should.equal(y['name']) + x.normalizedinstancehours.should.equal( + 
y['normalizedinstancehours']) + x.status.state.should.equal(y['state']) + x.status.timeline.creationdatetime.should.be.a(six.string_types) + if y['state'] == 'TERMINATED': + x.status.timeline.enddatetime.should.be.a(six.string_types) + else: + x.status.timeline.shouldnt.have.property('enddatetime') + x.status.timeline.readydatetime.should.be.a(six.string_types) + if not hasattr(resp, 'marker'): + break + args = {'marker': resp.marker} + + resp = conn.list_clusters(cluster_states=['TERMINATED']) + resp.clusters.should.have.length_of(30) + for x in resp.clusters: + x.status.state.should.equal('TERMINATED') + + resp = conn.list_clusters(created_before=timestamp) + resp.clusters.should.have.length_of(40) + + resp = conn.list_clusters(created_after=timestamp) + resp.clusters.should.have.length_of(30) + + +@mock_emr_deprecated +def test_run_jobflow(): + conn = boto.connect_emr() + args = run_jobflow_args.copy() + job_id = conn.run_jobflow(**args) + job_flow = conn.describe_jobflow(job_id) + job_flow.state.should.equal('WAITING') + job_flow.jobflowid.should.equal(job_id) + job_flow.name.should.equal(args['name']) + job_flow.masterinstancetype.should.equal(args['master_instance_type']) + job_flow.slaveinstancetype.should.equal(args['slave_instance_type']) + job_flow.loguri.should.equal(args['log_uri']) + job_flow.visibletoallusers.should.equal('false') + int(job_flow.normalizedinstancehours).should.equal(0) + job_flow.steps.should.have.length_of(0) + + +@mock_emr_deprecated +def test_run_jobflow_in_multiple_regions(): + regions = {} + for region in ['us-east-1', 'eu-west-1']: + conn = boto.emr.connect_to_region(region) + args = run_jobflow_args.copy() + args['name'] = region + cluster_id = conn.run_jobflow(**args) + regions[region] = {'conn': conn, 'cluster_id': cluster_id} + + for region in regions.keys(): + conn = regions[region]['conn'] + jf = conn.describe_jobflow(regions[region]['cluster_id']) + jf.name.should.equal(region) + + +@requires_boto_gte("2.8") +@mock_emr_deprecated +def test_run_jobflow_with_new_params(): + # Test that run_jobflow works with newer params + conn = boto.connect_emr() + conn.run_jobflow(**run_jobflow_args) + + +@requires_boto_gte("2.8") +@mock_emr_deprecated +def test_run_jobflow_with_visible_to_all_users(): + conn = boto.connect_emr() + for expected in (True, False): + job_id = conn.run_jobflow( + visible_to_all_users=expected, + **run_jobflow_args + ) + job_flow = conn.describe_jobflow(job_id) + job_flow.visibletoallusers.should.equal(str(expected).lower()) + + +@requires_boto_gte("2.8") +@mock_emr_deprecated +def test_run_jobflow_with_instance_groups(): + input_groups = dict((g.name, g) for g in input_instance_groups) + conn = boto.connect_emr() + job_id = conn.run_jobflow(instance_groups=input_instance_groups, + **run_jobflow_args) + job_flow = conn.describe_jobflow(job_id) + int(job_flow.instancecount).should.equal( + sum(g.num_instances for g in input_instance_groups)) + for instance_group in job_flow.instancegroups: + expected = input_groups[instance_group.name] + instance_group.should.have.property('instancegroupid') + int(instance_group.instancerunningcount).should.equal( + expected.num_instances) + instance_group.instancerole.should.equal(expected.role) + instance_group.instancetype.should.equal(expected.type) + instance_group.market.should.equal(expected.market) + if hasattr(expected, 'bidprice'): + instance_group.bidprice.should.equal(expected.bidprice) + + +@requires_boto_gte("2.8") +@mock_emr_deprecated +def test_set_termination_protection(): + conn = 
boto.connect_emr() + job_id = conn.run_jobflow(**run_jobflow_args) + job_flow = conn.describe_jobflow(job_id) + job_flow.terminationprotected.should.equal('false') + + conn.set_termination_protection(job_id, True) + job_flow = conn.describe_jobflow(job_id) + job_flow.terminationprotected.should.equal('true') + + conn.set_termination_protection(job_id, False) + job_flow = conn.describe_jobflow(job_id) + job_flow.terminationprotected.should.equal('false') + + +@requires_boto_gte("2.8") +@mock_emr_deprecated +def test_set_visible_to_all_users(): + conn = boto.connect_emr() + args = run_jobflow_args.copy() + args['visible_to_all_users'] = False + job_id = conn.run_jobflow(**args) + job_flow = conn.describe_jobflow(job_id) + job_flow.visibletoallusers.should.equal('false') + + conn.set_visible_to_all_users(job_id, True) + job_flow = conn.describe_jobflow(job_id) + job_flow.visibletoallusers.should.equal('true') + + conn.set_visible_to_all_users(job_id, False) + job_flow = conn.describe_jobflow(job_id) + job_flow.visibletoallusers.should.equal('false') + + +@mock_emr_deprecated +def test_terminate_jobflow(): + conn = boto.connect_emr() + job_id = conn.run_jobflow(**run_jobflow_args) + flow = conn.describe_jobflows()[0] + flow.state.should.equal('WAITING') + + conn.terminate_jobflow(job_id) + flow = conn.describe_jobflows()[0] + flow.state.should.equal('TERMINATED') + + +# testing multiple end points for each feature + +@mock_emr_deprecated +def test_bootstrap_actions(): + bootstrap_actions = [ + BootstrapAction( + name='bs1', + path='path/to/script', + bootstrap_action_args=['arg1', 'arg2&arg3']), + BootstrapAction( + name='bs2', + path='path/to/anotherscript', + bootstrap_action_args=[]) + ] + + conn = boto.connect_emr() + cluster_id = conn.run_jobflow( + bootstrap_actions=bootstrap_actions, + **run_jobflow_args + ) + + jf = conn.describe_jobflow(cluster_id) + for x, y in zip(jf.bootstrapactions, bootstrap_actions): + x.name.should.equal(y.name) + x.path.should.equal(y.path) + list(o.value for o in x.args).should.equal(y.args()) + + resp = conn.list_bootstrap_actions(cluster_id) + for i, y in enumerate(bootstrap_actions): + x = resp.actions[i] + x.name.should.equal(y.name) + x.scriptpath.should.equal(y.path) + list(arg.value for arg in x.args).should.equal(y.args()) + + +@mock_emr_deprecated +def test_instance_groups(): + input_groups = dict((g.name, g) for g in input_instance_groups) + + conn = boto.connect_emr() + args = run_jobflow_args.copy() + for key in ['master_instance_type', 'slave_instance_type', 'num_instances']: + del args[key] + args['instance_groups'] = input_instance_groups[:2] + job_id = conn.run_jobflow(**args) + + jf = conn.describe_jobflow(job_id) + base_instance_count = int(jf.instancecount) + + conn.add_instance_groups(job_id, input_instance_groups[2:]) + + jf = conn.describe_jobflow(job_id) + int(jf.instancecount).should.equal( + sum(g.num_instances for g in input_instance_groups)) + for x in jf.instancegroups: + y = input_groups[x.name] + if hasattr(y, 'bidprice'): + x.bidprice.should.equal(y.bidprice) + x.creationdatetime.should.be.a(six.string_types) + # x.enddatetime.should.be.a(six.string_types) + x.should.have.property('instancegroupid') + int(x.instancerequestcount).should.equal(y.num_instances) + x.instancerole.should.equal(y.role) + int(x.instancerunningcount).should.equal(y.num_instances) + x.instancetype.should.equal(y.type) + x.laststatechangereason.should.be.a(six.string_types) + x.market.should.equal(y.market) + x.name.should.be.a(six.string_types) + 
x.readydatetime.should.be.a(six.string_types) + x.startdatetime.should.be.a(six.string_types) + x.state.should.equal('RUNNING') + + for x in conn.list_instance_groups(job_id).instancegroups: + y = input_groups[x.name] + if hasattr(y, 'bidprice'): + x.bidprice.should.equal(y.bidprice) + # Configurations + # EbsBlockDevices + # EbsOptimized + x.should.have.property('id') + x.instancegrouptype.should.equal(y.role) + x.instancetype.should.equal(y.type) + x.market.should.equal(y.market) + x.name.should.equal(y.name) + int(x.requestedinstancecount).should.equal(y.num_instances) + int(x.runninginstancecount).should.equal(y.num_instances) + # ShrinkPolicy + x.status.state.should.equal('RUNNING') + x.status.statechangereason.code.should.be.a(six.string_types) + x.status.statechangereason.message.should.be.a(six.string_types) + x.status.timeline.creationdatetime.should.be.a(six.string_types) + # x.status.timeline.enddatetime.should.be.a(six.string_types) + x.status.timeline.readydatetime.should.be.a(six.string_types) + + igs = dict((g.name, g) for g in jf.instancegroups) + + conn.modify_instance_groups( + [igs['task-1'].instancegroupid, igs['task-2'].instancegroupid], + [2, 3]) + jf = conn.describe_jobflow(job_id) + int(jf.instancecount).should.equal(base_instance_count + 5) + igs = dict((g.name, g) for g in jf.instancegroups) + int(igs['task-1'].instancerunningcount).should.equal(2) + int(igs['task-2'].instancerunningcount).should.equal(3) + + +@mock_emr_deprecated +def test_steps(): + input_steps = [ + StreamingStep( + name='My wordcount example', + mapper='s3n://elasticmapreduce/samples/wordcount/wordSplitter.py', + reducer='aggregate', + input='s3n://elasticmapreduce/samples/wordcount/input', + output='s3n://output_bucket/output/wordcount_output'), + StreamingStep( + name='My wordcount example & co.', + mapper='s3n://elasticmapreduce/samples/wordcount/wordSplitter2.py', + reducer='aggregate', + input='s3n://elasticmapreduce/samples/wordcount/input2', + output='s3n://output_bucket/output/wordcount_output2') + ] + + # TODO: implementation and test for cancel_steps + + conn = boto.connect_emr() + cluster_id = conn.run_jobflow( + steps=[input_steps[0]], + **run_jobflow_args) + + jf = conn.describe_jobflow(cluster_id) + jf.steps.should.have.length_of(1) + + conn.add_jobflow_steps(cluster_id, [input_steps[1]]) + + jf = conn.describe_jobflow(cluster_id) + jf.steps.should.have.length_of(2) + for step in jf.steps: + step.actiononfailure.should.equal('TERMINATE_JOB_FLOW') + list(arg.value for arg in step.args).should.have.length_of(8) + step.creationdatetime.should.be.a(six.string_types) + # step.enddatetime.should.be.a(six.string_types) + step.jar.should.equal( + '/home/hadoop/contrib/streaming/hadoop-streaming.jar') + step.laststatechangereason.should.be.a(six.string_types) + step.mainclass.should.equal('') + step.name.should.be.a(six.string_types) + # step.readydatetime.should.be.a(six.string_types) + # step.startdatetime.should.be.a(six.string_types) + step.state.should.be.within(['STARTING', 'PENDING']) + + expected = dict((s.name, s) for s in input_steps) + + steps = conn.list_steps(cluster_id).steps + for x in steps: + y = expected[x.name] + # actiononfailure + list(arg.value for arg in x.config.args).should.equal([ + '-mapper', y.mapper, + '-reducer', y.reducer, + '-input', y.input, + '-output', y.output, + ]) + x.config.jar.should.equal( + '/home/hadoop/contrib/streaming/hadoop-streaming.jar') + x.config.mainclass.should.equal('') + # properties + 
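+        # The step id is generated by the moto backend, so the assertion
+        # below checks only its presence and type, not a concrete value.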
x.should.have.property('id').should.be.a(six.string_types) + x.name.should.equal(y.name) + x.status.state.should.be.within(['STARTING', 'PENDING']) + # x.status.statechangereason + x.status.timeline.creationdatetime.should.be.a(six.string_types) + # x.status.timeline.enddatetime.should.be.a(six.string_types) + # x.status.timeline.startdatetime.should.be.a(six.string_types) + + x = conn.describe_step(cluster_id, x.id) + list(arg.value for arg in x.config.args).should.equal([ + '-mapper', y.mapper, + '-reducer', y.reducer, + '-input', y.input, + '-output', y.output, + ]) + x.config.jar.should.equal( + '/home/hadoop/contrib/streaming/hadoop-streaming.jar') + x.config.mainclass.should.equal('') + # properties + x.should.have.property('id').should.be.a(six.string_types) + x.name.should.equal(y.name) + x.status.state.should.be.within(['STARTING', 'PENDING']) + # x.status.statechangereason + x.status.timeline.creationdatetime.should.be.a(six.string_types) + # x.status.timeline.enddatetime.should.be.a(six.string_types) + # x.status.timeline.startdatetime.should.be.a(six.string_types) + + @requires_boto_gte('2.39') + def test_list_steps_with_states(): + # boto's list_steps prior to 2.39 has a bug that ignores + # step_states argument. + steps = conn.list_steps(cluster_id).steps + step_id = steps[0].id + steps = conn.list_steps(cluster_id, step_states=['STARTING']).steps + steps.should.have.length_of(1) + steps[0].id.should.equal(step_id) + test_list_steps_with_states() + + +@mock_emr_deprecated +def test_tags(): + input_tags = {"tag1": "val1", "tag2": "val2"} + + conn = boto.connect_emr() + cluster_id = conn.run_jobflow(**run_jobflow_args) + + conn.add_tags(cluster_id, input_tags) + cluster = conn.describe_cluster(cluster_id) + cluster.tags.should.have.length_of(2) + dict((t.key, t.value) for t in cluster.tags).should.equal(input_tags) + + conn.remove_tags(cluster_id, list(input_tags.keys())) + cluster = conn.describe_cluster(cluster_id) + cluster.tags.should.have.length_of(0) diff --git a/tests/test_emr/test_emr_boto3.py b/tests/test_emr/test_emr_boto3.py index 237ff8bbaa04..28fff455b986 100644 --- a/tests/test_emr/test_emr_boto3.py +++ b/tests/test_emr/test_emr_boto3.py @@ -1,720 +1,720 @@ -# -*- coding: utf-8 -*- -from __future__ import unicode_literals -import time -from copy import deepcopy -from datetime import datetime - -import boto3 -import pytz -import six -import sure # noqa -from botocore.exceptions import ClientError -from nose.tools import assert_raises - -from moto import mock_emr - - -run_job_flow_args = dict( - Instances={ - 'InstanceCount': 3, - 'KeepJobFlowAliveWhenNoSteps': True, - 'MasterInstanceType': 'c3.medium', - 'Placement': {'AvailabilityZone': 'us-east-1a'}, - 'SlaveInstanceType': 'c3.xlarge', - }, - JobFlowRole='EMR_EC2_DefaultRole', - LogUri='s3://mybucket/log', - Name='cluster', - ServiceRole='EMR_DefaultRole', - VisibleToAllUsers=True) - - -input_instance_groups = [ - {'InstanceCount': 1, - 'InstanceRole': 'MASTER', - 'InstanceType': 'c1.medium', - 'Market': 'ON_DEMAND', - 'Name': 'master'}, - {'InstanceCount': 3, - 'InstanceRole': 'CORE', - 'InstanceType': 'c1.medium', - 'Market': 'ON_DEMAND', - 'Name': 'core'}, - {'InstanceCount': 6, - 'InstanceRole': 'TASK', - 'InstanceType': 'c1.large', - 'Market': 'SPOT', - 'Name': 'task-1', - 'BidPrice': '0.07'}, - {'InstanceCount': 10, - 'InstanceRole': 'TASK', - 'InstanceType': 'c1.xlarge', - 'Market': 'SPOT', - 'Name': 'task-2', - 'BidPrice': '0.05'}, -] - - -@mock_emr -def test_describe_cluster(): - client = 
boto3.client('emr', region_name='us-east-1') - - args = deepcopy(run_job_flow_args) - args['Applications'] = [{'Name': 'Spark', 'Version': '2.4.2'}] - args['Configurations'] = [ - {'Classification': 'yarn-site', - 'Properties': {'someproperty': 'somevalue', - 'someotherproperty': 'someothervalue'}}, - {'Classification': 'nested-configs', - 'Properties': {}, - 'Configurations': [ - { - 'Classification': 'nested-config', - 'Properties': { - 'nested-property': 'nested-value' - } - } - ]} - ] - args['Instances']['AdditionalMasterSecurityGroups'] = ['additional-master'] - args['Instances']['AdditionalSlaveSecurityGroups'] = ['additional-slave'] - args['Instances']['Ec2KeyName'] = 'mykey' - args['Instances']['Ec2SubnetId'] = 'subnet-8be41cec' - args['Instances']['EmrManagedMasterSecurityGroup'] = 'master-security-group' - args['Instances']['EmrManagedSlaveSecurityGroup'] = 'slave-security-group' - args['Instances']['KeepJobFlowAliveWhenNoSteps'] = False - args['Instances']['ServiceAccessSecurityGroup'] = 'service-access-security-group' - args['Tags'] = [{'Key': 'tag1', 'Value': 'val1'}, - {'Key': 'tag2', 'Value': 'val2'}] - - cluster_id = client.run_job_flow(**args)['JobFlowId'] - - cl = client.describe_cluster(ClusterId=cluster_id)['Cluster'] - cl['Applications'][0]['Name'].should.equal('Spark') - cl['Applications'][0]['Version'].should.equal('2.4.2') - cl['AutoTerminate'].should.equal(True) - - config = cl['Configurations'][0] - config['Classification'].should.equal('yarn-site') - config['Properties'].should.equal(args['Configurations'][0]['Properties']) - - nested_config = cl['Configurations'][1] - nested_config['Classification'].should.equal('nested-configs') - nested_config['Properties'].should.equal(args['Configurations'][1]['Properties']) - - attrs = cl['Ec2InstanceAttributes'] - attrs['AdditionalMasterSecurityGroups'].should.equal( - args['Instances']['AdditionalMasterSecurityGroups']) - attrs['AdditionalSlaveSecurityGroups'].should.equal( - args['Instances']['AdditionalSlaveSecurityGroups']) - attrs['Ec2AvailabilityZone'].should.equal('us-east-1a') - attrs['Ec2KeyName'].should.equal(args['Instances']['Ec2KeyName']) - attrs['Ec2SubnetId'].should.equal(args['Instances']['Ec2SubnetId']) - attrs['EmrManagedMasterSecurityGroup'].should.equal( - args['Instances']['EmrManagedMasterSecurityGroup']) - attrs['EmrManagedSlaveSecurityGroup'].should.equal( - args['Instances']['EmrManagedSlaveSecurityGroup']) - attrs['IamInstanceProfile'].should.equal(args['JobFlowRole']) - attrs['ServiceAccessSecurityGroup'].should.equal( - args['Instances']['ServiceAccessSecurityGroup']) - cl['Id'].should.equal(cluster_id) - cl['LogUri'].should.equal(args['LogUri']) - cl['MasterPublicDnsName'].should.be.a(six.string_types) - cl['Name'].should.equal(args['Name']) - cl['NormalizedInstanceHours'].should.equal(0) - # cl['ReleaseLabel'].should.equal('emr-5.0.0') - cl.shouldnt.have.key('RequestedAmiVersion') - cl['RunningAmiVersion'].should.equal('1.0.0') - # cl['SecurityConfiguration'].should.be.a(six.string_types) - cl['ServiceRole'].should.equal(args['ServiceRole']) - - status = cl['Status'] - status['State'].should.equal('TERMINATED') - # cluster['Status']['StateChangeReason'] - status['Timeline']['CreationDateTime'].should.be.a('datetime.datetime') - # status['Timeline']['EndDateTime'].should.equal(datetime(2014, 1, 24, 2, 19, 46, tzinfo=pytz.utc)) - status['Timeline']['ReadyDateTime'].should.be.a('datetime.datetime') - - dict((t['Key'], t['Value']) for t in cl['Tags']).should.equal( - dict((t['Key'], t['Value']) 
for t in args['Tags'])) - - cl['TerminationProtected'].should.equal(False) - cl['VisibleToAllUsers'].should.equal(True) - - -@mock_emr -def test_describe_cluster_not_found(): - conn = boto3.client('emr', region_name='us-east-1') - raised = False - try: - cluster = conn.describe_cluster(ClusterId='DummyId') - except ClientError as e: - if e.response['Error']['Code'] == "ResourceNotFoundException": - raised = True - raised.should.equal(True) - - -@mock_emr -def test_describe_job_flows(): - client = boto3.client('emr', region_name='us-east-1') - args = deepcopy(run_job_flow_args) - expected = {} - - for idx in range(4): - cluster_name = 'cluster' + str(idx) - args['Name'] = cluster_name - cluster_id = client.run_job_flow(**args)['JobFlowId'] - expected[cluster_id] = { - 'Id': cluster_id, - 'Name': cluster_name, - 'State': 'WAITING' - } - - # need sleep since it appears the timestamp is always rounded to - # the nearest second internally - time.sleep(1) - timestamp = datetime.now(pytz.utc) - time.sleep(1) - - for idx in range(4, 6): - cluster_name = 'cluster' + str(idx) - args['Name'] = cluster_name - cluster_id = client.run_job_flow(**args)['JobFlowId'] - client.terminate_job_flows(JobFlowIds=[cluster_id]) - expected[cluster_id] = { - 'Id': cluster_id, - 'Name': cluster_name, - 'State': 'TERMINATED' - } - - resp = client.describe_job_flows() - resp['JobFlows'].should.have.length_of(6) - - for cluster_id, y in expected.items(): - resp = client.describe_job_flows(JobFlowIds=[cluster_id]) - resp['JobFlows'].should.have.length_of(1) - resp['JobFlows'][0]['JobFlowId'].should.equal(cluster_id) - - resp = client.describe_job_flows(JobFlowStates=['WAITING']) - resp['JobFlows'].should.have.length_of(4) - for x in resp['JobFlows']: - x['ExecutionStatusDetail']['State'].should.equal('WAITING') - - resp = client.describe_job_flows(CreatedBefore=timestamp) - resp['JobFlows'].should.have.length_of(4) - - resp = client.describe_job_flows(CreatedAfter=timestamp) - resp['JobFlows'].should.have.length_of(2) - - -@mock_emr -def test_describe_job_flow(): - client = boto3.client('emr', region_name='us-east-1') - - args = deepcopy(run_job_flow_args) - args['AmiVersion'] = '3.8.1' - args['Instances'].update( - {'Ec2KeyName': 'ec2keyname', - 'Ec2SubnetId': 'subnet-8be41cec', - 'HadoopVersion': '2.4.0'}) - args['VisibleToAllUsers'] = True - - cluster_id = client.run_job_flow(**args)['JobFlowId'] - - jf = client.describe_job_flows(JobFlowIds=[cluster_id])['JobFlows'][0] - - jf['AmiVersion'].should.equal(args['AmiVersion']) - jf.shouldnt.have.key('BootstrapActions') - esd = jf['ExecutionStatusDetail'] - esd['CreationDateTime'].should.be.a('datetime.datetime') - # esd['EndDateTime'].should.be.a('datetime.datetime') - # esd['LastStateChangeReason'].should.be.a(six.string_types) - esd['ReadyDateTime'].should.be.a('datetime.datetime') - esd['StartDateTime'].should.be.a('datetime.datetime') - esd['State'].should.equal('WAITING') - attrs = jf['Instances'] - attrs['Ec2KeyName'].should.equal(args['Instances']['Ec2KeyName']) - attrs['Ec2SubnetId'].should.equal(args['Instances']['Ec2SubnetId']) - attrs['HadoopVersion'].should.equal(args['Instances']['HadoopVersion']) - attrs['InstanceCount'].should.equal(args['Instances']['InstanceCount']) - for ig in attrs['InstanceGroups']: - # ig['BidPrice'] - ig['CreationDateTime'].should.be.a('datetime.datetime') - # ig['EndDateTime'].should.be.a('datetime.datetime') - ig['InstanceGroupId'].should.be.a(six.string_types) - ig['InstanceRequestCount'].should.be.a(int) - 
ig['InstanceRole'].should.be.within(['MASTER', 'CORE']) - ig['InstanceRunningCount'].should.be.a(int) - ig['InstanceType'].should.be.within(['c3.medium', 'c3.xlarge']) - # ig['LastStateChangeReason'].should.be.a(six.string_types) - ig['Market'].should.equal('ON_DEMAND') - ig['Name'].should.be.a(six.string_types) - ig['ReadyDateTime'].should.be.a('datetime.datetime') - ig['StartDateTime'].should.be.a('datetime.datetime') - ig['State'].should.equal('RUNNING') - attrs['KeepJobFlowAliveWhenNoSteps'].should.equal(True) - # attrs['MasterInstanceId'].should.be.a(six.string_types) - attrs['MasterInstanceType'].should.equal( - args['Instances']['MasterInstanceType']) - attrs['MasterPublicDnsName'].should.be.a(six.string_types) - attrs['NormalizedInstanceHours'].should.equal(0) - attrs['Placement']['AvailabilityZone'].should.equal( - args['Instances']['Placement']['AvailabilityZone']) - attrs['SlaveInstanceType'].should.equal( - args['Instances']['SlaveInstanceType']) - attrs['TerminationProtected'].should.equal(False) - jf['JobFlowId'].should.equal(cluster_id) - jf['JobFlowRole'].should.equal(args['JobFlowRole']) - jf['LogUri'].should.equal(args['LogUri']) - jf['Name'].should.equal(args['Name']) - jf['ServiceRole'].should.equal(args['ServiceRole']) - jf['Steps'].should.equal([]) - jf['SupportedProducts'].should.equal([]) - jf['VisibleToAllUsers'].should.equal(True) - - -@mock_emr -def test_list_clusters(): - client = boto3.client('emr', region_name='us-east-1') - args = deepcopy(run_job_flow_args) - expected = {} - - for idx in range(40): - cluster_name = 'jobflow' + str(idx) - args['Name'] = cluster_name - cluster_id = client.run_job_flow(**args)['JobFlowId'] - expected[cluster_id] = { - 'Id': cluster_id, - 'Name': cluster_name, - 'NormalizedInstanceHours': 0, - 'State': 'WAITING' - } - - # need sleep since it appears the timestamp is always rounded to - # the nearest second internally - time.sleep(1) - timestamp = datetime.now(pytz.utc) - time.sleep(1) - - for idx in range(40, 70): - cluster_name = 'jobflow' + str(idx) - args['Name'] = cluster_name - cluster_id = client.run_job_flow(**args)['JobFlowId'] - client.terminate_job_flows(JobFlowIds=[cluster_id]) - expected[cluster_id] = { - 'Id': cluster_id, - 'Name': cluster_name, - 'NormalizedInstanceHours': 0, - 'State': 'TERMINATED' - } - - args = {} - while 1: - resp = client.list_clusters(**args) - clusters = resp['Clusters'] - len(clusters).should.be.lower_than_or_equal_to(50) - for x in clusters: - y = expected[x['Id']] - x['Id'].should.equal(y['Id']) - x['Name'].should.equal(y['Name']) - x['NormalizedInstanceHours'].should.equal( - y['NormalizedInstanceHours']) - x['Status']['State'].should.equal(y['State']) - x['Status']['Timeline'][ - 'CreationDateTime'].should.be.a('datetime.datetime') - if y['State'] == 'TERMINATED': - x['Status']['Timeline'][ - 'EndDateTime'].should.be.a('datetime.datetime') - else: - x['Status']['Timeline'].shouldnt.have.key('EndDateTime') - x['Status']['Timeline'][ - 'ReadyDateTime'].should.be.a('datetime.datetime') - marker = resp.get('Marker') - if marker is None: - break - args = {'Marker': marker} - - resp = client.list_clusters(ClusterStates=['TERMINATED']) - resp['Clusters'].should.have.length_of(30) - for x in resp['Clusters']: - x['Status']['State'].should.equal('TERMINATED') - - resp = client.list_clusters(CreatedBefore=timestamp) - resp['Clusters'].should.have.length_of(40) - - resp = client.list_clusters(CreatedAfter=timestamp) - resp['Clusters'].should.have.length_of(30) - - -@mock_emr -def 
test_run_job_flow(): - client = boto3.client('emr', region_name='us-east-1') - args = deepcopy(run_job_flow_args) - cluster_id = client.run_job_flow(**args)['JobFlowId'] - resp = client.describe_job_flows(JobFlowIds=[cluster_id])['JobFlows'][0] - resp['ExecutionStatusDetail']['State'].should.equal('WAITING') - resp['JobFlowId'].should.equal(cluster_id) - resp['Name'].should.equal(args['Name']) - resp['Instances']['MasterInstanceType'].should.equal( - args['Instances']['MasterInstanceType']) - resp['Instances']['SlaveInstanceType'].should.equal( - args['Instances']['SlaveInstanceType']) - resp['LogUri'].should.equal(args['LogUri']) - resp['VisibleToAllUsers'].should.equal(args['VisibleToAllUsers']) - resp['Instances']['NormalizedInstanceHours'].should.equal(0) - resp['Steps'].should.equal([]) - - -@mock_emr -def test_run_job_flow_with_invalid_params(): - client = boto3.client('emr', region_name='us-east-1') - with assert_raises(ClientError) as ex: - # cannot set both AmiVersion and ReleaseLabel - args = deepcopy(run_job_flow_args) - args['AmiVersion'] = '2.4' - args['ReleaseLabel'] = 'emr-5.0.0' - client.run_job_flow(**args) - ex.exception.response['Error']['Code'].should.equal('ValidationException') - - -@mock_emr -def test_run_job_flow_in_multiple_regions(): - regions = {} - for region in ['us-east-1', 'eu-west-1']: - client = boto3.client('emr', region_name=region) - args = deepcopy(run_job_flow_args) - args['Name'] = region - cluster_id = client.run_job_flow(**args)['JobFlowId'] - regions[region] = {'client': client, 'cluster_id': cluster_id} - - for region in regions.keys(): - client = regions[region]['client'] - resp = client.describe_cluster(ClusterId=regions[region]['cluster_id']) - resp['Cluster']['Name'].should.equal(region) - - -@mock_emr -def test_run_job_flow_with_new_params(): - client = boto3.client('emr', region_name='us-east-1') - resp = client.run_job_flow(**run_job_flow_args) - resp.should.have.key('JobFlowId') - - -@mock_emr -def test_run_job_flow_with_visible_to_all_users(): - client = boto3.client('emr', region_name='us-east-1') - for expected in (True, False): - args = deepcopy(run_job_flow_args) - args['VisibleToAllUsers'] = expected - resp = client.run_job_flow(**args) - cluster_id = resp['JobFlowId'] - resp = client.describe_cluster(ClusterId=cluster_id) - resp['Cluster']['VisibleToAllUsers'].should.equal(expected) - - -@mock_emr -def test_run_job_flow_with_instance_groups(): - input_groups = dict((g['Name'], g) for g in input_instance_groups) - client = boto3.client('emr', region_name='us-east-1') - args = deepcopy(run_job_flow_args) - args['Instances'] = {'InstanceGroups': input_instance_groups} - cluster_id = client.run_job_flow(**args)['JobFlowId'] - groups = client.list_instance_groups(ClusterId=cluster_id)[ - 'InstanceGroups'] - for x in groups: - y = input_groups[x['Name']] - x.should.have.key('Id') - x['RequestedInstanceCount'].should.equal(y['InstanceCount']) - x['InstanceGroupType'].should.equal(y['InstanceRole']) - x['InstanceType'].should.equal(y['InstanceType']) - x['Market'].should.equal(y['Market']) - if 'BidPrice' in y: - x['BidPrice'].should.equal(y['BidPrice']) - - -@mock_emr -def test_set_termination_protection(): - client = boto3.client('emr', region_name='us-east-1') - args = deepcopy(run_job_flow_args) - args['Instances']['TerminationProtected'] = False - resp = client.run_job_flow(**args) - cluster_id = resp['JobFlowId'] - resp = client.describe_cluster(ClusterId=cluster_id) - resp['Cluster']['TerminationProtected'].should.equal(False) - - 
for expected in (True, False): - resp = client.set_termination_protection(JobFlowIds=[cluster_id], - TerminationProtected=expected) - resp = client.describe_cluster(ClusterId=cluster_id) - resp['Cluster']['TerminationProtected'].should.equal(expected) - - -@mock_emr -def test_set_visible_to_all_users(): - client = boto3.client('emr', region_name='us-east-1') - args = deepcopy(run_job_flow_args) - args['VisibleToAllUsers'] = False - resp = client.run_job_flow(**args) - cluster_id = resp['JobFlowId'] - resp = client.describe_cluster(ClusterId=cluster_id) - resp['Cluster']['VisibleToAllUsers'].should.equal(False) - - for expected in (True, False): - resp = client.set_visible_to_all_users(JobFlowIds=[cluster_id], - VisibleToAllUsers=expected) - resp = client.describe_cluster(ClusterId=cluster_id) - resp['Cluster']['VisibleToAllUsers'].should.equal(expected) - - -@mock_emr -def test_terminate_job_flows(): - client = boto3.client('emr', region_name='us-east-1') - - resp = client.run_job_flow(**run_job_flow_args) - cluster_id = resp['JobFlowId'] - resp = client.describe_cluster(ClusterId=cluster_id) - resp['Cluster']['Status']['State'].should.equal('WAITING') - - resp = client.terminate_job_flows(JobFlowIds=[cluster_id]) - resp = client.describe_cluster(ClusterId=cluster_id) - resp['Cluster']['Status']['State'].should.equal('TERMINATED') - - -# testing multiple end points for each feature - -@mock_emr -def test_bootstrap_actions(): - bootstrap_actions = [ - {'Name': 'bs1', - 'ScriptBootstrapAction': { - 'Args': ['arg1', 'arg2'], - 'Path': 's3://path/to/script'}}, - {'Name': 'bs2', - 'ScriptBootstrapAction': { - 'Args': [], - 'Path': 's3://path/to/anotherscript'}} - ] - - client = boto3.client('emr', region_name='us-east-1') - args = deepcopy(run_job_flow_args) - args['BootstrapActions'] = bootstrap_actions - cluster_id = client.run_job_flow(**args)['JobFlowId'] - - cl = client.describe_job_flows(JobFlowIds=[cluster_id])['JobFlows'][0] - for x, y in zip(cl['BootstrapActions'], bootstrap_actions): - x['BootstrapActionConfig'].should.equal(y) - - resp = client.list_bootstrap_actions(ClusterId=cluster_id) - for x, y in zip(resp['BootstrapActions'], bootstrap_actions): - x['Name'].should.equal(y['Name']) - if 'Args' in y['ScriptBootstrapAction']: - x['Args'].should.equal(y['ScriptBootstrapAction']['Args']) - x['ScriptPath'].should.equal(y['ScriptBootstrapAction']['Path']) - - -@mock_emr -def test_instance_groups(): - input_groups = dict((g['Name'], g) for g in input_instance_groups) - - client = boto3.client('emr', region_name='us-east-1') - args = deepcopy(run_job_flow_args) - for key in ['MasterInstanceType', 'SlaveInstanceType', 'InstanceCount']: - del args['Instances'][key] - args['Instances']['InstanceGroups'] = input_instance_groups[:2] - cluster_id = client.run_job_flow(**args)['JobFlowId'] - - jf = client.describe_job_flows(JobFlowIds=[cluster_id])['JobFlows'][0] - base_instance_count = jf['Instances']['InstanceCount'] - - client.add_instance_groups( - JobFlowId=cluster_id, InstanceGroups=input_instance_groups[2:]) - - jf = client.describe_job_flows(JobFlowIds=[cluster_id])['JobFlows'][0] - jf['Instances']['InstanceCount'].should.equal( - sum(g['InstanceCount'] for g in input_instance_groups)) - for x in jf['Instances']['InstanceGroups']: - y = input_groups[x['Name']] - if hasattr(y, 'BidPrice'): - x['BidPrice'].should.equal('BidPrice') - x['CreationDateTime'].should.be.a('datetime.datetime') - # x['EndDateTime'].should.be.a('datetime.datetime') - x.should.have.key('InstanceGroupId') - 
x['InstanceRequestCount'].should.equal(y['InstanceCount']) - x['InstanceRole'].should.equal(y['InstanceRole']) - x['InstanceRunningCount'].should.equal(y['InstanceCount']) - x['InstanceType'].should.equal(y['InstanceType']) - # x['LastStateChangeReason'].should.equal(y['LastStateChangeReason']) - x['Market'].should.equal(y['Market']) - x['Name'].should.equal(y['Name']) - x['ReadyDateTime'].should.be.a('datetime.datetime') - x['StartDateTime'].should.be.a('datetime.datetime') - x['State'].should.equal('RUNNING') - - groups = client.list_instance_groups(ClusterId=cluster_id)[ - 'InstanceGroups'] - for x in groups: - y = input_groups[x['Name']] - if hasattr(y, 'BidPrice'): - x['BidPrice'].should.equal('BidPrice') - # Configurations - # EbsBlockDevices - # EbsOptimized - x.should.have.key('Id') - x['InstanceGroupType'].should.equal(y['InstanceRole']) - x['InstanceType'].should.equal(y['InstanceType']) - x['Market'].should.equal(y['Market']) - x['Name'].should.equal(y['Name']) - x['RequestedInstanceCount'].should.equal(y['InstanceCount']) - x['RunningInstanceCount'].should.equal(y['InstanceCount']) - # ShrinkPolicy - x['Status']['State'].should.equal('RUNNING') - x['Status']['StateChangeReason']['Code'].should.be.a(six.string_types) - # x['Status']['StateChangeReason']['Message'].should.be.a(six.string_types) - x['Status']['Timeline'][ - 'CreationDateTime'].should.be.a('datetime.datetime') - # x['Status']['Timeline']['EndDateTime'].should.be.a('datetime.datetime') - x['Status']['Timeline'][ - 'ReadyDateTime'].should.be.a('datetime.datetime') - - igs = dict((g['Name'], g) for g in groups) - client.modify_instance_groups( - InstanceGroups=[ - {'InstanceGroupId': igs['task-1']['Id'], - 'InstanceCount': 2}, - {'InstanceGroupId': igs['task-2']['Id'], - 'InstanceCount': 3}]) - jf = client.describe_job_flows(JobFlowIds=[cluster_id])['JobFlows'][0] - jf['Instances']['InstanceCount'].should.equal(base_instance_count + 5) - igs = dict((g['Name'], g) for g in jf['Instances']['InstanceGroups']) - igs['task-1']['InstanceRunningCount'].should.equal(2) - igs['task-2']['InstanceRunningCount'].should.equal(3) - - -@mock_emr -def test_steps(): - input_steps = [{ - 'HadoopJarStep': { - 'Args': [ - 'hadoop-streaming', - '-files', 's3://elasticmapreduce/samples/wordcount/wordSplitter.py#wordSplitter.py', - '-mapper', 'python wordSplitter.py', - '-input', 's3://elasticmapreduce/samples/wordcount/input', - '-output', 's3://output_bucket/output/wordcount_output', - '-reducer', 'aggregate' - ], - 'Jar': 'command-runner.jar', - }, - 'Name': 'My wordcount example', - }, { - 'HadoopJarStep': { - 'Args': [ - 'hadoop-streaming', - '-files', 's3://elasticmapreduce/samples/wordcount/wordSplitter2.py#wordSplitter2.py', - '-mapper', 'python wordSplitter2.py', - '-input', 's3://elasticmapreduce/samples/wordcount/input2', - '-output', 's3://output_bucket/output/wordcount_output2', - '-reducer', 'aggregate' - ], - 'Jar': 'command-runner.jar', - }, - 'Name': 'My wordcount example2', - }] - - # TODO: implementation and test for cancel_steps - - client = boto3.client('emr', region_name='us-east-1') - args = deepcopy(run_job_flow_args) - args['Steps'] = [input_steps[0]] - cluster_id = client.run_job_flow(**args)['JobFlowId'] - - jf = client.describe_job_flows(JobFlowIds=[cluster_id])['JobFlows'][0] - jf['Steps'].should.have.length_of(1) - - client.add_job_flow_steps(JobFlowId=cluster_id, Steps=[input_steps[1]]) - - jf = client.describe_job_flows(JobFlowIds=[cluster_id])['JobFlows'][0] - jf['Steps'].should.have.length_of(2) - for idx, 
(x, y) in enumerate(zip(jf['Steps'], input_steps)): - x['ExecutionStatusDetail'].should.have.key('CreationDateTime') - # x['ExecutionStatusDetail'].should.have.key('EndDateTime') - # x['ExecutionStatusDetail'].should.have.key('LastStateChangeReason') - # x['ExecutionStatusDetail'].should.have.key('StartDateTime') - x['ExecutionStatusDetail']['State'].should.equal( - 'STARTING' if idx == 0 else 'PENDING') - x['StepConfig']['ActionOnFailure'].should.equal('TERMINATE_CLUSTER') - x['StepConfig']['HadoopJarStep'][ - 'Args'].should.equal(y['HadoopJarStep']['Args']) - x['StepConfig']['HadoopJarStep'][ - 'Jar'].should.equal(y['HadoopJarStep']['Jar']) - if 'MainClass' in y['HadoopJarStep']: - x['StepConfig']['HadoopJarStep']['MainClass'].should.equal( - y['HadoopJarStep']['MainClass']) - if 'Properties' in y['HadoopJarStep']: - x['StepConfig']['HadoopJarStep']['Properties'].should.equal( - y['HadoopJarStep']['Properties']) - x['StepConfig']['Name'].should.equal(y['Name']) - - expected = dict((s['Name'], s) for s in input_steps) - - steps = client.list_steps(ClusterId=cluster_id)['Steps'] - steps.should.have.length_of(2) - for x in steps: - y = expected[x['Name']] - x['ActionOnFailure'].should.equal('TERMINATE_CLUSTER') - x['Config']['Args'].should.equal(y['HadoopJarStep']['Args']) - x['Config']['Jar'].should.equal(y['HadoopJarStep']['Jar']) - # x['Config']['MainClass'].should.equal(y['HadoopJarStep']['MainClass']) - # Properties - x['Id'].should.be.a(six.string_types) - x['Name'].should.equal(y['Name']) - x['Status']['State'].should.be.within(['STARTING', 'PENDING']) - # StateChangeReason - x['Status']['Timeline'][ - 'CreationDateTime'].should.be.a('datetime.datetime') - # x['Status']['Timeline']['EndDateTime'].should.be.a('datetime.datetime') - # x['Status']['Timeline']['StartDateTime'].should.be.a('datetime.datetime') - - x = client.describe_step(ClusterId=cluster_id, StepId=x['Id'])['Step'] - x['ActionOnFailure'].should.equal('TERMINATE_CLUSTER') - x['Config']['Args'].should.equal(y['HadoopJarStep']['Args']) - x['Config']['Jar'].should.equal(y['HadoopJarStep']['Jar']) - # x['Config']['MainClass'].should.equal(y['HadoopJarStep']['MainClass']) - # Properties - x['Id'].should.be.a(six.string_types) - x['Name'].should.equal(y['Name']) - x['Status']['State'].should.be.within(['STARTING', 'PENDING']) - # StateChangeReason - x['Status']['Timeline'][ - 'CreationDateTime'].should.be.a('datetime.datetime') - # x['Status']['Timeline']['EndDateTime'].should.be.a('datetime.datetime') - # x['Status']['Timeline']['StartDateTime'].should.be.a('datetime.datetime') - - step_id = steps[0]['Id'] - steps = client.list_steps(ClusterId=cluster_id, StepIds=[step_id])['Steps'] - steps.should.have.length_of(1) - steps[0]['Id'].should.equal(step_id) - - steps = client.list_steps(ClusterId=cluster_id, - StepStates=['STARTING'])['Steps'] - steps.should.have.length_of(1) - steps[0]['Id'].should.equal(step_id) - - -@mock_emr -def test_tags(): - input_tags = [{'Key': 'newkey1', 'Value': 'newval1'}, - {'Key': 'newkey2', 'Value': 'newval2'}] - - client = boto3.client('emr', region_name='us-east-1') - cluster_id = client.run_job_flow(**run_job_flow_args)['JobFlowId'] - - client.add_tags(ResourceId=cluster_id, Tags=input_tags) - resp = client.describe_cluster(ClusterId=cluster_id)['Cluster'] - resp['Tags'].should.have.length_of(2) - dict((t['Key'], t['Value']) for t in resp['Tags']).should.equal( - dict((t['Key'], t['Value']) for t in input_tags)) - - client.remove_tags(ResourceId=cluster_id, TagKeys=[ - t['Key'] for t in 
input_tags]) - resp = client.describe_cluster(ClusterId=cluster_id)['Cluster'] - resp['Tags'].should.equal([]) +# -*- coding: utf-8 -*- +from __future__ import unicode_literals +import time +from copy import deepcopy +from datetime import datetime + +import boto3 +import pytz +import six +import sure # noqa +from botocore.exceptions import ClientError +from nose.tools import assert_raises + +from moto import mock_emr + + +run_job_flow_args = dict( + Instances={ + 'InstanceCount': 3, + 'KeepJobFlowAliveWhenNoSteps': True, + 'MasterInstanceType': 'c3.medium', + 'Placement': {'AvailabilityZone': 'us-east-1a'}, + 'SlaveInstanceType': 'c3.xlarge', + }, + JobFlowRole='EMR_EC2_DefaultRole', + LogUri='s3://mybucket/log', + Name='cluster', + ServiceRole='EMR_DefaultRole', + VisibleToAllUsers=True) + + +input_instance_groups = [ + {'InstanceCount': 1, + 'InstanceRole': 'MASTER', + 'InstanceType': 'c1.medium', + 'Market': 'ON_DEMAND', + 'Name': 'master'}, + {'InstanceCount': 3, + 'InstanceRole': 'CORE', + 'InstanceType': 'c1.medium', + 'Market': 'ON_DEMAND', + 'Name': 'core'}, + {'InstanceCount': 6, + 'InstanceRole': 'TASK', + 'InstanceType': 'c1.large', + 'Market': 'SPOT', + 'Name': 'task-1', + 'BidPrice': '0.07'}, + {'InstanceCount': 10, + 'InstanceRole': 'TASK', + 'InstanceType': 'c1.xlarge', + 'Market': 'SPOT', + 'Name': 'task-2', + 'BidPrice': '0.05'}, +] + + +@mock_emr +def test_describe_cluster(): + client = boto3.client('emr', region_name='us-east-1') + + args = deepcopy(run_job_flow_args) + args['Applications'] = [{'Name': 'Spark', 'Version': '2.4.2'}] + args['Configurations'] = [ + {'Classification': 'yarn-site', + 'Properties': {'someproperty': 'somevalue', + 'someotherproperty': 'someothervalue'}}, + {'Classification': 'nested-configs', + 'Properties': {}, + 'Configurations': [ + { + 'Classification': 'nested-config', + 'Properties': { + 'nested-property': 'nested-value' + } + } + ]} + ] + args['Instances']['AdditionalMasterSecurityGroups'] = ['additional-master'] + args['Instances']['AdditionalSlaveSecurityGroups'] = ['additional-slave'] + args['Instances']['Ec2KeyName'] = 'mykey' + args['Instances']['Ec2SubnetId'] = 'subnet-8be41cec' + args['Instances']['EmrManagedMasterSecurityGroup'] = 'master-security-group' + args['Instances']['EmrManagedSlaveSecurityGroup'] = 'slave-security-group' + args['Instances']['KeepJobFlowAliveWhenNoSteps'] = False + args['Instances']['ServiceAccessSecurityGroup'] = 'service-access-security-group' + args['Tags'] = [{'Key': 'tag1', 'Value': 'val1'}, + {'Key': 'tag2', 'Value': 'val2'}] + + cluster_id = client.run_job_flow(**args)['JobFlowId'] + + cl = client.describe_cluster(ClusterId=cluster_id)['Cluster'] + cl['Applications'][0]['Name'].should.equal('Spark') + cl['Applications'][0]['Version'].should.equal('2.4.2') + cl['AutoTerminate'].should.equal(True) + + config = cl['Configurations'][0] + config['Classification'].should.equal('yarn-site') + config['Properties'].should.equal(args['Configurations'][0]['Properties']) + + nested_config = cl['Configurations'][1] + nested_config['Classification'].should.equal('nested-configs') + nested_config['Properties'].should.equal(args['Configurations'][1]['Properties']) + + attrs = cl['Ec2InstanceAttributes'] + attrs['AdditionalMasterSecurityGroups'].should.equal( + args['Instances']['AdditionalMasterSecurityGroups']) + attrs['AdditionalSlaveSecurityGroups'].should.equal( + args['Instances']['AdditionalSlaveSecurityGroups']) + attrs['Ec2AvailabilityZone'].should.equal('us-east-1a') + 
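+    # The remaining Ec2InstanceAttributes entries should echo back the
+    # values supplied in run_job_flow's Instances argument.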
attrs['Ec2KeyName'].should.equal(args['Instances']['Ec2KeyName']) + attrs['Ec2SubnetId'].should.equal(args['Instances']['Ec2SubnetId']) + attrs['EmrManagedMasterSecurityGroup'].should.equal( + args['Instances']['EmrManagedMasterSecurityGroup']) + attrs['EmrManagedSlaveSecurityGroup'].should.equal( + args['Instances']['EmrManagedSlaveSecurityGroup']) + attrs['IamInstanceProfile'].should.equal(args['JobFlowRole']) + attrs['ServiceAccessSecurityGroup'].should.equal( + args['Instances']['ServiceAccessSecurityGroup']) + cl['Id'].should.equal(cluster_id) + cl['LogUri'].should.equal(args['LogUri']) + cl['MasterPublicDnsName'].should.be.a(six.string_types) + cl['Name'].should.equal(args['Name']) + cl['NormalizedInstanceHours'].should.equal(0) + # cl['ReleaseLabel'].should.equal('emr-5.0.0') + cl.shouldnt.have.key('RequestedAmiVersion') + cl['RunningAmiVersion'].should.equal('1.0.0') + # cl['SecurityConfiguration'].should.be.a(six.string_types) + cl['ServiceRole'].should.equal(args['ServiceRole']) + + status = cl['Status'] + status['State'].should.equal('TERMINATED') + # cluster['Status']['StateChangeReason'] + status['Timeline']['CreationDateTime'].should.be.a('datetime.datetime') + # status['Timeline']['EndDateTime'].should.equal(datetime(2014, 1, 24, 2, 19, 46, tzinfo=pytz.utc)) + status['Timeline']['ReadyDateTime'].should.be.a('datetime.datetime') + + dict((t['Key'], t['Value']) for t in cl['Tags']).should.equal( + dict((t['Key'], t['Value']) for t in args['Tags'])) + + cl['TerminationProtected'].should.equal(False) + cl['VisibleToAllUsers'].should.equal(True) + + +@mock_emr +def test_describe_cluster_not_found(): + conn = boto3.client('emr', region_name='us-east-1') + raised = False + try: + cluster = conn.describe_cluster(ClusterId='DummyId') + except ClientError as e: + if e.response['Error']['Code'] == "ResourceNotFoundException": + raised = True + raised.should.equal(True) + + +@mock_emr +def test_describe_job_flows(): + client = boto3.client('emr', region_name='us-east-1') + args = deepcopy(run_job_flow_args) + expected = {} + + for idx in range(4): + cluster_name = 'cluster' + str(idx) + args['Name'] = cluster_name + cluster_id = client.run_job_flow(**args)['JobFlowId'] + expected[cluster_id] = { + 'Id': cluster_id, + 'Name': cluster_name, + 'State': 'WAITING' + } + + # need sleep since it appears the timestamp is always rounded to + # the nearest second internally + time.sleep(1) + timestamp = datetime.now(pytz.utc) + time.sleep(1) + + for idx in range(4, 6): + cluster_name = 'cluster' + str(idx) + args['Name'] = cluster_name + cluster_id = client.run_job_flow(**args)['JobFlowId'] + client.terminate_job_flows(JobFlowIds=[cluster_id]) + expected[cluster_id] = { + 'Id': cluster_id, + 'Name': cluster_name, + 'State': 'TERMINATED' + } + + resp = client.describe_job_flows() + resp['JobFlows'].should.have.length_of(6) + + for cluster_id, y in expected.items(): + resp = client.describe_job_flows(JobFlowIds=[cluster_id]) + resp['JobFlows'].should.have.length_of(1) + resp['JobFlows'][0]['JobFlowId'].should.equal(cluster_id) + + resp = client.describe_job_flows(JobFlowStates=['WAITING']) + resp['JobFlows'].should.have.length_of(4) + for x in resp['JobFlows']: + x['ExecutionStatusDetail']['State'].should.equal('WAITING') + + resp = client.describe_job_flows(CreatedBefore=timestamp) + resp['JobFlows'].should.have.length_of(4) + + resp = client.describe_job_flows(CreatedAfter=timestamp) + resp['JobFlows'].should.have.length_of(2) + + +@mock_emr +def test_describe_job_flow(): + client = 
boto3.client('emr', region_name='us-east-1') + + args = deepcopy(run_job_flow_args) + args['AmiVersion'] = '3.8.1' + args['Instances'].update( + {'Ec2KeyName': 'ec2keyname', + 'Ec2SubnetId': 'subnet-8be41cec', + 'HadoopVersion': '2.4.0'}) + args['VisibleToAllUsers'] = True + + cluster_id = client.run_job_flow(**args)['JobFlowId'] + + jf = client.describe_job_flows(JobFlowIds=[cluster_id])['JobFlows'][0] + + jf['AmiVersion'].should.equal(args['AmiVersion']) + jf.shouldnt.have.key('BootstrapActions') + esd = jf['ExecutionStatusDetail'] + esd['CreationDateTime'].should.be.a('datetime.datetime') + # esd['EndDateTime'].should.be.a('datetime.datetime') + # esd['LastStateChangeReason'].should.be.a(six.string_types) + esd['ReadyDateTime'].should.be.a('datetime.datetime') + esd['StartDateTime'].should.be.a('datetime.datetime') + esd['State'].should.equal('WAITING') + attrs = jf['Instances'] + attrs['Ec2KeyName'].should.equal(args['Instances']['Ec2KeyName']) + attrs['Ec2SubnetId'].should.equal(args['Instances']['Ec2SubnetId']) + attrs['HadoopVersion'].should.equal(args['Instances']['HadoopVersion']) + attrs['InstanceCount'].should.equal(args['Instances']['InstanceCount']) + for ig in attrs['InstanceGroups']: + # ig['BidPrice'] + ig['CreationDateTime'].should.be.a('datetime.datetime') + # ig['EndDateTime'].should.be.a('datetime.datetime') + ig['InstanceGroupId'].should.be.a(six.string_types) + ig['InstanceRequestCount'].should.be.a(int) + ig['InstanceRole'].should.be.within(['MASTER', 'CORE']) + ig['InstanceRunningCount'].should.be.a(int) + ig['InstanceType'].should.be.within(['c3.medium', 'c3.xlarge']) + # ig['LastStateChangeReason'].should.be.a(six.string_types) + ig['Market'].should.equal('ON_DEMAND') + ig['Name'].should.be.a(six.string_types) + ig['ReadyDateTime'].should.be.a('datetime.datetime') + ig['StartDateTime'].should.be.a('datetime.datetime') + ig['State'].should.equal('RUNNING') + attrs['KeepJobFlowAliveWhenNoSteps'].should.equal(True) + # attrs['MasterInstanceId'].should.be.a(six.string_types) + attrs['MasterInstanceType'].should.equal( + args['Instances']['MasterInstanceType']) + attrs['MasterPublicDnsName'].should.be.a(six.string_types) + attrs['NormalizedInstanceHours'].should.equal(0) + attrs['Placement']['AvailabilityZone'].should.equal( + args['Instances']['Placement']['AvailabilityZone']) + attrs['SlaveInstanceType'].should.equal( + args['Instances']['SlaveInstanceType']) + attrs['TerminationProtected'].should.equal(False) + jf['JobFlowId'].should.equal(cluster_id) + jf['JobFlowRole'].should.equal(args['JobFlowRole']) + jf['LogUri'].should.equal(args['LogUri']) + jf['Name'].should.equal(args['Name']) + jf['ServiceRole'].should.equal(args['ServiceRole']) + jf['Steps'].should.equal([]) + jf['SupportedProducts'].should.equal([]) + jf['VisibleToAllUsers'].should.equal(True) + + +@mock_emr +def test_list_clusters(): + client = boto3.client('emr', region_name='us-east-1') + args = deepcopy(run_job_flow_args) + expected = {} + + for idx in range(40): + cluster_name = 'jobflow' + str(idx) + args['Name'] = cluster_name + cluster_id = client.run_job_flow(**args)['JobFlowId'] + expected[cluster_id] = { + 'Id': cluster_id, + 'Name': cluster_name, + 'NormalizedInstanceHours': 0, + 'State': 'WAITING' + } + + # need sleep since it appears the timestamp is always rounded to + # the nearest second internally + time.sleep(1) + timestamp = datetime.now(pytz.utc) + time.sleep(1) + + for idx in range(40, 70): + cluster_name = 'jobflow' + str(idx) + args['Name'] = cluster_name + cluster_id = 
client.run_job_flow(**args)['JobFlowId'] + client.terminate_job_flows(JobFlowIds=[cluster_id]) + expected[cluster_id] = { + 'Id': cluster_id, + 'Name': cluster_name, + 'NormalizedInstanceHours': 0, + 'State': 'TERMINATED' + } + + args = {} + while 1: + resp = client.list_clusters(**args) + clusters = resp['Clusters'] + len(clusters).should.be.lower_than_or_equal_to(50) + for x in clusters: + y = expected[x['Id']] + x['Id'].should.equal(y['Id']) + x['Name'].should.equal(y['Name']) + x['NormalizedInstanceHours'].should.equal( + y['NormalizedInstanceHours']) + x['Status']['State'].should.equal(y['State']) + x['Status']['Timeline'][ + 'CreationDateTime'].should.be.a('datetime.datetime') + if y['State'] == 'TERMINATED': + x['Status']['Timeline'][ + 'EndDateTime'].should.be.a('datetime.datetime') + else: + x['Status']['Timeline'].shouldnt.have.key('EndDateTime') + x['Status']['Timeline'][ + 'ReadyDateTime'].should.be.a('datetime.datetime') + marker = resp.get('Marker') + if marker is None: + break + args = {'Marker': marker} + + resp = client.list_clusters(ClusterStates=['TERMINATED']) + resp['Clusters'].should.have.length_of(30) + for x in resp['Clusters']: + x['Status']['State'].should.equal('TERMINATED') + + resp = client.list_clusters(CreatedBefore=timestamp) + resp['Clusters'].should.have.length_of(40) + + resp = client.list_clusters(CreatedAfter=timestamp) + resp['Clusters'].should.have.length_of(30) + + +@mock_emr +def test_run_job_flow(): + client = boto3.client('emr', region_name='us-east-1') + args = deepcopy(run_job_flow_args) + cluster_id = client.run_job_flow(**args)['JobFlowId'] + resp = client.describe_job_flows(JobFlowIds=[cluster_id])['JobFlows'][0] + resp['ExecutionStatusDetail']['State'].should.equal('WAITING') + resp['JobFlowId'].should.equal(cluster_id) + resp['Name'].should.equal(args['Name']) + resp['Instances']['MasterInstanceType'].should.equal( + args['Instances']['MasterInstanceType']) + resp['Instances']['SlaveInstanceType'].should.equal( + args['Instances']['SlaveInstanceType']) + resp['LogUri'].should.equal(args['LogUri']) + resp['VisibleToAllUsers'].should.equal(args['VisibleToAllUsers']) + resp['Instances']['NormalizedInstanceHours'].should.equal(0) + resp['Steps'].should.equal([]) + + +@mock_emr +def test_run_job_flow_with_invalid_params(): + client = boto3.client('emr', region_name='us-east-1') + with assert_raises(ClientError) as ex: + # cannot set both AmiVersion and ReleaseLabel + args = deepcopy(run_job_flow_args) + args['AmiVersion'] = '2.4' + args['ReleaseLabel'] = 'emr-5.0.0' + client.run_job_flow(**args) + ex.exception.response['Error']['Code'].should.equal('ValidationException') + + +@mock_emr +def test_run_job_flow_in_multiple_regions(): + regions = {} + for region in ['us-east-1', 'eu-west-1']: + client = boto3.client('emr', region_name=region) + args = deepcopy(run_job_flow_args) + args['Name'] = region + cluster_id = client.run_job_flow(**args)['JobFlowId'] + regions[region] = {'client': client, 'cluster_id': cluster_id} + + for region in regions.keys(): + client = regions[region]['client'] + resp = client.describe_cluster(ClusterId=regions[region]['cluster_id']) + resp['Cluster']['Name'].should.equal(region) + + +@mock_emr +def test_run_job_flow_with_new_params(): + client = boto3.client('emr', region_name='us-east-1') + resp = client.run_job_flow(**run_job_flow_args) + resp.should.have.key('JobFlowId') + + +@mock_emr +def test_run_job_flow_with_visible_to_all_users(): + client = boto3.client('emr', region_name='us-east-1') + for expected in (True, 
False):
+        args = deepcopy(run_job_flow_args)
+        args['VisibleToAllUsers'] = expected
+        resp = client.run_job_flow(**args)
+        cluster_id = resp['JobFlowId']
+        resp = client.describe_cluster(ClusterId=cluster_id)
+        resp['Cluster']['VisibleToAllUsers'].should.equal(expected)
+
+
+@mock_emr
+def test_run_job_flow_with_instance_groups():
+    input_groups = dict((g['Name'], g) for g in input_instance_groups)
+    client = boto3.client('emr', region_name='us-east-1')
+    args = deepcopy(run_job_flow_args)
+    args['Instances'] = {'InstanceGroups': input_instance_groups}
+    cluster_id = client.run_job_flow(**args)['JobFlowId']
+    groups = client.list_instance_groups(ClusterId=cluster_id)[
+        'InstanceGroups']
+    for x in groups:
+        y = input_groups[x['Name']]
+        x.should.have.key('Id')
+        x['RequestedInstanceCount'].should.equal(y['InstanceCount'])
+        x['InstanceGroupType'].should.equal(y['InstanceRole'])
+        x['InstanceType'].should.equal(y['InstanceType'])
+        x['Market'].should.equal(y['Market'])
+        if 'BidPrice' in y:
+            x['BidPrice'].should.equal(y['BidPrice'])
+
+
+@mock_emr
+def test_set_termination_protection():
+    client = boto3.client('emr', region_name='us-east-1')
+    args = deepcopy(run_job_flow_args)
+    args['Instances']['TerminationProtected'] = False
+    resp = client.run_job_flow(**args)
+    cluster_id = resp['JobFlowId']
+    resp = client.describe_cluster(ClusterId=cluster_id)
+    resp['Cluster']['TerminationProtected'].should.equal(False)
+
+    for expected in (True, False):
+        resp = client.set_termination_protection(JobFlowIds=[cluster_id],
+                                                 TerminationProtected=expected)
+        resp = client.describe_cluster(ClusterId=cluster_id)
+        resp['Cluster']['TerminationProtected'].should.equal(expected)
+
+
+@mock_emr
+def test_set_visible_to_all_users():
+    client = boto3.client('emr', region_name='us-east-1')
+    args = deepcopy(run_job_flow_args)
+    args['VisibleToAllUsers'] = False
+    resp = client.run_job_flow(**args)
+    cluster_id = resp['JobFlowId']
+    resp = client.describe_cluster(ClusterId=cluster_id)
+    resp['Cluster']['VisibleToAllUsers'].should.equal(False)
+
+    for expected in (True, False):
+        resp = client.set_visible_to_all_users(JobFlowIds=[cluster_id],
+                                               VisibleToAllUsers=expected)
+        resp = client.describe_cluster(ClusterId=cluster_id)
+        resp['Cluster']['VisibleToAllUsers'].should.equal(expected)
+
+
+@mock_emr
+def test_terminate_job_flows():
+    client = boto3.client('emr', region_name='us-east-1')
+
+    resp = client.run_job_flow(**run_job_flow_args)
+    cluster_id = resp['JobFlowId']
+    resp = client.describe_cluster(ClusterId=cluster_id)
+    resp['Cluster']['Status']['State'].should.equal('WAITING')
+
+    resp = client.terminate_job_flows(JobFlowIds=[cluster_id])
+    resp = client.describe_cluster(ClusterId=cluster_id)
+    resp['Cluster']['Status']['State'].should.equal('TERMINATED')
+
+
+# testing multiple endpoints for each feature
+
+@mock_emr
+def test_bootstrap_actions():
+    bootstrap_actions = [
+        {'Name': 'bs1',
+         'ScriptBootstrapAction': {
+             'Args': ['arg1', 'arg2'],
+             'Path': 's3://path/to/script'}},
+        {'Name': 'bs2',
+         'ScriptBootstrapAction': {
+             'Args': [],
+             'Path': 's3://path/to/anotherscript'}}
+    ]
+
+    client = boto3.client('emr', region_name='us-east-1')
+    args = deepcopy(run_job_flow_args)
+    args['BootstrapActions'] = bootstrap_actions
+    cluster_id = client.run_job_flow(**args)['JobFlowId']
+
+    cl = client.describe_job_flows(JobFlowIds=[cluster_id])['JobFlows'][0]
+    for x, y in zip(cl['BootstrapActions'], bootstrap_actions):
+        x['BootstrapActionConfig'].should.equal(y)
+
+    resp = client.list_bootstrap_actions(ClusterId=cluster_id)
+    for x, y in zip(resp['BootstrapActions'], bootstrap_actions):
+        x['Name'].should.equal(y['Name'])
+        if 'Args' in y['ScriptBootstrapAction']:
+            x['Args'].should.equal(y['ScriptBootstrapAction']['Args'])
+        x['ScriptPath'].should.equal(y['ScriptBootstrapAction']['Path'])
+
+
+@mock_emr
+def test_instance_groups():
+    input_groups = dict((g['Name'], g) for g in input_instance_groups)
+
+    client = boto3.client('emr', region_name='us-east-1')
+    args = deepcopy(run_job_flow_args)
+    for key in ['MasterInstanceType', 'SlaveInstanceType', 'InstanceCount']:
+        del args['Instances'][key]
+    args['Instances']['InstanceGroups'] = input_instance_groups[:2]
+    cluster_id = client.run_job_flow(**args)['JobFlowId']
+
+    jf = client.describe_job_flows(JobFlowIds=[cluster_id])['JobFlows'][0]
+    base_instance_count = jf['Instances']['InstanceCount']
+
+    client.add_instance_groups(
+        JobFlowId=cluster_id, InstanceGroups=input_instance_groups[2:])
+
+    jf = client.describe_job_flows(JobFlowIds=[cluster_id])['JobFlows'][0]
+    jf['Instances']['InstanceCount'].should.equal(
+        sum(g['InstanceCount'] for g in input_instance_groups))
+    for x in jf['Instances']['InstanceGroups']:
+        y = input_groups[x['Name']]
+        if 'BidPrice' in y:
+            x['BidPrice'].should.equal(y['BidPrice'])
+        x['CreationDateTime'].should.be.a('datetime.datetime')
+        # x['EndDateTime'].should.be.a('datetime.datetime')
+        x.should.have.key('InstanceGroupId')
+        x['InstanceRequestCount'].should.equal(y['InstanceCount'])
+        x['InstanceRole'].should.equal(y['InstanceRole'])
+        x['InstanceRunningCount'].should.equal(y['InstanceCount'])
+        x['InstanceType'].should.equal(y['InstanceType'])
+        # x['LastStateChangeReason'].should.equal(y['LastStateChangeReason'])
+        x['Market'].should.equal(y['Market'])
+        x['Name'].should.equal(y['Name'])
+        x['ReadyDateTime'].should.be.a('datetime.datetime')
+        x['StartDateTime'].should.be.a('datetime.datetime')
+        x['State'].should.equal('RUNNING')
+
+    groups = client.list_instance_groups(ClusterId=cluster_id)[
+        'InstanceGroups']
+    for x in groups:
+        y = input_groups[x['Name']]
+        if 'BidPrice' in y:
+            x['BidPrice'].should.equal(y['BidPrice'])
+        # Configurations
+        # EbsBlockDevices
+        # EbsOptimized
+        x.should.have.key('Id')
+        x['InstanceGroupType'].should.equal(y['InstanceRole'])
+        x['InstanceType'].should.equal(y['InstanceType'])
+        x['Market'].should.equal(y['Market'])
+        x['Name'].should.equal(y['Name'])
+        x['RequestedInstanceCount'].should.equal(y['InstanceCount'])
+        x['RunningInstanceCount'].should.equal(y['InstanceCount'])
+        # ShrinkPolicy
+        x['Status']['State'].should.equal('RUNNING')
+        x['Status']['StateChangeReason']['Code'].should.be.a(six.string_types)
+        # x['Status']['StateChangeReason']['Message'].should.be.a(six.string_types)
+        x['Status']['Timeline'][
+            'CreationDateTime'].should.be.a('datetime.datetime')
+        # x['Status']['Timeline']['EndDateTime'].should.be.a('datetime.datetime')
+        x['Status']['Timeline'][
+            'ReadyDateTime'].should.be.a('datetime.datetime')
+
+    igs = dict((g['Name'], g) for g in groups)
+    client.modify_instance_groups(
+        InstanceGroups=[
+            {'InstanceGroupId': igs['task-1']['Id'],
+             'InstanceCount': 2},
+            {'InstanceGroupId': igs['task-2']['Id'],
+             'InstanceCount': 3}])
+    jf = client.describe_job_flows(JobFlowIds=[cluster_id])['JobFlows'][0]
+    jf['Instances']['InstanceCount'].should.equal(base_instance_count + 5)
+    igs = dict((g['Name'], g) for g in jf['Instances']['InstanceGroups'])
+    igs['task-1']['InstanceRunningCount'].should.equal(2)
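+    # Only the two task groups were resized; the master and core groups
+    # keep their original counts, hence base_instance_count + 5 above.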
igs['task-2']['InstanceRunningCount'].should.equal(3) + + +@mock_emr +def test_steps(): + input_steps = [{ + 'HadoopJarStep': { + 'Args': [ + 'hadoop-streaming', + '-files', 's3://elasticmapreduce/samples/wordcount/wordSplitter.py#wordSplitter.py', + '-mapper', 'python wordSplitter.py', + '-input', 's3://elasticmapreduce/samples/wordcount/input', + '-output', 's3://output_bucket/output/wordcount_output', + '-reducer', 'aggregate' + ], + 'Jar': 'command-runner.jar', + }, + 'Name': 'My wordcount example', + }, { + 'HadoopJarStep': { + 'Args': [ + 'hadoop-streaming', + '-files', 's3://elasticmapreduce/samples/wordcount/wordSplitter2.py#wordSplitter2.py', + '-mapper', 'python wordSplitter2.py', + '-input', 's3://elasticmapreduce/samples/wordcount/input2', + '-output', 's3://output_bucket/output/wordcount_output2', + '-reducer', 'aggregate' + ], + 'Jar': 'command-runner.jar', + }, + 'Name': 'My wordcount example2', + }] + + # TODO: implementation and test for cancel_steps + + client = boto3.client('emr', region_name='us-east-1') + args = deepcopy(run_job_flow_args) + args['Steps'] = [input_steps[0]] + cluster_id = client.run_job_flow(**args)['JobFlowId'] + + jf = client.describe_job_flows(JobFlowIds=[cluster_id])['JobFlows'][0] + jf['Steps'].should.have.length_of(1) + + client.add_job_flow_steps(JobFlowId=cluster_id, Steps=[input_steps[1]]) + + jf = client.describe_job_flows(JobFlowIds=[cluster_id])['JobFlows'][0] + jf['Steps'].should.have.length_of(2) + for idx, (x, y) in enumerate(zip(jf['Steps'], input_steps)): + x['ExecutionStatusDetail'].should.have.key('CreationDateTime') + # x['ExecutionStatusDetail'].should.have.key('EndDateTime') + # x['ExecutionStatusDetail'].should.have.key('LastStateChangeReason') + # x['ExecutionStatusDetail'].should.have.key('StartDateTime') + x['ExecutionStatusDetail']['State'].should.equal( + 'STARTING' if idx == 0 else 'PENDING') + x['StepConfig']['ActionOnFailure'].should.equal('TERMINATE_CLUSTER') + x['StepConfig']['HadoopJarStep'][ + 'Args'].should.equal(y['HadoopJarStep']['Args']) + x['StepConfig']['HadoopJarStep'][ + 'Jar'].should.equal(y['HadoopJarStep']['Jar']) + if 'MainClass' in y['HadoopJarStep']: + x['StepConfig']['HadoopJarStep']['MainClass'].should.equal( + y['HadoopJarStep']['MainClass']) + if 'Properties' in y['HadoopJarStep']: + x['StepConfig']['HadoopJarStep']['Properties'].should.equal( + y['HadoopJarStep']['Properties']) + x['StepConfig']['Name'].should.equal(y['Name']) + + expected = dict((s['Name'], s) for s in input_steps) + + steps = client.list_steps(ClusterId=cluster_id)['Steps'] + steps.should.have.length_of(2) + for x in steps: + y = expected[x['Name']] + x['ActionOnFailure'].should.equal('TERMINATE_CLUSTER') + x['Config']['Args'].should.equal(y['HadoopJarStep']['Args']) + x['Config']['Jar'].should.equal(y['HadoopJarStep']['Jar']) + # x['Config']['MainClass'].should.equal(y['HadoopJarStep']['MainClass']) + # Properties + x['Id'].should.be.a(six.string_types) + x['Name'].should.equal(y['Name']) + x['Status']['State'].should.be.within(['STARTING', 'PENDING']) + # StateChangeReason + x['Status']['Timeline'][ + 'CreationDateTime'].should.be.a('datetime.datetime') + # x['Status']['Timeline']['EndDateTime'].should.be.a('datetime.datetime') + # x['Status']['Timeline']['StartDateTime'].should.be.a('datetime.datetime') + + x = client.describe_step(ClusterId=cluster_id, StepId=x['Id'])['Step'] + x['ActionOnFailure'].should.equal('TERMINATE_CLUSTER') + x['Config']['Args'].should.equal(y['HadoopJarStep']['Args']) + 
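+        # describe_step should agree field-for-field with the matching
+        # entry returned by list_steps above.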
x['Config']['Jar'].should.equal(y['HadoopJarStep']['Jar']) + # x['Config']['MainClass'].should.equal(y['HadoopJarStep']['MainClass']) + # Properties + x['Id'].should.be.a(six.string_types) + x['Name'].should.equal(y['Name']) + x['Status']['State'].should.be.within(['STARTING', 'PENDING']) + # StateChangeReason + x['Status']['Timeline'][ + 'CreationDateTime'].should.be.a('datetime.datetime') + # x['Status']['Timeline']['EndDateTime'].should.be.a('datetime.datetime') + # x['Status']['Timeline']['StartDateTime'].should.be.a('datetime.datetime') + + step_id = steps[0]['Id'] + steps = client.list_steps(ClusterId=cluster_id, StepIds=[step_id])['Steps'] + steps.should.have.length_of(1) + steps[0]['Id'].should.equal(step_id) + + steps = client.list_steps(ClusterId=cluster_id, + StepStates=['STARTING'])['Steps'] + steps.should.have.length_of(1) + steps[0]['Id'].should.equal(step_id) + + +@mock_emr +def test_tags(): + input_tags = [{'Key': 'newkey1', 'Value': 'newval1'}, + {'Key': 'newkey2', 'Value': 'newval2'}] + + client = boto3.client('emr', region_name='us-east-1') + cluster_id = client.run_job_flow(**run_job_flow_args)['JobFlowId'] + + client.add_tags(ResourceId=cluster_id, Tags=input_tags) + resp = client.describe_cluster(ClusterId=cluster_id)['Cluster'] + resp['Tags'].should.have.length_of(2) + dict((t['Key'], t['Value']) for t in resp['Tags']).should.equal( + dict((t['Key'], t['Value']) for t in input_tags)) + + client.remove_tags(ResourceId=cluster_id, TagKeys=[ + t['Key'] for t in input_tags]) + resp = client.describe_cluster(ClusterId=cluster_id)['Cluster'] + resp['Tags'].should.equal([]) diff --git a/tests/test_emr/test_server.py b/tests/test_emr/test_server.py index 56eba3ff8a05..f2b215ec7294 100644 --- a/tests/test_emr/test_server.py +++ b/tests/test_emr/test_server.py @@ -1,18 +1,18 @@ -from __future__ import unicode_literals -import sure # noqa - -import moto.server as server - -''' -Test the different server responses -''' - - -def test_describe_jobflows(): - backend = server.create_backend_app("emr") - test_client = backend.test_client() - - res = test_client.get('/?Action=DescribeJobFlows') - - res.data.should.contain(b'') - res.data.should.contain(b'') +from __future__ import unicode_literals +import sure # noqa + +import moto.server as server + +''' +Test the different server responses +''' + + +def test_describe_jobflows(): + backend = server.create_backend_app("emr") + test_client = backend.test_client() + + res = test_client.get('/?Action=DescribeJobFlows') + + res.data.should.contain(b'') + res.data.should.contain(b'') diff --git a/tests/test_events/test_events.py b/tests/test_events/test_events.py index 80630c5b81c8..d459af53325b 100644 --- a/tests/test_events/test_events.py +++ b/tests/test_events/test_events.py @@ -1,211 +1,211 @@ -import random - -import boto3 -import json - -from moto.events import mock_events -from botocore.exceptions import ClientError -from nose.tools import assert_raises - - -RULES = [ - {'Name': 'test1', 'ScheduleExpression': 'rate(5 minutes)'}, - {'Name': 'test2', 'ScheduleExpression': 'rate(1 minute)'}, - {'Name': 'test3', 'EventPattern': '{"source": ["test-source"]}'} -] - -TARGETS = { - 'test-target-1': { - 'Id': 'test-target-1', - 'Arn': 'arn:aws:lambda:us-west-2:111111111111:function:test-function-1', - 'Rules': ['test1', 'test2'] - }, - 'test-target-2': { - 'Id': 'test-target-2', - 'Arn': 'arn:aws:lambda:us-west-2:111111111111:function:test-function-2', - 'Rules': ['test1', 'test3'] - }, - 'test-target-3': { - 'Id': 'test-target-3', - 'Arn': 
'arn:aws:lambda:us-west-2:111111111111:function:test-function-3', - 'Rules': ['test1', 'test2'] - }, - 'test-target-4': { - 'Id': 'test-target-4', - 'Arn': 'arn:aws:lambda:us-west-2:111111111111:function:test-function-4', - 'Rules': ['test1', 'test3'] - }, - 'test-target-5': { - 'Id': 'test-target-5', - 'Arn': 'arn:aws:lambda:us-west-2:111111111111:function:test-function-5', - 'Rules': ['test1', 'test2'] - }, - 'test-target-6': { - 'Id': 'test-target-6', - 'Arn': 'arn:aws:lambda:us-west-2:111111111111:function:test-function-6', - 'Rules': ['test1', 'test3'] - } -} - - -def get_random_rule(): - return RULES[random.randint(0, len(RULES) - 1)] - - -def generate_environment(): - client = boto3.client('events', 'us-west-2') - - for rule in RULES: - client.put_rule( - Name=rule['Name'], - ScheduleExpression=rule.get('ScheduleExpression', ''), - EventPattern=rule.get('EventPattern', '') - ) - - targets = [] - for target in TARGETS: - if rule['Name'] in TARGETS[target].get('Rules'): - targets.append({'Id': target, 'Arn': TARGETS[target]['Arn']}) - - client.put_targets(Rule=rule['Name'], Targets=targets) - - return client - - -@mock_events -def test_list_rules(): - client = generate_environment() - response = client.list_rules() - - assert(response is not None) - assert(len(response['Rules']) > 0) - - -@mock_events -def test_describe_rule(): - rule_name = get_random_rule()['Name'] - client = generate_environment() - response = client.describe_rule(Name=rule_name) - - assert(response is not None) - assert(response.get('Name') == rule_name) - assert(response.get('Arn') is not None) - - -@mock_events -def test_enable_disable_rule(): - rule_name = get_random_rule()['Name'] - client = generate_environment() - - # Rules should start out enabled in these tests. - rule = client.describe_rule(Name=rule_name) - assert(rule['State'] == 'ENABLED') - - client.disable_rule(Name=rule_name) - rule = client.describe_rule(Name=rule_name) - assert(rule['State'] == 'DISABLED') - - client.enable_rule(Name=rule_name) - rule = client.describe_rule(Name=rule_name) - assert(rule['State'] == 'ENABLED') - - -@mock_events -def test_list_rule_names_by_target(): - test_1_target = TARGETS['test-target-1'] - test_2_target = TARGETS['test-target-2'] - client = generate_environment() - - rules = client.list_rule_names_by_target(TargetArn=test_1_target['Arn']) - assert(len(rules['RuleNames']) == len(test_1_target['Rules'])) - for rule in rules['RuleNames']: - assert(rule in test_1_target['Rules']) - - rules = client.list_rule_names_by_target(TargetArn=test_2_target['Arn']) - assert(len(rules['RuleNames']) == len(test_2_target['Rules'])) - for rule in rules['RuleNames']: - assert(rule in test_2_target['Rules']) - - -@mock_events -def test_list_rules(): - client = generate_environment() - - rules = client.list_rules() - assert(len(rules['Rules']) == len(RULES)) - - -@mock_events -def test_delete_rule(): - client = generate_environment() - - client.delete_rule(Name=RULES[0]['Name']) - rules = client.list_rules() - assert(len(rules['Rules']) == len(RULES) - 1) - - -@mock_events -def test_list_targets_by_rule(): - rule_name = get_random_rule()['Name'] - client = generate_environment() - targets = client.list_targets_by_rule(Rule=rule_name) - - expected_targets = [] - for target in TARGETS: - if rule_name in TARGETS[target].get('Rules'): - expected_targets.append(target) - - assert(len(targets['Targets']) == len(expected_targets)) - - -@mock_events -def test_remove_targets(): - rule_name = get_random_rule()['Name'] - client = 
generate_environment() - - targets = client.list_targets_by_rule(Rule=rule_name)['Targets'] - targets_before = len(targets) - assert(targets_before > 0) - - client.remove_targets(Rule=rule_name, Ids=[targets[0]['Id']]) - - targets = client.list_targets_by_rule(Rule=rule_name)['Targets'] - targets_after = len(targets) - assert(targets_before - 1 == targets_after) - - -@mock_events -def test_permissions(): - client = boto3.client('events', 'eu-central-1') - - client.put_permission(Action='events:PutEvents', Principal='111111111111', StatementId='Account1') - client.put_permission(Action='events:PutEvents', Principal='222222222222', StatementId='Account2') - - resp = client.describe_event_bus() - resp_policy = json.loads(resp['Policy']) - assert len(resp_policy['Statement']) == 2 - - client.remove_permission(StatementId='Account2') - - resp = client.describe_event_bus() - resp_policy = json.loads(resp['Policy']) - assert len(resp_policy['Statement']) == 1 - assert resp_policy['Statement'][0]['Sid'] == 'Account1' - - -@mock_events -def test_put_events(): - client = boto3.client('events', 'eu-central-1') - - event = { - "Source": "com.mycompany.myapp", - "Detail": '{"key1": "value3", "key2": "value4"}', - "Resources": ["resource1", "resource2"], - "DetailType": "myDetailType" - } - - client.put_events(Entries=[event]) - # Boto3 would error if it didn't return 200 OK - - with assert_raises(ClientError): - client.put_events(Entries=[event]*20) +import random + +import boto3 +import json + +from moto.events import mock_events +from botocore.exceptions import ClientError +from nose.tools import assert_raises + + +RULES = [ + {'Name': 'test1', 'ScheduleExpression': 'rate(5 minutes)'}, + {'Name': 'test2', 'ScheduleExpression': 'rate(1 minute)'}, + {'Name': 'test3', 'EventPattern': '{"source": ["test-source"]}'} +] + +TARGETS = { + 'test-target-1': { + 'Id': 'test-target-1', + 'Arn': 'arn:aws:lambda:us-west-2:111111111111:function:test-function-1', + 'Rules': ['test1', 'test2'] + }, + 'test-target-2': { + 'Id': 'test-target-2', + 'Arn': 'arn:aws:lambda:us-west-2:111111111111:function:test-function-2', + 'Rules': ['test1', 'test3'] + }, + 'test-target-3': { + 'Id': 'test-target-3', + 'Arn': 'arn:aws:lambda:us-west-2:111111111111:function:test-function-3', + 'Rules': ['test1', 'test2'] + }, + 'test-target-4': { + 'Id': 'test-target-4', + 'Arn': 'arn:aws:lambda:us-west-2:111111111111:function:test-function-4', + 'Rules': ['test1', 'test3'] + }, + 'test-target-5': { + 'Id': 'test-target-5', + 'Arn': 'arn:aws:lambda:us-west-2:111111111111:function:test-function-5', + 'Rules': ['test1', 'test2'] + }, + 'test-target-6': { + 'Id': 'test-target-6', + 'Arn': 'arn:aws:lambda:us-west-2:111111111111:function:test-function-6', + 'Rules': ['test1', 'test3'] + } +} + + +def get_random_rule(): + return RULES[random.randint(0, len(RULES) - 1)] + + +def generate_environment(): + client = boto3.client('events', 'us-west-2') + + for rule in RULES: + client.put_rule( + Name=rule['Name'], + ScheduleExpression=rule.get('ScheduleExpression', ''), + EventPattern=rule.get('EventPattern', '') + ) + + targets = [] + for target in TARGETS: + if rule['Name'] in TARGETS[target].get('Rules'): + targets.append({'Id': target, 'Arn': TARGETS[target]['Arn']}) + + client.put_targets(Rule=rule['Name'], Targets=targets) + + return client + + +@mock_events +def test_list_rules(): + client = generate_environment() + response = client.list_rules() + + assert(response is not None) + assert(len(response['Rules']) > 0) + + +@mock_events +def 
test_describe_rule(): + rule_name = get_random_rule()['Name'] + client = generate_environment() + response = client.describe_rule(Name=rule_name) + + assert(response is not None) + assert(response.get('Name') == rule_name) + assert(response.get('Arn') is not None) + + +@mock_events +def test_enable_disable_rule(): + rule_name = get_random_rule()['Name'] + client = generate_environment() + + # Rules should start out enabled in these tests. + rule = client.describe_rule(Name=rule_name) + assert(rule['State'] == 'ENABLED') + + client.disable_rule(Name=rule_name) + rule = client.describe_rule(Name=rule_name) + assert(rule['State'] == 'DISABLED') + + client.enable_rule(Name=rule_name) + rule = client.describe_rule(Name=rule_name) + assert(rule['State'] == 'ENABLED') + + +@mock_events +def test_list_rule_names_by_target(): + test_1_target = TARGETS['test-target-1'] + test_2_target = TARGETS['test-target-2'] + client = generate_environment() + + rules = client.list_rule_names_by_target(TargetArn=test_1_target['Arn']) + assert(len(rules['RuleNames']) == len(test_1_target['Rules'])) + for rule in rules['RuleNames']: + assert(rule in test_1_target['Rules']) + + rules = client.list_rule_names_by_target(TargetArn=test_2_target['Arn']) + assert(len(rules['RuleNames']) == len(test_2_target['Rules'])) + for rule in rules['RuleNames']: + assert(rule in test_2_target['Rules']) + + +@mock_events +def test_list_all_rules(): + client = generate_environment() + + rules = client.list_rules() + assert(len(rules['Rules']) == len(RULES)) + + +@mock_events +def test_delete_rule(): + client = generate_environment() + + client.delete_rule(Name=RULES[0]['Name']) + rules = client.list_rules() + assert(len(rules['Rules']) == len(RULES) - 1) + + +@mock_events +def test_list_targets_by_rule(): + rule_name = get_random_rule()['Name'] + client = generate_environment() + targets = client.list_targets_by_rule(Rule=rule_name) + + expected_targets = [] + for target in TARGETS: + if rule_name in TARGETS[target].get('Rules'): + expected_targets.append(target) + + assert(len(targets['Targets']) == len(expected_targets)) + + +@mock_events +def test_remove_targets(): + rule_name = get_random_rule()['Name'] + client = generate_environment() + + targets = client.list_targets_by_rule(Rule=rule_name)['Targets'] + targets_before = len(targets) + assert(targets_before > 0) + + client.remove_targets(Rule=rule_name, Ids=[targets[0]['Id']]) + + targets = client.list_targets_by_rule(Rule=rule_name)['Targets'] + targets_after = len(targets) + assert(targets_before - 1 == targets_after) + + +@mock_events +def test_permissions(): + client = boto3.client('events', 'eu-central-1') + + client.put_permission(Action='events:PutEvents', Principal='111111111111', StatementId='Account1') + client.put_permission(Action='events:PutEvents', Principal='222222222222', StatementId='Account2') + + resp = client.describe_event_bus() + resp_policy = json.loads(resp['Policy']) + assert len(resp_policy['Statement']) == 2 + + client.remove_permission(StatementId='Account2') + + resp = client.describe_event_bus() + resp_policy = json.loads(resp['Policy']) + assert len(resp_policy['Statement']) == 1 + assert resp_policy['Statement'][0]['Sid'] == 'Account1' + + +@mock_events +def test_put_events(): + client = boto3.client('events', 'eu-central-1') + + event = { + "Source": "com.mycompany.myapp", + "Detail": '{"key1": "value3", "key2": "value4"}', + "Resources": ["resource1", "resource2"], + "DetailType": "myDetailType" + } + + client.put_events(Entries=[event]) + # Boto3 
would error if it didn't return 200 OK + + with assert_raises(ClientError): + client.put_events(Entries=[event]*20) diff --git a/tests/test_glacier/test_glacier_archives.py b/tests/test_glacier/test_glacier_archives.py index e8fa6045eba8..ec43e613c2b7 100644 --- a/tests/test_glacier/test_glacier_archives.py +++ b/tests/test_glacier/test_glacier_archives.py @@ -1,21 +1,21 @@ -from __future__ import unicode_literals - -from tempfile import NamedTemporaryFile -import boto.glacier -import sure # noqa - -from moto import mock_glacier_deprecated - - -@mock_glacier_deprecated -def test_create_and_delete_archive(): - the_file = NamedTemporaryFile(delete=False) - the_file.write(b"some stuff") - the_file.close() - - conn = boto.glacier.connect_to_region("us-west-2") - vault = conn.create_vault("my_vault") - - archive_id = vault.upload_archive(the_file.name) - - vault.delete_archive(archive_id) +from __future__ import unicode_literals + +from tempfile import NamedTemporaryFile +import boto.glacier +import sure # noqa + +from moto import mock_glacier_deprecated + + +@mock_glacier_deprecated +def test_create_and_delete_archive(): + the_file = NamedTemporaryFile(delete=False) + the_file.write(b"some stuff") + the_file.close() + + conn = boto.glacier.connect_to_region("us-west-2") + vault = conn.create_vault("my_vault") + + archive_id = vault.upload_archive(the_file.name) + + vault.delete_archive(archive_id) diff --git a/tests/test_glacier/test_glacier_jobs.py b/tests/test_glacier/test_glacier_jobs.py index 152aa14c8ebf..761b47a662a9 100644 --- a/tests/test_glacier/test_glacier_jobs.py +++ b/tests/test_glacier/test_glacier_jobs.py @@ -1,90 +1,90 @@ -from __future__ import unicode_literals - -import json -import time - -from boto.glacier.layer1 import Layer1 -import sure # noqa - -from moto import mock_glacier_deprecated - - -@mock_glacier_deprecated -def test_init_glacier_job(): - conn = Layer1(region_name="us-west-2") - vault_name = "my_vault" - conn.create_vault(vault_name) - archive_id = conn.upload_archive( - vault_name, "some stuff", "", "", "some description") - - job_response = conn.initiate_job(vault_name, { - "ArchiveId": archive_id, - "Type": "archive-retrieval", - }) - job_id = job_response['JobId'] - job_response['Location'].should.equal( - "//vaults/my_vault/jobs/{0}".format(job_id)) - - -@mock_glacier_deprecated -def test_describe_job(): - conn = Layer1(region_name="us-west-2") - vault_name = "my_vault" - conn.create_vault(vault_name) - archive_id = conn.upload_archive( - vault_name, "some stuff", "", "", "some description") - job_response = conn.initiate_job(vault_name, { - "ArchiveId": archive_id, - "Type": "archive-retrieval", - }) - job_id = job_response['JobId'] - - job = conn.describe_job(vault_name, job_id) - joboutput = json.loads(job.read().decode("utf-8")) - - joboutput.should.have.key('Tier').which.should.equal('Standard') - joboutput.should.have.key('StatusCode').which.should.equal('InProgress') - joboutput.should.have.key('VaultARN').which.should.equal('arn:aws:glacier:RegionInfo:us-west-2:012345678901:vaults/my_vault') - - -@mock_glacier_deprecated -def test_list_glacier_jobs(): - conn = Layer1(region_name="us-west-2") - vault_name = "my_vault" - conn.create_vault(vault_name) - archive_id1 = conn.upload_archive( - vault_name, "some stuff", "", "", "some description")['ArchiveId'] - archive_id2 = conn.upload_archive( - vault_name, "some other stuff", "", "", "some description")['ArchiveId'] - - conn.initiate_job(vault_name, { - "ArchiveId": archive_id1, - "Type": 
"archive-retrieval", - }) - conn.initiate_job(vault_name, { - "ArchiveId": archive_id2, - "Type": "archive-retrieval", - }) - - jobs = conn.list_jobs(vault_name) - len(jobs['JobList']).should.equal(2) - - -@mock_glacier_deprecated -def test_get_job_output(): - conn = Layer1(region_name="us-west-2") - vault_name = "my_vault" - conn.create_vault(vault_name) - archive_response = conn.upload_archive( - vault_name, "some stuff", "", "", "some description") - archive_id = archive_response['ArchiveId'] - job_response = conn.initiate_job(vault_name, { - "ArchiveId": archive_id, - "Type": "archive-retrieval", - }) - job_id = job_response['JobId'] - - time.sleep(6) - - output = conn.get_job_output(vault_name, job_id) - output.read().decode("utf-8").should.equal("some stuff") +from __future__ import unicode_literals + +import json +import time + +from boto.glacier.layer1 import Layer1 +import sure # noqa + +from moto import mock_glacier_deprecated + + +@mock_glacier_deprecated +def test_init_glacier_job(): + conn = Layer1(region_name="us-west-2") + vault_name = "my_vault" + conn.create_vault(vault_name) + archive_id = conn.upload_archive( + vault_name, "some stuff", "", "", "some description") + + job_response = conn.initiate_job(vault_name, { + "ArchiveId": archive_id, + "Type": "archive-retrieval", + }) + job_id = job_response['JobId'] + job_response['Location'].should.equal( + "//vaults/my_vault/jobs/{0}".format(job_id)) + + +@mock_glacier_deprecated +def test_describe_job(): + conn = Layer1(region_name="us-west-2") + vault_name = "my_vault" + conn.create_vault(vault_name) + archive_id = conn.upload_archive( + vault_name, "some stuff", "", "", "some description") + job_response = conn.initiate_job(vault_name, { + "ArchiveId": archive_id, + "Type": "archive-retrieval", + }) + job_id = job_response['JobId'] + + job = conn.describe_job(vault_name, job_id) + joboutput = json.loads(job.read().decode("utf-8")) + + joboutput.should.have.key('Tier').which.should.equal('Standard') + joboutput.should.have.key('StatusCode').which.should.equal('InProgress') + joboutput.should.have.key('VaultARN').which.should.equal('arn:aws:glacier:RegionInfo:us-west-2:012345678901:vaults/my_vault') + + +@mock_glacier_deprecated +def test_list_glacier_jobs(): + conn = Layer1(region_name="us-west-2") + vault_name = "my_vault" + conn.create_vault(vault_name) + archive_id1 = conn.upload_archive( + vault_name, "some stuff", "", "", "some description")['ArchiveId'] + archive_id2 = conn.upload_archive( + vault_name, "some other stuff", "", "", "some description")['ArchiveId'] + + conn.initiate_job(vault_name, { + "ArchiveId": archive_id1, + "Type": "archive-retrieval", + }) + conn.initiate_job(vault_name, { + "ArchiveId": archive_id2, + "Type": "archive-retrieval", + }) + + jobs = conn.list_jobs(vault_name) + len(jobs['JobList']).should.equal(2) + + +@mock_glacier_deprecated +def test_get_job_output(): + conn = Layer1(region_name="us-west-2") + vault_name = "my_vault" + conn.create_vault(vault_name) + archive_response = conn.upload_archive( + vault_name, "some stuff", "", "", "some description") + archive_id = archive_response['ArchiveId'] + job_response = conn.initiate_job(vault_name, { + "ArchiveId": archive_id, + "Type": "archive-retrieval", + }) + job_id = job_response['JobId'] + + time.sleep(6) + + output = conn.get_job_output(vault_name, job_id) + output.read().decode("utf-8").should.equal("some stuff") diff --git a/tests/test_glacier/test_glacier_server.py b/tests/test_glacier/test_glacier_server.py index 
fd803442130d..b6c03428e4fb 100644 --- a/tests/test_glacier/test_glacier_server.py +++ b/tests/test_glacier/test_glacier_server.py @@ -1,22 +1,22 @@ -from __future__ import unicode_literals - -import json -import sure # noqa - -import moto.server as server -from moto import mock_glacier - -''' -Test the different server responses -''' - - -@mock_glacier -def test_list_vaults(): - backend = server.create_backend_app("glacier") - test_client = backend.test_client() - - res = test_client.get('/1234bcd/vaults') - - json.loads(res.data.decode("utf-8") - ).should.equal({u'Marker': None, u'VaultList': []}) +from __future__ import unicode_literals + +import json +import sure # noqa + +import moto.server as server +from moto import mock_glacier + +''' +Test the different server responses +''' + + +@mock_glacier +def test_list_vaults(): + backend = server.create_backend_app("glacier") + test_client = backend.test_client() + + res = test_client.get('/1234bcd/vaults') + + json.loads(res.data.decode("utf-8") + ).should.equal({u'Marker': None, u'VaultList': []}) diff --git a/tests/test_glacier/test_glacier_vaults.py b/tests/test_glacier/test_glacier_vaults.py index e64f40a90d2b..93c79423eff9 100644 --- a/tests/test_glacier/test_glacier_vaults.py +++ b/tests/test_glacier/test_glacier_vaults.py @@ -1,31 +1,31 @@ -from __future__ import unicode_literals - -import boto.glacier -import sure # noqa - -from moto import mock_glacier_deprecated - - -@mock_glacier_deprecated -def test_create_vault(): - conn = boto.glacier.connect_to_region("us-west-2") - - conn.create_vault("my_vault") - - vaults = conn.list_vaults() - vaults.should.have.length_of(1) - vaults[0].name.should.equal("my_vault") - - -@mock_glacier_deprecated -def test_delete_vault(): - conn = boto.glacier.connect_to_region("us-west-2") - - conn.create_vault("my_vault") - - vaults = conn.list_vaults() - vaults.should.have.length_of(1) - - conn.delete_vault("my_vault") - vaults = conn.list_vaults() - vaults.should.have.length_of(0) +from __future__ import unicode_literals + +import boto.glacier +import sure # noqa + +from moto import mock_glacier_deprecated + + +@mock_glacier_deprecated +def test_create_vault(): + conn = boto.glacier.connect_to_region("us-west-2") + + conn.create_vault("my_vault") + + vaults = conn.list_vaults() + vaults.should.have.length_of(1) + vaults[0].name.should.equal("my_vault") + + +@mock_glacier_deprecated +def test_delete_vault(): + conn = boto.glacier.connect_to_region("us-west-2") + + conn.create_vault("my_vault") + + vaults = conn.list_vaults() + vaults.should.have.length_of(1) + + conn.delete_vault("my_vault") + vaults = conn.list_vaults() + vaults.should.have.length_of(0) diff --git a/tests/test_glue/__init__.py b/tests/test_glue/__init__.py index baffc4882521..78b780d97562 100644 --- a/tests/test_glue/__init__.py +++ b/tests/test_glue/__init__.py @@ -1 +1 @@ -from __future__ import unicode_literals +from __future__ import unicode_literals diff --git a/tests/test_glue/fixtures/__init__.py b/tests/test_glue/fixtures/__init__.py index baffc4882521..78b780d97562 100644 --- a/tests/test_glue/fixtures/__init__.py +++ b/tests/test_glue/fixtures/__init__.py @@ -1 +1 @@ -from __future__ import unicode_literals +from __future__ import unicode_literals diff --git a/tests/test_glue/fixtures/datacatalog.py b/tests/test_glue/fixtures/datacatalog.py index edad2f0f4c26..13136158b562 100644 --- a/tests/test_glue/fixtures/datacatalog.py +++ b/tests/test_glue/fixtures/datacatalog.py @@ -1,56 +1,56 @@ -from __future__ import 
unicode_literals - -TABLE_INPUT = { - 'Owner': 'a_fake_owner', - 'Parameters': { - 'EXTERNAL': 'TRUE', - }, - 'Retention': 0, - 'StorageDescriptor': { - 'BucketColumns': [], - 'Compressed': False, - 'InputFormat': 'org.apache.hadoop.hive.ql.io.parquet.MapredParquetInputFormat', - 'NumberOfBuckets': -1, - 'OutputFormat': 'org.apache.hadoop.hive.ql.io.parquet.MapredParquetOutputFormat', - 'Parameters': {}, - 'SerdeInfo': { - 'Parameters': { - 'serialization.format': '1' - }, - 'SerializationLibrary': 'org.apache.hadoop.hive.ql.io.parquet.serde.ParquetHiveSerDe' - }, - 'SkewedInfo': { - 'SkewedColumnNames': [], - 'SkewedColumnValueLocationMaps': {}, - 'SkewedColumnValues': [] - }, - 'SortColumns': [], - 'StoredAsSubDirectories': False - }, - 'TableType': 'EXTERNAL_TABLE', -} - - -PARTITION_INPUT = { - # 'DatabaseName': 'dbname', - 'StorageDescriptor': { - 'BucketColumns': [], - 'Columns': [], - 'Compressed': False, - 'InputFormat': 'org.apache.hadoop.hive.ql.io.parquet.MapredParquetInputFormat', - 'Location': 's3://.../partition=value', - 'NumberOfBuckets': -1, - 'OutputFormat': 'org.apache.hadoop.hive.ql.io.parquet.MapredParquetOutputFormat', - 'Parameters': {}, - 'SerdeInfo': { - 'Parameters': {'path': 's3://...', 'serialization.format': '1'}, - 'SerializationLibrary': 'org.apache.hadoop.hive.ql.io.parquet.serde.ParquetHiveSerDe'}, - 'SkewedInfo': {'SkewedColumnNames': [], - 'SkewedColumnValueLocationMaps': {}, - 'SkewedColumnValues': []}, - 'SortColumns': [], - 'StoredAsSubDirectories': False, - }, - # 'TableName': 'source_table', - # 'Values': ['2018-06-26'], -} +from __future__ import unicode_literals + +TABLE_INPUT = { + 'Owner': 'a_fake_owner', + 'Parameters': { + 'EXTERNAL': 'TRUE', + }, + 'Retention': 0, + 'StorageDescriptor': { + 'BucketColumns': [], + 'Compressed': False, + 'InputFormat': 'org.apache.hadoop.hive.ql.io.parquet.MapredParquetInputFormat', + 'NumberOfBuckets': -1, + 'OutputFormat': 'org.apache.hadoop.hive.ql.io.parquet.MapredParquetOutputFormat', + 'Parameters': {}, + 'SerdeInfo': { + 'Parameters': { + 'serialization.format': '1' + }, + 'SerializationLibrary': 'org.apache.hadoop.hive.ql.io.parquet.serde.ParquetHiveSerDe' + }, + 'SkewedInfo': { + 'SkewedColumnNames': [], + 'SkewedColumnValueLocationMaps': {}, + 'SkewedColumnValues': [] + }, + 'SortColumns': [], + 'StoredAsSubDirectories': False + }, + 'TableType': 'EXTERNAL_TABLE', +} + + +PARTITION_INPUT = { + # 'DatabaseName': 'dbname', + 'StorageDescriptor': { + 'BucketColumns': [], + 'Columns': [], + 'Compressed': False, + 'InputFormat': 'org.apache.hadoop.hive.ql.io.parquet.MapredParquetInputFormat', + 'Location': 's3://.../partition=value', + 'NumberOfBuckets': -1, + 'OutputFormat': 'org.apache.hadoop.hive.ql.io.parquet.MapredParquetOutputFormat', + 'Parameters': {}, + 'SerdeInfo': { + 'Parameters': {'path': 's3://...', 'serialization.format': '1'}, + 'SerializationLibrary': 'org.apache.hadoop.hive.ql.io.parquet.serde.ParquetHiveSerDe'}, + 'SkewedInfo': {'SkewedColumnNames': [], + 'SkewedColumnValueLocationMaps': {}, + 'SkewedColumnValues': []}, + 'SortColumns': [], + 'StoredAsSubDirectories': False, + }, + # 'TableName': 'source_table', + # 'Values': ['2018-06-26'], +} diff --git a/tests/test_glue/helpers.py b/tests/test_glue/helpers.py index 331b99867280..48908532c460 100644 --- a/tests/test_glue/helpers.py +++ b/tests/test_glue/helpers.py @@ -1,119 +1,119 @@ -from __future__ import unicode_literals - -import copy - -from .fixtures.datacatalog import TABLE_INPUT, PARTITION_INPUT - - -def create_database(client, 
database_name): - return client.create_database( - DatabaseInput={ - 'Name': database_name - } - ) - - -def get_database(client, database_name): - return client.get_database(Name=database_name) - - -def create_table_input(database_name, table_name, columns=[], partition_keys=[]): - table_input = copy.deepcopy(TABLE_INPUT) - table_input['Name'] = table_name - table_input['PartitionKeys'] = partition_keys - table_input['StorageDescriptor']['Columns'] = columns - table_input['StorageDescriptor']['Location'] = 's3://my-bucket/{database_name}/{table_name}'.format( - database_name=database_name, - table_name=table_name - ) - return table_input - - -def create_table(client, database_name, table_name, table_input=None, **kwargs): - if table_input is None: - table_input = create_table_input(database_name, table_name, **kwargs) - - return client.create_table( - DatabaseName=database_name, - TableInput=table_input - ) - - -def update_table(client, database_name, table_name, table_input=None, **kwargs): - if table_input is None: - table_input = create_table_input(database_name, table_name, **kwargs) - - return client.update_table( - DatabaseName=database_name, - TableInput=table_input, - ) - - -def get_table(client, database_name, table_name): - return client.get_table( - DatabaseName=database_name, - Name=table_name - ) - - -def get_tables(client, database_name): - return client.get_tables( - DatabaseName=database_name - ) - - -def get_table_versions(client, database_name, table_name): - return client.get_table_versions( - DatabaseName=database_name, - TableName=table_name - ) - - -def get_table_version(client, database_name, table_name, version_id): - return client.get_table_version( - DatabaseName=database_name, - TableName=table_name, - VersionId=version_id, - ) - - -def create_partition_input(database_name, table_name, values=[], columns=[]): - root_path = 's3://my-bucket/{database_name}/{table_name}'.format( - database_name=database_name, - table_name=table_name - ) - - part_input = copy.deepcopy(PARTITION_INPUT) - part_input['Values'] = values - part_input['StorageDescriptor']['Columns'] = columns - part_input['StorageDescriptor']['SerdeInfo']['Parameters']['path'] = root_path - return part_input - - -def create_partition(client, database_name, table_name, partiton_input=None, **kwargs): - if partiton_input is None: - partiton_input = create_partition_input(database_name, table_name, **kwargs) - return client.create_partition( - DatabaseName=database_name, - TableName=table_name, - PartitionInput=partiton_input - ) - - -def update_partition(client, database_name, table_name, old_values=[], partiton_input=None, **kwargs): - if partiton_input is None: - partiton_input = create_partition_input(database_name, table_name, **kwargs) - return client.update_partition( - DatabaseName=database_name, - TableName=table_name, - PartitionInput=partiton_input, - PartitionValueList=old_values, - ) - - -def get_partition(client, database_name, table_name, values): - return client.get_partition( - DatabaseName=database_name, - TableName=table_name, - PartitionValues=values, - ) +from __future__ import unicode_literals + +import copy + +from .fixtures.datacatalog import TABLE_INPUT, PARTITION_INPUT + + +def create_database(client, database_name): + return client.create_database( + DatabaseInput={ + 'Name': database_name + } + ) + + +def get_database(client, database_name): + return client.get_database(Name=database_name) + + +def create_table_input(database_name, table_name, columns=[], partition_keys=[]): + 
table_input = copy.deepcopy(TABLE_INPUT) + table_input['Name'] = table_name + table_input['PartitionKeys'] = partition_keys + table_input['StorageDescriptor']['Columns'] = columns + table_input['StorageDescriptor']['Location'] = 's3://my-bucket/{database_name}/{table_name}'.format( + database_name=database_name, + table_name=table_name + ) + return table_input + + +def create_table(client, database_name, table_name, table_input=None, **kwargs): + if table_input is None: + table_input = create_table_input(database_name, table_name, **kwargs) + + return client.create_table( + DatabaseName=database_name, + TableInput=table_input + ) + + +def update_table(client, database_name, table_name, table_input=None, **kwargs): + if table_input is None: + table_input = create_table_input(database_name, table_name, **kwargs) + + return client.update_table( + DatabaseName=database_name, + TableInput=table_input, + ) + + +def get_table(client, database_name, table_name): + return client.get_table( + DatabaseName=database_name, + Name=table_name + ) + + +def get_tables(client, database_name): + return client.get_tables( + DatabaseName=database_name + ) + + +def get_table_versions(client, database_name, table_name): + return client.get_table_versions( + DatabaseName=database_name, + TableName=table_name + ) + + +def get_table_version(client, database_name, table_name, version_id): + return client.get_table_version( + DatabaseName=database_name, + TableName=table_name, + VersionId=version_id, + ) + + +def create_partition_input(database_name, table_name, values=[], columns=[]): + root_path = 's3://my-bucket/{database_name}/{table_name}'.format( + database_name=database_name, + table_name=table_name + ) + + part_input = copy.deepcopy(PARTITION_INPUT) + part_input['Values'] = values + part_input['StorageDescriptor']['Columns'] = columns + part_input['StorageDescriptor']['SerdeInfo']['Parameters']['path'] = root_path + return part_input + + +def create_partition(client, database_name, table_name, partition_input=None, **kwargs): + if partition_input is None: + partition_input = create_partition_input(database_name, table_name, **kwargs) + return client.create_partition( + DatabaseName=database_name, + TableName=table_name, + PartitionInput=partition_input + ) + + +def update_partition(client, database_name, table_name, old_values=[], partition_input=None, **kwargs): + if partition_input is None: + partition_input = create_partition_input(database_name, table_name, **kwargs) + return client.update_partition( + DatabaseName=database_name, + TableName=table_name, + PartitionInput=partition_input, + PartitionValueList=old_values, + ) + + +def get_partition(client, database_name, table_name, values): + return client.get_partition( + DatabaseName=database_name, + TableName=table_name, + PartitionValues=values, + ) diff --git a/tests/test_glue/test_datacatalog.py b/tests/test_glue/test_datacatalog.py index a457d5127e30..72daed28da22 100644 --- a/tests/test_glue/test_datacatalog.py +++ b/tests/test_glue/test_datacatalog.py @@ -1,426 +1,426 @@ -from __future__ import unicode_literals - -import sure # noqa -import re -from nose.tools import assert_raises -import boto3 -from botocore.client import ClientError - - -from datetime import datetime -import pytz - -from moto import mock_glue -from . 
import helpers - - -@mock_glue -def test_create_database(): - client = boto3.client('glue', region_name='us-east-1') - database_name = 'myspecialdatabase' - helpers.create_database(client, database_name) - - response = helpers.get_database(client, database_name) - database = response['Database'] - - database.should.equal({'Name': database_name}) - - -@mock_glue -def test_create_database_already_exists(): - client = boto3.client('glue', region_name='us-east-1') - database_name = 'cantcreatethisdatabasetwice' - helpers.create_database(client, database_name) - - with assert_raises(ClientError) as exc: - helpers.create_database(client, database_name) - - exc.exception.response['Error']['Code'].should.equal('AlreadyExistsException') - - -@mock_glue -def test_get_database_not_exits(): - client = boto3.client('glue', region_name='us-east-1') - database_name = 'nosuchdatabase' - - with assert_raises(ClientError) as exc: - helpers.get_database(client, database_name) - - exc.exception.response['Error']['Code'].should.equal('EntityNotFoundException') - exc.exception.response['Error']['Message'].should.match('Database nosuchdatabase not found') - - -@mock_glue -def test_create_table(): - client = boto3.client('glue', region_name='us-east-1') - database_name = 'myspecialdatabase' - helpers.create_database(client, database_name) - - table_name = 'myspecialtable' - table_input = helpers.create_table_input(database_name, table_name) - helpers.create_table(client, database_name, table_name, table_input) - - response = helpers.get_table(client, database_name, table_name) - table = response['Table'] - - table['Name'].should.equal(table_input['Name']) - table['StorageDescriptor'].should.equal(table_input['StorageDescriptor']) - table['PartitionKeys'].should.equal(table_input['PartitionKeys']) - - -@mock_glue -def test_create_table_already_exists(): - client = boto3.client('glue', region_name='us-east-1') - database_name = 'myspecialdatabase' - helpers.create_database(client, database_name) - - table_name = 'cantcreatethistabletwice' - helpers.create_table(client, database_name, table_name) - - with assert_raises(ClientError) as exc: - helpers.create_table(client, database_name, table_name) - - exc.exception.response['Error']['Code'].should.equal('AlreadyExistsException') - - -@mock_glue -def test_get_tables(): - client = boto3.client('glue', region_name='us-east-1') - database_name = 'myspecialdatabase' - helpers.create_database(client, database_name) - - table_names = ['myfirsttable', 'mysecondtable', 'mythirdtable'] - table_inputs = {} - - for table_name in table_names: - table_input = helpers.create_table_input(database_name, table_name) - table_inputs[table_name] = table_input - helpers.create_table(client, database_name, table_name, table_input) - - response = helpers.get_tables(client, database_name) - - tables = response['TableList'] - - tables.should.have.length_of(3) - - for table in tables: - table_name = table['Name'] - table_name.should.equal(table_inputs[table_name]['Name']) - table['StorageDescriptor'].should.equal(table_inputs[table_name]['StorageDescriptor']) - table['PartitionKeys'].should.equal(table_inputs[table_name]['PartitionKeys']) - - -@mock_glue -def test_get_table_versions(): - client = boto3.client('glue', region_name='us-east-1') - database_name = 'myspecialdatabase' - helpers.create_database(client, database_name) - - table_name = 'myfirsttable' - version_inputs = {} - - table_input = helpers.create_table_input(database_name, table_name) - helpers.create_table(client, 
database_name, table_name, table_input) - version_inputs["1"] = table_input - - columns = [{'Name': 'country', 'Type': 'string'}] - table_input = helpers.create_table_input(database_name, table_name, columns=columns) - helpers.update_table(client, database_name, table_name, table_input) - version_inputs["2"] = table_input - - # Updateing with an indentical input should still create a new version - helpers.update_table(client, database_name, table_name, table_input) - version_inputs["3"] = table_input - - response = helpers.get_table_versions(client, database_name, table_name) - - vers = response['TableVersions'] - - vers.should.have.length_of(3) - vers[0]['Table']['StorageDescriptor']['Columns'].should.equal([]) - vers[-1]['Table']['StorageDescriptor']['Columns'].should.equal(columns) - - for n, ver in enumerate(vers): - n = str(n + 1) - ver['VersionId'].should.equal(n) - ver['Table']['Name'].should.equal(table_name) - ver['Table']['StorageDescriptor'].should.equal(version_inputs[n]['StorageDescriptor']) - ver['Table']['PartitionKeys'].should.equal(version_inputs[n]['PartitionKeys']) - - response = helpers.get_table_version(client, database_name, table_name, "3") - ver = response['TableVersion'] - - ver['VersionId'].should.equal("3") - ver['Table']['Name'].should.equal(table_name) - ver['Table']['StorageDescriptor']['Columns'].should.equal(columns) - - -@mock_glue -def test_get_table_version_not_found(): - client = boto3.client('glue', region_name='us-east-1') - database_name = 'myspecialdatabase' - table_name = 'myfirsttable' - helpers.create_database(client, database_name) - helpers.create_table(client, database_name, table_name) - - with assert_raises(ClientError) as exc: - helpers.get_table_version(client, database_name, 'myfirsttable', "20") - - exc.exception.response['Error']['Code'].should.equal('EntityNotFoundException') - exc.exception.response['Error']['Message'].should.match('version', re.I) - - -@mock_glue -def test_get_table_version_invalid_input(): - client = boto3.client('glue', region_name='us-east-1') - database_name = 'myspecialdatabase' - table_name = 'myfirsttable' - helpers.create_database(client, database_name) - helpers.create_table(client, database_name, table_name) - - with assert_raises(ClientError) as exc: - helpers.get_table_version(client, database_name, 'myfirsttable', "10not-an-int") - - exc.exception.response['Error']['Code'].should.equal('InvalidInputException') - - -@mock_glue -def test_get_table_not_exits(): - client = boto3.client('glue', region_name='us-east-1') - database_name = 'myspecialdatabase' - helpers.create_database(client, database_name) - - with assert_raises(ClientError) as exc: - helpers.get_table(client, database_name, 'myfirsttable') - - exc.exception.response['Error']['Code'].should.equal('EntityNotFoundException') - exc.exception.response['Error']['Message'].should.match('Table myfirsttable not found') - - -@mock_glue -def test_get_table_when_database_not_exits(): - client = boto3.client('glue', region_name='us-east-1') - database_name = 'nosuchdatabase' - - with assert_raises(ClientError) as exc: - helpers.get_table(client, database_name, 'myfirsttable') - - exc.exception.response['Error']['Code'].should.equal('EntityNotFoundException') - exc.exception.response['Error']['Message'].should.match('Database nosuchdatabase not found') - - -@mock_glue -def test_get_partitions_empty(): - client = boto3.client('glue', region_name='us-east-1') - database_name = 'myspecialdatabase' - table_name = 'myfirsttable' - helpers.create_database(client, 
database_name) - - helpers.create_table(client, database_name, table_name) - - response = client.get_partitions(DatabaseName=database_name, TableName=table_name) - - response['Partitions'].should.have.length_of(0) - - -@mock_glue -def test_create_partition(): - client = boto3.client('glue', region_name='us-east-1') - database_name = 'myspecialdatabase' - table_name = 'myfirsttable' - values = ['2018-10-01'] - helpers.create_database(client, database_name) - - helpers.create_table(client, database_name, table_name) - - before = datetime.now(pytz.utc) - - part_input = helpers.create_partition_input(database_name, table_name, values=values) - helpers.create_partition(client, database_name, table_name, part_input) - - after = datetime.now(pytz.utc) - - response = client.get_partitions(DatabaseName=database_name, TableName=table_name) - - partitions = response['Partitions'] - - partitions.should.have.length_of(1) - - partition = partitions[0] - - partition['TableName'].should.equal(table_name) - partition['StorageDescriptor'].should.equal(part_input['StorageDescriptor']) - partition['Values'].should.equal(values) - partition['CreationTime'].should.be.greater_than(before) - partition['CreationTime'].should.be.lower_than(after) - - -@mock_glue -def test_create_partition_already_exist(): - client = boto3.client('glue', region_name='us-east-1') - database_name = 'myspecialdatabase' - table_name = 'myfirsttable' - values = ['2018-10-01'] - helpers.create_database(client, database_name) - - helpers.create_table(client, database_name, table_name) - - helpers.create_partition(client, database_name, table_name, values=values) - - with assert_raises(ClientError) as exc: - helpers.create_partition(client, database_name, table_name, values=values) - - exc.exception.response['Error']['Code'].should.equal('AlreadyExistsException') - - -@mock_glue -def test_get_partition_not_found(): - client = boto3.client('glue', region_name='us-east-1') - database_name = 'myspecialdatabase' - table_name = 'myfirsttable' - values = ['2018-10-01'] - helpers.create_database(client, database_name) - - helpers.create_table(client, database_name, table_name) - - with assert_raises(ClientError) as exc: - helpers.get_partition(client, database_name, table_name, values) - - exc.exception.response['Error']['Code'].should.equal('EntityNotFoundException') - exc.exception.response['Error']['Message'].should.match('partition') - - -@mock_glue -def test_get_partition(): - client = boto3.client('glue', region_name='us-east-1') - database_name = 'myspecialdatabase' - table_name = 'myfirsttable' - helpers.create_database(client, database_name) - - helpers.create_table(client, database_name, table_name) - - values = [['2018-10-01'], ['2018-09-01']] - - helpers.create_partition(client, database_name, table_name, values=values[0]) - helpers.create_partition(client, database_name, table_name, values=values[1]) - - response = client.get_partition(DatabaseName=database_name, TableName=table_name, PartitionValues=values[1]) - - partition = response['Partition'] - - partition['TableName'].should.equal(table_name) - partition['Values'].should.equal(values[1]) - - -@mock_glue -def test_update_partition_not_found_moving(): - client = boto3.client('glue', region_name='us-east-1') - database_name = 'myspecialdatabase' - table_name = 'myfirsttable' - - helpers.create_database(client, database_name) - helpers.create_table(client, database_name, table_name) - - with assert_raises(ClientError) as exc: - helpers.update_partition(client, database_name, 
table_name, old_values=['0000-00-00'], values=['2018-10-02']) - - exc.exception.response['Error']['Code'].should.equal('EntityNotFoundException') - exc.exception.response['Error']['Message'].should.match('partition') - - -@mock_glue -def test_update_partition_not_found_change_in_place(): - client = boto3.client('glue', region_name='us-east-1') - database_name = 'myspecialdatabase' - table_name = 'myfirsttable' - values = ['2018-10-01'] - - helpers.create_database(client, database_name) - helpers.create_table(client, database_name, table_name) - - with assert_raises(ClientError) as exc: - helpers.update_partition(client, database_name, table_name, old_values=values, values=values) - - exc.exception.response['Error']['Code'].should.equal('EntityNotFoundException') - exc.exception.response['Error']['Message'].should.match('partition') - - -@mock_glue -def test_update_partition_cannot_overwrite(): - client = boto3.client('glue', region_name='us-east-1') - database_name = 'myspecialdatabase' - table_name = 'myfirsttable' - helpers.create_database(client, database_name) - - helpers.create_table(client, database_name, table_name) - - values = [['2018-10-01'], ['2018-09-01']] - - helpers.create_partition(client, database_name, table_name, values=values[0]) - helpers.create_partition(client, database_name, table_name, values=values[1]) - - with assert_raises(ClientError) as exc: - helpers.update_partition(client, database_name, table_name, old_values=values[0], values=values[1]) - - exc.exception.response['Error']['Code'].should.equal('AlreadyExistsException') - - -@mock_glue -def test_update_partition(): - client = boto3.client('glue', region_name='us-east-1') - database_name = 'myspecialdatabase' - table_name = 'myfirsttable' - values = ['2018-10-01'] - - helpers.create_database(client, database_name) - helpers.create_table(client, database_name, table_name) - helpers.create_partition(client, database_name, table_name, values=values) - - response = helpers.update_partition( - client, - database_name, - table_name, - old_values=values, - values=values, - columns=[{'Name': 'country', 'Type': 'string'}], - ) - - response = client.get_partition(DatabaseName=database_name, TableName=table_name, PartitionValues=values) - partition = response['Partition'] - - partition['TableName'].should.equal(table_name) - partition['StorageDescriptor']['Columns'].should.equal([{'Name': 'country', 'Type': 'string'}]) - - -@mock_glue -def test_update_partition_move(): - client = boto3.client('glue', region_name='us-east-1') - database_name = 'myspecialdatabase' - table_name = 'myfirsttable' - values = ['2018-10-01'] - new_values = ['2018-09-01'] - - helpers.create_database(client, database_name) - helpers.create_table(client, database_name, table_name) - helpers.create_partition(client, database_name, table_name, values=values) - - response = helpers.update_partition( - client, - database_name, - table_name, - old_values=values, - values=new_values, - columns=[{'Name': 'country', 'Type': 'string'}], - ) - - with assert_raises(ClientError) as exc: - helpers.get_partition(client, database_name, table_name, values) - - # Old partition shouldn't exist anymore - exc.exception.response['Error']['Code'].should.equal('EntityNotFoundException') - - response = client.get_partition(DatabaseName=database_name, TableName=table_name, PartitionValues=new_values) - partition = response['Partition'] - - partition['TableName'].should.equal(table_name) - partition['StorageDescriptor']['Columns'].should.equal([{'Name': 'country', 'Type': 
'string'}]) +from __future__ import unicode_literals + +import sure # noqa +import re +from nose.tools import assert_raises +import boto3 +from botocore.client import ClientError + + +from datetime import datetime +import pytz + +from moto import mock_glue +from . import helpers + + +@mock_glue +def test_create_database(): + client = boto3.client('glue', region_name='us-east-1') + database_name = 'myspecialdatabase' + helpers.create_database(client, database_name) + + response = helpers.get_database(client, database_name) + database = response['Database'] + + database.should.equal({'Name': database_name}) + + +@mock_glue +def test_create_database_already_exists(): + client = boto3.client('glue', region_name='us-east-1') + database_name = 'cantcreatethisdatabasetwice' + helpers.create_database(client, database_name) + + with assert_raises(ClientError) as exc: + helpers.create_database(client, database_name) + + exc.exception.response['Error']['Code'].should.equal('AlreadyExistsException') + + +@mock_glue +def test_get_database_not_exits(): + client = boto3.client('glue', region_name='us-east-1') + database_name = 'nosuchdatabase' + + with assert_raises(ClientError) as exc: + helpers.get_database(client, database_name) + + exc.exception.response['Error']['Code'].should.equal('EntityNotFoundException') + exc.exception.response['Error']['Message'].should.match('Database nosuchdatabase not found') + + +@mock_glue +def test_create_table(): + client = boto3.client('glue', region_name='us-east-1') + database_name = 'myspecialdatabase' + helpers.create_database(client, database_name) + + table_name = 'myspecialtable' + table_input = helpers.create_table_input(database_name, table_name) + helpers.create_table(client, database_name, table_name, table_input) + + response = helpers.get_table(client, database_name, table_name) + table = response['Table'] + + table['Name'].should.equal(table_input['Name']) + table['StorageDescriptor'].should.equal(table_input['StorageDescriptor']) + table['PartitionKeys'].should.equal(table_input['PartitionKeys']) + + +@mock_glue +def test_create_table_already_exists(): + client = boto3.client('glue', region_name='us-east-1') + database_name = 'myspecialdatabase' + helpers.create_database(client, database_name) + + table_name = 'cantcreatethistabletwice' + helpers.create_table(client, database_name, table_name) + + with assert_raises(ClientError) as exc: + helpers.create_table(client, database_name, table_name) + + exc.exception.response['Error']['Code'].should.equal('AlreadyExistsException') + + +@mock_glue +def test_get_tables(): + client = boto3.client('glue', region_name='us-east-1') + database_name = 'myspecialdatabase' + helpers.create_database(client, database_name) + + table_names = ['myfirsttable', 'mysecondtable', 'mythirdtable'] + table_inputs = {} + + for table_name in table_names: + table_input = helpers.create_table_input(database_name, table_name) + table_inputs[table_name] = table_input + helpers.create_table(client, database_name, table_name, table_input) + + response = helpers.get_tables(client, database_name) + + tables = response['TableList'] + + tables.should.have.length_of(3) + + for table in tables: + table_name = table['Name'] + table_name.should.equal(table_inputs[table_name]['Name']) + table['StorageDescriptor'].should.equal(table_inputs[table_name]['StorageDescriptor']) + table['PartitionKeys'].should.equal(table_inputs[table_name]['PartitionKeys']) + + +@mock_glue +def test_get_table_versions(): + client = boto3.client('glue', 
region_name='us-east-1') + database_name = 'myspecialdatabase' + helpers.create_database(client, database_name) + + table_name = 'myfirsttable' + version_inputs = {} + + table_input = helpers.create_table_input(database_name, table_name) + helpers.create_table(client, database_name, table_name, table_input) + version_inputs["1"] = table_input + + columns = [{'Name': 'country', 'Type': 'string'}] + table_input = helpers.create_table_input(database_name, table_name, columns=columns) + helpers.update_table(client, database_name, table_name, table_input) + version_inputs["2"] = table_input + + # Updating with an identical input should still create a new version + helpers.update_table(client, database_name, table_name, table_input) + version_inputs["3"] = table_input + + response = helpers.get_table_versions(client, database_name, table_name) + + vers = response['TableVersions'] + + vers.should.have.length_of(3) + vers[0]['Table']['StorageDescriptor']['Columns'].should.equal([]) + vers[-1]['Table']['StorageDescriptor']['Columns'].should.equal(columns) + + for n, ver in enumerate(vers): + n = str(n + 1) + ver['VersionId'].should.equal(n) + ver['Table']['Name'].should.equal(table_name) + ver['Table']['StorageDescriptor'].should.equal(version_inputs[n]['StorageDescriptor']) + ver['Table']['PartitionKeys'].should.equal(version_inputs[n]['PartitionKeys']) + + response = helpers.get_table_version(client, database_name, table_name, "3") + ver = response['TableVersion'] + + ver['VersionId'].should.equal("3") + ver['Table']['Name'].should.equal(table_name) + ver['Table']['StorageDescriptor']['Columns'].should.equal(columns) + + +@mock_glue +def test_get_table_version_not_found(): + client = boto3.client('glue', region_name='us-east-1') + database_name = 'myspecialdatabase' + table_name = 'myfirsttable' + helpers.create_database(client, database_name) + helpers.create_table(client, database_name, table_name) + + with assert_raises(ClientError) as exc: + helpers.get_table_version(client, database_name, 'myfirsttable', "20") + + exc.exception.response['Error']['Code'].should.equal('EntityNotFoundException') + exc.exception.response['Error']['Message'].should.match('version', re.I) + + +@mock_glue +def test_get_table_version_invalid_input(): + client = boto3.client('glue', region_name='us-east-1') + database_name = 'myspecialdatabase' + table_name = 'myfirsttable' + helpers.create_database(client, database_name) + helpers.create_table(client, database_name, table_name) + + with assert_raises(ClientError) as exc: + helpers.get_table_version(client, database_name, 'myfirsttable', "10not-an-int") + + exc.exception.response['Error']['Code'].should.equal('InvalidInputException') + + +@mock_glue +def test_get_table_not_exits(): + client = boto3.client('glue', region_name='us-east-1') + database_name = 'myspecialdatabase' + helpers.create_database(client, database_name) + + with assert_raises(ClientError) as exc: + helpers.get_table(client, database_name, 'myfirsttable') + + exc.exception.response['Error']['Code'].should.equal('EntityNotFoundException') + exc.exception.response['Error']['Message'].should.match('Table myfirsttable not found') + + +@mock_glue +def test_get_table_when_database_not_exits(): + client = boto3.client('glue', region_name='us-east-1') + database_name = 'nosuchdatabase' + + with assert_raises(ClientError) as exc: + helpers.get_table(client, database_name, 'myfirsttable') + + exc.exception.response['Error']['Code'].should.equal('EntityNotFoundException') + 
exc.exception.response['Error']['Message'].should.match('Database nosuchdatabase not found') + + +@mock_glue +def test_get_partitions_empty(): + client = boto3.client('glue', region_name='us-east-1') + database_name = 'myspecialdatabase' + table_name = 'myfirsttable' + helpers.create_database(client, database_name) + + helpers.create_table(client, database_name, table_name) + + response = client.get_partitions(DatabaseName=database_name, TableName=table_name) + + response['Partitions'].should.have.length_of(0) + + +@mock_glue +def test_create_partition(): + client = boto3.client('glue', region_name='us-east-1') + database_name = 'myspecialdatabase' + table_name = 'myfirsttable' + values = ['2018-10-01'] + helpers.create_database(client, database_name) + + helpers.create_table(client, database_name, table_name) + + before = datetime.now(pytz.utc) + + part_input = helpers.create_partition_input(database_name, table_name, values=values) + helpers.create_partition(client, database_name, table_name, part_input) + + after = datetime.now(pytz.utc) + + response = client.get_partitions(DatabaseName=database_name, TableName=table_name) + + partitions = response['Partitions'] + + partitions.should.have.length_of(1) + + partition = partitions[0] + + partition['TableName'].should.equal(table_name) + partition['StorageDescriptor'].should.equal(part_input['StorageDescriptor']) + partition['Values'].should.equal(values) + partition['CreationTime'].should.be.greater_than(before) + partition['CreationTime'].should.be.lower_than(after) + + +@mock_glue +def test_create_partition_already_exist(): + client = boto3.client('glue', region_name='us-east-1') + database_name = 'myspecialdatabase' + table_name = 'myfirsttable' + values = ['2018-10-01'] + helpers.create_database(client, database_name) + + helpers.create_table(client, database_name, table_name) + + helpers.create_partition(client, database_name, table_name, values=values) + + with assert_raises(ClientError) as exc: + helpers.create_partition(client, database_name, table_name, values=values) + + exc.exception.response['Error']['Code'].should.equal('AlreadyExistsException') + + +@mock_glue +def test_get_partition_not_found(): + client = boto3.client('glue', region_name='us-east-1') + database_name = 'myspecialdatabase' + table_name = 'myfirsttable' + values = ['2018-10-01'] + helpers.create_database(client, database_name) + + helpers.create_table(client, database_name, table_name) + + with assert_raises(ClientError) as exc: + helpers.get_partition(client, database_name, table_name, values) + + exc.exception.response['Error']['Code'].should.equal('EntityNotFoundException') + exc.exception.response['Error']['Message'].should.match('partition') + + +@mock_glue +def test_get_partition(): + client = boto3.client('glue', region_name='us-east-1') + database_name = 'myspecialdatabase' + table_name = 'myfirsttable' + helpers.create_database(client, database_name) + + helpers.create_table(client, database_name, table_name) + + values = [['2018-10-01'], ['2018-09-01']] + + helpers.create_partition(client, database_name, table_name, values=values[0]) + helpers.create_partition(client, database_name, table_name, values=values[1]) + + response = client.get_partition(DatabaseName=database_name, TableName=table_name, PartitionValues=values[1]) + + partition = response['Partition'] + + partition['TableName'].should.equal(table_name) + partition['Values'].should.equal(values[1]) + + +@mock_glue +def test_update_partition_not_found_moving(): + client = boto3.client('glue', 
region_name='us-east-1') + database_name = 'myspecialdatabase' + table_name = 'myfirsttable' + + helpers.create_database(client, database_name) + helpers.create_table(client, database_name, table_name) + + with assert_raises(ClientError) as exc: + helpers.update_partition(client, database_name, table_name, old_values=['0000-00-00'], values=['2018-10-02']) + + exc.exception.response['Error']['Code'].should.equal('EntityNotFoundException') + exc.exception.response['Error']['Message'].should.match('partition') + + +@mock_glue +def test_update_partition_not_found_change_in_place(): + client = boto3.client('glue', region_name='us-east-1') + database_name = 'myspecialdatabase' + table_name = 'myfirsttable' + values = ['2018-10-01'] + + helpers.create_database(client, database_name) + helpers.create_table(client, database_name, table_name) + + with assert_raises(ClientError) as exc: + helpers.update_partition(client, database_name, table_name, old_values=values, values=values) + + exc.exception.response['Error']['Code'].should.equal('EntityNotFoundException') + exc.exception.response['Error']['Message'].should.match('partition') + + +@mock_glue +def test_update_partition_cannot_overwrite(): + client = boto3.client('glue', region_name='us-east-1') + database_name = 'myspecialdatabase' + table_name = 'myfirsttable' + helpers.create_database(client, database_name) + + helpers.create_table(client, database_name, table_name) + + values = [['2018-10-01'], ['2018-09-01']] + + helpers.create_partition(client, database_name, table_name, values=values[0]) + helpers.create_partition(client, database_name, table_name, values=values[1]) + + with assert_raises(ClientError) as exc: + helpers.update_partition(client, database_name, table_name, old_values=values[0], values=values[1]) + + exc.exception.response['Error']['Code'].should.equal('AlreadyExistsException') + + +@mock_glue +def test_update_partition(): + client = boto3.client('glue', region_name='us-east-1') + database_name = 'myspecialdatabase' + table_name = 'myfirsttable' + values = ['2018-10-01'] + + helpers.create_database(client, database_name) + helpers.create_table(client, database_name, table_name) + helpers.create_partition(client, database_name, table_name, values=values) + + response = helpers.update_partition( + client, + database_name, + table_name, + old_values=values, + values=values, + columns=[{'Name': 'country', 'Type': 'string'}], + ) + + response = client.get_partition(DatabaseName=database_name, TableName=table_name, PartitionValues=values) + partition = response['Partition'] + + partition['TableName'].should.equal(table_name) + partition['StorageDescriptor']['Columns'].should.equal([{'Name': 'country', 'Type': 'string'}]) + + +@mock_glue +def test_update_partition_move(): + client = boto3.client('glue', region_name='us-east-1') + database_name = 'myspecialdatabase' + table_name = 'myfirsttable' + values = ['2018-10-01'] + new_values = ['2018-09-01'] + + helpers.create_database(client, database_name) + helpers.create_table(client, database_name, table_name) + helpers.create_partition(client, database_name, table_name, values=values) + + response = helpers.update_partition( + client, + database_name, + table_name, + old_values=values, + values=new_values, + columns=[{'Name': 'country', 'Type': 'string'}], + ) + + with assert_raises(ClientError) as exc: + helpers.get_partition(client, database_name, table_name, values) + + # Old partition shouldn't exist anymore + 
exc.exception.response['Error']['Code'].should.equal('EntityNotFoundException') + + response = client.get_partition(DatabaseName=database_name, TableName=table_name, PartitionValues=new_values) + partition = response['Partition'] + + partition['TableName'].should.equal(table_name) + partition['StorageDescriptor']['Columns'].should.equal([{'Name': 'country', 'Type': 'string'}]) diff --git a/tests/test_iam/test_iam.py b/tests/test_iam/test_iam.py index bc23ff7126dd..1db4dae1e2cc 100644 --- a/tests/test_iam/test_iam.py +++ b/tests/test_iam/test_iam.py @@ -1,760 +1,760 @@ -from __future__ import unicode_literals -import base64 - -import boto -import boto3 -import sure # noqa -from boto.exception import BotoServerError -from botocore.exceptions import ClientError -from moto import mock_iam, mock_iam_deprecated -from moto.iam.models import aws_managed_policies -from nose.tools import assert_raises, assert_equals -from nose.tools import raises - -from tests.helpers import requires_boto_gte - - -@mock_iam_deprecated() -def test_get_all_server_certs(): - conn = boto.connect_iam() - - conn.upload_server_cert("certname", "certbody", "privatekey") - certs = conn.get_all_server_certs()['list_server_certificates_response'][ - 'list_server_certificates_result']['server_certificate_metadata_list'] - certs.should.have.length_of(1) - cert1 = certs[0] - cert1.server_certificate_name.should.equal("certname") - cert1.arn.should.equal( - "arn:aws:iam::123456789012:server-certificate/certname") - - -@mock_iam_deprecated() -def test_get_server_cert_doesnt_exist(): - conn = boto.connect_iam() - - with assert_raises(BotoServerError): - conn.get_server_certificate("NonExistant") - - -@mock_iam_deprecated() -def test_get_server_cert(): - conn = boto.connect_iam() - - conn.upload_server_cert("certname", "certbody", "privatekey") - cert = conn.get_server_certificate("certname") - cert.server_certificate_name.should.equal("certname") - cert.arn.should.equal( - "arn:aws:iam::123456789012:server-certificate/certname") - - -@mock_iam_deprecated() -def test_upload_server_cert(): - conn = boto.connect_iam() - - conn.upload_server_cert("certname", "certbody", "privatekey") - cert = conn.get_server_certificate("certname") - cert.server_certificate_name.should.equal("certname") - cert.arn.should.equal( - "arn:aws:iam::123456789012:server-certificate/certname") - - -@mock_iam_deprecated() -def test_delete_server_cert(): - conn = boto.connect_iam() - - conn.upload_server_cert("certname", "certbody", "privatekey") - conn.get_server_certificate("certname") - conn.delete_server_cert("certname") - with assert_raises(BotoServerError): - conn.get_server_certificate("certname") - with assert_raises(BotoServerError): - conn.delete_server_cert("certname") - - -@mock_iam_deprecated() -@raises(BotoServerError) -def test_get_role__should_throw__when_role_does_not_exist(): - conn = boto.connect_iam() - - conn.get_role('unexisting_role') - - -@mock_iam_deprecated() -@raises(BotoServerError) -def test_get_instance_profile__should_throw__when_instance_profile_does_not_exist(): - conn = boto.connect_iam() - - conn.get_instance_profile('unexisting_instance_profile') - - -@mock_iam_deprecated() -def test_create_role_and_instance_profile(): - conn = boto.connect_iam() - conn.create_instance_profile("my-profile", path="my-path") - conn.create_role( - "my-role", assume_role_policy_document="some policy", path="my-path") - - conn.add_role_to_instance_profile("my-profile", "my-role") - - role = conn.get_role("my-role") - 
role.path.should.equal("my-path") - role.assume_role_policy_document.should.equal("some policy") - - profile = conn.get_instance_profile("my-profile") - profile.path.should.equal("my-path") - role_from_profile = list(profile.roles.values())[0] - role_from_profile['role_id'].should.equal(role.role_id) - role_from_profile['role_name'].should.equal("my-role") - - conn.list_roles().roles[0].role_name.should.equal('my-role') - - -@mock_iam_deprecated() -def test_remove_role_from_instance_profile(): - conn = boto.connect_iam() - conn.create_instance_profile("my-profile", path="my-path") - conn.create_role( - "my-role", assume_role_policy_document="some policy", path="my-path") - conn.add_role_to_instance_profile("my-profile", "my-role") - - profile = conn.get_instance_profile("my-profile") - role_from_profile = list(profile.roles.values())[0] - role_from_profile['role_name'].should.equal("my-role") - - conn.remove_role_from_instance_profile("my-profile", "my-role") - - profile = conn.get_instance_profile("my-profile") - dict(profile.roles).should.be.empty - - -@mock_iam() -def test_get_login_profile(): - conn = boto3.client('iam', region_name='us-east-1') - conn.create_user(UserName='my-user') - conn.create_login_profile(UserName='my-user', Password='my-pass') - - response = conn.get_login_profile(UserName='my-user') - response['LoginProfile']['UserName'].should.equal('my-user') - - -@mock_iam() -def test_update_login_profile(): - conn = boto3.client('iam', region_name='us-east-1') - conn.create_user(UserName='my-user') - conn.create_login_profile(UserName='my-user', Password='my-pass') - response = conn.get_login_profile(UserName='my-user') - response['LoginProfile'].get('PasswordResetRequired').should.equal(None) - - conn.update_login_profile(UserName='my-user', Password='new-pass', PasswordResetRequired=True) - response = conn.get_login_profile(UserName='my-user') - response['LoginProfile'].get('PasswordResetRequired').should.equal(True) - - -@mock_iam() -def test_delete_role(): - conn = boto3.client('iam', region_name='us-east-1') - - with assert_raises(ClientError): - conn.delete_role(RoleName="my-role") - - conn.create_role(RoleName="my-role", AssumeRolePolicyDocument="some policy", Path="/my-path/") - role = conn.get_role(RoleName="my-role") - role.get('Role').get('Arn').should.equal('arn:aws:iam::123456789012:role/my-path/my-role') - - conn.delete_role(RoleName="my-role") - - with assert_raises(ClientError): - conn.get_role(RoleName="my-role") - - -@mock_iam_deprecated() -def test_list_instance_profiles(): - conn = boto.connect_iam() - conn.create_instance_profile("my-profile", path="my-path") - conn.create_role("my-role", path="my-path") - - conn.add_role_to_instance_profile("my-profile", "my-role") - - profiles = conn.list_instance_profiles().instance_profiles - - len(profiles).should.equal(1) - profiles[0].instance_profile_name.should.equal("my-profile") - profiles[0].roles.role_name.should.equal("my-role") - - -@mock_iam_deprecated() -def test_list_instance_profiles_for_role(): - conn = boto.connect_iam() - - conn.create_role(role_name="my-role", - assume_role_policy_document="some policy", path="my-path") - conn.create_role(role_name="my-role2", - assume_role_policy_document="some policy2", path="my-path2") - - profile_name_list = ['my-profile', 'my-profile2'] - profile_path_list = ['my-path', 'my-path2'] - for profile_count in range(0, 2): - conn.create_instance_profile( - profile_name_list[profile_count], path=profile_path_list[profile_count]) - - for profile_count in range(0, 2): 
- conn.add_role_to_instance_profile( - profile_name_list[profile_count], "my-role") - - profile_dump = conn.list_instance_profiles_for_role(role_name="my-role") - profile_list = profile_dump['list_instance_profiles_for_role_response'][ - 'list_instance_profiles_for_role_result']['instance_profiles'] - for profile_count in range(0, len(profile_list)): - profile_name_list.remove(profile_list[profile_count][ - "instance_profile_name"]) - profile_path_list.remove(profile_list[profile_count]["path"]) - profile_list[profile_count]["roles"]["member"][ - "role_name"].should.equal("my-role") - - len(profile_name_list).should.equal(0) - len(profile_path_list).should.equal(0) - - profile_dump2 = conn.list_instance_profiles_for_role(role_name="my-role2") - profile_list = profile_dump2['list_instance_profiles_for_role_response'][ - 'list_instance_profiles_for_role_result']['instance_profiles'] - len(profile_list).should.equal(0) - - -@mock_iam_deprecated() -def test_list_role_policies(): - conn = boto.connect_iam() - conn.create_role("my-role") - conn.put_role_policy("my-role", "test policy", "my policy") - role = conn.list_role_policies("my-role") - role.policy_names.should.have.length_of(1) - role.policy_names[0].should.equal("test policy") - - conn.put_role_policy("my-role", "test policy 2", "another policy") - role = conn.list_role_policies("my-role") - role.policy_names.should.have.length_of(2) - - conn.delete_role_policy("my-role", "test policy") - role = conn.list_role_policies("my-role") - role.policy_names.should.have.length_of(1) - role.policy_names[0].should.equal("test policy 2") - - with assert_raises(BotoServerError): - conn.delete_role_policy("my-role", "test policy") - - -@mock_iam_deprecated() -def test_put_role_policy(): - conn = boto.connect_iam() - conn.create_role( - "my-role", assume_role_policy_document="some policy", path="my-path") - conn.put_role_policy("my-role", "test policy", "my policy") - policy = conn.get_role_policy( - "my-role", "test policy")['get_role_policy_response']['get_role_policy_result']['policy_name'] - policy.should.equal("test policy") - - -@mock_iam_deprecated() -def test_update_assume_role_policy(): - conn = boto.connect_iam() - role = conn.create_role("my-role") - conn.update_assume_role_policy(role.role_name, "my-policy") - role = conn.get_role("my-role") - role.assume_role_policy_document.should.equal("my-policy") - - -@mock_iam -def test_create_policy(): - conn = boto3.client('iam', region_name='us-east-1') - response = conn.create_policy( - PolicyName="TestCreatePolicy", - PolicyDocument='{"some":"policy"}') - response['Policy']['Arn'].should.equal("arn:aws:iam::123456789012:policy/TestCreatePolicy") - - -@mock_iam -def test_create_policy_versions(): - conn = boto3.client('iam', region_name='us-east-1') - with assert_raises(ClientError): - conn.create_policy_version( - PolicyArn="arn:aws:iam::123456789012:policy/TestCreatePolicyVersion", - PolicyDocument='{"some":"policy"}') - conn.create_policy( - PolicyName="TestCreatePolicyVersion", - PolicyDocument='{"some":"policy"}') - version = conn.create_policy_version( - PolicyArn="arn:aws:iam::123456789012:policy/TestCreatePolicyVersion", - PolicyDocument='{"some":"policy"}') - version.get('PolicyVersion').get('Document').should.equal({'some': 'policy'}) - -@mock_iam -def test_get_policy(): - conn = boto3.client('iam', region_name='us-east-1') - response = conn.create_policy( - PolicyName="TestGetPolicy", - PolicyDocument='{"some":"policy"}') - policy = conn.get_policy( - 
PolicyArn="arn:aws:iam::123456789012:policy/TestGetPolicy") - response['Policy']['Arn'].should.equal("arn:aws:iam::123456789012:policy/TestGetPolicy") - - -@mock_iam -def test_get_policy_version(): - conn = boto3.client('iam', region_name='us-east-1') - conn.create_policy( - PolicyName="TestGetPolicyVersion", - PolicyDocument='{"some":"policy"}') - version = conn.create_policy_version( - PolicyArn="arn:aws:iam::123456789012:policy/TestGetPolicyVersion", - PolicyDocument='{"some":"policy"}') - with assert_raises(ClientError): - conn.get_policy_version( - PolicyArn="arn:aws:iam::123456789012:policy/TestGetPolicyVersion", - VersionId='v2-does-not-exist') - retrieved = conn.get_policy_version( - PolicyArn="arn:aws:iam::123456789012:policy/TestGetPolicyVersion", - VersionId=version.get('PolicyVersion').get('VersionId')) - retrieved.get('PolicyVersion').get('Document').should.equal({'some': 'policy'}) - - -@mock_iam -def test_list_policy_versions(): - conn = boto3.client('iam', region_name='us-east-1') - with assert_raises(ClientError): - versions = conn.list_policy_versions( - PolicyArn="arn:aws:iam::123456789012:policy/TestListPolicyVersions") - conn.create_policy( - PolicyName="TestListPolicyVersions", - PolicyDocument='{"first":"policy"}') - versions = conn.list_policy_versions( - PolicyArn="arn:aws:iam::123456789012:policy/TestListPolicyVersions") - versions.get('Versions')[0].get('VersionId').should.equal('v1') - - conn.create_policy_version( - PolicyArn="arn:aws:iam::123456789012:policy/TestListPolicyVersions", - PolicyDocument='{"second":"policy"}') - conn.create_policy_version( - PolicyArn="arn:aws:iam::123456789012:policy/TestListPolicyVersions", - PolicyDocument='{"third":"policy"}') - versions = conn.list_policy_versions( - PolicyArn="arn:aws:iam::123456789012:policy/TestListPolicyVersions") - print(versions.get('Versions')) - versions.get('Versions')[1].get('Document').should.equal({'second': 'policy'}) - versions.get('Versions')[2].get('Document').should.equal({'third': 'policy'}) - - -@mock_iam -def test_delete_policy_version(): - conn = boto3.client('iam', region_name='us-east-1') - conn.create_policy( - PolicyName="TestDeletePolicyVersion", - PolicyDocument='{"first":"policy"}') - conn.create_policy_version( - PolicyArn="arn:aws:iam::123456789012:policy/TestDeletePolicyVersion", - PolicyDocument='{"second":"policy"}') - with assert_raises(ClientError): - conn.delete_policy_version( - PolicyArn="arn:aws:iam::123456789012:policy/TestDeletePolicyVersion", - VersionId='v2-nope-this-does-not-exist') - conn.delete_policy_version( - PolicyArn="arn:aws:iam::123456789012:policy/TestDeletePolicyVersion", - VersionId='v2') - versions = conn.list_policy_versions( - PolicyArn="arn:aws:iam::123456789012:policy/TestDeletePolicyVersion") - len(versions.get('Versions')).should.equal(1) - - -@mock_iam_deprecated() -def test_create_user(): - conn = boto.connect_iam() - conn.create_user('my-user') - with assert_raises(BotoServerError): - conn.create_user('my-user') - - -@mock_iam_deprecated() -def test_get_user(): - conn = boto.connect_iam() - with assert_raises(BotoServerError): - conn.get_user('my-user') - conn.create_user('my-user') - conn.get_user('my-user') - - -@mock_iam_deprecated() -def test_get_current_user(): - """If no user is specific, IAM returns the current user""" - conn = boto.connect_iam() - user = conn.get_user()['get_user_response']['get_user_result']['user'] - user['user_name'].should.equal('default_user') - - -@mock_iam() -def test_list_users(): - path_prefix = '/' - max_items 
= 10 - conn = boto3.client('iam', region_name='us-east-1') - conn.create_user(UserName='my-user') - response = conn.list_users(PathPrefix=path_prefix, MaxItems=max_items) - user = response['Users'][0] - user['UserName'].should.equal('my-user') - user['Path'].should.equal('/') - user['Arn'].should.equal('arn:aws:iam::123456789012:user/my-user') - - -@mock_iam() -def test_user_policies(): - policy_name = 'UserManagedPolicy' - policy_document = "{'mypolicy': 'test'}" - user_name = 'my-user' - conn = boto3.client('iam', region_name='us-east-1') - conn.create_user(UserName=user_name) - conn.put_user_policy( - UserName=user_name, - PolicyName=policy_name, - PolicyDocument=policy_document - ) - - policy_doc = conn.get_user_policy( - UserName=user_name, - PolicyName=policy_name - ) - test = policy_document in policy_doc['PolicyDocument'] - test.should.equal(True) - - policies = conn.list_user_policies(UserName=user_name) - len(policies['PolicyNames']).should.equal(1) - policies['PolicyNames'][0].should.equal(policy_name) - - conn.delete_user_policy( - UserName=user_name, - PolicyName=policy_name - ) - - policies = conn.list_user_policies(UserName=user_name) - len(policies['PolicyNames']).should.equal(0) - - -@mock_iam_deprecated() -def test_create_login_profile(): - conn = boto.connect_iam() - with assert_raises(BotoServerError): - conn.create_login_profile('my-user', 'my-pass') - conn.create_user('my-user') - conn.create_login_profile('my-user', 'my-pass') - with assert_raises(BotoServerError): - conn.create_login_profile('my-user', 'my-pass') - - -@mock_iam_deprecated() -def test_delete_login_profile(): - conn = boto.connect_iam() - conn.create_user('my-user') - with assert_raises(BotoServerError): - conn.delete_login_profile('my-user') - conn.create_login_profile('my-user', 'my-pass') - conn.delete_login_profile('my-user') - - -@mock_iam_deprecated() -def test_create_access_key(): - conn = boto.connect_iam() - with assert_raises(BotoServerError): - conn.create_access_key('my-user') - conn.create_user('my-user') - conn.create_access_key('my-user') - - -@mock_iam_deprecated() -def test_get_all_access_keys(): - """If no access keys exist there should be none in the response, - if an access key is present it should have the correct fields present""" - conn = boto.connect_iam() - conn.create_user('my-user') - response = conn.get_all_access_keys('my-user') - assert_equals( - response['list_access_keys_response'][ - 'list_access_keys_result']['access_key_metadata'], - [] - ) - conn.create_access_key('my-user') - response = conn.get_all_access_keys('my-user') - assert_equals( - sorted(response['list_access_keys_response'][ - 'list_access_keys_result']['access_key_metadata'][0].keys()), - sorted(['status', 'create_date', 'user_name', 'access_key_id']) - ) - - -@mock_iam_deprecated() -def test_delete_access_key(): - conn = boto.connect_iam() - conn.create_user('my-user') - access_key_id = conn.create_access_key('my-user')['create_access_key_response'][ - 'create_access_key_result']['access_key']['access_key_id'] - conn.delete_access_key(access_key_id, 'my-user') - - -@mock_iam() -def test_mfa_devices(): - # Test enable device - conn = boto3.client('iam', region_name='us-east-1') - conn.create_user(UserName='my-user') - conn.enable_mfa_device( - UserName='my-user', - SerialNumber='123456789', - AuthenticationCode1='234567', - AuthenticationCode2='987654' - ) - - # Test list mfa devices - response = conn.list_mfa_devices(UserName='my-user') - device = response['MFADevices'][0] - 
device['SerialNumber'].should.equal('123456789') - - # Test deactivate mfa device - conn.deactivate_mfa_device(UserName='my-user', SerialNumber='123456789') - response = conn.list_mfa_devices(UserName='my-user') - len(response['MFADevices']).should.equal(0) - - -@mock_iam_deprecated() -def test_delete_user(): - conn = boto.connect_iam() - with assert_raises(BotoServerError): - conn.delete_user('my-user') - conn.create_user('my-user') - conn.delete_user('my-user') - - -@mock_iam_deprecated() -def test_generate_credential_report(): - conn = boto.connect_iam() - result = conn.generate_credential_report() - result['generate_credential_report_response'][ - 'generate_credential_report_result']['state'].should.equal('STARTED') - result = conn.generate_credential_report() - result['generate_credential_report_response'][ - 'generate_credential_report_result']['state'].should.equal('COMPLETE') - - -@mock_iam_deprecated() -def test_get_credential_report(): - conn = boto.connect_iam() - conn.create_user('my-user') - with assert_raises(BotoServerError): - conn.get_credential_report() - result = conn.generate_credential_report() - while result['generate_credential_report_response']['generate_credential_report_result']['state'] != 'COMPLETE': - result = conn.generate_credential_report() - result = conn.get_credential_report() - report = base64.b64decode(result['get_credential_report_response'][ - 'get_credential_report_result']['content'].encode('ascii')).decode('ascii') - report.should.match(r'.*my-user.*') - - -@requires_boto_gte('2.39') -@mock_iam_deprecated() -def test_managed_policy(): - conn = boto.connect_iam() - - conn.create_policy(policy_name='UserManagedPolicy', - policy_document={'mypolicy': 'test'}, - path='/mypolicy/', - description='my user managed policy') - - marker = 0 - aws_policies = [] - while marker is not None: - response = conn.list_policies(scope='AWS', marker=marker)[ - 'list_policies_response']['list_policies_result'] - for policy in response['policies']: - aws_policies.append(policy) - marker = response.get('marker') - set(p.name for p in aws_managed_policies).should.equal( - set(p['policy_name'] for p in aws_policies)) - - user_policies = conn.list_policies(scope='Local')['list_policies_response'][ - 'list_policies_result']['policies'] - set(['UserManagedPolicy']).should.equal( - set(p['policy_name'] for p in user_policies)) - - marker = 0 - all_policies = [] - while marker is not None: - response = conn.list_policies(marker=marker)[ - 'list_policies_response']['list_policies_result'] - for policy in response['policies']: - all_policies.append(policy) - marker = response.get('marker') - set(p['policy_name'] for p in aws_policies + - user_policies).should.equal(set(p['policy_name'] for p in all_policies)) - - role_name = 'my-role' - conn.create_role(role_name, assume_role_policy_document={ - 'policy': 'test'}, path="my-path") - for policy_name in ['AmazonElasticMapReduceRole', - 'AmazonElasticMapReduceforEC2Role']: - policy_arn = 'arn:aws:iam::aws:policy/service-role/' + policy_name - conn.attach_role_policy(policy_arn, role_name) - - rows = conn.list_policies(only_attached=True)['list_policies_response'][ - 'list_policies_result']['policies'] - rows.should.have.length_of(2) - for x in rows: - int(x['attachment_count']).should.be.greater_than(0) - - # boto has not implemented this end point but accessible this way - resp = conn.get_response('ListAttachedRolePolicies', - {'RoleName': role_name}, - list_marker='AttachedPolicies') - 
resp['list_attached_role_policies_response']['list_attached_role_policies_result'][ - 'attached_policies'].should.have.length_of(2) - - conn.detach_role_policy( - "arn:aws:iam::aws:policy/service-role/AmazonElasticMapReduceRole", - role_name) - rows = conn.list_policies(only_attached=True)['list_policies_response'][ - 'list_policies_result']['policies'] - rows.should.have.length_of(1) - for x in rows: - int(x['attachment_count']).should.be.greater_than(0) - - # boto has not implemented this end point but accessible this way - resp = conn.get_response('ListAttachedRolePolicies', - {'RoleName': role_name}, - list_marker='AttachedPolicies') - resp['list_attached_role_policies_response']['list_attached_role_policies_result'][ - 'attached_policies'].should.have.length_of(1) - - with assert_raises(BotoServerError): - conn.detach_role_policy( - "arn:aws:iam::aws:policy/service-role/AmazonElasticMapReduceRole", - role_name) - - with assert_raises(BotoServerError): - conn.detach_role_policy( - "arn:aws:iam::aws:policy/Nonexistent", role_name) - - -@mock_iam -def test_boto3_create_login_profile(): - conn = boto3.client('iam', region_name='us-east-1') - - with assert_raises(ClientError): - conn.create_login_profile(UserName='my-user', Password='Password') - - conn.create_user(UserName='my-user') - conn.create_login_profile(UserName='my-user', Password='Password') - - with assert_raises(ClientError): - conn.create_login_profile(UserName='my-user', Password='Password') - - -@mock_iam() -def test_attach_detach_user_policy(): - iam = boto3.resource('iam', region_name='us-east-1') - client = boto3.client('iam', region_name='us-east-1') - - user = iam.create_user(UserName='test-user') - - policy_name = 'UserAttachedPolicy' - policy = iam.create_policy(PolicyName=policy_name, - PolicyDocument='{"mypolicy": "test"}', - Path='/mypolicy/', - Description='my user attached policy') - - client.attach_user_policy(UserName=user.name, PolicyArn=policy.arn) - - resp = client.list_attached_user_policies(UserName=user.name) - resp['AttachedPolicies'].should.have.length_of(1) - attached_policy = resp['AttachedPolicies'][0] - attached_policy['PolicyArn'].should.equal(policy.arn) - attached_policy['PolicyName'].should.equal(policy_name) - - client.detach_user_policy(UserName=user.name, PolicyArn=policy.arn) - - resp = client.list_attached_user_policies(UserName=user.name) - resp['AttachedPolicies'].should.have.length_of(0) - - -@mock_iam -def test_update_access_key(): - iam = boto3.resource('iam', region_name='us-east-1') - client = iam.meta.client - username = 'test-user' - iam.create_user(UserName=username) - with assert_raises(ClientError): - client.update_access_key(UserName=username, - AccessKeyId='non-existent-key', - Status='Inactive') - key = client.create_access_key(UserName=username)['AccessKey'] - client.update_access_key(UserName=username, - AccessKeyId=key['AccessKeyId'], - Status='Inactive') - resp = client.list_access_keys(UserName=username) - resp['AccessKeyMetadata'][0]['Status'].should.equal('Inactive') - - -@mock_iam -def test_get_account_authorization_details(): - import json - conn = boto3.client('iam', region_name='us-east-1') - conn.create_role(RoleName="my-role", AssumeRolePolicyDocument="some policy", Path="/my-path/") - conn.create_user(Path='/', UserName='testCloudAuxUser') - conn.create_group(Path='/', GroupName='testCloudAuxGroup') - conn.create_policy( - PolicyName='testCloudAuxPolicy', - Path='/', - PolicyDocument=json.dumps({ - "Version": "2012-10-17", - "Statement": [ - { - "Action": 
"s3:ListBucket", - "Resource": "*", - "Effect": "Allow", - } - ] - }), - Description='Test CloudAux Policy' - ) - - result = conn.get_account_authorization_details(Filter=['Role']) - len(result['RoleDetailList']) == 1 - len(result['UserDetailList']) == 0 - len(result['GroupDetailList']) == 0 - len(result['Policies']) == 0 - - result = conn.get_account_authorization_details(Filter=['User']) - len(result['RoleDetailList']) == 0 - len(result['UserDetailList']) == 1 - len(result['GroupDetailList']) == 0 - len(result['Policies']) == 0 - - result = conn.get_account_authorization_details(Filter=['Group']) - len(result['RoleDetailList']) == 0 - len(result['UserDetailList']) == 0 - len(result['GroupDetailList']) == 1 - len(result['Policies']) == 0 - - result = conn.get_account_authorization_details(Filter=['LocalManagedPolicy']) - len(result['RoleDetailList']) == 0 - len(result['UserDetailList']) == 0 - len(result['GroupDetailList']) == 0 - len(result['Policies']) == 1 - - # Check for greater than 1 since this should always be greater than one but might change. - # See iam/aws_managed_policies.py - result = conn.get_account_authorization_details(Filter=['AWSManagedPolicy']) - len(result['RoleDetailList']) == 0 - len(result['UserDetailList']) == 0 - len(result['GroupDetailList']) == 0 - len(result['Policies']) > 1 - - result = conn.get_account_authorization_details() - len(result['RoleDetailList']) == 1 - len(result['UserDetailList']) == 1 - len(result['GroupDetailList']) == 1 - len(result['Policies']) > 1 - - - +from __future__ import unicode_literals +import base64 + +import boto +import boto3 +import sure # noqa +from boto.exception import BotoServerError +from botocore.exceptions import ClientError +from moto import mock_iam, mock_iam_deprecated +from moto.iam.models import aws_managed_policies +from nose.tools import assert_raises, assert_equals +from nose.tools import raises + +from tests.helpers import requires_boto_gte + + +@mock_iam_deprecated() +def test_get_all_server_certs(): + conn = boto.connect_iam() + + conn.upload_server_cert("certname", "certbody", "privatekey") + certs = conn.get_all_server_certs()['list_server_certificates_response'][ + 'list_server_certificates_result']['server_certificate_metadata_list'] + certs.should.have.length_of(1) + cert1 = certs[0] + cert1.server_certificate_name.should.equal("certname") + cert1.arn.should.equal( + "arn:aws:iam::123456789012:server-certificate/certname") + + +@mock_iam_deprecated() +def test_get_server_cert_doesnt_exist(): + conn = boto.connect_iam() + + with assert_raises(BotoServerError): + conn.get_server_certificate("NonExistant") + + +@mock_iam_deprecated() +def test_get_server_cert(): + conn = boto.connect_iam() + + conn.upload_server_cert("certname", "certbody", "privatekey") + cert = conn.get_server_certificate("certname") + cert.server_certificate_name.should.equal("certname") + cert.arn.should.equal( + "arn:aws:iam::123456789012:server-certificate/certname") + + +@mock_iam_deprecated() +def test_upload_server_cert(): + conn = boto.connect_iam() + + conn.upload_server_cert("certname", "certbody", "privatekey") + cert = conn.get_server_certificate("certname") + cert.server_certificate_name.should.equal("certname") + cert.arn.should.equal( + "arn:aws:iam::123456789012:server-certificate/certname") + + +@mock_iam_deprecated() +def test_delete_server_cert(): + conn = boto.connect_iam() + + conn.upload_server_cert("certname", "certbody", "privatekey") + conn.get_server_certificate("certname") + conn.delete_server_cert("certname") + 
with assert_raises(BotoServerError): + conn.get_server_certificate("certname") + with assert_raises(BotoServerError): + conn.delete_server_cert("certname") + + +@mock_iam_deprecated() +@raises(BotoServerError) +def test_get_role__should_throw__when_role_does_not_exist(): + conn = boto.connect_iam() + + conn.get_role('unexisting_role') + + +@mock_iam_deprecated() +@raises(BotoServerError) +def test_get_instance_profile__should_throw__when_instance_profile_does_not_exist(): + conn = boto.connect_iam() + + conn.get_instance_profile('unexisting_instance_profile') + + +@mock_iam_deprecated() +def test_create_role_and_instance_profile(): + conn = boto.connect_iam() + conn.create_instance_profile("my-profile", path="my-path") + conn.create_role( + "my-role", assume_role_policy_document="some policy", path="my-path") + + conn.add_role_to_instance_profile("my-profile", "my-role") + + role = conn.get_role("my-role") + role.path.should.equal("my-path") + role.assume_role_policy_document.should.equal("some policy") + + profile = conn.get_instance_profile("my-profile") + profile.path.should.equal("my-path") + role_from_profile = list(profile.roles.values())[0] + role_from_profile['role_id'].should.equal(role.role_id) + role_from_profile['role_name'].should.equal("my-role") + + conn.list_roles().roles[0].role_name.should.equal('my-role') + + +@mock_iam_deprecated() +def test_remove_role_from_instance_profile(): + conn = boto.connect_iam() + conn.create_instance_profile("my-profile", path="my-path") + conn.create_role( + "my-role", assume_role_policy_document="some policy", path="my-path") + conn.add_role_to_instance_profile("my-profile", "my-role") + + profile = conn.get_instance_profile("my-profile") + role_from_profile = list(profile.roles.values())[0] + role_from_profile['role_name'].should.equal("my-role") + + conn.remove_role_from_instance_profile("my-profile", "my-role") + + profile = conn.get_instance_profile("my-profile") + dict(profile.roles).should.be.empty + + +@mock_iam() +def test_get_login_profile(): + conn = boto3.client('iam', region_name='us-east-1') + conn.create_user(UserName='my-user') + conn.create_login_profile(UserName='my-user', Password='my-pass') + + response = conn.get_login_profile(UserName='my-user') + response['LoginProfile']['UserName'].should.equal('my-user') + + +@mock_iam() +def test_update_login_profile(): + conn = boto3.client('iam', region_name='us-east-1') + conn.create_user(UserName='my-user') + conn.create_login_profile(UserName='my-user', Password='my-pass') + response = conn.get_login_profile(UserName='my-user') + response['LoginProfile'].get('PasswordResetRequired').should.equal(None) + + conn.update_login_profile(UserName='my-user', Password='new-pass', PasswordResetRequired=True) + response = conn.get_login_profile(UserName='my-user') + response['LoginProfile'].get('PasswordResetRequired').should.equal(True) + + +@mock_iam() +def test_delete_role(): + conn = boto3.client('iam', region_name='us-east-1') + + with assert_raises(ClientError): + conn.delete_role(RoleName="my-role") + + conn.create_role(RoleName="my-role", AssumeRolePolicyDocument="some policy", Path="/my-path/") + role = conn.get_role(RoleName="my-role") + role.get('Role').get('Arn').should.equal('arn:aws:iam::123456789012:role/my-path/my-role') + + conn.delete_role(RoleName="my-role") + + with assert_raises(ClientError): + conn.get_role(RoleName="my-role") + + +@mock_iam_deprecated() +def test_list_instance_profiles(): + conn = boto.connect_iam() + conn.create_instance_profile("my-profile", 
path="my-path") + conn.create_role("my-role", path="my-path") + + conn.add_role_to_instance_profile("my-profile", "my-role") + + profiles = conn.list_instance_profiles().instance_profiles + + len(profiles).should.equal(1) + profiles[0].instance_profile_name.should.equal("my-profile") + profiles[0].roles.role_name.should.equal("my-role") + + +@mock_iam_deprecated() +def test_list_instance_profiles_for_role(): + conn = boto.connect_iam() + + conn.create_role(role_name="my-role", + assume_role_policy_document="some policy", path="my-path") + conn.create_role(role_name="my-role2", + assume_role_policy_document="some policy2", path="my-path2") + + profile_name_list = ['my-profile', 'my-profile2'] + profile_path_list = ['my-path', 'my-path2'] + for profile_count in range(0, 2): + conn.create_instance_profile( + profile_name_list[profile_count], path=profile_path_list[profile_count]) + + for profile_count in range(0, 2): + conn.add_role_to_instance_profile( + profile_name_list[profile_count], "my-role") + + profile_dump = conn.list_instance_profiles_for_role(role_name="my-role") + profile_list = profile_dump['list_instance_profiles_for_role_response'][ + 'list_instance_profiles_for_role_result']['instance_profiles'] + for profile_count in range(0, len(profile_list)): + profile_name_list.remove(profile_list[profile_count][ + "instance_profile_name"]) + profile_path_list.remove(profile_list[profile_count]["path"]) + profile_list[profile_count]["roles"]["member"][ + "role_name"].should.equal("my-role") + + len(profile_name_list).should.equal(0) + len(profile_path_list).should.equal(0) + + profile_dump2 = conn.list_instance_profiles_for_role(role_name="my-role2") + profile_list = profile_dump2['list_instance_profiles_for_role_response'][ + 'list_instance_profiles_for_role_result']['instance_profiles'] + len(profile_list).should.equal(0) + + +@mock_iam_deprecated() +def test_list_role_policies(): + conn = boto.connect_iam() + conn.create_role("my-role") + conn.put_role_policy("my-role", "test policy", "my policy") + role = conn.list_role_policies("my-role") + role.policy_names.should.have.length_of(1) + role.policy_names[0].should.equal("test policy") + + conn.put_role_policy("my-role", "test policy 2", "another policy") + role = conn.list_role_policies("my-role") + role.policy_names.should.have.length_of(2) + + conn.delete_role_policy("my-role", "test policy") + role = conn.list_role_policies("my-role") + role.policy_names.should.have.length_of(1) + role.policy_names[0].should.equal("test policy 2") + + with assert_raises(BotoServerError): + conn.delete_role_policy("my-role", "test policy") + + +@mock_iam_deprecated() +def test_put_role_policy(): + conn = boto.connect_iam() + conn.create_role( + "my-role", assume_role_policy_document="some policy", path="my-path") + conn.put_role_policy("my-role", "test policy", "my policy") + policy = conn.get_role_policy( + "my-role", "test policy")['get_role_policy_response']['get_role_policy_result']['policy_name'] + policy.should.equal("test policy") + + +@mock_iam_deprecated() +def test_update_assume_role_policy(): + conn = boto.connect_iam() + role = conn.create_role("my-role") + conn.update_assume_role_policy(role.role_name, "my-policy") + role = conn.get_role("my-role") + role.assume_role_policy_document.should.equal("my-policy") + + +@mock_iam +def test_create_policy(): + conn = boto3.client('iam', region_name='us-east-1') + response = conn.create_policy( + PolicyName="TestCreatePolicy", + PolicyDocument='{"some":"policy"}') + 
response['Policy']['Arn'].should.equal("arn:aws:iam::123456789012:policy/TestCreatePolicy") + + +@mock_iam +def test_create_policy_versions(): + conn = boto3.client('iam', region_name='us-east-1') + with assert_raises(ClientError): + conn.create_policy_version( + PolicyArn="arn:aws:iam::123456789012:policy/TestCreatePolicyVersion", + PolicyDocument='{"some":"policy"}') + conn.create_policy( + PolicyName="TestCreatePolicyVersion", + PolicyDocument='{"some":"policy"}') + version = conn.create_policy_version( + PolicyArn="arn:aws:iam::123456789012:policy/TestCreatePolicyVersion", + PolicyDocument='{"some":"policy"}') + version.get('PolicyVersion').get('Document').should.equal({'some': 'policy'}) + +@mock_iam +def test_get_policy(): + conn = boto3.client('iam', region_name='us-east-1') + response = conn.create_policy( + PolicyName="TestGetPolicy", + PolicyDocument='{"some":"policy"}') + policy = conn.get_policy( + PolicyArn="arn:aws:iam::123456789012:policy/TestGetPolicy") + response['Policy']['Arn'].should.equal("arn:aws:iam::123456789012:policy/TestGetPolicy") + + +@mock_iam +def test_get_policy_version(): + conn = boto3.client('iam', region_name='us-east-1') + conn.create_policy( + PolicyName="TestGetPolicyVersion", + PolicyDocument='{"some":"policy"}') + version = conn.create_policy_version( + PolicyArn="arn:aws:iam::123456789012:policy/TestGetPolicyVersion", + PolicyDocument='{"some":"policy"}') + with assert_raises(ClientError): + conn.get_policy_version( + PolicyArn="arn:aws:iam::123456789012:policy/TestGetPolicyVersion", + VersionId='v2-does-not-exist') + retrieved = conn.get_policy_version( + PolicyArn="arn:aws:iam::123456789012:policy/TestGetPolicyVersion", + VersionId=version.get('PolicyVersion').get('VersionId')) + retrieved.get('PolicyVersion').get('Document').should.equal({'some': 'policy'}) + + +@mock_iam +def test_list_policy_versions(): + conn = boto3.client('iam', region_name='us-east-1') + with assert_raises(ClientError): + versions = conn.list_policy_versions( + PolicyArn="arn:aws:iam::123456789012:policy/TestListPolicyVersions") + conn.create_policy( + PolicyName="TestListPolicyVersions", + PolicyDocument='{"first":"policy"}') + versions = conn.list_policy_versions( + PolicyArn="arn:aws:iam::123456789012:policy/TestListPolicyVersions") + versions.get('Versions')[0].get('VersionId').should.equal('v1') + + conn.create_policy_version( + PolicyArn="arn:aws:iam::123456789012:policy/TestListPolicyVersions", + PolicyDocument='{"second":"policy"}') + conn.create_policy_version( + PolicyArn="arn:aws:iam::123456789012:policy/TestListPolicyVersions", + PolicyDocument='{"third":"policy"}') + versions = conn.list_policy_versions( + PolicyArn="arn:aws:iam::123456789012:policy/TestListPolicyVersions") + print(versions.get('Versions')) + versions.get('Versions')[1].get('Document').should.equal({'second': 'policy'}) + versions.get('Versions')[2].get('Document').should.equal({'third': 'policy'}) + + +@mock_iam +def test_delete_policy_version(): + conn = boto3.client('iam', region_name='us-east-1') + conn.create_policy( + PolicyName="TestDeletePolicyVersion", + PolicyDocument='{"first":"policy"}') + conn.create_policy_version( + PolicyArn="arn:aws:iam::123456789012:policy/TestDeletePolicyVersion", + PolicyDocument='{"second":"policy"}') + with assert_raises(ClientError): + conn.delete_policy_version( + PolicyArn="arn:aws:iam::123456789012:policy/TestDeletePolicyVersion", + VersionId='v2-nope-this-does-not-exist') + conn.delete_policy_version( + 
PolicyArn="arn:aws:iam::123456789012:policy/TestDeletePolicyVersion", + VersionId='v2') + versions = conn.list_policy_versions( + PolicyArn="arn:aws:iam::123456789012:policy/TestDeletePolicyVersion") + len(versions.get('Versions')).should.equal(1) + + +@mock_iam_deprecated() +def test_create_user(): + conn = boto.connect_iam() + conn.create_user('my-user') + with assert_raises(BotoServerError): + conn.create_user('my-user') + + +@mock_iam_deprecated() +def test_get_user(): + conn = boto.connect_iam() + with assert_raises(BotoServerError): + conn.get_user('my-user') + conn.create_user('my-user') + conn.get_user('my-user') + + +@mock_iam_deprecated() +def test_get_current_user(): + """If no user is specific, IAM returns the current user""" + conn = boto.connect_iam() + user = conn.get_user()['get_user_response']['get_user_result']['user'] + user['user_name'].should.equal('default_user') + + +@mock_iam() +def test_list_users(): + path_prefix = '/' + max_items = 10 + conn = boto3.client('iam', region_name='us-east-1') + conn.create_user(UserName='my-user') + response = conn.list_users(PathPrefix=path_prefix, MaxItems=max_items) + user = response['Users'][0] + user['UserName'].should.equal('my-user') + user['Path'].should.equal('/') + user['Arn'].should.equal('arn:aws:iam::123456789012:user/my-user') + + +@mock_iam() +def test_user_policies(): + policy_name = 'UserManagedPolicy' + policy_document = "{'mypolicy': 'test'}" + user_name = 'my-user' + conn = boto3.client('iam', region_name='us-east-1') + conn.create_user(UserName=user_name) + conn.put_user_policy( + UserName=user_name, + PolicyName=policy_name, + PolicyDocument=policy_document + ) + + policy_doc = conn.get_user_policy( + UserName=user_name, + PolicyName=policy_name + ) + test = policy_document in policy_doc['PolicyDocument'] + test.should.equal(True) + + policies = conn.list_user_policies(UserName=user_name) + len(policies['PolicyNames']).should.equal(1) + policies['PolicyNames'][0].should.equal(policy_name) + + conn.delete_user_policy( + UserName=user_name, + PolicyName=policy_name + ) + + policies = conn.list_user_policies(UserName=user_name) + len(policies['PolicyNames']).should.equal(0) + + +@mock_iam_deprecated() +def test_create_login_profile(): + conn = boto.connect_iam() + with assert_raises(BotoServerError): + conn.create_login_profile('my-user', 'my-pass') + conn.create_user('my-user') + conn.create_login_profile('my-user', 'my-pass') + with assert_raises(BotoServerError): + conn.create_login_profile('my-user', 'my-pass') + + +@mock_iam_deprecated() +def test_delete_login_profile(): + conn = boto.connect_iam() + conn.create_user('my-user') + with assert_raises(BotoServerError): + conn.delete_login_profile('my-user') + conn.create_login_profile('my-user', 'my-pass') + conn.delete_login_profile('my-user') + + +@mock_iam_deprecated() +def test_create_access_key(): + conn = boto.connect_iam() + with assert_raises(BotoServerError): + conn.create_access_key('my-user') + conn.create_user('my-user') + conn.create_access_key('my-user') + + +@mock_iam_deprecated() +def test_get_all_access_keys(): + """If no access keys exist there should be none in the response, + if an access key is present it should have the correct fields present""" + conn = boto.connect_iam() + conn.create_user('my-user') + response = conn.get_all_access_keys('my-user') + assert_equals( + response['list_access_keys_response'][ + 'list_access_keys_result']['access_key_metadata'], + [] + ) + conn.create_access_key('my-user') + response = 
conn.get_all_access_keys('my-user') + assert_equals( + sorted(response['list_access_keys_response'][ + 'list_access_keys_result']['access_key_metadata'][0].keys()), + sorted(['status', 'create_date', 'user_name', 'access_key_id']) + ) + + +@mock_iam_deprecated() +def test_delete_access_key(): + conn = boto.connect_iam() + conn.create_user('my-user') + access_key_id = conn.create_access_key('my-user')['create_access_key_response'][ + 'create_access_key_result']['access_key']['access_key_id'] + conn.delete_access_key(access_key_id, 'my-user') + + +@mock_iam() +def test_mfa_devices(): + # Test enable device + conn = boto3.client('iam', region_name='us-east-1') + conn.create_user(UserName='my-user') + conn.enable_mfa_device( + UserName='my-user', + SerialNumber='123456789', + AuthenticationCode1='234567', + AuthenticationCode2='987654' + ) + + # Test list mfa devices + response = conn.list_mfa_devices(UserName='my-user') + device = response['MFADevices'][0] + device['SerialNumber'].should.equal('123456789') + + # Test deactivate mfa device + conn.deactivate_mfa_device(UserName='my-user', SerialNumber='123456789') + response = conn.list_mfa_devices(UserName='my-user') + len(response['MFADevices']).should.equal(0) + + +@mock_iam_deprecated() +def test_delete_user(): + conn = boto.connect_iam() + with assert_raises(BotoServerError): + conn.delete_user('my-user') + conn.create_user('my-user') + conn.delete_user('my-user') + + +@mock_iam_deprecated() +def test_generate_credential_report(): + conn = boto.connect_iam() + result = conn.generate_credential_report() + result['generate_credential_report_response'][ + 'generate_credential_report_result']['state'].should.equal('STARTED') + result = conn.generate_credential_report() + result['generate_credential_report_response'][ + 'generate_credential_report_result']['state'].should.equal('COMPLETE') + + +@mock_iam_deprecated() +def test_get_credential_report(): + conn = boto.connect_iam() + conn.create_user('my-user') + with assert_raises(BotoServerError): + conn.get_credential_report() + result = conn.generate_credential_report() + while result['generate_credential_report_response']['generate_credential_report_result']['state'] != 'COMPLETE': + result = conn.generate_credential_report() + result = conn.get_credential_report() + report = base64.b64decode(result['get_credential_report_response'][ + 'get_credential_report_result']['content'].encode('ascii')).decode('ascii') + report.should.match(r'.*my-user.*') + + +@requires_boto_gte('2.39') +@mock_iam_deprecated() +def test_managed_policy(): + conn = boto.connect_iam() + + conn.create_policy(policy_name='UserManagedPolicy', + policy_document={'mypolicy': 'test'}, + path='/mypolicy/', + description='my user managed policy') + + marker = 0 + aws_policies = [] + while marker is not None: + response = conn.list_policies(scope='AWS', marker=marker)[ + 'list_policies_response']['list_policies_result'] + for policy in response['policies']: + aws_policies.append(policy) + marker = response.get('marker') + set(p.name for p in aws_managed_policies).should.equal( + set(p['policy_name'] for p in aws_policies)) + + user_policies = conn.list_policies(scope='Local')['list_policies_response'][ + 'list_policies_result']['policies'] + set(['UserManagedPolicy']).should.equal( + set(p['policy_name'] for p in user_policies)) + + marker = 0 + all_policies = [] + while marker is not None: + response = conn.list_policies(marker=marker)[ + 'list_policies_response']['list_policies_result'] + for policy in response['policies']: + 
all_policies.append(policy) + marker = response.get('marker') + set(p['policy_name'] for p in aws_policies + + user_policies).should.equal(set(p['policy_name'] for p in all_policies)) + + role_name = 'my-role' + conn.create_role(role_name, assume_role_policy_document={ + 'policy': 'test'}, path="my-path") + for policy_name in ['AmazonElasticMapReduceRole', + 'AmazonElasticMapReduceforEC2Role']: + policy_arn = 'arn:aws:iam::aws:policy/service-role/' + policy_name + conn.attach_role_policy(policy_arn, role_name) + + rows = conn.list_policies(only_attached=True)['list_policies_response'][ + 'list_policies_result']['policies'] + rows.should.have.length_of(2) + for x in rows: + int(x['attachment_count']).should.be.greater_than(0) + + # boto has not implemented this end point but accessible this way + resp = conn.get_response('ListAttachedRolePolicies', + {'RoleName': role_name}, + list_marker='AttachedPolicies') + resp['list_attached_role_policies_response']['list_attached_role_policies_result'][ + 'attached_policies'].should.have.length_of(2) + + conn.detach_role_policy( + "arn:aws:iam::aws:policy/service-role/AmazonElasticMapReduceRole", + role_name) + rows = conn.list_policies(only_attached=True)['list_policies_response'][ + 'list_policies_result']['policies'] + rows.should.have.length_of(1) + for x in rows: + int(x['attachment_count']).should.be.greater_than(0) + + # boto has not implemented this end point but accessible this way + resp = conn.get_response('ListAttachedRolePolicies', + {'RoleName': role_name}, + list_marker='AttachedPolicies') + resp['list_attached_role_policies_response']['list_attached_role_policies_result'][ + 'attached_policies'].should.have.length_of(1) + + with assert_raises(BotoServerError): + conn.detach_role_policy( + "arn:aws:iam::aws:policy/service-role/AmazonElasticMapReduceRole", + role_name) + + with assert_raises(BotoServerError): + conn.detach_role_policy( + "arn:aws:iam::aws:policy/Nonexistent", role_name) + + +@mock_iam +def test_boto3_create_login_profile(): + conn = boto3.client('iam', region_name='us-east-1') + + with assert_raises(ClientError): + conn.create_login_profile(UserName='my-user', Password='Password') + + conn.create_user(UserName='my-user') + conn.create_login_profile(UserName='my-user', Password='Password') + + with assert_raises(ClientError): + conn.create_login_profile(UserName='my-user', Password='Password') + + +@mock_iam() +def test_attach_detach_user_policy(): + iam = boto3.resource('iam', region_name='us-east-1') + client = boto3.client('iam', region_name='us-east-1') + + user = iam.create_user(UserName='test-user') + + policy_name = 'UserAttachedPolicy' + policy = iam.create_policy(PolicyName=policy_name, + PolicyDocument='{"mypolicy": "test"}', + Path='/mypolicy/', + Description='my user attached policy') + + client.attach_user_policy(UserName=user.name, PolicyArn=policy.arn) + + resp = client.list_attached_user_policies(UserName=user.name) + resp['AttachedPolicies'].should.have.length_of(1) + attached_policy = resp['AttachedPolicies'][0] + attached_policy['PolicyArn'].should.equal(policy.arn) + attached_policy['PolicyName'].should.equal(policy_name) + + client.detach_user_policy(UserName=user.name, PolicyArn=policy.arn) + + resp = client.list_attached_user_policies(UserName=user.name) + resp['AttachedPolicies'].should.have.length_of(0) + + +@mock_iam +def test_update_access_key(): + iam = boto3.resource('iam', region_name='us-east-1') + client = iam.meta.client + username = 'test-user' + iam.create_user(UserName=username) + with 
assert_raises(ClientError): + client.update_access_key(UserName=username, + AccessKeyId='non-existent-key', + Status='Inactive') + key = client.create_access_key(UserName=username)['AccessKey'] + client.update_access_key(UserName=username, + AccessKeyId=key['AccessKeyId'], + Status='Inactive') + resp = client.list_access_keys(UserName=username) + resp['AccessKeyMetadata'][0]['Status'].should.equal('Inactive') + + +@mock_iam +def test_get_account_authorization_details(): + import json + conn = boto3.client('iam', region_name='us-east-1') + conn.create_role(RoleName="my-role", AssumeRolePolicyDocument="some policy", Path="/my-path/") + conn.create_user(Path='/', UserName='testCloudAuxUser') + conn.create_group(Path='/', GroupName='testCloudAuxGroup') + conn.create_policy( + PolicyName='testCloudAuxPolicy', + Path='/', + PolicyDocument=json.dumps({ + "Version": "2012-10-17", + "Statement": [ + { + "Action": "s3:ListBucket", + "Resource": "*", + "Effect": "Allow", + } + ] + }), + Description='Test CloudAux Policy' + ) + + result = conn.get_account_authorization_details(Filter=['Role']) + len(result['RoleDetailList']) == 1 + len(result['UserDetailList']) == 0 + len(result['GroupDetailList']) == 0 + len(result['Policies']) == 0 + + result = conn.get_account_authorization_details(Filter=['User']) + len(result['RoleDetailList']) == 0 + len(result['UserDetailList']) == 1 + len(result['GroupDetailList']) == 0 + len(result['Policies']) == 0 + + result = conn.get_account_authorization_details(Filter=['Group']) + len(result['RoleDetailList']) == 0 + len(result['UserDetailList']) == 0 + len(result['GroupDetailList']) == 1 + len(result['Policies']) == 0 + + result = conn.get_account_authorization_details(Filter=['LocalManagedPolicy']) + len(result['RoleDetailList']) == 0 + len(result['UserDetailList']) == 0 + len(result['GroupDetailList']) == 0 + len(result['Policies']) == 1 + + # Check for greater than 1 since this should always be greater than one but might change. 
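+ # (the AWS-managed policy set is vendored into moto as a static snapshot, + # so the exact count tracks that checked-in file rather than live AWS)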
+ # See iam/aws_managed_policies.py + result = conn.get_account_authorization_details(Filter=['AWSManagedPolicy']) + len(result['RoleDetailList']) == 0 + len(result['UserDetailList']) == 0 + len(result['GroupDetailList']) == 0 + len(result['Policies']) > 1 + + result = conn.get_account_authorization_details() + len(result['RoleDetailList']) == 1 + len(result['UserDetailList']) == 1 + len(result['GroupDetailList']) == 1 + len(result['Policies']) > 1 + + + diff --git a/tests/test_iam/test_iam_account_aliases.py b/tests/test_iam/test_iam_account_aliases.py index 3d927038d1c6..5d7dec408eaf 100644 --- a/tests/test_iam/test_iam_account_aliases.py +++ b/tests/test_iam/test_iam_account_aliases.py @@ -1,20 +1,20 @@ -import boto3 -import sure # noqa -from moto import mock_iam - - -@mock_iam() -def test_account_aliases(): - client = boto3.client('iam', region_name='us-east-1') - - alias = 'my-account-name' - aliases = client.list_account_aliases() - aliases.should.have.key('AccountAliases').which.should.equal([]) - - client.create_account_alias(AccountAlias=alias) - aliases = client.list_account_aliases() - aliases.should.have.key('AccountAliases').which.should.equal([alias]) - - client.delete_account_alias(AccountAlias=alias) - aliases = client.list_account_aliases() - aliases.should.have.key('AccountAliases').which.should.equal([]) +import boto3 +import sure # noqa +from moto import mock_iam + + +@mock_iam() +def test_account_aliases(): + client = boto3.client('iam', region_name='us-east-1') + + alias = 'my-account-name' + aliases = client.list_account_aliases() + aliases.should.have.key('AccountAliases').which.should.equal([]) + + client.create_account_alias(AccountAlias=alias) + aliases = client.list_account_aliases() + aliases.should.have.key('AccountAliases').which.should.equal([alias]) + + client.delete_account_alias(AccountAlias=alias) + aliases = client.list_account_aliases() + aliases.should.have.key('AccountAliases').which.should.equal([]) diff --git a/tests/test_iam/test_iam_groups.py b/tests/test_iam/test_iam_groups.py index 0d4756f756c3..87d4123e23d4 100644 --- a/tests/test_iam/test_iam_groups.py +++ b/tests/test_iam/test_iam_groups.py @@ -1,155 +1,155 @@ -from __future__ import unicode_literals - -from datetime import datetime - -import boto -import boto3 -import sure # noqa - -from nose.tools import assert_raises -from boto.exception import BotoServerError -from moto import mock_iam, mock_iam_deprecated - - -@mock_iam_deprecated() -def test_create_group(): - conn = boto.connect_iam() - conn.create_group('my-group') - with assert_raises(BotoServerError): - conn.create_group('my-group') - - -@mock_iam_deprecated() -def test_get_group(): - conn = boto.connect_iam() - conn.create_group('my-group') - conn.get_group('my-group') - with assert_raises(BotoServerError): - conn.get_group('not-group') - - -@mock_iam() -def test_get_group_current(): - conn = boto3.client('iam', region_name='us-east-1') - conn.create_group(GroupName='my-group') - result = conn.get_group(GroupName='my-group') - - assert result['Group']['Path'] == '/' - assert result['Group']['GroupName'] == 'my-group' - assert isinstance(result['Group']['CreateDate'], datetime) - assert result['Group']['GroupId'] - assert result['Group']['Arn'] == 'arn:aws:iam::123456789012:group/my-group' - assert not result['Users'] - - # Make a group with a different path: - other_group = conn.create_group(GroupName='my-other-group', Path='some/location') - assert other_group['Group']['Path'] == 'some/location' - assert 
other_group['Group']['Arn'] == 'arn:aws:iam::123456789012:group/some/location/my-other-group' - - -@mock_iam_deprecated() -def test_get_all_groups(): - conn = boto.connect_iam() - conn.create_group('my-group1') - conn.create_group('my-group2') - groups = conn.get_all_groups()['list_groups_response'][ - 'list_groups_result']['groups'] - groups.should.have.length_of(2) - - -@mock_iam_deprecated() -def test_add_user_to_group(): - conn = boto.connect_iam() - with assert_raises(BotoServerError): - conn.add_user_to_group('my-group', 'my-user') - conn.create_group('my-group') - with assert_raises(BotoServerError): - conn.add_user_to_group('my-group', 'my-user') - conn.create_user('my-user') - conn.add_user_to_group('my-group', 'my-user') - - -@mock_iam_deprecated() -def test_remove_user_from_group(): - conn = boto.connect_iam() - with assert_raises(BotoServerError): - conn.remove_user_from_group('my-group', 'my-user') - conn.create_group('my-group') - conn.create_user('my-user') - with assert_raises(BotoServerError): - conn.remove_user_from_group('my-group', 'my-user') - conn.add_user_to_group('my-group', 'my-user') - conn.remove_user_from_group('my-group', 'my-user') - - -@mock_iam_deprecated() -def test_get_groups_for_user(): - conn = boto.connect_iam() - conn.create_group('my-group1') - conn.create_group('my-group2') - conn.create_group('other-group') - conn.create_user('my-user') - conn.add_user_to_group('my-group1', 'my-user') - conn.add_user_to_group('my-group2', 'my-user') - - groups = conn.get_groups_for_user( - 'my-user')['list_groups_for_user_response']['list_groups_for_user_result']['groups'] - groups.should.have.length_of(2) - - -@mock_iam_deprecated() -def test_put_group_policy(): - conn = boto.connect_iam() - conn.create_group('my-group') - conn.put_group_policy('my-group', 'my-policy', '{"some": "json"}') - - -@mock_iam -def test_attach_group_policies(): - conn = boto3.client('iam', region_name='us-east-1') - conn.create_group(GroupName='my-group') - conn.list_attached_group_policies(GroupName='my-group')['AttachedPolicies'].should.be.empty - policy_arn = 'arn:aws:iam::aws:policy/service-role/AmazonElasticMapReduceforEC2Role' - conn.list_attached_group_policies(GroupName='my-group')['AttachedPolicies'].should.be.empty - conn.attach_group_policy(GroupName='my-group', PolicyArn=policy_arn) - conn.list_attached_group_policies(GroupName='my-group')['AttachedPolicies'].should.equal( - [ - { - 'PolicyName': 'AmazonElasticMapReduceforEC2Role', - 'PolicyArn': policy_arn, - } - ]) - - conn.detach_group_policy(GroupName='my-group', PolicyArn=policy_arn) - conn.list_attached_group_policies(GroupName='my-group')['AttachedPolicies'].should.be.empty - - -@mock_iam_deprecated() -def test_get_group_policy(): - conn = boto.connect_iam() - conn.create_group('my-group') - with assert_raises(BotoServerError): - conn.get_group_policy('my-group', 'my-policy') - - conn.put_group_policy('my-group', 'my-policy', '{"some": "json"}') - conn.get_group_policy('my-group', 'my-policy') - - -@mock_iam_deprecated() -def test_get_all_group_policies(): - conn = boto.connect_iam() - conn.create_group('my-group') - policies = conn.get_all_group_policies('my-group')['list_group_policies_response']['list_group_policies_result']['policy_names'] - assert policies == [] - conn.put_group_policy('my-group', 'my-policy', '{"some": "json"}') - policies = conn.get_all_group_policies('my-group')['list_group_policies_response']['list_group_policies_result']['policy_names'] - assert policies == ['my-policy'] - - -@mock_iam() -def 
test_list_group_policies(): - conn = boto3.client('iam', region_name='us-east-1') - conn.create_group(GroupName='my-group') - conn.list_group_policies(GroupName='my-group')['PolicyNames'].should.be.empty - conn.put_group_policy(GroupName='my-group', PolicyName='my-policy', PolicyDocument='{"some": "json"}') - conn.list_group_policies(GroupName='my-group')['PolicyNames'].should.equal(['my-policy']) +from __future__ import unicode_literals + +from datetime import datetime + +import boto +import boto3 +import sure # noqa + +from nose.tools import assert_raises +from boto.exception import BotoServerError +from moto import mock_iam, mock_iam_deprecated + + +@mock_iam_deprecated() +def test_create_group(): + conn = boto.connect_iam() + conn.create_group('my-group') + with assert_raises(BotoServerError): + conn.create_group('my-group') + + +@mock_iam_deprecated() +def test_get_group(): + conn = boto.connect_iam() + conn.create_group('my-group') + conn.get_group('my-group') + with assert_raises(BotoServerError): + conn.get_group('not-group') + + +@mock_iam() +def test_get_group_current(): + conn = boto3.client('iam', region_name='us-east-1') + conn.create_group(GroupName='my-group') + result = conn.get_group(GroupName='my-group') + + assert result['Group']['Path'] == '/' + assert result['Group']['GroupName'] == 'my-group' + assert isinstance(result['Group']['CreateDate'], datetime) + assert result['Group']['GroupId'] + assert result['Group']['Arn'] == 'arn:aws:iam::123456789012:group/my-group' + assert not result['Users'] + + # Make a group with a different path: + other_group = conn.create_group(GroupName='my-other-group', Path='some/location') + assert other_group['Group']['Path'] == 'some/location' + assert other_group['Group']['Arn'] == 'arn:aws:iam::123456789012:group/some/location/my-other-group' + + +@mock_iam_deprecated() +def test_get_all_groups(): + conn = boto.connect_iam() + conn.create_group('my-group1') + conn.create_group('my-group2') + groups = conn.get_all_groups()['list_groups_response'][ + 'list_groups_result']['groups'] + groups.should.have.length_of(2) + + +@mock_iam_deprecated() +def test_add_user_to_group(): + conn = boto.connect_iam() + with assert_raises(BotoServerError): + conn.add_user_to_group('my-group', 'my-user') + conn.create_group('my-group') + with assert_raises(BotoServerError): + conn.add_user_to_group('my-group', 'my-user') + conn.create_user('my-user') + conn.add_user_to_group('my-group', 'my-user') + + +@mock_iam_deprecated() +def test_remove_user_from_group(): + conn = boto.connect_iam() + with assert_raises(BotoServerError): + conn.remove_user_from_group('my-group', 'my-user') + conn.create_group('my-group') + conn.create_user('my-user') + with assert_raises(BotoServerError): + conn.remove_user_from_group('my-group', 'my-user') + conn.add_user_to_group('my-group', 'my-user') + conn.remove_user_from_group('my-group', 'my-user') + + +@mock_iam_deprecated() +def test_get_groups_for_user(): + conn = boto.connect_iam() + conn.create_group('my-group1') + conn.create_group('my-group2') + conn.create_group('other-group') + conn.create_user('my-user') + conn.add_user_to_group('my-group1', 'my-user') + conn.add_user_to_group('my-group2', 'my-user') + + groups = conn.get_groups_for_user( + 'my-user')['list_groups_for_user_response']['list_groups_for_user_result']['groups'] + groups.should.have.length_of(2) + + +@mock_iam_deprecated() +def test_put_group_policy(): + conn = boto.connect_iam() + conn.create_group('my-group') + conn.put_group_policy('my-group', 
'my-policy', '{"some": "json"}')
+
+
+@mock_iam
+def test_attach_group_policies():
+    conn = boto3.client('iam', region_name='us-east-1')
+    conn.create_group(GroupName='my-group')
+    conn.list_attached_group_policies(GroupName='my-group')['AttachedPolicies'].should.be.empty
+    policy_arn = 'arn:aws:iam::aws:policy/service-role/AmazonElasticMapReduceforEC2Role'
+    conn.list_attached_group_policies(GroupName='my-group')['AttachedPolicies'].should.be.empty
+    conn.attach_group_policy(GroupName='my-group', PolicyArn=policy_arn)
+    conn.list_attached_group_policies(GroupName='my-group')['AttachedPolicies'].should.equal(
+        [
+            {
+                'PolicyName': 'AmazonElasticMapReduceforEC2Role',
+                'PolicyArn': policy_arn,
+            }
+        ])
+
+    conn.detach_group_policy(GroupName='my-group', PolicyArn=policy_arn)
+    conn.list_attached_group_policies(GroupName='my-group')['AttachedPolicies'].should.be.empty
+
+
+@mock_iam_deprecated()
+def test_get_group_policy():
+    conn = boto.connect_iam()
+    conn.create_group('my-group')
+    with assert_raises(BotoServerError):
+        conn.get_group_policy('my-group', 'my-policy')
+
+    conn.put_group_policy('my-group', 'my-policy', '{"some": "json"}')
+    conn.get_group_policy('my-group', 'my-policy')
+
+
+@mock_iam_deprecated()
+def test_get_all_group_policies():
+    conn = boto.connect_iam()
+    conn.create_group('my-group')
+    policies = conn.get_all_group_policies('my-group')['list_group_policies_response']['list_group_policies_result']['policy_names']
+    assert policies == []
+    conn.put_group_policy('my-group', 'my-policy', '{"some": "json"}')
+    policies = conn.get_all_group_policies('my-group')['list_group_policies_response']['list_group_policies_result']['policy_names']
+    assert policies == ['my-policy']
+
+
+@mock_iam()
+def test_list_group_policies():
+    conn = boto3.client('iam', region_name='us-east-1')
+    conn.create_group(GroupName='my-group')
+    conn.list_group_policies(GroupName='my-group')['PolicyNames'].should.be.empty
+    conn.put_group_policy(GroupName='my-group', PolicyName='my-policy', PolicyDocument='{"some": "json"}')
+    conn.list_group_policies(GroupName='my-group')['PolicyNames'].should.equal(['my-policy'])
diff --git a/tests/test_iam/test_server.py b/tests/test_iam/test_server.py
index 59aaf14626cc..80c15b59de1b 100644
--- a/tests/test_iam/test_server.py
+++ b/tests/test_iam/test_server.py
@@ -1,26 +1,26 @@
-from __future__ import unicode_literals
-
-import json
-
-import re
-import sure  # noqa
-
-import moto.server as server
-
-'''
-Test the different server responses
-'''
-
-
-def test_iam_server_get():
-    backend = server.create_backend_app("iam")
-    test_client = backend.test_client()
-
-    group_data = test_client.action_data(
-        "CreateGroup", GroupName="test group", Path="/")
-    group_id = re.search("<GroupId>(.*)</GroupId>", group_data).groups()[0]
-
-    groups_data = test_client.action_data("ListGroups")
-    groups_ids = re.findall("<GroupId>(.*)</GroupId>", groups_data)
-
-    assert group_id in groups_ids
+from __future__ import unicode_literals
+
+import json
+
+import re
+import sure  # noqa
+
+import moto.server as server
+
+'''
+Test the different server responses
+'''
+
+
+def test_iam_server_get():
+    backend = server.create_backend_app("iam")
+    test_client = backend.test_client()
+
+    group_data = test_client.action_data(
+        "CreateGroup", GroupName="test group", Path="/")
+    group_id = re.search("<GroupId>(.*)</GroupId>", group_data).groups()[0]
+
+    groups_data = test_client.action_data("ListGroups")
+    groups_ids = re.findall("<GroupId>(.*)</GroupId>", groups_data)
+
+    assert group_id in groups_ids
diff --git a/tests/test_iot/test_iot.py 
b/tests/test_iot/test_iot.py index 758ff8940378..92fb3dfd0900 100644 --- a/tests/test_iot/test_iot.py +++ b/tests/test_iot/test_iot.py @@ -1,876 +1,876 @@ -from __future__ import unicode_literals - -import json -import sure #noqa -import boto3 - -from moto import mock_iot - - -@mock_iot -def test_attach_policy(): - client = boto3.client('iot', region_name='ap-northeast-1') - policy_name = 'my-policy' - doc = '{}' - - cert = client.create_keys_and_certificate(setAsActive=True) - cert_arn = cert['certificateArn'] - client.create_policy(policyName=policy_name, policyDocument=doc) - client.attach_policy(policyName=policy_name, target=cert_arn) - - res = client.list_attached_policies(target=cert_arn) - res.should.have.key('policies').which.should.have.length_of(1) - res['policies'][0]['policyName'].should.equal('my-policy') - - -@mock_iot -def test_detach_policy(): - client = boto3.client('iot', region_name='ap-northeast-1') - policy_name = 'my-policy' - doc = '{}' - - cert = client.create_keys_and_certificate(setAsActive=True) - cert_arn = cert['certificateArn'] - client.create_policy(policyName=policy_name, policyDocument=doc) - client.attach_policy(policyName=policy_name, target=cert_arn) - - res = client.list_attached_policies(target=cert_arn) - res.should.have.key('policies').which.should.have.length_of(1) - res['policies'][0]['policyName'].should.equal('my-policy') - - client.detach_policy(policyName=policy_name, target=cert_arn) - res = client.list_attached_policies(target=cert_arn) - res.should.have.key('policies').which.should.be.empty - - -@mock_iot -def test_list_attached_policies(): - client = boto3.client('iot', region_name='ap-northeast-1') - cert = client.create_keys_and_certificate(setAsActive=True) - policies = client.list_attached_policies(target=cert['certificateArn']) - policies['policies'].should.be.empty - - -@mock_iot -def test_policy_versions(): - client = boto3.client('iot', region_name='ap-northeast-1') - policy_name = 'my-policy' - doc = '{}' - - policy = client.create_policy(policyName=policy_name, policyDocument=doc) - policy.should.have.key('policyName').which.should.equal(policy_name) - policy.should.have.key('policyArn').which.should_not.be.none - policy.should.have.key('policyDocument').which.should.equal(json.dumps({})) - policy.should.have.key('policyVersionId').which.should.equal('1') - - policy = client.get_policy(policyName=policy_name) - policy.should.have.key('policyName').which.should.equal(policy_name) - policy.should.have.key('policyArn').which.should_not.be.none - policy.should.have.key('policyDocument').which.should.equal(json.dumps({})) - policy.should.have.key('defaultVersionId').which.should.equal(policy['defaultVersionId']) - - policy1 = client.create_policy_version(policyName=policy_name, policyDocument=json.dumps({'version': 'version_1'}), - setAsDefault=True) - policy1.should.have.key('policyArn').which.should_not.be.none - policy1.should.have.key('policyDocument').which.should.equal(json.dumps({'version': 'version_1'})) - policy1.should.have.key('policyVersionId').which.should.equal('2') - policy1.should.have.key('isDefaultVersion').which.should.equal(True) - - policy2 = client.create_policy_version(policyName=policy_name, policyDocument=json.dumps({'version': 'version_2'}), - setAsDefault=False) - policy2.should.have.key('policyArn').which.should_not.be.none - policy2.should.have.key('policyDocument').which.should.equal(json.dumps({'version': 'version_2'})) - policy2.should.have.key('policyVersionId').which.should.equal('3') - 
policy2.should.have.key('isDefaultVersion').which.should.equal(False) - - policy = client.get_policy(policyName=policy_name) - policy.should.have.key('policyName').which.should.equal(policy_name) - policy.should.have.key('policyArn').which.should_not.be.none - policy.should.have.key('policyDocument').which.should.equal(json.dumps({'version': 'version_1'})) - policy.should.have.key('defaultVersionId').which.should.equal(policy1['policyVersionId']) - - policy_versions = client.list_policy_versions(policyName=policy_name) - policy_versions.should.have.key('policyVersions').which.should.have.length_of(3) - list(map(lambda item: item['isDefaultVersion'], policy_versions['policyVersions'])).count(True).should.equal(1) - default_policy = list(filter(lambda item: item['isDefaultVersion'], policy_versions['policyVersions'])) - default_policy[0].should.have.key('versionId').should.equal(policy1['policyVersionId']) - - policy = client.get_policy(policyName=policy_name) - policy.should.have.key('policyName').which.should.equal(policy_name) - policy.should.have.key('policyArn').which.should_not.be.none - policy.should.have.key('policyDocument').which.should.equal(json.dumps({'version': 'version_1'})) - policy.should.have.key('defaultVersionId').which.should.equal(policy1['policyVersionId']) - - client.set_default_policy_version(policyName=policy_name, policyVersionId=policy2['policyVersionId']) - policy_versions = client.list_policy_versions(policyName=policy_name) - policy_versions.should.have.key('policyVersions').which.should.have.length_of(3) - list(map(lambda item: item['isDefaultVersion'], policy_versions['policyVersions'])).count(True).should.equal(1) - default_policy = list(filter(lambda item: item['isDefaultVersion'], policy_versions['policyVersions'])) - default_policy[0].should.have.key('versionId').should.equal(policy2['policyVersionId']) - - policy = client.get_policy(policyName=policy_name) - policy.should.have.key('policyName').which.should.equal(policy_name) - policy.should.have.key('policyArn').which.should_not.be.none - policy.should.have.key('policyDocument').which.should.equal(json.dumps({'version': 'version_2'})) - policy.should.have.key('defaultVersionId').which.should.equal(policy2['policyVersionId']) - - client.delete_policy_version(policyName=policy_name, policyVersionId='1') - policy_versions = client.list_policy_versions(policyName=policy_name) - policy_versions.should.have.key('policyVersions').which.should.have.length_of(2) - - client.delete_policy_version(policyName=policy_name, policyVersionId=policy1['policyVersionId']) - policy_versions = client.list_policy_versions(policyName=policy_name) - policy_versions.should.have.key('policyVersions').which.should.have.length_of(1) - - # should fail as it's the default policy. 
Should use delete_policy instead - try: - client.delete_policy_version(policyName=policy_name, policyVersionId=policy2['policyVersionId']) - assert False, 'Should have failed in previous call' - except Exception as exception: - exception.response['Error']['Message'].should.equal('Cannot delete the default version of a policy') - - -@mock_iot -def test_things(): - client = boto3.client('iot', region_name='ap-northeast-1') - name = 'my-thing' - type_name = 'my-type-name' - - # thing type - thing_type = client.create_thing_type(thingTypeName=type_name) - thing_type.should.have.key('thingTypeName').which.should.equal(type_name) - thing_type.should.have.key('thingTypeArn') - - res = client.list_thing_types() - res.should.have.key('thingTypes').which.should.have.length_of(1) - for thing_type in res['thingTypes']: - thing_type.should.have.key('thingTypeName').which.should_not.be.none - - thing_type = client.describe_thing_type(thingTypeName=type_name) - thing_type.should.have.key('thingTypeName').which.should.equal(type_name) - thing_type.should.have.key('thingTypeProperties') - thing_type.should.have.key('thingTypeMetadata') - - # thing - thing = client.create_thing(thingName=name, thingTypeName=type_name) - thing.should.have.key('thingName').which.should.equal(name) - thing.should.have.key('thingArn') - res = client.list_things() - res.should.have.key('things').which.should.have.length_of(1) - for thing in res['things']: - thing.should.have.key('thingName').which.should_not.be.none - thing.should.have.key('thingArn').which.should_not.be.none - - thing = client.update_thing(thingName=name, attributePayload={'attributes': {'k1': 'v1'}}) - res = client.list_things() - res.should.have.key('things').which.should.have.length_of(1) - for thing in res['things']: - thing.should.have.key('thingName').which.should_not.be.none - thing.should.have.key('thingArn').which.should_not.be.none - res['things'][0]['attributes'].should.have.key('k1').which.should.equal('v1') - - thing = client.describe_thing(thingName=name) - thing.should.have.key('thingName').which.should.equal(name) - thing.should.have.key('defaultClientId') - thing.should.have.key('thingTypeName') - thing.should.have.key('attributes') - thing.should.have.key('version') - - # delete thing - client.delete_thing(thingName=name) - res = client.list_things() - res.should.have.key('things').which.should.have.length_of(0) - - # delete thing type - client.delete_thing_type(thingTypeName=type_name) - res = client.list_thing_types() - res.should.have.key('thingTypes').which.should.have.length_of(0) - - -@mock_iot -def test_list_thing_types(): - client = boto3.client('iot', region_name='ap-northeast-1') - - for i in range(0, 100): - client.create_thing_type(thingTypeName=str(i + 1)) - - thing_types = client.list_thing_types() - thing_types.should.have.key('nextToken') - thing_types.should.have.key('thingTypes').which.should.have.length_of(50) - thing_types['thingTypes'][0]['thingTypeName'].should.equal('1') - thing_types['thingTypes'][-1]['thingTypeName'].should.equal('50') - - thing_types = client.list_thing_types(nextToken=thing_types['nextToken']) - thing_types.should.have.key('thingTypes').which.should.have.length_of(50) - thing_types.should_not.have.key('nextToken') - thing_types['thingTypes'][0]['thingTypeName'].should.equal('51') - thing_types['thingTypes'][-1]['thingTypeName'].should.equal('100') - - -@mock_iot -def test_list_thing_types_with_typename_filter(): - client = boto3.client('iot', region_name='ap-northeast-1') - - 
client.create_thing_type(thingTypeName='thing') - client.create_thing_type(thingTypeName='thingType') - client.create_thing_type(thingTypeName='thingTypeName') - client.create_thing_type(thingTypeName='thingTypeNameGroup') - client.create_thing_type(thingTypeName='shouldNotFind') - client.create_thing_type(thingTypeName='find me it shall not') - - thing_types = client.list_thing_types(thingTypeName='thing') - thing_types.should_not.have.key('nextToken') - thing_types.should.have.key('thingTypes').which.should.have.length_of(4) - thing_types['thingTypes'][0]['thingTypeName'].should.equal('thing') - thing_types['thingTypes'][-1]['thingTypeName'].should.equal('thingTypeNameGroup') - - thing_types = client.list_thing_types(thingTypeName='thingTypeName') - thing_types.should_not.have.key('nextToken') - thing_types.should.have.key('thingTypes').which.should.have.length_of(2) - thing_types['thingTypes'][0]['thingTypeName'].should.equal('thingTypeName') - thing_types['thingTypes'][-1]['thingTypeName'].should.equal('thingTypeNameGroup') - - -@mock_iot -def test_list_things_with_next_token(): - client = boto3.client('iot', region_name='ap-northeast-1') - - for i in range(0, 200): - client.create_thing(thingName=str(i + 1)) - - things = client.list_things() - things.should.have.key('nextToken') - things.should.have.key('things').which.should.have.length_of(50) - things['things'][0]['thingName'].should.equal('1') - things['things'][0]['thingArn'].should.equal('arn:aws:iot:ap-northeast-1:1:thing/1') - things['things'][-1]['thingName'].should.equal('50') - things['things'][-1]['thingArn'].should.equal('arn:aws:iot:ap-northeast-1:1:thing/50') - - things = client.list_things(nextToken=things['nextToken']) - things.should.have.key('nextToken') - things.should.have.key('things').which.should.have.length_of(50) - things['things'][0]['thingName'].should.equal('51') - things['things'][0]['thingArn'].should.equal('arn:aws:iot:ap-northeast-1:1:thing/51') - things['things'][-1]['thingName'].should.equal('100') - things['things'][-1]['thingArn'].should.equal('arn:aws:iot:ap-northeast-1:1:thing/100') - - things = client.list_things(nextToken=things['nextToken']) - things.should.have.key('nextToken') - things.should.have.key('things').which.should.have.length_of(50) - things['things'][0]['thingName'].should.equal('101') - things['things'][0]['thingArn'].should.equal('arn:aws:iot:ap-northeast-1:1:thing/101') - things['things'][-1]['thingName'].should.equal('150') - things['things'][-1]['thingArn'].should.equal('arn:aws:iot:ap-northeast-1:1:thing/150') - - things = client.list_things(nextToken=things['nextToken']) - things.should_not.have.key('nextToken') - things.should.have.key('things').which.should.have.length_of(50) - things['things'][0]['thingName'].should.equal('151') - things['things'][0]['thingArn'].should.equal('arn:aws:iot:ap-northeast-1:1:thing/151') - things['things'][-1]['thingName'].should.equal('200') - things['things'][-1]['thingArn'].should.equal('arn:aws:iot:ap-northeast-1:1:thing/200') - - -@mock_iot -def test_list_things_with_attribute_and_thing_type_filter_and_next_token(): - client = boto3.client('iot', region_name='ap-northeast-1') - client.create_thing_type(thingTypeName='my-thing-type') - - for i in range(0, 200): - if not (i + 1) % 3: - attribute_payload = { - 'attributes': { - 'foo': 'bar' - } - } - elif not (i + 1) % 5: - attribute_payload = { - 'attributes': { - 'bar': 'foo' - } - } - else: - attribute_payload = {} - - if not (i + 1) % 2: - thing_type_name = 'my-thing-type' - 
client.create_thing(thingName=str(i + 1), thingTypeName=thing_type_name, attributePayload=attribute_payload) - else: - client.create_thing(thingName=str(i + 1), attributePayload=attribute_payload) - - # Test filter for thingTypeName - things = client.list_things(thingTypeName=thing_type_name) - things.should.have.key('nextToken') - things.should.have.key('things').which.should.have.length_of(50) - things['things'][0]['thingName'].should.equal('2') - things['things'][0]['thingArn'].should.equal('arn:aws:iot:ap-northeast-1:1:thing/2') - things['things'][-1]['thingName'].should.equal('100') - things['things'][-1]['thingArn'].should.equal('arn:aws:iot:ap-northeast-1:1:thing/100') - all(item['thingTypeName'] == thing_type_name for item in things['things']) - - things = client.list_things(nextToken=things['nextToken'], thingTypeName=thing_type_name) - things.should_not.have.key('nextToken') - things.should.have.key('things').which.should.have.length_of(50) - things['things'][0]['thingName'].should.equal('102') - things['things'][0]['thingArn'].should.equal('arn:aws:iot:ap-northeast-1:1:thing/102') - things['things'][-1]['thingName'].should.equal('200') - things['things'][-1]['thingArn'].should.equal('arn:aws:iot:ap-northeast-1:1:thing/200') - all(item['thingTypeName'] == thing_type_name for item in things['things']) - - # Test filter for attributes - things = client.list_things(attributeName='foo', attributeValue='bar') - things.should.have.key('nextToken') - things.should.have.key('things').which.should.have.length_of(50) - things['things'][0]['thingName'].should.equal('3') - things['things'][0]['thingArn'].should.equal('arn:aws:iot:ap-northeast-1:1:thing/3') - things['things'][-1]['thingName'].should.equal('150') - things['things'][-1]['thingArn'].should.equal('arn:aws:iot:ap-northeast-1:1:thing/150') - all(item['attributes'] == {'foo': 'bar'} for item in things['things']) - - things = client.list_things(nextToken=things['nextToken'], attributeName='foo', attributeValue='bar') - things.should_not.have.key('nextToken') - things.should.have.key('things').which.should.have.length_of(16) - things['things'][0]['thingName'].should.equal('153') - things['things'][0]['thingArn'].should.equal('arn:aws:iot:ap-northeast-1:1:thing/153') - things['things'][-1]['thingName'].should.equal('198') - things['things'][-1]['thingArn'].should.equal('arn:aws:iot:ap-northeast-1:1:thing/198') - all(item['attributes'] == {'foo': 'bar'} for item in things['things']) - - # Test filter for attributes and thingTypeName - things = client.list_things(thingTypeName=thing_type_name, attributeName='foo', attributeValue='bar') - things.should_not.have.key('nextToken') - things.should.have.key('things').which.should.have.length_of(33) - things['things'][0]['thingName'].should.equal('6') - things['things'][0]['thingArn'].should.equal('arn:aws:iot:ap-northeast-1:1:thing/6') - things['things'][-1]['thingName'].should.equal('198') - things['things'][-1]['thingArn'].should.equal('arn:aws:iot:ap-northeast-1:1:thing/198') - all(item['attributes'] == {'foo': 'bar'} and item['thingTypeName'] == thing_type_name for item in things['things']) - - -@mock_iot -def test_certs(): - client = boto3.client('iot', region_name='ap-northeast-1') - cert = client.create_keys_and_certificate(setAsActive=True) - cert.should.have.key('certificateArn').which.should_not.be.none - cert.should.have.key('certificateId').which.should_not.be.none - cert.should.have.key('certificatePem').which.should_not.be.none - cert.should.have.key('keyPair') - 
cert['keyPair'].should.have.key('PublicKey').which.should_not.be.none
-    cert['keyPair'].should.have.key('PrivateKey').which.should_not.be.none
-    cert_id = cert['certificateId']
-
-    cert = client.describe_certificate(certificateId=cert_id)
-    cert.should.have.key('certificateDescription')
-    cert_desc = cert['certificateDescription']
-    cert_desc.should.have.key('certificateArn').which.should_not.be.none
-    cert_desc.should.have.key('certificateId').which.should_not.be.none
-    cert_desc.should.have.key('certificatePem').which.should_not.be.none
-    cert_desc.should.have.key('status').which.should.equal('ACTIVE')
-
-    res = client.list_certificates()
-    res.should.have.key('certificates').which.should.have.length_of(1)
-    for cert in res['certificates']:
-        cert.should.have.key('certificateArn').which.should_not.be.none
-        cert.should.have.key('certificateId').which.should_not.be.none
-        cert.should.have.key('status').which.should_not.be.none
-        cert.should.have.key('creationDate').which.should_not.be.none
-
-    client.update_certificate(certificateId=cert_id, newStatus='REVOKED')
-    cert = client.describe_certificate(certificateId=cert_id)
-    cert['certificateDescription'].should.have.key('status').which.should.equal('REVOKED')
-
-    client.delete_certificate(certificateId=cert_id)
-    res = client.list_certificates()
-    res.should.have.key('certificates').which.should.have.length_of(0)
-
-
-@mock_iot
-def test_certs_create_inactive():
-    client = boto3.client('iot', region_name='ap-northeast-1')
-    cert = client.create_keys_and_certificate(setAsActive=False)
-    cert_id = cert['certificateId']
-
-    cert = client.describe_certificate(certificateId=cert_id)
-    cert.should.have.key('certificateDescription')
-    cert_desc = cert['certificateDescription']
-    cert_desc.should.have.key('status').which.should.equal('INACTIVE')
-
-    client.update_certificate(certificateId=cert_id, newStatus='ACTIVE')
-    cert = client.describe_certificate(certificateId=cert_id)
-    cert.should.have.key('certificateDescription')
-    cert_desc = cert['certificateDescription']
-    cert_desc.should.have.key('status').which.should.equal('ACTIVE')
-
-
-@mock_iot
-def test_policy():
-    client = boto3.client('iot', region_name='ap-northeast-1')
-    name = 'my-policy'
-    doc = '{}'
-    policy = client.create_policy(policyName=name, policyDocument=doc)
-    policy.should.have.key('policyName').which.should.equal(name)
-    policy.should.have.key('policyArn').which.should_not.be.none
-    policy.should.have.key('policyDocument').which.should.equal(doc)
-    policy.should.have.key('policyVersionId').which.should.equal('1')
-
-    policy = client.get_policy(policyName=name)
-    policy.should.have.key('policyName').which.should.equal(name)
-    policy.should.have.key('policyArn').which.should_not.be.none
-    policy.should.have.key('policyDocument').which.should.equal(doc)
-    policy.should.have.key('defaultVersionId').which.should.equal('1')
-
-    res = client.list_policies()
-    res.should.have.key('policies').which.should.have.length_of(1)
-    for policy in res['policies']:
-        policy.should.have.key('policyName').which.should_not.be.none
-        policy.should.have.key('policyArn').which.should_not.be.none
-
-    client.delete_policy(policyName=name)
-    res = client.list_policies()
-    res.should.have.key('policies').which.should.have.length_of(0)
-
-
-@mock_iot
-def test_principal_policy():
-    client = boto3.client('iot', region_name='ap-northeast-1')
-    policy_name = 'my-policy'
-    doc = '{}'
-    policy = client.create_policy(policyName=policy_name, policyDocument=doc)
-    cert = client.create_keys_and_certificate(setAsActive=True)
-    cert_arn 
= cert['certificateArn'] - - client.attach_principal_policy(policyName=policy_name, principal=cert_arn) - - res = client.list_principal_policies(principal=cert_arn) - res.should.have.key('policies').which.should.have.length_of(1) - for policy in res['policies']: - policy.should.have.key('policyName').which.should_not.be.none - policy.should.have.key('policyArn').which.should_not.be.none - - res = client.list_policy_principals(policyName=policy_name) - res.should.have.key('principals').which.should.have.length_of(1) - for principal in res['principals']: - principal.should_not.be.none - - client.detach_principal_policy(policyName=policy_name, principal=cert_arn) - res = client.list_principal_policies(principal=cert_arn) - res.should.have.key('policies').which.should.have.length_of(0) - res = client.list_policy_principals(policyName=policy_name) - res.should.have.key('principals').which.should.have.length_of(0) - - -@mock_iot -def test_principal_thing(): - client = boto3.client('iot', region_name='ap-northeast-1') - thing_name = 'my-thing' - thing = client.create_thing(thingName=thing_name) - cert = client.create_keys_and_certificate(setAsActive=True) - cert_arn = cert['certificateArn'] - - client.attach_thing_principal(thingName=thing_name, principal=cert_arn) - - res = client.list_principal_things(principal=cert_arn) - res.should.have.key('things').which.should.have.length_of(1) - for thing in res['things']: - thing.should_not.be.none - res = client.list_thing_principals(thingName=thing_name) - res.should.have.key('principals').which.should.have.length_of(1) - for principal in res['principals']: - principal.should_not.be.none - - client.detach_thing_principal(thingName=thing_name, principal=cert_arn) - res = client.list_principal_things(principal=cert_arn) - res.should.have.key('things').which.should.have.length_of(0) - res = client.list_thing_principals(thingName=thing_name) - res.should.have.key('principals').which.should.have.length_of(0) - - -@mock_iot -def test_thing_groups(): - client = boto3.client('iot', region_name='ap-northeast-1') - group_name = 'my-group-name' - - # thing group - thing_group = client.create_thing_group(thingGroupName=group_name) - thing_group.should.have.key('thingGroupName').which.should.equal(group_name) - thing_group.should.have.key('thingGroupArn') - - res = client.list_thing_groups() - res.should.have.key('thingGroups').which.should.have.length_of(1) - for thing_group in res['thingGroups']: - thing_group.should.have.key('groupName').which.should_not.be.none - thing_group.should.have.key('groupArn').which.should_not.be.none - - thing_group = client.describe_thing_group(thingGroupName=group_name) - thing_group.should.have.key('thingGroupName').which.should.equal(group_name) - thing_group.should.have.key('thingGroupProperties') - thing_group.should.have.key('thingGroupMetadata') - thing_group.should.have.key('version') - - # delete thing group - client.delete_thing_group(thingGroupName=group_name) - res = client.list_thing_groups() - res.should.have.key('thingGroups').which.should.have.length_of(0) - - # props create test - props = { - 'thingGroupDescription': 'my first thing group', - 'attributePayload': { - 'attributes': { - 'key1': 'val01', - 'Key02': 'VAL2' - } - } - } - thing_group = client.create_thing_group(thingGroupName=group_name, thingGroupProperties=props) - thing_group.should.have.key('thingGroupName').which.should.equal(group_name) - thing_group.should.have.key('thingGroupArn') - - thing_group = 
client.describe_thing_group(thingGroupName=group_name) - thing_group.should.have.key('thingGroupProperties') \ - .which.should.have.key('attributePayload') \ - .which.should.have.key('attributes') - res_props = thing_group['thingGroupProperties']['attributePayload']['attributes'] - res_props.should.have.key('key1').which.should.equal('val01') - res_props.should.have.key('Key02').which.should.equal('VAL2') - - # props update test with merge - new_props = { - 'attributePayload': { - 'attributes': { - 'k3': 'v3' - }, - 'merge': True - } - } - client.update_thing_group( - thingGroupName=group_name, - thingGroupProperties=new_props - ) - thing_group = client.describe_thing_group(thingGroupName=group_name) - thing_group.should.have.key('thingGroupProperties') \ - .which.should.have.key('attributePayload') \ - .which.should.have.key('attributes') - res_props = thing_group['thingGroupProperties']['attributePayload']['attributes'] - res_props.should.have.key('key1').which.should.equal('val01') - res_props.should.have.key('Key02').which.should.equal('VAL2') - - res_props.should.have.key('k3').which.should.equal('v3') - - # props update test - new_props = { - 'attributePayload': { - 'attributes': { - 'k4': 'v4' - } - } - } - client.update_thing_group( - thingGroupName=group_name, - thingGroupProperties=new_props - ) - thing_group = client.describe_thing_group(thingGroupName=group_name) - thing_group.should.have.key('thingGroupProperties') \ - .which.should.have.key('attributePayload') \ - .which.should.have.key('attributes') - res_props = thing_group['thingGroupProperties']['attributePayload']['attributes'] - res_props.should.have.key('k4').which.should.equal('v4') - res_props.should_not.have.key('key1') - - -@mock_iot -def test_thing_group_relations(): - client = boto3.client('iot', region_name='ap-northeast-1') - name = 'my-thing' - group_name = 'my-group-name' - - # thing group - thing_group = client.create_thing_group(thingGroupName=group_name) - thing_group.should.have.key('thingGroupName').which.should.equal(group_name) - thing_group.should.have.key('thingGroupArn') - - # thing - thing = client.create_thing(thingName=name) - thing.should.have.key('thingName').which.should.equal(name) - thing.should.have.key('thingArn') - - # add in 4 way - client.add_thing_to_thing_group( - thingGroupName=group_name, - thingName=name - ) - client.add_thing_to_thing_group( - thingGroupArn=thing_group['thingGroupArn'], - thingArn=thing['thingArn'] - ) - client.add_thing_to_thing_group( - thingGroupName=group_name, - thingArn=thing['thingArn'] - ) - client.add_thing_to_thing_group( - thingGroupArn=thing_group['thingGroupArn'], - thingName=name - ) - - things = client.list_things_in_thing_group( - thingGroupName=group_name - ) - things.should.have.key('things') - things['things'].should.have.length_of(1) - - thing_groups = client.list_thing_groups_for_thing( - thingName=name - ) - thing_groups.should.have.key('thingGroups') - thing_groups['thingGroups'].should.have.length_of(1) - - # remove in 4 way - client.remove_thing_from_thing_group( - thingGroupName=group_name, - thingName=name - ) - client.remove_thing_from_thing_group( - thingGroupArn=thing_group['thingGroupArn'], - thingArn=thing['thingArn'] - ) - client.remove_thing_from_thing_group( - thingGroupName=group_name, - thingArn=thing['thingArn'] - ) - client.remove_thing_from_thing_group( - thingGroupArn=thing_group['thingGroupArn'], - thingName=name - ) - things = client.list_things_in_thing_group( - thingGroupName=group_name - ) - 
things.should.have.key('things')
-    things['things'].should.have.length_of(0)
-
-    # update thing group for thing
-    client.update_thing_groups_for_thing(
-        thingName=name,
-        thingGroupsToAdd=[
-            group_name
-        ]
-    )
-    things = client.list_things_in_thing_group(
-        thingGroupName=group_name
-    )
-    things.should.have.key('things')
-    things['things'].should.have.length_of(1)
-
-    client.update_thing_groups_for_thing(
-        thingName=name,
-        thingGroupsToRemove=[
-            group_name
-        ]
-    )
-    things = client.list_things_in_thing_group(
-        thingGroupName=group_name
-    )
-    things.should.have.key('things')
-    things['things'].should.have.length_of(0)
-
-
-@mock_iot
-def test_create_job():
-    client = boto3.client('iot', region_name='eu-west-1')
-    name = "my-thing"
-    job_id = "TestJob"
-    # thing
-    thing = client.create_thing(thingName=name)
-    thing.should.have.key('thingName').which.should.equal(name)
-    thing.should.have.key('thingArn')
-
-    # job document
-    job_document = {
-        "field": "value"
-    }
-
-    job = client.create_job(
-        jobId=job_id,
-        targets=[thing["thingArn"]],
-        document=json.dumps(job_document),
-        description="Description",
-        presignedUrlConfig={
-            'roleArn': 'arn:aws:iam::1:role/service-role/iot_job_role',
-            'expiresInSec': 123
-        },
-        targetSelection="CONTINUOUS",
-        jobExecutionsRolloutConfig={
-            'maximumPerMinute': 10
-        }
-    )
-
-    job.should.have.key('jobId').which.should.equal(job_id)
-    job.should.have.key('jobArn')
-    job.should.have.key('description')
-
-
-@mock_iot
-def test_describe_job():
-    client = boto3.client('iot', region_name='eu-west-1')
-    name = "my-thing"
-    job_id = "TestJob"
-    # thing
-    thing = client.create_thing(thingName=name)
-    thing.should.have.key('thingName').which.should.equal(name)
-    thing.should.have.key('thingArn')
-
-    job = client.create_job(
-        jobId=job_id,
-        targets=[thing["thingArn"]],
-        documentSource="https://s3-eu-west-1.amazonaws.com/bucket-name/job_document.json",
-        presignedUrlConfig={
-            'roleArn': 'arn:aws:iam::1:role/service-role/iot_job_role',
-            'expiresInSec': 123
-        },
-        targetSelection="CONTINUOUS",
-        jobExecutionsRolloutConfig={
-            'maximumPerMinute': 10
-        }
-    )
-
-    job.should.have.key('jobId').which.should.equal(job_id)
-    job.should.have.key('jobArn')
-
-    job = client.describe_job(jobId=job_id)
-    job.should.have.key('documentSource')
-    job.should.have.key('job')
-    job.should.have.key('job').which.should.have.key("jobArn")
-    job.should.have.key('job').which.should.have.key("jobId").which.should.equal(job_id)
-    job.should.have.key('job').which.should.have.key("targets")
-    job.should.have.key('job').which.should.have.key("jobProcessDetails")
-    job.should.have.key('job').which.should.have.key("lastUpdatedAt")
-    job.should.have.key('job').which.should.have.key("createdAt")
-    job.should.have.key('job').which.should.have.key("jobExecutionsRolloutConfig")
-    job.should.have.key('job').which.should.have.key("targetSelection").which.should.equal("CONTINUOUS")
-    job.should.have.key('job').which.should.have.key("presignedUrlConfig")
-    job.should.have.key('job').which.should.have.key("presignedUrlConfig").which.should.have.key(
-        "roleArn").which.should.equal('arn:aws:iam::1:role/service-role/iot_job_role')
-    job.should.have.key('job').which.should.have.key("presignedUrlConfig").which.should.have.key(
-        "expiresInSec").which.should.equal(123)
-    job.should.have.key('job').which.should.have.key("jobExecutionsRolloutConfig").which.should.have.key(
-        "maximumPerMinute").which.should.equal(10)
-
-
-@mock_iot
-def 
test_describe_job_1(): - client = boto3.client('iot', region_name='eu-west-1') - name = "my-thing" - job_id = "TestJob" - # thing - thing = client.create_thing(thingName=name) - thing.should.have.key('thingName').which.should.equal(name) - thing.should.have.key('thingArn') - - # job document - job_document = { - "field": "value" - } - - job = client.create_job( - jobId=job_id, - targets=[thing["thingArn"]], - document=json.dumps(job_document), - presignedUrlConfig={ - 'roleArn': 'arn:aws:iam::1:role/service-role/iot_job_role', - 'expiresInSec': 123 - }, - targetSelection="CONTINUOUS", - jobExecutionsRolloutConfig={ - 'maximumPerMinute': 10 - } - ) - - job.should.have.key('jobId').which.should.equal(job_id) - job.should.have.key('jobArn') - - job = client.describe_job(jobId=job_id) - job.should.have.key('job') - job.should.have.key('job').which.should.have.key("jobArn") - job.should.have.key('job').which.should.have.key("jobId").which.should.equal(job_id) - job.should.have.key('job').which.should.have.key("targets") - job.should.have.key('job').which.should.have.key("jobProcessDetails") - job.should.have.key('job').which.should.have.key("lastUpdatedAt") - job.should.have.key('job').which.should.have.key("createdAt") - job.should.have.key('job').which.should.have.key("jobExecutionsRolloutConfig") - job.should.have.key('job').which.should.have.key("targetSelection").which.should.equal("CONTINUOUS") - job.should.have.key('job').which.should.have.key("presignedUrlConfig") - job.should.have.key('job').which.should.have.key("presignedUrlConfig").which.should.have.key( - "roleArn").which.should.equal('arn:aws:iam::1:role/service-role/iot_job_role') - job.should.have.key('job').which.should.have.key("presignedUrlConfig").which.should.have.key( - "expiresInSec").which.should.equal(123) - job.should.have.key('job').which.should.have.key("jobExecutionsRolloutConfig").which.should.have.key( - "maximumPerMinute").which.should.equal(10) - - -@mock_iot -def test_get_job_document_with_document_source(): - client = boto3.client('iot', region_name='eu-west-1') - name = "my-thing" - job_id = "TestJob" - # thing - thing = client.create_thing(thingName=name) - thing.should.have.key('thingName').which.should.equal(name) - thing.should.have.key('thingArn') - - job = client.create_job( - jobId=job_id, - targets=[thing["thingArn"]], - documentSource="https://s3-eu-west-1.amazonaws.com/bucket-name/job_document.json", - presignedUrlConfig={ - 'roleArn': 'arn:aws:iam::1:role/service-role/iot_job_role', - 'expiresInSec': 123 - }, - targetSelection="CONTINUOUS", - jobExecutionsRolloutConfig={ - 'maximumPerMinute': 10 - } - ) - - job.should.have.key('jobId').which.should.equal(job_id) - job.should.have.key('jobArn') - - job_document = client.get_job_document(jobId=job_id) - job_document.should.have.key('document').which.should.equal('') - - -@mock_iot -def test_get_job_document_with_document(): - client = boto3.client('iot', region_name='eu-west-1') - name = "my-thing" - job_id = "TestJob" - # thing - thing = client.create_thing(thingName=name) - thing.should.have.key('thingName').which.should.equal(name) - thing.should.have.key('thingArn') - - # job document - job_document = { - "field": "value" - } - - job = client.create_job( - jobId=job_id, - targets=[thing["thingArn"]], - document=json.dumps(job_document), - presignedUrlConfig={ - 'roleArn': 'arn:aws:iam::1:role/service-role/iot_job_role', - 'expiresInSec': 123 - }, - targetSelection="CONTINUOUS", - jobExecutionsRolloutConfig={ - 'maximumPerMinute': 10 - } - ) - - 
job.should.have.key('jobId').which.should.equal(job_id) - job.should.have.key('jobArn') - - job_document = client.get_job_document(jobId=job_id) - job_document.should.have.key('document').which.should.equal("{\"field\": \"value\"}") +from __future__ import unicode_literals + +import json +import sure #noqa +import boto3 + +from moto import mock_iot + + +@mock_iot +def test_attach_policy(): + client = boto3.client('iot', region_name='ap-northeast-1') + policy_name = 'my-policy' + doc = '{}' + + cert = client.create_keys_and_certificate(setAsActive=True) + cert_arn = cert['certificateArn'] + client.create_policy(policyName=policy_name, policyDocument=doc) + client.attach_policy(policyName=policy_name, target=cert_arn) + + res = client.list_attached_policies(target=cert_arn) + res.should.have.key('policies').which.should.have.length_of(1) + res['policies'][0]['policyName'].should.equal('my-policy') + + +@mock_iot +def test_detach_policy(): + client = boto3.client('iot', region_name='ap-northeast-1') + policy_name = 'my-policy' + doc = '{}' + + cert = client.create_keys_and_certificate(setAsActive=True) + cert_arn = cert['certificateArn'] + client.create_policy(policyName=policy_name, policyDocument=doc) + client.attach_policy(policyName=policy_name, target=cert_arn) + + res = client.list_attached_policies(target=cert_arn) + res.should.have.key('policies').which.should.have.length_of(1) + res['policies'][0]['policyName'].should.equal('my-policy') + + client.detach_policy(policyName=policy_name, target=cert_arn) + res = client.list_attached_policies(target=cert_arn) + res.should.have.key('policies').which.should.be.empty + + +@mock_iot +def test_list_attached_policies(): + client = boto3.client('iot', region_name='ap-northeast-1') + cert = client.create_keys_and_certificate(setAsActive=True) + policies = client.list_attached_policies(target=cert['certificateArn']) + policies['policies'].should.be.empty + + +@mock_iot +def test_policy_versions(): + client = boto3.client('iot', region_name='ap-northeast-1') + policy_name = 'my-policy' + doc = '{}' + + policy = client.create_policy(policyName=policy_name, policyDocument=doc) + policy.should.have.key('policyName').which.should.equal(policy_name) + policy.should.have.key('policyArn').which.should_not.be.none + policy.should.have.key('policyDocument').which.should.equal(json.dumps({})) + policy.should.have.key('policyVersionId').which.should.equal('1') + + policy = client.get_policy(policyName=policy_name) + policy.should.have.key('policyName').which.should.equal(policy_name) + policy.should.have.key('policyArn').which.should_not.be.none + policy.should.have.key('policyDocument').which.should.equal(json.dumps({})) + policy.should.have.key('defaultVersionId').which.should.equal(policy['defaultVersionId']) + + policy1 = client.create_policy_version(policyName=policy_name, policyDocument=json.dumps({'version': 'version_1'}), + setAsDefault=True) + policy1.should.have.key('policyArn').which.should_not.be.none + policy1.should.have.key('policyDocument').which.should.equal(json.dumps({'version': 'version_1'})) + policy1.should.have.key('policyVersionId').which.should.equal('2') + policy1.should.have.key('isDefaultVersion').which.should.equal(True) + + policy2 = client.create_policy_version(policyName=policy_name, policyDocument=json.dumps({'version': 'version_2'}), + setAsDefault=False) + policy2.should.have.key('policyArn').which.should_not.be.none + policy2.should.have.key('policyDocument').which.should.equal(json.dumps({'version': 'version_2'})) + 
policy2.should.have.key('policyVersionId').which.should.equal('3') + policy2.should.have.key('isDefaultVersion').which.should.equal(False) + + policy = client.get_policy(policyName=policy_name) + policy.should.have.key('policyName').which.should.equal(policy_name) + policy.should.have.key('policyArn').which.should_not.be.none + policy.should.have.key('policyDocument').which.should.equal(json.dumps({'version': 'version_1'})) + policy.should.have.key('defaultVersionId').which.should.equal(policy1['policyVersionId']) + + policy_versions = client.list_policy_versions(policyName=policy_name) + policy_versions.should.have.key('policyVersions').which.should.have.length_of(3) + list(map(lambda item: item['isDefaultVersion'], policy_versions['policyVersions'])).count(True).should.equal(1) + default_policy = list(filter(lambda item: item['isDefaultVersion'], policy_versions['policyVersions'])) + default_policy[0].should.have.key('versionId').should.equal(policy1['policyVersionId']) + + policy = client.get_policy(policyName=policy_name) + policy.should.have.key('policyName').which.should.equal(policy_name) + policy.should.have.key('policyArn').which.should_not.be.none + policy.should.have.key('policyDocument').which.should.equal(json.dumps({'version': 'version_1'})) + policy.should.have.key('defaultVersionId').which.should.equal(policy1['policyVersionId']) + + client.set_default_policy_version(policyName=policy_name, policyVersionId=policy2['policyVersionId']) + policy_versions = client.list_policy_versions(policyName=policy_name) + policy_versions.should.have.key('policyVersions').which.should.have.length_of(3) + list(map(lambda item: item['isDefaultVersion'], policy_versions['policyVersions'])).count(True).should.equal(1) + default_policy = list(filter(lambda item: item['isDefaultVersion'], policy_versions['policyVersions'])) + default_policy[0].should.have.key('versionId').should.equal(policy2['policyVersionId']) + + policy = client.get_policy(policyName=policy_name) + policy.should.have.key('policyName').which.should.equal(policy_name) + policy.should.have.key('policyArn').which.should_not.be.none + policy.should.have.key('policyDocument').which.should.equal(json.dumps({'version': 'version_2'})) + policy.should.have.key('defaultVersionId').which.should.equal(policy2['policyVersionId']) + + client.delete_policy_version(policyName=policy_name, policyVersionId='1') + policy_versions = client.list_policy_versions(policyName=policy_name) + policy_versions.should.have.key('policyVersions').which.should.have.length_of(2) + + client.delete_policy_version(policyName=policy_name, policyVersionId=policy1['policyVersionId']) + policy_versions = client.list_policy_versions(policyName=policy_name) + policy_versions.should.have.key('policyVersions').which.should.have.length_of(1) + + # should fail as it's the default policy. 
Should use delete_policy instead + try: + client.delete_policy_version(policyName=policy_name, policyVersionId=policy2['policyVersionId']) + assert False, 'Should have failed in previous call' + except Exception as exception: + exception.response['Error']['Message'].should.equal('Cannot delete the default version of a policy') + + +@mock_iot +def test_things(): + client = boto3.client('iot', region_name='ap-northeast-1') + name = 'my-thing' + type_name = 'my-type-name' + + # thing type + thing_type = client.create_thing_type(thingTypeName=type_name) + thing_type.should.have.key('thingTypeName').which.should.equal(type_name) + thing_type.should.have.key('thingTypeArn') + + res = client.list_thing_types() + res.should.have.key('thingTypes').which.should.have.length_of(1) + for thing_type in res['thingTypes']: + thing_type.should.have.key('thingTypeName').which.should_not.be.none + + thing_type = client.describe_thing_type(thingTypeName=type_name) + thing_type.should.have.key('thingTypeName').which.should.equal(type_name) + thing_type.should.have.key('thingTypeProperties') + thing_type.should.have.key('thingTypeMetadata') + + # thing + thing = client.create_thing(thingName=name, thingTypeName=type_name) + thing.should.have.key('thingName').which.should.equal(name) + thing.should.have.key('thingArn') + res = client.list_things() + res.should.have.key('things').which.should.have.length_of(1) + for thing in res['things']: + thing.should.have.key('thingName').which.should_not.be.none + thing.should.have.key('thingArn').which.should_not.be.none + + thing = client.update_thing(thingName=name, attributePayload={'attributes': {'k1': 'v1'}}) + res = client.list_things() + res.should.have.key('things').which.should.have.length_of(1) + for thing in res['things']: + thing.should.have.key('thingName').which.should_not.be.none + thing.should.have.key('thingArn').which.should_not.be.none + res['things'][0]['attributes'].should.have.key('k1').which.should.equal('v1') + + thing = client.describe_thing(thingName=name) + thing.should.have.key('thingName').which.should.equal(name) + thing.should.have.key('defaultClientId') + thing.should.have.key('thingTypeName') + thing.should.have.key('attributes') + thing.should.have.key('version') + + # delete thing + client.delete_thing(thingName=name) + res = client.list_things() + res.should.have.key('things').which.should.have.length_of(0) + + # delete thing type + client.delete_thing_type(thingTypeName=type_name) + res = client.list_thing_types() + res.should.have.key('thingTypes').which.should.have.length_of(0) + + +@mock_iot +def test_list_thing_types(): + client = boto3.client('iot', region_name='ap-northeast-1') + + for i in range(0, 100): + client.create_thing_type(thingTypeName=str(i + 1)) + + thing_types = client.list_thing_types() + thing_types.should.have.key('nextToken') + thing_types.should.have.key('thingTypes').which.should.have.length_of(50) + thing_types['thingTypes'][0]['thingTypeName'].should.equal('1') + thing_types['thingTypes'][-1]['thingTypeName'].should.equal('50') + + thing_types = client.list_thing_types(nextToken=thing_types['nextToken']) + thing_types.should.have.key('thingTypes').which.should.have.length_of(50) + thing_types.should_not.have.key('nextToken') + thing_types['thingTypes'][0]['thingTypeName'].should.equal('51') + thing_types['thingTypes'][-1]['thingTypeName'].should.equal('100') + + +@mock_iot +def test_list_thing_types_with_typename_filter(): + client = boto3.client('iot', region_name='ap-northeast-1') + + 
client.create_thing_type(thingTypeName='thing') + client.create_thing_type(thingTypeName='thingType') + client.create_thing_type(thingTypeName='thingTypeName') + client.create_thing_type(thingTypeName='thingTypeNameGroup') + client.create_thing_type(thingTypeName='shouldNotFind') + client.create_thing_type(thingTypeName='find me it shall not') + + thing_types = client.list_thing_types(thingTypeName='thing') + thing_types.should_not.have.key('nextToken') + thing_types.should.have.key('thingTypes').which.should.have.length_of(4) + thing_types['thingTypes'][0]['thingTypeName'].should.equal('thing') + thing_types['thingTypes'][-1]['thingTypeName'].should.equal('thingTypeNameGroup') + + thing_types = client.list_thing_types(thingTypeName='thingTypeName') + thing_types.should_not.have.key('nextToken') + thing_types.should.have.key('thingTypes').which.should.have.length_of(2) + thing_types['thingTypes'][0]['thingTypeName'].should.equal('thingTypeName') + thing_types['thingTypes'][-1]['thingTypeName'].should.equal('thingTypeNameGroup') + + +@mock_iot +def test_list_things_with_next_token(): + client = boto3.client('iot', region_name='ap-northeast-1') + + for i in range(0, 200): + client.create_thing(thingName=str(i + 1)) + + things = client.list_things() + things.should.have.key('nextToken') + things.should.have.key('things').which.should.have.length_of(50) + things['things'][0]['thingName'].should.equal('1') + things['things'][0]['thingArn'].should.equal('arn:aws:iot:ap-northeast-1:1:thing/1') + things['things'][-1]['thingName'].should.equal('50') + things['things'][-1]['thingArn'].should.equal('arn:aws:iot:ap-northeast-1:1:thing/50') + + things = client.list_things(nextToken=things['nextToken']) + things.should.have.key('nextToken') + things.should.have.key('things').which.should.have.length_of(50) + things['things'][0]['thingName'].should.equal('51') + things['things'][0]['thingArn'].should.equal('arn:aws:iot:ap-northeast-1:1:thing/51') + things['things'][-1]['thingName'].should.equal('100') + things['things'][-1]['thingArn'].should.equal('arn:aws:iot:ap-northeast-1:1:thing/100') + + things = client.list_things(nextToken=things['nextToken']) + things.should.have.key('nextToken') + things.should.have.key('things').which.should.have.length_of(50) + things['things'][0]['thingName'].should.equal('101') + things['things'][0]['thingArn'].should.equal('arn:aws:iot:ap-northeast-1:1:thing/101') + things['things'][-1]['thingName'].should.equal('150') + things['things'][-1]['thingArn'].should.equal('arn:aws:iot:ap-northeast-1:1:thing/150') + + things = client.list_things(nextToken=things['nextToken']) + things.should_not.have.key('nextToken') + things.should.have.key('things').which.should.have.length_of(50) + things['things'][0]['thingName'].should.equal('151') + things['things'][0]['thingArn'].should.equal('arn:aws:iot:ap-northeast-1:1:thing/151') + things['things'][-1]['thingName'].should.equal('200') + things['things'][-1]['thingArn'].should.equal('arn:aws:iot:ap-northeast-1:1:thing/200') + + +@mock_iot +def test_list_things_with_attribute_and_thing_type_filter_and_next_token(): + client = boto3.client('iot', region_name='ap-northeast-1') + client.create_thing_type(thingTypeName='my-thing-type') + + for i in range(0, 200): + if not (i + 1) % 3: + attribute_payload = { + 'attributes': { + 'foo': 'bar' + } + } + elif not (i + 1) % 5: + attribute_payload = { + 'attributes': { + 'bar': 'foo' + } + } + else: + attribute_payload = {} + + if not (i + 1) % 2: + thing_type_name = 'my-thing-type' + 
client.create_thing(thingName=str(i + 1), thingTypeName=thing_type_name, attributePayload=attribute_payload) + else: + client.create_thing(thingName=str(i + 1), attributePayload=attribute_payload) + + # Test filter for thingTypeName + things = client.list_things(thingTypeName=thing_type_name) + things.should.have.key('nextToken') + things.should.have.key('things').which.should.have.length_of(50) + things['things'][0]['thingName'].should.equal('2') + things['things'][0]['thingArn'].should.equal('arn:aws:iot:ap-northeast-1:1:thing/2') + things['things'][-1]['thingName'].should.equal('100') + things['things'][-1]['thingArn'].should.equal('arn:aws:iot:ap-northeast-1:1:thing/100') + all(item['thingTypeName'] == thing_type_name for item in things['things']) + + things = client.list_things(nextToken=things['nextToken'], thingTypeName=thing_type_name) + things.should_not.have.key('nextToken') + things.should.have.key('things').which.should.have.length_of(50) + things['things'][0]['thingName'].should.equal('102') + things['things'][0]['thingArn'].should.equal('arn:aws:iot:ap-northeast-1:1:thing/102') + things['things'][-1]['thingName'].should.equal('200') + things['things'][-1]['thingArn'].should.equal('arn:aws:iot:ap-northeast-1:1:thing/200') + all(item['thingTypeName'] == thing_type_name for item in things['things']) + + # Test filter for attributes + things = client.list_things(attributeName='foo', attributeValue='bar') + things.should.have.key('nextToken') + things.should.have.key('things').which.should.have.length_of(50) + things['things'][0]['thingName'].should.equal('3') + things['things'][0]['thingArn'].should.equal('arn:aws:iot:ap-northeast-1:1:thing/3') + things['things'][-1]['thingName'].should.equal('150') + things['things'][-1]['thingArn'].should.equal('arn:aws:iot:ap-northeast-1:1:thing/150') + all(item['attributes'] == {'foo': 'bar'} for item in things['things']) + + things = client.list_things(nextToken=things['nextToken'], attributeName='foo', attributeValue='bar') + things.should_not.have.key('nextToken') + things.should.have.key('things').which.should.have.length_of(16) + things['things'][0]['thingName'].should.equal('153') + things['things'][0]['thingArn'].should.equal('arn:aws:iot:ap-northeast-1:1:thing/153') + things['things'][-1]['thingName'].should.equal('198') + things['things'][-1]['thingArn'].should.equal('arn:aws:iot:ap-northeast-1:1:thing/198') + all(item['attributes'] == {'foo': 'bar'} for item in things['things']) + + # Test filter for attributes and thingTypeName + things = client.list_things(thingTypeName=thing_type_name, attributeName='foo', attributeValue='bar') + things.should_not.have.key('nextToken') + things.should.have.key('things').which.should.have.length_of(33) + things['things'][0]['thingName'].should.equal('6') + things['things'][0]['thingArn'].should.equal('arn:aws:iot:ap-northeast-1:1:thing/6') + things['things'][-1]['thingName'].should.equal('198') + things['things'][-1]['thingArn'].should.equal('arn:aws:iot:ap-northeast-1:1:thing/198') + all(item['attributes'] == {'foo': 'bar'} and item['thingTypeName'] == thing_type_name for item in things['things']) + + +@mock_iot +def test_certs(): + client = boto3.client('iot', region_name='ap-northeast-1') + cert = client.create_keys_and_certificate(setAsActive=True) + cert.should.have.key('certificateArn').which.should_not.be.none + cert.should.have.key('certificateId').which.should_not.be.none + cert.should.have.key('certificatePem').which.should_not.be.none + cert.should.have.key('keyPair') + 
cert['keyPair'].should.have.key('PublicKey').which.should_not.be.none
+    cert['keyPair'].should.have.key('PrivateKey').which.should_not.be.none
+    cert_id = cert['certificateId']
+
+    cert = client.describe_certificate(certificateId=cert_id)
+    cert.should.have.key('certificateDescription')
+    cert_desc = cert['certificateDescription']
+    cert_desc.should.have.key('certificateArn').which.should_not.be.none
+    cert_desc.should.have.key('certificateId').which.should_not.be.none
+    cert_desc.should.have.key('certificatePem').which.should_not.be.none
+    cert_desc.should.have.key('status').which.should.equal('ACTIVE')
+
+    res = client.list_certificates()
+    res.should.have.key('certificates').which.should.have.length_of(1)
+    for cert in res['certificates']:
+        cert.should.have.key('certificateArn').which.should_not.be.none
+        cert.should.have.key('certificateId').which.should_not.be.none
+        cert.should.have.key('status').which.should_not.be.none
+        cert.should.have.key('creationDate').which.should_not.be.none
+
+    client.update_certificate(certificateId=cert_id, newStatus='REVOKED')
+    cert = client.describe_certificate(certificateId=cert_id)
+    # re-read the description after the update; the status must now be REVOKED
+    cert_desc = cert['certificateDescription']
+    cert_desc.should.have.key('status').which.should.equal('REVOKED')
+
+    client.delete_certificate(certificateId=cert_id)
+    res = client.list_certificates()
+    res.should.have.key('certificates').which.should.have.length_of(0)
+
+
+@mock_iot
+def test_certs_create_inactive():
+    client = boto3.client('iot', region_name='ap-northeast-1')
+    cert = client.create_keys_and_certificate(setAsActive=False)
+    cert_id = cert['certificateId']
+
+    cert = client.describe_certificate(certificateId=cert_id)
+    cert.should.have.key('certificateDescription')
+    cert_desc = cert['certificateDescription']
+    cert_desc.should.have.key('status').which.should.equal('INACTIVE')
+
+    client.update_certificate(certificateId=cert_id, newStatus='ACTIVE')
+    cert = client.describe_certificate(certificateId=cert_id)
+    cert.should.have.key('certificateDescription')
+    cert_desc = cert['certificateDescription']
+    cert_desc.should.have.key('status').which.should.equal('ACTIVE')
+
+
+@mock_iot
+def test_policy():
+    client = boto3.client('iot', region_name='ap-northeast-1')
+    name = 'my-policy'
+    doc = '{}'
+    policy = client.create_policy(policyName=name, policyDocument=doc)
+    policy.should.have.key('policyName').which.should.equal(name)
+    policy.should.have.key('policyArn').which.should_not.be.none
+    policy.should.have.key('policyDocument').which.should.equal(doc)
+    policy.should.have.key('policyVersionId').which.should.equal('1')
+
+    policy = client.get_policy(policyName=name)
+    policy.should.have.key('policyName').which.should.equal(name)
+    policy.should.have.key('policyArn').which.should_not.be.none
+    policy.should.have.key('policyDocument').which.should.equal(doc)
+    policy.should.have.key('defaultVersionId').which.should.equal('1')
+
+    res = client.list_policies()
+    res.should.have.key('policies').which.should.have.length_of(1)
+    for policy in res['policies']:
+        policy.should.have.key('policyName').which.should_not.be.none
+        policy.should.have.key('policyArn').which.should_not.be.none
+
+    client.delete_policy(policyName=name)
+    res = client.list_policies()
+    res.should.have.key('policies').which.should.have.length_of(0)
+
+
+@mock_iot
+def test_principal_policy():
+    client = boto3.client('iot', region_name='ap-northeast-1')
+    policy_name = 'my-policy'
+    doc = '{}'
+    policy = client.create_policy(policyName=policy_name, policyDocument=doc)
+    cert = client.create_keys_and_certificate(setAsActive=True)
+    cert_arn 
= cert['certificateArn'] + + client.attach_principal_policy(policyName=policy_name, principal=cert_arn) + + res = client.list_principal_policies(principal=cert_arn) + res.should.have.key('policies').which.should.have.length_of(1) + for policy in res['policies']: + policy.should.have.key('policyName').which.should_not.be.none + policy.should.have.key('policyArn').which.should_not.be.none + + res = client.list_policy_principals(policyName=policy_name) + res.should.have.key('principals').which.should.have.length_of(1) + for principal in res['principals']: + principal.should_not.be.none + + client.detach_principal_policy(policyName=policy_name, principal=cert_arn) + res = client.list_principal_policies(principal=cert_arn) + res.should.have.key('policies').which.should.have.length_of(0) + res = client.list_policy_principals(policyName=policy_name) + res.should.have.key('principals').which.should.have.length_of(0) + + +@mock_iot +def test_principal_thing(): + client = boto3.client('iot', region_name='ap-northeast-1') + thing_name = 'my-thing' + thing = client.create_thing(thingName=thing_name) + cert = client.create_keys_and_certificate(setAsActive=True) + cert_arn = cert['certificateArn'] + + client.attach_thing_principal(thingName=thing_name, principal=cert_arn) + + res = client.list_principal_things(principal=cert_arn) + res.should.have.key('things').which.should.have.length_of(1) + for thing in res['things']: + thing.should_not.be.none + res = client.list_thing_principals(thingName=thing_name) + res.should.have.key('principals').which.should.have.length_of(1) + for principal in res['principals']: + principal.should_not.be.none + + client.detach_thing_principal(thingName=thing_name, principal=cert_arn) + res = client.list_principal_things(principal=cert_arn) + res.should.have.key('things').which.should.have.length_of(0) + res = client.list_thing_principals(thingName=thing_name) + res.should.have.key('principals').which.should.have.length_of(0) + + +@mock_iot +def test_thing_groups(): + client = boto3.client('iot', region_name='ap-northeast-1') + group_name = 'my-group-name' + + # thing group + thing_group = client.create_thing_group(thingGroupName=group_name) + thing_group.should.have.key('thingGroupName').which.should.equal(group_name) + thing_group.should.have.key('thingGroupArn') + + res = client.list_thing_groups() + res.should.have.key('thingGroups').which.should.have.length_of(1) + for thing_group in res['thingGroups']: + thing_group.should.have.key('groupName').which.should_not.be.none + thing_group.should.have.key('groupArn').which.should_not.be.none + + thing_group = client.describe_thing_group(thingGroupName=group_name) + thing_group.should.have.key('thingGroupName').which.should.equal(group_name) + thing_group.should.have.key('thingGroupProperties') + thing_group.should.have.key('thingGroupMetadata') + thing_group.should.have.key('version') + + # delete thing group + client.delete_thing_group(thingGroupName=group_name) + res = client.list_thing_groups() + res.should.have.key('thingGroups').which.should.have.length_of(0) + + # props create test + props = { + 'thingGroupDescription': 'my first thing group', + 'attributePayload': { + 'attributes': { + 'key1': 'val01', + 'Key02': 'VAL2' + } + } + } + thing_group = client.create_thing_group(thingGroupName=group_name, thingGroupProperties=props) + thing_group.should.have.key('thingGroupName').which.should.equal(group_name) + thing_group.should.have.key('thingGroupArn') + + thing_group = 
client.describe_thing_group(thingGroupName=group_name) + thing_group.should.have.key('thingGroupProperties') \ + .which.should.have.key('attributePayload') \ + .which.should.have.key('attributes') + res_props = thing_group['thingGroupProperties']['attributePayload']['attributes'] + res_props.should.have.key('key1').which.should.equal('val01') + res_props.should.have.key('Key02').which.should.equal('VAL2') + + # props update test with merge + new_props = { + 'attributePayload': { + 'attributes': { + 'k3': 'v3' + }, + 'merge': True + } + } + client.update_thing_group( + thingGroupName=group_name, + thingGroupProperties=new_props + ) + thing_group = client.describe_thing_group(thingGroupName=group_name) + thing_group.should.have.key('thingGroupProperties') \ + .which.should.have.key('attributePayload') \ + .which.should.have.key('attributes') + res_props = thing_group['thingGroupProperties']['attributePayload']['attributes'] + res_props.should.have.key('key1').which.should.equal('val01') + res_props.should.have.key('Key02').which.should.equal('VAL2') + + res_props.should.have.key('k3').which.should.equal('v3') + + # props update test + new_props = { + 'attributePayload': { + 'attributes': { + 'k4': 'v4' + } + } + } + client.update_thing_group( + thingGroupName=group_name, + thingGroupProperties=new_props + ) + thing_group = client.describe_thing_group(thingGroupName=group_name) + thing_group.should.have.key('thingGroupProperties') \ + .which.should.have.key('attributePayload') \ + .which.should.have.key('attributes') + res_props = thing_group['thingGroupProperties']['attributePayload']['attributes'] + res_props.should.have.key('k4').which.should.equal('v4') + res_props.should_not.have.key('key1') + + +@mock_iot +def test_thing_group_relations(): + client = boto3.client('iot', region_name='ap-northeast-1') + name = 'my-thing' + group_name = 'my-group-name' + + # thing group + thing_group = client.create_thing_group(thingGroupName=group_name) + thing_group.should.have.key('thingGroupName').which.should.equal(group_name) + thing_group.should.have.key('thingGroupArn') + + # thing + thing = client.create_thing(thingName=name) + thing.should.have.key('thingName').which.should.equal(name) + thing.should.have.key('thingArn') + + # add in 4 way + client.add_thing_to_thing_group( + thingGroupName=group_name, + thingName=name + ) + client.add_thing_to_thing_group( + thingGroupArn=thing_group['thingGroupArn'], + thingArn=thing['thingArn'] + ) + client.add_thing_to_thing_group( + thingGroupName=group_name, + thingArn=thing['thingArn'] + ) + client.add_thing_to_thing_group( + thingGroupArn=thing_group['thingGroupArn'], + thingName=name + ) + + things = client.list_things_in_thing_group( + thingGroupName=group_name + ) + things.should.have.key('things') + things['things'].should.have.length_of(1) + + thing_groups = client.list_thing_groups_for_thing( + thingName=name + ) + thing_groups.should.have.key('thingGroups') + thing_groups['thingGroups'].should.have.length_of(1) + + # remove in 4 way + client.remove_thing_from_thing_group( + thingGroupName=group_name, + thingName=name + ) + client.remove_thing_from_thing_group( + thingGroupArn=thing_group['thingGroupArn'], + thingArn=thing['thingArn'] + ) + client.remove_thing_from_thing_group( + thingGroupName=group_name, + thingArn=thing['thingArn'] + ) + client.remove_thing_from_thing_group( + thingGroupArn=thing_group['thingGroupArn'], + thingName=name + ) + things = client.list_things_in_thing_group( + thingGroupName=group_name + ) + 
things.should.have.key('things')
+    things['things'].should.have.length_of(0)
+
+    # update thing group for thing
+    client.update_thing_groups_for_thing(
+        thingName=name,
+        thingGroupsToAdd=[
+            group_name
+        ]
+    )
+    things = client.list_things_in_thing_group(
+        thingGroupName=group_name
+    )
+    things.should.have.key('things')
+    things['things'].should.have.length_of(1)
+
+    client.update_thing_groups_for_thing(
+        thingName=name,
+        thingGroupsToRemove=[
+            group_name
+        ]
+    )
+    things = client.list_things_in_thing_group(
+        thingGroupName=group_name
+    )
+    things.should.have.key('things')
+    things['things'].should.have.length_of(0)
+
+
+@mock_iot
+def test_create_job():
+    client = boto3.client('iot', region_name='eu-west-1')
+    name = "my-thing"
+    job_id = "TestJob"
+    # thing
+    thing = client.create_thing(thingName=name)
+    thing.should.have.key('thingName').which.should.equal(name)
+    thing.should.have.key('thingArn')
+
+    # job document
+    job_document = {
+        "field": "value"
+    }
+
+    job = client.create_job(
+        jobId=job_id,
+        targets=[thing["thingArn"]],
+        document=json.dumps(job_document),
+        description="Description",
+        presignedUrlConfig={
+            'roleArn': 'arn:aws:iam::1:role/service-role/iot_job_role',
+            'expiresInSec': 123
+        },
+        targetSelection="CONTINUOUS",
+        jobExecutionsRolloutConfig={
+            'maximumPerMinute': 10
+        }
+    )
+
+    job.should.have.key('jobId').which.should.equal(job_id)
+    job.should.have.key('jobArn')
+    job.should.have.key('description')
+
+
+@mock_iot
+def test_describe_job():
+    client = boto3.client('iot', region_name='eu-west-1')
+    name = "my-thing"
+    job_id = "TestJob"
+    # thing
+    thing = client.create_thing(thingName=name)
+    thing.should.have.key('thingName').which.should.equal(name)
+    thing.should.have.key('thingArn')
+
+    job = client.create_job(
+        jobId=job_id,
+        targets=[thing["thingArn"]],
+        documentSource="https://s3-eu-west-1.amazonaws.com/bucket-name/job_document.json",
+        presignedUrlConfig={
+            'roleArn': 'arn:aws:iam::1:role/service-role/iot_job_role',
+            'expiresInSec': 123
+        },
+        targetSelection="CONTINUOUS",
+        jobExecutionsRolloutConfig={
+            'maximumPerMinute': 10
+        }
+    )
+
+    job.should.have.key('jobId').which.should.equal(job_id)
+    job.should.have.key('jobArn')
+
+    job = client.describe_job(jobId=job_id)
+    job.should.have.key('documentSource')
+    job.should.have.key('job')
+    job.should.have.key('job').which.should.have.key("jobArn")
+    job.should.have.key('job').which.should.have.key("jobId").which.should.equal(job_id)
+    job.should.have.key('job').which.should.have.key("targets")
+    job.should.have.key('job').which.should.have.key("jobProcessDetails")
+    job.should.have.key('job').which.should.have.key("lastUpdatedAt")
+    job.should.have.key('job').which.should.have.key("createdAt")
+    job.should.have.key('job').which.should.have.key("jobExecutionsRolloutConfig")
+    job.should.have.key('job').which.should.have.key("targetSelection").which.should.equal("CONTINUOUS")
+    job.should.have.key('job').which.should.have.key("presignedUrlConfig")
+    job.should.have.key('job').which.should.have.key("presignedUrlConfig").which.should.have.key(
+        "roleArn").which.should.equal('arn:aws:iam::1:role/service-role/iot_job_role')
+    job.should.have.key('job').which.should.have.key("presignedUrlConfig").which.should.have.key(
+        "expiresInSec").which.should.equal(123)
+    job.should.have.key('job').which.should.have.key("jobExecutionsRolloutConfig").which.should.have.key(
+        "maximumPerMinute").which.should.equal(10)
+
+
+@mock_iot
+def 
test_describe_job_1(): + client = boto3.client('iot', region_name='eu-west-1') + name = "my-thing" + job_id = "TestJob" + # thing + thing = client.create_thing(thingName=name) + thing.should.have.key('thingName').which.should.equal(name) + thing.should.have.key('thingArn') + + # job document + job_document = { + "field": "value" + } + + job = client.create_job( + jobId=job_id, + targets=[thing["thingArn"]], + document=json.dumps(job_document), + presignedUrlConfig={ + 'roleArn': 'arn:aws:iam::1:role/service-role/iot_job_role', + 'expiresInSec': 123 + }, + targetSelection="CONTINUOUS", + jobExecutionsRolloutConfig={ + 'maximumPerMinute': 10 + } + ) + + job.should.have.key('jobId').which.should.equal(job_id) + job.should.have.key('jobArn') + + job = client.describe_job(jobId=job_id) + job.should.have.key('job') + job.should.have.key('job').which.should.have.key("jobArn") + job.should.have.key('job').which.should.have.key("jobId").which.should.equal(job_id) + job.should.have.key('job').which.should.have.key("targets") + job.should.have.key('job').which.should.have.key("jobProcessDetails") + job.should.have.key('job').which.should.have.key("lastUpdatedAt") + job.should.have.key('job').which.should.have.key("createdAt") + job.should.have.key('job').which.should.have.key("jobExecutionsRolloutConfig") + job.should.have.key('job').which.should.have.key("targetSelection").which.should.equal("CONTINUOUS") + job.should.have.key('job').which.should.have.key("presignedUrlConfig") + job.should.have.key('job').which.should.have.key("presignedUrlConfig").which.should.have.key( + "roleArn").which.should.equal('arn:aws:iam::1:role/service-role/iot_job_role') + job.should.have.key('job').which.should.have.key("presignedUrlConfig").which.should.have.key( + "expiresInSec").which.should.equal(123) + job.should.have.key('job').which.should.have.key("jobExecutionsRolloutConfig").which.should.have.key( + "maximumPerMinute").which.should.equal(10) + + +@mock_iot +def test_get_job_document_with_document_source(): + client = boto3.client('iot', region_name='eu-west-1') + name = "my-thing" + job_id = "TestJob" + # thing + thing = client.create_thing(thingName=name) + thing.should.have.key('thingName').which.should.equal(name) + thing.should.have.key('thingArn') + + job = client.create_job( + jobId=job_id, + targets=[thing["thingArn"]], + documentSource="https://s3-eu-west-1.amazonaws.com/bucket-name/job_document.json", + presignedUrlConfig={ + 'roleArn': 'arn:aws:iam::1:role/service-role/iot_job_role', + 'expiresInSec': 123 + }, + targetSelection="CONTINUOUS", + jobExecutionsRolloutConfig={ + 'maximumPerMinute': 10 + } + ) + + job.should.have.key('jobId').which.should.equal(job_id) + job.should.have.key('jobArn') + + job_document = client.get_job_document(jobId=job_id) + job_document.should.have.key('document').which.should.equal('') + + +@mock_iot +def test_get_job_document_with_document(): + client = boto3.client('iot', region_name='eu-west-1') + name = "my-thing" + job_id = "TestJob" + # thing + thing = client.create_thing(thingName=name) + thing.should.have.key('thingName').which.should.equal(name) + thing.should.have.key('thingArn') + + # job document + job_document = { + "field": "value" + } + + job = client.create_job( + jobId=job_id, + targets=[thing["thingArn"]], + document=json.dumps(job_document), + presignedUrlConfig={ + 'roleArn': 'arn:aws:iam::1:role/service-role/iot_job_role', + 'expiresInSec': 123 + }, + targetSelection="CONTINUOUS", + jobExecutionsRolloutConfig={ + 'maximumPerMinute': 10 + } + ) + + 
job.should.have.key('jobId').which.should.equal(job_id) + job.should.have.key('jobArn') + + job_document = client.get_job_document(jobId=job_id) + job_document.should.have.key('document').which.should.equal("{\"field\": \"value\"}") diff --git a/tests/test_iot/test_server.py b/tests/test_iot/test_server.py index 47091531a1f0..60e81435aee2 100644 --- a/tests/test_iot/test_server.py +++ b/tests/test_iot/test_server.py @@ -1,19 +1,19 @@ -from __future__ import unicode_literals - -import sure # noqa - -import moto.server as server -from moto import mock_iot - -''' -Test the different server responses -''' - -@mock_iot -def test_iot_list(): - backend = server.create_backend_app("iot") - test_client = backend.test_client() - - # just making sure that server is up - res = test_client.get('/things') - res.status_code.should.equal(404) +from __future__ import unicode_literals + +import sure # noqa + +import moto.server as server +from moto import mock_iot + +''' +Test the different server responses +''' + +@mock_iot +def test_iot_list(): + backend = server.create_backend_app("iot") + test_client = backend.test_client() + + # just making sure that server is up + res = test_client.get('/things') + res.status_code.should.equal(404) diff --git a/tests/test_iotdata/test_iotdata.py b/tests/test_iotdata/test_iotdata.py index 09c1ada4c68f..8c03521f1395 100644 --- a/tests/test_iotdata/test_iotdata.py +++ b/tests/test_iotdata/test_iotdata.py @@ -1,93 +1,93 @@ -from __future__ import unicode_literals - -import json -import boto3 -import sure # noqa -from nose.tools import assert_raises -from botocore.exceptions import ClientError -from moto import mock_iotdata, mock_iot - - -@mock_iot -@mock_iotdata -def test_basic(): - iot_client = boto3.client('iot', region_name='ap-northeast-1') - client = boto3.client('iot-data', region_name='ap-northeast-1') - name = 'my-thing' - raw_payload = b'{"state": {"desired": {"led": "on"}}}' - iot_client.create_thing(thingName=name) - - with assert_raises(ClientError): - client.get_thing_shadow(thingName=name) - - res = client.update_thing_shadow(thingName=name, payload=raw_payload) - - payload = json.loads(res['payload'].read()) - expected_state = '{"desired": {"led": "on"}}' - payload.should.have.key('state').which.should.equal(json.loads(expected_state)) - payload.should.have.key('metadata').which.should.have.key('desired').which.should.have.key('led') - payload.should.have.key('version').which.should.equal(1) - payload.should.have.key('timestamp') - - res = client.get_thing_shadow(thingName=name) - payload = json.loads(res['payload'].read()) - expected_state = b'{"desired": {"led": "on"}, "delta": {"led": "on"}}' - payload.should.have.key('state').which.should.equal(json.loads(expected_state)) - payload.should.have.key('metadata').which.should.have.key('desired').which.should.have.key('led') - payload.should.have.key('version').which.should.equal(1) - payload.should.have.key('timestamp') - - client.delete_thing_shadow(thingName=name) - with assert_raises(ClientError): - client.get_thing_shadow(thingName=name) - - -@mock_iot -@mock_iotdata -def test_update(): - iot_client = boto3.client('iot', region_name='ap-northeast-1') - client = boto3.client('iot-data', region_name='ap-northeast-1') - name = 'my-thing' - raw_payload = b'{"state": {"desired": {"led": "on"}}}' - iot_client.create_thing(thingName=name) - - # first update - res = client.update_thing_shadow(thingName=name, payload=raw_payload) - payload = json.loads(res['payload'].read()) - expected_state = '{"desired": 
{"led": "on"}}' - payload.should.have.key('state').which.should.equal(json.loads(expected_state)) - payload.should.have.key('metadata').which.should.have.key('desired').which.should.have.key('led') - payload.should.have.key('version').which.should.equal(1) - payload.should.have.key('timestamp') - - res = client.get_thing_shadow(thingName=name) - payload = json.loads(res['payload'].read()) - expected_state = b'{"desired": {"led": "on"}, "delta": {"led": "on"}}' - payload.should.have.key('state').which.should.equal(json.loads(expected_state)) - payload.should.have.key('metadata').which.should.have.key('desired').which.should.have.key('led') - payload.should.have.key('version').which.should.equal(1) - payload.should.have.key('timestamp') - - # reporting new state - new_payload = b'{"state": {"reported": {"led": "on"}}}' - res = client.update_thing_shadow(thingName=name, payload=new_payload) - payload = json.loads(res['payload'].read()) - expected_state = '{"reported": {"led": "on"}}' - payload.should.have.key('state').which.should.equal(json.loads(expected_state)) - payload.should.have.key('metadata').which.should.have.key('reported').which.should.have.key('led') - payload.should.have.key('version').which.should.equal(2) - payload.should.have.key('timestamp') - - res = client.get_thing_shadow(thingName=name) - payload = json.loads(res['payload'].read()) - expected_state = b'{"desired": {"led": "on"}, "reported": {"led": "on"}}' - payload.should.have.key('state').which.should.equal(json.loads(expected_state)) - payload.should.have.key('metadata').which.should.have.key('desired').which.should.have.key('led') - payload.should.have.key('version').which.should.equal(2) - payload.should.have.key('timestamp') - - -@mock_iotdata -def test_publish(): - client = boto3.client('iot-data', region_name='ap-northeast-1') - client.publish(topic='test/topic', qos=1, payload=b'') +from __future__ import unicode_literals + +import json +import boto3 +import sure # noqa +from nose.tools import assert_raises +from botocore.exceptions import ClientError +from moto import mock_iotdata, mock_iot + + +@mock_iot +@mock_iotdata +def test_basic(): + iot_client = boto3.client('iot', region_name='ap-northeast-1') + client = boto3.client('iot-data', region_name='ap-northeast-1') + name = 'my-thing' + raw_payload = b'{"state": {"desired": {"led": "on"}}}' + iot_client.create_thing(thingName=name) + + with assert_raises(ClientError): + client.get_thing_shadow(thingName=name) + + res = client.update_thing_shadow(thingName=name, payload=raw_payload) + + payload = json.loads(res['payload'].read()) + expected_state = '{"desired": {"led": "on"}}' + payload.should.have.key('state').which.should.equal(json.loads(expected_state)) + payload.should.have.key('metadata').which.should.have.key('desired').which.should.have.key('led') + payload.should.have.key('version').which.should.equal(1) + payload.should.have.key('timestamp') + + res = client.get_thing_shadow(thingName=name) + payload = json.loads(res['payload'].read()) + expected_state = b'{"desired": {"led": "on"}, "delta": {"led": "on"}}' + payload.should.have.key('state').which.should.equal(json.loads(expected_state)) + payload.should.have.key('metadata').which.should.have.key('desired').which.should.have.key('led') + payload.should.have.key('version').which.should.equal(1) + payload.should.have.key('timestamp') + + client.delete_thing_shadow(thingName=name) + with assert_raises(ClientError): + client.get_thing_shadow(thingName=name) + + +@mock_iot +@mock_iotdata +def 
test_update(): + iot_client = boto3.client('iot', region_name='ap-northeast-1') + client = boto3.client('iot-data', region_name='ap-northeast-1') + name = 'my-thing' + raw_payload = b'{"state": {"desired": {"led": "on"}}}' + iot_client.create_thing(thingName=name) + + # first update + res = client.update_thing_shadow(thingName=name, payload=raw_payload) + payload = json.loads(res['payload'].read()) + expected_state = '{"desired": {"led": "on"}}' + payload.should.have.key('state').which.should.equal(json.loads(expected_state)) + payload.should.have.key('metadata').which.should.have.key('desired').which.should.have.key('led') + payload.should.have.key('version').which.should.equal(1) + payload.should.have.key('timestamp') + + res = client.get_thing_shadow(thingName=name) + payload = json.loads(res['payload'].read()) + expected_state = b'{"desired": {"led": "on"}, "delta": {"led": "on"}}' + payload.should.have.key('state').which.should.equal(json.loads(expected_state)) + payload.should.have.key('metadata').which.should.have.key('desired').which.should.have.key('led') + payload.should.have.key('version').which.should.equal(1) + payload.should.have.key('timestamp') + + # reporting new state + new_payload = b'{"state": {"reported": {"led": "on"}}}' + res = client.update_thing_shadow(thingName=name, payload=new_payload) + payload = json.loads(res['payload'].read()) + expected_state = '{"reported": {"led": "on"}}' + payload.should.have.key('state').which.should.equal(json.loads(expected_state)) + payload.should.have.key('metadata').which.should.have.key('reported').which.should.have.key('led') + payload.should.have.key('version').which.should.equal(2) + payload.should.have.key('timestamp') + + res = client.get_thing_shadow(thingName=name) + payload = json.loads(res['payload'].read()) + expected_state = b'{"desired": {"led": "on"}, "reported": {"led": "on"}}' + payload.should.have.key('state').which.should.equal(json.loads(expected_state)) + payload.should.have.key('metadata').which.should.have.key('desired').which.should.have.key('led') + payload.should.have.key('version').which.should.equal(2) + payload.should.have.key('timestamp') + + +@mock_iotdata +def test_publish(): + client = boto3.client('iot-data', region_name='ap-northeast-1') + client.publish(topic='test/topic', qos=1, payload=b'') diff --git a/tests/test_iotdata/test_server.py b/tests/test_iotdata/test_server.py index 42a5c5f22a37..edcd92a33b44 100644 --- a/tests/test_iotdata/test_server.py +++ b/tests/test_iotdata/test_server.py @@ -1,20 +1,20 @@ -from __future__ import unicode_literals - -import sure # noqa - -import moto.server as server -from moto import mock_iotdata - -''' -Test the different server responses -''' - -@mock_iotdata -def test_iotdata_list(): - backend = server.create_backend_app("iot-data") - test_client = backend.test_client() - - # just making sure that server is up - thing_name = 'nothing' - res = test_client.get('/things/{}/shadow'.format(thing_name)) - res.status_code.should.equal(404) +from __future__ import unicode_literals + +import sure # noqa + +import moto.server as server +from moto import mock_iotdata + +''' +Test the different server responses +''' + +@mock_iotdata +def test_iotdata_list(): + backend = server.create_backend_app("iot-data") + test_client = backend.test_client() + + # just making sure that server is up + thing_name = 'nothing' + res = test_client.get('/things/{}/shadow'.format(thing_name)) + res.status_code.should.equal(404) diff --git a/tests/test_kinesis/test_firehose.py 
b/tests/test_kinesis/test_firehose.py index 6ab46c6f95d6..b13672e2656e 100644 --- a/tests/test_kinesis/test_firehose.py +++ b/tests/test_kinesis/test_firehose.py @@ -1,188 +1,188 @@ -from __future__ import unicode_literals - -import datetime - -from botocore.exceptions import ClientError -import boto3 -import sure # noqa - -from moto import mock_kinesis - - -def create_stream(client, stream_name): - return client.create_delivery_stream( - DeliveryStreamName=stream_name, - RedshiftDestinationConfiguration={ - 'RoleARN': 'arn:aws:iam::123456789012:role/firehose_delivery_role', - 'ClusterJDBCURL': 'jdbc:redshift://host.amazonaws.com:5439/database', - 'CopyCommand': { - 'DataTableName': 'outputTable', - 'CopyOptions': "CSV DELIMITER ',' NULL '\\0'" - }, - 'Username': 'username', - 'Password': 'password', - 'S3Configuration': { - 'RoleARN': 'arn:aws:iam::123456789012:role/firehose_delivery_role', - 'BucketARN': 'arn:aws:s3:::kinesis-test', - 'Prefix': 'myFolder/', - 'BufferingHints': { - 'SizeInMBs': 123, - 'IntervalInSeconds': 124 - }, - 'CompressionFormat': 'UNCOMPRESSED', - } - } - ) - - -@mock_kinesis -def test_create_stream(): - client = boto3.client('firehose', region_name='us-east-1') - - response = create_stream(client, 'stream1') - stream_arn = response['DeliveryStreamARN'] - - response = client.describe_delivery_stream(DeliveryStreamName='stream1') - stream_description = response['DeliveryStreamDescription'] - - # Sure and Freezegun don't play nicely together - _ = stream_description.pop('CreateTimestamp') - _ = stream_description.pop('LastUpdateTimestamp') - - stream_description.should.equal({ - 'DeliveryStreamName': 'stream1', - 'DeliveryStreamARN': stream_arn, - 'DeliveryStreamStatus': 'ACTIVE', - 'VersionId': 'string', - 'Destinations': [ - { - 'DestinationId': 'string', - 'RedshiftDestinationDescription': { - 'RoleARN': 'arn:aws:iam::123456789012:role/firehose_delivery_role', - 'ClusterJDBCURL': 'jdbc:redshift://host.amazonaws.com:5439/database', - 'CopyCommand': { - 'DataTableName': 'outputTable', - 'CopyOptions': "CSV DELIMITER ',' NULL '\\0'" - }, - 'Username': 'username', - 'S3DestinationDescription': { - 'RoleARN': 'arn:aws:iam::123456789012:role/firehose_delivery_role', - 'BucketARN': 'arn:aws:s3:::kinesis-test', - 'Prefix': 'myFolder/', - 'BufferingHints': { - 'SizeInMBs': 123, - 'IntervalInSeconds': 124 - }, - 'CompressionFormat': 'UNCOMPRESSED', - } - } - }, - ], - "HasMoreDestinations": False, - }) - - -@mock_kinesis -def test_create_stream_without_redshift(): - client = boto3.client('firehose', region_name='us-east-1') - - response = client.create_delivery_stream( - DeliveryStreamName="stream1", - S3DestinationConfiguration={ - 'RoleARN': 'arn:aws:iam::123456789012:role/firehose_delivery_role', - 'BucketARN': 'arn:aws:s3:::kinesis-test', - 'Prefix': 'myFolder/', - 'BufferingHints': { - 'SizeInMBs': 123, - 'IntervalInSeconds': 124 - }, - 'CompressionFormat': 'UNCOMPRESSED', - } - ) - stream_arn = response['DeliveryStreamARN'] - - response = client.describe_delivery_stream(DeliveryStreamName='stream1') - stream_description = response['DeliveryStreamDescription'] - - # Sure and Freezegun don't play nicely together - _ = stream_description.pop('CreateTimestamp') - _ = stream_description.pop('LastUpdateTimestamp') - - stream_description.should.equal({ - 'DeliveryStreamName': 'stream1', - 'DeliveryStreamARN': stream_arn, - 'DeliveryStreamStatus': 'ACTIVE', - 'VersionId': 'string', - 'Destinations': [ - { - 'DestinationId': 'string', - 'S3DestinationDescription': { - 
'RoleARN': 'arn:aws:iam::123456789012:role/firehose_delivery_role', - 'RoleARN': 'arn:aws:iam::123456789012:role/firehose_delivery_role', - 'BucketARN': 'arn:aws:s3:::kinesis-test', - 'Prefix': 'myFolder/', - 'BufferingHints': { - 'SizeInMBs': 123, - 'IntervalInSeconds': 124 - }, - 'CompressionFormat': 'UNCOMPRESSED', - } - }, - ], - "HasMoreDestinations": False, - }) - - -@mock_kinesis -def test_deescribe_non_existant_stream(): - client = boto3.client('firehose', region_name='us-east-1') - - client.describe_delivery_stream.when.called_with( - DeliveryStreamName='not-a-stream').should.throw(ClientError) - - -@mock_kinesis -def test_list_and_delete_stream(): - client = boto3.client('firehose', region_name='us-east-1') - - create_stream(client, 'stream1') - create_stream(client, 'stream2') - - set(client.list_delivery_streams()['DeliveryStreamNames']).should.equal( - set(['stream1', 'stream2'])) - - client.delete_delivery_stream(DeliveryStreamName='stream1') - - set(client.list_delivery_streams()[ - 'DeliveryStreamNames']).should.equal(set(['stream2'])) - - -@mock_kinesis -def test_put_record(): - client = boto3.client('firehose', region_name='us-east-1') - - create_stream(client, 'stream1') - client.put_record( - DeliveryStreamName='stream1', - Record={ - 'Data': 'some data' - } - ) - - -@mock_kinesis -def test_put_record_batch(): - client = boto3.client('firehose', region_name='us-east-1') - - create_stream(client, 'stream1') - client.put_record_batch( - DeliveryStreamName='stream1', - Records=[ - { - 'Data': 'some data1' - }, - { - 'Data': 'some data2' - }, - ] - ) +from __future__ import unicode_literals + +import datetime + +from botocore.exceptions import ClientError +import boto3 +import sure # noqa + +from moto import mock_kinesis + + +def create_stream(client, stream_name): + return client.create_delivery_stream( + DeliveryStreamName=stream_name, + RedshiftDestinationConfiguration={ + 'RoleARN': 'arn:aws:iam::123456789012:role/firehose_delivery_role', + 'ClusterJDBCURL': 'jdbc:redshift://host.amazonaws.com:5439/database', + 'CopyCommand': { + 'DataTableName': 'outputTable', + 'CopyOptions': "CSV DELIMITER ',' NULL '\\0'" + }, + 'Username': 'username', + 'Password': 'password', + 'S3Configuration': { + 'RoleARN': 'arn:aws:iam::123456789012:role/firehose_delivery_role', + 'BucketARN': 'arn:aws:s3:::kinesis-test', + 'Prefix': 'myFolder/', + 'BufferingHints': { + 'SizeInMBs': 123, + 'IntervalInSeconds': 124 + }, + 'CompressionFormat': 'UNCOMPRESSED', + } + } + ) + + +@mock_kinesis +def test_create_stream(): + client = boto3.client('firehose', region_name='us-east-1') + + response = create_stream(client, 'stream1') + stream_arn = response['DeliveryStreamARN'] + + response = client.describe_delivery_stream(DeliveryStreamName='stream1') + stream_description = response['DeliveryStreamDescription'] + + # Sure and Freezegun don't play nicely together + _ = stream_description.pop('CreateTimestamp') + _ = stream_description.pop('LastUpdateTimestamp') + + stream_description.should.equal({ + 'DeliveryStreamName': 'stream1', + 'DeliveryStreamARN': stream_arn, + 'DeliveryStreamStatus': 'ACTIVE', + 'VersionId': 'string', + 'Destinations': [ + { + 'DestinationId': 'string', + 'RedshiftDestinationDescription': { + 'RoleARN': 'arn:aws:iam::123456789012:role/firehose_delivery_role', + 'ClusterJDBCURL': 'jdbc:redshift://host.amazonaws.com:5439/database', + 'CopyCommand': { + 'DataTableName': 'outputTable', + 'CopyOptions': "CSV DELIMITER ',' NULL '\\0'" + }, + 'Username': 'username', + 
'S3DestinationDescription': { + 'RoleARN': 'arn:aws:iam::123456789012:role/firehose_delivery_role', + 'BucketARN': 'arn:aws:s3:::kinesis-test', + 'Prefix': 'myFolder/', + 'BufferingHints': { + 'SizeInMBs': 123, + 'IntervalInSeconds': 124 + }, + 'CompressionFormat': 'UNCOMPRESSED', + } + } + }, + ], + "HasMoreDestinations": False, + }) + + +@mock_kinesis +def test_create_stream_without_redshift(): + client = boto3.client('firehose', region_name='us-east-1') + + response = client.create_delivery_stream( + DeliveryStreamName="stream1", + S3DestinationConfiguration={ + 'RoleARN': 'arn:aws:iam::123456789012:role/firehose_delivery_role', + 'BucketARN': 'arn:aws:s3:::kinesis-test', + 'Prefix': 'myFolder/', + 'BufferingHints': { + 'SizeInMBs': 123, + 'IntervalInSeconds': 124 + }, + 'CompressionFormat': 'UNCOMPRESSED', + } + ) + stream_arn = response['DeliveryStreamARN'] + + response = client.describe_delivery_stream(DeliveryStreamName='stream1') + stream_description = response['DeliveryStreamDescription'] + + # Sure and Freezegun don't play nicely together + _ = stream_description.pop('CreateTimestamp') + _ = stream_description.pop('LastUpdateTimestamp') + + stream_description.should.equal({ + 'DeliveryStreamName': 'stream1', + 'DeliveryStreamARN': stream_arn, + 'DeliveryStreamStatus': 'ACTIVE', + 'VersionId': 'string', + 'Destinations': [ + { + 'DestinationId': 'string', + 'S3DestinationDescription': { + 'RoleARN': 'arn:aws:iam::123456789012:role/firehose_delivery_role', + 'RoleARN': 'arn:aws:iam::123456789012:role/firehose_delivery_role', + 'BucketARN': 'arn:aws:s3:::kinesis-test', + 'Prefix': 'myFolder/', + 'BufferingHints': { + 'SizeInMBs': 123, + 'IntervalInSeconds': 124 + }, + 'CompressionFormat': 'UNCOMPRESSED', + } + }, + ], + "HasMoreDestinations": False, + }) + + +@mock_kinesis +def test_deescribe_non_existant_stream(): + client = boto3.client('firehose', region_name='us-east-1') + + client.describe_delivery_stream.when.called_with( + DeliveryStreamName='not-a-stream').should.throw(ClientError) + + +@mock_kinesis +def test_list_and_delete_stream(): + client = boto3.client('firehose', region_name='us-east-1') + + create_stream(client, 'stream1') + create_stream(client, 'stream2') + + set(client.list_delivery_streams()['DeliveryStreamNames']).should.equal( + set(['stream1', 'stream2'])) + + client.delete_delivery_stream(DeliveryStreamName='stream1') + + set(client.list_delivery_streams()[ + 'DeliveryStreamNames']).should.equal(set(['stream2'])) + + +@mock_kinesis +def test_put_record(): + client = boto3.client('firehose', region_name='us-east-1') + + create_stream(client, 'stream1') + client.put_record( + DeliveryStreamName='stream1', + Record={ + 'Data': 'some data' + } + ) + + +@mock_kinesis +def test_put_record_batch(): + client = boto3.client('firehose', region_name='us-east-1') + + create_stream(client, 'stream1') + client.put_record_batch( + DeliveryStreamName='stream1', + Records=[ + { + 'Data': 'some data1' + }, + { + 'Data': 'some data2' + }, + ] + ) diff --git a/tests/test_kinesis/test_kinesis.py b/tests/test_kinesis/test_kinesis.py index c70236978983..736dc05c3f35 100644 --- a/tests/test_kinesis/test_kinesis.py +++ b/tests/test_kinesis/test_kinesis.py @@ -1,624 +1,624 @@ -from __future__ import unicode_literals - -import boto.kinesis -from boto.kinesis.exceptions import ResourceNotFoundException, InvalidArgumentException -import boto3 -import sure # noqa -import datetime -import time - -from moto import mock_kinesis, mock_kinesis_deprecated - - -@mock_kinesis_deprecated -def 
test_create_cluster(): - conn = boto.kinesis.connect_to_region("us-west-2") - - conn.create_stream("my_stream", 2) - - stream_response = conn.describe_stream("my_stream") - - stream = stream_response["StreamDescription"] - stream["StreamName"].should.equal("my_stream") - stream["HasMoreShards"].should.equal(False) - stream["StreamARN"].should.equal( - "arn:aws:kinesis:us-west-2:123456789012:my_stream") - stream["StreamStatus"].should.equal("ACTIVE") - - shards = stream['Shards'] - shards.should.have.length_of(2) - - -@mock_kinesis_deprecated -def test_describe_non_existant_stream(): - conn = boto.kinesis.connect_to_region("us-east-1") - conn.describe_stream.when.called_with( - "not-a-stream").should.throw(ResourceNotFoundException) - - -@mock_kinesis_deprecated -def test_list_and_delete_stream(): - conn = boto.kinesis.connect_to_region("us-west-2") - - conn.create_stream("stream1", 1) - conn.create_stream("stream2", 1) - - conn.list_streams()['StreamNames'].should.have.length_of(2) - - conn.delete_stream("stream2") - - conn.list_streams()['StreamNames'].should.have.length_of(1) - - # Delete invalid id - conn.delete_stream.when.called_with( - "not-a-stream").should.throw(ResourceNotFoundException) - - -@mock_kinesis -def test_list_many_streams(): - conn = boto3.client('kinesis', region_name="us-west-2") - - for i in range(11): - conn.create_stream(StreamName="stream%d" % i, ShardCount=1) - - resp = conn.list_streams() - stream_names = resp["StreamNames"] - has_more_streams = resp["HasMoreStreams"] - stream_names.should.have.length_of(10) - has_more_streams.should.be(True) - resp2 = conn.list_streams(ExclusiveStartStreamName=stream_names[-1]) - stream_names = resp2["StreamNames"] - has_more_streams = resp2["HasMoreStreams"] - stream_names.should.have.length_of(1) - has_more_streams.should.equal(False) - - -@mock_kinesis_deprecated -def test_basic_shard_iterator(): - conn = boto.kinesis.connect_to_region("us-west-2") - - stream_name = "my_stream" - conn.create_stream(stream_name, 1) - - response = conn.describe_stream(stream_name) - shard_id = response['StreamDescription']['Shards'][0]['ShardId'] - - response = conn.get_shard_iterator(stream_name, shard_id, 'TRIM_HORIZON') - shard_iterator = response['ShardIterator'] - - response = conn.get_records(shard_iterator) - shard_iterator = response['NextShardIterator'] - response['Records'].should.equal([]) - response['MillisBehindLatest'].should.equal(0) - - -@mock_kinesis_deprecated -def test_get_invalid_shard_iterator(): - conn = boto.kinesis.connect_to_region("us-west-2") - - stream_name = "my_stream" - conn.create_stream(stream_name, 1) - - conn.get_shard_iterator.when.called_with( - stream_name, "123", 'TRIM_HORIZON').should.throw(ResourceNotFoundException) - - -@mock_kinesis_deprecated -def test_put_records(): - conn = boto.kinesis.connect_to_region("us-west-2") - - stream_name = "my_stream" - conn.create_stream(stream_name, 1) - - data = "hello world" - partition_key = "1234" - - conn.put_record.when.called_with( - stream_name, data, 1234).should.throw(InvalidArgumentException) - - conn.put_record(stream_name, data, partition_key) - - response = conn.describe_stream(stream_name) - shard_id = response['StreamDescription']['Shards'][0]['ShardId'] - - response = conn.get_shard_iterator(stream_name, shard_id, 'TRIM_HORIZON') - shard_iterator = response['ShardIterator'] - - response = conn.get_records(shard_iterator) - shard_iterator = response['NextShardIterator'] - response['Records'].should.have.length_of(1) - record = response['Records'][0] - 
- record["Data"].should.equal("hello world") - record["PartitionKey"].should.equal("1234") - record["SequenceNumber"].should.equal("1") - - -@mock_kinesis_deprecated -def test_get_records_limit(): - conn = boto.kinesis.connect_to_region("us-west-2") - - stream_name = "my_stream" - conn.create_stream(stream_name, 1) - - # Create some data - data = "hello world" - - for index in range(5): - conn.put_record(stream_name, data, str(index)) - - # Get a shard iterator - response = conn.describe_stream(stream_name) - shard_id = response['StreamDescription']['Shards'][0]['ShardId'] - response = conn.get_shard_iterator(stream_name, shard_id, 'TRIM_HORIZON') - shard_iterator = response['ShardIterator'] - - # Retrieve only 3 records - response = conn.get_records(shard_iterator, limit=3) - response['Records'].should.have.length_of(3) - - # Then get the rest of the results - next_shard_iterator = response['NextShardIterator'] - response = conn.get_records(next_shard_iterator) - response['Records'].should.have.length_of(2) - - -@mock_kinesis_deprecated -def test_get_records_at_sequence_number(): - # AT_SEQUENCE_NUMBER - Start reading exactly from the position denoted by - # a specific sequence number. - conn = boto.kinesis.connect_to_region("us-west-2") - stream_name = "my_stream" - conn.create_stream(stream_name, 1) - - # Create some data - for index in range(1, 5): - conn.put_record(stream_name, str(index), str(index)) - - # Get a shard iterator - response = conn.describe_stream(stream_name) - shard_id = response['StreamDescription']['Shards'][0]['ShardId'] - response = conn.get_shard_iterator(stream_name, shard_id, 'TRIM_HORIZON') - shard_iterator = response['ShardIterator'] - - # Get the second record - response = conn.get_records(shard_iterator, limit=2) - second_sequence_id = response['Records'][1]['SequenceNumber'] - - # Then get a new iterator starting at that id - response = conn.get_shard_iterator( - stream_name, shard_id, 'AT_SEQUENCE_NUMBER', second_sequence_id) - shard_iterator = response['ShardIterator'] - - response = conn.get_records(shard_iterator) - # And the first result returned should be the second item - response['Records'][0]['SequenceNumber'].should.equal(second_sequence_id) - response['Records'][0]['Data'].should.equal('2') - - -@mock_kinesis_deprecated -def test_get_records_after_sequence_number(): - # AFTER_SEQUENCE_NUMBER - Start reading right after the position denoted - # by a specific sequence number. 
- conn = boto.kinesis.connect_to_region("us-west-2") - stream_name = "my_stream" - conn.create_stream(stream_name, 1) - - # Create some data - for index in range(1, 5): - conn.put_record(stream_name, str(index), str(index)) - - # Get a shard iterator - response = conn.describe_stream(stream_name) - shard_id = response['StreamDescription']['Shards'][0]['ShardId'] - response = conn.get_shard_iterator(stream_name, shard_id, 'TRIM_HORIZON') - shard_iterator = response['ShardIterator'] - - # Get the second record - response = conn.get_records(shard_iterator, limit=2) - second_sequence_id = response['Records'][1]['SequenceNumber'] - - # Then get a new iterator starting after that id - response = conn.get_shard_iterator( - stream_name, shard_id, 'AFTER_SEQUENCE_NUMBER', second_sequence_id) - shard_iterator = response['ShardIterator'] - - response = conn.get_records(shard_iterator) - # And the first result returned should be the third item - response['Records'][0]['Data'].should.equal('3') - response['MillisBehindLatest'].should.equal(0) - - -@mock_kinesis_deprecated -def test_get_records_latest(): - # LATEST - Start reading just after the most recent record in the shard, - # so that you always read the most recent data in the shard. - conn = boto.kinesis.connect_to_region("us-west-2") - stream_name = "my_stream" - conn.create_stream(stream_name, 1) - - # Create some data - for index in range(1, 5): - conn.put_record(stream_name, str(index), str(index)) - - # Get a shard iterator - response = conn.describe_stream(stream_name) - shard_id = response['StreamDescription']['Shards'][0]['ShardId'] - response = conn.get_shard_iterator(stream_name, shard_id, 'TRIM_HORIZON') - shard_iterator = response['ShardIterator'] - - # Get the second record - response = conn.get_records(shard_iterator, limit=2) - second_sequence_id = response['Records'][1]['SequenceNumber'] - - # Then get a new iterator starting after that id - response = conn.get_shard_iterator( - stream_name, shard_id, 'LATEST', second_sequence_id) - shard_iterator = response['ShardIterator'] - - # Write some more data - conn.put_record(stream_name, "last_record", "last_record") - - response = conn.get_records(shard_iterator) - # And the only result returned should be the new item - response['Records'].should.have.length_of(1) - response['Records'][0]['PartitionKey'].should.equal('last_record') - response['Records'][0]['Data'].should.equal('last_record') - response['MillisBehindLatest'].should.equal(0) - - -@mock_kinesis -def test_get_records_at_timestamp(): - # AT_TIMESTAMP - Read the first record at or after the specified timestamp - conn = boto3.client('kinesis', region_name="us-west-2") - stream_name = "my_stream" - conn.create_stream(StreamName=stream_name, ShardCount=1) - - # Create some data - for index in range(1, 5): - conn.put_record(StreamName=stream_name, - Data=str(index), - PartitionKey=str(index)) - - # When boto3 floors the timestamp that we pass to get_shard_iterator to - # second precision even though AWS supports ms precision: - # http://docs.aws.amazon.com/kinesis/latest/APIReference/API_GetShardIterator.html - # To test around this limitation we wait until we well into the next second - # before capturing the time and storing the records we expect to retrieve. 
- time.sleep(1.0) - timestamp = datetime.datetime.utcnow() - - keys = [str(i) for i in range(5, 10)] - for k in keys: - conn.put_record(StreamName=stream_name, - Data=k, - PartitionKey=k) - - # Get a shard iterator - response = conn.describe_stream(StreamName=stream_name) - shard_id = response['StreamDescription']['Shards'][0]['ShardId'] - response = conn.get_shard_iterator(StreamName=stream_name, - ShardId=shard_id, - ShardIteratorType='AT_TIMESTAMP', - Timestamp=timestamp) - shard_iterator = response['ShardIterator'] - - response = conn.get_records(ShardIterator=shard_iterator) - - response['Records'].should.have.length_of(len(keys)) - partition_keys = [r['PartitionKey'] for r in response['Records']] - partition_keys.should.equal(keys) - response['MillisBehindLatest'].should.equal(0) - - -@mock_kinesis -def test_get_records_at_very_old_timestamp(): - conn = boto3.client('kinesis', region_name="us-west-2") - stream_name = "my_stream" - conn.create_stream(StreamName=stream_name, ShardCount=1) - - # Create some data - keys = [str(i) for i in range(1, 5)] - for k in keys: - conn.put_record(StreamName=stream_name, - Data=k, - PartitionKey=k) - - # Get a shard iterator - response = conn.describe_stream(StreamName=stream_name) - shard_id = response['StreamDescription']['Shards'][0]['ShardId'] - response = conn.get_shard_iterator(StreamName=stream_name, - ShardId=shard_id, - ShardIteratorType='AT_TIMESTAMP', - Timestamp=1) - shard_iterator = response['ShardIterator'] - - response = conn.get_records(ShardIterator=shard_iterator) - response['Records'].should.have.length_of(len(keys)) - partition_keys = [r['PartitionKey'] for r in response['Records']] - partition_keys.should.equal(keys) - response['MillisBehindLatest'].should.equal(0) - - -@mock_kinesis -def test_get_records_timestamp_filtering(): - conn = boto3.client('kinesis', region_name="us-west-2") - stream_name = "my_stream" - conn.create_stream(StreamName=stream_name, ShardCount=1) - - conn.put_record(StreamName=stream_name, - Data='0', - PartitionKey='0') - - time.sleep(1.0) - timestamp = datetime.datetime.utcnow() - - conn.put_record(StreamName=stream_name, - Data='1', - PartitionKey='1') - - response = conn.describe_stream(StreamName=stream_name) - shard_id = response['StreamDescription']['Shards'][0]['ShardId'] - response = conn.get_shard_iterator(StreamName=stream_name, - ShardId=shard_id, - ShardIteratorType='AT_TIMESTAMP', - Timestamp=timestamp) - shard_iterator = response['ShardIterator'] - - response = conn.get_records(ShardIterator=shard_iterator) - response['Records'].should.have.length_of(1) - response['Records'][0]['PartitionKey'].should.equal('1') - response['Records'][0]['ApproximateArrivalTimestamp'].should.be.\ - greater_than(timestamp) - response['MillisBehindLatest'].should.equal(0) - - -@mock_kinesis -def test_get_records_millis_behind_latest(): - conn = boto3.client('kinesis', region_name="us-west-2") - stream_name = "my_stream" - conn.create_stream(StreamName=stream_name, ShardCount=1) - - conn.put_record(StreamName=stream_name, - Data='0', - PartitionKey='0') - time.sleep(1.0) - conn.put_record(StreamName=stream_name, - Data='1', - PartitionKey='1') - - response = conn.describe_stream(StreamName=stream_name) - shard_id = response['StreamDescription']['Shards'][0]['ShardId'] - response = conn.get_shard_iterator(StreamName=stream_name, - ShardId=shard_id, - ShardIteratorType='TRIM_HORIZON') - shard_iterator = response['ShardIterator'] - - response = conn.get_records(ShardIterator=shard_iterator, Limit=1) - 
response['Records'].should.have.length_of(1) - response['MillisBehindLatest'].should.be.greater_than(0) - - -@mock_kinesis -def test_get_records_at_very_new_timestamp(): - conn = boto3.client('kinesis', region_name="us-west-2") - stream_name = "my_stream" - conn.create_stream(StreamName=stream_name, ShardCount=1) - - # Create some data - keys = [str(i) for i in range(1, 5)] - for k in keys: - conn.put_record(StreamName=stream_name, - Data=k, - PartitionKey=k) - - timestamp = datetime.datetime.utcnow() + datetime.timedelta(seconds=1) - - # Get a shard iterator - response = conn.describe_stream(StreamName=stream_name) - shard_id = response['StreamDescription']['Shards'][0]['ShardId'] - response = conn.get_shard_iterator(StreamName=stream_name, - ShardId=shard_id, - ShardIteratorType='AT_TIMESTAMP', - Timestamp=timestamp) - shard_iterator = response['ShardIterator'] - - response = conn.get_records(ShardIterator=shard_iterator) - - response['Records'].should.have.length_of(0) - response['MillisBehindLatest'].should.equal(0) - - -@mock_kinesis -def test_get_records_from_empty_stream_at_timestamp(): - conn = boto3.client('kinesis', region_name="us-west-2") - stream_name = "my_stream" - conn.create_stream(StreamName=stream_name, ShardCount=1) - - timestamp = datetime.datetime.utcnow() - - # Get a shard iterator - response = conn.describe_stream(StreamName=stream_name) - shard_id = response['StreamDescription']['Shards'][0]['ShardId'] - response = conn.get_shard_iterator(StreamName=stream_name, - ShardId=shard_id, - ShardIteratorType='AT_TIMESTAMP', - Timestamp=timestamp) - shard_iterator = response['ShardIterator'] - - response = conn.get_records(ShardIterator=shard_iterator) - - response['Records'].should.have.length_of(0) - response['MillisBehindLatest'].should.equal(0) - - -@mock_kinesis_deprecated -def test_invalid_shard_iterator_type(): - conn = boto.kinesis.connect_to_region("us-west-2") - stream_name = "my_stream" - conn.create_stream(stream_name, 1) - - response = conn.describe_stream(stream_name) - shard_id = response['StreamDescription']['Shards'][0]['ShardId'] - response = conn.get_shard_iterator.when.called_with( - stream_name, shard_id, 'invalid-type').should.throw(InvalidArgumentException) - - -@mock_kinesis_deprecated -def test_add_tags(): - conn = boto.kinesis.connect_to_region("us-west-2") - stream_name = "my_stream" - conn.create_stream(stream_name, 1) - - conn.describe_stream(stream_name) - conn.add_tags_to_stream(stream_name, {'tag1': 'val1'}) - conn.add_tags_to_stream(stream_name, {'tag2': 'val2'}) - conn.add_tags_to_stream(stream_name, {'tag1': 'val3'}) - conn.add_tags_to_stream(stream_name, {'tag2': 'val4'}) - - -@mock_kinesis_deprecated -def test_list_tags(): - conn = boto.kinesis.connect_to_region("us-west-2") - stream_name = "my_stream" - conn.create_stream(stream_name, 1) - - conn.describe_stream(stream_name) - conn.add_tags_to_stream(stream_name, {'tag1': 'val1'}) - tags = dict([(tag['Key'], tag['Value']) - for tag in conn.list_tags_for_stream(stream_name)['Tags']]) - tags.get('tag1').should.equal('val1') - conn.add_tags_to_stream(stream_name, {'tag2': 'val2'}) - tags = dict([(tag['Key'], tag['Value']) - for tag in conn.list_tags_for_stream(stream_name)['Tags']]) - tags.get('tag2').should.equal('val2') - conn.add_tags_to_stream(stream_name, {'tag1': 'val3'}) - tags = dict([(tag['Key'], tag['Value']) - for tag in conn.list_tags_for_stream(stream_name)['Tags']]) - tags.get('tag1').should.equal('val3') - conn.add_tags_to_stream(stream_name, {'tag2': 'val4'}) - tags = 
dict([(tag['Key'], tag['Value']) - for tag in conn.list_tags_for_stream(stream_name)['Tags']]) - tags.get('tag2').should.equal('val4') - - -@mock_kinesis_deprecated -def test_remove_tags(): - conn = boto.kinesis.connect_to_region("us-west-2") - stream_name = "my_stream" - conn.create_stream(stream_name, 1) - - conn.describe_stream(stream_name) - conn.add_tags_to_stream(stream_name, {'tag1': 'val1'}) - tags = dict([(tag['Key'], tag['Value']) - for tag in conn.list_tags_for_stream(stream_name)['Tags']]) - tags.get('tag1').should.equal('val1') - conn.remove_tags_from_stream(stream_name, ['tag1']) - tags = dict([(tag['Key'], tag['Value']) - for tag in conn.list_tags_for_stream(stream_name)['Tags']]) - tags.get('tag1').should.equal(None) - - conn.add_tags_to_stream(stream_name, {'tag2': 'val2'}) - tags = dict([(tag['Key'], tag['Value']) - for tag in conn.list_tags_for_stream(stream_name)['Tags']]) - tags.get('tag2').should.equal('val2') - conn.remove_tags_from_stream(stream_name, ['tag2']) - tags = dict([(tag['Key'], tag['Value']) - for tag in conn.list_tags_for_stream(stream_name)['Tags']]) - tags.get('tag2').should.equal(None) - - -@mock_kinesis_deprecated -def test_split_shard(): - conn = boto.kinesis.connect_to_region("us-west-2") - stream_name = 'my_stream' - - conn.create_stream(stream_name, 2) - - # Create some data - for index in range(1, 100): - conn.put_record(stream_name, str(index), str(index)) - - stream_response = conn.describe_stream(stream_name) - - stream = stream_response["StreamDescription"] - shards = stream['Shards'] - shards.should.have.length_of(2) - sum([shard['SequenceNumberRange']['EndingSequenceNumber'] - for shard in shards]).should.equal(99) - - shard_range = shards[0]['HashKeyRange'] - new_starting_hash = ( - int(shard_range['EndingHashKey']) + int(shard_range['StartingHashKey'])) // 2 - conn.split_shard("my_stream", shards[0]['ShardId'], str(new_starting_hash)) - - stream_response = conn.describe_stream(stream_name) - - stream = stream_response["StreamDescription"] - shards = stream['Shards'] - shards.should.have.length_of(3) - sum([shard['SequenceNumberRange']['EndingSequenceNumber'] - for shard in shards]).should.equal(99) - - shard_range = shards[2]['HashKeyRange'] - new_starting_hash = ( - int(shard_range['EndingHashKey']) + int(shard_range['StartingHashKey'])) // 2 - conn.split_shard("my_stream", shards[2]['ShardId'], str(new_starting_hash)) - - stream_response = conn.describe_stream(stream_name) - - stream = stream_response["StreamDescription"] - shards = stream['Shards'] - shards.should.have.length_of(4) - sum([shard['SequenceNumberRange']['EndingSequenceNumber'] - for shard in shards]).should.equal(99) - - -@mock_kinesis_deprecated -def test_merge_shards(): - conn = boto.kinesis.connect_to_region("us-west-2") - stream_name = 'my_stream' - - conn.create_stream(stream_name, 4) - - # Create some data - for index in range(1, 100): - conn.put_record(stream_name, str(index), str(index)) - - stream_response = conn.describe_stream(stream_name) - - stream = stream_response["StreamDescription"] - shards = stream['Shards'] - shards.should.have.length_of(4) - - conn.merge_shards.when.called_with( - stream_name, 'shardId-000000000000', 'shardId-000000000002').should.throw(InvalidArgumentException) - - stream_response = conn.describe_stream(stream_name) - - stream = stream_response["StreamDescription"] - shards = stream['Shards'] - shards.should.have.length_of(4) - sum([shard['SequenceNumberRange']['EndingSequenceNumber'] - for shard in shards]).should.equal(99) - - 
conn.merge_shards(stream_name, 'shardId-000000000000', - 'shardId-000000000001') - - stream_response = conn.describe_stream(stream_name) - - stream = stream_response["StreamDescription"] - shards = stream['Shards'] - shards.should.have.length_of(3) - sum([shard['SequenceNumberRange']['EndingSequenceNumber'] - for shard in shards]).should.equal(99) - conn.merge_shards(stream_name, 'shardId-000000000002', - 'shardId-000000000000') - - stream_response = conn.describe_stream(stream_name) - - stream = stream_response["StreamDescription"] - shards = stream['Shards'] - shards.should.have.length_of(2) - sum([shard['SequenceNumberRange']['EndingSequenceNumber'] - for shard in shards]).should.equal(99) +from __future__ import unicode_literals + +import boto.kinesis +from boto.kinesis.exceptions import ResourceNotFoundException, InvalidArgumentException +import boto3 +import sure # noqa +import datetime +import time + +from moto import mock_kinesis, mock_kinesis_deprecated + + +@mock_kinesis_deprecated +def test_create_cluster(): + conn = boto.kinesis.connect_to_region("us-west-2") + + conn.create_stream("my_stream", 2) + + stream_response = conn.describe_stream("my_stream") + + stream = stream_response["StreamDescription"] + stream["StreamName"].should.equal("my_stream") + stream["HasMoreShards"].should.equal(False) + stream["StreamARN"].should.equal( + "arn:aws:kinesis:us-west-2:123456789012:my_stream") + stream["StreamStatus"].should.equal("ACTIVE") + + shards = stream['Shards'] + shards.should.have.length_of(2) + + +@mock_kinesis_deprecated +def test_describe_non_existant_stream(): + conn = boto.kinesis.connect_to_region("us-east-1") + conn.describe_stream.when.called_with( + "not-a-stream").should.throw(ResourceNotFoundException) + + +@mock_kinesis_deprecated +def test_list_and_delete_stream(): + conn = boto.kinesis.connect_to_region("us-west-2") + + conn.create_stream("stream1", 1) + conn.create_stream("stream2", 1) + + conn.list_streams()['StreamNames'].should.have.length_of(2) + + conn.delete_stream("stream2") + + conn.list_streams()['StreamNames'].should.have.length_of(1) + + # Delete invalid id + conn.delete_stream.when.called_with( + "not-a-stream").should.throw(ResourceNotFoundException) + + +@mock_kinesis +def test_list_many_streams(): + conn = boto3.client('kinesis', region_name="us-west-2") + + for i in range(11): + conn.create_stream(StreamName="stream%d" % i, ShardCount=1) + + resp = conn.list_streams() + stream_names = resp["StreamNames"] + has_more_streams = resp["HasMoreStreams"] + stream_names.should.have.length_of(10) + has_more_streams.should.be(True) + resp2 = conn.list_streams(ExclusiveStartStreamName=stream_names[-1]) + stream_names = resp2["StreamNames"] + has_more_streams = resp2["HasMoreStreams"] + stream_names.should.have.length_of(1) + has_more_streams.should.equal(False) + + +@mock_kinesis_deprecated +def test_basic_shard_iterator(): + conn = boto.kinesis.connect_to_region("us-west-2") + + stream_name = "my_stream" + conn.create_stream(stream_name, 1) + + response = conn.describe_stream(stream_name) + shard_id = response['StreamDescription']['Shards'][0]['ShardId'] + + response = conn.get_shard_iterator(stream_name, shard_id, 'TRIM_HORIZON') + shard_iterator = response['ShardIterator'] + + response = conn.get_records(shard_iterator) + shard_iterator = response['NextShardIterator'] + response['Records'].should.equal([]) + response['MillisBehindLatest'].should.equal(0) + + +@mock_kinesis_deprecated +def test_get_invalid_shard_iterator(): + conn = 
boto.kinesis.connect_to_region("us-west-2") + + stream_name = "my_stream" + conn.create_stream(stream_name, 1) + + conn.get_shard_iterator.when.called_with( + stream_name, "123", 'TRIM_HORIZON').should.throw(ResourceNotFoundException) + + +@mock_kinesis_deprecated +def test_put_records(): + conn = boto.kinesis.connect_to_region("us-west-2") + + stream_name = "my_stream" + conn.create_stream(stream_name, 1) + + data = "hello world" + partition_key = "1234" + + conn.put_record.when.called_with( + stream_name, data, 1234).should.throw(InvalidArgumentException) + + conn.put_record(stream_name, data, partition_key) + + response = conn.describe_stream(stream_name) + shard_id = response['StreamDescription']['Shards'][0]['ShardId'] + + response = conn.get_shard_iterator(stream_name, shard_id, 'TRIM_HORIZON') + shard_iterator = response['ShardIterator'] + + response = conn.get_records(shard_iterator) + shard_iterator = response['NextShardIterator'] + response['Records'].should.have.length_of(1) + record = response['Records'][0] + + record["Data"].should.equal("hello world") + record["PartitionKey"].should.equal("1234") + record["SequenceNumber"].should.equal("1") + + +@mock_kinesis_deprecated +def test_get_records_limit(): + conn = boto.kinesis.connect_to_region("us-west-2") + + stream_name = "my_stream" + conn.create_stream(stream_name, 1) + + # Create some data + data = "hello world" + + for index in range(5): + conn.put_record(stream_name, data, str(index)) + + # Get a shard iterator + response = conn.describe_stream(stream_name) + shard_id = response['StreamDescription']['Shards'][0]['ShardId'] + response = conn.get_shard_iterator(stream_name, shard_id, 'TRIM_HORIZON') + shard_iterator = response['ShardIterator'] + + # Retrieve only 3 records + response = conn.get_records(shard_iterator, limit=3) + response['Records'].should.have.length_of(3) + + # Then get the rest of the results + next_shard_iterator = response['NextShardIterator'] + response = conn.get_records(next_shard_iterator) + response['Records'].should.have.length_of(2) + + +@mock_kinesis_deprecated +def test_get_records_at_sequence_number(): + # AT_SEQUENCE_NUMBER - Start reading exactly from the position denoted by + # a specific sequence number. + conn = boto.kinesis.connect_to_region("us-west-2") + stream_name = "my_stream" + conn.create_stream(stream_name, 1) + + # Create some data + for index in range(1, 5): + conn.put_record(stream_name, str(index), str(index)) + + # Get a shard iterator + response = conn.describe_stream(stream_name) + shard_id = response['StreamDescription']['Shards'][0]['ShardId'] + response = conn.get_shard_iterator(stream_name, shard_id, 'TRIM_HORIZON') + shard_iterator = response['ShardIterator'] + + # Get the second record + response = conn.get_records(shard_iterator, limit=2) + second_sequence_id = response['Records'][1]['SequenceNumber'] + + # Then get a new iterator starting at that id + response = conn.get_shard_iterator( + stream_name, shard_id, 'AT_SEQUENCE_NUMBER', second_sequence_id) + shard_iterator = response['ShardIterator'] + + response = conn.get_records(shard_iterator) + # And the first result returned should be the second item + response['Records'][0]['SequenceNumber'].should.equal(second_sequence_id) + response['Records'][0]['Data'].should.equal('2') + + +@mock_kinesis_deprecated +def test_get_records_after_sequence_number(): + # AFTER_SEQUENCE_NUMBER - Start reading right after the position denoted + # by a specific sequence number. 
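+    # A minimal plain-Python sketch of the semantics (the list and names
+    # below are illustrative only, not part of the API under test):
+    # AT_SEQUENCE_NUMBER is inclusive of the given sequence number,
+    # AFTER_SEQUENCE_NUMBER is exclusive of it.
+    example_sequence_numbers = ['1', '2', '3', '4']
+    at_index = example_sequence_numbers.index('2')
+    assert example_sequence_numbers[at_index:] == ['2', '3', '4']    # AT_SEQUENCE_NUMBER
+    assert example_sequence_numbers[at_index + 1:] == ['3', '4']     # AFTER_SEQUENCE_NUMBER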
+    conn = boto.kinesis.connect_to_region("us-west-2")
+    stream_name = "my_stream"
+    conn.create_stream(stream_name, 1)
+
+    # Create some data
+    for index in range(1, 5):
+        conn.put_record(stream_name, str(index), str(index))
+
+    # Get a shard iterator
+    response = conn.describe_stream(stream_name)
+    shard_id = response['StreamDescription']['Shards'][0]['ShardId']
+    response = conn.get_shard_iterator(stream_name, shard_id, 'TRIM_HORIZON')
+    shard_iterator = response['ShardIterator']
+
+    # Get the second record
+    response = conn.get_records(shard_iterator, limit=2)
+    second_sequence_id = response['Records'][1]['SequenceNumber']
+
+    # Then get a new iterator starting after that id
+    response = conn.get_shard_iterator(
+        stream_name, shard_id, 'AFTER_SEQUENCE_NUMBER', second_sequence_id)
+    shard_iterator = response['ShardIterator']
+
+    response = conn.get_records(shard_iterator)
+    # And the first result returned should be the third item
+    response['Records'][0]['Data'].should.equal('3')
+    response['MillisBehindLatest'].should.equal(0)
+
+
+@mock_kinesis_deprecated
+def test_get_records_latest():
+    # LATEST - Start reading just after the most recent record in the shard,
+    # so that you always read the most recent data in the shard.
+    conn = boto.kinesis.connect_to_region("us-west-2")
+    stream_name = "my_stream"
+    conn.create_stream(stream_name, 1)
+
+    # Create some data
+    for index in range(1, 5):
+        conn.put_record(stream_name, str(index), str(index))
+
+    # Get a shard iterator
+    response = conn.describe_stream(stream_name)
+    shard_id = response['StreamDescription']['Shards'][0]['ShardId']
+    response = conn.get_shard_iterator(stream_name, shard_id, 'TRIM_HORIZON')
+    shard_iterator = response['ShardIterator']
+
+    # Get the second record
+    response = conn.get_records(shard_iterator, limit=2)
+    second_sequence_id = response['Records'][1]['SequenceNumber']
+
+    # Then get a new iterator starting after that id
+    response = conn.get_shard_iterator(
+        stream_name, shard_id, 'LATEST', second_sequence_id)
+    shard_iterator = response['ShardIterator']
+
+    # Write some more data
+    conn.put_record(stream_name, "last_record", "last_record")
+
+    response = conn.get_records(shard_iterator)
+    # And the only result returned should be the new item
+    response['Records'].should.have.length_of(1)
+    response['Records'][0]['PartitionKey'].should.equal('last_record')
+    response['Records'][0]['Data'].should.equal('last_record')
+    response['MillisBehindLatest'].should.equal(0)
+
+
+@mock_kinesis
+def test_get_records_at_timestamp():
+    # AT_TIMESTAMP - Read the first record at or after the specified timestamp
+    conn = boto3.client('kinesis', region_name="us-west-2")
+    stream_name = "my_stream"
+    conn.create_stream(StreamName=stream_name, ShardCount=1)
+
+    # Create some data
+    for index in range(1, 5):
+        conn.put_record(StreamName=stream_name,
+                        Data=str(index),
+                        PartitionKey=str(index))
+
+    # boto3 floors the timestamp that we pass to get_shard_iterator to
+    # second precision, even though AWS supports ms precision:
+    # http://docs.aws.amazon.com/kinesis/latest/APIReference/API_GetShardIterator.html
+    # To work around this limitation we wait until we are well into the next
+    # second before capturing the time and storing the records we expect to
+    # retrieve.
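+    # A minimal sketch of that flooring, with made-up values: truncating to
+    # whole seconds makes two instants within the same second
+    # indistinguishable to the service.
+    precise = datetime.datetime(2017, 1, 1, 12, 0, 0, 999000)
+    assert precise.replace(microsecond=0) == datetime.datetime(2017, 1, 1, 12, 0, 0)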
+ time.sleep(1.0) + timestamp = datetime.datetime.utcnow() + + keys = [str(i) for i in range(5, 10)] + for k in keys: + conn.put_record(StreamName=stream_name, + Data=k, + PartitionKey=k) + + # Get a shard iterator + response = conn.describe_stream(StreamName=stream_name) + shard_id = response['StreamDescription']['Shards'][0]['ShardId'] + response = conn.get_shard_iterator(StreamName=stream_name, + ShardId=shard_id, + ShardIteratorType='AT_TIMESTAMP', + Timestamp=timestamp) + shard_iterator = response['ShardIterator'] + + response = conn.get_records(ShardIterator=shard_iterator) + + response['Records'].should.have.length_of(len(keys)) + partition_keys = [r['PartitionKey'] for r in response['Records']] + partition_keys.should.equal(keys) + response['MillisBehindLatest'].should.equal(0) + + +@mock_kinesis +def test_get_records_at_very_old_timestamp(): + conn = boto3.client('kinesis', region_name="us-west-2") + stream_name = "my_stream" + conn.create_stream(StreamName=stream_name, ShardCount=1) + + # Create some data + keys = [str(i) for i in range(1, 5)] + for k in keys: + conn.put_record(StreamName=stream_name, + Data=k, + PartitionKey=k) + + # Get a shard iterator + response = conn.describe_stream(StreamName=stream_name) + shard_id = response['StreamDescription']['Shards'][0]['ShardId'] + response = conn.get_shard_iterator(StreamName=stream_name, + ShardId=shard_id, + ShardIteratorType='AT_TIMESTAMP', + Timestamp=1) + shard_iterator = response['ShardIterator'] + + response = conn.get_records(ShardIterator=shard_iterator) + response['Records'].should.have.length_of(len(keys)) + partition_keys = [r['PartitionKey'] for r in response['Records']] + partition_keys.should.equal(keys) + response['MillisBehindLatest'].should.equal(0) + + +@mock_kinesis +def test_get_records_timestamp_filtering(): + conn = boto3.client('kinesis', region_name="us-west-2") + stream_name = "my_stream" + conn.create_stream(StreamName=stream_name, ShardCount=1) + + conn.put_record(StreamName=stream_name, + Data='0', + PartitionKey='0') + + time.sleep(1.0) + timestamp = datetime.datetime.utcnow() + + conn.put_record(StreamName=stream_name, + Data='1', + PartitionKey='1') + + response = conn.describe_stream(StreamName=stream_name) + shard_id = response['StreamDescription']['Shards'][0]['ShardId'] + response = conn.get_shard_iterator(StreamName=stream_name, + ShardId=shard_id, + ShardIteratorType='AT_TIMESTAMP', + Timestamp=timestamp) + shard_iterator = response['ShardIterator'] + + response = conn.get_records(ShardIterator=shard_iterator) + response['Records'].should.have.length_of(1) + response['Records'][0]['PartitionKey'].should.equal('1') + response['Records'][0]['ApproximateArrivalTimestamp'].should.be.\ + greater_than(timestamp) + response['MillisBehindLatest'].should.equal(0) + + +@mock_kinesis +def test_get_records_millis_behind_latest(): + conn = boto3.client('kinesis', region_name="us-west-2") + stream_name = "my_stream" + conn.create_stream(StreamName=stream_name, ShardCount=1) + + conn.put_record(StreamName=stream_name, + Data='0', + PartitionKey='0') + time.sleep(1.0) + conn.put_record(StreamName=stream_name, + Data='1', + PartitionKey='1') + + response = conn.describe_stream(StreamName=stream_name) + shard_id = response['StreamDescription']['Shards'][0]['ShardId'] + response = conn.get_shard_iterator(StreamName=stream_name, + ShardId=shard_id, + ShardIteratorType='TRIM_HORIZON') + shard_iterator = response['ShardIterator'] + + response = conn.get_records(ShardIterator=shard_iterator, Limit=1) + 
response['Records'].should.have.length_of(1) + response['MillisBehindLatest'].should.be.greater_than(0) + + +@mock_kinesis +def test_get_records_at_very_new_timestamp(): + conn = boto3.client('kinesis', region_name="us-west-2") + stream_name = "my_stream" + conn.create_stream(StreamName=stream_name, ShardCount=1) + + # Create some data + keys = [str(i) for i in range(1, 5)] + for k in keys: + conn.put_record(StreamName=stream_name, + Data=k, + PartitionKey=k) + + timestamp = datetime.datetime.utcnow() + datetime.timedelta(seconds=1) + + # Get a shard iterator + response = conn.describe_stream(StreamName=stream_name) + shard_id = response['StreamDescription']['Shards'][0]['ShardId'] + response = conn.get_shard_iterator(StreamName=stream_name, + ShardId=shard_id, + ShardIteratorType='AT_TIMESTAMP', + Timestamp=timestamp) + shard_iterator = response['ShardIterator'] + + response = conn.get_records(ShardIterator=shard_iterator) + + response['Records'].should.have.length_of(0) + response['MillisBehindLatest'].should.equal(0) + + +@mock_kinesis +def test_get_records_from_empty_stream_at_timestamp(): + conn = boto3.client('kinesis', region_name="us-west-2") + stream_name = "my_stream" + conn.create_stream(StreamName=stream_name, ShardCount=1) + + timestamp = datetime.datetime.utcnow() + + # Get a shard iterator + response = conn.describe_stream(StreamName=stream_name) + shard_id = response['StreamDescription']['Shards'][0]['ShardId'] + response = conn.get_shard_iterator(StreamName=stream_name, + ShardId=shard_id, + ShardIteratorType='AT_TIMESTAMP', + Timestamp=timestamp) + shard_iterator = response['ShardIterator'] + + response = conn.get_records(ShardIterator=shard_iterator) + + response['Records'].should.have.length_of(0) + response['MillisBehindLatest'].should.equal(0) + + +@mock_kinesis_deprecated +def test_invalid_shard_iterator_type(): + conn = boto.kinesis.connect_to_region("us-west-2") + stream_name = "my_stream" + conn.create_stream(stream_name, 1) + + response = conn.describe_stream(stream_name) + shard_id = response['StreamDescription']['Shards'][0]['ShardId'] + response = conn.get_shard_iterator.when.called_with( + stream_name, shard_id, 'invalid-type').should.throw(InvalidArgumentException) + + +@mock_kinesis_deprecated +def test_add_tags(): + conn = boto.kinesis.connect_to_region("us-west-2") + stream_name = "my_stream" + conn.create_stream(stream_name, 1) + + conn.describe_stream(stream_name) + conn.add_tags_to_stream(stream_name, {'tag1': 'val1'}) + conn.add_tags_to_stream(stream_name, {'tag2': 'val2'}) + conn.add_tags_to_stream(stream_name, {'tag1': 'val3'}) + conn.add_tags_to_stream(stream_name, {'tag2': 'val4'}) + + +@mock_kinesis_deprecated +def test_list_tags(): + conn = boto.kinesis.connect_to_region("us-west-2") + stream_name = "my_stream" + conn.create_stream(stream_name, 1) + + conn.describe_stream(stream_name) + conn.add_tags_to_stream(stream_name, {'tag1': 'val1'}) + tags = dict([(tag['Key'], tag['Value']) + for tag in conn.list_tags_for_stream(stream_name)['Tags']]) + tags.get('tag1').should.equal('val1') + conn.add_tags_to_stream(stream_name, {'tag2': 'val2'}) + tags = dict([(tag['Key'], tag['Value']) + for tag in conn.list_tags_for_stream(stream_name)['Tags']]) + tags.get('tag2').should.equal('val2') + conn.add_tags_to_stream(stream_name, {'tag1': 'val3'}) + tags = dict([(tag['Key'], tag['Value']) + for tag in conn.list_tags_for_stream(stream_name)['Tags']]) + tags.get('tag1').should.equal('val3') + conn.add_tags_to_stream(stream_name, {'tag2': 'val4'}) + tags = 
dict([(tag['Key'], tag['Value']) + for tag in conn.list_tags_for_stream(stream_name)['Tags']]) + tags.get('tag2').should.equal('val4') + + +@mock_kinesis_deprecated +def test_remove_tags(): + conn = boto.kinesis.connect_to_region("us-west-2") + stream_name = "my_stream" + conn.create_stream(stream_name, 1) + + conn.describe_stream(stream_name) + conn.add_tags_to_stream(stream_name, {'tag1': 'val1'}) + tags = dict([(tag['Key'], tag['Value']) + for tag in conn.list_tags_for_stream(stream_name)['Tags']]) + tags.get('tag1').should.equal('val1') + conn.remove_tags_from_stream(stream_name, ['tag1']) + tags = dict([(tag['Key'], tag['Value']) + for tag in conn.list_tags_for_stream(stream_name)['Tags']]) + tags.get('tag1').should.equal(None) + + conn.add_tags_to_stream(stream_name, {'tag2': 'val2'}) + tags = dict([(tag['Key'], tag['Value']) + for tag in conn.list_tags_for_stream(stream_name)['Tags']]) + tags.get('tag2').should.equal('val2') + conn.remove_tags_from_stream(stream_name, ['tag2']) + tags = dict([(tag['Key'], tag['Value']) + for tag in conn.list_tags_for_stream(stream_name)['Tags']]) + tags.get('tag2').should.equal(None) + + +@mock_kinesis_deprecated +def test_split_shard(): + conn = boto.kinesis.connect_to_region("us-west-2") + stream_name = 'my_stream' + + conn.create_stream(stream_name, 2) + + # Create some data + for index in range(1, 100): + conn.put_record(stream_name, str(index), str(index)) + + stream_response = conn.describe_stream(stream_name) + + stream = stream_response["StreamDescription"] + shards = stream['Shards'] + shards.should.have.length_of(2) + sum([shard['SequenceNumberRange']['EndingSequenceNumber'] + for shard in shards]).should.equal(99) + + shard_range = shards[0]['HashKeyRange'] + new_starting_hash = ( + int(shard_range['EndingHashKey']) + int(shard_range['StartingHashKey'])) // 2 + conn.split_shard("my_stream", shards[0]['ShardId'], str(new_starting_hash)) + + stream_response = conn.describe_stream(stream_name) + + stream = stream_response["StreamDescription"] + shards = stream['Shards'] + shards.should.have.length_of(3) + sum([shard['SequenceNumberRange']['EndingSequenceNumber'] + for shard in shards]).should.equal(99) + + shard_range = shards[2]['HashKeyRange'] + new_starting_hash = ( + int(shard_range['EndingHashKey']) + int(shard_range['StartingHashKey'])) // 2 + conn.split_shard("my_stream", shards[2]['ShardId'], str(new_starting_hash)) + + stream_response = conn.describe_stream(stream_name) + + stream = stream_response["StreamDescription"] + shards = stream['Shards'] + shards.should.have.length_of(4) + sum([shard['SequenceNumberRange']['EndingSequenceNumber'] + for shard in shards]).should.equal(99) + + +@mock_kinesis_deprecated +def test_merge_shards(): + conn = boto.kinesis.connect_to_region("us-west-2") + stream_name = 'my_stream' + + conn.create_stream(stream_name, 4) + + # Create some data + for index in range(1, 100): + conn.put_record(stream_name, str(index), str(index)) + + stream_response = conn.describe_stream(stream_name) + + stream = stream_response["StreamDescription"] + shards = stream['Shards'] + shards.should.have.length_of(4) + + conn.merge_shards.when.called_with( + stream_name, 'shardId-000000000000', 'shardId-000000000002').should.throw(InvalidArgumentException) + + stream_response = conn.describe_stream(stream_name) + + stream = stream_response["StreamDescription"] + shards = stream['Shards'] + shards.should.have.length_of(4) + sum([shard['SequenceNumberRange']['EndingSequenceNumber'] + for shard in shards]).should.equal(99) + + 
conn.merge_shards(stream_name, 'shardId-000000000000', + 'shardId-000000000001') + + stream_response = conn.describe_stream(stream_name) + + stream = stream_response["StreamDescription"] + shards = stream['Shards'] + shards.should.have.length_of(3) + sum([shard['SequenceNumberRange']['EndingSequenceNumber'] + for shard in shards]).should.equal(99) + conn.merge_shards(stream_name, 'shardId-000000000002', + 'shardId-000000000000') + + stream_response = conn.describe_stream(stream_name) + + stream = stream_response["StreamDescription"] + shards = stream['Shards'] + shards.should.have.length_of(2) + sum([shard['SequenceNumberRange']['EndingSequenceNumber'] + for shard in shards]).should.equal(99) diff --git a/tests/test_kinesis/test_server.py b/tests/test_kinesis/test_server.py index 527310d752cd..b88ab1bb2dbe 100644 --- a/tests/test_kinesis/test_server.py +++ b/tests/test_kinesis/test_server.py @@ -1,25 +1,25 @@ -from __future__ import unicode_literals - -import json -import sure # noqa - -import moto.server as server -from moto import mock_kinesis - -''' -Test the different server responses -''' - - -@mock_kinesis -def test_list_streams(): - backend = server.create_backend_app("kinesis") - test_client = backend.test_client() - - res = test_client.get('/?Action=ListStreams') - - json_data = json.loads(res.data.decode("utf-8")) - json_data.should.equal({ - "HasMoreStreams": False, - "StreamNames": [], - }) +from __future__ import unicode_literals + +import json +import sure # noqa + +import moto.server as server +from moto import mock_kinesis + +''' +Test the different server responses +''' + + +@mock_kinesis +def test_list_streams(): + backend = server.create_backend_app("kinesis") + test_client = backend.test_client() + + res = test_client.get('/?Action=ListStreams') + + json_data = json.loads(res.data.decode("utf-8")) + json_data.should.equal({ + "HasMoreStreams": False, + "StreamNames": [], + }) diff --git a/tests/test_kms/test_kms.py b/tests/test_kms/test_kms.py index 8bccae27a02f..830c531a2478 100644 --- a/tests/test_kms/test_kms.py +++ b/tests/test_kms/test_kms.py @@ -1,719 +1,719 @@ -from __future__ import unicode_literals -import os, re - -import boto3 -import boto.kms -from boto.exception import JSONResponseError -from boto.kms.exceptions import AlreadyExistsException, NotFoundException -import sure # noqa -from moto import mock_kms, mock_kms_deprecated -from nose.tools import assert_raises -from freezegun import freeze_time -from datetime import datetime, timedelta -from dateutil.tz import tzlocal - - -@mock_kms_deprecated -def test_create_key(): - conn = boto.kms.connect_to_region("us-west-2") - - key = conn.create_key(policy="my policy", - description="my key", key_usage='ENCRYPT_DECRYPT') - - key['KeyMetadata']['Description'].should.equal("my key") - key['KeyMetadata']['KeyUsage'].should.equal("ENCRYPT_DECRYPT") - key['KeyMetadata']['Enabled'].should.equal(True) - - -@mock_kms_deprecated -def test_describe_key(): - conn = boto.kms.connect_to_region("us-west-2") - key = conn.create_key(policy="my policy", - description="my key", key_usage='ENCRYPT_DECRYPT') - key_id = key['KeyMetadata']['KeyId'] - - key = conn.describe_key(key_id) - key['KeyMetadata']['Description'].should.equal("my key") - key['KeyMetadata']['KeyUsage'].should.equal("ENCRYPT_DECRYPT") - - -@mock_kms_deprecated -def test_describe_key_via_alias(): - conn = boto.kms.connect_to_region("us-west-2") - key = conn.create_key(policy="my policy", - description="my key", key_usage='ENCRYPT_DECRYPT') - 
conn.create_alias(alias_name='alias/my-key-alias', - target_key_id=key['KeyMetadata']['KeyId']) - - alias_key = conn.describe_key('alias/my-key-alias') - alias_key['KeyMetadata']['Description'].should.equal("my key") - alias_key['KeyMetadata']['KeyUsage'].should.equal("ENCRYPT_DECRYPT") - alias_key['KeyMetadata']['Arn'].should.equal(key['KeyMetadata']['Arn']) - - -@mock_kms_deprecated -def test_describe_key_via_alias_not_found(): - conn = boto.kms.connect_to_region("us-west-2") - key = conn.create_key(policy="my policy", - description="my key", key_usage='ENCRYPT_DECRYPT') - conn.create_alias(alias_name='alias/my-key-alias', - target_key_id=key['KeyMetadata']['KeyId']) - - conn.describe_key.when.called_with( - 'alias/not-found-alias').should.throw(JSONResponseError) - - -@mock_kms_deprecated -def test_describe_key_via_arn(): - conn = boto.kms.connect_to_region("us-west-2") - key = conn.create_key(policy="my policy", - description="my key", key_usage='ENCRYPT_DECRYPT') - arn = key['KeyMetadata']['Arn'] - - the_key = conn.describe_key(arn) - the_key['KeyMetadata']['Description'].should.equal("my key") - the_key['KeyMetadata']['KeyUsage'].should.equal("ENCRYPT_DECRYPT") - the_key['KeyMetadata']['KeyId'].should.equal(key['KeyMetadata']['KeyId']) - - -@mock_kms_deprecated -def test_describe_missing_key(): - conn = boto.kms.connect_to_region("us-west-2") - conn.describe_key.when.called_with( - "not-a-key").should.throw(JSONResponseError) - - -@mock_kms_deprecated -def test_list_keys(): - conn = boto.kms.connect_to_region("us-west-2") - - conn.create_key(policy="my policy", description="my key1", - key_usage='ENCRYPT_DECRYPT') - conn.create_key(policy="my policy", description="my key2", - key_usage='ENCRYPT_DECRYPT') - - keys = conn.list_keys() - keys['Keys'].should.have.length_of(2) - - -@mock_kms_deprecated -def test_enable_key_rotation(): - conn = boto.kms.connect_to_region("us-west-2") - - key = conn.create_key(policy="my policy", - description="my key", key_usage='ENCRYPT_DECRYPT') - key_id = key['KeyMetadata']['KeyId'] - - conn.enable_key_rotation(key_id) - - conn.get_key_rotation_status( - key_id)['KeyRotationEnabled'].should.equal(True) - - -@mock_kms_deprecated -def test_enable_key_rotation_via_arn(): - conn = boto.kms.connect_to_region("us-west-2") - - key = conn.create_key(policy="my policy", - description="my key", key_usage='ENCRYPT_DECRYPT') - key_id = key['KeyMetadata']['Arn'] - - conn.enable_key_rotation(key_id) - - conn.get_key_rotation_status( - key_id)['KeyRotationEnabled'].should.equal(True) - - -@mock_kms_deprecated -def test_enable_key_rotation_with_missing_key(): - conn = boto.kms.connect_to_region("us-west-2") - conn.enable_key_rotation.when.called_with( - "not-a-key").should.throw(JSONResponseError) - - -@mock_kms_deprecated -def test_enable_key_rotation_with_alias_name_should_fail(): - conn = boto.kms.connect_to_region("us-west-2") - key = conn.create_key(policy="my policy", - description="my key", key_usage='ENCRYPT_DECRYPT') - conn.create_alias(alias_name='alias/my-key-alias', - target_key_id=key['KeyMetadata']['KeyId']) - - alias_key = conn.describe_key('alias/my-key-alias') - alias_key['KeyMetadata']['Arn'].should.equal(key['KeyMetadata']['Arn']) - - conn.enable_key_rotation.when.called_with( - 'alias/my-alias').should.throw(JSONResponseError) - - -@mock_kms_deprecated -def test_disable_key_rotation(): - conn = boto.kms.connect_to_region("us-west-2") - - key = conn.create_key(policy="my policy", - description="my key", key_usage='ENCRYPT_DECRYPT') - key_id = 
key['KeyMetadata']['KeyId'] - - conn.enable_key_rotation(key_id) - conn.get_key_rotation_status( - key_id)['KeyRotationEnabled'].should.equal(True) - - conn.disable_key_rotation(key_id) - conn.get_key_rotation_status( - key_id)['KeyRotationEnabled'].should.equal(False) - - -@mock_kms_deprecated -def test_encrypt(): - """ - test_encrypt - Using base64 encoding to merely test that the endpoint was called - """ - conn = boto.kms.connect_to_region("us-west-2") - response = conn.encrypt('key_id', 'encryptme'.encode('utf-8')) - response['CiphertextBlob'].should.equal(b'ZW5jcnlwdG1l') - - -@mock_kms_deprecated -def test_decrypt(): - conn = boto.kms.connect_to_region('us-west-2') - response = conn.decrypt('ZW5jcnlwdG1l'.encode('utf-8')) - response['Plaintext'].should.equal(b'encryptme') - - -@mock_kms_deprecated -def test_disable_key_rotation_with_missing_key(): - conn = boto.kms.connect_to_region("us-west-2") - conn.disable_key_rotation.when.called_with( - "not-a-key").should.throw(JSONResponseError) - - -@mock_kms_deprecated -def test_get_key_rotation_status_with_missing_key(): - conn = boto.kms.connect_to_region("us-west-2") - conn.get_key_rotation_status.when.called_with( - "not-a-key").should.throw(JSONResponseError) - - -@mock_kms_deprecated -def test_get_key_rotation_status(): - conn = boto.kms.connect_to_region("us-west-2") - - key = conn.create_key(policy="my policy", - description="my key", key_usage='ENCRYPT_DECRYPT') - key_id = key['KeyMetadata']['KeyId'] - - conn.get_key_rotation_status( - key_id)['KeyRotationEnabled'].should.equal(False) - - -@mock_kms_deprecated -def test_create_key_defaults_key_rotation(): - conn = boto.kms.connect_to_region("us-west-2") - - key = conn.create_key(policy="my policy", - description="my key", key_usage='ENCRYPT_DECRYPT') - key_id = key['KeyMetadata']['KeyId'] - - conn.get_key_rotation_status( - key_id)['KeyRotationEnabled'].should.equal(False) - - -@mock_kms_deprecated -def test_get_key_policy(): - conn = boto.kms.connect_to_region('us-west-2') - - key = conn.create_key(policy='my policy', - description='my key1', key_usage='ENCRYPT_DECRYPT') - key_id = key['KeyMetadata']['KeyId'] - - policy = conn.get_key_policy(key_id, 'default') - policy['Policy'].should.equal('my policy') - - -@mock_kms_deprecated -def test_get_key_policy_via_arn(): - conn = boto.kms.connect_to_region('us-west-2') - - key = conn.create_key(policy='my policy', - description='my key1', key_usage='ENCRYPT_DECRYPT') - policy = conn.get_key_policy(key['KeyMetadata']['Arn'], 'default') - - policy['Policy'].should.equal('my policy') - - -@mock_kms_deprecated -def test_put_key_policy(): - conn = boto.kms.connect_to_region('us-west-2') - - key = conn.create_key(policy='my policy', - description='my key1', key_usage='ENCRYPT_DECRYPT') - key_id = key['KeyMetadata']['KeyId'] - - conn.put_key_policy(key_id, 'default', 'new policy') - policy = conn.get_key_policy(key_id, 'default') - policy['Policy'].should.equal('new policy') - - -@mock_kms_deprecated -def test_put_key_policy_via_arn(): - conn = boto.kms.connect_to_region('us-west-2') - - key = conn.create_key(policy='my policy', - description='my key1', key_usage='ENCRYPT_DECRYPT') - key_id = key['KeyMetadata']['Arn'] - - conn.put_key_policy(key_id, 'default', 'new policy') - policy = conn.get_key_policy(key_id, 'default') - policy['Policy'].should.equal('new policy') - - -@mock_kms_deprecated -def test_put_key_policy_via_alias_should_not_update(): - conn = boto.kms.connect_to_region('us-west-2') - - key = conn.create_key(policy='my policy', 
- description='my key1', key_usage='ENCRYPT_DECRYPT') - conn.create_alias(alias_name='alias/my-key-alias', - target_key_id=key['KeyMetadata']['KeyId']) - - conn.put_key_policy.when.called_with( - 'alias/my-key-alias', 'default', 'new policy').should.throw(JSONResponseError) - - policy = conn.get_key_policy(key['KeyMetadata']['KeyId'], 'default') - policy['Policy'].should.equal('my policy') - - -@mock_kms_deprecated -def test_put_key_policy(): - conn = boto.kms.connect_to_region('us-west-2') - - key = conn.create_key(policy='my policy', - description='my key1', key_usage='ENCRYPT_DECRYPT') - conn.put_key_policy(key['KeyMetadata']['Arn'], 'default', 'new policy') - - policy = conn.get_key_policy(key['KeyMetadata']['KeyId'], 'default') - policy['Policy'].should.equal('new policy') - - -@mock_kms_deprecated -def test_list_key_policies(): - conn = boto.kms.connect_to_region('us-west-2') - - key = conn.create_key(policy='my policy', - description='my key1', key_usage='ENCRYPT_DECRYPT') - key_id = key['KeyMetadata']['KeyId'] - - policies = conn.list_key_policies(key_id) - policies['PolicyNames'].should.equal(['default']) - - -@mock_kms_deprecated -def test__create_alias__returns_none_if_correct(): - kms = boto.connect_kms() - create_resp = kms.create_key() - key_id = create_resp['KeyMetadata']['KeyId'] - - resp = kms.create_alias('alias/my-alias', key_id) - - resp.should.be.none - - -@mock_kms_deprecated -def test__create_alias__raises_if_reserved_alias(): - kms = boto.connect_kms() - create_resp = kms.create_key() - key_id = create_resp['KeyMetadata']['KeyId'] - - reserved_aliases = [ - 'alias/aws/ebs', - 'alias/aws/s3', - 'alias/aws/redshift', - 'alias/aws/rds', - ] - - for alias_name in reserved_aliases: - with assert_raises(JSONResponseError) as err: - kms.create_alias(alias_name, key_id) - - ex = err.exception - ex.error_message.should.be.none - ex.error_code.should.equal('NotAuthorizedException') - ex.body.should.equal({'__type': 'NotAuthorizedException'}) - ex.reason.should.equal('Bad Request') - ex.status.should.equal(400) - - -@mock_kms_deprecated -def test__create_alias__can_create_multiple_aliases_for_same_key_id(): - kms = boto.connect_kms() - create_resp = kms.create_key() - key_id = create_resp['KeyMetadata']['KeyId'] - - kms.create_alias('alias/my-alias3', key_id).should.be.none - kms.create_alias('alias/my-alias4', key_id).should.be.none - kms.create_alias('alias/my-alias5', key_id).should.be.none - - -@mock_kms_deprecated -def test__create_alias__raises_if_wrong_prefix(): - kms = boto.connect_kms() - create_resp = kms.create_key() - key_id = create_resp['KeyMetadata']['KeyId'] - - with assert_raises(JSONResponseError) as err: - kms.create_alias('wrongprefix/my-alias', key_id) - - ex = err.exception - ex.error_message.should.equal('Invalid identifier') - ex.error_code.should.equal('ValidationException') - ex.body.should.equal({'message': 'Invalid identifier', - '__type': 'ValidationException'}) - ex.reason.should.equal('Bad Request') - ex.status.should.equal(400) - - -@mock_kms_deprecated -def test__create_alias__raises_if_duplicate(): - region = 'us-west-2' - kms = boto.kms.connect_to_region(region) - create_resp = kms.create_key() - key_id = create_resp['KeyMetadata']['KeyId'] - alias = 'alias/my-alias' - - kms.create_alias(alias, key_id) - - with assert_raises(AlreadyExistsException) as err: - kms.create_alias(alias, key_id) - - ex = err.exception - ex.error_message.should.match(r'An alias with the name arn:aws:kms:{region}:\d{{12}}:{alias} already exists' - 
.format(**locals())) - ex.error_code.should.be.none - ex.box_usage.should.be.none - ex.request_id.should.be.none - ex.body['message'].should.match(r'An alias with the name arn:aws:kms:{region}:\d{{12}}:{alias} already exists' - .format(**locals())) - ex.body['__type'].should.equal('AlreadyExistsException') - ex.reason.should.equal('Bad Request') - ex.status.should.equal(400) - - -@mock_kms_deprecated -def test__create_alias__raises_if_alias_has_restricted_characters(): - kms = boto.connect_kms() - create_resp = kms.create_key() - key_id = create_resp['KeyMetadata']['KeyId'] - - alias_names_with_restricted_characters = [ - 'alias/my-alias!', - 'alias/my-alias$', - 'alias/my-alias@', - ] - - for alias_name in alias_names_with_restricted_characters: - with assert_raises(JSONResponseError) as err: - kms.create_alias(alias_name, key_id) - ex = err.exception - ex.body['__type'].should.equal('ValidationException') - ex.body['message'].should.equal( - "1 validation error detected: Value '{alias_name}' at 'aliasName' failed to satisfy constraint: Member must satisfy regular expression pattern: ^[a-zA-Z0-9:/_-]+$".format(**locals())) - ex.error_code.should.equal('ValidationException') - ex.message.should.equal( - "1 validation error detected: Value '{alias_name}' at 'aliasName' failed to satisfy constraint: Member must satisfy regular expression pattern: ^[a-zA-Z0-9:/_-]+$".format(**locals())) - ex.reason.should.equal('Bad Request') - ex.status.should.equal(400) - - -@mock_kms_deprecated -def test__create_alias__raises_if_alias_has_colon_character(): - # For some reason, colons are not accepted for an alias, even though they - # are accepted by regex ^[a-zA-Z0-9:/_-]+$ - kms = boto.connect_kms() - create_resp = kms.create_key() - key_id = create_resp['KeyMetadata']['KeyId'] - - alias_names_with_restricted_characters = [ - 'alias/my:alias', - ] - - for alias_name in alias_names_with_restricted_characters: - with assert_raises(JSONResponseError) as err: - kms.create_alias(alias_name, key_id) - ex = err.exception - ex.body['__type'].should.equal('ValidationException') - ex.body['message'].should.equal( - "{alias_name} contains invalid characters for an alias".format(**locals())) - ex.error_code.should.equal('ValidationException') - ex.message.should.equal( - "{alias_name} contains invalid characters for an alias".format(**locals())) - ex.reason.should.equal('Bad Request') - ex.status.should.equal(400) - - -@mock_kms_deprecated -def test__create_alias__accepted_characters(): - kms = boto.connect_kms() - create_resp = kms.create_key() - key_id = create_resp['KeyMetadata']['KeyId'] - - alias_names_with_accepted_characters = [ - 'alias/my-alias_/', - 'alias/my_alias-/', - ] - - for alias_name in alias_names_with_accepted_characters: - kms.create_alias(alias_name, key_id) - - -@mock_kms_deprecated -def test__create_alias__raises_if_target_key_id_is_existing_alias(): - kms = boto.connect_kms() - create_resp = kms.create_key() - key_id = create_resp['KeyMetadata']['KeyId'] - alias = 'alias/my-alias' - - kms.create_alias(alias, key_id) - - with assert_raises(JSONResponseError) as err: - kms.create_alias(alias, alias) - - ex = err.exception - ex.body['__type'].should.equal('ValidationException') - ex.body['message'].should.equal('Aliases must refer to keys. Not aliases') - ex.error_code.should.equal('ValidationException') - ex.message.should.equal('Aliases must refer to keys. 
Not aliases') - ex.reason.should.equal('Bad Request') - ex.status.should.equal(400) - - -@mock_kms_deprecated -def test__delete_alias(): - kms = boto.connect_kms() - create_resp = kms.create_key() - key_id = create_resp['KeyMetadata']['KeyId'] - alias = 'alias/my-alias' - - # added another alias here to make sure that the deletion of the alias can - # be done when there are multiple existing aliases. - another_create_resp = kms.create_key() - another_key_id = create_resp['KeyMetadata']['KeyId'] - another_alias = 'alias/another-alias' - - kms.create_alias(alias, key_id) - kms.create_alias(another_alias, another_key_id) - - resp = kms.delete_alias(alias) - - resp.should.be.none - - # we can create the alias again, since it has been deleted - kms.create_alias(alias, key_id) - - -@mock_kms_deprecated -def test__delete_alias__raises_if_wrong_prefix(): - kms = boto.connect_kms() - - with assert_raises(JSONResponseError) as err: - kms.delete_alias('wrongprefix/my-alias') - - ex = err.exception - ex.body['__type'].should.equal('ValidationException') - ex.body['message'].should.equal('Invalid identifier') - ex.error_code.should.equal('ValidationException') - ex.message.should.equal('Invalid identifier') - ex.reason.should.equal('Bad Request') - ex.status.should.equal(400) - - -@mock_kms_deprecated -def test__delete_alias__raises_if_alias_is_not_found(): - region = 'us-west-2' - kms = boto.kms.connect_to_region(region) - alias_name = 'alias/unexisting-alias' - - with assert_raises(NotFoundException) as err: - kms.delete_alias(alias_name) - - ex = err.exception - ex.body['__type'].should.equal('NotFoundException') - ex.body['message'].should.match( - r'Alias arn:aws:kms:{region}:\d{{12}}:{alias_name} is not found.'.format(**locals())) - ex.box_usage.should.be.none - ex.error_code.should.be.none - ex.message.should.match( - r'Alias arn:aws:kms:{region}:\d{{12}}:{alias_name} is not found.'.format(**locals())) - ex.reason.should.equal('Bad Request') - ex.request_id.should.be.none - ex.status.should.equal(400) - - -@mock_kms_deprecated -def test__list_aliases(): - region = "eu-west-1" - kms = boto.kms.connect_to_region(region) - - create_resp = kms.create_key() - key_id = create_resp['KeyMetadata']['KeyId'] - kms.create_alias('alias/my-alias1', key_id) - kms.create_alias('alias/my-alias2', key_id) - kms.create_alias('alias/my-alias3', key_id) - - resp = kms.list_aliases() - - resp['Truncated'].should.be.false - - aliases = resp['Aliases'] - - def has_correct_arn(alias_obj): - alias_name = alias_obj['AliasName'] - alias_arn = alias_obj['AliasArn'] - return re.match(r'arn:aws:kms:{region}:\d{{12}}:{alias_name}'.format(region=region, alias_name=alias_name), - alias_arn) - - len([alias for alias in aliases if - has_correct_arn(alias) and 'alias/aws/ebs' == alias['AliasName']]).should.equal(1) - len([alias for alias in aliases if - has_correct_arn(alias) and 'alias/aws/rds' == alias['AliasName']]).should.equal(1) - len([alias for alias in aliases if - has_correct_arn(alias) and 'alias/aws/redshift' == alias['AliasName']]).should.equal(1) - len([alias for alias in aliases if - has_correct_arn(alias) and 'alias/aws/s3' == alias['AliasName']]).should.equal(1) - - len([alias for alias in aliases if - has_correct_arn(alias) and 'alias/my-alias1' == alias['AliasName']]).should.equal(1) - len([alias for alias in aliases if - has_correct_arn(alias) and 'alias/my-alias2' == alias['AliasName']]).should.equal(1) - - len([alias for alias in aliases if 'TargetKeyId' in alias and key_id == - 
alias['TargetKeyId']]).should.equal(3) - - len(aliases).should.equal(7) - - -@mock_kms_deprecated -def test__assert_valid_key_id(): - from moto.kms.responses import _assert_valid_key_id - import uuid - - _assert_valid_key_id.when.called_with( - "not-a-key").should.throw(JSONResponseError) - _assert_valid_key_id.when.called_with( - str(uuid.uuid4())).should_not.throw(JSONResponseError) - - -@mock_kms_deprecated -def test__assert_default_policy(): - from moto.kms.responses import _assert_default_policy - - _assert_default_policy.when.called_with( - "not-default").should.throw(JSONResponseError) - _assert_default_policy.when.called_with( - "default").should_not.throw(JSONResponseError) - - -@mock_kms -def test_kms_encrypt_boto3(): - client = boto3.client('kms', region_name='us-east-1') - response = client.encrypt(KeyId='foo', Plaintext=b'bar') - - response = client.decrypt(CiphertextBlob=response['CiphertextBlob']) - response['Plaintext'].should.equal(b'bar') - - -@mock_kms -def test_disable_key(): - client = boto3.client('kms', region_name='us-east-1') - key = client.create_key(Description='disable-key') - client.disable_key( - KeyId=key['KeyMetadata']['KeyId'] - ) - - result = client.describe_key(KeyId=key['KeyMetadata']['KeyId']) - assert result["KeyMetadata"]["Enabled"] == False - assert result["KeyMetadata"]["KeyState"] == 'Disabled' - - -@mock_kms -def test_enable_key(): - client = boto3.client('kms', region_name='us-east-1') - key = client.create_key(Description='enable-key') - client.disable_key( - KeyId=key['KeyMetadata']['KeyId'] - ) - client.enable_key( - KeyId=key['KeyMetadata']['KeyId'] - ) - - result = client.describe_key(KeyId=key['KeyMetadata']['KeyId']) - assert result["KeyMetadata"]["Enabled"] == True - assert result["KeyMetadata"]["KeyState"] == 'Enabled' - - -@mock_kms -def test_schedule_key_deletion(): - client = boto3.client('kms', region_name='us-east-1') - key = client.create_key(Description='schedule-key-deletion') - if os.environ.get('TEST_SERVER_MODE', 'false').lower() == 'false': - with freeze_time("2015-01-01 12:00:00"): - response = client.schedule_key_deletion( - KeyId=key['KeyMetadata']['KeyId'] - ) - assert response['KeyId'] == key['KeyMetadata']['KeyId'] - assert response['DeletionDate'] == datetime(2015, 1, 31, 12, 0, tzinfo=tzlocal()) - else: - # Can't manipulate time in server mode - response = client.schedule_key_deletion( - KeyId=key['KeyMetadata']['KeyId'] - ) - assert response['KeyId'] == key['KeyMetadata']['KeyId'] - - result = client.describe_key(KeyId=key['KeyMetadata']['KeyId']) - assert result["KeyMetadata"]["Enabled"] == False - assert result["KeyMetadata"]["KeyState"] == 'PendingDeletion' - assert 'DeletionDate' in result["KeyMetadata"] - - -@mock_kms -def test_schedule_key_deletion_custom(): - client = boto3.client('kms', region_name='us-east-1') - key = client.create_key(Description='schedule-key-deletion') - if os.environ.get('TEST_SERVER_MODE', 'false').lower() == 'false': - with freeze_time("2015-01-01 12:00:00"): - response = client.schedule_key_deletion( - KeyId=key['KeyMetadata']['KeyId'], - PendingWindowInDays=7 - ) - assert response['KeyId'] == key['KeyMetadata']['KeyId'] - assert response['DeletionDate'] == datetime(2015, 1, 8, 12, 0, tzinfo=tzlocal()) - else: - # Can't manipulate time in server mode - response = client.schedule_key_deletion( - KeyId=key['KeyMetadata']['KeyId'], - PendingWindowInDays=7 - ) - assert response['KeyId'] == key['KeyMetadata']['KeyId'] - - result = client.describe_key(KeyId=key['KeyMetadata']['KeyId']) - 
assert result["KeyMetadata"]["Enabled"] == False - assert result["KeyMetadata"]["KeyState"] == 'PendingDeletion' - assert 'DeletionDate' in result["KeyMetadata"] - - -@mock_kms -def test_cancel_key_deletion(): - client = boto3.client('kms', region_name='us-east-1') - key = client.create_key(Description='cancel-key-deletion') - client.schedule_key_deletion( - KeyId=key['KeyMetadata']['KeyId'] - ) - response = client.cancel_key_deletion( - KeyId=key['KeyMetadata']['KeyId'] - ) - assert response['KeyId'] == key['KeyMetadata']['KeyId'] - - result = client.describe_key(KeyId=key['KeyMetadata']['KeyId']) - assert result["KeyMetadata"]["Enabled"] == False - assert result["KeyMetadata"]["KeyState"] == 'Disabled' - assert 'DeletionDate' not in result["KeyMetadata"] +from __future__ import unicode_literals +import os, re + +import boto3 +import boto.kms +from boto.exception import JSONResponseError +from boto.kms.exceptions import AlreadyExistsException, NotFoundException +import sure # noqa +from moto import mock_kms, mock_kms_deprecated +from nose.tools import assert_raises +from freezegun import freeze_time +from datetime import datetime, timedelta +from dateutil.tz import tzlocal + + +@mock_kms_deprecated +def test_create_key(): + conn = boto.kms.connect_to_region("us-west-2") + + key = conn.create_key(policy="my policy", + description="my key", key_usage='ENCRYPT_DECRYPT') + + key['KeyMetadata']['Description'].should.equal("my key") + key['KeyMetadata']['KeyUsage'].should.equal("ENCRYPT_DECRYPT") + key['KeyMetadata']['Enabled'].should.equal(True) + + +@mock_kms_deprecated +def test_describe_key(): + conn = boto.kms.connect_to_region("us-west-2") + key = conn.create_key(policy="my policy", + description="my key", key_usage='ENCRYPT_DECRYPT') + key_id = key['KeyMetadata']['KeyId'] + + key = conn.describe_key(key_id) + key['KeyMetadata']['Description'].should.equal("my key") + key['KeyMetadata']['KeyUsage'].should.equal("ENCRYPT_DECRYPT") + + +@mock_kms_deprecated +def test_describe_key_via_alias(): + conn = boto.kms.connect_to_region("us-west-2") + key = conn.create_key(policy="my policy", + description="my key", key_usage='ENCRYPT_DECRYPT') + conn.create_alias(alias_name='alias/my-key-alias', + target_key_id=key['KeyMetadata']['KeyId']) + + alias_key = conn.describe_key('alias/my-key-alias') + alias_key['KeyMetadata']['Description'].should.equal("my key") + alias_key['KeyMetadata']['KeyUsage'].should.equal("ENCRYPT_DECRYPT") + alias_key['KeyMetadata']['Arn'].should.equal(key['KeyMetadata']['Arn']) + + +@mock_kms_deprecated +def test_describe_key_via_alias_not_found(): + conn = boto.kms.connect_to_region("us-west-2") + key = conn.create_key(policy="my policy", + description="my key", key_usage='ENCRYPT_DECRYPT') + conn.create_alias(alias_name='alias/my-key-alias', + target_key_id=key['KeyMetadata']['KeyId']) + + conn.describe_key.when.called_with( + 'alias/not-found-alias').should.throw(JSONResponseError) + + +@mock_kms_deprecated +def test_describe_key_via_arn(): + conn = boto.kms.connect_to_region("us-west-2") + key = conn.create_key(policy="my policy", + description="my key", key_usage='ENCRYPT_DECRYPT') + arn = key['KeyMetadata']['Arn'] + + the_key = conn.describe_key(arn) + the_key['KeyMetadata']['Description'].should.equal("my key") + the_key['KeyMetadata']['KeyUsage'].should.equal("ENCRYPT_DECRYPT") + the_key['KeyMetadata']['KeyId'].should.equal(key['KeyMetadata']['KeyId']) + + +@mock_kms_deprecated +def test_describe_missing_key(): + conn = boto.kms.connect_to_region("us-west-2") + 
conn.describe_key.when.called_with( + "not-a-key").should.throw(JSONResponseError) + + +@mock_kms_deprecated +def test_list_keys(): + conn = boto.kms.connect_to_region("us-west-2") + + conn.create_key(policy="my policy", description="my key1", + key_usage='ENCRYPT_DECRYPT') + conn.create_key(policy="my policy", description="my key2", + key_usage='ENCRYPT_DECRYPT') + + keys = conn.list_keys() + keys['Keys'].should.have.length_of(2) + + +@mock_kms_deprecated +def test_enable_key_rotation(): + conn = boto.kms.connect_to_region("us-west-2") + + key = conn.create_key(policy="my policy", + description="my key", key_usage='ENCRYPT_DECRYPT') + key_id = key['KeyMetadata']['KeyId'] + + conn.enable_key_rotation(key_id) + + conn.get_key_rotation_status( + key_id)['KeyRotationEnabled'].should.equal(True) + + +@mock_kms_deprecated +def test_enable_key_rotation_via_arn(): + conn = boto.kms.connect_to_region("us-west-2") + + key = conn.create_key(policy="my policy", + description="my key", key_usage='ENCRYPT_DECRYPT') + key_id = key['KeyMetadata']['Arn'] + + conn.enable_key_rotation(key_id) + + conn.get_key_rotation_status( + key_id)['KeyRotationEnabled'].should.equal(True) + + +@mock_kms_deprecated +def test_enable_key_rotation_with_missing_key(): + conn = boto.kms.connect_to_region("us-west-2") + conn.enable_key_rotation.when.called_with( + "not-a-key").should.throw(JSONResponseError) + + +@mock_kms_deprecated +def test_enable_key_rotation_with_alias_name_should_fail(): + conn = boto.kms.connect_to_region("us-west-2") + key = conn.create_key(policy="my policy", + description="my key", key_usage='ENCRYPT_DECRYPT') + conn.create_alias(alias_name='alias/my-key-alias', + target_key_id=key['KeyMetadata']['KeyId']) + + alias_key = conn.describe_key('alias/my-key-alias') + alias_key['KeyMetadata']['Arn'].should.equal(key['KeyMetadata']['Arn']) + + conn.enable_key_rotation.when.called_with( + 'alias/my-alias').should.throw(JSONResponseError) + + +@mock_kms_deprecated +def test_disable_key_rotation(): + conn = boto.kms.connect_to_region("us-west-2") + + key = conn.create_key(policy="my policy", + description="my key", key_usage='ENCRYPT_DECRYPT') + key_id = key['KeyMetadata']['KeyId'] + + conn.enable_key_rotation(key_id) + conn.get_key_rotation_status( + key_id)['KeyRotationEnabled'].should.equal(True) + + conn.disable_key_rotation(key_id) + conn.get_key_rotation_status( + key_id)['KeyRotationEnabled'].should.equal(False) + + +@mock_kms_deprecated +def test_encrypt(): + """ + test_encrypt + Using base64 encoding to merely test that the endpoint was called + """ + conn = boto.kms.connect_to_region("us-west-2") + response = conn.encrypt('key_id', 'encryptme'.encode('utf-8')) + response['CiphertextBlob'].should.equal(b'ZW5jcnlwdG1l') + + +@mock_kms_deprecated +def test_decrypt(): + conn = boto.kms.connect_to_region('us-west-2') + response = conn.decrypt('ZW5jcnlwdG1l'.encode('utf-8')) + response['Plaintext'].should.equal(b'encryptme') + + +@mock_kms_deprecated +def test_disable_key_rotation_with_missing_key(): + conn = boto.kms.connect_to_region("us-west-2") + conn.disable_key_rotation.when.called_with( + "not-a-key").should.throw(JSONResponseError) + + +@mock_kms_deprecated +def test_get_key_rotation_status_with_missing_key(): + conn = boto.kms.connect_to_region("us-west-2") + conn.get_key_rotation_status.when.called_with( + "not-a-key").should.throw(JSONResponseError) + + +@mock_kms_deprecated +def test_get_key_rotation_status(): + conn = boto.kms.connect_to_region("us-west-2") + + key = 
conn.create_key(policy="my policy", + description="my key", key_usage='ENCRYPT_DECRYPT') + key_id = key['KeyMetadata']['KeyId'] + + conn.get_key_rotation_status( + key_id)['KeyRotationEnabled'].should.equal(False) + + +@mock_kms_deprecated +def test_create_key_defaults_key_rotation(): + conn = boto.kms.connect_to_region("us-west-2") + + key = conn.create_key(policy="my policy", + description="my key", key_usage='ENCRYPT_DECRYPT') + key_id = key['KeyMetadata']['KeyId'] + + conn.get_key_rotation_status( + key_id)['KeyRotationEnabled'].should.equal(False) + + +@mock_kms_deprecated +def test_get_key_policy(): + conn = boto.kms.connect_to_region('us-west-2') + + key = conn.create_key(policy='my policy', + description='my key1', key_usage='ENCRYPT_DECRYPT') + key_id = key['KeyMetadata']['KeyId'] + + policy = conn.get_key_policy(key_id, 'default') + policy['Policy'].should.equal('my policy') + + +@mock_kms_deprecated +def test_get_key_policy_via_arn(): + conn = boto.kms.connect_to_region('us-west-2') + + key = conn.create_key(policy='my policy', + description='my key1', key_usage='ENCRYPT_DECRYPT') + policy = conn.get_key_policy(key['KeyMetadata']['Arn'], 'default') + + policy['Policy'].should.equal('my policy') + + +@mock_kms_deprecated +def test_put_key_policy(): + conn = boto.kms.connect_to_region('us-west-2') + + key = conn.create_key(policy='my policy', + description='my key1', key_usage='ENCRYPT_DECRYPT') + key_id = key['KeyMetadata']['KeyId'] + + conn.put_key_policy(key_id, 'default', 'new policy') + policy = conn.get_key_policy(key_id, 'default') + policy['Policy'].should.equal('new policy') + + +@mock_kms_deprecated +def test_put_key_policy_via_arn(): + conn = boto.kms.connect_to_region('us-west-2') + + key = conn.create_key(policy='my policy', + description='my key1', key_usage='ENCRYPT_DECRYPT') + key_id = key['KeyMetadata']['Arn'] + + conn.put_key_policy(key_id, 'default', 'new policy') + policy = conn.get_key_policy(key_id, 'default') + policy['Policy'].should.equal('new policy') + + +@mock_kms_deprecated +def test_put_key_policy_via_alias_should_not_update(): + conn = boto.kms.connect_to_region('us-west-2') + + key = conn.create_key(policy='my policy', + description='my key1', key_usage='ENCRYPT_DECRYPT') + conn.create_alias(alias_name='alias/my-key-alias', + target_key_id=key['KeyMetadata']['KeyId']) + + conn.put_key_policy.when.called_with( + 'alias/my-key-alias', 'default', 'new policy').should.throw(JSONResponseError) + + policy = conn.get_key_policy(key['KeyMetadata']['KeyId'], 'default') + policy['Policy'].should.equal('my policy') + + +@mock_kms_deprecated +def test_put_key_policy(): + conn = boto.kms.connect_to_region('us-west-2') + + key = conn.create_key(policy='my policy', + description='my key1', key_usage='ENCRYPT_DECRYPT') + conn.put_key_policy(key['KeyMetadata']['Arn'], 'default', 'new policy') + + policy = conn.get_key_policy(key['KeyMetadata']['KeyId'], 'default') + policy['Policy'].should.equal('new policy') + + +@mock_kms_deprecated +def test_list_key_policies(): + conn = boto.kms.connect_to_region('us-west-2') + + key = conn.create_key(policy='my policy', + description='my key1', key_usage='ENCRYPT_DECRYPT') + key_id = key['KeyMetadata']['KeyId'] + + policies = conn.list_key_policies(key_id) + policies['PolicyNames'].should.equal(['default']) + + +@mock_kms_deprecated +def test__create_alias__returns_none_if_correct(): + kms = boto.connect_kms() + create_resp = kms.create_key() + key_id = create_resp['KeyMetadata']['KeyId'] + + resp = 
kms.create_alias('alias/my-alias', key_id) + + resp.should.be.none + + +@mock_kms_deprecated +def test__create_alias__raises_if_reserved_alias(): + kms = boto.connect_kms() + create_resp = kms.create_key() + key_id = create_resp['KeyMetadata']['KeyId'] + + reserved_aliases = [ + 'alias/aws/ebs', + 'alias/aws/s3', + 'alias/aws/redshift', + 'alias/aws/rds', + ] + + for alias_name in reserved_aliases: + with assert_raises(JSONResponseError) as err: + kms.create_alias(alias_name, key_id) + + ex = err.exception + ex.error_message.should.be.none + ex.error_code.should.equal('NotAuthorizedException') + ex.body.should.equal({'__type': 'NotAuthorizedException'}) + ex.reason.should.equal('Bad Request') + ex.status.should.equal(400) + + +@mock_kms_deprecated +def test__create_alias__can_create_multiple_aliases_for_same_key_id(): + kms = boto.connect_kms() + create_resp = kms.create_key() + key_id = create_resp['KeyMetadata']['KeyId'] + + kms.create_alias('alias/my-alias3', key_id).should.be.none + kms.create_alias('alias/my-alias4', key_id).should.be.none + kms.create_alias('alias/my-alias5', key_id).should.be.none + + +@mock_kms_deprecated +def test__create_alias__raises_if_wrong_prefix(): + kms = boto.connect_kms() + create_resp = kms.create_key() + key_id = create_resp['KeyMetadata']['KeyId'] + + with assert_raises(JSONResponseError) as err: + kms.create_alias('wrongprefix/my-alias', key_id) + + ex = err.exception + ex.error_message.should.equal('Invalid identifier') + ex.error_code.should.equal('ValidationException') + ex.body.should.equal({'message': 'Invalid identifier', + '__type': 'ValidationException'}) + ex.reason.should.equal('Bad Request') + ex.status.should.equal(400) + + +@mock_kms_deprecated +def test__create_alias__raises_if_duplicate(): + region = 'us-west-2' + kms = boto.kms.connect_to_region(region) + create_resp = kms.create_key() + key_id = create_resp['KeyMetadata']['KeyId'] + alias = 'alias/my-alias' + + kms.create_alias(alias, key_id) + + with assert_raises(AlreadyExistsException) as err: + kms.create_alias(alias, key_id) + + ex = err.exception + ex.error_message.should.match(r'An alias with the name arn:aws:kms:{region}:\d{{12}}:{alias} already exists' + .format(**locals())) + ex.error_code.should.be.none + ex.box_usage.should.be.none + ex.request_id.should.be.none + ex.body['message'].should.match(r'An alias with the name arn:aws:kms:{region}:\d{{12}}:{alias} already exists' + .format(**locals())) + ex.body['__type'].should.equal('AlreadyExistsException') + ex.reason.should.equal('Bad Request') + ex.status.should.equal(400) + + +@mock_kms_deprecated +def test__create_alias__raises_if_alias_has_restricted_characters(): + kms = boto.connect_kms() + create_resp = kms.create_key() + key_id = create_resp['KeyMetadata']['KeyId'] + + alias_names_with_restricted_characters = [ + 'alias/my-alias!', + 'alias/my-alias$', + 'alias/my-alias@', + ] + + for alias_name in alias_names_with_restricted_characters: + with assert_raises(JSONResponseError) as err: + kms.create_alias(alias_name, key_id) + ex = err.exception + ex.body['__type'].should.equal('ValidationException') + ex.body['message'].should.equal( + "1 validation error detected: Value '{alias_name}' at 'aliasName' failed to satisfy constraint: Member must satisfy regular expression pattern: ^[a-zA-Z0-9:/_-]+$".format(**locals())) + ex.error_code.should.equal('ValidationException') + ex.message.should.equal( + "1 validation error detected: Value '{alias_name}' at 'aliasName' failed to satisfy constraint: Member must satisfy regular 
expression pattern: ^[a-zA-Z0-9:/_-]+$".format(**locals())) + ex.reason.should.equal('Bad Request') + ex.status.should.equal(400) + + +@mock_kms_deprecated +def test__create_alias__raises_if_alias_has_colon_character(): + # For some reason, colons are not accepted for an alias, even though they + # are accepted by regex ^[a-zA-Z0-9:/_-]+$ + kms = boto.connect_kms() + create_resp = kms.create_key() + key_id = create_resp['KeyMetadata']['KeyId'] + + alias_names_with_restricted_characters = [ + 'alias/my:alias', + ] + + for alias_name in alias_names_with_restricted_characters: + with assert_raises(JSONResponseError) as err: + kms.create_alias(alias_name, key_id) + ex = err.exception + ex.body['__type'].should.equal('ValidationException') + ex.body['message'].should.equal( + "{alias_name} contains invalid characters for an alias".format(**locals())) + ex.error_code.should.equal('ValidationException') + ex.message.should.equal( + "{alias_name} contains invalid characters for an alias".format(**locals())) + ex.reason.should.equal('Bad Request') + ex.status.should.equal(400) + + +@mock_kms_deprecated +def test__create_alias__accepted_characters(): + kms = boto.connect_kms() + create_resp = kms.create_key() + key_id = create_resp['KeyMetadata']['KeyId'] + + alias_names_with_accepted_characters = [ + 'alias/my-alias_/', + 'alias/my_alias-/', + ] + + for alias_name in alias_names_with_accepted_characters: + kms.create_alias(alias_name, key_id) + + +@mock_kms_deprecated +def test__create_alias__raises_if_target_key_id_is_existing_alias(): + kms = boto.connect_kms() + create_resp = kms.create_key() + key_id = create_resp['KeyMetadata']['KeyId'] + alias = 'alias/my-alias' + + kms.create_alias(alias, key_id) + + with assert_raises(JSONResponseError) as err: + kms.create_alias(alias, alias) + + ex = err.exception + ex.body['__type'].should.equal('ValidationException') + ex.body['message'].should.equal('Aliases must refer to keys. Not aliases') + ex.error_code.should.equal('ValidationException') + ex.message.should.equal('Aliases must refer to keys. Not aliases') + ex.reason.should.equal('Bad Request') + ex.status.should.equal(400) + + +@mock_kms_deprecated +def test__delete_alias(): + kms = boto.connect_kms() + create_resp = kms.create_key() + key_id = create_resp['KeyMetadata']['KeyId'] + alias = 'alias/my-alias' + + # added another alias here to make sure that the deletion of the alias can + # be done when there are multiple existing aliases.
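+ # a quick sketch of the lifecycle exercised below (nothing beyond the boto + # KMS client already used in this module is assumed): + # kms.create_alias('alias/example', key_id) + # kms.delete_alias('alias/example') # only the alias name is needed + # kms.create_alias('alias/example', key_id) # the name is free again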
+ another_create_resp = kms.create_key() + another_key_id = another_create_resp['KeyMetadata']['KeyId'] + another_alias = 'alias/another-alias' + + kms.create_alias(alias, key_id) + kms.create_alias(another_alias, another_key_id) + + resp = kms.delete_alias(alias) + + resp.should.be.none + + # we can create the alias again, since it has been deleted + kms.create_alias(alias, key_id) + + +@mock_kms_deprecated +def test__delete_alias__raises_if_wrong_prefix(): + kms = boto.connect_kms() + + with assert_raises(JSONResponseError) as err: + kms.delete_alias('wrongprefix/my-alias') + + ex = err.exception + ex.body['__type'].should.equal('ValidationException') + ex.body['message'].should.equal('Invalid identifier') + ex.error_code.should.equal('ValidationException') + ex.message.should.equal('Invalid identifier') + ex.reason.should.equal('Bad Request') + ex.status.should.equal(400) + + +@mock_kms_deprecated +def test__delete_alias__raises_if_alias_is_not_found(): + region = 'us-west-2' + kms = boto.kms.connect_to_region(region) + alias_name = 'alias/non-existent-alias' + + with assert_raises(NotFoundException) as err: + kms.delete_alias(alias_name) + + ex = err.exception + ex.body['__type'].should.equal('NotFoundException') + ex.body['message'].should.match( + r'Alias arn:aws:kms:{region}:\d{{12}}:{alias_name} is not found.'.format(**locals())) + ex.box_usage.should.be.none + ex.error_code.should.be.none + ex.message.should.match( + r'Alias arn:aws:kms:{region}:\d{{12}}:{alias_name} is not found.'.format(**locals())) + ex.reason.should.equal('Bad Request') + ex.request_id.should.be.none + ex.status.should.equal(400) + + +@mock_kms_deprecated +def test__list_aliases(): + region = "eu-west-1" + kms = boto.kms.connect_to_region(region) + + create_resp = kms.create_key() + key_id = create_resp['KeyMetadata']['KeyId'] + kms.create_alias('alias/my-alias1', key_id) + kms.create_alias('alias/my-alias2', key_id) + kms.create_alias('alias/my-alias3', key_id) + + resp = kms.list_aliases() + + resp['Truncated'].should.be.false + + aliases = resp['Aliases'] + + def has_correct_arn(alias_obj): + alias_name = alias_obj['AliasName'] + alias_arn = alias_obj['AliasArn'] + return re.match(r'arn:aws:kms:{region}:\d{{12}}:{alias_name}'.format(region=region, alias_name=alias_name), + alias_arn) + + len([alias for alias in aliases if + has_correct_arn(alias) and 'alias/aws/ebs' == alias['AliasName']]).should.equal(1) + len([alias for alias in aliases if + has_correct_arn(alias) and 'alias/aws/rds' == alias['AliasName']]).should.equal(1) + len([alias for alias in aliases if + has_correct_arn(alias) and 'alias/aws/redshift' == alias['AliasName']]).should.equal(1) + len([alias for alias in aliases if + has_correct_arn(alias) and 'alias/aws/s3' == alias['AliasName']]).should.equal(1) + + len([alias for alias in aliases if + has_correct_arn(alias) and 'alias/my-alias1' == alias['AliasName']]).should.equal(1) + len([alias for alias in aliases if + has_correct_arn(alias) and 'alias/my-alias2' == alias['AliasName']]).should.equal(1) + + len([alias for alias in aliases if 'TargetKeyId' in alias and key_id == + alias['TargetKeyId']]).should.equal(3) + + len(aliases).should.equal(7) + + +@mock_kms_deprecated +def test__assert_valid_key_id(): + from moto.kms.responses import _assert_valid_key_id + import uuid + + _assert_valid_key_id.when.called_with( + "not-a-key").should.throw(JSONResponseError) + _assert_valid_key_id.when.called_with( + str(uuid.uuid4())).should_not.throw(JSONResponseError) + + +@mock_kms_deprecated +def 
test__assert_default_policy(): + from moto.kms.responses import _assert_default_policy + + _assert_default_policy.when.called_with( + "not-default").should.throw(JSONResponseError) + _assert_default_policy.when.called_with( + "default").should_not.throw(JSONResponseError) + + +@mock_kms +def test_kms_encrypt_boto3(): + client = boto3.client('kms', region_name='us-east-1') + response = client.encrypt(KeyId='foo', Plaintext=b'bar') + + response = client.decrypt(CiphertextBlob=response['CiphertextBlob']) + response['Plaintext'].should.equal(b'bar') + + +@mock_kms +def test_disable_key(): + client = boto3.client('kms', region_name='us-east-1') + key = client.create_key(Description='disable-key') + client.disable_key( + KeyId=key['KeyMetadata']['KeyId'] + ) + + result = client.describe_key(KeyId=key['KeyMetadata']['KeyId']) + assert result["KeyMetadata"]["Enabled"] == False + assert result["KeyMetadata"]["KeyState"] == 'Disabled' + + +@mock_kms +def test_enable_key(): + client = boto3.client('kms', region_name='us-east-1') + key = client.create_key(Description='enable-key') + client.disable_key( + KeyId=key['KeyMetadata']['KeyId'] + ) + client.enable_key( + KeyId=key['KeyMetadata']['KeyId'] + ) + + result = client.describe_key(KeyId=key['KeyMetadata']['KeyId']) + assert result["KeyMetadata"]["Enabled"] == True + assert result["KeyMetadata"]["KeyState"] == 'Enabled' + + +@mock_kms +def test_schedule_key_deletion(): + client = boto3.client('kms', region_name='us-east-1') + key = client.create_key(Description='schedule-key-deletion') + if os.environ.get('TEST_SERVER_MODE', 'false').lower() == 'false': + with freeze_time("2015-01-01 12:00:00"): + response = client.schedule_key_deletion( + KeyId=key['KeyMetadata']['KeyId'] + ) + assert response['KeyId'] == key['KeyMetadata']['KeyId'] + assert response['DeletionDate'] == datetime(2015, 1, 31, 12, 0, tzinfo=tzlocal()) + else: + # Can't manipulate time in server mode + response = client.schedule_key_deletion( + KeyId=key['KeyMetadata']['KeyId'] + ) + assert response['KeyId'] == key['KeyMetadata']['KeyId'] + + result = client.describe_key(KeyId=key['KeyMetadata']['KeyId']) + assert result["KeyMetadata"]["Enabled"] == False + assert result["KeyMetadata"]["KeyState"] == 'PendingDeletion' + assert 'DeletionDate' in result["KeyMetadata"] + + +@mock_kms +def test_schedule_key_deletion_custom(): + client = boto3.client('kms', region_name='us-east-1') + key = client.create_key(Description='schedule-key-deletion') + if os.environ.get('TEST_SERVER_MODE', 'false').lower() == 'false': + with freeze_time("2015-01-01 12:00:00"): + response = client.schedule_key_deletion( + KeyId=key['KeyMetadata']['KeyId'], + PendingWindowInDays=7 + ) + assert response['KeyId'] == key['KeyMetadata']['KeyId'] + assert response['DeletionDate'] == datetime(2015, 1, 8, 12, 0, tzinfo=tzlocal()) + else: + # Can't manipulate time in server mode + response = client.schedule_key_deletion( + KeyId=key['KeyMetadata']['KeyId'], + PendingWindowInDays=7 + ) + assert response['KeyId'] == key['KeyMetadata']['KeyId'] + + result = client.describe_key(KeyId=key['KeyMetadata']['KeyId']) + assert result["KeyMetadata"]["Enabled"] == False + assert result["KeyMetadata"]["KeyState"] == 'PendingDeletion' + assert 'DeletionDate' in result["KeyMetadata"] + + +@mock_kms +def test_cancel_key_deletion(): + client = boto3.client('kms', region_name='us-east-1') + key = client.create_key(Description='cancel-key-deletion') + client.schedule_key_deletion( + KeyId=key['KeyMetadata']['KeyId'] + ) + response = 
client.cancel_key_deletion( + KeyId=key['KeyMetadata']['KeyId'] + ) + assert response['KeyId'] == key['KeyMetadata']['KeyId'] + + result = client.describe_key(KeyId=key['KeyMetadata']['KeyId']) + assert result["KeyMetadata"]["Enabled"] == False + assert result["KeyMetadata"]["KeyState"] == 'Disabled' + assert 'DeletionDate' not in result["KeyMetadata"] diff --git a/tests/test_kms/test_server.py b/tests/test_kms/test_server.py index 7b8f74e3bb94..a5aac7d94884 100644 --- a/tests/test_kms/test_server.py +++ b/tests/test_kms/test_server.py @@ -1,25 +1,25 @@ -from __future__ import unicode_literals - -import json -import sure # noqa - -import moto.server as server -from moto import mock_kms - -''' -Test the different server responses -''' - - -@mock_kms -def test_list_keys(): - backend = server.create_backend_app("kms") - test_client = backend.test_client() - - res = test_client.get('/?Action=ListKeys') - - json.loads(res.data.decode("utf-8")).should.equal({ - "Keys": [], - "NextMarker": None, - "Truncated": False, - }) +from __future__ import unicode_literals + +import json +import sure # noqa + +import moto.server as server +from moto import mock_kms + +''' +Test the different server responses +''' + + +@mock_kms +def test_list_keys(): + backend = server.create_backend_app("kms") + test_client = backend.test_client() + + res = test_client.get('/?Action=ListKeys') + + json.loads(res.data.decode("utf-8")).should.equal({ + "Keys": [], + "NextMarker": None, + "Truncated": False, + }) diff --git a/tests/test_logs/test_logs.py b/tests/test_logs/test_logs.py index e3d46fd87af2..443bc8c2fa79 100644 --- a/tests/test_logs/test_logs.py +++ b/tests/test_logs/test_logs.py @@ -1,128 +1,128 @@ -import boto3 -import sure # noqa -import six -from botocore.exceptions import ClientError - -from moto import mock_logs, settings -from nose.tools import assert_raises - -_logs_region = 'us-east-1' if settings.TEST_SERVER_MODE else 'us-west-2' - - -@mock_logs -def test_log_group_create(): - conn = boto3.client('logs', 'us-west-2') - log_group_name = 'dummy' - response = conn.create_log_group(logGroupName=log_group_name) - - response = conn.describe_log_groups(logGroupNamePrefix=log_group_name) - assert len(response['logGroups']) == 1 - - response = conn.delete_log_group(logGroupName=log_group_name) - - -@mock_logs -def test_exceptions(): - conn = boto3.client('logs', 'us-west-2') - log_group_name = 'dummy' - log_stream_name = 'dummp-stream' - conn.create_log_group(logGroupName=log_group_name) - with assert_raises(ClientError): - conn.create_log_group(logGroupName=log_group_name) - - # descrine_log_groups is not implemented yet - - conn.create_log_stream( - logGroupName=log_group_name, - logStreamName=log_stream_name - ) - with assert_raises(ClientError): - conn.create_log_stream( - logGroupName=log_group_name, - logStreamName=log_stream_name - ) - - conn.put_log_events( - logGroupName=log_group_name, - logStreamName=log_stream_name, - logEvents=[ - { - 'timestamp': 0, - 'message': 'line' - }, - ], - ) - - with assert_raises(ClientError): - conn.put_log_events( - logGroupName=log_group_name, - logStreamName="invalid-stream", - logEvents=[ - { - 'timestamp': 0, - 'message': 'line' - }, - ], - ) - - -@mock_logs -def test_put_logs(): - conn = boto3.client('logs', 'us-west-2') - log_group_name = 'dummy' - log_stream_name = 'stream' - conn.create_log_group(logGroupName=log_group_name) - conn.create_log_stream( - logGroupName=log_group_name, - logStreamName=log_stream_name - ) - messages = [ - {'timestamp': 0, 'message': 
'hello'}, - {'timestamp': 0, 'message': 'world'} - ] - putRes = conn.put_log_events( - logGroupName=log_group_name, - logStreamName=log_stream_name, - logEvents=messages - ) - res = conn.get_log_events( - logGroupName=log_group_name, - logStreamName=log_stream_name - ) - events = res['events'] - nextSequenceToken = putRes['nextSequenceToken'] - assert isinstance(nextSequenceToken, six.string_types) == True - assert len(nextSequenceToken) == 56 - events.should.have.length_of(2) - - -@mock_logs -def test_filter_logs_interleaved(): - conn = boto3.client('logs', 'us-west-2') - log_group_name = 'dummy' - log_stream_name = 'stream' - conn.create_log_group(logGroupName=log_group_name) - conn.create_log_stream( - logGroupName=log_group_name, - logStreamName=log_stream_name - ) - messages = [ - {'timestamp': 0, 'message': 'hello'}, - {'timestamp': 0, 'message': 'world'} - ] - conn.put_log_events( - logGroupName=log_group_name, - logStreamName=log_stream_name, - logEvents=messages - ) - res = conn.filter_log_events( - logGroupName=log_group_name, - logStreamNames=[log_stream_name], - interleaved=True, - ) - events = res['events'] - for original_message, resulting_event in zip(messages, events): - resulting_event['eventId'].should.equal(str(resulting_event['eventId'])) - resulting_event['timestamp'].should.equal(original_message['timestamp']) - resulting_event['message'].should.equal(original_message['message']) - +import boto3 +import sure # noqa +import six +from botocore.exceptions import ClientError + +from moto import mock_logs, settings +from nose.tools import assert_raises + +_logs_region = 'us-east-1' if settings.TEST_SERVER_MODE else 'us-west-2' + + +@mock_logs +def test_log_group_create(): + conn = boto3.client('logs', 'us-west-2') + log_group_name = 'dummy' + response = conn.create_log_group(logGroupName=log_group_name) + + response = conn.describe_log_groups(logGroupNamePrefix=log_group_name) + assert len(response['logGroups']) == 1 + + response = conn.delete_log_group(logGroupName=log_group_name) + + +@mock_logs +def test_exceptions(): + conn = boto3.client('logs', 'us-west-2') + log_group_name = 'dummy' + log_stream_name = 'dummp-stream' + conn.create_log_group(logGroupName=log_group_name) + with assert_raises(ClientError): + conn.create_log_group(logGroupName=log_group_name) + + # descrine_log_groups is not implemented yet + + conn.create_log_stream( + logGroupName=log_group_name, + logStreamName=log_stream_name + ) + with assert_raises(ClientError): + conn.create_log_stream( + logGroupName=log_group_name, + logStreamName=log_stream_name + ) + + conn.put_log_events( + logGroupName=log_group_name, + logStreamName=log_stream_name, + logEvents=[ + { + 'timestamp': 0, + 'message': 'line' + }, + ], + ) + + with assert_raises(ClientError): + conn.put_log_events( + logGroupName=log_group_name, + logStreamName="invalid-stream", + logEvents=[ + { + 'timestamp': 0, + 'message': 'line' + }, + ], + ) + + +@mock_logs +def test_put_logs(): + conn = boto3.client('logs', 'us-west-2') + log_group_name = 'dummy' + log_stream_name = 'stream' + conn.create_log_group(logGroupName=log_group_name) + conn.create_log_stream( + logGroupName=log_group_name, + logStreamName=log_stream_name + ) + messages = [ + {'timestamp': 0, 'message': 'hello'}, + {'timestamp': 0, 'message': 'world'} + ] + putRes = conn.put_log_events( + logGroupName=log_group_name, + logStreamName=log_stream_name, + logEvents=messages + ) + res = conn.get_log_events( + logGroupName=log_group_name, + logStreamName=log_stream_name + ) + events = 
res['events'] + nextSequenceToken = putRes['nextSequenceToken'] + assert isinstance(nextSequenceToken, six.string_types) == True + assert len(nextSequenceToken) == 56 + events.should.have.length_of(2) + + +@mock_logs +def test_filter_logs_interleaved(): + conn = boto3.client('logs', 'us-west-2') + log_group_name = 'dummy' + log_stream_name = 'stream' + conn.create_log_group(logGroupName=log_group_name) + conn.create_log_stream( + logGroupName=log_group_name, + logStreamName=log_stream_name + ) + messages = [ + {'timestamp': 0, 'message': 'hello'}, + {'timestamp': 0, 'message': 'world'} + ] + conn.put_log_events( + logGroupName=log_group_name, + logStreamName=log_stream_name, + logEvents=messages + ) + res = conn.filter_log_events( + logGroupName=log_group_name, + logStreamNames=[log_stream_name], + interleaved=True, + ) + events = res['events'] + for original_message, resulting_event in zip(messages, events): + resulting_event['eventId'].should.equal(str(resulting_event['eventId'])) + resulting_event['timestamp'].should.equal(original_message['timestamp']) + resulting_event['message'].should.equal(original_message['message']) + diff --git a/tests/test_opsworks/test_apps.py b/tests/test_opsworks/test_apps.py index 37d0f2fe4736..d13ce8eaf268 100644 --- a/tests/test_opsworks/test_apps.py +++ b/tests/test_opsworks/test_apps.py @@ -1,102 +1,102 @@ -from __future__ import unicode_literals -import boto3 -from freezegun import freeze_time -import sure # noqa -import re - -from moto import mock_opsworks - - -@freeze_time("2015-01-01") -@mock_opsworks -def test_create_app_response(): - client = boto3.client('opsworks', region_name='us-east-1') - stack_id = client.create_stack( - Name="test_stack_1", - Region="us-east-1", - ServiceRoleArn="service_arn", - DefaultInstanceProfileArn="profile_arn" - )['StackId'] - - response = client.create_app( - StackId=stack_id, - Type="other", - Name="TestApp" - ) - - response.should.contain("AppId") - - second_stack_id = client.create_stack( - Name="test_stack_2", - Region="us-east-1", - ServiceRoleArn="service_arn", - DefaultInstanceProfileArn="profile_arn" - )['StackId'] - - response = client.create_app( - StackId=second_stack_id, - Type="other", - Name="TestApp" - ) - - response.should.contain("AppId") - - # ClientError - client.create_app.when.called_with( - StackId=stack_id, - Type="other", - Name="TestApp" - ).should.throw( - Exception, re.compile(r'already an app named "TestApp"') - ) - - # ClientError - client.create_app.when.called_with( - StackId="nothere", - Type="other", - Name="TestApp" - ).should.throw( - Exception, "nothere" - ) - -@freeze_time("2015-01-01") -@mock_opsworks -def test_describe_apps(): - client = boto3.client('opsworks', region_name='us-east-1') - stack_id = client.create_stack( - Name="test_stack_1", - Region="us-east-1", - ServiceRoleArn="service_arn", - DefaultInstanceProfileArn="profile_arn" - )['StackId'] - app_id = client.create_app( - StackId=stack_id, - Type="other", - Name="TestApp" - )['AppId'] - - rv1 = client.describe_apps(StackId=stack_id) - rv2 = client.describe_apps(AppIds=[app_id]) - rv1['Apps'].should.equal(rv2['Apps']) - - rv1['Apps'][0]['Name'].should.equal("TestApp") - - # ClientError - client.describe_apps.when.called_with( - StackId=stack_id, - AppIds=[app_id] - ).should.throw( - Exception, "Please provide one or more app IDs or a stack ID" - ) - # ClientError - client.describe_apps.when.called_with( - StackId="nothere" - ).should.throw( - Exception, "Unable to find stack with ID nothere" - ) - # ClientError - 
client.describe_apps.when.called_with( - AppIds=["nothere"] - ).should.throw( - Exception, "nothere" - ) +from __future__ import unicode_literals +import boto3 +from freezegun import freeze_time +import sure # noqa +import re + +from moto import mock_opsworks + + +@freeze_time("2015-01-01") +@mock_opsworks +def test_create_app_response(): + client = boto3.client('opsworks', region_name='us-east-1') + stack_id = client.create_stack( + Name="test_stack_1", + Region="us-east-1", + ServiceRoleArn="service_arn", + DefaultInstanceProfileArn="profile_arn" + )['StackId'] + + response = client.create_app( + StackId=stack_id, + Type="other", + Name="TestApp" + ) + + response.should.contain("AppId") + + second_stack_id = client.create_stack( + Name="test_stack_2", + Region="us-east-1", + ServiceRoleArn="service_arn", + DefaultInstanceProfileArn="profile_arn" + )['StackId'] + + response = client.create_app( + StackId=second_stack_id, + Type="other", + Name="TestApp" + ) + + response.should.contain("AppId") + + # ClientError + client.create_app.when.called_with( + StackId=stack_id, + Type="other", + Name="TestApp" + ).should.throw( + Exception, re.compile(r'already an app named "TestApp"') + ) + + # ClientError + client.create_app.when.called_with( + StackId="nothere", + Type="other", + Name="TestApp" + ).should.throw( + Exception, "nothere" + ) + +@freeze_time("2015-01-01") +@mock_opsworks +def test_describe_apps(): + client = boto3.client('opsworks', region_name='us-east-1') + stack_id = client.create_stack( + Name="test_stack_1", + Region="us-east-1", + ServiceRoleArn="service_arn", + DefaultInstanceProfileArn="profile_arn" + )['StackId'] + app_id = client.create_app( + StackId=stack_id, + Type="other", + Name="TestApp" + )['AppId'] + + rv1 = client.describe_apps(StackId=stack_id) + rv2 = client.describe_apps(AppIds=[app_id]) + rv1['Apps'].should.equal(rv2['Apps']) + + rv1['Apps'][0]['Name'].should.equal("TestApp") + + # ClientError + client.describe_apps.when.called_with( + StackId=stack_id, + AppIds=[app_id] + ).should.throw( + Exception, "Please provide one or more app IDs or a stack ID" + ) + # ClientError + client.describe_apps.when.called_with( + StackId="nothere" + ).should.throw( + Exception, "Unable to find stack with ID nothere" + ) + # ClientError + client.describe_apps.when.called_with( + AppIds=["nothere"] + ).should.throw( + Exception, "nothere" + ) diff --git a/tests/test_opsworks/test_instances.py b/tests/test_opsworks/test_instances.py index f594a87c8c6b..25260ad789c4 100644 --- a/tests/test_opsworks/test_instances.py +++ b/tests/test_opsworks/test_instances.py @@ -1,224 +1,224 @@ -from __future__ import unicode_literals -import boto3 -import sure # noqa - -from moto import mock_opsworks -from moto import mock_ec2 - - -@mock_opsworks -def test_create_instance(): - client = boto3.client('opsworks', region_name='us-east-1') - stack_id = client.create_stack( - Name="test_stack_1", - Region="us-east-1", - ServiceRoleArn="service_arn", - DefaultInstanceProfileArn="profile_arn" - )['StackId'] - - layer_id = client.create_layer( - StackId=stack_id, - Type="custom", - Name="TestLayer", - Shortname="TestLayerShortName" - )['LayerId'] - - second_stack_id = client.create_stack( - Name="test_stack_2", - Region="us-east-1", - ServiceRoleArn="service_arn", - DefaultInstanceProfileArn="profile_arn" - )['StackId'] - - second_layer_id = client.create_layer( - StackId=second_stack_id, - Type="custom", - Name="SecondTestLayer", - Shortname="SecondTestLayerShortName" - )['LayerId'] - - response = 
client.create_instance( - StackId=stack_id, LayerIds=[layer_id], InstanceType="t2.micro" - ) - - response.should.contain("InstanceId") - - client.create_instance.when.called_with( - StackId="nothere", LayerIds=[layer_id], InstanceType="t2.micro" - ).should.throw(Exception, "Unable to find stack with ID nothere") - - client.create_instance.when.called_with( - StackId=stack_id, LayerIds=["nothere"], InstanceType="t2.micro" - ).should.throw(Exception, "nothere") - # ClientError - client.create_instance.when.called_with( - StackId=stack_id, LayerIds=[second_layer_id], InstanceType="t2.micro" - ).should.throw(Exception, "Please only provide layer IDs from the same stack") - # ClientError - client.start_instance.when.called_with( - InstanceId="nothere" - ).should.throw(Exception, "Unable to find instance with ID nothere") - - -@mock_opsworks -def test_describe_instances(): - """ - create two stacks, with 1 layer and 2 layers (S1L1, S2L1, S2L2) - - populate S1L1 with 2 instances (S1L1_i1, S1L1_i2) - populate S2L1 with 1 instance (S2L1_i1) - populate S2L2 with 3 instances (S2L2_i1..2) - """ - - client = boto3.client('opsworks', region_name='us-east-1') - S1 = client.create_stack( - Name="S1", - Region="us-east-1", - ServiceRoleArn="service_arn", - DefaultInstanceProfileArn="profile_arn" - )['StackId'] - S1L1 = client.create_layer( - StackId=S1, - Type="custom", - Name="S1L1", - Shortname="S1L1" - )['LayerId'] - S2 = client.create_stack( - Name="S2", - Region="us-east-1", - ServiceRoleArn="service_arn", - DefaultInstanceProfileArn="profile_arn" - )['StackId'] - S2L1 = client.create_layer( - StackId=S2, - Type="custom", - Name="S2L1", - Shortname="S2L1" - )['LayerId'] - S2L2 = client.create_layer( - StackId=S2, - Type="custom", - Name="S2L2", - Shortname="S2L2" - )['LayerId'] - - S1L1_i1 = client.create_instance( - StackId=S1, LayerIds=[S1L1], InstanceType="t2.micro" - )['InstanceId'] - S1L1_i2 = client.create_instance( - StackId=S1, LayerIds=[S1L1], InstanceType="t2.micro" - )['InstanceId'] - S2L1_i1 = client.create_instance( - StackId=S2, LayerIds=[S2L1], InstanceType="t2.micro" - )['InstanceId'] - S2L2_i1 = client.create_instance( - StackId=S2, LayerIds=[S2L2], InstanceType="t2.micro" - )['InstanceId'] - S2L2_i2 = client.create_instance( - StackId=S2, LayerIds=[S2L2], InstanceType="t2.micro" - )['InstanceId'] - - # instances in Stack 1 - response = client.describe_instances(StackId=S1)['Instances'] - response.should.have.length_of(2) - S1L1_i1.should.be.within([i["InstanceId"] for i in response]) - S1L1_i2.should.be.within([i["InstanceId"] for i in response]) - - response2 = client.describe_instances( - InstanceIds=[S1L1_i1, S1L1_i2])['Instances'] - sorted(response2, key=lambda d: d['InstanceId']).should.equal( - sorted(response, key=lambda d: d['InstanceId'])) - - response3 = client.describe_instances(LayerId=S1L1)['Instances'] - sorted(response3, key=lambda d: d['InstanceId']).should.equal( - sorted(response, key=lambda d: d['InstanceId'])) - - response = client.describe_instances(StackId=S1)['Instances'] - response.should.have.length_of(2) - S1L1_i1.should.be.within([i["InstanceId"] for i in response]) - S1L1_i2.should.be.within([i["InstanceId"] for i in response]) - - # instances in Stack 2 - response = client.describe_instances(StackId=S2)['Instances'] - response.should.have.length_of(3) - S2L1_i1.should.be.within([i["InstanceId"] for i in response]) - S2L2_i1.should.be.within([i["InstanceId"] for i in response]) - S2L2_i2.should.be.within([i["InstanceId"] for i in response]) - - response = 
client.describe_instances(LayerId=S2L1)['Instances'] - response.should.have.length_of(1) - S2L1_i1.should.be.within([i["InstanceId"] for i in response]) - - response = client.describe_instances(LayerId=S2L2)['Instances'] - response.should.have.length_of(2) - S2L1_i1.should_not.be.within([i["InstanceId"] for i in response]) - - # ClientError - client.describe_instances.when.called_with( - StackId=S1, - LayerId=S1L1 - ).should.throw( - Exception, "Please provide either one or more" - ) - # ClientError - client.describe_instances.when.called_with( - StackId="nothere" - ).should.throw( - Exception, "nothere" - ) - # ClientError - client.describe_instances.when.called_with( - LayerId="nothere" - ).should.throw( - Exception, "nothere" - ) - # ClientError - client.describe_instances.when.called_with( - InstanceIds=["nothere"] - ).should.throw( - Exception, "nothere" - ) - - -@mock_opsworks -@mock_ec2 -def test_ec2_integration(): - """ - instances created via OpsWorks should be discoverable via ec2 - """ - - opsworks = boto3.client('opsworks', region_name='us-east-1') - stack_id = opsworks.create_stack( - Name="S1", - Region="us-east-1", - ServiceRoleArn="service_arn", - DefaultInstanceProfileArn="profile_arn" - )['StackId'] - - layer_id = opsworks.create_layer( - StackId=stack_id, - Type="custom", - Name="S1L1", - Shortname="S1L1" - )['LayerId'] - - instance_id = opsworks.create_instance( - StackId=stack_id, LayerIds=[layer_id], InstanceType="t2.micro", SshKeyName="testSSH" - )['InstanceId'] - - ec2 = boto3.client('ec2', region_name='us-east-1') - - # Before starting the instance, it shouldn't be discoverable via ec2 - reservations = ec2.describe_instances()['Reservations'] - assert reservations.should.be.empty - - # After starting the instance, it should be discoverable via ec2 - opsworks.start_instance(InstanceId=instance_id) - reservations = ec2.describe_instances()['Reservations'] - reservations[0]['Instances'].should.have.length_of(1) - instance = reservations[0]['Instances'][0] - opsworks_instance = opsworks.describe_instances(StackId=stack_id)[ - 'Instances'][0] - - instance['InstanceId'].should.equal(opsworks_instance['Ec2InstanceId']) - instance['PrivateIpAddress'].should.equal(opsworks_instance['PrivateIp']) +from __future__ import unicode_literals +import boto3 +import sure # noqa + +from moto import mock_opsworks +from moto import mock_ec2 + + +@mock_opsworks +def test_create_instance(): + client = boto3.client('opsworks', region_name='us-east-1') + stack_id = client.create_stack( + Name="test_stack_1", + Region="us-east-1", + ServiceRoleArn="service_arn", + DefaultInstanceProfileArn="profile_arn" + )['StackId'] + + layer_id = client.create_layer( + StackId=stack_id, + Type="custom", + Name="TestLayer", + Shortname="TestLayerShortName" + )['LayerId'] + + second_stack_id = client.create_stack( + Name="test_stack_2", + Region="us-east-1", + ServiceRoleArn="service_arn", + DefaultInstanceProfileArn="profile_arn" + )['StackId'] + + second_layer_id = client.create_layer( + StackId=second_stack_id, + Type="custom", + Name="SecondTestLayer", + Shortname="SecondTestLayerShortName" + )['LayerId'] + + response = client.create_instance( + StackId=stack_id, LayerIds=[layer_id], InstanceType="t2.micro" + ) + + response.should.contain("InstanceId") + + client.create_instance.when.called_with( + StackId="nothere", LayerIds=[layer_id], InstanceType="t2.micro" + ).should.throw(Exception, "Unable to find stack with ID nothere") + + client.create_instance.when.called_with( + StackId=stack_id, 
LayerIds=["nothere"], InstanceType="t2.micro" + ).should.throw(Exception, "nothere") + # ClientError + client.create_instance.when.called_with( + StackId=stack_id, LayerIds=[second_layer_id], InstanceType="t2.micro" + ).should.throw(Exception, "Please only provide layer IDs from the same stack") + # ClientError + client.start_instance.when.called_with( + InstanceId="nothere" + ).should.throw(Exception, "Unable to find instance with ID nothere") + + +@mock_opsworks +def test_describe_instances(): + """ + create two stacks, with 1 layer and 2 layers (S1L1, S2L1, S2L2) + + populate S1L1 with 2 instances (S1L1_i1, S1L1_i2) + populate S2L1 with 1 instance (S2L1_i1) + populate S2L2 with 3 instances (S2L2_i1..2) + """ + + client = boto3.client('opsworks', region_name='us-east-1') + S1 = client.create_stack( + Name="S1", + Region="us-east-1", + ServiceRoleArn="service_arn", + DefaultInstanceProfileArn="profile_arn" + )['StackId'] + S1L1 = client.create_layer( + StackId=S1, + Type="custom", + Name="S1L1", + Shortname="S1L1" + )['LayerId'] + S2 = client.create_stack( + Name="S2", + Region="us-east-1", + ServiceRoleArn="service_arn", + DefaultInstanceProfileArn="profile_arn" + )['StackId'] + S2L1 = client.create_layer( + StackId=S2, + Type="custom", + Name="S2L1", + Shortname="S2L1" + )['LayerId'] + S2L2 = client.create_layer( + StackId=S2, + Type="custom", + Name="S2L2", + Shortname="S2L2" + )['LayerId'] + + S1L1_i1 = client.create_instance( + StackId=S1, LayerIds=[S1L1], InstanceType="t2.micro" + )['InstanceId'] + S1L1_i2 = client.create_instance( + StackId=S1, LayerIds=[S1L1], InstanceType="t2.micro" + )['InstanceId'] + S2L1_i1 = client.create_instance( + StackId=S2, LayerIds=[S2L1], InstanceType="t2.micro" + )['InstanceId'] + S2L2_i1 = client.create_instance( + StackId=S2, LayerIds=[S2L2], InstanceType="t2.micro" + )['InstanceId'] + S2L2_i2 = client.create_instance( + StackId=S2, LayerIds=[S2L2], InstanceType="t2.micro" + )['InstanceId'] + + # instances in Stack 1 + response = client.describe_instances(StackId=S1)['Instances'] + response.should.have.length_of(2) + S1L1_i1.should.be.within([i["InstanceId"] for i in response]) + S1L1_i2.should.be.within([i["InstanceId"] for i in response]) + + response2 = client.describe_instances( + InstanceIds=[S1L1_i1, S1L1_i2])['Instances'] + sorted(response2, key=lambda d: d['InstanceId']).should.equal( + sorted(response, key=lambda d: d['InstanceId'])) + + response3 = client.describe_instances(LayerId=S1L1)['Instances'] + sorted(response3, key=lambda d: d['InstanceId']).should.equal( + sorted(response, key=lambda d: d['InstanceId'])) + + response = client.describe_instances(StackId=S1)['Instances'] + response.should.have.length_of(2) + S1L1_i1.should.be.within([i["InstanceId"] for i in response]) + S1L1_i2.should.be.within([i["InstanceId"] for i in response]) + + # instances in Stack 2 + response = client.describe_instances(StackId=S2)['Instances'] + response.should.have.length_of(3) + S2L1_i1.should.be.within([i["InstanceId"] for i in response]) + S2L2_i1.should.be.within([i["InstanceId"] for i in response]) + S2L2_i2.should.be.within([i["InstanceId"] for i in response]) + + response = client.describe_instances(LayerId=S2L1)['Instances'] + response.should.have.length_of(1) + S2L1_i1.should.be.within([i["InstanceId"] for i in response]) + + response = client.describe_instances(LayerId=S2L2)['Instances'] + response.should.have.length_of(2) + S2L1_i1.should_not.be.within([i["InstanceId"] for i in response]) + + # ClientError + 
client.describe_instances.when.called_with( + StackId=S1, + LayerId=S1L1 + ).should.throw( + Exception, "Please provide either one or more" + ) + # ClientError + client.describe_instances.when.called_with( + StackId="nothere" + ).should.throw( + Exception, "nothere" + ) + # ClientError + client.describe_instances.when.called_with( + LayerId="nothere" + ).should.throw( + Exception, "nothere" + ) + # ClientError + client.describe_instances.when.called_with( + InstanceIds=["nothere"] + ).should.throw( + Exception, "nothere" + ) + + +@mock_opsworks +@mock_ec2 +def test_ec2_integration(): + """ + instances created via OpsWorks should be discoverable via ec2 + """ + + opsworks = boto3.client('opsworks', region_name='us-east-1') + stack_id = opsworks.create_stack( + Name="S1", + Region="us-east-1", + ServiceRoleArn="service_arn", + DefaultInstanceProfileArn="profile_arn" + )['StackId'] + + layer_id = opsworks.create_layer( + StackId=stack_id, + Type="custom", + Name="S1L1", + Shortname="S1L1" + )['LayerId'] + + instance_id = opsworks.create_instance( + StackId=stack_id, LayerIds=[layer_id], InstanceType="t2.micro", SshKeyName="testSSH" + )['InstanceId'] + + ec2 = boto3.client('ec2', region_name='us-east-1') + + # Before starting the instance, it shouldn't be discoverable via ec2 + reservations = ec2.describe_instances()['Reservations'] + assert reservations.should.be.empty + + # After starting the instance, it should be discoverable via ec2 + opsworks.start_instance(InstanceId=instance_id) + reservations = ec2.describe_instances()['Reservations'] + reservations[0]['Instances'].should.have.length_of(1) + instance = reservations[0]['Instances'][0] + opsworks_instance = opsworks.describe_instances(StackId=stack_id)[ + 'Instances'][0] + + instance['InstanceId'].should.equal(opsworks_instance['Ec2InstanceId']) + instance['PrivateIpAddress'].should.equal(opsworks_instance['PrivateIp']) diff --git a/tests/test_opsworks/test_layers.py b/tests/test_opsworks/test_layers.py index 9c640dfc352b..035c246e2f1a 100644 --- a/tests/test_opsworks/test_layers.py +++ b/tests/test_opsworks/test_layers.py @@ -1,117 +1,117 @@ -from __future__ import unicode_literals -import boto3 -from freezegun import freeze_time -import sure # noqa -import re - -from moto import mock_opsworks - - -@freeze_time("2015-01-01") -@mock_opsworks -def test_create_layer_response(): - client = boto3.client('opsworks', region_name='us-east-1') - stack_id = client.create_stack( - Name="test_stack_1", - Region="us-east-1", - ServiceRoleArn="service_arn", - DefaultInstanceProfileArn="profile_arn" - )['StackId'] - - response = client.create_layer( - StackId=stack_id, - Type="custom", - Name="TestLayer", - Shortname="TestLayerShortName" - ) - - response.should.contain("LayerId") - - second_stack_id = client.create_stack( - Name="test_stack_2", - Region="us-east-1", - ServiceRoleArn="service_arn", - DefaultInstanceProfileArn="profile_arn" - )['StackId'] - - response = client.create_layer( - StackId=second_stack_id, - Type="custom", - Name="TestLayer", - Shortname="TestLayerShortName" - ) - - response.should.contain("LayerId") - - # ClientError - client.create_layer.when.called_with( - StackId=stack_id, - Type="custom", - Name="TestLayer", - Shortname="_" - ).should.throw( - Exception, re.compile(r'already a layer named "TestLayer"') - ) - # ClientError - client.create_layer.when.called_with( - StackId=stack_id, - Type="custom", - Name="_", - Shortname="TestLayerShortName" - ).should.throw( - Exception, re.compile( - r'already a layer with shortname 
"TestLayerShortName"') - ) - # ClientError - client.create_layer.when.called_with( - StackId="nothere", - Type="custom", - Name="TestLayer", - Shortname="_" - ).should.throw( - Exception, "nothere" - ) - - -@freeze_time("2015-01-01") -@mock_opsworks -def test_describe_layers(): - client = boto3.client('opsworks', region_name='us-east-1') - stack_id = client.create_stack( - Name="test_stack_1", - Region="us-east-1", - ServiceRoleArn="service_arn", - DefaultInstanceProfileArn="profile_arn" - )['StackId'] - layer_id = client.create_layer( - StackId=stack_id, - Type="custom", - Name="TestLayer", - Shortname="TestLayerShortName" - )['LayerId'] - - rv1 = client.describe_layers(StackId=stack_id) - rv2 = client.describe_layers(LayerIds=[layer_id]) - rv1['Layers'].should.equal(rv2['Layers']) - - rv1['Layers'][0]['Name'].should.equal("TestLayer") - - # ClientError - client.describe_layers.when.called_with( - StackId=stack_id, - LayerIds=[layer_id] - ).should.throw( - Exception, "Please provide one or more layer IDs or a stack ID" - ) - # ClientError - client.describe_layers.when.called_with( - StackId="nothere" - ).should.throw( - Exception, "Unable to find stack with ID nothere" - ) - # ClientError - client.describe_layers.when.called_with( - LayerIds=["nothere"] - ).should.throw( - Exception, "nothere" - ) +from __future__ import unicode_literals +import boto3 +from freezegun import freeze_time +import sure # noqa +import re + +from moto import mock_opsworks + + +@freeze_time("2015-01-01") +@mock_opsworks +def test_create_layer_response(): + client = boto3.client('opsworks', region_name='us-east-1') + stack_id = client.create_stack( + Name="test_stack_1", + Region="us-east-1", + ServiceRoleArn="service_arn", + DefaultInstanceProfileArn="profile_arn" + )['StackId'] + + response = client.create_layer( + StackId=stack_id, + Type="custom", + Name="TestLayer", + Shortname="TestLayerShortName" + ) + + response.should.contain("LayerId") + + second_stack_id = client.create_stack( + Name="test_stack_2", + Region="us-east-1", + ServiceRoleArn="service_arn", + DefaultInstanceProfileArn="profile_arn" + )['StackId'] + + response = client.create_layer( + StackId=second_stack_id, + Type="custom", + Name="TestLayer", + Shortname="TestLayerShortName" + ) + + response.should.contain("LayerId") + + # ClientError + client.create_layer.when.called_with( + StackId=stack_id, + Type="custom", + Name="TestLayer", + Shortname="_" + ).should.throw( + Exception, re.compile(r'already a layer named "TestLayer"') + ) + # ClientError + client.create_layer.when.called_with( + StackId=stack_id, + Type="custom", + Name="_", + Shortname="TestLayerShortName" + ).should.throw( + Exception, re.compile( + r'already a layer with shortname "TestLayerShortName"') + ) + # ClientError + client.create_layer.when.called_with( + StackId="nothere", + Type="custom", + Name="TestLayer", + Shortname="_" + ).should.throw( + Exception, "nothere" + ) + + +@freeze_time("2015-01-01") +@mock_opsworks +def test_describe_layers(): + client = boto3.client('opsworks', region_name='us-east-1') + stack_id = client.create_stack( + Name="test_stack_1", + Region="us-east-1", + ServiceRoleArn="service_arn", + DefaultInstanceProfileArn="profile_arn" + )['StackId'] + layer_id = client.create_layer( + StackId=stack_id, + Type="custom", + Name="TestLayer", + Shortname="TestLayerShortName" + )['LayerId'] + + rv1 = client.describe_layers(StackId=stack_id) + rv2 = client.describe_layers(LayerIds=[layer_id]) + rv1['Layers'].should.equal(rv2['Layers']) + + 
rv1['Layers'][0]['Name'].should.equal("TestLayer") + + # ClientError + client.describe_layers.when.called_with( + StackId=stack_id, + LayerIds=[layer_id] + ).should.throw( + Exception, "Please provide one or more layer IDs or a stack ID" + ) + # ClientError + client.describe_layers.when.called_with( + StackId="nothere" + ).should.throw( + Exception, "Unable to find stack with ID nothere" + ) + # ClientError + client.describe_layers.when.called_with( + LayerIds=["nothere"] + ).should.throw( + Exception, "nothere" + ) diff --git a/tests/test_opsworks/test_stack.py b/tests/test_opsworks/test_stack.py index 5913ce6d5e8f..2a1b6cc67388 100644 --- a/tests/test_opsworks/test_stack.py +++ b/tests/test_opsworks/test_stack.py @@ -1,46 +1,46 @@ -from __future__ import unicode_literals -import boto3 -import sure # noqa -import re - -from moto import mock_opsworks - - -@mock_opsworks -def test_create_stack_response(): - client = boto3.client('opsworks', region_name='us-east-1') - response = client.create_stack( - Name="test_stack_1", - Region="us-east-1", - ServiceRoleArn="service_arn", - DefaultInstanceProfileArn="profile_arn" - ) - response.should.contain("StackId") - - -@mock_opsworks -def test_describe_stacks(): - client = boto3.client('opsworks', region_name='us-east-1') - for i in range(1, 4): - client.create_stack( - Name="test_stack_{0}".format(i), - Region="us-east-1", - ServiceRoleArn="service_arn", - DefaultInstanceProfileArn="profile_arn" - ) - - response = client.describe_stacks() - response['Stacks'].should.have.length_of(3) - for stack in response['Stacks']: - stack['ServiceRoleArn'].should.equal("service_arn") - stack['DefaultInstanceProfileArn'].should.equal("profile_arn") - - _id = response['Stacks'][0]['StackId'] - response = client.describe_stacks(StackIds=[_id]) - response['Stacks'].should.have.length_of(1) - response['Stacks'][0]['Arn'].should.contain(_id) - - # ClientError/ResourceNotFoundException - client.describe_stacks.when.called_with(StackIds=["foo"]).should.throw( - Exception, re.compile(r'foo') - ) +from __future__ import unicode_literals +import boto3 +import sure # noqa +import re + +from moto import mock_opsworks + + +@mock_opsworks +def test_create_stack_response(): + client = boto3.client('opsworks', region_name='us-east-1') + response = client.create_stack( + Name="test_stack_1", + Region="us-east-1", + ServiceRoleArn="service_arn", + DefaultInstanceProfileArn="profile_arn" + ) + response.should.contain("StackId") + + +@mock_opsworks +def test_describe_stacks(): + client = boto3.client('opsworks', region_name='us-east-1') + for i in range(1, 4): + client.create_stack( + Name="test_stack_{0}".format(i), + Region="us-east-1", + ServiceRoleArn="service_arn", + DefaultInstanceProfileArn="profile_arn" + ) + + response = client.describe_stacks() + response['Stacks'].should.have.length_of(3) + for stack in response['Stacks']: + stack['ServiceRoleArn'].should.equal("service_arn") + stack['DefaultInstanceProfileArn'].should.equal("profile_arn") + + _id = response['Stacks'][0]['StackId'] + response = client.describe_stacks(StackIds=[_id]) + response['Stacks'].should.have.length_of(1) + response['Stacks'][0]['Arn'].should.contain(_id) + + # ClientError/ResourceNotFoundException + client.describe_stacks.when.called_with(StackIds=["foo"]).should.throw( + Exception, re.compile(r'foo') + ) diff --git a/tests/test_organizations/organizations_test_utils.py b/tests/test_organizations/organizations_test_utils.py index 6548b1830c4a..1f21eee74fcb 100644 --- 
a/tests/test_organizations/organizations_test_utils.py +++ b/tests/test_organizations/organizations_test_utils.py @@ -1,136 +1,136 @@ -from __future__ import unicode_literals - -import six -import sure # noqa -import datetime -from moto.organizations import utils - -EMAIL_REGEX = "^.+@[a-zA-Z0-9-.]+.[a-zA-Z]{2,3}|[0-9]{1,3}$" -ORG_ID_REGEX = r'o-[a-z0-9]{%s}' % utils.ORG_ID_SIZE -ROOT_ID_REGEX = r'r-[a-z0-9]{%s}' % utils.ROOT_ID_SIZE -OU_ID_REGEX = r'ou-[a-z0-9]{%s}-[a-z0-9]{%s}' % (utils.ROOT_ID_SIZE, utils.OU_ID_SUFFIX_SIZE) -ACCOUNT_ID_REGEX = r'[0-9]{%s}' % utils.ACCOUNT_ID_SIZE -CREATE_ACCOUNT_STATUS_ID_REGEX = r'car-[a-z0-9]{%s}' % utils.CREATE_ACCOUNT_STATUS_ID_SIZE - - -def test_make_random_org_id(): - org_id = utils.make_random_org_id() - org_id.should.match(ORG_ID_REGEX) - - -def test_make_random_root_id(): - root_id = utils.make_random_root_id() - root_id.should.match(ROOT_ID_REGEX) - - -def test_make_random_ou_id(): - root_id = utils.make_random_root_id() - ou_id = utils.make_random_ou_id(root_id) - ou_id.should.match(OU_ID_REGEX) - - -def test_make_random_account_id(): - account_id = utils.make_random_account_id() - account_id.should.match(ACCOUNT_ID_REGEX) - - -def test_make_random_create_account_status_id(): - create_account_status_id = utils.make_random_create_account_status_id() - create_account_status_id.should.match(CREATE_ACCOUNT_STATUS_ID_REGEX) - - -def validate_organization(response): - org = response['Organization'] - sorted(org.keys()).should.equal([ - 'Arn', - 'AvailablePolicyTypes', - 'FeatureSet', - 'Id', - 'MasterAccountArn', - 'MasterAccountEmail', - 'MasterAccountId', - ]) - org['Id'].should.match(ORG_ID_REGEX) - org['MasterAccountId'].should.equal(utils.MASTER_ACCOUNT_ID) - org['MasterAccountArn'].should.equal(utils.MASTER_ACCOUNT_ARN_FORMAT.format( - org['MasterAccountId'], - org['Id'], - )) - org['Arn'].should.equal(utils.ORGANIZATION_ARN_FORMAT.format( - org['MasterAccountId'], - org['Id'], - )) - org['MasterAccountEmail'].should.equal(utils.MASTER_ACCOUNT_EMAIL) - org['FeatureSet'].should.be.within(['ALL', 'CONSOLIDATED_BILLING']) - org['AvailablePolicyTypes'].should.equal([{ - 'Type': 'SERVICE_CONTROL_POLICY', - 'Status': 'ENABLED' - }]) - - -def validate_roots(org, response): - response.should.have.key('Roots').should.be.a(list) - response['Roots'].should_not.be.empty - root = response['Roots'][0] - root.should.have.key('Id').should.match(ROOT_ID_REGEX) - root.should.have.key('Arn').should.equal(utils.ROOT_ARN_FORMAT.format( - org['MasterAccountId'], - org['Id'], - root['Id'], - )) - root.should.have.key('Name').should.be.a(six.string_types) - root.should.have.key('PolicyTypes').should.be.a(list) - root['PolicyTypes'][0].should.have.key('Type').should.equal('SERVICE_CONTROL_POLICY') - root['PolicyTypes'][0].should.have.key('Status').should.equal('ENABLED') - - -def validate_organizational_unit(org, response): - response.should.have.key('OrganizationalUnit').should.be.a(dict) - ou = response['OrganizationalUnit'] - ou.should.have.key('Id').should.match(OU_ID_REGEX) - ou.should.have.key('Arn').should.equal(utils.OU_ARN_FORMAT.format( - org['MasterAccountId'], - org['Id'], - ou['Id'], - )) - ou.should.have.key('Name').should.be.a(six.string_types) - - -def validate_account(org, account): - sorted(account.keys()).should.equal([ - 'Arn', - 'Email', - 'Id', - 'JoinedMethod', - 'JoinedTimestamp', - 'Name', - 'Status', - ]) - account['Id'].should.match(ACCOUNT_ID_REGEX) - account['Arn'].should.equal(utils.ACCOUNT_ARN_FORMAT.format( - org['MasterAccountId'], - 
org['Id'], - account['Id'], - )) - account['Email'].should.match(EMAIL_REGEX) - account['JoinedMethod'].should.be.within(['INVITED', 'CREATED']) - account['Status'].should.be.within(['ACTIVE', 'SUSPENDED']) - account['Name'].should.be.a(six.string_types) - account['JoinedTimestamp'].should.be.a(datetime.datetime) - - -def validate_create_account_status(create_status): - sorted(create_status.keys()).should.equal([ - 'AccountId', - 'AccountName', - 'CompletedTimestamp', - 'Id', - 'RequestedTimestamp', - 'State', - ]) - create_status['Id'].should.match(CREATE_ACCOUNT_STATUS_ID_REGEX) - create_status['AccountId'].should.match(ACCOUNT_ID_REGEX) - create_status['AccountName'].should.be.a(six.string_types) - create_status['State'].should.equal('SUCCEEDED') - create_status['RequestedTimestamp'].should.be.a(datetime.datetime) - create_status['CompletedTimestamp'].should.be.a(datetime.datetime) +from __future__ import unicode_literals + +import six +import sure # noqa +import datetime +from moto.organizations import utils + +EMAIL_REGEX = "^.+@[a-zA-Z0-9-.]+.[a-zA-Z]{2,3}|[0-9]{1,3}$" +ORG_ID_REGEX = r'o-[a-z0-9]{%s}' % utils.ORG_ID_SIZE +ROOT_ID_REGEX = r'r-[a-z0-9]{%s}' % utils.ROOT_ID_SIZE +OU_ID_REGEX = r'ou-[a-z0-9]{%s}-[a-z0-9]{%s}' % (utils.ROOT_ID_SIZE, utils.OU_ID_SUFFIX_SIZE) +ACCOUNT_ID_REGEX = r'[0-9]{%s}' % utils.ACCOUNT_ID_SIZE +CREATE_ACCOUNT_STATUS_ID_REGEX = r'car-[a-z0-9]{%s}' % utils.CREATE_ACCOUNT_STATUS_ID_SIZE + + +def test_make_random_org_id(): + org_id = utils.make_random_org_id() + org_id.should.match(ORG_ID_REGEX) + + +def test_make_random_root_id(): + root_id = utils.make_random_root_id() + root_id.should.match(ROOT_ID_REGEX) + + +def test_make_random_ou_id(): + root_id = utils.make_random_root_id() + ou_id = utils.make_random_ou_id(root_id) + ou_id.should.match(OU_ID_REGEX) + + +def test_make_random_account_id(): + account_id = utils.make_random_account_id() + account_id.should.match(ACCOUNT_ID_REGEX) + + +def test_make_random_create_account_status_id(): + create_account_status_id = utils.make_random_create_account_status_id() + create_account_status_id.should.match(CREATE_ACCOUNT_STATUS_ID_REGEX) + + +def validate_organization(response): + org = response['Organization'] + sorted(org.keys()).should.equal([ + 'Arn', + 'AvailablePolicyTypes', + 'FeatureSet', + 'Id', + 'MasterAccountArn', + 'MasterAccountEmail', + 'MasterAccountId', + ]) + org['Id'].should.match(ORG_ID_REGEX) + org['MasterAccountId'].should.equal(utils.MASTER_ACCOUNT_ID) + org['MasterAccountArn'].should.equal(utils.MASTER_ACCOUNT_ARN_FORMAT.format( + org['MasterAccountId'], + org['Id'], + )) + org['Arn'].should.equal(utils.ORGANIZATION_ARN_FORMAT.format( + org['MasterAccountId'], + org['Id'], + )) + org['MasterAccountEmail'].should.equal(utils.MASTER_ACCOUNT_EMAIL) + org['FeatureSet'].should.be.within(['ALL', 'CONSOLIDATED_BILLING']) + org['AvailablePolicyTypes'].should.equal([{ + 'Type': 'SERVICE_CONTROL_POLICY', + 'Status': 'ENABLED' + }]) + + +def validate_roots(org, response): + response.should.have.key('Roots').should.be.a(list) + response['Roots'].should_not.be.empty + root = response['Roots'][0] + root.should.have.key('Id').should.match(ROOT_ID_REGEX) + root.should.have.key('Arn').should.equal(utils.ROOT_ARN_FORMAT.format( + org['MasterAccountId'], + org['Id'], + root['Id'], + )) + root.should.have.key('Name').should.be.a(six.string_types) + root.should.have.key('PolicyTypes').should.be.a(list) + root['PolicyTypes'][0].should.have.key('Type').should.equal('SERVICE_CONTROL_POLICY') + 
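# (taken together, the Type assertion above and the Status assertion below + # pin the seeded root policy entry to + # {'Type': 'SERVICE_CONTROL_POLICY', 'Status': 'ENABLED'}) + 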
root['PolicyTypes'][0].should.have.key('Status').should.equal('ENABLED') + + +def validate_organizational_unit(org, response): + response.should.have.key('OrganizationalUnit').should.be.a(dict) + ou = response['OrganizationalUnit'] + ou.should.have.key('Id').should.match(OU_ID_REGEX) + ou.should.have.key('Arn').should.equal(utils.OU_ARN_FORMAT.format( + org['MasterAccountId'], + org['Id'], + ou['Id'], + )) + ou.should.have.key('Name').should.be.a(six.string_types) + + +def validate_account(org, account): + sorted(account.keys()).should.equal([ + 'Arn', + 'Email', + 'Id', + 'JoinedMethod', + 'JoinedTimestamp', + 'Name', + 'Status', + ]) + account['Id'].should.match(ACCOUNT_ID_REGEX) + account['Arn'].should.equal(utils.ACCOUNT_ARN_FORMAT.format( + org['MasterAccountId'], + org['Id'], + account['Id'], + )) + account['Email'].should.match(EMAIL_REGEX) + account['JoinedMethod'].should.be.within(['INVITED', 'CREATED']) + account['Status'].should.be.within(['ACTIVE', 'SUSPENDED']) + account['Name'].should.be.a(six.string_types) + account['JoinedTimestamp'].should.be.a(datetime.datetime) + + +def validate_create_account_status(create_status): + sorted(create_status.keys()).should.equal([ + 'AccountId', + 'AccountName', + 'CompletedTimestamp', + 'Id', + 'RequestedTimestamp', + 'State', + ]) + create_status['Id'].should.match(CREATE_ACCOUNT_STATUS_ID_REGEX) + create_status['AccountId'].should.match(ACCOUNT_ID_REGEX) + create_status['AccountName'].should.be.a(six.string_types) + create_status['State'].should.equal('SUCCEEDED') + create_status['RequestedTimestamp'].should.be.a(datetime.datetime) + create_status['CompletedTimestamp'].should.be.a(datetime.datetime) diff --git a/tests/test_organizations/test_organizations_boto3.py b/tests/test_organizations/test_organizations_boto3.py index dfac5feeb294..ea3e17962fd5 100644 --- a/tests/test_organizations/test_organizations_boto3.py +++ b/tests/test_organizations/test_organizations_boto3.py @@ -1,322 +1,322 @@ -from __future__ import unicode_literals - -import boto3 -import sure # noqa -from botocore.exceptions import ClientError -from nose.tools import assert_raises - -from moto import mock_organizations -from moto.organizations import utils -from .organizations_test_utils import ( - validate_organization, - validate_roots, - validate_organizational_unit, - validate_account, - validate_create_account_status, -) - - -@mock_organizations -def test_create_organization(): - client = boto3.client('organizations', region_name='us-east-1') - response = client.create_organization(FeatureSet='ALL') - validate_organization(response) - response['Organization']['FeatureSet'].should.equal('ALL') - - -@mock_organizations -def test_describe_organization(): - client = boto3.client('organizations', region_name='us-east-1') - client.create_organization(FeatureSet='ALL') - response = client.describe_organization() - validate_organization(response) - - -@mock_organizations -def test_describe_organization_exception(): - client = boto3.client('organizations', region_name='us-east-1') - with assert_raises(ClientError) as e: - response = client.describe_organization() - ex = e.exception - ex.operation_name.should.equal('DescribeOrganization') - ex.response['Error']['Code'].should.equal('400') - ex.response['Error']['Message'].should.contain('AWSOrganizationsNotInUseException') - - -# Organizational Units - -@mock_organizations -def test_list_roots(): - client = boto3.client('organizations', region_name='us-east-1') - org = 
client.create_organization(FeatureSet='ALL')['Organization'] - response = client.list_roots() - validate_roots(org, response) - - -@mock_organizations -def test_create_organizational_unit(): - client = boto3.client('organizations', region_name='us-east-1') - org = client.create_organization(FeatureSet='ALL')['Organization'] - root_id = client.list_roots()['Roots'][0]['Id'] - ou_name = 'ou01' - response = client.create_organizational_unit( - ParentId=root_id, - Name=ou_name, - ) - validate_organizational_unit(org, response) - response['OrganizationalUnit']['Name'].should.equal(ou_name) - - -@mock_organizations -def test_describe_organizational_unit(): - client = boto3.client('organizations', region_name='us-east-1') - org = client.create_organization(FeatureSet='ALL')['Organization'] - root_id = client.list_roots()['Roots'][0]['Id'] - ou_id = client.create_organizational_unit( - ParentId=root_id, - Name='ou01', - )['OrganizationalUnit']['Id'] - response = client.describe_organizational_unit(OrganizationalUnitId=ou_id) - validate_organizational_unit(org, response) - - -@mock_organizations -def test_describe_organizational_unit_exception(): - client = boto3.client('organizations', region_name='us-east-1') - org = client.create_organization(FeatureSet='ALL')['Organization'] - with assert_raises(ClientError) as e: - response = client.describe_organizational_unit( - OrganizationalUnitId=utils.make_random_root_id() - ) - ex = e.exception - ex.operation_name.should.equal('DescribeOrganizationalUnit') - ex.response['Error']['Code'].should.equal('400') - ex.response['Error']['Message'].should.contain('OrganizationalUnitNotFoundException') - - -@mock_organizations -def test_list_organizational_units_for_parent(): - client = boto3.client('organizations', region_name='us-east-1') - org = client.create_organization(FeatureSet='ALL')['Organization'] - root_id = client.list_roots()['Roots'][0]['Id'] - client.create_organizational_unit(ParentId=root_id, Name='ou01') - client.create_organizational_unit(ParentId=root_id, Name='ou02') - client.create_organizational_unit(ParentId=root_id, Name='ou03') - response = client.list_organizational_units_for_parent(ParentId=root_id) - response.should.have.key('OrganizationalUnits').should.be.a(list) - for ou in response['OrganizationalUnits']: - validate_organizational_unit(org, dict(OrganizationalUnit=ou)) - - -@mock_organizations -def test_list_organizational_units_for_parent_exception(): - client = boto3.client('organizations', region_name='us-east-1') - with assert_raises(ClientError) as e: - response = client.list_organizational_units_for_parent( - ParentId=utils.make_random_root_id() - ) - ex = e.exception - ex.operation_name.should.equal('ListOrganizationalUnitsForParent') - ex.response['Error']['Code'].should.equal('400') - ex.response['Error']['Message'].should.contain('ParentNotFoundException') - - -# Accounts -mockname = 'mock-account' -mockdomain = 'moto-example.org' -mockemail = '@'.join([mockname, mockdomain]) - - -@mock_organizations -def test_create_account(): - client = boto3.client('organizations', region_name='us-east-1') - client.create_organization(FeatureSet='ALL') - create_status = client.create_account( - AccountName=mockname, Email=mockemail - )['CreateAccountStatus'] - validate_create_account_status(create_status) - create_status['AccountName'].should.equal(mockname) - - -@mock_organizations -def test_describe_account(): - client = boto3.client('organizations', region_name='us-east-1') - org = 
client.create_organization(FeatureSet='ALL')['Organization'] - account_id = client.create_account( - AccountName=mockname, Email=mockemail - )['CreateAccountStatus']['AccountId'] - response = client.describe_account(AccountId=account_id) - validate_account(org, response['Account']) - response['Account']['Name'].should.equal(mockname) - response['Account']['Email'].should.equal(mockemail) - - -@mock_organizations -def test_describe_account_exception(): - client = boto3.client('organizations', region_name='us-east-1') - with assert_raises(ClientError) as e: - response = client.describe_account(AccountId=utils.make_random_account_id()) - ex = e.exception - ex.operation_name.should.equal('DescribeAccount') - ex.response['Error']['Code'].should.equal('400') - ex.response['Error']['Message'].should.contain('AccountNotFoundException') - - -@mock_organizations -def test_list_accounts(): - client = boto3.client('organizations', region_name='us-east-1') - org = client.create_organization(FeatureSet='ALL')['Organization'] - for i in range(5): - name = mockname + str(i) - email = name + '@' + mockdomain - client.create_account(AccountName=name, Email=email) - response = client.list_accounts() - response.should.have.key('Accounts') - accounts = response['Accounts'] - len(accounts).should.equal(5) - for account in accounts: - validate_account(org, account) - accounts[3]['Name'].should.equal(mockname + '3') - accounts[2]['Email'].should.equal(mockname + '2' + '@' + mockdomain) - - -@mock_organizations -def test_list_accounts_for_parent(): - client = boto3.client('organizations', region_name='us-east-1') - org = client.create_organization(FeatureSet='ALL')['Organization'] - root_id = client.list_roots()['Roots'][0]['Id'] - account_id = client.create_account( - AccountName=mockname, - Email=mockemail, - )['CreateAccountStatus']['AccountId'] - response = client.list_accounts_for_parent(ParentId=root_id) - account_id.should.be.within([account['Id'] for account in response['Accounts']]) - - -@mock_organizations -def test_move_account(): - client = boto3.client('organizations', region_name='us-east-1') - org = client.create_organization(FeatureSet='ALL')['Organization'] - root_id = client.list_roots()['Roots'][0]['Id'] - account_id = client.create_account( - AccountName=mockname, Email=mockemail - )['CreateAccountStatus']['AccountId'] - ou01 = client.create_organizational_unit(ParentId=root_id, Name='ou01') - ou01_id = ou01['OrganizationalUnit']['Id'] - client.move_account( - AccountId=account_id, - SourceParentId=root_id, - DestinationParentId=ou01_id, - ) - response = client.list_accounts_for_parent(ParentId=ou01_id) - account_id.should.be.within([account['Id'] for account in response['Accounts']]) - - -@mock_organizations -def test_list_parents_for_ou(): - client = boto3.client('organizations', region_name='us-east-1') - org = client.create_organization(FeatureSet='ALL')['Organization'] - root_id = client.list_roots()['Roots'][0]['Id'] - ou01 = client.create_organizational_unit(ParentId=root_id, Name='ou01') - ou01_id = ou01['OrganizationalUnit']['Id'] - response01 = client.list_parents(ChildId=ou01_id) - response01.should.have.key('Parents').should.be.a(list) - response01['Parents'][0].should.have.key('Id').should.equal(root_id) - response01['Parents'][0].should.have.key('Type').should.equal('ROOT') - ou02 = client.create_organizational_unit(ParentId=ou01_id, Name='ou02') - ou02_id = ou02['OrganizationalUnit']['Id'] - response02 = client.list_parents(ChildId=ou02_id) - 
response02.should.have.key('Parents').should.be.a(list) - response02['Parents'][0].should.have.key('Id').should.equal(ou01_id) - response02['Parents'][0].should.have.key('Type').should.equal('ORGANIZATIONAL_UNIT') - - -@mock_organizations -def test_list_parents_for_accounts(): - client = boto3.client('organizations', region_name='us-east-1') - org = client.create_organization(FeatureSet='ALL')['Organization'] - root_id = client.list_roots()['Roots'][0]['Id'] - ou01 = client.create_organizational_unit(ParentId=root_id, Name='ou01') - ou01_id = ou01['OrganizationalUnit']['Id'] - account01_id = client.create_account( - AccountName='account01', - Email='account01@moto-example.org' - )['CreateAccountStatus']['AccountId'] - account02_id = client.create_account( - AccountName='account02', - Email='account02@moto-example.org' - )['CreateAccountStatus']['AccountId'] - client.move_account( - AccountId=account02_id, - SourceParentId=root_id, - DestinationParentId=ou01_id, - ) - response01 = client.list_parents(ChildId=account01_id) - response01.should.have.key('Parents').should.be.a(list) - response01['Parents'][0].should.have.key('Id').should.equal(root_id) - response01['Parents'][0].should.have.key('Type').should.equal('ROOT') - response02 = client.list_parents(ChildId=account02_id) - response02.should.have.key('Parents').should.be.a(list) - response02['Parents'][0].should.have.key('Id').should.equal(ou01_id) - response02['Parents'][0].should.have.key('Type').should.equal('ORGANIZATIONAL_UNIT') - - -@mock_organizations -def test_list_children(): - client = boto3.client('organizations', region_name='us-east-1') - org = client.create_organization(FeatureSet='ALL')['Organization'] - root_id = client.list_roots()['Roots'][0]['Id'] - ou01 = client.create_organizational_unit(ParentId=root_id, Name='ou01') - ou01_id = ou01['OrganizationalUnit']['Id'] - ou02 = client.create_organizational_unit(ParentId=ou01_id, Name='ou02') - ou02_id = ou02['OrganizationalUnit']['Id'] - account01_id = client.create_account( - AccountName='account01', - Email='account01@moto-example.org' - )['CreateAccountStatus']['AccountId'] - account02_id = client.create_account( - AccountName='account02', - Email='account02@moto-example.org' - )['CreateAccountStatus']['AccountId'] - client.move_account( - AccountId=account02_id, - SourceParentId=root_id, - DestinationParentId=ou01_id, - ) - response01 = client.list_children(ParentId=root_id, ChildType='ACCOUNT') - response02 = client.list_children(ParentId=root_id, ChildType='ORGANIZATIONAL_UNIT') - response03 = client.list_children(ParentId=ou01_id, ChildType='ACCOUNT') - response04 = client.list_children(ParentId=ou01_id, ChildType='ORGANIZATIONAL_UNIT') - response01['Children'][0]['Id'].should.equal(account01_id) - response01['Children'][0]['Type'].should.equal('ACCOUNT') - response02['Children'][0]['Id'].should.equal(ou01_id) - response02['Children'][0]['Type'].should.equal('ORGANIZATIONAL_UNIT') - response03['Children'][0]['Id'].should.equal(account02_id) - response03['Children'][0]['Type'].should.equal('ACCOUNT') - response04['Children'][0]['Id'].should.equal(ou02_id) - response04['Children'][0]['Type'].should.equal('ORGANIZATIONAL_UNIT') - - -@mock_organizations -def test_list_children_exception(): - client = boto3.client('organizations', region_name='us-east-1') - org = client.create_organization(FeatureSet='ALL')['Organization'] - root_id = client.list_roots()['Roots'][0]['Id'] - with assert_raises(ClientError) as e: - response = client.list_children( - 
ParentId=utils.make_random_root_id(), - ChildType='ACCOUNT' - ) - ex = e.exception - ex.operation_name.should.equal('ListChildren') - ex.response['Error']['Code'].should.equal('400') - ex.response['Error']['Message'].should.contain('ParentNotFoundException') - with assert_raises(ClientError) as e: - response = client.list_children( - ParentId=root_id, - ChildType='BLEE' - ) - ex = e.exception - ex.operation_name.should.equal('ListChildren') - ex.response['Error']['Code'].should.equal('400') - ex.response['Error']['Message'].should.contain('InvalidInputException') +from __future__ import unicode_literals + +import boto3 +import sure # noqa +from botocore.exceptions import ClientError +from nose.tools import assert_raises + +from moto import mock_organizations +from moto.organizations import utils +from .organizations_test_utils import ( + validate_organization, + validate_roots, + validate_organizational_unit, + validate_account, + validate_create_account_status, +) + + +@mock_organizations +def test_create_organization(): + client = boto3.client('organizations', region_name='us-east-1') + response = client.create_organization(FeatureSet='ALL') + validate_organization(response) + response['Organization']['FeatureSet'].should.equal('ALL') + + +@mock_organizations +def test_describe_organization(): + client = boto3.client('organizations', region_name='us-east-1') + client.create_organization(FeatureSet='ALL') + response = client.describe_organization() + validate_organization(response) + + +@mock_organizations +def test_describe_organization_exception(): + client = boto3.client('organizations', region_name='us-east-1') + with assert_raises(ClientError) as e: + response = client.describe_organization() + ex = e.exception + ex.operation_name.should.equal('DescribeOrganization') + ex.response['Error']['Code'].should.equal('400') + ex.response['Error']['Message'].should.contain('AWSOrganizationsNotInUseException') + + +# Organizational Units + +@mock_organizations +def test_list_roots(): + client = boto3.client('organizations', region_name='us-east-1') + org = client.create_organization(FeatureSet='ALL')['Organization'] + response = client.list_roots() + validate_roots(org, response) + + +@mock_organizations +def test_create_organizational_unit(): + client = boto3.client('organizations', region_name='us-east-1') + org = client.create_organization(FeatureSet='ALL')['Organization'] + root_id = client.list_roots()['Roots'][0]['Id'] + ou_name = 'ou01' + response = client.create_organizational_unit( + ParentId=root_id, + Name=ou_name, + ) + validate_organizational_unit(org, response) + response['OrganizationalUnit']['Name'].should.equal(ou_name) + + +@mock_organizations +def test_describe_organizational_unit(): + client = boto3.client('organizations', region_name='us-east-1') + org = client.create_organization(FeatureSet='ALL')['Organization'] + root_id = client.list_roots()['Roots'][0]['Id'] + ou_id = client.create_organizational_unit( + ParentId=root_id, + Name='ou01', + )['OrganizationalUnit']['Id'] + response = client.describe_organizational_unit(OrganizationalUnitId=ou_id) + validate_organizational_unit(org, response) + + +@mock_organizations +def test_describe_organizational_unit_exception(): + client = boto3.client('organizations', region_name='us-east-1') + org = client.create_organization(FeatureSet='ALL')['Organization'] + with assert_raises(ClientError) as e: + response = client.describe_organizational_unit( + OrganizationalUnitId=utils.make_random_root_id() + ) + ex = e.exception + 
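+    # moto raises these as generic HTTP 400 ClientErrors, so the specific AWS
+    # exception name surfaces in the error message rather than the error code: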
ex.operation_name.should.equal('DescribeOrganizationalUnit') + ex.response['Error']['Code'].should.equal('400') + ex.response['Error']['Message'].should.contain('OrganizationalUnitNotFoundException') + + +@mock_organizations +def test_list_organizational_units_for_parent(): + client = boto3.client('organizations', region_name='us-east-1') + org = client.create_organization(FeatureSet='ALL')['Organization'] + root_id = client.list_roots()['Roots'][0]['Id'] + client.create_organizational_unit(ParentId=root_id, Name='ou01') + client.create_organizational_unit(ParentId=root_id, Name='ou02') + client.create_organizational_unit(ParentId=root_id, Name='ou03') + response = client.list_organizational_units_for_parent(ParentId=root_id) + response.should.have.key('OrganizationalUnits').should.be.a(list) + for ou in response['OrganizationalUnits']: + validate_organizational_unit(org, dict(OrganizationalUnit=ou)) + + +@mock_organizations +def test_list_organizational_units_for_parent_exception(): + client = boto3.client('organizations', region_name='us-east-1') + with assert_raises(ClientError) as e: + response = client.list_organizational_units_for_parent( + ParentId=utils.make_random_root_id() + ) + ex = e.exception + ex.operation_name.should.equal('ListOrganizationalUnitsForParent') + ex.response['Error']['Code'].should.equal('400') + ex.response['Error']['Message'].should.contain('ParentNotFoundException') + + +# Accounts +mockname = 'mock-account' +mockdomain = 'moto-example.org' +mockemail = '@'.join([mockname, mockdomain]) + + +@mock_organizations +def test_create_account(): + client = boto3.client('organizations', region_name='us-east-1') + client.create_organization(FeatureSet='ALL') + create_status = client.create_account( + AccountName=mockname, Email=mockemail + )['CreateAccountStatus'] + validate_create_account_status(create_status) + create_status['AccountName'].should.equal(mockname) + + +@mock_organizations +def test_describe_account(): + client = boto3.client('organizations', region_name='us-east-1') + org = client.create_organization(FeatureSet='ALL')['Organization'] + account_id = client.create_account( + AccountName=mockname, Email=mockemail + )['CreateAccountStatus']['AccountId'] + response = client.describe_account(AccountId=account_id) + validate_account(org, response['Account']) + response['Account']['Name'].should.equal(mockname) + response['Account']['Email'].should.equal(mockemail) + + +@mock_organizations +def test_describe_account_exception(): + client = boto3.client('organizations', region_name='us-east-1') + with assert_raises(ClientError) as e: + response = client.describe_account(AccountId=utils.make_random_account_id()) + ex = e.exception + ex.operation_name.should.equal('DescribeAccount') + ex.response['Error']['Code'].should.equal('400') + ex.response['Error']['Message'].should.contain('AccountNotFoundException') + + +@mock_organizations +def test_list_accounts(): + client = boto3.client('organizations', region_name='us-east-1') + org = client.create_organization(FeatureSet='ALL')['Organization'] + for i in range(5): + name = mockname + str(i) + email = name + '@' + mockdomain + client.create_account(AccountName=name, Email=email) + response = client.list_accounts() + response.should.have.key('Accounts') + accounts = response['Accounts'] + len(accounts).should.equal(5) + for account in accounts: + validate_account(org, account) + accounts[3]['Name'].should.equal(mockname + '3') + accounts[2]['Email'].should.equal(mockname + '2' + '@' + mockdomain) + + 
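+# A hedged sketch, not exercised by these tests: against real AWS,
+# list_accounts is paginated, so callers normally drain every page with a
+# paginator (moto appears to return all mock accounts in a single page here).
+# The helper name is illustrative only.
+def iter_all_accounts(client):
+    # get_paginator handles the NextToken bookkeeping transparently.
+    for page in client.get_paginator('list_accounts').paginate():
+        for account in page['Accounts']:
+            yield account
+
+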
+@mock_organizations +def test_list_accounts_for_parent(): + client = boto3.client('organizations', region_name='us-east-1') + org = client.create_organization(FeatureSet='ALL')['Organization'] + root_id = client.list_roots()['Roots'][0]['Id'] + account_id = client.create_account( + AccountName=mockname, + Email=mockemail, + )['CreateAccountStatus']['AccountId'] + response = client.list_accounts_for_parent(ParentId=root_id) + account_id.should.be.within([account['Id'] for account in response['Accounts']]) + + +@mock_organizations +def test_move_account(): + client = boto3.client('organizations', region_name='us-east-1') + org = client.create_organization(FeatureSet='ALL')['Organization'] + root_id = client.list_roots()['Roots'][0]['Id'] + account_id = client.create_account( + AccountName=mockname, Email=mockemail + )['CreateAccountStatus']['AccountId'] + ou01 = client.create_organizational_unit(ParentId=root_id, Name='ou01') + ou01_id = ou01['OrganizationalUnit']['Id'] + client.move_account( + AccountId=account_id, + SourceParentId=root_id, + DestinationParentId=ou01_id, + ) + response = client.list_accounts_for_parent(ParentId=ou01_id) + account_id.should.be.within([account['Id'] for account in response['Accounts']]) + + +@mock_organizations +def test_list_parents_for_ou(): + client = boto3.client('organizations', region_name='us-east-1') + org = client.create_organization(FeatureSet='ALL')['Organization'] + root_id = client.list_roots()['Roots'][0]['Id'] + ou01 = client.create_organizational_unit(ParentId=root_id, Name='ou01') + ou01_id = ou01['OrganizationalUnit']['Id'] + response01 = client.list_parents(ChildId=ou01_id) + response01.should.have.key('Parents').should.be.a(list) + response01['Parents'][0].should.have.key('Id').should.equal(root_id) + response01['Parents'][0].should.have.key('Type').should.equal('ROOT') + ou02 = client.create_organizational_unit(ParentId=ou01_id, Name='ou02') + ou02_id = ou02['OrganizationalUnit']['Id'] + response02 = client.list_parents(ChildId=ou02_id) + response02.should.have.key('Parents').should.be.a(list) + response02['Parents'][0].should.have.key('Id').should.equal(ou01_id) + response02['Parents'][0].should.have.key('Type').should.equal('ORGANIZATIONAL_UNIT') + + +@mock_organizations +def test_list_parents_for_accounts(): + client = boto3.client('organizations', region_name='us-east-1') + org = client.create_organization(FeatureSet='ALL')['Organization'] + root_id = client.list_roots()['Roots'][0]['Id'] + ou01 = client.create_organizational_unit(ParentId=root_id, Name='ou01') + ou01_id = ou01['OrganizationalUnit']['Id'] + account01_id = client.create_account( + AccountName='account01', + Email='account01@moto-example.org' + )['CreateAccountStatus']['AccountId'] + account02_id = client.create_account( + AccountName='account02', + Email='account02@moto-example.org' + )['CreateAccountStatus']['AccountId'] + client.move_account( + AccountId=account02_id, + SourceParentId=root_id, + DestinationParentId=ou01_id, + ) + response01 = client.list_parents(ChildId=account01_id) + response01.should.have.key('Parents').should.be.a(list) + response01['Parents'][0].should.have.key('Id').should.equal(root_id) + response01['Parents'][0].should.have.key('Type').should.equal('ROOT') + response02 = client.list_parents(ChildId=account02_id) + response02.should.have.key('Parents').should.be.a(list) + response02['Parents'][0].should.have.key('Id').should.equal(ou01_id) + response02['Parents'][0].should.have.key('Type').should.equal('ORGANIZATIONAL_UNIT') + + 
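+# A hedged sketch, not exercised by these tests: list_children returns only
+# Id/Type pairs for a single parent, so enumerating a whole OU tree means
+# recursing on each ORGANIZATIONAL_UNIT child. The helper name is illustrative.
+def walk_ou_tree(client, parent_id):
+    # ChildType is required; accounts and OUs must be listed separately.
+    children = client.list_children(
+        ParentId=parent_id,
+        ChildType='ORGANIZATIONAL_UNIT',
+    )['Children']
+    for child in children:
+        yield child['Id']
+        for descendant in walk_ou_tree(client, child['Id']):
+            yield descendant
+
+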
+@mock_organizations +def test_list_children(): + client = boto3.client('organizations', region_name='us-east-1') + org = client.create_organization(FeatureSet='ALL')['Organization'] + root_id = client.list_roots()['Roots'][0]['Id'] + ou01 = client.create_organizational_unit(ParentId=root_id, Name='ou01') + ou01_id = ou01['OrganizationalUnit']['Id'] + ou02 = client.create_organizational_unit(ParentId=ou01_id, Name='ou02') + ou02_id = ou02['OrganizationalUnit']['Id'] + account01_id = client.create_account( + AccountName='account01', + Email='account01@moto-example.org' + )['CreateAccountStatus']['AccountId'] + account02_id = client.create_account( + AccountName='account02', + Email='account02@moto-example.org' + )['CreateAccountStatus']['AccountId'] + client.move_account( + AccountId=account02_id, + SourceParentId=root_id, + DestinationParentId=ou01_id, + ) + response01 = client.list_children(ParentId=root_id, ChildType='ACCOUNT') + response02 = client.list_children(ParentId=root_id, ChildType='ORGANIZATIONAL_UNIT') + response03 = client.list_children(ParentId=ou01_id, ChildType='ACCOUNT') + response04 = client.list_children(ParentId=ou01_id, ChildType='ORGANIZATIONAL_UNIT') + response01['Children'][0]['Id'].should.equal(account01_id) + response01['Children'][0]['Type'].should.equal('ACCOUNT') + response02['Children'][0]['Id'].should.equal(ou01_id) + response02['Children'][0]['Type'].should.equal('ORGANIZATIONAL_UNIT') + response03['Children'][0]['Id'].should.equal(account02_id) + response03['Children'][0]['Type'].should.equal('ACCOUNT') + response04['Children'][0]['Id'].should.equal(ou02_id) + response04['Children'][0]['Type'].should.equal('ORGANIZATIONAL_UNIT') + + +@mock_organizations +def test_list_children_exception(): + client = boto3.client('organizations', region_name='us-east-1') + org = client.create_organization(FeatureSet='ALL')['Organization'] + root_id = client.list_roots()['Roots'][0]['Id'] + with assert_raises(ClientError) as e: + response = client.list_children( + ParentId=utils.make_random_root_id(), + ChildType='ACCOUNT' + ) + ex = e.exception + ex.operation_name.should.equal('ListChildren') + ex.response['Error']['Code'].should.equal('400') + ex.response['Error']['Message'].should.contain('ParentNotFoundException') + with assert_raises(ClientError) as e: + response = client.list_children( + ParentId=root_id, + ChildType='BLEE' + ) + ex = e.exception + ex.operation_name.should.equal('ListChildren') + ex.response['Error']['Code'].should.equal('400') + ex.response['Error']['Message'].should.contain('InvalidInputException') diff --git a/tests/test_polly/test_polly.py b/tests/test_polly/test_polly.py index c5c864835cfa..ec85142fa90b 100644 --- a/tests/test_polly/test_polly.py +++ b/tests/test_polly/test_polly.py @@ -1,275 +1,275 @@ -from __future__ import unicode_literals - -from botocore.exceptions import ClientError -import boto3 -import sure # noqa -from nose.tools import assert_raises -from moto import mock_polly - -# Polly only available in a few regions -DEFAULT_REGION = 'eu-west-1' - -LEXICON_XML = """ - - - W3C - World Wide Web Consortium - -""" - - -@mock_polly -def test_describe_voices(): - client = boto3.client('polly', region_name=DEFAULT_REGION) - - resp = client.describe_voices() - len(resp['Voices']).should.be.greater_than(1) - - resp = client.describe_voices(LanguageCode='en-GB') - len(resp['Voices']).should.equal(3) - - try: - client.describe_voices(LanguageCode='SOME_LANGUAGE') - except ClientError as err: - err.response['Error']['Code'].should.equal('400') 
- else: - raise RuntimeError('Should of raised an exception') - - -@mock_polly -def test_put_list_lexicon(): - client = boto3.client('polly', region_name=DEFAULT_REGION) - - # Return nothing - client.put_lexicon( - Name='test', - Content=LEXICON_XML - ) - - resp = client.list_lexicons() - len(resp['Lexicons']).should.equal(1) - - -@mock_polly -def test_put_get_lexicon(): - client = boto3.client('polly', region_name=DEFAULT_REGION) - - # Return nothing - client.put_lexicon( - Name='test', - Content=LEXICON_XML - ) - - resp = client.get_lexicon(Name='test') - resp.should.contain('Lexicon') - resp.should.contain('LexiconAttributes') - - -@mock_polly -def test_put_lexicon_bad_name(): - client = boto3.client('polly', region_name=DEFAULT_REGION) - - try: - client.put_lexicon( - Name='test-invalid', - Content=LEXICON_XML - ) - except ClientError as err: - err.response['Error']['Code'].should.equal('InvalidParameterValue') - else: - raise RuntimeError('Should of raised an exception') - - -@mock_polly -def test_synthesize_speech(): - client = boto3.client('polly', region_name=DEFAULT_REGION) - - # Return nothing - client.put_lexicon( - Name='test', - Content=LEXICON_XML - ) - - tests = ( - ('pcm', 'audio/pcm'), - ('mp3', 'audio/mpeg'), - ('ogg_vorbis', 'audio/ogg'), - ) - for output_format, content_type in tests: - resp = client.synthesize_speech( - LexiconNames=['test'], - OutputFormat=output_format, - SampleRate='16000', - Text='test1234', - TextType='text', - VoiceId='Astrid' - ) - resp['ContentType'].should.equal(content_type) - - -@mock_polly -def test_synthesize_speech_bad_lexicon(): - client = boto3.client('polly', region_name=DEFAULT_REGION) - client.put_lexicon(Name='test', Content=LEXICON_XML) - - try: - client.synthesize_speech( - LexiconNames=['test2'], - OutputFormat='pcm', - SampleRate='16000', - Text='test1234', - TextType='text', - VoiceId='Astrid' - ) - except ClientError as err: - err.response['Error']['Code'].should.equal('LexiconNotFoundException') - else: - raise RuntimeError('Should of raised LexiconNotFoundException') - - -@mock_polly -def test_synthesize_speech_bad_output_format(): - client = boto3.client('polly', region_name=DEFAULT_REGION) - client.put_lexicon(Name='test', Content=LEXICON_XML) - - try: - client.synthesize_speech( - LexiconNames=['test'], - OutputFormat='invalid', - SampleRate='16000', - Text='test1234', - TextType='text', - VoiceId='Astrid' - ) - except ClientError as err: - err.response['Error']['Code'].should.equal('InvalidParameterValue') - else: - raise RuntimeError('Should of raised ') - - -@mock_polly -def test_synthesize_speech_bad_sample_rate(): - client = boto3.client('polly', region_name=DEFAULT_REGION) - client.put_lexicon(Name='test', Content=LEXICON_XML) - - try: - client.synthesize_speech( - LexiconNames=['test'], - OutputFormat='pcm', - SampleRate='18000', - Text='test1234', - TextType='text', - VoiceId='Astrid' - ) - except ClientError as err: - err.response['Error']['Code'].should.equal('InvalidSampleRateException') - else: - raise RuntimeError('Should of raised ') - - -@mock_polly -def test_synthesize_speech_bad_text_type(): - client = boto3.client('polly', region_name=DEFAULT_REGION) - client.put_lexicon(Name='test', Content=LEXICON_XML) - - try: - client.synthesize_speech( - LexiconNames=['test'], - OutputFormat='pcm', - SampleRate='16000', - Text='test1234', - TextType='invalid', - VoiceId='Astrid' - ) - except ClientError as err: - err.response['Error']['Code'].should.equal('InvalidParameterValue') - else: - raise RuntimeError('Should 
of raised ') - - -@mock_polly -def test_synthesize_speech_bad_voice_id(): - client = boto3.client('polly', region_name=DEFAULT_REGION) - client.put_lexicon(Name='test', Content=LEXICON_XML) - - try: - client.synthesize_speech( - LexiconNames=['test'], - OutputFormat='pcm', - SampleRate='16000', - Text='test1234', - TextType='text', - VoiceId='Luke' - ) - except ClientError as err: - err.response['Error']['Code'].should.equal('InvalidParameterValue') - else: - raise RuntimeError('Should of raised ') - - -@mock_polly -def test_synthesize_speech_text_too_long(): - client = boto3.client('polly', region_name=DEFAULT_REGION) - client.put_lexicon(Name='test', Content=LEXICON_XML) - - try: - client.synthesize_speech( - LexiconNames=['test'], - OutputFormat='pcm', - SampleRate='16000', - Text='test1234'*376, # = 3008 characters - TextType='text', - VoiceId='Astrid' - ) - except ClientError as err: - err.response['Error']['Code'].should.equal('TextLengthExceededException') - else: - raise RuntimeError('Should of raised ') - - -@mock_polly -def test_synthesize_speech_bad_speech_marks1(): - client = boto3.client('polly', region_name=DEFAULT_REGION) - client.put_lexicon(Name='test', Content=LEXICON_XML) - - try: - client.synthesize_speech( - LexiconNames=['test'], - OutputFormat='pcm', - SampleRate='16000', - Text='test1234', - TextType='text', - SpeechMarkTypes=['word'], - VoiceId='Astrid' - ) - except ClientError as err: - err.response['Error']['Code'].should.equal('MarksNotSupportedForFormatException') - else: - raise RuntimeError('Should of raised ') - - -@mock_polly -def test_synthesize_speech_bad_speech_marks2(): - client = boto3.client('polly', region_name=DEFAULT_REGION) - client.put_lexicon(Name='test', Content=LEXICON_XML) - - try: - client.synthesize_speech( - LexiconNames=['test'], - OutputFormat='pcm', - SampleRate='16000', - Text='test1234', - TextType='ssml', - SpeechMarkTypes=['word'], - VoiceId='Astrid' - ) - except ClientError as err: - err.response['Error']['Code'].should.equal('MarksNotSupportedForFormatException') - else: - raise RuntimeError('Should of raised ') +from __future__ import unicode_literals + +from botocore.exceptions import ClientError +import boto3 +import sure # noqa +from nose.tools import assert_raises +from moto import mock_polly + +# Polly only available in a few regions +DEFAULT_REGION = 'eu-west-1' + +LEXICON_XML = """ + + + W3C + World Wide Web Consortium + +""" + + +@mock_polly +def test_describe_voices(): + client = boto3.client('polly', region_name=DEFAULT_REGION) + + resp = client.describe_voices() + len(resp['Voices']).should.be.greater_than(1) + + resp = client.describe_voices(LanguageCode='en-GB') + len(resp['Voices']).should.equal(3) + + try: + client.describe_voices(LanguageCode='SOME_LANGUAGE') + except ClientError as err: + err.response['Error']['Code'].should.equal('400') + else: + raise RuntimeError('Should of raised an exception') + + +@mock_polly +def test_put_list_lexicon(): + client = boto3.client('polly', region_name=DEFAULT_REGION) + + # Return nothing + client.put_lexicon( + Name='test', + Content=LEXICON_XML + ) + + resp = client.list_lexicons() + len(resp['Lexicons']).should.equal(1) + + +@mock_polly +def test_put_get_lexicon(): + client = boto3.client('polly', region_name=DEFAULT_REGION) + + # Return nothing + client.put_lexicon( + Name='test', + Content=LEXICON_XML + ) + + resp = client.get_lexicon(Name='test') + resp.should.contain('Lexicon') + resp.should.contain('LexiconAttributes') + + +@mock_polly +def test_put_lexicon_bad_name(): 
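+    # Lexicon names must match [0-9A-Za-z]{1,20}, so the hyphen in
+    # 'test-invalid' below is expected to be rejected as InvalidParameterValue.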
+ client = boto3.client('polly', region_name=DEFAULT_REGION) + + try: + client.put_lexicon( + Name='test-invalid', + Content=LEXICON_XML + ) + except ClientError as err: + err.response['Error']['Code'].should.equal('InvalidParameterValue') + else: + raise RuntimeError('Should of raised an exception') + + +@mock_polly +def test_synthesize_speech(): + client = boto3.client('polly', region_name=DEFAULT_REGION) + + # Return nothing + client.put_lexicon( + Name='test', + Content=LEXICON_XML + ) + + tests = ( + ('pcm', 'audio/pcm'), + ('mp3', 'audio/mpeg'), + ('ogg_vorbis', 'audio/ogg'), + ) + for output_format, content_type in tests: + resp = client.synthesize_speech( + LexiconNames=['test'], + OutputFormat=output_format, + SampleRate='16000', + Text='test1234', + TextType='text', + VoiceId='Astrid' + ) + resp['ContentType'].should.equal(content_type) + + +@mock_polly +def test_synthesize_speech_bad_lexicon(): + client = boto3.client('polly', region_name=DEFAULT_REGION) + client.put_lexicon(Name='test', Content=LEXICON_XML) + + try: + client.synthesize_speech( + LexiconNames=['test2'], + OutputFormat='pcm', + SampleRate='16000', + Text='test1234', + TextType='text', + VoiceId='Astrid' + ) + except ClientError as err: + err.response['Error']['Code'].should.equal('LexiconNotFoundException') + else: + raise RuntimeError('Should of raised LexiconNotFoundException') + + +@mock_polly +def test_synthesize_speech_bad_output_format(): + client = boto3.client('polly', region_name=DEFAULT_REGION) + client.put_lexicon(Name='test', Content=LEXICON_XML) + + try: + client.synthesize_speech( + LexiconNames=['test'], + OutputFormat='invalid', + SampleRate='16000', + Text='test1234', + TextType='text', + VoiceId='Astrid' + ) + except ClientError as err: + err.response['Error']['Code'].should.equal('InvalidParameterValue') + else: + raise RuntimeError('Should of raised ') + + +@mock_polly +def test_synthesize_speech_bad_sample_rate(): + client = boto3.client('polly', region_name=DEFAULT_REGION) + client.put_lexicon(Name='test', Content=LEXICON_XML) + + try: + client.synthesize_speech( + LexiconNames=['test'], + OutputFormat='pcm', + SampleRate='18000', + Text='test1234', + TextType='text', + VoiceId='Astrid' + ) + except ClientError as err: + err.response['Error']['Code'].should.equal('InvalidSampleRateException') + else: + raise RuntimeError('Should of raised ') + + +@mock_polly +def test_synthesize_speech_bad_text_type(): + client = boto3.client('polly', region_name=DEFAULT_REGION) + client.put_lexicon(Name='test', Content=LEXICON_XML) + + try: + client.synthesize_speech( + LexiconNames=['test'], + OutputFormat='pcm', + SampleRate='16000', + Text='test1234', + TextType='invalid', + VoiceId='Astrid' + ) + except ClientError as err: + err.response['Error']['Code'].should.equal('InvalidParameterValue') + else: + raise RuntimeError('Should of raised ') + + +@mock_polly +def test_synthesize_speech_bad_voice_id(): + client = boto3.client('polly', region_name=DEFAULT_REGION) + client.put_lexicon(Name='test', Content=LEXICON_XML) + + try: + client.synthesize_speech( + LexiconNames=['test'], + OutputFormat='pcm', + SampleRate='16000', + Text='test1234', + TextType='text', + VoiceId='Luke' + ) + except ClientError as err: + err.response['Error']['Code'].should.equal('InvalidParameterValue') + else: + raise RuntimeError('Should of raised ') + + +@mock_polly +def test_synthesize_speech_text_too_long(): + client = boto3.client('polly', region_name=DEFAULT_REGION) + client.put_lexicon(Name='test', Content=LEXICON_XML) + + 
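+    # 'test1234' is 8 characters, so 8 * 376 = 3008: just over the 3,000
+    # plain-text character ceiling the mock appears to enforce here.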
try: + client.synthesize_speech( + LexiconNames=['test'], + OutputFormat='pcm', + SampleRate='16000', + Text='test1234'*376, # = 3008 characters + TextType='text', + VoiceId='Astrid' + ) + except ClientError as err: + err.response['Error']['Code'].should.equal('TextLengthExceededException') + else: + raise RuntimeError('Should of raised ') + + +@mock_polly +def test_synthesize_speech_bad_speech_marks1(): + client = boto3.client('polly', region_name=DEFAULT_REGION) + client.put_lexicon(Name='test', Content=LEXICON_XML) + + try: + client.synthesize_speech( + LexiconNames=['test'], + OutputFormat='pcm', + SampleRate='16000', + Text='test1234', + TextType='text', + SpeechMarkTypes=['word'], + VoiceId='Astrid' + ) + except ClientError as err: + err.response['Error']['Code'].should.equal('MarksNotSupportedForFormatException') + else: + raise RuntimeError('Should of raised ') + + +@mock_polly +def test_synthesize_speech_bad_speech_marks2(): + client = boto3.client('polly', region_name=DEFAULT_REGION) + client.put_lexicon(Name='test', Content=LEXICON_XML) + + try: + client.synthesize_speech( + LexiconNames=['test'], + OutputFormat='pcm', + SampleRate='16000', + Text='test1234', + TextType='ssml', + SpeechMarkTypes=['word'], + VoiceId='Astrid' + ) + except ClientError as err: + err.response['Error']['Code'].should.equal('MarksNotSupportedForFormatException') + else: + raise RuntimeError('Should of raised ') diff --git a/tests/test_polly/test_server.py b/tests/test_polly/test_server.py index 3ae7f225422a..e080c75510ab 100644 --- a/tests/test_polly/test_server.py +++ b/tests/test_polly/test_server.py @@ -1,19 +1,19 @@ -from __future__ import unicode_literals - -import sure # noqa - -import moto.server as server -from moto import mock_polly - -''' -Test the different server responses -''' - - -@mock_polly -def test_polly_list(): - backend = server.create_backend_app("polly") - test_client = backend.test_client() - - res = test_client.get('/v1/lexicons') - res.status_code.should.equal(200) +from __future__ import unicode_literals + +import sure # noqa + +import moto.server as server +from moto import mock_polly + +''' +Test the different server responses +''' + + +@mock_polly +def test_polly_list(): + backend = server.create_backend_app("polly") + test_client = backend.test_client() + + res = test_client.get('/v1/lexicons') + res.status_code.should.equal(200) diff --git a/tests/test_rds/test_rds.py b/tests/test_rds/test_rds.py index 5bf733dc6713..064598012418 100644 --- a/tests/test_rds/test_rds.py +++ b/tests/test_rds/test_rds.py @@ -1,324 +1,324 @@ -from __future__ import unicode_literals - -import boto3 -import boto.rds -import boto.vpc -from boto.exception import BotoServerError -import sure # noqa - -from moto import mock_ec2_deprecated, mock_rds_deprecated, mock_rds -from tests.helpers import disable_on_py3 - - -@mock_rds_deprecated -def test_create_database(): - conn = boto.rds.connect_to_region("us-west-2") - - database = conn.create_dbinstance("db-master-1", 10, 'db.m1.small', 'root', 'hunter2', - security_groups=["my_sg"]) - - database.status.should.equal('available') - database.id.should.equal("db-master-1") - database.allocated_storage.should.equal(10) - database.instance_class.should.equal("db.m1.small") - database.master_username.should.equal("root") - database.endpoint.should.equal( - ('db-master-1.aaaaaaaaaa.us-west-2.rds.amazonaws.com', 3306)) - database.security_groups[0].name.should.equal('my_sg') - - -@mock_rds_deprecated -def test_get_databases(): - conn = 
boto.rds.connect_to_region("us-west-2") - - list(conn.get_all_dbinstances()).should.have.length_of(0) - - conn.create_dbinstance("db-master-1", 10, 'db.m1.small', 'root', 'hunter2') - conn.create_dbinstance("db-master-2", 10, 'db.m1.small', 'root', 'hunter2') - - list(conn.get_all_dbinstances()).should.have.length_of(2) - - databases = conn.get_all_dbinstances("db-master-1") - list(databases).should.have.length_of(1) - - databases[0].id.should.equal("db-master-1") - - -@mock_rds -def test_get_databases_paginated(): - conn = boto3.client('rds', region_name="us-west-2") - - for i in range(51): - conn.create_db_instance(AllocatedStorage=5, - Port=5432, - DBInstanceIdentifier='rds%d' % i, - DBInstanceClass='db.t1.micro', - Engine='postgres') - - resp = conn.describe_db_instances() - resp["DBInstances"].should.have.length_of(50) - resp["Marker"].should.equal(resp["DBInstances"][-1]['DBInstanceIdentifier']) - - resp2 = conn.describe_db_instances(Marker=resp["Marker"]) - resp2["DBInstances"].should.have.length_of(1) - - -@mock_rds_deprecated -def test_describe_non_existant_database(): - conn = boto.rds.connect_to_region("us-west-2") - conn.get_all_dbinstances.when.called_with( - "not-a-db").should.throw(BotoServerError) - - -@mock_rds_deprecated -def test_delete_database(): - conn = boto.rds.connect_to_region("us-west-2") - list(conn.get_all_dbinstances()).should.have.length_of(0) - - conn.create_dbinstance("db-master-1", 10, 'db.m1.small', 'root', 'hunter2') - list(conn.get_all_dbinstances()).should.have.length_of(1) - - conn.delete_dbinstance("db-master-1") - list(conn.get_all_dbinstances()).should.have.length_of(0) - - -@mock_rds_deprecated -def test_delete_non_existant_database(): - conn = boto.rds.connect_to_region("us-west-2") - conn.delete_dbinstance.when.called_with( - "not-a-db").should.throw(BotoServerError) - - -@mock_rds_deprecated -def test_create_database_security_group(): - conn = boto.rds.connect_to_region("us-west-2") - - security_group = conn.create_dbsecurity_group('db_sg', 'DB Security Group') - security_group.name.should.equal('db_sg') - security_group.description.should.equal("DB Security Group") - list(security_group.ip_ranges).should.equal([]) - - -@mock_rds_deprecated -def test_get_security_groups(): - conn = boto.rds.connect_to_region("us-west-2") - - list(conn.get_all_dbsecurity_groups()).should.have.length_of(0) - - conn.create_dbsecurity_group('db_sg1', 'DB Security Group') - conn.create_dbsecurity_group('db_sg2', 'DB Security Group') - - list(conn.get_all_dbsecurity_groups()).should.have.length_of(2) - - databases = conn.get_all_dbsecurity_groups("db_sg1") - list(databases).should.have.length_of(1) - - databases[0].name.should.equal("db_sg1") - - -@mock_rds_deprecated -def test_get_non_existant_security_group(): - conn = boto.rds.connect_to_region("us-west-2") - conn.get_all_dbsecurity_groups.when.called_with( - "not-a-sg").should.throw(BotoServerError) - - -@mock_rds_deprecated -def test_delete_database_security_group(): - conn = boto.rds.connect_to_region("us-west-2") - conn.create_dbsecurity_group('db_sg', 'DB Security Group') - - list(conn.get_all_dbsecurity_groups()).should.have.length_of(1) - - conn.delete_dbsecurity_group("db_sg") - list(conn.get_all_dbsecurity_groups()).should.have.length_of(0) - - -@mock_rds_deprecated -def test_delete_non_existant_security_group(): - conn = boto.rds.connect_to_region("us-west-2") - conn.delete_dbsecurity_group.when.called_with( - "not-a-db").should.throw(BotoServerError) - - -@disable_on_py3() -@mock_rds_deprecated -def 
test_security_group_authorize(): - conn = boto.rds.connect_to_region("us-west-2") - security_group = conn.create_dbsecurity_group('db_sg', 'DB Security Group') - list(security_group.ip_ranges).should.equal([]) - - security_group.authorize(cidr_ip='10.3.2.45/32') - security_group = conn.get_all_dbsecurity_groups()[0] - list(security_group.ip_ranges).should.have.length_of(1) - security_group.ip_ranges[0].cidr_ip.should.equal('10.3.2.45/32') - - -@mock_rds_deprecated -def test_add_security_group_to_database(): - conn = boto.rds.connect_to_region("us-west-2") - - database = conn.create_dbinstance( - "db-master-1", 10, 'db.m1.small', 'root', 'hunter2') - security_group = conn.create_dbsecurity_group('db_sg', 'DB Security Group') - database.modify(security_groups=[security_group]) - - database = conn.get_all_dbinstances()[0] - list(database.security_groups).should.have.length_of(1) - - database.security_groups[0].name.should.equal("db_sg") - - -@mock_ec2_deprecated -@mock_rds_deprecated -def test_add_database_subnet_group(): - vpc_conn = boto.vpc.connect_to_region("us-west-2") - vpc = vpc_conn.create_vpc("10.0.0.0/16") - subnet1 = vpc_conn.create_subnet(vpc.id, "10.1.0.0/24") - subnet2 = vpc_conn.create_subnet(vpc.id, "10.2.0.0/24") - - subnet_ids = [subnet1.id, subnet2.id] - conn = boto.rds.connect_to_region("us-west-2") - subnet_group = conn.create_db_subnet_group( - "db_subnet", "my db subnet", subnet_ids) - subnet_group.name.should.equal('db_subnet') - subnet_group.description.should.equal("my db subnet") - list(subnet_group.subnet_ids).should.equal(subnet_ids) - - -@mock_ec2_deprecated -@mock_rds_deprecated -def test_describe_database_subnet_group(): - vpc_conn = boto.vpc.connect_to_region("us-west-2") - vpc = vpc_conn.create_vpc("10.0.0.0/16") - subnet = vpc_conn.create_subnet(vpc.id, "10.1.0.0/24") - - conn = boto.rds.connect_to_region("us-west-2") - conn.create_db_subnet_group("db_subnet1", "my db subnet", [subnet.id]) - conn.create_db_subnet_group("db_subnet2", "my db subnet", [subnet.id]) - - list(conn.get_all_db_subnet_groups()).should.have.length_of(2) - list(conn.get_all_db_subnet_groups("db_subnet1")).should.have.length_of(1) - - conn.get_all_db_subnet_groups.when.called_with( - "not-a-subnet").should.throw(BotoServerError) - - -@mock_ec2_deprecated -@mock_rds_deprecated -def test_delete_database_subnet_group(): - vpc_conn = boto.vpc.connect_to_region("us-west-2") - vpc = vpc_conn.create_vpc("10.0.0.0/16") - subnet = vpc_conn.create_subnet(vpc.id, "10.1.0.0/24") - - conn = boto.rds.connect_to_region("us-west-2") - conn.create_db_subnet_group("db_subnet1", "my db subnet", [subnet.id]) - list(conn.get_all_db_subnet_groups()).should.have.length_of(1) - - conn.delete_db_subnet_group("db_subnet1") - list(conn.get_all_db_subnet_groups()).should.have.length_of(0) - - conn.delete_db_subnet_group.when.called_with( - "db_subnet1").should.throw(BotoServerError) - - -@mock_ec2_deprecated -@mock_rds_deprecated -def test_create_database_in_subnet_group(): - vpc_conn = boto.vpc.connect_to_region("us-west-2") - vpc = vpc_conn.create_vpc("10.0.0.0/16") - subnet = vpc_conn.create_subnet(vpc.id, "10.1.0.0/24") - - conn = boto.rds.connect_to_region("us-west-2") - conn.create_db_subnet_group("db_subnet1", "my db subnet", [subnet.id]) - - database = conn.create_dbinstance("db-master-1", 10, 'db.m1.small', - 'root', 'hunter2', db_subnet_group_name="db_subnet1") - - database = conn.get_all_dbinstances("db-master-1")[0] - database.subnet_group.name.should.equal("db_subnet1") - - -@mock_rds_deprecated -def 
test_create_database_replica(): - conn = boto.rds.connect_to_region("us-west-2") - - primary = conn.create_dbinstance( - "db-master-1", 10, 'db.m1.small', 'root', 'hunter2') - - replica = conn.create_dbinstance_read_replica( - "replica", "db-master-1", "db.m1.small") - replica.id.should.equal("replica") - replica.instance_class.should.equal("db.m1.small") - status_info = replica.status_infos[0] - status_info.normal.should.equal(True) - status_info.status_type.should.equal('read replication') - status_info.status.should.equal('replicating') - - primary = conn.get_all_dbinstances("db-master-1")[0] - primary.read_replica_dbinstance_identifiers[0].should.equal("replica") - - conn.delete_dbinstance("replica") - - primary = conn.get_all_dbinstances("db-master-1")[0] - list(primary.read_replica_dbinstance_identifiers).should.have.length_of(0) - - -@mock_rds_deprecated -def test_create_cross_region_database_replica(): - west_1_conn = boto.rds.connect_to_region("us-west-1") - west_2_conn = boto.rds.connect_to_region("us-west-2") - - primary = west_1_conn.create_dbinstance( - "db-master-1", 10, 'db.m1.small', 'root', 'hunter2') - - primary_arn = "arn:aws:rds:us-west-1:1234567890:db:db-master-1" - replica = west_2_conn.create_dbinstance_read_replica( - "replica", - primary_arn, - "db.m1.small", - ) - - primary = west_1_conn.get_all_dbinstances("db-master-1")[0] - primary.read_replica_dbinstance_identifiers[0].should.equal("replica") - - replica = west_2_conn.get_all_dbinstances("replica")[0] - replica.instance_class.should.equal("db.m1.small") - - west_2_conn.delete_dbinstance("replica") - - primary = west_1_conn.get_all_dbinstances("db-master-1")[0] - list(primary.read_replica_dbinstance_identifiers).should.have.length_of(0) - - -@mock_rds_deprecated -def test_connecting_to_us_east_1(): - # boto does not use us-east-1 in the URL for RDS, - # and that broke moto in the past: - # https://github.com/boto/boto/blob/e271ff09364ea18d9d8b6f4d63d6b0ac6cbc9b75/boto/endpoints.json#L285 - conn = boto.rds.connect_to_region("us-east-1") - - database = conn.create_dbinstance("db-master-1", 10, 'db.m1.small', 'root', 'hunter2', - security_groups=["my_sg"]) - - database.status.should.equal('available') - database.id.should.equal("db-master-1") - database.allocated_storage.should.equal(10) - database.instance_class.should.equal("db.m1.small") - database.master_username.should.equal("root") - database.endpoint.should.equal( - ('db-master-1.aaaaaaaaaa.us-east-1.rds.amazonaws.com', 3306)) - database.security_groups[0].name.should.equal('my_sg') - - -@mock_rds_deprecated -def test_create_database_with_iops(): - conn = boto.rds.connect_to_region("us-west-2") - - database = conn.create_dbinstance( - "db-master-1", 10, 'db.m1.small', 'root', 'hunter2', iops=6000) - - database.status.should.equal('available') - database.iops.should.equal(6000) - # boto>2.36.0 may change the following property name to `storage_type` - database.StorageType.should.equal('io1') +from __future__ import unicode_literals + +import boto3 +import boto.rds +import boto.vpc +from boto.exception import BotoServerError +import sure # noqa + +from moto import mock_ec2_deprecated, mock_rds_deprecated, mock_rds +from tests.helpers import disable_on_py3 + + +@mock_rds_deprecated +def test_create_database(): + conn = boto.rds.connect_to_region("us-west-2") + + database = conn.create_dbinstance("db-master-1", 10, 'db.m1.small', 'root', 'hunter2', + security_groups=["my_sg"]) + + database.status.should.equal('available') + 
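+    # The assertions below pin moto's fabricated defaults, including a
+    # deterministic endpoint of the form
+    # <identifier>.aaaaaaaaaa.<region>.rds.amazonaws.com on port 3306.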
database.id.should.equal("db-master-1") + database.allocated_storage.should.equal(10) + database.instance_class.should.equal("db.m1.small") + database.master_username.should.equal("root") + database.endpoint.should.equal( + ('db-master-1.aaaaaaaaaa.us-west-2.rds.amazonaws.com', 3306)) + database.security_groups[0].name.should.equal('my_sg') + + +@mock_rds_deprecated +def test_get_databases(): + conn = boto.rds.connect_to_region("us-west-2") + + list(conn.get_all_dbinstances()).should.have.length_of(0) + + conn.create_dbinstance("db-master-1", 10, 'db.m1.small', 'root', 'hunter2') + conn.create_dbinstance("db-master-2", 10, 'db.m1.small', 'root', 'hunter2') + + list(conn.get_all_dbinstances()).should.have.length_of(2) + + databases = conn.get_all_dbinstances("db-master-1") + list(databases).should.have.length_of(1) + + databases[0].id.should.equal("db-master-1") + + +@mock_rds +def test_get_databases_paginated(): + conn = boto3.client('rds', region_name="us-west-2") + + for i in range(51): + conn.create_db_instance(AllocatedStorage=5, + Port=5432, + DBInstanceIdentifier='rds%d' % i, + DBInstanceClass='db.t1.micro', + Engine='postgres') + + resp = conn.describe_db_instances() + resp["DBInstances"].should.have.length_of(50) + resp["Marker"].should.equal(resp["DBInstances"][-1]['DBInstanceIdentifier']) + + resp2 = conn.describe_db_instances(Marker=resp["Marker"]) + resp2["DBInstances"].should.have.length_of(1) + + +@mock_rds_deprecated +def test_describe_non_existant_database(): + conn = boto.rds.connect_to_region("us-west-2") + conn.get_all_dbinstances.when.called_with( + "not-a-db").should.throw(BotoServerError) + + +@mock_rds_deprecated +def test_delete_database(): + conn = boto.rds.connect_to_region("us-west-2") + list(conn.get_all_dbinstances()).should.have.length_of(0) + + conn.create_dbinstance("db-master-1", 10, 'db.m1.small', 'root', 'hunter2') + list(conn.get_all_dbinstances()).should.have.length_of(1) + + conn.delete_dbinstance("db-master-1") + list(conn.get_all_dbinstances()).should.have.length_of(0) + + +@mock_rds_deprecated +def test_delete_non_existant_database(): + conn = boto.rds.connect_to_region("us-west-2") + conn.delete_dbinstance.when.called_with( + "not-a-db").should.throw(BotoServerError) + + +@mock_rds_deprecated +def test_create_database_security_group(): + conn = boto.rds.connect_to_region("us-west-2") + + security_group = conn.create_dbsecurity_group('db_sg', 'DB Security Group') + security_group.name.should.equal('db_sg') + security_group.description.should.equal("DB Security Group") + list(security_group.ip_ranges).should.equal([]) + + +@mock_rds_deprecated +def test_get_security_groups(): + conn = boto.rds.connect_to_region("us-west-2") + + list(conn.get_all_dbsecurity_groups()).should.have.length_of(0) + + conn.create_dbsecurity_group('db_sg1', 'DB Security Group') + conn.create_dbsecurity_group('db_sg2', 'DB Security Group') + + list(conn.get_all_dbsecurity_groups()).should.have.length_of(2) + + databases = conn.get_all_dbsecurity_groups("db_sg1") + list(databases).should.have.length_of(1) + + databases[0].name.should.equal("db_sg1") + + +@mock_rds_deprecated +def test_get_non_existant_security_group(): + conn = boto.rds.connect_to_region("us-west-2") + conn.get_all_dbsecurity_groups.when.called_with( + "not-a-sg").should.throw(BotoServerError) + + +@mock_rds_deprecated +def test_delete_database_security_group(): + conn = boto.rds.connect_to_region("us-west-2") + conn.create_dbsecurity_group('db_sg', 'DB Security Group') + + 
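+    # Deletion is by name; once removed, a second delete of the same name is
+    # expected to fail (covered by test_delete_non_existant_security_group below).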
list(conn.get_all_dbsecurity_groups()).should.have.length_of(1) + + conn.delete_dbsecurity_group("db_sg") + list(conn.get_all_dbsecurity_groups()).should.have.length_of(0) + + +@mock_rds_deprecated +def test_delete_non_existant_security_group(): + conn = boto.rds.connect_to_region("us-west-2") + conn.delete_dbsecurity_group.when.called_with( + "not-a-db").should.throw(BotoServerError) + + +@disable_on_py3() +@mock_rds_deprecated +def test_security_group_authorize(): + conn = boto.rds.connect_to_region("us-west-2") + security_group = conn.create_dbsecurity_group('db_sg', 'DB Security Group') + list(security_group.ip_ranges).should.equal([]) + + security_group.authorize(cidr_ip='10.3.2.45/32') + security_group = conn.get_all_dbsecurity_groups()[0] + list(security_group.ip_ranges).should.have.length_of(1) + security_group.ip_ranges[0].cidr_ip.should.equal('10.3.2.45/32') + + +@mock_rds_deprecated +def test_add_security_group_to_database(): + conn = boto.rds.connect_to_region("us-west-2") + + database = conn.create_dbinstance( + "db-master-1", 10, 'db.m1.small', 'root', 'hunter2') + security_group = conn.create_dbsecurity_group('db_sg', 'DB Security Group') + database.modify(security_groups=[security_group]) + + database = conn.get_all_dbinstances()[0] + list(database.security_groups).should.have.length_of(1) + + database.security_groups[0].name.should.equal("db_sg") + + +@mock_ec2_deprecated +@mock_rds_deprecated +def test_add_database_subnet_group(): + vpc_conn = boto.vpc.connect_to_region("us-west-2") + vpc = vpc_conn.create_vpc("10.0.0.0/16") + subnet1 = vpc_conn.create_subnet(vpc.id, "10.1.0.0/24") + subnet2 = vpc_conn.create_subnet(vpc.id, "10.2.0.0/24") + + subnet_ids = [subnet1.id, subnet2.id] + conn = boto.rds.connect_to_region("us-west-2") + subnet_group = conn.create_db_subnet_group( + "db_subnet", "my db subnet", subnet_ids) + subnet_group.name.should.equal('db_subnet') + subnet_group.description.should.equal("my db subnet") + list(subnet_group.subnet_ids).should.equal(subnet_ids) + + +@mock_ec2_deprecated +@mock_rds_deprecated +def test_describe_database_subnet_group(): + vpc_conn = boto.vpc.connect_to_region("us-west-2") + vpc = vpc_conn.create_vpc("10.0.0.0/16") + subnet = vpc_conn.create_subnet(vpc.id, "10.1.0.0/24") + + conn = boto.rds.connect_to_region("us-west-2") + conn.create_db_subnet_group("db_subnet1", "my db subnet", [subnet.id]) + conn.create_db_subnet_group("db_subnet2", "my db subnet", [subnet.id]) + + list(conn.get_all_db_subnet_groups()).should.have.length_of(2) + list(conn.get_all_db_subnet_groups("db_subnet1")).should.have.length_of(1) + + conn.get_all_db_subnet_groups.when.called_with( + "not-a-subnet").should.throw(BotoServerError) + + +@mock_ec2_deprecated +@mock_rds_deprecated +def test_delete_database_subnet_group(): + vpc_conn = boto.vpc.connect_to_region("us-west-2") + vpc = vpc_conn.create_vpc("10.0.0.0/16") + subnet = vpc_conn.create_subnet(vpc.id, "10.1.0.0/24") + + conn = boto.rds.connect_to_region("us-west-2") + conn.create_db_subnet_group("db_subnet1", "my db subnet", [subnet.id]) + list(conn.get_all_db_subnet_groups()).should.have.length_of(1) + + conn.delete_db_subnet_group("db_subnet1") + list(conn.get_all_db_subnet_groups()).should.have.length_of(0) + + conn.delete_db_subnet_group.when.called_with( + "db_subnet1").should.throw(BotoServerError) + + +@mock_ec2_deprecated +@mock_rds_deprecated +def test_create_database_in_subnet_group(): + vpc_conn = boto.vpc.connect_to_region("us-west-2") + vpc = vpc_conn.create_vpc("10.0.0.0/16") + subnet = 
vpc_conn.create_subnet(vpc.id, "10.1.0.0/24") + + conn = boto.rds.connect_to_region("us-west-2") + conn.create_db_subnet_group("db_subnet1", "my db subnet", [subnet.id]) + + database = conn.create_dbinstance("db-master-1", 10, 'db.m1.small', + 'root', 'hunter2', db_subnet_group_name="db_subnet1") + + database = conn.get_all_dbinstances("db-master-1")[0] + database.subnet_group.name.should.equal("db_subnet1") + + +@mock_rds_deprecated +def test_create_database_replica(): + conn = boto.rds.connect_to_region("us-west-2") + + primary = conn.create_dbinstance( + "db-master-1", 10, 'db.m1.small', 'root', 'hunter2') + + replica = conn.create_dbinstance_read_replica( + "replica", "db-master-1", "db.m1.small") + replica.id.should.equal("replica") + replica.instance_class.should.equal("db.m1.small") + status_info = replica.status_infos[0] + status_info.normal.should.equal(True) + status_info.status_type.should.equal('read replication') + status_info.status.should.equal('replicating') + + primary = conn.get_all_dbinstances("db-master-1")[0] + primary.read_replica_dbinstance_identifiers[0].should.equal("replica") + + conn.delete_dbinstance("replica") + + primary = conn.get_all_dbinstances("db-master-1")[0] + list(primary.read_replica_dbinstance_identifiers).should.have.length_of(0) + + +@mock_rds_deprecated +def test_create_cross_region_database_replica(): + west_1_conn = boto.rds.connect_to_region("us-west-1") + west_2_conn = boto.rds.connect_to_region("us-west-2") + + primary = west_1_conn.create_dbinstance( + "db-master-1", 10, 'db.m1.small', 'root', 'hunter2') + + primary_arn = "arn:aws:rds:us-west-1:1234567890:db:db-master-1" + replica = west_2_conn.create_dbinstance_read_replica( + "replica", + primary_arn, + "db.m1.small", + ) + + primary = west_1_conn.get_all_dbinstances("db-master-1")[0] + primary.read_replica_dbinstance_identifiers[0].should.equal("replica") + + replica = west_2_conn.get_all_dbinstances("replica")[0] + replica.instance_class.should.equal("db.m1.small") + + west_2_conn.delete_dbinstance("replica") + + primary = west_1_conn.get_all_dbinstances("db-master-1")[0] + list(primary.read_replica_dbinstance_identifiers).should.have.length_of(0) + + +@mock_rds_deprecated +def test_connecting_to_us_east_1(): + # boto does not use us-east-1 in the URL for RDS, + # and that broke moto in the past: + # https://github.com/boto/boto/blob/e271ff09364ea18d9d8b6f4d63d6b0ac6cbc9b75/boto/endpoints.json#L285 + conn = boto.rds.connect_to_region("us-east-1") + + database = conn.create_dbinstance("db-master-1", 10, 'db.m1.small', 'root', 'hunter2', + security_groups=["my_sg"]) + + database.status.should.equal('available') + database.id.should.equal("db-master-1") + database.allocated_storage.should.equal(10) + database.instance_class.should.equal("db.m1.small") + database.master_username.should.equal("root") + database.endpoint.should.equal( + ('db-master-1.aaaaaaaaaa.us-east-1.rds.amazonaws.com', 3306)) + database.security_groups[0].name.should.equal('my_sg') + + +@mock_rds_deprecated +def test_create_database_with_iops(): + conn = boto.rds.connect_to_region("us-west-2") + + database = conn.create_dbinstance( + "db-master-1", 10, 'db.m1.small', 'root', 'hunter2', iops=6000) + + database.status.should.equal('available') + database.iops.should.equal(6000) + # boto>2.36.0 may change the following property name to `storage_type` + database.StorageType.should.equal('io1') diff --git a/tests/test_rds/test_server.py b/tests/test_rds/test_server.py index 224704a0b2f8..814620331fc8 100644 --- 
a/tests/test_rds/test_server.py +++ b/tests/test_rds/test_server.py @@ -1,20 +1,20 @@ -from __future__ import unicode_literals - -import sure # noqa - -import moto.server as server -from moto import mock_rds - -''' -Test the different server responses -''' - - -@mock_rds -def test_list_databases(): - backend = server.create_backend_app("rds") - test_client = backend.test_client() - - res = test_client.get('/?Action=DescribeDBInstances') - - res.data.decode("utf-8").should.contain("") +from __future__ import unicode_literals + +import sure # noqa + +import moto.server as server +from moto import mock_rds + +''' +Test the different server responses +''' + + +@mock_rds +def test_list_databases(): + backend = server.create_backend_app("rds") + test_client = backend.test_client() + + res = test_client.get('/?Action=DescribeDBInstances') + + res.data.decode("utf-8").should.contain("") diff --git a/tests/test_rds2/test_rds2.py b/tests/test_rds2/test_rds2.py index cf9805444825..311cd7fd74d1 100644 --- a/tests/test_rds2/test_rds2.py +++ b/tests/test_rds2/test_rds2.py @@ -1,1472 +1,1472 @@ -from __future__ import unicode_literals - -from botocore.exceptions import ClientError, ParamValidationError -import boto3 -import sure # noqa -from moto import mock_ec2, mock_kms, mock_rds2 - - -@mock_rds2 -def test_create_database(): - conn = boto3.client('rds', region_name='us-west-2') - database = conn.create_db_instance(DBInstanceIdentifier='db-master-1', - AllocatedStorage=10, - Engine='postgres', - DBName='staging-postgres', - DBInstanceClass='db.m1.small', - LicenseModel='license-included', - MasterUsername='root', - MasterUserPassword='hunter2', - Port=1234, - DBSecurityGroups=["my_sg"]) - db_instance = database['DBInstance'] - db_instance['AllocatedStorage'].should.equal(10) - db_instance['DBInstanceClass'].should.equal("db.m1.small") - db_instance['LicenseModel'].should.equal("license-included") - db_instance['MasterUsername'].should.equal("root") - db_instance['DBSecurityGroups'][0][ - 'DBSecurityGroupName'].should.equal('my_sg') - db_instance['DBInstanceArn'].should.equal( - 'arn:aws:rds:us-west-2:1234567890:db:db-master-1') - db_instance['DBInstanceStatus'].should.equal('available') - db_instance['DBName'].should.equal('staging-postgres') - db_instance['DBInstanceIdentifier'].should.equal("db-master-1") - db_instance['IAMDatabaseAuthenticationEnabled'].should.equal(False) - db_instance['DbiResourceId'].should.contain("db-") - db_instance['CopyTagsToSnapshot'].should.equal(False) - - -@mock_rds2 -def test_stop_database(): - conn = boto3.client('rds', region_name='us-west-2') - database = conn.create_db_instance(DBInstanceIdentifier='db-master-1', - AllocatedStorage=10, - Engine='postgres', - DBName='staging-postgres', - DBInstanceClass='db.m1.small', - LicenseModel='license-included', - MasterUsername='root', - MasterUserPassword='hunter2', - Port=1234, - DBSecurityGroups=["my_sg"]) - mydb = conn.describe_db_instances(DBInstanceIdentifier=database['DBInstance']['DBInstanceIdentifier'])['DBInstances'][0] - mydb['DBInstanceStatus'].should.equal('available') - # test stopping database should shutdown - response = conn.stop_db_instance(DBInstanceIdentifier=mydb['DBInstanceIdentifier']) - response['ResponseMetadata']['HTTPStatusCode'].should.equal(200) - response['DBInstance']['DBInstanceStatus'].should.equal('stopped') - # test rdsclient error when trying to stop an already stopped database - 
conn.stop_db_instance.when.called_with(DBInstanceIdentifier=mydb['DBInstanceIdentifier']).should.throw(ClientError) - # test stopping a stopped database with snapshot should error and no snapshot should exist for that call - conn.stop_db_instance.when.called_with(DBInstanceIdentifier=mydb['DBInstanceIdentifier'], DBSnapshotIdentifier='rocky4570-rds-snap').should.throw(ClientError) - response = conn.describe_db_snapshots() - response['DBSnapshots'].should.equal([]) - - -@mock_rds2 -def test_start_database(): - conn = boto3.client('rds', region_name='us-west-2') - database = conn.create_db_instance(DBInstanceIdentifier='db-master-1', - AllocatedStorage=10, - Engine='postgres', - DBName='staging-postgres', - DBInstanceClass='db.m1.small', - LicenseModel='license-included', - MasterUsername='root', - MasterUserPassword='hunter2', - Port=1234, - DBSecurityGroups=["my_sg"]) - mydb = conn.describe_db_instances(DBInstanceIdentifier=database['DBInstance']['DBInstanceIdentifier'])['DBInstances'][0] - mydb['DBInstanceStatus'].should.equal('available') - # test starting an already started database should error - conn.start_db_instance.when.called_with(DBInstanceIdentifier=mydb['DBInstanceIdentifier']).should.throw(ClientError) - # stop and test start - should go from stopped to available, create snapshot and check snapshot - response = conn.stop_db_instance(DBInstanceIdentifier=mydb['DBInstanceIdentifier'], DBSnapshotIdentifier='rocky4570-rds-snap') - response['ResponseMetadata']['HTTPStatusCode'].should.equal(200) - response['DBInstance']['DBInstanceStatus'].should.equal('stopped') - response = conn.describe_db_snapshots() - response['DBSnapshots'][0]['DBSnapshotIdentifier'].should.equal('rocky4570-rds-snap') - response = conn.start_db_instance(DBInstanceIdentifier=mydb['DBInstanceIdentifier']) - response['ResponseMetadata']['HTTPStatusCode'].should.equal(200) - response['DBInstance']['DBInstanceStatus'].should.equal('available') - # starting database should not remove snapshot - response = conn.describe_db_snapshots() - response['DBSnapshots'][0]['DBSnapshotIdentifier'].should.equal('rocky4570-rds-snap') - # test stopping database with an already-existing snapshot identifier should throw an error - conn.stop_db_instance.when.called_with(DBInstanceIdentifier=mydb['DBInstanceIdentifier'], DBSnapshotIdentifier='rocky4570-rds-snap').should.throw(ClientError) - # test stopping database without requesting a snapshot should succeed. - response = conn.stop_db_instance(DBInstanceIdentifier=mydb['DBInstanceIdentifier']) - response['ResponseMetadata']['HTTPStatusCode'].should.equal(200) - response['DBInstance']['DBInstanceStatus'].should.equal('stopped') - - -@mock_rds2 -def test_fail_to_stop_multi_az(): - conn = boto3.client('rds', region_name='us-west-2') - database = conn.create_db_instance(DBInstanceIdentifier='db-master-1', - AllocatedStorage=10, - Engine='postgres', - DBName='staging-postgres', - DBInstanceClass='db.m1.small', - LicenseModel='license-included', - MasterUsername='root', - MasterUserPassword='hunter2', - Port=1234, - DBSecurityGroups=["my_sg"], - MultiAZ=True) - - mydb = conn.describe_db_instances(DBInstanceIdentifier=database['DBInstance']['DBInstanceIdentifier'])['DBInstances'][0] - mydb['DBInstanceStatus'].should.equal('available') - # multi-az databases aren't allowed to be shut down at this time. - conn.stop_db_instance.when.called_with(DBInstanceIdentifier=mydb['DBInstanceIdentifier']).should.throw(ClientError) - # multi-az databases aren't allowed to be started up at this time.
- conn.start_db_instance.when.called_with(DBInstanceIdentifier=mydb['DBInstanceIdentifier']).should.throw(ClientError) - - -@mock_rds2 -def test_fail_to_stop_readreplica(): - conn = boto3.client('rds', region_name='us-west-2') - database = conn.create_db_instance(DBInstanceIdentifier='db-master-1', - AllocatedStorage=10, - Engine='postgres', - DBName='staging-postgres', - DBInstanceClass='db.m1.small', - LicenseModel='license-included', - MasterUsername='root', - MasterUserPassword='hunter2', - Port=1234, - DBSecurityGroups=["my_sg"]) - - replica = conn.create_db_instance_read_replica(DBInstanceIdentifier="db-replica-1", - SourceDBInstanceIdentifier="db-master-1", - DBInstanceClass="db.m1.small") - - mydb = conn.describe_db_instances(DBInstanceIdentifier=replica['DBInstance']['DBInstanceIdentifier'])['DBInstances'][0] - mydb['DBInstanceStatus'].should.equal('available') - # read-replicas are not allowed to be stopped at this time. - conn.stop_db_instance.when.called_with(DBInstanceIdentifier=mydb['DBInstanceIdentifier']).should.throw(ClientError) - # read-replicas are not allowed to be started at this time. - conn.start_db_instance.when.called_with(DBInstanceIdentifier=mydb['DBInstanceIdentifier']).should.throw(ClientError) - - -@mock_rds2 -def test_get_databases(): - conn = boto3.client('rds', region_name='us-west-2') - - instances = conn.describe_db_instances() - list(instances['DBInstances']).should.have.length_of(0) - - conn.create_db_instance(DBInstanceIdentifier='db-master-1', - AllocatedStorage=10, - DBInstanceClass='postgres', - Engine='db.m1.small', - MasterUsername='root', - MasterUserPassword='hunter2', - Port=1234, - DBSecurityGroups=['my_sg']) - conn.create_db_instance(DBInstanceIdentifier='db-master-2', - AllocatedStorage=10, - DBInstanceClass='postgres', - Engine='db.m1.small', - MasterUsername='root', - MasterUserPassword='hunter2', - Port=1234, - DBSecurityGroups=['my_sg']) - instances = conn.describe_db_instances() - list(instances['DBInstances']).should.have.length_of(2) - - instances = conn.describe_db_instances(DBInstanceIdentifier="db-master-1") - list(instances['DBInstances']).should.have.length_of(1) - instances['DBInstances'][0][ - 'DBInstanceIdentifier'].should.equal("db-master-1") - instances['DBInstances'][0]['DBInstanceArn'].should.equal( - 'arn:aws:rds:us-west-2:1234567890:db:db-master-1') - - -@mock_rds2 -def test_get_databases_paginated(): - conn = boto3.client('rds', region_name="us-west-2") - - for i in range(51): - conn.create_db_instance(AllocatedStorage=5, - Port=5432, - DBInstanceIdentifier='rds%d' % i, - DBInstanceClass='db.t1.micro', - Engine='postgres') - - resp = conn.describe_db_instances() - resp["DBInstances"].should.have.length_of(50) - resp["Marker"].should.equal(resp["DBInstances"][-1]['DBInstanceIdentifier']) - - resp2 = conn.describe_db_instances(Marker=resp["Marker"]) - resp2["DBInstances"].should.have.length_of(1) - - resp3 = conn.describe_db_instances(MaxRecords=100) - resp3["DBInstances"].should.have.length_of(51) - -@mock_rds2 -def test_describe_non_existant_database(): - conn = boto3.client('rds', region_name='us-west-2') - conn.describe_db_instances.when.called_with( - DBInstanceIdentifier="not-a-db").should.throw(ClientError) - - -@mock_rds2 -def test_modify_db_instance(): - conn = boto3.client('rds', region_name='us-west-2') - database = conn.create_db_instance(DBInstanceIdentifier='db-master-1', - AllocatedStorage=10, - DBInstanceClass='postgres', - Engine='db.m1.small', - MasterUsername='root', - MasterUserPassword='hunter2', - 
Port=1234, - DBSecurityGroups=['my_sg']) - instances = conn.describe_db_instances(DBInstanceIdentifier='db-master-1') - instances['DBInstances'][0]['AllocatedStorage'].should.equal(10) - conn.modify_db_instance(DBInstanceIdentifier='db-master-1', - AllocatedStorage=20, - ApplyImmediately=True) - instances = conn.describe_db_instances(DBInstanceIdentifier='db-master-1') - instances['DBInstances'][0]['AllocatedStorage'].should.equal(20) - - -@mock_rds2 -def test_rename_db_instance(): - conn = boto3.client('rds', region_name='us-west-2') - database = conn.create_db_instance(DBInstanceIdentifier='db-master-1', - AllocatedStorage=10, - DBInstanceClass='postgres', - Engine='db.m1.small', - MasterUsername='root', - MasterUserPassword='hunter2', - Port=1234, - DBSecurityGroups=['my_sg']) - instances = conn.describe_db_instances(DBInstanceIdentifier="db-master-1") - list(instances['DBInstances']).should.have.length_of(1) - conn.describe_db_instances.when.called_with(DBInstanceIdentifier="db-master-2").should.throw(ClientError) - conn.modify_db_instance(DBInstanceIdentifier='db-master-1', - NewDBInstanceIdentifier='db-master-2', - ApplyImmediately=True) - conn.describe_db_instances.when.called_with(DBInstanceIdentifier="db-master-1").should.throw(ClientError) - instances = conn.describe_db_instances(DBInstanceIdentifier="db-master-2") - list(instances['DBInstances']).should.have.length_of(1) - - -@mock_rds2 -def test_modify_non_existant_database(): - conn = boto3.client('rds', region_name='us-west-2') - conn.modify_db_instance.when.called_with(DBInstanceIdentifier='not-a-db', - AllocatedStorage=20, - ApplyImmediately=True).should.throw(ClientError) - - -@mock_rds2 -def test_reboot_db_instance(): - conn = boto3.client('rds', region_name='us-west-2') - conn.create_db_instance(DBInstanceIdentifier='db-master-1', - AllocatedStorage=10, - DBInstanceClass='postgres', - Engine='db.m1.small', - MasterUsername='root', - MasterUserPassword='hunter2', - Port=1234, - DBSecurityGroups=['my_sg']) - database = conn.reboot_db_instance(DBInstanceIdentifier='db-master-1') - database['DBInstance']['DBInstanceIdentifier'].should.equal("db-master-1") - - -@mock_rds2 -def test_reboot_non_existant_database(): - conn = boto3.client('rds', region_name='us-west-2') - conn.reboot_db_instance.when.called_with( - DBInstanceIdentifier="not-a-db").should.throw(ClientError) - - -@mock_rds2 -def test_delete_database(): - conn = boto3.client('rds', region_name='us-west-2') - instances = conn.describe_db_instances() - list(instances['DBInstances']).should.have.length_of(0) - conn.create_db_instance(DBInstanceIdentifier='db-primary-1', - AllocatedStorage=10, - Engine='postgres', - DBInstanceClass='db.m1.small', - MasterUsername='root', - MasterUserPassword='hunter2', - Port=1234, - DBSecurityGroups=['my_sg']) - instances = conn.describe_db_instances() - list(instances['DBInstances']).should.have.length_of(1) - - conn.delete_db_instance(DBInstanceIdentifier="db-primary-1", - FinalDBSnapshotIdentifier='primary-1-snapshot') - - instances = conn.describe_db_instances() - list(instances['DBInstances']).should.have.length_of(0) - - # Saved the snapshot - snapshots = conn.describe_db_snapshots(DBInstanceIdentifier="db-primary-1").get('DBSnapshots') - snapshots[0].get('Engine').should.equal('postgres') - - -@mock_rds2 -def test_delete_non_existant_database(): - conn = boto3.client('rds2', region_name="us-west-2") - conn.delete_db_instance.when.called_with( - DBInstanceIdentifier="not-a-db").should.throw(ClientError) - - -@mock_rds2 -def 
test_create_db_snapshots(): - conn = boto3.client('rds', region_name='us-west-2') - conn.create_db_snapshot.when.called_with( - DBInstanceIdentifier='db-primary-1', - DBSnapshotIdentifier='snapshot-1').should.throw(ClientError) - - conn.create_db_instance(DBInstanceIdentifier='db-primary-1', - AllocatedStorage=10, - Engine='postgres', - DBName='staging-postgres', - DBInstanceClass='db.m1.small', - MasterUsername='root', - MasterUserPassword='hunter2', - Port=1234, - DBSecurityGroups=["my_sg"]) - - snapshot = conn.create_db_snapshot(DBInstanceIdentifier='db-primary-1', - DBSnapshotIdentifier='g-1').get('DBSnapshot') - - snapshot.get('Engine').should.equal('postgres') - snapshot.get('DBInstanceIdentifier').should.equal('db-primary-1') - snapshot.get('DBSnapshotIdentifier').should.equal('g-1') - result = conn.list_tags_for_resource(ResourceName=snapshot['DBSnapshotArn']) - result['TagList'].should.equal([]) - - -@mock_rds2 -def test_create_db_snapshots_copy_tags(): - conn = boto3.client('rds', region_name='us-west-2') - conn.create_db_snapshot.when.called_with( - DBInstanceIdentifier='db-primary-1', - DBSnapshotIdentifier='snapshot-1').should.throw(ClientError) - - conn.create_db_instance(DBInstanceIdentifier='db-primary-1', - AllocatedStorage=10, - Engine='postgres', - DBName='staging-postgres', - DBInstanceClass='db.m1.small', - MasterUsername='root', - MasterUserPassword='hunter2', - Port=1234, - DBSecurityGroups=["my_sg"], - CopyTagsToSnapshot=True, - Tags=[ - { - 'Key': 'foo', - 'Value': 'bar', - }, - { - 'Key': 'foo1', - 'Value': 'bar1', - }, - ]) - - snapshot = conn.create_db_snapshot(DBInstanceIdentifier='db-primary-1', - DBSnapshotIdentifier='g-1').get('DBSnapshot') - - snapshot.get('Engine').should.equal('postgres') - snapshot.get('DBInstanceIdentifier').should.equal('db-primary-1') - snapshot.get('DBSnapshotIdentifier').should.equal('g-1') - result = conn.list_tags_for_resource(ResourceName=snapshot['DBSnapshotArn']) - result['TagList'].should.equal([{'Value': 'bar', - 'Key': 'foo'}, - {'Value': 'bar1', - 'Key': 'foo1'}]) - - -@mock_rds2 -def test_describe_db_snapshots(): - conn = boto3.client('rds', region_name='us-west-2') - conn.create_db_instance(DBInstanceIdentifier='db-primary-1', - AllocatedStorage=10, - Engine='postgres', - DBName='staging-postgres', - DBInstanceClass='db.m1.small', - MasterUsername='root', - MasterUserPassword='hunter2', - Port=1234, - DBSecurityGroups=["my_sg"]) - - created = conn.create_db_snapshot(DBInstanceIdentifier='db-primary-1', - DBSnapshotIdentifier='snapshot-1').get('DBSnapshot') - - created.get('Engine').should.equal('postgres') - - by_database_id = conn.describe_db_snapshots(DBInstanceIdentifier='db-primary-1').get('DBSnapshots') - by_snapshot_id = conn.describe_db_snapshots(DBSnapshotIdentifier='snapshot-1').get('DBSnapshots') - by_snapshot_id.should.equal(by_database_id) - - snapshot = by_snapshot_id[0] - snapshot.should.equal(created) - snapshot.get('Engine').should.equal('postgres') - - conn.create_db_snapshot(DBInstanceIdentifier='db-primary-1', - DBSnapshotIdentifier='snapshot-2') - snapshots = conn.describe_db_snapshots(DBInstanceIdentifier='db-primary-1').get('DBSnapshots') - snapshots.should.have.length_of(2) - - -@mock_rds2 -def test_delete_db_snapshot(): - conn = boto3.client('rds', region_name='us-west-2') - conn.create_db_instance(DBInstanceIdentifier='db-primary-1', - AllocatedStorage=10, - Engine='postgres', - DBName='staging-postgres', - DBInstanceClass='db.m1.small', - MasterUsername='root', - MasterUserPassword='hunter2', - 
Port=1234, - DBSecurityGroups=["my_sg"]) - conn.create_db_snapshot(DBInstanceIdentifier='db-primary-1', - DBSnapshotIdentifier='snapshot-1') - - conn.describe_db_snapshots(DBSnapshotIdentifier='snapshot-1').get('DBSnapshots')[0] - conn.delete_db_snapshot(DBSnapshotIdentifier='snapshot-1') - conn.describe_db_snapshots.when.called_with( - DBSnapshotIdentifier='snapshot-1').should.throw(ClientError) - - -@mock_rds2 -def test_create_option_group(): - conn = boto3.client('rds', region_name='us-west-2') - option_group = conn.create_option_group(OptionGroupName='test', - EngineName='mysql', - MajorEngineVersion='5.6', - OptionGroupDescription='test option group') - option_group['OptionGroup']['OptionGroupName'].should.equal('test') - option_group['OptionGroup']['EngineName'].should.equal('mysql') - option_group['OptionGroup'][ - 'OptionGroupDescription'].should.equal('test option group') - option_group['OptionGroup']['MajorEngineVersion'].should.equal('5.6') - - -@mock_rds2 -def test_create_option_group_bad_engine_name(): - conn = boto3.client('rds', region_name='us-west-2') - conn.create_option_group.when.called_with(OptionGroupName='test', - EngineName='invalid_engine', - MajorEngineVersion='5.6', - OptionGroupDescription='test invalid engine').should.throw(ClientError) - - -@mock_rds2 -def test_create_option_group_bad_engine_major_version(): - conn = boto3.client('rds', region_name='us-west-2') - conn.create_option_group.when.called_with(OptionGroupName='test', - EngineName='mysql', - MajorEngineVersion='6.6.6', - OptionGroupDescription='test invalid engine version').should.throw(ClientError) - - -@mock_rds2 -def test_create_option_group_empty_description(): - conn = boto3.client('rds', region_name='us-west-2') - conn.create_option_group.when.called_with(OptionGroupName='test', - EngineName='mysql', - MajorEngineVersion='5.6', - OptionGroupDescription='').should.throw(ClientError) - - -@mock_rds2 -def test_create_option_group_duplicate(): - conn = boto3.client('rds', region_name='us-west-2') - conn.create_option_group(OptionGroupName='test', - EngineName='mysql', - MajorEngineVersion='5.6', - OptionGroupDescription='test option group') - conn.create_option_group.when.called_with(OptionGroupName='test', - EngineName='mysql', - MajorEngineVersion='5.6', - OptionGroupDescription='test option group').should.throw(ClientError) - - -@mock_rds2 -def test_describe_option_group(): - conn = boto3.client('rds', region_name='us-west-2') - conn.create_option_group(OptionGroupName='test', - EngineName='mysql', - MajorEngineVersion='5.6', - OptionGroupDescription='test option group') - option_groups = conn.describe_option_groups(OptionGroupName='test') - option_groups['OptionGroupsList'][0][ - 'OptionGroupName'].should.equal('test') - - -@mock_rds2 -def test_describe_non_existant_option_group(): - conn = boto3.client('rds', region_name='us-west-2') - conn.describe_option_groups.when.called_with( - OptionGroupName="not-a-option-group").should.throw(ClientError) - - -@mock_rds2 -def test_delete_option_group(): - conn = boto3.client('rds', region_name='us-west-2') - conn.create_option_group(OptionGroupName='test', - EngineName='mysql', - MajorEngineVersion='5.6', - OptionGroupDescription='test option group') - option_groups = conn.describe_option_groups(OptionGroupName='test') - option_groups['OptionGroupsList'][0][ - 'OptionGroupName'].should.equal('test') - conn.delete_option_group(OptionGroupName='test') - conn.describe_option_groups.when.called_with( - OptionGroupName='test').should.throw(ClientError) - - 
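The option-group tests above and below exercise moto's full option-group lifecycle: create_option_group (which rejects bad engine names, bad major versions, empty descriptions, and duplicates), describe_option_groups, and delete_option_group, with describe or delete on a missing group raising ClientError. A minimal standalone sketch of that lifecycle, assuming only boto3 and moto are installed (the function name and the 'demo' group name are illustrative, not part of this patch):

    from moto import mock_rds2
    import boto3
    from botocore.exceptions import ClientError


    @mock_rds2
    def option_group_lifecycle():
        conn = boto3.client('rds', region_name='us-west-2')
        # create: moto validates the engine name and major engine version
        conn.create_option_group(OptionGroupName='demo',
                                 EngineName='mysql',
                                 MajorEngineVersion='5.6',
                                 OptionGroupDescription='demo option group')
        # describe: the new group comes back under OptionGroupsList
        groups = conn.describe_option_groups(OptionGroupName='demo')
        assert groups['OptionGroupsList'][0]['OptionGroupName'] == 'demo'
        # delete: a later describe on the same name raises ClientError
        conn.delete_option_group(OptionGroupName='demo')
        try:
            conn.describe_option_groups(OptionGroupName='demo')
        except ClientError:
            pass

The tests themselves assert these error paths with sure's .when.called_with(...).should.throw(ClientError); outside that style, a plain try/except as above (or pytest.raises) does the same job.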
-@mock_rds2 -def test_delete_non_existant_option_group(): - conn = boto3.client('rds', region_name='us-west-2') - conn.delete_option_group.when.called_with( - OptionGroupName='non-existant').should.throw(ClientError) - - -@mock_rds2 -def test_describe_option_group_options(): - conn = boto3.client('rds', region_name='us-west-2') - option_group_options = conn.describe_option_group_options( - EngineName='sqlserver-ee') - len(option_group_options['OptionGroupOptions']).should.equal(4) - option_group_options = conn.describe_option_group_options( - EngineName='sqlserver-ee', MajorEngineVersion='11.00') - len(option_group_options['OptionGroupOptions']).should.equal(2) - option_group_options = conn.describe_option_group_options( - EngineName='mysql', MajorEngineVersion='5.6') - len(option_group_options['OptionGroupOptions']).should.equal(1) - conn.describe_option_group_options.when.called_with( - EngineName='non-existent').should.throw(ClientError) - conn.describe_option_group_options.when.called_with( - EngineName='mysql', MajorEngineVersion='non-existent').should.throw(ClientError) - - -@mock_rds2 -def test_modify_option_group(): - conn = boto3.client('rds', region_name='us-west-2') - conn.create_option_group(OptionGroupName='test', EngineName='mysql', - MajorEngineVersion='5.6', OptionGroupDescription='test option group') - # TODO: create option and validate before deleting. - # if Someone can tell me how the hell to use this function - # to add options to an option_group, I can finish coding this. - result = conn.modify_option_group(OptionGroupName='test', OptionsToInclude=[ - ], OptionsToRemove=['MEMCACHED'], ApplyImmediately=True) - result['OptionGroup']['EngineName'].should.equal('mysql') - result['OptionGroup']['Options'].should.equal([]) - result['OptionGroup']['OptionGroupName'].should.equal('test') - - -@mock_rds2 -def test_modify_option_group_no_options(): - conn = boto3.client('rds', region_name='us-west-2') - conn.create_option_group(OptionGroupName='test', EngineName='mysql', - MajorEngineVersion='5.6', OptionGroupDescription='test option group') - conn.modify_option_group.when.called_with( - OptionGroupName='test').should.throw(ClientError) - - -@mock_rds2 -def test_modify_non_existant_option_group(): - conn = boto3.client('rds', region_name='us-west-2') - conn.modify_option_group.when.called_with(OptionGroupName='non-existant', OptionsToInclude=[( - 'OptionName', 'Port', 'DBSecurityGroupMemberships', 'VpcSecurityGroupMemberships', 'OptionSettings')]).should.throw(ParamValidationError) - - -@mock_rds2 -def test_delete_non_existant_database(): - conn = boto3.client('rds', region_name='us-west-2') - conn.delete_db_instance.when.called_with( - DBInstanceIdentifier="not-a-db").should.throw(ClientError) - - -@mock_rds2 -def test_list_tags_invalid_arn(): - conn = boto3.client('rds', region_name='us-west-2') - conn.list_tags_for_resource.when.called_with( - ResourceName='arn:aws:rds:bad-arn').should.throw(ClientError) - - -@mock_rds2 -def test_list_tags_db(): - conn = boto3.client('rds', region_name='us-west-2') - result = conn.list_tags_for_resource( - ResourceName='arn:aws:rds:us-west-2:1234567890:db:foo') - result['TagList'].should.equal([]) - test_instance = conn.create_db_instance( - DBInstanceIdentifier='db-with-tags', - AllocatedStorage=10, - DBInstanceClass='postgres', - Engine='db.m1.small', - MasterUsername='root', - MasterUserPassword='hunter2', - Port=1234, - DBSecurityGroups=['my_sg'], - Tags=[ - { - 'Key': 'foo', - 'Value': 'bar', - }, - { - 'Key': 'foo1', - 'Value': 'bar1', 
- }, - ]) - result = conn.list_tags_for_resource( - ResourceName=test_instance['DBInstance']['DBInstanceArn']) - result['TagList'].should.equal([{'Value': 'bar', - 'Key': 'foo'}, - {'Value': 'bar1', - 'Key': 'foo1'}]) - - -@mock_rds2 -def test_add_tags_db(): - conn = boto3.client('rds', region_name='us-west-2') - conn.create_db_instance(DBInstanceIdentifier='db-without-tags', - AllocatedStorage=10, - DBInstanceClass='postgres', - Engine='db.m1.small', - MasterUsername='root', - MasterUserPassword='hunter2', - Port=1234, - DBSecurityGroups=['my_sg'], - Tags=[ - { - 'Key': 'foo', - 'Value': 'bar', - }, - { - 'Key': 'foo1', - 'Value': 'bar1', - }, - ]) - result = conn.list_tags_for_resource( - ResourceName='arn:aws:rds:us-west-2:1234567890:db:db-without-tags') - list(result['TagList']).should.have.length_of(2) - conn.add_tags_to_resource(ResourceName='arn:aws:rds:us-west-2:1234567890:db:db-without-tags', - Tags=[ - { - 'Key': 'foo', - 'Value': 'fish', - }, - { - 'Key': 'foo2', - 'Value': 'bar2', - }, - ]) - result = conn.list_tags_for_resource( - ResourceName='arn:aws:rds:us-west-2:1234567890:db:db-without-tags') - list(result['TagList']).should.have.length_of(3) - - -@mock_rds2 -def test_remove_tags_db(): - conn = boto3.client('rds', region_name='us-west-2') - conn.create_db_instance(DBInstanceIdentifier='db-with-tags', - AllocatedStorage=10, - DBInstanceClass='postgres', - Engine='db.m1.small', - MasterUsername='root', - MasterUserPassword='hunter2', - Port=1234, - DBSecurityGroups=['my_sg'], - Tags=[ - { - 'Key': 'foo', - 'Value': 'bar', - }, - { - 'Key': 'foo1', - 'Value': 'bar1', - }, - ]) - result = conn.list_tags_for_resource( - ResourceName='arn:aws:rds:us-west-2:1234567890:db:db-with-tags') - list(result['TagList']).should.have.length_of(2) - conn.remove_tags_from_resource( - ResourceName='arn:aws:rds:us-west-2:1234567890:db:db-with-tags', TagKeys=['foo']) - result = conn.list_tags_for_resource( - ResourceName='arn:aws:rds:us-west-2:1234567890:db:db-with-tags') - len(result['TagList']).should.equal(1) - - -@mock_rds2 -def test_list_tags_snapshot(): - conn = boto3.client('rds', region_name='us-west-2') - result = conn.list_tags_for_resource( - ResourceName='arn:aws:rds:us-west-2:1234567890:snapshot:foo') - result['TagList'].should.equal([]) - conn.create_db_instance(DBInstanceIdentifier='db-primary-1', - AllocatedStorage=10, - Engine='postgres', - DBName='staging-postgres', - DBInstanceClass='db.m1.small', - MasterUsername='root', - MasterUserPassword='hunter2', - Port=1234, - DBSecurityGroups=["my_sg"]) - snapshot = conn.create_db_snapshot(DBInstanceIdentifier='db-primary-1', - DBSnapshotIdentifier='snapshot-with-tags', - Tags=[ - { - 'Key': 'foo', - 'Value': 'bar', - }, - { - 'Key': 'foo1', - 'Value': 'bar1', - }, - ]) - result = conn.list_tags_for_resource(ResourceName=snapshot['DBSnapshot']['DBSnapshotArn']) - result['TagList'].should.equal([{'Value': 'bar', - 'Key': 'foo'}, - {'Value': 'bar1', - 'Key': 'foo1'}]) - - -@mock_rds2 -def test_add_tags_snapshot(): - conn = boto3.client('rds', region_name='us-west-2') - conn.create_db_instance(DBInstanceIdentifier='db-primary-1', - AllocatedStorage=10, - Engine='postgres', - DBName='staging-postgres', - DBInstanceClass='db.m1.small', - MasterUsername='root', - MasterUserPassword='hunter2', - Port=1234, - DBSecurityGroups=["my_sg"]) - snapshot = conn.create_db_snapshot(DBInstanceIdentifier='db-primary-1', - DBSnapshotIdentifier='snapshot-without-tags', - Tags=[ - { - 'Key': 'foo', - 'Value': 'bar', - }, - { - 'Key': 'foo1', - 'Value': 
'bar1', - }, - ]) - result = conn.list_tags_for_resource( - ResourceName='arn:aws:rds:us-west-2:1234567890:snapshot:snapshot-without-tags') - list(result['TagList']).should.have.length_of(2) - conn.add_tags_to_resource(ResourceName='arn:aws:rds:us-west-2:1234567890:snapshot:snapshot-without-tags', - Tags=[ - { - 'Key': 'foo', - 'Value': 'fish', - }, - { - 'Key': 'foo2', - 'Value': 'bar2', - }, - ]) - result = conn.list_tags_for_resource( - ResourceName='arn:aws:rds:us-west-2:1234567890:snapshot:snapshot-without-tags') - list(result['TagList']).should.have.length_of(3) - - -@mock_rds2 -def test_remove_tags_snapshot(): - conn = boto3.client('rds', region_name='us-west-2') - conn.create_db_instance(DBInstanceIdentifier='db-primary-1', - AllocatedStorage=10, - Engine='postgres', - DBName='staging-postgres', - DBInstanceClass='db.m1.small', - MasterUsername='root', - MasterUserPassword='hunter2', - Port=1234, - DBSecurityGroups=["my_sg"]) - snapshot = conn.create_db_snapshot(DBInstanceIdentifier='db-primary-1', - DBSnapshotIdentifier='snapshot-with-tags', - Tags=[ - { - 'Key': 'foo', - 'Value': 'bar', - }, - { - 'Key': 'foo1', - 'Value': 'bar1', - }, - ]) - result = conn.list_tags_for_resource( - ResourceName='arn:aws:rds:us-west-2:1234567890:snapshot:snapshot-with-tags') - list(result['TagList']).should.have.length_of(2) - conn.remove_tags_from_resource( - ResourceName='arn:aws:rds:us-west-2:1234567890:snapshot:snapshot-with-tags', TagKeys=['foo']) - result = conn.list_tags_for_resource( - ResourceName='arn:aws:rds:us-west-2:1234567890:snapshot:snapshot-with-tags') - len(result['TagList']).should.equal(1) - - -@mock_rds2 -def test_add_tags_option_group(): - conn = boto3.client('rds', region_name='us-west-2') - conn.create_option_group(OptionGroupName='test', - EngineName='mysql', - MajorEngineVersion='5.6', - OptionGroupDescription='test option group') - result = conn.list_tags_for_resource( - ResourceName='arn:aws:rds:us-west-2:1234567890:og:test') - list(result['TagList']).should.have.length_of(0) - conn.add_tags_to_resource(ResourceName='arn:aws:rds:us-west-2:1234567890:og:test', - Tags=[ - { - 'Key': 'foo', - 'Value': 'fish', - }, - { - 'Key': 'foo2', - 'Value': 'bar2', - }]) - result = conn.list_tags_for_resource( - ResourceName='arn:aws:rds:us-west-2:1234567890:og:test') - list(result['TagList']).should.have.length_of(2) - - -@mock_rds2 -def test_remove_tags_option_group(): - conn = boto3.client('rds', region_name='us-west-2') - conn.create_option_group(OptionGroupName='test', - EngineName='mysql', - MajorEngineVersion='5.6', - OptionGroupDescription='test option group') - result = conn.list_tags_for_resource( - ResourceName='arn:aws:rds:us-west-2:1234567890:og:test') - conn.add_tags_to_resource(ResourceName='arn:aws:rds:us-west-2:1234567890:og:test', - Tags=[ - { - 'Key': 'foo', - 'Value': 'fish', - }, - { - 'Key': 'foo2', - 'Value': 'bar2', - }]) - result = conn.list_tags_for_resource( - ResourceName='arn:aws:rds:us-west-2:1234567890:og:test') - list(result['TagList']).should.have.length_of(2) - conn.remove_tags_from_resource(ResourceName='arn:aws:rds:us-west-2:1234567890:og:test', - TagKeys=['foo']) - result = conn.list_tags_for_resource( - ResourceName='arn:aws:rds:us-west-2:1234567890:og:test') - list(result['TagList']).should.have.length_of(1) - - -@mock_rds2 -def test_create_database_security_group(): - conn = boto3.client('rds', region_name='us-west-2') - - result = conn.create_db_security_group( - DBSecurityGroupName='db_sg', DBSecurityGroupDescription='DB Security Group') - 
result['DBSecurityGroup']['DBSecurityGroupName'].should.equal("db_sg") - result['DBSecurityGroup'][ - 'DBSecurityGroupDescription'].should.equal("DB Security Group") - result['DBSecurityGroup']['IPRanges'].should.equal([]) - - -@mock_rds2 -def test_get_security_groups(): - conn = boto3.client('rds', region_name='us-west-2') - - result = conn.describe_db_security_groups() - result['DBSecurityGroups'].should.have.length_of(0) - - conn.create_db_security_group( - DBSecurityGroupName='db_sg1', DBSecurityGroupDescription='DB Security Group') - conn.create_db_security_group( - DBSecurityGroupName='db_sg2', DBSecurityGroupDescription='DB Security Group') - - result = conn.describe_db_security_groups() - result['DBSecurityGroups'].should.have.length_of(2) - - result = conn.describe_db_security_groups(DBSecurityGroupName="db_sg1") - result['DBSecurityGroups'].should.have.length_of(1) - result['DBSecurityGroups'][0]['DBSecurityGroupName'].should.equal("db_sg1") - - -@mock_rds2 -def test_get_non_existant_security_group(): - conn = boto3.client('rds', region_name='us-west-2') - conn.describe_db_security_groups.when.called_with( - DBSecurityGroupName="not-a-sg").should.throw(ClientError) - - -@mock_rds2 -def test_delete_database_security_group(): - conn = boto3.client('rds', region_name='us-west-2') - conn.create_db_security_group( - DBSecurityGroupName='db_sg', DBSecurityGroupDescription='DB Security Group') - - result = conn.describe_db_security_groups() - result['DBSecurityGroups'].should.have.length_of(1) - - conn.delete_db_security_group(DBSecurityGroupName="db_sg") - result = conn.describe_db_security_groups() - result['DBSecurityGroups'].should.have.length_of(0) - - -@mock_rds2 -def test_delete_non_existant_security_group(): - conn = boto3.client('rds', region_name='us-west-2') - conn.delete_db_security_group.when.called_with( - DBSecurityGroupName="not-a-db").should.throw(ClientError) - - -@mock_rds2 -def test_security_group_authorize(): - conn = boto3.client('rds', region_name='us-west-2') - security_group = conn.create_db_security_group(DBSecurityGroupName='db_sg', - DBSecurityGroupDescription='DB Security Group') - security_group['DBSecurityGroup']['IPRanges'].should.equal([]) - - conn.authorize_db_security_group_ingress(DBSecurityGroupName='db_sg', - CIDRIP='10.3.2.45/32') - - result = conn.describe_db_security_groups(DBSecurityGroupName="db_sg") - result['DBSecurityGroups'][0]['IPRanges'].should.have.length_of(1) - result['DBSecurityGroups'][0]['IPRanges'].should.equal( - [{'Status': 'authorized', 'CIDRIP': '10.3.2.45/32'}]) - - conn.authorize_db_security_group_ingress(DBSecurityGroupName='db_sg', - CIDRIP='10.3.2.46/32') - result = conn.describe_db_security_groups(DBSecurityGroupName="db_sg") - result['DBSecurityGroups'][0]['IPRanges'].should.have.length_of(2) - result['DBSecurityGroups'][0]['IPRanges'].should.equal([ - {'Status': 'authorized', 'CIDRIP': '10.3.2.45/32'}, - {'Status': 'authorized', 'CIDRIP': '10.3.2.46/32'}, - ]) - - -@mock_rds2 -def test_add_security_group_to_database(): - conn = boto3.client('rds', region_name='us-west-2') - - conn.create_db_instance(DBInstanceIdentifier='db-master-1', - AllocatedStorage=10, - DBInstanceClass='postgres', - Engine='db.m1.small', - MasterUsername='root', - MasterUserPassword='hunter2', - Port=1234) - - result = conn.describe_db_instances() - result['DBInstances'][0]['DBSecurityGroups'].should.equal([]) - conn.create_db_security_group(DBSecurityGroupName='db_sg', - DBSecurityGroupDescription='DB Security Group') - 
conn.modify_db_instance(DBInstanceIdentifier='db-master-1', - DBSecurityGroups=['db_sg']) - result = conn.describe_db_instances() - result['DBInstances'][0]['DBSecurityGroups'][0][ - 'DBSecurityGroupName'].should.equal('db_sg') - - -@mock_rds2 -def test_list_tags_security_group(): - conn = boto3.client('rds', region_name='us-west-2') - result = conn.describe_db_subnet_groups() - result['DBSubnetGroups'].should.have.length_of(0) - - security_group = conn.create_db_security_group(DBSecurityGroupName="db_sg", - DBSecurityGroupDescription='DB Security Group', - Tags=[{'Value': 'bar', - 'Key': 'foo'}, - {'Value': 'bar1', - 'Key': 'foo1'}])['DBSecurityGroup']['DBSecurityGroupName'] - resource = 'arn:aws:rds:us-west-2:1234567890:secgrp:{0}'.format( - security_group) - result = conn.list_tags_for_resource(ResourceName=resource) - result['TagList'].should.equal([{'Value': 'bar', - 'Key': 'foo'}, - {'Value': 'bar1', - 'Key': 'foo1'}]) - - -@mock_rds2 -def test_add_tags_security_group(): - conn = boto3.client('rds', region_name='us-west-2') - result = conn.describe_db_subnet_groups() - result['DBSubnetGroups'].should.have.length_of(0) - - security_group = conn.create_db_security_group(DBSecurityGroupName="db_sg", - DBSecurityGroupDescription='DB Security Group')['DBSecurityGroup']['DBSecurityGroupName'] - - resource = 'arn:aws:rds:us-west-2:1234567890:secgrp:{0}'.format( - security_group) - conn.add_tags_to_resource(ResourceName=resource, - Tags=[{'Value': 'bar', - 'Key': 'foo'}, - {'Value': 'bar1', - 'Key': 'foo1'}]) - - result = conn.list_tags_for_resource(ResourceName=resource) - result['TagList'].should.equal([{'Value': 'bar', - 'Key': 'foo'}, - {'Value': 'bar1', - 'Key': 'foo1'}]) - - -@mock_rds2 -def test_remove_tags_security_group(): - conn = boto3.client('rds', region_name='us-west-2') - result = conn.describe_db_subnet_groups() - result['DBSubnetGroups'].should.have.length_of(0) - - security_group = conn.create_db_security_group(DBSecurityGroupName="db_sg", - DBSecurityGroupDescription='DB Security Group', - Tags=[{'Value': 'bar', - 'Key': 'foo'}, - {'Value': 'bar1', - 'Key': 'foo1'}])['DBSecurityGroup']['DBSecurityGroupName'] - - resource = 'arn:aws:rds:us-west-2:1234567890:secgrp:{0}'.format( - security_group) - conn.remove_tags_from_resource(ResourceName=resource, TagKeys=['foo']) - - result = conn.list_tags_for_resource(ResourceName=resource) - result['TagList'].should.equal([{'Value': 'bar1', 'Key': 'foo1'}]) - - -@mock_ec2 -@mock_rds2 -def test_create_database_subnet_group(): - vpc_conn = boto3.client('ec2', 'us-west-2') - vpc = vpc_conn.create_vpc(CidrBlock='10.0.0.0/16')['Vpc'] - subnet1 = vpc_conn.create_subnet( - VpcId=vpc['VpcId'], CidrBlock='10.1.0.0/24')['Subnet'] - subnet2 = vpc_conn.create_subnet( - VpcId=vpc['VpcId'], CidrBlock='10.1.0.0/26')['Subnet'] - - subnet_ids = [subnet1['SubnetId'], subnet2['SubnetId']] - conn = boto3.client('rds', region_name='us-west-2') - result = conn.create_db_subnet_group(DBSubnetGroupName='db_subnet', - DBSubnetGroupDescription='my db subnet', - SubnetIds=subnet_ids) - result['DBSubnetGroup']['DBSubnetGroupName'].should.equal("db_subnet") - result['DBSubnetGroup'][ - 'DBSubnetGroupDescription'].should.equal("my db subnet") - subnets = result['DBSubnetGroup']['Subnets'] - subnet_group_ids = [subnets[0]['SubnetIdentifier'], - subnets[1]['SubnetIdentifier']] - list(subnet_group_ids).should.equal(subnet_ids) - - -@mock_ec2 -@mock_rds2 -def test_create_database_in_subnet_group(): - vpc_conn = boto3.client('ec2', 'us-west-2') - vpc = 
vpc_conn.create_vpc(CidrBlock='10.0.0.0/16')['Vpc'] - subnet = vpc_conn.create_subnet( - VpcId=vpc['VpcId'], CidrBlock='10.1.0.0/24')['Subnet'] - - conn = boto3.client('rds', region_name='us-west-2') - conn.create_db_subnet_group(DBSubnetGroupName='db_subnet1', - DBSubnetGroupDescription='my db subnet', - SubnetIds=[subnet['SubnetId']]) - conn.create_db_instance(DBInstanceIdentifier='db-master-1', - AllocatedStorage=10, - Engine='postgres', - DBInstanceClass='db.m1.small', - MasterUsername='root', - MasterUserPassword='hunter2', - Port=1234, - DBSubnetGroupName='db_subnet1') - result = conn.describe_db_instances(DBInstanceIdentifier='db-master-1') - result['DBInstances'][0]['DBSubnetGroup'][ - 'DBSubnetGroupName'].should.equal('db_subnet1') - - -@mock_ec2 -@mock_rds2 -def test_describe_database_subnet_group(): - vpc_conn = boto3.client('ec2', 'us-west-2') - vpc = vpc_conn.create_vpc(CidrBlock='10.0.0.0/16')['Vpc'] - subnet = vpc_conn.create_subnet( - VpcId=vpc['VpcId'], CidrBlock='10.1.0.0/24')['Subnet'] - - conn = boto3.client('rds', region_name='us-west-2') - conn.create_db_subnet_group(DBSubnetGroupName="db_subnet1", - DBSubnetGroupDescription='my db subnet', - SubnetIds=[subnet['SubnetId']]) - conn.create_db_subnet_group(DBSubnetGroupName='db_subnet2', - DBSubnetGroupDescription='my db subnet', - SubnetIds=[subnet['SubnetId']]) - - resp = conn.describe_db_subnet_groups() - resp['DBSubnetGroups'].should.have.length_of(2) - - subnets = resp['DBSubnetGroups'][0]['Subnets'] - subnets.should.have.length_of(1) - - list(conn.describe_db_subnet_groups(DBSubnetGroupName="db_subnet1") - ['DBSubnetGroups']).should.have.length_of(1) - - conn.describe_db_subnet_groups.when.called_with( - DBSubnetGroupName="not-a-subnet").should.throw(ClientError) - - -@mock_ec2 -@mock_rds2 -def test_delete_database_subnet_group(): - vpc_conn = boto3.client('ec2', 'us-west-2') - vpc = vpc_conn.create_vpc(CidrBlock='10.0.0.0/16')['Vpc'] - subnet = vpc_conn.create_subnet( - VpcId=vpc['VpcId'], CidrBlock='10.1.0.0/24')['Subnet'] - - conn = boto3.client('rds', region_name='us-west-2') - result = conn.describe_db_subnet_groups() - result['DBSubnetGroups'].should.have.length_of(0) - - conn.create_db_subnet_group(DBSubnetGroupName="db_subnet1", - DBSubnetGroupDescription='my db subnet', - SubnetIds=[subnet['SubnetId']]) - result = conn.describe_db_subnet_groups() - result['DBSubnetGroups'].should.have.length_of(1) - - conn.delete_db_subnet_group(DBSubnetGroupName="db_subnet1") - result = conn.describe_db_subnet_groups() - result['DBSubnetGroups'].should.have.length_of(0) - - conn.delete_db_subnet_group.when.called_with( - DBSubnetGroupName="db_subnet1").should.throw(ClientError) - - -@mock_ec2 -@mock_rds2 -def test_list_tags_database_subnet_group(): - vpc_conn = boto3.client('ec2', 'us-west-2') - vpc = vpc_conn.create_vpc(CidrBlock='10.0.0.0/16')['Vpc'] - subnet = vpc_conn.create_subnet( - VpcId=vpc['VpcId'], CidrBlock='10.1.0.0/24')['Subnet'] - - conn = boto3.client('rds', region_name='us-west-2') - result = conn.describe_db_subnet_groups() - result['DBSubnetGroups'].should.have.length_of(0) - - subnet = conn.create_db_subnet_group(DBSubnetGroupName="db_subnet1", - DBSubnetGroupDescription='my db subnet', - SubnetIds=[subnet['SubnetId']], - Tags=[{'Value': 'bar', - 'Key': 'foo'}, - {'Value': 'bar1', - 'Key': 'foo1'}])['DBSubnetGroup']['DBSubnetGroupName'] - result = conn.list_tags_for_resource( - ResourceName='arn:aws:rds:us-west-2:1234567890:subgrp:{0}'.format(subnet)) - result['TagList'].should.equal([{'Value': 'bar', - 
'Key': 'foo'}, - {'Value': 'bar1', - 'Key': 'foo1'}]) - - -@mock_ec2 -@mock_rds2 -def test_add_tags_database_subnet_group(): - vpc_conn = boto3.client('ec2', 'us-west-2') - vpc = vpc_conn.create_vpc(CidrBlock='10.0.0.0/16')['Vpc'] - subnet = vpc_conn.create_subnet( - VpcId=vpc['VpcId'], CidrBlock='10.1.0.0/24')['Subnet'] - - conn = boto3.client('rds', region_name='us-west-2') - result = conn.describe_db_subnet_groups() - result['DBSubnetGroups'].should.have.length_of(0) - - subnet = conn.create_db_subnet_group(DBSubnetGroupName="db_subnet1", - DBSubnetGroupDescription='my db subnet', - SubnetIds=[subnet['SubnetId']], - Tags=[])['DBSubnetGroup']['DBSubnetGroupName'] - resource = 'arn:aws:rds:us-west-2:1234567890:subgrp:{0}'.format(subnet) - - conn.add_tags_to_resource(ResourceName=resource, - Tags=[{'Value': 'bar', - 'Key': 'foo'}, - {'Value': 'bar1', - 'Key': 'foo1'}]) - - result = conn.list_tags_for_resource(ResourceName=resource) - result['TagList'].should.equal([{'Value': 'bar', - 'Key': 'foo'}, - {'Value': 'bar1', - 'Key': 'foo1'}]) - - -@mock_ec2 -@mock_rds2 -def test_remove_tags_database_subnet_group(): - vpc_conn = boto3.client('ec2', 'us-west-2') - vpc = vpc_conn.create_vpc(CidrBlock='10.0.0.0/16')['Vpc'] - subnet = vpc_conn.create_subnet( - VpcId=vpc['VpcId'], CidrBlock='10.1.0.0/24')['Subnet'] - - conn = boto3.client('rds', region_name='us-west-2') - result = conn.describe_db_subnet_groups() - result['DBSubnetGroups'].should.have.length_of(0) - - subnet = conn.create_db_subnet_group(DBSubnetGroupName="db_subnet1", - DBSubnetGroupDescription='my db subnet', - SubnetIds=[subnet['SubnetId']], - Tags=[{'Value': 'bar', - 'Key': 'foo'}, - {'Value': 'bar1', - 'Key': 'foo1'}])['DBSubnetGroup']['DBSubnetGroupName'] - resource = 'arn:aws:rds:us-west-2:1234567890:subgrp:{0}'.format(subnet) - - conn.remove_tags_from_resource(ResourceName=resource, TagKeys=['foo']) - - result = conn.list_tags_for_resource(ResourceName=resource) - result['TagList'].should.equal([{'Value': 'bar1', 'Key': 'foo1'}]) - - -@mock_rds2 -def test_create_database_replica(): - conn = boto3.client('rds', region_name='us-west-2') - - database = conn.create_db_instance(DBInstanceIdentifier='db-master-1', - AllocatedStorage=10, - Engine='postgres', - DBInstanceClass='db.m1.small', - MasterUsername='root', - MasterUserPassword='hunter2', - Port=1234, - DBSecurityGroups=["my_sg"]) - - replica = conn.create_db_instance_read_replica(DBInstanceIdentifier="db-replica-1", - SourceDBInstanceIdentifier="db-master-1", - DBInstanceClass="db.m1.small") - replica['DBInstance'][ - 'ReadReplicaSourceDBInstanceIdentifier'].should.equal('db-master-1') - replica['DBInstance']['DBInstanceClass'].should.equal('db.m1.small') - replica['DBInstance']['DBInstanceIdentifier'].should.equal('db-replica-1') - - master = conn.describe_db_instances(DBInstanceIdentifier="db-master-1") - master['DBInstances'][0]['ReadReplicaDBInstanceIdentifiers'].should.equal([ - 'db-replica-1']) - - conn.delete_db_instance( - DBInstanceIdentifier="db-replica-1", SkipFinalSnapshot=True) - - master = conn.describe_db_instances(DBInstanceIdentifier="db-master-1") - master['DBInstances'][0][ - 'ReadReplicaDBInstanceIdentifiers'].should.equal([]) - - -@mock_rds2 -@mock_kms -def test_create_database_with_encrypted_storage(): - kms_conn = boto3.client('kms', region_name='us-west-2') - key = kms_conn.create_key(Policy='my RDS encryption policy', - Description='RDS encryption key', - KeyUsage='ENCRYPT_DECRYPT') - - conn = boto3.client('rds', region_name='us-west-2') - database = 
conn.create_db_instance(DBInstanceIdentifier='db-master-1', - AllocatedStorage=10, - Engine='postgres', - DBInstanceClass='db.m1.small', - MasterUsername='root', - MasterUserPassword='hunter2', - Port=1234, - DBSecurityGroups=["my_sg"], - StorageEncrypted=True, - KmsKeyId=key['KeyMetadata']['KeyId']) - - database['DBInstance']['StorageEncrypted'].should.equal(True) - database['DBInstance']['KmsKeyId'].should.equal( - key['KeyMetadata']['KeyId']) - - -@mock_rds2 -def test_create_db_parameter_group(): - conn = boto3.client('rds', region_name='us-west-2') - db_parameter_group = conn.create_db_parameter_group(DBParameterGroupName='test', - DBParameterGroupFamily='mysql5.6', - Description='test parameter group') - - db_parameter_group['DBParameterGroup'][ - 'DBParameterGroupName'].should.equal('test') - db_parameter_group['DBParameterGroup'][ - 'DBParameterGroupFamily'].should.equal('mysql5.6') - db_parameter_group['DBParameterGroup'][ - 'Description'].should.equal('test parameter group') - - -@mock_rds2 -def test_create_db_instance_with_parameter_group(): - conn = boto3.client('rds', region_name='us-west-2') - db_parameter_group = conn.create_db_parameter_group(DBParameterGroupName='test', - DBParameterGroupFamily='mysql5.6', - Description='test parameter group') - - database = conn.create_db_instance(DBInstanceIdentifier='db-master-1', - AllocatedStorage=10, - Engine='mysql', - DBInstanceClass='db.m1.small', - DBParameterGroupName='test', - MasterUsername='root', - MasterUserPassword='hunter2', - Port=1234) - - len(database['DBInstance']['DBParameterGroups']).should.equal(1) - database['DBInstance']['DBParameterGroups'][0][ - 'DBParameterGroupName'].should.equal('test') - database['DBInstance']['DBParameterGroups'][0][ - 'ParameterApplyStatus'].should.equal('in-sync') - - -@mock_rds2 -def test_create_database_with_default_port(): - conn = boto3.client('rds', region_name='us-west-2') - database = conn.create_db_instance(DBInstanceIdentifier='db-master-1', - AllocatedStorage=10, - Engine='postgres', - DBInstanceClass='db.m1.small', - MasterUsername='root', - MasterUserPassword='hunter2', - DBSecurityGroups=["my_sg"]) - database['DBInstance']['Endpoint']['Port'].should.equal(5432) - - -@mock_rds2 -def test_modify_db_instance_with_parameter_group(): - conn = boto3.client('rds', region_name='us-west-2') - database = conn.create_db_instance(DBInstanceIdentifier='db-master-1', - AllocatedStorage=10, - Engine='mysql', - DBInstanceClass='db.m1.small', - MasterUsername='root', - MasterUserPassword='hunter2', - Port=1234) - - len(database['DBInstance']['DBParameterGroups']).should.equal(1) - database['DBInstance']['DBParameterGroups'][0][ - 'DBParameterGroupName'].should.equal('default.mysql5.6') - database['DBInstance']['DBParameterGroups'][0][ - 'ParameterApplyStatus'].should.equal('in-sync') - - db_parameter_group = conn.create_db_parameter_group(DBParameterGroupName='test', - DBParameterGroupFamily='mysql5.6', - Description='test parameter group') - conn.modify_db_instance(DBInstanceIdentifier='db-master-1', - DBParameterGroupName='test', - ApplyImmediately=True) - - database = conn.describe_db_instances( - DBInstanceIdentifier='db-master-1')['DBInstances'][0] - len(database['DBParameterGroups']).should.equal(1) - database['DBParameterGroups'][0][ - 'DBParameterGroupName'].should.equal('test') - database['DBParameterGroups'][0][ - 'ParameterApplyStatus'].should.equal('in-sync') - - -@mock_rds2 -def test_create_db_parameter_group_empty_description(): - conn = boto3.client('rds', 
region_name='us-west-2') - conn.create_db_parameter_group.when.called_with(DBParameterGroupName='test', - DBParameterGroupFamily='mysql5.6', - Description='').should.throw(ClientError) - - -@mock_rds2 -def test_create_db_parameter_group_duplicate(): - conn = boto3.client('rds', region_name='us-west-2') - conn.create_db_parameter_group(DBParameterGroupName='test', - DBParameterGroupFamily='mysql5.6', - Description='test parameter group') - conn.create_db_parameter_group.when.called_with(DBParameterGroupName='test', - DBParameterGroupFamily='mysql5.6', - Description='test parameter group').should.throw(ClientError) - - -@mock_rds2 -def test_describe_db_parameter_group(): - conn = boto3.client('rds', region_name='us-west-2') - conn.create_db_parameter_group(DBParameterGroupName='test', - DBParameterGroupFamily='mysql5.6', - Description='test parameter group') - db_parameter_groups = conn.describe_db_parameter_groups( - DBParameterGroupName='test') - db_parameter_groups['DBParameterGroups'][0][ - 'DBParameterGroupName'].should.equal('test') - - -@mock_rds2 -def test_describe_non_existant_db_parameter_group(): - conn = boto3.client('rds', region_name='us-west-2') - db_parameter_groups = conn.describe_db_parameter_groups( - DBParameterGroupName='test') - len(db_parameter_groups['DBParameterGroups']).should.equal(0) - - -@mock_rds2 -def test_delete_db_parameter_group(): - conn = boto3.client('rds', region_name='us-west-2') - conn.create_db_parameter_group(DBParameterGroupName='test', - DBParameterGroupFamily='mysql5.6', - Description='test parameter group') - db_parameter_groups = conn.describe_db_parameter_groups( - DBParameterGroupName='test') - db_parameter_groups['DBParameterGroups'][0][ - 'DBParameterGroupName'].should.equal('test') - conn.delete_db_parameter_group(DBParameterGroupName='test') - db_parameter_groups = conn.describe_db_parameter_groups( - DBParameterGroupName='test') - len(db_parameter_groups['DBParameterGroups']).should.equal(0) - - -@mock_rds2 -def test_modify_db_parameter_group(): - conn = boto3.client('rds', region_name='us-west-2') - conn.create_db_parameter_group(DBParameterGroupName='test', - DBParameterGroupFamily='mysql5.6', - Description='test parameter group') - - modify_result = conn.modify_db_parameter_group(DBParameterGroupName='test', - Parameters=[{ - 'ParameterName': 'foo', - 'ParameterValue': 'foo_val', - 'Description': 'test param', - 'ApplyMethod': 'immediate' - }] - ) - - modify_result['DBParameterGroupName'].should.equal('test') - - db_parameters = conn.describe_db_parameters(DBParameterGroupName='test') - db_parameters['Parameters'][0]['ParameterName'].should.equal('foo') - db_parameters['Parameters'][0]['ParameterValue'].should.equal('foo_val') - db_parameters['Parameters'][0]['Description'].should.equal('test param') - db_parameters['Parameters'][0]['ApplyMethod'].should.equal('immediate') - - -@mock_rds2 -def test_delete_non_existant_db_parameter_group(): - conn = boto3.client('rds', region_name='us-west-2') - conn.delete_db_parameter_group.when.called_with( - DBParameterGroupName='non-existant').should.throw(ClientError) - - -@mock_rds2 -def test_create_parameter_group_with_tags(): - conn = boto3.client('rds', region_name='us-west-2') - conn.create_db_parameter_group(DBParameterGroupName='test', - DBParameterGroupFamily='mysql5.6', - Description='test parameter group', - Tags=[{ - 'Key': 'foo', - 'Value': 'bar', - }]) - result = conn.list_tags_for_resource( - ResourceName='arn:aws:rds:us-west-2:1234567890:pg:test') - 
result['TagList'].should.equal([{'Value': 'bar', 'Key': 'foo'}]) +from __future__ import unicode_literals + +from botocore.exceptions import ClientError, ParamValidationError +import boto3 +import sure # noqa +from moto import mock_ec2, mock_kms, mock_rds2 + + +@mock_rds2 +def test_create_database(): + conn = boto3.client('rds', region_name='us-west-2') + database = conn.create_db_instance(DBInstanceIdentifier='db-master-1', + AllocatedStorage=10, + Engine='postgres', + DBName='staging-postgres', + DBInstanceClass='db.m1.small', + LicenseModel='license-included', + MasterUsername='root', + MasterUserPassword='hunter2', + Port=1234, + DBSecurityGroups=["my_sg"]) + db_instance = database['DBInstance'] + db_instance['AllocatedStorage'].should.equal(10) + db_instance['DBInstanceClass'].should.equal("db.m1.small") + db_instance['LicenseModel'].should.equal("license-included") + db_instance['MasterUsername'].should.equal("root") + db_instance['DBSecurityGroups'][0][ + 'DBSecurityGroupName'].should.equal('my_sg') + db_instance['DBInstanceArn'].should.equal( + 'arn:aws:rds:us-west-2:1234567890:db:db-master-1') + db_instance['DBInstanceStatus'].should.equal('available') + db_instance['DBName'].should.equal('staging-postgres') + db_instance['DBInstanceIdentifier'].should.equal("db-master-1") + db_instance['IAMDatabaseAuthenticationEnabled'].should.equal(False) + db_instance['DbiResourceId'].should.contain("db-") + db_instance['CopyTagsToSnapshot'].should.equal(False) + + +@mock_rds2 +def test_stop_database(): + conn = boto3.client('rds', region_name='us-west-2') + database = conn.create_db_instance(DBInstanceIdentifier='db-master-1', + AllocatedStorage=10, + Engine='postgres', + DBName='staging-postgres', + DBInstanceClass='db.m1.small', + LicenseModel='license-included', + MasterUsername='root', + MasterUserPassword='hunter2', + Port=1234, + DBSecurityGroups=["my_sg"]) + mydb = conn.describe_db_instances(DBInstanceIdentifier=database['DBInstance']['DBInstanceIdentifier'])['DBInstances'][0] + mydb['DBInstanceStatus'].should.equal('available') + # test stopping database should shutdown + response = conn.stop_db_instance(DBInstanceIdentifier=mydb['DBInstanceIdentifier']) + response['ResponseMetadata']['HTTPStatusCode'].should.equal(200) + response['DBInstance']['DBInstanceStatus'].should.equal('stopped') + # test rdsclient error when trying to stop an already stopped database + conn.stop_db_instance.when.called_with(DBInstanceIdentifier=mydb['DBInstanceIdentifier']).should.throw(ClientError) + # test stopping a stopped database with snapshot should error and no snapshot should exist for that call + conn.stop_db_instance.when.called_with(DBInstanceIdentifier=mydb['DBInstanceIdentifier'], DBSnapshotIdentifier='rocky4570-rds-snap').should.throw(ClientError) + response = conn.describe_db_snapshots() + response['DBSnapshots'].should.equal([]) + + +@mock_rds2 +def test_start_database(): + conn = boto3.client('rds', region_name='us-west-2') + database = conn.create_db_instance(DBInstanceIdentifier='db-master-1', + AllocatedStorage=10, + Engine='postgres', + DBName='staging-postgres', + DBInstanceClass='db.m1.small', + LicenseModel='license-included', + MasterUsername='root', + MasterUserPassword='hunter2', + Port=1234, + DBSecurityGroups=["my_sg"]) + mydb = conn.describe_db_instances(DBInstanceIdentifier=database['DBInstance']['DBInstanceIdentifier'])['DBInstances'][0] + mydb['DBInstanceStatus'].should.equal('available') + # test starting an already started database should error + 
conn.start_db_instance.when.called_with(DBInstanceIdentifier=mydb['DBInstanceIdentifier']).should.throw(ClientError) + # stop and test start - should go from stopped to available, create snapshot and check snapshot + response = conn.stop_db_instance(DBInstanceIdentifier=mydb['DBInstanceIdentifier'], DBSnapshotIdentifier='rocky4570-rds-snap') + response['ResponseMetadata']['HTTPStatusCode'].should.equal(200) + response['DBInstance']['DBInstanceStatus'].should.equal('stopped') + response = conn.describe_db_snapshots() + response['DBSnapshots'][0]['DBSnapshotIdentifier'].should.equal('rocky4570-rds-snap') + response = conn.start_db_instance(DBInstanceIdentifier=mydb['DBInstanceIdentifier']) + response['ResponseMetadata']['HTTPStatusCode'].should.equal(200) + response['DBInstance']['DBInstanceStatus'].should.equal('available') + # starting database should not remove snapshot + response = conn.describe_db_snapshots() + response['DBSnapshots'][0]['DBSnapshotIdentifier'].should.equal('rocky4570-rds-snap') + # test stopping database with an already-existing snapshot identifier should throw an error + conn.stop_db_instance.when.called_with(DBInstanceIdentifier=mydb['DBInstanceIdentifier'], DBSnapshotIdentifier='rocky4570-rds-snap').should.throw(ClientError) + # test stopping database without requesting a snapshot should succeed. + response = conn.stop_db_instance(DBInstanceIdentifier=mydb['DBInstanceIdentifier']) + response['ResponseMetadata']['HTTPStatusCode'].should.equal(200) + response['DBInstance']['DBInstanceStatus'].should.equal('stopped') + + +@mock_rds2 +def test_fail_to_stop_multi_az(): + conn = boto3.client('rds', region_name='us-west-2') + database = conn.create_db_instance(DBInstanceIdentifier='db-master-1', + AllocatedStorage=10, + Engine='postgres', + DBName='staging-postgres', + DBInstanceClass='db.m1.small', + LicenseModel='license-included', + MasterUsername='root', + MasterUserPassword='hunter2', + Port=1234, + DBSecurityGroups=["my_sg"], + MultiAZ=True) + + mydb = conn.describe_db_instances(DBInstanceIdentifier=database['DBInstance']['DBInstanceIdentifier'])['DBInstances'][0] + mydb['DBInstanceStatus'].should.equal('available') + # multi-az databases aren't allowed to be shut down at this time. + conn.stop_db_instance.when.called_with(DBInstanceIdentifier=mydb['DBInstanceIdentifier']).should.throw(ClientError) + # multi-az databases aren't allowed to be started up at this time. + conn.start_db_instance.when.called_with(DBInstanceIdentifier=mydb['DBInstanceIdentifier']).should.throw(ClientError) + + +@mock_rds2 +def test_fail_to_stop_readreplica(): + conn = boto3.client('rds', region_name='us-west-2') + database = conn.create_db_instance(DBInstanceIdentifier='db-master-1', + AllocatedStorage=10, + Engine='postgres', + DBName='staging-postgres', + DBInstanceClass='db.m1.small', + LicenseModel='license-included', + MasterUsername='root', + MasterUserPassword='hunter2', + Port=1234, + DBSecurityGroups=["my_sg"]) + + replica = conn.create_db_instance_read_replica(DBInstanceIdentifier="db-replica-1", + SourceDBInstanceIdentifier="db-master-1", + DBInstanceClass="db.m1.small") + + mydb = conn.describe_db_instances(DBInstanceIdentifier=replica['DBInstance']['DBInstanceIdentifier'])['DBInstances'][0] + mydb['DBInstanceStatus'].should.equal('available') + # read-replicas are not allowed to be stopped at this time. + conn.stop_db_instance.when.called_with(DBInstanceIdentifier=mydb['DBInstanceIdentifier']).should.throw(ClientError) + # read-replicas are not allowed to be started at this time.
+ conn.start_db_instance.when.called_with(DBInstanceIdentifier=mydb['DBInstanceIdentifier']).should.throw(ClientError) + + +@mock_rds2 +def test_get_databases(): + conn = boto3.client('rds', region_name='us-west-2') + + instances = conn.describe_db_instances() + list(instances['DBInstances']).should.have.length_of(0) + + conn.create_db_instance(DBInstanceIdentifier='db-master-1', + AllocatedStorage=10, + DBInstanceClass='postgres', + Engine='db.m1.small', + MasterUsername='root', + MasterUserPassword='hunter2', + Port=1234, + DBSecurityGroups=['my_sg']) + conn.create_db_instance(DBInstanceIdentifier='db-master-2', + AllocatedStorage=10, + DBInstanceClass='postgres', + Engine='db.m1.small', + MasterUsername='root', + MasterUserPassword='hunter2', + Port=1234, + DBSecurityGroups=['my_sg']) + instances = conn.describe_db_instances() + list(instances['DBInstances']).should.have.length_of(2) + + instances = conn.describe_db_instances(DBInstanceIdentifier="db-master-1") + list(instances['DBInstances']).should.have.length_of(1) + instances['DBInstances'][0][ + 'DBInstanceIdentifier'].should.equal("db-master-1") + instances['DBInstances'][0]['DBInstanceArn'].should.equal( + 'arn:aws:rds:us-west-2:1234567890:db:db-master-1') + + +@mock_rds2 +def test_get_databases_paginated(): + conn = boto3.client('rds', region_name="us-west-2") + + for i in range(51): + conn.create_db_instance(AllocatedStorage=5, + Port=5432, + DBInstanceIdentifier='rds%d' % i, + DBInstanceClass='db.t1.micro', + Engine='postgres') + + resp = conn.describe_db_instances() + resp["DBInstances"].should.have.length_of(50) + resp["Marker"].should.equal(resp["DBInstances"][-1]['DBInstanceIdentifier']) + + resp2 = conn.describe_db_instances(Marker=resp["Marker"]) + resp2["DBInstances"].should.have.length_of(1) + + resp3 = conn.describe_db_instances(MaxRecords=100) + resp3["DBInstances"].should.have.length_of(51) + +@mock_rds2 +def test_describe_non_existant_database(): + conn = boto3.client('rds', region_name='us-west-2') + conn.describe_db_instances.when.called_with( + DBInstanceIdentifier="not-a-db").should.throw(ClientError) + + +@mock_rds2 +def test_modify_db_instance(): + conn = boto3.client('rds', region_name='us-west-2') + database = conn.create_db_instance(DBInstanceIdentifier='db-master-1', + AllocatedStorage=10, + DBInstanceClass='postgres', + Engine='db.m1.small', + MasterUsername='root', + MasterUserPassword='hunter2', + Port=1234, + DBSecurityGroups=['my_sg']) + instances = conn.describe_db_instances(DBInstanceIdentifier='db-master-1') + instances['DBInstances'][0]['AllocatedStorage'].should.equal(10) + conn.modify_db_instance(DBInstanceIdentifier='db-master-1', + AllocatedStorage=20, + ApplyImmediately=True) + instances = conn.describe_db_instances(DBInstanceIdentifier='db-master-1') + instances['DBInstances'][0]['AllocatedStorage'].should.equal(20) + + +@mock_rds2 +def test_rename_db_instance(): + conn = boto3.client('rds', region_name='us-west-2') + database = conn.create_db_instance(DBInstanceIdentifier='db-master-1', + AllocatedStorage=10, + DBInstanceClass='postgres', + Engine='db.m1.small', + MasterUsername='root', + MasterUserPassword='hunter2', + Port=1234, + DBSecurityGroups=['my_sg']) + instances = conn.describe_db_instances(DBInstanceIdentifier="db-master-1") + list(instances['DBInstances']).should.have.length_of(1) + conn.describe_db_instances.when.called_with(DBInstanceIdentifier="db-master-2").should.throw(ClientError) + conn.modify_db_instance(DBInstanceIdentifier='db-master-1', + 
+
+
+@mock_rds2
+def test_modify_non_existant_database():
+    conn = boto3.client('rds', region_name='us-west-2')
+    conn.modify_db_instance.when.called_with(DBInstanceIdentifier='not-a-db',
+                                             AllocatedStorage=20,
+                                             ApplyImmediately=True).should.throw(ClientError)
+
+
+@mock_rds2
+def test_reboot_db_instance():
+    conn = boto3.client('rds', region_name='us-west-2')
+    conn.create_db_instance(DBInstanceIdentifier='db-master-1',
+                            AllocatedStorage=10,
+                            DBInstanceClass='db.m1.small',
+                            Engine='postgres',
+                            MasterUsername='root',
+                            MasterUserPassword='hunter2',
+                            Port=1234,
+                            DBSecurityGroups=['my_sg'])
+    database = conn.reboot_db_instance(DBInstanceIdentifier='db-master-1')
+    database['DBInstance']['DBInstanceIdentifier'].should.equal("db-master-1")
+
+
+@mock_rds2
+def test_reboot_non_existant_database():
+    conn = boto3.client('rds', region_name='us-west-2')
+    conn.reboot_db_instance.when.called_with(
+        DBInstanceIdentifier="not-a-db").should.throw(ClientError)
+
+
+@mock_rds2
+def test_delete_database():
+    conn = boto3.client('rds', region_name='us-west-2')
+    instances = conn.describe_db_instances()
+    list(instances['DBInstances']).should.have.length_of(0)
+    conn.create_db_instance(DBInstanceIdentifier='db-primary-1',
+                            AllocatedStorage=10,
+                            Engine='postgres',
+                            DBInstanceClass='db.m1.small',
+                            MasterUsername='root',
+                            MasterUserPassword='hunter2',
+                            Port=1234,
+                            DBSecurityGroups=['my_sg'])
+    instances = conn.describe_db_instances()
+    list(instances['DBInstances']).should.have.length_of(1)
+
+    conn.delete_db_instance(DBInstanceIdentifier="db-primary-1",
+                            FinalDBSnapshotIdentifier='primary-1-snapshot')
+
+    instances = conn.describe_db_instances()
+    list(instances['DBInstances']).should.have.length_of(0)
+
+    # the final snapshot should have been saved
+    snapshots = conn.describe_db_snapshots(DBInstanceIdentifier="db-primary-1").get('DBSnapshots')
+    snapshots[0].get('Engine').should.equal('postgres')
+
+
+@mock_rds2
+def test_delete_non_existant_database():
+    conn = boto3.client('rds', region_name='us-west-2')
+    conn.delete_db_instance.when.called_with(
+        DBInstanceIdentifier="not-a-db").should.throw(ClientError)
+
+
+@mock_rds2
+def test_create_db_snapshots():
+    conn = boto3.client('rds', region_name='us-west-2')
+    conn.create_db_snapshot.when.called_with(
+        DBInstanceIdentifier='db-primary-1',
+        DBSnapshotIdentifier='snapshot-1').should.throw(ClientError)
+
+    conn.create_db_instance(DBInstanceIdentifier='db-primary-1',
+                            AllocatedStorage=10,
+                            Engine='postgres',
+                            DBName='staging-postgres',
+                            DBInstanceClass='db.m1.small',
+                            MasterUsername='root',
+                            MasterUserPassword='hunter2',
+                            Port=1234,
+                            DBSecurityGroups=["my_sg"])
+
+    snapshot = conn.create_db_snapshot(DBInstanceIdentifier='db-primary-1',
+                                       DBSnapshotIdentifier='g-1').get('DBSnapshot')
+
+    snapshot.get('Engine').should.equal('postgres')
+    snapshot.get('DBInstanceIdentifier').should.equal('db-primary-1')
+    snapshot.get('DBSnapshotIdentifier').should.equal('g-1')
+    result = conn.list_tags_for_resource(ResourceName=snapshot['DBSnapshotArn'])
+    result['TagList'].should.equal([])
+
+
+@mock_rds2
+def test_create_db_snapshots_copy_tags():
+    conn = boto3.client('rds', region_name='us-west-2')
+    conn.create_db_snapshot.when.called_with(
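+        # snapshotting an instance that has not been created yet should fail
+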
DBInstanceIdentifier='db-primary-1', + DBSnapshotIdentifier='snapshot-1').should.throw(ClientError) + + conn.create_db_instance(DBInstanceIdentifier='db-primary-1', + AllocatedStorage=10, + Engine='postgres', + DBName='staging-postgres', + DBInstanceClass='db.m1.small', + MasterUsername='root', + MasterUserPassword='hunter2', + Port=1234, + DBSecurityGroups=["my_sg"], + CopyTagsToSnapshot=True, + Tags=[ + { + 'Key': 'foo', + 'Value': 'bar', + }, + { + 'Key': 'foo1', + 'Value': 'bar1', + }, + ]) + + snapshot = conn.create_db_snapshot(DBInstanceIdentifier='db-primary-1', + DBSnapshotIdentifier='g-1').get('DBSnapshot') + + snapshot.get('Engine').should.equal('postgres') + snapshot.get('DBInstanceIdentifier').should.equal('db-primary-1') + snapshot.get('DBSnapshotIdentifier').should.equal('g-1') + result = conn.list_tags_for_resource(ResourceName=snapshot['DBSnapshotArn']) + result['TagList'].should.equal([{'Value': 'bar', + 'Key': 'foo'}, + {'Value': 'bar1', + 'Key': 'foo1'}]) + + +@mock_rds2 +def test_describe_db_snapshots(): + conn = boto3.client('rds', region_name='us-west-2') + conn.create_db_instance(DBInstanceIdentifier='db-primary-1', + AllocatedStorage=10, + Engine='postgres', + DBName='staging-postgres', + DBInstanceClass='db.m1.small', + MasterUsername='root', + MasterUserPassword='hunter2', + Port=1234, + DBSecurityGroups=["my_sg"]) + + created = conn.create_db_snapshot(DBInstanceIdentifier='db-primary-1', + DBSnapshotIdentifier='snapshot-1').get('DBSnapshot') + + created.get('Engine').should.equal('postgres') + + by_database_id = conn.describe_db_snapshots(DBInstanceIdentifier='db-primary-1').get('DBSnapshots') + by_snapshot_id = conn.describe_db_snapshots(DBSnapshotIdentifier='snapshot-1').get('DBSnapshots') + by_snapshot_id.should.equal(by_database_id) + + snapshot = by_snapshot_id[0] + snapshot.should.equal(created) + snapshot.get('Engine').should.equal('postgres') + + conn.create_db_snapshot(DBInstanceIdentifier='db-primary-1', + DBSnapshotIdentifier='snapshot-2') + snapshots = conn.describe_db_snapshots(DBInstanceIdentifier='db-primary-1').get('DBSnapshots') + snapshots.should.have.length_of(2) + + +@mock_rds2 +def test_delete_db_snapshot(): + conn = boto3.client('rds', region_name='us-west-2') + conn.create_db_instance(DBInstanceIdentifier='db-primary-1', + AllocatedStorage=10, + Engine='postgres', + DBName='staging-postgres', + DBInstanceClass='db.m1.small', + MasterUsername='root', + MasterUserPassword='hunter2', + Port=1234, + DBSecurityGroups=["my_sg"]) + conn.create_db_snapshot(DBInstanceIdentifier='db-primary-1', + DBSnapshotIdentifier='snapshot-1') + + conn.describe_db_snapshots(DBSnapshotIdentifier='snapshot-1').get('DBSnapshots')[0] + conn.delete_db_snapshot(DBSnapshotIdentifier='snapshot-1') + conn.describe_db_snapshots.when.called_with( + DBSnapshotIdentifier='snapshot-1').should.throw(ClientError) + + +@mock_rds2 +def test_create_option_group(): + conn = boto3.client('rds', region_name='us-west-2') + option_group = conn.create_option_group(OptionGroupName='test', + EngineName='mysql', + MajorEngineVersion='5.6', + OptionGroupDescription='test option group') + option_group['OptionGroup']['OptionGroupName'].should.equal('test') + option_group['OptionGroup']['EngineName'].should.equal('mysql') + option_group['OptionGroup'][ + 'OptionGroupDescription'].should.equal('test option group') + option_group['OptionGroup']['MajorEngineVersion'].should.equal('5.6') + + +@mock_rds2 +def test_create_option_group_bad_engine_name(): + conn = boto3.client('rds', 
region_name='us-west-2') + conn.create_option_group.when.called_with(OptionGroupName='test', + EngineName='invalid_engine', + MajorEngineVersion='5.6', + OptionGroupDescription='test invalid engine').should.throw(ClientError) + + +@mock_rds2 +def test_create_option_group_bad_engine_major_version(): + conn = boto3.client('rds', region_name='us-west-2') + conn.create_option_group.when.called_with(OptionGroupName='test', + EngineName='mysql', + MajorEngineVersion='6.6.6', + OptionGroupDescription='test invalid engine version').should.throw(ClientError) + + +@mock_rds2 +def test_create_option_group_empty_description(): + conn = boto3.client('rds', region_name='us-west-2') + conn.create_option_group.when.called_with(OptionGroupName='test', + EngineName='mysql', + MajorEngineVersion='5.6', + OptionGroupDescription='').should.throw(ClientError) + + +@mock_rds2 +def test_create_option_group_duplicate(): + conn = boto3.client('rds', region_name='us-west-2') + conn.create_option_group(OptionGroupName='test', + EngineName='mysql', + MajorEngineVersion='5.6', + OptionGroupDescription='test option group') + conn.create_option_group.when.called_with(OptionGroupName='test', + EngineName='mysql', + MajorEngineVersion='5.6', + OptionGroupDescription='test option group').should.throw(ClientError) + + +@mock_rds2 +def test_describe_option_group(): + conn = boto3.client('rds', region_name='us-west-2') + conn.create_option_group(OptionGroupName='test', + EngineName='mysql', + MajorEngineVersion='5.6', + OptionGroupDescription='test option group') + option_groups = conn.describe_option_groups(OptionGroupName='test') + option_groups['OptionGroupsList'][0][ + 'OptionGroupName'].should.equal('test') + + +@mock_rds2 +def test_describe_non_existant_option_group(): + conn = boto3.client('rds', region_name='us-west-2') + conn.describe_option_groups.when.called_with( + OptionGroupName="not-a-option-group").should.throw(ClientError) + + +@mock_rds2 +def test_delete_option_group(): + conn = boto3.client('rds', region_name='us-west-2') + conn.create_option_group(OptionGroupName='test', + EngineName='mysql', + MajorEngineVersion='5.6', + OptionGroupDescription='test option group') + option_groups = conn.describe_option_groups(OptionGroupName='test') + option_groups['OptionGroupsList'][0][ + 'OptionGroupName'].should.equal('test') + conn.delete_option_group(OptionGroupName='test') + conn.describe_option_groups.when.called_with( + OptionGroupName='test').should.throw(ClientError) + + +@mock_rds2 +def test_delete_non_existant_option_group(): + conn = boto3.client('rds', region_name='us-west-2') + conn.delete_option_group.when.called_with( + OptionGroupName='non-existant').should.throw(ClientError) + + +@mock_rds2 +def test_describe_option_group_options(): + conn = boto3.client('rds', region_name='us-west-2') + option_group_options = conn.describe_option_group_options( + EngineName='sqlserver-ee') + len(option_group_options['OptionGroupOptions']).should.equal(4) + option_group_options = conn.describe_option_group_options( + EngineName='sqlserver-ee', MajorEngineVersion='11.00') + len(option_group_options['OptionGroupOptions']).should.equal(2) + option_group_options = conn.describe_option_group_options( + EngineName='mysql', MajorEngineVersion='5.6') + len(option_group_options['OptionGroupOptions']).should.equal(1) + conn.describe_option_group_options.when.called_with( + EngineName='non-existent').should.throw(ClientError) + conn.describe_option_group_options.when.called_with( + EngineName='mysql', 
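+        # a known engine with an unknown major version should also be rejected
+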
MajorEngineVersion='non-existent').should.throw(ClientError)
+
+
+@mock_rds2
+def test_modify_option_group():
+    conn = boto3.client('rds', region_name='us-west-2')
+    conn.create_option_group(OptionGroupName='test', EngineName='mysql',
+                             MajorEngineVersion='5.6', OptionGroupDescription='test option group')
+    # TODO: create an option and validate it before deleting.
+    # It is not yet clear how to use this API to add options to an
+    # option group; once that is understood, this test can be completed.
+    result = conn.modify_option_group(OptionGroupName='test', OptionsToInclude=[
+    ], OptionsToRemove=['MEMCACHED'], ApplyImmediately=True)
+    result['OptionGroup']['EngineName'].should.equal('mysql')
+    result['OptionGroup']['Options'].should.equal([])
+    result['OptionGroup']['OptionGroupName'].should.equal('test')
+
+
+@mock_rds2
+def test_modify_option_group_no_options():
+    conn = boto3.client('rds', region_name='us-west-2')
+    conn.create_option_group(OptionGroupName='test', EngineName='mysql',
+                             MajorEngineVersion='5.6', OptionGroupDescription='test option group')
+    conn.modify_option_group.when.called_with(
+        OptionGroupName='test').should.throw(ClientError)
+
+
+@mock_rds2
+def test_modify_non_existant_option_group():
+    conn = boto3.client('rds', region_name='us-west-2')
+    conn.modify_option_group.when.called_with(OptionGroupName='non-existant', OptionsToInclude=[(
+        'OptionName', 'Port', 'DBSecurityGroupMemberships', 'VpcSecurityGroupMemberships', 'OptionSettings')]).should.throw(ParamValidationError)
+
+
+@mock_rds2
+def test_list_tags_invalid_arn():
+    conn = boto3.client('rds', region_name='us-west-2')
+    conn.list_tags_for_resource.when.called_with(
+        ResourceName='arn:aws:rds:bad-arn').should.throw(ClientError)
+
+
+@mock_rds2
+def test_list_tags_db():
+    conn = boto3.client('rds', region_name='us-west-2')
+    result = conn.list_tags_for_resource(
+        ResourceName='arn:aws:rds:us-west-2:1234567890:db:foo')
+    result['TagList'].should.equal([])
+    test_instance = conn.create_db_instance(
+        DBInstanceIdentifier='db-with-tags',
+        AllocatedStorage=10,
+        DBInstanceClass='db.m1.small',
+        Engine='postgres',
+        MasterUsername='root',
+        MasterUserPassword='hunter2',
+        Port=1234,
+        DBSecurityGroups=['my_sg'],
+        Tags=[{'Key': 'foo', 'Value': 'bar'},
+              {'Key': 'foo1', 'Value': 'bar1'}])
+    result = conn.list_tags_for_resource(
+        ResourceName=test_instance['DBInstance']['DBInstanceArn'])
+    result['TagList'].should.equal([{'Value': 'bar', 'Key': 'foo'},
+                                    {'Value': 'bar1', 'Key': 'foo1'}])
+
+
+@mock_rds2
+def test_add_tags_db():
+    conn = boto3.client('rds', region_name='us-west-2')
+    conn.create_db_instance(DBInstanceIdentifier='db-without-tags',
+                            AllocatedStorage=10,
+                            DBInstanceClass='db.m1.small',
+                            Engine='postgres',
+                            MasterUsername='root',
+                            MasterUserPassword='hunter2',
+                            Port=1234,
+                            DBSecurityGroups=['my_sg'],
+                            Tags=[{'Key': 'foo', 'Value': 'bar'},
+                                  {'Key': 'foo1', 'Value': 'bar1'}])
+    result = conn.list_tags_for_resource(
+        ResourceName='arn:aws:rds:us-west-2:1234567890:db:db-without-tags')
+    list(result['TagList']).should.have.length_of(2)
+    conn.add_tags_to_resource(ResourceName='arn:aws:rds:us-west-2:1234567890:db:db-without-tags',
+                              Tags=[{'Key': 'foo', 'Value': 'fish'},
+                                    {'Key': 'foo2', 'Value': 'bar2'}])
+    result =
conn.list_tags_for_resource(
+        ResourceName='arn:aws:rds:us-west-2:1234567890:db:db-without-tags')
+    list(result['TagList']).should.have.length_of(3)
+
+
+@mock_rds2
+def test_remove_tags_db():
+    conn = boto3.client('rds', region_name='us-west-2')
+    conn.create_db_instance(DBInstanceIdentifier='db-with-tags',
+                            AllocatedStorage=10,
+                            DBInstanceClass='db.m1.small',
+                            Engine='postgres',
+                            MasterUsername='root',
+                            MasterUserPassword='hunter2',
+                            Port=1234,
+                            DBSecurityGroups=['my_sg'],
+                            Tags=[{'Key': 'foo', 'Value': 'bar'},
+                                  {'Key': 'foo1', 'Value': 'bar1'}])
+    result = conn.list_tags_for_resource(
+        ResourceName='arn:aws:rds:us-west-2:1234567890:db:db-with-tags')
+    list(result['TagList']).should.have.length_of(2)
+    conn.remove_tags_from_resource(
+        ResourceName='arn:aws:rds:us-west-2:1234567890:db:db-with-tags', TagKeys=['foo'])
+    result = conn.list_tags_for_resource(
+        ResourceName='arn:aws:rds:us-west-2:1234567890:db:db-with-tags')
+    len(result['TagList']).should.equal(1)
+
+
+@mock_rds2
+def test_list_tags_snapshot():
+    conn = boto3.client('rds', region_name='us-west-2')
+    result = conn.list_tags_for_resource(
+        ResourceName='arn:aws:rds:us-west-2:1234567890:snapshot:foo')
+    result['TagList'].should.equal([])
+    conn.create_db_instance(DBInstanceIdentifier='db-primary-1',
+                            AllocatedStorage=10,
+                            Engine='postgres',
+                            DBName='staging-postgres',
+                            DBInstanceClass='db.m1.small',
+                            MasterUsername='root',
+                            MasterUserPassword='hunter2',
+                            Port=1234,
+                            DBSecurityGroups=["my_sg"])
+    snapshot = conn.create_db_snapshot(DBInstanceIdentifier='db-primary-1',
+                                       DBSnapshotIdentifier='snapshot-with-tags',
+                                       Tags=[{'Key': 'foo', 'Value': 'bar'},
+                                             {'Key': 'foo1', 'Value': 'bar1'}])
+    result = conn.list_tags_for_resource(ResourceName=snapshot['DBSnapshot']['DBSnapshotArn'])
+    result['TagList'].should.equal([{'Value': 'bar', 'Key': 'foo'},
+                                    {'Value': 'bar1', 'Key': 'foo1'}])
+
+
+@mock_rds2
+def test_add_tags_snapshot():
+    conn = boto3.client('rds', region_name='us-west-2')
+    conn.create_db_instance(DBInstanceIdentifier='db-primary-1',
+                            AllocatedStorage=10,
+                            Engine='postgres',
+                            DBName='staging-postgres',
+                            DBInstanceClass='db.m1.small',
+                            MasterUsername='root',
+                            MasterUserPassword='hunter2',
+                            Port=1234,
+                            DBSecurityGroups=["my_sg"])
+    snapshot = conn.create_db_snapshot(DBInstanceIdentifier='db-primary-1',
+                                       DBSnapshotIdentifier='snapshot-without-tags',
+                                       Tags=[{'Key': 'foo', 'Value': 'bar'},
+                                             {'Key': 'foo1', 'Value': 'bar1'}])
+    result = conn.list_tags_for_resource(
+        ResourceName='arn:aws:rds:us-west-2:1234567890:snapshot:snapshot-without-tags')
+    list(result['TagList']).should.have.length_of(2)
+    conn.add_tags_to_resource(ResourceName='arn:aws:rds:us-west-2:1234567890:snapshot:snapshot-without-tags',
+                              Tags=[{'Key': 'foo', 'Value': 'fish'},
+                                    {'Key': 'foo2', 'Value': 'bar2'}])
+    result = conn.list_tags_for_resource(
+        ResourceName='arn:aws:rds:us-west-2:1234567890:snapshot:snapshot-without-tags')
+    list(result['TagList']).should.have.length_of(3)
+
+
+@mock_rds2
+def test_remove_tags_snapshot():
+    conn = boto3.client('rds', region_name='us-west-2')
+    conn.create_db_instance(DBInstanceIdentifier='db-primary-1',
+                            AllocatedStorage=10,
+                            Engine='postgres',
+                            DBName='staging-postgres',
+                            DBInstanceClass='db.m1.small',
+                            MasterUsername='root',
+                            MasterUserPassword='hunter2',
+                            Port=1234,
+                            DBSecurityGroups=["my_sg"])
+    snapshot = conn.create_db_snapshot(DBInstanceIdentifier='db-primary-1',
+
DBSnapshotIdentifier='snapshot-with-tags', + Tags=[ + { + 'Key': 'foo', + 'Value': 'bar', + }, + { + 'Key': 'foo1', + 'Value': 'bar1', + }, + ]) + result = conn.list_tags_for_resource( + ResourceName='arn:aws:rds:us-west-2:1234567890:snapshot:snapshot-with-tags') + list(result['TagList']).should.have.length_of(2) + conn.remove_tags_from_resource( + ResourceName='arn:aws:rds:us-west-2:1234567890:snapshot:snapshot-with-tags', TagKeys=['foo']) + result = conn.list_tags_for_resource( + ResourceName='arn:aws:rds:us-west-2:1234567890:snapshot:snapshot-with-tags') + len(result['TagList']).should.equal(1) + + +@mock_rds2 +def test_add_tags_option_group(): + conn = boto3.client('rds', region_name='us-west-2') + conn.create_option_group(OptionGroupName='test', + EngineName='mysql', + MajorEngineVersion='5.6', + OptionGroupDescription='test option group') + result = conn.list_tags_for_resource( + ResourceName='arn:aws:rds:us-west-2:1234567890:og:test') + list(result['TagList']).should.have.length_of(0) + conn.add_tags_to_resource(ResourceName='arn:aws:rds:us-west-2:1234567890:og:test', + Tags=[ + { + 'Key': 'foo', + 'Value': 'fish', + }, + { + 'Key': 'foo2', + 'Value': 'bar2', + }]) + result = conn.list_tags_for_resource( + ResourceName='arn:aws:rds:us-west-2:1234567890:og:test') + list(result['TagList']).should.have.length_of(2) + + +@mock_rds2 +def test_remove_tags_option_group(): + conn = boto3.client('rds', region_name='us-west-2') + conn.create_option_group(OptionGroupName='test', + EngineName='mysql', + MajorEngineVersion='5.6', + OptionGroupDescription='test option group') + result = conn.list_tags_for_resource( + ResourceName='arn:aws:rds:us-west-2:1234567890:og:test') + conn.add_tags_to_resource(ResourceName='arn:aws:rds:us-west-2:1234567890:og:test', + Tags=[ + { + 'Key': 'foo', + 'Value': 'fish', + }, + { + 'Key': 'foo2', + 'Value': 'bar2', + }]) + result = conn.list_tags_for_resource( + ResourceName='arn:aws:rds:us-west-2:1234567890:og:test') + list(result['TagList']).should.have.length_of(2) + conn.remove_tags_from_resource(ResourceName='arn:aws:rds:us-west-2:1234567890:og:test', + TagKeys=['foo']) + result = conn.list_tags_for_resource( + ResourceName='arn:aws:rds:us-west-2:1234567890:og:test') + list(result['TagList']).should.have.length_of(1) + + +@mock_rds2 +def test_create_database_security_group(): + conn = boto3.client('rds', region_name='us-west-2') + + result = conn.create_db_security_group( + DBSecurityGroupName='db_sg', DBSecurityGroupDescription='DB Security Group') + result['DBSecurityGroup']['DBSecurityGroupName'].should.equal("db_sg") + result['DBSecurityGroup'][ + 'DBSecurityGroupDescription'].should.equal("DB Security Group") + result['DBSecurityGroup']['IPRanges'].should.equal([]) + + +@mock_rds2 +def test_get_security_groups(): + conn = boto3.client('rds', region_name='us-west-2') + + result = conn.describe_db_security_groups() + result['DBSecurityGroups'].should.have.length_of(0) + + conn.create_db_security_group( + DBSecurityGroupName='db_sg1', DBSecurityGroupDescription='DB Security Group') + conn.create_db_security_group( + DBSecurityGroupName='db_sg2', DBSecurityGroupDescription='DB Security Group') + + result = conn.describe_db_security_groups() + result['DBSecurityGroups'].should.have.length_of(2) + + result = conn.describe_db_security_groups(DBSecurityGroupName="db_sg1") + result['DBSecurityGroups'].should.have.length_of(1) + result['DBSecurityGroups'][0]['DBSecurityGroupName'].should.equal("db_sg1") + + +@mock_rds2 +def test_get_non_existant_security_group(): 
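+    # describing a security group that was never created should raise ClientError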
+    conn = boto3.client('rds', region_name='us-west-2')
+    conn.describe_db_security_groups.when.called_with(
+        DBSecurityGroupName="not-a-sg").should.throw(ClientError)
+
+
+@mock_rds2
+def test_delete_database_security_group():
+    conn = boto3.client('rds', region_name='us-west-2')
+    conn.create_db_security_group(
+        DBSecurityGroupName='db_sg', DBSecurityGroupDescription='DB Security Group')
+
+    result = conn.describe_db_security_groups()
+    result['DBSecurityGroups'].should.have.length_of(1)
+
+    conn.delete_db_security_group(DBSecurityGroupName="db_sg")
+    result = conn.describe_db_security_groups()
+    result['DBSecurityGroups'].should.have.length_of(0)
+
+
+@mock_rds2
+def test_delete_non_existant_security_group():
+    conn = boto3.client('rds', region_name='us-west-2')
+    conn.delete_db_security_group.when.called_with(
+        DBSecurityGroupName="not-a-db").should.throw(ClientError)
+
+
+@mock_rds2
+def test_security_group_authorize():
+    conn = boto3.client('rds', region_name='us-west-2')
+    security_group = conn.create_db_security_group(DBSecurityGroupName='db_sg',
+                                                   DBSecurityGroupDescription='DB Security Group')
+    security_group['DBSecurityGroup']['IPRanges'].should.equal([])
+
+    conn.authorize_db_security_group_ingress(DBSecurityGroupName='db_sg',
+                                             CIDRIP='10.3.2.45/32')
+
+    result = conn.describe_db_security_groups(DBSecurityGroupName="db_sg")
+    result['DBSecurityGroups'][0]['IPRanges'].should.have.length_of(1)
+    result['DBSecurityGroups'][0]['IPRanges'].should.equal(
+        [{'Status': 'authorized', 'CIDRIP': '10.3.2.45/32'}])
+
+    conn.authorize_db_security_group_ingress(DBSecurityGroupName='db_sg',
+                                             CIDRIP='10.3.2.46/32')
+    result = conn.describe_db_security_groups(DBSecurityGroupName="db_sg")
+    result['DBSecurityGroups'][0]['IPRanges'].should.have.length_of(2)
+    result['DBSecurityGroups'][0]['IPRanges'].should.equal([
+        {'Status': 'authorized', 'CIDRIP': '10.3.2.45/32'},
+        {'Status': 'authorized', 'CIDRIP': '10.3.2.46/32'},
+    ])
+
+
+@mock_rds2
+def test_add_security_group_to_database():
+    conn = boto3.client('rds', region_name='us-west-2')
+
+    conn.create_db_instance(DBInstanceIdentifier='db-master-1',
+                            AllocatedStorage=10,
+                            DBInstanceClass='db.m1.small',
+                            Engine='postgres',
+                            MasterUsername='root',
+                            MasterUserPassword='hunter2',
+                            Port=1234)
+
+    result = conn.describe_db_instances()
+    result['DBInstances'][0]['DBSecurityGroups'].should.equal([])
+    conn.create_db_security_group(DBSecurityGroupName='db_sg',
+                                  DBSecurityGroupDescription='DB Security Group')
+    conn.modify_db_instance(DBInstanceIdentifier='db-master-1',
+                            DBSecurityGroups=['db_sg'])
+    result = conn.describe_db_instances()
+    result['DBInstances'][0]['DBSecurityGroups'][0]['DBSecurityGroupName'].should.equal('db_sg')
+
+
+@mock_rds2
+def test_list_tags_security_group():
+    conn = boto3.client('rds', region_name='us-west-2')
+    result = conn.describe_db_subnet_groups()
+    result['DBSubnetGroups'].should.have.length_of(0)
+
+    security_group = conn.create_db_security_group(DBSecurityGroupName="db_sg",
+                                                   DBSecurityGroupDescription='DB Security Group',
+                                                   Tags=[{'Value': 'bar', 'Key': 'foo'},
+                                                         {'Value': 'bar1', 'Key': 'foo1'}])['DBSecurityGroup']['DBSecurityGroupName']
+    resource = 'arn:aws:rds:us-west-2:1234567890:secgrp:{0}'.format(
+        security_group)
+    result = conn.list_tags_for_resource(ResourceName=resource)
+    result['TagList'].should.equal([{'Value': 'bar', 'Key': 'foo'},
+                                    {'Value': 'bar1', 'Key': 'foo1'}])
+
+
+@mock_rds2
+def test_add_tags_security_group():
+    conn = boto3.client('rds', region_name='us-west-2')
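+    # The tag assertions in these tests address resources by hand-built ARN.
+    # A small helper one could use instead (hypothetical; '1234567890' is the
+    # fixed account id this suite assumes for moto resources):
+    #
+    # def rds_arn(resource_type, name):
+    #     return 'arn:aws:rds:us-west-2:1234567890:{0}:{1}'.format(resource_type, name)
+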
result = conn.describe_db_subnet_groups() + result['DBSubnetGroups'].should.have.length_of(0) + + security_group = conn.create_db_security_group(DBSecurityGroupName="db_sg", + DBSecurityGroupDescription='DB Security Group')['DBSecurityGroup']['DBSecurityGroupName'] + + resource = 'arn:aws:rds:us-west-2:1234567890:secgrp:{0}'.format( + security_group) + conn.add_tags_to_resource(ResourceName=resource, + Tags=[{'Value': 'bar', + 'Key': 'foo'}, + {'Value': 'bar1', + 'Key': 'foo1'}]) + + result = conn.list_tags_for_resource(ResourceName=resource) + result['TagList'].should.equal([{'Value': 'bar', + 'Key': 'foo'}, + {'Value': 'bar1', + 'Key': 'foo1'}]) + + +@mock_rds2 +def test_remove_tags_security_group(): + conn = boto3.client('rds', region_name='us-west-2') + result = conn.describe_db_subnet_groups() + result['DBSubnetGroups'].should.have.length_of(0) + + security_group = conn.create_db_security_group(DBSecurityGroupName="db_sg", + DBSecurityGroupDescription='DB Security Group', + Tags=[{'Value': 'bar', + 'Key': 'foo'}, + {'Value': 'bar1', + 'Key': 'foo1'}])['DBSecurityGroup']['DBSecurityGroupName'] + + resource = 'arn:aws:rds:us-west-2:1234567890:secgrp:{0}'.format( + security_group) + conn.remove_tags_from_resource(ResourceName=resource, TagKeys=['foo']) + + result = conn.list_tags_for_resource(ResourceName=resource) + result['TagList'].should.equal([{'Value': 'bar1', 'Key': 'foo1'}]) + + +@mock_ec2 +@mock_rds2 +def test_create_database_subnet_group(): + vpc_conn = boto3.client('ec2', 'us-west-2') + vpc = vpc_conn.create_vpc(CidrBlock='10.0.0.0/16')['Vpc'] + subnet1 = vpc_conn.create_subnet( + VpcId=vpc['VpcId'], CidrBlock='10.1.0.0/24')['Subnet'] + subnet2 = vpc_conn.create_subnet( + VpcId=vpc['VpcId'], CidrBlock='10.1.0.0/26')['Subnet'] + + subnet_ids = [subnet1['SubnetId'], subnet2['SubnetId']] + conn = boto3.client('rds', region_name='us-west-2') + result = conn.create_db_subnet_group(DBSubnetGroupName='db_subnet', + DBSubnetGroupDescription='my db subnet', + SubnetIds=subnet_ids) + result['DBSubnetGroup']['DBSubnetGroupName'].should.equal("db_subnet") + result['DBSubnetGroup'][ + 'DBSubnetGroupDescription'].should.equal("my db subnet") + subnets = result['DBSubnetGroup']['Subnets'] + subnet_group_ids = [subnets[0]['SubnetIdentifier'], + subnets[1]['SubnetIdentifier']] + list(subnet_group_ids).should.equal(subnet_ids) + + +@mock_ec2 +@mock_rds2 +def test_create_database_in_subnet_group(): + vpc_conn = boto3.client('ec2', 'us-west-2') + vpc = vpc_conn.create_vpc(CidrBlock='10.0.0.0/16')['Vpc'] + subnet = vpc_conn.create_subnet( + VpcId=vpc['VpcId'], CidrBlock='10.1.0.0/24')['Subnet'] + + conn = boto3.client('rds', region_name='us-west-2') + conn.create_db_subnet_group(DBSubnetGroupName='db_subnet1', + DBSubnetGroupDescription='my db subnet', + SubnetIds=[subnet['SubnetId']]) + conn.create_db_instance(DBInstanceIdentifier='db-master-1', + AllocatedStorage=10, + Engine='postgres', + DBInstanceClass='db.m1.small', + MasterUsername='root', + MasterUserPassword='hunter2', + Port=1234, + DBSubnetGroupName='db_subnet1') + result = conn.describe_db_instances(DBInstanceIdentifier='db-master-1') + result['DBInstances'][0]['DBSubnetGroup'][ + 'DBSubnetGroupName'].should.equal('db_subnet1') + + +@mock_ec2 +@mock_rds2 +def test_describe_database_subnet_group(): + vpc_conn = boto3.client('ec2', 'us-west-2') + vpc = vpc_conn.create_vpc(CidrBlock='10.0.0.0/16')['Vpc'] + subnet = vpc_conn.create_subnet( + VpcId=vpc['VpcId'], CidrBlock='10.1.0.0/24')['Subnet'] + + conn = boto3.client('rds', 
region_name='us-west-2') + conn.create_db_subnet_group(DBSubnetGroupName="db_subnet1", + DBSubnetGroupDescription='my db subnet', + SubnetIds=[subnet['SubnetId']]) + conn.create_db_subnet_group(DBSubnetGroupName='db_subnet2', + DBSubnetGroupDescription='my db subnet', + SubnetIds=[subnet['SubnetId']]) + + resp = conn.describe_db_subnet_groups() + resp['DBSubnetGroups'].should.have.length_of(2) + + subnets = resp['DBSubnetGroups'][0]['Subnets'] + subnets.should.have.length_of(1) + + list(conn.describe_db_subnet_groups(DBSubnetGroupName="db_subnet1") + ['DBSubnetGroups']).should.have.length_of(1) + + conn.describe_db_subnet_groups.when.called_with( + DBSubnetGroupName="not-a-subnet").should.throw(ClientError) + + +@mock_ec2 +@mock_rds2 +def test_delete_database_subnet_group(): + vpc_conn = boto3.client('ec2', 'us-west-2') + vpc = vpc_conn.create_vpc(CidrBlock='10.0.0.0/16')['Vpc'] + subnet = vpc_conn.create_subnet( + VpcId=vpc['VpcId'], CidrBlock='10.1.0.0/24')['Subnet'] + + conn = boto3.client('rds', region_name='us-west-2') + result = conn.describe_db_subnet_groups() + result['DBSubnetGroups'].should.have.length_of(0) + + conn.create_db_subnet_group(DBSubnetGroupName="db_subnet1", + DBSubnetGroupDescription='my db subnet', + SubnetIds=[subnet['SubnetId']]) + result = conn.describe_db_subnet_groups() + result['DBSubnetGroups'].should.have.length_of(1) + + conn.delete_db_subnet_group(DBSubnetGroupName="db_subnet1") + result = conn.describe_db_subnet_groups() + result['DBSubnetGroups'].should.have.length_of(0) + + conn.delete_db_subnet_group.when.called_with( + DBSubnetGroupName="db_subnet1").should.throw(ClientError) + + +@mock_ec2 +@mock_rds2 +def test_list_tags_database_subnet_group(): + vpc_conn = boto3.client('ec2', 'us-west-2') + vpc = vpc_conn.create_vpc(CidrBlock='10.0.0.0/16')['Vpc'] + subnet = vpc_conn.create_subnet( + VpcId=vpc['VpcId'], CidrBlock='10.1.0.0/24')['Subnet'] + + conn = boto3.client('rds', region_name='us-west-2') + result = conn.describe_db_subnet_groups() + result['DBSubnetGroups'].should.have.length_of(0) + + subnet = conn.create_db_subnet_group(DBSubnetGroupName="db_subnet1", + DBSubnetGroupDescription='my db subnet', + SubnetIds=[subnet['SubnetId']], + Tags=[{'Value': 'bar', + 'Key': 'foo'}, + {'Value': 'bar1', + 'Key': 'foo1'}])['DBSubnetGroup']['DBSubnetGroupName'] + result = conn.list_tags_for_resource( + ResourceName='arn:aws:rds:us-west-2:1234567890:subgrp:{0}'.format(subnet)) + result['TagList'].should.equal([{'Value': 'bar', + 'Key': 'foo'}, + {'Value': 'bar1', + 'Key': 'foo1'}]) + + +@mock_ec2 +@mock_rds2 +def test_add_tags_database_subnet_group(): + vpc_conn = boto3.client('ec2', 'us-west-2') + vpc = vpc_conn.create_vpc(CidrBlock='10.0.0.0/16')['Vpc'] + subnet = vpc_conn.create_subnet( + VpcId=vpc['VpcId'], CidrBlock='10.1.0.0/24')['Subnet'] + + conn = boto3.client('rds', region_name='us-west-2') + result = conn.describe_db_subnet_groups() + result['DBSubnetGroups'].should.have.length_of(0) + + subnet = conn.create_db_subnet_group(DBSubnetGroupName="db_subnet1", + DBSubnetGroupDescription='my db subnet', + SubnetIds=[subnet['SubnetId']], + Tags=[])['DBSubnetGroup']['DBSubnetGroupName'] + resource = 'arn:aws:rds:us-west-2:1234567890:subgrp:{0}'.format(subnet) + + conn.add_tags_to_resource(ResourceName=resource, + Tags=[{'Value': 'bar', + 'Key': 'foo'}, + {'Value': 'bar1', + 'Key': 'foo1'}]) + + result = conn.list_tags_for_resource(ResourceName=resource) + result['TagList'].should.equal([{'Value': 'bar', + 'Key': 'foo'}, + {'Value': 'bar1', + 'Key': 
'foo1'}]) + + +@mock_ec2 +@mock_rds2 +def test_remove_tags_database_subnet_group(): + vpc_conn = boto3.client('ec2', 'us-west-2') + vpc = vpc_conn.create_vpc(CidrBlock='10.0.0.0/16')['Vpc'] + subnet = vpc_conn.create_subnet( + VpcId=vpc['VpcId'], CidrBlock='10.1.0.0/24')['Subnet'] + + conn = boto3.client('rds', region_name='us-west-2') + result = conn.describe_db_subnet_groups() + result['DBSubnetGroups'].should.have.length_of(0) + + subnet = conn.create_db_subnet_group(DBSubnetGroupName="db_subnet1", + DBSubnetGroupDescription='my db subnet', + SubnetIds=[subnet['SubnetId']], + Tags=[{'Value': 'bar', + 'Key': 'foo'}, + {'Value': 'bar1', + 'Key': 'foo1'}])['DBSubnetGroup']['DBSubnetGroupName'] + resource = 'arn:aws:rds:us-west-2:1234567890:subgrp:{0}'.format(subnet) + + conn.remove_tags_from_resource(ResourceName=resource, TagKeys=['foo']) + + result = conn.list_tags_for_resource(ResourceName=resource) + result['TagList'].should.equal([{'Value': 'bar1', 'Key': 'foo1'}]) + + +@mock_rds2 +def test_create_database_replica(): + conn = boto3.client('rds', region_name='us-west-2') + + database = conn.create_db_instance(DBInstanceIdentifier='db-master-1', + AllocatedStorage=10, + Engine='postgres', + DBInstanceClass='db.m1.small', + MasterUsername='root', + MasterUserPassword='hunter2', + Port=1234, + DBSecurityGroups=["my_sg"]) + + replica = conn.create_db_instance_read_replica(DBInstanceIdentifier="db-replica-1", + SourceDBInstanceIdentifier="db-master-1", + DBInstanceClass="db.m1.small") + replica['DBInstance'][ + 'ReadReplicaSourceDBInstanceIdentifier'].should.equal('db-master-1') + replica['DBInstance']['DBInstanceClass'].should.equal('db.m1.small') + replica['DBInstance']['DBInstanceIdentifier'].should.equal('db-replica-1') + + master = conn.describe_db_instances(DBInstanceIdentifier="db-master-1") + master['DBInstances'][0]['ReadReplicaDBInstanceIdentifiers'].should.equal([ + 'db-replica-1']) + + conn.delete_db_instance( + DBInstanceIdentifier="db-replica-1", SkipFinalSnapshot=True) + + master = conn.describe_db_instances(DBInstanceIdentifier="db-master-1") + master['DBInstances'][0][ + 'ReadReplicaDBInstanceIdentifiers'].should.equal([]) + + +@mock_rds2 +@mock_kms +def test_create_database_with_encrypted_storage(): + kms_conn = boto3.client('kms', region_name='us-west-2') + key = kms_conn.create_key(Policy='my RDS encryption policy', + Description='RDS encryption key', + KeyUsage='ENCRYPT_DECRYPT') + + conn = boto3.client('rds', region_name='us-west-2') + database = conn.create_db_instance(DBInstanceIdentifier='db-master-1', + AllocatedStorage=10, + Engine='postgres', + DBInstanceClass='db.m1.small', + MasterUsername='root', + MasterUserPassword='hunter2', + Port=1234, + DBSecurityGroups=["my_sg"], + StorageEncrypted=True, + KmsKeyId=key['KeyMetadata']['KeyId']) + + database['DBInstance']['StorageEncrypted'].should.equal(True) + database['DBInstance']['KmsKeyId'].should.equal( + key['KeyMetadata']['KeyId']) + + +@mock_rds2 +def test_create_db_parameter_group(): + conn = boto3.client('rds', region_name='us-west-2') + db_parameter_group = conn.create_db_parameter_group(DBParameterGroupName='test', + DBParameterGroupFamily='mysql5.6', + Description='test parameter group') + + db_parameter_group['DBParameterGroup'][ + 'DBParameterGroupName'].should.equal('test') + db_parameter_group['DBParameterGroup'][ + 'DBParameterGroupFamily'].should.equal('mysql5.6') + db_parameter_group['DBParameterGroup'][ + 'Description'].should.equal('test parameter group') + + +@mock_rds2 +def 
test_create_db_instance_with_parameter_group(): + conn = boto3.client('rds', region_name='us-west-2') + db_parameter_group = conn.create_db_parameter_group(DBParameterGroupName='test', + DBParameterGroupFamily='mysql5.6', + Description='test parameter group') + + database = conn.create_db_instance(DBInstanceIdentifier='db-master-1', + AllocatedStorage=10, + Engine='mysql', + DBInstanceClass='db.m1.small', + DBParameterGroupName='test', + MasterUsername='root', + MasterUserPassword='hunter2', + Port=1234) + + len(database['DBInstance']['DBParameterGroups']).should.equal(1) + database['DBInstance']['DBParameterGroups'][0][ + 'DBParameterGroupName'].should.equal('test') + database['DBInstance']['DBParameterGroups'][0][ + 'ParameterApplyStatus'].should.equal('in-sync') + + +@mock_rds2 +def test_create_database_with_default_port(): + conn = boto3.client('rds', region_name='us-west-2') + database = conn.create_db_instance(DBInstanceIdentifier='db-master-1', + AllocatedStorage=10, + Engine='postgres', + DBInstanceClass='db.m1.small', + MasterUsername='root', + MasterUserPassword='hunter2', + DBSecurityGroups=["my_sg"]) + database['DBInstance']['Endpoint']['Port'].should.equal(5432) + + +@mock_rds2 +def test_modify_db_instance_with_parameter_group(): + conn = boto3.client('rds', region_name='us-west-2') + database = conn.create_db_instance(DBInstanceIdentifier='db-master-1', + AllocatedStorage=10, + Engine='mysql', + DBInstanceClass='db.m1.small', + MasterUsername='root', + MasterUserPassword='hunter2', + Port=1234) + + len(database['DBInstance']['DBParameterGroups']).should.equal(1) + database['DBInstance']['DBParameterGroups'][0][ + 'DBParameterGroupName'].should.equal('default.mysql5.6') + database['DBInstance']['DBParameterGroups'][0][ + 'ParameterApplyStatus'].should.equal('in-sync') + + db_parameter_group = conn.create_db_parameter_group(DBParameterGroupName='test', + DBParameterGroupFamily='mysql5.6', + Description='test parameter group') + conn.modify_db_instance(DBInstanceIdentifier='db-master-1', + DBParameterGroupName='test', + ApplyImmediately=True) + + database = conn.describe_db_instances( + DBInstanceIdentifier='db-master-1')['DBInstances'][0] + len(database['DBParameterGroups']).should.equal(1) + database['DBParameterGroups'][0][ + 'DBParameterGroupName'].should.equal('test') + database['DBParameterGroups'][0][ + 'ParameterApplyStatus'].should.equal('in-sync') + + +@mock_rds2 +def test_create_db_parameter_group_empty_description(): + conn = boto3.client('rds', region_name='us-west-2') + conn.create_db_parameter_group.when.called_with(DBParameterGroupName='test', + DBParameterGroupFamily='mysql5.6', + Description='').should.throw(ClientError) + + +@mock_rds2 +def test_create_db_parameter_group_duplicate(): + conn = boto3.client('rds', region_name='us-west-2') + conn.create_db_parameter_group(DBParameterGroupName='test', + DBParameterGroupFamily='mysql5.6', + Description='test parameter group') + conn.create_db_parameter_group.when.called_with(DBParameterGroupName='test', + DBParameterGroupFamily='mysql5.6', + Description='test parameter group').should.throw(ClientError) + + +@mock_rds2 +def test_describe_db_parameter_group(): + conn = boto3.client('rds', region_name='us-west-2') + conn.create_db_parameter_group(DBParameterGroupName='test', + DBParameterGroupFamily='mysql5.6', + Description='test parameter group') + db_parameter_groups = conn.describe_db_parameter_groups( + DBParameterGroupName='test') + db_parameter_groups['DBParameterGroups'][0][ + 
'DBParameterGroupName'].should.equal('test') + + +@mock_rds2 +def test_describe_non_existant_db_parameter_group(): + conn = boto3.client('rds', region_name='us-west-2') + db_parameter_groups = conn.describe_db_parameter_groups( + DBParameterGroupName='test') + len(db_parameter_groups['DBParameterGroups']).should.equal(0) + + +@mock_rds2 +def test_delete_db_parameter_group(): + conn = boto3.client('rds', region_name='us-west-2') + conn.create_db_parameter_group(DBParameterGroupName='test', + DBParameterGroupFamily='mysql5.6', + Description='test parameter group') + db_parameter_groups = conn.describe_db_parameter_groups( + DBParameterGroupName='test') + db_parameter_groups['DBParameterGroups'][0][ + 'DBParameterGroupName'].should.equal('test') + conn.delete_db_parameter_group(DBParameterGroupName='test') + db_parameter_groups = conn.describe_db_parameter_groups( + DBParameterGroupName='test') + len(db_parameter_groups['DBParameterGroups']).should.equal(0) + + +@mock_rds2 +def test_modify_db_parameter_group(): + conn = boto3.client('rds', region_name='us-west-2') + conn.create_db_parameter_group(DBParameterGroupName='test', + DBParameterGroupFamily='mysql5.6', + Description='test parameter group') + + modify_result = conn.modify_db_parameter_group(DBParameterGroupName='test', + Parameters=[{ + 'ParameterName': 'foo', + 'ParameterValue': 'foo_val', + 'Description': 'test param', + 'ApplyMethod': 'immediate' + }] + ) + + modify_result['DBParameterGroupName'].should.equal('test') + + db_parameters = conn.describe_db_parameters(DBParameterGroupName='test') + db_parameters['Parameters'][0]['ParameterName'].should.equal('foo') + db_parameters['Parameters'][0]['ParameterValue'].should.equal('foo_val') + db_parameters['Parameters'][0]['Description'].should.equal('test param') + db_parameters['Parameters'][0]['ApplyMethod'].should.equal('immediate') + + +@mock_rds2 +def test_delete_non_existant_db_parameter_group(): + conn = boto3.client('rds', region_name='us-west-2') + conn.delete_db_parameter_group.when.called_with( + DBParameterGroupName='non-existant').should.throw(ClientError) + + +@mock_rds2 +def test_create_parameter_group_with_tags(): + conn = boto3.client('rds', region_name='us-west-2') + conn.create_db_parameter_group(DBParameterGroupName='test', + DBParameterGroupFamily='mysql5.6', + Description='test parameter group', + Tags=[{ + 'Key': 'foo', + 'Value': 'bar', + }]) + result = conn.list_tags_for_resource( + ResourceName='arn:aws:rds:us-west-2:1234567890:pg:test') + result['TagList'].should.equal([{'Value': 'bar', 'Key': 'foo'}]) diff --git a/tests/test_rds2/test_server.py b/tests/test_rds2/test_server.py index f9489e054b4f..8ae44fb5820a 100644 --- a/tests/test_rds2/test_server.py +++ b/tests/test_rds2/test_server.py @@ -1,20 +1,20 @@ -from __future__ import unicode_literals - -import sure # noqa - -import moto.server as server -from moto import mock_rds2 - -''' -Test the different server responses -''' - - -#@mock_rds2 -# def test_list_databases(): -# backend = server.create_backend_app("rds2") -# test_client = backend.test_client() -# -# res = test_client.get('/?Action=DescribeDBInstances') -# -# res.data.decode("utf-8").should.contain("") +from __future__ import unicode_literals + +import sure # noqa + +import moto.server as server +from moto import mock_rds2 + +''' +Test the different server responses +''' + + +#@mock_rds2 +# def test_list_databases(): +# backend = server.create_backend_app("rds2") +# test_client = backend.test_client() +# +# res = 
test_client.get('/?Action=DescribeDBInstances') +# +# res.data.decode("utf-8").should.contain("") diff --git a/tests/test_redshift/test_redshift.py b/tests/test_redshift/test_redshift.py index 9208c92dd7d1..f0e227a5d9fc 100644 --- a/tests/test_redshift/test_redshift.py +++ b/tests/test_redshift/test_redshift.py @@ -1,1242 +1,1242 @@ -from __future__ import unicode_literals - -import datetime - -import boto -import boto3 -from boto.redshift.exceptions import ( - ClusterNotFound, - ClusterParameterGroupNotFound, - ClusterSecurityGroupNotFound, - ClusterSubnetGroupNotFound, - InvalidSubnet, -) -from botocore.exceptions import ( - ClientError -) -import sure # noqa - -from moto import mock_ec2 -from moto import mock_ec2_deprecated -from moto import mock_redshift -from moto import mock_redshift_deprecated - - -@mock_redshift -def test_create_cluster_boto3(): - client = boto3.client('redshift', region_name='us-east-1') - response = client.create_cluster( - DBName='test', - ClusterIdentifier='test', - ClusterType='single-node', - NodeType='ds2.xlarge', - MasterUsername='user', - MasterUserPassword='password', - ) - response['Cluster']['NodeType'].should.equal('ds2.xlarge') - create_time = response['Cluster']['ClusterCreateTime'] - create_time.should.be.lower_than(datetime.datetime.now(create_time.tzinfo)) - - -@mock_redshift -def test_create_snapshot_copy_grant(): - client = boto3.client('redshift', region_name='us-east-1') - grants = client.create_snapshot_copy_grant( - SnapshotCopyGrantName='test-us-east-1', - KmsKeyId='fake', - ) - grants['SnapshotCopyGrant']['SnapshotCopyGrantName'].should.equal('test-us-east-1') - grants['SnapshotCopyGrant']['KmsKeyId'].should.equal('fake') - - client.delete_snapshot_copy_grant( - SnapshotCopyGrantName='test-us-east-1', - ) - - client.describe_snapshot_copy_grants.when.called_with( - SnapshotCopyGrantName='test-us-east-1', - ).should.throw(Exception) - - -@mock_redshift -def test_create_many_snapshot_copy_grants(): - client = boto3.client('redshift', region_name='us-east-1') - - for i in range(10): - client.create_snapshot_copy_grant( - SnapshotCopyGrantName='test-us-east-1-{0}'.format(i), - KmsKeyId='fake', - ) - response = client.describe_snapshot_copy_grants() - len(response['SnapshotCopyGrants']).should.equal(10) - - -@mock_redshift -def test_no_snapshot_copy_grants(): - client = boto3.client('redshift', region_name='us-east-1') - response = client.describe_snapshot_copy_grants() - len(response['SnapshotCopyGrants']).should.equal(0) - - -@mock_redshift_deprecated -def test_create_cluster(): - conn = boto.redshift.connect_to_region("us-east-1") - cluster_identifier = 'my_cluster' - - cluster_response = conn.create_cluster( - cluster_identifier, - node_type="dw.hs1.xlarge", - master_username="username", - master_user_password="password", - db_name="my_db", - cluster_type="multi-node", - availability_zone="us-east-1d", - preferred_maintenance_window="Mon:03:00-Mon:11:00", - automated_snapshot_retention_period=10, - port=1234, - cluster_version="1.0", - allow_version_upgrade=True, - number_of_nodes=3, - ) - cluster_response['CreateClusterResponse']['CreateClusterResult'][ - 'Cluster']['ClusterStatus'].should.equal('creating') - - cluster_response = conn.describe_clusters(cluster_identifier) - cluster = cluster_response['DescribeClustersResponse'][ - 'DescribeClustersResult']['Clusters'][0] - - cluster['ClusterIdentifier'].should.equal(cluster_identifier) - cluster['NodeType'].should.equal("dw.hs1.xlarge") - cluster['MasterUsername'].should.equal("username") 
- cluster['DBName'].should.equal("my_db") - cluster['ClusterSecurityGroups'][0][ - 'ClusterSecurityGroupName'].should.equal("Default") - cluster['VpcSecurityGroups'].should.equal([]) - cluster['ClusterSubnetGroupName'].should.equal(None) - cluster['AvailabilityZone'].should.equal("us-east-1d") - cluster['PreferredMaintenanceWindow'].should.equal("Mon:03:00-Mon:11:00") - cluster['ClusterParameterGroups'][0][ - 'ParameterGroupName'].should.equal("default.redshift-1.0") - cluster['AutomatedSnapshotRetentionPeriod'].should.equal(10) - cluster['Port'].should.equal(1234) - cluster['ClusterVersion'].should.equal("1.0") - cluster['AllowVersionUpgrade'].should.equal(True) - cluster['NumberOfNodes'].should.equal(3) - - -@mock_redshift_deprecated -def test_create_single_node_cluster(): - conn = boto.redshift.connect_to_region("us-east-1") - cluster_identifier = 'my_cluster' - - conn.create_cluster( - cluster_identifier, - node_type="dw.hs1.xlarge", - master_username="username", - master_user_password="password", - db_name="my_db", - cluster_type="single-node", - ) - - cluster_response = conn.describe_clusters(cluster_identifier) - cluster = cluster_response['DescribeClustersResponse'][ - 'DescribeClustersResult']['Clusters'][0] - - cluster['ClusterIdentifier'].should.equal(cluster_identifier) - cluster['NodeType'].should.equal("dw.hs1.xlarge") - cluster['MasterUsername'].should.equal("username") - cluster['DBName'].should.equal("my_db") - cluster['NumberOfNodes'].should.equal(1) - - -@mock_redshift_deprecated -def test_default_cluster_attributes(): - conn = boto.redshift.connect_to_region("us-east-1") - cluster_identifier = 'my_cluster' - - conn.create_cluster( - cluster_identifier, - node_type="dw.hs1.xlarge", - master_username="username", - master_user_password="password", - ) - - cluster_response = conn.describe_clusters(cluster_identifier) - cluster = cluster_response['DescribeClustersResponse'][ - 'DescribeClustersResult']['Clusters'][0] - - cluster['DBName'].should.equal("dev") - cluster['ClusterSubnetGroupName'].should.equal(None) - assert "us-east-" in cluster['AvailabilityZone'] - cluster['PreferredMaintenanceWindow'].should.equal("Mon:03:00-Mon:03:30") - cluster['ClusterParameterGroups'][0][ - 'ParameterGroupName'].should.equal("default.redshift-1.0") - cluster['AutomatedSnapshotRetentionPeriod'].should.equal(1) - cluster['Port'].should.equal(5439) - cluster['ClusterVersion'].should.equal("1.0") - cluster['AllowVersionUpgrade'].should.equal(True) - cluster['NumberOfNodes'].should.equal(1) - - -@mock_redshift_deprecated -@mock_ec2_deprecated -def test_create_cluster_in_subnet_group(): - vpc_conn = boto.connect_vpc() - vpc = vpc_conn.create_vpc("10.0.0.0/16") - subnet = vpc_conn.create_subnet(vpc.id, "10.0.0.0/24") - redshift_conn = boto.connect_redshift() - redshift_conn.create_cluster_subnet_group( - "my_subnet_group", - "This is my subnet group", - subnet_ids=[subnet.id], - ) - - redshift_conn.create_cluster( - "my_cluster", - node_type="dw.hs1.xlarge", - master_username="username", - master_user_password="password", - cluster_subnet_group_name='my_subnet_group', - ) - - cluster_response = redshift_conn.describe_clusters("my_cluster") - cluster = cluster_response['DescribeClustersResponse'][ - 'DescribeClustersResult']['Clusters'][0] - cluster['ClusterSubnetGroupName'].should.equal('my_subnet_group') - - -@mock_redshift -@mock_ec2 -def test_create_cluster_in_subnet_group_boto3(): - ec2 = boto3.resource('ec2', region_name='us-east-1') - vpc = ec2.create_vpc(CidrBlock='10.0.0.0/16') - subnet 
= ec2.create_subnet(VpcId=vpc.id, CidrBlock='10.0.0.0/24') - client = boto3.client('redshift', region_name='us-east-1') - client.create_cluster_subnet_group( - ClusterSubnetGroupName='my_subnet_group', - Description='This is my subnet group', - SubnetIds=[subnet.id] - ) - - client.create_cluster( - ClusterIdentifier="my_cluster", - NodeType="dw.hs1.xlarge", - MasterUsername="username", - MasterUserPassword="password", - ClusterSubnetGroupName='my_subnet_group', - ) - - cluster_response = client.describe_clusters(ClusterIdentifier="my_cluster") - cluster = cluster_response['Clusters'][0] - cluster['ClusterSubnetGroupName'].should.equal('my_subnet_group') - - -@mock_redshift_deprecated -def test_create_cluster_with_security_group(): - conn = boto.redshift.connect_to_region("us-east-1") - conn.create_cluster_security_group( - "security_group1", - "This is my security group", - ) - conn.create_cluster_security_group( - "security_group2", - "This is my security group", - ) - - cluster_identifier = 'my_cluster' - conn.create_cluster( - cluster_identifier, - node_type="dw.hs1.xlarge", - master_username="username", - master_user_password="password", - cluster_security_groups=["security_group1", "security_group2"] - ) - - cluster_response = conn.describe_clusters(cluster_identifier) - cluster = cluster_response['DescribeClustersResponse'][ - 'DescribeClustersResult']['Clusters'][0] - group_names = [group['ClusterSecurityGroupName'] - for group in cluster['ClusterSecurityGroups']] - set(group_names).should.equal(set(["security_group1", "security_group2"])) - - -@mock_redshift -def test_create_cluster_with_security_group_boto3(): - client = boto3.client('redshift', region_name='us-east-1') - client.create_cluster_security_group( - ClusterSecurityGroupName="security_group1", - Description="This is my security group", - ) - client.create_cluster_security_group( - ClusterSecurityGroupName="security_group2", - Description="This is my security group", - ) - - cluster_identifier = 'my_cluster' - client.create_cluster( - ClusterIdentifier=cluster_identifier, - NodeType="dw.hs1.xlarge", - MasterUsername="username", - MasterUserPassword="password", - ClusterSecurityGroups=["security_group1", "security_group2"] - ) - response = client.describe_clusters(ClusterIdentifier=cluster_identifier) - cluster = response['Clusters'][0] - group_names = [group['ClusterSecurityGroupName'] - for group in cluster['ClusterSecurityGroups']] - set(group_names).should.equal({"security_group1", "security_group2"}) - - -@mock_redshift_deprecated -@mock_ec2_deprecated -def test_create_cluster_with_vpc_security_groups(): - vpc_conn = boto.connect_vpc() - ec2_conn = boto.connect_ec2() - redshift_conn = boto.connect_redshift() - vpc = vpc_conn.create_vpc("10.0.0.0/16") - security_group = ec2_conn.create_security_group( - "vpc_security_group", "a group", vpc_id=vpc.id) - - redshift_conn.create_cluster( - "my_cluster", - node_type="dw.hs1.xlarge", - master_username="username", - master_user_password="password", - vpc_security_group_ids=[security_group.id], - ) - - cluster_response = redshift_conn.describe_clusters("my_cluster") - cluster = cluster_response['DescribeClustersResponse'][ - 'DescribeClustersResult']['Clusters'][0] - group_ids = [group['VpcSecurityGroupId'] - for group in cluster['VpcSecurityGroups']] - list(group_ids).should.equal([security_group.id]) - - -@mock_redshift -@mock_ec2 -def test_create_cluster_with_vpc_security_groups_boto3(): - ec2 = boto3.resource('ec2', region_name='us-east-1') - vpc = 
ec2.create_vpc(CidrBlock='10.0.0.0/16') - client = boto3.client('redshift', region_name='us-east-1') - cluster_id = 'my_cluster' - security_group = ec2.create_security_group( - Description="vpc_security_group", - GroupName="a group", - VpcId=vpc.id) - client.create_cluster( - ClusterIdentifier=cluster_id, - NodeType="dw.hs1.xlarge", - MasterUsername="username", - MasterUserPassword="password", - VpcSecurityGroupIds=[security_group.id], - ) - response = client.describe_clusters(ClusterIdentifier=cluster_id) - cluster = response['Clusters'][0] - group_ids = [group['VpcSecurityGroupId'] - for group in cluster['VpcSecurityGroups']] - list(group_ids).should.equal([security_group.id]) - - -@mock_redshift -def test_create_cluster_with_iam_roles(): - iam_roles_arn = ['arn:aws:iam:::role/my-iam-role',] - client = boto3.client('redshift', region_name='us-east-1') - cluster_id = 'my_cluster' - client.create_cluster( - ClusterIdentifier=cluster_id, - NodeType="dw.hs1.xlarge", - MasterUsername="username", - MasterUserPassword="password", - IamRoles=iam_roles_arn - ) - response = client.describe_clusters(ClusterIdentifier=cluster_id) - cluster = response['Clusters'][0] - iam_roles = [role['IamRoleArn'] for role in cluster['IamRoles']] - iam_roles_arn.should.equal(iam_roles) - - -@mock_redshift_deprecated -def test_create_cluster_with_parameter_group(): - conn = boto.connect_redshift() - conn.create_cluster_parameter_group( - "my_parameter_group", - "redshift-1.0", - "This is my parameter group", - ) - - conn.create_cluster( - "my_cluster", - node_type="dw.hs1.xlarge", - master_username="username", - master_user_password="password", - cluster_parameter_group_name='my_parameter_group', - ) - - cluster_response = conn.describe_clusters("my_cluster") - cluster = cluster_response['DescribeClustersResponse'][ - 'DescribeClustersResult']['Clusters'][0] - cluster['ClusterParameterGroups'][0][ - 'ParameterGroupName'].should.equal("my_parameter_group") - - -@mock_redshift_deprecated -def test_describe_non_existent_cluster(): - conn = boto.redshift.connect_to_region("us-east-1") - conn.describe_clusters.when.called_with( - "not-a-cluster").should.throw(ClusterNotFound) - - -@mock_redshift_deprecated -def test_delete_cluster(): - conn = boto.connect_redshift() - cluster_identifier = 'my_cluster' - - conn.create_cluster( - cluster_identifier, - node_type='single-node', - master_username="username", - master_user_password="password", - ) - - clusters = conn.describe_clusters()['DescribeClustersResponse'][ - 'DescribeClustersResult']['Clusters'] - list(clusters).should.have.length_of(1) - - conn.delete_cluster(cluster_identifier) - - clusters = conn.describe_clusters()['DescribeClustersResponse'][ - 'DescribeClustersResult']['Clusters'] - list(clusters).should.have.length_of(0) - - # Delete invalid id - conn.delete_cluster.when.called_with( - "not-a-cluster").should.throw(ClusterNotFound) - - -@mock_redshift_deprecated -def test_modify_cluster(): - conn = boto.connect_redshift() - cluster_identifier = 'my_cluster' - conn.create_cluster_security_group( - "security_group", - "This is my security group", - ) - conn.create_cluster_parameter_group( - "my_parameter_group", - "redshift-1.0", - "This is my parameter group", - ) - - conn.create_cluster( - cluster_identifier, - node_type='single-node', - master_username="username", - master_user_password="password", - ) - - conn.modify_cluster( - cluster_identifier, - cluster_type="multi-node", - node_type="dw.hs1.xlarge", - cluster_security_groups="security_group", - 
master_user_password="new_password", - cluster_parameter_group_name="my_parameter_group", - automated_snapshot_retention_period=7, - preferred_maintenance_window="Tue:03:00-Tue:11:00", - allow_version_upgrade=False, - new_cluster_identifier="new_identifier", - ) - - cluster_response = conn.describe_clusters("new_identifier") - cluster = cluster_response['DescribeClustersResponse'][ - 'DescribeClustersResult']['Clusters'][0] - - cluster['ClusterIdentifier'].should.equal("new_identifier") - cluster['NodeType'].should.equal("dw.hs1.xlarge") - cluster['ClusterSecurityGroups'][0][ - 'ClusterSecurityGroupName'].should.equal("security_group") - cluster['PreferredMaintenanceWindow'].should.equal("Tue:03:00-Tue:11:00") - cluster['ClusterParameterGroups'][0][ - 'ParameterGroupName'].should.equal("my_parameter_group") - cluster['AutomatedSnapshotRetentionPeriod'].should.equal(7) - cluster['AllowVersionUpgrade'].should.equal(False) - # This one should remain unmodified. - cluster['NumberOfNodes'].should.equal(1) - - -@mock_redshift_deprecated -@mock_ec2_deprecated -def test_create_cluster_subnet_group(): - vpc_conn = boto.connect_vpc() - vpc = vpc_conn.create_vpc("10.0.0.0/16") - subnet1 = vpc_conn.create_subnet(vpc.id, "10.0.0.0/24") - subnet2 = vpc_conn.create_subnet(vpc.id, "10.0.1.0/24") - - redshift_conn = boto.connect_redshift() - - redshift_conn.create_cluster_subnet_group( - "my_subnet", - "This is my subnet group", - subnet_ids=[subnet1.id, subnet2.id], - ) - - subnets_response = redshift_conn.describe_cluster_subnet_groups( - "my_subnet") - my_subnet = subnets_response['DescribeClusterSubnetGroupsResponse'][ - 'DescribeClusterSubnetGroupsResult']['ClusterSubnetGroups'][0] - - my_subnet['ClusterSubnetGroupName'].should.equal("my_subnet") - my_subnet['Description'].should.equal("This is my subnet group") - subnet_ids = [subnet['SubnetIdentifier'] - for subnet in my_subnet['Subnets']] - set(subnet_ids).should.equal(set([subnet1.id, subnet2.id])) - - -@mock_redshift_deprecated -@mock_ec2_deprecated -def test_create_invalid_cluster_subnet_group(): - redshift_conn = boto.connect_redshift() - redshift_conn.create_cluster_subnet_group.when.called_with( - "my_subnet", - "This is my subnet group", - subnet_ids=["subnet-1234"], - ).should.throw(InvalidSubnet) - - -@mock_redshift_deprecated -def test_describe_non_existent_subnet_group(): - conn = boto.redshift.connect_to_region("us-east-1") - conn.describe_cluster_subnet_groups.when.called_with( - "not-a-subnet-group").should.throw(ClusterSubnetGroupNotFound) - - -@mock_redshift_deprecated -@mock_ec2_deprecated -def test_delete_cluster_subnet_group(): - vpc_conn = boto.connect_vpc() - vpc = vpc_conn.create_vpc("10.0.0.0/16") - subnet = vpc_conn.create_subnet(vpc.id, "10.0.0.0/24") - redshift_conn = boto.connect_redshift() - - redshift_conn.create_cluster_subnet_group( - "my_subnet", - "This is my subnet group", - subnet_ids=[subnet.id], - ) - - subnets_response = redshift_conn.describe_cluster_subnet_groups() - subnets = subnets_response['DescribeClusterSubnetGroupsResponse'][ - 'DescribeClusterSubnetGroupsResult']['ClusterSubnetGroups'] - subnets.should.have.length_of(1) - - redshift_conn.delete_cluster_subnet_group("my_subnet") - - subnets_response = redshift_conn.describe_cluster_subnet_groups() - subnets = subnets_response['DescribeClusterSubnetGroupsResponse'][ - 'DescribeClusterSubnetGroupsResult']['ClusterSubnetGroups'] - subnets.should.have.length_of(0) - - # Delete invalid id - redshift_conn.delete_cluster_subnet_group.when.called_with( - 
"not-a-subnet-group").should.throw(ClusterSubnetGroupNotFound) - - -@mock_redshift_deprecated -def test_create_cluster_security_group(): - conn = boto.connect_redshift() - conn.create_cluster_security_group( - "my_security_group", - "This is my security group", - ) - - groups_response = conn.describe_cluster_security_groups( - "my_security_group") - my_group = groups_response['DescribeClusterSecurityGroupsResponse'][ - 'DescribeClusterSecurityGroupsResult']['ClusterSecurityGroups'][0] - - my_group['ClusterSecurityGroupName'].should.equal("my_security_group") - my_group['Description'].should.equal("This is my security group") - list(my_group['IPRanges']).should.equal([]) - - -@mock_redshift_deprecated -def test_describe_non_existent_security_group(): - conn = boto.redshift.connect_to_region("us-east-1") - conn.describe_cluster_security_groups.when.called_with( - "not-a-security-group").should.throw(ClusterSecurityGroupNotFound) - - -@mock_redshift_deprecated -def test_delete_cluster_security_group(): - conn = boto.connect_redshift() - conn.create_cluster_security_group( - "my_security_group", - "This is my security group", - ) - - groups_response = conn.describe_cluster_security_groups() - groups = groups_response['DescribeClusterSecurityGroupsResponse'][ - 'DescribeClusterSecurityGroupsResult']['ClusterSecurityGroups'] - groups.should.have.length_of(2) # The default group already exists - - conn.delete_cluster_security_group("my_security_group") - - groups_response = conn.describe_cluster_security_groups() - groups = groups_response['DescribeClusterSecurityGroupsResponse'][ - 'DescribeClusterSecurityGroupsResult']['ClusterSecurityGroups'] - groups.should.have.length_of(1) - - # Delete invalid id - conn.delete_cluster_security_group.when.called_with( - "not-a-security-group").should.throw(ClusterSecurityGroupNotFound) - - -@mock_redshift_deprecated -def test_create_cluster_parameter_group(): - conn = boto.connect_redshift() - conn.create_cluster_parameter_group( - "my_parameter_group", - "redshift-1.0", - "This is my parameter group", - ) - - groups_response = conn.describe_cluster_parameter_groups( - "my_parameter_group") - my_group = groups_response['DescribeClusterParameterGroupsResponse'][ - 'DescribeClusterParameterGroupsResult']['ParameterGroups'][0] - - my_group['ParameterGroupName'].should.equal("my_parameter_group") - my_group['ParameterGroupFamily'].should.equal("redshift-1.0") - my_group['Description'].should.equal("This is my parameter group") - - -@mock_redshift_deprecated -def test_describe_non_existent_parameter_group(): - conn = boto.redshift.connect_to_region("us-east-1") - conn.describe_cluster_parameter_groups.when.called_with( - "not-a-parameter-group").should.throw(ClusterParameterGroupNotFound) - - -@mock_redshift_deprecated -def test_delete_cluster_parameter_group(): - conn = boto.connect_redshift() - conn.create_cluster_parameter_group( - "my_parameter_group", - "redshift-1.0", - "This is my parameter group", - ) - - groups_response = conn.describe_cluster_parameter_groups() - groups = groups_response['DescribeClusterParameterGroupsResponse'][ - 'DescribeClusterParameterGroupsResult']['ParameterGroups'] - groups.should.have.length_of(2) # The default group already exists - - conn.delete_cluster_parameter_group("my_parameter_group") - - groups_response = conn.describe_cluster_parameter_groups() - groups = groups_response['DescribeClusterParameterGroupsResponse'][ - 'DescribeClusterParameterGroupsResult']['ParameterGroups'] - groups.should.have.length_of(1) - - # 
Delete invalid id - conn.delete_cluster_parameter_group.when.called_with( - "not-a-parameter-group").should.throw(ClusterParameterGroupNotFound) - - - -@mock_redshift -def test_create_cluster_snapshot_of_non_existent_cluster(): - client = boto3.client('redshift', region_name='us-east-1') - cluster_identifier = 'non-existent-cluster-id' - client.create_cluster_snapshot.when.called_with( - SnapshotIdentifier='snapshot-id', - ClusterIdentifier=cluster_identifier, - ).should.throw(ClientError, 'Cluster {} not found.'.format(cluster_identifier)) - - -@mock_redshift -def test_create_cluster_snapshot(): - client = boto3.client('redshift', region_name='us-east-1') - cluster_identifier = 'my_cluster' - snapshot_identifier = 'my_snapshot' - - cluster_response = client.create_cluster( - DBName='test-db', - ClusterIdentifier=cluster_identifier, - ClusterType='single-node', - NodeType='ds2.xlarge', - MasterUsername='username', - MasterUserPassword='password', - ) - cluster_response['Cluster']['NodeType'].should.equal('ds2.xlarge') - - snapshot_response = client.create_cluster_snapshot( - SnapshotIdentifier=snapshot_identifier, - ClusterIdentifier=cluster_identifier, - Tags=[{'Key': 'test-tag-key', - 'Value': 'test-tag-value'}] - ) - snapshot = snapshot_response['Snapshot'] - snapshot['SnapshotIdentifier'].should.equal(snapshot_identifier) - snapshot['ClusterIdentifier'].should.equal(cluster_identifier) - snapshot['NumberOfNodes'].should.equal(1) - snapshot['NodeType'].should.equal('ds2.xlarge') - snapshot['MasterUsername'].should.equal('username') - - -@mock_redshift -def test_describe_cluster_snapshots(): - client = boto3.client('redshift', region_name='us-east-1') - cluster_identifier = 'my_cluster' - snapshot_identifier = 'my_snapshot' - - client.create_cluster( - DBName='test-db', - ClusterIdentifier=cluster_identifier, - ClusterType='single-node', - NodeType='ds2.xlarge', - MasterUsername='username', - MasterUserPassword='password', - ) - - client.create_cluster_snapshot( - SnapshotIdentifier=snapshot_identifier, - ClusterIdentifier=cluster_identifier, - ) - - resp_clust = client.describe_cluster_snapshots(ClusterIdentifier=cluster_identifier) - resp_snap = client.describe_cluster_snapshots(SnapshotIdentifier=snapshot_identifier) - resp_clust['Snapshots'][0].should.equal(resp_snap['Snapshots'][0]) - snapshot = resp_snap['Snapshots'][0] - snapshot['SnapshotIdentifier'].should.equal(snapshot_identifier) - snapshot['ClusterIdentifier'].should.equal(cluster_identifier) - snapshot['NumberOfNodes'].should.equal(1) - snapshot['NodeType'].should.equal('ds2.xlarge') - snapshot['MasterUsername'].should.equal('username') - - -@mock_redshift -def test_describe_cluster_snapshots_not_found_error(): - client = boto3.client('redshift', region_name='us-east-1') - cluster_identifier = 'my_cluster' - snapshot_identifier = 'my_snapshot' - - client.describe_cluster_snapshots.when.called_with( - ClusterIdentifier=cluster_identifier, - ).should.throw(ClientError, 'Cluster {} not found.'.format(cluster_identifier)) - - client.describe_cluster_snapshots.when.called_with( - SnapshotIdentifier=snapshot_identifier - ).should.throw(ClientError, 'Snapshot {} not found.'.format(snapshot_identifier)) - - -@mock_redshift -def test_delete_cluster_snapshot(): - client = boto3.client('redshift', region_name='us-east-1') - cluster_identifier = 'my_cluster' - snapshot_identifier = 'my_snapshot' - - client.create_cluster( - ClusterIdentifier=cluster_identifier, - ClusterType='single-node', - NodeType='ds2.xlarge', - 
MasterUsername='username', - MasterUserPassword='password', - ) - client.create_cluster_snapshot( - SnapshotIdentifier=snapshot_identifier, - ClusterIdentifier=cluster_identifier - ) - - snapshots = client.describe_cluster_snapshots()['Snapshots'] - list(snapshots).should.have.length_of(1) - - client.delete_cluster_snapshot(SnapshotIdentifier=snapshot_identifier)[ - 'Snapshot']['Status'].should.equal('deleted') - - snapshots = client.describe_cluster_snapshots()['Snapshots'] - list(snapshots).should.have.length_of(0) - - # Delete invalid id - client.delete_cluster_snapshot.when.called_with( - SnapshotIdentifier="not-a-snapshot").should.throw(ClientError) - - -@mock_redshift -def test_cluster_snapshot_already_exists(): - client = boto3.client('redshift', region_name='us-east-1') - cluster_identifier = 'my_cluster' - snapshot_identifier = 'my_snapshot' - - client.create_cluster( - DBName='test-db', - ClusterIdentifier=cluster_identifier, - ClusterType='single-node', - NodeType='ds2.xlarge', - MasterUsername='username', - MasterUserPassword='password', - ) - - client.create_cluster_snapshot( - SnapshotIdentifier=snapshot_identifier, - ClusterIdentifier=cluster_identifier - ) - - client.create_cluster_snapshot.when.called_with( - SnapshotIdentifier=snapshot_identifier, - ClusterIdentifier=cluster_identifier - ).should.throw(ClientError) - - -@mock_redshift -def test_create_cluster_from_snapshot(): - client = boto3.client('redshift', region_name='us-east-1') - original_cluster_identifier = 'original-cluster' - original_snapshot_identifier = 'original-snapshot' - new_cluster_identifier = 'new-cluster' - - client.create_cluster( - ClusterIdentifier=original_cluster_identifier, - ClusterType='single-node', - NodeType='ds2.xlarge', - MasterUsername='username', - MasterUserPassword='password', - ) - client.create_cluster_snapshot( - SnapshotIdentifier=original_snapshot_identifier, - ClusterIdentifier=original_cluster_identifier - ) - response = client.restore_from_cluster_snapshot( - ClusterIdentifier=new_cluster_identifier, - SnapshotIdentifier=original_snapshot_identifier, - Port=1234 - ) - response['Cluster']['ClusterStatus'].should.equal('creating') - - response = client.describe_clusters( - ClusterIdentifier=new_cluster_identifier - ) - new_cluster = response['Clusters'][0] - new_cluster['NodeType'].should.equal('ds2.xlarge') - new_cluster['MasterUsername'].should.equal('username') - new_cluster['Endpoint']['Port'].should.equal(1234) - - -@mock_redshift -def test_create_cluster_from_snapshot_with_waiter(): - client = boto3.client('redshift', region_name='us-east-1') - original_cluster_identifier = 'original-cluster' - original_snapshot_identifier = 'original-snapshot' - new_cluster_identifier = 'new-cluster' - - client.create_cluster( - ClusterIdentifier=original_cluster_identifier, - ClusterType='single-node', - NodeType='ds2.xlarge', - MasterUsername='username', - MasterUserPassword='password', - ) - client.create_cluster_snapshot( - SnapshotIdentifier=original_snapshot_identifier, - ClusterIdentifier=original_cluster_identifier - ) - response = client.restore_from_cluster_snapshot( - ClusterIdentifier=new_cluster_identifier, - SnapshotIdentifier=original_snapshot_identifier, - Port=1234 - ) - response['Cluster']['ClusterStatus'].should.equal('creating') - - client.get_waiter('cluster_restored').wait( - ClusterIdentifier=new_cluster_identifier, - WaiterConfig={ - 'Delay': 1, - 'MaxAttempts': 2, - } - ) - - response = client.describe_clusters( - ClusterIdentifier=new_cluster_identifier - ) - 
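-    # The restored cluster is expected to inherit NodeType and MasterUsername
-    # from the snapshot's source cluster, while the Endpoint port reflects the
-    # Port override passed to restore_from_cluster_snapshot.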
new_cluster = response['Clusters'][0] - new_cluster['NodeType'].should.equal('ds2.xlarge') - new_cluster['MasterUsername'].should.equal('username') - new_cluster['Endpoint']['Port'].should.equal(1234) - - -@mock_redshift -def test_create_cluster_from_non_existent_snapshot(): - client = boto3.client('redshift', region_name='us-east-1') - client.restore_from_cluster_snapshot.when.called_with( - ClusterIdentifier='cluster-id', - SnapshotIdentifier='non-existent-snapshot', - ).should.throw(ClientError, 'Snapshot non-existent-snapshot not found.') - - -@mock_redshift -def test_create_cluster_status_update(): - client = boto3.client('redshift', region_name='us-east-1') - cluster_identifier = 'test-cluster' - - response = client.create_cluster( - ClusterIdentifier=cluster_identifier, - ClusterType='single-node', - NodeType='ds2.xlarge', - MasterUsername='username', - MasterUserPassword='password', - ) - response['Cluster']['ClusterStatus'].should.equal('creating') - - response = client.describe_clusters( - ClusterIdentifier=cluster_identifier - ) - response['Clusters'][0]['ClusterStatus'].should.equal('available') - - -@mock_redshift -def test_describe_tags_with_resource_type(): - client = boto3.client('redshift', region_name='us-east-1') - cluster_identifier = 'my_cluster' - cluster_arn = 'arn:aws:redshift:us-east-1:123456789012:' \ - 'cluster:{}'.format(cluster_identifier) - snapshot_identifier = 'my_snapshot' - snapshot_arn = 'arn:aws:redshift:us-east-1:123456789012:' \ - 'snapshot:{}/{}'.format(cluster_identifier, - snapshot_identifier) - tag_key = 'test-tag-key' - tag_value = 'test-tag-value' - - client.create_cluster( - DBName='test-db', - ClusterIdentifier=cluster_identifier, - ClusterType='single-node', - NodeType='ds2.xlarge', - MasterUsername='username', - MasterUserPassword='password', - Tags=[{'Key': tag_key, - 'Value': tag_value}] - ) - tags_response = client.describe_tags(ResourceType='cluster') - tagged_resources = tags_response['TaggedResources'] - list(tagged_resources).should.have.length_of(1) - tagged_resources[0]['ResourceType'].should.equal('cluster') - tagged_resources[0]['ResourceName'].should.equal(cluster_arn) - tag = tagged_resources[0]['Tag'] - tag['Key'].should.equal(tag_key) - tag['Value'].should.equal(tag_value) - - client.create_cluster_snapshot( - SnapshotIdentifier=snapshot_identifier, - ClusterIdentifier=cluster_identifier, - Tags=[{'Key': tag_key, - 'Value': tag_value}] - ) - tags_response = client.describe_tags(ResourceType='snapshot') - tagged_resources = tags_response['TaggedResources'] - list(tagged_resources).should.have.length_of(1) - tagged_resources[0]['ResourceType'].should.equal('snapshot') - tagged_resources[0]['ResourceName'].should.equal(snapshot_arn) - tag = tagged_resources[0]['Tag'] - tag['Key'].should.equal(tag_key) - tag['Value'].should.equal(tag_value) - - -@mock_redshift -def test_describe_tags_cannot_specify_resource_type_and_resource_name(): - client = boto3.client('redshift', region_name='us-east-1') - resource_name = 'arn:aws:redshift:us-east-1:123456789012:cluster:cluster-id' - resource_type = 'cluster' - client.describe_tags.when.called_with( - ResourceName=resource_name, - ResourceType=resource_type - ).should.throw(ClientError, 'using either an ARN or a resource type') - - -@mock_redshift -def test_describe_tags_with_resource_name(): - client = boto3.client('redshift', region_name='us-east-1') - cluster_identifier = 'cluster-id' - cluster_arn = 'arn:aws:redshift:us-east-1:123456789012:' \ - 'cluster:{}'.format(cluster_identifier) - 
snapshot_identifier = 'snapshot-id' - snapshot_arn = 'arn:aws:redshift:us-east-1:123456789012:' \ - 'snapshot:{}/{}'.format(cluster_identifier, - snapshot_identifier) - tag_key = 'test-tag-key' - tag_value = 'test-tag-value' - - client.create_cluster( - DBName='test-db', - ClusterIdentifier=cluster_identifier, - ClusterType='single-node', - NodeType='ds2.xlarge', - MasterUsername='username', - MasterUserPassword='password', - Tags=[{'Key': tag_key, - 'Value': tag_value}] - ) - tags_response = client.describe_tags(ResourceName=cluster_arn) - tagged_resources = tags_response['TaggedResources'] - list(tagged_resources).should.have.length_of(1) - tagged_resources[0]['ResourceType'].should.equal('cluster') - tagged_resources[0]['ResourceName'].should.equal(cluster_arn) - tag = tagged_resources[0]['Tag'] - tag['Key'].should.equal(tag_key) - tag['Value'].should.equal(tag_value) - - client.create_cluster_snapshot( - SnapshotIdentifier=snapshot_identifier, - ClusterIdentifier=cluster_identifier, - Tags=[{'Key': tag_key, - 'Value': tag_value}] - ) - tags_response = client.describe_tags(ResourceName=snapshot_arn) - tagged_resources = tags_response['TaggedResources'] - list(tagged_resources).should.have.length_of(1) - tagged_resources[0]['ResourceType'].should.equal('snapshot') - tagged_resources[0]['ResourceName'].should.equal(snapshot_arn) - tag = tagged_resources[0]['Tag'] - tag['Key'].should.equal(tag_key) - tag['Value'].should.equal(tag_value) - - -@mock_redshift -def test_create_tags(): - client = boto3.client('redshift', region_name='us-east-1') - cluster_identifier = 'cluster-id' - cluster_arn = 'arn:aws:redshift:us-east-1:123456789012:' \ - 'cluster:{}'.format(cluster_identifier) - tag_key = 'test-tag-key' - tag_value = 'test-tag-value' - num_tags = 5 - tags = [] - for i in range(0, num_tags): - tag = {'Key': '{}-{}'.format(tag_key, i), - 'Value': '{}-{}'.format(tag_value, i)} - tags.append(tag) - - client.create_cluster( - DBName='test-db', - ClusterIdentifier=cluster_identifier, - ClusterType='single-node', - NodeType='ds2.xlarge', - MasterUsername='username', - MasterUserPassword='password', - ) - client.create_tags( - ResourceName=cluster_arn, - Tags=tags - ) - response = client.describe_clusters(ClusterIdentifier=cluster_identifier) - cluster = response['Clusters'][0] - list(cluster['Tags']).should.have.length_of(num_tags) - response = client.describe_tags(ResourceName=cluster_arn) - list(response['TaggedResources']).should.have.length_of(num_tags) - - -@mock_redshift -def test_delete_tags(): - client = boto3.client('redshift', region_name='us-east-1') - cluster_identifier = 'cluster-id' - cluster_arn = 'arn:aws:redshift:us-east-1:123456789012:' \ - 'cluster:{}'.format(cluster_identifier) - tag_key = 'test-tag-key' - tag_value = 'test-tag-value' - tags = [] - for i in range(1, 2): - tag = {'Key': '{}-{}'.format(tag_key, i), - 'Value': '{}-{}'.format(tag_value, i)} - tags.append(tag) - - client.create_cluster( - DBName='test-db', - ClusterIdentifier=cluster_identifier, - ClusterType='single-node', - NodeType='ds2.xlarge', - MasterUsername='username', - MasterUserPassword='password', - Tags=tags - ) - client.delete_tags( - ResourceName=cluster_arn, - TagKeys=[tag['Key'] for tag in tags - if tag['Key'] != '{}-1'.format(tag_key)] - ) - response = client.describe_clusters(ClusterIdentifier=cluster_identifier) - cluster = response['Clusters'][0] - list(cluster['Tags']).should.have.length_of(1) - response = client.describe_tags(ResourceName=cluster_arn) - 
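-    # The TagKeys filter above excludes 'test-tag-key-1' from deletion, so
-    # exactly one tag should remain on the cluster and in the describe_tags
-    # output.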
list(response['TaggedResources']).should.have.length_of(1) - - -@mock_ec2 -@mock_redshift -def test_describe_tags_all_resource_types(): - ec2 = boto3.resource('ec2', region_name='us-east-1') - vpc = ec2.create_vpc(CidrBlock='10.0.0.0/16') - subnet = ec2.create_subnet(VpcId=vpc.id, CidrBlock='10.0.0.0/24') - client = boto3.client('redshift', region_name='us-east-1') - response = client.describe_tags() - list(response['TaggedResources']).should.have.length_of(0) - client.create_cluster_subnet_group( - ClusterSubnetGroupName='my_subnet_group', - Description='This is my subnet group', - SubnetIds=[subnet.id], - Tags=[{'Key': 'tag_key', - 'Value': 'tag_value'}] - ) - client.create_cluster_security_group( - ClusterSecurityGroupName="security_group1", - Description="This is my security group", - Tags=[{'Key': 'tag_key', - 'Value': 'tag_value'}] - ) - client.create_cluster( - DBName='test', - ClusterIdentifier='my_cluster', - ClusterType='single-node', - NodeType='ds2.xlarge', - MasterUsername='user', - MasterUserPassword='password', - Tags=[{'Key': 'tag_key', - 'Value': 'tag_value'}] - ) - client.create_cluster_snapshot( - SnapshotIdentifier='my_snapshot', - ClusterIdentifier='my_cluster', - Tags=[{'Key': 'tag_key', - 'Value': 'tag_value'}] - ) - client.create_cluster_parameter_group( - ParameterGroupName="my_parameter_group", - ParameterGroupFamily="redshift-1.0", - Description="This is my parameter group", - Tags=[{'Key': 'tag_key', - 'Value': 'tag_value'}] - ) - response = client.describe_tags() - expected_types = ['cluster', 'parametergroup', 'securitygroup', 'snapshot', 'subnetgroup'] - tagged_resources = response['TaggedResources'] - returned_types = [resource['ResourceType'] for resource in tagged_resources] - list(tagged_resources).should.have.length_of(len(expected_types)) - set(returned_types).should.equal(set(expected_types)) - - -@mock_redshift -def test_tagged_resource_not_found_error(): - client = boto3.client('redshift', region_name='us-east-1') - - cluster_arn = 'arn:aws:redshift:us-east-1::cluster:fake' - client.describe_tags.when.called_with( - ResourceName=cluster_arn - ).should.throw(ClientError, 'cluster (fake) not found.') - - snapshot_arn = 'arn:aws:redshift:us-east-1::snapshot:cluster-id/snap-id' - client.delete_tags.when.called_with( - ResourceName=snapshot_arn, - TagKeys=['test'] - ).should.throw(ClientError, 'snapshot (snap-id) not found.') - - client.describe_tags.when.called_with( - ResourceType='cluster' - ).should.throw(ClientError, "resource of type 'cluster' not found.") - - client.describe_tags.when.called_with( - ResourceName='bad:arn' - ).should.throw(ClientError, "Tagging is not supported for this type of resource") - - -@mock_redshift -def test_enable_snapshot_copy(): - client = boto3.client('redshift', region_name='us-east-1') - client.create_cluster( - ClusterIdentifier='test', - ClusterType='single-node', - DBName='test', - Encrypted=True, - MasterUsername='user', - MasterUserPassword='password', - NodeType='ds2.xlarge', - ) - client.enable_snapshot_copy( - ClusterIdentifier='test', - DestinationRegion='us-west-2', - RetentionPeriod=3, - SnapshotCopyGrantName='copy-us-east-1-to-us-west-2' - ) - response = client.describe_clusters(ClusterIdentifier='test') - cluster_snapshot_copy_status = response['Clusters'][0]['ClusterSnapshotCopyStatus'] - cluster_snapshot_copy_status['RetentionPeriod'].should.equal(3) - cluster_snapshot_copy_status['DestinationRegion'].should.equal('us-west-2') - 
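-    # The grant name should be echoed back too; it is only present because the
-    # cluster was created with Encrypted=True (the unencrypted variant below
-    # omits SnapshotCopyGrantName entirely).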
cluster_snapshot_copy_status['SnapshotCopyGrantName'].should.equal('copy-us-east-1-to-us-west-2') - - -@mock_redshift -def test_enable_snapshot_copy_unencrypted(): - client = boto3.client('redshift', region_name='us-east-1') - client.create_cluster( - ClusterIdentifier='test', - ClusterType='single-node', - DBName='test', - MasterUsername='user', - MasterUserPassword='password', - NodeType='ds2.xlarge', - ) - client.enable_snapshot_copy( - ClusterIdentifier='test', - DestinationRegion='us-west-2', - ) - response = client.describe_clusters(ClusterIdentifier='test') - cluster_snapshot_copy_status = response['Clusters'][0]['ClusterSnapshotCopyStatus'] - cluster_snapshot_copy_status['RetentionPeriod'].should.equal(7) - cluster_snapshot_copy_status['DestinationRegion'].should.equal('us-west-2') - - -@mock_redshift -def test_disable_snapshot_copy(): - client = boto3.client('redshift', region_name='us-east-1') - client.create_cluster( - DBName='test', - ClusterIdentifier='test', - ClusterType='single-node', - NodeType='ds2.xlarge', - MasterUsername='user', - MasterUserPassword='password', - ) - client.enable_snapshot_copy( - ClusterIdentifier='test', - DestinationRegion='us-west-2', - RetentionPeriod=3, - SnapshotCopyGrantName='copy-us-east-1-to-us-west-2', - ) - client.disable_snapshot_copy( - ClusterIdentifier='test', - ) - response = client.describe_clusters(ClusterIdentifier='test') - response['Clusters'][0].shouldnt.contain('ClusterSnapshotCopyStatus') - - -@mock_redshift -def test_modify_snapshot_copy_retention_period(): - client = boto3.client('redshift', region_name='us-east-1') - client.create_cluster( - DBName='test', - ClusterIdentifier='test', - ClusterType='single-node', - NodeType='ds2.xlarge', - MasterUsername='user', - MasterUserPassword='password', - ) - client.enable_snapshot_copy( - ClusterIdentifier='test', - DestinationRegion='us-west-2', - RetentionPeriod=3, - SnapshotCopyGrantName='copy-us-east-1-to-us-west-2', - ) - client.modify_snapshot_copy_retention_period( - ClusterIdentifier='test', - RetentionPeriod=5, - ) - response = client.describe_clusters(ClusterIdentifier='test') - cluster_snapshot_copy_status = response['Clusters'][0]['ClusterSnapshotCopyStatus'] - cluster_snapshot_copy_status['RetentionPeriod'].should.equal(5) +from __future__ import unicode_literals + +import datetime + +import boto +import boto3 +from boto.redshift.exceptions import ( + ClusterNotFound, + ClusterParameterGroupNotFound, + ClusterSecurityGroupNotFound, + ClusterSubnetGroupNotFound, + InvalidSubnet, +) +from botocore.exceptions import ( + ClientError +) +import sure # noqa + +from moto import mock_ec2 +from moto import mock_ec2_deprecated +from moto import mock_redshift +from moto import mock_redshift_deprecated + + +@mock_redshift +def test_create_cluster_boto3(): + client = boto3.client('redshift', region_name='us-east-1') + response = client.create_cluster( + DBName='test', + ClusterIdentifier='test', + ClusterType='single-node', + NodeType='ds2.xlarge', + MasterUsername='user', + MasterUserPassword='password', + ) + response['Cluster']['NodeType'].should.equal('ds2.xlarge') + create_time = response['Cluster']['ClusterCreateTime'] + create_time.should.be.lower_than(datetime.datetime.now(create_time.tzinfo)) + + +@mock_redshift +def test_create_snapshot_copy_grant(): + client = boto3.client('redshift', region_name='us-east-1') + grants = client.create_snapshot_copy_grant( + SnapshotCopyGrantName='test-us-east-1', + KmsKeyId='fake', + ) + 
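+    # The response should echo the grant name and KMS key back. Note that
+    # 'fake' is accepted as a key id here; moto does not appear to validate
+    # that the KMS key actually exists.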
grants['SnapshotCopyGrant']['SnapshotCopyGrantName'].should.equal('test-us-east-1') + grants['SnapshotCopyGrant']['KmsKeyId'].should.equal('fake') + + client.delete_snapshot_copy_grant( + SnapshotCopyGrantName='test-us-east-1', + ) + + client.describe_snapshot_copy_grants.when.called_with( + SnapshotCopyGrantName='test-us-east-1', + ).should.throw(Exception) + + +@mock_redshift +def test_create_many_snapshot_copy_grants(): + client = boto3.client('redshift', region_name='us-east-1') + + for i in range(10): + client.create_snapshot_copy_grant( + SnapshotCopyGrantName='test-us-east-1-{0}'.format(i), + KmsKeyId='fake', + ) + response = client.describe_snapshot_copy_grants() + len(response['SnapshotCopyGrants']).should.equal(10) + + +@mock_redshift +def test_no_snapshot_copy_grants(): + client = boto3.client('redshift', region_name='us-east-1') + response = client.describe_snapshot_copy_grants() + len(response['SnapshotCopyGrants']).should.equal(0) + + +@mock_redshift_deprecated +def test_create_cluster(): + conn = boto.redshift.connect_to_region("us-east-1") + cluster_identifier = 'my_cluster' + + cluster_response = conn.create_cluster( + cluster_identifier, + node_type="dw.hs1.xlarge", + master_username="username", + master_user_password="password", + db_name="my_db", + cluster_type="multi-node", + availability_zone="us-east-1d", + preferred_maintenance_window="Mon:03:00-Mon:11:00", + automated_snapshot_retention_period=10, + port=1234, + cluster_version="1.0", + allow_version_upgrade=True, + number_of_nodes=3, + ) + cluster_response['CreateClusterResponse']['CreateClusterResult'][ + 'Cluster']['ClusterStatus'].should.equal('creating') + + cluster_response = conn.describe_clusters(cluster_identifier) + cluster = cluster_response['DescribeClustersResponse'][ + 'DescribeClustersResult']['Clusters'][0] + + cluster['ClusterIdentifier'].should.equal(cluster_identifier) + cluster['NodeType'].should.equal("dw.hs1.xlarge") + cluster['MasterUsername'].should.equal("username") + cluster['DBName'].should.equal("my_db") + cluster['ClusterSecurityGroups'][0][ + 'ClusterSecurityGroupName'].should.equal("Default") + cluster['VpcSecurityGroups'].should.equal([]) + cluster['ClusterSubnetGroupName'].should.equal(None) + cluster['AvailabilityZone'].should.equal("us-east-1d") + cluster['PreferredMaintenanceWindow'].should.equal("Mon:03:00-Mon:11:00") + cluster['ClusterParameterGroups'][0][ + 'ParameterGroupName'].should.equal("default.redshift-1.0") + cluster['AutomatedSnapshotRetentionPeriod'].should.equal(10) + cluster['Port'].should.equal(1234) + cluster['ClusterVersion'].should.equal("1.0") + cluster['AllowVersionUpgrade'].should.equal(True) + cluster['NumberOfNodes'].should.equal(3) + + +@mock_redshift_deprecated +def test_create_single_node_cluster(): + conn = boto.redshift.connect_to_region("us-east-1") + cluster_identifier = 'my_cluster' + + conn.create_cluster( + cluster_identifier, + node_type="dw.hs1.xlarge", + master_username="username", + master_user_password="password", + db_name="my_db", + cluster_type="single-node", + ) + + cluster_response = conn.describe_clusters(cluster_identifier) + cluster = cluster_response['DescribeClustersResponse'][ + 'DescribeClustersResult']['Clusters'][0] + + cluster['ClusterIdentifier'].should.equal(cluster_identifier) + cluster['NodeType'].should.equal("dw.hs1.xlarge") + cluster['MasterUsername'].should.equal("username") + cluster['DBName'].should.equal("my_db") + cluster['NumberOfNodes'].should.equal(1) + + +@mock_redshift_deprecated +def 
test_default_cluster_attributes(): + conn = boto.redshift.connect_to_region("us-east-1") + cluster_identifier = 'my_cluster' + + conn.create_cluster( + cluster_identifier, + node_type="dw.hs1.xlarge", + master_username="username", + master_user_password="password", + ) + + cluster_response = conn.describe_clusters(cluster_identifier) + cluster = cluster_response['DescribeClustersResponse'][ + 'DescribeClustersResult']['Clusters'][0] + + cluster['DBName'].should.equal("dev") + cluster['ClusterSubnetGroupName'].should.equal(None) + assert "us-east-" in cluster['AvailabilityZone'] + cluster['PreferredMaintenanceWindow'].should.equal("Mon:03:00-Mon:03:30") + cluster['ClusterParameterGroups'][0][ + 'ParameterGroupName'].should.equal("default.redshift-1.0") + cluster['AutomatedSnapshotRetentionPeriod'].should.equal(1) + cluster['Port'].should.equal(5439) + cluster['ClusterVersion'].should.equal("1.0") + cluster['AllowVersionUpgrade'].should.equal(True) + cluster['NumberOfNodes'].should.equal(1) + + +@mock_redshift_deprecated +@mock_ec2_deprecated +def test_create_cluster_in_subnet_group(): + vpc_conn = boto.connect_vpc() + vpc = vpc_conn.create_vpc("10.0.0.0/16") + subnet = vpc_conn.create_subnet(vpc.id, "10.0.0.0/24") + redshift_conn = boto.connect_redshift() + redshift_conn.create_cluster_subnet_group( + "my_subnet_group", + "This is my subnet group", + subnet_ids=[subnet.id], + ) + + redshift_conn.create_cluster( + "my_cluster", + node_type="dw.hs1.xlarge", + master_username="username", + master_user_password="password", + cluster_subnet_group_name='my_subnet_group', + ) + + cluster_response = redshift_conn.describe_clusters("my_cluster") + cluster = cluster_response['DescribeClustersResponse'][ + 'DescribeClustersResult']['Clusters'][0] + cluster['ClusterSubnetGroupName'].should.equal('my_subnet_group') + + +@mock_redshift +@mock_ec2 +def test_create_cluster_in_subnet_group_boto3(): + ec2 = boto3.resource('ec2', region_name='us-east-1') + vpc = ec2.create_vpc(CidrBlock='10.0.0.0/16') + subnet = ec2.create_subnet(VpcId=vpc.id, CidrBlock='10.0.0.0/24') + client = boto3.client('redshift', region_name='us-east-1') + client.create_cluster_subnet_group( + ClusterSubnetGroupName='my_subnet_group', + Description='This is my subnet group', + SubnetIds=[subnet.id] + ) + + client.create_cluster( + ClusterIdentifier="my_cluster", + NodeType="dw.hs1.xlarge", + MasterUsername="username", + MasterUserPassword="password", + ClusterSubnetGroupName='my_subnet_group', + ) + + cluster_response = client.describe_clusters(ClusterIdentifier="my_cluster") + cluster = cluster_response['Clusters'][0] + cluster['ClusterSubnetGroupName'].should.equal('my_subnet_group') + + +@mock_redshift_deprecated +def test_create_cluster_with_security_group(): + conn = boto.redshift.connect_to_region("us-east-1") + conn.create_cluster_security_group( + "security_group1", + "This is my security group", + ) + conn.create_cluster_security_group( + "security_group2", + "This is my security group", + ) + + cluster_identifier = 'my_cluster' + conn.create_cluster( + cluster_identifier, + node_type="dw.hs1.xlarge", + master_username="username", + master_user_password="password", + cluster_security_groups=["security_group1", "security_group2"] + ) + + cluster_response = conn.describe_clusters(cluster_identifier) + cluster = cluster_response['DescribeClustersResponse'][ + 'DescribeClustersResult']['Clusters'][0] + group_names = [group['ClusterSecurityGroupName'] + for group in cluster['ClusterSecurityGroups']] + 
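+    # Compare as sets rather than lists, since the order in which the two
+    # security groups come back is not guaranteed.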
set(group_names).should.equal(set(["security_group1", "security_group2"])) + + +@mock_redshift +def test_create_cluster_with_security_group_boto3(): + client = boto3.client('redshift', region_name='us-east-1') + client.create_cluster_security_group( + ClusterSecurityGroupName="security_group1", + Description="This is my security group", + ) + client.create_cluster_security_group( + ClusterSecurityGroupName="security_group2", + Description="This is my security group", + ) + + cluster_identifier = 'my_cluster' + client.create_cluster( + ClusterIdentifier=cluster_identifier, + NodeType="dw.hs1.xlarge", + MasterUsername="username", + MasterUserPassword="password", + ClusterSecurityGroups=["security_group1", "security_group2"] + ) + response = client.describe_clusters(ClusterIdentifier=cluster_identifier) + cluster = response['Clusters'][0] + group_names = [group['ClusterSecurityGroupName'] + for group in cluster['ClusterSecurityGroups']] + set(group_names).should.equal({"security_group1", "security_group2"}) + + +@mock_redshift_deprecated +@mock_ec2_deprecated +def test_create_cluster_with_vpc_security_groups(): + vpc_conn = boto.connect_vpc() + ec2_conn = boto.connect_ec2() + redshift_conn = boto.connect_redshift() + vpc = vpc_conn.create_vpc("10.0.0.0/16") + security_group = ec2_conn.create_security_group( + "vpc_security_group", "a group", vpc_id=vpc.id) + + redshift_conn.create_cluster( + "my_cluster", + node_type="dw.hs1.xlarge", + master_username="username", + master_user_password="password", + vpc_security_group_ids=[security_group.id], + ) + + cluster_response = redshift_conn.describe_clusters("my_cluster") + cluster = cluster_response['DescribeClustersResponse'][ + 'DescribeClustersResult']['Clusters'][0] + group_ids = [group['VpcSecurityGroupId'] + for group in cluster['VpcSecurityGroups']] + list(group_ids).should.equal([security_group.id]) + + +@mock_redshift +@mock_ec2 +def test_create_cluster_with_vpc_security_groups_boto3(): + ec2 = boto3.resource('ec2', region_name='us-east-1') + vpc = ec2.create_vpc(CidrBlock='10.0.0.0/16') + client = boto3.client('redshift', region_name='us-east-1') + cluster_id = 'my_cluster' + security_group = ec2.create_security_group( + Description="vpc_security_group", + GroupName="a group", + VpcId=vpc.id) + client.create_cluster( + ClusterIdentifier=cluster_id, + NodeType="dw.hs1.xlarge", + MasterUsername="username", + MasterUserPassword="password", + VpcSecurityGroupIds=[security_group.id], + ) + response = client.describe_clusters(ClusterIdentifier=cluster_id) + cluster = response['Clusters'][0] + group_ids = [group['VpcSecurityGroupId'] + for group in cluster['VpcSecurityGroups']] + list(group_ids).should.equal([security_group.id]) + + +@mock_redshift +def test_create_cluster_with_iam_roles(): + iam_roles_arn = ['arn:aws:iam:::role/my-iam-role',] + client = boto3.client('redshift', region_name='us-east-1') + cluster_id = 'my_cluster' + client.create_cluster( + ClusterIdentifier=cluster_id, + NodeType="dw.hs1.xlarge", + MasterUsername="username", + MasterUserPassword="password", + IamRoles=iam_roles_arn + ) + response = client.describe_clusters(ClusterIdentifier=cluster_id) + cluster = response['Clusters'][0] + iam_roles = [role['IamRoleArn'] for role in cluster['IamRoles']] + iam_roles_arn.should.equal(iam_roles) + + +@mock_redshift_deprecated +def test_create_cluster_with_parameter_group(): + conn = boto.connect_redshift() + conn.create_cluster_parameter_group( + "my_parameter_group", + "redshift-1.0", + "This is my parameter group", + ) + + 
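+    # The parameter group has to exist before the cluster below can reference
+    # it by name via cluster_parameter_group_name.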
conn.create_cluster( + "my_cluster", + node_type="dw.hs1.xlarge", + master_username="username", + master_user_password="password", + cluster_parameter_group_name='my_parameter_group', + ) + + cluster_response = conn.describe_clusters("my_cluster") + cluster = cluster_response['DescribeClustersResponse'][ + 'DescribeClustersResult']['Clusters'][0] + cluster['ClusterParameterGroups'][0][ + 'ParameterGroupName'].should.equal("my_parameter_group") + + +@mock_redshift_deprecated +def test_describe_non_existent_cluster(): + conn = boto.redshift.connect_to_region("us-east-1") + conn.describe_clusters.when.called_with( + "not-a-cluster").should.throw(ClusterNotFound) + + +@mock_redshift_deprecated +def test_delete_cluster(): + conn = boto.connect_redshift() + cluster_identifier = 'my_cluster' + + conn.create_cluster( + cluster_identifier, + node_type='single-node', + master_username="username", + master_user_password="password", + ) + + clusters = conn.describe_clusters()['DescribeClustersResponse'][ + 'DescribeClustersResult']['Clusters'] + list(clusters).should.have.length_of(1) + + conn.delete_cluster(cluster_identifier) + + clusters = conn.describe_clusters()['DescribeClustersResponse'][ + 'DescribeClustersResult']['Clusters'] + list(clusters).should.have.length_of(0) + + # Delete invalid id + conn.delete_cluster.when.called_with( + "not-a-cluster").should.throw(ClusterNotFound) + + +@mock_redshift_deprecated +def test_modify_cluster(): + conn = boto.connect_redshift() + cluster_identifier = 'my_cluster' + conn.create_cluster_security_group( + "security_group", + "This is my security group", + ) + conn.create_cluster_parameter_group( + "my_parameter_group", + "redshift-1.0", + "This is my parameter group", + ) + + conn.create_cluster( + cluster_identifier, + node_type='single-node', + master_username="username", + master_user_password="password", + ) + + conn.modify_cluster( + cluster_identifier, + cluster_type="multi-node", + node_type="dw.hs1.xlarge", + cluster_security_groups="security_group", + master_user_password="new_password", + cluster_parameter_group_name="my_parameter_group", + automated_snapshot_retention_period=7, + preferred_maintenance_window="Tue:03:00-Tue:11:00", + allow_version_upgrade=False, + new_cluster_identifier="new_identifier", + ) + + cluster_response = conn.describe_clusters("new_identifier") + cluster = cluster_response['DescribeClustersResponse'][ + 'DescribeClustersResult']['Clusters'][0] + + cluster['ClusterIdentifier'].should.equal("new_identifier") + cluster['NodeType'].should.equal("dw.hs1.xlarge") + cluster['ClusterSecurityGroups'][0][ + 'ClusterSecurityGroupName'].should.equal("security_group") + cluster['PreferredMaintenanceWindow'].should.equal("Tue:03:00-Tue:11:00") + cluster['ClusterParameterGroups'][0][ + 'ParameterGroupName'].should.equal("my_parameter_group") + cluster['AutomatedSnapshotRetentionPeriod'].should.equal(7) + cluster['AllowVersionUpgrade'].should.equal(False) + # This one should remain unmodified. 
+ cluster['NumberOfNodes'].should.equal(1) + + +@mock_redshift_deprecated +@mock_ec2_deprecated +def test_create_cluster_subnet_group(): + vpc_conn = boto.connect_vpc() + vpc = vpc_conn.create_vpc("10.0.0.0/16") + subnet1 = vpc_conn.create_subnet(vpc.id, "10.0.0.0/24") + subnet2 = vpc_conn.create_subnet(vpc.id, "10.0.1.0/24") + + redshift_conn = boto.connect_redshift() + + redshift_conn.create_cluster_subnet_group( + "my_subnet", + "This is my subnet group", + subnet_ids=[subnet1.id, subnet2.id], + ) + + subnets_response = redshift_conn.describe_cluster_subnet_groups( + "my_subnet") + my_subnet = subnets_response['DescribeClusterSubnetGroupsResponse'][ + 'DescribeClusterSubnetGroupsResult']['ClusterSubnetGroups'][0] + + my_subnet['ClusterSubnetGroupName'].should.equal("my_subnet") + my_subnet['Description'].should.equal("This is my subnet group") + subnet_ids = [subnet['SubnetIdentifier'] + for subnet in my_subnet['Subnets']] + set(subnet_ids).should.equal(set([subnet1.id, subnet2.id])) + + +@mock_redshift_deprecated +@mock_ec2_deprecated +def test_create_invalid_cluster_subnet_group(): + redshift_conn = boto.connect_redshift() + redshift_conn.create_cluster_subnet_group.when.called_with( + "my_subnet", + "This is my subnet group", + subnet_ids=["subnet-1234"], + ).should.throw(InvalidSubnet) + + +@mock_redshift_deprecated +def test_describe_non_existent_subnet_group(): + conn = boto.redshift.connect_to_region("us-east-1") + conn.describe_cluster_subnet_groups.when.called_with( + "not-a-subnet-group").should.throw(ClusterSubnetGroupNotFound) + + +@mock_redshift_deprecated +@mock_ec2_deprecated +def test_delete_cluster_subnet_group(): + vpc_conn = boto.connect_vpc() + vpc = vpc_conn.create_vpc("10.0.0.0/16") + subnet = vpc_conn.create_subnet(vpc.id, "10.0.0.0/24") + redshift_conn = boto.connect_redshift() + + redshift_conn.create_cluster_subnet_group( + "my_subnet", + "This is my subnet group", + subnet_ids=[subnet.id], + ) + + subnets_response = redshift_conn.describe_cluster_subnet_groups() + subnets = subnets_response['DescribeClusterSubnetGroupsResponse'][ + 'DescribeClusterSubnetGroupsResult']['ClusterSubnetGroups'] + subnets.should.have.length_of(1) + + redshift_conn.delete_cluster_subnet_group("my_subnet") + + subnets_response = redshift_conn.describe_cluster_subnet_groups() + subnets = subnets_response['DescribeClusterSubnetGroupsResponse'][ + 'DescribeClusterSubnetGroupsResult']['ClusterSubnetGroups'] + subnets.should.have.length_of(0) + + # Delete invalid id + redshift_conn.delete_cluster_subnet_group.when.called_with( + "not-a-subnet-group").should.throw(ClusterSubnetGroupNotFound) + + +@mock_redshift_deprecated +def test_create_cluster_security_group(): + conn = boto.connect_redshift() + conn.create_cluster_security_group( + "my_security_group", + "This is my security group", + ) + + groups_response = conn.describe_cluster_security_groups( + "my_security_group") + my_group = groups_response['DescribeClusterSecurityGroupsResponse'][ + 'DescribeClusterSecurityGroupsResult']['ClusterSecurityGroups'][0] + + my_group['ClusterSecurityGroupName'].should.equal("my_security_group") + my_group['Description'].should.equal("This is my security group") + list(my_group['IPRanges']).should.equal([]) + + +@mock_redshift_deprecated +def test_describe_non_existent_security_group(): + conn = boto.redshift.connect_to_region("us-east-1") + conn.describe_cluster_security_groups.when.called_with( + "not-a-security-group").should.throw(ClusterSecurityGroupNotFound) + + +@mock_redshift_deprecated +def 
test_delete_cluster_security_group(): + conn = boto.connect_redshift() + conn.create_cluster_security_group( + "my_security_group", + "This is my security group", + ) + + groups_response = conn.describe_cluster_security_groups() + groups = groups_response['DescribeClusterSecurityGroupsResponse'][ + 'DescribeClusterSecurityGroupsResult']['ClusterSecurityGroups'] + groups.should.have.length_of(2) # The default group already exists + + conn.delete_cluster_security_group("my_security_group") + + groups_response = conn.describe_cluster_security_groups() + groups = groups_response['DescribeClusterSecurityGroupsResponse'][ + 'DescribeClusterSecurityGroupsResult']['ClusterSecurityGroups'] + groups.should.have.length_of(1) + + # Delete invalid id + conn.delete_cluster_security_group.when.called_with( + "not-a-security-group").should.throw(ClusterSecurityGroupNotFound) + + +@mock_redshift_deprecated +def test_create_cluster_parameter_group(): + conn = boto.connect_redshift() + conn.create_cluster_parameter_group( + "my_parameter_group", + "redshift-1.0", + "This is my parameter group", + ) + + groups_response = conn.describe_cluster_parameter_groups( + "my_parameter_group") + my_group = groups_response['DescribeClusterParameterGroupsResponse'][ + 'DescribeClusterParameterGroupsResult']['ParameterGroups'][0] + + my_group['ParameterGroupName'].should.equal("my_parameter_group") + my_group['ParameterGroupFamily'].should.equal("redshift-1.0") + my_group['Description'].should.equal("This is my parameter group") + + +@mock_redshift_deprecated +def test_describe_non_existent_parameter_group(): + conn = boto.redshift.connect_to_region("us-east-1") + conn.describe_cluster_parameter_groups.when.called_with( + "not-a-parameter-group").should.throw(ClusterParameterGroupNotFound) + + +@mock_redshift_deprecated +def test_delete_cluster_parameter_group(): + conn = boto.connect_redshift() + conn.create_cluster_parameter_group( + "my_parameter_group", + "redshift-1.0", + "This is my parameter group", + ) + + groups_response = conn.describe_cluster_parameter_groups() + groups = groups_response['DescribeClusterParameterGroupsResponse'][ + 'DescribeClusterParameterGroupsResult']['ParameterGroups'] + groups.should.have.length_of(2) # The default group already exists + + conn.delete_cluster_parameter_group("my_parameter_group") + + groups_response = conn.describe_cluster_parameter_groups() + groups = groups_response['DescribeClusterParameterGroupsResponse'][ + 'DescribeClusterParameterGroupsResult']['ParameterGroups'] + groups.should.have.length_of(1) + + # Delete invalid id + conn.delete_cluster_parameter_group.when.called_with( + "not-a-parameter-group").should.throw(ClusterParameterGroupNotFound) + + + +@mock_redshift +def test_create_cluster_snapshot_of_non_existent_cluster(): + client = boto3.client('redshift', region_name='us-east-1') + cluster_identifier = 'non-existent-cluster-id' + client.create_cluster_snapshot.when.called_with( + SnapshotIdentifier='snapshot-id', + ClusterIdentifier=cluster_identifier, + ).should.throw(ClientError, 'Cluster {} not found.'.format(cluster_identifier)) + + +@mock_redshift +def test_create_cluster_snapshot(): + client = boto3.client('redshift', region_name='us-east-1') + cluster_identifier = 'my_cluster' + snapshot_identifier = 'my_snapshot' + + cluster_response = client.create_cluster( + DBName='test-db', + ClusterIdentifier=cluster_identifier, + ClusterType='single-node', + NodeType='ds2.xlarge', + MasterUsername='username', + MasterUserPassword='password', + ) + 
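+    # Sanity-check the source cluster first; the snapshot taken below is
+    # expected to inherit its NodeType, MasterUsername and node count.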
cluster_response['Cluster']['NodeType'].should.equal('ds2.xlarge') + + snapshot_response = client.create_cluster_snapshot( + SnapshotIdentifier=snapshot_identifier, + ClusterIdentifier=cluster_identifier, + Tags=[{'Key': 'test-tag-key', + 'Value': 'test-tag-value'}] + ) + snapshot = snapshot_response['Snapshot'] + snapshot['SnapshotIdentifier'].should.equal(snapshot_identifier) + snapshot['ClusterIdentifier'].should.equal(cluster_identifier) + snapshot['NumberOfNodes'].should.equal(1) + snapshot['NodeType'].should.equal('ds2.xlarge') + snapshot['MasterUsername'].should.equal('username') + + +@mock_redshift +def test_describe_cluster_snapshots(): + client = boto3.client('redshift', region_name='us-east-1') + cluster_identifier = 'my_cluster' + snapshot_identifier = 'my_snapshot' + + client.create_cluster( + DBName='test-db', + ClusterIdentifier=cluster_identifier, + ClusterType='single-node', + NodeType='ds2.xlarge', + MasterUsername='username', + MasterUserPassword='password', + ) + + client.create_cluster_snapshot( + SnapshotIdentifier=snapshot_identifier, + ClusterIdentifier=cluster_identifier, + ) + + resp_clust = client.describe_cluster_snapshots(ClusterIdentifier=cluster_identifier) + resp_snap = client.describe_cluster_snapshots(SnapshotIdentifier=snapshot_identifier) + resp_clust['Snapshots'][0].should.equal(resp_snap['Snapshots'][0]) + snapshot = resp_snap['Snapshots'][0] + snapshot['SnapshotIdentifier'].should.equal(snapshot_identifier) + snapshot['ClusterIdentifier'].should.equal(cluster_identifier) + snapshot['NumberOfNodes'].should.equal(1) + snapshot['NodeType'].should.equal('ds2.xlarge') + snapshot['MasterUsername'].should.equal('username') + + +@mock_redshift +def test_describe_cluster_snapshots_not_found_error(): + client = boto3.client('redshift', region_name='us-east-1') + cluster_identifier = 'my_cluster' + snapshot_identifier = 'my_snapshot' + + client.describe_cluster_snapshots.when.called_with( + ClusterIdentifier=cluster_identifier, + ).should.throw(ClientError, 'Cluster {} not found.'.format(cluster_identifier)) + + client.describe_cluster_snapshots.when.called_with( + SnapshotIdentifier=snapshot_identifier + ).should.throw(ClientError, 'Snapshot {} not found.'.format(snapshot_identifier)) + + +@mock_redshift +def test_delete_cluster_snapshot(): + client = boto3.client('redshift', region_name='us-east-1') + cluster_identifier = 'my_cluster' + snapshot_identifier = 'my_snapshot' + + client.create_cluster( + ClusterIdentifier=cluster_identifier, + ClusterType='single-node', + NodeType='ds2.xlarge', + MasterUsername='username', + MasterUserPassword='password', + ) + client.create_cluster_snapshot( + SnapshotIdentifier=snapshot_identifier, + ClusterIdentifier=cluster_identifier + ) + + snapshots = client.describe_cluster_snapshots()['Snapshots'] + list(snapshots).should.have.length_of(1) + + client.delete_cluster_snapshot(SnapshotIdentifier=snapshot_identifier)[ + 'Snapshot']['Status'].should.equal('deleted') + + snapshots = client.describe_cluster_snapshots()['Snapshots'] + list(snapshots).should.have.length_of(0) + + # Delete invalid id + client.delete_cluster_snapshot.when.called_with( + SnapshotIdentifier="not-a-snapshot").should.throw(ClientError) + + +@mock_redshift +def test_cluster_snapshot_already_exists(): + client = boto3.client('redshift', region_name='us-east-1') + cluster_identifier = 'my_cluster' + snapshot_identifier = 'my_snapshot' + + client.create_cluster( + DBName='test-db', + ClusterIdentifier=cluster_identifier, + ClusterType='single-node', + 
NodeType='ds2.xlarge', + MasterUsername='username', + MasterUserPassword='password', + ) + + client.create_cluster_snapshot( + SnapshotIdentifier=snapshot_identifier, + ClusterIdentifier=cluster_identifier + ) + + client.create_cluster_snapshot.when.called_with( + SnapshotIdentifier=snapshot_identifier, + ClusterIdentifier=cluster_identifier + ).should.throw(ClientError) + + +@mock_redshift +def test_create_cluster_from_snapshot(): + client = boto3.client('redshift', region_name='us-east-1') + original_cluster_identifier = 'original-cluster' + original_snapshot_identifier = 'original-snapshot' + new_cluster_identifier = 'new-cluster' + + client.create_cluster( + ClusterIdentifier=original_cluster_identifier, + ClusterType='single-node', + NodeType='ds2.xlarge', + MasterUsername='username', + MasterUserPassword='password', + ) + client.create_cluster_snapshot( + SnapshotIdentifier=original_snapshot_identifier, + ClusterIdentifier=original_cluster_identifier + ) + response = client.restore_from_cluster_snapshot( + ClusterIdentifier=new_cluster_identifier, + SnapshotIdentifier=original_snapshot_identifier, + Port=1234 + ) + response['Cluster']['ClusterStatus'].should.equal('creating') + + response = client.describe_clusters( + ClusterIdentifier=new_cluster_identifier + ) + new_cluster = response['Clusters'][0] + new_cluster['NodeType'].should.equal('ds2.xlarge') + new_cluster['MasterUsername'].should.equal('username') + new_cluster['Endpoint']['Port'].should.equal(1234) + + +@mock_redshift +def test_create_cluster_from_snapshot_with_waiter(): + client = boto3.client('redshift', region_name='us-east-1') + original_cluster_identifier = 'original-cluster' + original_snapshot_identifier = 'original-snapshot' + new_cluster_identifier = 'new-cluster' + + client.create_cluster( + ClusterIdentifier=original_cluster_identifier, + ClusterType='single-node', + NodeType='ds2.xlarge', + MasterUsername='username', + MasterUserPassword='password', + ) + client.create_cluster_snapshot( + SnapshotIdentifier=original_snapshot_identifier, + ClusterIdentifier=original_cluster_identifier + ) + response = client.restore_from_cluster_snapshot( + ClusterIdentifier=new_cluster_identifier, + SnapshotIdentifier=original_snapshot_identifier, + Port=1234 + ) + response['Cluster']['ClusterStatus'].should.equal('creating') + + client.get_waiter('cluster_restored').wait( + ClusterIdentifier=new_cluster_identifier, + WaiterConfig={ + 'Delay': 1, + 'MaxAttempts': 2, + } + ) + + response = client.describe_clusters( + ClusterIdentifier=new_cluster_identifier + ) + new_cluster = response['Clusters'][0] + new_cluster['NodeType'].should.equal('ds2.xlarge') + new_cluster['MasterUsername'].should.equal('username') + new_cluster['Endpoint']['Port'].should.equal(1234) + + +@mock_redshift +def test_create_cluster_from_non_existent_snapshot(): + client = boto3.client('redshift', region_name='us-east-1') + client.restore_from_cluster_snapshot.when.called_with( + ClusterIdentifier='cluster-id', + SnapshotIdentifier='non-existent-snapshot', + ).should.throw(ClientError, 'Snapshot non-existent-snapshot not found.') + + +@mock_redshift +def test_create_cluster_status_update(): + client = boto3.client('redshift', region_name='us-east-1') + cluster_identifier = 'test-cluster' + + response = client.create_cluster( + ClusterIdentifier=cluster_identifier, + ClusterType='single-node', + NodeType='ds2.xlarge', + MasterUsername='username', + MasterUserPassword='password', + ) + response['Cluster']['ClusterStatus'].should.equal('creating') + + 
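+    # create_cluster reports 'creating', but a follow-up describe_clusters is
+    # expected to show 'available' straight away: moto appears to advance the
+    # status without any real provisioning delay.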
response = client.describe_clusters( + ClusterIdentifier=cluster_identifier + ) + response['Clusters'][0]['ClusterStatus'].should.equal('available') + + +@mock_redshift +def test_describe_tags_with_resource_type(): + client = boto3.client('redshift', region_name='us-east-1') + cluster_identifier = 'my_cluster' + cluster_arn = 'arn:aws:redshift:us-east-1:123456789012:' \ + 'cluster:{}'.format(cluster_identifier) + snapshot_identifier = 'my_snapshot' + snapshot_arn = 'arn:aws:redshift:us-east-1:123456789012:' \ + 'snapshot:{}/{}'.format(cluster_identifier, + snapshot_identifier) + tag_key = 'test-tag-key' + tag_value = 'test-tag-value' + + client.create_cluster( + DBName='test-db', + ClusterIdentifier=cluster_identifier, + ClusterType='single-node', + NodeType='ds2.xlarge', + MasterUsername='username', + MasterUserPassword='password', + Tags=[{'Key': tag_key, + 'Value': tag_value}] + ) + tags_response = client.describe_tags(ResourceType='cluster') + tagged_resources = tags_response['TaggedResources'] + list(tagged_resources).should.have.length_of(1) + tagged_resources[0]['ResourceType'].should.equal('cluster') + tagged_resources[0]['ResourceName'].should.equal(cluster_arn) + tag = tagged_resources[0]['Tag'] + tag['Key'].should.equal(tag_key) + tag['Value'].should.equal(tag_value) + + client.create_cluster_snapshot( + SnapshotIdentifier=snapshot_identifier, + ClusterIdentifier=cluster_identifier, + Tags=[{'Key': tag_key, + 'Value': tag_value}] + ) + tags_response = client.describe_tags(ResourceType='snapshot') + tagged_resources = tags_response['TaggedResources'] + list(tagged_resources).should.have.length_of(1) + tagged_resources[0]['ResourceType'].should.equal('snapshot') + tagged_resources[0]['ResourceName'].should.equal(snapshot_arn) + tag = tagged_resources[0]['Tag'] + tag['Key'].should.equal(tag_key) + tag['Value'].should.equal(tag_value) + + +@mock_redshift +def test_describe_tags_cannot_specify_resource_type_and_resource_name(): + client = boto3.client('redshift', region_name='us-east-1') + resource_name = 'arn:aws:redshift:us-east-1:123456789012:cluster:cluster-id' + resource_type = 'cluster' + client.describe_tags.when.called_with( + ResourceName=resource_name, + ResourceType=resource_type + ).should.throw(ClientError, 'using either an ARN or a resource type') + + +@mock_redshift +def test_describe_tags_with_resource_name(): + client = boto3.client('redshift', region_name='us-east-1') + cluster_identifier = 'cluster-id' + cluster_arn = 'arn:aws:redshift:us-east-1:123456789012:' \ + 'cluster:{}'.format(cluster_identifier) + snapshot_identifier = 'snapshot-id' + snapshot_arn = 'arn:aws:redshift:us-east-1:123456789012:' \ + 'snapshot:{}/{}'.format(cluster_identifier, + snapshot_identifier) + tag_key = 'test-tag-key' + tag_value = 'test-tag-value' + + client.create_cluster( + DBName='test-db', + ClusterIdentifier=cluster_identifier, + ClusterType='single-node', + NodeType='ds2.xlarge', + MasterUsername='username', + MasterUserPassword='password', + Tags=[{'Key': tag_key, + 'Value': tag_value}] + ) + tags_response = client.describe_tags(ResourceName=cluster_arn) + tagged_resources = tags_response['TaggedResources'] + list(tagged_resources).should.have.length_of(1) + tagged_resources[0]['ResourceType'].should.equal('cluster') + tagged_resources[0]['ResourceName'].should.equal(cluster_arn) + tag = tagged_resources[0]['Tag'] + tag['Key'].should.equal(tag_key) + tag['Value'].should.equal(tag_value) + + client.create_cluster_snapshot( + SnapshotIdentifier=snapshot_identifier, + 
ClusterIdentifier=cluster_identifier, + Tags=[{'Key': tag_key, + 'Value': tag_value}] + ) + tags_response = client.describe_tags(ResourceName=snapshot_arn) + tagged_resources = tags_response['TaggedResources'] + list(tagged_resources).should.have.length_of(1) + tagged_resources[0]['ResourceType'].should.equal('snapshot') + tagged_resources[0]['ResourceName'].should.equal(snapshot_arn) + tag = tagged_resources[0]['Tag'] + tag['Key'].should.equal(tag_key) + tag['Value'].should.equal(tag_value) + + +@mock_redshift +def test_create_tags(): + client = boto3.client('redshift', region_name='us-east-1') + cluster_identifier = 'cluster-id' + cluster_arn = 'arn:aws:redshift:us-east-1:123456789012:' \ + 'cluster:{}'.format(cluster_identifier) + tag_key = 'test-tag-key' + tag_value = 'test-tag-value' + num_tags = 5 + tags = [] + for i in range(0, num_tags): + tag = {'Key': '{}-{}'.format(tag_key, i), + 'Value': '{}-{}'.format(tag_value, i)} + tags.append(tag) + + client.create_cluster( + DBName='test-db', + ClusterIdentifier=cluster_identifier, + ClusterType='single-node', + NodeType='ds2.xlarge', + MasterUsername='username', + MasterUserPassword='password', + ) + client.create_tags( + ResourceName=cluster_arn, + Tags=tags + ) + response = client.describe_clusters(ClusterIdentifier=cluster_identifier) + cluster = response['Clusters'][0] + list(cluster['Tags']).should.have.length_of(num_tags) + response = client.describe_tags(ResourceName=cluster_arn) + list(response['TaggedResources']).should.have.length_of(num_tags) + + +@mock_redshift +def test_delete_tags(): + client = boto3.client('redshift', region_name='us-east-1') + cluster_identifier = 'cluster-id' + cluster_arn = 'arn:aws:redshift:us-east-1:123456789012:' \ + 'cluster:{}'.format(cluster_identifier) + tag_key = 'test-tag-key' + tag_value = 'test-tag-value' + tags = [] + for i in range(1, 2): + tag = {'Key': '{}-{}'.format(tag_key, i), + 'Value': '{}-{}'.format(tag_value, i)} + tags.append(tag) + + client.create_cluster( + DBName='test-db', + ClusterIdentifier=cluster_identifier, + ClusterType='single-node', + NodeType='ds2.xlarge', + MasterUsername='username', + MasterUserPassword='password', + Tags=tags + ) + client.delete_tags( + ResourceName=cluster_arn, + TagKeys=[tag['Key'] for tag in tags + if tag['Key'] != '{}-1'.format(tag_key)] + ) + response = client.describe_clusters(ClusterIdentifier=cluster_identifier) + cluster = response['Clusters'][0] + list(cluster['Tags']).should.have.length_of(1) + response = client.describe_tags(ResourceName=cluster_arn) + list(response['TaggedResources']).should.have.length_of(1) + + +@mock_ec2 +@mock_redshift +def test_describe_tags_all_resource_types(): + ec2 = boto3.resource('ec2', region_name='us-east-1') + vpc = ec2.create_vpc(CidrBlock='10.0.0.0/16') + subnet = ec2.create_subnet(VpcId=vpc.id, CidrBlock='10.0.0.0/24') + client = boto3.client('redshift', region_name='us-east-1') + response = client.describe_tags() + list(response['TaggedResources']).should.have.length_of(0) + client.create_cluster_subnet_group( + ClusterSubnetGroupName='my_subnet_group', + Description='This is my subnet group', + SubnetIds=[subnet.id], + Tags=[{'Key': 'tag_key', + 'Value': 'tag_value'}] + ) + client.create_cluster_security_group( + ClusterSecurityGroupName="security_group1", + Description="This is my security group", + Tags=[{'Key': 'tag_key', + 'Value': 'tag_value'}] + ) + client.create_cluster( + DBName='test', + ClusterIdentifier='my_cluster', + ClusterType='single-node', + NodeType='ds2.xlarge', + 
MasterUsername='user', + MasterUserPassword='password', + Tags=[{'Key': 'tag_key', + 'Value': 'tag_value'}] + ) + client.create_cluster_snapshot( + SnapshotIdentifier='my_snapshot', + ClusterIdentifier='my_cluster', + Tags=[{'Key': 'tag_key', + 'Value': 'tag_value'}] + ) + client.create_cluster_parameter_group( + ParameterGroupName="my_parameter_group", + ParameterGroupFamily="redshift-1.0", + Description="This is my parameter group", + Tags=[{'Key': 'tag_key', + 'Value': 'tag_value'}] + ) + response = client.describe_tags() + expected_types = ['cluster', 'parametergroup', 'securitygroup', 'snapshot', 'subnetgroup'] + tagged_resources = response['TaggedResources'] + returned_types = [resource['ResourceType'] for resource in tagged_resources] + list(tagged_resources).should.have.length_of(len(expected_types)) + set(returned_types).should.equal(set(expected_types)) + + +@mock_redshift +def test_tagged_resource_not_found_error(): + client = boto3.client('redshift', region_name='us-east-1') + + cluster_arn = 'arn:aws:redshift:us-east-1::cluster:fake' + client.describe_tags.when.called_with( + ResourceName=cluster_arn + ).should.throw(ClientError, 'cluster (fake) not found.') + + snapshot_arn = 'arn:aws:redshift:us-east-1::snapshot:cluster-id/snap-id' + client.delete_tags.when.called_with( + ResourceName=snapshot_arn, + TagKeys=['test'] + ).should.throw(ClientError, 'snapshot (snap-id) not found.') + + client.describe_tags.when.called_with( + ResourceType='cluster' + ).should.throw(ClientError, "resource of type 'cluster' not found.") + + client.describe_tags.when.called_with( + ResourceName='bad:arn' + ).should.throw(ClientError, "Tagging is not supported for this type of resource") + + +@mock_redshift +def test_enable_snapshot_copy(): + client = boto3.client('redshift', region_name='us-east-1') + client.create_cluster( + ClusterIdentifier='test', + ClusterType='single-node', + DBName='test', + Encrypted=True, + MasterUsername='user', + MasterUserPassword='password', + NodeType='ds2.xlarge', + ) + client.enable_snapshot_copy( + ClusterIdentifier='test', + DestinationRegion='us-west-2', + RetentionPeriod=3, + SnapshotCopyGrantName='copy-us-east-1-to-us-west-2' + ) + response = client.describe_clusters(ClusterIdentifier='test') + cluster_snapshot_copy_status = response['Clusters'][0]['ClusterSnapshotCopyStatus'] + cluster_snapshot_copy_status['RetentionPeriod'].should.equal(3) + cluster_snapshot_copy_status['DestinationRegion'].should.equal('us-west-2') + cluster_snapshot_copy_status['SnapshotCopyGrantName'].should.equal('copy-us-east-1-to-us-west-2') + + +@mock_redshift +def test_enable_snapshot_copy_unencrypted(): + client = boto3.client('redshift', region_name='us-east-1') + client.create_cluster( + ClusterIdentifier='test', + ClusterType='single-node', + DBName='test', + MasterUsername='user', + MasterUserPassword='password', + NodeType='ds2.xlarge', + ) + client.enable_snapshot_copy( + ClusterIdentifier='test', + DestinationRegion='us-west-2', + ) + response = client.describe_clusters(ClusterIdentifier='test') + cluster_snapshot_copy_status = response['Clusters'][0]['ClusterSnapshotCopyStatus'] + cluster_snapshot_copy_status['RetentionPeriod'].should.equal(7) + cluster_snapshot_copy_status['DestinationRegion'].should.equal('us-west-2') + + +@mock_redshift +def test_disable_snapshot_copy(): + client = boto3.client('redshift', region_name='us-east-1') + client.create_cluster( + DBName='test', + ClusterIdentifier='test', + ClusterType='single-node', + NodeType='ds2.xlarge', + 
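+        # Snapshot copy is enabled first so that disabling it can be
+        # verified to drop ClusterSnapshotCopyStatus from the cluster.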
MasterUsername='user', + MasterUserPassword='password', + ) + client.enable_snapshot_copy( + ClusterIdentifier='test', + DestinationRegion='us-west-2', + RetentionPeriod=3, + SnapshotCopyGrantName='copy-us-east-1-to-us-west-2', + ) + client.disable_snapshot_copy( + ClusterIdentifier='test', + ) + response = client.describe_clusters(ClusterIdentifier='test') + response['Clusters'][0].shouldnt.contain('ClusterSnapshotCopyStatus') + + +@mock_redshift +def test_modify_snapshot_copy_retention_period(): + client = boto3.client('redshift', region_name='us-east-1') + client.create_cluster( + DBName='test', + ClusterIdentifier='test', + ClusterType='single-node', + NodeType='ds2.xlarge', + MasterUsername='user', + MasterUserPassword='password', + ) + client.enable_snapshot_copy( + ClusterIdentifier='test', + DestinationRegion='us-west-2', + RetentionPeriod=3, + SnapshotCopyGrantName='copy-us-east-1-to-us-west-2', + ) + client.modify_snapshot_copy_retention_period( + ClusterIdentifier='test', + RetentionPeriod=5, + ) + response = client.describe_clusters(ClusterIdentifier='test') + cluster_snapshot_copy_status = response['Clusters'][0]['ClusterSnapshotCopyStatus'] + cluster_snapshot_copy_status['RetentionPeriod'].should.equal(5) diff --git a/tests/test_redshift/test_server.py b/tests/test_redshift/test_server.py index c37e9cab77b9..47ccdc5f3e13 100644 --- a/tests/test_redshift/test_server.py +++ b/tests/test_redshift/test_server.py @@ -1,22 +1,22 @@ -from __future__ import unicode_literals - -import json -import sure # noqa - -import moto.server as server -from moto import mock_redshift - -''' -Test the different server responses -''' - - -@mock_redshift -def test_describe_clusters(): - backend = server.create_backend_app("redshift") - test_client = backend.test_client() - - res = test_client.get('/?Action=DescribeClusters') - - result = res.data.decode("utf-8") - result.should.contain("") +from __future__ import unicode_literals + +import json +import sure # noqa + +import moto.server as server +from moto import mock_redshift + +''' +Test the different server responses +''' + + +@mock_redshift +def test_describe_clusters(): + backend = server.create_backend_app("redshift") + test_client = backend.test_client() + + res = test_client.get('/?Action=DescribeClusters') + + result = res.data.decode("utf-8") + result.should.contain("") diff --git a/tests/test_resourcegroupstaggingapi/test_resourcegroupstaggingapi.py b/tests/test_resourcegroupstaggingapi/test_resourcegroupstaggingapi.py index 759063329db5..3961d05bc192 100644 --- a/tests/test_resourcegroupstaggingapi/test_resourcegroupstaggingapi.py +++ b/tests/test_resourcegroupstaggingapi/test_resourcegroupstaggingapi.py @@ -1,285 +1,285 @@ -from __future__ import unicode_literals - -import boto3 -import sure # noqa -from moto import mock_resourcegroupstaggingapi, mock_s3, mock_ec2, mock_elbv2 - - -@mock_s3 -@mock_resourcegroupstaggingapi -def test_get_resources_s3(): - # Tests pagination - s3_client = boto3.client('s3', region_name='eu-central-1') - - # Will end up having key1,key2,key3,key4 - response_keys = set() - - # Create 4 buckets - for i in range(1, 5): - i_str = str(i) - s3_client.create_bucket(Bucket='test_bucket' + i_str) - s3_client.put_bucket_tagging( - Bucket='test_bucket' + i_str, - Tagging={'TagSet': [{'Key': 'key' + i_str, 'Value': 'value' + i_str}]} - ) - response_keys.add('key' + i_str) - - rtapi = boto3.client('resourcegroupstaggingapi', region_name='eu-central-1') - resp = rtapi.get_resources(ResourcesPerPage=2) - for resource in 
resp['ResourceTagMappingList']: - response_keys.remove(resource['Tags'][0]['Key']) - - response_keys.should.have.length_of(2) - - resp = rtapi.get_resources( - ResourcesPerPage=2, - PaginationToken=resp['PaginationToken'] - ) - for resource in resp['ResourceTagMappingList']: - response_keys.remove(resource['Tags'][0]['Key']) - - response_keys.should.have.length_of(0) - - -@mock_ec2 -@mock_resourcegroupstaggingapi -def test_get_resources_ec2(): - client = boto3.client('ec2', region_name='eu-central-1') - - instances = client.run_instances( - ImageId='ami-123', - MinCount=1, - MaxCount=1, - InstanceType='t2.micro', - TagSpecifications=[ - { - 'ResourceType': 'instance', - 'Tags': [ - { - 'Key': 'MY_TAG1', - 'Value': 'MY_VALUE1', - }, - { - 'Key': 'MY_TAG2', - 'Value': 'MY_VALUE2', - }, - ], - }, - { - 'ResourceType': 'instance', - 'Tags': [ - { - 'Key': 'MY_TAG3', - 'Value': 'MY_VALUE3', - }, - ] - }, - ], - ) - instance_id = instances['Instances'][0]['InstanceId'] - image_id = client.create_image(Name='testami', InstanceId=instance_id)['ImageId'] - - client.create_tags( - Resources=[image_id], - Tags=[{'Key': 'ami', 'Value': 'test'}] - ) - - rtapi = boto3.client('resourcegroupstaggingapi', region_name='eu-central-1') - resp = rtapi.get_resources() - # Check we have 1 entry for Instance, 1 Entry for AMI - resp['ResourceTagMappingList'].should.have.length_of(2) - - # 1 Entry for AMI - resp = rtapi.get_resources(ResourceTypeFilters=['ec2:image']) - resp['ResourceTagMappingList'].should.have.length_of(1) - resp['ResourceTagMappingList'][0]['ResourceARN'].should.contain('image/') - - # As were iterating the same data, this rules out that the test above was a fluke - resp = rtapi.get_resources(ResourceTypeFilters=['ec2:instance']) - resp['ResourceTagMappingList'].should.have.length_of(1) - resp['ResourceTagMappingList'][0]['ResourceARN'].should.contain('instance/') - - # Basic test of tag filters - resp = rtapi.get_resources(TagFilters=[{'Key': 'MY_TAG1', 'Values': ['MY_VALUE1', 'some_other_value']}]) - resp['ResourceTagMappingList'].should.have.length_of(1) - resp['ResourceTagMappingList'][0]['ResourceARN'].should.contain('instance/') - - -@mock_ec2 -@mock_resourcegroupstaggingapi -def test_get_tag_keys_ec2(): - client = boto3.client('ec2', region_name='eu-central-1') - - client.run_instances( - ImageId='ami-123', - MinCount=1, - MaxCount=1, - InstanceType='t2.micro', - TagSpecifications=[ - { - 'ResourceType': 'instance', - 'Tags': [ - { - 'Key': 'MY_TAG1', - 'Value': 'MY_VALUE1', - }, - { - 'Key': 'MY_TAG2', - 'Value': 'MY_VALUE2', - }, - ], - }, - { - 'ResourceType': 'instance', - 'Tags': [ - { - 'Key': 'MY_TAG3', - 'Value': 'MY_VALUE3', - }, - ] - }, - ], - ) - - rtapi = boto3.client('resourcegroupstaggingapi', region_name='eu-central-1') - resp = rtapi.get_tag_keys() - - resp['TagKeys'].should.contain('MY_TAG1') - resp['TagKeys'].should.contain('MY_TAG2') - resp['TagKeys'].should.contain('MY_TAG3') - - # TODO test pagenation - - -@mock_ec2 -@mock_resourcegroupstaggingapi -def test_get_tag_values_ec2(): - client = boto3.client('ec2', region_name='eu-central-1') - - client.run_instances( - ImageId='ami-123', - MinCount=1, - MaxCount=1, - InstanceType='t2.micro', - TagSpecifications=[ - { - 'ResourceType': 'instance', - 'Tags': [ - { - 'Key': 'MY_TAG1', - 'Value': 'MY_VALUE1', - }, - { - 'Key': 'MY_TAG2', - 'Value': 'MY_VALUE2', - }, - ], - }, - { - 'ResourceType': 'instance', - 'Tags': [ - { - 'Key': 'MY_TAG3', - 'Value': 'MY_VALUE3', - }, - ] - }, - ], - ) - client.run_instances( - 
ImageId='ami-123', - MinCount=1, - MaxCount=1, - InstanceType='t2.micro', - TagSpecifications=[ - { - 'ResourceType': 'instance', - 'Tags': [ - { - 'Key': 'MY_TAG1', - 'Value': 'MY_VALUE4', - }, - { - 'Key': 'MY_TAG2', - 'Value': 'MY_VALUE5', - }, - ], - }, - { - 'ResourceType': 'instance', - 'Tags': [ - { - 'Key': 'MY_TAG3', - 'Value': 'MY_VALUE6', - }, - ] - }, - ], - ) - - rtapi = boto3.client('resourcegroupstaggingapi', region_name='eu-central-1') - resp = rtapi.get_tag_values(Key='MY_TAG1') - - resp['TagValues'].should.contain('MY_VALUE1') - resp['TagValues'].should.contain('MY_VALUE4') - -@mock_ec2 -@mock_elbv2 -@mock_resourcegroupstaggingapi -def test_get_resources_elbv2(): - conn = boto3.client('elbv2', region_name='us-east-1') - ec2 = boto3.resource('ec2', region_name='us-east-1') - - security_group = ec2.create_security_group( - GroupName='a-security-group', Description='First One') - vpc = ec2.create_vpc(CidrBlock='172.28.7.0/24', InstanceTenancy='default') - subnet1 = ec2.create_subnet( - VpcId=vpc.id, - CidrBlock='172.28.7.192/26', - AvailabilityZone='us-east-1a') - subnet2 = ec2.create_subnet( - VpcId=vpc.id, - CidrBlock='172.28.7.192/26', - AvailabilityZone='us-east-1b') - - conn.create_load_balancer( - Name='my-lb', - Subnets=[subnet1.id, subnet2.id], - SecurityGroups=[security_group.id], - Scheme='internal', - Tags=[ - { - 'Key': 'key_name', - 'Value': 'a_value' - }, - { - 'Key': 'key_2', - 'Value': 'val2' - } - ] - ) - - conn.create_load_balancer( - Name='my-other-lb', - Subnets=[subnet1.id, subnet2.id], - SecurityGroups=[security_group.id], - Scheme='internal', - ) - - rtapi = boto3.client('resourcegroupstaggingapi', region_name='us-east-1') - - resp = rtapi.get_resources(ResourceTypeFilters=['elasticloadbalancer:loadbalancer']) - - resp['ResourceTagMappingList'].should.have.length_of(2) - resp['ResourceTagMappingList'][0]['ResourceARN'].should.contain('loadbalancer/') - resp = rtapi.get_resources( - ResourceTypeFilters=['elasticloadbalancer:loadbalancer'], - TagFilters=[{ - 'Key': 'key_name' - }] - ) - - resp['ResourceTagMappingList'].should.have.length_of(1) - resp['ResourceTagMappingList'][0]['Tags'].should.contain({'Key': 'key_name', 'Value': 'a_value'}) - - # TODO test pagenation +from __future__ import unicode_literals + +import boto3 +import sure # noqa +from moto import mock_resourcegroupstaggingapi, mock_s3, mock_ec2, mock_elbv2 + + +@mock_s3 +@mock_resourcegroupstaggingapi +def test_get_resources_s3(): + # Tests pagination + s3_client = boto3.client('s3', region_name='eu-central-1') + + # Will end up having key1,key2,key3,key4 + response_keys = set() + + # Create 4 buckets + for i in range(1, 5): + i_str = str(i) + s3_client.create_bucket(Bucket='test_bucket' + i_str) + s3_client.put_bucket_tagging( + Bucket='test_bucket' + i_str, + Tagging={'TagSet': [{'Key': 'key' + i_str, 'Value': 'value' + i_str}]} + ) + response_keys.add('key' + i_str) + + rtapi = boto3.client('resourcegroupstaggingapi', region_name='eu-central-1') + resp = rtapi.get_resources(ResourcesPerPage=2) + for resource in resp['ResourceTagMappingList']: + response_keys.remove(resource['Tags'][0]['Key']) + + response_keys.should.have.length_of(2) + + resp = rtapi.get_resources( + ResourcesPerPage=2, + PaginationToken=resp['PaginationToken'] + ) + for resource in resp['ResourceTagMappingList']: + response_keys.remove(resource['Tags'][0]['Key']) + + response_keys.should.have.length_of(0) + + +@mock_ec2 +@mock_resourcegroupstaggingapi +def test_get_resources_ec2(): + client = boto3.client('ec2', 
region_name='eu-central-1')
+
+    instances = client.run_instances(
+        ImageId='ami-123',
+        MinCount=1,
+        MaxCount=1,
+        InstanceType='t2.micro',
+        TagSpecifications=[
+            {
+                'ResourceType': 'instance',
+                'Tags': [
+                    {
+                        'Key': 'MY_TAG1',
+                        'Value': 'MY_VALUE1',
+                    },
+                    {
+                        'Key': 'MY_TAG2',
+                        'Value': 'MY_VALUE2',
+                    },
+                ],
+            },
+            {
+                'ResourceType': 'instance',
+                'Tags': [
+                    {
+                        'Key': 'MY_TAG3',
+                        'Value': 'MY_VALUE3',
+                    },
+                ]
+            },
+        ],
+    )
+    instance_id = instances['Instances'][0]['InstanceId']
+    image_id = client.create_image(Name='testami', InstanceId=instance_id)['ImageId']
+
+    client.create_tags(
+        Resources=[image_id],
+        Tags=[{'Key': 'ami', 'Value': 'test'}]
+    )
+
+    rtapi = boto3.client('resourcegroupstaggingapi', region_name='eu-central-1')
+    resp = rtapi.get_resources()
+    # Check we have 1 entry for Instance, 1 Entry for AMI
+    resp['ResourceTagMappingList'].should.have.length_of(2)
+
+    # 1 Entry for AMI
+    resp = rtapi.get_resources(ResourceTypeFilters=['ec2:image'])
+    resp['ResourceTagMappingList'].should.have.length_of(1)
+    resp['ResourceTagMappingList'][0]['ResourceARN'].should.contain('image/')
+
+    # As we're iterating the same data, this rules out that the test above was a fluke
+    resp = rtapi.get_resources(ResourceTypeFilters=['ec2:instance'])
+    resp['ResourceTagMappingList'].should.have.length_of(1)
+    resp['ResourceTagMappingList'][0]['ResourceARN'].should.contain('instance/')
+
+    # Basic test of tag filters
+    resp = rtapi.get_resources(TagFilters=[{'Key': 'MY_TAG1', 'Values': ['MY_VALUE1', 'some_other_value']}])
+    resp['ResourceTagMappingList'].should.have.length_of(1)
+    resp['ResourceTagMappingList'][0]['ResourceARN'].should.contain('instance/')
+
+
+@mock_ec2
+@mock_resourcegroupstaggingapi
+def test_get_tag_keys_ec2():
+    client = boto3.client('ec2', region_name='eu-central-1')
+
+    client.run_instances(
+        ImageId='ami-123',
+        MinCount=1,
+        MaxCount=1,
+        InstanceType='t2.micro',
+        TagSpecifications=[
+            {
+                'ResourceType': 'instance',
+                'Tags': [
+                    {
+                        'Key': 'MY_TAG1',
+                        'Value': 'MY_VALUE1',
+                    },
+                    {
+                        'Key': 'MY_TAG2',
+                        'Value': 'MY_VALUE2',
+                    },
+                ],
+            },
+            {
+                'ResourceType': 'instance',
+                'Tags': [
+                    {
+                        'Key': 'MY_TAG3',
+                        'Value': 'MY_VALUE3',
+                    },
+                ]
+            },
+        ],
+    )
+
+    rtapi = boto3.client('resourcegroupstaggingapi', region_name='eu-central-1')
+    resp = rtapi.get_tag_keys()
+
+    resp['TagKeys'].should.contain('MY_TAG1')
+    resp['TagKeys'].should.contain('MY_TAG2')
+    resp['TagKeys'].should.contain('MY_TAG3')
+
+    # TODO test pagination
+
+
+@mock_ec2
+@mock_resourcegroupstaggingapi
+def test_get_tag_values_ec2():
+    client = boto3.client('ec2', region_name='eu-central-1')
+
+    client.run_instances(
+        ImageId='ami-123',
+        MinCount=1,
+        MaxCount=1,
+        InstanceType='t2.micro',
+        TagSpecifications=[
+            {
+                'ResourceType': 'instance',
+                'Tags': [
+                    {
+                        'Key': 'MY_TAG1',
+                        'Value': 'MY_VALUE1',
+                    },
+                    {
+                        'Key': 'MY_TAG2',
+                        'Value': 'MY_VALUE2',
+                    },
+                ],
+            },
+            {
+                'ResourceType': 'instance',
+                'Tags': [
+                    {
+                        'Key': 'MY_TAG3',
+                        'Value': 'MY_VALUE3',
+                    },
+                ]
+            },
+        ],
+    )
+    client.run_instances(
+        ImageId='ami-123',
+        MinCount=1,
+        MaxCount=1,
+        InstanceType='t2.micro',
+        TagSpecifications=[
+            {
+                'ResourceType': 'instance',
+                'Tags': [
+                    {
+                        'Key': 'MY_TAG1',
+                        'Value': 'MY_VALUE4',
+                    },
+                    {
+                        'Key': 'MY_TAG2',
+                        'Value': 'MY_VALUE5',
+                    },
+                ],
+            },
+            {
+                'ResourceType': 'instance',
+                'Tags': [
+                    {
+                        'Key': 'MY_TAG3',
+                        'Value': 'MY_VALUE6',
+                    },
+                ]
+            },
+        ],
+    )
+
+    rtapi = boto3.client('resourcegroupstaggingapi', region_name='eu-central-1')
+    resp = rtapi.get_tag_values(Key='MY_TAG1')
+
+    resp['TagValues'].should.contain('MY_VALUE1')
+    resp['TagValues'].should.contain('MY_VALUE4')
+
+@mock_ec2
+@mock_elbv2
+@mock_resourcegroupstaggingapi
+def test_get_resources_elbv2():
+    conn = boto3.client('elbv2', region_name='us-east-1')
+    ec2 = boto3.resource('ec2', region_name='us-east-1')
+
+    security_group = ec2.create_security_group(
+        GroupName='a-security-group', Description='First One')
+    vpc = ec2.create_vpc(CidrBlock='172.28.7.0/24', InstanceTenancy='default')
+    subnet1 = ec2.create_subnet(
+        VpcId=vpc.id,
+        CidrBlock='172.28.7.192/26',
+        AvailabilityZone='us-east-1a')
+    subnet2 = ec2.create_subnet(
+        VpcId=vpc.id,
+        CidrBlock='172.28.7.192/26',
+        AvailabilityZone='us-east-1b')
+
+    conn.create_load_balancer(
+        Name='my-lb',
+        Subnets=[subnet1.id, subnet2.id],
+        SecurityGroups=[security_group.id],
+        Scheme='internal',
+        Tags=[
+            {
+                'Key': 'key_name',
+                'Value': 'a_value'
+            },
+            {
+                'Key': 'key_2',
+                'Value': 'val2'
+            }
+        ]
+    )
+
+    conn.create_load_balancer(
+        Name='my-other-lb',
+        Subnets=[subnet1.id, subnet2.id],
+        SecurityGroups=[security_group.id],
+        Scheme='internal',
+    )
+
+    rtapi = boto3.client('resourcegroupstaggingapi', region_name='us-east-1')
+
+    resp = rtapi.get_resources(ResourceTypeFilters=['elasticloadbalancer:loadbalancer'])
+
+    resp['ResourceTagMappingList'].should.have.length_of(2)
+    resp['ResourceTagMappingList'][0]['ResourceARN'].should.contain('loadbalancer/')
+    resp = rtapi.get_resources(
+        ResourceTypeFilters=['elasticloadbalancer:loadbalancer'],
+        TagFilters=[{
+            'Key': 'key_name'
+        }]
+    )
+
+    resp['ResourceTagMappingList'].should.have.length_of(1)
+    resp['ResourceTagMappingList'][0]['Tags'].should.contain({'Key': 'key_name', 'Value': 'a_value'})
+
+    # TODO test pagination
diff --git a/tests/test_resourcegroupstaggingapi/test_server.py b/tests/test_resourcegroupstaggingapi/test_server.py
index 311b1f03ed1e..80a74b0b810d 100644
--- a/tests/test_resourcegroupstaggingapi/test_server.py
+++ b/tests/test_resourcegroupstaggingapi/test_server.py
@@ -1,24 +1,24 @@
-from __future__ import unicode_literals
-
-import sure  # noqa
-
-import moto.server as server
-
-'''
-Test the different server responses
-'''
-
-
-def test_resourcegroupstaggingapi_list():
-    backend = server.create_backend_app("resourcegroupstaggingapi")
-    test_client = backend.test_client()
-    # do test
-
-    headers = {
-        'X-Amz-Target': 'ResourceGroupsTaggingAPI_20170126.GetResources',
-        'X-Amz-Date': '20171114T234623Z'
-    }
-    resp = test_client.post('/', headers=headers, data='{}')
-
-    assert resp.status_code == 200
-    assert b'ResourceTagMappingList' in resp.data
+from __future__ import unicode_literals
+
+import sure  # noqa
+
+import moto.server as server
+
+'''
+Test the different server responses
+'''
+
+
+def test_resourcegroupstaggingapi_list():
+    backend = server.create_backend_app("resourcegroupstaggingapi")
+    test_client = backend.test_client()
+    # do test
+
+    headers = {
+        'X-Amz-Target': 'ResourceGroupsTaggingAPI_20170126.GetResources',
+        'X-Amz-Date': '20171114T234623Z'
+    }
+    resp = test_client.post('/', headers=headers, data='{}')
+
+    assert resp.status_code == 200
+    assert b'ResourceTagMappingList' in resp.data
diff --git a/tests/test_route53/test_route53.py b/tests/test_route53/test_route53.py
index 76217b9d939c..1a76a5454829 100644
--- a/tests/test_route53/test_route53.py
+++ b/tests/test_route53/test_route53.py
@@ -1,711 +1,711 @@
-from __future__ import unicode_literals
-
-import boto
-import boto3
-from boto.route53.healthcheck import
HealthCheck -from boto.route53.record import ResourceRecordSets - -import sure # noqa - -import uuid - -import botocore -from nose.tools import assert_raises - -from moto import mock_route53, mock_route53_deprecated - - -@mock_route53_deprecated -def test_hosted_zone(): - conn = boto.connect_route53('the_key', 'the_secret') - firstzone = conn.create_hosted_zone("testdns.aws.com") - zones = conn.get_all_hosted_zones() - len(zones["ListHostedZonesResponse"]["HostedZones"]).should.equal(1) - - conn.create_hosted_zone("testdns1.aws.com") - zones = conn.get_all_hosted_zones() - len(zones["ListHostedZonesResponse"]["HostedZones"]).should.equal(2) - - id1 = firstzone["CreateHostedZoneResponse"][ - "HostedZone"]["Id"].split("/")[-1] - zone = conn.get_hosted_zone(id1) - zone["GetHostedZoneResponse"]["HostedZone"][ - "Name"].should.equal("testdns.aws.com.") - - conn.delete_hosted_zone(id1) - zones = conn.get_all_hosted_zones() - len(zones["ListHostedZonesResponse"]["HostedZones"]).should.equal(1) - - conn.get_hosted_zone.when.called_with("abcd").should.throw( - boto.route53.exception.DNSServerError, "404 Not Found") - - -@mock_route53_deprecated -def test_rrset(): - conn = boto.connect_route53('the_key', 'the_secret') - - conn.get_all_rrsets.when.called_with("abcd", type="A").should.throw( - boto.route53.exception.DNSServerError, "404 Not Found") - - zone = conn.create_hosted_zone("testdns.aws.com") - zoneid = zone["CreateHostedZoneResponse"][ - "HostedZone"]["Id"].split("/")[-1] - - changes = ResourceRecordSets(conn, zoneid) - change = changes.add_change("CREATE", "foo.bar.testdns.aws.com", "A") - change.add_value("1.2.3.4") - changes.commit() - - rrsets = conn.get_all_rrsets(zoneid, type="A") - rrsets.should.have.length_of(1) - rrsets[0].resource_records[0].should.equal('1.2.3.4') - - rrsets = conn.get_all_rrsets(zoneid, type="CNAME") - rrsets.should.have.length_of(0) - - changes = ResourceRecordSets(conn, zoneid) - changes.add_change("DELETE", "foo.bar.testdns.aws.com", "A") - change = changes.add_change("CREATE", "foo.bar.testdns.aws.com", "A") - change.add_value("5.6.7.8") - changes.commit() - - rrsets = conn.get_all_rrsets(zoneid, type="A") - rrsets.should.have.length_of(1) - rrsets[0].resource_records[0].should.equal('5.6.7.8') - - changes = ResourceRecordSets(conn, zoneid) - changes.add_change("DELETE", "foo.bar.testdns.aws.com", "A") - changes.commit() - - rrsets = conn.get_all_rrsets(zoneid) - rrsets.should.have.length_of(0) - - changes = ResourceRecordSets(conn, zoneid) - change = changes.add_change("UPSERT", "foo.bar.testdns.aws.com", "A") - change.add_value("1.2.3.4") - changes.commit() - - rrsets = conn.get_all_rrsets(zoneid, type="A") - rrsets.should.have.length_of(1) - rrsets[0].resource_records[0].should.equal('1.2.3.4') - - changes = ResourceRecordSets(conn, zoneid) - change = changes.add_change("UPSERT", "foo.bar.testdns.aws.com", "A") - change.add_value("5.6.7.8") - changes.commit() - - rrsets = conn.get_all_rrsets(zoneid, type="A") - rrsets.should.have.length_of(1) - rrsets[0].resource_records[0].should.equal('5.6.7.8') - - changes = ResourceRecordSets(conn, zoneid) - changes.add_change("DELETE", "foo.bar.testdns.aws.com", "A") - changes.commit() - - changes = ResourceRecordSets(conn, zoneid) - change = changes.add_change("CREATE", "foo.bar.testdns.aws.com", "A") - change.add_value("1.2.3.4") - change = changes.add_change("CREATE", "bar.foo.testdns.aws.com", "A") - change.add_value("5.6.7.8") - changes.commit() - - rrsets = conn.get_all_rrsets(zoneid, type="A") - 
rrsets.should.have.length_of(2) - - rrsets = conn.get_all_rrsets( - zoneid, name="foo.bar.testdns.aws.com", type="A") - rrsets.should.have.length_of(1) - rrsets[0].resource_records[0].should.equal('1.2.3.4') - - rrsets = conn.get_all_rrsets( - zoneid, name="bar.foo.testdns.aws.com", type="A") - rrsets.should.have.length_of(2) - resource_records = [rr for rr_set in rrsets for rr in rr_set.resource_records] - resource_records.should.contain('1.2.3.4') - resource_records.should.contain('5.6.7.8') - - rrsets = conn.get_all_rrsets( - zoneid, name="foo.foo.testdns.aws.com", type="A") - rrsets.should.have.length_of(0) - - -@mock_route53_deprecated -def test_rrset_with_multiple_values(): - conn = boto.connect_route53('the_key', 'the_secret') - zone = conn.create_hosted_zone("testdns.aws.com") - zoneid = zone["CreateHostedZoneResponse"][ - "HostedZone"]["Id"].split("/")[-1] - - changes = ResourceRecordSets(conn, zoneid) - change = changes.add_change("CREATE", "foo.bar.testdns.aws.com", "A") - change.add_value("1.2.3.4") - change.add_value("5.6.7.8") - changes.commit() - - rrsets = conn.get_all_rrsets(zoneid, type="A") - rrsets.should.have.length_of(1) - set(rrsets[0].resource_records).should.equal(set(['1.2.3.4', '5.6.7.8'])) - - -@mock_route53_deprecated -def test_alias_rrset(): - conn = boto.connect_route53('the_key', 'the_secret') - zone = conn.create_hosted_zone("testdns.aws.com") - zoneid = zone["CreateHostedZoneResponse"][ - "HostedZone"]["Id"].split("/")[-1] - - changes = ResourceRecordSets(conn, zoneid) - changes.add_change("CREATE", "foo.alias.testdns.aws.com", "A", - alias_hosted_zone_id="Z3DG6IL3SJCGPX", alias_dns_name="foo.testdns.aws.com") - changes.add_change("CREATE", "bar.alias.testdns.aws.com", "CNAME", - alias_hosted_zone_id="Z3DG6IL3SJCGPX", alias_dns_name="bar.testdns.aws.com") - changes.commit() - - rrsets = conn.get_all_rrsets(zoneid, type="A") - rrset_records = [(rr_set.name, rr) for rr_set in rrsets for rr in rr_set.resource_records] - rrset_records.should.have.length_of(2) - rrset_records.should.contain(('foo.alias.testdns.aws.com', 'foo.testdns.aws.com')) - rrset_records.should.contain(('bar.alias.testdns.aws.com', 'bar.testdns.aws.com')) - rrsets[0].resource_records[0].should.equal('foo.testdns.aws.com') - rrsets = conn.get_all_rrsets(zoneid, type="CNAME") - rrsets.should.have.length_of(1) - rrsets[0].resource_records[0].should.equal('bar.testdns.aws.com') - - -@mock_route53_deprecated -def test_create_health_check(): - conn = boto.connect_route53('the_key', 'the_secret') - - check = HealthCheck( - ip_addr="10.0.0.25", - port=80, - hc_type="HTTP", - resource_path="/", - fqdn="example.com", - string_match="a good response", - request_interval=10, - failure_threshold=2, - ) - conn.create_health_check(check) - - checks = conn.get_list_health_checks()['ListHealthChecksResponse'][ - 'HealthChecks'] - list(checks).should.have.length_of(1) - check = checks[0] - config = check['HealthCheckConfig'] - config['IPAddress'].should.equal("10.0.0.25") - config['Port'].should.equal("80") - config['Type'].should.equal("HTTP") - config['ResourcePath'].should.equal("/") - config['FullyQualifiedDomainName'].should.equal("example.com") - config['SearchString'].should.equal("a good response") - config['RequestInterval'].should.equal("10") - config['FailureThreshold'].should.equal("2") - - -@mock_route53_deprecated -def test_delete_health_check(): - conn = boto.connect_route53('the_key', 'the_secret') - - check = HealthCheck( - ip_addr="10.0.0.25", - port=80, - hc_type="HTTP", - 
resource_path="/", - ) - conn.create_health_check(check) - - checks = conn.get_list_health_checks()['ListHealthChecksResponse'][ - 'HealthChecks'] - list(checks).should.have.length_of(1) - health_check_id = checks[0]['Id'] - - conn.delete_health_check(health_check_id) - checks = conn.get_list_health_checks()['ListHealthChecksResponse'][ - 'HealthChecks'] - list(checks).should.have.length_of(0) - - -@mock_route53_deprecated -def test_use_health_check_in_resource_record_set(): - conn = boto.connect_route53('the_key', 'the_secret') - - check = HealthCheck( - ip_addr="10.0.0.25", - port=80, - hc_type="HTTP", - resource_path="/", - ) - check = conn.create_health_check( - check)['CreateHealthCheckResponse']['HealthCheck'] - check_id = check['Id'] - - zone = conn.create_hosted_zone("testdns.aws.com") - zone_id = zone["CreateHostedZoneResponse"][ - "HostedZone"]["Id"].split("/")[-1] - - changes = ResourceRecordSets(conn, zone_id) - change = changes.add_change( - "CREATE", "foo.bar.testdns.aws.com", "A", health_check=check_id) - change.add_value("1.2.3.4") - changes.commit() - - record_sets = conn.get_all_rrsets(zone_id) - record_sets[0].health_check.should.equal(check_id) - - -@mock_route53_deprecated -def test_hosted_zone_comment_preserved(): - conn = boto.connect_route53('the_key', 'the_secret') - - firstzone = conn.create_hosted_zone( - "testdns.aws.com.", comment="test comment") - zone_id = firstzone["CreateHostedZoneResponse"][ - "HostedZone"]["Id"].split("/")[-1] - - hosted_zone = conn.get_hosted_zone(zone_id) - hosted_zone["GetHostedZoneResponse"]["HostedZone"][ - "Config"]["Comment"].should.equal("test comment") - - hosted_zones = conn.get_all_hosted_zones() - hosted_zones["ListHostedZonesResponse"]["HostedZones"][ - 0]["Config"]["Comment"].should.equal("test comment") - - zone = conn.get_zone("testdns.aws.com.") - zone.config["Comment"].should.equal("test comment") - - -@mock_route53_deprecated -def test_deleting_weighted_route(): - conn = boto.connect_route53() - - conn.create_hosted_zone("testdns.aws.com.") - zone = conn.get_zone("testdns.aws.com.") - - zone.add_cname("cname.testdns.aws.com", "example.com", - identifier=('success-test-foo', '50')) - zone.add_cname("cname.testdns.aws.com", "example.com", - identifier=('success-test-bar', '50')) - - cnames = zone.get_cname('cname.testdns.aws.com.', all=True) - cnames.should.have.length_of(2) - foo_cname = [cname for cname in cnames if cname.identifier == - 'success-test-foo'][0] - - zone.delete_record(foo_cname) - cname = zone.get_cname('cname.testdns.aws.com.', all=True) - # When get_cname only had one result, it returns just that result instead - # of a list. - cname.identifier.should.equal('success-test-bar') - - -@mock_route53_deprecated -def test_deleting_latency_route(): - conn = boto.connect_route53() - - conn.create_hosted_zone("testdns.aws.com.") - zone = conn.get_zone("testdns.aws.com.") - - zone.add_cname("cname.testdns.aws.com", "example.com", - identifier=('success-test-foo', 'us-west-2')) - zone.add_cname("cname.testdns.aws.com", "example.com", - identifier=('success-test-bar', 'us-west-1')) - - cnames = zone.get_cname('cname.testdns.aws.com.', all=True) - cnames.should.have.length_of(2) - foo_cname = [cname for cname in cnames if cname.identifier == - 'success-test-foo'][0] - foo_cname.region.should.equal('us-west-2') - - zone.delete_record(foo_cname) - cname = zone.get_cname('cname.testdns.aws.com.', all=True) - # When get_cname only had one result, it returns just that result instead - # of a list. 
- cname.identifier.should.equal('success-test-bar') - cname.region.should.equal('us-west-1') - - -@mock_route53_deprecated -def test_hosted_zone_private_zone_preserved(): - conn = boto.connect_route53('the_key', 'the_secret') - - firstzone = conn.create_hosted_zone( - "testdns.aws.com.", private_zone=True, vpc_id='vpc-fake', vpc_region='us-east-1') - zone_id = firstzone["CreateHostedZoneResponse"][ - "HostedZone"]["Id"].split("/")[-1] - - hosted_zone = conn.get_hosted_zone(zone_id) - # in (original) boto, these bools returned as strings. - hosted_zone["GetHostedZoneResponse"]["HostedZone"][ - "Config"]["PrivateZone"].should.equal('True') - - hosted_zones = conn.get_all_hosted_zones() - hosted_zones["ListHostedZonesResponse"]["HostedZones"][ - 0]["Config"]["PrivateZone"].should.equal('True') - - zone = conn.get_zone("testdns.aws.com.") - zone.config["PrivateZone"].should.equal('True') - - -@mock_route53 -def test_hosted_zone_private_zone_preserved_boto3(): - conn = boto3.client('route53', region_name='us-east-1') - # TODO: actually create_hosted_zone statements with PrivateZone=True, but without - # a _valid_ vpc-id should fail. - firstzone = conn.create_hosted_zone( - Name="testdns.aws.com.", - CallerReference=str(hash('foo')), - HostedZoneConfig=dict( - PrivateZone=True, - Comment="Test", - ) - ) - - zone_id = firstzone["HostedZone"]["Id"].split("/")[-1] - - hosted_zone = conn.get_hosted_zone(Id=zone_id) - hosted_zone["HostedZone"]["Config"]["PrivateZone"].should.equal(True) - - hosted_zones = conn.list_hosted_zones() - hosted_zones["HostedZones"][0]["Config"]["PrivateZone"].should.equal(True) - - hosted_zones = conn.list_hosted_zones_by_name(DNSName="testdns.aws.com.") - len(hosted_zones["HostedZones"]).should.equal(1) - hosted_zones["HostedZones"][0]["Config"]["PrivateZone"].should.equal(True) - - -@mock_route53 -def test_list_or_change_tags_for_resource_request(): - conn = boto3.client('route53', region_name='us-east-1') - health_check = conn.create_health_check( - CallerReference='foobar', - HealthCheckConfig={ - 'IPAddress': '192.0.2.44', - 'Port': 123, - 'Type': 'HTTP', - 'ResourcePath': '/', - 'RequestInterval': 30, - 'FailureThreshold': 123, - 'HealthThreshold': 123, - } - ) - healthcheck_id = health_check['HealthCheck']['Id'] - - tag1 = {"Key": "Deploy", "Value": "True"} - tag2 = {"Key": "Name", "Value": "UnitTest"} - - # Test adding a tag for a resource id - conn.change_tags_for_resource( - ResourceType='healthcheck', - ResourceId=healthcheck_id, - AddTags=[tag1, tag2] - ) - - # Check to make sure that the response has the 'ResourceTagSet' key - response = conn.list_tags_for_resource( - ResourceType='healthcheck', ResourceId=healthcheck_id) - response.should.contain('ResourceTagSet') - - # Validate that each key was added - response['ResourceTagSet']['Tags'].should.contain(tag1) - response['ResourceTagSet']['Tags'].should.contain(tag2) - - len(response['ResourceTagSet']['Tags']).should.equal(2) - - # Try to remove the tags - conn.change_tags_for_resource( - ResourceType='healthcheck', - ResourceId=healthcheck_id, - RemoveTagKeys=[tag1['Key']] - ) - - # Check to make sure that the response has the 'ResourceTagSet' key - response = conn.list_tags_for_resource( - ResourceType='healthcheck', ResourceId=healthcheck_id) - response.should.contain('ResourceTagSet') - response['ResourceTagSet']['Tags'].should_not.contain(tag1) - response['ResourceTagSet']['Tags'].should.contain(tag2) - - # Remove the second tag - conn.change_tags_for_resource( - ResourceType='healthcheck', - 
ResourceId=healthcheck_id, - RemoveTagKeys=[tag2['Key']] - ) - - response = conn.list_tags_for_resource( - ResourceType='healthcheck', ResourceId=healthcheck_id) - response['ResourceTagSet']['Tags'].should_not.contain(tag2) - - # Re-add the tags - conn.change_tags_for_resource( - ResourceType='healthcheck', - ResourceId=healthcheck_id, - AddTags=[tag1, tag2] - ) - - # Remove both - conn.change_tags_for_resource( - ResourceType='healthcheck', - ResourceId=healthcheck_id, - RemoveTagKeys=[tag1['Key'], tag2['Key']] - ) - - response = conn.list_tags_for_resource( - ResourceType='healthcheck', ResourceId=healthcheck_id) - response['ResourceTagSet']['Tags'].should.be.empty - - -@mock_route53 -def test_list_hosted_zones_by_name(): - conn = boto3.client('route53', region_name='us-east-1') - conn.create_hosted_zone( - Name="test.b.com.", - CallerReference=str(hash('foo')), - HostedZoneConfig=dict( - PrivateZone=True, - Comment="test com", - ) - ) - conn.create_hosted_zone( - Name="test.a.org.", - CallerReference=str(hash('bar')), - HostedZoneConfig=dict( - PrivateZone=True, - Comment="test org", - ) - ) - conn.create_hosted_zone( - Name="test.a.org.", - CallerReference=str(hash('bar')), - HostedZoneConfig=dict( - PrivateZone=True, - Comment="test org 2", - ) - ) - - # test lookup - zones = conn.list_hosted_zones_by_name(DNSName="test.b.com.") - len(zones["HostedZones"]).should.equal(1) - zones["HostedZones"][0]["Name"].should.equal("test.b.com.") - zones = conn.list_hosted_zones_by_name(DNSName="test.a.org.") - len(zones["HostedZones"]).should.equal(2) - zones["HostedZones"][0]["Name"].should.equal("test.a.org.") - zones["HostedZones"][1]["Name"].should.equal("test.a.org.") - - # test sort order - zones = conn.list_hosted_zones_by_name() - len(zones["HostedZones"]).should.equal(3) - zones["HostedZones"][0]["Name"].should.equal("test.b.com.") - zones["HostedZones"][1]["Name"].should.equal("test.a.org.") - zones["HostedZones"][2]["Name"].should.equal("test.a.org.") - - -@mock_route53 -def test_change_resource_record_sets_crud_valid(): - conn = boto3.client('route53', region_name='us-east-1') - conn.create_hosted_zone( - Name="db.", - CallerReference=str(hash('foo')), - HostedZoneConfig=dict( - PrivateZone=True, - Comment="db", - ) - ) - - zones = conn.list_hosted_zones_by_name(DNSName="db.") - len(zones["HostedZones"]).should.equal(1) - zones["HostedZones"][0]["Name"].should.equal("db.") - hosted_zone_id = zones["HostedZones"][0]["Id"] - - # Create A Record. 
- a_record_endpoint_payload = { - 'Comment': 'create A record prod.redis.db', - 'Changes': [ - { - 'Action': 'CREATE', - 'ResourceRecordSet': { - 'Name': 'prod.redis.db', - 'Type': 'A', - 'TTL': 10, - 'ResourceRecords': [{ - 'Value': '127.0.0.1' - }] - } - } - ] - } - conn.change_resource_record_sets(HostedZoneId=hosted_zone_id, ChangeBatch=a_record_endpoint_payload) - - response = conn.list_resource_record_sets(HostedZoneId=hosted_zone_id) - len(response['ResourceRecordSets']).should.equal(1) - a_record_detail = response['ResourceRecordSets'][0] - a_record_detail['Name'].should.equal('prod.redis.db') - a_record_detail['Type'].should.equal('A') - a_record_detail['TTL'].should.equal(10) - a_record_detail['ResourceRecords'].should.equal([{'Value': '127.0.0.1'}]) - - # Update type to CNAME - cname_record_endpoint_payload = { - 'Comment': 'Update to CNAME prod.redis.db', - 'Changes': [ - { - 'Action': 'UPSERT', - 'ResourceRecordSet': { - 'Name': 'prod.redis.db', - 'Type': 'CNAME', - 'TTL': 60, - 'ResourceRecords': [{ - 'Value': '192.168.1.1' - }] - } - } - ] - } - conn.change_resource_record_sets(HostedZoneId=hosted_zone_id, ChangeBatch=cname_record_endpoint_payload) - - response = conn.list_resource_record_sets(HostedZoneId=hosted_zone_id) - len(response['ResourceRecordSets']).should.equal(1) - cname_record_detail = response['ResourceRecordSets'][0] - cname_record_detail['Name'].should.equal('prod.redis.db') - cname_record_detail['Type'].should.equal('CNAME') - cname_record_detail['TTL'].should.equal(60) - cname_record_detail['ResourceRecords'].should.equal([{'Value': '192.168.1.1'}]) - - # Delete record. - delete_payload = { - 'Comment': 'delete prod.redis.db', - 'Changes': [ - { - 'Action': 'DELETE', - 'ResourceRecordSet': { - 'Name': 'prod.redis.db', - 'Type': 'CNAME', - } - } - ] - } - conn.change_resource_record_sets(HostedZoneId=hosted_zone_id, ChangeBatch=delete_payload) - response = conn.list_resource_record_sets(HostedZoneId=hosted_zone_id) - len(response['ResourceRecordSets']).should.equal(0) - - -@mock_route53 -def test_change_resource_record_invalid(): - conn = boto3.client('route53', region_name='us-east-1') - conn.create_hosted_zone( - Name="db.", - CallerReference=str(hash('foo')), - HostedZoneConfig=dict( - PrivateZone=True, - Comment="db", - ) - ) - - zones = conn.list_hosted_zones_by_name(DNSName="db.") - len(zones["HostedZones"]).should.equal(1) - zones["HostedZones"][0]["Name"].should.equal("db.") - hosted_zone_id = zones["HostedZones"][0]["Id"] - - invalid_a_record_payload = { - 'Comment': 'this should fail', - 'Changes': [ - { - 'Action': 'CREATE', - 'ResourceRecordSet': { - 'Name': 'prod.scooby.doo', - 'Type': 'A', - 'TTL': 10, - 'ResourceRecords': [{ - 'Value': '127.0.0.1' - }] - } - } - ] - } - - with assert_raises(botocore.exceptions.ClientError): - conn.change_resource_record_sets(HostedZoneId=hosted_zone_id, ChangeBatch=invalid_a_record_payload) - - response = conn.list_resource_record_sets(HostedZoneId=hosted_zone_id) - len(response['ResourceRecordSets']).should.equal(0) - - invalid_cname_record_payload = { - 'Comment': 'this should also fail', - 'Changes': [ - { - 'Action': 'UPSERT', - 'ResourceRecordSet': { - 'Name': 'prod.scooby.doo', - 'Type': 'CNAME', - 'TTL': 10, - 'ResourceRecords': [{ - 'Value': '127.0.0.1' - }] - } - } - ] - } - - with assert_raises(botocore.exceptions.ClientError): - conn.change_resource_record_sets(HostedZoneId=hosted_zone_id, ChangeBatch=invalid_cname_record_payload) - - response = 
conn.list_resource_record_sets(HostedZoneId=hosted_zone_id) - len(response['ResourceRecordSets']).should.equal(0) - - -@mock_route53 -def test_list_resource_record_sets_name_type_filters(): - conn = boto3.client('route53', region_name='us-east-1') - create_hosted_zone_response = conn.create_hosted_zone( - Name="db.", - CallerReference=str(hash('foo')), - HostedZoneConfig=dict( - PrivateZone=True, - Comment="db", - ) - ) - hosted_zone_id = create_hosted_zone_response['HostedZone']['Id'] - - def create_resource_record_set(rec_type, rec_name): - payload = { - 'Comment': 'create {} record {}'.format(rec_type, rec_name), - 'Changes': [ - { - 'Action': 'CREATE', - 'ResourceRecordSet': { - 'Name': rec_name, - 'Type': rec_type, - 'TTL': 10, - 'ResourceRecords': [{ - 'Value': '127.0.0.1' - }] - } - } - ] - } - conn.change_resource_record_sets(HostedZoneId=hosted_zone_id, ChangeBatch=payload) - - # record_type, record_name - all_records = [ - ('A', 'a.a.db'), - ('A', 'a.b.db'), - ('A', 'b.b.db'), - ('CNAME', 'b.b.db'), - ('CNAME', 'b.c.db'), - ('CNAME', 'c.c.db') - ] - for record_type, record_name in all_records: - create_resource_record_set(record_type, record_name) - - start_with = 2 - response = conn.list_resource_record_sets( - HostedZoneId=hosted_zone_id, - StartRecordType=all_records[start_with][0], - StartRecordName=all_records[start_with][1] - ) - - returned_records = [(record['Type'], record['Name']) for record in response['ResourceRecordSets']] - len(returned_records).should.equal(len(all_records) - start_with) - for desired_record in all_records[start_with:]: - returned_records.should.contain(desired_record) +from __future__ import unicode_literals + +import boto +import boto3 +from boto.route53.healthcheck import HealthCheck +from boto.route53.record import ResourceRecordSets + +import sure # noqa + +import uuid + +import botocore +from nose.tools import assert_raises + +from moto import mock_route53, mock_route53_deprecated + + +@mock_route53_deprecated +def test_hosted_zone(): + conn = boto.connect_route53('the_key', 'the_secret') + firstzone = conn.create_hosted_zone("testdns.aws.com") + zones = conn.get_all_hosted_zones() + len(zones["ListHostedZonesResponse"]["HostedZones"]).should.equal(1) + + conn.create_hosted_zone("testdns1.aws.com") + zones = conn.get_all_hosted_zones() + len(zones["ListHostedZonesResponse"]["HostedZones"]).should.equal(2) + + id1 = firstzone["CreateHostedZoneResponse"][ + "HostedZone"]["Id"].split("/")[-1] + zone = conn.get_hosted_zone(id1) + zone["GetHostedZoneResponse"]["HostedZone"][ + "Name"].should.equal("testdns.aws.com.") + + conn.delete_hosted_zone(id1) + zones = conn.get_all_hosted_zones() + len(zones["ListHostedZonesResponse"]["HostedZones"]).should.equal(1) + + conn.get_hosted_zone.when.called_with("abcd").should.throw( + boto.route53.exception.DNSServerError, "404 Not Found") + + +@mock_route53_deprecated +def test_rrset(): + conn = boto.connect_route53('the_key', 'the_secret') + + conn.get_all_rrsets.when.called_with("abcd", type="A").should.throw( + boto.route53.exception.DNSServerError, "404 Not Found") + + zone = conn.create_hosted_zone("testdns.aws.com") + zoneid = zone["CreateHostedZoneResponse"][ + "HostedZone"]["Id"].split("/")[-1] + + changes = ResourceRecordSets(conn, zoneid) + change = changes.add_change("CREATE", "foo.bar.testdns.aws.com", "A") + change.add_value("1.2.3.4") + changes.commit() + + rrsets = conn.get_all_rrsets(zoneid, type="A") + rrsets.should.have.length_of(1) + rrsets[0].resource_records[0].should.equal('1.2.3.4') + + 
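+    # Only an A record exists at this point, so a CNAME lookup on the
+    # same zone should come back empty.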
rrsets = conn.get_all_rrsets(zoneid, type="CNAME") + rrsets.should.have.length_of(0) + + changes = ResourceRecordSets(conn, zoneid) + changes.add_change("DELETE", "foo.bar.testdns.aws.com", "A") + change = changes.add_change("CREATE", "foo.bar.testdns.aws.com", "A") + change.add_value("5.6.7.8") + changes.commit() + + rrsets = conn.get_all_rrsets(zoneid, type="A") + rrsets.should.have.length_of(1) + rrsets[0].resource_records[0].should.equal('5.6.7.8') + + changes = ResourceRecordSets(conn, zoneid) + changes.add_change("DELETE", "foo.bar.testdns.aws.com", "A") + changes.commit() + + rrsets = conn.get_all_rrsets(zoneid) + rrsets.should.have.length_of(0) + + changes = ResourceRecordSets(conn, zoneid) + change = changes.add_change("UPSERT", "foo.bar.testdns.aws.com", "A") + change.add_value("1.2.3.4") + changes.commit() + + rrsets = conn.get_all_rrsets(zoneid, type="A") + rrsets.should.have.length_of(1) + rrsets[0].resource_records[0].should.equal('1.2.3.4') + + changes = ResourceRecordSets(conn, zoneid) + change = changes.add_change("UPSERT", "foo.bar.testdns.aws.com", "A") + change.add_value("5.6.7.8") + changes.commit() + + rrsets = conn.get_all_rrsets(zoneid, type="A") + rrsets.should.have.length_of(1) + rrsets[0].resource_records[0].should.equal('5.6.7.8') + + changes = ResourceRecordSets(conn, zoneid) + changes.add_change("DELETE", "foo.bar.testdns.aws.com", "A") + changes.commit() + + changes = ResourceRecordSets(conn, zoneid) + change = changes.add_change("CREATE", "foo.bar.testdns.aws.com", "A") + change.add_value("1.2.3.4") + change = changes.add_change("CREATE", "bar.foo.testdns.aws.com", "A") + change.add_value("5.6.7.8") + changes.commit() + + rrsets = conn.get_all_rrsets(zoneid, type="A") + rrsets.should.have.length_of(2) + + rrsets = conn.get_all_rrsets( + zoneid, name="foo.bar.testdns.aws.com", type="A") + rrsets.should.have.length_of(1) + rrsets[0].resource_records[0].should.equal('1.2.3.4') + + rrsets = conn.get_all_rrsets( + zoneid, name="bar.foo.testdns.aws.com", type="A") + rrsets.should.have.length_of(2) + resource_records = [rr for rr_set in rrsets for rr in rr_set.resource_records] + resource_records.should.contain('1.2.3.4') + resource_records.should.contain('5.6.7.8') + + rrsets = conn.get_all_rrsets( + zoneid, name="foo.foo.testdns.aws.com", type="A") + rrsets.should.have.length_of(0) + + +@mock_route53_deprecated +def test_rrset_with_multiple_values(): + conn = boto.connect_route53('the_key', 'the_secret') + zone = conn.create_hosted_zone("testdns.aws.com") + zoneid = zone["CreateHostedZoneResponse"][ + "HostedZone"]["Id"].split("/")[-1] + + changes = ResourceRecordSets(conn, zoneid) + change = changes.add_change("CREATE", "foo.bar.testdns.aws.com", "A") + change.add_value("1.2.3.4") + change.add_value("5.6.7.8") + changes.commit() + + rrsets = conn.get_all_rrsets(zoneid, type="A") + rrsets.should.have.length_of(1) + set(rrsets[0].resource_records).should.equal(set(['1.2.3.4', '5.6.7.8'])) + + +@mock_route53_deprecated +def test_alias_rrset(): + conn = boto.connect_route53('the_key', 'the_secret') + zone = conn.create_hosted_zone("testdns.aws.com") + zoneid = zone["CreateHostedZoneResponse"][ + "HostedZone"]["Id"].split("/")[-1] + + changes = ResourceRecordSets(conn, zoneid) + changes.add_change("CREATE", "foo.alias.testdns.aws.com", "A", + alias_hosted_zone_id="Z3DG6IL3SJCGPX", alias_dns_name="foo.testdns.aws.com") + changes.add_change("CREATE", "bar.alias.testdns.aws.com", "CNAME", + alias_hosted_zone_id="Z3DG6IL3SJCGPX", alias_dns_name="bar.testdns.aws.com") + 
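+    # moto stores the alias target name as the record's value, which the
+    # assertions below check via resource_records.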
changes.commit() + + rrsets = conn.get_all_rrsets(zoneid, type="A") + rrset_records = [(rr_set.name, rr) for rr_set in rrsets for rr in rr_set.resource_records] + rrset_records.should.have.length_of(2) + rrset_records.should.contain(('foo.alias.testdns.aws.com', 'foo.testdns.aws.com')) + rrset_records.should.contain(('bar.alias.testdns.aws.com', 'bar.testdns.aws.com')) + rrsets[0].resource_records[0].should.equal('foo.testdns.aws.com') + rrsets = conn.get_all_rrsets(zoneid, type="CNAME") + rrsets.should.have.length_of(1) + rrsets[0].resource_records[0].should.equal('bar.testdns.aws.com') + + +@mock_route53_deprecated +def test_create_health_check(): + conn = boto.connect_route53('the_key', 'the_secret') + + check = HealthCheck( + ip_addr="10.0.0.25", + port=80, + hc_type="HTTP", + resource_path="/", + fqdn="example.com", + string_match="a good response", + request_interval=10, + failure_threshold=2, + ) + conn.create_health_check(check) + + checks = conn.get_list_health_checks()['ListHealthChecksResponse'][ + 'HealthChecks'] + list(checks).should.have.length_of(1) + check = checks[0] + config = check['HealthCheckConfig'] + config['IPAddress'].should.equal("10.0.0.25") + config['Port'].should.equal("80") + config['Type'].should.equal("HTTP") + config['ResourcePath'].should.equal("/") + config['FullyQualifiedDomainName'].should.equal("example.com") + config['SearchString'].should.equal("a good response") + config['RequestInterval'].should.equal("10") + config['FailureThreshold'].should.equal("2") + + +@mock_route53_deprecated +def test_delete_health_check(): + conn = boto.connect_route53('the_key', 'the_secret') + + check = HealthCheck( + ip_addr="10.0.0.25", + port=80, + hc_type="HTTP", + resource_path="/", + ) + conn.create_health_check(check) + + checks = conn.get_list_health_checks()['ListHealthChecksResponse'][ + 'HealthChecks'] + list(checks).should.have.length_of(1) + health_check_id = checks[0]['Id'] + + conn.delete_health_check(health_check_id) + checks = conn.get_list_health_checks()['ListHealthChecksResponse'][ + 'HealthChecks'] + list(checks).should.have.length_of(0) + + +@mock_route53_deprecated +def test_use_health_check_in_resource_record_set(): + conn = boto.connect_route53('the_key', 'the_secret') + + check = HealthCheck( + ip_addr="10.0.0.25", + port=80, + hc_type="HTTP", + resource_path="/", + ) + check = conn.create_health_check( + check)['CreateHealthCheckResponse']['HealthCheck'] + check_id = check['Id'] + + zone = conn.create_hosted_zone("testdns.aws.com") + zone_id = zone["CreateHostedZoneResponse"][ + "HostedZone"]["Id"].split("/")[-1] + + changes = ResourceRecordSets(conn, zone_id) + change = changes.add_change( + "CREATE", "foo.bar.testdns.aws.com", "A", health_check=check_id) + change.add_value("1.2.3.4") + changes.commit() + + record_sets = conn.get_all_rrsets(zone_id) + record_sets[0].health_check.should.equal(check_id) + + +@mock_route53_deprecated +def test_hosted_zone_comment_preserved(): + conn = boto.connect_route53('the_key', 'the_secret') + + firstzone = conn.create_hosted_zone( + "testdns.aws.com.", comment="test comment") + zone_id = firstzone["CreateHostedZoneResponse"][ + "HostedZone"]["Id"].split("/")[-1] + + hosted_zone = conn.get_hosted_zone(zone_id) + hosted_zone["GetHostedZoneResponse"]["HostedZone"][ + "Config"]["Comment"].should.equal("test comment") + + hosted_zones = conn.get_all_hosted_zones() + hosted_zones["ListHostedZonesResponse"]["HostedZones"][ + 0]["Config"]["Comment"].should.equal("test comment") + + zone = 
conn.get_zone("testdns.aws.com.") + zone.config["Comment"].should.equal("test comment") + + +@mock_route53_deprecated +def test_deleting_weighted_route(): + conn = boto.connect_route53() + + conn.create_hosted_zone("testdns.aws.com.") + zone = conn.get_zone("testdns.aws.com.") + + zone.add_cname("cname.testdns.aws.com", "example.com", + identifier=('success-test-foo', '50')) + zone.add_cname("cname.testdns.aws.com", "example.com", + identifier=('success-test-bar', '50')) + + cnames = zone.get_cname('cname.testdns.aws.com.', all=True) + cnames.should.have.length_of(2) + foo_cname = [cname for cname in cnames if cname.identifier == + 'success-test-foo'][0] + + zone.delete_record(foo_cname) + cname = zone.get_cname('cname.testdns.aws.com.', all=True) + # When get_cname only had one result, it returns just that result instead + # of a list. + cname.identifier.should.equal('success-test-bar') + + +@mock_route53_deprecated +def test_deleting_latency_route(): + conn = boto.connect_route53() + + conn.create_hosted_zone("testdns.aws.com.") + zone = conn.get_zone("testdns.aws.com.") + + zone.add_cname("cname.testdns.aws.com", "example.com", + identifier=('success-test-foo', 'us-west-2')) + zone.add_cname("cname.testdns.aws.com", "example.com", + identifier=('success-test-bar', 'us-west-1')) + + cnames = zone.get_cname('cname.testdns.aws.com.', all=True) + cnames.should.have.length_of(2) + foo_cname = [cname for cname in cnames if cname.identifier == + 'success-test-foo'][0] + foo_cname.region.should.equal('us-west-2') + + zone.delete_record(foo_cname) + cname = zone.get_cname('cname.testdns.aws.com.', all=True) + # When get_cname only had one result, it returns just that result instead + # of a list. + cname.identifier.should.equal('success-test-bar') + cname.region.should.equal('us-west-1') + + +@mock_route53_deprecated +def test_hosted_zone_private_zone_preserved(): + conn = boto.connect_route53('the_key', 'the_secret') + + firstzone = conn.create_hosted_zone( + "testdns.aws.com.", private_zone=True, vpc_id='vpc-fake', vpc_region='us-east-1') + zone_id = firstzone["CreateHostedZoneResponse"][ + "HostedZone"]["Id"].split("/")[-1] + + hosted_zone = conn.get_hosted_zone(zone_id) + # in (original) boto, these bools returned as strings. + hosted_zone["GetHostedZoneResponse"]["HostedZone"][ + "Config"]["PrivateZone"].should.equal('True') + + hosted_zones = conn.get_all_hosted_zones() + hosted_zones["ListHostedZonesResponse"]["HostedZones"][ + 0]["Config"]["PrivateZone"].should.equal('True') + + zone = conn.get_zone("testdns.aws.com.") + zone.config["PrivateZone"].should.equal('True') + + +@mock_route53 +def test_hosted_zone_private_zone_preserved_boto3(): + conn = boto3.client('route53', region_name='us-east-1') + # TODO: actually create_hosted_zone statements with PrivateZone=True, but without + # a _valid_ vpc-id should fail. 
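+    # Unlike the deprecated boto client above, which reports PrivateZone
+    # as the string 'True', boto3 returns a real boolean here.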
+ firstzone = conn.create_hosted_zone( + Name="testdns.aws.com.", + CallerReference=str(hash('foo')), + HostedZoneConfig=dict( + PrivateZone=True, + Comment="Test", + ) + ) + + zone_id = firstzone["HostedZone"]["Id"].split("/")[-1] + + hosted_zone = conn.get_hosted_zone(Id=zone_id) + hosted_zone["HostedZone"]["Config"]["PrivateZone"].should.equal(True) + + hosted_zones = conn.list_hosted_zones() + hosted_zones["HostedZones"][0]["Config"]["PrivateZone"].should.equal(True) + + hosted_zones = conn.list_hosted_zones_by_name(DNSName="testdns.aws.com.") + len(hosted_zones["HostedZones"]).should.equal(1) + hosted_zones["HostedZones"][0]["Config"]["PrivateZone"].should.equal(True) + + +@mock_route53 +def test_list_or_change_tags_for_resource_request(): + conn = boto3.client('route53', region_name='us-east-1') + health_check = conn.create_health_check( + CallerReference='foobar', + HealthCheckConfig={ + 'IPAddress': '192.0.2.44', + 'Port': 123, + 'Type': 'HTTP', + 'ResourcePath': '/', + 'RequestInterval': 30, + 'FailureThreshold': 123, + 'HealthThreshold': 123, + } + ) + healthcheck_id = health_check['HealthCheck']['Id'] + + tag1 = {"Key": "Deploy", "Value": "True"} + tag2 = {"Key": "Name", "Value": "UnitTest"} + + # Test adding a tag for a resource id + conn.change_tags_for_resource( + ResourceType='healthcheck', + ResourceId=healthcheck_id, + AddTags=[tag1, tag2] + ) + + # Check to make sure that the response has the 'ResourceTagSet' key + response = conn.list_tags_for_resource( + ResourceType='healthcheck', ResourceId=healthcheck_id) + response.should.contain('ResourceTagSet') + + # Validate that each key was added + response['ResourceTagSet']['Tags'].should.contain(tag1) + response['ResourceTagSet']['Tags'].should.contain(tag2) + + len(response['ResourceTagSet']['Tags']).should.equal(2) + + # Try to remove the tags + conn.change_tags_for_resource( + ResourceType='healthcheck', + ResourceId=healthcheck_id, + RemoveTagKeys=[tag1['Key']] + ) + + # Check to make sure that the response has the 'ResourceTagSet' key + response = conn.list_tags_for_resource( + ResourceType='healthcheck', ResourceId=healthcheck_id) + response.should.contain('ResourceTagSet') + response['ResourceTagSet']['Tags'].should_not.contain(tag1) + response['ResourceTagSet']['Tags'].should.contain(tag2) + + # Remove the second tag + conn.change_tags_for_resource( + ResourceType='healthcheck', + ResourceId=healthcheck_id, + RemoveTagKeys=[tag2['Key']] + ) + + response = conn.list_tags_for_resource( + ResourceType='healthcheck', ResourceId=healthcheck_id) + response['ResourceTagSet']['Tags'].should_not.contain(tag2) + + # Re-add the tags + conn.change_tags_for_resource( + ResourceType='healthcheck', + ResourceId=healthcheck_id, + AddTags=[tag1, tag2] + ) + + # Remove both + conn.change_tags_for_resource( + ResourceType='healthcheck', + ResourceId=healthcheck_id, + RemoveTagKeys=[tag1['Key'], tag2['Key']] + ) + + response = conn.list_tags_for_resource( + ResourceType='healthcheck', ResourceId=healthcheck_id) + response['ResourceTagSet']['Tags'].should.be.empty + + +@mock_route53 +def test_list_hosted_zones_by_name(): + conn = boto3.client('route53', region_name='us-east-1') + conn.create_hosted_zone( + Name="test.b.com.", + CallerReference=str(hash('foo')), + HostedZoneConfig=dict( + PrivateZone=True, + Comment="test com", + ) + ) + conn.create_hosted_zone( + Name="test.a.org.", + CallerReference=str(hash('bar')), + HostedZoneConfig=dict( + PrivateZone=True, + Comment="test org", + ) + ) + conn.create_hosted_zone( + 
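+        # A second zone with the same DNSName is allowed; the lookup and
+        # sort-order assertions below expect both test.a.org. entries.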
Name="test.a.org.", + CallerReference=str(hash('bar')), + HostedZoneConfig=dict( + PrivateZone=True, + Comment="test org 2", + ) + ) + + # test lookup + zones = conn.list_hosted_zones_by_name(DNSName="test.b.com.") + len(zones["HostedZones"]).should.equal(1) + zones["HostedZones"][0]["Name"].should.equal("test.b.com.") + zones = conn.list_hosted_zones_by_name(DNSName="test.a.org.") + len(zones["HostedZones"]).should.equal(2) + zones["HostedZones"][0]["Name"].should.equal("test.a.org.") + zones["HostedZones"][1]["Name"].should.equal("test.a.org.") + + # test sort order + zones = conn.list_hosted_zones_by_name() + len(zones["HostedZones"]).should.equal(3) + zones["HostedZones"][0]["Name"].should.equal("test.b.com.") + zones["HostedZones"][1]["Name"].should.equal("test.a.org.") + zones["HostedZones"][2]["Name"].should.equal("test.a.org.") + + +@mock_route53 +def test_change_resource_record_sets_crud_valid(): + conn = boto3.client('route53', region_name='us-east-1') + conn.create_hosted_zone( + Name="db.", + CallerReference=str(hash('foo')), + HostedZoneConfig=dict( + PrivateZone=True, + Comment="db", + ) + ) + + zones = conn.list_hosted_zones_by_name(DNSName="db.") + len(zones["HostedZones"]).should.equal(1) + zones["HostedZones"][0]["Name"].should.equal("db.") + hosted_zone_id = zones["HostedZones"][0]["Id"] + + # Create A Record. + a_record_endpoint_payload = { + 'Comment': 'create A record prod.redis.db', + 'Changes': [ + { + 'Action': 'CREATE', + 'ResourceRecordSet': { + 'Name': 'prod.redis.db', + 'Type': 'A', + 'TTL': 10, + 'ResourceRecords': [{ + 'Value': '127.0.0.1' + }] + } + } + ] + } + conn.change_resource_record_sets(HostedZoneId=hosted_zone_id, ChangeBatch=a_record_endpoint_payload) + + response = conn.list_resource_record_sets(HostedZoneId=hosted_zone_id) + len(response['ResourceRecordSets']).should.equal(1) + a_record_detail = response['ResourceRecordSets'][0] + a_record_detail['Name'].should.equal('prod.redis.db') + a_record_detail['Type'].should.equal('A') + a_record_detail['TTL'].should.equal(10) + a_record_detail['ResourceRecords'].should.equal([{'Value': '127.0.0.1'}]) + + # Update type to CNAME + cname_record_endpoint_payload = { + 'Comment': 'Update to CNAME prod.redis.db', + 'Changes': [ + { + 'Action': 'UPSERT', + 'ResourceRecordSet': { + 'Name': 'prod.redis.db', + 'Type': 'CNAME', + 'TTL': 60, + 'ResourceRecords': [{ + 'Value': '192.168.1.1' + }] + } + } + ] + } + conn.change_resource_record_sets(HostedZoneId=hosted_zone_id, ChangeBatch=cname_record_endpoint_payload) + + response = conn.list_resource_record_sets(HostedZoneId=hosted_zone_id) + len(response['ResourceRecordSets']).should.equal(1) + cname_record_detail = response['ResourceRecordSets'][0] + cname_record_detail['Name'].should.equal('prod.redis.db') + cname_record_detail['Type'].should.equal('CNAME') + cname_record_detail['TTL'].should.equal(60) + cname_record_detail['ResourceRecords'].should.equal([{'Value': '192.168.1.1'}]) + + # Delete record. 
+ delete_payload = { + 'Comment': 'delete prod.redis.db', + 'Changes': [ + { + 'Action': 'DELETE', + 'ResourceRecordSet': { + 'Name': 'prod.redis.db', + 'Type': 'CNAME', + } + } + ] + } + conn.change_resource_record_sets(HostedZoneId=hosted_zone_id, ChangeBatch=delete_payload) + response = conn.list_resource_record_sets(HostedZoneId=hosted_zone_id) + len(response['ResourceRecordSets']).should.equal(0) + + +@mock_route53 +def test_change_resource_record_invalid(): + conn = boto3.client('route53', region_name='us-east-1') + conn.create_hosted_zone( + Name="db.", + CallerReference=str(hash('foo')), + HostedZoneConfig=dict( + PrivateZone=True, + Comment="db", + ) + ) + + zones = conn.list_hosted_zones_by_name(DNSName="db.") + len(zones["HostedZones"]).should.equal(1) + zones["HostedZones"][0]["Name"].should.equal("db.") + hosted_zone_id = zones["HostedZones"][0]["Id"] + + invalid_a_record_payload = { + 'Comment': 'this should fail', + 'Changes': [ + { + 'Action': 'CREATE', + 'ResourceRecordSet': { + 'Name': 'prod.scooby.doo', + 'Type': 'A', + 'TTL': 10, + 'ResourceRecords': [{ + 'Value': '127.0.0.1' + }] + } + } + ] + } + + with assert_raises(botocore.exceptions.ClientError): + conn.change_resource_record_sets(HostedZoneId=hosted_zone_id, ChangeBatch=invalid_a_record_payload) + + response = conn.list_resource_record_sets(HostedZoneId=hosted_zone_id) + len(response['ResourceRecordSets']).should.equal(0) + + invalid_cname_record_payload = { + 'Comment': 'this should also fail', + 'Changes': [ + { + 'Action': 'UPSERT', + 'ResourceRecordSet': { + 'Name': 'prod.scooby.doo', + 'Type': 'CNAME', + 'TTL': 10, + 'ResourceRecords': [{ + 'Value': '127.0.0.1' + }] + } + } + ] + } + + with assert_raises(botocore.exceptions.ClientError): + conn.change_resource_record_sets(HostedZoneId=hosted_zone_id, ChangeBatch=invalid_cname_record_payload) + + response = conn.list_resource_record_sets(HostedZoneId=hosted_zone_id) + len(response['ResourceRecordSets']).should.equal(0) + + +@mock_route53 +def test_list_resource_record_sets_name_type_filters(): + conn = boto3.client('route53', region_name='us-east-1') + create_hosted_zone_response = conn.create_hosted_zone( + Name="db.", + CallerReference=str(hash('foo')), + HostedZoneConfig=dict( + PrivateZone=True, + Comment="db", + ) + ) + hosted_zone_id = create_hosted_zone_response['HostedZone']['Id'] + + def create_resource_record_set(rec_type, rec_name): + payload = { + 'Comment': 'create {} record {}'.format(rec_type, rec_name), + 'Changes': [ + { + 'Action': 'CREATE', + 'ResourceRecordSet': { + 'Name': rec_name, + 'Type': rec_type, + 'TTL': 10, + 'ResourceRecords': [{ + 'Value': '127.0.0.1' + }] + } + } + ] + } + conn.change_resource_record_sets(HostedZoneId=hosted_zone_id, ChangeBatch=payload) + + # record_type, record_name + all_records = [ + ('A', 'a.a.db'), + ('A', 'a.b.db'), + ('A', 'b.b.db'), + ('CNAME', 'b.b.db'), + ('CNAME', 'b.c.db'), + ('CNAME', 'c.c.db') + ] + for record_type, record_name in all_records: + create_resource_record_set(record_type, record_name) + + start_with = 2 + response = conn.list_resource_record_sets( + HostedZoneId=hosted_zone_id, + StartRecordType=all_records[start_with][0], + StartRecordName=all_records[start_with][1] + ) + + returned_records = [(record['Type'], record['Name']) for record in response['ResourceRecordSets']] + len(returned_records).should.equal(len(all_records) - start_with) + for desired_record in all_records[start_with:]: + returned_records.should.contain(desired_record) diff --git a/tests/test_s3/test_s3.py 
b/tests/test_s3/test_s3.py index 6e339abb6699..aa9050e04135 100644 --- a/tests/test_s3/test_s3.py +++ b/tests/test_s3/test_s3.py @@ -1,2583 +1,2583 @@ -# -*- coding: utf-8 -*- -from __future__ import unicode_literals - -import datetime -from six.moves.urllib.request import urlopen -from six.moves.urllib.error import HTTPError -from functools import wraps -from gzip import GzipFile -from io import BytesIO -import zlib - -import json -import boto -import boto3 -from botocore.client import ClientError -import botocore.exceptions -from boto.exception import S3CreateError, S3ResponseError -from botocore.handlers import disable_signing -from boto.s3.connection import S3Connection -from boto.s3.key import Key -from freezegun import freeze_time -import six -import requests -import tests.backport_assert_raises # noqa -from nose.tools import assert_raises - -import sure # noqa - -from moto import settings, mock_s3, mock_s3_deprecated -import moto.s3.models as s3model - -if settings.TEST_SERVER_MODE: - REDUCED_PART_SIZE = s3model.UPLOAD_PART_MIN_SIZE - EXPECTED_ETAG = '"140f92a6df9f9e415f74a1463bcee9bb-2"' -else: - REDUCED_PART_SIZE = 256 - EXPECTED_ETAG = '"66d1a1a2ed08fd05c137f316af4ff255-2"' - - -def reduced_min_part_size(f): - """ speed up tests by temporarily making the multipart minimum part size - small - """ - orig_size = s3model.UPLOAD_PART_MIN_SIZE - - @wraps(f) - def wrapped(*args, **kwargs): - try: - s3model.UPLOAD_PART_MIN_SIZE = REDUCED_PART_SIZE - return f(*args, **kwargs) - finally: - s3model.UPLOAD_PART_MIN_SIZE = orig_size - - return wrapped - - -class MyModel(object): - - def __init__(self, name, value): - self.name = name - self.value = value - - def save(self): - s3 = boto3.client('s3', region_name='us-east-1') - s3.put_object(Bucket='mybucket', Key=self.name, Body=self.value) - - -@mock_s3 -def test_my_model_save(): - # Create Bucket so that test can run - conn = boto3.resource('s3', region_name='us-east-1') - conn.create_bucket(Bucket='mybucket') - #################################### - - model_instance = MyModel('steve', 'is awesome') - model_instance.save() - - body = conn.Object('mybucket', 'steve').get()['Body'].read().decode() - - assert body == 'is awesome' - - -@mock_s3 -def test_key_etag(): - conn = boto3.resource('s3', region_name='us-east-1') - conn.create_bucket(Bucket='mybucket') - - model_instance = MyModel('steve', 'is awesome') - model_instance.save() - - conn.Bucket('mybucket').Object('steve').e_tag.should.equal( - '"d32bda93738f7e03adb22e66c90fbc04"') - - -@mock_s3_deprecated -def test_multipart_upload_too_small(): - conn = boto.connect_s3('the_key', 'the_secret') - bucket = conn.create_bucket("foobar") - - multipart = bucket.initiate_multipart_upload("the-key") - multipart.upload_part_from_file(BytesIO(b'hello'), 1) - multipart.upload_part_from_file(BytesIO(b'world'), 2) - # Multipart with total size under 5MB is refused - multipart.complete_upload.should.throw(S3ResponseError) - - -@mock_s3_deprecated -@reduced_min_part_size -def test_multipart_upload(): - conn = boto.connect_s3('the_key', 'the_secret') - bucket = conn.create_bucket("foobar") - - multipart = bucket.initiate_multipart_upload("the-key") - part1 = b'0' * REDUCED_PART_SIZE - multipart.upload_part_from_file(BytesIO(part1), 1) - # last part, can be less than 5 MB - part2 = b'1' - multipart.upload_part_from_file(BytesIO(part2), 2) - multipart.complete_upload() - # we should get both parts as the key contents - bucket.get_key( - "the-key").get_contents_as_string().should.equal(part1 + part2) - - 
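-# NOTE: S3 assembles a completed upload in part-number order, not upload order,
-# which the out-of-order test below relies on.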
-@mock_s3_deprecated
-@reduced_min_part_size
-def test_multipart_upload_out_of_order():
-    conn = boto.connect_s3('the_key', 'the_secret')
-    bucket = conn.create_bucket("foobar")
-
-    multipart = bucket.initiate_multipart_upload("the-key")
-    # last part, can be less than 5 MB
-    part2 = b'1'
-    multipart.upload_part_from_file(BytesIO(part2), 4)
-    part1 = b'0' * REDUCED_PART_SIZE
-    multipart.upload_part_from_file(BytesIO(part1), 2)
-    multipart.complete_upload()
-    # we should get both parts as the key contents
-    bucket.get_key(
-        "the-key").get_contents_as_string().should.equal(part1 + part2)
-
-
-@mock_s3_deprecated
-@reduced_min_part_size
-def test_multipart_upload_with_headers():
-    conn = boto.connect_s3('the_key', 'the_secret')
-    bucket = conn.create_bucket("foobar")
-
-    multipart = bucket.initiate_multipart_upload(
-        "the-key", metadata={"foo": "bar"})
-    part1 = b'0' * 10
-    multipart.upload_part_from_file(BytesIO(part1), 1)
-    multipart.complete_upload()
-
-    key = bucket.get_key("the-key")
-    key.metadata.should.equal({"foo": "bar"})
-
-
-@mock_s3_deprecated
-@reduced_min_part_size
-def test_multipart_upload_with_copy_key():
-    conn = boto.connect_s3('the_key', 'the_secret')
-    bucket = conn.create_bucket("foobar")
-    key = Key(bucket)
-    key.key = "original-key"
-    key.set_contents_from_string("key_value")
-
-    multipart = bucket.initiate_multipart_upload("the-key")
-    part1 = b'0' * REDUCED_PART_SIZE
-    multipart.upload_part_from_file(BytesIO(part1), 1)
-    multipart.copy_part_from_key("foobar", "original-key", 2, 0, 3)
-    multipart.complete_upload()
-    bucket.get_key(
-        "the-key").get_contents_as_string().should.equal(part1 + b"key_")
-
-
-@mock_s3_deprecated
-@reduced_min_part_size
-def test_multipart_upload_cancel():
-    conn = boto.connect_s3('the_key', 'the_secret')
-    bucket = conn.create_bucket("foobar")
-
-    multipart = bucket.initiate_multipart_upload("the-key")
-    part1 = b'0' * REDUCED_PART_SIZE
-    multipart.upload_part_from_file(BytesIO(part1), 1)
-    multipart.cancel_upload()
-    # TODO we really need some sort of assertion here, but we don't currently
-    # have the ability to list multipart uploads for a bucket.
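-    # (bucket.get_all_multipart_uploads does exist by now; see test_list_multiparts below.)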
-
-
-@mock_s3_deprecated
-@reduced_min_part_size
-def test_multipart_etag():
-    # Create Bucket so that test can run
-    conn = boto.connect_s3('the_key', 'the_secret')
-    bucket = conn.create_bucket('mybucket')
-
-    multipart = bucket.initiate_multipart_upload("the-key")
-    part1 = b'0' * REDUCED_PART_SIZE
-    multipart.upload_part_from_file(BytesIO(part1), 1)
-    # last part, can be less than 5 MB
-    part2 = b'1'
-    multipart.upload_part_from_file(BytesIO(part2), 2)
-    multipart.complete_upload()
-    # we should get both parts as the key contents
-    bucket.get_key("the-key").etag.should.equal(EXPECTED_ETAG)
-
-
-@mock_s3_deprecated
-@reduced_min_part_size
-def test_multipart_invalid_order():
-    # Create Bucket so that test can run
-    conn = boto.connect_s3('the_key', 'the_secret')
-    bucket = conn.create_bucket('mybucket')
-
-    multipart = bucket.initiate_multipart_upload("the-key")
-    part1 = b'0' * 5242880
-    etag1 = multipart.upload_part_from_file(BytesIO(part1), 1).etag
-    # last part, can be less than 5 MB
-    part2 = b'1'
-    etag2 = multipart.upload_part_from_file(BytesIO(part2), 2).etag
-    xml = "<Part><PartNumber>{0}</PartNumber><ETag>{1}</ETag></Part>"
-    xml = xml.format(2, etag2) + xml.format(1, etag1)
-    xml = "<CompleteMultipartUpload>{0}</CompleteMultipartUpload>".format(xml)
-    bucket.complete_multipart_upload.when.called_with(
-        multipart.key_name, multipart.id, xml).should.throw(S3ResponseError)
-
-@mock_s3_deprecated
-@reduced_min_part_size
-def test_multipart_etag_quotes_stripped():
-    # Create Bucket so that test can run
-    conn = boto.connect_s3('the_key', 'the_secret')
-    bucket = conn.create_bucket('mybucket')
-
-    multipart = bucket.initiate_multipart_upload("the-key")
-    part1 = b'0' * REDUCED_PART_SIZE
-    etag1 = multipart.upload_part_from_file(BytesIO(part1), 1).etag
-    # last part, can be less than 5 MB
-    part2 = b'1'
-    etag2 = multipart.upload_part_from_file(BytesIO(part2), 2).etag
-    # Strip quotes from etags
-    etag1 = etag1.replace('"','')
-    etag2 = etag2.replace('"','')
-    xml = "<Part><PartNumber>{0}</PartNumber><ETag>{1}</ETag></Part>"
-    xml = xml.format(1, etag1) + xml.format(2, etag2)
-    xml = "<CompleteMultipartUpload>{0}</CompleteMultipartUpload>".format(xml)
-    bucket.complete_multipart_upload.when.called_with(
-        multipart.key_name, multipart.id, xml).should_not.throw(S3ResponseError)
-    # we should get both parts as the key contents
-    bucket.get_key("the-key").etag.should.equal(EXPECTED_ETAG)
-
-@mock_s3_deprecated
-@reduced_min_part_size
-def test_multipart_duplicate_upload():
-    conn = boto.connect_s3('the_key', 'the_secret')
-    bucket = conn.create_bucket("foobar")
-
-    multipart = bucket.initiate_multipart_upload("the-key")
-    part1 = b'0' * REDUCED_PART_SIZE
-    multipart.upload_part_from_file(BytesIO(part1), 1)
-    # same part again
-    multipart.upload_part_from_file(BytesIO(part1), 1)
-    part2 = b'1' * 1024
-    multipart.upload_part_from_file(BytesIO(part2), 2)
-    multipart.complete_upload()
-    # We should get only one copy of part 1.
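-    # (re-uploading part number 1 replaces the first upload rather than appending to it)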
- bucket.get_key( - "the-key").get_contents_as_string().should.equal(part1 + part2) - - -@mock_s3_deprecated -def test_list_multiparts(): - # Create Bucket so that test can run - conn = boto.connect_s3('the_key', 'the_secret') - bucket = conn.create_bucket('mybucket') - - multipart1 = bucket.initiate_multipart_upload("one-key") - multipart2 = bucket.initiate_multipart_upload("two-key") - uploads = bucket.get_all_multipart_uploads() - uploads.should.have.length_of(2) - dict([(u.key_name, u.id) for u in uploads]).should.equal( - {'one-key': multipart1.id, 'two-key': multipart2.id}) - multipart2.cancel_upload() - uploads = bucket.get_all_multipart_uploads() - uploads.should.have.length_of(1) - uploads[0].key_name.should.equal("one-key") - multipart1.cancel_upload() - uploads = bucket.get_all_multipart_uploads() - uploads.should.be.empty - - -@mock_s3_deprecated -def test_key_save_to_missing_bucket(): - conn = boto.connect_s3('the_key', 'the_secret') - bucket = conn.get_bucket('mybucket', validate=False) - - key = Key(bucket) - key.key = "the-key" - key.set_contents_from_string.when.called_with( - "foobar").should.throw(S3ResponseError) - - -@mock_s3_deprecated -def test_missing_key(): - conn = boto.connect_s3('the_key', 'the_secret') - bucket = conn.create_bucket("foobar") - bucket.get_key("the-key").should.equal(None) - - -@mock_s3_deprecated -def test_missing_key_urllib2(): - conn = boto.connect_s3('the_key', 'the_secret') - conn.create_bucket("foobar") - - urlopen.when.called_with( - "http://foobar.s3.amazonaws.com/the-key").should.throw(HTTPError) - - -@mock_s3_deprecated -def test_empty_key(): - conn = boto.connect_s3('the_key', 'the_secret') - bucket = conn.create_bucket("foobar") - key = Key(bucket) - key.key = "the-key" - key.set_contents_from_string("") - - key = bucket.get_key("the-key") - key.size.should.equal(0) - key.get_contents_as_string().should.equal(b'') - - -@mock_s3_deprecated -def test_empty_key_set_on_existing_key(): - conn = boto.connect_s3('the_key', 'the_secret') - bucket = conn.create_bucket("foobar") - key = Key(bucket) - key.key = "the-key" - key.set_contents_from_string("foobar") - - key = bucket.get_key("the-key") - key.size.should.equal(6) - key.get_contents_as_string().should.equal(b'foobar') - - key.set_contents_from_string("") - bucket.get_key("the-key").get_contents_as_string().should.equal(b'') - - -@mock_s3_deprecated -def test_large_key_save(): - conn = boto.connect_s3('the_key', 'the_secret') - bucket = conn.create_bucket("foobar") - key = Key(bucket) - key.key = "the-key" - key.set_contents_from_string("foobar" * 100000) - - bucket.get_key( - "the-key").get_contents_as_string().should.equal(b'foobar' * 100000) - - -@mock_s3_deprecated -def test_copy_key(): - conn = boto.connect_s3('the_key', 'the_secret') - bucket = conn.create_bucket("foobar") - key = Key(bucket) - key.key = "the-key" - key.set_contents_from_string("some value") - - bucket.copy_key('new-key', 'foobar', 'the-key') - - bucket.get_key( - "the-key").get_contents_as_string().should.equal(b"some value") - bucket.get_key( - "new-key").get_contents_as_string().should.equal(b"some value") - - -@mock_s3_deprecated -def test_copy_key_with_version(): - conn = boto.connect_s3('the_key', 'the_secret') - bucket = conn.create_bucket("foobar") - bucket.configure_versioning(versioning=True) - key = Key(bucket) - key.key = "the-key" - key.set_contents_from_string("some value") - key.set_contents_from_string("another value") - - bucket.copy_key('new-key', 'foobar', 'the-key', src_version_id='0') - - 
bucket.get_key(
-        "the-key").get_contents_as_string().should.equal(b"another value")
-    bucket.get_key(
-        "new-key").get_contents_as_string().should.equal(b"some value")
-
-
-@mock_s3_deprecated
-def test_set_metadata():
-    conn = boto.connect_s3('the_key', 'the_secret')
-    bucket = conn.create_bucket("foobar")
-    key = Key(bucket)
-    key.key = 'the-key'
-    key.set_metadata('md', 'Metadatastring')
-    key.set_contents_from_string("Testval")
-
-    bucket.get_key('the-key').get_metadata('md').should.equal('Metadatastring')
-
-
-@mock_s3_deprecated
-def test_copy_key_replace_metadata():
-    conn = boto.connect_s3('the_key', 'the_secret')
-    bucket = conn.create_bucket("foobar")
-    key = Key(bucket)
-    key.key = "the-key"
-    key.set_metadata('md', 'Metadatastring')
-    key.set_contents_from_string("some value")
-
-    bucket.copy_key('new-key', 'foobar', 'the-key',
-                    metadata={'momd': 'Mometadatastring'})
-
-    bucket.get_key("new-key").get_metadata('md').should.be.none
-    bucket.get_key(
-        "new-key").get_metadata('momd').should.equal('Mometadatastring')
-
-
-@freeze_time("2012-01-01 12:00:00")
-@mock_s3_deprecated
-def test_last_modified():
-    # See https://github.com/boto/boto/issues/466
-    conn = boto.connect_s3()
-    bucket = conn.create_bucket("foobar")
-    key = Key(bucket)
-    key.key = "the-key"
-    key.set_contents_from_string("some value")
-
-    rs = bucket.get_all_keys()
-    rs[0].last_modified.should.equal('2012-01-01T12:00:00.000Z')
-
-    bucket.get_key(
-        "the-key").last_modified.should.equal('Sun, 01 Jan 2012 12:00:00 GMT')
-
-
-@mock_s3_deprecated
-def test_missing_bucket():
-    conn = boto.connect_s3('the_key', 'the_secret')
-    conn.get_bucket.when.called_with('mybucket').should.throw(S3ResponseError)
-
-
-@mock_s3_deprecated
-def test_bucket_with_dash():
-    conn = boto.connect_s3('the_key', 'the_secret')
-    conn.get_bucket.when.called_with(
-        'mybucket-test').should.throw(S3ResponseError)
-
-
-@mock_s3_deprecated
-def test_create_existing_bucket():
-    "Trying to create a bucket that already exists should raise an Error"
-    conn = boto.s3.connect_to_region("us-west-2")
-    conn.create_bucket("foobar")
-    with assert_raises(S3CreateError):
-        conn.create_bucket('foobar')
-
-
-@mock_s3_deprecated
-def test_create_existing_bucket_in_us_east_1():
-    "Trying to create a bucket that already exists in us-east-1 returns the bucket"
-
-    """
-    http://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html
-    Your previous request to create the named bucket succeeded and you already
-    own it. You get this error in all AWS regions except US Standard,
-    us-east-1. In us-east-1 region, you will get 200 OK, but it is a no-op (if
-    the bucket exists, Amazon S3 will not do anything).
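-    In other words, the second create_bucket("foobar") call below succeeds and
-    simply returns the existing bucket.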
- """ - conn = boto.s3.connect_to_region("us-east-1") - conn.create_bucket("foobar") - bucket = conn.create_bucket("foobar") - bucket.name.should.equal("foobar") - - -@mock_s3_deprecated -def test_other_region(): - conn = S3Connection( - 'key', 'secret', host='s3-website-ap-southeast-2.amazonaws.com') - conn.create_bucket("foobar") - list(conn.get_bucket("foobar").get_all_keys()).should.equal([]) - - -@mock_s3_deprecated -def test_bucket_deletion(): - conn = boto.connect_s3('the_key', 'the_secret') - bucket = conn.create_bucket("foobar") - - key = Key(bucket) - key.key = "the-key" - key.set_contents_from_string("some value") - - # Try to delete a bucket that still has keys - conn.delete_bucket.when.called_with("foobar").should.throw(S3ResponseError) - - bucket.delete_key("the-key") - conn.delete_bucket("foobar") - - # Get non-existing bucket - conn.get_bucket.when.called_with("foobar").should.throw(S3ResponseError) - - # Delete non-existant bucket - conn.delete_bucket.when.called_with("foobar").should.throw(S3ResponseError) - - -@mock_s3_deprecated -def test_get_all_buckets(): - conn = boto.connect_s3('the_key', 'the_secret') - conn.create_bucket("foobar") - conn.create_bucket("foobar2") - buckets = conn.get_all_buckets() - - buckets.should.have.length_of(2) - - -@mock_s3 -@mock_s3_deprecated -def test_post_to_bucket(): - conn = boto.connect_s3('the_key', 'the_secret') - bucket = conn.create_bucket("foobar") - - requests.post("https://foobar.s3.amazonaws.com/", { - 'key': 'the-key', - 'file': 'nothing' - }) - - bucket.get_key('the-key').get_contents_as_string().should.equal(b'nothing') - - -@mock_s3 -@mock_s3_deprecated -def test_post_with_metadata_to_bucket(): - conn = boto.connect_s3('the_key', 'the_secret') - bucket = conn.create_bucket("foobar") - - requests.post("https://foobar.s3.amazonaws.com/", { - 'key': 'the-key', - 'file': 'nothing', - 'x-amz-meta-test': 'metadata' - }) - - bucket.get_key('the-key').get_metadata('test').should.equal('metadata') - - -@mock_s3_deprecated -def test_delete_missing_key(): - conn = boto.connect_s3('the_key', 'the_secret') - bucket = conn.create_bucket('foobar') - - deleted_key = bucket.delete_key("foobar") - deleted_key.key.should.equal("foobar") - - -@mock_s3_deprecated -def test_delete_keys(): - conn = boto.connect_s3('the_key', 'the_secret') - bucket = conn.create_bucket('foobar') - - Key(bucket=bucket, name='file1').set_contents_from_string('abc') - Key(bucket=bucket, name='file2').set_contents_from_string('abc') - Key(bucket=bucket, name='file3').set_contents_from_string('abc') - Key(bucket=bucket, name='file4').set_contents_from_string('abc') - - result = bucket.delete_keys(['file2', 'file3']) - result.deleted.should.have.length_of(2) - result.errors.should.have.length_of(0) - keys = bucket.get_all_keys() - keys.should.have.length_of(2) - keys[0].name.should.equal('file1') - - -@mock_s3_deprecated -def test_delete_keys_with_invalid(): - conn = boto.connect_s3('the_key', 'the_secret') - bucket = conn.create_bucket('foobar') - - Key(bucket=bucket, name='file1').set_contents_from_string('abc') - Key(bucket=bucket, name='file2').set_contents_from_string('abc') - Key(bucket=bucket, name='file3').set_contents_from_string('abc') - Key(bucket=bucket, name='file4').set_contents_from_string('abc') - - result = bucket.delete_keys(['abc', 'file3']) - - result.deleted.should.have.length_of(1) - result.errors.should.have.length_of(1) - keys = bucket.get_all_keys() - keys.should.have.length_of(3) - keys[0].name.should.equal('file1') - - -@mock_s3_deprecated 
-def test_bucket_name_with_dot():
-    conn = boto.connect_s3()
-    bucket = conn.create_bucket('firstname.lastname')
-
-    k = Key(bucket, 'somekey')
-    k.set_contents_from_string('somedata')
-
-
-@mock_s3_deprecated
-def test_key_with_special_characters():
-    conn = boto.connect_s3()
-    bucket = conn.create_bucket('test_bucket_name')
-
-    key = Key(bucket, 'test_list_keys_2/x?y')
-    key.set_contents_from_string('value1')
-
-    key_list = bucket.list('test_list_keys_2/', '/')
-    keys = [x for x in key_list]
-    keys[0].name.should.equal("test_list_keys_2/x?y")
-
-
-@mock_s3_deprecated
-def test_unicode_key_with_slash():
-    conn = boto.connect_s3('the_key', 'the_secret')
-    bucket = conn.create_bucket("foobar")
-    key = Key(bucket)
-    key.key = "/the-key-unîcode/test"
-    key.set_contents_from_string("value")
-
-    key = bucket.get_key("/the-key-unîcode/test")
-    key.get_contents_as_string().should.equal(b'value')
-
-
-@mock_s3_deprecated
-def test_bucket_key_listing_order():
-    conn = boto.connect_s3()
-    bucket = conn.create_bucket('test_bucket')
-    prefix = 'toplevel/'
-
-    def store(name):
-        k = Key(bucket, prefix + name)
-        k.set_contents_from_string('somedata')
-
-    names = ['x/key', 'y.key1', 'y.key2', 'y.key3', 'x/y/key', 'x/y/z/key']
-
-    for name in names:
-        store(name)
-
-    delimiter = None
-    keys = [x.name for x in bucket.list(prefix, delimiter)]
-    keys.should.equal([
-        'toplevel/x/key', 'toplevel/x/y/key', 'toplevel/x/y/z/key',
-        'toplevel/y.key1', 'toplevel/y.key2', 'toplevel/y.key3'
-    ])
-
-    delimiter = '/'
-    keys = [x.name for x in bucket.list(prefix, delimiter)]
-    keys.should.equal([
-        'toplevel/y.key1', 'toplevel/y.key2', 'toplevel/y.key3', 'toplevel/x/'
-    ])
-
-    # Test delimiter with no prefix
-    delimiter = '/'
-    keys = [x.name for x in bucket.list(prefix=None, delimiter=delimiter)]
-    keys.should.equal(['toplevel/'])
-
-    delimiter = None
-    keys = [x.name for x in bucket.list(prefix + 'x', delimiter)]
-    keys.should.equal(
-        [u'toplevel/x/key', u'toplevel/x/y/key', u'toplevel/x/y/z/key'])
-
-    delimiter = '/'
-    keys = [x.name for x in bucket.list(prefix + 'x', delimiter)]
-    keys.should.equal([u'toplevel/x/'])
-
-
-@mock_s3_deprecated
-def test_key_with_reduced_redundancy():
-    conn = boto.connect_s3()
-    bucket = conn.create_bucket('test_bucket_name')
-
-    key = Key(bucket, 'test_rr_key')
-    key.set_contents_from_string('value1', reduced_redundancy=True)
-    # we use the bucket iterator because of:
-    # https://github.com/boto/boto/issues/1173
-    list(bucket)[0].storage_class.should.equal('REDUCED_REDUNDANCY')
-
-
-@mock_s3_deprecated
-def test_copy_key_reduced_redundancy():
-    conn = boto.connect_s3('the_key', 'the_secret')
-    bucket = conn.create_bucket("foobar")
-    key = Key(bucket)
-    key.key = "the-key"
-    key.set_contents_from_string("some value")
-
-    bucket.copy_key('new-key', 'foobar', 'the-key',
-                    storage_class='REDUCED_REDUNDANCY')
-
-    # we use the bucket iterator because of:
-    # https://github.com/boto/boto/issues/1173
-    keys = dict([(k.name, k) for k in bucket])
-    keys['new-key'].storage_class.should.equal("REDUCED_REDUNDANCY")
-    keys['the-key'].storage_class.should.equal("STANDARD")
-
-
-@freeze_time("2012-01-01 12:00:00")
-@mock_s3_deprecated
-def test_restore_key():
-    conn = boto.connect_s3('the_key', 'the_secret')
-    bucket = conn.create_bucket("foobar")
-    key = Key(bucket)
-    key.key = "the-key"
-    key.set_contents_from_string("some value")
-    list(bucket)[0].ongoing_restore.should.be.none
-    key.restore(1)
-    key = bucket.get_key('the-key')
-    key.ongoing_restore.should_not.be.none
-    
key.ongoing_restore.should.be.false - key.expiry_date.should.equal("Mon, 02 Jan 2012 12:00:00 GMT") - key.restore(2) - key = bucket.get_key('the-key') - key.ongoing_restore.should_not.be.none - key.ongoing_restore.should.be.false - key.expiry_date.should.equal("Tue, 03 Jan 2012 12:00:00 GMT") - - -@freeze_time("2012-01-01 12:00:00") -@mock_s3_deprecated -def test_restore_key_headers(): - conn = boto.connect_s3('the_key', 'the_secret') - bucket = conn.create_bucket("foobar") - key = Key(bucket) - key.key = "the-key" - key.set_contents_from_string("some value") - key.restore(1, headers={'foo': 'bar'}) - key = bucket.get_key('the-key') - key.ongoing_restore.should_not.be.none - key.ongoing_restore.should.be.false - key.expiry_date.should.equal("Mon, 02 Jan 2012 12:00:00 GMT") - - -@mock_s3_deprecated -def test_get_versioning_status(): - conn = boto.connect_s3('the_key', 'the_secret') - bucket = conn.create_bucket('foobar') - d = bucket.get_versioning_status() - d.should.be.empty - - bucket.configure_versioning(versioning=True) - d = bucket.get_versioning_status() - d.shouldnt.be.empty - d.should.have.key('Versioning').being.equal('Enabled') - - bucket.configure_versioning(versioning=False) - d = bucket.get_versioning_status() - d.should.have.key('Versioning').being.equal('Suspended') - - -@mock_s3_deprecated -def test_key_version(): - conn = boto.connect_s3('the_key', 'the_secret') - bucket = conn.create_bucket('foobar') - bucket.configure_versioning(versioning=True) - - key = Key(bucket) - key.key = 'the-key' - key.version_id.should.be.none - key.set_contents_from_string('some string') - key.version_id.should.equal('0') - key.set_contents_from_string('some string') - key.version_id.should.equal('1') - - key = bucket.get_key('the-key') - key.version_id.should.equal('1') - - -@mock_s3_deprecated -def test_list_versions(): - conn = boto.connect_s3('the_key', 'the_secret') - bucket = conn.create_bucket('foobar') - bucket.configure_versioning(versioning=True) - - key = Key(bucket, 'the-key') - key.version_id.should.be.none - key.set_contents_from_string("Version 1") - key.version_id.should.equal('0') - key.set_contents_from_string("Version 2") - key.version_id.should.equal('1') - - versions = list(bucket.list_versions()) - - versions.should.have.length_of(2) - - versions[0].name.should.equal('the-key') - versions[0].version_id.should.equal('0') - versions[0].get_contents_as_string().should.equal(b"Version 1") - - versions[1].name.should.equal('the-key') - versions[1].version_id.should.equal('1') - versions[1].get_contents_as_string().should.equal(b"Version 2") - - key = Key(bucket, 'the2-key') - key.set_contents_from_string("Version 1") - - keys = list(bucket.list()) - keys.should.have.length_of(2) - versions = list(bucket.list_versions(prefix='the2-')) - versions.should.have.length_of(1) - - -@mock_s3_deprecated -def test_acl_setting(): - conn = boto.connect_s3() - bucket = conn.create_bucket('foobar') - content = b'imafile' - keyname = 'test.txt' - - key = Key(bucket, name=keyname) - key.content_type = 'text/plain' - key.set_contents_from_string(content) - key.make_public() - - key = bucket.get_key(keyname) - - assert key.get_contents_as_string() == content - - grants = key.get_acl().acl.grants - assert any(g.uri == 'http://acs.amazonaws.com/groups/global/AllUsers' and - g.permission == 'READ' for g in grants), grants - - -@mock_s3_deprecated -def test_acl_setting_via_headers(): - conn = boto.connect_s3() - bucket = conn.create_bucket('foobar') - content = b'imafile' - keyname = 'test.txt' - - 
key = Key(bucket, name=keyname) - key.content_type = 'text/plain' - key.set_contents_from_string(content, headers={ - 'x-amz-grant-full-control': 'uri="http://acs.amazonaws.com/groups/global/AllUsers"' - }) - - key = bucket.get_key(keyname) - - assert key.get_contents_as_string() == content - - grants = key.get_acl().acl.grants - assert any(g.uri == 'http://acs.amazonaws.com/groups/global/AllUsers' and - g.permission == 'FULL_CONTROL' for g in grants), grants - - -@mock_s3_deprecated -def test_acl_switching(): - conn = boto.connect_s3() - bucket = conn.create_bucket('foobar') - content = b'imafile' - keyname = 'test.txt' - - key = Key(bucket, name=keyname) - key.content_type = 'text/plain' - key.set_contents_from_string(content, policy='public-read') - key.set_acl('private') - - grants = key.get_acl().acl.grants - assert not any(g.uri == 'http://acs.amazonaws.com/groups/global/AllUsers' and - g.permission == 'READ' for g in grants), grants - - -@mock_s3_deprecated -def test_bucket_acl_setting(): - conn = boto.connect_s3() - bucket = conn.create_bucket('foobar') - - bucket.make_public() - - grants = bucket.get_acl().acl.grants - assert any(g.uri == 'http://acs.amazonaws.com/groups/global/AllUsers' and - g.permission == 'READ' for g in grants), grants - - -@mock_s3_deprecated -def test_bucket_acl_switching(): - conn = boto.connect_s3() - bucket = conn.create_bucket('foobar') - bucket.make_public() - - bucket.set_acl('private') - - grants = bucket.get_acl().acl.grants - assert not any(g.uri == 'http://acs.amazonaws.com/groups/global/AllUsers' and - g.permission == 'READ' for g in grants), grants - - -@mock_s3 -def test_s3_object_in_public_bucket(): - s3 = boto3.resource('s3') - bucket = s3.Bucket('test-bucket') - bucket.create(ACL='public-read') - bucket.put_object(Body=b'ABCD', Key='file.txt') - - s3_anonymous = boto3.resource('s3') - s3_anonymous.meta.client.meta.events.register('choose-signer.s3.*', disable_signing) - - contents = s3_anonymous.Object(key='file.txt', bucket_name='test-bucket').get()['Body'].read() - contents.should.equal(b'ABCD') - - bucket.put_object(ACL='private', Body=b'ABCD', Key='file.txt') - - with assert_raises(ClientError) as exc: - s3_anonymous.Object(key='file.txt', bucket_name='test-bucket').get() - exc.exception.response['Error']['Code'].should.equal('403') - - params = {'Bucket': 'test-bucket', 'Key': 'file.txt'} - presigned_url = boto3.client('s3').generate_presigned_url('get_object', params, ExpiresIn=900) - response = requests.get(presigned_url) - assert response.status_code == 200 - - -@mock_s3 -def test_s3_object_in_private_bucket(): - s3 = boto3.resource('s3') - bucket = s3.Bucket('test-bucket') - bucket.create(ACL='private') - bucket.put_object(ACL='private', Body=b'ABCD', Key='file.txt') - - s3_anonymous = boto3.resource('s3') - s3_anonymous.meta.client.meta.events.register('choose-signer.s3.*', disable_signing) - - with assert_raises(ClientError) as exc: - s3_anonymous.Object(key='file.txt', bucket_name='test-bucket').get() - exc.exception.response['Error']['Code'].should.equal('403') - - bucket.put_object(ACL='public-read', Body=b'ABCD', Key='file.txt') - contents = s3_anonymous.Object(key='file.txt', bucket_name='test-bucket').get()['Body'].read() - contents.should.equal(b'ABCD') - - -@mock_s3_deprecated -def test_unicode_key(): - conn = boto.connect_s3() - bucket = conn.create_bucket('mybucket') - key = Key(bucket) - key.key = u'こんにちは.jpg' - key.set_contents_from_string('Hello world!') - assert [listed_key.key for listed_key in bucket.list()] == 
[key.key]
-    fetched_key = bucket.get_key(key.key)
-    assert fetched_key.key == key.key
-    assert fetched_key.get_contents_as_string().decode("utf-8") == 'Hello world!'
-
-
-@mock_s3_deprecated
-def test_unicode_value():
-    conn = boto.connect_s3()
-    bucket = conn.create_bucket('mybucket')
-    key = Key(bucket)
-    key.key = 'some_key'
-    key.set_contents_from_string(u'こんにちは.jpg')
-    list(bucket.list())
-    key = bucket.get_key(key.key)
-    assert key.get_contents_as_string().decode("utf-8") == u'こんにちは.jpg'
-
-
-@mock_s3_deprecated
-def test_setting_content_encoding():
-    conn = boto.connect_s3()
-    bucket = conn.create_bucket('mybucket')
-    key = bucket.new_key("keyname")
-    key.set_metadata("Content-Encoding", "gzip")
-    compressed_data = "abcdef"
-    key.set_contents_from_string(compressed_data)
-
-    key = bucket.get_key("keyname")
-    key.content_encoding.should.equal("gzip")
-
-
-@mock_s3_deprecated
-def test_bucket_location():
-    conn = boto.s3.connect_to_region("us-west-2")
-    bucket = conn.create_bucket('mybucket')
-    bucket.get_location().should.equal("us-west-2")
-
-
-@mock_s3_deprecated
-def test_ranged_get():
-    conn = boto.connect_s3()
-    bucket = conn.create_bucket('mybucket')
-    key = Key(bucket)
-    key.key = 'bigkey'
-    rep = b"0123456789"
-    key.set_contents_from_string(rep * 10)
-
-    # Implicitly bounded range requests.
-    key.get_contents_as_string(
-        headers={'Range': 'bytes=0-'}).should.equal(rep * 10)
-    key.get_contents_as_string(
-        headers={'Range': 'bytes=50-'}).should.equal(rep * 5)
-    key.get_contents_as_string(
-        headers={'Range': 'bytes=99-'}).should.equal(b'9')
-
-    # Explicitly bounded range requests starting from the first byte.
-    key.get_contents_as_string(
-        headers={'Range': 'bytes=0-0'}).should.equal(b'0')
-    key.get_contents_as_string(
-        headers={'Range': 'bytes=0-49'}).should.equal(rep * 5)
-    key.get_contents_as_string(
-        headers={'Range': 'bytes=0-99'}).should.equal(rep * 10)
-    key.get_contents_as_string(
-        headers={'Range': 'bytes=0-100'}).should.equal(rep * 10)
-    key.get_contents_as_string(
-        headers={'Range': 'bytes=0-700'}).should.equal(rep * 10)
-
-    # Explicitly bounded range requests starting from a middle byte.
-    key.get_contents_as_string(
-        headers={'Range': 'bytes=50-54'}).should.equal(rep[:5])
-    key.get_contents_as_string(
-        headers={'Range': 'bytes=50-99'}).should.equal(rep * 5)
-    key.get_contents_as_string(
-        headers={'Range': 'bytes=50-100'}).should.equal(rep * 5)
-    key.get_contents_as_string(
-        headers={'Range': 'bytes=50-700'}).should.equal(rep * 5)
-
-    # Explicitly bounded range requests starting from the last byte.
-    key.get_contents_as_string(
-        headers={'Range': 'bytes=99-99'}).should.equal(b'9')
-    key.get_contents_as_string(
-        headers={'Range': 'bytes=99-100'}).should.equal(b'9')
-    key.get_contents_as_string(
-        headers={'Range': 'bytes=99-700'}).should.equal(b'9')
-
-    # Suffix range requests.
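-    # A suffix range of bytes=-N returns the last N bytes, capped at the object size.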
-    key.get_contents_as_string(
-        headers={'Range': 'bytes=-1'}).should.equal(b'9')
-    key.get_contents_as_string(
-        headers={'Range': 'bytes=-60'}).should.equal(rep * 6)
-    key.get_contents_as_string(
-        headers={'Range': 'bytes=-100'}).should.equal(rep * 10)
-    key.get_contents_as_string(
-        headers={'Range': 'bytes=-101'}).should.equal(rep * 10)
-    key.get_contents_as_string(
-        headers={'Range': 'bytes=-700'}).should.equal(rep * 10)
-
-    key.size.should.equal(100)
-
-
-@mock_s3_deprecated
-def test_policy():
-    conn = boto.connect_s3()
-    bucket_name = 'mybucket'
-    bucket = conn.create_bucket(bucket_name)
-
-    policy = json.dumps({
-        "Version": "2012-10-17",
-        "Id": "PutObjPolicy",
-        "Statement": [
-            {
-                "Sid": "DenyUnEncryptedObjectUploads",
-                "Effect": "Deny",
-                "Principal": "*",
-                "Action": "s3:PutObject",
-                "Resource": "arn:aws:s3:::{bucket_name}/*".format(bucket_name=bucket_name),
-                "Condition": {
-                    "StringNotEquals": {
-                        "s3:x-amz-server-side-encryption": "aws:kms"
-                    }
-                }
-            }
-        ]
-    })
-
-    with assert_raises(S3ResponseError) as err:
-        bucket.get_policy()
-
-    ex = err.exception
-    ex.box_usage.should.be.none
-    ex.error_code.should.equal('NoSuchBucketPolicy')
-    ex.message.should.equal('The bucket policy does not exist')
-    ex.reason.should.equal('Not Found')
-    ex.resource.should.be.none
-    ex.status.should.equal(404)
-    ex.body.should.contain(bucket_name)
-    ex.request_id.should_not.be.none
-
-    bucket.set_policy(policy).should.be.true
-
-    bucket = conn.get_bucket(bucket_name)
-
-    bucket.get_policy().decode('utf-8').should.equal(policy)
-
-    bucket.delete_policy()
-
-    with assert_raises(S3ResponseError) as err:
-        bucket.get_policy()
-
-
-@mock_s3_deprecated
-def test_website_configuration_xml():
-    conn = boto.connect_s3()
-    bucket = conn.create_bucket('test-bucket')
-    bucket.set_website_configuration_xml(TEST_XML)
-    bucket.get_website_configuration_xml().should.equal(TEST_XML)
-
-
-@mock_s3_deprecated
-def test_key_with_trailing_slash_in_ordinary_calling_format():
-    conn = boto.connect_s3(
-        'access_key',
-        'secret_key',
-        calling_format=boto.s3.connection.OrdinaryCallingFormat()
-    )
-    bucket = conn.create_bucket('test_bucket_name')
-
-    key_name = 'key_with_slash/'
-
-    key = Key(bucket, key_name)
-    key.set_contents_from_string('some value')
-
-    [k.name for k in bucket.get_all_keys()].should.contain(key_name)
-
-
-"""
-boto3
-"""
-
-
-@mock_s3
-def test_boto3_key_etag():
-    s3 = boto3.client('s3', region_name='us-east-1')
-    s3.create_bucket(Bucket='mybucket')
-    s3.put_object(Bucket='mybucket', Key='steve', Body=b'is awesome')
-    resp = s3.get_object(Bucket='mybucket', Key='steve')
-    resp['ETag'].should.equal('"d32bda93738f7e03adb22e66c90fbc04"')
-
-
-@mock_s3
-def test_website_redirect_location():
-    s3 = boto3.client('s3', region_name='us-east-1')
-    s3.create_bucket(Bucket='mybucket')
-
-    s3.put_object(Bucket='mybucket', Key='steve', Body=b'is awesome')
-    resp = s3.get_object(Bucket='mybucket', Key='steve')
-    resp.get('WebsiteRedirectLocation').should.be.none
-
-    url = 'https://github.com/spulec/moto'
-    s3.put_object(Bucket='mybucket', Key='steve', Body=b'is awesome', WebsiteRedirectLocation=url)
-    resp = s3.get_object(Bucket='mybucket', Key='steve')
-    resp['WebsiteRedirectLocation'].should.equal(url)
-
-
-@mock_s3
-def test_boto3_list_keys_xml_escaped():
-    s3 = boto3.client('s3', region_name='us-east-1')
-    s3.create_bucket(Bucket='mybucket')
-    key_name = 'Q&A.txt'
-    s3.put_object(Bucket='mybucket', Key=key_name, Body=b'is awesome')
-
-    resp = s3.list_objects_v2(Bucket='mybucket', Prefix=key_name)
-
-    
assert resp['Contents'][0]['Key'] == key_name - assert resp['KeyCount'] == 1 - assert resp['MaxKeys'] == 1000 - assert resp['Prefix'] == key_name - assert resp['IsTruncated'] == False - assert 'Delimiter' not in resp - assert 'StartAfter' not in resp - assert 'NextContinuationToken' not in resp - assert 'Owner' not in resp['Contents'][0] - - -@mock_s3 -def test_boto3_list_objects_v2_truncated_response(): - s3 = boto3.client('s3', region_name='us-east-1') - s3.create_bucket(Bucket='mybucket') - s3.put_object(Bucket='mybucket', Key='one', Body=b'1') - s3.put_object(Bucket='mybucket', Key='two', Body=b'22') - s3.put_object(Bucket='mybucket', Key='three', Body=b'333') - - # First list - resp = s3.list_objects_v2(Bucket='mybucket', MaxKeys=1) - listed_object = resp['Contents'][0] - - assert listed_object['Key'] == 'one' - assert resp['MaxKeys'] == 1 - assert resp['Prefix'] == '' - assert resp['KeyCount'] == 1 - assert resp['IsTruncated'] == True - assert 'Delimiter' not in resp - assert 'StartAfter' not in resp - assert 'Owner' not in listed_object # owner info was not requested - - next_token = resp['NextContinuationToken'] - - # Second list - resp = s3.list_objects_v2( - Bucket='mybucket', MaxKeys=1, ContinuationToken=next_token) - listed_object = resp['Contents'][0] - - assert listed_object['Key'] == 'three' - assert resp['MaxKeys'] == 1 - assert resp['Prefix'] == '' - assert resp['KeyCount'] == 1 - assert resp['IsTruncated'] == True - assert 'Delimiter' not in resp - assert 'StartAfter' not in resp - assert 'Owner' not in listed_object - - next_token = resp['NextContinuationToken'] - - # Third list - resp = s3.list_objects_v2( - Bucket='mybucket', MaxKeys=1, ContinuationToken=next_token) - listed_object = resp['Contents'][0] - - assert listed_object['Key'] == 'two' - assert resp['MaxKeys'] == 1 - assert resp['Prefix'] == '' - assert resp['KeyCount'] == 1 - assert resp['IsTruncated'] == False - assert 'Delimiter' not in resp - assert 'Owner' not in listed_object - assert 'StartAfter' not in resp - assert 'NextContinuationToken' not in resp - - -@mock_s3 -def test_boto3_list_objects_v2_truncated_response_start_after(): - s3 = boto3.client('s3', region_name='us-east-1') - s3.create_bucket(Bucket='mybucket') - s3.put_object(Bucket='mybucket', Key='one', Body=b'1') - s3.put_object(Bucket='mybucket', Key='two', Body=b'22') - s3.put_object(Bucket='mybucket', Key='three', Body=b'333') - - # First list - resp = s3.list_objects_v2(Bucket='mybucket', MaxKeys=1, StartAfter='one') - listed_object = resp['Contents'][0] - - assert listed_object['Key'] == 'three' - assert resp['MaxKeys'] == 1 - assert resp['Prefix'] == '' - assert resp['KeyCount'] == 1 - assert resp['IsTruncated'] == True - assert resp['StartAfter'] == 'one' - assert 'Delimiter' not in resp - assert 'Owner' not in listed_object - - next_token = resp['NextContinuationToken'] - - # Second list - # The ContinuationToken must take precedence over StartAfter. - resp = s3.list_objects_v2(Bucket='mybucket', MaxKeys=1, StartAfter='one', - ContinuationToken=next_token) - listed_object = resp['Contents'][0] - - assert listed_object['Key'] == 'two' - assert resp['MaxKeys'] == 1 - assert resp['Prefix'] == '' - assert resp['KeyCount'] == 1 - assert resp['IsTruncated'] == False - # When ContinuationToken is given, StartAfter is ignored. This also means - # AWS does not return it in the response. 
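-    # Keys list in lexicographic order ('one', 'three', 'two'), so this final page is 'two'.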
- assert 'StartAfter' not in resp - assert 'Delimiter' not in resp - assert 'Owner' not in listed_object - - -@mock_s3 -def test_boto3_list_objects_v2_fetch_owner(): - s3 = boto3.client('s3', region_name='us-east-1') - s3.create_bucket(Bucket='mybucket') - s3.put_object(Bucket='mybucket', Key='one', Body=b'11') - - resp = s3.list_objects_v2(Bucket='mybucket', FetchOwner=True) - owner = resp['Contents'][0]['Owner'] - - assert 'ID' in owner - assert 'DisplayName' in owner - assert len(owner.keys()) == 2 - - -@mock_s3 -def test_boto3_bucket_create(): - s3 = boto3.resource('s3', region_name='us-east-1') - s3.create_bucket(Bucket="blah") - - s3.Object('blah', 'hello.txt').put(Body="some text") - - s3.Object('blah', 'hello.txt').get()['Body'].read().decode( - "utf-8").should.equal("some text") - - -@mock_s3 -def test_bucket_create_duplicate(): - s3 = boto3.resource('s3', region_name='us-west-2') - s3.create_bucket(Bucket="blah", CreateBucketConfiguration={ - 'LocationConstraint': 'us-west-2', - }) - with assert_raises(ClientError) as exc: - s3.create_bucket( - Bucket="blah", - CreateBucketConfiguration={ - 'LocationConstraint': 'us-west-2', - } - ) - exc.exception.response['Error']['Code'].should.equal('BucketAlreadyExists') - - -@mock_s3 -def test_boto3_bucket_create_eu_central(): - s3 = boto3.resource('s3', region_name='eu-central-1') - s3.create_bucket(Bucket="blah") - - s3.Object('blah', 'hello.txt').put(Body="some text") - - s3.Object('blah', 'hello.txt').get()['Body'].read().decode( - "utf-8").should.equal("some text") - - -@mock_s3 -def test_boto3_head_object(): - s3 = boto3.resource('s3', region_name='us-east-1') - s3.create_bucket(Bucket="blah") - - s3.Object('blah', 'hello.txt').put(Body="some text") - - s3.Object('blah', 'hello.txt').meta.client.head_object( - Bucket='blah', Key='hello.txt') - - with assert_raises(ClientError) as e: - s3.Object('blah', 'hello2.txt').meta.client.head_object( - Bucket='blah', Key='hello_bad.txt') - e.exception.response['Error']['Code'].should.equal('404') - - -@mock_s3 -def test_boto3_bucket_deletion(): - cli = boto3.client('s3', region_name='us-east-1') - cli.create_bucket(Bucket="foobar") - - cli.put_object(Bucket="foobar", Key="the-key", Body="some value") - - # Try to delete a bucket that still has keys - cli.delete_bucket.when.called_with(Bucket="foobar").should.throw( - cli.exceptions.ClientError, - ('An error occurred (BucketNotEmpty) when calling the DeleteBucket operation: ' - 'The bucket you tried to delete is not empty')) - - cli.delete_object(Bucket="foobar", Key="the-key") - cli.delete_bucket(Bucket="foobar") - - # Get non-existing bucket - cli.head_bucket.when.called_with(Bucket="foobar").should.throw( - cli.exceptions.ClientError, - "An error occurred (404) when calling the HeadBucket operation: Not Found") - - # Delete non-existing bucket - cli.delete_bucket.when.called_with(Bucket="foobar").should.throw(cli.exceptions.NoSuchBucket) - - -@mock_s3 -def test_boto3_get_object(): - s3 = boto3.resource('s3', region_name='us-east-1') - s3.create_bucket(Bucket="blah") - - s3.Object('blah', 'hello.txt').put(Body="some text") - - s3.Object('blah', 'hello.txt').meta.client.head_object( - Bucket='blah', Key='hello.txt') - - with assert_raises(ClientError) as e: - s3.Object('blah', 'hello2.txt').get() - - e.exception.response['Error']['Code'].should.equal('NoSuchKey') - - -@mock_s3 -def test_boto3_head_object_with_versioning(): - s3 = boto3.resource('s3', region_name='us-east-1') - bucket = s3.create_bucket(Bucket='blah') - 
bucket.Versioning().enable() - - old_content = 'some text' - new_content = 'some new text' - s3.Object('blah', 'hello.txt').put(Body=old_content) - s3.Object('blah', 'hello.txt').put(Body=new_content) - - head_object = s3.Object('blah', 'hello.txt').meta.client.head_object( - Bucket='blah', Key='hello.txt') - head_object['VersionId'].should.equal('1') - head_object['ContentLength'].should.equal(len(new_content)) - - old_head_object = s3.Object('blah', 'hello.txt').meta.client.head_object( - Bucket='blah', Key='hello.txt', VersionId='0') - old_head_object['VersionId'].should.equal('0') - old_head_object['ContentLength'].should.equal(len(old_content)) - - -@mock_s3 -def test_boto3_copy_object_with_versioning(): - client = boto3.client('s3', region_name='us-east-1') - - client.create_bucket(Bucket='blah', CreateBucketConfiguration={'LocationConstraint': 'eu-west-1'}) - client.put_bucket_versioning(Bucket='blah', VersioningConfiguration={'Status': 'Enabled'}) - - client.put_object(Bucket='blah', Key='test1', Body=b'test1') - client.put_object(Bucket='blah', Key='test2', Body=b'test2') - - obj1_version = client.get_object(Bucket='blah', Key='test1')['VersionId'] - obj2_version = client.get_object(Bucket='blah', Key='test2')['VersionId'] - - # Versions should be the same - obj1_version.should.equal(obj2_version) - - client.copy_object(CopySource={'Bucket': 'blah', 'Key': 'test1'}, Bucket='blah', Key='test2') - obj2_version_new = client.get_object(Bucket='blah', Key='test2')['VersionId'] - - # Version should be different to previous version - obj2_version_new.should_not.equal(obj2_version) - - -@mock_s3 -def test_boto3_deleted_versionings_list(): - client = boto3.client('s3', region_name='us-east-1') - - client.create_bucket(Bucket='blah') - client.put_bucket_versioning(Bucket='blah', VersioningConfiguration={'Status': 'Enabled'}) - - client.put_object(Bucket='blah', Key='test1', Body=b'test1') - client.put_object(Bucket='blah', Key='test2', Body=b'test2') - client.delete_objects(Bucket='blah', Delete={'Objects': [{'Key': 'test1'}]}) - - listed = client.list_objects_v2(Bucket='blah') - assert len(listed['Contents']) == 1 - - -@mock_s3 -def test_boto3_delete_versioned_bucket(): - client = boto3.client('s3', region_name='us-east-1') - - client.create_bucket(Bucket='blah') - client.put_bucket_versioning(Bucket='blah', VersioningConfiguration={'Status': 'Enabled'}) - - resp = client.put_object(Bucket='blah', Key='test1', Body=b'test1') - client.delete_object(Bucket='blah', Key='test1', VersionId=resp["VersionId"]) - - client.delete_bucket(Bucket='blah') - - -@mock_s3 -def test_boto3_head_object_if_modified_since(): - s3 = boto3.client('s3', region_name='us-east-1') - bucket_name = "blah" - s3.create_bucket(Bucket=bucket_name) - - key = 'hello.txt' - - s3.put_object( - Bucket=bucket_name, - Key=key, - Body='test' - ) - - with assert_raises(botocore.exceptions.ClientError) as err: - s3.head_object( - Bucket=bucket_name, - Key=key, - IfModifiedSince=datetime.datetime.utcnow() + datetime.timedelta(hours=1) - ) - e = err.exception - e.response['Error'].should.equal({'Code': '304', 'Message': 'Not Modified'}) - - -@mock_s3 -@reduced_min_part_size -def test_boto3_multipart_etag(): - # Create Bucket so that test can run - s3 = boto3.client('s3', region_name='us-east-1') - s3.create_bucket(Bucket='mybucket') - - upload_id = s3.create_multipart_upload( - Bucket='mybucket', Key='the-key')['UploadId'] - part1 = b'0' * REDUCED_PART_SIZE - etags = [] - etags.append( - s3.upload_part(Bucket='mybucket', 
Key='the-key', PartNumber=1, - UploadId=upload_id, Body=part1)['ETag']) - # last part, can be less than 5 MB - part2 = b'1' - etags.append( - s3.upload_part(Bucket='mybucket', Key='the-key', PartNumber=2, - UploadId=upload_id, Body=part2)['ETag']) - s3.complete_multipart_upload( - Bucket='mybucket', Key='the-key', UploadId=upload_id, - MultipartUpload={'Parts': [{'ETag': etag, 'PartNumber': i} - for i, etag in enumerate(etags, 1)]}) - # we should get both parts as the key contents - resp = s3.get_object(Bucket='mybucket', Key='the-key') - resp['ETag'].should.equal(EXPECTED_ETAG) - - -@mock_s3 -def test_boto3_put_object_with_tagging(): - s3 = boto3.client('s3', region_name='us-east-1') - bucket_name = 'mybucket' - key = 'key-with-tags' - s3.create_bucket(Bucket=bucket_name) - - s3.put_object( - Bucket=bucket_name, - Key=key, - Body='test', - Tagging='foo=bar', - ) - - resp = s3.get_object_tagging(Bucket=bucket_name, Key=key) - - resp['TagSet'].should.contain({'Key': 'foo', 'Value': 'bar'}) - - -@mock_s3 -def test_boto3_put_bucket_tagging(): - s3 = boto3.client("s3", region_name="us-east-1") - bucket_name = "mybucket" - s3.create_bucket(Bucket=bucket_name) - - # With 1 tag: - resp = s3.put_bucket_tagging(Bucket=bucket_name, - Tagging={ - "TagSet": [ - { - "Key": "TagOne", - "Value": "ValueOne" - } - ] - }) - resp['ResponseMetadata']['HTTPStatusCode'].should.equal(200) - - # With multiple tags: - resp = s3.put_bucket_tagging(Bucket=bucket_name, - Tagging={ - "TagSet": [ - { - "Key": "TagOne", - "Value": "ValueOne" - }, - { - "Key": "TagTwo", - "Value": "ValueTwo" - } - ] - }) - - resp['ResponseMetadata']['HTTPStatusCode'].should.equal(200) - - # No tags is also OK: - resp = s3.put_bucket_tagging(Bucket=bucket_name, Tagging={ - "TagSet": [] - }) - resp['ResponseMetadata']['HTTPStatusCode'].should.equal(200) - - -@mock_s3 -def test_boto3_get_bucket_tagging(): - s3 = boto3.client("s3", region_name="us-east-1") - bucket_name = "mybucket" - s3.create_bucket(Bucket=bucket_name) - s3.put_bucket_tagging(Bucket=bucket_name, - Tagging={ - "TagSet": [ - { - "Key": "TagOne", - "Value": "ValueOne" - }, - { - "Key": "TagTwo", - "Value": "ValueTwo" - } - ] - }) - - # Get the tags for the bucket: - resp = s3.get_bucket_tagging(Bucket=bucket_name) - resp['ResponseMetadata']['HTTPStatusCode'].should.equal(200) - len(resp["TagSet"]).should.equal(2) - - # With no tags: - s3.put_bucket_tagging(Bucket=bucket_name, Tagging={ - "TagSet": [] - }) - - with assert_raises(ClientError) as err: - s3.get_bucket_tagging(Bucket=bucket_name) - - e = err.exception - e.response["Error"]["Code"].should.equal("NoSuchTagSet") - e.response["Error"]["Message"].should.equal("The TagSet does not exist") - - -@mock_s3 -def test_boto3_delete_bucket_tagging(): - s3 = boto3.client("s3", region_name="us-east-1") - bucket_name = "mybucket" - s3.create_bucket(Bucket=bucket_name) - - s3.put_bucket_tagging(Bucket=bucket_name, - Tagging={ - "TagSet": [ - { - "Key": "TagOne", - "Value": "ValueOne" - }, - { - "Key": "TagTwo", - "Value": "ValueTwo" - } - ] - }) - - resp = s3.delete_bucket_tagging(Bucket=bucket_name) - resp['ResponseMetadata']['HTTPStatusCode'].should.equal(204) - - with assert_raises(ClientError) as err: - s3.get_bucket_tagging(Bucket=bucket_name) - - e = err.exception - e.response["Error"]["Code"].should.equal("NoSuchTagSet") - e.response["Error"]["Message"].should.equal("The TagSet does not exist") - - -@mock_s3 -def test_boto3_put_bucket_cors(): - s3 = boto3.client("s3", region_name="us-east-1") - bucket_name = "mybucket" - 
s3.create_bucket(Bucket=bucket_name) - - resp = s3.put_bucket_cors(Bucket=bucket_name, CORSConfiguration={ - "CORSRules": [ - { - "AllowedOrigins": [ - "*" - ], - "AllowedMethods": [ - "GET", - "POST" - ], - "AllowedHeaders": [ - "Authorization" - ], - "ExposeHeaders": [ - "x-amz-request-id" - ], - "MaxAgeSeconds": 123 - }, - { - "AllowedOrigins": [ - "*" - ], - "AllowedMethods": [ - "PUT" - ], - "AllowedHeaders": [ - "Authorization" - ], - "ExposeHeaders": [ - "x-amz-request-id" - ], - "MaxAgeSeconds": 123 - } - ] - }) - - resp['ResponseMetadata']['HTTPStatusCode'].should.equal(200) - - with assert_raises(ClientError) as err: - s3.put_bucket_cors(Bucket=bucket_name, CORSConfiguration={ - "CORSRules": [ - { - "AllowedOrigins": [ - "*" - ], - "AllowedMethods": [ - "NOTREAL", - "POST" - ] - } - ] - }) - e = err.exception - e.response["Error"]["Code"].should.equal("InvalidRequest") - e.response["Error"]["Message"].should.equal("Found unsupported HTTP method in CORS config. " - "Unsupported method is NOTREAL") - - with assert_raises(ClientError) as err: - s3.put_bucket_cors(Bucket=bucket_name, CORSConfiguration={ - "CORSRules": [] - }) - e = err.exception - e.response["Error"]["Code"].should.equal("MalformedXML") - - # And 101: - many_rules = [{"AllowedOrigins": ["*"], "AllowedMethods": ["GET"]}] * 101 - with assert_raises(ClientError) as err: - s3.put_bucket_cors(Bucket=bucket_name, CORSConfiguration={ - "CORSRules": many_rules - }) - e = err.exception - e.response["Error"]["Code"].should.equal("MalformedXML") - - -@mock_s3 -def test_boto3_get_bucket_cors(): - s3 = boto3.client("s3", region_name="us-east-1") - bucket_name = "mybucket" - s3.create_bucket(Bucket=bucket_name) - - # Without CORS: - with assert_raises(ClientError) as err: - s3.get_bucket_cors(Bucket=bucket_name) - - e = err.exception - e.response["Error"]["Code"].should.equal("NoSuchCORSConfiguration") - e.response["Error"]["Message"].should.equal("The CORS configuration does not exist") - - s3.put_bucket_cors(Bucket=bucket_name, CORSConfiguration={ - "CORSRules": [ - { - "AllowedOrigins": [ - "*" - ], - "AllowedMethods": [ - "GET", - "POST" - ], - "AllowedHeaders": [ - "Authorization" - ], - "ExposeHeaders": [ - "x-amz-request-id" - ], - "MaxAgeSeconds": 123 - }, - { - "AllowedOrigins": [ - "*" - ], - "AllowedMethods": [ - "PUT" - ], - "AllowedHeaders": [ - "Authorization" - ], - "ExposeHeaders": [ - "x-amz-request-id" - ], - "MaxAgeSeconds": 123 - } - ] - }) - - resp = s3.get_bucket_cors(Bucket=bucket_name) - resp['ResponseMetadata']['HTTPStatusCode'].should.equal(200) - len(resp["CORSRules"]).should.equal(2) - - -@mock_s3 -def test_boto3_delete_bucket_cors(): - s3 = boto3.client("s3", region_name="us-east-1") - bucket_name = "mybucket" - s3.create_bucket(Bucket=bucket_name) - s3.put_bucket_cors(Bucket=bucket_name, CORSConfiguration={ - "CORSRules": [ - { - "AllowedOrigins": [ - "*" - ], - "AllowedMethods": [ - "GET" - ] - } - ] - }) - - resp = s3.delete_bucket_cors(Bucket=bucket_name) - resp['ResponseMetadata']['HTTPStatusCode'].should.equal(204) - - # Verify deletion: - with assert_raises(ClientError) as err: - s3.get_bucket_cors(Bucket=bucket_name) - - e = err.exception - e.response["Error"]["Code"].should.equal("NoSuchCORSConfiguration") - e.response["Error"]["Message"].should.equal("The CORS configuration does not exist") - - -@mock_s3 -def test_put_bucket_acl_body(): - s3 = boto3.client("s3", region_name="us-east-1") - s3.create_bucket(Bucket="bucket") - bucket_owner = s3.get_bucket_acl(Bucket="bucket")["Owner"] - 
s3.put_bucket_acl(Bucket="bucket", AccessControlPolicy={ - "Grants": [ - { - "Grantee": { - "URI": "http://acs.amazonaws.com/groups/s3/LogDelivery", - "Type": "Group" - }, - "Permission": "WRITE" - }, - { - "Grantee": { - "URI": "http://acs.amazonaws.com/groups/s3/LogDelivery", - "Type": "Group" - }, - "Permission": "READ_ACP" - } - ], - "Owner": bucket_owner - }) - - result = s3.get_bucket_acl(Bucket="bucket") - assert len(result["Grants"]) == 2 - for g in result["Grants"]: - assert g["Grantee"]["URI"] == "http://acs.amazonaws.com/groups/s3/LogDelivery" - assert g["Grantee"]["Type"] == "Group" - assert g["Permission"] in ["WRITE", "READ_ACP"] - - # With one: - s3.put_bucket_acl(Bucket="bucket", AccessControlPolicy={ - "Grants": [ - { - "Grantee": { - "URI": "http://acs.amazonaws.com/groups/s3/LogDelivery", - "Type": "Group" - }, - "Permission": "WRITE" - } - ], - "Owner": bucket_owner - }) - result = s3.get_bucket_acl(Bucket="bucket") - assert len(result["Grants"]) == 1 - - # With no owner: - with assert_raises(ClientError) as err: - s3.put_bucket_acl(Bucket="bucket", AccessControlPolicy={ - "Grants": [ - { - "Grantee": { - "URI": "http://acs.amazonaws.com/groups/s3/LogDelivery", - "Type": "Group" - }, - "Permission": "WRITE" - } - ] - }) - assert err.exception.response["Error"]["Code"] == "MalformedACLError" - - # With incorrect permission: - with assert_raises(ClientError) as err: - s3.put_bucket_acl(Bucket="bucket", AccessControlPolicy={ - "Grants": [ - { - "Grantee": { - "URI": "http://acs.amazonaws.com/groups/s3/LogDelivery", - "Type": "Group" - }, - "Permission": "lskjflkasdjflkdsjfalisdjflkdsjf" - } - ], - "Owner": bucket_owner - }) - assert err.exception.response["Error"]["Code"] == "MalformedACLError" - - # Clear the ACLs: - result = s3.put_bucket_acl(Bucket="bucket", AccessControlPolicy={"Grants": [], "Owner": bucket_owner}) - assert not result.get("Grants") - - -@mock_s3 -def test_put_bucket_notification(): - s3 = boto3.client("s3", region_name="us-east-1") - s3.create_bucket(Bucket="bucket") - - # With no configuration: - result = s3.get_bucket_notification(Bucket="bucket") - assert not result.get("TopicConfigurations") - assert not result.get("QueueConfigurations") - assert not result.get("LambdaFunctionConfigurations") - - # Place proper topic configuration: - s3.put_bucket_notification_configuration(Bucket="bucket", - NotificationConfiguration={ - "TopicConfigurations": [ - { - "TopicArn": "arn:aws:sns:us-east-1:012345678910:mytopic", - "Events": [ - "s3:ObjectCreated:*", - "s3:ObjectRemoved:*" - ] - }, - { - "TopicArn": "arn:aws:sns:us-east-1:012345678910:myothertopic", - "Events": [ - "s3:ObjectCreated:*" - ], - "Filter": { - "Key": { - "FilterRules": [ - { - "Name": "prefix", - "Value": "images/" - }, - { - "Name": "suffix", - "Value": "png" - } - ] - } - } - } - ] - }) - - # Verify to completion: - result = s3.get_bucket_notification_configuration(Bucket="bucket") - assert len(result["TopicConfigurations"]) == 2 - assert not result.get("QueueConfigurations") - assert not result.get("LambdaFunctionConfigurations") - assert result["TopicConfigurations"][0]["TopicArn"] == "arn:aws:sns:us-east-1:012345678910:mytopic" - assert result["TopicConfigurations"][1]["TopicArn"] == "arn:aws:sns:us-east-1:012345678910:myothertopic" - assert len(result["TopicConfigurations"][0]["Events"]) == 2 - assert len(result["TopicConfigurations"][1]["Events"]) == 1 - assert result["TopicConfigurations"][0]["Events"][0] == "s3:ObjectCreated:*" - assert 
result["TopicConfigurations"][0]["Events"][1] == "s3:ObjectRemoved:*" - assert result["TopicConfigurations"][1]["Events"][0] == "s3:ObjectCreated:*" - assert result["TopicConfigurations"][0]["Id"] - assert result["TopicConfigurations"][1]["Id"] - assert not result["TopicConfigurations"][0].get("Filter") - assert len(result["TopicConfigurations"][1]["Filter"]["Key"]["FilterRules"]) == 2 - assert result["TopicConfigurations"][1]["Filter"]["Key"]["FilterRules"][0]["Name"] == "prefix" - assert result["TopicConfigurations"][1]["Filter"]["Key"]["FilterRules"][0]["Value"] == "images/" - assert result["TopicConfigurations"][1]["Filter"]["Key"]["FilterRules"][1]["Name"] == "suffix" - assert result["TopicConfigurations"][1]["Filter"]["Key"]["FilterRules"][1]["Value"] == "png" - - # Place proper queue configuration: - s3.put_bucket_notification_configuration(Bucket="bucket", - NotificationConfiguration={ - "QueueConfigurations": [ - { - "Id": "SomeID", - "QueueArn": "arn:aws:sqs:us-east-1:012345678910:myQueue", - "Events": ["s3:ObjectCreated:*"], - "Filter": { - "Key": { - "FilterRules": [ - { - "Name": "prefix", - "Value": "images/" - } - ] - } - } - } - ] - }) - result = s3.get_bucket_notification_configuration(Bucket="bucket") - assert len(result["QueueConfigurations"]) == 1 - assert not result.get("TopicConfigurations") - assert not result.get("LambdaFunctionConfigurations") - assert result["QueueConfigurations"][0]["Id"] == "SomeID" - assert result["QueueConfigurations"][0]["QueueArn"] == "arn:aws:sqs:us-east-1:012345678910:myQueue" - assert result["QueueConfigurations"][0]["Events"][0] == "s3:ObjectCreated:*" - assert len(result["QueueConfigurations"][0]["Events"]) == 1 - assert len(result["QueueConfigurations"][0]["Filter"]["Key"]["FilterRules"]) == 1 - assert result["QueueConfigurations"][0]["Filter"]["Key"]["FilterRules"][0]["Name"] == "prefix" - assert result["QueueConfigurations"][0]["Filter"]["Key"]["FilterRules"][0]["Value"] == "images/" - - # Place proper Lambda configuration: - s3.put_bucket_notification_configuration(Bucket="bucket", - NotificationConfiguration={ - "LambdaFunctionConfigurations": [ - { - "LambdaFunctionArn": - "arn:aws:lambda:us-east-1:012345678910:function:lambda", - "Events": ["s3:ObjectCreated:*"], - "Filter": { - "Key": { - "FilterRules": [ - { - "Name": "prefix", - "Value": "images/" - } - ] - } - } - } - ] - }) - result = s3.get_bucket_notification_configuration(Bucket="bucket") - assert len(result["LambdaFunctionConfigurations"]) == 1 - assert not result.get("TopicConfigurations") - assert not result.get("QueueConfigurations") - assert result["LambdaFunctionConfigurations"][0]["Id"] - assert result["LambdaFunctionConfigurations"][0]["LambdaFunctionArn"] == \ - "arn:aws:lambda:us-east-1:012345678910:function:lambda" - assert result["LambdaFunctionConfigurations"][0]["Events"][0] == "s3:ObjectCreated:*" - assert len(result["LambdaFunctionConfigurations"][0]["Events"]) == 1 - assert len(result["LambdaFunctionConfigurations"][0]["Filter"]["Key"]["FilterRules"]) == 1 - assert result["LambdaFunctionConfigurations"][0]["Filter"]["Key"]["FilterRules"][0]["Name"] == "prefix" - assert result["LambdaFunctionConfigurations"][0]["Filter"]["Key"]["FilterRules"][0]["Value"] == "images/" - - # And with all 3 set: - s3.put_bucket_notification_configuration(Bucket="bucket", - NotificationConfiguration={ - "TopicConfigurations": [ - { - "TopicArn": "arn:aws:sns:us-east-1:012345678910:mytopic", - "Events": [ - "s3:ObjectCreated:*", - "s3:ObjectRemoved:*" - ] - } - ], - 
"LambdaFunctionConfigurations": [ - { - "LambdaFunctionArn": - "arn:aws:lambda:us-east-1:012345678910:function:lambda", - "Events": ["s3:ObjectCreated:*"] - } - ], - "QueueConfigurations": [ - { - "QueueArn": "arn:aws:sqs:us-east-1:012345678910:myQueue", - "Events": ["s3:ObjectCreated:*"] - } - ] - }) - result = s3.get_bucket_notification_configuration(Bucket="bucket") - assert len(result["LambdaFunctionConfigurations"]) == 1 - assert len(result["TopicConfigurations"]) == 1 - assert len(result["QueueConfigurations"]) == 1 - - # And clear it out: - s3.put_bucket_notification_configuration(Bucket="bucket", NotificationConfiguration={}) - result = s3.get_bucket_notification_configuration(Bucket="bucket") - assert not result.get("TopicConfigurations") - assert not result.get("QueueConfigurations") - assert not result.get("LambdaFunctionConfigurations") - - -@mock_s3 -def test_put_bucket_notification_errors(): - s3 = boto3.client("s3", region_name="us-east-1") - s3.create_bucket(Bucket="bucket") - - # With incorrect ARNs: - for tech, arn in [("Queue", "sqs"), ("Topic", "sns"), ("LambdaFunction", "lambda")]: - with assert_raises(ClientError) as err: - s3.put_bucket_notification_configuration(Bucket="bucket", - NotificationConfiguration={ - "{}Configurations".format(tech): [ - { - "{}Arn".format(tech): - "arn:aws:{}:us-east-1:012345678910:lksajdfkldskfj", - "Events": ["s3:ObjectCreated:*"] - } - ] - }) - - assert err.exception.response["Error"]["Code"] == "InvalidArgument" - assert err.exception.response["Error"]["Message"] == "The ARN is not well formed" - - # Region not the same as the bucket: - with assert_raises(ClientError) as err: - s3.put_bucket_notification_configuration(Bucket="bucket", - NotificationConfiguration={ - "QueueConfigurations": [ - { - "QueueArn": - "arn:aws:sqs:us-west-2:012345678910:lksajdfkldskfj", - "Events": ["s3:ObjectCreated:*"] - } - ] - }) - - assert err.exception.response["Error"]["Code"] == "InvalidArgument" - assert err.exception.response["Error"]["Message"] == \ - "The notification destination service region is not valid for the bucket location constraint" - - # Invalid event name: - with assert_raises(ClientError) as err: - s3.put_bucket_notification_configuration(Bucket="bucket", - NotificationConfiguration={ - "QueueConfigurations": [ - { - "QueueArn": - "arn:aws:sqs:us-east-1:012345678910:lksajdfkldskfj", - "Events": ["notarealeventname"] - } - ] - }) - assert err.exception.response["Error"]["Code"] == "InvalidArgument" - assert err.exception.response["Error"]["Message"] == "The event is not supported for notifications" - - -@mock_s3 -def test_boto3_put_bucket_logging(): - s3 = boto3.client("s3", region_name="us-east-1") - bucket_name = "mybucket" - log_bucket = "logbucket" - wrong_region_bucket = "wrongregionlogbucket" - s3.create_bucket(Bucket=bucket_name) - s3.create_bucket(Bucket=log_bucket) # Adding the ACL for log-delivery later... 
- s3.create_bucket(Bucket=wrong_region_bucket, CreateBucketConfiguration={"LocationConstraint": "us-west-2"}) - - # No logging config: - result = s3.get_bucket_logging(Bucket=bucket_name) - assert not result.get("LoggingEnabled") - - # A log-bucket that doesn't exist: - with assert_raises(ClientError) as err: - s3.put_bucket_logging(Bucket=bucket_name, BucketLoggingStatus={ - "LoggingEnabled": { - "TargetBucket": "IAMNOTREAL", - "TargetPrefix": "" - } - }) - assert err.exception.response["Error"]["Code"] == "InvalidTargetBucketForLogging" - - # A log-bucket that's missing the proper ACLs for LogDelivery: - with assert_raises(ClientError) as err: - s3.put_bucket_logging(Bucket=bucket_name, BucketLoggingStatus={ - "LoggingEnabled": { - "TargetBucket": log_bucket, - "TargetPrefix": "" - } - }) - assert err.exception.response["Error"]["Code"] == "InvalidTargetBucketForLogging" - assert "log-delivery" in err.exception.response["Error"]["Message"] - - # Add the proper "log-delivery" ACL to the log buckets: - bucket_owner = s3.get_bucket_acl(Bucket=log_bucket)["Owner"] - for bucket in [log_bucket, wrong_region_bucket]: - s3.put_bucket_acl(Bucket=bucket, AccessControlPolicy={ - "Grants": [ - { - "Grantee": { - "URI": "http://acs.amazonaws.com/groups/s3/LogDelivery", - "Type": "Group" - }, - "Permission": "WRITE" - }, - { - "Grantee": { - "URI": "http://acs.amazonaws.com/groups/s3/LogDelivery", - "Type": "Group" - }, - "Permission": "READ_ACP" - }, - { - "Grantee": { - "Type": "CanonicalUser", - "ID": bucket_owner["ID"] - }, - "Permission": "FULL_CONTROL" - } - ], - "Owner": bucket_owner - }) - - # A log-bucket that's in the wrong region: - with assert_raises(ClientError) as err: - s3.put_bucket_logging(Bucket=bucket_name, BucketLoggingStatus={ - "LoggingEnabled": { - "TargetBucket": wrong_region_bucket, - "TargetPrefix": "" - } - }) - assert err.exception.response["Error"]["Code"] == "CrossLocationLoggingProhibitted" - - # Correct logging: - s3.put_bucket_logging(Bucket=bucket_name, BucketLoggingStatus={ - "LoggingEnabled": { - "TargetBucket": log_bucket, - "TargetPrefix": "{}/".format(bucket_name) - } - }) - result = s3.get_bucket_logging(Bucket=bucket_name) - assert result["LoggingEnabled"]["TargetBucket"] == log_bucket - assert result["LoggingEnabled"]["TargetPrefix"] == "{}/".format(bucket_name) - assert not result["LoggingEnabled"].get("TargetGrants") - - # And disabling: - s3.put_bucket_logging(Bucket=bucket_name, BucketLoggingStatus={}) - assert not s3.get_bucket_logging(Bucket=bucket_name).get("LoggingEnabled") - - # And enabling with multiple target grants: - s3.put_bucket_logging(Bucket=bucket_name, BucketLoggingStatus={ - "LoggingEnabled": { - "TargetBucket": log_bucket, - "TargetPrefix": "{}/".format(bucket_name), - "TargetGrants": [ - { - "Grantee": { - "ID": "SOMEIDSTRINGHERE9238748923734823917498237489237409123840983274", - "Type": "CanonicalUser" - }, - "Permission": "READ" - }, - { - "Grantee": { - "ID": "SOMEIDSTRINGHERE9238748923734823917498237489237409123840983274", - "Type": "CanonicalUser" - }, - "Permission": "WRITE" - } - ] - } - }) - - result = s3.get_bucket_logging(Bucket=bucket_name) - assert len(result["LoggingEnabled"]["TargetGrants"]) == 2 - assert result["LoggingEnabled"]["TargetGrants"][0]["Grantee"]["ID"] == \ - "SOMEIDSTRINGHERE9238748923734823917498237489237409123840983274" - - # Test with just 1 grant: - s3.put_bucket_logging(Bucket=bucket_name, BucketLoggingStatus={ - "LoggingEnabled": { - "TargetBucket": log_bucket, - "TargetPrefix": "{}/".format(bucket_name), 
- "TargetGrants": [ - { - "Grantee": { - "ID": "SOMEIDSTRINGHERE9238748923734823917498237489237409123840983274", - "Type": "CanonicalUser" - }, - "Permission": "READ" - } - ] - } - }) - result = s3.get_bucket_logging(Bucket=bucket_name) - assert len(result["LoggingEnabled"]["TargetGrants"]) == 1 - - # With an invalid grant: - with assert_raises(ClientError) as err: - s3.put_bucket_logging(Bucket=bucket_name, BucketLoggingStatus={ - "LoggingEnabled": { - "TargetBucket": log_bucket, - "TargetPrefix": "{}/".format(bucket_name), - "TargetGrants": [ - { - "Grantee": { - "ID": "SOMEIDSTRINGHERE9238748923734823917498237489237409123840983274", - "Type": "CanonicalUser" - }, - "Permission": "NOTAREALPERM" - } - ] - } - }) - assert err.exception.response["Error"]["Code"] == "MalformedXML" - - -@mock_s3 -def test_boto3_put_object_tagging(): - s3 = boto3.client('s3', region_name='us-east-1') - bucket_name = 'mybucket' - key = 'key-with-tags' - s3.create_bucket(Bucket=bucket_name) - - with assert_raises(ClientError) as err: - s3.put_object_tagging( - Bucket=bucket_name, - Key=key, - Tagging={'TagSet': [ - {'Key': 'item1', 'Value': 'foo'}, - {'Key': 'item2', 'Value': 'bar'}, - ]} - ) - - e = err.exception - e.response['Error'].should.equal({ - 'Code': 'NoSuchKey', - 'Message': 'The specified key does not exist.', - 'RequestID': '7a62c49f-347e-4fc4-9331-6e8eEXAMPLE', - }) - - s3.put_object( - Bucket=bucket_name, - Key=key, - Body='test' - ) - - resp = s3.put_object_tagging( - Bucket=bucket_name, - Key=key, - Tagging={'TagSet': [ - {'Key': 'item1', 'Value': 'foo'}, - {'Key': 'item2', 'Value': 'bar'}, - ]} - ) - - resp['ResponseMetadata']['HTTPStatusCode'].should.equal(200) - - -@mock_s3 -def test_boto3_put_object_tagging_with_single_tag(): - s3 = boto3.client('s3', region_name='us-east-1') - bucket_name = 'mybucket' - key = 'key-with-tags' - s3.create_bucket(Bucket=bucket_name) - - s3.put_object( - Bucket=bucket_name, - Key=key, - Body='test' - ) - - resp = s3.put_object_tagging( - Bucket=bucket_name, - Key=key, - Tagging={'TagSet': [ - {'Key': 'item1', 'Value': 'foo'} - ]} - ) - - resp['ResponseMetadata']['HTTPStatusCode'].should.equal(200) - - -@mock_s3 -def test_boto3_get_object_tagging(): - s3 = boto3.client('s3', region_name='us-east-1') - bucket_name = 'mybucket' - key = 'key-with-tags' - s3.create_bucket(Bucket=bucket_name) - - s3.put_object( - Bucket=bucket_name, - Key=key, - Body='test' - ) - - resp = s3.get_object_tagging(Bucket=bucket_name, Key=key) - resp['TagSet'].should.have.length_of(0) - - resp = s3.put_object_tagging( - Bucket=bucket_name, - Key=key, - Tagging={'TagSet': [ - {'Key': 'item1', 'Value': 'foo'}, - {'Key': 'item2', 'Value': 'bar'}, - ]} - ) - resp = s3.get_object_tagging(Bucket=bucket_name, Key=key) - - resp['TagSet'].should.have.length_of(2) - resp['TagSet'].should.contain({'Key': 'item1', 'Value': 'foo'}) - resp['TagSet'].should.contain({'Key': 'item2', 'Value': 'bar'}) - - -@mock_s3 -def test_boto3_list_object_versions(): - s3 = boto3.client('s3', region_name='us-east-1') - bucket_name = 'mybucket' - key = 'key-with-versions' - s3.create_bucket(Bucket=bucket_name) - s3.put_bucket_versioning( - Bucket=bucket_name, - VersioningConfiguration={ - 'Status': 'Enabled' - } - ) - items = (six.b('v1'), six.b('v2')) - for body in items: - s3.put_object( - Bucket=bucket_name, - Key=key, - Body=body - ) - response = s3.list_object_versions( - Bucket=bucket_name - ) - # Two object versions should be returned - len(response['Versions']).should.equal(2) - keys = set([item['Key'] for item 
in response['Versions']]) - keys.should.equal({key}) - # Test latest object version is returned - response = s3.get_object(Bucket=bucket_name, Key=key) - response['Body'].read().should.equal(items[-1]) - - -@mock_s3 -def test_boto3_bad_prefix_list_object_versions(): - s3 = boto3.client('s3', region_name='us-east-1') - bucket_name = 'mybucket' - key = 'key-with-versions' - bad_prefix = 'key-that-does-not-exist' - s3.create_bucket(Bucket=bucket_name) - s3.put_bucket_versioning( - Bucket=bucket_name, - VersioningConfiguration={ - 'Status': 'Enabled' - } - ) - items = (six.b('v1'), six.b('v2')) - for body in items: - s3.put_object( - Bucket=bucket_name, - Key=key, - Body=body - ) - response = s3.list_object_versions( - Bucket=bucket_name, - Prefix=bad_prefix, - ) - response['ResponseMetadata']['HTTPStatusCode'].should.equal(200) - response.should_not.contain('Versions') - response.should_not.contain('DeleteMarkers') - - -@mock_s3 -def test_boto3_delete_markers(): - s3 = boto3.client('s3', region_name='us-east-1') - bucket_name = 'mybucket' - key = u'key-with-versions-and-unicode-ó' - s3.create_bucket(Bucket=bucket_name) - s3.put_bucket_versioning( - Bucket=bucket_name, - VersioningConfiguration={ - 'Status': 'Enabled' - } - ) - items = (six.b('v1'), six.b('v2')) - for body in items: - s3.put_object( - Bucket=bucket_name, - Key=key, - Body=body - ) - - s3.delete_objects(Bucket=bucket_name, Delete={'Objects': [{'Key': key}]}) - - with assert_raises(ClientError) as e: - s3.get_object( - Bucket=bucket_name, - Key=key - ) - e.response['Error']['Code'].should.equal('404') - - s3.delete_object( - Bucket=bucket_name, - Key=key, - VersionId='2' - ) - response = s3.get_object( - Bucket=bucket_name, - Key=key - ) - response['Body'].read().should.equal(items[-1]) - response = s3.list_object_versions( - Bucket=bucket_name - ) - response['Versions'].should.have.length_of(2) - - # We've asserted there is only 2 records so one is newest, one is oldest - latest = list(filter(lambda item: item['IsLatest'], response['Versions']))[0] - oldest = list(filter(lambda item: not item['IsLatest'], response['Versions']))[0] - - # Double check ordering of version ID's - latest['VersionId'].should.equal('1') - oldest['VersionId'].should.equal('0') - - # Double check the name is still unicode - latest['Key'].should.equal('key-with-versions-and-unicode-ó') - oldest['Key'].should.equal('key-with-versions-and-unicode-ó') - - -@mock_s3 -def test_boto3_multiple_delete_markers(): - s3 = boto3.client('s3', region_name='us-east-1') - bucket_name = 'mybucket' - key = u'key-with-versions-and-unicode-ó' - s3.create_bucket(Bucket=bucket_name) - s3.put_bucket_versioning( - Bucket=bucket_name, - VersioningConfiguration={ - 'Status': 'Enabled' - } - ) - items = (six.b('v1'), six.b('v2')) - for body in items: - s3.put_object( - Bucket=bucket_name, - Key=key, - Body=body - ) - - # Delete the object twice to add multiple delete markers - s3.delete_object(Bucket=bucket_name, Key=key) - s3.delete_object(Bucket=bucket_name, Key=key) - - response = s3.list_object_versions(Bucket=bucket_name) - response['DeleteMarkers'].should.have.length_of(2) - - with assert_raises(ClientError) as e: - s3.get_object( - Bucket=bucket_name, - Key=key - ) - e.response['Error']['Code'].should.equal('404') - - # Remove both delete markers to restore the object - s3.delete_object( - Bucket=bucket_name, - Key=key, - VersionId='2' - ) - s3.delete_object( - Bucket=bucket_name, - Key=key, - VersionId='3' - ) - - response = s3.get_object( - Bucket=bucket_name, - Key=key - 
-    )
-    response['Body'].read().should.equal(items[-1])
-    response = s3.list_object_versions(Bucket=bucket_name)
-    response['Versions'].should.have.length_of(2)
-
-    # We've asserted there are only 2 records, so one is newest and one is oldest
-    latest = list(filter(lambda item: item['IsLatest'], response['Versions']))[0]
-    oldest = list(filter(lambda item: not item['IsLatest'], response['Versions']))[0]
-
-    # Double check ordering of version IDs
-    latest['VersionId'].should.equal('1')
-    oldest['VersionId'].should.equal('0')
-
-    # Double check the name is still unicode
-    latest['Key'].should.equal('key-with-versions-and-unicode-ó')
-    oldest['Key'].should.equal('key-with-versions-and-unicode-ó')
-
-@mock_s3
-def test_get_stream_gzipped():
-    payload = b"this is some stuff here"
-
-    s3_client = boto3.client("s3", region_name='us-east-1')
-    s3_client.create_bucket(Bucket='moto-tests')
-    buffer_ = BytesIO()
-    with GzipFile(fileobj=buffer_, mode='w') as f:
-        f.write(payload)
-    payload_gz = buffer_.getvalue()
-
-    s3_client.put_object(
-        Bucket='moto-tests',
-        Key='keyname',
-        Body=payload_gz,
-        ContentEncoding='gzip',
-    )
-
-    obj = s3_client.get_object(
-        Bucket='moto-tests',
-        Key='keyname',
-    )
-    res = zlib.decompress(obj['Body'].read(), 16 + zlib.MAX_WBITS)
-    assert res == payload
-
-
-TEST_XML = """\
-<?xml version="1.0" encoding="UTF-8"?>
-<ns0:WebsiteConfiguration xmlns:ns0="http://s3.amazonaws.com/doc/2006-03-01/">
-    <ns0:IndexDocument>
-        <ns0:Suffix>index.html</ns0:Suffix>
-    </ns0:IndexDocument>
-    <ns0:RoutingRules>
-        <ns0:RoutingRule>
-            <ns0:Condition>
-                <ns0:KeyPrefixEquals>test/testing</ns0:KeyPrefixEquals>
-            </ns0:Condition>
-            <ns0:Redirect>
-                <ns0:ReplaceKeyWith>test.txt</ns0:ReplaceKeyWith>
-            </ns0:Redirect>
-        </ns0:RoutingRule>
-    </ns0:RoutingRules>
-</ns0:WebsiteConfiguration>
-"""
+# -*- coding: utf-8 -*-
+from __future__ import unicode_literals
+
+import datetime
+from six.moves.urllib.request import urlopen
+from six.moves.urllib.error import HTTPError
+from functools import wraps
+from gzip import GzipFile
+from io import BytesIO
+import zlib
+
+import json
+import boto
+import boto3
+from botocore.client import ClientError
+import botocore.exceptions
+from boto.exception import S3CreateError, S3ResponseError
+from botocore.handlers import disable_signing
+from boto.s3.connection import S3Connection
+from boto.s3.key import Key
+from freezegun import freeze_time
+import six
+import requests
+import tests.backport_assert_raises  # noqa
+from nose.tools import assert_raises
+
+import sure  # noqa
+
+from moto import settings, mock_s3, mock_s3_deprecated
+import moto.s3.models as s3model
+
+if settings.TEST_SERVER_MODE:
+    REDUCED_PART_SIZE = s3model.UPLOAD_PART_MIN_SIZE
+    EXPECTED_ETAG = '"140f92a6df9f9e415f74a1463bcee9bb-2"'
+else:
+    REDUCED_PART_SIZE = 256
+    EXPECTED_ETAG = '"66d1a1a2ed08fd05c137f316af4ff255-2"'
+
+
+def reduced_min_part_size(f):
+    """ speed up tests by temporarily making the multipart minimum part size
+    small
+    """
+    orig_size = s3model.UPLOAD_PART_MIN_SIZE
+
+    @wraps(f)
+    def wrapped(*args, **kwargs):
+        try:
+            s3model.UPLOAD_PART_MIN_SIZE = REDUCED_PART_SIZE
+            return f(*args, **kwargs)
+        finally:
+            s3model.UPLOAD_PART_MIN_SIZE = orig_size
+
+    return wrapped
+
+
+class MyModel(object):
+
+    def __init__(self, name, value):
+        self.name = name
+        self.value = value
+
+    def save(self):
+        s3 = boto3.client('s3', region_name='us-east-1')
+        s3.put_object(Bucket='mybucket', Key=self.name, Body=self.value)
+
+
+@mock_s3
+def test_my_model_save():
+    # Create Bucket so that test can run
+    conn = boto3.resource('s3', region_name='us-east-1')
+    conn.create_bucket(Bucket='mybucket')
+    ####################################
+
+    model_instance = MyModel('steve', 'is awesome')
+    model_instance.save()
+
+    body = conn.Object('mybucket', 'steve').get()['Body'].read().decode()
+
+    assert body == 'is awesome'
+
+
+@mock_s3
+def test_key_etag():
+    conn = boto3.resource('s3', region_name='us-east-1')
+    conn.create_bucket(Bucket='mybucket')
+
+    model_instance = MyModel('steve', 'is awesome')
+    model_instance.save()
+
+    conn.Bucket('mybucket').Object('steve').e_tag.should.equal(
+        '"d32bda93738f7e03adb22e66c90fbc04"')
+
+
+@mock_s3_deprecated
+def test_multipart_upload_too_small():
+    conn = boto.connect_s3('the_key', 'the_secret')
+    bucket = conn.create_bucket("foobar")
+
+    multipart = bucket.initiate_multipart_upload("the-key")
+    multipart.upload_part_from_file(BytesIO(b'hello'), 1)
+    multipart.upload_part_from_file(BytesIO(b'world'), 2)
+    # Multipart with total size under 5MB is refused
+    multipart.complete_upload.should.throw(S3ResponseError)
+
+
+@mock_s3_deprecated
+@reduced_min_part_size
+def test_multipart_upload():
+    conn = boto.connect_s3('the_key', 'the_secret')
+    bucket = conn.create_bucket("foobar")
+
+    multipart = bucket.initiate_multipart_upload("the-key")
+    part1 = b'0' * REDUCED_PART_SIZE
+    multipart.upload_part_from_file(BytesIO(part1), 1)
+    # last part, can be less than 5 MB
+    part2 = b'1'
+    multipart.upload_part_from_file(BytesIO(part2), 2)
+    multipart.complete_upload()
+    # we should get both parts as the key contents
+    bucket.get_key(
+        "the-key").get_contents_as_string().should.equal(part1 + part2)
+
+
+@mock_s3_deprecated
+@reduced_min_part_size
+def test_multipart_upload_out_of_order():
+    conn = boto.connect_s3('the_key', 'the_secret')
+    bucket = conn.create_bucket("foobar")
+
+    multipart = bucket.initiate_multipart_upload("the-key")
+    # last part, can be less than 5 MB
+    part2 = b'1'
+    multipart.upload_part_from_file(BytesIO(part2), 4)
+    part1 = b'0' * REDUCED_PART_SIZE
+    multipart.upload_part_from_file(BytesIO(part1), 2)
+    multipart.complete_upload()
+    # we should get both parts as the key contents
+    bucket.get_key(
+        "the-key").get_contents_as_string().should.equal(part1 + part2)
+
+
+@mock_s3_deprecated
+@reduced_min_part_size
+def test_multipart_upload_with_headers():
+    conn = boto.connect_s3('the_key', 'the_secret')
+    bucket = conn.create_bucket("foobar")
+
+    multipart = bucket.initiate_multipart_upload(
+        "the-key", metadata={"foo": "bar"})
+    part1 = b'0' * 10
+    multipart.upload_part_from_file(BytesIO(part1), 1)
+    multipart.complete_upload()
+
+    key = bucket.get_key("the-key")
+    key.metadata.should.equal({"foo": "bar"})
+
+
+@mock_s3_deprecated
+@reduced_min_part_size
+def test_multipart_upload_with_copy_key():
+    conn = boto.connect_s3('the_key', 'the_secret')
+    bucket = conn.create_bucket("foobar")
+    key = Key(bucket)
+    key.key = "original-key"
+    key.set_contents_from_string("key_value")
+
+    multipart = bucket.initiate_multipart_upload("the-key")
+    part1 = b'0' * REDUCED_PART_SIZE
+    multipart.upload_part_from_file(BytesIO(part1), 1)
+    multipart.copy_part_from_key("foobar", "original-key", 2, 0, 3)
+    multipart.complete_upload()
+    bucket.get_key(
+        "the-key").get_contents_as_string().should.equal(part1 + b"key_")
+
+
+@mock_s3_deprecated
+@reduced_min_part_size
+def test_multipart_upload_cancel():
+    conn = boto.connect_s3('the_key', 'the_secret')
+    bucket = conn.create_bucket("foobar")
+
+    multipart = bucket.initiate_multipart_upload("the-key")
+    part1 = b'0' * REDUCED_PART_SIZE
+    multipart.upload_part_from_file(BytesIO(part1), 1)
+    multipart.cancel_upload()
+    # TODO we really need some sort of assertion here, but we don't currently
+    # have the ability to list multipart uploads for a bucket.
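+    # (One option: assert bucket.get_all_multipart_uploads() no longer
+    # lists the cancelled upload; test_list_multiparts below exercises
+    # that call.)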
+
+
+@mock_s3_deprecated
+@reduced_min_part_size
+def test_multipart_etag():
+    # Create Bucket so that test can run
+    conn = boto.connect_s3('the_key', 'the_secret')
+    bucket = conn.create_bucket('mybucket')
+
+    multipart = bucket.initiate_multipart_upload("the-key")
+    part1 = b'0' * REDUCED_PART_SIZE
+    multipart.upload_part_from_file(BytesIO(part1), 1)
+    # last part, can be less than 5 MB
+    part2 = b'1'
+    multipart.upload_part_from_file(BytesIO(part2), 2)
+    multipart.complete_upload()
+    # we should get both parts as the key contents
+    bucket.get_key("the-key").etag.should.equal(EXPECTED_ETAG)
+
+
+@mock_s3_deprecated
+@reduced_min_part_size
+def test_multipart_invalid_order():
+    # Create Bucket so that test can run
+    conn = boto.connect_s3('the_key', 'the_secret')
+    bucket = conn.create_bucket('mybucket')
+
+    multipart = bucket.initiate_multipart_upload("the-key")
+    part1 = b'0' * 5242880
+    etag1 = multipart.upload_part_from_file(BytesIO(part1), 1).etag
+    # last part, can be less than 5 MB
+    part2 = b'1'
+    etag2 = multipart.upload_part_from_file(BytesIO(part2), 2).etag
+    xml = "<Part><PartNumber>{0}</PartNumber><ETag>{1}</ETag></Part>"
+    xml = xml.format(2, etag2) + xml.format(1, etag1)
+    xml = "<CompleteMultipartUpload>{0}</CompleteMultipartUpload>".format(xml)
+    bucket.complete_multipart_upload.when.called_with(
+        multipart.key_name, multipart.id, xml).should.throw(S3ResponseError)
+
+
+@mock_s3_deprecated
+@reduced_min_part_size
+def test_multipart_etag_quotes_stripped():
+    # Create Bucket so that test can run
+    conn = boto.connect_s3('the_key', 'the_secret')
+    bucket = conn.create_bucket('mybucket')
+
+    multipart = bucket.initiate_multipart_upload("the-key")
+    part1 = b'0' * REDUCED_PART_SIZE
+    etag1 = multipart.upload_part_from_file(BytesIO(part1), 1).etag
+    # last part, can be less than 5 MB
+    part2 = b'1'
+    etag2 = multipart.upload_part_from_file(BytesIO(part2), 2).etag
+    # Strip quotes from etags
+    etag1 = etag1.replace('"', '')
+    etag2 = etag2.replace('"', '')
+    xml = "<Part><PartNumber>{0}</PartNumber><ETag>{1}</ETag></Part>"
+    xml = xml.format(1, etag1) + xml.format(2, etag2)
+    xml = "<CompleteMultipartUpload>{0}</CompleteMultipartUpload>".format(xml)
+    bucket.complete_multipart_upload.when.called_with(
+        multipart.key_name, multipart.id, xml).should_not.throw(S3ResponseError)
+    # we should get both parts as the key contents
+    bucket.get_key("the-key").etag.should.equal(EXPECTED_ETAG)
+
+
+@mock_s3_deprecated
+@reduced_min_part_size
+def test_multipart_duplicate_upload():
+    conn = boto.connect_s3('the_key', 'the_secret')
+    bucket = conn.create_bucket("foobar")
+
+    multipart = bucket.initiate_multipart_upload("the-key")
+    part1 = b'0' * REDUCED_PART_SIZE
+    multipart.upload_part_from_file(BytesIO(part1), 1)
+    # same part again
+    multipart.upload_part_from_file(BytesIO(part1), 1)
+    part2 = b'1' * 1024
+    multipart.upload_part_from_file(BytesIO(part2), 2)
+    multipart.complete_upload()
+    # We should get only one copy of part 1.
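+    # Uploading the same part number twice overwrites the first upload,
+    # as on real S3, so the completed key is exactly part1 + part2.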
+ bucket.get_key( + "the-key").get_contents_as_string().should.equal(part1 + part2) + + +@mock_s3_deprecated +def test_list_multiparts(): + # Create Bucket so that test can run + conn = boto.connect_s3('the_key', 'the_secret') + bucket = conn.create_bucket('mybucket') + + multipart1 = bucket.initiate_multipart_upload("one-key") + multipart2 = bucket.initiate_multipart_upload("two-key") + uploads = bucket.get_all_multipart_uploads() + uploads.should.have.length_of(2) + dict([(u.key_name, u.id) for u in uploads]).should.equal( + {'one-key': multipart1.id, 'two-key': multipart2.id}) + multipart2.cancel_upload() + uploads = bucket.get_all_multipart_uploads() + uploads.should.have.length_of(1) + uploads[0].key_name.should.equal("one-key") + multipart1.cancel_upload() + uploads = bucket.get_all_multipart_uploads() + uploads.should.be.empty + + +@mock_s3_deprecated +def test_key_save_to_missing_bucket(): + conn = boto.connect_s3('the_key', 'the_secret') + bucket = conn.get_bucket('mybucket', validate=False) + + key = Key(bucket) + key.key = "the-key" + key.set_contents_from_string.when.called_with( + "foobar").should.throw(S3ResponseError) + + +@mock_s3_deprecated +def test_missing_key(): + conn = boto.connect_s3('the_key', 'the_secret') + bucket = conn.create_bucket("foobar") + bucket.get_key("the-key").should.equal(None) + + +@mock_s3_deprecated +def test_missing_key_urllib2(): + conn = boto.connect_s3('the_key', 'the_secret') + conn.create_bucket("foobar") + + urlopen.when.called_with( + "http://foobar.s3.amazonaws.com/the-key").should.throw(HTTPError) + + +@mock_s3_deprecated +def test_empty_key(): + conn = boto.connect_s3('the_key', 'the_secret') + bucket = conn.create_bucket("foobar") + key = Key(bucket) + key.key = "the-key" + key.set_contents_from_string("") + + key = bucket.get_key("the-key") + key.size.should.equal(0) + key.get_contents_as_string().should.equal(b'') + + +@mock_s3_deprecated +def test_empty_key_set_on_existing_key(): + conn = boto.connect_s3('the_key', 'the_secret') + bucket = conn.create_bucket("foobar") + key = Key(bucket) + key.key = "the-key" + key.set_contents_from_string("foobar") + + key = bucket.get_key("the-key") + key.size.should.equal(6) + key.get_contents_as_string().should.equal(b'foobar') + + key.set_contents_from_string("") + bucket.get_key("the-key").get_contents_as_string().should.equal(b'') + + +@mock_s3_deprecated +def test_large_key_save(): + conn = boto.connect_s3('the_key', 'the_secret') + bucket = conn.create_bucket("foobar") + key = Key(bucket) + key.key = "the-key" + key.set_contents_from_string("foobar" * 100000) + + bucket.get_key( + "the-key").get_contents_as_string().should.equal(b'foobar' * 100000) + + +@mock_s3_deprecated +def test_copy_key(): + conn = boto.connect_s3('the_key', 'the_secret') + bucket = conn.create_bucket("foobar") + key = Key(bucket) + key.key = "the-key" + key.set_contents_from_string("some value") + + bucket.copy_key('new-key', 'foobar', 'the-key') + + bucket.get_key( + "the-key").get_contents_as_string().should.equal(b"some value") + bucket.get_key( + "new-key").get_contents_as_string().should.equal(b"some value") + + +@mock_s3_deprecated +def test_copy_key_with_version(): + conn = boto.connect_s3('the_key', 'the_secret') + bucket = conn.create_bucket("foobar") + bucket.configure_versioning(versioning=True) + key = Key(bucket) + key.key = "the-key" + key.set_contents_from_string("some value") + key.set_contents_from_string("another value") + + bucket.copy_key('new-key', 'foobar', 'the-key', src_version_id='0') + + 
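+    # Version ids in the mock are sequential per key ('0', '1', ...; see
+    # test_key_version below), so src_version_id='0' copies the first
+    # value, "some value", not the later "another value".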
bucket.get_key( + "the-key").get_contents_as_string().should.equal(b"another value") + bucket.get_key( + "new-key").get_contents_as_string().should.equal(b"some value") + + +@mock_s3_deprecated +def test_set_metadata(): + conn = boto.connect_s3('the_key', 'the_secret') + bucket = conn.create_bucket("foobar") + key = Key(bucket) + key.key = 'the-key' + key.set_metadata('md', 'Metadatastring') + key.set_contents_from_string("Testval") + + bucket.get_key('the-key').get_metadata('md').should.equal('Metadatastring') + + +@mock_s3_deprecated +def test_copy_key_replace_metadata(): + conn = boto.connect_s3('the_key', 'the_secret') + bucket = conn.create_bucket("foobar") + key = Key(bucket) + key.key = "the-key" + key.set_metadata('md', 'Metadatastring') + key.set_contents_from_string("some value") + + bucket.copy_key('new-key', 'foobar', 'the-key', + metadata={'momd': 'Mometadatastring'}) + + bucket.get_key("new-key").get_metadata('md').should.be.none + bucket.get_key( + "new-key").get_metadata('momd').should.equal('Mometadatastring') + + +@freeze_time("2012-01-01 12:00:00") +@mock_s3_deprecated +def test_last_modified(): + # See https://github.com/boto/boto/issues/466 + conn = boto.connect_s3() + bucket = conn.create_bucket("foobar") + key = Key(bucket) + key.key = "the-key" + key.set_contents_from_string("some value") + + rs = bucket.get_all_keys() + rs[0].last_modified.should.equal('2012-01-01T12:00:00.000Z') + + bucket.get_key( + "the-key").last_modified.should.equal('Sun, 01 Jan 2012 12:00:00 GMT') + + +@mock_s3_deprecated +def test_missing_bucket(): + conn = boto.connect_s3('the_key', 'the_secret') + conn.get_bucket.when.called_with('mybucket').should.throw(S3ResponseError) + + +@mock_s3_deprecated +def test_bucket_with_dash(): + conn = boto.connect_s3('the_key', 'the_secret') + conn.get_bucket.when.called_with( + 'mybucket-test').should.throw(S3ResponseError) + + +@mock_s3_deprecated +def test_create_existing_bucket(): + "Trying to create a bucket that already exists should raise an Error" + conn = boto.s3.connect_to_region("us-west-2") + conn.create_bucket("foobar") + with assert_raises(S3CreateError): + conn.create_bucket('foobar') + + +@mock_s3_deprecated +def test_create_existing_bucket_in_us_east_1(): + "Trying to create a bucket that already exists in us-east-1 returns the bucket" + + """" + http://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html + Your previous request to create the named bucket succeeded and you already + own it. You get this error in all AWS regions except US Standard, + us-east-1. In us-east-1 region, you will get 200 OK, but it is no-op (if + bucket exists it Amazon S3 will not do anything). 
+ """ + conn = boto.s3.connect_to_region("us-east-1") + conn.create_bucket("foobar") + bucket = conn.create_bucket("foobar") + bucket.name.should.equal("foobar") + + +@mock_s3_deprecated +def test_other_region(): + conn = S3Connection( + 'key', 'secret', host='s3-website-ap-southeast-2.amazonaws.com') + conn.create_bucket("foobar") + list(conn.get_bucket("foobar").get_all_keys()).should.equal([]) + + +@mock_s3_deprecated +def test_bucket_deletion(): + conn = boto.connect_s3('the_key', 'the_secret') + bucket = conn.create_bucket("foobar") + + key = Key(bucket) + key.key = "the-key" + key.set_contents_from_string("some value") + + # Try to delete a bucket that still has keys + conn.delete_bucket.when.called_with("foobar").should.throw(S3ResponseError) + + bucket.delete_key("the-key") + conn.delete_bucket("foobar") + + # Get non-existing bucket + conn.get_bucket.when.called_with("foobar").should.throw(S3ResponseError) + + # Delete non-existant bucket + conn.delete_bucket.when.called_with("foobar").should.throw(S3ResponseError) + + +@mock_s3_deprecated +def test_get_all_buckets(): + conn = boto.connect_s3('the_key', 'the_secret') + conn.create_bucket("foobar") + conn.create_bucket("foobar2") + buckets = conn.get_all_buckets() + + buckets.should.have.length_of(2) + + +@mock_s3 +@mock_s3_deprecated +def test_post_to_bucket(): + conn = boto.connect_s3('the_key', 'the_secret') + bucket = conn.create_bucket("foobar") + + requests.post("https://foobar.s3.amazonaws.com/", { + 'key': 'the-key', + 'file': 'nothing' + }) + + bucket.get_key('the-key').get_contents_as_string().should.equal(b'nothing') + + +@mock_s3 +@mock_s3_deprecated +def test_post_with_metadata_to_bucket(): + conn = boto.connect_s3('the_key', 'the_secret') + bucket = conn.create_bucket("foobar") + + requests.post("https://foobar.s3.amazonaws.com/", { + 'key': 'the-key', + 'file': 'nothing', + 'x-amz-meta-test': 'metadata' + }) + + bucket.get_key('the-key').get_metadata('test').should.equal('metadata') + + +@mock_s3_deprecated +def test_delete_missing_key(): + conn = boto.connect_s3('the_key', 'the_secret') + bucket = conn.create_bucket('foobar') + + deleted_key = bucket.delete_key("foobar") + deleted_key.key.should.equal("foobar") + + +@mock_s3_deprecated +def test_delete_keys(): + conn = boto.connect_s3('the_key', 'the_secret') + bucket = conn.create_bucket('foobar') + + Key(bucket=bucket, name='file1').set_contents_from_string('abc') + Key(bucket=bucket, name='file2').set_contents_from_string('abc') + Key(bucket=bucket, name='file3').set_contents_from_string('abc') + Key(bucket=bucket, name='file4').set_contents_from_string('abc') + + result = bucket.delete_keys(['file2', 'file3']) + result.deleted.should.have.length_of(2) + result.errors.should.have.length_of(0) + keys = bucket.get_all_keys() + keys.should.have.length_of(2) + keys[0].name.should.equal('file1') + + +@mock_s3_deprecated +def test_delete_keys_with_invalid(): + conn = boto.connect_s3('the_key', 'the_secret') + bucket = conn.create_bucket('foobar') + + Key(bucket=bucket, name='file1').set_contents_from_string('abc') + Key(bucket=bucket, name='file2').set_contents_from_string('abc') + Key(bucket=bucket, name='file3').set_contents_from_string('abc') + Key(bucket=bucket, name='file4').set_contents_from_string('abc') + + result = bucket.delete_keys(['abc', 'file3']) + + result.deleted.should.have.length_of(1) + result.errors.should.have.length_of(1) + keys = bucket.get_all_keys() + keys.should.have.length_of(3) + keys[0].name.should.equal('file1') + + +@mock_s3_deprecated 
+def test_bucket_name_with_dot():
+    conn = boto.connect_s3()
+    bucket = conn.create_bucket('firstname.lastname')
+
+    k = Key(bucket, 'somekey')
+    k.set_contents_from_string('somedata')
+
+
+@mock_s3_deprecated
+def test_key_with_special_characters():
+    conn = boto.connect_s3()
+    bucket = conn.create_bucket('test_bucket_name')
+
+    key = Key(bucket, 'test_list_keys_2/x?y')
+    key.set_contents_from_string('value1')
+
+    key_list = bucket.list('test_list_keys_2/', '/')
+    keys = [x for x in key_list]
+    keys[0].name.should.equal("test_list_keys_2/x?y")
+
+
+@mock_s3_deprecated
+def test_unicode_key_with_slash():
+    conn = boto.connect_s3('the_key', 'the_secret')
+    bucket = conn.create_bucket("foobar")
+    key = Key(bucket)
+    key.key = "/the-key-unîcode/test"
+    key.set_contents_from_string("value")
+
+    key = bucket.get_key("/the-key-unîcode/test")
+    key.get_contents_as_string().should.equal(b'value')
+
+
+@mock_s3_deprecated
+def test_bucket_key_listing_order():
+    conn = boto.connect_s3()
+    bucket = conn.create_bucket('test_bucket')
+    prefix = 'toplevel/'
+
+    def store(name):
+        k = Key(bucket, prefix + name)
+        k.set_contents_from_string('somedata')
+
+    names = ['x/key', 'y.key1', 'y.key2', 'y.key3', 'x/y/key', 'x/y/z/key']
+
+    for name in names:
+        store(name)
+
+    delimiter = None
+    keys = [x.name for x in bucket.list(prefix, delimiter)]
+    keys.should.equal([
+        'toplevel/x/key', 'toplevel/x/y/key', 'toplevel/x/y/z/key',
+        'toplevel/y.key1', 'toplevel/y.key2', 'toplevel/y.key3'
+    ])
+
+    delimiter = '/'
+    keys = [x.name for x in bucket.list(prefix, delimiter)]
+    keys.should.equal([
+        'toplevel/y.key1', 'toplevel/y.key2', 'toplevel/y.key3', 'toplevel/x/'
+    ])
+
+    # Test delimiter with no prefix
+    delimiter = '/'
+    keys = [x.name for x in bucket.list(prefix=None, delimiter=delimiter)]
+    keys.should.equal(['toplevel/'])
+
+    delimiter = None
+    keys = [x.name for x in bucket.list(prefix + 'x', delimiter)]
+    keys.should.equal(
+        [u'toplevel/x/key', u'toplevel/x/y/key', u'toplevel/x/y/z/key'])
+
+    delimiter = '/'
+    keys = [x.name for x in bucket.list(prefix + 'x', delimiter)]
+    keys.should.equal([u'toplevel/x/'])
+
+
+@mock_s3_deprecated
+def test_key_with_reduced_redundancy():
+    conn = boto.connect_s3()
+    bucket = conn.create_bucket('test_bucket_name')
+
+    key = Key(bucket, 'test_rr_key')
+    key.set_contents_from_string('value1', reduced_redundancy=True)
+    # we use the bucket iterator because of:
+    # https://github.com/boto/boto/issues/1173
+    list(bucket)[0].storage_class.should.equal('REDUCED_REDUNDANCY')
+
+
+@mock_s3_deprecated
+def test_copy_key_reduced_redundancy():
+    conn = boto.connect_s3('the_key', 'the_secret')
+    bucket = conn.create_bucket("foobar")
+    key = Key(bucket)
+    key.key = "the-key"
+    key.set_contents_from_string("some value")
+
+    bucket.copy_key('new-key', 'foobar', 'the-key',
+                    storage_class='REDUCED_REDUNDANCY')
+
+    # we use the bucket iterator because of:
+    # https://github.com/boto/boto/issues/1173
+    keys = dict([(k.name, k) for k in bucket])
+    keys['new-key'].storage_class.should.equal("REDUCED_REDUNDANCY")
+    keys['the-key'].storage_class.should.equal("STANDARD")
+
+
+@freeze_time("2012-01-01 12:00:00")
+@mock_s3_deprecated
+def test_restore_key():
+    conn = boto.connect_s3('the_key', 'the_secret')
+    bucket = conn.create_bucket("foobar")
+    key = Key(bucket)
+    key.key = "the-key"
+    key.set_contents_from_string("some value")
+    list(bucket)[0].ongoing_restore.should.be.none
+    key.restore(1)
+    key = bucket.get_key('the-key')
+    key.ongoing_restore.should_not.be.none
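+    # boto derives ongoing_restore from the x-amz-restore response header:
+    # None while no restore exists, then False once the header reports
+    # ongoing-request="false", as asserted next.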
key.ongoing_restore.should.be.false + key.expiry_date.should.equal("Mon, 02 Jan 2012 12:00:00 GMT") + key.restore(2) + key = bucket.get_key('the-key') + key.ongoing_restore.should_not.be.none + key.ongoing_restore.should.be.false + key.expiry_date.should.equal("Tue, 03 Jan 2012 12:00:00 GMT") + + +@freeze_time("2012-01-01 12:00:00") +@mock_s3_deprecated +def test_restore_key_headers(): + conn = boto.connect_s3('the_key', 'the_secret') + bucket = conn.create_bucket("foobar") + key = Key(bucket) + key.key = "the-key" + key.set_contents_from_string("some value") + key.restore(1, headers={'foo': 'bar'}) + key = bucket.get_key('the-key') + key.ongoing_restore.should_not.be.none + key.ongoing_restore.should.be.false + key.expiry_date.should.equal("Mon, 02 Jan 2012 12:00:00 GMT") + + +@mock_s3_deprecated +def test_get_versioning_status(): + conn = boto.connect_s3('the_key', 'the_secret') + bucket = conn.create_bucket('foobar') + d = bucket.get_versioning_status() + d.should.be.empty + + bucket.configure_versioning(versioning=True) + d = bucket.get_versioning_status() + d.shouldnt.be.empty + d.should.have.key('Versioning').being.equal('Enabled') + + bucket.configure_versioning(versioning=False) + d = bucket.get_versioning_status() + d.should.have.key('Versioning').being.equal('Suspended') + + +@mock_s3_deprecated +def test_key_version(): + conn = boto.connect_s3('the_key', 'the_secret') + bucket = conn.create_bucket('foobar') + bucket.configure_versioning(versioning=True) + + key = Key(bucket) + key.key = 'the-key' + key.version_id.should.be.none + key.set_contents_from_string('some string') + key.version_id.should.equal('0') + key.set_contents_from_string('some string') + key.version_id.should.equal('1') + + key = bucket.get_key('the-key') + key.version_id.should.equal('1') + + +@mock_s3_deprecated +def test_list_versions(): + conn = boto.connect_s3('the_key', 'the_secret') + bucket = conn.create_bucket('foobar') + bucket.configure_versioning(versioning=True) + + key = Key(bucket, 'the-key') + key.version_id.should.be.none + key.set_contents_from_string("Version 1") + key.version_id.should.equal('0') + key.set_contents_from_string("Version 2") + key.version_id.should.equal('1') + + versions = list(bucket.list_versions()) + + versions.should.have.length_of(2) + + versions[0].name.should.equal('the-key') + versions[0].version_id.should.equal('0') + versions[0].get_contents_as_string().should.equal(b"Version 1") + + versions[1].name.should.equal('the-key') + versions[1].version_id.should.equal('1') + versions[1].get_contents_as_string().should.equal(b"Version 2") + + key = Key(bucket, 'the2-key') + key.set_contents_from_string("Version 1") + + keys = list(bucket.list()) + keys.should.have.length_of(2) + versions = list(bucket.list_versions(prefix='the2-')) + versions.should.have.length_of(1) + + +@mock_s3_deprecated +def test_acl_setting(): + conn = boto.connect_s3() + bucket = conn.create_bucket('foobar') + content = b'imafile' + keyname = 'test.txt' + + key = Key(bucket, name=keyname) + key.content_type = 'text/plain' + key.set_contents_from_string(content) + key.make_public() + + key = bucket.get_key(keyname) + + assert key.get_contents_as_string() == content + + grants = key.get_acl().acl.grants + assert any(g.uri == 'http://acs.amazonaws.com/groups/global/AllUsers' and + g.permission == 'READ' for g in grants), grants + + +@mock_s3_deprecated +def test_acl_setting_via_headers(): + conn = boto.connect_s3() + bucket = conn.create_bucket('foobar') + content = b'imafile' + keyname = 'test.txt' + + 
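+    # The grant is passed as a raw x-amz-grant-full-control header whose
+    # grantee URI is the global AllUsers group, rather than a canned ACL.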
key = Key(bucket, name=keyname) + key.content_type = 'text/plain' + key.set_contents_from_string(content, headers={ + 'x-amz-grant-full-control': 'uri="http://acs.amazonaws.com/groups/global/AllUsers"' + }) + + key = bucket.get_key(keyname) + + assert key.get_contents_as_string() == content + + grants = key.get_acl().acl.grants + assert any(g.uri == 'http://acs.amazonaws.com/groups/global/AllUsers' and + g.permission == 'FULL_CONTROL' for g in grants), grants + + +@mock_s3_deprecated +def test_acl_switching(): + conn = boto.connect_s3() + bucket = conn.create_bucket('foobar') + content = b'imafile' + keyname = 'test.txt' + + key = Key(bucket, name=keyname) + key.content_type = 'text/plain' + key.set_contents_from_string(content, policy='public-read') + key.set_acl('private') + + grants = key.get_acl().acl.grants + assert not any(g.uri == 'http://acs.amazonaws.com/groups/global/AllUsers' and + g.permission == 'READ' for g in grants), grants + + +@mock_s3_deprecated +def test_bucket_acl_setting(): + conn = boto.connect_s3() + bucket = conn.create_bucket('foobar') + + bucket.make_public() + + grants = bucket.get_acl().acl.grants + assert any(g.uri == 'http://acs.amazonaws.com/groups/global/AllUsers' and + g.permission == 'READ' for g in grants), grants + + +@mock_s3_deprecated +def test_bucket_acl_switching(): + conn = boto.connect_s3() + bucket = conn.create_bucket('foobar') + bucket.make_public() + + bucket.set_acl('private') + + grants = bucket.get_acl().acl.grants + assert not any(g.uri == 'http://acs.amazonaws.com/groups/global/AllUsers' and + g.permission == 'READ' for g in grants), grants + + +@mock_s3 +def test_s3_object_in_public_bucket(): + s3 = boto3.resource('s3') + bucket = s3.Bucket('test-bucket') + bucket.create(ACL='public-read') + bucket.put_object(Body=b'ABCD', Key='file.txt') + + s3_anonymous = boto3.resource('s3') + s3_anonymous.meta.client.meta.events.register('choose-signer.s3.*', disable_signing) + + contents = s3_anonymous.Object(key='file.txt', bucket_name='test-bucket').get()['Body'].read() + contents.should.equal(b'ABCD') + + bucket.put_object(ACL='private', Body=b'ABCD', Key='file.txt') + + with assert_raises(ClientError) as exc: + s3_anonymous.Object(key='file.txt', bucket_name='test-bucket').get() + exc.exception.response['Error']['Code'].should.equal('403') + + params = {'Bucket': 'test-bucket', 'Key': 'file.txt'} + presigned_url = boto3.client('s3').generate_presigned_url('get_object', params, ExpiresIn=900) + response = requests.get(presigned_url) + assert response.status_code == 200 + + +@mock_s3 +def test_s3_object_in_private_bucket(): + s3 = boto3.resource('s3') + bucket = s3.Bucket('test-bucket') + bucket.create(ACL='private') + bucket.put_object(ACL='private', Body=b'ABCD', Key='file.txt') + + s3_anonymous = boto3.resource('s3') + s3_anonymous.meta.client.meta.events.register('choose-signer.s3.*', disable_signing) + + with assert_raises(ClientError) as exc: + s3_anonymous.Object(key='file.txt', bucket_name='test-bucket').get() + exc.exception.response['Error']['Code'].should.equal('403') + + bucket.put_object(ACL='public-read', Body=b'ABCD', Key='file.txt') + contents = s3_anonymous.Object(key='file.txt', bucket_name='test-bucket').get()['Body'].read() + contents.should.equal(b'ABCD') + + +@mock_s3_deprecated +def test_unicode_key(): + conn = boto.connect_s3() + bucket = conn.create_bucket('mybucket') + key = Key(bucket) + key.key = u'こんにちは.jpg' + key.set_contents_from_string('Hello world!') + assert [listed_key.key for listed_key in bucket.list()] == 
[key.key]
+    fetched_key = bucket.get_key(key.key)
+    assert fetched_key.key == key.key
+    assert fetched_key.get_contents_as_string().decode("utf-8") == 'Hello world!'
+
+
+@mock_s3_deprecated
+def test_unicode_value():
+    conn = boto.connect_s3()
+    bucket = conn.create_bucket('mybucket')
+    key = Key(bucket)
+    key.key = 'some_key'
+    key.set_contents_from_string(u'こんにちは.jpg')
+    list(bucket.list())
+    key = bucket.get_key(key.key)
+    assert key.get_contents_as_string().decode("utf-8") == u'こんにちは.jpg'
+
+
+@mock_s3_deprecated
+def test_setting_content_encoding():
+    conn = boto.connect_s3()
+    bucket = conn.create_bucket('mybucket')
+    key = bucket.new_key("keyname")
+    key.set_metadata("Content-Encoding", "gzip")
+    compressed_data = "abcdef"
+    key.set_contents_from_string(compressed_data)
+
+    key = bucket.get_key("keyname")
+    key.content_encoding.should.equal("gzip")
+
+
+@mock_s3_deprecated
+def test_bucket_location():
+    conn = boto.s3.connect_to_region("us-west-2")
+    bucket = conn.create_bucket('mybucket')
+    bucket.get_location().should.equal("us-west-2")
+
+
+@mock_s3_deprecated
+def test_ranged_get():
+    conn = boto.connect_s3()
+    bucket = conn.create_bucket('mybucket')
+    key = Key(bucket)
+    key.key = 'bigkey'
+    rep = b"0123456789"
+    key.set_contents_from_string(rep * 10)
+
+    # Implicitly bounded range requests.
+    key.get_contents_as_string(
+        headers={'Range': 'bytes=0-'}).should.equal(rep * 10)
+    key.get_contents_as_string(
+        headers={'Range': 'bytes=50-'}).should.equal(rep * 5)
+    key.get_contents_as_string(
+        headers={'Range': 'bytes=99-'}).should.equal(b'9')
+
+    # Explicitly bounded range requests starting from the first byte.
+    key.get_contents_as_string(
+        headers={'Range': 'bytes=0-0'}).should.equal(b'0')
+    key.get_contents_as_string(
+        headers={'Range': 'bytes=0-49'}).should.equal(rep * 5)
+    key.get_contents_as_string(
+        headers={'Range': 'bytes=0-99'}).should.equal(rep * 10)
+    key.get_contents_as_string(
+        headers={'Range': 'bytes=0-100'}).should.equal(rep * 10)
+    key.get_contents_as_string(
+        headers={'Range': 'bytes=0-700'}).should.equal(rep * 10)
+
+    # Explicitly bounded range requests starting from a middle byte.
+    key.get_contents_as_string(
+        headers={'Range': 'bytes=50-54'}).should.equal(rep[:5])
+    key.get_contents_as_string(
+        headers={'Range': 'bytes=50-99'}).should.equal(rep * 5)
+    key.get_contents_as_string(
+        headers={'Range': 'bytes=50-100'}).should.equal(rep * 5)
+    key.get_contents_as_string(
+        headers={'Range': 'bytes=50-700'}).should.equal(rep * 5)
+
+    # Explicitly bounded range requests starting from the last byte.
+    key.get_contents_as_string(
+        headers={'Range': 'bytes=99-99'}).should.equal(b'9')
+    key.get_contents_as_string(
+        headers={'Range': 'bytes=99-100'}).should.equal(b'9')
+    key.get_contents_as_string(
+        headers={'Range': 'bytes=99-700'}).should.equal(b'9')
+
+    # Suffix range requests.
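+    # 'bytes=-N' means the final N bytes; when N is at least the object
+    # size (100 here), the whole object is returned.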
+    key.get_contents_as_string(
+        headers={'Range': 'bytes=-1'}).should.equal(b'9')
+    key.get_contents_as_string(
+        headers={'Range': 'bytes=-60'}).should.equal(rep * 6)
+    key.get_contents_as_string(
+        headers={'Range': 'bytes=-100'}).should.equal(rep * 10)
+    key.get_contents_as_string(
+        headers={'Range': 'bytes=-101'}).should.equal(rep * 10)
+    key.get_contents_as_string(
+        headers={'Range': 'bytes=-700'}).should.equal(rep * 10)
+
+    key.size.should.equal(100)
+
+
+@mock_s3_deprecated
+def test_policy():
+    conn = boto.connect_s3()
+    bucket_name = 'mybucket'
+    bucket = conn.create_bucket(bucket_name)
+
+    policy = json.dumps({
+        "Version": "2012-10-17",
+        "Id": "PutObjPolicy",
+        "Statement": [
+            {
+                "Sid": "DenyUnEncryptedObjectUploads",
+                "Effect": "Deny",
+                "Principal": "*",
+                "Action": "s3:PutObject",
+                "Resource": "arn:aws:s3:::{bucket_name}/*".format(bucket_name=bucket_name),
+                "Condition": {
+                    "StringNotEquals": {
+                        "s3:x-amz-server-side-encryption": "aws:kms"
+                    }
+                }
+            }
+        ]
+    })
+
+    with assert_raises(S3ResponseError) as err:
+        bucket.get_policy()
+
+    ex = err.exception
+    ex.box_usage.should.be.none
+    ex.error_code.should.equal('NoSuchBucketPolicy')
+    ex.message.should.equal('The bucket policy does not exist')
+    ex.reason.should.equal('Not Found')
+    ex.resource.should.be.none
+    ex.status.should.equal(404)
+    ex.body.should.contain(bucket_name)
+    ex.request_id.should_not.be.none
+
+    bucket.set_policy(policy).should.be.true
+
+    bucket = conn.get_bucket(bucket_name)
+
+    bucket.get_policy().decode('utf-8').should.equal(policy)
+
+    bucket.delete_policy()
+
+    with assert_raises(S3ResponseError) as err:
+        bucket.get_policy()
+
+
+@mock_s3_deprecated
+def test_website_configuration_xml():
+    conn = boto.connect_s3()
+    bucket = conn.create_bucket('test-bucket')
+    bucket.set_website_configuration_xml(TEST_XML)
+    bucket.get_website_configuration_xml().should.equal(TEST_XML)
+
+
+@mock_s3_deprecated
+def test_key_with_trailing_slash_in_ordinary_calling_format():
+    conn = boto.connect_s3(
+        'access_key',
+        'secret_key',
+        calling_format=boto.s3.connection.OrdinaryCallingFormat()
+    )
+    bucket = conn.create_bucket('test_bucket_name')
+
+    key_name = 'key_with_slash/'
+
+    key = Key(bucket, key_name)
+    key.set_contents_from_string('some value')
+
+    [k.name for k in bucket.get_all_keys()].should.contain(key_name)
+
+
+"""
+boto3
+"""
+
+
+@mock_s3
+def test_boto3_key_etag():
+    s3 = boto3.client('s3', region_name='us-east-1')
+    s3.create_bucket(Bucket='mybucket')
+    s3.put_object(Bucket='mybucket', Key='steve', Body=b'is awesome')
+    resp = s3.get_object(Bucket='mybucket', Key='steve')
+    resp['ETag'].should.equal('"d32bda93738f7e03adb22e66c90fbc04"')
+
+
+@mock_s3
+def test_website_redirect_location():
+    s3 = boto3.client('s3', region_name='us-east-1')
+    s3.create_bucket(Bucket='mybucket')
+
+    s3.put_object(Bucket='mybucket', Key='steve', Body=b'is awesome')
+    resp = s3.get_object(Bucket='mybucket', Key='steve')
+    resp.get('WebsiteRedirectLocation').should.be.none
+
+    url = 'https://github.com/spulec/moto'
+    s3.put_object(Bucket='mybucket', Key='steve', Body=b'is awesome', WebsiteRedirectLocation=url)
+    resp = s3.get_object(Bucket='mybucket', Key='steve')
+    resp['WebsiteRedirectLocation'].should.equal(url)
+
+
+@mock_s3
+def test_boto3_list_keys_xml_escaped():
+    s3 = boto3.client('s3', region_name='us-east-1')
+    s3.create_bucket(Bucket='mybucket')
+    key_name = 'Q&A.txt'
+    s3.put_object(Bucket='mybucket', Key=key_name, Body=b'is awesome')
+
+    resp = s3.list_objects_v2(Bucket='mybucket', Prefix=key_name)
+
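+    # The '&' in 'Q&A.txt' must be XML-escaped in the ListObjectsV2
+    # response body so the client can parse the key name back intact.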
assert resp['Contents'][0]['Key'] == key_name + assert resp['KeyCount'] == 1 + assert resp['MaxKeys'] == 1000 + assert resp['Prefix'] == key_name + assert resp['IsTruncated'] == False + assert 'Delimiter' not in resp + assert 'StartAfter' not in resp + assert 'NextContinuationToken' not in resp + assert 'Owner' not in resp['Contents'][0] + + +@mock_s3 +def test_boto3_list_objects_v2_truncated_response(): + s3 = boto3.client('s3', region_name='us-east-1') + s3.create_bucket(Bucket='mybucket') + s3.put_object(Bucket='mybucket', Key='one', Body=b'1') + s3.put_object(Bucket='mybucket', Key='two', Body=b'22') + s3.put_object(Bucket='mybucket', Key='three', Body=b'333') + + # First list + resp = s3.list_objects_v2(Bucket='mybucket', MaxKeys=1) + listed_object = resp['Contents'][0] + + assert listed_object['Key'] == 'one' + assert resp['MaxKeys'] == 1 + assert resp['Prefix'] == '' + assert resp['KeyCount'] == 1 + assert resp['IsTruncated'] == True + assert 'Delimiter' not in resp + assert 'StartAfter' not in resp + assert 'Owner' not in listed_object # owner info was not requested + + next_token = resp['NextContinuationToken'] + + # Second list + resp = s3.list_objects_v2( + Bucket='mybucket', MaxKeys=1, ContinuationToken=next_token) + listed_object = resp['Contents'][0] + + assert listed_object['Key'] == 'three' + assert resp['MaxKeys'] == 1 + assert resp['Prefix'] == '' + assert resp['KeyCount'] == 1 + assert resp['IsTruncated'] == True + assert 'Delimiter' not in resp + assert 'StartAfter' not in resp + assert 'Owner' not in listed_object + + next_token = resp['NextContinuationToken'] + + # Third list + resp = s3.list_objects_v2( + Bucket='mybucket', MaxKeys=1, ContinuationToken=next_token) + listed_object = resp['Contents'][0] + + assert listed_object['Key'] == 'two' + assert resp['MaxKeys'] == 1 + assert resp['Prefix'] == '' + assert resp['KeyCount'] == 1 + assert resp['IsTruncated'] == False + assert 'Delimiter' not in resp + assert 'Owner' not in listed_object + assert 'StartAfter' not in resp + assert 'NextContinuationToken' not in resp + + +@mock_s3 +def test_boto3_list_objects_v2_truncated_response_start_after(): + s3 = boto3.client('s3', region_name='us-east-1') + s3.create_bucket(Bucket='mybucket') + s3.put_object(Bucket='mybucket', Key='one', Body=b'1') + s3.put_object(Bucket='mybucket', Key='two', Body=b'22') + s3.put_object(Bucket='mybucket', Key='three', Body=b'333') + + # First list + resp = s3.list_objects_v2(Bucket='mybucket', MaxKeys=1, StartAfter='one') + listed_object = resp['Contents'][0] + + assert listed_object['Key'] == 'three' + assert resp['MaxKeys'] == 1 + assert resp['Prefix'] == '' + assert resp['KeyCount'] == 1 + assert resp['IsTruncated'] == True + assert resp['StartAfter'] == 'one' + assert 'Delimiter' not in resp + assert 'Owner' not in listed_object + + next_token = resp['NextContinuationToken'] + + # Second list + # The ContinuationToken must take precedence over StartAfter. + resp = s3.list_objects_v2(Bucket='mybucket', MaxKeys=1, StartAfter='one', + ContinuationToken=next_token) + listed_object = resp['Contents'][0] + + assert listed_object['Key'] == 'two' + assert resp['MaxKeys'] == 1 + assert resp['Prefix'] == '' + assert resp['KeyCount'] == 1 + assert resp['IsTruncated'] == False + # When ContinuationToken is given, StartAfter is ignored. This also means + # AWS does not return it in the response. 
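+    # (A typical pagination loop keeps calling list_objects_v2 with
+    # ContinuationToken=resp['NextContinuationToken'] while IsTruncated is
+    # true; here the second page is already the last one, so the loop ends.)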
+ assert 'StartAfter' not in resp + assert 'Delimiter' not in resp + assert 'Owner' not in listed_object + + +@mock_s3 +def test_boto3_list_objects_v2_fetch_owner(): + s3 = boto3.client('s3', region_name='us-east-1') + s3.create_bucket(Bucket='mybucket') + s3.put_object(Bucket='mybucket', Key='one', Body=b'11') + + resp = s3.list_objects_v2(Bucket='mybucket', FetchOwner=True) + owner = resp['Contents'][0]['Owner'] + + assert 'ID' in owner + assert 'DisplayName' in owner + assert len(owner.keys()) == 2 + + +@mock_s3 +def test_boto3_bucket_create(): + s3 = boto3.resource('s3', region_name='us-east-1') + s3.create_bucket(Bucket="blah") + + s3.Object('blah', 'hello.txt').put(Body="some text") + + s3.Object('blah', 'hello.txt').get()['Body'].read().decode( + "utf-8").should.equal("some text") + + +@mock_s3 +def test_bucket_create_duplicate(): + s3 = boto3.resource('s3', region_name='us-west-2') + s3.create_bucket(Bucket="blah", CreateBucketConfiguration={ + 'LocationConstraint': 'us-west-2', + }) + with assert_raises(ClientError) as exc: + s3.create_bucket( + Bucket="blah", + CreateBucketConfiguration={ + 'LocationConstraint': 'us-west-2', + } + ) + exc.exception.response['Error']['Code'].should.equal('BucketAlreadyExists') + + +@mock_s3 +def test_boto3_bucket_create_eu_central(): + s3 = boto3.resource('s3', region_name='eu-central-1') + s3.create_bucket(Bucket="blah") + + s3.Object('blah', 'hello.txt').put(Body="some text") + + s3.Object('blah', 'hello.txt').get()['Body'].read().decode( + "utf-8").should.equal("some text") + + +@mock_s3 +def test_boto3_head_object(): + s3 = boto3.resource('s3', region_name='us-east-1') + s3.create_bucket(Bucket="blah") + + s3.Object('blah', 'hello.txt').put(Body="some text") + + s3.Object('blah', 'hello.txt').meta.client.head_object( + Bucket='blah', Key='hello.txt') + + with assert_raises(ClientError) as e: + s3.Object('blah', 'hello2.txt').meta.client.head_object( + Bucket='blah', Key='hello_bad.txt') + e.exception.response['Error']['Code'].should.equal('404') + + +@mock_s3 +def test_boto3_bucket_deletion(): + cli = boto3.client('s3', region_name='us-east-1') + cli.create_bucket(Bucket="foobar") + + cli.put_object(Bucket="foobar", Key="the-key", Body="some value") + + # Try to delete a bucket that still has keys + cli.delete_bucket.when.called_with(Bucket="foobar").should.throw( + cli.exceptions.ClientError, + ('An error occurred (BucketNotEmpty) when calling the DeleteBucket operation: ' + 'The bucket you tried to delete is not empty')) + + cli.delete_object(Bucket="foobar", Key="the-key") + cli.delete_bucket(Bucket="foobar") + + # Get non-existing bucket + cli.head_bucket.when.called_with(Bucket="foobar").should.throw( + cli.exceptions.ClientError, + "An error occurred (404) when calling the HeadBucket operation: Not Found") + + # Delete non-existing bucket + cli.delete_bucket.when.called_with(Bucket="foobar").should.throw(cli.exceptions.NoSuchBucket) + + +@mock_s3 +def test_boto3_get_object(): + s3 = boto3.resource('s3', region_name='us-east-1') + s3.create_bucket(Bucket="blah") + + s3.Object('blah', 'hello.txt').put(Body="some text") + + s3.Object('blah', 'hello.txt').meta.client.head_object( + Bucket='blah', Key='hello.txt') + + with assert_raises(ClientError) as e: + s3.Object('blah', 'hello2.txt').get() + + e.exception.response['Error']['Code'].should.equal('NoSuchKey') + + +@mock_s3 +def test_boto3_head_object_with_versioning(): + s3 = boto3.resource('s3', region_name='us-east-1') + bucket = s3.create_bucket(Bucket='blah') + 
bucket.Versioning().enable() + + old_content = 'some text' + new_content = 'some new text' + s3.Object('blah', 'hello.txt').put(Body=old_content) + s3.Object('blah', 'hello.txt').put(Body=new_content) + + head_object = s3.Object('blah', 'hello.txt').meta.client.head_object( + Bucket='blah', Key='hello.txt') + head_object['VersionId'].should.equal('1') + head_object['ContentLength'].should.equal(len(new_content)) + + old_head_object = s3.Object('blah', 'hello.txt').meta.client.head_object( + Bucket='blah', Key='hello.txt', VersionId='0') + old_head_object['VersionId'].should.equal('0') + old_head_object['ContentLength'].should.equal(len(old_content)) + + +@mock_s3 +def test_boto3_copy_object_with_versioning(): + client = boto3.client('s3', region_name='us-east-1') + + client.create_bucket(Bucket='blah', CreateBucketConfiguration={'LocationConstraint': 'eu-west-1'}) + client.put_bucket_versioning(Bucket='blah', VersioningConfiguration={'Status': 'Enabled'}) + + client.put_object(Bucket='blah', Key='test1', Body=b'test1') + client.put_object(Bucket='blah', Key='test2', Body=b'test2') + + obj1_version = client.get_object(Bucket='blah', Key='test1')['VersionId'] + obj2_version = client.get_object(Bucket='blah', Key='test2')['VersionId'] + + # Versions should be the same + obj1_version.should.equal(obj2_version) + + client.copy_object(CopySource={'Bucket': 'blah', 'Key': 'test1'}, Bucket='blah', Key='test2') + obj2_version_new = client.get_object(Bucket='blah', Key='test2')['VersionId'] + + # Version should be different to previous version + obj2_version_new.should_not.equal(obj2_version) + + +@mock_s3 +def test_boto3_deleted_versionings_list(): + client = boto3.client('s3', region_name='us-east-1') + + client.create_bucket(Bucket='blah') + client.put_bucket_versioning(Bucket='blah', VersioningConfiguration={'Status': 'Enabled'}) + + client.put_object(Bucket='blah', Key='test1', Body=b'test1') + client.put_object(Bucket='blah', Key='test2', Body=b'test2') + client.delete_objects(Bucket='blah', Delete={'Objects': [{'Key': 'test1'}]}) + + listed = client.list_objects_v2(Bucket='blah') + assert len(listed['Contents']) == 1 + + +@mock_s3 +def test_boto3_delete_versioned_bucket(): + client = boto3.client('s3', region_name='us-east-1') + + client.create_bucket(Bucket='blah') + client.put_bucket_versioning(Bucket='blah', VersioningConfiguration={'Status': 'Enabled'}) + + resp = client.put_object(Bucket='blah', Key='test1', Body=b'test1') + client.delete_object(Bucket='blah', Key='test1', VersionId=resp["VersionId"]) + + client.delete_bucket(Bucket='blah') + + +@mock_s3 +def test_boto3_head_object_if_modified_since(): + s3 = boto3.client('s3', region_name='us-east-1') + bucket_name = "blah" + s3.create_bucket(Bucket=bucket_name) + + key = 'hello.txt' + + s3.put_object( + Bucket=bucket_name, + Key=key, + Body='test' + ) + + with assert_raises(botocore.exceptions.ClientError) as err: + s3.head_object( + Bucket=bucket_name, + Key=key, + IfModifiedSince=datetime.datetime.utcnow() + datetime.timedelta(hours=1) + ) + e = err.exception + e.response['Error'].should.equal({'Code': '304', 'Message': 'Not Modified'}) + + +@mock_s3 +@reduced_min_part_size +def test_boto3_multipart_etag(): + # Create Bucket so that test can run + s3 = boto3.client('s3', region_name='us-east-1') + s3.create_bucket(Bucket='mybucket') + + upload_id = s3.create_multipart_upload( + Bucket='mybucket', Key='the-key')['UploadId'] + part1 = b'0' * REDUCED_PART_SIZE + etags = [] + etags.append( + s3.upload_part(Bucket='mybucket', 
Key='the-key', PartNumber=1, + UploadId=upload_id, Body=part1)['ETag']) + # last part, can be less than 5 MB + part2 = b'1' + etags.append( + s3.upload_part(Bucket='mybucket', Key='the-key', PartNumber=2, + UploadId=upload_id, Body=part2)['ETag']) + s3.complete_multipart_upload( + Bucket='mybucket', Key='the-key', UploadId=upload_id, + MultipartUpload={'Parts': [{'ETag': etag, 'PartNumber': i} + for i, etag in enumerate(etags, 1)]}) + # we should get both parts as the key contents + resp = s3.get_object(Bucket='mybucket', Key='the-key') + resp['ETag'].should.equal(EXPECTED_ETAG) + + +@mock_s3 +def test_boto3_put_object_with_tagging(): + s3 = boto3.client('s3', region_name='us-east-1') + bucket_name = 'mybucket' + key = 'key-with-tags' + s3.create_bucket(Bucket=bucket_name) + + s3.put_object( + Bucket=bucket_name, + Key=key, + Body='test', + Tagging='foo=bar', + ) + + resp = s3.get_object_tagging(Bucket=bucket_name, Key=key) + + resp['TagSet'].should.contain({'Key': 'foo', 'Value': 'bar'}) + + +@mock_s3 +def test_boto3_put_bucket_tagging(): + s3 = boto3.client("s3", region_name="us-east-1") + bucket_name = "mybucket" + s3.create_bucket(Bucket=bucket_name) + + # With 1 tag: + resp = s3.put_bucket_tagging(Bucket=bucket_name, + Tagging={ + "TagSet": [ + { + "Key": "TagOne", + "Value": "ValueOne" + } + ] + }) + resp['ResponseMetadata']['HTTPStatusCode'].should.equal(200) + + # With multiple tags: + resp = s3.put_bucket_tagging(Bucket=bucket_name, + Tagging={ + "TagSet": [ + { + "Key": "TagOne", + "Value": "ValueOne" + }, + { + "Key": "TagTwo", + "Value": "ValueTwo" + } + ] + }) + + resp['ResponseMetadata']['HTTPStatusCode'].should.equal(200) + + # No tags is also OK: + resp = s3.put_bucket_tagging(Bucket=bucket_name, Tagging={ + "TagSet": [] + }) + resp['ResponseMetadata']['HTTPStatusCode'].should.equal(200) + + +@mock_s3 +def test_boto3_get_bucket_tagging(): + s3 = boto3.client("s3", region_name="us-east-1") + bucket_name = "mybucket" + s3.create_bucket(Bucket=bucket_name) + s3.put_bucket_tagging(Bucket=bucket_name, + Tagging={ + "TagSet": [ + { + "Key": "TagOne", + "Value": "ValueOne" + }, + { + "Key": "TagTwo", + "Value": "ValueTwo" + } + ] + }) + + # Get the tags for the bucket: + resp = s3.get_bucket_tagging(Bucket=bucket_name) + resp['ResponseMetadata']['HTTPStatusCode'].should.equal(200) + len(resp["TagSet"]).should.equal(2) + + # With no tags: + s3.put_bucket_tagging(Bucket=bucket_name, Tagging={ + "TagSet": [] + }) + + with assert_raises(ClientError) as err: + s3.get_bucket_tagging(Bucket=bucket_name) + + e = err.exception + e.response["Error"]["Code"].should.equal("NoSuchTagSet") + e.response["Error"]["Message"].should.equal("The TagSet does not exist") + + +@mock_s3 +def test_boto3_delete_bucket_tagging(): + s3 = boto3.client("s3", region_name="us-east-1") + bucket_name = "mybucket" + s3.create_bucket(Bucket=bucket_name) + + s3.put_bucket_tagging(Bucket=bucket_name, + Tagging={ + "TagSet": [ + { + "Key": "TagOne", + "Value": "ValueOne" + }, + { + "Key": "TagTwo", + "Value": "ValueTwo" + } + ] + }) + + resp = s3.delete_bucket_tagging(Bucket=bucket_name) + resp['ResponseMetadata']['HTTPStatusCode'].should.equal(204) + + with assert_raises(ClientError) as err: + s3.get_bucket_tagging(Bucket=bucket_name) + + e = err.exception + e.response["Error"]["Code"].should.equal("NoSuchTagSet") + e.response["Error"]["Message"].should.equal("The TagSet does not exist") + + +@mock_s3 +def test_boto3_put_bucket_cors(): + s3 = boto3.client("s3", region_name="us-east-1") + bucket_name = "mybucket" + 
s3.create_bucket(Bucket=bucket_name) + + resp = s3.put_bucket_cors(Bucket=bucket_name, CORSConfiguration={ + "CORSRules": [ + { + "AllowedOrigins": [ + "*" + ], + "AllowedMethods": [ + "GET", + "POST" + ], + "AllowedHeaders": [ + "Authorization" + ], + "ExposeHeaders": [ + "x-amz-request-id" + ], + "MaxAgeSeconds": 123 + }, + { + "AllowedOrigins": [ + "*" + ], + "AllowedMethods": [ + "PUT" + ], + "AllowedHeaders": [ + "Authorization" + ], + "ExposeHeaders": [ + "x-amz-request-id" + ], + "MaxAgeSeconds": 123 + } + ] + }) + + resp['ResponseMetadata']['HTTPStatusCode'].should.equal(200) + + with assert_raises(ClientError) as err: + s3.put_bucket_cors(Bucket=bucket_name, CORSConfiguration={ + "CORSRules": [ + { + "AllowedOrigins": [ + "*" + ], + "AllowedMethods": [ + "NOTREAL", + "POST" + ] + } + ] + }) + e = err.exception + e.response["Error"]["Code"].should.equal("InvalidRequest") + e.response["Error"]["Message"].should.equal("Found unsupported HTTP method in CORS config. " + "Unsupported method is NOTREAL") + + with assert_raises(ClientError) as err: + s3.put_bucket_cors(Bucket=bucket_name, CORSConfiguration={ + "CORSRules": [] + }) + e = err.exception + e.response["Error"]["Code"].should.equal("MalformedXML") + + # And 101: + many_rules = [{"AllowedOrigins": ["*"], "AllowedMethods": ["GET"]}] * 101 + with assert_raises(ClientError) as err: + s3.put_bucket_cors(Bucket=bucket_name, CORSConfiguration={ + "CORSRules": many_rules + }) + e = err.exception + e.response["Error"]["Code"].should.equal("MalformedXML") + + +@mock_s3 +def test_boto3_get_bucket_cors(): + s3 = boto3.client("s3", region_name="us-east-1") + bucket_name = "mybucket" + s3.create_bucket(Bucket=bucket_name) + + # Without CORS: + with assert_raises(ClientError) as err: + s3.get_bucket_cors(Bucket=bucket_name) + + e = err.exception + e.response["Error"]["Code"].should.equal("NoSuchCORSConfiguration") + e.response["Error"]["Message"].should.equal("The CORS configuration does not exist") + + s3.put_bucket_cors(Bucket=bucket_name, CORSConfiguration={ + "CORSRules": [ + { + "AllowedOrigins": [ + "*" + ], + "AllowedMethods": [ + "GET", + "POST" + ], + "AllowedHeaders": [ + "Authorization" + ], + "ExposeHeaders": [ + "x-amz-request-id" + ], + "MaxAgeSeconds": 123 + }, + { + "AllowedOrigins": [ + "*" + ], + "AllowedMethods": [ + "PUT" + ], + "AllowedHeaders": [ + "Authorization" + ], + "ExposeHeaders": [ + "x-amz-request-id" + ], + "MaxAgeSeconds": 123 + } + ] + }) + + resp = s3.get_bucket_cors(Bucket=bucket_name) + resp['ResponseMetadata']['HTTPStatusCode'].should.equal(200) + len(resp["CORSRules"]).should.equal(2) + + +@mock_s3 +def test_boto3_delete_bucket_cors(): + s3 = boto3.client("s3", region_name="us-east-1") + bucket_name = "mybucket" + s3.create_bucket(Bucket=bucket_name) + s3.put_bucket_cors(Bucket=bucket_name, CORSConfiguration={ + "CORSRules": [ + { + "AllowedOrigins": [ + "*" + ], + "AllowedMethods": [ + "GET" + ] + } + ] + }) + + resp = s3.delete_bucket_cors(Bucket=bucket_name) + resp['ResponseMetadata']['HTTPStatusCode'].should.equal(204) + + # Verify deletion: + with assert_raises(ClientError) as err: + s3.get_bucket_cors(Bucket=bucket_name) + + e = err.exception + e.response["Error"]["Code"].should.equal("NoSuchCORSConfiguration") + e.response["Error"]["Message"].should.equal("The CORS configuration does not exist") + + +@mock_s3 +def test_put_bucket_acl_body(): + s3 = boto3.client("s3", region_name="us-east-1") + s3.create_bucket(Bucket="bucket") + bucket_owner = s3.get_bucket_acl(Bucket="bucket")["Owner"] + 
s3.put_bucket_acl(Bucket="bucket", AccessControlPolicy={ + "Grants": [ + { + "Grantee": { + "URI": "http://acs.amazonaws.com/groups/s3/LogDelivery", + "Type": "Group" + }, + "Permission": "WRITE" + }, + { + "Grantee": { + "URI": "http://acs.amazonaws.com/groups/s3/LogDelivery", + "Type": "Group" + }, + "Permission": "READ_ACP" + } + ], + "Owner": bucket_owner + }) + + result = s3.get_bucket_acl(Bucket="bucket") + assert len(result["Grants"]) == 2 + for g in result["Grants"]: + assert g["Grantee"]["URI"] == "http://acs.amazonaws.com/groups/s3/LogDelivery" + assert g["Grantee"]["Type"] == "Group" + assert g["Permission"] in ["WRITE", "READ_ACP"] + + # With one: + s3.put_bucket_acl(Bucket="bucket", AccessControlPolicy={ + "Grants": [ + { + "Grantee": { + "URI": "http://acs.amazonaws.com/groups/s3/LogDelivery", + "Type": "Group" + }, + "Permission": "WRITE" + } + ], + "Owner": bucket_owner + }) + result = s3.get_bucket_acl(Bucket="bucket") + assert len(result["Grants"]) == 1 + + # With no owner: + with assert_raises(ClientError) as err: + s3.put_bucket_acl(Bucket="bucket", AccessControlPolicy={ + "Grants": [ + { + "Grantee": { + "URI": "http://acs.amazonaws.com/groups/s3/LogDelivery", + "Type": "Group" + }, + "Permission": "WRITE" + } + ] + }) + assert err.exception.response["Error"]["Code"] == "MalformedACLError" + + # With incorrect permission: + with assert_raises(ClientError) as err: + s3.put_bucket_acl(Bucket="bucket", AccessControlPolicy={ + "Grants": [ + { + "Grantee": { + "URI": "http://acs.amazonaws.com/groups/s3/LogDelivery", + "Type": "Group" + }, + "Permission": "lskjflkasdjflkdsjfalisdjflkdsjf" + } + ], + "Owner": bucket_owner + }) + assert err.exception.response["Error"]["Code"] == "MalformedACLError" + + # Clear the ACLs: + result = s3.put_bucket_acl(Bucket="bucket", AccessControlPolicy={"Grants": [], "Owner": bucket_owner}) + assert not result.get("Grants") + + +@mock_s3 +def test_put_bucket_notification(): + s3 = boto3.client("s3", region_name="us-east-1") + s3.create_bucket(Bucket="bucket") + + # With no configuration: + result = s3.get_bucket_notification(Bucket="bucket") + assert not result.get("TopicConfigurations") + assert not result.get("QueueConfigurations") + assert not result.get("LambdaFunctionConfigurations") + + # Place proper topic configuration: + s3.put_bucket_notification_configuration(Bucket="bucket", + NotificationConfiguration={ + "TopicConfigurations": [ + { + "TopicArn": "arn:aws:sns:us-east-1:012345678910:mytopic", + "Events": [ + "s3:ObjectCreated:*", + "s3:ObjectRemoved:*" + ] + }, + { + "TopicArn": "arn:aws:sns:us-east-1:012345678910:myothertopic", + "Events": [ + "s3:ObjectCreated:*" + ], + "Filter": { + "Key": { + "FilterRules": [ + { + "Name": "prefix", + "Value": "images/" + }, + { + "Name": "suffix", + "Value": "png" + } + ] + } + } + } + ] + }) + + # Verify to completion: + result = s3.get_bucket_notification_configuration(Bucket="bucket") + assert len(result["TopicConfigurations"]) == 2 + assert not result.get("QueueConfigurations") + assert not result.get("LambdaFunctionConfigurations") + assert result["TopicConfigurations"][0]["TopicArn"] == "arn:aws:sns:us-east-1:012345678910:mytopic" + assert result["TopicConfigurations"][1]["TopicArn"] == "arn:aws:sns:us-east-1:012345678910:myothertopic" + assert len(result["TopicConfigurations"][0]["Events"]) == 2 + assert len(result["TopicConfigurations"][1]["Events"]) == 1 + assert result["TopicConfigurations"][0]["Events"][0] == "s3:ObjectCreated:*" + assert 
result["TopicConfigurations"][0]["Events"][1] == "s3:ObjectRemoved:*" + assert result["TopicConfigurations"][1]["Events"][0] == "s3:ObjectCreated:*" + assert result["TopicConfigurations"][0]["Id"] + assert result["TopicConfigurations"][1]["Id"] + assert not result["TopicConfigurations"][0].get("Filter") + assert len(result["TopicConfigurations"][1]["Filter"]["Key"]["FilterRules"]) == 2 + assert result["TopicConfigurations"][1]["Filter"]["Key"]["FilterRules"][0]["Name"] == "prefix" + assert result["TopicConfigurations"][1]["Filter"]["Key"]["FilterRules"][0]["Value"] == "images/" + assert result["TopicConfigurations"][1]["Filter"]["Key"]["FilterRules"][1]["Name"] == "suffix" + assert result["TopicConfigurations"][1]["Filter"]["Key"]["FilterRules"][1]["Value"] == "png" + + # Place proper queue configuration: + s3.put_bucket_notification_configuration(Bucket="bucket", + NotificationConfiguration={ + "QueueConfigurations": [ + { + "Id": "SomeID", + "QueueArn": "arn:aws:sqs:us-east-1:012345678910:myQueue", + "Events": ["s3:ObjectCreated:*"], + "Filter": { + "Key": { + "FilterRules": [ + { + "Name": "prefix", + "Value": "images/" + } + ] + } + } + } + ] + }) + result = s3.get_bucket_notification_configuration(Bucket="bucket") + assert len(result["QueueConfigurations"]) == 1 + assert not result.get("TopicConfigurations") + assert not result.get("LambdaFunctionConfigurations") + assert result["QueueConfigurations"][0]["Id"] == "SomeID" + assert result["QueueConfigurations"][0]["QueueArn"] == "arn:aws:sqs:us-east-1:012345678910:myQueue" + assert result["QueueConfigurations"][0]["Events"][0] == "s3:ObjectCreated:*" + assert len(result["QueueConfigurations"][0]["Events"]) == 1 + assert len(result["QueueConfigurations"][0]["Filter"]["Key"]["FilterRules"]) == 1 + assert result["QueueConfigurations"][0]["Filter"]["Key"]["FilterRules"][0]["Name"] == "prefix" + assert result["QueueConfigurations"][0]["Filter"]["Key"]["FilterRules"][0]["Value"] == "images/" + + # Place proper Lambda configuration: + s3.put_bucket_notification_configuration(Bucket="bucket", + NotificationConfiguration={ + "LambdaFunctionConfigurations": [ + { + "LambdaFunctionArn": + "arn:aws:lambda:us-east-1:012345678910:function:lambda", + "Events": ["s3:ObjectCreated:*"], + "Filter": { + "Key": { + "FilterRules": [ + { + "Name": "prefix", + "Value": "images/" + } + ] + } + } + } + ] + }) + result = s3.get_bucket_notification_configuration(Bucket="bucket") + assert len(result["LambdaFunctionConfigurations"]) == 1 + assert not result.get("TopicConfigurations") + assert not result.get("QueueConfigurations") + assert result["LambdaFunctionConfigurations"][0]["Id"] + assert result["LambdaFunctionConfigurations"][0]["LambdaFunctionArn"] == \ + "arn:aws:lambda:us-east-1:012345678910:function:lambda" + assert result["LambdaFunctionConfigurations"][0]["Events"][0] == "s3:ObjectCreated:*" + assert len(result["LambdaFunctionConfigurations"][0]["Events"]) == 1 + assert len(result["LambdaFunctionConfigurations"][0]["Filter"]["Key"]["FilterRules"]) == 1 + assert result["LambdaFunctionConfigurations"][0]["Filter"]["Key"]["FilterRules"][0]["Name"] == "prefix" + assert result["LambdaFunctionConfigurations"][0]["Filter"]["Key"]["FilterRules"][0]["Value"] == "images/" + + # And with all 3 set: + s3.put_bucket_notification_configuration(Bucket="bucket", + NotificationConfiguration={ + "TopicConfigurations": [ + { + "TopicArn": "arn:aws:sns:us-east-1:012345678910:mytopic", + "Events": [ + "s3:ObjectCreated:*", + "s3:ObjectRemoved:*" + ] + } + ], + 
"LambdaFunctionConfigurations": [ + { + "LambdaFunctionArn": + "arn:aws:lambda:us-east-1:012345678910:function:lambda", + "Events": ["s3:ObjectCreated:*"] + } + ], + "QueueConfigurations": [ + { + "QueueArn": "arn:aws:sqs:us-east-1:012345678910:myQueue", + "Events": ["s3:ObjectCreated:*"] + } + ] + }) + result = s3.get_bucket_notification_configuration(Bucket="bucket") + assert len(result["LambdaFunctionConfigurations"]) == 1 + assert len(result["TopicConfigurations"]) == 1 + assert len(result["QueueConfigurations"]) == 1 + + # And clear it out: + s3.put_bucket_notification_configuration(Bucket="bucket", NotificationConfiguration={}) + result = s3.get_bucket_notification_configuration(Bucket="bucket") + assert not result.get("TopicConfigurations") + assert not result.get("QueueConfigurations") + assert not result.get("LambdaFunctionConfigurations") + + +@mock_s3 +def test_put_bucket_notification_errors(): + s3 = boto3.client("s3", region_name="us-east-1") + s3.create_bucket(Bucket="bucket") + + # With incorrect ARNs: + for tech, arn in [("Queue", "sqs"), ("Topic", "sns"), ("LambdaFunction", "lambda")]: + with assert_raises(ClientError) as err: + s3.put_bucket_notification_configuration(Bucket="bucket", + NotificationConfiguration={ + "{}Configurations".format(tech): [ + { + "{}Arn".format(tech): + "arn:aws:{}:us-east-1:012345678910:lksajdfkldskfj", + "Events": ["s3:ObjectCreated:*"] + } + ] + }) + + assert err.exception.response["Error"]["Code"] == "InvalidArgument" + assert err.exception.response["Error"]["Message"] == "The ARN is not well formed" + + # Region not the same as the bucket: + with assert_raises(ClientError) as err: + s3.put_bucket_notification_configuration(Bucket="bucket", + NotificationConfiguration={ + "QueueConfigurations": [ + { + "QueueArn": + "arn:aws:sqs:us-west-2:012345678910:lksajdfkldskfj", + "Events": ["s3:ObjectCreated:*"] + } + ] + }) + + assert err.exception.response["Error"]["Code"] == "InvalidArgument" + assert err.exception.response["Error"]["Message"] == \ + "The notification destination service region is not valid for the bucket location constraint" + + # Invalid event name: + with assert_raises(ClientError) as err: + s3.put_bucket_notification_configuration(Bucket="bucket", + NotificationConfiguration={ + "QueueConfigurations": [ + { + "QueueArn": + "arn:aws:sqs:us-east-1:012345678910:lksajdfkldskfj", + "Events": ["notarealeventname"] + } + ] + }) + assert err.exception.response["Error"]["Code"] == "InvalidArgument" + assert err.exception.response["Error"]["Message"] == "The event is not supported for notifications" + + +@mock_s3 +def test_boto3_put_bucket_logging(): + s3 = boto3.client("s3", region_name="us-east-1") + bucket_name = "mybucket" + log_bucket = "logbucket" + wrong_region_bucket = "wrongregionlogbucket" + s3.create_bucket(Bucket=bucket_name) + s3.create_bucket(Bucket=log_bucket) # Adding the ACL for log-delivery later... 
+ s3.create_bucket(Bucket=wrong_region_bucket, CreateBucketConfiguration={"LocationConstraint": "us-west-2"}) + + # No logging config: + result = s3.get_bucket_logging(Bucket=bucket_name) + assert not result.get("LoggingEnabled") + + # A log-bucket that doesn't exist: + with assert_raises(ClientError) as err: + s3.put_bucket_logging(Bucket=bucket_name, BucketLoggingStatus={ + "LoggingEnabled": { + "TargetBucket": "IAMNOTREAL", + "TargetPrefix": "" + } + }) + assert err.exception.response["Error"]["Code"] == "InvalidTargetBucketForLogging" + + # A log-bucket that's missing the proper ACLs for LogDelivery: + with assert_raises(ClientError) as err: + s3.put_bucket_logging(Bucket=bucket_name, BucketLoggingStatus={ + "LoggingEnabled": { + "TargetBucket": log_bucket, + "TargetPrefix": "" + } + }) + assert err.exception.response["Error"]["Code"] == "InvalidTargetBucketForLogging" + assert "log-delivery" in err.exception.response["Error"]["Message"] + + # Add the proper "log-delivery" ACL to the log buckets: + bucket_owner = s3.get_bucket_acl(Bucket=log_bucket)["Owner"] + for bucket in [log_bucket, wrong_region_bucket]: + s3.put_bucket_acl(Bucket=bucket, AccessControlPolicy={ + "Grants": [ + { + "Grantee": { + "URI": "http://acs.amazonaws.com/groups/s3/LogDelivery", + "Type": "Group" + }, + "Permission": "WRITE" + }, + { + "Grantee": { + "URI": "http://acs.amazonaws.com/groups/s3/LogDelivery", + "Type": "Group" + }, + "Permission": "READ_ACP" + }, + { + "Grantee": { + "Type": "CanonicalUser", + "ID": bucket_owner["ID"] + }, + "Permission": "FULL_CONTROL" + } + ], + "Owner": bucket_owner + }) + + # A log-bucket that's in the wrong region: + with assert_raises(ClientError) as err: + s3.put_bucket_logging(Bucket=bucket_name, BucketLoggingStatus={ + "LoggingEnabled": { + "TargetBucket": wrong_region_bucket, + "TargetPrefix": "" + } + }) + assert err.exception.response["Error"]["Code"] == "CrossLocationLoggingProhibitted" + + # Correct logging: + s3.put_bucket_logging(Bucket=bucket_name, BucketLoggingStatus={ + "LoggingEnabled": { + "TargetBucket": log_bucket, + "TargetPrefix": "{}/".format(bucket_name) + } + }) + result = s3.get_bucket_logging(Bucket=bucket_name) + assert result["LoggingEnabled"]["TargetBucket"] == log_bucket + assert result["LoggingEnabled"]["TargetPrefix"] == "{}/".format(bucket_name) + assert not result["LoggingEnabled"].get("TargetGrants") + + # And disabling: + s3.put_bucket_logging(Bucket=bucket_name, BucketLoggingStatus={}) + assert not s3.get_bucket_logging(Bucket=bucket_name).get("LoggingEnabled") + + # And enabling with multiple target grants: + s3.put_bucket_logging(Bucket=bucket_name, BucketLoggingStatus={ + "LoggingEnabled": { + "TargetBucket": log_bucket, + "TargetPrefix": "{}/".format(bucket_name), + "TargetGrants": [ + { + "Grantee": { + "ID": "SOMEIDSTRINGHERE9238748923734823917498237489237409123840983274", + "Type": "CanonicalUser" + }, + "Permission": "READ" + }, + { + "Grantee": { + "ID": "SOMEIDSTRINGHERE9238748923734823917498237489237409123840983274", + "Type": "CanonicalUser" + }, + "Permission": "WRITE" + } + ] + } + }) + + result = s3.get_bucket_logging(Bucket=bucket_name) + assert len(result["LoggingEnabled"]["TargetGrants"]) == 2 + assert result["LoggingEnabled"]["TargetGrants"][0]["Grantee"]["ID"] == \ + "SOMEIDSTRINGHERE9238748923734823917498237489237409123840983274" + + # Test with just 1 grant: + s3.put_bucket_logging(Bucket=bucket_name, BucketLoggingStatus={ + "LoggingEnabled": { + "TargetBucket": log_bucket, + "TargetPrefix": "{}/".format(bucket_name), 
+ "TargetGrants": [ + { + "Grantee": { + "ID": "SOMEIDSTRINGHERE9238748923734823917498237489237409123840983274", + "Type": "CanonicalUser" + }, + "Permission": "READ" + } + ] + } + }) + result = s3.get_bucket_logging(Bucket=bucket_name) + assert len(result["LoggingEnabled"]["TargetGrants"]) == 1 + + # With an invalid grant: + with assert_raises(ClientError) as err: + s3.put_bucket_logging(Bucket=bucket_name, BucketLoggingStatus={ + "LoggingEnabled": { + "TargetBucket": log_bucket, + "TargetPrefix": "{}/".format(bucket_name), + "TargetGrants": [ + { + "Grantee": { + "ID": "SOMEIDSTRINGHERE9238748923734823917498237489237409123840983274", + "Type": "CanonicalUser" + }, + "Permission": "NOTAREALPERM" + } + ] + } + }) + assert err.exception.response["Error"]["Code"] == "MalformedXML" + + +@mock_s3 +def test_boto3_put_object_tagging(): + s3 = boto3.client('s3', region_name='us-east-1') + bucket_name = 'mybucket' + key = 'key-with-tags' + s3.create_bucket(Bucket=bucket_name) + + with assert_raises(ClientError) as err: + s3.put_object_tagging( + Bucket=bucket_name, + Key=key, + Tagging={'TagSet': [ + {'Key': 'item1', 'Value': 'foo'}, + {'Key': 'item2', 'Value': 'bar'}, + ]} + ) + + e = err.exception + e.response['Error'].should.equal({ + 'Code': 'NoSuchKey', + 'Message': 'The specified key does not exist.', + 'RequestID': '7a62c49f-347e-4fc4-9331-6e8eEXAMPLE', + }) + + s3.put_object( + Bucket=bucket_name, + Key=key, + Body='test' + ) + + resp = s3.put_object_tagging( + Bucket=bucket_name, + Key=key, + Tagging={'TagSet': [ + {'Key': 'item1', 'Value': 'foo'}, + {'Key': 'item2', 'Value': 'bar'}, + ]} + ) + + resp['ResponseMetadata']['HTTPStatusCode'].should.equal(200) + + +@mock_s3 +def test_boto3_put_object_tagging_with_single_tag(): + s3 = boto3.client('s3', region_name='us-east-1') + bucket_name = 'mybucket' + key = 'key-with-tags' + s3.create_bucket(Bucket=bucket_name) + + s3.put_object( + Bucket=bucket_name, + Key=key, + Body='test' + ) + + resp = s3.put_object_tagging( + Bucket=bucket_name, + Key=key, + Tagging={'TagSet': [ + {'Key': 'item1', 'Value': 'foo'} + ]} + ) + + resp['ResponseMetadata']['HTTPStatusCode'].should.equal(200) + + +@mock_s3 +def test_boto3_get_object_tagging(): + s3 = boto3.client('s3', region_name='us-east-1') + bucket_name = 'mybucket' + key = 'key-with-tags' + s3.create_bucket(Bucket=bucket_name) + + s3.put_object( + Bucket=bucket_name, + Key=key, + Body='test' + ) + + resp = s3.get_object_tagging(Bucket=bucket_name, Key=key) + resp['TagSet'].should.have.length_of(0) + + resp = s3.put_object_tagging( + Bucket=bucket_name, + Key=key, + Tagging={'TagSet': [ + {'Key': 'item1', 'Value': 'foo'}, + {'Key': 'item2', 'Value': 'bar'}, + ]} + ) + resp = s3.get_object_tagging(Bucket=bucket_name, Key=key) + + resp['TagSet'].should.have.length_of(2) + resp['TagSet'].should.contain({'Key': 'item1', 'Value': 'foo'}) + resp['TagSet'].should.contain({'Key': 'item2', 'Value': 'bar'}) + + +@mock_s3 +def test_boto3_list_object_versions(): + s3 = boto3.client('s3', region_name='us-east-1') + bucket_name = 'mybucket' + key = 'key-with-versions' + s3.create_bucket(Bucket=bucket_name) + s3.put_bucket_versioning( + Bucket=bucket_name, + VersioningConfiguration={ + 'Status': 'Enabled' + } + ) + items = (six.b('v1'), six.b('v2')) + for body in items: + s3.put_object( + Bucket=bucket_name, + Key=key, + Body=body + ) + response = s3.list_object_versions( + Bucket=bucket_name + ) + # Two object versions should be returned + len(response['Versions']).should.equal(2) + keys = set([item['Key'] for item 
in response['Versions']]) + keys.should.equal({key}) + # Test latest object version is returned + response = s3.get_object(Bucket=bucket_name, Key=key) + response['Body'].read().should.equal(items[-1]) + + +@mock_s3 +def test_boto3_bad_prefix_list_object_versions(): + s3 = boto3.client('s3', region_name='us-east-1') + bucket_name = 'mybucket' + key = 'key-with-versions' + bad_prefix = 'key-that-does-not-exist' + s3.create_bucket(Bucket=bucket_name) + s3.put_bucket_versioning( + Bucket=bucket_name, + VersioningConfiguration={ + 'Status': 'Enabled' + } + ) + items = (six.b('v1'), six.b('v2')) + for body in items: + s3.put_object( + Bucket=bucket_name, + Key=key, + Body=body + ) + response = s3.list_object_versions( + Bucket=bucket_name, + Prefix=bad_prefix, + ) + response['ResponseMetadata']['HTTPStatusCode'].should.equal(200) + response.should_not.contain('Versions') + response.should_not.contain('DeleteMarkers') + + +@mock_s3 +def test_boto3_delete_markers(): + s3 = boto3.client('s3', region_name='us-east-1') + bucket_name = 'mybucket' + key = u'key-with-versions-and-unicode-ó' + s3.create_bucket(Bucket=bucket_name) + s3.put_bucket_versioning( + Bucket=bucket_name, + VersioningConfiguration={ + 'Status': 'Enabled' + } + ) + items = (six.b('v1'), six.b('v2')) + for body in items: + s3.put_object( + Bucket=bucket_name, + Key=key, + Body=body + ) + + s3.delete_objects(Bucket=bucket_name, Delete={'Objects': [{'Key': key}]}) + + with assert_raises(ClientError) as e: + s3.get_object( + Bucket=bucket_name, + Key=key + ) + e.response['Error']['Code'].should.equal('404') + + s3.delete_object( + Bucket=bucket_name, + Key=key, + VersionId='2' + ) + response = s3.get_object( + Bucket=bucket_name, + Key=key + ) + response['Body'].read().should.equal(items[-1]) + response = s3.list_object_versions( + Bucket=bucket_name + ) + response['Versions'].should.have.length_of(2) + + # We've asserted there is only 2 records so one is newest, one is oldest + latest = list(filter(lambda item: item['IsLatest'], response['Versions']))[0] + oldest = list(filter(lambda item: not item['IsLatest'], response['Versions']))[0] + + # Double check ordering of version ID's + latest['VersionId'].should.equal('1') + oldest['VersionId'].should.equal('0') + + # Double check the name is still unicode + latest['Key'].should.equal('key-with-versions-and-unicode-ó') + oldest['Key'].should.equal('key-with-versions-and-unicode-ó') + + +@mock_s3 +def test_boto3_multiple_delete_markers(): + s3 = boto3.client('s3', region_name='us-east-1') + bucket_name = 'mybucket' + key = u'key-with-versions-and-unicode-ó' + s3.create_bucket(Bucket=bucket_name) + s3.put_bucket_versioning( + Bucket=bucket_name, + VersioningConfiguration={ + 'Status': 'Enabled' + } + ) + items = (six.b('v1'), six.b('v2')) + for body in items: + s3.put_object( + Bucket=bucket_name, + Key=key, + Body=body + ) + + # Delete the object twice to add multiple delete markers + s3.delete_object(Bucket=bucket_name, Key=key) + s3.delete_object(Bucket=bucket_name, Key=key) + + response = s3.list_object_versions(Bucket=bucket_name) + response['DeleteMarkers'].should.have.length_of(2) + + with assert_raises(ClientError) as e: + s3.get_object( + Bucket=bucket_name, + Key=key + ) + e.response['Error']['Code'].should.equal('404') + + # Remove both delete markers to restore the object + s3.delete_object( + Bucket=bucket_name, + Key=key, + VersionId='2' + ) + s3.delete_object( + Bucket=bucket_name, + Key=key, + VersionId='3' + ) + + response = s3.get_object( + Bucket=bucket_name, + Key=key + 
) + response['Body'].read().should.equal(items[-1]) + response = s3.list_object_versions(Bucket=bucket_name) + response['Versions'].should.have.length_of(2) + + # We've asserted there is only 2 records so one is newest, one is oldest + latest = list(filter(lambda item: item['IsLatest'], response['Versions']))[0] + oldest = list(filter(lambda item: not item['IsLatest'], response['Versions']))[0] + + # Double check ordering of version ID's + latest['VersionId'].should.equal('1') + oldest['VersionId'].should.equal('0') + + # Double check the name is still unicode + latest['Key'].should.equal('key-with-versions-and-unicode-ó') + oldest['Key'].should.equal('key-with-versions-and-unicode-ó') + +@mock_s3 +def test_get_stream_gzipped(): + payload = b"this is some stuff here" + + s3_client = boto3.client("s3", region_name='us-east-1') + s3_client.create_bucket(Bucket='moto-tests') + buffer_ = BytesIO() + with GzipFile(fileobj=buffer_, mode='w') as f: + f.write(payload) + payload_gz = buffer_.getvalue() + + s3_client.put_object( + Bucket='moto-tests', + Key='keyname', + Body=payload_gz, + ContentEncoding='gzip', + ) + + obj = s3_client.get_object( + Bucket='moto-tests', + Key='keyname', + ) + res = zlib.decompress(obj['Body'].read(), 16 + zlib.MAX_WBITS) + assert res == payload + + +TEST_XML = """\ + + + + index.html + + + + + test/testing + + + test.txt + + + + +""" diff --git a/tests/test_s3/test_s3_lifecycle.py b/tests/test_s3/test_s3_lifecycle.py index 3d533a641048..6cb43e96f959 100644 --- a/tests/test_s3/test_s3_lifecycle.py +++ b/tests/test_s3/test_s3_lifecycle.py @@ -1,387 +1,387 @@ -from __future__ import unicode_literals - -import boto -import boto3 -from boto.exception import S3ResponseError -from boto.s3.lifecycle import Lifecycle, Transition, Expiration, Rule - -import sure # noqa -from botocore.exceptions import ClientError -from datetime import datetime -from nose.tools import assert_raises - -from moto import mock_s3_deprecated, mock_s3 - - -@mock_s3_deprecated -def test_lifecycle_create(): - conn = boto.s3.connect_to_region("us-west-1") - bucket = conn.create_bucket("foobar") - - lifecycle = Lifecycle() - lifecycle.add_rule('myid', '', 'Enabled', 30) - bucket.configure_lifecycle(lifecycle) - response = bucket.get_lifecycle_config() - len(response).should.equal(1) - lifecycle = response[0] - lifecycle.id.should.equal('myid') - lifecycle.prefix.should.equal('') - lifecycle.status.should.equal('Enabled') - list(lifecycle.transition).should.equal([]) - - -@mock_s3 -def test_lifecycle_with_filters(): - client = boto3.client("s3") - client.create_bucket(Bucket="bucket") - - # Create a lifecycle rule with a Filter (no tags): - lfc = { - "Rules": [ - { - "Expiration": { - "Days": 7 - }, - "ID": "wholebucket", - "Filter": { - "Prefix": "" - }, - "Status": "Enabled" - } - ] - } - client.put_bucket_lifecycle_configuration(Bucket="bucket", LifecycleConfiguration=lfc) - result = client.get_bucket_lifecycle_configuration(Bucket="bucket") - assert len(result["Rules"]) == 1 - assert result["Rules"][0]["Filter"]["Prefix"] == '' - assert not result["Rules"][0]["Filter"].get("And") - assert not result["Rules"][0]["Filter"].get("Tag") - with assert_raises(KeyError): - assert result["Rules"][0]["Prefix"] - - # With a tag: - lfc["Rules"][0]["Filter"]["Tag"] = { - "Key": "mytag", - "Value": "mytagvalue" - } - client.put_bucket_lifecycle_configuration(Bucket="bucket", LifecycleConfiguration=lfc) - result = client.get_bucket_lifecycle_configuration(Bucket="bucket") - assert len(result["Rules"]) == 1 - assert 
result["Rules"][0]["Filter"]["Prefix"] == '' - assert not result["Rules"][0]["Filter"].get("And") - assert result["Rules"][0]["Filter"]["Tag"]["Key"] == "mytag" - assert result["Rules"][0]["Filter"]["Tag"]["Value"] == "mytagvalue" - with assert_raises(KeyError): - assert result["Rules"][0]["Prefix"] - - # With And (single tag): - lfc["Rules"][0]["Filter"]["And"] = { - "Prefix": "some/prefix", - "Tags": [ - { - "Key": "mytag", - "Value": "mytagvalue" - } - ] - } - client.put_bucket_lifecycle_configuration(Bucket="bucket", LifecycleConfiguration=lfc) - result = client.get_bucket_lifecycle_configuration(Bucket="bucket") - assert len(result["Rules"]) == 1 - assert result["Rules"][0]["Filter"]["Prefix"] == "" - assert result["Rules"][0]["Filter"]["And"]["Prefix"] == "some/prefix" - assert len(result["Rules"][0]["Filter"]["And"]["Tags"]) == 1 - assert result["Rules"][0]["Filter"]["And"]["Tags"][0]["Key"] == "mytag" - assert result["Rules"][0]["Filter"]["And"]["Tags"][0]["Value"] == "mytagvalue" - assert result["Rules"][0]["Filter"]["Tag"]["Key"] == "mytag" - assert result["Rules"][0]["Filter"]["Tag"]["Value"] == "mytagvalue" - with assert_raises(KeyError): - assert result["Rules"][0]["Prefix"] - - # With multiple And tags: - lfc["Rules"][0]["Filter"]["And"] = { - "Prefix": "some/prefix", - "Tags": [ - { - "Key": "mytag", - "Value": "mytagvalue" - }, - { - "Key": "mytag2", - "Value": "mytagvalue2" - } - ] - } - client.put_bucket_lifecycle_configuration(Bucket="bucket", LifecycleConfiguration=lfc) - result = client.get_bucket_lifecycle_configuration(Bucket="bucket") - assert len(result["Rules"]) == 1 - assert result["Rules"][0]["Filter"]["Prefix"] == "" - assert result["Rules"][0]["Filter"]["And"]["Prefix"] == "some/prefix" - assert len(result["Rules"][0]["Filter"]["And"]["Tags"]) == 2 - assert result["Rules"][0]["Filter"]["And"]["Tags"][0]["Key"] == "mytag" - assert result["Rules"][0]["Filter"]["And"]["Tags"][0]["Value"] == "mytagvalue" - assert result["Rules"][0]["Filter"]["Tag"]["Key"] == "mytag" - assert result["Rules"][0]["Filter"]["Tag"]["Value"] == "mytagvalue" - assert result["Rules"][0]["Filter"]["And"]["Tags"][1]["Key"] == "mytag2" - assert result["Rules"][0]["Filter"]["And"]["Tags"][1]["Value"] == "mytagvalue2" - assert result["Rules"][0]["Filter"]["Tag"]["Key"] == "mytag" - assert result["Rules"][0]["Filter"]["Tag"]["Value"] == "mytagvalue" - with assert_raises(KeyError): - assert result["Rules"][0]["Prefix"] - - # Can't have both filter and prefix: - lfc["Rules"][0]["Prefix"] = '' - with assert_raises(ClientError) as err: - client.put_bucket_lifecycle_configuration(Bucket="bucket", LifecycleConfiguration=lfc) - assert err.exception.response["Error"]["Code"] == "MalformedXML" - - lfc["Rules"][0]["Prefix"] = 'some/path' - with assert_raises(ClientError) as err: - client.put_bucket_lifecycle_configuration(Bucket="bucket", LifecycleConfiguration=lfc) - assert err.exception.response["Error"]["Code"] == "MalformedXML" - - # No filters -- just a prefix: - del lfc["Rules"][0]["Filter"] - client.put_bucket_lifecycle_configuration(Bucket="bucket", LifecycleConfiguration=lfc) - result = client.get_bucket_lifecycle_configuration(Bucket="bucket") - assert not result["Rules"][0].get("Filter") - assert result["Rules"][0]["Prefix"] == "some/path" - - -@mock_s3 -def test_lifecycle_with_eodm(): - client = boto3.client("s3") - client.create_bucket(Bucket="bucket") - - lfc = { - "Rules": [ - { - "Expiration": { - "ExpiredObjectDeleteMarker": True - }, - "ID": "wholebucket", - "Filter": { - "Prefix": "" - 
}, - "Status": "Enabled" - } - ] - } - client.put_bucket_lifecycle_configuration(Bucket="bucket", LifecycleConfiguration=lfc) - result = client.get_bucket_lifecycle_configuration(Bucket="bucket") - assert len(result["Rules"]) == 1 - assert result["Rules"][0]["Expiration"]["ExpiredObjectDeleteMarker"] - - # Set to False: - lfc["Rules"][0]["Expiration"]["ExpiredObjectDeleteMarker"] = False - client.put_bucket_lifecycle_configuration(Bucket="bucket", LifecycleConfiguration=lfc) - result = client.get_bucket_lifecycle_configuration(Bucket="bucket") - assert len(result["Rules"]) == 1 - assert not result["Rules"][0]["Expiration"]["ExpiredObjectDeleteMarker"] - - # With failure: - lfc["Rules"][0]["Expiration"]["Days"] = 7 - with assert_raises(ClientError) as err: - client.put_bucket_lifecycle_configuration(Bucket="bucket", LifecycleConfiguration=lfc) - assert err.exception.response["Error"]["Code"] == "MalformedXML" - del lfc["Rules"][0]["Expiration"]["Days"] - - lfc["Rules"][0]["Expiration"]["Date"] = datetime(2015, 1, 1) - with assert_raises(ClientError) as err: - client.put_bucket_lifecycle_configuration(Bucket="bucket", LifecycleConfiguration=lfc) - assert err.exception.response["Error"]["Code"] == "MalformedXML" - - -@mock_s3 -def test_lifecycle_with_nve(): - client = boto3.client("s3") - client.create_bucket(Bucket="bucket") - - lfc = { - "Rules": [ - { - "NoncurrentVersionExpiration": { - "NoncurrentDays": 30 - }, - "ID": "wholebucket", - "Filter": { - "Prefix": "" - }, - "Status": "Enabled" - } - ] - } - client.put_bucket_lifecycle_configuration(Bucket="bucket", LifecycleConfiguration=lfc) - result = client.get_bucket_lifecycle_configuration(Bucket="bucket") - assert len(result["Rules"]) == 1 - assert result["Rules"][0]["NoncurrentVersionExpiration"]["NoncurrentDays"] == 30 - - # Change NoncurrentDays: - lfc["Rules"][0]["NoncurrentVersionExpiration"]["NoncurrentDays"] = 10 - client.put_bucket_lifecycle_configuration(Bucket="bucket", LifecycleConfiguration=lfc) - result = client.get_bucket_lifecycle_configuration(Bucket="bucket") - assert len(result["Rules"]) == 1 - assert result["Rules"][0]["NoncurrentVersionExpiration"]["NoncurrentDays"] == 10 - - # TODO: Add test for failures due to missing children - - -@mock_s3 -def test_lifecycle_with_nvt(): - client = boto3.client("s3") - client.create_bucket(Bucket="bucket") - - lfc = { - "Rules": [ - { - "NoncurrentVersionTransitions": [{ - "NoncurrentDays": 30, - "StorageClass": "ONEZONE_IA" - }], - "ID": "wholebucket", - "Filter": { - "Prefix": "" - }, - "Status": "Enabled" - } - ] - } - client.put_bucket_lifecycle_configuration(Bucket="bucket", LifecycleConfiguration=lfc) - result = client.get_bucket_lifecycle_configuration(Bucket="bucket") - assert len(result["Rules"]) == 1 - assert result["Rules"][0]["NoncurrentVersionTransitions"][0]["NoncurrentDays"] == 30 - assert result["Rules"][0]["NoncurrentVersionTransitions"][0]["StorageClass"] == "ONEZONE_IA" - - # Change NoncurrentDays: - lfc["Rules"][0]["NoncurrentVersionTransitions"][0]["NoncurrentDays"] = 10 - client.put_bucket_lifecycle_configuration(Bucket="bucket", LifecycleConfiguration=lfc) - result = client.get_bucket_lifecycle_configuration(Bucket="bucket") - assert len(result["Rules"]) == 1 - assert result["Rules"][0]["NoncurrentVersionTransitions"][0]["NoncurrentDays"] == 10 - - # Change StorageClass: - lfc["Rules"][0]["NoncurrentVersionTransitions"][0]["StorageClass"] = "GLACIER" - client.put_bucket_lifecycle_configuration(Bucket="bucket", LifecycleConfiguration=lfc) - result = 
client.get_bucket_lifecycle_configuration(Bucket="bucket") - assert len(result["Rules"]) == 1 - assert result["Rules"][0]["NoncurrentVersionTransitions"][0]["StorageClass"] == "GLACIER" - - # With failures for missing children: - del lfc["Rules"][0]["NoncurrentVersionTransitions"][0]["NoncurrentDays"] - with assert_raises(ClientError) as err: - client.put_bucket_lifecycle_configuration(Bucket="bucket", LifecycleConfiguration=lfc) - assert err.exception.response["Error"]["Code"] == "MalformedXML" - lfc["Rules"][0]["NoncurrentVersionTransitions"][0]["NoncurrentDays"] = 30 - - del lfc["Rules"][0]["NoncurrentVersionTransitions"][0]["StorageClass"] - with assert_raises(ClientError) as err: - client.put_bucket_lifecycle_configuration(Bucket="bucket", LifecycleConfiguration=lfc) - assert err.exception.response["Error"]["Code"] == "MalformedXML" - - -@mock_s3 -def test_lifecycle_with_aimu(): - client = boto3.client("s3") - client.create_bucket(Bucket="bucket") - - lfc = { - "Rules": [ - { - "AbortIncompleteMultipartUpload": { - "DaysAfterInitiation": 7 - }, - "ID": "wholebucket", - "Filter": { - "Prefix": "" - }, - "Status": "Enabled" - } - ] - } - client.put_bucket_lifecycle_configuration(Bucket="bucket", LifecycleConfiguration=lfc) - result = client.get_bucket_lifecycle_configuration(Bucket="bucket") - assert len(result["Rules"]) == 1 - assert result["Rules"][0]["AbortIncompleteMultipartUpload"]["DaysAfterInitiation"] == 7 - - # Change DaysAfterInitiation: - lfc["Rules"][0]["AbortIncompleteMultipartUpload"]["DaysAfterInitiation"] = 30 - client.put_bucket_lifecycle_configuration(Bucket="bucket", LifecycleConfiguration=lfc) - result = client.get_bucket_lifecycle_configuration(Bucket="bucket") - assert len(result["Rules"]) == 1 - assert result["Rules"][0]["AbortIncompleteMultipartUpload"]["DaysAfterInitiation"] == 30 - - # TODO: Add test for failures due to missing children - - -@mock_s3_deprecated -def test_lifecycle_with_glacier_transition(): - conn = boto.s3.connect_to_region("us-west-1") - bucket = conn.create_bucket("foobar") - - lifecycle = Lifecycle() - transition = Transition(days=30, storage_class='GLACIER') - rule = Rule('myid', prefix='', status='Enabled', expiration=None, - transition=transition) - lifecycle.append(rule) - bucket.configure_lifecycle(lifecycle) - response = bucket.get_lifecycle_config() - transition = response[0].transition - transition.days.should.equal(30) - transition.storage_class.should.equal('GLACIER') - transition.date.should.equal(None) - - -@mock_s3_deprecated -def test_lifecycle_multi(): - conn = boto.s3.connect_to_region("us-west-1") - bucket = conn.create_bucket("foobar") - - date = '2022-10-12T00:00:00.000Z' - sc = 'GLACIER' - lifecycle = Lifecycle() - lifecycle.add_rule("1", "1/", "Enabled", 1) - lifecycle.add_rule("2", "2/", "Enabled", Expiration(days=2)) - lifecycle.add_rule("3", "3/", "Enabled", Expiration(date=date)) - lifecycle.add_rule("4", "4/", "Enabled", None, - Transition(days=4, storage_class=sc)) - lifecycle.add_rule("5", "5/", "Enabled", None, - Transition(date=date, storage_class=sc)) - - bucket.configure_lifecycle(lifecycle) - # read the lifecycle back - rules = bucket.get_lifecycle_config() - - for rule in rules: - if rule.id == "1": - rule.prefix.should.equal("1/") - rule.expiration.days.should.equal(1) - elif rule.id == "2": - rule.prefix.should.equal("2/") - rule.expiration.days.should.equal(2) - elif rule.id == "3": - rule.prefix.should.equal("3/") - rule.expiration.date.should.equal(date) - elif rule.id == "4": - 
rule.prefix.should.equal("4/") - rule.transition.days.should.equal(4) - rule.transition.storage_class.should.equal(sc) - elif rule.id == "5": - rule.prefix.should.equal("5/") - rule.transition.date.should.equal(date) - rule.transition.storage_class.should.equal(sc) - else: - assert False, "Invalid rule id" - - -@mock_s3_deprecated -def test_lifecycle_delete(): - conn = boto.s3.connect_to_region("us-west-1") - bucket = conn.create_bucket("foobar") - - lifecycle = Lifecycle() - lifecycle.add_rule(expiration=30) - bucket.configure_lifecycle(lifecycle) - response = bucket.get_lifecycle_config() - response.should.have.length_of(1) - - bucket.delete_lifecycle_configuration() - bucket.get_lifecycle_config.when.called_with().should.throw(S3ResponseError) +from __future__ import unicode_literals + +import boto +import boto3 +from boto.exception import S3ResponseError +from boto.s3.lifecycle import Lifecycle, Transition, Expiration, Rule + +import sure # noqa +from botocore.exceptions import ClientError +from datetime import datetime +from nose.tools import assert_raises + +from moto import mock_s3_deprecated, mock_s3 + + +@mock_s3_deprecated +def test_lifecycle_create(): + conn = boto.s3.connect_to_region("us-west-1") + bucket = conn.create_bucket("foobar") + + lifecycle = Lifecycle() + lifecycle.add_rule('myid', '', 'Enabled', 30) + bucket.configure_lifecycle(lifecycle) + response = bucket.get_lifecycle_config() + len(response).should.equal(1) + lifecycle = response[0] + lifecycle.id.should.equal('myid') + lifecycle.prefix.should.equal('') + lifecycle.status.should.equal('Enabled') + list(lifecycle.transition).should.equal([]) + + +@mock_s3 +def test_lifecycle_with_filters(): + client = boto3.client("s3") + client.create_bucket(Bucket="bucket") + + # Create a lifecycle rule with a Filter (no tags): + lfc = { + "Rules": [ + { + "Expiration": { + "Days": 7 + }, + "ID": "wholebucket", + "Filter": { + "Prefix": "" + }, + "Status": "Enabled" + } + ] + } + client.put_bucket_lifecycle_configuration(Bucket="bucket", LifecycleConfiguration=lfc) + result = client.get_bucket_lifecycle_configuration(Bucket="bucket") + assert len(result["Rules"]) == 1 + assert result["Rules"][0]["Filter"]["Prefix"] == '' + assert not result["Rules"][0]["Filter"].get("And") + assert not result["Rules"][0]["Filter"].get("Tag") + with assert_raises(KeyError): + assert result["Rules"][0]["Prefix"] + + # With a tag: + lfc["Rules"][0]["Filter"]["Tag"] = { + "Key": "mytag", + "Value": "mytagvalue" + } + client.put_bucket_lifecycle_configuration(Bucket="bucket", LifecycleConfiguration=lfc) + result = client.get_bucket_lifecycle_configuration(Bucket="bucket") + assert len(result["Rules"]) == 1 + assert result["Rules"][0]["Filter"]["Prefix"] == '' + assert not result["Rules"][0]["Filter"].get("And") + assert result["Rules"][0]["Filter"]["Tag"]["Key"] == "mytag" + assert result["Rules"][0]["Filter"]["Tag"]["Value"] == "mytagvalue" + with assert_raises(KeyError): + assert result["Rules"][0]["Prefix"] + + # With And (single tag): + lfc["Rules"][0]["Filter"]["And"] = { + "Prefix": "some/prefix", + "Tags": [ + { + "Key": "mytag", + "Value": "mytagvalue" + } + ] + } + client.put_bucket_lifecycle_configuration(Bucket="bucket", LifecycleConfiguration=lfc) + result = client.get_bucket_lifecycle_configuration(Bucket="bucket") + assert len(result["Rules"]) == 1 + assert result["Rules"][0]["Filter"]["Prefix"] == "" + assert result["Rules"][0]["Filter"]["And"]["Prefix"] == "some/prefix" + assert len(result["Rules"][0]["Filter"]["And"]["Tags"]) == 1 
+ assert result["Rules"][0]["Filter"]["And"]["Tags"][0]["Key"] == "mytag" + assert result["Rules"][0]["Filter"]["And"]["Tags"][0]["Value"] == "mytagvalue" + assert result["Rules"][0]["Filter"]["Tag"]["Key"] == "mytag" + assert result["Rules"][0]["Filter"]["Tag"]["Value"] == "mytagvalue" + with assert_raises(KeyError): + assert result["Rules"][0]["Prefix"] + + # With multiple And tags: + lfc["Rules"][0]["Filter"]["And"] = { + "Prefix": "some/prefix", + "Tags": [ + { + "Key": "mytag", + "Value": "mytagvalue" + }, + { + "Key": "mytag2", + "Value": "mytagvalue2" + } + ] + } + client.put_bucket_lifecycle_configuration(Bucket="bucket", LifecycleConfiguration=lfc) + result = client.get_bucket_lifecycle_configuration(Bucket="bucket") + assert len(result["Rules"]) == 1 + assert result["Rules"][0]["Filter"]["Prefix"] == "" + assert result["Rules"][0]["Filter"]["And"]["Prefix"] == "some/prefix" + assert len(result["Rules"][0]["Filter"]["And"]["Tags"]) == 2 + assert result["Rules"][0]["Filter"]["And"]["Tags"][0]["Key"] == "mytag" + assert result["Rules"][0]["Filter"]["And"]["Tags"][0]["Value"] == "mytagvalue" + assert result["Rules"][0]["Filter"]["Tag"]["Key"] == "mytag" + assert result["Rules"][0]["Filter"]["Tag"]["Value"] == "mytagvalue" + assert result["Rules"][0]["Filter"]["And"]["Tags"][1]["Key"] == "mytag2" + assert result["Rules"][0]["Filter"]["And"]["Tags"][1]["Value"] == "mytagvalue2" + assert result["Rules"][0]["Filter"]["Tag"]["Key"] == "mytag" + assert result["Rules"][0]["Filter"]["Tag"]["Value"] == "mytagvalue" + with assert_raises(KeyError): + assert result["Rules"][0]["Prefix"] + + # Can't have both filter and prefix: + lfc["Rules"][0]["Prefix"] = '' + with assert_raises(ClientError) as err: + client.put_bucket_lifecycle_configuration(Bucket="bucket", LifecycleConfiguration=lfc) + assert err.exception.response["Error"]["Code"] == "MalformedXML" + + lfc["Rules"][0]["Prefix"] = 'some/path' + with assert_raises(ClientError) as err: + client.put_bucket_lifecycle_configuration(Bucket="bucket", LifecycleConfiguration=lfc) + assert err.exception.response["Error"]["Code"] == "MalformedXML" + + # No filters -- just a prefix: + del lfc["Rules"][0]["Filter"] + client.put_bucket_lifecycle_configuration(Bucket="bucket", LifecycleConfiguration=lfc) + result = client.get_bucket_lifecycle_configuration(Bucket="bucket") + assert not result["Rules"][0].get("Filter") + assert result["Rules"][0]["Prefix"] == "some/path" + + +@mock_s3 +def test_lifecycle_with_eodm(): + client = boto3.client("s3") + client.create_bucket(Bucket="bucket") + + lfc = { + "Rules": [ + { + "Expiration": { + "ExpiredObjectDeleteMarker": True + }, + "ID": "wholebucket", + "Filter": { + "Prefix": "" + }, + "Status": "Enabled" + } + ] + } + client.put_bucket_lifecycle_configuration(Bucket="bucket", LifecycleConfiguration=lfc) + result = client.get_bucket_lifecycle_configuration(Bucket="bucket") + assert len(result["Rules"]) == 1 + assert result["Rules"][0]["Expiration"]["ExpiredObjectDeleteMarker"] + + # Set to False: + lfc["Rules"][0]["Expiration"]["ExpiredObjectDeleteMarker"] = False + client.put_bucket_lifecycle_configuration(Bucket="bucket", LifecycleConfiguration=lfc) + result = client.get_bucket_lifecycle_configuration(Bucket="bucket") + assert len(result["Rules"]) == 1 + assert not result["Rules"][0]["Expiration"]["ExpiredObjectDeleteMarker"] + + # With failure: + lfc["Rules"][0]["Expiration"]["Days"] = 7 + with assert_raises(ClientError) as err: + client.put_bucket_lifecycle_configuration(Bucket="bucket", 
LifecycleConfiguration=lfc) + assert err.exception.response["Error"]["Code"] == "MalformedXML" + del lfc["Rules"][0]["Expiration"]["Days"] + + lfc["Rules"][0]["Expiration"]["Date"] = datetime(2015, 1, 1) + with assert_raises(ClientError) as err: + client.put_bucket_lifecycle_configuration(Bucket="bucket", LifecycleConfiguration=lfc) + assert err.exception.response["Error"]["Code"] == "MalformedXML" + + +@mock_s3 +def test_lifecycle_with_nve(): + client = boto3.client("s3") + client.create_bucket(Bucket="bucket") + + lfc = { + "Rules": [ + { + "NoncurrentVersionExpiration": { + "NoncurrentDays": 30 + }, + "ID": "wholebucket", + "Filter": { + "Prefix": "" + }, + "Status": "Enabled" + } + ] + } + client.put_bucket_lifecycle_configuration(Bucket="bucket", LifecycleConfiguration=lfc) + result = client.get_bucket_lifecycle_configuration(Bucket="bucket") + assert len(result["Rules"]) == 1 + assert result["Rules"][0]["NoncurrentVersionExpiration"]["NoncurrentDays"] == 30 + + # Change NoncurrentDays: + lfc["Rules"][0]["NoncurrentVersionExpiration"]["NoncurrentDays"] = 10 + client.put_bucket_lifecycle_configuration(Bucket="bucket", LifecycleConfiguration=lfc) + result = client.get_bucket_lifecycle_configuration(Bucket="bucket") + assert len(result["Rules"]) == 1 + assert result["Rules"][0]["NoncurrentVersionExpiration"]["NoncurrentDays"] == 10 + + # TODO: Add test for failures due to missing children + + +@mock_s3 +def test_lifecycle_with_nvt(): + client = boto3.client("s3") + client.create_bucket(Bucket="bucket") + + lfc = { + "Rules": [ + { + "NoncurrentVersionTransitions": [{ + "NoncurrentDays": 30, + "StorageClass": "ONEZONE_IA" + }], + "ID": "wholebucket", + "Filter": { + "Prefix": "" + }, + "Status": "Enabled" + } + ] + } + client.put_bucket_lifecycle_configuration(Bucket="bucket", LifecycleConfiguration=lfc) + result = client.get_bucket_lifecycle_configuration(Bucket="bucket") + assert len(result["Rules"]) == 1 + assert result["Rules"][0]["NoncurrentVersionTransitions"][0]["NoncurrentDays"] == 30 + assert result["Rules"][0]["NoncurrentVersionTransitions"][0]["StorageClass"] == "ONEZONE_IA" + + # Change NoncurrentDays: + lfc["Rules"][0]["NoncurrentVersionTransitions"][0]["NoncurrentDays"] = 10 + client.put_bucket_lifecycle_configuration(Bucket="bucket", LifecycleConfiguration=lfc) + result = client.get_bucket_lifecycle_configuration(Bucket="bucket") + assert len(result["Rules"]) == 1 + assert result["Rules"][0]["NoncurrentVersionTransitions"][0]["NoncurrentDays"] == 10 + + # Change StorageClass: + lfc["Rules"][0]["NoncurrentVersionTransitions"][0]["StorageClass"] = "GLACIER" + client.put_bucket_lifecycle_configuration(Bucket="bucket", LifecycleConfiguration=lfc) + result = client.get_bucket_lifecycle_configuration(Bucket="bucket") + assert len(result["Rules"]) == 1 + assert result["Rules"][0]["NoncurrentVersionTransitions"][0]["StorageClass"] == "GLACIER" + + # With failures for missing children: + del lfc["Rules"][0]["NoncurrentVersionTransitions"][0]["NoncurrentDays"] + with assert_raises(ClientError) as err: + client.put_bucket_lifecycle_configuration(Bucket="bucket", LifecycleConfiguration=lfc) + assert err.exception.response["Error"]["Code"] == "MalformedXML" + lfc["Rules"][0]["NoncurrentVersionTransitions"][0]["NoncurrentDays"] = 30 + + del lfc["Rules"][0]["NoncurrentVersionTransitions"][0]["StorageClass"] + with assert_raises(ClientError) as err: + client.put_bucket_lifecycle_configuration(Bucket="bucket", LifecycleConfiguration=lfc) + assert err.exception.response["Error"]["Code"] == 
"MalformedXML" + + +@mock_s3 +def test_lifecycle_with_aimu(): + client = boto3.client("s3") + client.create_bucket(Bucket="bucket") + + lfc = { + "Rules": [ + { + "AbortIncompleteMultipartUpload": { + "DaysAfterInitiation": 7 + }, + "ID": "wholebucket", + "Filter": { + "Prefix": "" + }, + "Status": "Enabled" + } + ] + } + client.put_bucket_lifecycle_configuration(Bucket="bucket", LifecycleConfiguration=lfc) + result = client.get_bucket_lifecycle_configuration(Bucket="bucket") + assert len(result["Rules"]) == 1 + assert result["Rules"][0]["AbortIncompleteMultipartUpload"]["DaysAfterInitiation"] == 7 + + # Change DaysAfterInitiation: + lfc["Rules"][0]["AbortIncompleteMultipartUpload"]["DaysAfterInitiation"] = 30 + client.put_bucket_lifecycle_configuration(Bucket="bucket", LifecycleConfiguration=lfc) + result = client.get_bucket_lifecycle_configuration(Bucket="bucket") + assert len(result["Rules"]) == 1 + assert result["Rules"][0]["AbortIncompleteMultipartUpload"]["DaysAfterInitiation"] == 30 + + # TODO: Add test for failures due to missing children + + +@mock_s3_deprecated +def test_lifecycle_with_glacier_transition(): + conn = boto.s3.connect_to_region("us-west-1") + bucket = conn.create_bucket("foobar") + + lifecycle = Lifecycle() + transition = Transition(days=30, storage_class='GLACIER') + rule = Rule('myid', prefix='', status='Enabled', expiration=None, + transition=transition) + lifecycle.append(rule) + bucket.configure_lifecycle(lifecycle) + response = bucket.get_lifecycle_config() + transition = response[0].transition + transition.days.should.equal(30) + transition.storage_class.should.equal('GLACIER') + transition.date.should.equal(None) + + +@mock_s3_deprecated +def test_lifecycle_multi(): + conn = boto.s3.connect_to_region("us-west-1") + bucket = conn.create_bucket("foobar") + + date = '2022-10-12T00:00:00.000Z' + sc = 'GLACIER' + lifecycle = Lifecycle() + lifecycle.add_rule("1", "1/", "Enabled", 1) + lifecycle.add_rule("2", "2/", "Enabled", Expiration(days=2)) + lifecycle.add_rule("3", "3/", "Enabled", Expiration(date=date)) + lifecycle.add_rule("4", "4/", "Enabled", None, + Transition(days=4, storage_class=sc)) + lifecycle.add_rule("5", "5/", "Enabled", None, + Transition(date=date, storage_class=sc)) + + bucket.configure_lifecycle(lifecycle) + # read the lifecycle back + rules = bucket.get_lifecycle_config() + + for rule in rules: + if rule.id == "1": + rule.prefix.should.equal("1/") + rule.expiration.days.should.equal(1) + elif rule.id == "2": + rule.prefix.should.equal("2/") + rule.expiration.days.should.equal(2) + elif rule.id == "3": + rule.prefix.should.equal("3/") + rule.expiration.date.should.equal(date) + elif rule.id == "4": + rule.prefix.should.equal("4/") + rule.transition.days.should.equal(4) + rule.transition.storage_class.should.equal(sc) + elif rule.id == "5": + rule.prefix.should.equal("5/") + rule.transition.date.should.equal(date) + rule.transition.storage_class.should.equal(sc) + else: + assert False, "Invalid rule id" + + +@mock_s3_deprecated +def test_lifecycle_delete(): + conn = boto.s3.connect_to_region("us-west-1") + bucket = conn.create_bucket("foobar") + + lifecycle = Lifecycle() + lifecycle.add_rule(expiration=30) + bucket.configure_lifecycle(lifecycle) + response = bucket.get_lifecycle_config() + response.should.have.length_of(1) + + bucket.delete_lifecycle_configuration() + bucket.get_lifecycle_config.when.called_with().should.throw(S3ResponseError) diff --git a/tests/test_s3/test_s3_storageclass.py b/tests/test_s3/test_s3_storageclass.py index 
99908c50139d..982376e23dad 100644
--- a/tests/test_s3/test_s3_storageclass.py
+++ b/tests/test_s3/test_s3_storageclass.py
@@ -1,106 +1,106 @@
-from __future__ import unicode_literals
-
-import boto
-import boto3
-from boto.exception import S3CreateError, S3ResponseError
-from boto.s3.lifecycle import Lifecycle, Transition, Expiration, Rule
-
-import sure  # noqa
-from botocore.exceptions import ClientError
-from datetime import datetime
-from nose.tools import assert_raises
-
-from moto import mock_s3_deprecated, mock_s3
-
-
-@mock_s3
-def test_s3_storage_class_standard():
-    s3 = boto3.client("s3")
-    s3.create_bucket(Bucket="Bucket")
-
-    # add an object to the bucket with standard storage
-
-    s3.put_object(Bucket="Bucket", Key="my_key", Body="my_value")
-
-    list_of_objects = s3.list_objects(Bucket="Bucket")
-
-    list_of_objects['Contents'][0]["StorageClass"].should.equal("STANDARD")
-
-
-@mock_s3
-def test_s3_storage_class_infrequent_access():
-    s3 = boto3.client("s3")
-    s3.create_bucket(Bucket="Bucket")
-
-    # add an object to the bucket with standard storage
-
-    s3.put_object(Bucket="Bucket", Key="my_key_infrequent", Body="my_value_infrequent", StorageClass="STANDARD_IA")
-
-    D = s3.list_objects(Bucket="Bucket")
-
-    D['Contents'][0]["StorageClass"].should.equal("STANDARD_IA")
-
-
-@mock_s3
-def test_s3_storage_class_copy():
-    s3 = boto3.client("s3")
-    s3.create_bucket(Bucket="Bucket")
-    s3.put_object(Bucket="Bucket", Key="First_Object", Body="Body", StorageClass="STANDARD")
-
-    s3.create_bucket(Bucket="Bucket2")
-    # second object is created with the default storage class (STANDARD)
-    s3.put_object(Bucket="Bucket2", Key="Second_Object", Body="Body2")
-
-    s3.copy_object(CopySource = {"Bucket": "Bucket", "Key": "First_Object"}, Bucket="Bucket2", Key="Second_Object", StorageClass="ONEZONE_IA")
-
-    list_of_copied_objects = s3.list_objects(Bucket="Bucket2")
-
-    # checks that a copied object can be properly copied
-    list_of_copied_objects["Contents"][0]["StorageClass"].should.equal("ONEZONE_IA")
-
-
-@mock_s3
-def test_s3_invalid_copied_storage_class():
-    s3 = boto3.client("s3")
-    s3.create_bucket(Bucket="Bucket")
-    s3.put_object(Bucket="Bucket", Key="First_Object", Body="Body", StorageClass="STANDARD")
-
-    s3.create_bucket(Bucket="Bucket2")
-    s3.put_object(Bucket="Bucket2", Key="Second_Object", Body="Body2", StorageClass="REDUCED_REDUNDANCY")
-
-    # Try to copy an object with an invalid storage class
-    with assert_raises(ClientError) as err:
-        s3.copy_object(CopySource = {"Bucket": "Bucket", "Key": "First_Object"}, Bucket="Bucket2", Key="Second_Object", StorageClass="STANDARD2")
-
-    e = err.exception
-    e.response["Error"]["Code"].should.equal("InvalidStorageClass")
-    e.response["Error"]["Message"].should.equal("The storage class you specified is not valid")
-
-
-@mock_s3
-def test_s3_invalid_storage_class():
-    s3 = boto3.client("s3")
-    s3.create_bucket(Bucket="Bucket")
-
-    # Try to add an object with an invalid storage class
-    with assert_raises(ClientError) as err:
-        s3.put_object(Bucket="Bucket", Key="First_Object", Body="Body", StorageClass="STANDARDD")
-
-    e = err.exception
-    e.response["Error"]["Code"].should.equal("InvalidStorageClass")
-    e.response["Error"]["Message"].should.equal("The storage class you specified is not valid")
-
-@mock_s3
-def test_s3_default_storage_class():
-    s3 = boto3.client("s3")
-    s3.create_bucket(Bucket="Bucket")
-
-    s3.put_object(Bucket="Bucket", Key="First_Object", Body="Body")
-
-    list_of_objects = s3.list_objects(Bucket="Bucket")
-
-    # tests that the default storage class is still STANDARD
-    list_of_objects["Contents"][0]["StorageClass"].should.equal("STANDARD")
-
-
-
+from __future__ import unicode_literals
+
+import boto
+import boto3
+from boto.exception import S3CreateError, S3ResponseError
+from boto.s3.lifecycle import Lifecycle, Transition, Expiration, Rule
+
+import sure  # noqa
+from botocore.exceptions import ClientError
+from datetime import datetime
+from nose.tools import assert_raises
+
+from moto import mock_s3_deprecated, mock_s3
+
+
+@mock_s3
+def test_s3_storage_class_standard():
+    s3 = boto3.client("s3")
+    s3.create_bucket(Bucket="Bucket")
+
+    # add an object to the bucket with standard storage
+
+    s3.put_object(Bucket="Bucket", Key="my_key", Body="my_value")
+
+    list_of_objects = s3.list_objects(Bucket="Bucket")
+
+    list_of_objects['Contents'][0]["StorageClass"].should.equal("STANDARD")
+
+
+@mock_s3
+def test_s3_storage_class_infrequent_access():
+    s3 = boto3.client("s3")
+    s3.create_bucket(Bucket="Bucket")
+
+    # add an object to the bucket with standard storage
+
+    s3.put_object(Bucket="Bucket", Key="my_key_infrequent", Body="my_value_infrequent", StorageClass="STANDARD_IA")
+
+    D = s3.list_objects(Bucket="Bucket")
+
+    D['Contents'][0]["StorageClass"].should.equal("STANDARD_IA")
+
+
+@mock_s3
+def test_s3_storage_class_copy():
+    s3 = boto3.client("s3")
+    s3.create_bucket(Bucket="Bucket")
+    s3.put_object(Bucket="Bucket", Key="First_Object", Body="Body", StorageClass="STANDARD")
+
+    s3.create_bucket(Bucket="Bucket2")
+    # second object is created with the default storage class (STANDARD)
+    s3.put_object(Bucket="Bucket2", Key="Second_Object", Body="Body2")
+
+    s3.copy_object(CopySource = {"Bucket": "Bucket", "Key": "First_Object"}, Bucket="Bucket2", Key="Second_Object", StorageClass="ONEZONE_IA")
+
+    list_of_copied_objects = s3.list_objects(Bucket="Bucket2")
+
+    # checks that a copied object can be properly copied
+    list_of_copied_objects["Contents"][0]["StorageClass"].should.equal("ONEZONE_IA")
+
+
+@mock_s3
+def test_s3_invalid_copied_storage_class():
+    s3 = boto3.client("s3")
+    s3.create_bucket(Bucket="Bucket")
+    s3.put_object(Bucket="Bucket", Key="First_Object", Body="Body", StorageClass="STANDARD")
+
+    s3.create_bucket(Bucket="Bucket2")
+    s3.put_object(Bucket="Bucket2", Key="Second_Object", Body="Body2", StorageClass="REDUCED_REDUNDANCY")
+
+    # Try to copy an object with an invalid storage class
+    with assert_raises(ClientError) as err:
+        s3.copy_object(CopySource = {"Bucket": "Bucket", "Key": "First_Object"}, Bucket="Bucket2", Key="Second_Object", StorageClass="STANDARD2")
+
+    e = err.exception
+    e.response["Error"]["Code"].should.equal("InvalidStorageClass")
+    e.response["Error"]["Message"].should.equal("The storage class you specified is not valid")
+
+
+@mock_s3
+def test_s3_invalid_storage_class():
+    s3 = boto3.client("s3")
+    s3.create_bucket(Bucket="Bucket")
+
+    # Try to add an object with an invalid storage class
+    with assert_raises(ClientError) as err:
+        s3.put_object(Bucket="Bucket", Key="First_Object", Body="Body", StorageClass="STANDARDD")
+
+    e = err.exception
+    e.response["Error"]["Code"].should.equal("InvalidStorageClass")
+    e.response["Error"]["Message"].should.equal("The storage class you specified is not valid")
+
+@mock_s3
+def test_s3_default_storage_class():
+    s3 = boto3.client("s3")
+    s3.create_bucket(Bucket="Bucket")
+
+    s3.put_object(Bucket="Bucket", Key="First_Object", Body="Body")
+
+    list_of_objects = s3.list_objects(Bucket="Bucket")
+
+    # tests that the default storage class is still STANDARD
+    list_of_objects["Contents"][0]["StorageClass"].should.equal("STANDARD")
+
+
+
diff --git a/tests/test_s3/test_s3_utils.py b/tests/test_s3/test_s3_utils.py
index ce9f54c75318..5011379104bc 100644
--- a/tests/test_s3/test_s3_utils.py
+++ b/tests/test_s3/test_s3_utils.py
@@ -1,80 +1,80 @@
-from __future__ import unicode_literals
-import os
-from sure import expect
-from moto.s3.utils import bucket_name_from_url, _VersionedKeyStore, parse_region_from_url
-
-
-def test_base_url():
-    expect(bucket_name_from_url('https://s3.amazonaws.com/')).should.equal(None)
-
-
-def test_localhost_bucket():
-    expect(bucket_name_from_url('https://wfoobar.localhost:5000/abc')
-           ).should.equal("wfoobar")
-
-
-def test_localhost_without_bucket():
-    expect(bucket_name_from_url(
-        'https://www.localhost:5000/def')).should.equal(None)
-
-def test_force_ignore_subdomain_for_bucketnames():
-    os.environ['S3_IGNORE_SUBDOMAIN_BUCKETNAME'] = '1'
-    expect(bucket_name_from_url('https://subdomain.localhost:5000/abc/resource')).should.equal(None)
-    del(os.environ['S3_IGNORE_SUBDOMAIN_BUCKETNAME'])
-
-
-
-def test_versioned_key_store():
-    d = _VersionedKeyStore()
-
-    d.should.have.length_of(0)
-
-    d['key'] = [1]
-
-    d.should.have.length_of(1)
-
-    d['key'] = 2
-    d.should.have.length_of(1)
-
-    d.should.have.key('key').being.equal(2)
-
-    d.get.when.called_with('key').should.return_value(2)
-    d.get.when.called_with('badkey').should.return_value(None)
-    d.get.when.called_with('badkey', 'HELLO').should.return_value('HELLO')
-
-    # Tests key[...] access with a missing key:
-    d.shouldnt.have.key('badkey')
-    d.__getitem__.when.called_with('badkey').should.throw(KeyError)
-
-    d.getlist('key').should.have.length_of(2)
-    d.getlist('key').should.be.equal([[1], 2])
-    d.getlist('badkey').should.be.none
-
-    d.setlist('key', 1)
-    d.getlist('key').should.be.equal([1])
-
-    d.setlist('key', (1, 2))
-    d.getlist('key').shouldnt.be.equal((1, 2))
-    d.getlist('key').should.be.equal([1, 2])
-
-    d.setlist('key', [[1], [2]])
-    d['key'].should.have.length_of(1)
-    d.getlist('key').should.be.equal([[1], [2]])
-
-
-def test_parse_region_from_url():
-    expected = 'us-west-2'
-    for url in ['http://s3-us-west-2.amazonaws.com/bucket',
-                'http://s3.us-west-2.amazonaws.com/bucket',
-                'http://bucket.s3-us-west-2.amazonaws.com',
-                'https://s3-us-west-2.amazonaws.com/bucket',
-                'https://s3.us-west-2.amazonaws.com/bucket',
-                'https://bucket.s3-us-west-2.amazonaws.com']:
-        parse_region_from_url(url).should.equal(expected)
-
-    expected = 'us-east-1'
-    for url in ['http://s3.amazonaws.com/bucket',
-                'http://bucket.s3.amazonaws.com',
-                'https://s3.amazonaws.com/bucket',
-                'https://bucket.s3.amazonaws.com']:
-        parse_region_from_url(url).should.equal(expected)
+from __future__ import unicode_literals
+import os
+from sure import expect
+from moto.s3.utils import bucket_name_from_url, _VersionedKeyStore, parse_region_from_url
+
+
+def test_base_url():
+    expect(bucket_name_from_url('https://s3.amazonaws.com/')).should.equal(None)
+
+
+def test_localhost_bucket():
+    expect(bucket_name_from_url('https://wfoobar.localhost:5000/abc')
+           ).should.equal("wfoobar")
+
+
+def test_localhost_without_bucket():
+    expect(bucket_name_from_url(
+        'https://www.localhost:5000/def')).should.equal(None)
+
+def test_force_ignore_subdomain_for_bucketnames():
+    os.environ['S3_IGNORE_SUBDOMAIN_BUCKETNAME'] = '1'
+    expect(bucket_name_from_url('https://subdomain.localhost:5000/abc/resource')).should.equal(None)
+    del(os.environ['S3_IGNORE_SUBDOMAIN_BUCKETNAME'])
+
+
+
+def test_versioned_key_store():
+    d = _VersionedKeyStore()
+
+    d.should.have.length_of(0)
+
+    d['key'] = [1]
+
+    d.should.have.length_of(1)
+
+    d['key'] = 2
+    d.should.have.length_of(1)
+
+    d.should.have.key('key').being.equal(2)
+
+    d.get.when.called_with('key').should.return_value(2)
+    d.get.when.called_with('badkey').should.return_value(None)
+    d.get.when.called_with('badkey', 'HELLO').should.return_value('HELLO')
+
+    # Tests key[...] access with a missing key:
+    d.shouldnt.have.key('badkey')
+    d.__getitem__.when.called_with('badkey').should.throw(KeyError)
+
+    d.getlist('key').should.have.length_of(2)
+    d.getlist('key').should.be.equal([[1], 2])
+    d.getlist('badkey').should.be.none
+
+    d.setlist('key', 1)
+    d.getlist('key').should.be.equal([1])
+
+    d.setlist('key', (1, 2))
+    d.getlist('key').shouldnt.be.equal((1, 2))
+    d.getlist('key').should.be.equal([1, 2])
+
+    d.setlist('key', [[1], [2]])
+    d['key'].should.have.length_of(1)
+    d.getlist('key').should.be.equal([[1], [2]])
+
+
+def test_parse_region_from_url():
+    expected = 'us-west-2'
+    for url in ['http://s3-us-west-2.amazonaws.com/bucket',
+                'http://s3.us-west-2.amazonaws.com/bucket',
+                'http://bucket.s3-us-west-2.amazonaws.com',
+                'https://s3-us-west-2.amazonaws.com/bucket',
+                'https://s3.us-west-2.amazonaws.com/bucket',
+                'https://bucket.s3-us-west-2.amazonaws.com']:
+        parse_region_from_url(url).should.equal(expected)
+
+    expected = 'us-east-1'
+    for url in ['http://s3.amazonaws.com/bucket',
+                'http://bucket.s3.amazonaws.com',
+                'https://s3.amazonaws.com/bucket',
+                'https://bucket.s3.amazonaws.com']:
+        parse_region_from_url(url).should.equal(expected)
diff --git a/tests/test_s3/test_server.py b/tests/test_s3/test_server.py
index 9c8252a0434f..efa05b862372 100644
--- a/tests/test_s3/test_server.py
+++ b/tests/test_s3/test_server.py
@@ -1,105 +1,105 @@
-# coding=utf-8
-
-from __future__ import unicode_literals
-import sure  # noqa
-
-from flask.testing import FlaskClient
-import moto.server as server
-
-'''
-Test the different server responses
-'''
-
-
-class AuthenticatedClient(FlaskClient):
-    def open(self, *args, **kwargs):
-        kwargs['headers'] = kwargs.get('headers', {})
-        kwargs['headers']['Authorization'] = "Any authorization header"
-        return super(AuthenticatedClient, self).open(*args, **kwargs)
-
-
-def authenticated_client():
-    backend = server.create_backend_app("s3")
-    backend.test_client_class = AuthenticatedClient
-    return backend.test_client()
-
-
-def test_s3_server_get():
-    test_client = authenticated_client()
-    res = test_client.get('/')
-
-    res.data.should.contain(b'ListAllMyBucketsResult')
-
-
-def test_s3_server_bucket_create():
-    test_client = authenticated_client()
-
-    res = test_client.put('/', 'http://foobaz.localhost:5000/')
-    res.status_code.should.equal(200)
-
-    res = test_client.get('/')
-    res.data.should.contain(b'foobaz')
-
-    res = test_client.get('/', 'http://foobaz.localhost:5000/')
-    res.status_code.should.equal(200)
-    res.data.should.contain(b"ListBucketResult")
-
-    res = test_client.put(
-        '/bar', 'http://foobaz.localhost:5000/', data='test value')
-    res.status_code.should.equal(200)
-    assert 'ETag' in dict(res.headers)
-
-    res = test_client.get('/bar', 'http://foobaz.localhost:5000/')
-    res.status_code.should.equal(200)
-    res.data.should.equal(b"test value")
-
-
-def test_s3_server_bucket_versioning():
-    test_client = authenticated_client()
-
-    # Just enough XML to enable versioning
-    body = '<Status>Enabled</Status>'
-    res = test_client.put(
-        '/?versioning', 'http://foobaz.localhost:5000', data=body)
-    res.status_code.should.equal(200)
-
-
-def test_s3_server_post_to_bucket():
-    test_client = authenticated_client()
-
-    res = test_client.put('/', 'http://tester.localhost:5000/')
-    res.status_code.should.equal(200)
-
-    test_client.post('/', "https://tester.localhost:5000/", data={
-        'key': 'the-key',
-        'file': 'nothing'
-    })
-
-    res = test_client.get('/the-key', 'http://tester.localhost:5000/')
-    res.status_code.should.equal(200)
-    res.data.should.equal(b"nothing")
-
-
-def test_s3_server_post_without_content_length():
-    test_client = authenticated_client()
-
-    res = test_client.put('/', 'http://tester.localhost:5000/', environ_overrides={'CONTENT_LENGTH': ''})
-    res.status_code.should.equal(411)
-
-    res = test_client.post('/', "https://tester.localhost:5000/", environ_overrides={'CONTENT_LENGTH': ''})
-    res.status_code.should.equal(411)
-
-
-def test_s3_server_post_unicode_bucket_key():
-    # Make sure that we can deal with non-ascii characters in request URLs (e.g., S3 object names)
-    dispatcher = server.DomainDispatcherApplication(server.create_backend_app)
-    backend_app = dispatcher.get_application({
-        'HTTP_HOST': 's3.amazonaws.com',
-        'PATH_INFO': '/test-bucket/test-object-てすと'
-    })
-    assert backend_app
-    backend_app = dispatcher.get_application({
-        'HTTP_HOST': 's3.amazonaws.com',
-        'PATH_INFO': '/test-bucket/test-object-てすと'.encode('utf-8')
-    })
-    assert backend_app
+# coding=utf-8
+
+from __future__ import unicode_literals
+import sure  # noqa
+
+from flask.testing import FlaskClient
+import moto.server as server
+
+'''
+Test the different server responses
+'''
+
+
+class AuthenticatedClient(FlaskClient):
+    def open(self, *args, **kwargs):
+        kwargs['headers'] = kwargs.get('headers', {})
+        kwargs['headers']['Authorization'] = "Any authorization header"
+        return super(AuthenticatedClient, self).open(*args, **kwargs)
+
+
+def authenticated_client():
+    backend = server.create_backend_app("s3")
+    backend.test_client_class = AuthenticatedClient
+    return backend.test_client()
+
+
+def test_s3_server_get():
+    test_client = authenticated_client()
+    res = test_client.get('/')
+
+    res.data.should.contain(b'ListAllMyBucketsResult')
+
+
+def test_s3_server_bucket_create():
+    test_client = authenticated_client()
+
+    res = test_client.put('/', 'http://foobaz.localhost:5000/')
+    res.status_code.should.equal(200)
+
+    res = test_client.get('/')
+    res.data.should.contain(b'foobaz')
+
+    res = test_client.get('/', 'http://foobaz.localhost:5000/')
+    res.status_code.should.equal(200)
+    res.data.should.contain(b"ListBucketResult")
+
+    res = test_client.put(
+        '/bar', 'http://foobaz.localhost:5000/', data='test value')
+    res.status_code.should.equal(200)
+    assert 'ETag' in dict(res.headers)
+
+    res = test_client.get('/bar', 'http://foobaz.localhost:5000/')
+    res.status_code.should.equal(200)
+    res.data.should.equal(b"test value")
+
+
+def test_s3_server_bucket_versioning():
+    test_client = authenticated_client()
+
+    # Just enough XML to enable versioning
+    body = '<Status>Enabled</Status>'
+    res = test_client.put(
+        '/?versioning', 'http://foobaz.localhost:5000', data=body)
+    res.status_code.should.equal(200)
+
+
+def test_s3_server_post_to_bucket():
+    test_client = authenticated_client()
+
+    res = test_client.put('/', 'http://tester.localhost:5000/')
+    res.status_code.should.equal(200)
+
+    test_client.post('/', "https://tester.localhost:5000/", data={
+        'key': 'the-key',
+        'file': 'nothing'
+    })
+
+    res = test_client.get('/the-key', 'http://tester.localhost:5000/')
+    res.status_code.should.equal(200)
+    res.data.should.equal(b"nothing")
+
+
+def test_s3_server_post_without_content_length():
+    test_client = 
authenticated_client() + + res = test_client.put('/', 'http://tester.localhost:5000/', environ_overrides={'CONTENT_LENGTH': ''}) + res.status_code.should.equal(411) + + res = test_client.post('/', "https://tester.localhost:5000/", environ_overrides={'CONTENT_LENGTH': ''}) + res.status_code.should.equal(411) + + +def test_s3_server_post_unicode_bucket_key(): + # Make sure that we can deal with non-ascii characters in request URLs (e.g., S3 object names) + dispatcher = server.DomainDispatcherApplication(server.create_backend_app) + backend_app = dispatcher.get_application({ + 'HTTP_HOST': 's3.amazonaws.com', + 'PATH_INFO': '/test-bucket/test-object-てすと' + }) + assert backend_app + backend_app = dispatcher.get_application({ + 'HTTP_HOST': 's3.amazonaws.com', + 'PATH_INFO': '/test-bucket/test-object-てすと'.encode('utf-8') + }) + assert backend_app diff --git a/tests/test_s3bucket_path/test_bucket_path_server.py b/tests/test_s3bucket_path/test_bucket_path_server.py index 434110e87f20..0fd73c3b939f 100644 --- a/tests/test_s3bucket_path/test_bucket_path_server.py +++ b/tests/test_s3bucket_path/test_bucket_path_server.py @@ -1,113 +1,113 @@ -from __future__ import unicode_literals -import sure # noqa - -from flask.testing import FlaskClient -import moto.server as server - -''' -Test the different server responses -''' - - -class AuthenticatedClient(FlaskClient): - def open(self, *args, **kwargs): - kwargs['headers'] = kwargs.get('headers', {}) - kwargs['headers']['Authorization'] = "Any authorization header" - return super(AuthenticatedClient, self).open(*args, **kwargs) - - -def authenticated_client(): - backend = server.create_backend_app("s3bucket_path") - backend.test_client_class = AuthenticatedClient - return backend.test_client() - - -def test_s3_server_get(): - test_client = authenticated_client() - - res = test_client.get('/') - - res.data.should.contain(b'ListAllMyBucketsResult') - - -def test_s3_server_bucket_create(): - test_client = authenticated_client() - - res = test_client.put('/foobar', 'http://localhost:5000') - res.status_code.should.equal(200) - - res = test_client.get('/') - res.data.should.contain(b'foobar') - - res = test_client.get('/foobar', 'http://localhost:5000') - res.status_code.should.equal(200) - res.data.should.contain(b"ListBucketResult") - - res = test_client.put('/foobar2/', 'http://localhost:5000') - res.status_code.should.equal(200) - - res = test_client.get('/') - res.data.should.contain(b'foobar2') - - res = test_client.get('/foobar2/', 'http://localhost:5000') - res.status_code.should.equal(200) - res.data.should.contain(b"ListBucketResult") - - res = test_client.get('/missing-bucket', 'http://localhost:5000') - res.status_code.should.equal(404) - - res = test_client.put( - '/foobar/bar', 'http://localhost:5000', data='test value') - res.status_code.should.equal(200) - - res = test_client.get('/foobar/bar', 'http://localhost:5000') - res.status_code.should.equal(200) - res.data.should.equal(b"test value") - - -def test_s3_server_post_to_bucket(): - test_client = authenticated_client() - - res = test_client.put('/foobar2', 'http://localhost:5000/') - res.status_code.should.equal(200) - - test_client.post('/foobar2', "https://localhost:5000/", data={ - 'key': 'the-key', - 'file': 'nothing' - }) - - res = test_client.get('/foobar2/the-key', 'http://localhost:5000/') - res.status_code.should.equal(200) - res.data.should.equal(b"nothing") - - -def test_s3_server_put_ipv6(): - test_client = authenticated_client() - - res = test_client.put('/foobar2', 
'http://[::]:5000/') - res.status_code.should.equal(200) - - test_client.post('/foobar2', "https://[::]:5000/", data={ - 'key': 'the-key', - 'file': 'nothing' - }) - - res = test_client.get('/foobar2/the-key', 'http://[::]:5000/') - res.status_code.should.equal(200) - res.data.should.equal(b"nothing") - - -def test_s3_server_put_ipv4(): - test_client = authenticated_client() - - res = test_client.put('/foobar2', 'http://127.0.0.1:5000/') - res.status_code.should.equal(200) - - test_client.post('/foobar2', "https://127.0.0.1:5000/", data={ - 'key': 'the-key', - 'file': 'nothing' - }) - - res = test_client.get('/foobar2/the-key', 'http://127.0.0.1:5000/') - res.status_code.should.equal(200) - res.data.should.equal(b"nothing") +from __future__ import unicode_literals +import sure # noqa + +from flask.testing import FlaskClient +import moto.server as server + +''' +Test the different server responses +''' + + +class AuthenticatedClient(FlaskClient): + def open(self, *args, **kwargs): + kwargs['headers'] = kwargs.get('headers', {}) + kwargs['headers']['Authorization'] = "Any authorization header" + return super(AuthenticatedClient, self).open(*args, **kwargs) + + +def authenticated_client(): + backend = server.create_backend_app("s3bucket_path") + backend.test_client_class = AuthenticatedClient + return backend.test_client() + + +def test_s3_server_get(): + test_client = authenticated_client() + + res = test_client.get('/') + + res.data.should.contain(b'ListAllMyBucketsResult') + + +def test_s3_server_bucket_create(): + test_client = authenticated_client() + + res = test_client.put('/foobar', 'http://localhost:5000') + res.status_code.should.equal(200) + + res = test_client.get('/') + res.data.should.contain(b'foobar') + + res = test_client.get('/foobar', 'http://localhost:5000') + res.status_code.should.equal(200) + res.data.should.contain(b"ListBucketResult") + + res = test_client.put('/foobar2/', 'http://localhost:5000') + res.status_code.should.equal(200) + + res = test_client.get('/') + res.data.should.contain(b'foobar2') + + res = test_client.get('/foobar2/', 'http://localhost:5000') + res.status_code.should.equal(200) + res.data.should.contain(b"ListBucketResult") + + res = test_client.get('/missing-bucket', 'http://localhost:5000') + res.status_code.should.equal(404) + + res = test_client.put( + '/foobar/bar', 'http://localhost:5000', data='test value') + res.status_code.should.equal(200) + + res = test_client.get('/foobar/bar', 'http://localhost:5000') + res.status_code.should.equal(200) + res.data.should.equal(b"test value") + + +def test_s3_server_post_to_bucket(): + test_client = authenticated_client() + + res = test_client.put('/foobar2', 'http://localhost:5000/') + res.status_code.should.equal(200) + + test_client.post('/foobar2', "https://localhost:5000/", data={ + 'key': 'the-key', + 'file': 'nothing' + }) + + res = test_client.get('/foobar2/the-key', 'http://localhost:5000/') + res.status_code.should.equal(200) + res.data.should.equal(b"nothing") + + +def test_s3_server_put_ipv6(): + test_client = authenticated_client() + + res = test_client.put('/foobar2', 'http://[::]:5000/') + res.status_code.should.equal(200) + + test_client.post('/foobar2', "https://[::]:5000/", data={ + 'key': 'the-key', + 'file': 'nothing' + }) + + res = test_client.get('/foobar2/the-key', 'http://[::]:5000/') + res.status_code.should.equal(200) + res.data.should.equal(b"nothing") + + +def test_s3_server_put_ipv4(): + test_client = authenticated_client() + + res = test_client.put('/foobar2', 
'http://127.0.0.1:5000/') + res.status_code.should.equal(200) + + test_client.post('/foobar2', "https://127.0.0.1:5000/", data={ + 'key': 'the-key', + 'file': 'nothing' + }) + + res = test_client.get('/foobar2/the-key', 'http://127.0.0.1:5000/') + res.status_code.should.equal(200) + res.data.should.equal(b"nothing") diff --git a/tests/test_s3bucket_path/test_s3bucket_path.py b/tests/test_s3bucket_path/test_s3bucket_path.py index 21d786c61f98..2ec5e8f30e86 100644 --- a/tests/test_s3bucket_path/test_s3bucket_path.py +++ b/tests/test_s3bucket_path/test_s3bucket_path.py @@ -1,321 +1,321 @@ -from __future__ import unicode_literals -from six.moves.urllib.request import urlopen -from six.moves.urllib.error import HTTPError - -import boto -from boto.exception import S3ResponseError -from boto.s3.key import Key -from boto.s3.connection import OrdinaryCallingFormat - -from freezegun import freeze_time -import requests - -import sure # noqa - -from moto import mock_s3, mock_s3_deprecated - - -def create_connection(key=None, secret=None): - return boto.connect_s3(key, secret, calling_format=OrdinaryCallingFormat()) - - -class MyModel(object): - - def __init__(self, name, value): - self.name = name - self.value = value - - def save(self): - conn = create_connection('the_key', 'the_secret') - bucket = conn.get_bucket('mybucket') - k = Key(bucket) - k.key = self.name - k.set_contents_from_string(self.value) - - -@mock_s3_deprecated -def test_my_model_save(): - # Create Bucket so that test can run - conn = create_connection('the_key', 'the_secret') - conn.create_bucket('mybucket') - #################################### - - model_instance = MyModel('steve', 'is awesome') - model_instance.save() - - conn.get_bucket('mybucket').get_key( - 'steve').get_contents_as_string().should.equal(b'is awesome') - - -@mock_s3_deprecated -def test_missing_key(): - conn = create_connection('the_key', 'the_secret') - bucket = conn.create_bucket("foobar") - bucket.get_key("the-key").should.equal(None) - - -@mock_s3_deprecated -def test_missing_key_urllib2(): - conn = create_connection('the_key', 'the_secret') - conn.create_bucket("foobar") - - urlopen.when.called_with( - "http://s3.amazonaws.com/foobar/the-key").should.throw(HTTPError) - - -@mock_s3_deprecated -def test_empty_key(): - conn = create_connection('the_key', 'the_secret') - bucket = conn.create_bucket("foobar") - key = Key(bucket) - key.key = "the-key" - key.set_contents_from_string("") - - bucket.get_key("the-key").get_contents_as_string().should.equal(b'') - - -@mock_s3_deprecated -def test_empty_key_set_on_existing_key(): - conn = create_connection('the_key', 'the_secret') - bucket = conn.create_bucket("foobar") - key = Key(bucket) - key.key = "the-key" - key.set_contents_from_string("foobar") - - bucket.get_key("the-key").get_contents_as_string().should.equal(b'foobar') - - key.set_contents_from_string("") - bucket.get_key("the-key").get_contents_as_string().should.equal(b'') - - -@mock_s3_deprecated -def test_large_key_save(): - conn = create_connection('the_key', 'the_secret') - bucket = conn.create_bucket("foobar") - key = Key(bucket) - key.key = "the-key" - key.set_contents_from_string("foobar" * 100000) - - bucket.get_key( - "the-key").get_contents_as_string().should.equal(b'foobar' * 100000) - - -@mock_s3_deprecated -def test_copy_key(): - conn = create_connection('the_key', 'the_secret') - bucket = conn.create_bucket("foobar") - key = Key(bucket) - key.key = "the-key" - key.set_contents_from_string("some value") - - bucket.copy_key('new-key', 'foobar', 
'the-key')
-
-    bucket.get_key(
-        "the-key").get_contents_as_string().should.equal(b"some value")
-    bucket.get_key(
-        "new-key").get_contents_as_string().should.equal(b"some value")
-
-
-@mock_s3_deprecated
-def test_set_metadata():
-    conn = create_connection('the_key', 'the_secret')
-    bucket = conn.create_bucket("foobar")
-    key = Key(bucket)
-    key.key = 'the-key'
-    key.set_metadata('md', 'Metadatastring')
-    key.set_contents_from_string("Testval")
-
-    bucket.get_key('the-key').get_metadata('md').should.equal('Metadatastring')
-
-
-@freeze_time("2012-01-01 12:00:00")
-@mock_s3_deprecated
-def test_last_modified():
-    # See https://github.com/boto/boto/issues/466
-    conn = create_connection()
-    bucket = conn.create_bucket("foobar")
-    key = Key(bucket)
-    key.key = "the-key"
-    key.set_contents_from_string("some value")
-
-    rs = bucket.get_all_keys()
-    rs[0].last_modified.should.equal('2012-01-01T12:00:00.000Z')
-
-    bucket.get_key(
-        "the-key").last_modified.should.equal('Sun, 01 Jan 2012 12:00:00 GMT')
-
-
-@mock_s3_deprecated
-def test_missing_bucket():
-    conn = create_connection('the_key', 'the_secret')
-    conn.get_bucket.when.called_with('mybucket').should.throw(S3ResponseError)
-
-
-@mock_s3_deprecated
-def test_bucket_with_dash():
-    conn = create_connection('the_key', 'the_secret')
-    conn.get_bucket.when.called_with(
-        'mybucket-test').should.throw(S3ResponseError)
-
-
-@mock_s3_deprecated
-def test_bucket_deletion():
-    conn = create_connection('the_key', 'the_secret')
-    bucket = conn.create_bucket("foobar")
-
-    key = Key(bucket)
-    key.key = "the-key"
-    key.set_contents_from_string("some value")
-
-    # Try to delete a bucket that still has keys
-    conn.delete_bucket.when.called_with("foobar").should.throw(S3ResponseError)
-
-    bucket.delete_key("the-key")
-    conn.delete_bucket("foobar")
-
-    # Get non-existing bucket
-    conn.get_bucket.when.called_with("foobar").should.throw(S3ResponseError)
-
-    # Delete non-existent bucket
-    conn.delete_bucket.when.called_with("foobar").should.throw(S3ResponseError)
-
-
-@mock_s3_deprecated
-def test_get_all_buckets():
-    conn = create_connection('the_key', 'the_secret')
-    conn.create_bucket("foobar")
-    conn.create_bucket("foobar2")
-    buckets = conn.get_all_buckets()
-
-    buckets.should.have.length_of(2)
-
-
-@mock_s3
-@mock_s3_deprecated
-def test_post_to_bucket():
-    conn = create_connection('the_key', 'the_secret')
-    bucket = conn.create_bucket("foobar")
-
-    requests.post("https://s3.amazonaws.com/foobar", {
-        'key': 'the-key',
-        'file': 'nothing'
-    })
-
-    bucket.get_key('the-key').get_contents_as_string().should.equal(b'nothing')
-
-
-@mock_s3
-@mock_s3_deprecated
-def test_post_with_metadata_to_bucket():
-    conn = create_connection('the_key', 'the_secret')
-    bucket = conn.create_bucket("foobar")
-
-    requests.post("https://s3.amazonaws.com/foobar", {
-        'key': 'the-key',
-        'file': 'nothing',
-        'x-amz-meta-test': 'metadata'
-    })
-
-    bucket.get_key('the-key').get_metadata('test').should.equal('metadata')
-
-
-@mock_s3_deprecated
-def test_bucket_name_with_dot():
-    conn = create_connection()
-    bucket = conn.create_bucket('firstname.lastname')
-
-    k = Key(bucket, 'somekey')
-    k.set_contents_from_string('somedata')
-
-
-@mock_s3_deprecated
-def test_key_with_special_characters():
-    conn = create_connection()
-    bucket = conn.create_bucket('test_bucket_name')
-
-    key = Key(bucket, 'test_list_keys_2/*x+?^@~!y')
-    key.set_contents_from_string('value1')
-
-    key_list = bucket.list('test_list_keys_2/', '/')
-    keys = [x for x in key_list]
-    keys[0].name.should.equal("test_list_keys_2/*x+?^@~!y")
-
-
-@mock_s3_deprecated
-def test_bucket_key_listing_order():
-    conn = create_connection()
-    bucket = conn.create_bucket('test_bucket')
-    prefix = 'toplevel/'
-
-    def store(name):
-        k = Key(bucket, prefix + name)
-        k.set_contents_from_string('somedata')
-
-    names = ['x/key', 'y.key1', 'y.key2', 'y.key3', 'x/y/key', 'x/y/z/key']
-
-    for name in names:
-        store(name)
-
-    delimiter = None
-    keys = [x.name for x in bucket.list(prefix, delimiter)]
-    keys.should.equal([
-        'toplevel/x/key', 'toplevel/x/y/key', 'toplevel/x/y/z/key',
-        'toplevel/y.key1', 'toplevel/y.key2', 'toplevel/y.key3'
-    ])
-
-    delimiter = '/'
-    keys = [x.name for x in bucket.list(prefix, delimiter)]
-    keys.should.equal([
-        'toplevel/y.key1', 'toplevel/y.key2', 'toplevel/y.key3', 'toplevel/x/'
-    ])
-
-    # Test delimiter with no prefix
-    delimiter = '/'
-    keys = [x.name for x in bucket.list(prefix=None, delimiter=delimiter)]
-    keys.should.equal(['toplevel/'])
-
-    delimiter = None
-    keys = [x.name for x in bucket.list(prefix + 'x', delimiter)]
-    keys.should.equal(
-        ['toplevel/x/key', 'toplevel/x/y/key', 'toplevel/x/y/z/key'])
-
-    delimiter = '/'
-    keys = [x.name for x in bucket.list(prefix + 'x', delimiter)]
-    keys.should.equal(['toplevel/x/'])
-
-
-@mock_s3_deprecated
-def test_delete_keys():
-    conn = create_connection()
-    bucket = conn.create_bucket('foobar')
-
-    Key(bucket=bucket, name='file1').set_contents_from_string('abc')
-    Key(bucket=bucket, name='file2').set_contents_from_string('abc')
-    Key(bucket=bucket, name='file3').set_contents_from_string('abc')
-    Key(bucket=bucket, name='file4').set_contents_from_string('abc')
-
-    result = bucket.delete_keys(['file2', 'file3'])
-    result.deleted.should.have.length_of(2)
-    result.errors.should.have.length_of(0)
-    keys = bucket.get_all_keys()
-    keys.should.have.length_of(2)
-    keys[0].name.should.equal('file1')
-
-
-@mock_s3_deprecated
-def test_delete_keys_with_invalid():
-    conn = create_connection()
-    bucket = conn.create_bucket('foobar')
-
-    Key(bucket=bucket, name='file1').set_contents_from_string('abc')
-    Key(bucket=bucket, name='file2').set_contents_from_string('abc')
-    Key(bucket=bucket, name='file3').set_contents_from_string('abc')
-    Key(bucket=bucket, name='file4').set_contents_from_string('abc')
-
-    result = bucket.delete_keys(['abc', 'file3'])
-
-    result.deleted.should.have.length_of(1)
-    result.errors.should.have.length_of(1)
-    keys = bucket.get_all_keys()
-    keys.should.have.length_of(3)
-    keys[0].name.should.equal('file1')
+from __future__ import unicode_literals
+from six.moves.urllib.request import urlopen
+from six.moves.urllib.error import HTTPError
+
+import boto
+from boto.exception import S3ResponseError
+from boto.s3.key import Key
+from boto.s3.connection import OrdinaryCallingFormat
+
+from freezegun import freeze_time
+import requests
+
+import sure  # noqa
+
+from moto import mock_s3, mock_s3_deprecated
+
+
+def create_connection(key=None, secret=None):
+    return boto.connect_s3(key, secret, calling_format=OrdinaryCallingFormat())
+
+
+class MyModel(object):
+
+    def __init__(self, name, value):
+        self.name = name
+        self.value = value
+
+    def save(self):
+        conn = create_connection('the_key', 'the_secret')
+        bucket = conn.get_bucket('mybucket')
+        k = Key(bucket)
+        k.key = self.name
+        k.set_contents_from_string(self.value)
+
+
+@mock_s3_deprecated
+def test_my_model_save():
+    # Create Bucket so that test can run
+    conn = create_connection('the_key', 'the_secret')
+    conn.create_bucket('mybucket')
+    ####################################
+
+    model_instance = MyModel('steve', 'is awesome')
+    model_instance.save()
+
+    conn.get_bucket('mybucket').get_key(
+        'steve').get_contents_as_string().should.equal(b'is awesome')
+
+
+@mock_s3_deprecated
+def test_missing_key():
+    conn = create_connection('the_key', 'the_secret')
+    bucket = conn.create_bucket("foobar")
+    bucket.get_key("the-key").should.equal(None)
+
+
+@mock_s3_deprecated
+def test_missing_key_urllib2():
+    conn = create_connection('the_key', 'the_secret')
+    conn.create_bucket("foobar")
+
+    urlopen.when.called_with(
+        "http://s3.amazonaws.com/foobar/the-key").should.throw(HTTPError)
+
+
+@mock_s3_deprecated
+def test_empty_key():
+    conn = create_connection('the_key', 'the_secret')
+    bucket = conn.create_bucket("foobar")
+    key = Key(bucket)
+    key.key = "the-key"
+    key.set_contents_from_string("")
+
+    bucket.get_key("the-key").get_contents_as_string().should.equal(b'')
+
+
+@mock_s3_deprecated
+def test_empty_key_set_on_existing_key():
+    conn = create_connection('the_key', 'the_secret')
+    bucket = conn.create_bucket("foobar")
+    key = Key(bucket)
+    key.key = "the-key"
+    key.set_contents_from_string("foobar")
+
+    bucket.get_key("the-key").get_contents_as_string().should.equal(b'foobar')
+
+    key.set_contents_from_string("")
+    bucket.get_key("the-key").get_contents_as_string().should.equal(b'')
+
+
+@mock_s3_deprecated
+def test_large_key_save():
+    conn = create_connection('the_key', 'the_secret')
+    bucket = conn.create_bucket("foobar")
+    key = Key(bucket)
+    key.key = "the-key"
+    key.set_contents_from_string("foobar" * 100000)
+
+    bucket.get_key(
+        "the-key").get_contents_as_string().should.equal(b'foobar' * 100000)
+
+
+@mock_s3_deprecated
+def test_copy_key():
+    conn = create_connection('the_key', 'the_secret')
+    bucket = conn.create_bucket("foobar")
+    key = Key(bucket)
+    key.key = "the-key"
+    key.set_contents_from_string("some value")
+
+    bucket.copy_key('new-key', 'foobar', 'the-key')
+
+    bucket.get_key(
+        "the-key").get_contents_as_string().should.equal(b"some value")
+    bucket.get_key(
+        "new-key").get_contents_as_string().should.equal(b"some value")
+
+
+@mock_s3_deprecated
+def test_set_metadata():
+    conn = create_connection('the_key', 'the_secret')
+    bucket = conn.create_bucket("foobar")
+    key = Key(bucket)
+    key.key = 'the-key'
+    key.set_metadata('md', 'Metadatastring')
+    key.set_contents_from_string("Testval")
+
+    bucket.get_key('the-key').get_metadata('md').should.equal('Metadatastring')
+
+
+@freeze_time("2012-01-01 12:00:00")
+@mock_s3_deprecated
+def test_last_modified():
+    # See https://github.com/boto/boto/issues/466
+    conn = create_connection()
+    bucket = conn.create_bucket("foobar")
+    key = Key(bucket)
+    key.key = "the-key"
+    key.set_contents_from_string("some value")
+
+    rs = bucket.get_all_keys()
+    rs[0].last_modified.should.equal('2012-01-01T12:00:00.000Z')
+
+    bucket.get_key(
+        "the-key").last_modified.should.equal('Sun, 01 Jan 2012 12:00:00 GMT')
+
+
+@mock_s3_deprecated
+def test_missing_bucket():
+    conn = create_connection('the_key', 'the_secret')
+    conn.get_bucket.when.called_with('mybucket').should.throw(S3ResponseError)
+
+
+@mock_s3_deprecated
+def test_bucket_with_dash():
+    conn = create_connection('the_key', 'the_secret')
+    conn.get_bucket.when.called_with(
+        'mybucket-test').should.throw(S3ResponseError)
+
+
+@mock_s3_deprecated
+def test_bucket_deletion():
+    conn = create_connection('the_key', 'the_secret')
+    bucket = conn.create_bucket("foobar")
+
+    key = Key(bucket)
+    key.key = "the-key"
+    key.set_contents_from_string("some value")
+
+    # Try to delete a bucket that still has keys
+    conn.delete_bucket.when.called_with("foobar").should.throw(S3ResponseError)
+
+    bucket.delete_key("the-key")
+    conn.delete_bucket("foobar")
+
+    # Get non-existing bucket
+    conn.get_bucket.when.called_with("foobar").should.throw(S3ResponseError)
+
+    # Delete non-existent bucket
+    conn.delete_bucket.when.called_with("foobar").should.throw(S3ResponseError)
+
+
+@mock_s3_deprecated
+def test_get_all_buckets():
+    conn = create_connection('the_key', 'the_secret')
+    conn.create_bucket("foobar")
+    conn.create_bucket("foobar2")
+    buckets = conn.get_all_buckets()
+
+    buckets.should.have.length_of(2)
+
+
+@mock_s3
+@mock_s3_deprecated
+def test_post_to_bucket():
+    conn = create_connection('the_key', 'the_secret')
+    bucket = conn.create_bucket("foobar")
+
+    requests.post("https://s3.amazonaws.com/foobar", {
+        'key': 'the-key',
+        'file': 'nothing'
+    })
+
+    bucket.get_key('the-key').get_contents_as_string().should.equal(b'nothing')
+
+
+@mock_s3
+@mock_s3_deprecated
+def test_post_with_metadata_to_bucket():
+    conn = create_connection('the_key', 'the_secret')
+    bucket = conn.create_bucket("foobar")
+
+    requests.post("https://s3.amazonaws.com/foobar", {
+        'key': 'the-key',
+        'file': 'nothing',
+        'x-amz-meta-test': 'metadata'
+    })
+
+    bucket.get_key('the-key').get_metadata('test').should.equal('metadata')
+
+
+@mock_s3_deprecated
+def test_bucket_name_with_dot():
+    conn = create_connection()
+    bucket = conn.create_bucket('firstname.lastname')
+
+    k = Key(bucket, 'somekey')
+    k.set_contents_from_string('somedata')
+
+
+@mock_s3_deprecated
+def test_key_with_special_characters():
+    conn = create_connection()
+    bucket = conn.create_bucket('test_bucket_name')
+
+    key = Key(bucket, 'test_list_keys_2/*x+?^@~!y')
+    key.set_contents_from_string('value1')
+
+    key_list = bucket.list('test_list_keys_2/', '/')
+    keys = [x for x in key_list]
+    keys[0].name.should.equal("test_list_keys_2/*x+?^@~!y")
+
+
+@mock_s3_deprecated
+def test_bucket_key_listing_order():
+    conn = create_connection()
+    bucket = conn.create_bucket('test_bucket')
+    prefix = 'toplevel/'
+
+    def store(name):
+        k = Key(bucket, prefix + name)
+        k.set_contents_from_string('somedata')
+
+    names = ['x/key', 'y.key1', 'y.key2', 'y.key3', 'x/y/key', 'x/y/z/key']
+
+    for name in names:
+        store(name)
+
+    delimiter = None
+    keys = [x.name for x in bucket.list(prefix, delimiter)]
+    keys.should.equal([
+        'toplevel/x/key', 'toplevel/x/y/key', 'toplevel/x/y/z/key',
+        'toplevel/y.key1', 'toplevel/y.key2', 'toplevel/y.key3'
+    ])
+
+    delimiter = '/'
+    keys = [x.name for x in bucket.list(prefix, delimiter)]
+    keys.should.equal([
+        'toplevel/y.key1', 'toplevel/y.key2', 'toplevel/y.key3', 'toplevel/x/'
+    ])
+
+    # Test delimiter with no prefix
+    delimiter = '/'
+    keys = [x.name for x in bucket.list(prefix=None, delimiter=delimiter)]
+    keys.should.equal(['toplevel/'])
+
+    delimiter = None
+    keys = [x.name for x in bucket.list(prefix + 'x', delimiter)]
+    keys.should.equal(
+        ['toplevel/x/key', 'toplevel/x/y/key', 'toplevel/x/y/z/key'])
+
+    delimiter = '/'
+    keys = [x.name for x in bucket.list(prefix + 'x', delimiter)]
+    keys.should.equal(['toplevel/x/'])
+
+
+@mock_s3_deprecated
+def test_delete_keys():
+    conn = create_connection()
+    bucket = conn.create_bucket('foobar')
+
+    Key(bucket=bucket, name='file1').set_contents_from_string('abc')
+    Key(bucket=bucket, name='file2').set_contents_from_string('abc')
+    Key(bucket=bucket, name='file3').set_contents_from_string('abc')
+
Key(bucket=bucket, name='file4').set_contents_from_string('abc') + + result = bucket.delete_keys(['file2', 'file3']) + result.deleted.should.have.length_of(2) + result.errors.should.have.length_of(0) + keys = bucket.get_all_keys() + keys.should.have.length_of(2) + keys[0].name.should.equal('file1') + + +@mock_s3_deprecated +def test_delete_keys_with_invalid(): + conn = create_connection() + bucket = conn.create_bucket('foobar') + + Key(bucket=bucket, name='file1').set_contents_from_string('abc') + Key(bucket=bucket, name='file2').set_contents_from_string('abc') + Key(bucket=bucket, name='file3').set_contents_from_string('abc') + Key(bucket=bucket, name='file4').set_contents_from_string('abc') + + result = bucket.delete_keys(['abc', 'file3']) + + result.deleted.should.have.length_of(1) + result.errors.should.have.length_of(1) + keys = bucket.get_all_keys() + keys.should.have.length_of(3) + keys[0].name.should.equal('file1') diff --git a/tests/test_s3bucket_path/test_s3bucket_path_combo.py b/tests/test_s3bucket_path/test_s3bucket_path_combo.py index e1b1075ee0d3..60dd58e8530a 100644 --- a/tests/test_s3bucket_path/test_s3bucket_path_combo.py +++ b/tests/test_s3bucket_path/test_s3bucket_path_combo.py @@ -1,25 +1,25 @@ -from __future__ import unicode_literals - -import boto -from boto.s3.connection import OrdinaryCallingFormat - -from moto import mock_s3_deprecated - - -def create_connection(key=None, secret=None): - return boto.connect_s3(key, secret, calling_format=OrdinaryCallingFormat()) - - -def test_bucketpath_combo_serial(): - @mock_s3_deprecated - def make_bucket_path(): - conn = create_connection() - conn.create_bucket('mybucketpath') - - @mock_s3_deprecated - def make_bucket(): - conn = boto.connect_s3('the_key', 'the_secret') - conn.create_bucket('mybucket') - - make_bucket() - make_bucket_path() +from __future__ import unicode_literals + +import boto +from boto.s3.connection import OrdinaryCallingFormat + +from moto import mock_s3_deprecated + + +def create_connection(key=None, secret=None): + return boto.connect_s3(key, secret, calling_format=OrdinaryCallingFormat()) + + +def test_bucketpath_combo_serial(): + @mock_s3_deprecated + def make_bucket_path(): + conn = create_connection() + conn.create_bucket('mybucketpath') + + @mock_s3_deprecated + def make_bucket(): + conn = boto.connect_s3('the_key', 'the_secret') + conn.create_bucket('mybucket') + + make_bucket() + make_bucket_path() diff --git a/tests/test_s3bucket_path/test_s3bucket_path_utils.py b/tests/test_s3bucket_path/test_s3bucket_path_utils.py index c607ea2ecf7a..0bcc5cbe0228 100644 --- a/tests/test_s3bucket_path/test_s3bucket_path_utils.py +++ b/tests/test_s3bucket_path/test_s3bucket_path_utils.py @@ -1,16 +1,16 @@ -from __future__ import unicode_literals -from sure import expect -from moto.s3bucket_path.utils import bucket_name_from_url - - -def test_base_url(): - expect(bucket_name_from_url('https://s3.amazonaws.com/')).should.equal(None) - - -def test_localhost_bucket(): - expect(bucket_name_from_url('https://localhost:5000/wfoobar/abc') - ).should.equal("wfoobar") - - -def test_localhost_without_bucket(): - expect(bucket_name_from_url('https://www.localhost:5000')).should.equal(None) +from __future__ import unicode_literals +from sure import expect +from moto.s3bucket_path.utils import bucket_name_from_url + + +def test_base_url(): + expect(bucket_name_from_url('https://s3.amazonaws.com/')).should.equal(None) + + +def test_localhost_bucket(): + expect(bucket_name_from_url('https://localhost:5000/wfoobar/abc') + 
).should.equal("wfoobar") + + +def test_localhost_without_bucket(): + expect(bucket_name_from_url('https://www.localhost:5000')).should.equal(None) diff --git a/tests/test_secretsmanager/test_secretsmanager.py b/tests/test_secretsmanager/test_secretsmanager.py index ec384a6601c6..9d496704c57a 100644 --- a/tests/test_secretsmanager/test_secretsmanager.py +++ b/tests/test_secretsmanager/test_secretsmanager.py @@ -1,286 +1,286 @@ -from __future__ import unicode_literals - -import boto3 - -from moto import mock_secretsmanager -from botocore.exceptions import ClientError -import sure # noqa -import string -import unittest -from nose.tools import assert_raises - -@mock_secretsmanager -def test_get_secret_value(): - conn = boto3.client('secretsmanager', region_name='us-west-2') - - create_secret = conn.create_secret(Name='java-util-test-password', - SecretString="foosecret") - result = conn.get_secret_value(SecretId='java-util-test-password') - assert result['SecretString'] == 'foosecret' - -@mock_secretsmanager -def test_get_secret_that_does_not_exist(): - conn = boto3.client('secretsmanager', region_name='us-west-2') - - with assert_raises(ClientError): - result = conn.get_secret_value(SecretId='i-dont-exist') - -@mock_secretsmanager -def test_get_secret_that_does_not_match(): - conn = boto3.client('secretsmanager', region_name='us-west-2') - create_secret = conn.create_secret(Name='java-util-test-password', - SecretString="foosecret") - - with assert_raises(ClientError): - result = conn.get_secret_value(SecretId='i-dont-match') - -@mock_secretsmanager -def test_create_secret(): - conn = boto3.client('secretsmanager', region_name='us-east-1') - - result = conn.create_secret(Name='test-secret', SecretString="foosecret") - assert result['ARN'] == ( - 'arn:aws:secretsmanager:us-east-1:1234567890:secret:test-secret-rIjad') - assert result['Name'] == 'test-secret' - secret = conn.get_secret_value(SecretId='test-secret') - assert secret['SecretString'] == 'foosecret' - -@mock_secretsmanager -def test_get_random_password_default_length(): - conn = boto3.client('secretsmanager', region_name='us-west-2') - - random_password = conn.get_random_password() - assert len(random_password['RandomPassword']) == 32 - -@mock_secretsmanager -def test_get_random_password_default_requirements(): - # When require_each_included_type, default true - conn = boto3.client('secretsmanager', region_name='us-west-2') - - random_password = conn.get_random_password() - # Should contain lowercase, upppercase, digit, special character - assert any(c.islower() for c in random_password['RandomPassword']) - assert any(c.isupper() for c in random_password['RandomPassword']) - assert any(c.isdigit() for c in random_password['RandomPassword']) - assert any(c in string.punctuation - for c in random_password['RandomPassword']) - -@mock_secretsmanager -def test_get_random_password_custom_length(): - conn = boto3.client('secretsmanager', region_name='us-west-2') - - random_password = conn.get_random_password(PasswordLength=50) - assert len(random_password['RandomPassword']) == 50 - -@mock_secretsmanager -def test_get_random_exclude_lowercase(): - conn = boto3.client('secretsmanager', region_name='us-west-2') - - random_password = conn.get_random_password(PasswordLength=55, - ExcludeLowercase=True) - assert any(c.islower() for c in random_password['RandomPassword']) == False - -@mock_secretsmanager -def test_get_random_exclude_uppercase(): - conn = boto3.client('secretsmanager', region_name='us-west-2') - - random_password = 
conn.get_random_password(PasswordLength=55, - ExcludeUppercase=True) - assert any(c.isupper() for c in random_password['RandomPassword']) == False - -@mock_secretsmanager -def test_get_random_exclude_characters_and_symbols(): - conn = boto3.client('secretsmanager', region_name='us-west-2') - - random_password = conn.get_random_password(PasswordLength=20, - ExcludeCharacters='xyzDje@?!.') - assert any(c in 'xyzDje@?!.' for c in random_password['RandomPassword']) == False - -@mock_secretsmanager -def test_get_random_exclude_numbers(): - conn = boto3.client('secretsmanager', region_name='us-west-2') - - random_password = conn.get_random_password(PasswordLength=100, - ExcludeNumbers=True) - assert any(c.isdigit() for c in random_password['RandomPassword']) == False - -@mock_secretsmanager -def test_get_random_exclude_punctuation(): - conn = boto3.client('secretsmanager', region_name='us-west-2') - - random_password = conn.get_random_password(PasswordLength=100, - ExcludePunctuation=True) - assert any(c in string.punctuation - for c in random_password['RandomPassword']) == False - -@mock_secretsmanager -def test_get_random_include_space_false(): - conn = boto3.client('secretsmanager', region_name='us-west-2') - - random_password = conn.get_random_password(PasswordLength=300) - assert any(c.isspace() for c in random_password['RandomPassword']) == False - -@mock_secretsmanager -def test_get_random_include_space_true(): - conn = boto3.client('secretsmanager', region_name='us-west-2') - - random_password = conn.get_random_password(PasswordLength=4, - IncludeSpace=True) - assert any(c.isspace() for c in random_password['RandomPassword']) == True - -@mock_secretsmanager -def test_get_random_require_each_included_type(): - conn = boto3.client('secretsmanager', region_name='us-west-2') - - random_password = conn.get_random_password(PasswordLength=4, - RequireEachIncludedType=True) - assert any(c in string.punctuation for c in random_password['RandomPassword']) == True - assert any(c in string.ascii_lowercase for c in random_password['RandomPassword']) == True - assert any(c in string.ascii_uppercase for c in random_password['RandomPassword']) == True - assert any(c in string.digits for c in random_password['RandomPassword']) == True - -@mock_secretsmanager -def test_get_random_too_short_password(): - conn = boto3.client('secretsmanager', region_name='us-west-2') - - with assert_raises(ClientError): - random_password = conn.get_random_password(PasswordLength=3) - -@mock_secretsmanager -def test_get_random_too_long_password(): - conn = boto3.client('secretsmanager', region_name='us-west-2') - - with assert_raises(Exception): - random_password = conn.get_random_password(PasswordLength=5555) - -@mock_secretsmanager -def test_describe_secret(): - conn = boto3.client('secretsmanager', region_name='us-west-2') - conn.create_secret(Name='test-secret', - SecretString='foosecret') - - secret_description = conn.describe_secret(SecretId='test-secret') - assert secret_description # Returned dict is not empty - assert secret_description['ARN'] == ( - 'arn:aws:secretsmanager:us-west-2:1234567890:secret:test-secret-rIjad') - -@mock_secretsmanager -def test_describe_secret_that_does_not_exist(): - conn = boto3.client('secretsmanager', region_name='us-west-2') - - with assert_raises(ClientError): - result = conn.get_secret_value(SecretId='i-dont-exist') - -@mock_secretsmanager -def test_describe_secret_that_does_not_match(): - conn = boto3.client('secretsmanager', region_name='us-west-2') - 
conn.create_secret(Name='test-secret', - SecretString='foosecret') - - with assert_raises(ClientError): - result = conn.get_secret_value(SecretId='i-dont-match') - -@mock_secretsmanager -def test_rotate_secret(): - secret_name = 'test-secret' - conn = boto3.client('secretsmanager', region_name='us-west-2') - conn.create_secret(Name=secret_name, - SecretString='foosecret') - - rotated_secret = conn.rotate_secret(SecretId=secret_name) - - assert rotated_secret - assert rotated_secret['ARN'] == ( - 'arn:aws:secretsmanager:us-west-2:1234567890:secret:test-secret-rIjad' - ) - assert rotated_secret['Name'] == secret_name - assert rotated_secret['VersionId'] != '' - -@mock_secretsmanager -def test_rotate_secret_enable_rotation(): - secret_name = 'test-secret' - conn = boto3.client('secretsmanager', region_name='us-west-2') - conn.create_secret(Name=secret_name, - SecretString='foosecret') - - initial_description = conn.describe_secret(SecretId=secret_name) - assert initial_description - assert initial_description['RotationEnabled'] is False - assert initial_description['RotationRules']['AutomaticallyAfterDays'] == 0 - - conn.rotate_secret(SecretId=secret_name, - RotationRules={'AutomaticallyAfterDays': 42}) - - rotated_description = conn.describe_secret(SecretId=secret_name) - assert rotated_description - assert rotated_description['RotationEnabled'] is True - assert rotated_description['RotationRules']['AutomaticallyAfterDays'] == 42 - -@mock_secretsmanager -def test_rotate_secret_that_does_not_exist(): - conn = boto3.client('secretsmanager', 'us-west-2') - - with assert_raises(ClientError): - result = conn.rotate_secret(SecretId='i-dont-exist') - -@mock_secretsmanager -def test_rotate_secret_that_does_not_match(): - conn = boto3.client('secretsmanager', region_name='us-west-2') - conn.create_secret(Name='test-secret', - SecretString='foosecret') - - with assert_raises(ClientError): - result = conn.rotate_secret(SecretId='i-dont-match') - -@mock_secretsmanager -def test_rotate_secret_client_request_token_too_short(): - # Test is intentionally empty. Boto3 catches too short ClientRequestToken - # and raises ParamValidationError before Moto can see it. - # test_server actually handles this error. - assert True - -@mock_secretsmanager -def test_rotate_secret_client_request_token_too_long(): - secret_name = 'test-secret' - conn = boto3.client('secretsmanager', region_name='us-west-2') - conn.create_secret(Name=secret_name, - SecretString='foosecret') - - client_request_token = ( - 'ED9F8B6C-85B7-446A-B7E4-38F2A3BEB13C-' - 'ED9F8B6C-85B7-446A-B7E4-38F2A3BEB13C' - ) - with assert_raises(ClientError): - result = conn.rotate_secret(SecretId=secret_name, - ClientRequestToken=client_request_token) - -@mock_secretsmanager -def test_rotate_secret_rotation_lambda_arn_too_long(): - secret_name = 'test-secret' - conn = boto3.client('secretsmanager', region_name='us-west-2') - conn.create_secret(Name=secret_name, - SecretString='foosecret') - - rotation_lambda_arn = '85B7-446A-B7E4' * 147 # == 2058 characters - with assert_raises(ClientError): - result = conn.rotate_secret(SecretId=secret_name, - RotationLambdaARN=rotation_lambda_arn) - -@mock_secretsmanager -def test_rotate_secret_rotation_period_zero(): - # Test is intentionally empty. Boto3 catches zero day rotation period - # and raises ParamValidationError before Moto can see it. - # test_server actually handles this error. 
-    assert True
-
-@mock_secretsmanager
-def test_rotate_secret_rotation_period_too_long():
-    secret_name = 'test-secret'
-    conn = boto3.client('secretsmanager', region_name='us-west-2')
-    conn.create_secret(Name=secret_name,
-                       SecretString='foosecret')
-
-    rotation_rules = {'AutomaticallyAfterDays': 1001}
-    with assert_raises(ClientError):
-        result = conn.rotate_secret(SecretId=secret_name,
-                                    RotationRules=rotation_rules)
+from __future__ import unicode_literals
+
+import boto3
+
+from moto import mock_secretsmanager
+from botocore.exceptions import ClientError
+import sure  # noqa
+import string
+import unittest
+from nose.tools import assert_raises
+
+@mock_secretsmanager
+def test_get_secret_value():
+    conn = boto3.client('secretsmanager', region_name='us-west-2')
+
+    create_secret = conn.create_secret(Name='java-util-test-password',
+                                       SecretString="foosecret")
+    result = conn.get_secret_value(SecretId='java-util-test-password')
+    assert result['SecretString'] == 'foosecret'
+
+@mock_secretsmanager
+def test_get_secret_that_does_not_exist():
+    conn = boto3.client('secretsmanager', region_name='us-west-2')
+
+    with assert_raises(ClientError):
+        result = conn.get_secret_value(SecretId='i-dont-exist')
+
+@mock_secretsmanager
+def test_get_secret_that_does_not_match():
+    conn = boto3.client('secretsmanager', region_name='us-west-2')
+    create_secret = conn.create_secret(Name='java-util-test-password',
+                                       SecretString="foosecret")
+
+    with assert_raises(ClientError):
+        result = conn.get_secret_value(SecretId='i-dont-match')
+
+@mock_secretsmanager
+def test_create_secret():
+    conn = boto3.client('secretsmanager', region_name='us-east-1')
+
+    result = conn.create_secret(Name='test-secret', SecretString="foosecret")
+    assert result['ARN'] == (
+        'arn:aws:secretsmanager:us-east-1:1234567890:secret:test-secret-rIjad')
+    assert result['Name'] == 'test-secret'
+    secret = conn.get_secret_value(SecretId='test-secret')
+    assert secret['SecretString'] == 'foosecret'
+
+@mock_secretsmanager
+def test_get_random_password_default_length():
+    conn = boto3.client('secretsmanager', region_name='us-west-2')
+
+    random_password = conn.get_random_password()
+    assert len(random_password['RandomPassword']) == 32
+
+@mock_secretsmanager
+def test_get_random_password_default_requirements():
+    # RequireEachIncludedType defaults to true
+    conn = boto3.client('secretsmanager', region_name='us-west-2')
+
+    random_password = conn.get_random_password()
+    # Should contain lowercase, uppercase, digit, special character
+    assert any(c.islower() for c in random_password['RandomPassword'])
+    assert any(c.isupper() for c in random_password['RandomPassword'])
+    assert any(c.isdigit() for c in random_password['RandomPassword'])
+    assert any(c in string.punctuation
+               for c in random_password['RandomPassword'])
+
+@mock_secretsmanager
+def test_get_random_password_custom_length():
+    conn = boto3.client('secretsmanager', region_name='us-west-2')
+
+    random_password = conn.get_random_password(PasswordLength=50)
+    assert len(random_password['RandomPassword']) == 50
+
+@mock_secretsmanager
+def test_get_random_exclude_lowercase():
+    conn = boto3.client('secretsmanager', region_name='us-west-2')
+
+    random_password = conn.get_random_password(PasswordLength=55,
+                                               ExcludeLowercase=True)
+    assert any(c.islower() for c in random_password['RandomPassword']) == False
+
+@mock_secretsmanager
+def test_get_random_exclude_uppercase():
+    conn = boto3.client('secretsmanager', region_name='us-west-2')
+
+    random_password = conn.get_random_password(PasswordLength=55,
+                                               ExcludeUppercase=True)
+    assert any(c.isupper() for c in random_password['RandomPassword']) == False
+
+@mock_secretsmanager
+def test_get_random_exclude_characters_and_symbols():
+    conn = boto3.client('secretsmanager', region_name='us-west-2')
+
+    random_password = conn.get_random_password(PasswordLength=20,
+                                               ExcludeCharacters='xyzDje@?!.')
+    assert any(c in 'xyzDje@?!.' for c in random_password['RandomPassword']) == False
+
+@mock_secretsmanager
+def test_get_random_exclude_numbers():
+    conn = boto3.client('secretsmanager', region_name='us-west-2')
+
+    random_password = conn.get_random_password(PasswordLength=100,
+                                               ExcludeNumbers=True)
+    assert any(c.isdigit() for c in random_password['RandomPassword']) == False
+
+@mock_secretsmanager
+def test_get_random_exclude_punctuation():
+    conn = boto3.client('secretsmanager', region_name='us-west-2')
+
+    random_password = conn.get_random_password(PasswordLength=100,
+                                               ExcludePunctuation=True)
+    assert any(c in string.punctuation
+               for c in random_password['RandomPassword']) == False
+
+@mock_secretsmanager
+def test_get_random_include_space_false():
+    conn = boto3.client('secretsmanager', region_name='us-west-2')
+
+    random_password = conn.get_random_password(PasswordLength=300)
+    assert any(c.isspace() for c in random_password['RandomPassword']) == False
+
+@mock_secretsmanager
+def test_get_random_include_space_true():
+    conn = boto3.client('secretsmanager', region_name='us-west-2')
+
+    random_password = conn.get_random_password(PasswordLength=4,
+                                               IncludeSpace=True)
+    assert any(c.isspace() for c in random_password['RandomPassword']) == True
+
+@mock_secretsmanager
+def test_get_random_require_each_included_type():
+    conn = boto3.client('secretsmanager', region_name='us-west-2')
+
+    random_password = conn.get_random_password(PasswordLength=4,
+                                               RequireEachIncludedType=True)
+    assert any(c in string.punctuation for c in random_password['RandomPassword']) == True
+    assert any(c in string.ascii_lowercase for c in random_password['RandomPassword']) == True
+    assert any(c in string.ascii_uppercase for c in random_password['RandomPassword']) == True
+    assert any(c in string.digits for c in random_password['RandomPassword']) == True
+
+@mock_secretsmanager
+def test_get_random_too_short_password():
+    conn = boto3.client('secretsmanager', region_name='us-west-2')
+
+    with assert_raises(ClientError):
+        random_password = conn.get_random_password(PasswordLength=3)
+
+@mock_secretsmanager
+def test_get_random_too_long_password():
+    conn = boto3.client('secretsmanager', region_name='us-west-2')
+
+    with assert_raises(Exception):
+        random_password = conn.get_random_password(PasswordLength=5555)
+
+@mock_secretsmanager
+def test_describe_secret():
+    conn = boto3.client('secretsmanager', region_name='us-west-2')
+    conn.create_secret(Name='test-secret',
+                       SecretString='foosecret')
+
+    secret_description = conn.describe_secret(SecretId='test-secret')
+    assert secret_description  # Returned dict is not empty
+    assert secret_description['ARN'] == (
+        'arn:aws:secretsmanager:us-west-2:1234567890:secret:test-secret-rIjad')
+
+@mock_secretsmanager
+def test_describe_secret_that_does_not_exist():
+    conn = boto3.client('secretsmanager', region_name='us-west-2')
+
+    with assert_raises(ClientError):
+        result = conn.get_secret_value(SecretId='i-dont-exist')
+
+@mock_secretsmanager
+def test_describe_secret_that_does_not_match():
+    conn = boto3.client('secretsmanager', region_name='us-west-2')
+    conn.create_secret(Name='test-secret',
+                       SecretString='foosecret')
+
+    with assert_raises(ClientError):
+        result = conn.get_secret_value(SecretId='i-dont-match')
+
+@mock_secretsmanager
+def test_rotate_secret():
+    secret_name = 'test-secret'
+    conn = boto3.client('secretsmanager', region_name='us-west-2')
+    conn.create_secret(Name=secret_name,
+                       SecretString='foosecret')
+
+    rotated_secret = conn.rotate_secret(SecretId=secret_name)
+
+    assert rotated_secret
+    assert rotated_secret['ARN'] == (
+        'arn:aws:secretsmanager:us-west-2:1234567890:secret:test-secret-rIjad'
+    )
+    assert rotated_secret['Name'] == secret_name
+    assert rotated_secret['VersionId'] != ''
+
+@mock_secretsmanager
+def test_rotate_secret_enable_rotation():
+    secret_name = 'test-secret'
+    conn = boto3.client('secretsmanager', region_name='us-west-2')
+    conn.create_secret(Name=secret_name,
+                       SecretString='foosecret')
+
+    initial_description = conn.describe_secret(SecretId=secret_name)
+    assert initial_description
+    assert initial_description['RotationEnabled'] is False
+    assert initial_description['RotationRules']['AutomaticallyAfterDays'] == 0
+
+    conn.rotate_secret(SecretId=secret_name,
+                       RotationRules={'AutomaticallyAfterDays': 42})
+
+    rotated_description = conn.describe_secret(SecretId=secret_name)
+    assert rotated_description
+    assert rotated_description['RotationEnabled'] is True
+    assert rotated_description['RotationRules']['AutomaticallyAfterDays'] == 42
+
+@mock_secretsmanager
+def test_rotate_secret_that_does_not_exist():
+    conn = boto3.client('secretsmanager', 'us-west-2')
+
+    with assert_raises(ClientError):
+        result = conn.rotate_secret(SecretId='i-dont-exist')
+
+@mock_secretsmanager
+def test_rotate_secret_that_does_not_match():
+    conn = boto3.client('secretsmanager', region_name='us-west-2')
+    conn.create_secret(Name='test-secret',
+                       SecretString='foosecret')
+
+    with assert_raises(ClientError):
+        result = conn.rotate_secret(SecretId='i-dont-match')
+
+@mock_secretsmanager
+def test_rotate_secret_client_request_token_too_short():
+    # Test is intentionally empty. Boto3 catches too short ClientRequestToken
+    # and raises ParamValidationError before Moto can see it.
+    # test_server actually handles this error.
+    assert True
+
+@mock_secretsmanager
+def test_rotate_secret_client_request_token_too_long():
+    secret_name = 'test-secret'
+    conn = boto3.client('secretsmanager', region_name='us-west-2')
+    conn.create_secret(Name=secret_name,
+                       SecretString='foosecret')
+
+    client_request_token = (
+        'ED9F8B6C-85B7-446A-B7E4-38F2A3BEB13C-'
+        'ED9F8B6C-85B7-446A-B7E4-38F2A3BEB13C'
+    )
+    with assert_raises(ClientError):
+        result = conn.rotate_secret(SecretId=secret_name,
+                                    ClientRequestToken=client_request_token)
+
+@mock_secretsmanager
+def test_rotate_secret_rotation_lambda_arn_too_long():
+    secret_name = 'test-secret'
+    conn = boto3.client('secretsmanager', region_name='us-west-2')
+    conn.create_secret(Name=secret_name,
+                       SecretString='foosecret')
+
+    rotation_lambda_arn = '85B7-446A-B7E4' * 147  # == 2058 characters
+    with assert_raises(ClientError):
+        result = conn.rotate_secret(SecretId=secret_name,
+                                    RotationLambdaARN=rotation_lambda_arn)
+
+@mock_secretsmanager
+def test_rotate_secret_rotation_period_zero():
+    # Test is intentionally empty. Boto3 catches zero day rotation period
+    # and raises ParamValidationError before Moto can see it.
+    # test_server actually handles this error.
+    assert True
+
+@mock_secretsmanager
+def test_rotate_secret_rotation_period_too_long():
+    secret_name = 'test-secret'
+    conn = boto3.client('secretsmanager', region_name='us-west-2')
+    conn.create_secret(Name=secret_name,
+                       SecretString='foosecret')
+
+    rotation_rules = {'AutomaticallyAfterDays': 1001}
+    with assert_raises(ClientError):
+        result = conn.rotate_secret(SecretId=secret_name,
+                                    RotationRules=rotation_rules)
diff --git a/tests/test_secretsmanager/test_server.py b/tests/test_secretsmanager/test_server.py
index e573f9b6719b..3365fe4de713 100644
--- a/tests/test_secretsmanager/test_server.py
+++ b/tests/test_secretsmanager/test_server.py
@@ -1,421 +1,421 @@
-from __future__ import unicode_literals
-
-import json
-import sure  # noqa
-
-import moto.server as server
-from moto import mock_secretsmanager
-
-'''
-Test the different server responses for secretsmanager
-'''
-
-
-@mock_secretsmanager
-def test_get_secret_value():
-
-    backend = server.create_backend_app("secretsmanager")
-    test_client = backend.test_client()
-
-    create_secret = test_client.post('/',
-                        data={"Name": "test-secret",
-                              "SecretString": "foo-secret"},
-                        headers={
-                            "X-Amz-Target": "secretsmanager.CreateSecret"},
-                    )
-    get_secret = test_client.post('/',
-                        data={"SecretId": "test-secret",
-                              "VersionStage": "AWSCURRENT"},
-                        headers={
-                            "X-Amz-Target": "secretsmanager.GetSecretValue"},
-                    )
-
-    json_data = json.loads(get_secret.data.decode("utf-8"))
-    assert json_data['SecretString'] == 'foo-secret'
-
-@mock_secretsmanager
-def test_get_secret_that_does_not_exist():
-
-    backend = server.create_backend_app("secretsmanager")
-    test_client = backend.test_client()
-
-    get_secret = test_client.post('/',
-                        data={"SecretId": "i-dont-exist",
-                              "VersionStage": "AWSCURRENT"},
-                        headers={
-                            "X-Amz-Target": "secretsmanager.GetSecretValue"},
-                    )
-    json_data = json.loads(get_secret.data.decode("utf-8"))
-    assert json_data['message'] == "Secrets Manager can't find the specified secret"
-    assert json_data['__type'] == 'ResourceNotFoundException'
-
-@mock_secretsmanager
-def test_get_secret_that_does_not_match():
-    backend = server.create_backend_app("secretsmanager")
-    test_client = backend.test_client()
-
-    create_secret = test_client.post('/',
-                        data={"Name": "test-secret",
-                              "SecretString": "foo-secret"},
-                        headers={
-                            "X-Amz-Target": "secretsmanager.CreateSecret"},
-                    )
-    get_secret = test_client.post('/',
-                        data={"SecretId": "i-dont-match",
-                              "VersionStage": "AWSCURRENT"},
-                        headers={
-                            "X-Amz-Target": "secretsmanager.GetSecretValue"},
-                    )
-    json_data = json.loads(get_secret.data.decode("utf-8"))
-    assert json_data['message'] == "Secrets Manager can't find the specified secret"
-    assert json_data['__type'] == 'ResourceNotFoundException'
-
-@mock_secretsmanager
-def test_create_secret():
-
-    backend = server.create_backend_app("secretsmanager")
-    test_client = backend.test_client()
-
-    res = test_client.post('/',
-                        data={"Name": "test-secret",
-                              "SecretString": "foo-secret"},
-                        headers={
-                            "X-Amz-Target": "secretsmanager.CreateSecret"},
-                    )
-
-    json_data = json.loads(res.data.decode("utf-8"))
-    assert json_data['ARN'] == (
-        'arn:aws:secretsmanager:us-east-1:1234567890:secret:test-secret-rIjad')
-    assert json_data['Name'] == 'test-secret'
-
-@mock_secretsmanager
-def test_describe_secret():
-
-    backend = server.create_backend_app('secretsmanager')
-    test_client = backend.test_client()
-
-    create_secret = test_client.post('/',
-                        data={"Name": "test-secret",
-                              "SecretString": "foosecret"},
-                        headers={
-                            "X-Amz-Target": "secretsmanager.CreateSecret"
-                        },
-                    )
-    describe_secret = test_client.post('/',
-                        data={"SecretId": "test-secret"},
-                        headers={
-                            "X-Amz-Target": "secretsmanager.DescribeSecret"
-                        },
-                    )
-
-    json_data = json.loads(describe_secret.data.decode("utf-8"))
-    assert json_data  # Returned dict is not empty
-    assert json_data['ARN'] == (
-        'arn:aws:secretsmanager:us-east-1:1234567890:secret:test-secret-rIjad'
-    )
-
-@mock_secretsmanager
-def test_describe_secret_that_does_not_exist():
-
-    backend = server.create_backend_app('secretsmanager')
-    test_client = backend.test_client()
-
-    describe_secret = test_client.post('/',
-                        data={"SecretId": "i-dont-exist"},
-                        headers={
-                            "X-Amz-Target": "secretsmanager.DescribeSecret"
-                        },
-                    )
-
-    json_data = json.loads(describe_secret.data.decode("utf-8"))
-    assert json_data['message'] == "Secrets Manager can't find the specified secret"
-    assert json_data['__type'] == 'ResourceNotFoundException'
-
-@mock_secretsmanager
-def test_describe_secret_that_does_not_match():
-
-    backend = server.create_backend_app('secretsmanager')
-    test_client = backend.test_client()
-
-    create_secret = test_client.post('/',
-                        data={"Name": "test-secret",
-                              "SecretString": "foosecret"},
-                        headers={
-                            "X-Amz-Target": "secretsmanager.CreateSecret"
-                        },
-                    )
-    describe_secret = test_client.post('/',
-                        data={"SecretId": "i-dont-match"},
-                        headers={
-                            "X-Amz-Target": "secretsmanager.DescribeSecret"
-                        },
-                    )
-
-    json_data = json.loads(describe_secret.data.decode("utf-8"))
-    assert json_data['message'] == "Secrets Manager can't find the specified secret"
-    assert json_data['__type'] == 'ResourceNotFoundException'
-
-@mock_secretsmanager
-def test_rotate_secret():
-    backend = server.create_backend_app('secretsmanager')
-    test_client = backend.test_client()
-
-    create_secret = test_client.post('/',
-                        data={"Name": "test-secret",
-                              "SecretString": "foosecret"},
-                        headers={
-                            "X-Amz-Target": "secretsmanager.CreateSecret"
-                        },
-                    )
-
-    client_request_token = "EXAMPLE2-90ab-cdef-fedc-ba987SECRET2"
-    rotate_secret = test_client.post('/',
-                        data={"SecretId": "test-secret",
-                              "ClientRequestToken": client_request_token},
-                        headers={
-                            "X-Amz-Target": "secretsmanager.RotateSecret"
-                        },
-                    )
-
-    json_data = json.loads(rotate_secret.data.decode("utf-8"))
-    assert json_data  # Returned dict is not empty
-    assert json_data['ARN'] == (
-        'arn:aws:secretsmanager:us-east-1:1234567890:secret:test-secret-rIjad'
-    )
-    assert json_data['Name'] == 'test-secret'
-    assert json_data['VersionId'] == client_request_token
-
-# @mock_secretsmanager
-# def test_rotate_secret_enable_rotation():
-#     backend = server.create_backend_app('secretsmanager')
-#     test_client = backend.test_client()
-
-#     create_secret = test_client.post(
-#         '/',
-#         data={
-#             "Name": "test-secret",
-#             "SecretString": "foosecret"
-#         },
-#         headers={
-#             "X-Amz-Target": "secretsmanager.CreateSecret"
-#         },
-#     )
-
-#     initial_description = test_client.post(
-#         '/',
-#         data={
-#             "SecretId": "test-secret"
-#         },
-#         headers={
-#             "X-Amz-Target": "secretsmanager.DescribeSecret"
-#         },
-#     )
-
-#     json_data = json.loads(initial_description.data.decode("utf-8"))
-#     assert json_data  # Returned dict is not empty
-#     assert json_data['RotationEnabled'] is False
-#     assert json_data['RotationRules']['AutomaticallyAfterDays'] == 0
-
-#     rotate_secret = test_client.post(
-#         '/',
-#         data={
-#             "SecretId": "test-secret",
-#             "RotationRules": {"AutomaticallyAfterDays": 42}
-#         },
-#         headers={
-#             "X-Amz-Target": "secretsmanager.RotateSecret"
-#         },
-#     )
-
-#     rotated_description = test_client.post(
-#         '/',
-#         data={
-#             "SecretId": "test-secret"
-#         },
-#         headers={
-#             "X-Amz-Target": "secretsmanager.DescribeSecret"
-#         },
-#     )
-
-#     json_data = json.loads(rotated_description.data.decode("utf-8"))
-#     assert json_data  # Returned dict is not empty
-#     assert json_data['RotationEnabled'] is True
-#     assert json_data['RotationRules']['AutomaticallyAfterDays'] == 42
-
-@mock_secretsmanager
-def test_rotate_secret_that_does_not_exist():
-    backend = server.create_backend_app('secretsmanager')
-    test_client = backend.test_client()
-
-    rotate_secret = test_client.post('/',
-                        data={"SecretId": "i-dont-exist"},
-                        headers={
-                            "X-Amz-Target": "secretsmanager.RotateSecret"
-                        },
-                    )
-
-    json_data = json.loads(rotate_secret.data.decode("utf-8"))
-    assert json_data['message'] == "Secrets Manager can't find the specified secret"
-    assert json_data['__type'] == 'ResourceNotFoundException'
-
-@mock_secretsmanager
-def test_rotate_secret_that_does_not_match():
-    backend = server.create_backend_app('secretsmanager')
-    test_client = backend.test_client()
-
-    create_secret = test_client.post('/',
-                        data={"Name": "test-secret",
-                              "SecretString": "foosecret"},
-                        headers={
-                            "X-Amz-Target": "secretsmanager.CreateSecret"
-                        },
-                    )
-
-    rotate_secret = test_client.post('/',
-                        data={"SecretId": "i-dont-match"},
-                        headers={
-                            "X-Amz-Target": "secretsmanager.RotateSecret"
-                        },
-                    )
-
-    json_data = json.loads(rotate_secret.data.decode("utf-8"))
-    assert json_data['message'] == "Secrets Manager can't find the specified secret"
-    assert json_data['__type'] == 'ResourceNotFoundException'
-
-@mock_secretsmanager
-def test_rotate_secret_client_request_token_too_short():
-    backend = server.create_backend_app('secretsmanager')
-    test_client = backend.test_client()
-
-    create_secret = test_client.post('/',
-                        data={"Name": "test-secret",
-                              "SecretString": "foosecret"},
-                        headers={
-                            "X-Amz-Target": "secretsmanager.CreateSecret"
-                        },
-                    )
-
-    client_request_token = "ED9F8B6C-85B7-B7E4-38F2A3BEB13C"
-    rotate_secret = test_client.post('/',
-                        data={"SecretId": "test-secret",
-                              "ClientRequestToken": client_request_token},
-                        headers={
-                            "X-Amz-Target": "secretsmanager.RotateSecret"
-                        },
-                    )
-
-    json_data = json.loads(rotate_secret.data.decode("utf-8"))
-    assert json_data['message'] == "ClientRequestToken must be 32-64 characters long."
-    assert json_data['__type'] == 'InvalidParameterException'
-
-@mock_secretsmanager
-def test_rotate_secret_client_request_token_too_long():
-    backend = server.create_backend_app('secretsmanager')
-    test_client = backend.test_client()
-
-    create_secret = test_client.post('/',
-                        data={"Name": "test-secret",
-                              "SecretString": "foosecret"},
-                        headers={
-                            "X-Amz-Target": "secretsmanager.CreateSecret"
-                        },
-                    )
-
-    client_request_token = (
-        'ED9F8B6C-85B7-446A-B7E4-38F2A3BEB13C-'
-        'ED9F8B6C-85B7-446A-B7E4-38F2A3BEB13C'
-    )
-    rotate_secret = test_client.post('/',
-                        data={"SecretId": "test-secret",
-                              "ClientRequestToken": client_request_token},
-                        headers={
-                            "X-Amz-Target": "secretsmanager.RotateSecret"
-                        },
-                    )
-
-    json_data = json.loads(rotate_secret.data.decode("utf-8"))
-    assert json_data['message'] == "ClientRequestToken must be 32-64 characters long."
-    assert json_data['__type'] == 'InvalidParameterException'
-
-@mock_secretsmanager
-def test_rotate_secret_rotation_lambda_arn_too_long():
-    backend = server.create_backend_app('secretsmanager')
-    test_client = backend.test_client()
-
-    create_secret = test_client.post('/',
-                        data={"Name": "test-secret",
-                              "SecretString": "foosecret"},
-                        headers={
-                            "X-Amz-Target": "secretsmanager.CreateSecret"
-                        },
-                    )
-
-    rotation_lambda_arn = '85B7-446A-B7E4' * 147  # == 2058 characters
-    rotate_secret = test_client.post('/',
-                        data={"SecretId": "test-secret",
-                              "RotationLambdaARN": rotation_lambda_arn},
-                        headers={
-                            "X-Amz-Target": "secretsmanager.RotateSecret"
-                        },
-                    )
-
-    json_data = json.loads(rotate_secret.data.decode("utf-8"))
-    assert json_data['message'] == "RotationLambdaARN must <= 2048 characters long."
-    assert json_data['__type'] == 'InvalidParameterException'
-
-
-#
-# The following tests should work, but fail on the embedded dict in
-# RotationRules. The error message suggests a problem deeper in the code, which
-# needs further investigation.
-#
-
-# @mock_secretsmanager
-# def test_rotate_secret_rotation_period_zero():
-#     backend = server.create_backend_app('secretsmanager')
-#     test_client = backend.test_client()
-
-#     create_secret = test_client.post('/',
-#                         data={"Name": "test-secret",
-#                               "SecretString": "foosecret"},
-#                         headers={
-#                             "X-Amz-Target": "secretsmanager.CreateSecret"
-#                         },
-#                     )
-
-#     rotate_secret = test_client.post('/',
-#                         data={"SecretId": "test-secret",
-#                               "RotationRules": {"AutomaticallyAfterDays": 0}},
-#                         headers={
-#                             "X-Amz-Target": "secretsmanager.RotateSecret"
-#                         },
-#                     )
-
-#     json_data = json.loads(rotate_secret.data.decode("utf-8"))
-#     assert json_data['message'] == "RotationRules.AutomaticallyAfterDays must be within 1-1000."
-# assert json_data['__type'] == 'InvalidParameterException' +from __future__ import unicode_literals + +import json +import sure # noqa + +import moto.server as server +from moto import mock_secretsmanager + +''' +Test the different server responses for secretsmanager +''' + + +@mock_secretsmanager +def test_get_secret_value(): + + backend = server.create_backend_app("secretsmanager") + test_client = backend.test_client() + + create_secret = test_client.post('/', + data={"Name": "test-secret", + "SecretString": "foo-secret"}, + headers={ + "X-Amz-Target": "secretsmanager.CreateSecret"}, + ) + get_secret = test_client.post('/', + data={"SecretId": "test-secret", + "VersionStage": "AWSCURRENT"}, + headers={ + "X-Amz-Target": "secretsmanager.GetSecretValue"}, + ) + + json_data = json.loads(get_secret.data.decode("utf-8")) + assert json_data['SecretString'] == 'foo-secret' + +@mock_secretsmanager +def test_get_secret_that_does_not_exist(): + + backend = server.create_backend_app("secretsmanager") + test_client = backend.test_client() + + get_secret = test_client.post('/', + data={"SecretId": "i-dont-exist", + "VersionStage": "AWSCURRENT"}, + headers={ + "X-Amz-Target": "secretsmanager.GetSecretValue"}, + ) + json_data = json.loads(get_secret.data.decode("utf-8")) + assert json_data['message'] == "Secrets Manager can't find the specified secret" + assert json_data['__type'] == 'ResourceNotFoundException' + +@mock_secretsmanager +def test_get_secret_that_does_not_match(): + backend = server.create_backend_app("secretsmanager") + test_client = backend.test_client() + + create_secret = test_client.post('/', + data={"Name": "test-secret", + "SecretString": "foo-secret"}, + headers={ + "X-Amz-Target": "secretsmanager.CreateSecret"}, + ) + get_secret = test_client.post('/', + data={"SecretId": "i-dont-match", + "VersionStage": "AWSCURRENT"}, + headers={ + "X-Amz-Target": "secretsmanager.GetSecretValue"}, + ) + json_data = json.loads(get_secret.data.decode("utf-8")) + assert json_data['message'] == "Secrets Manager can't find the specified secret" + assert json_data['__type'] == 'ResourceNotFoundException' + +@mock_secretsmanager +def test_create_secret(): + + backend = server.create_backend_app("secretsmanager") + test_client = backend.test_client() + + res = test_client.post('/', + data={"Name": "test-secret", + "SecretString": "foo-secret"}, + headers={ + "X-Amz-Target": "secretsmanager.CreateSecret"}, + ) + + json_data = json.loads(res.data.decode("utf-8")) + assert json_data['ARN'] == ( + 'arn:aws:secretsmanager:us-east-1:1234567890:secret:test-secret-rIjad') + assert json_data['Name'] == 'test-secret' + +@mock_secretsmanager +def test_describe_secret(): + + backend = server.create_backend_app('secretsmanager') + test_client = backend.test_client() + + create_secret = test_client.post('/', + data={"Name": "test-secret", + "SecretString": "foosecret"}, + headers={ + "X-Amz-Target": "secretsmanager.CreateSecret" + }, + ) + describe_secret = test_client.post('/', + data={"SecretId": "test-secret"}, + headers={ + "X-Amz-Target": "secretsmanager.DescribeSecret" + }, + ) + + json_data = json.loads(describe_secret.data.decode("utf-8")) + assert json_data # Returned dict is not empty + assert json_data['ARN'] == ( + 'arn:aws:secretsmanager:us-east-1:1234567890:secret:test-secret-rIjad' + ) + +@mock_secretsmanager +def test_describe_secret_that_does_not_exist(): + + backend = server.create_backend_app('secretsmanager') + test_client = backend.test_client() + + describe_secret = test_client.post('/', + 
data={"SecretId": "i-dont-exist"}, + headers={ + "X-Amz-Target": "secretsmanager.DescribeSecret" + }, + ) + + json_data = json.loads(describe_secret.data.decode("utf-8")) + assert json_data['message'] == "Secrets Manager can't find the specified secret" + assert json_data['__type'] == 'ResourceNotFoundException' + +@mock_secretsmanager +def test_describe_secret_that_does_not_match(): + + backend = server.create_backend_app('secretsmanager') + test_client = backend.test_client() + + create_secret = test_client.post('/', + data={"Name": "test-secret", + "SecretString": "foosecret"}, + headers={ + "X-Amz-Target": "secretsmanager.CreateSecret" + }, + ) + describe_secret = test_client.post('/', + data={"SecretId": "i-dont-match"}, + headers={ + "X-Amz-Target": "secretsmanager.DescribeSecret" + }, + ) + + json_data = json.loads(describe_secret.data.decode("utf-8")) + assert json_data['message'] == "Secrets Manager can't find the specified secret" + assert json_data['__type'] == 'ResourceNotFoundException' + +@mock_secretsmanager +def test_rotate_secret(): + backend = server.create_backend_app('secretsmanager') + test_client = backend.test_client() + + create_secret = test_client.post('/', + data={"Name": "test-secret", + "SecretString": "foosecret"}, + headers={ + "X-Amz-Target": "secretsmanager.CreateSecret" + }, + ) + + client_request_token = "EXAMPLE2-90ab-cdef-fedc-ba987SECRET2" + rotate_secret = test_client.post('/', + data={"SecretId": "test-secret", + "ClientRequestToken": client_request_token}, + headers={ + "X-Amz-Target": "secretsmanager.RotateSecret" + }, + ) + + json_data = json.loads(rotate_secret.data.decode("utf-8")) + assert json_data # Returned dict is not empty + assert json_data['ARN'] == ( + 'arn:aws:secretsmanager:us-east-1:1234567890:secret:test-secret-rIjad' + ) + assert json_data['Name'] == 'test-secret' + assert json_data['VersionId'] == client_request_token + +# @mock_secretsmanager +# def test_rotate_secret_enable_rotation(): +# backend = server.create_backend_app('secretsmanager') +# test_client = backend.test_client() + +# create_secret = test_client.post( +# '/', +# data={ +# "Name": "test-secret", +# "SecretString": "foosecret" +# }, +# headers={ +# "X-Amz-Target": "secretsmanager.CreateSecret" +# }, +# ) + +# initial_description = test_client.post( +# '/', +# data={ +# "SecretId": "test-secret" +# }, +# headers={ +# "X-Amz-Target": "secretsmanager.DescribeSecret" +# }, +# ) + +# json_data = json.loads(initial_description.data.decode("utf-8")) +# assert json_data # Returned dict is not empty +# assert json_data['RotationEnabled'] is False +# assert json_data['RotationRules']['AutomaticallyAfterDays'] == 0 + +# rotate_secret = test_client.post( +# '/', +# data={ +# "SecretId": "test-secret", +# "RotationRules": {"AutomaticallyAfterDays": 42} +# }, +# headers={ +# "X-Amz-Target": "secretsmanager.RotateSecret" +# }, +# ) + +# rotated_description = test_client.post( +# '/', +# data={ +# "SecretId": "test-secret" +# }, +# headers={ +# "X-Amz-Target": "secretsmanager.DescribeSecret" +# }, +# ) + +# json_data = json.loads(rotated_description.data.decode("utf-8")) +# assert json_data # Returned dict is not empty +# assert json_data['RotationEnabled'] is True +# assert json_data['RotationRules']['AutomaticallyAfterDays'] == 42 + +@mock_secretsmanager +def test_rotate_secret_that_does_not_exist(): + backend = server.create_backend_app('secretsmanager') + test_client = backend.test_client() + + rotate_secret = test_client.post('/', + data={"SecretId": "i-dont-exist"}, + 
headers={ + "X-Amz-Target": "secretsmanager.RotateSecret" + }, + ) + + json_data = json.loads(rotate_secret.data.decode("utf-8")) + assert json_data['message'] == "Secrets Manager can't find the specified secret" + assert json_data['__type'] == 'ResourceNotFoundException' + +@mock_secretsmanager +def test_rotate_secret_that_does_not_match(): + backend = server.create_backend_app('secretsmanager') + test_client = backend.test_client() + + create_secret = test_client.post('/', + data={"Name": "test-secret", + "SecretString": "foosecret"}, + headers={ + "X-Amz-Target": "secretsmanager.CreateSecret" + }, + ) + + rotate_secret = test_client.post('/', + data={"SecretId": "i-dont-match"}, + headers={ + "X-Amz-Target": "secretsmanager.RotateSecret" + }, + ) + + json_data = json.loads(rotate_secret.data.decode("utf-8")) + assert json_data['message'] == "Secrets Manager can't find the specified secret" + assert json_data['__type'] == 'ResourceNotFoundException' + +@mock_secretsmanager +def test_rotate_secret_client_request_token_too_short(): + backend = server.create_backend_app('secretsmanager') + test_client = backend.test_client() + + create_secret = test_client.post('/', + data={"Name": "test-secret", + "SecretString": "foosecret"}, + headers={ + "X-Amz-Target": "secretsmanager.CreateSecret" + }, + ) + + client_request_token = "ED9F8B6C-85B7-B7E4-38F2A3BEB13C" + rotate_secret = test_client.post('/', + data={"SecretId": "test-secret", + "ClientRequestToken": client_request_token}, + headers={ + "X-Amz-Target": "secretsmanager.RotateSecret" + }, + ) + + json_data = json.loads(rotate_secret.data.decode("utf-8")) + assert json_data['message'] == "ClientRequestToken must be 32-64 characters long." + assert json_data['__type'] == 'InvalidParameterException' + +@mock_secretsmanager +def test_rotate_secret_client_request_token_too_long(): + backend = server.create_backend_app('secretsmanager') + test_client = backend.test_client() + + create_secret = test_client.post('/', + data={"Name": "test-secret", + "SecretString": "foosecret"}, + headers={ + "X-Amz-Target": "secretsmanager.CreateSecret" + }, + ) + + client_request_token = ( + 'ED9F8B6C-85B7-446A-B7E4-38F2A3BEB13C-' + 'ED9F8B6C-85B7-446A-B7E4-38F2A3BEB13C' + ) + rotate_secret = test_client.post('/', + data={"SecretId": "test-secret", + "ClientRequestToken": client_request_token}, + headers={ + "X-Amz-Target": "secretsmanager.RotateSecret" + }, + ) + + json_data = json.loads(rotate_secret.data.decode("utf-8")) + assert json_data['message'] == "ClientRequestToken must be 32-64 characters long." + assert json_data['__type'] == 'InvalidParameterException' + +@mock_secretsmanager +def test_rotate_secret_rotation_lambda_arn_too_long(): + backend = server.create_backend_app('secretsmanager') + test_client = backend.test_client() + + create_secret = test_client.post('/', + data={"Name": "test-secret", + "SecretString": "foosecret"}, + headers={ + "X-Amz-Target": "secretsmanager.CreateSecret" + }, + ) + + rotation_lambda_arn = '85B7-446A-B7E4' * 147 # == 2058 characters + rotate_secret = test_client.post('/', + data={"SecretId": "test-secret", + "RotationLambdaARN": rotation_lambda_arn}, + headers={ + "X-Amz-Target": "secretsmanager.RotateSecret" + }, + ) + + json_data = json.loads(rotate_secret.data.decode("utf-8")) + assert json_data['message'] == "RotationLambdaARN must <= 2048 characters long." + assert json_data['__type'] == 'InvalidParameterException' + + +# +# The following tests should work, but fail on the embedded dict in +# RotationRules. 
The error message suggests a problem deeper in the code, which +# needs further investigation. +# + +# @mock_secretsmanager +# def test_rotate_secret_rotation_period_zero(): +# backend = server.create_backend_app('secretsmanager') +# test_client = backend.test_client() + +# create_secret = test_client.post('/', +# data={"Name": "test-secret", +# "SecretString": "foosecret"}, +# headers={ +# "X-Amz-Target": "secretsmanager.CreateSecret" +# }, +# ) + +# rotate_secret = test_client.post('/', +# data={"SecretId": "test-secret", +# "RotationRules": {"AutomaticallyAfterDays": 0}}, +# headers={ +# "X-Amz-Target": "secretsmanager.RotateSecret" +# }, +# ) + +# json_data = json.loads(rotate_secret.data.decode("utf-8")) +# assert json_data['message'] == "RotationRules.AutomaticallyAfterDays must be within 1-1000." +# assert json_data['__type'] == 'InvalidParameterException' + +# @mock_secretsmanager +# def test_rotate_secret_rotation_period_too_long(): +# backend = server.create_backend_app('secretsmanager') +# test_client = backend.test_client() + +# create_secret = test_client.post('/', +# data={"Name": "test-secret", +# "SecretString": "foosecret"}, +# headers={ +# "X-Amz-Target": "secretsmanager.CreateSecret" +# }, +# ) + +# rotate_secret = test_client.post('/', +# data={"SecretId": "test-secret", +# "RotationRules": {"AutomaticallyAfterDays": 1001}}, +# headers={ +# "X-Amz-Target": "secretsmanager.RotateSecret" +# }, +# ) + +# json_data = json.loads(rotate_secret.data.decode("utf-8")) +# assert json_data['message'] == "RotationRules.AutomaticallyAfterDays must be within 1-1000." +# assert json_data['__type'] == 'InvalidParameterException' diff --git a/tests/test_ses/test_server.py b/tests/test_ses/test_server.py index 6af6560004d4..e679f06fb382 100644 --- a/tests/test_ses/test_server.py +++ b/tests/test_ses/test_server.py @@ -1,16 +1,16 @@ -from __future__ import unicode_literals -import sure # noqa - -import moto.server as server - -''' -Test the different server responses -''' - - -def test_ses_list_identities(): - backend = server.create_backend_app("ses") - test_client = backend.test_client() - - res = test_client.get('/?Action=ListIdentities') - res.data.should.contain(b"ListIdentitiesResponse") +from __future__ import unicode_literals +import sure # noqa + +import moto.server as server + +''' +Test the different server responses +''' + + +def test_ses_list_identities(): + backend = server.create_backend_app("ses") + test_client = backend.test_client() + + res = test_client.get('/?Action=ListIdentities') + res.data.should.contain(b"ListIdentitiesResponse") diff --git a/tests/test_ses/test_ses.py b/tests/test_ses/test_ses.py index 431d42e1d1e8..4514267c3c2b 100644 --- a/tests/test_ses/test_ses.py +++ b/tests/test_ses/test_ses.py @@ -1,116 +1,116 @@ -from __future__ import unicode_literals -import email - -import boto -from boto.exception import BotoServerError - -import sure # noqa - -from moto import mock_ses_deprecated - - -@mock_ses_deprecated -def test_verify_email_identity(): - conn = boto.connect_ses('the_key', 'the_secret') - conn.verify_email_identity("test@example.com") - - identities = conn.list_identities() - address = identities['ListIdentitiesResponse'][ - 'ListIdentitiesResult']['Identities'][0] - address.should.equal('test@example.com') - - -@mock_ses_deprecated -def test_domain_verify(): - conn = boto.connect_ses('the_key', 'the_secret') - - conn.verify_domain_dkim("domain1.com") - conn.verify_domain_identity("domain2.com") - - identities = conn.list_identities() - domains = 
list(identities['ListIdentitiesResponse'][ - 'ListIdentitiesResult']['Identities']) - domains.should.equal(['domain1.com', 'domain2.com']) - - -@mock_ses_deprecated -def test_delete_identity(): - conn = boto.connect_ses('the_key', 'the_secret') - conn.verify_email_identity("test@example.com") - - conn.list_identities()['ListIdentitiesResponse']['ListIdentitiesResult'][ - 'Identities'].should.have.length_of(1) - conn.delete_identity("test@example.com") - conn.list_identities()['ListIdentitiesResponse']['ListIdentitiesResult'][ - 'Identities'].should.have.length_of(0) - - -@mock_ses_deprecated -def test_send_email(): - conn = boto.connect_ses('the_key', 'the_secret') - - conn.send_email.when.called_with( - "test@example.com", "test subject", - "test body", "test_to@example.com").should.throw(BotoServerError) - - conn.verify_email_identity("test@example.com") - conn.send_email("test@example.com", "test subject", - "test body", "test_to@example.com") - - send_quota = conn.get_send_quota() - sent_count = int(send_quota['GetSendQuotaResponse'][ - 'GetSendQuotaResult']['SentLast24Hours']) - sent_count.should.equal(1) - - -@mock_ses_deprecated -def test_send_html_email(): - conn = boto.connect_ses('the_key', 'the_secret') - - conn.send_email.when.called_with( - "test@example.com", "test subject", - "test body", "test_to@example.com", format="html").should.throw(BotoServerError) - - conn.verify_email_identity("test@example.com") - conn.send_email("test@example.com", "test subject", - "test body", "test_to@example.com", format="html") - - send_quota = conn.get_send_quota() - sent_count = int(send_quota['GetSendQuotaResponse'][ - 'GetSendQuotaResult']['SentLast24Hours']) - sent_count.should.equal(1) - - -@mock_ses_deprecated -def test_send_raw_email(): - conn = boto.connect_ses('the_key', 'the_secret') - - message = email.mime.multipart.MIMEMultipart() - message['Subject'] = 'Test' - message['From'] = 'test@example.com' - message['To'] = 'to@example.com' - - # Message body - part = email.mime.text.MIMEText('test file attached') - message.attach(part) - - # Attachment - part = email.mime.text.MIMEText('contents of test file here') - part.add_header('Content-Disposition', 'attachment; filename=test.txt') - message.attach(part) - - conn.send_raw_email.when.called_with( - source=message['From'], - raw_message=message.as_string(), - ).should.throw(BotoServerError) - - conn.verify_email_identity("test@example.com") - conn.send_raw_email( - source=message['From'], - raw_message=message.as_string(), - ) - - send_quota = conn.get_send_quota() - sent_count = int(send_quota['GetSendQuotaResponse'][ - 'GetSendQuotaResult']['SentLast24Hours']) - sent_count.should.equal(1) +from __future__ import unicode_literals +import email + +import boto +from boto.exception import BotoServerError + +import sure # noqa + +from moto import mock_ses_deprecated + + +@mock_ses_deprecated +def test_verify_email_identity(): + conn = boto.connect_ses('the_key', 'the_secret') + conn.verify_email_identity("test@example.com") + + identities = conn.list_identities() + address = identities['ListIdentitiesResponse'][ + 'ListIdentitiesResult']['Identities'][0] + address.should.equal('test@example.com') + + +@mock_ses_deprecated +def test_domain_verify(): + conn = boto.connect_ses('the_key', 'the_secret') + + conn.verify_domain_dkim("domain1.com") + conn.verify_domain_identity("domain2.com") + + identities = conn.list_identities() + domains = list(identities['ListIdentitiesResponse'][ + 'ListIdentitiesResult']['Identities']) + 
domains.should.equal(['domain1.com', 'domain2.com']) + + +@mock_ses_deprecated +def test_delete_identity(): + conn = boto.connect_ses('the_key', 'the_secret') + conn.verify_email_identity("test@example.com") + + conn.list_identities()['ListIdentitiesResponse']['ListIdentitiesResult'][ + 'Identities'].should.have.length_of(1) + conn.delete_identity("test@example.com") + conn.list_identities()['ListIdentitiesResponse']['ListIdentitiesResult'][ + 'Identities'].should.have.length_of(0) + + +@mock_ses_deprecated +def test_send_email(): + conn = boto.connect_ses('the_key', 'the_secret') + + conn.send_email.when.called_with( + "test@example.com", "test subject", + "test body", "test_to@example.com").should.throw(BotoServerError) + + conn.verify_email_identity("test@example.com") + conn.send_email("test@example.com", "test subject", + "test body", "test_to@example.com") + + send_quota = conn.get_send_quota() + sent_count = int(send_quota['GetSendQuotaResponse'][ + 'GetSendQuotaResult']['SentLast24Hours']) + sent_count.should.equal(1) + + +@mock_ses_deprecated +def test_send_html_email(): + conn = boto.connect_ses('the_key', 'the_secret') + + conn.send_email.when.called_with( + "test@example.com", "test subject", + "test body", "test_to@example.com", format="html").should.throw(BotoServerError) + + conn.verify_email_identity("test@example.com") + conn.send_email("test@example.com", "test subject", + "test body", "test_to@example.com", format="html") + + send_quota = conn.get_send_quota() + sent_count = int(send_quota['GetSendQuotaResponse'][ + 'GetSendQuotaResult']['SentLast24Hours']) + sent_count.should.equal(1) + + +@mock_ses_deprecated +def test_send_raw_email(): + conn = boto.connect_ses('the_key', 'the_secret') + + message = email.mime.multipart.MIMEMultipart() + message['Subject'] = 'Test' + message['From'] = 'test@example.com' + message['To'] = 'to@example.com' + + # Message body + part = email.mime.text.MIMEText('test file attached') + message.attach(part) + + # Attachment + part = email.mime.text.MIMEText('contents of test file here') + part.add_header('Content-Disposition', 'attachment; filename=test.txt') + message.attach(part) + + conn.send_raw_email.when.called_with( + source=message['From'], + raw_message=message.as_string(), + ).should.throw(BotoServerError) + + conn.verify_email_identity("test@example.com") + conn.send_raw_email( + source=message['From'], + raw_message=message.as_string(), + ) + + send_quota = conn.get_send_quota() + sent_count = int(send_quota['GetSendQuotaResponse'][ + 'GetSendQuotaResult']['SentLast24Hours']) + sent_count.should.equal(1) diff --git a/tests/test_ses/test_ses_boto3.py b/tests/test_ses/test_ses_boto3.py index e800b80350a4..00d44bffaf44 100644 --- a/tests/test_ses/test_ses_boto3.py +++ b/tests/test_ses/test_ses_boto3.py @@ -1,194 +1,194 @@ -from __future__ import unicode_literals - -import boto3 -from botocore.exceptions import ClientError -from six.moves.email_mime_multipart import MIMEMultipart -from six.moves.email_mime_text import MIMEText - -import sure # noqa - -from moto import mock_ses - - -@mock_ses -def test_verify_email_identity(): - conn = boto3.client('ses', region_name='us-east-1') - conn.verify_email_identity(EmailAddress="test@example.com") - - identities = conn.list_identities() - address = identities['Identities'][0] - address.should.equal('test@example.com') - -@mock_ses -def test_verify_email_address(): - conn = boto3.client('ses', region_name='us-east-1') - conn.verify_email_address(EmailAddress="test@example.com") - 
email_addresses = conn.list_verified_email_addresses() - email = email_addresses['VerifiedEmailAddresses'][0] - email.should.equal('test@example.com') - -@mock_ses -def test_domain_verify(): - conn = boto3.client('ses', region_name='us-east-1') - - conn.verify_domain_dkim(Domain="domain1.com") - conn.verify_domain_identity(Domain="domain2.com") - - identities = conn.list_identities() - domains = list(identities['Identities']) - domains.should.equal(['domain1.com', 'domain2.com']) - - -@mock_ses -def test_delete_identity(): - conn = boto3.client('ses', region_name='us-east-1') - conn.verify_email_identity(EmailAddress="test@example.com") - - conn.list_identities()['Identities'].should.have.length_of(1) - conn.delete_identity(Identity="test@example.com") - conn.list_identities()['Identities'].should.have.length_of(0) - - -@mock_ses -def test_send_email(): - conn = boto3.client('ses', region_name='us-east-1') - - kwargs = dict( - Source="test@example.com", - Destination={ - "ToAddresses": ["test_to@example.com"], - "CcAddresses": ["test_cc@example.com"], - "BccAddresses": ["test_bcc@example.com"], - }, - Message={ - "Subject": {"Data": "test subject"}, - "Body": {"Text": {"Data": "test body"}} - } - ) - conn.send_email.when.called_with(**kwargs).should.throw(ClientError) - - conn.verify_domain_identity(Domain='example.com') - conn.send_email(**kwargs) - - too_many_addresses = list('to%s@example.com' % i for i in range(51)) - conn.send_email.when.called_with( - **dict(kwargs, Destination={'ToAddresses': too_many_addresses}) - ).should.throw(ClientError) - - send_quota = conn.get_send_quota() - sent_count = int(send_quota['SentLast24Hours']) - sent_count.should.equal(3) - - -@mock_ses -def test_send_html_email(): - conn = boto3.client('ses', region_name='us-east-1') - - kwargs = dict( - Source="test@example.com", - Destination={ - "ToAddresses": ["test_to@example.com"] - }, - Message={ - "Subject": {"Data": "test subject"}, - "Body": {"Html": {"Data": "test body"}} - } - ) - - conn.send_email.when.called_with(**kwargs).should.throw(ClientError) - - conn.verify_email_identity(EmailAddress="test@example.com") - conn.send_email(**kwargs) - - send_quota = conn.get_send_quota() - sent_count = int(send_quota['SentLast24Hours']) - sent_count.should.equal(1) - - -@mock_ses -def test_send_raw_email(): - conn = boto3.client('ses', region_name='us-east-1') - - message = MIMEMultipart() - message['Subject'] = 'Test' - message['From'] = 'test@example.com' - message['To'] = 'to@example.com, foo@example.com' - - # Message body - part = MIMEText('test file attached') - message.attach(part) - - # Attachment - part = MIMEText('contents of test file here') - part.add_header('Content-Disposition', 'attachment; filename=test.txt') - message.attach(part) - - kwargs = dict( - Source=message['From'], - RawMessage={'Data': message.as_string()}, - ) - - conn.send_raw_email.when.called_with(**kwargs).should.throw(ClientError) - - conn.verify_email_identity(EmailAddress="test@example.com") - conn.send_raw_email(**kwargs) - - send_quota = conn.get_send_quota() - sent_count = int(send_quota['SentLast24Hours']) - sent_count.should.equal(2) - - -@mock_ses -def test_send_raw_email_without_source(): - conn = boto3.client('ses', region_name='us-east-1') - - message = MIMEMultipart() - message['Subject'] = 'Test' - message['From'] = 'test@example.com' - message['To'] = 'to@example.com, foo@example.com' - - # Message body - part = MIMEText('test file attached') - message.attach(part) - - # Attachment - part = MIMEText('contents of 
test file here') - part.add_header('Content-Disposition', 'attachment; filename=test.txt') - message.attach(part) - - kwargs = dict( - RawMessage={'Data': message.as_string()}, - ) - - conn.send_raw_email.when.called_with(**kwargs).should.throw(ClientError) - - conn.verify_email_identity(EmailAddress="test@example.com") - conn.send_raw_email(**kwargs) - - send_quota = conn.get_send_quota() - sent_count = int(send_quota['SentLast24Hours']) - sent_count.should.equal(2) - - -@mock_ses -def test_send_raw_email_without_source_or_from(): - conn = boto3.client('ses', region_name='us-east-1') - - message = MIMEMultipart() - message['Subject'] = 'Test' - message['To'] = 'to@example.com, foo@example.com' - - # Message body - part = MIMEText('test file attached') - message.attach(part) - # Attachment - part = MIMEText('contents of test file here') - part.add_header('Content-Disposition', 'attachment; filename=test.txt') - message.attach(part) - - kwargs = dict( - RawMessage={'Data': message.as_string()}, - ) - - conn.send_raw_email.when.called_with(**kwargs).should.throw(ClientError) - +from __future__ import unicode_literals + +import boto3 +from botocore.exceptions import ClientError +from six.moves.email_mime_multipart import MIMEMultipart +from six.moves.email_mime_text import MIMEText + +import sure # noqa + +from moto import mock_ses + + +@mock_ses +def test_verify_email_identity(): + conn = boto3.client('ses', region_name='us-east-1') + conn.verify_email_identity(EmailAddress="test@example.com") + + identities = conn.list_identities() + address = identities['Identities'][0] + address.should.equal('test@example.com') + +@mock_ses +def test_verify_email_address(): + conn = boto3.client('ses', region_name='us-east-1') + conn.verify_email_address(EmailAddress="test@example.com") + email_addresses = conn.list_verified_email_addresses() + email = email_addresses['VerifiedEmailAddresses'][0] + email.should.equal('test@example.com') + +@mock_ses +def test_domain_verify(): + conn = boto3.client('ses', region_name='us-east-1') + + conn.verify_domain_dkim(Domain="domain1.com") + conn.verify_domain_identity(Domain="domain2.com") + + identities = conn.list_identities() + domains = list(identities['Identities']) + domains.should.equal(['domain1.com', 'domain2.com']) + + +@mock_ses +def test_delete_identity(): + conn = boto3.client('ses', region_name='us-east-1') + conn.verify_email_identity(EmailAddress="test@example.com") + + conn.list_identities()['Identities'].should.have.length_of(1) + conn.delete_identity(Identity="test@example.com") + conn.list_identities()['Identities'].should.have.length_of(0) + + +@mock_ses +def test_send_email(): + conn = boto3.client('ses', region_name='us-east-1') + + kwargs = dict( + Source="test@example.com", + Destination={ + "ToAddresses": ["test_to@example.com"], + "CcAddresses": ["test_cc@example.com"], + "BccAddresses": ["test_bcc@example.com"], + }, + Message={ + "Subject": {"Data": "test subject"}, + "Body": {"Text": {"Data": "test body"}} + } + ) + conn.send_email.when.called_with(**kwargs).should.throw(ClientError) + + conn.verify_domain_identity(Domain='example.com') + conn.send_email(**kwargs) + + too_many_addresses = list('to%s@example.com' % i for i in range(51)) + conn.send_email.when.called_with( + **dict(kwargs, Destination={'ToAddresses': too_many_addresses}) + ).should.throw(ClientError) + + send_quota = conn.get_send_quota() + sent_count = int(send_quota['SentLast24Hours']) + sent_count.should.equal(3) + + +@mock_ses +def test_send_html_email(): + conn = 
boto3.client('ses', region_name='us-east-1') + + kwargs = dict( + Source="test@example.com", + Destination={ + "ToAddresses": ["test_to@example.com"] + }, + Message={ + "Subject": {"Data": "test subject"}, + "Body": {"Html": {"Data": "test body"}} + } + ) + + conn.send_email.when.called_with(**kwargs).should.throw(ClientError) + + conn.verify_email_identity(EmailAddress="test@example.com") + conn.send_email(**kwargs) + + send_quota = conn.get_send_quota() + sent_count = int(send_quota['SentLast24Hours']) + sent_count.should.equal(1) + + +@mock_ses +def test_send_raw_email(): + conn = boto3.client('ses', region_name='us-east-1') + + message = MIMEMultipart() + message['Subject'] = 'Test' + message['From'] = 'test@example.com' + message['To'] = 'to@example.com, foo@example.com' + + # Message body + part = MIMEText('test file attached') + message.attach(part) + + # Attachment + part = MIMEText('contents of test file here') + part.add_header('Content-Disposition', 'attachment; filename=test.txt') + message.attach(part) + + kwargs = dict( + Source=message['From'], + RawMessage={'Data': message.as_string()}, + ) + + conn.send_raw_email.when.called_with(**kwargs).should.throw(ClientError) + + conn.verify_email_identity(EmailAddress="test@example.com") + conn.send_raw_email(**kwargs) + + send_quota = conn.get_send_quota() + sent_count = int(send_quota['SentLast24Hours']) + sent_count.should.equal(2) + + +@mock_ses +def test_send_raw_email_without_source(): + conn = boto3.client('ses', region_name='us-east-1') + + message = MIMEMultipart() + message['Subject'] = 'Test' + message['From'] = 'test@example.com' + message['To'] = 'to@example.com, foo@example.com' + + # Message body + part = MIMEText('test file attached') + message.attach(part) + + # Attachment + part = MIMEText('contents of test file here') + part.add_header('Content-Disposition', 'attachment; filename=test.txt') + message.attach(part) + + kwargs = dict( + RawMessage={'Data': message.as_string()}, + ) + + conn.send_raw_email.when.called_with(**kwargs).should.throw(ClientError) + + conn.verify_email_identity(EmailAddress="test@example.com") + conn.send_raw_email(**kwargs) + + send_quota = conn.get_send_quota() + sent_count = int(send_quota['SentLast24Hours']) + sent_count.should.equal(2) + + +@mock_ses +def test_send_raw_email_without_source_or_from(): + conn = boto3.client('ses', region_name='us-east-1') + + message = MIMEMultipart() + message['Subject'] = 'Test' + message['To'] = 'to@example.com, foo@example.com' + + # Message body + part = MIMEText('test file attached') + message.attach(part) + # Attachment + part = MIMEText('contents of test file here') + part.add_header('Content-Disposition', 'attachment; filename=test.txt') + message.attach(part) + + kwargs = dict( + RawMessage={'Data': message.as_string()}, + ) + + conn.send_raw_email.when.called_with(**kwargs).should.throw(ClientError) + diff --git a/tests/test_sns/test_application.py b/tests/test_sns/test_application.py index 319e4a6f86a8..e8b5838c0431 100644 --- a/tests/test_sns/test_application.py +++ b/tests/test_sns/test_application.py @@ -1,308 +1,308 @@ -from __future__ import unicode_literals - -import boto -from boto.exception import BotoServerError -from moto import mock_sns_deprecated -import sure # noqa - - -@mock_sns_deprecated -def test_create_platform_application(): - conn = boto.connect_sns() - platform_application = conn.create_platform_application( - name="my-application", - platform="APNS", - attributes={ - "PlatformCredential": "platform_credential", - 
"PlatformPrincipal": "platform_principal", - }, - ) - application_arn = platform_application['CreatePlatformApplicationResponse'][ - 'CreatePlatformApplicationResult']['PlatformApplicationArn'] - application_arn.should.equal( - 'arn:aws:sns:us-east-1:123456789012:app/APNS/my-application') - - -@mock_sns_deprecated -def test_get_platform_application_attributes(): - conn = boto.connect_sns() - platform_application = conn.create_platform_application( - name="my-application", - platform="APNS", - attributes={ - "PlatformCredential": "platform_credential", - "PlatformPrincipal": "platform_principal", - }, - ) - arn = platform_application['CreatePlatformApplicationResponse'][ - 'CreatePlatformApplicationResult']['PlatformApplicationArn'] - attributes = conn.get_platform_application_attributes(arn)['GetPlatformApplicationAttributesResponse'][ - 'GetPlatformApplicationAttributesResult']['Attributes'] - attributes.should.equal({ - "PlatformCredential": "platform_credential", - "PlatformPrincipal": "platform_principal", - }) - - -@mock_sns_deprecated -def test_get_missing_platform_application_attributes(): - conn = boto.connect_sns() - conn.get_platform_application_attributes.when.called_with( - "a-fake-arn").should.throw(BotoServerError) - - -@mock_sns_deprecated -def test_set_platform_application_attributes(): - conn = boto.connect_sns() - platform_application = conn.create_platform_application( - name="my-application", - platform="APNS", - attributes={ - "PlatformCredential": "platform_credential", - "PlatformPrincipal": "platform_principal", - }, - ) - arn = platform_application['CreatePlatformApplicationResponse'][ - 'CreatePlatformApplicationResult']['PlatformApplicationArn'] - conn.set_platform_application_attributes(arn, - {"PlatformPrincipal": "other"} - ) - attributes = conn.get_platform_application_attributes(arn)['GetPlatformApplicationAttributesResponse'][ - 'GetPlatformApplicationAttributesResult']['Attributes'] - attributes.should.equal({ - "PlatformCredential": "platform_credential", - "PlatformPrincipal": "other", - }) - - -@mock_sns_deprecated -def test_list_platform_applications(): - conn = boto.connect_sns() - conn.create_platform_application( - name="application1", - platform="APNS", - ) - conn.create_platform_application( - name="application2", - platform="APNS", - ) - - applications_repsonse = conn.list_platform_applications() - applications = applications_repsonse['ListPlatformApplicationsResponse'][ - 'ListPlatformApplicationsResult']['PlatformApplications'] - applications.should.have.length_of(2) - - -@mock_sns_deprecated -def test_delete_platform_application(): - conn = boto.connect_sns() - conn.create_platform_application( - name="application1", - platform="APNS", - ) - conn.create_platform_application( - name="application2", - platform="APNS", - ) - - applications_repsonse = conn.list_platform_applications() - applications = applications_repsonse['ListPlatformApplicationsResponse'][ - 'ListPlatformApplicationsResult']['PlatformApplications'] - applications.should.have.length_of(2) - - application_arn = applications[0]['PlatformApplicationArn'] - conn.delete_platform_application(application_arn) - - applications_repsonse = conn.list_platform_applications() - applications = applications_repsonse['ListPlatformApplicationsResponse'][ - 'ListPlatformApplicationsResult']['PlatformApplications'] - applications.should.have.length_of(1) - - -@mock_sns_deprecated -def test_create_platform_endpoint(): - conn = boto.connect_sns() - platform_application = 
conn.create_platform_application( - name="my-application", - platform="APNS", - ) - application_arn = platform_application['CreatePlatformApplicationResponse'][ - 'CreatePlatformApplicationResult']['PlatformApplicationArn'] - - endpoint = conn.create_platform_endpoint( - platform_application_arn=application_arn, - token="some_unique_id", - custom_user_data="some user data", - attributes={ - "Enabled": False, - }, - ) - - endpoint_arn = endpoint['CreatePlatformEndpointResponse'][ - 'CreatePlatformEndpointResult']['EndpointArn'] - endpoint_arn.should.contain( - "arn:aws:sns:us-east-1:123456789012:endpoint/APNS/my-application/") - - -@mock_sns_deprecated -def test_get_list_endpoints_by_platform_application(): - conn = boto.connect_sns() - platform_application = conn.create_platform_application( - name="my-application", - platform="APNS", - ) - application_arn = platform_application['CreatePlatformApplicationResponse'][ - 'CreatePlatformApplicationResult']['PlatformApplicationArn'] - - endpoint = conn.create_platform_endpoint( - platform_application_arn=application_arn, - token="some_unique_id", - custom_user_data="some user data", - attributes={ - "CustomUserData": "some data", - }, - ) - endpoint_arn = endpoint['CreatePlatformEndpointResponse'][ - 'CreatePlatformEndpointResult']['EndpointArn'] - - endpoint_list = conn.list_endpoints_by_platform_application( - platform_application_arn=application_arn - )['ListEndpointsByPlatformApplicationResponse']['ListEndpointsByPlatformApplicationResult']['Endpoints'] - - endpoint_list.should.have.length_of(1) - endpoint_list[0]['Attributes']['CustomUserData'].should.equal('some data') - endpoint_list[0]['EndpointArn'].should.equal(endpoint_arn) - - -@mock_sns_deprecated -def test_get_endpoint_attributes(): - conn = boto.connect_sns() - platform_application = conn.create_platform_application( - name="my-application", - platform="APNS", - ) - application_arn = platform_application['CreatePlatformApplicationResponse'][ - 'CreatePlatformApplicationResult']['PlatformApplicationArn'] - - endpoint = conn.create_platform_endpoint( - platform_application_arn=application_arn, - token="some_unique_id", - custom_user_data="some user data", - attributes={ - "Enabled": False, - "CustomUserData": "some data", - }, - ) - endpoint_arn = endpoint['CreatePlatformEndpointResponse'][ - 'CreatePlatformEndpointResult']['EndpointArn'] - - attributes = conn.get_endpoint_attributes(endpoint_arn)['GetEndpointAttributesResponse'][ - 'GetEndpointAttributesResult']['Attributes'] - attributes.should.equal({ - "Token": "some_unique_id", - "Enabled": 'False', - "CustomUserData": "some data", - }) - - -@mock_sns_deprecated -def test_get_missing_endpoint_attributes(): - conn = boto.connect_sns() - conn.get_endpoint_attributes.when.called_with( - "a-fake-arn").should.throw(BotoServerError) - - -@mock_sns_deprecated -def test_set_endpoint_attributes(): - conn = boto.connect_sns() - platform_application = conn.create_platform_application( - name="my-application", - platform="APNS", - ) - application_arn = platform_application['CreatePlatformApplicationResponse'][ - 'CreatePlatformApplicationResult']['PlatformApplicationArn'] - - endpoint = conn.create_platform_endpoint( - platform_application_arn=application_arn, - token="some_unique_id", - custom_user_data="some user data", - attributes={ - "Enabled": False, - "CustomUserData": "some data", - }, - ) - endpoint_arn = endpoint['CreatePlatformEndpointResponse'][ - 'CreatePlatformEndpointResult']['EndpointArn'] - - 
conn.set_endpoint_attributes(endpoint_arn, - {"CustomUserData": "other data"} - ) - attributes = conn.get_endpoint_attributes(endpoint_arn)['GetEndpointAttributesResponse'][ - 'GetEndpointAttributesResult']['Attributes'] - attributes.should.equal({ - "Token": "some_unique_id", - "Enabled": 'False', - "CustomUserData": "other data", - }) - - -@mock_sns_deprecated -def test_delete_endpoint(): - conn = boto.connect_sns() - platform_application = conn.create_platform_application( - name="my-application", - platform="APNS", - ) - application_arn = platform_application['CreatePlatformApplicationResponse'][ - 'CreatePlatformApplicationResult']['PlatformApplicationArn'] - - endpoint = conn.create_platform_endpoint( - platform_application_arn=application_arn, - token="some_unique_id", - custom_user_data="some user data", - attributes={ - "Enabled": False, - "CustomUserData": "some data", - }, - ) - endpoint_arn = endpoint['CreatePlatformEndpointResponse'][ - 'CreatePlatformEndpointResult']['EndpointArn'] - - endpoint_list = conn.list_endpoints_by_platform_application( - platform_application_arn=application_arn - )['ListEndpointsByPlatformApplicationResponse']['ListEndpointsByPlatformApplicationResult']['Endpoints'] - - endpoint_list.should.have.length_of(1) - - conn.delete_endpoint(endpoint_arn) - - endpoint_list = conn.list_endpoints_by_platform_application( - platform_application_arn=application_arn - )['ListEndpointsByPlatformApplicationResponse']['ListEndpointsByPlatformApplicationResult']['Endpoints'] - endpoint_list.should.have.length_of(0) - - -@mock_sns_deprecated -def test_publish_to_platform_endpoint(): - conn = boto.connect_sns() - platform_application = conn.create_platform_application( - name="my-application", - platform="APNS", - ) - application_arn = platform_application['CreatePlatformApplicationResponse'][ - 'CreatePlatformApplicationResult']['PlatformApplicationArn'] - - endpoint = conn.create_platform_endpoint( - platform_application_arn=application_arn, - token="some_unique_id", - custom_user_data="some user data", - attributes={ - "Enabled": True, - }, - ) - - endpoint_arn = endpoint['CreatePlatformEndpointResponse'][ - 'CreatePlatformEndpointResult']['EndpointArn'] - - conn.publish(message="some message", message_structure="json", - target_arn=endpoint_arn) +from __future__ import unicode_literals + +import boto +from boto.exception import BotoServerError +from moto import mock_sns_deprecated +import sure # noqa + + +@mock_sns_deprecated +def test_create_platform_application(): + conn = boto.connect_sns() + platform_application = conn.create_platform_application( + name="my-application", + platform="APNS", + attributes={ + "PlatformCredential": "platform_credential", + "PlatformPrincipal": "platform_principal", + }, + ) + application_arn = platform_application['CreatePlatformApplicationResponse'][ + 'CreatePlatformApplicationResult']['PlatformApplicationArn'] + application_arn.should.equal( + 'arn:aws:sns:us-east-1:123456789012:app/APNS/my-application') + + +@mock_sns_deprecated +def test_get_platform_application_attributes(): + conn = boto.connect_sns() + platform_application = conn.create_platform_application( + name="my-application", + platform="APNS", + attributes={ + "PlatformCredential": "platform_credential", + "PlatformPrincipal": "platform_principal", + }, + ) + arn = platform_application['CreatePlatformApplicationResponse'][ + 'CreatePlatformApplicationResult']['PlatformApplicationArn'] + attributes = 
conn.get_platform_application_attributes(arn)['GetPlatformApplicationAttributesResponse'][ + 'GetPlatformApplicationAttributesResult']['Attributes'] + attributes.should.equal({ + "PlatformCredential": "platform_credential", + "PlatformPrincipal": "platform_principal", + }) + + +@mock_sns_deprecated +def test_get_missing_platform_application_attributes(): + conn = boto.connect_sns() + conn.get_platform_application_attributes.when.called_with( + "a-fake-arn").should.throw(BotoServerError) + + +@mock_sns_deprecated +def test_set_platform_application_attributes(): + conn = boto.connect_sns() + platform_application = conn.create_platform_application( + name="my-application", + platform="APNS", + attributes={ + "PlatformCredential": "platform_credential", + "PlatformPrincipal": "platform_principal", + }, + ) + arn = platform_application['CreatePlatformApplicationResponse'][ + 'CreatePlatformApplicationResult']['PlatformApplicationArn'] + conn.set_platform_application_attributes(arn, + {"PlatformPrincipal": "other"} + ) + attributes = conn.get_platform_application_attributes(arn)['GetPlatformApplicationAttributesResponse'][ + 'GetPlatformApplicationAttributesResult']['Attributes'] + attributes.should.equal({ + "PlatformCredential": "platform_credential", + "PlatformPrincipal": "other", + }) + + +@mock_sns_deprecated +def test_list_platform_applications(): + conn = boto.connect_sns() + conn.create_platform_application( + name="application1", + platform="APNS", + ) + conn.create_platform_application( + name="application2", + platform="APNS", + ) + + applications_response = conn.list_platform_applications() + applications = applications_response['ListPlatformApplicationsResponse'][ + 'ListPlatformApplicationsResult']['PlatformApplications'] + applications.should.have.length_of(2) + + +@mock_sns_deprecated +def test_delete_platform_application(): + conn = boto.connect_sns() + conn.create_platform_application( + name="application1", + platform="APNS", + ) + conn.create_platform_application( + name="application2", + platform="APNS", + ) + + applications_response = conn.list_platform_applications() + applications = applications_response['ListPlatformApplicationsResponse'][ + 'ListPlatformApplicationsResult']['PlatformApplications'] + applications.should.have.length_of(2) + + application_arn = applications[0]['PlatformApplicationArn'] + conn.delete_platform_application(application_arn) + + applications_response = conn.list_platform_applications() + applications = applications_response['ListPlatformApplicationsResponse'][ + 'ListPlatformApplicationsResult']['PlatformApplications'] + applications.should.have.length_of(1) + + +@mock_sns_deprecated +def test_create_platform_endpoint(): + conn = boto.connect_sns() + platform_application = conn.create_platform_application( + name="my-application", + platform="APNS", + ) + application_arn = platform_application['CreatePlatformApplicationResponse'][ + 'CreatePlatformApplicationResult']['PlatformApplicationArn'] + + endpoint = conn.create_platform_endpoint( + platform_application_arn=application_arn, + token="some_unique_id", + custom_user_data="some user data", + attributes={ + "Enabled": False, + }, + ) + + endpoint_arn = endpoint['CreatePlatformEndpointResponse'][ + 'CreatePlatformEndpointResult']['EndpointArn'] + endpoint_arn.should.contain( + "arn:aws:sns:us-east-1:123456789012:endpoint/APNS/my-application/") + + +@mock_sns_deprecated +def test_get_list_endpoints_by_platform_application(): + conn = boto.connect_sns() + platform_application = 
conn.create_platform_application( + name="my-application", + platform="APNS", + ) + application_arn = platform_application['CreatePlatformApplicationResponse'][ + 'CreatePlatformApplicationResult']['PlatformApplicationArn'] + + endpoint = conn.create_platform_endpoint( + platform_application_arn=application_arn, + token="some_unique_id", + custom_user_data="some user data", + attributes={ + "CustomUserData": "some data", + }, + ) + endpoint_arn = endpoint['CreatePlatformEndpointResponse'][ + 'CreatePlatformEndpointResult']['EndpointArn'] + + endpoint_list = conn.list_endpoints_by_platform_application( + platform_application_arn=application_arn + )['ListEndpointsByPlatformApplicationResponse']['ListEndpointsByPlatformApplicationResult']['Endpoints'] + + endpoint_list.should.have.length_of(1) + endpoint_list[0]['Attributes']['CustomUserData'].should.equal('some data') + endpoint_list[0]['EndpointArn'].should.equal(endpoint_arn) + + +@mock_sns_deprecated +def test_get_endpoint_attributes(): + conn = boto.connect_sns() + platform_application = conn.create_platform_application( + name="my-application", + platform="APNS", + ) + application_arn = platform_application['CreatePlatformApplicationResponse'][ + 'CreatePlatformApplicationResult']['PlatformApplicationArn'] + + endpoint = conn.create_platform_endpoint( + platform_application_arn=application_arn, + token="some_unique_id", + custom_user_data="some user data", + attributes={ + "Enabled": False, + "CustomUserData": "some data", + }, + ) + endpoint_arn = endpoint['CreatePlatformEndpointResponse'][ + 'CreatePlatformEndpointResult']['EndpointArn'] + + attributes = conn.get_endpoint_attributes(endpoint_arn)['GetEndpointAttributesResponse'][ + 'GetEndpointAttributesResult']['Attributes'] + attributes.should.equal({ + "Token": "some_unique_id", + "Enabled": 'False', + "CustomUserData": "some data", + }) + + +@mock_sns_deprecated +def test_get_missing_endpoint_attributes(): + conn = boto.connect_sns() + conn.get_endpoint_attributes.when.called_with( + "a-fake-arn").should.throw(BotoServerError) + + +@mock_sns_deprecated +def test_set_endpoint_attributes(): + conn = boto.connect_sns() + platform_application = conn.create_platform_application( + name="my-application", + platform="APNS", + ) + application_arn = platform_application['CreatePlatformApplicationResponse'][ + 'CreatePlatformApplicationResult']['PlatformApplicationArn'] + + endpoint = conn.create_platform_endpoint( + platform_application_arn=application_arn, + token="some_unique_id", + custom_user_data="some user data", + attributes={ + "Enabled": False, + "CustomUserData": "some data", + }, + ) + endpoint_arn = endpoint['CreatePlatformEndpointResponse'][ + 'CreatePlatformEndpointResult']['EndpointArn'] + + conn.set_endpoint_attributes(endpoint_arn, + {"CustomUserData": "other data"} + ) + attributes = conn.get_endpoint_attributes(endpoint_arn)['GetEndpointAttributesResponse'][ + 'GetEndpointAttributesResult']['Attributes'] + attributes.should.equal({ + "Token": "some_unique_id", + "Enabled": 'False', + "CustomUserData": "other data", + }) + + +@mock_sns_deprecated +def test_delete_endpoint(): + conn = boto.connect_sns() + platform_application = conn.create_platform_application( + name="my-application", + platform="APNS", + ) + application_arn = platform_application['CreatePlatformApplicationResponse'][ + 'CreatePlatformApplicationResult']['PlatformApplicationArn'] + + endpoint = conn.create_platform_endpoint( + platform_application_arn=application_arn, + token="some_unique_id", + 
custom_user_data="some user data", + attributes={ + "Enabled": False, + "CustomUserData": "some data", + }, + ) + endpoint_arn = endpoint['CreatePlatformEndpointResponse'][ + 'CreatePlatformEndpointResult']['EndpointArn'] + + endpoint_list = conn.list_endpoints_by_platform_application( + platform_application_arn=application_arn + )['ListEndpointsByPlatformApplicationResponse']['ListEndpointsByPlatformApplicationResult']['Endpoints'] + + endpoint_list.should.have.length_of(1) + + conn.delete_endpoint(endpoint_arn) + + endpoint_list = conn.list_endpoints_by_platform_application( + platform_application_arn=application_arn + )['ListEndpointsByPlatformApplicationResponse']['ListEndpointsByPlatformApplicationResult']['Endpoints'] + endpoint_list.should.have.length_of(0) + + +@mock_sns_deprecated +def test_publish_to_platform_endpoint(): + conn = boto.connect_sns() + platform_application = conn.create_platform_application( + name="my-application", + platform="APNS", + ) + application_arn = platform_application['CreatePlatformApplicationResponse'][ + 'CreatePlatformApplicationResult']['PlatformApplicationArn'] + + endpoint = conn.create_platform_endpoint( + platform_application_arn=application_arn, + token="some_unique_id", + custom_user_data="some user data", + attributes={ + "Enabled": True, + }, + ) + + endpoint_arn = endpoint['CreatePlatformEndpointResponse'][ + 'CreatePlatformEndpointResult']['EndpointArn'] + + conn.publish(message="some message", message_structure="json", + target_arn=endpoint_arn) diff --git a/tests/test_sns/test_application_boto3.py b/tests/test_sns/test_application_boto3.py index 1c9695fea274..6ba2ed89de72 100644 --- a/tests/test_sns/test_application_boto3.py +++ b/tests/test_sns/test_application_boto3.py @@ -1,350 +1,350 @@ -from __future__ import unicode_literals - -import boto3 -from botocore.exceptions import ClientError -from moto import mock_sns -import sure # noqa - - -@mock_sns -def test_create_platform_application(): - conn = boto3.client('sns', region_name='us-east-1') - response = conn.create_platform_application( - Name="my-application", - Platform="APNS", - Attributes={ - "PlatformCredential": "platform_credential", - "PlatformPrincipal": "platform_principal", - }, - ) - application_arn = response['PlatformApplicationArn'] - application_arn.should.equal( - 'arn:aws:sns:us-east-1:123456789012:app/APNS/my-application') - - -@mock_sns -def test_get_platform_application_attributes(): - conn = boto3.client('sns', region_name='us-east-1') - platform_application = conn.create_platform_application( - Name="my-application", - Platform="APNS", - Attributes={ - "PlatformCredential": "platform_credential", - "PlatformPrincipal": "platform_principal", - }, - ) - arn = platform_application['PlatformApplicationArn'] - attributes = conn.get_platform_application_attributes( - PlatformApplicationArn=arn)['Attributes'] - attributes.should.equal({ - "PlatformCredential": "platform_credential", - "PlatformPrincipal": "platform_principal", - }) - - -@mock_sns -def test_get_missing_platform_application_attributes(): - conn = boto3.client('sns', region_name='us-east-1') - conn.get_platform_application_attributes.when.called_with( - PlatformApplicationArn="a-fake-arn").should.throw(ClientError) - - -@mock_sns -def test_set_platform_application_attributes(): - conn = boto3.client('sns', region_name='us-east-1') - platform_application = conn.create_platform_application( - Name="my-application", - Platform="APNS", - Attributes={ - "PlatformCredential": "platform_credential", - 
"PlatformPrincipal": "platform_principal", - }, - ) - arn = platform_application['PlatformApplicationArn'] - conn.set_platform_application_attributes(PlatformApplicationArn=arn, - Attributes={ - "PlatformPrincipal": "other"} - ) - attributes = conn.get_platform_application_attributes( - PlatformApplicationArn=arn)['Attributes'] - attributes.should.equal({ - "PlatformCredential": "platform_credential", - "PlatformPrincipal": "other", - }) - - -@mock_sns -def test_list_platform_applications(): - conn = boto3.client('sns', region_name='us-east-1') - conn.create_platform_application( - Name="application1", - Platform="APNS", - Attributes={}, - ) - conn.create_platform_application( - Name="application2", - Platform="APNS", - Attributes={}, - ) - - applications_repsonse = conn.list_platform_applications() - applications = applications_repsonse['PlatformApplications'] - applications.should.have.length_of(2) - - -@mock_sns -def test_delete_platform_application(): - conn = boto3.client('sns', region_name='us-east-1') - conn.create_platform_application( - Name="application1", - Platform="APNS", - Attributes={}, - ) - conn.create_platform_application( - Name="application2", - Platform="APNS", - Attributes={}, - ) - - applications_repsonse = conn.list_platform_applications() - applications = applications_repsonse['PlatformApplications'] - applications.should.have.length_of(2) - - application_arn = applications[0]['PlatformApplicationArn'] - conn.delete_platform_application(PlatformApplicationArn=application_arn) - - applications_repsonse = conn.list_platform_applications() - applications = applications_repsonse['PlatformApplications'] - applications.should.have.length_of(1) - - -@mock_sns -def test_create_platform_endpoint(): - conn = boto3.client('sns', region_name='us-east-1') - platform_application = conn.create_platform_application( - Name="my-application", - Platform="APNS", - Attributes={}, - ) - application_arn = platform_application['PlatformApplicationArn'] - - endpoint = conn.create_platform_endpoint( - PlatformApplicationArn=application_arn, - Token="some_unique_id", - CustomUserData="some user data", - Attributes={ - "Enabled": 'false', - }, - ) - - endpoint_arn = endpoint['EndpointArn'] - endpoint_arn.should.contain( - "arn:aws:sns:us-east-1:123456789012:endpoint/APNS/my-application/") - - -@mock_sns -def test_create_duplicate_platform_endpoint(): - conn = boto3.client('sns', region_name='us-east-1') - platform_application = conn.create_platform_application( - Name="my-application", - Platform="APNS", - Attributes={}, - ) - application_arn = platform_application['PlatformApplicationArn'] - - endpoint = conn.create_platform_endpoint( - PlatformApplicationArn=application_arn, - Token="some_unique_id", - CustomUserData="some user data", - Attributes={ - "Enabled": 'false', - }, - ) - - endpoint = conn.create_platform_endpoint.when.called_with( - PlatformApplicationArn=application_arn, - Token="some_unique_id", - CustomUserData="some user data", - Attributes={ - "Enabled": 'false', - }, - ).should.throw(ClientError) - - -@mock_sns -def test_get_list_endpoints_by_platform_application(): - conn = boto3.client('sns', region_name='us-east-1') - platform_application = conn.create_platform_application( - Name="my-application", - Platform="APNS", - Attributes={}, - ) - application_arn = platform_application['PlatformApplicationArn'] - - endpoint = conn.create_platform_endpoint( - PlatformApplicationArn=application_arn, - Token="some_unique_id", - CustomUserData="some user data", - Attributes={ - 
"CustomUserData": "some data", - }, - ) - endpoint_arn = endpoint['EndpointArn'] - - endpoint_list = conn.list_endpoints_by_platform_application( - PlatformApplicationArn=application_arn - )['Endpoints'] - - endpoint_list.should.have.length_of(1) - endpoint_list[0]['Attributes']['CustomUserData'].should.equal('some data') - endpoint_list[0]['EndpointArn'].should.equal(endpoint_arn) - - -@mock_sns -def test_get_endpoint_attributes(): - conn = boto3.client('sns', region_name='us-east-1') - platform_application = conn.create_platform_application( - Name="my-application", - Platform="APNS", - Attributes={}, - ) - application_arn = platform_application['PlatformApplicationArn'] - - endpoint = conn.create_platform_endpoint( - PlatformApplicationArn=application_arn, - Token="some_unique_id", - CustomUserData="some user data", - Attributes={ - "Enabled": 'false', - "CustomUserData": "some data", - }, - ) - endpoint_arn = endpoint['EndpointArn'] - - attributes = conn.get_endpoint_attributes( - EndpointArn=endpoint_arn)['Attributes'] - attributes.should.equal({ - "Token": "some_unique_id", - "Enabled": 'false', - "CustomUserData": "some data", - }) - - -@mock_sns -def test_get_missing_endpoint_attributes(): - conn = boto3.client('sns', region_name='us-east-1') - conn.get_endpoint_attributes.when.called_with( - EndpointArn="a-fake-arn").should.throw(ClientError) - - -@mock_sns -def test_set_endpoint_attributes(): - conn = boto3.client('sns', region_name='us-east-1') - platform_application = conn.create_platform_application( - Name="my-application", - Platform="APNS", - Attributes={}, - ) - application_arn = platform_application['PlatformApplicationArn'] - - endpoint = conn.create_platform_endpoint( - PlatformApplicationArn=application_arn, - Token="some_unique_id", - CustomUserData="some user data", - Attributes={ - "Enabled": 'false', - "CustomUserData": "some data", - }, - ) - endpoint_arn = endpoint['EndpointArn'] - - conn.set_endpoint_attributes(EndpointArn=endpoint_arn, - Attributes={"CustomUserData": "other data"} - ) - attributes = conn.get_endpoint_attributes( - EndpointArn=endpoint_arn)['Attributes'] - attributes.should.equal({ - "Token": "some_unique_id", - "Enabled": 'false', - "CustomUserData": "other data", - }) - - -@mock_sns -def test_publish_to_platform_endpoint(): - conn = boto3.client('sns', region_name='us-east-1') - platform_application = conn.create_platform_application( - Name="my-application", - Platform="APNS", - Attributes={}, - ) - application_arn = platform_application['PlatformApplicationArn'] - - endpoint = conn.create_platform_endpoint( - PlatformApplicationArn=application_arn, - Token="some_unique_id", - CustomUserData="some user data", - Attributes={ - "Enabled": 'true', - }, - ) - - endpoint_arn = endpoint['EndpointArn'] - - conn.publish(Message="some message", - MessageStructure="json", TargetArn=endpoint_arn) - - -@mock_sns -def test_publish_to_disabled_platform_endpoint(): - conn = boto3.client('sns', region_name='us-east-1') - platform_application = conn.create_platform_application( - Name="my-application", - Platform="APNS", - Attributes={}, - ) - application_arn = platform_application['PlatformApplicationArn'] - - endpoint = conn.create_platform_endpoint( - PlatformApplicationArn=application_arn, - Token="some_unique_id", - CustomUserData="some user data", - Attributes={ - "Enabled": 'false', - }, - ) - - endpoint_arn = endpoint['EndpointArn'] - - conn.publish.when.called_with( - Message="some message", - MessageStructure="json", - TargetArn=endpoint_arn, - 
).should.throw(ClientError) - - -@mock_sns -def test_set_sms_attributes(): - conn = boto3.client('sns', region_name='us-east-1') - - conn.set_sms_attributes(attributes={'DefaultSMSType': 'Transactional', 'test': 'test'}) - - response = conn.get_sms_attributes() - response.should.contain('attributes') - response['attributes'].should.contain('DefaultSMSType') - response['attributes'].should.contain('test') - response['attributes']['DefaultSMSType'].should.equal('Transactional') - response['attributes']['test'].should.equal('test') - - -@mock_sns -def test_get_sms_attributes_filtered(): - conn = boto3.client('sns', region_name='us-east-1') - - conn.set_sms_attributes(attributes={'DefaultSMSType': 'Transactional', 'test': 'test'}) - - response = conn.get_sms_attributes(attributes=['DefaultSMSType']) - response.should.contain('attributes') - response['attributes'].should.contain('DefaultSMSType') - response['attributes'].should_not.contain('test') - response['attributes']['DefaultSMSType'].should.equal('Transactional') +from __future__ import unicode_literals + +import boto3 +from botocore.exceptions import ClientError +from moto import mock_sns +import sure # noqa + + +@mock_sns +def test_create_platform_application(): + conn = boto3.client('sns', region_name='us-east-1') + response = conn.create_platform_application( + Name="my-application", + Platform="APNS", + Attributes={ + "PlatformCredential": "platform_credential", + "PlatformPrincipal": "platform_principal", + }, + ) + application_arn = response['PlatformApplicationArn'] + application_arn.should.equal( + 'arn:aws:sns:us-east-1:123456789012:app/APNS/my-application') + + +@mock_sns +def test_get_platform_application_attributes(): + conn = boto3.client('sns', region_name='us-east-1') + platform_application = conn.create_platform_application( + Name="my-application", + Platform="APNS", + Attributes={ + "PlatformCredential": "platform_credential", + "PlatformPrincipal": "platform_principal", + }, + ) + arn = platform_application['PlatformApplicationArn'] + attributes = conn.get_platform_application_attributes( + PlatformApplicationArn=arn)['Attributes'] + attributes.should.equal({ + "PlatformCredential": "platform_credential", + "PlatformPrincipal": "platform_principal", + }) + + +@mock_sns +def test_get_missing_platform_application_attributes(): + conn = boto3.client('sns', region_name='us-east-1') + conn.get_platform_application_attributes.when.called_with( + PlatformApplicationArn="a-fake-arn").should.throw(ClientError) + + +@mock_sns +def test_set_platform_application_attributes(): + conn = boto3.client('sns', region_name='us-east-1') + platform_application = conn.create_platform_application( + Name="my-application", + Platform="APNS", + Attributes={ + "PlatformCredential": "platform_credential", + "PlatformPrincipal": "platform_principal", + }, + ) + arn = platform_application['PlatformApplicationArn'] + conn.set_platform_application_attributes(PlatformApplicationArn=arn, + Attributes={ + "PlatformPrincipal": "other"} + ) + attributes = conn.get_platform_application_attributes( + PlatformApplicationArn=arn)['Attributes'] + attributes.should.equal({ + "PlatformCredential": "platform_credential", + "PlatformPrincipal": "other", + }) + + +@mock_sns +def test_list_platform_applications(): + conn = boto3.client('sns', region_name='us-east-1') + conn.create_platform_application( + Name="application1", + Platform="APNS", + Attributes={}, + ) + conn.create_platform_application( + Name="application2", + Platform="APNS", + Attributes={}, + ) + + 
applications_response = conn.list_platform_applications() + applications = applications_response['PlatformApplications'] + applications.should.have.length_of(2) + + +@mock_sns +def test_delete_platform_application(): + conn = boto3.client('sns', region_name='us-east-1') + conn.create_platform_application( + Name="application1", + Platform="APNS", + Attributes={}, + ) + conn.create_platform_application( + Name="application2", + Platform="APNS", + Attributes={}, + ) + + applications_response = conn.list_platform_applications() + applications = applications_response['PlatformApplications'] + applications.should.have.length_of(2) + + application_arn = applications[0]['PlatformApplicationArn'] + conn.delete_platform_application(PlatformApplicationArn=application_arn) + + applications_response = conn.list_platform_applications() + applications = applications_response['PlatformApplications'] + applications.should.have.length_of(1) + + +@mock_sns +def test_create_platform_endpoint(): + conn = boto3.client('sns', region_name='us-east-1') + platform_application = conn.create_platform_application( + Name="my-application", + Platform="APNS", + Attributes={}, + ) + application_arn = platform_application['PlatformApplicationArn'] + + endpoint = conn.create_platform_endpoint( + PlatformApplicationArn=application_arn, + Token="some_unique_id", + CustomUserData="some user data", + Attributes={ + "Enabled": 'false', + }, + ) + + endpoint_arn = endpoint['EndpointArn'] + endpoint_arn.should.contain( + "arn:aws:sns:us-east-1:123456789012:endpoint/APNS/my-application/") + + +@mock_sns +def test_create_duplicate_platform_endpoint(): + conn = boto3.client('sns', region_name='us-east-1') + platform_application = conn.create_platform_application( + Name="my-application", + Platform="APNS", + Attributes={}, + ) + application_arn = platform_application['PlatformApplicationArn'] + + endpoint = conn.create_platform_endpoint( + PlatformApplicationArn=application_arn, + Token="some_unique_id", + CustomUserData="some user data", + Attributes={ + "Enabled": 'false', + }, + ) + + endpoint = conn.create_platform_endpoint.when.called_with( + PlatformApplicationArn=application_arn, + Token="some_unique_id", + CustomUserData="some user data", + Attributes={ + "Enabled": 'false', + }, + ).should.throw(ClientError) + + +@mock_sns +def test_get_list_endpoints_by_platform_application(): + conn = boto3.client('sns', region_name='us-east-1') + platform_application = conn.create_platform_application( + Name="my-application", + Platform="APNS", + Attributes={}, + ) + application_arn = platform_application['PlatformApplicationArn'] + + endpoint = conn.create_platform_endpoint( + PlatformApplicationArn=application_arn, + Token="some_unique_id", + CustomUserData="some user data", + Attributes={ + "CustomUserData": "some data", + }, + ) + endpoint_arn = endpoint['EndpointArn'] + + endpoint_list = conn.list_endpoints_by_platform_application( + PlatformApplicationArn=application_arn + )['Endpoints'] + + endpoint_list.should.have.length_of(1) + endpoint_list[0]['Attributes']['CustomUserData'].should.equal('some data') + endpoint_list[0]['EndpointArn'].should.equal(endpoint_arn) + + +@mock_sns +def test_get_endpoint_attributes(): + conn = boto3.client('sns', region_name='us-east-1') + platform_application = conn.create_platform_application( + Name="my-application", + Platform="APNS", + Attributes={}, + ) + application_arn = platform_application['PlatformApplicationArn'] + + endpoint = conn.create_platform_endpoint( + 
PlatformApplicationArn=application_arn, + Token="some_unique_id", + CustomUserData="some user data", + Attributes={ + "Enabled": 'false', + "CustomUserData": "some data", + }, + ) + endpoint_arn = endpoint['EndpointArn'] + + attributes = conn.get_endpoint_attributes( + EndpointArn=endpoint_arn)['Attributes'] + attributes.should.equal({ + "Token": "some_unique_id", + "Enabled": 'false', + "CustomUserData": "some data", + }) + + +@mock_sns +def test_get_missing_endpoint_attributes(): + conn = boto3.client('sns', region_name='us-east-1') + conn.get_endpoint_attributes.when.called_with( + EndpointArn="a-fake-arn").should.throw(ClientError) + + +@mock_sns +def test_set_endpoint_attributes(): + conn = boto3.client('sns', region_name='us-east-1') + platform_application = conn.create_platform_application( + Name="my-application", + Platform="APNS", + Attributes={}, + ) + application_arn = platform_application['PlatformApplicationArn'] + + endpoint = conn.create_platform_endpoint( + PlatformApplicationArn=application_arn, + Token="some_unique_id", + CustomUserData="some user data", + Attributes={ + "Enabled": 'false', + "CustomUserData": "some data", + }, + ) + endpoint_arn = endpoint['EndpointArn'] + + conn.set_endpoint_attributes(EndpointArn=endpoint_arn, + Attributes={"CustomUserData": "other data"} + ) + attributes = conn.get_endpoint_attributes( + EndpointArn=endpoint_arn)['Attributes'] + attributes.should.equal({ + "Token": "some_unique_id", + "Enabled": 'false', + "CustomUserData": "other data", + }) + + +@mock_sns +def test_publish_to_platform_endpoint(): + conn = boto3.client('sns', region_name='us-east-1') + platform_application = conn.create_platform_application( + Name="my-application", + Platform="APNS", + Attributes={}, + ) + application_arn = platform_application['PlatformApplicationArn'] + + endpoint = conn.create_platform_endpoint( + PlatformApplicationArn=application_arn, + Token="some_unique_id", + CustomUserData="some user data", + Attributes={ + "Enabled": 'true', + }, + ) + + endpoint_arn = endpoint['EndpointArn'] + + conn.publish(Message="some message", + MessageStructure="json", TargetArn=endpoint_arn) + + +@mock_sns +def test_publish_to_disabled_platform_endpoint(): + conn = boto3.client('sns', region_name='us-east-1') + platform_application = conn.create_platform_application( + Name="my-application", + Platform="APNS", + Attributes={}, + ) + application_arn = platform_application['PlatformApplicationArn'] + + endpoint = conn.create_platform_endpoint( + PlatformApplicationArn=application_arn, + Token="some_unique_id", + CustomUserData="some user data", + Attributes={ + "Enabled": 'false', + }, + ) + + endpoint_arn = endpoint['EndpointArn'] + + conn.publish.when.called_with( + Message="some message", + MessageStructure="json", + TargetArn=endpoint_arn, + ).should.throw(ClientError) + + +@mock_sns +def test_set_sms_attributes(): + conn = boto3.client('sns', region_name='us-east-1') + + conn.set_sms_attributes(attributes={'DefaultSMSType': 'Transactional', 'test': 'test'}) + + response = conn.get_sms_attributes() + response.should.contain('attributes') + response['attributes'].should.contain('DefaultSMSType') + response['attributes'].should.contain('test') + response['attributes']['DefaultSMSType'].should.equal('Transactional') + response['attributes']['test'].should.equal('test') + + +@mock_sns +def test_get_sms_attributes_filtered(): + conn = boto3.client('sns', region_name='us-east-1') + + conn.set_sms_attributes(attributes={'DefaultSMSType': 'Transactional', 'test': 
'test'}) + + response = conn.get_sms_attributes(attributes=['DefaultSMSType']) + response.should.contain('attributes') + response['attributes'].should.contain('DefaultSMSType') + response['attributes'].should_not.contain('test') + response['attributes']['DefaultSMSType'].should.equal('Transactional') diff --git a/tests/test_sns/test_publishing.py b/tests/test_sns/test_publishing.py index 964296837cd5..d04cf5accb1a 100644 --- a/tests/test_sns/test_publishing.py +++ b/tests/test_sns/test_publishing.py @@ -1,69 +1,69 @@ -from __future__ import unicode_literals - -import boto -import json -import re -from freezegun import freeze_time -import sure # noqa - -from moto import mock_sns_deprecated, mock_sqs_deprecated - - -MESSAGE_FROM_SQS_TEMPLATE = '{\n "Message": "%s",\n "MessageId": "%s",\n "Signature": "EXAMPLElDMXvB8r9R83tGoNn0ecwd5UjllzsvSvbItzfaMpN2nk5HVSw7XnOn/49IkxDKz8YrlH2qJXj2iZB0Zo2O71c4qQk1fMUDi3LGpij7RCW7AW9vYYsSqIKRnFS94ilu7NFhUzLiieYr4BKHpdTmdD6c0esKEYBpabxDSc=",\n "SignatureVersion": "1",\n "SigningCertURL": "https://sns.us-east-1.amazonaws.com/SimpleNotificationService-f3ecfb7224c7233fe7bb5f59f96de52f.pem",\n "Subject": "%s",\n "Timestamp": "2015-01-01T12:00:00.000Z",\n "TopicArn": "arn:aws:sns:%s:123456789012:some-topic",\n "Type": "Notification",\n "UnsubscribeURL": "https://sns.us-east-1.amazonaws.com/?Action=Unsubscribe&SubscriptionArn=arn:aws:sns:us-east-1:123456789012:some-topic:2bcfbf39-05c3-41de-beaa-fcfcc21c8f55"\n}' - - -@mock_sqs_deprecated -@mock_sns_deprecated -def test_publish_to_sqs(): - conn = boto.connect_sns() - conn.create_topic("some-topic") - topics_json = conn.get_all_topics() - topic_arn = topics_json["ListTopicsResponse"][ - "ListTopicsResult"]["Topics"][0]['TopicArn'] - - sqs_conn = boto.connect_sqs() - sqs_conn.create_queue("test-queue") - - conn.subscribe(topic_arn, "sqs", - "arn:aws:sqs:us-east-1:123456789012:test-queue") - - message_to_publish = 'my message' - subject_to_publish = "test subject" - with freeze_time("2015-01-01 12:00:00"): - published_message = conn.publish(topic=topic_arn, message=message_to_publish, subject=subject_to_publish) - published_message_id = published_message['PublishResponse']['PublishResult']['MessageId'] - - queue = sqs_conn.get_queue("test-queue") - message = queue.read(1) - expected = MESSAGE_FROM_SQS_TEMPLATE % (message_to_publish, published_message_id, subject_to_publish, 'us-east-1') - acquired_message = re.sub("\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}\.\d{3}Z", '2015-01-01T12:00:00.000Z', message.get_body()) - acquired_message.should.equal(expected) - - -@mock_sqs_deprecated -@mock_sns_deprecated -def test_publish_to_sqs_in_different_region(): - conn = boto.sns.connect_to_region("us-west-1") - conn.create_topic("some-topic") - topics_json = conn.get_all_topics() - topic_arn = topics_json["ListTopicsResponse"][ - "ListTopicsResult"]["Topics"][0]['TopicArn'] - - sqs_conn = boto.sqs.connect_to_region("us-west-2") - sqs_conn.create_queue("test-queue") - - conn.subscribe(topic_arn, "sqs", - "arn:aws:sqs:us-west-2:123456789012:test-queue") - - message_to_publish = 'my message' - subject_to_publish = "test subject" - with freeze_time("2015-01-01 12:00:00"): - published_message = conn.publish(topic=topic_arn, message=message_to_publish, subject=subject_to_publish) - published_message_id = published_message['PublishResponse']['PublishResult']['MessageId'] - - queue = sqs_conn.get_queue("test-queue") - message = queue.read(1) - expected = MESSAGE_FROM_SQS_TEMPLATE % (message_to_publish, published_message_id, subject_to_publish, 
'us-west-1') - - acquired_message = re.sub("\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}\.\d{3}Z", '2015-01-01T12:00:00.000Z', message.get_body()) - acquired_message.should.equal(expected) +from __future__ import unicode_literals + +import boto +import json +import re +from freezegun import freeze_time +import sure # noqa + +from moto import mock_sns_deprecated, mock_sqs_deprecated + + +MESSAGE_FROM_SQS_TEMPLATE = '{\n "Message": "%s",\n "MessageId": "%s",\n "Signature": "EXAMPLElDMXvB8r9R83tGoNn0ecwd5UjllzsvSvbItzfaMpN2nk5HVSw7XnOn/49IkxDKz8YrlH2qJXj2iZB0Zo2O71c4qQk1fMUDi3LGpij7RCW7AW9vYYsSqIKRnFS94ilu7NFhUzLiieYr4BKHpdTmdD6c0esKEYBpabxDSc=",\n "SignatureVersion": "1",\n "SigningCertURL": "https://sns.us-east-1.amazonaws.com/SimpleNotificationService-f3ecfb7224c7233fe7bb5f59f96de52f.pem",\n "Subject": "%s",\n "Timestamp": "2015-01-01T12:00:00.000Z",\n "TopicArn": "arn:aws:sns:%s:123456789012:some-topic",\n "Type": "Notification",\n "UnsubscribeURL": "https://sns.us-east-1.amazonaws.com/?Action=Unsubscribe&SubscriptionArn=arn:aws:sns:us-east-1:123456789012:some-topic:2bcfbf39-05c3-41de-beaa-fcfcc21c8f55"\n}' + + +@mock_sqs_deprecated +@mock_sns_deprecated +def test_publish_to_sqs(): + conn = boto.connect_sns() + conn.create_topic("some-topic") + topics_json = conn.get_all_topics() + topic_arn = topics_json["ListTopicsResponse"][ + "ListTopicsResult"]["Topics"][0]['TopicArn'] + + sqs_conn = boto.connect_sqs() + sqs_conn.create_queue("test-queue") + + conn.subscribe(topic_arn, "sqs", + "arn:aws:sqs:us-east-1:123456789012:test-queue") + + message_to_publish = 'my message' + subject_to_publish = "test subject" + with freeze_time("2015-01-01 12:00:00"): + published_message = conn.publish(topic=topic_arn, message=message_to_publish, subject=subject_to_publish) + published_message_id = published_message['PublishResponse']['PublishResult']['MessageId'] + + queue = sqs_conn.get_queue("test-queue") + message = queue.read(1) + expected = MESSAGE_FROM_SQS_TEMPLATE % (message_to_publish, published_message_id, subject_to_publish, 'us-east-1') + acquired_message = re.sub("\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}\.\d{3}Z", '2015-01-01T12:00:00.000Z', message.get_body()) + acquired_message.should.equal(expected) + + +@mock_sqs_deprecated +@mock_sns_deprecated +def test_publish_to_sqs_in_different_region(): + conn = boto.sns.connect_to_region("us-west-1") + conn.create_topic("some-topic") + topics_json = conn.get_all_topics() + topic_arn = topics_json["ListTopicsResponse"][ + "ListTopicsResult"]["Topics"][0]['TopicArn'] + + sqs_conn = boto.sqs.connect_to_region("us-west-2") + sqs_conn.create_queue("test-queue") + + conn.subscribe(topic_arn, "sqs", + "arn:aws:sqs:us-west-2:123456789012:test-queue") + + message_to_publish = 'my message' + subject_to_publish = "test subject" + with freeze_time("2015-01-01 12:00:00"): + published_message = conn.publish(topic=topic_arn, message=message_to_publish, subject=subject_to_publish) + published_message_id = published_message['PublishResponse']['PublishResult']['MessageId'] + + queue = sqs_conn.get_queue("test-queue") + message = queue.read(1) + expected = MESSAGE_FROM_SQS_TEMPLATE % (message_to_publish, published_message_id, subject_to_publish, 'us-west-1') + + acquired_message = re.sub("\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}\.\d{3}Z", '2015-01-01T12:00:00.000Z', message.get_body()) + acquired_message.should.equal(expected) diff --git a/tests/test_sns/test_publishing_boto3.py b/tests/test_sns/test_publishing_boto3.py index 3d598d40630a..e146ec3c9ec4 100644 --- 
a/tests/test_sns/test_publishing_boto3.py +++ b/tests/test_sns/test_publishing_boto3.py @@ -1,489 +1,489 @@ -from __future__ import unicode_literals - -import base64 -import json - -import boto3 -import re -from freezegun import freeze_time -import sure # noqa - -import responses -from botocore.exceptions import ClientError -from nose.tools import assert_raises -from moto import mock_sns, mock_sqs - - -MESSAGE_FROM_SQS_TEMPLATE = '{\n "Message": "%s",\n "MessageId": "%s",\n "Signature": "EXAMPLElDMXvB8r9R83tGoNn0ecwd5UjllzsvSvbItzfaMpN2nk5HVSw7XnOn/49IkxDKz8YrlH2qJXj2iZB0Zo2O71c4qQk1fMUDi3LGpij7RCW7AW9vYYsSqIKRnFS94ilu7NFhUzLiieYr4BKHpdTmdD6c0esKEYBpabxDSc=",\n "SignatureVersion": "1",\n "SigningCertURL": "https://sns.us-east-1.amazonaws.com/SimpleNotificationService-f3ecfb7224c7233fe7bb5f59f96de52f.pem",\n "Subject": "my subject",\n "Timestamp": "2015-01-01T12:00:00.000Z",\n "TopicArn": "arn:aws:sns:%s:123456789012:some-topic",\n "Type": "Notification",\n "UnsubscribeURL": "https://sns.us-east-1.amazonaws.com/?Action=Unsubscribe&SubscriptionArn=arn:aws:sns:us-east-1:123456789012:some-topic:2bcfbf39-05c3-41de-beaa-fcfcc21c8f55"\n}' - - -@mock_sqs -@mock_sns -def test_publish_to_sqs(): - conn = boto3.client('sns', region_name='us-east-1') - conn.create_topic(Name="some-topic") - response = conn.list_topics() - topic_arn = response["Topics"][0]['TopicArn'] - - sqs_conn = boto3.resource('sqs', region_name='us-east-1') - sqs_conn.create_queue(QueueName="test-queue") - - conn.subscribe(TopicArn=topic_arn, - Protocol="sqs", - Endpoint="arn:aws:sqs:us-east-1:123456789012:test-queue") - message = 'my message' - with freeze_time("2015-01-01 12:00:00"): - published_message = conn.publish(TopicArn=topic_arn, Message=message) - published_message_id = published_message['MessageId'] - - queue = sqs_conn.get_queue_by_name(QueueName="test-queue") - messages = queue.receive_messages(MaxNumberOfMessages=1) - expected = MESSAGE_FROM_SQS_TEMPLATE % (message, published_message_id, 'us-east-1') - acquired_message = re.sub("\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}\.\d{3}Z", u'2015-01-01T12:00:00.000Z', messages[0].body) - acquired_message.should.equal(expected) - - -@mock_sqs -@mock_sns -def test_publish_to_sqs_raw(): - sns = boto3.resource('sns', region_name='us-east-1') - topic = sns.create_topic(Name='some-topic') - - sqs = boto3.resource('sqs', region_name='us-east-1') - queue = sqs.create_queue(QueueName='test-queue') - - subscription = topic.subscribe( - Protocol='sqs', Endpoint=queue.attributes['QueueArn']) - - subscription.set_attributes( - AttributeName='RawMessageDelivery', AttributeValue='true') - - message = 'my message' - with freeze_time("2015-01-01 12:00:00"): - topic.publish(Message=message) - - messages = queue.receive_messages(MaxNumberOfMessages=1) - messages[0].body.should.equal(message) - - -@mock_sqs -@mock_sns -def test_publish_to_sqs_bad(): - conn = boto3.client('sns', region_name='us-east-1') - conn.create_topic(Name="some-topic") - response = conn.list_topics() - topic_arn = response["Topics"][0]['TopicArn'] - - sqs_conn = boto3.resource('sqs', region_name='us-east-1') - sqs_conn.create_queue(QueueName="test-queue") - - conn.subscribe(TopicArn=topic_arn, - Protocol="sqs", - Endpoint="arn:aws:sqs:us-east-1:123456789012:test-queue") - message = 'my message' - try: - # Test missing Value - conn.publish( - TopicArn=topic_arn, Message=message, - MessageAttributes={'store': {'DataType': 'String'}}) - except ClientError as err: - err.response['Error']['Code'].should.equal('InvalidParameterValue') - 
try: - # Test empty DataType (if the DataType field is missing entirely - # botocore throws an exception during validation) - conn.publish( - TopicArn=topic_arn, Message=message, - MessageAttributes={'store': { - 'DataType': '', - 'StringValue': 'example_corp' - }}) - except ClientError as err: - err.response['Error']['Code'].should.equal('InvalidParameterValue') - try: - # Test empty Value - conn.publish( - TopicArn=topic_arn, Message=message, - MessageAttributes={'store': { - 'DataType': 'String', - 'StringValue': '' - }}) - except ClientError as err: - err.response['Error']['Code'].should.equal('InvalidParameterValue') - - -@mock_sqs -@mock_sns -def test_publish_to_sqs_msg_attr_byte_value(): - conn = boto3.client('sns', region_name='us-east-1') - conn.create_topic(Name="some-topic") - response = conn.list_topics() - topic_arn = response["Topics"][0]['TopicArn'] - - sqs_conn = boto3.resource('sqs', region_name='us-east-1') - queue = sqs_conn.create_queue(QueueName="test-queue") - - conn.subscribe(TopicArn=topic_arn, - Protocol="sqs", - Endpoint="arn:aws:sqs:us-east-1:123456789012:test-queue") - message = 'my message' - conn.publish( - TopicArn=topic_arn, Message=message, - MessageAttributes={'store': { - 'DataType': 'Binary', - 'BinaryValue': b'\x02\x03\x04' - }}) - messages = queue.receive_messages(MaxNumberOfMessages=5) - message_attributes = [ - json.loads(m.body)['MessageAttributes'] for m in messages] - message_attributes.should.equal([{ - 'store': { - 'Type': 'Binary', - 'Value': base64.b64encode(b'\x02\x03\x04').decode() - } - }]) - - -@mock_sns -def test_publish_sms(): - client = boto3.client('sns', region_name='us-east-1') - client.create_topic(Name="some-topic") - resp = client.create_topic(Name="some-topic") - arn = resp['TopicArn'] - - client.subscribe( - TopicArn=arn, - Protocol='sms', - Endpoint='+15551234567' - ) - - result = client.publish(PhoneNumber="+15551234567", Message="my message") - result.should.contain('MessageId') - - -@mock_sns -def test_publish_bad_sms(): - client = boto3.client('sns', region_name='us-east-1') - client.create_topic(Name="some-topic") - resp = client.create_topic(Name="some-topic") - arn = resp['TopicArn'] - - client.subscribe( - TopicArn=arn, - Protocol='sms', - Endpoint='+15551234567' - ) - - try: - # Test invalid number - client.publish(PhoneNumber="NAA+15551234567", Message="my message") - except ClientError as err: - err.response['Error']['Code'].should.equal('InvalidParameter') - - try: - # Test not found number - client.publish(PhoneNumber="+44001234567", Message="my message") - except ClientError as err: - err.response['Error']['Code'].should.equal('ParameterValueInvalid') - - -@mock_sqs -@mock_sns -def test_publish_to_sqs_dump_json(): - conn = boto3.client('sns', region_name='us-east-1') - conn.create_topic(Name="some-topic") - response = conn.list_topics() - topic_arn = response["Topics"][0]['TopicArn'] - - sqs_conn = boto3.resource('sqs', region_name='us-east-1') - sqs_conn.create_queue(QueueName="test-queue") - - conn.subscribe(TopicArn=topic_arn, - Protocol="sqs", - Endpoint="arn:aws:sqs:us-east-1:123456789012:test-queue") - - message = json.dumps({ - "Records": [{ - "eventVersion": "2.0", - "eventSource": "aws:s3", - "s3": { - "s3SchemaVersion": "1.0" - } - }] - }, sort_keys=True) - with freeze_time("2015-01-01 12:00:00"): - published_message = conn.publish(TopicArn=topic_arn, Message=message) - published_message_id = published_message['MessageId'] - - queue = sqs_conn.get_queue_by_name(QueueName="test-queue") - messages = 
queue.receive_messages(MaxNumberOfMessages=1) - - escaped = message.replace('"', '\\"') - expected = MESSAGE_FROM_SQS_TEMPLATE % (escaped, published_message_id, 'us-east-1') - acquired_message = re.sub("\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}\.\d{3}Z", u'2015-01-01T12:00:00.000Z', messages[0].body) - acquired_message.should.equal(expected) - - -@mock_sqs -@mock_sns -def test_publish_to_sqs_in_different_region(): - conn = boto3.client('sns', region_name='us-west-1') - conn.create_topic(Name="some-topic") - response = conn.list_topics() - topic_arn = response["Topics"][0]['TopicArn'] - - sqs_conn = boto3.resource('sqs', region_name='us-west-2') - sqs_conn.create_queue(QueueName="test-queue") - - conn.subscribe(TopicArn=topic_arn, - Protocol="sqs", - Endpoint="arn:aws:sqs:us-west-2:123456789012:test-queue") - - message = 'my message' - with freeze_time("2015-01-01 12:00:00"): - published_message = conn.publish(TopicArn=topic_arn, Message=message) - published_message_id = published_message['MessageId'] - - queue = sqs_conn.get_queue_by_name(QueueName="test-queue") - messages = queue.receive_messages(MaxNumberOfMessages=1) - expected = MESSAGE_FROM_SQS_TEMPLATE % (message, published_message_id, 'us-west-1') - acquired_message = re.sub("\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}\.\d{3}Z", u'2015-01-01T12:00:00.000Z', messages[0].body) - acquired_message.should.equal(expected) - - -@freeze_time("2013-01-01") -@mock_sns -def test_publish_to_http(): - def callback(request): - request.headers["Content-Type"].should.equal("text/plain; charset=UTF-8") - json.loads.when.called_with( - request.body.decode() - ).should_not.throw(Exception) - return 200, {}, "" - - responses.add_callback( - method="POST", - url="http://example.com/foobar", - callback=callback, - ) - - conn = boto3.client('sns', region_name='us-east-1') - conn.create_topic(Name="some-topic") - response = conn.list_topics() - topic_arn = response["Topics"][0]['TopicArn'] - - conn.subscribe(TopicArn=topic_arn, - Protocol="http", - Endpoint="http://example.com/foobar") - - response = conn.publish( - TopicArn=topic_arn, Message="my message", Subject="my subject") - - -@mock_sqs -@mock_sns -def test_publish_subject(): - conn = boto3.client('sns', region_name='us-east-1') - conn.create_topic(Name="some-topic") - response = conn.list_topics() - topic_arn = response["Topics"][0]['TopicArn'] - - sqs_conn = boto3.resource('sqs', region_name='us-east-1') - sqs_conn.create_queue(QueueName="test-queue") - - conn.subscribe(TopicArn=topic_arn, - Protocol="sqs", - Endpoint="arn:aws:sqs:us-east-1:123456789012:test-queue") - message = 'my message' - subject1 = 'test subject' - subject2 = 'test subject' * 20 - with freeze_time("2015-01-01 12:00:00"): - conn.publish(TopicArn=topic_arn, Message=message, Subject=subject1) - - # Just that it doesnt error is a pass - try: - with freeze_time("2015-01-01 12:00:00"): - conn.publish(TopicArn=topic_arn, Message=message, Subject=subject2) - except ClientError as err: - err.response['Error']['Code'].should.equal('InvalidParameter') - else: - raise RuntimeError('Should have raised an InvalidParameter exception') - - -@mock_sns -def test_publish_message_too_long(): - sns = boto3.resource('sns', region_name='us-east-1') - topic = sns.create_topic(Name='some-topic') - - with assert_raises(ClientError): - topic.publish( - Message="".join(["." for i in range(0, 262145)])) - - # message short enough - does not raise an error - topic.publish( - Message="".join(["." 
for i in range(0, 262144)])) - - -def _setup_filter_policy_test(filter_policy): - sns = boto3.resource('sns', region_name='us-east-1') - topic = sns.create_topic(Name='some-topic') - - sqs = boto3.resource('sqs', region_name='us-east-1') - queue = sqs.create_queue(QueueName='test-queue') - - subscription = topic.subscribe( - Protocol='sqs', Endpoint=queue.attributes['QueueArn']) - - subscription.set_attributes( - AttributeName='FilterPolicy', AttributeValue=json.dumps(filter_policy)) - - return topic, subscription, queue - - -@mock_sqs -@mock_sns -def test_filtering_exact_string(): - topic, subscription, queue = _setup_filter_policy_test( - {'store': ['example_corp']}) - - topic.publish( - Message='match', - MessageAttributes={'store': {'DataType': 'String', - 'StringValue': 'example_corp'}}) - - messages = queue.receive_messages(MaxNumberOfMessages=5) - message_bodies = [json.loads(m.body)['Message'] for m in messages] - message_bodies.should.equal(['match']) - message_attributes = [ - json.loads(m.body)['MessageAttributes'] for m in messages] - message_attributes.should.equal( - [{'store': {'Type': 'String', 'Value': 'example_corp'}}]) - - -@mock_sqs -@mock_sns -def test_filtering_exact_string_multiple_message_attributes(): - topic, subscription, queue = _setup_filter_policy_test( - {'store': ['example_corp']}) - - topic.publish( - Message='match', - MessageAttributes={'store': {'DataType': 'String', - 'StringValue': 'example_corp'}, - 'event': {'DataType': 'String', - 'StringValue': 'order_cancelled'}}) - - messages = queue.receive_messages(MaxNumberOfMessages=5) - message_bodies = [json.loads(m.body)['Message'] for m in messages] - message_bodies.should.equal(['match']) - message_attributes = [ - json.loads(m.body)['MessageAttributes'] for m in messages] - message_attributes.should.equal([{ - 'store': {'Type': 'String', 'Value': 'example_corp'}, - 'event': {'Type': 'String', 'Value': 'order_cancelled'}}]) - - -@mock_sqs -@mock_sns -def test_filtering_exact_string_OR_matching(): - topic, subscription, queue = _setup_filter_policy_test( - {'store': ['example_corp', 'different_corp']}) - - topic.publish( - Message='match example_corp', - MessageAttributes={'store': {'DataType': 'String', - 'StringValue': 'example_corp'}}) - topic.publish( - Message='match different_corp', - MessageAttributes={'store': {'DataType': 'String', - 'StringValue': 'different_corp'}}) - messages = queue.receive_messages(MaxNumberOfMessages=5) - message_bodies = [json.loads(m.body)['Message'] for m in messages] - message_bodies.should.equal( - ['match example_corp', 'match different_corp']) - message_attributes = [ - json.loads(m.body)['MessageAttributes'] for m in messages] - message_attributes.should.equal([ - {'store': {'Type': 'String', 'Value': 'example_corp'}}, - {'store': {'Type': 'String', 'Value': 'different_corp'}}]) - - -@mock_sqs -@mock_sns -def test_filtering_exact_string_AND_matching_positive(): - topic, subscription, queue = _setup_filter_policy_test( - {'store': ['example_corp'], - 'event': ['order_cancelled']}) - - topic.publish( - Message='match example_corp order_cancelled', - MessageAttributes={'store': {'DataType': 'String', - 'StringValue': 'example_corp'}, - 'event': {'DataType': 'String', - 'StringValue': 'order_cancelled'}}) - - messages = queue.receive_messages(MaxNumberOfMessages=5) - message_bodies = [json.loads(m.body)['Message'] for m in messages] - message_bodies.should.equal( - ['match example_corp order_cancelled']) - message_attributes = [ - json.loads(m.body)['MessageAttributes'] 
for m in messages] - message_attributes.should.equal([{ - 'store': {'Type': 'String', 'Value': 'example_corp'}, - 'event': {'Type': 'String', 'Value': 'order_cancelled'}}]) - - -@mock_sqs -@mock_sns -def test_filtering_exact_string_AND_matching_no_match(): - topic, subscription, queue = _setup_filter_policy_test( - {'store': ['example_corp'], - 'event': ['order_cancelled']}) - - topic.publish( - Message='match example_corp order_accepted', - MessageAttributes={'store': {'DataType': 'String', - 'StringValue': 'example_corp'}, - 'event': {'DataType': 'String', - 'StringValue': 'order_accepted'}}) - - messages = queue.receive_messages(MaxNumberOfMessages=5) - message_bodies = [json.loads(m.body)['Message'] for m in messages] - message_bodies.should.equal([]) - message_attributes = [ - json.loads(m.body)['MessageAttributes'] for m in messages] - message_attributes.should.equal([]) - - -@mock_sqs -@mock_sns -def test_filtering_exact_string_no_match(): - topic, subscription, queue = _setup_filter_policy_test( - {'store': ['example_corp']}) - - topic.publish( - Message='no match', - MessageAttributes={'store': {'DataType': 'String', - 'StringValue': 'different_corp'}}) - - messages = queue.receive_messages(MaxNumberOfMessages=5) - message_bodies = [json.loads(m.body)['Message'] for m in messages] - message_bodies.should.equal([]) - message_attributes = [ - json.loads(m.body)['MessageAttributes'] for m in messages] - message_attributes.should.equal([]) - - -@mock_sqs -@mock_sns -def test_filtering_exact_string_no_attributes_no_match(): - topic, subscription, queue = _setup_filter_policy_test( - {'store': ['example_corp']}) - - topic.publish(Message='no match') - - messages = queue.receive_messages(MaxNumberOfMessages=5) - message_bodies = [json.loads(m.body)['Message'] for m in messages] - message_bodies.should.equal([]) - message_attributes = [ - json.loads(m.body)['MessageAttributes'] for m in messages] - message_attributes.should.equal([]) +from __future__ import unicode_literals + +import base64 +import json + +import boto3 +import re +from freezegun import freeze_time +import sure # noqa + +import responses +from botocore.exceptions import ClientError +from nose.tools import assert_raises +from moto import mock_sns, mock_sqs + + +MESSAGE_FROM_SQS_TEMPLATE = '{\n "Message": "%s",\n "MessageId": "%s",\n "Signature": "EXAMPLElDMXvB8r9R83tGoNn0ecwd5UjllzsvSvbItzfaMpN2nk5HVSw7XnOn/49IkxDKz8YrlH2qJXj2iZB0Zo2O71c4qQk1fMUDi3LGpij7RCW7AW9vYYsSqIKRnFS94ilu7NFhUzLiieYr4BKHpdTmdD6c0esKEYBpabxDSc=",\n "SignatureVersion": "1",\n "SigningCertURL": "https://sns.us-east-1.amazonaws.com/SimpleNotificationService-f3ecfb7224c7233fe7bb5f59f96de52f.pem",\n "Subject": "my subject",\n "Timestamp": "2015-01-01T12:00:00.000Z",\n "TopicArn": "arn:aws:sns:%s:123456789012:some-topic",\n "Type": "Notification",\n "UnsubscribeURL": "https://sns.us-east-1.amazonaws.com/?Action=Unsubscribe&SubscriptionArn=arn:aws:sns:us-east-1:123456789012:some-topic:2bcfbf39-05c3-41de-beaa-fcfcc21c8f55"\n}' + + +@mock_sqs +@mock_sns +def test_publish_to_sqs(): + conn = boto3.client('sns', region_name='us-east-1') + conn.create_topic(Name="some-topic") + response = conn.list_topics() + topic_arn = response["Topics"][0]['TopicArn'] + + sqs_conn = boto3.resource('sqs', region_name='us-east-1') + sqs_conn.create_queue(QueueName="test-queue") + + conn.subscribe(TopicArn=topic_arn, + Protocol="sqs", + Endpoint="arn:aws:sqs:us-east-1:123456789012:test-queue") + message = 'my message' + with freeze_time("2015-01-01 12:00:00"): + published_message = 
conn.publish(TopicArn=topic_arn, Message=message) + published_message_id = published_message['MessageId'] + + queue = sqs_conn.get_queue_by_name(QueueName="test-queue") + messages = queue.receive_messages(MaxNumberOfMessages=1) + expected = MESSAGE_FROM_SQS_TEMPLATE % (message, published_message_id, 'us-east-1') + acquired_message = re.sub("\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}\.\d{3}Z", u'2015-01-01T12:00:00.000Z', messages[0].body) + acquired_message.should.equal(expected) + + +@mock_sqs +@mock_sns +def test_publish_to_sqs_raw(): + sns = boto3.resource('sns', region_name='us-east-1') + topic = sns.create_topic(Name='some-topic') + + sqs = boto3.resource('sqs', region_name='us-east-1') + queue = sqs.create_queue(QueueName='test-queue') + + subscription = topic.subscribe( + Protocol='sqs', Endpoint=queue.attributes['QueueArn']) + + subscription.set_attributes( + AttributeName='RawMessageDelivery', AttributeValue='true') + + message = 'my message' + with freeze_time("2015-01-01 12:00:00"): + topic.publish(Message=message) + + messages = queue.receive_messages(MaxNumberOfMessages=1) + messages[0].body.should.equal(message) + + +@mock_sqs +@mock_sns +def test_publish_to_sqs_bad(): + conn = boto3.client('sns', region_name='us-east-1') + conn.create_topic(Name="some-topic") + response = conn.list_topics() + topic_arn = response["Topics"][0]['TopicArn'] + + sqs_conn = boto3.resource('sqs', region_name='us-east-1') + sqs_conn.create_queue(QueueName="test-queue") + + conn.subscribe(TopicArn=topic_arn, + Protocol="sqs", + Endpoint="arn:aws:sqs:us-east-1:123456789012:test-queue") + message = 'my message' + try: + # Test missing Value + conn.publish( + TopicArn=topic_arn, Message=message, + MessageAttributes={'store': {'DataType': 'String'}}) + except ClientError as err: + err.response['Error']['Code'].should.equal('InvalidParameterValue') + try: + # Test empty DataType (if the DataType field is missing entirely + # botocore throws an exception during validation) + conn.publish( + TopicArn=topic_arn, Message=message, + MessageAttributes={'store': { + 'DataType': '', + 'StringValue': 'example_corp' + }}) + except ClientError as err: + err.response['Error']['Code'].should.equal('InvalidParameterValue') + try: + # Test empty Value + conn.publish( + TopicArn=topic_arn, Message=message, + MessageAttributes={'store': { + 'DataType': 'String', + 'StringValue': '' + }}) + except ClientError as err: + err.response['Error']['Code'].should.equal('InvalidParameterValue') + + +@mock_sqs +@mock_sns +def test_publish_to_sqs_msg_attr_byte_value(): + conn = boto3.client('sns', region_name='us-east-1') + conn.create_topic(Name="some-topic") + response = conn.list_topics() + topic_arn = response["Topics"][0]['TopicArn'] + + sqs_conn = boto3.resource('sqs', region_name='us-east-1') + queue = sqs_conn.create_queue(QueueName="test-queue") + + conn.subscribe(TopicArn=topic_arn, + Protocol="sqs", + Endpoint="arn:aws:sqs:us-east-1:123456789012:test-queue") + message = 'my message' + conn.publish( + TopicArn=topic_arn, Message=message, + MessageAttributes={'store': { + 'DataType': 'Binary', + 'BinaryValue': b'\x02\x03\x04' + }}) + messages = queue.receive_messages(MaxNumberOfMessages=5) + message_attributes = [ + json.loads(m.body)['MessageAttributes'] for m in messages] + message_attributes.should.equal([{ + 'store': { + 'Type': 'Binary', + 'Value': base64.b64encode(b'\x02\x03\x04').decode() + } + }]) + + +@mock_sns +def test_publish_sms(): + client = boto3.client('sns', region_name='us-east-1') + 
client.create_topic(Name="some-topic") + resp = client.create_topic(Name="some-topic") + arn = resp['TopicArn'] + + client.subscribe( + TopicArn=arn, + Protocol='sms', + Endpoint='+15551234567' + ) + + result = client.publish(PhoneNumber="+15551234567", Message="my message") + result.should.contain('MessageId') + + +@mock_sns +def test_publish_bad_sms(): + client = boto3.client('sns', region_name='us-east-1') + client.create_topic(Name="some-topic") + resp = client.create_topic(Name="some-topic") + arn = resp['TopicArn'] + + client.subscribe( + TopicArn=arn, + Protocol='sms', + Endpoint='+15551234567' + ) + + try: + # Test invalid number + client.publish(PhoneNumber="NAA+15551234567", Message="my message") + except ClientError as err: + err.response['Error']['Code'].should.equal('InvalidParameter') + + try: + # Test not found number + client.publish(PhoneNumber="+44001234567", Message="my message") + except ClientError as err: + err.response['Error']['Code'].should.equal('ParameterValueInvalid') + + +@mock_sqs +@mock_sns +def test_publish_to_sqs_dump_json(): + conn = boto3.client('sns', region_name='us-east-1') + conn.create_topic(Name="some-topic") + response = conn.list_topics() + topic_arn = response["Topics"][0]['TopicArn'] + + sqs_conn = boto3.resource('sqs', region_name='us-east-1') + sqs_conn.create_queue(QueueName="test-queue") + + conn.subscribe(TopicArn=topic_arn, + Protocol="sqs", + Endpoint="arn:aws:sqs:us-east-1:123456789012:test-queue") + + message = json.dumps({ + "Records": [{ + "eventVersion": "2.0", + "eventSource": "aws:s3", + "s3": { + "s3SchemaVersion": "1.0" + } + }] + }, sort_keys=True) + with freeze_time("2015-01-01 12:00:00"): + published_message = conn.publish(TopicArn=topic_arn, Message=message) + published_message_id = published_message['MessageId'] + + queue = sqs_conn.get_queue_by_name(QueueName="test-queue") + messages = queue.receive_messages(MaxNumberOfMessages=1) + + escaped = message.replace('"', '\\"') + expected = MESSAGE_FROM_SQS_TEMPLATE % (escaped, published_message_id, 'us-east-1') + acquired_message = re.sub("\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}\.\d{3}Z", u'2015-01-01T12:00:00.000Z', messages[0].body) + acquired_message.should.equal(expected) + + +@mock_sqs +@mock_sns +def test_publish_to_sqs_in_different_region(): + conn = boto3.client('sns', region_name='us-west-1') + conn.create_topic(Name="some-topic") + response = conn.list_topics() + topic_arn = response["Topics"][0]['TopicArn'] + + sqs_conn = boto3.resource('sqs', region_name='us-west-2') + sqs_conn.create_queue(QueueName="test-queue") + + conn.subscribe(TopicArn=topic_arn, + Protocol="sqs", + Endpoint="arn:aws:sqs:us-west-2:123456789012:test-queue") + + message = 'my message' + with freeze_time("2015-01-01 12:00:00"): + published_message = conn.publish(TopicArn=topic_arn, Message=message) + published_message_id = published_message['MessageId'] + + queue = sqs_conn.get_queue_by_name(QueueName="test-queue") + messages = queue.receive_messages(MaxNumberOfMessages=1) + expected = MESSAGE_FROM_SQS_TEMPLATE % (message, published_message_id, 'us-west-1') + acquired_message = re.sub("\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}\.\d{3}Z", u'2015-01-01T12:00:00.000Z', messages[0].body) + acquired_message.should.equal(expected) + + +@freeze_time("2013-01-01") +@mock_sns +def test_publish_to_http(): + def callback(request): + request.headers["Content-Type"].should.equal("text/plain; charset=UTF-8") + json.loads.when.called_with( + request.body.decode() + ).should_not.throw(Exception) + return 200, {}, "" + + 
responses.add_callback( + method="POST", + url="http://example.com/foobar", + callback=callback, + ) + + conn = boto3.client('sns', region_name='us-east-1') + conn.create_topic(Name="some-topic") + response = conn.list_topics() + topic_arn = response["Topics"][0]['TopicArn'] + + conn.subscribe(TopicArn=topic_arn, + Protocol="http", + Endpoint="http://example.com/foobar") + + response = conn.publish( + TopicArn=topic_arn, Message="my message", Subject="my subject") + + +@mock_sqs +@mock_sns +def test_publish_subject(): + conn = boto3.client('sns', region_name='us-east-1') + conn.create_topic(Name="some-topic") + response = conn.list_topics() + topic_arn = response["Topics"][0]['TopicArn'] + + sqs_conn = boto3.resource('sqs', region_name='us-east-1') + sqs_conn.create_queue(QueueName="test-queue") + + conn.subscribe(TopicArn=topic_arn, + Protocol="sqs", + Endpoint="arn:aws:sqs:us-east-1:123456789012:test-queue") + message = 'my message' + subject1 = 'test subject' + subject2 = 'test subject' * 20 + with freeze_time("2015-01-01 12:00:00"): + conn.publish(TopicArn=topic_arn, Message=message, Subject=subject1) + + # Just that it doesnt error is a pass + try: + with freeze_time("2015-01-01 12:00:00"): + conn.publish(TopicArn=topic_arn, Message=message, Subject=subject2) + except ClientError as err: + err.response['Error']['Code'].should.equal('InvalidParameter') + else: + raise RuntimeError('Should have raised an InvalidParameter exception') + + +@mock_sns +def test_publish_message_too_long(): + sns = boto3.resource('sns', region_name='us-east-1') + topic = sns.create_topic(Name='some-topic') + + with assert_raises(ClientError): + topic.publish( + Message="".join(["." for i in range(0, 262145)])) + + # message short enough - does not raise an error + topic.publish( + Message="".join(["." 
for i in range(0, 262144)])) + + +def _setup_filter_policy_test(filter_policy): + sns = boto3.resource('sns', region_name='us-east-1') + topic = sns.create_topic(Name='some-topic') + + sqs = boto3.resource('sqs', region_name='us-east-1') + queue = sqs.create_queue(QueueName='test-queue') + + subscription = topic.subscribe( + Protocol='sqs', Endpoint=queue.attributes['QueueArn']) + + subscription.set_attributes( + AttributeName='FilterPolicy', AttributeValue=json.dumps(filter_policy)) + + return topic, subscription, queue + + +@mock_sqs +@mock_sns +def test_filtering_exact_string(): + topic, subscription, queue = _setup_filter_policy_test( + {'store': ['example_corp']}) + + topic.publish( + Message='match', + MessageAttributes={'store': {'DataType': 'String', + 'StringValue': 'example_corp'}}) + + messages = queue.receive_messages(MaxNumberOfMessages=5) + message_bodies = [json.loads(m.body)['Message'] for m in messages] + message_bodies.should.equal(['match']) + message_attributes = [ + json.loads(m.body)['MessageAttributes'] for m in messages] + message_attributes.should.equal( + [{'store': {'Type': 'String', 'Value': 'example_corp'}}]) + + +@mock_sqs +@mock_sns +def test_filtering_exact_string_multiple_message_attributes(): + topic, subscription, queue = _setup_filter_policy_test( + {'store': ['example_corp']}) + + topic.publish( + Message='match', + MessageAttributes={'store': {'DataType': 'String', + 'StringValue': 'example_corp'}, + 'event': {'DataType': 'String', + 'StringValue': 'order_cancelled'}}) + + messages = queue.receive_messages(MaxNumberOfMessages=5) + message_bodies = [json.loads(m.body)['Message'] for m in messages] + message_bodies.should.equal(['match']) + message_attributes = [ + json.loads(m.body)['MessageAttributes'] for m in messages] + message_attributes.should.equal([{ + 'store': {'Type': 'String', 'Value': 'example_corp'}, + 'event': {'Type': 'String', 'Value': 'order_cancelled'}}]) + + +@mock_sqs +@mock_sns +def test_filtering_exact_string_OR_matching(): + topic, subscription, queue = _setup_filter_policy_test( + {'store': ['example_corp', 'different_corp']}) + + topic.publish( + Message='match example_corp', + MessageAttributes={'store': {'DataType': 'String', + 'StringValue': 'example_corp'}}) + topic.publish( + Message='match different_corp', + MessageAttributes={'store': {'DataType': 'String', + 'StringValue': 'different_corp'}}) + messages = queue.receive_messages(MaxNumberOfMessages=5) + message_bodies = [json.loads(m.body)['Message'] for m in messages] + message_bodies.should.equal( + ['match example_corp', 'match different_corp']) + message_attributes = [ + json.loads(m.body)['MessageAttributes'] for m in messages] + message_attributes.should.equal([ + {'store': {'Type': 'String', 'Value': 'example_corp'}}, + {'store': {'Type': 'String', 'Value': 'different_corp'}}]) + + +@mock_sqs +@mock_sns +def test_filtering_exact_string_AND_matching_positive(): + topic, subscription, queue = _setup_filter_policy_test( + {'store': ['example_corp'], + 'event': ['order_cancelled']}) + + topic.publish( + Message='match example_corp order_cancelled', + MessageAttributes={'store': {'DataType': 'String', + 'StringValue': 'example_corp'}, + 'event': {'DataType': 'String', + 'StringValue': 'order_cancelled'}}) + + messages = queue.receive_messages(MaxNumberOfMessages=5) + message_bodies = [json.loads(m.body)['Message'] for m in messages] + message_bodies.should.equal( + ['match example_corp order_cancelled']) + message_attributes = [ + json.loads(m.body)['MessageAttributes'] 
for m in messages] + message_attributes.should.equal([{ + 'store': {'Type': 'String', 'Value': 'example_corp'}, + 'event': {'Type': 'String', 'Value': 'order_cancelled'}}]) + + +@mock_sqs +@mock_sns +def test_filtering_exact_string_AND_matching_no_match(): + topic, subscription, queue = _setup_filter_policy_test( + {'store': ['example_corp'], + 'event': ['order_cancelled']}) + + topic.publish( + Message='match example_corp order_accepted', + MessageAttributes={'store': {'DataType': 'String', + 'StringValue': 'example_corp'}, + 'event': {'DataType': 'String', + 'StringValue': 'order_accepted'}}) + + messages = queue.receive_messages(MaxNumberOfMessages=5) + message_bodies = [json.loads(m.body)['Message'] for m in messages] + message_bodies.should.equal([]) + message_attributes = [ + json.loads(m.body)['MessageAttributes'] for m in messages] + message_attributes.should.equal([]) + + +@mock_sqs +@mock_sns +def test_filtering_exact_string_no_match(): + topic, subscription, queue = _setup_filter_policy_test( + {'store': ['example_corp']}) + + topic.publish( + Message='no match', + MessageAttributes={'store': {'DataType': 'String', + 'StringValue': 'different_corp'}}) + + messages = queue.receive_messages(MaxNumberOfMessages=5) + message_bodies = [json.loads(m.body)['Message'] for m in messages] + message_bodies.should.equal([]) + message_attributes = [ + json.loads(m.body)['MessageAttributes'] for m in messages] + message_attributes.should.equal([]) + + +@mock_sqs +@mock_sns +def test_filtering_exact_string_no_attributes_no_match(): + topic, subscription, queue = _setup_filter_policy_test( + {'store': ['example_corp']}) + + topic.publish(Message='no match') + + messages = queue.receive_messages(MaxNumberOfMessages=5) + message_bodies = [json.loads(m.body)['Message'] for m in messages] + message_bodies.should.equal([]) + message_attributes = [ + json.loads(m.body)['MessageAttributes'] for m in messages] + message_attributes.should.equal([]) diff --git a/tests/test_sns/test_server.py b/tests/test_sns/test_server.py index 465dfa2c2ae4..bdaefa453bae 100644 --- a/tests/test_sns/test_server.py +++ b/tests/test_sns/test_server.py @@ -1,24 +1,24 @@ -from __future__ import unicode_literals - -import sure # noqa - -import moto.server as server - -''' -Test the different server responses -''' - - -def test_sns_server_get(): - backend = server.create_backend_app("sns") - test_client = backend.test_client() - - topic_data = test_client.action_data("CreateTopic", Name="testtopic") - topic_data.should.contain("CreateTopicResult") - topic_data.should.contain( - "arn:aws:sns:us-east-1:123456789012:testtopic") - - topics_data = test_client.action_data("ListTopics") - topics_data.should.contain("ListTopicsResult") - topic_data.should.contain( - "arn:aws:sns:us-east-1:123456789012:testtopic") +from __future__ import unicode_literals + +import sure # noqa + +import moto.server as server + +''' +Test the different server responses +''' + + +def test_sns_server_get(): + backend = server.create_backend_app("sns") + test_client = backend.test_client() + + topic_data = test_client.action_data("CreateTopic", Name="testtopic") + topic_data.should.contain("CreateTopicResult") + topic_data.should.contain( + "arn:aws:sns:us-east-1:123456789012:testtopic") + + topics_data = test_client.action_data("ListTopics") + topics_data.should.contain("ListTopicsResult") + topic_data.should.contain( + "arn:aws:sns:us-east-1:123456789012:testtopic") diff --git a/tests/test_sns/test_subscriptions.py b/tests/test_sns/test_subscriptions.py 
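The test_subscriptions.py and test_subscriptions_boto3.py cases below both exercise moto's offset-based paging: each list call returns at most DEFAULT_PAGE_SIZE subscriptions and, when more remain, hands the next offset back as NextToken (an int in the old boto tests, a string in the boto3 ones). A minimal sketch of that contract, assuming DEFAULT_PAGE_SIZE is 100 as in moto.sns.models; `paginate` and `items` are illustrative names, not moto internals:

    DEFAULT_PAGE_SIZE = 100

    def paginate(items, next_token=0):
        # Slice one page starting at the requested offset.
        page = items[next_token:next_token + DEFAULT_PAGE_SIZE]
        new_token = next_token + DEFAULT_PAGE_SIZE
        # Once the last page has been served, no NextToken comes back.
        if new_token >= len(items):
            new_token = None
        return page, new_token

This is exactly what the paging assertions check: a first page of DEFAULT_PAGE_SIZE entries whose NextToken equals the page size, then a final partial page with no token.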
index ba241ba444a4..3a40ba9ad37f 100644 --- a/tests/test_sns/test_subscriptions.py +++ b/tests/test_sns/test_subscriptions.py @@ -1,135 +1,135 @@ -from __future__ import unicode_literals -import boto - -import sure # noqa - -from moto import mock_sns_deprecated -from moto.sns.models import DEFAULT_PAGE_SIZE - - -@mock_sns_deprecated -def test_creating_subscription(): - conn = boto.connect_sns() - conn.create_topic("some-topic") - topics_json = conn.get_all_topics() - topic_arn = topics_json["ListTopicsResponse"][ - "ListTopicsResult"]["Topics"][0]['TopicArn'] - - conn.subscribe(topic_arn, "http", "http://example.com/") - - subscriptions = conn.get_all_subscriptions()["ListSubscriptionsResponse"][ - "ListSubscriptionsResult"]["Subscriptions"] - subscriptions.should.have.length_of(1) - subscription = subscriptions[0] - subscription["TopicArn"].should.equal(topic_arn) - subscription["Protocol"].should.equal("http") - subscription["SubscriptionArn"].should.contain(topic_arn) - subscription["Endpoint"].should.equal("http://example.com/") - - # Now unsubscribe the subscription - conn.unsubscribe(subscription["SubscriptionArn"]) - - # And there should be zero subscriptions left - subscriptions = conn.get_all_subscriptions()["ListSubscriptionsResponse"][ - "ListSubscriptionsResult"]["Subscriptions"] - subscriptions.should.have.length_of(0) - - -@mock_sns_deprecated -def test_deleting_subscriptions_by_deleting_topic(): - conn = boto.connect_sns() - conn.create_topic("some-topic") - topics_json = conn.get_all_topics() - topic_arn = topics_json["ListTopicsResponse"][ - "ListTopicsResult"]["Topics"][0]['TopicArn'] - - conn.subscribe(topic_arn, "http", "http://example.com/") - - subscriptions = conn.get_all_subscriptions()["ListSubscriptionsResponse"][ - "ListSubscriptionsResult"]["Subscriptions"] - subscriptions.should.have.length_of(1) - subscription = subscriptions[0] - subscription["TopicArn"].should.equal(topic_arn) - subscription["Protocol"].should.equal("http") - subscription["SubscriptionArn"].should.contain(topic_arn) - subscription["Endpoint"].should.equal("http://example.com/") - - # Now delete the topic - conn.delete_topic(topic_arn) - - # And there should now be 0 topics - topics_json = conn.get_all_topics() - topics = topics_json["ListTopicsResponse"]["ListTopicsResult"]["Topics"] - topics.should.have.length_of(0) - - # And there should be zero subscriptions left - subscriptions = conn.get_all_subscriptions()["ListSubscriptionsResponse"][ - "ListSubscriptionsResult"]["Subscriptions"] - subscriptions.should.have.length_of(0) - - -@mock_sns_deprecated -def test_getting_subscriptions_by_topic(): - conn = boto.connect_sns() - conn.create_topic("topic1") - conn.create_topic("topic2") - - topics_json = conn.get_all_topics() - topics = topics_json["ListTopicsResponse"]["ListTopicsResult"]["Topics"] - topic1_arn = topics[0]['TopicArn'] - topic2_arn = topics[1]['TopicArn'] - - conn.subscribe(topic1_arn, "http", "http://example1.com/") - conn.subscribe(topic2_arn, "http", "http://example2.com/") - - topic1_subscriptions = conn.get_all_subscriptions_by_topic(topic1_arn)[ - "ListSubscriptionsByTopicResponse"]["ListSubscriptionsByTopicResult"]["Subscriptions"] - topic1_subscriptions.should.have.length_of(1) - topic1_subscriptions[0]['Endpoint'].should.equal("http://example1.com/") - - -@mock_sns_deprecated -def test_subscription_paging(): - conn = boto.connect_sns() - conn.create_topic("topic1") - conn.create_topic("topic2") - - topics_json = conn.get_all_topics() - topics = 
topics_json["ListTopicsResponse"]["ListTopicsResult"]["Topics"] - topic1_arn = topics[0]['TopicArn'] - topic2_arn = topics[1]['TopicArn'] - - for index in range(DEFAULT_PAGE_SIZE + int(DEFAULT_PAGE_SIZE / 3)): - conn.subscribe(topic1_arn, 'email', 'email_' + - str(index) + '@test.com') - conn.subscribe(topic2_arn, 'email', 'email_' + - str(index) + '@test.com') - - all_subscriptions = conn.get_all_subscriptions() - all_subscriptions["ListSubscriptionsResponse"]["ListSubscriptionsResult"][ - "Subscriptions"].should.have.length_of(DEFAULT_PAGE_SIZE) - next_token = all_subscriptions["ListSubscriptionsResponse"][ - "ListSubscriptionsResult"]["NextToken"] - next_token.should.equal(DEFAULT_PAGE_SIZE) - - all_subscriptions = conn.get_all_subscriptions(next_token=next_token * 2) - all_subscriptions["ListSubscriptionsResponse"]["ListSubscriptionsResult"][ - "Subscriptions"].should.have.length_of(int(DEFAULT_PAGE_SIZE * 2 / 3)) - next_token = all_subscriptions["ListSubscriptionsResponse"][ - "ListSubscriptionsResult"]["NextToken"] - next_token.should.equal(None) - - topic1_subscriptions = conn.get_all_subscriptions_by_topic(topic1_arn) - topic1_subscriptions["ListSubscriptionsByTopicResponse"]["ListSubscriptionsByTopicResult"][ - "Subscriptions"].should.have.length_of(DEFAULT_PAGE_SIZE) - next_token = topic1_subscriptions["ListSubscriptionsByTopicResponse"][ - "ListSubscriptionsByTopicResult"]["NextToken"] - next_token.should.equal(DEFAULT_PAGE_SIZE) - - topic1_subscriptions = conn.get_all_subscriptions_by_topic( - topic1_arn, next_token=next_token) - topic1_subscriptions["ListSubscriptionsByTopicResponse"]["ListSubscriptionsByTopicResult"][ - "Subscriptions"].should.have.length_of(int(DEFAULT_PAGE_SIZE / 3)) - next_token = topic1_subscriptions["ListSubscriptionsByTopicResponse"][ - "ListSubscriptionsByTopicResult"]["NextToken"] - next_token.should.equal(None) +from __future__ import unicode_literals +import boto + +import sure # noqa + +from moto import mock_sns_deprecated +from moto.sns.models import DEFAULT_PAGE_SIZE + + +@mock_sns_deprecated +def test_creating_subscription(): + conn = boto.connect_sns() + conn.create_topic("some-topic") + topics_json = conn.get_all_topics() + topic_arn = topics_json["ListTopicsResponse"][ + "ListTopicsResult"]["Topics"][0]['TopicArn'] + + conn.subscribe(topic_arn, "http", "http://example.com/") + + subscriptions = conn.get_all_subscriptions()["ListSubscriptionsResponse"][ + "ListSubscriptionsResult"]["Subscriptions"] + subscriptions.should.have.length_of(1) + subscription = subscriptions[0] + subscription["TopicArn"].should.equal(topic_arn) + subscription["Protocol"].should.equal("http") + subscription["SubscriptionArn"].should.contain(topic_arn) + subscription["Endpoint"].should.equal("http://example.com/") + + # Now unsubscribe the subscription + conn.unsubscribe(subscription["SubscriptionArn"]) + + # And there should be zero subscriptions left + subscriptions = conn.get_all_subscriptions()["ListSubscriptionsResponse"][ + "ListSubscriptionsResult"]["Subscriptions"] + subscriptions.should.have.length_of(0) + + +@mock_sns_deprecated +def test_deleting_subscriptions_by_deleting_topic(): + conn = boto.connect_sns() + conn.create_topic("some-topic") + topics_json = conn.get_all_topics() + topic_arn = topics_json["ListTopicsResponse"][ + "ListTopicsResult"]["Topics"][0]['TopicArn'] + + conn.subscribe(topic_arn, "http", "http://example.com/") + + subscriptions = conn.get_all_subscriptions()["ListSubscriptionsResponse"][ + "ListSubscriptionsResult"]["Subscriptions"] + 
subscriptions.should.have.length_of(1) + subscription = subscriptions[0] + subscription["TopicArn"].should.equal(topic_arn) + subscription["Protocol"].should.equal("http") + subscription["SubscriptionArn"].should.contain(topic_arn) + subscription["Endpoint"].should.equal("http://example.com/") + + # Now delete the topic + conn.delete_topic(topic_arn) + + # And there should now be 0 topics + topics_json = conn.get_all_topics() + topics = topics_json["ListTopicsResponse"]["ListTopicsResult"]["Topics"] + topics.should.have.length_of(0) + + # And there should be zero subscriptions left + subscriptions = conn.get_all_subscriptions()["ListSubscriptionsResponse"][ + "ListSubscriptionsResult"]["Subscriptions"] + subscriptions.should.have.length_of(0) + + +@mock_sns_deprecated +def test_getting_subscriptions_by_topic(): + conn = boto.connect_sns() + conn.create_topic("topic1") + conn.create_topic("topic2") + + topics_json = conn.get_all_topics() + topics = topics_json["ListTopicsResponse"]["ListTopicsResult"]["Topics"] + topic1_arn = topics[0]['TopicArn'] + topic2_arn = topics[1]['TopicArn'] + + conn.subscribe(topic1_arn, "http", "http://example1.com/") + conn.subscribe(topic2_arn, "http", "http://example2.com/") + + topic1_subscriptions = conn.get_all_subscriptions_by_topic(topic1_arn)[ + "ListSubscriptionsByTopicResponse"]["ListSubscriptionsByTopicResult"]["Subscriptions"] + topic1_subscriptions.should.have.length_of(1) + topic1_subscriptions[0]['Endpoint'].should.equal("http://example1.com/") + + +@mock_sns_deprecated +def test_subscription_paging(): + conn = boto.connect_sns() + conn.create_topic("topic1") + conn.create_topic("topic2") + + topics_json = conn.get_all_topics() + topics = topics_json["ListTopicsResponse"]["ListTopicsResult"]["Topics"] + topic1_arn = topics[0]['TopicArn'] + topic2_arn = topics[1]['TopicArn'] + + for index in range(DEFAULT_PAGE_SIZE + int(DEFAULT_PAGE_SIZE / 3)): + conn.subscribe(topic1_arn, 'email', 'email_' + + str(index) + '@test.com') + conn.subscribe(topic2_arn, 'email', 'email_' + + str(index) + '@test.com') + + all_subscriptions = conn.get_all_subscriptions() + all_subscriptions["ListSubscriptionsResponse"]["ListSubscriptionsResult"][ + "Subscriptions"].should.have.length_of(DEFAULT_PAGE_SIZE) + next_token = all_subscriptions["ListSubscriptionsResponse"][ + "ListSubscriptionsResult"]["NextToken"] + next_token.should.equal(DEFAULT_PAGE_SIZE) + + all_subscriptions = conn.get_all_subscriptions(next_token=next_token * 2) + all_subscriptions["ListSubscriptionsResponse"]["ListSubscriptionsResult"][ + "Subscriptions"].should.have.length_of(int(DEFAULT_PAGE_SIZE * 2 / 3)) + next_token = all_subscriptions["ListSubscriptionsResponse"][ + "ListSubscriptionsResult"]["NextToken"] + next_token.should.equal(None) + + topic1_subscriptions = conn.get_all_subscriptions_by_topic(topic1_arn) + topic1_subscriptions["ListSubscriptionsByTopicResponse"]["ListSubscriptionsByTopicResult"][ + "Subscriptions"].should.have.length_of(DEFAULT_PAGE_SIZE) + next_token = topic1_subscriptions["ListSubscriptionsByTopicResponse"][ + "ListSubscriptionsByTopicResult"]["NextToken"] + next_token.should.equal(DEFAULT_PAGE_SIZE) + + topic1_subscriptions = conn.get_all_subscriptions_by_topic( + topic1_arn, next_token=next_token) + topic1_subscriptions["ListSubscriptionsByTopicResponse"]["ListSubscriptionsByTopicResult"][ + "Subscriptions"].should.have.length_of(int(DEFAULT_PAGE_SIZE / 3)) + next_token = topic1_subscriptions["ListSubscriptionsByTopicResponse"][ + 
"ListSubscriptionsByTopicResult"]["NextToken"] + next_token.should.equal(None) diff --git a/tests/test_sns/test_subscriptions_boto3.py b/tests/test_sns/test_subscriptions_boto3.py index 2a56c8213570..d7a32e0c6311 100644 --- a/tests/test_sns/test_subscriptions_boto3.py +++ b/tests/test_sns/test_subscriptions_boto3.py @@ -1,396 +1,396 @@ -from __future__ import unicode_literals -import boto3 -import json - -import sure # noqa - -from botocore.exceptions import ClientError -from nose.tools import assert_raises - -from moto import mock_sns -from moto.sns.models import DEFAULT_PAGE_SIZE - - -@mock_sns -def test_subscribe_sms(): - client = boto3.client('sns', region_name='us-east-1') - client.create_topic(Name="some-topic") - resp = client.create_topic(Name="some-topic") - arn = resp['TopicArn'] - - resp = client.subscribe( - TopicArn=arn, - Protocol='sms', - Endpoint='+15551234567' - ) - resp.should.contain('SubscriptionArn') - -@mock_sns -def test_double_subscription(): - client = boto3.client('sns', region_name='us-east-1') - client.create_topic(Name="some-topic") - resp = client.create_topic(Name="some-topic") - arn = resp['TopicArn'] - - do_subscribe_sqs = lambda sqs_arn: client.subscribe( - TopicArn=arn, - Protocol='sqs', - Endpoint=sqs_arn - ) - resp1 = do_subscribe_sqs('arn:aws:sqs:elasticmq:000000000000:foo') - resp2 = do_subscribe_sqs('arn:aws:sqs:elasticmq:000000000000:foo') - - resp1['SubscriptionArn'].should.equal(resp2['SubscriptionArn']) - - -@mock_sns -def test_subscribe_bad_sms(): - client = boto3.client('sns', region_name='us-east-1') - client.create_topic(Name="some-topic") - resp = client.create_topic(Name="some-topic") - arn = resp['TopicArn'] - - try: - # Test invalid number - client.subscribe( - TopicArn=arn, - Protocol='sms', - Endpoint='NAA+15551234567' - ) - except ClientError as err: - err.response['Error']['Code'].should.equal('InvalidParameter') - - -@mock_sns -def test_creating_subscription(): - conn = boto3.client('sns', region_name='us-east-1') - conn.create_topic(Name="some-topic") - response = conn.list_topics() - topic_arn = response["Topics"][0]['TopicArn'] - - conn.subscribe(TopicArn=topic_arn, - Protocol="http", - Endpoint="http://example.com/") - - subscriptions = conn.list_subscriptions()["Subscriptions"] - subscriptions.should.have.length_of(1) - subscription = subscriptions[0] - subscription["TopicArn"].should.equal(topic_arn) - subscription["Protocol"].should.equal("http") - subscription["SubscriptionArn"].should.contain(topic_arn) - subscription["Endpoint"].should.equal("http://example.com/") - - # Now unsubscribe the subscription - conn.unsubscribe(SubscriptionArn=subscription["SubscriptionArn"]) - - # And there should be zero subscriptions left - subscriptions = conn.list_subscriptions()["Subscriptions"] - subscriptions.should.have.length_of(0) - - -@mock_sns -def test_deleting_subscriptions_by_deleting_topic(): - conn = boto3.client('sns', region_name='us-east-1') - conn.create_topic(Name="some-topic") - response = conn.list_topics() - topic_arn = response["Topics"][0]['TopicArn'] - - conn.subscribe(TopicArn=topic_arn, - Protocol="http", - Endpoint="http://example.com/") - - subscriptions = conn.list_subscriptions()["Subscriptions"] - subscriptions.should.have.length_of(1) - subscription = subscriptions[0] - subscription["TopicArn"].should.equal(topic_arn) - subscription["Protocol"].should.equal("http") - subscription["SubscriptionArn"].should.contain(topic_arn) - subscription["Endpoint"].should.equal("http://example.com/") - - # Now delete the 
topic - conn.delete_topic(TopicArn=topic_arn) - - # And there should now be 0 topics - topics_json = conn.list_topics() - topics = topics_json["Topics"] - topics.should.have.length_of(0) - - # And there should be zero subscriptions left - subscriptions = conn.list_subscriptions()["Subscriptions"] - subscriptions.should.have.length_of(0) - - -@mock_sns -def test_getting_subscriptions_by_topic(): - conn = boto3.client('sns', region_name='us-east-1') - conn.create_topic(Name="topic1") - conn.create_topic(Name="topic2") - - response = conn.list_topics() - topics = response["Topics"] - topic1_arn = topics[0]['TopicArn'] - topic2_arn = topics[1]['TopicArn'] - - conn.subscribe(TopicArn=topic1_arn, - Protocol="http", - Endpoint="http://example1.com/") - conn.subscribe(TopicArn=topic2_arn, - Protocol="http", - Endpoint="http://example2.com/") - - topic1_subscriptions = conn.list_subscriptions_by_topic(TopicArn=topic1_arn)[ - "Subscriptions"] - topic1_subscriptions.should.have.length_of(1) - topic1_subscriptions[0]['Endpoint'].should.equal("http://example1.com/") - - -@mock_sns -def test_subscription_paging(): - conn = boto3.client('sns', region_name='us-east-1') - conn.create_topic(Name="topic1") - - response = conn.list_topics() - topics = response["Topics"] - topic1_arn = topics[0]['TopicArn'] - - for index in range(DEFAULT_PAGE_SIZE + int(DEFAULT_PAGE_SIZE / 3)): - conn.subscribe(TopicArn=topic1_arn, - Protocol='email', - Endpoint='email_' + str(index) + '@test.com') - - all_subscriptions = conn.list_subscriptions() - all_subscriptions["Subscriptions"].should.have.length_of(DEFAULT_PAGE_SIZE) - next_token = all_subscriptions["NextToken"] - next_token.should.equal(str(DEFAULT_PAGE_SIZE)) - - all_subscriptions = conn.list_subscriptions(NextToken=next_token) - all_subscriptions["Subscriptions"].should.have.length_of( - int(DEFAULT_PAGE_SIZE / 3)) - all_subscriptions.shouldnt.have("NextToken") - - topic1_subscriptions = conn.list_subscriptions_by_topic( - TopicArn=topic1_arn) - topic1_subscriptions["Subscriptions"].should.have.length_of( - DEFAULT_PAGE_SIZE) - next_token = topic1_subscriptions["NextToken"] - next_token.should.equal(str(DEFAULT_PAGE_SIZE)) - - topic1_subscriptions = conn.list_subscriptions_by_topic( - TopicArn=topic1_arn, NextToken=next_token) - topic1_subscriptions["Subscriptions"].should.have.length_of( - int(DEFAULT_PAGE_SIZE / 3)) - topic1_subscriptions.shouldnt.have("NextToken") - - -@mock_sns -def test_creating_subscription_with_attributes(): - conn = boto3.client('sns', region_name='us-east-1') - conn.create_topic(Name="some-topic") - response = conn.list_topics() - topic_arn = response["Topics"][0]['TopicArn'] - - delivery_policy = json.dumps({ - 'healthyRetryPolicy': { - "numRetries": 10, - "minDelayTarget": 1, - "maxDelayTarget":2 - } - }) - - filter_policy = json.dumps({ - "store": ["example_corp"], - "event": ["order_cancelled"], - "encrypted": [False], - "customer_interests": ["basketball", "baseball"] - }) - - conn.subscribe(TopicArn=topic_arn, - Protocol="http", - Endpoint="http://example.com/", - Attributes={ - 'RawMessageDelivery': 'true', - 'DeliveryPolicy': delivery_policy, - 'FilterPolicy': filter_policy - }) - - subscriptions = conn.list_subscriptions()["Subscriptions"] - subscriptions.should.have.length_of(1) - subscription = subscriptions[0] - subscription["TopicArn"].should.equal(topic_arn) - subscription["Protocol"].should.equal("http") - subscription["SubscriptionArn"].should.contain(topic_arn) - subscription["Endpoint"].should.equal("http://example.com/") - 
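The FilterPolicy attribute set in this test is the same mechanism the test_filtering_* cases in test_publishing_boto3.py pin down: every key in the policy must be satisfied by a message attribute (AND across keys), a key is satisfied when the attribute's value is any one of the listed values (OR within a key), and a message with no attributes never matches. A minimal sketch of that exact-string semantics, assuming attributes arrive in the {'Type': ..., 'Value': ...} shape the tests assert; matches_filter_policy is an illustrative helper, not moto's implementation:

    def matches_filter_policy(filter_policy, message_attributes):
        # AND across policy keys: every key must be satisfied.
        for key, allowed_values in filter_policy.items():
            attribute = message_attributes.get(key)
            if attribute is None:
                return False  # missing attribute -> no match
            # OR within a key: any one listed value is acceptable.
            if attribute['Value'] not in allowed_values:
                return False
        return True

For example, the policy {'store': ['example_corp'], 'event': ['order_cancelled']} rejects a message carrying store=example_corp but event=order_accepted, exactly as test_filtering_exact_string_AND_matching_no_match expects.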
- # Test the subscription attributes have been set - subscription_arn = subscription["SubscriptionArn"] - attrs = conn.get_subscription_attributes( - SubscriptionArn=subscription_arn - ) - - attrs['Attributes']['RawMessageDelivery'].should.equal('true') - attrs['Attributes']['DeliveryPolicy'].should.equal(delivery_policy) - attrs['Attributes']['FilterPolicy'].should.equal(filter_policy) - - # Now unsubscribe the subscription - conn.unsubscribe(SubscriptionArn=subscription["SubscriptionArn"]) - - # And there should be zero subscriptions left - subscriptions = conn.list_subscriptions()["Subscriptions"] - subscriptions.should.have.length_of(0) - - # invalid attr name - with assert_raises(ClientError): - conn.subscribe(TopicArn=topic_arn, - Protocol="http", - Endpoint="http://example.com/", - Attributes={ - 'InvalidName': 'true' - }) - - -@mock_sns -def test_set_subscription_attributes(): - conn = boto3.client('sns', region_name='us-east-1') - conn.create_topic(Name="some-topic") - response = conn.list_topics() - topic_arn = response["Topics"][0]['TopicArn'] - - conn.subscribe(TopicArn=topic_arn, - Protocol="http", - Endpoint="http://example.com/") - - subscriptions = conn.list_subscriptions()["Subscriptions"] - subscriptions.should.have.length_of(1) - subscription = subscriptions[0] - subscription["TopicArn"].should.equal(topic_arn) - subscription["Protocol"].should.equal("http") - subscription["SubscriptionArn"].should.contain(topic_arn) - subscription["Endpoint"].should.equal("http://example.com/") - - subscription_arn = subscription["SubscriptionArn"] - attrs = conn.get_subscription_attributes( - SubscriptionArn=subscription_arn - ) - attrs.should.have.key('Attributes') - conn.set_subscription_attributes( - SubscriptionArn=subscription_arn, - AttributeName='RawMessageDelivery', - AttributeValue='true' - ) - delivery_policy = json.dumps({ - 'healthyRetryPolicy': { - "numRetries": 10, - "minDelayTarget": 1, - "maxDelayTarget":2 - } - }) - conn.set_subscription_attributes( - SubscriptionArn=subscription_arn, - AttributeName='DeliveryPolicy', - AttributeValue=delivery_policy - ) - - filter_policy = json.dumps({ - "store": ["example_corp"], - "event": ["order_cancelled"], - "encrypted": [False], - "customer_interests": ["basketball", "baseball"] - }) - conn.set_subscription_attributes( - SubscriptionArn=subscription_arn, - AttributeName='FilterPolicy', - AttributeValue=filter_policy - ) - - attrs = conn.get_subscription_attributes( - SubscriptionArn=subscription_arn - ) - - attrs['Attributes']['RawMessageDelivery'].should.equal('true') - attrs['Attributes']['DeliveryPolicy'].should.equal(delivery_policy) - attrs['Attributes']['FilterPolicy'].should.equal(filter_policy) - - # not existing subscription - with assert_raises(ClientError): - conn.set_subscription_attributes( - SubscriptionArn='invalid', - AttributeName='RawMessageDelivery', - AttributeValue='true' - ) - with assert_raises(ClientError): - attrs = conn.get_subscription_attributes( - SubscriptionArn='invalid' - ) - - - # invalid attr name - with assert_raises(ClientError): - conn.set_subscription_attributes( - SubscriptionArn=subscription_arn, - AttributeName='InvalidName', - AttributeValue='true' - ) - - -@mock_sns -def test_check_not_opted_out(): - conn = boto3.client('sns', region_name='us-east-1') - response = conn.check_if_phone_number_is_opted_out(phoneNumber='+447428545375') - - response.should.contain('isOptedOut') - response['isOptedOut'].should.be(False) - - -@mock_sns -def test_check_opted_out(): - # Phone number ends in 99 
so is hardcoded in the endpoint to return opted - # out status - conn = boto3.client('sns', region_name='us-east-1') - response = conn.check_if_phone_number_is_opted_out(phoneNumber='+447428545399') - - response.should.contain('isOptedOut') - response['isOptedOut'].should.be(True) - - -@mock_sns -def test_check_opted_out_invalid(): - conn = boto3.client('sns', region_name='us-east-1') - - # Invalid phone number - with assert_raises(ClientError): - conn.check_if_phone_number_is_opted_out(phoneNumber='+44742LALALA') - - -@mock_sns -def test_list_opted_out(): - conn = boto3.client('sns', region_name='us-east-1') - response = conn.list_phone_numbers_opted_out() - - response.should.contain('phoneNumbers') - len(response['phoneNumbers']).should.be.greater_than(0) - - -@mock_sns -def test_opt_in(): - conn = boto3.client('sns', region_name='us-east-1') - response = conn.list_phone_numbers_opted_out() - current_len = len(response['phoneNumbers']) - assert current_len > 0 - - conn.opt_in_phone_number(phoneNumber=response['phoneNumbers'][0]) - - response = conn.list_phone_numbers_opted_out() - len(response['phoneNumbers']).should.be.greater_than(0) - len(response['phoneNumbers']).should.be.lower_than(current_len) - - -@mock_sns -def test_confirm_subscription(): - conn = boto3.client('sns', region_name='us-east-1') - response = conn.create_topic(Name='testconfirm') - - conn.confirm_subscription( - TopicArn=response['TopicArn'], - Token='2336412f37fb687f5d51e6e241d59b68c4e583a5cee0be6f95bbf97ab8d2441cf47b99e848408adaadf4c197e65f03473d53c4ba398f6abbf38ce2e8ebf7b4ceceb2cd817959bcde1357e58a2861b05288c535822eb88cac3db04f592285249971efc6484194fc4a4586147f16916692', - AuthenticateOnUnsubscribe='true' - ) +from __future__ import unicode_literals +import boto3 +import json + +import sure # noqa + +from botocore.exceptions import ClientError +from nose.tools import assert_raises + +from moto import mock_sns +from moto.sns.models import DEFAULT_PAGE_SIZE + + +@mock_sns +def test_subscribe_sms(): + client = boto3.client('sns', region_name='us-east-1') + client.create_topic(Name="some-topic") + resp = client.create_topic(Name="some-topic") + arn = resp['TopicArn'] + + resp = client.subscribe( + TopicArn=arn, + Protocol='sms', + Endpoint='+15551234567' + ) + resp.should.contain('SubscriptionArn') + +@mock_sns +def test_double_subscription(): + client = boto3.client('sns', region_name='us-east-1') + client.create_topic(Name="some-topic") + resp = client.create_topic(Name="some-topic") + arn = resp['TopicArn'] + + do_subscribe_sqs = lambda sqs_arn: client.subscribe( + TopicArn=arn, + Protocol='sqs', + Endpoint=sqs_arn + ) + resp1 = do_subscribe_sqs('arn:aws:sqs:elasticmq:000000000000:foo') + resp2 = do_subscribe_sqs('arn:aws:sqs:elasticmq:000000000000:foo') + + resp1['SubscriptionArn'].should.equal(resp2['SubscriptionArn']) + + +@mock_sns +def test_subscribe_bad_sms(): + client = boto3.client('sns', region_name='us-east-1') + client.create_topic(Name="some-topic") + resp = client.create_topic(Name="some-topic") + arn = resp['TopicArn'] + + try: + # Test invalid number + client.subscribe( + TopicArn=arn, + Protocol='sms', + Endpoint='NAA+15551234567' + ) + except ClientError as err: + err.response['Error']['Code'].should.equal('InvalidParameter') + + +@mock_sns +def test_creating_subscription(): + conn = boto3.client('sns', region_name='us-east-1') + conn.create_topic(Name="some-topic") + response = conn.list_topics() + topic_arn = response["Topics"][0]['TopicArn'] + + conn.subscribe(TopicArn=topic_arn, + 
Protocol="http", + Endpoint="http://example.com/") + + subscriptions = conn.list_subscriptions()["Subscriptions"] + subscriptions.should.have.length_of(1) + subscription = subscriptions[0] + subscription["TopicArn"].should.equal(topic_arn) + subscription["Protocol"].should.equal("http") + subscription["SubscriptionArn"].should.contain(topic_arn) + subscription["Endpoint"].should.equal("http://example.com/") + + # Now unsubscribe the subscription + conn.unsubscribe(SubscriptionArn=subscription["SubscriptionArn"]) + + # And there should be zero subscriptions left + subscriptions = conn.list_subscriptions()["Subscriptions"] + subscriptions.should.have.length_of(0) + + +@mock_sns +def test_deleting_subscriptions_by_deleting_topic(): + conn = boto3.client('sns', region_name='us-east-1') + conn.create_topic(Name="some-topic") + response = conn.list_topics() + topic_arn = response["Topics"][0]['TopicArn'] + + conn.subscribe(TopicArn=topic_arn, + Protocol="http", + Endpoint="http://example.com/") + + subscriptions = conn.list_subscriptions()["Subscriptions"] + subscriptions.should.have.length_of(1) + subscription = subscriptions[0] + subscription["TopicArn"].should.equal(topic_arn) + subscription["Protocol"].should.equal("http") + subscription["SubscriptionArn"].should.contain(topic_arn) + subscription["Endpoint"].should.equal("http://example.com/") + + # Now delete the topic + conn.delete_topic(TopicArn=topic_arn) + + # And there should now be 0 topics + topics_json = conn.list_topics() + topics = topics_json["Topics"] + topics.should.have.length_of(0) + + # And there should be zero subscriptions left + subscriptions = conn.list_subscriptions()["Subscriptions"] + subscriptions.should.have.length_of(0) + + +@mock_sns +def test_getting_subscriptions_by_topic(): + conn = boto3.client('sns', region_name='us-east-1') + conn.create_topic(Name="topic1") + conn.create_topic(Name="topic2") + + response = conn.list_topics() + topics = response["Topics"] + topic1_arn = topics[0]['TopicArn'] + topic2_arn = topics[1]['TopicArn'] + + conn.subscribe(TopicArn=topic1_arn, + Protocol="http", + Endpoint="http://example1.com/") + conn.subscribe(TopicArn=topic2_arn, + Protocol="http", + Endpoint="http://example2.com/") + + topic1_subscriptions = conn.list_subscriptions_by_topic(TopicArn=topic1_arn)[ + "Subscriptions"] + topic1_subscriptions.should.have.length_of(1) + topic1_subscriptions[0]['Endpoint'].should.equal("http://example1.com/") + + +@mock_sns +def test_subscription_paging(): + conn = boto3.client('sns', region_name='us-east-1') + conn.create_topic(Name="topic1") + + response = conn.list_topics() + topics = response["Topics"] + topic1_arn = topics[0]['TopicArn'] + + for index in range(DEFAULT_PAGE_SIZE + int(DEFAULT_PAGE_SIZE / 3)): + conn.subscribe(TopicArn=topic1_arn, + Protocol='email', + Endpoint='email_' + str(index) + '@test.com') + + all_subscriptions = conn.list_subscriptions() + all_subscriptions["Subscriptions"].should.have.length_of(DEFAULT_PAGE_SIZE) + next_token = all_subscriptions["NextToken"] + next_token.should.equal(str(DEFAULT_PAGE_SIZE)) + + all_subscriptions = conn.list_subscriptions(NextToken=next_token) + all_subscriptions["Subscriptions"].should.have.length_of( + int(DEFAULT_PAGE_SIZE / 3)) + all_subscriptions.shouldnt.have("NextToken") + + topic1_subscriptions = conn.list_subscriptions_by_topic( + TopicArn=topic1_arn) + topic1_subscriptions["Subscriptions"].should.have.length_of( + DEFAULT_PAGE_SIZE) + next_token = topic1_subscriptions["NextToken"] + 
next_token.should.equal(str(DEFAULT_PAGE_SIZE)) + + topic1_subscriptions = conn.list_subscriptions_by_topic( + TopicArn=topic1_arn, NextToken=next_token) + topic1_subscriptions["Subscriptions"].should.have.length_of( + int(DEFAULT_PAGE_SIZE / 3)) + topic1_subscriptions.shouldnt.have("NextToken") + + +@mock_sns +def test_creating_subscription_with_attributes(): + conn = boto3.client('sns', region_name='us-east-1') + conn.create_topic(Name="some-topic") + response = conn.list_topics() + topic_arn = response["Topics"][0]['TopicArn'] + + delivery_policy = json.dumps({ + 'healthyRetryPolicy': { + "numRetries": 10, + "minDelayTarget": 1, + "maxDelayTarget":2 + } + }) + + filter_policy = json.dumps({ + "store": ["example_corp"], + "event": ["order_cancelled"], + "encrypted": [False], + "customer_interests": ["basketball", "baseball"] + }) + + conn.subscribe(TopicArn=topic_arn, + Protocol="http", + Endpoint="http://example.com/", + Attributes={ + 'RawMessageDelivery': 'true', + 'DeliveryPolicy': delivery_policy, + 'FilterPolicy': filter_policy + }) + + subscriptions = conn.list_subscriptions()["Subscriptions"] + subscriptions.should.have.length_of(1) + subscription = subscriptions[0] + subscription["TopicArn"].should.equal(topic_arn) + subscription["Protocol"].should.equal("http") + subscription["SubscriptionArn"].should.contain(topic_arn) + subscription["Endpoint"].should.equal("http://example.com/") + + # Test the subscription attributes have been set + subscription_arn = subscription["SubscriptionArn"] + attrs = conn.get_subscription_attributes( + SubscriptionArn=subscription_arn + ) + + attrs['Attributes']['RawMessageDelivery'].should.equal('true') + attrs['Attributes']['DeliveryPolicy'].should.equal(delivery_policy) + attrs['Attributes']['FilterPolicy'].should.equal(filter_policy) + + # Now unsubscribe the subscription + conn.unsubscribe(SubscriptionArn=subscription["SubscriptionArn"]) + + # And there should be zero subscriptions left + subscriptions = conn.list_subscriptions()["Subscriptions"] + subscriptions.should.have.length_of(0) + + # invalid attr name + with assert_raises(ClientError): + conn.subscribe(TopicArn=topic_arn, + Protocol="http", + Endpoint="http://example.com/", + Attributes={ + 'InvalidName': 'true' + }) + + +@mock_sns +def test_set_subscription_attributes(): + conn = boto3.client('sns', region_name='us-east-1') + conn.create_topic(Name="some-topic") + response = conn.list_topics() + topic_arn = response["Topics"][0]['TopicArn'] + + conn.subscribe(TopicArn=topic_arn, + Protocol="http", + Endpoint="http://example.com/") + + subscriptions = conn.list_subscriptions()["Subscriptions"] + subscriptions.should.have.length_of(1) + subscription = subscriptions[0] + subscription["TopicArn"].should.equal(topic_arn) + subscription["Protocol"].should.equal("http") + subscription["SubscriptionArn"].should.contain(topic_arn) + subscription["Endpoint"].should.equal("http://example.com/") + + subscription_arn = subscription["SubscriptionArn"] + attrs = conn.get_subscription_attributes( + SubscriptionArn=subscription_arn + ) + attrs.should.have.key('Attributes') + conn.set_subscription_attributes( + SubscriptionArn=subscription_arn, + AttributeName='RawMessageDelivery', + AttributeValue='true' + ) + delivery_policy = json.dumps({ + 'healthyRetryPolicy': { + "numRetries": 10, + "minDelayTarget": 1, + "maxDelayTarget":2 + } + }) + conn.set_subscription_attributes( + SubscriptionArn=subscription_arn, + AttributeName='DeliveryPolicy', + AttributeValue=delivery_policy + ) + + filter_policy = 
json.dumps({ + "store": ["example_corp"], + "event": ["order_cancelled"], + "encrypted": [False], + "customer_interests": ["basketball", "baseball"] + }) + conn.set_subscription_attributes( + SubscriptionArn=subscription_arn, + AttributeName='FilterPolicy', + AttributeValue=filter_policy + ) + + attrs = conn.get_subscription_attributes( + SubscriptionArn=subscription_arn + ) + + attrs['Attributes']['RawMessageDelivery'].should.equal('true') + attrs['Attributes']['DeliveryPolicy'].should.equal(delivery_policy) + attrs['Attributes']['FilterPolicy'].should.equal(filter_policy) + + # not existing subscription + with assert_raises(ClientError): + conn.set_subscription_attributes( + SubscriptionArn='invalid', + AttributeName='RawMessageDelivery', + AttributeValue='true' + ) + with assert_raises(ClientError): + attrs = conn.get_subscription_attributes( + SubscriptionArn='invalid' + ) + + + # invalid attr name + with assert_raises(ClientError): + conn.set_subscription_attributes( + SubscriptionArn=subscription_arn, + AttributeName='InvalidName', + AttributeValue='true' + ) + + +@mock_sns +def test_check_not_opted_out(): + conn = boto3.client('sns', region_name='us-east-1') + response = conn.check_if_phone_number_is_opted_out(phoneNumber='+447428545375') + + response.should.contain('isOptedOut') + response['isOptedOut'].should.be(False) + + +@mock_sns +def test_check_opted_out(): + # Phone number ends in 99 so is hardcoded in the endpoint to return opted + # out status + conn = boto3.client('sns', region_name='us-east-1') + response = conn.check_if_phone_number_is_opted_out(phoneNumber='+447428545399') + + response.should.contain('isOptedOut') + response['isOptedOut'].should.be(True) + + +@mock_sns +def test_check_opted_out_invalid(): + conn = boto3.client('sns', region_name='us-east-1') + + # Invalid phone number + with assert_raises(ClientError): + conn.check_if_phone_number_is_opted_out(phoneNumber='+44742LALALA') + + +@mock_sns +def test_list_opted_out(): + conn = boto3.client('sns', region_name='us-east-1') + response = conn.list_phone_numbers_opted_out() + + response.should.contain('phoneNumbers') + len(response['phoneNumbers']).should.be.greater_than(0) + + +@mock_sns +def test_opt_in(): + conn = boto3.client('sns', region_name='us-east-1') + response = conn.list_phone_numbers_opted_out() + current_len = len(response['phoneNumbers']) + assert current_len > 0 + + conn.opt_in_phone_number(phoneNumber=response['phoneNumbers'][0]) + + response = conn.list_phone_numbers_opted_out() + len(response['phoneNumbers']).should.be.greater_than(0) + len(response['phoneNumbers']).should.be.lower_than(current_len) + + +@mock_sns +def test_confirm_subscription(): + conn = boto3.client('sns', region_name='us-east-1') + response = conn.create_topic(Name='testconfirm') + + conn.confirm_subscription( + TopicArn=response['TopicArn'], + Token='2336412f37fb687f5d51e6e241d59b68c4e583a5cee0be6f95bbf97ab8d2441cf47b99e848408adaadf4c197e65f03473d53c4ba398f6abbf38ce2e8ebf7b4ceceb2cd817959bcde1357e58a2861b05288c535822eb88cac3db04f592285249971efc6484194fc4a4586147f16916692', + AuthenticateOnUnsubscribe='true' + ) diff --git a/tests/test_sns/test_topics.py b/tests/test_sns/test_topics.py index 1b039c51d611..928db8d02957 100644 --- a/tests/test_sns/test_topics.py +++ b/tests/test_sns/test_topics.py @@ -1,133 +1,133 @@ -from __future__ import unicode_literals -import boto -import json -import six - -import sure # noqa - -from boto.exception import BotoServerError -from moto import mock_sns_deprecated -from moto.sns.models 
import DEFAULT_TOPIC_POLICY, DEFAULT_EFFECTIVE_DELIVERY_POLICY, DEFAULT_PAGE_SIZE - - -@mock_sns_deprecated -def test_create_and_delete_topic(): - conn = boto.connect_sns() - conn.create_topic("some-topic") - - topics_json = conn.get_all_topics() - topics = topics_json["ListTopicsResponse"]["ListTopicsResult"]["Topics"] - topics.should.have.length_of(1) - topics[0]['TopicArn'].should.equal( - "arn:aws:sns:{0}:123456789012:some-topic" - .format(conn.region.name) - ) - - # Delete the topic - conn.delete_topic(topics[0]['TopicArn']) - - # And there should now be 0 topics - topics_json = conn.get_all_topics() - topics = topics_json["ListTopicsResponse"]["ListTopicsResult"]["Topics"] - topics.should.have.length_of(0) - - -@mock_sns_deprecated -def test_get_missing_topic(): - conn = boto.connect_sns() - conn.get_topic_attributes.when.called_with( - "a-fake-arn").should.throw(BotoServerError) - - -@mock_sns_deprecated -def test_create_topic_in_multiple_regions(): - for region in ['us-west-1', 'us-west-2']: - conn = boto.sns.connect_to_region(region) - conn.create_topic("some-topic") - list(conn.get_all_topics()["ListTopicsResponse"][ - "ListTopicsResult"]["Topics"]).should.have.length_of(1) - - -@mock_sns_deprecated -def test_topic_corresponds_to_region(): - for region in ['us-east-1', 'us-west-2']: - conn = boto.sns.connect_to_region(region) - conn.create_topic("some-topic") - topics_json = conn.get_all_topics() - topic_arn = topics_json["ListTopicsResponse"][ - "ListTopicsResult"]["Topics"][0]['TopicArn'] - topic_arn.should.equal( - "arn:aws:sns:{0}:123456789012:some-topic".format(region)) - - -@mock_sns_deprecated -def test_topic_attributes(): - conn = boto.connect_sns() - conn.create_topic("some-topic") - - topics_json = conn.get_all_topics() - topic_arn = topics_json["ListTopicsResponse"][ - "ListTopicsResult"]["Topics"][0]['TopicArn'] - - attributes = conn.get_topic_attributes(topic_arn)['GetTopicAttributesResponse'][ - 'GetTopicAttributesResult']['Attributes'] - attributes["TopicArn"].should.equal( - "arn:aws:sns:{0}:123456789012:some-topic" - .format(conn.region.name) - ) - attributes["Owner"].should.equal(123456789012) - json.loads(attributes["Policy"]).should.equal(DEFAULT_TOPIC_POLICY) - attributes["DisplayName"].should.equal("") - attributes["SubscriptionsPending"].should.equal(0) - attributes["SubscriptionsConfirmed"].should.equal(0) - attributes["SubscriptionsDeleted"].should.equal(0) - attributes["DeliveryPolicy"].should.equal("") - json.loads(attributes["EffectiveDeliveryPolicy"]).should.equal( - DEFAULT_EFFECTIVE_DELIVERY_POLICY) - - # boto can't handle prefix-mandatory strings: - # i.e. 
unicode on Python 2 -- u"foobar" - # and bytes on Python 3 -- b"foobar" - if six.PY2: - policy = {b"foo": b"bar"} - displayname = b"My display name" - delivery = {b"http": {b"defaultHealthyRetryPolicy": {b"numRetries": 5}}} - else: - policy = {u"foo": u"bar"} - displayname = u"My display name" - delivery = {u"http": {u"defaultHealthyRetryPolicy": {u"numRetries": 5}}} - conn.set_topic_attributes(topic_arn, "Policy", policy) - conn.set_topic_attributes(topic_arn, "DisplayName", displayname) - conn.set_topic_attributes(topic_arn, "DeliveryPolicy", delivery) - - attributes = conn.get_topic_attributes(topic_arn)['GetTopicAttributesResponse'][ - 'GetTopicAttributesResult']['Attributes'] - attributes["Policy"].should.equal("{'foo': 'bar'}") - attributes["DisplayName"].should.equal("My display name") - attributes["DeliveryPolicy"].should.equal( - "{'http': {'defaultHealthyRetryPolicy': {'numRetries': 5}}}") - - -@mock_sns_deprecated -def test_topic_paging(): - conn = boto.connect_sns() - for index in range(DEFAULT_PAGE_SIZE + int(DEFAULT_PAGE_SIZE / 2)): - conn.create_topic("some-topic_" + str(index)) - - topics_json = conn.get_all_topics() - topics_list = topics_json["ListTopicsResponse"][ - "ListTopicsResult"]["Topics"] - next_token = topics_json["ListTopicsResponse"][ - "ListTopicsResult"]["NextToken"] - - len(topics_list).should.equal(DEFAULT_PAGE_SIZE) - next_token.should.equal(DEFAULT_PAGE_SIZE) - - topics_json = conn.get_all_topics(next_token=next_token) - topics_list = topics_json["ListTopicsResponse"][ - "ListTopicsResult"]["Topics"] - next_token = topics_json["ListTopicsResponse"][ - "ListTopicsResult"]["NextToken"] - - topics_list.should.have.length_of(int(DEFAULT_PAGE_SIZE / 2)) - next_token.should.equal(None) +from __future__ import unicode_literals +import boto +import json +import six + +import sure # noqa + +from boto.exception import BotoServerError +from moto import mock_sns_deprecated +from moto.sns.models import DEFAULT_TOPIC_POLICY, DEFAULT_EFFECTIVE_DELIVERY_POLICY, DEFAULT_PAGE_SIZE + + +@mock_sns_deprecated +def test_create_and_delete_topic(): + conn = boto.connect_sns() + conn.create_topic("some-topic") + + topics_json = conn.get_all_topics() + topics = topics_json["ListTopicsResponse"]["ListTopicsResult"]["Topics"] + topics.should.have.length_of(1) + topics[0]['TopicArn'].should.equal( + "arn:aws:sns:{0}:123456789012:some-topic" + .format(conn.region.name) + ) + + # Delete the topic + conn.delete_topic(topics[0]['TopicArn']) + + # And there should now be 0 topics + topics_json = conn.get_all_topics() + topics = topics_json["ListTopicsResponse"]["ListTopicsResult"]["Topics"] + topics.should.have.length_of(0) + + +@mock_sns_deprecated +def test_get_missing_topic(): + conn = boto.connect_sns() + conn.get_topic_attributes.when.called_with( + "a-fake-arn").should.throw(BotoServerError) + + +@mock_sns_deprecated +def test_create_topic_in_multiple_regions(): + for region in ['us-west-1', 'us-west-2']: + conn = boto.sns.connect_to_region(region) + conn.create_topic("some-topic") + list(conn.get_all_topics()["ListTopicsResponse"][ + "ListTopicsResult"]["Topics"]).should.have.length_of(1) + + +@mock_sns_deprecated +def test_topic_corresponds_to_region(): + for region in ['us-east-1', 'us-west-2']: + conn = boto.sns.connect_to_region(region) + conn.create_topic("some-topic") + topics_json = conn.get_all_topics() + topic_arn = topics_json["ListTopicsResponse"][ + "ListTopicsResult"]["Topics"][0]['TopicArn'] + topic_arn.should.equal( + 
"arn:aws:sns:{0}:123456789012:some-topic".format(region)) + + +@mock_sns_deprecated +def test_topic_attributes(): + conn = boto.connect_sns() + conn.create_topic("some-topic") + + topics_json = conn.get_all_topics() + topic_arn = topics_json["ListTopicsResponse"][ + "ListTopicsResult"]["Topics"][0]['TopicArn'] + + attributes = conn.get_topic_attributes(topic_arn)['GetTopicAttributesResponse'][ + 'GetTopicAttributesResult']['Attributes'] + attributes["TopicArn"].should.equal( + "arn:aws:sns:{0}:123456789012:some-topic" + .format(conn.region.name) + ) + attributes["Owner"].should.equal(123456789012) + json.loads(attributes["Policy"]).should.equal(DEFAULT_TOPIC_POLICY) + attributes["DisplayName"].should.equal("") + attributes["SubscriptionsPending"].should.equal(0) + attributes["SubscriptionsConfirmed"].should.equal(0) + attributes["SubscriptionsDeleted"].should.equal(0) + attributes["DeliveryPolicy"].should.equal("") + json.loads(attributes["EffectiveDeliveryPolicy"]).should.equal( + DEFAULT_EFFECTIVE_DELIVERY_POLICY) + + # boto can't handle prefix-mandatory strings: + # i.e. unicode on Python 2 -- u"foobar" + # and bytes on Python 3 -- b"foobar" + if six.PY2: + policy = {b"foo": b"bar"} + displayname = b"My display name" + delivery = {b"http": {b"defaultHealthyRetryPolicy": {b"numRetries": 5}}} + else: + policy = {u"foo": u"bar"} + displayname = u"My display name" + delivery = {u"http": {u"defaultHealthyRetryPolicy": {u"numRetries": 5}}} + conn.set_topic_attributes(topic_arn, "Policy", policy) + conn.set_topic_attributes(topic_arn, "DisplayName", displayname) + conn.set_topic_attributes(topic_arn, "DeliveryPolicy", delivery) + + attributes = conn.get_topic_attributes(topic_arn)['GetTopicAttributesResponse'][ + 'GetTopicAttributesResult']['Attributes'] + attributes["Policy"].should.equal("{'foo': 'bar'}") + attributes["DisplayName"].should.equal("My display name") + attributes["DeliveryPolicy"].should.equal( + "{'http': {'defaultHealthyRetryPolicy': {'numRetries': 5}}}") + + +@mock_sns_deprecated +def test_topic_paging(): + conn = boto.connect_sns() + for index in range(DEFAULT_PAGE_SIZE + int(DEFAULT_PAGE_SIZE / 2)): + conn.create_topic("some-topic_" + str(index)) + + topics_json = conn.get_all_topics() + topics_list = topics_json["ListTopicsResponse"][ + "ListTopicsResult"]["Topics"] + next_token = topics_json["ListTopicsResponse"][ + "ListTopicsResult"]["NextToken"] + + len(topics_list).should.equal(DEFAULT_PAGE_SIZE) + next_token.should.equal(DEFAULT_PAGE_SIZE) + + topics_json = conn.get_all_topics(next_token=next_token) + topics_list = topics_json["ListTopicsResponse"][ + "ListTopicsResult"]["Topics"] + next_token = topics_json["ListTopicsResponse"][ + "ListTopicsResult"]["NextToken"] + + topics_list.should.have.length_of(int(DEFAULT_PAGE_SIZE / 2)) + next_token.should.equal(None) diff --git a/tests/test_sns/test_topics_boto3.py b/tests/test_sns/test_topics_boto3.py index 7d9a27b183b4..f836535ef800 100644 --- a/tests/test_sns/test_topics_boto3.py +++ b/tests/test_sns/test_topics_boto3.py @@ -1,190 +1,190 @@ -from __future__ import unicode_literals -import boto3 -import six -import json - -import sure # noqa - -from botocore.exceptions import ClientError -from moto import mock_sns -from moto.sns.models import DEFAULT_TOPIC_POLICY, DEFAULT_EFFECTIVE_DELIVERY_POLICY, DEFAULT_PAGE_SIZE - - -@mock_sns -def test_create_and_delete_topic(): - conn = boto3.client("sns", region_name="us-east-1") - for topic_name in ('some-topic', '-some-topic-', '_some-topic_', 'a' * 256): - 
conn.create_topic(Name=topic_name) - - topics_json = conn.list_topics() - topics = topics_json["Topics"] - topics.should.have.length_of(1) - topics[0]['TopicArn'].should.equal( - "arn:aws:sns:{0}:123456789012:{1}" - .format(conn._client_config.region_name, topic_name) - ) - - # Delete the topic - conn.delete_topic(TopicArn=topics[0]['TopicArn']) - - # And there should now be 0 topics - topics_json = conn.list_topics() - topics = topics_json["Topics"] - topics.should.have.length_of(0) - -@mock_sns -def test_create_topic_should_be_indempodent(): - conn = boto3.client("sns", region_name="us-east-1") - topic_arn = conn.create_topic(Name="some-topic")['TopicArn'] - conn.set_topic_attributes( - TopicArn=topic_arn, - AttributeName="DisplayName", - AttributeValue="should_be_set" - ) - topic_display_name = conn.get_topic_attributes( - TopicArn=topic_arn - )['Attributes']['DisplayName'] - topic_display_name.should.be.equal("should_be_set") - - #recreate topic to prove indempodentcy - topic_arn = conn.create_topic(Name="some-topic")['TopicArn'] - topic_display_name = conn.get_topic_attributes( - TopicArn=topic_arn - )['Attributes']['DisplayName'] - topic_display_name.should.be.equal("should_be_set") - -@mock_sns -def test_get_missing_topic(): - conn = boto3.client("sns", region_name="us-east-1") - conn.get_topic_attributes.when.called_with( - TopicArn="a-fake-arn").should.throw(ClientError) - -@mock_sns -def test_create_topic_must_meet_constraints(): - conn = boto3.client("sns", region_name="us-east-1") - common_random_chars = [':', ";", "!", "@", "|", "^", "%"] - for char in common_random_chars: - conn.create_topic.when.called_with( - Name="no%s_invalidchar" % char).should.throw(ClientError) - conn.create_topic.when.called_with( - Name="no spaces allowed").should.throw(ClientError) - - -@mock_sns -def test_create_topic_should_be_of_certain_length(): - conn = boto3.client("sns", region_name="us-east-1") - too_short = "" - conn.create_topic.when.called_with( - Name=too_short).should.throw(ClientError) - too_long = "x" * 257 - conn.create_topic.when.called_with( - Name=too_long).should.throw(ClientError) - - -@mock_sns -def test_create_topic_in_multiple_regions(): - for region in ['us-west-1', 'us-west-2']: - conn = boto3.client("sns", region_name=region) - conn.create_topic(Name="some-topic") - list(conn.list_topics()["Topics"]).should.have.length_of(1) - - -@mock_sns -def test_topic_corresponds_to_region(): - for region in ['us-east-1', 'us-west-2']: - conn = boto3.client("sns", region_name=region) - conn.create_topic(Name="some-topic") - topics_json = conn.list_topics() - topic_arn = topics_json["Topics"][0]['TopicArn'] - topic_arn.should.equal( - "arn:aws:sns:{0}:123456789012:some-topic".format(region)) - - -@mock_sns -def test_topic_attributes(): - conn = boto3.client("sns", region_name="us-east-1") - conn.create_topic(Name="some-topic") - - topics_json = conn.list_topics() - topic_arn = topics_json["Topics"][0]['TopicArn'] - - attributes = conn.get_topic_attributes(TopicArn=topic_arn)['Attributes'] - attributes["TopicArn"].should.equal( - "arn:aws:sns:{0}:123456789012:some-topic" - .format(conn._client_config.region_name) - ) - attributes["Owner"].should.equal('123456789012') - json.loads(attributes["Policy"]).should.equal(DEFAULT_TOPIC_POLICY) - attributes["DisplayName"].should.equal("") - attributes["SubscriptionsPending"].should.equal('0') - attributes["SubscriptionsConfirmed"].should.equal('0') - attributes["SubscriptionsDeleted"].should.equal('0') - 
attributes["DeliveryPolicy"].should.equal("") - json.loads(attributes["EffectiveDeliveryPolicy"]).should.equal( - DEFAULT_EFFECTIVE_DELIVERY_POLICY) - - # boto can't handle prefix-mandatory strings: - # i.e. unicode on Python 2 -- u"foobar" - # and bytes on Python 3 -- b"foobar" - if six.PY2: - policy = json.dumps({b"foo": b"bar"}) - displayname = b"My display name" - delivery = json.dumps( - {b"http": {b"defaultHealthyRetryPolicy": {b"numRetries": 5}}}) - else: - policy = json.dumps({u"foo": u"bar"}) - displayname = u"My display name" - delivery = json.dumps( - {u"http": {u"defaultHealthyRetryPolicy": {u"numRetries": 5}}}) - conn.set_topic_attributes(TopicArn=topic_arn, - AttributeName="Policy", - AttributeValue=policy) - conn.set_topic_attributes(TopicArn=topic_arn, - AttributeName="DisplayName", - AttributeValue=displayname) - conn.set_topic_attributes(TopicArn=topic_arn, - AttributeName="DeliveryPolicy", - AttributeValue=delivery) - - attributes = conn.get_topic_attributes(TopicArn=topic_arn)['Attributes'] - attributes["Policy"].should.equal('{"foo": "bar"}') - attributes["DisplayName"].should.equal("My display name") - attributes["DeliveryPolicy"].should.equal( - '{"http": {"defaultHealthyRetryPolicy": {"numRetries": 5}}}') - - -@mock_sns -def test_topic_paging(): - conn = boto3.client("sns", region_name="us-east-1") - for index in range(DEFAULT_PAGE_SIZE + int(DEFAULT_PAGE_SIZE / 2)): - conn.create_topic(Name="some-topic_" + str(index)) - - response = conn.list_topics() - topics_list = response["Topics"] - next_token = response["NextToken"] - - len(topics_list).should.equal(DEFAULT_PAGE_SIZE) - int(next_token).should.equal(DEFAULT_PAGE_SIZE) - - response = conn.list_topics(NextToken=next_token) - topics_list = response["Topics"] - response.shouldnt.have("NextToken") - - topics_list.should.have.length_of(int(DEFAULT_PAGE_SIZE / 2)) - - -@mock_sns -def test_add_remove_permissions(): - conn = boto3.client('sns', region_name='us-east-1') - response = conn.create_topic(Name='testpermissions') - - conn.add_permission( - TopicArn=response['TopicArn'], - Label='Test1234', - AWSAccountId=['999999999999'], - ActionName=['AddPermission'] - ) - conn.remove_permission( - TopicArn=response['TopicArn'], - Label='Test1234' - ) +from __future__ import unicode_literals +import boto3 +import six +import json + +import sure # noqa + +from botocore.exceptions import ClientError +from moto import mock_sns +from moto.sns.models import DEFAULT_TOPIC_POLICY, DEFAULT_EFFECTIVE_DELIVERY_POLICY, DEFAULT_PAGE_SIZE + + +@mock_sns +def test_create_and_delete_topic(): + conn = boto3.client("sns", region_name="us-east-1") + for topic_name in ('some-topic', '-some-topic-', '_some-topic_', 'a' * 256): + conn.create_topic(Name=topic_name) + + topics_json = conn.list_topics() + topics = topics_json["Topics"] + topics.should.have.length_of(1) + topics[0]['TopicArn'].should.equal( + "arn:aws:sns:{0}:123456789012:{1}" + .format(conn._client_config.region_name, topic_name) + ) + + # Delete the topic + conn.delete_topic(TopicArn=topics[0]['TopicArn']) + + # And there should now be 0 topics + topics_json = conn.list_topics() + topics = topics_json["Topics"] + topics.should.have.length_of(0) + +@mock_sns +def test_create_topic_should_be_indempodent(): + conn = boto3.client("sns", region_name="us-east-1") + topic_arn = conn.create_topic(Name="some-topic")['TopicArn'] + conn.set_topic_attributes( + TopicArn=topic_arn, + AttributeName="DisplayName", + AttributeValue="should_be_set" + ) + topic_display_name = 
conn.get_topic_attributes( + TopicArn=topic_arn + )['Attributes']['DisplayName'] + topic_display_name.should.be.equal("should_be_set") + + #recreate topic to prove indempodentcy + topic_arn = conn.create_topic(Name="some-topic")['TopicArn'] + topic_display_name = conn.get_topic_attributes( + TopicArn=topic_arn + )['Attributes']['DisplayName'] + topic_display_name.should.be.equal("should_be_set") + +@mock_sns +def test_get_missing_topic(): + conn = boto3.client("sns", region_name="us-east-1") + conn.get_topic_attributes.when.called_with( + TopicArn="a-fake-arn").should.throw(ClientError) + +@mock_sns +def test_create_topic_must_meet_constraints(): + conn = boto3.client("sns", region_name="us-east-1") + common_random_chars = [':', ";", "!", "@", "|", "^", "%"] + for char in common_random_chars: + conn.create_topic.when.called_with( + Name="no%s_invalidchar" % char).should.throw(ClientError) + conn.create_topic.when.called_with( + Name="no spaces allowed").should.throw(ClientError) + + +@mock_sns +def test_create_topic_should_be_of_certain_length(): + conn = boto3.client("sns", region_name="us-east-1") + too_short = "" + conn.create_topic.when.called_with( + Name=too_short).should.throw(ClientError) + too_long = "x" * 257 + conn.create_topic.when.called_with( + Name=too_long).should.throw(ClientError) + + +@mock_sns +def test_create_topic_in_multiple_regions(): + for region in ['us-west-1', 'us-west-2']: + conn = boto3.client("sns", region_name=region) + conn.create_topic(Name="some-topic") + list(conn.list_topics()["Topics"]).should.have.length_of(1) + + +@mock_sns +def test_topic_corresponds_to_region(): + for region in ['us-east-1', 'us-west-2']: + conn = boto3.client("sns", region_name=region) + conn.create_topic(Name="some-topic") + topics_json = conn.list_topics() + topic_arn = topics_json["Topics"][0]['TopicArn'] + topic_arn.should.equal( + "arn:aws:sns:{0}:123456789012:some-topic".format(region)) + + +@mock_sns +def test_topic_attributes(): + conn = boto3.client("sns", region_name="us-east-1") + conn.create_topic(Name="some-topic") + + topics_json = conn.list_topics() + topic_arn = topics_json["Topics"][0]['TopicArn'] + + attributes = conn.get_topic_attributes(TopicArn=topic_arn)['Attributes'] + attributes["TopicArn"].should.equal( + "arn:aws:sns:{0}:123456789012:some-topic" + .format(conn._client_config.region_name) + ) + attributes["Owner"].should.equal('123456789012') + json.loads(attributes["Policy"]).should.equal(DEFAULT_TOPIC_POLICY) + attributes["DisplayName"].should.equal("") + attributes["SubscriptionsPending"].should.equal('0') + attributes["SubscriptionsConfirmed"].should.equal('0') + attributes["SubscriptionsDeleted"].should.equal('0') + attributes["DeliveryPolicy"].should.equal("") + json.loads(attributes["EffectiveDeliveryPolicy"]).should.equal( + DEFAULT_EFFECTIVE_DELIVERY_POLICY) + + # boto can't handle prefix-mandatory strings: + # i.e. 
unicode on Python 2 -- u"foobar" + # and bytes on Python 3 -- b"foobar" + if six.PY2: + policy = json.dumps({b"foo": b"bar"}) + displayname = b"My display name" + delivery = json.dumps( + {b"http": {b"defaultHealthyRetryPolicy": {b"numRetries": 5}}}) + else: + policy = json.dumps({u"foo": u"bar"}) + displayname = u"My display name" + delivery = json.dumps( + {u"http": {u"defaultHealthyRetryPolicy": {u"numRetries": 5}}}) + conn.set_topic_attributes(TopicArn=topic_arn, + AttributeName="Policy", + AttributeValue=policy) + conn.set_topic_attributes(TopicArn=topic_arn, + AttributeName="DisplayName", + AttributeValue=displayname) + conn.set_topic_attributes(TopicArn=topic_arn, + AttributeName="DeliveryPolicy", + AttributeValue=delivery) + + attributes = conn.get_topic_attributes(TopicArn=topic_arn)['Attributes'] + attributes["Policy"].should.equal('{"foo": "bar"}') + attributes["DisplayName"].should.equal("My display name") + attributes["DeliveryPolicy"].should.equal( + '{"http": {"defaultHealthyRetryPolicy": {"numRetries": 5}}}') + + +@mock_sns +def test_topic_paging(): + conn = boto3.client("sns", region_name="us-east-1") + for index in range(DEFAULT_PAGE_SIZE + int(DEFAULT_PAGE_SIZE / 2)): + conn.create_topic(Name="some-topic_" + str(index)) + + response = conn.list_topics() + topics_list = response["Topics"] + next_token = response["NextToken"] + + len(topics_list).should.equal(DEFAULT_PAGE_SIZE) + int(next_token).should.equal(DEFAULT_PAGE_SIZE) + + response = conn.list_topics(NextToken=next_token) + topics_list = response["Topics"] + response.shouldnt.have("NextToken") + + topics_list.should.have.length_of(int(DEFAULT_PAGE_SIZE / 2)) + + +@mock_sns +def test_add_remove_permissions(): + conn = boto3.client('sns', region_name='us-east-1') + response = conn.create_topic(Name='testpermissions') + + conn.add_permission( + TopicArn=response['TopicArn'], + Label='Test1234', + AWSAccountId=['999999999999'], + ActionName=['AddPermission'] + ) + conn.remove_permission( + TopicArn=response['TopicArn'], + Label='Test1234' + ) diff --git a/tests/test_sqs/test_server.py b/tests/test_sqs/test_server.py index e7f745fd265c..b2b233bde0a2 100644 --- a/tests/test_sqs/test_server.py +++ b/tests/test_sqs/test_server.py @@ -1,85 +1,85 @@ -from __future__ import unicode_literals - -import re -import sure # noqa -import threading -import time - -import moto.server as server - -''' -Test the different server responses -''' - - -def test_sqs_list_identities(): - backend = server.create_backend_app("sqs") - test_client = backend.test_client() - - res = test_client.get('/?Action=ListQueues') - res.data.should.contain(b"ListQueuesResponse") - - # Make sure that we can receive messages from queues whose name contains dots (".") - # The AWS API mandates that the names of FIFO queues use the suffix ".fifo" - # See: https://github.com/spulec/moto/issues/866 - - for queue_name in ('testqueue', 'otherqueue.fifo'): - - res = test_client.put('/?Action=CreateQueue&QueueName=%s' % queue_name) - - - res = test_client.put( - '/123/%s?MessageBody=test-message&Action=SendMessage' % queue_name) - - res = test_client.get( - '/123/%s?Action=ReceiveMessage&MaxNumberOfMessages=1' % queue_name) - - message = re.search("<Body>(.*?)</Body>", - res.data.decode('utf-8')).groups()[0] - message.should.equal('test-message') - - res = test_client.get('/?Action=ListQueues&QueueNamePrefix=other') - res.data.should.contain(b'otherqueue.fifo') - res.data.should_not.contain(b'testqueue') - - -def test_messages_polling(): - backend = 
server.create_backend_app("sqs") - test_client = backend.test_client() - messages = [] - - test_client.put('/?Action=CreateQueue&QueueName=testqueue') - - def insert_messages(): - messages_count = 5 - while messages_count > 0: - test_client.put( - '/123/testqueue?MessageBody=test-message&Action=SendMessage' - '&Attribute.1.Name=WaitTimeSeconds&Attribute.1.Value=10' - ) - messages_count -= 1 - time.sleep(.5) - - def get_messages(): - count = 0 - while count < 5: - msg_res = test_client.get( - '/123/testqueue?Action=ReceiveMessage&MaxNumberOfMessages=1&WaitTimeSeconds=5' - ) - new_msgs = re.findall("<Body>(.*?)</Body>", - msg_res.data.decode('utf-8')) - count += len(new_msgs) - messages.append(new_msgs) - - get_messages_thread = threading.Thread(target=get_messages) - insert_messages_thread = threading.Thread(target=insert_messages) - - get_messages_thread.start() - insert_messages_thread.start() - - get_messages_thread.join() - insert_messages_thread.join() - - # got each message in a separate call to ReceiveMessage, despite the long - # WaitTimeSeconds - assert len(messages) == 5 +from __future__ import unicode_literals + +import re +import sure # noqa +import threading +import time + +import moto.server as server + +''' +Test the different server responses +''' + + +def test_sqs_list_identities(): + backend = server.create_backend_app("sqs") + test_client = backend.test_client() + + res = test_client.get('/?Action=ListQueues') + res.data.should.contain(b"ListQueuesResponse") + + # Make sure that we can receive messages from queues whose name contains dots (".") + # The AWS API mandates that the names of FIFO queues use the suffix ".fifo" + # See: https://github.com/spulec/moto/issues/866 + + for queue_name in ('testqueue', 'otherqueue.fifo'): + + res = test_client.put('/?Action=CreateQueue&QueueName=%s' % queue_name) + + + res = test_client.put( + '/123/%s?MessageBody=test-message&Action=SendMessage' % queue_name) + + res = test_client.get( + '/123/%s?Action=ReceiveMessage&MaxNumberOfMessages=1' % queue_name) + + message = re.search("<Body>(.*?)</Body>", + res.data.decode('utf-8')).groups()[0] + message.should.equal('test-message') + + res = test_client.get('/?Action=ListQueues&QueueNamePrefix=other') + res.data.should.contain(b'otherqueue.fifo') + res.data.should_not.contain(b'testqueue') + + +def test_messages_polling(): + backend = server.create_backend_app("sqs") + test_client = backend.test_client() + messages = [] + + test_client.put('/?Action=CreateQueue&QueueName=testqueue') + + def insert_messages(): + messages_count = 5 + while messages_count > 0: + test_client.put( + '/123/testqueue?MessageBody=test-message&Action=SendMessage' + '&Attribute.1.Name=WaitTimeSeconds&Attribute.1.Value=10' + ) + messages_count -= 1 + time.sleep(.5) + + def get_messages(): + count = 0 + while count < 5: + msg_res = test_client.get( + '/123/testqueue?Action=ReceiveMessage&MaxNumberOfMessages=1&WaitTimeSeconds=5' + ) + new_msgs = re.findall("<Body>(.*?)</Body>", + msg_res.data.decode('utf-8')) + count += len(new_msgs) + messages.append(new_msgs) + + get_messages_thread = threading.Thread(target=get_messages) + insert_messages_thread = threading.Thread(target=insert_messages) + + get_messages_thread.start() + insert_messages_thread.start() + + get_messages_thread.join() + insert_messages_thread.join() + + # got each message in a separate call to ReceiveMessage, despite the long + # WaitTimeSeconds + assert len(messages) == 5 diff --git a/tests/test_sqs/test_sqs.py b/tests/test_sqs/test_sqs.py index 9beb9a3faef0..f070625c03d9 100644 ---
a/tests/test_sqs/test_sqs.py +++ b/tests/test_sqs/test_sqs.py @@ -1,1237 +1,1237 @@ -# -*- coding: utf-8 -*- -from __future__ import unicode_literals -import os - -import boto -import boto3 -import botocore.exceptions -from botocore.exceptions import ClientError -from boto.exception import SQSError -from boto.sqs.message import RawMessage, Message - -from freezegun import freeze_time -import base64 -import json -import sure # noqa -import time -import uuid - -from moto import settings, mock_sqs, mock_sqs_deprecated -from tests.helpers import requires_boto_gte -import tests.backport_assert_raises # noqa -from nose.tools import assert_raises -from nose import SkipTest - - -@mock_sqs -def test_create_fifo_queue_fail(): - sqs = boto3.client('sqs', region_name='us-east-1') - - try: - sqs.create_queue( - QueueName='test-queue', - Attributes={ - 'FifoQueue': 'true', - } - ) - except botocore.exceptions.ClientError as err: - err.response['Error']['Code'].should.equal('InvalidParameterValue') - else: - raise RuntimeError('Should of raised InvalidParameterValue Exception') - - -@mock_sqs -def test_create_queue_with_same_attributes(): - sqs = boto3.client('sqs', region_name='us-east-1') - - dlq_url = sqs.create_queue(QueueName='test-queue-dlq')['QueueUrl'] - dlq_arn = sqs.get_queue_attributes(QueueUrl=dlq_url)['Attributes']['QueueArn'] - - attributes = { - 'DelaySeconds': '900', - 'MaximumMessageSize': '262144', - 'MessageRetentionPeriod': '1209600', - 'ReceiveMessageWaitTimeSeconds': '20', - 'RedrivePolicy': '{"deadLetterTargetArn": "%s", "maxReceiveCount": 100}' % (dlq_arn), - 'VisibilityTimeout': '43200' - } - - sqs.create_queue( - QueueName='test-queue', - Attributes=attributes - ) - - sqs.create_queue( - QueueName='test-queue', - Attributes=attributes - ) - - -@mock_sqs -def test_create_queue_with_different_attributes_fail(): - sqs = boto3.client('sqs', region_name='us-east-1') - - sqs.create_queue( - QueueName='test-queue', - Attributes={ - 'VisibilityTimeout': '10', - } - ) - try: - sqs.create_queue( - QueueName='test-queue', - Attributes={ - 'VisibilityTimeout': '60', - } - ) - except botocore.exceptions.ClientError as err: - err.response['Error']['Code'].should.equal('QueueAlreadyExists') - else: - raise RuntimeError('Should of raised QueueAlreadyExists Exception') - - -@mock_sqs -def test_create_fifo_queue(): - sqs = boto3.client('sqs', region_name='us-east-1') - resp = sqs.create_queue( - QueueName='test-queue.fifo', - Attributes={ - 'FifoQueue': 'true', - } - ) - queue_url = resp['QueueUrl'] - - response = sqs.get_queue_attributes(QueueUrl=queue_url) - response['Attributes'].should.contain('FifoQueue') - response['Attributes']['FifoQueue'].should.equal('true') - - -@mock_sqs -def test_create_queue(): - sqs = boto3.resource('sqs', region_name='us-east-1') - - new_queue = sqs.create_queue(QueueName='test-queue') - new_queue.should_not.be.none - new_queue.should.have.property('url').should.contain('test-queue') - - queue = sqs.get_queue_by_name(QueueName='test-queue') - queue.attributes.get('QueueArn').should_not.be.none - queue.attributes.get('QueueArn').split(':')[-1].should.equal('test-queue') - queue.attributes.get('QueueArn').split(':')[3].should.equal('us-east-1') - queue.attributes.get('VisibilityTimeout').should_not.be.none - queue.attributes.get('VisibilityTimeout').should.equal('30') - - -@mock_sqs -def test_create_queue_kms(): - sqs = boto3.resource('sqs', region_name='us-east-1') - - new_queue = sqs.create_queue( - QueueName='test-queue', - Attributes={ - 'KmsMasterKeyId': 
'master-key-id', - 'KmsDataKeyReusePeriodSeconds': '600' - }) - new_queue.should_not.be.none - - queue = sqs.get_queue_by_name(QueueName='test-queue') - - queue.attributes.get('KmsMasterKeyId').should.equal('master-key-id') - queue.attributes.get('KmsDataKeyReusePeriodSeconds').should.equal('600') - - -@mock_sqs -def test_get_nonexistent_queue(): - sqs = boto3.resource('sqs', region_name='us-east-1') - with assert_raises(ClientError) as err: - sqs.get_queue_by_name(QueueName='nonexisting-queue') - ex = err.exception - ex.operation_name.should.equal('GetQueueUrl') - ex.response['Error']['Code'].should.equal( - 'AWS.SimpleQueueService.NonExistentQueue') - - with assert_raises(ClientError) as err: - sqs.Queue('http://whatever-incorrect-queue-address').load() - ex = err.exception - ex.operation_name.should.equal('GetQueueAttributes') - ex.response['Error']['Code'].should.equal( - 'AWS.SimpleQueueService.NonExistentQueue') - - -@mock_sqs -def test_message_send_without_attributes(): - sqs = boto3.resource('sqs', region_name='us-east-1') - queue = sqs.create_queue(QueueName="blah") - msg = queue.send_message( - MessageBody="derp" - ) - msg.get('MD5OfMessageBody').should.equal( - '58fd9edd83341c29f1aebba81c31e257') - msg.shouldnt.have.key('MD5OfMessageAttributes') - msg.get('MessageId').should_not.contain(' \n') - - messages = queue.receive_messages() - messages.should.have.length_of(1) - - -@mock_sqs -def test_message_send_with_attributes(): - sqs = boto3.resource('sqs', region_name='us-east-1') - queue = sqs.create_queue(QueueName="blah") - msg = queue.send_message( - MessageBody="derp", - MessageAttributes={ - 'timestamp': { - 'StringValue': '1493147359900', - 'DataType': 'Number', - } - } - ) - msg.get('MD5OfMessageBody').should.equal( - '58fd9edd83341c29f1aebba81c31e257') - msg.get('MD5OfMessageAttributes').should.equal( - '235c5c510d26fb653d073faed50ae77c') - msg.get('MessageId').should_not.contain(' \n') - - messages = queue.receive_messages() - messages.should.have.length_of(1) - - -@mock_sqs -def test_message_with_complex_attributes(): - sqs = boto3.resource('sqs', region_name='us-east-1') - queue = sqs.create_queue(QueueName="blah") - msg = queue.send_message( - MessageBody="derp", - MessageAttributes={ - 'ccc': {'StringValue': 'testjunk', 'DataType': 'String'}, - 'aaa': {'BinaryValue': b'\x02\x03\x04', 'DataType': 'Binary'}, - 'zzz': {'DataType': 'Number', 'StringValue': '0230.01'}, - 'öther_encodings': {'DataType': 'String', 'StringValue': 'T\xFCst'} - } - ) - msg.get('MD5OfMessageBody').should.equal( - '58fd9edd83341c29f1aebba81c31e257') - msg.get('MD5OfMessageAttributes').should.equal( - '8ae21a7957029ef04146b42aeaa18a22') - msg.get('MessageId').should_not.contain(' \n') - - messages = queue.receive_messages() - messages.should.have.length_of(1) - - -@mock_sqs -def test_send_message_with_message_group_id(): - sqs = boto3.resource('sqs', region_name='us-east-1') - queue = sqs.create_queue(QueueName="test-group-id.fifo", - Attributes={'FifoQueue': 'true'}) - - sent = queue.send_message( - MessageBody="mydata", - MessageDeduplicationId="dedupe_id_1", - MessageGroupId="group_id_1", - ) - - messages = queue.receive_messages() - messages.should.have.length_of(1) - - message_attributes = messages[0].attributes - message_attributes.should.contain('MessageGroupId') - message_attributes['MessageGroupId'].should.equal('group_id_1') - message_attributes.should.contain('MessageDeduplicationId') - message_attributes['MessageDeduplicationId'].should.equal('dedupe_id_1') - - -@mock_sqs -def 
test_send_message_with_unicode_characters(): - body_one = 'Héllo!😀' - - sqs = boto3.resource('sqs', region_name='us-east-1') - queue = sqs.create_queue(QueueName="blah") - msg = queue.send_message(MessageBody=body_one) - - messages = queue.receive_messages() - message_body = messages[0].body - - message_body.should.equal(body_one) - - -@mock_sqs -def test_set_queue_attributes(): - sqs = boto3.resource('sqs', region_name='us-east-1') - queue = sqs.create_queue(QueueName="blah") - - queue.attributes['VisibilityTimeout'].should.equal("30") - - queue.set_attributes(Attributes={"VisibilityTimeout": "45"}) - queue.attributes['VisibilityTimeout'].should.equal("45") - - -@mock_sqs -def test_create_queues_in_multiple_region(): - west1_conn = boto3.client('sqs', region_name='us-west-1') - west1_conn.create_queue(QueueName="blah") - - west2_conn = boto3.client('sqs', region_name='us-west-2') - west2_conn.create_queue(QueueName="test-queue") - - list(west1_conn.list_queues()['QueueUrls']).should.have.length_of(1) - list(west2_conn.list_queues()['QueueUrls']).should.have.length_of(1) - - if settings.TEST_SERVER_MODE: - base_url = 'http://localhost:5000' - else: - base_url = 'https://us-west-1.queue.amazonaws.com' - - west1_conn.list_queues()['QueueUrls'][0].should.equal( - '{base_url}/123456789012/blah'.format(base_url=base_url)) - - -@mock_sqs -def test_get_queue_with_prefix(): - conn = boto3.client("sqs", region_name='us-west-1') - conn.create_queue(QueueName="prefixa-queue") - conn.create_queue(QueueName="prefixb-queue") - conn.create_queue(QueueName="test-queue") - - conn.list_queues()['QueueUrls'].should.have.length_of(3) - - queue = conn.list_queues(QueueNamePrefix="test-")['QueueUrls'] - queue.should.have.length_of(1) - - if settings.TEST_SERVER_MODE: - base_url = 'http://localhost:5000' - else: - base_url = 'https://us-west-1.queue.amazonaws.com' - - queue[0].should.equal( - "{base_url}/123456789012/test-queue".format(base_url=base_url)) - - -@mock_sqs -def test_delete_queue(): - sqs = boto3.resource('sqs', region_name='us-east-1') - conn = boto3.client("sqs", region_name='us-east-1') - conn.create_queue(QueueName="test-queue", - Attributes={"VisibilityTimeout": "3"}) - queue = sqs.Queue('test-queue') - - conn.list_queues()['QueueUrls'].should.have.length_of(1) - - queue.delete() - conn.list_queues().get('QueueUrls').should.equal(None) - - with assert_raises(botocore.exceptions.ClientError): - queue.delete() - - -@mock_sqs -def test_set_queue_attribute(): - sqs = boto3.resource('sqs', region_name='us-east-1') - conn = boto3.client("sqs", region_name='us-east-1') - conn.create_queue(QueueName="test-queue", - Attributes={"VisibilityTimeout": '3'}) - - queue = sqs.Queue("test-queue") - queue.attributes['VisibilityTimeout'].should.equal('3') - - queue.set_attributes(Attributes={"VisibilityTimeout": '45'}) - queue = sqs.Queue("test-queue") - queue.attributes['VisibilityTimeout'].should.equal('45') - - -@mock_sqs -def test_send_receive_message_without_attributes(): - sqs = boto3.resource('sqs', region_name='us-east-1') - conn = boto3.client("sqs", region_name='us-east-1') - conn.create_queue(QueueName="test-queue") - queue = sqs.Queue("test-queue") - - body_one = 'this is a test message' - body_two = 'this is another test message' - - queue.send_message(MessageBody=body_one) - queue.send_message(MessageBody=body_two) - - messages = conn.receive_message( - QueueUrl=queue.url, MaxNumberOfMessages=2)['Messages'] - - message1 = messages[0] - message2 = messages[1] - - 
message1['Body'].should.equal(body_one) - message2['Body'].should.equal(body_two) - - message1.shouldnt.have.key('MD5OfMessageAttributes') - message2.shouldnt.have.key('MD5OfMessageAttributes') - - -@mock_sqs -def test_send_receive_message_with_attributes(): - sqs = boto3.resource('sqs', region_name='us-east-1') - conn = boto3.client("sqs", region_name='us-east-1') - conn.create_queue(QueueName="test-queue") - queue = sqs.Queue("test-queue") - - body_one = 'this is a test message' - body_two = 'this is another test message' - - queue.send_message( - MessageBody=body_one, - MessageAttributes={ - 'timestamp': { - 'StringValue': '1493147359900', - 'DataType': 'Number', - } - } - ) - - queue.send_message( - MessageBody=body_two, - MessageAttributes={ - 'timestamp': { - 'StringValue': '1493147359901', - 'DataType': 'Number', - } - } - ) - - messages = conn.receive_message( - QueueUrl=queue.url, MaxNumberOfMessages=2)['Messages'] - - message1 = messages[0] - message2 = messages[1] - - message1.get('Body').should.equal(body_one) - message2.get('Body').should.equal(body_two) - - message1.get('MD5OfMessageAttributes').should.equal('235c5c510d26fb653d073faed50ae77c') - message2.get('MD5OfMessageAttributes').should.equal('994258b45346a2cc3f9cbb611aa7af30') - - -@mock_sqs -def test_send_receive_message_timestamps(): - sqs = boto3.resource('sqs', region_name='us-east-1') - conn = boto3.client("sqs", region_name='us-east-1') - conn.create_queue(QueueName="test-queue") - queue = sqs.Queue("test-queue") - - queue.send_message(MessageBody="derp") - messages = conn.receive_message( - QueueUrl=queue.url, MaxNumberOfMessages=1)['Messages'] - - message = messages[0] - sent_timestamp = message.get('Attributes').get('SentTimestamp') - approximate_first_receive_timestamp = message.get('Attributes').get('ApproximateFirstReceiveTimestamp') - - int.when.called_with(sent_timestamp).shouldnt.throw(ValueError) - int.when.called_with(approximate_first_receive_timestamp).shouldnt.throw(ValueError) - - -@mock_sqs -def test_max_number_of_messages_invalid_param(): - sqs = boto3.resource('sqs', region_name='us-east-1') - queue = sqs.create_queue(QueueName='test-queue') - - with assert_raises(ClientError): - queue.receive_messages(MaxNumberOfMessages=11) - - with assert_raises(ClientError): - queue.receive_messages(MaxNumberOfMessages=0) - - # no error but also no messages returned - queue.receive_messages(MaxNumberOfMessages=1, WaitTimeSeconds=0) - - -@mock_sqs -def test_wait_time_seconds_invalid_param(): - sqs = boto3.resource('sqs', region_name='us-east-1') - queue = sqs.create_queue(QueueName='test-queue') - - with assert_raises(ClientError): - queue.receive_messages(WaitTimeSeconds=-1) - - with assert_raises(ClientError): - queue.receive_messages(WaitTimeSeconds=21) - - # no error but also no messages returned - queue.receive_messages(WaitTimeSeconds=0) - - -@mock_sqs -def test_receive_messages_with_wait_seconds_timeout_of_zero(): - """ - test that zero messages is returned with a wait_seconds_timeout of zero, - previously this created an infinite loop and nothing was returned - :return: - """ - - sqs = boto3.resource('sqs', region_name='us-east-1') - queue = sqs.create_queue(QueueName="blah") - - messages = queue.receive_messages(WaitTimeSeconds=0) - messages.should.equal([]) - - -@mock_sqs_deprecated -def test_send_message_with_xml_characters(): - conn = boto.connect_sqs('the_key', 'the_secret') - queue = conn.create_queue("test-queue", visibility_timeout=3) - queue.set_message_class(RawMessage) - - body_one = '< & >' 
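# [editor's note — annotation, not part of the original patch] the test
# above feeds markup characters through SQS to exercise XML escaping in
# the mocked response. A minimal sketch of the escaping involved, using
# only the standard library (escape_body is a hypothetical name, not
# moto's actual implementation):
#
#     from xml.sax.saxutils import escape
#
#     def escape_body(body):
#         # '< & >' becomes '&lt; &amp; &gt;' inside the <Body> element
#         return escape(body)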
- - queue.write(queue.new_message(body_one)) - - messages = conn.receive_message(queue, number_messages=1) - - messages[0].get_body().should.equal(body_one) - - -@requires_boto_gte("2.28") -@mock_sqs_deprecated -def test_send_message_with_attributes(): - conn = boto.connect_sqs('the_key', 'the_secret') - queue = conn.create_queue("test-queue", visibility_timeout=3) - queue.set_message_class(RawMessage) - - body = 'this is a test message' - message = queue.new_message(body) - BASE64_BINARY = base64.b64encode(b'binary value').decode('utf-8') - message_attributes = { - 'test.attribute_name': {'data_type': 'String', 'string_value': 'attribute value'}, - 'test.binary_attribute': {'data_type': 'Binary', 'binary_value': BASE64_BINARY}, - 'test.number_attribute': {'data_type': 'Number', 'string_value': 'string value'} - } - message.message_attributes = message_attributes - - queue.write(message) - - messages = conn.receive_message(queue) - - messages[0].get_body().should.equal(body) - - for name, value in message_attributes.items(): - dict(messages[0].message_attributes[name]).should.equal(value) - - -@mock_sqs_deprecated -def test_send_message_with_delay(): - conn = boto.connect_sqs('the_key', 'the_secret') - queue = conn.create_queue("test-queue", visibility_timeout=3) - queue.set_message_class(RawMessage) - - body_one = 'this is a test message' - body_two = 'this is another test message' - - queue.write(queue.new_message(body_one), delay_seconds=3) - queue.write(queue.new_message(body_two)) - - queue.count().should.equal(1) - - messages = conn.receive_message(queue, number_messages=2) - assert len(messages) == 1 - message = messages[0] - assert message.get_body().should.equal(body_two) - queue.count().should.equal(0) - - -@mock_sqs_deprecated -def test_send_large_message_fails(): - conn = boto.connect_sqs('the_key', 'the_secret') - queue = conn.create_queue("test-queue", visibility_timeout=3) - queue.set_message_class(RawMessage) - - body_one = 'test message' * 200000 - huge_message = queue.new_message(body_one) - - queue.write.when.called_with(huge_message).should.throw(SQSError) - - -@mock_sqs_deprecated -def test_message_becomes_inflight_when_received(): - conn = boto.connect_sqs('the_key', 'the_secret') - queue = conn.create_queue("test-queue", visibility_timeout=2) - queue.set_message_class(RawMessage) - - body_one = 'this is a test message' - queue.write(queue.new_message(body_one)) - queue.count().should.equal(1) - - messages = conn.receive_message(queue, number_messages=1) - queue.count().should.equal(0) - - assert len(messages) == 1 - - # Wait - time.sleep(3) - - queue.count().should.equal(1) - - -@mock_sqs_deprecated -def test_receive_message_with_explicit_visibility_timeout(): - conn = boto.connect_sqs('the_key', 'the_secret') - queue = conn.create_queue("test-queue", visibility_timeout=3) - queue.set_message_class(RawMessage) - - body_one = 'this is another test message' - queue.write(queue.new_message(body_one)) - - queue.count().should.equal(1) - messages = conn.receive_message( - queue, number_messages=1, visibility_timeout=0) - - assert len(messages) == 1 - - # Message should remain visible - queue.count().should.equal(1) - - -@mock_sqs_deprecated -def test_change_message_visibility(): - conn = boto.connect_sqs('the_key', 'the_secret') - queue = conn.create_queue("test-queue", visibility_timeout=2) - queue.set_message_class(RawMessage) - - body_one = 'this is another test message' - queue.write(queue.new_message(body_one)) - - queue.count().should.equal(1) - messages = 
conn.receive_message(queue, number_messages=1) - - assert len(messages) == 1 - - queue.count().should.equal(0) - - messages[0].change_visibility(2) - - # Wait - time.sleep(1) - - # Message is not visible - queue.count().should.equal(0) - - time.sleep(2) - - # Message now becomes visible - queue.count().should.equal(1) - - messages = conn.receive_message(queue, number_messages=1) - messages[0].delete() - queue.count().should.equal(0) - - -@mock_sqs_deprecated -def test_message_attributes(): - conn = boto.connect_sqs('the_key', 'the_secret') - queue = conn.create_queue("test-queue", visibility_timeout=2) - queue.set_message_class(RawMessage) - - body_one = 'this is another test message' - queue.write(queue.new_message(body_one)) - - queue.count().should.equal(1) - - messages = conn.receive_message(queue, number_messages=1) - queue.count().should.equal(0) - - assert len(messages) == 1 - - message_attributes = messages[0].attributes - - assert message_attributes.get('ApproximateFirstReceiveTimestamp') - assert int(message_attributes.get('ApproximateReceiveCount')) == 1 - assert message_attributes.get('SentTimestamp') - assert message_attributes.get('SenderId') - - -@mock_sqs_deprecated -def test_read_message_from_queue(): - conn = boto.connect_sqs() - queue = conn.create_queue('testqueue') - queue.set_message_class(RawMessage) - - body = 'foo bar baz' - queue.write(queue.new_message(body)) - message = queue.read(1) - message.get_body().should.equal(body) - - -@mock_sqs_deprecated -def test_queue_length(): - conn = boto.connect_sqs('the_key', 'the_secret') - queue = conn.create_queue("test-queue", visibility_timeout=3) - queue.set_message_class(RawMessage) - - queue.write(queue.new_message('this is a test message')) - queue.write(queue.new_message('this is another test message')) - queue.count().should.equal(2) - - -@mock_sqs_deprecated -def test_delete_message(): - conn = boto.connect_sqs('the_key', 'the_secret') - queue = conn.create_queue("test-queue", visibility_timeout=3) - queue.set_message_class(RawMessage) - - queue.write(queue.new_message('this is a test message')) - queue.write(queue.new_message('this is another test message')) - queue.count().should.equal(2) - - messages = conn.receive_message(queue, number_messages=1) - assert len(messages) == 1 - messages[0].delete() - queue.count().should.equal(1) - - messages = conn.receive_message(queue, number_messages=1) - assert len(messages) == 1 - messages[0].delete() - queue.count().should.equal(0) - - -@mock_sqs_deprecated -def test_send_batch_operation(): - conn = boto.connect_sqs('the_key', 'the_secret') - queue = conn.create_queue("test-queue", visibility_timeout=3) - - # See https://github.com/boto/boto/issues/831 - queue.set_message_class(RawMessage) - - queue.write_batch([ - ("my_first_message", 'test message 1', 0), - ("my_second_message", 'test message 2', 0), - ("my_third_message", 'test message 3', 0), - ]) - - messages = queue.get_messages(3) - messages[0].get_body().should.equal("test message 1") - - # Test that pulling more messages doesn't break anything - messages = queue.get_messages(2) - - -@requires_boto_gte("2.28") -@mock_sqs_deprecated -def test_send_batch_operation_with_message_attributes(): - conn = boto.connect_sqs('the_key', 'the_secret') - queue = conn.create_queue("test-queue", visibility_timeout=3) - queue.set_message_class(RawMessage) - - message_tuple = ("my_first_message", 'test message 1', 0, { - 'name1': {'data_type': 'String', 'string_value': 'foo'}}) - queue.write_batch([message_tuple]) - - messages = 
queue.get_messages() - messages[0].get_body().should.equal("test message 1") - - for name, value in message_tuple[3].items(): - dict(messages[0].message_attributes[name]).should.equal(value) - - -@mock_sqs_deprecated -def test_delete_batch_operation(): - conn = boto.connect_sqs('the_key', 'the_secret') - queue = conn.create_queue("test-queue", visibility_timeout=3) - - conn.send_message_batch(queue, [ - ("my_first_message", 'test message 1', 0), - ("my_second_message", 'test message 2', 0), - ("my_third_message", 'test message 3', 0), - ]) - - messages = queue.get_messages(2) - queue.delete_message_batch(messages) - - queue.count().should.equal(1) - - -@mock_sqs_deprecated -def test_queue_attributes(): - conn = boto.connect_sqs('the_key', 'the_secret') - - queue_name = 'test-queue' - visibility_timeout = 3 - - queue = conn.create_queue( - queue_name, visibility_timeout=visibility_timeout) - - attributes = queue.get_attributes() - - attributes['QueueArn'].should.look_like( - 'arn:aws:sqs:us-east-1:123456789012:%s' % queue_name) - - attributes['VisibilityTimeout'].should.look_like(str(visibility_timeout)) - - attribute_names = queue.get_attributes().keys() - attribute_names.should.contain('ApproximateNumberOfMessagesNotVisible') - attribute_names.should.contain('MessageRetentionPeriod') - attribute_names.should.contain('ApproximateNumberOfMessagesDelayed') - attribute_names.should.contain('MaximumMessageSize') - attribute_names.should.contain('CreatedTimestamp') - attribute_names.should.contain('ApproximateNumberOfMessages') - attribute_names.should.contain('ReceiveMessageWaitTimeSeconds') - attribute_names.should.contain('DelaySeconds') - attribute_names.should.contain('VisibilityTimeout') - attribute_names.should.contain('LastModifiedTimestamp') - attribute_names.should.contain('QueueArn') - - -@mock_sqs_deprecated -def test_change_message_visibility_on_invalid_receipt(): - conn = boto.connect_sqs('the_key', 'the_secret') - queue = conn.create_queue("test-queue", visibility_timeout=1) - queue.set_message_class(RawMessage) - - queue.write(queue.new_message('this is another test message')) - queue.count().should.equal(1) - messages = conn.receive_message(queue, number_messages=1) - - assert len(messages) == 1 - - original_message = messages[0] - - queue.count().should.equal(0) - - time.sleep(2) - - queue.count().should.equal(1) - - messages = conn.receive_message(queue, number_messages=1) - - assert len(messages) == 1 - - original_message.change_visibility.when.called_with( - 100).should.throw(SQSError) - - -@mock_sqs_deprecated -def test_change_message_visibility_on_visible_message(): - conn = boto.connect_sqs('the_key', 'the_secret') - queue = conn.create_queue("test-queue", visibility_timeout=1) - queue.set_message_class(RawMessage) - - queue.write(queue.new_message('this is another test message')) - queue.count().should.equal(1) - messages = conn.receive_message(queue, number_messages=1) - - assert len(messages) == 1 - - original_message = messages[0] - - queue.count().should.equal(0) - - time.sleep(2) - - queue.count().should.equal(1) - - original_message.change_visibility.when.called_with( - 100).should.throw(SQSError) - - -@mock_sqs_deprecated -def test_purge_action(): - conn = boto.sqs.connect_to_region("us-east-1") - - queue = conn.create_queue('new-queue') - queue.write(queue.new_message('this is another test message')) - queue.count().should.equal(1) - - queue.purge() - - queue.count().should.equal(0) - - -@mock_sqs_deprecated -def test_delete_message_after_visibility_timeout(): - 
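# [editor's note — annotation, not part of the original patch] the
# visibility-timeout tests around here rely on one rule: a received
# message stays hidden for the timeout window and reappears unless it
# was deleted first. A minimal sketch of that check, with hypothetical
# names (not moto's internals):
#
#     def is_visible(msg, now):
#         # never received, or the visibility window has elapsed
#         return (msg.received_at is None
#                 or now >= msg.received_at + msg.visibility_timeout)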
VISIBILITY_TIMEOUT = 1 - conn = boto.sqs.connect_to_region("us-east-1") - new_queue = conn.create_queue( - 'new-queue', visibility_timeout=VISIBILITY_TIMEOUT) - - m1 = Message() - m1.set_body('Message 1!') - new_queue.write(m1) - - assert new_queue.count() == 1 - - m1_retrieved = new_queue.read() - - time.sleep(VISIBILITY_TIMEOUT + 1) - - m1_retrieved.delete() - - assert new_queue.count() == 0 - - -@mock_sqs -def test_batch_change_message_visibility(): - if os.environ.get('TEST_SERVER_MODE', 'false').lower() == 'true': - raise SkipTest('Cant manipulate time in server mode') - - with freeze_time("2015-01-01 12:00:00"): - sqs = boto3.client('sqs', region_name='us-east-1') - resp = sqs.create_queue( - QueueName='test-dlr-queue.fifo', - Attributes={'FifoQueue': 'true'} - ) - queue_url = resp['QueueUrl'] - - sqs.send_message(QueueUrl=queue_url, MessageBody='msg1') - sqs.send_message(QueueUrl=queue_url, MessageBody='msg2') - sqs.send_message(QueueUrl=queue_url, MessageBody='msg3') - - with freeze_time("2015-01-01 12:01:00"): - receive_resp = sqs.receive_message(QueueUrl=queue_url, MaxNumberOfMessages=2) - len(receive_resp['Messages']).should.equal(2) - - handles = [item['ReceiptHandle'] for item in receive_resp['Messages']] - entries = [{'Id': str(uuid.uuid4()), 'ReceiptHandle': handle, 'VisibilityTimeout': 43200} for handle in handles] - - resp = sqs.change_message_visibility_batch(QueueUrl=queue_url, Entries=entries) - len(resp['Successful']).should.equal(2) - - with freeze_time("2015-01-01 14:00:00"): - resp = sqs.receive_message(QueueUrl=queue_url, MaxNumberOfMessages=3) - len(resp['Messages']).should.equal(1) - - with freeze_time("2015-01-01 16:00:00"): - resp = sqs.receive_message(QueueUrl=queue_url, MaxNumberOfMessages=3) - len(resp['Messages']).should.equal(1) - - with freeze_time("2015-01-02 12:00:00"): - resp = sqs.receive_message(QueueUrl=queue_url, MaxNumberOfMessages=3) - len(resp['Messages']).should.equal(3) - - -@mock_sqs -def test_permissions(): - client = boto3.client('sqs', region_name='us-east-1') - - resp = client.create_queue( - QueueName='test-dlr-queue.fifo', - Attributes={'FifoQueue': 'true'} - ) - queue_url = resp['QueueUrl'] - - client.add_permission(QueueUrl=queue_url, Label='account1', AWSAccountIds=['111111111111'], Actions=['*']) - client.add_permission(QueueUrl=queue_url, Label='account2', AWSAccountIds=['222211111111'], Actions=['SendMessage']) - - with assert_raises(ClientError): - client.add_permission(QueueUrl=queue_url, Label='account2', AWSAccountIds=['222211111111'], Actions=['SomeRubbish']) - - client.remove_permission(QueueUrl=queue_url, Label='account2') - - with assert_raises(ClientError): - client.remove_permission(QueueUrl=queue_url, Label='non_existant') - - -@mock_sqs -def test_tags(): - client = boto3.client('sqs', region_name='us-east-1') - - resp = client.create_queue( - QueueName='test-dlr-queue.fifo', - Attributes={'FifoQueue': 'true'} - ) - queue_url = resp['QueueUrl'] - - client.tag_queue( - QueueUrl=queue_url, - Tags={ - 'test1': 'value1', - 'test2': 'value2', - } - ) - - resp = client.list_queue_tags(QueueUrl=queue_url) - resp['Tags'].should.contain('test1') - resp['Tags'].should.contain('test2') - - client.untag_queue( - QueueUrl=queue_url, - TagKeys=['test2'] - ) - - resp = client.list_queue_tags(QueueUrl=queue_url) - resp['Tags'].should.contain('test1') - resp['Tags'].should_not.contain('test2') - - -@mock_sqs -def test_create_fifo_queue_with_dlq(): - sqs = boto3.client('sqs', region_name='us-east-1') - resp = sqs.create_queue( - 
QueueName='test-dlr-queue.fifo', - Attributes={'FifoQueue': 'true'} - ) - queue_url1 = resp['QueueUrl'] - queue_arn1 = sqs.get_queue_attributes(QueueUrl=queue_url1)['Attributes']['QueueArn'] - - resp = sqs.create_queue( - QueueName='test-dlr-queue', - Attributes={'FifoQueue': 'false'} - ) - queue_url2 = resp['QueueUrl'] - queue_arn2 = sqs.get_queue_attributes(QueueUrl=queue_url2)['Attributes']['QueueArn'] - - sqs.create_queue( - QueueName='test-queue.fifo', - Attributes={ - 'FifoQueue': 'true', - 'RedrivePolicy': json.dumps({'deadLetterTargetArn': queue_arn1, 'maxReceiveCount': 2}) - } - ) - - # Cant have fifo queue with non fifo DLQ - with assert_raises(ClientError): - sqs.create_queue( - QueueName='test-queue2.fifo', - Attributes={ - 'FifoQueue': 'true', - 'RedrivePolicy': json.dumps({'deadLetterTargetArn': queue_arn2, 'maxReceiveCount': 2}) - } - ) - - -@mock_sqs -def test_queue_with_dlq(): - if os.environ.get('TEST_SERVER_MODE', 'false').lower() == 'true': - raise SkipTest('Cant manipulate time in server mode') - - sqs = boto3.client('sqs', region_name='us-east-1') - - with freeze_time("2015-01-01 12:00:00"): - resp = sqs.create_queue( - QueueName='test-dlr-queue.fifo', - Attributes={'FifoQueue': 'true'} - ) - queue_url1 = resp['QueueUrl'] - queue_arn1 = sqs.get_queue_attributes(QueueUrl=queue_url1)['Attributes']['QueueArn'] - - resp = sqs.create_queue( - QueueName='test-queue.fifo', - Attributes={ - 'FifoQueue': 'true', - 'RedrivePolicy': json.dumps({'deadLetterTargetArn': queue_arn1, 'maxReceiveCount': 2}) - } - ) - queue_url2 = resp['QueueUrl'] - - sqs.send_message(QueueUrl=queue_url2, MessageBody='msg1') - sqs.send_message(QueueUrl=queue_url2, MessageBody='msg2') - - with freeze_time("2015-01-01 13:00:00"): - resp = sqs.receive_message(QueueUrl=queue_url2, VisibilityTimeout=30, WaitTimeSeconds=0) - resp['Messages'][0]['Body'].should.equal('msg1') - - with freeze_time("2015-01-01 13:01:00"): - resp = sqs.receive_message(QueueUrl=queue_url2, VisibilityTimeout=30, WaitTimeSeconds=0) - resp['Messages'][0]['Body'].should.equal('msg1') - - with freeze_time("2015-01-01 13:02:00"): - resp = sqs.receive_message(QueueUrl=queue_url2, VisibilityTimeout=30, WaitTimeSeconds=0) - len(resp['Messages']).should.equal(1) - - resp = sqs.receive_message(QueueUrl=queue_url1, VisibilityTimeout=30, WaitTimeSeconds=0) - resp['Messages'][0]['Body'].should.equal('msg1') - - # Might as well test list source queues - - resp = sqs.list_dead_letter_source_queues(QueueUrl=queue_url1) - resp['queueUrls'][0].should.equal(queue_url2) - - -@mock_sqs -def test_redrive_policy_available(): - sqs = boto3.client('sqs', region_name='us-east-1') - - resp = sqs.create_queue(QueueName='test-deadletter') - queue_url1 = resp['QueueUrl'] - queue_arn1 = sqs.get_queue_attributes(QueueUrl=queue_url1)['Attributes']['QueueArn'] - redrive_policy = { - 'deadLetterTargetArn': queue_arn1, - 'maxReceiveCount': 1, - } - - resp = sqs.create_queue( - QueueName='test-queue', - Attributes={ - 'RedrivePolicy': json.dumps(redrive_policy) - } - ) - - queue_url2 = resp['QueueUrl'] - attributes = sqs.get_queue_attributes(QueueUrl=queue_url2)['Attributes'] - assert 'RedrivePolicy' in attributes - assert json.loads(attributes['RedrivePolicy']) == redrive_policy - - # Cant have redrive policy without maxReceiveCount - with assert_raises(ClientError): - sqs.create_queue( - QueueName='test-queue2', - Attributes={ - 'FifoQueue': 'true', - 'RedrivePolicy': json.dumps({'deadLetterTargetArn': queue_arn1}) - } - ) - - -@mock_sqs -def 
test_redrive_policy_non_existent_queue(): - sqs = boto3.client('sqs', region_name='us-east-1') - redrive_policy = { - 'deadLetterTargetArn': 'arn:aws:sqs:us-east-1:123456789012:no-queue', - 'maxReceiveCount': 1, - } - - with assert_raises(ClientError): - sqs.create_queue( - QueueName='test-queue', - Attributes={ - 'RedrivePolicy': json.dumps(redrive_policy) - } - ) - - -@mock_sqs -def test_redrive_policy_set_attributes(): - sqs = boto3.resource('sqs', region_name='us-east-1') - - queue = sqs.create_queue(QueueName='test-queue') - deadletter_queue = sqs.create_queue(QueueName='test-deadletter') - - redrive_policy = { - 'deadLetterTargetArn': deadletter_queue.attributes['QueueArn'], - 'maxReceiveCount': 1, - } - - queue.set_attributes(Attributes={ - 'RedrivePolicy': json.dumps(redrive_policy)}) - - copy = sqs.get_queue_by_name(QueueName='test-queue') - assert 'RedrivePolicy' in copy.attributes - copy_policy = json.loads(copy.attributes['RedrivePolicy']) - assert copy_policy == redrive_policy - - -@mock_sqs -def test_receive_messages_with_message_group_id(): - sqs = boto3.resource('sqs', region_name='us-east-1') - queue = sqs.create_queue(QueueName="test-queue.fifo", - Attributes={ - 'FifoQueue': 'true', - }) - queue.set_attributes(Attributes={"VisibilityTimeout": "3600"}) - queue.send_message( - MessageBody="message-1", - MessageGroupId="group" - ) - queue.send_message( - MessageBody="message-2", - MessageGroupId="group" - ) - - messages = queue.receive_messages() - messages.should.have.length_of(1) - message = messages[0] - - # received message is not deleted! - - messages = queue.receive_messages(WaitTimeSeconds=0) - messages.should.have.length_of(0) - - # message is now processed, next one should be available - message.delete() - messages = queue.receive_messages() - messages.should.have.length_of(1) - - -@mock_sqs -def test_receive_messages_with_message_group_id_on_requeue(): - sqs = boto3.resource('sqs', region_name='us-east-1') - queue = sqs.create_queue(QueueName="test-queue.fifo", - Attributes={ - 'FifoQueue': 'true', - }) - queue.set_attributes(Attributes={"VisibilityTimeout": "3600"}) - queue.send_message( - MessageBody="message-1", - MessageGroupId="group" - ) - queue.send_message( - MessageBody="message-2", - MessageGroupId="group" - ) - - messages = queue.receive_messages() - messages.should.have.length_of(1) - message = messages[0] - - # received message is not deleted! - - messages = queue.receive_messages(WaitTimeSeconds=0) - messages.should.have.length_of(0) - - # message is now available again, next one should be available - message.change_visibility(VisibilityTimeout=0) - messages = queue.receive_messages() - messages.should.have.length_of(1) - messages[0].message_id.should.equal(message.message_id) - - -@mock_sqs -def test_receive_messages_with_message_group_id_on_visibility_timeout(): - if os.environ.get('TEST_SERVER_MODE', 'false').lower() == 'true': - raise SkipTest('Cant manipulate time in server mode') - - with freeze_time("2015-01-01 12:00:00"): - sqs = boto3.resource('sqs', region_name='us-east-1') - queue = sqs.create_queue(QueueName="test-queue.fifo", - Attributes={ - 'FifoQueue': 'true', - }) - queue.set_attributes(Attributes={"VisibilityTimeout": "3600"}) - queue.send_message( - MessageBody="message-1", - MessageGroupId="group" - ) - queue.send_message( - MessageBody="message-2", - MessageGroupId="group" - ) - - messages = queue.receive_messages() - messages.should.have.length_of(1) - message = messages[0] - - # received message is not deleted! 
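# [editor's note — annotation, not part of the original patch] the FIFO
# tests below hinge on message-group gating: while any message of a
# group is in flight, no further messages from that group are handed
# out. A minimal sketch of that gate, with hypothetical names (not
# moto's internals):
#
#     def deliverable(msg, groups_in_flight):
#         # hold the group back until the in-flight message is deleted
#         # or its visibility timeout returns it to the queue
#         return msg.group_id not in groups_in_flight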
-
-        messages = queue.receive_messages(WaitTimeSeconds=0)
-        messages.should.have.length_of(0)
-
-        message.change_visibility(VisibilityTimeout=10)
-
-    with freeze_time("2015-01-01 12:00:05"):
-        # no timeout yet
-        messages = queue.receive_messages(WaitTimeSeconds=0)
-        messages.should.have.length_of(0)
-
-    with freeze_time("2015-01-01 12:00:15"):
-        # message is now available again, next one should be available
-        messages = queue.receive_messages()
-        messages.should.have.length_of(1)
-        messages[0].message_id.should.equal(message.message_id)
-
-@mock_sqs
-def test_receive_message_for_queue_with_receive_message_wait_time_seconds_set():
-    sqs = boto3.resource('sqs', region_name='us-east-1')
-
-    queue = sqs.create_queue(
-        QueueName='test-queue',
-        Attributes={
-            'ReceiveMessageWaitTimeSeconds': '2',
-        }
-    )
-
-    queue.receive_messages()
+# -*- coding: utf-8 -*-
+from __future__ import unicode_literals
+import os
+
+import boto
+import boto3
+import botocore.exceptions
+from botocore.exceptions import ClientError
+from boto.exception import SQSError
+from boto.sqs.message import RawMessage, Message
+
+from freezegun import freeze_time
+import base64
+import json
+import sure  # noqa
+import time
+import uuid
+
+from moto import settings, mock_sqs, mock_sqs_deprecated
+from tests.helpers import requires_boto_gte
+import tests.backport_assert_raises  # noqa
+from nose.tools import assert_raises
+from nose import SkipTest
+
+
+@mock_sqs
+def test_create_fifo_queue_fail():
+    sqs = boto3.client('sqs', region_name='us-east-1')
+
+    try:
+        sqs.create_queue(
+            QueueName='test-queue',
+            Attributes={
+                'FifoQueue': 'true',
+            }
+        )
+    except botocore.exceptions.ClientError as err:
+        err.response['Error']['Code'].should.equal('InvalidParameterValue')
+    else:
+        raise RuntimeError('Should have raised InvalidParameterValue Exception')
+
+
+@mock_sqs
+def test_create_queue_with_same_attributes():
+    sqs = boto3.client('sqs', region_name='us-east-1')
+
+    dlq_url = sqs.create_queue(QueueName='test-queue-dlq')['QueueUrl']
+    dlq_arn = sqs.get_queue_attributes(QueueUrl=dlq_url)['Attributes']['QueueArn']
+
+    attributes = {
+        'DelaySeconds': '900',
+        'MaximumMessageSize': '262144',
+        'MessageRetentionPeriod': '1209600',
+        'ReceiveMessageWaitTimeSeconds': '20',
+        'RedrivePolicy': '{"deadLetterTargetArn": "%s", "maxReceiveCount": 100}' % (dlq_arn),
+        'VisibilityTimeout': '43200'
+    }
+
+    sqs.create_queue(
+        QueueName='test-queue',
+        Attributes=attributes
+    )
+
+    sqs.create_queue(
+        QueueName='test-queue',
+        Attributes=attributes
+    )
+
+
+@mock_sqs
+def test_create_queue_with_different_attributes_fail():
+    sqs = boto3.client('sqs', region_name='us-east-1')
+
+    sqs.create_queue(
+        QueueName='test-queue',
+        Attributes={
+            'VisibilityTimeout': '10',
+        }
+    )
+    try:
+        sqs.create_queue(
+            QueueName='test-queue',
+            Attributes={
+                'VisibilityTimeout': '60',
+            }
+        )
+    except botocore.exceptions.ClientError as err:
+        err.response['Error']['Code'].should.equal('QueueAlreadyExists')
+    else:
+        raise RuntimeError('Should have raised QueueAlreadyExists Exception')
+
+
+@mock_sqs
+def test_create_fifo_queue():
+    sqs = boto3.client('sqs', region_name='us-east-1')
+    resp = sqs.create_queue(
+        QueueName='test-queue.fifo',
+        Attributes={
+            'FifoQueue': 'true',
+        }
+    )
+    queue_url = resp['QueueUrl']
+
+    response = sqs.get_queue_attributes(QueueUrl=queue_url)
+    response['Attributes'].should.contain('FifoQueue')
+    response['Attributes']['FifoQueue'].should.equal('true')
+
+
+@mock_sqs
+def test_create_queue():
+    sqs = boto3.resource('sqs',
region_name='us-east-1') + + new_queue = sqs.create_queue(QueueName='test-queue') + new_queue.should_not.be.none + new_queue.should.have.property('url').should.contain('test-queue') + + queue = sqs.get_queue_by_name(QueueName='test-queue') + queue.attributes.get('QueueArn').should_not.be.none + queue.attributes.get('QueueArn').split(':')[-1].should.equal('test-queue') + queue.attributes.get('QueueArn').split(':')[3].should.equal('us-east-1') + queue.attributes.get('VisibilityTimeout').should_not.be.none + queue.attributes.get('VisibilityTimeout').should.equal('30') + + +@mock_sqs +def test_create_queue_kms(): + sqs = boto3.resource('sqs', region_name='us-east-1') + + new_queue = sqs.create_queue( + QueueName='test-queue', + Attributes={ + 'KmsMasterKeyId': 'master-key-id', + 'KmsDataKeyReusePeriodSeconds': '600' + }) + new_queue.should_not.be.none + + queue = sqs.get_queue_by_name(QueueName='test-queue') + + queue.attributes.get('KmsMasterKeyId').should.equal('master-key-id') + queue.attributes.get('KmsDataKeyReusePeriodSeconds').should.equal('600') + + +@mock_sqs +def test_get_nonexistent_queue(): + sqs = boto3.resource('sqs', region_name='us-east-1') + with assert_raises(ClientError) as err: + sqs.get_queue_by_name(QueueName='nonexisting-queue') + ex = err.exception + ex.operation_name.should.equal('GetQueueUrl') + ex.response['Error']['Code'].should.equal( + 'AWS.SimpleQueueService.NonExistentQueue') + + with assert_raises(ClientError) as err: + sqs.Queue('http://whatever-incorrect-queue-address').load() + ex = err.exception + ex.operation_name.should.equal('GetQueueAttributes') + ex.response['Error']['Code'].should.equal( + 'AWS.SimpleQueueService.NonExistentQueue') + + +@mock_sqs +def test_message_send_without_attributes(): + sqs = boto3.resource('sqs', region_name='us-east-1') + queue = sqs.create_queue(QueueName="blah") + msg = queue.send_message( + MessageBody="derp" + ) + msg.get('MD5OfMessageBody').should.equal( + '58fd9edd83341c29f1aebba81c31e257') + msg.shouldnt.have.key('MD5OfMessageAttributes') + msg.get('MessageId').should_not.contain(' \n') + + messages = queue.receive_messages() + messages.should.have.length_of(1) + + +@mock_sqs +def test_message_send_with_attributes(): + sqs = boto3.resource('sqs', region_name='us-east-1') + queue = sqs.create_queue(QueueName="blah") + msg = queue.send_message( + MessageBody="derp", + MessageAttributes={ + 'timestamp': { + 'StringValue': '1493147359900', + 'DataType': 'Number', + } + } + ) + msg.get('MD5OfMessageBody').should.equal( + '58fd9edd83341c29f1aebba81c31e257') + msg.get('MD5OfMessageAttributes').should.equal( + '235c5c510d26fb653d073faed50ae77c') + msg.get('MessageId').should_not.contain(' \n') + + messages = queue.receive_messages() + messages.should.have.length_of(1) + + +@mock_sqs +def test_message_with_complex_attributes(): + sqs = boto3.resource('sqs', region_name='us-east-1') + queue = sqs.create_queue(QueueName="blah") + msg = queue.send_message( + MessageBody="derp", + MessageAttributes={ + 'ccc': {'StringValue': 'testjunk', 'DataType': 'String'}, + 'aaa': {'BinaryValue': b'\x02\x03\x04', 'DataType': 'Binary'}, + 'zzz': {'DataType': 'Number', 'StringValue': '0230.01'}, + 'öther_encodings': {'DataType': 'String', 'StringValue': 'T\xFCst'} + } + ) + msg.get('MD5OfMessageBody').should.equal( + '58fd9edd83341c29f1aebba81c31e257') + msg.get('MD5OfMessageAttributes').should.equal( + '8ae21a7957029ef04146b42aeaa18a22') + msg.get('MessageId').should_not.contain(' \n') + + messages = queue.receive_messages() + 
messages.should.have.length_of(1) + + +@mock_sqs +def test_send_message_with_message_group_id(): + sqs = boto3.resource('sqs', region_name='us-east-1') + queue = sqs.create_queue(QueueName="test-group-id.fifo", + Attributes={'FifoQueue': 'true'}) + + sent = queue.send_message( + MessageBody="mydata", + MessageDeduplicationId="dedupe_id_1", + MessageGroupId="group_id_1", + ) + + messages = queue.receive_messages() + messages.should.have.length_of(1) + + message_attributes = messages[0].attributes + message_attributes.should.contain('MessageGroupId') + message_attributes['MessageGroupId'].should.equal('group_id_1') + message_attributes.should.contain('MessageDeduplicationId') + message_attributes['MessageDeduplicationId'].should.equal('dedupe_id_1') + + +@mock_sqs +def test_send_message_with_unicode_characters(): + body_one = 'Héllo!😀' + + sqs = boto3.resource('sqs', region_name='us-east-1') + queue = sqs.create_queue(QueueName="blah") + msg = queue.send_message(MessageBody=body_one) + + messages = queue.receive_messages() + message_body = messages[0].body + + message_body.should.equal(body_one) + + +@mock_sqs +def test_set_queue_attributes(): + sqs = boto3.resource('sqs', region_name='us-east-1') + queue = sqs.create_queue(QueueName="blah") + + queue.attributes['VisibilityTimeout'].should.equal("30") + + queue.set_attributes(Attributes={"VisibilityTimeout": "45"}) + queue.attributes['VisibilityTimeout'].should.equal("45") + + +@mock_sqs +def test_create_queues_in_multiple_region(): + west1_conn = boto3.client('sqs', region_name='us-west-1') + west1_conn.create_queue(QueueName="blah") + + west2_conn = boto3.client('sqs', region_name='us-west-2') + west2_conn.create_queue(QueueName="test-queue") + + list(west1_conn.list_queues()['QueueUrls']).should.have.length_of(1) + list(west2_conn.list_queues()['QueueUrls']).should.have.length_of(1) + + if settings.TEST_SERVER_MODE: + base_url = 'http://localhost:5000' + else: + base_url = 'https://us-west-1.queue.amazonaws.com' + + west1_conn.list_queues()['QueueUrls'][0].should.equal( + '{base_url}/123456789012/blah'.format(base_url=base_url)) + + +@mock_sqs +def test_get_queue_with_prefix(): + conn = boto3.client("sqs", region_name='us-west-1') + conn.create_queue(QueueName="prefixa-queue") + conn.create_queue(QueueName="prefixb-queue") + conn.create_queue(QueueName="test-queue") + + conn.list_queues()['QueueUrls'].should.have.length_of(3) + + queue = conn.list_queues(QueueNamePrefix="test-")['QueueUrls'] + queue.should.have.length_of(1) + + if settings.TEST_SERVER_MODE: + base_url = 'http://localhost:5000' + else: + base_url = 'https://us-west-1.queue.amazonaws.com' + + queue[0].should.equal( + "{base_url}/123456789012/test-queue".format(base_url=base_url)) + + +@mock_sqs +def test_delete_queue(): + sqs = boto3.resource('sqs', region_name='us-east-1') + conn = boto3.client("sqs", region_name='us-east-1') + conn.create_queue(QueueName="test-queue", + Attributes={"VisibilityTimeout": "3"}) + queue = sqs.Queue('test-queue') + + conn.list_queues()['QueueUrls'].should.have.length_of(1) + + queue.delete() + conn.list_queues().get('QueueUrls').should.equal(None) + + with assert_raises(botocore.exceptions.ClientError): + queue.delete() + + +@mock_sqs +def test_set_queue_attribute(): + sqs = boto3.resource('sqs', region_name='us-east-1') + conn = boto3.client("sqs", region_name='us-east-1') + conn.create_queue(QueueName="test-queue", + Attributes={"VisibilityTimeout": '3'}) + + queue = sqs.Queue("test-queue") + 
queue.attributes['VisibilityTimeout'].should.equal('3') + + queue.set_attributes(Attributes={"VisibilityTimeout": '45'}) + queue = sqs.Queue("test-queue") + queue.attributes['VisibilityTimeout'].should.equal('45') + + +@mock_sqs +def test_send_receive_message_without_attributes(): + sqs = boto3.resource('sqs', region_name='us-east-1') + conn = boto3.client("sqs", region_name='us-east-1') + conn.create_queue(QueueName="test-queue") + queue = sqs.Queue("test-queue") + + body_one = 'this is a test message' + body_two = 'this is another test message' + + queue.send_message(MessageBody=body_one) + queue.send_message(MessageBody=body_two) + + messages = conn.receive_message( + QueueUrl=queue.url, MaxNumberOfMessages=2)['Messages'] + + message1 = messages[0] + message2 = messages[1] + + message1['Body'].should.equal(body_one) + message2['Body'].should.equal(body_two) + + message1.shouldnt.have.key('MD5OfMessageAttributes') + message2.shouldnt.have.key('MD5OfMessageAttributes') + + +@mock_sqs +def test_send_receive_message_with_attributes(): + sqs = boto3.resource('sqs', region_name='us-east-1') + conn = boto3.client("sqs", region_name='us-east-1') + conn.create_queue(QueueName="test-queue") + queue = sqs.Queue("test-queue") + + body_one = 'this is a test message' + body_two = 'this is another test message' + + queue.send_message( + MessageBody=body_one, + MessageAttributes={ + 'timestamp': { + 'StringValue': '1493147359900', + 'DataType': 'Number', + } + } + ) + + queue.send_message( + MessageBody=body_two, + MessageAttributes={ + 'timestamp': { + 'StringValue': '1493147359901', + 'DataType': 'Number', + } + } + ) + + messages = conn.receive_message( + QueueUrl=queue.url, MaxNumberOfMessages=2)['Messages'] + + message1 = messages[0] + message2 = messages[1] + + message1.get('Body').should.equal(body_one) + message2.get('Body').should.equal(body_two) + + message1.get('MD5OfMessageAttributes').should.equal('235c5c510d26fb653d073faed50ae77c') + message2.get('MD5OfMessageAttributes').should.equal('994258b45346a2cc3f9cbb611aa7af30') + + +@mock_sqs +def test_send_receive_message_timestamps(): + sqs = boto3.resource('sqs', region_name='us-east-1') + conn = boto3.client("sqs", region_name='us-east-1') + conn.create_queue(QueueName="test-queue") + queue = sqs.Queue("test-queue") + + queue.send_message(MessageBody="derp") + messages = conn.receive_message( + QueueUrl=queue.url, MaxNumberOfMessages=1)['Messages'] + + message = messages[0] + sent_timestamp = message.get('Attributes').get('SentTimestamp') + approximate_first_receive_timestamp = message.get('Attributes').get('ApproximateFirstReceiveTimestamp') + + int.when.called_with(sent_timestamp).shouldnt.throw(ValueError) + int.when.called_with(approximate_first_receive_timestamp).shouldnt.throw(ValueError) + + +@mock_sqs +def test_max_number_of_messages_invalid_param(): + sqs = boto3.resource('sqs', region_name='us-east-1') + queue = sqs.create_queue(QueueName='test-queue') + + with assert_raises(ClientError): + queue.receive_messages(MaxNumberOfMessages=11) + + with assert_raises(ClientError): + queue.receive_messages(MaxNumberOfMessages=0) + + # no error but also no messages returned + queue.receive_messages(MaxNumberOfMessages=1, WaitTimeSeconds=0) + + +@mock_sqs +def test_wait_time_seconds_invalid_param(): + sqs = boto3.resource('sqs', region_name='us-east-1') + queue = sqs.create_queue(QueueName='test-queue') + + with assert_raises(ClientError): + queue.receive_messages(WaitTimeSeconds=-1) + + with assert_raises(ClientError): + 
queue.receive_messages(WaitTimeSeconds=21)
+
+    # no error but also no messages returned
+    queue.receive_messages(WaitTimeSeconds=0)
+
+
+@mock_sqs
+def test_receive_messages_with_wait_seconds_timeout_of_zero():
+    """
+    test that zero messages are returned with a wait_seconds_timeout of zero;
+    previously this created an infinite loop and nothing was returned
+    """
+
+    sqs = boto3.resource('sqs', region_name='us-east-1')
+    queue = sqs.create_queue(QueueName="blah")
+
+    messages = queue.receive_messages(WaitTimeSeconds=0)
+    messages.should.equal([])
+
+
+@mock_sqs_deprecated
+def test_send_message_with_xml_characters():
+    conn = boto.connect_sqs('the_key', 'the_secret')
+    queue = conn.create_queue("test-queue", visibility_timeout=3)
+    queue.set_message_class(RawMessage)
+
+    body_one = '< & >'
+
+    queue.write(queue.new_message(body_one))
+
+    messages = conn.receive_message(queue, number_messages=1)
+
+    messages[0].get_body().should.equal(body_one)
+
+
+@requires_boto_gte("2.28")
+@mock_sqs_deprecated
+def test_send_message_with_attributes():
+    conn = boto.connect_sqs('the_key', 'the_secret')
+    queue = conn.create_queue("test-queue", visibility_timeout=3)
+    queue.set_message_class(RawMessage)
+
+    body = 'this is a test message'
+    message = queue.new_message(body)
+    BASE64_BINARY = base64.b64encode(b'binary value').decode('utf-8')
+    message_attributes = {
+        'test.attribute_name': {'data_type': 'String', 'string_value': 'attribute value'},
+        'test.binary_attribute': {'data_type': 'Binary', 'binary_value': BASE64_BINARY},
+        'test.number_attribute': {'data_type': 'Number', 'string_value': 'string value'}
+    }
+    message.message_attributes = message_attributes
+
+    queue.write(message)
+
+    messages = conn.receive_message(queue)
+
+    messages[0].get_body().should.equal(body)
+
+    for name, value in message_attributes.items():
+        dict(messages[0].message_attributes[name]).should.equal(value)
+
+
+@mock_sqs_deprecated
+def test_send_message_with_delay():
+    conn = boto.connect_sqs('the_key', 'the_secret')
+    queue = conn.create_queue("test-queue", visibility_timeout=3)
+    queue.set_message_class(RawMessage)
+
+    body_one = 'this is a test message'
+    body_two = 'this is another test message'
+
+    queue.write(queue.new_message(body_one), delay_seconds=3)
+    queue.write(queue.new_message(body_two))
+
+    queue.count().should.equal(1)
+
+    messages = conn.receive_message(queue, number_messages=2)
+    assert len(messages) == 1
+    message = messages[0]
+    assert message.get_body().should.equal(body_two)
+    queue.count().should.equal(0)
+
+
+@mock_sqs_deprecated
+def test_send_large_message_fails():
+    conn = boto.connect_sqs('the_key', 'the_secret')
+    queue = conn.create_queue("test-queue", visibility_timeout=3)
+    queue.set_message_class(RawMessage)
+
+    body_one = 'test message' * 200000
+    huge_message = queue.new_message(body_one)
+
+    queue.write.when.called_with(huge_message).should.throw(SQSError)
+
+
+@mock_sqs_deprecated
+def test_message_becomes_inflight_when_received():
+    conn = boto.connect_sqs('the_key', 'the_secret')
+    queue = conn.create_queue("test-queue", visibility_timeout=2)
+    queue.set_message_class(RawMessage)
+
+    body_one = 'this is a test message'
+    queue.write(queue.new_message(body_one))
+    queue.count().should.equal(1)
+
+    messages = conn.receive_message(queue, number_messages=1)
+    queue.count().should.equal(0)
+
+    assert len(messages) == 1
+
+    # Wait
+    time.sleep(3)
+
+    queue.count().should.equal(1)
+
+
+@mock_sqs_deprecated
+def test_receive_message_with_explicit_visibility_timeout():
+    conn =
boto.connect_sqs('the_key', 'the_secret')
+    queue = conn.create_queue("test-queue", visibility_timeout=3)
+    queue.set_message_class(RawMessage)
+
+    body_one = 'this is another test message'
+    queue.write(queue.new_message(body_one))
+
+    queue.count().should.equal(1)
+    messages = conn.receive_message(
+        queue, number_messages=1, visibility_timeout=0)
+
+    assert len(messages) == 1
+
+    # Message should remain visible
+    queue.count().should.equal(1)
+
+
+@mock_sqs_deprecated
+def test_change_message_visibility():
+    conn = boto.connect_sqs('the_key', 'the_secret')
+    queue = conn.create_queue("test-queue", visibility_timeout=2)
+    queue.set_message_class(RawMessage)
+
+    body_one = 'this is another test message'
+    queue.write(queue.new_message(body_one))
+
+    queue.count().should.equal(1)
+    messages = conn.receive_message(queue, number_messages=1)
+
+    assert len(messages) == 1
+
+    queue.count().should.equal(0)
+
+    messages[0].change_visibility(2)
+
+    # Wait
+    time.sleep(1)
+
+    # Message is not visible
+    queue.count().should.equal(0)
+
+    time.sleep(2)
+
+    # Message now becomes visible
+    queue.count().should.equal(1)
+
+    messages = conn.receive_message(queue, number_messages=1)
+    messages[0].delete()
+    queue.count().should.equal(0)
+
+
+@mock_sqs_deprecated
+def test_message_attributes():
+    conn = boto.connect_sqs('the_key', 'the_secret')
+    queue = conn.create_queue("test-queue", visibility_timeout=2)
+    queue.set_message_class(RawMessage)
+
+    body_one = 'this is another test message'
+    queue.write(queue.new_message(body_one))
+
+    queue.count().should.equal(1)
+
+    messages = conn.receive_message(queue, number_messages=1)
+    queue.count().should.equal(0)
+
+    assert len(messages) == 1
+
+    message_attributes = messages[0].attributes
+
+    assert message_attributes.get('ApproximateFirstReceiveTimestamp')
+    assert int(message_attributes.get('ApproximateReceiveCount')) == 1
+    assert message_attributes.get('SentTimestamp')
+    assert message_attributes.get('SenderId')
+
+
+@mock_sqs_deprecated
+def test_read_message_from_queue():
+    conn = boto.connect_sqs()
+    queue = conn.create_queue('testqueue')
+    queue.set_message_class(RawMessage)
+
+    body = 'foo bar baz'
+    queue.write(queue.new_message(body))
+    message = queue.read(1)
+    message.get_body().should.equal(body)
+
+
+@mock_sqs_deprecated
+def test_queue_length():
+    conn = boto.connect_sqs('the_key', 'the_secret')
+    queue = conn.create_queue("test-queue", visibility_timeout=3)
+    queue.set_message_class(RawMessage)
+
+    queue.write(queue.new_message('this is a test message'))
+    queue.write(queue.new_message('this is another test message'))
+    queue.count().should.equal(2)
+
+
+@mock_sqs_deprecated
+def test_delete_message():
+    conn = boto.connect_sqs('the_key', 'the_secret')
+    queue = conn.create_queue("test-queue", visibility_timeout=3)
+    queue.set_message_class(RawMessage)
+
+    queue.write(queue.new_message('this is a test message'))
+    queue.write(queue.new_message('this is another test message'))
+    queue.count().should.equal(2)
+
+    messages = conn.receive_message(queue, number_messages=1)
+    assert len(messages) == 1
+    messages[0].delete()
+    queue.count().should.equal(1)
+
+    messages = conn.receive_message(queue, number_messages=1)
+    assert len(messages) == 1
+    messages[0].delete()
+    queue.count().should.equal(0)
+
+
+@mock_sqs_deprecated
+def test_send_batch_operation():
+    conn = boto.connect_sqs('the_key', 'the_secret')
+    queue = conn.create_queue("test-queue", visibility_timeout=3)
+
+    # See https://github.com/boto/boto/issues/831
+    
queue.set_message_class(RawMessage) + + queue.write_batch([ + ("my_first_message", 'test message 1', 0), + ("my_second_message", 'test message 2', 0), + ("my_third_message", 'test message 3', 0), + ]) + + messages = queue.get_messages(3) + messages[0].get_body().should.equal("test message 1") + + # Test that pulling more messages doesn't break anything + messages = queue.get_messages(2) + + +@requires_boto_gte("2.28") +@mock_sqs_deprecated +def test_send_batch_operation_with_message_attributes(): + conn = boto.connect_sqs('the_key', 'the_secret') + queue = conn.create_queue("test-queue", visibility_timeout=3) + queue.set_message_class(RawMessage) + + message_tuple = ("my_first_message", 'test message 1', 0, { + 'name1': {'data_type': 'String', 'string_value': 'foo'}}) + queue.write_batch([message_tuple]) + + messages = queue.get_messages() + messages[0].get_body().should.equal("test message 1") + + for name, value in message_tuple[3].items(): + dict(messages[0].message_attributes[name]).should.equal(value) + + +@mock_sqs_deprecated +def test_delete_batch_operation(): + conn = boto.connect_sqs('the_key', 'the_secret') + queue = conn.create_queue("test-queue", visibility_timeout=3) + + conn.send_message_batch(queue, [ + ("my_first_message", 'test message 1', 0), + ("my_second_message", 'test message 2', 0), + ("my_third_message", 'test message 3', 0), + ]) + + messages = queue.get_messages(2) + queue.delete_message_batch(messages) + + queue.count().should.equal(1) + + +@mock_sqs_deprecated +def test_queue_attributes(): + conn = boto.connect_sqs('the_key', 'the_secret') + + queue_name = 'test-queue' + visibility_timeout = 3 + + queue = conn.create_queue( + queue_name, visibility_timeout=visibility_timeout) + + attributes = queue.get_attributes() + + attributes['QueueArn'].should.look_like( + 'arn:aws:sqs:us-east-1:123456789012:%s' % queue_name) + + attributes['VisibilityTimeout'].should.look_like(str(visibility_timeout)) + + attribute_names = queue.get_attributes().keys() + attribute_names.should.contain('ApproximateNumberOfMessagesNotVisible') + attribute_names.should.contain('MessageRetentionPeriod') + attribute_names.should.contain('ApproximateNumberOfMessagesDelayed') + attribute_names.should.contain('MaximumMessageSize') + attribute_names.should.contain('CreatedTimestamp') + attribute_names.should.contain('ApproximateNumberOfMessages') + attribute_names.should.contain('ReceiveMessageWaitTimeSeconds') + attribute_names.should.contain('DelaySeconds') + attribute_names.should.contain('VisibilityTimeout') + attribute_names.should.contain('LastModifiedTimestamp') + attribute_names.should.contain('QueueArn') + + +@mock_sqs_deprecated +def test_change_message_visibility_on_invalid_receipt(): + conn = boto.connect_sqs('the_key', 'the_secret') + queue = conn.create_queue("test-queue", visibility_timeout=1) + queue.set_message_class(RawMessage) + + queue.write(queue.new_message('this is another test message')) + queue.count().should.equal(1) + messages = conn.receive_message(queue, number_messages=1) + + assert len(messages) == 1 + + original_message = messages[0] + + queue.count().should.equal(0) + + time.sleep(2) + + queue.count().should.equal(1) + + messages = conn.receive_message(queue, number_messages=1) + + assert len(messages) == 1 + + original_message.change_visibility.when.called_with( + 100).should.throw(SQSError) + + +@mock_sqs_deprecated +def test_change_message_visibility_on_visible_message(): + conn = boto.connect_sqs('the_key', 'the_secret') + queue = conn.create_queue("test-queue", 
visibility_timeout=1)
+    queue.set_message_class(RawMessage)
+
+    queue.write(queue.new_message('this is another test message'))
+    queue.count().should.equal(1)
+    messages = conn.receive_message(queue, number_messages=1)
+
+    assert len(messages) == 1
+
+    original_message = messages[0]
+
+    queue.count().should.equal(0)
+
+    time.sleep(2)
+
+    queue.count().should.equal(1)
+
+    original_message.change_visibility.when.called_with(
+        100).should.throw(SQSError)
+
+
+@mock_sqs_deprecated
+def test_purge_action():
+    conn = boto.sqs.connect_to_region("us-east-1")
+
+    queue = conn.create_queue('new-queue')
+    queue.write(queue.new_message('this is another test message'))
+    queue.count().should.equal(1)
+
+    queue.purge()
+
+    queue.count().should.equal(0)
+
+
+@mock_sqs_deprecated
+def test_delete_message_after_visibility_timeout():
+    VISIBILITY_TIMEOUT = 1
+    conn = boto.sqs.connect_to_region("us-east-1")
+    new_queue = conn.create_queue(
+        'new-queue', visibility_timeout=VISIBILITY_TIMEOUT)
+
+    m1 = Message()
+    m1.set_body('Message 1!')
+    new_queue.write(m1)
+
+    assert new_queue.count() == 1
+
+    m1_retrieved = new_queue.read()
+
+    time.sleep(VISIBILITY_TIMEOUT + 1)
+
+    m1_retrieved.delete()
+
+    assert new_queue.count() == 0
+
+
+@mock_sqs
+def test_batch_change_message_visibility():
+    if os.environ.get('TEST_SERVER_MODE', 'false').lower() == 'true':
+        raise SkipTest("Can't manipulate time in server mode")
+
+    with freeze_time("2015-01-01 12:00:00"):
+        sqs = boto3.client('sqs', region_name='us-east-1')
+        resp = sqs.create_queue(
+            QueueName='test-dlr-queue.fifo',
+            Attributes={'FifoQueue': 'true'}
+        )
+        queue_url = resp['QueueUrl']
+
+        sqs.send_message(QueueUrl=queue_url, MessageBody='msg1')
+        sqs.send_message(QueueUrl=queue_url, MessageBody='msg2')
+        sqs.send_message(QueueUrl=queue_url, MessageBody='msg3')
+
+    with freeze_time("2015-01-01 12:01:00"):
+        receive_resp = sqs.receive_message(QueueUrl=queue_url, MaxNumberOfMessages=2)
+        len(receive_resp['Messages']).should.equal(2)
+
+        handles = [item['ReceiptHandle'] for item in receive_resp['Messages']]
+        entries = [{'Id': str(uuid.uuid4()), 'ReceiptHandle': handle, 'VisibilityTimeout': 43200} for handle in handles]
+
+        resp = sqs.change_message_visibility_batch(QueueUrl=queue_url, Entries=entries)
+        len(resp['Successful']).should.equal(2)
+
+    with freeze_time("2015-01-01 14:00:00"):
+        resp = sqs.receive_message(QueueUrl=queue_url, MaxNumberOfMessages=3)
+        len(resp['Messages']).should.equal(1)
+
+    with freeze_time("2015-01-01 16:00:00"):
+        resp = sqs.receive_message(QueueUrl=queue_url, MaxNumberOfMessages=3)
+        len(resp['Messages']).should.equal(1)
+
+    with freeze_time("2015-01-02 12:00:00"):
+        resp = sqs.receive_message(QueueUrl=queue_url, MaxNumberOfMessages=3)
+        len(resp['Messages']).should.equal(3)
+
+
+@mock_sqs
+def test_permissions():
+    client = boto3.client('sqs', region_name='us-east-1')
+
+    resp = client.create_queue(
+        QueueName='test-dlr-queue.fifo',
+        Attributes={'FifoQueue': 'true'}
+    )
+    queue_url = resp['QueueUrl']
+
+    client.add_permission(QueueUrl=queue_url, Label='account1', AWSAccountIds=['111111111111'], Actions=['*'])
+    client.add_permission(QueueUrl=queue_url, Label='account2', AWSAccountIds=['222211111111'], Actions=['SendMessage'])
+
+    with assert_raises(ClientError):
+        client.add_permission(QueueUrl=queue_url, Label='account2', AWSAccountIds=['222211111111'], Actions=['SomeRubbish'])
+
+    client.remove_permission(QueueUrl=queue_url, Label='account2')
+
+    with assert_raises(ClientError):
+        client.remove_permission(QueueUrl=queue_url,
Label='non_existent')
+
+
+@mock_sqs
+def test_tags():
+    client = boto3.client('sqs', region_name='us-east-1')
+
+    resp = client.create_queue(
+        QueueName='test-dlr-queue.fifo',
+        Attributes={'FifoQueue': 'true'}
+    )
+    queue_url = resp['QueueUrl']
+
+    client.tag_queue(
+        QueueUrl=queue_url,
+        Tags={
+            'test1': 'value1',
+            'test2': 'value2',
+        }
+    )
+
+    resp = client.list_queue_tags(QueueUrl=queue_url)
+    resp['Tags'].should.contain('test1')
+    resp['Tags'].should.contain('test2')
+
+    client.untag_queue(
+        QueueUrl=queue_url,
+        TagKeys=['test2']
+    )
+
+    resp = client.list_queue_tags(QueueUrl=queue_url)
+    resp['Tags'].should.contain('test1')
+    resp['Tags'].should_not.contain('test2')
+
+
+@mock_sqs
+def test_create_fifo_queue_with_dlq():
+    sqs = boto3.client('sqs', region_name='us-east-1')
+    resp = sqs.create_queue(
+        QueueName='test-dlr-queue.fifo',
+        Attributes={'FifoQueue': 'true'}
+    )
+    queue_url1 = resp['QueueUrl']
+    queue_arn1 = sqs.get_queue_attributes(QueueUrl=queue_url1)['Attributes']['QueueArn']
+
+    resp = sqs.create_queue(
+        QueueName='test-dlr-queue',
+        Attributes={'FifoQueue': 'false'}
+    )
+    queue_url2 = resp['QueueUrl']
+    queue_arn2 = sqs.get_queue_attributes(QueueUrl=queue_url2)['Attributes']['QueueArn']
+
+    sqs.create_queue(
+        QueueName='test-queue.fifo',
+        Attributes={
+            'FifoQueue': 'true',
+            'RedrivePolicy': json.dumps({'deadLetterTargetArn': queue_arn1, 'maxReceiveCount': 2})
+        }
+    )
+
+    # Can't have a FIFO queue with a non-FIFO DLQ
+    with assert_raises(ClientError):
+        sqs.create_queue(
+            QueueName='test-queue2.fifo',
+            Attributes={
+                'FifoQueue': 'true',
+                'RedrivePolicy': json.dumps({'deadLetterTargetArn': queue_arn2, 'maxReceiveCount': 2})
+            }
+        )
+
+
+@mock_sqs
+def test_queue_with_dlq():
+    if os.environ.get('TEST_SERVER_MODE', 'false').lower() == 'true':
+        raise SkipTest("Can't manipulate time in server mode")
+
+    sqs = boto3.client('sqs', region_name='us-east-1')
+
+    with freeze_time("2015-01-01 12:00:00"):
+        resp = sqs.create_queue(
+            QueueName='test-dlr-queue.fifo',
+            Attributes={'FifoQueue': 'true'}
+        )
+        queue_url1 = resp['QueueUrl']
+        queue_arn1 = sqs.get_queue_attributes(QueueUrl=queue_url1)['Attributes']['QueueArn']
+
+        resp = sqs.create_queue(
+            QueueName='test-queue.fifo',
+            Attributes={
+                'FifoQueue': 'true',
+                'RedrivePolicy': json.dumps({'deadLetterTargetArn': queue_arn1, 'maxReceiveCount': 2})
+            }
+        )
+        queue_url2 = resp['QueueUrl']
+
+        sqs.send_message(QueueUrl=queue_url2, MessageBody='msg1')
+        sqs.send_message(QueueUrl=queue_url2, MessageBody='msg2')
+
+    with freeze_time("2015-01-01 13:00:00"):
+        resp = sqs.receive_message(QueueUrl=queue_url2, VisibilityTimeout=30, WaitTimeSeconds=0)
+        resp['Messages'][0]['Body'].should.equal('msg1')
+
+    with freeze_time("2015-01-01 13:01:00"):
+        resp = sqs.receive_message(QueueUrl=queue_url2, VisibilityTimeout=30, WaitTimeSeconds=0)
+        resp['Messages'][0]['Body'].should.equal('msg1')
+
+    with freeze_time("2015-01-01 13:02:00"):
+        resp = sqs.receive_message(QueueUrl=queue_url2, VisibilityTimeout=30, WaitTimeSeconds=0)
+        len(resp['Messages']).should.equal(1)
+
+        resp = sqs.receive_message(QueueUrl=queue_url1, VisibilityTimeout=30, WaitTimeSeconds=0)
+        resp['Messages'][0]['Body'].should.equal('msg1')
+
+    # Might as well test list source queues
+
+    resp = sqs.list_dead_letter_source_queues(QueueUrl=queue_url1)
+    resp['queueUrls'][0].should.equal(queue_url2)
+
+
+@mock_sqs
+def test_redrive_policy_available():
+    sqs = boto3.client('sqs', region_name='us-east-1')
+
+    resp = sqs.create_queue(QueueName='test-deadletter')
+    queue_url1
= resp['QueueUrl']
+    queue_arn1 = sqs.get_queue_attributes(QueueUrl=queue_url1)['Attributes']['QueueArn']
+    redrive_policy = {
+        'deadLetterTargetArn': queue_arn1,
+        'maxReceiveCount': 1,
+    }
+
+    resp = sqs.create_queue(
+        QueueName='test-queue',
+        Attributes={
+            'RedrivePolicy': json.dumps(redrive_policy)
+        }
+    )
+
+    queue_url2 = resp['QueueUrl']
+    attributes = sqs.get_queue_attributes(QueueUrl=queue_url2)['Attributes']
+    assert 'RedrivePolicy' in attributes
+    assert json.loads(attributes['RedrivePolicy']) == redrive_policy
+
+    # Can't have a redrive policy without maxReceiveCount
+    with assert_raises(ClientError):
+        sqs.create_queue(
+            QueueName='test-queue2',
+            Attributes={
+                'FifoQueue': 'true',
+                'RedrivePolicy': json.dumps({'deadLetterTargetArn': queue_arn1})
+            }
+        )
+
+
+@mock_sqs
+def test_redrive_policy_non_existent_queue():
+    sqs = boto3.client('sqs', region_name='us-east-1')
+    redrive_policy = {
+        'deadLetterTargetArn': 'arn:aws:sqs:us-east-1:123456789012:no-queue',
+        'maxReceiveCount': 1,
+    }
+
+    with assert_raises(ClientError):
+        sqs.create_queue(
+            QueueName='test-queue',
+            Attributes={
+                'RedrivePolicy': json.dumps(redrive_policy)
+            }
+        )
+
+
+@mock_sqs
+def test_redrive_policy_set_attributes():
+    sqs = boto3.resource('sqs', region_name='us-east-1')
+
+    queue = sqs.create_queue(QueueName='test-queue')
+    deadletter_queue = sqs.create_queue(QueueName='test-deadletter')
+
+    redrive_policy = {
+        'deadLetterTargetArn': deadletter_queue.attributes['QueueArn'],
+        'maxReceiveCount': 1,
+    }
+
+    queue.set_attributes(Attributes={
+        'RedrivePolicy': json.dumps(redrive_policy)})
+
+    copy = sqs.get_queue_by_name(QueueName='test-queue')
+    assert 'RedrivePolicy' in copy.attributes
+    copy_policy = json.loads(copy.attributes['RedrivePolicy'])
+    assert copy_policy == redrive_policy
+
+
+@mock_sqs
+def test_receive_messages_with_message_group_id():
+    sqs = boto3.resource('sqs', region_name='us-east-1')
+    queue = sqs.create_queue(QueueName="test-queue.fifo",
+                             Attributes={
+                                 'FifoQueue': 'true',
+                             })
+    queue.set_attributes(Attributes={"VisibilityTimeout": "3600"})
+    queue.send_message(
+        MessageBody="message-1",
+        MessageGroupId="group"
+    )
+    queue.send_message(
+        MessageBody="message-2",
+        MessageGroupId="group"
+    )
+
+    messages = queue.receive_messages()
+    messages.should.have.length_of(1)
+    message = messages[0]
+
+    # received message is not deleted!
+
+    messages = queue.receive_messages(WaitTimeSeconds=0)
+    messages.should.have.length_of(0)
+
+    # message is now processed, next one should be available
+    message.delete()
+    messages = queue.receive_messages()
+    messages.should.have.length_of(1)
+
+
+@mock_sqs
+def test_receive_messages_with_message_group_id_on_requeue():
+    sqs = boto3.resource('sqs', region_name='us-east-1')
+    queue = sqs.create_queue(QueueName="test-queue.fifo",
+                             Attributes={
+                                 'FifoQueue': 'true',
+                             })
+    queue.set_attributes(Attributes={"VisibilityTimeout": "3600"})
+    queue.send_message(
+        MessageBody="message-1",
+        MessageGroupId="group"
+    )
+    queue.send_message(
+        MessageBody="message-2",
+        MessageGroupId="group"
+    )
+
+    messages = queue.receive_messages()
+    messages.should.have.length_of(1)
+    message = messages[0]
+
+    # received message is not deleted!
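+    # NOTE: this relies on the FIFO semantics modelled here (an assumption,
+    # not a documented guarantee): while one message of a group is in flight,
+    # the rest of the group stays hidden, so the receive below returns nothing
+    # even though message-2 is already queued.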
+
+    messages = queue.receive_messages(WaitTimeSeconds=0)
+    messages.should.have.length_of(0)
+
+    # message is now available again, next one should be available
+    message.change_visibility(VisibilityTimeout=0)
+    messages = queue.receive_messages()
+    messages.should.have.length_of(1)
+    messages[0].message_id.should.equal(message.message_id)
+
+
+@mock_sqs
+def test_receive_messages_with_message_group_id_on_visibility_timeout():
+    if os.environ.get('TEST_SERVER_MODE', 'false').lower() == 'true':
+        raise SkipTest("Can't manipulate time in server mode")
+
+    with freeze_time("2015-01-01 12:00:00"):
+        sqs = boto3.resource('sqs', region_name='us-east-1')
+        queue = sqs.create_queue(QueueName="test-queue.fifo",
+                                 Attributes={
+                                     'FifoQueue': 'true',
+                                 })
+        queue.set_attributes(Attributes={"VisibilityTimeout": "3600"})
+        queue.send_message(
+            MessageBody="message-1",
+            MessageGroupId="group"
+        )
+        queue.send_message(
+            MessageBody="message-2",
+            MessageGroupId="group"
+        )
+
+        messages = queue.receive_messages()
+        messages.should.have.length_of(1)
+        message = messages[0]
+
+        # received message is not deleted!
+
+        messages = queue.receive_messages(WaitTimeSeconds=0)
+        messages.should.have.length_of(0)
+
+        message.change_visibility(VisibilityTimeout=10)
+
+    with freeze_time("2015-01-01 12:00:05"):
+        # no timeout yet
+        messages = queue.receive_messages(WaitTimeSeconds=0)
+        messages.should.have.length_of(0)
+
+    with freeze_time("2015-01-01 12:00:15"):
+        # message is now available again, next one should be available
+        messages = queue.receive_messages()
+        messages.should.have.length_of(1)
+        messages[0].message_id.should.equal(message.message_id)
+
+@mock_sqs
+def test_receive_message_for_queue_with_receive_message_wait_time_seconds_set():
+    sqs = boto3.resource('sqs', region_name='us-east-1')
+
+    queue = sqs.create_queue(
+        QueueName='test-queue',
+        Attributes={
+            'ReceiveMessageWaitTimeSeconds': '2',
+        }
+    )
+
+    queue.receive_messages()
diff --git a/tests/test_ssm/test_ssm_boto3.py b/tests/test_ssm/test_ssm_boto3.py
index f8ef3a237825..7f25ac61beb5 100644
--- a/tests/test_ssm/test_ssm_boto3.py
+++ b/tests/test_ssm/test_ssm_boto3.py
@@ -1,786 +1,786 @@
-from __future__ import unicode_literals
-
-import boto3
-import botocore.exceptions
-import sure  # noqa
-import datetime
-import uuid
-import json
-
-from botocore.exceptions import ClientError
-from nose.tools import assert_raises
-
-from moto import mock_ssm, mock_cloudformation
-
-
-@mock_ssm
-def test_delete_parameter():
-    client = boto3.client('ssm', region_name='us-east-1')
-
-    client.put_parameter(
-        Name='test',
-        Description='A test parameter',
-        Value='value',
-        Type='String')
-
-    response = client.get_parameters(Names=['test'])
-    len(response['Parameters']).should.equal(1)
-
-    client.delete_parameter(Name='test')
-
-    response = client.get_parameters(Names=['test'])
-    len(response['Parameters']).should.equal(0)
-
-
-@mock_ssm
-def test_delete_parameters():
-    client = boto3.client('ssm', region_name='us-east-1')
-
-    client.put_parameter(
-        Name='test',
-        Description='A test parameter',
-        Value='value',
-        Type='String')
-
-    response = client.get_parameters(Names=['test'])
-    len(response['Parameters']).should.equal(1)
-
-    result = client.delete_parameters(Names=['test', 'invalid'])
-    len(result['DeletedParameters']).should.equal(1)
-    len(result['InvalidParameters']).should.equal(1)
-
-    response = client.get_parameters(Names=['test'])
-    len(response['Parameters']).should.equal(0)
-
-
-@mock_ssm
-def test_get_parameters_by_path():
-    client =
boto3.client('ssm', region_name='us-east-1') - - client.put_parameter( - Name='/foo/name1', - Description='A test parameter', - Value='value1', - Type='String') - - client.put_parameter( - Name='/foo/name2', - Description='A test parameter', - Value='value2', - Type='String') - - client.put_parameter( - Name='/bar/name3', - Description='A test parameter', - Value='value3', - Type='String') - - client.put_parameter( - Name='/bar/name3/name4', - Description='A test parameter', - Value='value4', - Type='String') - - client.put_parameter( - Name='/baz/name1', - Description='A test parameter (list)', - Value='value1,value2,value3', - Type='StringList') - - client.put_parameter( - Name='/baz/name2', - Description='A test parameter', - Value='value1', - Type='String') - - client.put_parameter( - Name='/baz/pwd', - Description='A secure test parameter', - Value='my_secret', - Type='SecureString', - KeyId='alias/aws/ssm') - - client.put_parameter( - Name='foo', - Description='A test parameter', - Value='bar', - Type='String') - - client.put_parameter( - Name='baz', - Description='A test parameter', - Value='qux', - Type='String') - - response = client.get_parameters_by_path(Path='/', Recursive=False) - len(response['Parameters']).should.equal(2) - {p['Value'] for p in response['Parameters']}.should.equal( - set(['bar', 'qux']) - ) - - response = client.get_parameters_by_path(Path='/', Recursive=True) - len(response['Parameters']).should.equal(9) - - response = client.get_parameters_by_path(Path='/foo') - len(response['Parameters']).should.equal(2) - {p['Value'] for p in response['Parameters']}.should.equal( - set(['value1', 'value2']) - ) - - response = client.get_parameters_by_path(Path='/bar', Recursive=False) - len(response['Parameters']).should.equal(1) - response['Parameters'][0]['Value'].should.equal('value3') - - response = client.get_parameters_by_path(Path='/bar', Recursive=True) - len(response['Parameters']).should.equal(2) - {p['Value'] for p in response['Parameters']}.should.equal( - set(['value3', 'value4']) - ) - - response = client.get_parameters_by_path(Path='/baz') - len(response['Parameters']).should.equal(3) - - filters = [{ - 'Key': 'Type', - 'Option': 'Equals', - 'Values': ['StringList'], - }] - response = client.get_parameters_by_path(Path='/baz', ParameterFilters=filters) - len(response['Parameters']).should.equal(1) - {p['Name'] for p in response['Parameters']}.should.equal( - set(['/baz/name1']) - ) - - # note: 'Option' is optional (default: 'Equals') - filters = [{ - 'Key': 'Type', - 'Values': ['StringList'], - }] - response = client.get_parameters_by_path(Path='/baz', ParameterFilters=filters) - len(response['Parameters']).should.equal(1) - {p['Name'] for p in response['Parameters']}.should.equal( - set(['/baz/name1']) - ) - - filters = [{ - 'Key': 'Type', - 'Option': 'Equals', - 'Values': ['String'], - }] - response = client.get_parameters_by_path(Path='/baz', ParameterFilters=filters) - len(response['Parameters']).should.equal(1) - {p['Name'] for p in response['Parameters']}.should.equal( - set(['/baz/name2']) - ) - - filters = [{ - 'Key': 'Type', - 'Option': 'Equals', - 'Values': ['String', 'SecureString'], - }] - response = client.get_parameters_by_path(Path='/baz', ParameterFilters=filters) - len(response['Parameters']).should.equal(2) - {p['Name'] for p in response['Parameters']}.should.equal( - set(['/baz/name2', '/baz/pwd']) - ) - - filters = [{ - 'Key': 'Type', - 'Option': 'BeginsWith', - 'Values': ['String'], - }] - response = 
client.get_parameters_by_path(Path='/baz', ParameterFilters=filters) - len(response['Parameters']).should.equal(2) - {p['Name'] for p in response['Parameters']}.should.equal( - set(['/baz/name1', '/baz/name2']) - ) - - filters = [{ - 'Key': 'KeyId', - 'Option': 'Equals', - 'Values': ['alias/aws/ssm'], - }] - response = client.get_parameters_by_path(Path='/baz', ParameterFilters=filters) - len(response['Parameters']).should.equal(1) - {p['Name'] for p in response['Parameters']}.should.equal( - set(['/baz/pwd']) - ) - - -@mock_ssm -def test_put_parameter(): - client = boto3.client('ssm', region_name='us-east-1') - - response = client.put_parameter( - Name='test', - Description='A test parameter', - Value='value', - Type='String') - - response['Version'].should.equal(1) - - response = client.get_parameters( - Names=[ - 'test' - ], - WithDecryption=False) - - len(response['Parameters']).should.equal(1) - response['Parameters'][0]['Name'].should.equal('test') - response['Parameters'][0]['Value'].should.equal('value') - response['Parameters'][0]['Type'].should.equal('String') - response['Parameters'][0]['Version'].should.equal(1) - - try: - client.put_parameter( - Name='test', - Description='desc 2', - Value='value 2', - Type='String') - raise RuntimeError('Should fail') - except botocore.exceptions.ClientError as err: - err.operation_name.should.equal('PutParameter') - err.response['Error']['Message'].should.equal('Parameter test already exists.') - - response = client.get_parameters( - Names=[ - 'test' - ], - WithDecryption=False) - - # without overwrite nothing change - len(response['Parameters']).should.equal(1) - response['Parameters'][0]['Name'].should.equal('test') - response['Parameters'][0]['Value'].should.equal('value') - response['Parameters'][0]['Type'].should.equal('String') - response['Parameters'][0]['Version'].should.equal(1) - - response = client.put_parameter( - Name='test', - Description='desc 3', - Value='value 3', - Type='String', - Overwrite=True) - - response['Version'].should.equal(2) - - response = client.get_parameters( - Names=[ - 'test' - ], - WithDecryption=False) - - # without overwrite nothing change - len(response['Parameters']).should.equal(1) - response['Parameters'][0]['Name'].should.equal('test') - response['Parameters'][0]['Value'].should.equal('value 3') - response['Parameters'][0]['Type'].should.equal('String') - response['Parameters'][0]['Version'].should.equal(2) - - -@mock_ssm -def test_get_parameter(): - client = boto3.client('ssm', region_name='us-east-1') - - client.put_parameter( - Name='test', - Description='A test parameter', - Value='value', - Type='String') - - response = client.get_parameter( - Name='test', - WithDecryption=False) - - response['Parameter']['Name'].should.equal('test') - response['Parameter']['Value'].should.equal('value') - response['Parameter']['Type'].should.equal('String') - - -@mock_ssm -def test_get_nonexistant_parameter(): - client = boto3.client('ssm', region_name='us-east-1') - - try: - client.get_parameter( - Name='test_noexist', - WithDecryption=False) - raise RuntimeError('Should of failed') - except botocore.exceptions.ClientError as err: - err.operation_name.should.equal('GetParameter') - err.response['Error']['Message'].should.equal('Parameter test_noexist not found.') - - -@mock_ssm -def test_describe_parameters(): - client = boto3.client('ssm', region_name='us-east-1') - - client.put_parameter( - Name='test', - Description='A test parameter', - Value='value', - Type='String') - - response = 
client.describe_parameters() - - len(response['Parameters']).should.equal(1) - response['Parameters'][0]['Name'].should.equal('test') - response['Parameters'][0]['Type'].should.equal('String') - - -@mock_ssm -def test_describe_parameters_paging(): - client = boto3.client('ssm', region_name='us-east-1') - - for i in range(50): - client.put_parameter( - Name="param-%d" % i, - Value="value-%d" % i, - Type="String" - ) - - response = client.describe_parameters() - len(response['Parameters']).should.equal(10) - response['NextToken'].should.equal('10') - - response = client.describe_parameters(NextToken=response['NextToken']) - len(response['Parameters']).should.equal(10) - response['NextToken'].should.equal('20') - - response = client.describe_parameters(NextToken=response['NextToken']) - len(response['Parameters']).should.equal(10) - response['NextToken'].should.equal('30') - - response = client.describe_parameters(NextToken=response['NextToken']) - len(response['Parameters']).should.equal(10) - response['NextToken'].should.equal('40') - - response = client.describe_parameters(NextToken=response['NextToken']) - len(response['Parameters']).should.equal(10) - response['NextToken'].should.equal('50') - - response = client.describe_parameters(NextToken=response['NextToken']) - len(response['Parameters']).should.equal(0) - ''.should.equal(response.get('NextToken', '')) - - -@mock_ssm -def test_describe_parameters_filter_names(): - client = boto3.client('ssm', region_name='us-east-1') - - for i in range(50): - p = { - 'Name': "param-%d" % i, - 'Value': "value-%d" % i, - 'Type': "String" - } - if i % 5 == 0: - p['Type'] = 'SecureString' - p['KeyId'] = 'a key' - client.put_parameter(**p) - - response = client.describe_parameters(Filters=[ - { - 'Key': 'Name', - 'Values': ['param-22'] - }, - ]) - len(response['Parameters']).should.equal(1) - response['Parameters'][0]['Name'].should.equal('param-22') - response['Parameters'][0]['Type'].should.equal('String') - ''.should.equal(response.get('NextToken', '')) - - -@mock_ssm -def test_describe_parameters_filter_type(): - client = boto3.client('ssm', region_name='us-east-1') - - for i in range(50): - p = { - 'Name': "param-%d" % i, - 'Value': "value-%d" % i, - 'Type': "String" - } - if i % 5 == 0: - p['Type'] = 'SecureString' - p['KeyId'] = 'a key' - client.put_parameter(**p) - - response = client.describe_parameters(Filters=[ - { - 'Key': 'Type', - 'Values': ['SecureString'] - }, - ]) - len(response['Parameters']).should.equal(10) - response['Parameters'][0]['Type'].should.equal('SecureString') - '10'.should.equal(response.get('NextToken', '')) - - -@mock_ssm -def test_describe_parameters_filter_keyid(): - client = boto3.client('ssm', region_name='us-east-1') - - for i in range(50): - p = { - 'Name': "param-%d" % i, - 'Value': "value-%d" % i, - 'Type': "String" - } - if i % 5 == 0: - p['Type'] = 'SecureString' - p['KeyId'] = "key:%d" % i - client.put_parameter(**p) - - response = client.describe_parameters(Filters=[ - { - 'Key': 'KeyId', - 'Values': ['key:10'] - }, - ]) - len(response['Parameters']).should.equal(1) - response['Parameters'][0]['Name'].should.equal('param-10') - response['Parameters'][0]['Type'].should.equal('SecureString') - ''.should.equal(response.get('NextToken', '')) - - -@mock_ssm -def test_describe_parameters_attributes(): - client = boto3.client('ssm', region_name='us-east-1') - - client.put_parameter( - Name='aa', - Value='11', - Type='String', - Description='my description' - ) - - client.put_parameter( - Name='bb', - Value='22', - 
Type='String' - ) - - response = client.describe_parameters() - len(response['Parameters']).should.equal(2) - - response['Parameters'][0]['Description'].should.equal('my description') - response['Parameters'][0]['Version'].should.equal(1) - response['Parameters'][0]['LastModifiedDate'].should.be.a(datetime.date) - response['Parameters'][0]['LastModifiedUser'].should.equal('N/A') - - response['Parameters'][1].get('Description').should.be.none - response['Parameters'][1]['Version'].should.equal(1) - - -@mock_ssm -def test_get_parameter_invalid(): - client = client = boto3.client('ssm', region_name='us-east-1') - response = client.get_parameters( - Names=[ - 'invalid' - ], - WithDecryption=False) - - len(response['Parameters']).should.equal(0) - len(response['InvalidParameters']).should.equal(1) - response['InvalidParameters'][0].should.equal('invalid') - - -@mock_ssm -def test_put_parameter_secure_default_kms(): - client = boto3.client('ssm', region_name='us-east-1') - - client.put_parameter( - Name='test', - Description='A test parameter', - Value='value', - Type='SecureString') - - response = client.get_parameters( - Names=[ - 'test' - ], - WithDecryption=False) - - len(response['Parameters']).should.equal(1) - response['Parameters'][0]['Name'].should.equal('test') - response['Parameters'][0]['Value'].should.equal('kms:default:value') - response['Parameters'][0]['Type'].should.equal('SecureString') - - response = client.get_parameters( - Names=[ - 'test' - ], - WithDecryption=True) - - len(response['Parameters']).should.equal(1) - response['Parameters'][0]['Name'].should.equal('test') - response['Parameters'][0]['Value'].should.equal('value') - response['Parameters'][0]['Type'].should.equal('SecureString') - - -@mock_ssm -def test_put_parameter_secure_custom_kms(): - client = boto3.client('ssm', region_name='us-east-1') - - client.put_parameter( - Name='test', - Description='A test parameter', - Value='value', - Type='SecureString', - KeyId='foo') - - response = client.get_parameters( - Names=[ - 'test' - ], - WithDecryption=False) - - len(response['Parameters']).should.equal(1) - response['Parameters'][0]['Name'].should.equal('test') - response['Parameters'][0]['Value'].should.equal('kms:foo:value') - response['Parameters'][0]['Type'].should.equal('SecureString') - - response = client.get_parameters( - Names=[ - 'test' - ], - WithDecryption=True) - - len(response['Parameters']).should.equal(1) - response['Parameters'][0]['Name'].should.equal('test') - response['Parameters'][0]['Value'].should.equal('value') - response['Parameters'][0]['Type'].should.equal('SecureString') - - -@mock_ssm -def test_add_remove_list_tags_for_resource(): - client = boto3.client('ssm', region_name='us-east-1') - - client.add_tags_to_resource( - ResourceId='test', - ResourceType='Parameter', - Tags=[{'Key': 'test-key', 'Value': 'test-value'}] - ) - - response = client.list_tags_for_resource( - ResourceId='test', - ResourceType='Parameter' - ) - len(response['TagList']).should.equal(1) - response['TagList'][0]['Key'].should.equal('test-key') - response['TagList'][0]['Value'].should.equal('test-value') - - client.remove_tags_from_resource( - ResourceId='test', - ResourceType='Parameter', - TagKeys=['test-key'] - ) - - response = client.list_tags_for_resource( - ResourceId='test', - ResourceType='Parameter' - ) - len(response['TagList']).should.equal(0) - - -@mock_ssm -def test_send_command(): - ssm_document = 'AWS-RunShellScript' - params = {'commands': ['#!/bin/bash\necho \'hello world\'']} - - client = 
boto3.client('ssm', region_name='us-east-1') - # note the timeout is determined server side, so this is a simpler check. - before = datetime.datetime.now() - - response = client.send_command( - InstanceIds=['i-123456'], - DocumentName=ssm_document, - Parameters=params, - OutputS3Region='us-east-2', - OutputS3BucketName='the-bucket', - OutputS3KeyPrefix='pref' - ) - cmd = response['Command'] - - cmd['CommandId'].should_not.be(None) - cmd['DocumentName'].should.equal(ssm_document) - cmd['Parameters'].should.equal(params) - - cmd['OutputS3Region'].should.equal('us-east-2') - cmd['OutputS3BucketName'].should.equal('the-bucket') - cmd['OutputS3KeyPrefix'].should.equal('pref') - - cmd['ExpiresAfter'].should.be.greater_than(before) - - # test sending a command without any optional parameters - response = client.send_command( - DocumentName=ssm_document) - - cmd = response['Command'] - - cmd['CommandId'].should_not.be(None) - cmd['DocumentName'].should.equal(ssm_document) - - -@mock_ssm -def test_list_commands(): - client = boto3.client('ssm', region_name='us-east-1') - - ssm_document = 'AWS-RunShellScript' - params = {'commands': ['#!/bin/bash\necho \'hello world\'']} - - response = client.send_command( - InstanceIds=['i-123456'], - DocumentName=ssm_document, - Parameters=params, - OutputS3Region='us-east-2', - OutputS3BucketName='the-bucket', - OutputS3KeyPrefix='pref') - - cmd = response['Command'] - cmd_id = cmd['CommandId'] - - # get the command by id - response = client.list_commands( - CommandId=cmd_id) - - cmds = response['Commands'] - len(cmds).should.equal(1) - cmds[0]['CommandId'].should.equal(cmd_id) - - # add another command with the same instance id to test listing by - # instance id - client.send_command( - InstanceIds=['i-123456'], - DocumentName=ssm_document) - - response = client.list_commands( - InstanceId='i-123456') - - cmds = response['Commands'] - len(cmds).should.equal(2) - - for cmd in cmds: - cmd['InstanceIds'].should.contain('i-123456') - - # test the error case for an invalid command id - with assert_raises(ClientError): - response = client.list_commands( - CommandId=str(uuid.uuid4())) - -@mock_ssm -def test_get_command_invocation(): - client = boto3.client('ssm', region_name='us-east-1') - - ssm_document = 'AWS-RunShellScript' - params = {'commands': ['#!/bin/bash\necho \'hello world\'']} - - response = client.send_command( - InstanceIds=['i-123456', 'i-234567', 'i-345678'], - DocumentName=ssm_document, - Parameters=params, - OutputS3Region='us-east-2', - OutputS3BucketName='the-bucket', - OutputS3KeyPrefix='pref') - - cmd = response['Command'] - cmd_id = cmd['CommandId'] - - instance_id = 'i-345678' - invocation_response = client.get_command_invocation( - CommandId=cmd_id, - InstanceId=instance_id, - PluginName='aws:runShellScript') - - invocation_response['CommandId'].should.equal(cmd_id) - invocation_response['InstanceId'].should.equal(instance_id) - - # test the error case for an invalid instance id - with assert_raises(ClientError): - invocation_response = client.get_command_invocation( - CommandId=cmd_id, - InstanceId='i-FAKE') - - # test the error case for an invalid plugin name - with assert_raises(ClientError): - invocation_response = client.get_command_invocation( - CommandId=cmd_id, - InstanceId=instance_id, - PluginName='FAKE') - -@mock_ssm -@mock_cloudformation -def test_get_command_invocations_from_stack(): - stack_template = { - "AWSTemplateFormatVersion": "2010-09-09", - "Description": "Test Stack", - "Resources": { - "EC2Instance1": { - "Type": 
"AWS::EC2::Instance", - "Properties": { - "ImageId": "ami-test-image-id", - "KeyName": "test", - "InstanceType": "t2.micro", - "Tags": [ - { - "Key": "Test Description", - "Value": "Test tag" - }, - { - "Key": "Test Name", - "Value": "Name tag for tests" - } - ] - } - } - }, - "Outputs": { - "test": { - "Description": "Test Output", - "Value": "Test output value", - "Export": { - "Name": "Test value to export" - } - }, - "PublicIP": { - "Value": "Test public ip" - } - } - } - - cloudformation_client = boto3.client( - 'cloudformation', - region_name='us-east-1') - - stack_template_str = json.dumps(stack_template) - - response = cloudformation_client.create_stack( - StackName='test_stack', - TemplateBody=stack_template_str, - Capabilities=('CAPABILITY_IAM', )) - - client = boto3.client('ssm', region_name='us-east-1') - - ssm_document = 'AWS-RunShellScript' - params = {'commands': ['#!/bin/bash\necho \'hello world\'']} - - response = client.send_command( - Targets=[{ - 'Key': 'tag:aws:cloudformation:stack-name', - 'Values': ('test_stack', )}], - DocumentName=ssm_document, - Parameters=params, - OutputS3Region='us-east-2', - OutputS3BucketName='the-bucket', - OutputS3KeyPrefix='pref') - - cmd = response['Command'] - cmd_id = cmd['CommandId'] - instance_ids = cmd['InstanceIds'] - - invocation_response = client.get_command_invocation( - CommandId=cmd_id, - InstanceId=instance_ids[0], - PluginName='aws:runShellScript') +from __future__ import unicode_literals + +import boto3 +import botocore.exceptions +import sure # noqa +import datetime +import uuid +import json + +from botocore.exceptions import ClientError +from nose.tools import assert_raises + +from moto import mock_ssm, mock_cloudformation + + +@mock_ssm +def test_delete_parameter(): + client = boto3.client('ssm', region_name='us-east-1') + + client.put_parameter( + Name='test', + Description='A test parameter', + Value='value', + Type='String') + + response = client.get_parameters(Names=['test']) + len(response['Parameters']).should.equal(1) + + client.delete_parameter(Name='test') + + response = client.get_parameters(Names=['test']) + len(response['Parameters']).should.equal(0) + + +@mock_ssm +def test_delete_parameters(): + client = boto3.client('ssm', region_name='us-east-1') + + client.put_parameter( + Name='test', + Description='A test parameter', + Value='value', + Type='String') + + response = client.get_parameters(Names=['test']) + len(response['Parameters']).should.equal(1) + + result = client.delete_parameters(Names=['test', 'invalid']) + len(result['DeletedParameters']).should.equal(1) + len(result['InvalidParameters']).should.equal(1) + + response = client.get_parameters(Names=['test']) + len(response['Parameters']).should.equal(0) + + +@mock_ssm +def test_get_parameters_by_path(): + client = boto3.client('ssm', region_name='us-east-1') + + client.put_parameter( + Name='/foo/name1', + Description='A test parameter', + Value='value1', + Type='String') + + client.put_parameter( + Name='/foo/name2', + Description='A test parameter', + Value='value2', + Type='String') + + client.put_parameter( + Name='/bar/name3', + Description='A test parameter', + Value='value3', + Type='String') + + client.put_parameter( + Name='/bar/name3/name4', + Description='A test parameter', + Value='value4', + Type='String') + + client.put_parameter( + Name='/baz/name1', + Description='A test parameter (list)', + Value='value1,value2,value3', + Type='StringList') + + client.put_parameter( + Name='/baz/name2', + Description='A test parameter', + 
Value='value1', + Type='String') + + client.put_parameter( + Name='/baz/pwd', + Description='A secure test parameter', + Value='my_secret', + Type='SecureString', + KeyId='alias/aws/ssm') + + client.put_parameter( + Name='foo', + Description='A test parameter', + Value='bar', + Type='String') + + client.put_parameter( + Name='baz', + Description='A test parameter', + Value='qux', + Type='String') + + response = client.get_parameters_by_path(Path='/', Recursive=False) + len(response['Parameters']).should.equal(2) + {p['Value'] for p in response['Parameters']}.should.equal( + set(['bar', 'qux']) + ) + + response = client.get_parameters_by_path(Path='/', Recursive=True) + len(response['Parameters']).should.equal(9) + + response = client.get_parameters_by_path(Path='/foo') + len(response['Parameters']).should.equal(2) + {p['Value'] for p in response['Parameters']}.should.equal( + set(['value1', 'value2']) + ) + + response = client.get_parameters_by_path(Path='/bar', Recursive=False) + len(response['Parameters']).should.equal(1) + response['Parameters'][0]['Value'].should.equal('value3') + + response = client.get_parameters_by_path(Path='/bar', Recursive=True) + len(response['Parameters']).should.equal(2) + {p['Value'] for p in response['Parameters']}.should.equal( + set(['value3', 'value4']) + ) + + response = client.get_parameters_by_path(Path='/baz') + len(response['Parameters']).should.equal(3) + + filters = [{ + 'Key': 'Type', + 'Option': 'Equals', + 'Values': ['StringList'], + }] + response = client.get_parameters_by_path(Path='/baz', ParameterFilters=filters) + len(response['Parameters']).should.equal(1) + {p['Name'] for p in response['Parameters']}.should.equal( + set(['/baz/name1']) + ) + + # note: 'Option' is optional (default: 'Equals') + filters = [{ + 'Key': 'Type', + 'Values': ['StringList'], + }] + response = client.get_parameters_by_path(Path='/baz', ParameterFilters=filters) + len(response['Parameters']).should.equal(1) + {p['Name'] for p in response['Parameters']}.should.equal( + set(['/baz/name1']) + ) + + filters = [{ + 'Key': 'Type', + 'Option': 'Equals', + 'Values': ['String'], + }] + response = client.get_parameters_by_path(Path='/baz', ParameterFilters=filters) + len(response['Parameters']).should.equal(1) + {p['Name'] for p in response['Parameters']}.should.equal( + set(['/baz/name2']) + ) + + filters = [{ + 'Key': 'Type', + 'Option': 'Equals', + 'Values': ['String', 'SecureString'], + }] + response = client.get_parameters_by_path(Path='/baz', ParameterFilters=filters) + len(response['Parameters']).should.equal(2) + {p['Name'] for p in response['Parameters']}.should.equal( + set(['/baz/name2', '/baz/pwd']) + ) + + filters = [{ + 'Key': 'Type', + 'Option': 'BeginsWith', + 'Values': ['String'], + }] + response = client.get_parameters_by_path(Path='/baz', ParameterFilters=filters) + len(response['Parameters']).should.equal(2) + {p['Name'] for p in response['Parameters']}.should.equal( + set(['/baz/name1', '/baz/name2']) + ) + + filters = [{ + 'Key': 'KeyId', + 'Option': 'Equals', + 'Values': ['alias/aws/ssm'], + }] + response = client.get_parameters_by_path(Path='/baz', ParameterFilters=filters) + len(response['Parameters']).should.equal(1) + {p['Name'] for p in response['Parameters']}.should.equal( + set(['/baz/pwd']) + ) + + +@mock_ssm +def test_put_parameter(): + client = boto3.client('ssm', region_name='us-east-1') + + response = client.put_parameter( + Name='test', + Description='A test parameter', + Value='value', + Type='String') + + 
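# A freshly created parameter is stored at version 1; put_parameter
+    # reports that version back in its response, as asserted below.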
response['Version'].should.equal(1)
+
+    response = client.get_parameters(
+        Names=[
+            'test'
+        ],
+        WithDecryption=False)
+
+    len(response['Parameters']).should.equal(1)
+    response['Parameters'][0]['Name'].should.equal('test')
+    response['Parameters'][0]['Value'].should.equal('value')
+    response['Parameters'][0]['Type'].should.equal('String')
+    response['Parameters'][0]['Version'].should.equal(1)
+
+    try:
+        client.put_parameter(
+            Name='test',
+            Description='desc 2',
+            Value='value 2',
+            Type='String')
+        raise RuntimeError('Should fail')
+    except botocore.exceptions.ClientError as err:
+        err.operation_name.should.equal('PutParameter')
+        err.response['Error']['Message'].should.equal('Parameter test already exists.')
+
+    response = client.get_parameters(
+        Names=[
+            'test'
+        ],
+        WithDecryption=False)
+
+    # without Overwrite, nothing changes
+    len(response['Parameters']).should.equal(1)
+    response['Parameters'][0]['Name'].should.equal('test')
+    response['Parameters'][0]['Value'].should.equal('value')
+    response['Parameters'][0]['Type'].should.equal('String')
+    response['Parameters'][0]['Version'].should.equal(1)
+
+    response = client.put_parameter(
+        Name='test',
+        Description='desc 3',
+        Value='value 3',
+        Type='String',
+        Overwrite=True)
+
+    response['Version'].should.equal(2)
+
+    response = client.get_parameters(
+        Names=[
+            'test'
+        ],
+        WithDecryption=False)
+
+    # with Overwrite, both the value and the version change
+    len(response['Parameters']).should.equal(1)
+    response['Parameters'][0]['Name'].should.equal('test')
+    response['Parameters'][0]['Value'].should.equal('value 3')
+    response['Parameters'][0]['Type'].should.equal('String')
+    response['Parameters'][0]['Version'].should.equal(2)
+
+
+@mock_ssm
+def test_get_parameter():
+    client = boto3.client('ssm', region_name='us-east-1')
+
+    client.put_parameter(
+        Name='test',
+        Description='A test parameter',
+        Value='value',
+        Type='String')
+
+    response = client.get_parameter(
+        Name='test',
+        WithDecryption=False)
+
+    response['Parameter']['Name'].should.equal('test')
+    response['Parameter']['Value'].should.equal('value')
+    response['Parameter']['Type'].should.equal('String')
+
+
+@mock_ssm
+def test_get_nonexistant_parameter():
+    client = boto3.client('ssm', region_name='us-east-1')
+
+    try:
+        client.get_parameter(
+            Name='test_noexist',
+            WithDecryption=False)
+        raise RuntimeError('Should have failed')
+    except botocore.exceptions.ClientError as err:
+        err.operation_name.should.equal('GetParameter')
+        err.response['Error']['Message'].should.equal('Parameter test_noexist not found.')
+
+
+@mock_ssm
+def test_describe_parameters():
+    client = boto3.client('ssm', region_name='us-east-1')
+
+    client.put_parameter(
+        Name='test',
+        Description='A test parameter',
+        Value='value',
+        Type='String')
+
+    response = client.describe_parameters()
+
+    len(response['Parameters']).should.equal(1)
+    response['Parameters'][0]['Name'].should.equal('test')
+    response['Parameters'][0]['Type'].should.equal('String')
+
+
+@mock_ssm
+def test_describe_parameters_paging():
+    client = boto3.client('ssm', region_name='us-east-1')
+
+    for i in range(50):
+        client.put_parameter(
+            Name="param-%d" % i,
+            Value="value-%d" % i,
+            Type="String"
+        )
+
+    response = client.describe_parameters()
+    len(response['Parameters']).should.equal(10)
+    response['NextToken'].should.equal('10')
+
+    response = client.describe_parameters(NextToken=response['NextToken'])
+    len(response['Parameters']).should.equal(10)
+    response['NextToken'].should.equal('20')
+
+    response = client.describe_parameters(NextToken=response['NextToken'])
+    len(response['Parameters']).should.equal(10)
+    response['NextToken'].should.equal('30')
+
+    response = client.describe_parameters(NextToken=response['NextToken'])
+    len(response['Parameters']).should.equal(10)
+    response['NextToken'].should.equal('40')
+
+    response = client.describe_parameters(NextToken=response['NextToken'])
+    len(response['Parameters']).should.equal(10)
+    response['NextToken'].should.equal('50')
+
+    response = client.describe_parameters(NextToken=response['NextToken'])
+    len(response['Parameters']).should.equal(0)
+    ''.should.equal(response.get('NextToken', ''))
+
+
+@mock_ssm
+def test_describe_parameters_filter_names():
+    client = boto3.client('ssm', region_name='us-east-1')
+
+    for i in range(50):
+        p = {
+            'Name': "param-%d" % i,
+            'Value': "value-%d" % i,
+            'Type': "String"
+        }
+        if i % 5 == 0:
+            p['Type'] = 'SecureString'
+            p['KeyId'] = 'a key'
+        client.put_parameter(**p)
+
+    response = client.describe_parameters(Filters=[
+        {
+            'Key': 'Name',
+            'Values': ['param-22']
+        },
+    ])
+    len(response['Parameters']).should.equal(1)
+    response['Parameters'][0]['Name'].should.equal('param-22')
+    response['Parameters'][0]['Type'].should.equal('String')
+    ''.should.equal(response.get('NextToken', ''))
+
+
+@mock_ssm
+def test_describe_parameters_filter_type():
+    client = boto3.client('ssm', region_name='us-east-1')
+
+    for i in range(50):
+        p = {
+            'Name': "param-%d" % i,
+            'Value': "value-%d" % i,
+            'Type': "String"
+        }
+        if i % 5 == 0:
+            p['Type'] = 'SecureString'
+            p['KeyId'] = 'a key'
+        client.put_parameter(**p)
+
+    response = client.describe_parameters(Filters=[
+        {
+            'Key': 'Type',
+            'Values': ['SecureString']
+        },
+    ])
+    len(response['Parameters']).should.equal(10)
+    response['Parameters'][0]['Type'].should.equal('SecureString')
+    '10'.should.equal(response.get('NextToken', ''))
+
+
+@mock_ssm
+def test_describe_parameters_filter_keyid():
+    client = boto3.client('ssm', region_name='us-east-1')
+
+    for i in range(50):
+        p = {
+            'Name': "param-%d" % i,
+            'Value': "value-%d" % i,
+            'Type': "String"
+        }
+        if i % 5 == 0:
+            p['Type'] = 'SecureString'
+            p['KeyId'] = "key:%d" % i
+        client.put_parameter(**p)
+
+    response = client.describe_parameters(Filters=[
+        {
+            'Key': 'KeyId',
+            'Values': ['key:10']
+        },
+    ])
+    len(response['Parameters']).should.equal(1)
+    response['Parameters'][0]['Name'].should.equal('param-10')
+    response['Parameters'][0]['Type'].should.equal('SecureString')
+    ''.should.equal(response.get('NextToken', ''))
+
+
+@mock_ssm
+def test_describe_parameters_attributes():
+    client = boto3.client('ssm', region_name='us-east-1')
+
+    client.put_parameter(
+        Name='aa',
+        Value='11',
+        Type='String',
+        Description='my description'
+    )
+
+    client.put_parameter(
+        Name='bb',
+        Value='22',
+        Type='String'
+    )
+
+    response = client.describe_parameters()
+    len(response['Parameters']).should.equal(2)
+
+    response['Parameters'][0]['Description'].should.equal('my description')
+    response['Parameters'][0]['Version'].should.equal(1)
+    response['Parameters'][0]['LastModifiedDate'].should.be.a(datetime.date)
+    response['Parameters'][0]['LastModifiedUser'].should.equal('N/A')
+
+    response['Parameters'][1].get('Description').should.be.none
+    response['Parameters'][1]['Version'].should.equal(1)
+
+
+@mock_ssm
+def test_get_parameter_invalid():
+    client = boto3.client('ssm', region_name='us-east-1')
+    response = client.get_parameters(
+        Names=[
+            'invalid'
+        ],
+        WithDecryption=False)
+
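+    # Asking for an unknown name is not an error for get_parameters;
+    # the name is reported back in the InvalidParameters list instead.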
len(response['Parameters']).should.equal(0) + len(response['InvalidParameters']).should.equal(1) + response['InvalidParameters'][0].should.equal('invalid') + + +@mock_ssm +def test_put_parameter_secure_default_kms(): + client = boto3.client('ssm', region_name='us-east-1') + + client.put_parameter( + Name='test', + Description='A test parameter', + Value='value', + Type='SecureString') + + response = client.get_parameters( + Names=[ + 'test' + ], + WithDecryption=False) + + len(response['Parameters']).should.equal(1) + response['Parameters'][0]['Name'].should.equal('test') + response['Parameters'][0]['Value'].should.equal('kms:default:value') + response['Parameters'][0]['Type'].should.equal('SecureString') + + response = client.get_parameters( + Names=[ + 'test' + ], + WithDecryption=True) + + len(response['Parameters']).should.equal(1) + response['Parameters'][0]['Name'].should.equal('test') + response['Parameters'][0]['Value'].should.equal('value') + response['Parameters'][0]['Type'].should.equal('SecureString') + + +@mock_ssm +def test_put_parameter_secure_custom_kms(): + client = boto3.client('ssm', region_name='us-east-1') + + client.put_parameter( + Name='test', + Description='A test parameter', + Value='value', + Type='SecureString', + KeyId='foo') + + response = client.get_parameters( + Names=[ + 'test' + ], + WithDecryption=False) + + len(response['Parameters']).should.equal(1) + response['Parameters'][0]['Name'].should.equal('test') + response['Parameters'][0]['Value'].should.equal('kms:foo:value') + response['Parameters'][0]['Type'].should.equal('SecureString') + + response = client.get_parameters( + Names=[ + 'test' + ], + WithDecryption=True) + + len(response['Parameters']).should.equal(1) + response['Parameters'][0]['Name'].should.equal('test') + response['Parameters'][0]['Value'].should.equal('value') + response['Parameters'][0]['Type'].should.equal('SecureString') + + +@mock_ssm +def test_add_remove_list_tags_for_resource(): + client = boto3.client('ssm', region_name='us-east-1') + + client.add_tags_to_resource( + ResourceId='test', + ResourceType='Parameter', + Tags=[{'Key': 'test-key', 'Value': 'test-value'}] + ) + + response = client.list_tags_for_resource( + ResourceId='test', + ResourceType='Parameter' + ) + len(response['TagList']).should.equal(1) + response['TagList'][0]['Key'].should.equal('test-key') + response['TagList'][0]['Value'].should.equal('test-value') + + client.remove_tags_from_resource( + ResourceId='test', + ResourceType='Parameter', + TagKeys=['test-key'] + ) + + response = client.list_tags_for_resource( + ResourceId='test', + ResourceType='Parameter' + ) + len(response['TagList']).should.equal(0) + + +@mock_ssm +def test_send_command(): + ssm_document = 'AWS-RunShellScript' + params = {'commands': ['#!/bin/bash\necho \'hello world\'']} + + client = boto3.client('ssm', region_name='us-east-1') + # note the timeout is determined server side, so this is a simpler check. 
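+    # Record a timestamp up front so we can assert that the server-side
+    # ExpiresAfter value falls after the moment the command was sent.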
+ before = datetime.datetime.now() + + response = client.send_command( + InstanceIds=['i-123456'], + DocumentName=ssm_document, + Parameters=params, + OutputS3Region='us-east-2', + OutputS3BucketName='the-bucket', + OutputS3KeyPrefix='pref' + ) + cmd = response['Command'] + + cmd['CommandId'].should_not.be(None) + cmd['DocumentName'].should.equal(ssm_document) + cmd['Parameters'].should.equal(params) + + cmd['OutputS3Region'].should.equal('us-east-2') + cmd['OutputS3BucketName'].should.equal('the-bucket') + cmd['OutputS3KeyPrefix'].should.equal('pref') + + cmd['ExpiresAfter'].should.be.greater_than(before) + + # test sending a command without any optional parameters + response = client.send_command( + DocumentName=ssm_document) + + cmd = response['Command'] + + cmd['CommandId'].should_not.be(None) + cmd['DocumentName'].should.equal(ssm_document) + + +@mock_ssm +def test_list_commands(): + client = boto3.client('ssm', region_name='us-east-1') + + ssm_document = 'AWS-RunShellScript' + params = {'commands': ['#!/bin/bash\necho \'hello world\'']} + + response = client.send_command( + InstanceIds=['i-123456'], + DocumentName=ssm_document, + Parameters=params, + OutputS3Region='us-east-2', + OutputS3BucketName='the-bucket', + OutputS3KeyPrefix='pref') + + cmd = response['Command'] + cmd_id = cmd['CommandId'] + + # get the command by id + response = client.list_commands( + CommandId=cmd_id) + + cmds = response['Commands'] + len(cmds).should.equal(1) + cmds[0]['CommandId'].should.equal(cmd_id) + + # add another command with the same instance id to test listing by + # instance id + client.send_command( + InstanceIds=['i-123456'], + DocumentName=ssm_document) + + response = client.list_commands( + InstanceId='i-123456') + + cmds = response['Commands'] + len(cmds).should.equal(2) + + for cmd in cmds: + cmd['InstanceIds'].should.contain('i-123456') + + # test the error case for an invalid command id + with assert_raises(ClientError): + response = client.list_commands( + CommandId=str(uuid.uuid4())) + +@mock_ssm +def test_get_command_invocation(): + client = boto3.client('ssm', region_name='us-east-1') + + ssm_document = 'AWS-RunShellScript' + params = {'commands': ['#!/bin/bash\necho \'hello world\'']} + + response = client.send_command( + InstanceIds=['i-123456', 'i-234567', 'i-345678'], + DocumentName=ssm_document, + Parameters=params, + OutputS3Region='us-east-2', + OutputS3BucketName='the-bucket', + OutputS3KeyPrefix='pref') + + cmd = response['Command'] + cmd_id = cmd['CommandId'] + + instance_id = 'i-345678' + invocation_response = client.get_command_invocation( + CommandId=cmd_id, + InstanceId=instance_id, + PluginName='aws:runShellScript') + + invocation_response['CommandId'].should.equal(cmd_id) + invocation_response['InstanceId'].should.equal(instance_id) + + # test the error case for an invalid instance id + with assert_raises(ClientError): + invocation_response = client.get_command_invocation( + CommandId=cmd_id, + InstanceId='i-FAKE') + + # test the error case for an invalid plugin name + with assert_raises(ClientError): + invocation_response = client.get_command_invocation( + CommandId=cmd_id, + InstanceId=instance_id, + PluginName='FAKE') + +@mock_ssm +@mock_cloudformation +def test_get_command_invocations_from_stack(): + stack_template = { + "AWSTemplateFormatVersion": "2010-09-09", + "Description": "Test Stack", + "Resources": { + "EC2Instance1": { + "Type": "AWS::EC2::Instance", + "Properties": { + "ImageId": "ami-test-image-id", + "KeyName": "test", + "InstanceType": "t2.micro", + 
"Tags": [ + { + "Key": "Test Description", + "Value": "Test tag" + }, + { + "Key": "Test Name", + "Value": "Name tag for tests" + } + ] + } + } + }, + "Outputs": { + "test": { + "Description": "Test Output", + "Value": "Test output value", + "Export": { + "Name": "Test value to export" + } + }, + "PublicIP": { + "Value": "Test public ip" + } + } + } + + cloudformation_client = boto3.client( + 'cloudformation', + region_name='us-east-1') + + stack_template_str = json.dumps(stack_template) + + response = cloudformation_client.create_stack( + StackName='test_stack', + TemplateBody=stack_template_str, + Capabilities=('CAPABILITY_IAM', )) + + client = boto3.client('ssm', region_name='us-east-1') + + ssm_document = 'AWS-RunShellScript' + params = {'commands': ['#!/bin/bash\necho \'hello world\'']} + + response = client.send_command( + Targets=[{ + 'Key': 'tag:aws:cloudformation:stack-name', + 'Values': ('test_stack', )}], + DocumentName=ssm_document, + Parameters=params, + OutputS3Region='us-east-2', + OutputS3BucketName='the-bucket', + OutputS3KeyPrefix='pref') + + cmd = response['Command'] + cmd_id = cmd['CommandId'] + instance_ids = cmd['InstanceIds'] + + invocation_response = client.get_command_invocation( + CommandId=cmd_id, + InstanceId=instance_ids[0], + PluginName='aws:runShellScript') diff --git a/tests/test_sts/test_server.py b/tests/test_sts/test_server.py index 40260a49f7d5..1cff6b0af61f 100644 --- a/tests/test_sts/test_server.py +++ b/tests/test_sts/test_server.py @@ -1,39 +1,39 @@ -from __future__ import unicode_literals -import sure # noqa - -import moto.server as server - -''' -Test the different server responses -''' - - -def test_sts_get_session_token(): - backend = server.create_backend_app("sts") - test_client = backend.test_client() - - res = test_client.get('/?Action=GetSessionToken') - res.status_code.should.equal(200) - res.data.should.contain(b"SessionToken") - res.data.should.contain(b"AccessKeyId") - - -def test_sts_get_federation_token(): - backend = server.create_backend_app("sts") - test_client = backend.test_client() - - res = test_client.get('/?Action=GetFederationToken&Name=Bob') - res.status_code.should.equal(200) - res.data.should.contain(b"SessionToken") - res.data.should.contain(b"AccessKeyId") - - -def test_sts_get_caller_identity(): - backend = server.create_backend_app("sts") - test_client = backend.test_client() - - res = test_client.get('/?Action=GetCallerIdentity') - res.status_code.should.equal(200) - res.data.should.contain(b"Arn") - res.data.should.contain(b"UserId") - res.data.should.contain(b"Account") +from __future__ import unicode_literals +import sure # noqa + +import moto.server as server + +''' +Test the different server responses +''' + + +def test_sts_get_session_token(): + backend = server.create_backend_app("sts") + test_client = backend.test_client() + + res = test_client.get('/?Action=GetSessionToken') + res.status_code.should.equal(200) + res.data.should.contain(b"SessionToken") + res.data.should.contain(b"AccessKeyId") + + +def test_sts_get_federation_token(): + backend = server.create_backend_app("sts") + test_client = backend.test_client() + + res = test_client.get('/?Action=GetFederationToken&Name=Bob') + res.status_code.should.equal(200) + res.data.should.contain(b"SessionToken") + res.data.should.contain(b"AccessKeyId") + + +def test_sts_get_caller_identity(): + backend = server.create_backend_app("sts") + test_client = backend.test_client() + + res = test_client.get('/?Action=GetCallerIdentity') + 
res.status_code.should.equal(200) + res.data.should.contain(b"Arn") + res.data.should.contain(b"UserId") + res.data.should.contain(b"Account") diff --git a/tests/test_sts/test_sts.py b/tests/test_sts/test_sts.py index 4e0e526064ce..61ab76a29e66 100644 --- a/tests/test_sts/test_sts.py +++ b/tests/test_sts/test_sts.py @@ -1,84 +1,84 @@ -from __future__ import unicode_literals -import json - -import boto -import boto3 -from freezegun import freeze_time -import sure # noqa - -from moto import mock_sts, mock_sts_deprecated - - -@freeze_time("2012-01-01 12:00:00") -@mock_sts_deprecated -def test_get_session_token(): - conn = boto.connect_sts() - token = conn.get_session_token(duration=123) - - token.expiration.should.equal('2012-01-01T12:02:03.000Z') - token.session_token.should.equal( - "AQoEXAMPLEH4aoAH0gNCAPyJxz4BlCFFxWNE1OPTgk5TthT+FvwqnKwRcOIfrRh3c/LTo6UDdyJwOOvEVPvLXCrrrUtdnniCEXAMPLE/IvU1dYUg2RVAJBanLiHb4IgRmpRV3zrkuWJOgQs8IZZaIv2BXIa2R4OlgkBN9bkUDNCJiBeb/AXlzBBko7b15fjrBs2+cTQtpZ3CYWFXG8C5zqx37wnOE49mRl/+OtkIKGO7fAE") - token.access_key.should.equal("AKIAIOSFODNN7EXAMPLE") - token.secret_key.should.equal("wJalrXUtnFEMI/K7MDENG/bPxRfiCYzEXAMPLEKEY") - - -@freeze_time("2012-01-01 12:00:00") -@mock_sts_deprecated -def test_get_federation_token(): - conn = boto.connect_sts() - token = conn.get_federation_token(duration=123, name="Bob") - - token.credentials.expiration.should.equal('2012-01-01T12:02:03.000Z') - token.credentials.session_token.should.equal( - "AQoDYXdzEPT//////////wEXAMPLEtc764bNrC9SAPBSM22wDOk4x4HIZ8j4FZTwdQWLWsKWHGBuFqwAeMicRXmxfpSPfIeoIYRqTflfKD8YUuwthAx7mSEI/qkPpKPi/kMcGdQrmGdeehM4IC1NtBmUpp2wUE8phUZampKsburEDy0KPkyQDYwT7WZ0wq5VSXDvp75YU9HFvlRd8Tx6q6fE8YQcHNVXAkiY9q6d+xo0rKwT38xVqr7ZD0u0iPPkUL64lIZbqBAz+scqKmlzm8FDrypNC9Yjc8fPOLn9FX9KSYvKTr4rvx3iSIlTJabIQwj2ICCR/oLxBA==") - token.credentials.access_key.should.equal("AKIAIOSFODNN7EXAMPLE") - token.credentials.secret_key.should.equal( - "wJalrXUtnFEMI/K7MDENG/bPxRfiCYzEXAMPLEKEY") - token.federated_user_arn.should.equal( - "arn:aws:sts::123456789012:federated-user/Bob") - token.federated_user_id.should.equal("123456789012:Bob") - - -@freeze_time("2012-01-01 12:00:00") -@mock_sts_deprecated -def test_assume_role(): - conn = boto.connect_sts() - - policy = json.dumps({ - "Statement": [ - { - "Sid": "Stmt13690092345534", - "Action": [ - "S3:ListBucket" - ], - "Effect": "Allow", - "Resource": [ - "arn:aws:s3:::foobar-tester" - ] - }, - ] - }) - s3_role = "arn:aws:iam::123456789012:role/test-role" - role = conn.assume_role(s3_role, "session-name", - policy, duration_seconds=123) - - credentials = role.credentials - credentials.expiration.should.equal('2012-01-01T12:02:03.000Z') - credentials.session_token.should.equal( - "BQoEXAMPLEH4aoAH0gNCAPyJxz4BlCFFxWNE1OPTgk5TthT+FvwqnKwRcOIfrRh3c/LTo6UDdyJwOOvEVPvLXCrrrUtdnniCEXAMPLE/IvU1dYUg2RVAJBanLiHb4IgRmpRV3zrkuWJOgQs8IZZaIv2BXIa2R4OlgkBN9bkUDNCJiBeb/AXlzBBko7b15fjrBs2+cTQtpZ3CYWFXG8C5zqx37wnOE49mRl/+OtkIKGO7fAE") - credentials.access_key.should.equal("AKIAIOSFODNN7EXAMPLE") - credentials.secret_key.should.equal( - "aJalrXUtnFEMI/K7MDENG/bPxRfiCYzEXAMPLEKEY") - - role.user.arn.should.equal("arn:aws:iam::123456789012:role/test-role") - role.user.assume_role_id.should.contain("session-name") - - -@mock_sts -def test_get_caller_identity(): - identity = boto3.client( - "sts", region_name='us-east-1').get_caller_identity() - - identity['Arn'].should.equal('arn:aws:sts::123456789012:user/moto') - identity['UserId'].should.equal('AKIAIOSFODNN7EXAMPLE') - 
identity['Account'].should.equal('123456789012') +from __future__ import unicode_literals +import json + +import boto +import boto3 +from freezegun import freeze_time +import sure # noqa + +from moto import mock_sts, mock_sts_deprecated + + +@freeze_time("2012-01-01 12:00:00") +@mock_sts_deprecated +def test_get_session_token(): + conn = boto.connect_sts() + token = conn.get_session_token(duration=123) + + token.expiration.should.equal('2012-01-01T12:02:03.000Z') + token.session_token.should.equal( + "AQoEXAMPLEH4aoAH0gNCAPyJxz4BlCFFxWNE1OPTgk5TthT+FvwqnKwRcOIfrRh3c/LTo6UDdyJwOOvEVPvLXCrrrUtdnniCEXAMPLE/IvU1dYUg2RVAJBanLiHb4IgRmpRV3zrkuWJOgQs8IZZaIv2BXIa2R4OlgkBN9bkUDNCJiBeb/AXlzBBko7b15fjrBs2+cTQtpZ3CYWFXG8C5zqx37wnOE49mRl/+OtkIKGO7fAE") + token.access_key.should.equal("AKIAIOSFODNN7EXAMPLE") + token.secret_key.should.equal("wJalrXUtnFEMI/K7MDENG/bPxRfiCYzEXAMPLEKEY") + + +@freeze_time("2012-01-01 12:00:00") +@mock_sts_deprecated +def test_get_federation_token(): + conn = boto.connect_sts() + token = conn.get_federation_token(duration=123, name="Bob") + + token.credentials.expiration.should.equal('2012-01-01T12:02:03.000Z') + token.credentials.session_token.should.equal( + "AQoDYXdzEPT//////////wEXAMPLEtc764bNrC9SAPBSM22wDOk4x4HIZ8j4FZTwdQWLWsKWHGBuFqwAeMicRXmxfpSPfIeoIYRqTflfKD8YUuwthAx7mSEI/qkPpKPi/kMcGdQrmGdeehM4IC1NtBmUpp2wUE8phUZampKsburEDy0KPkyQDYwT7WZ0wq5VSXDvp75YU9HFvlRd8Tx6q6fE8YQcHNVXAkiY9q6d+xo0rKwT38xVqr7ZD0u0iPPkUL64lIZbqBAz+scqKmlzm8FDrypNC9Yjc8fPOLn9FX9KSYvKTr4rvx3iSIlTJabIQwj2ICCR/oLxBA==") + token.credentials.access_key.should.equal("AKIAIOSFODNN7EXAMPLE") + token.credentials.secret_key.should.equal( + "wJalrXUtnFEMI/K7MDENG/bPxRfiCYzEXAMPLEKEY") + token.federated_user_arn.should.equal( + "arn:aws:sts::123456789012:federated-user/Bob") + token.federated_user_id.should.equal("123456789012:Bob") + + +@freeze_time("2012-01-01 12:00:00") +@mock_sts_deprecated +def test_assume_role(): + conn = boto.connect_sts() + + policy = json.dumps({ + "Statement": [ + { + "Sid": "Stmt13690092345534", + "Action": [ + "S3:ListBucket" + ], + "Effect": "Allow", + "Resource": [ + "arn:aws:s3:::foobar-tester" + ] + }, + ] + }) + s3_role = "arn:aws:iam::123456789012:role/test-role" + role = conn.assume_role(s3_role, "session-name", + policy, duration_seconds=123) + + credentials = role.credentials + credentials.expiration.should.equal('2012-01-01T12:02:03.000Z') + credentials.session_token.should.equal( + "BQoEXAMPLEH4aoAH0gNCAPyJxz4BlCFFxWNE1OPTgk5TthT+FvwqnKwRcOIfrRh3c/LTo6UDdyJwOOvEVPvLXCrrrUtdnniCEXAMPLE/IvU1dYUg2RVAJBanLiHb4IgRmpRV3zrkuWJOgQs8IZZaIv2BXIa2R4OlgkBN9bkUDNCJiBeb/AXlzBBko7b15fjrBs2+cTQtpZ3CYWFXG8C5zqx37wnOE49mRl/+OtkIKGO7fAE") + credentials.access_key.should.equal("AKIAIOSFODNN7EXAMPLE") + credentials.secret_key.should.equal( + "aJalrXUtnFEMI/K7MDENG/bPxRfiCYzEXAMPLEKEY") + + role.user.arn.should.equal("arn:aws:iam::123456789012:role/test-role") + role.user.assume_role_id.should.contain("session-name") + + +@mock_sts +def test_get_caller_identity(): + identity = boto3.client( + "sts", region_name='us-east-1').get_caller_identity() + + identity['Arn'].should.equal('arn:aws:sts::123456789012:user/moto') + identity['UserId'].should.equal('AKIAIOSFODNN7EXAMPLE') + identity['Account'].should.equal('123456789012') diff --git a/tests/test_swf/models/test_activity_task.py b/tests/test_swf/models/test_activity_task.py index 41c88cafe3cc..dfcaf98011ec 100644 --- a/tests/test_swf/models/test_activity_task.py +++ b/tests/test_swf/models/test_activity_task.py @@ -1,154 +1,154 @@ -from 
freezegun import freeze_time -import sure # noqa - -from moto.swf.exceptions import SWFWorkflowExecutionClosedError -from moto.swf.models import ( - ActivityTask, - ActivityType, - Timeout, -) - -from ..utils import ( - ACTIVITY_TASK_TIMEOUTS, - make_workflow_execution, - process_first_timeout, -) - - -def test_activity_task_creation(): - wfe = make_workflow_execution() - task = ActivityTask( - activity_id="my-activity-123", - activity_type="foo", - input="optional", - scheduled_event_id=117, - workflow_execution=wfe, - timeouts=ACTIVITY_TASK_TIMEOUTS, - ) - task.workflow_execution.should.equal(wfe) - task.state.should.equal("SCHEDULED") - task.task_token.should_not.be.empty - task.started_event_id.should.be.none - - task.start(123) - task.state.should.equal("STARTED") - task.started_event_id.should.equal(123) - - task.complete() - task.state.should.equal("COMPLETED") - - # NB: this doesn't make any sense for SWF, a task shouldn't go from a - # "COMPLETED" state to a "FAILED" one, but this is an internal state on our - # side and we don't care about invalid state transitions for now. - task.fail() - task.state.should.equal("FAILED") - - -def test_activity_task_full_dict_representation(): - wfe = make_workflow_execution() - at = ActivityTask( - activity_id="my-activity-123", - activity_type=ActivityType("foo", "v1.0"), - input="optional", - scheduled_event_id=117, - timeouts=ACTIVITY_TASK_TIMEOUTS, - workflow_execution=wfe, - ) - at.start(1234) - - fd = at.to_full_dict() - fd["activityId"].should.equal("my-activity-123") - fd["activityType"]["version"].should.equal("v1.0") - fd["input"].should.equal("optional") - fd["startedEventId"].should.equal(1234) - fd.should.contain("taskToken") - fd["workflowExecution"].should.equal(wfe.to_short_dict()) - - at.start(1234) - fd = at.to_full_dict() - fd["startedEventId"].should.equal(1234) - - -def test_activity_task_reset_heartbeat_clock(): - wfe = make_workflow_execution() - - with freeze_time("2015-01-01 12:00:00"): - task = ActivityTask( - activity_id="my-activity-123", - activity_type="foo", - input="optional", - scheduled_event_id=117, - timeouts=ACTIVITY_TASK_TIMEOUTS, - workflow_execution=wfe, - ) - - task.last_heartbeat_timestamp.should.equal(1420113600.0) - - with freeze_time("2015-01-01 13:00:00"): - task.reset_heartbeat_clock() - - task.last_heartbeat_timestamp.should.equal(1420117200.0) - - -def test_activity_task_first_timeout(): - wfe = make_workflow_execution() - - with freeze_time("2015-01-01 12:00:00"): - task = ActivityTask( - activity_id="my-activity-123", - activity_type="foo", - input="optional", - scheduled_event_id=117, - timeouts=ACTIVITY_TASK_TIMEOUTS, - workflow_execution=wfe, - ) - task.first_timeout().should.be.none - - # activity task timeout is 300s == 5mins - with freeze_time("2015-01-01 12:06:00"): - task.first_timeout().should.be.a(Timeout) - process_first_timeout(task) - task.state.should.equal("TIMED_OUT") - task.timeout_type.should.equal("HEARTBEAT") - - -def test_activity_task_cannot_timeout_on_closed_workflow_execution(): - with freeze_time("2015-01-01 12:00:00"): - wfe = make_workflow_execution() - wfe.start() - - with freeze_time("2015-01-01 13:58:00"): - task = ActivityTask( - activity_id="my-activity-123", - activity_type="foo", - input="optional", - scheduled_event_id=117, - timeouts=ACTIVITY_TASK_TIMEOUTS, - workflow_execution=wfe, - ) - - with freeze_time("2015-01-01 14:10:00"): - task.first_timeout().should.be.a(Timeout) - wfe.first_timeout().should.be.a(Timeout) - process_first_timeout(wfe) - 
task.first_timeout().should.be.none - - -def test_activity_task_cannot_change_state_on_closed_workflow_execution(): - wfe = make_workflow_execution() - wfe.start() - - task = ActivityTask( - activity_id="my-activity-123", - activity_type="foo", - input="optional", - scheduled_event_id=117, - timeouts=ACTIVITY_TASK_TIMEOUTS, - workflow_execution=wfe, - ) - wfe.complete(123) - - task.timeout.when.called_with(Timeout(task, 0, "foo")).should.throw( - SWFWorkflowExecutionClosedError) - task.complete.when.called_with().should.throw(SWFWorkflowExecutionClosedError) - task.fail.when.called_with().should.throw(SWFWorkflowExecutionClosedError) +from freezegun import freeze_time +import sure # noqa + +from moto.swf.exceptions import SWFWorkflowExecutionClosedError +from moto.swf.models import ( + ActivityTask, + ActivityType, + Timeout, +) + +from ..utils import ( + ACTIVITY_TASK_TIMEOUTS, + make_workflow_execution, + process_first_timeout, +) + + +def test_activity_task_creation(): + wfe = make_workflow_execution() + task = ActivityTask( + activity_id="my-activity-123", + activity_type="foo", + input="optional", + scheduled_event_id=117, + workflow_execution=wfe, + timeouts=ACTIVITY_TASK_TIMEOUTS, + ) + task.workflow_execution.should.equal(wfe) + task.state.should.equal("SCHEDULED") + task.task_token.should_not.be.empty + task.started_event_id.should.be.none + + task.start(123) + task.state.should.equal("STARTED") + task.started_event_id.should.equal(123) + + task.complete() + task.state.should.equal("COMPLETED") + + # NB: this doesn't make any sense for SWF, a task shouldn't go from a + # "COMPLETED" state to a "FAILED" one, but this is an internal state on our + # side and we don't care about invalid state transitions for now. + task.fail() + task.state.should.equal("FAILED") + + +def test_activity_task_full_dict_representation(): + wfe = make_workflow_execution() + at = ActivityTask( + activity_id="my-activity-123", + activity_type=ActivityType("foo", "v1.0"), + input="optional", + scheduled_event_id=117, + timeouts=ACTIVITY_TASK_TIMEOUTS, + workflow_execution=wfe, + ) + at.start(1234) + + fd = at.to_full_dict() + fd["activityId"].should.equal("my-activity-123") + fd["activityType"]["version"].should.equal("v1.0") + fd["input"].should.equal("optional") + fd["startedEventId"].should.equal(1234) + fd.should.contain("taskToken") + fd["workflowExecution"].should.equal(wfe.to_short_dict()) + + at.start(1234) + fd = at.to_full_dict() + fd["startedEventId"].should.equal(1234) + + +def test_activity_task_reset_heartbeat_clock(): + wfe = make_workflow_execution() + + with freeze_time("2015-01-01 12:00:00"): + task = ActivityTask( + activity_id="my-activity-123", + activity_type="foo", + input="optional", + scheduled_event_id=117, + timeouts=ACTIVITY_TASK_TIMEOUTS, + workflow_execution=wfe, + ) + + task.last_heartbeat_timestamp.should.equal(1420113600.0) + + with freeze_time("2015-01-01 13:00:00"): + task.reset_heartbeat_clock() + + task.last_heartbeat_timestamp.should.equal(1420117200.0) + + +def test_activity_task_first_timeout(): + wfe = make_workflow_execution() + + with freeze_time("2015-01-01 12:00:00"): + task = ActivityTask( + activity_id="my-activity-123", + activity_type="foo", + input="optional", + scheduled_event_id=117, + timeouts=ACTIVITY_TASK_TIMEOUTS, + workflow_execution=wfe, + ) + task.first_timeout().should.be.none + + # activity task timeout is 300s == 5mins + with freeze_time("2015-01-01 12:06:00"): + task.first_timeout().should.be.a(Timeout) + process_first_timeout(task) + 
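# processing the pending timeout closes the task as TIMED_OUT,
+        # and the timeout type reflects the missed heartbeat.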
task.state.should.equal("TIMED_OUT") + task.timeout_type.should.equal("HEARTBEAT") + + +def test_activity_task_cannot_timeout_on_closed_workflow_execution(): + with freeze_time("2015-01-01 12:00:00"): + wfe = make_workflow_execution() + wfe.start() + + with freeze_time("2015-01-01 13:58:00"): + task = ActivityTask( + activity_id="my-activity-123", + activity_type="foo", + input="optional", + scheduled_event_id=117, + timeouts=ACTIVITY_TASK_TIMEOUTS, + workflow_execution=wfe, + ) + + with freeze_time("2015-01-01 14:10:00"): + task.first_timeout().should.be.a(Timeout) + wfe.first_timeout().should.be.a(Timeout) + process_first_timeout(wfe) + task.first_timeout().should.be.none + + +def test_activity_task_cannot_change_state_on_closed_workflow_execution(): + wfe = make_workflow_execution() + wfe.start() + + task = ActivityTask( + activity_id="my-activity-123", + activity_type="foo", + input="optional", + scheduled_event_id=117, + timeouts=ACTIVITY_TASK_TIMEOUTS, + workflow_execution=wfe, + ) + wfe.complete(123) + + task.timeout.when.called_with(Timeout(task, 0, "foo")).should.throw( + SWFWorkflowExecutionClosedError) + task.complete.when.called_with().should.throw(SWFWorkflowExecutionClosedError) + task.fail.when.called_with().should.throw(SWFWorkflowExecutionClosedError) diff --git a/tests/test_swf/models/test_decision_task.py b/tests/test_swf/models/test_decision_task.py index b5e23eaca6ae..b593db5ff891 100644 --- a/tests/test_swf/models/test_decision_task.py +++ b/tests/test_swf/models/test_decision_task.py @@ -1,80 +1,80 @@ -from boto.swf.exceptions import SWFResponseError -from freezegun import freeze_time -from sure import expect - -from moto.swf.models import DecisionTask, Timeout -from moto.swf.exceptions import SWFWorkflowExecutionClosedError - -from ..utils import make_workflow_execution, process_first_timeout - - -def test_decision_task_creation(): - wfe = make_workflow_execution() - dt = DecisionTask(wfe, 123) - dt.workflow_execution.should.equal(wfe) - dt.state.should.equal("SCHEDULED") - dt.task_token.should_not.be.empty - dt.started_event_id.should.be.none - - -def test_decision_task_full_dict_representation(): - wfe = make_workflow_execution() - wft = wfe.workflow_type - dt = DecisionTask(wfe, 123) - - fd = dt.to_full_dict() - fd["events"].should.be.a("list") - fd["previousStartedEventId"].should.equal(0) - fd.should_not.contain("startedEventId") - fd.should.contain("taskToken") - fd["workflowExecution"].should.equal(wfe.to_short_dict()) - fd["workflowType"].should.equal(wft.to_short_dict()) - - dt.start(1234) - fd = dt.to_full_dict() - fd["startedEventId"].should.equal(1234) - - -def test_decision_task_first_timeout(): - wfe = make_workflow_execution() - dt = DecisionTask(wfe, 123) - dt.first_timeout().should.be.none - - with freeze_time("2015-01-01 12:00:00"): - dt.start(1234) - dt.first_timeout().should.be.none - - # activity task timeout is 300s == 5mins - with freeze_time("2015-01-01 12:06:00"): - dt.first_timeout().should.be.a(Timeout) - - dt.complete() - dt.first_timeout().should.be.none - - -def test_decision_task_cannot_timeout_on_closed_workflow_execution(): - with freeze_time("2015-01-01 12:00:00"): - wfe = make_workflow_execution() - wfe.start() - - with freeze_time("2015-01-01 13:55:00"): - dt = DecisionTask(wfe, 123) - dt.start(1234) - - with freeze_time("2015-01-01 14:10:00"): - dt.first_timeout().should.be.a(Timeout) - wfe.first_timeout().should.be.a(Timeout) - process_first_timeout(wfe) - dt.first_timeout().should.be.none - - -def 
test_decision_task_cannot_change_state_on_closed_workflow_execution(): - wfe = make_workflow_execution() - wfe.start() - task = DecisionTask(wfe, 123) - - wfe.complete(123) - - task.timeout.when.called_with(Timeout(task, 0, "foo")).should.throw( - SWFWorkflowExecutionClosedError) - task.complete.when.called_with().should.throw(SWFWorkflowExecutionClosedError) +from boto.swf.exceptions import SWFResponseError +from freezegun import freeze_time +from sure import expect + +from moto.swf.models import DecisionTask, Timeout +from moto.swf.exceptions import SWFWorkflowExecutionClosedError + +from ..utils import make_workflow_execution, process_first_timeout + + +def test_decision_task_creation(): + wfe = make_workflow_execution() + dt = DecisionTask(wfe, 123) + dt.workflow_execution.should.equal(wfe) + dt.state.should.equal("SCHEDULED") + dt.task_token.should_not.be.empty + dt.started_event_id.should.be.none + + +def test_decision_task_full_dict_representation(): + wfe = make_workflow_execution() + wft = wfe.workflow_type + dt = DecisionTask(wfe, 123) + + fd = dt.to_full_dict() + fd["events"].should.be.a("list") + fd["previousStartedEventId"].should.equal(0) + fd.should_not.contain("startedEventId") + fd.should.contain("taskToken") + fd["workflowExecution"].should.equal(wfe.to_short_dict()) + fd["workflowType"].should.equal(wft.to_short_dict()) + + dt.start(1234) + fd = dt.to_full_dict() + fd["startedEventId"].should.equal(1234) + + +def test_decision_task_first_timeout(): + wfe = make_workflow_execution() + dt = DecisionTask(wfe, 123) + dt.first_timeout().should.be.none + + with freeze_time("2015-01-01 12:00:00"): + dt.start(1234) + dt.first_timeout().should.be.none + + # activity task timeout is 300s == 5mins + with freeze_time("2015-01-01 12:06:00"): + dt.first_timeout().should.be.a(Timeout) + + dt.complete() + dt.first_timeout().should.be.none + + +def test_decision_task_cannot_timeout_on_closed_workflow_execution(): + with freeze_time("2015-01-01 12:00:00"): + wfe = make_workflow_execution() + wfe.start() + + with freeze_time("2015-01-01 13:55:00"): + dt = DecisionTask(wfe, 123) + dt.start(1234) + + with freeze_time("2015-01-01 14:10:00"): + dt.first_timeout().should.be.a(Timeout) + wfe.first_timeout().should.be.a(Timeout) + process_first_timeout(wfe) + dt.first_timeout().should.be.none + + +def test_decision_task_cannot_change_state_on_closed_workflow_execution(): + wfe = make_workflow_execution() + wfe.start() + task = DecisionTask(wfe, 123) + + wfe.complete(123) + + task.timeout.when.called_with(Timeout(task, 0, "foo")).should.throw( + SWFWorkflowExecutionClosedError) + task.complete.when.called_with().should.throw(SWFWorkflowExecutionClosedError) diff --git a/tests/test_swf/models/test_domain.py b/tests/test_swf/models/test_domain.py index 1a8a1268d761..1dc5cec6578c 100644 --- a/tests/test_swf/models/test_domain.py +++ b/tests/test_swf/models/test_domain.py @@ -1,119 +1,119 @@ -from collections import namedtuple -import sure # noqa - -from moto.swf.exceptions import SWFUnknownResourceFault -from moto.swf.models import Domain - -# Ensure 'assert_raises' context manager support for Python 2.6 -import tests.backport_assert_raises # noqa - -# Fake WorkflowExecution for tests purposes -WorkflowExecution = namedtuple( - "WorkflowExecution", - ["workflow_id", "run_id", "execution_status", "open"] -) - - -def test_domain_short_dict_representation(): - domain = Domain("foo", "52") - domain.to_short_dict().should.equal( - {"name": "foo", "status": "REGISTERED"}) - - domain.description = "foo bar" 
- domain.to_short_dict()["description"].should.equal("foo bar") - - -def test_domain_full_dict_representation(): - domain = Domain("foo", "52") - - domain.to_full_dict()["domainInfo"].should.equal(domain.to_short_dict()) - _config = domain.to_full_dict()["configuration"] - _config["workflowExecutionRetentionPeriodInDays"].should.equal("52") - - -def test_domain_string_representation(): - domain = Domain("my-domain", "60") - str(domain).should.equal("Domain(name: my-domain, status: REGISTERED)") - - -def test_domain_add_to_activity_task_list(): - domain = Domain("my-domain", "60") - domain.add_to_activity_task_list("foo", "bar") - domain.activity_task_lists.should.equal({ - "foo": ["bar"] - }) - - -def test_domain_activity_tasks(): - domain = Domain("my-domain", "60") - domain.add_to_activity_task_list("foo", "bar") - domain.add_to_activity_task_list("other", "baz") - sorted(domain.activity_tasks).should.equal(["bar", "baz"]) - - -def test_domain_add_to_decision_task_list(): - domain = Domain("my-domain", "60") - domain.add_to_decision_task_list("foo", "bar") - domain.decision_task_lists.should.equal({ - "foo": ["bar"] - }) - - -def test_domain_decision_tasks(): - domain = Domain("my-domain", "60") - domain.add_to_decision_task_list("foo", "bar") - domain.add_to_decision_task_list("other", "baz") - sorted(domain.decision_tasks).should.equal(["bar", "baz"]) - - -def test_domain_get_workflow_execution(): - domain = Domain("my-domain", "60") - - wfe1 = WorkflowExecution( - workflow_id="wf-id-1", run_id="run-id-1", execution_status="OPEN", open=True) - wfe2 = WorkflowExecution( - workflow_id="wf-id-1", run_id="run-id-2", execution_status="CLOSED", open=False) - wfe3 = WorkflowExecution( - workflow_id="wf-id-2", run_id="run-id-3", execution_status="OPEN", open=True) - wfe4 = WorkflowExecution( - workflow_id="wf-id-3", run_id="run-id-4", execution_status="CLOSED", open=False) - domain.workflow_executions = [wfe1, wfe2, wfe3, wfe4] - - # get workflow execution through workflow_id and run_id - domain.get_workflow_execution( - "wf-id-1", run_id="run-id-1").should.equal(wfe1) - domain.get_workflow_execution( - "wf-id-1", run_id="run-id-2").should.equal(wfe2) - domain.get_workflow_execution( - "wf-id-3", run_id="run-id-4").should.equal(wfe4) - - domain.get_workflow_execution.when.called_with( - "wf-id-1", run_id="non-existent" - ).should.throw( - SWFUnknownResourceFault, - ) - - # get OPEN workflow execution by default if no run_id - domain.get_workflow_execution("wf-id-1").should.equal(wfe1) - domain.get_workflow_execution.when.called_with( - "wf-id-3" - ).should.throw( - SWFUnknownResourceFault - ) - domain.get_workflow_execution.when.called_with( - "wf-id-non-existent" - ).should.throw( - SWFUnknownResourceFault - ) - - # raise_if_closed attribute - domain.get_workflow_execution( - "wf-id-1", run_id="run-id-1", raise_if_closed=True).should.equal(wfe1) - domain.get_workflow_execution.when.called_with( - "wf-id-3", run_id="run-id-4", raise_if_closed=True - ).should.throw( - SWFUnknownResourceFault - ) - - # raise_if_none attribute - domain.get_workflow_execution("foo", raise_if_none=False).should.be.none +from collections import namedtuple +import sure # noqa + +from moto.swf.exceptions import SWFUnknownResourceFault +from moto.swf.models import Domain + +# Ensure 'assert_raises' context manager support for Python 2.6 +import tests.backport_assert_raises # noqa + +# Fake WorkflowExecution for tests purposes +WorkflowExecution = namedtuple( + "WorkflowExecution", + ["workflow_id", "run_id", 
"execution_status", "open"] +) + + +def test_domain_short_dict_representation(): + domain = Domain("foo", "52") + domain.to_short_dict().should.equal( + {"name": "foo", "status": "REGISTERED"}) + + domain.description = "foo bar" + domain.to_short_dict()["description"].should.equal("foo bar") + + +def test_domain_full_dict_representation(): + domain = Domain("foo", "52") + + domain.to_full_dict()["domainInfo"].should.equal(domain.to_short_dict()) + _config = domain.to_full_dict()["configuration"] + _config["workflowExecutionRetentionPeriodInDays"].should.equal("52") + + +def test_domain_string_representation(): + domain = Domain("my-domain", "60") + str(domain).should.equal("Domain(name: my-domain, status: REGISTERED)") + + +def test_domain_add_to_activity_task_list(): + domain = Domain("my-domain", "60") + domain.add_to_activity_task_list("foo", "bar") + domain.activity_task_lists.should.equal({ + "foo": ["bar"] + }) + + +def test_domain_activity_tasks(): + domain = Domain("my-domain", "60") + domain.add_to_activity_task_list("foo", "bar") + domain.add_to_activity_task_list("other", "baz") + sorted(domain.activity_tasks).should.equal(["bar", "baz"]) + + +def test_domain_add_to_decision_task_list(): + domain = Domain("my-domain", "60") + domain.add_to_decision_task_list("foo", "bar") + domain.decision_task_lists.should.equal({ + "foo": ["bar"] + }) + + +def test_domain_decision_tasks(): + domain = Domain("my-domain", "60") + domain.add_to_decision_task_list("foo", "bar") + domain.add_to_decision_task_list("other", "baz") + sorted(domain.decision_tasks).should.equal(["bar", "baz"]) + + +def test_domain_get_workflow_execution(): + domain = Domain("my-domain", "60") + + wfe1 = WorkflowExecution( + workflow_id="wf-id-1", run_id="run-id-1", execution_status="OPEN", open=True) + wfe2 = WorkflowExecution( + workflow_id="wf-id-1", run_id="run-id-2", execution_status="CLOSED", open=False) + wfe3 = WorkflowExecution( + workflow_id="wf-id-2", run_id="run-id-3", execution_status="OPEN", open=True) + wfe4 = WorkflowExecution( + workflow_id="wf-id-3", run_id="run-id-4", execution_status="CLOSED", open=False) + domain.workflow_executions = [wfe1, wfe2, wfe3, wfe4] + + # get workflow execution through workflow_id and run_id + domain.get_workflow_execution( + "wf-id-1", run_id="run-id-1").should.equal(wfe1) + domain.get_workflow_execution( + "wf-id-1", run_id="run-id-2").should.equal(wfe2) + domain.get_workflow_execution( + "wf-id-3", run_id="run-id-4").should.equal(wfe4) + + domain.get_workflow_execution.when.called_with( + "wf-id-1", run_id="non-existent" + ).should.throw( + SWFUnknownResourceFault, + ) + + # get OPEN workflow execution by default if no run_id + domain.get_workflow_execution("wf-id-1").should.equal(wfe1) + domain.get_workflow_execution.when.called_with( + "wf-id-3" + ).should.throw( + SWFUnknownResourceFault + ) + domain.get_workflow_execution.when.called_with( + "wf-id-non-existent" + ).should.throw( + SWFUnknownResourceFault + ) + + # raise_if_closed attribute + domain.get_workflow_execution( + "wf-id-1", run_id="run-id-1", raise_if_closed=True).should.equal(wfe1) + domain.get_workflow_execution.when.called_with( + "wf-id-3", run_id="run-id-4", raise_if_closed=True + ).should.throw( + SWFUnknownResourceFault + ) + + # raise_if_none attribute + domain.get_workflow_execution("foo", raise_if_none=False).should.be.none diff --git a/tests/test_swf/models/test_generic_type.py b/tests/test_swf/models/test_generic_type.py index 294df9f843df..bea07ce1c69b 100644 --- 
a/tests/test_swf/models/test_generic_type.py +++ b/tests/test_swf/models/test_generic_type.py @@ -1,58 +1,58 @@ -from moto.swf.models import GenericType -import sure # noqa - - -# Tests for GenericType (ActivityType, WorkflowType) -class FooType(GenericType): - - @property - def kind(self): - return "foo" - - @property - def _configuration_keys(self): - return ["justAnExampleTimeout"] - - -def test_type_short_dict_representation(): - _type = FooType("test-foo", "v1.0") - _type.to_short_dict().should.equal({"name": "test-foo", "version": "v1.0"}) - - -def test_type_medium_dict_representation(): - _type = FooType("test-foo", "v1.0") - _type.to_medium_dict()["fooType"].should.equal(_type.to_short_dict()) - _type.to_medium_dict()["status"].should.equal("REGISTERED") - _type.to_medium_dict().should.contain("creationDate") - _type.to_medium_dict().should_not.contain("deprecationDate") - _type.to_medium_dict().should_not.contain("description") - - _type.description = "foo bar" - _type.to_medium_dict()["description"].should.equal("foo bar") - - _type.status = "DEPRECATED" - _type.to_medium_dict().should.contain("deprecationDate") - - -def test_type_full_dict_representation(): - _type = FooType("test-foo", "v1.0") - _type.to_full_dict()["typeInfo"].should.equal(_type.to_medium_dict()) - _type.to_full_dict()["configuration"].should.equal({}) - - _type.task_list = "foo" - _type.to_full_dict()["configuration"][ - "defaultTaskList"].should.equal({"name": "foo"}) - - _type.just_an_example_timeout = "60" - _type.to_full_dict()["configuration"][ - "justAnExampleTimeout"].should.equal("60") - - _type.non_whitelisted_property = "34" - keys = _type.to_full_dict()["configuration"].keys() - sorted(keys).should.equal(["defaultTaskList", "justAnExampleTimeout"]) - - -def test_type_string_representation(): - _type = FooType("test-foo", "v1.0") - str(_type).should.equal( - "FooType(name: test-foo, version: v1.0, status: REGISTERED)") +from moto.swf.models import GenericType +import sure # noqa + + +# Tests for GenericType (ActivityType, WorkflowType) +class FooType(GenericType): + + @property + def kind(self): + return "foo" + + @property + def _configuration_keys(self): + return ["justAnExampleTimeout"] + + +def test_type_short_dict_representation(): + _type = FooType("test-foo", "v1.0") + _type.to_short_dict().should.equal({"name": "test-foo", "version": "v1.0"}) + + +def test_type_medium_dict_representation(): + _type = FooType("test-foo", "v1.0") + _type.to_medium_dict()["fooType"].should.equal(_type.to_short_dict()) + _type.to_medium_dict()["status"].should.equal("REGISTERED") + _type.to_medium_dict().should.contain("creationDate") + _type.to_medium_dict().should_not.contain("deprecationDate") + _type.to_medium_dict().should_not.contain("description") + + _type.description = "foo bar" + _type.to_medium_dict()["description"].should.equal("foo bar") + + _type.status = "DEPRECATED" + _type.to_medium_dict().should.contain("deprecationDate") + + +def test_type_full_dict_representation(): + _type = FooType("test-foo", "v1.0") + _type.to_full_dict()["typeInfo"].should.equal(_type.to_medium_dict()) + _type.to_full_dict()["configuration"].should.equal({}) + + _type.task_list = "foo" + _type.to_full_dict()["configuration"][ + "defaultTaskList"].should.equal({"name": "foo"}) + + _type.just_an_example_timeout = "60" + _type.to_full_dict()["configuration"][ + "justAnExampleTimeout"].should.equal("60") + + _type.non_whitelisted_property = "34" + keys = _type.to_full_dict()["configuration"].keys() + 
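# only whitelisted configuration keys are serialized; the
+    # non-whitelisted property is dropped from the configuration dict.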
sorted(keys).should.equal(["defaultTaskList", "justAnExampleTimeout"]) + + +def test_type_string_representation(): + _type = FooType("test-foo", "v1.0") + str(_type).should.equal( + "FooType(name: test-foo, version: v1.0, status: REGISTERED)") diff --git a/tests/test_swf/models/test_history_event.py b/tests/test_swf/models/test_history_event.py index b869408ce7f7..fcf4a4a553d1 100644 --- a/tests/test_swf/models/test_history_event.py +++ b/tests/test_swf/models/test_history_event.py @@ -1,31 +1,31 @@ -from freezegun import freeze_time -import sure # noqa - -from moto.swf.models import HistoryEvent - - -@freeze_time("2015-01-01 12:00:00") -def test_history_event_creation(): - he = HistoryEvent(123, "DecisionTaskStarted", scheduled_event_id=2) - he.event_id.should.equal(123) - he.event_type.should.equal("DecisionTaskStarted") - he.event_timestamp.should.equal(1420113600.0) - - -@freeze_time("2015-01-01 12:00:00") -def test_history_event_to_dict_representation(): - he = HistoryEvent(123, "DecisionTaskStarted", scheduled_event_id=2) - he.to_dict().should.equal({ - "eventId": 123, - "eventType": "DecisionTaskStarted", - "eventTimestamp": 1420113600.0, - "decisionTaskStartedEventAttributes": { - "scheduledEventId": 2 - } - }) - - -def test_history_event_breaks_on_initialization_if_not_implemented(): - HistoryEvent.when.called_with( - 123, "UnknownHistoryEvent" - ).should.throw(NotImplementedError) +from freezegun import freeze_time +import sure # noqa + +from moto.swf.models import HistoryEvent + + +@freeze_time("2015-01-01 12:00:00") +def test_history_event_creation(): + he = HistoryEvent(123, "DecisionTaskStarted", scheduled_event_id=2) + he.event_id.should.equal(123) + he.event_type.should.equal("DecisionTaskStarted") + he.event_timestamp.should.equal(1420113600.0) + + +@freeze_time("2015-01-01 12:00:00") +def test_history_event_to_dict_representation(): + he = HistoryEvent(123, "DecisionTaskStarted", scheduled_event_id=2) + he.to_dict().should.equal({ + "eventId": 123, + "eventType": "DecisionTaskStarted", + "eventTimestamp": 1420113600.0, + "decisionTaskStartedEventAttributes": { + "scheduledEventId": 2 + } + }) + + +def test_history_event_breaks_on_initialization_if_not_implemented(): + HistoryEvent.when.called_with( + 123, "UnknownHistoryEvent" + ).should.throw(NotImplementedError) diff --git a/tests/test_swf/models/test_timeout.py b/tests/test_swf/models/test_timeout.py index fb52652fdc40..0ee059065a76 100644 --- a/tests/test_swf/models/test_timeout.py +++ b/tests/test_swf/models/test_timeout.py @@ -1,19 +1,19 @@ -from freezegun import freeze_time -import sure # noqa - -from moto.swf.models import Timeout - -from ..utils import make_workflow_execution - - -def test_timeout_creation(): - wfe = make_workflow_execution() - - # epoch 1420113600 == "2015-01-01 13:00:00" - timeout = Timeout(wfe, 1420117200, "START_TO_CLOSE") - - with freeze_time("2015-01-01 12:00:00"): - timeout.reached.should.be.falsy - - with freeze_time("2015-01-01 13:00:00"): - timeout.reached.should.be.truthy +from freezegun import freeze_time +import sure # noqa + +from moto.swf.models import Timeout + +from ..utils import make_workflow_execution + + +def test_timeout_creation(): + wfe = make_workflow_execution() + + # epoch 1420113600 == "2015-01-01 13:00:00" + timeout = Timeout(wfe, 1420117200, "START_TO_CLOSE") + + with freeze_time("2015-01-01 12:00:00"): + timeout.reached.should.be.falsy + + with freeze_time("2015-01-01 13:00:00"): + timeout.reached.should.be.truthy diff --git 
a/tests/test_swf/models/test_workflow_execution.py b/tests/test_swf/models/test_workflow_execution.py index 45b91c86a7cb..7271cca7fb1f 100644 --- a/tests/test_swf/models/test_workflow_execution.py +++ b/tests/test_swf/models/test_workflow_execution.py @@ -1,501 +1,501 @@ -from freezegun import freeze_time -import sure # noqa - -from moto.swf.models import ( - ActivityType, - Timeout, - WorkflowType, - WorkflowExecution, -) -from moto.swf.exceptions import SWFDefaultUndefinedFault -from ..utils import ( - auto_start_decision_tasks, - get_basic_domain, - get_basic_workflow_type, - make_workflow_execution, -) - - -VALID_ACTIVITY_TASK_ATTRIBUTES = { - "activityId": "my-activity-001", - "activityType": {"name": "test-activity", "version": "v1.1"}, - "taskList": {"name": "task-list-name"}, - "scheduleToStartTimeout": "600", - "scheduleToCloseTimeout": "600", - "startToCloseTimeout": "600", - "heartbeatTimeout": "300", -} - - -def test_workflow_execution_creation(): - domain = get_basic_domain() - wft = get_basic_workflow_type() - wfe = WorkflowExecution(domain, wft, "ab1234", child_policy="TERMINATE") - - wfe.domain.should.equal(domain) - wfe.workflow_type.should.equal(wft) - wfe.child_policy.should.equal("TERMINATE") - - -def test_workflow_execution_creation_child_policy_logic(): - domain = get_basic_domain() - - WorkflowExecution( - domain, - WorkflowType( - "test-workflow", "v1.0", - task_list="queue", default_child_policy="ABANDON", - default_execution_start_to_close_timeout="300", - default_task_start_to_close_timeout="300", - ), - "ab1234" - ).child_policy.should.equal("ABANDON") - - WorkflowExecution( - domain, - WorkflowType( - "test-workflow", "v1.0", task_list="queue", - default_execution_start_to_close_timeout="300", - default_task_start_to_close_timeout="300", - ), - "ab1234", - child_policy="REQUEST_CANCEL" - ).child_policy.should.equal("REQUEST_CANCEL") - - WorkflowExecution.when.called_with( - domain, - WorkflowType("test-workflow", "v1.0"), "ab1234" - ).should.throw(SWFDefaultUndefinedFault) - - -def test_workflow_execution_string_representation(): - wfe = make_workflow_execution(child_policy="TERMINATE") - str(wfe).should.match(r"^WorkflowExecution\(run_id: .*\)") - - -def test_workflow_execution_generates_a_random_run_id(): - domain = get_basic_domain() - wft = get_basic_workflow_type() - wfe1 = WorkflowExecution(domain, wft, "ab1234", child_policy="TERMINATE") - wfe2 = WorkflowExecution(domain, wft, "ab1235", child_policy="TERMINATE") - wfe1.run_id.should_not.equal(wfe2.run_id) - - -def test_workflow_execution_short_dict_representation(): - domain = get_basic_domain() - wf_type = WorkflowType( - "test-workflow", "v1.0", - task_list="queue", default_child_policy="ABANDON", - default_execution_start_to_close_timeout="300", - default_task_start_to_close_timeout="300", - ) - wfe = WorkflowExecution(domain, wf_type, "ab1234") - - sd = wfe.to_short_dict() - sd["workflowId"].should.equal("ab1234") - sd.should.contain("runId") - - -def test_workflow_execution_medium_dict_representation(): - domain = get_basic_domain() - wf_type = WorkflowType( - "test-workflow", "v1.0", - task_list="queue", default_child_policy="ABANDON", - default_execution_start_to_close_timeout="300", - default_task_start_to_close_timeout="300", - ) - wfe = WorkflowExecution(domain, wf_type, "ab1234") - - md = wfe.to_medium_dict() - md["execution"].should.equal(wfe.to_short_dict()) - md["workflowType"].should.equal(wf_type.to_short_dict()) - md["startTimestamp"].should.be.a('float') - 
md["executionStatus"].should.equal("OPEN") - md["cancelRequested"].should.be.falsy - md.should_not.contain("tagList") - - wfe.tag_list = ["foo", "bar", "baz"] - md = wfe.to_medium_dict() - md["tagList"].should.equal(["foo", "bar", "baz"]) - - -def test_workflow_execution_full_dict_representation(): - domain = get_basic_domain() - wf_type = WorkflowType( - "test-workflow", "v1.0", - task_list="queue", default_child_policy="ABANDON", - default_execution_start_to_close_timeout="300", - default_task_start_to_close_timeout="300", - ) - wfe = WorkflowExecution(domain, wf_type, "ab1234") - - fd = wfe.to_full_dict() - fd["executionInfo"].should.equal(wfe.to_medium_dict()) - fd["openCounts"]["openTimers"].should.equal(0) - fd["openCounts"]["openDecisionTasks"].should.equal(0) - fd["openCounts"]["openActivityTasks"].should.equal(0) - fd["executionConfiguration"].should.equal({ - "childPolicy": "ABANDON", - "executionStartToCloseTimeout": "300", - "taskList": {"name": "queue"}, - "taskStartToCloseTimeout": "300", - }) - - -def test_workflow_execution_list_dict_representation(): - domain = get_basic_domain() - wf_type = WorkflowType( - 'test-workflow', 'v1.0', - task_list='queue', default_child_policy='ABANDON', - default_execution_start_to_close_timeout='300', - default_task_start_to_close_timeout='300', - ) - wfe = WorkflowExecution(domain, wf_type, 'ab1234') - - ld = wfe.to_list_dict() - ld['workflowType']['version'].should.equal('v1.0') - ld['workflowType']['name'].should.equal('test-workflow') - ld['executionStatus'].should.equal('OPEN') - ld['execution']['workflowId'].should.equal('ab1234') - ld['execution'].should.contain('runId') - ld['cancelRequested'].should.be.false - ld.should.contain('startTimestamp') - - -def test_workflow_execution_schedule_decision_task(): - wfe = make_workflow_execution() - wfe.open_counts["openDecisionTasks"].should.equal(0) - wfe.schedule_decision_task() - wfe.open_counts["openDecisionTasks"].should.equal(1) - - -def test_workflow_execution_start_decision_task(): - wfe = make_workflow_execution() - wfe.schedule_decision_task() - dt = wfe.decision_tasks[0] - wfe.start_decision_task(dt.task_token, identity="srv01") - dt = wfe.decision_tasks[0] - dt.state.should.equal("STARTED") - wfe.events()[-1].event_type.should.equal("DecisionTaskStarted") - wfe.events()[-1].event_attributes["identity"].should.equal("srv01") - - -def test_workflow_execution_history_events_ids(): - wfe = make_workflow_execution() - wfe._add_event("WorkflowExecutionStarted") - wfe._add_event("DecisionTaskScheduled") - wfe._add_event("DecisionTaskStarted") - ids = [evt.event_id for evt in wfe.events()] - ids.should.equal([1, 2, 3]) - - -@freeze_time("2015-01-01 12:00:00") -def test_workflow_execution_start(): - wfe = make_workflow_execution() - wfe.events().should.equal([]) - - wfe.start() - wfe.start_timestamp.should.equal(1420113600.0) - wfe.events().should.have.length_of(2) - wfe.events()[0].event_type.should.equal("WorkflowExecutionStarted") - wfe.events()[1].event_type.should.equal("DecisionTaskScheduled") - - -@freeze_time("2015-01-02 12:00:00") -def test_workflow_execution_complete(): - wfe = make_workflow_execution() - wfe.complete(123, result="foo") - - wfe.execution_status.should.equal("CLOSED") - wfe.close_status.should.equal("COMPLETED") - wfe.close_timestamp.should.equal(1420200000.0) - wfe.events()[-1].event_type.should.equal("WorkflowExecutionCompleted") - wfe.events()[-1].event_attributes["decisionTaskCompletedEventId"].should.equal(123) - 
wfe.events()[-1].event_attributes["result"].should.equal("foo") - - -@freeze_time("2015-01-02 12:00:00") -def test_workflow_execution_fail(): - wfe = make_workflow_execution() - wfe.fail(123, details="some details", reason="my rules") - - wfe.execution_status.should.equal("CLOSED") - wfe.close_status.should.equal("FAILED") - wfe.close_timestamp.should.equal(1420200000.0) - wfe.events()[-1].event_type.should.equal("WorkflowExecutionFailed") - wfe.events()[-1].event_attributes["decisionTaskCompletedEventId"].should.equal(123) - wfe.events()[-1].event_attributes["details"].should.equal("some details") - wfe.events()[-1].event_attributes["reason"].should.equal("my rules") - - -@freeze_time("2015-01-01 12:00:00") -def test_workflow_execution_schedule_activity_task(): - wfe = make_workflow_execution() - wfe.latest_activity_task_timestamp.should.be.none - - wfe.schedule_activity_task(123, VALID_ACTIVITY_TASK_ATTRIBUTES) - - wfe.latest_activity_task_timestamp.should.equal(1420113600.0) - - wfe.open_counts["openActivityTasks"].should.equal(1) - last_event = wfe.events()[-1] - last_event.event_type.should.equal("ActivityTaskScheduled") - last_event.event_attributes[ - "decisionTaskCompletedEventId"].should.equal(123) - last_event.event_attributes["taskList"][ - "name"].should.equal("task-list-name") - - wfe.activity_tasks.should.have.length_of(1) - task = wfe.activity_tasks[0] - task.activity_id.should.equal("my-activity-001") - task.activity_type.name.should.equal("test-activity") - wfe.domain.activity_task_lists["task-list-name"].should.contain(task) - - -def test_workflow_execution_schedule_activity_task_without_task_list_should_take_default(): - wfe = make_workflow_execution() - wfe.domain.add_type( - ActivityType("test-activity", "v1.2", task_list="foobar") - ) - wfe.schedule_activity_task(123, { - "activityId": "my-activity-001", - "activityType": {"name": "test-activity", "version": "v1.2"}, - "scheduleToStartTimeout": "600", - "scheduleToCloseTimeout": "600", - "startToCloseTimeout": "600", - "heartbeatTimeout": "300", - }) - - wfe.open_counts["openActivityTasks"].should.equal(1) - last_event = wfe.events()[-1] - last_event.event_type.should.equal("ActivityTaskScheduled") - last_event.event_attributes["taskList"]["name"].should.equal("foobar") - - task = wfe.activity_tasks[0] - wfe.domain.activity_task_lists["foobar"].should.contain(task) - - -def test_workflow_execution_schedule_activity_task_should_fail_if_wrong_attributes(): - wfe = make_workflow_execution() - at = ActivityType("test-activity", "v1.1") - at.status = "DEPRECATED" - wfe.domain.add_type(at) - wfe.domain.add_type(ActivityType("test-activity", "v1.2")) - - hsh = { - "activityId": "my-activity-001", - "activityType": {"name": "test-activity-does-not-exists", "version": "v1.1"}, - } - - wfe.schedule_activity_task(123, hsh) - last_event = wfe.events()[-1] - last_event.event_type.should.equal("ScheduleActivityTaskFailed") - last_event.event_attributes["cause"].should.equal( - "ACTIVITY_TYPE_DOES_NOT_EXIST") - - hsh["activityType"]["name"] = "test-activity" - wfe.schedule_activity_task(123, hsh) - last_event = wfe.events()[-1] - last_event.event_type.should.equal("ScheduleActivityTaskFailed") - last_event.event_attributes["cause"].should.equal( - "ACTIVITY_TYPE_DEPRECATED") - - hsh["activityType"]["version"] = "v1.2" - wfe.schedule_activity_task(123, hsh) - last_event = wfe.events()[-1] - last_event.event_type.should.equal("ScheduleActivityTaskFailed") - last_event.event_attributes["cause"].should.equal( - 
"DEFAULT_TASK_LIST_UNDEFINED") - - hsh["taskList"] = {"name": "foobar"} - wfe.schedule_activity_task(123, hsh) - last_event = wfe.events()[-1] - last_event.event_type.should.equal("ScheduleActivityTaskFailed") - last_event.event_attributes["cause"].should.equal( - "DEFAULT_SCHEDULE_TO_START_TIMEOUT_UNDEFINED") - - hsh["scheduleToStartTimeout"] = "600" - wfe.schedule_activity_task(123, hsh) - last_event = wfe.events()[-1] - last_event.event_type.should.equal("ScheduleActivityTaskFailed") - last_event.event_attributes["cause"].should.equal( - "DEFAULT_SCHEDULE_TO_CLOSE_TIMEOUT_UNDEFINED") - - hsh["scheduleToCloseTimeout"] = "600" - wfe.schedule_activity_task(123, hsh) - last_event = wfe.events()[-1] - last_event.event_type.should.equal("ScheduleActivityTaskFailed") - last_event.event_attributes["cause"].should.equal( - "DEFAULT_START_TO_CLOSE_TIMEOUT_UNDEFINED") - - hsh["startToCloseTimeout"] = "600" - wfe.schedule_activity_task(123, hsh) - last_event = wfe.events()[-1] - last_event.event_type.should.equal("ScheduleActivityTaskFailed") - last_event.event_attributes["cause"].should.equal( - "DEFAULT_HEARTBEAT_TIMEOUT_UNDEFINED") - - wfe.open_counts["openActivityTasks"].should.equal(0) - wfe.activity_tasks.should.have.length_of(0) - wfe.domain.activity_task_lists.should.have.length_of(0) - - hsh["heartbeatTimeout"] = "300" - wfe.schedule_activity_task(123, hsh) - last_event = wfe.events()[-1] - last_event.event_type.should.equal("ActivityTaskScheduled") - - task = wfe.activity_tasks[0] - wfe.domain.activity_task_lists["foobar"].should.contain(task) - wfe.open_counts["openDecisionTasks"].should.equal(0) - wfe.open_counts["openActivityTasks"].should.equal(1) - - -def test_workflow_execution_schedule_activity_task_failure_triggers_new_decision(): - wfe = make_workflow_execution() - wfe.start() - task_token = wfe.decision_tasks[-1].task_token - wfe.start_decision_task(task_token) - wfe.complete_decision_task( - task_token, - execution_context="free-form execution context", - decisions=[ - { - "decisionType": "ScheduleActivityTask", - "scheduleActivityTaskDecisionAttributes": { - "activityId": "my-activity-001", - "activityType": { - "name": "test-activity-does-not-exist", - "version": "v1.2" - }, - } - }, - { - "decisionType": "ScheduleActivityTask", - "scheduleActivityTaskDecisionAttributes": { - "activityId": "my-activity-001", - "activityType": { - "name": "test-activity-does-not-exist", - "version": "v1.2" - }, - } - }, - ]) - - wfe.latest_execution_context.should.equal("free-form execution context") - wfe.open_counts["openActivityTasks"].should.equal(0) - wfe.open_counts["openDecisionTasks"].should.equal(1) - last_events = wfe.events()[-3:] - last_events[0].event_type.should.equal("ScheduleActivityTaskFailed") - last_events[1].event_type.should.equal("ScheduleActivityTaskFailed") - last_events[2].event_type.should.equal("DecisionTaskScheduled") - - -def test_workflow_execution_schedule_activity_task_with_same_activity_id(): - wfe = make_workflow_execution() - - wfe.schedule_activity_task(123, VALID_ACTIVITY_TASK_ATTRIBUTES) - wfe.open_counts["openActivityTasks"].should.equal(1) - last_event = wfe.events()[-1] - last_event.event_type.should.equal("ActivityTaskScheduled") - - wfe.schedule_activity_task(123, VALID_ACTIVITY_TASK_ATTRIBUTES) - wfe.open_counts["openActivityTasks"].should.equal(1) - last_event = wfe.events()[-1] - last_event.event_type.should.equal("ScheduleActivityTaskFailed") - last_event.event_attributes["cause"].should.equal( - "ACTIVITY_ID_ALREADY_IN_USE") - - -def 
test_workflow_execution_start_activity_task(): - wfe = make_workflow_execution() - wfe.schedule_activity_task(123, VALID_ACTIVITY_TASK_ATTRIBUTES) - task_token = wfe.activity_tasks[-1].task_token - wfe.start_activity_task(task_token, identity="worker01") - task = wfe.activity_tasks[-1] - task.state.should.equal("STARTED") - wfe.events()[-1].event_type.should.equal("ActivityTaskStarted") - wfe.events()[-1].event_attributes["identity"].should.equal("worker01") - - -def test_complete_activity_task(): - wfe = make_workflow_execution() - wfe.schedule_activity_task(123, VALID_ACTIVITY_TASK_ATTRIBUTES) - task_token = wfe.activity_tasks[-1].task_token - - wfe.open_counts["openActivityTasks"].should.equal(1) - wfe.open_counts["openDecisionTasks"].should.equal(0) - - wfe.start_activity_task(task_token, identity="worker01") - wfe.complete_activity_task(task_token, result="a superb result") - - task = wfe.activity_tasks[-1] - task.state.should.equal("COMPLETED") - wfe.events()[-2].event_type.should.equal("ActivityTaskCompleted") - wfe.events()[-1].event_type.should.equal("DecisionTaskScheduled") - - wfe.open_counts["openActivityTasks"].should.equal(0) - wfe.open_counts["openDecisionTasks"].should.equal(1) - - -def test_terminate(): - wfe = make_workflow_execution() - wfe.schedule_decision_task() - wfe.terminate() - - wfe.execution_status.should.equal("CLOSED") - wfe.close_status.should.equal("TERMINATED") - wfe.close_cause.should.equal("OPERATOR_INITIATED") - wfe.open_counts["openDecisionTasks"].should.equal(1) - - last_event = wfe.events()[-1] - last_event.event_type.should.equal("WorkflowExecutionTerminated") - # take default child_policy if not provided (as here) - last_event.event_attributes["childPolicy"].should.equal("ABANDON") - - -def test_first_timeout(): - wfe = make_workflow_execution() - wfe.first_timeout().should.be.none - - with freeze_time("2015-01-01 12:00:00"): - wfe.start() - wfe.first_timeout().should.be.none - - with freeze_time("2015-01-01 14:01"): - # 2 hours timeout reached - wfe.first_timeout().should.be.a(Timeout) - - -# See moto/swf/models/workflow_execution.py "_process_timeouts()" for more -# details -def test_timeouts_are_processed_in_order_and_reevaluated(): - # Let's make a Workflow Execution with the following properties: - # - execution start to close timeout of 8 mins - # - (decision) task start to close timeout of 5 mins - # - # Now start the workflow execution, and look at the history 15 mins later: - # - a first decision task is fired just after workflow execution start - # - the first decision task should have timed out after 5 mins - # - that fires a new decision task (which we hack to start automatically) - # - then the workflow timeouts after 8 mins (shows gradual reevaluation) - # - but the last scheduled decision task should *not* timeout (workflow closed) - with freeze_time("2015-01-01 12:00:00"): - wfe = make_workflow_execution( - execution_start_to_close_timeout=8 * 60, - task_start_to_close_timeout=5 * 60, - ) - # decision will automatically start - wfe = auto_start_decision_tasks(wfe) - wfe.start() - event_idx = len(wfe.events()) - - with freeze_time("2015-01-01 12:08:00"): - wfe._process_timeouts() - - event_types = [e.event_type for e in wfe.events()[event_idx:]] - event_types.should.equal([ - "DecisionTaskTimedOut", - "DecisionTaskScheduled", - "DecisionTaskStarted", - "WorkflowExecutionTimedOut", - ]) +from freezegun import freeze_time +import sure # noqa + +from moto.swf.models import ( + ActivityType, + Timeout, + WorkflowType, + WorkflowExecution, 
+) +from moto.swf.exceptions import SWFDefaultUndefinedFault +from ..utils import ( + auto_start_decision_tasks, + get_basic_domain, + get_basic_workflow_type, + make_workflow_execution, +) + + +VALID_ACTIVITY_TASK_ATTRIBUTES = { + "activityId": "my-activity-001", + "activityType": {"name": "test-activity", "version": "v1.1"}, + "taskList": {"name": "task-list-name"}, + "scheduleToStartTimeout": "600", + "scheduleToCloseTimeout": "600", + "startToCloseTimeout": "600", + "heartbeatTimeout": "300", +} + + +def test_workflow_execution_creation(): + domain = get_basic_domain() + wft = get_basic_workflow_type() + wfe = WorkflowExecution(domain, wft, "ab1234", child_policy="TERMINATE") + + wfe.domain.should.equal(domain) + wfe.workflow_type.should.equal(wft) + wfe.child_policy.should.equal("TERMINATE") + + +def test_workflow_execution_creation_child_policy_logic(): + domain = get_basic_domain() + + WorkflowExecution( + domain, + WorkflowType( + "test-workflow", "v1.0", + task_list="queue", default_child_policy="ABANDON", + default_execution_start_to_close_timeout="300", + default_task_start_to_close_timeout="300", + ), + "ab1234" + ).child_policy.should.equal("ABANDON") + + WorkflowExecution( + domain, + WorkflowType( + "test-workflow", "v1.0", task_list="queue", + default_execution_start_to_close_timeout="300", + default_task_start_to_close_timeout="300", + ), + "ab1234", + child_policy="REQUEST_CANCEL" + ).child_policy.should.equal("REQUEST_CANCEL") + + WorkflowExecution.when.called_with( + domain, + WorkflowType("test-workflow", "v1.0"), "ab1234" + ).should.throw(SWFDefaultUndefinedFault) + + +def test_workflow_execution_string_representation(): + wfe = make_workflow_execution(child_policy="TERMINATE") + str(wfe).should.match(r"^WorkflowExecution\(run_id: .*\)") + + +def test_workflow_execution_generates_a_random_run_id(): + domain = get_basic_domain() + wft = get_basic_workflow_type() + wfe1 = WorkflowExecution(domain, wft, "ab1234", child_policy="TERMINATE") + wfe2 = WorkflowExecution(domain, wft, "ab1235", child_policy="TERMINATE") + wfe1.run_id.should_not.equal(wfe2.run_id) + + +def test_workflow_execution_short_dict_representation(): + domain = get_basic_domain() + wf_type = WorkflowType( + "test-workflow", "v1.0", + task_list="queue", default_child_policy="ABANDON", + default_execution_start_to_close_timeout="300", + default_task_start_to_close_timeout="300", + ) + wfe = WorkflowExecution(domain, wf_type, "ab1234") + + sd = wfe.to_short_dict() + sd["workflowId"].should.equal("ab1234") + sd.should.contain("runId") + + +def test_workflow_execution_medium_dict_representation(): + domain = get_basic_domain() + wf_type = WorkflowType( + "test-workflow", "v1.0", + task_list="queue", default_child_policy="ABANDON", + default_execution_start_to_close_timeout="300", + default_task_start_to_close_timeout="300", + ) + wfe = WorkflowExecution(domain, wf_type, "ab1234") + + md = wfe.to_medium_dict() + md["execution"].should.equal(wfe.to_short_dict()) + md["workflowType"].should.equal(wf_type.to_short_dict()) + md["startTimestamp"].should.be.a('float') + md["executionStatus"].should.equal("OPEN") + md["cancelRequested"].should.be.falsy + md.should_not.contain("tagList") + + wfe.tag_list = ["foo", "bar", "baz"] + md = wfe.to_medium_dict() + md["tagList"].should.equal(["foo", "bar", "baz"]) + + +def test_workflow_execution_full_dict_representation(): + domain = get_basic_domain() + wf_type = WorkflowType( + "test-workflow", "v1.0", + task_list="queue", default_child_policy="ABANDON", + 
default_execution_start_to_close_timeout="300", + default_task_start_to_close_timeout="300", + ) + wfe = WorkflowExecution(domain, wf_type, "ab1234") + + fd = wfe.to_full_dict() + fd["executionInfo"].should.equal(wfe.to_medium_dict()) + fd["openCounts"]["openTimers"].should.equal(0) + fd["openCounts"]["openDecisionTasks"].should.equal(0) + fd["openCounts"]["openActivityTasks"].should.equal(0) + fd["executionConfiguration"].should.equal({ + "childPolicy": "ABANDON", + "executionStartToCloseTimeout": "300", + "taskList": {"name": "queue"}, + "taskStartToCloseTimeout": "300", + }) + + +def test_workflow_execution_list_dict_representation(): + domain = get_basic_domain() + wf_type = WorkflowType( + 'test-workflow', 'v1.0', + task_list='queue', default_child_policy='ABANDON', + default_execution_start_to_close_timeout='300', + default_task_start_to_close_timeout='300', + ) + wfe = WorkflowExecution(domain, wf_type, 'ab1234') + + ld = wfe.to_list_dict() + ld['workflowType']['version'].should.equal('v1.0') + ld['workflowType']['name'].should.equal('test-workflow') + ld['executionStatus'].should.equal('OPEN') + ld['execution']['workflowId'].should.equal('ab1234') + ld['execution'].should.contain('runId') + ld['cancelRequested'].should.be.false + ld.should.contain('startTimestamp') + + +def test_workflow_execution_schedule_decision_task(): + wfe = make_workflow_execution() + wfe.open_counts["openDecisionTasks"].should.equal(0) + wfe.schedule_decision_task() + wfe.open_counts["openDecisionTasks"].should.equal(1) + + +def test_workflow_execution_start_decision_task(): + wfe = make_workflow_execution() + wfe.schedule_decision_task() + dt = wfe.decision_tasks[0] + wfe.start_decision_task(dt.task_token, identity="srv01") + dt = wfe.decision_tasks[0] + dt.state.should.equal("STARTED") + wfe.events()[-1].event_type.should.equal("DecisionTaskStarted") + wfe.events()[-1].event_attributes["identity"].should.equal("srv01") + + +def test_workflow_execution_history_events_ids(): + wfe = make_workflow_execution() + wfe._add_event("WorkflowExecutionStarted") + wfe._add_event("DecisionTaskScheduled") + wfe._add_event("DecisionTaskStarted") + ids = [evt.event_id for evt in wfe.events()] + ids.should.equal([1, 2, 3]) + + +@freeze_time("2015-01-01 12:00:00") +def test_workflow_execution_start(): + wfe = make_workflow_execution() + wfe.events().should.equal([]) + + wfe.start() + wfe.start_timestamp.should.equal(1420113600.0) + wfe.events().should.have.length_of(2) + wfe.events()[0].event_type.should.equal("WorkflowExecutionStarted") + wfe.events()[1].event_type.should.equal("DecisionTaskScheduled") + + +@freeze_time("2015-01-02 12:00:00") +def test_workflow_execution_complete(): + wfe = make_workflow_execution() + wfe.complete(123, result="foo") + + wfe.execution_status.should.equal("CLOSED") + wfe.close_status.should.equal("COMPLETED") + wfe.close_timestamp.should.equal(1420200000.0) + wfe.events()[-1].event_type.should.equal("WorkflowExecutionCompleted") + wfe.events()[-1].event_attributes["decisionTaskCompletedEventId"].should.equal(123) + wfe.events()[-1].event_attributes["result"].should.equal("foo") + + +@freeze_time("2015-01-02 12:00:00") +def test_workflow_execution_fail(): + wfe = make_workflow_execution() + wfe.fail(123, details="some details", reason="my rules") + + wfe.execution_status.should.equal("CLOSED") + wfe.close_status.should.equal("FAILED") + wfe.close_timestamp.should.equal(1420200000.0) + wfe.events()[-1].event_type.should.equal("WorkflowExecutionFailed") + 
wfe.events()[-1].event_attributes["decisionTaskCompletedEventId"].should.equal(123) + wfe.events()[-1].event_attributes["details"].should.equal("some details") + wfe.events()[-1].event_attributes["reason"].should.equal("my rules") + + +@freeze_time("2015-01-01 12:00:00") +def test_workflow_execution_schedule_activity_task(): + wfe = make_workflow_execution() + wfe.latest_activity_task_timestamp.should.be.none + + wfe.schedule_activity_task(123, VALID_ACTIVITY_TASK_ATTRIBUTES) + + wfe.latest_activity_task_timestamp.should.equal(1420113600.0) + + wfe.open_counts["openActivityTasks"].should.equal(1) + last_event = wfe.events()[-1] + last_event.event_type.should.equal("ActivityTaskScheduled") + last_event.event_attributes[ + "decisionTaskCompletedEventId"].should.equal(123) + last_event.event_attributes["taskList"][ + "name"].should.equal("task-list-name") + + wfe.activity_tasks.should.have.length_of(1) + task = wfe.activity_tasks[0] + task.activity_id.should.equal("my-activity-001") + task.activity_type.name.should.equal("test-activity") + wfe.domain.activity_task_lists["task-list-name"].should.contain(task) + + +def test_workflow_execution_schedule_activity_task_without_task_list_should_take_default(): + wfe = make_workflow_execution() + wfe.domain.add_type( + ActivityType("test-activity", "v1.2", task_list="foobar") + ) + wfe.schedule_activity_task(123, { + "activityId": "my-activity-001", + "activityType": {"name": "test-activity", "version": "v1.2"}, + "scheduleToStartTimeout": "600", + "scheduleToCloseTimeout": "600", + "startToCloseTimeout": "600", + "heartbeatTimeout": "300", + }) + + wfe.open_counts["openActivityTasks"].should.equal(1) + last_event = wfe.events()[-1] + last_event.event_type.should.equal("ActivityTaskScheduled") + last_event.event_attributes["taskList"]["name"].should.equal("foobar") + + task = wfe.activity_tasks[0] + wfe.domain.activity_task_lists["foobar"].should.contain(task) + + +def test_workflow_execution_schedule_activity_task_should_fail_if_wrong_attributes(): + wfe = make_workflow_execution() + at = ActivityType("test-activity", "v1.1") + at.status = "DEPRECATED" + wfe.domain.add_type(at) + wfe.domain.add_type(ActivityType("test-activity", "v1.2")) + + hsh = { + "activityId": "my-activity-001", + "activityType": {"name": "test-activity-does-not-exists", "version": "v1.1"}, + } + + wfe.schedule_activity_task(123, hsh) + last_event = wfe.events()[-1] + last_event.event_type.should.equal("ScheduleActivityTaskFailed") + last_event.event_attributes["cause"].should.equal( + "ACTIVITY_TYPE_DOES_NOT_EXIST") + + hsh["activityType"]["name"] = "test-activity" + wfe.schedule_activity_task(123, hsh) + last_event = wfe.events()[-1] + last_event.event_type.should.equal("ScheduleActivityTaskFailed") + last_event.event_attributes["cause"].should.equal( + "ACTIVITY_TYPE_DEPRECATED") + + hsh["activityType"]["version"] = "v1.2" + wfe.schedule_activity_task(123, hsh) + last_event = wfe.events()[-1] + last_event.event_type.should.equal("ScheduleActivityTaskFailed") + last_event.event_attributes["cause"].should.equal( + "DEFAULT_TASK_LIST_UNDEFINED") + + hsh["taskList"] = {"name": "foobar"} + wfe.schedule_activity_task(123, hsh) + last_event = wfe.events()[-1] + last_event.event_type.should.equal("ScheduleActivityTaskFailed") + last_event.event_attributes["cause"].should.equal( + "DEFAULT_SCHEDULE_TO_START_TIMEOUT_UNDEFINED") + + hsh["scheduleToStartTimeout"] = "600" + wfe.schedule_activity_task(123, hsh) + last_event = wfe.events()[-1] + 
last_event.event_type.should.equal("ScheduleActivityTaskFailed") + last_event.event_attributes["cause"].should.equal( + "DEFAULT_SCHEDULE_TO_CLOSE_TIMEOUT_UNDEFINED") + + hsh["scheduleToCloseTimeout"] = "600" + wfe.schedule_activity_task(123, hsh) + last_event = wfe.events()[-1] + last_event.event_type.should.equal("ScheduleActivityTaskFailed") + last_event.event_attributes["cause"].should.equal( + "DEFAULT_START_TO_CLOSE_TIMEOUT_UNDEFINED") + + hsh["startToCloseTimeout"] = "600" + wfe.schedule_activity_task(123, hsh) + last_event = wfe.events()[-1] + last_event.event_type.should.equal("ScheduleActivityTaskFailed") + last_event.event_attributes["cause"].should.equal( + "DEFAULT_HEARTBEAT_TIMEOUT_UNDEFINED") + + wfe.open_counts["openActivityTasks"].should.equal(0) + wfe.activity_tasks.should.have.length_of(0) + wfe.domain.activity_task_lists.should.have.length_of(0) + + hsh["heartbeatTimeout"] = "300" + wfe.schedule_activity_task(123, hsh) + last_event = wfe.events()[-1] + last_event.event_type.should.equal("ActivityTaskScheduled") + + task = wfe.activity_tasks[0] + wfe.domain.activity_task_lists["foobar"].should.contain(task) + wfe.open_counts["openDecisionTasks"].should.equal(0) + wfe.open_counts["openActivityTasks"].should.equal(1) + + +def test_workflow_execution_schedule_activity_task_failure_triggers_new_decision(): + wfe = make_workflow_execution() + wfe.start() + task_token = wfe.decision_tasks[-1].task_token + wfe.start_decision_task(task_token) + wfe.complete_decision_task( + task_token, + execution_context="free-form execution context", + decisions=[ + { + "decisionType": "ScheduleActivityTask", + "scheduleActivityTaskDecisionAttributes": { + "activityId": "my-activity-001", + "activityType": { + "name": "test-activity-does-not-exist", + "version": "v1.2" + }, + } + }, + { + "decisionType": "ScheduleActivityTask", + "scheduleActivityTaskDecisionAttributes": { + "activityId": "my-activity-001", + "activityType": { + "name": "test-activity-does-not-exist", + "version": "v1.2" + }, + } + }, + ]) + + wfe.latest_execution_context.should.equal("free-form execution context") + wfe.open_counts["openActivityTasks"].should.equal(0) + wfe.open_counts["openDecisionTasks"].should.equal(1) + last_events = wfe.events()[-3:] + last_events[0].event_type.should.equal("ScheduleActivityTaskFailed") + last_events[1].event_type.should.equal("ScheduleActivityTaskFailed") + last_events[2].event_type.should.equal("DecisionTaskScheduled") + + +def test_workflow_execution_schedule_activity_task_with_same_activity_id(): + wfe = make_workflow_execution() + + wfe.schedule_activity_task(123, VALID_ACTIVITY_TASK_ATTRIBUTES) + wfe.open_counts["openActivityTasks"].should.equal(1) + last_event = wfe.events()[-1] + last_event.event_type.should.equal("ActivityTaskScheduled") + + wfe.schedule_activity_task(123, VALID_ACTIVITY_TASK_ATTRIBUTES) + wfe.open_counts["openActivityTasks"].should.equal(1) + last_event = wfe.events()[-1] + last_event.event_type.should.equal("ScheduleActivityTaskFailed") + last_event.event_attributes["cause"].should.equal( + "ACTIVITY_ID_ALREADY_IN_USE") + + +def test_workflow_execution_start_activity_task(): + wfe = make_workflow_execution() + wfe.schedule_activity_task(123, VALID_ACTIVITY_TASK_ATTRIBUTES) + task_token = wfe.activity_tasks[-1].task_token + wfe.start_activity_task(task_token, identity="worker01") + task = wfe.activity_tasks[-1] + task.state.should.equal("STARTED") + wfe.events()[-1].event_type.should.equal("ActivityTaskStarted") + 
wfe.events()[-1].event_attributes["identity"].should.equal("worker01") + + +def test_complete_activity_task(): + wfe = make_workflow_execution() + wfe.schedule_activity_task(123, VALID_ACTIVITY_TASK_ATTRIBUTES) + task_token = wfe.activity_tasks[-1].task_token + + wfe.open_counts["openActivityTasks"].should.equal(1) + wfe.open_counts["openDecisionTasks"].should.equal(0) + + wfe.start_activity_task(task_token, identity="worker01") + wfe.complete_activity_task(task_token, result="a superb result") + + task = wfe.activity_tasks[-1] + task.state.should.equal("COMPLETED") + wfe.events()[-2].event_type.should.equal("ActivityTaskCompleted") + wfe.events()[-1].event_type.should.equal("DecisionTaskScheduled") + + wfe.open_counts["openActivityTasks"].should.equal(0) + wfe.open_counts["openDecisionTasks"].should.equal(1) + + +def test_terminate(): + wfe = make_workflow_execution() + wfe.schedule_decision_task() + wfe.terminate() + + wfe.execution_status.should.equal("CLOSED") + wfe.close_status.should.equal("TERMINATED") + wfe.close_cause.should.equal("OPERATOR_INITIATED") + wfe.open_counts["openDecisionTasks"].should.equal(1) + + last_event = wfe.events()[-1] + last_event.event_type.should.equal("WorkflowExecutionTerminated") + # take default child_policy if not provided (as here) + last_event.event_attributes["childPolicy"].should.equal("ABANDON") + + +def test_first_timeout(): + wfe = make_workflow_execution() + wfe.first_timeout().should.be.none + + with freeze_time("2015-01-01 12:00:00"): + wfe.start() + wfe.first_timeout().should.be.none + + with freeze_time("2015-01-01 14:01"): + # 2 hours timeout reached + wfe.first_timeout().should.be.a(Timeout) + + +# See moto/swf/models/workflow_execution.py "_process_timeouts()" for more +# details +def test_timeouts_are_processed_in_order_and_reevaluated(): + # Let's make a Workflow Execution with the following properties: + # - execution start to close timeout of 8 mins + # - (decision) task start to close timeout of 5 mins + # + # Now start the workflow execution, and look at the history 15 mins later: + # - a first decision task is fired just after workflow execution start + # - the first decision task should have timed out after 5 mins + # - that fires a new decision task (which we hack to start automatically) + # - then the workflow timeouts after 8 mins (shows gradual reevaluation) + # - but the last scheduled decision task should *not* timeout (workflow closed) + with freeze_time("2015-01-01 12:00:00"): + wfe = make_workflow_execution( + execution_start_to_close_timeout=8 * 60, + task_start_to_close_timeout=5 * 60, + ) + # decision will automatically start + wfe = auto_start_decision_tasks(wfe) + wfe.start() + event_idx = len(wfe.events()) + + with freeze_time("2015-01-01 12:08:00"): + wfe._process_timeouts() + + event_types = [e.event_type for e in wfe.events()[event_idx:]] + event_types.should.equal([ + "DecisionTaskTimedOut", + "DecisionTaskScheduled", + "DecisionTaskStarted", + "WorkflowExecutionTimedOut", + ]) diff --git a/tests/test_swf/responses/test_activity_tasks.py b/tests/test_swf/responses/test_activity_tasks.py index c0b8897b9ed0..e67013f6b033 100644 --- a/tests/test_swf/responses/test_activity_tasks.py +++ b/tests/test_swf/responses/test_activity_tasks.py @@ -1,228 +1,228 @@ -from boto.swf.exceptions import SWFResponseError -from freezegun import freeze_time -import sure # noqa - -from moto import mock_swf_deprecated -from moto.swf import swf_backend - -from ..utils import setup_workflow, SCHEDULE_ACTIVITY_TASK_DECISION - - -# 
PollForActivityTask endpoint -@mock_swf_deprecated -def test_poll_for_activity_task_when_one(): - conn = setup_workflow() - decision_token = conn.poll_for_decision_task( - "test-domain", "queue")["taskToken"] - conn.respond_decision_task_completed(decision_token, decisions=[ - SCHEDULE_ACTIVITY_TASK_DECISION - ]) - resp = conn.poll_for_activity_task( - "test-domain", "activity-task-list", identity="surprise") - resp["activityId"].should.equal("my-activity-001") - resp["taskToken"].should_not.be.none - - resp = conn.get_workflow_execution_history( - "test-domain", conn.run_id, "uid-abcd1234") - resp["events"][-1]["eventType"].should.equal("ActivityTaskStarted") - resp["events"][-1]["activityTaskStartedEventAttributes"].should.equal( - {"identity": "surprise", "scheduledEventId": 5} - ) - - -@mock_swf_deprecated -def test_poll_for_activity_task_when_none(): - conn = setup_workflow() - resp = conn.poll_for_activity_task("test-domain", "activity-task-list") - resp.should.equal({"startedEventId": 0}) - - -@mock_swf_deprecated -def test_poll_for_activity_task_on_non_existent_queue(): - conn = setup_workflow() - resp = conn.poll_for_activity_task("test-domain", "non-existent-queue") - resp.should.equal({"startedEventId": 0}) - - -# CountPendingActivityTasks endpoint -@mock_swf_deprecated -def test_count_pending_activity_tasks(): - conn = setup_workflow() - decision_token = conn.poll_for_decision_task( - "test-domain", "queue")["taskToken"] - conn.respond_decision_task_completed(decision_token, decisions=[ - SCHEDULE_ACTIVITY_TASK_DECISION - ]) - - resp = conn.count_pending_activity_tasks( - "test-domain", "activity-task-list") - resp.should.equal({"count": 1, "truncated": False}) - - -@mock_swf_deprecated -def test_count_pending_decision_tasks_on_non_existent_task_list(): - conn = setup_workflow() - resp = conn.count_pending_activity_tasks("test-domain", "non-existent") - resp.should.equal({"count": 0, "truncated": False}) - - -# RespondActivityTaskCompleted endpoint -@mock_swf_deprecated -def test_respond_activity_task_completed(): - conn = setup_workflow() - decision_token = conn.poll_for_decision_task( - "test-domain", "queue")["taskToken"] - conn.respond_decision_task_completed(decision_token, decisions=[ - SCHEDULE_ACTIVITY_TASK_DECISION - ]) - activity_token = conn.poll_for_activity_task( - "test-domain", "activity-task-list")["taskToken"] - - resp = conn.respond_activity_task_completed( - activity_token, result="result of the task") - resp.should.be.none - - resp = conn.get_workflow_execution_history( - "test-domain", conn.run_id, "uid-abcd1234") - resp["events"][-2]["eventType"].should.equal("ActivityTaskCompleted") - resp["events"][-2]["activityTaskCompletedEventAttributes"].should.equal( - {"result": "result of the task", "scheduledEventId": 5, "startedEventId": 6} - ) - - -@mock_swf_deprecated -def test_respond_activity_task_completed_on_closed_workflow_execution(): - conn = setup_workflow() - decision_token = conn.poll_for_decision_task( - "test-domain", "queue")["taskToken"] - conn.respond_decision_task_completed(decision_token, decisions=[ - SCHEDULE_ACTIVITY_TASK_DECISION - ]) - activity_token = conn.poll_for_activity_task( - "test-domain", "activity-task-list")["taskToken"] - - # bad: we're closing workflow execution manually, but endpoints are not - # coded for now.. 
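# (What follows reaches straight into moto's in-memory backend: the test
# grabs the last WorkflowExecution object from the mocked domain and flips
# its execution_status by hand, because no public endpoint in this mock
# closes an execution yet; hence the "bad" disclaimer above.)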
- wfe = swf_backend.domains[0].workflow_executions[-1] - wfe.execution_status = "CLOSED" - # /bad - - conn.respond_activity_task_completed.when.called_with( - activity_token - ).should.throw(SWFResponseError, "WorkflowExecution=") - - -@mock_swf_deprecated -def test_respond_activity_task_completed_with_task_already_completed(): - conn = setup_workflow() - decision_token = conn.poll_for_decision_task( - "test-domain", "queue")["taskToken"] - conn.respond_decision_task_completed(decision_token, decisions=[ - SCHEDULE_ACTIVITY_TASK_DECISION - ]) - activity_token = conn.poll_for_activity_task( - "test-domain", "activity-task-list")["taskToken"] - - conn.respond_activity_task_completed(activity_token) - - conn.respond_activity_task_completed.when.called_with( - activity_token - ).should.throw(SWFResponseError, "Unknown activity, scheduledEventId = 5") - - -# RespondActivityTaskFailed endpoint -@mock_swf_deprecated -def test_respond_activity_task_failed(): - conn = setup_workflow() - decision_token = conn.poll_for_decision_task( - "test-domain", "queue")["taskToken"] - conn.respond_decision_task_completed(decision_token, decisions=[ - SCHEDULE_ACTIVITY_TASK_DECISION - ]) - activity_token = conn.poll_for_activity_task( - "test-domain", "activity-task-list")["taskToken"] - - resp = conn.respond_activity_task_failed(activity_token, - reason="short reason", - details="long details") - resp.should.be.none - - resp = conn.get_workflow_execution_history( - "test-domain", conn.run_id, "uid-abcd1234") - resp["events"][-2]["eventType"].should.equal("ActivityTaskFailed") - resp["events"][-2]["activityTaskFailedEventAttributes"].should.equal( - {"reason": "short reason", "details": "long details", - "scheduledEventId": 5, "startedEventId": 6} - ) - - -@mock_swf_deprecated -def test_respond_activity_task_completed_with_wrong_token(): - # NB: we just test ONE failure case for RespondActivityTaskFailed - # because the safeguards are shared with RespondActivityTaskCompleted, so - # no need to retest everything end-to-end. 
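# The shared safeguard mentioned above can be pictured roughly like this
# (a hypothetical sketch, not moto's actual code): both Respond* endpoints
# resolve the token through one common lookup, so a single failure case
# covers both code paths.
def _resolve_activity_task(open_tasks, task_token):
    # Reject tokens that do not map to a known open activity task.
    if task_token not in open_tasks:
        raise ValueError("Invalid token")
    return open_tasks[task_token]

_resolve_activity_task({"tok-1": "task"}, "tok-1")        # fine
# _resolve_activity_task({"tok-1": "task"}, "bad-token")  # would raise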
- conn = setup_workflow() - decision_token = conn.poll_for_decision_task( - "test-domain", "queue")["taskToken"] - conn.respond_decision_task_completed(decision_token, decisions=[ - SCHEDULE_ACTIVITY_TASK_DECISION - ]) - conn.poll_for_activity_task("test-domain", "activity-task-list") - conn.respond_activity_task_failed.when.called_with( - "not-a-correct-token" - ).should.throw(SWFResponseError, "Invalid token") - - -# RecordActivityTaskHeartbeat endpoint -@mock_swf_deprecated -def test_record_activity_task_heartbeat(): - conn = setup_workflow() - decision_token = conn.poll_for_decision_task( - "test-domain", "queue")["taskToken"] - conn.respond_decision_task_completed(decision_token, decisions=[ - SCHEDULE_ACTIVITY_TASK_DECISION - ]) - activity_token = conn.poll_for_activity_task( - "test-domain", "activity-task-list")["taskToken"] - - resp = conn.record_activity_task_heartbeat(activity_token) - resp.should.equal({"cancelRequested": False}) - - -@mock_swf_deprecated -def test_record_activity_task_heartbeat_with_wrong_token(): - conn = setup_workflow() - decision_token = conn.poll_for_decision_task( - "test-domain", "queue")["taskToken"] - conn.respond_decision_task_completed(decision_token, decisions=[ - SCHEDULE_ACTIVITY_TASK_DECISION - ]) - conn.poll_for_activity_task( - "test-domain", "activity-task-list")["taskToken"] - - conn.record_activity_task_heartbeat.when.called_with( - "bad-token", details="some progress details" - ).should.throw(SWFResponseError) - - -@mock_swf_deprecated -def test_record_activity_task_heartbeat_sets_details_in_case_of_timeout(): - conn = setup_workflow() - decision_token = conn.poll_for_decision_task( - "test-domain", "queue")["taskToken"] - conn.respond_decision_task_completed(decision_token, decisions=[ - SCHEDULE_ACTIVITY_TASK_DECISION - ]) - with freeze_time("2015-01-01 12:00:00"): - activity_token = conn.poll_for_activity_task( - "test-domain", "activity-task-list")["taskToken"] - conn.record_activity_task_heartbeat( - activity_token, details="some progress details") - - with freeze_time("2015-01-01 12:05:30"): - # => Activity Task Heartbeat timeout reached!! 
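# Why 12:05:30 is past the limit: assuming the "300"-second heartbeatTimeout
# used by these tests' activity attributes, the frozen-clock jump from
# 12:00:00 is 330 seconds. A quick standalone check of that arithmetic:
from datetime import datetime
delta = datetime(2015, 1, 1, 12, 5, 30) - datetime(2015, 1, 1, 12, 0, 0)
assert delta.total_seconds() == 330.0  # 330s > 300s, so the task timed out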
- resp = conn.get_workflow_execution_history( - "test-domain", conn.run_id, "uid-abcd1234") - resp["events"][-2]["eventType"].should.equal("ActivityTaskTimedOut") - attrs = resp["events"][-2]["activityTaskTimedOutEventAttributes"] - attrs["details"].should.equal("some progress details") +from boto.swf.exceptions import SWFResponseError +from freezegun import freeze_time +import sure # noqa + +from moto import mock_swf_deprecated +from moto.swf import swf_backend + +from ..utils import setup_workflow, SCHEDULE_ACTIVITY_TASK_DECISION + + +# PollForActivityTask endpoint +@mock_swf_deprecated +def test_poll_for_activity_task_when_one(): + conn = setup_workflow() + decision_token = conn.poll_for_decision_task( + "test-domain", "queue")["taskToken"] + conn.respond_decision_task_completed(decision_token, decisions=[ + SCHEDULE_ACTIVITY_TASK_DECISION + ]) + resp = conn.poll_for_activity_task( + "test-domain", "activity-task-list", identity="surprise") + resp["activityId"].should.equal("my-activity-001") + resp["taskToken"].should_not.be.none + + resp = conn.get_workflow_execution_history( + "test-domain", conn.run_id, "uid-abcd1234") + resp["events"][-1]["eventType"].should.equal("ActivityTaskStarted") + resp["events"][-1]["activityTaskStartedEventAttributes"].should.equal( + {"identity": "surprise", "scheduledEventId": 5} + ) + + +@mock_swf_deprecated +def test_poll_for_activity_task_when_none(): + conn = setup_workflow() + resp = conn.poll_for_activity_task("test-domain", "activity-task-list") + resp.should.equal({"startedEventId": 0}) + + +@mock_swf_deprecated +def test_poll_for_activity_task_on_non_existent_queue(): + conn = setup_workflow() + resp = conn.poll_for_activity_task("test-domain", "non-existent-queue") + resp.should.equal({"startedEventId": 0}) + + +# CountPendingActivityTasks endpoint +@mock_swf_deprecated +def test_count_pending_activity_tasks(): + conn = setup_workflow() + decision_token = conn.poll_for_decision_task( + "test-domain", "queue")["taskToken"] + conn.respond_decision_task_completed(decision_token, decisions=[ + SCHEDULE_ACTIVITY_TASK_DECISION + ]) + + resp = conn.count_pending_activity_tasks( + "test-domain", "activity-task-list") + resp.should.equal({"count": 1, "truncated": False}) + + +@mock_swf_deprecated +def test_count_pending_decision_tasks_on_non_existent_task_list(): + conn = setup_workflow() + resp = conn.count_pending_activity_tasks("test-domain", "non-existent") + resp.should.equal({"count": 0, "truncated": False}) + + +# RespondActivityTaskCompleted endpoint +@mock_swf_deprecated +def test_respond_activity_task_completed(): + conn = setup_workflow() + decision_token = conn.poll_for_decision_task( + "test-domain", "queue")["taskToken"] + conn.respond_decision_task_completed(decision_token, decisions=[ + SCHEDULE_ACTIVITY_TASK_DECISION + ]) + activity_token = conn.poll_for_activity_task( + "test-domain", "activity-task-list")["taskToken"] + + resp = conn.respond_activity_task_completed( + activity_token, result="result of the task") + resp.should.be.none + + resp = conn.get_workflow_execution_history( + "test-domain", conn.run_id, "uid-abcd1234") + resp["events"][-2]["eventType"].should.equal("ActivityTaskCompleted") + resp["events"][-2]["activityTaskCompletedEventAttributes"].should.equal( + {"result": "result of the task", "scheduledEventId": 5, "startedEventId": 6} + ) + + +@mock_swf_deprecated +def test_respond_activity_task_completed_on_closed_workflow_execution(): + conn = setup_workflow() + decision_token = conn.poll_for_decision_task( + 
"test-domain", "queue")["taskToken"] + conn.respond_decision_task_completed(decision_token, decisions=[ + SCHEDULE_ACTIVITY_TASK_DECISION + ]) + activity_token = conn.poll_for_activity_task( + "test-domain", "activity-task-list")["taskToken"] + + # bad: we're closing workflow execution manually, but endpoints are not + # coded for now.. + wfe = swf_backend.domains[0].workflow_executions[-1] + wfe.execution_status = "CLOSED" + # /bad + + conn.respond_activity_task_completed.when.called_with( + activity_token + ).should.throw(SWFResponseError, "WorkflowExecution=") + + +@mock_swf_deprecated +def test_respond_activity_task_completed_with_task_already_completed(): + conn = setup_workflow() + decision_token = conn.poll_for_decision_task( + "test-domain", "queue")["taskToken"] + conn.respond_decision_task_completed(decision_token, decisions=[ + SCHEDULE_ACTIVITY_TASK_DECISION + ]) + activity_token = conn.poll_for_activity_task( + "test-domain", "activity-task-list")["taskToken"] + + conn.respond_activity_task_completed(activity_token) + + conn.respond_activity_task_completed.when.called_with( + activity_token + ).should.throw(SWFResponseError, "Unknown activity, scheduledEventId = 5") + + +# RespondActivityTaskFailed endpoint +@mock_swf_deprecated +def test_respond_activity_task_failed(): + conn = setup_workflow() + decision_token = conn.poll_for_decision_task( + "test-domain", "queue")["taskToken"] + conn.respond_decision_task_completed(decision_token, decisions=[ + SCHEDULE_ACTIVITY_TASK_DECISION + ]) + activity_token = conn.poll_for_activity_task( + "test-domain", "activity-task-list")["taskToken"] + + resp = conn.respond_activity_task_failed(activity_token, + reason="short reason", + details="long details") + resp.should.be.none + + resp = conn.get_workflow_execution_history( + "test-domain", conn.run_id, "uid-abcd1234") + resp["events"][-2]["eventType"].should.equal("ActivityTaskFailed") + resp["events"][-2]["activityTaskFailedEventAttributes"].should.equal( + {"reason": "short reason", "details": "long details", + "scheduledEventId": 5, "startedEventId": 6} + ) + + +@mock_swf_deprecated +def test_respond_activity_task_completed_with_wrong_token(): + # NB: we just test ONE failure case for RespondActivityTaskFailed + # because the safeguards are shared with RespondActivityTaskCompleted, so + # no need to retest everything end-to-end. 
+ conn = setup_workflow() + decision_token = conn.poll_for_decision_task( + "test-domain", "queue")["taskToken"] + conn.respond_decision_task_completed(decision_token, decisions=[ + SCHEDULE_ACTIVITY_TASK_DECISION + ]) + conn.poll_for_activity_task("test-domain", "activity-task-list") + conn.respond_activity_task_failed.when.called_with( + "not-a-correct-token" + ).should.throw(SWFResponseError, "Invalid token") + + +# RecordActivityTaskHeartbeat endpoint +@mock_swf_deprecated +def test_record_activity_task_heartbeat(): + conn = setup_workflow() + decision_token = conn.poll_for_decision_task( + "test-domain", "queue")["taskToken"] + conn.respond_decision_task_completed(decision_token, decisions=[ + SCHEDULE_ACTIVITY_TASK_DECISION + ]) + activity_token = conn.poll_for_activity_task( + "test-domain", "activity-task-list")["taskToken"] + + resp = conn.record_activity_task_heartbeat(activity_token) + resp.should.equal({"cancelRequested": False}) + + +@mock_swf_deprecated +def test_record_activity_task_heartbeat_with_wrong_token(): + conn = setup_workflow() + decision_token = conn.poll_for_decision_task( + "test-domain", "queue")["taskToken"] + conn.respond_decision_task_completed(decision_token, decisions=[ + SCHEDULE_ACTIVITY_TASK_DECISION + ]) + conn.poll_for_activity_task( + "test-domain", "activity-task-list")["taskToken"] + + conn.record_activity_task_heartbeat.when.called_with( + "bad-token", details="some progress details" + ).should.throw(SWFResponseError) + + +@mock_swf_deprecated +def test_record_activity_task_heartbeat_sets_details_in_case_of_timeout(): + conn = setup_workflow() + decision_token = conn.poll_for_decision_task( + "test-domain", "queue")["taskToken"] + conn.respond_decision_task_completed(decision_token, decisions=[ + SCHEDULE_ACTIVITY_TASK_DECISION + ]) + with freeze_time("2015-01-01 12:00:00"): + activity_token = conn.poll_for_activity_task( + "test-domain", "activity-task-list")["taskToken"] + conn.record_activity_task_heartbeat( + activity_token, details="some progress details") + + with freeze_time("2015-01-01 12:05:30"): + # => Activity Task Heartbeat timeout reached!! 
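# freeze_time (from freezegun) patches Python's clock inside the block, so
# the heartbeat timeout below is "reached" without any real waiting. A
# minimal illustration of the patched clock:
from freezegun import freeze_time
import time
with freeze_time("2015-01-01 12:00:00"):
    start = time.time()
with freeze_time("2015-01-01 12:05:30"):
    assert time.time() - start == 330.0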
+ resp = conn.get_workflow_execution_history( + "test-domain", conn.run_id, "uid-abcd1234") + resp["events"][-2]["eventType"].should.equal("ActivityTaskTimedOut") + attrs = resp["events"][-2]["activityTaskTimedOutEventAttributes"] + attrs["details"].should.equal("some progress details") diff --git a/tests/test_swf/responses/test_activity_types.py b/tests/test_swf/responses/test_activity_types.py index 95d8a37337d8..7bb66ac3219a 100644 --- a/tests/test_swf/responses/test_activity_types.py +++ b/tests/test_swf/responses/test_activity_types.py @@ -1,134 +1,134 @@ -import boto -from boto.swf.exceptions import SWFResponseError -import sure # noqa - -from moto import mock_swf_deprecated - - -# RegisterActivityType endpoint -@mock_swf_deprecated -def test_register_activity_type(): - conn = boto.connect_swf("the_key", "the_secret") - conn.register_domain("test-domain", "60") - conn.register_activity_type("test-domain", "test-activity", "v1.0") - - types = conn.list_activity_types("test-domain", "REGISTERED") - actype = types["typeInfos"][0] - actype["activityType"]["name"].should.equal("test-activity") - actype["activityType"]["version"].should.equal("v1.0") - - -@mock_swf_deprecated -def test_register_already_existing_activity_type(): - conn = boto.connect_swf("the_key", "the_secret") - conn.register_domain("test-domain", "60") - conn.register_activity_type("test-domain", "test-activity", "v1.0") - - conn.register_activity_type.when.called_with( - "test-domain", "test-activity", "v1.0" - ).should.throw(SWFResponseError) - - -@mock_swf_deprecated -def test_register_with_wrong_parameter_type(): - conn = boto.connect_swf("the_key", "the_secret") - conn.register_domain("test-domain", "60") - - conn.register_activity_type.when.called_with( - "test-domain", "test-activity", 12 - ).should.throw(SWFResponseError) - - -# ListActivityTypes endpoint -@mock_swf_deprecated -def test_list_activity_types(): - conn = boto.connect_swf("the_key", "the_secret") - conn.register_domain("test-domain", "60") - conn.register_activity_type("test-domain", "b-test-activity", "v1.0") - conn.register_activity_type("test-domain", "a-test-activity", "v1.0") - conn.register_activity_type("test-domain", "c-test-activity", "v1.0") - - all_activity_types = conn.list_activity_types("test-domain", "REGISTERED") - names = [activity_type["activityType"]["name"] - for activity_type in all_activity_types["typeInfos"]] - names.should.equal( - ["a-test-activity", "b-test-activity", "c-test-activity"]) - - -@mock_swf_deprecated -def test_list_activity_types_reverse_order(): - conn = boto.connect_swf("the_key", "the_secret") - conn.register_domain("test-domain", "60") - conn.register_activity_type("test-domain", "b-test-activity", "v1.0") - conn.register_activity_type("test-domain", "a-test-activity", "v1.0") - conn.register_activity_type("test-domain", "c-test-activity", "v1.0") - - all_activity_types = conn.list_activity_types("test-domain", "REGISTERED", - reverse_order=True) - names = [activity_type["activityType"]["name"] - for activity_type in all_activity_types["typeInfos"]] - names.should.equal( - ["c-test-activity", "b-test-activity", "a-test-activity"]) - - -# DeprecateActivityType endpoint -@mock_swf_deprecated -def test_deprecate_activity_type(): - conn = boto.connect_swf("the_key", "the_secret") - conn.register_domain("test-domain", "60") - conn.register_activity_type("test-domain", "test-activity", "v1.0") - conn.deprecate_activity_type("test-domain", "test-activity", "v1.0") - - actypes = 
conn.list_activity_types("test-domain", "DEPRECATED") - actype = actypes["typeInfos"][0] - actype["activityType"]["name"].should.equal("test-activity") - actype["activityType"]["version"].should.equal("v1.0") - - -@mock_swf_deprecated -def test_deprecate_already_deprecated_activity_type(): - conn = boto.connect_swf("the_key", "the_secret") - conn.register_domain("test-domain", "60") - conn.register_activity_type("test-domain", "test-activity", "v1.0") - conn.deprecate_activity_type("test-domain", "test-activity", "v1.0") - - conn.deprecate_activity_type.when.called_with( - "test-domain", "test-activity", "v1.0" - ).should.throw(SWFResponseError) - - -@mock_swf_deprecated -def test_deprecate_non_existent_activity_type(): - conn = boto.connect_swf("the_key", "the_secret") - conn.register_domain("test-domain", "60") - - conn.deprecate_activity_type.when.called_with( - "test-domain", "non-existent", "v1.0" - ).should.throw(SWFResponseError) - - -# DescribeActivityType endpoint -@mock_swf_deprecated -def test_describe_activity_type(): - conn = boto.connect_swf("the_key", "the_secret") - conn.register_domain("test-domain", "60") - conn.register_activity_type("test-domain", "test-activity", "v1.0", - task_list="foo", default_task_heartbeat_timeout="32") - - actype = conn.describe_activity_type( - "test-domain", "test-activity", "v1.0") - actype["configuration"]["defaultTaskList"]["name"].should.equal("foo") - infos = actype["typeInfo"] - infos["activityType"]["name"].should.equal("test-activity") - infos["activityType"]["version"].should.equal("v1.0") - infos["status"].should.equal("REGISTERED") - - -@mock_swf_deprecated -def test_describe_non_existent_activity_type(): - conn = boto.connect_swf("the_key", "the_secret") - conn.register_domain("test-domain", "60") - - conn.describe_activity_type.when.called_with( - "test-domain", "non-existent", "v1.0" - ).should.throw(SWFResponseError) +import boto +from boto.swf.exceptions import SWFResponseError +import sure # noqa + +from moto import mock_swf_deprecated + + +# RegisterActivityType endpoint +@mock_swf_deprecated +def test_register_activity_type(): + conn = boto.connect_swf("the_key", "the_secret") + conn.register_domain("test-domain", "60") + conn.register_activity_type("test-domain", "test-activity", "v1.0") + + types = conn.list_activity_types("test-domain", "REGISTERED") + actype = types["typeInfos"][0] + actype["activityType"]["name"].should.equal("test-activity") + actype["activityType"]["version"].should.equal("v1.0") + + +@mock_swf_deprecated +def test_register_already_existing_activity_type(): + conn = boto.connect_swf("the_key", "the_secret") + conn.register_domain("test-domain", "60") + conn.register_activity_type("test-domain", "test-activity", "v1.0") + + conn.register_activity_type.when.called_with( + "test-domain", "test-activity", "v1.0" + ).should.throw(SWFResponseError) + + +@mock_swf_deprecated +def test_register_with_wrong_parameter_type(): + conn = boto.connect_swf("the_key", "the_secret") + conn.register_domain("test-domain", "60") + + conn.register_activity_type.when.called_with( + "test-domain", "test-activity", 12 + ).should.throw(SWFResponseError) + + +# ListActivityTypes endpoint +@mock_swf_deprecated +def test_list_activity_types(): + conn = boto.connect_swf("the_key", "the_secret") + conn.register_domain("test-domain", "60") + conn.register_activity_type("test-domain", "b-test-activity", "v1.0") + conn.register_activity_type("test-domain", "a-test-activity", "v1.0") + conn.register_activity_type("test-domain", 
"c-test-activity", "v1.0") + + all_activity_types = conn.list_activity_types("test-domain", "REGISTERED") + names = [activity_type["activityType"]["name"] + for activity_type in all_activity_types["typeInfos"]] + names.should.equal( + ["a-test-activity", "b-test-activity", "c-test-activity"]) + + +@mock_swf_deprecated +def test_list_activity_types_reverse_order(): + conn = boto.connect_swf("the_key", "the_secret") + conn.register_domain("test-domain", "60") + conn.register_activity_type("test-domain", "b-test-activity", "v1.0") + conn.register_activity_type("test-domain", "a-test-activity", "v1.0") + conn.register_activity_type("test-domain", "c-test-activity", "v1.0") + + all_activity_types = conn.list_activity_types("test-domain", "REGISTERED", + reverse_order=True) + names = [activity_type["activityType"]["name"] + for activity_type in all_activity_types["typeInfos"]] + names.should.equal( + ["c-test-activity", "b-test-activity", "a-test-activity"]) + + +# DeprecateActivityType endpoint +@mock_swf_deprecated +def test_deprecate_activity_type(): + conn = boto.connect_swf("the_key", "the_secret") + conn.register_domain("test-domain", "60") + conn.register_activity_type("test-domain", "test-activity", "v1.0") + conn.deprecate_activity_type("test-domain", "test-activity", "v1.0") + + actypes = conn.list_activity_types("test-domain", "DEPRECATED") + actype = actypes["typeInfos"][0] + actype["activityType"]["name"].should.equal("test-activity") + actype["activityType"]["version"].should.equal("v1.0") + + +@mock_swf_deprecated +def test_deprecate_already_deprecated_activity_type(): + conn = boto.connect_swf("the_key", "the_secret") + conn.register_domain("test-domain", "60") + conn.register_activity_type("test-domain", "test-activity", "v1.0") + conn.deprecate_activity_type("test-domain", "test-activity", "v1.0") + + conn.deprecate_activity_type.when.called_with( + "test-domain", "test-activity", "v1.0" + ).should.throw(SWFResponseError) + + +@mock_swf_deprecated +def test_deprecate_non_existent_activity_type(): + conn = boto.connect_swf("the_key", "the_secret") + conn.register_domain("test-domain", "60") + + conn.deprecate_activity_type.when.called_with( + "test-domain", "non-existent", "v1.0" + ).should.throw(SWFResponseError) + + +# DescribeActivityType endpoint +@mock_swf_deprecated +def test_describe_activity_type(): + conn = boto.connect_swf("the_key", "the_secret") + conn.register_domain("test-domain", "60") + conn.register_activity_type("test-domain", "test-activity", "v1.0", + task_list="foo", default_task_heartbeat_timeout="32") + + actype = conn.describe_activity_type( + "test-domain", "test-activity", "v1.0") + actype["configuration"]["defaultTaskList"]["name"].should.equal("foo") + infos = actype["typeInfo"] + infos["activityType"]["name"].should.equal("test-activity") + infos["activityType"]["version"].should.equal("v1.0") + infos["status"].should.equal("REGISTERED") + + +@mock_swf_deprecated +def test_describe_non_existent_activity_type(): + conn = boto.connect_swf("the_key", "the_secret") + conn.register_domain("test-domain", "60") + + conn.describe_activity_type.when.called_with( + "test-domain", "non-existent", "v1.0" + ).should.throw(SWFResponseError) diff --git a/tests/test_swf/responses/test_decision_tasks.py b/tests/test_swf/responses/test_decision_tasks.py index 972b1053b77d..ecb3c31178fe 100644 --- a/tests/test_swf/responses/test_decision_tasks.py +++ b/tests/test_swf/responses/test_decision_tasks.py @@ -1,342 +1,342 @@ -from boto.swf.exceptions import SWFResponseError 
-from freezegun import freeze_time -import sure # noqa - -from moto import mock_swf_deprecated -from moto.swf import swf_backend - -from ..utils import setup_workflow - - -# PollForDecisionTask endpoint -@mock_swf_deprecated -def test_poll_for_decision_task_when_one(): - conn = setup_workflow() - - resp = conn.get_workflow_execution_history( - "test-domain", conn.run_id, "uid-abcd1234") - types = [evt["eventType"] for evt in resp["events"]] - types.should.equal(["WorkflowExecutionStarted", "DecisionTaskScheduled"]) - - resp = conn.poll_for_decision_task( - "test-domain", "queue", identity="srv01") - types = [evt["eventType"] for evt in resp["events"]] - types.should.equal(["WorkflowExecutionStarted", - "DecisionTaskScheduled", "DecisionTaskStarted"]) - - resp[ - "events"][-1]["decisionTaskStartedEventAttributes"]["identity"].should.equal("srv01") - - -@mock_swf_deprecated -def test_poll_for_decision_task_when_none(): - conn = setup_workflow() - conn.poll_for_decision_task("test-domain", "queue") - - resp = conn.poll_for_decision_task("test-domain", "queue") - # this is the DecisionTask representation you get from the real SWF - # after waiting 60s when there's no decision to be taken - resp.should.equal({"previousStartedEventId": 0, "startedEventId": 0}) - - -@mock_swf_deprecated -def test_poll_for_decision_task_on_non_existent_queue(): - conn = setup_workflow() - resp = conn.poll_for_decision_task("test-domain", "non-existent-queue") - resp.should.equal({"previousStartedEventId": 0, "startedEventId": 0}) - - -@mock_swf_deprecated -def test_poll_for_decision_task_with_reverse_order(): - conn = setup_workflow() - resp = conn.poll_for_decision_task( - "test-domain", "queue", reverse_order=True) - types = [evt["eventType"] for evt in resp["events"]] - types.should.equal( - ["DecisionTaskStarted", "DecisionTaskScheduled", "WorkflowExecutionStarted"]) - - -# CountPendingDecisionTasks endpoint -@mock_swf_deprecated -def test_count_pending_decision_tasks(): - conn = setup_workflow() - conn.poll_for_decision_task("test-domain", "queue") - resp = conn.count_pending_decision_tasks("test-domain", "queue") - resp.should.equal({"count": 1, "truncated": False}) - - -@mock_swf_deprecated -def test_count_pending_decision_tasks_on_non_existent_task_list(): - conn = setup_workflow() - resp = conn.count_pending_decision_tasks("test-domain", "non-existent") - resp.should.equal({"count": 0, "truncated": False}) - - -@mock_swf_deprecated -def test_count_pending_decision_tasks_after_decision_completes(): - conn = setup_workflow() - resp = conn.poll_for_decision_task("test-domain", "queue") - conn.respond_decision_task_completed(resp["taskToken"]) - - resp = conn.count_pending_decision_tasks("test-domain", "queue") - resp.should.equal({"count": 0, "truncated": False}) - - -# RespondDecisionTaskCompleted endpoint -@mock_swf_deprecated -def test_respond_decision_task_completed_with_no_decision(): - conn = setup_workflow() - - resp = conn.poll_for_decision_task("test-domain", "queue") - task_token = resp["taskToken"] - - resp = conn.respond_decision_task_completed( - task_token, - execution_context="free-form context", - ) - resp.should.be.none - - resp = conn.get_workflow_execution_history( - "test-domain", conn.run_id, "uid-abcd1234") - types = [evt["eventType"] for evt in resp["events"]] - types.should.equal([ - "WorkflowExecutionStarted", - "DecisionTaskScheduled", - "DecisionTaskStarted", - "DecisionTaskCompleted", - ]) - evt = resp["events"][-1] - evt["decisionTaskCompletedEventAttributes"].should.equal({ - 
"executionContext": "free-form context", - "scheduledEventId": 2, - "startedEventId": 3, - }) - - resp = conn.describe_workflow_execution( - "test-domain", conn.run_id, "uid-abcd1234") - resp["latestExecutionContext"].should.equal("free-form context") - - -@mock_swf_deprecated -def test_respond_decision_task_completed_with_wrong_token(): - conn = setup_workflow() - conn.poll_for_decision_task("test-domain", "queue") - conn.respond_decision_task_completed.when.called_with( - "not-a-correct-token" - ).should.throw(SWFResponseError) - - -@mock_swf_deprecated -def test_respond_decision_task_completed_on_close_workflow_execution(): - conn = setup_workflow() - resp = conn.poll_for_decision_task("test-domain", "queue") - task_token = resp["taskToken"] - - # bad: we're closing workflow execution manually, but endpoints are not - # coded for now.. - wfe = swf_backend.domains[0].workflow_executions[-1] - wfe.execution_status = "CLOSED" - # /bad - - conn.respond_decision_task_completed.when.called_with( - task_token - ).should.throw(SWFResponseError) - - -@mock_swf_deprecated -def test_respond_decision_task_completed_with_task_already_completed(): - conn = setup_workflow() - resp = conn.poll_for_decision_task("test-domain", "queue") - task_token = resp["taskToken"] - conn.respond_decision_task_completed(task_token) - - conn.respond_decision_task_completed.when.called_with( - task_token - ).should.throw(SWFResponseError) - - -@mock_swf_deprecated -def test_respond_decision_task_completed_with_complete_workflow_execution(): - conn = setup_workflow() - resp = conn.poll_for_decision_task("test-domain", "queue") - task_token = resp["taskToken"] - - decisions = [{ - "decisionType": "CompleteWorkflowExecution", - "completeWorkflowExecutionDecisionAttributes": {"result": "foo bar"} - }] - resp = conn.respond_decision_task_completed( - task_token, decisions=decisions) - resp.should.be.none - - resp = conn.get_workflow_execution_history( - "test-domain", conn.run_id, "uid-abcd1234") - types = [evt["eventType"] for evt in resp["events"]] - types.should.equal([ - "WorkflowExecutionStarted", - "DecisionTaskScheduled", - "DecisionTaskStarted", - "DecisionTaskCompleted", - "WorkflowExecutionCompleted", - ]) - resp["events"][-1]["workflowExecutionCompletedEventAttributes"][ - "result"].should.equal("foo bar") - - -@mock_swf_deprecated -def test_respond_decision_task_completed_with_close_decision_not_last(): - conn = setup_workflow() - resp = conn.poll_for_decision_task("test-domain", "queue") - task_token = resp["taskToken"] - - decisions = [ - {"decisionType": "CompleteWorkflowExecution"}, - {"decisionType": "WeDontCare"}, - ] - - conn.respond_decision_task_completed.when.called_with( - task_token, decisions=decisions - ).should.throw(SWFResponseError, r"Close must be last decision in list") - - -@mock_swf_deprecated -def test_respond_decision_task_completed_with_invalid_decision_type(): - conn = setup_workflow() - resp = conn.poll_for_decision_task("test-domain", "queue") - task_token = resp["taskToken"] - - decisions = [ - {"decisionType": "BadDecisionType"}, - {"decisionType": "CompleteWorkflowExecution"}, - ] - - conn.respond_decision_task_completed.when.called_with( - task_token, decisions=decisions).should.throw( - SWFResponseError, - r"Value 'BadDecisionType' at 'decisions.1.member.decisionType'" - ) - - -@mock_swf_deprecated -def test_respond_decision_task_completed_with_missing_attributes(): - conn = setup_workflow() - resp = conn.poll_for_decision_task("test-domain", "queue") - task_token = 
resp["taskToken"] - - decisions = [ - { - "decisionType": "should trigger even with incorrect decision type", - "startTimerDecisionAttributes": {} - }, - ] - - conn.respond_decision_task_completed.when.called_with( - task_token, decisions=decisions - ).should.throw( - SWFResponseError, - r"Value null at 'decisions.1.member.startTimerDecisionAttributes.timerId' " - r"failed to satisfy constraint: Member must not be null" - ) - - -@mock_swf_deprecated -def test_respond_decision_task_completed_with_missing_attributes_totally(): - conn = setup_workflow() - resp = conn.poll_for_decision_task("test-domain", "queue") - task_token = resp["taskToken"] - - decisions = [ - {"decisionType": "StartTimer"}, - ] - - conn.respond_decision_task_completed.when.called_with( - task_token, decisions=decisions - ).should.throw( - SWFResponseError, - r"Value null at 'decisions.1.member.startTimerDecisionAttributes.timerId' " - r"failed to satisfy constraint: Member must not be null" - ) - - -@mock_swf_deprecated -def test_respond_decision_task_completed_with_fail_workflow_execution(): - conn = setup_workflow() - resp = conn.poll_for_decision_task("test-domain", "queue") - task_token = resp["taskToken"] - - decisions = [{ - "decisionType": "FailWorkflowExecution", - "failWorkflowExecutionDecisionAttributes": {"reason": "my rules", "details": "foo"} - }] - resp = conn.respond_decision_task_completed( - task_token, decisions=decisions) - resp.should.be.none - - resp = conn.get_workflow_execution_history( - "test-domain", conn.run_id, "uid-abcd1234") - types = [evt["eventType"] for evt in resp["events"]] - types.should.equal([ - "WorkflowExecutionStarted", - "DecisionTaskScheduled", - "DecisionTaskStarted", - "DecisionTaskCompleted", - "WorkflowExecutionFailed", - ]) - attrs = resp["events"][-1]["workflowExecutionFailedEventAttributes"] - attrs["reason"].should.equal("my rules") - attrs["details"].should.equal("foo") - - -@mock_swf_deprecated -@freeze_time("2015-01-01 12:00:00") -def test_respond_decision_task_completed_with_schedule_activity_task(): - conn = setup_workflow() - resp = conn.poll_for_decision_task("test-domain", "queue") - task_token = resp["taskToken"] - - decisions = [{ - "decisionType": "ScheduleActivityTask", - "scheduleActivityTaskDecisionAttributes": { - "activityId": "my-activity-001", - "activityType": { - "name": "test-activity", - "version": "v1.1" - }, - "heartbeatTimeout": "60", - "input": "123", - "taskList": { - "name": "my-task-list" - }, - } - }] - resp = conn.respond_decision_task_completed( - task_token, decisions=decisions) - resp.should.be.none - - resp = conn.get_workflow_execution_history( - "test-domain", conn.run_id, "uid-abcd1234") - types = [evt["eventType"] for evt in resp["events"]] - types.should.equal([ - "WorkflowExecutionStarted", - "DecisionTaskScheduled", - "DecisionTaskStarted", - "DecisionTaskCompleted", - "ActivityTaskScheduled", - ]) - resp["events"][-1]["activityTaskScheduledEventAttributes"].should.equal({ - "decisionTaskCompletedEventId": 4, - "activityId": "my-activity-001", - "activityType": { - "name": "test-activity", - "version": "v1.1", - }, - "heartbeatTimeout": "60", - "input": "123", - "taskList": { - "name": "my-task-list" - }, - }) - - resp = conn.describe_workflow_execution( - "test-domain", conn.run_id, "uid-abcd1234") - resp["latestActivityTaskTimestamp"].should.equal(1420113600.0) +from boto.swf.exceptions import SWFResponseError +from freezegun import freeze_time +import sure # noqa + +from moto import mock_swf_deprecated +from moto.swf import 
swf_backend + +from ..utils import setup_workflow + + +# PollForDecisionTask endpoint +@mock_swf_deprecated +def test_poll_for_decision_task_when_one(): + conn = setup_workflow() + + resp = conn.get_workflow_execution_history( + "test-domain", conn.run_id, "uid-abcd1234") + types = [evt["eventType"] for evt in resp["events"]] + types.should.equal(["WorkflowExecutionStarted", "DecisionTaskScheduled"]) + + resp = conn.poll_for_decision_task( + "test-domain", "queue", identity="srv01") + types = [evt["eventType"] for evt in resp["events"]] + types.should.equal(["WorkflowExecutionStarted", + "DecisionTaskScheduled", "DecisionTaskStarted"]) + + resp[ + "events"][-1]["decisionTaskStartedEventAttributes"]["identity"].should.equal("srv01") + + +@mock_swf_deprecated +def test_poll_for_decision_task_when_none(): + conn = setup_workflow() + conn.poll_for_decision_task("test-domain", "queue") + + resp = conn.poll_for_decision_task("test-domain", "queue") + # this is the DecisionTask representation you get from the real SWF + # after waiting 60s when there's no decision to be taken + resp.should.equal({"previousStartedEventId": 0, "startedEventId": 0}) + + +@mock_swf_deprecated +def test_poll_for_decision_task_on_non_existent_queue(): + conn = setup_workflow() + resp = conn.poll_for_decision_task("test-domain", "non-existent-queue") + resp.should.equal({"previousStartedEventId": 0, "startedEventId": 0}) + + +@mock_swf_deprecated +def test_poll_for_decision_task_with_reverse_order(): + conn = setup_workflow() + resp = conn.poll_for_decision_task( + "test-domain", "queue", reverse_order=True) + types = [evt["eventType"] for evt in resp["events"]] + types.should.equal( + ["DecisionTaskStarted", "DecisionTaskScheduled", "WorkflowExecutionStarted"]) + + +# CountPendingDecisionTasks endpoint +@mock_swf_deprecated +def test_count_pending_decision_tasks(): + conn = setup_workflow() + conn.poll_for_decision_task("test-domain", "queue") + resp = conn.count_pending_decision_tasks("test-domain", "queue") + resp.should.equal({"count": 1, "truncated": False}) + + +@mock_swf_deprecated +def test_count_pending_decision_tasks_on_non_existent_task_list(): + conn = setup_workflow() + resp = conn.count_pending_decision_tasks("test-domain", "non-existent") + resp.should.equal({"count": 0, "truncated": False}) + + +@mock_swf_deprecated +def test_count_pending_decision_tasks_after_decision_completes(): + conn = setup_workflow() + resp = conn.poll_for_decision_task("test-domain", "queue") + conn.respond_decision_task_completed(resp["taskToken"]) + + resp = conn.count_pending_decision_tasks("test-domain", "queue") + resp.should.equal({"count": 0, "truncated": False}) + + +# RespondDecisionTaskCompleted endpoint +@mock_swf_deprecated +def test_respond_decision_task_completed_with_no_decision(): + conn = setup_workflow() + + resp = conn.poll_for_decision_task("test-domain", "queue") + task_token = resp["taskToken"] + + resp = conn.respond_decision_task_completed( + task_token, + execution_context="free-form context", + ) + resp.should.be.none + + resp = conn.get_workflow_execution_history( + "test-domain", conn.run_id, "uid-abcd1234") + types = [evt["eventType"] for evt in resp["events"]] + types.should.equal([ + "WorkflowExecutionStarted", + "DecisionTaskScheduled", + "DecisionTaskStarted", + "DecisionTaskCompleted", + ]) + evt = resp["events"][-1] + evt["decisionTaskCompletedEventAttributes"].should.equal({ + "executionContext": "free-form context", + "scheduledEventId": 2, + "startedEventId": 3, + }) + + resp = 
conn.describe_workflow_execution( + "test-domain", conn.run_id, "uid-abcd1234") + resp["latestExecutionContext"].should.equal("free-form context") + + +@mock_swf_deprecated +def test_respond_decision_task_completed_with_wrong_token(): + conn = setup_workflow() + conn.poll_for_decision_task("test-domain", "queue") + conn.respond_decision_task_completed.when.called_with( + "not-a-correct-token" + ).should.throw(SWFResponseError) + + +@mock_swf_deprecated +def test_respond_decision_task_completed_on_close_workflow_execution(): + conn = setup_workflow() + resp = conn.poll_for_decision_task("test-domain", "queue") + task_token = resp["taskToken"] + + # bad: we're closing workflow execution manually, but endpoints are not + # coded for now.. + wfe = swf_backend.domains[0].workflow_executions[-1] + wfe.execution_status = "CLOSED" + # /bad + + conn.respond_decision_task_completed.when.called_with( + task_token + ).should.throw(SWFResponseError) + + +@mock_swf_deprecated +def test_respond_decision_task_completed_with_task_already_completed(): + conn = setup_workflow() + resp = conn.poll_for_decision_task("test-domain", "queue") + task_token = resp["taskToken"] + conn.respond_decision_task_completed(task_token) + + conn.respond_decision_task_completed.when.called_with( + task_token + ).should.throw(SWFResponseError) + + +@mock_swf_deprecated +def test_respond_decision_task_completed_with_complete_workflow_execution(): + conn = setup_workflow() + resp = conn.poll_for_decision_task("test-domain", "queue") + task_token = resp["taskToken"] + + decisions = [{ + "decisionType": "CompleteWorkflowExecution", + "completeWorkflowExecutionDecisionAttributes": {"result": "foo bar"} + }] + resp = conn.respond_decision_task_completed( + task_token, decisions=decisions) + resp.should.be.none + + resp = conn.get_workflow_execution_history( + "test-domain", conn.run_id, "uid-abcd1234") + types = [evt["eventType"] for evt in resp["events"]] + types.should.equal([ + "WorkflowExecutionStarted", + "DecisionTaskScheduled", + "DecisionTaskStarted", + "DecisionTaskCompleted", + "WorkflowExecutionCompleted", + ]) + resp["events"][-1]["workflowExecutionCompletedEventAttributes"][ + "result"].should.equal("foo bar") + + +@mock_swf_deprecated +def test_respond_decision_task_completed_with_close_decision_not_last(): + conn = setup_workflow() + resp = conn.poll_for_decision_task("test-domain", "queue") + task_token = resp["taskToken"] + + decisions = [ + {"decisionType": "CompleteWorkflowExecution"}, + {"decisionType": "WeDontCare"}, + ] + + conn.respond_decision_task_completed.when.called_with( + task_token, decisions=decisions + ).should.throw(SWFResponseError, r"Close must be last decision in list") + + +@mock_swf_deprecated +def test_respond_decision_task_completed_with_invalid_decision_type(): + conn = setup_workflow() + resp = conn.poll_for_decision_task("test-domain", "queue") + task_token = resp["taskToken"] + + decisions = [ + {"decisionType": "BadDecisionType"}, + {"decisionType": "CompleteWorkflowExecution"}, + ] + + conn.respond_decision_task_completed.when.called_with( + task_token, decisions=decisions).should.throw( + SWFResponseError, + r"Value 'BadDecisionType' at 'decisions.1.member.decisionType'" + ) + + +@mock_swf_deprecated +def test_respond_decision_task_completed_with_missing_attributes(): + conn = setup_workflow() + resp = conn.poll_for_decision_task("test-domain", "queue") + task_token = resp["taskToken"] + + decisions = [ + { + "decisionType": "should trigger even with incorrect decision type", + 
"startTimerDecisionAttributes": {} + }, + ] + + conn.respond_decision_task_completed.when.called_with( + task_token, decisions=decisions + ).should.throw( + SWFResponseError, + r"Value null at 'decisions.1.member.startTimerDecisionAttributes.timerId' " + r"failed to satisfy constraint: Member must not be null" + ) + + +@mock_swf_deprecated +def test_respond_decision_task_completed_with_missing_attributes_totally(): + conn = setup_workflow() + resp = conn.poll_for_decision_task("test-domain", "queue") + task_token = resp["taskToken"] + + decisions = [ + {"decisionType": "StartTimer"}, + ] + + conn.respond_decision_task_completed.when.called_with( + task_token, decisions=decisions + ).should.throw( + SWFResponseError, + r"Value null at 'decisions.1.member.startTimerDecisionAttributes.timerId' " + r"failed to satisfy constraint: Member must not be null" + ) + + +@mock_swf_deprecated +def test_respond_decision_task_completed_with_fail_workflow_execution(): + conn = setup_workflow() + resp = conn.poll_for_decision_task("test-domain", "queue") + task_token = resp["taskToken"] + + decisions = [{ + "decisionType": "FailWorkflowExecution", + "failWorkflowExecutionDecisionAttributes": {"reason": "my rules", "details": "foo"} + }] + resp = conn.respond_decision_task_completed( + task_token, decisions=decisions) + resp.should.be.none + + resp = conn.get_workflow_execution_history( + "test-domain", conn.run_id, "uid-abcd1234") + types = [evt["eventType"] for evt in resp["events"]] + types.should.equal([ + "WorkflowExecutionStarted", + "DecisionTaskScheduled", + "DecisionTaskStarted", + "DecisionTaskCompleted", + "WorkflowExecutionFailed", + ]) + attrs = resp["events"][-1]["workflowExecutionFailedEventAttributes"] + attrs["reason"].should.equal("my rules") + attrs["details"].should.equal("foo") + + +@mock_swf_deprecated +@freeze_time("2015-01-01 12:00:00") +def test_respond_decision_task_completed_with_schedule_activity_task(): + conn = setup_workflow() + resp = conn.poll_for_decision_task("test-domain", "queue") + task_token = resp["taskToken"] + + decisions = [{ + "decisionType": "ScheduleActivityTask", + "scheduleActivityTaskDecisionAttributes": { + "activityId": "my-activity-001", + "activityType": { + "name": "test-activity", + "version": "v1.1" + }, + "heartbeatTimeout": "60", + "input": "123", + "taskList": { + "name": "my-task-list" + }, + } + }] + resp = conn.respond_decision_task_completed( + task_token, decisions=decisions) + resp.should.be.none + + resp = conn.get_workflow_execution_history( + "test-domain", conn.run_id, "uid-abcd1234") + types = [evt["eventType"] for evt in resp["events"]] + types.should.equal([ + "WorkflowExecutionStarted", + "DecisionTaskScheduled", + "DecisionTaskStarted", + "DecisionTaskCompleted", + "ActivityTaskScheduled", + ]) + resp["events"][-1]["activityTaskScheduledEventAttributes"].should.equal({ + "decisionTaskCompletedEventId": 4, + "activityId": "my-activity-001", + "activityType": { + "name": "test-activity", + "version": "v1.1", + }, + "heartbeatTimeout": "60", + "input": "123", + "taskList": { + "name": "my-task-list" + }, + }) + + resp = conn.describe_workflow_execution( + "test-domain", conn.run_id, "uid-abcd1234") + resp["latestActivityTaskTimestamp"].should.equal(1420113600.0) diff --git a/tests/test_swf/responses/test_domains.py b/tests/test_swf/responses/test_domains.py index 8edc76432ef3..4004496eda6b 100644 --- a/tests/test_swf/responses/test_domains.py +++ b/tests/test_swf/responses/test_domains.py @@ -1,119 +1,119 @@ -import boto -from 
boto.swf.exceptions import SWFResponseError -import sure # noqa - -from moto import mock_swf_deprecated - - -# RegisterDomain endpoint -@mock_swf_deprecated -def test_register_domain(): - conn = boto.connect_swf("the_key", "the_secret") - conn.register_domain("test-domain", "60", description="A test domain") - - all_domains = conn.list_domains("REGISTERED") - domain = all_domains["domainInfos"][0] - - domain["name"].should.equal("test-domain") - domain["status"].should.equal("REGISTERED") - domain["description"].should.equal("A test domain") - - -@mock_swf_deprecated -def test_register_already_existing_domain(): - conn = boto.connect_swf("the_key", "the_secret") - conn.register_domain("test-domain", "60", description="A test domain") - - conn.register_domain.when.called_with( - "test-domain", "60", description="A test domain" - ).should.throw(SWFResponseError) - - -@mock_swf_deprecated -def test_register_with_wrong_parameter_type(): - conn = boto.connect_swf("the_key", "the_secret") - - conn.register_domain.when.called_with( - "test-domain", 60, description="A test domain" - ).should.throw(SWFResponseError) - - -# ListDomains endpoint -@mock_swf_deprecated -def test_list_domains_order(): - conn = boto.connect_swf("the_key", "the_secret") - conn.register_domain("b-test-domain", "60") - conn.register_domain("a-test-domain", "60") - conn.register_domain("c-test-domain", "60") - - all_domains = conn.list_domains("REGISTERED") - names = [domain["name"] for domain in all_domains["domainInfos"]] - names.should.equal(["a-test-domain", "b-test-domain", "c-test-domain"]) - - -@mock_swf_deprecated -def test_list_domains_reverse_order(): - conn = boto.connect_swf("the_key", "the_secret") - conn.register_domain("b-test-domain", "60") - conn.register_domain("a-test-domain", "60") - conn.register_domain("c-test-domain", "60") - - all_domains = conn.list_domains("REGISTERED", reverse_order=True) - names = [domain["name"] for domain in all_domains["domainInfos"]] - names.should.equal(["c-test-domain", "b-test-domain", "a-test-domain"]) - - -# DeprecateDomain endpoint -@mock_swf_deprecated -def test_deprecate_domain(): - conn = boto.connect_swf("the_key", "the_secret") - conn.register_domain("test-domain", "60", description="A test domain") - conn.deprecate_domain("test-domain") - - all_domains = conn.list_domains("DEPRECATED") - domain = all_domains["domainInfos"][0] - - domain["name"].should.equal("test-domain") - - -@mock_swf_deprecated -def test_deprecate_already_deprecated_domain(): - conn = boto.connect_swf("the_key", "the_secret") - conn.register_domain("test-domain", "60", description="A test domain") - conn.deprecate_domain("test-domain") - - conn.deprecate_domain.when.called_with( - "test-domain" - ).should.throw(SWFResponseError) - - -@mock_swf_deprecated -def test_deprecate_non_existent_domain(): - conn = boto.connect_swf("the_key", "the_secret") - - conn.deprecate_domain.when.called_with( - "non-existent" - ).should.throw(SWFResponseError) - - -# DescribeDomain endpoint -@mock_swf_deprecated -def test_describe_domain(): - conn = boto.connect_swf("the_key", "the_secret") - conn.register_domain("test-domain", "60", description="A test domain") - - domain = conn.describe_domain("test-domain") - domain["configuration"][ - "workflowExecutionRetentionPeriodInDays"].should.equal("60") - domain["domainInfo"]["description"].should.equal("A test domain") - domain["domainInfo"]["name"].should.equal("test-domain") - domain["domainInfo"]["status"].should.equal("REGISTERED") - - -@mock_swf_deprecated -def 
test_describe_non_existent_domain(): - conn = boto.connect_swf("the_key", "the_secret") - - conn.describe_domain.when.called_with( - "non-existent" - ).should.throw(SWFResponseError) +import boto +from boto.swf.exceptions import SWFResponseError +import sure # noqa + +from moto import mock_swf_deprecated + + +# RegisterDomain endpoint +@mock_swf_deprecated +def test_register_domain(): + conn = boto.connect_swf("the_key", "the_secret") + conn.register_domain("test-domain", "60", description="A test domain") + + all_domains = conn.list_domains("REGISTERED") + domain = all_domains["domainInfos"][0] + + domain["name"].should.equal("test-domain") + domain["status"].should.equal("REGISTERED") + domain["description"].should.equal("A test domain") + + +@mock_swf_deprecated +def test_register_already_existing_domain(): + conn = boto.connect_swf("the_key", "the_secret") + conn.register_domain("test-domain", "60", description="A test domain") + + conn.register_domain.when.called_with( + "test-domain", "60", description="A test domain" + ).should.throw(SWFResponseError) + + +@mock_swf_deprecated +def test_register_with_wrong_parameter_type(): + conn = boto.connect_swf("the_key", "the_secret") + + conn.register_domain.when.called_with( + "test-domain", 60, description="A test domain" + ).should.throw(SWFResponseError) + + +# ListDomains endpoint +@mock_swf_deprecated +def test_list_domains_order(): + conn = boto.connect_swf("the_key", "the_secret") + conn.register_domain("b-test-domain", "60") + conn.register_domain("a-test-domain", "60") + conn.register_domain("c-test-domain", "60") + + all_domains = conn.list_domains("REGISTERED") + names = [domain["name"] for domain in all_domains["domainInfos"]] + names.should.equal(["a-test-domain", "b-test-domain", "c-test-domain"]) + + +@mock_swf_deprecated +def test_list_domains_reverse_order(): + conn = boto.connect_swf("the_key", "the_secret") + conn.register_domain("b-test-domain", "60") + conn.register_domain("a-test-domain", "60") + conn.register_domain("c-test-domain", "60") + + all_domains = conn.list_domains("REGISTERED", reverse_order=True) + names = [domain["name"] for domain in all_domains["domainInfos"]] + names.should.equal(["c-test-domain", "b-test-domain", "a-test-domain"]) + + +# DeprecateDomain endpoint +@mock_swf_deprecated +def test_deprecate_domain(): + conn = boto.connect_swf("the_key", "the_secret") + conn.register_domain("test-domain", "60", description="A test domain") + conn.deprecate_domain("test-domain") + + all_domains = conn.list_domains("DEPRECATED") + domain = all_domains["domainInfos"][0] + + domain["name"].should.equal("test-domain") + + +@mock_swf_deprecated +def test_deprecate_already_deprecated_domain(): + conn = boto.connect_swf("the_key", "the_secret") + conn.register_domain("test-domain", "60", description="A test domain") + conn.deprecate_domain("test-domain") + + conn.deprecate_domain.when.called_with( + "test-domain" + ).should.throw(SWFResponseError) + + +@mock_swf_deprecated +def test_deprecate_non_existent_domain(): + conn = boto.connect_swf("the_key", "the_secret") + + conn.deprecate_domain.when.called_with( + "non-existent" + ).should.throw(SWFResponseError) + + +# DescribeDomain endpoint +@mock_swf_deprecated +def test_describe_domain(): + conn = boto.connect_swf("the_key", "the_secret") + conn.register_domain("test-domain", "60", description="A test domain") + + domain = conn.describe_domain("test-domain") + domain["configuration"][ + "workflowExecutionRetentionPeriodInDays"].should.equal("60") + 
domain["domainInfo"]["description"].should.equal("A test domain") + domain["domainInfo"]["name"].should.equal("test-domain") + domain["domainInfo"]["status"].should.equal("REGISTERED") + + +@mock_swf_deprecated +def test_describe_non_existent_domain(): + conn = boto.connect_swf("the_key", "the_secret") + + conn.describe_domain.when.called_with( + "non-existent" + ).should.throw(SWFResponseError) diff --git a/tests/test_swf/responses/test_timeouts.py b/tests/test_swf/responses/test_timeouts.py index f49c597a4dd7..95d956f99194 100644 --- a/tests/test_swf/responses/test_timeouts.py +++ b/tests/test_swf/responses/test_timeouts.py @@ -1,110 +1,110 @@ -from freezegun import freeze_time -import sure # noqa - -from moto import mock_swf_deprecated - -from ..utils import setup_workflow, SCHEDULE_ACTIVITY_TASK_DECISION - - -# Activity Task Heartbeat timeout -# Default value in workflow helpers: 5 mins -@mock_swf_deprecated -def test_activity_task_heartbeat_timeout(): - with freeze_time("2015-01-01 12:00:00"): - conn = setup_workflow() - decision_token = conn.poll_for_decision_task( - "test-domain", "queue")["taskToken"] - conn.respond_decision_task_completed(decision_token, decisions=[ - SCHEDULE_ACTIVITY_TASK_DECISION - ]) - conn.poll_for_activity_task( - "test-domain", "activity-task-list", identity="surprise") - - with freeze_time("2015-01-01 12:04:30"): - resp = conn.get_workflow_execution_history( - "test-domain", conn.run_id, "uid-abcd1234") - resp["events"][-1]["eventType"].should.equal("ActivityTaskStarted") - - with freeze_time("2015-01-01 12:05:30"): - # => Activity Task Heartbeat timeout reached!! - resp = conn.get_workflow_execution_history( - "test-domain", conn.run_id, "uid-abcd1234") - - resp["events"][-2]["eventType"].should.equal("ActivityTaskTimedOut") - attrs = resp["events"][-2]["activityTaskTimedOutEventAttributes"] - attrs["timeoutType"].should.equal("HEARTBEAT") - # checks that event has been emitted at 12:05:00, not 12:05:30 - resp["events"][-2]["eventTimestamp"].should.equal(1420113900.0) - - resp["events"][-1]["eventType"].should.equal("DecisionTaskScheduled") - - -# Decision Task Start to Close timeout -# Default value in workflow helpers: 5 mins -@mock_swf_deprecated -def test_decision_task_start_to_close_timeout(): - pass - with freeze_time("2015-01-01 12:00:00"): - conn = setup_workflow() - conn.poll_for_decision_task("test-domain", "queue")["taskToken"] - - with freeze_time("2015-01-01 12:04:30"): - resp = conn.get_workflow_execution_history( - "test-domain", conn.run_id, "uid-abcd1234") - - event_types = [evt["eventType"] for evt in resp["events"]] - event_types.should.equal( - ["WorkflowExecutionStarted", "DecisionTaskScheduled", "DecisionTaskStarted"] - ) - - with freeze_time("2015-01-01 12:05:30"): - # => Decision Task Start to Close timeout reached!! 
- resp = conn.get_workflow_execution_history( - "test-domain", conn.run_id, "uid-abcd1234") - - event_types = [evt["eventType"] for evt in resp["events"]] - event_types.should.equal( - ["WorkflowExecutionStarted", "DecisionTaskScheduled", "DecisionTaskStarted", - "DecisionTaskTimedOut", "DecisionTaskScheduled"] - ) - attrs = resp["events"][-2]["decisionTaskTimedOutEventAttributes"] - attrs.should.equal({ - "scheduledEventId": 2, "startedEventId": 3, "timeoutType": "START_TO_CLOSE" - }) - # checks that event has been emitted at 12:05:00, not 12:05:30 - resp["events"][-2]["eventTimestamp"].should.equal(1420113900.0) - - -# Workflow Execution Start to Close timeout -# Default value in workflow helpers: 2 hours -@mock_swf_deprecated -def test_workflow_execution_start_to_close_timeout(): - pass - with freeze_time("2015-01-01 12:00:00"): - conn = setup_workflow() - - with freeze_time("2015-01-01 13:59:30"): - resp = conn.get_workflow_execution_history( - "test-domain", conn.run_id, "uid-abcd1234") - - event_types = [evt["eventType"] for evt in resp["events"]] - event_types.should.equal( - ["WorkflowExecutionStarted", "DecisionTaskScheduled"] - ) - - with freeze_time("2015-01-01 14:00:30"): - # => Workflow Execution Start to Close timeout reached!! - resp = conn.get_workflow_execution_history( - "test-domain", conn.run_id, "uid-abcd1234") - - event_types = [evt["eventType"] for evt in resp["events"]] - event_types.should.equal( - ["WorkflowExecutionStarted", "DecisionTaskScheduled", - "WorkflowExecutionTimedOut"] - ) - attrs = resp["events"][-1]["workflowExecutionTimedOutEventAttributes"] - attrs.should.equal({ - "childPolicy": "ABANDON", "timeoutType": "START_TO_CLOSE" - }) - # checks that event has been emitted at 14:00:00, not 14:00:30 - resp["events"][-1]["eventTimestamp"].should.equal(1420120800.0) +from freezegun import freeze_time +import sure # noqa + +from moto import mock_swf_deprecated + +from ..utils import setup_workflow, SCHEDULE_ACTIVITY_TASK_DECISION + + +# Activity Task Heartbeat timeout +# Default value in workflow helpers: 5 mins +@mock_swf_deprecated +def test_activity_task_heartbeat_timeout(): + with freeze_time("2015-01-01 12:00:00"): + conn = setup_workflow() + decision_token = conn.poll_for_decision_task( + "test-domain", "queue")["taskToken"] + conn.respond_decision_task_completed(decision_token, decisions=[ + SCHEDULE_ACTIVITY_TASK_DECISION + ]) + conn.poll_for_activity_task( + "test-domain", "activity-task-list", identity="surprise") + + with freeze_time("2015-01-01 12:04:30"): + resp = conn.get_workflow_execution_history( + "test-domain", conn.run_id, "uid-abcd1234") + resp["events"][-1]["eventType"].should.equal("ActivityTaskStarted") + + with freeze_time("2015-01-01 12:05:30"): + # => Activity Task Heartbeat timeout reached!! 
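+        # Note: 2015-01-01 12:00:00 UTC is epoch 1420113600.0, so the
+        # default 5 min heartbeat timeout fires at 12:05:00, i.e. epoch
+        # 1420113900.0, the timestamp asserted on the timeout event below.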
+ resp = conn.get_workflow_execution_history( + "test-domain", conn.run_id, "uid-abcd1234") + + resp["events"][-2]["eventType"].should.equal("ActivityTaskTimedOut") + attrs = resp["events"][-2]["activityTaskTimedOutEventAttributes"] + attrs["timeoutType"].should.equal("HEARTBEAT") + # checks that event has been emitted at 12:05:00, not 12:05:30 + resp["events"][-2]["eventTimestamp"].should.equal(1420113900.0) + + resp["events"][-1]["eventType"].should.equal("DecisionTaskScheduled") + + +# Decision Task Start to Close timeout +# Default value in workflow helpers: 5 mins +@mock_swf_deprecated +def test_decision_task_start_to_close_timeout(): + pass + with freeze_time("2015-01-01 12:00:00"): + conn = setup_workflow() + conn.poll_for_decision_task("test-domain", "queue")["taskToken"] + + with freeze_time("2015-01-01 12:04:30"): + resp = conn.get_workflow_execution_history( + "test-domain", conn.run_id, "uid-abcd1234") + + event_types = [evt["eventType"] for evt in resp["events"]] + event_types.should.equal( + ["WorkflowExecutionStarted", "DecisionTaskScheduled", "DecisionTaskStarted"] + ) + + with freeze_time("2015-01-01 12:05:30"): + # => Decision Task Start to Close timeout reached!! + resp = conn.get_workflow_execution_history( + "test-domain", conn.run_id, "uid-abcd1234") + + event_types = [evt["eventType"] for evt in resp["events"]] + event_types.should.equal( + ["WorkflowExecutionStarted", "DecisionTaskScheduled", "DecisionTaskStarted", + "DecisionTaskTimedOut", "DecisionTaskScheduled"] + ) + attrs = resp["events"][-2]["decisionTaskTimedOutEventAttributes"] + attrs.should.equal({ + "scheduledEventId": 2, "startedEventId": 3, "timeoutType": "START_TO_CLOSE" + }) + # checks that event has been emitted at 12:05:00, not 12:05:30 + resp["events"][-2]["eventTimestamp"].should.equal(1420113900.0) + + +# Workflow Execution Start to Close timeout +# Default value in workflow helpers: 2 hours +@mock_swf_deprecated +def test_workflow_execution_start_to_close_timeout(): + pass + with freeze_time("2015-01-01 12:00:00"): + conn = setup_workflow() + + with freeze_time("2015-01-01 13:59:30"): + resp = conn.get_workflow_execution_history( + "test-domain", conn.run_id, "uid-abcd1234") + + event_types = [evt["eventType"] for evt in resp["events"]] + event_types.should.equal( + ["WorkflowExecutionStarted", "DecisionTaskScheduled"] + ) + + with freeze_time("2015-01-01 14:00:30"): + # => Workflow Execution Start to Close timeout reached!! 
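+        # Note: the default 2 h start-to-close timeout fires at 14:00:00,
+        # i.e. epoch 1420120800.0, the timestamp asserted on the
+        # WorkflowExecutionTimedOut event below.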
+ resp = conn.get_workflow_execution_history( + "test-domain", conn.run_id, "uid-abcd1234") + + event_types = [evt["eventType"] for evt in resp["events"]] + event_types.should.equal( + ["WorkflowExecutionStarted", "DecisionTaskScheduled", + "WorkflowExecutionTimedOut"] + ) + attrs = resp["events"][-1]["workflowExecutionTimedOutEventAttributes"] + attrs.should.equal({ + "childPolicy": "ABANDON", "timeoutType": "START_TO_CLOSE" + }) + # checks that event has been emitted at 14:00:00, not 14:00:30 + resp["events"][-1]["eventTimestamp"].should.equal(1420120800.0) diff --git a/tests/test_swf/responses/test_workflow_executions.py b/tests/test_swf/responses/test_workflow_executions.py index 88e3caa75c6f..2cb0922604fc 100644 --- a/tests/test_swf/responses/test_workflow_executions.py +++ b/tests/test_swf/responses/test_workflow_executions.py @@ -1,262 +1,262 @@ -import boto -from boto.swf.exceptions import SWFResponseError -from datetime import datetime, timedelta - -import sure # noqa -# Ensure 'assert_raises' context manager support for Python 2.6 -import tests.backport_assert_raises # noqa - -from moto import mock_swf_deprecated -from moto.core.utils import unix_time - - -# Utils -@mock_swf_deprecated -def setup_swf_environment(): - conn = boto.connect_swf("the_key", "the_secret") - conn.register_domain("test-domain", "60", description="A test domain") - conn.register_workflow_type( - "test-domain", "test-workflow", "v1.0", - task_list="queue", default_child_policy="TERMINATE", - default_execution_start_to_close_timeout="300", - default_task_start_to_close_timeout="300", - ) - conn.register_activity_type("test-domain", "test-activity", "v1.1") - return conn - - -# StartWorkflowExecution endpoint -@mock_swf_deprecated -def test_start_workflow_execution(): - conn = setup_swf_environment() - - wf = conn.start_workflow_execution( - "test-domain", "uid-abcd1234", "test-workflow", "v1.0") - wf.should.contain("runId") - -@mock_swf_deprecated -def test_signal_workflow_execution(): - conn = setup_swf_environment() - hsh = conn.start_workflow_execution( - "test-domain", "uid-abcd1234", "test-workflow", "v1.0") - run_id = hsh["runId"] - - wfe = conn.signal_workflow_execution( - "test-domain", "my_signal", "uid-abcd1234", "my_input", run_id) - - wfe = conn.describe_workflow_execution( - "test-domain", run_id, "uid-abcd1234") - - wfe["openCounts"]["openDecisionTasks"].should.equal(2) - -@mock_swf_deprecated -def test_start_already_started_workflow_execution(): - conn = setup_swf_environment() - conn.start_workflow_execution( - "test-domain", "uid-abcd1234", "test-workflow", "v1.0") - - conn.start_workflow_execution.when.called_with( - "test-domain", "uid-abcd1234", "test-workflow", "v1.0" - ).should.throw(SWFResponseError) - - -@mock_swf_deprecated -def test_start_workflow_execution_on_deprecated_type(): - conn = setup_swf_environment() - conn.deprecate_workflow_type("test-domain", "test-workflow", "v1.0") - - conn.start_workflow_execution.when.called_with( - "test-domain", "uid-abcd1234", "test-workflow", "v1.0" - ).should.throw(SWFResponseError) - - -# DescribeWorkflowExecution endpoint -@mock_swf_deprecated -def test_describe_workflow_execution(): - conn = setup_swf_environment() - hsh = conn.start_workflow_execution( - "test-domain", "uid-abcd1234", "test-workflow", "v1.0") - run_id = hsh["runId"] - - wfe = conn.describe_workflow_execution( - "test-domain", run_id, "uid-abcd1234") - wfe["executionInfo"]["execution"][ - "workflowId"].should.equal("uid-abcd1234") - 
wfe["executionInfo"]["executionStatus"].should.equal("OPEN") - - -@mock_swf_deprecated -def test_describe_non_existent_workflow_execution(): - conn = setup_swf_environment() - - conn.describe_workflow_execution.when.called_with( - "test-domain", "wrong-run-id", "wrong-workflow-id" - ).should.throw(SWFResponseError) - - -# GetWorkflowExecutionHistory endpoint -@mock_swf_deprecated -def test_get_workflow_execution_history(): - conn = setup_swf_environment() - hsh = conn.start_workflow_execution( - "test-domain", "uid-abcd1234", "test-workflow", "v1.0") - run_id = hsh["runId"] - - resp = conn.get_workflow_execution_history( - "test-domain", run_id, "uid-abcd1234") - types = [evt["eventType"] for evt in resp["events"]] - types.should.equal(["WorkflowExecutionStarted", "DecisionTaskScheduled"]) - - -@mock_swf_deprecated -def test_get_workflow_execution_history_with_reverse_order(): - conn = setup_swf_environment() - hsh = conn.start_workflow_execution( - "test-domain", "uid-abcd1234", "test-workflow", "v1.0") - run_id = hsh["runId"] - - resp = conn.get_workflow_execution_history("test-domain", run_id, "uid-abcd1234", - reverse_order=True) - types = [evt["eventType"] for evt in resp["events"]] - types.should.equal(["DecisionTaskScheduled", "WorkflowExecutionStarted"]) - - -@mock_swf_deprecated -def test_get_workflow_execution_history_on_non_existent_workflow_execution(): - conn = setup_swf_environment() - - conn.get_workflow_execution_history.when.called_with( - "test-domain", "wrong-run-id", "wrong-workflow-id" - ).should.throw(SWFResponseError) - - -# ListOpenWorkflowExecutions endpoint -@mock_swf_deprecated -def test_list_open_workflow_executions(): - conn = setup_swf_environment() - # One open workflow execution - conn.start_workflow_execution( - 'test-domain', 'uid-abcd1234', 'test-workflow', 'v1.0' - ) - # One closed workflow execution to make sure it isn't displayed - run_id = conn.start_workflow_execution( - 'test-domain', 'uid-abcd12345', 'test-workflow', 'v1.0' - )['runId'] - conn.terminate_workflow_execution('test-domain', 'uid-abcd12345', - details='some details', - reason='a more complete reason', - run_id=run_id) - - yesterday = datetime.utcnow() - timedelta(days=1) - oldest_date = unix_time(yesterday) - response = conn.list_open_workflow_executions('test-domain', - oldest_date, - workflow_id='test-workflow') - execution_infos = response['executionInfos'] - len(execution_infos).should.equal(1) - open_workflow = execution_infos[0] - open_workflow['workflowType'].should.equal({'version': 'v1.0', - 'name': 'test-workflow'}) - open_workflow.should.contain('startTimestamp') - open_workflow['execution']['workflowId'].should.equal('uid-abcd1234') - open_workflow['execution'].should.contain('runId') - open_workflow['cancelRequested'].should.be(False) - open_workflow['executionStatus'].should.equal('OPEN') - - -# ListClosedWorkflowExecutions endpoint -@mock_swf_deprecated -def test_list_closed_workflow_executions(): - conn = setup_swf_environment() - # Leave one workflow execution open to make sure it isn't displayed - conn.start_workflow_execution( - 'test-domain', 'uid-abcd1234', 'test-workflow', 'v1.0' - ) - # One closed workflow execution - run_id = conn.start_workflow_execution( - 'test-domain', 'uid-abcd12345', 'test-workflow', 'v1.0' - )['runId'] - conn.terminate_workflow_execution('test-domain', 'uid-abcd12345', - details='some details', - reason='a more complete reason', - run_id=run_id) - - yesterday = datetime.utcnow() - timedelta(days=1) - oldest_date = unix_time(yesterday) - 
response = conn.list_closed_workflow_executions( - 'test-domain', - start_oldest_date=oldest_date, - workflow_id='test-workflow') - execution_infos = response['executionInfos'] - len(execution_infos).should.equal(1) - open_workflow = execution_infos[0] - open_workflow['workflowType'].should.equal({'version': 'v1.0', - 'name': 'test-workflow'}) - open_workflow.should.contain('startTimestamp') - open_workflow['execution']['workflowId'].should.equal('uid-abcd12345') - open_workflow['execution'].should.contain('runId') - open_workflow['cancelRequested'].should.be(False) - open_workflow['executionStatus'].should.equal('CLOSED') - - -# TerminateWorkflowExecution endpoint -@mock_swf_deprecated -def test_terminate_workflow_execution(): - conn = setup_swf_environment() - run_id = conn.start_workflow_execution( - "test-domain", "uid-abcd1234", "test-workflow", "v1.0" - )["runId"] - - resp = conn.terminate_workflow_execution("test-domain", "uid-abcd1234", - details="some details", - reason="a more complete reason", - run_id=run_id) - resp.should.be.none - - resp = conn.get_workflow_execution_history( - "test-domain", run_id, "uid-abcd1234") - evt = resp["events"][-1] - evt["eventType"].should.equal("WorkflowExecutionTerminated") - attrs = evt["workflowExecutionTerminatedEventAttributes"] - attrs["details"].should.equal("some details") - attrs["reason"].should.equal("a more complete reason") - attrs["cause"].should.equal("OPERATOR_INITIATED") - - -@mock_swf_deprecated -def test_terminate_workflow_execution_with_wrong_workflow_or_run_id(): - conn = setup_swf_environment() - run_id = conn.start_workflow_execution( - "test-domain", "uid-abcd1234", "test-workflow", "v1.0" - )["runId"] - - # terminate workflow execution - conn.terminate_workflow_execution("test-domain", "uid-abcd1234") - - # already closed, with run_id - conn.terminate_workflow_execution.when.called_with( - "test-domain", "uid-abcd1234", run_id=run_id - ).should.throw( - SWFResponseError, "WorkflowExecution=[workflowId=uid-abcd1234, runId=" - ) - - # already closed, without run_id - conn.terminate_workflow_execution.when.called_with( - "test-domain", "uid-abcd1234" - ).should.throw( - SWFResponseError, "Unknown execution, workflowId = uid-abcd1234" - ) - - # wrong workflow id - conn.terminate_workflow_execution.when.called_with( - "test-domain", "uid-non-existent" - ).should.throw( - SWFResponseError, "Unknown execution, workflowId = uid-non-existent" - ) - - # wrong run_id - conn.terminate_workflow_execution.when.called_with( - "test-domain", "uid-abcd1234", run_id="foo" - ).should.throw( - SWFResponseError, "WorkflowExecution=[workflowId=uid-abcd1234, runId=" - ) +import boto +from boto.swf.exceptions import SWFResponseError +from datetime import datetime, timedelta + +import sure # noqa +# Ensure 'assert_raises' context manager support for Python 2.6 +import tests.backport_assert_raises # noqa + +from moto import mock_swf_deprecated +from moto.core.utils import unix_time + + +# Utils +@mock_swf_deprecated +def setup_swf_environment(): + conn = boto.connect_swf("the_key", "the_secret") + conn.register_domain("test-domain", "60", description="A test domain") + conn.register_workflow_type( + "test-domain", "test-workflow", "v1.0", + task_list="queue", default_child_policy="TERMINATE", + default_execution_start_to_close_timeout="300", + default_task_start_to_close_timeout="300", + ) + conn.register_activity_type("test-domain", "test-activity", "v1.1") + return conn + + +# StartWorkflowExecution endpoint +@mock_swf_deprecated +def 
test_start_workflow_execution(): + conn = setup_swf_environment() + + wf = conn.start_workflow_execution( + "test-domain", "uid-abcd1234", "test-workflow", "v1.0") + wf.should.contain("runId") + +@mock_swf_deprecated +def test_signal_workflow_execution(): + conn = setup_swf_environment() + hsh = conn.start_workflow_execution( + "test-domain", "uid-abcd1234", "test-workflow", "v1.0") + run_id = hsh["runId"] + + wfe = conn.signal_workflow_execution( + "test-domain", "my_signal", "uid-abcd1234", "my_input", run_id) + + wfe = conn.describe_workflow_execution( + "test-domain", run_id, "uid-abcd1234") + + wfe["openCounts"]["openDecisionTasks"].should.equal(2) + +@mock_swf_deprecated +def test_start_already_started_workflow_execution(): + conn = setup_swf_environment() + conn.start_workflow_execution( + "test-domain", "uid-abcd1234", "test-workflow", "v1.0") + + conn.start_workflow_execution.when.called_with( + "test-domain", "uid-abcd1234", "test-workflow", "v1.0" + ).should.throw(SWFResponseError) + + +@mock_swf_deprecated +def test_start_workflow_execution_on_deprecated_type(): + conn = setup_swf_environment() + conn.deprecate_workflow_type("test-domain", "test-workflow", "v1.0") + + conn.start_workflow_execution.when.called_with( + "test-domain", "uid-abcd1234", "test-workflow", "v1.0" + ).should.throw(SWFResponseError) + + +# DescribeWorkflowExecution endpoint +@mock_swf_deprecated +def test_describe_workflow_execution(): + conn = setup_swf_environment() + hsh = conn.start_workflow_execution( + "test-domain", "uid-abcd1234", "test-workflow", "v1.0") + run_id = hsh["runId"] + + wfe = conn.describe_workflow_execution( + "test-domain", run_id, "uid-abcd1234") + wfe["executionInfo"]["execution"][ + "workflowId"].should.equal("uid-abcd1234") + wfe["executionInfo"]["executionStatus"].should.equal("OPEN") + + +@mock_swf_deprecated +def test_describe_non_existent_workflow_execution(): + conn = setup_swf_environment() + + conn.describe_workflow_execution.when.called_with( + "test-domain", "wrong-run-id", "wrong-workflow-id" + ).should.throw(SWFResponseError) + + +# GetWorkflowExecutionHistory endpoint +@mock_swf_deprecated +def test_get_workflow_execution_history(): + conn = setup_swf_environment() + hsh = conn.start_workflow_execution( + "test-domain", "uid-abcd1234", "test-workflow", "v1.0") + run_id = hsh["runId"] + + resp = conn.get_workflow_execution_history( + "test-domain", run_id, "uid-abcd1234") + types = [evt["eventType"] for evt in resp["events"]] + types.should.equal(["WorkflowExecutionStarted", "DecisionTaskScheduled"]) + + +@mock_swf_deprecated +def test_get_workflow_execution_history_with_reverse_order(): + conn = setup_swf_environment() + hsh = conn.start_workflow_execution( + "test-domain", "uid-abcd1234", "test-workflow", "v1.0") + run_id = hsh["runId"] + + resp = conn.get_workflow_execution_history("test-domain", run_id, "uid-abcd1234", + reverse_order=True) + types = [evt["eventType"] for evt in resp["events"]] + types.should.equal(["DecisionTaskScheduled", "WorkflowExecutionStarted"]) + + +@mock_swf_deprecated +def test_get_workflow_execution_history_on_non_existent_workflow_execution(): + conn = setup_swf_environment() + + conn.get_workflow_execution_history.when.called_with( + "test-domain", "wrong-run-id", "wrong-workflow-id" + ).should.throw(SWFResponseError) + + +# ListOpenWorkflowExecutions endpoint +@mock_swf_deprecated +def test_list_open_workflow_executions(): + conn = setup_swf_environment() + # One open workflow execution + conn.start_workflow_execution( + 
'test-domain', 'uid-abcd1234', 'test-workflow', 'v1.0' + ) + # One closed workflow execution to make sure it isn't displayed + run_id = conn.start_workflow_execution( + 'test-domain', 'uid-abcd12345', 'test-workflow', 'v1.0' + )['runId'] + conn.terminate_workflow_execution('test-domain', 'uid-abcd12345', + details='some details', + reason='a more complete reason', + run_id=run_id) + + yesterday = datetime.utcnow() - timedelta(days=1) + oldest_date = unix_time(yesterday) + response = conn.list_open_workflow_executions('test-domain', + oldest_date, + workflow_id='test-workflow') + execution_infos = response['executionInfos'] + len(execution_infos).should.equal(1) + open_workflow = execution_infos[0] + open_workflow['workflowType'].should.equal({'version': 'v1.0', + 'name': 'test-workflow'}) + open_workflow.should.contain('startTimestamp') + open_workflow['execution']['workflowId'].should.equal('uid-abcd1234') + open_workflow['execution'].should.contain('runId') + open_workflow['cancelRequested'].should.be(False) + open_workflow['executionStatus'].should.equal('OPEN') + + +# ListClosedWorkflowExecutions endpoint +@mock_swf_deprecated +def test_list_closed_workflow_executions(): + conn = setup_swf_environment() + # Leave one workflow execution open to make sure it isn't displayed + conn.start_workflow_execution( + 'test-domain', 'uid-abcd1234', 'test-workflow', 'v1.0' + ) + # One closed workflow execution + run_id = conn.start_workflow_execution( + 'test-domain', 'uid-abcd12345', 'test-workflow', 'v1.0' + )['runId'] + conn.terminate_workflow_execution('test-domain', 'uid-abcd12345', + details='some details', + reason='a more complete reason', + run_id=run_id) + + yesterday = datetime.utcnow() - timedelta(days=1) + oldest_date = unix_time(yesterday) + response = conn.list_closed_workflow_executions( + 'test-domain', + start_oldest_date=oldest_date, + workflow_id='test-workflow') + execution_infos = response['executionInfos'] + len(execution_infos).should.equal(1) + open_workflow = execution_infos[0] + open_workflow['workflowType'].should.equal({'version': 'v1.0', + 'name': 'test-workflow'}) + open_workflow.should.contain('startTimestamp') + open_workflow['execution']['workflowId'].should.equal('uid-abcd12345') + open_workflow['execution'].should.contain('runId') + open_workflow['cancelRequested'].should.be(False) + open_workflow['executionStatus'].should.equal('CLOSED') + + +# TerminateWorkflowExecution endpoint +@mock_swf_deprecated +def test_terminate_workflow_execution(): + conn = setup_swf_environment() + run_id = conn.start_workflow_execution( + "test-domain", "uid-abcd1234", "test-workflow", "v1.0" + )["runId"] + + resp = conn.terminate_workflow_execution("test-domain", "uid-abcd1234", + details="some details", + reason="a more complete reason", + run_id=run_id) + resp.should.be.none + + resp = conn.get_workflow_execution_history( + "test-domain", run_id, "uid-abcd1234") + evt = resp["events"][-1] + evt["eventType"].should.equal("WorkflowExecutionTerminated") + attrs = evt["workflowExecutionTerminatedEventAttributes"] + attrs["details"].should.equal("some details") + attrs["reason"].should.equal("a more complete reason") + attrs["cause"].should.equal("OPERATOR_INITIATED") + + +@mock_swf_deprecated +def test_terminate_workflow_execution_with_wrong_workflow_or_run_id(): + conn = setup_swf_environment() + run_id = conn.start_workflow_execution( + "test-domain", "uid-abcd1234", "test-workflow", "v1.0" + )["runId"] + + # terminate workflow execution + 
conn.terminate_workflow_execution("test-domain", "uid-abcd1234") + + # already closed, with run_id + conn.terminate_workflow_execution.when.called_with( + "test-domain", "uid-abcd1234", run_id=run_id + ).should.throw( + SWFResponseError, "WorkflowExecution=[workflowId=uid-abcd1234, runId=" + ) + + # already closed, without run_id + conn.terminate_workflow_execution.when.called_with( + "test-domain", "uid-abcd1234" + ).should.throw( + SWFResponseError, "Unknown execution, workflowId = uid-abcd1234" + ) + + # wrong workflow id + conn.terminate_workflow_execution.when.called_with( + "test-domain", "uid-non-existent" + ).should.throw( + SWFResponseError, "Unknown execution, workflowId = uid-non-existent" + ) + + # wrong run_id + conn.terminate_workflow_execution.when.called_with( + "test-domain", "uid-abcd1234", run_id="foo" + ).should.throw( + SWFResponseError, "WorkflowExecution=[workflowId=uid-abcd1234, runId=" + ) diff --git a/tests/test_swf/responses/test_workflow_types.py b/tests/test_swf/responses/test_workflow_types.py index 9e097a873f00..f0b39e7ad1b6 100644 --- a/tests/test_swf/responses/test_workflow_types.py +++ b/tests/test_swf/responses/test_workflow_types.py @@ -1,137 +1,137 @@ -import sure -import boto - -from moto import mock_swf_deprecated -from boto.swf.exceptions import SWFResponseError - - -# RegisterWorkflowType endpoint -@mock_swf_deprecated -def test_register_workflow_type(): - conn = boto.connect_swf("the_key", "the_secret") - conn.register_domain("test-domain", "60") - conn.register_workflow_type("test-domain", "test-workflow", "v1.0") - - types = conn.list_workflow_types("test-domain", "REGISTERED") - actype = types["typeInfos"][0] - actype["workflowType"]["name"].should.equal("test-workflow") - actype["workflowType"]["version"].should.equal("v1.0") - - -@mock_swf_deprecated -def test_register_already_existing_workflow_type(): - conn = boto.connect_swf("the_key", "the_secret") - conn.register_domain("test-domain", "60") - conn.register_workflow_type("test-domain", "test-workflow", "v1.0") - - conn.register_workflow_type.when.called_with( - "test-domain", "test-workflow", "v1.0" - ).should.throw(SWFResponseError) - - -@mock_swf_deprecated -def test_register_with_wrong_parameter_type(): - conn = boto.connect_swf("the_key", "the_secret") - conn.register_domain("test-domain", "60") - - conn.register_workflow_type.when.called_with( - "test-domain", "test-workflow", 12 - ).should.throw(SWFResponseError) - - -# ListWorkflowTypes endpoint -@mock_swf_deprecated -def test_list_workflow_types(): - conn = boto.connect_swf("the_key", "the_secret") - conn.register_domain("test-domain", "60") - conn.register_workflow_type("test-domain", "b-test-workflow", "v1.0") - conn.register_workflow_type("test-domain", "a-test-workflow", "v1.0") - conn.register_workflow_type("test-domain", "c-test-workflow", "v1.0") - - all_workflow_types = conn.list_workflow_types("test-domain", "REGISTERED") - names = [activity_type["workflowType"]["name"] - for activity_type in all_workflow_types["typeInfos"]] - names.should.equal( - ["a-test-workflow", "b-test-workflow", "c-test-workflow"]) - - -@mock_swf_deprecated -def test_list_workflow_types_reverse_order(): - conn = boto.connect_swf("the_key", "the_secret") - conn.register_domain("test-domain", "60") - conn.register_workflow_type("test-domain", "b-test-workflow", "v1.0") - conn.register_workflow_type("test-domain", "a-test-workflow", "v1.0") - conn.register_workflow_type("test-domain", "c-test-workflow", "v1.0") - - all_workflow_types = 
conn.list_workflow_types("test-domain", "REGISTERED", - reverse_order=True) - names = [activity_type["workflowType"]["name"] - for activity_type in all_workflow_types["typeInfos"]] - names.should.equal( - ["c-test-workflow", "b-test-workflow", "a-test-workflow"]) - - -# DeprecateWorkflowType endpoint -@mock_swf_deprecated -def test_deprecate_workflow_type(): - conn = boto.connect_swf("the_key", "the_secret") - conn.register_domain("test-domain", "60") - conn.register_workflow_type("test-domain", "test-workflow", "v1.0") - conn.deprecate_workflow_type("test-domain", "test-workflow", "v1.0") - - actypes = conn.list_workflow_types("test-domain", "DEPRECATED") - actype = actypes["typeInfos"][0] - actype["workflowType"]["name"].should.equal("test-workflow") - actype["workflowType"]["version"].should.equal("v1.0") - - -@mock_swf_deprecated -def test_deprecate_already_deprecated_workflow_type(): - conn = boto.connect_swf("the_key", "the_secret") - conn.register_domain("test-domain", "60") - conn.register_workflow_type("test-domain", "test-workflow", "v1.0") - conn.deprecate_workflow_type("test-domain", "test-workflow", "v1.0") - - conn.deprecate_workflow_type.when.called_with( - "test-domain", "test-workflow", "v1.0" - ).should.throw(SWFResponseError) - - -@mock_swf_deprecated -def test_deprecate_non_existent_workflow_type(): - conn = boto.connect_swf("the_key", "the_secret") - conn.register_domain("test-domain", "60") - - conn.deprecate_workflow_type.when.called_with( - "test-domain", "non-existent", "v1.0" - ).should.throw(SWFResponseError) - - -# DescribeWorkflowType endpoint -@mock_swf_deprecated -def test_describe_workflow_type(): - conn = boto.connect_swf("the_key", "the_secret") - conn.register_domain("test-domain", "60") - conn.register_workflow_type("test-domain", "test-workflow", "v1.0", - task_list="foo", default_child_policy="TERMINATE") - - actype = conn.describe_workflow_type( - "test-domain", "test-workflow", "v1.0") - actype["configuration"]["defaultTaskList"]["name"].should.equal("foo") - actype["configuration"]["defaultChildPolicy"].should.equal("TERMINATE") - actype["configuration"].keys().should_not.contain( - "defaultTaskStartToCloseTimeout") - infos = actype["typeInfo"] - infos["workflowType"]["name"].should.equal("test-workflow") - infos["workflowType"]["version"].should.equal("v1.0") - infos["status"].should.equal("REGISTERED") - - -@mock_swf_deprecated -def test_describe_non_existent_workflow_type(): - conn = boto.connect_swf("the_key", "the_secret") - conn.register_domain("test-domain", "60") - - conn.describe_workflow_type.when.called_with( - "test-domain", "non-existent", "v1.0" - ).should.throw(SWFResponseError) +import sure +import boto + +from moto import mock_swf_deprecated +from boto.swf.exceptions import SWFResponseError + + +# RegisterWorkflowType endpoint +@mock_swf_deprecated +def test_register_workflow_type(): + conn = boto.connect_swf("the_key", "the_secret") + conn.register_domain("test-domain", "60") + conn.register_workflow_type("test-domain", "test-workflow", "v1.0") + + types = conn.list_workflow_types("test-domain", "REGISTERED") + actype = types["typeInfos"][0] + actype["workflowType"]["name"].should.equal("test-workflow") + actype["workflowType"]["version"].should.equal("v1.0") + + +@mock_swf_deprecated +def test_register_already_existing_workflow_type(): + conn = boto.connect_swf("the_key", "the_secret") + conn.register_domain("test-domain", "60") + conn.register_workflow_type("test-domain", "test-workflow", "v1.0") + + 
conn.register_workflow_type.when.called_with( + "test-domain", "test-workflow", "v1.0" + ).should.throw(SWFResponseError) + + +@mock_swf_deprecated +def test_register_with_wrong_parameter_type(): + conn = boto.connect_swf("the_key", "the_secret") + conn.register_domain("test-domain", "60") + + conn.register_workflow_type.when.called_with( + "test-domain", "test-workflow", 12 + ).should.throw(SWFResponseError) + + +# ListWorkflowTypes endpoint +@mock_swf_deprecated +def test_list_workflow_types(): + conn = boto.connect_swf("the_key", "the_secret") + conn.register_domain("test-domain", "60") + conn.register_workflow_type("test-domain", "b-test-workflow", "v1.0") + conn.register_workflow_type("test-domain", "a-test-workflow", "v1.0") + conn.register_workflow_type("test-domain", "c-test-workflow", "v1.0") + + all_workflow_types = conn.list_workflow_types("test-domain", "REGISTERED") + names = [activity_type["workflowType"]["name"] + for activity_type in all_workflow_types["typeInfos"]] + names.should.equal( + ["a-test-workflow", "b-test-workflow", "c-test-workflow"]) + + +@mock_swf_deprecated +def test_list_workflow_types_reverse_order(): + conn = boto.connect_swf("the_key", "the_secret") + conn.register_domain("test-domain", "60") + conn.register_workflow_type("test-domain", "b-test-workflow", "v1.0") + conn.register_workflow_type("test-domain", "a-test-workflow", "v1.0") + conn.register_workflow_type("test-domain", "c-test-workflow", "v1.0") + + all_workflow_types = conn.list_workflow_types("test-domain", "REGISTERED", + reverse_order=True) + names = [activity_type["workflowType"]["name"] + for activity_type in all_workflow_types["typeInfos"]] + names.should.equal( + ["c-test-workflow", "b-test-workflow", "a-test-workflow"]) + + +# DeprecateWorkflowType endpoint +@mock_swf_deprecated +def test_deprecate_workflow_type(): + conn = boto.connect_swf("the_key", "the_secret") + conn.register_domain("test-domain", "60") + conn.register_workflow_type("test-domain", "test-workflow", "v1.0") + conn.deprecate_workflow_type("test-domain", "test-workflow", "v1.0") + + actypes = conn.list_workflow_types("test-domain", "DEPRECATED") + actype = actypes["typeInfos"][0] + actype["workflowType"]["name"].should.equal("test-workflow") + actype["workflowType"]["version"].should.equal("v1.0") + + +@mock_swf_deprecated +def test_deprecate_already_deprecated_workflow_type(): + conn = boto.connect_swf("the_key", "the_secret") + conn.register_domain("test-domain", "60") + conn.register_workflow_type("test-domain", "test-workflow", "v1.0") + conn.deprecate_workflow_type("test-domain", "test-workflow", "v1.0") + + conn.deprecate_workflow_type.when.called_with( + "test-domain", "test-workflow", "v1.0" + ).should.throw(SWFResponseError) + + +@mock_swf_deprecated +def test_deprecate_non_existent_workflow_type(): + conn = boto.connect_swf("the_key", "the_secret") + conn.register_domain("test-domain", "60") + + conn.deprecate_workflow_type.when.called_with( + "test-domain", "non-existent", "v1.0" + ).should.throw(SWFResponseError) + + +# DescribeWorkflowType endpoint +@mock_swf_deprecated +def test_describe_workflow_type(): + conn = boto.connect_swf("the_key", "the_secret") + conn.register_domain("test-domain", "60") + conn.register_workflow_type("test-domain", "test-workflow", "v1.0", + task_list="foo", default_child_policy="TERMINATE") + + actype = conn.describe_workflow_type( + "test-domain", "test-workflow", "v1.0") + actype["configuration"]["defaultTaskList"]["name"].should.equal("foo") + 
actype["configuration"]["defaultChildPolicy"].should.equal("TERMINATE") + actype["configuration"].keys().should_not.contain( + "defaultTaskStartToCloseTimeout") + infos = actype["typeInfo"] + infos["workflowType"]["name"].should.equal("test-workflow") + infos["workflowType"]["version"].should.equal("v1.0") + infos["status"].should.equal("REGISTERED") + + +@mock_swf_deprecated +def test_describe_non_existent_workflow_type(): + conn = boto.connect_swf("the_key", "the_secret") + conn.register_domain("test-domain", "60") + + conn.describe_workflow_type.when.called_with( + "test-domain", "non-existent", "v1.0" + ).should.throw(SWFResponseError) diff --git a/tests/test_swf/test_exceptions.py b/tests/test_swf/test_exceptions.py index 8617242b9704..b91a697b9f01 100644 --- a/tests/test_swf/test_exceptions.py +++ b/tests/test_swf/test_exceptions.py @@ -1,158 +1,158 @@ -from __future__ import unicode_literals -import sure # noqa - -import json - -from moto.swf.exceptions import ( - SWFClientError, - SWFUnknownResourceFault, - SWFDomainAlreadyExistsFault, - SWFDomainDeprecatedFault, - SWFSerializationException, - SWFTypeAlreadyExistsFault, - SWFTypeDeprecatedFault, - SWFWorkflowExecutionAlreadyStartedFault, - SWFDefaultUndefinedFault, - SWFValidationException, - SWFDecisionValidationException, -) -from moto.swf.models import ( - WorkflowType, -) - - -def test_swf_client_error(): - ex = SWFClientError("ASpecificType", "error message") - - ex.code.should.equal(400) - json.loads(ex.get_body()).should.equal({ - "__type": "ASpecificType", - "message": "error message" - }) - - -def test_swf_unknown_resource_fault(): - ex = SWFUnknownResourceFault("type", "detail") - - ex.code.should.equal(400) - json.loads(ex.get_body()).should.equal({ - "__type": "com.amazonaws.swf.base.model#UnknownResourceFault", - "message": "Unknown type: detail" - }) - - -def test_swf_unknown_resource_fault_with_only_one_parameter(): - ex = SWFUnknownResourceFault("foo bar baz") - - ex.code.should.equal(400) - json.loads(ex.get_body()).should.equal({ - "__type": "com.amazonaws.swf.base.model#UnknownResourceFault", - "message": "Unknown foo bar baz" - }) - - -def test_swf_domain_already_exists_fault(): - ex = SWFDomainAlreadyExistsFault("domain-name") - - ex.code.should.equal(400) - json.loads(ex.get_body()).should.equal({ - "__type": "com.amazonaws.swf.base.model#DomainAlreadyExistsFault", - "message": "domain-name" - }) - - -def test_swf_domain_deprecated_fault(): - ex = SWFDomainDeprecatedFault("domain-name") - - ex.code.should.equal(400) - json.loads(ex.get_body()).should.equal({ - "__type": "com.amazonaws.swf.base.model#DomainDeprecatedFault", - "message": "domain-name" - }) - - -def test_swf_serialization_exception(): - ex = SWFSerializationException("value") - - ex.code.should.equal(400) - json.loads(ex.get_body()).should.equal({ - "__type": "com.amazonaws.swf.base.model#SerializationException", - "message": "class java.lang.Foo can not be converted to an String (not a real SWF exception ; happened on: value)" - }) - - -def test_swf_type_already_exists_fault(): - wft = WorkflowType("wf-name", "wf-version") - ex = SWFTypeAlreadyExistsFault(wft) - - ex.code.should.equal(400) - json.loads(ex.get_body()).should.equal({ - "__type": "com.amazonaws.swf.base.model#TypeAlreadyExistsFault", - "message": "WorkflowType=[name=wf-name, version=wf-version]" - }) - - -def test_swf_type_deprecated_fault(): - wft = WorkflowType("wf-name", "wf-version") - ex = SWFTypeDeprecatedFault(wft) - - ex.code.should.equal(400) - 
json.loads(ex.get_body()).should.equal({ - "__type": "com.amazonaws.swf.base.model#TypeDeprecatedFault", - "message": "WorkflowType=[name=wf-name, version=wf-version]" - }) - - -def test_swf_workflow_execution_already_started_fault(): - ex = SWFWorkflowExecutionAlreadyStartedFault() - - ex.code.should.equal(400) - json.loads(ex.get_body()).should.equal({ - "__type": "com.amazonaws.swf.base.model#WorkflowExecutionAlreadyStartedFault", - 'message': 'Already Started', - }) - - -def test_swf_default_undefined_fault(): - ex = SWFDefaultUndefinedFault("execution_start_to_close_timeout") - - ex.code.should.equal(400) - json.loads(ex.get_body()).should.equal({ - "__type": "com.amazonaws.swf.base.model#DefaultUndefinedFault", - "message": "executionStartToCloseTimeout", - }) - - -def test_swf_validation_exception(): - ex = SWFValidationException("Invalid token") - - ex.code.should.equal(400) - json.loads(ex.get_body()).should.equal({ - "__type": "com.amazon.coral.validate#ValidationException", - "message": "Invalid token", - }) - - -def test_swf_decision_validation_error(): - ex = SWFDecisionValidationException([ - {"type": "null_value", - "where": "decisions.1.member.startTimerDecisionAttributes.startToFireTimeout"}, - {"type": "bad_decision_type", - "value": "FooBar", - "where": "decisions.1.member.decisionType", - "possible_values": "Foo, Bar, Baz"}, - ]) - - ex.code.should.equal(400) - ex.error_type.should.equal("com.amazon.coral.validate#ValidationException") - - msg = ex.get_body() - msg.should.match(r"2 validation errors detected:") - msg.should.match( - r"Value null at 'decisions.1.member.startTimerDecisionAttributes.startToFireTimeout' " - r"failed to satisfy constraint: Member must not be null;" - ) - msg.should.match( - r"Value 'FooBar' at 'decisions.1.member.decisionType' failed to satisfy constraint: " - r"Member must satisfy enum value set: \[Foo, Bar, Baz\]" - ) +from __future__ import unicode_literals +import sure # noqa + +import json + +from moto.swf.exceptions import ( + SWFClientError, + SWFUnknownResourceFault, + SWFDomainAlreadyExistsFault, + SWFDomainDeprecatedFault, + SWFSerializationException, + SWFTypeAlreadyExistsFault, + SWFTypeDeprecatedFault, + SWFWorkflowExecutionAlreadyStartedFault, + SWFDefaultUndefinedFault, + SWFValidationException, + SWFDecisionValidationException, +) +from moto.swf.models import ( + WorkflowType, +) + + +def test_swf_client_error(): + ex = SWFClientError("ASpecificType", "error message") + + ex.code.should.equal(400) + json.loads(ex.get_body()).should.equal({ + "__type": "ASpecificType", + "message": "error message" + }) + + +def test_swf_unknown_resource_fault(): + ex = SWFUnknownResourceFault("type", "detail") + + ex.code.should.equal(400) + json.loads(ex.get_body()).should.equal({ + "__type": "com.amazonaws.swf.base.model#UnknownResourceFault", + "message": "Unknown type: detail" + }) + + +def test_swf_unknown_resource_fault_with_only_one_parameter(): + ex = SWFUnknownResourceFault("foo bar baz") + + ex.code.should.equal(400) + json.loads(ex.get_body()).should.equal({ + "__type": "com.amazonaws.swf.base.model#UnknownResourceFault", + "message": "Unknown foo bar baz" + }) + + +def test_swf_domain_already_exists_fault(): + ex = SWFDomainAlreadyExistsFault("domain-name") + + ex.code.should.equal(400) + json.loads(ex.get_body()).should.equal({ + "__type": "com.amazonaws.swf.base.model#DomainAlreadyExistsFault", + "message": "domain-name" + }) + + +def test_swf_domain_deprecated_fault(): + ex = SWFDomainDeprecatedFault("domain-name") + + 
ex.code.should.equal(400) + json.loads(ex.get_body()).should.equal({ + "__type": "com.amazonaws.swf.base.model#DomainDeprecatedFault", + "message": "domain-name" + }) + + +def test_swf_serialization_exception(): + ex = SWFSerializationException("value") + + ex.code.should.equal(400) + json.loads(ex.get_body()).should.equal({ + "__type": "com.amazonaws.swf.base.model#SerializationException", + "message": "class java.lang.Foo can not be converted to an String (not a real SWF exception ; happened on: value)" + }) + + +def test_swf_type_already_exists_fault(): + wft = WorkflowType("wf-name", "wf-version") + ex = SWFTypeAlreadyExistsFault(wft) + + ex.code.should.equal(400) + json.loads(ex.get_body()).should.equal({ + "__type": "com.amazonaws.swf.base.model#TypeAlreadyExistsFault", + "message": "WorkflowType=[name=wf-name, version=wf-version]" + }) + + +def test_swf_type_deprecated_fault(): + wft = WorkflowType("wf-name", "wf-version") + ex = SWFTypeDeprecatedFault(wft) + + ex.code.should.equal(400) + json.loads(ex.get_body()).should.equal({ + "__type": "com.amazonaws.swf.base.model#TypeDeprecatedFault", + "message": "WorkflowType=[name=wf-name, version=wf-version]" + }) + + +def test_swf_workflow_execution_already_started_fault(): + ex = SWFWorkflowExecutionAlreadyStartedFault() + + ex.code.should.equal(400) + json.loads(ex.get_body()).should.equal({ + "__type": "com.amazonaws.swf.base.model#WorkflowExecutionAlreadyStartedFault", + 'message': 'Already Started', + }) + + +def test_swf_default_undefined_fault(): + ex = SWFDefaultUndefinedFault("execution_start_to_close_timeout") + + ex.code.should.equal(400) + json.loads(ex.get_body()).should.equal({ + "__type": "com.amazonaws.swf.base.model#DefaultUndefinedFault", + "message": "executionStartToCloseTimeout", + }) + + +def test_swf_validation_exception(): + ex = SWFValidationException("Invalid token") + + ex.code.should.equal(400) + json.loads(ex.get_body()).should.equal({ + "__type": "com.amazon.coral.validate#ValidationException", + "message": "Invalid token", + }) + + +def test_swf_decision_validation_error(): + ex = SWFDecisionValidationException([ + {"type": "null_value", + "where": "decisions.1.member.startTimerDecisionAttributes.startToFireTimeout"}, + {"type": "bad_decision_type", + "value": "FooBar", + "where": "decisions.1.member.decisionType", + "possible_values": "Foo, Bar, Baz"}, + ]) + + ex.code.should.equal(400) + ex.error_type.should.equal("com.amazon.coral.validate#ValidationException") + + msg = ex.get_body() + msg.should.match(r"2 validation errors detected:") + msg.should.match( + r"Value null at 'decisions.1.member.startTimerDecisionAttributes.startToFireTimeout' " + r"failed to satisfy constraint: Member must not be null;" + ) + msg.should.match( + r"Value 'FooBar' at 'decisions.1.member.decisionType' failed to satisfy constraint: " + r"Member must satisfy enum value set: \[Foo, Bar, Baz\]" + ) diff --git a/tests/test_swf/test_utils.py b/tests/test_swf/test_utils.py index ffa14703729f..2e04b990c8d6 100644 --- a/tests/test_swf/test_utils.py +++ b/tests/test_swf/test_utils.py @@ -1,13 +1,13 @@ -import sure # noqa - -from moto.swf.utils import decapitalize - - -def test_decapitalize(): - cases = { - "fooBar": "fooBar", - "FooBar": "fooBar", - "FOO BAR": "fOO BAR", - } - for before, after in cases.items(): - decapitalize(before).should.equal(after) +import sure # noqa + +from moto.swf.utils import decapitalize + + +def test_decapitalize(): + cases = { + "fooBar": "fooBar", + "FooBar": "fooBar", + "FOO BAR": "fOO BAR", + } + for 
before, after in cases.items(): + decapitalize(before).should.equal(after) diff --git a/tests/test_swf/utils.py b/tests/test_swf/utils.py index 2197b71df80b..4879a0011b32 100644 --- a/tests/test_swf/utils.py +++ b/tests/test_swf/utils.py @@ -1,100 +1,100 @@ -import boto - -from moto.swf.models import ( - ActivityType, - Domain, - WorkflowType, - WorkflowExecution, -) - - -# Some useful constants -# Here are some activity timeouts we use in moto/swf tests ; they're extracted -# from semi-real world example, the goal is mostly to have predictible and -# intuitive behaviour in moto/swf own tests... -ACTIVITY_TASK_TIMEOUTS = { - "heartbeatTimeout": "300", # 5 mins - "scheduleToStartTimeout": "1800", # 30 mins - "startToCloseTimeout": "1800", # 30 mins - "scheduleToCloseTimeout": "2700", # 45 mins -} - -# Some useful decisions -SCHEDULE_ACTIVITY_TASK_DECISION = { - "decisionType": "ScheduleActivityTask", - "scheduleActivityTaskDecisionAttributes": { - "activityId": "my-activity-001", - "activityType": {"name": "test-activity", "version": "v1.1"}, - "taskList": {"name": "activity-task-list"}, - } -} -for key, value in ACTIVITY_TASK_TIMEOUTS.items(): - SCHEDULE_ACTIVITY_TASK_DECISION[ - "scheduleActivityTaskDecisionAttributes"][key] = value - - -# A test Domain -def get_basic_domain(): - return Domain("test-domain", "90") - - -# A test WorkflowType -def _generic_workflow_type_attributes(): - return [ - "test-workflow", "v1.0" - ], { - "task_list": "queue", - "default_child_policy": "ABANDON", - "default_execution_start_to_close_timeout": "7200", - "default_task_start_to_close_timeout": "300", - } - - -def get_basic_workflow_type(): - args, kwargs = _generic_workflow_type_attributes() - return WorkflowType(*args, **kwargs) - - -def mock_basic_workflow_type(domain_name, conn): - args, kwargs = _generic_workflow_type_attributes() - conn.register_workflow_type(domain_name, *args, **kwargs) - return conn - - -# A test WorkflowExecution -def make_workflow_execution(**kwargs): - domain = get_basic_domain() - domain.add_type(ActivityType("test-activity", "v1.1")) - wft = get_basic_workflow_type() - return WorkflowExecution(domain, wft, "ab1234", **kwargs) - - -# Makes decision tasks start automatically on a given workflow -def auto_start_decision_tasks(wfe): - wfe.schedule_decision_task = wfe.schedule_and_start_decision_task - return wfe - - -# Setup a complete example workflow and return the connection object -def setup_workflow(): - conn = boto.connect_swf("the_key", "the_secret") - conn.register_domain("test-domain", "60", description="A test domain") - conn = mock_basic_workflow_type("test-domain", conn) - conn.register_activity_type( - "test-domain", "test-activity", "v1.1", - default_task_heartbeat_timeout="600", - default_task_schedule_to_close_timeout="600", - default_task_schedule_to_start_timeout="600", - default_task_start_to_close_timeout="600", - ) - wfe = conn.start_workflow_execution( - "test-domain", "uid-abcd1234", "test-workflow", "v1.0") - conn.run_id = wfe["runId"] - return conn - - -# A helper for processing the first timeout on a given object -def process_first_timeout(obj): - _timeout = obj.first_timeout() - if _timeout: - obj.timeout(_timeout) +import boto + +from moto.swf.models import ( + ActivityType, + Domain, + WorkflowType, + WorkflowExecution, +) + + +# Some useful constants +# Here are some activity timeouts we use in moto/swf tests ; they're extracted +# from semi-real world example, the goal is mostly to have predictible and +# intuitive behaviour in moto/swf own 
tests... +ACTIVITY_TASK_TIMEOUTS = { + "heartbeatTimeout": "300", # 5 mins + "scheduleToStartTimeout": "1800", # 30 mins + "startToCloseTimeout": "1800", # 30 mins + "scheduleToCloseTimeout": "2700", # 45 mins +} + +# Some useful decisions +SCHEDULE_ACTIVITY_TASK_DECISION = { + "decisionType": "ScheduleActivityTask", + "scheduleActivityTaskDecisionAttributes": { + "activityId": "my-activity-001", + "activityType": {"name": "test-activity", "version": "v1.1"}, + "taskList": {"name": "activity-task-list"}, + } +} +for key, value in ACTIVITY_TASK_TIMEOUTS.items(): + SCHEDULE_ACTIVITY_TASK_DECISION[ + "scheduleActivityTaskDecisionAttributes"][key] = value + + +# A test Domain +def get_basic_domain(): + return Domain("test-domain", "90") + + +# A test WorkflowType +def _generic_workflow_type_attributes(): + return [ + "test-workflow", "v1.0" + ], { + "task_list": "queue", + "default_child_policy": "ABANDON", + "default_execution_start_to_close_timeout": "7200", + "default_task_start_to_close_timeout": "300", + } + + +def get_basic_workflow_type(): + args, kwargs = _generic_workflow_type_attributes() + return WorkflowType(*args, **kwargs) + + +def mock_basic_workflow_type(domain_name, conn): + args, kwargs = _generic_workflow_type_attributes() + conn.register_workflow_type(domain_name, *args, **kwargs) + return conn + + +# A test WorkflowExecution +def make_workflow_execution(**kwargs): + domain = get_basic_domain() + domain.add_type(ActivityType("test-activity", "v1.1")) + wft = get_basic_workflow_type() + return WorkflowExecution(domain, wft, "ab1234", **kwargs) + + +# Makes decision tasks start automatically on a given workflow +def auto_start_decision_tasks(wfe): + wfe.schedule_decision_task = wfe.schedule_and_start_decision_task + return wfe + + +# Setup a complete example workflow and return the connection object +def setup_workflow(): + conn = boto.connect_swf("the_key", "the_secret") + conn.register_domain("test-domain", "60", description="A test domain") + conn = mock_basic_workflow_type("test-domain", conn) + conn.register_activity_type( + "test-domain", "test-activity", "v1.1", + default_task_heartbeat_timeout="600", + default_task_schedule_to_close_timeout="600", + default_task_schedule_to_start_timeout="600", + default_task_start_to_close_timeout="600", + ) + wfe = conn.start_workflow_execution( + "test-domain", "uid-abcd1234", "test-workflow", "v1.0") + conn.run_id = wfe["runId"] + return conn + + +# A helper for processing the first timeout on a given object +def process_first_timeout(obj): + _timeout = obj.first_timeout() + if _timeout: + obj.timeout(_timeout) diff --git a/tests/test_xray/test_xray_boto3.py b/tests/test_xray/test_xray_boto3.py index 5ad8f8bc7b75..c754e3a69a5d 100644 --- a/tests/test_xray/test_xray_boto3.py +++ b/tests/test_xray/test_xray_boto3.py @@ -1,139 +1,139 @@ -from __future__ import unicode_literals - -import boto3 -import json -import botocore.exceptions -import sure # noqa - -from moto import mock_xray - -import datetime - - -@mock_xray -def test_put_telemetry(): - client = boto3.client('xray', region_name='us-east-1') - - client.put_telemetry_records( - TelemetryRecords=[ - { - 'Timestamp': datetime.datetime(2015, 1, 1), - 'SegmentsReceivedCount': 123, - 'SegmentsSentCount': 123, - 'SegmentsSpilloverCount': 123, - 'SegmentsRejectedCount': 123, - 'BackendConnectionErrors': { - 'TimeoutCount': 123, - 'ConnectionRefusedCount': 123, - 'HTTPCode4XXCount': 123, - 'HTTPCode5XXCount': 123, - 'UnknownHostCount': 123, - 'OtherCount': 123 - } - }, - ], - 
EC2InstanceId='string', - Hostname='string', - ResourceARN='string' - ) - - -@mock_xray -def test_put_trace_segments(): - client = boto3.client('xray', region_name='us-east-1') - - client.put_trace_segments( - TraceSegmentDocuments=[ - json.dumps({ - 'name': 'example.com', - 'id': '70de5b6f19ff9a0a', - 'start_time': 1.478293361271E9, - 'trace_id': '1-581cf771-a006649127e371903a2de979', - 'end_time': 1.478293361449E9 - }) - ] - ) - - -@mock_xray -def test_trace_summary(): - client = boto3.client('xray', region_name='us-east-1') - - client.put_trace_segments( - TraceSegmentDocuments=[ - json.dumps({ - 'name': 'example.com', - 'id': '70de5b6f19ff9a0a', - 'start_time': 1.478293361271E9, - 'trace_id': '1-581cf771-a006649127e371903a2de979', - 'in_progress': True - }), - json.dumps({ - 'name': 'example.com', - 'id': '70de5b6f19ff9a0b', - 'start_time': 1478293365, - 'trace_id': '1-581cf771-a006649127e371903a2de979', - 'end_time': 1478293385 - }) - ] - ) - - client.get_trace_summaries( - StartTime=datetime.datetime(2014, 1, 1), - EndTime=datetime.datetime(2017, 1, 1) - ) - - -@mock_xray -def test_batch_get_trace(): - client = boto3.client('xray', region_name='us-east-1') - - client.put_trace_segments( - TraceSegmentDocuments=[ - json.dumps({ - 'name': 'example.com', - 'id': '70de5b6f19ff9a0a', - 'start_time': 1.478293361271E9, - 'trace_id': '1-581cf771-a006649127e371903a2de979', - 'in_progress': True - }), - json.dumps({ - 'name': 'example.com', - 'id': '70de5b6f19ff9a0b', - 'start_time': 1478293365, - 'trace_id': '1-581cf771-a006649127e371903a2de979', - 'end_time': 1478293385 - }) - ] - ) - - resp = client.batch_get_traces( - TraceIds=['1-581cf771-a006649127e371903a2de979', '1-581cf772-b006649127e371903a2de979'] - ) - len(resp['UnprocessedTraceIds']).should.equal(1) - len(resp['Traces']).should.equal(1) - - -# Following are not implemented, just testing it returns what boto expects -@mock_xray -def test_batch_get_service_graph(): - client = boto3.client('xray', region_name='us-east-1') - - client.get_service_graph( - StartTime=datetime.datetime(2014, 1, 1), - EndTime=datetime.datetime(2017, 1, 1) - ) - - -@mock_xray -def test_batch_get_trace_graph(): - client = boto3.client('xray', region_name='us-east-1') - - client.batch_get_traces( - TraceIds=['1-581cf771-a006649127e371903a2de979', '1-581cf772-b006649127e371903a2de979'] - ) - - - - - +from __future__ import unicode_literals + +import boto3 +import json +import botocore.exceptions +import sure # noqa + +from moto import mock_xray + +import datetime + + +@mock_xray +def test_put_telemetry(): + client = boto3.client('xray', region_name='us-east-1') + + client.put_telemetry_records( + TelemetryRecords=[ + { + 'Timestamp': datetime.datetime(2015, 1, 1), + 'SegmentsReceivedCount': 123, + 'SegmentsSentCount': 123, + 'SegmentsSpilloverCount': 123, + 'SegmentsRejectedCount': 123, + 'BackendConnectionErrors': { + 'TimeoutCount': 123, + 'ConnectionRefusedCount': 123, + 'HTTPCode4XXCount': 123, + 'HTTPCode5XXCount': 123, + 'UnknownHostCount': 123, + 'OtherCount': 123 + } + }, + ], + EC2InstanceId='string', + Hostname='string', + ResourceARN='string' + ) + + +@mock_xray +def test_put_trace_segments(): + client = boto3.client('xray', region_name='us-east-1') + + client.put_trace_segments( + TraceSegmentDocuments=[ + json.dumps({ + 'name': 'example.com', + 'id': '70de5b6f19ff9a0a', + 'start_time': 1.478293361271E9, + 'trace_id': '1-581cf771-a006649127e371903a2de979', + 'end_time': 1.478293361449E9 + }) + ] + ) + + +@mock_xray +def test_trace_summary(): + 
client = boto3.client('xray', region_name='us-east-1') + + client.put_trace_segments( + TraceSegmentDocuments=[ + json.dumps({ + 'name': 'example.com', + 'id': '70de5b6f19ff9a0a', + 'start_time': 1.478293361271E9, + 'trace_id': '1-581cf771-a006649127e371903a2de979', + 'in_progress': True + }), + json.dumps({ + 'name': 'example.com', + 'id': '70de5b6f19ff9a0b', + 'start_time': 1478293365, + 'trace_id': '1-581cf771-a006649127e371903a2de979', + 'end_time': 1478293385 + }) + ] + ) + + client.get_trace_summaries( + StartTime=datetime.datetime(2014, 1, 1), + EndTime=datetime.datetime(2017, 1, 1) + ) + + +@mock_xray +def test_batch_get_trace(): + client = boto3.client('xray', region_name='us-east-1') + + client.put_trace_segments( + TraceSegmentDocuments=[ + json.dumps({ + 'name': 'example.com', + 'id': '70de5b6f19ff9a0a', + 'start_time': 1.478293361271E9, + 'trace_id': '1-581cf771-a006649127e371903a2de979', + 'in_progress': True + }), + json.dumps({ + 'name': 'example.com', + 'id': '70de5b6f19ff9a0b', + 'start_time': 1478293365, + 'trace_id': '1-581cf771-a006649127e371903a2de979', + 'end_time': 1478293385 + }) + ] + ) + + resp = client.batch_get_traces( + TraceIds=['1-581cf771-a006649127e371903a2de979', '1-581cf772-b006649127e371903a2de979'] + ) + len(resp['UnprocessedTraceIds']).should.equal(1) + len(resp['Traces']).should.equal(1) + + +# Following are not implemented, just testing it returns what boto expects +@mock_xray +def test_batch_get_service_graph(): + client = boto3.client('xray', region_name='us-east-1') + + client.get_service_graph( + StartTime=datetime.datetime(2014, 1, 1), + EndTime=datetime.datetime(2017, 1, 1) + ) + + +@mock_xray +def test_batch_get_trace_graph(): + client = boto3.client('xray', region_name='us-east-1') + + client.batch_get_traces( + TraceIds=['1-581cf771-a006649127e371903a2de979', '1-581cf772-b006649127e371903a2de979'] + ) + + + + + diff --git a/tests/test_xray/test_xray_client.py b/tests/test_xray/test_xray_client.py index 0cd948950434..8e7b84be0fac 100644 --- a/tests/test_xray/test_xray_client.py +++ b/tests/test_xray/test_xray_client.py @@ -1,72 +1,72 @@ -from __future__ import unicode_literals -from moto import mock_xray_client, XRaySegment, mock_dynamodb2 -import sure # noqa -import boto3 - -from moto.xray.mock_client import MockEmitter -import aws_xray_sdk.core as xray_core -import aws_xray_sdk.core.patcher as xray_core_patcher - -import botocore.client -import botocore.endpoint -original_make_api_call = botocore.client.BaseClient._make_api_call -original_encode_headers = botocore.endpoint.Endpoint._encode_headers - -import requests -original_session_request = requests.Session.request -original_session_prep_request = requests.Session.prepare_request - - -@mock_xray_client -@mock_dynamodb2 -def test_xray_dynamo_request_id(): - # Could be ran in any order, so we need to tell sdk that its been unpatched - xray_core_patcher._PATCHED_MODULES = set() - xray_core.patch_all() - - client = boto3.client('dynamodb', region_name='us-east-1') - - with XRaySegment(): - resp = client.list_tables() - resp['ResponseMetadata'].should.contain('RequestId') - id1 = resp['ResponseMetadata']['RequestId'] - - with XRaySegment(): - client.list_tables() - resp = client.list_tables() - id2 = resp['ResponseMetadata']['RequestId'] - - id1.should_not.equal(id2) - - setattr(botocore.client.BaseClient, '_make_api_call', original_make_api_call) - setattr(botocore.endpoint.Endpoint, '_encode_headers', original_encode_headers) - setattr(requests.Session, 'request', original_session_request) 
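
aws_xray_sdk's patch_all() rewrites attributes on botocore and requests process-wide, which is why these tests capture the originals at import time and put them back with setattr once the assertions are done; skipping the restore would leak the patch into every test that runs afterwards. A minimal sketch of the same save/patch/restore idiom in isolation (generic names, assuming only that botocore is importable):

    import botocore.client

    # capture the original bound method before anything patches it
    _original = botocore.client.BaseClient._make_api_call

    def _traced(self, operation_name, api_params):
        # stand-in wrapper; aws_xray_sdk installs something comparable
        return _original(self, operation_name, api_params)

    botocore.client.BaseClient._make_api_call = _traced
    try:
        pass  # code under test runs against the patched client here
    finally:
        # always restore, or the patch outlives this test
        botocore.client.BaseClient._make_api_call = _original
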
- setattr(requests.Session, 'prepare_request', original_session_prep_request)
-
-
-@mock_xray_client
-def test_xray_udp_emitter_patched():
-    # Could be ran in any order, so we need to tell sdk that its been unpatched
-    xray_core_patcher._PATCHED_MODULES = set()
-    xray_core.patch_all()
-
-    assert isinstance(xray_core.xray_recorder._emitter, MockEmitter)
-
-    setattr(botocore.client.BaseClient, '_make_api_call', original_make_api_call)
-    setattr(botocore.endpoint.Endpoint, '_encode_headers', original_encode_headers)
-    setattr(requests.Session, 'request', original_session_request)
-    setattr(requests.Session, 'prepare_request', original_session_prep_request)
-
-
-@mock_xray_client
-def test_xray_context_patched():
-    # Could be ran in any order, so we need to tell sdk that its been unpatched
-    xray_core_patcher._PATCHED_MODULES = set()
-    xray_core.patch_all()
-
-    xray_core.xray_recorder._context.context_missing.should.equal('LOG_ERROR')
-
-    setattr(botocore.client.BaseClient, '_make_api_call', original_make_api_call)
-    setattr(botocore.endpoint.Endpoint, '_encode_headers', original_encode_headers)
-    setattr(requests.Session, 'request', original_session_request)
-    setattr(requests.Session, 'prepare_request', original_session_prep_request)
+from __future__ import unicode_literals
+from moto import mock_xray_client, XRaySegment, mock_dynamodb2
+import sure  # noqa
+import boto3
+
+from moto.xray.mock_client import MockEmitter
+import aws_xray_sdk.core as xray_core
+import aws_xray_sdk.core.patcher as xray_core_patcher
+
+import botocore.client
+import botocore.endpoint
+original_make_api_call = botocore.client.BaseClient._make_api_call
+original_encode_headers = botocore.endpoint.Endpoint._encode_headers
+
+import requests
+original_session_request = requests.Session.request
+original_session_prep_request = requests.Session.prepare_request
+
+
+@mock_xray_client
+@mock_dynamodb2
+def test_xray_dynamo_request_id():
+    # Could be run in any order, so we need to tell the SDK that it's been unpatched
+    xray_core_patcher._PATCHED_MODULES = set()
+    xray_core.patch_all()
+
+    client = boto3.client('dynamodb', region_name='us-east-1')
+
+    with XRaySegment():
+        resp = client.list_tables()
+        resp['ResponseMetadata'].should.contain('RequestId')
+        id1 = resp['ResponseMetadata']['RequestId']
+
+    with XRaySegment():
+        client.list_tables()
+        resp = client.list_tables()
+        id2 = resp['ResponseMetadata']['RequestId']
+
+    id1.should_not.equal(id2)
+
+    setattr(botocore.client.BaseClient, '_make_api_call', original_make_api_call)
+    setattr(botocore.endpoint.Endpoint, '_encode_headers', original_encode_headers)
+    setattr(requests.Session, 'request', original_session_request)
+    setattr(requests.Session, 'prepare_request', original_session_prep_request)
+
+
+@mock_xray_client
+def test_xray_udp_emitter_patched():
+    # Could be run in any order, so we need to tell the SDK that it's been unpatched
+    xray_core_patcher._PATCHED_MODULES = set()
+    xray_core.patch_all()
+
+    assert isinstance(xray_core.xray_recorder._emitter, MockEmitter)
+
+    setattr(botocore.client.BaseClient, '_make_api_call', original_make_api_call)
+    setattr(botocore.endpoint.Endpoint, '_encode_headers', original_encode_headers)
+    setattr(requests.Session, 'request', original_session_request)
+    setattr(requests.Session, 'prepare_request', original_session_prep_request)
+
+
+@mock_xray_client
+def test_xray_context_patched():
+    # Could be run in any order, so we need to tell the SDK that it's been unpatched
+    xray_core_patcher._PATCHED_MODULES = set()
+    xray_core.patch_all()
+
+    
xray_core.xray_recorder._context.context_missing.should.equal('LOG_ERROR') + + setattr(botocore.client.BaseClient, '_make_api_call', original_make_api_call) + setattr(botocore.endpoint.Endpoint, '_encode_headers', original_encode_headers) + setattr(requests.Session, 'request', original_session_request) + setattr(requests.Session, 'prepare_request', original_session_prep_request) diff --git a/tox.ini b/tox.ini index 0f3f1466a0ec..7c5ed1ef7e89 100644 --- a/tox.ini +++ b/tox.ini @@ -1,14 +1,14 @@ -[tox] -envlist = py27, py36 - -[testenv] -deps = - -r{toxinidir}/requirements.txt - -r{toxinidir}/requirements-dev.txt -commands = - {envpython} setup.py test - nosetests {posargs} - -[flake8] -ignore = E128,E501 -exclude = moto/packages,dist +[tox] +envlist = py27, py36 + +[testenv] +deps = + -r{toxinidir}/requirements.txt + -r{toxinidir}/requirements-dev.txt +commands = + {envpython} setup.py test + nosetests {posargs} + +[flake8] +ignore = E128,E501 +exclude = moto/packages,dist diff --git a/travis_moto_server.sh b/travis_moto_server.sh index 902644b20556..3c6947fd95d4 100755 --- a/travis_moto_server.sh +++ b/travis_moto_server.sh @@ -1,5 +1,5 @@ -#!/usr/bin/env bash -set -e -pip install flask -pip install /moto/dist/moto*.gz +#!/usr/bin/env bash +set -e +pip install flask +pip install /moto/dist/moto*.gz moto_server -H 0.0.0.0 -p 5000 \ No newline at end of file diff --git a/wait_for.py b/wait_for.py index d313ea5a9755..cba4bc665183 100755 --- a/wait_for.py +++ b/wait_for.py @@ -1,31 +1,31 @@ -import time - -try: - # py2 - import urllib2 as urllib - from urllib2 import URLError - import socket - import httplib - - EXCEPTIONS = (URLError, socket.error, httplib.BadStatusLine) -except ImportError: - # py3 - import urllib.request as urllib - from urllib.error import URLError - - EXCEPTIONS = (URLError, ConnectionResetError) - - -start_ts = time.time() -print("Waiting for service to come up") -while True: - try: - urllib.urlopen('http://localhost:5000/', timeout=1) - break - except EXCEPTIONS: - elapsed_s = time.time() - start_ts - if elapsed_s > 60: - raise - - print('.') - time.sleep(1) +import time + +try: + # py2 + import urllib2 as urllib + from urllib2 import URLError + import socket + import httplib + + EXCEPTIONS = (URLError, socket.error, httplib.BadStatusLine) +except ImportError: + # py3 + import urllib.request as urllib + from urllib.error import URLError + + EXCEPTIONS = (URLError, ConnectionResetError) + + +start_ts = time.time() +print("Waiting for service to come up") +while True: + try: + urllib.urlopen('http://localhost:5000/', timeout=1) + break + except EXCEPTIONS: + elapsed_s = time.time() - start_ts + if elapsed_s > 60: + raise + + print('.') + time.sleep(1) From 3ea673b3d04c866f9301e3714cd0578a24883000 Mon Sep 17 00:00:00 2001 From: Stephan Date: Fri, 21 Dec 2018 16:30:17 +0100 Subject: [PATCH 011/658] started with mocking job execution --- moto/iot/models.py | 1521 +++++++++++++++++++----------------- moto/iot/responses.py | 1004 ++++++++++++------------ tests/test_iot/test_iot.py | 39 + 3 files changed, 1331 insertions(+), 1233 deletions(-) diff --git a/moto/iot/models.py b/moto/iot/models.py index 4bcab26ebc4b..1279a5baa710 100644 --- a/moto/iot/models.py +++ b/moto/iot/models.py @@ -1,736 +1,785 @@ -from __future__ import unicode_literals - -import hashlib -import random -import re -import string -import time -import uuid -from collections import OrderedDict -from datetime import datetime - -import boto3 - -from moto.core import BaseBackend, BaseModel -from .exceptions 
import ( - ResourceNotFoundException, - InvalidRequestException, - VersionConflictException -) - - -class FakeThing(BaseModel): - def __init__(self, thing_name, thing_type, attributes, region_name): - self.region_name = region_name - self.thing_name = thing_name - self.thing_type = thing_type - self.attributes = attributes - self.arn = 'arn:aws:iot:%s:1:thing/%s' % (self.region_name, thing_name) - self.version = 1 - # TODO: we need to handle 'version'? - - # for iot-data - self.thing_shadow = None - - def to_dict(self, include_default_client_id=False): - obj = { - 'thingName': self.thing_name, - 'thingArn': self.arn, - 'attributes': self.attributes, - 'version': self.version - } - if self.thing_type: - obj['thingTypeName'] = self.thing_type.thing_type_name - if include_default_client_id: - obj['defaultClientId'] = self.thing_name - return obj - - -class FakeThingType(BaseModel): - def __init__(self, thing_type_name, thing_type_properties, region_name): - self.region_name = region_name - self.thing_type_name = thing_type_name - self.thing_type_properties = thing_type_properties - self.thing_type_id = str(uuid.uuid4()) # I don't know the rule of id - t = time.time() - self.metadata = { - 'deprecated': False, - 'creationData': int(t * 1000) / 1000.0 - } - self.arn = 'arn:aws:iot:%s:1:thingtype/%s' % (self.region_name, thing_type_name) - - def to_dict(self): - return { - 'thingTypeName': self.thing_type_name, - 'thingTypeId': self.thing_type_id, - 'thingTypeProperties': self.thing_type_properties, - 'thingTypeMetadata': self.metadata - } - - -class FakeThingGroup(BaseModel): - def __init__(self, thing_group_name, parent_group_name, thing_group_properties, region_name): - self.region_name = region_name - self.thing_group_name = thing_group_name - self.thing_group_id = str(uuid.uuid4()) # I don't know the rule of id - self.version = 1 # TODO: tmp - self.parent_group_name = parent_group_name - self.thing_group_properties = thing_group_properties or {} - t = time.time() - self.metadata = { - 'creationData': int(t * 1000) / 1000.0 - } - self.arn = 'arn:aws:iot:%s:1:thinggroup/%s' % (self.region_name, thing_group_name) - self.things = OrderedDict() - - def to_dict(self): - return { - 'thingGroupName': self.thing_group_name, - 'thingGroupId': self.thing_group_id, - 'version': self.version, - 'thingGroupProperties': self.thing_group_properties, - 'thingGroupMetadata': self.metadata - } - - -class FakeCertificate(BaseModel): - def __init__(self, certificate_pem, status, region_name): - m = hashlib.sha256() - m.update(str(uuid.uuid4()).encode('utf-8')) - self.certificate_id = m.hexdigest() - self.arn = 'arn:aws:iot:%s:1:cert/%s' % (region_name, self.certificate_id) - self.certificate_pem = certificate_pem - self.status = status - - # TODO: must adjust - self.owner = '1' - self.transfer_data = {} - self.creation_date = time.time() - self.last_modified_date = self.creation_date - self.ca_certificate_id = None - - def to_dict(self): - return { - 'certificateArn': self.arn, - 'certificateId': self.certificate_id, - 'status': self.status, - 'creationDate': self.creation_date - } - - def to_description_dict(self): - """ - You might need keys below in some situation - - caCertificateId - - previousOwnedBy - """ - return { - 'certificateArn': self.arn, - 'certificateId': self.certificate_id, - 'status': self.status, - 'certificatePem': self.certificate_pem, - 'ownedBy': self.owner, - 'creationDate': self.creation_date, - 'lastModifiedDate': self.last_modified_date, - 'transferData': self.transfer_data - } - - 
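
The Fake* classes above are plain in-memory value objects, and each to_dict variant maps to one response shape: list_certificates serializes FakeCertificate.to_dict(), while describe_certificate returns the richer to_description_dict(). A minimal usage sketch of how that surfaces through boto3 under moto (the @mock_iot decorator and the certificateDescription wrapper key follow the public AWS IoT API and are assumptions here, not taken from this diff):

    import boto3
    from moto import mock_iot

    @mock_iot
    def certificate_roundtrip():
        client = boto3.client('iot', region_name='ap-northeast-1')
        # backed by FakeCertificate above; the PEM and key pair are random filler
        created = client.create_keys_and_certificate(setAsActive=True)
        desc = client.describe_certificate(certificateId=created['certificateId'])
        # describe_certificate exposes the to_description_dict() shape
        assert desc['certificateDescription']['status'] == 'ACTIVE'

    certificate_roundtrip()
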
-class FakePolicy(BaseModel): - def __init__(self, name, document, region_name, default_version_id='1'): - self.name = name - self.document = document - self.arn = 'arn:aws:iot:%s:1:policy/%s' % (region_name, name) - self.default_version_id = default_version_id - self.versions = [FakePolicyVersion(self.name, document, True, region_name)] - - def to_get_dict(self): - return { - 'policyName': self.name, - 'policyArn': self.arn, - 'policyDocument': self.document, - 'defaultVersionId': self.default_version_id - } - - def to_dict_at_creation(self): - return { - 'policyName': self.name, - 'policyArn': self.arn, - 'policyDocument': self.document, - 'policyVersionId': self.default_version_id - } - - def to_dict(self): - return { - 'policyName': self.name, - 'policyArn': self.arn, - } - - -class FakePolicyVersion(object): - - def __init__(self, - policy_name, - document, - is_default, - region_name): - self.name = policy_name - self.arn = 'arn:aws:iot:%s:1:policy/%s' % (region_name, policy_name) - self.document = document or {} - self.is_default = is_default - self.version_id = '1' - - self.create_datetime = time.mktime(datetime(2015, 1, 1).timetuple()) - self.last_modified_datetime = time.mktime(datetime(2015, 1, 2).timetuple()) - - def to_get_dict(self): - return { - 'policyName': self.name, - 'policyArn': self.arn, - 'policyDocument': self.document, - 'policyVersionId': self.version_id, - 'isDefaultVersion': self.is_default, - 'creationDate': self.create_datetime, - 'lastModifiedDate': self.last_modified_datetime, - 'generationId': self.version_id - } - - def to_dict_at_creation(self): - return { - 'policyArn': self.arn, - 'policyDocument': self.document, - 'policyVersionId': self.version_id, - 'isDefaultVersion': self.is_default - } - - def to_dict(self): - return { - 'versionId': self.version_id, - 'isDefaultVersion': self.is_default, - 'createDate': self.create_datetime, - } - - -class FakeJob(BaseModel): - JOB_ID_REGEX_PATTERN = "[a-zA-Z0-9_-]" - JOB_ID_REGEX = re.compile(JOB_ID_REGEX_PATTERN) - - def __init__(self, job_id, targets, document_source, document, description, presigned_url_config, target_selection, - job_executions_rollout_config, document_parameters, region_name): - if not self._job_id_matcher(self.JOB_ID_REGEX, job_id): - raise InvalidRequestException() - - self.region_name = region_name - self.job_id = job_id - self.job_arn = 'arn:aws:iot:%s:1:job/%s' % (self.region_name, job_id) - self.targets = targets - self.document_source = document_source - self.document = document - self.description = description - self.presigned_url_config = presigned_url_config - self.target_selection = target_selection - self.job_executions_rollout_config = job_executions_rollout_config - self.status = None # IN_PROGRESS | CANCELED | COMPLETED - self.comment = None - self.created_at = time.mktime(datetime(2015, 1, 1).timetuple()) - self.last_updated_at = time.mktime(datetime(2015, 1, 1).timetuple()) - self.completed_at = None - self.job_process_details = { - 'processingTargets': targets, - 'numberOfQueuedThings': 1, - 'numberOfCanceledThings': 0, - 'numberOfSucceededThings': 0, - 'numberOfFailedThings': 0, - 'numberOfRejectedThings': 0, - 'numberOfInProgressThings': 0, - 'numberOfRemovedThings': 0 - } - self.document_parameters = document_parameters - - def to_dict(self): - obj = { - 'jobArn': self.job_arn, - 'jobId': self.job_id, - 'targets': self.targets, - 'description': self.description, - 'presignedUrlConfig': self.presigned_url_config, - 'targetSelection': self.target_selection, - 
'jobExecutionsRolloutConfig': self.job_executions_rollout_config, - 'status': self.status, - 'comment': self.comment, - 'createdAt': self.created_at, - 'lastUpdatedAt': self.last_updated_at, - 'completedAt': self.completedAt, - 'jobProcessDetails': self.job_process_details, - 'documentParameters': self.document_parameters, - 'document': self.document, - 'documentSource': self.document_source - } - - return obj - - def _job_id_matcher(self, regex, argument): - regex_match = regex.match(argument) - length_match = len(argument) <= 64 - return regex_match and length_match - - -class IoTBackend(BaseBackend): - def __init__(self, region_name=None): - super(IoTBackend, self).__init__() - self.region_name = region_name - self.things = OrderedDict() - self.jobs = OrderedDict() - self.thing_types = OrderedDict() - self.thing_groups = OrderedDict() - self.certificates = OrderedDict() - self.policies = OrderedDict() - self.principal_policies = OrderedDict() - self.principal_things = OrderedDict() - - def reset(self): - region_name = self.region_name - self.__dict__ = {} - self.__init__(region_name) - - def create_thing(self, thing_name, thing_type_name, attribute_payload): - thing_types = self.list_thing_types() - thing_type = None - if thing_type_name: - filtered_thing_types = [_ for _ in thing_types if _.thing_type_name == thing_type_name] - if len(filtered_thing_types) == 0: - raise ResourceNotFoundException() - thing_type = filtered_thing_types[0] - if attribute_payload is None: - attributes = {} - elif 'attributes' not in attribute_payload: - attributes = {} - else: - attributes = attribute_payload['attributes'] - thing = FakeThing(thing_name, thing_type, attributes, self.region_name) - self.things[thing.arn] = thing - return thing.thing_name, thing.arn - - def create_thing_type(self, thing_type_name, thing_type_properties): - if thing_type_properties is None: - thing_type_properties = {} - thing_type = FakeThingType(thing_type_name, thing_type_properties, self.region_name) - self.thing_types[thing_type.arn] = thing_type - return thing_type.thing_type_name, thing_type.arn - - def list_thing_types(self, thing_type_name=None): - if thing_type_name: - # It's weird but thing_type_name is filtered by forward match, not complete match - return [_ for _ in self.thing_types.values() if _.thing_type_name.startswith(thing_type_name)] - return self.thing_types.values() - - def list_things(self, attribute_name, attribute_value, thing_type_name, max_results, token): - all_things = [_.to_dict() for _ in self.things.values()] - if attribute_name is not None and thing_type_name is not None: - filtered_things = list(filter(lambda elem: - attribute_name in elem["attributes"] and - elem["attributes"][attribute_name] == attribute_value and - "thingTypeName" in elem and - elem["thingTypeName"] == thing_type_name, all_things)) - elif attribute_name is not None and thing_type_name is None: - filtered_things = list(filter(lambda elem: - attribute_name in elem["attributes"] and - elem["attributes"][attribute_name] == attribute_value, all_things)) - elif attribute_name is None and thing_type_name is not None: - filtered_things = list( - filter(lambda elem: "thingTypeName" in elem and elem["thingTypeName"] == thing_type_name, all_things)) - else: - filtered_things = all_things - - if token is None: - things = filtered_things[0:max_results] - next_token = str(max_results) if len(filtered_things) > max_results else None - else: - token = int(token) - things = filtered_things[token:token + max_results] - next_token = 
str(token + max_results) if len(filtered_things) > token + max_results else None - - return things, next_token - - def describe_thing(self, thing_name): - things = [_ for _ in self.things.values() if _.thing_name == thing_name] - if len(things) == 0: - raise ResourceNotFoundException() - return things[0] - - def describe_thing_type(self, thing_type_name): - thing_types = [_ for _ in self.thing_types.values() if _.thing_type_name == thing_type_name] - if len(thing_types) == 0: - raise ResourceNotFoundException() - return thing_types[0] - - def delete_thing(self, thing_name, expected_version): - # TODO: handle expected_version - - # can raise ResourceNotFoundError - thing = self.describe_thing(thing_name) - del self.things[thing.arn] - - def delete_thing_type(self, thing_type_name): - # can raise ResourceNotFoundError - thing_type = self.describe_thing_type(thing_type_name) - del self.thing_types[thing_type.arn] - - def update_thing(self, thing_name, thing_type_name, attribute_payload, expected_version, remove_thing_type): - # if attributes payload = {}, nothing - thing = self.describe_thing(thing_name) - thing_type = None - - if remove_thing_type and thing_type_name: - raise InvalidRequestException() - - # thing_type - if thing_type_name: - thing_types = self.list_thing_types() - filtered_thing_types = [_ for _ in thing_types if _.thing_type_name == thing_type_name] - if len(filtered_thing_types) == 0: - raise ResourceNotFoundException() - thing_type = filtered_thing_types[0] - thing.thing_type = thing_type - - if remove_thing_type: - thing.thing_type = None - - # attribute - if attribute_payload is not None and 'attributes' in attribute_payload: - do_merge = attribute_payload.get('merge', False) - attributes = attribute_payload['attributes'] - if not do_merge: - thing.attributes = attributes - else: - thing.attributes.update(attributes) - - def _random_string(self): - n = 20 - random_str = ''.join([random.choice(string.ascii_letters + string.digits) for i in range(n)]) - return random_str - - def create_keys_and_certificate(self, set_as_active): - # implement here - # caCertificate can be blank - key_pair = { - 'PublicKey': self._random_string(), - 'PrivateKey': self._random_string() - } - certificate_pem = self._random_string() - status = 'ACTIVE' if set_as_active else 'INACTIVE' - certificate = FakeCertificate(certificate_pem, status, self.region_name) - self.certificates[certificate.certificate_id] = certificate - return certificate, key_pair - - def delete_certificate(self, certificate_id): - self.describe_certificate(certificate_id) - del self.certificates[certificate_id] - - def describe_certificate(self, certificate_id): - certs = [_ for _ in self.certificates.values() if _.certificate_id == certificate_id] - if len(certs) == 0: - raise ResourceNotFoundException() - return certs[0] - - def list_certificates(self): - return self.certificates.values() - - def update_certificate(self, certificate_id, new_status): - cert = self.describe_certificate(certificate_id) - # TODO: validate new_status - cert.status = new_status - - def create_policy(self, policy_name, policy_document): - policy = FakePolicy(policy_name, policy_document, self.region_name) - self.policies[policy.name] = policy - return policy - - def attach_policy(self, policy_name, target): - principal = self._get_principal(target) - policy = self.get_policy(policy_name) - k = (target, policy_name) - if k in self.principal_policies: - return - self.principal_policies[k] = (principal, policy) - - def detach_policy(self, 
policy_name, target): - # this may raises ResourceNotFoundException - self._get_principal(target) - self.get_policy(policy_name) - - k = (target, policy_name) - if k not in self.principal_policies: - raise ResourceNotFoundException() - del self.principal_policies[k] - - def list_attached_policies(self, target): - policies = [v[1] for k, v in self.principal_policies.items() if k[0] == target] - return policies - - def list_policies(self): - policies = self.policies.values() - return policies - - def get_policy(self, policy_name): - policies = [_ for _ in self.policies.values() if _.name == policy_name] - if len(policies) == 0: - raise ResourceNotFoundException() - return policies[0] - - def delete_policy(self, policy_name): - policy = self.get_policy(policy_name) - del self.policies[policy.name] - - def create_policy_version(self, policy_name, policy_document, set_as_default): - policy = self.get_policy(policy_name) - if not policy: - raise ResourceNotFoundException() - version = FakePolicyVersion(policy_name, policy_document, set_as_default, self.region_name) - policy.versions.append(version) - version.version_id = '{0}'.format(len(policy.versions)) - if set_as_default: - self.set_default_policy_version(policy_name, version.version_id) - return version - - def set_default_policy_version(self, policy_name, version_id): - policy = self.get_policy(policy_name) - if not policy: - raise ResourceNotFoundException() - for version in policy.versions: - if version.version_id == version_id: - version.is_default = True - policy.default_version_id = version.version_id - policy.document = version.document - else: - version.is_default = False - - def get_policy_version(self, policy_name, version_id): - policy = self.get_policy(policy_name) - if not policy: - raise ResourceNotFoundException() - for version in policy.versions: - if version.version_id == version_id: - return version - raise ResourceNotFoundException() - - def list_policy_versions(self, policy_name): - policy = self.get_policy(policy_name) - if not policy: - raise ResourceNotFoundException() - return policy.versions - - def delete_policy_version(self, policy_name, version_id): - policy = self.get_policy(policy_name) - if not policy: - raise ResourceNotFoundException() - if version_id == policy.default_version_id: - raise InvalidRequestException( - "Cannot delete the default version of a policy") - for i, v in enumerate(policy.versions): - if v.version_id == version_id: - del policy.versions[i] - return - raise ResourceNotFoundException() - - def _get_principal(self, principal_arn): - """ - raise ResourceNotFoundException - """ - if ':cert/' in principal_arn: - certs = [_ for _ in self.certificates.values() if _.arn == principal_arn] - if len(certs) == 0: - raise ResourceNotFoundException() - principal = certs[0] - return principal - else: - # TODO: search for cognito_ids - pass - raise ResourceNotFoundException() - - def attach_principal_policy(self, policy_name, principal_arn): - principal = self._get_principal(principal_arn) - policy = self.get_policy(policy_name) - k = (principal_arn, policy_name) - if k in self.principal_policies: - return - self.principal_policies[k] = (principal, policy) - - def detach_principal_policy(self, policy_name, principal_arn): - # this may raises ResourceNotFoundException - self._get_principal(principal_arn) - self.get_policy(policy_name) - - k = (principal_arn, policy_name) - if k not in self.principal_policies: - raise ResourceNotFoundException() - del self.principal_policies[k] - - def 
list_principal_policies(self, principal_arn): - policies = [v[1] for k, v in self.principal_policies.items() if k[0] == principal_arn] - return policies - - def list_policy_principals(self, policy_name): - principals = [k[0] for k, v in self.principal_policies.items() if k[1] == policy_name] - return principals - - def attach_thing_principal(self, thing_name, principal_arn): - principal = self._get_principal(principal_arn) - thing = self.describe_thing(thing_name) - k = (principal_arn, thing_name) - if k in self.principal_things: - return - self.principal_things[k] = (principal, thing) - - def detach_thing_principal(self, thing_name, principal_arn): - # this may raises ResourceNotFoundException - self._get_principal(principal_arn) - self.describe_thing(thing_name) - - k = (principal_arn, thing_name) - if k not in self.principal_things: - raise ResourceNotFoundException() - del self.principal_things[k] - - def list_principal_things(self, principal_arn): - thing_names = [k[0] for k, v in self.principal_things.items() if k[0] == principal_arn] - return thing_names - - def list_thing_principals(self, thing_name): - principals = [k[0] for k, v in self.principal_things.items() if k[1] == thing_name] - return principals - - def describe_thing_group(self, thing_group_name): - thing_groups = [_ for _ in self.thing_groups.values() if _.thing_group_name == thing_group_name] - if len(thing_groups) == 0: - raise ResourceNotFoundException() - return thing_groups[0] - - def create_thing_group(self, thing_group_name, parent_group_name, thing_group_properties): - thing_group = FakeThingGroup(thing_group_name, parent_group_name, thing_group_properties, self.region_name) - self.thing_groups[thing_group.arn] = thing_group - return thing_group.thing_group_name, thing_group.arn, thing_group.thing_group_id - - def delete_thing_group(self, thing_group_name, expected_version): - thing_group = self.describe_thing_group(thing_group_name) - del self.thing_groups[thing_group.arn] - - def list_thing_groups(self, parent_group, name_prefix_filter, recursive): - thing_groups = self.thing_groups.values() - return thing_groups - - def update_thing_group(self, thing_group_name, thing_group_properties, expected_version): - thing_group = self.describe_thing_group(thing_group_name) - if expected_version and expected_version != thing_group.version: - raise VersionConflictException(thing_group_name) - attribute_payload = thing_group_properties.get('attributePayload', None) - if attribute_payload is not None and 'attributes' in attribute_payload: - do_merge = attribute_payload.get('merge', False) - attributes = attribute_payload['attributes'] - if not do_merge: - thing_group.thing_group_properties['attributePayload']['attributes'] = attributes - else: - thing_group.thing_group_properties['attributePayload']['attributes'].update(attributes) - elif attribute_payload is not None and 'attributes' not in attribute_payload: - thing_group.attributes = {} - thing_group.version = thing_group.version + 1 - return thing_group.version - - def _identify_thing_group(self, thing_group_name, thing_group_arn): - # identify thing group - if thing_group_name is None and thing_group_arn is None: - raise InvalidRequestException( - ' Both thingGroupArn and thingGroupName are empty. 
Need to specify at least one of them' - ) - if thing_group_name is not None: - thing_group = self.describe_thing_group(thing_group_name) - if thing_group_arn and thing_group.arn != thing_group_arn: - raise InvalidRequestException( - 'ThingGroupName thingGroupArn does not match specified thingGroupName in request' - ) - elif thing_group_arn is not None: - if thing_group_arn not in self.thing_groups: - raise InvalidRequestException() - thing_group = self.thing_groups[thing_group_arn] - return thing_group - - def _identify_thing(self, thing_name, thing_arn): - # identify thing - if thing_name is None and thing_arn is None: - raise InvalidRequestException( - 'Both thingArn and thingName are empty. Need to specify at least one of them' - ) - if thing_name is not None: - thing = self.describe_thing(thing_name) - if thing_arn and thing.arn != thing_arn: - raise InvalidRequestException( - 'ThingName thingArn does not match specified thingName in request' - ) - elif thing_arn is not None: - if thing_arn not in self.things: - raise InvalidRequestException() - thing = self.things[thing_arn] - return thing - - def add_thing_to_thing_group(self, thing_group_name, thing_group_arn, thing_name, thing_arn): - thing_group = self._identify_thing_group(thing_group_name, thing_group_arn) - thing = self._identify_thing(thing_name, thing_arn) - if thing.arn in thing_group.things: - # aws ignores duplicate registration - return - thing_group.things[thing.arn] = thing - - def remove_thing_from_thing_group(self, thing_group_name, thing_group_arn, thing_name, thing_arn): - thing_group = self._identify_thing_group(thing_group_name, thing_group_arn) - thing = self._identify_thing(thing_name, thing_arn) - if thing.arn not in thing_group.things: - # aws ignores non-registered thing - return - del thing_group.things[thing.arn] - - def list_things_in_thing_group(self, thing_group_name, recursive): - thing_group = self.describe_thing_group(thing_group_name) - return thing_group.things.values() - - def list_thing_groups_for_thing(self, thing_name): - thing = self.describe_thing(thing_name) - all_thing_groups = self.list_thing_groups(None, None, None) - ret = [] - for thing_group in all_thing_groups: - if thing.arn in thing_group.things: - ret.append({ - 'groupName': thing_group.thing_group_name, - 'groupArn': thing_group.arn - }) - return ret - - def update_thing_groups_for_thing(self, thing_name, thing_groups_to_add, thing_groups_to_remove): - thing = self.describe_thing(thing_name) - for thing_group_name in thing_groups_to_add: - thing_group = self.describe_thing_group(thing_group_name) - self.add_thing_to_thing_group( - thing_group.thing_group_name, None, - thing.thing_name, None - ) - for thing_group_name in thing_groups_to_remove: - thing_group = self.describe_thing_group(thing_group_name) - self.remove_thing_from_thing_group( - thing_group.thing_group_name, None, - thing.thing_name, None - ) - - def create_job(self, job_id, targets, document_source, document, description, presigned_url_config, - target_selection, job_executions_rollout_config, document_parameters): - job = FakeJob(job_id, targets, document_source, document, description, presigned_url_config, target_selection, - job_executions_rollout_config, document_parameters, self.region_name) - self.jobs[job_id] = job - return job.job_arn, job_id, description - - def describe_job(self, job_id): - return self.jobs[job_id] - - def get_job_document(self, job_id): - return self.jobs[job_id] - - -available_regions = boto3.session.Session().get_available_regions("iot") 
-iot_backends = {region: IoTBackend(region) for region in available_regions} +from __future__ import unicode_literals + +import hashlib +import random +import re +import string +import time +import uuid +from collections import OrderedDict +from datetime import datetime + +import boto3 + +from moto.core import BaseBackend, BaseModel +from .exceptions import ( + ResourceNotFoundException, + InvalidRequestException, + VersionConflictException +) + + +class FakeThing(BaseModel): + def __init__(self, thing_name, thing_type, attributes, region_name): + self.region_name = region_name + self.thing_name = thing_name + self.thing_type = thing_type + self.attributes = attributes + self.arn = 'arn:aws:iot:%s:1:thing/%s' % (self.region_name, thing_name) + self.version = 1 + # TODO: we need to handle 'version'? + + # for iot-data + self.thing_shadow = None + + def to_dict(self, include_default_client_id=False): + obj = { + 'thingName': self.thing_name, + 'thingArn': self.arn, + 'attributes': self.attributes, + 'version': self.version + } + if self.thing_type: + obj['thingTypeName'] = self.thing_type.thing_type_name + if include_default_client_id: + obj['defaultClientId'] = self.thing_name + return obj + + +class FakeThingType(BaseModel): + def __init__(self, thing_type_name, thing_type_properties, region_name): + self.region_name = region_name + self.thing_type_name = thing_type_name + self.thing_type_properties = thing_type_properties + self.thing_type_id = str(uuid.uuid4()) # I don't know the rule of id + t = time.time() + self.metadata = { + 'deprecated': False, + 'creationData': int(t * 1000) / 1000.0 + } + self.arn = 'arn:aws:iot:%s:1:thingtype/%s' % (self.region_name, thing_type_name) + + def to_dict(self): + return { + 'thingTypeName': self.thing_type_name, + 'thingTypeId': self.thing_type_id, + 'thingTypeProperties': self.thing_type_properties, + 'thingTypeMetadata': self.metadata + } + + +class FakeThingGroup(BaseModel): + def __init__(self, thing_group_name, parent_group_name, thing_group_properties, region_name): + self.region_name = region_name + self.thing_group_name = thing_group_name + self.thing_group_id = str(uuid.uuid4()) # I don't know the rule of id + self.version = 1 # TODO: tmp + self.parent_group_name = parent_group_name + self.thing_group_properties = thing_group_properties or {} + t = time.time() + self.metadata = { + 'creationData': int(t * 1000) / 1000.0 + } + self.arn = 'arn:aws:iot:%s:1:thinggroup/%s' % (self.region_name, thing_group_name) + self.things = OrderedDict() + + def to_dict(self): + return { + 'thingGroupName': self.thing_group_name, + 'thingGroupId': self.thing_group_id, + 'version': self.version, + 'thingGroupProperties': self.thing_group_properties, + 'thingGroupMetadata': self.metadata + } + + +class FakeCertificate(BaseModel): + def __init__(self, certificate_pem, status, region_name): + m = hashlib.sha256() + m.update(str(uuid.uuid4()).encode('utf-8')) + self.certificate_id = m.hexdigest() + self.arn = 'arn:aws:iot:%s:1:cert/%s' % (region_name, self.certificate_id) + self.certificate_pem = certificate_pem + self.status = status + + # TODO: must adjust + self.owner = '1' + self.transfer_data = {} + self.creation_date = time.time() + self.last_modified_date = self.creation_date + self.ca_certificate_id = None + + def to_dict(self): + return { + 'certificateArn': self.arn, + 'certificateId': self.certificate_id, + 'status': self.status, + 'creationDate': self.creation_date + } + + def to_description_dict(self): + """ + You might need keys below in some 
situation + - caCertificateId + - previousOwnedBy + """ + return { + 'certificateArn': self.arn, + 'certificateId': self.certificate_id, + 'status': self.status, + 'certificatePem': self.certificate_pem, + 'ownedBy': self.owner, + 'creationDate': self.creation_date, + 'lastModifiedDate': self.last_modified_date, + 'transferData': self.transfer_data + } + + +class FakePolicy(BaseModel): + def __init__(self, name, document, region_name, default_version_id='1'): + self.name = name + self.document = document + self.arn = 'arn:aws:iot:%s:1:policy/%s' % (region_name, name) + self.default_version_id = default_version_id + self.versions = [FakePolicyVersion(self.name, document, True, region_name)] + + def to_get_dict(self): + return { + 'policyName': self.name, + 'policyArn': self.arn, + 'policyDocument': self.document, + 'defaultVersionId': self.default_version_id + } + + def to_dict_at_creation(self): + return { + 'policyName': self.name, + 'policyArn': self.arn, + 'policyDocument': self.document, + 'policyVersionId': self.default_version_id + } + + def to_dict(self): + return { + 'policyName': self.name, + 'policyArn': self.arn, + } + + +class FakePolicyVersion(object): + + def __init__(self, + policy_name, + document, + is_default, + region_name): + self.name = policy_name + self.arn = 'arn:aws:iot:%s:1:policy/%s' % (region_name, policy_name) + self.document = document or {} + self.is_default = is_default + self.version_id = '1' + + self.create_datetime = time.mktime(datetime(2015, 1, 1).timetuple()) + self.last_modified_datetime = time.mktime(datetime(2015, 1, 2).timetuple()) + + def to_get_dict(self): + return { + 'policyName': self.name, + 'policyArn': self.arn, + 'policyDocument': self.document, + 'policyVersionId': self.version_id, + 'isDefaultVersion': self.is_default, + 'creationDate': self.create_datetime, + 'lastModifiedDate': self.last_modified_datetime, + 'generationId': self.version_id + } + + def to_dict_at_creation(self): + return { + 'policyArn': self.arn, + 'policyDocument': self.document, + 'policyVersionId': self.version_id, + 'isDefaultVersion': self.is_default + } + + def to_dict(self): + return { + 'versionId': self.version_id, + 'isDefaultVersion': self.is_default, + 'createDate': self.create_datetime, + } + + +class FakeJob(BaseModel): + JOB_ID_REGEX_PATTERN = "[a-zA-Z0-9_-]" + JOB_ID_REGEX = re.compile(JOB_ID_REGEX_PATTERN) + + def __init__(self, job_id, targets, document_source, document, description, presigned_url_config, target_selection, + job_executions_rollout_config, document_parameters, region_name): + if not self._job_id_matcher(self.JOB_ID_REGEX, job_id): + raise InvalidRequestException() + + self.region_name = region_name + self.job_id = job_id + self.job_arn = 'arn:aws:iot:%s:1:job/%s' % (self.region_name, job_id) + self.targets = targets + self.document_source = document_source + self.document = document + self.description = description + self.presigned_url_config = presigned_url_config + self.target_selection = target_selection + self.job_executions_rollout_config = job_executions_rollout_config + self.status = None # IN_PROGRESS | CANCELED | COMPLETED + self.comment = None + self.created_at = time.mktime(datetime(2015, 1, 1).timetuple()) + self.last_updated_at = time.mktime(datetime(2015, 1, 1).timetuple()) + self.completed_at = None + self.job_process_details = { + 'processingTargets': targets, + 'numberOfQueuedThings': 1, + 'numberOfCanceledThings': 0, + 'numberOfSucceededThings': 0, + 'numberOfFailedThings': 0, + 'numberOfRejectedThings': 0, + 
'numberOfInProgressThings': 0,
+            'numberOfRemovedThings': 0
+        }
+        self.document_parameters = document_parameters
+
+    def to_dict(self):
+        obj = {
+            'jobArn': self.job_arn,
+            'jobId': self.job_id,
+            'targets': self.targets,
+            'description': self.description,
+            'presignedUrlConfig': self.presigned_url_config,
+            'targetSelection': self.target_selection,
+            'jobExecutionsRolloutConfig': self.job_executions_rollout_config,
+            'status': self.status,
+            'comment': self.comment,
+            'createdAt': self.created_at,
+            'lastUpdatedAt': self.last_updated_at,
+            'completedAt': self.completed_at,
+            'jobProcessDetails': self.job_process_details,
+            'documentParameters': self.document_parameters,
+            'document': self.document,
+            'documentSource': self.document_source
+        }
+
+        return obj
+
+    def _job_id_matcher(self, regex, argument):
+        regex_match = regex.match(argument)
+        length_match = len(argument) <= 64
+        return regex_match and length_match
+
+
+class FakeJobExecution(BaseModel):
+
+    def __init__(self, job_id, thing_arn, status='QUEUED', force_canceled=False, status_details_map=None):
+        self.job_id = job_id
+        self.status = status  # QUEUED | IN_PROGRESS | SUCCEEDED | FAILED | REJECTED | REMOVED | CANCELED
+        self.force_canceled = force_canceled
+        self.status_details_map = status_details_map or {}
+        self.thing_arn = thing_arn
+        self.queued_at = time.mktime(datetime(2015, 1, 1).timetuple())
+        self.started_at = time.mktime(datetime(2015, 1, 1).timetuple())
+        self.last_updated_at = time.mktime(datetime(2015, 1, 1).timetuple())
+        self.execution_number = 123
+        self.version_number = 123
+        self.approximate_seconds_before_time_out = 123
+
+    def to_dict(self):
+        obj = {
+            'jobId': self.job_id,
+            'status': self.status,
+            'forceCancel': self.force_canceled,
+            'statusDetails': {'detailsMap': self.status_details_map},
+            'thingArn': self.thing_arn,
+            'queuedAt': self.queued_at,
+            'startedAt': self.started_at,
+            'lastUpdatedAt': self.last_updated_at,
+            'executionNumber': self.execution_number,
+            'versionNumber': self.version_number,
+            'approximateSecondsBeforeTimedOut': self.approximate_seconds_before_time_out
+        }
+
+        return obj
+
+
+class IoTBackend(BaseBackend):
+    def __init__(self, region_name=None):
+        super(IoTBackend, self).__init__()
+        self.region_name = region_name
+        self.things = OrderedDict()
+        self.jobs = OrderedDict()
+        self.job_executions = OrderedDict()
+        self.thing_types = OrderedDict()
+        self.thing_groups = OrderedDict()
+        self.certificates = OrderedDict()
+        self.policies = OrderedDict()
+        self.principal_policies = OrderedDict()
+        self.principal_things = OrderedDict()
+
+    def reset(self):
+        region_name = self.region_name
+        self.__dict__ = {}
+        self.__init__(region_name)
+
+    def create_thing(self, thing_name, thing_type_name, attribute_payload):
+        thing_types = self.list_thing_types()
+        thing_type = None
+        if thing_type_name:
+            filtered_thing_types = [_ for _ in thing_types if _.thing_type_name == thing_type_name]
+            if len(filtered_thing_types) == 0:
+                raise ResourceNotFoundException()
+            thing_type = filtered_thing_types[0]
+        if attribute_payload is None:
+            attributes = {}
+        elif 'attributes' not in attribute_payload:
+            attributes = {}
+        else:
+            attributes = attribute_payload['attributes']
+        thing = FakeThing(thing_name, thing_type, attributes, self.region_name)
+        self.things[thing.arn] = thing
+        return thing.thing_name, thing.arn
+
+    def create_thing_type(self, thing_type_name, thing_type_properties):
+        if thing_type_properties is None:
+            thing_type_properties = {}
+        thing_type = FakeThingType(thing_type_name, thing_type_properties, self.region_name)
+        
self.thing_types[thing_type.arn] = thing_type + return thing_type.thing_type_name, thing_type.arn + + def list_thing_types(self, thing_type_name=None): + if thing_type_name: + # It's weird but thing_type_name is filtered by forward match, not complete match + return [_ for _ in self.thing_types.values() if _.thing_type_name.startswith(thing_type_name)] + return self.thing_types.values() + + def list_things(self, attribute_name, attribute_value, thing_type_name, max_results, token): + all_things = [_.to_dict() for _ in self.things.values()] + if attribute_name is not None and thing_type_name is not None: + filtered_things = list(filter(lambda elem: + attribute_name in elem["attributes"] and + elem["attributes"][attribute_name] == attribute_value and + "thingTypeName" in elem and + elem["thingTypeName"] == thing_type_name, all_things)) + elif attribute_name is not None and thing_type_name is None: + filtered_things = list(filter(lambda elem: + attribute_name in elem["attributes"] and + elem["attributes"][attribute_name] == attribute_value, all_things)) + elif attribute_name is None and thing_type_name is not None: + filtered_things = list( + filter(lambda elem: "thingTypeName" in elem and elem["thingTypeName"] == thing_type_name, all_things)) + else: + filtered_things = all_things + + if token is None: + things = filtered_things[0:max_results] + next_token = str(max_results) if len(filtered_things) > max_results else None + else: + token = int(token) + things = filtered_things[token:token + max_results] + next_token = str(token + max_results) if len(filtered_things) > token + max_results else None + + return things, next_token + + def describe_thing(self, thing_name): + things = [_ for _ in self.things.values() if _.thing_name == thing_name] + if len(things) == 0: + raise ResourceNotFoundException() + return things[0] + + def describe_thing_type(self, thing_type_name): + thing_types = [_ for _ in self.thing_types.values() if _.thing_type_name == thing_type_name] + if len(thing_types) == 0: + raise ResourceNotFoundException() + return thing_types[0] + + def delete_thing(self, thing_name, expected_version): + # TODO: handle expected_version + + # can raise ResourceNotFoundError + thing = self.describe_thing(thing_name) + del self.things[thing.arn] + + def delete_thing_type(self, thing_type_name): + # can raise ResourceNotFoundError + thing_type = self.describe_thing_type(thing_type_name) + del self.thing_types[thing_type.arn] + + def update_thing(self, thing_name, thing_type_name, attribute_payload, expected_version, remove_thing_type): + # if attributes payload = {}, nothing + thing = self.describe_thing(thing_name) + thing_type = None + + if remove_thing_type and thing_type_name: + raise InvalidRequestException() + + # thing_type + if thing_type_name: + thing_types = self.list_thing_types() + filtered_thing_types = [_ for _ in thing_types if _.thing_type_name == thing_type_name] + if len(filtered_thing_types) == 0: + raise ResourceNotFoundException() + thing_type = filtered_thing_types[0] + thing.thing_type = thing_type + + if remove_thing_type: + thing.thing_type = None + + # attribute + if attribute_payload is not None and 'attributes' in attribute_payload: + do_merge = attribute_payload.get('merge', False) + attributes = attribute_payload['attributes'] + if not do_merge: + thing.attributes = attributes + else: + thing.attributes.update(attributes) + + def _random_string(self): + n = 20 + random_str = ''.join([random.choice(string.ascii_letters + string.digits) for i in range(n)]) + return 
random_str + + def create_keys_and_certificate(self, set_as_active): + # implement here + # caCertificate can be blank + key_pair = { + 'PublicKey': self._random_string(), + 'PrivateKey': self._random_string() + } + certificate_pem = self._random_string() + status = 'ACTIVE' if set_as_active else 'INACTIVE' + certificate = FakeCertificate(certificate_pem, status, self.region_name) + self.certificates[certificate.certificate_id] = certificate + return certificate, key_pair + + def delete_certificate(self, certificate_id): + self.describe_certificate(certificate_id) + del self.certificates[certificate_id] + + def describe_certificate(self, certificate_id): + certs = [_ for _ in self.certificates.values() if _.certificate_id == certificate_id] + if len(certs) == 0: + raise ResourceNotFoundException() + return certs[0] + + def list_certificates(self): + return self.certificates.values() + + def update_certificate(self, certificate_id, new_status): + cert = self.describe_certificate(certificate_id) + # TODO: validate new_status + cert.status = new_status + + def create_policy(self, policy_name, policy_document): + policy = FakePolicy(policy_name, policy_document, self.region_name) + self.policies[policy.name] = policy + return policy + + def attach_policy(self, policy_name, target): + principal = self._get_principal(target) + policy = self.get_policy(policy_name) + k = (target, policy_name) + if k in self.principal_policies: + return + self.principal_policies[k] = (principal, policy) + + def detach_policy(self, policy_name, target): + # this may raises ResourceNotFoundException + self._get_principal(target) + self.get_policy(policy_name) + + k = (target, policy_name) + if k not in self.principal_policies: + raise ResourceNotFoundException() + del self.principal_policies[k] + + def list_attached_policies(self, target): + policies = [v[1] for k, v in self.principal_policies.items() if k[0] == target] + return policies + + def list_policies(self): + policies = self.policies.values() + return policies + + def get_policy(self, policy_name): + policies = [_ for _ in self.policies.values() if _.name == policy_name] + if len(policies) == 0: + raise ResourceNotFoundException() + return policies[0] + + def delete_policy(self, policy_name): + policy = self.get_policy(policy_name) + del self.policies[policy.name] + + def create_policy_version(self, policy_name, policy_document, set_as_default): + policy = self.get_policy(policy_name) + if not policy: + raise ResourceNotFoundException() + version = FakePolicyVersion(policy_name, policy_document, set_as_default, self.region_name) + policy.versions.append(version) + version.version_id = '{0}'.format(len(policy.versions)) + if set_as_default: + self.set_default_policy_version(policy_name, version.version_id) + return version + + def set_default_policy_version(self, policy_name, version_id): + policy = self.get_policy(policy_name) + if not policy: + raise ResourceNotFoundException() + for version in policy.versions: + if version.version_id == version_id: + version.is_default = True + policy.default_version_id = version.version_id + policy.document = version.document + else: + version.is_default = False + + def get_policy_version(self, policy_name, version_id): + policy = self.get_policy(policy_name) + if not policy: + raise ResourceNotFoundException() + for version in policy.versions: + if version.version_id == version_id: + return version + raise ResourceNotFoundException() + + def list_policy_versions(self, policy_name): + policy = 
self.get_policy(policy_name)
+        if not policy:
+            raise ResourceNotFoundException()
+        return policy.versions
+
+    def delete_policy_version(self, policy_name, version_id):
+        policy = self.get_policy(policy_name)
+        if not policy:
+            raise ResourceNotFoundException()
+        if version_id == policy.default_version_id:
+            raise InvalidRequestException(
+                "Cannot delete the default version of a policy")
+        for i, v in enumerate(policy.versions):
+            if v.version_id == version_id:
+                del policy.versions[i]
+                return
+        raise ResourceNotFoundException()
+
+    def _get_principal(self, principal_arn):
+        """
+        raise ResourceNotFoundException
+        """
+        if ':cert/' in principal_arn:
+            certs = [_ for _ in self.certificates.values() if _.arn == principal_arn]
+            if len(certs) == 0:
+                raise ResourceNotFoundException()
+            principal = certs[0]
+            return principal
+        else:
+            # TODO: search for cognito_ids
+            pass
+        raise ResourceNotFoundException()
+
+    def attach_principal_policy(self, policy_name, principal_arn):
+        principal = self._get_principal(principal_arn)
+        policy = self.get_policy(policy_name)
+        k = (principal_arn, policy_name)
+        if k in self.principal_policies:
+            return
+        self.principal_policies[k] = (principal, policy)
+
+    def detach_principal_policy(self, policy_name, principal_arn):
+        # this may raise ResourceNotFoundException
+        self._get_principal(principal_arn)
+        self.get_policy(policy_name)
+
+        k = (principal_arn, policy_name)
+        if k not in self.principal_policies:
+            raise ResourceNotFoundException()
+        del self.principal_policies[k]
+
+    def list_principal_policies(self, principal_arn):
+        policies = [v[1] for k, v in self.principal_policies.items() if k[0] == principal_arn]
+        return policies
+
+    def list_policy_principals(self, policy_name):
+        principals = [k[0] for k, v in self.principal_policies.items() if k[1] == policy_name]
+        return principals
+
+    def attach_thing_principal(self, thing_name, principal_arn):
+        principal = self._get_principal(principal_arn)
+        thing = self.describe_thing(thing_name)
+        k = (principal_arn, thing_name)
+        if k in self.principal_things:
+            return
+        self.principal_things[k] = (principal, thing)
+
+    def detach_thing_principal(self, thing_name, principal_arn):
+        # this may raise ResourceNotFoundException
+        self._get_principal(principal_arn)
+        self.describe_thing(thing_name)
+
+        k = (principal_arn, thing_name)
+        if k not in self.principal_things:
+            raise ResourceNotFoundException()
+        del self.principal_things[k]
+
+    def list_principal_things(self, principal_arn):
+        # keys are (principal_arn, thing_name) pairs, so return the thing names
+        thing_names = [k[1] for k, v in self.principal_things.items() if k[0] == principal_arn]
+        return thing_names
+
+    def list_thing_principals(self, thing_name):
+        principals = [k[0] for k, v in self.principal_things.items() if k[1] == thing_name]
+        return principals
+
+    def describe_thing_group(self, thing_group_name):
+        thing_groups = [_ for _ in self.thing_groups.values() if _.thing_group_name == thing_group_name]
+        if len(thing_groups) == 0:
+            raise ResourceNotFoundException()
+        return thing_groups[0]
+
+    def create_thing_group(self, thing_group_name, parent_group_name, thing_group_properties):
+        thing_group = FakeThingGroup(thing_group_name, parent_group_name, thing_group_properties, self.region_name)
+        self.thing_groups[thing_group.arn] = thing_group
+        return thing_group.thing_group_name, thing_group.arn, thing_group.thing_group_id
+
+    def delete_thing_group(self, thing_group_name, expected_version):
+        # TODO: handle expected_version
+        thing_group = self.describe_thing_group(thing_group_name)
+        del self.thing_groups[thing_group.arn]
+
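+    # TODO: implement parent_group, name_prefix_filter and recursive; for now every thing group is returned
+    def 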
list_thing_groups(self, parent_group, name_prefix_filter, recursive): + thing_groups = self.thing_groups.values() + return thing_groups + + def update_thing_group(self, thing_group_name, thing_group_properties, expected_version): + thing_group = self.describe_thing_group(thing_group_name) + if expected_version and expected_version != thing_group.version: + raise VersionConflictException(thing_group_name) + attribute_payload = thing_group_properties.get('attributePayload', None) + if attribute_payload is not None and 'attributes' in attribute_payload: + do_merge = attribute_payload.get('merge', False) + attributes = attribute_payload['attributes'] + if not do_merge: + thing_group.thing_group_properties['attributePayload']['attributes'] = attributes + else: + thing_group.thing_group_properties['attributePayload']['attributes'].update(attributes) + elif attribute_payload is not None and 'attributes' not in attribute_payload: + thing_group.attributes = {} + thing_group.version = thing_group.version + 1 + return thing_group.version + + def _identify_thing_group(self, thing_group_name, thing_group_arn): + # identify thing group + if thing_group_name is None and thing_group_arn is None: + raise InvalidRequestException( + ' Both thingGroupArn and thingGroupName are empty. Need to specify at least one of them' + ) + if thing_group_name is not None: + thing_group = self.describe_thing_group(thing_group_name) + if thing_group_arn and thing_group.arn != thing_group_arn: + raise InvalidRequestException( + 'ThingGroupName thingGroupArn does not match specified thingGroupName in request' + ) + elif thing_group_arn is not None: + if thing_group_arn not in self.thing_groups: + raise InvalidRequestException() + thing_group = self.thing_groups[thing_group_arn] + return thing_group + + def _identify_thing(self, thing_name, thing_arn): + # identify thing + if thing_name is None and thing_arn is None: + raise InvalidRequestException( + 'Both thingArn and thingName are empty. 
Need to specify at least one of them'
+            )
+        if thing_name is not None:
+            thing = self.describe_thing(thing_name)
+            if thing_arn and thing.arn != thing_arn:
+                raise InvalidRequestException(
+                    'ThingName thingArn does not match specified thingName in request'
+                )
+        elif thing_arn is not None:
+            if thing_arn not in self.things:
+                raise InvalidRequestException()
+            thing = self.things[thing_arn]
+        return thing
+
+    def add_thing_to_thing_group(self, thing_group_name, thing_group_arn, thing_name, thing_arn):
+        thing_group = self._identify_thing_group(thing_group_name, thing_group_arn)
+        thing = self._identify_thing(thing_name, thing_arn)
+        if thing.arn in thing_group.things:
+            # aws ignores duplicate registration
+            return
+        thing_group.things[thing.arn] = thing
+
+    def remove_thing_from_thing_group(self, thing_group_name, thing_group_arn, thing_name, thing_arn):
+        thing_group = self._identify_thing_group(thing_group_name, thing_group_arn)
+        thing = self._identify_thing(thing_name, thing_arn)
+        if thing.arn not in thing_group.things:
+            # aws ignores non-registered thing
+            return
+        del thing_group.things[thing.arn]
+
+    def list_things_in_thing_group(self, thing_group_name, recursive):
+        thing_group = self.describe_thing_group(thing_group_name)
+        return thing_group.things.values()
+
+    def list_thing_groups_for_thing(self, thing_name):
+        thing = self.describe_thing(thing_name)
+        all_thing_groups = self.list_thing_groups(None, None, None)
+        ret = []
+        for thing_group in all_thing_groups:
+            if thing.arn in thing_group.things:
+                ret.append({
+                    'groupName': thing_group.thing_group_name,
+                    'groupArn': thing_group.arn
+                })
+        return ret
+
+    def update_thing_groups_for_thing(self, thing_name, thing_groups_to_add, thing_groups_to_remove):
+        thing = self.describe_thing(thing_name)
+        for thing_group_name in thing_groups_to_add:
+            thing_group = self.describe_thing_group(thing_group_name)
+            self.add_thing_to_thing_group(
+                thing_group.thing_group_name, None,
+                thing.thing_name, None
+            )
+        for thing_group_name in thing_groups_to_remove:
+            thing_group = self.describe_thing_group(thing_group_name)
+            self.remove_thing_from_thing_group(
+                thing_group.thing_group_name, None,
+                thing.thing_name, None
+            )
+
+    def create_job(self, job_id, targets, document_source, document, description, presigned_url_config,
+                   target_selection, job_executions_rollout_config, document_parameters):
+        job = FakeJob(job_id, targets, document_source, document, description, presigned_url_config, target_selection,
+                      job_executions_rollout_config, document_parameters, self.region_name)
+        self.jobs[job_id] = job
+
+        for thing_arn in targets:
+            # targets are thing ARNs of the form arn:aws:iot:<region>:1:thing/<name>
+            thing_name = thing_arn.split(':')[-1].split('/')[-1]
+            job_execution = FakeJobExecution(job_id, thing_arn)
+            self.job_executions[(job_id, thing_name)] = job_execution
+        return job.job_arn, job_id, description
+
+    def describe_job(self, job_id):
+        return self.jobs[job_id]
+
+    def get_job_document(self, job_id):
+        return self.jobs[job_id]
+
+    def describe_job_execution(self, job_id, thing_name, execution_number):
+        # TODO: filter with execution number
+        return self.job_executions[(job_id, thing_name)]
+
+    def list_job_executions_for_job(self, job_id, status, max_results, next_token):
+        job_executions = [self.job_executions[je] for je in self.job_executions if je[0] == job_id]
+        # TODO: implement filters
+        return job_executions, next_token
+
+
+available_regions = boto3.session.Session().get_available_regions("iot")
+iot_backends = {region: IoTBackend(region) for region in available_regions}
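
For reference, a minimal sketch (not part of the patch) of how the new job-execution
bookkeeping above is exercised through the public API, assuming the usual @mock_iot
decorator and a freshly created thing, as in the tests further down; the function name
and the 'example-*' identifiers are illustrative only:

    import boto3
    from moto import mock_iot

    @mock_iot
    def sketch_list_job_executions_for_job():
        client = boto3.client('iot', region_name='eu-west-1')
        thing = client.create_thing(thingName='example-thing')
        # create_job records one QUEUED FakeJobExecution per target thing ARN
        client.create_job(jobId='example-job', targets=[thing['thingArn']],
                          document='{"field": "value"}')
        summaries = client.list_job_executions_for_job(jobId='example-job')['executionSummaries']
        assert summaries[0]['thingArn'] == thing['thingArn']

diff --git a/moto/iot/responses.py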
b/moto/iot/responses.py index 3ef5bc93ee87..14302cc2f037 100644 --- a/moto/iot/responses.py +++ b/moto/iot/responses.py @@ -1,497 +1,507 @@ -from __future__ import unicode_literals - -import json -from six.moves.urllib.parse import unquote - -from moto.core.responses import BaseResponse -from .models import iot_backends - - -class IoTResponse(BaseResponse): - SERVICE_NAME = 'iot' - - @property - def iot_backend(self): - return iot_backends[self.region] - - def create_thing(self): - thing_name = self._get_param("thingName") - thing_type_name = self._get_param("thingTypeName") - attribute_payload = self._get_param("attributePayload") - thing_name, thing_arn = self.iot_backend.create_thing( - thing_name=thing_name, - thing_type_name=thing_type_name, - attribute_payload=attribute_payload, - ) - return json.dumps(dict(thingName=thing_name, thingArn=thing_arn)) - - def create_thing_type(self): - thing_type_name = self._get_param("thingTypeName") - thing_type_properties = self._get_param("thingTypeProperties") - thing_type_name, thing_type_arn = self.iot_backend.create_thing_type( - thing_type_name=thing_type_name, - thing_type_properties=thing_type_properties, - ) - return json.dumps(dict(thingTypeName=thing_type_name, thingTypeArn=thing_type_arn)) - - def list_thing_types(self): - previous_next_token = self._get_param("nextToken") - max_results = self._get_int_param("maxResults", 50) # not the default, but makes testing easier - thing_type_name = self._get_param("thingTypeName") - thing_types = self.iot_backend.list_thing_types( - thing_type_name=thing_type_name - ) - - thing_types = [_.to_dict() for _ in thing_types] - if previous_next_token is None: - result = thing_types[0:max_results] - next_token = str(max_results) if len(thing_types) > max_results else None - else: - token = int(previous_next_token) - result = thing_types[token:token + max_results] - next_token = str(token + max_results) if len(thing_types) > token + max_results else None - - return json.dumps(dict(thingTypes=result, nextToken=next_token)) - - def list_things(self): - previous_next_token = self._get_param("nextToken") - max_results = self._get_int_param("maxResults", 50) # not the default, but makes testing easier - attribute_name = self._get_param("attributeName") - attribute_value = self._get_param("attributeValue") - thing_type_name = self._get_param("thingTypeName") - things, next_token = self.iot_backend.list_things( - attribute_name=attribute_name, - attribute_value=attribute_value, - thing_type_name=thing_type_name, - max_results=max_results, - token=previous_next_token - ) - - return json.dumps(dict(things=things, nextToken=next_token)) - - def describe_thing(self): - thing_name = self._get_param("thingName") - thing = self.iot_backend.describe_thing( - thing_name=thing_name, - ) - return json.dumps(thing.to_dict(include_default_client_id=True)) - - def describe_thing_type(self): - thing_type_name = self._get_param("thingTypeName") - thing_type = self.iot_backend.describe_thing_type( - thing_type_name=thing_type_name, - ) - return json.dumps(thing_type.to_dict()) - - def delete_thing(self): - thing_name = self._get_param("thingName") - expected_version = self._get_param("expectedVersion") - self.iot_backend.delete_thing( - thing_name=thing_name, - expected_version=expected_version, - ) - return json.dumps(dict()) - - def delete_thing_type(self): - thing_type_name = self._get_param("thingTypeName") - self.iot_backend.delete_thing_type( - thing_type_name=thing_type_name, - ) - return json.dumps(dict()) - - def 
update_thing(self): - thing_name = self._get_param("thingName") - thing_type_name = self._get_param("thingTypeName") - attribute_payload = self._get_param("attributePayload") - expected_version = self._get_param("expectedVersion") - remove_thing_type = self._get_param("removeThingType") - self.iot_backend.update_thing( - thing_name=thing_name, - thing_type_name=thing_type_name, - attribute_payload=attribute_payload, - expected_version=expected_version, - remove_thing_type=remove_thing_type, - ) - return json.dumps(dict()) - - def create_job(self): - job_arn, job_id, description = self.iot_backend.create_job( - job_id=self._get_param("jobId"), - targets=self._get_param("targets"), - description=self._get_param("description"), - document_source=self._get_param("documentSource"), - document=self._get_param("document"), - presigned_url_config=self._get_param("presignedUrlConfig"), - target_selection=self._get_param("targetSelection"), - job_executions_rollout_config=self._get_param("jobExecutionsRolloutConfig"), - document_parameters=self._get_param("documentParameters") - ) - - return json.dumps(dict(jobArn=job_arn, jobId=job_id, description=description)) - - def describe_job(self): - job = self.iot_backend.describe_job(job_id=self._get_param("jobId")) - return json.dumps(dict( - documentSource=job.document_source, - job=dict( - comment=job.comment, - completedAt=job.completed_at, - createdAt=job.created_at, - description=job.description, - documentParameters=job.document_parameters, - jobArn=job.job_arn, - jobExecutionsRolloutConfig=job.job_executions_rollout_config, - jobId=job.job_id, - jobProcessDetails=job.job_process_details, - lastUpdatedAt=job.last_updated_at, - presignedUrlConfig=job.presigned_url_config, - status=job.status, - targets=job.targets, - targetSelection=job.target_selection - ))) - - def get_job_document(self): - job = self.iot_backend.get_job_document(job_id=self._get_param("jobId")) - - if job.document is not None: - return json.dumps({'document': job.document}) - else: - # job.document_source is not None: - # TODO: needs to be implemented to get document_source's content from S3 - return json.dumps({'document': ''}) - - def create_keys_and_certificate(self): - set_as_active = self._get_bool_param("setAsActive") - cert, key_pair = self.iot_backend.create_keys_and_certificate( - set_as_active=set_as_active, - ) - return json.dumps(dict( - certificateArn=cert.arn, - certificateId=cert.certificate_id, - certificatePem=cert.certificate_pem, - keyPair=key_pair - )) - - def delete_certificate(self): - certificate_id = self._get_param("certificateId") - self.iot_backend.delete_certificate( - certificate_id=certificate_id, - ) - return json.dumps(dict()) - - def describe_certificate(self): - certificate_id = self._get_param("certificateId") - certificate = self.iot_backend.describe_certificate( - certificate_id=certificate_id, - ) - return json.dumps(dict(certificateDescription=certificate.to_description_dict())) - - def list_certificates(self): - # page_size = self._get_int_param("pageSize") - # marker = self._get_param("marker") - # ascending_order = self._get_param("ascendingOrder") - certificates = self.iot_backend.list_certificates() - # TODO: implement pagination in the future - return json.dumps(dict(certificates=[_.to_dict() for _ in certificates])) - - def update_certificate(self): - certificate_id = self._get_param("certificateId") - new_status = self._get_param("newStatus") - self.iot_backend.update_certificate( - certificate_id=certificate_id, - 
new_status=new_status, - ) - return json.dumps(dict()) - - def create_policy(self): - policy_name = self._get_param("policyName") - policy_document = self._get_param("policyDocument") - policy = self.iot_backend.create_policy( - policy_name=policy_name, - policy_document=policy_document, - ) - return json.dumps(policy.to_dict_at_creation()) - - def list_policies(self): - # marker = self._get_param("marker") - # page_size = self._get_int_param("pageSize") - # ascending_order = self._get_param("ascendingOrder") - policies = self.iot_backend.list_policies() - - # TODO: implement pagination in the future - return json.dumps(dict(policies=[_.to_dict() for _ in policies])) - - def get_policy(self): - policy_name = self._get_param("policyName") - policy = self.iot_backend.get_policy( - policy_name=policy_name, - ) - return json.dumps(policy.to_get_dict()) - - def delete_policy(self): - policy_name = self._get_param("policyName") - self.iot_backend.delete_policy( - policy_name=policy_name, - ) - return json.dumps(dict()) - - def create_policy_version(self): - policy_name = self._get_param('policyName') - policy_document = self._get_param('policyDocument') - set_as_default = self._get_bool_param('setAsDefault') - policy_version = self.iot_backend.create_policy_version(policy_name, policy_document, set_as_default) - - return json.dumps(dict(policy_version.to_dict_at_creation())) - - def set_default_policy_version(self): - policy_name = self._get_param('policyName') - version_id = self._get_param('policyVersionId') - self.iot_backend.set_default_policy_version(policy_name, version_id) - - return json.dumps(dict()) - - def get_policy_version(self): - policy_name = self._get_param('policyName') - version_id = self._get_param('policyVersionId') - policy_version = self.iot_backend.get_policy_version(policy_name, version_id) - return json.dumps(dict(policy_version.to_get_dict())) - - def list_policy_versions(self): - policy_name = self._get_param('policyName') - policiy_versions = self.iot_backend.list_policy_versions(policy_name=policy_name) - - return json.dumps(dict(policyVersions=[_.to_dict() for _ in policiy_versions])) - - def delete_policy_version(self): - policy_name = self._get_param('policyName') - version_id = self._get_param('policyVersionId') - self.iot_backend.delete_policy_version(policy_name, version_id) - - return json.dumps(dict()) - - def attach_policy(self): - policy_name = self._get_param("policyName") - principal = self._get_param('target') - self.iot_backend.attach_policy( - policy_name=policy_name, - target=principal, - ) - return json.dumps(dict()) - - def detach_policy(self): - policy_name = self._get_param("policyName") - principal = self._get_param('target') - self.iot_backend.detach_policy( - policy_name=policy_name, - target=principal, - ) - return json.dumps(dict()) - - def list_attached_policies(self): - principal = unquote(self._get_param('target')) - # marker = self._get_param("marker") - # page_size = self._get_int_param("pageSize") - policies = self.iot_backend.list_attached_policies( - target=principal - ) - # TODO: implement pagination in the future - next_marker = None - return json.dumps(dict(policies=[_.to_dict() for _ in policies], nextMarker=next_marker)) - - def attach_principal_policy(self): - policy_name = self._get_param("policyName") - principal = self.headers.get('x-amzn-iot-principal') - self.iot_backend.attach_principal_policy( - policy_name=policy_name, - principal_arn=principal, - ) - return json.dumps(dict()) - - def detach_principal_policy(self): - 
policy_name = self._get_param("policyName") - principal = self.headers.get('x-amzn-iot-principal') - self.iot_backend.detach_principal_policy( - policy_name=policy_name, - principal_arn=principal, - ) - return json.dumps(dict()) - - def list_principal_policies(self): - principal = self.headers.get('x-amzn-iot-principal') - # marker = self._get_param("marker") - # page_size = self._get_int_param("pageSize") - # ascending_order = self._get_param("ascendingOrder") - policies = self.iot_backend.list_principal_policies( - principal_arn=principal - ) - # TODO: implement pagination in the future - next_marker = None - return json.dumps(dict(policies=[_.to_dict() for _ in policies], nextMarker=next_marker)) - - def list_policy_principals(self): - policy_name = self.headers.get('x-amzn-iot-policy') - # marker = self._get_param("marker") - # page_size = self._get_int_param("pageSize") - # ascending_order = self._get_param("ascendingOrder") - principals = self.iot_backend.list_policy_principals( - policy_name=policy_name, - ) - # TODO: implement pagination in the future - next_marker = None - return json.dumps(dict(principals=principals, nextMarker=next_marker)) - - def attach_thing_principal(self): - thing_name = self._get_param("thingName") - principal = self.headers.get('x-amzn-principal') - self.iot_backend.attach_thing_principal( - thing_name=thing_name, - principal_arn=principal, - ) - return json.dumps(dict()) - - def detach_thing_principal(self): - thing_name = self._get_param("thingName") - principal = self.headers.get('x-amzn-principal') - self.iot_backend.detach_thing_principal( - thing_name=thing_name, - principal_arn=principal, - ) - return json.dumps(dict()) - - def list_principal_things(self): - next_token = self._get_param("nextToken") - # max_results = self._get_int_param("maxResults") - principal = self.headers.get('x-amzn-principal') - things = self.iot_backend.list_principal_things( - principal_arn=principal, - ) - # TODO: implement pagination in the future - next_token = None - return json.dumps(dict(things=things, nextToken=next_token)) - - def list_thing_principals(self): - thing_name = self._get_param("thingName") - principals = self.iot_backend.list_thing_principals( - thing_name=thing_name, - ) - return json.dumps(dict(principals=principals)) - - def describe_thing_group(self): - thing_group_name = self._get_param("thingGroupName") - thing_group = self.iot_backend.describe_thing_group( - thing_group_name=thing_group_name, - ) - return json.dumps(thing_group.to_dict()) - - def create_thing_group(self): - thing_group_name = self._get_param("thingGroupName") - parent_group_name = self._get_param("parentGroupName") - thing_group_properties = self._get_param("thingGroupProperties") - thing_group_name, thing_group_arn, thing_group_id = self.iot_backend.create_thing_group( - thing_group_name=thing_group_name, - parent_group_name=parent_group_name, - thing_group_properties=thing_group_properties, - ) - return json.dumps(dict( - thingGroupName=thing_group_name, - thingGroupArn=thing_group_arn, - thingGroupId=thing_group_id) - ) - - def delete_thing_group(self): - thing_group_name = self._get_param("thingGroupName") - expected_version = self._get_param("expectedVersion") - self.iot_backend.delete_thing_group( - thing_group_name=thing_group_name, - expected_version=expected_version, - ) - return json.dumps(dict()) - - def list_thing_groups(self): - # next_token = self._get_param("nextToken") - # max_results = self._get_int_param("maxResults") - parent_group = 
self._get_param("parentGroup") - name_prefix_filter = self._get_param("namePrefixFilter") - recursive = self._get_param("recursive") - thing_groups = self.iot_backend.list_thing_groups( - parent_group=parent_group, - name_prefix_filter=name_prefix_filter, - recursive=recursive, - ) - next_token = None - rets = [{'groupName': _.thing_group_name, 'groupArn': _.arn} for _ in thing_groups] - # TODO: implement pagination in the future - return json.dumps(dict(thingGroups=rets, nextToken=next_token)) - - def update_thing_group(self): - thing_group_name = self._get_param("thingGroupName") - thing_group_properties = self._get_param("thingGroupProperties") - expected_version = self._get_param("expectedVersion") - version = self.iot_backend.update_thing_group( - thing_group_name=thing_group_name, - thing_group_properties=thing_group_properties, - expected_version=expected_version, - ) - return json.dumps(dict(version=version)) - - def add_thing_to_thing_group(self): - thing_group_name = self._get_param("thingGroupName") - thing_group_arn = self._get_param("thingGroupArn") - thing_name = self._get_param("thingName") - thing_arn = self._get_param("thingArn") - self.iot_backend.add_thing_to_thing_group( - thing_group_name=thing_group_name, - thing_group_arn=thing_group_arn, - thing_name=thing_name, - thing_arn=thing_arn, - ) - return json.dumps(dict()) - - def remove_thing_from_thing_group(self): - thing_group_name = self._get_param("thingGroupName") - thing_group_arn = self._get_param("thingGroupArn") - thing_name = self._get_param("thingName") - thing_arn = self._get_param("thingArn") - self.iot_backend.remove_thing_from_thing_group( - thing_group_name=thing_group_name, - thing_group_arn=thing_group_arn, - thing_name=thing_name, - thing_arn=thing_arn, - ) - return json.dumps(dict()) - - def list_things_in_thing_group(self): - thing_group_name = self._get_param("thingGroupName") - recursive = self._get_param("recursive") - # next_token = self._get_param("nextToken") - # max_results = self._get_int_param("maxResults") - things = self.iot_backend.list_things_in_thing_group( - thing_group_name=thing_group_name, - recursive=recursive, - ) - next_token = None - thing_names = [_.thing_name for _ in things] - # TODO: implement pagination in the future - return json.dumps(dict(things=thing_names, nextToken=next_token)) - - def list_thing_groups_for_thing(self): - thing_name = self._get_param("thingName") - # next_token = self._get_param("nextToken") - # max_results = self._get_int_param("maxResults") - thing_groups = self.iot_backend.list_thing_groups_for_thing( - thing_name=thing_name - ) - next_token = None - # TODO: implement pagination in the future - return json.dumps(dict(thingGroups=thing_groups, nextToken=next_token)) - - def update_thing_groups_for_thing(self): - thing_name = self._get_param("thingName") - thing_groups_to_add = self._get_param("thingGroupsToAdd") or [] - thing_groups_to_remove = self._get_param("thingGroupsToRemove") or [] - self.iot_backend.update_thing_groups_for_thing( - thing_name=thing_name, - thing_groups_to_add=thing_groups_to_add, - thing_groups_to_remove=thing_groups_to_remove, - ) - return json.dumps(dict()) +from __future__ import unicode_literals + +import json +from six.moves.urllib.parse import unquote + +from moto.core.responses import BaseResponse +from .models import iot_backends + + +class IoTResponse(BaseResponse): + SERVICE_NAME = 'iot' + + @property + def iot_backend(self): + return iot_backends[self.region] + + def create_thing(self): + thing_name = 
self._get_param("thingName") + thing_type_name = self._get_param("thingTypeName") + attribute_payload = self._get_param("attributePayload") + thing_name, thing_arn = self.iot_backend.create_thing( + thing_name=thing_name, + thing_type_name=thing_type_name, + attribute_payload=attribute_payload, + ) + return json.dumps(dict(thingName=thing_name, thingArn=thing_arn)) + + def create_thing_type(self): + thing_type_name = self._get_param("thingTypeName") + thing_type_properties = self._get_param("thingTypeProperties") + thing_type_name, thing_type_arn = self.iot_backend.create_thing_type( + thing_type_name=thing_type_name, + thing_type_properties=thing_type_properties, + ) + return json.dumps(dict(thingTypeName=thing_type_name, thingTypeArn=thing_type_arn)) + + def list_thing_types(self): + previous_next_token = self._get_param("nextToken") + max_results = self._get_int_param("maxResults", 50) # not the default, but makes testing easier + thing_type_name = self._get_param("thingTypeName") + thing_types = self.iot_backend.list_thing_types( + thing_type_name=thing_type_name + ) + + thing_types = [_.to_dict() for _ in thing_types] + if previous_next_token is None: + result = thing_types[0:max_results] + next_token = str(max_results) if len(thing_types) > max_results else None + else: + token = int(previous_next_token) + result = thing_types[token:token + max_results] + next_token = str(token + max_results) if len(thing_types) > token + max_results else None + + return json.dumps(dict(thingTypes=result, nextToken=next_token)) + + def list_things(self): + previous_next_token = self._get_param("nextToken") + max_results = self._get_int_param("maxResults", 50) # not the default, but makes testing easier + attribute_name = self._get_param("attributeName") + attribute_value = self._get_param("attributeValue") + thing_type_name = self._get_param("thingTypeName") + things, next_token = self.iot_backend.list_things( + attribute_name=attribute_name, + attribute_value=attribute_value, + thing_type_name=thing_type_name, + max_results=max_results, + token=previous_next_token + ) + + return json.dumps(dict(things=things, nextToken=next_token)) + + def describe_thing(self): + thing_name = self._get_param("thingName") + thing = self.iot_backend.describe_thing( + thing_name=thing_name, + ) + return json.dumps(thing.to_dict(include_default_client_id=True)) + + def describe_thing_type(self): + thing_type_name = self._get_param("thingTypeName") + thing_type = self.iot_backend.describe_thing_type( + thing_type_name=thing_type_name, + ) + return json.dumps(thing_type.to_dict()) + + def delete_thing(self): + thing_name = self._get_param("thingName") + expected_version = self._get_param("expectedVersion") + self.iot_backend.delete_thing( + thing_name=thing_name, + expected_version=expected_version, + ) + return json.dumps(dict()) + + def delete_thing_type(self): + thing_type_name = self._get_param("thingTypeName") + self.iot_backend.delete_thing_type( + thing_type_name=thing_type_name, + ) + return json.dumps(dict()) + + def update_thing(self): + thing_name = self._get_param("thingName") + thing_type_name = self._get_param("thingTypeName") + attribute_payload = self._get_param("attributePayload") + expected_version = self._get_param("expectedVersion") + remove_thing_type = self._get_param("removeThingType") + self.iot_backend.update_thing( + thing_name=thing_name, + thing_type_name=thing_type_name, + attribute_payload=attribute_payload, + expected_version=expected_version, + remove_thing_type=remove_thing_type, + ) + 
return json.dumps(dict()) + + def create_job(self): + job_arn, job_id, description = self.iot_backend.create_job( + job_id=self._get_param("jobId"), + targets=self._get_param("targets"), + description=self._get_param("description"), + document_source=self._get_param("documentSource"), + document=self._get_param("document"), + presigned_url_config=self._get_param("presignedUrlConfig"), + target_selection=self._get_param("targetSelection"), + job_executions_rollout_config=self._get_param("jobExecutionsRolloutConfig"), + document_parameters=self._get_param("documentParameters") + ) + + return json.dumps(dict(jobArn=job_arn, jobId=job_id, description=description)) + + def describe_job(self): + job = self.iot_backend.describe_job(job_id=self._get_param("jobId")) + return json.dumps(dict( + documentSource=job.document_source, + job=dict( + comment=job.comment, + completedAt=job.completed_at, + createdAt=job.created_at, + description=job.description, + documentParameters=job.document_parameters, + jobArn=job.job_arn, + jobExecutionsRolloutConfig=job.job_executions_rollout_config, + jobId=job.job_id, + jobProcessDetails=job.job_process_details, + lastUpdatedAt=job.last_updated_at, + presignedUrlConfig=job.presigned_url_config, + status=job.status, + targets=job.targets, + targetSelection=job.target_selection + ))) + + def get_job_document(self): + job = self.iot_backend.get_job_document(job_id=self._get_param("jobId")) + + if job.document is not None: + return json.dumps({'document': job.document}) + else: + # job.document_source is not None: + # TODO: needs to be implemented to get document_source's content from S3 + return json.dumps({'document': ''}) + + def list_job_executions_for_job(self): + job_executions, next_token = self.iot_backend.list_job_executions_for_job(job_id=self._get_param("jobId"), + status=self._get_param("status"), + max_results=self._get_param( + "maxResults"), + next_token=self._get_param( + "nextToken")) + + return json.dumps(dict(executionSummaries=[_.to_dict() for _ in job_executions], nextToken=next_token)) + + def create_keys_and_certificate(self): + set_as_active = self._get_bool_param("setAsActive") + cert, key_pair = self.iot_backend.create_keys_and_certificate( + set_as_active=set_as_active, + ) + return json.dumps(dict( + certificateArn=cert.arn, + certificateId=cert.certificate_id, + certificatePem=cert.certificate_pem, + keyPair=key_pair + )) + + def delete_certificate(self): + certificate_id = self._get_param("certificateId") + self.iot_backend.delete_certificate( + certificate_id=certificate_id, + ) + return json.dumps(dict()) + + def describe_certificate(self): + certificate_id = self._get_param("certificateId") + certificate = self.iot_backend.describe_certificate( + certificate_id=certificate_id, + ) + return json.dumps(dict(certificateDescription=certificate.to_description_dict())) + + def list_certificates(self): + # page_size = self._get_int_param("pageSize") + # marker = self._get_param("marker") + # ascending_order = self._get_param("ascendingOrder") + certificates = self.iot_backend.list_certificates() + # TODO: implement pagination in the future + return json.dumps(dict(certificates=[_.to_dict() for _ in certificates])) + + def update_certificate(self): + certificate_id = self._get_param("certificateId") + new_status = self._get_param("newStatus") + self.iot_backend.update_certificate( + certificate_id=certificate_id, + new_status=new_status, + ) + return json.dumps(dict()) + + def create_policy(self): + policy_name = self._get_param("policyName") + 
policy_document = self._get_param("policyDocument")
+        policy = self.iot_backend.create_policy(
+            policy_name=policy_name,
+            policy_document=policy_document,
+        )
+        return json.dumps(policy.to_dict_at_creation())
+
+    def list_policies(self):
+        # marker = self._get_param("marker")
+        # page_size = self._get_int_param("pageSize")
+        # ascending_order = self._get_param("ascendingOrder")
+        policies = self.iot_backend.list_policies()
+
+        # TODO: implement pagination in the future
+        return json.dumps(dict(policies=[_.to_dict() for _ in policies]))
+
+    def get_policy(self):
+        policy_name = self._get_param("policyName")
+        policy = self.iot_backend.get_policy(
+            policy_name=policy_name,
+        )
+        return json.dumps(policy.to_get_dict())
+
+    def delete_policy(self):
+        policy_name = self._get_param("policyName")
+        self.iot_backend.delete_policy(
+            policy_name=policy_name,
+        )
+        return json.dumps(dict())
+
+    def create_policy_version(self):
+        policy_name = self._get_param('policyName')
+        policy_document = self._get_param('policyDocument')
+        set_as_default = self._get_bool_param('setAsDefault')
+        policy_version = self.iot_backend.create_policy_version(policy_name, policy_document, set_as_default)
+
+        return json.dumps(dict(policy_version.to_dict_at_creation()))
+
+    def set_default_policy_version(self):
+        policy_name = self._get_param('policyName')
+        version_id = self._get_param('policyVersionId')
+        self.iot_backend.set_default_policy_version(policy_name, version_id)
+
+        return json.dumps(dict())
+
+    def get_policy_version(self):
+        policy_name = self._get_param('policyName')
+        version_id = self._get_param('policyVersionId')
+        policy_version = self.iot_backend.get_policy_version(policy_name, version_id)
+        return json.dumps(dict(policy_version.to_get_dict()))
+
+    def list_policy_versions(self):
+        policy_name = self._get_param('policyName')
+        policy_versions = self.iot_backend.list_policy_versions(policy_name=policy_name)
+
+        return json.dumps(dict(policyVersions=[_.to_dict() for _ in policy_versions]))
+
+    def delete_policy_version(self):
+        policy_name = self._get_param('policyName')
+        version_id = self._get_param('policyVersionId')
+        self.iot_backend.delete_policy_version(policy_name, version_id)
+
+        return json.dumps(dict())
+
+    def attach_policy(self):
+        policy_name = self._get_param("policyName")
+        principal = self._get_param('target')
+        self.iot_backend.attach_policy(
+            policy_name=policy_name,
+            target=principal,
+        )
+        return json.dumps(dict())
+
+    def detach_policy(self):
+        policy_name = self._get_param("policyName")
+        principal = self._get_param('target')
+        self.iot_backend.detach_policy(
+            policy_name=policy_name,
+            target=principal,
+        )
+        return json.dumps(dict())
+
+    def list_attached_policies(self):
+        principal = unquote(self._get_param('target'))
+        # marker = self._get_param("marker")
+        # page_size = self._get_int_param("pageSize")
+        policies = self.iot_backend.list_attached_policies(
+            target=principal
+        )
+        # TODO: implement pagination in the future
+        next_marker = None
+        return json.dumps(dict(policies=[_.to_dict() for _ in policies], nextMarker=next_marker))
+
+    def attach_principal_policy(self):
+        policy_name = self._get_param("policyName")
+        principal = self.headers.get('x-amzn-iot-principal')
+        self.iot_backend.attach_principal_policy(
+            policy_name=policy_name,
+            principal_arn=principal,
+        )
+        return json.dumps(dict())
+
+    def detach_principal_policy(self):
+        policy_name = self._get_param("policyName")
+        principal = self.headers.get('x-amzn-iot-principal')
+        
self.iot_backend.detach_principal_policy( + policy_name=policy_name, + principal_arn=principal, + ) + return json.dumps(dict()) + + def list_principal_policies(self): + principal = self.headers.get('x-amzn-iot-principal') + # marker = self._get_param("marker") + # page_size = self._get_int_param("pageSize") + # ascending_order = self._get_param("ascendingOrder") + policies = self.iot_backend.list_principal_policies( + principal_arn=principal + ) + # TODO: implement pagination in the future + next_marker = None + return json.dumps(dict(policies=[_.to_dict() for _ in policies], nextMarker=next_marker)) + + def list_policy_principals(self): + policy_name = self.headers.get('x-amzn-iot-policy') + # marker = self._get_param("marker") + # page_size = self._get_int_param("pageSize") + # ascending_order = self._get_param("ascendingOrder") + principals = self.iot_backend.list_policy_principals( + policy_name=policy_name, + ) + # TODO: implement pagination in the future + next_marker = None + return json.dumps(dict(principals=principals, nextMarker=next_marker)) + + def attach_thing_principal(self): + thing_name = self._get_param("thingName") + principal = self.headers.get('x-amzn-principal') + self.iot_backend.attach_thing_principal( + thing_name=thing_name, + principal_arn=principal, + ) + return json.dumps(dict()) + + def detach_thing_principal(self): + thing_name = self._get_param("thingName") + principal = self.headers.get('x-amzn-principal') + self.iot_backend.detach_thing_principal( + thing_name=thing_name, + principal_arn=principal, + ) + return json.dumps(dict()) + + def list_principal_things(self): + # next_token = self._get_param("nextToken") + # max_results = self._get_int_param("maxResults") + principal = self.headers.get('x-amzn-principal') + things = self.iot_backend.list_principal_things( + principal_arn=principal, + ) + # TODO: implement pagination in the future + next_token = None + return json.dumps(dict(things=things, nextToken=next_token)) + + def list_thing_principals(self): + thing_name = self._get_param("thingName") + principals = self.iot_backend.list_thing_principals( + thing_name=thing_name, + ) + return json.dumps(dict(principals=principals)) + + def describe_thing_group(self): + thing_group_name = self._get_param("thingGroupName") + thing_group = self.iot_backend.describe_thing_group( + thing_group_name=thing_group_name, + ) + return json.dumps(thing_group.to_dict()) + + def create_thing_group(self): + thing_group_name = self._get_param("thingGroupName") + parent_group_name = self._get_param("parentGroupName") + thing_group_properties = self._get_param("thingGroupProperties") + thing_group_name, thing_group_arn, thing_group_id = self.iot_backend.create_thing_group( + thing_group_name=thing_group_name, + parent_group_name=parent_group_name, + thing_group_properties=thing_group_properties, + ) + return json.dumps(dict( + thingGroupName=thing_group_name, + thingGroupArn=thing_group_arn, + thingGroupId=thing_group_id, + )) + + def delete_thing_group(self): + thing_group_name = self._get_param("thingGroupName") + expected_version = self._get_param("expectedVersion") + self.iot_backend.delete_thing_group( + thing_group_name=thing_group_name, + expected_version=expected_version, + ) + return json.dumps(dict()) + + def list_thing_groups(self): + # next_token = self._get_param("nextToken") + # max_results = self._get_int_param("maxResults") + parent_group = self._get_param("parentGroup") + name_prefix_filter = self._get_param("namePrefixFilter") + recursive = 
self._get_param("recursive") + thing_groups = self.iot_backend.list_thing_groups( + parent_group=parent_group, + name_prefix_filter=name_prefix_filter, + recursive=recursive, + ) + next_token = None + rets = [{'groupName': _.thing_group_name, 'groupArn': _.arn} for _ in thing_groups] + # TODO: implement pagination in the future + return json.dumps(dict(thingGroups=rets, nextToken=next_token)) + + def update_thing_group(self): + thing_group_name = self._get_param("thingGroupName") + thing_group_properties = self._get_param("thingGroupProperties") + expected_version = self._get_param("expectedVersion") + version = self.iot_backend.update_thing_group( + thing_group_name=thing_group_name, + thing_group_properties=thing_group_properties, + expected_version=expected_version, + ) + return json.dumps(dict(version=version)) + + def add_thing_to_thing_group(self): + thing_group_name = self._get_param("thingGroupName") + thing_group_arn = self._get_param("thingGroupArn") + thing_name = self._get_param("thingName") + thing_arn = self._get_param("thingArn") + self.iot_backend.add_thing_to_thing_group( + thing_group_name=thing_group_name, + thing_group_arn=thing_group_arn, + thing_name=thing_name, + thing_arn=thing_arn, + ) + return json.dumps(dict()) + + def remove_thing_from_thing_group(self): + thing_group_name = self._get_param("thingGroupName") + thing_group_arn = self._get_param("thingGroupArn") + thing_name = self._get_param("thingName") + thing_arn = self._get_param("thingArn") + self.iot_backend.remove_thing_from_thing_group( + thing_group_name=thing_group_name, + thing_group_arn=thing_group_arn, + thing_name=thing_name, + thing_arn=thing_arn, + ) + return json.dumps(dict()) + + def list_things_in_thing_group(self): + thing_group_name = self._get_param("thingGroupName") + recursive = self._get_param("recursive") + # next_token = self._get_param("nextToken") + # max_results = self._get_int_param("maxResults") + things = self.iot_backend.list_things_in_thing_group( + thing_group_name=thing_group_name, + recursive=recursive, + ) + next_token = None + thing_names = [_.thing_name for _ in things] + # TODO: implement pagination in the future + return json.dumps(dict(things=thing_names, nextToken=next_token)) + + def list_thing_groups_for_thing(self): + thing_name = self._get_param("thingName") + # next_token = self._get_param("nextToken") + # max_results = self._get_int_param("maxResults") + thing_groups = self.iot_backend.list_thing_groups_for_thing( + thing_name=thing_name + ) + next_token = None + # TODO: implement pagination in the future + return json.dumps(dict(thingGroups=thing_groups, nextToken=next_token)) + + def update_thing_groups_for_thing(self): + thing_name = self._get_param("thingName") + thing_groups_to_add = self._get_param("thingGroupsToAdd") or [] + thing_groups_to_remove = self._get_param("thingGroupsToRemove") or [] + self.iot_backend.update_thing_groups_for_thing( + thing_name=thing_name, + thing_groups_to_add=thing_groups_to_add, + thing_groups_to_remove=thing_groups_to_remove, + ) + return json.dumps(dict()) diff --git a/tests/test_iot/test_iot.py b/tests/test_iot/test_iot.py index 92fb3dfd0900..d5f277d1d3d2 100644 --- a/tests/test_iot/test_iot.py +++ b/tests/test_iot/test_iot.py @@ -874,3 +874,42 @@ def test_get_job_document_with_document(): job_document = client.get_job_document(jobId=job_id) job_document.should.have.key('document').which.should.equal("{\"field\": \"value\"}") + +@mock_iot +def test_list_job_executions_for_job(): + client = boto3.client('iot', 
region_name='eu-west-1') + name = "my-thing" + job_id = "TestJob" + # thing + thing = client.create_thing(thingName=name) + thing.should.have.key('thingName').which.should.equal(name) + thing.should.have.key('thingArn') + + # job document + job_document = { + "field": "value" + } + + job = client.create_job( + jobId=job_id, + targets=[thing["thingArn"]], + document=json.dumps(job_document), + description="Description", + presignedUrlConfig={ + 'roleArn': 'arn:aws:iam::1:role/service-role/iot_job_role', + 'expiresInSec': 123 + }, + targetSelection="CONTINUOUS", + jobExecutionsRolloutConfig={ + 'maximumPerMinute': 10 + } + ) + + job.should.have.key('jobId').which.should.equal(job_id) + job.should.have.key('jobArn') + job.should.have.key('description') + + job_execution = client.list_job_executions_for_job(jobId=job_id) + job_execution.should.have.key('executionSummaries') + job_execution['executionSummaries'][0].should.have.key('jobId').which.should.equal(job_id) + From cfd12b6d19bb26de3935b7538224948196372fe9 Mon Sep 17 00:00:00 2001 From: Stephan Date: Mon, 7 Jan 2019 14:22:12 +0100 Subject: [PATCH 012/658] added IoT job_execution and job mocks --- IMPLEMENTATION_COVERAGE.md | 925 ++++++++++++++++++++++++++++++++++++- moto/iot/exceptions.py | 75 +-- moto/iot/models.py | 127 ++++- moto/iot/responses.py | 84 +++- tests/test_iot/test_iot.py | 260 ++++++++++- 5 files changed, 1399 insertions(+), 72 deletions(-) diff --git a/IMPLEMENTATION_COVERAGE.md b/IMPLEMENTATION_COVERAGE.md index a153b92fcdb5..fcfe31835a1c 100644 --- a/IMPLEMENTATION_COVERAGE.md +++ b/IMPLEMENTATION_COVERAGE.md @@ -33,36 +33,59 @@ - [ ] update_certificate_authority ## alexaforbusiness - 0% implemented +- [ ] approve_skill - [ ] associate_contact_with_address_book - [ ] associate_device_with_room - [ ] associate_skill_group_with_room +- [ ] associate_skill_with_skill_group +- [ ] associate_skill_with_users - [ ] create_address_book +- [ ] create_business_report_schedule +- [ ] create_conference_provider - [ ] create_contact - [ ] create_profile - [ ] create_room - [ ] create_skill_group - [ ] create_user - [ ] delete_address_book +- [ ] delete_business_report_schedule +- [ ] delete_conference_provider - [ ] delete_contact +- [ ] delete_device - [ ] delete_profile - [ ] delete_room - [ ] delete_room_skill_parameter +- [ ] delete_skill_authorization - [ ] delete_skill_group - [ ] delete_user - [ ] disassociate_contact_from_address_book - [ ] disassociate_device_from_room +- [ ] disassociate_skill_from_skill_group +- [ ] disassociate_skill_from_users - [ ] disassociate_skill_group_from_room +- [ ] forget_smart_home_appliances - [ ] get_address_book +- [ ] get_conference_preference +- [ ] get_conference_provider - [ ] get_contact - [ ] get_device - [ ] get_profile - [ ] get_room - [ ] get_room_skill_parameter - [ ] get_skill_group +- [ ] list_business_report_schedules +- [ ] list_conference_providers - [ ] list_device_events - [ ] list_skills +- [ ] list_skills_store_categories +- [ ] list_skills_store_skills_by_category +- [ ] list_smart_home_appliances - [ ] list_tags +- [ ] put_conference_preference - [ ] put_room_skill_parameter +- [ ] put_skill_authorization +- [ ] register_avs_device +- [ ] reject_skill - [ ] resolve_room - [ ] revoke_invitation - [ ] search_address_books @@ -74,15 +97,40 @@ - [ ] search_users - [ ] send_invitation - [ ] start_device_sync +- [ ] start_smart_home_appliance_discovery - [ ] tag_resource - [ ] untag_resource - [ ] update_address_book +- [ ] update_business_report_schedule +- [ ] 
update_conference_provider - [ ] update_contact - [ ] update_device - [ ] update_profile - [ ] update_room - [ ] update_skill_group +## amplify - 0% implemented +- [ ] create_app +- [ ] create_branch +- [ ] create_domain_association +- [ ] delete_app +- [ ] delete_branch +- [ ] delete_domain_association +- [ ] delete_job +- [ ] get_app +- [ ] get_branch +- [ ] get_domain_association +- [ ] get_job +- [ ] list_apps +- [ ] list_branches +- [ ] list_domain_associations +- [ ] list_jobs +- [ ] start_job +- [ ] stop_job +- [ ] update_app +- [ ] update_branch +- [ ] update_domain_association + ## apigateway - 24% implemented - [ ] create_api_key - [ ] create_authorizer @@ -205,6 +253,67 @@ - [ ] update_usage_plan - [ ] update_vpc_link +## apigatewaymanagementapi - 0% implemented +- [ ] post_to_connection + +## apigatewayv2 - 0% implemented +- [ ] create_api +- [ ] create_api_mapping +- [ ] create_authorizer +- [ ] create_deployment +- [ ] create_domain_name +- [ ] create_integration +- [ ] create_integration_response +- [ ] create_model +- [ ] create_route +- [ ] create_route_response +- [ ] create_stage +- [ ] delete_api +- [ ] delete_api_mapping +- [ ] delete_authorizer +- [ ] delete_deployment +- [ ] delete_domain_name +- [ ] delete_integration +- [ ] delete_integration_response +- [ ] delete_model +- [ ] delete_route +- [ ] delete_route_response +- [ ] delete_stage +- [ ] get_api +- [ ] get_api_mapping +- [ ] get_api_mappings +- [ ] get_apis +- [ ] get_authorizer +- [ ] get_authorizers +- [ ] get_deployment +- [ ] get_deployments +- [ ] get_domain_name +- [ ] get_domain_names +- [ ] get_integration +- [ ] get_integration_response +- [ ] get_integration_responses +- [ ] get_integrations +- [ ] get_model +- [ ] get_model_template +- [ ] get_models +- [ ] get_route +- [ ] get_route_response +- [ ] get_route_responses +- [ ] get_routes +- [ ] get_stage +- [ ] get_stages +- [ ] update_api +- [ ] update_api_mapping +- [ ] update_authorizer +- [ ] update_deployment +- [ ] update_domain_name +- [ ] update_integration +- [ ] update_integration_response +- [ ] update_model +- [ ] update_route +- [ ] update_route_response +- [ ] update_stage + ## application-autoscaling - 0% implemented - [ ] delete_scaling_policy - [ ] delete_scheduled_action @@ -217,8 +326,31 @@ - [ ] put_scheduled_action - [ ] register_scalable_target +## appmesh - 0% implemented +- [ ] create_mesh +- [ ] create_route +- [ ] create_virtual_node +- [ ] create_virtual_router +- [ ] delete_mesh +- [ ] delete_route +- [ ] delete_virtual_node +- [ ] delete_virtual_router +- [ ] describe_mesh +- [ ] describe_route +- [ ] describe_virtual_node +- [ ] describe_virtual_router +- [ ] list_meshes +- [ ] list_routes +- [ ] list_virtual_nodes +- [ ] list_virtual_routers +- [ ] update_route +- [ ] update_virtual_node +- [ ] update_virtual_router + ## appstream - 0% implemented - [ ] associate_fleet +- [ ] batch_associate_user_stack +- [ ] batch_disassociate_user_stack - [ ] copy_image - [ ] create_directory_config - [ ] create_fleet @@ -226,12 +358,14 @@ - [ ] create_image_builder_streaming_url - [ ] create_stack - [ ] create_streaming_url +- [ ] create_user - [ ] delete_directory_config - [ ] delete_fleet - [ ] delete_image - [ ] delete_image_builder - [ ] delete_image_permissions - [ ] delete_stack +- [ ] delete_user - [ ] describe_directory_configs - [ ] describe_fleets - [ ] describe_image_builders @@ -239,7 +373,11 @@ - [ ] describe_images - [ ] describe_sessions - [ ] describe_stacks +- [ ] describe_user_stack_associations +- [ ] 
describe_users +- [ ] disable_user - [ ] disassociate_fleet +- [ ] enable_user - [ ] expire_session - [ ] list_associated_fleets - [ ] list_associated_stacks @@ -258,15 +396,18 @@ ## appsync - 0% implemented - [ ] create_api_key - [ ] create_data_source +- [ ] create_function - [ ] create_graphql_api - [ ] create_resolver - [ ] create_type - [ ] delete_api_key - [ ] delete_data_source +- [ ] delete_function - [ ] delete_graphql_api - [ ] delete_resolver - [ ] delete_type - [ ] get_data_source +- [ ] get_function - [ ] get_graphql_api - [ ] get_introspection_schema - [ ] get_resolver @@ -274,12 +415,15 @@ - [ ] get_type - [ ] list_api_keys - [ ] list_data_sources +- [ ] list_functions - [ ] list_graphql_apis - [ ] list_resolvers +- [ ] list_resolvers_by_function - [ ] list_types - [ ] start_schema_creation - [ ] update_api_key - [ ] update_data_source +- [ ] update_function - [ ] update_graphql_api - [ ] update_resolver - [ ] update_type @@ -358,6 +502,7 @@ - [ ] delete_scaling_plan - [ ] describe_scaling_plan_resources - [ ] describe_scaling_plans +- [ ] get_scaling_plan_resource_forecast_data - [ ] update_scaling_plan ## batch - 93% implemented @@ -386,6 +531,7 @@ - [ ] delete_notification - [ ] delete_subscriber - [ ] describe_budget +- [ ] describe_budget_performance_history - [ ] describe_budgets - [ ] describe_notifications_for_budget - [ ] describe_subscribers_for_notification @@ -395,12 +541,31 @@ ## ce - 0% implemented - [ ] get_cost_and_usage +- [ ] get_cost_forecast - [ ] get_dimension_values - [ ] get_reservation_coverage - [ ] get_reservation_purchase_recommendation - [ ] get_reservation_utilization - [ ] get_tags +## chime - 0% implemented +- [ ] batch_suspend_user +- [ ] batch_unsuspend_user +- [ ] batch_update_user +- [ ] create_account +- [ ] delete_account +- [ ] get_account +- [ ] get_account_settings +- [ ] get_user +- [ ] invite_users +- [ ] list_accounts +- [ ] list_users +- [ ] logout_user +- [ ] reset_personal_pin +- [ ] update_account +- [ ] update_account_settings +- [ ] update_user + ## cloud9 - 0% implemented - [ ] create_environment_ec2 - [ ] create_environment_membership @@ -481,7 +646,7 @@ - [ ] upgrade_applied_schema - [ ] upgrade_published_schema -## cloudformation - 21% implemented +## cloudformation - 20% implemented - [ ] cancel_update_stack - [ ] continue_update_rollback - [X] create_change_set @@ -494,13 +659,17 @@ - [ ] delete_stack_set - [ ] describe_account_limits - [ ] describe_change_set +- [ ] describe_stack_drift_detection_status - [ ] describe_stack_events - [ ] describe_stack_instance - [ ] describe_stack_resource +- [ ] describe_stack_resource_drifts - [ ] describe_stack_resources - [ ] describe_stack_set - [ ] describe_stack_set_operation - [X] describe_stacks +- [ ] detect_stack_drift +- [ ] detect_stack_resource_drift - [ ] estimate_template_cost - [X] execute_change_set - [ ] get_stack_policy @@ -597,12 +766,14 @@ - [ ] copy_backup_to_region - [ ] create_cluster - [ ] create_hsm +- [ ] delete_backup - [ ] delete_cluster - [ ] delete_hsm - [ ] describe_backups - [ ] describe_clusters - [ ] initialize_cluster - [ ] list_tags +- [ ] restore_backup - [ ] tag_resource - [ ] untag_resource @@ -653,7 +824,7 @@ - [ ] stop_logging - [ ] update_trail -## cloudwatch - 56% implemented +## cloudwatch - 52% implemented - [X] delete_alarms - [X] delete_dashboards - [ ] describe_alarm_history @@ -664,6 +835,7 @@ - [X] get_dashboard - [ ] get_metric_data - [X] get_metric_statistics +- [ ] get_metric_widget_image - [X] list_dashboards - [ ] list_metrics - 
[X] put_dashboard @@ -678,12 +850,15 @@ - [ ] create_project - [ ] create_webhook - [ ] delete_project +- [ ] delete_source_credentials - [ ] delete_webhook +- [ ] import_source_credentials - [ ] invalidate_project_cache - [ ] list_builds - [ ] list_builds_for_project - [ ] list_curated_environment_images - [ ] list_projects +- [ ] list_source_credentials - [ ] start_build - [ ] stop_build - [ ] update_project @@ -696,6 +871,7 @@ - [ ] create_repository - [ ] delete_branch - [ ] delete_comment_content +- [ ] delete_file - [ ] delete_repository - [ ] describe_pull_request_events - [ ] get_blob @@ -705,6 +881,8 @@ - [ ] get_comments_for_pull_request - [ ] get_commit - [ ] get_differences +- [ ] get_file +- [ ] get_folder - [ ] get_merge_conflicts - [ ] get_pull_request - [ ] get_repository @@ -733,6 +911,7 @@ - [ ] batch_get_applications - [ ] batch_get_deployment_groups - [ ] batch_get_deployment_instances +- [ ] batch_get_deployment_targets - [ ] batch_get_deployments - [ ] batch_get_on_premises_instances - [ ] continue_deployment @@ -751,12 +930,14 @@ - [ ] get_deployment_config - [ ] get_deployment_group - [ ] get_deployment_instance +- [ ] get_deployment_target - [ ] get_on_premises_instance - [ ] list_application_revisions - [ ] list_applications - [ ] list_deployment_configs - [ ] list_deployment_groups - [ ] list_deployment_instances +- [ ] list_deployment_targets - [ ] list_deployments - [ ] list_git_hub_account_token_names - [ ] list_on_premises_instances @@ -937,6 +1118,7 @@ - [ ] update_user_attributes - [ ] update_user_pool - [X] update_user_pool_client +- [ ] update_user_pool_domain - [ ] verify_software_token - [ ] verify_user_attribute @@ -965,8 +1147,15 @@ - [ ] batch_detect_key_phrases - [ ] batch_detect_sentiment - [ ] batch_detect_syntax +- [ ] create_document_classifier +- [ ] create_entity_recognizer +- [ ] delete_document_classifier +- [ ] delete_entity_recognizer +- [ ] describe_document_classification_job +- [ ] describe_document_classifier - [ ] describe_dominant_language_detection_job - [ ] describe_entities_detection_job +- [ ] describe_entity_recognizer - [ ] describe_key_phrases_detection_job - [ ] describe_sentiment_detection_job - [ ] describe_topics_detection_job @@ -975,11 +1164,15 @@ - [ ] detect_key_phrases - [ ] detect_sentiment - [ ] detect_syntax +- [ ] list_document_classification_jobs +- [ ] list_document_classifiers - [ ] list_dominant_language_detection_jobs - [ ] list_entities_detection_jobs +- [ ] list_entity_recognizers - [ ] list_key_phrases_detection_jobs - [ ] list_sentiment_detection_jobs - [ ] list_topics_detection_jobs +- [ ] start_document_classification_job - [ ] start_dominant_language_detection_job - [ ] start_entities_detection_job - [ ] start_key_phrases_detection_job @@ -989,8 +1182,15 @@ - [ ] stop_entities_detection_job - [ ] stop_key_phrases_detection_job - [ ] stop_sentiment_detection_job +- [ ] stop_training_document_classifier +- [ ] stop_training_entity_recognizer + +## comprehendmedical - 0% implemented +- [ ] detect_entities +- [ ] detect_phi ## config - 0% implemented +- [ ] batch_get_aggregate_resource_config - [ ] batch_get_resource_config - [ ] delete_aggregation_authorization - [ ] delete_config_rule @@ -1017,12 +1217,15 @@ - [ ] describe_retention_configurations - [ ] get_aggregate_compliance_details_by_config_rule - [ ] get_aggregate_config_rule_compliance_summary +- [ ] get_aggregate_discovered_resource_counts +- [ ] get_aggregate_resource_config - [ ] get_compliance_details_by_config_rule - [ ] 
get_compliance_details_by_resource - [ ] get_compliance_summary_by_config_rule - [ ] get_compliance_summary_by_resource_type - [ ] get_discovered_resource_counts - [ ] get_resource_config_history +- [ ] list_aggregate_discovered_resources - [ ] list_discovered_resources - [ ] put_aggregation_authorization - [ ] put_config_rule @@ -1041,13 +1244,17 @@ - [ ] describe_user - [ ] describe_user_hierarchy_group - [ ] describe_user_hierarchy_structure +- [ ] get_contact_attributes +- [ ] get_current_metric_data - [ ] get_federation_token +- [ ] get_metric_data - [ ] list_routing_profiles - [ ] list_security_profiles - [ ] list_user_hierarchy_groups - [ ] list_users - [ ] start_outbound_voice_contact - [ ] stop_contact +- [ ] update_contact_attributes - [ ] update_user_hierarchy - [ ] update_user_identity_info - [ ] update_user_phone_config @@ -1080,6 +1287,33 @@ - [ ] set_task_status - [ ] validate_pipeline_definition +## datasync - 0% implemented +- [ ] cancel_task_execution +- [ ] create_agent +- [ ] create_location_efs +- [ ] create_location_nfs +- [ ] create_location_s3 +- [ ] create_task +- [ ] delete_agent +- [ ] delete_location +- [ ] delete_task +- [ ] describe_agent +- [ ] describe_location_efs +- [ ] describe_location_nfs +- [ ] describe_location_s3 +- [ ] describe_task +- [ ] describe_task_execution +- [ ] list_agents +- [ ] list_locations +- [ ] list_tags_for_resource +- [ ] list_task_executions +- [ ] list_tasks +- [ ] start_task_execution +- [ ] tag_resource +- [ ] untag_resource +- [ ] update_agent +- [ ] update_task + ## dax - 0% implemented - [ ] create_cluster - [ ] create_parameter_group @@ -1214,6 +1448,7 @@ - [ ] tag_resource - [ ] untag_resource - [ ] update_lag +- [ ] update_virtual_interface_attributes ## discovery - 0% implemented - [ ] associate_configuration_items_to_application @@ -1293,6 +1528,7 @@ - [ ] test_connection ## ds - 0% implemented +- [ ] accept_shared_directory - [ ] add_ip_routes - [ ] add_tags_to_resource - [ ] cancel_schema_extension @@ -1301,11 +1537,13 @@ - [ ] create_computer - [ ] create_conditional_forwarder - [ ] create_directory +- [ ] create_log_subscription - [ ] create_microsoft_ad - [ ] create_snapshot - [ ] create_trust - [ ] delete_conditional_forwarder - [ ] delete_directory +- [ ] delete_log_subscription - [ ] delete_snapshot - [ ] delete_trust - [ ] deregister_event_topic @@ -1313,6 +1551,7 @@ - [ ] describe_directories - [ ] describe_domain_controllers - [ ] describe_event_topics +- [ ] describe_shared_directories - [ ] describe_snapshots - [ ] describe_trusts - [ ] disable_radius @@ -1322,20 +1561,25 @@ - [ ] get_directory_limits - [ ] get_snapshot_limits - [ ] list_ip_routes +- [ ] list_log_subscriptions - [ ] list_schema_extensions - [ ] list_tags_for_resource - [ ] register_event_topic +- [ ] reject_shared_directory - [ ] remove_ip_routes - [ ] remove_tags_from_resource - [ ] reset_user_password - [ ] restore_from_snapshot +- [ ] share_directory - [ ] start_schema_extension +- [ ] unshare_directory - [ ] update_conditional_forwarder - [ ] update_number_of_domain_controllers - [ ] update_radius +- [ ] update_trust - [ ] verify_trust -## dynamodb - 21% implemented +## dynamodb - 19% implemented - [ ] batch_get_item - [ ] batch_write_item - [ ] create_backup @@ -1346,6 +1590,7 @@ - [X] delete_table - [ ] describe_backup - [ ] describe_continuous_backups +- [ ] describe_endpoints - [ ] describe_global_table - [ ] describe_global_table_settings - [ ] describe_limits @@ -1362,6 +1607,8 @@ - [ ] restore_table_to_point_in_time - [X] scan - 
[ ] tag_resource +- [ ] transact_get_items +- [ ] transact_write_items - [ ] untag_resource - [ ] update_continuous_backups - [ ] update_global_table @@ -1376,29 +1623,36 @@ - [ ] get_shard_iterator - [ ] list_streams -## ec2 - 36% implemented +## ec2 - 30% implemented - [ ] accept_reserved_instances_exchange_quote +- [ ] accept_transit_gateway_vpc_attachment - [ ] accept_vpc_endpoint_connections - [X] accept_vpc_peering_connection +- [ ] advertise_byoip_cidr - [X] allocate_address - [ ] allocate_hosts +- [ ] apply_security_groups_to_client_vpn_target_network - [ ] assign_ipv6_addresses - [ ] assign_private_ip_addresses - [X] associate_address +- [ ] associate_client_vpn_target_network - [X] associate_dhcp_options - [ ] associate_iam_instance_profile - [X] associate_route_table - [ ] associate_subnet_cidr_block +- [ ] associate_transit_gateway_route_table - [X] associate_vpc_cidr_block - [ ] attach_classic_link_vpc - [X] attach_internet_gateway - [X] attach_network_interface - [X] attach_volume - [X] attach_vpn_gateway +- [ ] authorize_client_vpn_ingress - [X] authorize_security_group_egress - [X] authorize_security_group_ingress - [ ] bundle_instance - [ ] cancel_bundle_task +- [ ] cancel_capacity_reservation - [ ] cancel_conversion_task - [ ] cancel_export_task - [ ] cancel_import_task @@ -1409,6 +1663,9 @@ - [ ] copy_fpga_image - [X] copy_image - [X] copy_snapshot +- [ ] create_capacity_reservation +- [ ] create_client_vpn_endpoint +- [ ] create_client_vpn_route - [X] create_customer_gateway - [ ] create_default_subnet - [ ] create_default_vpc @@ -1437,6 +1694,10 @@ - [ ] create_spot_datafeed_subscription - [X] create_subnet - [X] create_tags +- [ ] create_transit_gateway +- [ ] create_transit_gateway_route +- [ ] create_transit_gateway_route_table +- [ ] create_transit_gateway_vpc_attachment - [X] create_volume - [X] create_vpc - [ ] create_vpc_endpoint @@ -1446,6 +1707,8 @@ - [X] create_vpn_connection - [ ] create_vpn_connection_route - [X] create_vpn_gateway +- [ ] delete_client_vpn_endpoint +- [ ] delete_client_vpn_route - [X] delete_customer_gateway - [ ] delete_dhcp_options - [ ] delete_egress_only_internet_gateway @@ -1469,6 +1732,10 @@ - [ ] delete_spot_datafeed_subscription - [X] delete_subnet - [X] delete_tags +- [ ] delete_transit_gateway +- [ ] delete_transit_gateway_route +- [ ] delete_transit_gateway_route_table +- [ ] delete_transit_gateway_vpc_attachment - [X] delete_volume - [X] delete_vpc - [ ] delete_vpc_endpoint_connection_notifications @@ -1478,13 +1745,21 @@ - [X] delete_vpn_connection - [ ] delete_vpn_connection_route - [X] delete_vpn_gateway +- [ ] deprovision_byoip_cidr - [X] deregister_image - [ ] describe_account_attributes - [X] describe_addresses - [ ] describe_aggregate_id_format - [X] describe_availability_zones - [ ] describe_bundle_tasks +- [ ] describe_byoip_cidrs +- [ ] describe_capacity_reservations - [ ] describe_classic_link_instances +- [ ] describe_client_vpn_authorization_rules +- [ ] describe_client_vpn_connections +- [ ] describe_client_vpn_endpoints +- [ ] describe_client_vpn_routes +- [ ] describe_client_vpn_target_networks - [ ] describe_conversion_tasks - [ ] describe_customer_gateways - [X] describe_dhcp_options @@ -1524,6 +1799,7 @@ - [ ] describe_placement_groups - [ ] describe_prefix_lists - [ ] describe_principal_id_format +- [ ] describe_public_ipv4_pools - [X] describe_regions - [ ] describe_reserved_instances - [ ] describe_reserved_instances_listings @@ -1545,6 +1821,10 @@ - [ ] describe_stale_security_groups - [ ] describe_subnets 
- [X] describe_tags +- [ ] describe_transit_gateway_attachments +- [ ] describe_transit_gateway_route_tables +- [ ] describe_transit_gateway_vpc_attachments +- [ ] describe_transit_gateways - [ ] describe_volume_attribute - [ ] describe_volume_status - [X] describe_volumes @@ -1567,29 +1847,42 @@ - [X] detach_network_interface - [X] detach_volume - [X] detach_vpn_gateway +- [ ] disable_transit_gateway_route_table_propagation - [ ] disable_vgw_route_propagation - [ ] disable_vpc_classic_link - [ ] disable_vpc_classic_link_dns_support - [X] disassociate_address +- [ ] disassociate_client_vpn_target_network - [ ] disassociate_iam_instance_profile - [X] disassociate_route_table - [ ] disassociate_subnet_cidr_block +- [ ] disassociate_transit_gateway_route_table - [X] disassociate_vpc_cidr_block +- [ ] enable_transit_gateway_route_table_propagation - [ ] enable_vgw_route_propagation - [ ] enable_volume_io - [ ] enable_vpc_classic_link - [ ] enable_vpc_classic_link_dns_support +- [ ] export_client_vpn_client_certificate_revocation_list +- [ ] export_client_vpn_client_configuration +- [ ] export_transit_gateway_routes - [ ] get_console_output - [ ] get_console_screenshot - [ ] get_host_reservation_purchase_preview - [ ] get_launch_template_data - [ ] get_password_data - [ ] get_reserved_instances_exchange_quote +- [ ] get_transit_gateway_attachment_propagations +- [ ] get_transit_gateway_route_table_associations +- [ ] get_transit_gateway_route_table_propagations +- [ ] import_client_vpn_client_certificate_revocation_list - [ ] import_image - [ ] import_instance - [X] import_key_pair - [ ] import_snapshot - [ ] import_volume +- [ ] modify_capacity_reservation +- [ ] modify_client_vpn_endpoint - [ ] modify_fleet - [ ] modify_fpga_image_attribute - [ ] modify_hosts @@ -1597,6 +1890,7 @@ - [ ] modify_identity_id_format - [ ] modify_image_attribute - [X] modify_instance_attribute +- [ ] modify_instance_capacity_reservation_attributes - [ ] modify_instance_credit_specification - [ ] modify_instance_placement - [ ] modify_launch_template @@ -1605,6 +1899,7 @@ - [ ] modify_snapshot_attribute - [X] modify_spot_fleet_request - [X] modify_subnet_attribute +- [ ] modify_transit_gateway_vpc_attachment - [ ] modify_volume - [ ] modify_volume_attribute - [X] modify_vpc_attribute @@ -1616,11 +1911,13 @@ - [ ] modify_vpc_tenancy - [ ] monitor_instances - [ ] move_address_to_vpc +- [ ] provision_byoip_cidr - [ ] purchase_host_reservation - [ ] purchase_reserved_instances_offering - [ ] purchase_scheduled_instances - [X] reboot_instances - [ ] register_image +- [ ] reject_transit_gateway_vpc_attachment - [ ] reject_vpc_endpoint_connections - [X] reject_vpc_peering_connection - [X] release_address @@ -1630,6 +1927,7 @@ - [X] replace_network_acl_entry - [X] replace_route - [X] replace_route_table_association +- [ ] replace_transit_gateway_route - [ ] report_instance_status - [X] request_spot_fleet - [X] request_spot_instances @@ -1639,20 +1937,24 @@ - [ ] reset_network_interface_attribute - [ ] reset_snapshot_attribute - [ ] restore_address_to_classic +- [ ] revoke_client_vpn_ingress - [X] revoke_security_group_egress - [X] revoke_security_group_ingress - [ ] run_instances - [ ] run_scheduled_instances +- [ ] search_transit_gateway_routes - [X] start_instances - [X] stop_instances +- [ ] terminate_client_vpn_connections - [X] terminate_instances - [ ] unassign_ipv6_addresses - [ ] unassign_private_ip_addresses - [ ] unmonitor_instances - [ ] update_security_group_rule_descriptions_egress - [ ] 
update_security_group_rule_descriptions_ingress +- [ ] withdraw_byoip_cidr -## ecr - 31% implemented +## ecr - 28% implemented - [ ] batch_check_layer_availability - [ ] batch_delete_image - [X] batch_get_image @@ -1670,15 +1972,19 @@ - [ ] get_repository_policy - [ ] initiate_layer_upload - [X] list_images +- [ ] list_tags_for_resource - [X] put_image - [ ] put_lifecycle_policy - [ ] set_repository_policy - [ ] start_lifecycle_policy_preview +- [ ] tag_resource +- [ ] untag_resource - [ ] upload_layer_part -## ecs - 87% implemented +## ecs - 72% implemented - [X] create_cluster - [X] create_service +- [ ] delete_account_setting - [X] delete_attributes - [X] delete_cluster - [X] delete_service @@ -1690,13 +1996,16 @@ - [X] describe_task_definition - [X] describe_tasks - [ ] discover_poll_endpoint +- [ ] list_account_settings - [X] list_attributes - [X] list_clusters - [X] list_container_instances - [X] list_services +- [ ] list_tags_for_resource - [X] list_task_definition_families - [X] list_task_definitions - [X] list_tasks +- [ ] put_account_setting - [X] put_attributes - [X] register_container_instance - [X] register_task_definition @@ -1705,6 +2014,8 @@ - [X] stop_task - [ ] submit_container_state_change - [ ] submit_task_state_change +- [ ] tag_resource +- [ ] untag_resource - [ ] update_container_agent - [X] update_container_instances_state - [X] update_service @@ -1727,7 +2038,10 @@ - [ ] create_cluster - [ ] delete_cluster - [ ] describe_cluster +- [ ] describe_update - [ ] list_clusters +- [ ] list_updates +- [ ] update_cluster_version ## elasticache - 0% implemented - [ ] add_tags_to_resource @@ -1739,6 +2053,7 @@ - [ ] create_cache_subnet_group - [ ] create_replication_group - [ ] create_snapshot +- [ ] decrease_replica_count - [ ] delete_cache_cluster - [ ] delete_cache_parameter_group - [ ] delete_cache_security_group @@ -1757,6 +2072,7 @@ - [ ] describe_reserved_cache_nodes - [ ] describe_reserved_cache_nodes_offerings - [ ] describe_snapshots +- [ ] increase_replica_count - [ ] list_allowed_node_type_modifications - [ ] list_tags_for_resource - [ ] modify_cache_cluster @@ -1934,6 +2250,7 @@ ## es - 0% implemented - [ ] add_tags +- [ ] cancel_elasticsearch_service_software_update - [ ] create_elasticsearch_domain - [ ] delete_elasticsearch_domain - [ ] delete_elasticsearch_service_role @@ -1952,6 +2269,7 @@ - [ ] list_tags - [ ] purchase_reserved_elasticsearch_instance_offering - [ ] remove_tags +- [ ] start_elasticsearch_service_software_update - [ ] update_elasticsearch_domain_config - [ ] upgrade_elasticsearch_domain @@ -1980,6 +2298,8 @@ - [ ] list_tags_for_delivery_stream - [ ] put_record - [ ] put_record_batch +- [ ] start_delivery_stream_encryption +- [ ] stop_delivery_stream_encryption - [ ] tag_delivery_stream - [ ] untag_delivery_stream - [ ] update_destination @@ -1994,10 +2314,24 @@ - [ ] get_notification_channel - [ ] get_policy - [ ] list_compliance_status +- [ ] list_member_accounts - [ ] list_policies - [ ] put_notification_channel - [ ] put_policy +## fsx - 0% implemented +- [ ] create_backup +- [ ] create_file_system +- [ ] create_file_system_from_backup +- [ ] delete_backup +- [ ] delete_file_system +- [ ] describe_backups +- [ ] describe_file_systems +- [ ] list_tags_for_resource +- [ ] tag_resource +- [ ] untag_resource +- [ ] update_file_system + ## gamelift - 0% implemented - [ ] accept_match - [ ] create_alias @@ -2102,7 +2436,26 @@ - [ ] upload_archive - [ ] upload_multipart_part -## glue - 6% implemented +## globalaccelerator - 0% implemented +- [ ] 
create_accelerator +- [ ] create_endpoint_group +- [ ] create_listener +- [ ] delete_accelerator +- [ ] delete_endpoint_group +- [ ] delete_listener +- [ ] describe_accelerator +- [ ] describe_accelerator_attributes +- [ ] describe_endpoint_group +- [ ] describe_listener +- [ ] list_accelerators +- [ ] list_endpoint_groups +- [ ] list_listeners +- [ ] update_accelerator +- [ ] update_accelerator_attributes +- [ ] update_endpoint_group +- [ ] update_listener + +## glue - 5% implemented - [ ] batch_create_partition - [ ] batch_delete_connection - [ ] batch_delete_partition @@ -2118,6 +2471,7 @@ - [ ] create_job - [ ] create_partition - [ ] create_script +- [ ] create_security_configuration - [X] create_table - [ ] create_trigger - [ ] create_user_defined_function @@ -2128,6 +2482,8 @@ - [ ] delete_dev_endpoint - [ ] delete_job - [ ] delete_partition +- [ ] delete_resource_policy +- [ ] delete_security_configuration - [ ] delete_table - [ ] delete_table_version - [ ] delete_trigger @@ -2140,6 +2496,7 @@ - [ ] get_crawler - [ ] get_crawler_metrics - [ ] get_crawlers +- [ ] get_data_catalog_encryption_settings - [X] get_database - [ ] get_databases - [ ] get_dataflow_graph @@ -2153,6 +2510,9 @@ - [ ] get_partition - [ ] get_partitions - [ ] get_plan +- [ ] get_resource_policy +- [ ] get_security_configuration +- [ ] get_security_configurations - [X] get_table - [ ] get_table_version - [ ] get_table_versions @@ -2162,6 +2522,8 @@ - [ ] get_user_defined_function - [ ] get_user_defined_functions - [ ] import_catalog_to_glue +- [ ] put_data_catalog_encryption_settings +- [ ] put_resource_policy - [ ] reset_job_bookmark - [ ] start_crawler - [ ] start_crawler_schedule @@ -2185,6 +2547,8 @@ ## greengrass - 0% implemented - [ ] associate_role_to_group - [ ] associate_service_role_to_account +- [ ] create_connector_definition +- [ ] create_connector_definition_version - [ ] create_core_definition - [ ] create_core_definition_version - [ ] create_deployment @@ -2202,6 +2566,7 @@ - [ ] create_software_update_job - [ ] create_subscription_definition - [ ] create_subscription_definition_version +- [ ] delete_connector_definition - [ ] delete_core_definition - [ ] delete_device_definition - [ ] delete_function_definition @@ -2212,7 +2577,10 @@ - [ ] disassociate_role_from_group - [ ] disassociate_service_role_from_account - [ ] get_associated_role +- [ ] get_bulk_deployment_status - [ ] get_connectivity_info +- [ ] get_connector_definition +- [ ] get_connector_definition_version - [ ] get_core_definition - [ ] get_core_definition_version - [ ] get_deployment_status @@ -2231,6 +2599,10 @@ - [ ] get_service_role_for_account - [ ] get_subscription_definition - [ ] get_subscription_definition_version +- [ ] list_bulk_deployment_detailed_reports +- [ ] list_bulk_deployments +- [ ] list_connector_definition_versions +- [ ] list_connector_definitions - [ ] list_core_definition_versions - [ ] list_core_definitions - [ ] list_deployments @@ -2248,7 +2620,10 @@ - [ ] list_subscription_definition_versions - [ ] list_subscription_definitions - [ ] reset_deployments +- [ ] start_bulk_deployment +- [ ] stop_bulk_deployment - [ ] update_connectivity_info +- [ ] update_connector_definition - [ ] update_core_definition - [ ] update_device_definition - [ ] update_function_definition @@ -2310,7 +2685,7 @@ - [ ] describe_event_types - [ ] describe_events -## iam - 47% implemented +## iam - 43% implemented - [ ] add_client_id_to_open_id_connect_provider - [X] add_role_to_instance_profile - [X] add_user_to_group @@ -2361,6 
+2736,7 @@ - [X] detach_user_policy - [X] enable_mfa_device - [ ] generate_credential_report +- [ ] generate_service_last_accessed_details - [ ] get_access_key_last_used - [X] get_account_authorization_details - [ ] get_account_password_policy @@ -2379,6 +2755,8 @@ - [X] get_role_policy - [ ] get_saml_provider - [X] get_server_certificate +- [ ] get_service_last_accessed_details +- [ ] get_service_last_accessed_details_with_entities - [ ] get_service_linked_role_deletion_status - [ ] get_ssh_public_key - [X] get_user @@ -2397,8 +2775,10 @@ - [X] list_mfa_devices - [ ] list_open_id_connect_providers - [X] list_policies +- [ ] list_policies_granting_service_access - [X] list_policy_versions - [X] list_role_policies +- [ ] list_role_tags - [ ] list_roles - [ ] list_saml_providers - [ ] list_server_certificates @@ -2406,6 +2786,7 @@ - [ ] list_signing_certificates - [ ] list_ssh_public_keys - [X] list_user_policies +- [ ] list_user_tags - [X] list_users - [ ] list_virtual_mfa_devices - [X] put_group_policy @@ -2421,6 +2802,10 @@ - [ ] set_default_policy_version - [ ] simulate_custom_policy - [ ] simulate_principal_policy +- [ ] tag_role +- [ ] tag_user +- [ ] untag_role +- [ ] untag_user - [X] update_access_key - [ ] update_account_password_policy - [ ] update_assume_role_policy @@ -2488,6 +2873,7 @@ ## iot - 31% implemented - [ ] accept_certificate_transfer +- [ ] add_thing_to_billing_group - [X] add_thing_to_thing_group - [ ] associate_targets_with_job - [X] attach_policy @@ -2497,10 +2883,12 @@ - [ ] cancel_audit_task - [ ] cancel_certificate_transfer - [ ] cancel_job -- [ ] cancel_job_execution +- [X] cancel_job_execution - [ ] clear_default_authorizer - [ ] create_authorizer +- [ ] create_billing_group - [ ] create_certificate_from_csr +- [ ] create_dynamic_thing_group - [X] create_job - [X] create_keys_and_certificate - [ ] create_ota_update @@ -2516,10 +2904,12 @@ - [ ] create_topic_rule - [ ] delete_account_audit_configuration - [ ] delete_authorizer +- [ ] delete_billing_group - [ ] delete_ca_certificate - [X] delete_certificate +- [ ] delete_dynamic_thing_group - [ ] delete_job -- [ ] delete_job_execution +- [X] delete_job_execution - [ ] delete_ota_update - [X] delete_policy - [X] delete_policy_version @@ -2537,6 +2927,7 @@ - [ ] describe_account_audit_configuration - [ ] describe_audit_task - [ ] describe_authorizer +- [ ] describe_billing_group - [ ] describe_ca_certificate - [X] describe_certificate - [ ] describe_default_authorizer @@ -2544,7 +2935,7 @@ - [ ] describe_event_configurations - [ ] describe_index - [X] describe_job -- [ ] describe_job_execution +- [X] describe_job_execution - [ ] describe_role_alias - [ ] describe_scheduled_audit - [ ] describe_security_profile @@ -2574,13 +2965,14 @@ - [ ] list_audit_findings - [ ] list_audit_tasks - [ ] list_authorizers +- [ ] list_billing_groups - [ ] list_ca_certificates - [X] list_certificates - [ ] list_certificates_by_ca - [ ] list_indices -- [ ] list_job_executions_for_job -- [ ] list_job_executions_for_thing -- [ ] list_jobs +- [X] list_job_executions_for_job +- [X] list_job_executions_for_thing +- [X] list_jobs - [ ] list_ota_updates - [ ] list_outgoing_certificates - [X] list_policies @@ -2593,6 +2985,7 @@ - [ ] list_security_profiles - [ ] list_security_profiles_for_target - [ ] list_streams +- [ ] list_tags_for_resource - [ ] list_targets_for_policy - [ ] list_targets_for_security_profile - [X] list_thing_groups @@ -2602,6 +2995,7 @@ - [ ] list_thing_registration_tasks - [X] list_thing_types - [X] list_things +- [ ] 
list_things_in_billing_group - [X] list_things_in_thing_group - [ ] list_topic_rules - [ ] list_v2_logging_levels @@ -2610,6 +3004,7 @@ - [ ] register_certificate - [ ] register_thing - [ ] reject_certificate_transfer +- [ ] remove_thing_from_billing_group - [X] remove_thing_from_thing_group - [ ] replace_topic_rule - [ ] search_index @@ -2621,15 +3016,20 @@ - [ ] start_on_demand_audit_task - [ ] start_thing_registration_task - [ ] stop_thing_registration_task +- [ ] tag_resource - [ ] test_authorization - [ ] test_invoke_authorizer - [ ] transfer_certificate +- [ ] untag_resource - [ ] update_account_audit_configuration - [ ] update_authorizer +- [ ] update_billing_group - [ ] update_ca_certificate - [X] update_certificate +- [ ] update_dynamic_thing_group - [ ] update_event_configurations - [ ] update_indexing_configuration +- [ ] update_job - [ ] update_role_alias - [ ] update_scheduled_audit - [ ] update_security_profile @@ -2698,6 +3098,7 @@ - [ ] describe_pipeline - [ ] get_dataset_content - [ ] list_channels +- [ ] list_dataset_contents - [ ] list_datasets - [ ] list_datastores - [ ] list_pipelines @@ -2713,6 +3114,14 @@ - [ ] update_datastore - [ ] update_pipeline +## kafka - 0% implemented +- [ ] create_cluster +- [ ] delete_cluster +- [ ] describe_cluster +- [ ] get_bootstrap_brokers +- [ ] list_clusters +- [ ] list_nodes + ## kinesis - 46% implemented - [X] add_tags_to_stream - [X] create_stream @@ -2770,6 +3179,29 @@ - [ ] stop_application - [ ] update_application +## kinesisanalyticsv2 - 0% implemented +- [ ] add_application_cloud_watch_logging_option +- [ ] add_application_input +- [ ] add_application_input_processing_configuration +- [ ] add_application_output +- [ ] add_application_reference_data_source +- [ ] create_application +- [ ] create_application_snapshot +- [ ] delete_application +- [ ] delete_application_cloud_watch_logging_option +- [ ] delete_application_input_processing_configuration +- [ ] delete_application_output +- [ ] delete_application_reference_data_source +- [ ] delete_application_snapshot +- [ ] describe_application +- [ ] describe_application_snapshot +- [ ] discover_input_schema +- [ ] list_application_snapshots +- [ ] list_applications +- [ ] start_application +- [ ] stop_application +- [ ] update_application + ## kinesisvideo - 0% implemented - [ ] create_stream - [ ] delete_stream @@ -2782,17 +3214,22 @@ - [ ] update_data_retention - [ ] update_stream -## kms - 37% implemented +## kms - 31% implemented - [X] cancel_key_deletion +- [ ] connect_custom_key_store - [ ] create_alias +- [ ] create_custom_key_store - [ ] create_grant - [X] create_key - [ ] decrypt - [X] delete_alias +- [ ] delete_custom_key_store - [ ] delete_imported_key_material +- [ ] describe_custom_key_stores - [X] describe_key - [X] disable_key - [X] disable_key_rotation +- [ ] disconnect_custom_key_store - [X] enable_key - [X] enable_key_rotation - [ ] encrypt @@ -2817,9 +3254,11 @@ - [ ] tag_resource - [ ] untag_resource - [ ] update_alias +- [ ] update_custom_key_store - [ ] update_key_description ## lambda - 0% implemented +- [ ] add_layer_version_permission - [ ] add_permission - [ ] create_alias - [ ] create_event_source_mapping @@ -2828,21 +3267,28 @@ - [ ] delete_event_source_mapping - [ ] delete_function - [ ] delete_function_concurrency +- [ ] delete_layer_version - [ ] get_account_settings - [ ] get_alias - [ ] get_event_source_mapping - [ ] get_function - [ ] get_function_configuration +- [ ] get_layer_version +- [ ] get_layer_version_policy - [ ] get_policy - [ ] 
invoke - [ ] invoke_async - [ ] list_aliases - [ ] list_event_source_mappings - [ ] list_functions +- [ ] list_layer_versions +- [ ] list_layers - [ ] list_tags - [ ] list_versions_by_function +- [ ] publish_layer_version - [ ] publish_version - [ ] put_function_concurrency +- [ ] remove_layer_version_permission - [ ] remove_permission - [ ] tag_resource - [ ] untag_resource @@ -2893,6 +3339,23 @@ - [ ] post_content - [ ] post_text +## license-manager - 0% implemented +- [ ] create_license_configuration +- [ ] delete_license_configuration +- [ ] get_license_configuration +- [ ] get_service_settings +- [ ] list_associations_for_license_configuration +- [ ] list_license_configurations +- [ ] list_license_specifications_for_resource +- [ ] list_resource_inventory +- [ ] list_tags_for_resource +- [ ] list_usage_for_license_configuration +- [ ] tag_resource +- [ ] untag_resource +- [ ] update_license_configuration +- [ ] update_license_specifications_for_resource +- [ ] update_service_settings + ## lightsail - 0% implemented - [ ] allocate_static_ip - [ ] attach_disk @@ -2900,6 +3363,8 @@ - [ ] attach_load_balancer_tls_certificate - [ ] attach_static_ip - [ ] close_instance_public_ports +- [ ] copy_snapshot +- [ ] create_cloud_formation_stack - [ ] create_disk - [ ] create_disk_from_snapshot - [ ] create_disk_snapshot @@ -2911,6 +3376,9 @@ - [ ] create_key_pair - [ ] create_load_balancer - [ ] create_load_balancer_tls_certificate +- [ ] create_relational_database +- [ ] create_relational_database_from_snapshot +- [ ] create_relational_database_snapshot - [ ] delete_disk - [ ] delete_disk_snapshot - [ ] delete_domain @@ -2920,19 +3388,24 @@ - [ ] delete_key_pair - [ ] delete_load_balancer - [ ] delete_load_balancer_tls_certificate +- [ ] delete_relational_database +- [ ] delete_relational_database_snapshot - [ ] detach_disk - [ ] detach_instances_from_load_balancer - [ ] detach_static_ip - [ ] download_default_key_pair +- [ ] export_snapshot - [ ] get_active_names - [ ] get_blueprints - [ ] get_bundles +- [ ] get_cloud_formation_stack_records - [ ] get_disk - [ ] get_disk_snapshot - [ ] get_disk_snapshots - [ ] get_disks - [ ] get_domain - [ ] get_domains +- [ ] get_export_snapshot_records - [ ] get_instance - [ ] get_instance_access_details - [ ] get_instance_metric_data @@ -2951,6 +3424,18 @@ - [ ] get_operations - [ ] get_operations_for_resource - [ ] get_regions +- [ ] get_relational_database +- [ ] get_relational_database_blueprints +- [ ] get_relational_database_bundles +- [ ] get_relational_database_events +- [ ] get_relational_database_log_events +- [ ] get_relational_database_log_streams +- [ ] get_relational_database_master_user_password +- [ ] get_relational_database_metric_data +- [ ] get_relational_database_parameters +- [ ] get_relational_database_snapshot +- [ ] get_relational_database_snapshots +- [ ] get_relational_databases - [ ] get_static_ip - [ ] get_static_ips - [ ] import_key_pair @@ -2959,14 +3444,21 @@ - [ ] peer_vpc - [ ] put_instance_public_ports - [ ] reboot_instance +- [ ] reboot_relational_database - [ ] release_static_ip - [ ] start_instance +- [ ] start_relational_database - [ ] stop_instance +- [ ] stop_relational_database +- [ ] tag_resource - [ ] unpeer_vpc +- [ ] untag_resource - [ ] update_domain_entry - [ ] update_load_balancer_attribute +- [ ] update_relational_database +- [ ] update_relational_database_parameters -## logs - 27% implemented +## logs - 23% implemented - [ ] associate_kms_key - [ ] cancel_export_task - [ ] create_export_task @@ -2984,11 
+3476,15 @@ - [X] describe_log_groups - [X] describe_log_streams - [ ] describe_metric_filters +- [ ] describe_queries - [ ] describe_resource_policies - [ ] describe_subscription_filters - [ ] disassociate_kms_key - [X] filter_log_events - [X] get_log_events +- [ ] get_log_group_fields +- [ ] get_log_record +- [ ] get_query_results - [ ] list_tags_log_group - [ ] put_destination - [ ] put_destination_policy @@ -2997,6 +3493,8 @@ - [ ] put_resource_policy - [ ] put_retention_policy - [ ] put_subscription_filter +- [ ] start_query +- [ ] stop_query - [ ] tag_log_group - [ ] test_metric_filter - [ ] untag_log_group @@ -3047,7 +3545,24 @@ - [ ] generate_data_set - [ ] start_support_data_export +## mediaconnect - 0% implemented +- [ ] add_flow_outputs +- [ ] create_flow +- [ ] delete_flow +- [ ] describe_flow +- [ ] grant_flow_entitlements +- [ ] list_entitlements +- [ ] list_flows +- [ ] remove_flow_output +- [ ] revoke_flow_entitlement +- [ ] start_flow +- [ ] stop_flow +- [ ] update_flow_entitlement +- [ ] update_flow_output +- [ ] update_flow_source + ## mediaconvert - 0% implemented +- [ ] associate_certificate - [ ] cancel_job - [ ] create_job - [ ] create_job_template @@ -3057,6 +3572,7 @@ - [ ] delete_preset - [ ] delete_queue - [ ] describe_endpoints +- [ ] disassociate_certificate - [ ] get_job - [ ] get_job_template - [ ] get_preset @@ -3109,6 +3625,7 @@ - [ ] list_channels - [ ] list_origin_endpoints - [ ] rotate_channel_credentials +- [ ] rotate_ingest_endpoint_credentials - [ ] update_channel - [ ] update_origin_endpoint @@ -3117,12 +3634,15 @@ - [ ] delete_container - [ ] delete_container_policy - [ ] delete_cors_policy +- [ ] delete_lifecycle_policy - [ ] describe_container - [ ] get_container_policy - [ ] get_cors_policy +- [ ] get_lifecycle_policy - [ ] list_containers - [ ] put_container_policy - [ ] put_cors_policy +- [ ] put_lifecycle_policy ## mediastore-data - 0% implemented - [ ] delete_object @@ -3140,6 +3660,7 @@ ## meteringmarketplace - 0% implemented - [ ] batch_meter_usage - [ ] meter_usage +- [ ] register_usage - [ ] resolve_customer ## mgh - 0% implemented @@ -3174,8 +3695,10 @@ ## mq - 0% implemented - [ ] create_broker - [ ] create_configuration +- [ ] create_tags - [ ] create_user - [ ] delete_broker +- [ ] delete_tags - [ ] delete_user - [ ] describe_broker - [ ] describe_configuration @@ -3184,6 +3707,7 @@ - [ ] list_brokers - [ ] list_configuration_revisions - [ ] list_configurations +- [ ] list_tags - [ ] list_users - [ ] reboot_broker - [ ] update_broker @@ -3378,6 +3902,7 @@ - [ ] describe_node_association_status - [ ] describe_servers - [ ] disassociate_node +- [ ] export_server_engine_attribute - [ ] restore_server - [ ] start_maintenance - [ ] update_server @@ -3452,6 +3977,7 @@ - [ ] delete_segment - [ ] delete_sms_channel - [ ] delete_user_endpoints +- [ ] delete_voice_channel - [ ] get_adm_channel - [ ] get_apns_channel - [ ] get_apns_sandbox_channel @@ -3483,6 +4009,7 @@ - [ ] get_segments - [ ] get_sms_channel - [ ] get_user_endpoints +- [ ] get_voice_channel - [ ] phone_number_validate - [ ] put_event_stream - [ ] put_events @@ -3503,6 +4030,46 @@ - [ ] update_gcm_channel - [ ] update_segment - [ ] update_sms_channel +- [ ] update_voice_channel + +## pinpoint-email - 0% implemented +- [ ] create_configuration_set +- [ ] create_configuration_set_event_destination +- [ ] create_dedicated_ip_pool +- [ ] create_deliverability_test_report +- [ ] create_email_identity +- [ ] delete_configuration_set +- [ ] delete_configuration_set_event_destination +- 
[ ] delete_dedicated_ip_pool +- [ ] delete_email_identity +- [ ] get_account +- [ ] get_blacklist_reports +- [ ] get_configuration_set +- [ ] get_configuration_set_event_destinations +- [ ] get_dedicated_ip +- [ ] get_dedicated_ips +- [ ] get_deliverability_dashboard_options +- [ ] get_deliverability_test_report +- [ ] get_domain_statistics_report +- [ ] get_email_identity +- [ ] list_configuration_sets +- [ ] list_dedicated_ip_pools +- [ ] list_deliverability_test_reports +- [ ] list_email_identities +- [ ] put_account_dedicated_ip_warmup_attributes +- [ ] put_account_sending_attributes +- [ ] put_configuration_set_delivery_options +- [ ] put_configuration_set_reputation_options +- [ ] put_configuration_set_sending_options +- [ ] put_configuration_set_tracking_options +- [ ] put_dedicated_ip_in_pool +- [ ] put_dedicated_ip_warmup_attributes +- [ ] put_deliverability_dashboard_option +- [ ] put_email_identity_dkim_attributes +- [ ] put_email_identity_feedback_attributes +- [ ] put_email_identity_mail_from_attributes +- [ ] send_email +- [ ] update_configuration_set_event_destination ## polly - 55% implemented - [X] delete_lexicon @@ -3520,6 +4087,41 @@ - [ ] get_attribute_values - [ ] get_products +## quicksight - 0% implemented +- [ ] create_group +- [ ] create_group_membership +- [ ] delete_group +- [ ] delete_group_membership +- [ ] delete_user +- [ ] describe_group +- [ ] describe_user +- [ ] get_dashboard_embed_url +- [ ] list_group_memberships +- [ ] list_groups +- [ ] list_user_groups +- [ ] list_users +- [ ] register_user +- [ ] update_group +- [ ] update_user + +## ram - 0% implemented +- [ ] accept_resource_share_invitation +- [ ] associate_resource_share +- [ ] create_resource_share +- [ ] delete_resource_share +- [ ] disassociate_resource_share +- [ ] enable_sharing_with_aws_organization +- [ ] get_resource_policies +- [ ] get_resource_share_associations +- [ ] get_resource_share_invitations +- [ ] get_resource_shares +- [ ] list_principals +- [ ] list_resources +- [ ] reject_resource_share_invitation +- [ ] tag_resource +- [ ] untag_resource +- [ ] update_resource_share + ## rds - 0% implemented - [ ] add_role_to_db_cluster - [ ] add_source_identifier_to_subscription @@ -3533,6 +4135,7 @@ - [ ] copy_db_snapshot - [ ] copy_option_group - [ ] create_db_cluster +- [ ] create_db_cluster_endpoint - [ ] create_db_cluster_parameter_group - [ ] create_db_cluster_snapshot - [ ] create_db_instance @@ -3542,26 +4145,32 @@ - [ ] create_db_snapshot - [ ] create_db_subnet_group - [ ] create_event_subscription +- [ ] create_global_cluster - [ ] create_option_group - [ ] delete_db_cluster +- [ ] delete_db_cluster_endpoint - [ ] delete_db_cluster_parameter_group - [ ] delete_db_cluster_snapshot - [ ] delete_db_instance +- [ ] delete_db_instance_automated_backup - [ ] delete_db_parameter_group - [ ] delete_db_security_group - [ ] delete_db_snapshot - [ ] delete_db_subnet_group - [ ] delete_event_subscription +- [ ] delete_global_cluster - [ ] delete_option_group - [ ] describe_account_attributes - [ ] describe_certificates - [ ] describe_db_cluster_backtracks +- [ ] describe_db_cluster_endpoints - [ ] describe_db_cluster_parameter_groups - [ ] describe_db_cluster_parameters - [ ] describe_db_cluster_snapshot_attributes - [ ] describe_db_cluster_snapshots - [ ] describe_db_clusters - [ ] describe_db_engine_versions +- [ ] describe_db_instance_automated_backups - [ ] describe_db_instances - [ ] describe_db_log_files - [ ] describe_db_parameter_groups @@ -3575,6 +4184,7 @@ - [ ] 
describe_event_categories - [ ] describe_event_subscriptions - [ ] describe_events +- [ ] describe_global_clusters - [ ] describe_option_group_options - [ ] describe_option_groups - [ ] describe_orderable_db_instance_options @@ -3588,6 +4198,7 @@ - [ ] list_tags_for_resource - [ ] modify_current_db_cluster_capacity - [ ] modify_db_cluster +- [ ] modify_db_cluster_endpoint - [ ] modify_db_cluster_parameter_group - [ ] modify_db_cluster_snapshot_attribute - [ ] modify_db_instance @@ -3596,11 +4207,13 @@ - [ ] modify_db_snapshot_attribute - [ ] modify_db_subnet_group - [ ] modify_event_subscription +- [ ] modify_global_cluster - [ ] modify_option_group - [ ] promote_read_replica - [ ] promote_read_replica_db_cluster - [ ] purchase_reserved_db_instances_offering - [ ] reboot_db_instance +- [ ] remove_from_global_cluster - [ ] remove_role_from_db_cluster - [ ] remove_source_identifier_from_subscription - [ ] remove_tags_from_resource @@ -3613,13 +4226,21 @@ - [ ] restore_db_instance_from_s3 - [ ] restore_db_instance_to_point_in_time - [ ] revoke_db_security_group_ingress +- [ ] start_db_cluster - [ ] start_db_instance +- [ ] stop_db_cluster - [ ] stop_db_instance -## redshift - 37% implemented +## rds-data - 0% implemented +- [ ] execute_sql + +## redshift - 32% implemented - [ ] accept_reserved_node_exchange - [ ] authorize_cluster_security_group_ingress - [ ] authorize_snapshot_access +- [ ] batch_delete_cluster_snapshots +- [ ] batch_modify_cluster_snapshots +- [ ] cancel_resize - [ ] copy_cluster_snapshot - [X] create_cluster - [X] create_cluster_parameter_group @@ -3630,6 +4251,7 @@ - [ ] create_hsm_client_certificate - [ ] create_hsm_configuration - [X] create_snapshot_copy_grant +- [ ] create_snapshot_schedule - [X] create_tags - [X] delete_cluster - [X] delete_cluster_parameter_group @@ -3640,7 +4262,9 @@ - [ ] delete_hsm_client_certificate - [ ] delete_hsm_configuration - [X] delete_snapshot_copy_grant +- [ ] delete_snapshot_schedule - [X] delete_tags +- [ ] describe_account_attributes - [ ] describe_cluster_db_revisions - [X] describe_cluster_parameter_groups - [ ] describe_cluster_parameters @@ -3662,6 +4286,8 @@ - [ ] describe_reserved_nodes - [ ] describe_resize - [X] describe_snapshot_copy_grants +- [ ] describe_snapshot_schedules +- [ ] describe_storage - [ ] describe_table_restore_status - [X] describe_tags - [ ] disable_logging @@ -3673,10 +4299,14 @@ - [X] modify_cluster - [ ] modify_cluster_db_revision - [ ] modify_cluster_iam_roles +- [ ] modify_cluster_maintenance - [ ] modify_cluster_parameter_group +- [ ] modify_cluster_snapshot +- [ ] modify_cluster_snapshot_schedule - [ ] modify_cluster_subnet_group - [ ] modify_event_subscription - [X] modify_snapshot_copy_retention_period +- [ ] modify_snapshot_schedule - [ ] purchase_reserved_node_offering - [ ] reboot_cluster - [ ] reset_cluster_parameter_group @@ -3744,6 +4374,40 @@ - [ ] tag_resources - [ ] untag_resources +## robomaker - 0% implemented +- [ ] batch_describe_simulation_job +- [ ] cancel_simulation_job +- [ ] create_deployment_job +- [ ] create_fleet +- [ ] create_robot +- [ ] create_robot_application +- [ ] create_robot_application_version +- [ ] create_simulation_application +- [ ] create_simulation_application_version +- [ ] create_simulation_job +- [ ] delete_fleet +- [ ] delete_robot +- [ ] delete_robot_application +- [ ] delete_simulation_application +- [ ] deregister_robot +- [ ] describe_deployment_job +- [ ] describe_fleet +- [ ] describe_robot +- [ ] describe_robot_application +- [ ] 
describe_simulation_application +- [ ] describe_simulation_job +- [ ] list_deployment_jobs +- [ ] list_fleets +- [ ] list_robot_applications +- [ ] list_robots +- [ ] list_simulation_applications +- [ ] list_simulation_jobs +- [ ] register_robot +- [ ] restart_simulation_job +- [ ] sync_deployment_job +- [ ] update_robot_application +- [ ] update_simulation_application + ## route53 - 12% implemented - [ ] associate_vpc_with_hosted_zone - [ ] change_resource_record_sets @@ -3828,7 +4492,31 @@ - [ ] update_tags_for_domain - [ ] view_billing -## s3 - 15% implemented +## route53resolver - 0% implemented +- [ ] associate_resolver_endpoint_ip_address +- [ ] associate_resolver_rule +- [ ] create_resolver_endpoint +- [ ] create_resolver_rule +- [ ] delete_resolver_endpoint +- [ ] delete_resolver_rule +- [ ] disassociate_resolver_endpoint_ip_address +- [ ] disassociate_resolver_rule +- [ ] get_resolver_endpoint +- [ ] get_resolver_rule +- [ ] get_resolver_rule_association +- [ ] get_resolver_rule_policy +- [ ] list_resolver_endpoint_ip_addresses +- [ ] list_resolver_endpoints +- [ ] list_resolver_rule_associations +- [ ] list_resolver_rules +- [ ] list_tags_for_resource +- [ ] put_resolver_rule_policy +- [ ] tag_resource +- [ ] untag_resource +- [ ] update_resolver_endpoint +- [ ] update_resolver_rule + +## s3 - 13% implemented - [ ] abort_multipart_upload - [ ] complete_multipart_upload - [ ] copy_object @@ -3848,6 +4536,7 @@ - [ ] delete_object - [ ] delete_object_tagging - [ ] delete_objects +- [ ] delete_public_access_block - [ ] get_bucket_accelerate_configuration - [X] get_bucket_acl - [ ] get_bucket_analytics_configuration @@ -3862,6 +4551,7 @@ - [ ] get_bucket_notification - [ ] get_bucket_notification_configuration - [X] get_bucket_policy +- [ ] get_bucket_policy_status - [ ] get_bucket_replication - [ ] get_bucket_request_payment - [ ] get_bucket_tagging @@ -3869,8 +4559,12 @@ - [ ] get_bucket_website - [ ] get_object - [ ] get_object_acl +- [ ] get_object_legal_hold +- [ ] get_object_lock_configuration +- [ ] get_object_retention - [ ] get_object_tagging - [ ] get_object_torrent +- [ ] get_public_access_block - [ ] head_bucket - [ ] head_object - [ ] list_bucket_analytics_configurations @@ -3902,56 +4596,97 @@ - [ ] put_bucket_website - [ ] put_object - [ ] put_object_acl +- [ ] put_object_legal_hold +- [ ] put_object_lock_configuration +- [ ] put_object_retention - [ ] put_object_tagging +- [ ] put_public_access_block - [ ] restore_object - [ ] select_object_content - [ ] upload_part - [ ] upload_part_copy +## s3control - 0% implemented +- [ ] delete_public_access_block +- [ ] get_public_access_block +- [ ] put_public_access_block + ## sagemaker - 0% implemented - [ ] add_tags +- [ ] create_algorithm +- [ ] create_code_repository +- [ ] create_compilation_job - [ ] create_endpoint - [ ] create_endpoint_config - [ ] create_hyper_parameter_tuning_job +- [ ] create_labeling_job - [ ] create_model +- [ ] create_model_package - [ ] create_notebook_instance - [ ] create_notebook_instance_lifecycle_config - [ ] create_presigned_notebook_instance_url - [ ] create_training_job - [ ] create_transform_job +- [ ] create_workteam +- [ ] delete_algorithm +- [ ] delete_code_repository - [ ] delete_endpoint - [ ] delete_endpoint_config - [ ] delete_model +- [ ] delete_model_package - [ ] delete_notebook_instance - [ ] delete_notebook_instance_lifecycle_config - [ ] delete_tags +- [ ] delete_workteam +- [ ] describe_algorithm +- [ ] describe_code_repository +- [ ] describe_compilation_job - [ ] 
describe_endpoint - [ ] describe_endpoint_config - [ ] describe_hyper_parameter_tuning_job +- [ ] describe_labeling_job - [ ] describe_model +- [ ] describe_model_package - [ ] describe_notebook_instance - [ ] describe_notebook_instance_lifecycle_config +- [ ] describe_subscribed_workteam - [ ] describe_training_job - [ ] describe_transform_job +- [ ] describe_workteam +- [ ] get_search_suggestions +- [ ] list_algorithms +- [ ] list_code_repositories +- [ ] list_compilation_jobs - [ ] list_endpoint_configs - [ ] list_endpoints - [ ] list_hyper_parameter_tuning_jobs +- [ ] list_labeling_jobs +- [ ] list_labeling_jobs_for_workteam +- [ ] list_model_packages - [ ] list_models - [ ] list_notebook_instance_lifecycle_configs - [ ] list_notebook_instances +- [ ] list_subscribed_workteams - [ ] list_tags - [ ] list_training_jobs - [ ] list_training_jobs_for_hyper_parameter_tuning_job - [ ] list_transform_jobs +- [ ] list_workteams +- [ ] render_ui_template +- [ ] search - [ ] start_notebook_instance +- [ ] stop_compilation_job - [ ] stop_hyper_parameter_tuning_job +- [ ] stop_labeling_job - [ ] stop_notebook_instance - [ ] stop_training_job - [ ] stop_transform_job +- [ ] update_code_repository - [ ] update_endpoint - [ ] update_endpoint_weights_and_capacities - [ ] update_notebook_instance - [ ] update_notebook_instance_lifecycle_config +- [ ] update_workteam ## sagemaker-runtime - 0% implemented - [ ] invoke_endpoint @@ -3988,13 +4723,47 @@ - [ ] update_secret - [ ] update_secret_version_stage +## securityhub - 0% implemented +- [ ] accept_invitation +- [ ] batch_disable_standards +- [ ] batch_enable_standards +- [ ] batch_import_findings +- [ ] create_insight +- [ ] create_members +- [ ] decline_invitations +- [ ] delete_insight +- [ ] delete_invitations +- [ ] delete_members +- [ ] disable_import_findings_for_product +- [ ] disable_security_hub +- [ ] disassociate_from_master_account +- [ ] disassociate_members +- [ ] enable_import_findings_for_product +- [ ] enable_security_hub +- [ ] get_enabled_standards +- [ ] get_findings +- [ ] get_insight_results +- [ ] get_insights +- [ ] get_invitations_count +- [ ] get_master_account +- [ ] get_members +- [ ] invite_members +- [ ] list_enabled_products_for_import +- [ ] list_invitations +- [ ] list_members +- [ ] update_findings +- [ ] update_insight + ## serverlessrepo - 0% implemented - [ ] create_application - [ ] create_application_version - [ ] create_cloud_formation_change_set +- [ ] create_cloud_formation_template - [ ] delete_application - [ ] get_application - [ ] get_application_policy +- [ ] get_cloud_formation_template +- [ ] list_application_dependencies - [ ] list_application_versions - [ ] list_applications - [ ] put_application_policy @@ -4004,7 +4773,10 @@ - [ ] accept_portfolio_share - [ ] associate_principal_with_portfolio - [ ] associate_product_with_portfolio +- [ ] associate_service_action_with_provisioning_artifact - [ ] associate_tag_option_with_resource +- [ ] batch_associate_service_action_with_provisioning_artifact +- [ ] batch_disassociate_service_action_from_provisioning_artifact - [ ] copy_product - [ ] create_constraint - [ ] create_portfolio @@ -4012,6 +4784,7 @@ - [ ] create_product - [ ] create_provisioned_product_plan - [ ] create_provisioning_artifact +- [ ] create_service_action - [ ] create_tag_option - [ ] delete_constraint - [ ] delete_portfolio @@ -4019,10 +4792,12 @@ - [ ] delete_product - [ ] delete_provisioned_product_plan - [ ] delete_provisioning_artifact +- [ ] delete_service_action - [ ] 
delete_tag_option - [ ] describe_constraint - [ ] describe_copy_product_status - [ ] describe_portfolio +- [ ] describe_portfolio_share_status - [ ] describe_product - [ ] describe_product_as_admin - [ ] describe_product_view @@ -4031,22 +4806,32 @@ - [ ] describe_provisioning_artifact - [ ] describe_provisioning_parameters - [ ] describe_record +- [ ] describe_service_action - [ ] describe_tag_option +- [ ] disable_aws_organizations_access - [ ] disassociate_principal_from_portfolio - [ ] disassociate_product_from_portfolio +- [ ] disassociate_service_action_from_provisioning_artifact - [ ] disassociate_tag_option_from_resource +- [ ] enable_aws_organizations_access - [ ] execute_provisioned_product_plan +- [ ] execute_provisioned_product_service_action +- [ ] get_aws_organizations_access_status - [ ] list_accepted_portfolio_shares - [ ] list_constraints_for_portfolio - [ ] list_launch_paths +- [ ] list_organization_portfolio_access - [ ] list_portfolio_access - [ ] list_portfolios - [ ] list_portfolios_for_product - [ ] list_principals_for_portfolio - [ ] list_provisioned_product_plans - [ ] list_provisioning_artifacts +- [ ] list_provisioning_artifacts_for_service_action - [ ] list_record_history - [ ] list_resources_for_tag_option +- [ ] list_service_actions +- [ ] list_service_actions_for_provisioning_artifact - [ ] list_tag_options - [ ] provision_product - [ ] reject_portfolio_share @@ -4060,15 +4845,18 @@ - [ ] update_product - [ ] update_provisioned_product - [ ] update_provisioning_artifact +- [ ] update_service_action - [ ] update_tag_option ## servicediscovery - 0% implemented +- [ ] create_http_namespace - [ ] create_private_dns_namespace - [ ] create_public_dns_namespace - [ ] create_service - [ ] delete_namespace - [ ] delete_service - [ ] deregister_instance +- [ ] discover_instances - [ ] get_instance - [ ] get_instances_health_status - [ ] get_namespace @@ -4174,19 +4962,56 @@ - [ ] update_emergency_contact_settings - [ ] update_subscription +## signer - 0% implemented +- [ ] cancel_signing_profile +- [ ] describe_signing_job +- [ ] get_signing_platform +- [ ] get_signing_profile +- [ ] list_signing_jobs +- [ ] list_signing_platforms +- [ ] list_signing_profiles +- [ ] put_signing_profile +- [ ] start_signing_job + ## sms - 0% implemented +- [ ] create_app - [ ] create_replication_job +- [ ] delete_app +- [ ] delete_app_launch_configuration +- [ ] delete_app_replication_configuration - [ ] delete_replication_job - [ ] delete_server_catalog - [ ] disassociate_connector +- [ ] generate_change_set +- [ ] generate_template +- [ ] get_app +- [ ] get_app_launch_configuration +- [ ] get_app_replication_configuration - [ ] get_connectors - [ ] get_replication_jobs - [ ] get_replication_runs - [ ] get_servers - [ ] import_server_catalog +- [ ] launch_app +- [ ] list_apps +- [ ] put_app_launch_configuration +- [ ] put_app_replication_configuration +- [ ] start_app_replication - [ ] start_on_demand_replication_run +- [ ] stop_app_replication +- [ ] terminate_app +- [ ] update_app - [ ] update_replication_job +## sms-voice - 0% implemented +- [ ] create_configuration_set +- [ ] create_configuration_set_event_destination +- [ ] delete_configuration_set +- [ ] delete_configuration_set_event_destination +- [ ] get_configuration_set_event_destinations +- [ ] send_voice_message +- [ ] update_configuration_set_event_destination + ## snowball - 0% implemented - [ ] cancel_cluster - [ ] cancel_job @@ -4261,9 +5086,10 @@ - [X] tag_queue - [X] untag_queue -## ssm - 11% implemented +## ssm - 
10% implemented - [X] add_tags_to_resource - [ ] cancel_command +- [ ] cancel_maintenance_window_execution - [ ] create_activation - [ ] create_association - [ ] create_association_batch @@ -4304,15 +5130,19 @@ - [ ] describe_maintenance_window_execution_task_invocations - [ ] describe_maintenance_window_execution_tasks - [ ] describe_maintenance_window_executions +- [ ] describe_maintenance_window_schedule - [ ] describe_maintenance_window_targets - [ ] describe_maintenance_window_tasks - [ ] describe_maintenance_windows +- [ ] describe_maintenance_windows_for_target - [ ] describe_parameters - [ ] describe_patch_baselines - [ ] describe_patch_group_state - [ ] describe_patch_groups +- [ ] describe_sessions - [ ] get_automation_execution - [X] get_command_invocation +- [ ] get_connection_status - [ ] get_default_patch_baseline - [ ] get_deployable_patch_snapshot_for_instance - [ ] get_document @@ -4351,11 +5181,14 @@ - [ ] register_target_with_maintenance_window - [ ] register_task_with_maintenance_window - [X] remove_tags_from_resource +- [ ] resume_session - [ ] send_automation_signal - [X] send_command - [ ] start_associations_once - [ ] start_automation_execution +- [ ] start_session - [ ] stop_automation_execution +- [ ] terminate_session - [ ] update_association - [ ] update_association_status - [ ] update_document @@ -4518,6 +5351,7 @@ ## transcribe - 0% implemented - [ ] create_vocabulary +- [ ] delete_transcription_job - [ ] delete_vocabulary - [ ] get_transcription_job - [ ] get_vocabulary @@ -4526,7 +5360,31 @@ - [ ] start_transcription_job - [ ] update_vocabulary +## transfer - 0% implemented +- [ ] create_server +- [ ] create_user +- [ ] delete_server +- [ ] delete_ssh_public_key +- [ ] delete_user +- [ ] describe_server +- [ ] describe_user +- [ ] import_ssh_public_key +- [ ] list_servers +- [ ] list_tags_for_resource +- [ ] list_users +- [ ] start_server +- [ ] stop_server +- [ ] tag_resource +- [ ] test_identity_provider +- [ ] untag_resource +- [ ] update_server +- [ ] update_user + ## translate - 0% implemented +- [ ] delete_terminology +- [ ] get_terminology +- [ ] import_terminology +- [ ] list_terminologies - [ ] translate_text ## waf - 0% implemented @@ -4545,6 +5403,7 @@ - [ ] delete_byte_match_set - [ ] delete_geo_match_set - [ ] delete_ip_set +- [ ] delete_logging_configuration - [ ] delete_permission_policy - [ ] delete_rate_based_rule - [ ] delete_regex_match_set @@ -4560,6 +5419,7 @@ - [ ] get_change_token_status - [ ] get_geo_match_set - [ ] get_ip_set +- [ ] get_logging_configuration - [ ] get_permission_policy - [ ] get_rate_based_rule - [ ] get_rate_based_rule_managed_keys @@ -4576,6 +5436,7 @@ - [ ] list_byte_match_sets - [ ] list_geo_match_sets - [ ] list_ip_sets +- [ ] list_logging_configurations - [ ] list_rate_based_rules - [ ] list_regex_match_sets - [ ] list_regex_pattern_sets @@ -4586,6 +5447,7 @@ - [ ] list_subscribed_rule_groups - [ ] list_web_acls - [ ] list_xss_match_sets +- [ ] put_logging_configuration - [ ] put_permission_policy - [ ] update_byte_match_set - [ ] update_geo_match_set @@ -4617,6 +5479,7 @@ - [ ] delete_byte_match_set - [ ] delete_geo_match_set - [ ] delete_ip_set +- [ ] delete_logging_configuration - [ ] delete_permission_policy - [ ] delete_rate_based_rule - [ ] delete_regex_match_set @@ -4633,6 +5496,7 @@ - [ ] get_change_token_status - [ ] get_geo_match_set - [ ] get_ip_set +- [ ] get_logging_configuration - [ ] get_permission_policy - [ ] get_rate_based_rule - [ ] get_rate_based_rule_managed_keys @@ -4650,6 +5514,7 @@ - [ ] 
list_byte_match_sets - [ ] list_geo_match_sets - [ ] list_ip_sets +- [ ] list_logging_configurations - [ ] list_rate_based_rules - [ ] list_regex_match_sets - [ ] list_regex_pattern_sets @@ -4661,6 +5526,7 @@ - [ ] list_subscribed_rule_groups - [ ] list_web_acls - [ ] list_xss_match_sets +- [ ] put_logging_configuration - [ ] put_permission_policy - [ ] update_byte_match_set - [ ] update_geo_match_set @@ -4709,6 +5575,7 @@ - [ ] get_document_version - [ ] get_folder - [ ] get_folder_path +- [ ] get_resources - [ ] initiate_document_version_upload - [ ] remove_all_resource_permissions - [ ] remove_resource_permission @@ -4758,13 +5625,22 @@ - [ ] create_workspaces - [ ] delete_ip_group - [ ] delete_tags +- [ ] delete_workspace_image +- [ ] describe_account +- [ ] describe_account_modifications +- [ ] describe_client_properties - [ ] describe_ip_groups - [ ] describe_tags - [ ] describe_workspace_bundles - [ ] describe_workspace_directories +- [ ] describe_workspace_images - [ ] describe_workspaces - [ ] describe_workspaces_connection_status - [ ] disassociate_ip_groups +- [ ] import_workspace_image +- [ ] list_available_management_cidr_ranges +- [ ] modify_account +- [ ] modify_client_properties - [ ] modify_workspace_properties - [ ] modify_workspace_state - [ ] reboot_workspaces @@ -4777,10 +5653,21 @@ ## xray - 0% implemented - [ ] batch_get_traces +- [ ] create_group +- [ ] create_sampling_rule +- [ ] delete_group +- [ ] delete_sampling_rule - [ ] get_encryption_config +- [ ] get_group +- [ ] get_groups +- [ ] get_sampling_rules +- [ ] get_sampling_statistic_summaries +- [ ] get_sampling_targets - [ ] get_service_graph - [ ] get_trace_graph - [ ] get_trace_summaries - [ ] put_encryption_config - [ ] put_telemetry_records - [ ] put_trace_segments +- [ ] update_group +- [ ] update_sampling_rule diff --git a/moto/iot/exceptions.py b/moto/iot/exceptions.py index 7bbdb706df87..72cf735b2778 100644 --- a/moto/iot/exceptions.py +++ b/moto/iot/exceptions.py @@ -1,33 +1,42 @@ -from __future__ import unicode_literals -from moto.core.exceptions import JsonRESTError - - -class IoTClientError(JsonRESTError): - code = 400 - - -class ResourceNotFoundException(IoTClientError): - def __init__(self): - self.code = 404 - super(ResourceNotFoundException, self).__init__( - "ResourceNotFoundException", - "The specified resource does not exist" - ) - - -class InvalidRequestException(IoTClientError): - def __init__(self, msg=None): - self.code = 400 - super(InvalidRequestException, self).__init__( - "InvalidRequestException", - msg or "The request is not valid." - ) - - -class VersionConflictException(IoTClientError): - def __init__(self, name): - self.code = 409 - super(VersionConflictException, self).__init__( - 'VersionConflictException', - 'The version for thing %s does not match the expected version.' % name - ) +from __future__ import unicode_literals +from moto.core.exceptions import JsonRESTError + + +class IoTClientError(JsonRESTError): + code = 400 + + +class ResourceNotFoundException(IoTClientError): + def __init__(self): + self.code = 404 + super(ResourceNotFoundException, self).__init__( + "ResourceNotFoundException", + "The specified resource does not exist" + ) + + +class InvalidRequestException(IoTClientError): + def __init__(self, msg=None): + self.code = 400 + super(InvalidRequestException, self).__init__( + "InvalidRequestException", + msg or "The request is not valid." 
+ ) + + +class InvalidStateTransitionException(IoTClientError): + def __init__(self, msg=None): + self.code = 409 + super(InvalidStateTransitionException, self).__init__( + "InvalidStateTransitionException", + msg or "An attempt was made to change to an invalid state." + ) + + +class VersionConflictException(IoTClientError): + def __init__(self, name): + self.code = 409 + super(VersionConflictException, self).__init__( + 'VersionConflictException', + 'The version for thing %s does not match the expected version.' % name + ) diff --git a/moto/iot/models.py b/moto/iot/models.py index 1279a5baa710..a5128dcb23b0 100644 --- a/moto/iot/models.py +++ b/moto/iot/models.py @@ -15,6 +15,7 @@ from .exceptions import ( ResourceNotFoundException, InvalidRequestException, + InvalidStateTransitionException, VersionConflictException ) @@ -247,7 +248,6 @@ def __init__(self, job_id, targets, document_source, document, description, pres self.document_parameters = document_parameters def to_dict(self): - obj = { 'jobArn': self.job_arn, 'jobId': self.job_id, @@ -260,7 +260,7 @@ def to_dict(self): 'comment': self.comment, 'createdAt': self.created_at, 'lastUpdatedAt': self.last_updated_at, - 'completedAt': self.completedAt, + 'completedAt': self.completed_at, 'jobProcessDetails': self.job_process_details, 'documentParameters': self.document_parameters, 'document': self.document, @@ -290,13 +290,13 @@ def __init__(self, job_id, thing_arn, status='QUEUED', force_canceled=False, sta self.version_number = 123 self.approximate_seconds_before_time_out = 123 - def to_dict(self): + def to_get_dict(self): obj = { 'jobId': self.job_id, 'status': self.status, - 'forceCancel': self.force_canceled, + 'forceCanceled': self.force_canceled, 'statusDetails': {'detailsMap': self.status_details_map}, - 'thing_arn': self.thing_arn, + 'thingArn': self.thing_arn, 'queuedAt': self.queued_at, 'startedAt': self.started_at, 'lastUpdatedAt': self.last_updated_at, @@ -307,6 +307,21 @@ def to_dict(self): return obj + def to_dict(self): + obj = { + 'jobId': self.job_id, + 'thingArn': self.thing_arn, + 'jobExecutionSummary': { + 'status': self.status, + 'queuedAt': self.queued_at, + 'startedAt': self.started_at, + 'lastUpdatedAt': self.last_updated_at, + 'executionNumber': self.execution_number, + } + } + + return obj + class IoTBackend(BaseBackend): def __init__(self, region_name=None): @@ -760,24 +775,114 @@ def create_job(self, job_id, targets, document_source, document, description, pr self.jobs[job_id] = job for thing_arn in targets: - thing_name = thing_arn.split(':')[-1] + thing_name = thing_arn.split(':')[-1].split('/')[-1] job_execution = FakeJobExecution(job_id, thing_arn) self.job_executions[(job_id, thing_name)] = job_execution return job.job_arn, job_id, description def describe_job(self, job_id): - return self.jobs[job_id] + jobs = [_ for _ in self.jobs.values() if _.job_id == job_id] + if len(jobs) == 0: + raise ResourceNotFoundException() + return jobs[0] def get_job_document(self, job_id): return self.jobs[job_id] + def list_jobs(self, status, target_selection, max_results, token, thing_group_name, thing_group_id): + # TODO: implement filters + all_jobs = [_.to_dict() for _ in self.jobs.values()] + filtered_jobs = all_jobs + + if token is None: + jobs = filtered_jobs[0:max_results] + next_token = str(max_results) if len(filtered_jobs) > max_results else None + else: + token = int(token) + jobs = filtered_jobs[token:token + max_results] + next_token = str(token + max_results) if len(filtered_jobs) > token + max_results else 
None + + return jobs, next_token + def describe_job_execution(self, job_id, thing_name, execution_number): - # TODO filter with execution number - return self.job_executions[(job_id, thing_name)] + try: + job_execution = self.job_executions[(job_id, thing_name)] + except KeyError: + raise ResourceNotFoundException() + + if job_execution is None or \ + (execution_number is not None and job_execution.execution_number != execution_number): + raise ResourceNotFoundException() + + return job_execution + + def cancel_job_execution(self, job_id, thing_name, force, expected_version, status_details): + job_execution = self.job_executions[(job_id, thing_name)] + + if job_execution is None: + raise ResourceNotFoundException() + + job_execution.force_canceled = force if force is not None else job_execution.force_canceled + # TODO: implement expected_version and status_details (at most 10 can be specified) + + if job_execution.status == 'IN_PROGRESS' and force: + job_execution.status = 'CANCELED' + self.job_executions[(job_id, thing_name)] = job_execution + elif job_execution.status != 'IN_PROGRESS': + job_execution.status = 'CANCELED' + self.job_executions[(job_id, thing_name)] = job_execution + else: + raise InvalidStateTransitionException() + + def delete_job_execution(self, job_id, thing_name, execution_number, force): + job_execution = self.job_executions[(job_id, thing_name)] + + if job_execution.execution_number != execution_number: + raise ResourceNotFoundException() + + if job_execution.status == 'IN_PROGRESS' and force: + del self.job_executions[(job_id, thing_name)] + elif job_execution.status != 'IN_PROGRESS': + del self.job_executions[(job_id, thing_name)] + else: + raise InvalidStateTransitionException() def list_job_executions_for_job(self, job_id, status, max_results, next_token): - job_executions = [self.job_executions[je] for je in self.job_executions if je[0] == job_id] - # TODO: implement filters + job_executions = [self.job_executions[je].to_dict() for je in self.job_executions if je[0] == job_id] + + if status is not None: + job_executions = list(filter(lambda elem: + status in elem["status"] and + elem["status"] == status, job_executions)) + + token = next_token + if token is None: + job_executions = job_executions[0:max_results] + next_token = str(max_results) if len(job_executions) > max_results else None + else: + token = int(token) + job_executions = job_executions[token:token + max_results] + next_token = str(token + max_results) if len(job_executions) > token + max_results else None + + return job_executions, next_token + + def list_job_executions_for_thing(self, thing_name, status, max_results, next_token): + job_executions = [self.job_executions[je].to_dict() for je in self.job_executions if je[1] == thing_name] + + if status is not None: + job_executions = list(filter(lambda elem: + status in elem["status"] and + elem["status"] == status, job_executions)) + + token = next_token + if token is None: + job_executions = job_executions[0:max_results] + next_token = str(max_results) if len(job_executions) > max_results else None + else: + token = int(token) + job_executions = job_executions[token:token + max_results] + next_token = str(token + max_results) if len(job_executions) > token + max_results else None + return job_executions, next_token diff --git a/moto/iot/responses.py b/moto/iot/responses.py index 14302cc2f037..577992e7b308 100644 --- a/moto/iot/responses.py +++ b/moto/iot/responses.py @@ -160,15 +160,83 @@ def get_job_document(self): # TODO: needs to be 
implemented to get document_source's content from S3
        return json.dumps({'document': ''})
 
+    def list_jobs(self):
+        status = self._get_param("status")
+        target_selection = self._get_param("targetSelection")
+        max_results = self._get_int_param("maxResults", 50)  # not the default, but makes testing easier
+        previous_next_token = self._get_param("nextToken")
+        thing_group_name = self._get_param("thingGroupName")
+        thing_group_id = self._get_param("thingGroupId")
+        jobs, next_token = self.iot_backend.list_jobs(status=status,
+                                                      target_selection=target_selection,
+                                                      max_results=max_results,
+                                                      token=previous_next_token,
+                                                      thing_group_name=thing_group_name,
+                                                      thing_group_id=thing_group_id)
+
+        return json.dumps(dict(jobs=jobs, nextToken=next_token))
+
+    def describe_job_execution(self):
+        job_id = self._get_param("jobId")
+        thing_name = self._get_param("thingName")
+        execution_number = self._get_int_param("executionNumber")
+        job_execution = self.iot_backend.describe_job_execution(job_id=job_id,
+                                                                thing_name=thing_name,
+                                                                execution_number=execution_number)
+
+        return json.dumps(dict(execution=job_execution.to_get_dict()))
+
+    def cancel_job_execution(self):
+        job_id = self._get_param("jobId")
+        thing_name = self._get_param("thingName")
+        force = self._get_bool_param("force")
+        expected_version = self._get_int_param("expectedVersion")
+        status_details = self._get_param("statusDetails")
+
+        self.iot_backend.cancel_job_execution(job_id=job_id,
+                                              thing_name=thing_name,
+                                              force=force,
+                                              expected_version=expected_version,
+                                              status_details=status_details)
+
+        return json.dumps(dict())
+
+    def delete_job_execution(self):
+        job_id = self._get_param("jobId")
+        thing_name = self._get_param("thingName")
+        execution_number = self._get_int_param("executionNumber")
+        force = self._get_bool_param("force")
+
+        self.iot_backend.delete_job_execution(job_id=job_id,
+                                              thing_name=thing_name,
+                                              execution_number=execution_number,
+                                              force=force)
+
+        return json.dumps(dict())
+
     def list_job_executions_for_job(self):
-        job_executions, next_token = self.iot_backend.list_job_executions_for_job(job_id=self._get_param("jobId"),
-                                                                                  status=self._get_param("status"),
-                                                                                  max_results=self._get_param(
-                                                                                      "maxResults"),
-                                                                                  next_token=self._get_param(
-                                                                                      "nextToken"))
-
-        return json.dumps(dict(executionSummaries=[_.to_dict() for _ in job_executions], nextToken=next_token))
+        job_id = self._get_param("jobId")
+        status = self._get_param("status")
+        max_results = self._get_int_param("maxResults", 50)  # not the default, but makes testing easier
+        next_token = self._get_param("nextToken")
+        job_executions, next_token = self.iot_backend.list_job_executions_for_job(job_id=job_id,
+                                                                                  status=status,
+                                                                                  max_results=max_results,
+                                                                                  next_token=next_token)
+
+        return json.dumps(dict(executionSummaries=job_executions, nextToken=next_token))
+
+    def list_job_executions_for_thing(self):
+        thing_name = self._get_param("thingName")
+        status = self._get_param("status")
+        max_results = self._get_int_param("maxResults", 50)  # not the default, but makes testing easier
+        next_token = self._get_param("nextToken")
+        job_executions, next_token = self.iot_backend.list_job_executions_for_thing(thing_name=thing_name,
+                                                                                    status=status,
+                                                                                    max_results=max_results,
+                                                                                    next_token=next_token)
+
+        return json.dumps(dict(executionSummaries=job_executions, nextToken=next_token))
 
     def create_keys_and_certificate(self):
         set_as_active = self._get_bool_param("setAsActive")
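The paging contract implemented above is offset-based: `nextToken` is simply a stringified index into the filtered result list, handed back to the caller until the list is exhausted. A minimal sketch of driving that contract through boto3 against the mock follows; the thing and job names here are invented for illustration and are not part of this patch.

import json

import boto3
from moto import mock_iot


@mock_iot
def page_through_job_executions():
    client = boto3.client('iot', region_name='eu-west-1')
    thing_arn = client.create_thing(thingName='example-thing')['thingArn']
    client.create_job(jobId='ExampleJob', targets=[thing_arn],
                      document=json.dumps({'field': 'value'}))

    summaries, token = [], None
    while True:
        kwargs = {'jobId': 'ExampleJob', 'maxResults': 1}
        if token:
            # nextToken is an opaque offset; pass it back verbatim.
            kwargs['nextToken'] = token
        page = client.list_job_executions_for_job(**kwargs)
        summaries.extend(page['executionSummaries'])
        token = page.get('nextToken')
        if not token:
            return summaries

Because a token is only emitted while more filtered results remain, the loop terminates as soon as a page comes back without one.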
diff --git a/tests/test_iot/test_iot.py b/tests/test_iot/test_iot.py
index d5f277d1d3d2..d39e9818a9e6 100644
--- a/tests/test_iot/test_iot.py
+++ b/tests/test_iot/test_iot.py
@@ -4,9 +4,9 @@
 import sure #noqa
 
 import boto3
+from botocore.exceptions import ClientError
 
 from moto import mock_iot
 
-
 @mock_iot
 def test_attach_policy():
     client = boto3.client('iot', region_name='ap-northeast-1')
@@ -711,6 +711,69 @@ def test_create_job():
     job.should.have.key('description')
 
 
+@mock_iot
+def test_list_jobs():
+    client = boto3.client('iot', region_name='eu-west-1')
+    name = "my-thing"
+    job_id = "TestJob"
+    # thing
+    # job_document = {
+    #     "field": "value"
+    # }
+    thing = client.create_thing(thingName=name)
+    thing.should.have.key('thingName').which.should.equal(name)
+    thing.should.have.key('thingArn')
+
+    # job document
+    job_document = {
+        "field": "value"
+    }
+
+    job1 = client.create_job(
+        jobId=job_id,
+        targets=[thing["thingArn"]],
+        document=json.dumps(job_document),
+        description="Description",
+        presignedUrlConfig={
+            'roleArn': 'arn:aws:iam::1:role/service-role/iot_job_role',
+            'expiresInSec': 123
+        },
+        targetSelection="CONTINUOUS",
+        jobExecutionsRolloutConfig={
+            'maximumPerMinute': 10
+        }
+    )
+
+    job1.should.have.key('jobId').which.should.equal(job_id)
+    job1.should.have.key('jobArn')
+    job1.should.have.key('description')
+
+    job2 = client.create_job(
+        jobId=job_id+"1",
+        targets=[thing["thingArn"]],
+        document=json.dumps(job_document),
+        description="Description",
+        presignedUrlConfig={
+            'roleArn': 'arn:aws:iam::1:role/service-role/iot_job_role',
+            'expiresInSec': 123
+        },
+        targetSelection="CONTINUOUS",
+        jobExecutionsRolloutConfig={
+            'maximumPerMinute': 10
+        }
+    )
+
+    job2.should.have.key('jobId').which.should.equal(job_id+"1")
+    job2.should.have.key('jobArn')
+    job2.should.have.key('description')
+
+    jobs = client.list_jobs()
+    jobs.should.have.key('jobs')
+    jobs.should_not.have.key('nextToken')
+    jobs['jobs'][0].should.have.key('jobId').which.should.equal(job_id)
+    jobs['jobs'][1].should.have.key('jobId').which.should.equal(job_id+"1")
+
+
 @mock_iot
 def test_describe_job():
     client = boto3.client('iot', region_name='eu-west-1')
@@ -875,6 +938,162 @@ def test_get_job_document_with_document():
     job_document = client.get_job_document(jobId=job_id)
     job_document.should.have.key('document').which.should.equal("{\"field\": \"value\"}")
 
+
+@mock_iot
+def test_describe_job_execution():
+    client = boto3.client('iot', region_name='eu-west-1')
+    name = "my-thing"
+    job_id = "TestJob"
+    # thing
+    thing = client.create_thing(thingName=name)
+    thing.should.have.key('thingName').which.should.equal(name)
+    thing.should.have.key('thingArn')
+
+    # job document
+    job_document = {
+        "field": "value"
+    }
+
+    job = client.create_job(
+        jobId=job_id,
+        targets=[thing["thingArn"]],
+        document=json.dumps(job_document),
+        description="Description",
+        presignedUrlConfig={
+            'roleArn': 'arn:aws:iam::1:role/service-role/iot_job_role',
+            'expiresInSec': 123
+        },
+        targetSelection="CONTINUOUS",
+        jobExecutionsRolloutConfig={
+            'maximumPerMinute': 10
+        }
+    )
+
+    job.should.have.key('jobId').which.should.equal(job_id)
+    job.should.have.key('jobArn')
+    job.should.have.key('description')
+
+    job_execution = client.describe_job_execution(jobId=job_id, thingName=name)
+    job_execution.should.have.key('execution')
+    job_execution['execution'].should.have.key('jobId').which.should.equal(job_id)
+    job_execution['execution'].should.have.key('status').which.should.equal('QUEUED')
+    job_execution['execution'].should.have.key('forceCanceled').which.should.equal(False)
+    
job_execution['execution'].should.have.key('statusDetails').which.should.equal({'detailsMap': {}}) + job_execution['execution'].should.have.key('thingArn').which.should.equal(thing["thingArn"]) + job_execution['execution'].should.have.key('queuedAt') + job_execution['execution'].should.have.key('startedAt') + job_execution['execution'].should.have.key('lastUpdatedAt') + job_execution['execution'].should.have.key('executionNumber').which.should.equal(123) + job_execution['execution'].should.have.key('versionNumber').which.should.equal(123) + job_execution['execution'].should.have.key('approximateSecondsBeforeTimedOut').which.should.equal(123) + + job_execution = client.describe_job_execution(jobId=job_id, thingName=name, executionNumber=123) + job_execution.should.have.key('execution') + job_execution['execution'].should.have.key('jobId').which.should.equal(job_id) + job_execution['execution'].should.have.key('status').which.should.equal('QUEUED') + job_execution['execution'].should.have.key('forceCanceled').which.should.equal(False) + job_execution['execution'].should.have.key('statusDetails').which.should.equal({'detailsMap': {}}) + job_execution['execution'].should.have.key('thingArn').which.should.equal(thing["thingArn"]) + job_execution['execution'].should.have.key('queuedAt') + job_execution['execution'].should.have.key('startedAt') + job_execution['execution'].should.have.key('lastUpdatedAt') + job_execution['execution'].should.have.key('executionNumber').which.should.equal(123) + job_execution['execution'].should.have.key('versionNumber').which.should.equal(123) + job_execution['execution'].should.have.key('approximateSecondsBeforeTimedOut').which.should.equal(123) + + try: + client.describe_job_execution(jobId=job_id, thingName=name, executionNumber=456) + except ClientError as exc: + error_code = exc.response['Error']['Code'] + error_code.should.equal('ResourceNotFoundException') + else: + raise Exception("Should have raised error") + + +@mock_iot +def test_cancel_job_execution(): + client = boto3.client('iot', region_name='eu-west-1') + name = "my-thing" + job_id = "TestJob" + # thing + thing = client.create_thing(thingName=name) + thing.should.have.key('thingName').which.should.equal(name) + thing.should.have.key('thingArn') + + # job document + job_document = { + "field": "value" + } + + job = client.create_job( + jobId=job_id, + targets=[thing["thingArn"]], + document=json.dumps(job_document), + description="Description", + presignedUrlConfig={ + 'roleArn': 'arn:aws:iam::1:role/service-role/iot_job_role', + 'expiresInSec': 123 + }, + targetSelection="CONTINUOUS", + jobExecutionsRolloutConfig={ + 'maximumPerMinute': 10 + } + ) + + job.should.have.key('jobId').which.should.equal(job_id) + job.should.have.key('jobArn') + job.should.have.key('description') + + client.cancel_job_execution(jobId=job_id, thingName=name) + job_execution = client.describe_job_execution(jobId=job_id, thingName=name) + job_execution.should.have.key('execution') + job_execution['execution'].should.have.key('status').which.should.equal('CANCELED') + + +@mock_iot +def test_delete_job_execution(): + client = boto3.client('iot', region_name='eu-west-1') + name = "my-thing" + job_id = "TestJob" + # thing + thing = client.create_thing(thingName=name) + thing.should.have.key('thingName').which.should.equal(name) + thing.should.have.key('thingArn') + + # job document + job_document = { + "field": "value" + } + + job = client.create_job( + jobId=job_id, + targets=[thing["thingArn"]], + 
document=json.dumps(job_document), + description="Description", + presignedUrlConfig={ + 'roleArn': 'arn:aws:iam::1:role/service-role/iot_job_role', + 'expiresInSec': 123 + }, + targetSelection="CONTINUOUS", + jobExecutionsRolloutConfig={ + 'maximumPerMinute': 10 + } + ) + + job.should.have.key('jobId').which.should.equal(job_id) + job.should.have.key('jobArn') + job.should.have.key('description') + + client.delete_job_execution(jobId=job_id, thingName=name, executionNumber=123) + try: + client.describe_job_execution(jobId=job_id, thingName=name, executionNumber=123) + except ClientError as exc: + error_code = exc.response['Error']['Code'] + error_code.should.equal('ResourceNotFoundException') + else: + raise Exception("Should have raised error") + + @mock_iot def test_list_job_executions_for_job(): client = boto3.client('iot', region_name='eu-west-1') @@ -911,5 +1130,44 @@ def test_list_job_executions_for_job(): job_execution = client.list_job_executions_for_job(jobId=job_id) job_execution.should.have.key('executionSummaries') + job_execution['executionSummaries'][0].should.have.key('thingArn').which.should.equal(thing["thingArn"]) + + +@mock_iot +def test_list_job_executions_for_thing(): + client = boto3.client('iot', region_name='eu-west-1') + name = "my-thing" + job_id = "TestJob" + # thing + thing = client.create_thing(thingName=name) + thing.should.have.key('thingName').which.should.equal(name) + thing.should.have.key('thingArn') + + # job document + job_document = { + "field": "value" + } + + job = client.create_job( + jobId=job_id, + targets=[thing["thingArn"]], + document=json.dumps(job_document), + description="Description", + presignedUrlConfig={ + 'roleArn': 'arn:aws:iam::1:role/service-role/iot_job_role', + 'expiresInSec': 123 + }, + targetSelection="CONTINUOUS", + jobExecutionsRolloutConfig={ + 'maximumPerMinute': 10 + } + ) + + job.should.have.key('jobId').which.should.equal(job_id) + job.should.have.key('jobArn') + job.should.have.key('description') + + job_execution = client.list_job_executions_for_thing(thingName=name) + job_execution.should.have.key('executionSummaries') job_execution['executionSummaries'][0].should.have.key('jobId').which.should.equal(job_id) From 82f476bb46db1def6a485d056fe618acd7f64897 Mon Sep 17 00:00:00 2001 From: Stephan Date: Wed, 9 Jan 2019 16:18:22 +0100 Subject: [PATCH 013/658] adding more job mocks for IoT service --- moto/iot/models.py | 35 ++++++++++++++++- moto/iot/responses.py | 45 +++++++++++---------- tests/test_iot/test_iot.py | 80 ++++++++++++++++++++++++++++++++++++++ 3 files changed, 138 insertions(+), 22 deletions(-) diff --git a/moto/iot/models.py b/moto/iot/models.py index a5128dcb23b0..ee4211f535c5 100644 --- a/moto/iot/models.py +++ b/moto/iot/models.py @@ -226,12 +226,14 @@ def __init__(self, job_id, targets, document_source, document, description, pres self.targets = targets self.document_source = document_source self.document = document + self.force = False self.description = description self.presigned_url_config = presigned_url_config self.target_selection = target_selection self.job_executions_rollout_config = job_executions_rollout_config - self.status = None # IN_PROGRESS | CANCELED | COMPLETED + self.status = 'QUEUED' # IN_PROGRESS | CANCELED | COMPLETED self.comment = None + self.reason_code = None self.created_at = time.mktime(datetime(2015, 1, 1).timetuple()) self.last_updated_at = time.mktime(datetime(2015, 1, 1).timetuple()) self.completed_at = None @@ -258,6 +260,8 @@ def to_dict(self): 
'jobExecutionsRolloutConfig': self.job_executions_rollout_config, 'status': self.status, 'comment': self.comment, + 'forceCanceled': self.force, + 'reasonCode': self.reason_code, 'createdAt': self.created_at, 'lastUpdatedAt': self.last_updated_at, 'completedAt': self.completed_at, @@ -778,7 +782,7 @@ def create_job(self, job_id, targets, document_source, document, description, pr thing_name = thing_arn.split(':')[-1].split('/')[-1] job_execution = FakeJobExecution(job_id, thing_arn) self.job_executions[(job_id, thing_name)] = job_execution - return job.job_arn, job_id, description + return job def describe_job(self, job_id): jobs = [_ for _ in self.jobs.values() if _.job_id == job_id] @@ -786,6 +790,33 @@ def describe_job(self, job_id): raise ResourceNotFoundException() return jobs[0] + def delete_job(self, job_id, force): + job = self.jobs[job_id] + + if job.status == 'IN_PROGRESS' and force: + del self.jobs[job_id] + elif job.status != 'IN_PROGRESS': + del self.jobs[job_id] + else: + raise InvalidStateTransitionException() + + def cancel_job(self, job_id, reason_code, comment, force): + job = self.jobs[job_id] + + job.reason_code = reason_code if reason_code is not None else job.reason_code + job.comment = comment if comment is not None else job.comment + job.force = force if force is not None and force != job.force else job.force + job.status = 'CANCELED' + + if job.status == 'IN_PROGRESS' and force: + self.jobs[job_id] = job + elif job.status != 'IN_PROGRESS': + self.jobs[job_id] = job + else: + raise InvalidStateTransitionException() + + return job + def get_job_document(self, job_id): return self.jobs[job_id] diff --git a/moto/iot/responses.py b/moto/iot/responses.py index 577992e7b308..3dc95e9f6ccb 100644 --- a/moto/iot/responses.py +++ b/moto/iot/responses.py @@ -115,7 +115,7 @@ def update_thing(self): return json.dumps(dict()) def create_job(self): - job_arn, job_id, description = self.iot_backend.create_job( + job = self.iot_backend.create_job( job_id=self._get_param("jobId"), targets=self._get_param("targets"), description=self._get_param("description"), @@ -127,28 +127,33 @@ def create_job(self): document_parameters=self._get_param("documentParameters") ) - return json.dumps(dict(jobArn=job_arn, jobId=job_id, description=description)) + return json.dumps(job.to_dict()) def describe_job(self): job = self.iot_backend.describe_job(job_id=self._get_param("jobId")) - return json.dumps(dict( - documentSource=job.document_source, - job=dict( - comment=job.comment, - completedAt=job.completed_at, - createdAt=job.created_at, - description=job.description, - documentParameters=job.document_parameters, - jobArn=job.job_arn, - jobExecutionsRolloutConfig=job.job_executions_rollout_config, - jobId=job.job_id, - jobProcessDetails=job.job_process_details, - lastUpdatedAt=job.last_updated_at, - presignedUrlConfig=job.presigned_url_config, - status=job.status, - targets=job.targets, - targetSelection=job.target_selection - ))) + return json.dumps(dict(documentSource=job.document_source, job=job.to_dict())) + + def delete_job(self): + job_id = self._get_param("jobId") + force = self._get_bool_param("force") + + self.iot_backend.delete_job(job_id=job_id, + force=force) + + return json.dumps(dict()) + + def cancel_job(self): + job_id = self._get_param("jobId") + reason_code = self._get_param("reasonCode") + comment = self._get_param("comment") + force = self._get_bool_param("force") + + job = self.iot_backend.cancel_job(job_id=job_id, + reason_code=reason_code, + comment=comment, + force=force) + + 
return json.dumps(job.to_dict())
 
     def get_job_document(self):
         job = self.iot_backend.get_job_document(job_id=self._get_param("jobId"))
diff --git a/tests/test_iot/test_iot.py b/tests/test_iot/test_iot.py
index d39e9818a9e6..3cf412796c15 100644
--- a/tests/test_iot/test_iot.py
+++ b/tests/test_iot/test_iot.py
@@ -872,6 +872,86 @@ def test_describe_job_1():
         "maximumPerMinute").which.should.equal(10)
 
 
+@mock_iot
+def test_delete_job():
+    client = boto3.client('iot', region_name='eu-west-1')
+    name = "my-thing"
+    job_id = "TestJob"
+    # thing
+    thing = client.create_thing(thingName=name)
+    thing.should.have.key('thingName').which.should.equal(name)
+    thing.should.have.key('thingArn')
+
+    job = client.create_job(
+        jobId=job_id,
+        targets=[thing["thingArn"]],
+        documentSource="https://s3-eu-west-1.amazonaws.com/bucket-name/job_document.json",
+        presignedUrlConfig={
+            'roleArn': 'arn:aws:iam::1:role/service-role/iot_job_role',
+            'expiresInSec': 123
+        },
+        targetSelection="CONTINUOUS",
+        jobExecutionsRolloutConfig={
+            'maximumPerMinute': 10
+        }
+    )
+
+    job.should.have.key('jobId').which.should.equal(job_id)
+    job.should.have.key('jobArn')
+
+    job = client.describe_job(jobId=job_id)
+    job.should.have.key('job')
+    job.should.have.key('job').which.should.have.key("jobId").which.should.equal(job_id)
+
+    client.delete_job(jobId=job_id)
+
+    client.list_jobs()['jobs'].should.have.length_of(0)
+
+
+@mock_iot
+def test_cancel_job():
+    client = boto3.client('iot', region_name='eu-west-1')
+    name = "my-thing"
+    job_id = "TestJob"
+    # thing
+    thing = client.create_thing(thingName=name)
+    thing.should.have.key('thingName').which.should.equal(name)
+    thing.should.have.key('thingArn')
+
+    job = client.create_job(
+        jobId=job_id,
+        targets=[thing["thingArn"]],
+        documentSource="https://s3-eu-west-1.amazonaws.com/bucket-name/job_document.json",
+        presignedUrlConfig={
+            'roleArn': 'arn:aws:iam::1:role/service-role/iot_job_role',
+            'expiresInSec': 123
+        },
+        targetSelection="CONTINUOUS",
+        jobExecutionsRolloutConfig={
+            'maximumPerMinute': 10
+        }
+    )
+
+    job.should.have.key('jobId').which.should.equal(job_id)
+    job.should.have.key('jobArn')
+
+    job = client.describe_job(jobId=job_id)
+    job.should.have.key('job')
+    job.should.have.key('job').which.should.have.key("jobId").which.should.equal(job_id)
+
+    job = client.cancel_job(jobId=job_id, reasonCode='Because', comment='You are')
+    job.should.have.key('jobId').which.should.equal(job_id)
+    job.should.have.key('jobArn')
+
+    job = client.describe_job(jobId=job_id)
+    job.should.have.key('job')
+    job.should.have.key('job').which.should.have.key("jobId").which.should.equal(job_id)
+    job.should.have.key('job').which.should.have.key("status").which.should.equal('CANCELED')
+    job.should.have.key('job').which.should.have.key("forceCanceled").which.should.equal(False)
+    job.should.have.key('job').which.should.have.key("reasonCode").which.should.equal('Because')
+    job.should.have.key('job').which.should.have.key("comment").which.should.equal('You are')
+
+
 @mock_iot
 def test_get_job_document_with_document_source():
     client = boto3.client('iot', region_name='eu-west-1')
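Taken together, the cancel and delete handlers added in this patch enforce one rule: a job that is IN_PROGRESS may only change state when force is set, otherwise the backend raises InvalidStateTransitionException. A short sketch of the happy path through the mocked API as of this patch; the job and thing names are invented for illustration.

import json

import boto3
from moto import mock_iot


@mock_iot
def cancel_then_delete():
    client = boto3.client('iot', region_name='eu-west-1')
    thing_arn = client.create_thing(thingName='example-thing')['thingArn']
    client.create_job(jobId='ExampleJob', targets=[thing_arn],
                      document=json.dumps({'field': 'value'}))

    # A QUEUED job can be canceled without force; reasonCode and comment
    # are recorded on the job and surface in describe_job.
    client.cancel_job(jobId='ExampleJob', reasonCode='Because', comment='You are')
    assert client.describe_job(jobId='ExampleJob')['job']['status'] == 'CANCELED'

    # Once the job is no longer IN_PROGRESS, deletion needs no force flag.
    client.delete_job(jobId='ExampleJob')
    assert client.list_jobs()['jobs'] == []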
From ded89416fbffe2b40a4b83e94c663f7c08ecc6ae Mon Sep 17 00:00:00 2001
From: Stephan
Date: Mon, 14 Jan 2019 12:19:43 +0100
Subject: [PATCH 014/658] updated implementation coverage

---
 IMPLEMENTATION_COVERAGE.md | 9 ++++++---
 1 file changed, 6 insertions(+), 3 deletions(-)

diff --git a/IMPLEMENTATION_COVERAGE.md b/IMPLEMENTATION_COVERAGE.md
index fcfe31835a1c..8ac5f8e5ec3e 100644
--- a/IMPLEMENTATION_COVERAGE.md
+++ b/IMPLEMENTATION_COVERAGE.md
@@ -2871,7 +2871,7 @@
 - [ ] unsubscribe_from_event
 - [ ] update_assessment_target
 
-## iot - 31% implemented
+## iot - 33% implemented
 - [ ] accept_certificate_transfer
 - [ ] add_thing_to_billing_group
 - [X] add_thing_to_thing_group
@@ -2882,7 +2882,7 @@
 - [X] attach_thing_principal
 - [ ] cancel_audit_task
 - [ ] cancel_certificate_transfer
-- [ ] cancel_job
+- [X] cancel_job
 - [X] cancel_job_execution
 - [ ] clear_default_authorizer
 - [ ] create_authorizer
@@ -2908,7 +2908,7 @@
 - [ ] delete_ca_certificate
 - [X] delete_certificate
 - [ ] delete_dynamic_thing_group
-- [ ] delete_job
+- [X] delete_job
 - [X] delete_job_execution
 - [ ] delete_ota_update
 - [X] delete_policy
@@ -5213,11 +5213,14 @@
 - [ ] list_activities
 - [ ] list_executions
 - [ ] list_state_machines
+- [ ] list_tags_for_resource
 - [ ] send_task_failure
 - [ ] send_task_heartbeat
 - [ ] send_task_success
 - [ ] start_execution
 - [ ] stop_execution
+- [ ] tag_resource
+- [ ] untag_resource
 - [ ] update_state_machine
 
 ## storagegateway - 0% implemented

From 498419462dcf4f28846fb95d52868185f8834e34 Mon Sep 17 00:00:00 2001
From: Stephan
Date: Mon, 29 Apr 2019 13:26:33 +0200
Subject: [PATCH 015/658] updated

---
 file.tmp                   |   9 +++
 moto/iot/exceptions.py     |  11 ++-
 moto/iot/models.py         | 149 ++++++++++++-------------------------
 moto/iot/responses.py      |  50 ++++++++-----
 tests/test_iot/test_iot.py | 134 ++++++++++++++++++++++++++++++++-
 5 files changed, 231 insertions(+), 122 deletions(-)
 create mode 100644 file.tmp

diff --git a/file.tmp b/file.tmp
new file mode 100644
index 000000000000..0b91630a9af7
--- /dev/null
+++ b/file.tmp
@@ -0,0 +1,9 @@
+
+    AWSTemplateFormatVersion: '2010-09-09'
+    Description: Simple CloudFormation Test Template
+    Resources:
+      S3Bucket:
+        Type: AWS::S3::Bucket
+        Properties:
+          AccessControl: PublicRead
+          BucketName: cf-test-bucket-1
diff --git a/moto/iot/exceptions.py b/moto/iot/exceptions.py
index b8b3f1e843ef..b5725d8fe86f 100644
--- a/moto/iot/exceptions.py
+++ b/moto/iot/exceptions.py
@@ -24,6 +24,15 @@ def __init__(self, msg=None):
         )
 
 
+class InvalidStateTransitionException(IoTClientError):
+    def __init__(self, msg=None):
+        self.code = 409
+        super(InvalidStateTransitionException, self).__init__(
+            "InvalidStateTransitionException",
+            msg or "An attempt was made to change to an invalid state."
+ ) + + class VersionConflictException(IoTClientError): def __init__(self, name): self.code = 409 @@ -47,4 +56,4 @@ def __init__(self, msg): self.code = 409 super(DeleteConflictException, self).__init__( 'DeleteConflictException', msg - ) \ No newline at end of file + ) diff --git a/moto/iot/models.py b/moto/iot/models.py index ee4211f535c5..9dcefbb8335e 100644 --- a/moto/iot/models.py +++ b/moto/iot/models.py @@ -13,6 +13,8 @@ from moto.core import BaseBackend, BaseModel from .exceptions import ( + CertificateStateException, + DeleteConflictException, ResourceNotFoundException, InvalidRequestException, InvalidStateTransitionException, @@ -226,14 +228,12 @@ def __init__(self, job_id, targets, document_source, document, description, pres self.targets = targets self.document_source = document_source self.document = document - self.force = False self.description = description self.presigned_url_config = presigned_url_config self.target_selection = target_selection self.job_executions_rollout_config = job_executions_rollout_config self.status = 'QUEUED' # IN_PROGRESS | CANCELED | COMPLETED self.comment = None - self.reason_code = None self.created_at = time.mktime(datetime(2015, 1, 1).timetuple()) self.last_updated_at = time.mktime(datetime(2015, 1, 1).timetuple()) self.completed_at = None @@ -260,11 +260,9 @@ def to_dict(self): 'jobExecutionsRolloutConfig': self.job_executions_rollout_config, 'status': self.status, 'comment': self.comment, - 'forceCanceled': self.force, - 'reasonCode': self.reason_code, 'createdAt': self.created_at, 'lastUpdatedAt': self.last_updated_at, - 'completedAt': self.completed_at, + 'completedAt': self.completedAt, 'jobProcessDetails': self.job_process_details, 'documentParameters': self.document_parameters, 'document': self.document, @@ -477,7 +475,25 @@ def create_keys_and_certificate(self, set_as_active): return certificate, key_pair def delete_certificate(self, certificate_id): - self.describe_certificate(certificate_id) + cert = self.describe_certificate(certificate_id) + if cert.status == 'ACTIVE': + raise CertificateStateException( + 'Certificate must be deactivated (not ACTIVE) before deletion.', certificate_id) + + certs = [k[0] for k, v in self.principal_things.items() + if self._get_principal(k[0]).certificate_id == certificate_id] + if len(certs) > 0: + raise DeleteConflictException( + 'Things must be detached before deletion (arn: %s)' % certs[0] + ) + + certs = [k[0] for k, v in self.principal_policies.items() + if self._get_principal(k[0]).certificate_id == certificate_id] + if len(certs) > 0: + raise DeleteConflictException( + 'Certificate policies must be detached before deletion (arn: %s)' % certs[0] + ) + del self.certificates[certificate_id] def describe_certificate(self, certificate_id): @@ -532,6 +548,14 @@ def get_policy(self, policy_name): return policies[0] def delete_policy(self, policy_name): + + policies = [k[1] for k, v in self.principal_policies.items() if k[1] == policy_name] + if len(policies) > 0: + raise DeleteConflictException( + 'The policy cannot be deleted as the policy is attached to one or more principals (name=%s)' + % policy_name + ) + policy = self.get_policy(policy_name) del self.policies[policy.name] @@ -601,6 +625,14 @@ def _get_principal(self, principal_arn): pass raise ResourceNotFoundException() + def attach_policy(self, policy_name, target): + principal = self._get_principal(target) + policy = self.get_policy(policy_name) + k = (target, policy_name) + if k in self.principal_policies: + return + 
self.principal_policies[k] = (principal, policy) + def attach_principal_policy(self, policy_name, principal_arn): principal = self._get_principal(principal_arn) policy = self.get_policy(policy_name) @@ -609,6 +641,15 @@ def attach_principal_policy(self, policy_name, principal_arn): return self.principal_policies[k] = (principal, policy) + def detach_policy(self, policy_name, target): + # this may raises ResourceNotFoundException + self._get_principal(target) + self.get_policy(policy_name) + k = (target, policy_name) + if k not in self.principal_policies: + raise ResourceNotFoundException() + del self.principal_policies[k] + def detach_principal_policy(self, policy_name, principal_arn): # this may raises ResourceNotFoundException self._get_principal(principal_arn) @@ -820,102 +861,6 @@ def cancel_job(self, job_id, reason_code, comment, force): def get_job_document(self, job_id): return self.jobs[job_id] - def list_jobs(self, status, target_selection, max_results, token, thing_group_name, thing_group_id): - # TODO: implement filters - all_jobs = [_.to_dict() for _ in self.jobs.values()] - filtered_jobs = all_jobs - - if token is None: - jobs = filtered_jobs[0:max_results] - next_token = str(max_results) if len(filtered_jobs) > max_results else None - else: - token = int(token) - jobs = filtered_jobs[token:token + max_results] - next_token = str(token + max_results) if len(filtered_jobs) > token + max_results else None - - return jobs, next_token - - def describe_job_execution(self, job_id, thing_name, execution_number): - try: - job_execution = self.job_executions[(job_id, thing_name)] - except KeyError: - raise ResourceNotFoundException() - - if job_execution is None or \ - (execution_number is not None and job_execution.execution_number != execution_number): - raise ResourceNotFoundException() - - return job_execution - - def cancel_job_execution(self, job_id, thing_name, force, expected_version, status_details): - job_execution = self.job_executions[(job_id, thing_name)] - - if job_execution is None: - raise ResourceNotFoundException() - - job_execution.force_canceled = force if force is not None else job_execution.force_canceled - # TODO: implement expected_version and status_details (at most 10 can be specified) - - if job_execution.status == 'IN_PROGRESS' and force: - job_execution.status = 'CANCELED' - self.job_executions[(job_id, thing_name)] = job_execution - elif job_execution.status != 'IN_PROGRESS': - job_execution.status = 'CANCELED' - self.job_executions[(job_id, thing_name)] = job_execution - else: - raise InvalidStateTransitionException() - - def delete_job_execution(self, job_id, thing_name, execution_number, force): - job_execution = self.job_executions[(job_id, thing_name)] - - if job_execution.execution_number != execution_number: - raise ResourceNotFoundException() - - if job_execution.status == 'IN_PROGRESS' and force: - del self.job_executions[(job_id, thing_name)] - elif job_execution.status != 'IN_PROGRESS': - del self.job_executions[(job_id, thing_name)] - else: - raise InvalidStateTransitionException() - - def list_job_executions_for_job(self, job_id, status, max_results, next_token): - job_executions = [self.job_executions[je].to_dict() for je in self.job_executions if je[0] == job_id] - - if status is not None: - job_executions = list(filter(lambda elem: - status in elem["status"] and - elem["status"] == status, job_executions)) - - token = next_token - if token is None: - job_executions = job_executions[0:max_results] - next_token = str(max_results) if 
len(job_executions) > max_results else None - else: - token = int(token) - job_executions = job_executions[token:token + max_results] - next_token = str(token + max_results) if len(job_executions) > token + max_results else None - - return job_executions, next_token - - def list_job_executions_for_thing(self, thing_name, status, max_results, next_token): - job_executions = [self.job_executions[je].to_dict() for je in self.job_executions if je[1] == thing_name] - - if status is not None: - job_executions = list(filter(lambda elem: - status in elem["status"] and - elem["status"] == status, job_executions)) - - token = next_token - if token is None: - job_executions = job_executions[0:max_results] - next_token = str(max_results) if len(job_executions) > max_results else None - else: - token = int(token) - job_executions = job_executions[token:token + max_results] - next_token = str(token + max_results) if len(job_executions) > token + max_results else None - - return job_executions, next_token - available_regions = boto3.session.Session().get_available_regions("iot") iot_backends = {region: IoTBackend(region) for region in available_regions} diff --git a/moto/iot/responses.py b/moto/iot/responses.py index 3dc95e9f6ccb..0a941ccfca92 100644 --- a/moto/iot/responses.py +++ b/moto/iot/responses.py @@ -115,23 +115,39 @@ def update_thing(self): return json.dumps(dict()) def create_job(self): - job = self.iot_backend.create_job( + job_arn, job_id, description = self.iot_backend.create_job( job_id=self._get_param("jobId"), targets=self._get_param("targets"), description=self._get_param("description"), document_source=self._get_param("documentSource"), document=self._get_param("document"), - presigned_url_config=self._get_param("presignedUrlConfig"), - target_selection=self._get_param("targetSelection"), + presigned_url_config=self._get_param("presignedUrlConfig"), target_selection=self._get_param("targetSelection"), job_executions_rollout_config=self._get_param("jobExecutionsRolloutConfig"), document_parameters=self._get_param("documentParameters") ) - return json.dumps(job.to_dict()) + return json.dumps(dict(jobArn=job_arn, jobId=job_id, description=description)) def describe_job(self): job = self.iot_backend.describe_job(job_id=self._get_param("jobId")) - return json.dumps(dict(documentSource=job.document_source, job=job.to_dict())) + return json.dumps(dict( + documentSource=job.document_source, + job=dict( + comment=job.comment, + completedAt=job.completed_at, + createdAt=job.created_at, + description=job.description, + documentParameters=job.document_parameters, + jobArn=job.job_arn, + jobExecutionsRolloutConfig=job.job_executions_rollout_config, + jobId=job.job_id, + jobProcessDetails=job.job_process_details, + lastUpdatedAt=job.last_updated_at, + presignedUrlConfig=job.presigned_url_config, + status=job.status, + targets=job.targets, + targetSelection=job.target_selection + ))) def delete_job(self): job_id = self._get_param("jobId") @@ -140,8 +156,6 @@ def delete_job(self): self.iot_backend.delete_job(job_id=job_id, force=force) - return json.dumps(dict()) - def cancel_job(self): job_id = self._get_param("jobId") reason_code = self._get_param("reasonCode") @@ -354,19 +368,10 @@ def delete_policy_version(self): def attach_policy(self): policy_name = self._get_param("policyName") - principal = self._get_param('target') + target = self._get_param('target') self.iot_backend.attach_policy( policy_name=policy_name, - target=principal, - ) - return json.dumps(dict()) - - def detach_policy(self): - 
policy_name = self._get_param("policyName") - principal = self._get_param('target') - self.iot_backend.detach_policy( - policy_name=policy_name, - target=principal, + target=target, ) return json.dumps(dict()) @@ -390,6 +395,15 @@ def attach_principal_policy(self): ) return json.dumps(dict()) + def detach_policy(self): + policy_name = self._get_param("policyName") + target = self._get_param('target') + self.iot_backend.detach_policy( + policy_name=policy_name, + target=target, + ) + return json.dumps(dict()) + def detach_principal_policy(self): policy_name = self._get_param("policyName") principal = self.headers.get('x-amzn-iot-principal') diff --git a/tests/test_iot/test_iot.py b/tests/test_iot/test_iot.py index 3cf412796c15..33497a3825e1 100644 --- a/tests/test_iot/test_iot.py +++ b/tests/test_iot/test_iot.py @@ -4,8 +4,9 @@ import sure #noqa import boto3 -from botocore.exceptions import ClientError from moto import mock_iot +from botocore.exceptions import ClientError +from nose.tools import assert_raises @mock_iot def test_attach_policy(): @@ -384,6 +385,96 @@ def test_certs(): res.should.have.key('certificates').which.should.have.length_of(0) +@mock_iot +def test_delete_policy_validation(): + doc = """{ + "Version": "2012-10-17", + "Statement":[ + { + "Effect":"Allow", + "Action":[ + "iot: *" + ], + "Resource":"*" + } + ] + } + """ + client = boto3.client('iot', region_name='ap-northeast-1') + cert = client.create_keys_and_certificate(setAsActive=True) + cert_arn = cert['certificateArn'] + policy_name = 'my-policy' + client.create_policy(policyName=policy_name, policyDocument=doc) + client.attach_principal_policy(policyName=policy_name, principal=cert_arn) + + with assert_raises(ClientError) as e: + client.delete_policy(policyName=policy_name) + e.exception.response['Error']['Message'].should.contain( + 'The policy cannot be deleted as the policy is attached to one or more principals (name=%s)' % policy_name) + res = client.list_policies() + res.should.have.key('policies').which.should.have.length_of(1) + + client.detach_principal_policy(policyName=policy_name, principal=cert_arn) + client.delete_policy(policyName=policy_name) + res = client.list_policies() + res.should.have.key('policies').which.should.have.length_of(0) + + +@mock_iot +def test_delete_certificate_validation(): + doc = """{ + "Version": "2012-10-17", + "Statement":[ + { + "Effect":"Allow", + "Action":[ + "iot: *" + ], + "Resource":"*" + } + ] + } + """ + client = boto3.client('iot', region_name='ap-northeast-1') + cert = client.create_keys_and_certificate(setAsActive=True) + cert_id = cert['certificateId'] + cert_arn = cert['certificateArn'] + policy_name = 'my-policy' + thing_name = 'thing-1' + client.create_policy(policyName=policy_name, policyDocument=doc) + client.attach_principal_policy(policyName=policy_name, principal=cert_arn) + client.create_thing(thingName=thing_name) + client.attach_thing_principal(thingName=thing_name, principal=cert_arn) + + with assert_raises(ClientError) as e: + client.delete_certificate(certificateId=cert_id) + e.exception.response['Error']['Message'].should.contain( + 'Certificate must be deactivated (not ACTIVE) before deletion.') + res = client.list_certificates() + res.should.have.key('certificates').which.should.have.length_of(1) + + client.update_certificate(certificateId=cert_id, newStatus='REVOKED') + with assert_raises(ClientError) as e: + client.delete_certificate(certificateId=cert_id) + e.exception.response['Error']['Message'].should.contain( + 'Things must be detached 
before deletion (arn: %s)' % cert_arn) + res = client.list_certificates() + res.should.have.key('certificates').which.should.have.length_of(1) + + client.detach_thing_principal(thingName=thing_name, principal=cert_arn) + with assert_raises(ClientError) as e: + client.delete_certificate(certificateId=cert_id) + e.exception.response['Error']['Message'].should.contain( + 'Certificate policies must be detached before deletion (arn: %s)' % cert_arn) + res = client.list_certificates() + res.should.have.key('certificates').which.should.have.length_of(1) + + client.detach_principal_policy(policyName=policy_name, principal=cert_arn) + client.delete_certificate(certificateId=cert_id) + res = client.list_certificates() + res.should.have.key('certificates').which.should.have.length_of(0) + + @mock_iot def test_certs_create_inactive(): client = boto3.client('iot', region_name='ap-northeast-1') @@ -432,6 +523,47 @@ def test_policy(): @mock_iot def test_principal_policy(): + client = boto3.client('iot', region_name='ap-northeast-1') + policy_name = 'my-policy' + doc = '{}' + client.create_policy(policyName=policy_name, policyDocument=doc) + cert = client.create_keys_and_certificate(setAsActive=True) + cert_arn = cert['certificateArn'] + + client.attach_policy(policyName=policy_name, target=cert_arn) + + res = client.list_principal_policies(principal=cert_arn) + res.should.have.key('policies').which.should.have.length_of(1) + for policy in res['policies']: + policy.should.have.key('policyName').which.should_not.be.none + policy.should.have.key('policyArn').which.should_not.be.none + + # do nothing if the policy is already attached to the certificate + client.attach_policy(policyName=policy_name, target=cert_arn) + + res = client.list_principal_policies(principal=cert_arn) + res.should.have.key('policies').which.should.have.length_of(1) + for policy in res['policies']: + policy.should.have.key('policyName').which.should_not.be.none + policy.should.have.key('policyArn').which.should_not.be.none + + res = client.list_policy_principals(policyName=policy_name) + res.should.have.key('principals').which.should.have.length_of(1) + for principal in res['principals']: + principal.should_not.be.none + + client.detach_policy(policyName=policy_name, target=cert_arn) + res = client.list_principal_policies(principal=cert_arn) + res.should.have.key('policies').which.should.have.length_of(0) + res = client.list_policy_principals(policyName=policy_name) + res.should.have.key('principals').which.should.have.length_of(0) + with assert_raises(ClientError) as e: + client.detach_policy(policyName=policy_name, target=cert_arn) + e.exception.response['Error']['Code'].should.equal('ResourceNotFoundException') + + +@mock_iot +def test_principal_policy_deprecated(): client = boto3.client('iot', region_name='ap-northeast-1') policy_name = 'my-policy' doc = '{}' From d98e96ddd7bff6cebbc6d1eb4b66703d6b38605b Mon Sep 17 00:00:00 2001 From: Stephan Date: Mon, 29 Apr 2019 13:32:42 +0200 Subject: [PATCH 016/658] :rotating_light: --- moto/iot/models.py | 17 ----------------- moto/iot/responses.py | 3 ++- 2 files changed, 2 insertions(+), 18 deletions(-) diff --git a/moto/iot/models.py b/moto/iot/models.py index 9dcefbb8335e..f34164483791 100644 --- a/moto/iot/models.py +++ b/moto/iot/models.py @@ -625,14 +625,6 @@ def _get_principal(self, principal_arn): pass raise ResourceNotFoundException() - def attach_policy(self, policy_name, target): - principal = self._get_principal(target) - policy = self.get_policy(policy_name) - k = (target, 
policy_name) - if k in self.principal_policies: - return - self.principal_policies[k] = (principal, policy) - def detach_policy(self, policy_name, target): - # this may raise ResourceNotFoundException - self._get_principal(target) - self.get_policy(policy_name) - k = (target, policy_name) - if k not in self.principal_policies: - raise ResourceNotFoundException() - del self.principal_policies[k] - def detach_principal_policy(self, policy_name, principal_arn): # this may raise ResourceNotFoundException self._get_principal(principal_arn) diff --git a/moto/iot/responses.py b/moto/iot/responses.py index 0a941ccfca92..5b805465b625 100644 --- a/moto/iot/responses.py +++ b/moto/iot/responses.py @@ -121,7 +121,8 @@ def create_job(self): description=self._get_param("description"), document_source=self._get_param("documentSource"), document=self._get_param("document"), - presigned_url_config=self._get_param("presignedUrlConfig"), target_selection=self._get_param("targetSelection"), + presigned_url_config=self._get_param("presignedUrlConfig"), + target_selection=self._get_param("targetSelection"), job_executions_rollout_config=self._get_param("jobExecutionsRolloutConfig"), document_parameters=self._get_param("documentParameters") ) From 86c127142bc0a85b7fffbde6fa437d26ada5ea08 Mon Sep 17 00:00:00 2001 From: Stephan Date: Mon, 29 Apr 2019 15:44:10 +0200 Subject: [PATCH 017/658] :white_check_mark: --- moto/core/models.py | 5 -- moto/iot/models.py | 103 +++++++++++++++++++++++++++++++++++++++++- moto/iot/responses.py | 4 ++ 3 files changed, 105 insertions(+), 7 deletions(-) diff --git a/moto/core/models.py b/moto/core/models.py index 9fe1e96bd32b..491e9f451805 100644 --- a/moto/core/models.py +++ b/moto/core/models.py @@ -22,11 +22,6 @@ ) -# "Mock" the AWS credentials as they can't be mocked in Botocore currently -os.environ.setdefault("AWS_ACCESS_KEY_ID", "foobar_key") -os.environ.setdefault("AWS_SECRET_ACCESS_KEY", "foobar_secret") - - class BaseMockAWS(object): nested_count = 0 diff --git a/moto/iot/models.py b/moto/iot/models.py index f34164483791..855591ffc3bf 100644 --- a/moto/iot/models.py +++ b/moto/iot/models.py @@ -228,12 +228,14 @@ def __init__(self, job_id, targets, document_source, document, description, pres self.targets = targets self.document_source = document_source self.document = document + self.force = False self.description = description self.presigned_url_config = presigned_url_config self.target_selection = target_selection self.job_executions_rollout_config = job_executions_rollout_config self.status = 'QUEUED' # IN_PROGRESS | CANCELED | COMPLETED self.comment = None + self.reason_code = None self.created_at = time.mktime(datetime(2015, 1, 1).timetuple()) self.last_updated_at = time.mktime(datetime(2015, 1, 1).timetuple()) self.completed_at = None @@ -260,9 +262,11 @@ def to_dict(self): 'jobExecutionsRolloutConfig': self.job_executions_rollout_config, 'status': self.status, 'comment': self.comment, + 'forceCanceled': self.force, + 'reasonCode': self.reason_code, 'createdAt': self.created_at, 'lastUpdatedAt': self.last_updated_at, - 'completedAt': self.completedAt, + 'completedAt': self.completed_at, 'jobProcessDetails': self.job_process_details, 'documentParameters': self.document_parameters, 
'document': self.document, @@ -806,7 +810,7 @@ def create_job(self, job_id, targets, document_source, document, description, pr thing_name = thing_arn.split(':')[-1].split('/')[-1] job_execution = FakeJobExecution(job_id, thing_arn) self.job_executions[(job_id, thing_name)] = job_execution - return job + return job.job_arn, job_id, description def describe_job(self, job_id): jobs = [_ for _ in self.jobs.values() if _.job_id == job_id] @@ -844,6 +848,101 @@ def cancel_job(self, job_id, reason_code, comment, force): def get_job_document(self, job_id): return self.jobs[job_id] + def list_jobs(self, status, target_selection, max_results, token, thing_group_name, thing_group_id): + # TODO: implement filters + all_jobs = [_.to_dict() for _ in self.jobs.values()] + filtered_jobs = all_jobs + + if token is None: + jobs = filtered_jobs[0:max_results] + next_token = str(max_results) if len(filtered_jobs) > max_results else None + else: + token = int(token) + jobs = filtered_jobs[token:token + max_results] + next_token = str(token + max_results) if len(filtered_jobs) > token + max_results else None + + return jobs, next_token + + def describe_job_execution(self, job_id, thing_name, execution_number): + try: + job_execution = self.job_executions[(job_id, thing_name)] + except KeyError: + raise ResourceNotFoundException() + + if job_execution is None or \ + (execution_number is not None and job_execution.execution_number != execution_number): + raise ResourceNotFoundException() + + return job_execution + + def cancel_job_execution(self, job_id, thing_name, force, expected_version, status_details): + job_execution = self.job_executions[(job_id, thing_name)] + + if job_execution is None: + raise ResourceNotFoundException() + + job_execution.force_canceled = force if force is not None else job_execution.force_canceled + # TODO: implement expected_version and status_details (at most 10 can be specified) + + if job_execution.status == 'IN_PROGRESS' and force: + job_execution.status = 'CANCELED' + self.job_executions[(job_id, thing_name)] = job_execution + elif job_execution.status != 'IN_PROGRESS': + job_execution.status = 'CANCELED' + self.job_executions[(job_id, thing_name)] = job_execution + else: + raise InvalidStateTransitionException() + + def delete_job_execution(self, job_id, thing_name, execution_number, force): + job_execution = self.job_executions[(job_id, thing_name)] + + if job_execution.execution_number != execution_number: + raise ResourceNotFoundException() + + if job_execution.status == 'IN_PROGRESS' and force: + del self.job_executions[(job_id, thing_name)] + elif job_execution.status != 'IN_PROGRESS': + del self.job_executions[(job_id, thing_name)] + else: + raise InvalidStateTransitionException() + + def list_job_executions_for_job(self, job_id, status, max_results, next_token): + job_executions = [self.job_executions[je].to_dict() for je in self.job_executions if je[0] == job_id] + + if status is not None: + job_executions = list(filter(lambda elem: + status in elem["status"] and + elem["status"] == status, job_executions)) + + token = next_token + if token is None: + job_executions = job_executions[0:max_results] + next_token = str(max_results) if len(job_executions) > max_results else None + else: + token = int(token) + job_executions = job_executions[token:token + max_results] + next_token = str(token + max_results) if len(job_executions) > token + max_results else None + + return job_executions, next_token + + def list_job_executions_for_thing(self, thing_name, status, max_results, 
next_token): + job_executions = [self.job_executions[je].to_dict() for je in self.job_executions if je[1] == thing_name] + + if status is not None: + job_executions = list(filter(lambda elem: + status in elem["status"] and + elem["status"] == status, job_executions)) + + token = next_token + if token is None: + job_executions = job_executions[0:max_results] + next_token = str(max_results) if len(job_executions) > max_results else None + else: + token = int(token) + job_executions = job_executions[token:token + max_results] + next_token = str(token + max_results) if len(job_executions) > token + max_results else None + + return job_executions, next_token available_regions = boto3.session.Session().get_available_regions("iot") iot_backends = {region: IoTBackend(region) for region in available_regions} diff --git a/moto/iot/responses.py b/moto/iot/responses.py index 5b805465b625..df0b998713ca 100644 --- a/moto/iot/responses.py +++ b/moto/iot/responses.py @@ -139,6 +139,8 @@ def describe_job(self): createdAt=job.created_at, description=job.description, documentParameters=job.document_parameters, + forceCanceled=job.force, + reasonCode=job.reason_code, jobArn=job.job_arn, jobExecutionsRolloutConfig=job.job_executions_rollout_config, jobId=job.job_id, @@ -157,6 +159,8 @@ def delete_job(self): self.iot_backend.delete_job(job_id=job_id, force=force) + return json.dumps(dict()) + def cancel_job(self): job_id = self._get_param("jobId") reason_code = self._get_param("reasonCode") From a07533792d07e38d0483176cc4724a6f2e271dc6 Mon Sep 17 00:00:00 2001 From: Stephan Date: Mon, 29 Apr 2019 15:44:17 +0200 Subject: [PATCH 018/658] :memo: --- IMPLEMENTATION_COVERAGE.md | 129 +++++++++++++++++++------------------ 1 file changed, 65 insertions(+), 64 deletions(-) diff --git a/IMPLEMENTATION_COVERAGE.md b/IMPLEMENTATION_COVERAGE.md index ba91eddbdb0e..26ea1972ac46 100644 --- a/IMPLEMENTATION_COVERAGE.md +++ b/IMPLEMENTATION_COVERAGE.md @@ -1,3 +1,4 @@ + ## acm - 41% implemented - [X] add_tags_to_certificate - [X] delete_certificate @@ -440,7 +441,7 @@ - [ ] start_query_execution - [ ] stop_query_execution -## autoscaling - 42% implemented +## autoscaling - 44% implemented - [X] attach_instances - [X] attach_load_balancer_target_groups - [X] attach_load_balancers @@ -491,7 +492,7 @@ - [ ] resume_processes - [X] set_desired_capacity - [X] set_instance_health -- [ ] set_instance_protection +- [X] set_instance_protection - [X] suspend_processes - [ ] terminate_instance_in_auto_scaling_group - [X] update_auto_scaling_group @@ -645,19 +646,19 @@ - [ ] upgrade_applied_schema - [ ] upgrade_published_schema -## cloudformation - 20% implemented +## cloudformation - 40% implemented - [ ] cancel_update_stack - [ ] continue_update_rollback - [X] create_change_set - [X] create_stack -- [ ] create_stack_instances -- [ ] create_stack_set -- [ ] delete_change_set +- [X] create_stack_instances +- [X] create_stack_set +- [X] delete_change_set - [X] delete_stack -- [ ] delete_stack_instances -- [ ] delete_stack_set +- [X] delete_stack_instances +- [X] delete_stack_set - [ ] describe_account_limits -- [ ] describe_change_set +- [X] describe_change_set - [ ] describe_stack_drift_detection_status - [ ] describe_stack_events - [ ] describe_stack_instance @@ -674,7 +675,7 @@ - [ ] get_stack_policy - [ ] get_template - [ ] get_template_summary -- [ ] list_change_sets +- [X] list_change_sets - [X] list_exports - [ ] list_imports - [ ] list_stack_instances @@ -688,9 +689,9 @@ - [ ] stop_stack_set_operation - [X] update_stack - [ ] 
update_stack_instances -- [ ] update_stack_set +- [X] update_stack_set - [ ] update_termination_protection -- [ ] validate_template +- [X] validate_template ## cloudfront - 0% implemented - [ ] create_cloud_front_origin_access_identity @@ -1023,9 +1024,9 @@ - [ ] unlink_identity - [ ] update_identity_pool -## cognito-idp - 27% implemented +## cognito-idp - 36% implemented - [ ] add_custom_attributes -- [ ] admin_add_user_to_group +- [X] admin_add_user_to_group - [ ] admin_confirm_sign_up - [X] admin_create_user - [X] admin_delete_user @@ -1039,9 +1040,9 @@ - [X] admin_initiate_auth - [ ] admin_link_provider_for_user - [ ] admin_list_devices -- [ ] admin_list_groups_for_user +- [X] admin_list_groups_for_user - [ ] admin_list_user_auth_events -- [ ] admin_remove_user_from_group +- [X] admin_remove_user_from_group - [ ] admin_reset_user_password - [ ] admin_respond_to_auth_challenge - [ ] admin_set_user_mfa_preference @@ -1055,14 +1056,14 @@ - [ ] confirm_device - [X] confirm_forgot_password - [ ] confirm_sign_up -- [ ] create_group +- [X] create_group - [X] create_identity_provider - [ ] create_resource_server - [ ] create_user_import_job - [X] create_user_pool - [X] create_user_pool_client - [X] create_user_pool_domain -- [ ] delete_group +- [X] delete_group - [X] delete_identity_provider - [ ] delete_resource_server - [ ] delete_user @@ -1081,7 +1082,7 @@ - [ ] forgot_password - [ ] get_csv_header - [ ] get_device -- [ ] get_group +- [X] get_group - [ ] get_identity_provider_by_identifier - [ ] get_signing_certificate - [ ] get_ui_customization @@ -1091,14 +1092,14 @@ - [ ] global_sign_out - [ ] initiate_auth - [ ] list_devices -- [ ] list_groups +- [X] list_groups - [X] list_identity_providers - [ ] list_resource_servers - [ ] list_user_import_jobs - [X] list_user_pool_clients - [X] list_user_pools - [X] list_users -- [ ] list_users_in_group +- [X] list_users_in_group - [ ] resend_confirmation_code - [X] respond_to_auth_challenge - [ ] set_risk_configuration @@ -1112,7 +1113,7 @@ - [ ] update_auth_event_feedback - [ ] update_device_status - [ ] update_group -- [ ] update_identity_provider +- [X] update_identity_provider - [ ] update_resource_server - [ ] update_user_attributes - [ ] update_user_pool @@ -1188,14 +1189,14 @@ - [ ] detect_entities - [ ] detect_phi -## config - 0% implemented +## config - 19% implemented - [ ] batch_get_aggregate_resource_config - [ ] batch_get_resource_config - [ ] delete_aggregation_authorization - [ ] delete_config_rule - [ ] delete_configuration_aggregator -- [ ] delete_configuration_recorder -- [ ] delete_delivery_channel +- [X] delete_configuration_recorder +- [X] delete_delivery_channel - [ ] delete_evaluation_results - [ ] delete_pending_aggregation_request - [ ] delete_retention_configuration @@ -1208,10 +1209,10 @@ - [ ] describe_config_rules - [ ] describe_configuration_aggregator_sources_status - [ ] describe_configuration_aggregators -- [ ] describe_configuration_recorder_status -- [ ] describe_configuration_recorders +- [X] describe_configuration_recorder_status +- [X] describe_configuration_recorders - [ ] describe_delivery_channel_status -- [ ] describe_delivery_channels +- [X] describe_delivery_channels - [ ] describe_pending_aggregation_requests - [ ] describe_retention_configurations - [ ] get_aggregate_compliance_details_by_config_rule @@ -1229,13 +1230,13 @@ - [ ] put_aggregation_authorization - [ ] put_config_rule - [ ] put_configuration_aggregator -- [ ] put_configuration_recorder -- [ ] put_delivery_channel +- [X] 
put_configuration_recorder +- [X] put_delivery_channel - [ ] put_evaluations - [ ] put_retention_configuration - [ ] start_config_rules_evaluation -- [ ] start_configuration_recorder -- [ ] stop_configuration_recorder +- [X] start_configuration_recorder +- [X] stop_configuration_recorder ## connect - 0% implemented - [ ] create_user @@ -1616,11 +1617,11 @@ - [ ] update_table - [ ] update_time_to_live -## dynamodbstreams - 0% implemented -- [ ] describe_stream -- [ ] get_records -- [ ] get_shard_iterator -- [ ] list_streams +## dynamodbstreams - 100% implemented +- [X] describe_stream +- [X] get_records +- [X] get_shard_iterator +- [X] list_streams ## ec2 - 30% implemented - [ ] accept_reserved_instances_exchange_quote @@ -2684,7 +2685,7 @@ - [ ] describe_event_types - [ ] describe_events -## iam - 43% implemented +## iam - 56% implemented - [ ] add_client_id_to_open_id_connect_provider - [X] add_role_to_instance_profile - [X] add_user_to_group @@ -2701,7 +2702,7 @@ - [X] create_policy - [X] create_policy_version - [X] create_role -- [ ] create_saml_provider +- [X] create_saml_provider - [ ] create_service_linked_role - [ ] create_service_specific_credential - [X] create_user @@ -2720,11 +2721,11 @@ - [X] delete_role - [ ] delete_role_permissions_boundary - [X] delete_role_policy -- [ ] delete_saml_provider +- [X] delete_saml_provider - [X] delete_server_certificate - [ ] delete_service_linked_role - [ ] delete_service_specific_credential -- [ ] delete_signing_certificate +- [X] delete_signing_certificate - [ ] delete_ssh_public_key - [X] delete_user - [ ] delete_user_permissions_boundary @@ -2736,7 +2737,7 @@ - [X] enable_mfa_device - [ ] generate_credential_report - [ ] generate_service_last_accessed_details -- [ ] get_access_key_last_used +- [X] get_access_key_last_used - [X] get_account_authorization_details - [ ] get_account_password_policy - [ ] get_account_summary @@ -2752,7 +2753,7 @@ - [X] get_policy_version - [X] get_role - [X] get_role_policy -- [ ] get_saml_provider +- [X] get_saml_provider - [X] get_server_certificate - [ ] get_service_last_accessed_details - [ ] get_service_last_accessed_details_with_entities @@ -2777,12 +2778,12 @@ - [ ] list_policies_granting_service_access - [X] list_policy_versions - [X] list_role_policies -- [ ] list_role_tags -- [ ] list_roles -- [ ] list_saml_providers +- [X] list_role_tags +- [X] list_roles +- [X] list_saml_providers - [ ] list_server_certificates - [ ] list_service_specific_credentials -- [ ] list_signing_certificates +- [X] list_signing_certificates - [ ] list_ssh_public_keys - [X] list_user_policies - [ ] list_user_tags @@ -2801,9 +2802,9 @@ - [ ] set_default_policy_version - [ ] simulate_custom_policy - [ ] simulate_principal_policy -- [ ] tag_role +- [X] tag_role - [ ] tag_user -- [ ] untag_role +- [X] untag_role - [ ] untag_user - [X] update_access_key - [ ] update_account_password_policy @@ -2811,16 +2812,16 @@ - [ ] update_group - [X] update_login_profile - [ ] update_open_id_connect_provider_thumbprint -- [ ] update_role -- [ ] update_role_description -- [ ] update_saml_provider +- [X] update_role +- [X] update_role_description +- [X] update_saml_provider - [ ] update_server_certificate - [ ] update_service_specific_credential -- [ ] update_signing_certificate +- [X] update_signing_certificate - [ ] update_ssh_public_key -- [ ] update_user +- [X] update_user - [ ] upload_server_certificate -- [ ] upload_signing_certificate +- [X] upload_signing_certificate - [ ] upload_ssh_public_key ## importexport - 0% implemented @@ -3213,7 
+3214,7 @@ - [ ] update_data_retention - [ ] update_stream -## kms - 31% implemented +## kms - 41% implemented - [X] cancel_key_deletion - [ ] connect_custom_key_store - [ ] create_alias @@ -3232,7 +3233,7 @@ - [X] enable_key - [X] enable_key_rotation - [ ] encrypt -- [ ] generate_data_key +- [X] generate_data_key - [ ] generate_data_key_without_plaintext - [ ] generate_random - [X] get_key_policy @@ -3243,18 +3244,18 @@ - [ ] list_grants - [ ] list_key_policies - [X] list_keys -- [ ] list_resource_tags +- [X] list_resource_tags - [ ] list_retirable_grants - [X] put_key_policy - [ ] re_encrypt - [ ] retire_grant - [ ] revoke_grant - [X] schedule_key_deletion -- [ ] tag_resource +- [X] tag_resource - [ ] untag_resource - [ ] update_alias - [ ] update_custom_key_store -- [ ] update_key_description +- [X] update_key_description ## lambda - 0% implemented - [ ] add_layer_version_permission @@ -4702,20 +4703,20 @@ - [ ] put_attributes - [ ] select -## secretsmanager - 27% implemented +## secretsmanager - 44% implemented - [ ] cancel_rotate_secret - [X] create_secret - [ ] delete_resource_policy -- [ ] delete_secret +- [X] delete_secret - [X] describe_secret - [X] get_random_password - [ ] get_resource_policy - [X] get_secret_value - [ ] list_secret_version_ids -- [ ] list_secrets +- [X] list_secrets - [ ] put_resource_policy - [ ] put_secret_value -- [ ] restore_secret +- [X] restore_secret - [X] rotate_secret - [ ] tag_resource - [ ] untag_resource From 8cd62728c603b8d0ff973ddad9e55b50769350b5 Mon Sep 17 00:00:00 2001 From: Stephan Date: Mon, 29 Apr 2019 15:50:13 +0200 Subject: [PATCH 019/658] :rotating_light: --- moto/core/models.py | 1 - moto/iot/models.py | 1 + 2 files changed, 1 insertion(+), 1 deletion(-) diff --git a/moto/core/models.py b/moto/core/models.py index 491e9f451805..19267ca08860 100644 --- a/moto/core/models.py +++ b/moto/core/models.py @@ -4,7 +4,6 @@ import functools import inspect -import os import re import six from io import BytesIO diff --git a/moto/iot/models.py b/moto/iot/models.py index 855591ffc3bf..5dea4ee667a6 100644 --- a/moto/iot/models.py +++ b/moto/iot/models.py @@ -944,5 +944,6 @@ def list_job_executions_for_thing(self, thing_name, status, max_results, next_to return job_executions, next_token + available_regions = boto3.session.Session().get_available_regions("iot") iot_backends = {region: IoTBackend(region) for region in available_regions} From 263d85834917693344e3b6915c95be310e8c537e Mon Sep 17 00:00:00 2001 From: Stephan Date: Mon, 29 Apr 2019 16:21:41 +0200 Subject: [PATCH 020/658] setting envvars --- moto/core/models.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/moto/core/models.py b/moto/core/models.py index 19267ca08860..9fe1e96bd32b 100644 --- a/moto/core/models.py +++ b/moto/core/models.py @@ -4,6 +4,7 @@ import functools import inspect +import os import re import six from io import BytesIO @@ -21,6 +22,11 @@ ) +# "Mock" the AWS credentials as they can't be mocked in Botocore currently +os.environ.setdefault("AWS_ACCESS_KEY_ID", "foobar_key") +os.environ.setdefault("AWS_SECRET_ACCESS_KEY", "foobar_secret") + + class BaseMockAWS(object): nested_count = 0 From 3020ee408ae2c12a377019f5e1d0d10fc0f6a284 Mon Sep 17 00:00:00 2001 From: Stephan Date: Tue, 28 May 2019 08:56:49 +0200 Subject: [PATCH 021/658] Merged iot --- moto/iot/models.py | 1922 ++++++++++++------------ moto/iot/responses.py | 1226 ++++++++-------- tests/test_iot/test_iot.py | 2818 ++++++++++++++++++------------------ 3 files changed, 2983 insertions(+), 2983 deletions(-) diff 
--git a/moto/iot/models.py b/moto/iot/models.py index 4399e8790644..89d71dd14578 100644 --- a/moto/iot/models.py +++ b/moto/iot/models.py @@ -1,961 +1,961 @@ -from __future__ import unicode_literals - -import hashlib -import random -import re -import string -import time -import uuid -from collections import OrderedDict -from datetime import datetime - -import boto3 - -from moto.core import BaseBackend, BaseModel -from .exceptions import ( - CertificateStateException, - DeleteConflictException, - ResourceNotFoundException, - InvalidRequestException, - InvalidStateTransitionException, - VersionConflictException -) - - -class FakeThing(BaseModel): - def __init__(self, thing_name, thing_type, attributes, region_name): - self.region_name = region_name - self.thing_name = thing_name - self.thing_type = thing_type - self.attributes = attributes - self.arn = 'arn:aws:iot:%s:1:thing/%s' % (self.region_name, thing_name) - self.version = 1 - # TODO: we need to handle 'version'? - - # for iot-data - self.thing_shadow = None - - def to_dict(self, include_default_client_id=False): - obj = { - 'thingName': self.thing_name, - 'thingArn': self.arn, - 'attributes': self.attributes, - 'version': self.version - } - if self.thing_type: - obj['thingTypeName'] = self.thing_type.thing_type_name - if include_default_client_id: - obj['defaultClientId'] = self.thing_name - return obj - - -class FakeThingType(BaseModel): - def __init__(self, thing_type_name, thing_type_properties, region_name): - self.region_name = region_name - self.thing_type_name = thing_type_name - self.thing_type_properties = thing_type_properties - self.thing_type_id = str(uuid.uuid4()) # I don't know the rule of id - t = time.time() - self.metadata = { - 'deprecated': False, - 'creationData': int(t * 1000) / 1000.0 - } - self.arn = 'arn:aws:iot:%s:1:thingtype/%s' % (self.region_name, thing_type_name) - - def to_dict(self): - return { - 'thingTypeName': self.thing_type_name, - 'thingTypeId': self.thing_type_id, - 'thingTypeProperties': self.thing_type_properties, - 'thingTypeMetadata': self.metadata - } - - -class FakeThingGroup(BaseModel): - def __init__(self, thing_group_name, parent_group_name, thing_group_properties, region_name): - self.region_name = region_name - self.thing_group_name = thing_group_name - self.thing_group_id = str(uuid.uuid4()) # I don't know the rule of id - self.version = 1 # TODO: tmp - self.parent_group_name = parent_group_name - self.thing_group_properties = thing_group_properties or {} - t = time.time() - self.metadata = { - 'creationData': int(t * 1000) / 1000.0 - } - self.arn = 'arn:aws:iot:%s:1:thinggroup/%s' % (self.region_name, thing_group_name) - self.things = OrderedDict() - - def to_dict(self): - return { - 'thingGroupName': self.thing_group_name, - 'thingGroupId': self.thing_group_id, - 'version': self.version, - 'thingGroupProperties': self.thing_group_properties, - 'thingGroupMetadata': self.metadata - } - - -class FakeCertificate(BaseModel): - def __init__(self, certificate_pem, status, region_name, ca_certificate_pem=None): - m = hashlib.sha256() - m.update(str(uuid.uuid4()).encode('utf-8')) - self.certificate_id = m.hexdigest() - self.arn = 'arn:aws:iot:%s:1:cert/%s' % (region_name, self.certificate_id) - self.certificate_pem = certificate_pem - self.status = status - - # TODO: must adjust - self.owner = '1' - self.transfer_data = {} - self.creation_date = time.time() - self.last_modified_date = self.creation_date - - self.ca_certificate_id = None - self.ca_certificate_pem = ca_certificate_pem - if 
ca_certificate_pem: - m.update(str(uuid.uuid4()).encode('utf-8')) - self.ca_certificate_id = m.hexdigest() - - def to_dict(self): - return { - 'certificateArn': self.arn, - 'certificateId': self.certificate_id, - 'caCertificateId': self.ca_certificate_id, - 'status': self.status, - 'creationDate': self.creation_date - } - - def to_description_dict(self): - """ - You might need keys below in some situation - - caCertificateId - - previousOwnedBy - """ - return { - 'certificateArn': self.arn, - 'certificateId': self.certificate_id, - 'status': self.status, - 'certificatePem': self.certificate_pem, - 'ownedBy': self.owner, - 'creationDate': self.creation_date, - 'lastModifiedDate': self.last_modified_date, - 'transferData': self.transfer_data - } - - -class FakePolicy(BaseModel): - def __init__(self, name, document, region_name, default_version_id='1'): - self.name = name - self.document = document - self.arn = 'arn:aws:iot:%s:1:policy/%s' % (region_name, name) - self.default_version_id = default_version_id - self.versions = [FakePolicyVersion(self.name, document, True, region_name)] - - def to_get_dict(self): - return { - 'policyName': self.name, - 'policyArn': self.arn, - 'policyDocument': self.document, - 'defaultVersionId': self.default_version_id - } - - def to_dict_at_creation(self): - return { - 'policyName': self.name, - 'policyArn': self.arn, - 'policyDocument': self.document, - 'policyVersionId': self.default_version_id - } - - def to_dict(self): - return { - 'policyName': self.name, - 'policyArn': self.arn, - } - - -class FakePolicyVersion(object): - - def __init__(self, - policy_name, - document, - is_default, - region_name): - self.name = policy_name - self.arn = 'arn:aws:iot:%s:1:policy/%s' % (region_name, policy_name) - self.document = document or {} - self.is_default = is_default - self.version_id = '1' - - self.create_datetime = time.mktime(datetime(2015, 1, 1).timetuple()) - self.last_modified_datetime = time.mktime(datetime(2015, 1, 2).timetuple()) - - def to_get_dict(self): - return { - 'policyName': self.name, - 'policyArn': self.arn, - 'policyDocument': self.document, - 'policyVersionId': self.version_id, - 'isDefaultVersion': self.is_default, - 'creationDate': self.create_datetime, - 'lastModifiedDate': self.last_modified_datetime, - 'generationId': self.version_id - } - - def to_dict_at_creation(self): - return { - 'policyArn': self.arn, - 'policyDocument': self.document, - 'policyVersionId': self.version_id, - 'isDefaultVersion': self.is_default - } - - def to_dict(self): - return { - 'versionId': self.version_id, - 'isDefaultVersion': self.is_default, - 'createDate': self.create_datetime, - } - - -class FakeJob(BaseModel): - JOB_ID_REGEX_PATTERN = "[a-zA-Z0-9_-]" - JOB_ID_REGEX = re.compile(JOB_ID_REGEX_PATTERN) - - def __init__(self, job_id, targets, document_source, document, description, presigned_url_config, target_selection, - job_executions_rollout_config, document_parameters, region_name): - if not self._job_id_matcher(self.JOB_ID_REGEX, job_id): - raise InvalidRequestException() - - self.region_name = region_name - self.job_id = job_id - self.job_arn = 'arn:aws:iot:%s:1:job/%s' % (self.region_name, job_id) - self.targets = targets - self.document_source = document_source - self.document = document - self.force = False - self.description = description - self.presigned_url_config = presigned_url_config - self.target_selection = target_selection - self.job_executions_rollout_config = job_executions_rollout_config - self.status = 'QUEUED' # IN_PROGRESS | CANCELED 
| COMPLETED - self.comment = None - self.reason_code = None - self.created_at = time.mktime(datetime(2015, 1, 1).timetuple()) - self.last_updated_at = time.mktime(datetime(2015, 1, 1).timetuple()) - self.completed_at = None - self.job_process_details = { - 'processingTargets': targets, - 'numberOfQueuedThings': 1, - 'numberOfCanceledThings': 0, - 'numberOfSucceededThings': 0, - 'numberOfFailedThings': 0, - 'numberOfRejectedThings': 0, - 'numberOfInProgressThings': 0, - 'numberOfRemovedThings': 0 - } - self.document_parameters = document_parameters - - def to_dict(self): - obj = { - 'jobArn': self.job_arn, - 'jobId': self.job_id, - 'targets': self.targets, - 'description': self.description, - 'presignedUrlConfig': self.presigned_url_config, - 'targetSelection': self.target_selection, - 'jobExecutionsRolloutConfig': self.job_executions_rollout_config, - 'status': self.status, - 'comment': self.comment, - 'forceCanceled': self.force, - 'reasonCode': self.reason_code, - 'createdAt': self.created_at, - 'lastUpdatedAt': self.last_updated_at, - 'completedAt': self.completed_at, - 'jobProcessDetails': self.job_process_details, - 'documentParameters': self.document_parameters, - 'document': self.document, - 'documentSource': self.document_source - } - - return obj - - def _job_id_matcher(self, regex, argument): - regex_match = regex.match(argument) - length_match = len(argument) <= 64 - return regex_match and length_match - - -class FakeJobExecution(BaseModel): - - def __init__(self, job_id, thing_arn, status='QUEUED', force_canceled=False, status_details_map={}): - self.job_id = job_id - self.status = status # IN_PROGRESS | CANCELED | COMPLETED - self.force_canceled = force_canceled - self.status_details_map = status_details_map - self.thing_arn = thing_arn - self.queued_at = time.mktime(datetime(2015, 1, 1).timetuple()) - self.started_at = time.mktime(datetime(2015, 1, 1).timetuple()) - self.last_updated_at = time.mktime(datetime(2015, 1, 1).timetuple()) - self.execution_number = 123 - self.version_number = 123 - self.approximate_seconds_before_time_out = 123 - - def to_get_dict(self): - obj = { - 'jobId': self.job_id, - 'status': self.status, - 'forceCanceled': self.force_canceled, - 'statusDetails': {'detailsMap': self.status_details_map}, - 'thingArn': self.thing_arn, - 'queuedAt': self.queued_at, - 'startedAt': self.started_at, - 'lastUpdatedAt': self.last_updated_at, - 'executionNumber': self.execution_number, - 'versionNumber': self.version_number, - 'approximateSecondsBeforeTimedOut': self.approximate_seconds_before_time_out - } - - return obj - - def to_dict(self): - obj = { - 'jobId': self.job_id, - 'thingArn': self.thing_arn, - 'jobExecutionSummary': { - 'status': self.status, - 'queuedAt': self.queued_at, - 'startedAt': self.started_at, - 'lastUpdatedAt': self.last_updated_at, - 'executionNumber': self.execution_number, - } - } - - return obj - - -class IoTBackend(BaseBackend): - def __init__(self, region_name=None): - super(IoTBackend, self).__init__() - self.region_name = region_name - self.things = OrderedDict() - self.jobs = OrderedDict() - self.job_executions = OrderedDict() - self.thing_types = OrderedDict() - self.thing_groups = OrderedDict() - self.certificates = OrderedDict() - self.policies = OrderedDict() - self.principal_policies = OrderedDict() - self.principal_things = OrderedDict() - - def reset(self): - region_name = self.region_name - self.__dict__ = {} - self.__init__(region_name) - - def create_thing(self, thing_name, thing_type_name, attribute_payload): - 
thing_types = self.list_thing_types() - thing_type = None - if thing_type_name: - filtered_thing_types = [_ for _ in thing_types if _.thing_type_name == thing_type_name] - if len(filtered_thing_types) == 0: - raise ResourceNotFoundException() - thing_type = filtered_thing_types[0] - if attribute_payload is None: - attributes = {} - elif 'attributes' not in attribute_payload: - attributes = {} - else: - attributes = attribute_payload['attributes'] - thing = FakeThing(thing_name, thing_type, attributes, self.region_name) - self.things[thing.arn] = thing - return thing.thing_name, thing.arn - - def create_thing_type(self, thing_type_name, thing_type_properties): - if thing_type_properties is None: - thing_type_properties = {} - thing_type = FakeThingType(thing_type_name, thing_type_properties, self.region_name) - self.thing_types[thing_type.arn] = thing_type - return thing_type.thing_type_name, thing_type.arn - - def list_thing_types(self, thing_type_name=None): - if thing_type_name: - # It's weird but thing_type_name is filtered by forward match, not complete match - return [_ for _ in self.thing_types.values() if _.thing_type_name.startswith(thing_type_name)] - return self.thing_types.values() - - def list_things(self, attribute_name, attribute_value, thing_type_name, max_results, token): - all_things = [_.to_dict() for _ in self.things.values()] - if attribute_name is not None and thing_type_name is not None: - filtered_things = list(filter(lambda elem: - attribute_name in elem["attributes"] and - elem["attributes"][attribute_name] == attribute_value and - "thingTypeName" in elem and - elem["thingTypeName"] == thing_type_name, all_things)) - elif attribute_name is not None and thing_type_name is None: - filtered_things = list(filter(lambda elem: - attribute_name in elem["attributes"] and - elem["attributes"][attribute_name] == attribute_value, all_things)) - elif attribute_name is None and thing_type_name is not None: - filtered_things = list( - filter(lambda elem: "thingTypeName" in elem and elem["thingTypeName"] == thing_type_name, all_things)) - else: - filtered_things = all_things - - if token is None: - things = filtered_things[0:max_results] - next_token = str(max_results) if len(filtered_things) > max_results else None - else: - token = int(token) - things = filtered_things[token:token + max_results] - next_token = str(token + max_results) if len(filtered_things) > token + max_results else None - - return things, next_token - - def describe_thing(self, thing_name): - things = [_ for _ in self.things.values() if _.thing_name == thing_name] - if len(things) == 0: - raise ResourceNotFoundException() - return things[0] - - def describe_thing_type(self, thing_type_name): - thing_types = [_ for _ in self.thing_types.values() if _.thing_type_name == thing_type_name] - if len(thing_types) == 0: - raise ResourceNotFoundException() - return thing_types[0] - - def delete_thing(self, thing_name, expected_version): - # TODO: handle expected_version - - # can raise ResourceNotFoundError - thing = self.describe_thing(thing_name) - del self.things[thing.arn] - - def delete_thing_type(self, thing_type_name): - # can raise ResourceNotFoundError - thing_type = self.describe_thing_type(thing_type_name) - del self.thing_types[thing_type.arn] - - def update_thing(self, thing_name, thing_type_name, attribute_payload, expected_version, remove_thing_type): - # if attributes payload = {}, nothing - thing = self.describe_thing(thing_name) - thing_type = None - - if remove_thing_type and thing_type_name: - 
raise InvalidRequestException() - - # thing_type - if thing_type_name: - thing_types = self.list_thing_types() - filtered_thing_types = [_ for _ in thing_types if _.thing_type_name == thing_type_name] - if len(filtered_thing_types) == 0: - raise ResourceNotFoundException() - thing_type = filtered_thing_types[0] - thing.thing_type = thing_type - - if remove_thing_type: - thing.thing_type = None - - # attribute - if attribute_payload is not None and 'attributes' in attribute_payload: - do_merge = attribute_payload.get('merge', False) - attributes = attribute_payload['attributes'] - if not do_merge: - thing.attributes = attributes - else: - thing.attributes.update(attributes) - - def _random_string(self): - n = 20 - random_str = ''.join([random.choice(string.ascii_letters + string.digits) for i in range(n)]) - return random_str - - def create_keys_and_certificate(self, set_as_active): - # implement here - # caCertificate can be blank - key_pair = { - 'PublicKey': self._random_string(), - 'PrivateKey': self._random_string() - } - certificate_pem = self._random_string() - status = 'ACTIVE' if set_as_active else 'INACTIVE' - certificate = FakeCertificate(certificate_pem, status, self.region_name) - self.certificates[certificate.certificate_id] = certificate - return certificate, key_pair - - def delete_certificate(self, certificate_id): - cert = self.describe_certificate(certificate_id) - if cert.status == 'ACTIVE': - raise CertificateStateException( - 'Certificate must be deactivated (not ACTIVE) before deletion.', certificate_id) - - certs = [k[0] for k, v in self.principal_things.items() - if self._get_principal(k[0]).certificate_id == certificate_id] - if len(certs) > 0: - raise DeleteConflictException( - 'Things must be detached before deletion (arn: %s)' % certs[0] - ) - - certs = [k[0] for k, v in self.principal_policies.items() - if self._get_principal(k[0]).certificate_id == certificate_id] - if len(certs) > 0: - raise DeleteConflictException( - 'Certificate policies must be detached before deletion (arn: %s)' % certs[0] - ) - - del self.certificates[certificate_id] - - def describe_certificate(self, certificate_id): - certs = [_ for _ in self.certificates.values() if _.certificate_id == certificate_id] - if len(certs) == 0: - raise ResourceNotFoundException() - return certs[0] - - def list_certificates(self): - return self.certificates.values() - - def register_certificate(self, certificate_pem, ca_certificate_pem, set_as_active, status): - certificate = FakeCertificate(certificate_pem, 'ACTIVE' if set_as_active else status, - self.region_name, ca_certificate_pem) - self.certificates[certificate.certificate_id] = certificate - return certificate - - def update_certificate(self, certificate_id, new_status): - cert = self.describe_certificate(certificate_id) - # TODO: validate new_status - cert.status = new_status - - def create_policy(self, policy_name, policy_document): - policy = FakePolicy(policy_name, policy_document, self.region_name) - self.policies[policy.name] = policy - return policy - - def attach_policy(self, policy_name, target): - principal = self._get_principal(target) - policy = self.get_policy(policy_name) - k = (target, policy_name) - if k in self.principal_policies: - return - self.principal_policies[k] = (principal, policy) - - def detach_policy(self, policy_name, target): - # this may raise ResourceNotFoundException - self._get_principal(target) - self.get_policy(policy_name) - - k = (target, policy_name) - if k not in self.principal_policies: - raise 
ResourceNotFoundException() - del self.principal_policies[k] - - def list_attached_policies(self, target): - policies = [v[1] for k, v in self.principal_policies.items() if k[0] == target] - return policies - - def list_policies(self): - policies = self.policies.values() - return policies - - def get_policy(self, policy_name): - policies = [_ for _ in self.policies.values() if _.name == policy_name] - if len(policies) == 0: - raise ResourceNotFoundException() - return policies[0] - - def delete_policy(self, policy_name): - - policies = [k[1] for k, v in self.principal_policies.items() if k[1] == policy_name] - if len(policies) > 0: - raise DeleteConflictException( - 'The policy cannot be deleted as the policy is attached to one or more principals (name=%s)' - % policy_name - ) - - policy = self.get_policy(policy_name) - del self.policies[policy.name] - - def create_policy_version(self, policy_name, policy_document, set_as_default): - policy = self.get_policy(policy_name) - if not policy: - raise ResourceNotFoundException() - version = FakePolicyVersion(policy_name, policy_document, set_as_default, self.region_name) - policy.versions.append(version) - version.version_id = '{0}'.format(len(policy.versions)) - if set_as_default: - self.set_default_policy_version(policy_name, version.version_id) - return version - - def set_default_policy_version(self, policy_name, version_id): - policy = self.get_policy(policy_name) - if not policy: - raise ResourceNotFoundException() - for version in policy.versions: - if version.version_id == version_id: - version.is_default = True - policy.default_version_id = version.version_id - policy.document = version.document - else: - version.is_default = False - - def get_policy_version(self, policy_name, version_id): - policy = self.get_policy(policy_name) - if not policy: - raise ResourceNotFoundException() - for version in policy.versions: - if version.version_id == version_id: - return version - raise ResourceNotFoundException() - - def list_policy_versions(self, policy_name): - policy = self.get_policy(policy_name) - if not policy: - raise ResourceNotFoundException() - return policy.versions - - def delete_policy_version(self, policy_name, version_id): - policy = self.get_policy(policy_name) - if not policy: - raise ResourceNotFoundException() - if version_id == policy.default_version_id: - raise InvalidRequestException( - "Cannot delete the default version of a policy") - for i, v in enumerate(policy.versions): - if v.version_id == version_id: - del policy.versions[i] - return - raise ResourceNotFoundException() - - def _get_principal(self, principal_arn): - """ - raise ResourceNotFoundException - """ - if ':cert/' in principal_arn: - certs = [_ for _ in self.certificates.values() if _.arn == principal_arn] - if len(certs) == 0: - raise ResourceNotFoundException() - principal = certs[0] - return principal - else: - # TODO: search for cognito_ids - pass - raise ResourceNotFoundException() - - def attach_principal_policy(self, policy_name, principal_arn): - principal = self._get_principal(principal_arn) - policy = self.get_policy(policy_name) - k = (principal_arn, policy_name) - if k in self.principal_policies: - return - self.principal_policies[k] = (principal, policy) - - def detach_principal_policy(self, policy_name, principal_arn): - # this may raise ResourceNotFoundException - self._get_principal(principal_arn) - self.get_policy(policy_name) - - k = (principal_arn, policy_name) - if k not in self.principal_policies: - raise ResourceNotFoundException() - 
del self.principal_policies[k] - - def list_principal_policies(self, principal_arn): - policies = [v[1] for k, v in self.principal_policies.items() if k[0] == principal_arn] - return policies - - def list_policy_principals(self, policy_name): - principals = [k[0] for k, v in self.principal_policies.items() if k[1] == policy_name] - return principals - - def attach_thing_principal(self, thing_name, principal_arn): - principal = self._get_principal(principal_arn) - thing = self.describe_thing(thing_name) - k = (principal_arn, thing_name) - if k in self.principal_things: - return - self.principal_things[k] = (principal, thing) - - def detach_thing_principal(self, thing_name, principal_arn): - # this may raises ResourceNotFoundException - self._get_principal(principal_arn) - self.describe_thing(thing_name) - - k = (principal_arn, thing_name) - if k not in self.principal_things: - raise ResourceNotFoundException() - del self.principal_things[k] - - def list_principal_things(self, principal_arn): - thing_names = [k[0] for k, v in self.principal_things.items() if k[0] == principal_arn] - return thing_names - - def list_thing_principals(self, thing_name): - principals = [k[0] for k, v in self.principal_things.items() if k[1] == thing_name] - return principals - - def describe_thing_group(self, thing_group_name): - thing_groups = [_ for _ in self.thing_groups.values() if _.thing_group_name == thing_group_name] - if len(thing_groups) == 0: - raise ResourceNotFoundException() - return thing_groups[0] - - def create_thing_group(self, thing_group_name, parent_group_name, thing_group_properties): - thing_group = FakeThingGroup(thing_group_name, parent_group_name, thing_group_properties, self.region_name) - self.thing_groups[thing_group.arn] = thing_group - return thing_group.thing_group_name, thing_group.arn, thing_group.thing_group_id - - def delete_thing_group(self, thing_group_name, expected_version): - thing_group = self.describe_thing_group(thing_group_name) - del self.thing_groups[thing_group.arn] - - def list_thing_groups(self, parent_group, name_prefix_filter, recursive): - thing_groups = self.thing_groups.values() - return thing_groups - - def update_thing_group(self, thing_group_name, thing_group_properties, expected_version): - thing_group = self.describe_thing_group(thing_group_name) - if expected_version and expected_version != thing_group.version: - raise VersionConflictException(thing_group_name) - attribute_payload = thing_group_properties.get('attributePayload', None) - if attribute_payload is not None and 'attributes' in attribute_payload: - do_merge = attribute_payload.get('merge', False) - attributes = attribute_payload['attributes'] - if not do_merge: - thing_group.thing_group_properties['attributePayload']['attributes'] = attributes - else: - thing_group.thing_group_properties['attributePayload']['attributes'].update(attributes) - elif attribute_payload is not None and 'attributes' not in attribute_payload: - thing_group.attributes = {} - thing_group.version = thing_group.version + 1 - return thing_group.version - - def _identify_thing_group(self, thing_group_name, thing_group_arn): - # identify thing group - if thing_group_name is None and thing_group_arn is None: - raise InvalidRequestException( - ' Both thingGroupArn and thingGroupName are empty. 
Need to specify at least one of them' - ) - if thing_group_name is not None: - thing_group = self.describe_thing_group(thing_group_name) - if thing_group_arn and thing_group.arn != thing_group_arn: - raise InvalidRequestException( - 'ThingGroupName thingGroupArn does not match specified thingGroupName in request' - ) - elif thing_group_arn is not None: - if thing_group_arn not in self.thing_groups: - raise InvalidRequestException() - thing_group = self.thing_groups[thing_group_arn] - return thing_group - - def _identify_thing(self, thing_name, thing_arn): - # identify thing - if thing_name is None and thing_arn is None: - raise InvalidRequestException( - 'Both thingArn and thingName are empty. Need to specify at least one of them' - ) - if thing_name is not None: - thing = self.describe_thing(thing_name) - if thing_arn and thing.arn != thing_arn: - raise InvalidRequestException( - 'ThingName thingArn does not match specified thingName in request' - ) - elif thing_arn is not None: - if thing_arn not in self.things: - raise InvalidRequestException() - thing = self.things[thing_arn] - return thing - - def add_thing_to_thing_group(self, thing_group_name, thing_group_arn, thing_name, thing_arn): - thing_group = self._identify_thing_group(thing_group_name, thing_group_arn) - thing = self._identify_thing(thing_name, thing_arn) - if thing.arn in thing_group.things: - # aws ignores duplicate registration - return - thing_group.things[thing.arn] = thing - - def remove_thing_from_thing_group(self, thing_group_name, thing_group_arn, thing_name, thing_arn): - thing_group = self._identify_thing_group(thing_group_name, thing_group_arn) - thing = self._identify_thing(thing_name, thing_arn) - if thing.arn not in thing_group.things: - # aws ignores non-registered thing - return - del thing_group.things[thing.arn] - - def list_things_in_thing_group(self, thing_group_name, recursive): - thing_group = self.describe_thing_group(thing_group_name) - return thing_group.things.values() - - def list_thing_groups_for_thing(self, thing_name): - thing = self.describe_thing(thing_name) - all_thing_groups = self.list_thing_groups(None, None, None) - ret = [] - for thing_group in all_thing_groups: - if thing.arn in thing_group.things: - ret.append({ - 'groupName': thing_group.thing_group_name, - 'groupArn': thing_group.arn - }) - return ret - - def update_thing_groups_for_thing(self, thing_name, thing_groups_to_add, thing_groups_to_remove): - thing = self.describe_thing(thing_name) - for thing_group_name in thing_groups_to_add: - thing_group = self.describe_thing_group(thing_group_name) - self.add_thing_to_thing_group( - thing_group.thing_group_name, None, - thing.thing_name, None - ) - for thing_group_name in thing_groups_to_remove: - thing_group = self.describe_thing_group(thing_group_name) - self.remove_thing_from_thing_group( - thing_group.thing_group_name, None, - thing.thing_name, None - ) - - def create_job(self, job_id, targets, document_source, document, description, presigned_url_config, - target_selection, job_executions_rollout_config, document_parameters): - job = FakeJob(job_id, targets, document_source, document, description, presigned_url_config, target_selection, - job_executions_rollout_config, document_parameters, self.region_name) - self.jobs[job_id] = job - - for thing_arn in targets: - thing_name = thing_arn.split(':')[-1].split('/')[-1] - job_execution = FakeJobExecution(job_id, thing_arn) - self.job_executions[(job_id, thing_name)] = job_execution - return job.job_arn, job_id, description - - def 
describe_job(self, job_id): - jobs = [_ for _ in self.jobs.values() if _.job_id == job_id] - if len(jobs) == 0: - raise ResourceNotFoundException() - return jobs[0] - - def delete_job(self, job_id, force): - job = self.jobs[job_id] - - if job.status == 'IN_PROGRESS' and force: - del self.jobs[job_id] - elif job.status != 'IN_PROGRESS': - del self.jobs[job_id] - else: - raise InvalidStateTransitionException() - - def cancel_job(self, job_id, reason_code, comment, force): - job = self.jobs[job_id] - - job.reason_code = reason_code if reason_code is not None else job.reason_code - job.comment = comment if comment is not None else job.comment - job.force = force if force is not None and force != job.force else job.force - job.status = 'CANCELED' - - if job.status == 'IN_PROGRESS' and force: - self.jobs[job_id] = job - elif job.status != 'IN_PROGRESS': - self.jobs[job_id] = job - else: - raise InvalidStateTransitionException() - - return job - - def get_job_document(self, job_id): - return self.jobs[job_id] - - def list_jobs(self, status, target_selection, max_results, token, thing_group_name, thing_group_id): - # TODO: implement filters - all_jobs = [_.to_dict() for _ in self.jobs.values()] - filtered_jobs = all_jobs - - if token is None: - jobs = filtered_jobs[0:max_results] - next_token = str(max_results) if len(filtered_jobs) > max_results else None - else: - token = int(token) - jobs = filtered_jobs[token:token + max_results] - next_token = str(token + max_results) if len(filtered_jobs) > token + max_results else None - - return jobs, next_token - - def describe_job_execution(self, job_id, thing_name, execution_number): - try: - job_execution = self.job_executions[(job_id, thing_name)] - except KeyError: - raise ResourceNotFoundException() - - if job_execution is None or \ - (execution_number is not None and job_execution.execution_number != execution_number): - raise ResourceNotFoundException() - - return job_execution - - def cancel_job_execution(self, job_id, thing_name, force, expected_version, status_details): - job_execution = self.job_executions[(job_id, thing_name)] - - if job_execution is None: - raise ResourceNotFoundException() - - job_execution.force_canceled = force if force is not None else job_execution.force_canceled - # TODO: implement expected_version and status_details (at most 10 can be specified) - - if job_execution.status == 'IN_PROGRESS' and force: - job_execution.status = 'CANCELED' - self.job_executions[(job_id, thing_name)] = job_execution - elif job_execution.status != 'IN_PROGRESS': - job_execution.status = 'CANCELED' - self.job_executions[(job_id, thing_name)] = job_execution - else: - raise InvalidStateTransitionException() - - def delete_job_execution(self, job_id, thing_name, execution_number, force): - job_execution = self.job_executions[(job_id, thing_name)] - - if job_execution.execution_number != execution_number: - raise ResourceNotFoundException() - - if job_execution.status == 'IN_PROGRESS' and force: - del self.job_executions[(job_id, thing_name)] - elif job_execution.status != 'IN_PROGRESS': - del self.job_executions[(job_id, thing_name)] - else: - raise InvalidStateTransitionException() - - def list_job_executions_for_job(self, job_id, status, max_results, next_token): - job_executions = [self.job_executions[je].to_dict() for je in self.job_executions if je[0] == job_id] - - if status is not None: - job_executions = list(filter(lambda elem: - status in elem["status"] and - elem["status"] == status, job_executions)) - - token = next_token - if 
token is None: - job_executions = job_executions[0:max_results] - next_token = str(max_results) if len(job_executions) > max_results else None - else: - token = int(token) - job_executions = job_executions[token:token + max_results] - next_token = str(token + max_results) if len(job_executions) > token + max_results else None - - return job_executions, next_token - - def list_job_executions_for_thing(self, thing_name, status, max_results, next_token): - job_executions = [self.job_executions[je].to_dict() for je in self.job_executions if je[1] == thing_name] - - if status is not None: - job_executions = list(filter(lambda elem: - status in elem["status"] and - elem["status"] == status, job_executions)) - - token = next_token - if token is None: - job_executions = job_executions[0:max_results] - next_token = str(max_results) if len(job_executions) > max_results else None - else: - token = int(token) - job_executions = job_executions[token:token + max_results] - next_token = str(token + max_results) if len(job_executions) > token + max_results else None - - return job_executions, next_token - - -available_regions = boto3.session.Session().get_available_regions("iot") -iot_backends = {region: IoTBackend(region) for region in available_regions} +from __future__ import unicode_literals + +import hashlib +import random +import re +import string +import time +import uuid +from collections import OrderedDict +from datetime import datetime + +import boto3 + +from moto.core import BaseBackend, BaseModel +from .exceptions import ( + CertificateStateException, + DeleteConflictException, + ResourceNotFoundException, + InvalidRequestException, + InvalidStateTransitionException, + VersionConflictException +) + + +class FakeThing(BaseModel): + def __init__(self, thing_name, thing_type, attributes, region_name): + self.region_name = region_name + self.thing_name = thing_name + self.thing_type = thing_type + self.attributes = attributes + self.arn = 'arn:aws:iot:%s:1:thing/%s' % (self.region_name, thing_name) + self.version = 1 + # TODO: we need to handle 'version'? 
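+        # The shadow below is what the separate iot-data mock reads and
+        # writes. A minimal, hypothetical round trip through it (the client
+        # calls are standard boto3; the decorators are moto's):
+        #
+        #     import boto3
+        #     from moto import mock_iot, mock_iotdata
+        #
+        #     @mock_iot
+        #     @mock_iotdata
+        #     def shadow_roundtrip():
+        #         iot = boto3.client('iot', region_name='us-east-1')
+        #         data = boto3.client('iot-data', region_name='us-east-1')
+        #         iot.create_thing(thingName='thermostat')
+        #         data.update_thing_shadow(
+        #             thingName='thermostat',
+        #             payload=b'{"state": {"desired": {"temp": 20}}}')
+        #         return data.get_thing_shadow(thingName='thermostat')['payload'].read()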
+
+        # for iot-data
+        self.thing_shadow = None
+
+    def to_dict(self, include_default_client_id=False):
+        obj = {
+            'thingName': self.thing_name,
+            'thingArn': self.arn,
+            'attributes': self.attributes,
+            'version': self.version
+        }
+        if self.thing_type:
+            obj['thingTypeName'] = self.thing_type.thing_type_name
+        if include_default_client_id:
+            obj['defaultClientId'] = self.thing_name
+        return obj
+
+
+class FakeThingType(BaseModel):
+    def __init__(self, thing_type_name, thing_type_properties, region_name):
+        self.region_name = region_name
+        self.thing_type_name = thing_type_name
+        self.thing_type_properties = thing_type_properties
+        self.thing_type_id = str(uuid.uuid4())  # the id format is undocumented; use a random UUID
+        t = time.time()
+        self.metadata = {
+            'deprecated': False,
+            'creationDate': int(t * 1000) / 1000.0
+        }
+        self.arn = 'arn:aws:iot:%s:1:thingtype/%s' % (self.region_name, thing_type_name)
+
+    def to_dict(self):
+        return {
+            'thingTypeName': self.thing_type_name,
+            'thingTypeId': self.thing_type_id,
+            'thingTypeProperties': self.thing_type_properties,
+            'thingTypeMetadata': self.metadata
+        }
+
+
+class FakeThingGroup(BaseModel):
+    def __init__(self, thing_group_name, parent_group_name, thing_group_properties, region_name):
+        self.region_name = region_name
+        self.thing_group_name = thing_group_name
+        self.thing_group_id = str(uuid.uuid4())  # the id format is undocumented; use a random UUID
+        self.version = 1  # TODO: thing group versioning is not implemented yet
+        self.parent_group_name = parent_group_name
+        self.thing_group_properties = thing_group_properties or {}
+        t = time.time()
+        self.metadata = {
+            'creationDate': int(t * 1000) / 1000.0
+        }
+        self.arn = 'arn:aws:iot:%s:1:thinggroup/%s' % (self.region_name, thing_group_name)
+        self.things = OrderedDict()
+
+    def to_dict(self):
+        return {
+            'thingGroupName': self.thing_group_name,
+            'thingGroupId': self.thing_group_id,
+            'version': self.version,
+            'thingGroupProperties': self.thing_group_properties,
+            'thingGroupMetadata': self.metadata
+        }
+
+
+class FakeCertificate(BaseModel):
+    def __init__(self, certificate_pem, status, region_name, ca_certificate_pem=None):
+        m = hashlib.sha256()
+        m.update(str(uuid.uuid4()).encode('utf-8'))
+        self.certificate_id = m.hexdigest()
+        self.arn = 'arn:aws:iot:%s:1:cert/%s' % (region_name, self.certificate_id)
+        self.certificate_pem = certificate_pem
+        self.status = status
+
+        # TODO: use the real account id as the owner instead of a placeholder
+        self.owner = '1'
+        self.transfer_data = {}
+        self.creation_date = time.time()
+        self.last_modified_date = self.creation_date
+
+        self.ca_certificate_id = None
+        self.ca_certificate_pem = ca_certificate_pem
+        if ca_certificate_pem:
+            m.update(str(uuid.uuid4()).encode('utf-8'))
+            self.ca_certificate_id = m.hexdigest()
+
+    def to_dict(self):
+        return {
+            'certificateArn': self.arn,
+            'certificateId': self.certificate_id,
+            'caCertificateId': self.ca_certificate_id,
+            'status': self.status,
+            'creationDate': self.creation_date
+        }
+
+    def to_description_dict(self):
+        """
+        Some situations may also need the keys below:
+        - caCertificateId
+        - previousOwnedBy
+        """
+        return {
+            'certificateArn': self.arn,
+            'certificateId': self.certificate_id,
+            'status': self.status,
+            'certificatePem': self.certificate_pem,
+            'ownedBy': self.owner,
+            'creationDate': self.creation_date,
+            'lastModifiedDate': self.last_modified_date,
+            'transferData': self.transfer_data
+        }
+
+
+class FakePolicy(BaseModel):
+    def __init__(self, name, document, region_name, default_version_id='1'):
+        self.name = name
+        self.document = document
+        self.arn = 'arn:aws:iot:%s:1:policy/%s' % (region_name, name)
+        self.default_version_id = default_version_id
+        self.versions = [FakePolicyVersion(self.name, document, True, region_name)]
+
+    def to_get_dict(self):
+        return {
+            'policyName': self.name,
+            'policyArn': self.arn,
+            'policyDocument': self.document,
+            'defaultVersionId': self.default_version_id
+        }
+
+    def to_dict_at_creation(self):
+        return {
+            'policyName': self.name,
+            'policyArn': self.arn,
+            'policyDocument': self.document,
+            'policyVersionId': self.default_version_id
+        }
+
+    def to_dict(self):
+        return {
+            'policyName': self.name,
+            'policyArn': self.arn,
+        }
+
+
+class FakePolicyVersion(object):
+
+    def __init__(self,
+                 policy_name,
+                 document,
+                 is_default,
+                 region_name):
+        self.name = policy_name
+        self.arn = 'arn:aws:iot:%s:1:policy/%s' % (region_name, policy_name)
+        self.document = document or {}
+        self.is_default = is_default
+        self.version_id = '1'
+
+        self.create_datetime = time.mktime(datetime(2015, 1, 1).timetuple())
+        self.last_modified_datetime = time.mktime(datetime(2015, 1, 2).timetuple())
+
+    def to_get_dict(self):
+        return {
+            'policyName': self.name,
+            'policyArn': self.arn,
+            'policyDocument': self.document,
+            'policyVersionId': self.version_id,
+            'isDefaultVersion': self.is_default,
+            'creationDate': self.create_datetime,
+            'lastModifiedDate': self.last_modified_datetime,
+            'generationId': self.version_id
+        }
+
+    def to_dict_at_creation(self):
+        return {
+            'policyArn': self.arn,
+            'policyDocument': self.document,
+            'policyVersionId': self.version_id,
+            'isDefaultVersion': self.is_default
+        }
+
+    def to_dict(self):
+        return {
+            'versionId': self.version_id,
+            'isDefaultVersion': self.is_default,
+            'createDate': self.create_datetime,
+        }
+
+
+class FakeJob(BaseModel):
+    # anchored with '$' (match() already anchors at the start) so the whole
+    # job id is validated, not just its first character
+    JOB_ID_REGEX_PATTERN = "[a-zA-Z0-9_-]+$"
+    JOB_ID_REGEX = re.compile(JOB_ID_REGEX_PATTERN)
+
+    def __init__(self, job_id, targets, document_source, document, description, presigned_url_config, target_selection,
+                 job_executions_rollout_config, document_parameters, region_name):
+        if not self._job_id_matcher(self.JOB_ID_REGEX, job_id):
+            raise InvalidRequestException()
+
+        self.region_name = region_name
+        self.job_id = job_id
+        self.job_arn = 'arn:aws:iot:%s:1:job/%s' % (self.region_name, job_id)
+        self.targets = targets
+        self.document_source = document_source
+        self.document = document
+        self.force = False
+        self.description = description
+        self.presigned_url_config = presigned_url_config
+        self.target_selection = target_selection
+        self.job_executions_rollout_config = job_executions_rollout_config
+        self.status = 'QUEUED'  # QUEUED | IN_PROGRESS | CANCELED | COMPLETED
+        self.comment = None
+        self.reason_code = None
+        self.created_at = time.mktime(datetime(2015, 1, 1).timetuple())
+        self.last_updated_at = time.mktime(datetime(2015, 1, 1).timetuple())
+        self.completed_at = None
+        self.job_process_details = {
+            'processingTargets': targets,
+            'numberOfQueuedThings': 1,
+            'numberOfCanceledThings': 0,
+            'numberOfSucceededThings': 0,
+            'numberOfFailedThings': 0,
+            'numberOfRejectedThings': 0,
+            'numberOfInProgressThings': 0,
+            'numberOfRemovedThings': 0
+        }
+        self.document_parameters = document_parameters
+
+    def to_dict(self):
+        obj = {
+            'jobArn': self.job_arn,
+            'jobId': self.job_id,
+            'targets': self.targets,
+            'description': self.description,
+            'presignedUrlConfig': self.presigned_url_config,
+            'targetSelection': self.target_selection,
+            'jobExecutionsRolloutConfig': self.job_executions_rollout_config,
+            'status': self.status,
+            'comment': self.comment,
+            'forceCanceled': self.force,
+            'reasonCode': self.reason_code,
+            'createdAt': self.created_at,
+            'lastUpdatedAt': self.last_updated_at,
+            'completedAt': self.completed_at,
+            'jobProcessDetails': self.job_process_details,
+            'documentParameters': self.document_parameters,
+            'document': self.document,
+            'documentSource': self.document_source
+        }
+
+        return obj
+
+    def _job_id_matcher(self, regex, argument):
+        regex_match = regex.match(argument) is not None
+        length_match = len(argument) <= 64
+        return regex_match and length_match
+
+
+class FakeJobExecution(BaseModel):
+
+    def __init__(self, job_id, thing_arn, status='QUEUED', force_canceled=False, status_details_map=None):
+        self.job_id = job_id
+        self.status = status  # QUEUED | IN_PROGRESS | CANCELED | COMPLETED
+        self.force_canceled = force_canceled
+        # avoid a shared mutable default for the status-details map
+        self.status_details_map = status_details_map if status_details_map is not None else {}
+        self.thing_arn = thing_arn
+        self.queued_at = time.mktime(datetime(2015, 1, 1).timetuple())
+        self.started_at = time.mktime(datetime(2015, 1, 1).timetuple())
+        self.last_updated_at = time.mktime(datetime(2015, 1, 1).timetuple())
+        self.execution_number = 123
+        self.version_number = 123
+        self.approximate_seconds_before_time_out = 123
+
+    def to_get_dict(self):
+        obj = {
+            'jobId': self.job_id,
+            'status': self.status,
+            'forceCanceled': self.force_canceled,
+            'statusDetails': {'detailsMap': self.status_details_map},
+            'thingArn': self.thing_arn,
+            'queuedAt': self.queued_at,
+            'startedAt': self.started_at,
+            'lastUpdatedAt': self.last_updated_at,
+            'executionNumber': self.execution_number,
+            'versionNumber': self.version_number,
+            'approximateSecondsBeforeTimedOut': self.approximate_seconds_before_time_out
+        }
+
+        return obj
+
+    def to_dict(self):
+        obj = {
+            'jobId': self.job_id,
+            'thingArn': self.thing_arn,
+            'jobExecutionSummary': {
+                'status': self.status,
+                'queuedAt': self.queued_at,
+                'startedAt': self.started_at,
+                'lastUpdatedAt': self.last_updated_at,
+                'executionNumber': self.execution_number,
+            }
+        }
+
+        return obj
+
+
+class IoTBackend(BaseBackend):
+    def __init__(self, region_name=None):
+        super(IoTBackend, self).__init__()
+        self.region_name = region_name
+        self.things = OrderedDict()
+        self.jobs = OrderedDict()
+        self.job_executions = OrderedDict()
+        self.thing_types = OrderedDict()
+        self.thing_groups = OrderedDict()
+        self.certificates = OrderedDict()
+        self.policies = OrderedDict()
+        self.principal_policies = OrderedDict()
+        self.principal_things = OrderedDict()
+
+    def reset(self):
+        region_name = self.region_name
+        self.__dict__ = {}
+        self.__init__(region_name)
+
+    def create_thing(self, thing_name, thing_type_name, attribute_payload):
+        thing_types = self.list_thing_types()
+        thing_type = None
+        if thing_type_name:
+            filtered_thing_types = [_ for _ in thing_types if _.thing_type_name == thing_type_name]
+            if len(filtered_thing_types) == 0:
+                raise ResourceNotFoundException()
+            thing_type = filtered_thing_types[0]
+        if attribute_payload is None:
+            attributes = {}
+        elif 'attributes' not in attribute_payload:
+            attributes = {}
+        else:
+            attributes = attribute_payload['attributes']
+        thing = FakeThing(thing_name, thing_type, attributes, self.region_name)
+        self.things[thing.arn] = thing
+        return thing.thing_name, thing.arn
+
+    def create_thing_type(self, thing_type_name, thing_type_properties):
+        if thing_type_properties is None:
+            thing_type_properties = {}
+        thing_type = FakeThingType(thing_type_name, thing_type_properties, self.region_name)
+        self.thing_types[thing_type.arn] = thing_type
+        return thing_type.thing_type_name, thing_type.arn
+
+    def list_thing_types(self, 
thing_type_name=None): + if thing_type_name: + # It's weird but thing_type_name is filtered by forward match, not complete match + return [_ for _ in self.thing_types.values() if _.thing_type_name.startswith(thing_type_name)] + return self.thing_types.values() + + def list_things(self, attribute_name, attribute_value, thing_type_name, max_results, token): + all_things = [_.to_dict() for _ in self.things.values()] + if attribute_name is not None and thing_type_name is not None: + filtered_things = list(filter(lambda elem: + attribute_name in elem["attributes"] and + elem["attributes"][attribute_name] == attribute_value and + "thingTypeName" in elem and + elem["thingTypeName"] == thing_type_name, all_things)) + elif attribute_name is not None and thing_type_name is None: + filtered_things = list(filter(lambda elem: + attribute_name in elem["attributes"] and + elem["attributes"][attribute_name] == attribute_value, all_things)) + elif attribute_name is None and thing_type_name is not None: + filtered_things = list( + filter(lambda elem: "thingTypeName" in elem and elem["thingTypeName"] == thing_type_name, all_things)) + else: + filtered_things = all_things + + if token is None: + things = filtered_things[0:max_results] + next_token = str(max_results) if len(filtered_things) > max_results else None + else: + token = int(token) + things = filtered_things[token:token + max_results] + next_token = str(token + max_results) if len(filtered_things) > token + max_results else None + + return things, next_token + + def describe_thing(self, thing_name): + things = [_ for _ in self.things.values() if _.thing_name == thing_name] + if len(things) == 0: + raise ResourceNotFoundException() + return things[0] + + def describe_thing_type(self, thing_type_name): + thing_types = [_ for _ in self.thing_types.values() if _.thing_type_name == thing_type_name] + if len(thing_types) == 0: + raise ResourceNotFoundException() + return thing_types[0] + + def delete_thing(self, thing_name, expected_version): + # TODO: handle expected_version + + # can raise ResourceNotFoundError + thing = self.describe_thing(thing_name) + del self.things[thing.arn] + + def delete_thing_type(self, thing_type_name): + # can raise ResourceNotFoundError + thing_type = self.describe_thing_type(thing_type_name) + del self.thing_types[thing_type.arn] + + def update_thing(self, thing_name, thing_type_name, attribute_payload, expected_version, remove_thing_type): + # if attributes payload = {}, nothing + thing = self.describe_thing(thing_name) + thing_type = None + + if remove_thing_type and thing_type_name: + raise InvalidRequestException() + + # thing_type + if thing_type_name: + thing_types = self.list_thing_types() + filtered_thing_types = [_ for _ in thing_types if _.thing_type_name == thing_type_name] + if len(filtered_thing_types) == 0: + raise ResourceNotFoundException() + thing_type = filtered_thing_types[0] + thing.thing_type = thing_type + + if remove_thing_type: + thing.thing_type = None + + # attribute + if attribute_payload is not None and 'attributes' in attribute_payload: + do_merge = attribute_payload.get('merge', False) + attributes = attribute_payload['attributes'] + if not do_merge: + thing.attributes = attributes + else: + thing.attributes.update(attributes) + + def _random_string(self): + n = 20 + random_str = ''.join([random.choice(string.ascii_letters + string.digits) for i in range(n)]) + return random_str + + def create_keys_and_certificate(self, set_as_active): + # implement here + # caCertificate can be blank + 
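+        # The key material generated below is random placeholder text, not a
+        # real X.509 certificate or RSA key pair. A minimal, hypothetical
+        # sketch of exercising it through boto3 (names assumed, decorator is
+        # moto's):
+        #
+        #     import boto3
+        #     from moto import mock_iot
+        #
+        #     @mock_iot
+        #     def issue_cert():
+        #         client = boto3.client('iot', region_name='us-east-1')
+        #         resp = client.create_keys_and_certificate(setAsActive=True)
+        #         # resp carries certificateArn, certificateId, certificatePem
+        #         # and a keyPair dict with PublicKey/PrivateKey entries
+        #         return resp['certificateId']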
key_pair = { + 'PublicKey': self._random_string(), + 'PrivateKey': self._random_string() + } + certificate_pem = self._random_string() + status = 'ACTIVE' if set_as_active else 'INACTIVE' + certificate = FakeCertificate(certificate_pem, status, self.region_name) + self.certificates[certificate.certificate_id] = certificate + return certificate, key_pair + + def delete_certificate(self, certificate_id): + cert = self.describe_certificate(certificate_id) + if cert.status == 'ACTIVE': + raise CertificateStateException( + 'Certificate must be deactivated (not ACTIVE) before deletion.', certificate_id) + + certs = [k[0] for k, v in self.principal_things.items() + if self._get_principal(k[0]).certificate_id == certificate_id] + if len(certs) > 0: + raise DeleteConflictException( + 'Things must be detached before deletion (arn: %s)' % certs[0] + ) + + certs = [k[0] for k, v in self.principal_policies.items() + if self._get_principal(k[0]).certificate_id == certificate_id] + if len(certs) > 0: + raise DeleteConflictException( + 'Certificate policies must be detached before deletion (arn: %s)' % certs[0] + ) + + del self.certificates[certificate_id] + + def describe_certificate(self, certificate_id): + certs = [_ for _ in self.certificates.values() if _.certificate_id == certificate_id] + if len(certs) == 0: + raise ResourceNotFoundException() + return certs[0] + + def list_certificates(self): + return self.certificates.values() + + def register_certificate(self, certificate_pem, ca_certificate_pem, set_as_active, status): + certificate = FakeCertificate(certificate_pem, 'ACTIVE' if set_as_active else status, + self.region_name, ca_certificate_pem) + self.certificates[certificate.certificate_id] = certificate + return certificate + + def update_certificate(self, certificate_id, new_status): + cert = self.describe_certificate(certificate_id) + # TODO: validate new_status + cert.status = new_status + + def create_policy(self, policy_name, policy_document): + policy = FakePolicy(policy_name, policy_document, self.region_name) + self.policies[policy.name] = policy + return policy + + def attach_policy(self, policy_name, target): + principal = self._get_principal(target) + policy = self.get_policy(policy_name) + k = (target, policy_name) + if k in self.principal_policies: + return + self.principal_policies[k] = (principal, policy) + + def detach_policy(self, policy_name, target): + # this may raises ResourceNotFoundException + self._get_principal(target) + self.get_policy(policy_name) + + k = (target, policy_name) + if k not in self.principal_policies: + raise ResourceNotFoundException() + del self.principal_policies[k] + + def list_attached_policies(self, target): + policies = [v[1] for k, v in self.principal_policies.items() if k[0] == target] + return policies + + def list_policies(self): + policies = self.policies.values() + return policies + + def get_policy(self, policy_name): + policies = [_ for _ in self.policies.values() if _.name == policy_name] + if len(policies) == 0: + raise ResourceNotFoundException() + return policies[0] + + def delete_policy(self, policy_name): + + policies = [k[1] for k, v in self.principal_policies.items() if k[1] == policy_name] + if len(policies) > 0: + raise DeleteConflictException( + 'The policy cannot be deleted as the policy is attached to one or more principals (name=%s)' + % policy_name + ) + + policy = self.get_policy(policy_name) + del self.policies[policy.name] + + def create_policy_version(self, policy_name, policy_document, set_as_default): + policy = 
self.get_policy(policy_name) + if not policy: + raise ResourceNotFoundException() + version = FakePolicyVersion(policy_name, policy_document, set_as_default, self.region_name) + policy.versions.append(version) + version.version_id = '{0}'.format(len(policy.versions)) + if set_as_default: + self.set_default_policy_version(policy_name, version.version_id) + return version + + def set_default_policy_version(self, policy_name, version_id): + policy = self.get_policy(policy_name) + if not policy: + raise ResourceNotFoundException() + for version in policy.versions: + if version.version_id == version_id: + version.is_default = True + policy.default_version_id = version.version_id + policy.document = version.document + else: + version.is_default = False + + def get_policy_version(self, policy_name, version_id): + policy = self.get_policy(policy_name) + if not policy: + raise ResourceNotFoundException() + for version in policy.versions: + if version.version_id == version_id: + return version + raise ResourceNotFoundException() + + def list_policy_versions(self, policy_name): + policy = self.get_policy(policy_name) + if not policy: + raise ResourceNotFoundException() + return policy.versions + + def delete_policy_version(self, policy_name, version_id): + policy = self.get_policy(policy_name) + if not policy: + raise ResourceNotFoundException() + if version_id == policy.default_version_id: + raise InvalidRequestException( + "Cannot delete the default version of a policy") + for i, v in enumerate(policy.versions): + if v.version_id == version_id: + del policy.versions[i] + return + raise ResourceNotFoundException() + + def _get_principal(self, principal_arn): + """ + raise ResourceNotFoundException + """ + if ':cert/' in principal_arn: + certs = [_ for _ in self.certificates.values() if _.arn == principal_arn] + if len(certs) == 0: + raise ResourceNotFoundException() + principal = certs[0] + return principal + else: + # TODO: search for cognito_ids + pass + raise ResourceNotFoundException() + + def attach_principal_policy(self, policy_name, principal_arn): + principal = self._get_principal(principal_arn) + policy = self.get_policy(policy_name) + k = (principal_arn, policy_name) + if k in self.principal_policies: + return + self.principal_policies[k] = (principal, policy) + + def detach_principal_policy(self, policy_name, principal_arn): + # this may raises ResourceNotFoundException + self._get_principal(principal_arn) + self.get_policy(policy_name) + + k = (principal_arn, policy_name) + if k not in self.principal_policies: + raise ResourceNotFoundException() + del self.principal_policies[k] + + def list_principal_policies(self, principal_arn): + policies = [v[1] for k, v in self.principal_policies.items() if k[0] == principal_arn] + return policies + + def list_policy_principals(self, policy_name): + principals = [k[0] for k, v in self.principal_policies.items() if k[1] == policy_name] + return principals + + def attach_thing_principal(self, thing_name, principal_arn): + principal = self._get_principal(principal_arn) + thing = self.describe_thing(thing_name) + k = (principal_arn, thing_name) + if k in self.principal_things: + return + self.principal_things[k] = (principal, thing) + + def detach_thing_principal(self, thing_name, principal_arn): + # this may raises ResourceNotFoundException + self._get_principal(principal_arn) + self.describe_thing(thing_name) + + k = (principal_arn, thing_name) + if k not in self.principal_things: + raise ResourceNotFoundException() + del self.principal_things[k] + + 
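+    # Attachments above are kept in dicts keyed by (principal_arn, name)
+    # tuples, which the listing helpers below filter on. A minimal,
+    # hypothetical sketch of the flow (standard boto3 calls under moto's
+    # decorator):
+    #
+    #     import boto3
+    #     from moto import mock_iot
+    #
+    #     @mock_iot
+    #     def attach_and_list():
+    #         client = boto3.client('iot', region_name='us-east-1')
+    #         cert = client.create_keys_and_certificate(setAsActive=True)
+    #         client.create_thing(thingName='my-thing')
+    #         client.attach_thing_principal(thingName='my-thing',
+    #                                       principal=cert['certificateArn'])
+    #         return client.list_thing_principals(thingName='my-thing')['principals']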
def list_principal_things(self, principal_arn):
+        # keys are (principal_arn, thing_name) tuples, so return the thing names
+        thing_names = [k[1] for k, v in self.principal_things.items() if k[0] == principal_arn]
+        return thing_names
+
+    def list_thing_principals(self, thing_name):
+        principals = [k[0] for k, v in self.principal_things.items() if k[1] == thing_name]
+        return principals
+
+    def describe_thing_group(self, thing_group_name):
+        thing_groups = [_ for _ in self.thing_groups.values() if _.thing_group_name == thing_group_name]
+        if len(thing_groups) == 0:
+            raise ResourceNotFoundException()
+        return thing_groups[0]
+
+    def create_thing_group(self, thing_group_name, parent_group_name, thing_group_properties):
+        thing_group = FakeThingGroup(thing_group_name, parent_group_name, thing_group_properties, self.region_name)
+        self.thing_groups[thing_group.arn] = thing_group
+        return thing_group.thing_group_name, thing_group.arn, thing_group.thing_group_id
+
+    def delete_thing_group(self, thing_group_name, expected_version):
+        # TODO: enforce expected_version
+        thing_group = self.describe_thing_group(thing_group_name)
+        del self.thing_groups[thing_group.arn]
+
+    def list_thing_groups(self, parent_group, name_prefix_filter, recursive):
+        # TODO: apply the parent_group, name_prefix_filter and recursive filters
+        thing_groups = self.thing_groups.values()
+        return thing_groups
+
+    def update_thing_group(self, thing_group_name, thing_group_properties, expected_version):
+        thing_group = self.describe_thing_group(thing_group_name)
+        if expected_version and expected_version != thing_group.version:
+            raise VersionConflictException(thing_group_name)
+        attribute_payload = thing_group_properties.get('attributePayload', None)
+        if attribute_payload is not None and 'attributes' in attribute_payload:
+            do_merge = attribute_payload.get('merge', False)
+            attributes = attribute_payload['attributes']
+            if not do_merge:
+                thing_group.thing_group_properties['attributePayload']['attributes'] = attributes
+            else:
+                thing_group.thing_group_properties['attributePayload']['attributes'].update(attributes)
+        elif attribute_payload is not None and 'attributes' not in attribute_payload:
+            # an attributePayload without attributes clears the stored attributes
+            thing_group.thing_group_properties['attributePayload']['attributes'] = {}
+        thing_group.version = thing_group.version + 1
+        return thing_group.version
+
+    def _identify_thing_group(self, thing_group_name, thing_group_arn):
+        # identify thing group
+        if thing_group_name is None and thing_group_arn is None:
+            raise InvalidRequestException(
+                'Both thingGroupArn and thingGroupName are empty. Need to specify at least one of them'
+            )
+        if thing_group_name is not None:
+            thing_group = self.describe_thing_group(thing_group_name)
+            if thing_group_arn and thing_group.arn != thing_group_arn:
+                raise InvalidRequestException(
+                    'ThingGroupName thingGroupArn does not match specified thingGroupName in request'
+                )
+        elif thing_group_arn is not None:
+            if thing_group_arn not in self.thing_groups:
+                raise InvalidRequestException()
+            thing_group = self.thing_groups[thing_group_arn]
+        return thing_group
+
+    def _identify_thing(self, thing_name, thing_arn):
+        # identify thing
+        if thing_name is None and thing_arn is None:
+            raise InvalidRequestException(
+                'Both thingArn and thingName are empty. 
Need to specify at least one of them' + ) + if thing_name is not None: + thing = self.describe_thing(thing_name) + if thing_arn and thing.arn != thing_arn: + raise InvalidRequestException( + 'ThingName thingArn does not match specified thingName in request' + ) + elif thing_arn is not None: + if thing_arn not in self.things: + raise InvalidRequestException() + thing = self.things[thing_arn] + return thing + + def add_thing_to_thing_group(self, thing_group_name, thing_group_arn, thing_name, thing_arn): + thing_group = self._identify_thing_group(thing_group_name, thing_group_arn) + thing = self._identify_thing(thing_name, thing_arn) + if thing.arn in thing_group.things: + # aws ignores duplicate registration + return + thing_group.things[thing.arn] = thing + + def remove_thing_from_thing_group(self, thing_group_name, thing_group_arn, thing_name, thing_arn): + thing_group = self._identify_thing_group(thing_group_name, thing_group_arn) + thing = self._identify_thing(thing_name, thing_arn) + if thing.arn not in thing_group.things: + # aws ignores non-registered thing + return + del thing_group.things[thing.arn] + + def list_things_in_thing_group(self, thing_group_name, recursive): + thing_group = self.describe_thing_group(thing_group_name) + return thing_group.things.values() + + def list_thing_groups_for_thing(self, thing_name): + thing = self.describe_thing(thing_name) + all_thing_groups = self.list_thing_groups(None, None, None) + ret = [] + for thing_group in all_thing_groups: + if thing.arn in thing_group.things: + ret.append({ + 'groupName': thing_group.thing_group_name, + 'groupArn': thing_group.arn + }) + return ret + + def update_thing_groups_for_thing(self, thing_name, thing_groups_to_add, thing_groups_to_remove): + thing = self.describe_thing(thing_name) + for thing_group_name in thing_groups_to_add: + thing_group = self.describe_thing_group(thing_group_name) + self.add_thing_to_thing_group( + thing_group.thing_group_name, None, + thing.thing_name, None + ) + for thing_group_name in thing_groups_to_remove: + thing_group = self.describe_thing_group(thing_group_name) + self.remove_thing_from_thing_group( + thing_group.thing_group_name, None, + thing.thing_name, None + ) + + def create_job(self, job_id, targets, document_source, document, description, presigned_url_config, + target_selection, job_executions_rollout_config, document_parameters): + job = FakeJob(job_id, targets, document_source, document, description, presigned_url_config, target_selection, + job_executions_rollout_config, document_parameters, self.region_name) + self.jobs[job_id] = job + + for thing_arn in targets: + thing_name = thing_arn.split(':')[-1].split('/')[-1] + job_execution = FakeJobExecution(job_id, thing_arn) + self.job_executions[(job_id, thing_name)] = job_execution + return job.job_arn, job_id, description + + def describe_job(self, job_id): + jobs = [_ for _ in self.jobs.values() if _.job_id == job_id] + if len(jobs) == 0: + raise ResourceNotFoundException() + return jobs[0] + + def delete_job(self, job_id, force): + job = self.jobs[job_id] + + if job.status == 'IN_PROGRESS' and force: + del self.jobs[job_id] + elif job.status != 'IN_PROGRESS': + del self.jobs[job_id] + else: + raise InvalidStateTransitionException() + + def cancel_job(self, job_id, reason_code, comment, force): + job = self.jobs[job_id] + + job.reason_code = reason_code if reason_code is not None else job.reason_code + job.comment = comment if comment is not None else job.comment + job.force = force if force is not None and force != 
job.force else job.force
+
+        # a job that is IN_PROGRESS may only be canceled with force;
+        # check the state before overwriting it, otherwise the check is dead
+        if job.status == 'IN_PROGRESS' and not force:
+            raise InvalidStateTransitionException()
+
+        job.status = 'CANCELED'
+        self.jobs[job_id] = job
+
+        return job
+
+    def get_job_document(self, job_id):
+        if job_id not in self.jobs:
+            raise ResourceNotFoundException()
+        return self.jobs[job_id]
+
+    def list_jobs(self, status, target_selection, max_results, token, thing_group_name, thing_group_id):
+        # TODO: implement filters
+        all_jobs = [_.to_dict() for _ in self.jobs.values()]
+        filtered_jobs = all_jobs
+
+        if token is None:
+            jobs = filtered_jobs[0:max_results]
+            next_token = str(max_results) if len(filtered_jobs) > max_results else None
+        else:
+            token = int(token)
+            jobs = filtered_jobs[token:token + max_results]
+            next_token = str(token + max_results) if len(filtered_jobs) > token + max_results else None
+
+        return jobs, next_token
+
+    def describe_job_execution(self, job_id, thing_name, execution_number):
+        try:
+            job_execution = self.job_executions[(job_id, thing_name)]
+        except KeyError:
+            raise ResourceNotFoundException()
+
+        if job_execution is None or \
+                (execution_number is not None and job_execution.execution_number != execution_number):
+            raise ResourceNotFoundException()
+
+        return job_execution
+
+    def cancel_job_execution(self, job_id, thing_name, force, expected_version, status_details):
+        # use .get() so a missing execution surfaces as ResourceNotFound,
+        # not as a KeyError
+        job_execution = self.job_executions.get((job_id, thing_name))
+
+        if job_execution is None:
+            raise ResourceNotFoundException()
+
+        job_execution.force_canceled = force if force is not None else job_execution.force_canceled
+        # TODO: implement expected_version and status_details (at most 10 can be specified)
+
+        if job_execution.status == 'IN_PROGRESS' and force:
+            job_execution.status = 'CANCELED'
+            self.job_executions[(job_id, thing_name)] = job_execution
+        elif job_execution.status != 'IN_PROGRESS':
+            job_execution.status = 'CANCELED'
+            self.job_executions[(job_id, thing_name)] = job_execution
+        else:
+            raise InvalidStateTransitionException()
+
+    def delete_job_execution(self, job_id, thing_name, execution_number, force):
+        job_execution = self.job_executions[(job_id, thing_name)]
+
+        if job_execution.execution_number != execution_number:
+            raise ResourceNotFoundException()
+
+        if job_execution.status == 'IN_PROGRESS' and force:
+            del self.job_executions[(job_id, thing_name)]
+        elif job_execution.status != 'IN_PROGRESS':
+            del self.job_executions[(job_id, thing_name)]
+        else:
+            raise InvalidStateTransitionException()
+
+    def list_job_executions_for_job(self, job_id, status, max_results, next_token):
+        job_executions = [self.job_executions[je].to_dict() for je in self.job_executions if je[0] == job_id]
+
+        if status is not None:
+            job_executions = list(filter(lambda elem: elem["status"] == status, job_executions))
+
+        token = next_token
+        if token is None:
+            job_executions = job_executions[0:max_results]
+            next_token = str(max_results) if len(job_executions) > max_results else None
+        else:
+            token = int(token)
+            job_executions = job_executions[token:token + max_results]
+            next_token = str(token + max_results) if len(job_executions) > token + max_results else None
+
+        return job_executions, next_token
+
+    def list_job_executions_for_thing(self, thing_name, status, max_results, next_token):
+        job_executions = [self.job_executions[je].to_dict() for je in self.job_executions if je[1] == thing_name]
+
+        if status is not None:
+            job_executions = list(filter(lambda elem: elem["status"] == status, 
job_executions)) + + token = next_token + if token is None: + job_executions = job_executions[0:max_results] + next_token = str(max_results) if len(job_executions) > max_results else None + else: + token = int(token) + job_executions = job_executions[token:token + max_results] + next_token = str(token + max_results) if len(job_executions) > token + max_results else None + + return job_executions, next_token + + +available_regions = boto3.session.Session().get_available_regions("iot") +iot_backends = {region: IoTBackend(region) for region in available_regions} diff --git a/moto/iot/responses.py b/moto/iot/responses.py index 68a2060471c1..8954c7003d9a 100644 --- a/moto/iot/responses.py +++ b/moto/iot/responses.py @@ -1,613 +1,613 @@ -from __future__ import unicode_literals - -import json -from six.moves.urllib.parse import unquote - -from moto.core.responses import BaseResponse -from .models import iot_backends - - -class IoTResponse(BaseResponse): - SERVICE_NAME = 'iot' - - @property - def iot_backend(self): - return iot_backends[self.region] - - def create_thing(self): - thing_name = self._get_param("thingName") - thing_type_name = self._get_param("thingTypeName") - attribute_payload = self._get_param("attributePayload") - thing_name, thing_arn = self.iot_backend.create_thing( - thing_name=thing_name, - thing_type_name=thing_type_name, - attribute_payload=attribute_payload, - ) - return json.dumps(dict(thingName=thing_name, thingArn=thing_arn)) - - def create_thing_type(self): - thing_type_name = self._get_param("thingTypeName") - thing_type_properties = self._get_param("thingTypeProperties") - thing_type_name, thing_type_arn = self.iot_backend.create_thing_type( - thing_type_name=thing_type_name, - thing_type_properties=thing_type_properties, - ) - return json.dumps(dict(thingTypeName=thing_type_name, thingTypeArn=thing_type_arn)) - - def list_thing_types(self): - previous_next_token = self._get_param("nextToken") - max_results = self._get_int_param("maxResults", 50) # not the default, but makes testing easier - thing_type_name = self._get_param("thingTypeName") - thing_types = self.iot_backend.list_thing_types( - thing_type_name=thing_type_name - ) - - thing_types = [_.to_dict() for _ in thing_types] - if previous_next_token is None: - result = thing_types[0:max_results] - next_token = str(max_results) if len(thing_types) > max_results else None - else: - token = int(previous_next_token) - result = thing_types[token:token + max_results] - next_token = str(token + max_results) if len(thing_types) > token + max_results else None - - return json.dumps(dict(thingTypes=result, nextToken=next_token)) - - def list_things(self): - previous_next_token = self._get_param("nextToken") - max_results = self._get_int_param("maxResults", 50) # not the default, but makes testing easier - attribute_name = self._get_param("attributeName") - attribute_value = self._get_param("attributeValue") - thing_type_name = self._get_param("thingTypeName") - things, next_token = self.iot_backend.list_things( - attribute_name=attribute_name, - attribute_value=attribute_value, - thing_type_name=thing_type_name, - max_results=max_results, - token=previous_next_token - ) - - return json.dumps(dict(things=things, nextToken=next_token)) - - def describe_thing(self): - thing_name = self._get_param("thingName") - thing = self.iot_backend.describe_thing( - thing_name=thing_name, - ) - return json.dumps(thing.to_dict(include_default_client_id=True)) - - def describe_thing_type(self): - thing_type_name = 
self._get_param("thingTypeName") - thing_type = self.iot_backend.describe_thing_type( - thing_type_name=thing_type_name, - ) - return json.dumps(thing_type.to_dict()) - - def delete_thing(self): - thing_name = self._get_param("thingName") - expected_version = self._get_param("expectedVersion") - self.iot_backend.delete_thing( - thing_name=thing_name, - expected_version=expected_version, - ) - return json.dumps(dict()) - - def delete_thing_type(self): - thing_type_name = self._get_param("thingTypeName") - self.iot_backend.delete_thing_type( - thing_type_name=thing_type_name, - ) - return json.dumps(dict()) - - def update_thing(self): - thing_name = self._get_param("thingName") - thing_type_name = self._get_param("thingTypeName") - attribute_payload = self._get_param("attributePayload") - expected_version = self._get_param("expectedVersion") - remove_thing_type = self._get_param("removeThingType") - self.iot_backend.update_thing( - thing_name=thing_name, - thing_type_name=thing_type_name, - attribute_payload=attribute_payload, - expected_version=expected_version, - remove_thing_type=remove_thing_type, - ) - return json.dumps(dict()) - - def create_job(self): - job_arn, job_id, description = self.iot_backend.create_job( - job_id=self._get_param("jobId"), - targets=self._get_param("targets"), - description=self._get_param("description"), - document_source=self._get_param("documentSource"), - document=self._get_param("document"), - presigned_url_config=self._get_param("presignedUrlConfig"), - target_selection=self._get_param("targetSelection"), - job_executions_rollout_config=self._get_param("jobExecutionsRolloutConfig"), - document_parameters=self._get_param("documentParameters") - ) - - return json.dumps(dict(jobArn=job_arn, jobId=job_id, description=description)) - - def describe_job(self): - job = self.iot_backend.describe_job(job_id=self._get_param("jobId")) - return json.dumps(dict( - documentSource=job.document_source, - job=dict( - comment=job.comment, - completedAt=job.completed_at, - createdAt=job.created_at, - description=job.description, - documentParameters=job.document_parameters, - forceCanceled=job.force, - reasonCode=job.reason_code, - jobArn=job.job_arn, - jobExecutionsRolloutConfig=job.job_executions_rollout_config, - jobId=job.job_id, - jobProcessDetails=job.job_process_details, - lastUpdatedAt=job.last_updated_at, - presignedUrlConfig=job.presigned_url_config, - status=job.status, - targets=job.targets, - targetSelection=job.target_selection - ))) - - def delete_job(self): - job_id = self._get_param("jobId") - force = self._get_bool_param("force") - - self.iot_backend.delete_job(job_id=job_id, - force=force) - - return json.dumps(dict()) - - def cancel_job(self): - job_id = self._get_param("jobId") - reason_code = self._get_param("reasonCode") - comment = self._get_param("comment") - force = self._get_bool_param("force") - - job = self.iot_backend.cancel_job(job_id=job_id, - reason_code=reason_code, - comment=comment, - force=force) - - return json.dumps(job.to_dict()) - - def get_job_document(self): - job = self.iot_backend.get_job_document(job_id=self._get_param("jobId")) - - if job.document is not None: - return json.dumps({'document': job.document}) - else: - # job.document_source is not None: - # TODO: needs to be implemented to get document_source's content from S3 - return json.dumps({'document': ''}) - - def list_jobs(self): - status = self._get_param("status"), - target_selection = self._get_param("targetSelection"), - max_results = 
self._get_int_param("maxResults", 50) # not the default, but makes testing easier - previous_next_token = self._get_param("nextToken") - thing_group_name = self._get_param("thingGroupName"), - thing_group_id = self._get_param("thingGroupId") - jobs, next_token = self.iot_backend.list_jobs(status=status, - target_selection=target_selection, - max_results=max_results, - token=previous_next_token, - thing_group_name=thing_group_name, - thing_group_id=thing_group_id) - - return json.dumps(dict(jobs=jobs, nextToken=next_token)) - - def describe_job_execution(self): - job_id = self._get_param("jobId") - thing_name = self._get_param("thingName") - execution_number = self._get_int_param("executionNumber") - job_execution = self.iot_backend.describe_job_execution(job_id=job_id, - thing_name=thing_name, - execution_number=execution_number) - - return json.dumps(dict(execution=job_execution.to_get_dict())) - - def cancel_job_execution(self): - job_id = self._get_param("jobId") - thing_name = self._get_param("thingName") - force = self._get_bool_param("force") - expected_version = self._get_int_param("expectedVersion") - status_details = self._get_param("statusDetails") - - self.iot_backend.cancel_job_execution(job_id=job_id, - thing_name=thing_name, - force=force, - expected_version=expected_version, - status_details=status_details) - - return json.dumps(dict()) - - def delete_job_execution(self): - job_id = self._get_param("jobId") - thing_name = self._get_param("thingName") - execution_number = self._get_int_param("executionNumber") - force = self._get_bool_param("force") - - self.iot_backend.delete_job_execution(job_id=job_id, - thing_name=thing_name, - execution_number=execution_number, - force=force) - - return json.dumps(dict()) - - def list_job_executions_for_job(self): - job_id = self._get_param("jobId") - status = self._get_param("status") - max_results = self._get_int_param("maxResults", 50) # not the default, but makes testing easier - next_token = self._get_param("nextToken") - job_executions, next_token = self.iot_backend.list_job_executions_for_job(job_id=job_id, - status=status, - max_results=max_results, - next_token=next_token) - - return json.dumps(dict(executionSummaries=job_executions, nextToken=next_token)) - - def list_job_executions_for_thing(self): - thing_name = self._get_param("thingName") - status = self._get_param("status") - max_results = self._get_int_param("maxResults", 50) # not the default, but makes testing easier - next_token = self._get_param("nextToken") - job_executions, next_token = self.iot_backend.list_job_executions_for_thing(thing_name=thing_name, - status=status, - max_results=max_results, - next_token=next_token) - - return json.dumps(dict(executionSummaries=job_executions, nextToken=next_token)) - - def create_keys_and_certificate(self): - set_as_active = self._get_bool_param("setAsActive") - cert, key_pair = self.iot_backend.create_keys_and_certificate( - set_as_active=set_as_active, - ) - return json.dumps(dict( - certificateArn=cert.arn, - certificateId=cert.certificate_id, - certificatePem=cert.certificate_pem, - keyPair=key_pair - )) - - def delete_certificate(self): - certificate_id = self._get_param("certificateId") - self.iot_backend.delete_certificate( - certificate_id=certificate_id, - ) - return json.dumps(dict()) - - def describe_certificate(self): - certificate_id = self._get_param("certificateId") - certificate = self.iot_backend.describe_certificate( - certificate_id=certificate_id, - ) - return 
json.dumps(dict(certificateDescription=certificate.to_description_dict())) - - def list_certificates(self): - # page_size = self._get_int_param("pageSize") - # marker = self._get_param("marker") - # ascending_order = self._get_param("ascendingOrder") - certificates = self.iot_backend.list_certificates() - # TODO: implement pagination in the future - return json.dumps(dict(certificates=[_.to_dict() for _ in certificates])) - - def register_certificate(self): - certificate_pem = self._get_param("certificatePem") - ca_certificate_pem = self._get_param("caCertificatePem") - set_as_active = self._get_bool_param("setAsActive") - status = self._get_param("status") - - cert = self.iot_backend.register_certificate( - certificate_pem=certificate_pem, - ca_certificate_pem=ca_certificate_pem, - set_as_active=set_as_active, - status=status - ) - return json.dumps(dict(certificateId=cert.certificate_id, certificateArn=cert.arn)) - - def update_certificate(self): - certificate_id = self._get_param("certificateId") - new_status = self._get_param("newStatus") - self.iot_backend.update_certificate( - certificate_id=certificate_id, - new_status=new_status, - ) - return json.dumps(dict()) - - def create_policy(self): - policy_name = self._get_param("policyName") - policy_document = self._get_param("policyDocument") - policy = self.iot_backend.create_policy( - policy_name=policy_name, - policy_document=policy_document, - ) - return json.dumps(policy.to_dict_at_creation()) - - def list_policies(self): - # marker = self._get_param("marker") - # page_size = self._get_int_param("pageSize") - # ascending_order = self._get_param("ascendingOrder") - policies = self.iot_backend.list_policies() - - # TODO: implement pagination in the future - return json.dumps(dict(policies=[_.to_dict() for _ in policies])) - - def get_policy(self): - policy_name = self._get_param("policyName") - policy = self.iot_backend.get_policy( - policy_name=policy_name, - ) - return json.dumps(policy.to_get_dict()) - - def delete_policy(self): - policy_name = self._get_param("policyName") - self.iot_backend.delete_policy( - policy_name=policy_name, - ) - return json.dumps(dict()) - - def create_policy_version(self): - policy_name = self._get_param('policyName') - policy_document = self._get_param('policyDocument') - set_as_default = self._get_bool_param('setAsDefault') - policy_version = self.iot_backend.create_policy_version(policy_name, policy_document, set_as_default) - - return json.dumps(dict(policy_version.to_dict_at_creation())) - - def set_default_policy_version(self): - policy_name = self._get_param('policyName') - version_id = self._get_param('policyVersionId') - self.iot_backend.set_default_policy_version(policy_name, version_id) - - return json.dumps(dict()) - - def get_policy_version(self): - policy_name = self._get_param('policyName') - version_id = self._get_param('policyVersionId') - policy_version = self.iot_backend.get_policy_version(policy_name, version_id) - return json.dumps(dict(policy_version.to_get_dict())) - - def list_policy_versions(self): - policy_name = self._get_param('policyName') - policiy_versions = self.iot_backend.list_policy_versions(policy_name=policy_name) - - return json.dumps(dict(policyVersions=[_.to_dict() for _ in policiy_versions])) - - def delete_policy_version(self): - policy_name = self._get_param('policyName') - version_id = self._get_param('policyVersionId') - self.iot_backend.delete_policy_version(policy_name, version_id) - - return json.dumps(dict()) - - def attach_policy(self): - policy_name = 
self._get_param("policyName") - target = self._get_param('target') - self.iot_backend.attach_policy( - policy_name=policy_name, - target=target, - ) - return json.dumps(dict()) - - def list_attached_policies(self): - principal = unquote(self._get_param('target')) - # marker = self._get_param("marker") - # page_size = self._get_int_param("pageSize") - policies = self.iot_backend.list_attached_policies( - target=principal - ) - # TODO: implement pagination in the future - next_marker = None - return json.dumps(dict(policies=[_.to_dict() for _ in policies], nextMarker=next_marker)) - - def attach_principal_policy(self): - policy_name = self._get_param("policyName") - principal = self.headers.get('x-amzn-iot-principal') - self.iot_backend.attach_principal_policy( - policy_name=policy_name, - principal_arn=principal, - ) - return json.dumps(dict()) - - def detach_policy(self): - policy_name = self._get_param("policyName") - target = self._get_param('target') - self.iot_backend.detach_policy( - policy_name=policy_name, - target=target, - ) - return json.dumps(dict()) - - def detach_principal_policy(self): - policy_name = self._get_param("policyName") - principal = self.headers.get('x-amzn-iot-principal') - self.iot_backend.detach_principal_policy( - policy_name=policy_name, - principal_arn=principal, - ) - return json.dumps(dict()) - - def list_principal_policies(self): - principal = self.headers.get('x-amzn-iot-principal') - # marker = self._get_param("marker") - # page_size = self._get_int_param("pageSize") - # ascending_order = self._get_param("ascendingOrder") - policies = self.iot_backend.list_principal_policies( - principal_arn=principal - ) - # TODO: implement pagination in the future - next_marker = None - return json.dumps(dict(policies=[_.to_dict() for _ in policies], nextMarker=next_marker)) - - def list_policy_principals(self): - policy_name = self.headers.get('x-amzn-iot-policy') - # marker = self._get_param("marker") - # page_size = self._get_int_param("pageSize") - # ascending_order = self._get_param("ascendingOrder") - principals = self.iot_backend.list_policy_principals( - policy_name=policy_name, - ) - # TODO: implement pagination in the future - next_marker = None - return json.dumps(dict(principals=principals, nextMarker=next_marker)) - - def attach_thing_principal(self): - thing_name = self._get_param("thingName") - principal = self.headers.get('x-amzn-principal') - self.iot_backend.attach_thing_principal( - thing_name=thing_name, - principal_arn=principal, - ) - return json.dumps(dict()) - - def detach_thing_principal(self): - thing_name = self._get_param("thingName") - principal = self.headers.get('x-amzn-principal') - self.iot_backend.detach_thing_principal( - thing_name=thing_name, - principal_arn=principal, - ) - return json.dumps(dict()) - - def list_principal_things(self): - next_token = self._get_param("nextToken") - # max_results = self._get_int_param("maxResults") - principal = self.headers.get('x-amzn-principal') - things = self.iot_backend.list_principal_things( - principal_arn=principal, - ) - # TODO: implement pagination in the future - next_token = None - return json.dumps(dict(things=things, nextToken=next_token)) - - def list_thing_principals(self): - thing_name = self._get_param("thingName") - principals = self.iot_backend.list_thing_principals( - thing_name=thing_name, - ) - return json.dumps(dict(principals=principals)) - - def describe_thing_group(self): - thing_group_name = self._get_param("thingGroupName") - thing_group = 
self.iot_backend.describe_thing_group( - thing_group_name=thing_group_name, - ) - return json.dumps(thing_group.to_dict()) - - def create_thing_group(self): - thing_group_name = self._get_param("thingGroupName") - parent_group_name = self._get_param("parentGroupName") - thing_group_properties = self._get_param("thingGroupProperties") - thing_group_name, thing_group_arn, thing_group_id = self.iot_backend.create_thing_group( - thing_group_name=thing_group_name, - parent_group_name=parent_group_name, - thing_group_properties=thing_group_properties, - ) - return json.dumps(dict( - thingGroupName=thing_group_name, - thingGroupArn=thing_group_arn, - thingGroupId=thing_group_id) - ) - - def delete_thing_group(self): - thing_group_name = self._get_param("thingGroupName") - expected_version = self._get_param("expectedVersion") - self.iot_backend.delete_thing_group( - thing_group_name=thing_group_name, - expected_version=expected_version, - ) - return json.dumps(dict()) - - def list_thing_groups(self): - # next_token = self._get_param("nextToken") - # max_results = self._get_int_param("maxResults") - parent_group = self._get_param("parentGroup") - name_prefix_filter = self._get_param("namePrefixFilter") - recursive = self._get_param("recursive") - thing_groups = self.iot_backend.list_thing_groups( - parent_group=parent_group, - name_prefix_filter=name_prefix_filter, - recursive=recursive, - ) - next_token = None - rets = [{'groupName': _.thing_group_name, 'groupArn': _.arn} for _ in thing_groups] - # TODO: implement pagination in the future - return json.dumps(dict(thingGroups=rets, nextToken=next_token)) - - def update_thing_group(self): - thing_group_name = self._get_param("thingGroupName") - thing_group_properties = self._get_param("thingGroupProperties") - expected_version = self._get_param("expectedVersion") - version = self.iot_backend.update_thing_group( - thing_group_name=thing_group_name, - thing_group_properties=thing_group_properties, - expected_version=expected_version, - ) - return json.dumps(dict(version=version)) - - def add_thing_to_thing_group(self): - thing_group_name = self._get_param("thingGroupName") - thing_group_arn = self._get_param("thingGroupArn") - thing_name = self._get_param("thingName") - thing_arn = self._get_param("thingArn") - self.iot_backend.add_thing_to_thing_group( - thing_group_name=thing_group_name, - thing_group_arn=thing_group_arn, - thing_name=thing_name, - thing_arn=thing_arn, - ) - return json.dumps(dict()) - - def remove_thing_from_thing_group(self): - thing_group_name = self._get_param("thingGroupName") - thing_group_arn = self._get_param("thingGroupArn") - thing_name = self._get_param("thingName") - thing_arn = self._get_param("thingArn") - self.iot_backend.remove_thing_from_thing_group( - thing_group_name=thing_group_name, - thing_group_arn=thing_group_arn, - thing_name=thing_name, - thing_arn=thing_arn, - ) - return json.dumps(dict()) - - def list_things_in_thing_group(self): - thing_group_name = self._get_param("thingGroupName") - recursive = self._get_param("recursive") - # next_token = self._get_param("nextToken") - # max_results = self._get_int_param("maxResults") - things = self.iot_backend.list_things_in_thing_group( - thing_group_name=thing_group_name, - recursive=recursive, - ) - next_token = None - thing_names = [_.thing_name for _ in things] - # TODO: implement pagination in the future - return json.dumps(dict(things=thing_names, nextToken=next_token)) - - def list_thing_groups_for_thing(self): - thing_name = self._get_param("thingName") - # 
next_token = self._get_param("nextToken") - # max_results = self._get_int_param("maxResults") - thing_groups = self.iot_backend.list_thing_groups_for_thing( - thing_name=thing_name - ) - next_token = None - # TODO: implement pagination in the future - return json.dumps(dict(thingGroups=thing_groups, nextToken=next_token)) - - def update_thing_groups_for_thing(self): - thing_name = self._get_param("thingName") - thing_groups_to_add = self._get_param("thingGroupsToAdd") or [] - thing_groups_to_remove = self._get_param("thingGroupsToRemove") or [] - self.iot_backend.update_thing_groups_for_thing( - thing_name=thing_name, - thing_groups_to_add=thing_groups_to_add, - thing_groups_to_remove=thing_groups_to_remove, - ) - return json.dumps(dict()) +from __future__ import unicode_literals + +import json +from six.moves.urllib.parse import unquote + +from moto.core.responses import BaseResponse +from .models import iot_backends + + +class IoTResponse(BaseResponse): + SERVICE_NAME = 'iot' + + @property + def iot_backend(self): + return iot_backends[self.region] + + def create_thing(self): + thing_name = self._get_param("thingName") + thing_type_name = self._get_param("thingTypeName") + attribute_payload = self._get_param("attributePayload") + thing_name, thing_arn = self.iot_backend.create_thing( + thing_name=thing_name, + thing_type_name=thing_type_name, + attribute_payload=attribute_payload, + ) + return json.dumps(dict(thingName=thing_name, thingArn=thing_arn)) + + def create_thing_type(self): + thing_type_name = self._get_param("thingTypeName") + thing_type_properties = self._get_param("thingTypeProperties") + thing_type_name, thing_type_arn = self.iot_backend.create_thing_type( + thing_type_name=thing_type_name, + thing_type_properties=thing_type_properties, + ) + return json.dumps(dict(thingTypeName=thing_type_name, thingTypeArn=thing_type_arn)) + + def list_thing_types(self): + previous_next_token = self._get_param("nextToken") + max_results = self._get_int_param("maxResults", 50) # not the default, but makes testing easier + thing_type_name = self._get_param("thingTypeName") + thing_types = self.iot_backend.list_thing_types( + thing_type_name=thing_type_name + ) + + thing_types = [_.to_dict() for _ in thing_types] + if previous_next_token is None: + result = thing_types[0:max_results] + next_token = str(max_results) if len(thing_types) > max_results else None + else: + token = int(previous_next_token) + result = thing_types[token:token + max_results] + next_token = str(token + max_results) if len(thing_types) > token + max_results else None + + return json.dumps(dict(thingTypes=result, nextToken=next_token)) + + def list_things(self): + previous_next_token = self._get_param("nextToken") + max_results = self._get_int_param("maxResults", 50) # not the default, but makes testing easier + attribute_name = self._get_param("attributeName") + attribute_value = self._get_param("attributeValue") + thing_type_name = self._get_param("thingTypeName") + things, next_token = self.iot_backend.list_things( + attribute_name=attribute_name, + attribute_value=attribute_value, + thing_type_name=thing_type_name, + max_results=max_results, + token=previous_next_token + ) + + return json.dumps(dict(things=things, nextToken=next_token)) + + def describe_thing(self): + thing_name = self._get_param("thingName") + thing = self.iot_backend.describe_thing( + thing_name=thing_name, + ) + return json.dumps(thing.to_dict(include_default_client_id=True)) + + def describe_thing_type(self): + thing_type_name = 
self._get_param("thingTypeName") + thing_type = self.iot_backend.describe_thing_type( + thing_type_name=thing_type_name, + ) + return json.dumps(thing_type.to_dict()) + + def delete_thing(self): + thing_name = self._get_param("thingName") + expected_version = self._get_param("expectedVersion") + self.iot_backend.delete_thing( + thing_name=thing_name, + expected_version=expected_version, + ) + return json.dumps(dict()) + + def delete_thing_type(self): + thing_type_name = self._get_param("thingTypeName") + self.iot_backend.delete_thing_type( + thing_type_name=thing_type_name, + ) + return json.dumps(dict()) + + def update_thing(self): + thing_name = self._get_param("thingName") + thing_type_name = self._get_param("thingTypeName") + attribute_payload = self._get_param("attributePayload") + expected_version = self._get_param("expectedVersion") + remove_thing_type = self._get_param("removeThingType") + self.iot_backend.update_thing( + thing_name=thing_name, + thing_type_name=thing_type_name, + attribute_payload=attribute_payload, + expected_version=expected_version, + remove_thing_type=remove_thing_type, + ) + return json.dumps(dict()) + + def create_job(self): + job_arn, job_id, description = self.iot_backend.create_job( + job_id=self._get_param("jobId"), + targets=self._get_param("targets"), + description=self._get_param("description"), + document_source=self._get_param("documentSource"), + document=self._get_param("document"), + presigned_url_config=self._get_param("presignedUrlConfig"), + target_selection=self._get_param("targetSelection"), + job_executions_rollout_config=self._get_param("jobExecutionsRolloutConfig"), + document_parameters=self._get_param("documentParameters") + ) + + return json.dumps(dict(jobArn=job_arn, jobId=job_id, description=description)) + + def describe_job(self): + job = self.iot_backend.describe_job(job_id=self._get_param("jobId")) + return json.dumps(dict( + documentSource=job.document_source, + job=dict( + comment=job.comment, + completedAt=job.completed_at, + createdAt=job.created_at, + description=job.description, + documentParameters=job.document_parameters, + forceCanceled=job.force, + reasonCode=job.reason_code, + jobArn=job.job_arn, + jobExecutionsRolloutConfig=job.job_executions_rollout_config, + jobId=job.job_id, + jobProcessDetails=job.job_process_details, + lastUpdatedAt=job.last_updated_at, + presignedUrlConfig=job.presigned_url_config, + status=job.status, + targets=job.targets, + targetSelection=job.target_selection + ))) + + def delete_job(self): + job_id = self._get_param("jobId") + force = self._get_bool_param("force") + + self.iot_backend.delete_job(job_id=job_id, + force=force) + + return json.dumps(dict()) + + def cancel_job(self): + job_id = self._get_param("jobId") + reason_code = self._get_param("reasonCode") + comment = self._get_param("comment") + force = self._get_bool_param("force") + + job = self.iot_backend.cancel_job(job_id=job_id, + reason_code=reason_code, + comment=comment, + force=force) + + return json.dumps(job.to_dict()) + + def get_job_document(self): + job = self.iot_backend.get_job_document(job_id=self._get_param("jobId")) + + if job.document is not None: + return json.dumps({'document': job.document}) + else: + # job.document_source is not None: + # TODO: needs to be implemented to get document_source's content from S3 + return json.dumps({'document': ''}) + + def list_jobs(self): + status = self._get_param("status"), + target_selection = self._get_param("targetSelection"), + max_results = 
self._get_int_param("maxResults", 50) # not the default, but makes testing easier + previous_next_token = self._get_param("nextToken") + thing_group_name = self._get_param("thingGroupName"), + thing_group_id = self._get_param("thingGroupId") + jobs, next_token = self.iot_backend.list_jobs(status=status, + target_selection=target_selection, + max_results=max_results, + token=previous_next_token, + thing_group_name=thing_group_name, + thing_group_id=thing_group_id) + + return json.dumps(dict(jobs=jobs, nextToken=next_token)) + + def describe_job_execution(self): + job_id = self._get_param("jobId") + thing_name = self._get_param("thingName") + execution_number = self._get_int_param("executionNumber") + job_execution = self.iot_backend.describe_job_execution(job_id=job_id, + thing_name=thing_name, + execution_number=execution_number) + + return json.dumps(dict(execution=job_execution.to_get_dict())) + + def cancel_job_execution(self): + job_id = self._get_param("jobId") + thing_name = self._get_param("thingName") + force = self._get_bool_param("force") + expected_version = self._get_int_param("expectedVersion") + status_details = self._get_param("statusDetails") + + self.iot_backend.cancel_job_execution(job_id=job_id, + thing_name=thing_name, + force=force, + expected_version=expected_version, + status_details=status_details) + + return json.dumps(dict()) + + def delete_job_execution(self): + job_id = self._get_param("jobId") + thing_name = self._get_param("thingName") + execution_number = self._get_int_param("executionNumber") + force = self._get_bool_param("force") + + self.iot_backend.delete_job_execution(job_id=job_id, + thing_name=thing_name, + execution_number=execution_number, + force=force) + + return json.dumps(dict()) + + def list_job_executions_for_job(self): + job_id = self._get_param("jobId") + status = self._get_param("status") + max_results = self._get_int_param("maxResults", 50) # not the default, but makes testing easier + next_token = self._get_param("nextToken") + job_executions, next_token = self.iot_backend.list_job_executions_for_job(job_id=job_id, + status=status, + max_results=max_results, + next_token=next_token) + + return json.dumps(dict(executionSummaries=job_executions, nextToken=next_token)) + + def list_job_executions_for_thing(self): + thing_name = self._get_param("thingName") + status = self._get_param("status") + max_results = self._get_int_param("maxResults", 50) # not the default, but makes testing easier + next_token = self._get_param("nextToken") + job_executions, next_token = self.iot_backend.list_job_executions_for_thing(thing_name=thing_name, + status=status, + max_results=max_results, + next_token=next_token) + + return json.dumps(dict(executionSummaries=job_executions, nextToken=next_token)) + + def create_keys_and_certificate(self): + set_as_active = self._get_bool_param("setAsActive") + cert, key_pair = self.iot_backend.create_keys_and_certificate( + set_as_active=set_as_active, + ) + return json.dumps(dict( + certificateArn=cert.arn, + certificateId=cert.certificate_id, + certificatePem=cert.certificate_pem, + keyPair=key_pair + )) + + def delete_certificate(self): + certificate_id = self._get_param("certificateId") + self.iot_backend.delete_certificate( + certificate_id=certificate_id, + ) + return json.dumps(dict()) + + def describe_certificate(self): + certificate_id = self._get_param("certificateId") + certificate = self.iot_backend.describe_certificate( + certificate_id=certificate_id, + ) + return 
+
+    def list_certificates(self):
+        # page_size = self._get_int_param("pageSize")
+        # marker = self._get_param("marker")
+        # ascending_order = self._get_param("ascendingOrder")
+        certificates = self.iot_backend.list_certificates()
+        # TODO: implement pagination in the future
+        return json.dumps(dict(certificates=[_.to_dict() for _ in certificates]))
+
+    def register_certificate(self):
+        certificate_pem = self._get_param("certificatePem")
+        ca_certificate_pem = self._get_param("caCertificatePem")
+        set_as_active = self._get_bool_param("setAsActive")
+        status = self._get_param("status")
+
+        cert = self.iot_backend.register_certificate(
+            certificate_pem=certificate_pem,
+            ca_certificate_pem=ca_certificate_pem,
+            set_as_active=set_as_active,
+            status=status
+        )
+        return json.dumps(dict(certificateId=cert.certificate_id, certificateArn=cert.arn))
+
+    def update_certificate(self):
+        certificate_id = self._get_param("certificateId")
+        new_status = self._get_param("newStatus")
+        self.iot_backend.update_certificate(
+            certificate_id=certificate_id,
+            new_status=new_status,
+        )
+        return json.dumps(dict())
+
+    def create_policy(self):
+        policy_name = self._get_param("policyName")
+        policy_document = self._get_param("policyDocument")
+        policy = self.iot_backend.create_policy(
+            policy_name=policy_name,
+            policy_document=policy_document,
+        )
+        return json.dumps(policy.to_dict_at_creation())
+
+    def list_policies(self):
+        # marker = self._get_param("marker")
+        # page_size = self._get_int_param("pageSize")
+        # ascending_order = self._get_param("ascendingOrder")
+        policies = self.iot_backend.list_policies()
+
+        # TODO: implement pagination in the future
+        return json.dumps(dict(policies=[_.to_dict() for _ in policies]))
+
+    def get_policy(self):
+        policy_name = self._get_param("policyName")
+        policy = self.iot_backend.get_policy(
+            policy_name=policy_name,
+        )
+        return json.dumps(policy.to_get_dict())
+
+    def delete_policy(self):
+        policy_name = self._get_param("policyName")
+        self.iot_backend.delete_policy(
+            policy_name=policy_name,
+        )
+        return json.dumps(dict())
+
+    def create_policy_version(self):
+        policy_name = self._get_param('policyName')
+        policy_document = self._get_param('policyDocument')
+        set_as_default = self._get_bool_param('setAsDefault')
+        policy_version = self.iot_backend.create_policy_version(policy_name, policy_document, set_as_default)
+
+        return json.dumps(policy_version.to_dict_at_creation())
+
+    def set_default_policy_version(self):
+        policy_name = self._get_param('policyName')
+        version_id = self._get_param('policyVersionId')
+        self.iot_backend.set_default_policy_version(policy_name, version_id)
+
+        return json.dumps(dict())
+
+    def get_policy_version(self):
+        policy_name = self._get_param('policyName')
+        version_id = self._get_param('policyVersionId')
+        policy_version = self.iot_backend.get_policy_version(policy_name, version_id)
+        return json.dumps(policy_version.to_get_dict())
+
+    def list_policy_versions(self):
+        policy_name = self._get_param('policyName')
+        policy_versions = self.iot_backend.list_policy_versions(policy_name=policy_name)
+
+        return json.dumps(dict(policyVersions=[_.to_dict() for _ in policy_versions]))
+
+    def delete_policy_version(self):
+        policy_name = self._get_param('policyName')
+        version_id = self._get_param('policyVersionId')
+        self.iot_backend.delete_policy_version(policy_name, version_id)
+
+        return json.dumps(dict())
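
For reference, the client-side flow these version endpoints support, condensed from `test_policy_versions` later in this patch (policy name and documents are illustrative):

```python
import json
import boto3
from moto import mock_iot

@mock_iot
def demo_policy_versions():
    client = boto3.client('iot', region_name='ap-northeast-1')
    client.create_policy(policyName='my-policy', policyDocument='{}')  # version '1'

    # A new version can be created and promoted to default in one call ...
    v2 = client.create_policy_version(policyName='my-policy',
                                      policyDocument=json.dumps({'version': 'version_1'}),
                                      setAsDefault=True)
    assert v2['policyVersionId'] == '2'

    # ... or any existing version can be promoted later.
    client.set_default_policy_version(policyName='my-policy', policyVersionId='1')

    # Exactly one version is ever flagged as the default.
    versions = client.list_policy_versions(policyName='my-policy')['policyVersions']
    assert sum(v['isDefaultVersion'] for v in versions) == 1
```
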
self._get_param("policyName") + target = self._get_param('target') + self.iot_backend.attach_policy( + policy_name=policy_name, + target=target, + ) + return json.dumps(dict()) + + def list_attached_policies(self): + principal = unquote(self._get_param('target')) + # marker = self._get_param("marker") + # page_size = self._get_int_param("pageSize") + policies = self.iot_backend.list_attached_policies( + target=principal + ) + # TODO: implement pagination in the future + next_marker = None + return json.dumps(dict(policies=[_.to_dict() for _ in policies], nextMarker=next_marker)) + + def attach_principal_policy(self): + policy_name = self._get_param("policyName") + principal = self.headers.get('x-amzn-iot-principal') + self.iot_backend.attach_principal_policy( + policy_name=policy_name, + principal_arn=principal, + ) + return json.dumps(dict()) + + def detach_policy(self): + policy_name = self._get_param("policyName") + target = self._get_param('target') + self.iot_backend.detach_policy( + policy_name=policy_name, + target=target, + ) + return json.dumps(dict()) + + def detach_principal_policy(self): + policy_name = self._get_param("policyName") + principal = self.headers.get('x-amzn-iot-principal') + self.iot_backend.detach_principal_policy( + policy_name=policy_name, + principal_arn=principal, + ) + return json.dumps(dict()) + + def list_principal_policies(self): + principal = self.headers.get('x-amzn-iot-principal') + # marker = self._get_param("marker") + # page_size = self._get_int_param("pageSize") + # ascending_order = self._get_param("ascendingOrder") + policies = self.iot_backend.list_principal_policies( + principal_arn=principal + ) + # TODO: implement pagination in the future + next_marker = None + return json.dumps(dict(policies=[_.to_dict() for _ in policies], nextMarker=next_marker)) + + def list_policy_principals(self): + policy_name = self.headers.get('x-amzn-iot-policy') + # marker = self._get_param("marker") + # page_size = self._get_int_param("pageSize") + # ascending_order = self._get_param("ascendingOrder") + principals = self.iot_backend.list_policy_principals( + policy_name=policy_name, + ) + # TODO: implement pagination in the future + next_marker = None + return json.dumps(dict(principals=principals, nextMarker=next_marker)) + + def attach_thing_principal(self): + thing_name = self._get_param("thingName") + principal = self.headers.get('x-amzn-principal') + self.iot_backend.attach_thing_principal( + thing_name=thing_name, + principal_arn=principal, + ) + return json.dumps(dict()) + + def detach_thing_principal(self): + thing_name = self._get_param("thingName") + principal = self.headers.get('x-amzn-principal') + self.iot_backend.detach_thing_principal( + thing_name=thing_name, + principal_arn=principal, + ) + return json.dumps(dict()) + + def list_principal_things(self): + next_token = self._get_param("nextToken") + # max_results = self._get_int_param("maxResults") + principal = self.headers.get('x-amzn-principal') + things = self.iot_backend.list_principal_things( + principal_arn=principal, + ) + # TODO: implement pagination in the future + next_token = None + return json.dumps(dict(things=things, nextToken=next_token)) + + def list_thing_principals(self): + thing_name = self._get_param("thingName") + principals = self.iot_backend.list_thing_principals( + thing_name=thing_name, + ) + return json.dumps(dict(principals=principals)) + + def describe_thing_group(self): + thing_group_name = self._get_param("thingGroupName") + thing_group = 
self.iot_backend.describe_thing_group( + thing_group_name=thing_group_name, + ) + return json.dumps(thing_group.to_dict()) + + def create_thing_group(self): + thing_group_name = self._get_param("thingGroupName") + parent_group_name = self._get_param("parentGroupName") + thing_group_properties = self._get_param("thingGroupProperties") + thing_group_name, thing_group_arn, thing_group_id = self.iot_backend.create_thing_group( + thing_group_name=thing_group_name, + parent_group_name=parent_group_name, + thing_group_properties=thing_group_properties, + ) + return json.dumps(dict( + thingGroupName=thing_group_name, + thingGroupArn=thing_group_arn, + thingGroupId=thing_group_id) + ) + + def delete_thing_group(self): + thing_group_name = self._get_param("thingGroupName") + expected_version = self._get_param("expectedVersion") + self.iot_backend.delete_thing_group( + thing_group_name=thing_group_name, + expected_version=expected_version, + ) + return json.dumps(dict()) + + def list_thing_groups(self): + # next_token = self._get_param("nextToken") + # max_results = self._get_int_param("maxResults") + parent_group = self._get_param("parentGroup") + name_prefix_filter = self._get_param("namePrefixFilter") + recursive = self._get_param("recursive") + thing_groups = self.iot_backend.list_thing_groups( + parent_group=parent_group, + name_prefix_filter=name_prefix_filter, + recursive=recursive, + ) + next_token = None + rets = [{'groupName': _.thing_group_name, 'groupArn': _.arn} for _ in thing_groups] + # TODO: implement pagination in the future + return json.dumps(dict(thingGroups=rets, nextToken=next_token)) + + def update_thing_group(self): + thing_group_name = self._get_param("thingGroupName") + thing_group_properties = self._get_param("thingGroupProperties") + expected_version = self._get_param("expectedVersion") + version = self.iot_backend.update_thing_group( + thing_group_name=thing_group_name, + thing_group_properties=thing_group_properties, + expected_version=expected_version, + ) + return json.dumps(dict(version=version)) + + def add_thing_to_thing_group(self): + thing_group_name = self._get_param("thingGroupName") + thing_group_arn = self._get_param("thingGroupArn") + thing_name = self._get_param("thingName") + thing_arn = self._get_param("thingArn") + self.iot_backend.add_thing_to_thing_group( + thing_group_name=thing_group_name, + thing_group_arn=thing_group_arn, + thing_name=thing_name, + thing_arn=thing_arn, + ) + return json.dumps(dict()) + + def remove_thing_from_thing_group(self): + thing_group_name = self._get_param("thingGroupName") + thing_group_arn = self._get_param("thingGroupArn") + thing_name = self._get_param("thingName") + thing_arn = self._get_param("thingArn") + self.iot_backend.remove_thing_from_thing_group( + thing_group_name=thing_group_name, + thing_group_arn=thing_group_arn, + thing_name=thing_name, + thing_arn=thing_arn, + ) + return json.dumps(dict()) + + def list_things_in_thing_group(self): + thing_group_name = self._get_param("thingGroupName") + recursive = self._get_param("recursive") + # next_token = self._get_param("nextToken") + # max_results = self._get_int_param("maxResults") + things = self.iot_backend.list_things_in_thing_group( + thing_group_name=thing_group_name, + recursive=recursive, + ) + next_token = None + thing_names = [_.thing_name for _ in things] + # TODO: implement pagination in the future + return json.dumps(dict(things=thing_names, nextToken=next_token)) + + def list_thing_groups_for_thing(self): + thing_name = self._get_param("thingName") + # 
next_token = self._get_param("nextToken") + # max_results = self._get_int_param("maxResults") + thing_groups = self.iot_backend.list_thing_groups_for_thing( + thing_name=thing_name + ) + next_token = None + # TODO: implement pagination in the future + return json.dumps(dict(thingGroups=thing_groups, nextToken=next_token)) + + def update_thing_groups_for_thing(self): + thing_name = self._get_param("thingName") + thing_groups_to_add = self._get_param("thingGroupsToAdd") or [] + thing_groups_to_remove = self._get_param("thingGroupsToRemove") or [] + self.iot_backend.update_thing_groups_for_thing( + thing_name=thing_name, + thing_groups_to_add=thing_groups_to_add, + thing_groups_to_remove=thing_groups_to_remove, + ) + return json.dumps(dict()) diff --git a/tests/test_iot/test_iot.py b/tests/test_iot/test_iot.py index 8f11912b0eeb..4a142b292437 100644 --- a/tests/test_iot/test_iot.py +++ b/tests/test_iot/test_iot.py @@ -1,1409 +1,1409 @@ -from __future__ import unicode_literals - -import json -import sure #noqa -import boto3 - -from moto import mock_iot -from botocore.exceptions import ClientError -from nose.tools import assert_raises - -@mock_iot -def test_attach_policy(): - client = boto3.client('iot', region_name='ap-northeast-1') - policy_name = 'my-policy' - doc = '{}' - - cert = client.create_keys_and_certificate(setAsActive=True) - cert_arn = cert['certificateArn'] - client.create_policy(policyName=policy_name, policyDocument=doc) - client.attach_policy(policyName=policy_name, target=cert_arn) - - res = client.list_attached_policies(target=cert_arn) - res.should.have.key('policies').which.should.have.length_of(1) - res['policies'][0]['policyName'].should.equal('my-policy') - - -@mock_iot -def test_detach_policy(): - client = boto3.client('iot', region_name='ap-northeast-1') - policy_name = 'my-policy' - doc = '{}' - - cert = client.create_keys_and_certificate(setAsActive=True) - cert_arn = cert['certificateArn'] - client.create_policy(policyName=policy_name, policyDocument=doc) - client.attach_policy(policyName=policy_name, target=cert_arn) - - res = client.list_attached_policies(target=cert_arn) - res.should.have.key('policies').which.should.have.length_of(1) - res['policies'][0]['policyName'].should.equal('my-policy') - - client.detach_policy(policyName=policy_name, target=cert_arn) - res = client.list_attached_policies(target=cert_arn) - res.should.have.key('policies').which.should.be.empty - - -@mock_iot -def test_list_attached_policies(): - client = boto3.client('iot', region_name='ap-northeast-1') - cert = client.create_keys_and_certificate(setAsActive=True) - policies = client.list_attached_policies(target=cert['certificateArn']) - policies['policies'].should.be.empty - - -@mock_iot -def test_policy_versions(): - client = boto3.client('iot', region_name='ap-northeast-1') - policy_name = 'my-policy' - doc = '{}' - - policy = client.create_policy(policyName=policy_name, policyDocument=doc) - policy.should.have.key('policyName').which.should.equal(policy_name) - policy.should.have.key('policyArn').which.should_not.be.none - policy.should.have.key('policyDocument').which.should.equal(json.dumps({})) - policy.should.have.key('policyVersionId').which.should.equal('1') - - policy = client.get_policy(policyName=policy_name) - policy.should.have.key('policyName').which.should.equal(policy_name) - policy.should.have.key('policyArn').which.should_not.be.none - policy.should.have.key('policyDocument').which.should.equal(json.dumps({})) - 
policy.should.have.key('defaultVersionId').which.should.equal(policy['defaultVersionId']) - - policy1 = client.create_policy_version(policyName=policy_name, policyDocument=json.dumps({'version': 'version_1'}), - setAsDefault=True) - policy1.should.have.key('policyArn').which.should_not.be.none - policy1.should.have.key('policyDocument').which.should.equal(json.dumps({'version': 'version_1'})) - policy1.should.have.key('policyVersionId').which.should.equal('2') - policy1.should.have.key('isDefaultVersion').which.should.equal(True) - - policy2 = client.create_policy_version(policyName=policy_name, policyDocument=json.dumps({'version': 'version_2'}), - setAsDefault=False) - policy2.should.have.key('policyArn').which.should_not.be.none - policy2.should.have.key('policyDocument').which.should.equal(json.dumps({'version': 'version_2'})) - policy2.should.have.key('policyVersionId').which.should.equal('3') - policy2.should.have.key('isDefaultVersion').which.should.equal(False) - - policy = client.get_policy(policyName=policy_name) - policy.should.have.key('policyName').which.should.equal(policy_name) - policy.should.have.key('policyArn').which.should_not.be.none - policy.should.have.key('policyDocument').which.should.equal(json.dumps({'version': 'version_1'})) - policy.should.have.key('defaultVersionId').which.should.equal(policy1['policyVersionId']) - - policy_versions = client.list_policy_versions(policyName=policy_name) - policy_versions.should.have.key('policyVersions').which.should.have.length_of(3) - list(map(lambda item: item['isDefaultVersion'], policy_versions['policyVersions'])).count(True).should.equal(1) - default_policy = list(filter(lambda item: item['isDefaultVersion'], policy_versions['policyVersions'])) - default_policy[0].should.have.key('versionId').should.equal(policy1['policyVersionId']) - - policy = client.get_policy(policyName=policy_name) - policy.should.have.key('policyName').which.should.equal(policy_name) - policy.should.have.key('policyArn').which.should_not.be.none - policy.should.have.key('policyDocument').which.should.equal(json.dumps({'version': 'version_1'})) - policy.should.have.key('defaultVersionId').which.should.equal(policy1['policyVersionId']) - - client.set_default_policy_version(policyName=policy_name, policyVersionId=policy2['policyVersionId']) - policy_versions = client.list_policy_versions(policyName=policy_name) - policy_versions.should.have.key('policyVersions').which.should.have.length_of(3) - list(map(lambda item: item['isDefaultVersion'], policy_versions['policyVersions'])).count(True).should.equal(1) - default_policy = list(filter(lambda item: item['isDefaultVersion'], policy_versions['policyVersions'])) - default_policy[0].should.have.key('versionId').should.equal(policy2['policyVersionId']) - - policy = client.get_policy(policyName=policy_name) - policy.should.have.key('policyName').which.should.equal(policy_name) - policy.should.have.key('policyArn').which.should_not.be.none - policy.should.have.key('policyDocument').which.should.equal(json.dumps({'version': 'version_2'})) - policy.should.have.key('defaultVersionId').which.should.equal(policy2['policyVersionId']) - - client.delete_policy_version(policyName=policy_name, policyVersionId='1') - policy_versions = client.list_policy_versions(policyName=policy_name) - policy_versions.should.have.key('policyVersions').which.should.have.length_of(2) - - client.delete_policy_version(policyName=policy_name, policyVersionId=policy1['policyVersionId']) - policy_versions = 
client.list_policy_versions(policyName=policy_name) - policy_versions.should.have.key('policyVersions').which.should.have.length_of(1) - - # should fail as it's the default policy. Should use delete_policy instead - try: - client.delete_policy_version(policyName=policy_name, policyVersionId=policy2['policyVersionId']) - assert False, 'Should have failed in previous call' - except Exception as exception: - exception.response['Error']['Message'].should.equal('Cannot delete the default version of a policy') - - -@mock_iot -def test_things(): - client = boto3.client('iot', region_name='ap-northeast-1') - name = 'my-thing' - type_name = 'my-type-name' - - # thing type - thing_type = client.create_thing_type(thingTypeName=type_name) - thing_type.should.have.key('thingTypeName').which.should.equal(type_name) - thing_type.should.have.key('thingTypeArn') - - res = client.list_thing_types() - res.should.have.key('thingTypes').which.should.have.length_of(1) - for thing_type in res['thingTypes']: - thing_type.should.have.key('thingTypeName').which.should_not.be.none - - thing_type = client.describe_thing_type(thingTypeName=type_name) - thing_type.should.have.key('thingTypeName').which.should.equal(type_name) - thing_type.should.have.key('thingTypeProperties') - thing_type.should.have.key('thingTypeMetadata') - - # thing - thing = client.create_thing(thingName=name, thingTypeName=type_name) - thing.should.have.key('thingName').which.should.equal(name) - thing.should.have.key('thingArn') - res = client.list_things() - res.should.have.key('things').which.should.have.length_of(1) - for thing in res['things']: - thing.should.have.key('thingName').which.should_not.be.none - thing.should.have.key('thingArn').which.should_not.be.none - - thing = client.update_thing(thingName=name, attributePayload={'attributes': {'k1': 'v1'}}) - res = client.list_things() - res.should.have.key('things').which.should.have.length_of(1) - for thing in res['things']: - thing.should.have.key('thingName').which.should_not.be.none - thing.should.have.key('thingArn').which.should_not.be.none - res['things'][0]['attributes'].should.have.key('k1').which.should.equal('v1') - - thing = client.describe_thing(thingName=name) - thing.should.have.key('thingName').which.should.equal(name) - thing.should.have.key('defaultClientId') - thing.should.have.key('thingTypeName') - thing.should.have.key('attributes') - thing.should.have.key('version') - - # delete thing - client.delete_thing(thingName=name) - res = client.list_things() - res.should.have.key('things').which.should.have.length_of(0) - - # delete thing type - client.delete_thing_type(thingTypeName=type_name) - res = client.list_thing_types() - res.should.have.key('thingTypes').which.should.have.length_of(0) - - -@mock_iot -def test_list_thing_types(): - client = boto3.client('iot', region_name='ap-northeast-1') - - for i in range(0, 100): - client.create_thing_type(thingTypeName=str(i + 1)) - - thing_types = client.list_thing_types() - thing_types.should.have.key('nextToken') - thing_types.should.have.key('thingTypes').which.should.have.length_of(50) - thing_types['thingTypes'][0]['thingTypeName'].should.equal('1') - thing_types['thingTypes'][-1]['thingTypeName'].should.equal('50') - - thing_types = client.list_thing_types(nextToken=thing_types['nextToken']) - thing_types.should.have.key('thingTypes').which.should.have.length_of(50) - thing_types.should_not.have.key('nextToken') - thing_types['thingTypes'][0]['thingTypeName'].should.equal('51') - 
thing_types['thingTypes'][-1]['thingTypeName'].should.equal('100') - - -@mock_iot -def test_list_thing_types_with_typename_filter(): - client = boto3.client('iot', region_name='ap-northeast-1') - - client.create_thing_type(thingTypeName='thing') - client.create_thing_type(thingTypeName='thingType') - client.create_thing_type(thingTypeName='thingTypeName') - client.create_thing_type(thingTypeName='thingTypeNameGroup') - client.create_thing_type(thingTypeName='shouldNotFind') - client.create_thing_type(thingTypeName='find me it shall not') - - thing_types = client.list_thing_types(thingTypeName='thing') - thing_types.should_not.have.key('nextToken') - thing_types.should.have.key('thingTypes').which.should.have.length_of(4) - thing_types['thingTypes'][0]['thingTypeName'].should.equal('thing') - thing_types['thingTypes'][-1]['thingTypeName'].should.equal('thingTypeNameGroup') - - thing_types = client.list_thing_types(thingTypeName='thingTypeName') - thing_types.should_not.have.key('nextToken') - thing_types.should.have.key('thingTypes').which.should.have.length_of(2) - thing_types['thingTypes'][0]['thingTypeName'].should.equal('thingTypeName') - thing_types['thingTypes'][-1]['thingTypeName'].should.equal('thingTypeNameGroup') - - -@mock_iot -def test_list_things_with_next_token(): - client = boto3.client('iot', region_name='ap-northeast-1') - - for i in range(0, 200): - client.create_thing(thingName=str(i + 1)) - - things = client.list_things() - things.should.have.key('nextToken') - things.should.have.key('things').which.should.have.length_of(50) - things['things'][0]['thingName'].should.equal('1') - things['things'][0]['thingArn'].should.equal('arn:aws:iot:ap-northeast-1:1:thing/1') - things['things'][-1]['thingName'].should.equal('50') - things['things'][-1]['thingArn'].should.equal('arn:aws:iot:ap-northeast-1:1:thing/50') - - things = client.list_things(nextToken=things['nextToken']) - things.should.have.key('nextToken') - things.should.have.key('things').which.should.have.length_of(50) - things['things'][0]['thingName'].should.equal('51') - things['things'][0]['thingArn'].should.equal('arn:aws:iot:ap-northeast-1:1:thing/51') - things['things'][-1]['thingName'].should.equal('100') - things['things'][-1]['thingArn'].should.equal('arn:aws:iot:ap-northeast-1:1:thing/100') - - things = client.list_things(nextToken=things['nextToken']) - things.should.have.key('nextToken') - things.should.have.key('things').which.should.have.length_of(50) - things['things'][0]['thingName'].should.equal('101') - things['things'][0]['thingArn'].should.equal('arn:aws:iot:ap-northeast-1:1:thing/101') - things['things'][-1]['thingName'].should.equal('150') - things['things'][-1]['thingArn'].should.equal('arn:aws:iot:ap-northeast-1:1:thing/150') - - things = client.list_things(nextToken=things['nextToken']) - things.should_not.have.key('nextToken') - things.should.have.key('things').which.should.have.length_of(50) - things['things'][0]['thingName'].should.equal('151') - things['things'][0]['thingArn'].should.equal('arn:aws:iot:ap-northeast-1:1:thing/151') - things['things'][-1]['thingName'].should.equal('200') - things['things'][-1]['thingArn'].should.equal('arn:aws:iot:ap-northeast-1:1:thing/200') - - -@mock_iot -def test_list_things_with_attribute_and_thing_type_filter_and_next_token(): - client = boto3.client('iot', region_name='ap-northeast-1') - client.create_thing_type(thingTypeName='my-thing-type') - - for i in range(0, 200): - if not (i + 1) % 3: - attribute_payload = { - 'attributes': { - 'foo': 'bar' - } - 
} - elif not (i + 1) % 5: - attribute_payload = { - 'attributes': { - 'bar': 'foo' - } - } - else: - attribute_payload = {} - - if not (i + 1) % 2: - thing_type_name = 'my-thing-type' - client.create_thing(thingName=str(i + 1), thingTypeName=thing_type_name, attributePayload=attribute_payload) - else: - client.create_thing(thingName=str(i + 1), attributePayload=attribute_payload) - - # Test filter for thingTypeName - things = client.list_things(thingTypeName=thing_type_name) - things.should.have.key('nextToken') - things.should.have.key('things').which.should.have.length_of(50) - things['things'][0]['thingName'].should.equal('2') - things['things'][0]['thingArn'].should.equal('arn:aws:iot:ap-northeast-1:1:thing/2') - things['things'][-1]['thingName'].should.equal('100') - things['things'][-1]['thingArn'].should.equal('arn:aws:iot:ap-northeast-1:1:thing/100') - all(item['thingTypeName'] == thing_type_name for item in things['things']) - - things = client.list_things(nextToken=things['nextToken'], thingTypeName=thing_type_name) - things.should_not.have.key('nextToken') - things.should.have.key('things').which.should.have.length_of(50) - things['things'][0]['thingName'].should.equal('102') - things['things'][0]['thingArn'].should.equal('arn:aws:iot:ap-northeast-1:1:thing/102') - things['things'][-1]['thingName'].should.equal('200') - things['things'][-1]['thingArn'].should.equal('arn:aws:iot:ap-northeast-1:1:thing/200') - all(item['thingTypeName'] == thing_type_name for item in things['things']) - - # Test filter for attributes - things = client.list_things(attributeName='foo', attributeValue='bar') - things.should.have.key('nextToken') - things.should.have.key('things').which.should.have.length_of(50) - things['things'][0]['thingName'].should.equal('3') - things['things'][0]['thingArn'].should.equal('arn:aws:iot:ap-northeast-1:1:thing/3') - things['things'][-1]['thingName'].should.equal('150') - things['things'][-1]['thingArn'].should.equal('arn:aws:iot:ap-northeast-1:1:thing/150') - all(item['attributes'] == {'foo': 'bar'} for item in things['things']) - - things = client.list_things(nextToken=things['nextToken'], attributeName='foo', attributeValue='bar') - things.should_not.have.key('nextToken') - things.should.have.key('things').which.should.have.length_of(16) - things['things'][0]['thingName'].should.equal('153') - things['things'][0]['thingArn'].should.equal('arn:aws:iot:ap-northeast-1:1:thing/153') - things['things'][-1]['thingName'].should.equal('198') - things['things'][-1]['thingArn'].should.equal('arn:aws:iot:ap-northeast-1:1:thing/198') - all(item['attributes'] == {'foo': 'bar'} for item in things['things']) - - # Test filter for attributes and thingTypeName - things = client.list_things(thingTypeName=thing_type_name, attributeName='foo', attributeValue='bar') - things.should_not.have.key('nextToken') - things.should.have.key('things').which.should.have.length_of(33) - things['things'][0]['thingName'].should.equal('6') - things['things'][0]['thingArn'].should.equal('arn:aws:iot:ap-northeast-1:1:thing/6') - things['things'][-1]['thingName'].should.equal('198') - things['things'][-1]['thingArn'].should.equal('arn:aws:iot:ap-northeast-1:1:thing/198') - all(item['attributes'] == {'foo': 'bar'} and item['thingTypeName'] == thing_type_name for item in things['things']) - - -@mock_iot -def test_certs(): - client = boto3.client('iot', region_name='us-east-1') - cert = client.create_keys_and_certificate(setAsActive=True) - cert.should.have.key('certificateArn').which.should_not.be.none - 
cert.should.have.key('certificateId').which.should_not.be.none - cert.should.have.key('certificatePem').which.should_not.be.none - cert.should.have.key('keyPair') - cert['keyPair'].should.have.key('PublicKey').which.should_not.be.none - cert['keyPair'].should.have.key('PrivateKey').which.should_not.be.none - cert_id = cert['certificateId'] - - cert = client.describe_certificate(certificateId=cert_id) - cert.should.have.key('certificateDescription') - cert_desc = cert['certificateDescription'] - cert_desc.should.have.key('certificateArn').which.should_not.be.none - cert_desc.should.have.key('certificateId').which.should_not.be.none - cert_desc.should.have.key('certificatePem').which.should_not.be.none - cert_desc.should.have.key('status').which.should.equal('ACTIVE') - cert_pem = cert_desc['certificatePem'] - - res = client.list_certificates() - for cert in res['certificates']: - cert.should.have.key('certificateArn').which.should_not.be.none - cert.should.have.key('certificateId').which.should_not.be.none - cert.should.have.key('status').which.should_not.be.none - cert.should.have.key('creationDate').which.should_not.be.none - - client.update_certificate(certificateId=cert_id, newStatus='REVOKED') - cert = client.describe_certificate(certificateId=cert_id) - cert_desc = cert['certificateDescription'] - cert_desc.should.have.key('status').which.should.equal('REVOKED') - - client.delete_certificate(certificateId=cert_id) - res = client.list_certificates() - res.should.have.key('certificates') - - # Test register_certificate flow - cert = client.register_certificate(certificatePem=cert_pem, setAsActive=True) - cert.should.have.key('certificateId').which.should_not.be.none - cert.should.have.key('certificateArn').which.should_not.be.none - cert_id = cert['certificateId'] - - res = client.list_certificates() - res.should.have.key('certificates').which.should.have.length_of(1) - for cert in res['certificates']: - cert.should.have.key('certificateArn').which.should_not.be.none - cert.should.have.key('certificateId').which.should_not.be.none - cert.should.have.key('status').which.should_not.be.none - cert.should.have.key('creationDate').which.should_not.be.none - - client.update_certificate(certificateId=cert_id, newStatus='REVOKED') - cert = client.describe_certificate(certificateId=cert_id) - cert_desc = cert['certificateDescription'] - cert_desc.should.have.key('status').which.should.equal('REVOKED') - - client.delete_certificate(certificateId=cert_id) - res = client.list_certificates() - res.should.have.key('certificates') - - -@mock_iot -def test_delete_policy_validation(): - doc = """{ - "Version": "2012-10-17", - "Statement":[ - { - "Effect":"Allow", - "Action":[ - "iot: *" - ], - "Resource":"*" - } - ] - } - """ - client = boto3.client('iot', region_name='ap-northeast-1') - cert = client.create_keys_and_certificate(setAsActive=True) - cert_arn = cert['certificateArn'] - policy_name = 'my-policy' - client.create_policy(policyName=policy_name, policyDocument=doc) - client.attach_principal_policy(policyName=policy_name, principal=cert_arn) - - with assert_raises(ClientError) as e: - client.delete_policy(policyName=policy_name) - e.exception.response['Error']['Message'].should.contain( - 'The policy cannot be deleted as the policy is attached to one or more principals (name=%s)' % policy_name) - res = client.list_policies() - res.should.have.key('policies').which.should.have.length_of(1) - - client.detach_principal_policy(policyName=policy_name, principal=cert_arn) - 
client.delete_policy(policyName=policy_name) - res = client.list_policies() - res.should.have.key('policies').which.should.have.length_of(0) - - -@mock_iot -def test_delete_certificate_validation(): - doc = """{ - "Version": "2012-10-17", - "Statement":[ - { - "Effect":"Allow", - "Action":[ - "iot: *" - ], - "Resource":"*" - } - ] - } - """ - client = boto3.client('iot', region_name='ap-northeast-1') - cert = client.create_keys_and_certificate(setAsActive=True) - cert_id = cert['certificateId'] - cert_arn = cert['certificateArn'] - policy_name = 'my-policy' - thing_name = 'thing-1' - client.create_policy(policyName=policy_name, policyDocument=doc) - client.attach_principal_policy(policyName=policy_name, principal=cert_arn) - client.create_thing(thingName=thing_name) - client.attach_thing_principal(thingName=thing_name, principal=cert_arn) - - with assert_raises(ClientError) as e: - client.delete_certificate(certificateId=cert_id) - e.exception.response['Error']['Message'].should.contain( - 'Certificate must be deactivated (not ACTIVE) before deletion.') - res = client.list_certificates() - res.should.have.key('certificates').which.should.have.length_of(1) - - client.update_certificate(certificateId=cert_id, newStatus='REVOKED') - with assert_raises(ClientError) as e: - client.delete_certificate(certificateId=cert_id) - e.exception.response['Error']['Message'].should.contain( - 'Things must be detached before deletion (arn: %s)' % cert_arn) - res = client.list_certificates() - res.should.have.key('certificates').which.should.have.length_of(1) - - client.detach_thing_principal(thingName=thing_name, principal=cert_arn) - with assert_raises(ClientError) as e: - client.delete_certificate(certificateId=cert_id) - e.exception.response['Error']['Message'].should.contain( - 'Certificate policies must be detached before deletion (arn: %s)' % cert_arn) - res = client.list_certificates() - res.should.have.key('certificates').which.should.have.length_of(1) - - client.detach_principal_policy(policyName=policy_name, principal=cert_arn) - client.delete_certificate(certificateId=cert_id) - res = client.list_certificates() - res.should.have.key('certificates').which.should.have.length_of(0) - - -@mock_iot -def test_certs_create_inactive(): - client = boto3.client('iot', region_name='ap-northeast-1') - cert = client.create_keys_and_certificate(setAsActive=False) - cert_id = cert['certificateId'] - - cert = client.describe_certificate(certificateId=cert_id) - cert.should.have.key('certificateDescription') - cert_desc = cert['certificateDescription'] - cert_desc.should.have.key('status').which.should.equal('INACTIVE') - - client.update_certificate(certificateId=cert_id, newStatus='ACTIVE') - cert = client.describe_certificate(certificateId=cert_id) - cert.should.have.key('certificateDescription') - cert_desc = cert['certificateDescription'] - cert_desc.should.have.key('status').which.should.equal('ACTIVE') - - -@mock_iot -def test_policy(): - client = boto3.client('iot', region_name='ap-northeast-1') - name = 'my-policy' - doc = '{}' - policy = client.create_policy(policyName=name, policyDocument=doc) - policy.should.have.key('policyName').which.should.equal(name) - policy.should.have.key('policyArn').which.should_not.be.none - policy.should.have.key('policyDocument').which.should.equal(doc) - policy.should.have.key('policyVersionId').which.should.equal('1') - - policy = client.get_policy(policyName=name) - policy.should.have.key('policyName').which.should.equal(name) - 
policy.should.have.key('policyArn').which.should_not.be.none - policy.should.have.key('policyDocument').which.should.equal(doc) - policy.should.have.key('defaultVersionId').which.should.equal('1') - - res = client.list_policies() - res.should.have.key('policies').which.should.have.length_of(1) - for policy in res['policies']: - policy.should.have.key('policyName').which.should_not.be.none - policy.should.have.key('policyArn').which.should_not.be.none - - client.delete_policy(policyName=name) - res = client.list_policies() - res.should.have.key('policies').which.should.have.length_of(0) - - -@mock_iot -def test_principal_policy(): - client = boto3.client('iot', region_name='ap-northeast-1') - policy_name = 'my-policy' - doc = '{}' - client.create_policy(policyName=policy_name, policyDocument=doc) - cert = client.create_keys_and_certificate(setAsActive=True) - cert_arn = cert['certificateArn'] - - client.attach_policy(policyName=policy_name, target=cert_arn) - - res = client.list_principal_policies(principal=cert_arn) - res.should.have.key('policies').which.should.have.length_of(1) - for policy in res['policies']: - policy.should.have.key('policyName').which.should_not.be.none - policy.should.have.key('policyArn').which.should_not.be.none - - # do nothing if policy have already attached to certificate - client.attach_policy(policyName=policy_name, target=cert_arn) - - res = client.list_principal_policies(principal=cert_arn) - res.should.have.key('policies').which.should.have.length_of(1) - for policy in res['policies']: - policy.should.have.key('policyName').which.should_not.be.none - policy.should.have.key('policyArn').which.should_not.be.none - - res = client.list_policy_principals(policyName=policy_name) - res.should.have.key('principals').which.should.have.length_of(1) - for principal in res['principals']: - principal.should_not.be.none - - client.detach_policy(policyName=policy_name, target=cert_arn) - res = client.list_principal_policies(principal=cert_arn) - res.should.have.key('policies').which.should.have.length_of(0) - res = client.list_policy_principals(policyName=policy_name) - res.should.have.key('principals').which.should.have.length_of(0) - with assert_raises(ClientError) as e: - client.detach_policy(policyName=policy_name, target=cert_arn) - e.exception.response['Error']['Code'].should.equal('ResourceNotFoundException') - - -@mock_iot -def test_principal_policy_deprecated(): - client = boto3.client('iot', region_name='ap-northeast-1') - policy_name = 'my-policy' - doc = '{}' - policy = client.create_policy(policyName=policy_name, policyDocument=doc) - cert = client.create_keys_and_certificate(setAsActive=True) - cert_arn = cert['certificateArn'] - - client.attach_principal_policy(policyName=policy_name, principal=cert_arn) - - res = client.list_principal_policies(principal=cert_arn) - res.should.have.key('policies').which.should.have.length_of(1) - for policy in res['policies']: - policy.should.have.key('policyName').which.should_not.be.none - policy.should.have.key('policyArn').which.should_not.be.none - - res = client.list_policy_principals(policyName=policy_name) - res.should.have.key('principals').which.should.have.length_of(1) - for principal in res['principals']: - principal.should_not.be.none - - client.detach_principal_policy(policyName=policy_name, principal=cert_arn) - res = client.list_principal_policies(principal=cert_arn) - res.should.have.key('policies').which.should.have.length_of(0) - res = client.list_policy_principals(policyName=policy_name) - 
res.should.have.key('principals').which.should.have.length_of(0) - - -@mock_iot -def test_principal_thing(): - client = boto3.client('iot', region_name='ap-northeast-1') - thing_name = 'my-thing' - thing = client.create_thing(thingName=thing_name) - cert = client.create_keys_and_certificate(setAsActive=True) - cert_arn = cert['certificateArn'] - - client.attach_thing_principal(thingName=thing_name, principal=cert_arn) - - res = client.list_principal_things(principal=cert_arn) - res.should.have.key('things').which.should.have.length_of(1) - for thing in res['things']: - thing.should_not.be.none - res = client.list_thing_principals(thingName=thing_name) - res.should.have.key('principals').which.should.have.length_of(1) - for principal in res['principals']: - principal.should_not.be.none - - client.detach_thing_principal(thingName=thing_name, principal=cert_arn) - res = client.list_principal_things(principal=cert_arn) - res.should.have.key('things').which.should.have.length_of(0) - res = client.list_thing_principals(thingName=thing_name) - res.should.have.key('principals').which.should.have.length_of(0) - - -@mock_iot -def test_thing_groups(): - client = boto3.client('iot', region_name='ap-northeast-1') - group_name = 'my-group-name' - - # thing group - thing_group = client.create_thing_group(thingGroupName=group_name) - thing_group.should.have.key('thingGroupName').which.should.equal(group_name) - thing_group.should.have.key('thingGroupArn') - - res = client.list_thing_groups() - res.should.have.key('thingGroups').which.should.have.length_of(1) - for thing_group in res['thingGroups']: - thing_group.should.have.key('groupName').which.should_not.be.none - thing_group.should.have.key('groupArn').which.should_not.be.none - - thing_group = client.describe_thing_group(thingGroupName=group_name) - thing_group.should.have.key('thingGroupName').which.should.equal(group_name) - thing_group.should.have.key('thingGroupProperties') - thing_group.should.have.key('thingGroupMetadata') - thing_group.should.have.key('version') - - # delete thing group - client.delete_thing_group(thingGroupName=group_name) - res = client.list_thing_groups() - res.should.have.key('thingGroups').which.should.have.length_of(0) - - # props create test - props = { - 'thingGroupDescription': 'my first thing group', - 'attributePayload': { - 'attributes': { - 'key1': 'val01', - 'Key02': 'VAL2' - } - } - } - thing_group = client.create_thing_group(thingGroupName=group_name, thingGroupProperties=props) - thing_group.should.have.key('thingGroupName').which.should.equal(group_name) - thing_group.should.have.key('thingGroupArn') - - thing_group = client.describe_thing_group(thingGroupName=group_name) - thing_group.should.have.key('thingGroupProperties') \ - .which.should.have.key('attributePayload') \ - .which.should.have.key('attributes') - res_props = thing_group['thingGroupProperties']['attributePayload']['attributes'] - res_props.should.have.key('key1').which.should.equal('val01') - res_props.should.have.key('Key02').which.should.equal('VAL2') - - # props update test with merge - new_props = { - 'attributePayload': { - 'attributes': { - 'k3': 'v3' - }, - 'merge': True - } - } - client.update_thing_group( - thingGroupName=group_name, - thingGroupProperties=new_props - ) - thing_group = client.describe_thing_group(thingGroupName=group_name) - thing_group.should.have.key('thingGroupProperties') \ - .which.should.have.key('attributePayload') \ - .which.should.have.key('attributes') - res_props = 
thing_group['thingGroupProperties']['attributePayload']['attributes'] - res_props.should.have.key('key1').which.should.equal('val01') - res_props.should.have.key('Key02').which.should.equal('VAL2') - - res_props.should.have.key('k3').which.should.equal('v3') - - # props update test - new_props = { - 'attributePayload': { - 'attributes': { - 'k4': 'v4' - } - } - } - client.update_thing_group( - thingGroupName=group_name, - thingGroupProperties=new_props - ) - thing_group = client.describe_thing_group(thingGroupName=group_name) - thing_group.should.have.key('thingGroupProperties') \ - .which.should.have.key('attributePayload') \ - .which.should.have.key('attributes') - res_props = thing_group['thingGroupProperties']['attributePayload']['attributes'] - res_props.should.have.key('k4').which.should.equal('v4') - res_props.should_not.have.key('key1') - - -@mock_iot -def test_thing_group_relations(): - client = boto3.client('iot', region_name='ap-northeast-1') - name = 'my-thing' - group_name = 'my-group-name' - - # thing group - thing_group = client.create_thing_group(thingGroupName=group_name) - thing_group.should.have.key('thingGroupName').which.should.equal(group_name) - thing_group.should.have.key('thingGroupArn') - - # thing - thing = client.create_thing(thingName=name) - thing.should.have.key('thingName').which.should.equal(name) - thing.should.have.key('thingArn') - - # add in 4 way - client.add_thing_to_thing_group( - thingGroupName=group_name, - thingName=name - ) - client.add_thing_to_thing_group( - thingGroupArn=thing_group['thingGroupArn'], - thingArn=thing['thingArn'] - ) - client.add_thing_to_thing_group( - thingGroupName=group_name, - thingArn=thing['thingArn'] - ) - client.add_thing_to_thing_group( - thingGroupArn=thing_group['thingGroupArn'], - thingName=name - ) - - things = client.list_things_in_thing_group( - thingGroupName=group_name - ) - things.should.have.key('things') - things['things'].should.have.length_of(1) - - thing_groups = client.list_thing_groups_for_thing( - thingName=name - ) - thing_groups.should.have.key('thingGroups') - thing_groups['thingGroups'].should.have.length_of(1) - - # remove in 4 way - client.remove_thing_from_thing_group( - thingGroupName=group_name, - thingName=name - ) - client.remove_thing_from_thing_group( - thingGroupArn=thing_group['thingGroupArn'], - thingArn=thing['thingArn'] - ) - client.remove_thing_from_thing_group( - thingGroupName=group_name, - thingArn=thing['thingArn'] - ) - client.remove_thing_from_thing_group( - thingGroupArn=thing_group['thingGroupArn'], - thingName=name - ) - things = client.list_things_in_thing_group( - thingGroupName=group_name - ) - things.should.have.key('things') - things['things'].should.have.length_of(0) - - # update thing group for thing - client.update_thing_groups_for_thing( - thingName=name, - thingGroupsToAdd=[ - group_name - ] - ) - things = client.list_things_in_thing_group( - thingGroupName=group_name - ) - things.should.have.key('things') - things['things'].should.have.length_of(1) - - client.update_thing_groups_for_thing( - thingName=name, - thingGroupsToRemove=[ - group_name - ] - ) - things = client.list_things_in_thing_group( - thingGroupName=group_name - ) - things.should.have.key('things') - things['things'].should.have.length_of(0) - - -@mock_iot -def test_create_job(): - client = boto3.client('iot', region_name='eu-west-1') - name = "my-thing" - job_id = "TestJob" - # thing# job document - # job_document = { - # "field": "value" - # } - thing = client.create_thing(thingName=name) - 
thing.should.have.key('thingName').which.should.equal(name) - thing.should.have.key('thingArn') - - # job document - job_document = { - "field": "value" - } - - job = client.create_job( - jobId=job_id, - targets=[thing["thingArn"]], - document=json.dumps(job_document), - description="Description", - presignedUrlConfig={ - 'roleArn': 'arn:aws:iam::1:role/service-role/iot_job_role', - 'expiresInSec': 123 - }, - targetSelection="CONTINUOUS", - jobExecutionsRolloutConfig={ - 'maximumPerMinute': 10 - } - ) - - job.should.have.key('jobId').which.should.equal(job_id) - job.should.have.key('jobArn') - job.should.have.key('description') - - -@mock_iot -def test_list_jobs(): - client = boto3.client('iot', region_name='eu-west-1') - name = "my-thing" - job_id = "TestJob" - # thing# job document - # job_document = { - # "field": "value" - # } - thing = client.create_thing(thingName=name) - thing.should.have.key('thingName').which.should.equal(name) - thing.should.have.key('thingArn') - - # job document - job_document = { - "field": "value" - } - - job1 = client.create_job( - jobId=job_id, - targets=[thing["thingArn"]], - document=json.dumps(job_document), - description="Description", - presignedUrlConfig={ - 'roleArn': 'arn:aws:iam::1:role/service-role/iot_job_role', - 'expiresInSec': 123 - }, - targetSelection="CONTINUOUS", - jobExecutionsRolloutConfig={ - 'maximumPerMinute': 10 - } - ) - - job1.should.have.key('jobId').which.should.equal(job_id) - job1.should.have.key('jobArn') - job1.should.have.key('description') - - job2 = client.create_job( - jobId=job_id+"1", - targets=[thing["thingArn"]], - document=json.dumps(job_document), - description="Description", - presignedUrlConfig={ - 'roleArn': 'arn:aws:iam::1:role/service-role/iot_job_role', - 'expiresInSec': 123 - }, - targetSelection="CONTINUOUS", - jobExecutionsRolloutConfig={ - 'maximumPerMinute': 10 - } - ) - - job2.should.have.key('jobId').which.should.equal(job_id+"1") - job2.should.have.key('jobArn') - job2.should.have.key('description') - - jobs = client.list_jobs() - jobs.should.have.key('jobs') - jobs.should_not.have.key('nextToken') - jobs['jobs'][0].should.have.key('jobId').which.should.equal(job_id) - jobs['jobs'][1].should.have.key('jobId').which.should.equal(job_id+"1") - - -@mock_iot -def test_describe_job(): - client = boto3.client('iot', region_name='eu-west-1') - name = "my-thing" - job_id = "TestJob" - # thing - thing = client.create_thing(thingName=name) - thing.should.have.key('thingName').which.should.equal(name) - thing.should.have.key('thingArn') - - job = client.create_job( - jobId=job_id, - targets=[thing["thingArn"]], - documentSource="https://s3-eu-west-1.amazonaws.com/bucket-name/job_document.json", - presignedUrlConfig={ - 'roleArn': 'arn:aws:iam::1:role/service-role/iot_job_role', - 'expiresInSec': 123 - }, - targetSelection="CONTINUOUS", - jobExecutionsRolloutConfig={ - 'maximumPerMinute': 10 - } - ) - - job.should.have.key('jobId').which.should.equal(job_id) - job.should.have.key('jobArn') - - job = client.describe_job(jobId=job_id) - job.should.have.key('documentSource') - job.should.have.key('job') - job.should.have.key('job').which.should.have.key("jobArn") - job.should.have.key('job').which.should.have.key("jobId").which.should.equal(job_id) - job.should.have.key('job').which.should.have.key("targets") - job.should.have.key('job').which.should.have.key("jobProcessDetails") - job.should.have.key('job').which.should.have.key("lastUpdatedAt") - job.should.have.key('job').which.should.have.key("createdAt") - 
job.should.have.key('job').which.should.have.key("jobExecutionsRolloutConfig") - job.should.have.key('job').which.should.have.key("targetSelection").which.should.equal("CONTINUOUS") - job.should.have.key('job').which.should.have.key("presignedUrlConfig") - job.should.have.key('job').which.should.have.key("presignedUrlConfig").which.should.have.key( - "roleArn").which.should.equal('arn:aws:iam::1:role/service-role/iot_job_role') - job.should.have.key('job').which.should.have.key("presignedUrlConfig").which.should.have.key( - "expiresInSec").which.should.equal(123) - job.should.have.key('job').which.should.have.key("jobExecutionsRolloutConfig").which.should.have.key( - "maximumPerMinute").which.should.equal(10) - - -@mock_iot -def test_describe_job_1(): - client = boto3.client('iot', region_name='eu-west-1') - name = "my-thing" - job_id = "TestJob" - # thing - thing = client.create_thing(thingName=name) - thing.should.have.key('thingName').which.should.equal(name) - thing.should.have.key('thingArn') - - # job document - job_document = { - "field": "value" - } - - job = client.create_job( - jobId=job_id, - targets=[thing["thingArn"]], - document=json.dumps(job_document), - presignedUrlConfig={ - 'roleArn': 'arn:aws:iam::1:role/service-role/iot_job_role', - 'expiresInSec': 123 - }, - targetSelection="CONTINUOUS", - jobExecutionsRolloutConfig={ - 'maximumPerMinute': 10 - } - ) - - job.should.have.key('jobId').which.should.equal(job_id) - job.should.have.key('jobArn') - - job = client.describe_job(jobId=job_id) - job.should.have.key('job') - job.should.have.key('job').which.should.have.key("jobArn") - job.should.have.key('job').which.should.have.key("jobId").which.should.equal(job_id) - job.should.have.key('job').which.should.have.key("targets") - job.should.have.key('job').which.should.have.key("jobProcessDetails") - job.should.have.key('job').which.should.have.key("lastUpdatedAt") - job.should.have.key('job').which.should.have.key("createdAt") - job.should.have.key('job').which.should.have.key("jobExecutionsRolloutConfig") - job.should.have.key('job').which.should.have.key("targetSelection").which.should.equal("CONTINUOUS") - job.should.have.key('job').which.should.have.key("presignedUrlConfig") - job.should.have.key('job').which.should.have.key("presignedUrlConfig").which.should.have.key( - "roleArn").which.should.equal('arn:aws:iam::1:role/service-role/iot_job_role') - job.should.have.key('job').which.should.have.key("presignedUrlConfig").which.should.have.key( - "expiresInSec").which.should.equal(123) - job.should.have.key('job').which.should.have.key("jobExecutionsRolloutConfig").which.should.have.key( - "maximumPerMinute").which.should.equal(10) - - -@mock_iot -def test_delete_job(): - client = boto3.client('iot', region_name='eu-west-1') - name = "my-thing" - job_id = "TestJob" - # thing - thing = client.create_thing(thingName=name) - thing.should.have.key('thingName').which.should.equal(name) - thing.should.have.key('thingArn') - - job = client.create_job( - jobId=job_id, - targets=[thing["thingArn"]], - documentSource="https://s3-eu-west-1.amazonaws.com/bucket-name/job_document.json", - presignedUrlConfig={ - 'roleArn': 'arn:aws:iam::1:role/service-role/iot_job_role', - 'expiresInSec': 123 - }, - targetSelection="CONTINUOUS", - jobExecutionsRolloutConfig={ - 'maximumPerMinute': 10 - } - ) - - job.should.have.key('jobId').which.should.equal(job_id) - job.should.have.key('jobArn') - - job = client.describe_job(jobId=job_id) - job.should.have.key('job') - 
job.should.have.key('job').which.should.have.key("jobId").which.should.equal(job_id) - - client.delete_job(jobId=job_id) - - client.list_jobs()['jobs'].should.have.length_of(0) - - -@mock_iot -def test_cancel_job(): - client = boto3.client('iot', region_name='eu-west-1') - name = "my-thing" - job_id = "TestJob" - # thing - thing = client.create_thing(thingName=name) - thing.should.have.key('thingName').which.should.equal(name) - thing.should.have.key('thingArn') - - job = client.create_job( - jobId=job_id, - targets=[thing["thingArn"]], - documentSource="https://s3-eu-west-1.amazonaws.com/bucket-name/job_document.json", - presignedUrlConfig={ - 'roleArn': 'arn:aws:iam::1:role/service-role/iot_job_role', - 'expiresInSec': 123 - }, - targetSelection="CONTINUOUS", - jobExecutionsRolloutConfig={ - 'maximumPerMinute': 10 - } - ) - - job.should.have.key('jobId').which.should.equal(job_id) - job.should.have.key('jobArn') - - job = client.describe_job(jobId=job_id) - job.should.have.key('job') - job.should.have.key('job').which.should.have.key("jobId").which.should.equal(job_id) - - job = client.cancel_job(jobId=job_id, reasonCode='Because', comment='You are') - job.should.have.key('jobId').which.should.equal(job_id) - job.should.have.key('jobArn') - - job = client.describe_job(jobId=job_id) - job.should.have.key('job') - job.should.have.key('job').which.should.have.key("jobId").which.should.equal(job_id) - job.should.have.key('job').which.should.have.key("status").which.should.equal('CANCELED') - job.should.have.key('job').which.should.have.key("forceCanceled").which.should.equal(False) - job.should.have.key('job').which.should.have.key("reasonCode").which.should.equal('Because') - job.should.have.key('job').which.should.have.key("comment").which.should.equal('You are') - - -@mock_iot -def test_get_job_document_with_document_source(): - client = boto3.client('iot', region_name='eu-west-1') - name = "my-thing" - job_id = "TestJob" - # thing - thing = client.create_thing(thingName=name) - thing.should.have.key('thingName').which.should.equal(name) - thing.should.have.key('thingArn') - - job = client.create_job( - jobId=job_id, - targets=[thing["thingArn"]], - documentSource="https://s3-eu-west-1.amazonaws.com/bucket-name/job_document.json", - presignedUrlConfig={ - 'roleArn': 'arn:aws:iam::1:role/service-role/iot_job_role', - 'expiresInSec': 123 - }, - targetSelection="CONTINUOUS", - jobExecutionsRolloutConfig={ - 'maximumPerMinute': 10 - } - ) - - job.should.have.key('jobId').which.should.equal(job_id) - job.should.have.key('jobArn') - - job_document = client.get_job_document(jobId=job_id) - job_document.should.have.key('document').which.should.equal('') - - -@mock_iot -def test_get_job_document_with_document(): - client = boto3.client('iot', region_name='eu-west-1') - name = "my-thing" - job_id = "TestJob" - # thing - thing = client.create_thing(thingName=name) - thing.should.have.key('thingName').which.should.equal(name) - thing.should.have.key('thingArn') - - # job document - job_document = { - "field": "value" - } - - job = client.create_job( - jobId=job_id, - targets=[thing["thingArn"]], - document=json.dumps(job_document), - presignedUrlConfig={ - 'roleArn': 'arn:aws:iam::1:role/service-role/iot_job_role', - 'expiresInSec': 123 - }, - targetSelection="CONTINUOUS", - jobExecutionsRolloutConfig={ - 'maximumPerMinute': 10 - } - ) - - job.should.have.key('jobId').which.should.equal(job_id) - job.should.have.key('jobArn') - - job_document = client.get_job_document(jobId=job_id) - 
job_document.should.have.key('document').which.should.equal("{\"field\": \"value\"}") - - -@mock_iot -def test_describe_job_execution(): - client = boto3.client('iot', region_name='eu-west-1') - name = "my-thing" - job_id = "TestJob" - # thing - thing = client.create_thing(thingName=name) - thing.should.have.key('thingName').which.should.equal(name) - thing.should.have.key('thingArn') - - # job document - job_document = { - "field": "value" - } - - job = client.create_job( - jobId=job_id, - targets=[thing["thingArn"]], - document=json.dumps(job_document), - description="Description", - presignedUrlConfig={ - 'roleArn': 'arn:aws:iam::1:role/service-role/iot_job_role', - 'expiresInSec': 123 - }, - targetSelection="CONTINUOUS", - jobExecutionsRolloutConfig={ - 'maximumPerMinute': 10 - } - ) - - job.should.have.key('jobId').which.should.equal(job_id) - job.should.have.key('jobArn') - job.should.have.key('description') - - job_execution = client.describe_job_execution(jobId=job_id, thingName=name) - job_execution.should.have.key('execution') - job_execution['execution'].should.have.key('jobId').which.should.equal(job_id) - job_execution['execution'].should.have.key('status').which.should.equal('QUEUED') - job_execution['execution'].should.have.key('forceCanceled').which.should.equal(False) - job_execution['execution'].should.have.key('statusDetails').which.should.equal({'detailsMap': {}}) - job_execution['execution'].should.have.key('thingArn').which.should.equal(thing["thingArn"]) - job_execution['execution'].should.have.key('queuedAt') - job_execution['execution'].should.have.key('startedAt') - job_execution['execution'].should.have.key('lastUpdatedAt') - job_execution['execution'].should.have.key('executionNumber').which.should.equal(123) - job_execution['execution'].should.have.key('versionNumber').which.should.equal(123) - job_execution['execution'].should.have.key('approximateSecondsBeforeTimedOut').which.should.equal(123) - - job_execution = client.describe_job_execution(jobId=job_id, thingName=name, executionNumber=123) - job_execution.should.have.key('execution') - job_execution['execution'].should.have.key('jobId').which.should.equal(job_id) - job_execution['execution'].should.have.key('status').which.should.equal('QUEUED') - job_execution['execution'].should.have.key('forceCanceled').which.should.equal(False) - job_execution['execution'].should.have.key('statusDetails').which.should.equal({'detailsMap': {}}) - job_execution['execution'].should.have.key('thingArn').which.should.equal(thing["thingArn"]) - job_execution['execution'].should.have.key('queuedAt') - job_execution['execution'].should.have.key('startedAt') - job_execution['execution'].should.have.key('lastUpdatedAt') - job_execution['execution'].should.have.key('executionNumber').which.should.equal(123) - job_execution['execution'].should.have.key('versionNumber').which.should.equal(123) - job_execution['execution'].should.have.key('approximateSecondsBeforeTimedOut').which.should.equal(123) - - try: - client.describe_job_execution(jobId=job_id, thingName=name, executionNumber=456) - except ClientError as exc: - error_code = exc.response['Error']['Code'] - error_code.should.equal('ResourceNotFoundException') - else: - raise Exception("Should have raised error") - - -@mock_iot -def test_cancel_job_execution(): - client = boto3.client('iot', region_name='eu-west-1') - name = "my-thing" - job_id = "TestJob" - # thing - thing = client.create_thing(thingName=name) - thing.should.have.key('thingName').which.should.equal(name) - 
thing.should.have.key('thingArn') - - # job document - job_document = { - "field": "value" - } - - job = client.create_job( - jobId=job_id, - targets=[thing["thingArn"]], - document=json.dumps(job_document), - description="Description", - presignedUrlConfig={ - 'roleArn': 'arn:aws:iam::1:role/service-role/iot_job_role', - 'expiresInSec': 123 - }, - targetSelection="CONTINUOUS", - jobExecutionsRolloutConfig={ - 'maximumPerMinute': 10 - } - ) - - job.should.have.key('jobId').which.should.equal(job_id) - job.should.have.key('jobArn') - job.should.have.key('description') - - client.cancel_job_execution(jobId=job_id, thingName=name) - job_execution = client.describe_job_execution(jobId=job_id, thingName=name) - job_execution.should.have.key('execution') - job_execution['execution'].should.have.key('status').which.should.equal('CANCELED') - - -@mock_iot -def test_delete_job_execution(): - client = boto3.client('iot', region_name='eu-west-1') - name = "my-thing" - job_id = "TestJob" - # thing - thing = client.create_thing(thingName=name) - thing.should.have.key('thingName').which.should.equal(name) - thing.should.have.key('thingArn') - - # job document - job_document = { - "field": "value" - } - - job = client.create_job( - jobId=job_id, - targets=[thing["thingArn"]], - document=json.dumps(job_document), - description="Description", - presignedUrlConfig={ - 'roleArn': 'arn:aws:iam::1:role/service-role/iot_job_role', - 'expiresInSec': 123 - }, - targetSelection="CONTINUOUS", - jobExecutionsRolloutConfig={ - 'maximumPerMinute': 10 - } - ) - - job.should.have.key('jobId').which.should.equal(job_id) - job.should.have.key('jobArn') - job.should.have.key('description') - - client.delete_job_execution(jobId=job_id, thingName=name, executionNumber=123) - try: - client.describe_job_execution(jobId=job_id, thingName=name, executionNumber=123) - except ClientError as exc: - error_code = exc.response['Error']['Code'] - error_code.should.equal('ResourceNotFoundException') - else: - raise Exception("Should have raised error") - - -@mock_iot -def test_list_job_executions_for_job(): - client = boto3.client('iot', region_name='eu-west-1') - name = "my-thing" - job_id = "TestJob" - # thing - thing = client.create_thing(thingName=name) - thing.should.have.key('thingName').which.should.equal(name) - thing.should.have.key('thingArn') - - # job document - job_document = { - "field": "value" - } - - job = client.create_job( - jobId=job_id, - targets=[thing["thingArn"]], - document=json.dumps(job_document), - description="Description", - presignedUrlConfig={ - 'roleArn': 'arn:aws:iam::1:role/service-role/iot_job_role', - 'expiresInSec': 123 - }, - targetSelection="CONTINUOUS", - jobExecutionsRolloutConfig={ - 'maximumPerMinute': 10 - } - ) - - job.should.have.key('jobId').which.should.equal(job_id) - job.should.have.key('jobArn') - job.should.have.key('description') - - job_execution = client.list_job_executions_for_job(jobId=job_id) - job_execution.should.have.key('executionSummaries') - job_execution['executionSummaries'][0].should.have.key('thingArn').which.should.equal(thing["thingArn"]) - - -@mock_iot -def test_list_job_executions_for_thing(): - client = boto3.client('iot', region_name='eu-west-1') - name = "my-thing" - job_id = "TestJob" - # thing - thing = client.create_thing(thingName=name) - thing.should.have.key('thingName').which.should.equal(name) - thing.should.have.key('thingArn') - - # job document - job_document = { - "field": "value" - } - - job = client.create_job( - jobId=job_id, - 
targets=[thing["thingArn"]],
- document=json.dumps(job_document),
- description="Description",
- presignedUrlConfig={
- 'roleArn': 'arn:aws:iam::1:role/service-role/iot_job_role',
- 'expiresInSec': 123
- },
- targetSelection="CONTINUOUS",
- jobExecutionsRolloutConfig={
- 'maximumPerMinute': 10
- }
- )
-
- job.should.have.key('jobId').which.should.equal(job_id)
- job.should.have.key('jobArn')
- job.should.have.key('description')
-
- job_execution = client.list_job_executions_for_thing(thingName=name)
- job_execution.should.have.key('executionSummaries')
- job_execution['executionSummaries'][0].should.have.key('jobId').which.should.equal(job_id)
-
+from __future__ import unicode_literals
+
+import json
+import sure #noqa
+import boto3
+
+from moto import mock_iot
+from botocore.exceptions import ClientError
+from nose.tools import assert_raises
+
+@mock_iot
+def test_attach_policy():
+ client = boto3.client('iot', region_name='ap-northeast-1')
+ policy_name = 'my-policy'
+ doc = '{}'
+
+ cert = client.create_keys_and_certificate(setAsActive=True)
+ cert_arn = cert['certificateArn']
+ client.create_policy(policyName=policy_name, policyDocument=doc)
+ client.attach_policy(policyName=policy_name, target=cert_arn)
+
+ res = client.list_attached_policies(target=cert_arn)
+ res.should.have.key('policies').which.should.have.length_of(1)
+ res['policies'][0]['policyName'].should.equal('my-policy')
+
+
+@mock_iot
+def test_detach_policy():
+ client = boto3.client('iot', region_name='ap-northeast-1')
+ policy_name = 'my-policy'
+ doc = '{}'
+
+ cert = client.create_keys_and_certificate(setAsActive=True)
+ cert_arn = cert['certificateArn']
+ client.create_policy(policyName=policy_name, policyDocument=doc)
+ client.attach_policy(policyName=policy_name, target=cert_arn)
+
+ res = client.list_attached_policies(target=cert_arn)
+ res.should.have.key('policies').which.should.have.length_of(1)
+ res['policies'][0]['policyName'].should.equal('my-policy')
+
+ client.detach_policy(policyName=policy_name, target=cert_arn)
+ res = client.list_attached_policies(target=cert_arn)
+ res.should.have.key('policies').which.should.be.empty
+
+
+@mock_iot
+def test_list_attached_policies():
+ client = boto3.client('iot', region_name='ap-northeast-1')
+ cert = client.create_keys_and_certificate(setAsActive=True)
+ policies = client.list_attached_policies(target=cert['certificateArn'])
+ policies['policies'].should.be.empty
+
+
+@mock_iot
+def test_policy_versions():
+ client = boto3.client('iot', region_name='ap-northeast-1')
+ policy_name = 'my-policy'
+ doc = '{}'
+
+ policy = client.create_policy(policyName=policy_name, policyDocument=doc)
+ policy.should.have.key('policyName').which.should.equal(policy_name)
+ policy.should.have.key('policyArn').which.should_not.be.none
+ policy.should.have.key('policyDocument').which.should.equal(json.dumps({}))
+ policy.should.have.key('policyVersionId').which.should.equal('1')
+
+ policy = client.get_policy(policyName=policy_name)
+ policy.should.have.key('policyName').which.should.equal(policy_name)
+ policy.should.have.key('policyArn').which.should_not.be.none
+ policy.should.have.key('policyDocument').which.should.equal(json.dumps({}))
+ policy.should.have.key('defaultVersionId').which.should.equal('1')
+
+ policy1 = client.create_policy_version(policyName=policy_name, policyDocument=json.dumps({'version': 'version_1'}),
+ setAsDefault=True)
+ policy1.should.have.key('policyArn').which.should_not.be.none
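+ # created with setAsDefault=True, so this version ('2') becomes the default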
+ policy1.should.have.key('policyDocument').which.should.equal(json.dumps({'version': 'version_1'}))
+ policy1.should.have.key('policyVersionId').which.should.equal('2')
+ policy1.should.have.key('isDefaultVersion').which.should.equal(True)
+
+ policy2 = client.create_policy_version(policyName=policy_name, policyDocument=json.dumps({'version': 'version_2'}),
+ setAsDefault=False)
+ policy2.should.have.key('policyArn').which.should_not.be.none
+ policy2.should.have.key('policyDocument').which.should.equal(json.dumps({'version': 'version_2'}))
+ policy2.should.have.key('policyVersionId').which.should.equal('3')
+ policy2.should.have.key('isDefaultVersion').which.should.equal(False)
+
+ policy = client.get_policy(policyName=policy_name)
+ policy.should.have.key('policyName').which.should.equal(policy_name)
+ policy.should.have.key('policyArn').which.should_not.be.none
+ policy.should.have.key('policyDocument').which.should.equal(json.dumps({'version': 'version_1'}))
+ policy.should.have.key('defaultVersionId').which.should.equal(policy1['policyVersionId'])
+
+ policy_versions = client.list_policy_versions(policyName=policy_name)
+ policy_versions.should.have.key('policyVersions').which.should.have.length_of(3)
+ list(map(lambda item: item['isDefaultVersion'], policy_versions['policyVersions'])).count(True).should.equal(1)
+ default_policy = list(filter(lambda item: item['isDefaultVersion'], policy_versions['policyVersions']))
+ default_policy[0].should.have.key('versionId').which.should.equal(policy1['policyVersionId'])
+
+ policy = client.get_policy(policyName=policy_name)
+ policy.should.have.key('policyName').which.should.equal(policy_name)
+ policy.should.have.key('policyArn').which.should_not.be.none
+ policy.should.have.key('policyDocument').which.should.equal(json.dumps({'version': 'version_1'}))
+ policy.should.have.key('defaultVersionId').which.should.equal(policy1['policyVersionId'])
+
+ client.set_default_policy_version(policyName=policy_name, policyVersionId=policy2['policyVersionId'])
+ policy_versions = client.list_policy_versions(policyName=policy_name)
+ policy_versions.should.have.key('policyVersions').which.should.have.length_of(3)
+ list(map(lambda item: item['isDefaultVersion'], policy_versions['policyVersions'])).count(True).should.equal(1)
+ default_policy = list(filter(lambda item: item['isDefaultVersion'], policy_versions['policyVersions']))
+ default_policy[0].should.have.key('versionId').which.should.equal(policy2['policyVersionId'])
+
+ policy = client.get_policy(policyName=policy_name)
+ policy.should.have.key('policyName').which.should.equal(policy_name)
+ policy.should.have.key('policyArn').which.should_not.be.none
+ policy.should.have.key('policyDocument').which.should.equal(json.dumps({'version': 'version_2'}))
+ policy.should.have.key('defaultVersionId').which.should.equal(policy2['policyVersionId'])
+
+ client.delete_policy_version(policyName=policy_name, policyVersionId='1')
+ policy_versions = client.list_policy_versions(policyName=policy_name)
+ policy_versions.should.have.key('policyVersions').which.should.have.length_of(2)
+
+ client.delete_policy_version(policyName=policy_name, policyVersionId=policy1['policyVersionId'])
+ policy_versions = client.list_policy_versions(policyName=policy_name)
+ policy_versions.should.have.key('policyVersions').which.should.have.length_of(1)
+
+ # should fail as it's the default policy. 
Should use delete_policy instead + try: + client.delete_policy_version(policyName=policy_name, policyVersionId=policy2['policyVersionId']) + assert False, 'Should have failed in previous call' + except Exception as exception: + exception.response['Error']['Message'].should.equal('Cannot delete the default version of a policy') + + +@mock_iot +def test_things(): + client = boto3.client('iot', region_name='ap-northeast-1') + name = 'my-thing' + type_name = 'my-type-name' + + # thing type + thing_type = client.create_thing_type(thingTypeName=type_name) + thing_type.should.have.key('thingTypeName').which.should.equal(type_name) + thing_type.should.have.key('thingTypeArn') + + res = client.list_thing_types() + res.should.have.key('thingTypes').which.should.have.length_of(1) + for thing_type in res['thingTypes']: + thing_type.should.have.key('thingTypeName').which.should_not.be.none + + thing_type = client.describe_thing_type(thingTypeName=type_name) + thing_type.should.have.key('thingTypeName').which.should.equal(type_name) + thing_type.should.have.key('thingTypeProperties') + thing_type.should.have.key('thingTypeMetadata') + + # thing + thing = client.create_thing(thingName=name, thingTypeName=type_name) + thing.should.have.key('thingName').which.should.equal(name) + thing.should.have.key('thingArn') + res = client.list_things() + res.should.have.key('things').which.should.have.length_of(1) + for thing in res['things']: + thing.should.have.key('thingName').which.should_not.be.none + thing.should.have.key('thingArn').which.should_not.be.none + + thing = client.update_thing(thingName=name, attributePayload={'attributes': {'k1': 'v1'}}) + res = client.list_things() + res.should.have.key('things').which.should.have.length_of(1) + for thing in res['things']: + thing.should.have.key('thingName').which.should_not.be.none + thing.should.have.key('thingArn').which.should_not.be.none + res['things'][0]['attributes'].should.have.key('k1').which.should.equal('v1') + + thing = client.describe_thing(thingName=name) + thing.should.have.key('thingName').which.should.equal(name) + thing.should.have.key('defaultClientId') + thing.should.have.key('thingTypeName') + thing.should.have.key('attributes') + thing.should.have.key('version') + + # delete thing + client.delete_thing(thingName=name) + res = client.list_things() + res.should.have.key('things').which.should.have.length_of(0) + + # delete thing type + client.delete_thing_type(thingTypeName=type_name) + res = client.list_thing_types() + res.should.have.key('thingTypes').which.should.have.length_of(0) + + +@mock_iot +def test_list_thing_types(): + client = boto3.client('iot', region_name='ap-northeast-1') + + for i in range(0, 100): + client.create_thing_type(thingTypeName=str(i + 1)) + + thing_types = client.list_thing_types() + thing_types.should.have.key('nextToken') + thing_types.should.have.key('thingTypes').which.should.have.length_of(50) + thing_types['thingTypes'][0]['thingTypeName'].should.equal('1') + thing_types['thingTypes'][-1]['thingTypeName'].should.equal('50') + + thing_types = client.list_thing_types(nextToken=thing_types['nextToken']) + thing_types.should.have.key('thingTypes').which.should.have.length_of(50) + thing_types.should_not.have.key('nextToken') + thing_types['thingTypes'][0]['thingTypeName'].should.equal('51') + thing_types['thingTypes'][-1]['thingTypeName'].should.equal('100') + + +@mock_iot +def test_list_thing_types_with_typename_filter(): + client = boto3.client('iot', region_name='ap-northeast-1') + + 
client.create_thing_type(thingTypeName='thing') + client.create_thing_type(thingTypeName='thingType') + client.create_thing_type(thingTypeName='thingTypeName') + client.create_thing_type(thingTypeName='thingTypeNameGroup') + client.create_thing_type(thingTypeName='shouldNotFind') + client.create_thing_type(thingTypeName='find me it shall not') + + thing_types = client.list_thing_types(thingTypeName='thing') + thing_types.should_not.have.key('nextToken') + thing_types.should.have.key('thingTypes').which.should.have.length_of(4) + thing_types['thingTypes'][0]['thingTypeName'].should.equal('thing') + thing_types['thingTypes'][-1]['thingTypeName'].should.equal('thingTypeNameGroup') + + thing_types = client.list_thing_types(thingTypeName='thingTypeName') + thing_types.should_not.have.key('nextToken') + thing_types.should.have.key('thingTypes').which.should.have.length_of(2) + thing_types['thingTypes'][0]['thingTypeName'].should.equal('thingTypeName') + thing_types['thingTypes'][-1]['thingTypeName'].should.equal('thingTypeNameGroup') + + +@mock_iot +def test_list_things_with_next_token(): + client = boto3.client('iot', region_name='ap-northeast-1') + + for i in range(0, 200): + client.create_thing(thingName=str(i + 1)) + + things = client.list_things() + things.should.have.key('nextToken') + things.should.have.key('things').which.should.have.length_of(50) + things['things'][0]['thingName'].should.equal('1') + things['things'][0]['thingArn'].should.equal('arn:aws:iot:ap-northeast-1:1:thing/1') + things['things'][-1]['thingName'].should.equal('50') + things['things'][-1]['thingArn'].should.equal('arn:aws:iot:ap-northeast-1:1:thing/50') + + things = client.list_things(nextToken=things['nextToken']) + things.should.have.key('nextToken') + things.should.have.key('things').which.should.have.length_of(50) + things['things'][0]['thingName'].should.equal('51') + things['things'][0]['thingArn'].should.equal('arn:aws:iot:ap-northeast-1:1:thing/51') + things['things'][-1]['thingName'].should.equal('100') + things['things'][-1]['thingArn'].should.equal('arn:aws:iot:ap-northeast-1:1:thing/100') + + things = client.list_things(nextToken=things['nextToken']) + things.should.have.key('nextToken') + things.should.have.key('things').which.should.have.length_of(50) + things['things'][0]['thingName'].should.equal('101') + things['things'][0]['thingArn'].should.equal('arn:aws:iot:ap-northeast-1:1:thing/101') + things['things'][-1]['thingName'].should.equal('150') + things['things'][-1]['thingArn'].should.equal('arn:aws:iot:ap-northeast-1:1:thing/150') + + things = client.list_things(nextToken=things['nextToken']) + things.should_not.have.key('nextToken') + things.should.have.key('things').which.should.have.length_of(50) + things['things'][0]['thingName'].should.equal('151') + things['things'][0]['thingArn'].should.equal('arn:aws:iot:ap-northeast-1:1:thing/151') + things['things'][-1]['thingName'].should.equal('200') + things['things'][-1]['thingArn'].should.equal('arn:aws:iot:ap-northeast-1:1:thing/200') + + +@mock_iot +def test_list_things_with_attribute_and_thing_type_filter_and_next_token(): + client = boto3.client('iot', region_name='ap-northeast-1') + client.create_thing_type(thingTypeName='my-thing-type') + + for i in range(0, 200): + if not (i + 1) % 3: + attribute_payload = { + 'attributes': { + 'foo': 'bar' + } + } + elif not (i + 1) % 5: + attribute_payload = { + 'attributes': { + 'bar': 'foo' + } + } + else: + attribute_payload = {} + + if not (i + 1) % 2: + thing_type_name = 'my-thing-type' + 
client.create_thing(thingName=str(i + 1), thingTypeName=thing_type_name, attributePayload=attribute_payload)
+ else:
+ client.create_thing(thingName=str(i + 1), attributePayload=attribute_payload)
+
+ # Test filter for thingTypeName
+ things = client.list_things(thingTypeName=thing_type_name)
+ things.should.have.key('nextToken')
+ things.should.have.key('things').which.should.have.length_of(50)
+ things['things'][0]['thingName'].should.equal('2')
+ things['things'][0]['thingArn'].should.equal('arn:aws:iot:ap-northeast-1:1:thing/2')
+ things['things'][-1]['thingName'].should.equal('100')
+ things['things'][-1]['thingArn'].should.equal('arn:aws:iot:ap-northeast-1:1:thing/100')
+ assert all(item['thingTypeName'] == thing_type_name for item in things['things'])
+
+ things = client.list_things(nextToken=things['nextToken'], thingTypeName=thing_type_name)
+ things.should_not.have.key('nextToken')
+ things.should.have.key('things').which.should.have.length_of(50)
+ things['things'][0]['thingName'].should.equal('102')
+ things['things'][0]['thingArn'].should.equal('arn:aws:iot:ap-northeast-1:1:thing/102')
+ things['things'][-1]['thingName'].should.equal('200')
+ things['things'][-1]['thingArn'].should.equal('arn:aws:iot:ap-northeast-1:1:thing/200')
+ assert all(item['thingTypeName'] == thing_type_name for item in things['things'])
+
+ # Test filter for attributes
+ things = client.list_things(attributeName='foo', attributeValue='bar')
+ things.should.have.key('nextToken')
+ things.should.have.key('things').which.should.have.length_of(50)
+ things['things'][0]['thingName'].should.equal('3')
+ things['things'][0]['thingArn'].should.equal('arn:aws:iot:ap-northeast-1:1:thing/3')
+ things['things'][-1]['thingName'].should.equal('150')
+ things['things'][-1]['thingArn'].should.equal('arn:aws:iot:ap-northeast-1:1:thing/150')
+ assert all(item['attributes'] == {'foo': 'bar'} for item in things['things'])
+
+ things = client.list_things(nextToken=things['nextToken'], attributeName='foo', attributeValue='bar')
+ things.should_not.have.key('nextToken')
+ things.should.have.key('things').which.should.have.length_of(16)
+ things['things'][0]['thingName'].should.equal('153')
+ things['things'][0]['thingArn'].should.equal('arn:aws:iot:ap-northeast-1:1:thing/153')
+ things['things'][-1]['thingName'].should.equal('198')
+ things['things'][-1]['thingArn'].should.equal('arn:aws:iot:ap-northeast-1:1:thing/198')
+ assert all(item['attributes'] == {'foo': 'bar'} for item in things['things'])
+
+ # Test filter for attributes and thingTypeName
+ things = client.list_things(thingTypeName=thing_type_name, attributeName='foo', attributeValue='bar')
+ things.should_not.have.key('nextToken')
+ things.should.have.key('things').which.should.have.length_of(33)
+ things['things'][0]['thingName'].should.equal('6')
+ things['things'][0]['thingArn'].should.equal('arn:aws:iot:ap-northeast-1:1:thing/6')
+ things['things'][-1]['thingName'].should.equal('198')
+ things['things'][-1]['thingArn'].should.equal('arn:aws:iot:ap-northeast-1:1:thing/198')
+ assert all(item['attributes'] == {'foo': 'bar'} and item['thingTypeName'] == thing_type_name for item in things['things'])
+
+
+@mock_iot
+def test_certs():
+ client = boto3.client('iot', region_name='us-east-1')
+ cert = client.create_keys_and_certificate(setAsActive=True)
+ cert.should.have.key('certificateArn').which.should_not.be.none
+ cert.should.have.key('certificateId').which.should_not.be.none
+ cert.should.have.key('certificatePem').which.should_not.be.none
+ cert.should.have.key('keyPair')
+ 
cert['keyPair'].should.have.key('PublicKey').which.should_not.be.none + cert['keyPair'].should.have.key('PrivateKey').which.should_not.be.none + cert_id = cert['certificateId'] + + cert = client.describe_certificate(certificateId=cert_id) + cert.should.have.key('certificateDescription') + cert_desc = cert['certificateDescription'] + cert_desc.should.have.key('certificateArn').which.should_not.be.none + cert_desc.should.have.key('certificateId').which.should_not.be.none + cert_desc.should.have.key('certificatePem').which.should_not.be.none + cert_desc.should.have.key('status').which.should.equal('ACTIVE') + cert_pem = cert_desc['certificatePem'] + + res = client.list_certificates() + for cert in res['certificates']: + cert.should.have.key('certificateArn').which.should_not.be.none + cert.should.have.key('certificateId').which.should_not.be.none + cert.should.have.key('status').which.should_not.be.none + cert.should.have.key('creationDate').which.should_not.be.none + + client.update_certificate(certificateId=cert_id, newStatus='REVOKED') + cert = client.describe_certificate(certificateId=cert_id) + cert_desc = cert['certificateDescription'] + cert_desc.should.have.key('status').which.should.equal('REVOKED') + + client.delete_certificate(certificateId=cert_id) + res = client.list_certificates() + res.should.have.key('certificates') + + # Test register_certificate flow + cert = client.register_certificate(certificatePem=cert_pem, setAsActive=True) + cert.should.have.key('certificateId').which.should_not.be.none + cert.should.have.key('certificateArn').which.should_not.be.none + cert_id = cert['certificateId'] + + res = client.list_certificates() + res.should.have.key('certificates').which.should.have.length_of(1) + for cert in res['certificates']: + cert.should.have.key('certificateArn').which.should_not.be.none + cert.should.have.key('certificateId').which.should_not.be.none + cert.should.have.key('status').which.should_not.be.none + cert.should.have.key('creationDate').which.should_not.be.none + + client.update_certificate(certificateId=cert_id, newStatus='REVOKED') + cert = client.describe_certificate(certificateId=cert_id) + cert_desc = cert['certificateDescription'] + cert_desc.should.have.key('status').which.should.equal('REVOKED') + + client.delete_certificate(certificateId=cert_id) + res = client.list_certificates() + res.should.have.key('certificates') + + +@mock_iot +def test_delete_policy_validation(): + doc = """{ + "Version": "2012-10-17", + "Statement":[ + { + "Effect":"Allow", + "Action":[ + "iot: *" + ], + "Resource":"*" + } + ] + } + """ + client = boto3.client('iot', region_name='ap-northeast-1') + cert = client.create_keys_and_certificate(setAsActive=True) + cert_arn = cert['certificateArn'] + policy_name = 'my-policy' + client.create_policy(policyName=policy_name, policyDocument=doc) + client.attach_principal_policy(policyName=policy_name, principal=cert_arn) + + with assert_raises(ClientError) as e: + client.delete_policy(policyName=policy_name) + e.exception.response['Error']['Message'].should.contain( + 'The policy cannot be deleted as the policy is attached to one or more principals (name=%s)' % policy_name) + res = client.list_policies() + res.should.have.key('policies').which.should.have.length_of(1) + + client.detach_principal_policy(policyName=policy_name, principal=cert_arn) + client.delete_policy(policyName=policy_name) + res = client.list_policies() + res.should.have.key('policies').which.should.have.length_of(0) + + +@mock_iot +def 
test_delete_certificate_validation(): + doc = """{ + "Version": "2012-10-17", + "Statement":[ + { + "Effect":"Allow", + "Action":[ + "iot: *" + ], + "Resource":"*" + } + ] + } + """ + client = boto3.client('iot', region_name='ap-northeast-1') + cert = client.create_keys_and_certificate(setAsActive=True) + cert_id = cert['certificateId'] + cert_arn = cert['certificateArn'] + policy_name = 'my-policy' + thing_name = 'thing-1' + client.create_policy(policyName=policy_name, policyDocument=doc) + client.attach_principal_policy(policyName=policy_name, principal=cert_arn) + client.create_thing(thingName=thing_name) + client.attach_thing_principal(thingName=thing_name, principal=cert_arn) + + with assert_raises(ClientError) as e: + client.delete_certificate(certificateId=cert_id) + e.exception.response['Error']['Message'].should.contain( + 'Certificate must be deactivated (not ACTIVE) before deletion.') + res = client.list_certificates() + res.should.have.key('certificates').which.should.have.length_of(1) + + client.update_certificate(certificateId=cert_id, newStatus='REVOKED') + with assert_raises(ClientError) as e: + client.delete_certificate(certificateId=cert_id) + e.exception.response['Error']['Message'].should.contain( + 'Things must be detached before deletion (arn: %s)' % cert_arn) + res = client.list_certificates() + res.should.have.key('certificates').which.should.have.length_of(1) + + client.detach_thing_principal(thingName=thing_name, principal=cert_arn) + with assert_raises(ClientError) as e: + client.delete_certificate(certificateId=cert_id) + e.exception.response['Error']['Message'].should.contain( + 'Certificate policies must be detached before deletion (arn: %s)' % cert_arn) + res = client.list_certificates() + res.should.have.key('certificates').which.should.have.length_of(1) + + client.detach_principal_policy(policyName=policy_name, principal=cert_arn) + client.delete_certificate(certificateId=cert_id) + res = client.list_certificates() + res.should.have.key('certificates').which.should.have.length_of(0) + + +@mock_iot +def test_certs_create_inactive(): + client = boto3.client('iot', region_name='ap-northeast-1') + cert = client.create_keys_and_certificate(setAsActive=False) + cert_id = cert['certificateId'] + + cert = client.describe_certificate(certificateId=cert_id) + cert.should.have.key('certificateDescription') + cert_desc = cert['certificateDescription'] + cert_desc.should.have.key('status').which.should.equal('INACTIVE') + + client.update_certificate(certificateId=cert_id, newStatus='ACTIVE') + cert = client.describe_certificate(certificateId=cert_id) + cert.should.have.key('certificateDescription') + cert_desc = cert['certificateDescription'] + cert_desc.should.have.key('status').which.should.equal('ACTIVE') + + +@mock_iot +def test_policy(): + client = boto3.client('iot', region_name='ap-northeast-1') + name = 'my-policy' + doc = '{}' + policy = client.create_policy(policyName=name, policyDocument=doc) + policy.should.have.key('policyName').which.should.equal(name) + policy.should.have.key('policyArn').which.should_not.be.none + policy.should.have.key('policyDocument').which.should.equal(doc) + policy.should.have.key('policyVersionId').which.should.equal('1') + + policy = client.get_policy(policyName=name) + policy.should.have.key('policyName').which.should.equal(name) + policy.should.have.key('policyArn').which.should_not.be.none + policy.should.have.key('policyDocument').which.should.equal(doc) + policy.should.have.key('defaultVersionId').which.should.equal('1') + + 
res = client.list_policies()
+ res.should.have.key('policies').which.should.have.length_of(1)
+ for policy in res['policies']:
+ policy.should.have.key('policyName').which.should_not.be.none
+ policy.should.have.key('policyArn').which.should_not.be.none
+
+ client.delete_policy(policyName=name)
+ res = client.list_policies()
+ res.should.have.key('policies').which.should.have.length_of(0)
+
+
+@mock_iot
+def test_principal_policy():
+ client = boto3.client('iot', region_name='ap-northeast-1')
+ policy_name = 'my-policy'
+ doc = '{}'
+ client.create_policy(policyName=policy_name, policyDocument=doc)
+ cert = client.create_keys_and_certificate(setAsActive=True)
+ cert_arn = cert['certificateArn']
+
+ client.attach_policy(policyName=policy_name, target=cert_arn)
+
+ res = client.list_principal_policies(principal=cert_arn)
+ res.should.have.key('policies').which.should.have.length_of(1)
+ for policy in res['policies']:
+ policy.should.have.key('policyName').which.should_not.be.none
+ policy.should.have.key('policyArn').which.should_not.be.none
+
+ # do nothing if the policy is already attached to the certificate
+ client.attach_policy(policyName=policy_name, target=cert_arn)
+
+ res = client.list_principal_policies(principal=cert_arn)
+ res.should.have.key('policies').which.should.have.length_of(1)
+ for policy in res['policies']:
+ policy.should.have.key('policyName').which.should_not.be.none
+ policy.should.have.key('policyArn').which.should_not.be.none
+
+ res = client.list_policy_principals(policyName=policy_name)
+ res.should.have.key('principals').which.should.have.length_of(1)
+ for principal in res['principals']:
+ principal.should_not.be.none
+
+ client.detach_policy(policyName=policy_name, target=cert_arn)
+ res = client.list_principal_policies(principal=cert_arn)
+ res.should.have.key('policies').which.should.have.length_of(0)
+ res = client.list_policy_principals(policyName=policy_name)
+ res.should.have.key('principals').which.should.have.length_of(0)
+ with assert_raises(ClientError) as e:
+ client.detach_policy(policyName=policy_name, target=cert_arn)
+ e.exception.response['Error']['Code'].should.equal('ResourceNotFoundException')
+
+
+@mock_iot
+def test_principal_policy_deprecated():
+ client = boto3.client('iot', region_name='ap-northeast-1')
+ policy_name = 'my-policy'
+ doc = '{}'
+ policy = client.create_policy(policyName=policy_name, policyDocument=doc)
+ cert = client.create_keys_and_certificate(setAsActive=True)
+ cert_arn = cert['certificateArn']
+
+ client.attach_principal_policy(policyName=policy_name, principal=cert_arn)
+
+ res = client.list_principal_policies(principal=cert_arn)
+ res.should.have.key('policies').which.should.have.length_of(1)
+ for policy in res['policies']:
+ policy.should.have.key('policyName').which.should_not.be.none
+ policy.should.have.key('policyArn').which.should_not.be.none
+
+ res = client.list_policy_principals(policyName=policy_name)
+ res.should.have.key('principals').which.should.have.length_of(1)
+ for principal in res['principals']:
+ principal.should_not.be.none
+
+ client.detach_principal_policy(policyName=policy_name, principal=cert_arn)
+ res = client.list_principal_policies(principal=cert_arn)
+ res.should.have.key('policies').which.should.have.length_of(0)
+ res = client.list_policy_principals(policyName=policy_name)
+ res.should.have.key('principals').which.should.have.length_of(0)
+
+
+@mock_iot
+def test_principal_thing():
+ client = boto3.client('iot', region_name='ap-northeast-1')
+ thing_name = 'my-thing'
+ thing = 
client.create_thing(thingName=thing_name) + cert = client.create_keys_and_certificate(setAsActive=True) + cert_arn = cert['certificateArn'] + + client.attach_thing_principal(thingName=thing_name, principal=cert_arn) + + res = client.list_principal_things(principal=cert_arn) + res.should.have.key('things').which.should.have.length_of(1) + for thing in res['things']: + thing.should_not.be.none + res = client.list_thing_principals(thingName=thing_name) + res.should.have.key('principals').which.should.have.length_of(1) + for principal in res['principals']: + principal.should_not.be.none + + client.detach_thing_principal(thingName=thing_name, principal=cert_arn) + res = client.list_principal_things(principal=cert_arn) + res.should.have.key('things').which.should.have.length_of(0) + res = client.list_thing_principals(thingName=thing_name) + res.should.have.key('principals').which.should.have.length_of(0) + + +@mock_iot +def test_thing_groups(): + client = boto3.client('iot', region_name='ap-northeast-1') + group_name = 'my-group-name' + + # thing group + thing_group = client.create_thing_group(thingGroupName=group_name) + thing_group.should.have.key('thingGroupName').which.should.equal(group_name) + thing_group.should.have.key('thingGroupArn') + + res = client.list_thing_groups() + res.should.have.key('thingGroups').which.should.have.length_of(1) + for thing_group in res['thingGroups']: + thing_group.should.have.key('groupName').which.should_not.be.none + thing_group.should.have.key('groupArn').which.should_not.be.none + + thing_group = client.describe_thing_group(thingGroupName=group_name) + thing_group.should.have.key('thingGroupName').which.should.equal(group_name) + thing_group.should.have.key('thingGroupProperties') + thing_group.should.have.key('thingGroupMetadata') + thing_group.should.have.key('version') + + # delete thing group + client.delete_thing_group(thingGroupName=group_name) + res = client.list_thing_groups() + res.should.have.key('thingGroups').which.should.have.length_of(0) + + # props create test + props = { + 'thingGroupDescription': 'my first thing group', + 'attributePayload': { + 'attributes': { + 'key1': 'val01', + 'Key02': 'VAL2' + } + } + } + thing_group = client.create_thing_group(thingGroupName=group_name, thingGroupProperties=props) + thing_group.should.have.key('thingGroupName').which.should.equal(group_name) + thing_group.should.have.key('thingGroupArn') + + thing_group = client.describe_thing_group(thingGroupName=group_name) + thing_group.should.have.key('thingGroupProperties') \ + .which.should.have.key('attributePayload') \ + .which.should.have.key('attributes') + res_props = thing_group['thingGroupProperties']['attributePayload']['attributes'] + res_props.should.have.key('key1').which.should.equal('val01') + res_props.should.have.key('Key02').which.should.equal('VAL2') + + # props update test with merge + new_props = { + 'attributePayload': { + 'attributes': { + 'k3': 'v3' + }, + 'merge': True + } + } + client.update_thing_group( + thingGroupName=group_name, + thingGroupProperties=new_props + ) + thing_group = client.describe_thing_group(thingGroupName=group_name) + thing_group.should.have.key('thingGroupProperties') \ + .which.should.have.key('attributePayload') \ + .which.should.have.key('attributes') + res_props = thing_group['thingGroupProperties']['attributePayload']['attributes'] + res_props.should.have.key('key1').which.should.equal('val01') + res_props.should.have.key('Key02').which.should.equal('VAL2') + + 
res_props.should.have.key('k3').which.should.equal('v3')
+
+ # props update test
+ new_props = {
+ 'attributePayload': {
+ 'attributes': {
+ 'k4': 'v4'
+ }
+ }
+ }
+ client.update_thing_group(
+ thingGroupName=group_name,
+ thingGroupProperties=new_props
+ )
+ thing_group = client.describe_thing_group(thingGroupName=group_name)
+ thing_group.should.have.key('thingGroupProperties') \
+ .which.should.have.key('attributePayload') \
+ .which.should.have.key('attributes')
+ res_props = thing_group['thingGroupProperties']['attributePayload']['attributes']
+ res_props.should.have.key('k4').which.should.equal('v4')
+ res_props.should_not.have.key('key1')
+
+
+@mock_iot
+def test_thing_group_relations():
+ client = boto3.client('iot', region_name='ap-northeast-1')
+ name = 'my-thing'
+ group_name = 'my-group-name'
+
+ # thing group
+ thing_group = client.create_thing_group(thingGroupName=group_name)
+ thing_group.should.have.key('thingGroupName').which.should.equal(group_name)
+ thing_group.should.have.key('thingGroupArn')
+
+ # thing
+ thing = client.create_thing(thingName=name)
+ thing.should.have.key('thingName').which.should.equal(name)
+ thing.should.have.key('thingArn')
+
+ # add thing to thing group in all 4 ways
+ client.add_thing_to_thing_group(
+ thingGroupName=group_name,
+ thingName=name
+ )
+ client.add_thing_to_thing_group(
+ thingGroupArn=thing_group['thingGroupArn'],
+ thingArn=thing['thingArn']
+ )
+ client.add_thing_to_thing_group(
+ thingGroupName=group_name,
+ thingArn=thing['thingArn']
+ )
+ client.add_thing_to_thing_group(
+ thingGroupArn=thing_group['thingGroupArn'],
+ thingName=name
+ )
+
+ things = client.list_things_in_thing_group(
+ thingGroupName=group_name
+ )
+ things.should.have.key('things')
+ things['things'].should.have.length_of(1)
+
+ thing_groups = client.list_thing_groups_for_thing(
+ thingName=name
+ )
+ thing_groups.should.have.key('thingGroups')
+ thing_groups['thingGroups'].should.have.length_of(1)
+
+ # remove thing from thing group in all 4 ways
+ client.remove_thing_from_thing_group(
+ thingGroupName=group_name,
+ thingName=name
+ )
+ client.remove_thing_from_thing_group(
+ thingGroupArn=thing_group['thingGroupArn'],
+ thingArn=thing['thingArn']
+ )
+ client.remove_thing_from_thing_group(
+ thingGroupName=group_name,
+ thingArn=thing['thingArn']
+ )
+ client.remove_thing_from_thing_group(
+ thingGroupArn=thing_group['thingGroupArn'],
+ thingName=name
+ )
+ things = client.list_things_in_thing_group(
+ thingGroupName=group_name
+ )
+ things.should.have.key('things')
+ things['things'].should.have.length_of(0)
+
+ # update thing group for thing
+ client.update_thing_groups_for_thing(
+ thingName=name,
+ thingGroupsToAdd=[
+ group_name
+ ]
+ )
+ things = client.list_things_in_thing_group(
+ thingGroupName=group_name
+ )
+ things.should.have.key('things')
+ things['things'].should.have.length_of(1)
+
+ client.update_thing_groups_for_thing(
+ thingName=name,
+ thingGroupsToRemove=[
+ group_name
+ ]
+ )
+ things = client.list_things_in_thing_group(
+ thingGroupName=group_name
+ )
+ things.should.have.key('things')
+ things['things'].should.have.length_of(0)
+
+
+@mock_iot
+def test_create_job():
+ client = boto3.client('iot', region_name='eu-west-1')
+ name = "my-thing"
+ job_id = "TestJob"
+ # thing
+ thing = client.create_thing(thingName=name)
+ thing.should.have.key('thingName').which.should.equal(name)
+ thing.should.have.key('thingArn')
+
+ # job document
+ job_document = {
+ "field": "value"
+ }
+
+ job = client.create_job(
+ jobId=job_id,
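+ # target the job at the ARN of the thing created above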
+ targets=[thing["thingArn"]],
+ document=json.dumps(job_document),
+ description="Description",
+ presignedUrlConfig={
+ 'roleArn': 'arn:aws:iam::1:role/service-role/iot_job_role',
+ 'expiresInSec': 123
+ },
+ targetSelection="CONTINUOUS",
+ jobExecutionsRolloutConfig={
+ 'maximumPerMinute': 10
+ }
+ )
+
+ job.should.have.key('jobId').which.should.equal(job_id)
+ job.should.have.key('jobArn')
+ job.should.have.key('description')
+
+
+@mock_iot
+def test_list_jobs():
+ client = boto3.client('iot', region_name='eu-west-1')
+ name = "my-thing"
+ job_id = "TestJob"
+ # thing
+ thing = client.create_thing(thingName=name)
+ thing.should.have.key('thingName').which.should.equal(name)
+ thing.should.have.key('thingArn')
+
+ # job document
+ job_document = {
+ "field": "value"
+ }
+
+ job1 = client.create_job(
+ jobId=job_id,
+ targets=[thing["thingArn"]],
+ document=json.dumps(job_document),
+ description="Description",
+ presignedUrlConfig={
+ 'roleArn': 'arn:aws:iam::1:role/service-role/iot_job_role',
+ 'expiresInSec': 123
+ },
+ targetSelection="CONTINUOUS",
+ jobExecutionsRolloutConfig={
+ 'maximumPerMinute': 10
+ }
+ )
+
+ job1.should.have.key('jobId').which.should.equal(job_id)
+ job1.should.have.key('jobArn')
+ job1.should.have.key('description')
+
+ job2 = client.create_job(
+ jobId=job_id+"1",
+ targets=[thing["thingArn"]],
+ document=json.dumps(job_document),
+ description="Description",
+ presignedUrlConfig={
+ 'roleArn': 'arn:aws:iam::1:role/service-role/iot_job_role',
+ 'expiresInSec': 123
+ },
+ targetSelection="CONTINUOUS",
+ jobExecutionsRolloutConfig={
+ 'maximumPerMinute': 10
+ }
+ )
+
+ job2.should.have.key('jobId').which.should.equal(job_id+"1")
+ job2.should.have.key('jobArn')
+ job2.should.have.key('description')
+
+ jobs = client.list_jobs()
+ jobs.should.have.key('jobs')
+ jobs.should_not.have.key('nextToken')
+ jobs['jobs'][0].should.have.key('jobId').which.should.equal(job_id)
+ jobs['jobs'][1].should.have.key('jobId').which.should.equal(job_id+"1")
+
+
+@mock_iot
+def test_describe_job():
+ client = boto3.client('iot', region_name='eu-west-1')
+ name = "my-thing"
+ job_id = "TestJob"
+ # thing
+ thing = client.create_thing(thingName=name)
+ thing.should.have.key('thingName').which.should.equal(name)
+ thing.should.have.key('thingArn')
+
+ job = client.create_job(
+ jobId=job_id,
+ targets=[thing["thingArn"]],
+ documentSource="https://s3-eu-west-1.amazonaws.com/bucket-name/job_document.json",
+ presignedUrlConfig={
+ 'roleArn': 'arn:aws:iam::1:role/service-role/iot_job_role',
+ 'expiresInSec': 123
+ },
+ targetSelection="CONTINUOUS",
+ jobExecutionsRolloutConfig={
+ 'maximumPerMinute': 10
+ }
+ )
+
+ job.should.have.key('jobId').which.should.equal(job_id)
+ job.should.have.key('jobArn')
+
+ job = client.describe_job(jobId=job_id)
+ job.should.have.key('documentSource')
+ job.should.have.key('job')
+ job.should.have.key('job').which.should.have.key("jobArn")
+ job.should.have.key('job').which.should.have.key("jobId").which.should.equal(job_id)
+ job.should.have.key('job').which.should.have.key("targets")
+ job.should.have.key('job').which.should.have.key("jobProcessDetails")
+ job.should.have.key('job').which.should.have.key("lastUpdatedAt")
+ job.should.have.key('job').which.should.have.key("createdAt")
+ job.should.have.key('job').which.should.have.key("jobExecutionsRolloutConfig")
+ job.should.have.key('job').which.should.have.key("targetSelection").which.should.equal("CONTINUOUS")
+ 
job.should.have.key('job').which.should.have.key("presignedUrlConfig") + job.should.have.key('job').which.should.have.key("presignedUrlConfig").which.should.have.key( + "roleArn").which.should.equal('arn:aws:iam::1:role/service-role/iot_job_role') + job.should.have.key('job').which.should.have.key("presignedUrlConfig").which.should.have.key( + "expiresInSec").which.should.equal(123) + job.should.have.key('job').which.should.have.key("jobExecutionsRolloutConfig").which.should.have.key( + "maximumPerMinute").which.should.equal(10) + + +@mock_iot +def test_describe_job_1(): + client = boto3.client('iot', region_name='eu-west-1') + name = "my-thing" + job_id = "TestJob" + # thing + thing = client.create_thing(thingName=name) + thing.should.have.key('thingName').which.should.equal(name) + thing.should.have.key('thingArn') + + # job document + job_document = { + "field": "value" + } + + job = client.create_job( + jobId=job_id, + targets=[thing["thingArn"]], + document=json.dumps(job_document), + presignedUrlConfig={ + 'roleArn': 'arn:aws:iam::1:role/service-role/iot_job_role', + 'expiresInSec': 123 + }, + targetSelection="CONTINUOUS", + jobExecutionsRolloutConfig={ + 'maximumPerMinute': 10 + } + ) + + job.should.have.key('jobId').which.should.equal(job_id) + job.should.have.key('jobArn') + + job = client.describe_job(jobId=job_id) + job.should.have.key('job') + job.should.have.key('job').which.should.have.key("jobArn") + job.should.have.key('job').which.should.have.key("jobId").which.should.equal(job_id) + job.should.have.key('job').which.should.have.key("targets") + job.should.have.key('job').which.should.have.key("jobProcessDetails") + job.should.have.key('job').which.should.have.key("lastUpdatedAt") + job.should.have.key('job').which.should.have.key("createdAt") + job.should.have.key('job').which.should.have.key("jobExecutionsRolloutConfig") + job.should.have.key('job').which.should.have.key("targetSelection").which.should.equal("CONTINUOUS") + job.should.have.key('job').which.should.have.key("presignedUrlConfig") + job.should.have.key('job').which.should.have.key("presignedUrlConfig").which.should.have.key( + "roleArn").which.should.equal('arn:aws:iam::1:role/service-role/iot_job_role') + job.should.have.key('job').which.should.have.key("presignedUrlConfig").which.should.have.key( + "expiresInSec").which.should.equal(123) + job.should.have.key('job').which.should.have.key("jobExecutionsRolloutConfig").which.should.have.key( + "maximumPerMinute").which.should.equal(10) + + +@mock_iot +def test_delete_job(): + client = boto3.client('iot', region_name='eu-west-1') + name = "my-thing" + job_id = "TestJob" + # thing + thing = client.create_thing(thingName=name) + thing.should.have.key('thingName').which.should.equal(name) + thing.should.have.key('thingArn') + + job = client.create_job( + jobId=job_id, + targets=[thing["thingArn"]], + documentSource="https://s3-eu-west-1.amazonaws.com/bucket-name/job_document.json", + presignedUrlConfig={ + 'roleArn': 'arn:aws:iam::1:role/service-role/iot_job_role', + 'expiresInSec': 123 + }, + targetSelection="CONTINUOUS", + jobExecutionsRolloutConfig={ + 'maximumPerMinute': 10 + } + ) + + job.should.have.key('jobId').which.should.equal(job_id) + job.should.have.key('jobArn') + + job = client.describe_job(jobId=job_id) + job.should.have.key('job') + job.should.have.key('job').which.should.have.key("jobId").which.should.equal(job_id) + + client.delete_job(jobId=job_id) + + client.list_jobs()['jobs'].should.have.length_of(0) + + +@mock_iot +def test_cancel_job(): + 
client = boto3.client('iot', region_name='eu-west-1') + name = "my-thing" + job_id = "TestJob" + # thing + thing = client.create_thing(thingName=name) + thing.should.have.key('thingName').which.should.equal(name) + thing.should.have.key('thingArn') + + job = client.create_job( + jobId=job_id, + targets=[thing["thingArn"]], + documentSource="https://s3-eu-west-1.amazonaws.com/bucket-name/job_document.json", + presignedUrlConfig={ + 'roleArn': 'arn:aws:iam::1:role/service-role/iot_job_role', + 'expiresInSec': 123 + }, + targetSelection="CONTINUOUS", + jobExecutionsRolloutConfig={ + 'maximumPerMinute': 10 + } + ) + + job.should.have.key('jobId').which.should.equal(job_id) + job.should.have.key('jobArn') + + job = client.describe_job(jobId=job_id) + job.should.have.key('job') + job.should.have.key('job').which.should.have.key("jobId").which.should.equal(job_id) + + job = client.cancel_job(jobId=job_id, reasonCode='Because', comment='You are') + job.should.have.key('jobId').which.should.equal(job_id) + job.should.have.key('jobArn') + + job = client.describe_job(jobId=job_id) + job.should.have.key('job') + job.should.have.key('job').which.should.have.key("jobId").which.should.equal(job_id) + job.should.have.key('job').which.should.have.key("status").which.should.equal('CANCELED') + job.should.have.key('job').which.should.have.key("forceCanceled").which.should.equal(False) + job.should.have.key('job').which.should.have.key("reasonCode").which.should.equal('Because') + job.should.have.key('job').which.should.have.key("comment").which.should.equal('You are') + + +@mock_iot +def test_get_job_document_with_document_source(): + client = boto3.client('iot', region_name='eu-west-1') + name = "my-thing" + job_id = "TestJob" + # thing + thing = client.create_thing(thingName=name) + thing.should.have.key('thingName').which.should.equal(name) + thing.should.have.key('thingArn') + + job = client.create_job( + jobId=job_id, + targets=[thing["thingArn"]], + documentSource="https://s3-eu-west-1.amazonaws.com/bucket-name/job_document.json", + presignedUrlConfig={ + 'roleArn': 'arn:aws:iam::1:role/service-role/iot_job_role', + 'expiresInSec': 123 + }, + targetSelection="CONTINUOUS", + jobExecutionsRolloutConfig={ + 'maximumPerMinute': 10 + } + ) + + job.should.have.key('jobId').which.should.equal(job_id) + job.should.have.key('jobArn') + + job_document = client.get_job_document(jobId=job_id) + job_document.should.have.key('document').which.should.equal('') + + +@mock_iot +def test_get_job_document_with_document(): + client = boto3.client('iot', region_name='eu-west-1') + name = "my-thing" + job_id = "TestJob" + # thing + thing = client.create_thing(thingName=name) + thing.should.have.key('thingName').which.should.equal(name) + thing.should.have.key('thingArn') + + # job document + job_document = { + "field": "value" + } + + job = client.create_job( + jobId=job_id, + targets=[thing["thingArn"]], + document=json.dumps(job_document), + presignedUrlConfig={ + 'roleArn': 'arn:aws:iam::1:role/service-role/iot_job_role', + 'expiresInSec': 123 + }, + targetSelection="CONTINUOUS", + jobExecutionsRolloutConfig={ + 'maximumPerMinute': 10 + } + ) + + job.should.have.key('jobId').which.should.equal(job_id) + job.should.have.key('jobArn') + + job_document = client.get_job_document(jobId=job_id) + job_document.should.have.key('document').which.should.equal("{\"field\": \"value\"}") + + +@mock_iot +def test_describe_job_execution(): + client = boto3.client('iot', region_name='eu-west-1') + name = "my-thing" + job_id = "TestJob" 
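+ # the mocked job execution returns fixed placeholder values
+ # (executionNumber, versionNumber and approximateSecondsBeforeTimedOut
+ # are all 123), which the assertions below depend on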
+ # thing + thing = client.create_thing(thingName=name) + thing.should.have.key('thingName').which.should.equal(name) + thing.should.have.key('thingArn') + + # job document + job_document = { + "field": "value" + } + + job = client.create_job( + jobId=job_id, + targets=[thing["thingArn"]], + document=json.dumps(job_document), + description="Description", + presignedUrlConfig={ + 'roleArn': 'arn:aws:iam::1:role/service-role/iot_job_role', + 'expiresInSec': 123 + }, + targetSelection="CONTINUOUS", + jobExecutionsRolloutConfig={ + 'maximumPerMinute': 10 + } + ) + + job.should.have.key('jobId').which.should.equal(job_id) + job.should.have.key('jobArn') + job.should.have.key('description') + + job_execution = client.describe_job_execution(jobId=job_id, thingName=name) + job_execution.should.have.key('execution') + job_execution['execution'].should.have.key('jobId').which.should.equal(job_id) + job_execution['execution'].should.have.key('status').which.should.equal('QUEUED') + job_execution['execution'].should.have.key('forceCanceled').which.should.equal(False) + job_execution['execution'].should.have.key('statusDetails').which.should.equal({'detailsMap': {}}) + job_execution['execution'].should.have.key('thingArn').which.should.equal(thing["thingArn"]) + job_execution['execution'].should.have.key('queuedAt') + job_execution['execution'].should.have.key('startedAt') + job_execution['execution'].should.have.key('lastUpdatedAt') + job_execution['execution'].should.have.key('executionNumber').which.should.equal(123) + job_execution['execution'].should.have.key('versionNumber').which.should.equal(123) + job_execution['execution'].should.have.key('approximateSecondsBeforeTimedOut').which.should.equal(123) + + job_execution = client.describe_job_execution(jobId=job_id, thingName=name, executionNumber=123) + job_execution.should.have.key('execution') + job_execution['execution'].should.have.key('jobId').which.should.equal(job_id) + job_execution['execution'].should.have.key('status').which.should.equal('QUEUED') + job_execution['execution'].should.have.key('forceCanceled').which.should.equal(False) + job_execution['execution'].should.have.key('statusDetails').which.should.equal({'detailsMap': {}}) + job_execution['execution'].should.have.key('thingArn').which.should.equal(thing["thingArn"]) + job_execution['execution'].should.have.key('queuedAt') + job_execution['execution'].should.have.key('startedAt') + job_execution['execution'].should.have.key('lastUpdatedAt') + job_execution['execution'].should.have.key('executionNumber').which.should.equal(123) + job_execution['execution'].should.have.key('versionNumber').which.should.equal(123) + job_execution['execution'].should.have.key('approximateSecondsBeforeTimedOut').which.should.equal(123) + + try: + client.describe_job_execution(jobId=job_id, thingName=name, executionNumber=456) + except ClientError as exc: + error_code = exc.response['Error']['Code'] + error_code.should.equal('ResourceNotFoundException') + else: + raise Exception("Should have raised error") + + +@mock_iot +def test_cancel_job_execution(): + client = boto3.client('iot', region_name='eu-west-1') + name = "my-thing" + job_id = "TestJob" + # thing + thing = client.create_thing(thingName=name) + thing.should.have.key('thingName').which.should.equal(name) + thing.should.have.key('thingArn') + + # job document + job_document = { + "field": "value" + } + + job = client.create_job( + jobId=job_id, + targets=[thing["thingArn"]], + document=json.dumps(job_document), + description="Description", + 
presignedUrlConfig={ + 'roleArn': 'arn:aws:iam::1:role/service-role/iot_job_role', + 'expiresInSec': 123 + }, + targetSelection="CONTINUOUS", + jobExecutionsRolloutConfig={ + 'maximumPerMinute': 10 + } + ) + + job.should.have.key('jobId').which.should.equal(job_id) + job.should.have.key('jobArn') + job.should.have.key('description') + + client.cancel_job_execution(jobId=job_id, thingName=name) + job_execution = client.describe_job_execution(jobId=job_id, thingName=name) + job_execution.should.have.key('execution') + job_execution['execution'].should.have.key('status').which.should.equal('CANCELED') + + +@mock_iot +def test_delete_job_execution(): + client = boto3.client('iot', region_name='eu-west-1') + name = "my-thing" + job_id = "TestJob" + # thing + thing = client.create_thing(thingName=name) + thing.should.have.key('thingName').which.should.equal(name) + thing.should.have.key('thingArn') + + # job document + job_document = { + "field": "value" + } + + job = client.create_job( + jobId=job_id, + targets=[thing["thingArn"]], + document=json.dumps(job_document), + description="Description", + presignedUrlConfig={ + 'roleArn': 'arn:aws:iam::1:role/service-role/iot_job_role', + 'expiresInSec': 123 + }, + targetSelection="CONTINUOUS", + jobExecutionsRolloutConfig={ + 'maximumPerMinute': 10 + } + ) + + job.should.have.key('jobId').which.should.equal(job_id) + job.should.have.key('jobArn') + job.should.have.key('description') + + client.delete_job_execution(jobId=job_id, thingName=name, executionNumber=123) + try: + client.describe_job_execution(jobId=job_id, thingName=name, executionNumber=123) + except ClientError as exc: + error_code = exc.response['Error']['Code'] + error_code.should.equal('ResourceNotFoundException') + else: + raise Exception("Should have raised error") + + +@mock_iot +def test_list_job_executions_for_job(): + client = boto3.client('iot', region_name='eu-west-1') + name = "my-thing" + job_id = "TestJob" + # thing + thing = client.create_thing(thingName=name) + thing.should.have.key('thingName').which.should.equal(name) + thing.should.have.key('thingArn') + + # job document + job_document = { + "field": "value" + } + + job = client.create_job( + jobId=job_id, + targets=[thing["thingArn"]], + document=json.dumps(job_document), + description="Description", + presignedUrlConfig={ + 'roleArn': 'arn:aws:iam::1:role/service-role/iot_job_role', + 'expiresInSec': 123 + }, + targetSelection="CONTINUOUS", + jobExecutionsRolloutConfig={ + 'maximumPerMinute': 10 + } + ) + + job.should.have.key('jobId').which.should.equal(job_id) + job.should.have.key('jobArn') + job.should.have.key('description') + + job_execution = client.list_job_executions_for_job(jobId=job_id) + job_execution.should.have.key('executionSummaries') + job_execution['executionSummaries'][0].should.have.key('thingArn').which.should.equal(thing["thingArn"]) + + +@mock_iot +def test_list_job_executions_for_thing(): + client = boto3.client('iot', region_name='eu-west-1') + name = "my-thing" + job_id = "TestJob" + # thing + thing = client.create_thing(thingName=name) + thing.should.have.key('thingName').which.should.equal(name) + thing.should.have.key('thingArn') + + # job document + job_document = { + "field": "value" + } + + job = client.create_job( + jobId=job_id, + targets=[thing["thingArn"]], + document=json.dumps(job_document), + description="Description", + presignedUrlConfig={ + 'roleArn': 'arn:aws:iam::1:role/service-role/iot_job_role', + 'expiresInSec': 123 + }, + targetSelection="CONTINUOUS", + 
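+        # "CONTINUOUS" keeps the job open so that things added to the target
+        # group later also receive it; "SNAPSHOT" would cover only the things
+        # targeted at creation time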
jobExecutionsRolloutConfig={ + 'maximumPerMinute': 10 + } + ) + + job.should.have.key('jobId').which.should.equal(job_id) + job.should.have.key('jobArn') + job.should.have.key('description') + + job_execution = client.list_job_executions_for_thing(thingName=name) + job_execution.should.have.key('executionSummaries') + job_execution['executionSummaries'][0].should.have.key('jobId').which.should.equal(job_id) + From 41b1482b595cef56ff9b0b49380a86555cb7e6cc Mon Sep 17 00:00:00 2001 From: mickeypash Date: Sat, 20 Jul 2019 21:36:21 +0100 Subject: [PATCH 022/658] Simplify conditional --- moto/ec2/models.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/moto/ec2/models.py b/moto/ec2/models.py index 79838147e219..f91835581fb9 100644 --- a/moto/ec2/models.py +++ b/moto/ec2/models.py @@ -2064,7 +2064,7 @@ def get_volume(self, volume_id): def delete_volume(self, volume_id): if volume_id in self.volumes: volume = self.volumes[volume_id] - if volume.attachment is not None: + if volume.attachment: raise VolumeInUseError(volume_id, volume.attachment.instance.id) return self.volumes.pop(volume_id) raise InvalidVolumeIdError(volume_id) From b94147a1d57ccfec5b18ddd5b9b9aed503d01015 Mon Sep 17 00:00:00 2001 From: Stephan Huber Date: Fri, 30 Aug 2019 14:18:01 +0200 Subject: [PATCH 023/658] Merge remote-tracking branch 'upstream/master' --- moto/core/access_control.py | 365 +++++ moto/ec2/responses/launch_templates.py | 252 +++ moto/iam/policy_validation.py | 450 ++++++ moto/ses/feedback.py | 81 + moto/sts/exceptions.py | 15 + moto/sts/utils.py | 35 + other_langs/sqsSample.scala | 25 + tests/test_core/test_auth.py | 706 +++++++++ tests/test_core/test_context_manager.py | 12 + tests/test_core/test_socket.py | 48 + tests/test_ec2/test_launch_templates.py | 415 +++++ tests/test_iam/test_iam_policies.py | 1861 +++++++++++++++++++++++ tests/test_ses/test_ses_sns_boto3.py | 114 ++ update_version_from_git.py | 120 ++ 14 files changed, 4499 insertions(+) create mode 100644 moto/core/access_control.py create mode 100644 moto/ec2/responses/launch_templates.py create mode 100644 moto/iam/policy_validation.py create mode 100644 moto/ses/feedback.py create mode 100644 moto/sts/exceptions.py create mode 100644 moto/sts/utils.py create mode 100644 other_langs/sqsSample.scala create mode 100644 tests/test_core/test_auth.py create mode 100644 tests/test_core/test_context_manager.py create mode 100644 tests/test_core/test_socket.py create mode 100644 tests/test_ec2/test_launch_templates.py create mode 100644 tests/test_iam/test_iam_policies.py create mode 100644 tests/test_ses/test_ses_sns_boto3.py create mode 100644 update_version_from_git.py diff --git a/moto/core/access_control.py b/moto/core/access_control.py new file mode 100644 index 000000000000..3fb11eebd168 --- /dev/null +++ b/moto/core/access_control.py @@ -0,0 +1,365 @@ +""" +This implementation is NOT complete, there are many things to improve. +The following is a list of the most important missing features and inaccuracies. 
+ +TODO add support for more principals, apart from IAM users and assumed IAM roles +TODO add support for the Resource and Condition parts of IAM policies +TODO add support and create tests for all services in moto (for example, API Gateway is probably not supported currently) +TODO implement service specific error messages (currently, EC2 and S3 are supported separately, everything else defaults to the errors IAM returns) +TODO include information about the action's resource in error messages (once the Resource element in IAM policies is supported) +TODO check all other actions that are performed by the action called by the user (for example, autoscaling:CreateAutoScalingGroup requires permission for iam:CreateServiceLinkedRole too - see https://docs.aws.amazon.com/autoscaling/ec2/userguide/control-access-using-iam.html) +TODO add support for resource-based policies + +""" + +import json +import logging +import re +from abc import abstractmethod, ABCMeta +from enum import Enum + +import six +from botocore.auth import SigV4Auth, S3SigV4Auth +from botocore.awsrequest import AWSRequest +from botocore.credentials import Credentials +from six import string_types + +from moto.iam.models import ACCOUNT_ID, Policy +from moto.iam import iam_backend +from moto.core.exceptions import SignatureDoesNotMatchError, AccessDeniedError, InvalidClientTokenIdError, AuthFailureError +from moto.s3.exceptions import ( + BucketAccessDeniedError, + S3AccessDeniedError, + BucketInvalidTokenError, + S3InvalidTokenError, + S3InvalidAccessKeyIdError, + BucketInvalidAccessKeyIdError, + BucketSignatureDoesNotMatchError, + S3SignatureDoesNotMatchError +) +from moto.sts import sts_backend + +log = logging.getLogger(__name__) + + +def create_access_key(access_key_id, headers): + if access_key_id.startswith("AKIA") or "X-Amz-Security-Token" not in headers: + return IAMUserAccessKey(access_key_id, headers) + else: + return AssumedRoleAccessKey(access_key_id, headers) + + +class IAMUserAccessKey(object): + + def __init__(self, access_key_id, headers): + iam_users = iam_backend.list_users('/', None, None) + for iam_user in iam_users: + for access_key in iam_user.access_keys: + if access_key.access_key_id == access_key_id: + self._owner_user_name = iam_user.name + self._access_key_id = access_key_id + self._secret_access_key = access_key.secret_access_key + if "X-Amz-Security-Token" in headers: + raise CreateAccessKeyFailure(reason="InvalidToken") + return + raise CreateAccessKeyFailure(reason="InvalidId") + + @property + def arn(self): + return "arn:aws:iam::{account_id}:user/{iam_user_name}".format( + account_id=ACCOUNT_ID, + iam_user_name=self._owner_user_name + ) + + def create_credentials(self): + return Credentials(self._access_key_id, self._secret_access_key) + + def collect_policies(self): + user_policies = [] + + inline_policy_names = iam_backend.list_user_policies(self._owner_user_name) + for inline_policy_name in inline_policy_names: + inline_policy = iam_backend.get_user_policy(self._owner_user_name, inline_policy_name) + user_policies.append(inline_policy) + + attached_policies, _ = iam_backend.list_attached_user_policies(self._owner_user_name) + user_policies += attached_policies + + user_groups = iam_backend.get_groups_for_user(self._owner_user_name) + for user_group in user_groups: + inline_group_policy_names = iam_backend.list_group_policies(user_group.name) + for inline_group_policy_name in inline_group_policy_names: + inline_user_group_policy = iam_backend.get_group_policy(user_group.name, 
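+                # group membership contributes both the group's inline policies
+                # (here) and its attached managed policies (below) to the
+                # user's effective permission set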
inline_group_policy_name) + user_policies.append(inline_user_group_policy) + + attached_group_policies, _ = iam_backend.list_attached_group_policies(user_group.name) + user_policies += attached_group_policies + + return user_policies + + +class AssumedRoleAccessKey(object): + + def __init__(self, access_key_id, headers): + for assumed_role in sts_backend.assumed_roles: + if assumed_role.access_key_id == access_key_id: + self._access_key_id = access_key_id + self._secret_access_key = assumed_role.secret_access_key + self._session_token = assumed_role.session_token + self._owner_role_name = assumed_role.role_arn.split("/")[-1] + self._session_name = assumed_role.session_name + if headers["X-Amz-Security-Token"] != self._session_token: + raise CreateAccessKeyFailure(reason="InvalidToken") + return + raise CreateAccessKeyFailure(reason="InvalidId") + + @property + def arn(self): + return "arn:aws:sts::{account_id}:assumed-role/{role_name}/{session_name}".format( + account_id=ACCOUNT_ID, + role_name=self._owner_role_name, + session_name=self._session_name + ) + + def create_credentials(self): + return Credentials(self._access_key_id, self._secret_access_key, self._session_token) + + def collect_policies(self): + role_policies = [] + + inline_policy_names = iam_backend.list_role_policies(self._owner_role_name) + for inline_policy_name in inline_policy_names: + _, inline_policy = iam_backend.get_role_policy(self._owner_role_name, inline_policy_name) + role_policies.append(inline_policy) + + attached_policies, _ = iam_backend.list_attached_role_policies(self._owner_role_name) + role_policies += attached_policies + + return role_policies + + +class CreateAccessKeyFailure(Exception): + + def __init__(self, reason, *args): + super(CreateAccessKeyFailure, self).__init__(*args) + self.reason = reason + + +@six.add_metaclass(ABCMeta) +class IAMRequestBase(object): + + def __init__(self, method, path, data, headers): + log.debug("Creating {class_name} with method={method}, path={path}, data={data}, headers={headers}".format( + class_name=self.__class__.__name__, method=method, path=path, data=data, headers=headers)) + self._method = method + self._path = path + self._data = data + self._headers = headers + credential_scope = self._get_string_between('Credential=', ',', self._headers['Authorization']) + credential_data = credential_scope.split('/') + self._region = credential_data[2] + self._service = credential_data[3] + self._action = self._service + ":" + (self._data["Action"][0] if isinstance(self._data["Action"], list) else self._data["Action"]) + try: + self._access_key = create_access_key(access_key_id=credential_data[0], headers=headers) + except CreateAccessKeyFailure as e: + self._raise_invalid_access_key(e.reason) + + def check_signature(self): + original_signature = self._get_string_between('Signature=', ',', self._headers['Authorization']) + calculated_signature = self._calculate_signature() + if original_signature != calculated_signature: + self._raise_signature_does_not_match() + + def check_action_permitted(self): + if self._action == 'sts:GetCallerIdentity': # always allowed, even if there's an explicit Deny for it + return True + policies = self._access_key.collect_policies() + + permitted = False + for policy in policies: + iam_policy = IAMPolicy(policy) + permission_result = iam_policy.is_action_permitted(self._action) + if permission_result == PermissionResult.DENIED: + self._raise_access_denied() + elif permission_result == PermissionResult.PERMITTED: + permitted = True + + if not 
permitted: + self._raise_access_denied() + + @abstractmethod + def _raise_signature_does_not_match(self): + raise NotImplementedError() + + @abstractmethod + def _raise_access_denied(self): + raise NotImplementedError() + + @abstractmethod + def _raise_invalid_access_key(self, reason): + raise NotImplementedError() + + @abstractmethod + def _create_auth(self, credentials): + raise NotImplementedError() + + @staticmethod + def _create_headers_for_aws_request(signed_headers, original_headers): + headers = {} + for key, value in original_headers.items(): + if key.lower() in signed_headers: + headers[key] = value + return headers + + def _create_aws_request(self): + signed_headers = self._get_string_between('SignedHeaders=', ',', self._headers['Authorization']).split(';') + headers = self._create_headers_for_aws_request(signed_headers, self._headers) + request = AWSRequest(method=self._method, url=self._path, data=self._data, headers=headers) + request.context['timestamp'] = headers['X-Amz-Date'] + + return request + + def _calculate_signature(self): + credentials = self._access_key.create_credentials() + auth = self._create_auth(credentials) + request = self._create_aws_request() + canonical_request = auth.canonical_request(request) + string_to_sign = auth.string_to_sign(request, canonical_request) + return auth.signature(string_to_sign, request) + + @staticmethod + def _get_string_between(first_separator, second_separator, string): + return string.partition(first_separator)[2].partition(second_separator)[0] + + +class IAMRequest(IAMRequestBase): + + def _raise_signature_does_not_match(self): + if self._service == "ec2": + raise AuthFailureError() + else: + raise SignatureDoesNotMatchError() + + def _raise_invalid_access_key(self, _): + if self._service == "ec2": + raise AuthFailureError() + else: + raise InvalidClientTokenIdError() + + def _create_auth(self, credentials): + return SigV4Auth(credentials, self._service, self._region) + + def _raise_access_denied(self): + raise AccessDeniedError( + user_arn=self._access_key.arn, + action=self._action + ) + + +class S3IAMRequest(IAMRequestBase): + + def _raise_signature_does_not_match(self): + if "BucketName" in self._data: + raise BucketSignatureDoesNotMatchError(bucket=self._data["BucketName"]) + else: + raise S3SignatureDoesNotMatchError() + + def _raise_invalid_access_key(self, reason): + if reason == "InvalidToken": + if "BucketName" in self._data: + raise BucketInvalidTokenError(bucket=self._data["BucketName"]) + else: + raise S3InvalidTokenError() + else: + if "BucketName" in self._data: + raise BucketInvalidAccessKeyIdError(bucket=self._data["BucketName"]) + else: + raise S3InvalidAccessKeyIdError() + + def _create_auth(self, credentials): + return S3SigV4Auth(credentials, self._service, self._region) + + def _raise_access_denied(self): + if "BucketName" in self._data: + raise BucketAccessDeniedError(bucket=self._data["BucketName"]) + else: + raise S3AccessDeniedError() + + +class IAMPolicy(object): + + def __init__(self, policy): + if isinstance(policy, Policy): + default_version = next(policy_version for policy_version in policy.versions if policy_version.is_default) + policy_document = default_version.document + elif isinstance(policy, string_types): + policy_document = policy + else: + policy_document = policy["policy_document"] + + self._policy_json = json.loads(policy_document) + + def is_action_permitted(self, action): + permitted = False + if isinstance(self._policy_json["Statement"], list): + for policy_statement in 
self._policy_json["Statement"]: + iam_policy_statement = IAMPolicyStatement(policy_statement) + permission_result = iam_policy_statement.is_action_permitted(action) + if permission_result == PermissionResult.DENIED: + return permission_result + elif permission_result == PermissionResult.PERMITTED: + permitted = True + else: # dict + iam_policy_statement = IAMPolicyStatement(self._policy_json["Statement"]) + return iam_policy_statement.is_action_permitted(action) + + if permitted: + return PermissionResult.PERMITTED + else: + return PermissionResult.NEUTRAL + + +class IAMPolicyStatement(object): + + def __init__(self, statement): + self._statement = statement + + def is_action_permitted(self, action): + is_action_concerned = False + + if "NotAction" in self._statement: + if not self._check_element_matches("NotAction", action): + is_action_concerned = True + else: # Action is present + if self._check_element_matches("Action", action): + is_action_concerned = True + + if is_action_concerned: + if self._statement["Effect"] == "Allow": + return PermissionResult.PERMITTED + else: # Deny + return PermissionResult.DENIED + else: + return PermissionResult.NEUTRAL + + def _check_element_matches(self, statement_element, value): + if isinstance(self._statement[statement_element], list): + for statement_element_value in self._statement[statement_element]: + if self._match(statement_element_value, value): + return True + return False + else: # string + return self._match(self._statement[statement_element], value) + + @staticmethod + def _match(pattern, string): + pattern = pattern.replace("*", ".*") + pattern = "^{pattern}$".format(pattern=pattern) + return re.match(pattern, string) + + +class PermissionResult(Enum): + PERMITTED = 1 + DENIED = 2 + NEUTRAL = 3 diff --git a/moto/ec2/responses/launch_templates.py b/moto/ec2/responses/launch_templates.py new file mode 100644 index 000000000000..a8d92a928876 --- /dev/null +++ b/moto/ec2/responses/launch_templates.py @@ -0,0 +1,252 @@ +import six +import uuid +from moto.core.responses import BaseResponse +from moto.ec2.models import OWNER_ID +from moto.ec2.exceptions import FilterNotImplementedError +from moto.ec2.utils import filters_from_querystring + +from xml.etree import ElementTree +from xml.dom import minidom + + +def xml_root(name): + root = ElementTree.Element(name, { + "xmlns": "http://ec2.amazonaws.com/doc/2016-11-15/" + }) + request_id = str(uuid.uuid4()) + "example" + ElementTree.SubElement(root, "requestId").text = request_id + + return root + + +def xml_serialize(tree, key, value): + name = key[0].lower() + key[1:] + if isinstance(value, list): + if name[-1] == 's': + name = name[:-1] + + name = name + 'Set' + + node = ElementTree.SubElement(tree, name) + + if isinstance(value, (str, int, float, six.text_type)): + node.text = str(value) + elif isinstance(value, dict): + for dictkey, dictvalue in six.iteritems(value): + xml_serialize(node, dictkey, dictvalue) + elif isinstance(value, list): + for item in value: + xml_serialize(node, 'item', item) + elif value is None: + pass + else: + raise NotImplementedError("Don't know how to serialize \"{}\" to xml".format(value.__class__)) + + +def pretty_xml(tree): + rough = ElementTree.tostring(tree, 'utf-8') + parsed = minidom.parseString(rough) + return parsed.toprettyxml(indent=' ') + + +def parse_object(raw_data): + out_data = {} + for key, value in six.iteritems(raw_data): + key_fix_splits = key.split("_") + key_len = len(key_fix_splits) + + new_key = "" + for i in range(0, key_len): + new_key += 
key_fix_splits[i][0].upper() + key_fix_splits[i][1:] + + data = out_data + splits = new_key.split(".") + for split in splits[:-1]: + if split not in data: + data[split] = {} + data = data[split] + + data[splits[-1]] = value + + out_data = parse_lists(out_data) + return out_data + + +def parse_lists(data): + for key, value in six.iteritems(data): + if isinstance(value, dict): + keys = data[key].keys() + is_list = all(map(lambda k: k.isnumeric(), keys)) + + if is_list: + new_value = [] + keys = sorted(list(keys)) + for k in keys: + lvalue = value[k] + if isinstance(lvalue, dict): + lvalue = parse_lists(lvalue) + new_value.append(lvalue) + data[key] = new_value + return data + + +class LaunchTemplates(BaseResponse): + def create_launch_template(self): + name = self._get_param('LaunchTemplateName') + version_description = self._get_param('VersionDescription') + tag_spec = self._parse_tag_specification("TagSpecification") + + raw_template_data = self._get_dict_param('LaunchTemplateData.') + parsed_template_data = parse_object(raw_template_data) + + if self.is_not_dryrun('CreateLaunchTemplate'): + if tag_spec: + if 'TagSpecifications' not in parsed_template_data: + parsed_template_data['TagSpecifications'] = [] + converted_tag_spec = [] + for resource_type, tags in six.iteritems(tag_spec): + converted_tag_spec.append({ + "ResourceType": resource_type, + "Tags": [{"Key": key, "Value": value} for key, value in six.iteritems(tags)], + }) + + parsed_template_data['TagSpecifications'].extend(converted_tag_spec) + + template = self.ec2_backend.create_launch_template(name, version_description, parsed_template_data) + version = template.default_version() + + tree = xml_root("CreateLaunchTemplateResponse") + xml_serialize(tree, "launchTemplate", { + "createTime": version.create_time, + "createdBy": "arn:aws:iam::{OWNER_ID}:root".format(OWNER_ID=OWNER_ID), + "defaultVersionNumber": template.default_version_number, + "latestVersionNumber": version.number, + "launchTemplateId": template.id, + "launchTemplateName": template.name + }) + + return pretty_xml(tree) + + def create_launch_template_version(self): + name = self._get_param('LaunchTemplateName') + tmpl_id = self._get_param('LaunchTemplateId') + if name: + template = self.ec2_backend.get_launch_template_by_name(name) + if tmpl_id: + template = self.ec2_backend.get_launch_template(tmpl_id) + + version_description = self._get_param('VersionDescription') + + raw_template_data = self._get_dict_param('LaunchTemplateData.') + template_data = parse_object(raw_template_data) + + if self.is_not_dryrun('CreateLaunchTemplate'): + version = template.create_version(template_data, version_description) + + tree = xml_root("CreateLaunchTemplateVersionResponse") + xml_serialize(tree, "launchTemplateVersion", { + "createTime": version.create_time, + "createdBy": "arn:aws:iam::{OWNER_ID}:root".format(OWNER_ID=OWNER_ID), + "defaultVersion": template.is_default(version), + "launchTemplateData": version.data, + "launchTemplateId": template.id, + "launchTemplateName": template.name, + "versionDescription": version.description, + "versionNumber": version.number, + }) + return pretty_xml(tree) + + # def delete_launch_template(self): + # pass + + # def delete_launch_template_versions(self): + # pass + + def describe_launch_template_versions(self): + name = self._get_param('LaunchTemplateName') + template_id = self._get_param('LaunchTemplateId') + if name: + template = self.ec2_backend.get_launch_template_by_name(name) + if template_id: + template = 
self.ec2_backend.get_launch_template(template_id) + + max_results = self._get_int_param("MaxResults", 15) + versions = self._get_multi_param("LaunchTemplateVersion") + min_version = self._get_int_param("MinVersion") + max_version = self._get_int_param("MaxVersion") + + filters = filters_from_querystring(self.querystring) + if filters: + raise FilterNotImplementedError("all filters", "DescribeLaunchTemplateVersions") + + if self.is_not_dryrun('DescribeLaunchTemplateVersions'): + tree = ElementTree.Element("DescribeLaunchTemplateVersionsResponse", { + "xmlns": "http://ec2.amazonaws.com/doc/2016-11-15/", + }) + request_id = ElementTree.SubElement(tree, "requestId") + request_id.text = "65cadec1-b364-4354-8ca8-4176dexample" + + versions_node = ElementTree.SubElement(tree, "launchTemplateVersionSet") + + ret_versions = [] + if versions: + for v in versions: + ret_versions.append(template.get_version(int(v))) + elif min_version: + if max_version: + vMax = max_version + else: + vMax = min_version + max_results + + vMin = min_version - 1 + ret_versions = template.versions[vMin:vMax] + elif max_version: + vMax = max_version + ret_versions = template.versions[:vMax] + else: + ret_versions = template.versions + + ret_versions = ret_versions[:max_results] + + for version in ret_versions: + xml_serialize(versions_node, "item", { + "createTime": version.create_time, + "createdBy": "arn:aws:iam::{OWNER_ID}:root".format(OWNER_ID=OWNER_ID), + "defaultVersion": True, + "launchTemplateData": version.data, + "launchTemplateId": template.id, + "launchTemplateName": template.name, + "versionDescription": version.description, + "versionNumber": version.number, + }) + + return pretty_xml(tree) + + def describe_launch_templates(self): + max_results = self._get_int_param("MaxResults", 15) + template_names = self._get_multi_param("LaunchTemplateName") + template_ids = self._get_multi_param("LaunchTemplateId") + filters = filters_from_querystring(self.querystring) + + if self.is_not_dryrun("DescribeLaunchTemplates"): + tree = ElementTree.Element("DescribeLaunchTemplatesResponse") + templates_node = ElementTree.SubElement(tree, "launchTemplates") + + templates = self.ec2_backend.get_launch_templates(template_names=template_names, template_ids=template_ids, filters=filters) + + templates = templates[:max_results] + + for template in templates: + xml_serialize(templates_node, "item", { + "createTime": template.create_time, + "createdBy": "arn:aws:iam::{OWNER_ID}:root".format(OWNER_ID=OWNER_ID), + "defaultVersionNumber": template.default_version_number, + "latestVersionNumber": template.latest_version_number, + "launchTemplateId": template.id, + "launchTemplateName": template.name, + }) + + return pretty_xml(tree) + + # def modify_launch_template(self): + # pass diff --git a/moto/iam/policy_validation.py b/moto/iam/policy_validation.py new file mode 100644 index 000000000000..6ee286072e47 --- /dev/null +++ b/moto/iam/policy_validation.py @@ -0,0 +1,450 @@ +import json +import re + +from six import string_types + +from moto.iam.exceptions import MalformedPolicyDocument + + +VALID_TOP_ELEMENTS = [ + "Version", + "Id", + "Statement", + "Conditions" +] + +VALID_VERSIONS = [ + "2008-10-17", + "2012-10-17" +] + +VALID_STATEMENT_ELEMENTS = [ + "Sid", + "Action", + "NotAction", + "Resource", + "NotResource", + "Effect", + "Condition" +] + +VALID_EFFECTS = [ + "Allow", + "Deny" +] + +VALID_CONDITIONS = [ + "StringEquals", + "StringNotEquals", + "StringEqualsIgnoreCase", + "StringNotEqualsIgnoreCase", + "StringLike", + 
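+    # (this list is intended to mirror the condition operators documented in
+    # the IAM policy-element reference)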
"StringNotLike", + "NumericEquals", + "NumericNotEquals", + "NumericLessThan", + "NumericLessThanEquals", + "NumericGreaterThan", + "NumericGreaterThanEquals", + "DateEquals", + "DateNotEquals", + "DateLessThan", + "DateLessThanEquals", + "DateGreaterThan", + "DateGreaterThanEquals", + "Bool", + "BinaryEquals", + "IpAddress", + "NotIpAddress", + "ArnEquals", + "ArnLike", + "ArnNotEquals", + "ArnNotLike", + "Null" +] + +VALID_CONDITION_PREFIXES = [ + "ForAnyValue:", + "ForAllValues:" +] + +VALID_CONDITION_POSTFIXES = [ + "IfExists" +] + +SERVICE_TYPE_REGION_INFORMATION_ERROR_ASSOCIATIONS = { + "iam": 'IAM resource {resource} cannot contain region information.', + "s3": 'Resource {resource} can not contain region information.' +} + +VALID_RESOURCE_PATH_STARTING_VALUES = { + "iam": { + "values": ["user/", "federated-user/", "role/", "group/", "instance-profile/", "mfa/", "server-certificate/", + "policy/", "sms-mfa/", "saml-provider/", "oidc-provider/", "report/", "access-report/"], + "error_message": 'IAM resource path must either be "*" or start with {values}.' + } +} + + +class IAMPolicyDocumentValidator: + + def __init__(self, policy_document): + self._policy_document = policy_document + self._policy_json = {} + self._statements = [] + self._resource_error = "" # the first resource error found that does not generate a legacy parsing error + + def validate(self): + try: + self._validate_syntax() + except Exception: + raise MalformedPolicyDocument("Syntax errors in policy.") + try: + self._validate_version() + except Exception: + raise MalformedPolicyDocument("Policy document must be version 2012-10-17 or greater.") + try: + self._perform_first_legacy_parsing() + self._validate_resources_for_formats() + self._validate_not_resources_for_formats() + except Exception: + raise MalformedPolicyDocument("The policy failed legacy parsing") + try: + self._validate_sid_uniqueness() + except Exception: + raise MalformedPolicyDocument("Statement IDs (SID) in a single policy must be unique.") + try: + self._validate_action_like_exist() + except Exception: + raise MalformedPolicyDocument("Policy statement must contain actions.") + try: + self._validate_resource_exist() + except Exception: + raise MalformedPolicyDocument("Policy statement must contain resources.") + + if self._resource_error != "": + raise MalformedPolicyDocument(self._resource_error) + + self._validate_actions_for_prefixes() + self._validate_not_actions_for_prefixes() + + def _validate_syntax(self): + self._policy_json = json.loads(self._policy_document) + assert isinstance(self._policy_json, dict) + self._validate_top_elements() + self._validate_version_syntax() + self._validate_id_syntax() + self._validate_statements_syntax() + + def _validate_top_elements(self): + top_elements = self._policy_json.keys() + for element in top_elements: + assert element in VALID_TOP_ELEMENTS + + def _validate_version_syntax(self): + if "Version" in self._policy_json: + assert self._policy_json["Version"] in VALID_VERSIONS + + def _validate_version(self): + assert self._policy_json["Version"] == "2012-10-17" + + def _validate_sid_uniqueness(self): + sids = [] + for statement in self._statements: + if "Sid" in statement: + assert statement["Sid"] not in sids + sids.append(statement["Sid"]) + + def _validate_statements_syntax(self): + assert "Statement" in self._policy_json + assert isinstance(self._policy_json["Statement"], (dict, list)) + + if isinstance(self._policy_json["Statement"], dict): + self._statements.append(self._policy_json["Statement"]) + 
else: + self._statements += self._policy_json["Statement"] + + assert self._statements + for statement in self._statements: + self._validate_statement_syntax(statement) + + @staticmethod + def _validate_statement_syntax(statement): + assert isinstance(statement, dict) + for statement_element in statement.keys(): + assert statement_element in VALID_STATEMENT_ELEMENTS + + assert ("Resource" not in statement or "NotResource" not in statement) + assert ("Action" not in statement or "NotAction" not in statement) + + IAMPolicyDocumentValidator._validate_effect_syntax(statement) + IAMPolicyDocumentValidator._validate_action_syntax(statement) + IAMPolicyDocumentValidator._validate_not_action_syntax(statement) + IAMPolicyDocumentValidator._validate_resource_syntax(statement) + IAMPolicyDocumentValidator._validate_not_resource_syntax(statement) + IAMPolicyDocumentValidator._validate_condition_syntax(statement) + IAMPolicyDocumentValidator._validate_sid_syntax(statement) + + @staticmethod + def _validate_effect_syntax(statement): + assert "Effect" in statement + assert isinstance(statement["Effect"], string_types) + assert statement["Effect"].lower() in [allowed_effect.lower() for allowed_effect in VALID_EFFECTS] + + @staticmethod + def _validate_action_syntax(statement): + IAMPolicyDocumentValidator._validate_string_or_list_of_strings_syntax(statement, "Action") + + @staticmethod + def _validate_not_action_syntax(statement): + IAMPolicyDocumentValidator._validate_string_or_list_of_strings_syntax(statement, "NotAction") + + @staticmethod + def _validate_resource_syntax(statement): + IAMPolicyDocumentValidator._validate_string_or_list_of_strings_syntax(statement, "Resource") + + @staticmethod + def _validate_not_resource_syntax(statement): + IAMPolicyDocumentValidator._validate_string_or_list_of_strings_syntax(statement, "NotResource") + + @staticmethod + def _validate_string_or_list_of_strings_syntax(statement, key): + if key in statement: + assert isinstance(statement[key], (string_types, list)) + if isinstance(statement[key], list): + for resource in statement[key]: + assert isinstance(resource, string_types) + + @staticmethod + def _validate_condition_syntax(statement): + if "Condition" in statement: + assert isinstance(statement["Condition"], dict) + for condition_key, condition_value in statement["Condition"].items(): + assert isinstance(condition_value, dict) + for condition_element_key, condition_element_value in condition_value.items(): + assert isinstance(condition_element_value, (list, string_types)) + + if IAMPolicyDocumentValidator._strip_condition_key(condition_key) not in VALID_CONDITIONS: + assert not condition_value # empty dict + + @staticmethod + def _strip_condition_key(condition_key): + for valid_prefix in VALID_CONDITION_PREFIXES: + if condition_key.startswith(valid_prefix): + condition_key = condition_key[len(valid_prefix):] + break # strip only the first match + + for valid_postfix in VALID_CONDITION_POSTFIXES: + if condition_key.endswith(valid_postfix): + condition_key = condition_key[:-len(valid_postfix)] + break # strip only the first match + + return condition_key + + @staticmethod + def _validate_sid_syntax(statement): + if "Sid" in statement: + assert isinstance(statement["Sid"], string_types) + + def _validate_id_syntax(self): + if "Id" in self._policy_json: + assert isinstance(self._policy_json["Id"], string_types) + + def _validate_resource_exist(self): + for statement in self._statements: + assert ("Resource" in statement or "NotResource" in statement) + if "Resource" 
in statement and isinstance(statement["Resource"], list): + assert statement["Resource"] + elif "NotResource" in statement and isinstance(statement["NotResource"], list): + assert statement["NotResource"] + + def _validate_action_like_exist(self): + for statement in self._statements: + assert ("Action" in statement or "NotAction" in statement) + if "Action" in statement and isinstance(statement["Action"], list): + assert statement["Action"] + elif "NotAction" in statement and isinstance(statement["NotAction"], list): + assert statement["NotAction"] + + def _validate_actions_for_prefixes(self): + self._validate_action_like_for_prefixes("Action") + + def _validate_not_actions_for_prefixes(self): + self._validate_action_like_for_prefixes("NotAction") + + def _validate_action_like_for_prefixes(self, key): + for statement in self._statements: + if key in statement: + if isinstance(statement[key], string_types): + self._validate_action_prefix(statement[key]) + else: + for action in statement[key]: + self._validate_action_prefix(action) + + @staticmethod + def _validate_action_prefix(action): + action_parts = action.split(":") + if len(action_parts) == 1 and action_parts[0] != "*": + raise MalformedPolicyDocument("Actions/Conditions must be prefaced by a vendor, e.g., iam, sdb, ec2, etc.") + elif len(action_parts) > 2: + raise MalformedPolicyDocument("Actions/Condition can contain only one colon.") + + vendor_pattern = re.compile(r'[^a-zA-Z0-9\-.]') + if action_parts[0] != "*" and vendor_pattern.search(action_parts[0]): + raise MalformedPolicyDocument("Vendor {vendor} is not valid".format(vendor=action_parts[0])) + + def _validate_resources_for_formats(self): + self._validate_resource_like_for_formats("Resource") + + def _validate_not_resources_for_formats(self): + self._validate_resource_like_for_formats("NotResource") + + def _validate_resource_like_for_formats(self, key): + for statement in self._statements: + if key in statement: + if isinstance(statement[key], string_types): + self._validate_resource_format(statement[key]) + else: + for resource in sorted(statement[key], reverse=True): + self._validate_resource_format(resource) + if self._resource_error == "": + IAMPolicyDocumentValidator._legacy_parse_resource_like(statement, key) + + def _validate_resource_format(self, resource): + if resource != "*": + resource_partitions = resource.partition(":") + + if resource_partitions[1] == "": + self._resource_error = 'Resource {resource} must be in ARN format or "*".'.format(resource=resource) + return + + resource_partitions = resource_partitions[2].partition(":") + if resource_partitions[0] != "aws": + remaining_resource_parts = resource_partitions[2].split(":") + + arn1 = remaining_resource_parts[0] if remaining_resource_parts[0] != "" or len(remaining_resource_parts) > 1 else "*" + arn2 = remaining_resource_parts[1] if len(remaining_resource_parts) > 1 else "*" + arn3 = remaining_resource_parts[2] if len(remaining_resource_parts) > 2 else "*" + arn4 = ":".join(remaining_resource_parts[3:]) if len(remaining_resource_parts) > 3 else "*" + self._resource_error = 'Partition "{partition}" is not valid for resource "arn:{partition}:{arn1}:{arn2}:{arn3}:{arn4}".'.format( + partition=resource_partitions[0], + arn1=arn1, + arn2=arn2, + arn3=arn3, + arn4=arn4 + ) + return + + if resource_partitions[1] != ":": + self._resource_error = "Resource vendor must be fully qualified and cannot contain regexes." 
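+                # hit when nothing follows the partition separator, e.g. a bare
+                # "arn:aws" with no service segment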
+                return
+
+            resource_partitions = resource_partitions[2].partition(":")
+
+            service = resource_partitions[0]
+
+            if service in SERVICE_TYPE_REGION_INFORMATION_ERROR_ASSOCIATIONS.keys() and not resource_partitions[2].startswith(":"):
+                self._resource_error = SERVICE_TYPE_REGION_INFORMATION_ERROR_ASSOCIATIONS[service].format(resource=resource)
+                return
+
+            resource_partitions = resource_partitions[2].partition(":")
+            resource_partitions = resource_partitions[2].partition(":")
+
+            if service in VALID_RESOURCE_PATH_STARTING_VALUES.keys():
+                valid_start = False
+                for valid_starting_value in VALID_RESOURCE_PATH_STARTING_VALUES[service]["values"]:
+                    if resource_partitions[2].startswith(valid_starting_value):
+                        valid_start = True
+                        break
+                if not valid_start:
+                    self._resource_error = VALID_RESOURCE_PATH_STARTING_VALUES[service]["error_message"].format(
+                        values=", ".join(VALID_RESOURCE_PATH_STARTING_VALUES[service]["values"])
+                    )
+
+    def _perform_first_legacy_parsing(self):
+        """This method excludes legacy parsing of resources, since that has to be done later."""
+        for statement in self._statements:
+            self._legacy_parse_statement(statement)
+
+    @staticmethod
+    def _legacy_parse_statement(statement):
+        assert statement["Effect"] in VALID_EFFECTS  # case-sensitive matching
+        if "Condition" in statement:
+            for condition_key, condition_value in statement["Condition"].items():
+                IAMPolicyDocumentValidator._legacy_parse_condition(condition_key, condition_value)
+
+    @staticmethod
+    def _legacy_parse_resource_like(statement, key):
+        if isinstance(statement[key], string_types):
+            if statement[key] != "*":
+                assert statement[key].count(":") >= 5 or "::" not in statement[key]
+                assert statement[key].split(":")[2] != ""
+        else:  # list
+            for resource in statement[key]:
+                if resource != "*":
+                    assert resource.count(":") >= 5 or "::" not in resource
+                    assert resource.split(":")[2] != ""
+
+    @staticmethod
+    def _legacy_parse_condition(condition_key, condition_value):
+        stripped_condition_key = IAMPolicyDocumentValidator._strip_condition_key(condition_key)
+
+        if stripped_condition_key.startswith("Date"):
+            for condition_element_key, condition_element_value in condition_value.items():
+                if isinstance(condition_element_value, string_types):
+                    IAMPolicyDocumentValidator._legacy_parse_date_condition_value(condition_element_value)
+                else:  # it has to be a list
+                    for date_condition_value in condition_element_value:
+                        IAMPolicyDocumentValidator._legacy_parse_date_condition_value(date_condition_value)
+
+    @staticmethod
+    def _legacy_parse_date_condition_value(date_condition_value):
+        if "t" in date_condition_value.lower() or "-" in date_condition_value:
+            IAMPolicyDocumentValidator._validate_iso_8601_datetime(date_condition_value.lower())
+        else:  # timestamp
+            assert 0 <= int(date_condition_value) <= 9223372036854775807
+
+    @staticmethod
+    def _validate_iso_8601_datetime(datetime):
+        datetime_parts = datetime.partition("t")
+        negative_year = datetime_parts[0].startswith("-")
+        date_parts = datetime_parts[0][1:].split("-") if negative_year else datetime_parts[0].split("-")
+        year = "-" + date_parts[0] if negative_year else date_parts[0]
+        assert -292275054 <= int(year) <= 292278993
+        if len(date_parts) > 1:
+            month = date_parts[1]
+            assert 1 <= int(month) <= 12
+        if len(date_parts) > 2:
+            day = date_parts[2]
+            assert 1 <= int(day) <= 31
+        assert len(date_parts) < 4
+
+        time_parts = datetime_parts[2].split(":")
+        if time_parts[0] != "":
+            hours = time_parts[0]
+            assert 0 <= int(hours) <= 23
+        if len(time_parts) > 1:
+            minutes = time_parts[1]
+            assert 0 <= int(minutes) <= 59
+        if len(time_parts) > 2:
+            if "z" in time_parts[2]:
+                seconds_with_decimal_fraction = time_parts[2].partition("z")[0]
+                assert time_parts[2].partition("z")[2] == ""
+            elif "+" in time_parts[2]:
+                seconds_with_decimal_fraction = time_parts[2].partition("+")[0]
+                time_zone_data = time_parts[2].partition("+")[2].partition(":")
+                time_zone_hours = time_zone_data[0]
+                assert len(time_zone_hours) == 2
+                assert 0 <= int(time_zone_hours) <= 23
+                if time_zone_data[1] == ":":
+                    time_zone_minutes = time_zone_data[2]
+                    assert len(time_zone_minutes) == 2
+                    assert 0 <= int(time_zone_minutes) <= 59
+            else:
+                seconds_with_decimal_fraction = time_parts[2]
+            seconds_with_decimal_fraction_partition = seconds_with_decimal_fraction.partition(".")
+            seconds = seconds_with_decimal_fraction_partition[0]
+            assert 0 <= int(seconds) <= 59
+            if seconds_with_decimal_fraction_partition[1] == ".":
+                decimal_seconds = seconds_with_decimal_fraction_partition[2]
+                assert 0 <= int(decimal_seconds) <= 999999999
diff --git a/moto/ses/feedback.py b/moto/ses/feedback.py
new file mode 100644
index 000000000000..2d32f9ce040d
--- /dev/null
+++ b/moto/ses/feedback.py
@@ -0,0 +1,81 @@
+"""
+SES Feedback messages
+Extracted from https://docs.aws.amazon.com/ses/latest/DeveloperGuide/notification-contents.html
+"""
+COMMON_MAIL = {
+    "notificationType": "Bounce, Complaint, or Delivery.",
+    "mail": {
+        "timestamp": "2018-10-08T14:05:45 +0000",
+        "messageId": "000001378603177f-7a5433e7-8edb-42ae-af10-f0181f34d6ee-000000",
+        "source": "sender@example.com",
+        "sourceArn": "arn:aws:ses:us-west-2:888888888888:identity/example.com",
+        "sourceIp": "127.0.3.0",
+        "sendingAccountId": "123456789012",
+        "destination": [
+            "recipient@example.com"
+        ],
+        "headersTruncated": False,
+        "headers": [
+            {
+                "name": "From",
+                "value": "\"Sender Name\" <sender@example.com>"
+            },
+            {
+                "name": "To",
+                "value": "\"Recipient Name\" <recipient@example.com>"
+            }
+        ],
+        "commonHeaders": {
+            "from": [
+                "Sender Name <sender@example.com>"
+            ],
+            "date": "Mon, 08 Oct 2018 14:05:45 +0000",
+            "to": [
+                "Recipient Name <recipient@example.com>"
+            ],
+            "messageId": " custom-message-ID",
+            "subject": "Message sent using Amazon SES"
+        }
+    }
+}
+BOUNCE = {
+    "bounceType": "Permanent",
+    "bounceSubType": "General",
+    "bouncedRecipients": [
+        {
+            "status": "5.0.0",
+            "action": "failed",
+            "diagnosticCode": "smtp; 550 user unknown",
+            "emailAddress": "recipient1@example.com"
+        },
+        {
+            "status": "4.0.0",
+            "action": "delayed",
+            "emailAddress": "recipient2@example.com"
+        }
+    ],
+    "reportingMTA": "example.com",
+    "timestamp": "2012-05-25T14:59:38.605Z",
+    "feedbackId": "000001378603176d-5a4b5ad9-6f30-4198-a8c3-b1eb0c270a1d-000000",
+    "remoteMtaIp": "127.0.2.0"
+}
+COMPLAINT = {
+    "userAgent": "AnyCompany Feedback Loop (V0.01)",
+    "complainedRecipients": [
+        {
+            "emailAddress": "recipient1@example.com"
+        }
+    ],
+    "complaintFeedbackType": "abuse",
+    "arrivalDate": "2009-12-03T04:24:21.000-05:00",
+    "timestamp": "2012-05-25T14:59:38.623Z",
+    "feedbackId": "000001378603177f-18c07c78-fa81-4a58-9dd1-fedc3cb8f49a-000000"
+}
+DELIVERY = {
+    "timestamp": "2014-05-28T22:41:01.184Z",
+    "processingTimeMillis": 546,
+    "recipients": ["success@simulator.amazonses.com"],
+    "smtpResponse": "250 ok: Message 64111812 accepted",
+    "reportingMTA": "a8-70.smtp-out.amazonses.com",
+    "remoteMtaIp": "127.0.2.0"
+}
diff --git a/moto/sts/exceptions.py b/moto/sts/exceptions.py
new file mode 100644
index 000000000000..bddb56e3f062
--- /dev/null
+++ b/moto/sts/exceptions.py
@@ -0,0 +1,15 @@
+from __future__ import unicode_literals
+from
moto.core.exceptions import RESTError + + +class STSClientError(RESTError): + code = 400 + + +class STSValidationError(STSClientError): + + def __init__(self, *args, **kwargs): + super(STSValidationError, self).__init__( + "ValidationError", + *args, **kwargs + ) diff --git a/moto/sts/utils.py b/moto/sts/utils.py new file mode 100644 index 000000000000..50767729f156 --- /dev/null +++ b/moto/sts/utils.py @@ -0,0 +1,35 @@ +import base64 +import os +import random +import string + +import six + +ACCOUNT_SPECIFIC_ACCESS_KEY_PREFIX = "8NWMTLYQ" +ACCOUNT_SPECIFIC_ASSUMED_ROLE_ID_PREFIX = "3X42LBCD" +SESSION_TOKEN_PREFIX = "FQoGZXIvYXdzEBYaD" + + +def random_access_key_id(): + return ACCOUNT_SPECIFIC_ACCESS_KEY_PREFIX + _random_uppercase_or_digit_sequence(8) + + +def random_secret_access_key(): + return base64.b64encode(os.urandom(30)).decode() + + +def random_session_token(): + return SESSION_TOKEN_PREFIX + base64.b64encode(os.urandom(266))[len(SESSION_TOKEN_PREFIX):].decode() + + +def random_assumed_role_id(): + return ACCOUNT_SPECIFIC_ASSUMED_ROLE_ID_PREFIX + _random_uppercase_or_digit_sequence(9) + + +def _random_uppercase_or_digit_sequence(length): + return ''.join( + six.text_type( + random.choice( + string.ascii_uppercase + string.digits + )) for _ in range(length) + ) diff --git a/other_langs/sqsSample.scala b/other_langs/sqsSample.scala new file mode 100644 index 000000000000..f83daaa228ba --- /dev/null +++ b/other_langs/sqsSample.scala @@ -0,0 +1,25 @@ +package com.amazonaws.examples + +import com.amazonaws.client.builder.AwsClientBuilder +import com.amazonaws.regions.{Region, Regions} +import com.amazonaws.services.sqs.AmazonSQSClientBuilder + +import scala.jdk.CollectionConverters._ + +object QueueTest extends App { + val region = Region.getRegion(Regions.US_WEST_2).getName + val serviceEndpoint = "http://localhost:5000" + + val amazonSqs = AmazonSQSClientBuilder.standard() + .withEndpointConfiguration( + new AwsClientBuilder.EndpointConfiguration(serviceEndpoint, region)) + .build + + val queueName = "my-first-queue" + amazonSqs.createQueue(queueName) + + val urls = amazonSqs.listQueues().getQueueUrls.asScala + println("Listing queues") + println(urls.map(url => s" - $url").mkString(System.lineSeparator)) + println() +} diff --git a/tests/test_core/test_auth.py b/tests/test_core/test_auth.py new file mode 100644 index 000000000000..00229f808af4 --- /dev/null +++ b/tests/test_core/test_auth.py @@ -0,0 +1,706 @@ +import json + +import boto3 +import sure # noqa +from botocore.exceptions import ClientError +# Ensure 'assert_raises' context manager support for Python 2.6 +import tests.backport_assert_raises +from nose.tools import assert_raises + +from moto import mock_iam, mock_ec2, mock_s3, mock_sts, mock_elbv2, mock_rds2 +from moto.core import set_initial_no_auth_action_count +from moto.iam.models import ACCOUNT_ID + + +@mock_iam +def create_user_with_access_key(user_name='test-user'): + client = boto3.client('iam', region_name='us-east-1') + client.create_user(UserName=user_name) + return client.create_access_key(UserName=user_name)['AccessKey'] + + +@mock_iam +def create_user_with_access_key_and_inline_policy(user_name, policy_document, policy_name='policy1'): + client = boto3.client('iam', region_name='us-east-1') + client.create_user(UserName=user_name) + client.put_user_policy(UserName=user_name, PolicyName=policy_name, PolicyDocument=json.dumps(policy_document)) + return client.create_access_key(UserName=user_name)['AccessKey'] + + +@mock_iam +def 
create_user_with_access_key_and_attached_policy(user_name, policy_document, policy_name='policy1'): + client = boto3.client('iam', region_name='us-east-1') + client.create_user(UserName=user_name) + policy_arn = client.create_policy( + PolicyName=policy_name, + PolicyDocument=json.dumps(policy_document) + )['Policy']['Arn'] + client.attach_user_policy(UserName=user_name, PolicyArn=policy_arn) + return client.create_access_key(UserName=user_name)['AccessKey'] + + +@mock_iam +def create_user_with_access_key_and_multiple_policies(user_name, inline_policy_document, + attached_policy_document, inline_policy_name='policy1', attached_policy_name='policy1'): + client = boto3.client('iam', region_name='us-east-1') + client.create_user(UserName=user_name) + policy_arn = client.create_policy( + PolicyName=attached_policy_name, + PolicyDocument=json.dumps(attached_policy_document) + )['Policy']['Arn'] + client.attach_user_policy(UserName=user_name, PolicyArn=policy_arn) + client.put_user_policy(UserName=user_name, PolicyName=inline_policy_name, PolicyDocument=json.dumps(inline_policy_document)) + return client.create_access_key(UserName=user_name)['AccessKey'] + + +def create_group_with_attached_policy_and_add_user(user_name, policy_document, + group_name='test-group', policy_name='policy1'): + client = boto3.client('iam', region_name='us-east-1') + client.create_group(GroupName=group_name) + policy_arn = client.create_policy( + PolicyName=policy_name, + PolicyDocument=json.dumps(policy_document) + )['Policy']['Arn'] + client.attach_group_policy(GroupName=group_name, PolicyArn=policy_arn) + client.add_user_to_group(GroupName=group_name, UserName=user_name) + + +def create_group_with_inline_policy_and_add_user(user_name, policy_document, + group_name='test-group', policy_name='policy1'): + client = boto3.client('iam', region_name='us-east-1') + client.create_group(GroupName=group_name) + client.put_group_policy( + GroupName=group_name, + PolicyName=policy_name, + PolicyDocument=json.dumps(policy_document) + ) + client.add_user_to_group(GroupName=group_name, UserName=user_name) + + +def create_group_with_multiple_policies_and_add_user(user_name, inline_policy_document, + attached_policy_document, group_name='test-group', + inline_policy_name='policy1', attached_policy_name='policy1'): + client = boto3.client('iam', region_name='us-east-1') + client.create_group(GroupName=group_name) + client.put_group_policy( + GroupName=group_name, + PolicyName=inline_policy_name, + PolicyDocument=json.dumps(inline_policy_document) + ) + policy_arn = client.create_policy( + PolicyName=attached_policy_name, + PolicyDocument=json.dumps(attached_policy_document) + )['Policy']['Arn'] + client.attach_group_policy(GroupName=group_name, PolicyArn=policy_arn) + client.add_user_to_group(GroupName=group_name, UserName=user_name) + + +@mock_iam +@mock_sts +def create_role_with_attached_policy_and_assume_it(role_name, trust_policy_document, + policy_document, session_name='session1', policy_name='policy1'): + iam_client = boto3.client('iam', region_name='us-east-1') + sts_client = boto3.client('sts', region_name='us-east-1') + role_arn = iam_client.create_role( + RoleName=role_name, + AssumeRolePolicyDocument=json.dumps(trust_policy_document) + )['Role']['Arn'] + policy_arn = iam_client.create_policy( + PolicyName=policy_name, + PolicyDocument=json.dumps(policy_document) + )['Policy']['Arn'] + iam_client.attach_role_policy(RoleName=role_name, PolicyArn=policy_arn) + return sts_client.assume_role(RoleArn=role_arn, 
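+    # the temporary credentials returned here include a session token, which
+    # the access-control layer later matches against the X-Amz-Security-Token
+    # header of each request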
RoleSessionName=session_name)['Credentials'] + + +@mock_iam +@mock_sts +def create_role_with_inline_policy_and_assume_it(role_name, trust_policy_document, + policy_document, session_name='session1', policy_name='policy1'): + iam_client = boto3.client('iam', region_name='us-east-1') + sts_client = boto3.client('sts', region_name='us-east-1') + role_arn = iam_client.create_role( + RoleName=role_name, + AssumeRolePolicyDocument=json.dumps(trust_policy_document) + )['Role']['Arn'] + iam_client.put_role_policy( + RoleName=role_name, + PolicyName=policy_name, + PolicyDocument=json.dumps(policy_document) + ) + return sts_client.assume_role(RoleArn=role_arn, RoleSessionName=session_name)['Credentials'] + + +@set_initial_no_auth_action_count(0) +@mock_iam +def test_invalid_client_token_id(): + client = boto3.client('iam', region_name='us-east-1', aws_access_key_id='invalid', aws_secret_access_key='invalid') + with assert_raises(ClientError) as ex: + client.get_user() + ex.exception.response['Error']['Code'].should.equal('InvalidClientTokenId') + ex.exception.response['ResponseMetadata']['HTTPStatusCode'].should.equal(403) + ex.exception.response['Error']['Message'].should.equal('The security token included in the request is invalid.') + + +@set_initial_no_auth_action_count(0) +@mock_ec2 +def test_auth_failure(): + client = boto3.client('ec2', region_name='us-east-1', aws_access_key_id='invalid', aws_secret_access_key='invalid') + with assert_raises(ClientError) as ex: + client.describe_instances() + ex.exception.response['Error']['Code'].should.equal('AuthFailure') + ex.exception.response['ResponseMetadata']['HTTPStatusCode'].should.equal(401) + ex.exception.response['Error']['Message'].should.equal('AWS was not able to validate the provided access credentials') + + +@set_initial_no_auth_action_count(2) +@mock_iam +def test_signature_does_not_match(): + access_key = create_user_with_access_key() + client = boto3.client('iam', region_name='us-east-1', + aws_access_key_id=access_key['AccessKeyId'], + aws_secret_access_key='invalid') + with assert_raises(ClientError) as ex: + client.get_user() + ex.exception.response['Error']['Code'].should.equal('SignatureDoesNotMatch') + ex.exception.response['ResponseMetadata']['HTTPStatusCode'].should.equal(403) + ex.exception.response['Error']['Message'].should.equal('The request signature we calculated does not match the signature you provided. Check your AWS Secret Access Key and signing method. 
Consult the service documentation for details.') + + +@set_initial_no_auth_action_count(2) +@mock_ec2 +def test_auth_failure_with_valid_access_key_id(): + access_key = create_user_with_access_key() + client = boto3.client('ec2', region_name='us-east-1', + aws_access_key_id=access_key['AccessKeyId'], + aws_secret_access_key='invalid') + with assert_raises(ClientError) as ex: + client.describe_instances() + ex.exception.response['Error']['Code'].should.equal('AuthFailure') + ex.exception.response['ResponseMetadata']['HTTPStatusCode'].should.equal(401) + ex.exception.response['Error']['Message'].should.equal('AWS was not able to validate the provided access credentials') + + +@set_initial_no_auth_action_count(2) +@mock_ec2 +def test_access_denied_with_no_policy(): + user_name = 'test-user' + access_key = create_user_with_access_key(user_name) + client = boto3.client('ec2', region_name='us-east-1', + aws_access_key_id=access_key['AccessKeyId'], + aws_secret_access_key=access_key['SecretAccessKey']) + with assert_raises(ClientError) as ex: + client.describe_instances() + ex.exception.response['Error']['Code'].should.equal('AccessDenied') + ex.exception.response['ResponseMetadata']['HTTPStatusCode'].should.equal(403) + ex.exception.response['Error']['Message'].should.equal( + 'User: arn:aws:iam::{account_id}:user/{user_name} is not authorized to perform: {operation}'.format( + account_id=ACCOUNT_ID, + user_name=user_name, + operation="ec2:DescribeInstances" + ) + ) + + +@set_initial_no_auth_action_count(3) +@mock_ec2 +def test_access_denied_with_not_allowing_policy(): + user_name = 'test-user' + inline_policy_document = { + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Action": [ + "ec2:Describe*" + ], + "Resource": "*" + } + ] + } + access_key = create_user_with_access_key_and_inline_policy(user_name, inline_policy_document) + client = boto3.client('ec2', region_name='us-east-1', + aws_access_key_id=access_key['AccessKeyId'], + aws_secret_access_key=access_key['SecretAccessKey']) + with assert_raises(ClientError) as ex: + client.run_instances(MaxCount=1, MinCount=1) + ex.exception.response['Error']['Code'].should.equal('AccessDenied') + ex.exception.response['ResponseMetadata']['HTTPStatusCode'].should.equal(403) + ex.exception.response['Error']['Message'].should.equal( + 'User: arn:aws:iam::{account_id}:user/{user_name} is not authorized to perform: {operation}'.format( + account_id=ACCOUNT_ID, + user_name=user_name, + operation="ec2:RunInstances" + ) + ) + + +@set_initial_no_auth_action_count(3) +@mock_ec2 +def test_access_denied_with_denying_policy(): + user_name = 'test-user' + inline_policy_document = { + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Action": [ + "ec2:*", + ], + "Resource": "*" + }, + { + "Effect": "Deny", + "Action": "ec2:CreateVpc", + "Resource": "*" + } + ] + } + access_key = create_user_with_access_key_and_inline_policy(user_name, inline_policy_document) + client = boto3.client('ec2', region_name='us-east-1', + aws_access_key_id=access_key['AccessKeyId'], + aws_secret_access_key=access_key['SecretAccessKey']) + with assert_raises(ClientError) as ex: + client.create_vpc(CidrBlock="10.0.0.0/16") + ex.exception.response['Error']['Code'].should.equal('AccessDenied') + ex.exception.response['ResponseMetadata']['HTTPStatusCode'].should.equal(403) + ex.exception.response['Error']['Message'].should.equal( + 'User: arn:aws:iam::{account_id}:user/{user_name} is not authorized to perform: {operation}'.format( + 
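+        # the explicit Deny on ec2:CreateVpc wins over the broad ec2:* Allow
+        # above, matching IAM's deny-overrides evaluation order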
account_id=ACCOUNT_ID, + user_name=user_name, + operation="ec2:CreateVpc" + ) + ) + + +@set_initial_no_auth_action_count(3) +@mock_sts +def test_get_caller_identity_allowed_with_denying_policy(): + user_name = 'test-user' + inline_policy_document = { + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Deny", + "Action": "sts:GetCallerIdentity", + "Resource": "*" + } + ] + } + access_key = create_user_with_access_key_and_inline_policy(user_name, inline_policy_document) + client = boto3.client('sts', region_name='us-east-1', + aws_access_key_id=access_key['AccessKeyId'], + aws_secret_access_key=access_key['SecretAccessKey']) + client.get_caller_identity().should.be.a(dict) + + +@set_initial_no_auth_action_count(3) +@mock_ec2 +def test_allowed_with_wildcard_action(): + user_name = 'test-user' + inline_policy_document = { + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Action": "ec2:Describe*", + "Resource": "*" + } + ] + } + access_key = create_user_with_access_key_and_inline_policy(user_name, inline_policy_document) + client = boto3.client('ec2', region_name='us-east-1', + aws_access_key_id=access_key['AccessKeyId'], + aws_secret_access_key=access_key['SecretAccessKey']) + client.describe_tags()['Tags'].should.be.empty + + +@set_initial_no_auth_action_count(4) +@mock_iam +def test_allowed_with_explicit_action_in_attached_policy(): + user_name = 'test-user' + attached_policy_document = { + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Action": "iam:ListGroups", + "Resource": "*" + } + ] + } + access_key = create_user_with_access_key_and_attached_policy(user_name, attached_policy_document) + client = boto3.client('iam', region_name='us-east-1', + aws_access_key_id=access_key['AccessKeyId'], + aws_secret_access_key=access_key['SecretAccessKey']) + client.list_groups()['Groups'].should.be.empty + + +@set_initial_no_auth_action_count(8) +@mock_s3 +@mock_iam +def test_s3_access_denied_with_denying_attached_group_policy(): + user_name = 'test-user' + attached_policy_document = { + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Action": "s3:ListAllMyBuckets", + "Resource": "*" + } + ] + } + group_attached_policy_document = { + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Deny", + "Action": "s3:List*", + "Resource": "*" + } + ] + } + access_key = create_user_with_access_key_and_attached_policy(user_name, attached_policy_document) + create_group_with_attached_policy_and_add_user(user_name, group_attached_policy_document) + client = boto3.client('s3', region_name='us-east-1', + aws_access_key_id=access_key['AccessKeyId'], + aws_secret_access_key=access_key['SecretAccessKey']) + with assert_raises(ClientError) as ex: + client.list_buckets() + ex.exception.response['Error']['Code'].should.equal('AccessDenied') + ex.exception.response['ResponseMetadata']['HTTPStatusCode'].should.equal(403) + ex.exception.response['Error']['Message'].should.equal('Access Denied') + + +@set_initial_no_auth_action_count(6) +@mock_s3 +@mock_iam +def test_s3_access_denied_with_denying_inline_group_policy(): + user_name = 'test-user' + bucket_name = 'test-bucket' + inline_policy_document = { + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Action": "*", + "Resource": "*" + } + ] + } + group_inline_policy_document = { + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Deny", + "Action": "s3:GetObject", + "Resource": "*" + } + ] + } + access_key = create_user_with_access_key_and_inline_policy(user_name, 
inline_policy_document) + create_group_with_inline_policy_and_add_user(user_name, group_inline_policy_document) + client = boto3.client('s3', region_name='us-east-1', + aws_access_key_id=access_key['AccessKeyId'], + aws_secret_access_key=access_key['SecretAccessKey']) + client.create_bucket(Bucket=bucket_name) + with assert_raises(ClientError) as ex: + client.get_object(Bucket=bucket_name, Key='sdfsdf') + ex.exception.response['Error']['Code'].should.equal('AccessDenied') + ex.exception.response['ResponseMetadata']['HTTPStatusCode'].should.equal(403) + ex.exception.response['Error']['Message'].should.equal('Access Denied') + + +@set_initial_no_auth_action_count(10) +@mock_iam +@mock_ec2 +def test_access_denied_with_many_irrelevant_policies(): + user_name = 'test-user' + inline_policy_document = { + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Action": "ec2:Describe*", + "Resource": "*" + } + ] + } + attached_policy_document = { + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Action": "s3:*", + "Resource": "*" + } + ] + } + group_inline_policy_document = { + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Deny", + "Action": "iam:List*", + "Resource": "*" + } + ] + } + group_attached_policy_document = { + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Deny", + "Action": "lambda:*", + "Resource": "*" + } + ] + } + access_key = create_user_with_access_key_and_multiple_policies(user_name, inline_policy_document, + attached_policy_document) + create_group_with_multiple_policies_and_add_user(user_name, group_inline_policy_document, + group_attached_policy_document) + client = boto3.client('ec2', region_name='us-east-1', + aws_access_key_id=access_key['AccessKeyId'], + aws_secret_access_key=access_key['SecretAccessKey']) + with assert_raises(ClientError) as ex: + client.create_key_pair(KeyName="TestKey") + ex.exception.response['Error']['Code'].should.equal('AccessDenied') + ex.exception.response['ResponseMetadata']['HTTPStatusCode'].should.equal(403) + ex.exception.response['Error']['Message'].should.equal( + 'User: arn:aws:iam::{account_id}:user/{user_name} is not authorized to perform: {operation}'.format( + account_id=ACCOUNT_ID, + user_name=user_name, + operation="ec2:CreateKeyPair" + ) + ) + + +@set_initial_no_auth_action_count(4) +@mock_iam +@mock_sts +@mock_ec2 +@mock_elbv2 +def test_allowed_with_temporary_credentials(): + role_name = 'test-role' + trust_policy_document = { + "Version": "2012-10-17", + "Statement": { + "Effect": "Allow", + "Principal": {"AWS": "arn:aws:iam::{account_id}:root".format(account_id=ACCOUNT_ID)}, + "Action": "sts:AssumeRole" + } + } + attached_policy_document = { + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Action": [ + "elasticloadbalancing:CreateLoadBalancer", + "ec2:DescribeSubnets" + ], + "Resource": "*" + } + ] + } + credentials = create_role_with_attached_policy_and_assume_it(role_name, trust_policy_document, attached_policy_document) + elbv2_client = boto3.client('elbv2', region_name='us-east-1', + aws_access_key_id=credentials['AccessKeyId'], + aws_secret_access_key=credentials['SecretAccessKey'], + aws_session_token=credentials['SessionToken']) + ec2_client = boto3.client('ec2', region_name='us-east-1', + aws_access_key_id=credentials['AccessKeyId'], + aws_secret_access_key=credentials['SecretAccessKey'], + aws_session_token=credentials['SessionToken']) + subnets = ec2_client.describe_subnets()['Subnets'] + len(subnets).should.be.greater_than(1) + 
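# A note on where those subnets come from: this test never creates any.
# moto pre-seeds every mocked region with a default VPC and default
# subnets, which is why ec2:DescribeSubnets alone lets the assumed role
# find two subnets to attach the load balancer to. A hedged check of that
# seeding (isDefault is a standard DescribeVpcs filter; whether this moto
# version implements it is an assumption):
default_vpcs = ec2_client.describe_vpcs(
    Filters=[{'Name': 'isDefault', 'Values': ['true']}])['Vpcs']
len(default_vpcs).should.equal(1)  # exactly one seeded default VPC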
elbv2_client.create_load_balancer( + Name='test-load-balancer', + Subnets=[ + subnets[0]['SubnetId'], + subnets[1]['SubnetId'] + ] + )['LoadBalancers'].should.have.length_of(1) + + +@set_initial_no_auth_action_count(3) +@mock_iam +@mock_sts +@mock_rds2 +def test_access_denied_with_temporary_credentials(): + role_name = 'test-role' + session_name = 'test-session' + trust_policy_document = { + "Version": "2012-10-17", + "Statement": { + "Effect": "Allow", + "Principal": {"AWS": "arn:aws:iam::{account_id}:root".format(account_id=ACCOUNT_ID)}, + "Action": "sts:AssumeRole" + } + } + attached_policy_document = { + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Action": [ + 'rds:Describe*' + ], + "Resource": "*" + } + ] + } + credentials = create_role_with_inline_policy_and_assume_it(role_name, trust_policy_document, + attached_policy_document, session_name) + client = boto3.client('rds', region_name='us-east-1', + aws_access_key_id=credentials['AccessKeyId'], + aws_secret_access_key=credentials['SecretAccessKey'], + aws_session_token=credentials['SessionToken']) + with assert_raises(ClientError) as ex: + client.create_db_instance( + DBInstanceIdentifier='test-db-instance', + DBInstanceClass='db.t3', + Engine='aurora-postgresql' + ) + ex.exception.response['Error']['Code'].should.equal('AccessDenied') + ex.exception.response['ResponseMetadata']['HTTPStatusCode'].should.equal(403) + ex.exception.response['Error']['Message'].should.equal( + 'User: arn:aws:sts::{account_id}:assumed-role/{role_name}/{session_name} is not authorized to perform: {operation}'.format( + account_id=ACCOUNT_ID, + role_name=role_name, + session_name=session_name, + operation="rds:CreateDBInstance" + ) + ) + + +@set_initial_no_auth_action_count(3) +@mock_iam +def test_get_user_from_credentials(): + user_name = 'new-test-user' + inline_policy_document = { + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Action": "iam:*", + "Resource": "*" + } + ] + } + access_key = create_user_with_access_key_and_inline_policy(user_name, inline_policy_document) + client = boto3.client('iam', region_name='us-east-1', + aws_access_key_id=access_key['AccessKeyId'], + aws_secret_access_key=access_key['SecretAccessKey']) + client.get_user()['User']['UserName'].should.equal(user_name) + + +@set_initial_no_auth_action_count(0) +@mock_s3 +def test_s3_invalid_access_key_id(): + client = boto3.client('s3', region_name='us-east-1', aws_access_key_id='invalid', aws_secret_access_key='invalid') + with assert_raises(ClientError) as ex: + client.list_buckets() + ex.exception.response['Error']['Code'].should.equal('InvalidAccessKeyId') + ex.exception.response['ResponseMetadata']['HTTPStatusCode'].should.equal(403) + ex.exception.response['Error']['Message'].should.equal('The AWS Access Key Id you provided does not exist in our records.') + + +@set_initial_no_auth_action_count(3) +@mock_s3 +@mock_iam +def test_s3_signature_does_not_match(): + bucket_name = 'test-bucket' + access_key = create_user_with_access_key() + client = boto3.client('s3', region_name='us-east-1', + aws_access_key_id=access_key['AccessKeyId'], + aws_secret_access_key='invalid') + client.create_bucket(Bucket=bucket_name) + with assert_raises(ClientError) as ex: + client.put_object(Bucket=bucket_name, Key="abc") + ex.exception.response['Error']['Code'].should.equal('SignatureDoesNotMatch') + ex.exception.response['ResponseMetadata']['HTTPStatusCode'].should.equal(403) + ex.exception.response['Error']['Message'].should.equal('The request 
signature we calculated does not match the signature you provided. Check your key and signing method.') + + +@set_initial_no_auth_action_count(7) +@mock_s3 +@mock_iam +def test_s3_access_denied_not_action(): + user_name = 'test-user' + bucket_name = 'test-bucket' + inline_policy_document = { + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Action": "*", + "Resource": "*" + } + ] + } + group_inline_policy_document = { + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Deny", + "NotAction": "iam:GetUser", + "Resource": "*" + } + ] + } + access_key = create_user_with_access_key_and_inline_policy(user_name, inline_policy_document) + create_group_with_inline_policy_and_add_user(user_name, group_inline_policy_document) + client = boto3.client('s3', region_name='us-east-1', + aws_access_key_id=access_key['AccessKeyId'], + aws_secret_access_key=access_key['SecretAccessKey']) + client.create_bucket(Bucket=bucket_name) + with assert_raises(ClientError) as ex: + client.delete_object(Bucket=bucket_name, Key='sdfsdf') + ex.exception.response['Error']['Code'].should.equal('AccessDenied') + ex.exception.response['ResponseMetadata']['HTTPStatusCode'].should.equal(403) + ex.exception.response['Error']['Message'].should.equal('Access Denied') + + +@set_initial_no_auth_action_count(4) +@mock_iam +@mock_sts +@mock_s3 +def test_s3_invalid_token_with_temporary_credentials(): + role_name = 'test-role' + session_name = 'test-session' + bucket_name = 'test-bucket-888' + trust_policy_document = { + "Version": "2012-10-17", + "Statement": { + "Effect": "Allow", + "Principal": {"AWS": "arn:aws:iam::{account_id}:root".format(account_id=ACCOUNT_ID)}, + "Action": "sts:AssumeRole" + } + } + attached_policy_document = { + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Action": [ + '*' + ], + "Resource": "*" + } + ] + } + credentials = create_role_with_inline_policy_and_assume_it(role_name, trust_policy_document, + attached_policy_document, session_name) + client = boto3.client('s3', region_name='us-east-1', + aws_access_key_id=credentials['AccessKeyId'], + aws_secret_access_key=credentials['SecretAccessKey'], + aws_session_token='invalid') + client.create_bucket(Bucket=bucket_name) + with assert_raises(ClientError) as ex: + client.list_bucket_metrics_configurations(Bucket=bucket_name) + ex.exception.response['Error']['Code'].should.equal('InvalidToken') + ex.exception.response['ResponseMetadata']['HTTPStatusCode'].should.equal(400) + ex.exception.response['Error']['Message'].should.equal('The provided token is malformed or otherwise invalid.') diff --git a/tests/test_core/test_context_manager.py b/tests/test_core/test_context_manager.py new file mode 100644 index 000000000000..4824e021fffd --- /dev/null +++ b/tests/test_core/test_context_manager.py @@ -0,0 +1,12 @@ +import sure # noqa +import boto3 +from moto import mock_sqs, settings + + +def test_context_manager_returns_mock(): + with mock_sqs() as sqs_mock: + conn = boto3.client("sqs", region_name='us-west-1') + conn.create_queue(QueueName="queue1") + + if not settings.TEST_SERVER_MODE: + list(sqs_mock.backends['us-west-1'].queues.keys()).should.equal(['queue1']) diff --git a/tests/test_core/test_socket.py b/tests/test_core/test_socket.py new file mode 100644 index 000000000000..2e73d7b5fa47 --- /dev/null +++ b/tests/test_core/test_socket.py @@ -0,0 +1,48 @@ +import unittest +from moto import mock_dynamodb2_deprecated, mock_dynamodb2 +import socket + +from six import PY3 + + +class 
TestSocketPair(unittest.TestCase): + + @mock_dynamodb2_deprecated + def test_asyncio_deprecated(self): + if PY3: + self.assertIn( + 'moto.packages.httpretty.core.fakesock.socket', + str(socket.socket), + 'Our mock should be present' + ) + import asyncio + self.assertIsNotNone(asyncio.get_event_loop()) + + @mock_dynamodb2_deprecated + def test_socket_pair_deprecated(self): + + # In Python2, the fakesocket is not set, for some reason. + if PY3: + self.assertIn( + 'moto.packages.httpretty.core.fakesock.socket', + str(socket.socket), + 'Our mock should be present' + ) + a, b = socket.socketpair() + self.assertIsNotNone(a) + self.assertIsNotNone(b) + if a: + a.close() + if b: + b.close() + + + @mock_dynamodb2 + def test_socket_pair(self): + a, b = socket.socketpair() + self.assertIsNotNone(a) + self.assertIsNotNone(b) + if a: + a.close() + if b: + b.close() diff --git a/tests/test_ec2/test_launch_templates.py b/tests/test_ec2/test_launch_templates.py new file mode 100644 index 000000000000..87e1d3986578 --- /dev/null +++ b/tests/test_ec2/test_launch_templates.py @@ -0,0 +1,415 @@ +import boto3 +import sure # noqa + +from nose.tools import assert_raises +from botocore.client import ClientError + +from moto import mock_ec2 + + +@mock_ec2 +def test_launch_template_create(): + cli = boto3.client("ec2", region_name="us-east-1") + + resp = cli.create_launch_template( + LaunchTemplateName="test-template", + + # the absolute minimum needed to create a template without other resources + LaunchTemplateData={ + "TagSpecifications": [{ + "ResourceType": "instance", + "Tags": [{ + "Key": "test", + "Value": "value", + }], + }], + }, + ) + + resp.should.have.key("LaunchTemplate") + lt = resp["LaunchTemplate"] + lt["LaunchTemplateName"].should.equal("test-template") + lt["DefaultVersionNumber"].should.equal(1) + lt["LatestVersionNumber"].should.equal(1) + + with assert_raises(ClientError) as ex: + cli.create_launch_template( + LaunchTemplateName="test-template", + LaunchTemplateData={ + "TagSpecifications": [{ + "ResourceType": "instance", + "Tags": [{ + "Key": "test", + "Value": "value", + }], + }], + }, + ) + + str(ex.exception).should.equal( + 'An error occurred (InvalidLaunchTemplateName.AlreadyExistsException) when calling the CreateLaunchTemplate operation: Launch template name already in use.') + + +@mock_ec2 +def test_describe_launch_template_versions(): + template_data = { + "ImageId": "ami-abc123", + "DisableApiTermination": False, + "TagSpecifications": [{ + "ResourceType": "instance", + "Tags": [{ + "Key": "test", + "Value": "value", + }], + }], + "SecurityGroupIds": [ + "sg-1234", + "sg-ab5678", + ], + } + + cli = boto3.client("ec2", region_name="us-east-1") + + create_resp = cli.create_launch_template( + LaunchTemplateName="test-template", + LaunchTemplateData=template_data) + + # test using name + resp = cli.describe_launch_template_versions( + LaunchTemplateName="test-template", + Versions=['1']) + + templ = resp["LaunchTemplateVersions"][0]["LaunchTemplateData"] + templ.should.equal(template_data) + + # test using id + resp = cli.describe_launch_template_versions( + LaunchTemplateId=create_resp["LaunchTemplate"]["LaunchTemplateId"], + Versions=['1']) + + templ = resp["LaunchTemplateVersions"][0]["LaunchTemplateData"] + templ.should.equal(template_data) + + +@mock_ec2 +def test_create_launch_template_version(): + cli = boto3.client("ec2", region_name="us-east-1") + + create_resp = cli.create_launch_template( + LaunchTemplateName="test-template", + LaunchTemplateData={ + "ImageId": "ami-abc123" 
+ }) + + version_resp = cli.create_launch_template_version( + LaunchTemplateName="test-template", + LaunchTemplateData={ + "ImageId": "ami-def456" + }, + VersionDescription="new ami") + + version_resp.should.have.key("LaunchTemplateVersion") + version = version_resp["LaunchTemplateVersion"] + version["DefaultVersion"].should.equal(False) + version["LaunchTemplateId"].should.equal(create_resp["LaunchTemplate"]["LaunchTemplateId"]) + version["VersionDescription"].should.equal("new ami") + version["VersionNumber"].should.equal(2) + + +@mock_ec2 +def test_create_launch_template_version_by_id(): + cli = boto3.client("ec2", region_name="us-east-1") + + create_resp = cli.create_launch_template( + LaunchTemplateName="test-template", + LaunchTemplateData={ + "ImageId": "ami-abc123" + }) + + version_resp = cli.create_launch_template_version( + LaunchTemplateId=create_resp["LaunchTemplate"]["LaunchTemplateId"], + LaunchTemplateData={ + "ImageId": "ami-def456" + }, + VersionDescription="new ami") + + version_resp.should.have.key("LaunchTemplateVersion") + version = version_resp["LaunchTemplateVersion"] + version["DefaultVersion"].should.equal(False) + version["LaunchTemplateId"].should.equal(create_resp["LaunchTemplate"]["LaunchTemplateId"]) + version["VersionDescription"].should.equal("new ami") + version["VersionNumber"].should.equal(2) + + +@mock_ec2 +def test_describe_launch_template_versions_with_multiple_versions(): + cli = boto3.client("ec2", region_name="us-east-1") + + cli.create_launch_template( + LaunchTemplateName="test-template", + LaunchTemplateData={ + "ImageId": "ami-abc123" + }) + + cli.create_launch_template_version( + LaunchTemplateName="test-template", + LaunchTemplateData={ + "ImageId": "ami-def456" + }, + VersionDescription="new ami") + + resp = cli.describe_launch_template_versions( + LaunchTemplateName="test-template") + + resp["LaunchTemplateVersions"].should.have.length_of(2) + resp["LaunchTemplateVersions"][0]["LaunchTemplateData"]["ImageId"].should.equal("ami-abc123") + resp["LaunchTemplateVersions"][1]["LaunchTemplateData"]["ImageId"].should.equal("ami-def456") + + +@mock_ec2 +def test_describe_launch_template_versions_with_versions_option(): + cli = boto3.client("ec2", region_name="us-east-1") + + cli.create_launch_template( + LaunchTemplateName="test-template", + LaunchTemplateData={ + "ImageId": "ami-abc123" + }) + + cli.create_launch_template_version( + LaunchTemplateName="test-template", + LaunchTemplateData={ + "ImageId": "ami-def456" + }, + VersionDescription="new ami") + + cli.create_launch_template_version( + LaunchTemplateName="test-template", + LaunchTemplateData={ + "ImageId": "ami-hij789" + }, + VersionDescription="new ami, again") + + resp = cli.describe_launch_template_versions( + LaunchTemplateName="test-template", + Versions=["2", "3"]) + + resp["LaunchTemplateVersions"].should.have.length_of(2) + resp["LaunchTemplateVersions"][0]["LaunchTemplateData"]["ImageId"].should.equal("ami-def456") + resp["LaunchTemplateVersions"][1]["LaunchTemplateData"]["ImageId"].should.equal("ami-hij789") + + +@mock_ec2 +def test_describe_launch_template_versions_with_min(): + cli = boto3.client("ec2", region_name="us-east-1") + + cli.create_launch_template( + LaunchTemplateName="test-template", + LaunchTemplateData={ + "ImageId": "ami-abc123" + }) + + cli.create_launch_template_version( + LaunchTemplateName="test-template", + LaunchTemplateData={ + "ImageId": "ami-def456" + }, + VersionDescription="new ami") + + cli.create_launch_template_version( + 
LaunchTemplateName="test-template", + LaunchTemplateData={ + "ImageId": "ami-hij789" + }, + VersionDescription="new ami, again") + + resp = cli.describe_launch_template_versions( + LaunchTemplateName="test-template", + MinVersion="2") + + resp["LaunchTemplateVersions"].should.have.length_of(2) + resp["LaunchTemplateVersions"][0]["LaunchTemplateData"]["ImageId"].should.equal("ami-def456") + resp["LaunchTemplateVersions"][1]["LaunchTemplateData"]["ImageId"].should.equal("ami-hij789") + + +@mock_ec2 +def test_describe_launch_template_versions_with_max(): + cli = boto3.client("ec2", region_name="us-east-1") + + cli.create_launch_template( + LaunchTemplateName="test-template", + LaunchTemplateData={ + "ImageId": "ami-abc123" + }) + + cli.create_launch_template_version( + LaunchTemplateName="test-template", + LaunchTemplateData={ + "ImageId": "ami-def456" + }, + VersionDescription="new ami") + + cli.create_launch_template_version( + LaunchTemplateName="test-template", + LaunchTemplateData={ + "ImageId": "ami-hij789" + }, + VersionDescription="new ami, again") + + resp = cli.describe_launch_template_versions( + LaunchTemplateName="test-template", + MaxVersion="2") + + resp["LaunchTemplateVersions"].should.have.length_of(2) + resp["LaunchTemplateVersions"][0]["LaunchTemplateData"]["ImageId"].should.equal("ami-abc123") + resp["LaunchTemplateVersions"][1]["LaunchTemplateData"]["ImageId"].should.equal("ami-def456") + + +@mock_ec2 +def test_describe_launch_template_versions_with_min_and_max(): + cli = boto3.client("ec2", region_name="us-east-1") + + cli.create_launch_template( + LaunchTemplateName="test-template", + LaunchTemplateData={ + "ImageId": "ami-abc123" + }) + + cli.create_launch_template_version( + LaunchTemplateName="test-template", + LaunchTemplateData={ + "ImageId": "ami-def456" + }, + VersionDescription="new ami") + + cli.create_launch_template_version( + LaunchTemplateName="test-template", + LaunchTemplateData={ + "ImageId": "ami-hij789" + }, + VersionDescription="new ami, again") + + cli.create_launch_template_version( + LaunchTemplateName="test-template", + LaunchTemplateData={ + "ImageId": "ami-345abc" + }, + VersionDescription="new ami, because why not") + + resp = cli.describe_launch_template_versions( + LaunchTemplateName="test-template", + MinVersion="2", + MaxVersion="3") + + resp["LaunchTemplateVersions"].should.have.length_of(2) + resp["LaunchTemplateVersions"][0]["LaunchTemplateData"]["ImageId"].should.equal("ami-def456") + resp["LaunchTemplateVersions"][1]["LaunchTemplateData"]["ImageId"].should.equal("ami-hij789") + + +@mock_ec2 +def test_describe_launch_templates(): + cli = boto3.client("ec2", region_name="us-east-1") + + lt_ids = [] + r = cli.create_launch_template( + LaunchTemplateName="test-template", + LaunchTemplateData={ + "ImageId": "ami-abc123" + }) + lt_ids.append(r["LaunchTemplate"]["LaunchTemplateId"]) + + r = cli.create_launch_template( + LaunchTemplateName="test-template2", + LaunchTemplateData={ + "ImageId": "ami-abc123" + }) + lt_ids.append(r["LaunchTemplate"]["LaunchTemplateId"]) + + # general call, all templates + resp = cli.describe_launch_templates() + resp.should.have.key("LaunchTemplates") + resp["LaunchTemplates"].should.have.length_of(2) + resp["LaunchTemplates"][0]["LaunchTemplateName"].should.equal("test-template") + resp["LaunchTemplates"][1]["LaunchTemplateName"].should.equal("test-template2") + + # filter by names + resp = cli.describe_launch_templates( + LaunchTemplateNames=["test-template2", "test-template"]) + 
resp.should.have.key("LaunchTemplates") + resp["LaunchTemplates"].should.have.length_of(2) + resp["LaunchTemplates"][0]["LaunchTemplateName"].should.equal("test-template2") + resp["LaunchTemplates"][1]["LaunchTemplateName"].should.equal("test-template") + + # filter by ids + resp = cli.describe_launch_templates(LaunchTemplateIds=lt_ids) + resp.should.have.key("LaunchTemplates") + resp["LaunchTemplates"].should.have.length_of(2) + resp["LaunchTemplates"][0]["LaunchTemplateName"].should.equal("test-template") + resp["LaunchTemplates"][1]["LaunchTemplateName"].should.equal("test-template2") + + +@mock_ec2 +def test_describe_launch_templates_with_filters(): + cli = boto3.client("ec2", region_name="us-east-1") + + r = cli.create_launch_template( + LaunchTemplateName="test-template", + LaunchTemplateData={ + "ImageId": "ami-abc123" + }) + + cli.create_tags( + Resources=[r["LaunchTemplate"]["LaunchTemplateId"]], + Tags=[ + {"Key": "tag1", "Value": "a value"}, + {"Key": "another-key", "Value": "this value"}, + ]) + + cli.create_launch_template( + LaunchTemplateName="no-tags", + LaunchTemplateData={ + "ImageId": "ami-abc123" + }) + + resp = cli.describe_launch_templates(Filters=[{ + "Name": "tag:tag1", "Values": ["a value"] + }]) + + resp["LaunchTemplates"].should.have.length_of(1) + resp["LaunchTemplates"][0]["LaunchTemplateName"].should.equal("test-template") + + resp = cli.describe_launch_templates(Filters=[{ + "Name": "launch-template-name", "Values": ["no-tags"] + }]) + resp["LaunchTemplates"].should.have.length_of(1) + resp["LaunchTemplates"][0]["LaunchTemplateName"].should.equal("no-tags") + + +@mock_ec2 +def test_create_launch_template_with_tag_spec(): + cli = boto3.client("ec2", region_name="us-east-1") + + cli.create_launch_template( + LaunchTemplateName="test-template", + LaunchTemplateData={"ImageId": "ami-abc123"}, + TagSpecifications=[{ + "ResourceType": "instance", + "Tags": [ + {"Key": "key", "Value": "value"} + ] + }], + ) + + resp = cli.describe_launch_template_versions( + LaunchTemplateName="test-template", + Versions=["1"]) + version = resp["LaunchTemplateVersions"][0] + + version["LaunchTemplateData"].should.have.key("TagSpecifications") + version["LaunchTemplateData"]["TagSpecifications"].should.have.length_of(1) + version["LaunchTemplateData"]["TagSpecifications"][0].should.equal({ + "ResourceType": "instance", + "Tags": [ + {"Key": "key", "Value": "value"} + ] + }) diff --git a/tests/test_iam/test_iam_policies.py b/tests/test_iam/test_iam_policies.py new file mode 100644 index 000000000000..e1924a559a62 --- /dev/null +++ b/tests/test_iam/test_iam_policies.py @@ -0,0 +1,1861 @@ +import json + +import boto3 +from botocore.exceptions import ClientError +from nose.tools import assert_raises + +from moto import mock_iam + +invalid_policy_document_test_cases = [ + { + "document": "This is not a json document", + "error_message": 'Syntax errors in policy.' + }, + { + "document": { + "Statement": { + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": "arn:aws:s3:::example_bucket" + } + }, + "error_message": 'Policy document must be version 2012-10-17 or greater.' + }, + { + "document": { + "Version": "2008-10-17", + "Statement": { + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": "arn:aws:s3:::example_bucket" + } + }, + "error_message": 'Policy document must be version 2012-10-17 or greater.' 
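# The cases above pin down the Version check: a missing Version and the
# old "2008-10-17" produce the version message, while the too-new
# "2013-10-17" falls through to "Syntax errors in policy." A hedged sketch
# consistent with those three outcomes (illustrative helper, not moto's
# actual validator; the lexicographic comparison is an assumption):
def _check_version(document):
    version = document.get("Version")
    if version is None or version < "2012-10-17":
        raise ValueError("Policy document must be version 2012-10-17 or greater.")
    if version != "2012-10-17":
        raise ValueError("Syntax errors in policy.")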
+ }, + { + "document": { + "Version": "2013-10-17", + "Statement": { + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": "arn:aws:s3:::example_bucket" + } + }, + "error_message": 'Syntax errors in policy.' + }, + { + "document": { + "Version": "2012-10-17" + }, + "error_message": 'Syntax errors in policy.' + }, + { + "document": { + "Version": "2012-10-17", + "Statement": ["afd"] + }, + "error_message": 'Syntax errors in policy.' + }, + { + "document": { + "Version": "2012-10-17", + "Statement": { + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": "arn:aws:s3:::example_bucket" + }, + "Extra field": "value" + }, + "error_message": 'Syntax errors in policy.' + }, + { + "document": { + "Version": "2012-10-17", + "Statement": { + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": "arn:aws:s3:::example_bucket", + "Extra field": "value" + } + }, + "error_message": 'Syntax errors in policy.' + }, + { + "document": { + "Version": "2012-10-17", + "Id": ["cd3a324d2343d942772346-34234234423404-4c2242343242349d1642ee"], + "Statement": { + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": "arn:aws:s3:::example_bucket" + } + }, + "error_message": 'Syntax errors in policy.' + }, + { + "document": { + "Version": "2012-10-17", + "Id": {}, + "Statement": { + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": "arn:aws:s3:::example_bucket" + } + }, + "error_message": 'Syntax errors in policy.' + }, + { + "document": { + "Version": "2012-10-17", + "Statement": { + "Effect": "invalid", + "Action": "s3:ListBucket", + "Resource": "arn:aws:s3:::example_bucket" + } + }, + "error_message": 'Syntax errors in policy.' + }, + { + "document": { + "Version": "2012-10-17", + "Statement": { + "Effect": "Allow", + "Action": "invalid", + "Resource": "arn:aws:s3:::example_bucket" + } + }, + "error_message": 'Actions/Conditions must be prefaced by a vendor, e.g., iam, sdb, ec2, etc.' + }, + { + "document": { + "Version": "2012-10-17", + "Statement": + { + "Effect": "Allow", + "NotAction": "", + "Resource": "arn:aws:s3:::example_bucket" + } + }, + "error_message": 'Actions/Conditions must be prefaced by a vendor, e.g., iam, sdb, ec2, etc.' + }, + { + "document": { + "Version": "2012-10-17", + "Statement": + { + "Effect": "Allow", + "Action": "a a:ListBucket", + "Resource": "arn:aws:s3:::example_bucket" + } + }, + "error_message": 'Vendor a a is not valid' + }, + { + "document": { + "Version": "2012-10-17", + "Statement": + { + "Effect": "Allow", + "Action": "s3:List:Bucket", + "Resource": "arn:aws:s3:::example_bucket" + } + }, + "error_message": 'Actions/Condition can contain only one colon.' + }, + { + "document": { + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Action": "s3s:ListBucket", + "Resource": "arn:aws:s3:::example_bucket" + }, + { + "Effect": "Allow", + "Action": "s:3s:ListBucket", + "Resource": "arn:aws:s3:::example_bucket" + } + ] + }, + "error_message": 'Actions/Condition can contain only one colon.' + }, + { + "document": { + "Version": "2012-10-17", + "Statement": { + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": "invalid resource" + } + }, + "error_message": 'Resource invalid resource must be in ARN format or "*".' 
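# The action-format cases above imply three separate checks: an action must
# carry a vendor prefix, the whole string may contain at most one colon,
# and the vendor itself may not contain spaces ("a a" fails, while "s3.s",
# "s3-s" and long alphanumeric vendors appear among the valid documents
# further down). A hedged sketch (illustrative helper, not moto's actual
# validator; the exact vendor character set is an assumption):
import re

def _check_action(action):
    if ':' not in action:
        raise ValueError('Actions/Conditions must be prefaced by a vendor, '
                         'e.g., iam, sdb, ec2, etc.')
    if action.count(':') > 1:
        raise ValueError('Actions/Condition can contain only one colon.')
    vendor = action.split(':')[0]
    if not re.match(r'^[\w.-]+$', vendor):
        raise ValueError('Vendor {0} is not valid'.format(vendor))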
+ }, + { + "document": { + "Version": "2012-10-17", + "Statement": [ + { + "Sid": "EnableDisableHongKong", + "Effect": "Allow", + "Action": [ + "account:EnableRegion", + "account:DisableRegion" + ], + "Resource": "", + "Condition": { + "StringEquals": {"account:TargetRegion": "ap-east-1"} + } + }, + { + "Sid": "ViewConsole", + "Effect": "Allow", + "Action": [ + "aws-portal:ViewAccount", + "account:ListRegions" + ], + "Resource": "" + } + ] + }, + "error_message": 'Resource must be in ARN format or "*".' + }, + { + "document": { + "Version": "2012-10-17", + "Statement": + { + "Effect": "Allow", + "Action": "s:3:ListBucket", + "Resource": "sdfsadf" + } + }, + "error_message": 'Resource sdfsadf must be in ARN format or "*".' + }, + { + "document": { + "Version": "2012-10-17", + "Statement": { + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": ["adf"] + } + }, + "error_message": 'Resource adf must be in ARN format or "*".' + }, + { + "document": { + "Version": "2012-10-17", + "Statement": { + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": "" + } + }, + "error_message": 'Resource must be in ARN format or "*".' + }, + { + "document": { + "Version": "2012-10-17", + "Statement": + { + "Effect": "Allow", + "NotAction": "s3s:ListBucket", + "Resource": "a:bsdfdsafsad" + } + }, + "error_message": 'Partition "bsdfdsafsad" is not valid for resource "arn:bsdfdsafsad:*:*:*:*".' + }, + { + "document": { + "Version": "2012-10-17", + "Statement": + { + "Effect": "Allow", + "NotAction": "s3s:ListBucket", + "Resource": "a:b:cadfsdf" + } + }, + "error_message": 'Partition "b" is not valid for resource "arn:b:cadfsdf:*:*:*".' + }, + { + "document": { + "Version": "2012-10-17", + "Statement": + { + "Effect": "Allow", + "NotAction": "s3s:ListBucket", + "Resource": "a:b:c:d:e:f:g:h" + } + }, + "error_message": 'Partition "b" is not valid for resource "arn:b:c:d:e:f:g:h".' + }, + { + "document": { + "Version": "2012-10-17", + "Statement": { + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": "aws:s3:::example_bucket" + } + }, + "error_message": 'Partition "s3" is not valid for resource "arn:s3:::example_bucket:*".' + }, + { + "document": { + "Version": "2012-10-17", + "Statement": { + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": [ + "arn:error:s3:::example_bucket", + "arn:error:s3::example_bucket" + ] + } + }, + "error_message": 'Partition "error" is not valid for resource "arn:error:s3:::example_bucket".' + }, + { + "document": { + "Version": "2012-10-17", + "Statement": [] + }, + "error_message": 'Syntax errors in policy.' + }, + { + "document": { + "Version": "2012-10-17", + "Statement": { + "Effect": "Allow", + "Action": "s3:ListBucket" + } + }, + "error_message": 'Policy statement must contain resources.' + }, + { + "document": { + "Version": "2012-10-17", + "Statement": { + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": [] + } + }, + "error_message": 'Policy statement must contain resources.' + }, + { + "document": { + "Version": "2012-10-17", + "Statement": { + "Effect": "Allow", + "Action": "invalid" + } + }, + "error_message": 'Policy statement must contain resources.' + }, + { + "document": { + "Version": "2012-10-17", + "Statement": { + "Effect": "Allow", + "Resource": "arn:aws:s3:::example_bucket" + } + }, + "error_message": 'Policy statement must contain actions.' 
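# The partition cases above are unusually informative about the error
# rendering: whatever the first ARN field was, the message rewrites it to
# "arn" and pads the resource out to six fields with "*" (compare
# "a:bsdfdsafsad" -> 'arn:bsdfdsafsad:*:*:*:*'). A hedged sketch that
# reproduces those messages (illustrative helper, not moto's actual
# validator; accepting aws-cn and aws-us-gov is an assumption):
def _check_resource_partition(resource):
    if resource == '*':
        return
    if ':' not in resource:
        raise ValueError('Resource {0} must be in ARN format or "*".'.format(resource))
    fields = resource.split(':')
    partition = fields[1]
    if partition not in ('aws', 'aws-cn', 'aws-us-gov'):
        fields[0] = 'arn'
        fields += ['*'] * (6 - len(fields))  # pad short ARNs out to six fields
        raise ValueError('Partition "{0}" is not valid for resource "{1}".'
                         .format(partition, ':'.join(fields)))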
+ }, + { + "document": { + "Version": "2012-10-17", + "Statement": { + "Action": "s3:ListBucket", + "Resource": "arn:aws:s3:::example_bucket" + } + }, + "error_message": 'Syntax errors in policy.' + }, + { + "document": { + "Version": "2012-10-17", + "Statement": { + "Effect": "Allow" + } + }, + "error_message": 'Policy statement must contain actions.' + }, + { + "document": { + "Version": "2012-10-17", + "Statement": + { + "Effect": "Allow", + "Action": [], + "Resource": "arn:aws:s3:::example_bucket" + } + }, + "error_message": 'Policy statement must contain actions.' + }, + { + "document": { + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Deny" + }, + { + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": "arn:aws:s3:::example_bucket" + } + ] + }, + "error_message": 'Policy statement must contain actions.' + }, + { + "document": { + "Version": "2012-10-17", + "Statement": + { + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": "arn:aws:iam:::example_bucket" + } + }, + "error_message": 'IAM resource path must either be "*" or start with user/, federated-user/, role/, group/, instance-profile/, mfa/, server-certificate/, policy/, sms-mfa/, saml-provider/, oidc-provider/, report/, access-report/.' + }, + { + "document": { + "Version": "2012-10-17", + "Statement": + { + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": "arn:aws:s3::example_bucket" + } + }, + "error_message": 'The policy failed legacy parsing' + }, + { + "document": { + "Version": "2012-10-17", + "Statement": { + "Effect": "Allow", + "Resource": "arn:aws:s3::example_bucket" + } + }, + "error_message": 'The policy failed legacy parsing' + }, + { + "document": { + "Version": "2012-10-17", + "Statement": + { + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": "arn:aws" + } + }, + "error_message": 'Resource vendor must be fully qualified and cannot contain regexes.' + }, + { + "document": { + "Version": "2012-10-17", + "Statement": + { + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": { + "a": "arn:aws:s3:::example_bucket" + } + } + }, + "error_message": 'Syntax errors in policy.' + }, + { + "document": { + "Version": "2012-10-17", + "Statement": { + "Effect": "Deny", + "Action": "s3:ListBucket", + "Resource": ["adfdf", {}] + } + }, + "error_message": 'Syntax errors in policy.' + }, + { + "document": { + "Version": "2012-10-17", + "Statement": + { + "Effect": "Allow", + "NotAction": "s3:ListBucket", + "Resource": "arn:aws:s3:::example_bucket", + "NotResource": [] + } + }, + "error_message": 'Syntax errors in policy.' + }, + { + "document": { + "Version": "2012-10-17", + "Statement": { + "Effect": "Deny", + "Action": [[]], + "Resource": "arn:aws:s3:::example_bucket" + } + }, + "error_message": 'Syntax errors in policy.' + }, + { + "document": { + "Version": "2012-10-17", + "Statement": + { + "Effect": "Allow", + "NotAction": "s3s:ListBucket", + "Action": [], + "Resource": "arn:aws:s3:::example_bucket" + } + }, + "error_message": 'Syntax errors in policy.' + }, + { + "document": { + "Version": "2012-10-17", + "Statement": + { + "Effect": "Allow", + "Action": {}, + "Resource": "arn:aws:s3:::example_bucket" + } + }, + "error_message": 'Syntax errors in policy.' + }, + { + "document": { + "Version": "2012-10-17", + "Statement": + { + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": "arn:aws:s3:::example_bucket", + "Condition": [] + } + }, + "error_message": 'Syntax errors in policy.' 
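# Alongside the syntax cases, the "must contain" errors above fix an
# ordering: a statement with neither actions nor resources reports the
# actions error first, and a statement whose Action is present but
# malformed ("invalid") still reports missing resources before the action
# format is checked. A hedged sketch of that presence check (illustrative
# helper, not moto's actual validator):
def _check_statement_presence(statement):
    if not statement.get('Action') and not statement.get('NotAction'):
        raise ValueError('Policy statement must contain actions.')
    if not statement.get('Resource') and not statement.get('NotResource'):
        raise ValueError('Policy statement must contain resources.')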
+ }, + { + "document": { + "Version": "2012-10-17", + "Statement": + { + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": "arn:aws:s3:::example_bucket", + "Condition": "a" + } + }, + "error_message": 'Syntax errors in policy.' + }, + { + "document": { + "Version": "2012-10-17", + "Statement": + { + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": "arn:aws:s3:::example_bucket", + "Condition": { + "a": "b" + } + } + }, + "error_message": 'Syntax errors in policy.' + }, + { + "document": { + "Version": "2012-10-17", + "Statement": + { + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": "arn:aws:s3:::example_bucket", + "Condition": { + "DateGreaterThan": "b" + } + } + }, + "error_message": 'Syntax errors in policy.' + }, + { + "document": { + "Version": "2012-10-17", + "Statement": + { + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": "arn:aws:s3:::example_bucket", + "Condition": { + "DateGreaterThan": [] + } + } + }, + "error_message": 'Syntax errors in policy.' + }, + { + "document": { + "Version": "2012-10-17", + "Statement": + { + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": "arn:aws:s3:::example_bucket", + "Condition": { + "DateGreaterThan": {"a": {}} + } + } + }, + "error_message": 'Syntax errors in policy.' + }, + { + "document": { + "Version": "2012-10-17", + "Statement": + { + "Effect": "Allow", + "Resource": "arn:aws:s3:::example_bucket", + "Condition": { + "DateGreaterThan": {"a": {}} + } + } + }, + "error_message": 'Syntax errors in policy.' + }, + { + "document": { + "Version": "2012-10-17", + "Statement": { + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": "arn:aws:s3:::example_bucket", + "Condition": { + "x": { + "a": "1" + } + } + } + }, + "error_message": 'Syntax errors in policy.' + }, + { + "document": { + "Version": "2012-10-17", + "Statement": { + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": "arn:aws:s3:::example_bucket", + "Condition": { + "ForAnyValue::StringEqualsIfExists": { + "a": "asf" + } + } + } + }, + "error_message": 'Syntax errors in policy.' + }, + { + "document": { + "Version": "2012-10-17", + "Statement": + { + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": "arn:aws:s3:::example_bucket", + "Condition": [ + {"ForAllValues:StringEquals": {"aws:TagKeys": "Department"}} + ] + } + }, + "error_message": 'Syntax errors in policy.' + }, + { + "document": { + "Version": "2012-10-17", + "Statement": + { + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": "arn:aws:iam:us-east-1::example_bucket" + } + }, + "error_message": 'IAM resource arn:aws:iam:us-east-1::example_bucket cannot contain region information.' + }, + { + "document": { + "Version": "2012-10-17", + "Statement": + { + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": "arn:aws:s3:us-east-1::example_bucket" + } + }, + "error_message": 'Resource arn:aws:s3:us-east-1::example_bucket can not contain region information.' + }, + { + "document": { + "Version": "2012-10-17", + "Statement": + { + "Sid": {}, + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": "arn:aws:s3:::example_bucket" + } + }, + "error_message": 'Syntax errors in policy.' + }, + { + "document": { + "Version": "2012-10-17", + "Statement": + { + "Sid": [], + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": "arn:aws:s3:::example_bucket" + } + }, + "error_message": 'Syntax errors in policy.' 
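# Taken together, the Condition cases describe a two-level shape: Condition
# must be an object, every key must map to another object, non-empty inner
# objects require a recognised operator key (optionally wrapped in
# ForAllValues:/ForAnyValue: and/or suffixed IfExists — the double-colon
# "ForAnyValue::..." spelling fails), and inner values must be strings or
# lists ({"a": {}} inside an operator is rejected). A hedged sketch with a
# deliberately tiny operator set standing in for the real one (illustrative
# helper, not moto's actual validator):
KNOWN_CONDITION_OPERATORS = {'StringEquals', 'DateGreaterThan', 'DateLessThan'}

def _check_condition(condition):
    if not isinstance(condition, dict):
        raise ValueError('Syntax errors in policy.')
    for operator, values in condition.items():
        if not isinstance(values, dict):
            raise ValueError('Syntax errors in policy.')
        base = operator.split(':', 1)[-1]  # strip one ForAllValues:/ForAnyValue: prefix
        if base.endswith('IfExists'):
            base = base[:-len('IfExists')]
        if values and base not in KNOWN_CONDITION_OPERATORS:
            raise ValueError('Syntax errors in policy.')
        for value in values.values():
            if not isinstance(value, (str, list)):
                raise ValueError('Syntax errors in policy.')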
+ }, + { + "document": { + "Version": "2012-10-17", + "Statement": [ + { + "Sid": "sdf", + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": "arn:aws:s3:::example_bucket" + }, + { + "Sid": "sdf", + "Effect": "Allow" + } + ] + }, + "error_message": 'Statement IDs (SID) in a single policy must be unique.' + }, + { + "document": { + "Statement": [ + { + "Sid": "sdf", + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": "arn:aws:s3:::example_bucket" + }, + { + "Sid": "sdf", + "Effect": "Allow" + } + ] + }, + "error_message": 'Policy document must be version 2012-10-17 or greater.' + }, + { + "document": { + "Version": "2012-10-17", + "Statement": { + "Effect": "Allow", + "NotAction": "s3:ListBucket", + "Action": "iam:dsf", + "Resource": "arn:aws:s3:::example_bucket" + } + }, + "error_message": 'Syntax errors in policy.' + }, + { + "document": { + "Version": "2012-10-17", + "Statement": { + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": "arn:aws:s3:::example_bucket", + "NotResource": "*" + } + }, + "error_message": 'Syntax errors in policy.' + }, + { + "document": { + "Version": "2012-10-17", + "Statement": { + "Effect": "denY", + "Action": "s3:ListBucket", + "Resource": "arn:aws:s3:::example_bucket" + } + }, + "error_message": 'The policy failed legacy parsing' + }, + { + "document": { + "Version": "2012-10-17", + "Statement": + { + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": "arn:aws:s3:::example_bucket", + "Condition": { + "DateGreaterThan": {"a": "sdfdsf"} + } + } + }, + "error_message": 'The policy failed legacy parsing' + }, + { + "document": { + "Statement": + { + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": "arn:aws:s3:::example_bucket", + "Condition": { + "DateGreaterThan": {"a": "sdfdsf"} + } + } + }, + "error_message": 'Policy document must be version 2012-10-17 or greater.' + }, + { + "document": { + "Statement": { + "Effect": "denY", + "Action": "s3:ListBucket", + "Resource": "arn:aws:s3:::example_bucket" + } + }, + "error_message": 'Policy document must be version 2012-10-17 or greater.' 
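# Two more rules fall out of the last few cases: Action/NotAction and
# Resource/NotResource are mutually exclusive within a statement, and Sid
# values must be unique across the whole policy. Note also that the
# duplicate-Sid document without a Version field reports the version error
# instead, so the version check evidently runs first. A hedged sketch of
# the uniqueness rule (illustrative helper, not moto's actual validator):
def _check_unique_sids(statements):
    sids = [s['Sid'] for s in statements if 'Sid' in s]
    if len(sids) != len(set(sids)):
        raise ValueError('Statement IDs (SID) in a single policy must be unique.')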
+ }, + { + "document": { + "Version": "2012-10-17", + "Statement": + { + "Effect": "Allow", + "Condition": { + "DateGreaterThan": {"a": "sdfdsf"} + } + } + }, + "error_message": 'The policy failed legacy parsing' + }, + { + "document": { + "Version": "2012-10-17", + "Statement": + { + "Effect": "Allow", + "NotAction": "s3:ListBucket", + "Resource": "arn:aws::::example_bucket" + } + }, + "error_message": 'The policy failed legacy parsing' + }, + { + "document": { + "Version": "2012-10-17", + "Statement": + { + "Effect": "allow", + "Resource": "arn:aws:s3:us-east-1::example_bucket" + } + }, + "error_message": 'The policy failed legacy parsing' + }, + { + "document": { + "Version": "2012-10-17", + "Statement": [ + { + "Sid": "sdf", + "Effect": "aLLow", + "Action": "s3:ListBucket", + "Resource": "arn:aws:s3:::example_bucket" + }, + { + "Sid": "sdf", + "Effect": "Allow" + } + ] + }, + "error_message": 'The policy failed legacy parsing' + }, + { + "document": { + "Version": "2012-10-17", + "Statement": { + "Effect": "Allow", + "Action": "s3:ListBucket", + "NotResource": "arn:aws:s3::example_bucket" + } + }, + "error_message": 'The policy failed legacy parsing' + }, + { + "document": { + "Version": "2012-10-17", + "Statement": { + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": "arn:aws:s3:::example_bucket", + "Condition": { + "DateLessThanEquals": { + "a": "234-13" + } + } + } + }, + "error_message": 'The policy failed legacy parsing' + }, + { + "document": { + "Version": "2012-10-17", + "Statement": { + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": "arn:aws:s3:::example_bucket", + "Condition": { + "DateLessThanEquals": { + "a": "2016-12-13t2:00:00.593194+1" + } + } + } + }, + "error_message": 'The policy failed legacy parsing' + }, + { + "document": { + "Version": "2012-10-17", + "Statement": { + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": "arn:aws:s3:::example_bucket", + "Condition": { + "DateLessThanEquals": { + "a": "2016-12-13t2:00:00.1999999999+10:59" + } + } + } + }, + "error_message": 'The policy failed legacy parsing' + }, + { + "document": { + "Version": "2012-10-17", + "Statement": { + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": "arn:aws:s3:::example_bucket", + "Condition": { + "DateLessThan": { + "a": "9223372036854775808" + } + } + } + }, + "error_message": 'The policy failed legacy parsing' + }, + { + "document": { + "Version": "2012-10-17", + "Statement": { + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": "arn:error:s3:::example_bucket", + "Condition": { + "DateGreaterThan": { + "a": "sdfdsf" + } + } + } + }, + "error_message": 'The policy failed legacy parsing' + }, + { + "document": { + "Version": "2012-10-17", + "Statement": { + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": "arn:aws::fdsasf" + } + }, + "error_message": 'The policy failed legacy parsing' + } +] + +valid_policy_documents = [ + { + "Version": "2012-10-17", + "Statement": { + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": [ + "arn:aws:s3:::example_bucket" + ] + } + }, + { + "Version": "2012-10-17", + "Statement": { + "Effect": "Allow", + "Action": "iam: asdf safdsf af ", + "Resource": "arn:aws:s3:::example_bucket" + } + }, + { + "Version": "2012-10-17", + "Statement": { + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": [ + "arn:aws:s3:::example_bucket", + "*" + ] + } + }, + { + "Version": "2012-10-17", + "Statement": { + "Effect": "Allow", + "Action": "*", + "Resource": 
"arn:aws:s3:::example_bucket" + } + }, + { + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": "arn:aws:s3:::example_bucket" + } + ] + }, + { + "Version": "2012-10-17", + "Statement": { + "Effect": "Allow", + "Action": "service-prefix:action-name", + "Resource": "*", + "Condition": { + "DateGreaterThan": {"aws:CurrentTime": "2017-07-01T00:00:00Z"}, + "DateLessThan": {"aws:CurrentTime": "2017-12-31T23:59:59Z"} + } + } + }, + { + "Version": "2012-10-17", + "Statement": + { + "Effect": "Allow", + "Action": "fsx:ListBucket", + "Resource": "arn:aws:s3:::example_bucket" + } + }, + { + "Version": "2012-10-17", + "Statement": + { + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": "arn:aws:iam:::user/example_bucket" + } + }, + { + "Version": "2012-10-17", + "Statement": + { + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": "arn:aws:s33:::example_bucket" + } + }, + { + "Version": "2012-10-17", + "Statement": + { + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": "arn:aws:fdsasf" + } + }, + { + "Version": "2012-10-17", + "Statement": + { + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": "arn:aws:s3:::example_bucket", + "Condition": {} + } + }, + { + "Version": "2012-10-17", + "Statement": + { + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": "arn:aws:s3:::example_bucket", + "Condition": {"ForAllValues:StringEquals": {"aws:TagKeys": "Department"}} + } + }, + { + "Version": "2012-10-17", + "Statement": + { + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": "arn:aws:cloudwatch:us-east-1::example_bucket" + } + }, + { + "Version": "2012-10-17", + "Statement": + { + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": "arn:aws:ec2:us-east-1::example_bucket" + } + }, + { + "Version": "2012-10-17", + "Statement": + { + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": "arn:aws:invalid-service:::example_bucket" + } + }, + { + "Version": "2012-10-17", + "Statement": + { + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": "arn:aws:invalid-service:us-east-1::example_bucket" + } + }, + { + "Version": "2012-10-17", + "Statement": + { + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": "arn:aws:s3:::example_bucket", + "Condition": { + "DateGreaterThan": {"aws:CurrentTime": "2017-07-01T00:00:00Z"}, + "DateLessThan": {"aws:CurrentTime": "2017-12-31T23:59:59Z"} + } + } + }, + { + "Version": "2012-10-17", + "Statement": + { + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": "arn:aws:s3:::example_bucket", + "Condition": { + "DateGreaterThan": {} + } + } + }, + { + "Version": "2012-10-17", + "Statement": + { + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": "arn:aws:s3:::example_bucket", + "Condition": { + "DateGreaterThan": {"a": []} + } + } + }, + { + "Version": "2012-10-17", + "Statement": + { + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": "arn:aws:s3:::example_bucket", + "Condition": { + "a": {} + } + } + }, + { + "Version": "2012-10-17", + "Statement": + { + "Sid": "dsfsdfsdfsdfsdfsadfsd", + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": "arn:aws:s3:::example_bucket" + } + }, + { + "Version": "2012-10-17", + "Statement": [ + { + "Sid": "ConsoleDisplay", + "Effect": "Allow", + "Action": [ + "iam:GetRole", + "iam:GetUser", + "iam:ListRoles", + "iam:ListRoleTags", + "iam:ListUsers", + "iam:ListUserTags" + ], + "Resource": "*" + }, + { + "Sid": "AddTag", + "Effect": "Allow", + 
"Action": [ + "iam:TagUser", + "iam:TagRole" + ], + "Resource": "*", + "Condition": { + "StringEquals": { + "aws:RequestTag/CostCenter": [ + "A-123", + "B-456" + ] + }, + "ForAllValues:StringEquals": {"aws:TagKeys": "CostCenter"} + } + } + ] + }, + { + "Version": "2012-10-17", + "Statement": { + "Effect": "Allow", + "NotAction": "s3:ListBucket", + "Resource": "arn:aws:s3:::example_bucket" + } + }, + { + "Version": "2012-10-17", + "Statement": { + "Effect": "Deny", + "Action": "s3:*", + "NotResource": [ + "arn:aws:s3:::HRBucket/Payroll", + "arn:aws:s3:::HRBucket/Payroll/*" + ] + } + }, + { + "Version": "2012-10-17", + "Id": "sdfsdfsdf", + "Statement": { + "Effect": "Allow", + "NotAction": "s3:ListBucket", + "Resource": "arn:aws:s3:::example_bucket" + } + }, + { + "Version": "2012-10-17", + "Statement": + { + "Effect": "Allow", + "Action": "aaaaaadsfdsafsadfsadfaaaaa:ListBucket", + "Resource": "arn:aws:s3:::example_bucket" + } + }, + { + "Version": "2012-10-17", + "Statement": + { + "Effect": "Allow", + "Action": "s3-s:ListBucket", + "Resource": "arn:aws:s3:::example_bucket" + } + }, + { + "Version": "2012-10-17", + "Statement": + { + "Effect": "Allow", + "Action": "s3.s:ListBucket", + "Resource": "arn:aws:s3:::example_bucket" + } + }, + { + "Version": "2012-10-17", + "Statement": + { + "Effect": "Allow", + "NotAction": "s3:ListBucket", + "NotResource": "*" + } + }, + { + "Version": "2012-10-17", + "Statement": [ + { + "Sid": "sdf", + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": "arn:aws:s3:::example_bucket" + }, + { + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": "arn:aws:s3:::example_bucket" + } + ] + }, + { + "Version": "2012-10-17", + "Statement": { + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": "arn:aws:s3:::example_bucket", + "Condition": { + "DateGreaterThan": { + "a": "01T" + } + } + } + }, + { + "Version": "2012-10-17", + "Statement": { + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": "arn:aws:s3:::example_bucket", + "Condition": { + "x": { + }, + "y": {} + } + } + }, + { + "Version": "2012-10-17", + "Statement": { + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": "arn:aws:s3:::example_bucket", + "Condition": { + "StringEqualsIfExists": { + "a": "asf" + } + } + } + }, + { + "Version": "2012-10-17", + "Statement": { + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": "arn:aws:s3:::example_bucket", + "Condition": { + "ForAnyValue:StringEqualsIfExists": { + "a": "asf" + } + } + } + }, + { + "Version": "2012-10-17", + "Statement": { + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": "arn:aws:s3:::example_bucket", + "Condition": { + "DateLessThanEquals": { + "a": "2019-07-01T13:20:15Z" + } + } + } + }, + { + "Version": "2012-10-17", + "Statement": { + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": "arn:aws:s3:::example_bucket", + "Condition": { + "DateLessThanEquals": { + "a": "2016-12-13T21:20:37.593194+00:00" + } + } + } + }, + { + "Version": "2012-10-17", + "Statement": { + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": "arn:aws:s3:::example_bucket", + "Condition": { + "DateLessThanEquals": { + "a": "2016-12-13t2:00:00.593194+23" + } + } + } + }, + { + "Version": "2012-10-17", + "Statement": { + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": "arn:aws:s3:::example_bucket", + "Condition": { + "DateLessThan": { + "a": "-292275054" + } + } + } + }, + { + "Version": "2012-10-17", + "Statement": [ + { + "Sid": "AllowViewAccountInfo", + 
"Effect": "Allow", + "Action": [ + "iam:GetAccountPasswordPolicy", + "iam:GetAccountSummary", + "iam:ListVirtualMFADevices" + ], + "Resource": "*" + }, + { + "Sid": "AllowManageOwnPasswords", + "Effect": "Allow", + "Action": [ + "iam:ChangePassword", + "iam:GetUser" + ], + "Resource": "arn:aws:iam::*:user/${aws:username}" + }, + { + "Sid": "AllowManageOwnAccessKeys", + "Effect": "Allow", + "Action": [ + "iam:CreateAccessKey", + "iam:DeleteAccessKey", + "iam:ListAccessKeys", + "iam:UpdateAccessKey" + ], + "Resource": "arn:aws:iam::*:user/${aws:username}" + }, + { + "Sid": "AllowManageOwnSigningCertificates", + "Effect": "Allow", + "Action": [ + "iam:DeleteSigningCertificate", + "iam:ListSigningCertificates", + "iam:UpdateSigningCertificate", + "iam:UploadSigningCertificate" + ], + "Resource": "arn:aws:iam::*:user/${aws:username}" + }, + { + "Sid": "AllowManageOwnSSHPublicKeys", + "Effect": "Allow", + "Action": [ + "iam:DeleteSSHPublicKey", + "iam:GetSSHPublicKey", + "iam:ListSSHPublicKeys", + "iam:UpdateSSHPublicKey", + "iam:UploadSSHPublicKey" + ], + "Resource": "arn:aws:iam::*:user/${aws:username}" + }, + { + "Sid": "AllowManageOwnGitCredentials", + "Effect": "Allow", + "Action": [ + "iam:CreateServiceSpecificCredential", + "iam:DeleteServiceSpecificCredential", + "iam:ListServiceSpecificCredentials", + "iam:ResetServiceSpecificCredential", + "iam:UpdateServiceSpecificCredential" + ], + "Resource": "arn:aws:iam::*:user/${aws:username}" + }, + { + "Sid": "AllowManageOwnVirtualMFADevice", + "Effect": "Allow", + "Action": [ + "iam:CreateVirtualMFADevice", + "iam:DeleteVirtualMFADevice" + ], + "Resource": "arn:aws:iam::*:mfa/${aws:username}" + }, + { + "Sid": "AllowManageOwnUserMFA", + "Effect": "Allow", + "Action": [ + "iam:DeactivateMFADevice", + "iam:EnableMFADevice", + "iam:ListMFADevices", + "iam:ResyncMFADevice" + ], + "Resource": "arn:aws:iam::*:user/${aws:username}" + }, + { + "Sid": "DenyAllExceptListedIfNoMFA", + "Effect": "Deny", + "NotAction": [ + "iam:CreateVirtualMFADevice", + "iam:EnableMFADevice", + "iam:GetUser", + "iam:ListMFADevices", + "iam:ListVirtualMFADevices", + "iam:ResyncMFADevice", + "sts:GetSessionToken" + ], + "Resource": "*", + "Condition": { + "BoolIfExists": { + "aws:MultiFactorAuthPresent": "false" + } + } + } + ] + }, + { + "Version": "2012-10-17", + "Statement": [ + { + "Sid": "ListAndDescribe", + "Effect": "Allow", + "Action": [ + "dynamodb:List*", + "dynamodb:DescribeReservedCapacity*", + "dynamodb:DescribeLimits", + "dynamodb:DescribeTimeToLive" + ], + "Resource": "*" + }, + { + "Sid": "SpecificTable", + "Effect": "Allow", + "Action": [ + "dynamodb:BatchGet*", + "dynamodb:DescribeStream", + "dynamodb:DescribeTable", + "dynamodb:Get*", + "dynamodb:Query", + "dynamodb:Scan", + "dynamodb:BatchWrite*", + "dynamodb:CreateTable", + "dynamodb:Delete*", + "dynamodb:Update*", + "dynamodb:PutItem" + ], + "Resource": "arn:aws:dynamodb:*:*:table/MyTable" + } + ] + }, + { + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Action": [ + "ec2:AttachVolume", + "ec2:DetachVolume" + ], + "Resource": [ + "arn:aws:ec2:*:*:volume/*", + "arn:aws:ec2:*:*:instance/*" + ], + "Condition": { + "ArnEquals": {"ec2:SourceInstanceARN": "arn:aws:ec2:*:*:instance/instance-id"} + } + } + ] + }, + { + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Action": [ + "ec2:AttachVolume", + "ec2:DetachVolume" + ], + "Resource": "arn:aws:ec2:*:*:instance/*", + "Condition": { + "StringEquals": {"ec2:ResourceTag/Department": "Development"} + } + }, + { + 
"Effect": "Allow", + "Action": [ + "ec2:AttachVolume", + "ec2:DetachVolume" + ], + "Resource": "arn:aws:ec2:*:*:volume/*", + "Condition": { + "StringEquals": {"ec2:ResourceTag/VolumeUser": "${aws:username}"} + } + } + ] + }, + { + "Version": "2012-10-17", + "Statement": [ + { + "Sid": "StartStopIfTags", + "Effect": "Allow", + "Action": [ + "ec2:StartInstances", + "ec2:StopInstances", + "ec2:DescribeTags" + ], + "Resource": "arn:aws:ec2:region:account-id:instance/*", + "Condition": { + "StringEquals": { + "ec2:ResourceTag/Project": "DataAnalytics", + "aws:PrincipalTag/Department": "Data" + } + } + } + ] + }, + { + "Version": "2012-10-17", + "Statement": [ + { + "Sid": "ListYourObjects", + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": ["arn:aws:s3:::bucket-name"], + "Condition": { + "StringLike": { + "s3:prefix": ["cognito/application-name/${cognito-identity.amazonaws.com:sub}"] + } + } + }, + { + "Sid": "ReadWriteDeleteYourObjects", + "Effect": "Allow", + "Action": [ + "s3:GetObject", + "s3:PutObject", + "s3:DeleteObject" + ], + "Resource": [ + "arn:aws:s3:::bucket-name/cognito/application-name/${cognito-identity.amazonaws.com:sub}", + "arn:aws:s3:::bucket-name/cognito/application-name/${cognito-identity.amazonaws.com:sub}/*" + ] + } + ] + }, + { + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Action": [ + "s3:ListAllMyBuckets", + "s3:GetBucketLocation" + ], + "Resource": "*" + }, + { + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": "arn:aws:s3:::bucket-name", + "Condition": { + "StringLike": { + "s3:prefix": [ + "", + "home/", + "home/${aws:userid}/*" + ] + } + } + }, + { + "Effect": "Allow", + "Action": "s3:*", + "Resource": [ + "arn:aws:s3:::bucket-name/home/${aws:userid}", + "arn:aws:s3:::bucket-name/home/${aws:userid}/*" + ] + } + ] + }, + { + "Version": "2012-10-17", + "Statement": [ + { + "Sid": "ConsoleAccess", + "Effect": "Allow", + "Action": [ + "s3:GetAccountPublicAccessBlock", + "s3:GetBucketAcl", + "s3:GetBucketLocation", + "s3:GetBucketPolicyStatus", + "s3:GetBucketPublicAccessBlock", + "s3:ListAllMyBuckets" + ], + "Resource": "*" + }, + { + "Sid": "ListObjectsInBucket", + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": ["arn:aws:s3:::bucket-name"] + }, + { + "Sid": "AllObjectActions", + "Effect": "Allow", + "Action": "s3:*Object", + "Resource": ["arn:aws:s3:::bucket-name/*"] + } + ] + }, + { + "Version": "2012-10-17", + "Statement": [ + { + "Sid": "AllowViewAccountInfo", + "Effect": "Allow", + "Action": [ + "iam:GetAccountPasswordPolicy", + "iam:GetAccountSummary" + ], + "Resource": "*" + }, + { + "Sid": "AllowManageOwnPasswords", + "Effect": "Allow", + "Action": [ + "iam:ChangePassword", + "iam:GetUser" + ], + "Resource": "arn:aws:iam::*:user/${aws:username}" + }, + { + "Sid": "AllowManageOwnAccessKeys", + "Effect": "Allow", + "Action": [ + "iam:CreateAccessKey", + "iam:DeleteAccessKey", + "iam:ListAccessKeys", + "iam:UpdateAccessKey" + ], + "Resource": "arn:aws:iam::*:user/${aws:username}" + }, + { + "Sid": "AllowManageOwnSigningCertificates", + "Effect": "Allow", + "Action": [ + "iam:DeleteSigningCertificate", + "iam:ListSigningCertificates", + "iam:UpdateSigningCertificate", + "iam:UploadSigningCertificate" + ], + "Resource": "arn:aws:iam::*:user/${aws:username}" + }, + { + "Sid": "AllowManageOwnSSHPublicKeys", + "Effect": "Allow", + "Action": [ + "iam:DeleteSSHPublicKey", + "iam:GetSSHPublicKey", + "iam:ListSSHPublicKeys", + "iam:UpdateSSHPublicKey", + "iam:UploadSSHPublicKey" + ], + "Resource": 
"arn:aws:iam::*:user/${aws:username}" + }, + { + "Sid": "AllowManageOwnGitCredentials", + "Effect": "Allow", + "Action": [ + "iam:CreateServiceSpecificCredential", + "iam:DeleteServiceSpecificCredential", + "iam:ListServiceSpecificCredentials", + "iam:ResetServiceSpecificCredential", + "iam:UpdateServiceSpecificCredential" + ], + "Resource": "arn:aws:iam::*:user/${aws:username}" + } + ] + }, + { + "Version": "2012-10-17", + "Statement": [ + { + "Action": "ec2:*", + "Resource": "*", + "Effect": "Allow", + "Condition": { + "StringEquals": { + "ec2:Region": "region" + } + } + } + ] + }, + { + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Action": "rds:*", + "Resource": ["arn:aws:rds:region:*:*"] + }, + { + "Effect": "Allow", + "Action": ["rds:Describe*"], + "Resource": ["*"] + } + ] + } +] + + +def test_create_policy_with_invalid_policy_documents(): + for test_case in invalid_policy_document_test_cases: + yield check_create_policy_with_invalid_policy_document, test_case + + +def test_create_policy_with_valid_policy_documents(): + for valid_policy_document in valid_policy_documents: + yield check_create_policy_with_valid_policy_document, valid_policy_document + + +@mock_iam +def check_create_policy_with_invalid_policy_document(test_case): + conn = boto3.client('iam', region_name='us-east-1') + with assert_raises(ClientError) as ex: + conn.create_policy( + PolicyName="TestCreatePolicy", + PolicyDocument=json.dumps(test_case["document"])) + ex.exception.response['Error']['Code'].should.equal('MalformedPolicyDocument') + ex.exception.response['ResponseMetadata']['HTTPStatusCode'].should.equal(400) + ex.exception.response['Error']['Message'].should.equal(test_case["error_message"]) + + +@mock_iam +def check_create_policy_with_valid_policy_document(valid_policy_document): + conn = boto3.client('iam', region_name='us-east-1') + conn.create_policy( + PolicyName="TestCreatePolicy", + PolicyDocument=json.dumps(valid_policy_document)) diff --git a/tests/test_ses/test_ses_sns_boto3.py b/tests/test_ses/test_ses_sns_boto3.py new file mode 100644 index 000000000000..37f79a8b04d9 --- /dev/null +++ b/tests/test_ses/test_ses_sns_boto3.py @@ -0,0 +1,114 @@ +from __future__ import unicode_literals + +import boto3 +import json +from botocore.exceptions import ClientError +from six.moves.email_mime_multipart import MIMEMultipart +from six.moves.email_mime_text import MIMEText + +import sure # noqa +from nose import tools +from moto import mock_ses, mock_sns, mock_sqs +from moto.ses.models import SESFeedback + + +@mock_ses +def test_enable_disable_ses_sns_communication(): + conn = boto3.client('ses', region_name='us-east-1') + conn.set_identity_notification_topic( + Identity='test.com', + NotificationType='Bounce', + SnsTopic='the-arn' + ) + conn.set_identity_notification_topic( + Identity='test.com', + NotificationType='Bounce' + ) + + +def __setup_feedback_env__(ses_conn, sns_conn, sqs_conn, domain, topic, queue, region, expected_msg): + """Setup the AWS environment to test the SES SNS Feedback""" + # Environment setup + # Create SQS queue + sqs_conn.create_queue(QueueName=queue) + # Create SNS topic + create_topic_response = sns_conn.create_topic(Name=topic) + topic_arn = create_topic_response["TopicArn"] + # Subscribe the SNS topic to the SQS queue + sns_conn.subscribe(TopicArn=topic_arn, + Protocol="sqs", + Endpoint="arn:aws:sqs:%s:123456789012:%s" % (region, queue)) + # Verify SES domain + ses_conn.verify_domain_identity(Domain=domain) + # Setup SES notification topic + if expected_msg 
is not None: + ses_conn.set_identity_notification_topic( + Identity=domain, + NotificationType=expected_msg, + SnsTopic=topic_arn + ) + + +def __test_sns_feedback__(addr, expected_msg): + region_name = "us-east-1" + ses_conn = boto3.client('ses', region_name=region_name) + sns_conn = boto3.client('sns', region_name=region_name) + sqs_conn = boto3.resource('sqs', region_name=region_name) + domain = "example.com" + topic = "bounce-arn-feedback" + queue = "feedback-test-queue" + + __setup_feedback_env__(ses_conn, sns_conn, sqs_conn, domain, topic, queue, region_name, expected_msg) + + # Send the message + kwargs = dict( + Source="test@" + domain, + Destination={ + "ToAddresses": [addr + "@" + domain], + "CcAddresses": ["test_cc@" + domain], + "BccAddresses": ["test_bcc@" + domain], + }, + Message={ + "Subject": {"Data": "test subject"}, + "Body": {"Text": {"Data": "test body"}} + } + ) + ses_conn.send_email(**kwargs) + + # Wait for messages in the queues + queue = sqs_conn.get_queue_by_name(QueueName=queue) + messages = queue.receive_messages(MaxNumberOfMessages=1) + if expected_msg is not None: + msg = messages[0].body + msg = json.loads(msg) + assert msg["Message"] == SESFeedback.generate_message(expected_msg) + else: + assert len(messages) == 0 + + +@mock_sqs +@mock_sns +@mock_ses +def test_no_sns_feedback(): + __test_sns_feedback__("test", None) + + +@mock_sqs +@mock_sns +@mock_ses +def test_sns_feedback_bounce(): + __test_sns_feedback__(SESFeedback.BOUNCE_ADDR, SESFeedback.BOUNCE) + + +@mock_sqs +@mock_sns +@mock_ses +def test_sns_feedback_complaint(): + __test_sns_feedback__(SESFeedback.COMPLAINT_ADDR, SESFeedback.COMPLAINT) + + +@mock_sqs +@mock_sns +@mock_ses +def test_sns_feedback_delivery(): + __test_sns_feedback__(SESFeedback.SUCCESS_ADDR, SESFeedback.DELIVERY) diff --git a/update_version_from_git.py b/update_version_from_git.py new file mode 100644 index 000000000000..d72dc4ae96c2 --- /dev/null +++ b/update_version_from_git.py @@ -0,0 +1,120 @@ +""" +Adapted from https://github.com/pygame/pygameweb/blob/master/pygameweb/builds/update_version_from_git.py + +For updating the version from git. +__init__.py contains a __version__ field. +Update that. +If we are on master, we want to update the version as a pre-release. +git describe --tags +With these: + __init__.py + __version__= '0.0.2' + git describe --tags + 0.0.1-22-g729a5ae +We want this: + __init__.py + __version__= '0.0.2.dev22.g729a5ae' +Get the branch/tag name with this. 
+ git symbolic-ref -q --short HEAD || git describe --tags --exact-match +""" + +import io +import os +import re +import subprocess + + +def migrate_source_attribute(attr, to_this, target_file, regex): + """Updates __magic__ attributes in the source file""" + change_this = re.compile(regex, re.S) + new_file = [] + found = False + + with open(target_file, 'r') as fp: + lines = fp.readlines() + + for line in lines: + if line.startswith(attr): + found = True + line = re.sub(change_this, to_this, line) + new_file.append(line) + + if found: + with open(target_file, 'w') as fp: + fp.writelines(new_file) + +def migrate_version(target_file, new_version): + """Updates __version__ in the source file""" + regex = r"['\"](.*)['\"]" + migrate_source_attribute('__version__', "'{new_version}'".format(new_version=new_version), target_file, regex) + + +def is_master_branch(): + cmd = ('git rev-parse --abbrev-ref HEAD') + tag_branch = subprocess.check_output(cmd, shell=True) + return tag_branch in [b'master\n'] + +def git_tag_name(): + cmd = ('git describe --tags') + tag_branch = subprocess.check_output(cmd, shell=True) + tag_branch = tag_branch.decode().strip() + return tag_branch + +def get_git_version_info(): + cmd = 'git describe --tags' + ver_str = subprocess.check_output(cmd, shell=True) + ver, commits_since, githash = ver_str.decode().strip().split('-') + return ver, commits_since, githash + +def prerelease_version(): + """ return what the prerelease version should be. + https://packaging.python.org/tutorials/distributing-packages/#pre-release-versioning + 0.0.2.dev22 + """ + ver, commits_since, githash = get_git_version_info() + initpy_ver = get_version() + + assert len(initpy_ver.split('.')) in [3, 4], 'moto/__init__.py version should be like 0.0.2.dev' + assert initpy_ver > ver, 'the moto/__init__.py version should be newer than the last tagged release.' + return '{initpy_ver}.{commits_since}'.format(initpy_ver=initpy_ver, commits_since=commits_since) + +def read(*parts): + """ Reads in file from *parts. + """ + try: + return io.open(os.path.join(*parts), 'r', encoding='utf-8').read() + except IOError: + return '' + +def get_version(): + """ Returns version from moto/__init__.py + """ + version_file = read('moto', '__init__.py') + version_match = re.search(r'^__version__ = [\'"]([^\'"]*)[\'"]', + version_file, re.MULTILINE) + if version_match: + return version_match.group(1) + raise RuntimeError('Unable to find version string.') + + +def release_version_correct(): + """Makes sure the: + - prerelease verion for master is correct. + - release version is correct for tags. + """ + if is_master_branch(): + # update for a pre release version. 
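(A quick illustration of the version arithmetic implemented by get_git_version_info() and prerelease_version() above, with hypothetical inputs; _sketch_prerelease is illustrative and not part of the patch. Note the script's docstring shows a slightly different target format; this sketch follows the code as written.)

def _sketch_prerelease(initpy_version, describe_output):
    # Same string manipulation as prerelease_version(): take the commit
    # count reported by `git describe --tags` and append it to the version
    # already recorded in moto/__init__.py.
    ver, commits_since, githash = describe_output.split('-')
    return '{0}.{1}'.format(initpy_version, commits_since)

assert _sketch_prerelease('0.0.2.dev', '0.0.1-22-g729a5ae') == '0.0.2.dev.22'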
+ initpy = os.path.abspath("moto/__init__.py") + + new_version = prerelease_version() + print('updating version in __init__.py to {new_version}'.format(new_version=new_version)) + assert len(new_version.split('.')) >= 4, 'moto/__init__.py version should be like 0.0.2.dev' + migrate_version(initpy, new_version) + else: + assert False, "No non-master deployments yet" + # check that we are a tag with the same version as in __init__.py + assert get_version() == git_tag_name(), 'git tag/branch name not the same as moto/__init__.py __verion__' + + +if __name__ == '__main__': + release_version_correct() From c95d472bf5ef9746521cf54c83bb61333c3eafcd Mon Sep 17 00:00:00 2001 From: Niels Laukens Date: Tue, 3 Sep 2019 14:54:46 +0200 Subject: [PATCH 024/658] Add (failing) test for ElasticBeanstalk --- tests/test_eb/test_eb.py | 15 +++++++++++++++ 1 file changed, 15 insertions(+) create mode 100644 tests/test_eb/test_eb.py diff --git a/tests/test_eb/test_eb.py b/tests/test_eb/test_eb.py new file mode 100644 index 000000000000..924ed3adc45f --- /dev/null +++ b/tests/test_eb/test_eb.py @@ -0,0 +1,15 @@ +import boto3 +from moto import mock_eb + + +@mock_eb +def test_application(): + # Create Elastic Beanstalk Application + eb_client = boto3.client('elasticbeanstalk', region_name='us-east-1') + + eb_client.create_application( + ApplicationName="myapp", + ) + + eb_apps = eb_client.describe_applications() + eb_apps['Applications'][0]['ApplicationName'].should.equal("myapp") From 336f50349af0eddbb5d82776258abd9f847fa989 Mon Sep 17 00:00:00 2001 From: Niels Laukens Date: Tue, 3 Sep 2019 16:10:32 +0200 Subject: [PATCH 025/658] Add sub-minimal mocking of elasticbeanstalk:create_application() --- moto/__init__.py | 1 + moto/eb/__init__.py | 4 ++ moto/eb/exceptions.py | 7 +++ moto/eb/models.py | 37 ++++++++++++++++ moto/eb/responses.py | 92 ++++++++++++++++++++++++++++++++++++++++ moto/eb/urls.py | 11 +++++ tests/test_eb/test_eb.py | 34 ++++++++++++--- 7 files changed, 181 insertions(+), 5 deletions(-) create mode 100644 moto/eb/__init__.py create mode 100644 moto/eb/exceptions.py create mode 100644 moto/eb/models.py create mode 100644 moto/eb/responses.py create mode 100644 moto/eb/urls.py diff --git a/moto/__init__.py b/moto/__init__.py index 8594cedd2526..7cb6d0e39520 100644 --- a/moto/__init__.py +++ b/moto/__init__.py @@ -18,6 +18,7 @@ from .dynamodb import mock_dynamodb, mock_dynamodb_deprecated # flake8: noqa from .dynamodb2 import mock_dynamodb2, mock_dynamodb2_deprecated # flake8: noqa from .dynamodbstreams import mock_dynamodbstreams # flake8: noqa +from .eb import mock_eb # flake8: noqa from .ec2 import mock_ec2, mock_ec2_deprecated # flake8: noqa from .ecr import mock_ecr, mock_ecr_deprecated # flake8: noqa from .ecs import mock_ecs, mock_ecs_deprecated # flake8: noqa diff --git a/moto/eb/__init__.py b/moto/eb/__init__.py new file mode 100644 index 000000000000..3e06e959525c --- /dev/null +++ b/moto/eb/__init__.py @@ -0,0 +1,4 @@ +from .models import eb_backends +from moto.core.models import base_decorator + +mock_eb = base_decorator(eb_backends) diff --git a/moto/eb/exceptions.py b/moto/eb/exceptions.py new file mode 100644 index 000000000000..c470d531779c --- /dev/null +++ b/moto/eb/exceptions.py @@ -0,0 +1,7 @@ +from moto.core.exceptions import RESTError + + +class InvalidParameterValueError(RESTError): + def __init__(self, message): + super(InvalidParameterValueError, self).__init__( + "InvalidParameterValue", message) diff --git a/moto/eb/models.py b/moto/eb/models.py new file mode 100644 index 
000000000000..246d33cdebcc --- /dev/null +++ b/moto/eb/models.py @@ -0,0 +1,37 @@ +import boto.beanstalk + +from moto.core import BaseBackend, BaseModel +from .exceptions import InvalidParameterValueError + + +class FakeApplication(BaseModel): + def __init__(self, application_name): + self.application_name = application_name + + +class EBBackend(BaseBackend): + def __init__(self, region): + self.region = region + self.applications = dict() + + def reset(self): + # preserve region + region = self.region + self._reset_model_refs() + self.__dict__ = {} + self.__init__(region) + + def create_application(self, application_name): + if application_name in self.applications: + raise InvalidParameterValueError( + "Application {} already exists.".format(application_name) + ) + new_app = FakeApplication( + application_name=application_name, + ) + self.applications[application_name] = new_app + return new_app + + +eb_backends = dict((region.name, EBBackend(region.name)) + for region in boto.beanstalk.regions()) diff --git a/moto/eb/responses.py b/moto/eb/responses.py new file mode 100644 index 000000000000..9cf8b2e47aa2 --- /dev/null +++ b/moto/eb/responses.py @@ -0,0 +1,92 @@ +from moto.core.responses import BaseResponse +from .models import eb_backends + +EB_CREATE_APPLICATION = """ + + + + + 2019-09-03T13:08:29.049Z + + + + false + 180 + false + + + false + 200 + false + + + + arn:aws:elasticbeanstalk:{{ region_name }}:111122223333:application/{{ application_name }} + {{ application.application_name }} + 2019-09-03T13:08:29.049Z + + + + 1b6173c8-13aa-4b0a-99e9-eb36a1fb2778 + + +""" + + +EB_DESCRIBE_APPLICATIONS = """ + + + + {% for application in applications %} + + + 2019-09-03T13:08:29.049Z + + + + 180 + false + false + + + false + 200 + false + + + + arn:aws:elasticbeanstalk:{{ region_name }}:387323646340:application/{{ application.name }} + {{ application.application_name }} + 2019-09-03T13:08:29.049Z + + {% endfor %} + + + + 015a05eb-282e-4b76-bd18-663fdfaf42e4 + + +""" + + +class EBResponse(BaseResponse): + @property + def backend(self): + return eb_backends[self.region] + + def create_application(self): + app = self.backend.create_application( + application_name=self._get_param('ApplicationName'), + ) + + template = self.response_template(EB_CREATE_APPLICATION) + return template.render( + region_name=self.backend.region, + application=app, + ) + + def describe_applications(self): + template = self.response_template(EB_DESCRIBE_APPLICATIONS) + return template.render( + applications=self.backend.applications.values(), + ) diff --git a/moto/eb/urls.py b/moto/eb/urls.py new file mode 100644 index 000000000000..4cd4add13793 --- /dev/null +++ b/moto/eb/urls.py @@ -0,0 +1,11 @@ +from __future__ import unicode_literals + +from .responses import EBResponse + +url_bases = [ + r"https?://elasticbeanstalk.(?P<region>[a-zA-Z0-9\-_]+).amazonaws.com", +] + +url_paths = { + '{0}/$': EBResponse.dispatch, +} diff --git a/tests/test_eb/test_eb.py b/tests/test_eb/test_eb.py index 924ed3adc45f..9e863e7f5e28 100644 --- a/tests/test_eb/test_eb.py +++ b/tests/test_eb/test_eb.py @@ -1,15 +1,39 @@ import boto3 +import sure # noqa +from botocore.exceptions import ClientError + from moto import mock_eb @mock_eb -def test_application(): +def test_create_application(): # Create Elastic Beanstalk Application - eb_client = boto3.client('elasticbeanstalk', region_name='us-east-1') + conn = boto3.client('elasticbeanstalk', region_name='us-east-1') + app = conn.create_application( + ApplicationName="myapp", + ) +
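(The reset() override in moto/eb/models.py above looks unusual: it wipes the instance and re-runs __init__ so that moto can clear all state between @mock_eb tests while keeping the one piece of construction-time state, the region. A standalone sketch of the idiom; SketchBackend is illustrative, not part of the patch.)

class SketchBackend(object):
    def __init__(self, region):
        self.region = region
        self.applications = dict()

    def reset(self):
        region = self.region   # the only construction-time state to keep
        self.__dict__ = {}     # drop everything a test created
        self.__init__(region)  # rebuild a clean backend for the same region

backend = SketchBackend('us-east-1')
backend.applications['myapp'] = object()
backend.reset()
assert backend.region == 'us-east-1'
assert backend.applications == {}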
app['Application']['ApplicationName'].should.equal("myapp") + + +@mock_eb +def test_create_application_dup(): + conn = boto3.client('elasticbeanstalk', region_name='us-east-1') + conn.create_application( + ApplicationName="myapp", + ) + conn.create_application.when.called_with( + ApplicationName="myapp", + ).should.throw(ClientError) - eb_client.create_application( + +@mock_eb +def test_describe_applications(): + # Create Elastic Beanstalk Application + conn = boto3.client('elasticbeanstalk', region_name='us-east-1') + conn.create_application( ApplicationName="myapp", ) - eb_apps = eb_client.describe_applications() - eb_apps['Applications'][0]['ApplicationName'].should.equal("myapp") + apps = conn.describe_applications() + len(apps['Applications']).should.equal(1) + apps['Applications'][0]['ApplicationName'].should.equal('myapp') From 6f23a39fc26c3cf51b4f0e2b49277be85024d666 Mon Sep 17 00:00:00 2001 From: Niels Laukens Date: Wed, 4 Sep 2019 15:33:15 +0200 Subject: [PATCH 026/658] Add minimal mocking of elasticbeanstalk:create_environment, describe_environments and list_available_solution_stacks --- moto/eb/models.py | 25 + moto/eb/responses.py | 1297 +++++++++++++++++++++++++++++++++++++- tests/test_eb/test_eb.py | 41 ++ 3 files changed, 1343 insertions(+), 20 deletions(-) diff --git a/moto/eb/models.py b/moto/eb/models.py index 246d33cdebcc..5b4655175556 100644 --- a/moto/eb/models.py +++ b/moto/eb/models.py @@ -1,12 +1,37 @@ +import weakref + import boto.beanstalk from moto.core import BaseBackend, BaseModel from .exceptions import InvalidParameterValueError +class FakeEnvironment(BaseModel): + def __init__(self, application, environment_name): + self.environment_name = environment_name + self.application = weakref.proxy(application) # weakref to break circular dependencies + + @property + def application_name(self): + return self.application.application_name + + class FakeApplication(BaseModel): def __init__(self, application_name): self.application_name = application_name + self.environments = dict() + + def create_environment(self, environment_name): + if environment_name in self.environments: + raise InvalidParameterValueError + + env = FakeEnvironment( + application=self, + environment_name=environment_name, + ) + self.environments[environment_name] = env + + return env class EBBackend(BaseBackend): diff --git a/moto/eb/responses.py b/moto/eb/responses.py index 9cf8b2e47aa2..fecdb8c21b97 100644 --- a/moto/eb/responses.py +++ b/moto/eb/responses.py @@ -1,5 +1,67 @@ from moto.core.responses import BaseResponse -from .models import eb_backends +from .models import eb_backends, EBBackend +from .exceptions import InvalidParameterValueError + + +class EBResponse(BaseResponse): + @property + def backend(self): + """ + :rtype: EBBackend + """ + return eb_backends[self.region] + + def create_application(self): + app = self.backend.create_application( + application_name=self._get_param('ApplicationName'), + ) + + template = self.response_template(EB_CREATE_APPLICATION) + return template.render( + region_name=self.backend.region, + application=app, + ) + + def describe_applications(self): + template = self.response_template(EB_DESCRIBE_APPLICATIONS) + return template.render( + applications=self.backend.applications.values(), + ) + + def create_environment(self): + application_name = self._get_param('ApplicationName') + environment_name = self._get_param('EnvironmentName') + try: + app = self.backend.applications[application_name] + except KeyError: + raise InvalidParameterValueError( + "No 
Application named \'{}\' found.".format(application_name) + ) + + env = app.create_environment(environment_name=environment_name) + + template = self.response_template(EB_CREATE_ENVIRONMENT) + return template.render( + environment=env, + region=self.backend.region, + ) + + def describe_environments(self): + envs = [] + + for app in self.backend.applications.values(): + for env in app.environments.values(): + envs.append(env) + + template = self.response_template(EB_DESCRIBE_ENVIRONMENTS) + return template.render( + environments=envs, + ) + + @staticmethod + def list_available_solution_stacks(): + return EB_LIST_AVAILABLE_SOLUTION_STACKS + EB_CREATE_APPLICATION = """ @@ -55,7 +117,7 @@ - arn:aws:elasticbeanstalk:{{ region_name }}:387323646340:application/{{ application.name }} + arn:aws:elasticbeanstalk:{{ region_name }}:111122223333:application/{{ application.name }} {{ application.application_name }} 2019-09-03T13:08:29.049Z @@ -69,24 +131,1219 @@ """ -class EBResponse(BaseResponse): - @property - def backend(self): - return eb_backends[self.region] +EB_CREATE_ENVIRONMENT = """ + + + {{ environment.solution_stack_name }} + Grey + arn:aws:elasticbeanstalk:{{ region }}:111122223333:environment/{{ environment.application_name }}/{{ environment.environment_name }} + 2019-09-04T09:41:24.222Z + 2019-09-04T09:41:24.222Z + {{ environment_id }} + arn:aws:elasticbeanstalk:{{ region }}::platform/{{ environment.platform_arn }} + + WebServer + Standard + 1.0 + + {{ environment.environment_name }} + {{ environment.application_name }} + Launching + + + 18dc8158-f5d7-4d5a-82ef-07fcaadf81c6 + + +""" - def create_application(self): - app = self.backend.create_application( - application_name=self._get_param('ApplicationName'), - ) - template = self.response_template(EB_CREATE_APPLICATION) - return template.render( - region_name=self.backend.region, - application=app, - ) +EB_DESCRIBE_ENVIRONMENTS = """ + + + + {% for env in environments %} + + {{ env.solution_stack_name }} + Grey + arn:aws:elasticbeanstalk:{{ region }}:123456789012:environment/{{ env.application_name }}/{{ env.environment_name }} + false + 2019-08-30T09:35:10.913Z + false + + 2019-08-22T07:02:47.332Z + {{ env.environment_id }} + 1 + arn:aws:elasticbeanstalk:{{ region }}::platform/{{ env.platform_arn }} + + WebServer + Standard + 1.0 + + No Data + {{ env.environment_name }} + + + + {{ env.application_name }} + Ready + + {% endfor %} + + + + dd56b215-01a0-40b2-bd1e-57589c39424f + + +""" - def describe_applications(self): - template = self.response_template(EB_DESCRIBE_APPLICATIONS) - return template.render( - applications=self.backend.applications.values(), - ) + +# Current list as of 2019-09-04 +EB_LIST_AVAILABLE_SOLUTION_STACKS = """ + + + + 64bit Amazon Linux 2018.03 v4.10.1 running Node.js + 64bit Amazon Linux 2018.03 v4.9.2 running Node.js + 64bit Amazon Linux 2018.03 v4.8.0 running Node.js + 64bit Amazon Linux 2018.03 v4.6.0 running Node.js + 64bit Amazon Linux 2018.03 v4.5.3 running Node.js + 64bit Amazon Linux 2018.03 v4.5.1 running Node.js + 64bit Amazon Linux 2018.03 v4.5.0 running Node.js + 64bit Amazon Linux 2017.09 v4.4.6 running Node.js + 64bit Amazon Linux 2017.09 v4.4.5 running Node.js + 64bit Amazon Linux 2017.09 v4.4.4 running Node.js + 64bit Amazon Linux 2017.09 v4.4.2 running Node.js + 64bit Amazon Linux 2017.09 v4.4.0 running Node.js + 64bit Amazon Linux 2017.03 v4.3.0 running Node.js + 64bit Amazon Linux 2017.03 v4.2.2 running Node.js + 64bit Amazon Linux 2017.03 v4.2.1 running Node.js + 64bit Amazon Linux 2017.03 v4.2.0 
running Node.js + 64bit Amazon Linux 2017.03 v4.1.1 running Node.js + 64bit Amazon Linux 2017.03 v4.1.0 running Node.js + 64bit Amazon Linux 2016.09 v4.0.1 running Node.js + 64bit Amazon Linux 2016.09 v4.0.0 running Node.js + 64bit Amazon Linux 2016.09 v3.3.1 running Node.js + 64bit Amazon Linux 2016.09 v3.1.0 running Node.js + 64bit Amazon Linux 2018.03 v2.8.14 running PHP 5.4 + 64bit Amazon Linux 2018.03 v2.8.14 running PHP 5.5 + 64bit Amazon Linux 2018.03 v2.8.14 running PHP 5.6 + 64bit Amazon Linux 2018.03 v2.8.14 running PHP 7.0 + 64bit Amazon Linux 2018.03 v2.8.14 running PHP 7.1 + 64bit Amazon Linux 2018.03 v2.8.14 running PHP 7.2 + 64bit Amazon Linux 2018.03 v2.8.12 running PHP 7.2 + 64bit Amazon Linux 2018.03 v2.8.7 running PHP 7.1 + 64bit Amazon Linux 2018.03 v2.8.6 running PHP 7.1 + 64bit Amazon Linux 2018.03 v2.8.6 running PHP 7.2 + 64bit Amazon Linux 2018.03 v2.8.5 running PHP 7.2 + 64bit Amazon Linux 2018.03 v2.8.4 running PHP 7.2 + 64bit Amazon Linux 2018.03 v2.8.3 running PHP 7.2 + 64bit Amazon Linux 2018.03 v2.8.2 running PHP 7.2 + 64bit Amazon Linux 2018.03 v2.8.1 running PHP 7.2 + 64bit Amazon Linux 2018.03 v2.8.0 running PHP 7.1 + 64bit Amazon Linux 2018.03 v2.7.1 running PHP 5.6 + 64bit Amazon Linux 2018.03 v2.7.1 running PHP 7.0 + 64bit Amazon Linux 2018.03 v2.7.1 running PHP 7.1 + 64bit Amazon Linux 2018.03 v2.7.0 running PHP 7.0 + 64bit Amazon Linux 2018.03 v2.7.0 running PHP 7.1 + 64bit Amazon Linux 2017.09 v2.6.6 running PHP 5.4 + 64bit Amazon Linux 2017.09 v2.6.6 running PHP 5.6 + 64bit Amazon Linux 2017.09 v2.6.6 running PHP 7.0 + 64bit Amazon Linux 2017.09 v2.6.5 running PHP 7.0 + 64bit Amazon Linux 2017.09 v2.6.4 running PHP 5.4 + 64bit Amazon Linux 2017.09 v2.6.4 running PHP 5.5 + 64bit Amazon Linux 2017.09 v2.6.4 running PHP 5.6 + 64bit Amazon Linux 2017.09 v2.6.4 running PHP 7.0 + 64bit Amazon Linux 2017.09 v2.6.4 running PHP 7.1 + 64bit Amazon Linux 2017.09 v2.6.3 running PHP 5.4 + 64bit Amazon Linux 2017.09 v2.6.3 running PHP 5.5 + 64bit Amazon Linux 2017.09 v2.6.3 running PHP 5.6 + 64bit Amazon Linux 2017.09 v2.6.3 running PHP 7.0 + 64bit Amazon Linux 2017.09 v2.6.3 running PHP 7.1 + 64bit Amazon Linux 2017.09 v2.6.2 running PHP 5.6 + 64bit Amazon Linux 2017.09 v2.6.2 running PHP 7.0 + 64bit Amazon Linux 2017.09 v2.6.1 running PHP 7.0 + 64bit Amazon Linux 2017.09 v2.6.0 running PHP 5.4 + 64bit Amazon Linux 2017.09 v2.6.0 running PHP 5.5 + 64bit Amazon Linux 2017.09 v2.6.0 running PHP 5.6 + 64bit Amazon Linux 2017.09 v2.6.0 running PHP 7.0 + 64bit Amazon Linux 2017.09 v2.6.0 running PHP 7.1 + 64bit Amazon Linux 2017.03 v2.5.0 running PHP 7.0 + 64bit Amazon Linux 2017.03 v2.5.0 running PHP 7.1 + 64bit Amazon Linux 2017.03 v2.4.4 running PHP 5.5 + 64bit Amazon Linux 2017.03 v2.4.4 running PHP 5.6 + 64bit Amazon Linux 2017.03 v2.4.4 running PHP 7.0 + 64bit Amazon Linux 2017.03 v2.4.3 running PHP 7.0 + 64bit Amazon Linux 2017.03 v2.4.2 running PHP 5.4 + 64bit Amazon Linux 2017.03 v2.4.2 running PHP 5.5 + 64bit Amazon Linux 2017.03 v2.4.2 running PHP 5.6 + 64bit Amazon Linux 2017.03 v2.4.2 running PHP 7.0 + 64bit Amazon Linux 2017.03 v2.4.1 running PHP 7.0 + 64bit Amazon Linux 2017.03 v2.4.0 running PHP 7.0 + 64bit Amazon Linux 2016.09 v2.3.2 running PHP 7.0 + 64bit Amazon Linux 2016.09 v2.3.1 running PHP 7.0 + 64bit Amazon Linux 2018.03 v2.9.1 running Python 3.6 + 64bit Amazon Linux 2018.03 v2.9.1 running Python 3.4 + 64bit Amazon Linux 2018.03 v2.9.1 running Python + 64bit Amazon Linux 2018.03 v2.9.1 running Python 2.7 + 64bit Amazon Linux 2018.03 v2.7.5 
running Python 3.6 + 64bit Amazon Linux 2018.03 v2.7.1 running Python 3.6 + 64bit Amazon Linux 2018.03 v2.7.0 running Python 3.6 + 64bit Amazon Linux 2017.09 v2.6.4 running Python 3.6 + 64bit Amazon Linux 2017.09 v2.6.1 running Python 3.6 + 64bit Amazon Linux 2017.03 v2.4.0 running Python 3.4 + 64bit Amazon Linux 2018.03 v2.10.1 running Ruby 2.6 (Puma) + 64bit Amazon Linux 2018.03 v2.10.1 running Ruby 2.5 (Puma) + 64bit Amazon Linux 2018.03 v2.10.1 running Ruby 2.4 (Puma) + 64bit Amazon Linux 2018.03 v2.10.1 running Ruby 2.3 (Puma) + 64bit Amazon Linux 2018.03 v2.10.1 running Ruby 2.2 (Puma) + 64bit Amazon Linux 2018.03 v2.10.1 running Ruby 2.1 (Puma) + 64bit Amazon Linux 2018.03 v2.10.1 running Ruby 2.0 (Puma) + 64bit Amazon Linux 2018.03 v2.10.1 running Ruby 2.6 (Passenger Standalone) + 64bit Amazon Linux 2018.03 v2.10.1 running Ruby 2.5 (Passenger Standalone) + 64bit Amazon Linux 2018.03 v2.10.1 running Ruby 2.4 (Passenger Standalone) + 64bit Amazon Linux 2018.03 v2.10.1 running Ruby 2.3 (Passenger Standalone) + 64bit Amazon Linux 2018.03 v2.10.1 running Ruby 2.2 (Passenger Standalone) + 64bit Amazon Linux 2018.03 v2.10.1 running Ruby 2.1 (Passenger Standalone) + 64bit Amazon Linux 2018.03 v2.10.1 running Ruby 2.0 (Passenger Standalone) + 64bit Amazon Linux 2018.03 v2.10.1 running Ruby 1.9.3 + 64bit Amazon Linux 2018.03 v2.8.0 running Ruby 2.5 (Passenger Standalone) + 64bit Amazon Linux 2017.03 v2.4.4 running Ruby 2.3 (Puma) + 64bit Amazon Linux 2017.03 v2.4.4 running Ruby 2.3 (Passenger Standalone) + 64bit Amazon Linux 2018.03 v3.2.1 running Tomcat 8.5 Java 8 + 64bit Amazon Linux 2018.03 v3.2.1 running Tomcat 8 Java 8 + 64bit Amazon Linux 2018.03 v3.2.1 running Tomcat 7 Java 7 + 64bit Amazon Linux 2018.03 v3.2.1 running Tomcat 7 Java 6 + 64bit Amazon Linux 2018.03 v3.1.1 running Tomcat 8.5 Java 8 + 64bit Amazon Linux 2017.03 v2.6.5 running Tomcat 8 Java 8 + 64bit Amazon Linux 2017.03 v2.6.2 running Tomcat 8 Java 8 + 64bit Amazon Linux 2017.03 v2.6.1 running Tomcat 8 Java 8 + 64bit Amazon Linux 2017.03 v2.6.0 running Tomcat 8 Java 8 + 64bit Amazon Linux 2016.09 v2.5.4 running Tomcat 8 Java 8 + 64bit Amazon Linux 2016.03 v2.1.0 running Tomcat 8 Java 8 + 64bit Windows Server Core 2016 v2.2.1 running IIS 10.0 + 64bit Windows Server 2016 v2.2.1 running IIS 10.0 + 64bit Windows Server Core 2012 R2 v2.2.1 running IIS 8.5 + 64bit Windows Server 2012 R2 v2.2.1 running IIS 8.5 + 64bit Windows Server Core 2016 v1.2.0 running IIS 10.0 + 64bit Windows Server 2016 v1.2.0 running IIS 10.0 + 64bit Windows Server Core 2012 R2 v1.2.0 running IIS 8.5 + 64bit Windows Server 2012 R2 v1.2.0 running IIS 8.5 + 64bit Windows Server 2012 v1.2.0 running IIS 8 + 64bit Windows Server 2008 R2 v1.2.0 running IIS 7.5 + 64bit Windows Server Core 2012 R2 running IIS 8.5 + 64bit Windows Server 2012 R2 running IIS 8.5 + 64bit Windows Server 2012 running IIS 8 + 64bit Windows Server 2008 R2 running IIS 7.5 + 64bit Amazon Linux 2018.03 v2.12.16 running Docker 18.06.1-ce + 64bit Amazon Linux 2016.09 v2.5.2 running Docker 1.12.6 + 64bit Amazon Linux 2018.03 v2.15.2 running Multi-container Docker 18.06.1-ce (Generic) + 64bit Debian jessie v2.12.16 running Go 1.4 (Preconfigured - Docker) + 64bit Debian jessie v2.12.16 running Go 1.3 (Preconfigured - Docker) + 64bit Debian jessie v2.12.16 running Python 3.4 (Preconfigured - Docker) + 64bit Debian jessie v2.10.0 running Python 3.4 (Preconfigured - Docker) + 64bit Amazon Linux 2018.03 v2.9.1 running Java 8 + 64bit Amazon Linux 2018.03 v2.9.1 running Java 7 + 64bit Amazon Linux 
2018.03 v2.8.0 running Java 8 + 64bit Amazon Linux 2018.03 v2.7.6 running Java 8 + 64bit Amazon Linux 2018.03 v2.7.5 running Java 8 + 64bit Amazon Linux 2018.03 v2.7.4 running Java 8 + 64bit Amazon Linux 2018.03 v2.7.2 running Java 8 + 64bit Amazon Linux 2018.03 v2.7.1 running Java 8 + 64bit Amazon Linux 2017.09 v2.6.8 running Java 8 + 64bit Amazon Linux 2017.09 v2.6.5 running Java 8 + 64bit Amazon Linux 2017.09 v2.6.4 running Java 8 + 64bit Amazon Linux 2017.09 v2.6.3 running Java 8 + 64bit Amazon Linux 2017.09 v2.6.0 running Java 8 + 64bit Amazon Linux 2017.03 v2.5.4 running Java 8 + 64bit Amazon Linux 2017.03 v2.5.3 running Java 8 + 64bit Amazon Linux 2017.03 v2.5.2 running Java 8 + 64bit Amazon Linux 2016.09 v2.4.4 running Java 8 + 64bit Amazon Linux 2018.03 v2.12.1 running Go 1.12.7 + 64bit Amazon Linux 2018.03 v2.6.14 running Packer 1.0.3 + 64bit Amazon Linux 2018.03 v2.12.16 running GlassFish 5.0 Java 8 (Preconfigured - Docker) + + + + 64bit Amazon Linux 2018.03 v4.10.1 running Node.js + + zip + + + + 64bit Amazon Linux 2018.03 v4.9.2 running Node.js + + zip + + + + 64bit Amazon Linux 2018.03 v4.8.0 running Node.js + + zip + + + + 64bit Amazon Linux 2018.03 v4.6.0 running Node.js + + zip + + + + 64bit Amazon Linux 2018.03 v4.5.3 running Node.js + + zip + + + + 64bit Amazon Linux 2018.03 v4.5.1 running Node.js + + zip + + + + 64bit Amazon Linux 2018.03 v4.5.0 running Node.js + + zip + + + + 64bit Amazon Linux 2017.09 v4.4.6 running Node.js + + zip + + + + 64bit Amazon Linux 2017.09 v4.4.5 running Node.js + + zip + + + + 64bit Amazon Linux 2017.09 v4.4.4 running Node.js + + zip + + + + 64bit Amazon Linux 2017.09 v4.4.2 running Node.js + + zip + + + + 64bit Amazon Linux 2017.09 v4.4.0 running Node.js + + zip + + + + 64bit Amazon Linux 2017.03 v4.3.0 running Node.js + + zip + + + + 64bit Amazon Linux 2017.03 v4.2.2 running Node.js + + zip + + + + 64bit Amazon Linux 2017.03 v4.2.1 running Node.js + + zip + + + + 64bit Amazon Linux 2017.03 v4.2.0 running Node.js + + zip + + + + 64bit Amazon Linux 2017.03 v4.1.1 running Node.js + + zip + + + + 64bit Amazon Linux 2017.03 v4.1.0 running Node.js + + zip + + + + 64bit Amazon Linux 2016.09 v4.0.1 running Node.js + + zip + + + + 64bit Amazon Linux 2016.09 v4.0.0 running Node.js + + zip + + + + 64bit Amazon Linux 2016.09 v3.3.1 running Node.js + + zip + + + + 64bit Amazon Linux 2016.09 v3.1.0 running Node.js + + zip + + + + 64bit Amazon Linux 2018.03 v2.8.14 running PHP 5.4 + + zip + + + + 64bit Amazon Linux 2018.03 v2.8.14 running PHP 5.5 + + zip + + + + 64bit Amazon Linux 2018.03 v2.8.14 running PHP 5.6 + + zip + + + + 64bit Amazon Linux 2018.03 v2.8.14 running PHP 7.0 + + zip + + + + 64bit Amazon Linux 2018.03 v2.8.14 running PHP 7.1 + + zip + + + + 64bit Amazon Linux 2018.03 v2.8.14 running PHP 7.2 + + zip + + + + 64bit Amazon Linux 2018.03 v2.8.12 running PHP 7.2 + + zip + + + + 64bit Amazon Linux 2018.03 v2.8.7 running PHP 7.1 + + zip + + + + 64bit Amazon Linux 2018.03 v2.8.6 running PHP 7.1 + + zip + + + + 64bit Amazon Linux 2018.03 v2.8.6 running PHP 7.2 + + zip + + + + 64bit Amazon Linux 2018.03 v2.8.5 running PHP 7.2 + + zip + + + + 64bit Amazon Linux 2018.03 v2.8.4 running PHP 7.2 + + zip + + + + 64bit Amazon Linux 2018.03 v2.8.3 running PHP 7.2 + + zip + + + + 64bit Amazon Linux 2018.03 v2.8.2 running PHP 7.2 + + zip + + + + 64bit Amazon Linux 2018.03 v2.8.1 running PHP 7.2 + + zip + + + + 64bit Amazon Linux 2018.03 v2.8.0 running PHP 7.1 + + zip + + + + 64bit Amazon Linux 2018.03 v2.7.1 running PHP 5.6 + + zip + + + + 64bit Amazon 
Linux 2018.03 v2.7.1 running PHP 7.0 + + zip + + + + 64bit Amazon Linux 2018.03 v2.7.1 running PHP 7.1 + + zip + + + + 64bit Amazon Linux 2018.03 v2.7.0 running PHP 7.0 + + zip + + + + 64bit Amazon Linux 2018.03 v2.7.0 running PHP 7.1 + + zip + + + + 64bit Amazon Linux 2017.09 v2.6.6 running PHP 5.4 + + zip + + + + 64bit Amazon Linux 2017.09 v2.6.6 running PHP 5.6 + + zip + + + + 64bit Amazon Linux 2017.09 v2.6.6 running PHP 7.0 + + zip + + + + 64bit Amazon Linux 2017.09 v2.6.5 running PHP 7.0 + + zip + + + + 64bit Amazon Linux 2017.09 v2.6.4 running PHP 5.4 + + zip + + + + 64bit Amazon Linux 2017.09 v2.6.4 running PHP 5.5 + + zip + + + + 64bit Amazon Linux 2017.09 v2.6.4 running PHP 5.6 + + zip + + + + 64bit Amazon Linux 2017.09 v2.6.4 running PHP 7.0 + + zip + + + + 64bit Amazon Linux 2017.09 v2.6.4 running PHP 7.1 + + zip + + + + 64bit Amazon Linux 2017.09 v2.6.3 running PHP 5.4 + + zip + + + + 64bit Amazon Linux 2017.09 v2.6.3 running PHP 5.5 + + zip + + + + 64bit Amazon Linux 2017.09 v2.6.3 running PHP 5.6 + + zip + + + + 64bit Amazon Linux 2017.09 v2.6.3 running PHP 7.0 + + zip + + + + 64bit Amazon Linux 2017.09 v2.6.3 running PHP 7.1 + + zip + + + + 64bit Amazon Linux 2017.09 v2.6.2 running PHP 5.6 + + zip + + + + 64bit Amazon Linux 2017.09 v2.6.2 running PHP 7.0 + + zip + + + + 64bit Amazon Linux 2017.09 v2.6.1 running PHP 7.0 + + zip + + + + 64bit Amazon Linux 2017.09 v2.6.0 running PHP 5.4 + + zip + + + + 64bit Amazon Linux 2017.09 v2.6.0 running PHP 5.5 + + zip + + + + 64bit Amazon Linux 2017.09 v2.6.0 running PHP 5.6 + + zip + + + + 64bit Amazon Linux 2017.09 v2.6.0 running PHP 7.0 + + zip + + + + 64bit Amazon Linux 2017.09 v2.6.0 running PHP 7.1 + + zip + + + + 64bit Amazon Linux 2017.03 v2.5.0 running PHP 7.0 + + zip + + + + 64bit Amazon Linux 2017.03 v2.5.0 running PHP 7.1 + + zip + + + + 64bit Amazon Linux 2017.03 v2.4.4 running PHP 5.5 + + zip + + + + 64bit Amazon Linux 2017.03 v2.4.4 running PHP 5.6 + + zip + + + + 64bit Amazon Linux 2017.03 v2.4.4 running PHP 7.0 + + zip + + + + 64bit Amazon Linux 2017.03 v2.4.3 running PHP 7.0 + + zip + + + + 64bit Amazon Linux 2017.03 v2.4.2 running PHP 5.4 + + zip + + + + 64bit Amazon Linux 2017.03 v2.4.2 running PHP 5.5 + + zip + + + + 64bit Amazon Linux 2017.03 v2.4.2 running PHP 5.6 + + zip + + + + 64bit Amazon Linux 2017.03 v2.4.2 running PHP 7.0 + + zip + + + + 64bit Amazon Linux 2017.03 v2.4.1 running PHP 7.0 + + zip + + + + 64bit Amazon Linux 2017.03 v2.4.0 running PHP 7.0 + + zip + + + + 64bit Amazon Linux 2016.09 v2.3.2 running PHP 7.0 + + zip + + + + 64bit Amazon Linux 2016.09 v2.3.1 running PHP 7.0 + + zip + + + + 64bit Amazon Linux 2018.03 v2.9.1 running Python 3.6 + + zip + + + + 64bit Amazon Linux 2018.03 v2.9.1 running Python 3.4 + + zip + + + + 64bit Amazon Linux 2018.03 v2.9.1 running Python + + zip + + + + 64bit Amazon Linux 2018.03 v2.9.1 running Python 2.7 + + zip + + + + 64bit Amazon Linux 2018.03 v2.7.5 running Python 3.6 + + zip + + + + 64bit Amazon Linux 2018.03 v2.7.1 running Python 3.6 + + zip + + + + 64bit Amazon Linux 2018.03 v2.7.0 running Python 3.6 + + zip + + + + 64bit Amazon Linux 2017.09 v2.6.4 running Python 3.6 + + zip + + + + 64bit Amazon Linux 2017.09 v2.6.1 running Python 3.6 + + zip + + + + 64bit Amazon Linux 2017.03 v2.4.0 running Python 3.4 + + zip + + + + 64bit Amazon Linux 2018.03 v2.10.1 running Ruby 2.6 (Puma) + + zip + + + + 64bit Amazon Linux 2018.03 v2.10.1 running Ruby 2.5 (Puma) + + zip + + + + 64bit Amazon Linux 2018.03 v2.10.1 running Ruby 2.4 (Puma) + + zip + + + + 64bit Amazon 
Linux 2018.03 v2.10.1 running Ruby 2.3 (Puma) + + zip + + + + 64bit Amazon Linux 2018.03 v2.10.1 running Ruby 2.2 (Puma) + + zip + + + + 64bit Amazon Linux 2018.03 v2.10.1 running Ruby 2.1 (Puma) + + zip + + + + 64bit Amazon Linux 2018.03 v2.10.1 running Ruby 2.0 (Puma) + + zip + + + + 64bit Amazon Linux 2018.03 v2.10.1 running Ruby 2.6 (Passenger Standalone) + + zip + + + + 64bit Amazon Linux 2018.03 v2.10.1 running Ruby 2.5 (Passenger Standalone) + + zip + + + + 64bit Amazon Linux 2018.03 v2.10.1 running Ruby 2.4 (Passenger Standalone) + + zip + + + + 64bit Amazon Linux 2018.03 v2.10.1 running Ruby 2.3 (Passenger Standalone) + + zip + + + + 64bit Amazon Linux 2018.03 v2.10.1 running Ruby 2.2 (Passenger Standalone) + + zip + + + + 64bit Amazon Linux 2018.03 v2.10.1 running Ruby 2.1 (Passenger Standalone) + + zip + + + + 64bit Amazon Linux 2018.03 v2.10.1 running Ruby 2.0 (Passenger Standalone) + + zip + + + + 64bit Amazon Linux 2018.03 v2.10.1 running Ruby 1.9.3 + + zip + + + + 64bit Amazon Linux 2018.03 v2.8.0 running Ruby 2.5 (Passenger Standalone) + + zip + + + + 64bit Amazon Linux 2017.03 v2.4.4 running Ruby 2.3 (Puma) + + zip + + + + 64bit Amazon Linux 2017.03 v2.4.4 running Ruby 2.3 (Passenger Standalone) + + zip + + + + 64bit Amazon Linux 2018.03 v3.2.1 running Tomcat 8.5 Java 8 + + war + zip + + + + 64bit Amazon Linux 2018.03 v3.2.1 running Tomcat 8 Java 8 + + war + zip + + + + 64bit Amazon Linux 2018.03 v3.2.1 running Tomcat 7 Java 7 + + war + zip + + + + 64bit Amazon Linux 2018.03 v3.2.1 running Tomcat 7 Java 6 + + war + zip + + + + 64bit Amazon Linux 2018.03 v3.1.1 running Tomcat 8.5 Java 8 + + war + zip + + + + 64bit Amazon Linux 2017.03 v2.6.5 running Tomcat 8 Java 8 + + war + zip + + + + 64bit Amazon Linux 2017.03 v2.6.2 running Tomcat 8 Java 8 + + war + zip + + + + 64bit Amazon Linux 2017.03 v2.6.1 running Tomcat 8 Java 8 + + war + zip + + + + 64bit Amazon Linux 2017.03 v2.6.0 running Tomcat 8 Java 8 + + war + zip + + + + 64bit Amazon Linux 2016.09 v2.5.4 running Tomcat 8 Java 8 + + war + zip + + + + 64bit Amazon Linux 2016.03 v2.1.0 running Tomcat 8 Java 8 + + war + zip + + + + 64bit Windows Server Core 2016 v2.2.1 running IIS 10.0 + + zip + + + + 64bit Windows Server 2016 v2.2.1 running IIS 10.0 + + zip + + + + 64bit Windows Server Core 2012 R2 v2.2.1 running IIS 8.5 + + zip + + + + 64bit Windows Server 2012 R2 v2.2.1 running IIS 8.5 + + zip + + + + 64bit Windows Server Core 2016 v1.2.0 running IIS 10.0 + + zip + + + + 64bit Windows Server 2016 v1.2.0 running IIS 10.0 + + zip + + + + 64bit Windows Server Core 2012 R2 v1.2.0 running IIS 8.5 + + zip + + + + 64bit Windows Server 2012 R2 v1.2.0 running IIS 8.5 + + zip + + + + 64bit Windows Server 2012 v1.2.0 running IIS 8 + + zip + + + + 64bit Windows Server 2008 R2 v1.2.0 running IIS 7.5 + + zip + + + + 64bit Windows Server Core 2012 R2 running IIS 8.5 + + zip + + + + 64bit Windows Server 2012 R2 running IIS 8.5 + + zip + + + + 64bit Windows Server 2012 running IIS 8 + + zip + + + + 64bit Windows Server 2008 R2 running IIS 7.5 + + zip + + + + 64bit Amazon Linux 2018.03 v2.12.16 running Docker 18.06.1-ce + + + + 64bit Amazon Linux 2016.09 v2.5.2 running Docker 1.12.6 + + + + 64bit Amazon Linux 2018.03 v2.15.2 running Multi-container Docker 18.06.1-ce (Generic) + + zip + json + + + + 64bit Debian jessie v2.12.16 running Go 1.4 (Preconfigured - Docker) + + zip + + + + 64bit Debian jessie v2.12.16 running Go 1.3 (Preconfigured - Docker) + + zip + + + + 64bit Debian jessie v2.12.16 running Python 3.4 (Preconfigured - Docker) + + 
zip + + + + 64bit Debian jessie v2.10.0 running Python 3.4 (Preconfigured - Docker) + + zip + + + + 64bit Amazon Linux 2018.03 v2.9.1 running Java 8 + + jar + zip + + + + 64bit Amazon Linux 2018.03 v2.9.1 running Java 7 + + jar + zip + + + + 64bit Amazon Linux 2018.03 v2.8.0 running Java 8 + + jar + zip + + + + 64bit Amazon Linux 2018.03 v2.7.6 running Java 8 + + jar + zip + + + + 64bit Amazon Linux 2018.03 v2.7.5 running Java 8 + + jar + zip + + + + 64bit Amazon Linux 2018.03 v2.7.4 running Java 8 + + jar + zip + + + + 64bit Amazon Linux 2018.03 v2.7.2 running Java 8 + + jar + zip + + + + 64bit Amazon Linux 2018.03 v2.7.1 running Java 8 + + jar + zip + + + + 64bit Amazon Linux 2017.09 v2.6.8 running Java 8 + + jar + zip + + + + 64bit Amazon Linux 2017.09 v2.6.5 running Java 8 + + jar + zip + + + + 64bit Amazon Linux 2017.09 v2.6.4 running Java 8 + + jar + zip + + + + 64bit Amazon Linux 2017.09 v2.6.3 running Java 8 + + jar + zip + + + + 64bit Amazon Linux 2017.09 v2.6.0 running Java 8 + + jar + zip + + + + 64bit Amazon Linux 2017.03 v2.5.4 running Java 8 + + jar + zip + + + + 64bit Amazon Linux 2017.03 v2.5.3 running Java 8 + + jar + zip + + + + 64bit Amazon Linux 2017.03 v2.5.2 running Java 8 + + jar + zip + + + + 64bit Amazon Linux 2016.09 v2.4.4 running Java 8 + + jar + zip + + + + 64bit Amazon Linux 2018.03 v2.12.1 running Go 1.12.7 + + zip + + + + 64bit Amazon Linux 2018.03 v2.6.14 running Packer 1.0.3 + + + + 64bit Amazon Linux 2018.03 v2.12.16 running GlassFish 5.0 Java 8 (Preconfigured - Docker) + + zip + + + + + + bd6bd2b2-9983-4845-b53b-fe53e8a5e1e7 + + +""" diff --git a/tests/test_eb/test_eb.py b/tests/test_eb/test_eb.py index 9e863e7f5e28..aafe524fd5d1 100644 --- a/tests/test_eb/test_eb.py +++ b/tests/test_eb/test_eb.py @@ -37,3 +37,44 @@ def test_describe_applications(): apps = conn.describe_applications() len(apps['Applications']).should.equal(1) apps['Applications'][0]['ApplicationName'].should.equal('myapp') + + +@mock_eb +def test_create_environment(): + # Create Elastic Beanstalk Environment + conn = boto3.client('elasticbeanstalk', region_name='us-east-1') + app = conn.create_application( + ApplicationName="myapp", + ) + env = conn.create_environment( + ApplicationName="myapp", + EnvironmentName="myenv", + ) + env['EnvironmentName'].should.equal("myenv") + + +@mock_eb +def test_describe_environments(): + # List Elastic Beanstalk Envs + conn = boto3.client('elasticbeanstalk', region_name='us-east-1') + conn.create_application( + ApplicationName="myapp", + ) + conn.create_environment( + ApplicationName="myapp", + EnvironmentName="myenv", + ) + + envs = conn.describe_environments() + envs = envs['Environments'] + len(envs).should.equal(1) + envs[0]['ApplicationName'].should.equal('myapp') + envs[0]['EnvironmentName'].should.equal('myenv') + + +@mock_eb +def test_list_available_solution_stacks(): + conn = boto3.client('elasticbeanstalk', region_name='us-east-1') + stacks = conn.list_available_solution_stacks() + len(stacks['SolutionStacks']).should.be.greater_than(0) + len(stacks['SolutionStacks']).should.be.equal(len(stacks['SolutionStackDetails'])) From 91fb40810242213349e0f436e01464425fdd0928 Mon Sep 17 00:00:00 2001 From: Niels Laukens Date: Wed, 4 Sep 2019 16:25:43 +0200 Subject: [PATCH 027/658] Move tags_from_query_string to core.utils --- moto/core/utils.py | 17 +++++++++++++++++ moto/ec2/responses/tags.py | 3 ++- moto/ec2/utils.py | 17 ----------------- 3 files changed, 19 insertions(+), 18 deletions(-) diff --git a/moto/core/utils.py b/moto/core/utils.py index 
ca670e871d61..acf76bb48109 100644 --- a/moto/core/utils.py +++ b/moto/core/utils.py @@ -297,3 +297,20 @@ def path_url(url): if parsed_url.query: path = path + '?' + parsed_url.query return path + + +def tags_from_query_string(querystring_dict): + prefix = 'Tag' + suffix = 'Key' + response_values = {} + for key, value in querystring_dict.items(): + if key.startswith(prefix) and key.endswith(suffix): + tag_index = key.replace(prefix + ".", "").replace("." + suffix, "") + tag_key = querystring_dict.get("Tag.{0}.Key".format(tag_index))[0] + tag_value_key = "Tag.{0}.Value".format(tag_index) + if tag_value_key in querystring_dict: + response_values[tag_key] = querystring_dict.get(tag_value_key)[ + 0] + else: + response_values[tag_key] = None + return response_values diff --git a/moto/ec2/responses/tags.py b/moto/ec2/responses/tags.py index 65d3da2554fe..37f2c3beab20 100644 --- a/moto/ec2/responses/tags.py +++ b/moto/ec2/responses/tags.py @@ -2,7 +2,8 @@ from moto.core.responses import BaseResponse from moto.ec2.models import validate_resource_ids -from moto.ec2.utils import tags_from_query_string, filters_from_querystring +from moto.ec2.utils import filters_from_querystring +from moto.core.utils import tags_from_query_string class TagResponse(BaseResponse): diff --git a/moto/ec2/utils.py b/moto/ec2/utils.py index e67cb39f48cf..f0d58d5fcd9b 100644 --- a/moto/ec2/utils.py +++ b/moto/ec2/utils.py @@ -198,23 +198,6 @@ def split_route_id(route_id): return values[0], values[1] -def tags_from_query_string(querystring_dict): - prefix = 'Tag' - suffix = 'Key' - response_values = {} - for key, value in querystring_dict.items(): - if key.startswith(prefix) and key.endswith(suffix): - tag_index = key.replace(prefix + ".", "").replace("." + suffix, "") - tag_key = querystring_dict.get("Tag.{0}.Key".format(tag_index))[0] - tag_value_key = "Tag.{0}.Value".format(tag_index) - if tag_value_key in querystring_dict: - response_values[tag_key] = querystring_dict.get(tag_value_key)[ - 0] - else: - response_values[tag_key] = None - return response_values - - def dhcp_configuration_from_querystring(querystring, option=u'DhcpConfiguration'): """ turn: From 9bfbd8e0088d93ccf7c0e4d81526f45db8f9bf50 Mon Sep 17 00:00:00 2001 From: Niels Laukens Date: Wed, 4 Sep 2019 16:55:34 +0200 Subject: [PATCH 028/658] Make tags_from_query_string() more flexible --- moto/core/utils.py | 26 +++++++++++++++++++------- 1 file changed, 19 insertions(+), 7 deletions(-) diff --git a/moto/core/utils.py b/moto/core/utils.py index acf76bb48109..6f75619d48a9 100644 --- a/moto/core/utils.py +++ b/moto/core/utils.py @@ -299,15 +299,27 @@ def path_url(url): return path -def tags_from_query_string(querystring_dict): - prefix = 'Tag' - suffix = 'Key' +def tags_from_query_string( + querystring_dict, + prefix="Tag", + key_suffix="Key", + value_suffix="Value" +): response_values = {} for key, value in querystring_dict.items(): - if key.startswith(prefix) and key.endswith(suffix): - tag_index = key.replace(prefix + ".", "").replace("." + suffix, "") - tag_key = querystring_dict.get("Tag.{0}.Key".format(tag_index))[0] - tag_value_key = "Tag.{0}.Value".format(tag_index) + if key.startswith(prefix) and key.endswith(key_suffix): + tag_index = key.replace(prefix + ".", "").replace("." 
+ key_suffix, "") + tag_key = querystring_dict.get( + "{prefix}.{index}.{key_suffix}".format( + prefix=prefix, + index=tag_index, + key_suffix=key_suffix, + ))[0] + tag_value_key = "{prefix}.{index}.{value_suffix}".format( + prefix=prefix, + index=tag_index, + value_suffix=value_suffix, + ) if tag_value_key in querystring_dict: response_values[tag_key] = querystring_dict.get(tag_value_key)[ 0] From 7f387b0bb9842d3561f59ed7fa70b92e17791909 Mon Sep 17 00:00:00 2001 From: Niels Laukens Date: Wed, 4 Sep 2019 16:56:06 +0200 Subject: [PATCH 029/658] Add elasticbeanstalk Tags handling --- moto/eb/exceptions.py | 6 +++ moto/eb/models.py | 47 +++++++++++++++++++-- moto/eb/responses.py | 88 +++++++++++++++++++++++++++++++++++++--- tests/test_eb/test_eb.py | 72 ++++++++++++++++++++++++++++++++ 4 files changed, 203 insertions(+), 10 deletions(-) diff --git a/moto/eb/exceptions.py b/moto/eb/exceptions.py index c470d531779c..bf3a896187a8 100644 --- a/moto/eb/exceptions.py +++ b/moto/eb/exceptions.py @@ -5,3 +5,9 @@ class InvalidParameterValueError(RESTError): def __init__(self, message): super(InvalidParameterValueError, self).__init__( "InvalidParameterValue", message) + + +class ResourceNotFoundException(RESTError): + def __init__(self, message): + super(ResourceNotFoundException, self).__init__( + "ResourceNotFoundException", message) diff --git a/moto/eb/models.py b/moto/eb/models.py index 5b4655175556..c3c2aa20c98f 100644 --- a/moto/eb/models.py +++ b/moto/eb/models.py @@ -7,32 +7,70 @@ class FakeEnvironment(BaseModel): - def __init__(self, application, environment_name): - self.environment_name = environment_name + def __init__( + self, + application, + environment_name, + tags, + ): self.application = weakref.proxy(application) # weakref to break circular dependencies + self.environment_name = environment_name + self.tags = tags @property def application_name(self): return self.application.application_name + @property + def environment_arn(self): + return 'arn:aws:elasticbeanstalk:{region}:{account_id}:' \ + 'environment/{application_name}/{environment_name}'.format( + region=self.region, + account_id='123456789012', + application_name=self.application_name, + environment_name=self.environment_name, + ) + + @property + def platform_arn(self): + return 'TODO' # TODO + + @property + def solution_stack_name(self): + return 'TODO' # TODO + + @property + def region(self): + return self.application.region + class FakeApplication(BaseModel): - def __init__(self, application_name): + def __init__(self, backend, application_name): + self.backend = weakref.proxy(backend) # weakref to break cycles self.application_name = application_name self.environments = dict() - def create_environment(self, environment_name): + def create_environment( + self, + environment_name, + tags, + ): if environment_name in self.environments: raise InvalidParameterValueError env = FakeEnvironment( application=self, environment_name=environment_name, + tags=tags, ) self.environments[environment_name] = env return env + @property + def region(self): + return self.backend.region + class EBBackend(BaseBackend): def __init__(self, region): @@ -52,6 +90,7 @@ def create_application(self, application_name): "Application {} already exists.".format(application_name) ) new_app = FakeApplication( + backend=self, application_name=application_name, ) self.applications[application_name] = new_app diff --git a/moto/eb/responses.py b/moto/eb/responses.py index fecdb8c21b97..fbace1938d48 100644 --- a/moto/eb/responses.py +++ b/moto/eb/responses.py 
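(The responses.py changes below lean on the helper generalized in the previous commit; a worked example of what tags_from_query_string() consumes and returns. The input is hypothetical, and the values are single-element lists because that is how moto's parsed querystrings arrive.)

from moto.core.utils import tags_from_query_string

querystring = {
    'TagsToAdd.member.1.Key': ['to update'],
    'TagsToAdd.member.1.Value': ['new'],
    'TagsToAdd.member.2.Key': ['extra key'],
    'TagsToAdd.member.2.Value': ['extra value'],
}
tags = tags_from_query_string(querystring, prefix='TagsToAdd.member')
assert tags == {'to update': 'new', 'extra key': 'extra value'}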
@@ -1,6 +1,7 @@ from moto.core.responses import BaseResponse +from moto.core.utils import tags_from_query_string from .models import eb_backends, EBBackend -from .exceptions import InvalidParameterValueError +from .exceptions import InvalidParameterValueError, ResourceNotFoundException class EBResponse(BaseResponse): @@ -38,7 +39,11 @@ def create_environment(self): "No Application named \'{}\' found.".format(application_name) ) - env = app.create_environment(environment_name=environment_name) + tags = tags_from_query_string(self.querystring, prefix="Tags.member") + env = app.create_environment( + environment_name=environment_name, + tags=tags, + ) template = self.response_template(EB_CREATE_ENVIRONMENT) return template.render( @@ -62,6 +67,48 @@ def describe_environments(self): def list_available_solution_stacks(): return EB_LIST_AVAILABLE_SOLUTION_STACKS + def _find_environment_by_arn(self, arn): + for app in self.backend.applications.keys(): + for env in self.backend.applications[app].environments.values(): + if env.environment_arn == arn: + return env + raise KeyError() + + def update_tags_for_resource(self): + resource_arn = self._get_param('ResourceArn') + try: + res = self._find_environment_by_arn(resource_arn) + except KeyError: + raise ResourceNotFoundException( + "Resource not found for ARN \'{}\'.".format(resource_arn) + ) + + tags_to_add = tags_from_query_string(self.querystring, prefix="TagsToAdd.member") + for key, value in tags_to_add.items(): + res.tags[key] = value + + tags_to_remove = self._get_multi_param('TagsToRemove.member') + for key in tags_to_remove: + del res.tags[key] + + return EB_UPDATE_TAGS_FOR_RESOURCE + + def list_tags_for_resource(self): + resource_arn = self._get_param('ResourceArn') + try: + res = self._find_environment_by_arn(resource_arn) + except KeyError: + raise ResourceNotFoundException( + "Resource not found for ARN \'{}\'.".format(resource_arn) + ) + tags = res.tags + + template = self.response_template(EB_LIST_TAGS_FOR_RESOURCE) + return template.render( + tags=tags, + arn=resource_arn, + ) + EB_CREATE_APPLICATION = """ @@ -136,11 +183,11 @@ def list_available_solution_stacks(): {{ environment.solution_stack_name }} Grey - arn:aws:elasticbeanstalk:{{ region }}:111122223333:environment/{{ environment.application_name }}/{{ environment.environment_name }} + {{ environment.environment_arn }} 2019-09-04T09:41:24.222Z 2019-09-04T09:41:24.222Z {{ environment_id }} - arn:aws:elasticbeanstalk:{{ region }}::platform/{{ environment.platform_arn }} + {{ environment.platform_arn }} WebServer Standard @@ -165,7 +212,7 @@ def list_available_solution_stacks(): {{ env.solution_stack_name }} Grey - arn:aws:elasticbeanstalk:{{ region }}:123456789012:environment/{{ env.application_name }}/{{ env.environment_name }} + {{ env.environment_arn }} false 2019-08-30T09:35:10.913Z false @@ -173,7 +220,7 @@ def list_available_solution_stacks(): 2019-08-22T07:02:47.332Z {{ env.environment_id }} 1 - arn:aws:elasticbeanstalk:{{ region }}::platform/{{ env.platform_arn }} + {{ env.platform_arn }} WebServer Standard @@ -1347,3 +1394,32 @@ def list_available_solution_stacks(): """ + + +EB_UPDATE_TAGS_FOR_RESOURCE = """ + + + f355d788-e67e-440f-b915-99e35254ffee + + +""" + + +EB_LIST_TAGS_FOR_RESOURCE = """ + + + + {% for key, value in tags.items() %} + + {{ key }} + {{ value }} + + {% endfor %} + + {{ arn }} + + + 178e410f-3b57-456f-a64c-a3b6a16da9ab + + +""" diff --git a/tests/test_eb/test_eb.py b/tests/test_eb/test_eb.py index aafe524fd5d1..2b5be4490562 100644 --- 
a/tests/test_eb/test_eb.py +++ b/tests/test_eb/test_eb.py @@ -72,6 +72,78 @@ def test_describe_environments(): envs[0]['EnvironmentName'].should.equal('myenv') +def tags_dict_to_list(tag_dict): + tag_list = [] + for key, value in tag_dict.items(): + tag_list.append({'Key': key, 'Value': value}) + return tag_list + + +def tags_list_to_dict(tag_list): + tag_dict = {} + for tag in tag_list: + tag_dict[tag['Key']] = tag['Value'] + return tag_dict + + +@mock_eb +def test_create_environment_tags(): + conn = boto3.client('elasticbeanstalk', region_name='us-east-1') + conn.create_application( + ApplicationName="myapp", + ) + env_tags = {'initial key': 'initial value'} + env = conn.create_environment( + ApplicationName="myapp", + EnvironmentName="myenv", + Tags=tags_dict_to_list(env_tags), + ) + + tags = conn.list_tags_for_resource( + ResourceArn=env['EnvironmentArn'], + ) + tags['ResourceArn'].should.equal(env['EnvironmentArn']) + tags_list_to_dict(tags['ResourceTags']).should.equal(env_tags) + + +@mock_eb +def test_update_tags(): + conn = boto3.client('elasticbeanstalk', region_name='us-east-1') + conn.create_application( + ApplicationName="myapp", + ) + env_tags = { + 'initial key': 'initial value', + 'to remove': 'delete me', + 'to update': 'original', + } + env = conn.create_environment( + ApplicationName="myapp", + EnvironmentName="myenv", + Tags=tags_dict_to_list(env_tags), + ) + + extra_env_tags = { + 'to update': 'new', + 'extra key': 'extra value', + } + conn.update_tags_for_resource( + ResourceArn=env['EnvironmentArn'], + TagsToAdd=tags_dict_to_list(extra_env_tags), + TagsToRemove=['to remove'], + ) + + total_env_tags = env_tags.copy() + total_env_tags.update(extra_env_tags) + del total_env_tags['to remove'] + + tags = conn.list_tags_for_resource( + ResourceArn=env['EnvironmentArn'], + ) + tags['ResourceArn'].should.equal(env['EnvironmentArn']) + tags_list_to_dict(tags['ResourceTags']).should.equal(total_env_tags) + + @mock_eb def test_list_available_solution_stacks(): conn = boto3.client('elasticbeanstalk', region_name='us-east-1') From 8f51bd6116b7194d2ef553064a66ff4bb3af734a Mon Sep 17 00:00:00 2001 From: Niels Laukens Date: Thu, 5 Sep 2019 11:38:19 +0200 Subject: [PATCH 030/658] EB: pass through SolutionStackName --- moto/eb/models.py | 8 ++++---- moto/eb/responses.py | 4 ++-- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/moto/eb/models.py b/moto/eb/models.py index c3c2aa20c98f..fa7345f0d266 100644 --- a/moto/eb/models.py +++ b/moto/eb/models.py @@ -11,10 +11,12 @@ def __init__( self, application, environment_name, + solution_stack_name, tags, ): self.application = weakref.proxy(application) # weakref to break circular dependencies self.environment_name = environment_name + self.solution_stack_name = solution_stack_name self.tags = tags @property @@ -35,10 +37,6 @@ def environment_arn(self): def platform_arn(self): return 'TODO' # TODO - @property - def solution_stack_name(self): - return 'TODO' # TODO - @property def region(self): return self.application.region @@ -53,6 +51,7 @@ def __init__(self, backend, application_name): def create_environment( self, environment_name, + solution_stack_name, tags, ): if environment_name in self.environments: @@ -61,6 +60,7 @@ def create_environment( env = FakeEnvironment( application=self, environment_name=environment_name, + solution_stack_name=solution_stack_name, tags=tags, ) self.environments[environment_name] = env diff --git a/moto/eb/responses.py b/moto/eb/responses.py index fbace1938d48..c93efc3a1f70 100644 --- 
a/moto/eb/responses.py +++ b/moto/eb/responses.py @@ -31,7 +31,6 @@ def describe_applications(self): def create_environment(self): application_name = self._get_param('ApplicationName') - environment_name = self._get_param('EnvironmentName') try: app = self.backend.applications[application_name] except KeyError: @@ -41,7 +40,8 @@ def create_environment(self): tags = tags_from_query_string(self.querystring, prefix="Tags.member") env = app.create_environment( - environment_name=environment_name, + environment_name=self._get_param('EnvironmentName'), + solution_stack_name=self._get_param('SolutionStackName'), tags=tags, ) From 7fae0d52ad6220998ef07dca7cf7de79680c2c80 Mon Sep 17 00:00:00 2001 From: Niels Laukens Date: Thu, 5 Sep 2019 14:17:55 +0200 Subject: [PATCH 031/658] Fix linting --- moto/eb/models.py | 12 ++++++------ moto/eb/responses.py | 2 +- 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/moto/eb/models.py b/moto/eb/models.py index fa7345f0d266..4490bbd0c83d 100644 --- a/moto/eb/models.py +++ b/moto/eb/models.py @@ -26,12 +26,12 @@ def application_name(self): @property def environment_arn(self): return 'arn:aws:elasticbeanstalk:{region}:{account_id}:' \ - 'environment/{application_name}/{environment_name}'.format( - region=self.region, - account_id='123456789012', - application_name=self.application_name, - environment_name=self.environment_name, - ) + 'environment/{application_name}/{environment_name}'.format( + region=self.region, + account_id='123456789012', + application_name=self.application_name, + environment_name=self.environment_name, + ) @property def platform_arn(self): diff --git a/moto/eb/responses.py b/moto/eb/responses.py index c93efc3a1f70..905780c448f3 100644 --- a/moto/eb/responses.py +++ b/moto/eb/responses.py @@ -1,6 +1,6 @@ from moto.core.responses import BaseResponse from moto.core.utils import tags_from_query_string -from .models import eb_backends, EBBackend +from .models import eb_backends from .exceptions import InvalidParameterValueError, ResourceNotFoundException From b51d5ad65f27384233c21a0974cff64bd7476cd6 Mon Sep 17 00:00:00 2001 From: Niklas Janlert Date: Thu, 28 Mar 2019 15:10:57 +0100 Subject: [PATCH 032/658] Support x-amz-tagging-directive in s3 copy_object --- moto/s3/responses.py | 4 ++++ tests/test_s3/test_s3.py | 28 ++++++++++++++++++++++++++++ 2 files changed, 32 insertions(+) diff --git a/moto/s3/responses.py b/moto/s3/responses.py index fd3a7b2db38f..5c985f7a34d3 100644 --- a/moto/s3/responses.py +++ b/moto/s3/responses.py @@ -1051,6 +1051,10 @@ def _key_response_put(self, request, body, bucket_name, query, key_name, headers if mdirective is not None and mdirective == "REPLACE": metadata = metadata_from_headers(request.headers) new_key.set_metadata(metadata, replace=True) + tdirective = request.headers.get("x-amz-tagging-directive") + if tdirective == "REPLACE": + tagging = self._tagging_from_headers(request.headers) + new_key.set_tagging(tagging) template = self.response_template(S3_OBJECT_COPY_RESPONSE) response_headers.update(new_key.response_dict) return 200, response_headers, template.render(key=new_key) diff --git a/tests/test_s3/test_s3.py b/tests/test_s3/test_s3.py index 8f3c3538ca31..cf3ae71c8796 100644 --- a/tests/test_s3/test_s3.py +++ b/tests/test_s3/test_s3.py @@ -1727,6 +1727,34 @@ def test_boto3_copy_object_from_unversioned_to_versioned_bucket(): obj2_version_new.should_not.equal(None) +@mock_s3 +def test_boto3_copy_object_with_replacement_tagging(): + client = boto3.client("s3", region_name="eu-north-1") + 
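# Annotation (not part of the patch): TaggingDirective in the boto3 calls
# below maps onto the x-amz-tagging-directive header handled in responses.py
# above. Per the S3 CopyObject API the directive defaults to COPY, meaning the
# source object's tags travel with the copy; only REPLACE makes the new object
# take its tags from the x-amz-tagging header. The test pins down both
# branches.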
client.create_bucket(Bucket="mybucket") + client.put_object( + Bucket="mybucket", Key="original", Body=b"test", Tagging="tag=old" + ) + + client.copy_object( + CopySource={"Bucket": "mybucket", "Key": "original"}, + Bucket="mybucket", + Key="copy1", + TaggingDirective="REPLACE", + Tagging="tag=new", + ) + client.copy_object( + CopySource={"Bucket": "mybucket", "Key": "original"}, + Bucket="mybucket", + Key="copy2", + TaggingDirective="COPY", + ) + + tags1 = client.get_object_tagging(Bucket="mybucket", Key="copy1")["TagSet"] + tags1.should.equal([{"Key": "tag", "Value": "new"}]) + tags2 = client.get_object_tagging(Bucket="mybucket", Key="copy2")["TagSet"] + tags2.should.equal([{"Key": "tag", "Value": "old"}]) + + @mock_s3 def test_boto3_deleted_versionings_list(): client = boto3.client("s3", region_name="us-east-1") From a6aa0f6dbf0b02b0d9e2b644ae83bd6d6f263612 Mon Sep 17 00:00:00 2001 From: Stephan Huber Date: Mon, 23 Dec 2019 08:46:37 +0100 Subject: [PATCH 033/658] Update models.py --- moto/iot/models.py | 170 ++++++++++++++++++++++----------------------- 1 file changed, 84 insertions(+), 86 deletions(-) diff --git a/moto/iot/models.py b/moto/iot/models.py index b2599de1dc99..3c3e0cfe2e13 100644 --- a/moto/iot/models.py +++ b/moto/iot/models.py @@ -30,7 +30,7 @@ def __init__(self, thing_name, thing_type, attributes, region_name): self.attributes = attributes self.arn = "arn:aws:iot:%s:1:thing/%s" % (self.region_name, thing_name) self.version = 1 - # TODO: we need to handle 'version'? + # TODO: we need to handle "version"? # for iot-data self.thing_shadow = None @@ -97,7 +97,7 @@ def __init__( break # if parent arn found (should always be found) if parent_thing_group_structure: - # copy parent's rootToParentThingGroups + # copy parent"s rootToParentThingGroups if "rootToParentThingGroups" in parent_thing_group_structure.metadata: self.metadata["rootToParentThingGroups"].extend( parent_thing_group_structure.metadata["rootToParentThingGroups"] @@ -175,27 +175,27 @@ def to_description_dict(self): class FakePolicy(BaseModel): - def __init__(self, name, document, region_name, default_version_id='1'): + def __init__(self, name, document, region_name, default_version_id="1"): self.name = name self.document = document - self.arn = 'arn:aws:iot:%s:1:policy/%s' % (region_name, name) + self.arn = "arn:aws:iot:%s:1:policy/%s" % (region_name, name) self.default_version_id = default_version_id self.versions = [FakePolicyVersion(self.name, document, True, region_name)] def to_get_dict(self): return { - 'policyName': self.name, - 'policyArn': self.arn, - 'policyDocument': self.document, - 'defaultVersionId': self.default_version_id + "policyName": self.name, + "policyArn": self.arn, + "policyDocument": self.document, + "defaultVersionId": self.default_version_id } def to_dict_at_creation(self): return { - 'policyName': self.name, - 'policyArn': self.arn, - 'policyDocument': self.document, - 'policyVersionId': self.default_version_id + "policyName": self.name, + "policyArn": self.arn, + "policyDocument": self.document, + "policyVersionId": self.default_version_id } def to_dict(self): @@ -210,39 +210,39 @@ def __init__(self, is_default, region_name): self.name = policy_name - self.arn = 'arn:aws:iot:%s:1:policy/%s' % (region_name, policy_name) + self.arn = "arn:aws:iot:%s:1:policy/%s" % (region_name, policy_name) self.document = document or {} self.is_default = is_default - self.version_id = '1' + self.version_id = "1" self.create_datetime = time.mktime(datetime(2015, 1, 1).timetuple()) 
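# Annotation (not part of the patch): the time.mktime(...timetuple()) pattern
# above yields epoch seconds interpreted in the machine's *local* timezone. A
# timezone-stable variant, shown purely as a sketch, would be:
#
#     import calendar
#     from datetime import datetime
#     calendar.timegm(datetime(2015, 1, 1).timetuple())  # 1420070400 (UTC)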
self.last_modified_datetime = time.mktime(datetime(2015, 1, 2).timetuple()) def to_get_dict(self): return { - 'policyName': self.name, - 'policyArn': self.arn, - 'policyDocument': self.document, - 'policyVersionId': self.version_id, - 'isDefaultVersion': self.is_default, - 'creationDate': self.create_datetime, - 'lastModifiedDate': self.last_modified_datetime, - 'generationId': self.version_id + "policyName": self.name, + "policyArn": self.arn, + "policyDocument": self.document, + "policyVersionId": self.version_id, + "isDefaultVersion": self.is_default, + "creationDate": self.create_datetime, + "lastModifiedDate": self.last_modified_datetime, + "generationId": self.version_id } def to_dict_at_creation(self): return { - 'policyArn': self.arn, - 'policyDocument': self.document, - 'policyVersionId': self.version_id, - 'isDefaultVersion': self.is_default + "policyArn": self.arn, + "policyDocument": self.document, + "policyVersionId": self.version_id, + "isDefaultVersion": self.is_default } def to_dict(self): return { - 'versionId': self.version_id, - 'isDefaultVersion': self.is_default, - 'createDate': self.create_datetime, + "versionId": self.version_id, + "isDefaultVersion": self.is_default, + "createDate": self.create_datetime, } @@ -277,7 +277,7 @@ def __init__( self.presigned_url_config = presigned_url_config self.target_selection = target_selection self.job_executions_rollout_config = job_executions_rollout_config - self.status = 'QUEUED' # IN_PROGRESS | CANCELED | COMPLETED + self.status = "QUEUED" # IN_PROGRESS | CANCELED | COMPLETED self.comment = None self.reason_code = None self.created_at = time.mktime(datetime(2015, 1, 1).timetuple()) @@ -297,24 +297,24 @@ def __init__( def to_dict(self): obj = { - 'jobArn': self.job_arn, - 'jobId': self.job_id, - 'targets': self.targets, - 'description': self.description, - 'presignedUrlConfig': self.presigned_url_config, - 'targetSelection': self.target_selection, - 'jobExecutionsRolloutConfig': self.job_executions_rollout_config, - 'status': self.status, - 'comment': self.comment, - 'forceCanceled': self.force, - 'reasonCode': self.reason_code, - 'createdAt': self.created_at, - 'lastUpdatedAt': self.last_updated_at, - 'completedAt': self.completed_at, - 'jobProcessDetails': self.job_process_details, - 'documentParameters': self.document_parameters, - 'document': self.document, - 'documentSource': self.document_source + "jobArn": self.job_arn, + "jobId": self.job_id, + "targets": self.targets, + "description": self.description, + "presignedUrlConfig": self.presigned_url_config, + "targetSelection": self.target_selection, + "jobExecutionsRolloutConfig": self.job_executions_rollout_config, + "status": self.status, + "comment": self.comment, + "forceCanceled": self.force, + "reasonCode": self.reason_code, + "createdAt": self.created_at, + "lastUpdatedAt": self.last_updated_at, + "completedAt": self.completed_at, + "jobProcessDetails": self.job_process_details, + "documentParameters": self.document_parameters, + "document": self.document, + "documentSource": self.document_source } return obj @@ -327,7 +327,7 @@ def _job_id_matcher(self, regex, argument): class FakeJobExecution(BaseModel): - def __init__(self, job_id, thing_arn, status='QUEUED', force_canceled=False, status_details_map={}): + def __init__(self, job_id, thing_arn, status="QUEUED", force_canceled=False, status_details_map={}): self.job_id = job_id self.status = status # IN_PROGRESS | CANCELED | COMPLETED self.force_canceled = force_canceled @@ -342,31 +342,31 @@ def __init__(self, 
job_id, thing_arn, status='QUEUED', force_canceled=False, sta def to_get_dict(self): obj = { - 'jobId': self.job_id, - 'status': self.status, - 'forceCanceled': self.force_canceled, - 'statusDetails': {'detailsMap': self.status_details_map}, - 'thingArn': self.thing_arn, - 'queuedAt': self.queued_at, - 'startedAt': self.started_at, - 'lastUpdatedAt': self.last_updated_at, - 'executionNumber': self.execution_number, - 'versionNumber': self.version_number, - 'approximateSecondsBeforeTimedOut': self.approximate_seconds_before_time_out + "jobId": self.job_id, + "status": self.status, + "forceCanceled": self.force_canceled, + "statusDetails": {"detailsMap": self.status_details_map}, + "thingArn": self.thing_arn, + "queuedAt": self.queued_at, + "startedAt": self.started_at, + "lastUpdatedAt": self.last_updated_at, + "executionNumber": self.execution_number, + "versionNumber": self.version_number, + "approximateSecondsBeforeTimedOut": self.approximate_seconds_before_time_out } return obj def to_dict(self): obj = { - 'jobId': self.job_id, - 'thingArn': self.thing_arn, - 'jobExecutionSummary': { - 'status': self.status, - 'queuedAt': self.queued_at, - 'startedAt': self.started_at, - 'lastUpdatedAt': self.last_updated_at, - 'executionNumber': self.execution_number, + "jobId": self.job_id, + "thingArn": self.thing_arn, + "jobExecutionSummary": { + "status": self.status, + "queuedAt": self.queued_at, + "startedAt": self.started_at, + "lastUpdatedAt": self.last_updated_at, + "executionNumber": self.execution_number, } } @@ -423,7 +423,7 @@ def create_thing_type(self, thing_type_name, thing_type_properties): def list_thing_types(self, thing_type_name=None): if thing_type_name: - # It's weird but thing_type_name is filtered by forward match, not complete match + # It"s weird but thing_type_name is filtered by forward match, not complete match return [ _ for _ in self.thing_types.values() @@ -686,7 +686,7 @@ def create_policy_version(self, policy_name, policy_document, set_as_default): raise ResourceNotFoundException() version = FakePolicyVersion(policy_name, policy_document, set_as_default, self.region_name) policy.versions.append(version) - version.version_id = '{0}'.format(len(policy.versions)) + version.version_id = "{0}".format(len(policy.versions)) if set_as_default: self.set_default_policy_version(policy_name, version.version_id) return version @@ -976,7 +976,7 @@ def create_job( self.jobs[job_id] = job for thing_arn in targets: - thing_name = thing_arn.split(':')[-1].split('/')[-1] + thing_name = thing_arn.split(":")[-1].split("/")[-1] job_execution = FakeJobExecution(job_id, thing_arn) self.job_executions[(job_id, thing_name)] = job_execution return job.job_arn, job_id, description @@ -990,9 +990,9 @@ def describe_job(self, job_id): def delete_job(self, job_id, force): job = self.jobs[job_id] - if job.status == 'IN_PROGRESS' and force: + if job.status == "IN_PROGRESS" and force: del self.jobs[job_id] - elif job.status != 'IN_PROGRESS': + elif job.status != "IN_PROGRESS": del self.jobs[job_id] else: raise InvalidStateTransitionException() @@ -1003,11 +1003,11 @@ def cancel_job(self, job_id, reason_code, comment, force): job.reason_code = reason_code if reason_code is not None else job.reason_code job.comment = comment if comment is not None else job.comment job.force = force if force is not None and force != job.force else job.force - job.status = 'CANCELED' + job.status = "CANCELED" - if job.status == 'IN_PROGRESS' and force: + if job.status == "IN_PROGRESS" and force: self.jobs[job_id] = job - 
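# Annotation (not part of the patch): this if/elif pair encodes the rule used
# by every job operation in this backend: a job that is IN_PROGRESS may only
# be mutated when force=True, any other status proceeds directly, and the
# remaining case (IN_PROGRESS without force) falls through to
# InvalidStateTransitionException, mirroring the AWS IoT API's behaviour.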
elif job.status != 'IN_PROGRESS': + elif job.status != "IN_PROGRESS": self.jobs[job_id] = job else: raise InvalidStateTransitionException() @@ -1053,11 +1053,11 @@ def cancel_job_execution(self, job_id, thing_name, force, expected_version, stat job_execution.force_canceled = force if force is not None else job_execution.force_canceled # TODO: implement expected_version and status_details (at most 10 can be specified) - if job_execution.status == 'IN_PROGRESS' and force: - job_execution.status = 'CANCELED' + if job_execution.status == "IN_PROGRESS" and force: + job_execution.status = "CANCELED" self.job_executions[(job_id, thing_name)] = job_execution - elif job_execution.status != 'IN_PROGRESS': - job_execution.status = 'CANCELED' + elif job_execution.status != "IN_PROGRESS": + job_execution.status = "CANCELED" self.job_executions[(job_id, thing_name)] = job_execution else: raise InvalidStateTransitionException() @@ -1068,9 +1068,9 @@ def delete_job_execution(self, job_id, thing_name, execution_number, force): if job_execution.execution_number != execution_number: raise ResourceNotFoundException() - if job_execution.status == 'IN_PROGRESS' and force: + if job_execution.status == "IN_PROGRESS" and force: del self.job_executions[(job_id, thing_name)] - elif job_execution.status != 'IN_PROGRESS': + elif job_execution.status != "IN_PROGRESS": del self.job_executions[(job_id, thing_name)] else: raise InvalidStateTransitionException() @@ -1080,8 +1080,7 @@ def list_job_executions_for_job(self, job_id, status, max_results, next_token): if status is not None: job_executions = list(filter(lambda elem: - status in elem["status"] and - elem["status"] == status, job_executions)) + status in elem["status"] and elem["status"] == status, job_executions)) token = next_token if token is None: @@ -1099,8 +1098,7 @@ def list_job_executions_for_thing(self, thing_name, status, max_results, next_to if status is not None: job_executions = list(filter(lambda elem: - status in elem["status"] and - elem["status"] == status, job_executions)) + status in elem["status"] and elem["status"] == status, job_executions)) token = next_token if token is None: From ed8d5edb5070c6c815c83fbd547d62ad8181ea7d Mon Sep 17 00:00:00 2001 From: Stephan Huber Date: Mon, 23 Dec 2019 09:01:53 +0100 Subject: [PATCH 034/658] fix linting errors --- moto/iot/exceptions.py | 2 +- moto/iot/models.py | 120 +++++++++++----- moto/iot/responses.py | 172 ++++++++++++---------- tests/test_iot/test_iot.py | 285 ++++++++++++++++++++++--------------- 4 files changed, 347 insertions(+), 232 deletions(-) diff --git a/moto/iot/exceptions.py b/moto/iot/exceptions.py index 2854fbb178a3..d114a12ad557 100644 --- a/moto/iot/exceptions.py +++ b/moto/iot/exceptions.py @@ -27,7 +27,7 @@ def __init__(self, msg=None): self.code = 409 super(InvalidStateTransitionException, self).__init__( "InvalidStateTransitionException", - msg or "An attempt was made to change to an invalid state." 
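# Annotation (not part of the patch): the only difference between the removed
# line above and the added line below is the trailing comma after the final
# argument, black's "magic trailing comma", which keeps multi-line argument
# lists formatted one argument per line.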
+ msg or "An attempt was made to change to an invalid state.", ) diff --git a/moto/iot/models.py b/moto/iot/models.py index 3c3e0cfe2e13..37e9b4ef9155 100644 --- a/moto/iot/models.py +++ b/moto/iot/models.py @@ -18,7 +18,7 @@ ResourceNotFoundException, InvalidRequestException, InvalidStateTransitionException, - VersionConflictException + VersionConflictException, ) @@ -187,7 +187,7 @@ def to_get_dict(self): "policyName": self.name, "policyArn": self.arn, "policyDocument": self.document, - "defaultVersionId": self.default_version_id + "defaultVersionId": self.default_version_id, } def to_dict_at_creation(self): @@ -195,7 +195,7 @@ def to_dict_at_creation(self): "policyName": self.name, "policyArn": self.arn, "policyDocument": self.document, - "policyVersionId": self.default_version_id + "policyVersionId": self.default_version_id, } def to_dict(self): @@ -203,12 +203,7 @@ def to_dict(self): class FakePolicyVersion(object): - - def __init__(self, - policy_name, - document, - is_default, - region_name): + def __init__(self, policy_name, document, is_default, region_name): self.name = policy_name self.arn = "arn:aws:iot:%s:1:policy/%s" % (region_name, policy_name) self.document = document or {} @@ -227,7 +222,7 @@ def to_get_dict(self): "isDefaultVersion": self.is_default, "creationDate": self.create_datetime, "lastModifiedDate": self.last_modified_datetime, - "generationId": self.version_id + "generationId": self.version_id, } def to_dict_at_creation(self): @@ -235,7 +230,7 @@ def to_dict_at_creation(self): "policyArn": self.arn, "policyDocument": self.document, "policyVersionId": self.version_id, - "isDefaultVersion": self.is_default + "isDefaultVersion": self.is_default, } def to_dict(self): @@ -314,7 +309,7 @@ def to_dict(self): "jobProcessDetails": self.job_process_details, "documentParameters": self.document_parameters, "document": self.document, - "documentSource": self.document_source + "documentSource": self.document_source, } return obj @@ -326,8 +321,14 @@ def _job_id_matcher(self, regex, argument): class FakeJobExecution(BaseModel): - - def __init__(self, job_id, thing_arn, status="QUEUED", force_canceled=False, status_details_map={}): + def __init__( + self, + job_id, + thing_arn, + status="QUEUED", + force_canceled=False, + status_details_map={}, + ): self.job_id = job_id self.status = status # IN_PROGRESS | CANCELED | COMPLETED self.force_canceled = force_canceled @@ -352,7 +353,7 @@ def to_get_dict(self): "lastUpdatedAt": self.last_updated_at, "executionNumber": self.execution_number, "versionNumber": self.version_number, - "approximateSecondsBeforeTimedOut": self.approximate_seconds_before_time_out + "approximateSecondsBeforeTimedOut": self.approximate_seconds_before_time_out, } return obj @@ -367,7 +368,7 @@ def to_dict(self): "startedAt": self.started_at, "lastUpdatedAt": self.last_updated_at, "executionNumber": self.execution_number, - } + }, } return obj @@ -684,7 +685,9 @@ def create_policy_version(self, policy_name, policy_document, set_as_default): policy = self.get_policy(policy_name) if not policy: raise ResourceNotFoundException() - version = FakePolicyVersion(policy_name, policy_document, set_as_default, self.region_name) + version = FakePolicyVersion( + policy_name, policy_document, set_as_default, self.region_name + ) policy.versions.append(version) version.version_id = "{0}".format(len(policy.versions)) if set_as_default: @@ -724,7 +727,8 @@ def delete_policy_version(self, policy_name, version_id): raise ResourceNotFoundException() if version_id == 
policy.default_version_id: raise InvalidRequestException( - "Cannot delete the default version of a policy") + "Cannot delete the default version of a policy" + ) for i, v in enumerate(policy.versions): if v.version_id == version_id: del policy.versions[i] @@ -1017,7 +1021,15 @@ def cancel_job(self, job_id, reason_code, comment, force): def get_job_document(self, job_id): return self.jobs[job_id] - def list_jobs(self, status, target_selection, max_results, token, thing_group_name, thing_group_id): + def list_jobs( + self, + status, + target_selection, + max_results, + token, + thing_group_name, + thing_group_id, + ): # TODO: implement filters all_jobs = [_.to_dict() for _ in self.jobs.values()] filtered_jobs = all_jobs @@ -1027,8 +1039,12 @@ def list_jobs(self, status, target_selection, max_results, token, thing_group_na next_token = str(max_results) if len(filtered_jobs) > max_results else None else: token = int(token) - jobs = filtered_jobs[token:token + max_results] - next_token = str(token + max_results) if len(filtered_jobs) > token + max_results else None + jobs = filtered_jobs[token : token + max_results] + next_token = ( + str(token + max_results) + if len(filtered_jobs) > token + max_results + else None + ) return jobs, next_token @@ -1038,19 +1054,25 @@ def describe_job_execution(self, job_id, thing_name, execution_number): except KeyError: raise ResourceNotFoundException() - if job_execution is None or \ - (execution_number is not None and job_execution.execution_number != execution_number): + if job_execution is None or ( + execution_number is not None + and job_execution.execution_number != execution_number + ): raise ResourceNotFoundException() return job_execution - def cancel_job_execution(self, job_id, thing_name, force, expected_version, status_details): + def cancel_job_execution( + self, job_id, thing_name, force, expected_version, status_details + ): job_execution = self.job_executions[(job_id, thing_name)] if job_execution is None: raise ResourceNotFoundException() - job_execution.force_canceled = force if force is not None else job_execution.force_canceled + job_execution.force_canceled = ( + force if force is not None else job_execution.force_canceled + ) # TODO: implement expected_version and status_details (at most 10 can be specified) if job_execution.status == "IN_PROGRESS" and force: @@ -1076,11 +1098,19 @@ def delete_job_execution(self, job_id, thing_name, execution_number, force): raise InvalidStateTransitionException() def list_job_executions_for_job(self, job_id, status, max_results, next_token): - job_executions = [self.job_executions[je].to_dict() for je in self.job_executions if je[0] == job_id] + job_executions = [ + self.job_executions[je].to_dict() + for je in self.job_executions + if je[0] == job_id + ] if status is not None: - job_executions = list(filter(lambda elem: - status in elem["status"] and elem["status"] == status, job_executions)) + job_executions = list( + filter( + lambda elem: status in elem["status"] and elem["status"] == status, + job_executions, + ) + ) token = next_token if token is None: @@ -1088,17 +1118,31 @@ def list_job_executions_for_job(self, job_id, status, max_results, next_token): next_token = str(max_results) if len(job_executions) > max_results else None else: token = int(token) - job_executions = job_executions[token:token + max_results] - next_token = str(token + max_results) if len(job_executions) > token + max_results else None + job_executions = job_executions[token : token + max_results] + next_token = ( + 
str(token + max_results) + if len(job_executions) > token + max_results + else None + ) return job_executions, next_token - def list_job_executions_for_thing(self, thing_name, status, max_results, next_token): - job_executions = [self.job_executions[je].to_dict() for je in self.job_executions if je[1] == thing_name] + def list_job_executions_for_thing( + self, thing_name, status, max_results, next_token + ): + job_executions = [ + self.job_executions[je].to_dict() + for je in self.job_executions + if je[1] == thing_name + ] if status is not None: - job_executions = list(filter(lambda elem: - status in elem["status"] and elem["status"] == status, job_executions)) + job_executions = list( + filter( + lambda elem: status in elem["status"] and elem["status"] == status, + job_executions, + ) + ) token = next_token if token is None: @@ -1106,8 +1150,12 @@ def list_job_executions_for_thing(self, thing_name, status, max_results, next_to next_token = str(max_results) if len(job_executions) > max_results else None else: token = int(token) - job_executions = job_executions[token:token + max_results] - next_token = str(token + max_results) if len(job_executions) > token + max_results else None + job_executions = job_executions[token : token + max_results] + next_token = ( + str(token + max_results) + if len(job_executions) > token + max_results + else None + ) return job_executions, next_token diff --git a/moto/iot/responses.py b/moto/iot/responses.py index e88e9264af08..c12d4b5c5ebb 100644 --- a/moto/iot/responses.py +++ b/moto/iot/responses.py @@ -133,33 +133,35 @@ def create_job(self): def describe_job(self): job = self.iot_backend.describe_job(job_id=self._get_param("jobId")) - return json.dumps(dict( - documentSource=job.document_source, - job=dict( - comment=job.comment, - completedAt=job.completed_at, - createdAt=job.created_at, - description=job.description, - documentParameters=job.document_parameters, - forceCanceled=job.force, - reasonCode=job.reason_code, - jobArn=job.job_arn, - jobExecutionsRolloutConfig=job.job_executions_rollout_config, - jobId=job.job_id, - jobProcessDetails=job.job_process_details, - lastUpdatedAt=job.last_updated_at, - presignedUrlConfig=job.presigned_url_config, - status=job.status, - targets=job.targets, - targetSelection=job.target_selection - ))) + return json.dumps( + dict( + documentSource=job.document_source, + job=dict( + comment=job.comment, + completedAt=job.completed_at, + createdAt=job.created_at, + description=job.description, + documentParameters=job.document_parameters, + forceCanceled=job.force, + reasonCode=job.reason_code, + jobArn=job.job_arn, + jobExecutionsRolloutConfig=job.job_executions_rollout_config, + jobId=job.job_id, + jobProcessDetails=job.job_process_details, + lastUpdatedAt=job.last_updated_at, + presignedUrlConfig=job.presigned_url_config, + status=job.status, + targets=job.targets, + targetSelection=job.target_selection, + ), + ) + ) def delete_job(self): job_id = self._get_param("jobId") force = self._get_bool_param("force") - self.iot_backend.delete_job(job_id=job_id, - force=force) + self.iot_backend.delete_job(job_id=job_id, force=force) return json.dumps(dict()) @@ -169,10 +171,9 @@ def cancel_job(self): comment = self._get_param("comment") force = self._get_bool_param("force") - job = self.iot_backend.cancel_job(job_id=job_id, - reason_code=reason_code, - comment=comment, - force=force) + job = self.iot_backend.cancel_job( + job_id=job_id, reason_code=reason_code, comment=comment, force=force + ) return json.dumps(job.to_dict()) 
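# Annotation, a minimal sketch (not part of the patch): the paginators
# reformatted in models.py above all share one scheme in which the "opaque"
# nextToken is simply the stringified offset of the next page:
#
#     def paginate(items, max_results, token=None):
#         start = int(token) if token else 0
#         page = items[start:start + max_results]
#         more = len(items) > start + max_results
#         return page, str(start + max_results) if more else None
#
# so callers keep passing the returned token back in until it comes out None.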
@@ -180,25 +181,29 @@ def get_job_document(self): job = self.iot_backend.get_job_document(job_id=self._get_param("jobId")) if job.document is not None: - return json.dumps({'document': job.document}) + return json.dumps({"document": job.document}) else: # job.document_source is not None: # TODO: needs to be implemented to get document_source's content from S3 - return json.dumps({'document': ''}) + return json.dumps({"document": ""}) def list_jobs(self): - status = self._get_param("status"), - target_selection = self._get_param("targetSelection"), - max_results = self._get_int_param("maxResults", 50) # not the default, but makes testing easier + status = (self._get_param("status"),) + target_selection = (self._get_param("targetSelection"),) + max_results = self._get_int_param( + "maxResults", 50 + ) # not the default, but makes testing easier previous_next_token = self._get_param("nextToken") - thing_group_name = self._get_param("thingGroupName"), + thing_group_name = (self._get_param("thingGroupName"),) thing_group_id = self._get_param("thingGroupId") - jobs, next_token = self.iot_backend.list_jobs(status=status, - target_selection=target_selection, - max_results=max_results, - token=previous_next_token, - thing_group_name=thing_group_name, - thing_group_id=thing_group_id) + jobs, next_token = self.iot_backend.list_jobs( + status=status, + target_selection=target_selection, + max_results=max_results, + token=previous_next_token, + thing_group_name=thing_group_name, + thing_group_id=thing_group_id, + ) return json.dumps(dict(jobs=jobs, nextToken=next_token)) @@ -206,9 +211,9 @@ def describe_job_execution(self): job_id = self._get_param("jobId") thing_name = self._get_param("thingName") execution_number = self._get_int_param("executionNumber") - job_execution = self.iot_backend.describe_job_execution(job_id=job_id, - thing_name=thing_name, - execution_number=execution_number) + job_execution = self.iot_backend.describe_job_execution( + job_id=job_id, thing_name=thing_name, execution_number=execution_number + ) return json.dumps(dict(execution=job_execution.to_get_dict())) @@ -219,11 +224,13 @@ def cancel_job_execution(self): expected_version = self._get_int_param("expectedVersion") status_details = self._get_param("statusDetails") - self.iot_backend.cancel_job_execution(job_id=job_id, - thing_name=thing_name, - force=force, - expected_version=expected_version, - status_details=status_details) + self.iot_backend.cancel_job_execution( + job_id=job_id, + thing_name=thing_name, + force=force, + expected_version=expected_version, + status_details=status_details, + ) return json.dumps(dict()) @@ -233,34 +240,41 @@ def delete_job_execution(self): execution_number = self._get_int_param("executionNumber") force = self._get_bool_param("force") - self.iot_backend.delete_job_execution(job_id=job_id, - thing_name=thing_name, - execution_number=execution_number, - force=force) + self.iot_backend.delete_job_execution( + job_id=job_id, + thing_name=thing_name, + execution_number=execution_number, + force=force, + ) return json.dumps(dict()) def list_job_executions_for_job(self): job_id = self._get_param("jobId") status = self._get_param("status") - max_results = self._get_int_param("maxResults", 50) # not the default, but makes testing easier + max_results = self._get_int_param( + "maxResults", 50 + ) # not the default, but makes testing easier next_token = self._get_param("nextToken") - job_executions, next_token = self.iot_backend.list_job_executions_for_job(job_id=job_id, - status=status, - 
max_results=max_results, - next_token=next_token) + job_executions, next_token = self.iot_backend.list_job_executions_for_job( + job_id=job_id, status=status, max_results=max_results, next_token=next_token + ) return json.dumps(dict(executionSummaries=job_executions, nextToken=next_token)) def list_job_executions_for_thing(self): thing_name = self._get_param("thingName") status = self._get_param("status") - max_results = self._get_int_param("maxResults", 50) # not the default, but makes testing easier + max_results = self._get_int_param( + "maxResults", 50 + ) # not the default, but makes testing easier next_token = self._get_param("nextToken") - job_executions, next_token = self.iot_backend.list_job_executions_for_thing(thing_name=thing_name, - status=status, - max_results=max_results, - next_token=next_token) + job_executions, next_token = self.iot_backend.list_job_executions_for_thing( + thing_name=thing_name, + status=status, + max_results=max_results, + next_token=next_token, + ) return json.dumps(dict(executionSummaries=job_executions, nextToken=next_token)) @@ -352,35 +366,39 @@ def delete_policy(self): return json.dumps(dict()) def create_policy_version(self): - policy_name = self._get_param('policyName') - policy_document = self._get_param('policyDocument') - set_as_default = self._get_bool_param('setAsDefault') - policy_version = self.iot_backend.create_policy_version(policy_name, policy_document, set_as_default) + policy_name = self._get_param("policyName") + policy_document = self._get_param("policyDocument") + set_as_default = self._get_bool_param("setAsDefault") + policy_version = self.iot_backend.create_policy_version( + policy_name, policy_document, set_as_default + ) return json.dumps(dict(policy_version.to_dict_at_creation())) def set_default_policy_version(self): - policy_name = self._get_param('policyName') - version_id = self._get_param('policyVersionId') + policy_name = self._get_param("policyName") + version_id = self._get_param("policyVersionId") self.iot_backend.set_default_policy_version(policy_name, version_id) return json.dumps(dict()) def get_policy_version(self): - policy_name = self._get_param('policyName') - version_id = self._get_param('policyVersionId') + policy_name = self._get_param("policyName") + version_id = self._get_param("policyVersionId") policy_version = self.iot_backend.get_policy_version(policy_name, version_id) return json.dumps(dict(policy_version.to_get_dict())) def list_policy_versions(self): - policy_name = self._get_param('policyName') - policiy_versions = self.iot_backend.list_policy_versions(policy_name=policy_name) + policy_name = self._get_param("policyName") + policiy_versions = self.iot_backend.list_policy_versions( + policy_name=policy_name + ) return json.dumps(dict(policyVersions=[_.to_dict() for _ in policiy_versions])) def delete_policy_version(self): - policy_name = self._get_param('policyName') - version_id = self._get_param('policyVersionId') + policy_name = self._get_param("policyName") + version_id = self._get_param("policyVersionId") self.iot_backend.delete_policy_version(policy_name, version_id) return json.dumps(dict()) @@ -392,15 +410,15 @@ def attach_policy(self): return json.dumps(dict()) def list_attached_policies(self): - principal = unquote(self._get_param('target')) + principal = unquote(self._get_param("target")) # marker = self._get_param("marker") # page_size = self._get_int_param("pageSize") - policies = self.iot_backend.list_attached_policies( - target=principal - ) + policies = 
self.iot_backend.list_attached_policies(target=principal) # TODO: implement pagination in the future next_marker = None - return json.dumps(dict(policies=[_.to_dict() for _ in policies], nextMarker=next_marker)) + return json.dumps( + dict(policies=[_.to_dict() for _ in policies], nextMarker=next_marker) + ) def attach_principal_policy(self): policy_name = self._get_param("policyName") diff --git a/tests/test_iot/test_iot.py b/tests/test_iot/test_iot.py index 49a0af974e7c..f8c4f579c0ce 100644 --- a/tests/test_iot/test_iot.py +++ b/tests/test_iot/test_iot.py @@ -1,13 +1,14 @@ from __future__ import unicode_literals import json -import sure #noqa +import sure # noqa import boto3 from moto import mock_iot from botocore.exceptions import ClientError from nose.tools import assert_raises + @mock_iot def test_attach_policy(): client = boto3.client("iot", region_name="ap-northeast-1") @@ -68,67 +69,111 @@ def test_policy_versions(): policy.should.have.key("policyName").which.should.equal(policy_name) policy.should.have.key("policyArn").which.should_not.be.none policy.should.have.key("policyDocument").which.should.equal(json.dumps({})) - policy.should.have.key("defaultVersionId").which.should.equal(policy["defaultVersionId"]) + policy.should.have.key("defaultVersionId").which.should.equal( + policy["defaultVersionId"] + ) - policy1 = client.create_policy_version(policyName=policy_name, policyDocument=json.dumps({"version": "version_1"}), - setAsDefault=True) + policy1 = client.create_policy_version( + policyName=policy_name, + policyDocument=json.dumps({"version": "version_1"}), + setAsDefault=True, + ) policy1.should.have.key("policyArn").which.should_not.be.none - policy1.should.have.key("policyDocument").which.should.equal(json.dumps({"version": "version_1"})) + policy1.should.have.key("policyDocument").which.should.equal( + json.dumps({"version": "version_1"}) + ) policy1.should.have.key("policyVersionId").which.should.equal("2") policy1.should.have.key("isDefaultVersion").which.should.equal(True) - policy2 = client.create_policy_version(policyName=policy_name, policyDocument=json.dumps({"version": "version_2"}), - setAsDefault=False) + policy2 = client.create_policy_version( + policyName=policy_name, + policyDocument=json.dumps({"version": "version_2"}), + setAsDefault=False, + ) policy2.should.have.key("policyArn").which.should_not.be.none - policy2.should.have.key("policyDocument").which.should.equal(json.dumps({"version": "version_2"})) + policy2.should.have.key("policyDocument").which.should.equal( + json.dumps({"version": "version_2"}) + ) policy2.should.have.key("policyVersionId").which.should.equal("3") policy2.should.have.key("isDefaultVersion").which.should.equal(False) policy = client.get_policy(policyName=policy_name) policy.should.have.key("policyName").which.should.equal(policy_name) policy.should.have.key("policyArn").which.should_not.be.none - policy.should.have.key("policyDocument").which.should.equal(json.dumps({"version": "version_1"})) - policy.should.have.key("defaultVersionId").which.should.equal(policy1["policyVersionId"]) + policy.should.have.key("policyDocument").which.should.equal( + json.dumps({"version": "version_1"}) + ) + policy.should.have.key("defaultVersionId").which.should.equal( + policy1["policyVersionId"] + ) policy_versions = client.list_policy_versions(policyName=policy_name) policy_versions.should.have.key("policyVersions").which.should.have.length_of(3) - list(map(lambda item: item["isDefaultVersion"], 
policy_versions["policyVersions"])).count(True).should.equal(1) - default_policy = list(filter(lambda item: item["isDefaultVersion"], policy_versions["policyVersions"])) - default_policy[0].should.have.key("versionId").should.equal(policy1["policyVersionId"]) + list( + map(lambda item: item["isDefaultVersion"], policy_versions["policyVersions"]) + ).count(True).should.equal(1) + default_policy = list( + filter(lambda item: item["isDefaultVersion"], policy_versions["policyVersions"]) + ) + default_policy[0].should.have.key("versionId").should.equal( + policy1["policyVersionId"] + ) policy = client.get_policy(policyName=policy_name) policy.should.have.key("policyName").which.should.equal(policy_name) policy.should.have.key("policyArn").which.should_not.be.none - policy.should.have.key("policyDocument").which.should.equal(json.dumps({"version": "version_1"})) - policy.should.have.key("defaultVersionId").which.should.equal(policy1["policyVersionId"]) + policy.should.have.key("policyDocument").which.should.equal( + json.dumps({"version": "version_1"}) + ) + policy.should.have.key("defaultVersionId").which.should.equal( + policy1["policyVersionId"] + ) - client.set_default_policy_version(policyName=policy_name, policyVersionId=policy2["policyVersionId"]) + client.set_default_policy_version( + policyName=policy_name, policyVersionId=policy2["policyVersionId"] + ) policy_versions = client.list_policy_versions(policyName=policy_name) policy_versions.should.have.key("policyVersions").which.should.have.length_of(3) - list(map(lambda item: item["isDefaultVersion"], policy_versions["policyVersions"])).count(True).should.equal(1) - default_policy = list(filter(lambda item: item["isDefaultVersion"], policy_versions["policyVersions"])) - default_policy[0].should.have.key("versionId").should.equal(policy2["policyVersionId"]) + list( + map(lambda item: item["isDefaultVersion"], policy_versions["policyVersions"]) + ).count(True).should.equal(1) + default_policy = list( + filter(lambda item: item["isDefaultVersion"], policy_versions["policyVersions"]) + ) + default_policy[0].should.have.key("versionId").should.equal( + policy2["policyVersionId"] + ) policy = client.get_policy(policyName=policy_name) policy.should.have.key("policyName").which.should.equal(policy_name) policy.should.have.key("policyArn").which.should_not.be.none - policy.should.have.key("policyDocument").which.should.equal(json.dumps({"version": "version_2"})) - policy.should.have.key("defaultVersionId").which.should.equal(policy2["policyVersionId"]) + policy.should.have.key("policyDocument").which.should.equal( + json.dumps({"version": "version_2"}) + ) + policy.should.have.key("defaultVersionId").which.should.equal( + policy2["policyVersionId"] + ) client.delete_policy_version(policyName=policy_name, policyVersionId="1") policy_versions = client.list_policy_versions(policyName=policy_name) policy_versions.should.have.key("policyVersions").which.should.have.length_of(2) - client.delete_policy_version(policyName=policy_name, policyVersionId=policy1["policyVersionId"]) + client.delete_policy_version( + policyName=policy_name, policyVersionId=policy1["policyVersionId"] + ) policy_versions = client.list_policy_versions(policyName=policy_name) policy_versions.should.have.key("policyVersions").which.should.have.length_of(1) # should fail as it"s the default policy. 
Should use delete_policy instead try: - client.delete_policy_version(policyName=policy_name, policyVersionId=policy2["policyVersionId"]) + client.delete_policy_version( + policyName=policy_name, policyVersionId=policy2["policyVersionId"] + ) assert False, "Should have failed in previous call" except Exception as exception: - exception.response["Error"]["Message"].should.equal("Cannot delete the default version of a policy") + exception.response["Error"]["Message"].should.equal( + "Cannot delete the default version of a policy" + ) @mock_iot @@ -1159,9 +1204,7 @@ def test_list_jobs(): thing.should.have.key("thingArn") # job document - job_document = { - "field": "value" - } + job_document = {"field": "value"} job1 = client.create_job( jobId=job_id, @@ -1170,12 +1213,10 @@ def test_list_jobs(): description="Description", presignedUrlConfig={ "roleArn": "arn:aws:iam::1:role/service-role/iot_job_role", - "expiresInSec": 123 + "expiresInSec": 123, }, targetSelection="CONTINUOUS", - jobExecutionsRolloutConfig={ - "maximumPerMinute": 10 - } + jobExecutionsRolloutConfig={"maximumPerMinute": 10}, ) job1.should.have.key("jobId").which.should.equal(job_id) @@ -1183,21 +1224,19 @@ def test_list_jobs(): job1.should.have.key("description") job2 = client.create_job( - jobId=job_id+"1", + jobId=job_id + "1", targets=[thing["thingArn"]], document=json.dumps(job_document), description="Description", presignedUrlConfig={ "roleArn": "arn:aws:iam::1:role/service-role/iot_job_role", - "expiresInSec": 123 + "expiresInSec": 123, }, targetSelection="CONTINUOUS", - jobExecutionsRolloutConfig={ - "maximumPerMinute": 10 - } + jobExecutionsRolloutConfig={"maximumPerMinute": 10}, ) - job2.should.have.key("jobId").which.should.equal(job_id+"1") + job2.should.have.key("jobId").which.should.equal(job_id + "1") job2.should.have.key("jobArn") job2.should.have.key("description") @@ -1205,7 +1244,7 @@ def test_list_jobs(): jobs.should.have.key("jobs") jobs.should_not.have.key("nextToken") jobs["jobs"][0].should.have.key("jobId").which.should.equal(job_id) - jobs["jobs"][1].should.have.key("jobId").which.should.equal(job_id+"1") + jobs["jobs"][1].should.have.key("jobId").which.should.equal(job_id + "1") @mock_iot @@ -1297,14 +1336,21 @@ def test_describe_job_1(): job.should.have.key("job").which.should.have.key("lastUpdatedAt") job.should.have.key("job").which.should.have.key("createdAt") job.should.have.key("job").which.should.have.key("jobExecutionsRolloutConfig") - job.should.have.key("job").which.should.have.key("targetSelection").which.should.equal("CONTINUOUS") + job.should.have.key("job").which.should.have.key( + "targetSelection" + ).which.should.equal("CONTINUOUS") job.should.have.key("job").which.should.have.key("presignedUrlConfig") - job.should.have.key("job").which.should.have.key("presignedUrlConfig").which.should.have.key( - "roleArn").which.should.equal("arn:aws:iam::1:role/service-role/iot_job_role") - job.should.have.key("job").which.should.have.key("presignedUrlConfig").which.should.have.key( - "expiresInSec").which.should.equal(123) - job.should.have.key("job").which.should.have.key("jobExecutionsRolloutConfig").which.should.have.key( - "maximumPerMinute").which.should.equal(10) + job.should.have.key("job").which.should.have.key( + "presignedUrlConfig" + ).which.should.have.key("roleArn").which.should.equal( + "arn:aws:iam::1:role/service-role/iot_job_role" + ) + job.should.have.key("job").which.should.have.key( + "presignedUrlConfig" + ).which.should.have.key("expiresInSec").which.should.equal(123) + 
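# Annotation (not part of the patch): describe_job, as implemented in
# responses.py earlier in this patch, returns documentSource at the top level
# and everything else wrapped in a "job" object, which is why each assertion
# here unwraps job["job"] before checking an individual field.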
job.should.have.key("job").which.should.have.key( + "jobExecutionsRolloutConfig" + ).which.should.have.key("maximumPerMinute").which.should.equal(10) @mock_iot @@ -1323,12 +1369,10 @@ def test_delete_job(): documentSource="https://s3-eu-west-1.amazonaws.com/bucket-name/job_document.json", presignedUrlConfig={ "roleArn": "arn:aws:iam::1:role/service-role/iot_job_role", - "expiresInSec": 123 + "expiresInSec": 123, }, targetSelection="CONTINUOUS", - jobExecutionsRolloutConfig={ - "maximumPerMinute": 10 - } + jobExecutionsRolloutConfig={"maximumPerMinute": 10}, ) job.should.have.key("jobId").which.should.equal(job_id) @@ -1359,12 +1403,10 @@ def test_cancel_job(): documentSource="https://s3-eu-west-1.amazonaws.com/bucket-name/job_document.json", presignedUrlConfig={ "roleArn": "arn:aws:iam::1:role/service-role/iot_job_role", - "expiresInSec": 123 + "expiresInSec": 123, }, targetSelection="CONTINUOUS", - jobExecutionsRolloutConfig={ - "maximumPerMinute": 10 - } + jobExecutionsRolloutConfig={"maximumPerMinute": 10}, ) job.should.have.key("jobId").which.should.equal(job_id) @@ -1381,10 +1423,18 @@ def test_cancel_job(): job = client.describe_job(jobId=job_id) job.should.have.key("job") job.should.have.key("job").which.should.have.key("jobId").which.should.equal(job_id) - job.should.have.key("job").which.should.have.key("status").which.should.equal("CANCELED") - job.should.have.key("job").which.should.have.key("forceCanceled").which.should.equal(False) - job.should.have.key("job").which.should.have.key("reasonCode").which.should.equal("Because") - job.should.have.key("job").which.should.have.key("comment").which.should.equal("You are") + job.should.have.key("job").which.should.have.key("status").which.should.equal( + "CANCELED" + ) + job.should.have.key("job").which.should.have.key( + "forceCanceled" + ).which.should.equal(False) + job.should.have.key("job").which.should.have.key("reasonCode").which.should.equal( + "Because" + ) + job.should.have.key("job").which.should.have.key("comment").which.should.equal( + "You are" + ) @mock_iot @@ -1403,12 +1453,10 @@ def test_get_job_document_with_document_source(): documentSource="https://s3-eu-west-1.amazonaws.com/bucket-name/job_document.json", presignedUrlConfig={ "roleArn": "arn:aws:iam::1:role/service-role/iot_job_role", - "expiresInSec": 123 + "expiresInSec": 123, }, targetSelection="CONTINUOUS", - jobExecutionsRolloutConfig={ - "maximumPerMinute": 10 - } + jobExecutionsRolloutConfig={"maximumPerMinute": 10}, ) job.should.have.key("jobId").which.should.equal(job_id) @@ -1429,9 +1477,7 @@ def test_get_job_document_with_document(): thing.should.have.key("thingArn") # job document - job_document = { - "field": "value" - } + job_document = {"field": "value"} job = client.create_job( jobId=job_id, @@ -1439,19 +1485,17 @@ def test_get_job_document_with_document(): document=json.dumps(job_document), presignedUrlConfig={ "roleArn": "arn:aws:iam::1:role/service-role/iot_job_role", - "expiresInSec": 123 + "expiresInSec": 123, }, targetSelection="CONTINUOUS", - jobExecutionsRolloutConfig={ - "maximumPerMinute": 10 - } + jobExecutionsRolloutConfig={"maximumPerMinute": 10}, ) job.should.have.key("jobId").which.should.equal(job_id) job.should.have.key("jobArn") job_document = client.get_job_document(jobId=job_id) - job_document.should.have.key("document").which.should.equal("{\"field\": \"value\"}") + job_document.should.have.key("document").which.should.equal('{"field": "value"}') @mock_iot @@ -1465,9 +1509,7 @@ def test_describe_job_execution(): 
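# Annotation (not part of the patch): the chained style used throughout these
# tests comes from the `sure` library imported at the top of the file:
# .should.have.key("k") asserts membership, and the following .which exposes
# that key's value, so a dict check composes as, e.g.:
#
#     {"a": 1}.should.have.key("a").which.should.equal(1)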
thing.should.have.key("thingArn") # job document - job_document = { - "field": "value" - } + job_document = {"field": "value"} job = client.create_job( jobId=job_id, @@ -1476,12 +1518,10 @@ def test_describe_job_execution(): description="Description", presignedUrlConfig={ "roleArn": "arn:aws:iam::1:role/service-role/iot_job_role", - "expiresInSec": 123 + "expiresInSec": 123, }, targetSelection="CONTINUOUS", - jobExecutionsRolloutConfig={ - "maximumPerMinute": 10 - } + jobExecutionsRolloutConfig={"maximumPerMinute": 10}, ) job.should.have.key("jobId").which.should.equal(job_id) @@ -1492,29 +1532,51 @@ def test_describe_job_execution(): job_execution.should.have.key("execution") job_execution["execution"].should.have.key("jobId").which.should.equal(job_id) job_execution["execution"].should.have.key("status").which.should.equal("QUEUED") - job_execution["execution"].should.have.key("forceCanceled").which.should.equal(False) - job_execution["execution"].should.have.key("statusDetails").which.should.equal({"detailsMap": {}}) - job_execution["execution"].should.have.key("thingArn").which.should.equal(thing["thingArn"]) + job_execution["execution"].should.have.key("forceCanceled").which.should.equal( + False + ) + job_execution["execution"].should.have.key("statusDetails").which.should.equal( + {"detailsMap": {}} + ) + job_execution["execution"].should.have.key("thingArn").which.should.equal( + thing["thingArn"] + ) job_execution["execution"].should.have.key("queuedAt") job_execution["execution"].should.have.key("startedAt") job_execution["execution"].should.have.key("lastUpdatedAt") - job_execution["execution"].should.have.key("executionNumber").which.should.equal(123) + job_execution["execution"].should.have.key("executionNumber").which.should.equal( + 123 + ) job_execution["execution"].should.have.key("versionNumber").which.should.equal(123) - job_execution["execution"].should.have.key("approximateSecondsBeforeTimedOut").which.should.equal(123) + job_execution["execution"].should.have.key( + "approximateSecondsBeforeTimedOut" + ).which.should.equal(123) - job_execution = client.describe_job_execution(jobId=job_id, thingName=name, executionNumber=123) + job_execution = client.describe_job_execution( + jobId=job_id, thingName=name, executionNumber=123 + ) job_execution.should.have.key("execution") job_execution["execution"].should.have.key("jobId").which.should.equal(job_id) job_execution["execution"].should.have.key("status").which.should.equal("QUEUED") - job_execution["execution"].should.have.key("forceCanceled").which.should.equal(False) - job_execution["execution"].should.have.key("statusDetails").which.should.equal({"detailsMap": {}}) - job_execution["execution"].should.have.key("thingArn").which.should.equal(thing["thingArn"]) + job_execution["execution"].should.have.key("forceCanceled").which.should.equal( + False + ) + job_execution["execution"].should.have.key("statusDetails").which.should.equal( + {"detailsMap": {}} + ) + job_execution["execution"].should.have.key("thingArn").which.should.equal( + thing["thingArn"] + ) job_execution["execution"].should.have.key("queuedAt") job_execution["execution"].should.have.key("startedAt") job_execution["execution"].should.have.key("lastUpdatedAt") - job_execution["execution"].should.have.key("executionNumber").which.should.equal(123) + job_execution["execution"].should.have.key("executionNumber").which.should.equal( + 123 + ) job_execution["execution"].should.have.key("versionNumber").which.should.equal(123) - 
job_execution["execution"].should.have.key("approximateSecondsBeforeTimedOut").which.should.equal(123) + job_execution["execution"].should.have.key( + "approximateSecondsBeforeTimedOut" + ).which.should.equal(123) try: client.describe_job_execution(jobId=job_id, thingName=name, executionNumber=456) @@ -1536,9 +1598,7 @@ def test_cancel_job_execution(): thing.should.have.key("thingArn") # job document - job_document = { - "field": "value" - } + job_document = {"field": "value"} job = client.create_job( jobId=job_id, @@ -1547,12 +1607,10 @@ def test_cancel_job_execution(): description="Description", presignedUrlConfig={ "roleArn": "arn:aws:iam::1:role/service-role/iot_job_role", - "expiresInSec": 123 + "expiresInSec": 123, }, targetSelection="CONTINUOUS", - jobExecutionsRolloutConfig={ - "maximumPerMinute": 10 - } + jobExecutionsRolloutConfig={"maximumPerMinute": 10}, ) job.should.have.key("jobId").which.should.equal(job_id) @@ -1576,9 +1634,7 @@ def test_delete_job_execution(): thing.should.have.key("thingArn") # job document - job_document = { - "field": "value" - } + job_document = {"field": "value"} job = client.create_job( jobId=job_id, @@ -1587,12 +1643,10 @@ def test_delete_job_execution(): description="Description", presignedUrlConfig={ "roleArn": "arn:aws:iam::1:role/service-role/iot_job_role", - "expiresInSec": 123 + "expiresInSec": 123, }, targetSelection="CONTINUOUS", - jobExecutionsRolloutConfig={ - "maximumPerMinute": 10 - } + jobExecutionsRolloutConfig={"maximumPerMinute": 10}, ) job.should.have.key("jobId").which.should.equal(job_id) @@ -1620,9 +1674,7 @@ def test_list_job_executions_for_job(): thing.should.have.key("thingArn") # job document - job_document = { - "field": "value" - } + job_document = {"field": "value"} job = client.create_job( jobId=job_id, @@ -1631,12 +1683,10 @@ def test_list_job_executions_for_job(): description="Description", presignedUrlConfig={ "roleArn": "arn:aws:iam::1:role/service-role/iot_job_role", - "expiresInSec": 123 + "expiresInSec": 123, }, targetSelection="CONTINUOUS", - jobExecutionsRolloutConfig={ - "maximumPerMinute": 10 - } + jobExecutionsRolloutConfig={"maximumPerMinute": 10}, ) job.should.have.key("jobId").which.should.equal(job_id) @@ -1645,7 +1695,9 @@ def test_list_job_executions_for_job(): job_execution = client.list_job_executions_for_job(jobId=job_id) job_execution.should.have.key("executionSummaries") - job_execution["executionSummaries"][0].should.have.key("thingArn").which.should.equal(thing["thingArn"]) + job_execution["executionSummaries"][0].should.have.key( + "thingArn" + ).which.should.equal(thing["thingArn"]) @mock_iot @@ -1659,9 +1711,7 @@ def test_list_job_executions_for_thing(): thing.should.have.key("thingArn") # job document - job_document = { - "field": "value" - } + job_document = {"field": "value"} job = client.create_job( jobId=job_id, @@ -1670,12 +1720,10 @@ def test_list_job_executions_for_thing(): description="Description", presignedUrlConfig={ "roleArn": "arn:aws:iam::1:role/service-role/iot_job_role", - "expiresInSec": 123 + "expiresInSec": 123, }, targetSelection="CONTINUOUS", - jobExecutionsRolloutConfig={ - "maximumPerMinute": 10 - } + jobExecutionsRolloutConfig={"maximumPerMinute": 10}, ) job.should.have.key("jobId").which.should.equal(job_id) @@ -1684,5 +1732,6 @@ def test_list_job_executions_for_thing(): job_execution = client.list_job_executions_for_thing(thingName=name) job_execution.should.have.key("executionSummaries") - 
job_execution["executionSummaries"][0].should.have.key("jobId").which.should.equal(job_id) - + job_execution["executionSummaries"][0].should.have.key("jobId").which.should.equal( + job_id + ) From 4f86a9c3cd89ced8610153805a934eed05c3e659 Mon Sep 17 00:00:00 2001 From: Stephan Huber Date: Mon, 23 Dec 2019 10:24:56 +0100 Subject: [PATCH 035/658] replace CRLF line ending with LF --- file.tmp | 9 --------- travis_moto_server.sh | 8 ++++---- 2 files changed, 4 insertions(+), 13 deletions(-) delete mode 100644 file.tmp diff --git a/file.tmp b/file.tmp deleted file mode 100644 index 0b91630a9af7..000000000000 --- a/file.tmp +++ /dev/null @@ -1,9 +0,0 @@ - - AWSTemplateFormatVersion: '2010-09-09' - Description: Simple CloudFormation Test Template - Resources: - S3Bucket: - Type: AWS::S3::Bucket - Properties: - AccessControl: PublicRead - BucketName: cf-test-bucket-1 diff --git a/travis_moto_server.sh b/travis_moto_server.sh index 3c6947fd95d4..902644b20556 100755 --- a/travis_moto_server.sh +++ b/travis_moto_server.sh @@ -1,5 +1,5 @@ -#!/usr/bin/env bash -set -e -pip install flask -pip install /moto/dist/moto*.gz +#!/usr/bin/env bash +set -e +pip install flask +pip install /moto/dist/moto*.gz moto_server -H 0.0.0.0 -p 5000 \ No newline at end of file From 6cb0428d20871d7a8931664fb496932629f332d1 Mon Sep 17 00:00:00 2001 From: Bryan Alexander Date: Wed, 15 Jan 2020 10:41:54 -0600 Subject: [PATCH 036/658] adds tagging support for cloudwatch events service --- moto/events/models.py | 27 ++++++++++ moto/events/responses.py | 23 ++++++++ moto/utilities/tagging_service.py | 56 ++++++++++++++++++++ tests/test_events/test_events.py | 50 ++++++++++++++--- tests/test_utilities/test_tagging_service.py | 53 ++++++++++++++++++ 5 files changed, 201 insertions(+), 8 deletions(-) create mode 100644 moto/utilities/tagging_service.py create mode 100644 tests/test_utilities/test_tagging_service.py diff --git a/moto/events/models.py b/moto/events/models.py index 548d41393b7f..84a663b6df9f 100644 --- a/moto/events/models.py +++ b/moto/events/models.py @@ -6,6 +6,7 @@ from moto.core.exceptions import JsonRESTError from moto.core import BaseBackend, BaseModel from moto.sts.models import ACCOUNT_ID +from moto.utilities.tagging_service import TaggingService class Rule(BaseModel): @@ -104,6 +105,7 @@ def __init__(self, region_name): self.region_name = region_name self.event_buses = {} self.event_sources = {} + self.tagger = TaggingService() self._add_default_event_bus() @@ -360,7 +362,32 @@ def delete_event_bus(self, name): ) self.event_buses.pop(name, None) + + def list_tags_for_resource(self, arn): + name = arn.split('/')[-1] + if name in self.rules: + return self.tagger.list_tags_for_resource(self.rules[name].arn) + raise JsonRESTError( + "ResourceNotFoundException", "An entity that you specified does not exist." + ) + def tag_resource(self, arn, tags): + name = arn.split('/')[-1] + if name in self.rules: + self.tagger.tag_resource(self.rules[name].arn, tags) + return {} + raise JsonRESTError( + "ResourceNotFoundException", "An entity that you specified does not exist." + ) + + def untag_resource(self, arn, tag_names): + name = arn.split('/')[-1] + if name in self.rules: + self.tagger.untag_resource_using_names(self.rules[name].arn, tag_names) + return {} + raise JsonRESTError( + "ResourceNotFoundException", "An entity that you specified does not exist." 
+ ) events_backends = {} for region in Session().get_available_regions("events"): diff --git a/moto/events/responses.py b/moto/events/responses.py index b415564f8a88..68c2114a6af0 100644 --- a/moto/events/responses.py +++ b/moto/events/responses.py @@ -297,3 +297,26 @@ def delete_event_bus(self): self.events_backend.delete_event_bus(name) return "", self.response_headers + + def list_tags_for_resource(self): + arn = self._get_param("ResourceARN") + + result = self.events_backend.list_tags_for_resource(arn) + + return json.dumps(result), self.response_headers + + def tag_resource(self): + arn = self._get_param("ResourceARN") + tags = self._get_param("Tags") + + result = self.events_backend.tag_resource(arn, tags) + + return json.dumps(result), self.response_headers + + def untag_resource(self): + arn = self._get_param("ResourceARN") + tags = self._get_param("TagKeys") + + result = self.events_backend.untag_resource(arn, tags) + + return json.dumps(result), self.response_headers diff --git a/moto/utilities/tagging_service.py b/moto/utilities/tagging_service.py new file mode 100644 index 000000000000..5eae095ece8d --- /dev/null +++ b/moto/utilities/tagging_service.py @@ -0,0 +1,56 @@ +class TaggingService: + def __init__(self, tagName='Tags', keyName='Key', valueName='Value'): + self.tagName = tagName + self.keyName = keyName + self.valueName = valueName + self.tags = {} + + def list_tags_for_resource(self, arn): + result = [] + if arn in self.tags: + for k, v in self.tags[arn].items(): + result.append({self.keyName: k, self.valueName: v}) + return {self.tagName: result} + + def tag_resource(self, arn, tags): + if arn not in self.tags: + self.tags[arn] = {} + for t in tags: + if self.valueName in t: + self.tags[arn][t[self.keyName]] = t[self.valueName] + else: + self.tags[arn][t[self.keyName]] = None + + def untag_resource_using_names(self, arn, tag_names): + for name in tag_names: + if name in self.tags.get(arn, {}): + del self.tags[arn][name] + + def untag_resource_using_tags(self, arn, tags): + m = self.tags.get(arn, {}) + for t in tags: + if self.keyName in t: + if t[self.keyName] in m: + if self.valueName in t: + if m[t[self.keyName]] != t[self.valueName]: + continue + # If both key and value are provided, match both before deletion + del m[t[self.keyName]] + + def extract_tag_names(self, tags): + results = [] + if len(tags) == 0: + return results + for tag in tags: + if self.keyName in tag: + results.append(tag[self.keyName]) + return results + + def flatten_tag_list(self, tags): + result = {} + for t in tags: + if self.valueName in t: + result[t[self.keyName]] = t[self.valueName] + else: + result[t[self.keyName]] = None + return result diff --git a/tests/test_events/test_events.py b/tests/test_events/test_events.py index 14d872806baa..6e9ca3a03efd 100644 --- a/tests/test_events/test_events.py +++ b/tests/test_events/test_events.py @@ -5,8 +5,10 @@ from moto.events import mock_events from botocore.exceptions import ClientError +from moto.core.exceptions import JsonRESTError from nose.tools import assert_raises from moto.core import ACCOUNT_ID +from moto.events.models import EventsBackend RULES = [ {"Name": "test1", "ScheduleExpression": "rate(5 minutes)"}, @@ -136,14 +138,6 @@ def test_list_rule_names_by_target(): assert rule in test_2_target["Rules"] -@mock_events -def test_list_rules(): - client = generate_environment() - - rules = client.list_rules() - assert len(rules["Rules"]) == len(RULES) - - @mock_events def test_delete_rule(): client = generate_environment() @@ -461,3 +455,43 
@@ def test_delete_event_bus_errors(): client.delete_event_bus.when.called_with(Name="default").should.throw( ClientError, "Cannot delete event bus default." ) + +@mock_events +def test_rule_tagging_happy(): + client = generate_environment() + rule_name = get_random_rule()["Name"] + rule_arn = client.describe_rule(Name=rule_name).get("Arn") + + tags = [{"Key": "key1", "Value": "value1"}, {"Key": "key2", "Value": "value2"}] + client.tag_resource(ResourceARN=rule_arn, Tags=tags) + + actual = client.list_tags_for_resource(ResourceARN=rule_arn).get("Tags") + assert tags == actual + + client.untag_resource(ResourceARN=rule_arn, TagKeys=["key1"]) + + actual = client.list_tags_for_resource(ResourceARN=rule_arn).get("Tags") + expected = [{"Key": "key2", "Value": "value2"}] + assert expected == actual + +@mock_events +def test_rule_tagging_sad(): + b = EventsBackend("us-west-2") + + try: + b.tag_resource('unknown', []) + raise 'tag_resource should fail if ResourceARN is not known' + except JsonRESTError: + pass + + try: + b.untag_resource('unknown', []) + raise 'untag_resource should fail if ResourceARN is not known' + except JsonRESTError: + pass + + try: + b.list_tags_for_resource('unknown') + raise 'list_tags_for_resource should fail if ResourceARN is not known' + except JsonRESTError: + pass \ No newline at end of file diff --git a/tests/test_utilities/test_tagging_service.py b/tests/test_utilities/test_tagging_service.py new file mode 100644 index 000000000000..94415cb2aca5 --- /dev/null +++ b/tests/test_utilities/test_tagging_service.py @@ -0,0 +1,53 @@ +import unittest + +from moto.utilities.tagging_service import TaggingService + + +class TestTaggingService(unittest.TestCase): + def test_list_empty(self): + svc = TaggingService() + result = svc.list_tags_for_resource('test') + self.assertEqual(result, {'Tags': []}) + + def test_create_tag(self): + svc = TaggingService('TheTags', 'TagKey', 'TagValue') + tags = [{'TagKey': 'key_key', 'TagValue': 'value_value'}] + svc.tag_resource('arn', tags) + actual = svc.list_tags_for_resource('arn') + expected = {'TheTags': [{'TagKey': 'key_key', 'TagValue': 'value_value'}]} + self.assertDictEqual(expected, actual) + + def test_create_tag_without_value(self): + svc = TaggingService() + tags = [{'Key': 'key_key'}] + svc.tag_resource('arn', tags) + actual = svc.list_tags_for_resource('arn') + expected = {'Tags': [{'Key': 'key_key', 'Value': ''}]} + self.assertDictEqual(expected, actual) + + def test_delete_tag(self): + svc = TaggingService() + tags = [{'Key': 'key_key', 'Value': 'value_value'}] + svc.tag_resource('arn', tags) + svc.untag_resource('arn', ['key_key']) + result = svc.list_tags_for_resource('arn') + self.assertEqual( + result, {'Tags': []}) + + def test_list_empty_delete(self): + svc = TaggingService() + svc.untag_resource('arn', ['key_key']) + result = svc.list_tags_for_resource('arn') + self.assertEqual( + result, {'Tags': []}) + + def test_extract_tag_names(self): + svc = TaggingService() + tags = [{'Key': 'key1', 'Value': 'value1'}, {'Key': 'key2', 'Value': 'value2'}] + actual = svc.extract_tag_names(tags) + expected = ['key1', 'key2'] + self.assertEqual(expected, actual) + + +if __name__ == '__main__': + unittest.main() From 85207b885b69ec0fb217cc074bd471a8bde98e05 Mon Sep 17 00:00:00 2001 From: Bryan Alexander Date: Thu, 16 Jan 2020 12:10:38 -0600 Subject: [PATCH 037/658] updates KMS service to use TaggingService --- moto/kms/models.py | 45 +++++++++++++++++++-------- moto/kms/responses.py | 18 ++++++++--- tests/test_kms/test_kms.py | 59 
+++++++++++++++++++++--------------- tests/test_kms/test_utils.py | 8 ++--- 4 files changed, 85 insertions(+), 45 deletions(-) diff --git a/moto/kms/models.py b/moto/kms/models.py index 22f0039b2047..32fcd23aeb94 100644 --- a/moto/kms/models.py +++ b/moto/kms/models.py @@ -7,13 +7,14 @@ from boto3 import Session from moto.core import BaseBackend, BaseModel +from moto.core.exceptions import JsonRESTError from moto.core.utils import iso_8601_datetime_without_milliseconds - +from moto.utilities.tagging_service import TaggingService from .utils import decrypt, encrypt, generate_key_id, generate_master_key class Key(BaseModel): - def __init__(self, policy, key_usage, description, tags, region): + def __init__(self, policy, key_usage, description, region): self.id = generate_key_id() self.policy = policy self.key_usage = key_usage @@ -24,7 +25,6 @@ def __init__(self, policy, key_usage, description, tags, region): self.account_id = "012345678912" self.key_rotation_status = False self.deletion_date = None - self.tags = tags or {} self.key_material = generate_master_key() @property @@ -70,11 +70,12 @@ def create_from_cloudformation_json( policy=properties["KeyPolicy"], key_usage="ENCRYPT_DECRYPT", description=properties["Description"], - tags=properties.get("Tags"), region=region_name, ) key.key_rotation_status = properties["EnableKeyRotation"] key.enabled = properties["Enabled"] + kms_backend.tag_resource(key.id, properties.get("Tags")) + return key def get_cfn_attribute(self, attribute_name): @@ -89,24 +90,19 @@ class KmsBackend(BaseBackend): def __init__(self): self.keys = {} self.key_to_aliases = defaultdict(set) + self.tagger = TaggingService(keyName='TagKey', valueName='TagValue') def create_key(self, policy, key_usage, description, tags, region): - key = Key(policy, key_usage, description, tags, region) + key = Key(policy, key_usage, description, region) self.keys[key.id] = key + if tags != None and len(tags) > 0: + self.tag_resource(key.id, tags) return key def update_key_description(self, key_id, description): key = self.keys[self.get_key_id(key_id)] key.description = description - def tag_resource(self, key_id, tags): - key = self.keys[self.get_key_id(key_id)] - key.tags = tags - - def list_resource_tags(self, key_id): - key = self.keys[self.get_key_id(key_id)] - return key.tags - def delete_key(self, key_id): if key_id in self.keys: if key_id in self.key_to_aliases: @@ -282,6 +278,29 @@ def generate_data_key( return plaintext, ciphertext_blob, arn + def list_resource_tags(self, key_id): + if key_id in self.keys: + return self.tagger.list_tags_for_resource(key_id) + raise JsonRESTError( + "NotFoundException", "The request was rejected because the specified entity or resource could not be found." + ) + + def tag_resource(self, key_id, tags): + if key_id in self.keys: + self.tagger.tag_resource(key_id, tags) + return {} + raise JsonRESTError( + "NotFoundException", "The request was rejected because the specified entity or resource could not be found." + ) + + def untag_resource(self, key_id, tag_names): + if key_id in self.keys: + self.tagger.untag_resource_using_names(key_id, tag_names) + return {} + raise JsonRESTError( + "NotFoundException", "The request was rejected because the specified entity or resource could not be found." 
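# The same flow at the KMS client level, mirroring the updated tests below;
# note the backend's tagger is constructed with KMS's "TagKey"/"TagValue"
# names instead of the TaggingService defaults. Names here are illustrative.
import boto3
from moto import mock_kms


@mock_kms
def demo_key_tagging():
    client = boto3.client("kms", region_name="us-east-1")
    key_id = client.create_key(Description="demo")["KeyMetadata"]["KeyId"]
    client.tag_resource(KeyId=key_id, Tags=[{"TagKey": "env", "TagValue": "test"}])
    assert client.list_resource_tags(KeyId=key_id)["Tags"] == [
        {"TagKey": "env", "TagValue": "test"}
    ]
    client.untag_resource(KeyId=key_id, TagKeys=["env"])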
+ ) + kms_backends = {} for region in Session().get_available_regions("kms"): diff --git a/moto/kms/responses.py b/moto/kms/responses.py index d3a9726e1b61..3658f0d37cec 100644 --- a/moto/kms/responses.py +++ b/moto/kms/responses.py @@ -143,17 +143,27 @@ def tag_resource(self): self._validate_cmk_id(key_id) - self.kms_backend.tag_resource(key_id, tags) - return json.dumps({}) + result = self.kms_backend.tag_resource(key_id, tags) + return json.dumps(result) + + def untag_resource(self): + """https://docs.aws.amazon.com/kms/latest/APIReference/API_UntagResource.html""" + key_id = self.parameters.get("KeyId") + tag_names = self.parameters.get("TagKeys") + + self._validate_cmk_id(key_id) + + result = self.kms_backend.untag_resource(key_id, tag_names) + return json.dumps(result) def list_resource_tags(self): """https://docs.aws.amazon.com/kms/latest/APIReference/API_ListResourceTags.html""" key_id = self.parameters.get("KeyId") - self._validate_cmk_id(key_id) tags = self.kms_backend.list_resource_tags(key_id) - return json.dumps({"Tags": tags, "NextMarker": None, "Truncated": False}) + tags.update({"NextMarker": None, "Truncated": False}) + return json.dumps(tags) def describe_key(self): """https://docs.aws.amazon.com/kms/latest/APIReference/API_DescribeKey.html""" diff --git a/tests/test_kms/test_kms.py b/tests/test_kms/test_kms.py index 70fa687874ed..6a35ee2c811b 100644 --- a/tests/test_kms/test_kms.py +++ b/tests/test_kms/test_kms.py @@ -17,7 +17,8 @@ from freezegun import freeze_time from nose.tools import assert_raises from parameterized import parameterized - +from moto.core.exceptions import JsonRESTError +from moto.kms.models import KmsBackend from moto.kms.exceptions import NotFoundException as MotoNotFoundException from moto import mock_kms, mock_kms_deprecated @@ -910,36 +911,46 @@ def test_update_key_description(): result = client.update_key_description(KeyId=key_id, Description="new_description") assert "ResponseMetadata" in result - @mock_kms -def test_tag_resource(): +def test_key_tagging_happy(): client = boto3.client("kms", region_name="us-east-1") - key = client.create_key(Description="cancel-key-deletion") - response = client.schedule_key_deletion(KeyId=key["KeyMetadata"]["KeyId"]) + key = client.create_key(Description="test-key-tagging") + key_id = key["KeyMetadata"]["KeyId"] - keyid = response["KeyId"] - response = client.tag_resource( - KeyId=keyid, Tags=[{"TagKey": "string", "TagValue": "string"}] - ) + tags = [{"TagKey": "key1", "TagValue": "value1"}, {"TagKey": "key2", "TagValue": "value2"}] + client.tag_resource(KeyId=key_id, Tags=tags) - # Shouldn't have any data, just header - assert len(response.keys()) == 1 + result = client.list_resource_tags(KeyId=key_id) + actual = result.get("Tags", []) + assert tags == actual + client.untag_resource(KeyId=key_id, TagKeys=["key1"]) -@mock_kms -def test_list_resource_tags(): - client = boto3.client("kms", region_name="us-east-1") - key = client.create_key(Description="cancel-key-deletion") - response = client.schedule_key_deletion(KeyId=key["KeyMetadata"]["KeyId"]) + actual = client.list_resource_tags(KeyId=key_id).get("Tags", []) + expected = [{"TagKey": "key2", "TagValue": "value2"}] + assert expected == actual - keyid = response["KeyId"] - response = client.tag_resource( - KeyId=keyid, Tags=[{"TagKey": "string", "TagValue": "string"}] - ) - - response = client.list_resource_tags(KeyId=keyid) - assert response["Tags"][0]["TagKey"] == "string" - assert response["Tags"][0]["TagValue"] == "string" +@mock_kms +def 
test_key_tagging_sad(): + b = KmsBackend() + + try: + b.tag_resource('unknown', []) + raise 'tag_resource should fail if KeyId is not known' + except JsonRESTError: + pass + + try: + b.untag_resource('unknown', []) + raise 'untag_resource should fail if KeyId is not known' + except JsonRESTError: + pass + + try: + b.list_resource_tags('unknown') + raise 'list_resource_tags should fail if KeyId is not known' + except JsonRESTError: + pass @parameterized( diff --git a/tests/test_kms/test_utils.py b/tests/test_kms/test_utils.py index f5478e0ef078..29ea969b595d 100644 --- a/tests/test_kms/test_utils.py +++ b/tests/test_kms/test_utils.py @@ -102,7 +102,7 @@ def test_deserialize_ciphertext_blob(raw, serialized): @parameterized(((ec[0],) for ec in ENCRYPTION_CONTEXT_VECTORS)) def test_encrypt_decrypt_cycle(encryption_context): plaintext = b"some secret plaintext" - master_key = Key("nop", "nop", "nop", [], "nop") + master_key = Key("nop", "nop", "nop", "nop") master_key_map = {master_key.id: master_key} ciphertext_blob = encrypt( @@ -133,7 +133,7 @@ def test_encrypt_unknown_key_id(): def test_decrypt_invalid_ciphertext_format(): - master_key = Key("nop", "nop", "nop", [], "nop") + master_key = Key("nop", "nop", "nop", "nop") master_key_map = {master_key.id: master_key} with assert_raises(InvalidCiphertextException): @@ -153,7 +153,7 @@ def test_decrypt_unknwown_key_id(): def test_decrypt_invalid_ciphertext(): - master_key = Key("nop", "nop", "nop", [], "nop") + master_key = Key("nop", "nop", "nop", "nop") master_key_map = {master_key.id: master_key} ciphertext_blob = ( master_key.id.encode("utf-8") + b"123456789012" @@ -171,7 +171,7 @@ def test_decrypt_invalid_ciphertext(): def test_decrypt_invalid_encryption_context(): plaintext = b"some secret plaintext" - master_key = Key("nop", "nop", "nop", [], "nop") + master_key = Key("nop", "nop", "nop", "nop") master_key_map = {master_key.id: master_key} ciphertext_blob = encrypt( From eaa8c8db6e77b93f12ee90f2aa0fed483ce45d30 Mon Sep 17 00:00:00 2001 From: Brady Date: Thu, 16 Jan 2020 21:00:24 -0500 Subject: [PATCH 038/658] add tagging support to events --- moto/events/models.py | 28 ++++++++ moto/events/responses.py | 23 ++++++ moto/utilities/__init__.py | 0 moto/utilities/tagging_service.py | 56 +++++++++++++++ tests/test_events/test_events.py | 74 ++++++++++++++++---- tests/test_utilities/test_tagging_service.py | 59 ++++++++++++++++ 6 files changed, 228 insertions(+), 12 deletions(-) create mode 100644 moto/utilities/__init__.py create mode 100644 moto/utilities/tagging_service.py create mode 100644 tests/test_utilities/test_tagging_service.py diff --git a/moto/events/models.py b/moto/events/models.py index 548d41393b7f..695cfb17a12d 100644 --- a/moto/events/models.py +++ b/moto/events/models.py @@ -6,6 +6,7 @@ from moto.core.exceptions import JsonRESTError from moto.core import BaseBackend, BaseModel from moto.sts.models import ACCOUNT_ID +from moto.utilities.tagging_service import TaggingService class Rule(BaseModel): @@ -104,6 +105,7 @@ def __init__(self, region_name): self.region_name = region_name self.event_buses = {} self.event_sources = {} + self.tagger = TaggingService() self._add_default_event_bus() @@ -361,6 +363,32 @@ def delete_event_bus(self, name): self.event_buses.pop(name, None) + def list_tags_for_resource(self, arn): + name = arn.split("/")[-1] + if name in self.rules: + return self.tagger.list_tags_for_resource(self.rules[name].arn) + raise JsonRESTError( + "ResourceNotFoundException", "An entity that you specified does not 
exist." + ) + + def tag_resource(self, arn, tags): + name = arn.split("/")[-1] + if name in self.rules: + self.tagger.tag_resource(self.rules[name].arn, tags) + return {} + raise JsonRESTError( + "ResourceNotFoundException", "An entity that you specified does not exist." + ) + + def untag_resource(self, arn, tag_names): + name = arn.split("/")[-1] + if name in self.rules: + self.tagger.untag_resource_using_names(self.rules[name].arn, tag_names) + return {} + raise JsonRESTError( + "ResourceNotFoundException", "An entity that you specified does not exist." + ) + events_backends = {} for region in Session().get_available_regions("events"): diff --git a/moto/events/responses.py b/moto/events/responses.py index b415564f8a88..68c2114a6af0 100644 --- a/moto/events/responses.py +++ b/moto/events/responses.py @@ -297,3 +297,26 @@ def delete_event_bus(self): self.events_backend.delete_event_bus(name) return "", self.response_headers + + def list_tags_for_resource(self): + arn = self._get_param("ResourceARN") + + result = self.events_backend.list_tags_for_resource(arn) + + return json.dumps(result), self.response_headers + + def tag_resource(self): + arn = self._get_param("ResourceARN") + tags = self._get_param("Tags") + + result = self.events_backend.tag_resource(arn, tags) + + return json.dumps(result), self.response_headers + + def untag_resource(self): + arn = self._get_param("ResourceARN") + tags = self._get_param("TagKeys") + + result = self.events_backend.untag_resource(arn, tags) + + return json.dumps(result), self.response_headers diff --git a/moto/utilities/__init__.py b/moto/utilities/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/moto/utilities/tagging_service.py b/moto/utilities/tagging_service.py new file mode 100644 index 000000000000..8c7a86f1d768 --- /dev/null +++ b/moto/utilities/tagging_service.py @@ -0,0 +1,56 @@ +class TaggingService: + def __init__(self, tagName="Tags", keyName="Key", valueName="Value"): + self.tagName = tagName + self.keyName = keyName + self.valueName = valueName + self.tags = {} + + def list_tags_for_resource(self, arn): + result = [] + if arn in self.tags: + for k, v in self.tags[arn].items(): + result.append({self.keyName: k, self.valueName: v}) + return {self.tagName: result} + + def tag_resource(self, arn, tags): + if arn not in self.tags: + self.tags[arn] = {} + for t in tags: + if self.valueName in t: + self.tags[arn][t[self.keyName]] = t[self.valueName] + else: + self.tags[arn][t[self.keyName]] = None + + def untag_resource_using_names(self, arn, tag_names): + for name in tag_names: + if name in self.tags.get(arn, {}): + del self.tags[arn][name] + + def untag_resource_using_tags(self, arn, tags): + m = self.tags.get(arn, {}) + for t in tags: + if self.keyName in t: + if t[self.keyName] in m: + if self.valueName in t: + if m[t[self.keyName]] != t[self.valueName]: + continue + # If both key and value are provided, match both before deletion + del m[t[self.keyName]] + + def extract_tag_names(self, tags): + results = [] + if len(tags) == 0: + return results + for tag in tags: + if self.keyName in tag: + results.append(tag[self.keyName]) + return results + + def flatten_tag_list(self, tags): + result = {} + for t in tags: + if self.valueName in t: + result[t[self.keyName]] = t[self.valueName] + else: + result[t[self.keyName]] = None + return result diff --git a/tests/test_events/test_events.py b/tests/test_events/test_events.py index 14d872806baa..d276a1705c1b 100644 --- a/tests/test_events/test_events.py +++ 
b/tests/test_events/test_events.py @@ -1,12 +1,15 @@ -import random -import boto3 import json -import sure # noqa +import random +import unittest -from moto.events import mock_events +import boto3 from botocore.exceptions import ClientError from nose.tools import assert_raises + from moto.core import ACCOUNT_ID +from moto.core.exceptions import JsonRESTError +from moto.events import mock_events +from moto.events.models import EventsBackend RULES = [ {"Name": "test1", "ScheduleExpression": "rate(5 minutes)"}, @@ -136,14 +139,6 @@ def test_list_rule_names_by_target(): assert rule in test_2_target["Rules"] -@mock_events -def test_list_rules(): - client = generate_environment() - - rules = client.list_rules() - assert len(rules["Rules"]) == len(RULES) - - @mock_events def test_delete_rule(): client = generate_environment() @@ -461,3 +456,58 @@ def test_delete_event_bus_errors(): client.delete_event_bus.when.called_with(Name="default").should.throw( ClientError, "Cannot delete event bus default." ) + + +@mock_events +def test_rule_tagging_happy(): + client = generate_environment() + rule_name = get_random_rule()["Name"] + rule_arn = client.describe_rule(Name=rule_name).get("Arn") + + tags = [{"Key": "key1", "Value": "value1"}, {"Key": "key2", "Value": "value2"}] + client.tag_resource(ResourceARN=rule_arn, Tags=tags) + + actual = client.list_tags_for_resource(ResourceARN=rule_arn).get("Tags") + tc = unittest.TestCase("__init__") + expected = [{"Value": "value1", "Key": "key1"}, {"Value": "value2", "Key": "key2"}] + tc.assertTrue( + (expected[0] == actual[0] and expected[1] == actual[1]) + or (expected[1] == actual[0] and expected[0] == actual[1]) + ) + + client.untag_resource(ResourceARN=rule_arn, TagKeys=["key1"]) + + actual = client.list_tags_for_resource(ResourceARN=rule_arn).get("Tags") + expected = [{"Key": "key2", "Value": "value2"}] + assert expected == actual + + +def freeze_dict(obj): + if isinstance(obj, dict): + dict_items = list(obj.items()) + dict_items.append(("__frozen__", True)) + return tuple([(k, freeze_dict(v)) for k, v in dict_items]) + return obj + + +@mock_events +def test_rule_tagging_sad(): + b = EventsBackend("us-west-2") + + try: + b.tag_resource("unknown", []) + raise "tag_resource should fail if ResourceARN is not known" + except JsonRESTError: + pass + + try: + b.untag_resource("unknown", []) + raise "untag_resource should fail if ResourceARN is not known" + except JsonRESTError: + pass + + try: + b.list_tags_for_resource("unknown") + raise "list_tags_for_resource should fail if ResourceARN is not known" + except JsonRESTError: + pass diff --git a/tests/test_utilities/test_tagging_service.py b/tests/test_utilities/test_tagging_service.py new file mode 100644 index 000000000000..1cd820a194ab --- /dev/null +++ b/tests/test_utilities/test_tagging_service.py @@ -0,0 +1,59 @@ +import unittest + +from moto.utilities.tagging_service import TaggingService + + +class TestTaggingService(unittest.TestCase): + def test_list_empty(self): + svc = TaggingService() + result = svc.list_tags_for_resource("test") + self.assertEqual(result, {"Tags": []}) + + def test_create_tag(self): + svc = TaggingService("TheTags", "TagKey", "TagValue") + tags = [{"TagKey": "key_key", "TagValue": "value_value"}] + svc.tag_resource("arn", tags) + actual = svc.list_tags_for_resource("arn") + expected = {"TheTags": [{"TagKey": "key_key", "TagValue": "value_value"}]} + self.assertDictEqual(expected, actual) + + def test_create_tag_without_value(self): + svc = TaggingService() + tags = [{"Key": 
"key_key"}] + svc.tag_resource("arn", tags) + actual = svc.list_tags_for_resource("arn") + expected = {"Tags": [{"Key": "key_key", "Value": None}]} + self.assertDictEqual(expected, actual) + + def test_delete_tag_using_names(self): + svc = TaggingService() + tags = [{"Key": "key_key", "Value": "value_value"}] + svc.tag_resource("arn", tags) + svc.untag_resource_using_names("arn", ["key_key"]) + result = svc.list_tags_for_resource("arn") + self.assertEqual(result, {"Tags": []}) + + def test_list_empty_delete(self): + svc = TaggingService() + svc.untag_resource_using_names("arn", ["key_key"]) + result = svc.list_tags_for_resource("arn") + self.assertEqual(result, {"Tags": []}) + + def test_delete_tag_using_tags(self): + svc = TaggingService() + tags = [{"Key": "key_key", "Value": "value_value"}] + svc.tag_resource("arn", tags) + svc.untag_resource_using_tags("arn", tags) + result = svc.list_tags_for_resource("arn") + self.assertEqual(result, {"Tags": []}) + + def test_extract_tag_names(self): + svc = TaggingService() + tags = [{"Key": "key1", "Value": "value1"}, {"Key": "key2", "Value": "value2"}] + actual = svc.extract_tag_names(tags) + expected = ["key1", "key2"] + self.assertEqual(expected, actual) + + +if __name__ == "__main__": + unittest.main() From 1e851fb1d8b328b18a54702d31a44423138b1e83 Mon Sep 17 00:00:00 2001 From: Brady Date: Fri, 17 Jan 2020 10:12:58 -0500 Subject: [PATCH 039/658] remove dead code --- tests/test_events/test_events.py | 8 -------- 1 file changed, 8 deletions(-) diff --git a/tests/test_events/test_events.py b/tests/test_events/test_events.py index d276a1705c1b..4fb3b4029bcc 100644 --- a/tests/test_events/test_events.py +++ b/tests/test_events/test_events.py @@ -482,14 +482,6 @@ def test_rule_tagging_happy(): assert expected == actual -def freeze_dict(obj): - if isinstance(obj, dict): - dict_items = list(obj.items()) - dict_items.append(("__frozen__", True)) - return tuple([(k, freeze_dict(v)) for k, v in dict_items]) - return obj - - @mock_events def test_rule_tagging_sad(): b = EventsBackend("us-west-2") From 20020c51709b8312208113a161d9bb01858ab4fd Mon Sep 17 00:00:00 2001 From: Stephan Huber Date: Thu, 23 Jan 2020 15:46:50 +0100 Subject: [PATCH 040/658] Change whitespaces back to the way they were and fix typos --- moto/iot/models.py | 4 +- .../single_instance_with_ebs_volume.py | 520 +++++++++--------- 2 files changed, 262 insertions(+), 262 deletions(-) diff --git a/moto/iot/models.py b/moto/iot/models.py index eeaef18961dd..de4383b964c1 100644 --- a/moto/iot/models.py +++ b/moto/iot/models.py @@ -97,7 +97,7 @@ def __init__( break # if parent arn found (should always be found) if parent_thing_group_structure: - # copy parent"s rootToParentThingGroups + # copy parent's rootToParentThingGroups if "rootToParentThingGroups" in parent_thing_group_structure.metadata: self.metadata["rootToParentThingGroups"].extend( parent_thing_group_structure.metadata["rootToParentThingGroups"] @@ -424,7 +424,7 @@ def create_thing_type(self, thing_type_name, thing_type_properties): def list_thing_types(self, thing_type_name=None): if thing_type_name: - # It"s weird but thing_type_name is filtered by forward match, not complete match + # It's weird but thing_type_name is filtered by forward match, not complete match return [ _ for _ in self.thing_types.values() diff --git a/tests/test_cloudformation/fixtures/single_instance_with_ebs_volume.py b/tests/test_cloudformation/fixtures/single_instance_with_ebs_volume.py index 8226b5ad3650..7962d2c56efd 100644 --- 
a/tests/test_cloudformation/fixtures/single_instance_with_ebs_volume.py +++ b/tests/test_cloudformation/fixtures/single_instance_with_ebs_volume.py @@ -1,260 +1,260 @@ -from __future__ import unicode_literals - -template = { - "Description": "AWS CloudFormation Sample Template Gollum_Single_Instance_With_EBS_Volume: Gollum is a simple wiki system built on top of Git that powers GitHub Wikis. This template installs a Gollum Wiki stack on a single EC2 instance with an EBS volume for storage and demonstrates using the AWS CloudFormation bootstrap scripts to install the packages and files necessary at instance launch time. **WARNING** This template creates an Amazon EC2 instance and an EBS volume. You will be billed for the AWS resources used if you create a stack from this template.", - "Parameters": { - "SSHLocation": { - "ConstraintDescription": "must be a valid IP CIDR range of the form x.x.x.x/x.", - "Description": "The IP address range that can be used to SSH to the EC2 instances", - "Default": "0.0.0.0/0", - "MinLength": "9", - "AllowedPattern": "(\\d{1,3})\\.(\\d{1,3})\\.(\\d{1,3})\\.(\\d{1,3})/(\\d{1,2})", - "MaxLength": "18", - "Type": "String", - }, - "KeyName": { - "Type": "String", - "Description": "Name of an existing EC2 KeyPair to enable SSH access to the instances", - "MinLength": "1", - "AllowedPattern": "[\\x20-\\x7E]*", - "MaxLength": "255", - "ConstraintDescription": "can contain only ASCII characters.", - }, - "InstanceType": { - "Default": "m1.small", - "ConstraintDescription": "must be a valid EC2 instance type.", - "Type": "String", - "Description": "WebServer EC2 instance type", - "AllowedValues": [ - "t1.micro", - "m1.small", - "m1.medium", - "m1.large", - "m1.xlarge", - "m2.xlarge", - "m2.2xlarge", - "m2.4xlarge", - "m3.xlarge", - "m3.2xlarge", - "c1.medium", - "c1.xlarge", - "cc1.4xlarge", - "cc2.8xlarge", - "cg1.4xlarge", - ], - }, - "VolumeSize": { - "Description": "WebServer EC2 instance type", - "Default": "5", - "Type": "Number", - "MaxValue": "1024", - "MinValue": "5", - "ConstraintDescription": "must be between 5 and 1024 Gb.", - }, - }, - "AWSTemplateFormatVersion": "2010-09-09", - "Outputs": { - "WebsiteURL": { - "Description": "URL for Gollum wiki", - "Value": { - "Fn::Join": [ - "", - ["http://", {"Fn::GetAtt": ["WebServer", "PublicDnsName"]}], - ] - }, - } - }, - "Resources": { - "WebServerSecurityGroup": { - "Type": "AWS::EC2::SecurityGroup", - "Properties": { - "SecurityGroupIngress": [ - { - "ToPort": "80", - "IpProtocol": "tcp", - "CidrIp": "0.0.0.0/0", - "FromPort": "80", - }, - { - "ToPort": "22", - "IpProtocol": "tcp", - "CidrIp": {"Ref": "SSHLocation"}, - "FromPort": "22", - }, - ], - "GroupDescription": "Enable SSH access and HTTP access on the inbound port", - }, - }, - "WebServer": { - "Type": "AWS::EC2::Instance", - "Properties": { - "UserData": { - "Fn::Base64": { - "Fn::Join": [ - "", - [ - "#!/bin/bash -v\n", - "yum update -y aws-cfn-bootstrap\n", - "# Helper function\n", - "function error_exit\n", - "{\n", - ' /opt/aws/bin/cfn-signal -e 1 -r "$1" \'', - {"Ref": "WaitHandle"}, - "'\n", - " exit 1\n", - "}\n", - "# Install Rails packages\n", - "/opt/aws/bin/cfn-init -s ", - {"Ref": "AWS::StackId"}, - " -r WebServer ", - " --region ", - {"Ref": "AWS::Region"}, - " || error_exit 'Failed to run cfn-init'\n", - "# Wait for the EBS volume to show up\n", - "while [ ! 
-e /dev/sdh ]; do echo Waiting for EBS volume to attach; sleep 5; done\n", - "# Format the EBS volume and mount it\n", - "mkdir /var/wikidata\n", - "/sbin/mkfs -t ext3 /dev/sdh1\n", - "mount /dev/sdh1 /var/wikidata\n", - "# Initialize the wiki and fire up the server\n", - "cd /var/wikidata\n", - "git init\n", - "gollum --port 80 --host 0.0.0.0 &\n", - "# If all is well so signal success\n", - '/opt/aws/bin/cfn-signal -e $? -r "Rails application setup complete" \'', - {"Ref": "WaitHandle"}, - "'\n", - ], - ] - } - }, - "KeyName": {"Ref": "KeyName"}, - "SecurityGroups": [{"Ref": "WebServerSecurityGroup"}], - "InstanceType": {"Ref": "InstanceType"}, - "ImageId": { - "Fn::FindInMap": [ - "AWSRegionArch2AMI", - {"Ref": "AWS::Region"}, - { - "Fn::FindInMap": [ - "AWSInstanceType2Arch", - {"Ref": "InstanceType"}, - "Arch", - ] - }, - ] - }, - }, - "Metadata": { - "AWS::CloudFormation::Init": { - "config": { - "packages": { - "rubygems": { - "nokogiri": ["1.5.10"], - "rdiscount": [], - "gollum": ["1.1.1"], - }, - "yum": { - "libxslt-devel": [], - "gcc": [], - "git": [], - "rubygems": [], - "ruby-devel": [], - "ruby-rdoc": [], - "make": [], - "libxml2-devel": [], - }, - } - } - } - }, - }, - "DataVolume": { - "Type": "AWS::EC2::Volume", - "Properties": { - "Tags": [{"Value": "Gollum Data Volume", "Key": "Usage"}], - "AvailabilityZone": {"Fn::GetAtt": ["WebServer", "AvailabilityZone"]}, - "Size": "100", - }, - }, - "MountPoint": { - "Type": "AWS::EC2::VolumeAttachment", - "Properties": { - "InstanceId": {"Ref": "WebServer"}, - "Device": "/dev/sdh", - "VolumeId": {"Ref": "DataVolume"}, - }, - }, - "WaitCondition": { - "DependsOn": "MountPoint", - "Type": "AWS::CloudFormation::WaitCondition", - "Properties": {"Handle": {"Ref": "WaitHandle"}, "Timeout": "300"}, - "Metadata": { - "Comment1": "Note that the WaitCondition is dependent on the volume mount point allowing the volume to be created and attached to the EC2 instance", - "Comment2": "The instance bootstrap script waits for the volume to be attached to the instance prior to installing Gollum and signalling completion", - }, - }, - "WaitHandle": {"Type": "AWS::CloudFormation::WaitConditionHandle"}, - }, - "Mappings": { - "AWSInstanceType2Arch": { - "m3.2xlarge": {"Arch": "64"}, - "m2.2xlarge": {"Arch": "64"}, - "m1.small": {"Arch": "64"}, - "c1.medium": {"Arch": "64"}, - "cg1.4xlarge": {"Arch": "64HVM"}, - "m2.xlarge": {"Arch": "64"}, - "t1.micro": {"Arch": "64"}, - "cc1.4xlarge": {"Arch": "64HVM"}, - "m1.medium": {"Arch": "64"}, - "cc2.8xlarge": {"Arch": "64HVM"}, - "m1.large": {"Arch": "64"}, - "m1.xlarge": {"Arch": "64"}, - "m2.4xlarge": {"Arch": "64"}, - "c1.xlarge": {"Arch": "64"}, - "m3.xlarge": {"Arch": "64"}, - }, - "AWSRegionArch2AMI": { - "ap-southeast-1": { - "64HVM": "NOT_YET_SUPPORTED", - "32": "ami-b4b0cae6", - "64": "ami-beb0caec", - }, - "ap-southeast-2": { - "64HVM": "NOT_YET_SUPPORTED", - "32": "ami-b3990e89", - "64": "ami-bd990e87", - }, - "us-west-2": { - "64HVM": "NOT_YET_SUPPORTED", - "32": "ami-38fe7308", - "64": "ami-30fe7300", - }, - "us-east-1": { - "64HVM": "ami-0da96764", - "32": "ami-31814f58", - "64": "ami-1b814f72", - }, - "ap-northeast-1": { - "64HVM": "NOT_YET_SUPPORTED", - "32": "ami-0644f007", - "64": "ami-0a44f00b", - }, - "us-west-1": { - "64HVM": "NOT_YET_SUPPORTED", - "32": "ami-11d68a54", - "64": "ami-1bd68a5e", - }, - "eu-west-1": { - "64HVM": "NOT_YET_SUPPORTED", - "32": "ami-973b06e3", - "64": "ami-953b06e1", - }, - "sa-east-1": { - "64HVM": "NOT_YET_SUPPORTED", - "32": "ami-3e3be423", - "64": 
"ami-3c3be421", - }, - }, - }, -} +from __future__ import unicode_literals + +template = { + "Description": "AWS CloudFormation Sample Template Gollum_Single_Instance_With_EBS_Volume: Gollum is a simple wiki system built on top of Git that powers GitHub Wikis. This template installs a Gollum Wiki stack on a single EC2 instance with an EBS volume for storage and demonstrates using the AWS CloudFormation bootstrap scripts to install the packages and files necessary at instance launch time. **WARNING** This template creates an Amazon EC2 instance and an EBS volume. You will be billed for the AWS resources used if you create a stack from this template.", + "Parameters": { + "SSHLocation": { + "ConstraintDescription": "must be a valid IP CIDR range of the form x.x.x.x/x.", + "Description": "The IP address range that can be used to SSH to the EC2 instances", + "Default": "0.0.0.0/0", + "MinLength": "9", + "AllowedPattern": "(\\d{1,3})\\.(\\d{1,3})\\.(\\d{1,3})\\.(\\d{1,3})/(\\d{1,2})", + "MaxLength": "18", + "Type": "String", + }, + "KeyName": { + "Type": "String", + "Description": "Name of an existing EC2 KeyPair to enable SSH access to the instances", + "MinLength": "1", + "AllowedPattern": "[\\x20-\\x7E]*", + "MaxLength": "255", + "ConstraintDescription": "can contain only ASCII characters.", + }, + "InstanceType": { + "Default": "m1.small", + "ConstraintDescription": "must be a valid EC2 instance type.", + "Type": "String", + "Description": "WebServer EC2 instance type", + "AllowedValues": [ + "t1.micro", + "m1.small", + "m1.medium", + "m1.large", + "m1.xlarge", + "m2.xlarge", + "m2.2xlarge", + "m2.4xlarge", + "m3.xlarge", + "m3.2xlarge", + "c1.medium", + "c1.xlarge", + "cc1.4xlarge", + "cc2.8xlarge", + "cg1.4xlarge", + ], + }, + "VolumeSize": { + "Description": "WebServer EC2 instance type", + "Default": "5", + "Type": "Number", + "MaxValue": "1024", + "MinValue": "5", + "ConstraintDescription": "must be between 5 and 1024 Gb.", + }, + }, + "AWSTemplateFormatVersion": "2010-09-09", + "Outputs": { + "WebsiteURL": { + "Description": "URL for Gollum wiki", + "Value": { + "Fn::Join": [ + "", + ["http://", {"Fn::GetAtt": ["WebServer", "PublicDnsName"]}], + ] + }, + } + }, + "Resources": { + "WebServerSecurityGroup": { + "Type": "AWS::EC2::SecurityGroup", + "Properties": { + "SecurityGroupIngress": [ + { + "ToPort": "80", + "IpProtocol": "tcp", + "CidrIp": "0.0.0.0/0", + "FromPort": "80", + }, + { + "ToPort": "22", + "IpProtocol": "tcp", + "CidrIp": {"Ref": "SSHLocation"}, + "FromPort": "22", + }, + ], + "GroupDescription": "Enable SSH access and HTTP access on the inbound port", + }, + }, + "WebServer": { + "Type": "AWS::EC2::Instance", + "Properties": { + "UserData": { + "Fn::Base64": { + "Fn::Join": [ + "", + [ + "#!/bin/bash -v\n", + "yum update -y aws-cfn-bootstrap\n", + "# Helper function\n", + "function error_exit\n", + "{\n", + ' /opt/aws/bin/cfn-signal -e 1 -r "$1" \'', + {"Ref": "WaitHandle"}, + "'\n", + " exit 1\n", + "}\n", + "# Install Rails packages\n", + "/opt/aws/bin/cfn-init -s ", + {"Ref": "AWS::StackId"}, + " -r WebServer ", + " --region ", + {"Ref": "AWS::Region"}, + " || error_exit 'Failed to run cfn-init'\n", + "# Wait for the EBS volume to show up\n", + "while [ ! 
-e /dev/sdh ]; do echo Waiting for EBS volume to attach; sleep 5; done\n", + "# Format the EBS volume and mount it\n", + "mkdir /var/wikidata\n", + "/sbin/mkfs -t ext3 /dev/sdh1\n", + "mount /dev/sdh1 /var/wikidata\n", + "# Initialize the wiki and fire up the server\n", + "cd /var/wikidata\n", + "git init\n", + "gollum --port 80 --host 0.0.0.0 &\n", + "# If all is well so signal success\n", + '/opt/aws/bin/cfn-signal -e $? -r "Rails application setup complete" \'', + {"Ref": "WaitHandle"}, + "'\n", + ], + ] + } + }, + "KeyName": {"Ref": "KeyName"}, + "SecurityGroups": [{"Ref": "WebServerSecurityGroup"}], + "InstanceType": {"Ref": "InstanceType"}, + "ImageId": { + "Fn::FindInMap": [ + "AWSRegionArch2AMI", + {"Ref": "AWS::Region"}, + { + "Fn::FindInMap": [ + "AWSInstanceType2Arch", + {"Ref": "InstanceType"}, + "Arch", + ] + }, + ] + }, + }, + "Metadata": { + "AWS::CloudFormation::Init": { + "config": { + "packages": { + "rubygems": { + "nokogiri": ["1.5.10"], + "rdiscount": [], + "gollum": ["1.1.1"], + }, + "yum": { + "libxslt-devel": [], + "gcc": [], + "git": [], + "rubygems": [], + "ruby-devel": [], + "ruby-rdoc": [], + "make": [], + "libxml2-devel": [], + }, + } + } + } + }, + }, + "DataVolume": { + "Type": "AWS::EC2::Volume", + "Properties": { + "Tags": [{"Value": "Gollum Data Volume", "Key": "Usage"}], + "AvailabilityZone": {"Fn::GetAtt": ["WebServer", "AvailabilityZone"]}, + "Size": "100", + }, + }, + "MountPoint": { + "Type": "AWS::EC2::VolumeAttachment", + "Properties": { + "InstanceId": {"Ref": "WebServer"}, + "Device": "/dev/sdh", + "VolumeId": {"Ref": "DataVolume"}, + }, + }, + "WaitCondition": { + "DependsOn": "MountPoint", + "Type": "AWS::CloudFormation::WaitCondition", + "Properties": {"Handle": {"Ref": "WaitHandle"}, "Timeout": "300"}, + "Metadata": { + "Comment1": "Note that the WaitCondition is dependent on the volume mount point allowing the volume to be created and attached to the EC2 instance", + "Comment2": "The instance bootstrap script waits for the volume to be attached to the instance prior to installing Gollum and signalling completion", + }, + }, + "WaitHandle": {"Type": "AWS::CloudFormation::WaitConditionHandle"}, + }, + "Mappings": { + "AWSInstanceType2Arch": { + "m3.2xlarge": {"Arch": "64"}, + "m2.2xlarge": {"Arch": "64"}, + "m1.small": {"Arch": "64"}, + "c1.medium": {"Arch": "64"}, + "cg1.4xlarge": {"Arch": "64HVM"}, + "m2.xlarge": {"Arch": "64"}, + "t1.micro": {"Arch": "64"}, + "cc1.4xlarge": {"Arch": "64HVM"}, + "m1.medium": {"Arch": "64"}, + "cc2.8xlarge": {"Arch": "64HVM"}, + "m1.large": {"Arch": "64"}, + "m1.xlarge": {"Arch": "64"}, + "m2.4xlarge": {"Arch": "64"}, + "c1.xlarge": {"Arch": "64"}, + "m3.xlarge": {"Arch": "64"}, + }, + "AWSRegionArch2AMI": { + "ap-southeast-1": { + "64HVM": "NOT_YET_SUPPORTED", + "32": "ami-b4b0cae6", + "64": "ami-beb0caec", + }, + "ap-southeast-2": { + "64HVM": "NOT_YET_SUPPORTED", + "32": "ami-b3990e89", + "64": "ami-bd990e87", + }, + "us-west-2": { + "64HVM": "NOT_YET_SUPPORTED", + "32": "ami-38fe7308", + "64": "ami-30fe7300", + }, + "us-east-1": { + "64HVM": "ami-0da96764", + "32": "ami-31814f58", + "64": "ami-1b814f72", + }, + "ap-northeast-1": { + "64HVM": "NOT_YET_SUPPORTED", + "32": "ami-0644f007", + "64": "ami-0a44f00b", + }, + "us-west-1": { + "64HVM": "NOT_YET_SUPPORTED", + "32": "ami-11d68a54", + "64": "ami-1bd68a5e", + }, + "eu-west-1": { + "64HVM": "NOT_YET_SUPPORTED", + "32": "ami-973b06e3", + "64": "ami-953b06e1", + }, + "sa-east-1": { + "64HVM": "NOT_YET_SUPPORTED", + "32": "ami-3e3be423", + "64": 
"ami-3c3be421", + }, + }, + }, +} From 2ae09c5335c237105f61c12342ae97aa190efd73 Mon Sep 17 00:00:00 2001 From: Roque Pinel <1685896+repinel@users.noreply.github.com> Date: Tue, 14 Aug 2018 19:00:03 -0400 Subject: [PATCH 041/658] Fix the `StatusCode` returned by lambda invoke According to the AWS documentation: ``` The HTTP status code will be in the 200 range for successful request. For the RequestResponse invocation type this status code will be 200. For the Event invocation type this status code will be 202. For the DryRun invocation type the status code will be 204. ``` --- moto/awslambda/responses.py | 8 ++++++- tests/test_awslambda/test_lambda.py | 35 +++++++++++++++++++++++++++-- 2 files changed, 40 insertions(+), 3 deletions(-) diff --git a/moto/awslambda/responses.py b/moto/awslambda/responses.py index 46203c10d17f..3d9b3ee3ccc0 100644 --- a/moto/awslambda/responses.py +++ b/moto/awslambda/responses.py @@ -172,7 +172,13 @@ def _invoke(self, request, full_url): function_name, qualifier, self.body, self.headers, response_headers ) if payload: - return 202, response_headers, payload + if request.headers['X-Amz-Invocation-Type'] == 'Event': + status_code = 202 + elif request.headers['X-Amz-Invocation-Type'] == 'DryRun': + status_code = 204 + else: + status_code = 200 + return status_code, response_headers, payload else: return 404, response_headers, "{}" diff --git a/tests/test_awslambda/test_lambda.py b/tests/test_awslambda/test_lambda.py index 2835729f8fe0..6601537fda14 100644 --- a/tests/test_awslambda/test_lambda.py +++ b/tests/test_awslambda/test_lambda.py @@ -113,7 +113,7 @@ def test_invoke_requestresponse_function(): Payload=json.dumps(in_data), ) - success_result["StatusCode"].should.equal(202) + success_result["StatusCode"].should.equal(200) result_obj = json.loads( base64.b64decode(success_result["LogResult"]).decode("utf-8") ) @@ -151,6 +151,37 @@ def test_invoke_event_function(): json.loads(success_result["Payload"].read().decode("utf-8")).should.equal({}) +@mock_lambda +def test_invoke_dryrun_function(): + conn = boto3.client('lambda', 'us-west-2') + conn.create_function( + FunctionName='testFunction', + Runtime='python2.7', + Role=get_role_name(), + Handler='lambda_function.lambda_handler', + Code={ + 'ZipFile': get_test_zip_file1(), + }, + Description='test lambda function', + Timeout=3, + MemorySize=128, + Publish=True, + ) + + conn.invoke.when.called_with( + FunctionName='notAFunction', + InvocationType='Event', + Payload='{}' + ).should.throw(botocore.client.ClientError) + + in_data = {'msg': 'So long and thanks for all the fish'} + success_result = conn.invoke( + FunctionName='testFunction', InvocationType='DryRun', Payload=json.dumps(in_data)) + success_result["StatusCode"].should.equal(204) + json.loads(success_result['Payload'].read().decode( + 'utf-8')).should.equal({}) + + if settings.TEST_SERVER_MODE: @mock_ec2 @@ -179,7 +210,7 @@ def test_invoke_function_get_ec2_volume(): InvocationType="RequestResponse", Payload=json.dumps(in_data), ) - result["StatusCode"].should.equal(202) + result["StatusCode"].should.equal(200) actual_payload = json.loads(result["Payload"].read().decode("utf-8")) expected_payload = {"id": vol.id, "state": vol.state, "size": vol.size} actual_payload.should.equal(expected_payload) From ee8231202a82e09982c8ade459476f3b27b21d43 Mon Sep 17 00:00:00 2001 From: Bert Blommers Date: Fri, 24 Jan 2020 09:08:48 +0000 Subject: [PATCH 042/658] Fix linting --- moto/awslambda/responses.py | 4 ++-- tests/test_awslambda/test_lambda.py | 28 
+++++++++++++--------------- 2 files changed, 15 insertions(+), 17 deletions(-) diff --git a/moto/awslambda/responses.py b/moto/awslambda/responses.py index 3d9b3ee3ccc0..d79336e23232 100644 --- a/moto/awslambda/responses.py +++ b/moto/awslambda/responses.py @@ -172,9 +172,9 @@ def _invoke(self, request, full_url): function_name, qualifier, self.body, self.headers, response_headers ) if payload: - if request.headers['X-Amz-Invocation-Type'] == 'Event': + if request.headers["X-Amz-Invocation-Type"] == "Event": status_code = 202 - elif request.headers['X-Amz-Invocation-Type'] == 'DryRun': + elif request.headers["X-Amz-Invocation-Type"] == "DryRun": status_code = 204 else: status_code = 200 diff --git a/tests/test_awslambda/test_lambda.py b/tests/test_awslambda/test_lambda.py index 6601537fda14..8f8c03026654 100644 --- a/tests/test_awslambda/test_lambda.py +++ b/tests/test_awslambda/test_lambda.py @@ -153,33 +153,31 @@ def test_invoke_event_function(): @mock_lambda def test_invoke_dryrun_function(): - conn = boto3.client('lambda', 'us-west-2') + conn = boto3.client("lambda", "us-west-2") conn.create_function( - FunctionName='testFunction', - Runtime='python2.7', + FunctionName="testFunction", + Runtime="python2.7", Role=get_role_name(), - Handler='lambda_function.lambda_handler', - Code={ - 'ZipFile': get_test_zip_file1(), - }, - Description='test lambda function', + Handler="lambda_function.lambda_handler", + Code={"ZipFile": get_test_zip_file1(),}, + Description="test lambda function", Timeout=3, MemorySize=128, Publish=True, ) conn.invoke.when.called_with( - FunctionName='notAFunction', - InvocationType='Event', - Payload='{}' + FunctionName="notAFunction", InvocationType="Event", Payload="{}" ).should.throw(botocore.client.ClientError) - in_data = {'msg': 'So long and thanks for all the fish'} + in_data = {"msg": "So long and thanks for all the fish"} success_result = conn.invoke( - FunctionName='testFunction', InvocationType='DryRun', Payload=json.dumps(in_data)) + FunctionName="testFunction", + InvocationType="DryRun", + Payload=json.dumps(in_data), + ) success_result["StatusCode"].should.equal(204) - json.loads(success_result['Payload'].read().decode( - 'utf-8')).should.equal({}) + json.loads(success_result["Payload"].read().decode("utf-8")).should.equal({}) if settings.TEST_SERVER_MODE: From ccd0257acc6326e17b9eb94ab40287501666e6d3 Mon Sep 17 00:00:00 2001 From: Bert Blommers Date: Fri, 24 Jan 2020 10:09:56 +0000 Subject: [PATCH 043/658] Fix Lambda tests for DryRuns --- tests/test_awslambda/test_lambda.py | 1 - 1 file changed, 1 deletion(-) diff --git a/tests/test_awslambda/test_lambda.py b/tests/test_awslambda/test_lambda.py index 8f8c03026654..446856f6047f 100644 --- a/tests/test_awslambda/test_lambda.py +++ b/tests/test_awslambda/test_lambda.py @@ -177,7 +177,6 @@ def test_invoke_dryrun_function(): Payload=json.dumps(in_data), ) success_result["StatusCode"].should.equal(204) - json.loads(success_result["Payload"].read().decode("utf-8")).should.equal({}) if settings.TEST_SERVER_MODE: From f74f08581a5fd11e82a89d92fa1a39e6bd04138e Mon Sep 17 00:00:00 2001 From: Bert Blommers Date: Sun, 2 Feb 2020 10:36:51 +0000 Subject: [PATCH 044/658] S3 - Add IllegalLocationConstraint validation when creating buckets --- moto/s3/exceptions.py | 12 ++ moto/s3/responses.py | 22 +++- moto/s3/utils.py | 2 +- tests/test_s3/test_s3.py | 183 ++++++++++++++++---------- tests/test_s3/test_s3_lifecycle.py | 28 ++-- tests/test_s3/test_s3_storageclass.py | 30 +++-- 6 files changed, 182 insertions(+), 95 
deletions(-)

diff --git a/moto/s3/exceptions.py b/moto/s3/exceptions.py
index 1f2ead639eaa..bc339772e62e 100644
--- a/moto/s3/exceptions.py
+++ b/moto/s3/exceptions.py
@@ -127,6 +127,18 @@ def __init__(self, method, *args, **kwargs):
         )
 
 
+class IllegalLocationConstraintException(S3ClientError):
+    code = 400
+
+    def __init__(self, *args, **kwargs):
+        super(IllegalLocationConstraintException, self).__init__(
+            "IllegalLocationConstraintException",
+            "The unspecified location constraint is incompatible for the region specific endpoint this request was sent to.",
+            *args,
+            **kwargs
+        )
+
+
 class MalformedXML(S3ClientError):
     code = 400
 
diff --git a/moto/s3/responses.py b/moto/s3/responses.py
index a04427172a0f..6041201bfc35 100644
--- a/moto/s3/responses.py
+++ b/moto/s3/responses.py
@@ -29,6 +29,7 @@
     InvalidPartOrder,
     MalformedXML,
     MalformedACLError,
+    IllegalLocationConstraintException,
     InvalidNotificationARN,
     InvalidNotificationEvent,
     ObjectNotInActiveTierError,
@@ -585,6 +586,15 @@ def _truncate_result(self, result_keys, max_keys):
             next_continuation_token = None
         return result_keys, is_truncated, next_continuation_token
 
+    def _body_contains_location_constraint(self, body):
+        if body:
+            try:
+                xmltodict.parse(body)["CreateBucketConfiguration"]["LocationConstraint"]
+                return True
+            except KeyError:
+                pass
+        return False
+
     def _bucket_response_put(
         self, request, body, region_name, bucket_name, querystring
     ):
@@ -680,10 +690,16 @@ def _bucket_response_put(
                 return ""
 
         else:
+            # us-east-1, the default AWS region behaves a bit differently
+            # - you should not use it as a location constraint --> it fails
+            # - querying the location constraint returns None
+            # - LocationConstraint has to be specified if outside us-east-1
+            if (
+                region_name != DEFAULT_REGION_NAME
+                and not self._body_contains_location_constraint(body)
+            ):
+                raise IllegalLocationConstraintException()
             if body:
-                # us-east-1, the default AWS region behaves a bit differently
-                # - you should not use it as a location constraint --> it fails
-                # - querying the location constraint returns None
                 try:
                     forced_region = xmltodict.parse(body)["CreateBucketConfiguration"][
                         "LocationConstraint"
diff --git a/moto/s3/utils.py b/moto/s3/utils.py
index e7d9e55808e7..e22b6b860262 100644
--- a/moto/s3/utils.py
+++ b/moto/s3/utils.py
@@ -37,7 +37,7 @@ def bucket_name_from_url(url):
 
 REGION_URL_REGEX = re.compile(
     r"^https?://(s3[-\.](?P<region1>.+)\.amazonaws\.com/(.+)|"
-    r"(.+)\.s3-(?P<region2>.+)\.amazonaws\.com)/?"
+    r"(.+)\.s3[-\.](?P<region2>.+)\.amazonaws\.com)/?"
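# A short illustration of what the widened pattern matches: both the legacy
# dash-style and the newer dot-style regional endpoints now yield a region.
# The group names region1/region2 follow the definition above; the URLs are
# made up for the demo.
import re

REGION_URL_REGEX = re.compile(
    r"^https?://(s3[-\.](?P<region1>.+)\.amazonaws\.com/(.+)|"
    r"(.+)\.s3[-\.](?P<region2>.+)\.amazonaws\.com)/?"
)

for url in (
    "https://bucket.s3-eu-west-1.amazonaws.com/key",  # legacy dash style
    "https://bucket.s3.eu-west-1.amazonaws.com/key",  # dot style
):
    match = REGION_URL_REGEX.search(url)
    print(match.group("region1") or match.group("region2"))  # eu-west-1, twice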
) diff --git a/tests/test_s3/test_s3.py b/tests/test_s3/test_s3.py index 294beca87f39..afea4d55e494 100644 --- a/tests/test_s3/test_s3.py +++ b/tests/test_s3/test_s3.py @@ -27,6 +27,7 @@ import six import requests import tests.backport_assert_raises # noqa +from moto.s3.responses import DEFAULT_REGION_NAME from nose import SkipTest from nose.tools import assert_raises @@ -68,7 +69,7 @@ def __init__(self, name, value): self.value = value def save(self): - s3 = boto3.client("s3", region_name="us-east-1") + s3 = boto3.client("s3", region_name=DEFAULT_REGION_NAME) s3.put_object(Bucket="mybucket", Key=self.name, Body=self.value) @@ -119,7 +120,7 @@ def test_append_to_value__empty_key(): @mock_s3 def test_my_model_save(): # Create Bucket so that test can run - conn = boto3.resource("s3", region_name="us-east-1") + conn = boto3.resource("s3", region_name=DEFAULT_REGION_NAME) conn.create_bucket(Bucket="mybucket") #################################### @@ -133,7 +134,7 @@ def test_my_model_save(): @mock_s3 def test_key_etag(): - conn = boto3.resource("s3", region_name="us-east-1") + conn = boto3.resource("s3", region_name=DEFAULT_REGION_NAME) conn.create_bucket(Bucket="mybucket") model_instance = MyModel("steve", "is awesome") @@ -519,9 +520,9 @@ def test_bucket_with_dash(): def test_create_existing_bucket(): "Trying to create a bucket that already exists should raise an Error" conn = boto.s3.connect_to_region("us-west-2") - conn.create_bucket("foobar") + conn.create_bucket("foobar", location="us-west-2") with assert_raises(S3CreateError): - conn.create_bucket("foobar") + conn.create_bucket("foobar", location="us-west-2") @mock_s3_deprecated @@ -535,7 +536,7 @@ def test_create_existing_bucket_in_us_east_1(): us-east-1. In us-east-1 region, you will get 200 OK, but it is no-op (if bucket exists it Amazon S3 will not do anything). 
""" - conn = boto.s3.connect_to_region("us-east-1") + conn = boto.s3.connect_to_region(DEFAULT_REGION_NAME) conn.create_bucket("foobar") bucket = conn.create_bucket("foobar") bucket.name.should.equal("foobar") @@ -544,7 +545,7 @@ def test_create_existing_bucket_in_us_east_1(): @mock_s3_deprecated def test_other_region(): conn = S3Connection("key", "secret", host="s3-website-ap-southeast-2.amazonaws.com") - conn.create_bucket("foobar") + conn.create_bucket("foobar", location="ap-southeast-2") list(conn.get_bucket("foobar").get_all_keys()).should.equal([]) @@ -995,7 +996,9 @@ def test_bucket_acl_switching(): def test_s3_object_in_public_bucket(): s3 = boto3.resource("s3") bucket = s3.Bucket("test-bucket") - bucket.create(ACL="public-read") + bucket.create( + ACL="public-read", CreateBucketConfiguration={"LocationConstraint": "us-west-1"} + ) bucket.put_object(Body=b"ABCD", Key="file.txt") s3_anonymous = boto3.resource("s3") @@ -1026,7 +1029,9 @@ def test_s3_object_in_public_bucket(): def test_s3_object_in_private_bucket(): s3 = boto3.resource("s3") bucket = s3.Bucket("test-bucket") - bucket.create(ACL="private") + bucket.create( + ACL="private", CreateBucketConfiguration={"LocationConstraint": "us-west-1"} + ) bucket.put_object(ACL="private", Body=b"ABCD", Key="file.txt") s3_anonymous = boto3.resource("s3") @@ -1086,19 +1091,46 @@ def test_setting_content_encoding(): @mock_s3_deprecated def test_bucket_location(): conn = boto.s3.connect_to_region("us-west-2") - bucket = conn.create_bucket("mybucket") + bucket = conn.create_bucket("mybucket", location="us-west-2") bucket.get_location().should.equal("us-west-2") @mock_s3 -def test_bucket_location_us_east_1(): - cli = boto3.client("s3") +def test_bucket_location_default(): + cli = boto3.client("s3", region_name=DEFAULT_REGION_NAME) bucket_name = "mybucket" # No LocationConstraint ==> us-east-1 cli.create_bucket(Bucket=bucket_name) cli.get_bucket_location(Bucket=bucket_name)["LocationConstraint"].should.equal(None) +@mock_s3 +def test_bucket_location_nondefault(): + cli = boto3.client("s3", region_name="eu-central-1") + bucket_name = "mybucket" + # LocationConstraint set for non default regions + resp = cli.create_bucket( + Bucket=bucket_name, + CreateBucketConfiguration={"LocationConstraint": "eu-central-1"}, + ) + cli.get_bucket_location(Bucket=bucket_name)["LocationConstraint"].should.equal( + "eu-central-1" + ) + + +@mock_s3 +def test_s3_location_should_error_outside_useast1(): + s3 = boto3.client("s3", region_name="eu-west-1") + + bucket_name = "asdfasdfsdfdsfasda" + + with assert_raises(ClientError) as e: + s3.create_bucket(Bucket=bucket_name) + e.exception.response["Error"]["Message"].should.equal( + "The unspecified location constraint is incompatible for the region specific endpoint this request was sent to." 
+ ) + + @mock_s3_deprecated def test_ranged_get(): conn = boto.connect_s3() @@ -1222,7 +1254,7 @@ def test_key_with_trailing_slash_in_ordinary_calling_format(): @mock_s3 def test_boto3_key_etag(): - s3 = boto3.client("s3", region_name="us-east-1") + s3 = boto3.client("s3", region_name=DEFAULT_REGION_NAME) s3.create_bucket(Bucket="mybucket") s3.put_object(Bucket="mybucket", Key="steve", Body=b"is awesome") resp = s3.get_object(Bucket="mybucket", Key="steve") @@ -1231,7 +1263,7 @@ def test_boto3_key_etag(): @mock_s3 def test_website_redirect_location(): - s3 = boto3.client("s3", region_name="us-east-1") + s3 = boto3.client("s3", region_name=DEFAULT_REGION_NAME) s3.create_bucket(Bucket="mybucket") s3.put_object(Bucket="mybucket", Key="steve", Body=b"is awesome") @@ -1248,7 +1280,7 @@ def test_website_redirect_location(): @mock_s3 def test_boto3_list_objects_truncated_response(): - s3 = boto3.client("s3", region_name="us-east-1") + s3 = boto3.client("s3", region_name=DEFAULT_REGION_NAME) s3.create_bucket(Bucket="mybucket") s3.put_object(Bucket="mybucket", Key="one", Body=b"1") s3.put_object(Bucket="mybucket", Key="two", Body=b"22") @@ -1294,7 +1326,7 @@ def test_boto3_list_objects_truncated_response(): @mock_s3 def test_boto3_list_keys_xml_escaped(): - s3 = boto3.client("s3", region_name="us-east-1") + s3 = boto3.client("s3", region_name=DEFAULT_REGION_NAME) s3.create_bucket(Bucket="mybucket") key_name = "Q&A.txt" s3.put_object(Bucket="mybucket", Key=key_name, Body=b"is awesome") @@ -1314,7 +1346,7 @@ def test_boto3_list_keys_xml_escaped(): @mock_s3 def test_boto3_list_objects_v2_common_prefix_pagination(): - s3 = boto3.client("s3", region_name="us-east-1") + s3 = boto3.client("s3", region_name=DEFAULT_REGION_NAME) s3.create_bucket(Bucket="mybucket") max_keys = 1 @@ -1343,7 +1375,7 @@ def test_boto3_list_objects_v2_common_prefix_pagination(): @mock_s3 def test_boto3_list_objects_v2_truncated_response(): - s3 = boto3.client("s3", region_name="us-east-1") + s3 = boto3.client("s3", region_name=DEFAULT_REGION_NAME) s3.create_bucket(Bucket="mybucket") s3.put_object(Bucket="mybucket", Key="one", Body=b"1") s3.put_object(Bucket="mybucket", Key="two", Body=b"22") @@ -1400,7 +1432,7 @@ def test_boto3_list_objects_v2_truncated_response(): @mock_s3 def test_boto3_list_objects_v2_truncated_response_start_after(): - s3 = boto3.client("s3", region_name="us-east-1") + s3 = boto3.client("s3", region_name=DEFAULT_REGION_NAME) s3.create_bucket(Bucket="mybucket") s3.put_object(Bucket="mybucket", Key="one", Body=b"1") s3.put_object(Bucket="mybucket", Key="two", Body=b"22") @@ -1442,7 +1474,7 @@ def test_boto3_list_objects_v2_truncated_response_start_after(): @mock_s3 def test_boto3_list_objects_v2_fetch_owner(): - s3 = boto3.client("s3", region_name="us-east-1") + s3 = boto3.client("s3", region_name=DEFAULT_REGION_NAME) s3.create_bucket(Bucket="mybucket") s3.put_object(Bucket="mybucket", Key="one", Body=b"11") @@ -1456,7 +1488,7 @@ def test_boto3_list_objects_v2_fetch_owner(): @mock_s3 def test_boto3_list_objects_v2_truncate_combined_keys_and_folders(): - s3 = boto3.client("s3", region_name="us-east-1") + s3 = boto3.client("s3", region_name=DEFAULT_REGION_NAME) s3.create_bucket(Bucket="mybucket") s3.put_object(Bucket="mybucket", Key="1/2", Body="") s3.put_object(Bucket="mybucket", Key="2", Body="") @@ -1486,7 +1518,7 @@ def test_boto3_list_objects_v2_truncate_combined_keys_and_folders(): @mock_s3 def test_boto3_bucket_create(): - s3 = boto3.resource("s3", region_name="us-east-1") + s3 = boto3.resource("s3", 
region_name=DEFAULT_REGION_NAME) s3.create_bucket(Bucket="blah") s3.Object("blah", "hello.txt").put(Body="some text") @@ -1511,10 +1543,10 @@ def test_bucket_create_duplicate(): @mock_s3 def test_bucket_create_force_us_east_1(): - s3 = boto3.resource("s3", region_name="us-east-1") + s3 = boto3.resource("s3", region_name=DEFAULT_REGION_NAME) with assert_raises(ClientError) as exc: s3.create_bucket( - Bucket="blah", CreateBucketConfiguration={"LocationConstraint": "us-east-1"} + Bucket="blah", CreateBucketConfiguration={"LocationConstraint": DEFAULT_REGION_NAME} ) exc.exception.response["Error"]["Code"].should.equal("InvalidLocationConstraint") @@ -1522,7 +1554,9 @@ def test_bucket_create_force_us_east_1(): @mock_s3 def test_boto3_bucket_create_eu_central(): s3 = boto3.resource("s3", region_name="eu-central-1") - s3.create_bucket(Bucket="blah") + s3.create_bucket( + Bucket="blah", CreateBucketConfiguration={"LocationConstraint": "eu-central-1"} + ) s3.Object("blah", "hello.txt").put(Body="some text") @@ -1533,7 +1567,7 @@ def test_boto3_bucket_create_eu_central(): @mock_s3 def test_boto3_head_object(): - s3 = boto3.resource("s3", region_name="us-east-1") + s3 = boto3.resource("s3", region_name=DEFAULT_REGION_NAME) s3.create_bucket(Bucket="blah") s3.Object("blah", "hello.txt").put(Body="some text") @@ -1551,7 +1585,7 @@ def test_boto3_head_object(): @mock_s3 def test_boto3_bucket_deletion(): - cli = boto3.client("s3", region_name="us-east-1") + cli = boto3.client("s3", region_name=DEFAULT_REGION_NAME) cli.create_bucket(Bucket="foobar") cli.put_object(Bucket="foobar", Key="the-key", Body="some value") @@ -1582,7 +1616,7 @@ def test_boto3_bucket_deletion(): @mock_s3 def test_boto3_get_object(): - s3 = boto3.resource("s3", region_name="us-east-1") + s3 = boto3.resource("s3", region_name=DEFAULT_REGION_NAME) s3.create_bucket(Bucket="blah") s3.Object("blah", "hello.txt").put(Body="some text") @@ -1599,7 +1633,7 @@ def test_boto3_get_object(): @mock_s3 def test_boto3_get_missing_object_with_part_number(): - s3 = boto3.resource("s3", region_name="us-east-1") + s3 = boto3.resource("s3", region_name=DEFAULT_REGION_NAME) s3.create_bucket(Bucket="blah") with assert_raises(ClientError) as e: @@ -1612,7 +1646,7 @@ def test_boto3_get_missing_object_with_part_number(): @mock_s3 def test_boto3_head_object_with_versioning(): - s3 = boto3.resource("s3", region_name="us-east-1") + s3 = boto3.resource("s3", region_name=DEFAULT_REGION_NAME) bucket = s3.create_bucket(Bucket="blah") bucket.Versioning().enable() @@ -1642,7 +1676,7 @@ def test_boto3_head_object_with_versioning(): @mock_s3 def test_boto3_copy_object_with_versioning(): - client = boto3.client("s3", region_name="us-east-1") + client = boto3.client("s3", region_name=DEFAULT_REGION_NAME) client.create_bucket( Bucket="blah", CreateBucketConfiguration={"LocationConstraint": "eu-west-1"} @@ -1706,7 +1740,7 @@ def test_boto3_copy_object_with_versioning(): @mock_s3 def test_boto3_copy_object_from_unversioned_to_versioned_bucket(): - client = boto3.client("s3", region_name="us-east-1") + client = boto3.client("s3", region_name=DEFAULT_REGION_NAME) client.create_bucket( Bucket="src", CreateBucketConfiguration={"LocationConstraint": "eu-west-1"} @@ -1730,7 +1764,7 @@ def test_boto3_copy_object_from_unversioned_to_versioned_bucket(): @mock_s3 def test_boto3_deleted_versionings_list(): - client = boto3.client("s3", region_name="us-east-1") + client = boto3.client("s3", region_name=DEFAULT_REGION_NAME) client.create_bucket(Bucket="blah") 
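
The create_bucket changes running through this patch reduce to three region rules. A minimal standalone sketch of all three, assuming boto3 and moto's `mock_s3` are importable; the bucket names are illustrative:

```python
import boto3
from botocore.exceptions import ClientError
from moto import mock_s3


@mock_s3
def create_bucket_region_rules():
    # Rule 1: in us-east-1, create_bucket takes no CreateBucketConfiguration
    # (and re-creating an existing bucket there is a 200 no-op).
    us_east = boto3.client("s3", region_name="us-east-1")
    us_east.create_bucket(Bucket="demo-east")

    # Rule 2: spelling out LocationConstraint=us-east-1 explicitly is rejected.
    try:
        us_east.create_bucket(
            Bucket="demo-east-two",
            CreateBucketConfiguration={"LocationConstraint": "us-east-1"},
        )
        raise AssertionError("expected InvalidLocationConstraint")
    except ClientError as err:
        assert err.response["Error"]["Code"] == "InvalidLocationConstraint"

    # Rule 3: every other region passes a matching LocationConstraint.
    eu = boto3.client("s3", region_name="eu-central-1")
    eu.create_bucket(
        Bucket="demo-eu",
        CreateBucketConfiguration={"LocationConstraint": "eu-central-1"},
    )
    location = eu.get_bucket_location(Bucket="demo-eu")["LocationConstraint"]
    assert location == "eu-central-1"


create_bucket_region_rules()
```

Rule 3 is why nearly every boto3-based test in this patch now passes `CreateBucketConfiguration` whenever the client is created outside us-east-1.
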
client.put_bucket_versioning( @@ -1747,7 +1781,7 @@ def test_boto3_deleted_versionings_list(): @mock_s3 def test_boto3_delete_versioned_bucket(): - client = boto3.client("s3", region_name="us-east-1") + client = boto3.client("s3", region_name=DEFAULT_REGION_NAME) client.create_bucket(Bucket="blah") client.put_bucket_versioning( @@ -1762,7 +1796,7 @@ def test_boto3_delete_versioned_bucket(): @mock_s3 def test_boto3_get_object_if_modified_since(): - s3 = boto3.client("s3", region_name="us-east-1") + s3 = boto3.client("s3", region_name=DEFAULT_REGION_NAME) bucket_name = "blah" s3.create_bucket(Bucket=bucket_name) @@ -1782,7 +1816,7 @@ def test_boto3_get_object_if_modified_since(): @mock_s3 def test_boto3_head_object_if_modified_since(): - s3 = boto3.client("s3", region_name="us-east-1") + s3 = boto3.client("s3", region_name=DEFAULT_REGION_NAME) bucket_name = "blah" s3.create_bucket(Bucket=bucket_name) @@ -1804,7 +1838,7 @@ def test_boto3_head_object_if_modified_since(): @reduced_min_part_size def test_boto3_multipart_etag(): # Create Bucket so that test can run - s3 = boto3.client("s3", region_name="us-east-1") + s3 = boto3.client("s3", region_name=DEFAULT_REGION_NAME) s3.create_bucket(Bucket="mybucket") upload_id = s3.create_multipart_upload(Bucket="mybucket", Key="the-key")["UploadId"] @@ -1848,7 +1882,7 @@ def test_boto3_multipart_etag(): @mock_s3 @reduced_min_part_size def test_boto3_multipart_part_size(): - s3 = boto3.client("s3", region_name="us-east-1") + s3 = boto3.client("s3", region_name=DEFAULT_REGION_NAME) s3.create_bucket(Bucket="mybucket") mpu = s3.create_multipart_upload(Bucket="mybucket", Key="the-key") @@ -1883,7 +1917,7 @@ def test_boto3_multipart_part_size(): @mock_s3 def test_boto3_put_object_with_tagging(): - s3 = boto3.client("s3", region_name="us-east-1") + s3 = boto3.client("s3", region_name=DEFAULT_REGION_NAME) bucket_name = "mybucket" key = "key-with-tags" s3.create_bucket(Bucket=bucket_name) @@ -1897,7 +1931,7 @@ def test_boto3_put_object_with_tagging(): @mock_s3 def test_boto3_put_bucket_tagging(): - s3 = boto3.client("s3", region_name="us-east-1") + s3 = boto3.client("s3", region_name=DEFAULT_REGION_NAME) bucket_name = "mybucket" s3.create_bucket(Bucket=bucket_name) @@ -1944,7 +1978,7 @@ def test_boto3_put_bucket_tagging(): @mock_s3 def test_boto3_get_bucket_tagging(): - s3 = boto3.client("s3", region_name="us-east-1") + s3 = boto3.client("s3", region_name=DEFAULT_REGION_NAME) bucket_name = "mybucket" s3.create_bucket(Bucket=bucket_name) s3.put_bucket_tagging( @@ -1975,7 +2009,7 @@ def test_boto3_get_bucket_tagging(): @mock_s3 def test_boto3_delete_bucket_tagging(): - s3 = boto3.client("s3", region_name="us-east-1") + s3 = boto3.client("s3", region_name=DEFAULT_REGION_NAME) bucket_name = "mybucket" s3.create_bucket(Bucket=bucket_name) @@ -2002,7 +2036,7 @@ def test_boto3_delete_bucket_tagging(): @mock_s3 def test_boto3_put_bucket_cors(): - s3 = boto3.client("s3", region_name="us-east-1") + s3 = boto3.client("s3", region_name=DEFAULT_REGION_NAME) bucket_name = "mybucket" s3.create_bucket(Bucket=bucket_name) @@ -2062,7 +2096,7 @@ def test_boto3_put_bucket_cors(): @mock_s3 def test_boto3_get_bucket_cors(): - s3 = boto3.client("s3", region_name="us-east-1") + s3 = boto3.client("s3", region_name=DEFAULT_REGION_NAME) bucket_name = "mybucket" s3.create_bucket(Bucket=bucket_name) @@ -2103,7 +2137,7 @@ def test_boto3_get_bucket_cors(): @mock_s3 def test_boto3_delete_bucket_cors(): - s3 = boto3.client("s3", region_name="us-east-1") + s3 = boto3.client("s3", 
region_name=DEFAULT_REGION_NAME) bucket_name = "mybucket" s3.create_bucket(Bucket=bucket_name) s3.put_bucket_cors( @@ -2127,7 +2161,7 @@ def test_boto3_delete_bucket_cors(): @mock_s3 def test_put_bucket_acl_body(): - s3 = boto3.client("s3", region_name="us-east-1") + s3 = boto3.client("s3", region_name=DEFAULT_REGION_NAME) s3.create_bucket(Bucket="bucket") bucket_owner = s3.get_bucket_acl(Bucket="bucket")["Owner"] s3.put_bucket_acl( @@ -2225,7 +2259,7 @@ def test_put_bucket_acl_body(): @mock_s3 def test_put_bucket_notification(): - s3 = boto3.client("s3", region_name="us-east-1") + s3 = boto3.client("s3", region_name=DEFAULT_REGION_NAME) s3.create_bucket(Bucket="bucket") # With no configuration: @@ -2421,7 +2455,7 @@ def test_put_bucket_notification(): @mock_s3 def test_put_bucket_notification_errors(): - s3 = boto3.client("s3", region_name="us-east-1") + s3 = boto3.client("s3", region_name=DEFAULT_REGION_NAME) s3.create_bucket(Bucket="bucket") # With incorrect ARNs: @@ -2488,7 +2522,7 @@ def test_put_bucket_notification_errors(): @mock_s3 def test_boto3_put_bucket_logging(): - s3 = boto3.client("s3", region_name="us-east-1") + s3 = boto3.client("s3", region_name=DEFAULT_REGION_NAME) bucket_name = "mybucket" log_bucket = "logbucket" wrong_region_bucket = "wrongregionlogbucket" @@ -2667,7 +2701,7 @@ def test_boto3_put_bucket_logging(): @mock_s3 def test_boto3_put_object_tagging(): - s3 = boto3.client("s3", region_name="us-east-1") + s3 = boto3.client("s3", region_name=DEFAULT_REGION_NAME) bucket_name = "mybucket" key = "key-with-tags" s3.create_bucket(Bucket=bucket_name) @@ -2711,7 +2745,7 @@ def test_boto3_put_object_tagging(): @mock_s3 def test_boto3_put_object_tagging_on_earliest_version(): - s3 = boto3.client("s3", region_name="us-east-1") + s3 = boto3.client("s3", region_name=DEFAULT_REGION_NAME) bucket_name = "mybucket" key = "key-with-tags" s3.create_bucket(Bucket=bucket_name) @@ -2778,7 +2812,7 @@ def test_boto3_put_object_tagging_on_earliest_version(): @mock_s3 def test_boto3_put_object_tagging_on_both_version(): - s3 = boto3.client("s3", region_name="us-east-1") + s3 = boto3.client("s3", region_name=DEFAULT_REGION_NAME) bucket_name = "mybucket" key = "key-with-tags" s3.create_bucket(Bucket=bucket_name) @@ -2858,7 +2892,7 @@ def test_boto3_put_object_tagging_on_both_version(): @mock_s3 def test_boto3_put_object_tagging_with_single_tag(): - s3 = boto3.client("s3", region_name="us-east-1") + s3 = boto3.client("s3", region_name=DEFAULT_REGION_NAME) bucket_name = "mybucket" key = "key-with-tags" s3.create_bucket(Bucket=bucket_name) @@ -2876,7 +2910,7 @@ def test_boto3_put_object_tagging_with_single_tag(): @mock_s3 def test_boto3_get_object_tagging(): - s3 = boto3.client("s3", region_name="us-east-1") + s3 = boto3.client("s3", region_name=DEFAULT_REGION_NAME) bucket_name = "mybucket" key = "key-with-tags" s3.create_bucket(Bucket=bucket_name) @@ -2905,7 +2939,7 @@ def test_boto3_get_object_tagging(): @mock_s3 def test_boto3_list_object_versions(): - s3 = boto3.client("s3", region_name="us-east-1") + s3 = boto3.client("s3", region_name=DEFAULT_REGION_NAME) bucket_name = "mybucket" key = "key-with-versions" s3.create_bucket(Bucket=bucket_name) @@ -2927,7 +2961,7 @@ def test_boto3_list_object_versions(): @mock_s3 def test_boto3_list_object_versions_with_versioning_disabled(): - s3 = boto3.client("s3", region_name="us-east-1") + s3 = boto3.client("s3", region_name=DEFAULT_REGION_NAME) bucket_name = "mybucket" key = "key-with-versions" s3.create_bucket(Bucket=bucket_name) @@ -2950,7 +2984,7 
@@ def test_boto3_list_object_versions_with_versioning_disabled(): @mock_s3 def test_boto3_list_object_versions_with_versioning_enabled_late(): - s3 = boto3.client("s3", region_name="us-east-1") + s3 = boto3.client("s3", region_name=DEFAULT_REGION_NAME) bucket_name = "mybucket" key = "key-with-versions" s3.create_bucket(Bucket=bucket_name) @@ -2978,7 +3012,7 @@ def test_boto3_list_object_versions_with_versioning_enabled_late(): @mock_s3 def test_boto3_bad_prefix_list_object_versions(): - s3 = boto3.client("s3", region_name="us-east-1") + s3 = boto3.client("s3", region_name=DEFAULT_REGION_NAME) bucket_name = "mybucket" key = "key-with-versions" bad_prefix = "key-that-does-not-exist" @@ -2997,7 +3031,7 @@ def test_boto3_bad_prefix_list_object_versions(): @mock_s3 def test_boto3_delete_markers(): - s3 = boto3.client("s3", region_name="us-east-1") + s3 = boto3.client("s3", region_name=DEFAULT_REGION_NAME) bucket_name = "mybucket" key = "key-with-versions-and-unicode-ó" s3.create_bucket(Bucket=bucket_name) @@ -3040,7 +3074,7 @@ def test_boto3_delete_markers(): @mock_s3 def test_boto3_multiple_delete_markers(): - s3 = boto3.client("s3", region_name="us-east-1") + s3 = boto3.client("s3", region_name=DEFAULT_REGION_NAME) bucket_name = "mybucket" key = "key-with-versions-and-unicode-ó" s3.create_bucket(Bucket=bucket_name) @@ -3091,7 +3125,7 @@ def test_boto3_multiple_delete_markers(): def test_get_stream_gzipped(): payload = b"this is some stuff here" - s3_client = boto3.client("s3", region_name="us-east-1") + s3_client = boto3.client("s3", region_name=DEFAULT_REGION_NAME) s3_client.create_bucket(Bucket="moto-tests") buffer_ = BytesIO() with GzipFile(fileobj=buffer_, mode="w") as f: @@ -3129,7 +3163,7 @@ def test_get_stream_gzipped(): @mock_s3 def test_boto3_bucket_name_too_long(): - s3 = boto3.client("s3", region_name="us-east-1") + s3 = boto3.client("s3", region_name=DEFAULT_REGION_NAME) with assert_raises(ClientError) as exc: s3.create_bucket(Bucket="x" * 64) exc.exception.response["Error"]["Code"].should.equal("InvalidBucketName") @@ -3137,7 +3171,7 @@ def test_boto3_bucket_name_too_long(): @mock_s3 def test_boto3_bucket_name_too_short(): - s3 = boto3.client("s3", region_name="us-east-1") + s3 = boto3.client("s3", region_name=DEFAULT_REGION_NAME) with assert_raises(ClientError) as exc: s3.create_bucket(Bucket="x" * 2) exc.exception.response["Error"]["Code"].should.equal("InvalidBucketName") @@ -3146,7 +3180,7 @@ def test_boto3_bucket_name_too_short(): @mock_s3 def test_accelerated_none_when_unspecified(): bucket_name = "some_bucket" - s3 = boto3.client("s3") + s3 = boto3.client("s3", region_name=DEFAULT_REGION_NAME) s3.create_bucket(Bucket=bucket_name) resp = s3.get_bucket_accelerate_configuration(Bucket=bucket_name) resp.shouldnt.have.key("Status") @@ -3155,7 +3189,7 @@ def test_accelerated_none_when_unspecified(): @mock_s3 def test_can_enable_bucket_acceleration(): bucket_name = "some_bucket" - s3 = boto3.client("s3") + s3 = boto3.client("s3", region_name=DEFAULT_REGION_NAME) s3.create_bucket(Bucket=bucket_name) resp = s3.put_bucket_accelerate_configuration( Bucket=bucket_name, AccelerateConfiguration={"Status": "Enabled"} @@ -3171,7 +3205,7 @@ def test_can_enable_bucket_acceleration(): @mock_s3 def test_can_suspend_bucket_acceleration(): bucket_name = "some_bucket" - s3 = boto3.client("s3") + s3 = boto3.client("s3", region_name=DEFAULT_REGION_NAME) s3.create_bucket(Bucket=bucket_name) resp = s3.put_bucket_accelerate_configuration( Bucket=bucket_name, AccelerateConfiguration={"Status": 
"Enabled"} @@ -3191,7 +3225,10 @@ def test_can_suspend_bucket_acceleration(): def test_suspending_acceleration_on_not_configured_bucket_does_nothing(): bucket_name = "some_bucket" s3 = boto3.client("s3") - s3.create_bucket(Bucket=bucket_name) + s3.create_bucket( + Bucket=bucket_name, + CreateBucketConfiguration={"LocationConstraint": "us-west-1"}, + ) resp = s3.put_bucket_accelerate_configuration( Bucket=bucket_name, AccelerateConfiguration={"Status": "Suspended"} ) @@ -3205,7 +3242,7 @@ def test_suspending_acceleration_on_not_configured_bucket_does_nothing(): @mock_s3 def test_accelerate_configuration_status_validation(): bucket_name = "some_bucket" - s3 = boto3.client("s3") + s3 = boto3.client("s3", region_name=DEFAULT_REGION_NAME) s3.create_bucket(Bucket=bucket_name) with assert_raises(ClientError) as exc: s3.put_bucket_accelerate_configuration( @@ -3217,7 +3254,7 @@ def test_accelerate_configuration_status_validation(): @mock_s3 def test_accelerate_configuration_is_not_supported_when_bucket_name_has_dots(): bucket_name = "some.bucket.with.dots" - s3 = boto3.client("s3") + s3 = boto3.client("s3", region_name=DEFAULT_REGION_NAME) s3.create_bucket(Bucket=bucket_name) with assert_raises(ClientError) as exc: s3.put_bucket_accelerate_configuration( @@ -3227,7 +3264,7 @@ def test_accelerate_configuration_is_not_supported_when_bucket_name_has_dots(): def store_and_read_back_a_key(key): - s3 = boto3.client("s3", region_name="us-east-1") + s3 = boto3.client("s3", region_name=DEFAULT_REGION_NAME) bucket_name = "mybucket" body = b"Some body" @@ -3255,7 +3292,7 @@ def test_root_dir_with_empty_name_works(): ) @mock_s3 def test_delete_objects_with_url_encoded_key(key): - s3 = boto3.client("s3", region_name="us-east-1") + s3 = boto3.client("s3", region_name=DEFAULT_REGION_NAME) bucket_name = "mybucket" body = b"Some body" @@ -3282,7 +3319,7 @@ def assert_deleted(): @mock_s3 @mock_config def test_public_access_block(): - client = boto3.client("s3") + client = boto3.client("s3", region_name=DEFAULT_REGION_NAME) client.create_bucket(Bucket="mybucket") # Try to get the public access block (should not exist by default) @@ -3349,7 +3386,7 @@ def test_public_access_block(): assert ce.exception.response["ResponseMetadata"]["HTTPStatusCode"] == 400 # Test that things work with AWS Config: - config_client = boto3.client("config", region_name="us-east-1") + config_client = boto3.client("config", region_name=DEFAULT_REGION_NAME) result = config_client.get_resource_config_history( resourceType="AWS::S3::Bucket", resourceId="mybucket" ) diff --git a/tests/test_s3/test_s3_lifecycle.py b/tests/test_s3/test_s3_lifecycle.py index 260b248f1ba2..0a2e66b5c598 100644 --- a/tests/test_s3/test_s3_lifecycle.py +++ b/tests/test_s3/test_s3_lifecycle.py @@ -16,7 +16,7 @@ @mock_s3_deprecated def test_lifecycle_create(): conn = boto.s3.connect_to_region("us-west-1") - bucket = conn.create_bucket("foobar") + bucket = conn.create_bucket("foobar", location="us-west-1") lifecycle = Lifecycle() lifecycle.add_rule("myid", "", "Enabled", 30) @@ -33,7 +33,9 @@ def test_lifecycle_create(): @mock_s3 def test_lifecycle_with_filters(): client = boto3.client("s3") - client.create_bucket(Bucket="bucket") + client.create_bucket( + Bucket="bucket", CreateBucketConfiguration={"LocationConstraint": "us-west-1"} + ) # Create a lifecycle rule with a Filter (no tags): lfc = { @@ -245,7 +247,9 @@ def test_lifecycle_with_filters(): @mock_s3 def test_lifecycle_with_eodm(): client = boto3.client("s3") - client.create_bucket(Bucket="bucket") + 
client.create_bucket( + Bucket="bucket", CreateBucketConfiguration={"LocationConstraint": "us-west-1"} + ) lfc = { "Rules": [ @@ -293,7 +297,9 @@ def test_lifecycle_with_eodm(): @mock_s3 def test_lifecycle_with_nve(): client = boto3.client("s3") - client.create_bucket(Bucket="bucket") + client.create_bucket( + Bucket="bucket", CreateBucketConfiguration={"LocationConstraint": "us-west-1"} + ) lfc = { "Rules": [ @@ -327,7 +333,9 @@ def test_lifecycle_with_nve(): @mock_s3 def test_lifecycle_with_nvt(): client = boto3.client("s3") - client.create_bucket(Bucket="bucket") + client.create_bucket( + Bucket="bucket", CreateBucketConfiguration={"LocationConstraint": "us-west-1"} + ) lfc = { "Rules": [ @@ -393,7 +401,9 @@ def test_lifecycle_with_nvt(): @mock_s3 def test_lifecycle_with_aimu(): client = boto3.client("s3") - client.create_bucket(Bucket="bucket") + client.create_bucket( + Bucket="bucket", CreateBucketConfiguration={"LocationConstraint": "us-west-1"} + ) lfc = { "Rules": [ @@ -432,7 +442,7 @@ def test_lifecycle_with_aimu(): @mock_s3_deprecated def test_lifecycle_with_glacier_transition(): conn = boto.s3.connect_to_region("us-west-1") - bucket = conn.create_bucket("foobar") + bucket = conn.create_bucket("foobar", location="us-west-1") lifecycle = Lifecycle() transition = Transition(days=30, storage_class="GLACIER") @@ -451,7 +461,7 @@ def test_lifecycle_with_glacier_transition(): @mock_s3_deprecated def test_lifecycle_multi(): conn = boto.s3.connect_to_region("us-west-1") - bucket = conn.create_bucket("foobar") + bucket = conn.create_bucket("foobar", location="us-west-1") date = "2022-10-12T00:00:00.000Z" sc = "GLACIER" @@ -493,7 +503,7 @@ def test_lifecycle_multi(): @mock_s3_deprecated def test_lifecycle_delete(): conn = boto.s3.connect_to_region("us-west-1") - bucket = conn.create_bucket("foobar") + bucket = conn.create_bucket("foobar", location="us-west-1") lifecycle = Lifecycle() lifecycle.add_rule(expiration=30) diff --git a/tests/test_s3/test_s3_storageclass.py b/tests/test_s3/test_s3_storageclass.py index dbdc85c4257c..f1a0479b2ad3 100644 --- a/tests/test_s3/test_s3_storageclass.py +++ b/tests/test_s3/test_s3_storageclass.py @@ -11,7 +11,7 @@ @mock_s3 def test_s3_storage_class_standard(): - s3 = boto3.client("s3") + s3 = boto3.client("s3", region_name="us-east-1") s3.create_bucket(Bucket="Bucket") # add an object to the bucket with standard storage @@ -26,7 +26,9 @@ def test_s3_storage_class_standard(): @mock_s3 def test_s3_storage_class_infrequent_access(): s3 = boto3.client("s3") - s3.create_bucket(Bucket="Bucket") + s3.create_bucket( + Bucket="Bucket", CreateBucketConfiguration={"LocationConstraint": "us-west-2"} + ) # add an object to the bucket with standard storage @@ -46,7 +48,9 @@ def test_s3_storage_class_infrequent_access(): def test_s3_storage_class_intelligent_tiering(): s3 = boto3.client("s3") - s3.create_bucket(Bucket="Bucket") + s3.create_bucket( + Bucket="Bucket", CreateBucketConfiguration={"LocationConstraint": "us-east-2"} + ) s3.put_object( Bucket="Bucket", Key="my_key_infrequent", @@ -61,7 +65,7 @@ def test_s3_storage_class_intelligent_tiering(): @mock_s3 def test_s3_storage_class_copy(): - s3 = boto3.client("s3") + s3 = boto3.client("s3", region_name="us-east-1") s3.create_bucket(Bucket="Bucket") s3.put_object( Bucket="Bucket", Key="First_Object", Body="Body", StorageClass="STANDARD" @@ -86,7 +90,7 @@ def test_s3_storage_class_copy(): @mock_s3 def test_s3_invalid_copied_storage_class(): - s3 = boto3.client("s3") + s3 = boto3.client("s3", region_name="us-east-1") 
s3.create_bucket(Bucket="Bucket") s3.put_object( Bucket="Bucket", Key="First_Object", Body="Body", StorageClass="STANDARD" @@ -119,7 +123,9 @@ def test_s3_invalid_copied_storage_class(): @mock_s3 def test_s3_invalid_storage_class(): s3 = boto3.client("s3") - s3.create_bucket(Bucket="Bucket") + s3.create_bucket( + Bucket="Bucket", CreateBucketConfiguration={"LocationConstraint": "us-west-1"} + ) # Try to add an object with an invalid storage class with assert_raises(ClientError) as err: @@ -137,7 +143,9 @@ def test_s3_invalid_storage_class(): @mock_s3 def test_s3_default_storage_class(): s3 = boto3.client("s3") - s3.create_bucket(Bucket="Bucket") + s3.create_bucket( + Bucket="Bucket", CreateBucketConfiguration={"LocationConstraint": "us-west-1"} + ) s3.put_object(Bucket="Bucket", Key="First_Object", Body="Body") @@ -150,7 +158,9 @@ def test_s3_default_storage_class(): @mock_s3 def test_s3_copy_object_error_for_glacier_storage_class(): s3 = boto3.client("s3") - s3.create_bucket(Bucket="Bucket") + s3.create_bucket( + Bucket="Bucket", CreateBucketConfiguration={"LocationConstraint": "us-west-1"} + ) s3.put_object( Bucket="Bucket", Key="First_Object", Body="Body", StorageClass="GLACIER" @@ -169,7 +179,9 @@ def test_s3_copy_object_error_for_glacier_storage_class(): @mock_s3 def test_s3_copy_object_error_for_deep_archive_storage_class(): s3 = boto3.client("s3") - s3.create_bucket(Bucket="Bucket") + s3.create_bucket( + Bucket="Bucket", CreateBucketConfiguration={"LocationConstraint": "us-west-1"} + ) s3.put_object( Bucket="Bucket", Key="First_Object", Body="Body", StorageClass="DEEP_ARCHIVE" From a86cba79de14285ffb050699d2891bd1701fe1cf Mon Sep 17 00:00:00 2001 From: Bert Blommers Date: Sun, 2 Feb 2020 11:48:32 +0000 Subject: [PATCH 045/658] Add S3 LocationConstraint to Lambda tests --- tests/test_awslambda/test_lambda.py | 142 +++++++++++------- .../test_resourcegroupstaggingapi.py | 5 +- tests/test_s3/test_s3.py | 3 +- 3 files changed, 95 insertions(+), 55 deletions(-) diff --git a/tests/test_awslambda/test_lambda.py b/tests/test_awslambda/test_lambda.py index dfd6431e7ed1..48d04ef55c03 100644 --- a/tests/test_awslambda/test_lambda.py +++ b/tests/test_awslambda/test_lambda.py @@ -86,14 +86,14 @@ def lambda_handler(event, context): @mock_lambda def test_list_functions(): - conn = boto3.client("lambda", "us-west-2") + conn = boto3.client("lambda", _lambda_region) result = conn.list_functions() result["Functions"].should.have.length_of(0) @mock_lambda def test_invoke_requestresponse_function(): - conn = boto3.client("lambda", "us-west-2") + conn = boto3.client("lambda", _lambda_region) conn.create_function( FunctionName="testFunction", Runtime="python2.7", @@ -126,7 +126,7 @@ def test_invoke_requestresponse_function(): @mock_lambda def test_invoke_event_function(): - conn = boto3.client("lambda", "us-west-2") + conn = boto3.client("lambda", _lambda_region) conn.create_function( FunctionName="testFunction", Runtime="python2.7", @@ -156,11 +156,11 @@ def test_invoke_event_function(): @mock_ec2 @mock_lambda def test_invoke_function_get_ec2_volume(): - conn = boto3.resource("ec2", "us-west-2") - vol = conn.create_volume(Size=99, AvailabilityZone="us-west-2") + conn = boto3.resource("ec2", _lambda_region) + vol = conn.create_volume(Size=99, AvailabilityZone=_lambda_region) vol = conn.Volume(vol.id) - conn = boto3.client("lambda", "us-west-2") + conn = boto3.client("lambda", _lambda_region) conn.create_function( FunctionName="testFunction", Runtime="python3.7", @@ -190,14 +190,14 @@ def 
test_invoke_function_get_ec2_volume(): @mock_ec2 @mock_lambda def test_invoke_function_from_sns(): - logs_conn = boto3.client("logs", region_name="us-west-2") - sns_conn = boto3.client("sns", region_name="us-west-2") + logs_conn = boto3.client("logs", region_name=_lambda_region) + sns_conn = boto3.client("sns", region_name=_lambda_region) sns_conn.create_topic(Name="some-topic") topics_json = sns_conn.list_topics() topics = topics_json["Topics"] topic_arn = topics[0]["TopicArn"] - conn = boto3.client("lambda", "us-west-2") + conn = boto3.client("lambda", _lambda_region) result = conn.create_function( FunctionName="testFunction", Runtime="python2.7", @@ -240,7 +240,7 @@ def test_invoke_function_from_sns(): @mock_lambda def test_create_based_on_s3_with_missing_bucket(): - conn = boto3.client("lambda", "us-west-2") + conn = boto3.client("lambda", _lambda_region) conn.create_function.when.called_with( FunctionName="testFunction", @@ -260,12 +260,15 @@ def test_create_based_on_s3_with_missing_bucket(): @mock_s3 @freeze_time("2015-01-01 00:00:00") def test_create_function_from_aws_bucket(): - s3_conn = boto3.client("s3", "us-west-2") - s3_conn.create_bucket(Bucket="test-bucket") + s3_conn = boto3.client("s3", _lambda_region) + s3_conn.create_bucket( + Bucket="test-bucket", + CreateBucketConfiguration={"LocationConstraint": _lambda_region}, + ) zip_content = get_test_zip_file2() s3_conn.put_object(Bucket="test-bucket", Key="test.zip", Body=zip_content) - conn = boto3.client("lambda", "us-west-2") + conn = boto3.client("lambda", _lambda_region) result = conn.create_function( FunctionName="testFunction", @@ -313,7 +316,7 @@ def test_create_function_from_aws_bucket(): @mock_lambda @freeze_time("2015-01-01 00:00:00") def test_create_function_from_zipfile(): - conn = boto3.client("lambda", "us-west-2") + conn = boto3.client("lambda", _lambda_region) zip_content = get_test_zip_file1() result = conn.create_function( FunctionName="testFunction", @@ -358,12 +361,15 @@ def test_create_function_from_zipfile(): @mock_s3 @freeze_time("2015-01-01 00:00:00") def test_get_function(): - s3_conn = boto3.client("s3", "us-west-2") - s3_conn.create_bucket(Bucket="test-bucket") + s3_conn = boto3.client("s3", _lambda_region) + s3_conn.create_bucket( + Bucket="test-bucket", + CreateBucketConfiguration={"LocationConstraint": _lambda_region}, + ) zip_content = get_test_zip_file1() s3_conn.put_object(Bucket="test-bucket", Key="test.zip", Body=zip_content) - conn = boto3.client("lambda", "us-west-2") + conn = boto3.client("lambda", _lambda_region) conn.create_function( FunctionName="testFunction", @@ -427,7 +433,10 @@ def test_get_function(): def test_get_function_by_arn(): bucket_name = "test-bucket" s3_conn = boto3.client("s3", "us-east-1") - s3_conn.create_bucket(Bucket=bucket_name) + s3_conn.create_bucket( + Bucket=bucket_name, + CreateBucketConfiguration={"LocationConstraint": _lambda_region}, + ) zip_content = get_test_zip_file2() s3_conn.put_object(Bucket=bucket_name, Key="test.zip", Body=zip_content) @@ -452,12 +461,15 @@ def test_get_function_by_arn(): @mock_lambda @mock_s3 def test_delete_function(): - s3_conn = boto3.client("s3", "us-west-2") - s3_conn.create_bucket(Bucket="test-bucket") + s3_conn = boto3.client("s3", _lambda_region) + s3_conn.create_bucket( + Bucket="test-bucket", + CreateBucketConfiguration={"LocationConstraint": _lambda_region}, + ) zip_content = get_test_zip_file2() s3_conn.put_object(Bucket="test-bucket", Key="test.zip", Body=zip_content) - conn = boto3.client("lambda", "us-west-2") + conn = 
boto3.client("lambda", _lambda_region) conn.create_function( FunctionName="testFunction", @@ -488,7 +500,10 @@ def test_delete_function(): def test_delete_function_by_arn(): bucket_name = "test-bucket" s3_conn = boto3.client("s3", "us-east-1") - s3_conn.create_bucket(Bucket=bucket_name) + s3_conn.create_bucket( + Bucket=bucket_name, + CreateBucketConfiguration={"LocationConstraint": _lambda_region}, + ) zip_content = get_test_zip_file2() s3_conn.put_object(Bucket=bucket_name, Key="test.zip", Body=zip_content) @@ -513,7 +528,7 @@ def test_delete_function_by_arn(): @mock_lambda def test_delete_unknown_function(): - conn = boto3.client("lambda", "us-west-2") + conn = boto3.client("lambda", _lambda_region) conn.delete_function.when.called_with( FunctionName="testFunctionThatDoesntExist" ).should.throw(botocore.client.ClientError) @@ -522,12 +537,15 @@ def test_delete_unknown_function(): @mock_lambda @mock_s3 def test_publish(): - s3_conn = boto3.client("s3", "us-west-2") - s3_conn.create_bucket(Bucket="test-bucket") + s3_conn = boto3.client("s3", _lambda_region) + s3_conn.create_bucket( + Bucket="test-bucket", + CreateBucketConfiguration={"LocationConstraint": _lambda_region}, + ) zip_content = get_test_zip_file2() s3_conn.put_object(Bucket="test-bucket", Key="test.zip", Body=zip_content) - conn = boto3.client("lambda", "us-west-2") + conn = boto3.client("lambda", _lambda_region) conn.create_function( FunctionName="testFunction", @@ -572,12 +590,15 @@ def test_list_create_list_get_delete_list(): test `list -> create -> list -> get -> delete -> list` integration """ - s3_conn = boto3.client("s3", "us-west-2") - s3_conn.create_bucket(Bucket="test-bucket") + s3_conn = boto3.client("s3", _lambda_region) + s3_conn.create_bucket( + Bucket="test-bucket", + CreateBucketConfiguration={"LocationConstraint": _lambda_region}, + ) zip_content = get_test_zip_file2() s3_conn.put_object(Bucket="test-bucket", Key="test.zip", Body=zip_content) - conn = boto3.client("lambda", "us-west-2") + conn = boto3.client("lambda", _lambda_region) conn.list_functions()["Functions"].should.have.length_of(0) @@ -674,12 +695,15 @@ def test_tags(): """ test list_tags -> tag_resource -> list_tags -> tag_resource -> list_tags -> untag_resource -> list_tags integration """ - s3_conn = boto3.client("s3", "us-west-2") - s3_conn.create_bucket(Bucket="test-bucket") + s3_conn = boto3.client("s3", _lambda_region) + s3_conn.create_bucket( + Bucket="test-bucket", + CreateBucketConfiguration={"LocationConstraint": _lambda_region}, + ) zip_content = get_test_zip_file2() s3_conn.put_object(Bucket="test-bucket", Key="test.zip", Body=zip_content) - conn = boto3.client("lambda", "us-west-2") + conn = boto3.client("lambda", _lambda_region) function = conn.create_function( FunctionName="testFunction", @@ -731,7 +755,7 @@ def test_tags_not_found(): """ Test list_tags and tag_resource when the lambda with the given arn does not exist """ - conn = boto3.client("lambda", "us-west-2") + conn = boto3.client("lambda", _lambda_region) conn.list_tags.when.called_with( Resource="arn:aws:lambda:{}:function:not-found".format(ACCOUNT_ID) ).should.throw(botocore.client.ClientError) @@ -749,7 +773,7 @@ def test_tags_not_found(): @mock_lambda def test_invoke_async_function(): - conn = boto3.client("lambda", "us-west-2") + conn = boto3.client("lambda", _lambda_region) conn.create_function( FunctionName="testFunction", Runtime="python2.7", @@ -772,7 +796,7 @@ def test_invoke_async_function(): @mock_lambda @freeze_time("2015-01-01 00:00:00") def 
test_get_function_created_with_zipfile(): - conn = boto3.client("lambda", "us-west-2") + conn = boto3.client("lambda", _lambda_region) zip_content = get_test_zip_file1() result = conn.create_function( FunctionName="testFunction", @@ -818,7 +842,7 @@ def test_get_function_created_with_zipfile(): @mock_lambda def test_add_function_permission(): - conn = boto3.client("lambda", "us-west-2") + conn = boto3.client("lambda", _lambda_region) zip_content = get_test_zip_file1() conn.create_function( FunctionName="testFunction", @@ -849,7 +873,7 @@ def test_add_function_permission(): @mock_lambda def test_get_function_policy(): - conn = boto3.client("lambda", "us-west-2") + conn = boto3.client("lambda", _lambda_region) zip_content = get_test_zip_file1() conn.create_function( FunctionName="testFunction", @@ -884,12 +908,15 @@ def test_get_function_policy(): @mock_lambda @mock_s3 def test_list_versions_by_function(): - s3_conn = boto3.client("s3", "us-west-2") - s3_conn.create_bucket(Bucket="test-bucket") + s3_conn = boto3.client("s3", _lambda_region) + s3_conn.create_bucket( + Bucket="test-bucket", + CreateBucketConfiguration={"LocationConstraint": _lambda_region}, + ) zip_content = get_test_zip_file2() s3_conn.put_object(Bucket="test-bucket", Key="test.zip", Body=zip_content) - conn = boto3.client("lambda", "us-west-2") + conn = boto3.client("lambda", _lambda_region) conn.create_function( FunctionName="testFunction", @@ -940,12 +967,15 @@ def test_list_versions_by_function(): @mock_lambda @mock_s3 def test_create_function_with_already_exists(): - s3_conn = boto3.client("s3", "us-west-2") - s3_conn.create_bucket(Bucket="test-bucket") + s3_conn = boto3.client("s3", _lambda_region) + s3_conn.create_bucket( + Bucket="test-bucket", + CreateBucketConfiguration={"LocationConstraint": _lambda_region}, + ) zip_content = get_test_zip_file2() s3_conn.put_object(Bucket="test-bucket", Key="test.zip", Body=zip_content) - conn = boto3.client("lambda", "us-west-2") + conn = boto3.client("lambda", _lambda_region) conn.create_function( FunctionName="testFunction", @@ -977,7 +1007,7 @@ def test_create_function_with_already_exists(): @mock_lambda @mock_s3 def test_list_versions_by_function_for_nonexistent_function(): - conn = boto3.client("lambda", "us-west-2") + conn = boto3.client("lambda", _lambda_region) versions = conn.list_versions_by_function(FunctionName="testFunction") assert len(versions["Versions"]) == 0 @@ -1326,12 +1356,15 @@ def test_delete_event_source_mapping(): @mock_lambda @mock_s3 def test_update_configuration(): - s3_conn = boto3.client("s3", "us-west-2") - s3_conn.create_bucket(Bucket="test-bucket") + s3_conn = boto3.client("s3", _lambda_region) + s3_conn.create_bucket( + Bucket="test-bucket", + CreateBucketConfiguration={"LocationConstraint": _lambda_region}, + ) zip_content = get_test_zip_file2() s3_conn.put_object(Bucket="test-bucket", Key="test.zip", Body=zip_content) - conn = boto3.client("lambda", "us-west-2") + conn = boto3.client("lambda", _lambda_region) fxn = conn.create_function( FunctionName="testFunction", @@ -1374,7 +1407,7 @@ def test_update_configuration(): @mock_lambda def test_update_function_zip(): - conn = boto3.client("lambda", "us-west-2") + conn = boto3.client("lambda", _lambda_region) zip_content_one = get_test_zip_file1() @@ -1429,13 +1462,16 @@ def test_update_function_zip(): @mock_lambda @mock_s3 def test_update_function_s3(): - s3_conn = boto3.client("s3", "us-west-2") - s3_conn.create_bucket(Bucket="test-bucket") + s3_conn = boto3.client("s3", _lambda_region) + 
s3_conn.create_bucket( + Bucket="test-bucket", + CreateBucketConfiguration={"LocationConstraint": _lambda_region}, + ) zip_content = get_test_zip_file1() s3_conn.put_object(Bucket="test-bucket", Key="test.zip", Body=zip_content) - conn = boto3.client("lambda", "us-west-2") + conn = boto3.client("lambda", _lambda_region) fxn = conn.create_function( FunctionName="testFunctionS3", @@ -1516,7 +1552,7 @@ def test_create_function_with_unknown_arn(): def create_invalid_lambda(role): - conn = boto3.client("lambda", "us-west-2") + conn = boto3.client("lambda", _lambda_region) zip_content = get_test_zip_file1() with assert_raises(ClientError) as err: conn.create_function( @@ -1535,7 +1571,7 @@ def create_invalid_lambda(role): def get_role_name(): with mock_iam(): - iam = boto3.client("iam", region_name="us-west-2") + iam = boto3.client("iam", region_name=_lambda_region) try: return iam.get_role(RoleName="my-role")["Role"]["Arn"] except ClientError: diff --git a/tests/test_resourcegroupstaggingapi/test_resourcegroupstaggingapi.py b/tests/test_resourcegroupstaggingapi/test_resourcegroupstaggingapi.py index 84f7a8b86905..3ee517ce8a4c 100644 --- a/tests/test_resourcegroupstaggingapi/test_resourcegroupstaggingapi.py +++ b/tests/test_resourcegroupstaggingapi/test_resourcegroupstaggingapi.py @@ -21,7 +21,10 @@ def test_get_resources_s3(): # Create 4 buckets for i in range(1, 5): i_str = str(i) - s3_client.create_bucket(Bucket="test_bucket" + i_str) + s3_client.create_bucket( + Bucket="test_bucket" + i_str, + CreateBucketConfiguration={"LocationConstraint": "eu-central-1"}, + ) s3_client.put_bucket_tagging( Bucket="test_bucket" + i_str, Tagging={"TagSet": [{"Key": "key" + i_str, "Value": "value" + i_str}]}, diff --git a/tests/test_s3/test_s3.py b/tests/test_s3/test_s3.py index afea4d55e494..33b4299a641d 100644 --- a/tests/test_s3/test_s3.py +++ b/tests/test_s3/test_s3.py @@ -1546,7 +1546,8 @@ def test_bucket_create_force_us_east_1(): s3 = boto3.resource("s3", region_name=DEFAULT_REGION_NAME) with assert_raises(ClientError) as exc: s3.create_bucket( - Bucket="blah", CreateBucketConfiguration={"LocationConstraint": DEFAULT_REGION_NAME} + Bucket="blah", + CreateBucketConfiguration={"LocationConstraint": DEFAULT_REGION_NAME}, ) exc.exception.response["Error"]["Code"].should.equal("InvalidLocationConstraint") From d5a36752d76f67036583705e17516c3337403dc6 Mon Sep 17 00:00:00 2001 From: Bert Blommers Date: Sun, 2 Feb 2020 12:44:26 +0000 Subject: [PATCH 046/658] Add S3 LocationConstraint to CF tests --- tests/test_cloudformation/test_cloudformation_stack_crud.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/test_cloudformation/test_cloudformation_stack_crud.py b/tests/test_cloudformation/test_cloudformation_stack_crud.py index 75f705ea79da..3d1b2ab8c68c 100644 --- a/tests/test_cloudformation/test_cloudformation_stack_crud.py +++ b/tests/test_cloudformation/test_cloudformation_stack_crud.py @@ -143,7 +143,7 @@ def test_create_stack_with_notification_arn(): @mock_s3_deprecated def test_create_stack_from_s3_url(): s3_conn = boto.s3.connect_to_region("us-west-1") - bucket = s3_conn.create_bucket("foobar") + bucket = s3_conn.create_bucket("foobar", location="us-west-1") key = boto.s3.key.Key(bucket) key.key = "template-key" key.set_contents_from_string(dummy_template_json) From ceb16b00a7dcfe36a9255c30aa4c7ab17317f3d6 Mon Sep 17 00:00:00 2001 From: Bert Blommers Date: Sun, 2 Feb 2020 12:45:57 +0000 Subject: [PATCH 047/658] S3 LocationConstraint test can only be run in non-ServerMode --- 
tests/test_s3/test_s3.py | 23 ++++++++++++++--------- 1 file changed, 14 insertions(+), 9 deletions(-) diff --git a/tests/test_s3/test_s3.py b/tests/test_s3/test_s3.py index 33b4299a641d..56cbe547ba0f 100644 --- a/tests/test_s3/test_s3.py +++ b/tests/test_s3/test_s3.py @@ -1118,17 +1118,22 @@ def test_bucket_location_nondefault(): ) -@mock_s3 -def test_s3_location_should_error_outside_useast1(): - s3 = boto3.client("s3", region_name="eu-west-1") +# Test uses current Region to determine whether to throw an error +# Region is retrieved based on current URL +# URL will always be localhost in Server Mode, so can't run it there +if not settings.TEST_SERVER_MODE: - bucket_name = "asdfasdfsdfdsfasda" + @mock_s3 + def test_s3_location_should_error_outside_useast1(): + s3 = boto3.client("s3", region_name="eu-west-1") - with assert_raises(ClientError) as e: - s3.create_bucket(Bucket=bucket_name) - e.exception.response["Error"]["Message"].should.equal( - "The unspecified location constraint is incompatible for the region specific endpoint this request was sent to." - ) + bucket_name = "asdfasdfsdfdsfasda" + + with assert_raises(ClientError) as e: + s3.create_bucket(Bucket=bucket_name) + e.exception.response["Error"]["Message"].should.equal( + "The unspecified location constraint is incompatible for the region specific endpoint this request was sent to." + ) @mock_s3_deprecated From e21ddb7abc2efb83b1fa2cefee8ee9bd9427b111 Mon Sep 17 00:00:00 2001 From: Bert Blommers Date: Sun, 2 Feb 2020 14:25:44 +0000 Subject: [PATCH 048/658] Use var instead of hardcoded string for region --- tests/test_awslambda/test_lambda.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/test_awslambda/test_lambda.py b/tests/test_awslambda/test_lambda.py index 446856f6047f..4f587cdd8991 100644 --- a/tests/test_awslambda/test_lambda.py +++ b/tests/test_awslambda/test_lambda.py @@ -153,7 +153,7 @@ def test_invoke_event_function(): @mock_lambda def test_invoke_dryrun_function(): - conn = boto3.client("lambda", "us-west-2") + conn = boto3.client("lambda", _lambda_region) conn.create_function( FunctionName="testFunction", Runtime="python2.7", From 414f8086b0210ca6522183c14a3ab6a188689766 Mon Sep 17 00:00:00 2001 From: Brady Date: Wed, 5 Feb 2020 10:30:59 -0500 Subject: [PATCH 049/658] use sure for unit test assertions --- tests/test_utilities/test_tagging_service.py | 115 ++++++++++--------- 1 file changed, 60 insertions(+), 55 deletions(-) diff --git a/tests/test_utilities/test_tagging_service.py b/tests/test_utilities/test_tagging_service.py index 1cd820a194ab..0d7db3e2502d 100644 --- a/tests/test_utilities/test_tagging_service.py +++ b/tests/test_utilities/test_tagging_service.py @@ -1,59 +1,64 @@ -import unittest +import sure from moto.utilities.tagging_service import TaggingService -class TestTaggingService(unittest.TestCase): - def test_list_empty(self): - svc = TaggingService() - result = svc.list_tags_for_resource("test") - self.assertEqual(result, {"Tags": []}) - - def test_create_tag(self): - svc = TaggingService("TheTags", "TagKey", "TagValue") - tags = [{"TagKey": "key_key", "TagValue": "value_value"}] - svc.tag_resource("arn", tags) - actual = svc.list_tags_for_resource("arn") - expected = {"TheTags": [{"TagKey": "key_key", "TagValue": "value_value"}]} - self.assertDictEqual(expected, actual) - - def test_create_tag_without_value(self): - svc = TaggingService() - tags = [{"Key": "key_key"}] - svc.tag_resource("arn", tags) - actual = svc.list_tags_for_resource("arn") - expected = {"Tags": [{"Key": 
"key_key", "Value": None}]} - self.assertDictEqual(expected, actual) - - def test_delete_tag_using_names(self): - svc = TaggingService() - tags = [{"Key": "key_key", "Value": "value_value"}] - svc.tag_resource("arn", tags) - svc.untag_resource_using_names("arn", ["key_key"]) - result = svc.list_tags_for_resource("arn") - self.assertEqual(result, {"Tags": []}) - - def test_list_empty_delete(self): - svc = TaggingService() - svc.untag_resource_using_names("arn", ["key_key"]) - result = svc.list_tags_for_resource("arn") - self.assertEqual(result, {"Tags": []}) - - def test_delete_tag_using_tags(self): - svc = TaggingService() - tags = [{"Key": "key_key", "Value": "value_value"}] - svc.tag_resource("arn", tags) - svc.untag_resource_using_tags("arn", tags) - result = svc.list_tags_for_resource("arn") - self.assertEqual(result, {"Tags": []}) - - def test_extract_tag_names(self): - svc = TaggingService() - tags = [{"Key": "key1", "Value": "value1"}, {"Key": "key2", "Value": "value2"}] - actual = svc.extract_tag_names(tags) - expected = ["key1", "key2"] - self.assertEqual(expected, actual) - - -if __name__ == "__main__": - unittest.main() +def test_list_empty(): + svc = TaggingService() + result = svc.list_tags_for_resource("test") + + {"Tags": []}.should.be.equal(result) + + +def test_create_tag(): + svc = TaggingService("TheTags", "TagKey", "TagValue") + tags = [{"TagKey": "key_key", "TagValue": "value_value"}] + svc.tag_resource("arn", tags) + actual = svc.list_tags_for_resource("arn") + expected = {"TheTags": [{"TagKey": "key_key", "TagValue": "value_value"}]} + + expected.should.be.equal(actual) + +def test_create_tag_without_value(): + svc = TaggingService() + tags = [{"Key": "key_key"}] + svc.tag_resource("arn", tags) + actual = svc.list_tags_for_resource("arn") + expected = {"Tags": [{"Key": "key_key", "Value": None}]} + + expected.should.be.equal(actual) + +def test_delete_tag_using_names(): + svc = TaggingService() + tags = [{"Key": "key_key", "Value": "value_value"}] + svc.tag_resource("arn", tags) + svc.untag_resource_using_names("arn", ["key_key"]) + result = svc.list_tags_for_resource("arn") + + {"Tags": []}.should.be.equal(result) + +def test_list_empty_delete(): + svc = TaggingService() + svc.untag_resource_using_names("arn", ["key_key"]) + result = svc.list_tags_for_resource("arn") + + {"Tags": []}.should.be.equal(result) + +def test_delete_tag_using_tags(): + svc = TaggingService() + tags = [{"Key": "key_key", "Value": "value_value"}] + svc.tag_resource("arn", tags) + svc.untag_resource_using_tags("arn", tags) + result = svc.list_tags_for_resource("arn") + + {"Tags": []}.should.be.equal(result) + + +def test_extract_tag_names(): + svc = TaggingService() + tags = [{"Key": "key1", "Value": "value1"}, {"Key": "key2", "Value": "value2"}] + actual = svc.extract_tag_names(tags) + expected = ["key1", "key2"] + + expected.should.be.equal(actual) + From c95254a2843fac342e702f7708cce63274a053d0 Mon Sep 17 00:00:00 2001 From: Brady Date: Wed, 5 Feb 2020 11:58:52 -0500 Subject: [PATCH 050/658] delete tags when their resource is deleted --- moto/events/models.py | 2 ++ moto/utilities/tagging_service.py | 3 +++ tests/test_utilities/test_tagging_service.py | 17 ++++++++++++++++- 3 files changed, 21 insertions(+), 1 deletion(-) diff --git a/moto/events/models.py b/moto/events/models.py index 695cfb17a12d..82723ac6c961 100644 --- a/moto/events/models.py +++ b/moto/events/models.py @@ -143,6 +143,8 @@ def _process_token_and_limits(self, array_len, next_token=None, limit=None): def 
delete_rule(self, name): self.rules_order.pop(self.rules_order.index(name)) + arn = self.rules.get(name).arn + self.tagger.delete_all_tags_for_resource(arn) return self.rules.pop(name) is not None def describe_rule(self, name): diff --git a/moto/utilities/tagging_service.py b/moto/utilities/tagging_service.py index 8c7a86f1d768..c56fd2306d49 100644 --- a/moto/utilities/tagging_service.py +++ b/moto/utilities/tagging_service.py @@ -12,6 +12,9 @@ def list_tags_for_resource(self, arn): result.append({self.keyName: k, self.valueName: v}) return {self.tagName: result} + def delete_all_tags_for_resource(self, arn): + del self.tags[arn] + def tag_resource(self, arn, tags): if arn not in self.tags: self.tags[arn] = {} diff --git a/tests/test_utilities/test_tagging_service.py b/tests/test_utilities/test_tagging_service.py index 0d7db3e2502d..249e903fe774 100644 --- a/tests/test_utilities/test_tagging_service.py +++ b/tests/test_utilities/test_tagging_service.py @@ -19,6 +19,7 @@ def test_create_tag(): expected.should.be.equal(actual) + def test_create_tag_without_value(): svc = TaggingService() tags = [{"Key": "key_key"}] @@ -28,6 +29,7 @@ def test_create_tag_without_value(): expected.should.be.equal(actual) + def test_delete_tag_using_names(): svc = TaggingService() tags = [{"Key": "key_key", "Value": "value_value"}] @@ -37,6 +39,19 @@ def test_delete_tag_using_names(): {"Tags": []}.should.be.equal(result) + +def test_delete_all_tags_for_resource(): + svc = TaggingService() + tags = [{"Key": "key_key", "Value": "value_value"}] + tags2 = [{"Key": "key_key2", "Value": "value_value2"}] + svc.tag_resource("arn", tags) + svc.tag_resource("arn", tags2) + svc.delete_all_tags_for_resource("arn") + result = svc.list_tags_for_resource("arn") + + {"Tags": []}.should.be.equal(result) + + def test_list_empty_delete(): svc = TaggingService() svc.untag_resource_using_names("arn", ["key_key"]) @@ -44,6 +59,7 @@ def test_list_empty_delete(): {"Tags": []}.should.be.equal(result) + def test_delete_tag_using_tags(): svc = TaggingService() tags = [{"Key": "key_key", "Value": "value_value"}] @@ -61,4 +77,3 @@ def test_extract_tag_names(): expected = ["key1", "key2"] expected.should.be.equal(actual) - From 5b5510218156ada78990432bf3d07157c68e611d Mon Sep 17 00:00:00 2001 From: Brady Date: Wed, 5 Feb 2020 15:30:34 -0500 Subject: [PATCH 051/658] fix test case --- moto/events/models.py | 3 ++- moto/utilities/tagging_service.py | 3 +++ 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/moto/events/models.py b/moto/events/models.py index 82723ac6c961..a80b86daa302 100644 --- a/moto/events/models.py +++ b/moto/events/models.py @@ -144,7 +144,8 @@ def _process_token_and_limits(self, array_len, next_token=None, limit=None): def delete_rule(self, name): self.rules_order.pop(self.rules_order.index(name)) arn = self.rules.get(name).arn - self.tagger.delete_all_tags_for_resource(arn) + if self.tagger.has_tags(arn): + self.tagger.delete_all_tags_for_resource(arn) return self.rules.pop(name) is not None def describe_rule(self, name): diff --git a/moto/utilities/tagging_service.py b/moto/utilities/tagging_service.py index c56fd2306d49..89b857277b15 100644 --- a/moto/utilities/tagging_service.py +++ b/moto/utilities/tagging_service.py @@ -15,6 +15,9 @@ def list_tags_for_resource(self, arn): def delete_all_tags_for_resource(self, arn): del self.tags[arn] + def has_tags(self, arn): + return arn in self.tags + def tag_resource(self, arn, tags): if arn not in self.tags: self.tags[arn] = {} From 
ecdedf30c87fdd321d910374972ec1808bc1b7a1 Mon Sep 17 00:00:00 2001 From: Brady Date: Wed, 5 Feb 2020 16:31:33 -0500 Subject: [PATCH 052/658] force build... --- tests/test_events/test_events.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/tests/test_events/test_events.py b/tests/test_events/test_events.py index 4fb3b4029bcc..4ecb2d88200b 100644 --- a/tests/test_events/test_events.py +++ b/tests/test_events/test_events.py @@ -484,22 +484,22 @@ def test_rule_tagging_happy(): @mock_events def test_rule_tagging_sad(): - b = EventsBackend("us-west-2") + back_end = EventsBackend("us-west-2") try: - b.tag_resource("unknown", []) + back_end.tag_resource("unknown", []) raise "tag_resource should fail if ResourceARN is not known" except JsonRESTError: pass try: - b.untag_resource("unknown", []) + back_end.untag_resource("unknown", []) raise "untag_resource should fail if ResourceARN is not known" except JsonRESTError: pass try: - b.list_tags_for_resource("unknown") + back_end.list_tags_for_resource("unknown") raise "list_tags_for_resource should fail if ResourceARN is not known" except JsonRESTError: pass From b4c9b76ca958223f54c6b8cc22b85bc100f48c18 Mon Sep 17 00:00:00 2001 From: Terry Griffin <“griffint61@users.noreply.github.com”> Date: Thu, 6 Feb 2020 15:26:20 -0800 Subject: [PATCH 053/658] Added 'x-amzn-ErrorType' in return header from lambda:get_function for missing function --- moto/awslambda/responses.py | 2 +- tests/test_awslambda/test_lambda.py | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/moto/awslambda/responses.py b/moto/awslambda/responses.py index bac670b8e010..3152ea6f6de8 100644 --- a/moto/awslambda/responses.py +++ b/moto/awslambda/responses.py @@ -295,7 +295,7 @@ def _get_function(self, request, full_url, headers): code["Configuration"]["FunctionArn"] += ":$LATEST" return 200, {}, json.dumps(code) else: - return 404, {}, "{}" + return 404, {"x-amzn-ErrorType": "ResourceNotFoundException"}, "{}" def _get_aws_region(self, full_url): region = self.region_regex.search(full_url) diff --git a/tests/test_awslambda/test_lambda.py b/tests/test_awslambda/test_lambda.py index 4db13d220fed..f1265ce71903 100644 --- a/tests/test_awslambda/test_lambda.py +++ b/tests/test_awslambda/test_lambda.py @@ -78,7 +78,7 @@ def lambda_handler(event, context): def get_test_zip_file4(): pfunc = """ -def lambda_handler(event, context): +def lambda_handler(event, context): raise Exception('I failed!') """ return _process_lambda(pfunc) @@ -455,7 +455,7 @@ def test_get_function(): ) # Test get function when can't find function name - with assert_raises(ClientError): + with assert_raises(conn.exceptions.ResourceNotFoundException): conn.get_function(FunctionName="junk", Qualifier="$LATEST") From df031d0f33749454ad2612f5c58ffb2b1042625d Mon Sep 17 00:00:00 2001 From: Bert Blommers Date: Sat, 8 Feb 2020 10:58:31 +0000 Subject: [PATCH 054/658] #2732 - Created AMI should have AccountID as the OwnerID --- moto/ec2/models.py | 14 ++++---------- tests/test_ec2/test_amis.py | 16 +++++++++++++++- 2 files changed, 19 insertions(+), 11 deletions(-) diff --git a/moto/ec2/models.py b/moto/ec2/models.py index a0c886087ccd..166d8e646a1b 100644 --- a/moto/ec2/models.py +++ b/moto/ec2/models.py @@ -27,6 +27,7 @@ iso_8601_datetime_with_milliseconds, camelcase_to_underscores, ) +from moto.iam.models import ACCOUNT_ID from .exceptions import ( CidrLimitExceeded, DependencyViolationError, @@ -155,7 +156,7 @@ def _load_resource(filename): ) -OWNER_ID = "111122223333" +OWNER_ID = 
ACCOUNT_ID def utc_date_and_time(): @@ -1341,7 +1342,7 @@ def create_image(self, instance_id, name=None, description=None, context=None): source_ami=None, name=name, description=description, - owner_id=context.get_current_user() if context else OWNER_ID, + owner_id=OWNER_ID, ) self.amis[ami_id] = ami return ami @@ -1392,14 +1393,7 @@ def describe_images( # Limit by owner ids if owners: # support filtering by Owners=['self'] - owners = list( - map( - lambda o: context.get_current_user() - if context and o == "self" - else o, - owners, - ) - ) + owners = list(map(lambda o: OWNER_ID if o == "self" else o, owners,)) images = [ami for ami in images if ami.owner_id in owners] # Generic filters diff --git a/tests/test_ec2/test_amis.py b/tests/test_ec2/test_amis.py index f65352c7c7bd..ad432bb78ebf 100644 --- a/tests/test_ec2/test_amis.py +++ b/tests/test_ec2/test_amis.py @@ -12,6 +12,7 @@ from moto import mock_ec2_deprecated, mock_ec2 from moto.ec2.models import AMIS, OWNER_ID +from moto.iam.models import ACCOUNT_ID from tests.helpers import requires_boto_gte @@ -251,6 +252,19 @@ def test_ami_pulls_attributes_from_instance(): image.kernel_id.should.equal("test-kernel") +@mock_ec2_deprecated +def test_ami_uses_account_id_if_valid_access_key_is_supplied(): + access_key = "AKIAXXXXXXXXXXXXXXXX" + conn = boto.connect_ec2(access_key, "the_secret") + reservation = conn.run_instances("ami-1234abcd") + instance = reservation.instances[0] + instance.modify_attribute("kernel", "test-kernel") + + image_id = conn.create_image(instance.id, "test-ami", "this is a test ami") + images = conn.get_all_images(owners=["self"]) + [(ami.id, ami.owner_id) for ami in images].should.equal([(image_id, ACCOUNT_ID)]) + + @mock_ec2_deprecated def test_ami_filters(): conn = boto.connect_ec2("the_key", "the_secret") @@ -773,7 +787,7 @@ def test_ami_filter_wildcard(): instance.create_image(Name="not-matching-image") my_images = ec2_client.describe_images( - Owners=["111122223333"], Filters=[{"Name": "name", "Values": ["test*"]}] + Owners=[ACCOUNT_ID], Filters=[{"Name": "name", "Values": ["test*"]}] )["Images"] my_images.should.have.length_of(1) From e91f1309d103b66fb7ccf049c6f7d9b09891a122 Mon Sep 17 00:00:00 2001 From: Luka Bratos Date: Sat, 8 Feb 2020 17:49:54 +0000 Subject: [PATCH 055/658] Update docs --- docs/docs/getting_started.rst | 39 ++++++++++++++++++++--------------- 1 file changed, 22 insertions(+), 17 deletions(-) diff --git a/docs/docs/getting_started.rst b/docs/docs/getting_started.rst index d52e76235e06..ffe37f3a0cf6 100644 --- a/docs/docs/getting_started.rst +++ b/docs/docs/getting_started.rst @@ -24,8 +24,7 @@ For example, we have the following code we want to test: .. sourcecode:: python - import boto - from boto.s3.key import Key + import boto3 class MyModel(object): def __init__(self, name, value): @@ -33,11 +32,8 @@ For example, we have the following code we want to test: self.value = value def save(self): - conn = boto.connect_s3() - bucket = conn.get_bucket('mybucket') - k = Key(bucket) - k.key = self.name - k.set_contents_from_string(self.value) + s3 = boto3.client('s3', region_name='us-east-1') + s3.put_object(Bucket='mybucket', Key=self.name, Body=self.value) There are several ways to do this, but you should keep in mind that Moto creates a full, blank environment. @@ -48,20 +44,23 @@ With a decorator wrapping, all the calls to S3 are automatically mocked out. .. 
sourcecode:: python - import boto + import boto3 from moto import mock_s3 from mymodule import MyModel @mock_s3 def test_my_model_save(): - conn = boto.connect_s3() + conn = boto3.resource('s3', region_name='us-east-1') # We need to create the bucket since this is all in Moto's 'virtual' AWS account - conn.create_bucket('mybucket') + conn.create_bucket(Bucket='mybucket') model_instance = MyModel('steve', 'is awesome') model_instance.save() - assert conn.get_bucket('mybucket').get_key('steve').get_contents_as_string() == 'is awesome' + body = conn.Object('mybucket', 'steve').get()[ + 'Body'].read().decode("utf-8") + + assert body == 'is awesome' Context manager ~~~~~~~~~~~~~~~ @@ -72,13 +71,16 @@ Same as the Decorator, every call inside the ``with`` statement is mocked out. def test_my_model_save(): with mock_s3(): - conn = boto.connect_s3() - conn.create_bucket('mybucket') + conn = boto3.resource('s3', region_name='us-east-1') + conn.create_bucket(Bucket='mybucket') model_instance = MyModel('steve', 'is awesome') model_instance.save() - assert conn.get_bucket('mybucket').get_key('steve').get_contents_as_string() == 'is awesome' + body = conn.Object('mybucket', 'steve').get()[ + 'Body'].read().decode("utf-8") + + assert body == 'is awesome' Raw ~~~ @@ -91,13 +93,16 @@ You can also start and stop the mocking manually. mock = mock_s3() mock.start() - conn = boto.connect_s3() - conn.create_bucket('mybucket') + conn = boto3.resource('s3', region_name='us-east-1') + conn.create_bucket(Bucket='mybucket') model_instance = MyModel('steve', 'is awesome') model_instance.save() - assert conn.get_bucket('mybucket').get_key('steve').get_contents_as_string() == 'is awesome' + body = conn.Object('mybucket', 'steve').get()[ + 'Body'].read().decode("utf-8") + + assert body == 'is awesome' mock.stop() From 936d6863927d0f4c5784889ccac3e74033135e84 Mon Sep 17 00:00:00 2001 From: Bert Blommers Date: Sun, 9 Feb 2020 11:47:02 +0000 Subject: [PATCH 056/658] #2580 - DynamoDB update_item: Allow list_append and if_not_exists-functions in one expression --- moto/dynamodb2/models.py | 17 +++++++++++------ tests/test_dynamodb2/test_dynamodb.py | 25 +++++++++++++++++++++++++ 2 files changed, 36 insertions(+), 6 deletions(-) diff --git a/moto/dynamodb2/models.py b/moto/dynamodb2/models.py index 2313a6e410b0..82c3559eaf94 100644 --- a/moto/dynamodb2/models.py +++ b/moto/dynamodb2/models.py @@ -448,13 +448,18 @@ def _get_appended_list(self, value, expression_attribute_values): if list_append_re: new_value = expression_attribute_values[list_append_re.group(2).strip()] old_list_key = list_append_re.group(1) - # Get the existing value - old_list = self.attrs[old_list_key.split(".")[0]] - if "." in old_list_key: - # Value is nested inside a map - find the appropriate child attr - old_list = old_list.child_attr( - ".".join(old_list_key.split(".")[1:]) + # old_key could be a function itself (if_not_exists) + if old_list_key.startswith("if_not_exists"): + old_list = DynamoType( + expression_attribute_values[self._get_default(old_list_key)] ) + else: + old_list = self.attrs[old_list_key.split(".")[0]] + if "." 
in old_list_key: + # Value is nested inside a map - find the appropriate child attr + old_list = old_list.child_attr( + ".".join(old_list_key.split(".")[1:]) + ) if not old_list.is_list(): raise ParamValidationError old_list.value.extend([DynamoType(v) for v in new_value["L"]]) diff --git a/tests/test_dynamodb2/test_dynamodb.py b/tests/test_dynamodb2/test_dynamodb.py index ec01889aeed1..fec4c306456c 100644 --- a/tests/test_dynamodb2/test_dynamodb.py +++ b/tests/test_dynamodb2/test_dynamodb.py @@ -3609,6 +3609,31 @@ def test_update_supports_list_append_maps(): ) +@mock_dynamodb2 +def test_update_supports_list_append_with_nested_if_not_exists_operation(): + dynamo = boto3.resource("dynamodb") + table_name = "test" + + dynamo.create_table( + TableName=table_name, + AttributeDefinitions=[{"AttributeName": "Id", "AttributeType": "S"}], + KeySchema=[{"AttributeName": "Id", "KeyType": "HASH"}], + ProvisionedThroughput={"ReadCapacityUnits": 20, "WriteCapacityUnits": 20}, + ) + + table = dynamo.Table(table_name) + + table.put_item(Item={"Id": "item-id", "nest1": {"nest2": {}}}) + table.update_item( + Key={"Id": "item-id"}, + UpdateExpression="SET nest1.nest2.event_history = list_append(if_not_exists(nest1.nest2.event_history, :empty_list), :new_value)", + ExpressionAttributeValues={":empty_list": [], ":new_value": ["some_value"]}, + ) + table.get_item(Key={"Id": "item-id"})["Item"].should.equal( + {"Id": "item-id", "nest1": {"nest2": {"event_history": ["some_value"]}}} + ) + + @mock_dynamodb2 def test_update_catches_invalid_list_append_operation(): client = boto3.client("dynamodb", region_name="us-east-1") From 2bd93a76fc2e31524d565461a6716a8bbdcc65fd Mon Sep 17 00:00:00 2001 From: Bert Blommers Date: Sun, 9 Feb 2020 11:58:41 +0000 Subject: [PATCH 057/658] Add region to DDB tests --- tests/test_dynamodb2/test_dynamodb.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/test_dynamodb2/test_dynamodb.py b/tests/test_dynamodb2/test_dynamodb.py index fec4c306456c..180f460c0eca 100644 --- a/tests/test_dynamodb2/test_dynamodb.py +++ b/tests/test_dynamodb2/test_dynamodb.py @@ -3611,7 +3611,7 @@ def test_update_supports_list_append_maps(): @mock_dynamodb2 def test_update_supports_list_append_with_nested_if_not_exists_operation(): - dynamo = boto3.resource("dynamodb") + dynamo = boto3.resource("dynamodb", region_name="us-west-1") table_name = "test" dynamo.create_table( From f70cd0182e413bca58be077e7f1f90e50ec83f62 Mon Sep 17 00:00:00 2001 From: Terry Griffin <“griffint61@users.noreply.github.com”> Date: Mon, 10 Feb 2020 09:18:25 -0800 Subject: [PATCH 058/658] Fixed test_lambda_can_be_deleted_by_cloudformation for new (correct) error code. 
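Note: with the `x-amzn-ErrorType` header from PATCH 053 in place, botocore resolves the 404 into a typed exception, so callers can catch it by name rather than matching the bare status code. A minimal sketch of the intended behaviour, assuming a mocked Lambda backend (the function name is illustrative):

.. sourcecode:: python

    import boto3
    from moto import mock_lambda

    @mock_lambda
    def test_missing_function_raises_typed_error():
        client = boto3.client("lambda", region_name="us-east-1")
        try:
            client.get_function(FunctionName="no-such-function")
            raised = False
        except client.exceptions.ResourceNotFoundException as err:
            raised = True
            # The error code mirrors the new x-amzn-ErrorType header value
            assert err.response["Error"]["Code"] == "ResourceNotFoundException"
        assert raised, "expected ResourceNotFoundException"
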
--- tests/test_awslambda/test_lambda_cloudformation.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/test_awslambda/test_lambda_cloudformation.py b/tests/test_awslambda/test_lambda_cloudformation.py index a5d4d23fdb8a..f57354d69531 100644 --- a/tests/test_awslambda/test_lambda_cloudformation.py +++ b/tests/test_awslambda/test_lambda_cloudformation.py @@ -94,7 +94,7 @@ def test_lambda_can_be_deleted_by_cloudformation(): # Verify function was deleted with assert_raises(ClientError) as e: lmbda.get_function(FunctionName=created_fn_name) - e.exception.response["Error"]["Code"].should.equal("404") + e.exception.response["Error"]["Code"].should.equal("ResourceNotFoundException") def create_stack(cf, s3): From 353ad631f088e42b7d171d1dc69d501153020b13 Mon Sep 17 00:00:00 2001 From: Laurie O Date: Sat, 15 Feb 2020 01:18:08 +1000 Subject: [PATCH 059/658] Include closed execution extra info Include 'closeStatus' and 'closeTimestamp' when describing SWF workflow execution using 'describe_workflow_execution' Signed-off-by: Laurie O --- moto/swf/models/workflow_execution.py | 4 +++ .../models/test_workflow_execution.py | 33 +++++++++++++++++++ 2 files changed, 37 insertions(+) diff --git a/moto/swf/models/workflow_execution.py b/moto/swf/models/workflow_execution.py index 4d91b1f6f690..17ce819fb4e8 100644 --- a/moto/swf/models/workflow_execution.py +++ b/moto/swf/models/workflow_execution.py @@ -127,6 +127,10 @@ def to_full_dict(self): "executionInfo": self.to_medium_dict(), "executionConfiguration": {"taskList": {"name": self.task_list}}, } + # info + if self.execution_status == "CLOSED": + hsh["executionInfo"]["closeStatus"] = self.close_status + hsh["executionInfo"]["closeTimestamp"] = self.close_timestamp # configuration for key in self._configuration_keys: attr = camelcase_to_underscores(key) diff --git a/tests/test_swf/models/test_workflow_execution.py b/tests/test_swf/models/test_workflow_execution.py index 6c73a968608c..503198f4644b 100644 --- a/tests/test_swf/models/test_workflow_execution.py +++ b/tests/test_swf/models/test_workflow_execution.py @@ -148,6 +148,39 @@ def test_workflow_execution_full_dict_representation(): ) +def test_closed_workflow_execution_full_dict_representation(): + domain = get_basic_domain() + wf_type = WorkflowType( + "test-workflow", + "v1.0", + task_list="queue", + default_child_policy="ABANDON", + default_execution_start_to_close_timeout="300", + default_task_start_to_close_timeout="300", + ) + wfe = WorkflowExecution(domain, wf_type, "ab1234") + wfe.execution_status = "CLOSED" + wfe.close_status = "CANCELED" + wfe.close_timestamp = 1420066801.123 + + fd = wfe.to_full_dict() + medium_dict = wfe.to_medium_dict() + medium_dict["closeStatus"] = "CANCELED" + medium_dict["closeTimestamp"] = 1420066801.123 + fd["executionInfo"].should.equal(medium_dict) + fd["openCounts"]["openTimers"].should.equal(0) + fd["openCounts"]["openDecisionTasks"].should.equal(0) + fd["openCounts"]["openActivityTasks"].should.equal(0) + fd["executionConfiguration"].should.equal( + { + "childPolicy": "ABANDON", + "executionStartToCloseTimeout": "300", + "taskList": {"name": "queue"}, + "taskStartToCloseTimeout": "300", + } + ) + + def test_workflow_execution_list_dict_representation(): domain = get_basic_domain() wf_type = WorkflowType( From 8a51fbe1c99972ea94b08b5926004cadde4cacf2 Mon Sep 17 00:00:00 2001 From: Jon Beilke Date: Fri, 14 Feb 2020 12:26:27 -0600 Subject: [PATCH 060/658] add default for apiKeyRequired field on API Gateway methods --- 
moto/apigateway/models.py | 25 +++++++++++++---- moto/apigateway/responses.py | 7 ++++- tests/test_apigateway/test_apigateway.py | 35 ++++++++++++++++++++++++ 3 files changed, 60 insertions(+), 7 deletions(-) diff --git a/moto/apigateway/models.py b/moto/apigateway/models.py index ae7bdfac3dce..937b9b08cb03 100644 --- a/moto/apigateway/models.py +++ b/moto/apigateway/models.py @@ -83,14 +83,14 @@ def __init__(self, status_code): class Method(BaseModel, dict): - def __init__(self, method_type, authorization_type): + def __init__(self, method_type, authorization_type, **kwargs): super(Method, self).__init__() self.update( dict( httpMethod=method_type, authorizationType=authorization_type, authorizerId=None, - apiKeyRequired=None, + apiKeyRequired=kwargs.get("api_key_required") or False, requestParameters=None, requestModels=None, methodIntegration=None, @@ -158,8 +158,12 @@ def get_response(self, request): ) return response.status_code, response.text - def add_method(self, method_type, authorization_type): - method = Method(method_type=method_type, authorization_type=authorization_type) + def add_method(self, method_type, authorization_type, api_key_required): + method = Method( + method_type=method_type, + authorization_type=authorization_type, + api_key_required=api_key_required, + ) self.resource_methods[method_type] = method return method @@ -594,9 +598,18 @@ def get_method(self, function_id, resource_id, method_type): resource = self.get_resource(function_id, resource_id) return resource.get_method(method_type) - def create_method(self, function_id, resource_id, method_type, authorization_type): + def create_method( + self, + function_id, + resource_id, + method_type, + authorization_type, + api_key_required=None, + ): resource = self.get_resource(function_id, resource_id) - method = resource.add_method(method_type, authorization_type) + method = resource.add_method( + method_type, authorization_type, api_key_required=api_key_required + ) return method def get_stage(self, function_id, stage_name): diff --git a/moto/apigateway/responses.py b/moto/apigateway/responses.py index e10d670c5f68..6a22a47087cd 100644 --- a/moto/apigateway/responses.py +++ b/moto/apigateway/responses.py @@ -145,8 +145,13 @@ def resource_methods(self, request, full_url, headers): return 200, {}, json.dumps(method) elif self.method == "PUT": authorization_type = self._get_param("authorizationType") + api_key_required = self._get_param("apiKeyRequired") method = self.backend.create_method( - function_id, resource_id, method_type, authorization_type + function_id, + resource_id, + method_type, + authorization_type, + api_key_required, ) return 200, {}, json.dumps(method) diff --git a/tests/test_apigateway/test_apigateway.py b/tests/test_apigateway/test_apigateway.py index 496098e8cf81..c92fc08f46f3 100644 --- a/tests/test_apigateway/test_apigateway.py +++ b/tests/test_apigateway/test_apigateway.py @@ -286,6 +286,41 @@ def test_create_method(): { "httpMethod": "GET", "authorizationType": "none", + "apiKeyRequired": False, + "ResponseMetadata": {"HTTPStatusCode": 200}, + } + ) + + +@mock_apigateway +def test_create_method_apikeyrequired(): + client = boto3.client("apigateway", region_name="us-west-2") + response = client.create_rest_api(name="my_api", description="this is my api") + api_id = response["id"] + + resources = client.get_resources(restApiId=api_id) + root_id = [resource for resource in resources["items"] if resource["path"] == "/"][ + 0 + ]["id"] + + client.put_method( + restApiId=api_id, + 
resourceId=root_id, + httpMethod="GET", + authorizationType="none", + apiKeyRequired=True, + ) + + response = client.get_method(restApiId=api_id, resourceId=root_id, httpMethod="GET") + + # this is hard to match against, so remove it + response["ResponseMetadata"].pop("HTTPHeaders", None) + response["ResponseMetadata"].pop("RetryAttempts", None) + response.should.equal( + { + "httpMethod": "GET", + "authorizationType": "none", + "apiKeyRequired": True, "ResponseMetadata": {"HTTPStatusCode": 200}, } ) From 92fc39d7bbb40d02aa96f0cc244e74eecd60f23b Mon Sep 17 00:00:00 2001 From: Jon Beilke Date: Mon, 17 Feb 2020 15:08:09 -0600 Subject: [PATCH 061/658] add Arn to cognito user pool model and response --- moto/cognitoidp/models.py | 5 +++++ tests/test_cognitoidp/test_cognitoidp.py | 5 +++++ 2 files changed, 10 insertions(+) diff --git a/moto/cognitoidp/models.py b/moto/cognitoidp/models.py index 96b23a404639..2394c64ee1d9 100644 --- a/moto/cognitoidp/models.py +++ b/moto/cognitoidp/models.py @@ -14,6 +14,7 @@ from moto.compat import OrderedDict from moto.core import BaseBackend, BaseModel +from moto.core import ACCOUNT_ID as DEFAULT_ACCOUNT_ID from .exceptions import ( GroupExistsException, NotAuthorizedError, @@ -69,6 +70,9 @@ class CognitoIdpUserPool(BaseModel): def __init__(self, region, name, extended_config): self.region = region self.id = "{}_{}".format(self.region, str(uuid.uuid4().hex)) + self.arn = "arn:aws:cognito-idp:{}:{}:userpool/{}".format( + self.region, DEFAULT_ACCOUNT_ID, self.id + ) self.name = name self.status = None self.extended_config = extended_config or {} @@ -91,6 +95,7 @@ def __init__(self, region, name, extended_config): def _base_json(self): return { "Id": self.id, + "Arn": self.arn, "Name": self.name, "Status": self.status, "CreationDate": time.mktime(self.creation_date.timetuple()), diff --git a/tests/test_cognitoidp/test_cognitoidp.py b/tests/test_cognitoidp/test_cognitoidp.py index 2f7ed11e5792..d37cf7d5cf10 100644 --- a/tests/test_cognitoidp/test_cognitoidp.py +++ b/tests/test_cognitoidp/test_cognitoidp.py @@ -27,6 +27,11 @@ def test_create_user_pool(): result["UserPool"]["Id"].should_not.be.none result["UserPool"]["Id"].should.match(r"[\w-]+_[0-9a-zA-Z]+") + result["UserPool"]["Arn"].should.equal( + "arn:aws:cognito-idp:us-west-2:{}:userpool/{}".format( + ACCOUNT_ID, result["UserPool"]["Id"] + ) + ) result["UserPool"]["Name"].should.equal(name) result["UserPool"]["LambdaConfig"]["PreSignUp"].should.equal(value) From aeb194fc57167c2df11741aa70a14aab758bdd98 Mon Sep 17 00:00:00 2001 From: Steve Pulec Date: Mon, 17 Feb 2020 17:29:00 -0600 Subject: [PATCH 062/658] Update new lambda test to work with updated status codes. CC #2642. 
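(For context: synchronous `RequestResponse` invocations return HTTP 200, while the 202 status belongs to asynchronous `Event` invocations, hence the assertion change below.) As a usage note for the user-pool `Arn` added in PATCH 061, a sketch of what callers can now rely on (the pool name is illustrative):

.. sourcecode:: python

    import boto3
    from moto import mock_cognitoidp

    @mock_cognitoidp
    def test_user_pool_exposes_arn():
        client = boto3.client("cognito-idp", region_name="us-west-2")
        pool = client.create_user_pool(PoolName="example-pool")["UserPool"]
        # The Arn embeds the region, the mocked account id and the pool id
        assert pool["Arn"].startswith("arn:aws:cognito-idp:us-west-2:")
        assert pool["Arn"].endswith("userpool/{}".format(pool["Id"]))
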
--- tests/test_awslambda/test_lambda.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/test_awslambda/test_lambda.py b/tests/test_awslambda/test_lambda.py index d26d78fd418c..4f0bc5063956 100644 --- a/tests/test_awslambda/test_lambda.py +++ b/tests/test_awslambda/test_lambda.py @@ -150,7 +150,7 @@ def test_invoke_requestresponse_function_with_arn(): Payload=json.dumps(in_data), ) - success_result["StatusCode"].should.equal(202) + success_result["StatusCode"].should.equal(200) result_obj = json.loads( base64.b64decode(success_result["LogResult"]).decode("utf-8") ) From 01f3b60c09109f6805ed01e6e4eb27c693a6c30a Mon Sep 17 00:00:00 2001 From: Daniel Wallace Date: Mon, 17 Feb 2020 17:38:53 -0600 Subject: [PATCH 063/658] Allow ports in k8s service urls for s3 mock If there is a port in the host for the request, then this if statement is not tripped. --- moto/s3/responses.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/moto/s3/responses.py b/moto/s3/responses.py index 6041201bfc35..0e68a3116af4 100644 --- a/moto/s3/responses.py +++ b/moto/s3/responses.py @@ -168,7 +168,7 @@ def subdomain_based_buckets(self, request): or host.startswith("localhost") or host.startswith("localstack") or re.match(r"^[^.]+$", host) - or re.match(r"^.*\.svc\.cluster\.local$", host) + or re.match(r"^.*\.svc\.cluster\.local:?\d*$", host) ): # Default to path-based buckets for (1) localhost, (2) localstack hosts (e.g. localstack.dev), # (3) local host names that do not contain a "." (e.g., Docker container host names), or From 11b7be0e85053eb60b55142f26693e5b59e37b54 Mon Sep 17 00:00:00 2001 From: Mike Grima Date: Thu, 13 Feb 2020 18:01:44 -0800 Subject: [PATCH 064/658] Implemented S3 Account-level public access block. - Also added AWS Config listing and fetching support - Also fixed Lambda test breakage --- .travis.yml | 3 +- README.md | 10 + moto/config/models.py | 55 ++-- moto/core/models.py | 11 +- moto/s3/config.py | 147 +++++++++++ moto/s3/exceptions.py | 9 + moto/s3/models.py | 43 +++- moto/s3/responses.py | 136 ++++++++-- moto/s3/urls.py | 4 +- tests/test_awslambda/test_lambda.py | 2 +- tests/test_core/test_server.py | 2 +- tests/test_s3/test_s3.py | 375 ++++++++++++++++++++++++++++ 12 files changed, 746 insertions(+), 51 deletions(-) diff --git a/.travis.yml b/.travis.yml index ac9322211ad3..8f218134b84d 100644 --- a/.travis.yml +++ b/.travis.yml @@ -26,11 +26,12 @@ install: fi docker run --rm -t --name motoserver -e TEST_SERVER_MODE=true -e AWS_SECRET_ACCESS_KEY=server_secret -e AWS_ACCESS_KEY_ID=server_key -v `pwd`:/moto -p 5000:5000 -v /var/run/docker.sock:/var/run/docker.sock python:${PYTHON_DOCKER_TAG} /moto/travis_moto_server.sh & fi + travis_retry pip install -r requirements-dev.txt travis_retry pip install boto==2.45.0 travis_retry pip install boto3 travis_retry pip install dist/moto*.gz travis_retry pip install coveralls==1.1 - travis_retry pip install -r requirements-dev.txt + travis_retry pip install coverage==4.5.4 if [ "$TEST_SERVER_MODE" = "true" ]; then python wait_for.py diff --git a/README.md b/README.md index f5c45a6b6d26..6fb942aefdd5 100644 --- a/README.md +++ b/README.md @@ -450,6 +450,16 @@ boto3.resource( ) ``` +### Caveats +The standalone server has some caveats with some services. The following services +require that you update your hosts file for your code to work properly: + +1. `s3-control` + +For the above services, this is required because the hostname is in the form of `AWS_ACCOUNT_ID.localhost`. 
+As a result, you need to add that entry to your host file for your tests to function properly. + + ## Install diff --git a/moto/config/models.py b/moto/config/models.py index 45dccd1ba82e..a66576979350 100644 --- a/moto/config/models.py +++ b/moto/config/models.py @@ -43,7 +43,7 @@ ) from moto.core import BaseBackend, BaseModel -from moto.s3.config import s3_config_query +from moto.s3.config import s3_account_public_access_block_query, s3_config_query from moto.core import ACCOUNT_ID as DEFAULT_ACCOUNT_ID @@ -58,7 +58,10 @@ DEFAULT_PAGE_SIZE = 100 # Map the Config resource type to a backend: -RESOURCE_MAP = {"AWS::S3::Bucket": s3_config_query} +RESOURCE_MAP = { + "AWS::S3::Bucket": s3_config_query, + "AWS::S3::AccountPublicAccessBlock": s3_account_public_access_block_query, +} def datetime2int(date): @@ -867,16 +870,17 @@ def list_discovered_resources( backend_region=backend_query_region, ) - result = { - "resourceIdentifiers": [ - { - "resourceType": identifier["type"], - "resourceId": identifier["id"], - "resourceName": identifier["name"], - } - for identifier in identifiers - ] - } + resource_identifiers = [] + for identifier in identifiers: + item = {"resourceType": identifier["type"], "resourceId": identifier["id"]} + + # Some resource types lack names: + if identifier.get("name"): + item["resourceName"] = identifier["name"] + + resource_identifiers.append(item) + + result = {"resourceIdentifiers": resource_identifiers} if new_token: result["nextToken"] = new_token @@ -927,18 +931,21 @@ def list_aggregate_discovered_resources( resource_region=resource_region, ) - result = { - "ResourceIdentifiers": [ - { - "SourceAccountId": DEFAULT_ACCOUNT_ID, - "SourceRegion": identifier["region"], - "ResourceType": identifier["type"], - "ResourceId": identifier["id"], - "ResourceName": identifier["name"], - } - for identifier in identifiers - ] - } + resource_identifiers = [] + for identifier in identifiers: + item = { + "SourceAccountId": DEFAULT_ACCOUNT_ID, + "SourceRegion": identifier["region"], + "ResourceType": identifier["type"], + "ResourceId": identifier["id"], + } + + if identifier.get("name"): + item["ResourceName"] = identifier["name"] + + resource_identifiers.append(item) + + result = {"ResourceIdentifiers": resource_identifiers} if new_token: result["NextToken"] = new_token diff --git a/moto/core/models.py b/moto/core/models.py index 3be3bbd8ee4a..ffb2ffd9f9e9 100644 --- a/moto/core/models.py +++ b/moto/core/models.py @@ -606,12 +606,13 @@ def list_config_service_resources( As such, the proper way to implement is to first obtain a full list of results from all the region backends, and then filter from there. It may be valuable to make this a concatenation of the region and resource name. - :param resource_region: - :param resource_ids: - :param resource_name: - :param limit: - :param next_token: + :param resource_ids: A list of resource IDs + :param resource_name: The individual name of a resource + :param limit: How many per page + :param next_token: The item that will page on :param backend_region: The region for the backend to pull results from. Set to `None` if this is an aggregated query. + :param resource_region: The region for where the resources reside to pull results from. Set to `None` if this is a + non-aggregated query. 
:return: This should return a list of Dicts that have the following fields: [ { diff --git a/moto/s3/config.py b/moto/s3/config.py index 8098addfcf6d..04b4315f359f 100644 --- a/moto/s3/config.py +++ b/moto/s3/config.py @@ -1,8 +1,13 @@ +import datetime import json +import time + +from boto3 import Session from moto.core.exceptions import InvalidNextTokenException from moto.core.models import ConfigQueryModel from moto.s3 import s3_backends +from moto.s3.models import get_moto_s3_account_id class S3ConfigQuery(ConfigQueryModel): @@ -118,4 +123,146 @@ def get_config_resource( return config_data +class S3AccountPublicAccessBlockConfigQuery(ConfigQueryModel): + def list_config_service_resources( + self, + resource_ids, + resource_name, + limit, + next_token, + backend_region=None, + resource_region=None, + ): + # For the Account Public Access Block, they are the same for all regions. The resource ID is the AWS account ID + # There is no resource name -- it should be a blank string "" if provided. + + # The resource name can only ever be None or an empty string: + if resource_name is not None and resource_name != "": + return [], None + + pab = None + account_id = get_moto_s3_account_id() + regions = [region for region in Session().get_available_regions("config")] + + # If a resource ID was passed in, then filter accordingly: + if resource_ids: + for id in resource_ids: + if account_id == id: + pab = self.backends["global"].account_public_access_block + break + + # Otherwise, just grab the one from the backend: + if not resource_ids: + pab = self.backends["global"].account_public_access_block + + # If it's not present, then return nothing + if not pab: + return [], None + + # Filter on regions (and paginate on them as well): + if backend_region: + pab_list = [backend_region] + elif resource_region: + # Invalid region? + if resource_region not in regions: + return [], None + + pab_list = [resource_region] + + # Aggregated query where no regions were supplied so return them all: + else: + pab_list = regions + + # Pagination logic: + sorted_regions = sorted(pab_list) + new_token = None + + # Get the start: + if not next_token: + start = 0 + else: + # Tokens for this moto feature is just the region-name: + # For OTHER non-global resource types, it's the region concatenated with the resource ID. + if next_token not in sorted_regions: + raise InvalidNextTokenException() + + start = sorted_regions.index(next_token) + + # Get the list of items to collect: + pab_list = sorted_regions[start : (start + limit)] + + if len(sorted_regions) > (start + limit): + new_token = sorted_regions[start + limit] + + return ( + [ + { + "type": "AWS::S3::AccountPublicAccessBlock", + "id": account_id, + "region": region, + } + for region in pab_list + ], + new_token, + ) + + def get_config_resource( + self, resource_id, resource_name=None, backend_region=None, resource_region=None + ): + # Do we even have this defined? + if not self.backends["global"].account_public_access_block: + return None + + # Resource name can only ever be "" if it's supplied: + if resource_name is not None and resource_name != "": + return None + + # Are we filtering based on region? + account_id = get_moto_s3_account_id() + regions = [region for region in Session().get_available_regions("config")] + + # Is the resource ID correct?: + if account_id == resource_id: + if backend_region: + pab_region = backend_region + + # Invalid region? 
+ elif resource_region not in regions: + return None + + else: + pab_region = resource_region + + else: + return None + + # Format the PAB to the AWS Config format: + creation_time = datetime.datetime.utcnow() + config_data = { + "version": "1.3", + "accountId": account_id, + "configurationItemCaptureTime": str(creation_time), + "configurationItemStatus": "OK", + "configurationStateId": str( + int(time.mktime(creation_time.timetuple())) + ), # PY2 and 3 compatible + "resourceType": "AWS::S3::AccountPublicAccessBlock", + "resourceId": account_id, + "awsRegion": pab_region, + "availabilityZone": "Not Applicable", + "configuration": self.backends[ + "global" + ].account_public_access_block.to_config_dict(), + "supplementaryConfiguration": {}, + } + + # The 'configuration' field is also a JSON string: + config_data["configuration"] = json.dumps(config_data["configuration"]) + + return config_data + + s3_config_query = S3ConfigQuery(s3_backends) +s3_account_public_access_block_query = S3AccountPublicAccessBlockConfigQuery( + s3_backends +) diff --git a/moto/s3/exceptions.py b/moto/s3/exceptions.py index bc339772e62e..e26f384d50f8 100644 --- a/moto/s3/exceptions.py +++ b/moto/s3/exceptions.py @@ -359,3 +359,12 @@ def __init__(self, *args, **kwargs): *args, **kwargs ) + + +class WrongPublicAccessBlockAccountIdError(S3ClientError): + code = 403 + + def __init__(self): + super(WrongPublicAccessBlockAccountIdError, self).__init__( + "AccessDenied", "Access Denied" + ) diff --git a/moto/s3/models.py b/moto/s3/models.py index fe8e908ef996..5a665e27efeb 100644 --- a/moto/s3/models.py +++ b/moto/s3/models.py @@ -19,7 +19,7 @@ import six from bisect import insort -from moto.core import BaseBackend, BaseModel +from moto.core import ACCOUNT_ID, BaseBackend, BaseModel from moto.core.utils import iso_8601_datetime_with_milliseconds, rfc_1123_datetime from .exceptions import ( BucketAlreadyExists, @@ -37,6 +37,7 @@ CrossLocationLoggingProhibitted, NoSuchPublicAccessBlockConfiguration, InvalidPublicAccessBlockConfiguration, + WrongPublicAccessBlockAccountIdError, ) from .utils import clean_key_name, _VersionedKeyStore @@ -58,6 +59,13 @@ OWNER = "75aa57f09aa0c8caeab4f8c24e99d10f8e7faeebf76c078efc7c6caea54ba06a" +def get_moto_s3_account_id(): + """This makes it easy for mocking AWS Account IDs when using AWS Config + -- Simply mock.patch the ACCOUNT_ID here, and Config gets it for free. 
+ """ + return ACCOUNT_ID + + class FakeDeleteMarker(BaseModel): def __init__(self, key): self.key = key @@ -1163,6 +1171,7 @@ def to_config_dict(self): class S3Backend(BaseBackend): def __init__(self): self.buckets = {} + self.account_public_access_block = None def create_bucket(self, bucket_name, region_name): if bucket_name in self.buckets: @@ -1264,6 +1273,16 @@ def get_bucket_public_access_block(self, bucket_name): return bucket.public_access_block + def get_account_public_access_block(self, account_id): + # The account ID should equal the account id that is set for Moto: + if account_id != ACCOUNT_ID: + raise WrongPublicAccessBlockAccountIdError() + + if not self.account_public_access_block: + raise NoSuchPublicAccessBlockConfiguration() + + return self.account_public_access_block + def set_key( self, bucket_name, key_name, value, storage=None, etag=None, multipart=None ): @@ -1356,6 +1375,13 @@ def delete_bucket_public_access_block(self, bucket_name): bucket = self.get_bucket(bucket_name) bucket.public_access_block = None + def delete_account_public_access_block(self, account_id): + # The account ID should equal the account id that is set for Moto: + if account_id != ACCOUNT_ID: + raise WrongPublicAccessBlockAccountIdError() + + self.account_public_access_block = None + def put_bucket_notification_configuration(self, bucket_name, notification_config): bucket = self.get_bucket(bucket_name) bucket.set_notification_configuration(notification_config) @@ -1384,6 +1410,21 @@ def put_bucket_public_access_block(self, bucket_name, pub_block_config): pub_block_config.get("RestrictPublicBuckets"), ) + def put_account_public_access_block(self, account_id, pub_block_config): + # The account ID should equal the account id that is set for Moto: + if account_id != ACCOUNT_ID: + raise WrongPublicAccessBlockAccountIdError() + + if not pub_block_config: + raise InvalidPublicAccessBlockConfiguration() + + self.account_public_access_block = PublicAccessBlock( + pub_block_config.get("BlockPublicAcls"), + pub_block_config.get("IgnorePublicAcls"), + pub_block_config.get("BlockPublicPolicy"), + pub_block_config.get("RestrictPublicBuckets"), + ) + def initiate_multipart(self, bucket_name, key_name, metadata): bucket = self.get_bucket(bucket_name) new_multipart = FakeMultipart(key_name, metadata) diff --git a/moto/s3/responses.py b/moto/s3/responses.py index 6041201bfc35..4cb6e5288bdb 100644 --- a/moto/s3/responses.py +++ b/moto/s3/responses.py @@ -4,6 +4,7 @@ import sys import six +from botocore.awsrequest import AWSPreparedRequest from moto.core.utils import str_to_rfc_1123_datetime, py2_strip_unicode_keys from six.moves.urllib.parse import parse_qs, urlparse, unquote @@ -123,6 +124,11 @@ "uploadId": "PutObject", }, }, + "CONTROL": { + "GET": {"publicAccessBlock": "GetPublicAccessBlock"}, + "PUT": {"publicAccessBlock": "PutPublicAccessBlock"}, + "DELETE": {"publicAccessBlock": "DeletePublicAccessBlock"}, + }, } @@ -220,7 +226,7 @@ def ambiguous_response(self, request, full_url, headers): # Depending on which calling format the client is using, we don't know # if this is a bucket or key request so we have to check if self.subdomain_based_buckets(request): - return self.key_response(request, full_url, headers) + return self.key_or_control_response(request, full_url, headers) else: # Using path-based buckets return self.bucket_response(request, full_url, headers) @@ -287,7 +293,7 @@ def _bucket_response(self, request, full_url, headers): return self._bucket_response_post(request, body, bucket_name) else: raise 
NotImplementedError( - "Method {0} has not been impelemented in the S3 backend yet".format( + "Method {0} has not been implemented in the S3 backend yet".format( method ) ) @@ -595,6 +601,20 @@ def _body_contains_location_constraint(self, body): pass return False + def _parse_pab_config(self, body): + parsed_xml = xmltodict.parse(body) + parsed_xml["PublicAccessBlockConfiguration"].pop("@xmlns", None) + + # If Python 2, fix the unicode strings: + if sys.version_info[0] < 3: + parsed_xml = { + "PublicAccessBlockConfiguration": py2_strip_unicode_keys( + dict(parsed_xml["PublicAccessBlockConfiguration"]) + ) + } + + return parsed_xml + def _bucket_response_put( self, request, body, region_name, bucket_name, querystring ): @@ -673,19 +693,9 @@ def _bucket_response_put( raise e elif "publicAccessBlock" in querystring: - parsed_xml = xmltodict.parse(body) - parsed_xml["PublicAccessBlockConfiguration"].pop("@xmlns", None) - - # If Python 2, fix the unicode strings: - if sys.version_info[0] < 3: - parsed_xml = { - "PublicAccessBlockConfiguration": py2_strip_unicode_keys( - dict(parsed_xml["PublicAccessBlockConfiguration"]) - ) - } - + pab_config = self._parse_pab_config(body) self.backend.put_bucket_public_access_block( - bucket_name, parsed_xml["PublicAccessBlockConfiguration"] + bucket_name, pab_config["PublicAccessBlockConfiguration"] ) return "" @@ -870,15 +880,21 @@ def toint(i): ) return 206, response_headers, response_content[begin : end + 1] - def key_response(self, request, full_url, headers): + def key_or_control_response(self, request, full_url, headers): + # Key and Control are lumped in because splitting out the regex is too much of a pain :/ self.method = request.method self.path = self._get_path(request) self.headers = request.headers if "host" not in self.headers: self.headers["host"] = urlparse(full_url).netloc response_headers = {} + try: - response = self._key_response(request, full_url, headers) + # Is this an S3 control response? + if isinstance(request, AWSPreparedRequest) and "s3-control" in request.url: + response = self._control_response(request, full_url, headers) + else: + response = self._key_response(request, full_url, headers) except S3ClientError as s3error: response = s3error.code, {}, s3error.description @@ -894,6 +910,94 @@ def key_response(self, request, full_url, headers): ) return status_code, response_headers, response_content + def _control_response(self, request, full_url, headers): + parsed_url = urlparse(full_url) + query = parse_qs(parsed_url.query, keep_blank_values=True) + method = request.method + + if hasattr(request, "body"): + # Boto + body = request.body + if hasattr(body, "read"): + body = body.read() + else: + # Flask server + body = request.data + if body is None: + body = b"" + + if method == "GET": + return self._control_response_get(request, query, headers) + elif method == "PUT": + return self._control_response_put(request, body, query, headers) + elif method == "DELETE": + return self._control_response_delete(request, query, headers) + else: + raise NotImplementedError( + "Method {0} has not been implemented in the S3 backend yet".format( + method + ) + ) + + def _control_response_get(self, request, query, headers): + action = self.path.split("?")[0].split("/")[ + -1 + ] # Gets the action out of the URL sans query params. 
+ self._set_action("CONTROL", "GET", action) + self._authenticate_and_authorize_s3_action() + + response_headers = {} + if "publicAccessBlock" in action: + public_block_config = self.backend.get_account_public_access_block( + headers["x-amz-account-id"] + ) + template = self.response_template(S3_PUBLIC_ACCESS_BLOCK_CONFIGURATION) + return ( + 200, + response_headers, + template.render(public_block_config=public_block_config), + ) + + raise NotImplementedError( + "Method {0} has not been implemented in the S3 backend yet".format(action) + ) + + def _control_response_put(self, request, body, query, headers): + action = self.path.split("?")[0].split("/")[ + -1 + ] # Gets the action out of the URL sans query params. + self._set_action("CONTROL", "PUT", action) + self._authenticate_and_authorize_s3_action() + + response_headers = {} + if "publicAccessBlock" in action: + pab_config = self._parse_pab_config(body) + self.backend.put_account_public_access_block( + headers["x-amz-account-id"], + pab_config["PublicAccessBlockConfiguration"], + ) + return 200, response_headers, "" + + raise NotImplementedError( + "Method {0} has not been implemented in the S3 backend yet".format(action) + ) + + def _control_response_delete(self, request, query, headers): + action = self.path.split("?")[0].split("/")[ + -1 + ] # Gets the action out of the URL sans query params. + self._set_action("CONTROL", "DELETE", action) + self._authenticate_and_authorize_s3_action() + + response_headers = {} + if "publicAccessBlock" in action: + self.backend.delete_account_public_access_block(headers["x-amz-account-id"]) + return 200, response_headers, "" + + raise NotImplementedError( + "Method {0} has not been implemented in the S3 backend yet".format(action) + ) + def _key_response(self, request, full_url, headers): parsed_url = urlparse(full_url) query = parse_qs(parsed_url.query, keep_blank_values=True) diff --git a/moto/s3/urls.py b/moto/s3/urls.py index 7241dbef1768..752762184d09 100644 --- a/moto/s3/urls.py +++ b/moto/s3/urls.py @@ -13,7 +13,7 @@ # subdomain key of path-based bucket "{0}/(?P[^/]+)/?$": S3ResponseInstance.ambiguous_response, # path-based bucket + key - "{0}/(?P[^/]+)/(?P.+)": S3ResponseInstance.key_response, + "{0}/(?P[^/]+)/(?P.+)": S3ResponseInstance.key_or_control_response, # subdomain bucket + key with empty first part of path - "{0}//(?P.*)$": S3ResponseInstance.key_response, + "{0}//(?P.*)$": S3ResponseInstance.key_or_control_response, } diff --git a/tests/test_awslambda/test_lambda.py b/tests/test_awslambda/test_lambda.py index d26d78fd418c..4f0bc5063956 100644 --- a/tests/test_awslambda/test_lambda.py +++ b/tests/test_awslambda/test_lambda.py @@ -150,7 +150,7 @@ def test_invoke_requestresponse_function_with_arn(): Payload=json.dumps(in_data), ) - success_result["StatusCode"].should.equal(202) + success_result["StatusCode"].should.equal(200) result_obj = json.loads( base64.b64decode(success_result["LogResult"]).decode("utf-8") ) diff --git a/tests/test_core/test_server.py b/tests/test_core/test_server.py index 5514223afc6d..205a2ad0fdb7 100644 --- a/tests/test_core/test_server.py +++ b/tests/test_core/test_server.py @@ -46,4 +46,4 @@ def test_domain_dispatched_with_service(): dispatcher = DomainDispatcherApplication(create_backend_app, service="s3") backend_app = dispatcher.get_application({"HTTP_HOST": "s3.us-east1.amazonaws.com"}) keys = set(backend_app.view_functions.keys()) - keys.should.contain("ResponseObject.key_response") + keys.should.contain("ResponseObject.key_or_control_response") diff 
--git a/tests/test_s3/test_s3.py b/tests/test_s3/test_s3.py index 56cbe547ba0f..7f750cabdd8e 100644 --- a/tests/test_s3/test_s3.py +++ b/tests/test_s3/test_s3.py @@ -5,6 +5,7 @@ import os import sys +from boto3 import Session from six.moves.urllib.request import urlopen from six.moves.urllib.error import HTTPError from functools import wraps @@ -1135,6 +1136,380 @@ def test_s3_location_should_error_outside_useast1(): "The unspecified location constraint is incompatible for the region specific endpoint this request was sent to." ) + # All tests for s3-control cannot be run under the server without a modification of the + # hosts file on your system. This is due to the fact that the URL to the host is in the form of: + # ACCOUNT_ID.s3-control.amazonaws.com <-- That Account ID part is the problem. If you want to + # make use of the moto server, update your hosts file for `THE_ACCOUNT_ID_FOR_MOTO.localhost` + # and this will work fine. + + @mock_s3 + def test_get_public_access_block_for_account(): + from moto.s3.models import ACCOUNT_ID + + client = boto3.client("s3control", region_name="us-west-2") + + # With an invalid account ID: + with assert_raises(ClientError) as ce: + client.get_public_access_block(AccountId="111111111111") + assert ce.exception.response["Error"]["Code"] == "AccessDenied" + + # Without one defined: + with assert_raises(ClientError) as ce: + client.get_public_access_block(AccountId=ACCOUNT_ID) + assert ( + ce.exception.response["Error"]["Code"] + == "NoSuchPublicAccessBlockConfiguration" + ) + + # Put a with an invalid account ID: + with assert_raises(ClientError) as ce: + client.put_public_access_block( + AccountId="111111111111", + PublicAccessBlockConfiguration={"BlockPublicAcls": True}, + ) + assert ce.exception.response["Error"]["Code"] == "AccessDenied" + + # Put with an invalid PAB: + with assert_raises(ClientError) as ce: + client.put_public_access_block( + AccountId=ACCOUNT_ID, PublicAccessBlockConfiguration={} + ) + assert ce.exception.response["Error"]["Code"] == "InvalidRequest" + assert ( + "Must specify at least one configuration." 
+ in ce.exception.response["Error"]["Message"] + ) + + # Correct PAB: + client.put_public_access_block( + AccountId=ACCOUNT_ID, + PublicAccessBlockConfiguration={ + "BlockPublicAcls": True, + "IgnorePublicAcls": True, + "BlockPublicPolicy": True, + "RestrictPublicBuckets": True, + }, + ) + + # Get the correct PAB (for all regions): + for region in Session().get_available_regions("s3control"): + region_client = boto3.client("s3control", region_name=region) + assert region_client.get_public_access_block(AccountId=ACCOUNT_ID)[ + "PublicAccessBlockConfiguration" + ] == { + "BlockPublicAcls": True, + "IgnorePublicAcls": True, + "BlockPublicPolicy": True, + "RestrictPublicBuckets": True, + } + + # Delete with an invalid account ID: + with assert_raises(ClientError) as ce: + client.delete_public_access_block(AccountId="111111111111") + assert ce.exception.response["Error"]["Code"] == "AccessDenied" + + # Delete successfully: + client.delete_public_access_block(AccountId=ACCOUNT_ID) + + # Confirm that it's deleted: + with assert_raises(ClientError) as ce: + client.get_public_access_block(AccountId=ACCOUNT_ID) + assert ( + ce.exception.response["Error"]["Code"] + == "NoSuchPublicAccessBlockConfiguration" + ) + + @mock_s3 + @mock_config + def test_config_list_account_pab(): + from moto.s3.models import ACCOUNT_ID + + client = boto3.client("s3control", region_name="us-west-2") + config_client = boto3.client("config", region_name="us-west-2") + + # Create the aggregator: + account_aggregation_source = { + "AccountIds": [ACCOUNT_ID], + "AllAwsRegions": True, + } + config_client.put_configuration_aggregator( + ConfigurationAggregatorName="testing", + AccountAggregationSources=[account_aggregation_source], + ) + + # Without a PAB in place: + result = config_client.list_discovered_resources( + resourceType="AWS::S3::AccountPublicAccessBlock" + ) + assert not result["resourceIdentifiers"] + result = config_client.list_aggregate_discovered_resources( + ResourceType="AWS::S3::AccountPublicAccessBlock", + ConfigurationAggregatorName="testing", + ) + assert not result["ResourceIdentifiers"] + + # Create a PAB: + client.put_public_access_block( + AccountId=ACCOUNT_ID, + PublicAccessBlockConfiguration={ + "BlockPublicAcls": True, + "IgnorePublicAcls": True, + "BlockPublicPolicy": True, + "RestrictPublicBuckets": True, + }, + ) + + # Test that successful queries work (non-aggregated): + result = config_client.list_discovered_resources( + resourceType="AWS::S3::AccountPublicAccessBlock" + ) + assert result["resourceIdentifiers"] == [ + { + "resourceType": "AWS::S3::AccountPublicAccessBlock", + "resourceId": ACCOUNT_ID, + } + ] + result = config_client.list_discovered_resources( + resourceType="AWS::S3::AccountPublicAccessBlock", + resourceIds=[ACCOUNT_ID, "nope"], + ) + assert result["resourceIdentifiers"] == [ + { + "resourceType": "AWS::S3::AccountPublicAccessBlock", + "resourceId": ACCOUNT_ID, + } + ] + result = config_client.list_discovered_resources( + resourceType="AWS::S3::AccountPublicAccessBlock", resourceName="" + ) + assert result["resourceIdentifiers"] == [ + { + "resourceType": "AWS::S3::AccountPublicAccessBlock", + "resourceId": ACCOUNT_ID, + } + ] + + # Test that successful queries work (aggregated): + result = config_client.list_aggregate_discovered_resources( + ResourceType="AWS::S3::AccountPublicAccessBlock", + ConfigurationAggregatorName="testing", + ) + regions = {region for region in Session().get_available_regions("config")} + for r in result["ResourceIdentifiers"]: + 
regions.remove(r.pop("SourceRegion")) + assert r == { + "ResourceType": "AWS::S3::AccountPublicAccessBlock", + "SourceAccountId": ACCOUNT_ID, + "ResourceId": ACCOUNT_ID, + } + + # Just check that the len is the same -- this should be reasonable + regions = {region for region in Session().get_available_regions("config")} + result = config_client.list_aggregate_discovered_resources( + ResourceType="AWS::S3::AccountPublicAccessBlock", + ConfigurationAggregatorName="testing", + Filters={"ResourceName": ""}, + ) + assert len(regions) == len(result["ResourceIdentifiers"]) + result = config_client.list_aggregate_discovered_resources( + ResourceType="AWS::S3::AccountPublicAccessBlock", + ConfigurationAggregatorName="testing", + Filters={"ResourceName": "", "ResourceId": ACCOUNT_ID}, + ) + assert len(regions) == len(result["ResourceIdentifiers"]) + result = config_client.list_aggregate_discovered_resources( + ResourceType="AWS::S3::AccountPublicAccessBlock", + ConfigurationAggregatorName="testing", + Filters={ + "ResourceName": "", + "ResourceId": ACCOUNT_ID, + "Region": "us-west-2", + }, + ) + assert ( + result["ResourceIdentifiers"][0]["SourceRegion"] == "us-west-2" + and len(result["ResourceIdentifiers"]) == 1 + ) + + # Test aggregator pagination: + result = config_client.list_aggregate_discovered_resources( + ResourceType="AWS::S3::AccountPublicAccessBlock", + ConfigurationAggregatorName="testing", + Limit=1, + ) + regions = sorted( + [region for region in Session().get_available_regions("config")] + ) + assert result["ResourceIdentifiers"][0] == { + "ResourceType": "AWS::S3::AccountPublicAccessBlock", + "SourceAccountId": ACCOUNT_ID, + "ResourceId": ACCOUNT_ID, + "SourceRegion": regions[0], + } + assert result["NextToken"] == regions[1] + + # Get the next region: + result = config_client.list_aggregate_discovered_resources( + ResourceType="AWS::S3::AccountPublicAccessBlock", + ConfigurationAggregatorName="testing", + Limit=1, + NextToken=regions[1], + ) + assert result["ResourceIdentifiers"][0] == { + "ResourceType": "AWS::S3::AccountPublicAccessBlock", + "SourceAccountId": ACCOUNT_ID, + "ResourceId": ACCOUNT_ID, + "SourceRegion": regions[1], + } + + # Non-aggregated with incorrect info: + result = config_client.list_discovered_resources( + resourceType="AWS::S3::AccountPublicAccessBlock", resourceName="nope" + ) + assert not result["resourceIdentifiers"] + result = config_client.list_discovered_resources( + resourceType="AWS::S3::AccountPublicAccessBlock", resourceIds=["nope"] + ) + assert not result["resourceIdentifiers"] + + # Aggregated with incorrect info: + result = config_client.list_aggregate_discovered_resources( + ResourceType="AWS::S3::AccountPublicAccessBlock", + ConfigurationAggregatorName="testing", + Filters={"ResourceName": "nope"}, + ) + assert not result["ResourceIdentifiers"] + result = config_client.list_aggregate_discovered_resources( + ResourceType="AWS::S3::AccountPublicAccessBlock", + ConfigurationAggregatorName="testing", + Filters={"ResourceId": "nope"}, + ) + assert not result["ResourceIdentifiers"] + result = config_client.list_aggregate_discovered_resources( + ResourceType="AWS::S3::AccountPublicAccessBlock", + ConfigurationAggregatorName="testing", + Filters={"Region": "Nope"}, + ) + assert not result["ResourceIdentifiers"] + + @mock_s3 + @mock_config + def test_config_get_account_pab(): + from moto.s3.models import ACCOUNT_ID + + client = boto3.client("s3control", region_name="us-west-2") + config_client = boto3.client("config", region_name="us-west-2") + + # 
Create the aggregator: + account_aggregation_source = { + "AccountIds": [ACCOUNT_ID], + "AllAwsRegions": True, + } + config_client.put_configuration_aggregator( + ConfigurationAggregatorName="testing", + AccountAggregationSources=[account_aggregation_source], + ) + + # Without a PAB in place: + with assert_raises(ClientError) as ce: + config_client.get_resource_config_history( + resourceType="AWS::S3::AccountPublicAccessBlock", resourceId=ACCOUNT_ID + ) + assert ( + ce.exception.response["Error"]["Code"] == "ResourceNotDiscoveredException" + ) + # aggregate + result = config_client.batch_get_resource_config( + resourceKeys=[ + { + "resourceType": "AWS::S3::AccountPublicAccessBlock", + "resourceId": "ACCOUNT_ID", + } + ] + ) + assert not result["baseConfigurationItems"] + result = config_client.batch_get_aggregate_resource_config( + ConfigurationAggregatorName="testing", + ResourceIdentifiers=[ + { + "SourceAccountId": ACCOUNT_ID, + "SourceRegion": "us-west-2", + "ResourceId": ACCOUNT_ID, + "ResourceType": "AWS::S3::AccountPublicAccessBlock", + "ResourceName": "", + } + ], + ) + assert not result["BaseConfigurationItems"] + + # Create a PAB: + client.put_public_access_block( + AccountId=ACCOUNT_ID, + PublicAccessBlockConfiguration={ + "BlockPublicAcls": True, + "IgnorePublicAcls": True, + "BlockPublicPolicy": True, + "RestrictPublicBuckets": True, + }, + ) + + # Get the proper config: + proper_config = { + "blockPublicAcls": True, + "ignorePublicAcls": True, + "blockPublicPolicy": True, + "restrictPublicBuckets": True, + } + result = config_client.get_resource_config_history( + resourceType="AWS::S3::AccountPublicAccessBlock", resourceId=ACCOUNT_ID + ) + assert ( + json.loads(result["configurationItems"][0]["configuration"]) + == proper_config + ) + assert ( + result["configurationItems"][0]["accountId"] + == result["configurationItems"][0]["resourceId"] + == ACCOUNT_ID + ) + result = config_client.batch_get_resource_config( + resourceKeys=[ + { + "resourceType": "AWS::S3::AccountPublicAccessBlock", + "resourceId": ACCOUNT_ID, + } + ] + ) + assert len(result["baseConfigurationItems"]) == 1 + assert ( + json.loads(result["baseConfigurationItems"][0]["configuration"]) + == proper_config + ) + assert ( + result["baseConfigurationItems"][0]["accountId"] + == result["baseConfigurationItems"][0]["resourceId"] + == ACCOUNT_ID + ) + + for region in Session().get_available_regions("s3control"): + result = config_client.batch_get_aggregate_resource_config( + ConfigurationAggregatorName="testing", + ResourceIdentifiers=[ + { + "SourceAccountId": ACCOUNT_ID, + "SourceRegion": region, + "ResourceId": ACCOUNT_ID, + "ResourceType": "AWS::S3::AccountPublicAccessBlock", + "ResourceName": "", + } + ], + ) + assert len(result["BaseConfigurationItems"]) == 1 + assert ( + json.loads(result["BaseConfigurationItems"][0]["configuration"]) + == proper_config + ) + @mock_s3_deprecated def test_ranged_get(): From f111dd7febca9377056d0860e6b481e7797b446e Mon Sep 17 00:00:00 2001 From: Steve Pulec Date: Mon, 17 Feb 2020 18:21:08 -0600 Subject: [PATCH 065/658] Update sphinx build version. --- docs/conf.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/docs/conf.py b/docs/conf.py index 28a4b4e6bd1f..a902d0ecf8f4 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -56,9 +56,10 @@ # built documents. # # The short X.Y version. -version = '0.4.10' +import moto +version = moto.__version__ # The full version, including alpha/beta/rc tags. 
-release = '0.4.10' +release = moto.__version__ # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. From 356c55f99d8cb2eec66ad9a1b2ae493671555bd3 Mon Sep 17 00:00:00 2001 From: Steve Pulec Date: Mon, 17 Feb 2020 20:21:18 -0600 Subject: [PATCH 066/658] Fix default resourceMethod for API Gateway. Closes #2750. --- moto/apigateway/models.py | 5 +++-- tests/test_apigateway/test_apigateway.py | 2 -- 2 files changed, 3 insertions(+), 4 deletions(-) diff --git a/moto/apigateway/models.py b/moto/apigateway/models.py index 937b9b08cb03..dcc38efc9c38 100644 --- a/moto/apigateway/models.py +++ b/moto/apigateway/models.py @@ -117,14 +117,15 @@ def __init__(self, id, region_name, api_id, path_part, parent_id): self.api_id = api_id self.path_part = path_part self.parent_id = parent_id - self.resource_methods = {"GET": {}} + self.resource_methods = {} def to_dict(self): response = { "path": self.get_path(), "id": self.id, - "resourceMethods": self.resource_methods, } + if self.resource_methods: + response["resourceMethods"] = self.resource_methods if self.parent_id: response["parentId"] = self.parent_id response["pathPart"] = self.path_part diff --git a/tests/test_apigateway/test_apigateway.py b/tests/test_apigateway/test_apigateway.py index c92fc08f46f3..1b422e875de6 100644 --- a/tests/test_apigateway/test_apigateway.py +++ b/tests/test_apigateway/test_apigateway.py @@ -208,7 +208,6 @@ def test_create_resource(): "path": "/", "id": root_id, "ResponseMetadata": {"HTTPStatusCode": 200}, - "resourceMethods": {"GET": {}}, } ) @@ -257,7 +256,6 @@ def test_child_resource(): "parentId": users_id, "id": tags_id, "ResponseMetadata": {"HTTPStatusCode": 200}, - "resourceMethods": {"GET": {}}, } ) From dcd1f0195fbe2a0f30b22515856b55f3eddf518c Mon Sep 17 00:00:00 2001 From: Steve Pulec Date: Mon, 17 Feb 2020 20:32:19 -0600 Subject: [PATCH 067/658] lint. --- tests/test_apigateway/test_apigateway.py | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/tests/test_apigateway/test_apigateway.py b/tests/test_apigateway/test_apigateway.py index 1b422e875de6..8692ccc9b195 100644 --- a/tests/test_apigateway/test_apigateway.py +++ b/tests/test_apigateway/test_apigateway.py @@ -204,11 +204,7 @@ def test_create_resource(): root_resource["ResponseMetadata"].pop("HTTPHeaders", None) root_resource["ResponseMetadata"].pop("RetryAttempts", None) root_resource.should.equal( - { - "path": "/", - "id": root_id, - "ResponseMetadata": {"HTTPStatusCode": 200}, - } + {"path": "/", "id": root_id, "ResponseMetadata": {"HTTPStatusCode": 200},} ) client.create_resource(restApiId=api_id, parentId=root_id, pathPart="users") From 11e64109eb2ebc0480ab9ce84d6fdf2f177f7c7c Mon Sep 17 00:00:00 2001 From: Steve Pulec Date: Mon, 17 Feb 2020 20:32:28 -0600 Subject: [PATCH 068/658] Fix s3 test for location constraint. 
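As a usage note for PATCH 066 above: a freshly created resource no longer advertises a phantom `GET` entry, and `resourceMethods` is omitted until a method is actually put on the resource. A small sketch (the API name is illustrative):

.. sourcecode:: python

    import boto3
    from moto import mock_apigateway

    @mock_apigateway
    def test_new_resource_has_no_methods():
        client = boto3.client("apigateway", region_name="us-west-2")
        api_id = client.create_rest_api(name="example-api")["id"]
        root = client.get_resources(restApiId=api_id)["items"][0]
        # No resourceMethods key until put_method is called
        assert "resourceMethods" not in root
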
--- tests/test_s3/test_s3.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/test_s3/test_s3.py b/tests/test_s3/test_s3.py index b4badcaf0056..2193f8b2744d 100644 --- a/tests/test_s3/test_s3.py +++ b/tests/test_s3/test_s3.py @@ -2145,7 +2145,7 @@ def test_boto3_copy_object_from_unversioned_to_versioned_bucket(): @mock_s3 def test_boto3_copy_object_with_replacement_tagging(): - client = boto3.client("s3", region_name="eu-north-1") + client = boto3.client("s3", region_name=DEFAULT_REGION_NAME) client.create_bucket(Bucket="mybucket") client.put_object( Bucket="mybucket", Key="original", Body=b"test", Tagging="tag=old" From 9971bcdfcd982a8765c852fb03347682f8da96f4 Mon Sep 17 00:00:00 2001 From: Bert Blommers Date: Tue, 18 Feb 2020 11:49:55 +0000 Subject: [PATCH 069/658] DynamoDB - Send item to DDB Stream on update, not just on create --- moto/dynamodb2/models.py | 3 ++ tests/test_awslambda/test_lambda.py | 66 ++++++++++++++++++++++++++++- 2 files changed, 68 insertions(+), 1 deletion(-) diff --git a/moto/dynamodb2/models.py b/moto/dynamodb2/models.py index 82c3559eaf94..88f750775615 100644 --- a/moto/dynamodb2/models.py +++ b/moto/dynamodb2/models.py @@ -1406,6 +1406,7 @@ def update_item( range_value = None item = table.get_item(hash_value, range_value) + orig_item = copy.deepcopy(item) if not expected: expected = {} @@ -1439,6 +1440,8 @@ def update_item( ) else: item.update_with_attribute_updates(attribute_updates) + if table.stream_shard is not None: + table.stream_shard.add(orig_item, item) return item def delete_item( diff --git a/tests/test_awslambda/test_lambda.py b/tests/test_awslambda/test_lambda.py index d26d78fd418c..397da2813c32 100644 --- a/tests/test_awslambda/test_lambda.py +++ b/tests/test_awslambda/test_lambda.py @@ -1161,7 +1161,7 @@ def test_invoke_function_from_sqs(): @mock_logs @mock_lambda @mock_dynamodb2 -def test_invoke_function_from_dynamodb(): +def test_invoke_function_from_dynamodb_put(): logs_conn = boto3.client("logs", region_name="us-east-1") dynamodb = boto3.client("dynamodb", region_name="us-east-1") table_name = "table_with_stream" @@ -1218,6 +1218,70 @@ def test_invoke_function_from_dynamodb(): assert False, "Test Failed" +@mock_logs +@mock_lambda +@mock_dynamodb2 +def test_invoke_function_from_dynamodb_update(): + logs_conn = boto3.client("logs", region_name="us-east-1") + dynamodb = boto3.client("dynamodb", region_name="us-east-1") + table_name = "table_with_stream" + table = dynamodb.create_table( + TableName=table_name, + KeySchema=[{"AttributeName": "id", "KeyType": "HASH"}], + AttributeDefinitions=[{"AttributeName": "id", "AttributeType": "S"}], + StreamSpecification={ + "StreamEnabled": True, + "StreamViewType": "NEW_AND_OLD_IMAGES", + }, + ) + dynamodb.put_item(TableName=table_name, Item={"id": {"S": "item 1"}}) + + conn = boto3.client("lambda", region_name="us-east-1") + func = conn.create_function( + FunctionName="testFunction", + Runtime="python2.7", + Role=get_role_name(), + Handler="lambda_function.lambda_handler", + Code={"ZipFile": get_test_zip_file3()}, + Description="test lambda function executed after a DynamoDB table is updated", + Timeout=3, + MemorySize=128, + Publish=True, + ) + + response = conn.create_event_source_mapping( + EventSourceArn=table["TableDescription"]["LatestStreamArn"], + FunctionName=func["FunctionArn"], + ) + + assert response["EventSourceArn"] == table["TableDescription"]["LatestStreamArn"] + assert response["State"] == "Enabled" + dynamodb.update_item(TableName=table_name, + Key={'id': {'S': 
'item 1'}}, + UpdateExpression="set #attr = :val", + ExpressionAttributeNames={'#attr': 'new_attr'}, + ExpressionAttributeValues={':val': {'S': 'new_val'}}) + start = time.time() + while (time.time() - start) < 30: + result = logs_conn.describe_log_streams(logGroupName="/aws/lambda/testFunction") + log_streams = result.get("logStreams") + if not log_streams: + time.sleep(1) + continue + + assert len(log_streams) == 1 + result = logs_conn.get_log_events( + logGroupName="/aws/lambda/testFunction", + logStreamName=log_streams[0]["logStreamName"], + ) + for event in result.get("events"): + if event["message"] == "get_test_zip_file3 success": + return + time.sleep(1) + + assert False, "Test Failed" + + @mock_logs @mock_lambda @mock_sqs From 979d20753c4cc861a732cb8ec455e3dd4d35a0f3 Mon Sep 17 00:00:00 2001 From: Laurie O Date: Tue, 18 Feb 2020 21:59:06 +1000 Subject: [PATCH 070/658] Support more defaults in SWF workflow registration SWF workflow type now keeps track of the default task-priority and default AWS Lambda role, set at workflow registration. --- moto/swf/models/workflow_type.py | 2 + moto/swf/responses.py | 12 +++++- .../test_swf/responses/test_workflow_types.py | 37 +++++++++++++++++++ 3 files changed, 49 insertions(+), 2 deletions(-) diff --git a/moto/swf/models/workflow_type.py b/moto/swf/models/workflow_type.py index ddb2475b2785..137f0e221435 100644 --- a/moto/swf/models/workflow_type.py +++ b/moto/swf/models/workflow_type.py @@ -8,6 +8,8 @@ def _configuration_keys(self): "defaultChildPolicy", "defaultExecutionStartToCloseTimeout", "defaultTaskStartToCloseTimeout", + "defaultTaskPriority", + "defaultLambdaRole", ] @property diff --git a/moto/swf/responses.py b/moto/swf/responses.py index 98b736cda259..c57d966eb0ed 100644 --- a/moto/swf/responses.py +++ b/moto/swf/responses.py @@ -300,6 +300,12 @@ def register_workflow_type(self): default_execution_start_to_close_timeout = self._params.get( "defaultExecutionStartToCloseTimeout" ) + default_task_priority = self._params.get( + "defaultTaskPriority" + ) + default_lambda_role = self._params.get( + "defaultLambdaRole" + ) description = self._params.get("description") self._check_string(domain) @@ -309,10 +315,10 @@ def register_workflow_type(self): self._check_none_or_string(default_child_policy) self._check_none_or_string(default_task_start_to_close_timeout) self._check_none_or_string(default_execution_start_to_close_timeout) + self._check_none_or_string(default_task_priority) + self._check_none_or_string(default_lambda_role) self._check_none_or_string(description) - # TODO: add defaultTaskPriority when boto gets to support it - # TODO: add defaultLambdaRole when boto gets to support it self.swf_backend.register_type( "workflow", domain, @@ -322,6 +328,8 @@ def register_workflow_type(self): default_child_policy=default_child_policy, default_task_start_to_close_timeout=default_task_start_to_close_timeout, default_execution_start_to_close_timeout=default_execution_start_to_close_timeout, + default_task_priority=default_task_priority, + default_lambda_role=default_lambda_role, description=description, ) return "" diff --git a/tests/test_swf/responses/test_workflow_types.py b/tests/test_swf/responses/test_workflow_types.py index 4c92d7762b02..72aa814d2145 100644 --- a/tests/test_swf/responses/test_workflow_types.py +++ b/tests/test_swf/responses/test_workflow_types.py @@ -1,7 +1,9 @@ import sure import boto +import boto3 from moto import mock_swf_deprecated +from moto import mock_swf from boto.swf.exceptions import SWFResponseError @@ 
-133,6 +135,41 @@ def test_describe_workflow_type(): infos["status"].should.equal("REGISTERED") +@mock_swf +def test_describe_workflow_type_full_boto3(): + # boto3 required as boto doesn't support all of the arguments + client = boto3.client("swf", region_name="us-east-1") + client.register_domain( + name="test-domain", workflowExecutionRetentionPeriodInDays="2" + ) + client.register_workflow_type( + domain="test-domain", + name="test-workflow", + version="v1.0", + description="Test workflow.", + defaultTaskStartToCloseTimeout="20", + defaultExecutionStartToCloseTimeout="60", + defaultTaskList={"name": "foo"}, + defaultTaskPriority="-2", + defaultChildPolicy="ABANDON", + defaultLambdaRole="arn:bar", + ) + + resp = client.describe_workflow_type( + domain="test-domain", workflowType={"name": "test-workflow", "version": "v1.0"} + ) + resp["typeInfo"]["workflowType"]["name"].should.equal("test-workflow") + resp["typeInfo"]["workflowType"]["version"].should.equal("v1.0") + resp["typeInfo"]["status"].should.equal("REGISTERED") + resp["typeInfo"]["description"].should.equal("Test workflow.") + resp["configuration"]["defaultTaskStartToCloseTimeout"].should.equal("20") + resp["configuration"]["defaultExecutionStartToCloseTimeout"].should.equal("60") + resp["configuration"]["defaultTaskList"]["name"].should.equal("foo") + resp["configuration"]["defaultTaskPriority"].should.equal("-2") + resp["configuration"]["defaultChildPolicy"].should.equal("ABANDON") + resp["configuration"]["defaultLambdaRole"].should.equal("arn:bar") + + @mock_swf_deprecated def test_describe_non_existent_workflow_type(): conn = boto.connect_swf("the_key", "the_secret") From 5863d9fab9bc1b78f4ff8739202fea59965b3635 Mon Sep 17 00:00:00 2001 From: Bert Blommers Date: Tue, 18 Feb 2020 12:34:24 +0000 Subject: [PATCH 071/658] Linting --- tests/test_awslambda/test_lambda.py | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/tests/test_awslambda/test_lambda.py b/tests/test_awslambda/test_lambda.py index 397da2813c32..3c3185c8a7b9 100644 --- a/tests/test_awslambda/test_lambda.py +++ b/tests/test_awslambda/test_lambda.py @@ -1256,11 +1256,13 @@ def test_invoke_function_from_dynamodb_update(): assert response["EventSourceArn"] == table["TableDescription"]["LatestStreamArn"] assert response["State"] == "Enabled" - dynamodb.update_item(TableName=table_name, - Key={'id': {'S': 'item 1'}}, - UpdateExpression="set #attr = :val", - ExpressionAttributeNames={'#attr': 'new_attr'}, - ExpressionAttributeValues={':val': {'S': 'new_val'}}) + dynamodb.update_item( + TableName=table_name, + Key={"id": {"S": "item 1"}}, + UpdateExpression="set #attr = :val", + ExpressionAttributeNames={"#attr": "new_attr"}, + ExpressionAttributeValues={":val": {"S": "new_val"}}, + ) start = time.time() while (time.time() - start) < 30: result = logs_conn.describe_log_streams(logGroupName="/aws/lambda/testFunction") From 3500e7d5d39d4a583cbd02c9ef0254394e5e9254 Mon Sep 17 00:00:00 2001 From: Laurie O Date: Tue, 18 Feb 2020 23:00:37 +1000 Subject: [PATCH 072/658] Styling --- moto/swf/responses.py | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/moto/swf/responses.py b/moto/swf/responses.py index c57d966eb0ed..2b7794ffd7d3 100644 --- a/moto/swf/responses.py +++ b/moto/swf/responses.py @@ -300,12 +300,8 @@ def register_workflow_type(self): default_execution_start_to_close_timeout = self._params.get( "defaultExecutionStartToCloseTimeout" ) - default_task_priority = self._params.get( - "defaultTaskPriority" - ) - 
default_lambda_role = self._params.get( - "defaultLambdaRole" - ) + default_task_priority = self._params.get("defaultTaskPriority") + default_lambda_role = self._params.get("defaultLambdaRole") description = self._params.get("description") self._check_string(domain) From b64a571a37453c36e565f09637a4ae2638a4f910 Mon Sep 17 00:00:00 2001 From: Bryan Alexander Date: Tue, 18 Feb 2020 10:33:27 -0600 Subject: [PATCH 073/658] adds utilities init --- moto/utilities/__init__.py | 0 1 file changed, 0 insertions(+), 0 deletions(-) create mode 100644 moto/utilities/__init__.py diff --git a/moto/utilities/__init__.py b/moto/utilities/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 From 1d140852947d1dc318b05b765e2bc0326aac07c3 Mon Sep 17 00:00:00 2001 From: Jon Beilke Date: Tue, 18 Feb 2020 10:49:35 -0600 Subject: [PATCH 074/658] add API Gateway authorizers --- moto/apigateway/exceptions.py | 9 + moto/apigateway/models.py | 121 +++++++++++ moto/apigateway/responses.py | 84 ++++++++ moto/apigateway/urls.py | 2 + tests/test_apigateway/test_apigateway.py | 250 ++++++++++++++++++++++- 5 files changed, 465 insertions(+), 1 deletion(-) diff --git a/moto/apigateway/exceptions.py b/moto/apigateway/exceptions.py index 2a306ab9930e..ccb870f52f00 100644 --- a/moto/apigateway/exceptions.py +++ b/moto/apigateway/exceptions.py @@ -85,6 +85,15 @@ def __init__(self): ) +class AuthorizerNotFoundException(RESTError): + code = 404 + + def __init__(self): + super(AuthorizerNotFoundException, self).__init__( + "NotFoundException", "Invalid Authorizer identifier specified" + ) + + class StageNotFoundException(RESTError): code = 404 diff --git a/moto/apigateway/models.py b/moto/apigateway/models.py index ae7bdfac3dce..c0e570630d1e 100644 --- a/moto/apigateway/models.py +++ b/moto/apigateway/models.py @@ -28,6 +28,7 @@ InvalidHttpEndpoint, InvalidResourcePathException, InvalidRequestInput, + AuthorizerNotFoundException, StageNotFoundException, RoleNotSpecified, NoIntegrationDefined, @@ -182,6 +183,54 @@ def delete_integration(self, method_type): return self.resource_methods[method_type].pop("methodIntegration") +class Authorizer(BaseModel, dict): + def __init__(self, id, name, authorizer_type, **kwargs): + super(Authorizer, self).__init__() + self["id"] = id + self["name"] = name + self["type"] = authorizer_type + if kwargs.get("provider_arns"): + self["providerARNs"] = kwargs.get("provider_arns") + if kwargs.get("auth_type"): + self["authType"] = kwargs.get("auth_type") + if kwargs.get("authorizer_uri"): + self["authorizerUri"] = kwargs.get("authorizer_uri") + if kwargs.get("authorizer_credentials"): + self["authorizerCredentials"] = kwargs.get("authorizer_credentials") + if kwargs.get("identity_source"): + self["identitySource"] = kwargs.get("identity_source") + if kwargs.get("identity_validation_expression"): + self["identityValidationExpression"] = kwargs.get( + "identity_validation_expression" + ) + self["authorizerResultTtlInSeconds"] = kwargs.get("authorizer_result_ttl") + + def apply_operations(self, patch_operations): + for op in patch_operations: + if "/authorizerUri" in op["path"]: + self["authorizerUri"] = op["value"] + elif "/authorizerCredentials" in op["path"]: + self["authorizerCredentials"] = op["value"] + elif "/authorizerResultTtlInSeconds" in op["path"]: + self["authorizerResultTtlInSeconds"] = int(op["value"]) + elif "/authType" in op["path"]: + self["authType"] = op["value"] + elif "/identitySource" in op["path"]: + self["identitySource"] = op["value"] + elif 
"/identityValidationExpression" in op["path"]: + self["identityValidationExpression"] = op["value"] + elif "/name" in op["path"]: + self["name"] = op["value"] + elif "/providerARNs" in op["path"]: + # TODO: add and remove + raise Exception('Patch operation for "%s" not implemented' % op["path"]) + elif "/type" in op["path"]: + self["type"] = op["value"] + else: + raise Exception('Patch operation "%s" not implemented' % op["op"]) + return self + + class Stage(BaseModel, dict): def __init__( self, @@ -407,6 +456,7 @@ def __init__(self, id, region_name, name, description, **kwargs): self.tags = kwargs.get("tags") or {} self.deployments = {} + self.authorizers = {} self.stages = {} self.resources = {} @@ -474,6 +524,34 @@ def update_integration_mocks(self, stage_name): ), ) + def create_authorizer( + self, + id, + name, + authorizer_type, + provider_arns=None, + auth_type=None, + authorizer_uri=None, + authorizer_credentials=None, + identity_source=None, + identiy_validation_expression=None, + authorizer_result_ttl=None, + ): + authorizer = Authorizer( + id=id, + name=name, + authorizer_type=authorizer_type, + provider_arns=provider_arns, + auth_type=auth_type, + authorizer_uri=authorizer_uri, + authorizer_credentials=authorizer_credentials, + identity_source=identity_source, + identiy_validation_expression=identiy_validation_expression, + authorizer_result_ttl=authorizer_result_ttl, + ) + self.authorizers[id] = authorizer + return authorizer + def create_stage( self, name, @@ -513,6 +591,9 @@ def create_deployment(self, name, description="", stage_variables=None): def get_deployment(self, deployment_id): return self.deployments[deployment_id] + def get_authorizers(self): + return list(self.authorizers.values()) + def get_stages(self): return list(self.stages.values()) @@ -599,6 +680,46 @@ def create_method(self, function_id, resource_id, method_type, authorization_typ method = resource.add_method(method_type, authorization_type) return method + def get_authorizer(self, restapi_id, authorizer_id): + api = self.get_rest_api(restapi_id) + authorizer = api.authorizers.get(authorizer_id) + if authorizer is None: + raise AuthorizerNotFoundException() + else: + return authorizer + + def get_authorizers(self, restapi_id): + api = self.get_rest_api(restapi_id) + return api.get_authorizers() + + def create_authorizer(self, restapi_id, name, authorizer_type, **kwargs): + api = self.get_rest_api(restapi_id) + authorizer_id = create_id() + authorizer = api.create_authorizer( + authorizer_id, + name, + authorizer_type, + provider_arns=kwargs.get("provider_arns"), + auth_type=kwargs.get("auth_type"), + authorizer_uri=kwargs.get("authorizer_uri"), + authorizer_credentials=kwargs.get("authorizer_credentials"), + identity_source=kwargs.get("identity_source"), + identiy_validation_expression=kwargs.get("identiy_validation_expression"), + authorizer_result_ttl=kwargs.get("authorizer_result_ttl"), + ) + return api.authorizers.get(authorizer["id"]) + + def update_authorizer(self, restapi_id, authorizer_id, patch_operations): + authorizer = self.get_authorizer(restapi_id, authorizer_id) + if not authorizer: + api = self.get_rest_api(restapi_id) + authorizer = api.authorizers[authorizer_id] = Authorizer() + return authorizer.apply_operations(patch_operations) + + def delete_authorizer(self, restapi_id, authorizer_id): + api = self.get_rest_api(restapi_id) + del api.authorizers[authorizer_id] + def get_stage(self, function_id, stage_name): api = self.get_rest_api(function_id) stage = api.stages.get(stage_name) diff 
--git a/moto/apigateway/responses.py b/moto/apigateway/responses.py index e10d670c5f68..14a20832aca3 100644 --- a/moto/apigateway/responses.py +++ b/moto/apigateway/responses.py @@ -8,11 +8,13 @@ ApiKeyNotFoundException, BadRequestException, CrossAccountNotAllowed, + AuthorizerNotFoundException, StageNotFoundException, ApiKeyAlreadyExists, ) API_KEY_SOURCES = ["AUTHORIZER", "HEADER"] +AUTHORIZER_TYPES = ["TOKEN", "REQUEST", "COGNITO_USER_POOLS"] ENDPOINT_CONFIGURATION_TYPES = ["PRIVATE", "EDGE", "REGIONAL"] @@ -172,6 +174,88 @@ def resource_method_responses(self, request, full_url, headers): ) return 200, {}, json.dumps(method_response) + def restapis_authorizers(self, request, full_url, headers): + self.setup_class(request, full_url, headers) + url_path_parts = self.path.split("/") + restapi_id = url_path_parts[2] + + if self.method == "POST": + name = self._get_param("name") + authorizer_type = self._get_param("type") + + provider_arns = self._get_param_with_default_value("providerARNs", None) + auth_type = self._get_param_with_default_value("authType", None) + authorizer_uri = self._get_param_with_default_value("authorizerUri", None) + authorizer_credentials = self._get_param_with_default_value( + "authorizerCredentials", None + ) + identity_source = self._get_param_with_default_value("identitySource", None) + identiy_validation_expression = self._get_param_with_default_value( + "identityValidationExpression", None + ) + authorizer_result_ttl = self._get_param_with_default_value( + "authorizerResultTtlInSeconds", 300 + ) + + # Param validation + if authorizer_type and authorizer_type not in AUTHORIZER_TYPES: + return self.error( + "ValidationException", + ( + "1 validation error detected: " + "Value '{authorizer_type}' at 'createAuthorizerInput.type' failed " + "to satisfy constraint: Member must satisfy enum value set: " + "[TOKEN, REQUEST, COGNITO_USER_POOLS]" + ).format(authorizer_type=authorizer_type), + ) + + authorizer_response = self.backend.create_authorizer( + restapi_id, + name, + authorizer_type, + provider_arns=provider_arns, + auth_type=auth_type, + authorizer_uri=authorizer_uri, + authorizer_credentials=authorizer_credentials, + identity_source=identity_source, + identiy_validation_expression=identiy_validation_expression, + authorizer_result_ttl=authorizer_result_ttl, + ) + elif self.method == "GET": + authorizers = self.backend.get_authorizers(restapi_id) + return 200, {}, json.dumps({"item": authorizers}) + + return 200, {}, json.dumps(authorizer_response) + + def authorizers(self, request, full_url, headers): + self.setup_class(request, full_url, headers) + url_path_parts = self.path.split("/") + restapi_id = url_path_parts[2] + authorizer_id = url_path_parts[4] + + if self.method == "GET": + try: + authorizer_response = self.backend.get_authorizer( + restapi_id, authorizer_id + ) + except AuthorizerNotFoundException as error: + return ( + error.code, + {}, + '{{"message":"{0}","code":"{1}"}}'.format( + error.message, error.error_type + ), + ) + elif self.method == "PATCH": + patch_operations = self._get_param("patchOperations") + authorizer_response = self.backend.update_authorizer( + restapi_id, authorizer_id, patch_operations + ) + elif self.method == "DELETE": + self.backend.delete_authorizer(restapi_id, authorizer_id) + return 202, {}, "{}" + return 200, {}, json.dumps(authorizer_response) + def restapis_stages(self, request, full_url, headers): self.setup_class(request, full_url, headers) url_path_parts = self.path.split("/") diff --git a/moto/apigateway/urls.py 
b/moto/apigateway/urls.py index bb2b2d21662d..4ef6ae72bc4d 100644 --- a/moto/apigateway/urls.py +++ b/moto/apigateway/urls.py @@ -7,6 +7,8 @@ "{0}/restapis$": APIGatewayResponse().restapis, "{0}/restapis/(?P[^/]+)/?$": APIGatewayResponse().restapis_individual, "{0}/restapis/(?P[^/]+)/resources$": APIGatewayResponse().resources, + "{0}/restapis/(?P[^/]+)/authorizers$": APIGatewayResponse().restapis_authorizers, + "{0}/restapis/(?P[^/]+)/authorizers/(?P[^/]+)/?$": APIGatewayResponse().authorizers, "{0}/restapis/(?P[^/]+)/stages$": APIGatewayResponse().restapis_stages, "{0}/restapis/(?P[^/]+)/stages/(?P[^/]+)/?$": APIGatewayResponse().stages, "{0}/restapis/(?P[^/]+)/deployments$": APIGatewayResponse().deployments, diff --git a/tests/test_apigateway/test_apigateway.py b/tests/test_apigateway/test_apigateway.py index 496098e8cf81..0b2b75b0b9f2 100644 --- a/tests/test_apigateway/test_apigateway.py +++ b/tests/test_apigateway/test_apigateway.py @@ -8,7 +8,7 @@ from botocore.exceptions import ClientError import responses -from moto import mock_apigateway, settings +from moto import mock_apigateway, mock_cognitoidp, settings from moto.core import ACCOUNT_ID from nose.tools import assert_raises @@ -547,6 +547,254 @@ def test_integration_response(): response["methodIntegration"]["integrationResponses"].should.equal({}) +@mock_apigateway +@mock_cognitoidp +def test_update_authorizer_configuration(): + client = boto3.client("apigateway", region_name="us-west-2") + authorizer_name = "my_authorizer" + response = client.create_rest_api(name="my_api", description="this is my api") + api_id = response["id"] + + cognito_client = boto3.client("cognito-idp", region_name="us-west-2") + user_pool_arn = cognito_client.create_user_pool(PoolName="my_cognito_pool")[ + "UserPool" + ]["Arn"] + + response = client.create_authorizer( + restApiId=api_id, + name=authorizer_name, + type="COGNITO_USER_POOLS", + providerARNs=[user_pool_arn], + identitySource="method.request.header.Authorization", + ) + authorizer_id = response["id"] + + response = client.get_authorizer(restApiId=api_id, authorizerId=authorizer_id) + # createdDate is hard to match against, remove it + response.pop("createdDate", None) + # this is hard to match against, so remove it + response["ResponseMetadata"].pop("HTTPHeaders", None) + response["ResponseMetadata"].pop("RetryAttempts", None) + response.should.equal( + { + "id": authorizer_id, + "name": authorizer_name, + "type": "COGNITO_USER_POOLS", + "providerARNs": [user_pool_arn], + "identitySource": "method.request.header.Authorization", + "authorizerResultTtlInSeconds": 300, + "ResponseMetadata": {"HTTPStatusCode": 200}, + } + ) + + client.update_authorizer( + restApiId=api_id, + authorizerId=authorizer_id, + patchOperations=[{"op": "replace", "path": "/type", "value": "TOKEN"}], + ) + + authorizer = client.get_authorizer(restApiId=api_id, authorizerId=authorizer_id) + + authorizer.should.have.key("type").which.should.equal("TOKEN") + + client.update_authorizer( + restApiId=api_id, + authorizerId=authorizer_id, + patchOperations=[{"op": "replace", "path": "/type", "value": "REQUEST"}], + ) + + authorizer = client.get_authorizer(restApiId=api_id, authorizerId=authorizer_id) + + authorizer.should.have.key("type").which.should.equal("REQUEST") + + # TODO: implement mult-update tests + + try: + client.update_authorizer( + restApiId=api_id, + authorizerId=authorizer_id, + patchOperations=[ + {"op": "add", "path": "/notasetting", "value": "eu-west-1"} + ], + ) + assert False.should.be.ok # Fail, should not 
be here + except Exception: + assert True.should.be.ok + + +@mock_apigateway +def test_non_existent_authorizer(): + client = boto3.client("apigateway", region_name="us-west-2") + response = client.create_rest_api(name="my_api", description="this is my api") + api_id = response["id"] + + client.get_authorizer.when.called_with( + restApiId=api_id, authorizerId="xxx" + ).should.throw(ClientError) + + +@mock_apigateway +@mock_cognitoidp +def test_create_authorizer(): + client = boto3.client("apigateway", region_name="us-west-2") + authorizer_name = "my_authorizer" + response = client.create_rest_api(name="my_api", description="this is my api") + api_id = response["id"] + + cognito_client = boto3.client("cognito-idp", region_name="us-west-2") + user_pool_arn = cognito_client.create_user_pool(PoolName="my_cognito_pool")[ + "UserPool" + ]["Arn"] + + response = client.create_authorizer( + restApiId=api_id, + name=authorizer_name, + type="COGNITO_USER_POOLS", + providerARNs=[user_pool_arn], + identitySource="method.request.header.Authorization", + ) + authorizer_id = response["id"] + + response = client.get_authorizer(restApiId=api_id, authorizerId=authorizer_id) + # createdDate is hard to match against, remove it + response.pop("createdDate", None) + # this is hard to match against, so remove it + response["ResponseMetadata"].pop("HTTPHeaders", None) + response["ResponseMetadata"].pop("RetryAttempts", None) + response.should.equal( + { + "id": authorizer_id, + "name": authorizer_name, + "type": "COGNITO_USER_POOLS", + "providerARNs": [user_pool_arn], + "identitySource": "method.request.header.Authorization", + "authorizerResultTtlInSeconds": 300, + "ResponseMetadata": {"HTTPStatusCode": 200}, + } + ) + + authorizer_name2 = "my_authorizer2" + response = client.create_authorizer( + restApiId=api_id, + name=authorizer_name2, + type="COGNITO_USER_POOLS", + providerARNs=[user_pool_arn], + identitySource="method.request.header.Authorization", + ) + authorizer_id2 = response["id"] + + response = client.get_authorizers(restApiId=api_id) + + # this is hard to match against, so remove it + response["ResponseMetadata"].pop("HTTPHeaders", None) + response["ResponseMetadata"].pop("RetryAttempts", None) + + response["items"][0]["id"].should.match( + r"{0}|{1}".format(authorizer_id2, authorizer_id) + ) + response["items"][1]["id"].should.match( + r"{0}|{1}".format(authorizer_id2, authorizer_id) + ) + + new_authorizer_name_with_vars = "authorizer_with_vars" + response = client.create_authorizer( + restApiId=api_id, + name=new_authorizer_name_with_vars, + type="COGNITO_USER_POOLS", + providerARNs=[user_pool_arn], + identitySource="method.request.header.Authorization", + ) + authorizer_id3 = response["id"] + + # this is hard to match against, so remove it + response["ResponseMetadata"].pop("HTTPHeaders", None) + response["ResponseMetadata"].pop("RetryAttempts", None) + + response.should.equal( + { + "name": new_authorizer_name_with_vars, + "id": authorizer_id3, + "type": "COGNITO_USER_POOLS", + "providerARNs": [user_pool_arn], + "identitySource": "method.request.header.Authorization", + "authorizerResultTtlInSeconds": 300, + "ResponseMetadata": {"HTTPStatusCode": 200}, + } + ) + + stage = client.get_authorizer(restApiId=api_id, authorizerId=authorizer_id3) + stage["name"].should.equal(new_authorizer_name_with_vars) + stage["id"].should.equal(authorizer_id3) + stage["type"].should.equal("COGNITO_USER_POOLS") + stage["providerARNs"].should.equal([user_pool_arn]) + 
stage["identitySource"].should.equal("method.request.header.Authorization") + stage["authorizerResultTtlInSeconds"].should.equal(300) + + +@mock_apigateway +@mock_cognitoidp +def test_delete_authorizer(): + client = boto3.client("apigateway", region_name="us-west-2") + authorizer_name = "my_authorizer" + response = client.create_rest_api(name="my_api", description="this is my api") + api_id = response["id"] + + cognito_client = boto3.client("cognito-idp", region_name="us-west-2") + user_pool_arn = cognito_client.create_user_pool(PoolName="my_cognito_pool")[ + "UserPool" + ]["Arn"] + + response = client.create_authorizer( + restApiId=api_id, + name=authorizer_name, + type="COGNITO_USER_POOLS", + providerARNs=[user_pool_arn], + identitySource="method.request.header.Authorization", + ) + authorizer_id = response["id"] + + response = client.get_authorizer(restApiId=api_id, authorizerId=authorizer_id) + # createdDate is hard to match against, remove it + response.pop("createdDate", None) + # this is hard to match against, so remove it + response["ResponseMetadata"].pop("HTTPHeaders", None) + response["ResponseMetadata"].pop("RetryAttempts", None) + response.should.equal( + { + "id": authorizer_id, + "name": authorizer_name, + "type": "COGNITO_USER_POOLS", + "providerARNs": [user_pool_arn], + "identitySource": "method.request.header.Authorization", + "authorizerResultTtlInSeconds": 300, + "ResponseMetadata": {"HTTPStatusCode": 200}, + } + ) + + authorizer_name2 = "my_authorizer2" + response = client.create_authorizer( + restApiId=api_id, + name=authorizer_name2, + type="COGNITO_USER_POOLS", + providerARNs=[user_pool_arn], + identitySource="method.request.header.Authorization", + ) + authorizer_id2 = response["id"] + + authorizers = client.get_authorizers(restApiId=api_id)["items"] + sorted([authorizer["name"] for authorizer in authorizers]).should.equal( + sorted([authorizer_name2, authorizer_name]) + ) + # delete stage + response = client.delete_authorizer(restApiId=api_id, authorizerId=authorizer_id2) + response["ResponseMetadata"]["HTTPStatusCode"].should.equal(202) + # verify other stage still exists + authorizers = client.get_authorizers(restApiId=api_id)["items"] + sorted([authorizer["name"] for authorizer in authorizers]).should.equal( + sorted([authorizer_name]) + ) + + @mock_apigateway def test_update_stage_configuration(): client = boto3.client("apigateway", region_name="us-west-2") From d1efedec2952a8597624d821ecaaa718597612a9 Mon Sep 17 00:00:00 2001 From: Bryan Alexander Date: Tue, 18 Feb 2020 13:40:34 -0600 Subject: [PATCH 075/658] updates kms to use tagging service and support untag_resource --- moto/kms/models.py | 25 +++++---------------- tests/test_kms/test_kms.py | 43 ++++++++++++++++++++++++++++++++++++ tests/test_kms/test_utils.py | 8 +++---- 3 files changed, 53 insertions(+), 23 deletions(-) diff --git a/moto/kms/models.py b/moto/kms/models.py index 9f61b275f574..3d0da036ed4e 100644 --- a/moto/kms/models.py +++ b/moto/kms/models.py @@ -7,27 +7,18 @@ from boto3 import Session from moto.core import BaseBackend, BaseModel -<<<<<<< HEAD -from moto.core.exceptions import JsonRESTError -from moto.core.utils import iso_8601_datetime_without_milliseconds -from moto.utilities.tagging_service import TaggingService -======= from moto.core.utils import unix_time - +from moto.utilities.tagging_service import TaggingService +from moto.core.exceptions import JsonRESTError from moto.iam.models import ACCOUNT_ID ->>>>>>> 100dbd529f174f18d579a1dcc066d55409f2e38f from .utils import decrypt, 
encrypt, generate_key_id, generate_master_key class Key(BaseModel): -<<<<<<< HEAD - def __init__(self, policy, key_usage, description, region): -======= def __init__( - self, policy, key_usage, customer_master_key_spec, description, tags, region + self, policy, key_usage, customer_master_key_spec, description, region ): ->>>>>>> 100dbd529f174f18d579a1dcc066d55409f2e38f self.id = generate_key_id() self.creation_date = unix_time() self.policy = policy @@ -142,19 +133,14 @@ def __init__(self): self.key_to_aliases = defaultdict(set) self.tagger = TaggingService(keyName='TagKey', valueName='TagValue') -<<<<<<< HEAD - def create_key(self, policy, key_usage, description, tags, region): - key = Key(policy, key_usage, description, region) -======= def create_key( self, policy, key_usage, customer_master_key_spec, description, tags, region ): key = Key( - policy, key_usage, customer_master_key_spec, description, tags, region + policy, key_usage, customer_master_key_spec, description, region ) ->>>>>>> 100dbd529f174f18d579a1dcc066d55409f2e38f self.keys[key.id] = key - if tags != None and len(tags) > 0: + if tags is not None and len(tags) > 0: self.tag_resource(key.id, tags) return key @@ -166,6 +152,7 @@ def delete_key(self, key_id): if key_id in self.keys: if key_id in self.key_to_aliases: self.key_to_aliases.pop(key_id) + self.tagger.delete_all_tags_for_resource(key_id) return self.keys.pop(key_id) diff --git a/tests/test_kms/test_kms.py b/tests/test_kms/test_kms.py index aaf09a6be4e6..d2dca67860f4 100644 --- a/tests/test_kms/test_kms.py +++ b/tests/test_kms/test_kms.py @@ -680,3 +680,46 @@ def test__assert_default_policy(): _assert_default_policy.when.called_with("default").should_not.throw( MotoNotFoundException ) + + +@mock_kms +def test_key_tagging_happy(): + client = boto3.client("kms", region_name="us-east-1") + key = client.create_key(Description="test-key-tagging") + key_id = key["KeyMetadata"]["KeyId"] + + tags = [{"TagKey": "key1", "TagValue": "value1"}, {"TagKey": "key2", "TagValue": "value2"}] + client.tag_resource(KeyId=key_id, Tags=tags) + + result = client.list_resource_tags(KeyId=key_id) + actual = result.get("Tags", []) + assert tags == actual + + client.untag_resource(KeyId=key_id, TagKeys=["key1"]) + + actual = client.list_resource_tags(KeyId=key_id).get("Tags", []) + expected = [{"TagKey": "key2", "TagValue": "value2"}] + assert expected == actual + + +@mock_kms +def test_key_tagging_sad(): + b = KmsBackend() + + try: + b.tag_resource('unknown', []) + raise 'tag_resource should fail if KeyId is not known' + except JsonRESTError: + pass + + try: + b.untag_resource('unknown', []) + raise 'untag_resource should fail if KeyId is not known' + except JsonRESTError: + pass + + try: + b.list_resource_tags('unknown') + raise 'list_resource_tags should fail if KeyId is not known' + except JsonRESTError: + pass diff --git a/tests/test_kms/test_utils.py b/tests/test_kms/test_utils.py index 4c84ed127679..4446635f318f 100644 --- a/tests/test_kms/test_utils.py +++ b/tests/test_kms/test_utils.py @@ -102,7 +102,7 @@ def test_deserialize_ciphertext_blob(raw, serialized): @parameterized(((ec[0],) for ec in ENCRYPTION_CONTEXT_VECTORS)) def test_encrypt_decrypt_cycle(encryption_context): plaintext = b"some secret plaintext" - master_key = Key("nop", "nop", "nop", "nop", [], "nop") + master_key = Key("nop", "nop", "nop", "nop", "nop") master_key_map = {master_key.id: master_key} ciphertext_blob = encrypt( @@ -133,7 +133,7 @@ def test_encrypt_unknown_key_id(): def 
test_decrypt_invalid_ciphertext_format(): - master_key = Key("nop", "nop", "nop", "nop", [], "nop") + master_key = Key("nop", "nop", "nop", "nop", "nop") master_key_map = {master_key.id: master_key} with assert_raises(InvalidCiphertextException): @@ -153,7 +153,7 @@ def test_decrypt_unknwown_key_id(): def test_decrypt_invalid_ciphertext(): - master_key = Key("nop", "nop", "nop", "nop", [], "nop") + master_key = Key("nop", "nop", "nop", "nop", "nop") master_key_map = {master_key.id: master_key} ciphertext_blob = ( master_key.id.encode("utf-8") + b"123456789012" @@ -171,7 +171,7 @@ def test_decrypt_invalid_ciphertext(): def test_decrypt_invalid_encryption_context(): plaintext = b"some secret plaintext" - master_key = Key("nop", "nop", "nop", "nop", [], "nop") + master_key = Key("nop", "nop", "nop", "nop", "nop") master_key_map = {master_key.id: master_key} ciphertext_blob = encrypt( From 4e2fe76820025a75ce6282047e3ae0663ea45ccc Mon Sep 17 00:00:00 2001 From: Bryan Alexander Date: Tue, 18 Feb 2020 13:51:35 -0600 Subject: [PATCH 076/658] removes duplicate declaration of list_tags_for_resource --- moto/events/models.py | 8 -------- 1 file changed, 8 deletions(-) diff --git a/moto/events/models.py b/moto/events/models.py index c400677df778..6787f51abe5e 100644 --- a/moto/events/models.py +++ b/moto/events/models.py @@ -374,14 +374,6 @@ def list_tags_for_resource(self, arn): "ResourceNotFoundException", "An entity that you specified does not exist." ) - def list_tags_for_resource(self, arn): - name = arn.split("/")[-1] - if name in self.rules: - return self.tagger.list_tags_for_resource(self.rules[name].arn) - raise JsonRESTError( - "ResourceNotFoundException", "An entity that you specified does not exist." - ) - def tag_resource(self, arn, tags): name = arn.split("/")[-1] if name in self.rules: From 1432e82606946bd9d7b002bbbb5e87423011308f Mon Sep 17 00:00:00 2001 From: Bryan Alexander Date: Tue, 18 Feb 2020 14:01:15 -0600 Subject: [PATCH 077/658] fixes kms/models create_key parameters --- moto/kms/models.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/moto/kms/models.py b/moto/kms/models.py index 3d0da036ed4e..89cc5758a593 100644 --- a/moto/kms/models.py +++ b/moto/kms/models.py @@ -111,11 +111,11 @@ def create_from_cloudformation_json( key_usage="ENCRYPT_DECRYPT", customer_master_key_spec="SYMMETRIC_DEFAULT", description=properties["Description"], + tags=properties["Tags"], region=region_name, ) key.key_rotation_status = properties["EnableKeyRotation"] key.enabled = properties["Enabled"] - kms_backend.tag_resource(key.id, properties.get("Tags")) return key From 38413577fc04164886728ac46f1b7563054f56b3 Mon Sep 17 00:00:00 2001 From: Bryan Alexander Date: Wed, 19 Feb 2020 09:18:01 -0600 Subject: [PATCH 078/658] fixes bug in resourcetaggingapi/get_kms_tags --- moto/resourcegroupstaggingapi/models.py | 2 +- tests/test_events/test_events.py | 7 ------ tests/test_kms/test_kms.py | 4 ++-- .../test_resourcegroupstaggingapi.py | 23 +++++++++++++++++++ 4 files changed, 26 insertions(+), 10 deletions(-) diff --git a/moto/resourcegroupstaggingapi/models.py b/moto/resourcegroupstaggingapi/models.py index 850ab5c04db1..8c17864f36c1 100644 --- a/moto/resourcegroupstaggingapi/models.py +++ b/moto/resourcegroupstaggingapi/models.py @@ -318,7 +318,7 @@ def get_elbv2_tags(arn): # KMS def get_kms_tags(kms_key_id): result = [] - for tag in self.kms_backend.list_resource_tags(kms_key_id): + for tag in self.kms_backend.list_resource_tags(kms_key_id).get("Tags",[]): result.append({"Key": 
tag["TagKey"], "Value": tag["TagValue"]}) return result diff --git a/tests/test_events/test_events.py b/tests/test_events/test_events.py index cf3743d34577..80fadb449319 100644 --- a/tests/test_events/test_events.py +++ b/tests/test_events/test_events.py @@ -10,9 +10,6 @@ from nose.tools import assert_raises from moto.core import ACCOUNT_ID -<< << << < HEAD -== == == = ->>>>>> > 100dbd529f174f18d579a1dcc066d55409f2e38f RULES = [ {"Name": "test1", "ScheduleExpression": "rate(5 minutes)"}, @@ -461,10 +458,6 @@ def test_delete_event_bus_errors(): ) -<< << << < HEAD -== == == = - ->>>>>> > 100dbd529f174f18d579a1dcc066d55409f2e38f @mock_events def test_rule_tagging_happy(): client = generate_environment() diff --git a/tests/test_kms/test_kms.py b/tests/test_kms/test_kms.py index d2dca67860f4..d00c885f2062 100644 --- a/tests/test_kms/test_kms.py +++ b/tests/test_kms/test_kms.py @@ -682,7 +682,7 @@ def test__assert_default_policy(): ) -@mock_kms +@mock_kms_deprecated def test_key_tagging_happy(): client = boto3.client("kms", region_name="us-east-1") key = client.create_key(Description="test-key-tagging") @@ -702,7 +702,7 @@ def test_key_tagging_happy(): assert expected == actual -@mock_kms +@mock_kms_deprecated def test_key_tagging_sad(): b = KmsBackend() diff --git a/tests/test_resourcegroupstaggingapi/test_resourcegroupstaggingapi.py b/tests/test_resourcegroupstaggingapi/test_resourcegroupstaggingapi.py index 3ee517ce8a4c..dc75bb722b4a 100644 --- a/tests/test_resourcegroupstaggingapi/test_resourcegroupstaggingapi.py +++ b/tests/test_resourcegroupstaggingapi/test_resourcegroupstaggingapi.py @@ -248,3 +248,26 @@ def test_get_many_resources(): ) # TODO test pagenation + + +@mock_kms +def test_get_kms_tags(): + kms = boto3.client("kms", region_name="us-east-1") + key = kms.create_key( + KeyUsage="ENCRYPT_DECRYPT", + Tags=[ + {"TagKey": "key_name", "TagValue": "a_value"}, + {"TagKey": "key_2", "TagValue": "val2"}, + ], + ) + key_id = key["KeyMetadata"]["KeyId"] + + rtapi = boto3.client("resourcegroupstaggingapi", region_name="us-east-1") + resp = rtapi.get_resources( + ResourceTypeFilters=["kms"], + TagFilters=[{"Key": "key_name"}], + ) + resp["ResourceTagMappingList"].should.have.length_of(1) + resp["ResourceTagMappingList"][0]["Tags"].should.contain( + {"Key": "key_name", "Value": "a_value"} + ) From 7205ab77854e7e086a204bc7df9d28c8a747ffcb Mon Sep 17 00:00:00 2001 From: Bert Blommers Date: Thu, 20 Feb 2020 08:59:21 +0000 Subject: [PATCH 079/658] #1427 - EMR - Return start time of first step --- moto/emr/models.py | 5 +++++ moto/emr/responses.py | 2 +- tests/test_emr/test_emr_boto3.py | 4 +++- 3 files changed, 9 insertions(+), 2 deletions(-) diff --git a/moto/emr/models.py b/moto/emr/models.py index 713b15b9f8ea..d9ec2fd691c3 100644 --- a/moto/emr/models.py +++ b/moto/emr/models.py @@ -86,6 +86,9 @@ def __init__( self.start_datetime = None self.state = state + def start(self): + self.start_datetime = datetime.now(pytz.utc) + class FakeCluster(BaseModel): def __init__( @@ -204,6 +207,8 @@ def __init__( self.start_cluster() self.run_bootstrap_actions() + if self.steps: + self.steps[0].start() @property def instance_groups(self): diff --git a/moto/emr/responses.py b/moto/emr/responses.py index 94847ec8b61f..38b9774e1606 100644 --- a/moto/emr/responses.py +++ b/moto/emr/responses.py @@ -835,7 +835,7 @@ def terminate_job_flows(self): {% if step.end_datetime is not none %} {{ step.end_datetime.isoformat() }} {% endif %} - {% if step.ready_datetime is not none %} + {% if step.start_datetime is not none 
%} {{ step.start_datetime.isoformat() }} {% endif %} diff --git a/tests/test_emr/test_emr_boto3.py b/tests/test_emr/test_emr_boto3.py index 212444abfbe3..d849247bdc91 100644 --- a/tests/test_emr/test_emr_boto3.py +++ b/tests/test_emr/test_emr_boto3.py @@ -752,7 +752,9 @@ def test_steps(): # StateChangeReason x["Status"]["Timeline"]["CreationDateTime"].should.be.a("datetime.datetime") # x['Status']['Timeline']['EndDateTime'].should.be.a('datetime.datetime') - # x['Status']['Timeline']['StartDateTime'].should.be.a('datetime.datetime') + # Only the first step will have started - we don't know anything about when it finishes, so the second step never starts + if x["Name"] == "My wordcount example": + x["Status"]["Timeline"]["StartDateTime"].should.be.a("datetime.datetime") x = client.describe_step(ClusterId=cluster_id, StepId=x["Id"])["Step"] x["ActionOnFailure"].should.equal("TERMINATE_CLUSTER") From 1221d2653ac3ab5fdeddcffc56ef5d23219cd543 Mon Sep 17 00:00:00 2001 From: Brady Date: Wed, 19 Feb 2020 09:12:13 -0500 Subject: [PATCH 080/658] fix test cases, bug when no tags are present and conflict --- moto/events/models.py | 2 +- moto/kms/models.py | 17 +++--- moto/resourcegroupstaggingapi/models.py | 2 +- setup.cfg | 2 +- tests/test_kms/test_kms.py | 59 +++++++++++++++---- .../test_resourcegroupstaggingapi.py | 23 -------- 6 files changed, 58 insertions(+), 47 deletions(-) diff --git a/moto/events/models.py b/moto/events/models.py index 6787f51abe5e..a80b86daa302 100644 --- a/moto/events/models.py +++ b/moto/events/models.py @@ -367,7 +367,7 @@ def delete_event_bus(self, name): self.event_buses.pop(name, None) def list_tags_for_resource(self, arn): - name = arn.split('/')[-1] + name = arn.split("/")[-1] if name in self.rules: return self.tagger.list_tags_for_resource(self.rules[name].arn) raise JsonRESTError( diff --git a/moto/kms/models.py b/moto/kms/models.py index 89cc5758a593..36f72e6de8ba 100644 --- a/moto/kms/models.py +++ b/moto/kms/models.py @@ -111,7 +111,7 @@ def create_from_cloudformation_json( key_usage="ENCRYPT_DECRYPT", customer_master_key_spec="SYMMETRIC_DEFAULT", description=properties["Description"], - tags=properties["Tags"], + tags=properties.get("Tags", []), region=region_name, ) key.key_rotation_status = properties["EnableKeyRotation"] @@ -131,14 +131,12 @@ class KmsBackend(BaseBackend): def __init__(self): self.keys = {} self.key_to_aliases = defaultdict(set) - self.tagger = TaggingService(keyName='TagKey', valueName='TagValue') + self.tagger = TaggingService(keyName="TagKey", valueName="TagValue") def create_key( self, policy, key_usage, customer_master_key_spec, description, tags, region ): - key = Key( - policy, key_usage, customer_master_key_spec, description, region - ) + key = Key(policy, key_usage, customer_master_key_spec, description, region) self.keys[key.id] = key if tags is not None and len(tags) > 0: self.tag_resource(key.id, tags) @@ -326,7 +324,8 @@ def list_resource_tags(self, key_id): if key_id in self.keys: return self.tagger.list_tags_for_resource(key_id) raise JsonRESTError( - "NotFoundException", "The request was rejected because the specified entity or resource could not be found." 
+ "NotFoundException", + "The request was rejected because the specified entity or resource could not be found.", ) def tag_resource(self, key_id, tags): @@ -334,7 +333,8 @@ def tag_resource(self, key_id, tags): self.tagger.tag_resource(key_id, tags) return {} raise JsonRESTError( - "NotFoundException", "The request was rejected because the specified entity or resource could not be found." + "NotFoundException", + "The request was rejected because the specified entity or resource could not be found.", ) def untag_resource(self, key_id, tag_names): @@ -342,7 +342,8 @@ def untag_resource(self, key_id, tag_names): self.tagger.untag_resource_using_names(key_id, tag_names) return {} raise JsonRESTError( - "NotFoundException", "The request was rejected because the specified entity or resource could not be found." + "NotFoundException", + "The request was rejected because the specified entity or resource could not be found.", ) diff --git a/moto/resourcegroupstaggingapi/models.py b/moto/resourcegroupstaggingapi/models.py index 8c17864f36c1..d05a53f81548 100644 --- a/moto/resourcegroupstaggingapi/models.py +++ b/moto/resourcegroupstaggingapi/models.py @@ -318,7 +318,7 @@ def get_elbv2_tags(arn): # KMS def get_kms_tags(kms_key_id): result = [] - for tag in self.kms_backend.list_resource_tags(kms_key_id).get("Tags",[]): + for tag in self.kms_backend.list_resource_tags(kms_key_id).get("Tags", []): result.append({"Key": tag["TagKey"], "Value": tag["TagValue"]}) return result diff --git a/setup.cfg b/setup.cfg index fb04c16a82a0..9dbd988dba69 100644 --- a/setup.cfg +++ b/setup.cfg @@ -1,7 +1,7 @@ [nosetests] verbosity=1 detailed-errors=1 -with-coverage=1 +#with-coverage=1 cover-package=moto [bdist_wheel] diff --git a/tests/test_kms/test_kms.py b/tests/test_kms/test_kms.py index d00c885f2062..3384d940ef31 100644 --- a/tests/test_kms/test_kms.py +++ b/tests/test_kms/test_kms.py @@ -2,8 +2,10 @@ from __future__ import unicode_literals import base64 import re +from collections import OrderedDict import boto.kms +import boto3 import six import sure # noqa from boto.exception import JSONResponseError @@ -13,7 +15,7 @@ from moto.core.exceptions import JsonRESTError from moto.kms.models import KmsBackend from moto.kms.exceptions import NotFoundException as MotoNotFoundException -from moto import mock_kms_deprecated +from moto import mock_kms_deprecated, mock_kms PLAINTEXT_VECTORS = ( (b"some encodeable plaintext",), @@ -682,24 +684,55 @@ def test__assert_default_policy(): ) -@mock_kms_deprecated -def test_key_tagging_happy(): +if six.PY2: + sort = sorted +else: + sort = lambda l: sorted(l, key=lambda d: d.keys()) + + +@mock_kms +def test_key_tag_on_create_key_happy(): client = boto3.client("kms", region_name="us-east-1") - key = client.create_key(Description="test-key-tagging") + + tags = [ + {"TagKey": "key1", "TagValue": "value1"}, + {"TagKey": "key2", "TagValue": "value2"}, + ] + key = client.create_key(Description="test-key-tagging", Tags=tags) key_id = key["KeyMetadata"]["KeyId"] - tags = [{"TagKey": "key1", "TagValue": "value1"}, {"TagKey": "key2", "TagValue": "value2"}] + result = client.list_resource_tags(KeyId=key_id) + actual = result.get("Tags", []) + assert sort(tags) == sort(actual) + + client.untag_resource(KeyId=key_id, TagKeys=["key1"]) + + actual = client.list_resource_tags(KeyId=key_id).get("Tags", []) + expected = [{"TagKey": "key2", "TagValue": "value2"}] + assert sort(expected) == sort(actual) + + +@mock_kms +def test_key_tag_added_happy(): + client = boto3.client("kms", 
region_name="us-east-1") + + key = client.create_key(Description="test-key-tagging") + key_id = key["KeyMetadata"]["KeyId"] + tags = [ + {"TagKey": "key1", "TagValue": "value1"}, + {"TagKey": "key2", "TagValue": "value2"}, + ] client.tag_resource(KeyId=key_id, Tags=tags) result = client.list_resource_tags(KeyId=key_id) actual = result.get("Tags", []) - assert tags == actual + assert sort(tags) == sort(actual) client.untag_resource(KeyId=key_id, TagKeys=["key1"]) actual = client.list_resource_tags(KeyId=key_id).get("Tags", []) expected = [{"TagKey": "key2", "TagValue": "value2"}] - assert expected == actual + assert sort(expected) == sort(actual) @mock_kms_deprecated @@ -707,19 +740,19 @@ def test_key_tagging_sad(): b = KmsBackend() try: - b.tag_resource('unknown', []) - raise 'tag_resource should fail if KeyId is not known' + b.tag_resource("unknown", []) + raise "tag_resource should fail if KeyId is not known" except JsonRESTError: pass try: - b.untag_resource('unknown', []) - raise 'untag_resource should fail if KeyId is not known' + b.untag_resource("unknown", []) + raise "untag_resource should fail if KeyId is not known" except JsonRESTError: pass try: - b.list_resource_tags('unknown') - raise 'list_resource_tags should fail if KeyId is not known' + b.list_resource_tags("unknown") + raise "list_resource_tags should fail if KeyId is not known" except JsonRESTError: pass diff --git a/tests/test_resourcegroupstaggingapi/test_resourcegroupstaggingapi.py b/tests/test_resourcegroupstaggingapi/test_resourcegroupstaggingapi.py index dc75bb722b4a..3ee517ce8a4c 100644 --- a/tests/test_resourcegroupstaggingapi/test_resourcegroupstaggingapi.py +++ b/tests/test_resourcegroupstaggingapi/test_resourcegroupstaggingapi.py @@ -248,26 +248,3 @@ def test_get_many_resources(): ) # TODO test pagenation - - -@mock_kms -def test_get_kms_tags(): - kms = boto3.client("kms", region_name="us-east-1") - key = kms.create_key( - KeyUsage="ENCRYPT_DECRYPT", - Tags=[ - {"TagKey": "key_name", "TagValue": "a_value"}, - {"TagKey": "key_2", "TagValue": "val2"}, - ], - ) - key_id = key["KeyMetadata"]["KeyId"] - - rtapi = boto3.client("resourcegroupstaggingapi", region_name="us-east-1") - resp = rtapi.get_resources( - ResourceTypeFilters=["kms"], - TagFilters=[{"Key": "key_name"}], - ) - resp["ResourceTagMappingList"].should.have.length_of(1) - resp["ResourceTagMappingList"][0]["Tags"].should.contain( - {"Key": "key_name", "Value": "a_value"} - ) From c162f02091e1c19428b30f6d03de61687b625568 Mon Sep 17 00:00:00 2001 From: Brady Date: Fri, 21 Feb 2020 15:39:23 -0500 Subject: [PATCH 081/658] re-add coverage and remove unused import --- setup.cfg | 2 +- tests/test_kms/test_kms.py | 1 - 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/setup.cfg b/setup.cfg index 9dbd988dba69..fb04c16a82a0 100644 --- a/setup.cfg +++ b/setup.cfg @@ -1,7 +1,7 @@ [nosetests] verbosity=1 detailed-errors=1 -#with-coverage=1 +with-coverage=1 cover-package=moto [bdist_wheel] diff --git a/tests/test_kms/test_kms.py b/tests/test_kms/test_kms.py index 3384d940ef31..a04a24a8272e 100644 --- a/tests/test_kms/test_kms.py +++ b/tests/test_kms/test_kms.py @@ -2,7 +2,6 @@ from __future__ import unicode_literals import base64 import re -from collections import OrderedDict import boto.kms import boto3 From 736f8b5a8f5e68c9b6225b99316ac930c6e5cdca Mon Sep 17 00:00:00 2001 From: Bert Blommers Date: Mon, 24 Feb 2020 08:24:14 +0000 Subject: [PATCH 082/658] Refactor - reuse logic that expects CW log message --- tests/test_awslambda/test_lambda.py | 65 
++++++++++------------------- 1 file changed, 22 insertions(+), 43 deletions(-) diff --git a/tests/test_awslambda/test_lambda.py b/tests/test_awslambda/test_lambda.py index 2bd8f4bb3167..48539c0e6f03 100644 --- a/tests/test_awslambda/test_lambda.py +++ b/tests/test_awslambda/test_lambda.py @@ -1111,7 +1111,6 @@ def test_create_event_source_mapping(): @mock_lambda @mock_sqs def test_invoke_function_from_sqs(): - logs_conn = boto3.client("logs", region_name="us-east-1") sqs = boto3.resource("sqs", region_name="us-east-1") queue = sqs.create_queue(QueueName="test-sqs-queue1") @@ -1137,32 +1136,18 @@ def test_invoke_function_from_sqs(): sqs_client = boto3.client("sqs", region_name="us-east-1") sqs_client.send_message(QueueUrl=queue.url, MessageBody="test") - start = time.time() - while (time.time() - start) < 30: - result = logs_conn.describe_log_streams(logGroupName="/aws/lambda/testFunction") - log_streams = result.get("logStreams") - if not log_streams: - time.sleep(1) - continue - assert len(log_streams) == 1 - result = logs_conn.get_log_events( - logGroupName="/aws/lambda/testFunction", - logStreamName=log_streams[0]["logStreamName"], - ) - for event in result.get("events"): - if event["message"] == "get_test_zip_file3 success": - return - time.sleep(1) + expected_msg = "get_test_zip_file3 success" + log_group = "/aws/lambda/testFunction" + msg_showed_up = wait_for_log_msg(expected_msg, log_group) - assert False, "Test Failed" + assert msg_showed_up, "Message was not found in log_group, so sending an SQS message did not result in a successful Lambda execution" @mock_logs @mock_lambda @mock_dynamodb2 def test_invoke_function_from_dynamodb_put(): - logs_conn = boto3.client("logs", region_name="us-east-1") dynamodb = boto3.client("dynamodb", region_name="us-east-1") table_name = "table_with_stream" table = dynamodb.create_table( @@ -1197,32 +1182,18 @@ def test_invoke_function_from_dynamodb_put(): assert response["State"] == "Enabled" dynamodb.put_item(TableName=table_name, Item={"id": {"S": "item 1"}}) - start = time.time() - while (time.time() - start) < 30: - result = logs_conn.describe_log_streams(logGroupName="/aws/lambda/testFunction") - log_streams = result.get("logStreams") - if not log_streams: - time.sleep(1) - continue - assert len(log_streams) == 1 - result = logs_conn.get_log_events( - logGroupName="/aws/lambda/testFunction", - logStreamName=log_streams[0]["logStreamName"], - ) - for event in result.get("events"): - if event["message"] == "get_test_zip_file3 success": - return - time.sleep(1) + expected_msg = "get_test_zip_file3 success" + log_group = "/aws/lambda/testFunction" + msg_showed_up = wait_for_log_msg(expected_msg, log_group) - assert False, "Test Failed" + assert msg_showed_up, "Message was not found in log_group, so inserting DynamoDB did not result in a successful Lambda execution" @mock_logs @mock_lambda @mock_dynamodb2 def test_invoke_function_from_dynamodb_update(): - logs_conn = boto3.client("logs", region_name="us-east-1") dynamodb = boto3.client("dynamodb", region_name="us-east-1") table_name = "table_with_stream" table = dynamodb.create_table( @@ -1263,9 +1234,18 @@ def test_invoke_function_from_dynamodb_update(): ExpressionAttributeNames={"#attr": "new_attr"}, ExpressionAttributeValues={":val": {"S": "new_val"}}, ) + expected_msg = "get_test_zip_file3 success" + log_group = "/aws/lambda/testFunction" + msg_showed_up = wait_for_log_msg(expected_msg, log_group) + + assert msg_showed_up, "Message was not found in log_group, so updating DynamoDB did not result 
in a successful Lambda execution" + + +def wait_for_log_msg(expected_msg, log_group): + logs_conn = boto3.client("logs", region_name="us-east-1") start = time.time() while (time.time() - start) < 30: - result = logs_conn.describe_log_streams(logGroupName="/aws/lambda/testFunction") + result = logs_conn.describe_log_streams(logGroupName=log_group) log_streams = result.get("logStreams") if not log_streams: time.sleep(1) @@ -1273,15 +1253,14 @@ def test_invoke_function_from_dynamodb_update(): assert len(log_streams) == 1 result = logs_conn.get_log_events( - logGroupName="/aws/lambda/testFunction", + logGroupName=log_group, logStreamName=log_streams[0]["logStreamName"], ) for event in result.get("events"): - if event["message"] == "get_test_zip_file3 success": - return + if event["message"] == expected_msg: + return True time.sleep(1) - - assert False, "Test Failed" + return False @mock_logs From 038ff620b2e1e67fde707b38ee518e4a53088249 Mon Sep 17 00:00:00 2001 From: Bert Blommers Date: Mon, 24 Feb 2020 09:28:52 +0000 Subject: [PATCH 083/658] DDB Streams - Bugfix where processed items are resend every time --- moto/awslambda/models.py | 2 +- tests/test_awslambda/test_lambda.py | 60 ++++++++++++++++++----------- 2 files changed, 39 insertions(+), 23 deletions(-) diff --git a/moto/awslambda/models.py b/moto/awslambda/models.py index 939952d5eb46..9cdf2397c9d1 100644 --- a/moto/awslambda/models.py +++ b/moto/awslambda/models.py @@ -981,7 +981,7 @@ def send_dynamodb_items(self, function_arn, items, source): ] } func = self._lambdas.get_arn(function_arn) - func.invoke(json.dumps(event), {}, {}) + return func.invoke(json.dumps(event), {}, {}) def list_tags(self, resource): return self.get_function_by_arn(resource).tags diff --git a/tests/test_awslambda/test_lambda.py b/tests/test_awslambda/test_lambda.py index 48539c0e6f03..eb8453e432f7 100644 --- a/tests/test_awslambda/test_lambda.py +++ b/tests/test_awslambda/test_lambda.py @@ -70,6 +70,7 @@ def lambda_handler(event, context): def get_test_zip_file3(): pfunc = """ def lambda_handler(event, context): + print("Nr_of_records("+str(len(event['Records']))+")") print("get_test_zip_file3 success") return event """ @@ -1139,9 +1140,13 @@ def test_invoke_function_from_sqs(): expected_msg = "get_test_zip_file3 success" log_group = "/aws/lambda/testFunction" - msg_showed_up = wait_for_log_msg(expected_msg, log_group) + msg_showed_up, all_logs = wait_for_log_msg(expected_msg, log_group) - assert msg_showed_up, "Message was not found in log_group, so sending an SQS message did not result in a successful Lambda execution" + assert msg_showed_up, ( + expected_msg + + " was not found after sending an SQS message. All logs: " + + all_logs + ) @mock_logs @@ -1185,9 +1190,11 @@ def test_invoke_function_from_dynamodb_put(): expected_msg = "get_test_zip_file3 success" log_group = "/aws/lambda/testFunction" - msg_showed_up = wait_for_log_msg(expected_msg, log_group) + msg_showed_up, all_logs = wait_for_log_msg(expected_msg, log_group) - assert msg_showed_up, "Message was not found in log_group, so inserting DynamoDB did not result in a successful Lambda execution" + assert msg_showed_up, ( + expected_msg + " was not found after a DDB insert. 
All logs: " + all_logs + ) @mock_logs @@ -1205,7 +1212,6 @@ def test_invoke_function_from_dynamodb_update(): "StreamViewType": "NEW_AND_OLD_IMAGES", }, ) - dynamodb.put_item(TableName=table_name, Item={"id": {"S": "item 1"}}) conn = boto3.client("lambda", region_name="us-east-1") func = conn.create_function( @@ -1220,13 +1226,17 @@ def test_invoke_function_from_dynamodb_update(): Publish=True, ) - response = conn.create_event_source_mapping( + conn.create_event_source_mapping( EventSourceArn=table["TableDescription"]["LatestStreamArn"], FunctionName=func["FunctionArn"], ) - assert response["EventSourceArn"] == table["TableDescription"]["LatestStreamArn"] - assert response["State"] == "Enabled" + dynamodb.put_item(TableName=table_name, Item={"id": {"S": "item 1"}}) + log_group = "/aws/lambda/testFunction" + expected_msg = "get_test_zip_file3 success" + msg_showed_up, all_logs = wait_for_log_msg(expected_msg, log_group) + assert "Nr_of_records(1)" in all_logs, "Only one item should be inserted" + dynamodb.update_item( TableName=table_name, Key={"id": {"S": "item 1"}}, @@ -1234,33 +1244,39 @@ def test_invoke_function_from_dynamodb_update(): ExpressionAttributeNames={"#attr": "new_attr"}, ExpressionAttributeValues={":val": {"S": "new_val"}}, ) - expected_msg = "get_test_zip_file3 success" - log_group = "/aws/lambda/testFunction" - msg_showed_up = wait_for_log_msg(expected_msg, log_group) + msg_showed_up, all_logs = wait_for_log_msg(expected_msg, log_group) - assert msg_showed_up, "Message was not found in log_group, so updating DynamoDB did not result in a successful Lambda execution" + assert msg_showed_up, ( + expected_msg + " was not found after updating DDB. All logs: " + str(all_logs) + ) + assert "Nr_of_records(1)" in all_logs, "Only one item should be updated" + assert ( + "Nr_of_records(2)" not in all_logs + ), "The inserted item should not show up again" def wait_for_log_msg(expected_msg, log_group): logs_conn = boto3.client("logs", region_name="us-east-1") + received_messages = [] start = time.time() - while (time.time() - start) < 30: + while (time.time() - start) < 10: result = logs_conn.describe_log_streams(logGroupName=log_group) log_streams = result.get("logStreams") if not log_streams: time.sleep(1) continue - assert len(log_streams) == 1 - result = logs_conn.get_log_events( - logGroupName=log_group, - logStreamName=log_streams[0]["logStreamName"], - ) - for event in result.get("events"): - if event["message"] == expected_msg: - return True + for log_stream in log_streams: + result = logs_conn.get_log_events( + logGroupName=log_group, logStreamName=log_stream["logStreamName"], + ) + received_messages.extend( + [event["message"] for event in result.get("events")] + ) + if expected_msg in received_messages: + return True, received_messages time.sleep(1) - return False + return False, received_messages @mock_logs From 939bd1cd86ad62b4e33b937a8f21253664f085d0 Mon Sep 17 00:00:00 2001 From: Bert Blommers Date: Mon, 24 Feb 2020 13:43:19 +0000 Subject: [PATCH 084/658] EC2 - Add some filters for describe_instance_status --- moto/ec2/models.py | 29 ++++++++++++---- moto/ec2/responses/instances.py | 19 ++++++++-- tests/test_ec2/test_instances.py | 59 +++++++++++++++++++++++++++++++- 3 files changed, 97 insertions(+), 10 deletions(-) diff --git a/moto/ec2/models.py b/moto/ec2/models.py index 166d8e646a1b..ef506e443309 100644 --- a/moto/ec2/models.py +++ b/moto/ec2/models.py @@ -822,6 +822,21 @@ def get_cfn_attribute(self, attribute_name): return self.public_ip raise 
UnformattedGetAttTemplateException() + def applies(self, filters): + if filters: + applicable = False + for f in filters: + acceptable_values = f['values'] + if f['name'] == "instance-state-name": + if self._state.name in acceptable_values: + applicable = True + if f['name'] == "instance-state-code": + if str(self._state.code) in acceptable_values: + applicable = True + return applicable + # If there are no filters, all instances are valid + return True + class InstanceBackend(object): def __init__(self): @@ -921,22 +936,23 @@ def describe_instance_attribute(self, instance_id, attribute): value = getattr(instance, key) return instance, value - def all_instances(self): + def all_instances(self, filters=None): instances = [] for reservation in self.all_reservations(): for instance in reservation.instances: - instances.append(instance) + if instance.applies(filters): + instances.append(instance) return instances - def all_running_instances(self): + def all_running_instances(self, filters=None): instances = [] for reservation in self.all_reservations(): for instance in reservation.instances: - if instance.state_code == 16: + if instance.state_code == 16 and instance.applies(filters): instances.append(instance) return instances - def get_multi_instances_by_id(self, instance_ids): + def get_multi_instances_by_id(self, instance_ids, filters=None): """ :param instance_ids: A string list with instance ids :return: A list with instance objects @@ -946,7 +962,8 @@ def get_multi_instances_by_id(self, instance_ids): for reservation in self.all_reservations(): for instance in reservation.instances: if instance.id in instance_ids: - result.append(instance) + if instance.applies(filters): + result.append(instance) # TODO: Trim error message down to specific invalid id. 
if instance_ids and len(instance_ids) > len(result): diff --git a/moto/ec2/responses/instances.py b/moto/ec2/responses/instances.py index b9e572d2977a..9b1105291115 100644 --- a/moto/ec2/responses/instances.py +++ b/moto/ec2/responses/instances.py @@ -113,16 +113,29 @@ def start_instances(self): template = self.response_template(EC2_START_INSTANCES) return template.render(instances=instances) + def _get_list_of_dict_params(self, param_prefix, _dct): + """ + Simplified version of _get_dict_param + Allows you to pass in a custom dict instead of using self.querystring by default + """ + params = [] + for key, value in _dct.items(): + if key.startswith(param_prefix): + params.append(value) + return params + def describe_instance_status(self): instance_ids = self._get_multi_param("InstanceId") include_all_instances = self._get_param("IncludeAllInstances") == "true" + filters = self._get_list_prefix("Filter") + filters = [{'name': f['name'], 'values': self._get_list_of_dict_params("value.", f)} for f in filters] if instance_ids: - instances = self.ec2_backend.get_multi_instances_by_id(instance_ids) + instances = self.ec2_backend.get_multi_instances_by_id(instance_ids, filters) elif include_all_instances: - instances = self.ec2_backend.all_instances() + instances = self.ec2_backend.all_instances(filters) else: - instances = self.ec2_backend.all_running_instances() + instances = self.ec2_backend.all_running_instances(filters) template = self.response_template(EC2_INSTANCE_STATUS) return template.render(instances=instances) diff --git a/tests/test_ec2/test_instances.py b/tests/test_ec2/test_instances.py index 041bc8c85c97..ac6a4f4ecf5b 100644 --- a/tests/test_ec2/test_instances.py +++ b/tests/test_ec2/test_instances.py @@ -1144,7 +1144,7 @@ def test_describe_instance_status_with_instances(): @mock_ec2_deprecated -def test_describe_instance_status_with_instance_filter(): +def test_describe_instance_status_with_instance_filter_deprecated(): conn = boto.connect_ec2("the_key", "the_secret") # We want to filter based on this one @@ -1166,6 +1166,63 @@ def test_describe_instance_status_with_instance_filter(): cm.exception.request_id.should_not.be.none +@mock_ec2 +def test_describe_instance_status_with_instance_filter(): + conn = boto3.client("ec2", region_name="us-west-1") + + # We want to filter based on this one + reservation = conn.run_instances(ImageId="ami-1234abcd", MinCount=3, MaxCount=3) + instance1 = reservation['Instances'][0] + instance2 = reservation['Instances'][1] + instance3 = reservation['Instances'][2] + conn.stop_instances(InstanceIds=[instance1['InstanceId']]) + stopped_instance_ids = [instance1['InstanceId']] + running_instance_ids = sorted([instance2['InstanceId'], instance3['InstanceId']]) + all_instance_ids = sorted(stopped_instance_ids + running_instance_ids) + + # Filter instance using the state name + state_name_filter = { + "running_and_stopped": [ + {"Name": "instance-state-name", "Values": ["running", "stopped"]} + ], + "running": [{"Name": "instance-state-name", "Values": ["running"]}], + "stopped": [{"Name": "instance-state-name", "Values": ["stopped"]}], + } + + found_statuses = conn.describe_instance_status(IncludeAllInstances=True, Filters=state_name_filter["running_and_stopped"])['InstanceStatuses'] + found_instance_ids = [status['InstanceId'] for status in found_statuses] + sorted(found_instance_ids).should.equal(all_instance_ids) + + found_statuses = conn.describe_instance_status(IncludeAllInstances=True, Filters=state_name_filter["running"])['InstanceStatuses'] + 
found_instance_ids = [status['InstanceId'] for status in found_statuses] + sorted(found_instance_ids).should.equal(running_instance_ids) + + found_statuses = conn.describe_instance_status(IncludeAllInstances=True, Filters=state_name_filter["stopped"])['InstanceStatuses'] + found_instance_ids = [status['InstanceId'] for status in found_statuses] + sorted(found_instance_ids).should.equal(stopped_instance_ids) + + # Filter instance using the state code + state_code_filter = { + "running_and_stopped": [ + {"Name": "instance-state-code", "Values": ["16", "80"]} + ], + "running": [{"Name": "instance-state-code", "Values": ["16"]}], + "stopped": [{"Name": "instance-state-code", "Values": ["80"]}], + } + + found_statuses = conn.describe_instance_status(IncludeAllInstances=True, Filters=state_code_filter["running_and_stopped"])['InstanceStatuses'] + found_instance_ids = [status['InstanceId'] for status in found_statuses] + sorted(found_instance_ids).should.equal(all_instance_ids) + + found_statuses = conn.describe_instance_status(IncludeAllInstances=True, Filters=state_code_filter["running"])['InstanceStatuses'] + found_instance_ids = [status['InstanceId'] for status in found_statuses] + sorted(found_instance_ids).should.equal(running_instance_ids) + + found_statuses = conn.describe_instance_status(IncludeAllInstances=True, Filters=state_code_filter["stopped"])['InstanceStatuses'] + found_instance_ids = [status['InstanceId'] for status in found_statuses] + sorted(found_instance_ids).should.equal(stopped_instance_ids) + + @requires_boto_gte("2.32.0") @mock_ec2_deprecated def test_describe_instance_status_with_non_running_instances(): From 3aeb5f504319fd901482bc7f3428b3e27774c217 Mon Sep 17 00:00:00 2001 From: Bert Blommers Date: Mon, 24 Feb 2020 13:43:58 +0000 Subject: [PATCH 085/658] Linting --- moto/ec2/models.py | 6 ++-- moto/ec2/responses/instances.py | 9 ++++-- tests/test_ec2/test_instances.py | 48 ++++++++++++++++++++------------ 3 files changed, 40 insertions(+), 23 deletions(-) diff --git a/moto/ec2/models.py b/moto/ec2/models.py index ef506e443309..9c720cda833a 100644 --- a/moto/ec2/models.py +++ b/moto/ec2/models.py @@ -826,11 +826,11 @@ def applies(self, filters): if filters: applicable = False for f in filters: - acceptable_values = f['values'] - if f['name'] == "instance-state-name": + acceptable_values = f["values"] + if f["name"] == "instance-state-name": if self._state.name in acceptable_values: applicable = True - if f['name'] == "instance-state-code": + if f["name"] == "instance-state-code": if str(self._state.code) in acceptable_values: applicable = True return applicable diff --git a/moto/ec2/responses/instances.py b/moto/ec2/responses/instances.py index 9b1105291115..29c346f8242b 100644 --- a/moto/ec2/responses/instances.py +++ b/moto/ec2/responses/instances.py @@ -128,10 +128,15 @@ def describe_instance_status(self): instance_ids = self._get_multi_param("InstanceId") include_all_instances = self._get_param("IncludeAllInstances") == "true" filters = self._get_list_prefix("Filter") - filters = [{'name': f['name'], 'values': self._get_list_of_dict_params("value.", f)} for f in filters] + filters = [ + {"name": f["name"], "values": self._get_list_of_dict_params("value.", f)} + for f in filters + ] if instance_ids: - instances = self.ec2_backend.get_multi_instances_by_id(instance_ids, filters) + instances = self.ec2_backend.get_multi_instances_by_id( + instance_ids, filters + ) elif include_all_instances: instances = self.ec2_backend.all_instances(filters) else: diff --git 
a/tests/test_ec2/test_instances.py b/tests/test_ec2/test_instances.py index ac6a4f4ecf5b..85ba0fe01bbd 100644 --- a/tests/test_ec2/test_instances.py +++ b/tests/test_ec2/test_instances.py @@ -1172,12 +1172,12 @@ def test_describe_instance_status_with_instance_filter(): # We want to filter based on this one reservation = conn.run_instances(ImageId="ami-1234abcd", MinCount=3, MaxCount=3) - instance1 = reservation['Instances'][0] - instance2 = reservation['Instances'][1] - instance3 = reservation['Instances'][2] - conn.stop_instances(InstanceIds=[instance1['InstanceId']]) - stopped_instance_ids = [instance1['InstanceId']] - running_instance_ids = sorted([instance2['InstanceId'], instance3['InstanceId']]) + instance1 = reservation["Instances"][0] + instance2 = reservation["Instances"][1] + instance3 = reservation["Instances"][2] + conn.stop_instances(InstanceIds=[instance1["InstanceId"]]) + stopped_instance_ids = [instance1["InstanceId"]] + running_instance_ids = sorted([instance2["InstanceId"], instance3["InstanceId"]]) all_instance_ids = sorted(stopped_instance_ids + running_instance_ids) # Filter instance using the state name @@ -1189,16 +1189,22 @@ def test_describe_instance_status_with_instance_filter(): "stopped": [{"Name": "instance-state-name", "Values": ["stopped"]}], } - found_statuses = conn.describe_instance_status(IncludeAllInstances=True, Filters=state_name_filter["running_and_stopped"])['InstanceStatuses'] - found_instance_ids = [status['InstanceId'] for status in found_statuses] + found_statuses = conn.describe_instance_status( + IncludeAllInstances=True, Filters=state_name_filter["running_and_stopped"] + )["InstanceStatuses"] + found_instance_ids = [status["InstanceId"] for status in found_statuses] sorted(found_instance_ids).should.equal(all_instance_ids) - found_statuses = conn.describe_instance_status(IncludeAllInstances=True, Filters=state_name_filter["running"])['InstanceStatuses'] - found_instance_ids = [status['InstanceId'] for status in found_statuses] + found_statuses = conn.describe_instance_status( + IncludeAllInstances=True, Filters=state_name_filter["running"] + )["InstanceStatuses"] + found_instance_ids = [status["InstanceId"] for status in found_statuses] sorted(found_instance_ids).should.equal(running_instance_ids) - found_statuses = conn.describe_instance_status(IncludeAllInstances=True, Filters=state_name_filter["stopped"])['InstanceStatuses'] - found_instance_ids = [status['InstanceId'] for status in found_statuses] + found_statuses = conn.describe_instance_status( + IncludeAllInstances=True, Filters=state_name_filter["stopped"] + )["InstanceStatuses"] + found_instance_ids = [status["InstanceId"] for status in found_statuses] sorted(found_instance_ids).should.equal(stopped_instance_ids) # Filter instance using the state code @@ -1210,16 +1216,22 @@ def test_describe_instance_status_with_instance_filter(): "stopped": [{"Name": "instance-state-code", "Values": ["80"]}], } - found_statuses = conn.describe_instance_status(IncludeAllInstances=True, Filters=state_code_filter["running_and_stopped"])['InstanceStatuses'] - found_instance_ids = [status['InstanceId'] for status in found_statuses] + found_statuses = conn.describe_instance_status( + IncludeAllInstances=True, Filters=state_code_filter["running_and_stopped"] + )["InstanceStatuses"] + found_instance_ids = [status["InstanceId"] for status in found_statuses] sorted(found_instance_ids).should.equal(all_instance_ids) - found_statuses = conn.describe_instance_status(IncludeAllInstances=True, 
Filters=state_code_filter["running"])['InstanceStatuses'] - found_instance_ids = [status['InstanceId'] for status in found_statuses] + found_statuses = conn.describe_instance_status( + IncludeAllInstances=True, Filters=state_code_filter["running"] + )["InstanceStatuses"] + found_instance_ids = [status["InstanceId"] for status in found_statuses] sorted(found_instance_ids).should.equal(running_instance_ids) - found_statuses = conn.describe_instance_status(IncludeAllInstances=True, Filters=state_code_filter["stopped"])['InstanceStatuses'] - found_instance_ids = [status['InstanceId'] for status in found_statuses] + found_statuses = conn.describe_instance_status( + IncludeAllInstances=True, Filters=state_code_filter["stopped"] + )["InstanceStatuses"] + found_instance_ids = [status["InstanceId"] for status in found_statuses] sorted(found_instance_ids).should.equal(stopped_instance_ids) From 28b4305759d49bafd2d49943956aaed4def10429 Mon Sep 17 00:00:00 2001 From: Jon Beilke Date: Mon, 24 Feb 2020 11:53:27 -0600 Subject: [PATCH 086/658] add rudimentary support for Config PutEvaluations with TestMode for now --- moto/config/exceptions.py | 10 +++++ moto/config/models.py | 21 +++++++++++ moto/config/responses.py | 8 ++++ tests/test_config/test_config.py | 65 ++++++++++++++++++++++++++++++++ 4 files changed, 104 insertions(+) diff --git a/moto/config/exceptions.py b/moto/config/exceptions.py index 4a0dc0d73099..6b6498d342be 100644 --- a/moto/config/exceptions.py +++ b/moto/config/exceptions.py @@ -366,3 +366,13 @@ def __init__(self, bad_list): message = str(message) super(TooManyResourceKeys, self).__init__("ValidationException", message) + + +class InvalidResultTokenException(JsonRESTError): + code = 400 + + def __init__(self): + message = "The resultToken provided is invalid" + super(InvalidResultTokenException, self).__init__( + "InvalidResultTokenException", message + ) diff --git a/moto/config/models.py b/moto/config/models.py index a66576979350..242a219e4c65 100644 --- a/moto/config/models.py +++ b/moto/config/models.py @@ -40,6 +40,7 @@ TooManyResourceIds, ResourceNotDiscoveredException, TooManyResourceKeys, + InvalidResultTokenException, ) from moto.core import BaseBackend, BaseModel @@ -1089,6 +1090,26 @@ def batch_get_aggregate_resource_config( "UnprocessedResourceIdentifiers": not_found, } + def put_evaluations(self, evaluations=None, result_token=None, test_mode=False): + if not evaluations: + raise InvalidParameterValueException( + "The Evaluations object in your request cannot be null." + "Add the required parameters and try again." + ) + + if not result_token: + raise InvalidResultTokenException() + + # Moto only supports PutEvaluations with test mode currently (missing rule and token support) + if not test_mode: + raise NotImplementedError( + "PutEvaluations without TestMode is not yet implemented" + ) + + return { + "FailedEvaluations": [], + } # At this time, moto is not adding failed evaluations. 
+ config_backends = {} for region in Session().get_available_regions("config"): diff --git a/moto/config/responses.py b/moto/config/responses.py index e977945c9042..3b647b5bff7e 100644 --- a/moto/config/responses.py +++ b/moto/config/responses.py @@ -151,3 +151,11 @@ def batch_get_aggregate_resource_config(self): self._get_param("ResourceIdentifiers"), ) return json.dumps(schema) + + def put_evaluations(self): + evaluations = self.config_backend.put_evaluations( + self._get_param("Evaluations"), + self._get_param("ResultToken"), + self._get_param("TestMode"), + ) + return json.dumps(evaluations) diff --git a/tests/test_config/test_config.py b/tests/test_config/test_config.py index d5ec8f0bc19d..09fe8ed91015 100644 --- a/tests/test_config/test_config.py +++ b/tests/test_config/test_config.py @@ -1802,3 +1802,68 @@ def test_batch_get_aggregate_resource_config(): len(result["UnprocessedResourceIdentifiers"]) == 1 and result["UnprocessedResourceIdentifiers"][0]["SourceRegion"] == "eu-west-1" ) + + +@mock_config +def test_put_evaluations(): + client = boto3.client("config", region_name="us-west-2") + + # Try without Evaluations supplied: + with assert_raises(ClientError) as ce: + client.put_evaluations(Evaluations=[], ResultToken="test", TestMode=True) + assert ce.exception.response["Error"]["Code"] == "InvalidParameterValueException" + assert ( + "The Evaluations object in your request cannot be null" + in ce.exception.response["Error"]["Message"] + ) + + # Try without a ResultToken supplied: + with assert_raises(ClientError) as ce: + client.put_evaluations( + Evaluations=[ + { + "ComplianceResourceType": "AWS::ApiGateway::RestApi", + "ComplianceResourceId": "test-api", + "ComplianceType": "INSUFFICIENT_DATA", + "OrderingTimestamp": datetime(2015, 1, 1), + } + ], + ResultToken="", + TestMode=True, + ) + assert ce.exception.response["Error"]["Code"] == "InvalidResultTokenException" + + # Try without TestMode supplied: + with assert_raises(NotImplementedError) as ce: + client.put_evaluations( + Evaluations=[ + { + "ComplianceResourceType": "AWS::ApiGateway::RestApi", + "ComplianceResourceId": "test-api", + "ComplianceType": "INSUFFICIENT_DATA", + "OrderingTimestamp": datetime(2015, 1, 1), + } + ], + ResultToken="test", + ) + + # Now with proper params: + response = client.put_evaluations( + Evaluations=[ + { + "ComplianceResourceType": "AWS::ApiGateway::RestApi", + "ComplianceResourceId": "test-api", + "ComplianceType": "INSUFFICIENT_DATA", + "OrderingTimestamp": datetime(2015, 1, 1), + } + ], + TestMode=True, + ResultToken="test", + ) + + # this is hard to match against, so remove it + response["ResponseMetadata"].pop("HTTPHeaders", None) + response["ResponseMetadata"].pop("RetryAttempts", None) + response.should.equal( + {"FailedEvaluations": [], "ResponseMetadata": {"HTTPStatusCode": 200,},} + ) From 4c43ca362f8e6d247407368dbe73d0a74f66ea2c Mon Sep 17 00:00:00 2001 From: Jon Beilke Date: Mon, 24 Feb 2020 13:01:38 -0600 Subject: [PATCH 087/658] add workaround for NotImplementedError failing server mode tests --- tests/test_config/test_config.py | 30 +++++++++++++++++------------- 1 file changed, 17 insertions(+), 13 deletions(-) diff --git a/tests/test_config/test_config.py b/tests/test_config/test_config.py index 09fe8ed91015..8e6c3ec4c51e 100644 --- a/tests/test_config/test_config.py +++ b/tests/test_config/test_config.py @@ -3,6 +3,7 @@ import boto3 from botocore.exceptions import ClientError +from nose import SkipTest from nose.tools import assert_raises from moto import mock_s3 @@ 
-1833,19 +1834,22 @@ def test_put_evaluations():
     )
     assert ce.exception.response["Error"]["Code"] == "InvalidResultTokenException"
 
-    # Try without TestMode supplied:
-    with assert_raises(NotImplementedError) as ce:
-        client.put_evaluations(
-            Evaluations=[
-                {
-                    "ComplianceResourceType": "AWS::ApiGateway::RestApi",
-                    "ComplianceResourceId": "test-api",
-                    "ComplianceType": "INSUFFICIENT_DATA",
-                    "OrderingTimestamp": datetime(2015, 1, 1),
-                }
-            ],
-            ResultToken="test",
-        )
+    if os.environ.get("TEST_SERVER_MODE", "false").lower() == "true":
+        raise SkipTest("Does not work in server mode due to error in Werkzeug")
+    else:
+        # Try without TestMode supplied:
+        with assert_raises(NotImplementedError):
+            client.put_evaluations(
+                Evaluations=[
+                    {
+                        "ComplianceResourceType": "AWS::ApiGateway::RestApi",
+                        "ComplianceResourceId": "test-api",
+                        "ComplianceType": "INSUFFICIENT_DATA",
+                        "OrderingTimestamp": datetime(2015, 1, 1),
+                    }
+                ],
+                ResultToken="test",
+            )
 
     # Now with proper params:
     response = client.put_evaluations(

From c3581dbd0b1287d8a0e7fe5c5dba8a64223fd159 Mon Sep 17 00:00:00 2001
From: Jon Beilke
Date: Mon, 24 Feb 2020 13:25:36 -0600
Subject: [PATCH 088/658] add missing os import for config tests

---
 tests/test_config/test_config.py | 1 +
 1 file changed, 1 insertion(+)

diff --git a/tests/test_config/test_config.py b/tests/test_config/test_config.py
index 8e6c3ec4c51e..1ffd52a2cfd2 100644
--- a/tests/test_config/test_config.py
+++ b/tests/test_config/test_config.py
@@ -1,4 +1,5 @@
 import json
+import os
 from datetime import datetime, timedelta
 
 import boto3

From 002683fd13c74028912f934408c0376b410bd487 Mon Sep 17 00:00:00 2001
From: Laurie O
Date: Wed, 26 Feb 2020 00:19:39 +1000
Subject: [PATCH 089/658] Return empty task-token on no-task

To match the SWF documentation, an empty task is one where the task-token is the empty string, rather than being a nonexistent key

Signed-off-by: Laurie O
---
 moto/swf/responses.py | 6 ++++--
 tests/test_swf/responses/test_activity_tasks.py | 4 ++--
 tests/test_swf/responses/test_decision_tasks.py | 8 ++++++--
 3 files changed, 12 insertions(+), 6 deletions(-)

diff --git a/moto/swf/responses.py b/moto/swf/responses.py
index 2b7794ffd7d3..c8c601fa7226 100644
--- a/moto/swf/responses.py
+++ b/moto/swf/responses.py
@@ -423,7 +423,9 @@ def poll_for_decision_task(self):
         if decision:
             return json.dumps(decision.to_full_dict(reverse_order=reverse_order))
         else:
-            return json.dumps({"previousStartedEventId": 0, "startedEventId": 0})
+            return json.dumps(
+                {"previousStartedEventId": 0, "startedEventId": 0, "taskToken": ""}
+            )
 
     def count_pending_decision_tasks(self):
         domain_name = self._params["domain"]
@@ -457,7 +459,7 @@ def poll_for_activity_task(self):
         if activity_task:
             return json.dumps(activity_task.to_full_dict())
         else:
-            return json.dumps({"startedEventId": 0})
+            return json.dumps({"startedEventId": 0, "taskToken": ""})
 
     def count_pending_activity_tasks(self):
         domain_name = self._params["domain"]
diff --git a/tests/test_swf/responses/test_activity_tasks.py b/tests/test_swf/responses/test_activity_tasks.py
index 0b72b7ca7663..4fa965b1193f 100644
--- a/tests/test_swf/responses/test_activity_tasks.py
+++ b/tests/test_swf/responses/test_activity_tasks.py
@@ -35,14 +35,14 @@ def test_poll_for_activity_task_when_one():
 def test_poll_for_activity_task_when_none():
     conn = setup_workflow()
     resp = conn.poll_for_activity_task("test-domain", "activity-task-list")
-    resp.should.equal({"startedEventId": 0})
+    resp.should.equal({"startedEventId": 0, "taskToken": ""})
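
As an aside, a minimal sketch (not part of the patch) of what the empty task token means for a boto3-based poller: a worker can now rely on simple truthiness of taskToken to detect "no task", matching real SWF. The domain and task-list names below are arbitrary:

    import boto3
    from moto import mock_swf

    @mock_swf
    def demo_empty_poll():
        client = boto3.client("swf", region_name="us-east-1")
        client.register_domain(
            name="demo-domain", workflowExecutionRetentionPeriodInDays="1"
        )
        resp = client.poll_for_activity_task(
            domain="demo-domain", taskList={"name": "demo-tasks"}
        )
        # With nothing scheduled, moto now returns startedEventId == 0 and
        # taskToken == "", so the worker simply loops and polls again.
        if not resp.get("taskToken"):
            print("no activity task available")
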
@mock_swf_deprecated def test_poll_for_activity_task_on_non_existent_queue(): conn = setup_workflow() resp = conn.poll_for_activity_task("test-domain", "non-existent-queue") - resp.should.equal({"startedEventId": 0}) + resp.should.equal({"startedEventId": 0, "taskToken": ""}) # CountPendingActivityTasks endpoint diff --git a/tests/test_swf/responses/test_decision_tasks.py b/tests/test_swf/responses/test_decision_tasks.py index 6389536e6832..bdf690ce0a82 100644 --- a/tests/test_swf/responses/test_decision_tasks.py +++ b/tests/test_swf/responses/test_decision_tasks.py @@ -38,14 +38,18 @@ def test_poll_for_decision_task_when_none(): resp = conn.poll_for_decision_task("test-domain", "queue") # this is the DecisionTask representation you get from the real SWF # after waiting 60s when there's no decision to be taken - resp.should.equal({"previousStartedEventId": 0, "startedEventId": 0}) + resp.should.equal( + {"previousStartedEventId": 0, "startedEventId": 0, "taskToken": ""} + ) @mock_swf_deprecated def test_poll_for_decision_task_on_non_existent_queue(): conn = setup_workflow() resp = conn.poll_for_decision_task("test-domain", "non-existent-queue") - resp.should.equal({"previousStartedEventId": 0, "startedEventId": 0}) + resp.should.equal( + {"previousStartedEventId": 0, "startedEventId": 0, "taskToken": ""} + ) @mock_swf_deprecated From 369285b7ca1de06aaa296ea5d51482dad2492d83 Mon Sep 17 00:00:00 2001 From: Laurie O Date: Wed, 26 Feb 2020 01:06:58 +1000 Subject: [PATCH 090/658] Don't 0-default previous started event ID Signed-off-by: Laurie O --- moto/swf/models/decision_task.py | 5 +++-- tests/test_swf/models/test_decision_task.py | 2 +- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/moto/swf/models/decision_task.py b/moto/swf/models/decision_task.py index c8c9824a253f..d7236a0ad272 100644 --- a/moto/swf/models/decision_task.py +++ b/moto/swf/models/decision_task.py @@ -15,7 +15,7 @@ def __init__(self, workflow_execution, scheduled_event_id): self.workflow_type = workflow_execution.workflow_type self.task_token = str(uuid.uuid4()) self.scheduled_event_id = scheduled_event_id - self.previous_started_event_id = 0 + self.previous_started_event_id = None self.started_event_id = None self.started_timestamp = None self.start_to_close_timeout = ( @@ -40,10 +40,11 @@ def to_full_dict(self, reverse_order=False): hsh = { "events": [evt.to_dict() for evt in events], "taskToken": self.task_token, - "previousStartedEventId": self.previous_started_event_id, "workflowExecution": self.workflow_execution.to_short_dict(), "workflowType": self.workflow_type.to_short_dict(), } + if self.previous_started_event_id is not None: + hsh["previousStartedEventId"] = self.previous_started_event_id if self.started_event_id: hsh["startedEventId"] = self.started_event_id return hsh diff --git a/tests/test_swf/models/test_decision_task.py b/tests/test_swf/models/test_decision_task.py index 0661adffb4ac..8296f0472943 100644 --- a/tests/test_swf/models/test_decision_task.py +++ b/tests/test_swf/models/test_decision_task.py @@ -24,7 +24,7 @@ def test_decision_task_full_dict_representation(): fd = dt.to_full_dict() fd["events"].should.be.a("list") - fd["previousStartedEventId"].should.equal(0) + fd.should_not.contain("previousStartedEventId") fd.should_not.contain("startedEventId") fd.should.contain("taskToken") fd["workflowExecution"].should.equal(wfe.to_short_dict()) From 209c9997061b88d11ba62e700d5df05b706345ef Mon Sep 17 00:00:00 2001 From: Laurie O Date: Wed, 26 Feb 2020 01:08:03 +1000 Subject: [PATCH 
091/658] Keep track of previous started event ID

Closes #2107

Signed-off-by: Laurie O
---
 moto/swf/models/decision_task.py              |  3 ++-
 moto/swf/models/workflow_execution.py         |  4 +++-
 tests/test_swf/models/test_decision_task.py   |  3 ++-
 .../test_swf/responses/test_decision_tasks.py | 24 +++++++++++++++++++
 4 files changed, 31 insertions(+), 3 deletions(-)

diff --git a/moto/swf/models/decision_task.py b/moto/swf/models/decision_task.py
index d7236a0ad272..aaf810f081e8 100644
--- a/moto/swf/models/decision_task.py
+++ b/moto/swf/models/decision_task.py
@@ -49,10 +49,11 @@ def to_full_dict(self, reverse_order=False):
             hsh["startedEventId"] = self.started_event_id
         return hsh
 
-    def start(self, started_event_id):
+    def start(self, started_event_id, previous_started_event_id=None):
         self.state = "STARTED"
         self.started_timestamp = unix_time()
         self.started_event_id = started_event_id
+        self.previous_started_event_id = previous_started_event_id
 
     def complete(self):
         self._check_workflow_execution_open()
diff --git a/moto/swf/models/workflow_execution.py b/moto/swf/models/workflow_execution.py
index 17ce819fb4e8..035a47558d50 100644
--- a/moto/swf/models/workflow_execution.py
+++ b/moto/swf/models/workflow_execution.py
@@ -82,6 +82,7 @@ def __init__(self, domain, workflow_type, workflow_id, **kwargs):
         self._events = []
         # child workflows
         self.child_workflow_executions = []
+        self._previous_started_event_id = None
 
     def __repr__(self):
         return "WorkflowExecution(run_id: {0})".format(self.run_id)
@@ -295,7 +296,8 @@ def start_decision_task(self, task_token, identity=None):
             scheduled_event_id=dt.scheduled_event_id,
             identity=identity,
         )
-        dt.start(evt.event_id)
+        dt.start(evt.event_id, self._previous_started_event_id)
+        self._previous_started_event_id = evt.event_id
 
     def complete_decision_task(
         self, task_token, decisions=None, execution_context=None
diff --git a/tests/test_swf/models/test_decision_task.py b/tests/test_swf/models/test_decision_task.py
index 8296f0472943..8ddb230e2f39 100644
--- a/tests/test_swf/models/test_decision_task.py
+++ b/tests/test_swf/models/test_decision_task.py
@@ -30,9 +30,10 @@ def test_decision_task_full_dict_representation():
     fd["workflowExecution"].should.equal(wfe.to_short_dict())
     fd["workflowType"].should.equal(wft.to_short_dict())
 
-    dt.start(1234)
+    dt.start(1234, 1230)
     fd = dt.to_full_dict()
     fd["startedEventId"].should.equal(1234)
+    fd["previousStartedEventId"].should.equal(1230)
 
 
 def test_decision_task_first_timeout():
diff --git a/tests/test_swf/responses/test_decision_tasks.py b/tests/test_swf/responses/test_decision_tasks.py
index 6389536e6832..6493302f9850 100644
--- a/tests/test_swf/responses/test_decision_tasks.py
+++ b/tests/test_swf/responses/test_decision_tasks.py
@@ -30,6 +30,30 @@ def test_poll_for_decision_task_when_one():
     )
 
 
+@mock_swf_deprecated
+def test_poll_for_decision_task_previous_started_event_id():
+    conn = setup_workflow()
+
+    resp = conn.poll_for_decision_task("test-domain", "queue")
+    assert resp["workflowExecution"]["runId"] == conn.run_id
+    assert "previousStartedEventId" not in resp
+
+    # Require a failing decision, in this case a non-existent activity type
+    attrs = {
+        "activityId": "spam",
+        "activityType": {"name": "test-activity", "version": "v1.42"},
+        "taskList": "eggs",
+    }
+    decision = {
+        "decisionType": "ScheduleActivityTask",
+        "scheduleActivityTaskDecisionAttributes": attrs,
+    }
+    conn.respond_decision_task_completed(resp["taskToken"], decisions=[decision])
+    resp = conn.poll_for_decision_task("test-domain", "queue")
+    assert
resp["workflowExecution"]["runId"] == conn.run_id + assert resp["previousStartedEventId"] == 3 + + @mock_swf_deprecated def test_poll_for_decision_task_when_none(): conn = setup_workflow() From 607e0a845241b7033e14bae57117ee275c44b7bb Mon Sep 17 00:00:00 2001 From: Olabode Anise Date: Tue, 25 Feb 2020 20:25:44 -0500 Subject: [PATCH 092/658] ENH: changes the behavior of delete_parameter to respond with a 400 error when the parameter does not exist. Currently, the delete_parameter function for the ssm client will respond with a dict containing a key of Invalid Parameter which has a value of a list containing the parameter name that was requested to be deleted when a parameter with said name doesn't exist which doesn't match the behavior of boto3. --- moto/ssm/models.py | 5 +---- moto/ssm/responses.py | 8 +++++++- tests/test_ssm/test_ssm_boto3.py | 14 ++++++++++++++ 3 files changed, 22 insertions(+), 5 deletions(-) diff --git a/moto/ssm/models.py b/moto/ssm/models.py index 60c47f021646..a7518d405b4b 100644 --- a/moto/ssm/models.py +++ b/moto/ssm/models.py @@ -278,10 +278,7 @@ def __init__(self): self._region = region def delete_parameter(self, name): - try: - del self._parameters[name] - except KeyError: - pass + return self._parameters.pop(name, None) def delete_parameters(self, names): result = [] diff --git a/moto/ssm/responses.py b/moto/ssm/responses.py index 1b13780a89c1..831737848f57 100644 --- a/moto/ssm/responses.py +++ b/moto/ssm/responses.py @@ -22,7 +22,13 @@ def _get_param(self, param, default=None): def delete_parameter(self): name = self._get_param("Name") - self.ssm_backend.delete_parameter(name) + result = self.ssm_backend.delete_parameter(name) + if result is None: + error = { + "__type": "ParameterNotFound", + "message": "Parameter {0} not found.".format(name), + } + return json.dumps(error), dict(status=400) return json.dumps({}) def delete_parameters(self): diff --git a/tests/test_ssm/test_ssm_boto3.py b/tests/test_ssm/test_ssm_boto3.py index 5b978520dd41..26db26dcb6d3 100644 --- a/tests/test_ssm/test_ssm_boto3.py +++ b/tests/test_ssm/test_ssm_boto3.py @@ -30,6 +30,20 @@ def test_delete_parameter(): len(response["Parameters"]).should.equal(0) +@mock_ssm +def test_delete_nonexistent_parameter(): + client = boto3.client("ssm", region_name="us-east-1") + + try: + client.delete_parameter(Name="test_noexist") + raise RuntimeError("Should of failed") + except botocore.exceptions.ClientError as err: + err.operation_name.should.equal("DeleteParameter") + err.response["Error"]["Message"].should.equal( + "Parameter test_noexist not found." 
+ ) + + @mock_ssm def test_delete_parameters(): client = boto3.client("ssm", region_name="us-east-1") From 47349b30df93d383d938effdcd6462af53fc1812 Mon Sep 17 00:00:00 2001 From: Bert Blommers Date: Thu, 27 Feb 2020 08:54:57 +0000 Subject: [PATCH 093/658] #2567 - When mocking URLs, always return the first match --- moto/core/models.py | 18 ++++++++++++++++++ tests/test_s3/test_s3.py | 16 ++++++++++++++-- 2 files changed, 32 insertions(+), 2 deletions(-) diff --git a/moto/core/models.py b/moto/core/models.py index ffb2ffd9f9e9..8ca74d5b5d41 100644 --- a/moto/core/models.py +++ b/moto/core/models.py @@ -7,6 +7,7 @@ import os import re import six +import types from io import BytesIO from collections import defaultdict from botocore.handlers import BUILTIN_HANDLERS @@ -217,12 +218,29 @@ def _url_matches(self, url, other, match_querystring=False): assert_all_requests_are_fired=False, target="botocore.vendored.requests.adapters.HTTPAdapter.send", ) + responses_mock = responses._default_mock # Add passthrough to allow any other requests to work # Since this uses .startswith, it applies to http and https requests. responses_mock.add_passthru("http") +def _find_first_match(self, request): + for i, match in enumerate(self._matches): + if match.matches(request): + return match + + return None + + +# Modify behaviour of the matcher to only/always return the first match +# Default behaviour is to return subsequent matches for subsequent requests, which leads to https://github.com/spulec/moto/issues/2567 +# - First request matches on the appropriate S3 URL +# - Same request, executed again, will be matched on the subsequent match, which happens to be the catch-all, not-yet-implemented, callback +# Fix: Always return the first match +responses_mock._find_match = types.MethodType(_find_first_match, responses_mock) + + BOTOCORE_HTTP_METHODS = ["GET", "DELETE", "HEAD", "OPTIONS", "PATCH", "POST", "PUT"] diff --git a/tests/test_s3/test_s3.py b/tests/test_s3/test_s3.py index 2193f8b2744d..48655ee17385 100644 --- a/tests/test_s3/test_s3.py +++ b/tests/test_s3/test_s3.py @@ -39,6 +39,7 @@ from moto.core.exceptions import InvalidNextTokenException from moto.core.utils import py2_strip_unicode_keys + if settings.TEST_SERVER_MODE: REDUCED_PART_SIZE = s3model.UPLOAD_PART_MIN_SIZE EXPECTED_ETAG = '"140f92a6df9f9e415f74a1463bcee9bb-2"' @@ -1018,12 +1019,23 @@ def test_s3_object_in_public_bucket(): s3_anonymous.Object(key="file.txt", bucket_name="test-bucket").get() exc.exception.response["Error"]["Code"].should.equal("403") + +@mock_s3 +def test_s3_object_in_public_bucket_using_multiple_presigned_urls(): + s3 = boto3.resource("s3") + bucket = s3.Bucket("test-bucket") + bucket.create( + ACL="public-read", CreateBucketConfiguration={"LocationConstraint": "us-west-1"} + ) + bucket.put_object(Body=b"ABCD", Key="file.txt") + params = {"Bucket": "test-bucket", "Key": "file.txt"} presigned_url = boto3.client("s3").generate_presigned_url( "get_object", params, ExpiresIn=900 ) - response = requests.get(presigned_url) - assert response.status_code == 200 + for i in range(1, 10): + response = requests.get(presigned_url) + assert response.status_code == 200, "Failed on req number {}".format(i) @mock_s3 From 9227845121788a4804da5e026d9f90baf9541bd7 Mon Sep 17 00:00:00 2001 From: Olabode Anise Date: Thu, 27 Feb 2020 07:29:13 -0500 Subject: [PATCH 094/658] using assert_raises --- tests/test_ssm/test_ssm_boto3.py | 12 +++++------- 1 file changed, 5 insertions(+), 7 deletions(-) diff --git a/tests/test_ssm/test_ssm_boto3.py 
b/tests/test_ssm/test_ssm_boto3.py index 26db26dcb6d3..bb674fb652d8 100644 --- a/tests/test_ssm/test_ssm_boto3.py +++ b/tests/test_ssm/test_ssm_boto3.py @@ -34,14 +34,12 @@ def test_delete_parameter(): def test_delete_nonexistent_parameter(): client = boto3.client("ssm", region_name="us-east-1") - try: + with assert_raises(ClientError) as ex: client.delete_parameter(Name="test_noexist") - raise RuntimeError("Should of failed") - except botocore.exceptions.ClientError as err: - err.operation_name.should.equal("DeleteParameter") - err.response["Error"]["Message"].should.equal( - "Parameter test_noexist not found." - ) + ex.exception.response["Error"]["Code"].should.equal("ParameterNotFound") + ex.exception.response["Error"]["Message"].should.equal( + "Parameter test_noexist not found." + ) @mock_ssm From 00134d2df37bb4dcd5f447ef951d383bfec0903c Mon Sep 17 00:00:00 2001 From: Bert Blommers Date: Sat, 29 Feb 2020 09:41:06 +0000 Subject: [PATCH 095/658] Fix dependencies to versions that support Python 2 --- requirements-dev.txt | 2 +- setup.py | 13 ++++++++----- 2 files changed, 9 insertions(+), 6 deletions(-) diff --git a/requirements-dev.txt b/requirements-dev.txt index c5f055a26341..2aaca300b428 100644 --- a/requirements-dev.txt +++ b/requirements-dev.txt @@ -1,5 +1,5 @@ -r requirements.txt -mock +mock==3.0.5 # Last version compatible with Python 2.7 nose black; python_version >= '3.6' regex==2019.11.1; python_version >= '3.6' # Needed for black diff --git a/setup.py b/setup.py index 1dde71ac7bd0..b806f7bae494 100755 --- a/setup.py +++ b/setup.py @@ -1,20 +1,20 @@ #!/usr/bin/env python from __future__ import unicode_literals import codecs +from io import open import os import re import setuptools from setuptools import setup, find_packages import sys - # Borrowed from pip at https://github.com/pypa/pip/blob/62c27dee45625e1b63d1e023b0656310f276e050/setup.py#L11-L15 here = os.path.abspath(os.path.dirname(__file__)) def read(*parts): # intentionally *not* adding an encoding option to open, See: # https://github.com/pypa/virtualenv/issues/201#issuecomment-3145690 - with codecs.open(os.path.join(here, *parts), 'r') as fp: + with open(os.path.join(here, *parts), 'r') as fp: return fp.read() @@ -28,7 +28,8 @@ def get_version(): install_requires = [ - "Jinja2>=2.10.1", + "setuptools==44.0.0", + "Jinja2==2.11.0", "boto>=2.36.0", "boto3>=1.9.201", "botocore>=1.12.201", @@ -41,14 +42,16 @@ def get_version(): "pytz", "python-dateutil<3.0.0,>=2.1", "python-jose<4.0.0", - "mock", + "mock==3.0.5", "docker>=2.5.1", "jsondiff>=1.1.2", "aws-xray-sdk!=0.96,>=0.93", "responses>=0.9.0", "idna<2.9,>=2.5", "cfn-lint>=0.4.0", - "sshpubkeys>=3.1.0,<4.0" + "sshpubkeys>=3.1.0,<4.0", + "zipp==0.6.0", + "more-itertools==5.0.0" ] extras_require = { From 5b9b9656476f852ff00c8095ea5a28d330c15d87 Mon Sep 17 00:00:00 2001 From: aimannajjar Date: Wed, 18 Dec 2019 17:29:13 -0500 Subject: [PATCH 096/658] [ec2-sg] added logic to create a second default egress rule for ipv6 --- moto/ec2/models.py | 6 ++++++ tests/test_ec2/test_security_groups.py | 12 ++++++++++++ 2 files changed, 18 insertions(+) diff --git a/moto/ec2/models.py b/moto/ec2/models.py index 166d8e646a1b..8afa30aa4667 100644 --- a/moto/ec2/models.py +++ b/moto/ec2/models.py @@ -1722,6 +1722,12 @@ def __init__(self, ec2_backend, group_id, name, description, vpc_id=None): self.vpc_id = vpc_id self.owner_id = OWNER_ID + # Append default IPv6 egress rule for VPCs with IPv6 support + if vpc_id: + vpc = self.ec2_backend.vpcs.get(vpc_id) + if vpc and 
len(vpc.get_cidr_block_association_set(ipv6=True)) > 0:
+                self.egress_rules.append(SecurityRule("-1", None, None, [], []))
+
     @classmethod
     def create_from_cloudformation_json(
         cls, resource_name, cloudformation_json, region_name
diff --git a/tests/test_ec2/test_security_groups.py b/tests/test_ec2/test_security_groups.py
index bb9c8f52a17b..ac9f39b57aeb 100644
--- a/tests/test_ec2/test_security_groups.py
+++ b/tests/test_ec2/test_security_groups.py
@@ -123,6 +123,18 @@ def test_create_two_security_groups_with_same_name_in_different_vpc():
     set(group_names).should.equal(set(["default", "test security group"]))
 
 
+@mock_ec2
+def test_create_two_security_groups_in_vpc_with_ipv6_enabled():
+    ec2 = boto3.resource("ec2", region_name="us-west-1")
+    vpc = ec2.create_vpc(CidrBlock="10.0.0.0/16", AmazonProvidedIpv6CidrBlock=True)
+
+    security_group = ec2.create_security_group(
+        GroupName="sg01", Description="Test security group sg01", VpcId=vpc.id
+    )
+
+    security_group.ip_permissions_egress.should.have.length_of(2)
+
+
 @mock_ec2_deprecated
 def test_deleting_security_groups():
     conn = boto.connect_ec2("the_key", "the_secret")

From 639c1abcb486cd083d801122ae7f37dcaa65292f Mon Sep 17 00:00:00 2001
From: aimannajjar
Date: Sun, 1 Mar 2020 08:23:31 -0500
Subject: [PATCH 097/658] clarifying comment in test case

---
 tests/test_ec2/test_security_groups.py | 1 +
 1 file changed, 1 insertion(+)

diff --git a/tests/test_ec2/test_security_groups.py b/tests/test_ec2/test_security_groups.py
index ac9f39b57aeb..7e936b7a5872 100644
--- a/tests/test_ec2/test_security_groups.py
+++ b/tests/test_ec2/test_security_groups.py
@@ -132,6 +132,7 @@ def test_create_two_security_groups_in_vpc_with_ipv6_enabled():
         GroupName="sg01", Description="Test security group sg01", VpcId=vpc.id
     )
 
+    # The security group must have two default egress rules (one for ipv4 and another for ipv6)
     security_group.ip_permissions_egress.should.have.length_of(2)
 
 

From 9fdeaca589956d7911e8b05491e3e15f7a9bcfad Mon Sep 17 00:00:00 2001
From: Jon Nangle
Date: Mon, 2 Mar 2020 12:46:15 +0000
Subject: [PATCH 098/658] Support GeoLocation and Failover on Route 53

---
 moto/route53/models.py             | 12 +++++
 tests/test_route53/test_route53.py | 82 ++++++++++++++++++++++++++++++
 2 files changed, 94 insertions(+)

diff --git a/moto/route53/models.py b/moto/route53/models.py
index 2ae03e54d71c..0bdefd25b0fd 100644
--- a/moto/route53/models.py
+++ b/moto/route53/models.py
@@ -88,6 +88,8 @@ def __init__(self, kwargs):
         self.hosted_zone_name = kwargs.get("HostedZoneName")
         self.hosted_zone_id = kwargs.get("HostedZoneId")
         self.alias_target = kwargs.get("AliasTarget")
+        self.failover = kwargs.get("Failover")
+        self.geo_location = kwargs.get("GeoLocation")
 
     @classmethod
     def create_from_cloudformation_json(
@@ -154,6 +156,16 @@ def to_xml(self):
             {% if record_set.ttl %}
                 <TTL>{{ record_set.ttl }}</TTL>
             {% endif %}
+            {% if record_set.failover %}
+                <Failover>{{ record_set.failover }}</Failover>
+            {% endif %}
+            {% if record_set.geo_location %}
+            <GeoLocation>
+            {% for geo_key in ['ContinentCode','CountryCode','SubdivisionCode'] %}
+              {% if record_set.geo_location[geo_key] %}<{{ geo_key }}>{{ record_set.geo_location[geo_key] }}</{{ geo_key }}>{% endif %}
+            {% endfor %}
+            </GeoLocation>
+            {% endif %}
             {% if record_set.alias_target %}
             <AliasTarget>
                 <HostedZoneId>{{ record_set.alias_target['HostedZoneId'] }}</HostedZoneId>
diff --git a/tests/test_route53/test_route53.py b/tests/test_route53/test_route53.py
index 746c78719399..8cf148c14296 100644
---
record["Weight"].should.equal(10) +@mock_route53 +def test_failover_record_sets(): + conn = boto3.client("route53", region_name="us-east-2") + conn.create_hosted_zone( + Name="test.zone.", CallerReference=str(hash("test")) + ) + zones = conn.list_hosted_zones_by_name(DNSName="test.zone.") + hosted_zone_id = zones["HostedZones"][0]["Id"] + + # Create geolocation record + conn.change_resource_record_sets( + HostedZoneId=hosted_zone_id, + ChangeBatch={ + "Changes": [ + { + "Action": "CREATE", + "ResourceRecordSet": { + "Name": "failover.test.zone.", + "Type": "A", + "TTL": 10, + "ResourceRecords": [{"Value": "127.0.0.1"}], + "Failover": "PRIMARY" + } + } + ] + } + ) + + response = conn.list_resource_record_sets(HostedZoneId=hosted_zone_id) + record = response["ResourceRecordSets"][0] + record["Failover"].should.equal("PRIMARY") + + +@mock_route53 +def test_geolocation_record_sets(): + conn = boto3.client("route53", region_name="us-east-2") + conn.create_hosted_zone( + Name="test.zone.", CallerReference=str(hash("test")) + ) + zones = conn.list_hosted_zones_by_name(DNSName="test.zone.") + hosted_zone_id = zones["HostedZones"][0]["Id"] + + # Create geolocation record + conn.change_resource_record_sets( + HostedZoneId=hosted_zone_id, + ChangeBatch={ + "Changes": [ + { + "Action": "CREATE", + "ResourceRecordSet": { + "Name": "georecord1.test.zone.", + "Type": "A", + "TTL": 10, + "ResourceRecords": [{"Value": "127.0.0.1"}], + "GeoLocation": { + "ContinentCode": "EU" + } + } + }, + { + "Action": "CREATE", + "ResourceRecordSet": { + "Name": "georecord2.test.zone.", + "Type": "A", + "TTL": 10, + "ResourceRecords": [{"Value": "127.0.0.2"}], + "GeoLocation": { + "CountryCode": "US", + "SubdivisionCode": "NY" + } + } + } + ] + } + ) + + response = conn.list_resource_record_sets(HostedZoneId=hosted_zone_id) + rrs = response["ResourceRecordSets"] + rrs[0]["GeoLocation"].should.equal({"ContinentCode": "EU"}) + rrs[1]["GeoLocation"].should.equal({"CountryCode": "US", "SubdivisionCode": "NY"}) + + @mock_route53 def test_change_resource_record_invalid(): conn = boto3.client("route53", region_name="us-east-1") From bebcf52851e5dd094dd8b670f1ff4b65d6599f9d Mon Sep 17 00:00:00 2001 From: Jon Nangle Date: Mon, 2 Mar 2020 13:07:34 +0000 Subject: [PATCH 099/658] Formatting --- tests/test_route53/test_route53.py | 31 +++++++++++------------------- 1 file changed, 11 insertions(+), 20 deletions(-) diff --git a/tests/test_route53/test_route53.py b/tests/test_route53/test_route53.py index 8cf148c14296..8c036441c4f0 100644 --- a/tests/test_route53/test_route53.py +++ b/tests/test_route53/test_route53.py @@ -756,9 +756,7 @@ def test_change_weighted_resource_record_sets(): @mock_route53 def test_failover_record_sets(): conn = boto3.client("route53", region_name="us-east-2") - conn.create_hosted_zone( - Name="test.zone.", CallerReference=str(hash("test")) - ) + conn.create_hosted_zone(Name="test.zone.", CallerReference=str(hash("test"))) zones = conn.list_hosted_zones_by_name(DNSName="test.zone.") hosted_zone_id = zones["HostedZones"][0]["Id"] @@ -774,11 +772,11 @@ def test_failover_record_sets(): "Type": "A", "TTL": 10, "ResourceRecords": [{"Value": "127.0.0.1"}], - "Failover": "PRIMARY" - } + "Failover": "PRIMARY", + }, } ] - } + }, ) response = conn.list_resource_record_sets(HostedZoneId=hosted_zone_id) @@ -789,9 +787,7 @@ def test_failover_record_sets(): @mock_route53 def test_geolocation_record_sets(): conn = boto3.client("route53", region_name="us-east-2") - conn.create_hosted_zone( - Name="test.zone.", 
CallerReference=str(hash("test")) - ) + conn.create_hosted_zone(Name="test.zone.", CallerReference=str(hash("test"))) zones = conn.list_hosted_zones_by_name(DNSName="test.zone.") hosted_zone_id = zones["HostedZones"][0]["Id"] @@ -807,10 +803,8 @@ def test_geolocation_record_sets(): "Type": "A", "TTL": 10, "ResourceRecords": [{"Value": "127.0.0.1"}], - "GeoLocation": { - "ContinentCode": "EU" - } - } + "GeoLocation": {"ContinentCode": "EU"}, + }, }, { "Action": "CREATE", @@ -819,14 +813,11 @@ def test_geolocation_record_sets(): "Type": "A", "TTL": 10, "ResourceRecords": [{"Value": "127.0.0.2"}], - "GeoLocation": { - "CountryCode": "US", - "SubdivisionCode": "NY" - } - } - } + "GeoLocation": {"CountryCode": "US", "SubdivisionCode": "NY"}, + }, + }, ] - } + }, ) response = conn.list_resource_record_sets(HostedZoneId=hosted_zone_id) From bc1c9a27f1fd2eaf3b41e0e6e5a11da0deab88b0 Mon Sep 17 00:00:00 2001 From: Heyder Date: Wed, 4 Mar 2020 23:17:03 -0300 Subject: [PATCH 100/658] fix use of _get_default result on _get_appended_list --- moto/dynamodb2/models.py | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/moto/dynamodb2/models.py b/moto/dynamodb2/models.py index 88f750775615..747fa93a77fd 100644 --- a/moto/dynamodb2/models.py +++ b/moto/dynamodb2/models.py @@ -450,9 +450,7 @@ def _get_appended_list(self, value, expression_attribute_values): old_list_key = list_append_re.group(1) # old_key could be a function itself (if_not_exists) if old_list_key.startswith("if_not_exists"): - old_list = DynamoType( - expression_attribute_values[self._get_default(old_list_key)] - ) + old_list = DynamoType(self._get_default(old_list_key)) else: old_list = self.attrs[old_list_key.split(".")[0]] if "." in old_list_key: From f8dd5a13c65c8d30f235e99b334c6d00f00a0dac Mon Sep 17 00:00:00 2001 From: Heyder Date: Wed, 4 Mar 2020 23:56:30 -0300 Subject: [PATCH 101/658] fix case if don't have attrs --- moto/dynamodb2/models.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/moto/dynamodb2/models.py b/moto/dynamodb2/models.py index 747fa93a77fd..7d995486d57d 100644 --- a/moto/dynamodb2/models.py +++ b/moto/dynamodb2/models.py @@ -450,7 +450,11 @@ def _get_appended_list(self, value, expression_attribute_values): old_list_key = list_append_re.group(1) # old_key could be a function itself (if_not_exists) if old_list_key.startswith("if_not_exists"): - old_list = DynamoType(self._get_default(old_list_key)) + old_list = self._get_default(old_list_key) + if not isinstance(old_list, DynamoType): + old_list = DynamoType( + expression_attribute_values[old_list] + ) else: old_list = self.attrs[old_list_key.split(".")[0]] if "." 
in old_list_key: From 916add9ac54845a676b29d7d4b47bea624c217ab Mon Sep 17 00:00:00 2001 From: Laurie O Date: Thu, 5 Mar 2020 23:37:17 +1000 Subject: [PATCH 102/658] Add SWF domain and type undeprecation Signed-off-by: Laurie O --- moto/swf/models/__init__.py | 13 ++++ moto/swf/responses.py | 23 ++++++ .../test_swf/responses/test_activity_types.py | 74 +++++++++++++++++++ tests/test_swf/responses/test_domains.py | 53 +++++++++++++ .../test_swf/responses/test_workflow_types.py | 72 ++++++++++++++++++ 5 files changed, 235 insertions(+) diff --git a/moto/swf/models/__init__.py b/moto/swf/models/__init__.py index e5b285f5bc3a..010c8c734649 100644 --- a/moto/swf/models/__init__.py +++ b/moto/swf/models/__init__.py @@ -121,6 +121,12 @@ def deprecate_domain(self, name): raise SWFDomainDeprecatedFault(name) domain.status = "DEPRECATED" + def undeprecate_domain(self, name): + domain = self._get_domain(name) + if domain.status == "REGISTERED": + raise SWFDomainAlreadyExistsFault(name) + domain.status = "REGISTERED" + def describe_domain(self, name): return self._get_domain(name) @@ -148,6 +154,13 @@ def deprecate_type(self, kind, domain_name, name, version): raise SWFTypeDeprecatedFault(_type) _type.status = "DEPRECATED" + def undeprecate_type(self, kind, domain_name, name, version): + domain = self._get_domain(domain_name) + _type = domain.get_type(kind, name, version) + if _type.status == "REGISTERED": + raise SWFTypeAlreadyExistsFault(_type) + _type.status = "REGISTERED" + def describe_type(self, kind, domain_name, name, version): domain = self._get_domain(domain_name) return domain.get_type(kind, name, version) diff --git a/moto/swf/responses.py b/moto/swf/responses.py index c8c601fa7226..17ec7281a571 100644 --- a/moto/swf/responses.py +++ b/moto/swf/responses.py @@ -92,6 +92,17 @@ def _deprecate_type(self, kind): self.swf_backend.deprecate_type(kind, domain, name, version) return "" + def _undeprecate_type(self, kind): + domain = self._params["domain"] + _type_args = self._params["{0}Type".format(kind)] + name = _type_args["name"] + version = _type_args["version"] + self._check_string(domain) + self._check_string(name) + self._check_string(version) + self.swf_backend.undeprecate_type(kind, domain, name, version) + return "" + # TODO: implement pagination def list_domains(self): status = self._params["registrationStatus"] @@ -219,6 +230,12 @@ def deprecate_domain(self): self.swf_backend.deprecate_domain(name) return "" + def undeprecate_domain(self): + name = self._params["name"] + self._check_string(name) + self.swf_backend.undeprecate_domain(name) + return "" + def describe_domain(self): name = self._params["name"] self._check_string(name) @@ -278,6 +295,9 @@ def register_activity_type(self): def deprecate_activity_type(self): return self._deprecate_type("activity") + def undeprecate_activity_type(self): + return self._undeprecate_type("activity") + def describe_activity_type(self): return self._describe_type("activity") @@ -333,6 +353,9 @@ def register_workflow_type(self): def deprecate_workflow_type(self): return self._deprecate_type("workflow") + def undeprecate_workflow_type(self): + return self._undeprecate_type("workflow") + def describe_workflow_type(self): return self._describe_type("workflow") diff --git a/tests/test_swf/responses/test_activity_types.py b/tests/test_swf/responses/test_activity_types.py index 3fa9ad6b1151..d49e5d4cbd1c 100644 --- a/tests/test_swf/responses/test_activity_types.py +++ b/tests/test_swf/responses/test_activity_types.py @@ -1,8 +1,11 @@ import boto from 
boto.swf.exceptions import SWFResponseError
+import boto3
+from botocore.exceptions import ClientError
 import sure # noqa
 
 from moto import mock_swf_deprecated
+from moto import mock_swf
 
 
 # RegisterActivityType endpoint
@@ -110,6 +113,77 @@ def test_deprecate_non_existent_activity_type():
     ).should.throw(SWFResponseError)
 
 
+# UndeprecateActivityType endpoint
+@mock_swf
+def test_undeprecate_activity_type():
+    client = boto3.client("swf", region_name="us-east-1")
+    client.register_domain(
+        name="test-domain", workflowExecutionRetentionPeriodInDays="60"
+    )
+    client.register_activity_type(
+        domain="test-domain", name="test-activity", version="v1.0"
+    )
+    client.deprecate_activity_type(
+        domain="test-domain", activityType={"name": "test-activity", "version": "v1.0"}
+    )
+    client.undeprecate_activity_type(
+        domain="test-domain", activityType={"name": "test-activity", "version": "v1.0"}
+    )
+
+    resp = client.describe_activity_type(
+        domain="test-domain", activityType={"name": "test-activity", "version": "v1.0"}
+    )
+    resp["typeInfo"]["status"].should.equal("REGISTERED")
+
+
+@mock_swf
+def test_undeprecate_already_undeprecated_activity_type():
+    client = boto3.client("swf", region_name="us-east-1")
+    client.register_domain(
+        name="test-domain", workflowExecutionRetentionPeriodInDays="60"
+    )
+    client.register_activity_type(
+        domain="test-domain", name="test-activity", version="v1.0"
+    )
+    client.deprecate_activity_type(
+        domain="test-domain", activityType={"name": "test-activity", "version": "v1.0"}
+    )
+    client.undeprecate_activity_type(
+        domain="test-domain", activityType={"name": "test-activity", "version": "v1.0"}
+    )
+
+    client.undeprecate_activity_type.when.called_with(
+        domain="test-domain", activityType={"name": "test-activity", "version": "v1.0"}
+    ).should.throw(ClientError)
+
+
+@mock_swf
+def test_undeprecate_never_deprecated_activity_type():
+    client = boto3.client("swf", region_name="us-east-1")
+    client.register_domain(
+        name="test-domain", workflowExecutionRetentionPeriodInDays="60"
+    )
+    client.register_activity_type(
+        domain="test-domain", name="test-activity", version="v1.0"
+    )
+
+    client.undeprecate_activity_type.when.called_with(
+        domain="test-domain", activityType={"name": "test-activity", "version": "v1.0"}
+    ).should.throw(ClientError)
+
+
+@mock_swf
+def test_undeprecate_non_existent_activity_type():
+    client = boto3.client("swf", region_name="us-east-1")
+    client.register_domain(
+        name="test-domain", workflowExecutionRetentionPeriodInDays="60"
+    )
+
+    client.undeprecate_activity_type.when.called_with(
+        domain="test-domain", activityType={"name": "test-activity", "version": "v1.0"}
+    ).should.throw(ClientError)
+
+
 # DescribeActivityType endpoint
 @mock_swf_deprecated
 def test_describe_activity_type():
diff --git a/tests/test_swf/responses/test_domains.py b/tests/test_swf/responses/test_domains.py
index 199219d274c9..59ba551a62f1 100644
--- a/tests/test_swf/responses/test_domains.py
+++ b/tests/test_swf/responses/test_domains.py
@@ -1,8 +1,11 @@
 import boto
 from boto.swf.exceptions import SWFResponseError
+import boto3
+from botocore.exceptions import ClientError
 import sure # noqa
 
 from moto import mock_swf_deprecated
+from moto import mock_swf
 
 
 # RegisterDomain endpoint
@@ -94,6 +97,56 @@ def test_deprecate_non_existent_domain():
     )
 
 
+# UndeprecateDomain endpoint
+@mock_swf
+def test_undeprecate_domain():
+    client = boto3.client("swf", region_name="us-east-1")
+    client.register_domain(
+        name="test-domain", workflowExecutionRetentionPeriodInDays="60"
+    )
+    
client.deprecate_domain(name="test-domain") + client.undeprecate_domain(name="test-domain") + + resp = client.describe_domain(name="test-domain") + + resp["domainInfo"]["status"].should.equal("REGISTERED") + + +@mock_swf +def test_undeprecate_already_undeprecated_domain(): + client = boto3.client("swf", region_name="us-east-1") + client.register_domain( + name="test-domain", workflowExecutionRetentionPeriodInDays="60" + ) + client.deprecate_domain(name="test-domain") + client.undeprecate_domain(name="test-domain") + + client.undeprecate_domain.when.called_with(name="test-domain").should.throw( + ClientError + ) + + +@mock_swf +def test_undeprecate_never_deprecated_domain(): + client = boto3.client("swf", region_name="us-east-1") + client.register_domain( + name="test-domain", workflowExecutionRetentionPeriodInDays="60" + ) + + client.undeprecate_domain.when.called_with(name="test-domain").should.throw( + ClientError + ) + + +@mock_swf +def test_undeprecate_non_existent_domain(): + client = boto3.client("swf", region_name="us-east-1") + + client.undeprecate_domain.when.called_with(name="non-existent").should.throw( + ClientError + ) + + # DescribeDomain endpoint @mock_swf_deprecated def test_describe_domain(): diff --git a/tests/test_swf/responses/test_workflow_types.py b/tests/test_swf/responses/test_workflow_types.py index 72aa814d2145..e1990596b690 100644 --- a/tests/test_swf/responses/test_workflow_types.py +++ b/tests/test_swf/responses/test_workflow_types.py @@ -5,6 +5,7 @@ from moto import mock_swf_deprecated from moto import mock_swf from boto.swf.exceptions import SWFResponseError +from botocore.exceptions import ClientError # RegisterWorkflowType endpoint @@ -112,6 +113,77 @@ def test_deprecate_non_existent_workflow_type(): ).should.throw(SWFResponseError) +# UndeprecateWorkflowType endpoint +@mock_swf +def test_undeprecate_workflow_type(): + client = boto3.client("swf", region_name="us-east-1") + client.register_domain( + name="test-domain", workflowExecutionRetentionPeriodInDays="60" + ) + client.register_workflow_type( + domain="test-domain", name="test-workflow", version="v1.0" + ) + client.deprecate_workflow_type( + domain="test-domain", workflowType={"name": "test-workflow", "version": "v1.0"} + ) + client.undeprecate_workflow_type( + domain="test-domain", workflowType={"name": "test-workflow", "version": "v1.0"} + ) + + resp = client.describe_workflow_type( + domain="test-domain", workflowType={"name": "test-workflow", "version": "v1.0"} + ) + resp["typeInfo"]["status"].should.equal("REGISTERED") + + +@mock_swf +def test_undeprecate_already_undeprecated_workflow_type(): + client = boto3.client("swf", region_name="us-east-1") + client.register_domain( + name="test-domain", workflowExecutionRetentionPeriodInDays="60" + ) + client.register_workflow_type( + domain="test-domain", name="test-workflow", version="v1.0" + ) + client.deprecate_workflow_type( + domain="test-domain", workflowType={"name": "test-workflow", "version": "v1.0"} + ) + client.undeprecate_workflow_type( + domain="test-domain", workflowType={"name": "test-workflow", "version": "v1.0"} + ) + + client.undeprecate_workflow_type.when.called_with( + domain="test-domain", workflowType={"name": "test-workflow", "version": "v1.0"} + ).should.throw(ClientError) + + +@mock_swf +def test_undeprecate_never_deprecated_workflow_type(): + client = boto3.client("swf", region_name="us-east-1") + client.register_domain( + name="test-domain", workflowExecutionRetentionPeriodInDays="60" + ) + client.register_workflow_type( + 
domain="test-domain", name="test-workflow", version="v1.0" + ) + + client.undeprecate_workflow_type.when.called_with( + domain="test-domain", workflowType={"name": "test-workflow", "version": "v1.0"} + ).should.throw(ClientError) + + +@mock_swf +def test_undeprecate_non_existent_workflow_type(): + client = boto3.client("swf", region_name="us-east-1") + client.register_domain( + name="test-domain", workflowExecutionRetentionPeriodInDays="60" + ) + + client.undeprecate_workflow_type.when.called_with( + domain="test-domain", workflowType={"name": "test-workflow", "version": "v1.0"} + ).should.throw(ClientError) + + # DescribeWorkflowType endpoint @mock_swf_deprecated def test_describe_workflow_type(): From 27e7336debcfc2d13e2b62738cb9ee7931fa165b Mon Sep 17 00:00:00 2001 From: Laurie O Date: Thu, 5 Mar 2020 23:39:26 +1000 Subject: [PATCH 103/658] Mark new SWF end-points as implemented Signed-off-by: Laurie O --- IMPLEMENTATION_COVERAGE.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/IMPLEMENTATION_COVERAGE.md b/IMPLEMENTATION_COVERAGE.md index a863d483d626..a22cc3bfb566 100644 --- a/IMPLEMENTATION_COVERAGE.md +++ b/IMPLEMENTATION_COVERAGE.md @@ -7272,9 +7272,9 @@ - [X] start_workflow_execution - [ ] tag_resource - [X] terminate_workflow_execution -- [ ] undeprecate_activity_type -- [ ] undeprecate_domain -- [ ] undeprecate_workflow_type +- [X] undeprecate_activity_type +- [X] undeprecate_domain +- [X] undeprecate_workflow_type - [ ] untag_resource ## textract From b19cf8a08538425f5210d5e5cd0447065ef318ca Mon Sep 17 00:00:00 2001 From: Bert Blommers Date: Thu, 5 Mar 2020 15:49:30 +0000 Subject: [PATCH 104/658] #2774 - Run Policy test on different method --- tests/test_core/test_auth.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/tests/test_core/test_auth.py b/tests/test_core/test_auth.py index a8fde5d8c943..767f743dc3e1 100644 --- a/tests/test_core/test_auth.py +++ b/tests/test_core/test_auth.py @@ -275,7 +275,7 @@ def test_access_denied_with_not_allowing_policy(): inline_policy_document = { "Version": "2012-10-17", "Statement": [ - {"Effect": "Allow", "Action": ["ec2:Describe*"], "Resource": "*"} + {"Effect": "Allow", "Action": ["ec2:Run*"], "Resource": "*"} ], } access_key = create_user_with_access_key_and_inline_policy( @@ -288,12 +288,12 @@ def test_access_denied_with_not_allowing_policy(): aws_secret_access_key=access_key["SecretAccessKey"], ) with assert_raises(ClientError) as ex: - client.run_instances(MaxCount=1, MinCount=1) + client.describe_instances() ex.exception.response["Error"]["Code"].should.equal("AccessDenied") ex.exception.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(403) ex.exception.response["Error"]["Message"].should.equal( "User: arn:aws:iam::{account_id}:user/{user_name} is not authorized to perform: {operation}".format( - account_id=ACCOUNT_ID, user_name=user_name, operation="ec2:RunInstances" + account_id=ACCOUNT_ID, user_name=user_name, operation="ec2:DescribeInstances" ) ) From c6b66cb001c3a2b00d5244966fe86fd37dff099a Mon Sep 17 00:00:00 2001 From: Bert Blommers Date: Thu, 5 Mar 2020 17:22:54 +0000 Subject: [PATCH 105/658] Linting --- tests/test_core/test_auth.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/tests/test_core/test_auth.py b/tests/test_core/test_auth.py index 767f743dc3e1..29273cea7273 100644 --- a/tests/test_core/test_auth.py +++ b/tests/test_core/test_auth.py @@ -274,9 +274,7 @@ def test_access_denied_with_not_allowing_policy(): user_name = "test-user" 
inline_policy_document = { "Version": "2012-10-17", - "Statement": [ - {"Effect": "Allow", "Action": ["ec2:Run*"], "Resource": "*"} - ], + "Statement": [{"Effect": "Allow", "Action": ["ec2:Run*"], "Resource": "*"}], } access_key = create_user_with_access_key_and_inline_policy( user_name, inline_policy_document @@ -293,7 +291,9 @@ def test_access_denied_with_not_allowing_policy(): ex.exception.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(403) ex.exception.response["Error"]["Message"].should.equal( "User: arn:aws:iam::{account_id}:user/{user_name} is not authorized to perform: {operation}".format( - account_id=ACCOUNT_ID, user_name=user_name, operation="ec2:DescribeInstances" + account_id=ACCOUNT_ID, + user_name=user_name, + operation="ec2:DescribeInstances", ) ) From fa7f83bc2f10c9157e8b9484e5ef1219e54b2af1 Mon Sep 17 00:00:00 2001 From: "heyder.dias" Date: Thu, 5 Mar 2020 17:05:00 -0300 Subject: [PATCH 106/658] add test to nested if_not_exists and property already exists --- tests/test_dynamodb2/test_dynamodb.py | 25 +++++++++++++++++++++++++ 1 file changed, 25 insertions(+) diff --git a/tests/test_dynamodb2/test_dynamodb.py b/tests/test_dynamodb2/test_dynamodb.py index 180f460c0eca..d36fdc7fa691 100644 --- a/tests/test_dynamodb2/test_dynamodb.py +++ b/tests/test_dynamodb2/test_dynamodb.py @@ -3634,6 +3634,31 @@ def test_update_supports_list_append_with_nested_if_not_exists_operation(): ) +@mock_dynamodb2 +def test_update_supports_list_append_with_nested_if_not_exists_operation_and_property_already_exists(): + dynamo = boto3.resource("dynamodb", region_name="us-west-1") + table_name = "test" + + dynamo.create_table( + TableName=table_name, + AttributeDefinitions=[{"AttributeName": "Id", "AttributeType": "S"}], + KeySchema=[{"AttributeName": "Id", "KeyType": "HASH"}], + ProvisionedThroughput={"ReadCapacityUnits": 20, "WriteCapacityUnits": 20}, + ) + + table = dynamo.Table(table_name) + + table.put_item(Item={"Id": "item-id", "event_history":["other_value"]}) + table.update_item( + Key={"Id": "item-id"}, + UpdateExpression="SET event_history = list_append(if_not_exists(event_history, :empty_list), :new_value)", + ExpressionAttributeValues={":empty_list": [], ":new_value": ["some_value"]}, + ) + table.get_item(Key={"Id": "item-id"})["Item"].should.equal( + {"Id": "item-id", "event_history": ["other_value", "some_value"]} + ) + + @mock_dynamodb2 def test_update_catches_invalid_list_append_operation(): client = boto3.client("dynamodb", region_name="us-east-1") From a9b06776671c081733ae82c21ba191a4f1965f3d Mon Sep 17 00:00:00 2001 From: addomafi Date: Thu, 5 Mar 2020 18:11:49 -0300 Subject: [PATCH 107/658] #2784 Adding missing support for EbsConfiguration on EMR run_job_flow --- moto/emr/models.py | 2 ++ tests/test_emr/test_emr_boto3.py | 23 +++++++++++++++++++++++ 2 files changed, 25 insertions(+) diff --git a/moto/emr/models.py b/moto/emr/models.py index d9ec2fd691c3..72c588166c59 100644 --- a/moto/emr/models.py +++ b/moto/emr/models.py @@ -35,6 +35,7 @@ def __init__( name=None, id=None, bid_price=None, + ebs_configuration=None, ): self.id = id or random_instance_group_id() @@ -51,6 +52,7 @@ def __init__( self.num_instances = instance_count self.role = instance_role self.type = instance_type + self.ebs_configuration = ebs_configuration self.creation_datetime = datetime.now(pytz.utc) self.start_datetime = datetime.now(pytz.utc) diff --git a/tests/test_emr/test_emr_boto3.py b/tests/test_emr/test_emr_boto3.py index d849247bdc91..fc7170ba9f94 100644 --- 
a/tests/test_emr/test_emr_boto3.py +++ b/tests/test_emr/test_emr_boto3.py @@ -61,6 +61,23 @@ "Name": "task-2", "BidPrice": "0.05", }, + { + "InstanceCount": 10, + "InstanceRole": "TASK", + "InstanceType": "c1.xlarge", + "Market": "SPOT", + "Name": "task-3", + "BidPrice": "0.05", + "EbsConfiguration": { + "EbsBlockDeviceConfigs": [ + { + "VolumeSpecification": {"VolumeType": "gp2", "SizeInGB": 800}, + "VolumesPerInstance": 6, + }, + ], + "EbsOptimized": True, + }, + }, ] @@ -447,6 +464,8 @@ def test_run_job_flow_with_instance_groups(): x["Market"].should.equal(y["Market"]) if "BidPrice" in y: x["BidPrice"].should.equal(y["BidPrice"]) + if "EbsConfiguration" in y: + x["EbsConfiguration"].should.equal(y["EbsConfiguration"]) @mock_emr @@ -604,6 +623,8 @@ def test_instance_groups(): y = input_groups[x["Name"]] if hasattr(y, "BidPrice"): x["BidPrice"].should.equal("BidPrice") + if "EbsConfiguration" in y: + x["EbsConfiguration"].should.equal(y["EbsConfiguration"]) x["CreationDateTime"].should.be.a("datetime.datetime") # x['EndDateTime'].should.be.a('datetime.datetime') x.should.have.key("InstanceGroupId") @@ -623,6 +644,8 @@ def test_instance_groups(): y = input_groups[x["Name"]] if hasattr(y, "BidPrice"): x["BidPrice"].should.equal("BidPrice") + if "EbsConfiguration" in y: + x["EbsConfiguration"].should.equal(y["EbsConfiguration"]) # Configurations # EbsBlockDevices # EbsOptimized From f5080e539dd140e556c0a280d9770b552d9aeaba Mon Sep 17 00:00:00 2001 From: "heyder.dias" Date: Thu, 5 Mar 2020 18:39:20 -0300 Subject: [PATCH 108/658] fix lint check --- moto/dynamodb2/models.py | 4 +--- tests/test_dynamodb2/test_dynamodb.py | 2 +- 2 files changed, 2 insertions(+), 4 deletions(-) diff --git a/moto/dynamodb2/models.py b/moto/dynamodb2/models.py index 7d995486d57d..8e5a6175518e 100644 --- a/moto/dynamodb2/models.py +++ b/moto/dynamodb2/models.py @@ -452,9 +452,7 @@ def _get_appended_list(self, value, expression_attribute_values): if old_list_key.startswith("if_not_exists"): old_list = self._get_default(old_list_key) if not isinstance(old_list, DynamoType): - old_list = DynamoType( - expression_attribute_values[old_list] - ) + old_list = DynamoType(expression_attribute_values[old_list]) else: old_list = self.attrs[old_list_key.split(".")[0]] if "." 
in old_list_key: diff --git a/tests/test_dynamodb2/test_dynamodb.py b/tests/test_dynamodb2/test_dynamodb.py index d36fdc7fa691..428b58f8109a 100644 --- a/tests/test_dynamodb2/test_dynamodb.py +++ b/tests/test_dynamodb2/test_dynamodb.py @@ -3648,7 +3648,7 @@ def test_update_supports_list_append_with_nested_if_not_exists_operation_and_pro table = dynamo.Table(table_name) - table.put_item(Item={"Id": "item-id", "event_history":["other_value"]}) + table.put_item(Item={"Id": "item-id", "event_history": ["other_value"]}) table.update_item( Key={"Id": "item-id"}, UpdateExpression="SET event_history = list_append(if_not_exists(event_history, :empty_list), :new_value)", From c8dfbe95753fcaa01578eda2798d47c62c86102f Mon Sep 17 00:00:00 2001 From: addomafi Date: Fri, 6 Mar 2020 15:12:44 -0300 Subject: [PATCH 109/658] #2784 Adding missing support for EbsConfiguration on EMR instance groups --- moto/emr/responses.py | 98 +++++++++++++++++++++++++++++++- tests/test_emr/test_emr_boto3.py | 23 ++++---- 2 files changed, 109 insertions(+), 12 deletions(-) diff --git a/moto/emr/responses.py b/moto/emr/responses.py index 38b9774e1606..3bb595bbb365 100644 --- a/moto/emr/responses.py +++ b/moto/emr/responses.py @@ -73,6 +73,8 @@ def add_instance_groups(self): instance_groups = self._get_list_prefix("InstanceGroups.member") for item in instance_groups: item["instance_count"] = int(item["instance_count"]) + # Adding support to EbsConfiguration + self._parse_ebs_configuration(item) instance_groups = self.backend.add_instance_groups(jobflow_id, instance_groups) template = self.response_template(ADD_INSTANCE_GROUPS_TEMPLATE) return template.render(instance_groups=instance_groups) @@ -324,6 +326,8 @@ def run_job_flow(self): if instance_groups: for ig in instance_groups: ig["instance_count"] = int(ig["instance_count"]) + # Adding support to EbsConfiguration + self._parse_ebs_configuration(ig) self.backend.add_instance_groups(cluster.id, instance_groups) tags = self._get_list_prefix("Tags.member") @@ -335,6 +339,83 @@ def run_job_flow(self): template = self.response_template(RUN_JOB_FLOW_TEMPLATE) return template.render(cluster=cluster) + def _has_key_prefix(self, key_prefix, value): + for key in value: # iter on both keys and values + if key.startswith(key_prefix): + return True + return False + + def _parse_ebs_configuration(self, instance_group): + key_ebs_config = "ebs_configuration" + ebs_configuration = dict() + # Filter only EBS config keys + for key in instance_group: + if key.startswith(key_ebs_config): + ebs_configuration[key] = instance_group[key] + + if len(ebs_configuration) > 0: + # Key that should be extracted + ebs_optimized = "ebs_optimized" + ebs_block_device_configs = "ebs_block_device_configs" + volume_specification = "volume_specification" + size_in_gb = "size_in_gb" + volume_type = "volume_type" + iops = "iops" + volumes_per_instance = "volumes_per_instance" + + key_ebs_optimized = f"{key_ebs_config}._{ebs_optimized}" + # EbsOptimized config + if key_ebs_optimized in ebs_configuration: + instance_group.pop(key_ebs_optimized) + ebs_configuration[ebs_optimized] = ebs_configuration.pop( + key_ebs_optimized + ) + + # Ebs Blocks + ebs_blocks = [] + idx = 1 + keyfmt = f"{key_ebs_config}._{ebs_block_device_configs}.member.{{}}" + key = keyfmt.format(idx) + while self._has_key_prefix(key, ebs_configuration): + vlespc_keyfmt = f"{key}._{volume_specification}._{{}}" + vol_size = vlespc_keyfmt.format(size_in_gb) + vol_iops = vlespc_keyfmt.format(iops) + vol_type = vlespc_keyfmt.format(volume_type) + + 
ebs_block = dict() + ebs_block[volume_specification] = dict() + if vol_size in ebs_configuration: + instance_group.pop(vol_size) + ebs_block[volume_specification][size_in_gb] = int( + ebs_configuration.pop(vol_size) + ) + if vol_iops in ebs_configuration: + instance_group.pop(vol_iops) + ebs_block[volume_specification][iops] = ebs_configuration.pop( + vol_iops + ) + if vol_type in ebs_configuration: + instance_group.pop(vol_type) + ebs_block[volume_specification][ + volume_type + ] = ebs_configuration.pop(vol_type) + + per_instance = f"{key}._{volumes_per_instance}" + if per_instance in ebs_configuration: + instance_group.pop(per_instance) + ebs_block[volumes_per_instance] = int( + ebs_configuration.pop(per_instance) + ) + + if len(ebs_block) > 0: + ebs_blocks.append(ebs_block) + idx += 1 + key = keyfmt.format(idx) + + if len(ebs_blocks) > 0: + ebs_configuration[ebs_block_device_configs] = ebs_blocks + instance_group[key_ebs_config] = ebs_configuration + @generate_boto3_response("SetTerminationProtection") def set_termination_protection(self): termination_protection = self._get_param("TerminationProtected") @@ -754,7 +835,22 @@ def terminate_job_flows(self): {{ instance_group.bid_price }} {% endif %} - + {% if instance_group.ebs_configuration is not none %} + + {% for ebs_block_device in instance_group.ebs_configuration.ebs_block_device_configs %} + {% for i in range(ebs_block_device.volumes_per_instance) %} + + + {{ebs_block_device.volume_specification.volume_type}} + {{ebs_block_device.volume_specification.iops}} + {{ebs_block_device.volume_specification.size_in_gb}} + + /dev/sd{{i}} + + {% endfor %} + {% endfor %} + + {% endif %} {% if instance_group.ebs_optimized is not none %} {{ instance_group.ebs_optimized }} {% endif %} diff --git a/tests/test_emr/test_emr_boto3.py b/tests/test_emr/test_emr_boto3.py index fc7170ba9f94..a00de164b332 100644 --- a/tests/test_emr/test_emr_boto3.py +++ b/tests/test_emr/test_emr_boto3.py @@ -60,14 +60,6 @@ "Market": "SPOT", "Name": "task-2", "BidPrice": "0.05", - }, - { - "InstanceCount": 10, - "InstanceRole": "TASK", - "InstanceType": "c1.xlarge", - "Market": "SPOT", - "Name": "task-3", - "BidPrice": "0.05", "EbsConfiguration": { "EbsBlockDeviceConfigs": [ { @@ -623,8 +615,6 @@ def test_instance_groups(): y = input_groups[x["Name"]] if hasattr(y, "BidPrice"): x["BidPrice"].should.equal("BidPrice") - if "EbsConfiguration" in y: - x["EbsConfiguration"].should.equal(y["EbsConfiguration"]) x["CreationDateTime"].should.be.a("datetime.datetime") # x['EndDateTime'].should.be.a('datetime.datetime') x.should.have.key("InstanceGroupId") @@ -645,7 +635,18 @@ def test_instance_groups(): if hasattr(y, "BidPrice"): x["BidPrice"].should.equal("BidPrice") if "EbsConfiguration" in y: - x["EbsConfiguration"].should.equal(y["EbsConfiguration"]) + total_volumes = 0 + total_size = 0 + for ebs_block in y["EbsConfiguration"]["EbsBlockDeviceConfigs"]: + total_volumes += ebs_block["VolumesPerInstance"] + total_size += ebs_block["VolumeSpecification"]["SizeInGB"] + # Multiply by total volumes + total_size = total_size * total_volumes + comp_total_size = 0 + for ebs_block in x["EbsBlockDevices"]: + comp_total_size += ebs_block["VolumeSpecification"]["SizeInGB"] + len(x["EbsBlockDevices"]).should.equal(total_volumes) + comp_total_size.should.equal(comp_total_size) # Configurations # EbsBlockDevices # EbsOptimized From 23771549c24a8732205bc06f69fee1b02ca8da6a Mon Sep 17 00:00:00 2001 From: addomafi Date: Fri, 6 Mar 2020 18:10:39 -0300 Subject: [PATCH 110/658] #2784 Implementing 
assertions for test case with instance groups

---
 tests/test_emr/test_emr_boto3.py | 29 ++++++++++++++++-------------
 1 file changed, 16 insertions(+), 13 deletions(-)

diff --git a/tests/test_emr/test_emr_boto3.py b/tests/test_emr/test_emr_boto3.py
index a00de164b332..524cdcd55ec7 100644
--- a/tests/test_emr/test_emr_boto3.py
+++ b/tests/test_emr/test_emr_boto3.py
@@ -438,6 +438,19 @@ def test_run_job_flow_with_visible_to_all_users():
     resp = client.describe_cluster(ClusterId=cluster_id)
     resp["Cluster"]["VisibleToAllUsers"].should.equal(expected)
 
+def _do_assertion_ebs_configuration(x, y):
+    total_volumes = 0
+    total_size = 0
+    for ebs_block in y["EbsConfiguration"]["EbsBlockDeviceConfigs"]:
+        total_volumes += ebs_block["VolumesPerInstance"]
+        total_size += ebs_block["VolumeSpecification"]["SizeInGB"]
+    # Multiply by total volumes
+    total_size = total_size * total_volumes
+    comp_total_size = 0
+    for ebs_block in x["EbsBlockDevices"]:
+        comp_total_size += ebs_block["VolumeSpecification"]["SizeInGB"]
+    len(x["EbsBlockDevices"]).should.equal(total_volumes)
+    comp_total_size.should.equal(total_size)
 
 @mock_emr
 def test_run_job_flow_with_instance_groups():
@@ -456,8 +469,9 @@ def test_run_job_flow_with_instance_groups():
         x["Market"].should.equal(y["Market"])
         if "BidPrice" in y:
             x["BidPrice"].should.equal(y["BidPrice"])
+
         if "EbsConfiguration" in y:
-            x["EbsConfiguration"].should.equal(y["EbsConfiguration"])
+            _do_assertion_ebs_configuration(x, y)
 
 
 @mock_emr
@@ -635,18 +649,7 @@ def test_instance_groups():
         if hasattr(y, "BidPrice"):
             x["BidPrice"].should.equal("BidPrice")
         if "EbsConfiguration" in y:
-            total_volumes = 0
-            total_size = 0
-            for ebs_block in y["EbsConfiguration"]["EbsBlockDeviceConfigs"]:
-                total_volumes += ebs_block["VolumesPerInstance"]
-                total_size += ebs_block["VolumeSpecification"]["SizeInGB"]
-            # Multiply by total volumes
-            total_size = total_size * total_volumes
-            comp_total_size = 0
-            for ebs_block in x["EbsBlockDevices"]:
-                comp_total_size += ebs_block["VolumeSpecification"]["SizeInGB"]
-            len(x["EbsBlockDevices"]).should.equal(total_volumes)
-            comp_total_size.should.equal(comp_total_size)
+            _do_assertion_ebs_configuration(x, y)
         # Configurations
         # EbsBlockDevices
         # EbsOptimized

From c6eca1843435413615b1650e2161a0f4890819d7 Mon Sep 17 00:00:00 2001
From: addomafi
Date: Fri, 6 Mar 2020 18:11:07 -0300
Subject: [PATCH 111/658] Reformat

---
 tests/test_emr/test_emr_boto3.py | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/tests/test_emr/test_emr_boto3.py b/tests/test_emr/test_emr_boto3.py
index 524cdcd55ec7..adfc3fa9ca5b 100644
--- a/tests/test_emr/test_emr_boto3.py
+++ b/tests/test_emr/test_emr_boto3.py
@@ -438,6 +438,7 @@ def test_run_job_flow_with_visible_to_all_users():
     resp = client.describe_cluster(ClusterId=cluster_id)
     resp["Cluster"]["VisibleToAllUsers"].should.equal(expected)
 
+
 def _do_assertion_ebs_configuration(x, y):
     total_volumes = 0
     total_size = 0
@@ -452,6 +453,7 @@ def _do_assertion_ebs_configuration(x, y):
     len(x["EbsBlockDevices"]).should.equal(total_volumes)
     comp_total_size.should.equal(total_size)
 
+
 @mock_emr
 def test_run_job_flow_with_instance_groups():
     input_groups = dict((g["Name"], g) for g in input_instance_groups)

From 155cf82791cef359ee7d6404da14090a18396d1b Mon Sep 17 00:00:00 2001
From: addomafi
Date: Sat, 7 Mar 2020 07:43:59 -0300
Subject: [PATCH 112/658] Keeping support to python 2

---
 moto/emr/responses.py | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/moto/emr/responses.py b/moto/emr/responses.py
index 
3bb595bbb365..a9a4aae93c14 100644 --- a/moto/emr/responses.py +++ b/moto/emr/responses.py @@ -363,7 +363,7 @@ def _parse_ebs_configuration(self, instance_group): iops = "iops" volumes_per_instance = "volumes_per_instance" - key_ebs_optimized = f"{key_ebs_config}._{ebs_optimized}" + key_ebs_optimized = "{0}._{1}".format(key_ebs_config, ebs_optimized) # EbsOptimized config if key_ebs_optimized in ebs_configuration: instance_group.pop(key_ebs_optimized) @@ -374,10 +374,10 @@ def _parse_ebs_configuration(self, instance_group): # Ebs Blocks ebs_blocks = [] idx = 1 - keyfmt = f"{key_ebs_config}._{ebs_block_device_configs}.member.{{}}" + keyfmt = "{0}._{1}.member.{{}}".format(key_ebs_config, ebs_block_device_configs) key = keyfmt.format(idx) while self._has_key_prefix(key, ebs_configuration): - vlespc_keyfmt = f"{key}._{volume_specification}._{{}}" + vlespc_keyfmt = "{0}._{1}._{{}}".format(key, volume_specification) vol_size = vlespc_keyfmt.format(size_in_gb) vol_iops = vlespc_keyfmt.format(iops) vol_type = vlespc_keyfmt.format(volume_type) @@ -400,7 +400,7 @@ def _parse_ebs_configuration(self, instance_group): volume_type ] = ebs_configuration.pop(vol_type) - per_instance = f"{key}._{volumes_per_instance}" + per_instance = "{0}._{1}".format(key, volumes_per_instance) if per_instance in ebs_configuration: instance_group.pop(per_instance) ebs_block[volumes_per_instance] = int( From a6c1d474128d4097a157b300a4b24948e32656ea Mon Sep 17 00:00:00 2001 From: addomafi Date: Sat, 7 Mar 2020 08:21:27 -0300 Subject: [PATCH 113/658] Reformat --- moto/emr/responses.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/moto/emr/responses.py b/moto/emr/responses.py index a9a4aae93c14..3708db0ed268 100644 --- a/moto/emr/responses.py +++ b/moto/emr/responses.py @@ -374,7 +374,9 @@ def _parse_ebs_configuration(self, instance_group): # Ebs Blocks ebs_blocks = [] idx = 1 - keyfmt = "{0}._{1}.member.{{}}".format(key_ebs_config, ebs_block_device_configs) + keyfmt = "{0}._{1}.member.{{}}".format( + key_ebs_config, ebs_block_device_configs + ) key = keyfmt.format(idx) while self._has_key_prefix(key, ebs_configuration): vlespc_keyfmt = "{0}._{1}._{{}}".format(key, volume_specification) From 28af7412f80d973a2bf34130fc2926519861c06a Mon Sep 17 00:00:00 2001 From: Dick Marinus Date: Sun, 8 Mar 2020 20:56:21 +0100 Subject: [PATCH 114/658] Change RESTError to JsonRESTError for ImageNotFoundException, update test to expect ImageNotFoundException --- moto/ecr/exceptions.py | 4 ++-- tests/test_ecr/test_ecr_boto3.py | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/moto/ecr/exceptions.py b/moto/ecr/exceptions.py index 9b55f0589091..6d1713a6add7 100644 --- a/moto/ecr/exceptions.py +++ b/moto/ecr/exceptions.py @@ -1,5 +1,5 @@ from __future__ import unicode_literals -from moto.core.exceptions import RESTError +from moto.core.exceptions import RESTError, JsonRESTError class RepositoryNotFoundException(RESTError): @@ -13,7 +13,7 @@ def __init__(self, repository_name, registry_id): ) -class ImageNotFoundException(RESTError): +class ImageNotFoundException(JsonRESTError): code = 400 def __init__(self, image_id, repository_name, registry_id): diff --git a/tests/test_ecr/test_ecr_boto3.py b/tests/test_ecr/test_ecr_boto3.py index 82a2c75217e6..6c6840a7ed74 100644 --- a/tests/test_ecr/test_ecr_boto3.py +++ b/tests/test_ecr/test_ecr_boto3.py @@ -538,7 +538,7 @@ def test_describe_image_that_doesnt_exist(): repositoryName="test_repository", imageIds=[{"imageTag": "testtag"}], registryId="123", - 
).should.throw(ClientError, error_msg1) + ).should.throw(client.exceptions.ImageNotFoundException, error_msg1) error_msg2 = re.compile( r".*The repository with name 'repo-that-doesnt-exist' does not exist in the registry with id '123'.*", From 7b5613b3312f35ba5d93f17ccfe2d6891b162615 Mon Sep 17 00:00:00 2001 From: Bert Blommers Date: Mon, 9 Mar 2020 12:47:44 +0000 Subject: [PATCH 115/658] #2774 - Re-add test, and update requirements to working botocore --- requirements-dev.txt | 2 +- tests/test_core/test_auth.py | 34 ++++++++++++++++++++++++++++++++++ 2 files changed, 35 insertions(+), 1 deletion(-) diff --git a/requirements-dev.txt b/requirements-dev.txt index 2aaca300b428..4e5f4e8a0d7f 100644 --- a/requirements-dev.txt +++ b/requirements-dev.txt @@ -10,7 +10,7 @@ freezegun flask boto>=2.45.0 boto3>=1.4.4 -botocore>=1.12.13 +botocore>=1.15.13 six>=1.9 parameterized>=0.7.0 prompt-toolkit==1.0.14 diff --git a/tests/test_core/test_auth.py b/tests/test_core/test_auth.py index 29273cea7273..b391d82c8082 100644 --- a/tests/test_core/test_auth.py +++ b/tests/test_core/test_auth.py @@ -298,6 +298,40 @@ def test_access_denied_with_not_allowing_policy(): ) +@set_initial_no_auth_action_count(3) +@mock_ec2 +def test_access_denied_for_run_instances(): + # https://github.com/spulec/moto/issues/2774 + # The run-instances method was broken between botocore versions 1.15.8 and 1.15.12 + # This was due to the inclusion of '"idempotencyToken":true' in the response, somehow altering the signature and breaking the authentication + # Keeping this test in place in case botocore decides to break again + user_name = "test-user" + inline_policy_document = { + "Version": "2012-10-17", + "Statement": [ + {"Effect": "Allow", "Action": ["ec2:Describe*"], "Resource": "*"} + ], + } + access_key = create_user_with_access_key_and_inline_policy( + user_name, inline_policy_document + ) + client = boto3.client( + "ec2", + region_name="us-east-1", + aws_access_key_id=access_key["AccessKeyId"], + aws_secret_access_key=access_key["SecretAccessKey"], + ) + with assert_raises(ClientError) as ex: + client.run_instances(MaxCount=1, MinCount=1) + ex.exception.response["Error"]["Code"].should.equal("AccessDenied") + ex.exception.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(403) + ex.exception.response["Error"]["Message"].should.equal( + "User: arn:aws:iam::{account_id}:user/{user_name} is not authorized to perform: {operation}".format( + account_id=ACCOUNT_ID, user_name=user_name, operation="ec2:RunInstances", + ) + ) + + @set_initial_no_auth_action_count(3) @mock_ec2 def test_access_denied_with_denying_policy(): From dc98fca8532ad6a031948e263841f7cd1944dab1 Mon Sep 17 00:00:00 2001 From: Bert Blommers Date: Mon, 9 Mar 2020 15:14:51 +0000 Subject: [PATCH 116/658] #718 - Allow filtering by multiple tags --- moto/ec2/utils.py | 7 +++++++ tests/test_ec2/test_tags.py | 33 +++++++++++++++++++++++++++++++++ 2 files changed, 40 insertions(+) diff --git a/moto/ec2/utils.py b/moto/ec2/utils.py index 2301248c1c50..18a038b10b03 100644 --- a/moto/ec2/utils.py +++ b/moto/ec2/utils.py @@ -252,6 +252,7 @@ def dhcp_configuration_from_querystring(querystring, option="DhcpConfiguration") def filters_from_querystring(querystring_dict): response_values = {} + last_tag_key = None for key, value in querystring_dict.items(): match = re.search(r"Filter.(\d).Name", key) if match: @@ -262,6 +263,10 @@ def filters_from_querystring(querystring_dict): for filter_key, filter_value in querystring_dict.items() if 
filter_key.startswith(value_prefix) ] + if value[0] == "tag-key": + last_tag_key = "tag:" + filter_values[0] + elif last_tag_key and value[0] == "tag-value": + response_values[last_tag_key] = filter_values response_values[value[0]] = filter_values return response_values @@ -329,6 +334,8 @@ def tag_filter_matches(obj, filter_name, filter_values): tag_values = get_obj_tag_names(obj) elif filter_name == "tag-value": tag_values = get_obj_tag_values(obj) + elif filter_name.startswith("tag:"): + tag_values = get_obj_tag_values(obj) else: tag_values = [get_obj_tag(obj, filter_name) or ""] diff --git a/tests/test_ec2/test_tags.py b/tests/test_ec2/test_tags.py index 29d2cb1e361d..789f7b976144 100644 --- a/tests/test_ec2/test_tags.py +++ b/tests/test_ec2/test_tags.py @@ -468,3 +468,36 @@ def test_delete_tag_empty_resource(): ex.exception.response["Error"]["Message"].should.equal( "The request must contain the parameter resourceIdSet" ) + + +@mock_ec2 +def test_retrieve_resource_with_multiple_tags(): + ec2 = boto3.resource("ec2") + blue, green = ec2.create_instances(ImageId="ANY_ID", MinCount=2, MaxCount=2) + ec2.create_tags( + Resources=[blue.instance_id], + Tags=[ + {"Key": "environment", "Value": "blue"}, + {"Key": "application", "Value": "api"}, + ], + ) + ec2.create_tags( + Resources=[green.instance_id], + Tags=[ + {"Key": "environment", "Value": "green"}, + {"Key": "application", "Value": "api"}, + ], + ) + green_instances = list(ec2.instances.filter(Filters=(get_filter("green")))) + green_instances.should.equal([green]) + blue_instances = list(ec2.instances.filter(Filters=(get_filter("blue")))) + blue_instances.should.equal([blue]) + + +def get_filter(color): + return [ + {"Name": "tag-key", "Values": ["application"]}, + {"Name": "tag-value", "Values": ["api"]}, + {"Name": "tag-key", "Values": ["environment"]}, + {"Name": "tag-value", "Values": [color]}, + ] From 0e489a8a287c36af36a9728fe62699b84575ebd4 Mon Sep 17 00:00:00 2001 From: Steven Davidovitz Date: Tue, 10 Mar 2020 01:05:46 -0700 Subject: [PATCH 117/658] support mock versions < 3.0.5 --- requirements-dev.txt | 2 +- setup.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/requirements-dev.txt b/requirements-dev.txt index 4e5f4e8a0d7f..2b43bcf9d863 100644 --- a/requirements-dev.txt +++ b/requirements-dev.txt @@ -1,5 +1,5 @@ -r requirements.txt -mock==3.0.5 # Last version compatible with Python 2.7 +mock<=3.0.5 # Last version compatible with Python 2.7 nose black; python_version >= '3.6' regex==2019.11.1; python_version >= '3.6' # Needed for black diff --git a/setup.py b/setup.py index b806f7bae494..9bb7cf522578 100755 --- a/setup.py +++ b/setup.py @@ -42,7 +42,7 @@ def get_version(): "pytz", "python-dateutil<3.0.0,>=2.1", "python-jose<4.0.0", - "mock==3.0.5", + "mock<=3.0.5", "docker>=2.5.1", "jsondiff>=1.1.2", "aws-xray-sdk!=0.96,>=0.93", From 9eeb375911420416d5b6ad78ca3062b231a85ceb Mon Sep 17 00:00:00 2001 From: Bert Blommers Date: Tue, 10 Mar 2020 09:26:44 +0000 Subject: [PATCH 118/658] Add region to test case --- tests/test_ec2/test_tags.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/test_ec2/test_tags.py b/tests/test_ec2/test_tags.py index 789f7b976144..92ed18dd46f9 100644 --- a/tests/test_ec2/test_tags.py +++ b/tests/test_ec2/test_tags.py @@ -472,7 +472,7 @@ def test_delete_tag_empty_resource(): @mock_ec2 def test_retrieve_resource_with_multiple_tags(): - ec2 = boto3.resource("ec2") + ec2 = boto3.resource("ec2", region_name="us-west-1") blue, green = 
ec2.create_instances(ImageId="ANY_ID", MinCount=2, MaxCount=2) ec2.create_tags( Resources=[blue.instance_id], From 994ab9aadf23e4d4f4f64bb582a73848a9113416 Mon Sep 17 00:00:00 2001 From: Bert Blommers Date: Tue, 10 Mar 2020 12:42:18 +0000 Subject: [PATCH 119/658] #718 - EC2 - Guarantee order when filtering tags from querystring --- moto/ec2/utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/moto/ec2/utils.py b/moto/ec2/utils.py index 18a038b10b03..74fe3d27b736 100644 --- a/moto/ec2/utils.py +++ b/moto/ec2/utils.py @@ -253,7 +253,7 @@ def dhcp_configuration_from_querystring(querystring, option="DhcpConfiguration") def filters_from_querystring(querystring_dict): response_values = {} last_tag_key = None - for key, value in querystring_dict.items(): + for key, value in sorted(querystring_dict.items()): match = re.search(r"Filter.(\d).Name", key) if match: filter_index = match.groups()[0] From f17d5f8e4d14d36fb662eb360f10158a78df8a0c Mon Sep 17 00:00:00 2001 From: Bert Blommers Date: Tue, 10 Mar 2020 12:56:33 +0000 Subject: [PATCH 120/658] #657 - S3 - Verify content type is set/returned as appropriate --- tests/test_s3/test_s3.py | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) diff --git a/tests/test_s3/test_s3.py b/tests/test_s3/test_s3.py index 48655ee17385..48939be267fd 100644 --- a/tests/test_s3/test_s3.py +++ b/tests/test_s3/test_s3.py @@ -11,6 +11,7 @@ from functools import wraps from gzip import GzipFile from io import BytesIO +import mimetypes import zlib import pickle @@ -2024,6 +2025,24 @@ def test_boto3_get_object(): e.exception.response["Error"]["Code"].should.equal("NoSuchKey") +@mock_s3 +def test_boto3_s3_content_type(): + s3 = boto3.resource("s3", region_name=DEFAULT_REGION_NAME) + my_bucket = s3.Bucket("my-cool-bucket") + my_bucket.create() + local_path = "test_s3.py" + s3_path = local_path + s3 = boto3.resource("s3", verify=False) + + with open(local_path, "rb") as _file: + content_type = mimetypes.guess_type(local_path) + s3.Object(my_bucket.name, s3_path).put( + ContentType=content_type[0], Body=_file, ACL="public-read" + ) + + s3.Object(my_bucket.name, s3_path).content_type.should.equal(content_type[0]) + + @mock_s3 def test_boto3_get_missing_object_with_part_number(): s3 = boto3.resource("s3", region_name=DEFAULT_REGION_NAME) From 6ba00d9ad19854fa78532de907a114ed0d49ee42 Mon Sep 17 00:00:00 2001 From: Bert Blommers Date: Tue, 10 Mar 2020 13:25:40 +0000 Subject: [PATCH 121/658] #1054 - DynamoDB - Improve error handling for put_item without keys --- moto/dynamodb2/models.py | 7 ++++++- moto/dynamodb2/responses.py | 6 ++---- tests/test_dynamodb2/test_dynamodb.py | 18 ++++++++++++++++++ 3 files changed, 26 insertions(+), 5 deletions(-) diff --git a/moto/dynamodb2/models.py b/moto/dynamodb2/models.py index 8e5a6175518e..1527821ed95c 100644 --- a/moto/dynamodb2/models.py +++ b/moto/dynamodb2/models.py @@ -792,6 +792,12 @@ def put_item( expression_attribute_values=None, overwrite=False, ): + if self.hash_key_attr not in item_attrs.keys(): + raise ValueError( + "One or more parameter values were invalid: Missing the key " + + self.hash_key_attr + + " in the item" + ) hash_value = DynamoType(item_attrs.get(self.hash_key_attr)) if self.has_range_key: range_value = DynamoType(item_attrs.get(self.range_key_attr)) @@ -808,7 +814,6 @@ def put_item( else: lookup_range_value = DynamoType(expected_range_value) current = self.get_item(hash_value, lookup_range_value) - item = Item( hash_value, self.hash_key_type, range_value, self.range_key_type, item_attrs ) 
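For context, a minimal sketch (not part of the diff) of how this new put_item validation surfaces to a boto3 caller. The table name ("demo") and key attribute ("structure_id") below are illustrative assumptions that mirror the regression test added further down:

    import boto3
    from botocore.exceptions import ClientError
    from moto import mock_dynamodb2


    @mock_dynamodb2
    def demo_put_item_missing_hash_key():
        client = boto3.client("dynamodb", region_name="us-east-1")
        client.create_table(
            TableName="demo",
            AttributeDefinitions=[
                {"AttributeName": "structure_id", "AttributeType": "S"}
            ],
            KeySchema=[{"AttributeName": "structure_id", "KeyType": "HASH"}],
            ProvisionedThroughput={"ReadCapacityUnits": 1, "WriteCapacityUnits": 1},
        )
        try:
            # The hash key "structure_id" is missing from the item, so the new
            # check in Table.put_item raises instead of silently storing an
            # unkeyed item.
            client.put_item(TableName="demo", Item={"other_attr": {"S": "abcdef"}})
        except ClientError as err:
            print(err.response["Error"]["Message"])


    demo_put_item_missing_hash_key()
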
diff --git a/moto/dynamodb2/responses.py b/moto/dynamodb2/responses.py index d3767c3fdda3..9bcb0d541351 100644 --- a/moto/dynamodb2/responses.py +++ b/moto/dynamodb2/responses.py @@ -293,11 +293,9 @@ def put_item(self): except ItemSizeTooLarge: er = "com.amazonaws.dynamodb.v20111205#ValidationException" return self.error(er, ItemSizeTooLarge.message) - except ValueError: + except ValueError as ve: er = "com.amazonaws.dynamodb.v20111205#ConditionalCheckFailedException" - return self.error( - er, "A condition specified in the operation could not be evaluated." - ) + return self.error(er, str(ve)) if result: item_dict = result.to_json() diff --git a/tests/test_dynamodb2/test_dynamodb.py b/tests/test_dynamodb2/test_dynamodb.py index 428b58f8109a..0f2be6a2eaea 100644 --- a/tests/test_dynamodb2/test_dynamodb.py +++ b/tests/test_dynamodb2/test_dynamodb.py @@ -1344,6 +1344,24 @@ def test_get_item_returns_consumed_capacity(): assert "TableName" in response["ConsumedCapacity"] +@mock_dynamodb2 +def test_put_item_nonexisting_hash_key(): + dynamodb = boto3.resource("dynamodb") + dynamodb.create_table( + AttributeDefinitions=[{"AttributeName": "structure_id", "AttributeType": "S"},], + TableName="test", + KeySchema=[{"AttributeName": "structure_id", "KeyType": "HASH"},], + ProvisionedThroughput={"ReadCapacityUnits": 123, "WriteCapacityUnits": 123}, + ) + table = dynamodb.Table("test") + + with assert_raises(ClientError) as ex: + table.put_item(Item={"a_terribly_misguided_id_attribute": "abcdef"}) + ex.exception.response["Error"]["Message"].should.equal( + "One or more parameter values were invalid: Missing the key structure_id in the item" + ) + + def test_filter_expression(): row1 = moto.dynamodb2.models.Item( None, From e9930b0cb2dd66e0057511f742ae5021ad377407 Mon Sep 17 00:00:00 2001 From: Bert Blommers Date: Tue, 10 Mar 2020 13:30:38 +0000 Subject: [PATCH 122/658] S3 - test fix - Use plain text as content, instead of file --- tests/test_s3/test_s3.py | 14 ++++++-------- 1 file changed, 6 insertions(+), 8 deletions(-) diff --git a/tests/test_s3/test_s3.py b/tests/test_s3/test_s3.py index 48939be267fd..800daaef87dc 100644 --- a/tests/test_s3/test_s3.py +++ b/tests/test_s3/test_s3.py @@ -2030,17 +2030,15 @@ def test_boto3_s3_content_type(): s3 = boto3.resource("s3", region_name=DEFAULT_REGION_NAME) my_bucket = s3.Bucket("my-cool-bucket") my_bucket.create() - local_path = "test_s3.py" - s3_path = local_path + s3_path = "test_s3.py" s3 = boto3.resource("s3", verify=False) - with open(local_path, "rb") as _file: - content_type = mimetypes.guess_type(local_path) - s3.Object(my_bucket.name, s3_path).put( - ContentType=content_type[0], Body=_file, ACL="public-read" - ) + content_type = "text/python-x" + s3.Object(my_bucket.name, s3_path).put( + ContentType=content_type, Body=b"some python code", ACL="public-read" + ) - s3.Object(my_bucket.name, s3_path).content_type.should.equal(content_type[0]) + s3.Object(my_bucket.name, s3_path).content_type.should.equal(content_type) @mock_s3 From 315ac32f0906c36ebf0525c7a9f83ceb8d8cf7ac Mon Sep 17 00:00:00 2001 From: Bert Blommers Date: Tue, 10 Mar 2020 14:28:12 +0000 Subject: [PATCH 123/658] Add region to test case --- tests/test_dynamodb2/test_dynamodb.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/test_dynamodb2/test_dynamodb.py b/tests/test_dynamodb2/test_dynamodb.py index 0f2be6a2eaea..24e0626c9b75 100644 --- a/tests/test_dynamodb2/test_dynamodb.py +++ b/tests/test_dynamodb2/test_dynamodb.py @@ -1346,7 +1346,7 @@ def 
test_get_item_returns_consumed_capacity(): @mock_dynamodb2 def test_put_item_nonexisting_hash_key(): - dynamodb = boto3.resource("dynamodb") + dynamodb = boto3.resource("dynamodb", region_name="us-east-1") dynamodb.create_table( AttributeDefinitions=[{"AttributeName": "structure_id", "AttributeType": "S"},], TableName="test", From ba1bf09474933976f422179e3e902d3866b0892c Mon Sep 17 00:00:00 2001 From: ImFlog Date: Wed, 5 Feb 2020 09:31:03 +0100 Subject: [PATCH 124/658] Fix UPDATED_NEW return values differences between moto and dynamoDB --- moto/dynamodb2/responses.py | 43 ++++++++++++++++++++++----- tests/test_dynamodb2/test_dynamodb.py | 36 ++++++++++++++++++---- 2 files changed, 65 insertions(+), 14 deletions(-) diff --git a/moto/dynamodb2/responses.py b/moto/dynamodb2/responses.py index d3767c3fdda3..826a9a19c7cc 100644 --- a/moto/dynamodb2/responses.py +++ b/moto/dynamodb2/responses.py @@ -1,9 +1,12 @@ from __future__ import unicode_literals -import itertools + +import copy import json -import six import re +import itertools +import six + from moto.core.responses import BaseResponse from moto.core.utils import camelcase_to_underscores, amzn_request_id from .exceptions import InvalidIndexNameError, InvalidUpdateExpression, ItemSizeTooLarge @@ -710,7 +713,8 @@ def update_item(self): attribute_updates = self.body.get("AttributeUpdates") expression_attribute_names = self.body.get("ExpressionAttributeNames", {}) expression_attribute_values = self.body.get("ExpressionAttributeValues", {}) - existing_item = self.dynamodb_backend.get_item(name, key) + # We need to copy the item in order to avoid it being modified by the update_item operation + existing_item = copy.deepcopy(self.dynamodb_backend.get_item(name, key)) if existing_item: existing_attributes = existing_item.to_json()["Attributes"] else: @@ -796,14 +800,37 @@ def update_item(self): k: v for k, v in existing_attributes.items() if k in changed_attributes } elif return_values == "UPDATED_NEW": - item_dict["Attributes"] = { - k: v - for k, v in item_dict["Attributes"].items() - if k in changed_attributes - } + item_dict["Attributes"] = self._build_updated_new_attributes( + existing_attributes, item_dict["Attributes"] + ) return dynamo_json_dump(item_dict) + def _build_updated_new_attributes(self, original, changed): + if type(changed) != type(original): + return changed + else: + if type(changed) is dict: + return { + key: self._build_updated_new_attributes( + original.get(key, None), changed[key] + ) + for key in changed.keys() + if changed[key] != original.get(key, None) + } + elif type(changed) in (set, list): + if len(changed) != len(original): + return changed + else: + return [ + self._build_updated_new_attributes(original[index], changed[index]) + for index in range(len(changed)) + ] + elif changed != original: + return changed + else: + return None + def describe_limits(self): return json.dumps( { diff --git a/tests/test_dynamodb2/test_dynamodb.py b/tests/test_dynamodb2/test_dynamodb.py index 428b58f8109a..a2ea09c0e080 100644 --- a/tests/test_dynamodb2/test_dynamodb.py +++ b/tests/test_dynamodb2/test_dynamodb.py @@ -3412,13 +3412,18 @@ def test_update_supports_list_append(): ) # Update item using list_append expression - client.update_item( + updated_item = client.update_item( TableName="TestTable", Key={"SHA256": {"S": "sha-of-file"}}, UpdateExpression="SET crontab = list_append(crontab, :i)", ExpressionAttributeValues={":i": {"L": [{"S": "bar2"}]}}, + ReturnValues="UPDATED_NEW", ) + # Verify updated item is correct + 
updated_item["Attributes"].should.equal( + {"crontab": {"L": [{"S": "bar1"}, {"S": "bar2"}]}} + ) # Verify item is appended to the existing list result = client.get_item( TableName="TestTable", Key={"SHA256": {"S": "sha-of-file"}} @@ -3451,15 +3456,19 @@ def test_update_supports_nested_list_append(): ) # Update item using list_append expression - client.update_item( + updated_item = client.update_item( TableName="TestTable", Key={"id": {"S": "nested_list_append"}}, UpdateExpression="SET a.#b = list_append(a.#b, :i)", ExpressionAttributeValues={":i": {"L": [{"S": "bar2"}]}}, ExpressionAttributeNames={"#b": "b"}, + ReturnValues="UPDATED_NEW", ) - # Verify item is appended to the existing list + # Verify updated item is correct + updated_item["Attributes"].should.equal( + {"a": {"M": {"b": {"L": [{"S": "bar1"}, {"S": "bar2"}]}}}} + ) result = client.get_item( TableName="TestTable", Key={"id": {"S": "nested_list_append"}} )["Item"] @@ -3491,14 +3500,19 @@ def test_update_supports_multiple_levels_nested_list_append(): ) # Update item using list_append expression - client.update_item( + updated_item = client.update_item( TableName="TestTable", Key={"id": {"S": "nested_list_append"}}, UpdateExpression="SET a.#b.c = list_append(a.#b.#c, :i)", ExpressionAttributeValues={":i": {"L": [{"S": "bar2"}]}}, ExpressionAttributeNames={"#b": "b", "#c": "c"}, + ReturnValues="UPDATED_NEW", ) + # Verify updated item is correct + updated_item["Attributes"].should.equal( + {"a": {"M": {"b": {"M": {"c": {"L": [{"S": "bar1"}, {"S": "bar2"}]}}}}}} + ) # Verify item is appended to the existing list result = client.get_item( TableName="TestTable", Key={"id": {"S": "nested_list_append"}} @@ -3532,14 +3546,19 @@ def test_update_supports_nested_list_append_onto_another_list(): ) # Update item using list_append expression - client.update_item( + updated_item = client.update_item( TableName="TestTable", Key={"id": {"S": "list_append_another"}}, UpdateExpression="SET a.#c = list_append(a.#b, :i)", ExpressionAttributeValues={":i": {"L": [{"S": "bar2"}]}}, ExpressionAttributeNames={"#b": "b", "#c": "c"}, + ReturnValues="UPDATED_NEW", ) + # Verify updated item is correct + updated_item["Attributes"].should.equal( + {"a": {"M": {"c": {"L": [{"S": "bar1"}, {"S": "bar2"}]}}}} + ) # Verify item is appended to the existing list result = client.get_item( TableName="TestTable", Key={"id": {"S": "list_append_another"}} @@ -3582,13 +3601,18 @@ def test_update_supports_list_append_maps(): ) # Update item using list_append expression - client.update_item( + updated_item = client.update_item( TableName="TestTable", Key={"id": {"S": "nested_list_append"}, "rid": {"S": "range_key"}}, UpdateExpression="SET a = list_append(a, :i)", ExpressionAttributeValues={":i": {"L": [{"M": {"b": {"S": "bar2"}}}]}}, + ReturnValues="UPDATED_NEW", ) + # Verify updated item is correct + updated_item["Attributes"].should.equal( + {"a": {"L": [{"M": {"b": {"S": "bar1"}}}, {"M": {"b": {"S": "bar2"}}}]}} + ) # Verify item is appended to the existing list result = client.query( TableName="TestTable", From ab68d14649a2378b77a749e5ddaed30cd406e1e9 Mon Sep 17 00:00:00 2001 From: Huang syunwei Date: Mon, 9 Apr 2018 14:13:41 +1000 Subject: [PATCH 125/658] Fix bug of put metric data with timestamp, timestamp should be a date time object instead of a string --- moto/cloudwatch/models.py | 8 +- tests/test_cloudwatch/test_cloudwatch.py | 265 +++++++++++++---------- 2 files changed, 152 insertions(+), 121 deletions(-) diff --git a/moto/cloudwatch/models.py 
b/moto/cloudwatch/models.py index 13b31ddfe361..716a296334f4 100644 --- a/moto/cloudwatch/models.py +++ b/moto/cloudwatch/models.py @@ -2,13 +2,14 @@ from boto3 import Session -from moto.core.utils import iso_8601_datetime_with_milliseconds +from moto.core.utils import iso_8601_datetime_without_milliseconds from moto.core import BaseBackend, BaseModel from moto.core.exceptions import RESTError from datetime import datetime, timedelta from dateutil.tz import tzutc from uuid import uuid4 from .utils import make_arn_for_dashboard +from dateutil import parser from moto.core import ACCOUNT_ID as DEFAULT_ACCOUNT_ID @@ -146,7 +147,7 @@ def __repr__(self): class Statistics: def __init__(self, stats, dt): - self.timestamp = iso_8601_datetime_with_milliseconds(dt) + self.timestamp = iso_8601_datetime_without_milliseconds(dt) self.values = [] self.stats = stats @@ -278,8 +279,7 @@ def put_metric_data(self, namespace, metric_data): # Preserve "datetime" for get_metric_statistics comparisons timestamp = metric_member.get("Timestamp") if timestamp is not None and type(timestamp) != datetime: - timestamp = datetime.strptime(timestamp, "%Y-%m-%dT%H:%M:%S.%fZ") - timestamp = timestamp.replace(tzinfo=tzutc()) + timestamp = parser.parse(timestamp) self.metric_data.append( MetricDatum( namespace, diff --git a/tests/test_cloudwatch/test_cloudwatch.py b/tests/test_cloudwatch/test_cloudwatch.py index cc624e852841..49d9b63d27e7 100644 --- a/tests/test_cloudwatch/test_cloudwatch.py +++ b/tests/test_cloudwatch/test_cloudwatch.py @@ -1,117 +1,148 @@ -import boto -from boto.ec2.cloudwatch.alarm import MetricAlarm -import sure # noqa - -from moto import mock_cloudwatch_deprecated - - -def alarm_fixture(name="tester", action=None): - action = action or ["arn:alarm"] - return MetricAlarm( - name=name, - namespace="{0}_namespace".format(name), - metric="{0}_metric".format(name), - comparison=">=", - threshold=2.0, - period=60, - evaluation_periods=5, - statistic="Average", - description="A test", - dimensions={"InstanceId": ["i-0123456,i-0123457"]}, - alarm_actions=action, - ok_actions=["arn:ok"], - insufficient_data_actions=["arn:insufficient"], - unit="Seconds", - ) - - -@mock_cloudwatch_deprecated -def test_create_alarm(): - conn = boto.connect_cloudwatch() - - alarm = alarm_fixture() - conn.create_alarm(alarm) - - alarms = conn.describe_alarms() - alarms.should.have.length_of(1) - alarm = alarms[0] - alarm.name.should.equal("tester") - alarm.namespace.should.equal("tester_namespace") - alarm.metric.should.equal("tester_metric") - alarm.comparison.should.equal(">=") - alarm.threshold.should.equal(2.0) - alarm.period.should.equal(60) - alarm.evaluation_periods.should.equal(5) - alarm.statistic.should.equal("Average") - alarm.description.should.equal("A test") - dict(alarm.dimensions).should.equal({"InstanceId": ["i-0123456,i-0123457"]}) - list(alarm.alarm_actions).should.equal(["arn:alarm"]) - list(alarm.ok_actions).should.equal(["arn:ok"]) - list(alarm.insufficient_data_actions).should.equal(["arn:insufficient"]) - alarm.unit.should.equal("Seconds") - - -@mock_cloudwatch_deprecated -def test_delete_alarm(): - conn = boto.connect_cloudwatch() - - alarms = conn.describe_alarms() - alarms.should.have.length_of(0) - - alarm = alarm_fixture() - conn.create_alarm(alarm) - - alarms = conn.describe_alarms() - alarms.should.have.length_of(1) - - alarms[0].delete() - - alarms = conn.describe_alarms() - alarms.should.have.length_of(0) - - -@mock_cloudwatch_deprecated -def test_put_metric_data(): - conn = 
boto.connect_cloudwatch() - - conn.put_metric_data( - namespace="tester", - name="metric", - value=1.5, - dimensions={"InstanceId": ["i-0123456,i-0123457"]}, - ) - - metrics = conn.list_metrics() - metrics.should.have.length_of(1) - metric = metrics[0] - metric.namespace.should.equal("tester") - metric.name.should.equal("metric") - dict(metric.dimensions).should.equal({"InstanceId": ["i-0123456,i-0123457"]}) - - -@mock_cloudwatch_deprecated -def test_describe_alarms(): - conn = boto.connect_cloudwatch() - - alarms = conn.describe_alarms() - alarms.should.have.length_of(0) - - conn.create_alarm(alarm_fixture(name="nfoobar", action="afoobar")) - conn.create_alarm(alarm_fixture(name="nfoobaz", action="afoobaz")) - conn.create_alarm(alarm_fixture(name="nbarfoo", action="abarfoo")) - conn.create_alarm(alarm_fixture(name="nbazfoo", action="abazfoo")) - - alarms = conn.describe_alarms() - alarms.should.have.length_of(4) - alarms = conn.describe_alarms(alarm_name_prefix="nfoo") - alarms.should.have.length_of(2) - alarms = conn.describe_alarms(alarm_names=["nfoobar", "nbarfoo", "nbazfoo"]) - alarms.should.have.length_of(3) - alarms = conn.describe_alarms(action_prefix="afoo") - alarms.should.have.length_of(2) - - for alarm in conn.describe_alarms(): - alarm.delete() - - alarms = conn.describe_alarms() - alarms.should.have.length_of(0) +import boto +from boto.ec2.cloudwatch.alarm import MetricAlarm +from datetime import datetime +import sure # noqa + +from moto import mock_cloudwatch_deprecated + + +def alarm_fixture(name="tester", action=None): + action = action or ["arn:alarm"] + return MetricAlarm( + name=name, + namespace="{0}_namespace".format(name), + metric="{0}_metric".format(name), + comparison=">=", + threshold=2.0, + period=60, + evaluation_periods=5, + statistic="Average", + description="A test", + dimensions={"InstanceId": ["i-0123456,i-0123457"]}, + alarm_actions=action, + ok_actions=["arn:ok"], + insufficient_data_actions=["arn:insufficient"], + unit="Seconds", + ) + + +@mock_cloudwatch_deprecated +def test_create_alarm(): + conn = boto.connect_cloudwatch() + + alarm = alarm_fixture() + conn.create_alarm(alarm) + + alarms = conn.describe_alarms() + alarms.should.have.length_of(1) + alarm = alarms[0] + alarm.name.should.equal("tester") + alarm.namespace.should.equal("tester_namespace") + alarm.metric.should.equal("tester_metric") + alarm.comparison.should.equal(">=") + alarm.threshold.should.equal(2.0) + alarm.period.should.equal(60) + alarm.evaluation_periods.should.equal(5) + alarm.statistic.should.equal("Average") + alarm.description.should.equal("A test") + dict(alarm.dimensions).should.equal({"InstanceId": ["i-0123456,i-0123457"]}) + list(alarm.alarm_actions).should.equal(["arn:alarm"]) + list(alarm.ok_actions).should.equal(["arn:ok"]) + list(alarm.insufficient_data_actions).should.equal(["arn:insufficient"]) + alarm.unit.should.equal("Seconds") + + +@mock_cloudwatch_deprecated +def test_delete_alarm(): + conn = boto.connect_cloudwatch() + + alarms = conn.describe_alarms() + alarms.should.have.length_of(0) + + alarm = alarm_fixture() + conn.create_alarm(alarm) + + alarms = conn.describe_alarms() + alarms.should.have.length_of(1) + + alarms[0].delete() + + alarms = conn.describe_alarms() + alarms.should.have.length_of(0) + + +@mock_cloudwatch_deprecated +def test_put_metric_data(): + conn = boto.connect_cloudwatch() + + conn.put_metric_data( + namespace="tester", + name="metric", + value=1.5, + dimensions={"InstanceId": ["i-0123456,i-0123457"]}, + ) + + metrics = 
conn.list_metrics() + metrics.should.have.length_of(1) + metric = metrics[0] + metric.namespace.should.equal("tester") + metric.name.should.equal("metric") + dict(metric.dimensions).should.equal({"InstanceId": ["i-0123456,i-0123457"]}) + + +@mock_cloudwatch_deprecated +def test_describe_alarms(): + conn = boto.connect_cloudwatch() + + alarms = conn.describe_alarms() + alarms.should.have.length_of(0) + + conn.create_alarm(alarm_fixture(name="nfoobar", action="afoobar")) + conn.create_alarm(alarm_fixture(name="nfoobaz", action="afoobaz")) + conn.create_alarm(alarm_fixture(name="nbarfoo", action="abarfoo")) + conn.create_alarm(alarm_fixture(name="nbazfoo", action="abazfoo")) + + alarms = conn.describe_alarms() + alarms.should.have.length_of(4) + alarms = conn.describe_alarms(alarm_name_prefix="nfoo") + alarms.should.have.length_of(2) + alarms = conn.describe_alarms(alarm_names=["nfoobar", "nbarfoo", "nbazfoo"]) + alarms.should.have.length_of(3) + alarms = conn.describe_alarms(action_prefix="afoo") + alarms.should.have.length_of(2) + + for alarm in conn.describe_alarms(): + alarm.delete() + + alarms = conn.describe_alarms() + alarms.should.have.length_of(0) + + +@mock_cloudwatch_deprecated +def test_get_metric_statistics(): + conn = boto.connect_cloudwatch() + + metric_timestamp = datetime(2018, 4, 9, 13, 0, 0, 0) + + conn.put_metric_data( + namespace='tester', + name='metric', + value=1.5, + dimensions={'InstanceId': ['i-0123456,i-0123457']}, + timestamp=metric_timestamp + ) + + metric_kwargs = dict( + namespace='tester', + metric_name='metric', + start_time=metric_timestamp, + end_time=datetime.now(), + period=3600, + statistics=['Minimum'] + ) + + datapoints = conn.get_metric_statistics(**metric_kwargs) + datapoints.should.have.length_of(1) + datapoint = datapoints[0] + datapoint.should.have.key('Minimum').which.should.equal(1.5) + datapoint.should.have.key('Timestamp').which.should.equal(metric_timestamp) From 0e433691556243f34d995d5eb544bb2ed40c5e4c Mon Sep 17 00:00:00 2001 From: Bert Blommers Date: Wed, 11 Mar 2020 12:47:40 +0000 Subject: [PATCH 126/658] Linting --- tests/test_cloudwatch/test_cloudwatch.py | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/tests/test_cloudwatch/test_cloudwatch.py b/tests/test_cloudwatch/test_cloudwatch.py index 49d9b63d27e7..dee8aa605cd6 100644 --- a/tests/test_cloudwatch/test_cloudwatch.py +++ b/tests/test_cloudwatch/test_cloudwatch.py @@ -125,24 +125,24 @@ def test_get_metric_statistics(): metric_timestamp = datetime(2018, 4, 9, 13, 0, 0, 0) conn.put_metric_data( - namespace='tester', - name='metric', + namespace="tester", + name="metric", value=1.5, - dimensions={'InstanceId': ['i-0123456,i-0123457']}, - timestamp=metric_timestamp + dimensions={"InstanceId": ["i-0123456,i-0123457"]}, + timestamp=metric_timestamp, ) metric_kwargs = dict( - namespace='tester', - metric_name='metric', + namespace="tester", + metric_name="metric", start_time=metric_timestamp, end_time=datetime.now(), period=3600, - statistics=['Minimum'] + statistics=["Minimum"], ) datapoints = conn.get_metric_statistics(**metric_kwargs) datapoints.should.have.length_of(1) datapoint = datapoints[0] - datapoint.should.have.key('Minimum').which.should.equal(1.5) - datapoint.should.have.key('Timestamp').which.should.equal(metric_timestamp) + datapoint.should.have.key("Minimum").which.should.equal(1.5) + datapoint.should.have.key("Timestamp").which.should.equal(metric_timestamp) From 20364b177a5afb1d0452c835daa60086e6eb289e Mon Sep 17 00:00:00 2001 From: Luis 
Pollo Date: Wed, 7 Nov 2018 15:58:26 -0600 Subject: [PATCH 127/658] Fix IAM role name when parsed from CloudFormation JSON. --- moto/iam/models.py | 4 +++- .../test_cloudformation_stack_integration.py | 7 ++++--- 2 files changed, 7 insertions(+), 4 deletions(-) mode change 100644 => 100755 moto/iam/models.py diff --git a/moto/iam/models.py b/moto/iam/models.py old mode 100644 new mode 100755 index 18b3a7a6f39d..7ac3a4f9e48b --- a/moto/iam/models.py +++ b/moto/iam/models.py @@ -12,6 +12,7 @@ from cryptography import x509 from cryptography.hazmat.backends import default_backend from six.moves.urllib.parse import urlparse +from uuid import uuid4 from moto.core.exceptions import RESTError from moto.core import BaseBackend, BaseModel, ACCOUNT_ID @@ -330,9 +331,10 @@ def create_from_cloudformation_json( cls, resource_name, cloudformation_json, region_name ): properties = cloudformation_json["Properties"] + role_name = properties['RoleName'] if 'RoleName' in properties else str(uuid4())[0:5] role = iam_backend.create_role( - role_name=resource_name, + role_name=role_name, assume_role_policy_document=properties["AssumeRolePolicyDocument"], path=properties.get("Path", "/"), permissions_boundary=properties.get("PermissionsBoundary", ""), diff --git a/tests/test_cloudformation/test_cloudformation_stack_integration.py b/tests/test_cloudformation/test_cloudformation_stack_integration.py index e296ef2edfef..45a2045b304e 100644 --- a/tests/test_cloudformation/test_cloudformation_stack_integration.py +++ b/tests/test_cloudformation/test_cloudformation_stack_integration.py @@ -909,6 +909,7 @@ def test_iam_roles(): }, "my-role-no-path": { "Properties": { + "RoleName": "my-role-no-path-name", "AssumeRolePolicyDocument": { "Statement": [ { @@ -936,13 +937,13 @@ def test_iam_roles(): role_name_to_id = {} for role_result in role_results: role = iam_conn.get_role(role_result.role_name) - role.role_name.should.contain("my-role") - if "with-path" in role.role_name: + if "my-role" not in role.role_name: role_name_to_id["with-path"] = role.role_id role.path.should.equal("my-path") + len(role.role_name).should.equal(5) # Role name is not specified, so randomly generated - can't check exact name else: role_name_to_id["no-path"] = role.role_id - role.role_name.should.contain("no-path") + role.role_name.should.equal("my-role-no-path-name") role.path.should.equal("/") instance_profile_responses = iam_conn.list_instance_profiles()[ From 9163f042927eb688a2a93194b2a0f5d4a47a8aca Mon Sep 17 00:00:00 2001 From: Bert Blommers Date: Wed, 11 Mar 2020 13:19:40 +0000 Subject: [PATCH 128/658] Linting --- moto/iam/models.py | 4 +++- .../test_cloudformation_stack_integration.py | 6 ++++-- 2 files changed, 7 insertions(+), 3 deletions(-) diff --git a/moto/iam/models.py b/moto/iam/models.py index 7ac3a4f9e48b..e34ca7cf86bb 100755 --- a/moto/iam/models.py +++ b/moto/iam/models.py @@ -331,7 +331,9 @@ def create_from_cloudformation_json( cls, resource_name, cloudformation_json, region_name ): properties = cloudformation_json["Properties"] - role_name = properties['RoleName'] if 'RoleName' in properties else str(uuid4())[0:5] + role_name = ( + properties["RoleName"] if "RoleName" in properties else str(uuid4())[0:5] + ) role = iam_backend.create_role( role_name=role_name, diff --git a/tests/test_cloudformation/test_cloudformation_stack_integration.py b/tests/test_cloudformation/test_cloudformation_stack_integration.py index 45a2045b304e..5a3181449b23 100644 --- a/tests/test_cloudformation/test_cloudformation_stack_integration.py +++ 
b/tests/test_cloudformation/test_cloudformation_stack_integration.py @@ -918,7 +918,7 @@ def test_iam_roles(): "Principal": {"Service": ["ec2.amazonaws.com"]}, } ] - } + }, }, "Type": "AWS::IAM::Role", }, @@ -940,7 +940,9 @@ def test_iam_roles(): if "my-role" not in role.role_name: role_name_to_id["with-path"] = role.role_id role.path.should.equal("my-path") - len(role.role_name).should.equal(5) # Role name is not specified, so randomly generated - can't check exact name + len(role.role_name).should.equal( + 5 + ) # Role name is not specified, so randomly generated - can't check exact name else: role_name_to_id["no-path"] = role.role_id role.role_name.should.equal("my-role-no-path-name") From 57056954954ddea47e82d7443f3f401c41c2476f Mon Sep 17 00:00:00 2001 From: Brent Driskill Date: Sun, 8 Mar 2020 20:32:01 -0400 Subject: [PATCH 129/658] SSM: Added support for label_parameter_version and getting labels on get_parameter_history --- moto/ssm/exceptions.py | 17 ++ moto/ssm/models.py | 66 ++++++- moto/ssm/responses.py | 13 +- tests/test_ssm/test_ssm_boto3.py | 303 +++++++++++++++++++++++++++++++ 4 files changed, 396 insertions(+), 3 deletions(-) diff --git a/moto/ssm/exceptions.py b/moto/ssm/exceptions.py index 3458fe7d3276..1c7c26ed9d57 100644 --- a/moto/ssm/exceptions.py +++ b/moto/ssm/exceptions.py @@ -22,6 +22,23 @@ class InvalidFilterValue(JsonRESTError): def __init__(self, message): super(InvalidFilterValue, self).__init__("InvalidFilterValue", message) +class ParameterNotFound(JsonRESTError): + code = 400 + + def __init__(self, message): + super(ParameterNotFound, self).__init__("ParameterNotFound", message) + +class ParameterVersionNotFound(JsonRESTError): + code = 400 + + def __init__(self, message): + super(ParameterVersionNotFound, self).__init__("ParameterVersionNotFound", message) + +class ParameterVersionLabelLimitExceeded(JsonRESTError): + code = 400 + + def __init__(self, message): + super(ParameterVersionLabelLimitExceeded, self).__init__("ParameterVersionLabelLimitExceeded", message) class ValidationException(JsonRESTError): code = 400 diff --git a/moto/ssm/models.py b/moto/ssm/models.py index a7518d405b4b..2806a0fe0fa6 100644 --- a/moto/ssm/models.py +++ b/moto/ssm/models.py @@ -19,6 +19,9 @@ InvalidFilterValue, InvalidFilterOption, InvalidFilterKey, + ParameterVersionLabelLimitExceeded, + ParameterVersionNotFound, + ParameterNotFound ) @@ -32,7 +35,7 @@ def __init__( allowed_pattern, keyid, last_modified_date, - version, + version ): self.name = name self.type = type @@ -41,6 +44,7 @@ def __init__( self.keyid = keyid self.last_modified_date = last_modified_date self.version = version + self.labels = [] if self.type == "SecureString": if not self.keyid: @@ -75,7 +79,7 @@ def response_object(self, decrypt=False, region=None): return r - def describe_response_object(self, decrypt=False): + def describe_response_object(self, decrypt=False, include_labels=False): r = self.response_object(decrypt) r["LastModifiedDate"] = round(self.last_modified_date, 3) r["LastModifiedUser"] = "N/A" @@ -89,6 +93,9 @@ def describe_response_object(self, decrypt=False): if self.allowed_pattern: r["AllowedPattern"] = self.allowed_pattern + if include_labels: + r["Labels"] = self.labels + return r @@ -614,6 +621,61 @@ def get_parameter(self, name, with_decryption): return self._parameters[name][-1] return None + def label_parameter_version(self, name, version, labels): + previous_parameter_versions = self._parameters[name] + if not previous_parameter_versions: + raise ParameterNotFound( + "Parameter 
%s not found." % name + ) + found_parameter = None + labels_needing_removal = [] + if not version: + version = 1 + for parameter in previous_parameter_versions: + if parameter.version >= version: + version = parameter.version + for parameter in previous_parameter_versions: + if parameter.version == version: + found_parameter = parameter + else: + for label in labels: + if label in parameter.labels: + labels_needing_removal.append(label) + if not found_parameter: + raise ParameterVersionNotFound( + "Systems Manager could not find version %s of %s. " + "Verify the version and try again." % (version, name) + ) + labels_to_append = [] + invalid_labels = [] + for label in labels: + if label.startswith("aws") or label.startswith("ssm") or label[:1].isdigit() or not re.match("^[a-zA-z0-9_\.\-]*$", label): + invalid_labels.append(label) + continue + if len(label) > 100: + raise ValidationException( + "1 validation error detected: " + "Value '[%s]' at 'labels' failed to satisfy constraint: " + "Member must satisfy constraint: " + "[Member must have length less than or equal to 100, Member must have length greater than or equal to 1]" % label + ) + continue + if label not in found_parameter.labels: + labels_to_append.append(label) + if (len(found_parameter.labels) + len(labels_to_append)) > 10: + raise ParameterVersionLabelLimitExceeded( + "An error occurred (ParameterVersionLabelLimitExceeded) when calling the LabelParameterVersion operation: " + "A parameter version can have maximum 10 labels." + "Move one or more labels to another version and try again." + ) + found_parameter.labels = found_parameter.labels + labels_to_append + for parameter in previous_parameter_versions: + if parameter.version != version: + for label in parameter.labels[:]: + if label in labels_needing_removal: + parameter.labels.remove(label) + return [invalid_labels, version] + def put_parameter( self, name, description, value, type, allowed_pattern, keyid, overwrite ): diff --git a/moto/ssm/responses.py b/moto/ssm/responses.py index 831737848f57..f453518abd13 100644 --- a/moto/ssm/responses.py +++ b/moto/ssm/responses.py @@ -168,12 +168,23 @@ def get_parameter_history(self): response = {"Parameters": []} for parameter_version in result: param_data = parameter_version.describe_response_object( - decrypt=with_decryption + decrypt=with_decryption, + include_labels=True ) response["Parameters"].append(param_data) return json.dumps(response) + def label_parameter_version(self): + name = self._get_param("Name") + version = self._get_param("ParameterVersion") + labels = self._get_param("Labels") + + invalid_labels, version = self.ssm_backend.label_parameter_version(name, version, labels) + + response = {"InvalidLabels": invalid_labels, "ParameterVersion": version} + return json.dumps(response) + def add_tags_to_resource(self): resource_id = self._get_param("ResourceId") resource_type = self._get_param("ResourceType") diff --git a/tests/test_ssm/test_ssm_boto3.py b/tests/test_ssm/test_ssm_boto3.py index bb674fb652d8..c2813772d120 100644 --- a/tests/test_ssm/test_ssm_boto3.py +++ b/tests/test_ssm/test_ssm_boto3.py @@ -897,6 +897,7 @@ def test_get_parameter_history(): param["Value"].should.equal("value-%d" % index) param["Version"].should.equal(index + 1) param["Description"].should.equal("A test parameter version %d" % index) + param["Labels"].should.equal([]) len(parameters_response).should.equal(3) @@ -937,6 +938,308 @@ def test_get_parameter_history_with_secure_string(): len(parameters_response).should.equal(3) +@mock_ssm +def 
test_label_parameter_version(): + client = boto3.client("ssm", region_name="us-east-1") + + test_parameter_name = "test" + client.put_parameter(Name=test_parameter_name, Description="A test parameter", Value="value", Type="String") + + response = client.label_parameter_version(Name=test_parameter_name, Labels=["test-label"]) + response["InvalidLabels"].should.equal([]) + response["ParameterVersion"].should.equal(1) + +@mock_ssm +def test_label_parameter_version_with_specific_version(): + client = boto3.client("ssm", region_name="us-east-1") + + test_parameter_name = "test" + client.put_parameter(Name=test_parameter_name, Description="A test parameter", Value="value", Type="String") + + response = client.label_parameter_version(Name=test_parameter_name, ParameterVersion=1, Labels=["test-label"]) + response["InvalidLabels"].should.equal([]) + response["ParameterVersion"].should.equal(1) + +@mock_ssm +def test_label_parameter_version_twice(): + client = boto3.client("ssm", region_name="us-east-1") + + test_parameter_name = "test" + test_labels = ["test-label"] + client.put_parameter(Name=test_parameter_name, Description="A test parameter", Value="value", Type="String") + + response = client.label_parameter_version(Name=test_parameter_name, ParameterVersion=1, Labels=test_labels) + response["InvalidLabels"].should.equal([]) + response["ParameterVersion"].should.equal(1) + response = client.label_parameter_version(Name=test_parameter_name, ParameterVersion=1, Labels=test_labels) + response["InvalidLabels"].should.equal([]) + response["ParameterVersion"].should.equal(1) + + response = client.get_parameter_history(Name=test_parameter_name) + len(response["Parameters"]).should.equal(1) + response["Parameters"][0]["Labels"].should.equal(test_labels) + +@mock_ssm +def test_label_parameter_moving_versions(): + client = boto3.client("ssm", region_name="us-east-1") + + test_parameter_name = "test" + test_labels = ["test-label"] + + for i in range(3): + client.put_parameter( + Name=test_parameter_name, + Description="A test parameter version %d" % i, + Value="value-%d" % i, + Type="String", + Overwrite=True + ) + + response = client.label_parameter_version(Name=test_parameter_name, ParameterVersion=1, Labels=test_labels) + response["InvalidLabels"].should.equal([]) + response["ParameterVersion"].should.equal(1) + response = client.label_parameter_version(Name=test_parameter_name, ParameterVersion=2, Labels=test_labels) + response["InvalidLabels"].should.equal([]) + response["ParameterVersion"].should.equal(2) + + response = client.get_parameter_history(Name=test_parameter_name) + parameters_response = response["Parameters"] + + for index, param in enumerate(parameters_response): + param["Name"].should.equal(test_parameter_name) + param["Type"].should.equal("String") + param["Value"].should.equal("value-%d" % index) + param["Version"].should.equal(index + 1) + param["Description"].should.equal("A test parameter version %d" % index) + labels = test_labels if param["Version"] == 2 else [] + param["Labels"].should.equal(labels) + + len(parameters_response).should.equal(3) + +@mock_ssm +def test_label_parameter_moving_versions_complex(): + client = boto3.client("ssm", region_name="us-east-1") + + test_parameter_name = "test" + + for i in range(3): + client.put_parameter( + Name=test_parameter_name, + Description="A test parameter version %d" % i, + Value="value-%d" % i, + Type="String", + Overwrite=True + ) + + response = client.label_parameter_version(Name=test_parameter_name, ParameterVersion=1, 
Labels=["test-label1", "test-label2", "test-label3"]) + response["InvalidLabels"].should.equal([]) + response["ParameterVersion"].should.equal(1) + response = client.label_parameter_version(Name=test_parameter_name, ParameterVersion=2, Labels=["test-label2", "test-label3"]) + response["InvalidLabels"].should.equal([]) + response["ParameterVersion"].should.equal(2) + + response = client.get_parameter_history(Name=test_parameter_name) + parameters_response = response["Parameters"] + + for index, param in enumerate(parameters_response): + param["Name"].should.equal(test_parameter_name) + param["Type"].should.equal("String") + param["Value"].should.equal("value-%d" % index) + param["Version"].should.equal(index + 1) + param["Description"].should.equal("A test parameter version %d" % index) + labels = ["test-label2", "test-label3"] if param["Version"] == 2 else (["test-label1"] if param["Version"] == 1 else []) + param["Labels"].should.equal(labels) + + len(parameters_response).should.equal(3) + +@mock_ssm +def test_label_parameter_version_exception_ten_labels_at_once(): + client = boto3.client("ssm", region_name="us-east-1") + + test_parameter_name = "test" + test_labels = ["test-label1", "test-label2", "test-label3", "test-label4", "test-label5", "test-label6", "test-label7", "test-label8", "test-label9", "test-label10", "test-label11"] + + client.put_parameter(Name=test_parameter_name, Description="A test parameter", Value="value", Type="String") + client.label_parameter_version.when.called_with( + Name="test", ParameterVersion=1, Labels=test_labels + ).should.throw( + ClientError, + "An error occurred (ParameterVersionLabelLimitExceeded) when calling the LabelParameterVersion operation: " + "A parameter version can have maximum 10 labels." + "Move one or more labels to another version and try again." + ) + +@mock_ssm +def test_label_parameter_version_exception_ten_labels_over_multiple_calls(): + client = boto3.client("ssm", region_name="us-east-1") + + test_parameter_name = "test" + + client.put_parameter(Name=test_parameter_name, Description="A test parameter", Value="value", Type="String") + client.label_parameter_version(Name=test_parameter_name, ParameterVersion=1, Labels=["test-label1", "test-label2", "test-label3", "test-label4", "test-label5"]) + client.label_parameter_version.when.called_with( + Name="test", ParameterVersion=1, Labels=["test-label6", "test-label7", "test-label8", "test-label9", "test-label10", "test-label11"] + ).should.throw( + ClientError, + "An error occurred (ParameterVersionLabelLimitExceeded) when calling the LabelParameterVersion operation: " + "A parameter version can have maximum 10 labels." + "Move one or more labels to another version and try again." + ) + +@mock_ssm +def test_label_parameter_version_invalid_name(): + client = boto3.client("ssm", region_name="us-east-1") + + test_parameter_name = "test" + + response = client.label_parameter_version.when.called_with( + Name=test_parameter_name, Labels=["test-label"] + ).should.throw( + ClientError, + "An error occurred (ParameterNotFound) when calling the LabelParameterVersion operation: " + "Parameter test not found." 
+ ) + +@mock_ssm +def test_label_parameter_version_invalid_parameter_version(): + client = boto3.client("ssm", region_name="us-east-1") + + test_parameter_name = "test" + client.put_parameter(Name=test_parameter_name, Description="A test parameter", Value="value", Type="String") + + response = client.label_parameter_version.when.called_with( + Name=test_parameter_name, Labels=["test-label"], ParameterVersion=5 + ).should.throw( + ClientError, + "An error occurred (ParameterVersionNotFound) when calling the LabelParameterVersion operation: " + "Systems Manager could not find version 5 of test. " + "Verify the version and try again." + ) + +@mock_ssm +def test_label_parameter_version_invalid_label(): + client = boto3.client("ssm", region_name="us-east-1") + + test_parameter_name = "test" + client.put_parameter(Name=test_parameter_name, Description="A test parameter", Value="value", Type="String") + response = client.label_parameter_version(Name=test_parameter_name, ParameterVersion=1, Labels=["awsabc"]) + response["InvalidLabels"].should.equal(["awsabc"]) + + response = client.label_parameter_version(Name=test_parameter_name, ParameterVersion=1, Labels=["ssmabc"]) + response["InvalidLabels"].should.equal(["ssmabc"]) + + response = client.label_parameter_version(Name=test_parameter_name, ParameterVersion=1, Labels=["9abc"]) + response["InvalidLabels"].should.equal(["9abc"]) + + response = client.label_parameter_version(Name=test_parameter_name, ParameterVersion=1, Labels=["abc/123"]) + response["InvalidLabels"].should.equal(["abc/123"]) + + client.label_parameter_version.when.called_with( + Name=test_parameter_name, ParameterVersion=1, Labels=["a"*101] + ).should.throw( + ClientError, + "1 validation error detected: " + "Value '[%s]' at 'labels' failed to satisfy constraint: " + "Member must satisfy constraint: " + "[Member must have length less than or equal to 100, Member must have length greater than or equal to 1]" % ("a"*101) + ) + + +@mock_ssm +def test_get_parameter_history_with_label(): + client = boto3.client("ssm", region_name="us-east-1") + + test_parameter_name = "test" + test_labels = ["test-label"] + + for i in range(3): + client.put_parameter( + Name=test_parameter_name, + Description="A test parameter version %d" % i, + Value="value-%d" % i, + Type="String", + Overwrite=True, + ) + + client.label_parameter_version(Name=test_parameter_name, ParameterVersion=1, Labels=test_labels) + + response = client.get_parameter_history(Name=test_parameter_name) + parameters_response = response["Parameters"] + + for index, param in enumerate(parameters_response): + param["Name"].should.equal(test_parameter_name) + param["Type"].should.equal("String") + param["Value"].should.equal("value-%d" % index) + param["Version"].should.equal(index + 1) + param["Description"].should.equal("A test parameter version %d" % index) + labels = test_labels if param["Version"] == 1 else [] + param["Labels"].should.equal(labels) + + len(parameters_response).should.equal(3) + +@mock_ssm +def test_get_parameter_history_with_label_non_latest(): + client = boto3.client("ssm", region_name="us-east-1") + + test_parameter_name = "test" + test_labels = ["test-label"] + + for i in range(3): + client.put_parameter( + Name=test_parameter_name, + Description="A test parameter version %d" % i, + Value="value-%d" % i, + Type="String", + Overwrite=True, + ) + + client.label_parameter_version(Name=test_parameter_name, ParameterVersion=2, Labels=test_labels) + + response = client.get_parameter_history(Name=test_parameter_name) + 
parameters_response = response["Parameters"] + + for index, param in enumerate(parameters_response): + param["Name"].should.equal(test_parameter_name) + param["Type"].should.equal("String") + param["Value"].should.equal("value-%d" % index) + param["Version"].should.equal(index + 1) + param["Description"].should.equal("A test parameter version %d" % index) + labels = test_labels if param["Version"] == 2 else [] + param["Labels"].should.equal(labels) + + len(parameters_response).should.equal(3) + +@mock_ssm +def test_get_parameter_history_with_label_latest_assumed(): + client = boto3.client("ssm", region_name="us-east-1") + + test_parameter_name = "test" + test_labels = ["test-label"] + + for i in range(3): + client.put_parameter( + Name=test_parameter_name, + Description="A test parameter version %d" % i, + Value="value-%d" % i, + Type="String", + Overwrite=True, + ) + + client.label_parameter_version(Name=test_parameter_name, Labels=test_labels) + + response = client.get_parameter_history(Name=test_parameter_name) + parameters_response = response["Parameters"] + + for index, param in enumerate(parameters_response): + param["Name"].should.equal(test_parameter_name) + param["Type"].should.equal("String") + param["Value"].should.equal("value-%d" % index) + param["Version"].should.equal(index + 1) + param["Description"].should.equal("A test parameter version %d" % index) + labels = test_labels if param["Version"] == 3 else [] + param["Labels"].should.equal(labels) + + len(parameters_response).should.equal(3) @mock_ssm def test_get_parameter_history_missing_parameter(): From e3e4b741d8aae3acc7408bdb48f733c66fa59d39 Mon Sep 17 00:00:00 2001 From: Brent Driskill Date: Wed, 11 Mar 2020 11:57:04 -0400 Subject: [PATCH 130/658] SSM: Fix the formatting associated with label_parameter_version/get_parameter_history updates --- moto/ssm/exceptions.py | 12 +- moto/ssm/models.py | 20 ++-- moto/ssm/responses.py | 7 +- tests/test_ssm/test_ssm_boto3.py | 184 +++++++++++++++++++++++++------ 4 files changed, 176 insertions(+), 47 deletions(-) diff --git a/moto/ssm/exceptions.py b/moto/ssm/exceptions.py index 1c7c26ed9d57..83ae26b6cf3e 100644 --- a/moto/ssm/exceptions.py +++ b/moto/ssm/exceptions.py @@ -22,23 +22,31 @@ class InvalidFilterValue(JsonRESTError): def __init__(self, message): super(InvalidFilterValue, self).__init__("InvalidFilterValue", message) + class ParameterNotFound(JsonRESTError): code = 400 def __init__(self, message): super(ParameterNotFound, self).__init__("ParameterNotFound", message) + class ParameterVersionNotFound(JsonRESTError): code = 400 def __init__(self, message): - super(ParameterVersionNotFound, self).__init__("ParameterVersionNotFound", message) + super(ParameterVersionNotFound, self).__init__( + "ParameterVersionNotFound", message + ) + class ParameterVersionLabelLimitExceeded(JsonRESTError): code = 400 def __init__(self, message): - super(ParameterVersionLabelLimitExceeded, self).__init__("ParameterVersionLabelLimitExceeded", message) + super(ParameterVersionLabelLimitExceeded, self).__init__( + "ParameterVersionLabelLimitExceeded", message + ) + class ValidationException(JsonRESTError): code = 400 diff --git a/moto/ssm/models.py b/moto/ssm/models.py index 2806a0fe0fa6..201f43c5a47a 100644 --- a/moto/ssm/models.py +++ b/moto/ssm/models.py @@ -21,7 +21,7 @@ InvalidFilterKey, ParameterVersionLabelLimitExceeded, ParameterVersionNotFound, - ParameterNotFound + ParameterNotFound, ) @@ -35,7 +35,7 @@ def __init__( allowed_pattern, keyid, last_modified_date, - version + version, ): 
self.name = name self.type = type @@ -624,9 +624,7 @@ def get_parameter(self, name, with_decryption): def label_parameter_version(self, name, version, labels): previous_parameter_versions = self._parameters[name] if not previous_parameter_versions: - raise ParameterNotFound( - "Parameter %s not found." % name - ) + raise ParameterNotFound("Parameter %s not found." % name) found_parameter = None labels_needing_removal = [] if not version: @@ -645,11 +643,16 @@ def label_parameter_version(self, name, version, labels): raise ParameterVersionNotFound( "Systems Manager could not find version %s of %s. " "Verify the version and try again." % (version, name) - ) + ) labels_to_append = [] invalid_labels = [] for label in labels: - if label.startswith("aws") or label.startswith("ssm") or label[:1].isdigit() or not re.match("^[a-zA-z0-9_\.\-]*$", label): + if ( + label.startswith("aws") + or label.startswith("ssm") + or label[:1].isdigit() + or not re.match("^[a-zA-z0-9_\.\-]*$", label) + ): invalid_labels.append(label) continue if len(label) > 100: @@ -657,7 +660,8 @@ def label_parameter_version(self, name, version, labels): "1 validation error detected: " "Value '[%s]' at 'labels' failed to satisfy constraint: " "Member must satisfy constraint: " - "[Member must have length less than or equal to 100, Member must have length greater than or equal to 1]" % label + "[Member must have length less than or equal to 100, Member must have length greater than or equal to 1]" + % label ) continue if label not in found_parameter.labels: diff --git a/moto/ssm/responses.py b/moto/ssm/responses.py index f453518abd13..45d2dec0ad27 100644 --- a/moto/ssm/responses.py +++ b/moto/ssm/responses.py @@ -168,8 +168,7 @@ def get_parameter_history(self): response = {"Parameters": []} for parameter_version in result: param_data = parameter_version.describe_response_object( - decrypt=with_decryption, - include_labels=True + decrypt=with_decryption, include_labels=True ) response["Parameters"].append(param_data) @@ -180,7 +179,9 @@ def label_parameter_version(self): version = self._get_param("ParameterVersion") labels = self._get_param("Labels") - invalid_labels, version = self.ssm_backend.label_parameter_version(name, version, labels) + invalid_labels, version = self.ssm_backend.label_parameter_version( + name, version, labels + ) response = {"InvalidLabels": invalid_labels, "ParameterVersion": version} return json.dumps(response) diff --git a/tests/test_ssm/test_ssm_boto3.py b/tests/test_ssm/test_ssm_boto3.py index c2813772d120..170cd8a3e51e 100644 --- a/tests/test_ssm/test_ssm_boto3.py +++ b/tests/test_ssm/test_ssm_boto3.py @@ -938,40 +938,66 @@ def test_get_parameter_history_with_secure_string(): len(parameters_response).should.equal(3) + @mock_ssm def test_label_parameter_version(): client = boto3.client("ssm", region_name="us-east-1") test_parameter_name = "test" - client.put_parameter(Name=test_parameter_name, Description="A test parameter", Value="value", Type="String") + client.put_parameter( + Name=test_parameter_name, + Description="A test parameter", + Value="value", + Type="String", + ) - response = client.label_parameter_version(Name=test_parameter_name, Labels=["test-label"]) + response = client.label_parameter_version( + Name=test_parameter_name, Labels=["test-label"] + ) response["InvalidLabels"].should.equal([]) response["ParameterVersion"].should.equal(1) + @mock_ssm def test_label_parameter_version_with_specific_version(): client = boto3.client("ssm", region_name="us-east-1") test_parameter_name = "test" 
- client.put_parameter(Name=test_parameter_name, Description="A test parameter", Value="value", Type="String") + client.put_parameter( + Name=test_parameter_name, + Description="A test parameter", + Value="value", + Type="String", + ) - response = client.label_parameter_version(Name=test_parameter_name, ParameterVersion=1, Labels=["test-label"]) + response = client.label_parameter_version( + Name=test_parameter_name, ParameterVersion=1, Labels=["test-label"] + ) response["InvalidLabels"].should.equal([]) response["ParameterVersion"].should.equal(1) + @mock_ssm def test_label_parameter_version_twice(): client = boto3.client("ssm", region_name="us-east-1") test_parameter_name = "test" test_labels = ["test-label"] - client.put_parameter(Name=test_parameter_name, Description="A test parameter", Value="value", Type="String") + client.put_parameter( + Name=test_parameter_name, + Description="A test parameter", + Value="value", + Type="String", + ) - response = client.label_parameter_version(Name=test_parameter_name, ParameterVersion=1, Labels=test_labels) + response = client.label_parameter_version( + Name=test_parameter_name, ParameterVersion=1, Labels=test_labels + ) response["InvalidLabels"].should.equal([]) response["ParameterVersion"].should.equal(1) - response = client.label_parameter_version(Name=test_parameter_name, ParameterVersion=1, Labels=test_labels) + response = client.label_parameter_version( + Name=test_parameter_name, ParameterVersion=1, Labels=test_labels + ) response["InvalidLabels"].should.equal([]) response["ParameterVersion"].should.equal(1) @@ -979,6 +1005,7 @@ def test_label_parameter_version_twice(): len(response["Parameters"]).should.equal(1) response["Parameters"][0]["Labels"].should.equal(test_labels) + @mock_ssm def test_label_parameter_moving_versions(): client = boto3.client("ssm", region_name="us-east-1") @@ -992,13 +1019,17 @@ def test_label_parameter_moving_versions(): Description="A test parameter version %d" % i, Value="value-%d" % i, Type="String", - Overwrite=True + Overwrite=True, ) - response = client.label_parameter_version(Name=test_parameter_name, ParameterVersion=1, Labels=test_labels) + response = client.label_parameter_version( + Name=test_parameter_name, ParameterVersion=1, Labels=test_labels + ) response["InvalidLabels"].should.equal([]) response["ParameterVersion"].should.equal(1) - response = client.label_parameter_version(Name=test_parameter_name, ParameterVersion=2, Labels=test_labels) + response = client.label_parameter_version( + Name=test_parameter_name, ParameterVersion=2, Labels=test_labels + ) response["InvalidLabels"].should.equal([]) response["ParameterVersion"].should.equal(2) @@ -1016,6 +1047,7 @@ def test_label_parameter_moving_versions(): len(parameters_response).should.equal(3) + @mock_ssm def test_label_parameter_moving_versions_complex(): client = boto3.client("ssm", region_name="us-east-1") @@ -1028,13 +1060,21 @@ def test_label_parameter_moving_versions_complex(): Description="A test parameter version %d" % i, Value="value-%d" % i, Type="String", - Overwrite=True + Overwrite=True, ) - response = client.label_parameter_version(Name=test_parameter_name, ParameterVersion=1, Labels=["test-label1", "test-label2", "test-label3"]) + response = client.label_parameter_version( + Name=test_parameter_name, + ParameterVersion=1, + Labels=["test-label1", "test-label2", "test-label3"], + ) response["InvalidLabels"].should.equal([]) response["ParameterVersion"].should.equal(1) - response = 
client.label_parameter_version(Name=test_parameter_name, ParameterVersion=2, Labels=["test-label2", "test-label3"]) + response = client.label_parameter_version( + Name=test_parameter_name, + ParameterVersion=2, + Labels=["test-label2", "test-label3"], + ) response["InvalidLabels"].should.equal([]) response["ParameterVersion"].should.equal(2) @@ -1047,45 +1087,93 @@ def test_label_parameter_moving_versions_complex(): param["Value"].should.equal("value-%d" % index) param["Version"].should.equal(index + 1) param["Description"].should.equal("A test parameter version %d" % index) - labels = ["test-label2", "test-label3"] if param["Version"] == 2 else (["test-label1"] if param["Version"] == 1 else []) + labels = ( + ["test-label2", "test-label3"] + if param["Version"] == 2 + else (["test-label1"] if param["Version"] == 1 else []) + ) param["Labels"].should.equal(labels) len(parameters_response).should.equal(3) + @mock_ssm def test_label_parameter_version_exception_ten_labels_at_once(): client = boto3.client("ssm", region_name="us-east-1") test_parameter_name = "test" - test_labels = ["test-label1", "test-label2", "test-label3", "test-label4", "test-label5", "test-label6", "test-label7", "test-label8", "test-label9", "test-label10", "test-label11"] + test_labels = [ + "test-label1", + "test-label2", + "test-label3", + "test-label4", + "test-label5", + "test-label6", + "test-label7", + "test-label8", + "test-label9", + "test-label10", + "test-label11", + ] - client.put_parameter(Name=test_parameter_name, Description="A test parameter", Value="value", Type="String") + client.put_parameter( + Name=test_parameter_name, + Description="A test parameter", + Value="value", + Type="String", + ) client.label_parameter_version.when.called_with( Name="test", ParameterVersion=1, Labels=test_labels ).should.throw( ClientError, "An error occurred (ParameterVersionLabelLimitExceeded) when calling the LabelParameterVersion operation: " "A parameter version can have maximum 10 labels." - "Move one or more labels to another version and try again." + "Move one or more labels to another version and try again.", ) + @mock_ssm def test_label_parameter_version_exception_ten_labels_over_multiple_calls(): client = boto3.client("ssm", region_name="us-east-1") test_parameter_name = "test" - client.put_parameter(Name=test_parameter_name, Description="A test parameter", Value="value", Type="String") - client.label_parameter_version(Name=test_parameter_name, ParameterVersion=1, Labels=["test-label1", "test-label2", "test-label3", "test-label4", "test-label5"]) + client.put_parameter( + Name=test_parameter_name, + Description="A test parameter", + Value="value", + Type="String", + ) + client.label_parameter_version( + Name=test_parameter_name, + ParameterVersion=1, + Labels=[ + "test-label1", + "test-label2", + "test-label3", + "test-label4", + "test-label5", + ], + ) client.label_parameter_version.when.called_with( - Name="test", ParameterVersion=1, Labels=["test-label6", "test-label7", "test-label8", "test-label9", "test-label10", "test-label11"] + Name="test", + ParameterVersion=1, + Labels=[ + "test-label6", + "test-label7", + "test-label8", + "test-label9", + "test-label10", + "test-label11", + ], ).should.throw( ClientError, "An error occurred (ParameterVersionLabelLimitExceeded) when calling the LabelParameterVersion operation: " "A parameter version can have maximum 10 labels." - "Move one or more labels to another version and try again." 
+ "Move one or more labels to another version and try again.", ) + @mock_ssm def test_label_parameter_version_invalid_name(): client = boto3.client("ssm", region_name="us-east-1") @@ -1097,15 +1185,21 @@ def test_label_parameter_version_invalid_name(): ).should.throw( ClientError, "An error occurred (ParameterNotFound) when calling the LabelParameterVersion operation: " - "Parameter test not found." + "Parameter test not found.", ) + @mock_ssm def test_label_parameter_version_invalid_parameter_version(): client = boto3.client("ssm", region_name="us-east-1") test_parameter_name = "test" - client.put_parameter(Name=test_parameter_name, Description="A test parameter", Value="value", Type="String") + client.put_parameter( + Name=test_parameter_name, + Description="A test parameter", + Value="value", + Type="String", + ) response = client.label_parameter_version.when.called_with( Name=test_parameter_name, Labels=["test-label"], ParameterVersion=5 @@ -1113,37 +1207,52 @@ def test_label_parameter_version_invalid_parameter_version(): ClientError, "An error occurred (ParameterVersionNotFound) when calling the LabelParameterVersion operation: " "Systems Manager could not find version 5 of test. " - "Verify the version and try again." + "Verify the version and try again.", ) + @mock_ssm def test_label_parameter_version_invalid_label(): client = boto3.client("ssm", region_name="us-east-1") test_parameter_name = "test" - client.put_parameter(Name=test_parameter_name, Description="A test parameter", Value="value", Type="String") - response = client.label_parameter_version(Name=test_parameter_name, ParameterVersion=1, Labels=["awsabc"]) + client.put_parameter( + Name=test_parameter_name, + Description="A test parameter", + Value="value", + Type="String", + ) + response = client.label_parameter_version( + Name=test_parameter_name, ParameterVersion=1, Labels=["awsabc"] + ) response["InvalidLabels"].should.equal(["awsabc"]) - response = client.label_parameter_version(Name=test_parameter_name, ParameterVersion=1, Labels=["ssmabc"]) + response = client.label_parameter_version( + Name=test_parameter_name, ParameterVersion=1, Labels=["ssmabc"] + ) response["InvalidLabels"].should.equal(["ssmabc"]) - response = client.label_parameter_version(Name=test_parameter_name, ParameterVersion=1, Labels=["9abc"]) + response = client.label_parameter_version( + Name=test_parameter_name, ParameterVersion=1, Labels=["9abc"] + ) response["InvalidLabels"].should.equal(["9abc"]) - response = client.label_parameter_version(Name=test_parameter_name, ParameterVersion=1, Labels=["abc/123"]) + response = client.label_parameter_version( + Name=test_parameter_name, ParameterVersion=1, Labels=["abc/123"] + ) response["InvalidLabels"].should.equal(["abc/123"]) client.label_parameter_version.when.called_with( - Name=test_parameter_name, ParameterVersion=1, Labels=["a"*101] + Name=test_parameter_name, ParameterVersion=1, Labels=["a" * 101] ).should.throw( ClientError, "1 validation error detected: " "Value '[%s]' at 'labels' failed to satisfy constraint: " "Member must satisfy constraint: " - "[Member must have length less than or equal to 100, Member must have length greater than or equal to 1]" % ("a"*101) + "[Member must have length less than or equal to 100, Member must have length greater than or equal to 1]" + % ("a" * 101), ) - + @mock_ssm def test_get_parameter_history_with_label(): @@ -1161,7 +1270,9 @@ def test_get_parameter_history_with_label(): Overwrite=True, ) - client.label_parameter_version(Name=test_parameter_name, 
ParameterVersion=1, Labels=test_labels)
+    client.label_parameter_version(
+        Name=test_parameter_name, ParameterVersion=1, Labels=test_labels
+    )
 
     response = client.get_parameter_history(Name=test_parameter_name)
     parameters_response = response["Parameters"]
@@ -1177,6 +1288,7 @@ def test_get_parameter_history_with_label():
 
     len(parameters_response).should.equal(3)
 
+
 @mock_ssm
 def test_get_parameter_history_with_label_non_latest():
     client = boto3.client("ssm", region_name="us-east-1")
@@ -1193,7 +1305,9 @@ def test_get_parameter_history_with_label_non_latest():
             Overwrite=True,
         )
 
-    client.label_parameter_version(Name=test_parameter_name, ParameterVersion=2, Labels=test_labels)
+    client.label_parameter_version(
+        Name=test_parameter_name, ParameterVersion=2, Labels=test_labels
+    )
 
     response = client.get_parameter_history(Name=test_parameter_name)
     parameters_response = response["Parameters"]
@@ -1209,6 +1323,7 @@ def test_get_parameter_history_with_label_non_latest():
 
     len(parameters_response).should.equal(3)
 
+
 @mock_ssm
 def test_get_parameter_history_with_label_latest_assumed():
     client = boto3.client("ssm", region_name="us-east-1")
@@ -1241,6 +1356,7 @@ def test_get_parameter_history_with_label_latest_assumed():
 
     len(parameters_response).should.equal(3)
 
+
 @mock_ssm
 def test_get_parameter_history_missing_parameter():
     client = boto3.client("ssm", region_name="us-east-1")

From 2e0bc1aff302608680bea62c424b4b2cc8e9af84 Mon Sep 17 00:00:00 2001
From: Asher Foa <1268088+asherf@users.noreply.github.com>
Date: Wed, 11 Mar 2020 12:45:01 -0700
Subject: [PATCH 131/658] Loosen jinja2 requirement.

This allows repos consuming moto to use the latest jinja2 2.11.x patched version (currently 2.11.1) w/o breaking moto's python 2 support.
See https://github.com/spulec/moto/pull/2776
---
 setup.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/setup.py b/setup.py
index 9bb7cf522578..193761191083 100755
--- a/setup.py
+++ b/setup.py
@@ -29,7 +29,7 @@ def get_version():
 install_requires = [
     "setuptools==44.0.0",
-    "Jinja2==2.11.0",
+    "Jinja2<3.0.0,>=2.10.1",
     "boto>=2.36.0",
     "boto3>=1.9.201",
     "botocore>=1.12.201",

From 649b497f71cce95a6474a3ff6f3c9c3339efb68f Mon Sep 17 00:00:00 2001
From: Laurence de Bruxelles
Date: Thu, 12 Mar 2020 09:38:02 +0000
Subject: [PATCH 132/658] Loosen idna requirement

requests 2.23.0 allows idna<3 [1]

[1] psf/requests@c46f55b
---
 setup.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/setup.py b/setup.py
index 193761191083..79b9875ee4bf 100755
--- a/setup.py
+++ b/setup.py
@@ -47,7 +47,7 @@ def get_version():
     "jsondiff>=1.1.2",
     "aws-xray-sdk!=0.96,>=0.93",
     "responses>=0.9.0",
-    "idna<2.9,>=2.5",
+    "idna<3,>=2.5",
     "cfn-lint>=0.4.0",
     "sshpubkeys>=3.1.0,<4.0",
     "zipp==0.6.0",

From 3802767817139ca1d287c15cc266c4e114f5ddeb Mon Sep 17 00:00:00 2001
From: Bert Blommers
Date: Thu, 12 Mar 2020 12:25:31 +0000
Subject: [PATCH 133/658] S3 - Add test case to showcase bug when downloading
 large files

---
 moto/s3/models.py        | 11 +++++-
 moto/s3/responses.py     | 15 ++++++-
 moto/s3/utils.py         |  4 +-
 tests/test_s3/test_s3.py | 84 ++++++++++++++++++++++++++++++++++++++++
 4 files changed, 111 insertions(+), 3 deletions(-)

diff --git a/moto/s3/models.py b/moto/s3/models.py
index 5a665e27efeb..67b53b984ee7 100644
--- a/moto/s3/models.py
+++ b/moto/s3/models.py
@@ -12,6 +12,7 @@
 import random
 import string
 import tempfile
+import threading
 import sys
 import time
 import uuid
@@ -110,6 +111,7 @@ def __init__(
         self._value_buffer = tempfile.SpooledTemporaryFile(max_size=max_buffer_size)
self._max_buffer_size = max_buffer_size self.value = value + self.lock = threading.Lock() @property def version_id(self): @@ -117,8 +119,14 @@ def version_id(self): @property def value(self): + self.lock.acquire() + print("===>value") self._value_buffer.seek(0) - return self._value_buffer.read() + print("===>seek") + r = self._value_buffer.read() + print("===>read") + self.lock.release() + return r @value.setter def value(self, new_value): @@ -1319,6 +1327,7 @@ def append_to_key(self, bucket_name, key_name, value): return key def get_key(self, bucket_name, key_name, version_id=None, part_number=None): + print("get_key("+str(bucket_name)+","+str(key_name)+","+str(version_id)+","+str(part_number)+")") key_name = clean_key_name(key_name) bucket = self.get_bucket(bucket_name) key = None diff --git a/moto/s3/responses.py b/moto/s3/responses.py index b74be9a63442..15b1d1670fbc 100644 --- a/moto/s3/responses.py +++ b/moto/s3/responses.py @@ -2,6 +2,7 @@ import re import sys +import threading import six from botocore.awsrequest import AWSPreparedRequest @@ -150,6 +151,7 @@ def __init__(self, backend): self.path = "" self.data = {} self.headers = {} + self.lock = threading.Lock() @property def should_autoescape(self): @@ -857,6 +859,7 @@ def _bucket_response_delete_keys(self, request, body, bucket_name): def _handle_range_header(self, request, headers, response_content): response_headers = {} length = len(response_content) + print("Length: " + str(length) + " Range: " + str(request.headers.get("range"))) last = length - 1 _, rspec = request.headers.get("range").split("=") if "," in rspec: @@ -874,6 +877,7 @@ def toint(i): else: return 400, response_headers, "" if begin < 0 or end > last or begin > min(end, last): + print(str(begin)+ " < 0 or " + str(end) + " > " + str(last) + " or " + str(begin) + " > min("+str(end)+","+str(last)+")") return 416, response_headers, "" response_headers["content-range"] = "bytes {0}-{1}/{2}".format( begin, end, length @@ -903,14 +907,20 @@ def key_or_control_response(self, request, full_url, headers): response_content = response else: status_code, response_headers, response_content = response + print("response received: " + str(len(response_content))) + print(request.headers) if status_code == 200 and "range" in request.headers: - return self._handle_range_header( + self.lock.acquire() + r = self._handle_range_header( request, response_headers, response_content ) + self.lock.release() + return r return status_code, response_headers, response_content def _control_response(self, request, full_url, headers): + print("_control_response") parsed_url = urlparse(full_url) query = parse_qs(parsed_url.query, keep_blank_values=True) method = request.method @@ -1058,12 +1068,14 @@ def _key_response(self, request, full_url, headers): ) def _key_response_get(self, bucket_name, query, key_name, headers): + print("_key_response_get("+str(key_name)+","+str(headers)+")") self._set_action("KEY", "GET", query) self._authenticate_and_authorize_s3_action() response_headers = {} if query.get("uploadId"): upload_id = query["uploadId"][0] + print("UploadID: " + str(upload_id)) parts = self.backend.list_multipart(bucket_name, upload_id) template = self.response_template(S3_MULTIPART_LIST_RESPONSE) return ( @@ -1095,6 +1107,7 @@ def _key_response_get(self, bucket_name, query, key_name, headers): response_headers.update(key.metadata) response_headers.update(key.response_dict) + print("returning 200, " + str(headers) + ", " + str(len(key.value)) + " ( " + str(key_name) + ")") return 200, 
response_headers, key.value def _key_response_put(self, request, body, bucket_name, query, key_name, headers): diff --git a/moto/s3/utils.py b/moto/s3/utils.py index e22b6b860262..50ff1cf34e2a 100644 --- a/moto/s3/utils.py +++ b/moto/s3/utils.py @@ -104,7 +104,9 @@ def __setitem__(self, key, value): def get(self, key, default=None): try: return self[key] - except (KeyError, IndexError): + except (KeyError, IndexError) as e: + print("Error retrieving " + str(key)) + print(e) pass return default diff --git a/tests/test_s3/test_s3.py b/tests/test_s3/test_s3.py index 48655ee17385..2eef9ef826b3 100644 --- a/tests/test_s3/test_s3.py +++ b/tests/test_s3/test_s3.py @@ -4393,3 +4393,87 @@ def test_s3_config_dict(): assert not logging_bucket["supplementaryConfiguration"].get( "BucketTaggingConfiguration" ) + + +@mock_s3 +def test_delete_downloaded_file(): + # SET UP + filename = '...' + file = open(filename, 'rb') + uploader = PdfFileUploader(file) + boto3.client('s3').create_bucket(Bucket=uploader.bucket_name()) + uploader.upload() + print("================\nUPLOADED\n=================") + # DOWNLOAD + # the following two lines are basically + # boto3.client('s3').download_file(bucket_name, file_name, local_path) + # where bucket_name, file_name and local_path are retrieved from PdfFileUploader + # e.g. boto3.client('s3').download_file("bucket_name", "asdf.pdf", "/tmp/asdf.pdf") + downloader = PdfFileDownloader(uploader.full_bucket_file_name()) + downloader.download() + + downloader.delete_downloaded_file() + + print("Done!") + + +from pathlib import Path +import re +import os +class PdfFileDownloader: + def __init__(self, full_bucket_file_name): + self.bucket_name, self.file_name = self.extract(full_bucket_file_name) + self.s3 = boto3.client('s3') + + def download(self): + try: + self.s3.download_file(self.bucket_name, self.file_name, self.local_path()) + + return self.local_path() + except ClientError as exc: + print("=======") + print(exc) + raise exc + + def local_path(self): + return '/tmp/' + self.file_name.replace('/', '') + + def delete_downloaded_file(self): + if Path(self.local_path()).is_file(): + print("Removing " + str(self.local_path())) + os.remove(self.local_path()) + + def file(self): + return open(self.local_path(), 'rb') + + def extract(self, full_bucket_file_name): + match = re.search(r'([\.a-zA-Z_-]+)\/(.*)', full_bucket_file_name) + + if match and len(match.groups()) == 2: + return (match.groups()[0], match.groups()[1]) + else: + raise RuntimeError(f"Cannot determine bucket and file name for {full_bucket_file_name}") + + +import binascii +class PdfFileUploader: + def __init__(self, file): + self.file = file + date = datetime.datetime.now().strftime('%Y%m%d%H%M%S') + random_hex = binascii.b2a_hex(os.urandom(16)).decode('ascii') + self.bucket_file_name = f"{date}_{random_hex}.pdf" + + def upload(self): + self.file.seek(0) + boto3.client('s3').upload_fileobj(self.file, self.bucket_name(), self.bucket_file_name) + + return (self.original_file_name(), self.full_bucket_file_name()) + + def original_file_name(self): + return os.path.basename(self.file.name) + + def bucket_name(self): + return 'test_bucket' #os.environ['AWS_BUCKET_NAME'] + + def full_bucket_file_name(self): + return f"{self.bucket_name()}/{self.bucket_file_name}" From ad5314ad0686e76353eed6415267ae355bd4d795 Mon Sep 17 00:00:00 2001 From: mzgierski Date: Wed, 15 May 2019 17:04:31 +0200 Subject: [PATCH 134/658] Enable the test that AWS-Batch describe_jobs fails at. 
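
The test previously carried an expected_failure marker: describe_jobs breaks
for jobs that have not started yet, because describe() serialises startedAt
unconditionally. The follow-up commit guards that field for jobs still in the
SUBMITTED/PENDING/RUNNABLE/STARTING states. A minimal standalone sketch of the
guarded serialisation (datetime2int is assumed to behave like moto's helper of
the same name in moto/batch/models.py; the describe function below is an
illustrative stand-in, not the real model method):

    import datetime
    import time

    def datetime2int(date):
        # Assumed equivalent of moto's timestamp conversion helper.
        return int(time.mktime(date.timetuple()))

    def describe(job_state, job_started_at):
        result = {"status": job_state}
        # Only emit startedAt once the job has actually started; before
        # that, job_started_at carries no meaningful value.
        if job_state not in ("SUBMITTED", "PENDING", "RUNNABLE", "STARTING"):
            result["startedAt"] = datetime2int(job_started_at)
        return result

    print(describe("RUNNABLE", None))                    # no startedAt yet
    print(describe("RUNNING", datetime.datetime.now()))  # includes startedAt
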
--- tests/test_batch/test_batch.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tests/test_batch/test_batch.py b/tests/test_batch/test_batch.py index 141d6b343328..de4b349e08fe 100644 --- a/tests/test_batch/test_batch.py +++ b/tests/test_batch/test_batch.py @@ -692,7 +692,8 @@ def test_submit_job_by_name(): # SLOW TESTS -@expected_failure + +# @expected_failure @mock_logs @mock_ec2 @mock_ecs From bfeaf73109c8f5c1712badb8f785022e357bf95b Mon Sep 17 00:00:00 2001 From: mzgierski Date: Wed, 15 May 2019 17:05:45 +0200 Subject: [PATCH 135/658] Fix the AWS-Batch describe_jobs problem with not-yet-started jobs. --- moto/batch/models.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/moto/batch/models.py b/moto/batch/models.py index fc35f2997509..a5986b7a44b3 100644 --- a/moto/batch/models.py +++ b/moto/batch/models.py @@ -338,10 +338,11 @@ def describe(self): "jobId": self.job_id, "jobName": self.job_name, "jobQueue": self.job_queue.arn, - "startedAt": datetime2int(self.job_started_at), "status": self.job_state, "dependsOn": [], } + if result['status'] not in ['SUBMITTED', 'PENDING', 'RUNNABLE', 'STARTING']: + result['startedAt'] = datetime2int(self.job_started_at) if self.job_stopped: result["stoppedAt"] = datetime2int(self.job_stopped_at) result["container"] = {} From bb5a54ca4b42a77f98c276009552087a3926a031 Mon Sep 17 00:00:00 2001 From: Bert Blommers Date: Thu, 12 Mar 2020 13:37:46 +0000 Subject: [PATCH 136/658] Batch - Fix tests --- moto/batch/models.py | 6 ++++-- tests/test_batch/test_batch.py | 37 ++++++++-------------------------- 2 files changed, 12 insertions(+), 31 deletions(-) diff --git a/moto/batch/models.py b/moto/batch/models.py index a5986b7a44b3..08f4cbdb2055 100644 --- a/moto/batch/models.py +++ b/moto/batch/models.py @@ -301,7 +301,7 @@ def __init__(self, name, job_def, job_queue, log_backend, container_overrides): self.job_name = name self.job_id = str(uuid.uuid4()) self.job_definition = job_def - self.container_overrides = container_overrides + self.container_overrides = container_overrides or {} self.job_queue = job_queue self.job_state = "SUBMITTED" # One of SUBMITTED | PENDING | RUNNABLE | STARTING | RUNNING | SUCCEEDED | FAILED self.job_queue.jobs.append(self) @@ -317,6 +317,7 @@ def __init__(self, name, job_def, job_queue, log_backend, container_overrides): self.docker_client = docker.from_env() self._log_backend = log_backend + self.log_stream_name = None # Unfortunately mocking replaces this method w/o fallback enabled, so we # need to replace it if we detect it's been mocked @@ -504,7 +505,8 @@ def run(self): for line in logs_stdout + logs_stderr: date, line = line.split(" ", 1) date = dateutil.parser.parse(date) - date = int(date.timestamp()) + # TODO: Replace with int(date.timestamp()) once we yeet Python2 out of the window + date = int((time.mktime(date.timetuple()) + date.microsecond / 1000000.0)) logs.append({"timestamp": date, "message": line.strip()}) # Send to cloudwatch diff --git a/tests/test_batch/test_batch.py b/tests/test_batch/test_batch.py index de4b349e08fe..6eedf452c5ab 100644 --- a/tests/test_batch/test_batch.py +++ b/tests/test_batch/test_batch.py @@ -10,17 +10,6 @@ import nose -def expected_failure(test): - @functools.wraps(test) - def inner(*args, **kwargs): - try: - test(*args, **kwargs) - except Exception as err: - raise nose.SkipTest - - return inner - - DEFAULT_REGION = "eu-central-1" @@ -693,7 +682,6 @@ def test_submit_job_by_name(): # SLOW TESTS -# @expected_failure @mock_logs @mock_ec2 @mock_ecs 
@@ -721,13 +709,13 @@ def test_submit_job(): queue_arn = resp["jobQueueArn"] resp = batch_client.register_job_definition( - jobDefinitionName="sleep10", + jobDefinitionName="sayhellotomylittlefriend", type="container", containerProperties={ - "image": "busybox", + "image": "busybox:latest", "vcpus": 1, "memory": 128, - "command": ["sleep", "10"], + "command": ["echo", "hello"], }, ) job_def_arn = resp["jobDefinitionArn"] @@ -741,13 +729,6 @@ def test_submit_job(): while datetime.datetime.now() < future: resp = batch_client.describe_jobs(jobs=[job_id]) - print( - "{0}:{1} {2}".format( - resp["jobs"][0]["jobName"], - resp["jobs"][0]["jobId"], - resp["jobs"][0]["status"], - ) - ) if resp["jobs"][0]["status"] == "FAILED": raise RuntimeError("Batch job failed") @@ -764,10 +745,9 @@ def test_submit_job(): resp = logs_client.get_log_events( logGroupName="/aws/batch/job", logStreamName=ls_name ) - len(resp["events"]).should.be.greater_than(5) + [event['message'] for event in resp["events"]].should.equal(['hello']) -@expected_failure @mock_logs @mock_ec2 @mock_ecs @@ -795,13 +775,13 @@ def test_list_jobs(): queue_arn = resp["jobQueueArn"] resp = batch_client.register_job_definition( - jobDefinitionName="sleep10", + jobDefinitionName="sleep5", type="container", containerProperties={ - "image": "busybox", + "image": "busybox:latest", "vcpus": 1, "memory": 128, - "command": ["sleep", "10"], + "command": ["sleep", "5"], }, ) job_def_arn = resp["jobDefinitionArn"] @@ -844,7 +824,6 @@ def test_list_jobs(): len(resp_finished_jobs2["jobSummaryList"]).should.equal(2) -@expected_failure @mock_logs @mock_ec2 @mock_ecs @@ -875,7 +854,7 @@ def test_terminate_job(): jobDefinitionName="sleep10", type="container", containerProperties={ - "image": "busybox", + "image": "busybox:latest", "vcpus": 1, "memory": 128, "command": ["sleep", "10"], From 1b031aeeb0a2816bc153d64d73d47251ec642465 Mon Sep 17 00:00:00 2001 From: Bert Blommers Date: Thu, 12 Mar 2020 14:07:34 +0000 Subject: [PATCH 137/658] Linting --- moto/batch/models.py | 8 +++++--- tests/test_batch/test_batch.py | 3 ++- 2 files changed, 7 insertions(+), 4 deletions(-) diff --git a/moto/batch/models.py b/moto/batch/models.py index 08f4cbdb2055..95ad6478961e 100644 --- a/moto/batch/models.py +++ b/moto/batch/models.py @@ -342,8 +342,8 @@ def describe(self): "status": self.job_state, "dependsOn": [], } - if result['status'] not in ['SUBMITTED', 'PENDING', 'RUNNABLE', 'STARTING']: - result['startedAt'] = datetime2int(self.job_started_at) + if result["status"] not in ["SUBMITTED", "PENDING", "RUNNABLE", "STARTING"]: + result["startedAt"] = datetime2int(self.job_started_at) if self.job_stopped: result["stoppedAt"] = datetime2int(self.job_stopped_at) result["container"] = {} @@ -506,7 +506,9 @@ def run(self): date, line = line.split(" ", 1) date = dateutil.parser.parse(date) # TODO: Replace with int(date.timestamp()) once we yeet Python2 out of the window - date = int((time.mktime(date.timetuple()) + date.microsecond / 1000000.0)) + date = int( + (time.mktime(date.timetuple()) + date.microsecond / 1000000.0) + ) logs.append({"timestamp": date, "message": line.strip()}) # Send to cloudwatch diff --git a/tests/test_batch/test_batch.py b/tests/test_batch/test_batch.py index 6eedf452c5ab..4b75fb8577eb 100644 --- a/tests/test_batch/test_batch.py +++ b/tests/test_batch/test_batch.py @@ -682,6 +682,7 @@ def test_submit_job_by_name(): # SLOW TESTS + @mock_logs @mock_ec2 @mock_ecs @@ -745,7 +746,7 @@ def test_submit_job(): resp = logs_client.get_log_events( 
logGroupName="/aws/batch/job", logStreamName=ls_name ) - [event['message'] for event in resp["events"]].should.equal(['hello']) + [event["message"] for event in resp["events"]].should.equal(["hello"]) @mock_logs From b74625db0cf6676bd57cd09e610a202fe176117d Mon Sep 17 00:00:00 2001 From: Tomoya Iwata Date: Sun, 13 Jan 2019 17:38:38 +0900 Subject: [PATCH 138/658] add support for dynamodb transact_get_items --- IMPLEMENTATION_COVERAGE.md | 4 +- moto/dynamodb2/responses.py | 69 ++++++ tests/test_dynamodb2/test_dynamodb.py | 308 +++++++++++++++++++++++++- 3 files changed, 378 insertions(+), 3 deletions(-) diff --git a/IMPLEMENTATION_COVERAGE.md b/IMPLEMENTATION_COVERAGE.md index a22cc3bfb566..705618524723 100644 --- a/IMPLEMENTATION_COVERAGE.md +++ b/IMPLEMENTATION_COVERAGE.md @@ -2237,7 +2237,7 @@ - [ ] verify_trust ## dynamodb -17% implemented +24% implemented - [ ] batch_get_item - [ ] batch_write_item - [ ] create_backup @@ -2268,7 +2268,7 @@ - [ ] restore_table_to_point_in_time - [X] scan - [ ] tag_resource -- [ ] transact_get_items +- [X] transact_get_items - [ ] transact_write_items - [ ] untag_resource - [ ] update_continuous_backups diff --git a/moto/dynamodb2/responses.py b/moto/dynamodb2/responses.py index d3767c3fdda3..c9b526121b77 100644 --- a/moto/dynamodb2/responses.py +++ b/moto/dynamodb2/responses.py @@ -10,6 +10,9 @@ from .models import dynamodb_backends, dynamo_json_dump +TRANSACTION_MAX_ITEMS = 10 + + def has_empty_keys_or_values(_dict): if _dict == "": return True @@ -828,3 +831,69 @@ def describe_time_to_live(self): ttl_spec = self.dynamodb_backend.describe_ttl(name) return json.dumps({"TimeToLiveDescription": ttl_spec}) + + def transact_get_items(self): + transact_items = self.body['TransactItems'] + responses = list() + + if len(transact_items) > TRANSACTION_MAX_ITEMS: + msg = "1 validation error detected: Value '[" + err_list = list() + request_id = 268435456 + for _ in transact_items: + request_id += 1 + hex_request_id = format(request_id, 'x') + err_list.append('com.amazonaws.dynamodb.v20120810.TransactGetItem@%s' % hex_request_id) + msg += ', '.join(err_list) + msg += "'] at 'transactItems' failed to satisfy constraint: " \ + "Member must have length less than or equal to %s" % TRANSACTION_MAX_ITEMS + + return self.error('ValidationException', msg) + + dedup_list = [i for n, i in enumerate(transact_items) if i not in transact_items[n + 1:]] + if len(transact_items) != len(dedup_list): + er = 'com.amazon.coral.validate#ValidationException' + return self.error(er, 'Transaction request cannot include multiple operations on one item') + + ret_consumed_capacity = self.body.get('ReturnConsumedCapacity', 'NONE') + consumed_capacity = dict() + + for transact_item in transact_items: + + table_name = transact_item['Get']['TableName'] + key = transact_item['Get']['Key'] + try: + item = self.dynamodb_backend.get_item(table_name, key) + except ValueError as e: + er = 'com.amazonaws.dynamodb.v20111205#ResourceNotFoundException' + return self.error(er, 'Requested resource not found') + + if not item: + continue + + item_describe = item.describe_attrs(False) + responses.append(item_describe) + + table_capacity = consumed_capacity.get(table_name, {}) + table_capacity['TableName'] = table_name + capacity_units = table_capacity.get('CapacityUnits', 0) + 2.0 + table_capacity['CapacityUnits'] = capacity_units + read_capacity_units = table_capacity.get('ReadCapacityUnits', 0) + 2.0 + table_capacity['ReadCapacityUnits'] = read_capacity_units + consumed_capacity[table_name] = 
table_capacity + + if ret_consumed_capacity == 'INDEXES': + table_capacity['Table'] = { + 'CapacityUnits': capacity_units, + 'ReadCapacityUnits': read_capacity_units + } + + result = dict() + result.update({ + 'Responses': responses}) + if ret_consumed_capacity != 'NONE': + result.update({ + 'ConsumedCapacity': [v for v in consumed_capacity.values()] + }) + + return dynamo_json_dump(result) diff --git a/tests/test_dynamodb2/test_dynamodb.py b/tests/test_dynamodb2/test_dynamodb.py index 428b58f8109a..e439eeeb991c 100644 --- a/tests/test_dynamodb2/test_dynamodb.py +++ b/tests/test_dynamodb2/test_dynamodb.py @@ -6,8 +6,9 @@ import boto import boto3 from boto3.dynamodb.conditions import Attr, Key -import sure # noqa +import re import requests +import sure # noqa from moto import mock_dynamodb2, mock_dynamodb2_deprecated from moto.dynamodb2 import dynamodb_backend2, dynamodb_backends2 from boto.exception import JSONResponseError @@ -3792,3 +3793,308 @@ def test_query_catches_when_no_filters(): ex.exception.response["Error"]["Message"].should.equal( "Either KeyConditions or QueryFilter should be present" ) + + +@mock_dynamodb2 +def test_invalid_transact_get_items(): + + dynamodb = boto3.resource('dynamodb', region_name='us-east-1') + dynamodb.create_table( + TableName='test1', + KeySchema=[{'AttributeName': 'id', 'KeyType': 'HASH'}], + AttributeDefinitions=[{'AttributeName': 'id', 'AttributeType': 'S'}], + ProvisionedThroughput={'ReadCapacityUnits': 5, 'WriteCapacityUnits': 5} + ) + table = dynamodb.Table('test1') + table.put_item(Item={ + 'id': '1', + 'val': '1', + }) + + table.put_item(Item={ + 'id': '1', + 'val': '2', + }) + + client = boto3.client('dynamodb', region_name='us-east-1') + + with assert_raises(ClientError) as ex: + client.transact_get_items(TransactItems=[ + { + 'Get': { + 'Key': { + 'id': {'S': '1'}, + }, + 'TableName': 'test1' + } + }, + { + 'Get': { + 'Key': { + 'id': {'S': '1'}, + }, + 'TableName': 'test1' + } + } + ]) + + ex.exception.response['Error']['Code'].should.equal('ValidationException') + ex.exception.response['ResponseMetadata']['HTTPStatusCode'].should.equal(400) + ex.exception.response['Error']['Message'].should.equal( + 'Transaction request cannot include multiple operations on one item' + ) + + with assert_raises(ClientError) as ex: + client.transact_get_items(TransactItems=[ + {'Get': {'Key': {'id': {'S': '1'}}, 'TableName': 'test1'}}, + {'Get': {'Key': {'id': {'S': '1'}}, 'TableName': 'test1'}}, + {'Get': {'Key': {'id': {'S': '1'}}, 'TableName': 'test1'}}, + {'Get': {'Key': {'id': {'S': '1'}}, 'TableName': 'test1'}}, + {'Get': {'Key': {'id': {'S': '1'}}, 'TableName': 'test1'}}, + {'Get': {'Key': {'id': {'S': '1'}}, 'TableName': 'test1'}}, + {'Get': {'Key': {'id': {'S': '1'}}, 'TableName': 'test1'}}, + {'Get': {'Key': {'id': {'S': '1'}}, 'TableName': 'test1'}}, + {'Get': {'Key': {'id': {'S': '1'}}, 'TableName': 'test1'}}, + {'Get': {'Key': {'id': {'S': '1'}}, 'TableName': 'test1'}}, + {'Get': {'Key': {'id': {'S': '1'}}, 'TableName': 'test1'}}, + ]) + + ex.exception.response['ResponseMetadata']['HTTPStatusCode'].should.equal(400) + ex.exception.response['Error']['Message'].should.match( + r'failed to satisfy constraint: Member must have length less than or equal to 10', re.I + ) + + with assert_raises(ClientError) as ex: + client.transact_get_items(TransactItems=[ + { + 'Get': { + 'Key': { + 'id': {'S': '1'}, + }, + 'TableName': 'test1' + } + }, + { + 'Get': { + 'Key': { + 'id': {'S': '1'}, + }, + 'TableName': 'non_exists_table' + } + } + ]) + + 
ex.exception.response['Error']['Code'].should.equal('ResourceNotFoundException') + ex.exception.response['ResponseMetadata']['HTTPStatusCode'].should.equal(400) + ex.exception.response['Error']['Message'].should.equal( + 'Requested resource not found' + ) + + +@mock_dynamodb2 +def test_valid_transact_get_items(): + dynamodb = boto3.resource('dynamodb', region_name='us-east-1') + dynamodb.create_table( + TableName='test1', + KeySchema=[ + {'AttributeName': 'id', 'KeyType': 'HASH'}, + {'AttributeName': 'sort_key', 'KeyType': 'RANGE'}, + ], + AttributeDefinitions=[ + {'AttributeName': 'id', 'AttributeType': 'S'}, + {'AttributeName': 'sort_key', 'AttributeType': 'S'}, + ], + ProvisionedThroughput={'ReadCapacityUnits': 5, 'WriteCapacityUnits': 5} + ) + table1 = dynamodb.Table('test1') + table1.put_item(Item={ + 'id': '1', + 'sort_key': '1', + }) + + table1.put_item(Item={ + 'id': '1', + 'sort_key': '2', + }) + + dynamodb.create_table( + TableName='test2', + KeySchema=[ + {'AttributeName': 'id', 'KeyType': 'HASH'}, + {'AttributeName': 'sort_key', 'KeyType': 'RANGE'}, + ], + AttributeDefinitions=[ + {'AttributeName': 'id', 'AttributeType': 'S'}, + {'AttributeName': 'sort_key', 'AttributeType': 'S'}, + ], + ProvisionedThroughput={'ReadCapacityUnits': 5, 'WriteCapacityUnits': 5} + ) + table2 = dynamodb.Table('test2') + table2.put_item(Item={ + 'id': '1', + 'sort_key': '1', + }) + + client = boto3.client('dynamodb', region_name='us-east-1') + res = client.transact_get_items(TransactItems=[ + { + 'Get': { + 'Key': { + 'id': {'S': '1'}, + 'sort_key': {'S': '1'} + }, + 'TableName': 'test1' + } + }, + { + 'Get': { + 'Key': { + 'id': {'S': 'non_exists_key'}, + 'sort_key': {'S': '2'} + }, + 'TableName': 'test1' + } + } + ]) + res['Responses'][0]['Item'].should.equal({ + 'id': {'S': '1'}, + 'sort_key': {'S': '1'} + }) + len(res['Responses']).should.equal(1) + + res = client.transact_get_items(TransactItems=[ + { + 'Get': { + 'Key': { + 'id': {'S': '1'}, + 'sort_key': {'S': '1'} + }, + 'TableName': 'test1' + } + }, + { + 'Get': { + 'Key': { + 'id': {'S': '1'}, + 'sort_key': {'S': '2'} + }, + 'TableName': 'test1' + } + }, + { + 'Get': { + 'Key': { + 'id': {'S': '1'}, + 'sort_key': {'S': '1'} + }, + 'TableName': 'test2' + } + }, + ]) + + res['Responses'][0]['Item'].should.equal({ + 'id': {'S': '1'}, + 'sort_key': {'S': '1'} + }) + + res['Responses'][1]['Item'].should.equal({ + 'id': {'S': '1'}, + 'sort_key': {'S': '2'} + }) + + res['Responses'][2]['Item'].should.equal({ + 'id': {'S': '1'}, + 'sort_key': {'S': '1'} + }) + + res = client.transact_get_items(TransactItems=[ + { + 'Get': { + 'Key': { + 'id': {'S': '1'}, + 'sort_key': {'S': '1'} + }, + 'TableName': 'test1' + } + }, + { + 'Get': { + 'Key': { + 'id': {'S': '1'}, + 'sort_key': {'S': '2'} + }, + 'TableName': 'test1' + } + }, + { + 'Get': { + 'Key': { + 'id': {'S': '1'}, + 'sort_key': {'S': '1'} + }, + 'TableName': 'test2' + } + }, + ], ReturnConsumedCapacity='TOTAL') + + res['ConsumedCapacity'][0].should.equal({ + 'TableName': 'test1', + 'CapacityUnits': 4.0, + 'ReadCapacityUnits': 4.0 + }) + + res['ConsumedCapacity'][1].should.equal({ + 'TableName': 'test2', + 'CapacityUnits': 2.0, + 'ReadCapacityUnits': 2.0 + }) + + res = client.transact_get_items(TransactItems=[ + { + 'Get': { + 'Key': { + 'id': {'S': '1'}, + 'sort_key': {'S': '1'} + }, + 'TableName': 'test1' + } + }, + { + 'Get': { + 'Key': { + 'id': {'S': '1'}, + 'sort_key': {'S': '2'} + }, + 'TableName': 'test1' + } + }, + { + 'Get': { + 'Key': { + 'id': {'S': '1'}, + 'sort_key': {'S': '1'} + }, 
+ 'TableName': 'test2' + } + }, + ], ReturnConsumedCapacity='INDEXES') + + res['ConsumedCapacity'][0].should.equal({ + 'TableName': 'test1', + 'CapacityUnits': 4.0, + 'ReadCapacityUnits': 4.0, + 'Table': { + 'CapacityUnits': 4.0, + 'ReadCapacityUnits': 4.0, + } + }) + + res['ConsumedCapacity'][1].should.equal({ + 'TableName': 'test2', + 'CapacityUnits': 2.0, + 'ReadCapacityUnits': 2.0, + 'Table': { + 'CapacityUnits': 2.0, + 'ReadCapacityUnits': 2.0, + } + }) From 5a7da61833222c603113c5be03d3e117494efb8b Mon Sep 17 00:00:00 2001 From: Tomoya Iwata Date: Sun, 13 Jan 2019 18:32:27 +0900 Subject: [PATCH 139/658] remove unused local variable --- moto/dynamodb2/responses.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/moto/dynamodb2/responses.py b/moto/dynamodb2/responses.py index c9b526121b77..90cbcedda323 100644 --- a/moto/dynamodb2/responses.py +++ b/moto/dynamodb2/responses.py @@ -864,7 +864,7 @@ def transact_get_items(self): key = transact_item['Get']['Key'] try: item = self.dynamodb_backend.get_item(table_name, key) - except ValueError as e: + except ValueError: er = 'com.amazonaws.dynamodb.v20111205#ResourceNotFoundException' return self.error(er, 'Requested resource not found') From caebe222d7846b87139a9eec08a1376db9c4d0d6 Mon Sep 17 00:00:00 2001 From: Bert Blommers Date: Thu, 12 Mar 2020 14:24:53 +0000 Subject: [PATCH 140/658] DynamoDB - Transact_get_items - Remove error condition --- moto/dynamodb2/responses.py | 7 +---- tests/test_dynamodb2/test_dynamodb.py | 40 ++------------------------- 2 files changed, 3 insertions(+), 44 deletions(-) diff --git a/moto/dynamodb2/responses.py b/moto/dynamodb2/responses.py index 90cbcedda323..a5e465a1abaa 100644 --- a/moto/dynamodb2/responses.py +++ b/moto/dynamodb2/responses.py @@ -10,7 +10,7 @@ from .models import dynamodb_backends, dynamo_json_dump -TRANSACTION_MAX_ITEMS = 10 +TRANSACTION_MAX_ITEMS = 25 def has_empty_keys_or_values(_dict): @@ -850,11 +850,6 @@ def transact_get_items(self): return self.error('ValidationException', msg) - dedup_list = [i for n, i in enumerate(transact_items) if i not in transact_items[n + 1:]] - if len(transact_items) != len(dedup_list): - er = 'com.amazon.coral.validate#ValidationException' - return self.error(er, 'Transaction request cannot include multiple operations on one item') - ret_consumed_capacity = self.body.get('ReturnConsumedCapacity', 'NONE') consumed_capacity = dict() diff --git a/tests/test_dynamodb2/test_dynamodb.py b/tests/test_dynamodb2/test_dynamodb.py index e439eeeb991c..cfe071f447c3 100644 --- a/tests/test_dynamodb2/test_dynamodb.py +++ b/tests/test_dynamodb2/test_dynamodb.py @@ -3820,48 +3820,12 @@ def test_invalid_transact_get_items(): with assert_raises(ClientError) as ex: client.transact_get_items(TransactItems=[ - { - 'Get': { - 'Key': { - 'id': {'S': '1'}, - }, - 'TableName': 'test1' - } - }, - { - 'Get': { - 'Key': { - 'id': {'S': '1'}, - }, - 'TableName': 'test1' - } - } - ]) - - ex.exception.response['Error']['Code'].should.equal('ValidationException') - ex.exception.response['ResponseMetadata']['HTTPStatusCode'].should.equal(400) - ex.exception.response['Error']['Message'].should.equal( - 'Transaction request cannot include multiple operations on one item' - ) - - with assert_raises(ClientError) as ex: - client.transact_get_items(TransactItems=[ - {'Get': {'Key': {'id': {'S': '1'}}, 'TableName': 'test1'}}, - {'Get': {'Key': {'id': {'S': '1'}}, 'TableName': 'test1'}}, - {'Get': {'Key': {'id': {'S': '1'}}, 'TableName': 'test1'}}, - {'Get': {'Key': {'id': {'S': 
'1'}}, 'TableName': 'test1'}}, - {'Get': {'Key': {'id': {'S': '1'}}, 'TableName': 'test1'}}, - {'Get': {'Key': {'id': {'S': '1'}}, 'TableName': 'test1'}}, - {'Get': {'Key': {'id': {'S': '1'}}, 'TableName': 'test1'}}, - {'Get': {'Key': {'id': {'S': '1'}}, 'TableName': 'test1'}}, - {'Get': {'Key': {'id': {'S': '1'}}, 'TableName': 'test1'}}, - {'Get': {'Key': {'id': {'S': '1'}}, 'TableName': 'test1'}}, - {'Get': {'Key': {'id': {'S': '1'}}, 'TableName': 'test1'}}, + {'Get': {'Key': {'id': {'S': '1'}}, 'TableName': 'test1'}} for i in range(26) ]) ex.exception.response['ResponseMetadata']['HTTPStatusCode'].should.equal(400) ex.exception.response['Error']['Message'].should.match( - r'failed to satisfy constraint: Member must have length less than or equal to 10', re.I + r'failed to satisfy constraint: Member must have length less than or equal to 25', re.I ) with assert_raises(ClientError) as ex: From 71d3941daf09a9276dd29f3692585200bd6ea7fa Mon Sep 17 00:00:00 2001 From: Bert Blommers Date: Thu, 12 Mar 2020 14:26:23 +0000 Subject: [PATCH 141/658] Linting --- moto/dynamodb2/responses.py | 57 ++-- tests/test_dynamodb2/test_dynamodb.py | 392 +++++++++++--------------- 2 files changed, 199 insertions(+), 250 deletions(-) diff --git a/moto/dynamodb2/responses.py b/moto/dynamodb2/responses.py index a5e465a1abaa..3d25c7e49e62 100644 --- a/moto/dynamodb2/responses.py +++ b/moto/dynamodb2/responses.py @@ -833,7 +833,7 @@ def describe_time_to_live(self): return json.dumps({"TimeToLiveDescription": ttl_spec}) def transact_get_items(self): - transact_items = self.body['TransactItems'] + transact_items = self.body["TransactItems"] responses = list() if len(transact_items) > TRANSACTION_MAX_ITEMS: @@ -842,26 +842,32 @@ def transact_get_items(self): request_id = 268435456 for _ in transact_items: request_id += 1 - hex_request_id = format(request_id, 'x') - err_list.append('com.amazonaws.dynamodb.v20120810.TransactGetItem@%s' % hex_request_id) - msg += ', '.join(err_list) - msg += "'] at 'transactItems' failed to satisfy constraint: " \ - "Member must have length less than or equal to %s" % TRANSACTION_MAX_ITEMS + hex_request_id = format(request_id, "x") + err_list.append( + "com.amazonaws.dynamodb.v20120810.TransactGetItem@%s" + % hex_request_id + ) + msg += ", ".join(err_list) + msg += ( + "'] at 'transactItems' failed to satisfy constraint: " + "Member must have length less than or equal to %s" + % TRANSACTION_MAX_ITEMS + ) - return self.error('ValidationException', msg) + return self.error("ValidationException", msg) - ret_consumed_capacity = self.body.get('ReturnConsumedCapacity', 'NONE') + ret_consumed_capacity = self.body.get("ReturnConsumedCapacity", "NONE") consumed_capacity = dict() for transact_item in transact_items: - table_name = transact_item['Get']['TableName'] - key = transact_item['Get']['Key'] + table_name = transact_item["Get"]["TableName"] + key = transact_item["Get"]["Key"] try: item = self.dynamodb_backend.get_item(table_name, key) except ValueError: - er = 'com.amazonaws.dynamodb.v20111205#ResourceNotFoundException' - return self.error(er, 'Requested resource not found') + er = "com.amazonaws.dynamodb.v20111205#ResourceNotFoundException" + return self.error(er, "Requested resource not found") if not item: continue @@ -870,25 +876,22 @@ def transact_get_items(self): responses.append(item_describe) table_capacity = consumed_capacity.get(table_name, {}) - table_capacity['TableName'] = table_name - capacity_units = table_capacity.get('CapacityUnits', 0) + 2.0 - 
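# A short standalone sketch of the flat accounting in this block, assuming
# two Get items against the same table: each item adds 2.0 units, so the
# aggregated entry for that table ends up at 4.0 (matching the tests below).
sketch_capacity = {}
for _ in range(2):
    entry = sketch_capacity.get("test1", {"TableName": "test1"})
    entry["CapacityUnits"] = entry.get("CapacityUnits", 0) + 2.0
    entry["ReadCapacityUnits"] = entry.get("ReadCapacityUnits", 0) + 2.0
    sketch_capacity["test1"] = entry
assert sketch_capacity["test1"] == {
    "TableName": "test1",
    "CapacityUnits": 4.0,
    "ReadCapacityUnits": 4.0,
}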
table_capacity['CapacityUnits'] = capacity_units - read_capacity_units = table_capacity.get('ReadCapacityUnits', 0) + 2.0 - table_capacity['ReadCapacityUnits'] = read_capacity_units + table_capacity["TableName"] = table_name + capacity_units = table_capacity.get("CapacityUnits", 0) + 2.0 + table_capacity["CapacityUnits"] = capacity_units + read_capacity_units = table_capacity.get("ReadCapacityUnits", 0) + 2.0 + table_capacity["ReadCapacityUnits"] = read_capacity_units consumed_capacity[table_name] = table_capacity - if ret_consumed_capacity == 'INDEXES': - table_capacity['Table'] = { - 'CapacityUnits': capacity_units, - 'ReadCapacityUnits': read_capacity_units + if ret_consumed_capacity == "INDEXES": + table_capacity["Table"] = { + "CapacityUnits": capacity_units, + "ReadCapacityUnits": read_capacity_units, } result = dict() - result.update({ - 'Responses': responses}) - if ret_consumed_capacity != 'NONE': - result.update({ - 'ConsumedCapacity': [v for v in consumed_capacity.values()] - }) + result.update({"Responses": responses}) + if ret_consumed_capacity != "NONE": + result.update({"ConsumedCapacity": [v for v in consumed_capacity.values()]}) return dynamo_json_dump(result) diff --git a/tests/test_dynamodb2/test_dynamodb.py b/tests/test_dynamodb2/test_dynamodb.py index cfe071f447c3..f67711689fe6 100644 --- a/tests/test_dynamodb2/test_dynamodb.py +++ b/tests/test_dynamodb2/test_dynamodb.py @@ -3798,267 +3798,213 @@ def test_query_catches_when_no_filters(): @mock_dynamodb2 def test_invalid_transact_get_items(): - dynamodb = boto3.resource('dynamodb', region_name='us-east-1') + dynamodb = boto3.resource("dynamodb", region_name="us-east-1") dynamodb.create_table( - TableName='test1', - KeySchema=[{'AttributeName': 'id', 'KeyType': 'HASH'}], - AttributeDefinitions=[{'AttributeName': 'id', 'AttributeType': 'S'}], - ProvisionedThroughput={'ReadCapacityUnits': 5, 'WriteCapacityUnits': 5} + TableName="test1", + KeySchema=[{"AttributeName": "id", "KeyType": "HASH"}], + AttributeDefinitions=[{"AttributeName": "id", "AttributeType": "S"}], + ProvisionedThroughput={"ReadCapacityUnits": 5, "WriteCapacityUnits": 5}, + ) + table = dynamodb.Table("test1") + table.put_item( + Item={"id": "1", "val": "1",} ) - table = dynamodb.Table('test1') - table.put_item(Item={ - 'id': '1', - 'val': '1', - }) - table.put_item(Item={ - 'id': '1', - 'val': '2', - }) + table.put_item( + Item={"id": "1", "val": "2",} + ) - client = boto3.client('dynamodb', region_name='us-east-1') + client = boto3.client("dynamodb", region_name="us-east-1") with assert_raises(ClientError) as ex: - client.transact_get_items(TransactItems=[ - {'Get': {'Key': {'id': {'S': '1'}}, 'TableName': 'test1'}} for i in range(26) - ]) + client.transact_get_items( + TransactItems=[ + {"Get": {"Key": {"id": {"S": "1"}}, "TableName": "test1"}} + for i in range(26) + ] + ) - ex.exception.response['ResponseMetadata']['HTTPStatusCode'].should.equal(400) - ex.exception.response['Error']['Message'].should.match( - r'failed to satisfy constraint: Member must have length less than or equal to 25', re.I + ex.exception.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) + ex.exception.response["Error"]["Message"].should.match( + r"failed to satisfy constraint: Member must have length less than or equal to 25", + re.I, ) with assert_raises(ClientError) as ex: - client.transact_get_items(TransactItems=[ - { - 'Get': { - 'Key': { - 'id': {'S': '1'}, - }, - 'TableName': 'test1' - } - }, - { - 'Get': { - 'Key': { - 'id': {'S': '1'}, - }, - 'TableName': 
'non_exists_table' - } - } - ]) + client.transact_get_items( + TransactItems=[ + {"Get": {"Key": {"id": {"S": "1"},}, "TableName": "test1"}}, + {"Get": {"Key": {"id": {"S": "1"},}, "TableName": "non_exists_table"}}, + ] + ) - ex.exception.response['Error']['Code'].should.equal('ResourceNotFoundException') - ex.exception.response['ResponseMetadata']['HTTPStatusCode'].should.equal(400) - ex.exception.response['Error']['Message'].should.equal( - 'Requested resource not found' + ex.exception.response["Error"]["Code"].should.equal("ResourceNotFoundException") + ex.exception.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) + ex.exception.response["Error"]["Message"].should.equal( + "Requested resource not found" ) @mock_dynamodb2 def test_valid_transact_get_items(): - dynamodb = boto3.resource('dynamodb', region_name='us-east-1') + dynamodb = boto3.resource("dynamodb", region_name="us-east-1") dynamodb.create_table( - TableName='test1', + TableName="test1", KeySchema=[ - {'AttributeName': 'id', 'KeyType': 'HASH'}, - {'AttributeName': 'sort_key', 'KeyType': 'RANGE'}, + {"AttributeName": "id", "KeyType": "HASH"}, + {"AttributeName": "sort_key", "KeyType": "RANGE"}, ], AttributeDefinitions=[ - {'AttributeName': 'id', 'AttributeType': 'S'}, - {'AttributeName': 'sort_key', 'AttributeType': 'S'}, + {"AttributeName": "id", "AttributeType": "S"}, + {"AttributeName": "sort_key", "AttributeType": "S"}, ], - ProvisionedThroughput={'ReadCapacityUnits': 5, 'WriteCapacityUnits': 5} + ProvisionedThroughput={"ReadCapacityUnits": 5, "WriteCapacityUnits": 5}, + ) + table1 = dynamodb.Table("test1") + table1.put_item( + Item={"id": "1", "sort_key": "1",} ) - table1 = dynamodb.Table('test1') - table1.put_item(Item={ - 'id': '1', - 'sort_key': '1', - }) - table1.put_item(Item={ - 'id': '1', - 'sort_key': '2', - }) + table1.put_item( + Item={"id": "1", "sort_key": "2",} + ) dynamodb.create_table( - TableName='test2', + TableName="test2", KeySchema=[ - {'AttributeName': 'id', 'KeyType': 'HASH'}, - {'AttributeName': 'sort_key', 'KeyType': 'RANGE'}, + {"AttributeName": "id", "KeyType": "HASH"}, + {"AttributeName": "sort_key", "KeyType": "RANGE"}, ], AttributeDefinitions=[ - {'AttributeName': 'id', 'AttributeType': 'S'}, - {'AttributeName': 'sort_key', 'AttributeType': 'S'}, + {"AttributeName": "id", "AttributeType": "S"}, + {"AttributeName": "sort_key", "AttributeType": "S"}, ], - ProvisionedThroughput={'ReadCapacityUnits': 5, 'WriteCapacityUnits': 5} + ProvisionedThroughput={"ReadCapacityUnits": 5, "WriteCapacityUnits": 5}, + ) + table2 = dynamodb.Table("test2") + table2.put_item( + Item={"id": "1", "sort_key": "1",} ) - table2 = dynamodb.Table('test2') - table2.put_item(Item={ - 'id': '1', - 'sort_key': '1', - }) - client = boto3.client('dynamodb', region_name='us-east-1') - res = client.transact_get_items(TransactItems=[ - { - 'Get': { - 'Key': { - 'id': {'S': '1'}, - 'sort_key': {'S': '1'} - }, - 'TableName': 'test1' - } - }, - { - 'Get': { - 'Key': { - 'id': {'S': 'non_exists_key'}, - 'sort_key': {'S': '2'} - }, - 'TableName': 'test1' - } - } - ]) - res['Responses'][0]['Item'].should.equal({ - 'id': {'S': '1'}, - 'sort_key': {'S': '1'} - }) - len(res['Responses']).should.equal(1) - - res = client.transact_get_items(TransactItems=[ - { - 'Get': { - 'Key': { - 'id': {'S': '1'}, - 'sort_key': {'S': '1'} - }, - 'TableName': 'test1' - } - }, - { - 'Get': { - 'Key': { - 'id': {'S': '1'}, - 'sort_key': {'S': '2'} - }, - 'TableName': 'test1' - } - }, - { - 'Get': { - 'Key': { - 'id': {'S': '1'}, - 
'sort_key': {'S': '1'} - }, - 'TableName': 'test2' - } - }, - ]) + client = boto3.client("dynamodb", region_name="us-east-1") + res = client.transact_get_items( + TransactItems=[ + { + "Get": { + "Key": {"id": {"S": "1"}, "sort_key": {"S": "1"}}, + "TableName": "test1", + } + }, + { + "Get": { + "Key": {"id": {"S": "non_exists_key"}, "sort_key": {"S": "2"}}, + "TableName": "test1", + } + }, + ] + ) + res["Responses"][0]["Item"].should.equal({"id": {"S": "1"}, "sort_key": {"S": "1"}}) + len(res["Responses"]).should.equal(1) - res['Responses'][0]['Item'].should.equal({ - 'id': {'S': '1'}, - 'sort_key': {'S': '1'} - }) + res = client.transact_get_items( + TransactItems=[ + { + "Get": { + "Key": {"id": {"S": "1"}, "sort_key": {"S": "1"}}, + "TableName": "test1", + } + }, + { + "Get": { + "Key": {"id": {"S": "1"}, "sort_key": {"S": "2"}}, + "TableName": "test1", + } + }, + { + "Get": { + "Key": {"id": {"S": "1"}, "sort_key": {"S": "1"}}, + "TableName": "test2", + } + }, + ] + ) - res['Responses'][1]['Item'].should.equal({ - 'id': {'S': '1'}, - 'sort_key': {'S': '2'} - }) + res["Responses"][0]["Item"].should.equal({"id": {"S": "1"}, "sort_key": {"S": "1"}}) - res['Responses'][2]['Item'].should.equal({ - 'id': {'S': '1'}, - 'sort_key': {'S': '1'} - }) + res["Responses"][1]["Item"].should.equal({"id": {"S": "1"}, "sort_key": {"S": "2"}}) - res = client.transact_get_items(TransactItems=[ - { - 'Get': { - 'Key': { - 'id': {'S': '1'}, - 'sort_key': {'S': '1'} - }, - 'TableName': 'test1' - } - }, - { - 'Get': { - 'Key': { - 'id': {'S': '1'}, - 'sort_key': {'S': '2'} - }, - 'TableName': 'test1' - } - }, - { - 'Get': { - 'Key': { - 'id': {'S': '1'}, - 'sort_key': {'S': '1'} - }, - 'TableName': 'test2' - } - }, - ], ReturnConsumedCapacity='TOTAL') + res["Responses"][2]["Item"].should.equal({"id": {"S": "1"}, "sort_key": {"S": "1"}}) + + res = client.transact_get_items( + TransactItems=[ + { + "Get": { + "Key": {"id": {"S": "1"}, "sort_key": {"S": "1"}}, + "TableName": "test1", + } + }, + { + "Get": { + "Key": {"id": {"S": "1"}, "sort_key": {"S": "2"}}, + "TableName": "test1", + } + }, + { + "Get": { + "Key": {"id": {"S": "1"}, "sort_key": {"S": "1"}}, + "TableName": "test2", + } + }, + ], + ReturnConsumedCapacity="TOTAL", + ) - res['ConsumedCapacity'][0].should.equal({ - 'TableName': 'test1', - 'CapacityUnits': 4.0, - 'ReadCapacityUnits': 4.0 - }) + res["ConsumedCapacity"][0].should.equal( + {"TableName": "test1", "CapacityUnits": 4.0, "ReadCapacityUnits": 4.0} + ) - res['ConsumedCapacity'][1].should.equal({ - 'TableName': 'test2', - 'CapacityUnits': 2.0, - 'ReadCapacityUnits': 2.0 - }) + res["ConsumedCapacity"][1].should.equal( + {"TableName": "test2", "CapacityUnits": 2.0, "ReadCapacityUnits": 2.0} + ) - res = client.transact_get_items(TransactItems=[ - { - 'Get': { - 'Key': { - 'id': {'S': '1'}, - 'sort_key': {'S': '1'} - }, - 'TableName': 'test1' - } - }, - { - 'Get': { - 'Key': { - 'id': {'S': '1'}, - 'sort_key': {'S': '2'} - }, - 'TableName': 'test1' - } - }, + res = client.transact_get_items( + TransactItems=[ + { + "Get": { + "Key": {"id": {"S": "1"}, "sort_key": {"S": "1"}}, + "TableName": "test1", + } + }, + { + "Get": { + "Key": {"id": {"S": "1"}, "sort_key": {"S": "2"}}, + "TableName": "test1", + } + }, + { + "Get": { + "Key": {"id": {"S": "1"}, "sort_key": {"S": "1"}}, + "TableName": "test2", + } + }, + ], + ReturnConsumedCapacity="INDEXES", + ) + + res["ConsumedCapacity"][0].should.equal( { - 'Get': { - 'Key': { - 'id': {'S': '1'}, - 'sort_key': {'S': '1'} - }, - 'TableName': 'test2' - } - 
}, - ], ReturnConsumedCapacity='INDEXES') - - res['ConsumedCapacity'][0].should.equal({ - 'TableName': 'test1', - 'CapacityUnits': 4.0, - 'ReadCapacityUnits': 4.0, - 'Table': { - 'CapacityUnits': 4.0, - 'ReadCapacityUnits': 4.0, + "TableName": "test1", + "CapacityUnits": 4.0, + "ReadCapacityUnits": 4.0, + "Table": {"CapacityUnits": 4.0, "ReadCapacityUnits": 4.0,}, } - }) - - res['ConsumedCapacity'][1].should.equal({ - 'TableName': 'test2', - 'CapacityUnits': 2.0, - 'ReadCapacityUnits': 2.0, - 'Table': { - 'CapacityUnits': 2.0, - 'ReadCapacityUnits': 2.0, + ) + + res["ConsumedCapacity"][1].should.equal( + { + "TableName": "test2", + "CapacityUnits": 2.0, + "ReadCapacityUnits": 2.0, + "Table": {"CapacityUnits": 2.0, "ReadCapacityUnits": 2.0,}, } - }) + ) From 1409618b954a53ae4832a1212f18f957c1a7775c Mon Sep 17 00:00:00 2001 From: Justin Hipple Date: Wed, 11 Mar 2020 16:30:42 -0500 Subject: [PATCH 142/658] Fix a misleading error message AWSEvents.DescribeRule throws an error that references a rule named "test" rather than the specified rule name when a rule with the specified name does not exist. It has been fixed to reference the specified rule name. --- moto/events/responses.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/moto/events/responses.py b/moto/events/responses.py index 68c2114a6af0..c9931aabc970 100644 --- a/moto/events/responses.py +++ b/moto/events/responses.py @@ -62,7 +62,9 @@ def describe_rule(self): rule = self.events_backend.describe_rule(name) if not rule: - return self.error("ResourceNotFoundException", "Rule test does not exist.") + return self.error( + "ResourceNotFoundException", "Rule " + name + " does not exist." + ) rule_dict = self._generate_rule_dict(rule) return json.dumps(rule_dict), self.response_headers From 374b623e1d50d0ef9632a9e9bac6efa4fadc81ec Mon Sep 17 00:00:00 2001 From: Asher Foa <1268088+asherf@users.noreply.github.com> Date: Thu, 12 Mar 2020 09:34:25 -0700 Subject: [PATCH 143/658] Fix some 'DeprecationWarning: invalid escape sequence' warnings and use str.format for string interpolation. I am seeing a lot of deperecation warnings when I use moto for my tests (running under pytest), so I figured I'll clean up some of them. --- moto/cloudformation/parsing.py | 8 ++++---- moto/core/models.py | 4 ++-- moto/core/utils.py | 2 +- moto/dynamodb2/comparisons.py | 20 +++++++++++--------- moto/ecr/models.py | 2 +- moto/s3/utils.py | 2 +- 6 files changed, 20 insertions(+), 18 deletions(-) diff --git a/moto/cloudformation/parsing.py b/moto/cloudformation/parsing.py index 34d96acc6d05..d7e15c7b4428 100644 --- a/moto/cloudformation/parsing.py +++ b/moto/cloudformation/parsing.py @@ -196,13 +196,13 @@ def clean_json(resource_json, resources_map): ) else: fn_sub_value = clean_json(resource_json["Fn::Sub"], resources_map) - to_sub = re.findall('(?=\${)[^!^"]*?}', fn_sub_value) - literals = re.findall('(?=\${!)[^"]*?}', fn_sub_value) + to_sub = re.findall(r'(?=\${)[^!^"]*?}', fn_sub_value) + literals = re.findall(r'(?=\${!)[^"]*?}', fn_sub_value) for sub in to_sub: if "." 
in sub: cleaned_ref = clean_json( { - "Fn::GetAtt": re.findall('(?<=\${)[^"]*?(?=})', sub)[ + "Fn::GetAtt": re.findall(r'(?<=\${)[^"]*?(?=})', sub)[ 0 ].split(".") }, @@ -210,7 +210,7 @@ def clean_json(resource_json, resources_map): ) else: cleaned_ref = clean_json( - {"Ref": re.findall('(?<=\${)[^"]*?(?=})', sub)[0]}, + {"Ref": re.findall(r'(?<=\${)[^"]*?(?=})', sub)[0]}, resources_map, ) fn_sub_value = fn_sub_value.replace(sub, cleaned_ref) diff --git a/moto/core/models.py b/moto/core/models.py index 8ca74d5b5d41..73942c669b47 100644 --- a/moto/core/models.py +++ b/moto/core/models.py @@ -347,7 +347,7 @@ def enable_patching(self): responses_mock.add( CallbackResponse( method=method, - url=re.compile("https?://.+.amazonaws.com/.*"), + url=re.compile(r"https?://.+.amazonaws.com/.*"), callback=not_implemented_callback, stream=True, match_querystring=False, @@ -356,7 +356,7 @@ def enable_patching(self): botocore_mock.add( CallbackResponse( method=method, - url=re.compile("https?://.+.amazonaws.com/.*"), + url=re.compile(r"https?://.+.amazonaws.com/.*"), callback=not_implemented_callback, stream=True, match_querystring=False, diff --git a/moto/core/utils.py b/moto/core/utils.py index efad5679c1dd..f61b040e0e49 100644 --- a/moto/core/utils.py +++ b/moto/core/utils.py @@ -95,7 +95,7 @@ def caller(reg): match_name, match_pattern = reg.groups() return ''.format(match_pattern, match_name) - url_path = re.sub("\(\?P<(.*?)>(.*?)\)", caller, url_path) + url_path = re.sub(r"\(\?P<(.*?)>(.*?)\)", caller, url_path) if url_path.endswith("/?"): # Flask does own handling of trailing slashes diff --git a/moto/dynamodb2/comparisons.py b/moto/dynamodb2/comparisons.py index 29951d92dc24..d17ae6875f6c 100644 --- a/moto/dynamodb2/comparisons.py +++ b/moto/dynamodb2/comparisons.py @@ -251,9 +251,9 @@ def _lex_condition_expression(self): def _lex_one_node(self, remaining_expression): # TODO: Handle indexing like [1] - attribute_regex = "(:|#)?[A-z0-9\-_]+" + attribute_regex = r"(:|#)?[A-z0-9\-_]+" patterns = [ - (self.Nonterminal.WHITESPACE, re.compile("^ +")), + (self.Nonterminal.WHITESPACE, re.compile(r"^ +")), ( self.Nonterminal.COMPARATOR, re.compile( @@ -270,12 +270,14 @@ def _lex_one_node(self, remaining_expression): ( self.Nonterminal.OPERAND, re.compile( - "^" + attribute_regex + "(\." + attribute_regex + "|\[[0-9]\])*" + r"^{attribute_regex}(\.{attribute_regex}|\[[0-9]\])*".format( + attribute_regex=attribute_regex + ) ), ), - (self.Nonterminal.COMMA, re.compile("^,")), - (self.Nonterminal.LEFT_PAREN, re.compile("^\(")), - (self.Nonterminal.RIGHT_PAREN, re.compile("^\)")), + (self.Nonterminal.COMMA, re.compile(r"^,")), + (self.Nonterminal.LEFT_PAREN, re.compile(r"^\(")), + (self.Nonterminal.RIGHT_PAREN, re.compile(r"^\)")), ] for nonterminal, pattern in patterns: @@ -285,7 +287,7 @@ def _lex_one_node(self, remaining_expression): break else: # pragma: no cover raise ValueError( - "Cannot parse condition starting at: " + remaining_expression + "Cannot parse condition starting at:{}".format(remaining_expression) ) node = self.Node( @@ -318,7 +320,7 @@ def _parse_paths(self, nodes): for child in children: self._assert( child.nonterminal == self.Nonterminal.IDENTIFIER, - "Cannot use %s in path" % child.text, + "Cannot use {} in path".format(child.text), [node], ) output.append( @@ -392,7 +394,7 @@ def _parse_path_element(self, name): elif name.startswith("["): # e.g. 
[123] if not name.endswith("]"): # pragma: no cover - raise ValueError("Bad path element %s" % name) + raise ValueError("Bad path element {}".format(name)) return self.Node( nonterminal=self.Nonterminal.IDENTIFIER, kind=self.Kind.LITERAL, diff --git a/moto/ecr/models.py b/moto/ecr/models.py index f84df79aa479..88b058e1e515 100644 --- a/moto/ecr/models.py +++ b/moto/ecr/models.py @@ -403,7 +403,7 @@ def batch_delete_image(self, repository_name, registry_id=None, image_ids=None): # If we have a digest, is it valid? if "imageDigest" in image_id: - pattern = re.compile("^[0-9a-zA-Z_+\.-]+:[0-9a-fA-F]{64}") + pattern = re.compile(r"^[0-9a-zA-Z_+\.-]+:[0-9a-fA-F]{64}") if not pattern.match(image_id.get("imageDigest")): response["failures"].append( { diff --git a/moto/s3/utils.py b/moto/s3/utils.py index e22b6b860262..6855c9b25e75 100644 --- a/moto/s3/utils.py +++ b/moto/s3/utils.py @@ -52,7 +52,7 @@ def parse_region_from_url(url): def metadata_from_headers(headers): metadata = {} - meta_regex = re.compile("^x-amz-meta-([a-zA-Z0-9\-_]+)$", flags=re.IGNORECASE) + meta_regex = re.compile(r"^x-amz-meta-([a-zA-Z0-9\-_]+)$", flags=re.IGNORECASE) for header, value in headers.items(): if isinstance(header, six.string_types): result = meta_regex.match(header) From 7f6c6660aa5280ac36c919af8256148df6989c6f Mon Sep 17 00:00:00 2001 From: ImFlog Date: Thu, 12 Mar 2020 17:56:11 +0100 Subject: [PATCH 144/658] Add some new update_new tests --- moto/dynamodb2/responses.py | 4 +++- tests/test_dynamodb2/test_dynamodb.py | 32 +++++++++++++++++++++++---- 2 files changed, 31 insertions(+), 5 deletions(-) diff --git a/moto/dynamodb2/responses.py b/moto/dynamodb2/responses.py index 826a9a19c7cc..9e3c3a79babd 100644 --- a/moto/dynamodb2/responses.py +++ b/moto/dynamodb2/responses.py @@ -823,7 +823,9 @@ def _build_updated_new_attributes(self, original, changed): return changed else: return [ - self._build_updated_new_attributes(original[index], changed[index]) + self._build_updated_new_attributes( + original[index], changed[index] + ) for index in range(len(changed)) ] elif changed != original: diff --git a/tests/test_dynamodb2/test_dynamodb.py b/tests/test_dynamodb2/test_dynamodb.py index a2ea09c0e080..05c5397216eb 100644 --- a/tests/test_dynamodb2/test_dynamodb.py +++ b/tests/test_dynamodb2/test_dynamodb.py @@ -3648,11 +3648,18 @@ def test_update_supports_list_append_with_nested_if_not_exists_operation(): table = dynamo.Table(table_name) table.put_item(Item={"Id": "item-id", "nest1": {"nest2": {}}}) - table.update_item( + updated_item = table.update_item( Key={"Id": "item-id"}, UpdateExpression="SET nest1.nest2.event_history = list_append(if_not_exists(nest1.nest2.event_history, :empty_list), :new_value)", ExpressionAttributeValues={":empty_list": [], ":new_value": ["some_value"]}, + ReturnValues="UPDATED_NEW", + ) + + # Verify updated item is correct + updated_item["Attributes"].should.equal( + {"nest1": {"nest2": {"event_history": ["some_value"]}}} ) + table.get_item(Key={"Id": "item-id"})["Item"].should.equal( {"Id": "item-id", "nest1": {"nest2": {"event_history": ["some_value"]}}} ) @@ -3673,11 +3680,18 @@ def test_update_supports_list_append_with_nested_if_not_exists_operation_and_pro table = dynamo.Table(table_name) table.put_item(Item={"Id": "item-id", "event_history": ["other_value"]}) - table.update_item( + updated_item = table.update_item( Key={"Id": "item-id"}, UpdateExpression="SET event_history = list_append(if_not_exists(event_history, :empty_list), :new_value)", ExpressionAttributeValues={":empty_list": 
[], ":new_value": ["some_value"]}, + ReturnValues="UPDATED_NEW", + ) + + # Verify updated item is correct + updated_item["Attributes"].should.equal( + {"event_history": ["other_value", "some_value"]} ) + table.get_item(Key={"Id": "item-id"})["Item"].should.equal( {"Id": "item-id", "event_history": ["other_value", "some_value"]} ) @@ -3764,11 +3778,16 @@ def test_update_nested_item_if_original_value_is_none(): ) table = dynamo.Table("origin-rbu-dev") table.put_item(Item={"job_id": "a", "job_details": {"job_name": None}}) - table.update_item( + updated_item = table.update_item( Key={"job_id": "a"}, UpdateExpression="SET job_details.job_name = :output", ExpressionAttributeValues={":output": "updated"}, + ReturnValues="UPDATED_NEW", ) + + # Verify updated item is correct + updated_item["Attributes"].should.equal({"job_details": {"job_name": "updated"}}) + table.scan()["Items"][0]["job_details"]["job_name"].should.equal("updated") @@ -3784,11 +3803,16 @@ def test_allow_update_to_item_with_different_type(): table = dynamo.Table("origin-rbu-dev") table.put_item(Item={"job_id": "a", "job_details": {"job_name": {"nested": "yes"}}}) table.put_item(Item={"job_id": "b", "job_details": {"job_name": {"nested": "yes"}}}) - table.update_item( + updated_item = table.update_item( Key={"job_id": "a"}, UpdateExpression="SET job_details.job_name = :output", ExpressionAttributeValues={":output": "updated"}, + ReturnValues="UPDATED_NEW", ) + + # Verify updated item is correct + updated_item["Attributes"].should.equal({"job_details": {"job_name": "updated"}}) + table.get_item(Key={"job_id": "a"})["Item"]["job_details"][ "job_name" ].should.be.equal("updated") From 8bffff4620b7a7325a828e2c383e6f7b892b412c Mon Sep 17 00:00:00 2001 From: Tim Gatzemeier Date: Mon, 16 Mar 2020 18:48:29 +0100 Subject: [PATCH 145/658] set actions enabled in template on describe images this is to avoid errors with terraform relates to https://github.com/localstack/localstack/issues/2161 --- moto/cloudwatch/models.py | 4 ++++ moto/cloudwatch/responses.py | 2 ++ 2 files changed, 6 insertions(+) diff --git a/moto/cloudwatch/models.py b/moto/cloudwatch/models.py index 716a296334f4..bdba09930582 100644 --- a/moto/cloudwatch/models.py +++ b/moto/cloudwatch/models.py @@ -67,6 +67,7 @@ def __init__( ok_actions, insufficient_data_actions, unit, + actions_enabled, ): self.name = name self.namespace = namespace @@ -80,6 +81,7 @@ def __init__( self.dimensions = [ Dimension(dimension["name"], dimension["value"]) for dimension in dimensions ] + self.actions_enabled = actions_enabled self.alarm_actions = alarm_actions self.ok_actions = ok_actions self.insufficient_data_actions = insufficient_data_actions @@ -215,6 +217,7 @@ def put_metric_alarm( ok_actions, insufficient_data_actions, unit, + actions_enabled, ): alarm = FakeAlarm( name, @@ -231,6 +234,7 @@ def put_metric_alarm( ok_actions, insufficient_data_actions, unit, + actions_enabled, ) self.alarms[name] = alarm return alarm diff --git a/moto/cloudwatch/responses.py b/moto/cloudwatch/responses.py index 7872e71fd811..dbc9d8c5a0a8 100644 --- a/moto/cloudwatch/responses.py +++ b/moto/cloudwatch/responses.py @@ -28,6 +28,7 @@ def put_metric_alarm(self): dimensions = self._get_list_prefix("Dimensions.member") alarm_actions = self._get_multi_param("AlarmActions.member") ok_actions = self._get_multi_param("OKActions.member") + actions_enabled = self._get_multi_param("ActionsEnabled") insufficient_data_actions = self._get_multi_param( "InsufficientDataActions.member" ) @@ -47,6 +48,7 @@ def 
put_metric_alarm(self): ok_actions, insufficient_data_actions, unit, + actions_enabled, ) template = self.response_template(PUT_METRIC_ALARM_TEMPLATE) return template.render(alarm=alarm) From 9d3ee116d3ec53c6ee20d2df0823ac09694e0f37 Mon Sep 17 00:00:00 2001 From: Tim Gatzemeier Date: Mon, 16 Mar 2020 20:14:41 +0100 Subject: [PATCH 146/658] add test case for actions_enabled field --- tests/test_cloudwatch/test_cloudwatch_boto3.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tests/test_cloudwatch/test_cloudwatch_boto3.py b/tests/test_cloudwatch/test_cloudwatch_boto3.py index 5bd9ed13d807..1935a418127f 100644 --- a/tests/test_cloudwatch/test_cloudwatch_boto3.py +++ b/tests/test_cloudwatch/test_cloudwatch_boto3.py @@ -128,11 +128,13 @@ def test_alarm_state(): len(resp["MetricAlarms"]).should.equal(1) resp["MetricAlarms"][0]["AlarmName"].should.equal("testalarm1") resp["MetricAlarms"][0]["StateValue"].should.equal("ALARM") + resp["MetricAlarms"][0]["ActionsEnabled"].should.equal("True") resp = client.describe_alarms(StateValue="OK") len(resp["MetricAlarms"]).should.equal(1) resp["MetricAlarms"][0]["AlarmName"].should.equal("testalarm2") resp["MetricAlarms"][0]["StateValue"].should.equal("OK") + resp["MetricAlarms"][0]["ActionsEnabled"].should.equal("True") # Just for sanity resp = client.describe_alarms() From 1fdb0e987dc882ba380d2665bd52f40dfd800e7e Mon Sep 17 00:00:00 2001 From: Tim Gatzemeier Date: Mon, 16 Mar 2020 21:45:18 +0100 Subject: [PATCH 147/658] get single param for actions enabled --- moto/cloudwatch/responses.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/moto/cloudwatch/responses.py b/moto/cloudwatch/responses.py index dbc9d8c5a0a8..7993c9f06284 100644 --- a/moto/cloudwatch/responses.py +++ b/moto/cloudwatch/responses.py @@ -28,7 +28,7 @@ def put_metric_alarm(self): dimensions = self._get_list_prefix("Dimensions.member") alarm_actions = self._get_multi_param("AlarmActions.member") ok_actions = self._get_multi_param("OKActions.member") - actions_enabled = self._get_multi_param("ActionsEnabled") + actions_enabled = self._get_param("ActionsEnabled") insufficient_data_actions = self._get_multi_param( "InsufficientDataActions.member" ) From 50974aa9b2d1c71b4be580e4236e04fac31c7e95 Mon Sep 17 00:00:00 2001 From: Tim Gatzemeier Date: Mon, 16 Mar 2020 21:45:29 +0100 Subject: [PATCH 148/658] add test cases to ensure actions enabled is correctly returned --- tests/test_cloudwatch/test_cloudwatch.py | 12 +++++++++--- tests/test_cloudwatch/test_cloudwatch_boto3.py | 6 +++--- 2 files changed, 12 insertions(+), 6 deletions(-) diff --git a/tests/test_cloudwatch/test_cloudwatch.py b/tests/test_cloudwatch/test_cloudwatch.py index dee8aa605cd6..f86b57d54957 100644 --- a/tests/test_cloudwatch/test_cloudwatch.py +++ b/tests/test_cloudwatch/test_cloudwatch.py @@ -101,15 +101,22 @@ def test_describe_alarms(): conn.create_alarm(alarm_fixture(name="nfoobaz", action="afoobaz")) conn.create_alarm(alarm_fixture(name="nbarfoo", action="abarfoo")) conn.create_alarm(alarm_fixture(name="nbazfoo", action="abazfoo")) - + + enabled = alarm_fixture(name="enabled1", action=["abarfoo"]) + enabled.add_alarm_action("arn:alarm") + conn.create_alarm(enabled) + alarms = conn.describe_alarms() - alarms.should.have.length_of(4) + alarms.should.have.length_of(5) alarms = conn.describe_alarms(alarm_name_prefix="nfoo") alarms.should.have.length_of(2) alarms = conn.describe_alarms(alarm_names=["nfoobar", "nbarfoo", "nbazfoo"]) alarms.should.have.length_of(3) alarms = 
conn.describe_alarms(action_prefix="afoo") alarms.should.have.length_of(2) + alarms = conn.describe_alarms(alarm_name_prefix="enabled") + alarms.should.have.length_of(1) + alarms[0].actions_enabled.should.equal("true") for alarm in conn.describe_alarms(): alarm.delete() @@ -117,7 +124,6 @@ def test_describe_alarms(): alarms = conn.describe_alarms() alarms.should.have.length_of(0) - @mock_cloudwatch_deprecated def test_get_metric_statistics(): conn = boto.connect_cloudwatch() diff --git a/tests/test_cloudwatch/test_cloudwatch_boto3.py b/tests/test_cloudwatch/test_cloudwatch_boto3.py index 1935a418127f..6bef2b3f249d 100644 --- a/tests/test_cloudwatch/test_cloudwatch_boto3.py +++ b/tests/test_cloudwatch/test_cloudwatch_boto3.py @@ -104,6 +104,7 @@ def test_alarm_state(): Statistic="Average", Threshold=2, ComparisonOperator="GreaterThanThreshold", + ActionsEnabled=True, ) client.put_metric_alarm( AlarmName="testalarm2", @@ -128,19 +129,18 @@ def test_alarm_state(): len(resp["MetricAlarms"]).should.equal(1) resp["MetricAlarms"][0]["AlarmName"].should.equal("testalarm1") resp["MetricAlarms"][0]["StateValue"].should.equal("ALARM") - resp["MetricAlarms"][0]["ActionsEnabled"].should.equal("True") + resp["MetricAlarms"][0]["ActionsEnabled"].should.equal(True) resp = client.describe_alarms(StateValue="OK") len(resp["MetricAlarms"]).should.equal(1) resp["MetricAlarms"][0]["AlarmName"].should.equal("testalarm2") resp["MetricAlarms"][0]["StateValue"].should.equal("OK") - resp["MetricAlarms"][0]["ActionsEnabled"].should.equal("True") + resp["MetricAlarms"][0]["ActionsEnabled"].should.equal(False) # Just for sanity resp = client.describe_alarms() len(resp["MetricAlarms"]).should.equal(2) - @mock_cloudwatch def test_put_metric_data_no_dimensions(): conn = boto3.client("cloudwatch", region_name="us-east-1") From 6e490a91909b6be7e371db6241a09291eb0d81da Mon Sep 17 00:00:00 2001 From: Tim Gatzemeier Date: Mon, 16 Mar 2020 21:58:50 +0100 Subject: [PATCH 149/658] make linter happy --- tests/test_cloudwatch/test_cloudwatch.py | 5 +++-- tests/test_cloudwatch/test_cloudwatch_boto3.py | 1 + 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/tests/test_cloudwatch/test_cloudwatch.py b/tests/test_cloudwatch/test_cloudwatch.py index f86b57d54957..5a05a55e1cb5 100644 --- a/tests/test_cloudwatch/test_cloudwatch.py +++ b/tests/test_cloudwatch/test_cloudwatch.py @@ -101,11 +101,11 @@ def test_describe_alarms(): conn.create_alarm(alarm_fixture(name="nfoobaz", action="afoobaz")) conn.create_alarm(alarm_fixture(name="nbarfoo", action="abarfoo")) conn.create_alarm(alarm_fixture(name="nbazfoo", action="abazfoo")) - + enabled = alarm_fixture(name="enabled1", action=["abarfoo"]) enabled.add_alarm_action("arn:alarm") conn.create_alarm(enabled) - + alarms = conn.describe_alarms() alarms.should.have.length_of(5) alarms = conn.describe_alarms(alarm_name_prefix="nfoo") @@ -124,6 +124,7 @@ def test_describe_alarms(): alarms = conn.describe_alarms() alarms.should.have.length_of(0) + @mock_cloudwatch_deprecated def test_get_metric_statistics(): conn = boto.connect_cloudwatch() diff --git a/tests/test_cloudwatch/test_cloudwatch_boto3.py b/tests/test_cloudwatch/test_cloudwatch_boto3.py index 6bef2b3f249d..7fe1440528b7 100644 --- a/tests/test_cloudwatch/test_cloudwatch_boto3.py +++ b/tests/test_cloudwatch/test_cloudwatch_boto3.py @@ -141,6 +141,7 @@ def test_alarm_state(): resp = client.describe_alarms() len(resp["MetricAlarms"]).should.equal(2) + @mock_cloudwatch def test_put_metric_data_no_dimensions(): conn = 
boto3.client("cloudwatch", region_name="us-east-1") From d8423b5de0f8770149449b54f7b09ed05419233b Mon Sep 17 00:00:00 2001 From: Bert Blommers Date: Tue, 17 Mar 2020 09:16:12 +0000 Subject: [PATCH 150/658] Optimize content length for large files --- moto/s3/models.py | 14 +++++++------- moto/s3/responses.py | 8 -------- tests/test_s3/test_s3.py | 13 ++----------- 3 files changed, 9 insertions(+), 26 deletions(-) diff --git a/moto/s3/models.py b/moto/s3/models.py index 67b53b984ee7..8c2a86f4121f 100644 --- a/moto/s3/models.py +++ b/moto/s3/models.py @@ -120,11 +120,9 @@ def version_id(self): @property def value(self): self.lock.acquire() - print("===>value") self._value_buffer.seek(0) - print("===>seek") r = self._value_buffer.read() - print("===>read") + r = copy.copy(r) self.lock.release() return r @@ -138,6 +136,7 @@ def value(self, new_value): if isinstance(new_value, six.text_type): new_value = new_value.encode(DEFAULT_TEXT_ENCODING) self._value_buffer.write(new_value) + self.contentsize = len(new_value) def copy(self, new_name=None, new_is_versioned=None): r = copy.deepcopy(self) @@ -165,6 +164,7 @@ def set_acl(self, acl): self.acl = acl def append_to_value(self, value): + self.contentsize += len(value) self._value_buffer.seek(0, os.SEEK_END) self._value_buffer.write(value) @@ -237,8 +237,7 @@ def response_dict(self): @property def size(self): - self._value_buffer.seek(0, os.SEEK_END) - return self._value_buffer.tell() + return self.contentsize @property def storage_class(self): @@ -257,6 +256,7 @@ def __getstate__(self): state = self.__dict__.copy() state["value"] = self.value del state["_value_buffer"] + del state["lock"] return state def __setstate__(self, state): @@ -266,6 +266,7 @@ def __setstate__(self, state): max_size=self._max_buffer_size ) self.value = state["value"] + self.lock = threading.Lock() class FakeMultipart(BaseModel): @@ -292,7 +293,7 @@ def complete(self, body): etag = etag.replace('"', "") if part is None or part_etag != etag: raise InvalidPart() - if last is not None and len(last.value) < UPLOAD_PART_MIN_SIZE: + if last is not None and last.contentsize < UPLOAD_PART_MIN_SIZE: raise EntityTooSmall() md5s.extend(decode_hex(part_etag)[0]) total.extend(part.value) @@ -1327,7 +1328,6 @@ def append_to_key(self, bucket_name, key_name, value): return key def get_key(self, bucket_name, key_name, version_id=None, part_number=None): - print("get_key("+str(bucket_name)+","+str(key_name)+","+str(version_id)+","+str(part_number)+")") key_name = clean_key_name(key_name) bucket = self.get_bucket(bucket_name) key = None diff --git a/moto/s3/responses.py b/moto/s3/responses.py index 15b1d1670fbc..4f38e2a9b438 100644 --- a/moto/s3/responses.py +++ b/moto/s3/responses.py @@ -859,7 +859,6 @@ def _bucket_response_delete_keys(self, request, body, bucket_name): def _handle_range_header(self, request, headers, response_content): response_headers = {} length = len(response_content) - print("Length: " + str(length) + " Range: " + str(request.headers.get("range"))) last = length - 1 _, rspec = request.headers.get("range").split("=") if "," in rspec: @@ -877,7 +876,6 @@ def toint(i): else: return 400, response_headers, "" if begin < 0 or end > last or begin > min(end, last): - print(str(begin)+ " < 0 or " + str(end) + " > " + str(last) + " or " + str(begin) + " > min("+str(end)+","+str(last)+")") return 416, response_headers, "" response_headers["content-range"] = "bytes {0}-{1}/{2}".format( begin, end, length @@ -907,8 +905,6 @@ def key_or_control_response(self, request, full_url, 
headers): response_content = response else: status_code, response_headers, response_content = response - print("response received: " + str(len(response_content))) - print(request.headers) if status_code == 200 and "range" in request.headers: self.lock.acquire() @@ -920,7 +916,6 @@ def key_or_control_response(self, request, full_url, headers): return status_code, response_headers, response_content def _control_response(self, request, full_url, headers): - print("_control_response") parsed_url = urlparse(full_url) query = parse_qs(parsed_url.query, keep_blank_values=True) method = request.method @@ -1068,14 +1063,12 @@ def _key_response(self, request, full_url, headers): ) def _key_response_get(self, bucket_name, query, key_name, headers): - print("_key_response_get("+str(key_name)+","+str(headers)+")") self._set_action("KEY", "GET", query) self._authenticate_and_authorize_s3_action() response_headers = {} if query.get("uploadId"): upload_id = query["uploadId"][0] - print("UploadID: " + str(upload_id)) parts = self.backend.list_multipart(bucket_name, upload_id) template = self.response_template(S3_MULTIPART_LIST_RESPONSE) return ( @@ -1107,7 +1100,6 @@ def _key_response_get(self, bucket_name, query, key_name, headers): response_headers.update(key.metadata) response_headers.update(key.response_dict) - print("returning 200, " + str(headers) + ", " + str(len(key.value)) + " ( " + str(key_name) + ")") return 200, response_headers, key.value def _key_response_put(self, request, body, bucket_name, query, key_name, headers): diff --git a/tests/test_s3/test_s3.py b/tests/test_s3/test_s3.py index 2eef9ef826b3..7b9f2c72680d 100644 --- a/tests/test_s3/test_s3.py +++ b/tests/test_s3/test_s3.py @@ -4398,24 +4398,17 @@ def test_s3_config_dict(): @mock_s3 def test_delete_downloaded_file(): # SET UP - filename = '...' + filename = 'some_large_file.pdf' file = open(filename, 'rb') uploader = PdfFileUploader(file) boto3.client('s3').create_bucket(Bucket=uploader.bucket_name()) uploader.upload() - print("================\nUPLOADED\n=================") - # DOWNLOAD - # the following two lines are basically - # boto3.client('s3').download_file(bucket_name, file_name, local_path) - # where bucket_name, file_name and local_path are retrieved from PdfFileUploader - # e.g. 
boto3.client('s3').download_file("bucket_name", "asdf.pdf", "/tmp/asdf.pdf") + downloader = PdfFileDownloader(uploader.full_bucket_file_name()) downloader.download() downloader.delete_downloaded_file() - print("Done!") - from pathlib import Path import re @@ -4431,8 +4424,6 @@ def download(self): return self.local_path() except ClientError as exc: - print("=======") - print(exc) raise exc def local_path(self): From e2434cbf6f4f939c99fd4d81cbad285702251fd1 Mon Sep 17 00:00:00 2001 From: Bert Blommers Date: Tue, 17 Mar 2020 09:18:38 +0000 Subject: [PATCH 151/658] Remove unnecessary lock --- moto/s3/responses.py | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) diff --git a/moto/s3/responses.py b/moto/s3/responses.py index 4f38e2a9b438..b74be9a63442 100644 --- a/moto/s3/responses.py +++ b/moto/s3/responses.py @@ -2,7 +2,6 @@ import re import sys -import threading import six from botocore.awsrequest import AWSPreparedRequest @@ -151,7 +150,6 @@ def __init__(self, backend): self.path = "" self.data = {} self.headers = {} - self.lock = threading.Lock() @property def should_autoescape(self): @@ -907,12 +905,9 @@ def key_or_control_response(self, request, full_url, headers): status_code, response_headers, response_content = response if status_code == 200 and "range" in request.headers: - self.lock.acquire() - r = self._handle_range_header( + return self._handle_range_header( request, response_headers, response_content ) - self.lock.release() - return r return status_code, response_headers, response_content def _control_response(self, request, full_url, headers): From 5e4736e23392079c20bb283a5ceb2c8e8d6bacf4 Mon Sep 17 00:00:00 2001 From: Bert Blommers Date: Tue, 17 Mar 2020 09:19:57 +0000 Subject: [PATCH 152/658] Remove unnecessary print-statements --- moto/s3/utils.py | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/moto/s3/utils.py b/moto/s3/utils.py index 50ff1cf34e2a..e22b6b860262 100644 --- a/moto/s3/utils.py +++ b/moto/s3/utils.py @@ -104,9 +104,7 @@ def __setitem__(self, key, value): def get(self, key, default=None): try: return self[key] - except (KeyError, IndexError) as e: - print("Error retrieving " + str(key)) - print(e) + except (KeyError, IndexError): pass return default From 410d9ee90186d5e83c81f97dc93b6a24faf62b39 Mon Sep 17 00:00:00 2001 From: Bert Blommers Date: Tue, 17 Mar 2020 09:21:33 +0000 Subject: [PATCH 153/658] Remove test that only runs locally --- tests/test_s3/test_s3.py | 75 ---------------------------------------- 1 file changed, 75 deletions(-) diff --git a/tests/test_s3/test_s3.py b/tests/test_s3/test_s3.py index 7b9f2c72680d..48655ee17385 100644 --- a/tests/test_s3/test_s3.py +++ b/tests/test_s3/test_s3.py @@ -4393,78 +4393,3 @@ def test_s3_config_dict(): assert not logging_bucket["supplementaryConfiguration"].get( "BucketTaggingConfiguration" ) - - -@mock_s3 -def test_delete_downloaded_file(): - # SET UP - filename = 'some_large_file.pdf' - file = open(filename, 'rb') - uploader = PdfFileUploader(file) - boto3.client('s3').create_bucket(Bucket=uploader.bucket_name()) - uploader.upload() - - downloader = PdfFileDownloader(uploader.full_bucket_file_name()) - downloader.download() - - downloader.delete_downloaded_file() - - -from pathlib import Path -import re -import os -class PdfFileDownloader: - def __init__(self, full_bucket_file_name): - self.bucket_name, self.file_name = self.extract(full_bucket_file_name) - self.s3 = boto3.client('s3') - - def download(self): - try: - self.s3.download_file(self.bucket_name, self.file_name, 
self.local_path()) - - return self.local_path() - except ClientError as exc: - raise exc - - def local_path(self): - return '/tmp/' + self.file_name.replace('/', '') - - def delete_downloaded_file(self): - if Path(self.local_path()).is_file(): - print("Removing " + str(self.local_path())) - os.remove(self.local_path()) - - def file(self): - return open(self.local_path(), 'rb') - - def extract(self, full_bucket_file_name): - match = re.search(r'([\.a-zA-Z_-]+)\/(.*)', full_bucket_file_name) - - if match and len(match.groups()) == 2: - return (match.groups()[0], match.groups()[1]) - else: - raise RuntimeError(f"Cannot determine bucket and file name for {full_bucket_file_name}") - - -import binascii -class PdfFileUploader: - def __init__(self, file): - self.file = file - date = datetime.datetime.now().strftime('%Y%m%d%H%M%S') - random_hex = binascii.b2a_hex(os.urandom(16)).decode('ascii') - self.bucket_file_name = f"{date}_{random_hex}.pdf" - - def upload(self): - self.file.seek(0) - boto3.client('s3').upload_fileobj(self.file, self.bucket_name(), self.bucket_file_name) - - return (self.original_file_name(), self.full_bucket_file_name()) - - def original_file_name(self): - return os.path.basename(self.file.name) - - def bucket_name(self): - return 'test_bucket' #os.environ['AWS_BUCKET_NAME'] - - def full_bucket_file_name(self): - return f"{self.bucket_name()}/{self.bucket_file_name}" From b7da6b948152f96884c2e81bc06876a8e8e60713 Mon Sep 17 00:00:00 2001 From: Bert Blommers Date: Tue, 17 Mar 2020 15:41:50 +0000 Subject: [PATCH 154/658] #2813 - DynamoDB - Add Global Index Status --- moto/dynamodb2/models.py | 4 ++++ tests/test_dynamodb2/test_dynamodb_table_with_range_key.py | 1 + 2 files changed, 5 insertions(+) diff --git a/moto/dynamodb2/models.py b/moto/dynamodb2/models.py index 1527821ed95c..91980ab0d034 100644 --- a/moto/dynamodb2/models.py +++ b/moto/dynamodb2/models.py @@ -679,6 +679,10 @@ def __init__( self.throughput["NumberOfDecreasesToday"] = 0 self.indexes = indexes self.global_indexes = global_indexes if global_indexes else [] + for index in self.global_indexes: + index[ + "IndexStatus" + ] = "ACTIVE" # One of 'CREATING'|'UPDATING'|'DELETING'|'ACTIVE' self.created_at = datetime.datetime.utcnow() self.items = defaultdict(dict) self.table_arn = self._generate_arn(table_name) diff --git a/tests/test_dynamodb2/test_dynamodb_table_with_range_key.py b/tests/test_dynamodb2/test_dynamodb_table_with_range_key.py index 7c7770874dd4..c433a3a31f64 100644 --- a/tests/test_dynamodb2/test_dynamodb_table_with_range_key.py +++ b/tests/test_dynamodb2/test_dynamodb_table_with_range_key.py @@ -574,6 +574,7 @@ def test_create_with_global_indexes(): "ReadCapacityUnits": 6, "WriteCapacityUnits": 1, }, + "IndexStatus": "ACTIVE", } ] ) From 3fab3f572f3da3470be1032775a3cf77dd7582f7 Mon Sep 17 00:00:00 2001 From: Bert Blommers Date: Tue, 17 Mar 2020 16:09:42 +0000 Subject: [PATCH 155/658] #2773 - CloudFormation - Set CreationDate --- moto/cloudformation/models.py | 6 ++++++ moto/cloudformation/responses.py | 4 ++-- .../test_cloudformation_stack_crud_boto3.py | 6 ++++++ 3 files changed, 14 insertions(+), 2 deletions(-) diff --git a/moto/cloudformation/models.py b/moto/cloudformation/models.py index b32d63b32496..8136e353de77 100644 --- a/moto/cloudformation/models.py +++ b/moto/cloudformation/models.py @@ -8,6 +8,7 @@ from moto.compat import OrderedDict from moto.core import BaseBackend, BaseModel +from moto.core.utils import iso_8601_datetime_without_milliseconds from .parsing import ResourceMap, OutputMap 
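# A minimal sketch of the timestamp format the helper imported above is
# expected to produce, matching the hard-coded "2010-07-27T22:28:28Z" value
# it replaces in the response templates below (naive UTC datetime assumed):
from datetime import datetime
stamp = datetime(2010, 7, 27, 22, 28, 28).strftime("%Y-%m-%dT%H:%M:%S") + "Z"
assert stamp == "2010-07-27T22:28:28Z"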
from .utils import ( @@ -240,6 +241,7 @@ def __init__( self.output_map = self._create_output_map() self._add_stack_event("CREATE_COMPLETE") self.status = "CREATE_COMPLETE" + self.creation_time = datetime.utcnow() def _create_resource_map(self): resource_map = ResourceMap( @@ -259,6 +261,10 @@ def _create_output_map(self): output_map.create() return output_map + @property + def creation_time_iso_8601(self): + return iso_8601_datetime_without_milliseconds(self.creation_time) + def _add_stack_event( self, resource_status, resource_status_reason=None, resource_properties=None ): diff --git a/moto/cloudformation/responses.py b/moto/cloudformation/responses.py index 77a3051fd19b..782d68946317 100644 --- a/moto/cloudformation/responses.py +++ b/moto/cloudformation/responses.py @@ -662,7 +662,7 @@ def update_stack_instances(self): {{ stack.name }} {{ stack.stack_id }} - 2010-07-27T22:28:28Z + {{ stack.creation_time_iso_8601 }} {{ stack.status }} {% if stack.notification_arns %} @@ -803,7 +803,7 @@ def update_stack_instances(self): {{ stack.stack_id }} {{ stack.status }} {{ stack.name }} - 2011-05-23T15:47:44Z + {{ stack.creation_time_iso_8601 }} {{ stack.description }} {% endfor %} diff --git a/tests/test_cloudformation/test_cloudformation_stack_crud_boto3.py b/tests/test_cloudformation/test_cloudformation_stack_crud_boto3.py index b7e86a1d5a3e..5444c2278977 100644 --- a/tests/test_cloudformation/test_cloudformation_stack_crud_boto3.py +++ b/tests/test_cloudformation/test_cloudformation_stack_crud_boto3.py @@ -2,6 +2,8 @@ import json from collections import OrderedDict +from datetime import datetime, timedelta +import pytz import boto3 from botocore.exceptions import ClientError @@ -911,6 +913,10 @@ def test_describe_stack_by_name(): stack = cf_conn.describe_stacks(StackName="test_stack")["Stacks"][0] stack["StackName"].should.equal("test_stack") + two_secs_ago = datetime.now(tz=pytz.UTC) - timedelta(seconds=2) + assert ( + two_secs_ago < stack["CreationTime"] < datetime.now(tz=pytz.UTC) + ), "Stack should have been created recently" @mock_cloudformation From 67c7fce85ecf95fdfbc8d768b7839cdb9ff00d5f Mon Sep 17 00:00:00 2001 From: Bert Blommers Date: Tue, 17 Mar 2020 16:28:49 +0000 Subject: [PATCH 156/658] #2760 - DynamoDB - Ensure proper ordering for Numeric sort keys --- moto/dynamodb2/models.py | 7 +++- tests/test_dynamodb2/test_dynamodb.py | 58 +++++++++++++++++++++++++++ 2 files changed, 64 insertions(+), 1 deletion(-) diff --git a/moto/dynamodb2/models.py b/moto/dynamodb2/models.py index 1527821ed95c..a80b3211ddcd 100644 --- a/moto/dynamodb2/models.py +++ b/moto/dynamodb2/models.py @@ -981,8 +981,13 @@ def query( if index_name: if index_range_key: + + # Convert to float if necessary to ensure proper ordering + def conv(x): + return float(x.value) if x.type == "N" else x.value + results.sort( - key=lambda item: item.attrs[index_range_key["AttributeName"]].value + key=lambda item: conv(item.attrs[index_range_key["AttributeName"]]) if item.attrs.get(index_range_key["AttributeName"]) else None ) diff --git a/tests/test_dynamodb2/test_dynamodb.py b/tests/test_dynamodb2/test_dynamodb.py index 82f82ccc904c..2b9475b9e509 100644 --- a/tests/test_dynamodb2/test_dynamodb.py +++ b/tests/test_dynamodb2/test_dynamodb.py @@ -4026,3 +4026,61 @@ def test_valid_transact_get_items(): "Table": {"CapacityUnits": 2.0, "ReadCapacityUnits": 2.0,}, } ) + + +@mock_dynamodb2 +def test_gsi_verify_negative_number_order(): + table_schema = { + "KeySchema": [{"AttributeName": "partitionKey", "KeyType": "HASH"}], + 
"GlobalSecondaryIndexes": [ + { + "IndexName": "GSI-K1", + "KeySchema": [ + {"AttributeName": "gsiK1PartitionKey", "KeyType": "HASH"}, + {"AttributeName": "gsiK1SortKey", "KeyType": "RANGE"}, + ], + "Projection": {"ProjectionType": "KEYS_ONLY",}, + } + ], + "AttributeDefinitions": [ + {"AttributeName": "partitionKey", "AttributeType": "S"}, + {"AttributeName": "gsiK1PartitionKey", "AttributeType": "S"}, + {"AttributeName": "gsiK1SortKey", "AttributeType": "N"}, + ], + } + + item1 = { + "partitionKey": "pk-1", + "gsiK1PartitionKey": "gsi-k1", + "gsiK1SortKey": Decimal("-0.6"), + } + + item2 = { + "partitionKey": "pk-2", + "gsiK1PartitionKey": "gsi-k1", + "gsiK1SortKey": Decimal("-0.7"), + } + + item3 = { + "partitionKey": "pk-3", + "gsiK1PartitionKey": "gsi-k1", + "gsiK1SortKey": Decimal("0.7"), + } + + dynamodb = boto3.resource("dynamodb") + dynamodb.create_table( + TableName="test-table", BillingMode="PAY_PER_REQUEST", **table_schema + ) + table = dynamodb.Table("test-table") + table.put_item(Item=item3) + table.put_item(Item=item1) + table.put_item(Item=item2) + + resp = table.query( + KeyConditionExpression=Key("gsiK1PartitionKey").eq("gsi-k1"), + IndexName="GSI-K1", + ) + # Items should be ordered with the lowest number first + [float(item["gsiK1SortKey"]) for item in resp["Items"]].should.equal( + [-0.7, -0.6, 0.7] + ) From aead80c392942d95a6437689d321c564739b795f Mon Sep 17 00:00:00 2001 From: Bert Blommers Date: Tue, 17 Mar 2020 17:11:35 +0000 Subject: [PATCH 157/658] Add missing region --- tests/test_dynamodb2/test_dynamodb.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/test_dynamodb2/test_dynamodb.py b/tests/test_dynamodb2/test_dynamodb.py index 2b9475b9e509..5d39e3805045 100644 --- a/tests/test_dynamodb2/test_dynamodb.py +++ b/tests/test_dynamodb2/test_dynamodb.py @@ -4067,7 +4067,7 @@ def test_gsi_verify_negative_number_order(): "gsiK1SortKey": Decimal("0.7"), } - dynamodb = boto3.resource("dynamodb") + dynamodb = boto3.resource("dynamodb", region_name="us-east-1") dynamodb.create_table( TableName="test-table", BillingMode="PAY_PER_REQUEST", **table_schema ) From a9cb5b566dc6615f41b18b77fc4f5f3071c04e03 Mon Sep 17 00:00:00 2001 From: ImFlog Date: Tue, 17 Mar 2020 18:35:38 +0100 Subject: [PATCH 158/658] Python 2.X, fix missing neq in DynamoType --- moto/dynamodb2/models.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/moto/dynamodb2/models.py b/moto/dynamodb2/models.py index 8e5a6175518e..65dd2c3f7900 100644 --- a/moto/dynamodb2/models.py +++ b/moto/dynamodb2/models.py @@ -146,6 +146,9 @@ def __hash__(self): def __eq__(self, other): return self.type == other.type and self.value == other.value + def __ne__(self, other): + return self.type != other.type or self.value != other.value + def __lt__(self, other): return self.cast_value < other.cast_value From f0cab68208ae023c9e47976efdd6c9edd1cc3bf6 Mon Sep 17 00:00:00 2001 From: Bert Blommers Date: Wed, 18 Mar 2020 11:46:44 +0000 Subject: [PATCH 159/658] #2264 - SES - Ensure verify_email_address works with display names --- moto/ses/models.py | 2 ++ tests/test_ses/test_ses_boto3.py | 13 +++++++++++++ 2 files changed, 15 insertions(+) diff --git a/moto/ses/models.py b/moto/ses/models.py index 4b6ce52c8198..91241f70629c 100644 --- a/moto/ses/models.py +++ b/moto/ses/models.py @@ -91,9 +91,11 @@ def _is_verified_address(self, source): return host in self.domains def verify_email_identity(self, address): + _, address = parseaddr(address) self.addresses.append(address) def verify_email_address(self, 
address): + _, address = parseaddr(address) self.email_addresses.append(address) def verify_domain(self, domain): diff --git a/tests/test_ses/test_ses_boto3.py b/tests/test_ses/test_ses_boto3.py index ee7c92aa1ad6..de8aa0813dae 100644 --- a/tests/test_ses/test_ses_boto3.py +++ b/tests/test_ses/test_ses_boto3.py @@ -214,3 +214,16 @@ def test_send_raw_email_without_source_or_from(): kwargs = dict(RawMessage={"Data": message.as_string()}) conn.send_raw_email.when.called_with(**kwargs).should.throw(ClientError) + + +@mock_ses +def test_send_email_notification_with_encoded_sender(): + sender = "Foo " + conn = boto3.client("ses", region_name="us-east-1") + conn.verify_email_identity(EmailAddress=sender) + response = conn.send_email( + Source=sender, + Destination={"ToAddresses": ["your.friend@hotmail.com"]}, + Message={"Subject": {"Data": "hi",}, "Body": {"Text": {"Data": "there",}}}, + ) + response["ResponseMetadata"]["HTTPStatusCode"].should.equal(200) From cbf03979536a805408b093ee03e90df7942c0b6e Mon Sep 17 00:00:00 2001 From: Bert Blommers Date: Wed, 18 Mar 2020 13:02:07 +0000 Subject: [PATCH 160/658] #2255 - CF - Implement FN::Transform and AWS::Include --- moto/cloudformation/parsing.py | 21 ++++++++- moto/cloudwatch/models.py | 10 ++-- moto/logs/models.py | 1 + moto/s3/utils.py | 11 +++++ .../test_cloudformation_stack_integration.py | 47 +++++++++++++++++++ 5 files changed, 82 insertions(+), 8 deletions(-) diff --git a/moto/cloudformation/parsing.py b/moto/cloudformation/parsing.py index d7e15c7b4428..79276c8fca6a 100644 --- a/moto/cloudformation/parsing.py +++ b/moto/cloudformation/parsing.py @@ -1,5 +1,6 @@ from __future__ import unicode_literals import functools +import json import logging import copy import warnings @@ -24,7 +25,8 @@ from moto.rds2 import models as rds2_models from moto.redshift import models as redshift_models from moto.route53 import models as route53_models -from moto.s3 import models as s3_models +from moto.s3 import models as s3_models, s3_backend +from moto.s3.utils import bucket_and_name_from_url from moto.sns import models as sns_models from moto.sqs import models as sqs_models from moto.core import ACCOUNT_ID @@ -150,7 +152,10 @@ def clean_json(resource_json, resources_map): map_path = resource_json["Fn::FindInMap"][1:] result = resources_map[map_name] for path in map_path: - result = result[clean_json(path, resources_map)] + if "Fn::Transform" in result: + result = resources_map[clean_json(path, resources_map)] + else: + result = result[clean_json(path, resources_map)] return result if "Fn::GetAtt" in resource_json: @@ -470,6 +475,17 @@ def resources(self): def load_mapping(self): self._parsed_resources.update(self._template.get("Mappings", {})) + def transform_mapping(self): + for k, v in self._template.get("Mappings", {}).items(): + if "Fn::Transform" in v: + name = v["Fn::Transform"]["Name"] + params = v["Fn::Transform"]["Parameters"] + if name == "AWS::Include": + location = params["Location"] + bucket_name, name = bucket_and_name_from_url(location) + key = s3_backend.get_key(bucket_name, name) + self._parsed_resources.update(json.loads(key.value)) + def load_parameters(self): parameter_slots = self._template.get("Parameters", {}) for parameter_name, parameter in parameter_slots.items(): @@ -515,6 +531,7 @@ def load_conditions(self): def create(self): self.load_mapping() + self.transform_mapping() self.load_parameters() self.load_conditions() diff --git a/moto/cloudwatch/models.py b/moto/cloudwatch/models.py index bdba09930582..a8a1b1d19153 100644 --- 
a/moto/cloudwatch/models.py
+++ b/moto/cloudwatch/models.py
@@ -5,6 +5,7 @@
 from moto.core.utils import iso_8601_datetime_without_milliseconds
 from moto.core import BaseBackend, BaseModel
 from moto.core.exceptions import RESTError
+from moto.logs import logs_backends
 from datetime import datetime, timedelta
 from dateutil.tz import tzutc
 from uuid import uuid4
@@ -428,12 +429,9 @@ def create_from_cloudformation_json(
         cls, resource_name, cloudformation_json, region_name
     ):
         properties = cloudformation_json["Properties"]
-        spec = {"LogGroupName": properties["LogGroupName"]}
-        optional_properties = "Tags".split()
-        for prop in optional_properties:
-            if prop in properties:
-                spec[prop] = properties[prop]
-        return LogGroup(spec)
+        log_group_name = properties["LogGroupName"]
+        tags = properties.get("Tags", {})
+        return logs_backends[region_name].create_log_group(log_group_name, tags)
 
 cloudwatch_backends = {}
diff --git a/moto/logs/models.py b/moto/logs/models.py
index 7448319db932..5e21d87931eb 100644
--- a/moto/logs/models.py
+++ b/moto/logs/models.py
@@ -405,6 +405,7 @@ def create_log_group(self, log_group_name, tags):
         if log_group_name in self.groups:
             raise ResourceAlreadyExistsException()
         self.groups[log_group_name] = LogGroup(self.region_name, log_group_name, tags)
+        return self.groups[log_group_name]
 
     def ensure_log_group(self, log_group_name, tags):
         if log_group_name in self.groups:
diff --git a/moto/s3/utils.py b/moto/s3/utils.py
index 6855c9b25e75..6ddcfa63e387 100644
--- a/moto/s3/utils.py
+++ b/moto/s3/utils.py
@@ -35,6 +35,17 @@ def bucket_name_from_url(url):
     return None
 
 
+# 'owi-common-cf', 'snippets/test.json' = bucket_and_name_from_url('s3://owi-common-cf/snippets/test.json')
+def bucket_and_name_from_url(url):
+    prefix = "s3://"
+    if url.startswith(prefix):
+        bucket_name = url[len(prefix) : url.index("/", len(prefix))]
+        key = url[url.index("/", len(prefix)) + 1 :]
+        return bucket_name, key
+    else:
+        return None, None
+
+
 REGION_URL_REGEX = re.compile(
     r"^https?://(s3[-\.](?P<region1>.+)\.amazonaws\.com/(.+)|"
     r"(.+)\.s3[-\.](?P<region2>.+)\.amazonaws\.com)/?"
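
A minimal sketch of the new helper's contract, assuming only that
bucket_and_name_from_url is importable from moto.s3.utils as the hunk above
adds it:

    from moto.s3.utils import bucket_and_name_from_url

    # s3:// URLs split into (bucket, key); anything else yields (None, None)
    assert bucket_and_name_from_url("s3://owi-common-cf/snippets/test.json") == (
        "owi-common-cf",
        "snippets/test.json",
    )
    assert bucket_and_name_from_url("https://example.com/file.json") == (None, None)
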
diff --git a/tests/test_cloudformation/test_cloudformation_stack_integration.py b/tests/test_cloudformation/test_cloudformation_stack_integration.py index 5a3181449b23..a612156c444b 100644 --- a/tests/test_cloudformation/test_cloudformation_stack_integration.py +++ b/tests/test_cloudformation/test_cloudformation_stack_integration.py @@ -32,12 +32,14 @@ mock_iam_deprecated, mock_kms, mock_lambda, + mock_logs, mock_rds_deprecated, mock_rds2, mock_rds2_deprecated, mock_redshift, mock_redshift_deprecated, mock_route53_deprecated, + mock_s3, mock_sns_deprecated, mock_sqs, mock_sqs_deprecated, @@ -2332,3 +2334,48 @@ def test_stack_dynamodb_resources_integration(): response["Item"]["Sales"].should.equal(Decimal("10")) response["Item"]["NumberOfSongs"].should.equal(Decimal("5")) response["Item"]["Album"].should.equal("myAlbum") + + +@mock_cloudformation +@mock_logs +@mock_s3 +def test_create_log_group_using_fntransform(): + s3_resource = boto3.resource("s3") + s3_resource.create_bucket( + Bucket="owi-common-cf", + CreateBucketConfiguration={"LocationConstraint": "us-west-2"}, + ) + s3_resource.Object("owi-common-cf", "snippets/test.json").put( + Body=json.dumps({"lgname": {"name": "some-log-group"}}) + ) + template = { + "AWSTemplateFormatVersion": "2010-09-09", + "Mappings": { + "EnvironmentMapping": { + "Fn::Transform": { + "Name": "AWS::Include", + "Parameters": {"Location": "s3://owi-common-cf/snippets/test.json"}, + } + } + }, + "Resources": { + "LogGroup": { + "Properties": { + "LogGroupName": { + "Fn::FindInMap": ["EnvironmentMapping", "lgname", "name"] + }, + "RetentionInDays": 90, + }, + "Type": "AWS::Logs::LogGroup", + } + }, + } + + cf_conn = boto3.client("cloudformation", "us-west-2") + cf_conn.create_stack( + StackName="test_stack", TemplateBody=json.dumps(template), + ) + + logs_conn = boto3.client("logs", region_name="us-west-2") + log_group = logs_conn.describe_log_groups()["logGroups"][0] + log_group["logGroupName"].should.equal("some-log-group") From b1da99aedaee0d7db7c8d68dfd477440705648e7 Mon Sep 17 00:00:00 2001 From: Bert Blommers Date: Fri, 20 Mar 2020 12:29:04 +0000 Subject: [PATCH 161/658] #2797 - DynamoDB - Allow case insensitive AND in KeyConditionExpression --- moto/dynamodb2/responses.py | 6 ++++-- tests/test_dynamodb2/test_dynamodb.py | 9 ++++++++- 2 files changed, 12 insertions(+), 3 deletions(-) diff --git a/moto/dynamodb2/responses.py b/moto/dynamodb2/responses.py index c72ded2c381e..c13078a728be 100644 --- a/moto/dynamodb2/responses.py +++ b/moto/dynamodb2/responses.py @@ -459,8 +459,10 @@ def query(self): for k, v in six.iteritems(self.body.get("ExpressionAttributeNames", {})) ) - if " AND " in key_condition_expression: - expressions = key_condition_expression.split(" AND ", 1) + if " and " in key_condition_expression.lower(): + expressions = re.split( + " AND ", key_condition_expression, maxsplit=1, flags=re.IGNORECASE + ) index_hash_key = [key for key in index if key["KeyType"] == "HASH"][0] hash_key_var = reverse_attribute_lookup.get( diff --git a/tests/test_dynamodb2/test_dynamodb.py b/tests/test_dynamodb2/test_dynamodb.py index 062208863153..191f19c364c3 100644 --- a/tests/test_dynamodb2/test_dynamodb.py +++ b/tests/test_dynamodb2/test_dynamodb.py @@ -1408,6 +1408,13 @@ def test_filter_expression(): filter_expr.expr(row1).should.be(True) filter_expr.expr(row2).should.be(False) + # lowercase AND test + filter_expr = moto.dynamodb2.comparisons.get_filter_expression( + "Id > :v0 and Subs < :v1", {}, {":v0": {"N": "5"}, ":v1": {"N": "7"}} + ) + 
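+    # same rows as the uppercase variant above; a lowercase "and" must parse identically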
filter_expr.expr(row1).should.be(True) + filter_expr.expr(row2).should.be(False) + # OR test filter_expr = moto.dynamodb2.comparisons.get_filter_expression( "Id = :v0 OR Id=:v1", {}, {":v0": {"N": "5"}, ":v1": {"N": "8"}} @@ -2719,7 +2726,7 @@ def test_query_gsi_with_range_key(): res = dynamodb.query( TableName="test", IndexName="test_gsi", - KeyConditionExpression="gsi_hash_key = :gsi_hash_key AND gsi_range_key = :gsi_range_key", + KeyConditionExpression="gsi_hash_key = :gsi_hash_key and gsi_range_key = :gsi_range_key", ExpressionAttributeValues={ ":gsi_hash_key": {"S": "key1"}, ":gsi_range_key": {"S": "range1"}, From 5b596c8a78ffc0c5a6ee1b9b28629c3f04bd5396 Mon Sep 17 00:00:00 2001 From: Bert Blommers Date: Fri, 20 Mar 2020 15:17:55 +0000 Subject: [PATCH 162/658] #2699 - EC2 - Add Volumes using CloudFormation --- moto/ec2/models.py | 14 ++++++++++- tests/test_ec2/test_instances.py | 40 +++++++++++++++++++++++++++++++- 2 files changed, 52 insertions(+), 2 deletions(-) diff --git a/moto/ec2/models.py b/moto/ec2/models.py index be39bab28a28..1b363a193b4c 100644 --- a/moto/ec2/models.py +++ b/moto/ec2/models.py @@ -556,6 +556,10 @@ def __del__(self): # worst case we'll get IP address exaustion... rarely pass + def add_block_device(self, size, device_path): + volume = self.ec2_backend.create_volume(size, self.region_name) + self.ec2_backend.attach_volume(volume.id, self.id, device_path) + def setup_defaults(self): # Default have an instance with root volume should you not wish to # override with attach volume cmd. @@ -620,6 +624,7 @@ def create_from_cloudformation_json( subnet_id=properties.get("SubnetId"), key_name=properties.get("KeyName"), private_ip=properties.get("PrivateIpAddress"), + block_device_mappings=properties.get("BlockDeviceMappings", {}), ) instance = reservation.instances[0] for tag in properties.get("Tags", []): @@ -872,7 +877,14 @@ def add_instances(self, image_id, count, user_data, security_group_names, **kwar ) new_reservation.instances.append(new_instance) new_instance.add_tags(instance_tags) - new_instance.setup_defaults() + if "block_device_mappings" in kwargs: + for block_device in kwargs["block_device_mappings"]: + new_instance.add_block_device( + block_device["Ebs"]["VolumeSize"], block_device["DeviceName"] + ) + else: + new_instance.setup_defaults() + return new_reservation def start_instances(self, instance_ids): diff --git a/tests/test_ec2/test_instances.py b/tests/test_ec2/test_instances.py index 85ba0fe01bbd..4d1cbb28d4cd 100644 --- a/tests/test_ec2/test_instances.py +++ b/tests/test_ec2/test_instances.py @@ -9,6 +9,7 @@ import base64 import datetime import ipaddress +import json import six import boto @@ -18,7 +19,7 @@ from freezegun import freeze_time import sure # noqa -from moto import mock_ec2_deprecated, mock_ec2 +from moto import mock_ec2_deprecated, mock_ec2, mock_cloudformation from tests.helpers import requires_boto_gte @@ -1399,3 +1400,40 @@ def test_describe_instance_attribute(): invalid_instance_attribute=invalid_instance_attribute ) ex.exception.response["Error"]["Message"].should.equal(message) + + +@mock_ec2 +@mock_cloudformation +def test_volume_size_through_cloudformation(): + ec2 = boto3.client("ec2", region_name="us-east-1") + cf = boto3.client("cloudformation", region_name="us-east-1") + + volume_template = { + "AWSTemplateFormatVersion": "2010-09-09", + "Resources": { + "testInstance": { + "Type": "AWS::EC2::Instance", + "Properties": { + "ImageId": "ami-d3adb33f", + "KeyName": "dummy", + "InstanceType": "t2.micro", + 
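+                    # an explicit mapping replaces moto's default /dev/sda1 root volume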
"BlockDeviceMappings": [ + {"DeviceName": "/dev/sda2", "Ebs": {"VolumeSize": "50"}} + ], + "Tags": [ + {"Key": "foo", "Value": "bar"}, + {"Key": "blah", "Value": "baz"}, + ], + }, + } + }, + } + template_json = json.dumps(volume_template) + cf.create_stack(StackName="test_stack", TemplateBody=template_json) + instances = ec2.describe_instances() + volume = instances["Reservations"][0]["Instances"][0]["BlockDeviceMappings"][0][ + "Ebs" + ] + + volumes = ec2.describe_volumes(VolumeIds=[volume["VolumeId"]]) + volumes["Volumes"][0]["Size"].should.equal(50) From da1a2118bb12ca3279952d88154ed221f9f0fd1e Mon Sep 17 00:00:00 2001 From: Bert Blommers Date: Fri, 20 Mar 2020 16:17:21 +0000 Subject: [PATCH 163/658] EC2 - Verify default block exists before tearing down --- moto/ec2/models.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/moto/ec2/models.py b/moto/ec2/models.py index 1b363a193b4c..c391c88f3530 100644 --- a/moto/ec2/models.py +++ b/moto/ec2/models.py @@ -567,9 +567,10 @@ def setup_defaults(self): self.ec2_backend.attach_volume(volume.id, self.id, "/dev/sda1") def teardown_defaults(self): - volume_id = self.block_device_mapping["/dev/sda1"].volume_id - self.ec2_backend.detach_volume(volume_id, self.id, "/dev/sda1") - self.ec2_backend.delete_volume(volume_id) + if "/dev/sda1" in self.block_device_mapping: + volume_id = self.block_device_mapping["/dev/sda1"].volume_id + self.ec2_backend.detach_volume(volume_id, self.id, "/dev/sda1") + self.ec2_backend.delete_volume(volume_id) @property def get_block_device_mapping(self): From e82e1e3f397cd610d3ed0316c37325cdfe55926b Mon Sep 17 00:00:00 2001 From: Bert Blommers Date: Sat, 21 Mar 2020 12:20:09 +0000 Subject: [PATCH 164/658] DynamoDB - Add 1MB item size check --- moto/dynamodb2/models.py | 11 ++++++++ tests/test_dynamodb2/test_dynamodb.py | 38 +++++++++++++++++++++++++++ 2 files changed, 49 insertions(+) diff --git a/moto/dynamodb2/models.py b/moto/dynamodb2/models.py index 54dccd56dc7c..a35eded61f6c 100644 --- a/moto/dynamodb2/models.py +++ b/moto/dynamodb2/models.py @@ -285,6 +285,9 @@ def __init__(self, hash_key, hash_key_type, range_key, range_key_type, attrs): def __repr__(self): return "Item: {0}".format(self.to_json()) + def size(self): + return sum([bytesize(key) + value.size() for key, value in self.attrs.items()]) + def to_json(self): attributes = {} for attribute_key, attribute in self.attrs.items(): @@ -1123,6 +1126,14 @@ def _trim_results(self, results, limit, exclusive_start_key, scanned_index=None) break last_evaluated_key = None + size_limit = 1000000 # DynamoDB has a 1MB size limit + item_size = sum([res.size() for res in results]) + if item_size > size_limit: + item_size = idx = 0 + while item_size + results[idx].size() < size_limit: + item_size += results[idx].size() + idx += 1 + limit = min(limit, idx) if limit else idx if limit and len(results) > limit: results = results[:limit] last_evaluated_key = {self.hash_key_attr: results[-1].hash_key} diff --git a/tests/test_dynamodb2/test_dynamodb.py b/tests/test_dynamodb2/test_dynamodb.py index 062208863153..daae79232abd 100644 --- a/tests/test_dynamodb2/test_dynamodb.py +++ b/tests/test_dynamodb2/test_dynamodb.py @@ -4132,3 +4132,41 @@ def test_gsi_verify_negative_number_order(): [float(item["gsiK1SortKey"]) for item in resp["Items"]].should.equal( [-0.7, -0.6, 0.7] ) + + +@mock_dynamodb2 +def test_dynamodb_max_1mb_limit(): + ddb = boto3.resource("dynamodb", region_name="eu-west-1") + + table_name = "populated-mock-table" + table = ddb.create_table( + 
TableName=table_name, + KeySchema=[ + {"AttributeName": "partition_key", "KeyType": "HASH"}, + {"AttributeName": "sort_key", "KeyType": "SORT"}, + ], + AttributeDefinitions=[ + {"AttributeName": "partition_key", "AttributeType": "S"}, + {"AttributeName": "sort_key", "AttributeType": "S"}, + ], + ProvisionedThroughput={"ReadCapacityUnits": 5, "WriteCapacityUnits": 5}, + ) + + # Populate the table + items = [ + { + "partition_key": "partition_key_val", # size=30 + "sort_key": "sort_key_value____" + str(i), # size=30 + } + for i in range(10000, 29999) + ] + with table.batch_writer() as batch: + for item in items: + batch.put_item(Item=item) + + response = table.query( + KeyConditionExpression=Key("partition_key").eq("partition_key_val") + ) + # We shouldn't get everything back - the total result set is well over 1MB + assert response["Count"] < len(items) + response["LastEvaluatedKey"].shouldnt.be(None) From 23dfecc845b774493c814745e712f8feb7296402 Mon Sep 17 00:00:00 2001 From: gruebel Date: Sat, 21 Mar 2020 19:25:25 +0100 Subject: [PATCH 165/658] Fix missing MessageAttributes when using RawMessageDelivery --- moto/sns/models.py | 40 +++++++++++++----- tests/test_sns/test_publishing_boto3.py | 54 +++++++++++++++++-------- 2 files changed, 66 insertions(+), 28 deletions(-) diff --git a/moto/sns/models.py b/moto/sns/models.py index d6791eecf317..85196cd8f8bb 100644 --- a/moto/sns/models.py +++ b/moto/sns/models.py @@ -146,20 +146,38 @@ def publish(self, message, message_id, subject=None, message_attributes=None): queue_name = self.endpoint.split(":")[-1] region = self.endpoint.split(":")[3] if self.attributes.get("RawMessageDelivery") != "true": - enveloped_message = json.dumps( - self.get_post_data( - message, - message_id, - subject, - message_attributes=message_attributes, + sqs_backends[region].send_message( + queue_name, + json.dumps( + self.get_post_data( + message, + message_id, + subject, + message_attributes=message_attributes, + ), + sort_keys=True, + indent=2, + separators=(",", ": "), ), - sort_keys=True, - indent=2, - separators=(",", ": "), ) else: - enveloped_message = message - sqs_backends[region].send_message(queue_name, enveloped_message) + raw_message_attributes = {} + for key, value in message_attributes.items(): + type = "string_value" + type_value = value["Value"] + if value["Type"].startswith("Binary"): + type = "binary_value" + elif value["Type"].startswith("Number"): + type_value = "{0:g}".format(value["Value"]) + + raw_message_attributes[key] = { + "data_type": value["Type"], + type: type_value, + } + + sqs_backends[region].send_message( + queue_name, message, message_attributes=raw_message_attributes + ) elif self.protocol in ["http", "https"]: post_data = self.get_post_data(message, message_id, subject) requests.post( diff --git a/tests/test_sns/test_publishing_boto3.py b/tests/test_sns/test_publishing_boto3.py index 51e0a9f5723b..fddd9125c692 100644 --- a/tests/test_sns/test_publishing_boto3.py +++ b/tests/test_sns/test_publishing_boto3.py @@ -148,34 +148,42 @@ def test_publish_to_sqs_msg_attr_byte_value(): conn.create_topic(Name="some-topic") response = conn.list_topics() topic_arn = response["Topics"][0]["TopicArn"] - - sqs_conn = boto3.resource("sqs", region_name="us-east-1") - queue = sqs_conn.create_queue(QueueName="test-queue") - + sqs = boto3.resource("sqs", region_name="us-east-1") + queue = sqs.create_queue(QueueName="test-queue") + conn.subscribe( + TopicArn=topic_arn, Protocol="sqs", Endpoint=queue.attributes["QueueArn"], + ) + queue_raw = 
sqs.create_queue(QueueName="test-queue-raw") conn.subscribe( TopicArn=topic_arn, Protocol="sqs", - Endpoint="arn:aws:sqs:us-east-1:{}:test-queue".format(ACCOUNT_ID), + Endpoint=queue_raw.attributes["QueueArn"], + Attributes={"RawMessageDelivery": "true"}, ) - message = "my message" + conn.publish( TopicArn=topic_arn, - Message=message, + Message="my message", MessageAttributes={ "store": {"DataType": "Binary", "BinaryValue": b"\x02\x03\x04"} }, ) - messages = queue.receive_messages(MaxNumberOfMessages=5) - message_attributes = [json.loads(m.body)["MessageAttributes"] for m in messages] - message_attributes.should.equal( - [ - { - "store": { - "Type": "Binary", - "Value": base64.b64encode(b"\x02\x03\x04").decode(), - } + + message = json.loads(queue.receive_messages()[0].body) + message["Message"].should.equal("my message") + message["MessageAttributes"].should.equal( + { + "store": { + "Type": "Binary", + "Value": base64.b64encode(b"\x02\x03\x04").decode(), } - ] + } + ) + + message = queue_raw.receive_messages()[0] + message.body.should.equal("my message") + message.message_attributes.should.equal( + {"store": {"DataType": "Binary", "BinaryValue": b"\x02\x03\x04"}} ) @@ -187,6 +195,12 @@ def test_publish_to_sqs_msg_attr_number_type(): sqs = boto3.resource("sqs", region_name="us-east-1") queue = sqs.create_queue(QueueName="test-queue") topic.subscribe(Protocol="sqs", Endpoint=queue.attributes["QueueArn"]) + queue_raw = sqs.create_queue(QueueName="test-queue-raw") + topic.subscribe( + Protocol="sqs", + Endpoint=queue_raw.attributes["QueueArn"], + Attributes={"RawMessageDelivery": "true"}, + ) topic.publish( Message="test message", @@ -199,6 +213,12 @@ def test_publish_to_sqs_msg_attr_number_type(): {"retries": {"Type": "Number", "Value": 0}} ) + message = queue_raw.receive_messages()[0] + message.body.should.equal("test message") + message.message_attributes.should.equal( + {"retries": {"DataType": "Number", "StringValue": "0"}} + ) + @mock_sns def test_publish_sms(): From 7318523b50c48e3aed3b52ef745dde34f3f2a9ed Mon Sep 17 00:00:00 2001 From: Guilherme Martins Crocetti Date: Sun, 22 Mar 2020 16:30:16 -0300 Subject: [PATCH 166/658] Add cloudformation support for EventBridge --- moto/cloudformation/parsing.py | 3 +++ moto/events/models.py | 18 ++++++++++++++ .../test_cloudformation_stack_crud.py | 24 +++++++++++++++++++ 3 files changed, 45 insertions(+) diff --git a/moto/cloudformation/parsing.py b/moto/cloudformation/parsing.py index 79276c8fca6a..60eee63aa121 100644 --- a/moto/cloudformation/parsing.py +++ b/moto/cloudformation/parsing.py @@ -1,3 +1,4 @@ + from __future__ import unicode_literals import functools import json @@ -18,6 +19,7 @@ from moto.ecs import models as ecs_models from moto.elb import models as elb_models from moto.elbv2 import models as elbv2_models +from moto.events import models as events_models from moto.iam import models as iam_models from moto.kinesis import models as kinesis_models from moto.kms import models as kms_models @@ -94,6 +96,7 @@ "AWS::SNS::Topic": sns_models.Topic, "AWS::S3::Bucket": s3_models.FakeBucket, "AWS::SQS::Queue": sqs_models.Queue, + "AWS::Events::Rule": events_models.Rule, } # http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-name.html diff --git a/moto/events/models.py b/moto/events/models.py index a80b86daa302..2f6f3b869826 100644 --- a/moto/events/models.py +++ b/moto/events/models.py @@ -55,6 +55,24 @@ def remove_targets(self, ids): if index is not None: self.targets.pop(index) + @classmethod + def 
create_from_cloudformation_json(
+        cls, resource_name, cloudformation_json, region_name
+    ):
+        properties = cloudformation_json["Properties"]
+        event_backend = events_backends[region_name]
+        event_name = properties.get("Name") or resource_name
+        return event_backend.put_rule(name=event_name, **properties)
+
+    @classmethod
+    def delete_from_cloudformation_json(
+        cls, resource_name, cloudformation_json, region_name
+    ):
+        properties = cloudformation_json["Properties"]
+        event_backend = events_backends[region_name]
+        event_name = properties.get("Name") or resource_name
+        event_backend.delete_rule(name=event_name)
+
 
 class EventBus(BaseModel):
     def __init__(self, region_name, name):
diff --git a/tests/test_cloudformation/test_cloudformation_stack_crud.py b/tests/test_cloudformation/test_cloudformation_stack_crud.py
index 3d1b2ab8c68c..f6d359ec0f0d 100644
--- a/tests/test_cloudformation/test_cloudformation_stack_crud.py
+++ b/tests/test_cloudformation/test_cloudformation_stack_crud.py
@@ -596,6 +596,30 @@ def test_create_stack_kinesis():
     assert len(resources) == 1
 
 
+@mock_cloudformation_deprecated
+def test_create_stack_events():
+    conn = boto.connect_cloudformation()
+    dummy_template = {
+        "AWSTemplateFormatVersion": "2010-09-09",
+        "Description": "Stack Kinesis Test 1",
+        "Parameters": {},
+        "Resources": {
+            "event": {
+                "Type": "AWS::Events::Rule",
+                "Properties": {
+                    "State": "ENABLED",
+                    "ScheduleExpression": "rate(5 minutes)",
+                },
+            }
+        },
+    }
+    conn.create_stack("test_stack_events_1", template_body=json.dumps(dummy_template))
+    stack = conn.describe_stacks()[0]
+
+    resources = stack.list_resources()
+    resources.should.have.length_of(1)
+
+
 def get_role_name():
     with mock_iam_deprecated():
         iam = boto.connect_iam()
From a1f664d2bbbb4788d567afe2f2ae9f42ba924240 Mon Sep 17 00:00:00 2001
From: Guilherme Martins Crocetti
Date: Sun, 22 Mar 2020 17:32:37 -0300
Subject: [PATCH 167/658] Change put_rule (and its response) and fix
 tests_events/

---
 moto/events/models.py            |  8 ++++----
 moto/events/responses.py         |  4 ++--
 tests/test_events/test_events.py | 19 ++++++++++++++---
 3 files changed, 22 insertions(+), 9 deletions(-)

diff --git a/moto/events/models.py b/moto/events/models.py
index 2f6f3b869826..5c7662ba884a 100644
--- a/moto/events/models.py
+++ b/moto/events/models.py
@@ -250,10 +250,10 @@ def list_targets_by_rule(self, rule, next_token=None, limit=None):
         return return_obj
 
     def put_rule(self, name, **kwargs):
-        rule = Rule(name, self.region_name, **kwargs)
-        self.rules[rule.name] = rule
-        self.rules_order.append(rule.name)
-        return rule.arn
+        new_rule = Rule(name, self.region_name, **kwargs)
+        self.rules[new_rule.name] = new_rule
+        self.rules_order.append(new_rule.name)
+        return new_rule
 
     def put_targets(self, name, targets):
         rule = self.rules.get(name)
diff --git a/moto/events/responses.py b/moto/events/responses.py
index c9931aabc970..55a664b24e99 100644
--- a/moto/events/responses.py
+++ b/moto/events/responses.py
@@ -191,7 +191,7 @@ def put_rule(self):
                 "ValidationException", "Parameter ScheduleExpression is not valid."
) - rule_arn = self.events_backend.put_rule( + rule = self.events_backend.put_rule( name, ScheduleExpression=sched_exp, EventPattern=event_pattern, @@ -200,7 +200,7 @@ def put_rule(self): RoleArn=role_arn, ) - return json.dumps({"RuleArn": rule_arn}), self.response_headers + return json.dumps({"RuleArn": rule.arn}), self.response_headers def put_targets(self): rule_name = self._get_param("Rule") diff --git a/tests/test_events/test_events.py b/tests/test_events/test_events.py index 80fadb449319..27006ff1b2ff 100644 --- a/tests/test_events/test_events.py +++ b/tests/test_events/test_events.py @@ -1,15 +1,16 @@ -from moto.events.models import EventsBackend -from moto.events import mock_events import json import random import unittest import boto3 +import sure # noqa from botocore.exceptions import ClientError -from moto.core.exceptions import JsonRESTError from nose.tools import assert_raises from moto.core import ACCOUNT_ID +from moto.core.exceptions import JsonRESTError +from moto.events import mock_events +from moto.events.models import EventsBackend RULES = [ {"Name": "test1", "ScheduleExpression": "rate(5 minutes)"}, @@ -75,6 +76,18 @@ def generate_environment(): return client +@mock_events +def test_put_rule(): + client = boto3.client("events", "us-west-2") + + client.list_rules()["Rules"].should.have.length_of(0) + + rule_data = get_random_rule() + client.put_rule(**rule_data) + + client.list_rules()["Rules"].should.have.length_of(1) + + @mock_events def test_list_rules(): client = generate_environment() From 98a17dfc464c3b7a73a73c62e7f2868b9018d7a1 Mon Sep 17 00:00:00 2001 From: Guilherme Martins Crocetti Date: Sun, 22 Mar 2020 18:03:42 -0300 Subject: [PATCH 168/658] Add test for boto3 integration --- .../test_cloudformation_stack_crud.py | 8 ++--- .../test_cloudformation_stack_integration.py | 29 +++++++++++++++++++ 2 files changed, 32 insertions(+), 5 deletions(-) diff --git a/tests/test_cloudformation/test_cloudformation_stack_crud.py b/tests/test_cloudformation/test_cloudformation_stack_crud.py index f6d359ec0f0d..d3a03d2bb812 100644 --- a/tests/test_cloudformation/test_cloudformation_stack_crud.py +++ b/tests/test_cloudformation/test_cloudformation_stack_crud.py @@ -597,12 +597,10 @@ def test_create_stack_kinesis(): @mock_cloudformation_deprecated -def test_create_stack_events(): +def test_create_stack_events_rule(): conn = boto.connect_cloudformation() - dummy_template = { + events_template = { "AWSTemplateFormatVersion": "2010-09-09", - "Description": "Stack Kinesis Test 1", - "Parameters": {}, "Resources": { "event": { "Type": "AWS::Events::Rule", @@ -613,7 +611,7 @@ def test_create_stack_events(): } }, } - conn.create_stack("test_stack_events_1", template_body=json.dumps(dummy_template)) + conn.create_stack("test_stack_events_1", template_body=json.dumps(events_template)) stack = conn.describe_stacks()[0] resources = stack.list_resources() diff --git a/tests/test_cloudformation/test_cloudformation_stack_integration.py b/tests/test_cloudformation/test_cloudformation_stack_integration.py index a612156c444b..2e84180b37bd 100644 --- a/tests/test_cloudformation/test_cloudformation_stack_integration.py +++ b/tests/test_cloudformation/test_cloudformation_stack_integration.py @@ -29,6 +29,7 @@ mock_ec2_deprecated, mock_elb, mock_elb_deprecated, + mock_events, mock_iam_deprecated, mock_kms, mock_lambda, @@ -2379,3 +2380,31 @@ def test_create_log_group_using_fntransform(): logs_conn = boto3.client("logs", region_name="us-west-2") log_group = 
logs_conn.describe_log_groups()["logGroups"][0] log_group["logGroupName"].should.equal("some-log-group") + + +@mock_cloudformation +@mock_events +def test_stack_events_rule_integration(): + events_template = { + "AWSTemplateFormatVersion": "2010-09-09", + "Resources": { + "event": { + "Type": "AWS::Events::Rule", + "Properties": { + "Name": "quick-fox", + "State": "ENABLED", + "ScheduleExpression": "rate(5 minutes)", + }, + } + }, + } + cf_conn = boto3.client("cloudformation", "us-west-2") + cf_conn.create_stack( + StackName="test_stack", TemplateBody=json.dumps(events_template), + ) + + result = boto3.client("events", "us-west-2").list_rules() + result["Rules"].should.have.length_of(1) + result["Rules"][0]["Name"].should.equal("quick-fox") + result["Rules"][0]["State"].should.equal("ENABLED") + result["Rules"][0]["ScheduleExpression"].should.equal("rate(5 minutes)") From 6180cf7a45d294d31d63c514aa22ed78b0cf77e0 Mon Sep 17 00:00:00 2001 From: Guilherme Martins Crocetti Date: Sun, 22 Mar 2020 18:08:12 -0300 Subject: [PATCH 169/658] Fix blank space --- moto/cloudformation/parsing.py | 1 - 1 file changed, 1 deletion(-) diff --git a/moto/cloudformation/parsing.py b/moto/cloudformation/parsing.py index 60eee63aa121..cc4daf9ce5a0 100644 --- a/moto/cloudformation/parsing.py +++ b/moto/cloudformation/parsing.py @@ -1,4 +1,3 @@ - from __future__ import unicode_literals import functools import json From c3865532f9ca6237261591277bde1afbe910099e Mon Sep 17 00:00:00 2001 From: Bert Blommers Date: Mon, 23 Mar 2020 15:53:39 +0000 Subject: [PATCH 170/658] #2711 - Register default S3 metrics in CloudWatch --- moto/cloudwatch/models.py | 50 ++++++++++++++++--- moto/cloudwatch/responses.py | 5 +- moto/s3/models.py | 33 ++++++++++++ moto/s3/utils.py | 6 +++ tests/test_cloudwatch/test_cloudwatch.py | 40 +++++++++++++-- .../test_cloudwatch/test_cloudwatch_boto3.py | 16 ++++-- 6 files changed, 133 insertions(+), 17 deletions(-) diff --git a/moto/cloudwatch/models.py b/moto/cloudwatch/models.py index a8a1b1d19153..523eb10f36b2 100644 --- a/moto/cloudwatch/models.py +++ b/moto/cloudwatch/models.py @@ -22,6 +22,14 @@ def __init__(self, name, value): self.name = name self.value = value + def __eq__(self, item): + if isinstance(item, Dimension): + return self.name == item.name and self.value == item.value + return False + + def __ne__(self, item): # Only needed on Py2; Py3 defines it implicitly + return self != item + def daterange(start, stop, step=timedelta(days=1), inclusive=False): """ @@ -124,6 +132,17 @@ def __init__(self, namespace, name, value, dimensions, timestamp): Dimension(dimension["Name"], dimension["Value"]) for dimension in dimensions ] + def filter(self, namespace, name, dimensions): + if namespace and namespace != self.namespace: + return False + if name and name != self.name: + return False + if dimensions and any( + Dimension(d["Name"], d["Value"]) not in self.dimensions for d in dimensions + ): + return False + return True + class Dashboard(BaseModel): def __init__(self, name, body): @@ -202,6 +221,15 @@ def __init__(self): self.metric_data = [] self.paged_metric_data = {} + @property + # Retrieve a list of all OOTB metrics that are provided by metrics providers + # Computed on the fly + def aws_metric_data(self): + md = [] + for name, service in metric_providers.items(): + md.extend(service.get_cloudwatch_metrics()) + return md + def put_metric_alarm( self, name, @@ -334,7 +362,7 @@ def get_metric_statistics( return data def get_all_metrics(self): - return self.metric_data + return 
self.metric_data + self.aws_metric_data def put_dashboard(self, name, body): self.dashboards[name] = Dashboard(name, body) @@ -386,7 +414,7 @@ def set_alarm_state(self, alarm_name, reason, reason_data, state_value): self.alarms[alarm_name].update_state(reason, reason_data, state_value) - def list_metrics(self, next_token, namespace, metric_name): + def list_metrics(self, next_token, namespace, metric_name, dimensions): if next_token: if next_token not in self.paged_metric_data: raise RESTError( @@ -397,15 +425,16 @@ def list_metrics(self, next_token, namespace, metric_name): del self.paged_metric_data[next_token] # Cant reuse same token twice return self._get_paginated(metrics) else: - metrics = self.get_filtered_metrics(metric_name, namespace) + metrics = self.get_filtered_metrics(metric_name, namespace, dimensions) return self._get_paginated(metrics) - def get_filtered_metrics(self, metric_name, namespace): + def get_filtered_metrics(self, metric_name, namespace, dimensions): metrics = self.get_all_metrics() - if namespace: - metrics = [md for md in metrics if md.namespace == namespace] - if metric_name: - metrics = [md for md in metrics if md.name == metric_name] + metrics = [ + md + for md in metrics + if md.filter(namespace=namespace, name=metric_name, dimensions=dimensions) + ] return metrics def _get_paginated(self, metrics): @@ -443,3 +472,8 @@ def create_from_cloudformation_json( cloudwatch_backends[region] = CloudWatchBackend() for region in Session().get_available_regions("cloudwatch", partition_name="aws-cn"): cloudwatch_backends[region] = CloudWatchBackend() + +# List of services that provide OOTB CW metrics +# See the S3Backend constructor for an example +# TODO: We might have to separate this out per region for non-global services +metric_providers = {} diff --git a/moto/cloudwatch/responses.py b/moto/cloudwatch/responses.py index 7993c9f06284..dccc3021672d 100644 --- a/moto/cloudwatch/responses.py +++ b/moto/cloudwatch/responses.py @@ -124,9 +124,10 @@ def get_metric_statistics(self): def list_metrics(self): namespace = self._get_param("Namespace") metric_name = self._get_param("MetricName") + dimensions = self._get_multi_param("Dimensions.member") next_token = self._get_param("NextToken") next_token, metrics = self.cloudwatch_backend.list_metrics( - next_token, namespace, metric_name + next_token, namespace, metric_name, dimensions ) template = self.response_template(LIST_METRICS_TEMPLATE) return template.render(metrics=metrics, next_token=next_token) @@ -342,7 +343,7 @@ def set_alarm_state(self): {% endfor %} - {{ metric.name }} + Metric:{{ metric.name }} {{ metric.namespace }} {% endfor %} diff --git a/moto/s3/models.py b/moto/s3/models.py index 8c2a86f4121f..5f2678628c3e 100644 --- a/moto/s3/models.py +++ b/moto/s3/models.py @@ -22,6 +22,7 @@ from bisect import insort from moto.core import ACCOUNT_ID, BaseBackend, BaseModel from moto.core.utils import iso_8601_datetime_with_milliseconds, rfc_1123_datetime +from moto.cloudwatch.models import metric_providers, MetricDatum from .exceptions import ( BucketAlreadyExists, MissingBucket, @@ -1181,6 +1182,38 @@ class S3Backend(BaseBackend): def __init__(self): self.buckets = {} self.account_public_access_block = None + # Register this class as a CloudWatch Metric Provider + # Must provide a method 'get_cloudwatch_metrics' that will return a list of metrics, based on the data available + metric_providers["S3"] = self + + def get_cloudwatch_metrics(self): + metrics = [] + for name, bucket in self.buckets.items(): + 
metrics.append( + MetricDatum( + namespace="AWS/S3", + name="BucketSizeBytes", + value=bucket.keys.item_size(), + dimensions=[ + {"Name": "StorageType", "Value": "StandardStorage"}, + {"Name": "BucketName", "Value": name}, + ], + timestamp=datetime.datetime.now(), + ) + ) + metrics.append( + MetricDatum( + namespace="AWS/S3", + name="NumberOfObjects", + value=len(bucket.keys), + dimensions=[ + {"Name": "StorageType", "Value": "AllStorageTypes"}, + {"Name": "BucketName", "Value": name}, + ], + timestamp=datetime.datetime.now(), + ) + ) + return metrics def create_bucket(self, bucket_name, region_name): if bucket_name in self.buckets: diff --git a/moto/s3/utils.py b/moto/s3/utils.py index 6ddcfa63e387..014e98ca9c32 100644 --- a/moto/s3/utils.py +++ b/moto/s3/utils.py @@ -146,6 +146,12 @@ def _iterlists(self): for key in self: yield key, self.getlist(key) + def item_size(self): + size = 0 + for val in self.values(): + size += sys.getsizeof(val) + return size + items = iteritems = _iteritems lists = iterlists = _iterlists values = itervalues = _itervalues diff --git a/tests/test_cloudwatch/test_cloudwatch.py b/tests/test_cloudwatch/test_cloudwatch.py index 5a05a55e1cb5..2d338cf35298 100644 --- a/tests/test_cloudwatch/test_cloudwatch.py +++ b/tests/test_cloudwatch/test_cloudwatch.py @@ -1,9 +1,10 @@ import boto from boto.ec2.cloudwatch.alarm import MetricAlarm +from boto.s3.key import Key from datetime import datetime import sure # noqa -from moto import mock_cloudwatch_deprecated +from moto import mock_cloudwatch_deprecated, mock_s3_deprecated def alarm_fixture(name="tester", action=None): @@ -83,10 +84,11 @@ def test_put_metric_data(): ) metrics = conn.list_metrics() - metrics.should.have.length_of(1) + metric_names = [m for m in metrics if m.name == "metric"] + metric_names.should.have(1) metric = metrics[0] metric.namespace.should.equal("tester") - metric.name.should.equal("metric") + metric.name.should.equal("Metric:metric") dict(metric.dimensions).should.equal({"InstanceId": ["i-0123456,i-0123457"]}) @@ -153,3 +155,35 @@ def test_get_metric_statistics(): datapoint = datapoints[0] datapoint.should.have.key("Minimum").which.should.equal(1.5) datapoint.should.have.key("Timestamp").which.should.equal(metric_timestamp) + + +@mock_s3_deprecated +@mock_cloudwatch_deprecated +def test_cloudwatch_return_s3_metrics(): + + region = "us-east-1" + + cw = boto.ec2.cloudwatch.connect_to_region(region) + s3 = boto.s3.connect_to_region(region) + + bucket_name_1 = "test-bucket-1" + bucket_name_2 = "test-bucket-2" + + bucket1 = s3.create_bucket(bucket_name=bucket_name_1) + key = Key(bucket1) + key.key = "the-key" + key.set_contents_from_string("foobar" * 4) + s3.create_bucket(bucket_name=bucket_name_2) + + metrics_s3_bucket_1 = cw.list_metrics(dimensions={"BucketName": bucket_name_1}) + # Verify that the OOTB S3 metrics are available for the created buckets + len(metrics_s3_bucket_1).should.be(2) + metric_names = [m.name for m in metrics_s3_bucket_1] + sorted(metric_names).should.equal( + ["Metric:BucketSizeBytes", "Metric:NumberOfObjects"] + ) + + # Explicit clean up - the metrics for these buckets are messing with subsequent tests + key.delete() + s3.delete_bucket(bucket_name_1) + s3.delete_bucket(bucket_name_2) diff --git a/tests/test_cloudwatch/test_cloudwatch_boto3.py b/tests/test_cloudwatch/test_cloudwatch_boto3.py index 7fe1440528b7..a089f27bbbb1 100644 --- a/tests/test_cloudwatch/test_cloudwatch_boto3.py +++ b/tests/test_cloudwatch/test_cloudwatch_boto3.py @@ -154,7 +154,7 @@ def 
test_put_metric_data_no_dimensions(): metrics.should.have.length_of(1) metric = metrics[0] metric["Namespace"].should.equal("tester") - metric["MetricName"].should.equal("metric") + metric["MetricName"].should.equal("Metric:metric") @mock_cloudwatch @@ -182,7 +182,7 @@ def test_put_metric_data_with_statistics(): metrics.should.have.length_of(1) metric = metrics[0] metric["Namespace"].should.equal("tester") - metric["MetricName"].should.equal("statmetric") + metric["MetricName"].should.equal("Metric:statmetric") # TODO: test statistics - https://github.com/spulec/moto/issues/1615 @@ -233,8 +233,16 @@ def test_list_metrics(): # Verify format res.should.equal( [ - {u"Namespace": "list_test_1/", u"Dimensions": [], u"MetricName": "metric1"}, - {u"Namespace": "list_test_1/", u"Dimensions": [], u"MetricName": "metric1"}, + { + u"Namespace": "list_test_1/", + u"Dimensions": [], + u"MetricName": "Metric:metric1", + }, + { + u"Namespace": "list_test_1/", + u"Dimensions": [], + u"MetricName": "Metric:metric1", + }, ] ) # Verify unknown namespace still has no results From c96efe531e3de2ba88028cf44e753cdcbe902b7a Mon Sep 17 00:00:00 2001 From: Guilherme Martins Crocetti Date: Mon, 23 Mar 2020 22:14:34 -0300 Subject: [PATCH 171/658] Add delete method for cloudformation's deletion --- moto/events/models.py | 16 ++++++++++------ 1 file changed, 10 insertions(+), 6 deletions(-) diff --git a/moto/events/models.py b/moto/events/models.py index 5c7662ba884a..5f5909907845 100644 --- a/moto/events/models.py +++ b/moto/events/models.py @@ -26,12 +26,6 @@ def __init__(self, name, region_name, **kwargs): self.role_arn = kwargs.get("RoleArn") self.targets = [] - def enable(self): - self.state = "ENABLED" - - def disable(self): - self.state = "DISABLED" - # This song and dance for targets is because we need order for Limits and NextTokens, but can't use OrderedDicts # with Python 2.6, so tracking it with an array it is. def _check_target_exists(self, target_id): @@ -40,6 +34,16 @@ def _check_target_exists(self, target_id): return i return None + def enable(self): + self.state = "ENABLED" + + def disable(self): + self.state = "DISABLED" + + def delete(self, region_name): + event_backend = events_backends[region_name] + event_backend.delete_rule(name=self.name) + def put_targets(self, targets): # Not testing for valid ARNs. 
for target in targets: From 788b8fb6e152a3ae52827009679e059bd3874c75 Mon Sep 17 00:00:00 2001 From: Guilherme Martins Crocetti Date: Mon, 23 Mar 2020 22:17:02 -0300 Subject: [PATCH 172/658] Add tests for Events::Rule integration with cf --- .../test_cloudformation_stack_crud.py | 22 ------ .../test_cloudformation_stack_integration.py | 68 +++++++++++++++++-- 2 files changed, 61 insertions(+), 29 deletions(-) diff --git a/tests/test_cloudformation/test_cloudformation_stack_crud.py b/tests/test_cloudformation/test_cloudformation_stack_crud.py index d3a03d2bb812..3d1b2ab8c68c 100644 --- a/tests/test_cloudformation/test_cloudformation_stack_crud.py +++ b/tests/test_cloudformation/test_cloudformation_stack_crud.py @@ -596,28 +596,6 @@ def test_create_stack_kinesis(): assert len(resources) == 1 -@mock_cloudformation_deprecated -def test_create_stack_events_rule(): - conn = boto.connect_cloudformation() - events_template = { - "AWSTemplateFormatVersion": "2010-09-09", - "Resources": { - "event": { - "Type": "AWS::Events::Rule", - "Properties": { - "State": "ENABLED", - "ScheduleExpression": "rate(5 minutes)", - }, - } - }, - } - conn.create_stack("test_stack_events_1", template_body=json.dumps(events_template)) - stack = conn.describe_stacks()[0] - - resources = stack.list_resources() - resources.should.have.length_of(1) - - def get_role_name(): with mock_iam_deprecated(): iam = boto.connect_iam() diff --git a/tests/test_cloudformation/test_cloudformation_stack_integration.py b/tests/test_cloudformation/test_cloudformation_stack_integration.py index 2e84180b37bd..e501796600fe 100644 --- a/tests/test_cloudformation/test_cloudformation_stack_integration.py +++ b/tests/test_cloudformation/test_cloudformation_stack_integration.py @@ -2384,11 +2384,11 @@ def test_create_log_group_using_fntransform(): @mock_cloudformation @mock_events -def test_stack_events_rule_integration(): +def test_stack_events_create_rule_integration(): events_template = { "AWSTemplateFormatVersion": "2010-09-09", "Resources": { - "event": { + "Event": { "Type": "AWS::Events::Rule", "Properties": { "Name": "quick-fox", @@ -2403,8 +2403,62 @@ def test_stack_events_rule_integration(): StackName="test_stack", TemplateBody=json.dumps(events_template), ) - result = boto3.client("events", "us-west-2").list_rules() - result["Rules"].should.have.length_of(1) - result["Rules"][0]["Name"].should.equal("quick-fox") - result["Rules"][0]["State"].should.equal("ENABLED") - result["Rules"][0]["ScheduleExpression"].should.equal("rate(5 minutes)") + rules = boto3.client("events", "us-west-2").list_rules() + rules["Rules"].should.have.length_of(1) + rules["Rules"][0]["Name"].should.equal("quick-fox") + rules["Rules"][0]["State"].should.equal("ENABLED") + rules["Rules"][0]["ScheduleExpression"].should.equal("rate(5 minutes)") + + +@mock_cloudformation +@mock_events +def test_stack_events_delete_rule_integration(): + events_template = { + "AWSTemplateFormatVersion": "2010-09-09", + "Resources": { + "Event": { + "Type": "AWS::Events::Rule", + "Properties": { + "Name": "quick-fox", + "State": "ENABLED", + "ScheduleExpression": "rate(5 minutes)", + }, + } + }, + } + cf_conn = boto3.client("cloudformation", "us-west-2") + cf_conn.create_stack( + StackName="test_stack", TemplateBody=json.dumps(events_template), + ) + + rules = boto3.client("events", "us-west-2").list_rules() + rules["Rules"].should.have.length_of(1) + + cf_conn.delete_stack(StackName="test_stack") + + rules = boto3.client("events", "us-west-2").list_rules() + 
rules["Rules"].should.have.length_of(0) + + +@mock_cloudformation +@mock_events +def test_stack_events_create_rule_without_name_integration(): + events_template = { + "AWSTemplateFormatVersion": "2010-09-09", + "Resources": { + "Event": { + "Type": "AWS::Events::Rule", + "Properties": { + "State": "ENABLED", + "ScheduleExpression": "rate(5 minutes)", + }, + } + }, + } + cf_conn = boto3.client("cloudformation", "us-west-2") + cf_conn.create_stack( + StackName="test_stack", TemplateBody=json.dumps(events_template), + ) + + rules = boto3.client("events", "us-west-2").list_rules() + rules["Rules"][0]["Name"].should.contain("test_stack-Event-") From 11526ced18226910dba4c6d8153d7b43d1cf30bd Mon Sep 17 00:00:00 2001 From: Bert Blommers Date: Tue, 24 Mar 2020 09:24:38 +0000 Subject: [PATCH 173/658] #2810 - EC2 - Explicitly set ebs_optimized to False if not specified --- moto/ec2/responses/instances.py | 2 +- tests/test_ec2/test_instances.py | 6 ++++++ 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/moto/ec2/responses/instances.py b/moto/ec2/responses/instances.py index 29c346f8242b..ba15be8d036a 100644 --- a/moto/ec2/responses/instances.py +++ b/moto/ec2/responses/instances.py @@ -52,7 +52,7 @@ def run_instances(self): private_ip = self._get_param("PrivateIpAddress") associate_public_ip = self._get_param("AssociatePublicIpAddress") key_name = self._get_param("KeyName") - ebs_optimized = self._get_param("EbsOptimized") + ebs_optimized = self._get_param("EbsOptimized") or False instance_initiated_shutdown_behavior = self._get_param( "InstanceInitiatedShutdownBehavior" ) diff --git a/tests/test_ec2/test_instances.py b/tests/test_ec2/test_instances.py index 85ba0fe01bbd..d40aca000836 100644 --- a/tests/test_ec2/test_instances.py +++ b/tests/test_ec2/test_instances.py @@ -1319,6 +1319,12 @@ def test_create_instance_ebs_optimized(): instance.load() instance.ebs_optimized.should.be(False) + instance = ec2_resource.create_instances( + ImageId="ami-12345678", MaxCount=1, MinCount=1, + )[0] + instance.load() + instance.ebs_optimized.should.be(False) + @mock_ec2 def test_run_multiple_instances_in_same_command(): From 04f488da62462a926d4ee61ad303583bdaf836a8 Mon Sep 17 00:00:00 2001 From: Bert Blommers Date: Tue, 24 Mar 2020 10:22:08 +0000 Subject: [PATCH 174/658] #2388 - CloudFormation - CreateChangeSet does not create resources, as per spec --- moto/cloudformation/models.py | 14 +++++++++++--- moto/cloudformation/parsing.py | 4 +++- .../test_cloudformation_stack_crud_boto3.py | 17 +++++++++++++++++ 3 files changed, 31 insertions(+), 4 deletions(-) diff --git a/moto/cloudformation/models.py b/moto/cloudformation/models.py index 8136e353de77..281ab5e19a90 100644 --- a/moto/cloudformation/models.py +++ b/moto/cloudformation/models.py @@ -239,8 +239,11 @@ def __init__( self.cross_stack_resources = cross_stack_resources or {} self.resource_map = self._create_resource_map() self.output_map = self._create_output_map() - self._add_stack_event("CREATE_COMPLETE") - self.status = "CREATE_COMPLETE" + if create_change_set: + self.status = "REVIEW_IN_PROGRESS" + else: + self.create_resources() + self._add_stack_event("CREATE_COMPLETE") self.creation_time = datetime.utcnow() def _create_resource_map(self): @@ -253,7 +256,7 @@ def _create_resource_map(self): self.template_dict, self.cross_stack_resources, ) - resource_map.create() + resource_map.load() return resource_map def _create_output_map(self): @@ -326,6 +329,10 @@ def stack_outputs(self): def exports(self): return self.output_map.exports + def 
create_resources(self): + self.resource_map.create() + self.status = "CREATE_COMPLETE" + def update(self, template, role_arn=None, parameters=None, tags=None): self._add_stack_event( "UPDATE_IN_PROGRESS", resource_status_reason="User Initiated" @@ -640,6 +647,7 @@ def execute_change_set(self, change_set_name, stack_name=None): else: stack._add_stack_event("UPDATE_IN_PROGRESS") stack._add_stack_event("UPDATE_COMPLETE") + stack.create_resources() return True def describe_stacks(self, name_or_stack_id): diff --git a/moto/cloudformation/parsing.py b/moto/cloudformation/parsing.py index 79276c8fca6a..6789c0007863 100644 --- a/moto/cloudformation/parsing.py +++ b/moto/cloudformation/parsing.py @@ -529,14 +529,16 @@ def load_conditions(self): for condition_name in self.lazy_condition_map: self.lazy_condition_map[condition_name] - def create(self): + def load(self): self.load_mapping() self.transform_mapping() self.load_parameters() self.load_conditions() + def create(self): # Since this is a lazy map, to create every object we just need to # iterate through self. + # Assumes that self.load() has been called before self.tags.update( { "aws:cloudformation:stack-name": self.get("AWS::StackName"), diff --git a/tests/test_cloudformation/test_cloudformation_stack_crud_boto3.py b/tests/test_cloudformation/test_cloudformation_stack_crud_boto3.py index 5444c2278977..4df1ff5d2228 100644 --- a/tests/test_cloudformation/test_cloudformation_stack_crud_boto3.py +++ b/tests/test_cloudformation/test_cloudformation_stack_crud_boto3.py @@ -835,8 +835,10 @@ def test_describe_change_set(): ) stack = cf_conn.describe_change_set(ChangeSetName="NewChangeSet") + stack["ChangeSetName"].should.equal("NewChangeSet") stack["StackName"].should.equal("NewStack") + stack["Status"].should.equal("REVIEW_IN_PROGRESS") cf_conn.create_change_set( StackName="NewStack", @@ -851,15 +853,30 @@ def test_describe_change_set(): @mock_cloudformation +@mock_ec2 def test_execute_change_set_w_arn(): cf_conn = boto3.client("cloudformation", region_name="us-east-1") + ec2 = boto3.client("ec2", region_name="us-east-1") + # Verify no instances exist at the moment + ec2.describe_instances()["Reservations"].should.have.length_of(0) + # Create a Change set, and verify no resources have been created yet change_set = cf_conn.create_change_set( StackName="NewStack", TemplateBody=dummy_template_json, ChangeSetName="NewChangeSet", ChangeSetType="CREATE", ) + ec2.describe_instances()["Reservations"].should.have.length_of(0) + cf_conn.describe_change_set(ChangeSetName="NewChangeSet")["Status"].should.equal( + "REVIEW_IN_PROGRESS" + ) + # Execute change set cf_conn.execute_change_set(ChangeSetName=change_set["Id"]) + # Verify that the status has changed, and the appropriate resources have been created + cf_conn.describe_change_set(ChangeSetName="NewChangeSet")["Status"].should.equal( + "CREATE_COMPLETE" + ) + ec2.describe_instances()["Reservations"].should.have.length_of(1) @mock_cloudformation From 2e20ad14df2069094cd658bb7eeeabda1ec6b226 Mon Sep 17 00:00:00 2001 From: Asher Foa <1268088+asherf@users.noreply.github.com> Date: Wed, 25 Mar 2020 11:07:59 -0700 Subject: [PATCH 175/658] Fix some 'DeprecationWarning: invalid escape sequence' warnings and use str.format for string interpolation. 
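
For illustration, the warning being silenced can be reproduced directly; a
minimal sketch, assuming CPython 3.6-3.11 (later interpreters promote the
invalid-escape warning from DeprecationWarning to SyntaxWarning):

    import warnings

    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter("always")
        compile(r'"\d"', "<snippet>", "eval")  # the literal "\d" has an invalid escape
    assert any(issubclass(w.category, DeprecationWarning) for w in caught)

    compile(r'r"\d"', "<snippet>", "eval")  # the raw-string form compiles cleanly
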
Similar to https://github.com/spulec/moto/pull/2811 --- moto/elbv2/models.py | 23 ++++++++++++++--------- moto/events/models.py | 4 ++-- moto/packages/httpretty/http.py | 2 +- moto/secretsmanager/utils.py | 2 +- 4 files changed, 18 insertions(+), 13 deletions(-) diff --git a/moto/elbv2/models.py b/moto/elbv2/models.py index fdce9a8c2c0c..a6da0d01c419 100644 --- a/moto/elbv2/models.py +++ b/moto/elbv2/models.py @@ -582,11 +582,13 @@ def _validate_fixed_response_action(self, action, i, index): report='Missing required parameter in Actions[%s].FixedResponseConfig: "StatusCode"' % i ) - if not re.match(r"^(2|4|5)\d\d$", status_code): + expression = r"^(2|4|5)\d\d$" + if not re.match(expression, status_code): raise InvalidStatusCodeActionTypeError( - "1 validation error detected: Value '%s' at 'actions.%s.member.fixedResponseConfig.statusCode' failed to satisfy constraint: \ -Member must satisfy regular expression pattern: ^(2|4|5)\d\d$" - % (status_code, index) + "1 validation error detected: Value '{}' at 'actions.{}.member.fixedResponseConfig.statusCode' failed to satisfy constraint: \ +Member must satisfy regular expression pattern: {}".format( + status_code, index, expression + ) ) content_type = action.data["fixed_response_config._content_type"] if content_type and content_type not in [ @@ -603,16 +605,19 @@ def _validate_fixed_response_action(self, action, i, index): def create_target_group(self, name, **kwargs): if len(name) > 32: raise InvalidTargetGroupNameError( - "Target group name '%s' cannot be longer than '32' characters" % name + "Target group name '{}' cannot be longer than '32' characters".format( + name + ) ) - if not re.match("^[a-zA-Z0-9\-]+$", name): + if not re.match(r"^[a-zA-Z0-9\-]+$", name): raise InvalidTargetGroupNameError( - "Target group name '%s' can only contain characters that are alphanumeric characters or hyphens(-)" - % name + "Target group name '{}' can only contain characters that are alphanumeric characters or hyphens(-)".format( + name + ) ) # undocumented validation - if not re.match("(?!.*--)(?!^-)(?!.*-$)^[A-Za-z0-9-]+$", name): + if not re.match(r"(?!.*--)(?!^-)(?!.*-$)^[A-Za-z0-9-]+$", name): raise InvalidTargetGroupNameError( "1 validation error detected: Value '%s' at 'targetGroup.targetGroupArn.targetGroupName' failed to satisfy constraint: Member must satisfy regular expression pattern: (?!.*--)(?!^-)(?!.*-$)^[A-Za-z0-9-]+$" % name diff --git a/moto/events/models.py b/moto/events/models.py index 5f5909907845..f68b63e38b64 100644 --- a/moto/events/models.py +++ b/moto/events/models.py @@ -305,12 +305,12 @@ def put_permission(self, event_bus_name, action, principal, statement_id): if principal is None or self.ACCOUNT_ID.match(principal) is None: raise JsonRESTError( - "InvalidParameterValue", "Principal must match ^(\d{1,12}|\*)$" + "InvalidParameterValue", r"Principal must match ^(\d{1,12}|\*)$" ) if statement_id is None or self.STATEMENT_ID.match(statement_id) is None: raise JsonRESTError( - "InvalidParameterValue", "StatementId must match ^[a-zA-Z0-9-_]{1,64}$" + "InvalidParameterValue", r"StatementId must match ^[a-zA-Z0-9-_]{1,64}$" ) event_bus._permissions[statement_id] = { diff --git a/moto/packages/httpretty/http.py b/moto/packages/httpretty/http.py index 20c00707e5f6..1b4379f5b216 100644 --- a/moto/packages/httpretty/http.py +++ b/moto/packages/httpretty/http.py @@ -134,7 +134,7 @@ def parse_requestline(s): ValueError: Not a Request-Line """ methods = "|".join(HttpBaseClass.METHODS) - m = re.match(r"(" + methods + 
")\s+(.*)\s+HTTP/(1.[0|1])", s, re.I) + m = re.match(r"({})\s+(.*)\s+HTTP/(1.[0|1])".format(methods), s, re.I) if m: return m.group(1).upper(), m.group(2), m.group(3) else: diff --git a/moto/secretsmanager/utils.py b/moto/secretsmanager/utils.py index 73275ee05b92..6033db6132da 100644 --- a/moto/secretsmanager/utils.py +++ b/moto/secretsmanager/utils.py @@ -89,7 +89,7 @@ def _exclude_characters(password, exclude_characters): for c in exclude_characters: if c in string.punctuation: # Escape punctuation regex usage - c = "\{0}".format(c) + c = r"\{0}".format(c) password = re.sub(c, "", str(password)) return password From bb8d4180540602088e24f5cd6ce90eeb52f6e4fb Mon Sep 17 00:00:00 2001 From: Constantino Schillebeeckx Date: Fri, 27 Mar 2020 15:35:50 -0500 Subject: [PATCH 176/658] fix: stepfunction stop_execution Fixes #2846 Calling stop_execution on a stepfunction should set the status to `ABORTED` not `SUCCEEDED`. --- moto/stepfunctions/models.py | 2 +- tests/test_stepfunctions/test_stepfunctions.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/moto/stepfunctions/models.py b/moto/stepfunctions/models.py index de530b863fe5..e36598f2340e 100644 --- a/moto/stepfunctions/models.py +++ b/moto/stepfunctions/models.py @@ -46,7 +46,7 @@ def __init__( self.stop_date = None def stop(self): - self.status = "SUCCEEDED" + self.status = "ABORTED" self.stop_date = iso_8601_datetime_without_milliseconds(datetime.now()) diff --git a/tests/test_stepfunctions/test_stepfunctions.py b/tests/test_stepfunctions/test_stepfunctions.py index 3e0a8115d3a4..eb2ace53de8d 100644 --- a/tests/test_stepfunctions/test_stepfunctions.py +++ b/tests/test_stepfunctions/test_stepfunctions.py @@ -516,7 +516,7 @@ def test_state_machine_describe_execution_after_stoppage(): description = client.describe_execution(executionArn=execution["executionArn"]) # description["ResponseMetadata"]["HTTPStatusCode"].should.equal(200) - description["status"].should.equal("SUCCEEDED") + description["status"].should.equal("ABORTED") description["stopDate"].should.be.a(datetime) From fb0de99e81dc0af7644faa66113c1aec60b589ea Mon Sep 17 00:00:00 2001 From: Bert Blommers Date: Sat, 28 Mar 2020 13:41:17 +0000 Subject: [PATCH 177/658] #2239 - Initial implementation of CW.get_metric_data --- moto/cloudwatch/models.py | 37 +++ moto/cloudwatch/responses.py | 41 +++ .../test_cloudwatch/test_cloudwatch_boto3.py | 259 ++++++++++++++++++ 3 files changed, 337 insertions(+) diff --git a/moto/cloudwatch/models.py b/moto/cloudwatch/models.py index a8a1b1d19153..bddb94a12d4a 100644 --- a/moto/cloudwatch/models.py +++ b/moto/cloudwatch/models.py @@ -295,6 +295,43 @@ def put_metric_data(self, namespace, metric_data): ) ) + def get_metric_data(self, queries, start_time, end_time): + period_data = [ + md for md in self.metric_data if start_time <= md.timestamp <= end_time + ] + results = [] + for query in queries: + query_ns = query["metric_stat._metric._namespace"] + query_name = query["metric_stat._metric._metric_name"] + query_data = [ + md + for md in period_data + if md.namespace == query_ns and md.name == query_name + ] + metric_values = [m.value for m in query_data] + result_vals = [] + stat = query["metric_stat._stat"] + if len(metric_values) > 0: + if stat == "Average": + result_vals.append(sum(metric_values) / len(metric_values)) + elif stat == "Minimum": + result_vals.append(min(metric_values)) + elif stat == "Maximum": + result_vals.append(max(metric_values)) + elif stat == "Sum": + result_vals.append(sum(metric_values)) + + label = 
query["metric_stat._metric._metric_name"] + " " + stat + results.append( + { + "id": query["id"], + "label": label, + "vals": result_vals, + "timestamps": [datetime.now() for _ in result_vals], + } + ) + return results + def get_metric_statistics( self, namespace, metric_name, start_time, end_time, period, stats ): diff --git a/moto/cloudwatch/responses.py b/moto/cloudwatch/responses.py index 7993c9f06284..7e75a38f014c 100644 --- a/moto/cloudwatch/responses.py +++ b/moto/cloudwatch/responses.py @@ -92,6 +92,18 @@ def put_metric_data(self): template = self.response_template(PUT_METRIC_DATA_TEMPLATE) return template.render() + @amzn_request_id + def get_metric_data(self): + start = dtparse(self._get_param("StartTime")) + end = dtparse(self._get_param("EndTime")) + queries = self._get_list_prefix("MetricDataQueries.member") + results = self.cloudwatch_backend.get_metric_data( + start_time=start, end_time=end, queries=queries + ) + + template = self.response_template(GET_METRIC_DATA_TEMPLATE) + return template.render(results=results) + @amzn_request_id def get_metric_statistics(self): namespace = self._get_param("Namespace") @@ -285,6 +297,35 @@ def set_alarm_state(self): """ +GET_METRIC_DATA_TEMPLATE = """ + + + {{ request_id }} + + + + + {% for result in results %} + + {{ result.id }} + + Complete + + {% for val in result.timestamps %} + {{ val }} + {% endfor %} + + + {% for val in result.vals %} + {{ val }} + {% endfor %} + + + {% endfor %} + + +""" + GET_METRIC_STATISTICS_TEMPLATE = """ diff --git a/tests/test_cloudwatch/test_cloudwatch_boto3.py b/tests/test_cloudwatch/test_cloudwatch_boto3.py index 7fe1440528b7..2b1caff027ec 100644 --- a/tests/test_cloudwatch/test_cloudwatch_boto3.py +++ b/tests/test_cloudwatch/test_cloudwatch_boto3.py @@ -3,6 +3,7 @@ import boto3 from botocore.exceptions import ClientError from datetime import datetime, timedelta +from freezegun import freeze_time from nose.tools import assert_raises from uuid import uuid4 import pytz @@ -211,6 +212,35 @@ def test_get_metric_statistics(): datapoint["Sum"].should.equal(1.5) +@mock_cloudwatch +@freeze_time("2020-02-10 18:44:05") +def test_custom_timestamp(): + utc_now = datetime.now(tz=pytz.utc) + time = "2020-02-10T18:44:09Z" + cw = boto3.client("cloudwatch", "eu-west-1") + + cw.put_metric_data( + Namespace="tester", + MetricData=[dict(MetricName="metric1", Value=1.5, Timestamp=time)], + ) + + cw.put_metric_data( + Namespace="tester", + MetricData=[ + dict(MetricName="metric2", Value=1.5, Timestamp=datetime(2020, 2, 10)) + ], + ) + + stats = cw.get_metric_statistics( + Namespace="tester", + MetricName="metric", + StartTime=utc_now - timedelta(seconds=60), + EndTime=utc_now + timedelta(seconds=60), + Period=60, + Statistics=["SampleCount", "Sum"], + ) + + @mock_cloudwatch def test_list_metrics(): cloudwatch = boto3.client("cloudwatch", "eu-west-1") @@ -292,3 +322,232 @@ def create_metrics(cloudwatch, namespace, metrics=5, data_points=5): Namespace=namespace, MetricData=[{"MetricName": metric_name, "Value": j, "Unit": "Seconds"}], ) + + +@mock_cloudwatch +def test_get_metric_data_within_timeframe(): + utc_now = datetime.now(tz=pytz.utc) + cloudwatch = boto3.client("cloudwatch", "eu-west-1") + namespace1 = "my_namespace/" + # put metric data + values = [0, 2, 4, 3.5, 7, 100] + cloudwatch.put_metric_data( + Namespace=namespace1, + MetricData=[ + {"MetricName": "metric1", "Value": val, "Unit": "Seconds"} for val in values + ], + ) + # get_metric_data + stats = ["Average", "Sum", "Minimum", "Maximum"] + response = 
cloudwatch.get_metric_data( + MetricDataQueries=[ + { + "Id": "result_" + stat, + "MetricStat": { + "Metric": {"Namespace": namespace1, "MetricName": "metric1"}, + "Period": 60, + "Stat": stat, + }, + } + for stat in stats + ], + StartTime=utc_now - timedelta(seconds=60), + EndTime=utc_now + timedelta(seconds=60), + ) + # + # Assert Average/Min/Max/Sum is returned as expected + avg = [ + res for res in response["MetricDataResults"] if res["Id"] == "result_Average" + ][0] + avg["Label"].should.equal("metric1 Average") + avg["StatusCode"].should.equal("Complete") + [int(val) for val in avg["Values"]].should.equal([19]) + + sum_ = [res for res in response["MetricDataResults"] if res["Id"] == "result_Sum"][ + 0 + ] + sum_["Label"].should.equal("metric1 Sum") + sum_["StatusCode"].should.equal("Complete") + [val for val in sum_["Values"]].should.equal([sum(values)]) + + min_ = [ + res for res in response["MetricDataResults"] if res["Id"] == "result_Minimum" + ][0] + min_["Label"].should.equal("metric1 Minimum") + min_["StatusCode"].should.equal("Complete") + [int(val) for val in min_["Values"]].should.equal([0]) + + max_ = [ + res for res in response["MetricDataResults"] if res["Id"] == "result_Maximum" + ][0] + max_["Label"].should.equal("metric1 Maximum") + max_["StatusCode"].should.equal("Complete") + [int(val) for val in max_["Values"]].should.equal([100]) + + +@mock_cloudwatch +def test_get_metric_data_partially_within_timeframe(): + utc_now = datetime.now(tz=pytz.utc) + yesterday = utc_now - timedelta(days=1) + last_week = utc_now - timedelta(days=7) + cloudwatch = boto3.client("cloudwatch", "eu-west-1") + namespace1 = "my_namespace/" + # put metric data + values = [0, 2, 4, 3.5, 7, 100] + cloudwatch.put_metric_data( + Namespace=namespace1, + MetricData=[ + { + "MetricName": "metric1", + "Value": 10, + "Unit": "Seconds", + "Timestamp": utc_now, + } + ], + ) + cloudwatch.put_metric_data( + Namespace=namespace1, + MetricData=[ + { + "MetricName": "metric1", + "Value": 20, + "Unit": "Seconds", + "Timestamp": yesterday, + } + ], + ) + cloudwatch.put_metric_data( + Namespace=namespace1, + MetricData=[ + { + "MetricName": "metric1", + "Value": 50, + "Unit": "Seconds", + "Timestamp": last_week, + } + ], + ) + # get_metric_data + response = cloudwatch.get_metric_data( + MetricDataQueries=[ + { + "Id": "result", + "MetricStat": { + "Metric": {"Namespace": namespace1, "MetricName": "metric1"}, + "Period": 60, + "Stat": "Sum", + }, + } + ], + StartTime=yesterday - timedelta(seconds=60), + EndTime=utc_now + timedelta(seconds=60), + ) + # + # Assert Last week's data is not returned + len(response["MetricDataResults"]).should.equal(1) + sum_ = response["MetricDataResults"][0] + sum_["Label"].should.equal("metric1 Sum") + sum_["StatusCode"].should.equal("Complete") + sum_["Values"].should.equal([30.0]) + + +@mock_cloudwatch +def test_get_metric_data_outside_timeframe(): + utc_now = datetime.now(tz=pytz.utc) + last_week = utc_now - timedelta(days=7) + cloudwatch = boto3.client("cloudwatch", "eu-west-1") + namespace1 = "my_namespace/" + # put metric data + cloudwatch.put_metric_data( + Namespace=namespace1, + MetricData=[ + { + "MetricName": "metric1", + "Value": 50, + "Unit": "Seconds", + "Timestamp": last_week, + } + ], + ) + # get_metric_data + response = cloudwatch.get_metric_data( + MetricDataQueries=[ + { + "Id": "result", + "MetricStat": { + "Metric": {"Namespace": namespace1, "MetricName": "metric1"}, + "Period": 60, + "Stat": "Sum", + }, + } + ], + StartTime=utc_now - timedelta(seconds=60), + 
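# (The only datapoint above carries last week's timestamp, so it falls outside
#  this one-minute window and the query should return an empty Values list.)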
EndTime=utc_now + timedelta(seconds=60), + ) + # + # Assert Last week's data is not returned + len(response["MetricDataResults"]).should.equal(1) + response["MetricDataResults"][0]["Id"].should.equal("result") + response["MetricDataResults"][0]["StatusCode"].should.equal("Complete") + response["MetricDataResults"][0]["Values"].should.equal([]) + + +@mock_cloudwatch +def test_get_metric_data_for_multiple_metrics(): + utc_now = datetime.now(tz=pytz.utc) + cloudwatch = boto3.client("cloudwatch", "eu-west-1") + namespace = "my_namespace/" + # put metric data + cloudwatch.put_metric_data( + Namespace=namespace, + MetricData=[ + { + "MetricName": "metric1", + "Value": 50, + "Unit": "Seconds", + "Timestamp": utc_now, + } + ], + ) + cloudwatch.put_metric_data( + Namespace=namespace, + MetricData=[ + { + "MetricName": "metric2", + "Value": 25, + "Unit": "Seconds", + "Timestamp": utc_now, + } + ], + ) + # get_metric_data + response = cloudwatch.get_metric_data( + MetricDataQueries=[ + { + "Id": "result1", + "MetricStat": { + "Metric": {"Namespace": namespace, "MetricName": "metric1"}, + "Period": 60, + "Stat": "Sum", + }, + }, + { + "Id": "result2", + "MetricStat": { + "Metric": {"Namespace": namespace, "MetricName": "metric2"}, + "Period": 60, + "Stat": "Sum", + }, + }, + ], + StartTime=utc_now - timedelta(seconds=60), + EndTime=utc_now + timedelta(seconds=60), + ) + # + len(response["MetricDataResults"]).should.equal(2) + + res1 = [res for res in response["MetricDataResults"] if res["Id"] == "result1"][0] + res1["Values"].should.equal([50.0]) + + res2 = [res for res in response["MetricDataResults"] if res["Id"] == "result2"][0] + res2["Values"].should.equal([25.0]) From 349b381390ae34c809e50cd04c3886053cff4776 Mon Sep 17 00:00:00 2001 From: Andrea Amorosi Date: Sat, 28 Mar 2020 17:59:42 +0000 Subject: [PATCH 178/658] Fixed dynamodb2 put_item ValidationException --- moto/dynamodb2/models.py | 8 ++++- moto/dynamodb2/responses.py | 3 ++ tests/test_dynamodb2/test_dynamodb.py | 45 +++++++++++++++++++++++++++ 3 files changed, 55 insertions(+), 1 deletion(-) diff --git a/moto/dynamodb2/models.py b/moto/dynamodb2/models.py index 54dccd56dc7c..152e719c4047 100644 --- a/moto/dynamodb2/models.py +++ b/moto/dynamodb2/models.py @@ -800,13 +800,19 @@ def put_item( overwrite=False, ): if self.hash_key_attr not in item_attrs.keys(): - raise ValueError( + raise KeyError( "One or more parameter values were invalid: Missing the key " + self.hash_key_attr + " in the item" ) hash_value = DynamoType(item_attrs.get(self.hash_key_attr)) if self.has_range_key: + if self.range_key_attr not in item_attrs.keys(): + raise KeyError( + "One or more parameter values were invalid: Missing the key " + + self.range_key_attr + + " in the item" + ) range_value = DynamoType(item_attrs.get(self.range_key_attr)) else: range_value = None diff --git a/moto/dynamodb2/responses.py b/moto/dynamodb2/responses.py index c72ded2c381e..78126f7f1739 100644 --- a/moto/dynamodb2/responses.py +++ b/moto/dynamodb2/responses.py @@ -299,6 +299,9 @@ def put_item(self): except ItemSizeTooLarge: er = "com.amazonaws.dynamodb.v20111205#ValidationException" return self.error(er, ItemSizeTooLarge.message) + except KeyError as ke: + er = "com.amazonaws.dynamodb.v20111205#ValidationException" + return self.error(er, ke.args[0]) except ValueError as ve: er = "com.amazonaws.dynamodb.v20111205#ConditionalCheckFailedException" return self.error(er, str(ve)) diff --git a/tests/test_dynamodb2/test_dynamodb.py b/tests/test_dynamodb2/test_dynamodb.py index 
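As a sketch of the client-visible effect (the table is assumed to have the
hash key "structure_id", matching the tests added below):

    with assert_raises(ClientError) as ex:
        table.put_item(Item={})
    # ex.exception.response["Error"]["Code"]    == "ValidationException"
    # ex.exception.response["Error"]["Message"] ==
    #   "One or more parameter values were invalid: Missing the key structure_id in the item"
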
062208863153..bec24c966ef2 100644 --- a/tests/test_dynamodb2/test_dynamodb.py +++ b/tests/test_dynamodb2/test_dynamodb.py @@ -1345,6 +1345,25 @@ def test_get_item_returns_consumed_capacity(): assert "TableName" in response["ConsumedCapacity"] +@mock_dynamodb2 +def test_put_empty_item(): + dynamodb = boto3.resource("dynamodb", region_name="us-east-1") + dynamodb.create_table( + AttributeDefinitions=[{"AttributeName": "structure_id", "AttributeType": "S"},], + TableName="test", + KeySchema=[{"AttributeName": "structure_id", "KeyType": "HASH"},], + ProvisionedThroughput={"ReadCapacityUnits": 123, "WriteCapacityUnits": 123}, + ) + table = dynamodb.Table("test") + + with assert_raises(ClientError) as ex: + table.put_item(Item={}) + ex.exception.response["Error"]["Message"].should.equal( + "One or more parameter values were invalid: Missing the key structure_id in the item" + ) + ex.exception.response["Error"]["Code"].should.equal("ValidationException") + + @mock_dynamodb2 def test_put_item_nonexisting_hash_key(): dynamodb = boto3.resource("dynamodb", region_name="us-east-1") @@ -1361,6 +1380,32 @@ def test_put_item_nonexisting_hash_key(): ex.exception.response["Error"]["Message"].should.equal( "One or more parameter values were invalid: Missing the key structure_id in the item" ) + ex.exception.response["Error"]["Code"].should.equal("ValidationException") + + +@mock_dynamodb2 +def test_put_item_nonexisting_range_key(): + dynamodb = boto3.resource("dynamodb", region_name="us-east-1") + dynamodb.create_table( + AttributeDefinitions=[ + {"AttributeName": "structure_id", "AttributeType": "S"}, + {"AttributeName": "added_at", "AttributeType": "N"}, + ], + TableName="test", + KeySchema=[ + {"AttributeName": "structure_id", "KeyType": "HASH"}, + {"AttributeName": "added_at", "KeyType": "RANGE"}, + ], + ProvisionedThroughput={"ReadCapacityUnits": 123, "WriteCapacityUnits": 123}, + ) + table = dynamodb.Table("test") + + with assert_raises(ClientError) as ex: + table.put_item(Item={"structure_id": "abcdef"}) + ex.exception.response["Error"]["Message"].should.equal( + "One or more parameter values were invalid: Missing the key added_at in the item" + ) + ex.exception.response["Error"]["Code"].should.equal("ValidationException") def test_filter_expression(): From 888e0c31a0fd94c8854f93ec88e3b45ebfeeb98b Mon Sep 17 00:00:00 2001 From: Bert Blommers Date: Mon, 30 Mar 2020 13:42:00 +0100 Subject: [PATCH 179/658] Linting --- moto/__init__.py | 15 +++--- moto/core/utils.py | 19 +++---- moto/eb/exceptions.py | 6 ++- moto/eb/models.py | 35 ++++++------ moto/eb/responses.py | 47 +++++++--------- moto/eb/urls.py | 2 +- tests/test_eb/test_eb.py | 112 ++++++++++++++++----------------------- 7 files changed, 96 insertions(+), 140 deletions(-) diff --git a/moto/__init__.py b/moto/__init__.py index c2caa8df01a7..9b59f18eb9b0 100644 --- a/moto/__init__.py +++ b/moto/__init__.py @@ -1,11 +1,4 @@ from __future__ import unicode_literals -import logging -# logging.getLogger('boto').setLevel(logging.CRITICAL) - - -__title__ = "moto" -__version__ = "1.3.15.dev" - from .acm import mock_acm # noqa from .apigateway import mock_apigateway, mock_apigateway_deprecated # noqa @@ -28,7 +21,7 @@ from .dynamodb import mock_dynamodb, mock_dynamodb_deprecated # noqa from .dynamodb2 import mock_dynamodb2, mock_dynamodb2_deprecated # noqa from .dynamodbstreams import mock_dynamodbstreams # noqa -from .eb import mock_eb # flake8: noqa +from .eb import mock_eb # noqa from .ec2 import mock_ec2, mock_ec2_deprecated # noqa from 
.ec2_instance_connect import mock_ec2_instance_connect # noqa from .ecr import mock_ecr, mock_ecr_deprecated # noqa @@ -65,6 +58,12 @@ from .swf import mock_swf, mock_swf_deprecated # noqa from .xray import XRaySegment, mock_xray, mock_xray_client # noqa +# import logging +# logging.getLogger('boto').setLevel(logging.CRITICAL) + +__title__ = "moto" +__version__ = "1.3.15.dev" + try: # Need to monkey-patch botocore requests back to underlying urllib3 classes diff --git a/moto/core/utils.py b/moto/core/utils.py index 59079784a90d..dce9f675c78b 100644 --- a/moto/core/utils.py +++ b/moto/core/utils.py @@ -331,10 +331,7 @@ def py2_strip_unicode_keys(blob): def tags_from_query_string( - querystring_dict, - prefix="Tag", - key_suffix="Key", - value_suffix="Value" + querystring_dict, prefix="Tag", key_suffix="Key", value_suffix="Value" ): response_values = {} for key, value in querystring_dict.items(): @@ -342,18 +339,14 @@ def tags_from_query_string( tag_index = key.replace(prefix + ".", "").replace("." + key_suffix, "") tag_key = querystring_dict.get( "{prefix}.{index}.{key_suffix}".format( - prefix=prefix, - index=tag_index, - key_suffix=key_suffix, - ))[0] + prefix=prefix, index=tag_index, key_suffix=key_suffix, + ) + )[0] tag_value_key = "{prefix}.{index}.{value_suffix}".format( - prefix=prefix, - index=tag_index, - value_suffix=value_suffix, + prefix=prefix, index=tag_index, value_suffix=value_suffix, ) if tag_value_key in querystring_dict: - response_values[tag_key] = querystring_dict.get(tag_value_key)[ - 0] + response_values[tag_key] = querystring_dict.get(tag_value_key)[0] else: response_values[tag_key] = None return response_values diff --git a/moto/eb/exceptions.py b/moto/eb/exceptions.py index bf3a896187a8..f1e27c564609 100644 --- a/moto/eb/exceptions.py +++ b/moto/eb/exceptions.py @@ -4,10 +4,12 @@ class InvalidParameterValueError(RESTError): def __init__(self, message): super(InvalidParameterValueError, self).__init__( - "InvalidParameterValue", message) + "InvalidParameterValue", message + ) class ResourceNotFoundException(RESTError): def __init__(self, message): super(ResourceNotFoundException, self).__init__( - "ResourceNotFoundException", message) + "ResourceNotFoundException", message + ) diff --git a/moto/eb/models.py b/moto/eb/models.py index 4490bbd0c83d..71873f30cc39 100644 --- a/moto/eb/models.py +++ b/moto/eb/models.py @@ -8,13 +8,11 @@ class FakeEnvironment(BaseModel): def __init__( - self, - application, - environment_name, - solution_stack_name, - tags, + self, application, environment_name, solution_stack_name, tags, ): - self.application = weakref.proxy(application) # weakref to break circular dependencies + self.application = weakref.proxy( + application + ) # weakref to break circular dependencies self.environment_name = environment_name self.solution_stack_name = solution_stack_name self.tags = tags @@ -25,17 +23,19 @@ def application_name(self): @property def environment_arn(self): - return 'arn:aws:elasticbeanstalk:{region}:{account_id}:' \ - 'environment/{application_name}/{environment_name}'.format( + return ( + "arn:aws:elasticbeanstalk:{region}:{account_id}:" + "environment/{application_name}/{environment_name}".format( region=self.region, - account_id='123456789012', + account_id="123456789012", application_name=self.application_name, environment_name=self.environment_name, ) + ) @property def platform_arn(self): - return 'TODO' # TODO + return "TODO" # TODO @property def region(self): @@ -49,10 +49,7 @@ def __init__(self, backend, application_name): 
self.environments = dict() def create_environment( - self, - environment_name, - solution_stack_name, - tags, + self, environment_name, solution_stack_name, tags, ): if environment_name in self.environments: raise InvalidParameterValueError @@ -89,13 +86,11 @@ def create_application(self, application_name): raise InvalidParameterValueError( "Application {} already exists.".format(application_name) ) - new_app = FakeApplication( - backend=self, - application_name=application_name, - ) + new_app = FakeApplication(backend=self, application_name=application_name,) self.applications[application_name] = new_app return new_app -eb_backends = dict((region.name, EBBackend(region.name)) - for region in boto.beanstalk.regions()) +eb_backends = dict( + (region.name, EBBackend(region.name)) for region in boto.beanstalk.regions() +) diff --git a/moto/eb/responses.py b/moto/eb/responses.py index 905780c448f3..6178e4a7ffde 100644 --- a/moto/eb/responses.py +++ b/moto/eb/responses.py @@ -14,42 +14,34 @@ def backend(self): def create_application(self): app = self.backend.create_application( - application_name=self._get_param('ApplicationName'), + application_name=self._get_param("ApplicationName"), ) template = self.response_template(EB_CREATE_APPLICATION) - return template.render( - region_name=self.backend.region, - application=app, - ) + return template.render(region_name=self.backend.region, application=app,) def describe_applications(self): template = self.response_template(EB_DESCRIBE_APPLICATIONS) - return template.render( - applications=self.backend.applications.values(), - ) + return template.render(applications=self.backend.applications.values(),) def create_environment(self): - application_name = self._get_param('ApplicationName') + application_name = self._get_param("ApplicationName") try: app = self.backend.applications[application_name] except KeyError: raise InvalidParameterValueError( - "No Application named \'{}\' found.".format(application_name) + "No Application named '{}' found.".format(application_name) ) tags = tags_from_query_string(self.querystring, prefix="Tags.member") env = app.create_environment( - environment_name=self._get_param('EnvironmentName'), - solution_stack_name=self._get_param('SolutionStackName'), + environment_name=self._get_param("EnvironmentName"), + solution_stack_name=self._get_param("SolutionStackName"), tags=tags, ) template = self.response_template(EB_CREATE_ENVIRONMENT) - return template.render( - environment=env, - region=self.backend.region, - ) + return template.render(environment=env, region=self.backend.region,) def describe_environments(self): envs = [] @@ -59,9 +51,7 @@ def describe_environments(self): envs.append(env) template = self.response_template(EB_DESCRIBE_ENVIRONMENTS) - return template.render( - environments=envs, - ) + return template.render(environments=envs,) @staticmethod def list_available_solution_stacks(): @@ -75,39 +65,38 @@ def _find_environment_by_arn(self, arn): raise KeyError() def update_tags_for_resource(self): - resource_arn = self._get_param('ResourceArn') + resource_arn = self._get_param("ResourceArn") try: res = self._find_environment_by_arn(resource_arn) except KeyError: raise ResourceNotFoundException( - "Resource not found for ARN \'{}\'.".format(resource_arn) + "Resource not found for ARN '{}'.".format(resource_arn) ) - tags_to_add = tags_from_query_string(self.querystring, prefix="TagsToAdd.member") + tags_to_add = tags_from_query_string( + self.querystring, prefix="TagsToAdd.member" + ) for key, value in 
tags_to_add.items(): res.tags[key] = value - tags_to_remove = self._get_multi_param('TagsToRemove.member') + tags_to_remove = self._get_multi_param("TagsToRemove.member") for key in tags_to_remove: del res.tags[key] return EB_UPDATE_TAGS_FOR_RESOURCE def list_tags_for_resource(self): - resource_arn = self._get_param('ResourceArn') + resource_arn = self._get_param("ResourceArn") try: res = self._find_environment_by_arn(resource_arn) except KeyError: raise ResourceNotFoundException( - "Resource not found for ARN \'{}\'.".format(resource_arn) + "Resource not found for ARN '{}'.".format(resource_arn) ) tags = res.tags template = self.response_template(EB_LIST_TAGS_FOR_RESOURCE) - return template.render( - tags=tags, - arn=resource_arn, - ) + return template.render(tags=tags, arn=resource_arn,) EB_CREATE_APPLICATION = """ diff --git a/moto/eb/urls.py b/moto/eb/urls.py index 4cd4add13793..2d57f7f9d6af 100644 --- a/moto/eb/urls.py +++ b/moto/eb/urls.py @@ -7,5 +7,5 @@ ] url_paths = { - '{0}/$': EBResponse.dispatch, + "{0}/$": EBResponse.dispatch, } diff --git a/tests/test_eb/test_eb.py b/tests/test_eb/test_eb.py index 2b5be4490562..1064bf31a0a2 100644 --- a/tests/test_eb/test_eb.py +++ b/tests/test_eb/test_eb.py @@ -8,114 +8,94 @@ @mock_eb def test_create_application(): # Create Elastic Beanstalk Application - conn = boto3.client('elasticbeanstalk', region_name='us-east-1') - app = conn.create_application( - ApplicationName="myapp", - ) - app['Application']['ApplicationName'].should.equal("myapp") + conn = boto3.client("elasticbeanstalk", region_name="us-east-1") + app = conn.create_application(ApplicationName="myapp",) + app["Application"]["ApplicationName"].should.equal("myapp") @mock_eb def test_create_application_dup(): - conn = boto3.client('elasticbeanstalk', region_name='us-east-1') - conn.create_application( - ApplicationName="myapp", + conn = boto3.client("elasticbeanstalk", region_name="us-east-1") + conn.create_application(ApplicationName="myapp",) + conn.create_application.when.called_with(ApplicationName="myapp",).should.throw( + ClientError ) - conn.create_application.when.called_with( - ApplicationName="myapp", - ).should.throw(ClientError) @mock_eb def test_describe_applications(): # Create Elastic Beanstalk Application - conn = boto3.client('elasticbeanstalk', region_name='us-east-1') - conn.create_application( - ApplicationName="myapp", - ) + conn = boto3.client("elasticbeanstalk", region_name="us-east-1") + conn.create_application(ApplicationName="myapp",) apps = conn.describe_applications() - len(apps['Applications']).should.equal(1) - apps['Applications'][0]['ApplicationName'].should.equal('myapp') + len(apps["Applications"]).should.equal(1) + apps["Applications"][0]["ApplicationName"].should.equal("myapp") @mock_eb def test_create_environment(): # Create Elastic Beanstalk Environment - conn = boto3.client('elasticbeanstalk', region_name='us-east-1') - app = conn.create_application( - ApplicationName="myapp", - ) - env = conn.create_environment( - ApplicationName="myapp", - EnvironmentName="myenv", - ) - env['EnvironmentName'].should.equal("myenv") + conn = boto3.client("elasticbeanstalk", region_name="us-east-1") + app = conn.create_application(ApplicationName="myapp",) + env = conn.create_environment(ApplicationName="myapp", EnvironmentName="myenv",) + env["EnvironmentName"].should.equal("myenv") @mock_eb def test_describe_environments(): # List Elastic Beanstalk Envs - conn = boto3.client('elasticbeanstalk', region_name='us-east-1') - conn.create_application( - 
ApplicationName="myapp", - ) + conn = boto3.client("elasticbeanstalk", region_name="us-east-1") + conn.create_application(ApplicationName="myapp",) conn.create_environment( - ApplicationName="myapp", - EnvironmentName="myenv", + ApplicationName="myapp", EnvironmentName="myenv", ) envs = conn.describe_environments() - envs = envs['Environments'] + envs = envs["Environments"] len(envs).should.equal(1) - envs[0]['ApplicationName'].should.equal('myapp') - envs[0]['EnvironmentName'].should.equal('myenv') + envs[0]["ApplicationName"].should.equal("myapp") + envs[0]["EnvironmentName"].should.equal("myenv") def tags_dict_to_list(tag_dict): tag_list = [] for key, value in tag_dict.items(): - tag_list.append({'Key': key, 'Value': value}) + tag_list.append({"Key": key, "Value": value}) return tag_list def tags_list_to_dict(tag_list): tag_dict = {} for tag in tag_list: - tag_dict[tag['Key']] = tag['Value'] + tag_dict[tag["Key"]] = tag["Value"] return tag_dict @mock_eb def test_create_environment_tags(): - conn = boto3.client('elasticbeanstalk', region_name='us-east-1') - conn.create_application( - ApplicationName="myapp", - ) - env_tags = {'initial key': 'initial value'} + conn = boto3.client("elasticbeanstalk", region_name="us-east-1") + conn.create_application(ApplicationName="myapp",) + env_tags = {"initial key": "initial value"} env = conn.create_environment( ApplicationName="myapp", EnvironmentName="myenv", Tags=tags_dict_to_list(env_tags), ) - tags = conn.list_tags_for_resource( - ResourceArn=env['EnvironmentArn'], - ) - tags['ResourceArn'].should.equal(env['EnvironmentArn']) - tags_list_to_dict(tags['ResourceTags']).should.equal(env_tags) + tags = conn.list_tags_for_resource(ResourceArn=env["EnvironmentArn"],) + tags["ResourceArn"].should.equal(env["EnvironmentArn"]) + tags_list_to_dict(tags["ResourceTags"]).should.equal(env_tags) @mock_eb def test_update_tags(): - conn = boto3.client('elasticbeanstalk', region_name='us-east-1') - conn.create_application( - ApplicationName="myapp", - ) + conn = boto3.client("elasticbeanstalk", region_name="us-east-1") + conn.create_application(ApplicationName="myapp",) env_tags = { - 'initial key': 'initial value', - 'to remove': 'delete me', - 'to update': 'original', + "initial key": "initial value", + "to remove": "delete me", + "to update": "original", } env = conn.create_environment( ApplicationName="myapp", @@ -124,29 +104,27 @@ def test_update_tags(): ) extra_env_tags = { - 'to update': 'new', - 'extra key': 'extra value', + "to update": "new", + "extra key": "extra value", } conn.update_tags_for_resource( - ResourceArn=env['EnvironmentArn'], + ResourceArn=env["EnvironmentArn"], TagsToAdd=tags_dict_to_list(extra_env_tags), - TagsToRemove=['to remove'], + TagsToRemove=["to remove"], ) total_env_tags = env_tags.copy() total_env_tags.update(extra_env_tags) - del total_env_tags['to remove'] + del total_env_tags["to remove"] - tags = conn.list_tags_for_resource( - ResourceArn=env['EnvironmentArn'], - ) - tags['ResourceArn'].should.equal(env['EnvironmentArn']) - tags_list_to_dict(tags['ResourceTags']).should.equal(total_env_tags) + tags = conn.list_tags_for_resource(ResourceArn=env["EnvironmentArn"],) + tags["ResourceArn"].should.equal(env["EnvironmentArn"]) + tags_list_to_dict(tags["ResourceTags"]).should.equal(total_env_tags) @mock_eb def test_list_available_solution_stacks(): - conn = boto3.client('elasticbeanstalk', region_name='us-east-1') + conn = boto3.client("elasticbeanstalk", region_name="us-east-1") stacks = conn.list_available_solution_stacks() - 
len(stacks['SolutionStacks']).should.be.greater_than(0) - len(stacks['SolutionStacks']).should.be.equal(len(stacks['SolutionStackDetails'])) + len(stacks["SolutionStacks"]).should.be.greater_than(0) + len(stacks["SolutionStacks"]).should.be.equal(len(stacks["SolutionStackDetails"])) From c32c17a13ed94b3867336e2a9e16c5890ad75973 Mon Sep 17 00:00:00 2001 From: Bert Blommers Date: Mon, 30 Mar 2020 13:49:19 +0100 Subject: [PATCH 180/658] Remove duplicated method --- moto/ec2/utils.py | 16 ---------------- moto/emr/responses.py | 5 +++-- moto/emr/utils.py | 16 ---------------- 3 files changed, 3 insertions(+), 34 deletions(-) diff --git a/moto/ec2/utils.py b/moto/ec2/utils.py index 74fe3d27b736..61d22d8b227f 100644 --- a/moto/ec2/utils.py +++ b/moto/ec2/utils.py @@ -196,22 +196,6 @@ def split_route_id(route_id): return values[0], values[1] -def tags_from_query_string(querystring_dict): - prefix = "Tag" - suffix = "Key" - response_values = {} - for key, value in querystring_dict.items(): - if key.startswith(prefix) and key.endswith(suffix): - tag_index = key.replace(prefix + ".", "").replace("." + suffix, "") - tag_key = querystring_dict.get("Tag.{0}.Key".format(tag_index))[0] - tag_value_key = "Tag.{0}.Value".format(tag_index) - if tag_value_key in querystring_dict: - response_values[tag_key] = querystring_dict.get(tag_value_key)[0] - else: - response_values[tag_key] = None - return response_values - - def dhcp_configuration_from_querystring(querystring, option="DhcpConfiguration"): """ turn: diff --git a/moto/emr/responses.py b/moto/emr/responses.py index 3708db0ed268..d2b234ced0be 100644 --- a/moto/emr/responses.py +++ b/moto/emr/responses.py @@ -10,9 +10,10 @@ from moto.core.responses import AWSServiceSpec from moto.core.responses import BaseResponse from moto.core.responses import xml_to_json_response +from moto.core.utils import tags_from_query_string from .exceptions import EmrError from .models import emr_backends -from .utils import steps_from_query_string, tags_from_query_string +from .utils import steps_from_query_string def generate_boto3_response(operation): @@ -91,7 +92,7 @@ def add_job_flow_steps(self): @generate_boto3_response("AddTags") def add_tags(self): cluster_id = self._get_param("ResourceId") - tags = tags_from_query_string(self.querystring) + tags = tags_from_query_string(self.querystring, prefix="Tags") self.backend.add_tags(cluster_id, tags) template = self.response_template(ADD_TAGS_TEMPLATE) return template.render() diff --git a/moto/emr/utils.py b/moto/emr/utils.py index 0f75995b8e4e..fb33214c8592 100644 --- a/moto/emr/utils.py +++ b/moto/emr/utils.py @@ -22,22 +22,6 @@ def random_instance_group_id(size=13): return "i-{0}".format(random_id()) -def tags_from_query_string(querystring_dict): - prefix = "Tags" - suffix = "Key" - response_values = {} - for key, value in querystring_dict.items(): - if key.startswith(prefix) and key.endswith(suffix): - tag_index = key.replace(prefix + ".", "").replace("." 
+ suffix, "") - tag_key = querystring_dict.get("Tags.{0}.Key".format(tag_index))[0] - tag_value_key = "Tags.{0}.Value".format(tag_index) - if tag_value_key in querystring_dict: - response_values[tag_key] = querystring_dict.get(tag_value_key)[0] - else: - response_values[tag_key] = None - return response_values - - def steps_from_query_string(querystring_dict): steps = [] for step in querystring_dict: From 7d524eaec9bb49f8d3e8e55a7f84c1876cd4e3d1 Mon Sep 17 00:00:00 2001 From: Bert Blommers Date: Mon, 30 Mar 2020 14:08:22 +0100 Subject: [PATCH 181/658] Elastic Beanstalk - Rename and Add Implementation Coverage --- IMPLEMENTATION_COVERAGE.md | 14 +++--- moto/__init__.py | 2 +- moto/{eb => elasticbeanstalk}/__init__.py | 2 +- moto/{eb => elasticbeanstalk}/exceptions.py | 0 moto/{eb => elasticbeanstalk}/models.py | 50 ++++++++++++++++++++- moto/{eb => elasticbeanstalk}/responses.py | 41 +++-------------- moto/{eb => elasticbeanstalk}/urls.py | 0 tests/test_eb/test_eb.py | 18 ++++---- 8 files changed, 74 insertions(+), 53 deletions(-) rename moto/{eb => elasticbeanstalk}/__init__.py (59%) rename moto/{eb => elasticbeanstalk}/exceptions.py (100%) rename moto/{eb => elasticbeanstalk}/models.py (61%) rename moto/{eb => elasticbeanstalk}/responses.py (97%) rename moto/{eb => elasticbeanstalk}/urls.py (100%) diff --git a/IMPLEMENTATION_COVERAGE.md b/IMPLEMENTATION_COVERAGE.md index 705618524723..bd9e9a4cd2c0 100644 --- a/IMPLEMENTATION_COVERAGE.md +++ b/IMPLEMENTATION_COVERAGE.md @@ -2878,15 +2878,15 @@ - [ ] test_failover ## elasticbeanstalk -0% implemented +13% implemented - [ ] abort_environment_update - [ ] apply_environment_managed_action - [ ] check_dns_availability - [ ] compose_environments -- [ ] create_application +- [X] create_application - [ ] create_application_version - [ ] create_configuration_template -- [ ] create_environment +- [X] create_environment - [ ] create_platform_version - [ ] create_storage_location - [ ] delete_application @@ -2903,13 +2903,13 @@ - [ ] describe_environment_managed_action_history - [ ] describe_environment_managed_actions - [ ] describe_environment_resources -- [ ] describe_environments +- [X] describe_environments - [ ] describe_events - [ ] describe_instances_health - [ ] describe_platform_version -- [ ] list_available_solution_stacks +- [X] list_available_solution_stacks - [ ] list_platform_versions -- [ ] list_tags_for_resource +- [X] list_tags_for_resource - [ ] rebuild_environment - [ ] request_environment_info - [ ] restart_app_server @@ -2921,7 +2921,7 @@ - [ ] update_application_version - [ ] update_configuration_template - [ ] update_environment -- [ ] update_tags_for_resource +- [X] update_tags_for_resource - [ ] validate_configuration_settings ## elastictranscoder diff --git a/moto/__init__.py b/moto/__init__.py index 9b59f18eb9b0..4c9d4753c360 100644 --- a/moto/__init__.py +++ b/moto/__init__.py @@ -21,7 +21,7 @@ from .dynamodb import mock_dynamodb, mock_dynamodb_deprecated # noqa from .dynamodb2 import mock_dynamodb2, mock_dynamodb2_deprecated # noqa from .dynamodbstreams import mock_dynamodbstreams # noqa -from .eb import mock_eb # noqa +from .elasticbeanstalk import mock_elasticbeanstalk # noqa from .ec2 import mock_ec2, mock_ec2_deprecated # noqa from .ec2_instance_connect import mock_ec2_instance_connect # noqa from .ecr import mock_ecr, mock_ecr_deprecated # noqa diff --git a/moto/eb/__init__.py b/moto/elasticbeanstalk/__init__.py similarity index 59% rename from moto/eb/__init__.py rename to moto/elasticbeanstalk/__init__.py index 
3e06e959525c..851fa445b76b 100644 --- a/moto/eb/__init__.py +++ b/moto/elasticbeanstalk/__init__.py @@ -1,4 +1,4 @@ from .models import eb_backends from moto.core.models import base_decorator -mock_eb = base_decorator(eb_backends) +mock_elasticbeanstalk = base_decorator(eb_backends) diff --git a/moto/eb/exceptions.py b/moto/elasticbeanstalk/exceptions.py similarity index 100% rename from moto/eb/exceptions.py rename to moto/elasticbeanstalk/exceptions.py diff --git a/moto/eb/models.py b/moto/elasticbeanstalk/models.py similarity index 61% rename from moto/eb/models.py rename to moto/elasticbeanstalk/models.py index 71873f30cc39..83ad65ab02e2 100644 --- a/moto/eb/models.py +++ b/moto/elasticbeanstalk/models.py @@ -3,7 +3,7 @@ import boto.beanstalk from moto.core import BaseBackend, BaseModel -from .exceptions import InvalidParameterValueError +from .exceptions import InvalidParameterValueError, ResourceNotFoundException class FakeEnvironment(BaseModel): @@ -90,6 +90,54 @@ def create_application(self, application_name): self.applications[application_name] = new_app return new_app + def create_environment(self, app, environment_name, stack_name, tags): + return app.create_environment( + environment_name=environment_name, + solution_stack_name=stack_name, + tags=tags, + ) + + def describe_environments(self): + envs = [] + for app in self.applications.values(): + for env in app.environments.values(): + envs.append(env) + return envs + + def list_available_solution_stacks(self): + # Implemented in response.py + pass + + def update_tags_for_resource(self, resource_arn, tags_to_add, tags_to_remove): + try: + res = self._find_environment_by_arn(resource_arn) + except KeyError: + raise ResourceNotFoundException( + "Resource not found for ARN '{}'.".format(resource_arn) + ) + + for key, value in tags_to_add.items(): + res.tags[key] = value + + for key in tags_to_remove: + del res.tags[key] + + def list_tags_for_resource(self, resource_arn): + try: + res = self._find_environment_by_arn(resource_arn) + except KeyError: + raise ResourceNotFoundException( + "Resource not found for ARN '{}'.".format(resource_arn) + ) + return res.tags + + def _find_environment_by_arn(self, arn): + for app in self.applications.keys(): + for env in self.applications[app].environments.values(): + if env.environment_arn == arn: + return env + raise KeyError() + eb_backends = dict( (region.name, EBBackend(region.name)) for region in boto.beanstalk.regions() diff --git a/moto/eb/responses.py b/moto/elasticbeanstalk/responses.py similarity index 97% rename from moto/eb/responses.py rename to moto/elasticbeanstalk/responses.py index 6178e4a7ffde..0416121b2d44 100644 --- a/moto/eb/responses.py +++ b/moto/elasticbeanstalk/responses.py @@ -1,7 +1,7 @@ from moto.core.responses import BaseResponse from moto.core.utils import tags_from_query_string from .models import eb_backends -from .exceptions import InvalidParameterValueError, ResourceNotFoundException +from .exceptions import InvalidParameterValueError class EBResponse(BaseResponse): @@ -34,9 +34,10 @@ def create_environment(self): ) tags = tags_from_query_string(self.querystring, prefix="Tags.member") - env = app.create_environment( + env = self.backend.create_environment( + app, environment_name=self._get_param("EnvironmentName"), - solution_stack_name=self._get_param("SolutionStackName"), + stack_name=self._get_param("SolutionStackName"), tags=tags, ) @@ -44,11 +45,7 @@ def create_environment(self): return template.render(environment=env, region=self.backend.region,) def 
describe_environments(self): - envs = [] - - for app in self.backend.applications.values(): - for env in app.environments.values(): - envs.append(env) + envs = self.backend.describe_environments() template = self.response_template(EB_DESCRIBE_ENVIRONMENTS) return template.render(environments=envs,) @@ -57,43 +54,19 @@ def describe_environments(self): def list_available_solution_stacks(): return EB_LIST_AVAILABLE_SOLUTION_STACKS - def _find_environment_by_arn(self, arn): - for app in self.backend.applications.keys(): - for env in self.backend.applications[app].environments.values(): - if env.environment_arn == arn: - return env - raise KeyError() - def update_tags_for_resource(self): resource_arn = self._get_param("ResourceArn") - try: - res = self._find_environment_by_arn(resource_arn) - except KeyError: - raise ResourceNotFoundException( - "Resource not found for ARN '{}'.".format(resource_arn) - ) - tags_to_add = tags_from_query_string( self.querystring, prefix="TagsToAdd.member" ) - for key, value in tags_to_add.items(): - res.tags[key] = value - tags_to_remove = self._get_multi_param("TagsToRemove.member") - for key in tags_to_remove: - del res.tags[key] + self.backend.update_tags_for_resource(resource_arn, tags_to_add, tags_to_remove) return EB_UPDATE_TAGS_FOR_RESOURCE def list_tags_for_resource(self): resource_arn = self._get_param("ResourceArn") - try: - res = self._find_environment_by_arn(resource_arn) - except KeyError: - raise ResourceNotFoundException( - "Resource not found for ARN '{}'.".format(resource_arn) - ) - tags = res.tags + tags = self.backend.list_tags_for_resource(resource_arn) template = self.response_template(EB_LIST_TAGS_FOR_RESOURCE) return template.render(tags=tags, arn=resource_arn,) diff --git a/moto/eb/urls.py b/moto/elasticbeanstalk/urls.py similarity index 100% rename from moto/eb/urls.py rename to moto/elasticbeanstalk/urls.py diff --git a/tests/test_eb/test_eb.py b/tests/test_eb/test_eb.py index 1064bf31a0a2..42eb09be3eba 100644 --- a/tests/test_eb/test_eb.py +++ b/tests/test_eb/test_eb.py @@ -2,10 +2,10 @@ import sure # noqa from botocore.exceptions import ClientError -from moto import mock_eb +from moto import mock_elasticbeanstalk -@mock_eb +@mock_elasticbeanstalk def test_create_application(): # Create Elastic Beanstalk Application conn = boto3.client("elasticbeanstalk", region_name="us-east-1") @@ -13,7 +13,7 @@ def test_create_application(): app["Application"]["ApplicationName"].should.equal("myapp") -@mock_eb +@mock_elasticbeanstalk def test_create_application_dup(): conn = boto3.client("elasticbeanstalk", region_name="us-east-1") conn.create_application(ApplicationName="myapp",) @@ -22,7 +22,7 @@ def test_create_application_dup(): ) -@mock_eb +@mock_elasticbeanstalk def test_describe_applications(): # Create Elastic Beanstalk Application conn = boto3.client("elasticbeanstalk", region_name="us-east-1") @@ -33,7 +33,7 @@ def test_describe_applications(): apps["Applications"][0]["ApplicationName"].should.equal("myapp") -@mock_eb +@mock_elasticbeanstalk def test_create_environment(): # Create Elastic Beanstalk Environment conn = boto3.client("elasticbeanstalk", region_name="us-east-1") @@ -42,7 +42,7 @@ def test_create_environment(): env["EnvironmentName"].should.equal("myenv") -@mock_eb +@mock_elasticbeanstalk def test_describe_environments(): # List Elastic Beanstalk Envs conn = boto3.client("elasticbeanstalk", region_name="us-east-1") @@ -72,7 +72,7 @@ def tags_list_to_dict(tag_list): return tag_dict -@mock_eb +@mock_elasticbeanstalk def 
test_create_environment_tags(): conn = boto3.client("elasticbeanstalk", region_name="us-east-1") conn.create_application(ApplicationName="myapp",) @@ -88,7 +88,7 @@ def test_create_environment_tags(): tags_list_to_dict(tags["ResourceTags"]).should.equal(env_tags) -@mock_eb +@mock_elasticbeanstalk def test_update_tags(): conn = boto3.client("elasticbeanstalk", region_name="us-east-1") conn.create_application(ApplicationName="myapp",) @@ -122,7 +122,7 @@ def test_update_tags(): tags_list_to_dict(tags["ResourceTags"]).should.equal(total_env_tags) -@mock_eb +@mock_elasticbeanstalk def test_list_available_solution_stacks(): conn = boto3.client("elasticbeanstalk", region_name="us-east-1") stacks = conn.list_available_solution_stacks() From 551dc024595cc602091ecf640311a3db4a52d6ca Mon Sep 17 00:00:00 2001 From: Bert Blommers Date: Mon, 30 Mar 2020 16:28:36 +0100 Subject: [PATCH 182/658] ElasticBeanstalk - Fix tests in Python2 and ServerMode --- moto/backends.py | 2 ++ moto/elasticbeanstalk/models.py | 16 ++++++++++++---- moto/elasticbeanstalk/responses.py | 3 +-- 3 files changed, 15 insertions(+), 6 deletions(-) diff --git a/moto/backends.py b/moto/backends.py index a358b8fd2780..a48df74a4586 100644 --- a/moto/backends.py +++ b/moto/backends.py @@ -23,6 +23,7 @@ from moto.ec2_instance_connect import ec2_instance_connect_backends from moto.ecr import ecr_backends from moto.ecs import ecs_backends +from moto.elasticbeanstalk import eb_backends from moto.elb import elb_backends from moto.elbv2 import elbv2_backends from moto.emr import emr_backends @@ -77,6 +78,7 @@ "ec2_instance_connect": ec2_instance_connect_backends, "ecr": ecr_backends, "ecs": ecs_backends, + "elasticbeanstalk": eb_backends, "elb": elb_backends, "elbv2": elbv2_backends, "events": events_backends, diff --git a/moto/elasticbeanstalk/models.py b/moto/elasticbeanstalk/models.py index 83ad65ab02e2..3767846c1117 100644 --- a/moto/elasticbeanstalk/models.py +++ b/moto/elasticbeanstalk/models.py @@ -1,6 +1,6 @@ import weakref -import boto.beanstalk +from boto3 import Session from moto.core import BaseBackend, BaseModel from .exceptions import InvalidParameterValueError, ResourceNotFoundException @@ -139,6 +139,14 @@ def _find_environment_by_arn(self, arn): raise KeyError() -eb_backends = dict( - (region.name, EBBackend(region.name)) for region in boto.beanstalk.regions() -) +eb_backends = {} +for region in Session().get_available_regions("elasticbeanstalk"): + eb_backends[region] = EBBackend(region) +for region in Session().get_available_regions( + "elasticbeanstalk", partition_name="aws-us-gov" +): + eb_backends[region] = EBBackend(region) +for region in Session().get_available_regions( + "elasticbeanstalk", partition_name="aws-cn" +): + eb_backends[region] = EBBackend(region) diff --git a/moto/elasticbeanstalk/responses.py b/moto/elasticbeanstalk/responses.py index 0416121b2d44..387cbb3ea24a 100644 --- a/moto/elasticbeanstalk/responses.py +++ b/moto/elasticbeanstalk/responses.py @@ -50,8 +50,7 @@ def describe_environments(self): template = self.response_template(EB_DESCRIBE_ENVIRONMENTS) return template.render(environments=envs,) - @staticmethod - def list_available_solution_stacks(): + def list_available_solution_stacks(self): return EB_LIST_AVAILABLE_SOLUTION_STACKS def update_tags_for_resource(self): From 0c191ac33b3f38a05bd41ed8ee1e082c926de3d4 Mon Sep 17 00:00:00 2001 From: Mike Grima Date: Mon, 30 Mar 2020 17:23:33 -0700 Subject: [PATCH 183/658] Raise errors on tagging buckets with aws:* Cannot tag S3 buckets with reserved tag 
key space `aws:` --- moto/s3/exceptions.py | 9 +++++++++ moto/s3/responses.py | 6 ++++++ tests/test_s3/test_s3.py | 18 ++++++++++++++++++ 3 files changed, 33 insertions(+) diff --git a/moto/s3/exceptions.py b/moto/s3/exceptions.py index e26f384d50f8..c38a4f467d0a 100644 --- a/moto/s3/exceptions.py +++ b/moto/s3/exceptions.py @@ -368,3 +368,12 @@ def __init__(self): super(WrongPublicAccessBlockAccountIdError, self).__init__( "AccessDenied", "Access Denied" ) + + +class NoSystemTags(S3ClientError): + code = 400 + + def __init__(self): + super(NoSystemTags, self).__init__( + "InvalidTag", "System tags cannot be added/updated by requester" + ) diff --git a/moto/s3/responses.py b/moto/s3/responses.py index b74be9a63442..197cd90806c1 100644 --- a/moto/s3/responses.py +++ b/moto/s3/responses.py @@ -34,6 +34,7 @@ InvalidNotificationARN, InvalidNotificationEvent, ObjectNotInActiveTierError, + NoSystemTags, ) from .models import ( s3_backend, @@ -1399,6 +1400,11 @@ def _bucket_tagging_from_xml(self, xml): for tag in parsed_xml["Tagging"]["TagSet"]["Tag"]: tags.append(FakeTag(tag["Key"], tag["Value"])) + # Verify that "aws:" is not in the tags. If so, then this is a problem: + for tag in tags: + if tag.key.startswith("aws:"): + raise NoSystemTags() + tag_set = FakeTagSet(tags) tagging = FakeTagging(tag_set) return tagging diff --git a/tests/test_s3/test_s3.py b/tests/test_s3/test_s3.py index 800daaef87dc..303ed523d924 100644 --- a/tests/test_s3/test_s3.py +++ b/tests/test_s3/test_s3.py @@ -2413,6 +2413,24 @@ def test_boto3_put_bucket_tagging(): "Cannot provide multiple Tags with the same key" ) + # Cannot put tags that are "system" tags - i.e. tags that start with "aws:" + with assert_raises(ClientError) as ce: + s3.put_bucket_tagging( + Bucket=bucket_name, + Tagging={"TagSet": [{"Key": "aws:sometag", "Value": "nope"}]}, + ) + e = ce.exception + e.response["Error"]["Code"].should.equal("InvalidTag") + e.response["Error"]["Message"].should.equal( + "System tags cannot be added/updated by requester" + ) + + # This is OK though: + s3.put_bucket_tagging( + Bucket=bucket_name, + Tagging={"TagSet": [{"Key": "something:aws:stuff", "Value": "this is fine"}]}, + ) + @mock_s3 def test_boto3_get_bucket_tagging(): From 6dd6686afcc5c9dc40a9ba90b5b853b2a9f60e48 Mon Sep 17 00:00:00 2001 From: Bert Blommers Date: Tue, 31 Mar 2020 11:10:38 +0100 Subject: [PATCH 184/658] Use TaggingService for S3 Buckets --- moto/s3/models.py | 23 ++++++++++++++++++----- moto/s3/responses.py | 8 ++++---- moto/utilities/tagging_service.py | 12 ++++++++++-- 3 files changed, 32 insertions(+), 11 deletions(-) diff --git a/moto/s3/models.py b/moto/s3/models.py index 8c2a86f4121f..aede52d26b9d 100644 --- a/moto/s3/models.py +++ b/moto/s3/models.py @@ -22,6 +22,7 @@ from bisect import insort from moto.core import ACCOUNT_ID, BaseBackend, BaseModel from moto.core.utils import iso_8601_datetime_with_milliseconds, rfc_1123_datetime +from moto.utilities.tagging_service import TaggingService from .exceptions import ( BucketAlreadyExists, MissingBucket, @@ -787,7 +788,6 @@ def __init__(self, name, region_name): self.policy = None self.website_configuration = None self.acl = get_canned_acl("private") - self.tags = FakeTagging() self.cors = [] self.logging = {} self.notification_configuration = None @@ -1085,6 +1085,10 @@ def get_cfn_attribute(self, attribute_name): def set_acl(self, acl): self.acl = acl + @property + def arn(self): + return "arn:aws:s3:::{}".format(self.name) + @property def physical_resource_id(self): return self.name @@ -1110,7 
+1114,7 @@ def to_config_dict(self): int(time.mktime(self.creation_date.timetuple())) ), # PY2 and 3 compatible "configurationItemMD5Hash": "", - "arn": "arn:aws:s3:::{}".format(self.name), + "arn": self.arn, "resourceType": "AWS::S3::Bucket", "resourceId": self.name, "resourceName": self.name, @@ -1119,7 +1123,7 @@ def to_config_dict(self): "resourceCreationTime": str(self.creation_date), "relatedEvents": [], "relationships": [], - "tags": {tag.key: tag.value for tag in self.tagging.tag_set.tags}, + "tags": s3_backend.tagger.get_tag_dict_for_resource(self.arn), "configuration": { "name": self.name, "owner": {"id": OWNER}, @@ -1181,6 +1185,7 @@ class S3Backend(BaseBackend): def __init__(self): self.buckets = {} self.account_public_access_block = None + self.tagger = TaggingService() def create_bucket(self, bucket_name, region_name): if bucket_name in self.buckets: @@ -1357,16 +1362,24 @@ def set_key_tagging(self, bucket_name, key_name, tagging, version_id=None): key.set_tagging(tagging) return key + def get_bucket_tags(self, bucket_name): + bucket = self.get_bucket(bucket_name) + return self.tagger.list_tags_for_resource(bucket.arn) + def put_bucket_tagging(self, bucket_name, tagging): tag_keys = [tag.key for tag in tagging.tag_set.tags] if len(tag_keys) != len(set(tag_keys)): raise DuplicateTagKeys() bucket = self.get_bucket(bucket_name) - bucket.set_tags(tagging) + self.tagger.delete_all_tags_for_resource(bucket.arn) + self.tagger.tag_resource( + bucket.arn, + [{"Key": tag.key, "Value": tag.value} for tag in tagging.tag_set.tags], + ) def delete_bucket_tagging(self, bucket_name): bucket = self.get_bucket(bucket_name) - bucket.delete_tags() + self.tagger.delete_all_tags_for_resource(bucket.arn) def put_bucket_cors(self, bucket_name, cors_rules): bucket = self.get_bucket(bucket_name) diff --git a/moto/s3/responses.py b/moto/s3/responses.py index 197cd90806c1..f3a5eeaac48e 100644 --- a/moto/s3/responses.py +++ b/moto/s3/responses.py @@ -378,13 +378,13 @@ def _bucket_response_get(self, bucket_name, querystring): template = self.response_template(S3_OBJECT_ACL_RESPONSE) return template.render(obj=bucket) elif "tagging" in querystring: - bucket = self.backend.get_bucket(bucket_name) + tags = self.backend.get_bucket_tags(bucket_name)["Tags"] # "Special Error" if no tags: - if len(bucket.tagging.tag_set.tags) == 0: + if len(tags) == 0: template = self.response_template(S3_NO_BUCKET_TAGGING) return 404, {}, template.render(bucket_name=bucket_name) template = self.response_template(S3_BUCKET_TAGGING_RESPONSE) - return template.render(bucket=bucket) + return template.render(tags=tags) elif "logging" in querystring: bucket = self.backend.get_bucket(bucket_name) if not bucket.logging: @@ -1929,7 +1929,7 @@ def _key_response_post(self, request, body, bucket_name, query, key_name): S3_BUCKET_TAGGING_RESPONSE = """ - {% for tag in bucket.tagging.tag_set.tags %} + {% for tag in tags %} {{ tag.key }} {{ tag.value }} diff --git a/moto/utilities/tagging_service.py b/moto/utilities/tagging_service.py index 89b857277b15..8c322855255d 100644 --- a/moto/utilities/tagging_service.py +++ b/moto/utilities/tagging_service.py @@ -5,15 +5,23 @@ def __init__(self, tagName="Tags", keyName="Key", valueName="Value"): self.valueName = valueName self.tags = {} + def get_tag_dict_for_resource(self, arn): + result = {} + if self.has_tags(arn): + for k, v in self.tags[arn].items(): + result[k] = v + return result + def list_tags_for_resource(self, arn): result = [] - if arn in self.tags: + if self.has_tags(arn): for k, v in 
self.tags[arn].items(): result.append({self.keyName: k, self.valueName: v}) return {self.tagName: result} def delete_all_tags_for_resource(self, arn): - del self.tags[arn] + if self.has_tags(arn): + del self.tags[arn] def has_tags(self, arn): return arn in self.tags From f7ad4cbc09164205d9f216355cec6c921170d3f9 Mon Sep 17 00:00:00 2001 From: Bert Blommers Date: Tue, 31 Mar 2020 12:04:04 +0100 Subject: [PATCH 185/658] Use TaggingService for S3 Objects --- moto/s3/models.py | 30 ++++++++++++------- moto/s3/responses.py | 28 ++++++------------ moto/utilities/tagging_service.py | 6 ++++ tests/test_s3/test_s3.py | 9 ++++-- tests/test_utilities/test_tagging_service.py | 31 ++++++++++++++++++++ 5 files changed, 71 insertions(+), 33 deletions(-) diff --git a/moto/s3/models.py b/moto/s3/models.py index aede52d26b9d..b5224b64acd3 100644 --- a/moto/s3/models.py +++ b/moto/s3/models.py @@ -95,6 +95,7 @@ def __init__( version_id=0, max_buffer_size=DEFAULT_KEY_BUFFER_SIZE, multipart=None, + bucket_name=None, ): self.name = name self.last_modified = datetime.datetime.utcnow() @@ -106,8 +107,8 @@ def __init__( self._etag = etag self._version_id = version_id self._is_versioned = is_versioned - self._tagging = FakeTagging() self.multipart = multipart + self.bucket_name = bucket_name self._value_buffer = tempfile.SpooledTemporaryFile(max_size=max_buffer_size) self._max_buffer_size = max_buffer_size @@ -127,6 +128,13 @@ def value(self): self.lock.release() return r + @property + def arn(self): + # S3 Objects don't have an ARN, but we do need something unique when creating tags against this resource + return "arn:aws:s3:::{}/{}/{}".format( + self.bucket_name, self.name, self.version_id + ) + @value.setter def value(self, new_value): self._value_buffer.seek(0) @@ -153,9 +161,6 @@ def set_metadata(self, metadata, replace=False): self._metadata = {} self._metadata.update(metadata) - def set_tagging(self, tagging): - self._tagging = tagging - def set_storage_class(self, storage): if storage is not None and storage not in STORAGE_CLASS: raise InvalidStorageClass(storage=storage) @@ -211,10 +216,6 @@ def last_modified_RFC1123(self): def metadata(self): return self._metadata - @property - def tagging(self): - return self._tagging - @property def response_dict(self): res = { @@ -1355,11 +1356,17 @@ def get_key(self, bucket_name, key_name, version_id=None, part_number=None): else: return None - def set_key_tagging(self, bucket_name, key_name, tagging, version_id=None): - key = self.get_key(bucket_name, key_name, version_id) + def get_key_tags(self, key): + return self.tagger.list_tags_for_resource(key.arn) + + def set_key_tags(self, key, tagging, key_name=None): if key is None: raise MissingKey(key_name) - key.set_tagging(tagging) + self.tagger.delete_all_tags_for_resource(key.arn) + self.tagger.tag_resource( + key.arn, + [{"Key": tag.key, "Value": tag.value} for tag in tagging.tag_set.tags], + ) return key def get_bucket_tags(self, bucket_name): @@ -1587,6 +1594,7 @@ def copy_key( key = self.get_key(src_bucket_name, src_key_name, version_id=src_version_id) new_key = key.copy(dest_key_name, dest_bucket.is_versioned) + self.tagger.copy_tags(key.arn, new_key.arn) if storage is not None: new_key.set_storage_class(storage) diff --git a/moto/s3/responses.py b/moto/s3/responses.py index f3a5eeaac48e..4e3b9a67b168 100644 --- a/moto/s3/responses.py +++ b/moto/s3/responses.py @@ -383,7 +383,7 @@ def _bucket_response_get(self, bucket_name, querystring): if len(tags) == 0: template = self.response_template(S3_NO_BUCKET_TAGGING) 
return 404, {}, template.render(bucket_name=bucket_name) - template = self.response_template(S3_BUCKET_TAGGING_RESPONSE) + template = self.response_template(S3_OBJECT_TAGGING_RESPONSE) return template.render(tags=tags) elif "logging" in querystring: bucket = self.backend.get_bucket(bucket_name) @@ -1091,8 +1091,9 @@ def _key_response_get(self, bucket_name, query, key_name, headers): template = self.response_template(S3_OBJECT_ACL_RESPONSE) return 200, response_headers, template.render(obj=key) if "tagging" in query: + tags = self.backend.get_key_tags(key)["Tags"] template = self.response_template(S3_OBJECT_TAGGING_RESPONSE) - return 200, response_headers, template.render(obj=key) + return 200, response_headers, template.render(tags=tags) response_headers.update(key.metadata) response_headers.update(key.response_dict) @@ -1164,8 +1165,9 @@ def _key_response_put(self, request, body, bucket_name, query, key_name, headers version_id = query["versionId"][0] else: version_id = None + key = self.backend.get_key(bucket_name, key_name, version_id=version_id) tagging = self._tagging_from_xml(body) - self.backend.set_key_tagging(bucket_name, key_name, tagging, version_id) + self.backend.set_key_tags(key, tagging, key_name) return 200, response_headers, "" if "x-amz-copy-source" in request.headers: @@ -1206,7 +1208,7 @@ def _key_response_put(self, request, body, bucket_name, query, key_name, headers tdirective = request.headers.get("x-amz-tagging-directive") if tdirective == "REPLACE": tagging = self._tagging_from_headers(request.headers) - new_key.set_tagging(tagging) + self.backend.set_key_tags(new_key, tagging) template = self.response_template(S3_OBJECT_COPY_RESPONSE) response_headers.update(new_key.response_dict) return 200, response_headers, template.render(key=new_key) @@ -1230,7 +1232,7 @@ def _key_response_put(self, request, body, bucket_name, query, key_name, headers new_key.website_redirect_location = request.headers.get( "x-amz-website-redirect-location" ) - new_key.set_tagging(tagging) + self.backend.set_key_tags(new_key, tagging) template = self.response_template(S3_OBJECT_RESPONSE) response_headers.update(new_key.response_dict) @@ -1916,23 +1918,11 @@ def _key_response_post(self, request, body, bucket_name, query, key_name): S3_OBJECT_TAGGING_RESPONSE = """\ - - {% for tag in obj.tagging.tag_set.tags %} - - {{ tag.key }} - {{ tag.value }} - - {% endfor %} - -""" - -S3_BUCKET_TAGGING_RESPONSE = """ - {% for tag in tags %} - {{ tag.key }} - {{ tag.value }} + {{ tag.Key }} + {{ tag.Value }} {% endfor %} diff --git a/moto/utilities/tagging_service.py b/moto/utilities/tagging_service.py index 8c322855255d..2d6ac99c9fd0 100644 --- a/moto/utilities/tagging_service.py +++ b/moto/utilities/tagging_service.py @@ -35,6 +35,12 @@ def tag_resource(self, arn, tags): else: self.tags[arn][t[self.keyName]] = None + def copy_tags(self, from_arn, to_arn): + if self.has_tags(from_arn): + self.tag_resource( + to_arn, self.list_tags_for_resource(from_arn)[self.tagName] + ) + def untag_resource_using_names(self, arn, tag_names): for name in tag_names: if name in self.tags.get(arn, {}): diff --git a/tests/test_s3/test_s3.py b/tests/test_s3/test_s3.py index 303ed523d924..4ddc160a8bd9 100644 --- a/tests/test_s3/test_s3.py +++ b/tests/test_s3/test_s3.py @@ -3255,7 +3255,8 @@ def test_boto3_put_object_tagging_on_earliest_version(): # Older version has tags while the most recent does not resp = s3.get_object_tagging(Bucket=bucket_name, Key=key, VersionId=first_object.id) 
resp["ResponseMetadata"]["HTTPStatusCode"].should.equal(200) - resp["TagSet"].should.equal( + sorted_tagset = sorted(resp["TagSet"], key=lambda t: t["Key"]) + sorted_tagset.should.equal( [{"Key": "item1", "Value": "foo"}, {"Key": "item2", "Value": "bar"}] ) @@ -3333,7 +3334,8 @@ def test_boto3_put_object_tagging_on_both_version(): resp = s3.get_object_tagging(Bucket=bucket_name, Key=key, VersionId=first_object.id) resp["ResponseMetadata"]["HTTPStatusCode"].should.equal(200) - resp["TagSet"].should.equal( + sorted_tagset = sorted(resp["TagSet"], key=lambda t: t["Key"]) + sorted_tagset.should.equal( [{"Key": "item1", "Value": "foo"}, {"Key": "item2", "Value": "bar"}] ) @@ -3341,7 +3343,8 @@ def test_boto3_put_object_tagging_on_both_version(): Bucket=bucket_name, Key=key, VersionId=second_object.id ) resp["ResponseMetadata"]["HTTPStatusCode"].should.equal(200) - resp["TagSet"].should.equal( + sorted_tagset = sorted(resp["TagSet"], key=lambda t: t["Key"]) + sorted_tagset.should.equal( [{"Key": "item1", "Value": "baz"}, {"Key": "item2", "Value": "bin"}] ) diff --git a/tests/test_utilities/test_tagging_service.py b/tests/test_utilities/test_tagging_service.py index 249e903fe774..1eac276a19a5 100644 --- a/tests/test_utilities/test_tagging_service.py +++ b/tests/test_utilities/test_tagging_service.py @@ -77,3 +77,34 @@ def test_extract_tag_names(): expected = ["key1", "key2"] expected.should.be.equal(actual) + + +def test_copy_non_existing_arn(): + svc = TaggingService() + tags = [{"Key": "key1", "Value": "value1"}, {"Key": "key2", "Value": "value2"}] + svc.tag_resource("new_arn", tags) + # + svc.copy_tags("non_existing_arn", "new_arn") + # Copying from a non-existing ARN should a NOOP + # Assert the old tags still exist + actual = sorted( + svc.list_tags_for_resource("new_arn")["Tags"], key=lambda t: t["Key"] + ) + actual.should.equal(tags) + + +def test_copy_existing_arn(): + svc = TaggingService() + tags_old_arn = [{"Key": "key1", "Value": "value1"}] + tags_new_arn = [{"Key": "key2", "Value": "value2"}] + svc.tag_resource("old_arn", tags_old_arn) + svc.tag_resource("new_arn", tags_new_arn) + # + svc.copy_tags("old_arn", "new_arn") + # Assert the old tags still exist + actual = sorted( + svc.list_tags_for_resource("new_arn")["Tags"], key=lambda t: t["Key"] + ) + actual.should.equal( + [{"Key": "key1", "Value": "value1"}, {"Key": "key2", "Value": "value2"}] + ) From 8dbfd43c5c7556af546764d16b2f49d57a3127c4 Mon Sep 17 00:00:00 2001 From: Bert Blommers Date: Wed, 1 Apr 2020 15:35:25 +0100 Subject: [PATCH 186/658] Use TaggingService for S3 - Cleanup --- moto/s3/models.py | 59 +++++++------------------------ moto/s3/responses.py | 60 +++++++++++++------------------- tests/test_config/test_config.py | 2 ++ tests/test_s3/test_s3.py | 11 ++---- 4 files changed, 40 insertions(+), 92 deletions(-) diff --git a/moto/s3/models.py b/moto/s3/models.py index b5224b64acd3..44a94e7a32fc 100644 --- a/moto/s3/models.py +++ b/moto/s3/models.py @@ -35,7 +35,6 @@ MalformedXML, InvalidStorageClass, InvalidTargetBucketForLogging, - DuplicateTagKeys, CrossLocationLoggingProhibitted, NoSuchPublicAccessBlockConfiguration, InvalidPublicAccessBlockConfiguration, @@ -473,26 +472,10 @@ def get_canned_acl(acl): return FakeAcl(grants=grants) -class FakeTagging(BaseModel): - def __init__(self, tag_set=None): - self.tag_set = tag_set or FakeTagSet() - - -class FakeTagSet(BaseModel): - def __init__(self, tags=None): - self.tags = tags or [] - - -class FakeTag(BaseModel): - def __init__(self, key, value=None): - self.key = key - 
self.value = value - - class LifecycleFilter(BaseModel): def __init__(self, prefix=None, tag=None, and_filter=None): self.prefix = prefix - self.tag = tag + (self.tag_key, self.tag_value) = tag if tag else (None, None) self.and_filter = and_filter def to_config_dict(self): @@ -501,11 +484,11 @@ def to_config_dict(self): "predicate": {"type": "LifecyclePrefixPredicate", "prefix": self.prefix} } - elif self.tag: + elif self.tag_key: return { "predicate": { "type": "LifecycleTagPredicate", - "tag": {"key": self.tag.key, "value": self.tag.value}, + "tag": {"key": self.tag_key, "value": self.tag_value}, } } @@ -529,12 +512,9 @@ def to_config_dict(self): if self.prefix is not None: data.append({"type": "LifecyclePrefixPredicate", "prefix": self.prefix}) - for tag in self.tags: + for key, value in self.tags.items(): data.append( - { - "type": "LifecycleTagPredicate", - "tag": {"key": tag.key, "value": tag.value}, - } + {"type": "LifecycleTagPredicate", "tag": {"key": key, "value": value},} ) return data @@ -880,7 +860,7 @@ def set_lifecycle(self, rules): and_filter = None if rule["Filter"].get("And"): filters += 1 - and_tags = [] + and_tags = {} if rule["Filter"]["And"].get("Tag"): if not isinstance(rule["Filter"]["And"]["Tag"], list): rule["Filter"]["And"]["Tag"] = [ @@ -888,7 +868,7 @@ def set_lifecycle(self, rules): ] for t in rule["Filter"]["And"]["Tag"]: - and_tags.append(FakeTag(t["Key"], t.get("Value", ""))) + and_tags[t["Key"]] = t.get("Value", "") try: and_prefix = ( @@ -902,7 +882,7 @@ def set_lifecycle(self, rules): filter_tag = None if rule["Filter"].get("Tag"): filters += 1 - filter_tag = FakeTag( + filter_tag = ( rule["Filter"]["Tag"]["Key"], rule["Filter"]["Tag"].get("Value", ""), ) @@ -989,16 +969,6 @@ def set_cors(self, rules): def delete_cors(self): self.cors = [] - def set_tags(self, tagging): - self.tags = tagging - - def delete_tags(self): - self.tags = FakeTagging() - - @property - def tagging(self): - return self.tags - def set_logging(self, logging_config, bucket_backend): if not logging_config: self.logging = {} @@ -1359,13 +1329,12 @@ def get_key(self, bucket_name, key_name, version_id=None, part_number=None): def get_key_tags(self, key): return self.tagger.list_tags_for_resource(key.arn) - def set_key_tags(self, key, tagging, key_name=None): + def set_key_tags(self, key, tags, key_name=None): if key is None: raise MissingKey(key_name) self.tagger.delete_all_tags_for_resource(key.arn) self.tagger.tag_resource( - key.arn, - [{"Key": tag.key, "Value": tag.value} for tag in tagging.tag_set.tags], + key.arn, [{"Key": key, "Value": value} for key, value in tags.items()], ) return key @@ -1373,15 +1342,11 @@ def get_bucket_tags(self, bucket_name): bucket = self.get_bucket(bucket_name) return self.tagger.list_tags_for_resource(bucket.arn) - def put_bucket_tagging(self, bucket_name, tagging): - tag_keys = [tag.key for tag in tagging.tag_set.tags] - if len(tag_keys) != len(set(tag_keys)): - raise DuplicateTagKeys() + def put_bucket_tags(self, bucket_name, tags): bucket = self.get_bucket(bucket_name) self.tagger.delete_all_tags_for_resource(bucket.arn) self.tagger.tag_resource( - bucket.arn, - [{"Key": tag.key, "Value": tag.value} for tag in tagging.tag_set.tags], + bucket.arn, [{"Key": key, "Value": value} for key, value in tags.items()], ) def delete_bucket_tagging(self, bucket_name): diff --git a/moto/s3/responses.py b/moto/s3/responses.py index 4e3b9a67b168..913b208614e5 100644 --- a/moto/s3/responses.py +++ b/moto/s3/responses.py @@ -24,6 +24,7 @@ from .exceptions import ( 
BucketAlreadyExists, + DuplicateTagKeys, S3ClientError, MissingBucket, MissingKey, @@ -43,9 +44,6 @@ FakeGrant, FakeAcl, FakeKey, - FakeTagging, - FakeTagSet, - FakeTag, ) from .utils import ( bucket_name_from_url, @@ -652,7 +650,7 @@ def _bucket_response_put( return "" elif "tagging" in querystring: tagging = self._bucket_tagging_from_xml(body) - self.backend.put_bucket_tagging(bucket_name, tagging) + self.backend.put_bucket_tags(bucket_name, tagging) return "" elif "website" in querystring: self.backend.set_bucket_website_configuration(bucket_name, body) @@ -1361,55 +1359,45 @@ def _acl_from_headers(self, headers): return None def _tagging_from_headers(self, headers): + tags = {} if headers.get("x-amz-tagging"): parsed_header = parse_qs(headers["x-amz-tagging"], keep_blank_values=True) - tags = [] for tag in parsed_header.items(): - tags.append(FakeTag(tag[0], tag[1][0])) - - tag_set = FakeTagSet(tags) - tagging = FakeTagging(tag_set) - return tagging - else: - return FakeTagging() + tags[tag[0]] = tag[1][0] + return tags def _tagging_from_xml(self, xml): parsed_xml = xmltodict.parse(xml, force_list={"Tag": True}) - tags = [] + tags = {} for tag in parsed_xml["Tagging"]["TagSet"]["Tag"]: - tags.append(FakeTag(tag["Key"], tag["Value"])) + tags[tag["Key"]] = tag["Value"] - tag_set = FakeTagSet(tags) - tagging = FakeTagging(tag_set) - return tagging + return tags def _bucket_tagging_from_xml(self, xml): parsed_xml = xmltodict.parse(xml) - tags = [] + tags = {} # Optional if no tags are being sent: if parsed_xml["Tagging"].get("TagSet"): # If there is only 1 tag, then it's not a list: if not isinstance(parsed_xml["Tagging"]["TagSet"]["Tag"], list): - tags.append( - FakeTag( - parsed_xml["Tagging"]["TagSet"]["Tag"]["Key"], - parsed_xml["Tagging"]["TagSet"]["Tag"]["Value"], - ) - ) + tags[parsed_xml["Tagging"]["TagSet"]["Tag"]["Key"]] = parsed_xml[ + "Tagging" + ]["TagSet"]["Tag"]["Value"] else: for tag in parsed_xml["Tagging"]["TagSet"]["Tag"]: - tags.append(FakeTag(tag["Key"], tag["Value"])) + if tag["Key"] in tags: + raise DuplicateTagKeys() + tags[tag["Key"]] = tag["Value"] # Verify that "aws:" is not in the tags. 
If so, then this is a problem: - for tag in tags: - if tag.key.startswith("aws:"): + for key, _ in tags.items(): + if key.startswith("aws:"): raise NoSystemTags() - tag_set = FakeTagSet(tags) - tagging = FakeTagging(tag_set) - return tagging + return tags def _cors_from_xml(self, xml): parsed_xml = xmltodict.parse(xml) @@ -1730,10 +1718,10 @@ def _key_response_post(self, request, body, bucket_name, query, key_name): {% if rule.filter.prefix != None %} {{ rule.filter.prefix }} {% endif %} - {% if rule.filter.tag %} + {% if rule.filter.tag_key %} - {{ rule.filter.tag.key }} - {{ rule.filter.tag.value }} + {{ rule.filter.tag_key }} + {{ rule.filter.tag_value }} {% endif %} {% if rule.filter.and_filter %} @@ -1741,10 +1729,10 @@ def _key_response_post(self, request, body, bucket_name, query, key_name): {% if rule.filter.and_filter.prefix != None %} {{ rule.filter.and_filter.prefix }} {% endif %} - {% for tag in rule.filter.and_filter.tags %} + {% for key, value in rule.filter.and_filter.tags.items() %} - {{ tag.key }} - {{ tag.value }} + {{ key }} + {{ value }} {% endfor %} diff --git a/tests/test_config/test_config.py b/tests/test_config/test_config.py index 1ffd52a2cfd2..1bf39428e0e9 100644 --- a/tests/test_config/test_config.py +++ b/tests/test_config/test_config.py @@ -11,6 +11,8 @@ from moto.config import mock_config from moto.core import ACCOUNT_ID +import sure # noqa + @mock_config def test_put_configuration_recorder(): diff --git a/tests/test_s3/test_s3.py b/tests/test_s3/test_s3.py index 4ddc160a8bd9..e2acf32f2ba6 100644 --- a/tests/test_s3/test_s3.py +++ b/tests/test_s3/test_s3.py @@ -4295,24 +4295,17 @@ def test_s3_config_dict(): FakeAcl, FakeGrant, FakeGrantee, - FakeTag, - FakeTagging, - FakeTagSet, OWNER, ) # Without any buckets: assert not s3_config_query.get_config_resource("some_bucket") - tags = FakeTagging( - FakeTagSet( - [FakeTag("someTag", "someValue"), FakeTag("someOtherTag", "someOtherValue")] - ) - ) + tags = {"someTag": "someValue", "someOtherTag": "someOtherValue"} # With 1 bucket in us-west-2: s3_config_query.backends["global"].create_bucket("bucket1", "us-west-2") - s3_config_query.backends["global"].put_bucket_tagging("bucket1", tags) + s3_config_query.backends["global"].put_bucket_tags("bucket1", tags) # With a log bucket: s3_config_query.backends["global"].create_bucket("logbucket", "us-west-2") From dff1ab580b20a22a0b12f8731e86d9468f42cf4b Mon Sep 17 00:00:00 2001 From: Bert Blommers Date: Wed, 1 Apr 2020 16:15:03 +0100 Subject: [PATCH 187/658] Extend new S3 tag structure to ResourceGroupStaging API --- moto/resourcegroupstaggingapi/models.py | 17 ++++++++--------- 1 file changed, 8 insertions(+), 9 deletions(-) diff --git a/moto/resourcegroupstaggingapi/models.py b/moto/resourcegroupstaggingapi/models.py index d05a53f81548..b6e35d58606c 100644 --- a/moto/resourcegroupstaggingapi/models.py +++ b/moto/resourcegroupstaggingapi/models.py @@ -145,10 +145,7 @@ def tag_filter(tag_list): # Do S3, resource type s3 if not resource_type_filters or "s3" in resource_type_filters: for bucket in self.s3_backend.buckets.values(): - tags = [] - for tag in bucket.tags.tag_set.tags: - tags.append({"Key": tag.key, "Value": tag.value}) - + tags = self.s3_backend.tagger.list_tags_for_resource(bucket.arn)["Tags"] if not tags or not tag_filter( tags ): # Skip if no tags, or invalid filter @@ -362,8 +359,9 @@ def _get_tag_keys_generator(self): # Do S3, resource type s3 for bucket in self.s3_backend.buckets.values(): - for tag in bucket.tags.tag_set.tags: - yield tag.key + tags = 
self.s3_backend.tagger.get_tag_dict_for_resource(bucket.arn) + for key, _ in tags.items(): + yield key # EC2 tags def get_ec2_keys(res_id): @@ -414,9 +412,10 @@ def _get_tag_values_generator(self, tag_key): # Do S3, resource type s3 for bucket in self.s3_backend.buckets.values(): - for tag in bucket.tags.tag_set.tags: - if tag.key == tag_key: - yield tag.value + tags = self.s3_backend.tagger.get_tag_dict_for_resource(bucket.arn) + for key, value in tags.items(): + if key == tag_key: + yield value # EC2 tags def get_ec2_values(res_id): From af08d71310862727669cb1a5041df64472857191 Mon Sep 17 00:00:00 2001 From: Guilherme Martins Crocetti Date: Wed, 1 Apr 2020 21:57:46 -0300 Subject: [PATCH 188/658] add support for RetentionInDays for LogGroup --- moto/cloudwatch/models.py | 2 +- moto/logs/models.py | 28 +++++++++---------- .../test_cloudformation_stack_integration.py | 1 + tests/test_logs/test_logs.py | 13 ++++----- 4 files changed, 20 insertions(+), 24 deletions(-) diff --git a/moto/cloudwatch/models.py b/moto/cloudwatch/models.py index a8a1b1d19153..4cd4df156c02 100644 --- a/moto/cloudwatch/models.py +++ b/moto/cloudwatch/models.py @@ -431,7 +431,7 @@ def create_from_cloudformation_json( properties = cloudformation_json["Properties"] log_group_name = properties["LogGroupName"] tags = properties.get("Tags", {}) - return logs_backends[region_name].create_log_group(log_group_name, tags) + return logs_backends[region_name].create_log_group(log_group_name, tags, **properties) cloudwatch_backends = {} diff --git a/moto/logs/models.py b/moto/logs/models.py index 5e21d87931eb..755605734260 100644 --- a/moto/logs/models.py +++ b/moto/logs/models.py @@ -134,7 +134,7 @@ def get_index_and_direction_from_token(token): return None, 0 events = sorted( - filter(filter_func, self.events), key=lambda event: event.timestamp, + filter(filter_func, self.events), key=lambda event: event.timestamp ) direction, index = get_index_and_direction_from_token(next_token) @@ -169,11 +169,7 @@ def get_index_and_direction_from_token(token): if end_index > final_index: end_index = final_index elif end_index < 0: - return ( - [], - "b/{:056d}".format(0), - "f/{:056d}".format(0), - ) + return ([], "b/{:056d}".format(0), "f/{:056d}".format(0)) events_page = [ event.to_response_dict() for event in events[start_index : end_index + 1] @@ -219,7 +215,7 @@ def filter_func(event): class LogGroup: - def __init__(self, region, name, tags): + def __init__(self, region, name, tags, **kwargs): self.name = name self.region = region self.arn = "arn:aws:logs:{region}:1:log-group:{log_group}".format( @@ -228,9 +224,9 @@ def __init__(self, region, name, tags): self.creationTime = int(unix_time_millis()) self.tags = tags self.streams = dict() # {name: LogStream} - self.retentionInDays = ( - None # AWS defaults to Never Expire for log group retention - ) + self.retention_in_days = kwargs.get( + "RetentionInDays" + ) # AWS defaults to Never Expire for log group retention def create_log_stream(self, log_stream_name): if log_stream_name in self.streams: @@ -368,12 +364,12 @@ def to_describe_dict(self): "storedBytes": sum(s.storedBytes for s in self.streams.values()), } # AWS only returns retentionInDays if a value is set for the log group (ie. 
not Never Expire) - if self.retentionInDays: - log_group["retentionInDays"] = self.retentionInDays + if self.retention_in_days: + log_group["retentionInDays"] = self.retention_in_days return log_group def set_retention_policy(self, retention_in_days): - self.retentionInDays = retention_in_days + self.retention_in_days = retention_in_days def list_tags(self): return self.tags if self.tags else {} @@ -401,10 +397,12 @@ def reset(self): self.__dict__ = {} self.__init__(region_name) - def create_log_group(self, log_group_name, tags): + def create_log_group(self, log_group_name, tags, **kwargs): if log_group_name in self.groups: raise ResourceAlreadyExistsException() - self.groups[log_group_name] = LogGroup(self.region_name, log_group_name, tags) + self.groups[log_group_name] = LogGroup( + self.region_name, log_group_name, tags, **kwargs + ) return self.groups[log_group_name] def ensure_log_group(self, log_group_name, tags): diff --git a/tests/test_cloudformation/test_cloudformation_stack_integration.py b/tests/test_cloudformation/test_cloudformation_stack_integration.py index e501796600fe..b7fe580da79c 100644 --- a/tests/test_cloudformation/test_cloudformation_stack_integration.py +++ b/tests/test_cloudformation/test_cloudformation_stack_integration.py @@ -2380,6 +2380,7 @@ def test_create_log_group_using_fntransform(): logs_conn = boto3.client("logs", region_name="us-west-2") log_group = logs_conn.describe_log_groups()["logGroups"][0] log_group["logGroupName"].should.equal("some-log-group") + log_group["retentionInDays"].should.be.equal(90) @mock_cloudformation diff --git a/tests/test_logs/test_logs.py b/tests/test_logs/test_logs.py index e8f60ff03641..2429d7e93b03 100644 --- a/tests/test_logs/test_logs.py +++ b/tests/test_logs/test_logs.py @@ -12,17 +12,14 @@ @mock_logs -def test_log_group_create(): +def test_create_log_group(): conn = boto3.client("logs", "us-west-2") - log_group_name = "dummy" - response = conn.create_log_group(logGroupName=log_group_name) - response = conn.describe_log_groups(logGroupNamePrefix=log_group_name) - assert len(response["logGroups"]) == 1 - # AWS defaults to Never Expire for log group retention - assert response["logGroups"][0].get("retentionInDays") == None + response = conn.create_log_group(logGroupName="dummy") + response = conn.describe_log_groups() - response = conn.delete_log_group(logGroupName=log_group_name) + response["logGroups"].should.have.length_of(1) + response["logGroups"][0].should_not.have.key("retentionInDays") @mock_logs From c15ca133b85a228060489758ae76c75583bd4c65 Mon Sep 17 00:00:00 2001 From: Guilherme Martins Crocetti Date: Wed, 1 Apr 2020 22:00:20 -0300 Subject: [PATCH 189/658] add support for Fn::GetAtt in event's cloudformation --- moto/events/models.py | 8 ++++ .../test_cloudformation_stack_integration.py | 37 +++++++++++++++++++ 2 files changed, 45 insertions(+) diff --git a/moto/events/models.py b/moto/events/models.py index f68b63e38b64..3a6f1bbc75c0 100644 --- a/moto/events/models.py +++ b/moto/events/models.py @@ -59,6 +59,14 @@ def remove_targets(self, ids): if index is not None: self.targets.pop(index) + def get_cfn_attribute(self, attribute_name): + from moto.cloudformation.exceptions import UnformattedGetAttTemplateException + + if attribute_name == "Arn": + return self.arn + + raise UnformattedGetAttTemplateException() + @classmethod def create_from_cloudformation_json( cls, resource_name, cloudformation_json, region_name diff --git a/tests/test_cloudformation/test_cloudformation_stack_integration.py 
b/tests/test_cloudformation/test_cloudformation_stack_integration.py index b7fe580da79c..94367f1dc2c1 100644 --- a/tests/test_cloudformation/test_cloudformation_stack_integration.py +++ b/tests/test_cloudformation/test_cloudformation_stack_integration.py @@ -2463,3 +2463,40 @@ def test_stack_events_create_rule_without_name_integration(): rules = boto3.client("events", "us-west-2").list_rules() rules["Rules"][0]["Name"].should.contain("test_stack-Event-") + + +@mock_cloudformation +@mock_events +@mock_logs +def test_stack_events_create_rule_as_target(): + events_template = { + "AWSTemplateFormatVersion": "2010-09-09", + "Resources": { + "SecurityGroup": { + "Type": "AWS::Logs::LogGroup", + "Properties": { + "LogGroupName": {"Fn::GetAtt": ["Event", "Arn"]}, + "RetentionInDays": 3, + } + }, + "Event": { + "Type": "AWS::Events::Rule", + "Properties": { + "State": "ENABLED", + "ScheduleExpression": "rate(5 minutes)", + }, + } + }, + } + cf_conn = boto3.client("cloudformation", "us-west-2") + cf_conn.create_stack( + StackName="test_stack", TemplateBody=json.dumps(events_template), + ) + + rules = boto3.client("events", "us-west-2").list_rules() + log_groups = boto3.client("logs", "us-west-2").describe_log_groups() + + rules["Rules"][0]["Name"].should.contain("test_stack-Event-") + + log_groups["logGroups"][0]["logGroupName"].should.equal(rules["Rules"][0]["Arn"]) + log_groups["logGroups"][0]["retentionInDays"].should.equal(3) From c25f6a72da03e2ee9fd939bded406ae7f56f2339 Mon Sep 17 00:00:00 2001 From: Guilherme Martins Crocetti Date: Wed, 1 Apr 2020 22:11:50 -0300 Subject: [PATCH 190/658] refactor put_rule test --- tests/test_events/test_events.py | 16 +++++++++++++--- 1 file changed, 13 insertions(+), 3 deletions(-) diff --git a/tests/test_events/test_events.py b/tests/test_events/test_events.py index 27006ff1b2ff..5b4e958d6a57 100644 --- a/tests/test_events/test_events.py +++ b/tests/test_events/test_events.py @@ -79,13 +79,23 @@ def generate_environment(): @mock_events def test_put_rule(): client = boto3.client("events", "us-west-2") - client.list_rules()["Rules"].should.have.length_of(0) - rule_data = get_random_rule() + rule_data = { + "Name": "my-event", + "ScheduleExpression": "rate(5 minutes)", + "EventPattern": '{"source": ["test-source"]}', + } + client.put_rule(**rule_data) - client.list_rules()["Rules"].should.have.length_of(1) + rules = client.list_rules()["Rules"] + + rules.should.have.length_of(1) + rules[0]["Name"].should.equal(rule_data["Name"]) + rules[0]["ScheduleExpression"].should.equal(rule_data["ScheduleExpression"]) + rules[0]["EventPattern"].should.equal(rule_data["EventPattern"]) + rules[0]["State"].should.equal("ENABLED") @mock_events From 759107445394efe2e169935dfc6ae161f898aec4 Mon Sep 17 00:00:00 2001 From: Guilherme Martins Crocetti Date: Wed, 1 Apr 2020 22:12:17 -0300 Subject: [PATCH 191/658] add physical_resource_id support for Rule --- moto/events/models.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/moto/events/models.py b/moto/events/models.py index 3a6f1bbc75c0..e1224242e49f 100644 --- a/moto/events/models.py +++ b/moto/events/models.py @@ -26,6 +26,10 @@ def __init__(self, name, region_name, **kwargs): self.role_arn = kwargs.get("RoleArn") self.targets = [] + @property + def physical_resource_id(self): + return self.name + # This song and dance for targets is because we need order for Limits and NextTokens, but can't use OrderedDicts # with Python 2.6, so tracking it with an array it is. 
def _check_target_exists(self, target_id): From 503eeb51aea4391012ef022f51347a9276d010be Mon Sep 17 00:00:00 2001 From: Guilherme Martins Crocetti Date: Wed, 1 Apr 2020 22:48:40 -0300 Subject: [PATCH 192/658] style with black --- moto/cloudwatch/models.py | 4 +++- .../test_cloudformation_stack_integration.py | 16 +++++++--------- 2 files changed, 10 insertions(+), 10 deletions(-) diff --git a/moto/cloudwatch/models.py b/moto/cloudwatch/models.py index 4cd4df156c02..bc941809b58e 100644 --- a/moto/cloudwatch/models.py +++ b/moto/cloudwatch/models.py @@ -431,7 +431,9 @@ def create_from_cloudformation_json( properties = cloudformation_json["Properties"] log_group_name = properties["LogGroupName"] tags = properties.get("Tags", {}) - return logs_backends[region_name].create_log_group(log_group_name, tags, **properties) + return logs_backends[region_name].create_log_group( + log_group_name, tags, **properties + ) cloudwatch_backends = {} diff --git a/tests/test_cloudformation/test_cloudformation_stack_integration.py b/tests/test_cloudformation/test_cloudformation_stack_integration.py index 94367f1dc2c1..c99bf16f49a3 100644 --- a/tests/test_cloudformation/test_cloudformation_stack_integration.py +++ b/tests/test_cloudformation/test_cloudformation_stack_integration.py @@ -2373,9 +2373,7 @@ def test_create_log_group_using_fntransform(): } cf_conn = boto3.client("cloudformation", "us-west-2") - cf_conn.create_stack( - StackName="test_stack", TemplateBody=json.dumps(template), - ) + cf_conn.create_stack(StackName="test_stack", TemplateBody=json.dumps(template)) logs_conn = boto3.client("logs", region_name="us-west-2") log_group = logs_conn.describe_log_groups()["logGroups"][0] @@ -2401,7 +2399,7 @@ def test_stack_events_create_rule_integration(): } cf_conn = boto3.client("cloudformation", "us-west-2") cf_conn.create_stack( - StackName="test_stack", TemplateBody=json.dumps(events_template), + StackName="test_stack", TemplateBody=json.dumps(events_template) ) rules = boto3.client("events", "us-west-2").list_rules() @@ -2429,7 +2427,7 @@ def test_stack_events_delete_rule_integration(): } cf_conn = boto3.client("cloudformation", "us-west-2") cf_conn.create_stack( - StackName="test_stack", TemplateBody=json.dumps(events_template), + StackName="test_stack", TemplateBody=json.dumps(events_template) ) rules = boto3.client("events", "us-west-2").list_rules() @@ -2458,7 +2456,7 @@ def test_stack_events_create_rule_without_name_integration(): } cf_conn = boto3.client("cloudformation", "us-west-2") cf_conn.create_stack( - StackName="test_stack", TemplateBody=json.dumps(events_template), + StackName="test_stack", TemplateBody=json.dumps(events_template) ) rules = boto3.client("events", "us-west-2").list_rules() @@ -2477,7 +2475,7 @@ def test_stack_events_create_rule_as_target(): "Properties": { "LogGroupName": {"Fn::GetAtt": ["Event", "Arn"]}, "RetentionInDays": 3, - } + }, }, "Event": { "Type": "AWS::Events::Rule", @@ -2485,12 +2483,12 @@ def test_stack_events_create_rule_as_target(): "State": "ENABLED", "ScheduleExpression": "rate(5 minutes)", }, - } + }, }, } cf_conn = boto3.client("cloudformation", "us-west-2") cf_conn.create_stack( - StackName="test_stack", TemplateBody=json.dumps(events_template), + StackName="test_stack", TemplateBody=json.dumps(events_template) ) rules = boto3.client("events", "us-west-2").list_rules() From 231b1000571c1720655989c82760116402643935 Mon Sep 17 00:00:00 2001 From: mickeypash Date: Fri, 3 Apr 2020 01:50:17 +0100 Subject: [PATCH 193/658] Add test scaffold. 
Currently broken --- tests/test_ec2/test_elastic_block_store.py | 38 ++++++++++++++++++++++ 1 file changed, 38 insertions(+) diff --git a/tests/test_ec2/test_elastic_block_store.py b/tests/test_ec2/test_elastic_block_store.py index 3c7e17ec8077..0e39d206905e 100644 --- a/tests/test_ec2/test_elastic_block_store.py +++ b/tests/test_ec2/test_elastic_block_store.py @@ -13,6 +13,7 @@ import sure # noqa from moto import mock_ec2_deprecated, mock_ec2 +from moto.ec2.exceptions import VolumeInUseError from moto.ec2.models import OWNER_ID @@ -53,6 +54,43 @@ def test_create_and_delete_volume(): cm.exception.request_id.should_not.be.none +@mock_ec2_deprecated +def test_delete_attached_volume(): + conn = boto.ec2.connect_to_region("us-east-1") + reservation = conn.run_instances("ami-1234abcd") + # create an instance + instance = reservation.instances[0] + # create a volume + volume = conn.create_volume(80, "us-east-1a") + # attach volume to instance + volume.attach(instance.id, "/dev/sdh") + + volume.update() + volume.volume_state().should.equal("in-use") + volume.attachment_state().should.equal("attached") + + volume.attach_data.instance_id.should.equal(instance.id) + + # attempt to delete volume + # assert raises VolumeInUseError + with assert_raises(VolumeInUseError) as ex: + volume.delete() + ex.exception.error_code.should.equal("VolumeInUse") + ex.exception.status.should.equal(400) + ex.exception.message.should.equal(f"Volume {volume.id} is currently attached to {instance_id}") + + volume.detach() + + volume.update() + volume.volume_state().should.equal("available") + + volume.delete() + + all_volumes = conn.get_all_volumes() + my_volume = [item for item in all_volumes if item.id == volume.id] + my_volume.should.have.length_of(0) + + @mock_ec2_deprecated def test_create_encrypted_volume_dryrun(): conn = boto.ec2.connect_to_region("us-east-1") From 76b9cbe16d76decef8becad643c1426dba2c927d Mon Sep 17 00:00:00 2001 From: mickeypash Date: Fri, 3 Apr 2020 02:14:14 +0100 Subject: [PATCH 194/658] Fix test --- tests/test_ec2/test_elastic_block_store.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/tests/test_ec2/test_elastic_block_store.py b/tests/test_ec2/test_elastic_block_store.py index 0e39d206905e..1182610e8a7b 100644 --- a/tests/test_ec2/test_elastic_block_store.py +++ b/tests/test_ec2/test_elastic_block_store.py @@ -13,7 +13,6 @@ import sure # noqa from moto import mock_ec2_deprecated, mock_ec2 -from moto.ec2.exceptions import VolumeInUseError from moto.ec2.models import OWNER_ID @@ -73,11 +72,11 @@ def test_delete_attached_volume(): # attempt to delete volume # assert raises VolumeInUseError - with assert_raises(VolumeInUseError) as ex: + with assert_raises(EC2ResponseError) as ex: volume.delete() ex.exception.error_code.should.equal("VolumeInUse") ex.exception.status.should.equal(400) - ex.exception.message.should.equal(f"Volume {volume.id} is currently attached to {instance_id}") + ex.exception.message.should.equal(f"Volume {volume.id} is currently attached to {instance.id}") volume.detach() From d3367b8a90b25fa2fab323889f72717054e63d54 Mon Sep 17 00:00:00 2001 From: mickeypash Date: Fri, 3 Apr 2020 02:27:46 +0100 Subject: [PATCH 195/658] Black formatting --- moto/ec2/exceptions.py | 5 ++--- tests/test_ec2/test_elastic_block_store.py | 4 +++- 2 files changed, 5 insertions(+), 4 deletions(-) diff --git a/moto/ec2/exceptions.py b/moto/ec2/exceptions.py index 4df507a0d3d8..5af4690aea35 100644 --- a/moto/ec2/exceptions.py +++ b/moto/ec2/exceptions.py @@ -232,12 +232,11 @@ 
def __init__(self, volume_id, instance_id): class VolumeInUseError(EC2ClientError): - def __init__(self, volume_id, instance_id): super(VolumeInUseError, self).__init__( "VolumeInUse", - "Volume {0} is currently attached to {1}" - .format(volume_id, instance_id)) + "Volume {0} is currently attached to {1}".format(volume_id, instance_id), + ) class InvalidDomainError(EC2ClientError): diff --git a/tests/test_ec2/test_elastic_block_store.py b/tests/test_ec2/test_elastic_block_store.py index 1182610e8a7b..ac9c7e3d99c0 100644 --- a/tests/test_ec2/test_elastic_block_store.py +++ b/tests/test_ec2/test_elastic_block_store.py @@ -76,7 +76,9 @@ def test_delete_attached_volume(): volume.delete() ex.exception.error_code.should.equal("VolumeInUse") ex.exception.status.should.equal(400) - ex.exception.message.should.equal(f"Volume {volume.id} is currently attached to {instance.id}") + ex.exception.message.should.equal( + f"Volume {volume.id} is currently attached to {instance.id}" + ) volume.detach() From 9ab02e17d528d461b5a05b7aafa623addae65966 Mon Sep 17 00:00:00 2001 From: Bert Blommers Date: Fri, 3 Apr 2020 10:30:05 +0100 Subject: [PATCH 196/658] #883 - Lambda - Add test to verify remove_permission functinonality --- moto/awslambda/models.py | 4 ++-- moto/awslambda/responses.py | 6 ++--- tests/test_awslambda/test_lambda.py | 36 +++++++++++++++++++++++++++++ 3 files changed, 40 insertions(+), 6 deletions(-) diff --git a/moto/awslambda/models.py b/moto/awslambda/models.py index 9cdf2397c9d1..589a790ae7c6 100644 --- a/moto/awslambda/models.py +++ b/moto/awslambda/models.py @@ -1006,11 +1006,11 @@ def untag_resource(self, resource, tagKeys): return True return False - def add_policy_statement(self, function_name, raw): + def add_permission(self, function_name, raw): fn = self.get_function(function_name) fn.policy.add_statement(raw) - def del_policy_statement(self, function_name, sid, revision=""): + def remove_permission(self, function_name, sid, revision=""): fn = self.get_function(function_name) fn.policy.del_statement(sid, revision) diff --git a/moto/awslambda/responses.py b/moto/awslambda/responses.py index ce6c93f16acb..4213840f6048 100644 --- a/moto/awslambda/responses.py +++ b/moto/awslambda/responses.py @@ -146,7 +146,7 @@ def _add_policy(self, request, full_url, headers): function_name = path.split("/")[-2] if self.lambda_backend.get_function(function_name): statement = self.body - self.lambda_backend.add_policy_statement(function_name, statement) + self.lambda_backend.add_permission(function_name, statement) return 200, {}, json.dumps({"Statement": statement}) else: return 404, {}, "{}" @@ -166,9 +166,7 @@ def _del_policy(self, request, full_url, headers, querystring): statement_id = path.split("/")[-1].split("?")[0] revision = querystring.get("RevisionId", "") if self.lambda_backend.get_function(function_name): - self.lambda_backend.del_policy_statement( - function_name, statement_id, revision - ) + self.lambda_backend.remove_permission(function_name, statement_id, revision) return 204, {}, "{}" else: return 404, {}, "{}" diff --git a/tests/test_awslambda/test_lambda.py b/tests/test_awslambda/test_lambda.py index eb8453e432f7..e67576518a13 100644 --- a/tests/test_awslambda/test_lambda.py +++ b/tests/test_awslambda/test_lambda.py @@ -1677,6 +1677,42 @@ def test_create_function_with_unknown_arn(): ) +@mock_lambda +def test_remove_function_permission(): + conn = boto3.client("lambda", _lambda_region) + zip_content = get_test_zip_file1() + conn.create_function( + FunctionName="testFunction", + 
Runtime="python2.7", + Role=(get_role_name()), + Handler="lambda_function.handler", + Code={"ZipFile": zip_content}, + Description="test lambda function", + Timeout=3, + MemorySize=128, + Publish=True, + ) + + conn.add_permission( + FunctionName="testFunction", + StatementId="1", + Action="lambda:InvokeFunction", + Principal="432143214321", + SourceArn="arn:aws:lambda:us-west-2:account-id:function:helloworld", + SourceAccount="123412341234", + EventSourceToken="blah", + Qualifier="2", + ) + + remove = conn.remove_permission( + FunctionName="testFunction", StatementId="1", Qualifier="2", + ) + remove["ResponseMetadata"]["HTTPStatusCode"].should.equal(204) + policy = conn.get_policy(FunctionName="testFunction", Qualifier="2")["Policy"] + policy = json.loads(policy) + policy["Statement"].should.equal([]) + + def create_invalid_lambda(role): conn = boto3.client("lambda", _lambda_region) zip_content = get_test_zip_file1() From a6864f483db1c1b292098a67381c6f20437a8dd2 Mon Sep 17 00:00:00 2001 From: mickeypash Date: Fri, 3 Apr 2020 14:17:55 +0100 Subject: [PATCH 197/658] Use Python 2 format --- tests/test_ec2/test_elastic_block_store.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/test_ec2/test_elastic_block_store.py b/tests/test_ec2/test_elastic_block_store.py index ac9c7e3d99c0..4bd2a8dfa0bf 100644 --- a/tests/test_ec2/test_elastic_block_store.py +++ b/tests/test_ec2/test_elastic_block_store.py @@ -77,7 +77,7 @@ def test_delete_attached_volume(): ex.exception.error_code.should.equal("VolumeInUse") ex.exception.status.should.equal(400) ex.exception.message.should.equal( - f"Volume {volume.id} is currently attached to {instance.id}" + "Volume {0} is currently attached to {1}".format(volume.id, instance.id) ) volume.detach() From 280db9df6c43f606721d07b51e104eda8e065313 Mon Sep 17 00:00:00 2001 From: Bert Blommers Date: Sat, 4 Apr 2020 14:09:38 +0100 Subject: [PATCH 198/658] #2800 - CognitoIdentity - Fix format of Identity ID --- moto/cognitoidentity/utils.py | 4 ++-- tests/test_cognitoidentity/test_cognitoidentity.py | 8 +++++--- tests/test_cognitoidentity/test_server.py | 1 - 3 files changed, 7 insertions(+), 6 deletions(-) diff --git a/moto/cognitoidentity/utils.py b/moto/cognitoidentity/utils.py index 6143d5121f0d..54016ad17b0c 100644 --- a/moto/cognitoidentity/utils.py +++ b/moto/cognitoidentity/utils.py @@ -1,5 +1,5 @@ -from moto.core.utils import get_random_hex +from uuid import uuid4 def get_random_identity_id(region): - return "{0}:{1}".format(region, get_random_hex(length=19)) + return "{0}:{1}".format(region, uuid4()) diff --git a/tests/test_cognitoidentity/test_cognitoidentity.py b/tests/test_cognitoidentity/test_cognitoidentity.py index 8eae183c659e..0ec7acfb0b93 100644 --- a/tests/test_cognitoidentity/test_cognitoidentity.py +++ b/tests/test_cognitoidentity/test_cognitoidentity.py @@ -7,6 +7,7 @@ from moto import mock_cognitoidentity from moto.cognitoidentity.utils import get_random_identity_id from moto.core import ACCOUNT_ID +from uuid import UUID @mock_cognitoidentity @@ -83,8 +84,10 @@ def test_describe_identity_pool_with_invalid_id_raises_error(): # testing a helper function def test_get_random_identity_id(): - assert len(get_random_identity_id("us-west-2")) > 0 - assert len(get_random_identity_id("us-west-2").split(":")[1]) == 19 + identity_id = get_random_identity_id("us-west-2") + region, id = identity_id.split(":") + region.should.equal("us-west-2") + UUID(id, version=4) # Will throw an error if it's not a valid UUID @mock_cognitoidentity @@ -96,7 
+99,6 @@ def test_get_id(): IdentityPoolId="us-west-2:12345", Logins={"someurl": "12345"}, ) - print(result) assert ( result.get("IdentityId", "").startswith("us-west-2") or result.get("ResponseMetadata").get("HTTPStatusCode") == 200 diff --git a/tests/test_cognitoidentity/test_server.py b/tests/test_cognitoidentity/test_server.py index 903dae290a86..8c4229f06485 100644 --- a/tests/test_cognitoidentity/test_server.py +++ b/tests/test_cognitoidentity/test_server.py @@ -48,6 +48,5 @@ def test_get_id(): }, ) - print(res.data) json_data = json.loads(res.data.decode("utf-8")) assert ":" in json_data["IdentityId"] From 16db824d8ae293ae99877b8c4ea5a73785c5e1a5 Mon Sep 17 00:00:00 2001 From: David Holroyd Date: Mon, 6 Apr 2020 00:19:19 +0100 Subject: [PATCH 199/658] Fix response XML structure --- moto/s3/responses.py | 18 ++++++++---------- 1 file changed, 8 insertions(+), 10 deletions(-) diff --git a/moto/s3/responses.py b/moto/s3/responses.py index 197cd90806c1..06e7353d347b 100644 --- a/moto/s3/responses.py +++ b/moto/s3/responses.py @@ -1868,18 +1868,16 @@ def _key_response_post(self, request, body, bucket_name, query, key_name): {% endfor %} """ -S3_DELETE_OBJECT_SUCCESS = """ - - 200 - OK - +S3_DELETE_OBJECT_SUCCESS = """ + + 200 + OK """ -S3_OBJECT_RESPONSE = """ - - {{ key.etag }} - {{ key.last_modified_ISO8601 }} - +S3_OBJECT_RESPONSE = """ + + {{ key.etag }} + {{ key.last_modified_ISO8601 }} """ S3_OBJECT_ACL_RESPONSE = """ From 09de93412e4ec3b197205f8b8e9ea9c8a57ab961 Mon Sep 17 00:00:00 2001 From: jess Date: Mon, 6 Apr 2020 17:21:26 +1000 Subject: [PATCH 200/658] Prevent JSON dumps error when dealing with complex types --- moto/dynamodbstreams/models.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/moto/dynamodbstreams/models.py b/moto/dynamodbstreams/models.py index dc6f0e0d3e6a..f62c49877a3d 100644 --- a/moto/dynamodbstreams/models.py +++ b/moto/dynamodbstreams/models.py @@ -7,7 +7,7 @@ from boto3 import Session from moto.core import BaseBackend, BaseModel -from moto.dynamodb2.models import dynamodb_backends +from moto.dynamodb2.models import dynamodb_backends, DynamoJsonEncoder class ShardIterator(BaseModel): @@ -137,7 +137,7 @@ def get_shard_iterator( def get_records(self, iterator_arn, limit): shard_iterator = self.shard_iterators[iterator_arn] - return json.dumps(shard_iterator.get(limit)) + return json.dumps(shard_iterator.get(limit), cls=DynamoJsonEncoder) dynamodbstreams_backends = {} From b6e73776d56ed47d4080c5a000bd3754492e1219 Mon Sep 17 00:00:00 2001 From: jess Date: Mon, 6 Apr 2020 18:41:46 +1000 Subject: [PATCH 201/658] alter testcase to trigger issue #2868 --- tests/test_dynamodbstreams/test_dynamodbstreams.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/tests/test_dynamodbstreams/test_dynamodbstreams.py b/tests/test_dynamodbstreams/test_dynamodbstreams.py index 8fad0ff23a0f..d4d619a56cf7 100644 --- a/tests/test_dynamodbstreams/test_dynamodbstreams.py +++ b/tests/test_dynamodbstreams/test_dynamodbstreams.py @@ -121,12 +121,14 @@ def test_get_records_empty(self): assert "Records" in resp assert len(resp["Records"]) == 0 + def test_get_records_seq(self): conn = boto3.client("dynamodb", region_name="us-east-1") conn.put_item( TableName="test-streams", - Item={"id": {"S": "entry1"}, "first_col": {"S": "foo"}}, + Item={"id": {"S": "entry1"}, "first_col": {"S": "foo"}} + ) conn.put_item( TableName="test-streams", @@ -134,6 +136,7 @@ def test_get_records_seq(self): "id": {"S": "entry1"}, "first_col": {"S": "bar"}, "second_col": 
{"S": "baz"}, + "a": {"L": [{"M": {"b": {"S": "bar1"}}}]} }, ) conn.delete_item(TableName="test-streams", Key={"id": {"S": "entry1"}}) From 4c2460ddfdf4dd7263bf2899df95ea3c903e2b84 Mon Sep 17 00:00:00 2001 From: jess Date: Mon, 6 Apr 2020 18:45:23 +1000 Subject: [PATCH 202/658] fix whitespace changes --- tests/test_dynamodbstreams/test_dynamodbstreams.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/tests/test_dynamodbstreams/test_dynamodbstreams.py b/tests/test_dynamodbstreams/test_dynamodbstreams.py index d4d619a56cf7..c75d66e7fbc1 100644 --- a/tests/test_dynamodbstreams/test_dynamodbstreams.py +++ b/tests/test_dynamodbstreams/test_dynamodbstreams.py @@ -121,14 +121,12 @@ def test_get_records_empty(self): assert "Records" in resp assert len(resp["Records"]) == 0 - def test_get_records_seq(self): conn = boto3.client("dynamodb", region_name="us-east-1") conn.put_item( TableName="test-streams", Item={"id": {"S": "entry1"}, "first_col": {"S": "foo"}} - ) conn.put_item( TableName="test-streams", From 49fd7988ab936d5d81ed00dd25c3b4e408566992 Mon Sep 17 00:00:00 2001 From: jess Date: Mon, 6 Apr 2020 19:55:54 +1000 Subject: [PATCH 203/658] make black happy --- tests/test_dynamodbstreams/test_dynamodbstreams.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/test_dynamodbstreams/test_dynamodbstreams.py b/tests/test_dynamodbstreams/test_dynamodbstreams.py index c75d66e7fbc1..065d7280e763 100644 --- a/tests/test_dynamodbstreams/test_dynamodbstreams.py +++ b/tests/test_dynamodbstreams/test_dynamodbstreams.py @@ -126,7 +126,7 @@ def test_get_records_seq(self): conn.put_item( TableName="test-streams", - Item={"id": {"S": "entry1"}, "first_col": {"S": "foo"}} + Item={"id": {"S": "entry1"}, "first_col": {"S": "foo"}}, ) conn.put_item( TableName="test-streams", @@ -134,7 +134,7 @@ def test_get_records_seq(self): "id": {"S": "entry1"}, "first_col": {"S": "bar"}, "second_col": {"S": "baz"}, - "a": {"L": [{"M": {"b": {"S": "bar1"}}}]} + "a": {"L": [{"M": {"b": {"S": "bar1"}}}]}, }, ) conn.delete_item(TableName="test-streams", Key={"id": {"S": "entry1"}}) From 81ca5c3ab0b62f1f2a7396a6e3e89e973d6774a6 Mon Sep 17 00:00:00 2001 From: usmankb Date: Mon, 6 Apr 2020 21:25:59 +0530 Subject: [PATCH 204/658] Implemented describe_instance_credit_specifications function in ec2 #2150 localstack --- moto/ec2/models.py | 6 ++++++ moto/ec2/responses/instances.py | 18 ++++++++++++++++++ tests/test_ec2/test_instances.py | 8 ++++++++ 3 files changed, 32 insertions(+) diff --git a/moto/ec2/models.py b/moto/ec2/models.py index bf4936d091f8..a4c15c56deda 100644 --- a/moto/ec2/models.py +++ b/moto/ec2/models.py @@ -937,6 +937,12 @@ def describe_instance_attribute(self, instance_id, attribute): value = getattr(instance, key) return instance, value + def describe_instance_credit_specifications(self,instance_ids): + queried_instances = [] + for instance in self.get_multi_instances_by_id(instance_ids): + queried_instances.append(instance) + return queried_instances + def all_instances(self, filters=None): instances = [] for reservation in self.all_reservations(): diff --git a/moto/ec2/responses/instances.py b/moto/ec2/responses/instances.py index 29c346f8242b..9e68eed7a6db 100644 --- a/moto/ec2/responses/instances.py +++ b/moto/ec2/responses/instances.py @@ -168,6 +168,12 @@ def describe_instance_attribute(self): return template.render(instance=instance, attribute=attribute, value=value) + def describe_instance_credit_specifications(self): + instance_ids = self._get_multi_param("InstanceId") + 
instance = self.ec2_backend.describe_instance_credit_specifications(instance_ids)
+        template = self.response_template(EC2_DESCRIBE_INSTANCE_CREDIT_SPECIFICATIONS)
+        return template.render(instances=instance)
+
     def modify_instance_attribute(self):
         handlers = [
             self._dot_value_instance_attribute_handler,
@@ -671,6 +677,18 @@ def _security_grp_instance_attribute_handler(self):
 """
 
+EC2_DESCRIBE_INSTANCE_CREDIT_SPECIFICATIONS = """<DescribeInstanceCreditSpecificationsResponse xmlns="http://ec2.amazonaws.com/doc/2016-11-15/">
+    <requestId>1b234b5c-d6ef-7gh8-90i1-j2345678901</requestId>
+    <instanceCreditSpecificationSet>
+    {% for instance in instances %}
+        <item>
+            <instanceId>{{ instance.id }}</instanceId>
+            <cpuCredits>standard</cpuCredits>
+        </item>
+    {% endfor %}
+    </instanceCreditSpecificationSet>
+</DescribeInstanceCreditSpecificationsResponse>"""
+
 EC2_DESCRIBE_INSTANCE_GROUPSET_ATTRIBUTE = """<DescribeInstanceAttributeResponse xmlns="http://ec2.amazonaws.com/doc/2013-10-15/">
     <requestId>59dbff89-35bd-4eac-99ed-be587EXAMPLE</requestId>
     <instanceId>{{ instance.id }}</instanceId>
diff --git a/tests/test_ec2/test_instances.py b/tests/test_ec2/test_instances.py
index 85ba0fe01bbd..f4fcbb1866ff 100644
--- a/tests/test_ec2/test_instances.py
+++ b/tests/test_ec2/test_instances.py
@@ -1165,6 +1165,14 @@ def test_describe_instance_status_with_instance_filter_deprecated():
     cm.exception.status.should.equal(400)
     cm.exception.request_id.should_not.be.none
 
+@mock_ec2
+def test_describe_instance_credit_specifications():
+    conn = boto3.client("ec2", region_name="us-west-1")
+
+    # We want to filter based on this one
+    reservation = conn.run_instances(ImageId="ami-1234abcd", MinCount=1, MaxCount=1)
+    result = conn.describe_instance_credit_specifications(InstanceIds=[reservation["Instances"][0]["InstanceId"]])
+    assert result['InstanceCreditSpecifications'][0]['InstanceId'] == reservation["Instances"][0]["InstanceId"]
 
 @mock_ec2
 def test_describe_instance_status_with_instance_filter():
From a845de114209ead7606c0a2a1690fd03b10dde04 Mon Sep 17 00:00:00 2001
From: David Holroyd
Date: Mon, 6 Apr 2020 21:01:43 +0100
Subject: [PATCH 205/658] PutObject and DeleteObject should produce no XML

S3 itself produces an empty body, with any response metadata in HTTP
headers only.
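
A minimal sketch of the client-visible behaviour this matches, using
boto3 and assuming a pre-existing bucket (the name "mybucket" is
illustrative only):

    import boto3

    s3 = boto3.client("s3")

    # PutObject: the ETag is surfaced through a response header,
    # not through an XML body.
    resp = s3.put_object(Bucket="mybucket", Key="key", Body=b"data")
    print(resp["ETag"])

    # DeleteObject: 204 No Content, with an empty body.
    resp = s3.delete_object(Bucket="mybucket", Key="key")
    print(resp["ResponseMetadata"]["HTTPStatusCode"])  # 204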
--- moto/s3/responses.py | 18 ++---------------- 1 file changed, 2 insertions(+), 16 deletions(-) diff --git a/moto/s3/responses.py b/moto/s3/responses.py index 06e7353d347b..22cd45c0834d 100644 --- a/moto/s3/responses.py +++ b/moto/s3/responses.py @@ -1232,9 +1232,8 @@ def _key_response_put(self, request, body, bucket_name, query, key_name, headers ) new_key.set_tagging(tagging) - template = self.response_template(S3_OBJECT_RESPONSE) response_headers.update(new_key.response_dict) - return 200, response_headers, template.render(key=new_key) + return 200, response_headers, "" def _key_response_head(self, bucket_name, query, key_name, headers): response_headers = {} @@ -1552,8 +1551,7 @@ def _key_response_delete(self, bucket_name, query, key_name): return 204, {}, "" version_id = query.get("versionId", [None])[0] self.backend.delete_key(bucket_name, key_name, version_id=version_id) - template = self.response_template(S3_DELETE_OBJECT_SUCCESS) - return 204, {}, template.render() + return 204, {}, "" def _complete_multipart_body(self, body): ps = minidom.parseString(body).getElementsByTagName("Part") @@ -1868,18 +1866,6 @@ def _key_response_post(self, request, body, bucket_name, query, key_name): {% endfor %} """ -S3_DELETE_OBJECT_SUCCESS = """ - - 200 - OK -""" - -S3_OBJECT_RESPONSE = """ - - {{ key.etag }} - {{ key.last_modified_ISO8601 }} - """ - S3_OBJECT_ACL_RESPONSE = """ From aae49493c466eb17d7c42cf6b41d739914271873 Mon Sep 17 00:00:00 2001 From: Bert Blommers Date: Tue, 7 Apr 2020 08:49:19 +0100 Subject: [PATCH 206/658] Linting --- moto/ec2/models.py | 2 +- moto/ec2/responses/instances.py | 4 +++- tests/test_ec2/test_instances.py | 11 +++++++++-- 3 files changed, 13 insertions(+), 4 deletions(-) diff --git a/moto/ec2/models.py b/moto/ec2/models.py index a4c15c56deda..83e12eea7853 100644 --- a/moto/ec2/models.py +++ b/moto/ec2/models.py @@ -937,7 +937,7 @@ def describe_instance_attribute(self, instance_id, attribute): value = getattr(instance, key) return instance, value - def describe_instance_credit_specifications(self,instance_ids): + def describe_instance_credit_specifications(self, instance_ids): queried_instances = [] for instance in self.get_multi_instances_by_id(instance_ids): queried_instances.append(instance) diff --git a/moto/ec2/responses/instances.py b/moto/ec2/responses/instances.py index 9e68eed7a6db..490ffb64278b 100644 --- a/moto/ec2/responses/instances.py +++ b/moto/ec2/responses/instances.py @@ -170,7 +170,9 @@ def describe_instance_attribute(self): def describe_instance_credit_specifications(self): instance_ids = self._get_multi_param("InstanceId") - instance = self.ec2_backend.describe_instance_credit_specifications(instance_ids) + instance = self.ec2_backend.describe_instance_credit_specifications( + instance_ids + ) template = self.response_template(EC2_DESCRIBE_INSTANCE_CREDIT_SPECIFICATIONS) return template.render(instances=instance) diff --git a/tests/test_ec2/test_instances.py b/tests/test_ec2/test_instances.py index f4fcbb1866ff..595faa5ba9af 100644 --- a/tests/test_ec2/test_instances.py +++ b/tests/test_ec2/test_instances.py @@ -1165,14 +1165,21 @@ def test_describe_instance_status_with_instance_filter_deprecated(): cm.exception.status.should.equal(400) cm.exception.request_id.should_not.be.none + @mock_ec2 def test_describe_instance_credit_specifications(): conn = boto3.client("ec2", region_name="us-west-1") # We want to filter based on this one reservation = conn.run_instances(ImageId="ami-1234abcd", MinCount=1, MaxCount=1) - result = 
conn.describe_instance_credit_specifications(
+        InstanceIds=[reservation["Instances"][0]["InstanceId"]]
+    )
+    assert (
+        result["InstanceCreditSpecifications"][0]["InstanceId"]
+        == reservation["Instances"][0]["InstanceId"]
+    )
 
 
 @mock_ec2
 def test_describe_instance_status_with_instance_filter():
From 856c07de63cd545e0836ec2083ee3836c5c56f71 Mon Sep 17 00:00:00 2001
From: usmankb
Date: Wed, 8 Apr 2020 03:18:42 +0530
Subject: [PATCH 207/658] aws apigateway create,get domain names

---
 moto/apigateway/exceptions.py            |  8 +++
 moto/apigateway/models.py                | 84 +++++++++++++++++++++++-
 moto/apigateway/responses.py             | 59 +++++++++++++++++
 moto/apigateway/urls.py                  |  2 +
 tests/test_apigateway/test_apigateway.py | 17 +++++
 5 files changed, 169 insertions(+), 1 deletion(-)

diff --git a/moto/apigateway/exceptions.py b/moto/apigateway/exceptions.py
index ccb870f52f00..24f06f3f1c58 100644
--- a/moto/apigateway/exceptions.py
+++ b/moto/apigateway/exceptions.py
@@ -119,3 +119,11 @@ def __init__(self):
         super(ApiKeyAlreadyExists, self).__init__(
             "ConflictException", "API Key already exists"
         )
+
+class DomainNameNotFound(RESTError):
+    code = 404
+
+    def __init__(self):
+        super(DomainNameNotFound, self).__init__(
+            "NotFoundException", "Invalid Domain Name specified"
+        )
diff --git a/moto/apigateway/models.py b/moto/apigateway/models.py
index 5b02e6204d86..7ca7e6315d61 100644
--- a/moto/apigateway/models.py
+++ b/moto/apigateway/models.py
@@ -34,6 +34,7 @@
     NoIntegrationDefined,
     NoMethodDefined,
     ApiKeyAlreadyExists,
+    DomainNameNotFound
 )
 
 STAGE_URL = "https://{api_id}.execute-api.{region_name}.amazonaws.com/{stage_name}"
@@ -463,7 +464,6 @@ def __init__(self, id, region_name, name, description, **kwargs):
         self.deployments = {}
         self.authorizers = {}
         self.stages = {}
-
         self.resources = {}
         self.add_child("/")  # Add default child
@@ -609,6 +609,51 @@ def delete_deployment(self, deployment_id):
         return self.deployments.pop(deployment_id)
 
 
+class DomainName(BaseModel, dict):
+    def __init__(self, domain_name, **kwargs):
+        super(DomainName, self).__init__()
+        self["domainName"] = domain_name
+        self["regionalDomainName"] = domain_name
+        self["distributionDomainName"] = domain_name
+        self["domainNameStatus"] = "AVAILABLE"
+        self["domainNameStatusMessage"] = "Domain Name Available"
+        self["regionalHostedZoneId"] = "Z2FDTNDATAQYW2"
+        self["distributionHostedZoneId"] = "Z2FDTNDATAQYW2"
+        self["certificateUploadDate"] = int(time.time())
+        if kwargs.get("certificate_name"):
+            self["certificateName"] = kwargs.get("certificate_name")
+        if kwargs.get("certificate_arn"):
+            self["certificateArn"] = kwargs.get("certificate_arn")
+        if kwargs.get("certificate_body"):
+            self["certificateBody"] = kwargs.get("certificate_body")
+        if kwargs.get("tags"):
+            self["tags"] = kwargs.get("tags")
+        if kwargs.get("security_policy"):
+            self["securityPolicy"] = kwargs.get("security_policy")
+        if kwargs.get("certificate_chain"):
+            self["certificateChain"] = kwargs.get("certificate_chain")
+        if kwargs.get("regional_certificate_name"):
+            self["regionalCertificateName"] = kwargs.get(
+                "regional_certificate_name"
+            )
+        if kwargs.get("certificate_private_key"):
+            self["certificatePrivateKey"] = kwargs.get(
+                "certificate_private_key"
+            )
+        if kwargs.get("regional_certificate_arn"):
+            self["regionalCertificateArn"] = kwargs.get(
+
"regional_certificate_arn" + ) + if kwargs.get("endpoint_configuration"): + self["endpointConfiguration"] = kwargs.get( + "endpoint_configuration" + ) + if kwargs.get("generate_cli_skeleton"): + self["generateCliSkeleton"] = kwargs.get( + "generate_cli_skeleton" + ) + + class APIGatewayBackend(BaseBackend): def __init__(self, region_name): super(APIGatewayBackend, self).__init__() @@ -616,6 +661,7 @@ def __init__(self, region_name): self.keys = {} self.usage_plans = {} self.usage_plan_keys = {} + self.domain_names = {} self.region_name = region_name def reset(self): @@ -1001,6 +1047,42 @@ def _uri_validator(self, uri): except Exception: return False + def create_domain_name(self, domain_name, + certificate_name=None, tags=None, + certificate_arn=None, certificate_body=None, + certificate_private_key=None, + certificate_chain=None, + regional_certificate_name=None, + regional_certificate_arn=None, + endpoint_configuration=None, + security_policy=None, + generate_cli_skeleton=None): + if not domain_name: + raise DomainNameNotFound() + + new_domain_name = DomainName( + domain_name=domain_name, + certificate_name=certificate_name, + certificate_arn=certificate_arn, + certificate_body=certificate_body, + certificate_private_key=certificate_private_key, + certificate_chain=certificate_chain, + regional_certificate_name=regional_certificate_name, + regional_certificate_arn=regional_certificate_arn, + endpoint_configuration=endpoint_configuration, + tags=tags, security_policy=security_policy, + generate_cli_skeleton=generate_cli_skeleton, + ) + + self.domain_names[domain_name] = new_domain_name + return new_domain_name + + def get_domain_names(self): + return list(self.domain_names.values()) + + def get_domain_name(self, domain_name): + return self.domain_names[domain_name] + apigateway_backends = {} for region_name in Session().get_available_regions("apigateway"): diff --git a/moto/apigateway/responses.py b/moto/apigateway/responses.py index f0ed6adc9ee4..ce3bcbb8e8e4 100644 --- a/moto/apigateway/responses.py +++ b/moto/apigateway/responses.py @@ -527,3 +527,62 @@ def usage_plan_key_individual(self, request, full_url, headers): usage_plan_id, key_id ) return 200, {}, json.dumps(usage_plan_response) + + def domain_names(self, request, full_url, headers): + self.setup_class(request, full_url, headers) + + try: + if self.method == "GET": + domain_names = self.backend.get_domain_names() + return 200, {}, json.dumps({"item": domain_names}) + + elif self.method == "POST": + domain_name = self._get_param("domainName") + certificate_name = self._get_param("certificateName") + tags = self._get_param("tags") + certificate_arn = self._get_param("certificateArn") + certificate_body = self._get_param("certificateBody") + certificate_private_key = self._get_param( + "certificatePrivateKey" + ) + + certificate_chain = self._get_param("certificateChain") + regional_certificate_name = self._get_param( + "regionalCertificateName" + ) + regional_certificate_arn = self._get_param( + "regionalCertificateArn" + ) + endpoint_configuration = self._get_param( + "endpointConfiguration" + ) + security_policy = self._get_param("securityPolicy") + generate_cli_skeleton = self._get_param( + "generateCliSkeleton" + ) + domain_name_resp = self.backend.create_domain_name( + domain_name, certificate_name, tags, certificate_arn, + certificate_body, certificate_private_key, + certificate_chain, regional_certificate_name, + regional_certificate_arn, endpoint_configuration, + security_policy, generate_cli_skeleton + ) + + return 200, 
{}, json.dumps(domain_name_resp)
+        except BadRequestException as e:
+            return self.error(
+                "com.amazonaws.dynamodb.v20111205#BadRequestException", e.message
+            )
+
+    def domain_name_induvidual(self, request, full_url, headers):
+        self.setup_class(request, full_url, headers)
+
+        url_path_parts = self.path.split("/")
+        domain_name = url_path_parts[2]
+        domain_names={}
+
+        if self.method == "GET":
+            if domain_name is not None:
+                domain_names = self.backend.get_domain_name(domain_name)
+
+        return 200, {}, json.dumps(domain_names)
diff --git a/moto/apigateway/urls.py b/moto/apigateway/urls.py
index 4ef6ae72bc4d..6c3b7f6bbc2d 100644
--- a/moto/apigateway/urls.py
+++ b/moto/apigateway/urls.py
@@ -21,6 +21,8 @@
     "{0}/apikeys$": APIGatewayResponse().apikeys,
     "{0}/apikeys/(?P<apikey>[^/]+)": APIGatewayResponse().apikey_individual,
     "{0}/usageplans$": APIGatewayResponse().usage_plans,
+    "{0}/domainnames$": APIGatewayResponse().domain_names,
+    "{0}/domainnames/(?P<domain_name>[^/]+)/?$": APIGatewayResponse().domain_name_induvidual,
     "{0}/usageplans/(?P<usage_plan_id>[^/]+)/?$": APIGatewayResponse().usage_plan_individual,
     "{0}/usageplans/(?P<usage_plan_id>[^/]+)/keys$": APIGatewayResponse().usage_plan_keys,
     "{0}/usageplans/(?P<usage_plan_id>[^/]+)/keys/(?P<api_key_id>[^/]+)/?$": APIGatewayResponse().usage_plan_key_individual,
diff --git a/tests/test_apigateway/test_apigateway.py b/tests/test_apigateway/test_apigateway.py
index 0952f267403e..22e062cc5022 100644
--- a/tests/test_apigateway/test_apigateway.py
+++ b/tests/test_apigateway/test_apigateway.py
@@ -1483,6 +1483,23 @@ def test_deployment():
     stage["description"].should.equal("_new_description_")
 
 
+@mock_apigateway
+def test_create_domain_names():
+    client = boto3.client("apigateway", region_name="us-west-2")
+    domain_name = "testDomain"
+    test_certificate_name = "test.certificate"
+    test_certificate_private_key = "testPrivateKey"
+    response = client.create_domain_name(domainName=domain_name, certificateName=test_certificate_name,
+                                         certificatePrivateKey=test_certificate_private_key)
+
+    response["domainName"].should.equal(domain_name)
+    response["certificateName"].should.equal(test_certificate_name)
+    result = client.get_domain_names()
+    result["items"][0]["domainName"].should.equal(domain_name)
+    result = client.get_domain_name(domainName=domain_name)
+    result["domainName"].should.equal(domain_name)
+
+
 @mock_apigateway
 def test_http_proxying_integration():
     responses.add(
From 0163eb6a9dfc7809159183c84a27bb7be67fd47d Mon Sep 17 00:00:00 2001
From: Theodore Wong
Date: Tue, 7 Apr 2020 15:32:44 -0700
Subject: [PATCH 208/658] Changed mock_ecs to support ecs.run_task calls with a default cluster

---
 moto/ecs/models.py               |  5 ++-
 tests/test_ecs/test_ecs_boto3.py | 65 ++++++++++++++++++++++++++++++++
 2 files changed, 69 insertions(+), 1 deletion(-)

diff --git a/moto/ecs/models.py b/moto/ecs/models.py
index 30e4687c410a..33d4dcf721cf 100644
--- a/moto/ecs/models.py
+++ b/moto/ecs/models.py
@@ -604,7 +604,10 @@ def deregister_task_definition(self, task_definition_str):
         raise Exception("{0} is not a task_definition".format(task_definition_name))
 
     def run_task(self, cluster_str, task_definition_str, count, overrides, started_by):
-        cluster_name = cluster_str.split("/")[-1]
+        if cluster_str:
+            cluster_name = cluster_str.split("/")[-1]
+        else:
+            cluster_name = "default"
         if cluster_name in self.clusters:
             cluster = self.clusters[cluster_name]
         else:
diff --git a/tests/test_ecs/test_ecs_boto3.py b/tests/test_ecs/test_ecs_boto3.py
index 69c920192550..7fd90b412de3 100644
--- a/tests/test_ecs/test_ecs_boto3.py
+++ b/tests/test_ecs/test_ecs_boto3.py
@@ -1122,6 +1122,71 @@ def test_run_task(): response["tasks"][0]["stoppedReason"].should.equal("") +@mock_ec2 +@mock_ecs +def test_run_task_default_cluster(): + client = boto3.client("ecs", region_name="us-east-1") + ec2 = boto3.resource("ec2", region_name="us-east-1") + + test_cluster_name = "default" + + _ = client.create_cluster(clusterName=test_cluster_name) + + test_instance = ec2.create_instances( + ImageId="ami-1234abcd", MinCount=1, MaxCount=1 + )[0] + + instance_id_document = json.dumps( + ec2_utils.generate_instance_identity_document(test_instance) + ) + + response = client.register_container_instance( + cluster=test_cluster_name, instanceIdentityDocument=instance_id_document + ) + + _ = client.register_task_definition( + family="test_ecs_task", + containerDefinitions=[ + { + "name": "hello_world", + "image": "docker/hello-world:latest", + "cpu": 1024, + "memory": 400, + "essential": True, + "environment": [ + {"name": "AWS_ACCESS_KEY_ID", "value": "SOME_ACCESS_KEY"} + ], + "logConfiguration": {"logDriver": "json-file"}, + } + ], + ) + response = client.run_task( + launchType="FARGATE", + overrides={}, + taskDefinition="test_ecs_task", + count=2, + startedBy="moto", + ) + len(response["tasks"]).should.equal(2) + response["tasks"][0]["taskArn"].should.contain( + "arn:aws:ecs:us-east-1:012345678910:task/" + ) + response["tasks"][0]["clusterArn"].should.equal( + "arn:aws:ecs:us-east-1:012345678910:cluster/default" + ) + response["tasks"][0]["taskDefinitionArn"].should.equal( + "arn:aws:ecs:us-east-1:012345678910:task-definition/test_ecs_task:1" + ) + response["tasks"][0]["containerInstanceArn"].should.contain( + "arn:aws:ecs:us-east-1:012345678910:container-instance/" + ) + response["tasks"][0]["overrides"].should.equal({}) + response["tasks"][0]["lastStatus"].should.equal("RUNNING") + response["tasks"][0]["desiredStatus"].should.equal("RUNNING") + response["tasks"][0]["startedBy"].should.equal("moto") + response["tasks"][0]["stoppedReason"].should.equal("") + + @mock_ec2 @mock_ecs def test_start_task(): From 54f51fc7c159d2f6993d3cce1561ccaba2bf3e9a Mon Sep 17 00:00:00 2001 From: Bert Blommers Date: Wed, 8 Apr 2020 10:49:58 +0100 Subject: [PATCH 209/658] DynamoDB - TransactWriteItems implementation --- moto/dynamodb2/models.py | 90 ++++++- moto/dynamodb2/responses.py | 24 +- tests/test_dynamodb2/test_dynamodb.py | 352 ++++++++++++++++++++++++++ 3 files changed, 459 insertions(+), 7 deletions(-) diff --git a/moto/dynamodb2/models.py b/moto/dynamodb2/models.py index 152e719c4047..c0e55bd5ba6d 100644 --- a/moto/dynamodb2/models.py +++ b/moto/dynamodb2/models.py @@ -1406,9 +1406,9 @@ def update_item( table_name, key, update_expression, - attribute_updates, expression_attribute_names, expression_attribute_values, + attribute_updates=None, expected=None, condition_expression=None, ): @@ -1516,6 +1516,94 @@ def describe_ttl(self, table_name): return table.ttl + def transact_write_items(self, transact_items): + # Create a backup in case any of the transactions fail + original_table_state = copy.deepcopy(self.tables) + try: + for item in transact_items: + if "ConditionCheck" in item: + item = item["ConditionCheck"] + key = item["Key"] + table_name = item["TableName"] + condition_expression = item.get("ConditionExpression", None) + expression_attribute_names = item.get( + "ExpressionAttributeNames", None + ) + expression_attribute_values = item.get( + "ExpressionAttributeValues", None + ) + current = self.get_item(table_name, key) + + condition_op = get_filter_expression( + 
condition_expression, + expression_attribute_names, + expression_attribute_values, + ) + if not condition_op.expr(current): + raise ValueError("The conditional request failed") + elif "Put" in item: + item = item["Put"] + attrs = item["Item"] + table_name = item["TableName"] + condition_expression = item.get("ConditionExpression", None) + expression_attribute_names = item.get( + "ExpressionAttributeNames", None + ) + expression_attribute_values = item.get( + "ExpressionAttributeValues", None + ) + self.put_item( + table_name, + attrs, + condition_expression=condition_expression, + expression_attribute_names=expression_attribute_names, + expression_attribute_values=expression_attribute_values, + ) + elif "Delete" in item: + item = item["Delete"] + key = item["Key"] + table_name = item["TableName"] + condition_expression = item.get("ConditionExpression", None) + expression_attribute_names = item.get( + "ExpressionAttributeNames", None + ) + expression_attribute_values = item.get( + "ExpressionAttributeValues", None + ) + self.delete_item( + table_name, + key, + condition_expression=condition_expression, + expression_attribute_names=expression_attribute_names, + expression_attribute_values=expression_attribute_values, + ) + elif "Update" in item: + item = item["Update"] + key = item["Key"] + table_name = item["TableName"] + update_expression = item["UpdateExpression"] + condition_expression = item.get("ConditionExpression", None) + expression_attribute_names = item.get( + "ExpressionAttributeNames", None + ) + expression_attribute_values = item.get( + "ExpressionAttributeValues", None + ) + self.update_item( + table_name, + key, + update_expression=update_expression, + condition_expression=condition_expression, + expression_attribute_names=expression_attribute_names, + expression_attribute_values=expression_attribute_values, + ) + else: + raise ValueError + except: # noqa: E722 Do not use bare except + # Rollback to the original state, and reraise the error + self.tables = original_table_state + raise + dynamodb_backends = {} for region in Session().get_available_regions("dynamodb"): diff --git a/moto/dynamodb2/responses.py b/moto/dynamodb2/responses.py index 78126f7f1739..9b13f20a6a10 100644 --- a/moto/dynamodb2/responses.py +++ b/moto/dynamodb2/responses.py @@ -757,12 +757,12 @@ def update_item(self): item = self.dynamodb_backend.update_item( name, key, - update_expression, - attribute_updates, - expression_attribute_names, - expression_attribute_values, - expected, - condition_expression, + update_expression=update_expression, + attribute_updates=attribute_updates, + expression_attribute_names=expression_attribute_names, + expression_attribute_values=expression_attribute_values, + expected=expected, + condition_expression=condition_expression, ) except InvalidUpdateExpression: er = "com.amazonaws.dynamodb.v20111205#ValidationException" @@ -925,3 +925,15 @@ def transact_get_items(self): result.update({"ConsumedCapacity": [v for v in consumed_capacity.values()]}) return dynamo_json_dump(result) + + def transact_write_items(self): + transact_items = self.body["TransactItems"] + try: + self.dynamodb_backend.transact_write_items(transact_items) + except ValueError: + er = "com.amazonaws.dynamodb.v20111205#ConditionalCheckFailedException" + return self.error( + er, "A condition specified in the operation could not be evaluated." 
+            )
+        response = {"ConsumedCapacity": [], "ItemCollectionMetrics": {}}
+        return dynamo_json_dump(response)
diff --git a/tests/test_dynamodb2/test_dynamodb.py b/tests/test_dynamodb2/test_dynamodb.py
index bec24c966ef2..90deab6bec7a 100644
--- a/tests/test_dynamodb2/test_dynamodb.py
+++ b/tests/test_dynamodb2/test_dynamodb.py
@@ -4177,3 +4177,355 @@ def test_gsi_verify_negative_number_order():
     [float(item["gsiK1SortKey"]) for item in resp["Items"]].should.equal(
         [-0.7, -0.6, 0.7]
     )
+
+
+@mock_dynamodb2
+def test_transact_write_items_put():
+    table_schema = {
+        "KeySchema": [{"AttributeName": "id", "KeyType": "HASH"}],
+        "AttributeDefinitions": [{"AttributeName": "id", "AttributeType": "S"},],
+    }
+    dynamodb = boto3.client("dynamodb", region_name="us-east-1")
+    dynamodb.create_table(
+        TableName="test-table", BillingMode="PAY_PER_REQUEST", **table_schema
+    )
+    # Put multiple items
+    dynamodb.transact_write_items(
+        TransactItems=[
+            {
+                "Put": {
+                    "Item": {"id": {"S": "foo{}".format(str(i))}, "foo": {"S": "bar"},},
+                    "TableName": "test-table",
+                }
+            }
+            for i in range(0, 5)
+        ]
+    )
+    # Assert all are present
+    items = dynamodb.scan(TableName="test-table")["Items"]
+    items.should.have.length_of(5)
+
+
+@mock_dynamodb2
+def test_transact_write_items_put_conditional_expressions():
+    table_schema = {
+        "KeySchema": [{"AttributeName": "id", "KeyType": "HASH"}],
+        "AttributeDefinitions": [{"AttributeName": "id", "AttributeType": "S"},],
+    }
+    dynamodb = boto3.client("dynamodb", region_name="us-east-1")
+    dynamodb.create_table(
+        TableName="test-table", BillingMode="PAY_PER_REQUEST", **table_schema
+    )
+    dynamodb.put_item(
+        TableName="test-table", Item={"id": {"S": "foo2"},},
+    )
+    # Put multiple items
+    with assert_raises(ClientError) as ex:
+        dynamodb.transact_write_items(
+            TransactItems=[
+                {
+                    "Put": {
+                        "Item": {
+                            "id": {"S": "foo{}".format(str(i))},
+                            "foo": {"S": "bar"},
+                        },
+                        "TableName": "test-table",
+                        "ConditionExpression": "#i <> :i",
+                        "ExpressionAttributeNames": {"#i": "id"},
+                        "ExpressionAttributeValues": {
+                            ":i": {
+                                "S": "foo2"
+                            }  # This item already exists, so the ConditionExpression should fail
+                        },
+                    }
+                }
+                for i in range(0, 5)
+            ]
+        )
+    # Assert the exception is correct
+    ex.exception.response["Error"]["Code"].should.equal(
+        "ConditionalCheckFailedException"
+    )
+    ex.exception.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400)
+    ex.exception.response["Error"]["Message"].should.equal(
+        "A condition specified in the operation could not be evaluated."
+ ) + # Assert all are present + items = dynamodb.scan(TableName="test-table")["Items"] + items.should.have.length_of(1) + items[0].should.equal({"id": {"S": "foo2"}}) + + +@mock_dynamodb2 +def test_transact_write_items_conditioncheck_passes(): + table_schema = { + "KeySchema": [{"AttributeName": "id", "KeyType": "HASH"}], + "AttributeDefinitions": [{"AttributeName": "id", "AttributeType": "S"},], + } + dynamodb = boto3.client("dynamodb", region_name="us-east-1") + dynamodb.create_table( + TableName="test-table", BillingMode="PAY_PER_REQUEST", **table_schema + ) + # Insert an item without email address + dynamodb.put_item( + TableName="test-table", Item={"id": {"S": "foo"},}, + ) + # Put an email address, after verifying it doesn't exist yet + dynamodb.transact_write_items( + TransactItems=[ + { + "ConditionCheck": { + "Key": {"id": {"S": "foo"}}, + "TableName": "test-table", + "ConditionExpression": "attribute_not_exists(#e)", + "ExpressionAttributeNames": {"#e": "email_address"}, + } + }, + { + "Put": { + "Item": { + "id": {"S": "foo"}, + "email_address": {"S": "test@moto.com"}, + }, + "TableName": "test-table", + } + }, + ] + ) + # Assert all are present + items = dynamodb.scan(TableName="test-table")["Items"] + items.should.have.length_of(1) + items[0].should.equal({"email_address": {"S": "test@moto.com"}, "id": {"S": "foo"}}) + + +@mock_dynamodb2 +def test_transact_write_items_conditioncheck_fails(): + table_schema = { + "KeySchema": [{"AttributeName": "id", "KeyType": "HASH"}], + "AttributeDefinitions": [{"AttributeName": "id", "AttributeType": "S"},], + } + dynamodb = boto3.client("dynamodb", region_name="us-east-1") + dynamodb.create_table( + TableName="test-table", BillingMode="PAY_PER_REQUEST", **table_schema + ) + # Insert an item with email address + dynamodb.put_item( + TableName="test-table", + Item={"id": {"S": "foo"}, "email_address": {"S": "test@moto.com"}}, + ) + # Try to put an email address, but verify whether it exists + # ConditionCheck should fail + with assert_raises(ClientError) as ex: + dynamodb.transact_write_items( + TransactItems=[ + { + "ConditionCheck": { + "Key": {"id": {"S": "foo"}}, + "TableName": "test-table", + "ConditionExpression": "attribute_not_exists(#e)", + "ExpressionAttributeNames": {"#e": "email_address"}, + } + }, + { + "Put": { + "Item": { + "id": {"S": "foo"}, + "email_address": {"S": "update@moto.com"}, + }, + "TableName": "test-table", + } + }, + ] + ) + # Assert the exception is correct + ex.exception.response["Error"]["Code"].should.equal( + "ConditionalCheckFailedException" + ) + ex.exception.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) + ex.exception.response["Error"]["Message"].should.equal( + "A condition specified in the operation could not be evaluated." 
+ ) + + # Assert the original email address is still present + items = dynamodb.scan(TableName="test-table")["Items"] + items.should.have.length_of(1) + items[0].should.equal({"email_address": {"S": "test@moto.com"}, "id": {"S": "foo"}}) + + +@mock_dynamodb2 +def test_transact_write_items_delete(): + table_schema = { + "KeySchema": [{"AttributeName": "id", "KeyType": "HASH"}], + "AttributeDefinitions": [{"AttributeName": "id", "AttributeType": "S"},], + } + dynamodb = boto3.client("dynamodb", region_name="us-east-1") + dynamodb.create_table( + TableName="test-table", BillingMode="PAY_PER_REQUEST", **table_schema + ) + # Insert an item + dynamodb.put_item( + TableName="test-table", Item={"id": {"S": "foo"},}, + ) + # Delete the item + dynamodb.transact_write_items( + TransactItems=[ + {"Delete": {"Key": {"id": {"S": "foo"}}, "TableName": "test-table",}} + ] + ) + # Assert the item is deleted + items = dynamodb.scan(TableName="test-table")["Items"] + items.should.have.length_of(0) + + +@mock_dynamodb2 +def test_transact_write_items_delete_with_successful_condition_expression(): + table_schema = { + "KeySchema": [{"AttributeName": "id", "KeyType": "HASH"}], + "AttributeDefinitions": [{"AttributeName": "id", "AttributeType": "S"},], + } + dynamodb = boto3.client("dynamodb", region_name="us-east-1") + dynamodb.create_table( + TableName="test-table", BillingMode="PAY_PER_REQUEST", **table_schema + ) + # Insert an item without email address + dynamodb.put_item( + TableName="test-table", Item={"id": {"S": "foo"},}, + ) + # ConditionExpression will pass - no email address has been specified yet + dynamodb.transact_write_items( + TransactItems=[ + { + "Delete": { + "Key": {"id": {"S": "foo"},}, + "TableName": "test-table", + "ConditionExpression": "attribute_not_exists(#e)", + "ExpressionAttributeNames": {"#e": "email_address"}, + } + } + ] + ) + # Assert the item is deleted + items = dynamodb.scan(TableName="test-table")["Items"] + items.should.have.length_of(0) + + +@mock_dynamodb2 +def test_transact_write_items_delete_with_failed_condition_expression(): + table_schema = { + "KeySchema": [{"AttributeName": "id", "KeyType": "HASH"}], + "AttributeDefinitions": [{"AttributeName": "id", "AttributeType": "S"},], + } + dynamodb = boto3.client("dynamodb", region_name="us-east-1") + dynamodb.create_table( + TableName="test-table", BillingMode="PAY_PER_REQUEST", **table_schema + ) + # Insert an item with email address + dynamodb.put_item( + TableName="test-table", + Item={"id": {"S": "foo"}, "email_address": {"S": "test@moto.com"}}, + ) + # Try to delete an item that does not have an email address + # ConditionCheck should fail + with assert_raises(ClientError) as ex: + dynamodb.transact_write_items( + TransactItems=[ + { + "Delete": { + "Key": {"id": {"S": "foo"},}, + "TableName": "test-table", + "ConditionExpression": "attribute_not_exists(#e)", + "ExpressionAttributeNames": {"#e": "email_address"}, + } + } + ] + ) + # Assert the exception is correct + ex.exception.response["Error"]["Code"].should.equal( + "ConditionalCheckFailedException" + ) + ex.exception.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) + ex.exception.response["Error"]["Message"].should.equal( + "A condition specified in the operation could not be evaluated." 
+ ) + # Assert the original item is still present + items = dynamodb.scan(TableName="test-table")["Items"] + items.should.have.length_of(1) + items[0].should.equal({"email_address": {"S": "test@moto.com"}, "id": {"S": "foo"}}) + + +@mock_dynamodb2 +def test_transact_write_items_update(): + table_schema = { + "KeySchema": [{"AttributeName": "id", "KeyType": "HASH"}], + "AttributeDefinitions": [{"AttributeName": "id", "AttributeType": "S"},], + } + dynamodb = boto3.client("dynamodb", region_name="us-east-1") + dynamodb.create_table( + TableName="test-table", BillingMode="PAY_PER_REQUEST", **table_schema + ) + # Insert an item + dynamodb.put_item(TableName="test-table", Item={"id": {"S": "foo"}}) + # Update the item + dynamodb.transact_write_items( + TransactItems=[ + { + "Update": { + "Key": {"id": {"S": "foo"}}, + "TableName": "test-table", + "UpdateExpression": "SET #e = :v", + "ExpressionAttributeNames": {"#e": "email_address"}, + "ExpressionAttributeValues": {":v": {"S": "test@moto.com"}}, + } + } + ] + ) + # Assert the item is updated + items = dynamodb.scan(TableName="test-table")["Items"] + items.should.have.length_of(1) + items[0].should.equal({"id": {"S": "foo"}, "email_address": {"S": "test@moto.com"}}) + + +@mock_dynamodb2 +def test_transact_write_items_update_with_failed_condition_expression(): + table_schema = { + "KeySchema": [{"AttributeName": "id", "KeyType": "HASH"}], + "AttributeDefinitions": [{"AttributeName": "id", "AttributeType": "S"},], + } + dynamodb = boto3.client("dynamodb", region_name="us-east-1") + dynamodb.create_table( + TableName="test-table", BillingMode="PAY_PER_REQUEST", **table_schema + ) + # Insert an item with email address + dynamodb.put_item( + TableName="test-table", + Item={"id": {"S": "foo"}, "email_address": {"S": "test@moto.com"}}, + ) + # Try to update an item that does not have an email address + # ConditionCheck should fail + with assert_raises(ClientError) as ex: + dynamodb.transact_write_items( + TransactItems=[ + { + "Update": { + "Key": {"id": {"S": "foo"}}, + "TableName": "test-table", + "UpdateExpression": "SET #e = :v", + "ConditionExpression": "attribute_not_exists(#e)", + "ExpressionAttributeNames": {"#e": "email_address"}, + "ExpressionAttributeValues": {":v": {"S": "update@moto.com"}}, + } + } + ] + ) + # Assert the exception is correct + ex.exception.response["Error"]["Code"].should.equal( + "ConditionalCheckFailedException" + ) + ex.exception.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) + ex.exception.response["Error"]["Message"].should.equal( + "A condition specified in the operation could not be evaluated." 
+ ) + # Assert the original item is still present + items = dynamodb.scan(TableName="test-table")["Items"] + items.should.have.length_of(1) + items[0].should.equal({"email_address": {"S": "test@moto.com"}, "id": {"S": "foo"}}) From 8237fdaff0562246d12abf9dd6588643db7f7105 Mon Sep 17 00:00:00 2001 From: Bert Blommers Date: Wed, 8 Apr 2020 11:06:30 +0100 Subject: [PATCH 210/658] Linting --- moto/apigateway/exceptions.py | 1 + moto/apigateway/models.py | 58 +++++++++++------------- moto/apigateway/responses.py | 39 ++++++++-------- tests/test_apigateway/test_apigateway.py | 7 ++- 4 files changed, 51 insertions(+), 54 deletions(-) diff --git a/moto/apigateway/exceptions.py b/moto/apigateway/exceptions.py index 24f06f3f1c58..4a808945c0da 100644 --- a/moto/apigateway/exceptions.py +++ b/moto/apigateway/exceptions.py @@ -120,6 +120,7 @@ def __init__(self): "ConflictException", "API Key already exists" ) + class DomainNameNotFound(RESTError): code = 404 diff --git a/moto/apigateway/models.py b/moto/apigateway/models.py index 7ca7e6315d61..6bef6f0191f4 100644 --- a/moto/apigateway/models.py +++ b/moto/apigateway/models.py @@ -34,7 +34,7 @@ NoIntegrationDefined, NoMethodDefined, ApiKeyAlreadyExists, - DomainNameNotFound + DomainNameNotFound, ) STAGE_URL = "https://{api_id}.execute-api.{region_name}.amazonaws.com/{stage_name}" @@ -609,12 +609,12 @@ def delete_deployment(self, deployment_id): return self.deployments.pop(deployment_id) -class DomainName(BaseModel,dict): +class DomainName(BaseModel, dict): def __init__(self, domain_name, **kwargs): super(DomainName, self).__init__() self["domainName"] = domain_name - self["regionalDomainName"] = domain_name, - self["distributionDomainName"] = domain_name, + self["regionalDomainName"] = domain_name + self["distributionDomainName"] = domain_name self["domainNameStatus"] = "AVAILABLE" self["domainNameStatusMessage"] = "Domain Name Available" self["regionalHostedZoneId"] = "Z2FDTNDATAQYW2" @@ -627,31 +627,21 @@ def __init__(self, domain_name, **kwargs): if kwargs.get("certificate_body"): self["certificateBody"] = kwargs.get("certificate_body") if kwargs.get("tags"): - self["tags"] = kwargs.get("tags" ) + self["tags"] = kwargs.get("tags") if kwargs.get("security_policy"): self["securityPolicy"] = kwargs.get("security_policy") if kwargs.get("certificate_chain"): self["certificateChain"] = kwargs.get("certificate_chain") if kwargs.get("regional_certificate_name"): - self["regionalCertificateName"] = kwargs.get( - "regional_certificate_name" - ) + self["regionalCertificateName"] = kwargs.get("regional_certificate_name") if kwargs.get("certificate_private_key"): - self["certificatePrivateKey"] = kwargs.get( - "certificate_private_key" - ) + self["certificatePrivateKey"] = kwargs.get("certificate_private_key") if kwargs.get("regional_certificate_arn"): - self["regionalCertificateArn"] = kwargs.get( - "regional_certificate_arn" - ) + self["regionalCertificateArn"] = kwargs.get("regional_certificate_arn") if kwargs.get("endpoint_configuration"): - self["endpointConfiguration"] = kwargs.get( - "endpoint_configuration" - ) + self["endpointConfiguration"] = kwargs.get("endpoint_configuration") if kwargs.get("generate_cli_skeleton"): - self["generateCliSkeleton"] = kwargs.get( - "generate_cli_skeleton" - ) + self["generateCliSkeleton"] = kwargs.get("generate_cli_skeleton") class APIGatewayBackend(BaseBackend): @@ -1047,16 +1037,21 @@ def _uri_validator(self, uri): except Exception: return False - def create_domain_name(self, domain_name, - certificate_name=None, 
tags=None, - certificate_arn=None, certificate_body=None, - certificate_private_key=None, - certificate_chain=None, - regional_certificate_name=None, - regional_certificate_arn=None, - endpoint_configuration=None, - security_policy=None, - generate_cli_skeleton=None): + def create_domain_name( + self, + domain_name, + certificate_name=None, + tags=None, + certificate_arn=None, + certificate_body=None, + certificate_private_key=None, + certificate_chain=None, + regional_certificate_name=None, + regional_certificate_arn=None, + endpoint_configuration=None, + security_policy=None, + generate_cli_skeleton=None, + ): if not domain_name: raise DomainNameNotFound() @@ -1070,7 +1065,8 @@ def create_domain_name(self, domain_name, regional_certificate_name=regional_certificate_name, regional_certificate_arn=regional_certificate_arn, endpoint_configuration=endpoint_configuration, - tags=tags, security_policy=security_policy, + tags=tags, + security_policy=security_policy, generate_cli_skeleton=generate_cli_skeleton, ) diff --git a/moto/apigateway/responses.py b/moto/apigateway/responses.py index ce3bcbb8e8e4..a3587e97abd9 100644 --- a/moto/apigateway/responses.py +++ b/moto/apigateway/responses.py @@ -542,30 +542,27 @@ def domain_names(self, request, full_url, headers): tags = self._get_param("tags") certificate_arn = self._get_param("certificateArn") certificate_body = self._get_param("certificateBody") - certificate_private_key = self._get_param( - "certificatePrivateKey" - ) + certificate_private_key = self._get_param("certificatePrivateKey") certificate_chain = self._get_param("certificateChain") - regional_certificate_name = self._get_param( - "regionalCertificateName" - ) - regional_certificate_arn = self._get_param( - "regionalCertificateArn" - ) - endpoint_configuration = self._get_param( - "endpointConfiguration" - ) + regional_certificate_name = self._get_param("regionalCertificateName") + regional_certificate_arn = self._get_param("regionalCertificateArn") + endpoint_configuration = self._get_param("endpointConfiguration") security_policy = self._get_param("securityPolicy") - generate_cli_skeleton = self._get_param( - "generateCliSkeleton" - ) + generate_cli_skeleton = self._get_param("generateCliSkeleton") domain_name_resp = self.backend.create_domain_name( - domain_name, certificate_name, tags, certificate_arn, - certificate_body, certificate_private_key, - certificate_chain, regional_certificate_name, - regional_certificate_arn, endpoint_configuration, - security_policy, generate_cli_skeleton + domain_name, + certificate_name, + tags, + certificate_arn, + certificate_body, + certificate_private_key, + certificate_chain, + regional_certificate_name, + regional_certificate_arn, + endpoint_configuration, + security_policy, + generate_cli_skeleton, ) return 200, {}, json.dumps(domain_name_resp) @@ -579,7 +576,7 @@ def domain_name_induvidual(self, request, full_url, headers): url_path_parts = self.path.split("/") domain_name = url_path_parts[2] - domain_names={} + domain_names = {} if self.method == "GET": if domain_name is not None: diff --git a/tests/test_apigateway/test_apigateway.py b/tests/test_apigateway/test_apigateway.py index 22e062cc5022..29b07b7abd17 100644 --- a/tests/test_apigateway/test_apigateway.py +++ b/tests/test_apigateway/test_apigateway.py @@ -1489,8 +1489,11 @@ def test_create_domain_names(): domain_name = "testDomain" test_certificate_name = "test.certificate" test_certificate_private_key = "testPrivateKey" - response = client.create_domain_name(domainName=domain_name, 
certificateName=test_certificate_name, - certificatePrivateKey=test_certificate_private_key) + response = client.create_domain_name( + domainName=domain_name, + certificateName=test_certificate_name, + certificatePrivateKey=test_certificate_private_key, + ) response["domainName"].should.equal(domain_name) response["certificateName"].should.equal(test_certificate_name) From 5c7e0b56afaeeafda053f9f15022d4520f9b2e2e Mon Sep 17 00:00:00 2001 From: Bert Blommers Date: Wed, 8 Apr 2020 13:53:53 +0100 Subject: [PATCH 211/658] #2877 - Ensure NetworkInterfaces are assigned to the default Subnet --- moto/ec2/models.py | 9 +++++- tests/test_ec2/test_instances.py | 2 +- tests/test_ec2/test_subnets.py | 47 ++++++++++++++++++++++++++++++++ 3 files changed, 56 insertions(+), 2 deletions(-) diff --git a/moto/ec2/models.py b/moto/ec2/models.py index be39bab28a28..2611c2f1aa9b 100644 --- a/moto/ec2/models.py +++ b/moto/ec2/models.py @@ -775,7 +775,14 @@ def prep_nics(self, nic_spec, private_ip=None, associate_public_ip=None): if "SubnetId" in nic: subnet = self.ec2_backend.get_subnet(nic["SubnetId"]) else: - subnet = None + # Get default Subnet + subnet = [ + subnet + for subnet in self.ec2_backend.get_all_subnets( + filters={"availabilityZone": self._placement.zone} + ) + if subnet.default_for_az + ][0] group_id = nic.get("SecurityGroupId") group_ids = [group_id] if group_id else [] diff --git a/tests/test_ec2/test_instances.py b/tests/test_ec2/test_instances.py index 85ba0fe01bbd..fe1631223efb 100644 --- a/tests/test_ec2/test_instances.py +++ b/tests/test_ec2/test_instances.py @@ -71,7 +71,7 @@ def test_instance_launch_and_terminate(): instance.id.should.equal(instance.id) instance.state.should.equal("running") instance.launch_time.should.equal("2014-01-01T05:00:00.000Z") - instance.vpc_id.should.equal(None) + instance.vpc_id.shouldnt.equal(None) instance.placement.should.equal("us-east-1a") root_device_name = instance.root_device_name diff --git a/tests/test_ec2/test_subnets.py b/tests/test_ec2/test_subnets.py index 7bb57aab48e2..a16693d5f17d 100644 --- a/tests/test_ec2/test_subnets.py +++ b/tests/test_ec2/test_subnets.py @@ -599,3 +599,50 @@ def validate_subnet_details_after_creating_eni( for eni in enis_created: client.delete_network_interface(NetworkInterfaceId=eni["NetworkInterfaceId"]) client.delete_subnet(SubnetId=subnet["SubnetId"]) + + +@mock_ec2 +def test_run_instances_should_attach_to_default_subnet(): + ec2 = boto3.resource("ec2", region_name="us-west-1") + client = boto3.client("ec2", region_name="us-west-1") + ec2.create_security_group(GroupName="sg01", Description="Test security group sg01") + # run_instances + instances = client.run_instances( + MinCount=1, + MaxCount=1, + SecurityGroups=["sg01"], + TagSpecifications=[ + { + "ResourceType": "instance", + "Tags": [{"Key": "Name", "Value": "test-01"},], + } + ], + ) + default_subnet_id = client.describe_subnets()["Subnets"][0]["SubnetId"] + instances["Instances"][0]["NetworkInterfaces"][0]["SubnetId"].should.equal( + default_subnet_id + ) + + +@mock_ec2 +def test_describe_subnets_where_network_interface_has_no_subnets_attached(): + # https://github.com/spulec/moto/issues/2877 + # create security groups + ec2 = boto3.resource("ec2", region_name="us-west-1") + client = boto3.client("ec2", region_name="us-west-1") + ec2.create_security_group(GroupName="sg01", Description="Test security group sg01") + # run_instances + client.run_instances( + MinCount=1, + MaxCount=1, + SecurityGroups=["sg01"], + TagSpecifications=[ + { + "ResourceType": 
"instance", + "Tags": [{"Key": "Name", "Value": "test-01"},], + } + ], + ) + # describe_subnets + subnets = client.describe_subnets()["Subnets"] + subnets[0]["AvailableIpAddressCount"].should.equal(4090) From 8475804a8b37ea11fcdd5acde02dec1f6ca31b9b Mon Sep 17 00:00:00 2001 From: Bert Blommers Date: Wed, 8 Apr 2020 14:02:35 +0100 Subject: [PATCH 212/658] Simplify tests --- tests/test_ec2/test_subnets.py | 40 +++++----------------------------- 1 file changed, 5 insertions(+), 35 deletions(-) diff --git a/tests/test_ec2/test_subnets.py b/tests/test_ec2/test_subnets.py index a16693d5f17d..eae0bc468d09 100644 --- a/tests/test_ec2/test_subnets.py +++ b/tests/test_ec2/test_subnets.py @@ -603,46 +603,16 @@ def validate_subnet_details_after_creating_eni( @mock_ec2 def test_run_instances_should_attach_to_default_subnet(): + # https://github.com/spulec/moto/issues/2877 ec2 = boto3.resource("ec2", region_name="us-west-1") client = boto3.client("ec2", region_name="us-west-1") ec2.create_security_group(GroupName="sg01", Description="Test security group sg01") # run_instances - instances = client.run_instances( - MinCount=1, - MaxCount=1, - SecurityGroups=["sg01"], - TagSpecifications=[ - { - "ResourceType": "instance", - "Tags": [{"Key": "Name", "Value": "test-01"},], - } - ], - ) - default_subnet_id = client.describe_subnets()["Subnets"][0]["SubnetId"] + instances = client.run_instances(MinCount=1, MaxCount=1, SecurityGroups=["sg01"],) + # Assert subnet is created appropriately + subnets = client.describe_subnets()["Subnets"] + default_subnet_id = subnets[0]["SubnetId"] instances["Instances"][0]["NetworkInterfaces"][0]["SubnetId"].should.equal( default_subnet_id ) - - -@mock_ec2 -def test_describe_subnets_where_network_interface_has_no_subnets_attached(): - # https://github.com/spulec/moto/issues/2877 - # create security groups - ec2 = boto3.resource("ec2", region_name="us-west-1") - client = boto3.client("ec2", region_name="us-west-1") - ec2.create_security_group(GroupName="sg01", Description="Test security group sg01") - # run_instances - client.run_instances( - MinCount=1, - MaxCount=1, - SecurityGroups=["sg01"], - TagSpecifications=[ - { - "ResourceType": "instance", - "Tags": [{"Key": "Name", "Value": "test-01"},], - } - ], - ) - # describe_subnets - subnets = client.describe_subnets()["Subnets"] subnets[0]["AvailableIpAddressCount"].should.equal(4090) From 414fcf7bbd0ac261b83928f5eec9166ef4748aa3 Mon Sep 17 00:00:00 2001 From: Bert Blommers Date: Wed, 8 Apr 2020 15:14:39 +0100 Subject: [PATCH 213/658] Fix AvailibilityZones in CF tests --- .../test_cloudformation_stack_integration.py | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/tests/test_cloudformation/test_cloudformation_stack_integration.py b/tests/test_cloudformation/test_cloudformation_stack_integration.py index e501796600fe..67ef0af9ba71 100644 --- a/tests/test_cloudformation/test_cloudformation_stack_integration.py +++ b/tests/test_cloudformation/test_cloudformation_stack_integration.py @@ -495,7 +495,7 @@ def test_autoscaling_group_with_elb(): "my-as-group": { "Type": "AWS::AutoScaling::AutoScalingGroup", "Properties": { - "AvailabilityZones": ["us-east1"], + "AvailabilityZones": ["us-east-1a"], "LaunchConfigurationName": {"Ref": "my-launch-config"}, "MinSize": "2", "MaxSize": "2", @@ -522,7 +522,7 @@ def test_autoscaling_group_with_elb(): "my-elb": { "Type": "AWS::ElasticLoadBalancing::LoadBalancer", "Properties": { - "AvailabilityZones": ["us-east1"], + "AvailabilityZones": ["us-east-1a"], 
"Listeners": [ { "LoadBalancerPort": "80", @@ -545,10 +545,10 @@ def test_autoscaling_group_with_elb(): web_setup_template_json = json.dumps(web_setup_template) - conn = boto.cloudformation.connect_to_region("us-west-1") + conn = boto.cloudformation.connect_to_region("us-east-1") conn.create_stack("web_stack", template_body=web_setup_template_json) - autoscale_conn = boto.ec2.autoscale.connect_to_region("us-west-1") + autoscale_conn = boto.ec2.autoscale.connect_to_region("us-east-1") autoscale_group = autoscale_conn.get_all_groups()[0] autoscale_group.launch_config_name.should.contain("my-launch-config") autoscale_group.load_balancers[0].should.equal("my-elb") @@ -557,7 +557,7 @@ def test_autoscaling_group_with_elb(): autoscale_conn.get_all_launch_configurations().should.have.length_of(1) # Confirm the ELB was actually created - elb_conn = boto.ec2.elb.connect_to_region("us-west-1") + elb_conn = boto.ec2.elb.connect_to_region("us-east-1") elb_conn.get_all_load_balancers().should.have.length_of(1) stack = conn.describe_stacks()[0] @@ -584,7 +584,7 @@ def test_autoscaling_group_with_elb(): elb_resource.physical_resource_id.should.contain("my-elb") # confirm the instances were created with the right tags - ec2_conn = boto.ec2.connect_to_region("us-west-1") + ec2_conn = boto.ec2.connect_to_region("us-east-1") reservations = ec2_conn.get_all_reservations() len(reservations).should.equal(1) reservation = reservations[0] @@ -604,7 +604,7 @@ def test_autoscaling_group_update(): "my-as-group": { "Type": "AWS::AutoScaling::AutoScalingGroup", "Properties": { - "AvailabilityZones": ["us-west-1"], + "AvailabilityZones": ["us-west-1a"], "LaunchConfigurationName": {"Ref": "my-launch-config"}, "MinSize": "2", "MaxSize": "2", From af57cfc7ec122668d492964dbe29f49afb60f26f Mon Sep 17 00:00:00 2001 From: usmankb Date: Wed, 8 Apr 2020 21:54:26 +0530 Subject: [PATCH 214/658] Added more tests and coverage --- moto/apigateway/exceptions.py | 10 +++++ moto/apigateway/models.py | 22 ++++++---- moto/apigateway/responses.py | 43 ++++++++++++++------ tests/test_apigateway/test_apigateway.py | 51 +++++++++++++++++++++++- 4 files changed, 103 insertions(+), 23 deletions(-) diff --git a/moto/apigateway/exceptions.py b/moto/apigateway/exceptions.py index 24f06f3f1c58..c9c90cea5133 100644 --- a/moto/apigateway/exceptions.py +++ b/moto/apigateway/exceptions.py @@ -120,6 +120,16 @@ def __init__(self): "ConflictException", "API Key already exists" ) + +class InvalidDomainName(BadRequestException): + code = 404 + + def __init__(self): + super(InvalidDomainName, self).__init__( + "BadRequestException", "No Domain Name specified" + ) + + class DomainNameNotFound(RESTError): code = 404 diff --git a/moto/apigateway/models.py b/moto/apigateway/models.py index 7ca7e6315d61..7707bd9d5828 100644 --- a/moto/apigateway/models.py +++ b/moto/apigateway/models.py @@ -34,7 +34,8 @@ NoIntegrationDefined, NoMethodDefined, ApiKeyAlreadyExists, - DomainNameNotFound + DomainNameNotFound, + InvalidDomainName ) STAGE_URL = "https://{api_id}.execute-api.{region_name}.amazonaws.com/{stage_name}" @@ -1047,25 +1048,26 @@ def _uri_validator(self, uri): except Exception: return False - def create_domain_name(self, domain_name, - certificate_name=None, tags=None, - certificate_arn=None, certificate_body=None, - certificate_private_key=None, + def create_domain_name(self,domain_name, + certificate_name=None,certificate_private_key=None, + tags=None, certificate_arn=None, + certificate_body=None, certificate_chain=None, regional_certificate_name=None, 
regional_certificate_arn=None, endpoint_configuration=None, security_policy=None, generate_cli_skeleton=None): + if not domain_name: - raise DomainNameNotFound() + raise InvalidDomainName() new_domain_name = DomainName( domain_name=domain_name, certificate_name=certificate_name, + certificate_private_key=certificate_private_key, certificate_arn=certificate_arn, certificate_body=certificate_body, - certificate_private_key=certificate_private_key, certificate_chain=certificate_chain, regional_certificate_name=regional_certificate_name, regional_certificate_arn=regional_certificate_arn, @@ -1081,7 +1083,11 @@ def get_domain_names(self): return list(self.domain_names.values()) def get_domain_name(self, domain_name): - return self.domain_names[domain_name] + domain_info = self.domain_names.get(domain_name) + if domain_info is None: + raise DomainNameNotFound + else: + return self.domain_names[domain_name] apigateway_backends = {} diff --git a/moto/apigateway/responses.py b/moto/apigateway/responses.py index ce3bcbb8e8e4..ec05c605e77d 100644 --- a/moto/apigateway/responses.py +++ b/moto/apigateway/responses.py @@ -11,6 +11,8 @@ AuthorizerNotFoundException, StageNotFoundException, ApiKeyAlreadyExists, + DomainNameNotFound, + InvalidDomainName ) API_KEY_SOURCES = ["AUTHORIZER", "HEADER"] @@ -561,17 +563,22 @@ def domain_names(self, request, full_url, headers): "generateCliSkeleton" ) domain_name_resp = self.backend.create_domain_name( - domain_name, certificate_name, tags, certificate_arn, - certificate_body, certificate_private_key, - certificate_chain, regional_certificate_name, - regional_certificate_arn, endpoint_configuration, - security_policy, generate_cli_skeleton + domain_name, certificate_name, + certificate_private_key,tags, certificate_arn, + certificate_body, certificate_chain, + regional_certificate_name, regional_certificate_arn, + endpoint_configuration, security_policy, + generate_cli_skeleton ) - return 200, {}, json.dumps(domain_name_resp) - except BadRequestException as e: - return self.error( - "com.amazonaws.dynamodb.v20111205#BadRequestException", e.message + + except InvalidDomainName as error: + return ( + error.code, + {}, + '{{"message":"{0}","code":"{1}"}}'.format( + error.message, error.error_type + ), ) def domain_name_induvidual(self, request, full_url, headers): @@ -580,9 +587,19 @@ def domain_name_induvidual(self, request, full_url, headers): url_path_parts = self.path.split("/") domain_name = url_path_parts[2] domain_names={} + try: + if self.method == "GET": + if domain_name is not None: + domain_names = self.backend.get_domain_name(domain_name) + return 200, {}, json.dumps(domain_names) + + except DomainNameNotFound as error: + return ( + error.code, + {}, + '{{"message":"{0}","code":"{1}"}}'.format( + error.message, error.error_type + ), + ) - if self.method == "GET": - if domain_name is not None: - domain_names = self.backend.get_domain_name(domain_name) - return 200, {}, json.dumps(domain_names) diff --git a/tests/test_apigateway/test_apigateway.py b/tests/test_apigateway/test_apigateway.py index 22e062cc5022..accd0fad2652 100644 --- a/tests/test_apigateway/test_apigateway.py +++ b/tests/test_apigateway/test_apigateway.py @@ -1489,15 +1489,62 @@ def test_create_domain_names(): domain_name = "testDomain" test_certificate_name = "test.certificate" test_certificate_private_key = "testPrivateKey" - response = client.create_domain_name(domainName=domain_name, certificateName=test_certificate_name, - certificatePrivateKey=test_certificate_private_key) + # success 
case with valid params
+    response = client.create_domain_name(domainName=domain_name,
+                                         certificateName=test_certificate_name,
+                                         certificatePrivateKey=test_certificate_private_key)
 
     response["domainName"].should.equal(domain_name)
     response["certificateName"].should.equal(test_certificate_name)
+    # without a domain name it should throw a BadRequestException
+    with assert_raises(ClientError) as ex:
+        client.create_domain_name(domainName="")
+
+    ex.exception.response["Error"]["Message"].should.equal(
+        "No Domain Name specified")
+    ex.exception.response["Error"]["Code"].should.equal(
+        "BadRequestException")
+
+
+@mock_apigateway
+def test_get_domain_names():
+    client = boto3.client("apigateway", region_name="us-west-2")
+    # without any domain names already present
+    result = client.get_domain_names()
+    result["items"].should.equal([])
+    domain_name = "testDomain"
+    test_certificate_name = "test.certificate"
+    response = client.create_domain_name(domainName=domain_name,
+                                         certificateName=test_certificate_name)
+
+    response["domainName"].should.equal(domain_name)
+    response["certificateName"].should.equal(test_certificate_name)
+    response["domainNameStatus"].should.equal("AVAILABLE")
+    # after adding a new domain name
     result = client.get_domain_names()
     result["items"][0]["domainName"].should.equal(domain_name)
+    result["items"][0]["certificateName"].should.equal(test_certificate_name)
+    result["items"][0]["domainNameStatus"].should.equal("AVAILABLE")
+
+
+@mock_apigateway
+def test_get_domain_name():
+    client = boto3.client("apigateway", region_name="us-west-2")
+    domain_name = "testDomain"
+    # querying an invalid domain name which is not present
+    with assert_raises(ClientError) as ex:
+        client.get_domain_name(domainName=domain_name)
+
+    ex.exception.response["Error"]["Message"].should.equal(
+        "Invalid Domain Name specified")
+    ex.exception.response["Error"]["Code"].should.equal(
+        "NotFoundException")
+    # adding a domain name
+    client.create_domain_name(domainName=domain_name)
+    # retrieving the data of the added domain name.
     
result = client.get_domain_name(domainName=domain_name) result["domainName"].should.equal(domain_name) + result["domainNameStatus"].should.equal("AVAILABLE") @mock_apigateway From 82311087f442dea0ea9f177efcce1851e66c2f18 Mon Sep 17 00:00:00 2001 From: usmankb Date: Wed, 8 Apr 2020 22:04:48 +0530 Subject: [PATCH 215/658] linting --- moto/apigateway/responses.py | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/moto/apigateway/responses.py b/moto/apigateway/responses.py index e27982b2aa26..8bef1f13d55d 100644 --- a/moto/apigateway/responses.py +++ b/moto/apigateway/responses.py @@ -591,7 +591,7 @@ def domain_name_induvidual(self, request, full_url, headers): url_path_parts = self.path.split("/") domain_name = url_path_parts[2] - domain_names={} + domain_names = {} try: if self.method == "GET": if domain_name is not None: @@ -605,6 +605,3 @@ def domain_name_induvidual(self, request, full_url, headers): error.message, error.error_type ), ) - - - From 1654280e43fe227faa3df79836182e5089557fc2 Mon Sep 17 00:00:00 2001 From: Bert Blommers Date: Thu, 9 Apr 2020 08:12:44 +0100 Subject: [PATCH 216/658] Linting --- moto/apigateway/models.py | 2 +- moto/apigateway/responses.py | 22 ++++++---------------- tests/test_apigateway/test_apigateway.py | 17 ++++++++--------- 3 files changed, 15 insertions(+), 26 deletions(-) diff --git a/moto/apigateway/models.py b/moto/apigateway/models.py index d376db5ce730..16462e2789cb 100644 --- a/moto/apigateway/models.py +++ b/moto/apigateway/models.py @@ -35,7 +35,7 @@ NoMethodDefined, ApiKeyAlreadyExists, DomainNameNotFound, - InvalidDomainName + InvalidDomainName, ) STAGE_URL = "https://{api_id}.execute-api.{region_name}.amazonaws.com/{stage_name}" diff --git a/moto/apigateway/responses.py b/moto/apigateway/responses.py index 8bef1f13d55d..e4723f0d46f5 100644 --- a/moto/apigateway/responses.py +++ b/moto/apigateway/responses.py @@ -12,7 +12,7 @@ StageNotFoundException, ApiKeyAlreadyExists, DomainNameNotFound, - InvalidDomainName + InvalidDomainName, ) API_KEY_SOURCES = ["AUTHORIZER", "HEADER"] @@ -544,23 +544,13 @@ def domain_names(self, request, full_url, headers): tags = self._get_param("tags") certificate_arn = self._get_param("certificateArn") certificate_body = self._get_param("certificateBody") - certificate_private_key = self._get_param( - "certificatePrivateKey" - ) + certificate_private_key = self._get_param("certificatePrivateKey") certificate_chain = self._get_param("certificateChain") - regional_certificate_name = self._get_param( - "regionalCertificateName" - ) - regional_certificate_arn = self._get_param( - "regionalCertificateArn" - ) - endpoint_configuration = self._get_param( - "endpointConfiguration" - ) + regional_certificate_name = self._get_param("regionalCertificateName") + regional_certificate_arn = self._get_param("regionalCertificateArn") + endpoint_configuration = self._get_param("endpointConfiguration") security_policy = self._get_param("securityPolicy") - generate_cli_skeleton = self._get_param( - "generateCliSkeleton" - ) + generate_cli_skeleton = self._get_param("generateCliSkeleton") domain_name_resp = self.backend.create_domain_name( domain_name, certificate_name, diff --git a/tests/test_apigateway/test_apigateway.py b/tests/test_apigateway/test_apigateway.py index ec1049ac5355..a1a380974e9e 100644 --- a/tests/test_apigateway/test_apigateway.py +++ b/tests/test_apigateway/test_apigateway.py @@ -1501,10 +1501,8 @@ def test_create_domain_names(): with assert_raises(ClientError) as ex: 
client.create_domain_name(domainName="")
 
-    ex.exception.response["Error"]["Message"].should.equal(
-        "No Domain Name specified")
-    ex.exception.response["Error"]["Code"].should.equal(
-        "BadRequestException")
+    ex.exception.response["Error"]["Message"].should.equal("No Domain Name specified")
+    ex.exception.response["Error"]["Code"].should.equal("BadRequestException")
 
 
 @mock_apigateway
@@ -1515,8 +1513,9 @@ def test_get_domain_names():
     result["items"].should.equal([])
     domain_name = "testDomain"
     test_certificate_name = "test.certificate"
-    response = client.create_domain_name(domainName=domain_name,
-                                         certificateName=test_certificate_name)
+    response = client.create_domain_name(
+        domainName=domain_name, certificateName=test_certificate_name
+    )
 
     response["domainName"].should.equal(domain_name)
     response["certificateName"].should.equal(test_certificate_name)
@@ -1537,9 +1536,9 @@ def test_get_domain_name():
         client.get_domain_name(domainName=domain_name)
 
     ex.exception.response["Error"]["Message"].should.equal(
-        "Invalid Domain Name specified")
-    ex.exception.response["Error"]["Code"].should.equal(
-        "NotFoundException")
+        "Invalid Domain Name specified"
+    )
+    ex.exception.response["Error"]["Code"].should.equal("NotFoundException")
     # adding a domain name
     client.create_domain_name(domainName=domain_name)
     # retrieving the data of the added domain name.
From 7a9cdd4fd24978ba127e0c41a558d2ef740e0975 Mon Sep 17 00:00:00 2001
From: usmankb
Date: Sat, 11 Apr 2020 08:37:00 +0530
Subject: [PATCH 217/658] Add missing zoneId param to the describe-availability-zones response

---
 moto/ec2/responses/availability_zones_and_regions.py |  1 +
 .../test_ec2/test_availability_zones_and_regions.py  | 12 ++++++++++++
 2 files changed, 13 insertions(+)

diff --git a/moto/ec2/responses/availability_zones_and_regions.py b/moto/ec2/responses/availability_zones_and_regions.py
index d63e2f4adfc5..28cc3a495673 100644
--- a/moto/ec2/responses/availability_zones_and_regions.py
+++ b/moto/ec2/responses/availability_zones_and_regions.py
@@ -35,6 +35,7 @@ def describe_regions(self):
            <zoneName>{{ zone.name }}</zoneName>
            <zoneState>available</zoneState>
            <regionName>{{ zone.region_name }}</regionName>
+           <zoneId>{{ zone.zone_id }}</zoneId>
        </item>
    {% endfor %}
diff --git a/tests/test_ec2/test_availability_zones_and_regions.py b/tests/test_ec2/test_availability_zones_and_regions.py
index d5355f3b1129..bec9459e8a15 100644
--- a/tests/test_ec2/test_availability_zones_and_regions.py
+++ b/tests/test_ec2/test_availability_zones_and_regions.py
@@ -52,3 +52,15 @@ def test_boto3_availability_zones():
     resp = conn.describe_availability_zones()
     for rec in resp["AvailabilityZones"]:
         rec["ZoneName"].should.contain(region)
+
+
+@mock_ec2
+def test_boto3_zoneId_in_availability_zones():
+    conn = boto3.client("ec2", "us-east-1")
+    resp = conn.describe_availability_zones()
+    for rec in resp["AvailabilityZones"]:
+        rec.get("ZoneId").should.contain("use1")
+    conn = boto3.client("ec2", "us-west-1")
+    resp = conn.describe_availability_zones()
+    for rec in resp["AvailabilityZones"]:
+        rec.get("ZoneId").should.contain("usw1")
From 593e5dc86ab8a608656a9a4c7330257b1653b5ff Mon Sep 17 00:00:00 2001
From: DenverJ
Date: Sun, 12 Apr 2020 13:44:16 +1000
Subject: [PATCH 218/658] Add instance-id filter to describe_auto_scaling_instances

---
 moto/autoscaling/models.py                 |  6 +++--
 moto/autoscaling/responses.py              |  4 +++-
 tests/test_autoscaling/test_autoscaling.py | 28 +++++++++++++++++++++-
 3 files changed, 34 insertions(+), 4 deletions(-)

diff --git a/moto/autoscaling/models.py b/moto/autoscaling/models.py
index 45ee7d192f25..84ae9c76b265 100644
--- 
a/moto/autoscaling/models.py +++ b/moto/autoscaling/models.py @@ -655,10 +655,12 @@ def delete_auto_scaling_group(self, group_name): self.set_desired_capacity(group_name, 0) self.autoscaling_groups.pop(group_name, None) - def describe_auto_scaling_instances(self): + def describe_auto_scaling_instances(self, instance_ids): instance_states = [] for group in self.autoscaling_groups.values(): - instance_states.extend(group.instance_states) + instance_states.extend( + [x for x in group.instance_states if not instance_ids or x.instance.id in instance_ids] + ) return instance_states def attach_instances(self, group_name, instance_ids): diff --git a/moto/autoscaling/responses.py b/moto/autoscaling/responses.py index 83e2f7d5ae43..41c79edb4cea 100644 --- a/moto/autoscaling/responses.py +++ b/moto/autoscaling/responses.py @@ -226,7 +226,9 @@ def create_or_update_tags(self): return template.render() def describe_auto_scaling_instances(self): - instance_states = self.autoscaling_backend.describe_auto_scaling_instances() + instance_states = self.autoscaling_backend.describe_auto_scaling_instances( + instance_ids=self._get_multi_param("InstanceIds.member") + ) template = self.response_template(DESCRIBE_AUTOSCALING_INSTANCES_TEMPLATE) return template.render(instance_states=instance_states) diff --git a/tests/test_autoscaling/test_autoscaling.py b/tests/test_autoscaling/test_autoscaling.py index 2e72553818d5..094708ec9d3c 100644 --- a/tests/test_autoscaling/test_autoscaling.py +++ b/tests/test_autoscaling/test_autoscaling.py @@ -843,13 +843,39 @@ def test_describe_autoscaling_instances_boto3(): NewInstancesProtectedFromScaleIn=True, ) + response = client.describe_auto_scaling_instances() + len(response["AutoScalingInstances"]).should.equal(5) + for instance in response["AutoScalingInstances"]: + instance["AutoScalingGroupName"].should.equal("test_asg") + instance["AvailabilityZone"].should.equal("us-east-1a") + instance["ProtectedFromScaleIn"].should.equal(True) + + +@mock_autoscaling +def test_describe_autoscaling_instances_instanceid_filter(): + mocked_networking = setup_networking() + client = boto3.client("autoscaling", region_name="us-east-1") + _ = client.create_launch_configuration( + LaunchConfigurationName="test_launch_configuration" + ) + _ = client.create_auto_scaling_group( + AutoScalingGroupName="test_asg", + LaunchConfigurationName="test_launch_configuration", + MinSize=0, + MaxSize=20, + DesiredCapacity=5, + VPCZoneIdentifier=mocked_networking["subnet1"], + NewInstancesProtectedFromScaleIn=True, + ) + response = client.describe_auto_scaling_groups(AutoScalingGroupNames=["test_asg"]) instance_ids = [ instance["InstanceId"] for instance in response["AutoScalingGroups"][0]["Instances"] ] - response = client.describe_auto_scaling_instances(InstanceIds=instance_ids) + response = client.describe_auto_scaling_instances(InstanceIds=instance_ids[0:2]) # Filter by first 2 of 5 + len(response["AutoScalingInstances"]).should.equal(2) for instance in response["AutoScalingInstances"]: instance["AutoScalingGroupName"].should.equal("test_asg") instance["AvailabilityZone"].should.equal("us-east-1a") From 965046aa39bce3c57d3648bdadc21c8b97599a3b Mon Sep 17 00:00:00 2001 From: DenverJ Date: Sun, 12 Apr 2020 17:08:40 +1000 Subject: [PATCH 219/658] Fix formatting --- moto/autoscaling/models.py | 6 +++++- tests/test_autoscaling/test_autoscaling.py | 4 +++- 2 files changed, 8 insertions(+), 2 deletions(-) diff --git a/moto/autoscaling/models.py b/moto/autoscaling/models.py index 84ae9c76b265..88577433ed9a 100644 
--- a/moto/autoscaling/models.py +++ b/moto/autoscaling/models.py @@ -659,7 +659,11 @@ def describe_auto_scaling_instances(self, instance_ids): instance_states = [] for group in self.autoscaling_groups.values(): instance_states.extend( - [x for x in group.instance_states if not instance_ids or x.instance.id in instance_ids] + [ + x + for x in group.instance_states + if not instance_ids or x.instance.id in instance_ids + ] ) return instance_states diff --git a/tests/test_autoscaling/test_autoscaling.py b/tests/test_autoscaling/test_autoscaling.py index 094708ec9d3c..5cf3dc6ffd22 100644 --- a/tests/test_autoscaling/test_autoscaling.py +++ b/tests/test_autoscaling/test_autoscaling.py @@ -874,7 +874,9 @@ def test_describe_autoscaling_instances_instanceid_filter(): for instance in response["AutoScalingGroups"][0]["Instances"] ] - response = client.describe_auto_scaling_instances(InstanceIds=instance_ids[0:2]) # Filter by first 2 of 5 + response = client.describe_auto_scaling_instances( + InstanceIds=instance_ids[0:2] + ) # Filter by first 2 of 5 len(response["AutoScalingInstances"]).should.equal(2) for instance in response["AutoScalingInstances"]: instance["AutoScalingGroupName"].should.equal("test_asg") From 79e63e3bcff9f8ba4f0246ed1c4310191bed6d83 Mon Sep 17 00:00:00 2001 From: usmankb Date: Sun, 12 Apr 2020 17:49:22 +0530 Subject: [PATCH 220/658] Added implementation for create-model,get-models,get-model in api gateway --- moto/apigateway/exceptions.py | 36 ++++++ moto/apigateway/models.py | 90 +++++++++++++- moto/apigateway/responses.py | 68 +++++++++++ moto/apigateway/urls.py | 2 + tests/test_apigateway/test_apigateway.py | 142 +++++++++++++++++++++++ 5 files changed, 337 insertions(+), 1 deletion(-) diff --git a/moto/apigateway/exceptions.py b/moto/apigateway/exceptions.py index c9c90cea5133..8f6d21aa0646 100644 --- a/moto/apigateway/exceptions.py +++ b/moto/apigateway/exceptions.py @@ -137,3 +137,39 @@ def __init__(self): super(DomainNameNotFound, self).__init__( "NotFoundException", "Invalid Domain Name specified" ) + + +class InvalidRestApiId(BadRequestException): + code = 404 + + def __init__(self): + super(InvalidRestApiId, self).__init__( + "BadRequestException", "No Rest API Id specified" + ) + + +class InvalidModelName(BadRequestException): + code = 404 + + def __init__(self): + super(InvalidModelName, self).__init__( + "BadRequestException", "No Model Name specified" + ) + + +class RestAPINotFound(RESTError): + code = 404 + + def __init__(self): + super(RestAPINotFound, self).__init__( + "NotFoundException", "Invalid Rest API Id specified" + ) + + +class ModelNotFound(RESTError): + code = 404 + + def __init__(self): + super(ModelNotFound, self).__init__( + "NotFoundException", "Invalid Model Name specified" + ) diff --git a/moto/apigateway/models.py b/moto/apigateway/models.py index 16462e2789cb..5ce95742e222 100644 --- a/moto/apigateway/models.py +++ b/moto/apigateway/models.py @@ -36,6 +36,10 @@ ApiKeyAlreadyExists, DomainNameNotFound, InvalidDomainName, + InvalidRestApiId, + InvalidModelName, + RestAPINotFound, + ModelNotFound ) STAGE_URL = "https://{api_id}.execute-api.{region_name}.amazonaws.com/{stage_name}" @@ -466,6 +470,7 @@ def __init__(self, id, region_name, name, description, **kwargs): self.authorizers = {} self.stages = {} self.resources = {} + self.models = {} self.add_child("/") # Add default child def __repr__(self): @@ -494,6 +499,27 @@ def add_child(self, path, parent_id=None): self.resources[child_id] = child return child + def add_model(self, + name, + 
description=None, + schema=None, + content_type=None, + cli_input_json=None, + generate_cli_skeleton=None): + model_id = create_id() + new_model = Model( + id=model_id, + name=name, + description=description, + schema=schema, + content_type=content_type, + cli_input_json=cli_input_json, + generate_cli_skeleton=generate_cli_skeleton) + + self.models[name] = new_model + return new_model + + def get_resource_for_path(self, path_after_stage_name): for resource in self.resources.values(): if resource.get_path() == path_after_stage_name: @@ -645,6 +671,24 @@ def __init__(self, domain_name, **kwargs): self["generateCliSkeleton"] = kwargs.get("generate_cli_skeleton") +class Model(BaseModel,dict): + def __init__(self, id, name, **kwargs): + super(Model, self).__init__() + self["id"] = id + self["name"] = name + if kwargs.get("description"): + self["description"] = kwargs.get("description") + if kwargs.get("schema"): + self["schema"] = kwargs.get("schema") + if kwargs.get("content_type"): + self["contentType"] = kwargs.get("content_type") + if kwargs.get("cli_input_json"): + self["cliInputJson"] = kwargs.get("cli_input_json") + if kwargs.get("generate_cli_skeleton"): + self["generateCliSkeleton"] = kwargs.get("generate_cli_skeleton") + + + class APIGatewayBackend(BaseBackend): def __init__(self, region_name): super(APIGatewayBackend, self).__init__() @@ -653,6 +697,7 @@ def __init__(self, region_name): self.usage_plans = {} self.usage_plan_keys = {} self.domain_names = {} + self.models = {} self.region_name = region_name def reset(self): @@ -682,7 +727,9 @@ def create_rest_api( return rest_api def get_rest_api(self, function_id): - rest_api = self.apis[function_id] + rest_api = self.apis.get(function_id) + if rest_api is None: + raise RestAPINotFound() return rest_api def list_apis(self): @@ -1085,6 +1132,47 @@ def get_domain_name(self, domain_name): else: return self.domain_names[domain_name] + def create_model(self, + rest_api_id, + name, + content_type, + description=None, + schema=None, + cli_input_json=None, + generate_cli_skeleton=None): + + if not rest_api_id: + raise InvalidRestApiId + if not name: + raise InvalidModelName + + api = self.get_rest_api(rest_api_id) + new_model = api.add_model( + name=name, + description=description, + schema=schema, + content_type=content_type, + cli_input_json=cli_input_json, + generate_cli_skeleton=generate_cli_skeleton) + + return new_model + + def get_models(self, rest_api_id): + if not rest_api_id: + raise InvalidRestApiId + api = self.get_rest_api(rest_api_id) + models = api.models.values() + return list(models) + + def get_model(self, rest_api_id, model_name): + if not rest_api_id: + raise InvalidRestApiId + api = self.get_rest_api(rest_api_id) + model = api.models.get(model_name) + if model is None: + raise ModelNotFound + return model + apigateway_backends = {} for region_name in Session().get_available_regions("apigateway"): diff --git a/moto/apigateway/responses.py b/moto/apigateway/responses.py index e4723f0d46f5..c18b7f6c4c92 100644 --- a/moto/apigateway/responses.py +++ b/moto/apigateway/responses.py @@ -13,6 +13,10 @@ ApiKeyAlreadyExists, DomainNameNotFound, InvalidDomainName, + InvalidRestApiId, + InvalidModelName, + RestAPINotFound, + ModelNotFound ) API_KEY_SOURCES = ["AUTHORIZER", "HEADER"] @@ -595,3 +599,67 @@ def domain_name_induvidual(self, request, full_url, headers): error.message, error.error_type ), ) + + def models(self,request, full_url, headers): + self.setup_class(request, full_url, headers) + rest_api_id = 
self.path.replace("/restapis/", "", 1).split("/")[0] + + try: + if self.method == "GET": + models = self.backend.get_models( + rest_api_id + ) + return 200, {}, json.dumps({"item": models}) + + elif self.method == "POST": + name = self._get_param("name") + description = self._get_param("description") + schema = self._get_param("schema") + content_type = self._get_param("contentType") + cli_input_json = self._get_param("cliInputJson") + generate_cli_skeleton = self._get_param( + "generateCliSkeleton" + ) + model = self.backend.create_model( + rest_api_id, + name, + content_type, + description, + schema, + cli_input_json, + generate_cli_skeleton + ) + + return 200, {}, json.dumps(model) + + except (InvalidRestApiId, InvalidModelName,RestAPINotFound) as error: + return ( + error.code, + {}, + '{{"message":"{0}","code":"{1}"}}'.format( + error.message, error.error_type + ), + ) + + def model_induvidual(self, request, full_url, headers): + self.setup_class(request, full_url, headers) + url_path_parts = self.path.split("/") + rest_api_id = url_path_parts[2] + model_name = url_path_parts[4] + model_info = {} + try: + if self.method == "GET": + model_info = self.backend.get_model( + rest_api_id, + model_name + ) + return 200, {}, json.dumps(model_info) + except (ModelNotFound, RestAPINotFound, InvalidRestApiId, + InvalidModelName) as error: + return ( + error.code, + {}, + '{{"message":"{0}","code":"{1}"}}'.format( + error.message, error.error_type + ), + ) \ No newline at end of file diff --git a/moto/apigateway/urls.py b/moto/apigateway/urls.py index 6c3b7f6bbc2d..751d8ae65374 100644 --- a/moto/apigateway/urls.py +++ b/moto/apigateway/urls.py @@ -22,6 +22,8 @@ "{0}/apikeys/(?P[^/]+)": APIGatewayResponse().apikey_individual, "{0}/usageplans$": APIGatewayResponse().usage_plans, "{0}/domainnames$": APIGatewayResponse().domain_names, + "{0}/restapis/(?P[^/]+)/models": APIGatewayResponse().models, + "{0}/restapis/(?P[^/]+)/models/(?P[^/]+)/?$": APIGatewayResponse().model_induvidual, "{0}/domainnames/(?P[^/]+)/?$": APIGatewayResponse().domain_name_induvidual, "{0}/usageplans/(?P[^/]+)/?$": APIGatewayResponse().usage_plan_individual, "{0}/usageplans/(?P[^/]+)/keys$": APIGatewayResponse().usage_plan_keys, diff --git a/tests/test_apigateway/test_apigateway.py b/tests/test_apigateway/test_apigateway.py index a1a380974e9e..3a6b75104a7f 100644 --- a/tests/test_apigateway/test_apigateway.py +++ b/tests/test_apigateway/test_apigateway.py @@ -1547,6 +1547,148 @@ def test_get_domain_name(): result["domainNameStatus"].should.equal("AVAILABLE") +@mock_apigateway +def test_create_model(): + client = boto3.client("apigateway", region_name="us-west-2") + response = client.create_rest_api(name="my_api", + description="this is my api" + ) + rest_api_id = response["id"] + dummy_rest_api_id = 'a12b3c4d' + model_name = "testModel" + description = "test model" + content_type = 'application/json' + # success case with valid params + response = client.create_model( + restApiId=rest_api_id, + name=model_name, + description=description, + contentType=content_type + ) + response["name"].should.equal(model_name) + response["description"].should.equal(description) + + # with an invalid rest_api_id it should throw NotFoundException + with assert_raises(ClientError) as ex: + client.create_model( + restApiId=dummy_rest_api_id, + name=model_name, + description=description, + contentType=content_type + ) + ex.exception.response["Error"]["Message"].should.equal( + "Invalid Rest API Id specified" + ) + 
ex.exception.response["Error"]["Code"].should.equal( + "NotFoundException" + ) + + with assert_raises(ClientError) as ex: + client.create_model( + restApiId=rest_api_id, + name="", + description=description, + contentType=content_type + ) + + ex.exception.response["Error"]["Message"].should.equal( + "No Model Name specified" + ) + ex.exception.response["Error"]["Code"].should.equal( + "BadRequestException" + ) + + +@mock_apigateway +def test_get_api_models(): + client = boto3.client("apigateway", region_name="us-west-2") + response = client.create_rest_api( + name="my_api", + description="this is my api" + ) + rest_api_id = response["id"] + model_name = "testModel" + description = "test model" + content_type = 'application/json' + # when no models are present + result = client.get_models( + restApiId=rest_api_id + ) + result["items"].should.equal([]) + # add a model + client.create_model( + restApiId=rest_api_id, + name=model_name, + description=description, + contentType=content_type + ) + # get models after adding + result = client.get_models( + restApiId=rest_api_id + ) + result["items"][0]["name"] = model_name + result["items"][0]["description"] = description + + +@mock_apigateway +def test_get_model_by_name(): + client = boto3.client("apigateway", region_name="us-west-2") + response = client.create_rest_api( + name="my_api", + description="this is my api" + ) + rest_api_id = response["id"] + dummy_rest_api_id = 'a12b3c4d' + model_name = "testModel" + description = "test model" + content_type = 'application/json' + # add a model + client.create_model( + restApiId=rest_api_id, + name=model_name, + description=description, + contentType=content_type + ) + # get models after adding + result = client.get_model( + restApiId=rest_api_id, modelName=model_name + ) + result["name"] = model_name + result["description"] = description + + with assert_raises(ClientError) as ex: + client.get_model( + restApiId=dummy_rest_api_id, modelName=model_name + ) + ex.exception.response["Error"]["Message"].should.equal( + "Invalid Rest API Id specified" + ) + ex.exception.response["Error"]["Code"].should.equal( + "NotFoundException" + ) + + +@mock_apigateway +def test_get_model_with_invalid_name(): + client = boto3.client("apigateway", region_name="us-west-2") + response = client.create_rest_api( + name="my_api", + description="this is my api" + ) + rest_api_id = response["id"] + # test with an invalid model name + with assert_raises(ClientError) as ex: + client.get_model( + restApiId=rest_api_id, modelName="fake" + ) + ex.exception.response["Error"]["Message"].should.equal( + "Invalid Model Name specified" + ) + ex.exception.response["Error"]["Code"].should.equal( + "NotFoundException" + ) + + @mock_apigateway def test_http_proxying_integration(): responses.add( From 1c96a05314ac3e4555dc07d1c5c1acf4cd9e7da8 Mon Sep 17 00:00:00 2001 From: usmankb Date: Sun, 12 Apr 2020 18:10:23 +0530 Subject: [PATCH 221/658] linting --- moto/apigateway/models.py | 17 ++++++++--------- moto/apigateway/responses.py | 4 ++-- moto/apigateway/urls.py | 2 +- 3 files changed, 11 insertions(+), 12 deletions(-) diff --git a/moto/apigateway/models.py b/moto/apigateway/models.py index 5ce95742e222..b6a14b163a1c 100644 --- a/moto/apigateway/models.py +++ b/moto/apigateway/models.py @@ -500,12 +500,12 @@ def add_child(self, path, parent_id=None): return child def add_model(self, - name, - description=None, - schema=None, - content_type=None, - cli_input_json=None, - generate_cli_skeleton=None): + name, + description=None, + schema=None, + 
content_type=None, + cli_input_json=None, + generate_cli_skeleton=None): model_id = create_id() new_model = Model( id=model_id, @@ -519,7 +519,6 @@ def add_model(self, self.models[name] = new_model return new_model - def get_resource_for_path(self, path_after_stage_name): for resource in self.resources.values(): if resource.get_path() == path_after_stage_name: @@ -688,7 +687,6 @@ def __init__(self, id, name, **kwargs): self["generateCliSkeleton"] = kwargs.get("generate_cli_skeleton") - class APIGatewayBackend(BaseBackend): def __init__(self, region_name): super(APIGatewayBackend, self).__init__() @@ -1171,7 +1169,8 @@ def get_model(self, rest_api_id, model_name): model = api.models.get(model_name) if model is None: raise ModelNotFound - return model + else: + return model apigateway_backends = {} diff --git a/moto/apigateway/responses.py b/moto/apigateway/responses.py index c18b7f6c4c92..02ff536f3fa8 100644 --- a/moto/apigateway/responses.py +++ b/moto/apigateway/responses.py @@ -639,7 +639,7 @@ def models(self,request, full_url, headers): '{{"message":"{0}","code":"{1}"}}'.format( error.message, error.error_type ), - ) + ) def model_induvidual(self, request, full_url, headers): self.setup_class(request, full_url, headers) @@ -662,4 +662,4 @@ def model_induvidual(self, request, full_url, headers): '{{"message":"{0}","code":"{1}"}}'.format( error.message, error.error_type ), - ) \ No newline at end of file + ) diff --git a/moto/apigateway/urls.py b/moto/apigateway/urls.py index 751d8ae65374..cb48e225f6f1 100644 --- a/moto/apigateway/urls.py +++ b/moto/apigateway/urls.py @@ -22,7 +22,7 @@ "{0}/apikeys/(?P<apikey>[^/]+)": APIGatewayResponse().apikey_individual, "{0}/usageplans$": APIGatewayResponse().usage_plans, "{0}/domainnames$": APIGatewayResponse().domain_names, - "{0}/restapis/(?P<function_id>[^/]+)/models": APIGatewayResponse().models, + "{0}/restapis/(?P<function_id>[^/]+)/models$": APIGatewayResponse().models, "{0}/restapis/(?P<function_id>[^/]+)/models/(?P<model_name>[^/]+)/?$": APIGatewayResponse().model_induvidual, "{0}/domainnames/(?P<domain_name>[^/]+)/?$": APIGatewayResponse().domain_name_induvidual, "{0}/usageplans/(?P<usage_plan_id>[^/]+)/?$": APIGatewayResponse().usage_plan_individual, From 4be97916bfa80b04764ca9aa0024ba91a57232ae Mon Sep 17 00:00:00 2001 From: pvbouwel Date: Sun, 12 Apr 2020 20:05:35 +0100 Subject: [PATCH 222/658] Allow reuse of components packed in models.py Keeping models.py as one big file makes it easy to create circular dependencies. With the current setup it is not possible to reuse DynamoType. This refactor moves DynamoType out into its own file while keeping the overall structure as close as possible to what it was.
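
As a minimal illustration (not part of this patch; it assumes moto is installed at this revision), the split lets callers import DynamoType directly from its new module instead of pulling in the whole models package:

    # Hypothetical usage sketch; the module path and behaviour come from
    # the diff below (moto/dynamodb2/models/dynamo_type.py).
    from moto.dynamodb2.models.dynamo_type import DynamoType

    attr = DynamoType({"N": "5"})
    assert attr.is_number()
    assert attr.cast_value == 5  # numeric strings are cast on access
    assert attr.size() == 1      # byte length of the stored string "5"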
--- moto/dynamodb2/__init__.py | 2 +- .../{models.py => models/__init__.py} | 227 +----------------- moto/dynamodb2/models/dynamo_type.py | 206 ++++++++++++++++ moto/dynamodb2/models/utilities.py | 17 ++ moto/dynamodb2/responses.py | 2 +- 5 files changed, 230 insertions(+), 224 deletions(-) rename moto/dynamodb2/{models.py => models/__init__.py} (86%) create mode 100644 moto/dynamodb2/models/dynamo_type.py create mode 100644 moto/dynamodb2/models/utilities.py diff --git a/moto/dynamodb2/__init__.py b/moto/dynamodb2/__init__.py index 3d6e8ec1f917..d141511c8ef9 100644 --- a/moto/dynamodb2/__init__.py +++ b/moto/dynamodb2/__init__.py @@ -1,5 +1,5 @@ from __future__ import unicode_literals -from .models import dynamodb_backends as dynamodb_backends2 +from moto.dynamodb2.models import dynamodb_backends as dynamodb_backends2 from ..core.models import base_decorator, deprecated_base_decorator dynamodb_backend2 = dynamodb_backends2["us-east-1"] diff --git a/moto/dynamodb2/models.py b/moto/dynamodb2/models/__init__.py similarity index 86% rename from moto/dynamodb2/models.py rename to moto/dynamodb2/models/__init__.py index 152e719c4047..29713d21175f 100644 --- a/moto/dynamodb2/models.py +++ b/moto/dynamodb2/models/__init__.py @@ -6,7 +6,6 @@ import json import re import uuid -import six from boto3 import Session from botocore.exceptions import ParamValidationError @@ -14,10 +13,11 @@ from moto.core import BaseBackend, BaseModel from moto.core.utils import unix_time from moto.core.exceptions import JsonRESTError -from .comparisons import get_comparison_func -from .comparisons import get_filter_expression -from .comparisons import get_expected -from .exceptions import InvalidIndexNameError, InvalidUpdateExpression, ItemSizeTooLarge +from moto.dynamodb2.comparisons import get_filter_expression +from moto.dynamodb2.comparisons import get_expected +from moto.dynamodb2.exceptions import InvalidIndexNameError, ItemSizeTooLarge +from moto.dynamodb2.models.utilities import bytesize, attribute_is_list +from moto.dynamodb2.models.dynamo_type import DynamoType class DynamoJsonEncoder(json.JSONEncoder): @@ -30,223 +30,6 @@ def dynamo_json_dump(dynamo_object): return json.dumps(dynamo_object, cls=DynamoJsonEncoder) -def bytesize(val): - return len(str(val).encode("utf-8")) - - -def attribute_is_list(attr): - """ - Checks if attribute denotes a list, and returns the name of the list and the given list index if so - :param attr: attr or attr[index] - :return: attr, index or None - """ - list_index_update = re.match("(.+)\\[([0-9]+)\\]", attr) - if list_index_update: - attr = list_index_update.group(1) - return attr, list_index_update.group(2) if list_index_update else None - - -class DynamoType(object): - """ - http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/DataModel.html#DataModelDataTypes - """ - - def __init__(self, type_as_dict): - if type(type_as_dict) == DynamoType: - self.type = type_as_dict.type - self.value = type_as_dict.value - else: - self.type = list(type_as_dict)[0] - self.value = list(type_as_dict.values())[0] - if self.is_list(): - self.value = [DynamoType(val) for val in self.value] - elif self.is_map(): - self.value = dict((k, DynamoType(v)) for k, v in self.value.items()) - - def get(self, key): - if not key: - return self - else: - key_head = key.split(".")[0] - key_tail = ".".join(key.split(".")[1:]) - if key_head not in self.value: - self.value[key_head] = DynamoType({"NONE": None}) - return self.value[key_head].get(key_tail) - - def set(self, key, new_value, index=None): - 
if index: - index = int(index) - if type(self.value) is not list: - raise InvalidUpdateExpression - if index >= len(self.value): - self.value.append(new_value) - # {'L': [DynamoType, ..]} ==> DynamoType.set() - self.value[min(index, len(self.value) - 1)].set(key, new_value) - else: - attr = (key or "").split(".").pop(0) - attr, list_index = attribute_is_list(attr) - if not key: - # {'S': value} ==> {'S': new_value} - self.type = new_value.type - self.value = new_value.value - else: - if attr not in self.value: # nonexistingattribute - type_of_new_attr = "M" if "." in key else new_value.type - self.value[attr] = DynamoType({type_of_new_attr: {}}) - # {'M': {'foo': DynamoType}} ==> DynamoType.set(new_value) - self.value[attr].set( - ".".join(key.split(".")[1:]), new_value, list_index - ) - - def delete(self, key, index=None): - if index: - if not key: - if int(index) < len(self.value): - del self.value[int(index)] - elif "." in key: - self.value[int(index)].delete(".".join(key.split(".")[1:])) - else: - self.value[int(index)].delete(key) - else: - attr = key.split(".")[0] - attr, list_index = attribute_is_list(attr) - - if list_index: - self.value[attr].delete(".".join(key.split(".")[1:]), list_index) - elif "." in key: - self.value[attr].delete(".".join(key.split(".")[1:])) - else: - self.value.pop(key) - - def filter(self, projection_expressions): - nested_projections = [ - expr[0 : expr.index(".")] for expr in projection_expressions if "." in expr - ] - if self.is_map(): - expressions_to_delete = [] - for attr in self.value: - if ( - attr not in projection_expressions - and attr not in nested_projections - ): - expressions_to_delete.append(attr) - elif attr in nested_projections: - relevant_expressions = [ - expr[len(attr + ".") :] - for expr in projection_expressions - if expr.startswith(attr + ".") - ] - self.value[attr].filter(relevant_expressions) - for expr in expressions_to_delete: - self.value.pop(expr) - - def __hash__(self): - return hash((self.type, self.value)) - - def __eq__(self, other): - return self.type == other.type and self.value == other.value - - def __ne__(self, other): - return self.type != other.type or self.value != other.value - - def __lt__(self, other): - return self.cast_value < other.cast_value - - def __le__(self, other): - return self.cast_value <= other.cast_value - - def __gt__(self, other): - return self.cast_value > other.cast_value - - def __ge__(self, other): - return self.cast_value >= other.cast_value - - def __repr__(self): - return "DynamoType: {0}".format(self.to_json()) - - @property - def cast_value(self): - if self.is_number(): - try: - return int(self.value) - except ValueError: - return float(self.value) - elif self.is_set(): - sub_type = self.type[0] - return set([DynamoType({sub_type: v}).cast_value for v in self.value]) - elif self.is_list(): - return [DynamoType(v).cast_value for v in self.value] - elif self.is_map(): - return dict([(k, DynamoType(v).cast_value) for k, v in self.value.items()]) - else: - return self.value - - def child_attr(self, key): - """ - Get Map or List children by key. str for Map, int for List. - - Returns DynamoType or None. - """ - if isinstance(key, six.string_types) and self.is_map(): - if "." in key and key.split(".")[0] in self.value: - return self.value[key.split(".")[0]].child_attr( - ".".join(key.split(".")[1:]) - ) - elif "." 
not in key and key in self.value: - return DynamoType(self.value[key]) - - if isinstance(key, int) and self.is_list(): - idx = key - if 0 <= idx < len(self.value): - return DynamoType(self.value[idx]) - - return None - - def size(self): - if self.is_number(): - value_size = len(str(self.value)) - elif self.is_set(): - sub_type = self.type[0] - value_size = sum([DynamoType({sub_type: v}).size() for v in self.value]) - elif self.is_list(): - value_size = sum([v.size() for v in self.value]) - elif self.is_map(): - value_size = sum( - [bytesize(k) + DynamoType(v).size() for k, v in self.value.items()] - ) - elif type(self.value) == bool: - value_size = 1 - else: - value_size = bytesize(self.value) - return value_size - - def to_json(self): - return {self.type: self.value} - - def compare(self, range_comparison, range_objs): - """ - Compares this type against comparison filters - """ - range_values = [obj.cast_value for obj in range_objs] - comparison_func = get_comparison_func(range_comparison) - return comparison_func(self.cast_value, *range_values) - - def is_number(self): - return self.type == "N" - - def is_set(self): - return self.type == "SS" or self.type == "NS" or self.type == "BS" - - def is_list(self): - return self.type == "L" - - def is_map(self): - return self.type == "M" - - def same_type(self, other): - return self.type == other.type - - # https://github.com/spulec/moto/issues/1874 # Ensure that the total size of an item does not exceed 400kb class LimitedSizeDict(dict): diff --git a/moto/dynamodb2/models/dynamo_type.py b/moto/dynamodb2/models/dynamo_type.py new file mode 100644 index 000000000000..300804c1e8a9 --- /dev/null +++ b/moto/dynamodb2/models/dynamo_type.py @@ -0,0 +1,206 @@ +import six + +from moto.dynamodb2.comparisons import get_comparison_func +from moto.dynamodb2.exceptions import InvalidUpdateExpression +from moto.dynamodb2.models.utilities import attribute_is_list, bytesize + + +class DynamoType(object): + """ + http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/DataModel.html#DataModelDataTypes + """ + + def __init__(self, type_as_dict): + if type(type_as_dict) == DynamoType: + self.type = type_as_dict.type + self.value = type_as_dict.value + else: + self.type = list(type_as_dict)[0] + self.value = list(type_as_dict.values())[0] + if self.is_list(): + self.value = [DynamoType(val) for val in self.value] + elif self.is_map(): + self.value = dict((k, DynamoType(v)) for k, v in self.value.items()) + + def get(self, key): + if not key: + return self + else: + key_head = key.split(".")[0] + key_tail = ".".join(key.split(".")[1:]) + if key_head not in self.value: + self.value[key_head] = DynamoType({"NONE": None}) + return self.value[key_head].get(key_tail) + + def set(self, key, new_value, index=None): + if index: + index = int(index) + if type(self.value) is not list: + raise InvalidUpdateExpression + if index >= len(self.value): + self.value.append(new_value) + # {'L': [DynamoType, ..]} ==> DynamoType.set() + self.value[min(index, len(self.value) - 1)].set(key, new_value) + else: + attr = (key or "").split(".").pop(0) + attr, list_index = attribute_is_list(attr) + if not key: + # {'S': value} ==> {'S': new_value} + self.type = new_value.type + self.value = new_value.value + else: + if attr not in self.value: # nonexistingattribute + type_of_new_attr = "M" if "." 
in key else new_value.type + self.value[attr] = DynamoType({type_of_new_attr: {}}) + # {'M': {'foo': DynamoType}} ==> DynamoType.set(new_value) + self.value[attr].set( + ".".join(key.split(".")[1:]), new_value, list_index + ) + + def delete(self, key, index=None): + if index: + if not key: + if int(index) < len(self.value): + del self.value[int(index)] + elif "." in key: + self.value[int(index)].delete(".".join(key.split(".")[1:])) + else: + self.value[int(index)].delete(key) + else: + attr = key.split(".")[0] + attr, list_index = attribute_is_list(attr) + + if list_index: + self.value[attr].delete(".".join(key.split(".")[1:]), list_index) + elif "." in key: + self.value[attr].delete(".".join(key.split(".")[1:])) + else: + self.value.pop(key) + + def filter(self, projection_expressions): + nested_projections = [ + expr[0 : expr.index(".")] for expr in projection_expressions if "." in expr + ] + if self.is_map(): + expressions_to_delete = [] + for attr in self.value: + if ( + attr not in projection_expressions + and attr not in nested_projections + ): + expressions_to_delete.append(attr) + elif attr in nested_projections: + relevant_expressions = [ + expr[len(attr + ".") :] + for expr in projection_expressions + if expr.startswith(attr + ".") + ] + self.value[attr].filter(relevant_expressions) + for expr in expressions_to_delete: + self.value.pop(expr) + + def __hash__(self): + return hash((self.type, self.value)) + + def __eq__(self, other): + return self.type == other.type and self.value == other.value + + def __ne__(self, other): + return self.type != other.type or self.value != other.value + + def __lt__(self, other): + return self.cast_value < other.cast_value + + def __le__(self, other): + return self.cast_value <= other.cast_value + + def __gt__(self, other): + return self.cast_value > other.cast_value + + def __ge__(self, other): + return self.cast_value >= other.cast_value + + def __repr__(self): + return "DynamoType: {0}".format(self.to_json()) + + @property + def cast_value(self): + if self.is_number(): + try: + return int(self.value) + except ValueError: + return float(self.value) + elif self.is_set(): + sub_type = self.type[0] + return set([DynamoType({sub_type: v}).cast_value for v in self.value]) + elif self.is_list(): + return [DynamoType(v).cast_value for v in self.value] + elif self.is_map(): + return dict([(k, DynamoType(v).cast_value) for k, v in self.value.items()]) + else: + return self.value + + def child_attr(self, key): + """ + Get Map or List children by key. str for Map, int for List. + + Returns DynamoType or None. + """ + if isinstance(key, six.string_types) and self.is_map(): + if "." in key and key.split(".")[0] in self.value: + return self.value[key.split(".")[0]].child_attr( + ".".join(key.split(".")[1:]) + ) + elif "." 
not in key and key in self.value: + return DynamoType(self.value[key]) + + if isinstance(key, int) and self.is_list(): + idx = key + if 0 <= idx < len(self.value): + return DynamoType(self.value[idx]) + + return None + + def size(self): + if self.is_number(): + value_size = len(str(self.value)) + elif self.is_set(): + sub_type = self.type[0] + value_size = sum([DynamoType({sub_type: v}).size() for v in self.value]) + elif self.is_list(): + value_size = sum([v.size() for v in self.value]) + elif self.is_map(): + value_size = sum( + [bytesize(k) + DynamoType(v).size() for k, v in self.value.items()] + ) + elif type(self.value) == bool: + value_size = 1 + else: + value_size = bytesize(self.value) + return value_size + + def to_json(self): + return {self.type: self.value} + + def compare(self, range_comparison, range_objs): + """ + Compares this type against comparison filters + """ + range_values = [obj.cast_value for obj in range_objs] + comparison_func = get_comparison_func(range_comparison) + return comparison_func(self.cast_value, *range_values) + + def is_number(self): + return self.type == "N" + + def is_set(self): + return self.type == "SS" or self.type == "NS" or self.type == "BS" + + def is_list(self): + return self.type == "L" + + def is_map(self): + return self.type == "M" + + def same_type(self, other): + return self.type == other.type diff --git a/moto/dynamodb2/models/utilities.py b/moto/dynamodb2/models/utilities.py new file mode 100644 index 000000000000..9dd6f1e9f423 --- /dev/null +++ b/moto/dynamodb2/models/utilities.py @@ -0,0 +1,17 @@ +import re + + +def bytesize(val): + return len(str(val).encode("utf-8")) + + +def attribute_is_list(attr): + """ + Checks if attribute denotes a list, and returns the name of the list and the given list index if so + :param attr: attr or attr[index] + :return: attr, index or None + """ + list_index_update = re.match("(.+)\\[([0-9]+)\\]", attr) + if list_index_update: + attr = list_index_update.group(1) + return attr, list_index_update.group(2) if list_index_update else None diff --git a/moto/dynamodb2/responses.py b/moto/dynamodb2/responses.py index 78126f7f1739..65484aa0818d 100644 --- a/moto/dynamodb2/responses.py +++ b/moto/dynamodb2/responses.py @@ -10,7 +10,7 @@ from moto.core.responses import BaseResponse from moto.core.utils import camelcase_to_underscores, amzn_request_id from .exceptions import InvalidIndexNameError, InvalidUpdateExpression, ItemSizeTooLarge -from .models import dynamodb_backends, dynamo_json_dump +from moto.dynamodb2.models import dynamodb_backends, dynamo_json_dump TRANSACTION_MAX_ITEMS = 25 From d745dfd3d2eff02b628bd021374810a86ea73d4d Mon Sep 17 00:00:00 2001 From: DenverJ Date: Mon, 13 Apr 2020 10:50:01 +1000 Subject: [PATCH 223/658] Implement enter_standby, exit_standby and terminate_instance_in_auto_scaling_group --- moto/autoscaling/models.py | 67 ++- moto/autoscaling/responses.py | 144 ++++- tests/test_autoscaling/test_autoscaling.py | 584 ++++++++++++++++++++- 3 files changed, 777 insertions(+), 18 deletions(-) diff --git a/moto/autoscaling/models.py b/moto/autoscaling/models.py index 88577433ed9a..b757672d0821 100644 --- a/moto/autoscaling/models.py +++ b/moto/autoscaling/models.py @@ -267,6 +267,9 @@ def __init__( self.tags = tags if tags else [] self.set_desired_capacity(desired_capacity) + def active_instances(self): + return [x for x in self.instance_states if x.lifecycle_state == "InService"] + def _set_azs_and_vpcs(self, availability_zones, vpc_zone_identifier, update=False): # for updates, if only 
AZs are provided, they must not clash with # the AZs of existing VPCs @@ -413,9 +416,11 @@ def set_desired_capacity(self, new_capacity): else: self.desired_capacity = new_capacity - curr_instance_count = len(self.instance_states) + curr_instance_count = len(self.active_instances()) if self.desired_capacity == curr_instance_count: + self.autoscaling_backend.update_attached_elbs(self.name) + self.autoscaling_backend.update_attached_target_groups(self.name) return if self.desired_capacity > curr_instance_count: @@ -442,6 +447,8 @@ def set_desired_capacity(self, new_capacity): self.instance_states = list( set(self.instance_states) - set(instances_to_remove) ) + self.autoscaling_backend.update_attached_elbs(self.name) + self.autoscaling_backend.update_attached_target_groups(self.name) def get_propagated_tags(self): propagated_tags = {} @@ -703,7 +710,7 @@ def set_instance_health( def detach_instances(self, group_name, instance_ids, should_decrement): group = self.autoscaling_groups[group_name] - original_size = len(group.instance_states) + original_size = group.desired_capacity detached_instances = [ x for x in group.instance_states if x.instance.id in instance_ids @@ -720,13 +727,8 @@ def detach_instances(self, group_name, instance_ids, should_decrement): if should_decrement: group.desired_capacity = original_size - len(instance_ids) - else: - count_needed = len(instance_ids) - group.replace_autoscaling_group_instances( - count_needed, group.get_propagated_tags() - ) - self.update_attached_elbs(group_name) + group.set_desired_capacity(group.desired_capacity) return detached_instances def set_desired_capacity(self, group_name, desired_capacity): @@ -791,7 +793,9 @@ def execute_policy(self, group_name): def update_attached_elbs(self, group_name): group = self.autoscaling_groups[group_name] - group_instance_ids = set(state.instance.id for state in group.instance_states) + group_instance_ids = set( + state.instance.id for state in group.active_instances() + ) # skip this if group.load_balancers is empty # otherwise elb_backend.describe_load_balancers returns all available load balancers @@ -908,15 +912,15 @@ def notify_terminate_instances(self, instance_ids): autoscaling_group_name, autoscaling_group, ) in self.autoscaling_groups.items(): - original_instance_count = len(autoscaling_group.instance_states) + original_active_instance_count = len(autoscaling_group.active_instances()) autoscaling_group.instance_states = list( filter( lambda i_state: i_state.instance.id not in instance_ids, autoscaling_group.instance_states, ) ) - difference = original_instance_count - len( - autoscaling_group.instance_states + difference = original_active_instance_count - len( + autoscaling_group.active_instances() ) if difference > 0: autoscaling_group.replace_autoscaling_group_instances( @@ -924,6 +928,45 @@ def notify_terminate_instances(self, instance_ids): ) self.update_attached_elbs(autoscaling_group_name) + def enter_standby_instances(self, group_name, instance_ids, should_decrement): + group = self.autoscaling_groups[group_name] + original_size = group.desired_capacity + standby_instances = [] + for instance_state in group.instance_states: + if instance_state.instance.id in instance_ids: + instance_state.lifecycle_state = "Standby" + standby_instances.append(instance_state) + if should_decrement: + group.desired_capacity = group.desired_capacity - len(instance_ids) + else: + group.set_desired_capacity(group.desired_capacity) + return standby_instances, original_size, group.desired_capacity + + def 
exit_standby_instances(self, group_name, instance_ids): + group = self.autoscaling_groups[group_name] + original_size = group.desired_capacity + standby_instances = [] + for instance_state in group.instance_states: + if instance_state.instance.id in instance_ids: + instance_state.lifecycle_state = "InService" + standby_instances.append(instance_state) + group.desired_capacity = group.desired_capacity + len(instance_ids) + return standby_instances, original_size, group.desired_capacity + + def terminate_instance(self, instance_id, should_decrement): + instance = self.ec2_backend.get_instance(instance_id) + instance_state = next( + instance_state + for group in self.autoscaling_groups.values() + for instance_state in group.instance_states + if instance_state.instance.id == instance.id + ) + group = instance.autoscaling_group + original_size = group.desired_capacity + self.detach_instances(group.name, [instance.id], should_decrement) + self.ec2_backend.terminate_instances([instance.id]) + return instance_state, original_size, group.desired_capacity + autoscaling_backends = {} for region, ec2_backend in ec2_backends.items(): diff --git a/moto/autoscaling/responses.py b/moto/autoscaling/responses.py index 41c79edb4cea..06b68aa4b976 100644 --- a/moto/autoscaling/responses.py +++ b/moto/autoscaling/responses.py @@ -1,7 +1,12 @@ from __future__ import unicode_literals +import datetime from moto.core.responses import BaseResponse -from moto.core.utils import amz_crc32, amzn_request_id +from moto.core.utils import ( + amz_crc32, + amzn_request_id, + iso_8601_datetime_with_milliseconds, +) from .models import autoscaling_backends @@ -291,6 +296,50 @@ def detach_load_balancers(self): template = self.response_template(DETACH_LOAD_BALANCERS_TEMPLATE) return template.render() + @amz_crc32 + @amzn_request_id + def enter_standby(self): + group_name = self._get_param("AutoScalingGroupName") + instance_ids = self._get_multi_param("InstanceIds.member") + should_decrement_string = self._get_param("ShouldDecrementDesiredCapacity") + if should_decrement_string == "true": + should_decrement = True + else: + should_decrement = False + ( + standby_instances, + original_size, + desired_capacity, + ) = self.autoscaling_backend.enter_standby_instances( + group_name, instance_ids, should_decrement + ) + template = self.response_template(ENTER_STANDBY_TEMPLATE) + return template.render( + standby_instances=standby_instances, + should_decrement=should_decrement, + original_size=original_size, + desired_capacity=desired_capacity, + timestamp=iso_8601_datetime_with_milliseconds(datetime.datetime.utcnow()), + ) + + @amz_crc32 + @amzn_request_id + def exit_standby(self): + group_name = self._get_param("AutoScalingGroupName") + instance_ids = self._get_multi_param("InstanceIds.member") + ( + standby_instances, + original_size, + desired_capacity, + ) = self.autoscaling_backend.exit_standby_instances(group_name, instance_ids) + template = self.response_template(EXIT_STANDBY_TEMPLATE) + return template.render( + standby_instances=standby_instances, + original_size=original_size, + desired_capacity=desired_capacity, + timestamp=iso_8601_datetime_with_milliseconds(datetime.datetime.utcnow()), + ) + def suspend_processes(self): autoscaling_group_name = self._get_param("AutoScalingGroupName") scaling_processes = self._get_multi_param("ScalingProcesses.member") @@ -310,6 +359,29 @@ def set_instance_protection(self): template = self.response_template(SET_INSTANCE_PROTECTION_TEMPLATE) return template.render() + @amz_crc32 + 
@amzn_request_id
+    def terminate_instance_in_auto_scaling_group(self):
+        instance_id = self._get_param("InstanceId")
+        should_decrement_string = self._get_param("ShouldDecrementDesiredCapacity")
+        if should_decrement_string == "true":
+            should_decrement = True
+        else:
+            should_decrement = False
+        (
+            instance,
+            original_size,
+            desired_capacity,
+        ) = self.autoscaling_backend.terminate_instance(instance_id, should_decrement)
+        template = self.response_template(TERMINATE_INSTANCES_TEMPLATE)
+        return template.render(
+            instance=instance,
+            should_decrement=should_decrement,
+            original_size=original_size,
+            desired_capacity=desired_capacity,
+            timestamp=iso_8601_datetime_with_milliseconds(datetime.datetime.utcnow()),
+        )
+
 CREATE_LAUNCH_CONFIGURATION_TEMPLATE = """<CreateLaunchConfigurationResponse xmlns="http://autoscaling.amazonaws.com/doc/2011-01-01/">
@@ -707,3 +779,73 @@ def set_instance_protection(self):
 """
+
+ENTER_STANDBY_TEMPLATE = """<EnterStandbyResponse xmlns="http://autoscaling.amazonaws.com/doc/2011-01-01/">
+  <EnterStandbyResult>
+    <Activities>
+      {% for instance in standby_instances %}
+      <member>
+        <ActivityId>12345678-1234-1234-1234-123456789012</ActivityId>
+        <AutoScalingGroupName>{{ group_name }}</AutoScalingGroupName>
+        {% if should_decrement %}
+        <Cause>At {{ timestamp }} instance {{ instance.instance.id }} was moved to standby in response to a user request, shrinking the capacity from {{ original_size }} to {{ desired_capacity }}.</Cause>
+        {% else %}
+        <Cause>At {{ timestamp }} instance {{ instance.instance.id }} was moved to standby in response to a user request.</Cause>
+        {% endif %}
+        <Description>Moving EC2 instance to Standby: {{ instance.instance.id }}</Description>
+        <Progress>50</Progress>
+        <StartTime>{{ timestamp }}</StartTime>
+        <Details>{"Subnet ID":"??","Availability Zone":"{{ instance.instance.placement }}"}</Details>
+        <StatusCode>InProgress</StatusCode>
+      </member>
+      {% endfor %}
+    </Activities>
+  </EnterStandbyResult>
+  <ResponseMetadata>
+    <RequestId>7c6e177f-f082-11e1-ac58-3714bEXAMPLE</RequestId>
+  </ResponseMetadata>
+</EnterStandbyResponse>"""
+
+EXIT_STANDBY_TEMPLATE = """<ExitStandbyResponse xmlns="http://autoscaling.amazonaws.com/doc/2011-01-01/">
+  <ExitStandbyResult>
+    <Activities>
+      {% for instance in standby_instances %}
+      <member>
+        <ActivityId>12345678-1234-1234-1234-123456789012</ActivityId>
+        <AutoScalingGroupName>{{ group_name }}</AutoScalingGroupName>
+        <Description>Moving EC2 instance out of Standby: {{ instance.instance.id }}</Description>
+        <Progress>30</Progress>
+        <Cause>At {{ timestamp }} instance {{ instance.instance.id }} was moved out of standby in response to a user request, increasing the capacity from {{ original_size }} to {{ desired_capacity }}.</Cause>
+        <StartTime>{{ timestamp }}</StartTime>
+        <Details>{"Subnet ID":"??","Availability Zone":"{{ instance.instance.placement }}"}</Details>
+        <StatusCode>PreInService</StatusCode>
+      </member>
+      {% endfor %}
+    </Activities>
+  </ExitStandbyResult>
+  <ResponseMetadata>
+    <RequestId>7c6e177f-f082-11e1-ac58-3714bEXAMPLE</RequestId>
+  </ResponseMetadata>
+</ExitStandbyResponse>"""
+
+TERMINATE_INSTANCES_TEMPLATE = """<TerminateInstanceInAutoScalingGroupResponse xmlns="http://autoscaling.amazonaws.com/doc/2011-01-01/">
+  <TerminateInstanceInAutoScalingGroupResult>
+    <Activity>
+      <ActivityId>35b5c464-0b63-2fc7-1611-467d4a7f2497EXAMPLE</ActivityId>
+      <AutoScalingGroupName>{{ group_name }}</AutoScalingGroupName>
+      {% if should_decrement %}
+      <Cause>At {{ timestamp }} instance {{ instance.instance.id }} was taken out of service in response to a user request, shrinking the capacity from {{ original_size }} to {{ desired_capacity }}.</Cause>
+      {% else %}
+      <Cause>At {{ timestamp }} instance {{ instance.instance.id }} was taken out of service in response to a user request.</Cause>
+      {% endif %}
+      <Description>Terminating EC2 instance: {{ instance.instance.id }}</Description>
+      <Progress>0</Progress>
+      <StartTime>{{ timestamp }}</StartTime>
+      <Details>{"Subnet ID":"??","Availability Zone":"{{ instance.instance.placement }}"}</Details>
+      <StatusCode>InProgress</StatusCode>
+    </Activity>
+  </TerminateInstanceInAutoScalingGroupResult>
+  <ResponseMetadata>
+    <RequestId>a1ba8fb9-31d6-4d9a-ace1-a7f76749df11EXAMPLE</RequestId>
+  </ResponseMetadata>
+</TerminateInstanceInAutoScalingGroupResponse>
""" diff --git a/tests/test_autoscaling/test_autoscaling.py b/tests/test_autoscaling/test_autoscaling.py index 5cf3dc6ffd22..3a10f20ffe9a 100644 --- a/tests/test_autoscaling/test_autoscaling.py +++ b/tests/test_autoscaling/test_autoscaling.py @@ -1102,8 +1102,6 @@ def test_detach_one_instance_decrement(): ec2_client = boto3.client("ec2", region_name="us-east-1") - response = ec2_client.describe_instances(InstanceIds=[instance_to_detach]) - response = client.detach_instances( AutoScalingGroupName="test_asg", InstanceIds=[instance_to_detach], @@ -1156,8 +1154,6 @@ def test_detach_one_instance(): ec2_client = boto3.client("ec2", region_name="us-east-1") - response = ec2_client.describe_instances(InstanceIds=[instance_to_detach]) - response = client.detach_instances( AutoScalingGroupName="test_asg", InstanceIds=[instance_to_detach], @@ -1178,6 +1174,516 @@ def test_detach_one_instance(): tags.should.have.length_of(2) +@mock_autoscaling +@mock_ec2 +def test_standby_one_instance_decrement(): + mocked_networking = setup_networking() + client = boto3.client("autoscaling", region_name="us-east-1") + _ = client.create_launch_configuration( + LaunchConfigurationName="test_launch_configuration" + ) + client.create_auto_scaling_group( + AutoScalingGroupName="test_asg", + LaunchConfigurationName="test_launch_configuration", + MinSize=0, + MaxSize=2, + DesiredCapacity=2, + Tags=[ + { + "ResourceId": "test_asg", + "ResourceType": "auto-scaling-group", + "Key": "propogated-tag-key", + "Value": "propagate-tag-value", + "PropagateAtLaunch": True, + } + ], + VPCZoneIdentifier=mocked_networking["subnet1"], + ) + response = client.describe_auto_scaling_groups(AutoScalingGroupNames=["test_asg"]) + instance_to_standby = response["AutoScalingGroups"][0]["Instances"][0]["InstanceId"] + instance_to_keep = response["AutoScalingGroups"][0]["Instances"][1]["InstanceId"] + + ec2_client = boto3.client("ec2", region_name="us-east-1") + + response = client.enter_standby( + AutoScalingGroupName="test_asg", + InstanceIds=[instance_to_standby], + ShouldDecrementDesiredCapacity=True, + ) + response["ResponseMetadata"]["HTTPStatusCode"].should.equal(200) + + response = client.describe_auto_scaling_groups(AutoScalingGroupNames=["test_asg"]) + response["AutoScalingGroups"][0]["Instances"].should.have.length_of(2) + response["AutoScalingGroups"][0]["DesiredCapacity"].should.equal(1) + + response = client.describe_auto_scaling_instances(InstanceIds=[instance_to_standby]) + response["AutoScalingInstances"][0]["LifecycleState"].should.equal("Standby") + + # test to ensure tag has been retained (standby instance is still part of the ASG) + response = ec2_client.describe_instances() + for reservation in response["Reservations"]: + for instance in reservation["Instances"]: + tags = instance["Tags"] + tags.should.have.length_of(2) + + +@mock_autoscaling +@mock_ec2 +def test_standby_one_instance(): + mocked_networking = setup_networking() + client = boto3.client("autoscaling", region_name="us-east-1") + _ = client.create_launch_configuration( + LaunchConfigurationName="test_launch_configuration" + ) + client.create_auto_scaling_group( + AutoScalingGroupName="test_asg", + LaunchConfigurationName="test_launch_configuration", + MinSize=0, + MaxSize=2, + DesiredCapacity=2, + Tags=[ + { + "ResourceId": "test_asg", + "ResourceType": "auto-scaling-group", + "Key": "propogated-tag-key", + "Value": "propagate-tag-value", + "PropagateAtLaunch": True, + } + ], + VPCZoneIdentifier=mocked_networking["subnet1"], + ) + response = 
client.describe_auto_scaling_groups(AutoScalingGroupNames=["test_asg"]) + instance_to_standby = response["AutoScalingGroups"][0]["Instances"][0]["InstanceId"] + instance_to_keep = response["AutoScalingGroups"][0]["Instances"][1]["InstanceId"] + + ec2_client = boto3.client("ec2", region_name="us-east-1") + + response = client.enter_standby( + AutoScalingGroupName="test_asg", + InstanceIds=[instance_to_standby], + ShouldDecrementDesiredCapacity=False, + ) + response["ResponseMetadata"]["HTTPStatusCode"].should.equal(200) + + response = client.describe_auto_scaling_groups(AutoScalingGroupNames=["test_asg"]) + response["AutoScalingGroups"][0]["Instances"].should.have.length_of(3) + response["AutoScalingGroups"][0]["DesiredCapacity"].should.equal(2) + + response = client.describe_auto_scaling_instances(InstanceIds=[instance_to_standby]) + response["AutoScalingInstances"][0]["LifecycleState"].should.equal("Standby") + + # test to ensure tag has been retained (standby instance is still part of the ASG) + response = ec2_client.describe_instances() + for reservation in response["Reservations"]: + for instance in reservation["Instances"]: + tags = instance["Tags"] + tags.should.have.length_of(2) + + +@mock_elb +@mock_autoscaling +@mock_ec2 +def test_standby_elb_update(): + mocked_networking = setup_networking() + client = boto3.client("autoscaling", region_name="us-east-1") + _ = client.create_launch_configuration( + LaunchConfigurationName="test_launch_configuration" + ) + client.create_auto_scaling_group( + AutoScalingGroupName="test_asg", + LaunchConfigurationName="test_launch_configuration", + MinSize=0, + MaxSize=2, + DesiredCapacity=2, + Tags=[ + { + "ResourceId": "test_asg", + "ResourceType": "auto-scaling-group", + "Key": "propogated-tag-key", + "Value": "propagate-tag-value", + "PropagateAtLaunch": True, + } + ], + VPCZoneIdentifier=mocked_networking["subnet1"], + ) + + elb_client = boto3.client("elb", region_name="us-east-1") + elb_client.create_load_balancer( + LoadBalancerName="my-lb", + Listeners=[{"Protocol": "tcp", "LoadBalancerPort": 80, "InstancePort": 8080}], + AvailabilityZones=["us-east-1a", "us-east-1b"], + ) + + response = client.attach_load_balancers( + AutoScalingGroupName="test_asg", LoadBalancerNames=["my-lb"] + ) + response["ResponseMetadata"]["HTTPStatusCode"].should.equal(200) + + response = client.describe_auto_scaling_groups(AutoScalingGroupNames=["test_asg"]) + instance_to_standby = response["AutoScalingGroups"][0]["Instances"][0]["InstanceId"] + + response = client.enter_standby( + AutoScalingGroupName="test_asg", + InstanceIds=[instance_to_standby], + ShouldDecrementDesiredCapacity=False, + ) + response["ResponseMetadata"]["HTTPStatusCode"].should.equal(200) + + response = client.describe_auto_scaling_groups(AutoScalingGroupNames=["test_asg"]) + response["AutoScalingGroups"][0]["Instances"].should.have.length_of(3) + response["AutoScalingGroups"][0]["DesiredCapacity"].should.equal(2) + + response = client.describe_auto_scaling_instances(InstanceIds=[instance_to_standby]) + response["AutoScalingInstances"][0]["LifecycleState"].should.equal("Standby") + + response = elb_client.describe_load_balancers(LoadBalancerNames=["my-lb"]) + list(response["LoadBalancerDescriptions"][0]["Instances"]).should.have.length_of(2) + + +@mock_autoscaling +@mock_ec2 +def test_standby_terminate_instance_decrement(): + mocked_networking = setup_networking() + client = boto3.client("autoscaling", region_name="us-east-1") + _ = client.create_launch_configuration( + 
LaunchConfigurationName="test_launch_configuration" + ) + client.create_auto_scaling_group( + AutoScalingGroupName="test_asg", + LaunchConfigurationName="test_launch_configuration", + MinSize=0, + MaxSize=3, + DesiredCapacity=2, + Tags=[ + { + "ResourceId": "test_asg", + "ResourceType": "auto-scaling-group", + "Key": "propogated-tag-key", + "Value": "propagate-tag-value", + "PropagateAtLaunch": True, + } + ], + VPCZoneIdentifier=mocked_networking["subnet1"], + ) + + response = client.describe_auto_scaling_groups(AutoScalingGroupNames=["test_asg"]) + instance_to_standby_terminate = response["AutoScalingGroups"][0]["Instances"][0][ + "InstanceId" + ] + + ec2_client = boto3.client("ec2", region_name="us-east-1") + + response = client.enter_standby( + AutoScalingGroupName="test_asg", + InstanceIds=[instance_to_standby_terminate], + ShouldDecrementDesiredCapacity=False, + ) + response["ResponseMetadata"]["HTTPStatusCode"].should.equal(200) + + response = client.describe_auto_scaling_groups(AutoScalingGroupNames=["test_asg"]) + response["AutoScalingGroups"][0]["Instances"].should.have.length_of(3) + response["AutoScalingGroups"][0]["DesiredCapacity"].should.equal(2) + + response = client.describe_auto_scaling_instances( + InstanceIds=[instance_to_standby_terminate] + ) + response["AutoScalingInstances"][0]["LifecycleState"].should.equal("Standby") + + response = client.terminate_instance_in_auto_scaling_group( + InstanceId=instance_to_standby_terminate, ShouldDecrementDesiredCapacity=True + ) + response["ResponseMetadata"]["HTTPStatusCode"].should.equal(200) + + # AWS still decrements desired capacity ASG if requested, even if the terminated instance is in standby + response = client.describe_auto_scaling_groups(AutoScalingGroupNames=["test_asg"]) + response["AutoScalingGroups"][0]["Instances"].should.have.length_of(1) + response["AutoScalingGroups"][0]["Instances"][0]["InstanceId"].should_not.equal( + instance_to_standby_terminate + ) + response["AutoScalingGroups"][0]["DesiredCapacity"].should.equal(1) + + response = ec2_client.describe_instances( + InstanceIds=[instance_to_standby_terminate] + ) + response["Reservations"][0]["Instances"][0]["State"]["Name"].should.equal( + "terminated" + ) + + +@mock_autoscaling +@mock_ec2 +def test_standby_terminate_instance_no_decrement(): + mocked_networking = setup_networking() + client = boto3.client("autoscaling", region_name="us-east-1") + _ = client.create_launch_configuration( + LaunchConfigurationName="test_launch_configuration" + ) + client.create_auto_scaling_group( + AutoScalingGroupName="test_asg", + LaunchConfigurationName="test_launch_configuration", + MinSize=0, + MaxSize=3, + DesiredCapacity=2, + Tags=[ + { + "ResourceId": "test_asg", + "ResourceType": "auto-scaling-group", + "Key": "propogated-tag-key", + "Value": "propagate-tag-value", + "PropagateAtLaunch": True, + } + ], + VPCZoneIdentifier=mocked_networking["subnet1"], + ) + + response = client.describe_auto_scaling_groups(AutoScalingGroupNames=["test_asg"]) + instance_to_standby_terminate = response["AutoScalingGroups"][0]["Instances"][0][ + "InstanceId" + ] + + ec2_client = boto3.client("ec2", region_name="us-east-1") + + response = client.enter_standby( + AutoScalingGroupName="test_asg", + InstanceIds=[instance_to_standby_terminate], + ShouldDecrementDesiredCapacity=False, + ) + response["ResponseMetadata"]["HTTPStatusCode"].should.equal(200) + + response = client.describe_auto_scaling_groups(AutoScalingGroupNames=["test_asg"]) + 
response["AutoScalingGroups"][0]["Instances"].should.have.length_of(3) + response["AutoScalingGroups"][0]["DesiredCapacity"].should.equal(2) + + response = client.describe_auto_scaling_instances( + InstanceIds=[instance_to_standby_terminate] + ) + response["AutoScalingInstances"][0]["LifecycleState"].should.equal("Standby") + + response = client.terminate_instance_in_auto_scaling_group( + InstanceId=instance_to_standby_terminate, ShouldDecrementDesiredCapacity=False + ) + response["ResponseMetadata"]["HTTPStatusCode"].should.equal(200) + + response = client.describe_auto_scaling_groups(AutoScalingGroupNames=["test_asg"]) + group = response["AutoScalingGroups"][0] + group["Instances"].should.have.length_of(2) + instance_to_standby_terminate.shouldnt.be.within( + [x["InstanceId"] for x in group["Instances"]] + ) + group["DesiredCapacity"].should.equal(2) + + response = ec2_client.describe_instances( + InstanceIds=[instance_to_standby_terminate] + ) + response["Reservations"][0]["Instances"][0]["State"]["Name"].should.equal( + "terminated" + ) + + +@mock_autoscaling +@mock_ec2 +def test_standby_detach_instance_decrement(): + mocked_networking = setup_networking() + client = boto3.client("autoscaling", region_name="us-east-1") + _ = client.create_launch_configuration( + LaunchConfigurationName="test_launch_configuration" + ) + client.create_auto_scaling_group( + AutoScalingGroupName="test_asg", + LaunchConfigurationName="test_launch_configuration", + MinSize=0, + MaxSize=3, + DesiredCapacity=2, + Tags=[ + { + "ResourceId": "test_asg", + "ResourceType": "auto-scaling-group", + "Key": "propogated-tag-key", + "Value": "propagate-tag-value", + "PropagateAtLaunch": True, + } + ], + VPCZoneIdentifier=mocked_networking["subnet1"], + ) + + response = client.describe_auto_scaling_groups(AutoScalingGroupNames=["test_asg"]) + instance_to_standby_detach = response["AutoScalingGroups"][0]["Instances"][0][ + "InstanceId" + ] + + ec2_client = boto3.client("ec2", region_name="us-east-1") + + response = client.enter_standby( + AutoScalingGroupName="test_asg", + InstanceIds=[instance_to_standby_detach], + ShouldDecrementDesiredCapacity=False, + ) + response["ResponseMetadata"]["HTTPStatusCode"].should.equal(200) + + response = client.describe_auto_scaling_groups(AutoScalingGroupNames=["test_asg"]) + response["AutoScalingGroups"][0]["Instances"].should.have.length_of(3) + response["AutoScalingGroups"][0]["DesiredCapacity"].should.equal(2) + + response = client.describe_auto_scaling_instances( + InstanceIds=[instance_to_standby_detach] + ) + response["AutoScalingInstances"][0]["LifecycleState"].should.equal("Standby") + + response = client.detach_instances( + AutoScalingGroupName="test_asg", + InstanceIds=[instance_to_standby_detach], + ShouldDecrementDesiredCapacity=True, + ) + response["ResponseMetadata"]["HTTPStatusCode"].should.equal(200) + + # AWS still decrements desired capacity ASG if requested, even if the detached instance was in standby + response = client.describe_auto_scaling_groups(AutoScalingGroupNames=["test_asg"]) + response["AutoScalingGroups"][0]["Instances"].should.have.length_of(1) + response["AutoScalingGroups"][0]["Instances"][0]["InstanceId"].should_not.equal( + instance_to_standby_detach + ) + response["AutoScalingGroups"][0]["DesiredCapacity"].should.equal(1) + + response = ec2_client.describe_instances(InstanceIds=[instance_to_standby_detach]) + response["Reservations"][0]["Instances"][0]["State"]["Name"].should.equal("running") + + +@mock_autoscaling +@mock_ec2 +def 
test_standby_detach_instance_no_decrement(): + mocked_networking = setup_networking() + client = boto3.client("autoscaling", region_name="us-east-1") + _ = client.create_launch_configuration( + LaunchConfigurationName="test_launch_configuration" + ) + client.create_auto_scaling_group( + AutoScalingGroupName="test_asg", + LaunchConfigurationName="test_launch_configuration", + MinSize=0, + MaxSize=3, + DesiredCapacity=2, + Tags=[ + { + "ResourceId": "test_asg", + "ResourceType": "auto-scaling-group", + "Key": "propogated-tag-key", + "Value": "propagate-tag-value", + "PropagateAtLaunch": True, + } + ], + VPCZoneIdentifier=mocked_networking["subnet1"], + ) + + response = client.describe_auto_scaling_groups(AutoScalingGroupNames=["test_asg"]) + instance_to_standby_detach = response["AutoScalingGroups"][0]["Instances"][0][ + "InstanceId" + ] + + ec2_client = boto3.client("ec2", region_name="us-east-1") + + response = client.enter_standby( + AutoScalingGroupName="test_asg", + InstanceIds=[instance_to_standby_detach], + ShouldDecrementDesiredCapacity=False, + ) + response["ResponseMetadata"]["HTTPStatusCode"].should.equal(200) + + response = client.describe_auto_scaling_groups(AutoScalingGroupNames=["test_asg"]) + response["AutoScalingGroups"][0]["Instances"].should.have.length_of(3) + response["AutoScalingGroups"][0]["DesiredCapacity"].should.equal(2) + + response = client.describe_auto_scaling_instances( + InstanceIds=[instance_to_standby_detach] + ) + response["AutoScalingInstances"][0]["LifecycleState"].should.equal("Standby") + + response = client.detach_instances( + AutoScalingGroupName="test_asg", + InstanceIds=[instance_to_standby_detach], + ShouldDecrementDesiredCapacity=False, + ) + response["ResponseMetadata"]["HTTPStatusCode"].should.equal(200) + + response = client.describe_auto_scaling_groups(AutoScalingGroupNames=["test_asg"]) + group = response["AutoScalingGroups"][0] + group["Instances"].should.have.length_of(2) + instance_to_standby_detach.shouldnt.be.within( + [x["InstanceId"] for x in group["Instances"]] + ) + group["DesiredCapacity"].should.equal(2) + + response = ec2_client.describe_instances(InstanceIds=[instance_to_standby_detach]) + response["Reservations"][0]["Instances"][0]["State"]["Name"].should.equal("running") + + +@mock_autoscaling +@mock_ec2 +def test_standby_exit_standby(): + mocked_networking = setup_networking() + client = boto3.client("autoscaling", region_name="us-east-1") + _ = client.create_launch_configuration( + LaunchConfigurationName="test_launch_configuration" + ) + client.create_auto_scaling_group( + AutoScalingGroupName="test_asg", + LaunchConfigurationName="test_launch_configuration", + MinSize=0, + MaxSize=3, + DesiredCapacity=2, + Tags=[ + { + "ResourceId": "test_asg", + "ResourceType": "auto-scaling-group", + "Key": "propogated-tag-key", + "Value": "propagate-tag-value", + "PropagateAtLaunch": True, + } + ], + VPCZoneIdentifier=mocked_networking["subnet1"], + ) + + response = client.describe_auto_scaling_groups(AutoScalingGroupNames=["test_asg"]) + instance_to_standby_exit_standby = response["AutoScalingGroups"][0]["Instances"][0][ + "InstanceId" + ] + + ec2_client = boto3.client("ec2", region_name="us-east-1") + + response = client.enter_standby( + AutoScalingGroupName="test_asg", + InstanceIds=[instance_to_standby_exit_standby], + ShouldDecrementDesiredCapacity=False, + ) + response["ResponseMetadata"]["HTTPStatusCode"].should.equal(200) + + response = client.describe_auto_scaling_groups(AutoScalingGroupNames=["test_asg"]) + 
response["AutoScalingGroups"][0]["Instances"].should.have.length_of(3) + response["AutoScalingGroups"][0]["DesiredCapacity"].should.equal(2) + + response = client.describe_auto_scaling_instances( + InstanceIds=[instance_to_standby_exit_standby] + ) + response["AutoScalingInstances"][0]["LifecycleState"].should.equal("Standby") + + response = client.exit_standby( + AutoScalingGroupName="test_asg", InstanceIds=[instance_to_standby_exit_standby], + ) + response["ResponseMetadata"]["HTTPStatusCode"].should.equal(200) + + response = client.describe_auto_scaling_groups(AutoScalingGroupNames=["test_asg"]) + group = response["AutoScalingGroups"][0] + group["Instances"].should.have.length_of(3) + instance_to_standby_exit_standby.should.be.within( + [x["InstanceId"] for x in group["Instances"]] + ) + group["DesiredCapacity"].should.equal(3) + + response = ec2_client.describe_instances( + InstanceIds=[instance_to_standby_exit_standby] + ) + response["Reservations"][0]["Instances"][0]["State"]["Name"].should.equal("running") + + @mock_autoscaling @mock_ec2 def test_attach_one_instance(): @@ -1411,7 +1917,7 @@ def test_set_desired_capacity_down_boto3(): @mock_autoscaling @mock_ec2 -def test_terminate_instance_in_autoscaling_group(): +def test_terminate_instance_via_ec2_in_autoscaling_group(): mocked_networking = setup_networking() client = boto3.client("autoscaling", region_name="us-east-1") _ = client.create_launch_configuration( @@ -1440,3 +1946,71 @@ def test_terminate_instance_in_autoscaling_group(): for instance in response["AutoScalingGroups"][0]["Instances"] ) replaced_instance_id.should_not.equal(original_instance_id) + + +@mock_autoscaling +@mock_ec2 +def test_terminate_instance_in_auto_scaling_group_decrement(): + mocked_networking = setup_networking() + client = boto3.client("autoscaling", region_name="us-east-1") + _ = client.create_launch_configuration( + LaunchConfigurationName="test_launch_configuration" + ) + _ = client.create_auto_scaling_group( + AutoScalingGroupName="test_asg", + LaunchConfigurationName="test_launch_configuration", + MinSize=0, + DesiredCapacity=1, + MaxSize=2, + VPCZoneIdentifier=mocked_networking["subnet1"], + NewInstancesProtectedFromScaleIn=False, + ) + + response = client.describe_auto_scaling_groups(AutoScalingGroupNames=["test_asg"]) + original_instance_id = next( + instance["InstanceId"] + for instance in response["AutoScalingGroups"][0]["Instances"] + ) + client.terminate_instance_in_auto_scaling_group( + InstanceId=original_instance_id, ShouldDecrementDesiredCapacity=True + ) + + response = client.describe_auto_scaling_groups(AutoScalingGroupNames=["test_asg"]) + response["AutoScalingGroups"][0]["Instances"].should.equal([]) + response["AutoScalingGroups"][0]["DesiredCapacity"].should.equal(0) + + +@mock_autoscaling +@mock_ec2 +def test_terminate_instance_in_auto_scaling_group_no_decrement(): + mocked_networking = setup_networking() + client = boto3.client("autoscaling", region_name="us-east-1") + _ = client.create_launch_configuration( + LaunchConfigurationName="test_launch_configuration" + ) + _ = client.create_auto_scaling_group( + AutoScalingGroupName="test_asg", + LaunchConfigurationName="test_launch_configuration", + MinSize=0, + DesiredCapacity=1, + MaxSize=2, + VPCZoneIdentifier=mocked_networking["subnet1"], + NewInstancesProtectedFromScaleIn=False, + ) + + response = client.describe_auto_scaling_groups(AutoScalingGroupNames=["test_asg"]) + original_instance_id = next( + instance["InstanceId"] + for instance in 
response["AutoScalingGroups"][0]["Instances"] + ) + client.terminate_instance_in_auto_scaling_group( + InstanceId=original_instance_id, ShouldDecrementDesiredCapacity=False + ) + + response = client.describe_auto_scaling_groups(AutoScalingGroupNames=["test_asg"]) + replaced_instance_id = next( + instance["InstanceId"] + for instance in response["AutoScalingGroups"][0]["Instances"] + ) + replaced_instance_id.should_not.equal(original_instance_id) + response["AutoScalingGroups"][0]["DesiredCapacity"].should.equal(1) From 1e1fe3ee4bffb5470795d8aab16fa3de1145f5a6 Mon Sep 17 00:00:00 2001 From: Bert Blommers Date: Tue, 14 Apr 2020 07:48:13 +0100 Subject: [PATCH 224/658] Update moto/dynamodb2/models.py Co-Authored-By: pvbouwel --- moto/dynamodb2/models.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/moto/dynamodb2/models.py b/moto/dynamodb2/models.py index a35eded61f6c..de2a06fd479f 100644 --- a/moto/dynamodb2/models.py +++ b/moto/dynamodb2/models.py @@ -286,7 +286,7 @@ def __repr__(self): return "Item: {0}".format(self.to_json()) def size(self): - return sum([bytesize(key) + value.size() for key, value in self.attrs.items()]) + return sum(bytesize(key) + value.size() for key, value in self.attrs.items()) def to_json(self): attributes = {} From 8122a40be064f28dd0aa2ea8567cc2fb2ce4dea8 Mon Sep 17 00:00:00 2001 From: Bert Blommers Date: Tue, 14 Apr 2020 07:48:20 +0100 Subject: [PATCH 225/658] Update moto/dynamodb2/models.py Co-Authored-By: pvbouwel --- moto/dynamodb2/models.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/moto/dynamodb2/models.py b/moto/dynamodb2/models.py index de2a06fd479f..62c60efb07f6 100644 --- a/moto/dynamodb2/models.py +++ b/moto/dynamodb2/models.py @@ -1127,7 +1127,7 @@ def _trim_results(self, results, limit, exclusive_start_key, scanned_index=None) last_evaluated_key = None size_limit = 1000000 # DynamoDB has a 1MB size limit - item_size = sum([res.size() for res in results]) + item_size = sum(res.size() for res in results) if item_size > size_limit: item_size = idx = 0 while item_size + results[idx].size() < size_limit: From c2b4c397f272f80684c395150159028ed265fd20 Mon Sep 17 00:00:00 2001 From: Bert Blommers Date: Tue, 14 Apr 2020 07:53:15 +0100 Subject: [PATCH 226/658] DDB test - Fix KeySchema, and set BillingMode for easier online testing --- tests/test_dynamodb2/test_dynamodb.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/tests/test_dynamodb2/test_dynamodb.py b/tests/test_dynamodb2/test_dynamodb.py index daae79232abd..e00a45e1dbb7 100644 --- a/tests/test_dynamodb2/test_dynamodb.py +++ b/tests/test_dynamodb2/test_dynamodb.py @@ -4143,13 +4143,13 @@ def test_dynamodb_max_1mb_limit(): TableName=table_name, KeySchema=[ {"AttributeName": "partition_key", "KeyType": "HASH"}, - {"AttributeName": "sort_key", "KeyType": "SORT"}, + {"AttributeName": "sort_key", "KeyType": "RANGE"}, ], AttributeDefinitions=[ {"AttributeName": "partition_key", "AttributeType": "S"}, {"AttributeName": "sort_key", "AttributeType": "S"}, ], - ProvisionedThroughput={"ReadCapacityUnits": 5, "WriteCapacityUnits": 5}, + BillingMode="PAY_PER_REQUEST", ) # Populate the table @@ -4170,3 +4170,4 @@ def test_dynamodb_max_1mb_limit(): # We shouldn't get everything back - the total result set is well over 1MB assert response["Count"] < len(items) response["LastEvaluatedKey"].shouldnt.be(None) + From 69f963a3c28acf3a0d0d1f2dac955e99cbb6a9c4 Mon Sep 17 00:00:00 2001 From: Bert Blommers Date: Tue, 14 Apr 2020 08:06:00 +0100 Subject: [PATCH 
227/658] Linting --- moto/apigateway/models.py | 44 +++++++------ moto/apigateway/responses.py | 29 ++++----- tests/test_apigateway/test_apigateway.py | 83 ++++++++---------------- 3 files changed, 64 insertions(+), 92 deletions(-) diff --git a/moto/apigateway/models.py b/moto/apigateway/models.py index b6a14b163a1c..e5e5e3bfdd14 100644 --- a/moto/apigateway/models.py +++ b/moto/apigateway/models.py @@ -39,7 +39,7 @@ InvalidRestApiId, InvalidModelName, RestAPINotFound, - ModelNotFound + ModelNotFound, ) STAGE_URL = "https://{api_id}.execute-api.{region_name}.amazonaws.com/{stage_name}" @@ -499,13 +499,15 @@ def add_child(self, path, parent_id=None): self.resources[child_id] = child return child - def add_model(self, - name, - description=None, - schema=None, - content_type=None, - cli_input_json=None, - generate_cli_skeleton=None): + def add_model( + self, + name, + description=None, + schema=None, + content_type=None, + cli_input_json=None, + generate_cli_skeleton=None, + ): model_id = create_id() new_model = Model( id=model_id, @@ -514,7 +516,8 @@ def add_model(self, schema=schema, content_type=content_type, cli_input_json=cli_input_json, - generate_cli_skeleton=generate_cli_skeleton) + generate_cli_skeleton=generate_cli_skeleton, + ) self.models[name] = new_model return new_model @@ -670,7 +673,7 @@ def __init__(self, domain_name, **kwargs): self["generateCliSkeleton"] = kwargs.get("generate_cli_skeleton") -class Model(BaseModel,dict): +class Model(BaseModel, dict): def __init__(self, id, name, **kwargs): super(Model, self).__init__() self["id"] = id @@ -1130,14 +1133,16 @@ def get_domain_name(self, domain_name): else: return self.domain_names[domain_name] - def create_model(self, - rest_api_id, - name, - content_type, - description=None, - schema=None, - cli_input_json=None, - generate_cli_skeleton=None): + def create_model( + self, + rest_api_id, + name, + content_type, + description=None, + schema=None, + cli_input_json=None, + generate_cli_skeleton=None, + ): if not rest_api_id: raise InvalidRestApiId @@ -1151,7 +1156,8 @@ def create_model(self, schema=schema, content_type=content_type, cli_input_json=cli_input_json, - generate_cli_skeleton=generate_cli_skeleton) + generate_cli_skeleton=generate_cli_skeleton, + ) return new_model diff --git a/moto/apigateway/responses.py b/moto/apigateway/responses.py index 02ff536f3fa8..822d4c0ce1a6 100644 --- a/moto/apigateway/responses.py +++ b/moto/apigateway/responses.py @@ -16,7 +16,7 @@ InvalidRestApiId, InvalidModelName, RestAPINotFound, - ModelNotFound + ModelNotFound, ) API_KEY_SOURCES = ["AUTHORIZER", "HEADER"] @@ -600,15 +600,13 @@ def domain_name_induvidual(self, request, full_url, headers): ), ) - def models(self,request, full_url, headers): + def models(self, request, full_url, headers): self.setup_class(request, full_url, headers) rest_api_id = self.path.replace("/restapis/", "", 1).split("/")[0] try: if self.method == "GET": - models = self.backend.get_models( - rest_api_id - ) + models = self.backend.get_models(rest_api_id) return 200, {}, json.dumps({"item": models}) elif self.method == "POST": @@ -617,9 +615,7 @@ def models(self,request, full_url, headers): schema = self._get_param("schema") content_type = self._get_param("contentType") cli_input_json = self._get_param("cliInputJson") - generate_cli_skeleton = self._get_param( - "generateCliSkeleton" - ) + generate_cli_skeleton = self._get_param("generateCliSkeleton") model = self.backend.create_model( rest_api_id, name, @@ -627,12 +623,12 @@ def models(self,request, full_url, 
headers): description, schema, cli_input_json, - generate_cli_skeleton + generate_cli_skeleton, ) return 200, {}, json.dumps(model) - except (InvalidRestApiId, InvalidModelName,RestAPINotFound) as error: + except (InvalidRestApiId, InvalidModelName, RestAPINotFound) as error: return ( error.code, {}, @@ -649,13 +645,14 @@ def model_induvidual(self, request, full_url, headers): model_info = {} try: if self.method == "GET": - model_info = self.backend.get_model( - rest_api_id, - model_name - ) + model_info = self.backend.get_model(rest_api_id, model_name) return 200, {}, json.dumps(model_info) - except (ModelNotFound, RestAPINotFound, InvalidRestApiId, - InvalidModelName) as error: + except ( + ModelNotFound, + RestAPINotFound, + InvalidRestApiId, + InvalidModelName, + ) as error: return ( error.code, {}, diff --git a/tests/test_apigateway/test_apigateway.py b/tests/test_apigateway/test_apigateway.py index 3a6b75104a7f..596ed2dd407a 100644 --- a/tests/test_apigateway/test_apigateway.py +++ b/tests/test_apigateway/test_apigateway.py @@ -1550,82 +1550,67 @@ def test_get_domain_name(): @mock_apigateway def test_create_model(): client = boto3.client("apigateway", region_name="us-west-2") - response = client.create_rest_api(name="my_api", - description="this is my api" - ) + response = client.create_rest_api(name="my_api", description="this is my api") rest_api_id = response["id"] - dummy_rest_api_id = 'a12b3c4d' + dummy_rest_api_id = "a12b3c4d" model_name = "testModel" description = "test model" - content_type = 'application/json' + content_type = "application/json" # success case with valid params response = client.create_model( restApiId=rest_api_id, name=model_name, description=description, - contentType=content_type + contentType=content_type, ) response["name"].should.equal(model_name) response["description"].should.equal(description) # with an invalid rest_api_id it should throw NotFoundException with assert_raises(ClientError) as ex: - client.create_model( + client.create_model( restApiId=dummy_rest_api_id, name=model_name, description=description, - contentType=content_type + contentType=content_type, ) ex.exception.response["Error"]["Message"].should.equal( "Invalid Rest API Id specified" ) - ex.exception.response["Error"]["Code"].should.equal( - "NotFoundException" - ) + ex.exception.response["Error"]["Code"].should.equal("NotFoundException") with assert_raises(ClientError) as ex: - client.create_model( + client.create_model( restApiId=rest_api_id, name="", description=description, - contentType=content_type + contentType=content_type, ) - ex.exception.response["Error"]["Message"].should.equal( - "No Model Name specified" - ) - ex.exception.response["Error"]["Code"].should.equal( - "BadRequestException" - ) + ex.exception.response["Error"]["Message"].should.equal("No Model Name specified") + ex.exception.response["Error"]["Code"].should.equal("BadRequestException") @mock_apigateway def test_get_api_models(): client = boto3.client("apigateway", region_name="us-west-2") - response = client.create_rest_api( - name="my_api", - description="this is my api" - ) + response = client.create_rest_api(name="my_api", description="this is my api") rest_api_id = response["id"] model_name = "testModel" description = "test model" - content_type = 'application/json' + content_type = "application/json" # when no models are present - result = client.get_models( - restApiId=rest_api_id - ) + result = client.get_models(restApiId=rest_api_id) result["items"].should.equal([]) # add a model 
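# Round-trip being exercised here: create_model registers the model with the mocked REST API, after which get_models(restApiId=rest_api_id)["items"] should include an entry whose "name" and "description" match the values passed below — exactly what the rest of this test asserts.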
client.create_model( restApiId=rest_api_id, name=model_name, description=description, - contentType=content_type + contentType=content_type, ) # get models after adding - result = client.get_models( - restApiId=rest_api_id - ) + result = client.get_models(restApiId=rest_api_id) result["items"][0]["name"] = model_name result["items"][0]["description"] = description @@ -1633,60 +1618,44 @@ def test_get_api_models(): @mock_apigateway def test_get_model_by_name(): client = boto3.client("apigateway", region_name="us-west-2") - response = client.create_rest_api( - name="my_api", - description="this is my api" - ) + response = client.create_rest_api(name="my_api", description="this is my api") rest_api_id = response["id"] - dummy_rest_api_id = 'a12b3c4d' + dummy_rest_api_id = "a12b3c4d" model_name = "testModel" description = "test model" - content_type = 'application/json' + content_type = "application/json" # add a model client.create_model( restApiId=rest_api_id, name=model_name, description=description, - contentType=content_type + contentType=content_type, ) # get models after adding - result = client.get_model( - restApiId=rest_api_id, modelName=model_name - ) + result = client.get_model(restApiId=rest_api_id, modelName=model_name) result["name"] = model_name result["description"] = description with assert_raises(ClientError) as ex: - client.get_model( - restApiId=dummy_rest_api_id, modelName=model_name - ) + client.get_model(restApiId=dummy_rest_api_id, modelName=model_name) ex.exception.response["Error"]["Message"].should.equal( "Invalid Rest API Id specified" ) - ex.exception.response["Error"]["Code"].should.equal( - "NotFoundException" - ) + ex.exception.response["Error"]["Code"].should.equal("NotFoundException") @mock_apigateway def test_get_model_with_invalid_name(): client = boto3.client("apigateway", region_name="us-west-2") - response = client.create_rest_api( - name="my_api", - description="this is my api" - ) + response = client.create_rest_api(name="my_api", description="this is my api") rest_api_id = response["id"] # test with an invalid model name with assert_raises(ClientError) as ex: - client.get_model( - restApiId=rest_api_id, modelName="fake" - ) + client.get_model(restApiId=rest_api_id, modelName="fake") ex.exception.response["Error"]["Message"].should.equal( "Invalid Model Name specified" ) - ex.exception.response["Error"]["Code"].should.equal( - "NotFoundException" - ) + ex.exception.response["Error"]["Code"].should.equal("NotFoundException") @mock_apigateway From a6902e87137da229d63d138b898a63ffd12fe326 Mon Sep 17 00:00:00 2001 From: Bert Blommers Date: Wed, 15 Apr 2020 07:26:09 +0100 Subject: [PATCH 228/658] Update tests/test_dynamodb2/test_dynamodb.py Co-Authored-By: Guilherme Martins Crocetti --- tests/test_dynamodb2/test_dynamodb.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/tests/test_dynamodb2/test_dynamodb.py b/tests/test_dynamodb2/test_dynamodb.py index e00a45e1dbb7..2b4c0969c71f 100644 --- a/tests/test_dynamodb2/test_dynamodb.py +++ b/tests/test_dynamodb2/test_dynamodb.py @@ -4168,6 +4168,5 @@ def test_dynamodb_max_1mb_limit(): KeyConditionExpression=Key("partition_key").eq("partition_key_val") ) # We shouldn't get everything back - the total result set is well over 1MB - assert response["Count"] < len(items) + len(items).should.be.greater_than(response["Count"]) response["LastEvaluatedKey"].shouldnt.be(None) - From f04d64d9816e219eeb5b3b310d3870664a174050 Mon Sep 17 00:00:00 2001 From: Jacob-House Date: Wed, 15 Apr 2020 18:48:33 -0230 
Subject: [PATCH 229/658] Update EC2 instance type list --- moto/ec2/resources/instance_types.json | 2 +- scripts/get_instance_info.py | 96 ++++++++++++++++---------- 2 files changed, 59 insertions(+), 39 deletions(-) diff --git a/moto/ec2/resources/instance_types.json b/moto/ec2/resources/instance_types.json index 2fa2e4e936b9..a1b55ba21661 100644 --- a/moto/ec2/resources/instance_types.json +++ b/moto/ec2/resources/instance_types.json @@ -1 +1 @@ -{"m1.xlarge": {"ecu_per_vcpu": 2.0, "network_perf": 9.0, "intel_avx": "", "name": "M1 General Purpose Extra Large", "architecture": "64-bit", "linux_virtualization": "PV", "storage": 1680.0, "placement_group_support": false, "intel_avx2": "", "clock_speed_ghz": "", "apiname": "m1.xlarge", "computeunits": 8.0, "ebs_throughput": 125.0, "vpc_only": false, "max_ips": 60, "physical_processor": "", "fpga": 0, "intel_turbo": "", "enhanced_networking": false, "ebs_iops": 8000.0, "vcpus": 4.0, "memory": 15.0, "ebs_max_bandwidth": 1000.0, "gpus": 0, "ipv6_support": false}, "i3.4xlarge": {"ecu_per_vcpu": 3.3125, "network_perf": 11.0, "intel_avx": "Yes", "name": "I3 High I/O Quadruple Extra Large", "architecture": "64-bit", "linux_virtualization": "Unknown", "storage": 3800.0, "placement_group_support": true, "intel_avx2": "Yes", "clock_speed_ghz": "Yes", "apiname": "i3.4xlarge", "computeunits": 53.0, "ebs_throughput": 400.0, "vpc_only": true, "max_ips": 240, "physical_processor": "Intel Xeon E5-2686 v4", "fpga": 0, "intel_turbo": "Yes", "enhanced_networking": true, "ebs_iops": 16000.0, "vcpus": 16.0, "memory": 122.0, "ebs_max_bandwidth": 3500.0, "gpus": 0, "ipv6_support": true}, "i2.xlarge": {"ecu_per_vcpu": 3.5, "network_perf": 7.0, "intel_avx": "", "name": "I2 Extra Large", "architecture": "64-bit", "linux_virtualization": "HVM", "storage": 800.0, "placement_group_support": true, "intel_avx2": "", "clock_speed_ghz": "", "apiname": "i2.xlarge", "computeunits": 14.0, "ebs_throughput": 62.5, "vpc_only": false, "max_ips": 60, "physical_processor": "", "fpga": 0, "intel_turbo": "", "enhanced_networking": false, "ebs_iops": 4000.0, "vcpus": 4.0, "memory": 30.5, "ebs_max_bandwidth": 500.0, "gpus": 0, "ipv6_support": true}, "hs1.8xlarge": {"ecu_per_vcpu": 2.1875, "network_perf": 12.0, "intel_avx": "", "name": "High Storage Eight Extra Large", "architecture": "64-bit", "linux_virtualization": "HVM, PV", "storage": 48000.0, "placement_group_support": false, "intel_avx2": "", "clock_speed_ghz": "", "apiname": "hs1.8xlarge", "computeunits": 35.0, "ebs_throughput": 0.0, "vpc_only": false, "max_ips": 240, "physical_processor": "", "fpga": 0, "intel_turbo": "", "enhanced_networking": false, "ebs_iops": 0.0, "vcpus": 16.0, "memory": 117.0, "ebs_max_bandwidth": 0.0, "gpus": 0, "ipv6_support": false}, "t2.micro": {"ecu_per_vcpu": 0.0, "network_perf": 4.0, "intel_avx": "Yes", "name": "T2 Micro", "architecture": "32/64-bit", "linux_virtualization": "HVM", "storage": 0.0, "placement_group_support": false, "intel_avx2": "", "clock_speed_ghz": "Yes", "apiname": "t2.micro", "computeunits": 0.1, "ebs_throughput": 0.0, "vpc_only": true, "max_ips": 4, "physical_processor": "Intel Xeon family", "fpga": 0, "intel_turbo": "Yes", "enhanced_networking": false, "ebs_iops": 0.0, "vcpus": 1.0, "memory": 1.0, "ebs_max_bandwidth": 0.0, "gpus": 0, "ipv6_support": true}, "d2.4xlarge": {"ecu_per_vcpu": 3.5, "network_perf": 9.0, "intel_avx": "Yes", "name": "D2 Quadruple Extra Large", "architecture": "64-bit", "linux_virtualization": "HVM", "storage": 24000.0, "placement_group_support": true, 
"intel_avx2": "Yes", "clock_speed_ghz": "Yes", "apiname": "d2.4xlarge", "computeunits": 56.0, "ebs_throughput": 250.0, "vpc_only": false, "max_ips": 240, "physical_processor": "Intel Xeon E5-2676 v3", "fpga": 0, "intel_turbo": "Yes", "enhanced_networking": true, "ebs_iops": 16000.0, "vcpus": 16.0, "memory": 122.0, "ebs_max_bandwidth": 2000.0, "gpus": 0, "ipv6_support": true}, "m2.xlarge": {"ecu_per_vcpu": 3.25, "network_perf": 6.0, "intel_avx": "", "name": "M2 High Memory Extra Large", "architecture": "64-bit", "linux_virtualization": "PV", "storage": 420.0, "placement_group_support": false, "intel_avx2": "", "clock_speed_ghz": "", "apiname": "m2.xlarge", "computeunits": 6.5, "ebs_throughput": 0.0, "vpc_only": false, "max_ips": 60, "physical_processor": "", "fpga": 0, "intel_turbo": "", "enhanced_networking": false, "ebs_iops": 0.0, "vcpus": 2.0, "memory": 17.1, "ebs_max_bandwidth": 0.0, "gpus": 0, "ipv6_support": false}, "p2.xlarge": {"ecu_per_vcpu": 3.0, "network_perf": 9.0, "intel_avx": "Yes", "name": "General Purpose GPU Extra Large", "architecture": "64-bit", "linux_virtualization": "HVM", "storage": 0.0, "placement_group_support": true, "intel_avx2": "Yes", "clock_speed_ghz": "Yes", "apiname": "p2.xlarge", "computeunits": 12.0, "ebs_throughput": 93.75, "vpc_only": true, "max_ips": 60, "physical_processor": "Intel Xeon E5-2686 v4", "fpga": 0, "intel_turbo": "Yes", "enhanced_networking": true, "ebs_iops": 6000.0, "vcpus": 4.0, "memory": 61.0, "ebs_max_bandwidth": 750.0, "gpus": 1, "ipv6_support": true}, "i2.4xlarge": {"ecu_per_vcpu": 3.3125, "network_perf": 9.0, "intel_avx": "", "name": "I2 Quadruple Extra Large", "architecture": "64-bit", "linux_virtualization": "HVM", "storage": 3200.0, "placement_group_support": true, "intel_avx2": "", "clock_speed_ghz": "", "apiname": "i2.4xlarge", "computeunits": 53.0, "ebs_throughput": 250.0, "vpc_only": false, "max_ips": 240, "physical_processor": "", "fpga": 0, "intel_turbo": "", "enhanced_networking": false, "ebs_iops": 16000.0, "vcpus": 16.0, "memory": 122.0, "ebs_max_bandwidth": 2000.0, "gpus": 0, "ipv6_support": true}, "t1.micro": {"ecu_per_vcpu": 0.0, "network_perf": 0.0, "intel_avx": "", "name": "T1 Micro", "architecture": "32/64-bit", "linux_virtualization": "PV", "storage": 0.0, "placement_group_support": false, "intel_avx2": "", "clock_speed_ghz": "", "apiname": "t1.micro", "computeunits": 0.0, "ebs_throughput": 0.0, "vpc_only": false, "max_ips": 4, "physical_processor": "", "fpga": 0, "intel_turbo": "", "enhanced_networking": false, "ebs_iops": 0.0, "vcpus": 1.0, "memory": 0.613, "ebs_max_bandwidth": 0.0, "gpus": 0, "ipv6_support": false}, "d2.xlarge": {"ecu_per_vcpu": 3.5, "network_perf": 7.0, "intel_avx": "Yes", "name": "D2 Extra Large", "architecture": "64-bit", "linux_virtualization": "HVM", "storage": 6000.0, "placement_group_support": true, "intel_avx2": "Yes", "clock_speed_ghz": "Yes", "apiname": "d2.xlarge", "computeunits": 14.0, "ebs_throughput": 93.75, "vpc_only": false, "max_ips": 60, "physical_processor": "Intel Xeon E5-2676 v3", "fpga": 0, "intel_turbo": "Yes", "enhanced_networking": true, "ebs_iops": 6000.0, "vcpus": 4.0, "memory": 30.5, "ebs_max_bandwidth": 750.0, "gpus": 0, "ipv6_support": true}, "r3.2xlarge": {"ecu_per_vcpu": 3.25, "network_perf": 9.0, "intel_avx": "Yes", "name": "R3 High-Memory Double Extra Large", "architecture": "64-bit", "linux_virtualization": "HVM", "storage": 160.0, "placement_group_support": true, "intel_avx2": "", "clock_speed_ghz": "Yes", "apiname": "r3.2xlarge", "computeunits": 26.0, 
"ebs_throughput": 125.0, "vpc_only": false, "max_ips": 60, "physical_processor": "Intel Xeon E5-2670 v2", "fpga": 0, "intel_turbo": "Yes", "enhanced_networking": true, "ebs_iops": 8000.0, "vcpus": 8.0, "memory": 61.0, "ebs_max_bandwidth": 1000.0, "gpus": 0, "ipv6_support": true}, "i3.8xlarge": {"ecu_per_vcpu": 3.09375, "network_perf": 13.0, "intel_avx": "Yes", "name": "I3 High I/O Eight Extra Large", "architecture": "64-bit", "linux_virtualization": "Unknown", "storage": 7600.0, "placement_group_support": true, "intel_avx2": "Yes", "clock_speed_ghz": "Yes", "apiname": "i3.8xlarge", "computeunits": 99.0, "ebs_throughput": 850.0, "vpc_only": true, "max_ips": 240, "physical_processor": "Intel Xeon E5-2686 v4", "fpga": 0, "intel_turbo": "Yes", "enhanced_networking": true, "ebs_iops": 32500.0, "vcpus": 32.0, "memory": 244.0, "ebs_max_bandwidth": 7000.0, "gpus": 0, "ipv6_support": true}, "c3.2xlarge": {"ecu_per_vcpu": 3.5, "network_perf": 9.0, "intel_avx": "Yes", "name": "C3 High-CPU Double Extra Large", "architecture": "64-bit", "linux_virtualization": "HVM, PV", "storage": 160.0, "placement_group_support": true, "intel_avx2": "", "clock_speed_ghz": "Yes", "apiname": "c3.2xlarge", "computeunits": 28.0, "ebs_throughput": 125.0, "vpc_only": false, "max_ips": 60, "physical_processor": "Intel Xeon E5-2680 v2", "fpga": 0, "intel_turbo": "Yes", "enhanced_networking": true, "ebs_iops": 8000.0, "vcpus": 8.0, "memory": 15.0, "ebs_max_bandwidth": 1000.0, "gpus": 0, "ipv6_support": true}, "g2.8xlarge": {"ecu_per_vcpu": 3.25, "network_perf": 16.0, "intel_avx": "", "name": "G2 Eight Extra Large", "architecture": "64-bit", "linux_virtualization": "Unknown", "storage": 240.0, "placement_group_support": true, "intel_avx2": "", "clock_speed_ghz": "", "apiname": "g2.8xlarge", "computeunits": 104.0, "ebs_throughput": 0.0, "vpc_only": false, "max_ips": 240, "physical_processor": "", "fpga": 0, "intel_turbo": "", "enhanced_networking": false, "ebs_iops": 0.0, "vcpus": 32.0, "memory": 60.0, "ebs_max_bandwidth": 0.0, "gpus": 0, "ipv6_support": false}, "t2.medium": {"ecu_per_vcpu": 0.0, "network_perf": 4.0, "intel_avx": "Yes", "name": "T2 Medium", "architecture": "32/64-bit", "linux_virtualization": "HVM", "storage": 0.0, "placement_group_support": false, "intel_avx2": "", "clock_speed_ghz": "Yes", "apiname": "t2.medium", "computeunits": 0.4, "ebs_throughput": 0.0, "vpc_only": true, "max_ips": 18, "physical_processor": "Intel Xeon family", "fpga": 0, "intel_turbo": "Yes", "enhanced_networking": false, "ebs_iops": 0.0, "vcpus": 2.0, "memory": 4.0, "ebs_max_bandwidth": 0.0, "gpus": 0, "ipv6_support": true}, "m4.xlarge": {"ecu_per_vcpu": 3.25, "network_perf": 9.0, "intel_avx": "Yes", "name": "M4 Extra Large", "architecture": "64-bit", "linux_virtualization": "HVM", "storage": 0.0, "placement_group_support": true, "intel_avx2": "Yes", "clock_speed_ghz": "Yes", "apiname": "m4.xlarge", "computeunits": 13.0, "ebs_throughput": 93.75, "vpc_only": true, "max_ips": 60, "physical_processor": "Intel Xeon E5-2676 v3", "fpga": 0, "intel_turbo": "Yes", "enhanced_networking": true, "ebs_iops": 6000.0, "vcpus": 4.0, "memory": 16.0, "ebs_max_bandwidth": 750.0, "gpus": 0, "ipv6_support": true}, "x1.16xlarge": {"ecu_per_vcpu": 2.7265625, "network_perf": 13.0, "intel_avx": "Yes", "name": "X1 Extra High-Memory 16xlarge", "architecture": "64-bit", "linux_virtualization": "HVM", "storage": 1920.0, "placement_group_support": true, "intel_avx2": "Yes", "clock_speed_ghz": "Yes", "apiname": "x1.16xlarge", "computeunits": 174.5, "ebs_throughput": 
875.0, "vpc_only": true, "max_ips": 240, "physical_processor": "Intel Xeon E7-8880 v3", "fpga": 0, "intel_turbo": "Yes", "enhanced_networking": true, "ebs_iops": 40000.0, "vcpus": 64.0, "memory": 976.0, "ebs_max_bandwidth": 7000.0, "gpus": 0, "ipv6_support": true}, "p2.8xlarge": {"ecu_per_vcpu": 2.9375, "network_perf": 13.0, "intel_avx": "Yes", "name": "General Purpose GPU Eight Extra Large", "architecture": "64-bit", "linux_virtualization": "HVM", "storage": 0.0, "placement_group_support": true, "intel_avx2": "Yes", "clock_speed_ghz": "Yes", "apiname": "p2.8xlarge", "computeunits": 94.0, "ebs_throughput": 625.0, "vpc_only": true, "max_ips": 240, "physical_processor": "Intel Xeon E5-2686 v4", "fpga": 0, "intel_turbo": "Yes", "enhanced_networking": true, "ebs_iops": 32500.0, "vcpus": 32.0, "memory": 488.0, "ebs_max_bandwidth": 5000.0, "gpus": 8, "ipv6_support": true}, "f1.16xlarge": {"ecu_per_vcpu": 2.9375, "network_perf": 17.0, "intel_avx": "Yes", "name": "F1 16xlarge", "architecture": "64-bit", "linux_virtualization": "HVM", "storage": 3760.0, "placement_group_support": true, "intel_avx2": "Yes", "clock_speed_ghz": "Yes", "apiname": "f1.16xlarge", "computeunits": 188.0, "ebs_throughput": 1750.0, "vpc_only": true, "max_ips": 400, "physical_processor": "Intel Xeon E5-2686 v4", "fpga": 8, "intel_turbo": "Yes", "enhanced_networking": true, "ebs_iops": 75000.0, "vcpus": 64.0, "memory": 976.0, "ebs_max_bandwidth": 14000.0, "gpus": 0, "ipv6_support": true}, "r4.8xlarge": {"ecu_per_vcpu": 3.09375, "network_perf": 13.0, "intel_avx": "Yes", "name": "R4 High-Memory Eight Extra Large", "architecture": "64-bit", "linux_virtualization": "HVM", "storage": 0.0, "placement_group_support": true, "intel_avx2": "Yes", "clock_speed_ghz": "Yes", "apiname": "r4.8xlarge", "computeunits": 99.0, "ebs_throughput": 875.0, "vpc_only": true, "max_ips": 240, "physical_processor": "Intel Xeon E5-2686 v4", "fpga": 0, "intel_turbo": "Yes", "enhanced_networking": true, "ebs_iops": 37500.0, "vcpus": 32.0, "memory": 244.0, "ebs_max_bandwidth": 7000.0, "gpus": 0, "ipv6_support": true}, "g3.4xlarge": {"ecu_per_vcpu": 2.9375, "network_perf": 11.0, "intel_avx": "Yes", "name": "G3 Quadruple Extra Large", "architecture": "64-bit", "linux_virtualization": "HVM", "storage": 0.0, "placement_group_support": true, "intel_avx2": "Yes", "clock_speed_ghz": "Yes", "apiname": "g3.4xlarge", "computeunits": 47.0, "ebs_throughput": 437.0, "vpc_only": true, "max_ips": 240, "physical_processor": "Intel Xeon E5-2686 v4", "fpga": 0, "intel_turbo": "Yes", "enhanced_networking": true, "ebs_iops": 20000.0, "vcpus": 16.0, "memory": 122.0, "ebs_max_bandwidth": 3500.0, "gpus": 1, "ipv6_support": true}, "cg1.4xlarge": {"ecu_per_vcpu": 2.09375, "network_perf": 12.0, "intel_avx": "", "name": "Cluster GPU Quadruple Extra Large", "architecture": "64-bit", "linux_virtualization": "HVM", "storage": 1680.0, "placement_group_support": false, "intel_avx2": "", "clock_speed_ghz": "", "apiname": "cg1.4xlarge", "computeunits": 33.5, "ebs_throughput": 0.0, "vpc_only": false, "max_ips": 240, "physical_processor": "", "fpga": 0, "intel_turbo": "", "enhanced_networking": false, "ebs_iops": 0.0, "vcpus": 16.0, "memory": 22.5, "ebs_max_bandwidth": 0.0, "gpus": 0, "ipv6_support": false}, "c4.large": {"ecu_per_vcpu": 4.0, "network_perf": 7.0, "intel_avx": "Yes", "name": "C4 High-CPU Large", "architecture": "64-bit", "linux_virtualization": "HVM", "storage": 0.0, "placement_group_support": true, "intel_avx2": "Yes", "clock_speed_ghz": "Yes", "apiname": "c4.large", 
"computeunits": 8.0, "ebs_throughput": 62.5, "vpc_only": true, "max_ips": 30, "physical_processor": "Intel Xeon E5-2666 v3", "fpga": 0, "intel_turbo": "Yes", "enhanced_networking": true, "ebs_iops": 4000.0, "vcpus": 2.0, "memory": 3.75, "ebs_max_bandwidth": 500.0, "gpus": 0, "ipv6_support": true}, "m4.16xlarge": {"ecu_per_vcpu": 2.9375, "network_perf": 17.0, "intel_avx": "Yes", "name": "M4 16xlarge", "architecture": "64-bit", "linux_virtualization": "HVM", "storage": 0.0, "placement_group_support": true, "intel_avx2": "Yes", "clock_speed_ghz": "Yes", "apiname": "m4.16xlarge", "computeunits": 188.0, "ebs_throughput": 1250.0, "vpc_only": true, "max_ips": 240, "physical_processor": "Intel Xeon E5-2686 v4", "fpga": 0, "intel_turbo": "Yes", "enhanced_networking": true, "ebs_iops": 65000.0, "vcpus": 64.0, "memory": 256.0, "ebs_max_bandwidth": 10000.0, "gpus": 0, "ipv6_support": true}, "r4.4xlarge": {"ecu_per_vcpu": 3.3125, "network_perf": 11.0, "intel_avx": "Yes", "name": "R4 High-Memory Quadruple Extra Large", "architecture": "64-bit", "linux_virtualization": "HVM", "storage": 0.0, "placement_group_support": true, "intel_avx2": "Yes", "clock_speed_ghz": "Yes", "apiname": "r4.4xlarge", "computeunits": 53.0, "ebs_throughput": 437.0, "vpc_only": true, "max_ips": 240, "physical_processor": "Intel Xeon E5-2686 v4", "fpga": 0, "intel_turbo": "Yes", "enhanced_networking": true, "ebs_iops": 18750.0, "vcpus": 16.0, "memory": 122.0, "ebs_max_bandwidth": 3500.0, "gpus": 0, "ipv6_support": true}, "r4.2xlarge": {"ecu_per_vcpu": 3.375, "network_perf": 11.0, "intel_avx": "Yes", "name": "R4 High-Memory Double Extra Large", "architecture": "64-bit", "linux_virtualization": "HVM", "storage": 0.0, "placement_group_support": true, "intel_avx2": "Yes", "clock_speed_ghz": "Yes", "apiname": "r4.2xlarge", "computeunits": 27.0, "ebs_throughput": 218.0, "vpc_only": true, "max_ips": 60, "physical_processor": "Intel Xeon E5-2686 v4", "fpga": 0, "intel_turbo": "Yes", "enhanced_networking": true, "ebs_iops": 12000.0, "vcpus": 8.0, "memory": 61.0, "ebs_max_bandwidth": 1750.0, "gpus": 0, "ipv6_support": true}, "c3.xlarge": {"ecu_per_vcpu": 3.5, "network_perf": 7.0, "intel_avx": "Yes", "name": "C3 High-CPU Extra Large", "architecture": "64-bit", "linux_virtualization": "HVM, PV", "storage": 80.0, "placement_group_support": true, "intel_avx2": "", "clock_speed_ghz": "Yes", "apiname": "c3.xlarge", "computeunits": 14.0, "ebs_throughput": 62.5, "vpc_only": false, "max_ips": 60, "physical_processor": "Intel Xeon E5-2680 v2", "fpga": 0, "intel_turbo": "Yes", "enhanced_networking": true, "ebs_iops": 4000.0, "vcpus": 4.0, "memory": 7.5, "ebs_max_bandwidth": 500.0, "gpus": 0, "ipv6_support": true}, "i3.large": {"ecu_per_vcpu": 3.5, "network_perf": 11.0, "intel_avx": "Yes", "name": "I3 High I/O Large", "architecture": "64-bit", "linux_virtualization": "Unknown", "storage": 475.0, "placement_group_support": true, "intel_avx2": "Yes", "clock_speed_ghz": "Yes", "apiname": "i3.large", "computeunits": 7.0, "ebs_throughput": 50.0, "vpc_only": true, "max_ips": 30, "physical_processor": "Intel Xeon E5-2686 v4", "fpga": 0, "intel_turbo": "Yes", "enhanced_networking": true, "ebs_iops": 3000.0, "vcpus": 2.0, "memory": 15.25, "ebs_max_bandwidth": 425.0, "gpus": 0, "ipv6_support": true}, "r4.xlarge": {"ecu_per_vcpu": 3.375, "network_perf": 11.0, "intel_avx": "Yes", "name": "R4 High-Memory Extra Large", "architecture": "64-bit", "linux_virtualization": "HVM", "storage": 0.0, "placement_group_support": true, "intel_avx2": "Yes", "clock_speed_ghz": 
"Yes", "apiname": "r4.xlarge", "computeunits": 13.5, "ebs_throughput": 109.0, "vpc_only": true, "max_ips": 60, "physical_processor": "Intel Xeon E5-2686 v4", "fpga": 0, "intel_turbo": "Yes", "enhanced_networking": true, "ebs_iops": 6000.0, "vcpus": 4.0, "memory": 30.5, "ebs_max_bandwidth": 875.0, "gpus": 0, "ipv6_support": true}, "m2.2xlarge": {"ecu_per_vcpu": 3.25, "network_perf": 7.0, "intel_avx": "", "name": "M2 High Memory Double Extra Large", "architecture": "64-bit", "linux_virtualization": "PV", "storage": 850.0, "placement_group_support": false, "intel_avx2": "", "clock_speed_ghz": "", "apiname": "m2.2xlarge", "computeunits": 13.0, "ebs_throughput": 62.5, "vpc_only": false, "max_ips": 120, "physical_processor": "", "fpga": 0, "intel_turbo": "", "enhanced_networking": false, "ebs_iops": 4000.0, "vcpus": 4.0, "memory": 34.2, "ebs_max_bandwidth": 500.0, "gpus": 0, "ipv6_support": false}, "m3.medium": {"ecu_per_vcpu": 3.0, "network_perf": 6.0, "intel_avx": "Yes", "name": "M3 General Purpose Medium", "architecture": "64-bit", "linux_virtualization": "HVM, PV", "storage": 4.0, "placement_group_support": false, "intel_avx2": "", "clock_speed_ghz": "Yes", "apiname": "m3.medium", "computeunits": 3.0, "ebs_throughput": 0.0, "vpc_only": false, "max_ips": 12, "physical_processor": "Intel Xeon E5-2670 v2", "fpga": 0, "intel_turbo": "Yes", "enhanced_networking": false, "ebs_iops": 0.0, "vcpus": 1.0, "memory": 3.75, "ebs_max_bandwidth": 0.0, "gpus": 0, "ipv6_support": false}, "r3.4xlarge": {"ecu_per_vcpu": 3.25, "network_perf": 9.0, "intel_avx": "Yes", "name": "R3 High-Memory Quadruple Extra Large", "architecture": "64-bit", "linux_virtualization": "HVM", "storage": 320.0, "placement_group_support": true, "intel_avx2": "", "clock_speed_ghz": "Yes", "apiname": "r3.4xlarge", "computeunits": 52.0, "ebs_throughput": 250.0, "vpc_only": false, "max_ips": 240, "physical_processor": "Intel Xeon E5-2670 v2", "fpga": 0, "intel_turbo": "Yes", "enhanced_networking": true, "ebs_iops": 16000.0, "vcpus": 16.0, "memory": 122.0, "ebs_max_bandwidth": 2000.0, "gpus": 0, "ipv6_support": true}, "t2.small": {"ecu_per_vcpu": 0.0, "network_perf": 4.0, "intel_avx": "Yes", "name": "T2 Small", "architecture": "32/64-bit", "linux_virtualization": "HVM", "storage": 0.0, "placement_group_support": false, "intel_avx2": "", "clock_speed_ghz": "Yes", "apiname": "t2.small", "computeunits": 0.2, "ebs_throughput": 0.0, "vpc_only": true, "max_ips": 8, "physical_processor": "Intel Xeon family", "fpga": 0, "intel_turbo": "Yes", "enhanced_networking": false, "ebs_iops": 0.0, "vcpus": 1.0, "memory": 2.0, "ebs_max_bandwidth": 0.0, "gpus": 0, "ipv6_support": true}, "r3.large": {"ecu_per_vcpu": 3.25, "network_perf": 6.0, "intel_avx": "Yes", "name": "R3 High-Memory Large", "architecture": "64-bit", "linux_virtualization": "HVM", "storage": 32.0, "placement_group_support": true, "intel_avx2": "", "clock_speed_ghz": "Yes", "apiname": "r3.large", "computeunits": 6.5, "ebs_throughput": 0.0, "vpc_only": false, "max_ips": 30, "physical_processor": "Intel Xeon E5-2670 v2", "fpga": 0, "intel_turbo": "Yes", "enhanced_networking": true, "ebs_iops": 0.0, "vcpus": 2.0, "memory": 15.25, "ebs_max_bandwidth": 0.0, "gpus": 0, "ipv6_support": true}, "i3.16xlarge": {"ecu_per_vcpu": 3.125, "network_perf": 17.0, "intel_avx": "Yes", "name": "I3 High I/O 16xlarge", "architecture": "64-bit", "linux_virtualization": "Unknown", "storage": 15200.0, "placement_group_support": true, "intel_avx2": "Yes", "clock_speed_ghz": "Yes", "apiname": "i3.16xlarge", 
"computeunits": 200.0, "ebs_throughput": 1750.0, "vpc_only": true, "max_ips": 750, "physical_processor": "Intel Xeon E5-2686 v4", "fpga": 0, "intel_turbo": "Yes", "enhanced_networking": true, "ebs_iops": 65000.0, "vcpus": 64.0, "memory": 488.0, "ebs_max_bandwidth": 14000.0, "gpus": 0, "ipv6_support": true}, "c3.large": {"ecu_per_vcpu": 3.5, "network_perf": 6.0, "intel_avx": "Yes", "name": "C3 High-CPU Large", "architecture": "32/64-bit", "linux_virtualization": "HVM, PV", "storage": 32.0, "placement_group_support": true, "intel_avx2": "", "clock_speed_ghz": "Yes", "apiname": "c3.large", "computeunits": 7.0, "ebs_throughput": 0.0, "vpc_only": false, "max_ips": 30, "physical_processor": "Intel Xeon E5-2680 v2", "fpga": 0, "intel_turbo": "Yes", "enhanced_networking": true, "ebs_iops": 0.0, "vcpus": 2.0, "memory": 3.75, "ebs_max_bandwidth": 0.0, "gpus": 0, "ipv6_support": true}, "i2.2xlarge": {"ecu_per_vcpu": 3.375, "network_perf": 7.0, "intel_avx": "", "name": "I2 Double Extra Large", "architecture": "64-bit", "linux_virtualization": "HVM", "storage": 1600.0, "placement_group_support": true, "intel_avx2": "", "clock_speed_ghz": "", "apiname": "i2.2xlarge", "computeunits": 27.0, "ebs_throughput": 125.0, "vpc_only": false, "max_ips": 60, "physical_processor": "", "fpga": 0, "intel_turbo": "", "enhanced_networking": false, "ebs_iops": 8000.0, "vcpus": 8.0, "memory": 61.0, "ebs_max_bandwidth": 1000.0, "gpus": 0, "ipv6_support": true}, "i3.xlarge": {"ecu_per_vcpu": 3.25, "network_perf": 11.0, "intel_avx": "Yes", "name": "I3 High I/O Extra Large", "architecture": "64-bit", "linux_virtualization": "Unknown", "storage": 950.0, "placement_group_support": true, "intel_avx2": "Yes", "clock_speed_ghz": "Yes", "apiname": "i3.xlarge", "computeunits": 13.0, "ebs_throughput": 100.0, "vpc_only": true, "max_ips": 60, "physical_processor": "Intel Xeon E5-2686 v4", "fpga": 0, "intel_turbo": "Yes", "enhanced_networking": true, "ebs_iops": 6000.0, "vcpus": 4.0, "memory": 30.5, "ebs_max_bandwidth": 850.0, "gpus": 0, "ipv6_support": true}, "i2.8xlarge": {"ecu_per_vcpu": 3.25, "network_perf": 13.0, "intel_avx": "", "name": "I2 Eight Extra Large", "architecture": "64-bit", "linux_virtualization": "HVM", "storage": 6400.0, "placement_group_support": true, "intel_avx2": "", "clock_speed_ghz": "", "apiname": "i2.8xlarge", "computeunits": 104.0, "ebs_throughput": 0.0, "vpc_only": false, "max_ips": 240, "physical_processor": "", "fpga": 0, "intel_turbo": "", "enhanced_networking": false, "ebs_iops": 0.0, "vcpus": 32.0, "memory": 244.0, "ebs_max_bandwidth": 0.0, "gpus": 0, "ipv6_support": true}, "r4.16xlarge": {"ecu_per_vcpu": 3.046875, "network_perf": 17.0, "intel_avx": "Yes", "name": "R4 High-Memory 16xlarge", "architecture": "64-bit", "linux_virtualization": "HVM", "storage": 0.0, "placement_group_support": true, "intel_avx2": "Yes", "clock_speed_ghz": "Yes", "apiname": "r4.16xlarge", "computeunits": 195.0, "ebs_throughput": 1750.0, "vpc_only": true, "max_ips": 750, "physical_processor": "Intel Xeon E5-2686 v4", "fpga": 0, "intel_turbo": "Yes", "enhanced_networking": true, "ebs_iops": 75000.0, "vcpus": 64.0, "memory": 488.0, "ebs_max_bandwidth": 14000.0, "gpus": 0, "ipv6_support": true}, "g3.8xlarge": {"ecu_per_vcpu": 2.9375, "network_perf": 13.0, "intel_avx": "Yes", "name": "G3 Eight Extra Large", "architecture": "64-bit", "linux_virtualization": "HVM", "storage": 0.0, "placement_group_support": true, "intel_avx2": "Yes", "clock_speed_ghz": "Yes", "apiname": "g3.8xlarge", "computeunits": 94.0, "ebs_throughput": 875.0, 
"vpc_only": true, "max_ips": 240, "physical_processor": "Intel Xeon E5-2686 v4", "fpga": 0, "intel_turbo": "Yes", "enhanced_networking": true, "ebs_iops": 40000.0, "vcpus": 32.0, "memory": 244.0, "ebs_max_bandwidth": 7000.0, "gpus": 2, "ipv6_support": true}, "c3.4xlarge": {"ecu_per_vcpu": 3.4375, "network_perf": 9.0, "intel_avx": "Yes", "name": "C3 High-CPU Quadruple Extra Large", "architecture": "64-bit", "linux_virtualization": "HVM, PV", "storage": 320.0, "placement_group_support": true, "intel_avx2": "", "clock_speed_ghz": "Yes", "apiname": "c3.4xlarge", "computeunits": 55.0, "ebs_throughput": 250.0, "vpc_only": false, "max_ips": 240, "physical_processor": "Intel Xeon E5-2680 v2", "fpga": 0, "intel_turbo": "Yes", "enhanced_networking": true, "ebs_iops": 16000.0, "vcpus": 16.0, "memory": 30.0, "ebs_max_bandwidth": 2000.0, "gpus": 0, "ipv6_support": true}, "r4.large": {"ecu_per_vcpu": 3.5, "network_perf": 11.0, "intel_avx": "Yes", "name": "R4 High-Memory Large", "architecture": "64-bit", "linux_virtualization": "HVM", "storage": 0.0, "placement_group_support": true, "intel_avx2": "Yes", "clock_speed_ghz": "Yes", "apiname": "r4.large", "computeunits": 7.0, "ebs_throughput": 54.0, "vpc_only": true, "max_ips": 30, "physical_processor": "Intel Xeon E5-2686 v4", "fpga": 0, "intel_turbo": "Yes", "enhanced_networking": true, "ebs_iops": 3000.0, "vcpus": 2.0, "memory": 15.25, "ebs_max_bandwidth": 437.0, "gpus": 0, "ipv6_support": true}, "f1.2xlarge": {"ecu_per_vcpu": 3.25, "network_perf": 11.0, "intel_avx": "Yes", "name": "F1 Double Extra Large", "architecture": "64-bit", "linux_virtualization": "HVM", "storage": 470.0, "placement_group_support": true, "intel_avx2": "Yes", "clock_speed_ghz": "Yes", "apiname": "f1.2xlarge", "computeunits": 26.0, "ebs_throughput": 200.0, "vpc_only": true, "max_ips": 60, "physical_processor": "Intel Xeon E5-2686 v4", "fpga": 1, "intel_turbo": "Yes", "enhanced_networking": true, "ebs_iops": 12000.0, "vcpus": 8.0, "memory": 122.0, "ebs_max_bandwidth": 1700.0, "gpus": 0, "ipv6_support": true}, "m4.2xlarge": {"ecu_per_vcpu": 3.25, "network_perf": 9.0, "intel_avx": "Yes", "name": "M4 Double Extra Large", "architecture": "64-bit", "linux_virtualization": "HVM", "storage": 0.0, "placement_group_support": true, "intel_avx2": "Yes", "clock_speed_ghz": "Yes", "apiname": "m4.2xlarge", "computeunits": 26.0, "ebs_throughput": 125.0, "vpc_only": true, "max_ips": 60, "physical_processor": "Intel Xeon E5-2676 v3", "fpga": 0, "intel_turbo": "Yes", "enhanced_networking": true, "ebs_iops": 8000.0, "vcpus": 8.0, "memory": 32.0, "ebs_max_bandwidth": 1000.0, "gpus": 0, "ipv6_support": true}, "m3.2xlarge": {"ecu_per_vcpu": 3.25, "network_perf": 9.0, "intel_avx": "Yes", "name": "M3 General Purpose Double Extra Large", "architecture": "64-bit", "linux_virtualization": "HVM, PV", "storage": 160.0, "placement_group_support": false, "intel_avx2": "", "clock_speed_ghz": "Yes", "apiname": "m3.2xlarge", "computeunits": 26.0, "ebs_throughput": 125.0, "vpc_only": false, "max_ips": 120, "physical_processor": "Intel Xeon E5-2670 v2", "fpga": 0, "intel_turbo": "Yes", "enhanced_networking": false, "ebs_iops": 8000.0, "vcpus": 8.0, "memory": 30.0, "ebs_max_bandwidth": 1000.0, "gpus": 0, "ipv6_support": false}, "c3.8xlarge": {"ecu_per_vcpu": 3.375, "network_perf": 12.0, "intel_avx": "Yes", "name": "C3 High-CPU Eight Extra Large", "architecture": "64-bit", "linux_virtualization": "HVM, PV", "storage": 640.0, "placement_group_support": true, "intel_avx2": "", "clock_speed_ghz": "Yes", "apiname": 
"c3.8xlarge", "computeunits": 108.0, "ebs_throughput": 0.0, "vpc_only": false, "max_ips": 240, "physical_processor": "Intel Xeon E5-2680 v2", "fpga": 0, "intel_turbo": "Yes", "enhanced_networking": true, "ebs_iops": 0.0, "vcpus": 32.0, "memory": 60.0, "ebs_max_bandwidth": 0.0, "gpus": 0, "ipv6_support": true}, "cr1.8xlarge": {"ecu_per_vcpu": 2.75, "network_perf": 12.0, "intel_avx": "", "name": "High Memory Cluster Eight Extra Large", "architecture": "64-bit", "linux_virtualization": "Unknown", "storage": 240.0, "placement_group_support": false, "intel_avx2": "", "clock_speed_ghz": "", "apiname": "cr1.8xlarge", "computeunits": 88.0, "ebs_throughput": 0.0, "vpc_only": false, "max_ips": 240, "physical_processor": "", "fpga": 0, "intel_turbo": "", "enhanced_networking": false, "ebs_iops": 0.0, "vcpus": 32.0, "memory": 244.0, "ebs_max_bandwidth": 0.0, "gpus": 0, "ipv6_support": false}, "cc2.8xlarge": {"ecu_per_vcpu": 2.75, "network_perf": 12.0, "intel_avx": "", "name": "Cluster Compute Eight Extra Large", "architecture": "64-bit", "linux_virtualization": "HVM", "storage": 3360.0, "placement_group_support": false, "intel_avx2": "", "clock_speed_ghz": "", "apiname": "cc2.8xlarge", "computeunits": 88.0, "ebs_throughput": 0.0, "vpc_only": false, "max_ips": 240, "physical_processor": "", "fpga": 0, "intel_turbo": "", "enhanced_networking": false, "ebs_iops": 0.0, "vcpus": 32.0, "memory": 60.5, "ebs_max_bandwidth": 0.0, "gpus": 0, "ipv6_support": false}, "m1.large": {"ecu_per_vcpu": 2.0, "network_perf": 7.0, "intel_avx": "", "name": "M1 General Purpose Large", "architecture": "64-bit", "linux_virtualization": "PV", "storage": 840.0, "placement_group_support": false, "intel_avx2": "", "clock_speed_ghz": "", "apiname": "m1.large", "computeunits": 4.0, "ebs_throughput": 62.5, "vpc_only": false, "max_ips": 30, "physical_processor": "", "fpga": 0, "intel_turbo": "", "enhanced_networking": false, "ebs_iops": 4000.0, "vcpus": 2.0, "memory": 7.5, "ebs_max_bandwidth": 500.0, "gpus": 0, "ipv6_support": false}, "r3.xlarge": {"ecu_per_vcpu": 3.25, "network_perf": 7.0, "intel_avx": "Yes", "name": "R3 High-Memory Extra Large", "architecture": "64-bit", "linux_virtualization": "HVM", "storage": 80.0, "placement_group_support": true, "intel_avx2": "", "clock_speed_ghz": "Yes", "apiname": "r3.xlarge", "computeunits": 13.0, "ebs_throughput": 62.5, "vpc_only": false, "max_ips": 60, "physical_processor": "Intel Xeon E5-2670 v2", "fpga": 0, "intel_turbo": "Yes", "enhanced_networking": true, "ebs_iops": 4000.0, "vcpus": 4.0, "memory": 30.5, "ebs_max_bandwidth": 500.0, "gpus": 0, "ipv6_support": true}, "g3.16xlarge": {"ecu_per_vcpu": 2.9375, "network_perf": 17.0, "intel_avx": "Yes", "name": "G3 16xlarge", "architecture": "64-bit", "linux_virtualization": "HVM", "storage": 0.0, "placement_group_support": true, "intel_avx2": "Yes", "clock_speed_ghz": "Yes", "apiname": "g3.16xlarge", "computeunits": 188.0, "ebs_throughput": 1750.0, "vpc_only": true, "max_ips": 750, "physical_processor": "Intel Xeon E5-2686 v4", "fpga": 0, "intel_turbo": "Yes", "enhanced_networking": true, "ebs_iops": 80000.0, "vcpus": 64.0, "memory": 488.0, "ebs_max_bandwidth": 14000.0, "gpus": 4, "ipv6_support": true}, "m1.medium": {"ecu_per_vcpu": 2.0, "network_perf": 6.0, "intel_avx": "", "name": "M1 General Purpose Medium", "architecture": "32/64-bit", "linux_virtualization": "PV", "storage": 410.0, "placement_group_support": false, "intel_avx2": "", "clock_speed_ghz": "", "apiname": "m1.medium", "computeunits": 2.0, "ebs_throughput": 0.0, "vpc_only": 
false, "max_ips": 12, "physical_processor": "", "fpga": 0, "intel_turbo": "", "enhanced_networking": false, "ebs_iops": 0.0, "vcpus": 1.0, "memory": 3.75, "ebs_max_bandwidth": 0.0, "gpus": 0, "ipv6_support": false}, "i3.2xlarge": {"ecu_per_vcpu": 3.375, "network_perf": 11.0, "intel_avx": "Yes", "name": "I3 High I/O Double Extra Large", "architecture": "64-bit", "linux_virtualization": "Unknown", "storage": 1900.0, "placement_group_support": true, "intel_avx2": "Yes", "clock_speed_ghz": "Yes", "apiname": "i3.2xlarge", "computeunits": 27.0, "ebs_throughput": 200.0, "vpc_only": true, "max_ips": 60, "physical_processor": "Intel Xeon E5-2686 v4", "fpga": 0, "intel_turbo": "Yes", "enhanced_networking": true, "ebs_iops": 12000.0, "vcpus": 8.0, "memory": 61.0, "ebs_max_bandwidth": 1700.0, "gpus": 0, "ipv6_support": true}, "t2.xlarge": {"ecu_per_vcpu": 0.0, "network_perf": 6.0, "intel_avx": "Yes", "name": "T2 Extra Large", "architecture": "64-bit", "linux_virtualization": "HVM", "storage": 0.0, "placement_group_support": false, "intel_avx2": "", "clock_speed_ghz": "Yes", "apiname": "t2.xlarge", "computeunits": 0.9, "ebs_throughput": 0.0, "vpc_only": true, "max_ips": 45, "physical_processor": "Intel Xeon family", "fpga": 0, "intel_turbo": "Yes", "enhanced_networking": false, "ebs_iops": 0.0, "vcpus": 4.0, "memory": 16.0, "ebs_max_bandwidth": 0.0, "gpus": 0, "ipv6_support": true}, "g2.2xlarge": {"ecu_per_vcpu": 3.25, "network_perf": 16.0, "intel_avx": "", "name": "G2 Double Extra Large", "architecture": "64-bit", "linux_virtualization": "Unknown", "storage": 60.0, "placement_group_support": true, "intel_avx2": "", "clock_speed_ghz": "", "apiname": "g2.2xlarge", "computeunits": 26.0, "ebs_throughput": 125.0, "vpc_only": false, "max_ips": 60, "physical_processor": "", "fpga": 0, "intel_turbo": "", "enhanced_networking": false, "ebs_iops": 8000.0, "vcpus": 8.0, "memory": 15.0, "ebs_max_bandwidth": 1000.0, "gpus": 0, "ipv6_support": false}, "c1.medium": {"ecu_per_vcpu": 2.5, "network_perf": 6.0, "intel_avx": "", "name": "C1 High-CPU Medium", "architecture": "32/64-bit", "linux_virtualization": "PV", "storage": 350.0, "placement_group_support": false, "intel_avx2": "", "clock_speed_ghz": "", "apiname": "c1.medium", "computeunits": 5.0, "ebs_throughput": 0.0, "vpc_only": false, "max_ips": 12, "physical_processor": "", "fpga": 0, "intel_turbo": "", "enhanced_networking": false, "ebs_iops": 0.0, "vcpus": 2.0, "memory": 1.7, "ebs_max_bandwidth": 0.0, "gpus": 0, "ipv6_support": false}, "t2.large": {"ecu_per_vcpu": 0.0, "network_perf": 4.0, "intel_avx": "Yes", "name": "T2 Large", "architecture": "64-bit", "linux_virtualization": "HVM", "storage": 0.0, "placement_group_support": false, "intel_avx2": "", "clock_speed_ghz": "Yes", "apiname": "t2.large", "computeunits": 0.6, "ebs_throughput": 0.0, "vpc_only": true, "max_ips": 36, "physical_processor": "Intel Xeon family", "fpga": 0, "intel_turbo": "Yes", "enhanced_networking": false, "ebs_iops": 0.0, "vcpus": 2.0, "memory": 8.0, "ebs_max_bandwidth": 0.0, "gpus": 0, "ipv6_support": true}, "d2.2xlarge": {"ecu_per_vcpu": 3.5, "network_perf": 9.0, "intel_avx": "Yes", "name": "D2 Double Extra Large", "architecture": "64-bit", "linux_virtualization": "HVM", "storage": 12000.0, "placement_group_support": true, "intel_avx2": "Yes", "clock_speed_ghz": "Yes", "apiname": "d2.2xlarge", "computeunits": 28.0, "ebs_throughput": 125.0, "vpc_only": false, "max_ips": 60, "physical_processor": "Intel Xeon E5-2676 v3", "fpga": 0, "intel_turbo": "Yes", "enhanced_networking": true, 
"ebs_iops": 8000.0, "vcpus": 8.0, "memory": 61.0, "ebs_max_bandwidth": 1000.0, "gpus": 0, "ipv6_support": true}, "c4.8xlarge": {"ecu_per_vcpu": 3.66666666667, "network_perf": 13.0, "intel_avx": "Yes", "name": "C4 High-CPU Eight Extra Large", "architecture": "64-bit", "linux_virtualization": "HVM", "storage": 0.0, "placement_group_support": true, "intel_avx2": "Yes", "clock_speed_ghz": "Yes", "apiname": "c4.8xlarge", "computeunits": 132.0, "ebs_throughput": 500.0, "vpc_only": true, "max_ips": 240, "physical_processor": "Intel Xeon E5-2666 v3", "fpga": 0, "intel_turbo": "Yes", "enhanced_networking": true, "ebs_iops": 32000.0, "vcpus": 36.0, "memory": 60.0, "ebs_max_bandwidth": 4000.0, "gpus": 0, "ipv6_support": true}, "c4.2xlarge": {"ecu_per_vcpu": 3.875, "network_perf": 9.0, "intel_avx": "Yes", "name": "C4 High-CPU Double Extra Large", "architecture": "64-bit", "linux_virtualization": "HVM", "storage": 0.0, "placement_group_support": true, "intel_avx2": "Yes", "clock_speed_ghz": "Yes", "apiname": "c4.2xlarge", "computeunits": 31.0, "ebs_throughput": 125.0, "vpc_only": true, "max_ips": 60, "physical_processor": "Intel Xeon E5-2666 v3", "fpga": 0, "intel_turbo": "Yes", "enhanced_networking": true, "ebs_iops": 8000.0, "vcpus": 8.0, "memory": 15.0, "ebs_max_bandwidth": 1000.0, "gpus": 0, "ipv6_support": true}, "x1e.32xlarge": {"ecu_per_vcpu": 2.65625, "network_perf": 17.0, "intel_avx": "Yes", "name": "X1E 32xlarge", "architecture": "64-bit", "linux_virtualization": "Unknown", "storage": 3840.0, "placement_group_support": false, "intel_avx2": "Yes", "clock_speed_ghz": "Yes", "apiname": "x1e.32xlarge", "computeunits": 340.0, "ebs_throughput": 1750.0, "vpc_only": true, "max_ips": 240, "physical_processor": "Intel Xeon E7-8880 v3", "fpga": 0, "intel_turbo": "Yes", "enhanced_networking": true, "ebs_iops": 80000.0, "vcpus": 128.0, "memory": 3904.0, "ebs_max_bandwidth": 14000.0, "gpus": 0, "ipv6_support": false}, "m4.10xlarge": {"ecu_per_vcpu": 3.1125, "network_perf": 13.0, "intel_avx": "Yes", "name": "M4 Deca Extra Large", "architecture": "64-bit", "linux_virtualization": "HVM", "storage": 0.0, "placement_group_support": true, "intel_avx2": "Yes", "clock_speed_ghz": "Yes", "apiname": "m4.10xlarge", "computeunits": 124.5, "ebs_throughput": 500.0, "vpc_only": true, "max_ips": 240, "physical_processor": "Intel Xeon E5-2676 v3", "fpga": 0, "intel_turbo": "Yes", "enhanced_networking": true, "ebs_iops": 32000.0, "vcpus": 40.0, "memory": 160.0, "ebs_max_bandwidth": 4000.0, "gpus": 0, "ipv6_support": true}, "t2.2xlarge": {"ecu_per_vcpu": 0.0, "network_perf": 6.0, "intel_avx": "Yes", "name": "T2 Double Extra Large", "architecture": "64-bit", "linux_virtualization": "HVM", "storage": 0.0, "placement_group_support": false, "intel_avx2": "", "clock_speed_ghz": "Yes", "apiname": "t2.2xlarge", "computeunits": 1.35, "ebs_throughput": 0.0, "vpc_only": true, "max_ips": 45, "physical_processor": "Intel Xeon family", "fpga": 0, "intel_turbo": "Yes", "enhanced_networking": false, "ebs_iops": 0.0, "vcpus": 8.0, "memory": 32.0, "ebs_max_bandwidth": 0.0, "gpus": 0, "ipv6_support": true}, "m4.4xlarge": {"ecu_per_vcpu": 3.34375, "network_perf": 9.0, "intel_avx": "Yes", "name": "M4 Quadruple Extra Large", "architecture": "64-bit", "linux_virtualization": "HVM", "storage": 0.0, "placement_group_support": true, "intel_avx2": "Yes", "clock_speed_ghz": "Yes", "apiname": "m4.4xlarge", "computeunits": 53.5, "ebs_throughput": 250.0, "vpc_only": true, "max_ips": 240, "physical_processor": "Intel Xeon E5-2676 v3", "fpga": 0, 
"intel_turbo": "Yes", "enhanced_networking": true, "ebs_iops": 16000.0, "vcpus": 16.0, "memory": 64.0, "ebs_max_bandwidth": 2000.0, "gpus": 0, "ipv6_support": true}, "t2.nano": {"ecu_per_vcpu": 0.0, "network_perf": 2.0, "intel_avx": "Yes", "name": "T2 Nano", "architecture": "32/64-bit", "linux_virtualization": "HVM", "storage": 0.0, "placement_group_support": false, "intel_avx2": "", "clock_speed_ghz": "Yes", "apiname": "t2.nano", "computeunits": 0.05, "ebs_throughput": 0.0, "vpc_only": true, "max_ips": 4, "physical_processor": "Intel Xeon family", "fpga": 0, "intel_turbo": "Yes", "enhanced_networking": false, "ebs_iops": 0.0, "vcpus": 1.0, "memory": 0.5, "ebs_max_bandwidth": 0.0, "gpus": 0, "ipv6_support": true}, "d2.8xlarge": {"ecu_per_vcpu": 3.22222222222, "network_perf": 13.0, "intel_avx": "Yes", "name": "D2 Eight Extra Large", "architecture": "64-bit", "linux_virtualization": "HVM", "storage": 48000.0, "placement_group_support": true, "intel_avx2": "Yes", "clock_speed_ghz": "Yes", "apiname": "d2.8xlarge", "computeunits": 116.0, "ebs_throughput": 500.0, "vpc_only": false, "max_ips": 240, "physical_processor": "Intel Xeon E5-2676 v3", "fpga": 0, "intel_turbo": "Yes", "enhanced_networking": true, "ebs_iops": 32000.0, "vcpus": 36.0, "memory": 244.0, "ebs_max_bandwidth": 4000.0, "gpus": 0, "ipv6_support": true}, "m3.large": {"ecu_per_vcpu": 3.25, "network_perf": 6.0, "intel_avx": "Yes", "name": "M3 General Purpose Large", "architecture": "64-bit", "linux_virtualization": "HVM, PV", "storage": 32.0, "placement_group_support": false, "intel_avx2": "", "clock_speed_ghz": "Yes", "apiname": "m3.large", "computeunits": 6.5, "ebs_throughput": 0.0, "vpc_only": false, "max_ips": 30, "physical_processor": "Intel Xeon E5-2670 v2", "fpga": 0, "intel_turbo": "Yes", "enhanced_networking": false, "ebs_iops": 0.0, "vcpus": 2.0, "memory": 7.5, "ebs_max_bandwidth": 0.0, "gpus": 0, "ipv6_support": false}, "m2.4xlarge": {"ecu_per_vcpu": 3.25, "network_perf": 9.0, "intel_avx": "", "name": "M2 High Memory Quadruple Extra Large", "architecture": "64-bit", "linux_virtualization": "PV", "storage": 1680.0, "placement_group_support": false, "intel_avx2": "", "clock_speed_ghz": "", "apiname": "m2.4xlarge", "computeunits": 26.0, "ebs_throughput": 125.0, "vpc_only": false, "max_ips": 240, "physical_processor": "", "fpga": 0, "intel_turbo": "", "enhanced_networking": false, "ebs_iops": 8000.0, "vcpus": 8.0, "memory": 68.4, "ebs_max_bandwidth": 1000.0, "gpus": 0, "ipv6_support": false}, "m1.small": {"ecu_per_vcpu": 1.0, "network_perf": 2.0, "intel_avx": "", "name": "M1 General Purpose Small", "architecture": "32/64-bit", "linux_virtualization": "PV", "storage": 160.0, "placement_group_support": false, "intel_avx2": "", "clock_speed_ghz": "", "apiname": "m1.small", "computeunits": 1.0, "ebs_throughput": 0.0, "vpc_only": false, "max_ips": 8, "physical_processor": "", "fpga": 0, "intel_turbo": "", "enhanced_networking": false, "ebs_iops": 0.0, "vcpus": 1.0, "memory": 1.7, "ebs_max_bandwidth": 0.0, "gpus": 0, "ipv6_support": false}, "c1.xlarge": {"ecu_per_vcpu": 2.5, "network_perf": 9.0, "intel_avx": "", "name": "C1 High-CPU Extra Large", "architecture": "64-bit", "linux_virtualization": "PV", "storage": 1680.0, "placement_group_support": false, "intel_avx2": "", "clock_speed_ghz": "", "apiname": "c1.xlarge", "computeunits": 20.0, "ebs_throughput": 125.0, "vpc_only": false, "max_ips": 60, "physical_processor": "", "fpga": 0, "intel_turbo": "", "enhanced_networking": false, "ebs_iops": 8000.0, "vcpus": 8.0, "memory": 7.0, 
"ebs_max_bandwidth": 1000.0, "gpus": 0, "ipv6_support": false}, "x1.32xlarge": {"ecu_per_vcpu": 2.7265625, "network_perf": 17.0, "intel_avx": "Yes", "name": "X1 Extra High-Memory 32xlarge", "architecture": "64-bit", "linux_virtualization": "HVM", "storage": 3840.0, "placement_group_support": true, "intel_avx2": "Yes", "clock_speed_ghz": "Yes", "apiname": "x1.32xlarge", "computeunits": 349.0, "ebs_throughput": 1750.0, "vpc_only": true, "max_ips": 240, "physical_processor": "Intel Xeon E7-8880 v3", "fpga": 0, "intel_turbo": "Yes", "enhanced_networking": true, "ebs_iops": 80000.0, "vcpus": 128.0, "memory": 1952.0, "ebs_max_bandwidth": 14000.0, "gpus": 0, "ipv6_support": true}, "r3.8xlarge": {"ecu_per_vcpu": 3.25, "network_perf": 12.0, "intel_avx": "Yes", "name": "R3 High-Memory Eight Extra Large", "architecture": "64-bit", "linux_virtualization": "HVM", "storage": 640.0, "placement_group_support": true, "intel_avx2": "", "clock_speed_ghz": "Yes", "apiname": "r3.8xlarge", "computeunits": 104.0, "ebs_throughput": 0.0, "vpc_only": false, "max_ips": 240, "physical_processor": "Intel Xeon E5-2670 v2", "fpga": 0, "intel_turbo": "Yes", "enhanced_networking": true, "ebs_iops": 0.0, "vcpus": 32.0, "memory": 244.0, "ebs_max_bandwidth": 0.0, "gpus": 0, "ipv6_support": true}, "m4.large": {"ecu_per_vcpu": 3.25, "network_perf": 7.0, "intel_avx": "Yes", "name": "M4 Large", "architecture": "64-bit", "linux_virtualization": "HVM", "storage": 0.0, "placement_group_support": true, "intel_avx2": "Yes", "clock_speed_ghz": "Yes", "apiname": "m4.large", "computeunits": 6.5, "ebs_throughput": 56.25, "vpc_only": true, "max_ips": 20, "physical_processor": "Intel Xeon E5-2676 v3", "fpga": 0, "intel_turbo": "Yes", "enhanced_networking": true, "ebs_iops": 3600.0, "vcpus": 2.0, "memory": 8.0, "ebs_max_bandwidth": 450.0, "gpus": 0, "ipv6_support": true}, "p2.16xlarge": {"ecu_per_vcpu": 2.9375, "network_perf": 17.0, "intel_avx": "Yes", "name": "General Purpose GPU 16xlarge", "architecture": "64-bit", "linux_virtualization": "HVM", "storage": 0.0, "placement_group_support": true, "intel_avx2": "Yes", "clock_speed_ghz": "Yes", "apiname": "p2.16xlarge", "computeunits": 188.0, "ebs_throughput": 1250.0, "vpc_only": true, "max_ips": 240, "physical_processor": "Intel Xeon E5-2686 v4", "fpga": 0, "intel_turbo": "Yes", "enhanced_networking": true, "ebs_iops": 65000.0, "vcpus": 64.0, "memory": 732.0, "ebs_max_bandwidth": 10000.0, "gpus": 16, "ipv6_support": true}, "hi1.4xlarge": {"ecu_per_vcpu": 2.1875, "network_perf": 12.0, "intel_avx": "", "name": "HI1. 
High I/O Quadruple Extra Large", "architecture": "64-bit", "linux_virtualization": "HVM, PV", "storage": 2048.0, "placement_group_support": false, "intel_avx2": "", "clock_speed_ghz": "", "apiname": "hi1.4xlarge", "computeunits": 35.0, "ebs_throughput": 0.0, "vpc_only": false, "max_ips": 240, "physical_processor": "", "fpga": 0, "intel_turbo": "", "enhanced_networking": false, "ebs_iops": 0.0, "vcpus": 16.0, "memory": 60.5, "ebs_max_bandwidth": 0.0, "gpus": 0, "ipv6_support": false}, "c4.4xlarge": {"ecu_per_vcpu": 3.875, "network_perf": 9.0, "intel_avx": "Yes", "name": "C4 High-CPU Quadruple Extra Large", "architecture": "64-bit", "linux_virtualization": "HVM", "storage": 0.0, "placement_group_support": true, "intel_avx2": "Yes", "clock_speed_ghz": "Yes", "apiname": "c4.4xlarge", "computeunits": 62.0, "ebs_throughput": 250.0, "vpc_only": true, "max_ips": 240, "physical_processor": "Intel Xeon E5-2666 v3", "fpga": 0, "intel_turbo": "Yes", "enhanced_networking": true, "ebs_iops": 16000.0, "vcpus": 16.0, "memory": 30.0, "ebs_max_bandwidth": 2000.0, "gpus": 0, "ipv6_support": true}, "c4.xlarge": {"ecu_per_vcpu": 4.0, "network_perf": 9.0, "intel_avx": "Yes", "name": "C4 High-CPU Extra Large", "architecture": "64-bit", "linux_virtualization": "HVM", "storage": 0.0, "placement_group_support": true, "intel_avx2": "Yes", "clock_speed_ghz": "Yes", "apiname": "c4.xlarge", "computeunits": 16.0, "ebs_throughput": 93.75, "vpc_only": true, "max_ips": 60, "physical_processor": "Intel Xeon E5-2666 v3", "fpga": 0, "intel_turbo": "Yes", "enhanced_networking": true, "ebs_iops": 6000.0, "vcpus": 4.0, "memory": 7.5, "ebs_max_bandwidth": 750.0, "gpus": 0, "ipv6_support": true}, "m3.xlarge": {"ecu_per_vcpu": 3.25, "network_perf": 9.0, "intel_avx": "Yes", "name": "M3 General Purpose Extra Large", "architecture": "64-bit", "linux_virtualization": "HVM, PV", "storage": 80.0, "placement_group_support": false, "intel_avx2": "", "clock_speed_ghz": "Yes", "apiname": "m3.xlarge", "computeunits": 13.0, "ebs_throughput": 62.5, "vpc_only": false, "max_ips": 60, "physical_processor": "Intel Xeon E5-2670 v2", "fpga": 0, "intel_turbo": "Yes", "enhanced_networking": false, "ebs_iops": 4000.0, "vcpus": 4.0, "memory": 15.0, "ebs_max_bandwidth": 500.0, "gpus": 0, "ipv6_support": false}} \ No newline at end of file +{"a1.2xlarge": {"apiname": "a1.2xlarge", "architecture": "64-bit", "clock_speed_ghz": "2.3 GHz", "computeunits": 0.0, "ebs_iops": 20000.0, "ebs_max_bandwidth": 3500.0, "ebs_throughput": 437.5, "ecu_per_vcpu": 0.0, "enhanced_networking": true, "fpga": 0, "gpus": 0, "intel_avx": "unknown", "intel_avx2": "unknown", "intel_turbo": "unknown", "ipv6_support": true, "linux_virtualization": "Unknown", "max_ips": 60, "memory": 16.0, "name": "A1 Double Extra Large", "network_perf": 10.0, "physical_processor": "AWS Graviton Processor", "placement_group_support": false, "storage": 0.0, "vcpus": 8.0, "vpc_only": true}, "a1.4xlarge": {"apiname": "a1.4xlarge", "architecture": "64-bit", "clock_speed_ghz": "2.3 GHz", "computeunits": 0.0, "ebs_iops": 20000.0, "ebs_max_bandwidth": 3500.0, "ebs_throughput": 437.5, "ecu_per_vcpu": 0.0, "enhanced_networking": true, "fpga": 0, "gpus": 0, "intel_avx": "unknown", "intel_avx2": "unknown", "intel_turbo": "unknown", "ipv6_support": true, "linux_virtualization": "Unknown", "max_ips": 240, "memory": 32.0, "name": "A1 Quadruple Extra Large", "network_perf": 10.0, "physical_processor": "AWS Graviton Processor", "placement_group_support": false, "storage": 0.0, "vcpus": 16.0, "vpc_only": true}, 
"a1.large": {"apiname": "a1.large", "architecture": "64-bit", "clock_speed_ghz": "2.3 GHz", "computeunits": 0.0, "ebs_iops": 20000.0, "ebs_max_bandwidth": 3500.0, "ebs_throughput": 437.5, "ecu_per_vcpu": 0.0, "enhanced_networking": true, "fpga": 0, "gpus": 0, "intel_avx": "unknown", "intel_avx2": "unknown", "intel_turbo": "unknown", "ipv6_support": true, "linux_virtualization": "Unknown", "max_ips": 30, "memory": 4.0, "name": "A1 Large", "network_perf": 10.0, "physical_processor": "AWS Graviton Processor", "placement_group_support": false, "storage": 0.0, "vcpus": 2.0, "vpc_only": true}, "a1.medium": {"apiname": "a1.medium", "architecture": "64-bit", "clock_speed_ghz": "2.3 GHz", "computeunits": 0.0, "ebs_iops": 20000.0, "ebs_max_bandwidth": 3500.0, "ebs_throughput": 437.5, "ecu_per_vcpu": 0.0, "enhanced_networking": true, "fpga": 0, "gpus": 0, "intel_avx": "unknown", "intel_avx2": "unknown", "intel_turbo": "unknown", "ipv6_support": true, "linux_virtualization": "Unknown", "max_ips": 8, "memory": 2.0, "name": "A1 Medium", "network_perf": 10.0, "physical_processor": "AWS Graviton Processor", "placement_group_support": false, "storage": 0.0, "vcpus": 1.0, "vpc_only": true}, "a1.metal": {"apiname": "a1.metal", "architecture": "64-bit", "clock_speed_ghz": "unknown", "computeunits": 0.0, "ebs_iops": 20000.0, "ebs_max_bandwidth": 3500.0, "ebs_throughput": 437.5, "ecu_per_vcpu": 0.0, "enhanced_networking": false, "fpga": 0, "gpus": 0, "intel_avx": "unknown", "intel_avx2": "unknown", "intel_turbo": "unknown", "ipv6_support": true, "linux_virtualization": "Unknown", "max_ips": 240, "memory": 32.0, "name": "A1 Metal", "network_perf": 10.0, "physical_processor": "AWS Graviton Processor", "placement_group_support": false, "storage": 0.0, "vcpus": 16.0, "vpc_only": true}, "a1.xlarge": {"apiname": "a1.xlarge", "architecture": "64-bit", "clock_speed_ghz": "2.3 GHz", "computeunits": 0.0, "ebs_iops": 20000.0, "ebs_max_bandwidth": 3500.0, "ebs_throughput": 437.5, "ecu_per_vcpu": 0.0, "enhanced_networking": true, "fpga": 0, "gpus": 0, "intel_avx": "unknown", "intel_avx2": "unknown", "intel_turbo": "unknown", "ipv6_support": true, "linux_virtualization": "Unknown", "max_ips": 60, "memory": 8.0, "name": "A1 Extra Large", "network_perf": 10.0, "physical_processor": "AWS Graviton Processor", "placement_group_support": false, "storage": 0.0, "vcpus": 4.0, "vpc_only": true}, "c1.medium": {"apiname": "c1.medium", "architecture": "32/64-bit", "clock_speed_ghz": "unknown", "computeunits": 5.0, "ebs_iops": 0.0, "ebs_max_bandwidth": 0.0, "ebs_throughput": 0.0, "ecu_per_vcpu": 2.5, "enhanced_networking": false, "fpga": 0, "gpus": 0, "intel_avx": "unknown", "intel_avx2": "unknown", "intel_turbo": "unknown", "ipv6_support": false, "linux_virtualization": "PV", "max_ips": 12, "memory": 1.7, "name": "C1 High-CPU Medium", "network_perf": 6.0, "physical_processor": "Intel Xeon Family", "placement_group_support": false, "storage": 350.0, "vcpus": 2.0, "vpc_only": false}, "c1.xlarge": {"apiname": "c1.xlarge", "architecture": "64-bit", "clock_speed_ghz": "unknown", "computeunits": 20.0, "ebs_iops": 8000.0, "ebs_max_bandwidth": 1000.0, "ebs_throughput": 125.0, "ecu_per_vcpu": 2.5, "enhanced_networking": false, "fpga": 0, "gpus": 0, "intel_avx": "unknown", "intel_avx2": "unknown", "intel_turbo": "unknown", "ipv6_support": false, "linux_virtualization": "PV", "max_ips": 60, "memory": 7.0, "name": "C1 High-CPU Extra Large", "network_perf": 8.0, "physical_processor": "Intel Xeon Family", "placement_group_support": false, "storage": 
1680.0, "vcpus": 8.0, "vpc_only": false}, "c3.2xlarge": {"apiname": "c3.2xlarge", "architecture": "64-bit", "clock_speed_ghz": "2.8 GHz", "computeunits": 28.0, "ebs_iops": 8000.0, "ebs_max_bandwidth": 1000.0, "ebs_throughput": 125.0, "ecu_per_vcpu": 3.5, "enhanced_networking": true, "fpga": 0, "gpus": 0, "intel_avx": "Yes", "intel_avx2": "unknown", "intel_turbo": "Yes", "ipv6_support": true, "linux_virtualization": "HVM, PV", "max_ips": 60, "memory": 15.0, "name": "C3 High-CPU Double Extra Large", "network_perf": 8.0, "physical_processor": "Intel Xeon E5-2680 v2 (Ivy Bridge)", "placement_group_support": false, "storage": 160.0, "vcpus": 8.0, "vpc_only": false}, "c3.4xlarge": {"apiname": "c3.4xlarge", "architecture": "64-bit", "clock_speed_ghz": "2.8 GHz", "computeunits": 55.0, "ebs_iops": 16000.0, "ebs_max_bandwidth": 2000.0, "ebs_throughput": 250.0, "ecu_per_vcpu": 3.4375, "enhanced_networking": true, "fpga": 0, "gpus": 0, "intel_avx": "Yes", "intel_avx2": "unknown", "intel_turbo": "Yes", "ipv6_support": true, "linux_virtualization": "HVM, PV", "max_ips": 240, "memory": 30.0, "name": "C3 High-CPU Quadruple Extra Large", "network_perf": 8.0, "physical_processor": "Intel Xeon E5-2680 v2 (Ivy Bridge)", "placement_group_support": false, "storage": 320.0, "vcpus": 16.0, "vpc_only": false}, "c3.8xlarge": {"apiname": "c3.8xlarge", "architecture": "64-bit", "clock_speed_ghz": "2.8 GHz", "computeunits": 108.0, "ebs_iops": 0.0, "ebs_max_bandwidth": 0.0, "ebs_throughput": 0.0, "ecu_per_vcpu": 3.375, "enhanced_networking": true, "fpga": 0, "gpus": 0, "intel_avx": "Yes", "intel_avx2": "unknown", "intel_turbo": "Yes", "ipv6_support": true, "linux_virtualization": "HVM, PV", "max_ips": 240, "memory": 60.0, "name": "C3 High-CPU Eight Extra Large", "network_perf": 12.0, "physical_processor": "Intel Xeon E5-2680 v2 (Ivy Bridge)", "placement_group_support": false, "storage": 640.0, "vcpus": 32.0, "vpc_only": false}, "c3.large": {"apiname": "c3.large", "architecture": "32/64-bit", "clock_speed_ghz": "2.8 GHz", "computeunits": 7.0, "ebs_iops": 0.0, "ebs_max_bandwidth": 0.0, "ebs_throughput": 0.0, "ecu_per_vcpu": 3.5, "enhanced_networking": true, "fpga": 0, "gpus": 0, "intel_avx": "Yes", "intel_avx2": "unknown", "intel_turbo": "Yes", "ipv6_support": true, "linux_virtualization": "HVM, PV", "max_ips": 30, "memory": 3.75, "name": "C3 High-CPU Large", "network_perf": 6.0, "physical_processor": "Intel Xeon E5-2680 v2 (Ivy Bridge)", "placement_group_support": false, "storage": 32.0, "vcpus": 2.0, "vpc_only": false}, "c3.xlarge": {"apiname": "c3.xlarge", "architecture": "64-bit", "clock_speed_ghz": "2.8 GHz", "computeunits": 14.0, "ebs_iops": 4000.0, "ebs_max_bandwidth": 500.0, "ebs_throughput": 62.5, "ecu_per_vcpu": 3.5, "enhanced_networking": true, "fpga": 0, "gpus": 0, "intel_avx": "Yes", "intel_avx2": "unknown", "intel_turbo": "Yes", "ipv6_support": true, "linux_virtualization": "HVM, PV", "max_ips": 60, "memory": 7.5, "name": "C3 High-CPU Extra Large", "network_perf": 6.0, "physical_processor": "Intel Xeon E5-2680 v2 (Ivy Bridge)", "placement_group_support": false, "storage": 80.0, "vcpus": 4.0, "vpc_only": false}, "c4.2xlarge": {"apiname": "c4.2xlarge", "architecture": "64-bit", "clock_speed_ghz": "2.9 GHz", "computeunits": 31.0, "ebs_iops": 8000.0, "ebs_max_bandwidth": 1000.0, "ebs_throughput": 125.0, "ecu_per_vcpu": 3.875, "enhanced_networking": true, "fpga": 0, "gpus": 0, "intel_avx": "Yes", "intel_avx2": "Yes", "intel_turbo": "Yes", "ipv6_support": true, "linux_virtualization": "HVM", "max_ips": 60, 
"memory": 15.0, "name": "C4 High-CPU Double Extra Large", "network_perf": 8.0, "physical_processor": "Intel Xeon E5-2666 v3 (Haswell)", "placement_group_support": false, "storage": 0.0, "vcpus": 8.0, "vpc_only": true}, "c4.4xlarge": {"apiname": "c4.4xlarge", "architecture": "64-bit", "clock_speed_ghz": "2.9 GHz", "computeunits": 62.0, "ebs_iops": 16000.0, "ebs_max_bandwidth": 2000.0, "ebs_throughput": 250.0, "ecu_per_vcpu": 3.875, "enhanced_networking": true, "fpga": 0, "gpus": 0, "intel_avx": "Yes", "intel_avx2": "Yes", "intel_turbo": "Yes", "ipv6_support": true, "linux_virtualization": "HVM", "max_ips": 240, "memory": 30.0, "name": "C4 High-CPU Quadruple Extra Large", "network_perf": 8.0, "physical_processor": "Intel Xeon E5-2666 v3 (Haswell)", "placement_group_support": false, "storage": 0.0, "vcpus": 16.0, "vpc_only": true}, "c4.8xlarge": {"apiname": "c4.8xlarge", "architecture": "64-bit", "clock_speed_ghz": "2.9 GHz", "computeunits": 132.0, "ebs_iops": 32000.0, "ebs_max_bandwidth": 4000.0, "ebs_throughput": 500.0, "ecu_per_vcpu": 3.6666666666666665, "enhanced_networking": true, "fpga": 0, "gpus": 0, "intel_avx": "Yes", "intel_avx2": "Yes", "intel_turbo": "Yes", "ipv6_support": true, "linux_virtualization": "HVM", "max_ips": 240, "memory": 60.0, "name": "C4 High-CPU Eight Extra Large", "network_perf": 12.0, "physical_processor": "Intel Xeon E5-2666 v3 (Haswell)", "placement_group_support": false, "storage": 0.0, "vcpus": 36.0, "vpc_only": true}, "c4.large": {"apiname": "c4.large", "architecture": "64-bit", "clock_speed_ghz": "2.9 GHz", "computeunits": 8.0, "ebs_iops": 4000.0, "ebs_max_bandwidth": 500.0, "ebs_throughput": 62.5, "ecu_per_vcpu": 4.0, "enhanced_networking": true, "fpga": 0, "gpus": 0, "intel_avx": "Yes", "intel_avx2": "Yes", "intel_turbo": "Yes", "ipv6_support": true, "linux_virtualization": "HVM", "max_ips": 30, "memory": 3.75, "name": "C4 High-CPU Large", "network_perf": 6.0, "physical_processor": "Intel Xeon E5-2666 v3 (Haswell)", "placement_group_support": false, "storage": 0.0, "vcpus": 2.0, "vpc_only": true}, "c4.xlarge": {"apiname": "c4.xlarge", "architecture": "64-bit", "clock_speed_ghz": "2.9 GHz", "computeunits": 16.0, "ebs_iops": 6000.0, "ebs_max_bandwidth": 750.0, "ebs_throughput": 93.75, "ecu_per_vcpu": 4.0, "enhanced_networking": true, "fpga": 0, "gpus": 0, "intel_avx": "Yes", "intel_avx2": "Yes", "intel_turbo": "Yes", "ipv6_support": true, "linux_virtualization": "HVM", "max_ips": 60, "memory": 7.5, "name": "C4 High-CPU Extra Large", "network_perf": 8.0, "physical_processor": "Intel Xeon E5-2666 v3 (Haswell)", "placement_group_support": false, "storage": 0.0, "vcpus": 4.0, "vpc_only": true}, "c5.12xlarge": {"apiname": "c5.12xlarge", "architecture": "64-bit", "clock_speed_ghz": "3.0 Ghz", "computeunits": 188.0, "ebs_iops": 40000.0, "ebs_max_bandwidth": 7000.0, "ebs_throughput": 875.0, "ecu_per_vcpu": 3.9166666666666665, "enhanced_networking": true, "fpga": 0, "gpus": 0, "intel_avx": "Yes", "intel_avx2": "Yes", "intel_turbo": "Yes", "ipv6_support": true, "linux_virtualization": "HVM", "max_ips": 240, "memory": 96.0, "name": "C5 High-CPU 12xlarge", "network_perf": 14.0, "physical_processor": "Intel Xeon Platinum 8275L", "placement_group_support": false, "storage": 0.0, "vcpus": 48.0, "vpc_only": true}, "c5.18xlarge": {"apiname": "c5.18xlarge", "architecture": "64-bit", "clock_speed_ghz": "3.0 Ghz", "computeunits": 281.0, "ebs_iops": 80000.0, "ebs_max_bandwidth": 14000.0, "ebs_throughput": 1750.0, "ecu_per_vcpu": 3.9027777777777777, "enhanced_networking": true, 
"fpga": 0, "gpus": 0, "intel_avx": "Yes", "intel_avx2": "Yes", "intel_turbo": "Yes", "ipv6_support": true, "linux_virtualization": "HVM", "max_ips": 750, "memory": 144.0, "name": "C5 High-CPU 18xlarge", "network_perf": 20.0, "physical_processor": "Intel Xeon Platinum 8124M", "placement_group_support": false, "storage": 0.0, "vcpus": 72.0, "vpc_only": true}, "c5.24xlarge": {"apiname": "c5.24xlarge", "architecture": "64-bit", "clock_speed_ghz": "3.0 Ghz", "computeunits": 375.0, "ebs_iops": 80000.0, "ebs_max_bandwidth": 14000.0, "ebs_throughput": 1750.0, "ecu_per_vcpu": 3.90625, "enhanced_networking": true, "fpga": 0, "gpus": 0, "intel_avx": "Yes", "intel_avx2": "Yes", "intel_turbo": "Yes", "ipv6_support": true, "linux_virtualization": "HVM", "max_ips": 750, "memory": 192.0, "name": "C5 High-CPU 24xlarge", "network_perf": 20.0, "physical_processor": "Intel Xeon Platinum 8275L", "placement_group_support": false, "storage": 0.0, "vcpus": 96.0, "vpc_only": true}, "c5.2xlarge": {"apiname": "c5.2xlarge", "architecture": "64-bit", "clock_speed_ghz": "3.0 Ghz", "computeunits": 34.0, "ebs_iops": 20000.0, "ebs_max_bandwidth": 3500.0, "ebs_throughput": 437.5, "ecu_per_vcpu": 4.25, "enhanced_networking": true, "fpga": 0, "gpus": 0, "intel_avx": "Yes", "intel_avx2": "Yes", "intel_turbo": "Yes", "ipv6_support": true, "linux_virtualization": "HVM", "max_ips": 60, "memory": 16.0, "name": "C5 High-CPU Double Extra Large", "network_perf": 10.0, "physical_processor": "Intel Xeon Platinum 8124M", "placement_group_support": false, "storage": 0.0, "vcpus": 8.0, "vpc_only": true}, "c5.4xlarge": {"apiname": "c5.4xlarge", "architecture": "64-bit", "clock_speed_ghz": "3.0 Ghz", "computeunits": 68.0, "ebs_iops": 20000.0, "ebs_max_bandwidth": 3500.0, "ebs_throughput": 437.5, "ecu_per_vcpu": 4.25, "enhanced_networking": true, "fpga": 0, "gpus": 0, "intel_avx": "Yes", "intel_avx2": "Yes", "intel_turbo": "Yes", "ipv6_support": true, "linux_virtualization": "HVM", "max_ips": 240, "memory": 32.0, "name": "C5 High-CPU Quadruple Extra Large", "network_perf": 10.0, "physical_processor": "Intel Xeon Platinum 8124M", "placement_group_support": false, "storage": 0.0, "vcpus": 16.0, "vpc_only": true}, "c5.9xlarge": {"apiname": "c5.9xlarge", "architecture": "64-bit", "clock_speed_ghz": "3.0 Ghz", "computeunits": 141.0, "ebs_iops": 40000.0, "ebs_max_bandwidth": 7000.0, "ebs_throughput": 875.0, "ecu_per_vcpu": 3.9166666666666665, "enhanced_networking": true, "fpga": 0, "gpus": 0, "intel_avx": "Yes", "intel_avx2": "Yes", "intel_turbo": "Yes", "ipv6_support": true, "linux_virtualization": "HVM", "max_ips": 240, "memory": 72.0, "name": "C5 High-CPU 9xlarge", "network_perf": 12.0, "physical_processor": "Intel Xeon Platinum 8124M", "placement_group_support": false, "storage": 0.0, "vcpus": 36.0, "vpc_only": true}, "c5.large": {"apiname": "c5.large", "architecture": "64-bit", "clock_speed_ghz": "3.0 Ghz", "computeunits": 9.0, "ebs_iops": 20000.0, "ebs_max_bandwidth": 3500.0, "ebs_throughput": 437.5, "ecu_per_vcpu": 4.5, "enhanced_networking": true, "fpga": 0, "gpus": 0, "intel_avx": "Yes", "intel_avx2": "Yes", "intel_turbo": "Yes", "ipv6_support": true, "linux_virtualization": "HVM", "max_ips": 30, "memory": 4.0, "name": "C5 High-CPU Large", "network_perf": 10.0, "physical_processor": "Intel Xeon Platinum 8124M", "placement_group_support": false, "storage": 0.0, "vcpus": 2.0, "vpc_only": true}, "c5.metal": {"apiname": "c5.metal", "architecture": "64-bit", "clock_speed_ghz": "3.0 Ghz", "computeunits": 375.0, "ebs_iops": 80000.0, 
"ebs_max_bandwidth": 14000.0, "ebs_throughput": 1750.0, "ecu_per_vcpu": 3.90625, "enhanced_networking": true, "fpga": 0, "gpus": 0, "intel_avx": "Yes", "intel_avx2": "Yes", "intel_turbo": "Yes", "ipv6_support": true, "linux_virtualization": "HVM", "max_ips": 750, "memory": 192.0, "name": "C5 High-CPU Metal", "network_perf": 20.0, "physical_processor": "Intel Xeon Platinum 8275L", "placement_group_support": false, "storage": 0.0, "vcpus": 96.0, "vpc_only": true}, "c5.xlarge": {"apiname": "c5.xlarge", "architecture": "64-bit", "clock_speed_ghz": "3.0 Ghz", "computeunits": 17.0, "ebs_iops": 20000.0, "ebs_max_bandwidth": 3500.0, "ebs_throughput": 437.5, "ecu_per_vcpu": 4.25, "enhanced_networking": true, "fpga": 0, "gpus": 0, "intel_avx": "Yes", "intel_avx2": "Yes", "intel_turbo": "Yes", "ipv6_support": true, "linux_virtualization": "HVM", "max_ips": 60, "memory": 8.0, "name": "C5 High-CPU Extra Large", "network_perf": 10.0, "physical_processor": "Intel Xeon Platinum 8124M", "placement_group_support": false, "storage": 0.0, "vcpus": 4.0, "vpc_only": true}, "c5d.12xlarge": {"apiname": "c5d.12xlarge", "architecture": "64-bit", "clock_speed_ghz": "3.0 Ghz", "computeunits": 188.0, "ebs_iops": 40000.0, "ebs_max_bandwidth": 7000.0, "ebs_throughput": 875.0, "ecu_per_vcpu": 3.9166666666666665, "enhanced_networking": true, "fpga": 0, "gpus": 0, "intel_avx": "Yes", "intel_avx2": "Yes", "intel_turbo": "Yes", "ipv6_support": true, "linux_virtualization": "HVM", "max_ips": 240, "memory": 96.0, "name": "C5 High-CPU 12xlarge", "network_perf": 14.0, "physical_processor": "Intel Xeon Platinum 8275L", "placement_group_support": false, "storage": 1800.0, "vcpus": 48.0, "vpc_only": true}, "c5d.18xlarge": {"apiname": "c5d.18xlarge", "architecture": "64-bit", "clock_speed_ghz": "3.0 Ghz", "computeunits": 281.0, "ebs_iops": 80000.0, "ebs_max_bandwidth": 14000.0, "ebs_throughput": 1750.0, "ecu_per_vcpu": 3.9027777777777777, "enhanced_networking": true, "fpga": 0, "gpus": 0, "intel_avx": "Yes", "intel_avx2": "Yes", "intel_turbo": "Yes", "ipv6_support": true, "linux_virtualization": "HVM", "max_ips": 750, "memory": 144.0, "name": "C5 High-CPU 18xlarge", "network_perf": 20.0, "physical_processor": "Intel Xeon Platinum 8124M", "placement_group_support": false, "storage": 1800.0, "vcpus": 72.0, "vpc_only": true}, "c5d.24xlarge": {"apiname": "c5d.24xlarge", "architecture": "64-bit", "clock_speed_ghz": "3.0 Ghz", "computeunits": 375.0, "ebs_iops": 80000.0, "ebs_max_bandwidth": 14000.0, "ebs_throughput": 1750.0, "ecu_per_vcpu": 3.90625, "enhanced_networking": true, "fpga": 0, "gpus": 0, "intel_avx": "Yes", "intel_avx2": "Yes", "intel_turbo": "Yes", "ipv6_support": true, "linux_virtualization": "HVM", "max_ips": 750, "memory": 192.0, "name": "C5 High-CPU 24xlarge", "network_perf": 20.0, "physical_processor": "Intel Xeon Platinum 8275L", "placement_group_support": false, "storage": 3600.0, "vcpus": 96.0, "vpc_only": true}, "c5d.2xlarge": {"apiname": "c5d.2xlarge", "architecture": "64-bit", "clock_speed_ghz": "3.0 Ghz", "computeunits": 34.0, "ebs_iops": 20000.0, "ebs_max_bandwidth": 3500.0, "ebs_throughput": 437.5, "ecu_per_vcpu": 4.25, "enhanced_networking": true, "fpga": 0, "gpus": 0, "intel_avx": "Yes", "intel_avx2": "Yes", "intel_turbo": "Yes", "ipv6_support": true, "linux_virtualization": "HVM", "max_ips": 60, "memory": 16.0, "name": "C5 High-CPU Double Extra Large", "network_perf": 10.0, "physical_processor": "Intel Xeon Platinum 8124M", "placement_group_support": false, "storage": 200.0, "vcpus": 8.0, "vpc_only": true}, 
"c5d.4xlarge": {"apiname": "c5d.4xlarge", "architecture": "64-bit", "clock_speed_ghz": "3.0 Ghz", "computeunits": 68.0, "ebs_iops": 20000.0, "ebs_max_bandwidth": 3500.0, "ebs_throughput": 437.5, "ecu_per_vcpu": 4.25, "enhanced_networking": true, "fpga": 0, "gpus": 0, "intel_avx": "Yes", "intel_avx2": "Yes", "intel_turbo": "Yes", "ipv6_support": true, "linux_virtualization": "HVM", "max_ips": 240, "memory": 32.0, "name": "C5 High-CPU Quadruple Extra Large", "network_perf": 10.0, "physical_processor": "Intel Xeon Platinum 8124M", "placement_group_support": false, "storage": 400.0, "vcpus": 16.0, "vpc_only": true}, "c5d.9xlarge": {"apiname": "c5d.9xlarge", "architecture": "64-bit", "clock_speed_ghz": "3.0 Ghz", "computeunits": 141.0, "ebs_iops": 40000.0, "ebs_max_bandwidth": 7000.0, "ebs_throughput": 875.0, "ecu_per_vcpu": 3.9166666666666665, "enhanced_networking": true, "fpga": 0, "gpus": 0, "intel_avx": "Yes", "intel_avx2": "Yes", "intel_turbo": "Yes", "ipv6_support": true, "linux_virtualization": "HVM", "max_ips": 240, "memory": 72.0, "name": "C5 High-CPU 9xlarge", "network_perf": 12.0, "physical_processor": "Intel Xeon Platinum 8124M", "placement_group_support": false, "storage": 900.0, "vcpus": 36.0, "vpc_only": true}, "c5d.large": {"apiname": "c5d.large", "architecture": "64-bit", "clock_speed_ghz": "3.0 Ghz", "computeunits": 9.0, "ebs_iops": 20000.0, "ebs_max_bandwidth": 3500.0, "ebs_throughput": 437.5, "ecu_per_vcpu": 4.5, "enhanced_networking": true, "fpga": 0, "gpus": 0, "intel_avx": "Yes", "intel_avx2": "Yes", "intel_turbo": "Yes", "ipv6_support": true, "linux_virtualization": "HVM", "max_ips": 30, "memory": 4.0, "name": "C5 High-CPU Large", "network_perf": 10.0, "physical_processor": "Intel Xeon Platinum 8124M", "placement_group_support": false, "storage": 50.0, "vcpus": 2.0, "vpc_only": true}, "c5d.metal": {"apiname": "c5d.metal", "architecture": "64-bit", "clock_speed_ghz": "3.0 Ghz", "computeunits": 375.0, "ebs_iops": 80000.0, "ebs_max_bandwidth": 14000.0, "ebs_throughput": 1750.0, "ecu_per_vcpu": 3.90625, "enhanced_networking": true, "fpga": 0, "gpus": 0, "intel_avx": "Yes", "intel_avx2": "Yes", "intel_turbo": "Yes", "ipv6_support": true, "linux_virtualization": "HVM", "max_ips": 750, "memory": 192.0, "name": "C5 High-CPU Metal", "network_perf": 20.0, "physical_processor": "Intel Xeon Platinum 8275L", "placement_group_support": false, "storage": 3600.0, "vcpus": 96.0, "vpc_only": true}, "c5d.xlarge": {"apiname": "c5d.xlarge", "architecture": "64-bit", "clock_speed_ghz": "3.0 Ghz", "computeunits": 17.0, "ebs_iops": 20000.0, "ebs_max_bandwidth": 3500.0, "ebs_throughput": 437.5, "ecu_per_vcpu": 4.25, "enhanced_networking": true, "fpga": 0, "gpus": 0, "intel_avx": "Yes", "intel_avx2": "Yes", "intel_turbo": "Yes", "ipv6_support": true, "linux_virtualization": "HVM", "max_ips": 60, "memory": 8.0, "name": "C5 High-CPU Extra Large", "network_perf": 10.0, "physical_processor": "Intel Xeon Platinum 8124M", "placement_group_support": false, "storage": 100.0, "vcpus": 4.0, "vpc_only": true}, "c5n.18xlarge": {"apiname": "c5n.18xlarge", "architecture": "64-bit", "clock_speed_ghz": "3.0 Ghz", "computeunits": 0.0, "ebs_iops": 80000.0, "ebs_max_bandwidth": 14000.0, "ebs_throughput": 1750.0, "ecu_per_vcpu": 0.0, "enhanced_networking": true, "fpga": 0, "gpus": 0, "intel_avx": "Yes", "intel_avx2": "Yes", "intel_turbo": "Yes", "ipv6_support": true, "linux_virtualization": "Unknown", "max_ips": 750, "memory": 192.0, "name": "C5N 18xlarge", "network_perf": 26.0, "physical_processor": "Intel Xeon 
Platinum 8124M", "placement_group_support": false, "storage": 0.0, "vcpus": 72.0, "vpc_only": true}, "c5n.2xlarge": {"apiname": "c5n.2xlarge", "architecture": "64-bit", "clock_speed_ghz": "3.0 Ghz", "computeunits": 0.0, "ebs_iops": 20000.0, "ebs_max_bandwidth": 3500.0, "ebs_throughput": 437.5, "ecu_per_vcpu": 0.0, "enhanced_networking": true, "fpga": 0, "gpus": 0, "intel_avx": "Yes", "intel_avx2": "Yes", "intel_turbo": "Yes", "ipv6_support": true, "linux_virtualization": "Unknown", "max_ips": 60, "memory": 21.0, "name": "C5N Double Extra Large", "network_perf": 18.0, "physical_processor": "Intel Xeon Platinum 8124M", "placement_group_support": false, "storage": 0.0, "vcpus": 8.0, "vpc_only": true}, "c5n.4xlarge": {"apiname": "c5n.4xlarge", "architecture": "64-bit", "clock_speed_ghz": "3.0 Ghz", "computeunits": 0.0, "ebs_iops": 20000.0, "ebs_max_bandwidth": 3500.0, "ebs_throughput": 437.5, "ecu_per_vcpu": 0.0, "enhanced_networking": true, "fpga": 0, "gpus": 0, "intel_avx": "Yes", "intel_avx2": "Yes", "intel_turbo": "Yes", "ipv6_support": true, "linux_virtualization": "Unknown", "max_ips": 240, "memory": 42.0, "name": "C5N Quadruple Extra Large", "network_perf": 18.0, "physical_processor": "Intel Xeon Platinum 8124M", "placement_group_support": false, "storage": 0.0, "vcpus": 16.0, "vpc_only": true}, "c5n.9xlarge": {"apiname": "c5n.9xlarge", "architecture": "64-bit", "clock_speed_ghz": "3.0 Ghz", "computeunits": 0.0, "ebs_iops": 40000.0, "ebs_max_bandwidth": 7000.0, "ebs_throughput": 875.0, "ecu_per_vcpu": 0.0, "enhanced_networking": true, "fpga": 0, "gpus": 0, "intel_avx": "Yes", "intel_avx2": "Yes", "intel_turbo": "Yes", "ipv6_support": true, "linux_virtualization": "Unknown", "max_ips": 240, "memory": 96.0, "name": "C5N 9xlarge", "network_perf": 22.0, "physical_processor": "Intel Xeon Platinum 8124M", "placement_group_support": false, "storage": 0.0, "vcpus": 36.0, "vpc_only": true}, "c5n.large": {"apiname": "c5n.large", "architecture": "64-bit", "clock_speed_ghz": "3.0 Ghz", "computeunits": 0.0, "ebs_iops": 20000.0, "ebs_max_bandwidth": 3500.0, "ebs_throughput": 437.5, "ecu_per_vcpu": 0.0, "enhanced_networking": true, "fpga": 0, "gpus": 0, "intel_avx": "Yes", "intel_avx2": "Yes", "intel_turbo": "Yes", "ipv6_support": true, "linux_virtualization": "Unknown", "max_ips": 30, "memory": 5.25, "name": "C5N Large", "network_perf": 18.0, "physical_processor": "Intel Xeon Platinum 8124M", "placement_group_support": false, "storage": 0.0, "vcpus": 2.0, "vpc_only": true}, "c5n.metal": {"apiname": "c5n.metal", "architecture": "64-bit", "clock_speed_ghz": "unknown", "computeunits": 0.0, "ebs_iops": 80000.0, "ebs_max_bandwidth": 14000.0, "ebs_throughput": 1750.0, "ecu_per_vcpu": 0.0, "enhanced_networking": false, "fpga": 0, "gpus": 0, "intel_avx": "unknown", "intel_avx2": "unknown", "intel_turbo": "unknown", "ipv6_support": true, "linux_virtualization": "Unknown", "max_ips": 750, "memory": 192.0, "name": "C5N Metal", "network_perf": 26.0, "physical_processor": "Intel Xeon Platinum 8124M", "placement_group_support": false, "storage": 0.0, "vcpus": 72.0, "vpc_only": true}, "c5n.xlarge": {"apiname": "c5n.xlarge", "architecture": "64-bit", "clock_speed_ghz": "3.0 Ghz", "computeunits": 0.0, "ebs_iops": 20000.0, "ebs_max_bandwidth": 3500.0, "ebs_throughput": 437.5, "ecu_per_vcpu": 0.0, "enhanced_networking": true, "fpga": 0, "gpus": 0, "intel_avx": "Yes", "intel_avx2": "Yes", "intel_turbo": "Yes", "ipv6_support": true, "linux_virtualization": "Unknown", "max_ips": 60, "memory": 10.5, "name": "C5N Extra 
Large", "network_perf": 18.0, "physical_processor": "Intel Xeon Platinum 8124M", "placement_group_support": false, "storage": 0.0, "vcpus": 4.0, "vpc_only": true}, "cc2.8xlarge": {"apiname": "cc2.8xlarge", "architecture": "64-bit", "clock_speed_ghz": "2.6 GHz", "computeunits": 88.0, "ebs_iops": 0.0, "ebs_max_bandwidth": 0.0, "ebs_throughput": 0.0, "ecu_per_vcpu": 2.75, "enhanced_networking": false, "fpga": 0, "gpus": 0, "intel_avx": "unknown", "intel_avx2": "unknown", "intel_turbo": "unknown", "ipv6_support": false, "linux_virtualization": "HVM", "max_ips": 240, "memory": 60.5, "name": "Cluster Compute Eight Extra Large", "network_perf": 12.0, "physical_processor": "Intel Xeon E5-2670", "placement_group_support": false, "storage": 3360.0, "vcpus": 32.0, "vpc_only": false}, "cr1.8xlarge": {"apiname": "cr1.8xlarge", "architecture": "64-bit", "clock_speed_ghz": "unknown", "computeunits": 88.0, "ebs_iops": 0.0, "ebs_max_bandwidth": 0.0, "ebs_throughput": 0.0, "ecu_per_vcpu": 2.75, "enhanced_networking": false, "fpga": 0, "gpus": 0, "intel_avx": "unknown", "intel_avx2": "unknown", "intel_turbo": "unknown", "ipv6_support": false, "linux_virtualization": "Unknown", "max_ips": 240, "memory": 244.0, "name": "High Memory Cluster Eight Extra Large", "network_perf": 12.0, "physical_processor": "Intel Xeon E5-2670", "placement_group_support": false, "storage": 240.0, "vcpus": 32.0, "vpc_only": false}, "d2.2xlarge": {"apiname": "d2.2xlarge", "architecture": "64-bit", "clock_speed_ghz": "2.4 GHz", "computeunits": 28.0, "ebs_iops": 8000.0, "ebs_max_bandwidth": 1000.0, "ebs_throughput": 125.0, "ecu_per_vcpu": 3.5, "enhanced_networking": true, "fpga": 0, "gpus": 0, "intel_avx": "Yes", "intel_avx2": "Yes", "intel_turbo": "Yes", "ipv6_support": true, "linux_virtualization": "HVM", "max_ips": 60, "memory": 61.0, "name": "D2 Double Extra Large", "network_perf": 8.0, "physical_processor": "Intel Xeon E5-2676v3 (Haswell)", "placement_group_support": false, "storage": 12000.0, "vcpus": 8.0, "vpc_only": false}, "d2.4xlarge": {"apiname": "d2.4xlarge", "architecture": "64-bit", "clock_speed_ghz": "2.4 GHz", "computeunits": 56.0, "ebs_iops": 16000.0, "ebs_max_bandwidth": 2000.0, "ebs_throughput": 250.0, "ecu_per_vcpu": 3.5, "enhanced_networking": true, "fpga": 0, "gpus": 0, "intel_avx": "Yes", "intel_avx2": "Yes", "intel_turbo": "Yes", "ipv6_support": true, "linux_virtualization": "HVM", "max_ips": 240, "memory": 122.0, "name": "D2 Quadruple Extra Large", "network_perf": 8.0, "physical_processor": "Intel Xeon E5-2676v3 (Haswell)", "placement_group_support": false, "storage": 24000.0, "vcpus": 16.0, "vpc_only": false}, "d2.8xlarge": {"apiname": "d2.8xlarge", "architecture": "64-bit", "clock_speed_ghz": "2.4 GHz", "computeunits": 116.0, "ebs_iops": 32000.0, "ebs_max_bandwidth": 4000.0, "ebs_throughput": 500.0, "ecu_per_vcpu": 3.2222222222222223, "enhanced_networking": true, "fpga": 0, "gpus": 0, "intel_avx": "Yes", "intel_avx2": "Yes", "intel_turbo": "Yes", "ipv6_support": true, "linux_virtualization": "HVM", "max_ips": 240, "memory": 244.0, "name": "D2 Eight Extra Large", "network_perf": 12.0, "physical_processor": "Intel Xeon E5-2676v3 (Haswell)", "placement_group_support": false, "storage": 48000.0, "vcpus": 36.0, "vpc_only": false}, "d2.xlarge": {"apiname": "d2.xlarge", "architecture": "64-bit", "clock_speed_ghz": "2.4 GHz", "computeunits": 14.0, "ebs_iops": 6000.0, "ebs_max_bandwidth": 750.0, "ebs_throughput": 93.75, "ecu_per_vcpu": 3.5, "enhanced_networking": true, "fpga": 0, "gpus": 0, "intel_avx": "Yes", 
"intel_avx2": "Yes", "intel_turbo": "Yes", "ipv6_support": true, "linux_virtualization": "HVM", "max_ips": 60, "memory": 30.5, "name": "D2 Extra Large", "network_perf": 6.0, "physical_processor": "Intel Xeon E5-2676v3 (Haswell)", "placement_group_support": false, "storage": 6000.0, "vcpus": 4.0, "vpc_only": false}, "f1.16xlarge": {"apiname": "f1.16xlarge", "architecture": "64-bit", "clock_speed_ghz": "unknown", "computeunits": 188.0, "ebs_iops": 75000.0, "ebs_max_bandwidth": 14000.0, "ebs_throughput": 1750.0, "ecu_per_vcpu": 2.9375, "enhanced_networking": false, "fpga": 8, "gpus": 0, "intel_avx": "unknown", "intel_avx2": "unknown", "intel_turbo": "unknown", "ipv6_support": true, "linux_virtualization": "HVM", "max_ips": 400, "memory": 976.0, "name": "F1 16xlarge", "network_perf": 16.0, "physical_processor": "Intel Xeon E5-2686 v4 (Broadwell)", "placement_group_support": false, "storage": 3760.0, "vcpus": 64.0, "vpc_only": true}, "f1.2xlarge": {"apiname": "f1.2xlarge", "architecture": "64-bit", "clock_speed_ghz": "unknown", "computeunits": 26.0, "ebs_iops": 12000.0, "ebs_max_bandwidth": 1700.0, "ebs_throughput": 212.5, "ecu_per_vcpu": 3.25, "enhanced_networking": false, "fpga": 1, "gpus": 0, "intel_avx": "unknown", "intel_avx2": "unknown", "intel_turbo": "unknown", "ipv6_support": true, "linux_virtualization": "HVM", "max_ips": 60, "memory": 122.0, "name": "F1 Double Extra Large", "network_perf": 10.0, "physical_processor": "Intel Xeon E5-2686 v4 (Broadwell)", "placement_group_support": false, "storage": 470.0, "vcpus": 8.0, "vpc_only": true}, "f1.4xlarge": {"apiname": "f1.4xlarge", "architecture": "64-bit", "clock_speed_ghz": "unknown", "computeunits": 52.0, "ebs_iops": 44000.0, "ebs_max_bandwidth": 3500.0, "ebs_throughput": 400.0, "ecu_per_vcpu": 3.25, "enhanced_networking": false, "fpga": 2, "gpus": 0, "intel_avx": "unknown", "intel_avx2": "unknown", "intel_turbo": "unknown", "ipv6_support": true, "linux_virtualization": "HVM", "max_ips": 240, "memory": 244.0, "name": "F1 Quadruple Extra Large", "network_perf": 12.0, "physical_processor": "Intel Xeon E5-2686 v4 (Broadwell)", "placement_group_support": false, "storage": 940.0, "vcpus": 16.0, "vpc_only": true}, "g2.2xlarge": {"apiname": "g2.2xlarge", "architecture": "64-bit", "clock_speed_ghz": "2.6 GHz", "computeunits": 26.0, "ebs_iops": 8000.0, "ebs_max_bandwidth": 1000.0, "ebs_throughput": 125.0, "ecu_per_vcpu": 3.25, "enhanced_networking": false, "fpga": 0, "gpus": 1, "intel_avx": "Yes", "intel_avx2": "unknown", "intel_turbo": "Yes", "ipv6_support": false, "linux_virtualization": "Unknown", "max_ips": 60, "memory": 15.0, "name": "G2 Double Extra Large", "network_perf": 8.0, "physical_processor": "Intel Xeon E5-2670 (Sandy Bridge)", "placement_group_support": false, "storage": 60.0, "vcpus": 8.0, "vpc_only": false}, "g2.8xlarge": {"apiname": "g2.8xlarge", "architecture": "64-bit", "clock_speed_ghz": "2.6 GHz", "computeunits": 104.0, "ebs_iops": 0.0, "ebs_max_bandwidth": 0.0, "ebs_throughput": 0.0, "ecu_per_vcpu": 3.25, "enhanced_networking": false, "fpga": 0, "gpus": 4, "intel_avx": "Yes", "intel_avx2": "unknown", "intel_turbo": "Yes", "ipv6_support": false, "linux_virtualization": "Unknown", "max_ips": 240, "memory": 60.0, "name": "G2 Eight Extra Large", "network_perf": 12.0, "physical_processor": "Intel Xeon E5-2670 (Sandy Bridge)", "placement_group_support": false, "storage": 240.0, "vcpus": 32.0, "vpc_only": false}, "g3.16xlarge": {"apiname": "g3.16xlarge", "architecture": "64-bit", "clock_speed_ghz": "2.3 GHz", "computeunits": 
188.0, "ebs_iops": 80000.0, "ebs_max_bandwidth": 14000.0, "ebs_throughput": 1750.0, "ecu_per_vcpu": 2.9375, "enhanced_networking": true, "fpga": 0, "gpus": 4, "intel_avx": "Yes", "intel_avx2": "Yes", "intel_turbo": "Yes", "ipv6_support": true, "linux_virtualization": "HVM", "max_ips": 750, "memory": 488.0, "name": "G3 16xlarge", "network_perf": 16.0, "physical_processor": "Intel Xeon E5-2686 v4 (Broadwell)", "placement_group_support": false, "storage": 0.0, "vcpus": 64.0, "vpc_only": true}, "g3.4xlarge": {"apiname": "g3.4xlarge", "architecture": "64-bit", "clock_speed_ghz": "2.3 GHz", "computeunits": 47.0, "ebs_iops": 20000.0, "ebs_max_bandwidth": 3500.0, "ebs_throughput": 437.5, "ecu_per_vcpu": 2.9375, "enhanced_networking": true, "fpga": 0, "gpus": 1, "intel_avx": "Yes", "intel_avx2": "Yes", "intel_turbo": "Yes", "ipv6_support": true, "linux_virtualization": "HVM", "max_ips": 240, "memory": 122.0, "name": "G3 Quadruple Extra Large", "network_perf": 10.0, "physical_processor": "Intel Xeon E5-2686 v4 (Broadwell)", "placement_group_support": false, "storage": 0.0, "vcpus": 16.0, "vpc_only": true}, "g3.8xlarge": {"apiname": "g3.8xlarge", "architecture": "64-bit", "clock_speed_ghz": "2.3 GHz", "computeunits": 94.0, "ebs_iops": 40000.0, "ebs_max_bandwidth": 7000.0, "ebs_throughput": 875.0, "ecu_per_vcpu": 2.9375, "enhanced_networking": true, "fpga": 0, "gpus": 2, "intel_avx": "Yes", "intel_avx2": "Yes", "intel_turbo": "Yes", "ipv6_support": true, "linux_virtualization": "HVM", "max_ips": 240, "memory": 244.0, "name": "G3 Eight Extra Large", "network_perf": 12.0, "physical_processor": "Intel Xeon E5-2686 v4 (Broadwell)", "placement_group_support": false, "storage": 0.0, "vcpus": 32.0, "vpc_only": true}, "g3s.xlarge": {"apiname": "g3s.xlarge", "architecture": "64-bit", "clock_speed_ghz": "unknown", "computeunits": 13.0, "ebs_iops": 5000.0, "ebs_max_bandwidth": 850.0, "ebs_throughput": 100.0, "ecu_per_vcpu": 3.25, "enhanced_networking": false, "fpga": 0, "gpus": 1, "intel_avx": "unknown", "intel_avx2": "unknown", "intel_turbo": "unknown", "ipv6_support": true, "linux_virtualization": "Unknown", "max_ips": 60, "memory": 30.5, "name": "G3S Extra Large", "network_perf": 12.0, "physical_processor": "Intel Xeon E5-2686 v4 (Broadwell)", "placement_group_support": false, "storage": 0.0, "vcpus": 4.0, "vpc_only": true}, "g4dn.12xlarge": {"apiname": "g4dn.12xlarge", "architecture": "64-bit", "clock_speed_ghz": "unknown", "computeunits": 0.0, "ebs_iops": 40000.0, "ebs_max_bandwidth": 7000.0, "ebs_throughput": 875.0, "ecu_per_vcpu": 0.0, "enhanced_networking": false, "fpga": 0, "gpus": 4, "intel_avx": "unknown", "intel_avx2": "unknown", "intel_turbo": "unknown", "ipv6_support": true, "linux_virtualization": "Unknown", "max_ips": 240, "memory": 192.0, "name": "G4DN 12xlarge", "network_perf": 22.0, "physical_processor": "Intel Xeon Family", "placement_group_support": false, "storage": 900.0, "vcpus": 48.0, "vpc_only": true}, "g4dn.16xlarge": {"apiname": "g4dn.16xlarge", "architecture": "64-bit", "clock_speed_ghz": "unknown", "computeunits": 0.0, "ebs_iops": 40000.0, "ebs_max_bandwidth": 7000.0, "ebs_throughput": 875.0, "ecu_per_vcpu": 0.0, "enhanced_networking": false, "fpga": 0, "gpus": 1, "intel_avx": "unknown", "intel_avx2": "unknown", "intel_turbo": "unknown", "ipv6_support": true, "linux_virtualization": "Unknown", "max_ips": 750, "memory": 256.0, "name": "G4DN 16xlarge", "network_perf": 22.0, "physical_processor": "Intel Xeon Family", "placement_group_support": false, "storage": 900.0, "vcpus": 64.0, 
"vpc_only": true}, "g4dn.2xlarge": {"apiname": "g4dn.2xlarge", "architecture": "64-bit", "clock_speed_ghz": "unknown", "computeunits": 0.0, "ebs_iops": 20000.0, "ebs_max_bandwidth": 3500.0, "ebs_throughput": 437.5, "ecu_per_vcpu": 0.0, "enhanced_networking": false, "fpga": 0, "gpus": 1, "intel_avx": "unknown", "intel_avx2": "unknown", "intel_turbo": "unknown", "ipv6_support": true, "linux_virtualization": "Unknown", "max_ips": 30, "memory": 32.0, "name": "G4DN Double Extra Large", "network_perf": 18.0, "physical_processor": "Intel Xeon Family", "placement_group_support": false, "storage": 225.0, "vcpus": 8.0, "vpc_only": true}, "g4dn.4xlarge": {"apiname": "g4dn.4xlarge", "architecture": "64-bit", "clock_speed_ghz": "unknown", "computeunits": 0.0, "ebs_iops": 20000.0, "ebs_max_bandwidth": 3500.0, "ebs_throughput": 437.5, "ecu_per_vcpu": 0.0, "enhanced_networking": false, "fpga": 0, "gpus": 1, "intel_avx": "unknown", "intel_avx2": "unknown", "intel_turbo": "unknown", "ipv6_support": true, "linux_virtualization": "Unknown", "max_ips": 30, "memory": 64.0, "name": "G4DN Quadruple Extra Large", "network_perf": 18.0, "physical_processor": "Intel Xeon Family", "placement_group_support": false, "storage": 225.0, "vcpus": 16.0, "vpc_only": true}, "g4dn.8xlarge": {"apiname": "g4dn.8xlarge", "architecture": "64-bit", "clock_speed_ghz": "unknown", "computeunits": 0.0, "ebs_iops": 40000.0, "ebs_max_bandwidth": 7000.0, "ebs_throughput": 875.0, "ecu_per_vcpu": 0.0, "enhanced_networking": false, "fpga": 0, "gpus": 1, "intel_avx": "unknown", "intel_avx2": "unknown", "intel_turbo": "unknown", "ipv6_support": true, "linux_virtualization": "Unknown", "max_ips": 60, "memory": 128.0, "name": "G4DN Eight Extra Large", "network_perf": 22.0, "physical_processor": "Intel Xeon Family", "placement_group_support": false, "storage": 900.0, "vcpus": 32.0, "vpc_only": true}, "g4dn.metal": {"apiname": "g4dn.metal", "architecture": "64-bit", "clock_speed_ghz": "unknown", "computeunits": 0.0, "ebs_iops": 0.0, "ebs_max_bandwidth": 0.0, "ebs_throughput": 0.0, "ecu_per_vcpu": 0.0, "enhanced_networking": false, "fpga": 0, "gpus": 8, "intel_avx": "unknown", "intel_avx2": "unknown", "intel_turbo": "unknown", "ipv6_support": true, "linux_virtualization": "Unknown", "memory": 384.0, "name": "G4DN Metal", "network_perf": 26.0, "physical_processor": "Intel Xeon Family", "placement_group_support": false, "storage": 0.0, "vcpus": 96.0, "vpc_only": true}, "g4dn.xlarge": {"apiname": "g4dn.xlarge", "architecture": "64-bit", "clock_speed_ghz": "unknown", "computeunits": 0.0, "ebs_iops": 10000.0, "ebs_max_bandwidth": 3500.0, "ebs_throughput": 437.5, "ecu_per_vcpu": 0.0, "enhanced_networking": false, "fpga": 0, "gpus": 1, "intel_avx": "unknown", "intel_avx2": "unknown", "intel_turbo": "unknown", "ipv6_support": true, "linux_virtualization": "Unknown", "max_ips": 30, "memory": 16.0, "name": "G4DN Extra Large", "network_perf": 18.0, "physical_processor": "Intel Xeon Family", "placement_group_support": false, "storage": 125.0, "vcpus": 4.0, "vpc_only": true}, "h1.16xlarge": {"apiname": "h1.16xlarge", "architecture": "64-bit", "clock_speed_ghz": "2.3 GHz", "computeunits": 188.0, "ebs_iops": 80000.0, "ebs_max_bandwidth": 14000.0, "ebs_throughput": 1750.0, "ecu_per_vcpu": 2.9375, "enhanced_networking": true, "fpga": 0, "gpus": 0, "intel_avx": "Yes", "intel_avx2": "Yes", "intel_turbo": "Yes", "ipv6_support": true, "linux_virtualization": "HVM", "max_ips": 750, "memory": 256.0, "name": "H1 16xlarge", "network_perf": 20.0, "physical_processor": "Intel 
Xeon E5-2686 v4 (Broadwell)", "placement_group_support": false, "storage": 16000.0, "vcpus": 64.0, "vpc_only": true}, "h1.2xlarge": {"apiname": "h1.2xlarge", "architecture": "64-bit", "clock_speed_ghz": "2.3 GHz", "computeunits": 26.0, "ebs_iops": 12000.0, "ebs_max_bandwidth": 1750.0, "ebs_throughput": 218.75, "ecu_per_vcpu": 3.25, "enhanced_networking": true, "fpga": 0, "gpus": 0, "intel_avx": "Yes", "intel_avx2": "Yes", "intel_turbo": "Yes", "ipv6_support": true, "linux_virtualization": "HVM", "max_ips": 60, "memory": 32.0, "name": "H1 Double Extra Large", "network_perf": 10.0, "physical_processor": "Intel Xeon E5-2686 v4 (Broadwell)", "placement_group_support": false, "storage": 2000.0, "vcpus": 8.0, "vpc_only": true}, "h1.4xlarge": {"apiname": "h1.4xlarge", "architecture": "64-bit", "clock_speed_ghz": "2.3 GHz", "computeunits": 53.5, "ebs_iops": 20000.0, "ebs_max_bandwidth": 3500.0, "ebs_throughput": 437.5, "ecu_per_vcpu": 3.34375, "enhanced_networking": true, "fpga": 0, "gpus": 0, "intel_avx": "Yes", "intel_avx2": "Yes", "intel_turbo": "Yes", "ipv6_support": true, "linux_virtualization": "HVM", "max_ips": 240, "memory": 64.0, "name": "H1 Quadruple Extra Large", "network_perf": 10.0, "physical_processor": "Intel Xeon E5-2686 v4 (Broadwell)", "placement_group_support": false, "storage": 4000.0, "vcpus": 16.0, "vpc_only": true}, "h1.8xlarge": {"apiname": "h1.8xlarge", "architecture": "64-bit", "clock_speed_ghz": "2.3 GHz", "computeunits": 99.0, "ebs_iops": 40000.0, "ebs_max_bandwidth": 7000.0, "ebs_throughput": 875.0, "ecu_per_vcpu": 3.09375, "enhanced_networking": true, "fpga": 0, "gpus": 0, "intel_avx": "Yes", "intel_avx2": "Yes", "intel_turbo": "Yes", "ipv6_support": true, "linux_virtualization": "HVM", "max_ips": 240, "memory": 128.0, "name": "H1 Eight Extra Large", "network_perf": 12.0, "physical_processor": "Intel Xeon E5-2686 v4 (Broadwell)", "placement_group_support": false, "storage": 8000.0, "vcpus": 32.0, "vpc_only": true}, "hs1.8xlarge": {"apiname": "hs1.8xlarge", "architecture": "64-bit", "clock_speed_ghz": "2 GHz", "computeunits": 35.0, "ebs_iops": 0.0, "ebs_max_bandwidth": 0.0, "ebs_throughput": 0.0, "ecu_per_vcpu": 2.0588235294117645, "enhanced_networking": false, "fpga": 0, "gpus": 0, "intel_avx": "unknown", "intel_avx2": "unknown", "intel_turbo": "unknown", "ipv6_support": false, "linux_virtualization": "HVM, PV", "max_ips": 240, "memory": 117.0, "name": "High Storage Eight Extra Large", "network_perf": 12.0, "physical_processor": "Intel Xeon E5-2650", "placement_group_support": false, "storage": 48000.0, "vcpus": 17.0, "vpc_only": false}, "i2.2xlarge": {"apiname": "i2.2xlarge", "architecture": "64-bit", "clock_speed_ghz": "2.5 GHz", "computeunits": 27.0, "ebs_iops": 8000.0, "ebs_max_bandwidth": 1000.0, "ebs_throughput": 125.0, "ecu_per_vcpu": 3.375, "enhanced_networking": true, "fpga": 0, "gpus": 0, "intel_avx": "Yes", "intel_avx2": "unknown", "intel_turbo": "Yes", "ipv6_support": true, "linux_virtualization": "HVM", "max_ips": 60, "memory": 61.0, "name": "I2 Double Extra Large", "network_perf": 8.0, "physical_processor": "Intel Xeon E5-2670 v2 (Ivy Bridge)", "placement_group_support": false, "storage": 1600.0, "vcpus": 8.0, "vpc_only": false}, "i2.4xlarge": {"apiname": "i2.4xlarge", "architecture": "64-bit", "clock_speed_ghz": "2.5 GHz", "computeunits": 53.0, "ebs_iops": 16000.0, "ebs_max_bandwidth": 2000.0, "ebs_throughput": 250.0, "ecu_per_vcpu": 3.3125, "enhanced_networking": true, "fpga": 0, "gpus": 0, "intel_avx": "Yes", "intel_avx2": "unknown", "intel_turbo": 
"Yes", "ipv6_support": true, "linux_virtualization": "HVM", "max_ips": 240, "memory": 122.0, "name": "I2 Quadruple Extra Large", "network_perf": 8.0, "physical_processor": "Intel Xeon E5-2670 v2 (Ivy Bridge)", "placement_group_support": false, "storage": 3200.0, "vcpus": 16.0, "vpc_only": false}, "i2.8xlarge": {"apiname": "i2.8xlarge", "architecture": "64-bit", "clock_speed_ghz": "2.5 GHz", "computeunits": 104.0, "ebs_iops": 0.0, "ebs_max_bandwidth": 0.0, "ebs_throughput": 0.0, "ecu_per_vcpu": 3.25, "enhanced_networking": true, "fpga": 0, "gpus": 0, "intel_avx": "Yes", "intel_avx2": "unknown", "intel_turbo": "Yes", "ipv6_support": true, "linux_virtualization": "HVM", "max_ips": 240, "memory": 244.0, "name": "I2 Eight Extra Large", "network_perf": 12.0, "physical_processor": "Intel Xeon E5-2670 v2 (Ivy Bridge)", "placement_group_support": false, "storage": 6400.0, "vcpus": 32.0, "vpc_only": false}, "i2.xlarge": {"apiname": "i2.xlarge", "architecture": "64-bit", "clock_speed_ghz": "2.5 GHz", "computeunits": 14.0, "ebs_iops": 4000.0, "ebs_max_bandwidth": 500.0, "ebs_throughput": 62.5, "ecu_per_vcpu": 3.5, "enhanced_networking": true, "fpga": 0, "gpus": 0, "intel_avx": "Yes", "intel_avx2": "unknown", "intel_turbo": "Yes", "ipv6_support": true, "linux_virtualization": "HVM", "max_ips": 60, "memory": 30.5, "name": "I2 Extra Large", "network_perf": 6.0, "physical_processor": "Intel Xeon E5-2670 v2 (Ivy Bridge)", "placement_group_support": false, "storage": 800.0, "vcpus": 4.0, "vpc_only": false}, "i3.16xlarge": {"apiname": "i3.16xlarge", "architecture": "64-bit", "clock_speed_ghz": "2.3 GHz", "computeunits": 200.0, "ebs_iops": 65000.0, "ebs_max_bandwidth": 14000.0, "ebs_throughput": 1750.0, "ecu_per_vcpu": 3.125, "enhanced_networking": true, "fpga": 0, "gpus": 0, "intel_avx": "Yes", "intel_avx2": "Yes", "intel_turbo": "Yes", "ipv6_support": true, "linux_virtualization": "HVM", "max_ips": 750, "memory": 488.0, "name": "I3 High I/O 16xlarge", "network_perf": 16.0, "physical_processor": "Intel Xeon E5-2686 v4 (Broadwell)", "placement_group_support": false, "storage": 15200.0, "vcpus": 64.0, "vpc_only": true}, "i3.2xlarge": {"apiname": "i3.2xlarge", "architecture": "64-bit", "clock_speed_ghz": "2.3 GHz", "computeunits": 27.0, "ebs_iops": 12000.0, "ebs_max_bandwidth": 1700.0, "ebs_throughput": 212.5, "ecu_per_vcpu": 3.375, "enhanced_networking": true, "fpga": 0, "gpus": 0, "intel_avx": "Yes", "intel_avx2": "Yes", "intel_turbo": "Yes", "ipv6_support": true, "linux_virtualization": "HVM", "max_ips": 60, "memory": 61.0, "name": "I3 High I/O Double Extra Large", "network_perf": 10.0, "physical_processor": "Intel Xeon E5-2686 v4 (Broadwell)", "placement_group_support": false, "storage": 1900.0, "vcpus": 8.0, "vpc_only": true}, "i3.4xlarge": {"apiname": "i3.4xlarge", "architecture": "64-bit", "clock_speed_ghz": "2.3 GHz", "computeunits": 53.0, "ebs_iops": 16000.0, "ebs_max_bandwidth": 3500.0, "ebs_throughput": 437.5, "ecu_per_vcpu": 3.3125, "enhanced_networking": true, "fpga": 0, "gpus": 0, "intel_avx": "Yes", "intel_avx2": "Yes", "intel_turbo": "Yes", "ipv6_support": true, "linux_virtualization": "HVM", "max_ips": 240, "memory": 122.0, "name": "I3 High I/O Quadruple Extra Large", "network_perf": 10.0, "physical_processor": "Intel Xeon E5-2686 v4 (Broadwell)", "placement_group_support": false, "storage": 3800.0, "vcpus": 16.0, "vpc_only": true}, "i3.8xlarge": {"apiname": "i3.8xlarge", "architecture": "64-bit", "clock_speed_ghz": "2.3 GHz", "computeunits": 99.0, "ebs_iops": 32500.0, "ebs_max_bandwidth": 
7000.0, "ebs_throughput": 875.0, "ecu_per_vcpu": 3.09375, "enhanced_networking": true, "fpga": 0, "gpus": 0, "intel_avx": "Yes", "intel_avx2": "Yes", "intel_turbo": "Yes", "ipv6_support": true, "linux_virtualization": "HVM", "max_ips": 240, "memory": 244.0, "name": "I3 High I/O Eight Extra Large", "network_perf": 12.0, "physical_processor": "Intel Xeon E5-2686 v4 (Broadwell)", "placement_group_support": false, "storage": 7600.0, "vcpus": 32.0, "vpc_only": true}, "i3.large": {"apiname": "i3.large", "architecture": "64-bit", "clock_speed_ghz": "2.3 GHz", "computeunits": 7.0, "ebs_iops": 3000.0, "ebs_max_bandwidth": 425.0, "ebs_throughput": 53.13, "ecu_per_vcpu": 3.5, "enhanced_networking": true, "fpga": 0, "gpus": 0, "intel_avx": "Yes", "intel_avx2": "Yes", "intel_turbo": "Yes", "ipv6_support": true, "linux_virtualization": "HVM", "max_ips": 30, "memory": 15.25, "name": "I3 High I/O Large", "network_perf": 10.0, "physical_processor": "Intel Xeon E5-2686 v4 (Broadwell)", "placement_group_support": false, "storage": 475.0, "vcpus": 2.0, "vpc_only": true}, "i3.metal": {"apiname": "i3.metal", "architecture": "64-bit", "clock_speed_ghz": "unknown", "computeunits": 208.0, "ebs_iops": 65000.0, "ebs_max_bandwidth": 14000.0, "ebs_throughput": 1750.0, "ecu_per_vcpu": 2.888888888888889, "enhanced_networking": false, "fpga": 0, "gpus": 0, "intel_avx": "unknown", "intel_avx2": "unknown", "intel_turbo": "unknown", "ipv6_support": true, "linux_virtualization": "HVM", "max_ips": 750, "memory": 512.0, "name": "I3 High I/O Metal", "network_perf": 20.0, "physical_processor": "Intel Xeon E5-2686 v4 (Broadwell)", "placement_group_support": false, "storage": 15200.0, "vcpus": 72.0, "vpc_only": true}, "i3.xlarge": {"apiname": "i3.xlarge", "architecture": "64-bit", "clock_speed_ghz": "2.3 GHz", "computeunits": 13.0, "ebs_iops": 6000.0, "ebs_max_bandwidth": 850.0, "ebs_throughput": 106.25, "ecu_per_vcpu": 3.25, "enhanced_networking": true, "fpga": 0, "gpus": 0, "intel_avx": "Yes", "intel_avx2": "Yes", "intel_turbo": "Yes", "ipv6_support": true, "linux_virtualization": "HVM", "max_ips": 60, "memory": 30.5, "name": "I3 High I/O Extra Large", "network_perf": 10.0, "physical_processor": "Intel Xeon E5-2686 v4 (Broadwell)", "placement_group_support": false, "storage": 950.0, "vcpus": 4.0, "vpc_only": true}, "i3en.12xlarge": {"apiname": "i3en.12xlarge", "architecture": "64-bit", "clock_speed_ghz": "3.1 GHz", "computeunits": 0.0, "ebs_iops": 40000.0, "ebs_max_bandwidth": 7000.0, "ebs_throughput": 875.0, "ecu_per_vcpu": 0.0, "enhanced_networking": true, "fpga": 0, "gpus": 0, "intel_avx": "unknown", "intel_avx2": "unknown", "intel_turbo": "unknown", "ipv6_support": true, "linux_virtualization": "Unknown", "max_ips": 240, "memory": 384.0, "name": "I3EN 12xlarge", "network_perf": 22.0, "physical_processor": "Intel Xeon Platinum 8175", "placement_group_support": false, "storage": 30000.0, "vcpus": 48.0, "vpc_only": true}, "i3en.24xlarge": {"apiname": "i3en.24xlarge", "architecture": "64-bit", "clock_speed_ghz": "3.1 GHz", "computeunits": 0.0, "ebs_iops": 80000.0, "ebs_max_bandwidth": 14000.0, "ebs_throughput": 1750.0, "ecu_per_vcpu": 0.0, "enhanced_networking": true, "fpga": 0, "gpus": 0, "intel_avx": "unknown", "intel_avx2": "unknown", "intel_turbo": "unknown", "ipv6_support": true, "linux_virtualization": "Unknown", "max_ips": 750, "memory": 768.0, "name": "I3EN 24xlarge", "network_perf": 26.0, "physical_processor": "Intel Xeon Platinum 8175", "placement_group_support": false, "storage": 60000.0, "vcpus": 96.0, "vpc_only": 
true}, "i3en.2xlarge": {"apiname": "i3en.2xlarge", "architecture": "64-bit", "clock_speed_ghz": "3.1 GHz", "computeunits": 0.0, "ebs_iops": 20000.0, "ebs_max_bandwidth": 3500.0, "ebs_throughput": 437.5, "ecu_per_vcpu": 0.0, "enhanced_networking": true, "fpga": 0, "gpus": 0, "intel_avx": "unknown", "intel_avx2": "unknown", "intel_turbo": "unknown", "ipv6_support": true, "linux_virtualization": "Unknown", "max_ips": 60, "memory": 64.0, "name": "I3EN Double Extra Large", "network_perf": 18.0, "physical_processor": "Intel Xeon Platinum 8175", "placement_group_support": false, "storage": 5000.0, "vcpus": 8.0, "vpc_only": true}, "i3en.3xlarge": {"apiname": "i3en.3xlarge", "architecture": "64-bit", "clock_speed_ghz": "3.1 GHz", "computeunits": 0.0, "ebs_iops": 20000.0, "ebs_max_bandwidth": 3500.0, "ebs_throughput": 437.5, "ecu_per_vcpu": 0.0, "enhanced_networking": true, "fpga": 0, "gpus": 0, "intel_avx": "unknown", "intel_avx2": "unknown", "intel_turbo": "unknown", "ipv6_support": true, "linux_virtualization": "Unknown", "max_ips": 60, "memory": 96.0, "name": "I3EN 3xlarge", "network_perf": 18.0, "physical_processor": "Intel Xeon Platinum 8175", "placement_group_support": false, "storage": 7500.0, "vcpus": 12.0, "vpc_only": true}, "i3en.6xlarge": {"apiname": "i3en.6xlarge", "architecture": "64-bit", "clock_speed_ghz": "3.1 GHz", "computeunits": 0.0, "ebs_iops": 20000.0, "ebs_max_bandwidth": 3500.0, "ebs_throughput": 437.5, "ecu_per_vcpu": 0.0, "enhanced_networking": true, "fpga": 0, "gpus": 0, "intel_avx": "unknown", "intel_avx2": "unknown", "intel_turbo": "unknown", "ipv6_support": true, "linux_virtualization": "Unknown", "max_ips": 240, "memory": 192.0, "name": "I3EN 6xlarge", "network_perf": 20.0, "physical_processor": "Intel Xeon Platinum 8175", "placement_group_support": false, "storage": 15000.0, "vcpus": 24.0, "vpc_only": true}, "i3en.large": {"apiname": "i3en.large", "architecture": "64-bit", "clock_speed_ghz": "3.1 GHz", "computeunits": 0.0, "ebs_iops": 20000.0, "ebs_max_bandwidth": 3500.0, "ebs_throughput": 437.5, "ecu_per_vcpu": 0.0, "enhanced_networking": true, "fpga": 0, "gpus": 0, "intel_avx": "unknown", "intel_avx2": "unknown", "intel_turbo": "unknown", "ipv6_support": true, "linux_virtualization": "Unknown", "max_ips": 30, "memory": 16.0, "name": "I3EN Large", "network_perf": 18.0, "physical_processor": "Intel Xeon Platinum 8175", "placement_group_support": false, "storage": 1250.0, "vcpus": 2.0, "vpc_only": true}, "i3en.metal": {"apiname": "i3en.metal", "architecture": "64-bit", "clock_speed_ghz": "3.1 GHz", "computeunits": 0.0, "ebs_iops": 80000.0, "ebs_max_bandwidth": 14000.0, "ebs_throughput": 1750.0, "ecu_per_vcpu": 0.0, "enhanced_networking": true, "fpga": 0, "gpus": 0, "intel_avx": "unknown", "intel_avx2": "unknown", "intel_turbo": "unknown", "ipv6_support": true, "linux_virtualization": "Unknown", "max_ips": 750, "memory": 768.0, "name": "I3EN Metal", "network_perf": 26.0, "physical_processor": "Intel Xeon Platinum 8175", "placement_group_support": false, "storage": 60000.0, "vcpus": 96.0, "vpc_only": true}, "i3en.xlarge": {"apiname": "i3en.xlarge", "architecture": "64-bit", "clock_speed_ghz": "3.1 GHz", "computeunits": 0.0, "ebs_iops": 20000.0, "ebs_max_bandwidth": 3500.0, "ebs_throughput": 437.5, "ecu_per_vcpu": 0.0, "enhanced_networking": true, "fpga": 0, "gpus": 0, "intel_avx": "unknown", "intel_avx2": "unknown", "intel_turbo": "unknown", "ipv6_support": true, "linux_virtualization": "Unknown", "max_ips": 60, "memory": 32.0, "name": "I3EN Extra Large", "network_perf": 
18.0, "physical_processor": "Intel Xeon Platinum 8175", "placement_group_support": false, "storage": 2500.0, "vcpus": 4.0, "vpc_only": true}, "m1.large": {"apiname": "m1.large", "architecture": "64-bit", "clock_speed_ghz": "unknown", "computeunits": 4.0, "ebs_iops": 4000.0, "ebs_max_bandwidth": 500.0, "ebs_throughput": 62.5, "ecu_per_vcpu": 2.0, "enhanced_networking": false, "fpga": 0, "gpus": 0, "intel_avx": "unknown", "intel_avx2": "unknown", "intel_turbo": "unknown", "ipv6_support": false, "linux_virtualization": "PV", "max_ips": 30, "memory": 7.5, "name": "M1 General Purpose Large", "network_perf": 6.0, "physical_processor": "Intel Xeon Family", "placement_group_support": false, "storage": 840.0, "vcpus": 2.0, "vpc_only": false}, "m1.medium": {"apiname": "m1.medium", "architecture": "32/64-bit", "clock_speed_ghz": "unknown", "computeunits": 2.0, "ebs_iops": 0.0, "ebs_max_bandwidth": 0.0, "ebs_throughput": 0.0, "ecu_per_vcpu": 2.0, "enhanced_networking": false, "fpga": 0, "gpus": 0, "intel_avx": "unknown", "intel_avx2": "unknown", "intel_turbo": "unknown", "ipv6_support": false, "linux_virtualization": "PV", "max_ips": 12, "memory": 3.75, "name": "M1 General Purpose Medium", "network_perf": 6.0, "physical_processor": "Intel Xeon Family", "placement_group_support": false, "storage": 410.0, "vcpus": 1.0, "vpc_only": false}, "m1.small": {"apiname": "m1.small", "architecture": "32/64-bit", "clock_speed_ghz": "unknown", "computeunits": 1.0, "ebs_iops": 0.0, "ebs_max_bandwidth": 0.0, "ebs_throughput": 0.0, "ecu_per_vcpu": 1.0, "enhanced_networking": false, "fpga": 0, "gpus": 0, "intel_avx": "unknown", "intel_avx2": "unknown", "intel_turbo": "unknown", "ipv6_support": false, "linux_virtualization": "PV", "max_ips": 8, "memory": 1.7, "name": "M1 General Purpose Small", "network_perf": 2.0, "physical_processor": "Intel Xeon Family", "placement_group_support": false, "storage": 160.0, "vcpus": 1.0, "vpc_only": false}, "m1.xlarge": {"apiname": "m1.xlarge", "architecture": "64-bit", "clock_speed_ghz": "unknown", "computeunits": 8.0, "ebs_iops": 8000.0, "ebs_max_bandwidth": 1000.0, "ebs_throughput": 125.0, "ecu_per_vcpu": 2.0, "enhanced_networking": false, "fpga": 0, "gpus": 0, "intel_avx": "unknown", "intel_avx2": "unknown", "intel_turbo": "unknown", "ipv6_support": false, "linux_virtualization": "PV", "max_ips": 60, "memory": 15.0, "name": "M1 General Purpose Extra Large", "network_perf": 8.0, "physical_processor": "Intel Xeon Family", "placement_group_support": false, "storage": 1680.0, "vcpus": 4.0, "vpc_only": false}, "m2.2xlarge": {"apiname": "m2.2xlarge", "architecture": "64-bit", "clock_speed_ghz": "unknown", "computeunits": 13.0, "ebs_iops": 4000.0, "ebs_max_bandwidth": 500.0, "ebs_throughput": 62.5, "ecu_per_vcpu": 3.25, "enhanced_networking": false, "fpga": 0, "gpus": 0, "intel_avx": "unknown", "intel_avx2": "unknown", "intel_turbo": "unknown", "ipv6_support": false, "linux_virtualization": "PV", "max_ips": 120, "memory": 34.2, "name": "M2 High Memory Double Extra Large", "network_perf": 6.0, "physical_processor": "Intel Xeon Family", "placement_group_support": false, "storage": 850.0, "vcpus": 4.0, "vpc_only": false}, "m2.4xlarge": {"apiname": "m2.4xlarge", "architecture": "64-bit", "clock_speed_ghz": "unknown", "computeunits": 26.0, "ebs_iops": 8000.0, "ebs_max_bandwidth": 1000.0, "ebs_throughput": 125.0, "ecu_per_vcpu": 3.25, "enhanced_networking": false, "fpga": 0, "gpus": 0, "intel_avx": "unknown", "intel_avx2": "unknown", "intel_turbo": "unknown", "ipv6_support": false, 
"linux_virtualization": "PV", "max_ips": 240, "memory": 68.4, "name": "M2 High Memory Quadruple Extra Large", "network_perf": 8.0, "physical_processor": "Intel Xeon Family", "placement_group_support": false, "storage": 1680.0, "vcpus": 8.0, "vpc_only": false}, "m2.xlarge": {"apiname": "m2.xlarge", "architecture": "64-bit", "clock_speed_ghz": "unknown", "computeunits": 6.5, "ebs_iops": 0.0, "ebs_max_bandwidth": 0.0, "ebs_throughput": 0.0, "ecu_per_vcpu": 3.25, "enhanced_networking": false, "fpga": 0, "gpus": 0, "intel_avx": "unknown", "intel_avx2": "unknown", "intel_turbo": "unknown", "ipv6_support": false, "linux_virtualization": "PV", "max_ips": 60, "memory": 17.1, "name": "M2 High Memory Extra Large", "network_perf": 6.0, "physical_processor": "Intel Xeon Family", "placement_group_support": false, "storage": 420.0, "vcpus": 2.0, "vpc_only": false}, "m3.2xlarge": {"apiname": "m3.2xlarge", "architecture": "64-bit", "clock_speed_ghz": "2.5 GHz", "computeunits": 26.0, "ebs_iops": 8000.0, "ebs_max_bandwidth": 1000.0, "ebs_throughput": 125.0, "ecu_per_vcpu": 3.25, "enhanced_networking": false, "fpga": 0, "gpus": 0, "intel_avx": "Yes", "intel_avx2": "unknown", "intel_turbo": "Yes", "ipv6_support": false, "linux_virtualization": "HVM, PV", "max_ips": 120, "memory": 30.0, "name": "M3 General Purpose Double Extra Large", "network_perf": 8.0, "physical_processor": "Intel Xeon E5-2670 v2 (Ivy Bridge/Sandy Bridge)", "placement_group_support": false, "storage": 160.0, "vcpus": 8.0, "vpc_only": false}, "m3.large": {"apiname": "m3.large", "architecture": "64-bit", "clock_speed_ghz": "2.5 GHz", "computeunits": 6.5, "ebs_iops": 0.0, "ebs_max_bandwidth": 0.0, "ebs_throughput": 0.0, "ecu_per_vcpu": 3.25, "enhanced_networking": false, "fpga": 0, "gpus": 0, "intel_avx": "Yes", "intel_avx2": "unknown", "intel_turbo": "Yes", "ipv6_support": false, "linux_virtualization": "HVM, PV", "max_ips": 30, "memory": 7.5, "name": "M3 General Purpose Large", "network_perf": 6.0, "physical_processor": "Intel Xeon E5-2670 v2 (Ivy Bridge/Sandy Bridge)", "placement_group_support": false, "storage": 32.0, "vcpus": 2.0, "vpc_only": false}, "m3.medium": {"apiname": "m3.medium", "architecture": "64-bit", "clock_speed_ghz": "2.5 GHz", "computeunits": 3.0, "ebs_iops": 0.0, "ebs_max_bandwidth": 0.0, "ebs_throughput": 0.0, "ecu_per_vcpu": 3.0, "enhanced_networking": false, "fpga": 0, "gpus": 0, "intel_avx": "Yes", "intel_avx2": "unknown", "intel_turbo": "Yes", "ipv6_support": false, "linux_virtualization": "HVM, PV", "max_ips": 12, "memory": 3.75, "name": "M3 General Purpose Medium", "network_perf": 6.0, "physical_processor": "Intel Xeon E5-2670 v2 (Ivy Bridge/Sandy Bridge)", "placement_group_support": false, "storage": 4.0, "vcpus": 1.0, "vpc_only": false}, "m3.xlarge": {"apiname": "m3.xlarge", "architecture": "64-bit", "clock_speed_ghz": "2.5 GHz", "computeunits": 13.0, "ebs_iops": 4000.0, "ebs_max_bandwidth": 500.0, "ebs_throughput": 62.5, "ecu_per_vcpu": 3.25, "enhanced_networking": false, "fpga": 0, "gpus": 0, "intel_avx": "Yes", "intel_avx2": "unknown", "intel_turbo": "Yes", "ipv6_support": false, "linux_virtualization": "HVM, PV", "max_ips": 60, "memory": 15.0, "name": "M3 General Purpose Extra Large", "network_perf": 8.0, "physical_processor": "Intel Xeon E5-2670 v2 (Ivy Bridge/Sandy Bridge)", "placement_group_support": false, "storage": 80.0, "vcpus": 4.0, "vpc_only": false}, "m4.10xlarge": {"apiname": "m4.10xlarge", "architecture": "64-bit", "clock_speed_ghz": "2.4 GHz", "computeunits": 124.5, "ebs_iops": 32000.0, 
"ebs_max_bandwidth": 4000.0, "ebs_throughput": 500.0, "ecu_per_vcpu": 3.1125, "enhanced_networking": true, "fpga": 0, "gpus": 0, "intel_avx": "Yes", "intel_avx2": "Yes", "intel_turbo": "Yes", "ipv6_support": true, "linux_virtualization": "HVM", "max_ips": 240, "memory": 160.0, "name": "M4 General Purpose Deca Extra Large", "network_perf": 12.0, "physical_processor": "Intel Xeon E5-2676 v3 (Haswell)", "placement_group_support": false, "storage": 0.0, "vcpus": 40.0, "vpc_only": true}, "m4.16xlarge": {"apiname": "m4.16xlarge", "architecture": "64-bit", "clock_speed_ghz": "2.3 GHz", "computeunits": 188.0, "ebs_iops": 65000.0, "ebs_max_bandwidth": 10000.0, "ebs_throughput": 1250.0, "ecu_per_vcpu": 2.9375, "enhanced_networking": true, "fpga": 0, "gpus": 0, "intel_avx": "Yes", "intel_avx2": "Yes", "intel_turbo": "Yes", "ipv6_support": true, "linux_virtualization": "HVM", "max_ips": 240, "memory": 256.0, "name": "M4 General Purpose 16xlarge", "network_perf": 16.0, "physical_processor": "Intel Xeon E5-2686 v4 (Broadwell)", "placement_group_support": false, "storage": 0.0, "vcpus": 64.0, "vpc_only": true}, "m4.2xlarge": {"apiname": "m4.2xlarge", "architecture": "64-bit", "clock_speed_ghz": "2.4 GHz", "computeunits": 26.0, "ebs_iops": 8000.0, "ebs_max_bandwidth": 1000.0, "ebs_throughput": 125.0, "ecu_per_vcpu": 3.25, "enhanced_networking": true, "fpga": 0, "gpus": 0, "intel_avx": "Yes", "intel_avx2": "Yes", "intel_turbo": "Yes", "ipv6_support": true, "linux_virtualization": "HVM", "max_ips": 60, "memory": 32.0, "name": "M4 General Purpose Double Extra Large", "network_perf": 8.0, "physical_processor": "Intel Xeon E5-2676 v3 (Haswell)", "placement_group_support": false, "storage": 0.0, "vcpus": 8.0, "vpc_only": true}, "m4.4xlarge": {"apiname": "m4.4xlarge", "architecture": "64-bit", "clock_speed_ghz": "2.4 GHz", "computeunits": 53.5, "ebs_iops": 16000.0, "ebs_max_bandwidth": 2000.0, "ebs_throughput": 250.0, "ecu_per_vcpu": 3.34375, "enhanced_networking": true, "fpga": 0, "gpus": 0, "intel_avx": "Yes", "intel_avx2": "Yes", "intel_turbo": "Yes", "ipv6_support": true, "linux_virtualization": "HVM", "max_ips": 240, "memory": 64.0, "name": "M4 General Purpose Quadruple Extra Large", "network_perf": 8.0, "physical_processor": "Intel Xeon E5-2676 v3 (Haswell)", "placement_group_support": false, "storage": 0.0, "vcpus": 16.0, "vpc_only": true}, "m4.large": {"apiname": "m4.large", "architecture": "64-bit", "clock_speed_ghz": "2.4 GHz", "computeunits": 6.5, "ebs_iops": 3600.0, "ebs_max_bandwidth": 450.0, "ebs_throughput": 56.25, "ecu_per_vcpu": 3.25, "enhanced_networking": true, "fpga": 0, "gpus": 0, "intel_avx": "Yes", "intel_avx2": "Yes", "intel_turbo": "Yes", "ipv6_support": true, "linux_virtualization": "HVM", "max_ips": 20, "memory": 8.0, "name": "M4 General Purpose Large", "network_perf": 6.0, "physical_processor": "Intel Xeon E5-2676 v3 (Haswell)", "placement_group_support": false, "storage": 0.0, "vcpus": 2.0, "vpc_only": true}, "m4.xlarge": {"apiname": "m4.xlarge", "architecture": "64-bit", "clock_speed_ghz": "2.4 GHz", "computeunits": 13.0, "ebs_iops": 6000.0, "ebs_max_bandwidth": 750.0, "ebs_throughput": 93.75, "ecu_per_vcpu": 3.25, "enhanced_networking": true, "fpga": 0, "gpus": 0, "intel_avx": "Yes", "intel_avx2": "Yes", "intel_turbo": "Yes", "ipv6_support": true, "linux_virtualization": "HVM", "max_ips": 60, "memory": 16.0, "name": "M4 General Purpose Extra Large", "network_perf": 8.0, "physical_processor": "Intel Xeon E5-2676 v3 (Haswell)", "placement_group_support": false, "storage": 0.0, 
"vcpus": 4.0, "vpc_only": true}, "m5.12xlarge": {"apiname": "m5.12xlarge", "architecture": "64-bit", "clock_speed_ghz": "2.5 GHz", "computeunits": 173.0, "ebs_iops": 40000.0, "ebs_max_bandwidth": 7000.0, "ebs_throughput": 875.0, "ecu_per_vcpu": 3.6041666666666665, "enhanced_networking": true, "fpga": 0, "gpus": 0, "intel_avx": "Yes", "intel_avx2": "Yes", "intel_turbo": "Yes", "ipv6_support": true, "linux_virtualization": "HVM", "max_ips": 240, "memory": 192.0, "name": "M5 General Purpose 12xlarge", "network_perf": 12.0, "physical_processor": "Intel Xeon Platinum 8175", "placement_group_support": false, "storage": 0.0, "vcpus": 48.0, "vpc_only": true}, "m5.16xlarge": {"apiname": "m5.16xlarge", "architecture": "64-bit", "clock_speed_ghz": "2.5 GHz", "computeunits": 262.0, "ebs_iops": 60000.0, "ebs_max_bandwidth": 10000.0, "ebs_throughput": 1250.0, "ecu_per_vcpu": 4.09375, "enhanced_networking": true, "fpga": 0, "gpus": 0, "intel_avx": "Yes", "intel_avx2": "Yes", "intel_turbo": "Yes", "ipv6_support": true, "linux_virtualization": "HVM", "max_ips": 750, "memory": 256.0, "name": "M5 General Purpose 16xlarge", "network_perf": 16.0, "physical_processor": "Intel Xeon Platinum 8175", "placement_group_support": false, "storage": 0.0, "vcpus": 64.0, "vpc_only": true}, "m5.24xlarge": {"apiname": "m5.24xlarge", "architecture": "64-bit", "clock_speed_ghz": "2.5 GHz", "computeunits": 345.0, "ebs_iops": 80000.0, "ebs_max_bandwidth": 14000.0, "ebs_throughput": 1750.0, "ecu_per_vcpu": 3.59375, "enhanced_networking": true, "fpga": 0, "gpus": 0, "intel_avx": "Yes", "intel_avx2": "Yes", "intel_turbo": "Yes", "ipv6_support": true, "linux_virtualization": "HVM", "max_ips": 750, "memory": 384.0, "name": "M5 General Purpose 24xlarge", "network_perf": 20.0, "physical_processor": "Intel Xeon Platinum 8175", "placement_group_support": false, "storage": 0.0, "vcpus": 96.0, "vpc_only": true}, "m5.2xlarge": {"apiname": "m5.2xlarge", "architecture": "64-bit", "clock_speed_ghz": "2.5 GHz", "computeunits": 31.0, "ebs_iops": 18750.0, "ebs_max_bandwidth": 3500.0, "ebs_throughput": 437.5, "ecu_per_vcpu": 3.875, "enhanced_networking": true, "fpga": 0, "gpus": 0, "intel_avx": "Yes", "intel_avx2": "Yes", "intel_turbo": "Yes", "ipv6_support": true, "linux_virtualization": "HVM", "max_ips": 60, "memory": 32.0, "name": "M5 General Purpose Double Extra Large", "network_perf": 10.0, "physical_processor": "Intel Xeon Platinum 8175", "placement_group_support": false, "storage": 0.0, "vcpus": 8.0, "vpc_only": true}, "m5.4xlarge": {"apiname": "m5.4xlarge", "architecture": "64-bit", "clock_speed_ghz": "2.5 GHz", "computeunits": 60.0, "ebs_iops": 18750.0, "ebs_max_bandwidth": 3500.0, "ebs_throughput": 437.5, "ecu_per_vcpu": 3.75, "enhanced_networking": true, "fpga": 0, "gpus": 0, "intel_avx": "Yes", "intel_avx2": "Yes", "intel_turbo": "Yes", "ipv6_support": true, "linux_virtualization": "HVM", "max_ips": 240, "memory": 64.0, "name": "M5 General Purpose Quadruple Extra Large", "network_perf": 10.0, "physical_processor": "Intel Xeon Platinum 8175", "placement_group_support": false, "storage": 0.0, "vcpus": 16.0, "vpc_only": true}, "m5.8xlarge": {"apiname": "m5.8xlarge", "architecture": "64-bit", "clock_speed_ghz": "2.5 GHz", "computeunits": 131.0, "ebs_iops": 30000.0, "ebs_max_bandwidth": 5000.0, "ebs_throughput": 625.0, "ecu_per_vcpu": 4.09375, "enhanced_networking": true, "fpga": 0, "gpus": 0, "intel_avx": "Yes", "intel_avx2": "Yes", "intel_turbo": "Yes", "ipv6_support": true, "linux_virtualization": "HVM", "max_ips": 240, "memory": 128.0, 
"name": "M5 General Purpose Eight Extra Large", "network_perf": 12.0, "physical_processor": "Intel Xeon Platinum 8175", "placement_group_support": false, "storage": 0.0, "vcpus": 32.0, "vpc_only": true}, "m5.large": {"apiname": "m5.large", "architecture": "64-bit", "clock_speed_ghz": "2.5 GHz", "computeunits": 8.0, "ebs_iops": 18750.0, "ebs_max_bandwidth": 3500.0, "ebs_throughput": 437.5, "ecu_per_vcpu": 4.0, "enhanced_networking": true, "fpga": 0, "gpus": 0, "intel_avx": "Yes", "intel_avx2": "Yes", "intel_turbo": "Yes", "ipv6_support": true, "linux_virtualization": "HVM", "max_ips": 30, "memory": 8.0, "name": "M5 General Purpose Large", "network_perf": 10.0, "physical_processor": "Intel Xeon Platinum 8175", "placement_group_support": false, "storage": 0.0, "vcpus": 2.0, "vpc_only": true}, "m5.metal": {"apiname": "m5.metal", "architecture": "64-bit", "clock_speed_ghz": "2.5 GHz", "computeunits": 345.0, "ebs_iops": 80000.0, "ebs_max_bandwidth": 14000.0, "ebs_throughput": 1750.0, "ecu_per_vcpu": 3.59375, "enhanced_networking": true, "fpga": 0, "gpus": 0, "intel_avx": "Yes", "intel_avx2": "Yes", "intel_turbo": "Yes", "ipv6_support": true, "linux_virtualization": "HVM", "max_ips": 750, "memory": 384.0, "name": "M5 General Purpose Metal", "network_perf": 20.0, "physical_processor": "Intel Xeon Platinum 8175", "placement_group_support": false, "storage": 0.0, "vcpus": 96.0, "vpc_only": true}, "m5.xlarge": {"apiname": "m5.xlarge", "architecture": "64-bit", "clock_speed_ghz": "2.5 GHz", "computeunits": 16.0, "ebs_iops": 18750.0, "ebs_max_bandwidth": 3500.0, "ebs_throughput": 437.5, "ecu_per_vcpu": 4.0, "enhanced_networking": true, "fpga": 0, "gpus": 0, "intel_avx": "Yes", "intel_avx2": "Yes", "intel_turbo": "Yes", "ipv6_support": true, "linux_virtualization": "HVM", "max_ips": 60, "memory": 16.0, "name": "M5 General Purpose Extra Large", "network_perf": 10.0, "physical_processor": "Intel Xeon Platinum 8175", "placement_group_support": false, "storage": 0.0, "vcpus": 4.0, "vpc_only": true}, "m5a.12xlarge": {"apiname": "m5a.12xlarge", "architecture": "64-bit", "clock_speed_ghz": "2.5 GHz", "computeunits": 0.0, "ebs_iops": 30000.0, "ebs_max_bandwidth": 5000.0, "ebs_throughput": 625.0, "ecu_per_vcpu": 0.0, "enhanced_networking": true, "fpga": 0, "gpus": 0, "intel_avx": "unknown", "intel_avx2": "unknown", "intel_turbo": "unknown", "ipv6_support": true, "linux_virtualization": "Unknown", "max_ips": 240, "memory": 192.0, "name": "M5A 12xlarge", "network_perf": 12.0, "physical_processor": "AMD EPYC 7571", "placement_group_support": false, "storage": 0.0, "vcpus": 48.0, "vpc_only": true}, "m5a.16xlarge": {"apiname": "m5a.16xlarge", "architecture": "64-bit", "clock_speed_ghz": "2.5 GHz", "computeunits": 0.0, "ebs_iops": 40000.0, "ebs_max_bandwidth": 7000.0, "ebs_throughput": 875.0, "ecu_per_vcpu": 0.0, "enhanced_networking": true, "fpga": 0, "gpus": 0, "intel_avx": "unknown", "intel_avx2": "unknown", "intel_turbo": "unknown", "ipv6_support": true, "linux_virtualization": "Unknown", "max_ips": 750, "memory": 256.0, "name": "M5A 16xlarge", "network_perf": 14.0, "physical_processor": "AMD EPYC 7571", "placement_group_support": false, "storage": 0.0, "vcpus": 64.0, "vpc_only": true}, "m5a.24xlarge": {"apiname": "m5a.24xlarge", "architecture": "64-bit", "clock_speed_ghz": "2.5 GHz", "computeunits": 0.0, "ebs_iops": 60000.0, "ebs_max_bandwidth": 10000.0, "ebs_throughput": 1250.0, "ecu_per_vcpu": 0.0, "enhanced_networking": true, "fpga": 0, "gpus": 0, "intel_avx": "unknown", "intel_avx2": "unknown", "intel_turbo": 
"unknown", "ipv6_support": true, "linux_virtualization": "Unknown", "max_ips": 750, "memory": 384.0, "name": "M5A 24xlarge", "network_perf": 16.0, "physical_processor": "AMD EPYC 7571", "placement_group_support": false, "storage": 0.0, "vcpus": 96.0, "vpc_only": true}, "m5a.2xlarge": {"apiname": "m5a.2xlarge", "architecture": "64-bit", "clock_speed_ghz": "2.5 GHz", "computeunits": 0.0, "ebs_iops": 16000.0, "ebs_max_bandwidth": 2120.0, "ebs_throughput": 265.0, "ecu_per_vcpu": 0.0, "enhanced_networking": true, "fpga": 0, "gpus": 0, "intel_avx": "unknown", "intel_avx2": "unknown", "intel_turbo": "unknown", "ipv6_support": true, "linux_virtualization": "Unknown", "max_ips": 60, "memory": 32.0, "name": "M5A Double Extra Large", "network_perf": 10.0, "physical_processor": "AMD EPYC 7571", "placement_group_support": false, "storage": 0.0, "vcpus": 8.0, "vpc_only": true}, "m5a.4xlarge": {"apiname": "m5a.4xlarge", "architecture": "64-bit", "clock_speed_ghz": "2.5 GHz", "computeunits": 0.0, "ebs_iops": 16000.0, "ebs_max_bandwidth": 2120.0, "ebs_throughput": 265.0, "ecu_per_vcpu": 0.0, "enhanced_networking": true, "fpga": 0, "gpus": 0, "intel_avx": "unknown", "intel_avx2": "unknown", "intel_turbo": "unknown", "ipv6_support": true, "linux_virtualization": "Unknown", "max_ips": 240, "memory": 64.0, "name": "M5A Quadruple Extra Large", "network_perf": 10.0, "physical_processor": "AMD EPYC 7571", "placement_group_support": false, "storage": 0.0, "vcpus": 16.0, "vpc_only": true}, "m5a.8xlarge": {"apiname": "m5a.8xlarge", "architecture": "64-bit", "clock_speed_ghz": "2.5 GHz", "computeunits": 0.0, "ebs_iops": 20000.0, "ebs_max_bandwidth": 3500.0, "ebs_throughput": 437.5, "ecu_per_vcpu": 0.0, "enhanced_networking": true, "fpga": 0, "gpus": 0, "intel_avx": "unknown", "intel_avx2": "unknown", "intel_turbo": "unknown", "ipv6_support": true, "linux_virtualization": "Unknown", "max_ips": 240, "memory": 128.0, "name": "M5A Eight Extra Large", "network_perf": 10.0, "physical_processor": "AMD EPYC 7571", "placement_group_support": false, "storage": 0.0, "vcpus": 32.0, "vpc_only": true}, "m5a.large": {"apiname": "m5a.large", "architecture": "64-bit", "clock_speed_ghz": "2.5 GHz", "computeunits": 0.0, "ebs_iops": 16000.0, "ebs_max_bandwidth": 2120.0, "ebs_throughput": 265.0, "ecu_per_vcpu": 0.0, "enhanced_networking": true, "fpga": 0, "gpus": 0, "intel_avx": "unknown", "intel_avx2": "unknown", "intel_turbo": "unknown", "ipv6_support": true, "linux_virtualization": "Unknown", "max_ips": 30, "memory": 8.0, "name": "M5A Large", "network_perf": 10.0, "physical_processor": "AMD EPYC 7571", "placement_group_support": false, "storage": 0.0, "vcpus": 2.0, "vpc_only": true}, "m5a.xlarge": {"apiname": "m5a.xlarge", "architecture": "64-bit", "clock_speed_ghz": "2.5 GHz", "computeunits": 0.0, "ebs_iops": 16000.0, "ebs_max_bandwidth": 2120.0, "ebs_throughput": 265.0, "ecu_per_vcpu": 0.0, "enhanced_networking": true, "fpga": 0, "gpus": 0, "intel_avx": "unknown", "intel_avx2": "unknown", "intel_turbo": "unknown", "ipv6_support": true, "linux_virtualization": "Unknown", "max_ips": 60, "memory": 16.0, "name": "M5A Extra Large", "network_perf": 10.0, "physical_processor": "AMD EPYC 7571", "placement_group_support": false, "storage": 0.0, "vcpus": 4.0, "vpc_only": true}, "m5ad.12xlarge": {"apiname": "m5ad.12xlarge", "architecture": "64-bit", "clock_speed_ghz": "2.5 GHz", "computeunits": 0.0, "ebs_iops": 30000.0, "ebs_max_bandwidth": 5000.0, "ebs_throughput": 675.0, "ecu_per_vcpu": 0.0, "enhanced_networking": true, "fpga": 0, "gpus": 0, 
"intel_avx": "unknown", "intel_avx2": "unknown", "intel_turbo": "unknown", "ipv6_support": true, "linux_virtualization": "Unknown", "max_ips": 240, "memory": 192.0, "name": "M5AD 12xlarge", "network_perf": 12.0, "physical_processor": "AMD EPYC 7571", "placement_group_support": false, "storage": 1800.0, "vcpus": 48.0, "vpc_only": true}, "m5ad.24xlarge": {"apiname": "m5ad.24xlarge", "architecture": "64-bit", "clock_speed_ghz": "2.5 GHz", "computeunits": 0.0, "ebs_iops": 60000.0, "ebs_max_bandwidth": 10000.0, "ebs_throughput": 1250.0, "ecu_per_vcpu": 0.0, "enhanced_networking": true, "fpga": 0, "gpus": 0, "intel_avx": "unknown", "intel_avx2": "unknown", "intel_turbo": "unknown", "ipv6_support": true, "linux_virtualization": "Unknown", "max_ips": 750, "memory": 384.0, "name": "M5AD 24xlarge", "network_perf": 16.0, "physical_processor": "AMD EPYC 7571", "placement_group_support": false, "storage": 3600.0, "vcpus": 96.0, "vpc_only": true}, "m5ad.2xlarge": {"apiname": "m5ad.2xlarge", "architecture": "64-bit", "clock_speed_ghz": "2.5 GHz", "computeunits": 0.0, "ebs_iops": 16000.0, "ebs_max_bandwidth": 2120.0, "ebs_throughput": 265.0, "ecu_per_vcpu": 0.0, "enhanced_networking": true, "fpga": 0, "gpus": 0, "intel_avx": "unknown", "intel_avx2": "unknown", "intel_turbo": "unknown", "ipv6_support": true, "linux_virtualization": "Unknown", "max_ips": 60, "memory": 32.0, "name": "M5AD Double Extra Large", "network_perf": 10.0, "physical_processor": "AMD EPYC 7571", "placement_group_support": false, "storage": 300.0, "vcpus": 8.0, "vpc_only": true}, "m5ad.4xlarge": {"apiname": "m5ad.4xlarge", "architecture": "64-bit", "clock_speed_ghz": "2.5 GHz", "computeunits": 0.0, "ebs_iops": 16000.0, "ebs_max_bandwidth": 2120.0, "ebs_throughput": 265.0, "ecu_per_vcpu": 0.0, "enhanced_networking": true, "fpga": 0, "gpus": 0, "intel_avx": "unknown", "intel_avx2": "unknown", "intel_turbo": "unknown", "ipv6_support": true, "linux_virtualization": "Unknown", "max_ips": 240, "memory": 64.0, "name": "M5AD Quadruple Extra Large", "network_perf": 10.0, "physical_processor": "AMD EPYC 7571", "placement_group_support": false, "storage": 600.0, "vcpus": 16.0, "vpc_only": true}, "m5ad.large": {"apiname": "m5ad.large", "architecture": "64-bit", "clock_speed_ghz": "2.5 GHz", "computeunits": 0.0, "ebs_iops": 16000.0, "ebs_max_bandwidth": 2120.0, "ebs_throughput": 265.0, "ecu_per_vcpu": 0.0, "enhanced_networking": true, "fpga": 0, "gpus": 0, "intel_avx": "unknown", "intel_avx2": "unknown", "intel_turbo": "unknown", "ipv6_support": true, "linux_virtualization": "Unknown", "max_ips": 30, "memory": 8.0, "name": "M5AD Large", "network_perf": 10.0, "physical_processor": "AMD EPYC 7571", "placement_group_support": false, "storage": 75.0, "vcpus": 2.0, "vpc_only": true}, "m5ad.xlarge": {"apiname": "m5ad.xlarge", "architecture": "64-bit", "clock_speed_ghz": "2.5 GHz", "computeunits": 0.0, "ebs_iops": 16000.0, "ebs_max_bandwidth": 2120.0, "ebs_throughput": 265.0, "ecu_per_vcpu": 0.0, "enhanced_networking": true, "fpga": 0, "gpus": 0, "intel_avx": "unknown", "intel_avx2": "unknown", "intel_turbo": "unknown", "ipv6_support": true, "linux_virtualization": "Unknown", "max_ips": 60, "memory": 16.0, "name": "M5AD Extra Large", "network_perf": 10.0, "physical_processor": "AMD EPYC 7571", "placement_group_support": false, "storage": 150.0, "vcpus": 4.0, "vpc_only": true}, "m5d.12xlarge": {"apiname": "m5d.12xlarge", "architecture": "64-bit", "clock_speed_ghz": "2.5 GHz", "computeunits": 173.0, "ebs_iops": 40000.0, "ebs_max_bandwidth": 7000.0, 
"ebs_throughput": 875.0, "ecu_per_vcpu": 3.6041666666666665, "enhanced_networking": true, "fpga": 0, "gpus": 0, "intel_avx": "Yes", "intel_avx2": "Yes", "intel_turbo": "Yes", "ipv6_support": true, "linux_virtualization": "HVM", "max_ips": 240, "memory": 192.0, "name": "M5 General Purpose 12xlarge", "network_perf": 12.0, "physical_processor": "Intel Xeon Platinum 8175", "placement_group_support": false, "storage": 1800.0, "vcpus": 48.0, "vpc_only": true}, "m5d.16xlarge": {"apiname": "m5d.16xlarge", "architecture": "64-bit", "clock_speed_ghz": "2.5 GHz", "computeunits": 262.0, "ebs_iops": 60000.0, "ebs_max_bandwidth": 10000.0, "ebs_throughput": 1250.0, "ecu_per_vcpu": 4.09375, "enhanced_networking": true, "fpga": 0, "gpus": 0, "intel_avx": "Yes", "intel_avx2": "Yes", "intel_turbo": "Yes", "ipv6_support": true, "linux_virtualization": "HVM", "max_ips": 750, "memory": 256.0, "name": "M5 General Purpose 16xlarge", "network_perf": 16.0, "physical_processor": "Intel Xeon Platinum 8175", "placement_group_support": false, "storage": 2400.0, "vcpus": 64.0, "vpc_only": true}, "m5d.24xlarge": {"apiname": "m5d.24xlarge", "architecture": "64-bit", "clock_speed_ghz": "2.5 GHz", "computeunits": 345.0, "ebs_iops": 80000.0, "ebs_max_bandwidth": 14000.0, "ebs_throughput": 1750.0, "ecu_per_vcpu": 3.59375, "enhanced_networking": true, "fpga": 0, "gpus": 0, "intel_avx": "Yes", "intel_avx2": "Yes", "intel_turbo": "Yes", "ipv6_support": true, "linux_virtualization": "HVM", "max_ips": 750, "memory": 384.0, "name": "M5 General Purpose 24xlarge", "network_perf": 20.0, "physical_processor": "Intel Xeon Platinum 8175", "placement_group_support": false, "storage": 3600.0, "vcpus": 96.0, "vpc_only": true}, "m5d.2xlarge": {"apiname": "m5d.2xlarge", "architecture": "64-bit", "clock_speed_ghz": "2.5 GHz", "computeunits": 31.0, "ebs_iops": 18750.0, "ebs_max_bandwidth": 3500.0, "ebs_throughput": 437.5, "ecu_per_vcpu": 3.875, "enhanced_networking": true, "fpga": 0, "gpus": 0, "intel_avx": "Yes", "intel_avx2": "Yes", "intel_turbo": "Yes", "ipv6_support": true, "linux_virtualization": "HVM", "max_ips": 60, "memory": 32.0, "name": "M5 General Purpose Double Extra Large", "network_perf": 10.0, "physical_processor": "Intel Xeon Platinum 8175", "placement_group_support": false, "storage": 300.0, "vcpus": 8.0, "vpc_only": true}, "m5d.4xlarge": {"apiname": "m5d.4xlarge", "architecture": "64-bit", "clock_speed_ghz": "2.5 GHz", "computeunits": 60.0, "ebs_iops": 18750.0, "ebs_max_bandwidth": 3500.0, "ebs_throughput": 437.5, "ecu_per_vcpu": 3.75, "enhanced_networking": true, "fpga": 0, "gpus": 0, "intel_avx": "Yes", "intel_avx2": "Yes", "intel_turbo": "Yes", "ipv6_support": true, "linux_virtualization": "HVM", "max_ips": 240, "memory": 64.0, "name": "M5 General Purpose Quadruple Extra Large", "network_perf": 10.0, "physical_processor": "Intel Xeon Platinum 8175", "placement_group_support": false, "storage": 600.0, "vcpus": 16.0, "vpc_only": true}, "m5d.8xlarge": {"apiname": "m5d.8xlarge", "architecture": "64-bit", "clock_speed_ghz": "2.5 GHz", "computeunits": 131.0, "ebs_iops": 30000.0, "ebs_max_bandwidth": 5000.0, "ebs_throughput": 625.0, "ecu_per_vcpu": 4.09375, "enhanced_networking": true, "fpga": 0, "gpus": 0, "intel_avx": "Yes", "intel_avx2": "Yes", "intel_turbo": "Yes", "ipv6_support": true, "linux_virtualization": "HVM", "max_ips": 240, "memory": 128.0, "name": "M5 General Purpose Eight Extra Large", "network_perf": 12.0, "physical_processor": "Intel Xeon Platinum 8175", "placement_group_support": false, "storage": 1200.0, 
"vcpus": 32.0, "vpc_only": true}, "m5d.large": {"apiname": "m5d.large", "architecture": "64-bit", "clock_speed_ghz": "2.5 GHz", "computeunits": 8.0, "ebs_iops": 18750.0, "ebs_max_bandwidth": 3500.0, "ebs_throughput": 437.5, "ecu_per_vcpu": 4.0, "enhanced_networking": true, "fpga": 0, "gpus": 0, "intel_avx": "Yes", "intel_avx2": "Yes", "intel_turbo": "Yes", "ipv6_support": true, "linux_virtualization": "HVM", "max_ips": 30, "memory": 8.0, "name": "M5 General Purpose Large", "network_perf": 10.0, "physical_processor": "Intel Xeon Platinum 8175", "placement_group_support": false, "storage": 75.0, "vcpus": 2.0, "vpc_only": true}, "m5d.metal": {"apiname": "m5d.metal", "architecture": "64-bit", "clock_speed_ghz": "2.5 GHz", "computeunits": 345.0, "ebs_iops": 80000.0, "ebs_max_bandwidth": 14000.0, "ebs_throughput": 1750.0, "ecu_per_vcpu": 3.59375, "enhanced_networking": true, "fpga": 0, "gpus": 0, "intel_avx": "Yes", "intel_avx2": "Yes", "intel_turbo": "Yes", "ipv6_support": true, "linux_virtualization": "HVM", "max_ips": 750, "memory": 384.0, "name": "M5 General Purpose Metal", "network_perf": 20.0, "physical_processor": "Intel Xeon Platinum 8175", "placement_group_support": false, "storage": 3600.0, "vcpus": 96.0, "vpc_only": true}, "m5d.xlarge": {"apiname": "m5d.xlarge", "architecture": "64-bit", "clock_speed_ghz": "2.5 GHz", "computeunits": 16.0, "ebs_iops": 18750.0, "ebs_max_bandwidth": 3500.0, "ebs_throughput": 437.5, "ecu_per_vcpu": 4.0, "enhanced_networking": true, "fpga": 0, "gpus": 0, "intel_avx": "Yes", "intel_avx2": "Yes", "intel_turbo": "Yes", "ipv6_support": true, "linux_virtualization": "HVM", "max_ips": 60, "memory": 16.0, "name": "M5 General Purpose Extra Large", "network_perf": 10.0, "physical_processor": "Intel Xeon Platinum 8175", "placement_group_support": false, "storage": 150.0, "vcpus": 4.0, "vpc_only": true}, "m5dn.12xlarge": {"apiname": "m5dn.12xlarge", "architecture": "64-bit", "clock_speed_ghz": "unknown", "computeunits": 0.0, "ebs_iops": 40000.0, "ebs_max_bandwidth": 7000.0, "ebs_throughput": 875.0, "ecu_per_vcpu": 0.0, "enhanced_networking": false, "fpga": 0, "gpus": 0, "intel_avx": "unknown", "intel_avx2": "unknown", "intel_turbo": "unknown", "ipv6_support": true, "linux_virtualization": "Unknown", "max_ips": 240, "memory": 192.0, "name": "M5DN 12xlarge", "network_perf": 22.0, "physical_processor": "Intel Xeon Platinum 8259 (Cascade Lake)", "placement_group_support": false, "storage": 1800.0, "vcpus": 48.0, "vpc_only": true}, "m5dn.16xlarge": {"apiname": "m5dn.16xlarge", "architecture": "64-bit", "clock_speed_ghz": "unknown", "computeunits": 0.0, "ebs_iops": 60000.0, "ebs_max_bandwidth": 10000.0, "ebs_throughput": 1250.0, "ecu_per_vcpu": 0.0, "enhanced_networking": false, "fpga": 0, "gpus": 0, "intel_avx": "unknown", "intel_avx2": "unknown", "intel_turbo": "unknown", "ipv6_support": true, "linux_virtualization": "Unknown", "max_ips": 750, "memory": 256.0, "name": "M5DN 16xlarge", "network_perf": 24.0, "physical_processor": "Intel Xeon Platinum 8259 (Cascade Lake)", "placement_group_support": false, "storage": 2400.0, "vcpus": 64.0, "vpc_only": true}, "m5dn.24xlarge": {"apiname": "m5dn.24xlarge", "architecture": "64-bit", "clock_speed_ghz": "unknown", "computeunits": 0.0, "ebs_iops": 80000.0, "ebs_max_bandwidth": 14000.0, "ebs_throughput": 1750.0, "ecu_per_vcpu": 0.0, "enhanced_networking": false, "fpga": 0, "gpus": 0, "intel_avx": "unknown", "intel_avx2": "unknown", "intel_turbo": "unknown", "ipv6_support": true, "linux_virtualization": "Unknown", "max_ips": 750, 
"memory": 384.0, "name": "M5DN 24xlarge", "network_perf": 26.0, "physical_processor": "Intel Xeon Platinum 8259 (Cascade Lake)", "placement_group_support": false, "storage": 3600.0, "vcpus": 96.0, "vpc_only": true}, "m5dn.2xlarge": {"apiname": "m5dn.2xlarge", "architecture": "64-bit", "clock_speed_ghz": "unknown", "computeunits": 0.0, "ebs_iops": 18750.0, "ebs_max_bandwidth": 3500.0, "ebs_throughput": 437.5, "ecu_per_vcpu": 0.0, "enhanced_networking": false, "fpga": 0, "gpus": 0, "intel_avx": "unknown", "intel_avx2": "unknown", "intel_turbo": "unknown", "ipv6_support": true, "linux_virtualization": "Unknown", "max_ips": 60, "memory": 32.0, "name": "M5DN Double Extra Large", "network_perf": 18.0, "physical_processor": "Intel Xeon Platinum 8259 (Cascade Lake)", "placement_group_support": false, "storage": 300.0, "vcpus": 8.0, "vpc_only": true}, "m5dn.4xlarge": {"apiname": "m5dn.4xlarge", "architecture": "64-bit", "clock_speed_ghz": "unknown", "computeunits": 0.0, "ebs_iops": 18750.0, "ebs_max_bandwidth": 3500.0, "ebs_throughput": 437.5, "ecu_per_vcpu": 0.0, "enhanced_networking": false, "fpga": 0, "gpus": 0, "intel_avx": "unknown", "intel_avx2": "unknown", "intel_turbo": "unknown", "ipv6_support": true, "linux_virtualization": "Unknown", "max_ips": 240, "memory": 64.0, "name": "M5DN Quadruple Extra Large", "network_perf": 18.0, "physical_processor": "Intel Xeon Platinum 8259 (Cascade Lake)", "placement_group_support": false, "storage": 600.0, "vcpus": 16.0, "vpc_only": true}, "m5dn.8xlarge": {"apiname": "m5dn.8xlarge", "architecture": "64-bit", "clock_speed_ghz": "unknown", "computeunits": 0.0, "ebs_iops": 30000.0, "ebs_max_bandwidth": 5000.0, "ebs_throughput": 625.0, "ecu_per_vcpu": 0.0, "enhanced_networking": false, "fpga": 0, "gpus": 0, "intel_avx": "unknown", "intel_avx2": "unknown", "intel_turbo": "unknown", "ipv6_support": true, "linux_virtualization": "Unknown", "max_ips": 240, "memory": 128.0, "name": "M5DN Eight Extra Large", "network_perf": 20.0, "physical_processor": "Intel Xeon Platinum 8259 (Cascade Lake)", "placement_group_support": false, "storage": 1200.0, "vcpus": 32.0, "vpc_only": true}, "m5dn.large": {"apiname": "m5dn.large", "architecture": "64-bit", "clock_speed_ghz": "unknown", "computeunits": 0.0, "ebs_iops": 18750.0, "ebs_max_bandwidth": 3500.0, "ebs_throughput": 437.5, "ecu_per_vcpu": 0.0, "enhanced_networking": false, "fpga": 0, "gpus": 0, "intel_avx": "unknown", "intel_avx2": "unknown", "intel_turbo": "unknown", "ipv6_support": true, "linux_virtualization": "Unknown", "max_ips": 30, "memory": 8.0, "name": "M5DN Large", "network_perf": 18.0, "physical_processor": "Intel Xeon Platinum 8259 (Cascade Lake)", "placement_group_support": false, "storage": 75.0, "vcpus": 2.0, "vpc_only": true}, "m5dn.metal": {"apiname": "m5dn.metal", "architecture": "64-bit", "clock_speed_ghz": "unknown", "computeunits": 0.0, "ebs_iops": 0.0, "ebs_max_bandwidth": 0.0, "ebs_throughput": 0.0, "ecu_per_vcpu": 0.0, "enhanced_networking": false, "fpga": 0, "gpus": 0, "intel_avx": "unknown", "intel_avx2": "unknown", "intel_turbo": "unknown", "ipv6_support": true, "linux_virtualization": "Unknown", "memory": 384.0, "name": "M5DN Metal", "network_perf": 26.0, "physical_processor": "Intel Xeon Platinum 8259 (Cascade Lake)", "placement_group_support": false, "storage": 0.0, "vcpus": 96.0, "vpc_only": true}, "m5dn.xlarge": {"apiname": "m5dn.xlarge", "architecture": "64-bit", "clock_speed_ghz": "unknown", "computeunits": 0.0, "ebs_iops": 18750.0, "ebs_max_bandwidth": 3500.0, "ebs_throughput": 437.5, 
"ecu_per_vcpu": 0.0, "enhanced_networking": false, "fpga": 0, "gpus": 0, "intel_avx": "unknown", "intel_avx2": "unknown", "intel_turbo": "unknown", "ipv6_support": true, "linux_virtualization": "Unknown", "max_ips": 60, "memory": 16.0, "name": "M5DN Extra Large", "network_perf": 18.0, "physical_processor": "Intel Xeon Platinum 8259 (Cascade Lake)", "placement_group_support": false, "storage": 150.0, "vcpus": 4.0, "vpc_only": true}, "m5n.12xlarge": {"apiname": "m5n.12xlarge", "architecture": "64-bit", "clock_speed_ghz": "unknown", "computeunits": 0.0, "ebs_iops": 40000.0, "ebs_max_bandwidth": 7000.0, "ebs_throughput": 875.0, "ecu_per_vcpu": 0.0, "enhanced_networking": false, "fpga": 0, "gpus": 0, "intel_avx": "unknown", "intel_avx2": "unknown", "intel_turbo": "unknown", "ipv6_support": true, "linux_virtualization": "Unknown", "max_ips": 240, "memory": 192.0, "name": "M5N 12xlarge", "network_perf": 22.0, "physical_processor": "Intel Xeon Platinum 8259 (Cascade Lake)", "placement_group_support": false, "storage": 0.0, "vcpus": 48.0, "vpc_only": true}, "m5n.16xlarge": {"apiname": "m5n.16xlarge", "architecture": "64-bit", "clock_speed_ghz": "unknown", "computeunits": 0.0, "ebs_iops": 60000.0, "ebs_max_bandwidth": 10000.0, "ebs_throughput": 1250.0, "ecu_per_vcpu": 0.0, "enhanced_networking": false, "fpga": 0, "gpus": 0, "intel_avx": "unknown", "intel_avx2": "unknown", "intel_turbo": "unknown", "ipv6_support": true, "linux_virtualization": "Unknown", "max_ips": 750, "memory": 256.0, "name": "M5N 16xlarge", "network_perf": 24.0, "physical_processor": "Intel Xeon Platinum 8259 (Cascade Lake)", "placement_group_support": false, "storage": 0.0, "vcpus": 64.0, "vpc_only": true}, "m5n.24xlarge": {"apiname": "m5n.24xlarge", "architecture": "64-bit", "clock_speed_ghz": "unknown", "computeunits": 0.0, "ebs_iops": 80000.0, "ebs_max_bandwidth": 14000.0, "ebs_throughput": 1750.0, "ecu_per_vcpu": 0.0, "enhanced_networking": false, "fpga": 0, "gpus": 0, "intel_avx": "unknown", "intel_avx2": "unknown", "intel_turbo": "unknown", "ipv6_support": true, "linux_virtualization": "Unknown", "max_ips": 750, "memory": 384.0, "name": "M5N 24xlarge", "network_perf": 26.0, "physical_processor": "Intel Xeon Platinum 8259 (Cascade Lake)", "placement_group_support": false, "storage": 0.0, "vcpus": 96.0, "vpc_only": true}, "m5n.2xlarge": {"apiname": "m5n.2xlarge", "architecture": "64-bit", "clock_speed_ghz": "unknown", "computeunits": 0.0, "ebs_iops": 18750.0, "ebs_max_bandwidth": 3500.0, "ebs_throughput": 437.5, "ecu_per_vcpu": 0.0, "enhanced_networking": false, "fpga": 0, "gpus": 0, "intel_avx": "unknown", "intel_avx2": "unknown", "intel_turbo": "unknown", "ipv6_support": true, "linux_virtualization": "Unknown", "max_ips": 60, "memory": 32.0, "name": "M5N Double Extra Large", "network_perf": 18.0, "physical_processor": "Intel Xeon Platinum 8259 (Cascade Lake)", "placement_group_support": false, "storage": 0.0, "vcpus": 8.0, "vpc_only": true}, "m5n.4xlarge": {"apiname": "m5n.4xlarge", "architecture": "64-bit", "clock_speed_ghz": "unknown", "computeunits": 0.0, "ebs_iops": 18750.0, "ebs_max_bandwidth": 3500.0, "ebs_throughput": 437.5, "ecu_per_vcpu": 0.0, "enhanced_networking": false, "fpga": 0, "gpus": 0, "intel_avx": "unknown", "intel_avx2": "unknown", "intel_turbo": "unknown", "ipv6_support": true, "linux_virtualization": "Unknown", "max_ips": 240, "memory": 64.0, "name": "M5N Quadruple Extra Large", "network_perf": 18.0, "physical_processor": "Intel Xeon Platinum 8259 (Cascade Lake)", "placement_group_support": false, 
"storage": 0.0, "vcpus": 16.0, "vpc_only": true}, "m5n.8xlarge": {"apiname": "m5n.8xlarge", "architecture": "64-bit", "clock_speed_ghz": "unknown", "computeunits": 0.0, "ebs_iops": 30000.0, "ebs_max_bandwidth": 5000.0, "ebs_throughput": 625.0, "ecu_per_vcpu": 0.0, "enhanced_networking": false, "fpga": 0, "gpus": 0, "intel_avx": "unknown", "intel_avx2": "unknown", "intel_turbo": "unknown", "ipv6_support": true, "linux_virtualization": "Unknown", "max_ips": 240, "memory": 128.0, "name": "M5N Eight Extra Large", "network_perf": 20.0, "physical_processor": "Intel Xeon Platinum 8259 (Cascade Lake)", "placement_group_support": false, "storage": 0.0, "vcpus": 32.0, "vpc_only": true}, "m5n.large": {"apiname": "m5n.large", "architecture": "64-bit", "clock_speed_ghz": "unknown", "computeunits": 0.0, "ebs_iops": 18750.0, "ebs_max_bandwidth": 3500.0, "ebs_throughput": 437.5, "ecu_per_vcpu": 0.0, "enhanced_networking": false, "fpga": 0, "gpus": 0, "intel_avx": "unknown", "intel_avx2": "unknown", "intel_turbo": "unknown", "ipv6_support": true, "linux_virtualization": "Unknown", "max_ips": 30, "memory": 8.0, "name": "M5N Large", "network_perf": 18.0, "physical_processor": "Intel Xeon Platinum 8259 (Cascade Lake)", "placement_group_support": false, "storage": 0.0, "vcpus": 2.0, "vpc_only": true}, "m5n.metal": {"apiname": "m5n.metal", "architecture": "64-bit", "clock_speed_ghz": "unknown", "computeunits": 0.0, "ebs_iops": 0.0, "ebs_max_bandwidth": 0.0, "ebs_throughput": 0.0, "ecu_per_vcpu": 0.0, "enhanced_networking": false, "fpga": 0, "gpus": 0, "intel_avx": "unknown", "intel_avx2": "unknown", "intel_turbo": "unknown", "ipv6_support": true, "linux_virtualization": "Unknown", "memory": 384.0, "name": "M5N Metal", "network_perf": 26.0, "physical_processor": "Intel Xeon Platinum 8259 (Cascade Lake)", "placement_group_support": false, "storage": 0.0, "vcpus": 96.0, "vpc_only": true}, "m5n.xlarge": {"apiname": "m5n.xlarge", "architecture": "64-bit", "clock_speed_ghz": "unknown", "computeunits": 0.0, "ebs_iops": 18750.0, "ebs_max_bandwidth": 3500.0, "ebs_throughput": 437.5, "ecu_per_vcpu": 0.0, "enhanced_networking": false, "fpga": 0, "gpus": 0, "intel_avx": "unknown", "intel_avx2": "unknown", "intel_turbo": "unknown", "ipv6_support": true, "linux_virtualization": "Unknown", "max_ips": 60, "memory": 16.0, "name": "M5N Extra Large", "network_perf": 18.0, "physical_processor": "Intel Xeon Platinum 8259 (Cascade Lake)", "placement_group_support": false, "storage": 0.0, "vcpus": 4.0, "vpc_only": true}, "p2.16xlarge": {"apiname": "p2.16xlarge", "architecture": "64-bit", "clock_speed_ghz": "2.3 GHz", "computeunits": 188.0, "ebs_iops": 65000.0, "ebs_max_bandwidth": 10000.0, "ebs_throughput": 1250.0, "ecu_per_vcpu": 2.9375, "enhanced_networking": true, "fpga": 0, "gpus": 16, "intel_avx": "Yes", "intel_avx2": "Yes", "intel_turbo": "Yes", "ipv6_support": true, "linux_virtualization": "HVM", "max_ips": 240, "memory": 768.0, "name": "General Purpose GPU 16xlarge", "network_perf": 16.0, "physical_processor": "Intel Xeon E5-2686 v4 (Broadwell)", "placement_group_support": false, "storage": 0.0, "vcpus": 64.0, "vpc_only": true}, "p2.8xlarge": {"apiname": "p2.8xlarge", "architecture": "64-bit", "clock_speed_ghz": "2.3 GHz", "computeunits": 94.0, "ebs_iops": 32500.0, "ebs_max_bandwidth": 5000.0, "ebs_throughput": 625.0, "ecu_per_vcpu": 2.9375, "enhanced_networking": true, "fpga": 0, "gpus": 8, "intel_avx": "Yes", "intel_avx2": "Yes", "intel_turbo": "Yes", "ipv6_support": true, "linux_virtualization": "HVM", "max_ips": 240, 
"memory": 488.0, "name": "General Purpose GPU Eight Extra Large", "network_perf": 12.0, "physical_processor": "Intel Xeon E5-2686 v4 (Broadwell)", "placement_group_support": false, "storage": 0.0, "vcpus": 32.0, "vpc_only": true}, "p2.xlarge": {"apiname": "p2.xlarge", "architecture": "64-bit", "clock_speed_ghz": "2.3 GHz", "computeunits": 12.0, "ebs_iops": 6000.0, "ebs_max_bandwidth": 750.0, "ebs_throughput": 93.75, "ecu_per_vcpu": 3.0, "enhanced_networking": true, "fpga": 0, "gpus": 1, "intel_avx": "Yes", "intel_avx2": "Yes", "intel_turbo": "Yes", "ipv6_support": true, "linux_virtualization": "HVM", "max_ips": 60, "memory": 61.0, "name": "General Purpose GPU Extra Large", "network_perf": 8.0, "physical_processor": "Intel Xeon E5-2686 v4 (Broadwell)", "placement_group_support": false, "storage": 0.0, "vcpus": 4.0, "vpc_only": true}, "p3.16xlarge": {"apiname": "p3.16xlarge", "architecture": "64-bit", "clock_speed_ghz": "2.3 GHz", "computeunits": 188.0, "ebs_iops": 80000.0, "ebs_max_bandwidth": 14000.0, "ebs_throughput": 1750.0, "ecu_per_vcpu": 2.9375, "enhanced_networking": true, "fpga": 0, "gpus": 8, "intel_avx": "Yes", "intel_avx2": "Yes", "intel_turbo": "Yes", "ipv6_support": true, "linux_virtualization": "HVM", "max_ips": 240, "memory": 488.0, "name": "P3 16xlarge", "network_perf": 20.0, "physical_processor": "Intel Xeon E5-2686 v4 (Broadwell)", "placement_group_support": false, "storage": 0.0, "vcpus": 64.0, "vpc_only": true}, "p3.2xlarge": {"apiname": "p3.2xlarge", "architecture": "64-bit", "clock_speed_ghz": "2.3 GHz", "computeunits": 26.0, "ebs_iops": 10000.0, "ebs_max_bandwidth": 1750.0, "ebs_throughput": 218.0, "ecu_per_vcpu": 3.25, "enhanced_networking": true, "fpga": 0, "gpus": 1, "intel_avx": "Yes", "intel_avx2": "Yes", "intel_turbo": "Yes", "ipv6_support": true, "linux_virtualization": "HVM", "max_ips": 60, "memory": 61.0, "name": "P3 Double Extra Large", "network_perf": 10.0, "physical_processor": "Intel Xeon E5-2686 v4 (Broadwell)", "placement_group_support": false, "storage": 0.0, "vcpus": 8.0, "vpc_only": true}, "p3.8xlarge": {"apiname": "p3.8xlarge", "architecture": "64-bit", "clock_speed_ghz": "2.3 GHz", "computeunits": 94.0, "ebs_iops": 40000.0, "ebs_max_bandwidth": 7000.0, "ebs_throughput": 875.0, "ecu_per_vcpu": 2.9375, "enhanced_networking": true, "fpga": 0, "gpus": 4, "intel_avx": "Yes", "intel_avx2": "Yes", "intel_turbo": "Yes", "ipv6_support": true, "linux_virtualization": "HVM", "max_ips": 240, "memory": 244.0, "name": "P3 Eight Extra Large", "network_perf": 12.0, "physical_processor": "Intel Xeon E5-2686 v4 (Broadwell)", "placement_group_support": false, "storage": 0.0, "vcpus": 32.0, "vpc_only": true}, "p3dn.24xlarge": {"apiname": "p3dn.24xlarge", "architecture": "64-bit", "clock_speed_ghz": "2.5 GHz", "computeunits": 345.0, "ebs_iops": 80000.0, "ebs_max_bandwidth": 14000.0, "ebs_throughput": 1750.0, "ecu_per_vcpu": 3.59375, "enhanced_networking": true, "fpga": 0, "gpus": 8, "intel_avx": "Yes", "intel_avx2": "Yes", "intel_turbo": "Yes", "ipv6_support": true, "linux_virtualization": "Unknown", "max_ips": 750, "memory": 768.0, "name": "P3DN 24xlarge", "network_perf": 26.0, "physical_processor": "Intel Xeon Platinum 8175 (Skylake)", "placement_group_support": false, "storage": 1800.0, "vcpus": 96.0, "vpc_only": true}, "r3.2xlarge": {"apiname": "r3.2xlarge", "architecture": "64-bit", "clock_speed_ghz": "2.5 GHz", "computeunits": 26.0, "ebs_iops": 8000.0, "ebs_max_bandwidth": 1000.0, "ebs_throughput": 125.0, "ecu_per_vcpu": 3.25, "enhanced_networking": true, "fpga": 
0, "gpus": 0, "intel_avx": "Yes", "intel_avx2": "unknown", "intel_turbo": "Yes", "ipv6_support": true, "linux_virtualization": "HVM", "max_ips": 60, "memory": 61.0, "name": "R3 High-Memory Double Extra Large", "network_perf": 8.0, "physical_processor": "Intel Xeon E5-2670 v2 (Ivy Bridge)", "placement_group_support": false, "storage": 160.0, "vcpus": 8.0, "vpc_only": false}, "r3.4xlarge": {"apiname": "r3.4xlarge", "architecture": "64-bit", "clock_speed_ghz": "2.5 GHz", "computeunits": 52.0, "ebs_iops": 16000.0, "ebs_max_bandwidth": 2000.0, "ebs_throughput": 250.0, "ecu_per_vcpu": 3.25, "enhanced_networking": true, "fpga": 0, "gpus": 0, "intel_avx": "Yes", "intel_avx2": "unknown", "intel_turbo": "Yes", "ipv6_support": true, "linux_virtualization": "HVM", "max_ips": 240, "memory": 122.0, "name": "R3 High-Memory Quadruple Extra Large", "network_perf": 8.0, "physical_processor": "Intel Xeon E5-2670 v2 (Ivy Bridge)", "placement_group_support": false, "storage": 320.0, "vcpus": 16.0, "vpc_only": false}, "r3.8xlarge": {"apiname": "r3.8xlarge", "architecture": "64-bit", "clock_speed_ghz": "2.5 GHz", "computeunits": 104.0, "ebs_iops": 0.0, "ebs_max_bandwidth": 0.0, "ebs_throughput": 0.0, "ecu_per_vcpu": 3.25, "enhanced_networking": true, "fpga": 0, "gpus": 0, "intel_avx": "Yes", "intel_avx2": "unknown", "intel_turbo": "Yes", "ipv6_support": true, "linux_virtualization": "HVM", "max_ips": 240, "memory": 244.0, "name": "R3 High-Memory Eight Extra Large", "network_perf": 12.0, "physical_processor": "Intel Xeon E5-2670 v2 (Ivy Bridge)", "placement_group_support": false, "storage": 640.0, "vcpus": 32.0, "vpc_only": false}, "r3.large": {"apiname": "r3.large", "architecture": "64-bit", "clock_speed_ghz": "2.5 GHz", "computeunits": 6.5, "ebs_iops": 0.0, "ebs_max_bandwidth": 0.0, "ebs_throughput": 0.0, "ecu_per_vcpu": 3.25, "enhanced_networking": true, "fpga": 0, "gpus": 0, "intel_avx": "Yes", "intel_avx2": "unknown", "intel_turbo": "Yes", "ipv6_support": true, "linux_virtualization": "HVM", "max_ips": 30, "memory": 15.25, "name": "R3 High-Memory Large", "network_perf": 6.0, "physical_processor": "Intel Xeon E5-2670 v2 (Ivy Bridge)", "placement_group_support": false, "storage": 32.0, "vcpus": 2.0, "vpc_only": false}, "r3.xlarge": {"apiname": "r3.xlarge", "architecture": "64-bit", "clock_speed_ghz": "2.5 GHz", "computeunits": 13.0, "ebs_iops": 4000.0, "ebs_max_bandwidth": 500.0, "ebs_throughput": 62.5, "ecu_per_vcpu": 3.25, "enhanced_networking": true, "fpga": 0, "gpus": 0, "intel_avx": "Yes", "intel_avx2": "unknown", "intel_turbo": "Yes", "ipv6_support": true, "linux_virtualization": "HVM", "max_ips": 60, "memory": 30.5, "name": "R3 High-Memory Extra Large", "network_perf": 6.0, "physical_processor": "Intel Xeon E5-2670 v2 (Ivy Bridge)", "placement_group_support": false, "storage": 80.0, "vcpus": 4.0, "vpc_only": false}, "r4.16xlarge": {"apiname": "r4.16xlarge", "architecture": "64-bit", "clock_speed_ghz": "2.3 GHz", "computeunits": 195.0, "ebs_iops": 75000.0, "ebs_max_bandwidth": 14000.0, "ebs_throughput": 1750.0, "ecu_per_vcpu": 3.046875, "enhanced_networking": true, "fpga": 0, "gpus": 0, "intel_avx": "Yes", "intel_avx2": "Yes", "intel_turbo": "Yes", "ipv6_support": true, "linux_virtualization": "HVM", "max_ips": 750, "memory": 488.0, "name": "R4 High-Memory 16xlarge", "network_perf": 16.0, "physical_processor": "Intel Xeon E5-2686 v4 (Broadwell)", "placement_group_support": false, "storage": 0.0, "vcpus": 64.0, "vpc_only": true}, "r4.2xlarge": {"apiname": "r4.2xlarge", "architecture": "64-bit", 
"clock_speed_ghz": "2.3 GHz", "computeunits": 27.0, "ebs_iops": 12000.0, "ebs_max_bandwidth": 1700.0, "ebs_throughput": 212.5, "ecu_per_vcpu": 3.375, "enhanced_networking": true, "fpga": 0, "gpus": 0, "intel_avx": "Yes", "intel_avx2": "Yes", "intel_turbo": "Yes", "ipv6_support": true, "linux_virtualization": "HVM", "max_ips": 60, "memory": 61.0, "name": "R4 High-Memory Double Extra Large", "network_perf": 10.0, "physical_processor": "Intel Xeon E5-2686 v4 (Broadwell)", "placement_group_support": false, "storage": 0.0, "vcpus": 8.0, "vpc_only": true}, "r4.4xlarge": {"apiname": "r4.4xlarge", "architecture": "64-bit", "clock_speed_ghz": "2.3 GHz", "computeunits": 53.0, "ebs_iops": 18750.0, "ebs_max_bandwidth": 3500.0, "ebs_throughput": 437.5, "ecu_per_vcpu": 3.3125, "enhanced_networking": true, "fpga": 0, "gpus": 0, "intel_avx": "Yes", "intel_avx2": "Yes", "intel_turbo": "Yes", "ipv6_support": true, "linux_virtualization": "HVM", "max_ips": 240, "memory": 122.0, "name": "R4 High-Memory Quadruple Extra Large", "network_perf": 10.0, "physical_processor": "Intel Xeon E5-2686 v4 (Broadwell)", "placement_group_support": false, "storage": 0.0, "vcpus": 16.0, "vpc_only": true}, "r4.8xlarge": {"apiname": "r4.8xlarge", "architecture": "64-bit", "clock_speed_ghz": "2.3 GHz", "computeunits": 99.0, "ebs_iops": 37500.0, "ebs_max_bandwidth": 7000.0, "ebs_throughput": 875.0, "ecu_per_vcpu": 3.09375, "enhanced_networking": true, "fpga": 0, "gpus": 0, "intel_avx": "Yes", "intel_avx2": "Yes", "intel_turbo": "Yes", "ipv6_support": true, "linux_virtualization": "HVM", "max_ips": 240, "memory": 244.0, "name": "R4 High-Memory Eight Extra Large", "network_perf": 12.0, "physical_processor": "Intel Xeon E5-2686 v4 (Broadwell)", "placement_group_support": false, "storage": 0.0, "vcpus": 32.0, "vpc_only": true}, "r4.large": {"apiname": "r4.large", "architecture": "64-bit", "clock_speed_ghz": "2.3 GHz", "computeunits": 7.0, "ebs_iops": 3000.0, "ebs_max_bandwidth": 425.0, "ebs_throughput": 53.13, "ecu_per_vcpu": 3.5, "enhanced_networking": true, "fpga": 0, "gpus": 0, "intel_avx": "Yes", "intel_avx2": "Yes", "intel_turbo": "Yes", "ipv6_support": true, "linux_virtualization": "HVM", "max_ips": 30, "memory": 15.25, "name": "R4 High-Memory Large", "network_perf": 10.0, "physical_processor": "Intel Xeon E5-2686 v4 (Broadwell)", "placement_group_support": false, "storage": 0.0, "vcpus": 2.0, "vpc_only": true}, "r4.xlarge": {"apiname": "r4.xlarge", "architecture": "64-bit", "clock_speed_ghz": "2.3 GHz", "computeunits": 13.5, "ebs_iops": 6000.0, "ebs_max_bandwidth": 850.0, "ebs_throughput": 106.25, "ecu_per_vcpu": 3.375, "enhanced_networking": true, "fpga": 0, "gpus": 0, "intel_avx": "Yes", "intel_avx2": "Yes", "intel_turbo": "Yes", "ipv6_support": true, "linux_virtualization": "HVM", "max_ips": 60, "memory": 30.5, "name": "R4 High-Memory Extra Large", "network_perf": 10.0, "physical_processor": "Intel Xeon E5-2686 v4 (Broadwell)", "placement_group_support": false, "storage": 0.0, "vcpus": 4.0, "vpc_only": true}, "r5.12xlarge": {"apiname": "r5.12xlarge", "architecture": "64-bit", "clock_speed_ghz": "unknown", "computeunits": 173.0, "ebs_iops": 40000.0, "ebs_max_bandwidth": 7000.0, "ebs_throughput": 875.0, "ecu_per_vcpu": 3.6041666666666665, "enhanced_networking": false, "fpga": 0, "gpus": 0, "intel_avx": "unknown", "intel_avx2": "unknown", "intel_turbo": "unknown", "ipv6_support": true, "linux_virtualization": "HVM", "max_ips": 240, "memory": 384.0, "name": "R5 12xlarge", "network_perf": 12.0, "physical_processor": "Intel Xeon 
Platinum 8175", "placement_group_support": false, "storage": 0.0, "vcpus": 48.0, "vpc_only": true}, "r5.16xlarge": {"apiname": "r5.16xlarge", "architecture": "64-bit", "clock_speed_ghz": "unknown", "computeunits": 262.0, "ebs_iops": 60000.0, "ebs_max_bandwidth": 10000.0, "ebs_throughput": 1250.0, "ecu_per_vcpu": 4.09375, "enhanced_networking": false, "fpga": 0, "gpus": 0, "intel_avx": "unknown", "intel_avx2": "unknown", "intel_turbo": "unknown", "ipv6_support": true, "linux_virtualization": "HVM", "max_ips": 750, "memory": 512.0, "name": "R5 16xlarge", "network_perf": 16.0, "physical_processor": "Intel Xeon Platinum 8175", "placement_group_support": false, "storage": 0.0, "vcpus": 64.0, "vpc_only": true}, "r5.24xlarge": {"apiname": "r5.24xlarge", "architecture": "64-bit", "clock_speed_ghz": "unknown", "computeunits": 347.0, "ebs_iops": 80000.0, "ebs_max_bandwidth": 14000.0, "ebs_throughput": 1750.0, "ecu_per_vcpu": 3.6145833333333335, "enhanced_networking": false, "fpga": 0, "gpus": 0, "intel_avx": "unknown", "intel_avx2": "unknown", "intel_turbo": "unknown", "ipv6_support": true, "linux_virtualization": "HVM", "max_ips": 750, "memory": 768.0, "name": "R5 24xlarge", "network_perf": 20.0, "physical_processor": "Intel Xeon Platinum 8175", "placement_group_support": false, "storage": 0.0, "vcpus": 96.0, "vpc_only": true}, "r5.2xlarge": {"apiname": "r5.2xlarge", "architecture": "64-bit", "clock_speed_ghz": "unknown", "computeunits": 38.0, "ebs_iops": 18750.0, "ebs_max_bandwidth": 3500.0, "ebs_throughput": 437.5, "ecu_per_vcpu": 4.75, "enhanced_networking": false, "fpga": 0, "gpus": 0, "intel_avx": "unknown", "intel_avx2": "unknown", "intel_turbo": "unknown", "ipv6_support": true, "linux_virtualization": "HVM", "max_ips": 60, "memory": 64.0, "name": "R5 Double Extra Large", "network_perf": 12.0, "physical_processor": "Intel Xeon Platinum 8175", "placement_group_support": false, "storage": 0.0, "vcpus": 8.0, "vpc_only": true}, "r5.4xlarge": {"apiname": "r5.4xlarge", "architecture": "64-bit", "clock_speed_ghz": "unknown", "computeunits": 71.0, "ebs_iops": 18750.0, "ebs_max_bandwidth": 3500.0, "ebs_throughput": 437.5, "ecu_per_vcpu": 4.4375, "enhanced_networking": false, "fpga": 0, "gpus": 0, "intel_avx": "unknown", "intel_avx2": "unknown", "intel_turbo": "unknown", "ipv6_support": true, "linux_virtualization": "HVM", "max_ips": 240, "memory": 128.0, "name": "R5 Quadruple Extra Large", "network_perf": 12.0, "physical_processor": "Intel Xeon Platinum 8175", "placement_group_support": false, "storage": 0.0, "vcpus": 16.0, "vpc_only": true}, "r5.8xlarge": {"apiname": "r5.8xlarge", "architecture": "64-bit", "clock_speed_ghz": "unknown", "computeunits": 131.0, "ebs_iops": 30000.0, "ebs_max_bandwidth": 5000.0, "ebs_throughput": 625.0, "ecu_per_vcpu": 4.09375, "enhanced_networking": false, "fpga": 0, "gpus": 0, "intel_avx": "unknown", "intel_avx2": "unknown", "intel_turbo": "unknown", "ipv6_support": true, "linux_virtualization": "HVM", "max_ips": 240, "memory": 256.0, "name": "R5 Eight Extra Large", "network_perf": 12.0, "physical_processor": "Intel Xeon Platinum 8175", "placement_group_support": false, "storage": 0.0, "vcpus": 32.0, "vpc_only": true}, "r5.large": {"apiname": "r5.large", "architecture": "64-bit", "clock_speed_ghz": "3.1 GHz", "computeunits": 9.0, "ebs_iops": 18750.0, "ebs_max_bandwidth": 3500.0, "ebs_throughput": 437.5, "ecu_per_vcpu": 4.5, "enhanced_networking": false, "fpga": 0, "gpus": 0, "intel_avx": "unknown", "intel_avx2": "unknown", "intel_turbo": "unknown", "ipv6_support": true, 
"linux_virtualization": "HVM", "max_ips": 30, "memory": 16.0, "name": "R5 Large", "network_perf": 12.0, "physical_processor": "Intel Xeon Platinum 8175", "placement_group_support": false, "storage": 0.0, "vcpus": 2.0, "vpc_only": true}, "r5.metal": {"apiname": "r5.metal", "architecture": "64-bit", "clock_speed_ghz": "unknown", "computeunits": 347.0, "ebs_iops": 80000.0, "ebs_max_bandwidth": 14000.0, "ebs_throughput": 1750.0, "ecu_per_vcpu": 3.6145833333333335, "enhanced_networking": false, "fpga": 0, "gpus": 0, "intel_avx": "unknown", "intel_avx2": "unknown", "intel_turbo": "unknown", "ipv6_support": true, "linux_virtualization": "HVM", "max_ips": 750, "memory": 768.0, "name": "R5 Metal", "network_perf": 20.0, "physical_processor": "Intel Xeon Platinum 8175", "placement_group_support": false, "storage": 0.0, "vcpus": 96.0, "vpc_only": true}, "r5.xlarge": {"apiname": "r5.xlarge", "architecture": "64-bit", "clock_speed_ghz": "unknown", "computeunits": 19.0, "ebs_iops": 18750.0, "ebs_max_bandwidth": 3500.0, "ebs_throughput": 437.5, "ecu_per_vcpu": 4.75, "enhanced_networking": false, "fpga": 0, "gpus": 0, "intel_avx": "unknown", "intel_avx2": "unknown", "intel_turbo": "unknown", "ipv6_support": true, "linux_virtualization": "HVM", "max_ips": 60, "memory": 32.0, "name": "R5 Extra Large", "network_perf": 12.0, "physical_processor": "Intel Xeon Platinum 8175", "placement_group_support": false, "storage": 0.0, "vcpus": 4.0, "vpc_only": true}, "r5a.12xlarge": {"apiname": "r5a.12xlarge", "architecture": "64-bit", "clock_speed_ghz": "2.5 GHz", "computeunits": 0.0, "ebs_iops": 30000.0, "ebs_max_bandwidth": 5000.0, "ebs_throughput": 625.0, "ecu_per_vcpu": 0.0, "enhanced_networking": true, "fpga": 0, "gpus": 0, "intel_avx": "unknown", "intel_avx2": "unknown", "intel_turbo": "unknown", "ipv6_support": true, "linux_virtualization": "Unknown", "max_ips": 240, "memory": 384.0, "name": "R5A 12xlarge", "network_perf": 12.0, "physical_processor": "AMD EPYC 7571", "placement_group_support": false, "storage": 0.0, "vcpus": 48.0, "vpc_only": true}, "r5a.16xlarge": {"apiname": "r5a.16xlarge", "architecture": "64-bit", "clock_speed_ghz": "2.5 GHz", "computeunits": 0.0, "ebs_iops": 40000.0, "ebs_max_bandwidth": 7000.0, "ebs_throughput": 875.0, "ecu_per_vcpu": 0.0, "enhanced_networking": true, "fpga": 0, "gpus": 0, "intel_avx": "unknown", "intel_avx2": "unknown", "intel_turbo": "unknown", "ipv6_support": true, "linux_virtualization": "Unknown", "max_ips": 750, "memory": 512.0, "name": "R5A 16xlarge", "network_perf": 14.0, "physical_processor": "AMD EPYC 7571", "placement_group_support": false, "storage": 0.0, "vcpus": 64.0, "vpc_only": true}, "r5a.24xlarge": {"apiname": "r5a.24xlarge", "architecture": "64-bit", "clock_speed_ghz": "2.5 GHz", "computeunits": 0.0, "ebs_iops": 60000.0, "ebs_max_bandwidth": 10000.0, "ebs_throughput": 1250.0, "ecu_per_vcpu": 0.0, "enhanced_networking": true, "fpga": 0, "gpus": 0, "intel_avx": "unknown", "intel_avx2": "unknown", "intel_turbo": "unknown", "ipv6_support": true, "linux_virtualization": "Unknown", "max_ips": 750, "memory": 768.0, "name": "R5A 24xlarge", "network_perf": 16.0, "physical_processor": "AMD EPYC 7571", "placement_group_support": false, "storage": 0.0, "vcpus": 96.0, "vpc_only": true}, "r5a.2xlarge": {"apiname": "r5a.2xlarge", "architecture": "64-bit", "clock_speed_ghz": "2.5 GHz", "computeunits": 0.0, "ebs_iops": 16000.0, "ebs_max_bandwidth": 2120.0, "ebs_throughput": 265.0, "ecu_per_vcpu": 0.0, "enhanced_networking": true, "fpga": 0, "gpus": 0, "intel_avx": 
"unknown", "intel_avx2": "unknown", "intel_turbo": "unknown", "ipv6_support": true, "linux_virtualization": "Unknown", "max_ips": 60, "memory": 64.0, "name": "R5A Double Extra Large", "network_perf": 12.0, "physical_processor": "AMD EPYC 7571", "placement_group_support": false, "storage": 0.0, "vcpus": 8.0, "vpc_only": true}, "r5a.4xlarge": {"apiname": "r5a.4xlarge", "architecture": "64-bit", "clock_speed_ghz": "2.5 GHz", "computeunits": 0.0, "ebs_iops": 16000.0, "ebs_max_bandwidth": 2120.0, "ebs_throughput": 265.0, "ecu_per_vcpu": 0.0, "enhanced_networking": true, "fpga": 0, "gpus": 0, "intel_avx": "unknown", "intel_avx2": "unknown", "intel_turbo": "unknown", "ipv6_support": true, "linux_virtualization": "Unknown", "max_ips": 240, "memory": 128.0, "name": "R5A Quadruple Extra Large", "network_perf": 12.0, "physical_processor": "AMD EPYC 7571", "placement_group_support": false, "storage": 0.0, "vcpus": 16.0, "vpc_only": true}, "r5a.8xlarge": {"apiname": "r5a.8xlarge", "architecture": "64-bit", "clock_speed_ghz": "2.5 GHz", "computeunits": 0.0, "ebs_iops": 32000.0, "ebs_max_bandwidth": 3500.0, "ebs_throughput": 437.5, "ecu_per_vcpu": 0.0, "enhanced_networking": true, "fpga": 0, "gpus": 0, "intel_avx": "unknown", "intel_avx2": "unknown", "intel_turbo": "unknown", "ipv6_support": true, "linux_virtualization": "Unknown", "max_ips": 240, "memory": 256.0, "name": "R5A Eight Extra Large", "network_perf": 10.0, "physical_processor": "AMD EPYC 7571", "placement_group_support": false, "storage": 0.0, "vcpus": 32.0, "vpc_only": true}, "r5a.large": {"apiname": "r5a.large", "architecture": "64-bit", "clock_speed_ghz": "2.5 GHz", "computeunits": 0.0, "ebs_iops": 16000.0, "ebs_max_bandwidth": 2120.0, "ebs_throughput": 265.0, "ecu_per_vcpu": 0.0, "enhanced_networking": true, "fpga": 0, "gpus": 0, "intel_avx": "unknown", "intel_avx2": "unknown", "intel_turbo": "unknown", "ipv6_support": true, "linux_virtualization": "Unknown", "max_ips": 30, "memory": 16.0, "name": "R5A Large", "network_perf": 12.0, "physical_processor": "AMD EPYC 7571", "placement_group_support": false, "storage": 0.0, "vcpus": 2.0, "vpc_only": true}, "r5a.xlarge": {"apiname": "r5a.xlarge", "architecture": "64-bit", "clock_speed_ghz": "2.5 GHz", "computeunits": 0.0, "ebs_iops": 16000.0, "ebs_max_bandwidth": 2120.0, "ebs_throughput": 265.0, "ecu_per_vcpu": 0.0, "enhanced_networking": true, "fpga": 0, "gpus": 0, "intel_avx": "unknown", "intel_avx2": "unknown", "intel_turbo": "unknown", "ipv6_support": true, "linux_virtualization": "Unknown", "max_ips": 60, "memory": 32.0, "name": "R5A Extra Large", "network_perf": 12.0, "physical_processor": "AMD EPYC 7571", "placement_group_support": false, "storage": 0.0, "vcpus": 4.0, "vpc_only": true}, "r5ad.12xlarge": {"apiname": "r5ad.12xlarge", "architecture": "64-bit", "clock_speed_ghz": "2.5 GHz", "computeunits": 0.0, "ebs_iops": 30000.0, "ebs_max_bandwidth": 5000.0, "ebs_throughput": 625.0, "ecu_per_vcpu": 0.0, "enhanced_networking": true, "fpga": 0, "gpus": 0, "intel_avx": "unknown", "intel_avx2": "unknown", "intel_turbo": "unknown", "ipv6_support": true, "linux_virtualization": "Unknown", "max_ips": 240, "memory": 384.0, "name": "R5AD 12xlarge", "network_perf": 12.0, "physical_processor": "AMD EPYC 7571", "placement_group_support": false, "storage": 1800.0, "vcpus": 48.0, "vpc_only": true}, "r5ad.24xlarge": {"apiname": "r5ad.24xlarge", "architecture": "64-bit", "clock_speed_ghz": "2.5 GHz", "computeunits": 0.0, "ebs_iops": 60000.0, "ebs_max_bandwidth": 10000.0, "ebs_throughput": 1250.0, 
"ecu_per_vcpu": 0.0, "enhanced_networking": true, "fpga": 0, "gpus": 0, "intel_avx": "unknown", "intel_avx2": "unknown", "intel_turbo": "unknown", "ipv6_support": true, "linux_virtualization": "Unknown", "max_ips": 750, "memory": 768.0, "name": "R5AD 24xlarge", "network_perf": 16.0, "physical_processor": "AMD EPYC 7571", "placement_group_support": false, "storage": 3600.0, "vcpus": 96.0, "vpc_only": true}, "r5ad.2xlarge": {"apiname": "r5ad.2xlarge", "architecture": "64-bit", "clock_speed_ghz": "2.5 GHz", "computeunits": 0.0, "ebs_iops": 16000.0, "ebs_max_bandwidth": 2210.0, "ebs_throughput": 265.0, "ecu_per_vcpu": 0.0, "enhanced_networking": true, "fpga": 0, "gpus": 0, "intel_avx": "unknown", "intel_avx2": "unknown", "intel_turbo": "unknown", "ipv6_support": true, "linux_virtualization": "Unknown", "max_ips": 60, "memory": 64.0, "name": "R5AD Double Extra Large", "network_perf": 12.0, "physical_processor": "AMD EPYC 7571", "placement_group_support": false, "storage": 300.0, "vcpus": 8.0, "vpc_only": true}, "r5ad.4xlarge": {"apiname": "r5ad.4xlarge", "architecture": "64-bit", "clock_speed_ghz": "2.5 GHz", "computeunits": 0.0, "ebs_iops": 16000.0, "ebs_max_bandwidth": 2210.0, "ebs_throughput": 265.0, "ecu_per_vcpu": 0.0, "enhanced_networking": true, "fpga": 0, "gpus": 0, "intel_avx": "unknown", "intel_avx2": "unknown", "intel_turbo": "unknown", "ipv6_support": true, "linux_virtualization": "Unknown", "max_ips": 240, "memory": 128.0, "name": "R5AD Quadruple Extra Large", "network_perf": 12.0, "physical_processor": "AMD EPYC 7571", "placement_group_support": false, "storage": 600.0, "vcpus": 16.0, "vpc_only": true}, "r5ad.large": {"apiname": "r5ad.large", "architecture": "64-bit", "clock_speed_ghz": "2.5 GHz", "computeunits": 0.0, "ebs_iops": 16000.0, "ebs_max_bandwidth": 2210.0, "ebs_throughput": 265.0, "ecu_per_vcpu": 0.0, "enhanced_networking": true, "fpga": 0, "gpus": 0, "intel_avx": "unknown", "intel_avx2": "unknown", "intel_turbo": "unknown", "ipv6_support": true, "linux_virtualization": "Unknown", "max_ips": 30, "memory": 16.0, "name": "R5AD Large", "network_perf": 12.0, "physical_processor": "AMD EPYC 7571", "placement_group_support": false, "storage": 75.0, "vcpus": 2.0, "vpc_only": true}, "r5ad.xlarge": {"apiname": "r5ad.xlarge", "architecture": "64-bit", "clock_speed_ghz": "2.5 GHz", "computeunits": 0.0, "ebs_iops": 16000.0, "ebs_max_bandwidth": 2210.0, "ebs_throughput": 265.0, "ecu_per_vcpu": 0.0, "enhanced_networking": true, "fpga": 0, "gpus": 0, "intel_avx": "unknown", "intel_avx2": "unknown", "intel_turbo": "unknown", "ipv6_support": true, "linux_virtualization": "Unknown", "max_ips": 60, "memory": 32.0, "name": "R5AD Extra Large", "network_perf": 12.0, "physical_processor": "AMD EPYC 7571", "placement_group_support": false, "storage": 150.0, "vcpus": 4.0, "vpc_only": true}, "r5d.12xlarge": {"apiname": "r5d.12xlarge", "architecture": "64-bit", "clock_speed_ghz": "unknown", "computeunits": 173.0, "ebs_iops": 40000.0, "ebs_max_bandwidth": 7000.0, "ebs_throughput": 875.0, "ecu_per_vcpu": 3.6041666666666665, "enhanced_networking": false, "fpga": 0, "gpus": 0, "intel_avx": "unknown", "intel_avx2": "unknown", "intel_turbo": "unknown", "ipv6_support": true, "linux_virtualization": "HVM", "max_ips": 240, "memory": 384.0, "name": "R5D 12xlarge", "network_perf": 12.0, "physical_processor": "Intel Xeon Platinum 8175", "placement_group_support": false, "storage": 1800.0, "vcpus": 48.0, "vpc_only": true}, "r5d.16xlarge": {"apiname": "r5d.16xlarge", "architecture": "64-bit", 
"clock_speed_ghz": "unknown", "computeunits": 262.0, "ebs_iops": 60000.0, "ebs_max_bandwidth": 10000.0, "ebs_throughput": 1250.0, "ecu_per_vcpu": 4.09375, "enhanced_networking": false, "fpga": 0, "gpus": 0, "intel_avx": "unknown", "intel_avx2": "unknown", "intel_turbo": "unknown", "ipv6_support": true, "linux_virtualization": "HVM", "max_ips": 750, "memory": 512.0, "name": "R5D 16xlarge", "network_perf": 16.0, "physical_processor": "Intel Xeon Platinum 8175", "placement_group_support": false, "storage": 2400.0, "vcpus": 64.0, "vpc_only": true}, "r5d.24xlarge": {"apiname": "r5d.24xlarge", "architecture": "64-bit", "clock_speed_ghz": "unknown", "computeunits": 347.0, "ebs_iops": 80000.0, "ebs_max_bandwidth": 14000.0, "ebs_throughput": 1750.0, "ecu_per_vcpu": 3.6145833333333335, "enhanced_networking": false, "fpga": 0, "gpus": 0, "intel_avx": "unknown", "intel_avx2": "unknown", "intel_turbo": "unknown", "ipv6_support": true, "linux_virtualization": "HVM", "max_ips": 750, "memory": 768.0, "name": "R5D 24xlarge", "network_perf": 20.0, "physical_processor": "Intel Xeon Platinum 8175", "placement_group_support": false, "storage": 3600.0, "vcpus": 96.0, "vpc_only": true}, "r5d.2xlarge": {"apiname": "r5d.2xlarge", "architecture": "64-bit", "clock_speed_ghz": "unknown", "computeunits": 38.0, "ebs_iops": 18750.0, "ebs_max_bandwidth": 3500.0, "ebs_throughput": 437.5, "ecu_per_vcpu": 4.75, "enhanced_networking": false, "fpga": 0, "gpus": 0, "intel_avx": "unknown", "intel_avx2": "unknown", "intel_turbo": "unknown", "ipv6_support": true, "linux_virtualization": "HVM", "max_ips": 60, "memory": 64.0, "name": "R5D Double Extra Large", "network_perf": 12.0, "physical_processor": "Intel Xeon Platinum 8175", "placement_group_support": false, "storage": 300.0, "vcpus": 8.0, "vpc_only": true}, "r5d.4xlarge": {"apiname": "r5d.4xlarge", "architecture": "64-bit", "clock_speed_ghz": "unknown", "computeunits": 71.0, "ebs_iops": 18750.0, "ebs_max_bandwidth": 3500.0, "ebs_throughput": 437.5, "ecu_per_vcpu": 4.4375, "enhanced_networking": false, "fpga": 0, "gpus": 0, "intel_avx": "unknown", "intel_avx2": "unknown", "intel_turbo": "unknown", "ipv6_support": true, "linux_virtualization": "HVM", "max_ips": 240, "memory": 128.0, "name": "R5D Quadruple Extra Large", "network_perf": 12.0, "physical_processor": "Intel Xeon Platinum 8175", "placement_group_support": false, "storage": 600.0, "vcpus": 16.0, "vpc_only": true}, "r5d.8xlarge": {"apiname": "r5d.8xlarge", "architecture": "64-bit", "clock_speed_ghz": "unknown", "computeunits": 131.0, "ebs_iops": 30000.0, "ebs_max_bandwidth": 5000.0, "ebs_throughput": 625.0, "ecu_per_vcpu": 4.09375, "enhanced_networking": false, "fpga": 0, "gpus": 0, "intel_avx": "unknown", "intel_avx2": "unknown", "intel_turbo": "unknown", "ipv6_support": true, "linux_virtualization": "HVM", "max_ips": 240, "memory": 256.0, "name": "R5D Eight Extra Large", "network_perf": 12.0, "physical_processor": "Intel Xeon Platinum 8175", "placement_group_support": false, "storage": 1200.0, "vcpus": 32.0, "vpc_only": true}, "r5d.large": {"apiname": "r5d.large", "architecture": "64-bit", "clock_speed_ghz": "unknown", "computeunits": 10.0, "ebs_iops": 18750.0, "ebs_max_bandwidth": 3500.0, "ebs_throughput": 437.5, "ecu_per_vcpu": 5.0, "enhanced_networking": false, "fpga": 0, "gpus": 0, "intel_avx": "unknown", "intel_avx2": "unknown", "intel_turbo": "unknown", "ipv6_support": true, "linux_virtualization": "HVM", "max_ips": 30, "memory": 16.0, "name": "R5D Large", "network_perf": 12.0, "physical_processor": "Intel Xeon 
Platinum 8175", "placement_group_support": false, "storage": 75.0, "vcpus": 2.0, "vpc_only": true}, "r5d.metal": {"apiname": "r5d.metal", "architecture": "64-bit", "clock_speed_ghz": "unknown", "computeunits": 347.0, "ebs_iops": 80000.0, "ebs_max_bandwidth": 14000.0, "ebs_throughput": 1750.0, "ecu_per_vcpu": 3.6145833333333335, "enhanced_networking": false, "fpga": 0, "gpus": 0, "intel_avx": "unknown", "intel_avx2": "unknown", "intel_turbo": "unknown", "ipv6_support": true, "linux_virtualization": "HVM", "max_ips": 750, "memory": 768.0, "name": "R5D Metal", "network_perf": 20.0, "physical_processor": "Intel Xeon Platinum 8175", "placement_group_support": false, "storage": 3600.0, "vcpus": 96.0, "vpc_only": true}, "r5d.xlarge": {"apiname": "r5d.xlarge", "architecture": "64-bit", "clock_speed_ghz": "unknown", "computeunits": 19.0, "ebs_iops": 18750.0, "ebs_max_bandwidth": 3500.0, "ebs_throughput": 437.5, "ecu_per_vcpu": 4.75, "enhanced_networking": false, "fpga": 0, "gpus": 0, "intel_avx": "unknown", "intel_avx2": "unknown", "intel_turbo": "unknown", "ipv6_support": true, "linux_virtualization": "HVM", "max_ips": 60, "memory": 32.0, "name": "R5D Extra Large", "network_perf": 12.0, "physical_processor": "Intel Xeon Platinum 8175", "placement_group_support": false, "storage": 150.0, "vcpus": 4.0, "vpc_only": true}, "r5dn.12xlarge": {"apiname": "r5dn.12xlarge", "architecture": "64-bit", "clock_speed_ghz": "unknown", "computeunits": 0.0, "ebs_iops": 40000.0, "ebs_max_bandwidth": 7000.0, "ebs_throughput": 875.0, "ecu_per_vcpu": 0.0, "enhanced_networking": false, "fpga": 0, "gpus": 0, "intel_avx": "unknown", "intel_avx2": "unknown", "intel_turbo": "unknown", "ipv6_support": true, "linux_virtualization": "Unknown", "max_ips": 240, "memory": 384.0, "name": "R5DN 12xlarge", "network_perf": 22.0, "physical_processor": "Intel Xeon Platinum 8259 (Cascade Lake)", "placement_group_support": false, "storage": 1800.0, "vcpus": 48.0, "vpc_only": true}, "r5dn.16xlarge": {"apiname": "r5dn.16xlarge", "architecture": "64-bit", "clock_speed_ghz": "unknown", "computeunits": 0.0, "ebs_iops": 60000.0, "ebs_max_bandwidth": 10000.0, "ebs_throughput": 1250.0, "ecu_per_vcpu": 0.0, "enhanced_networking": false, "fpga": 0, "gpus": 0, "intel_avx": "unknown", "intel_avx2": "unknown", "intel_turbo": "unknown", "ipv6_support": true, "linux_virtualization": "Unknown", "max_ips": 750, "memory": 512.0, "name": "R5DN 16xlarge", "network_perf": 24.0, "physical_processor": "Intel Xeon Platinum 8259 (Cascade Lake)", "placement_group_support": false, "storage": 2400.0, "vcpus": 64.0, "vpc_only": true}, "r5dn.24xlarge": {"apiname": "r5dn.24xlarge", "architecture": "64-bit", "clock_speed_ghz": "unknown", "computeunits": 0.0, "ebs_iops": 80000.0, "ebs_max_bandwidth": 14000.0, "ebs_throughput": 1750.0, "ecu_per_vcpu": 0.0, "enhanced_networking": false, "fpga": 0, "gpus": 0, "intel_avx": "unknown", "intel_avx2": "unknown", "intel_turbo": "unknown", "ipv6_support": true, "linux_virtualization": "Unknown", "max_ips": 750, "memory": 768.0, "name": "R5DN 24xlarge", "network_perf": 26.0, "physical_processor": "Intel Xeon Platinum 8259 (Cascade Lake)", "placement_group_support": false, "storage": 3600.0, "vcpus": 96.0, "vpc_only": true}, "r5dn.2xlarge": {"apiname": "r5dn.2xlarge", "architecture": "64-bit", "clock_speed_ghz": "unknown", "computeunits": 0.0, "ebs_iops": 18750.0, "ebs_max_bandwidth": 3500.0, "ebs_throughput": 437.5, "ecu_per_vcpu": 0.0, "enhanced_networking": false, "fpga": 0, "gpus": 0, "intel_avx": "unknown", "intel_avx2": 
"unknown", "intel_turbo": "unknown", "ipv6_support": true, "linux_virtualization": "Unknown", "max_ips": 60, "memory": 64.0, "name": "R5DN Double Extra Large", "network_perf": 18.0, "physical_processor": "Intel Xeon Platinum 8259 (Cascade Lake)", "placement_group_support": false, "storage": 300.0, "vcpus": 8.0, "vpc_only": true}, "r5dn.4xlarge": {"apiname": "r5dn.4xlarge", "architecture": "64-bit", "clock_speed_ghz": "unknown", "computeunits": 0.0, "ebs_iops": 18750.0, "ebs_max_bandwidth": 3500.0, "ebs_throughput": 437.5, "ecu_per_vcpu": 0.0, "enhanced_networking": false, "fpga": 0, "gpus": 0, "intel_avx": "unknown", "intel_avx2": "unknown", "intel_turbo": "unknown", "ipv6_support": true, "linux_virtualization": "Unknown", "max_ips": 240, "memory": 128.0, "name": "R5DN Quadruple Extra Large", "network_perf": 18.0, "physical_processor": "Intel Xeon Platinum 8259 (Cascade Lake)", "placement_group_support": false, "storage": 600.0, "vcpus": 16.0, "vpc_only": true}, "r5dn.8xlarge": {"apiname": "r5dn.8xlarge", "architecture": "64-bit", "clock_speed_ghz": "unknown", "computeunits": 0.0, "ebs_iops": 30000.0, "ebs_max_bandwidth": 5000.0, "ebs_throughput": 625.0, "ecu_per_vcpu": 0.0, "enhanced_networking": false, "fpga": 0, "gpus": 0, "intel_avx": "unknown", "intel_avx2": "unknown", "intel_turbo": "unknown", "ipv6_support": true, "linux_virtualization": "Unknown", "max_ips": 240, "memory": 256.0, "name": "R5DN Eight Extra Large", "network_perf": 20.0, "physical_processor": "Intel Xeon Platinum 8259 (Cascade Lake)", "placement_group_support": false, "storage": 1200.0, "vcpus": 32.0, "vpc_only": true}, "r5dn.large": {"apiname": "r5dn.large", "architecture": "64-bit", "clock_speed_ghz": "unknown", "computeunits": 0.0, "ebs_iops": 18750.0, "ebs_max_bandwidth": 3500.0, "ebs_throughput": 437.5, "ecu_per_vcpu": 0.0, "enhanced_networking": false, "fpga": 0, "gpus": 0, "intel_avx": "unknown", "intel_avx2": "unknown", "intel_turbo": "unknown", "ipv6_support": true, "linux_virtualization": "Unknown", "max_ips": 30, "memory": 16.0, "name": "R5DN Large", "network_perf": 18.0, "physical_processor": "Intel Xeon Platinum 8259 (Cascade Lake)", "placement_group_support": false, "storage": 75.0, "vcpus": 2.0, "vpc_only": true}, "r5dn.metal": {"apiname": "r5dn.metal", "architecture": "64-bit", "clock_speed_ghz": "unknown", "computeunits": 0.0, "ebs_iops": 0.0, "ebs_max_bandwidth": 0.0, "ebs_throughput": 0.0, "ecu_per_vcpu": 0.0, "enhanced_networking": false, "fpga": 0, "gpus": 0, "intel_avx": "unknown", "intel_avx2": "unknown", "intel_turbo": "unknown", "ipv6_support": true, "linux_virtualization": "Unknown", "memory": 768.0, "name": "R5DN Metal", "network_perf": 26.0, "physical_processor": "Intel Xeon Platinum 8259 (Cascade Lake)", "placement_group_support": false, "storage": 0.0, "vcpus": 96.0, "vpc_only": true}, "r5dn.xlarge": {"apiname": "r5dn.xlarge", "architecture": "64-bit", "clock_speed_ghz": "unknown", "computeunits": 0.0, "ebs_iops": 18750.0, "ebs_max_bandwidth": 3500.0, "ebs_throughput": 437.5, "ecu_per_vcpu": 0.0, "enhanced_networking": false, "fpga": 0, "gpus": 0, "intel_avx": "unknown", "intel_avx2": "unknown", "intel_turbo": "unknown", "ipv6_support": true, "linux_virtualization": "Unknown", "max_ips": 60, "memory": 32.0, "name": "R5DN Extra Large", "network_perf": 18.0, "physical_processor": "Intel Xeon Platinum 8259 (Cascade Lake)", "placement_group_support": false, "storage": 150.0, "vcpus": 4.0, "vpc_only": true}, "r5n.12xlarge": {"apiname": "r5n.12xlarge", "architecture": "64-bit", 
"clock_speed_ghz": "unknown", "computeunits": 0.0, "ebs_iops": 40000.0, "ebs_max_bandwidth": 7000.0, "ebs_throughput": 875.0, "ecu_per_vcpu": 0.0, "enhanced_networking": false, "fpga": 0, "gpus": 0, "intel_avx": "unknown", "intel_avx2": "unknown", "intel_turbo": "unknown", "ipv6_support": true, "linux_virtualization": "Unknown", "max_ips": 240, "memory": 384.0, "name": "R5N 12xlarge", "network_perf": 22.0, "physical_processor": "Intel Xeon Platinum 8259 (Cascade Lake)", "placement_group_support": false, "storage": 0.0, "vcpus": 48.0, "vpc_only": true}, "r5n.16xlarge": {"apiname": "r5n.16xlarge", "architecture": "64-bit", "clock_speed_ghz": "unknown", "computeunits": 0.0, "ebs_iops": 60000.0, "ebs_max_bandwidth": 10000.0, "ebs_throughput": 1250.0, "ecu_per_vcpu": 0.0, "enhanced_networking": false, "fpga": 0, "gpus": 0, "intel_avx": "unknown", "intel_avx2": "unknown", "intel_turbo": "unknown", "ipv6_support": true, "linux_virtualization": "Unknown", "max_ips": 750, "memory": 512.0, "name": "R5N 16xlarge", "network_perf": 24.0, "physical_processor": "Intel Xeon Platinum 8259 (Cascade Lake)", "placement_group_support": false, "storage": 0.0, "vcpus": 64.0, "vpc_only": true}, "r5n.24xlarge": {"apiname": "r5n.24xlarge", "architecture": "64-bit", "clock_speed_ghz": "unknown", "computeunits": 0.0, "ebs_iops": 80000.0, "ebs_max_bandwidth": 14000.0, "ebs_throughput": 1750.0, "ecu_per_vcpu": 0.0, "enhanced_networking": false, "fpga": 0, "gpus": 0, "intel_avx": "unknown", "intel_avx2": "unknown", "intel_turbo": "unknown", "ipv6_support": true, "linux_virtualization": "Unknown", "max_ips": 750, "memory": 768.0, "name": "R5N 24xlarge", "network_perf": 26.0, "physical_processor": "Intel Xeon Platinum 8259 (Cascade Lake)", "placement_group_support": false, "storage": 0.0, "vcpus": 96.0, "vpc_only": true}, "r5n.2xlarge": {"apiname": "r5n.2xlarge", "architecture": "64-bit", "clock_speed_ghz": "unknown", "computeunits": 0.0, "ebs_iops": 18750.0, "ebs_max_bandwidth": 3500.0, "ebs_throughput": 437.5, "ecu_per_vcpu": 0.0, "enhanced_networking": false, "fpga": 0, "gpus": 0, "intel_avx": "unknown", "intel_avx2": "unknown", "intel_turbo": "unknown", "ipv6_support": true, "linux_virtualization": "Unknown", "max_ips": 60, "memory": 64.0, "name": "R5N Double Extra Large", "network_perf": 18.0, "physical_processor": "Intel Xeon Platinum 8259 (Cascade Lake)", "placement_group_support": false, "storage": 0.0, "vcpus": 8.0, "vpc_only": true}, "r5n.4xlarge": {"apiname": "r5n.4xlarge", "architecture": "64-bit", "clock_speed_ghz": "unknown", "computeunits": 0.0, "ebs_iops": 18750.0, "ebs_max_bandwidth": 3500.0, "ebs_throughput": 437.5, "ecu_per_vcpu": 0.0, "enhanced_networking": false, "fpga": 0, "gpus": 0, "intel_avx": "unknown", "intel_avx2": "unknown", "intel_turbo": "unknown", "ipv6_support": true, "linux_virtualization": "Unknown", "max_ips": 240, "memory": 128.0, "name": "R5N Quadruple Extra Large", "network_perf": 18.0, "physical_processor": "Intel Xeon Platinum 8259 (Cascade Lake)", "placement_group_support": false, "storage": 0.0, "vcpus": 16.0, "vpc_only": true}, "r5n.8xlarge": {"apiname": "r5n.8xlarge", "architecture": "64-bit", "clock_speed_ghz": "unknown", "computeunits": 0.0, "ebs_iops": 30000.0, "ebs_max_bandwidth": 5000.0, "ebs_throughput": 625.0, "ecu_per_vcpu": 0.0, "enhanced_networking": false, "fpga": 0, "gpus": 0, "intel_avx": "unknown", "intel_avx2": "unknown", "intel_turbo": "unknown", "ipv6_support": true, "linux_virtualization": "Unknown", "max_ips": 240, "memory": 256.0, "name": "R5N Eight Extra 
Large", "network_perf": 20.0, "physical_processor": "Intel Xeon Platinum 8259 (Cascade Lake)", "placement_group_support": false, "storage": 0.0, "vcpus": 32.0, "vpc_only": true}, "r5n.large": {"apiname": "r5n.large", "architecture": "64-bit", "clock_speed_ghz": "unknown", "computeunits": 0.0, "ebs_iops": 18750.0, "ebs_max_bandwidth": 3500.0, "ebs_throughput": 437.5, "ecu_per_vcpu": 0.0, "enhanced_networking": false, "fpga": 0, "gpus": 0, "intel_avx": "unknown", "intel_avx2": "unknown", "intel_turbo": "unknown", "ipv6_support": true, "linux_virtualization": "Unknown", "max_ips": 30, "memory": 16.0, "name": "R5N Large", "network_perf": 18.0, "physical_processor": "Intel Xeon Platinum 8259 (Cascade Lake)", "placement_group_support": false, "storage": 0.0, "vcpus": 2.0, "vpc_only": true}, "r5n.metal": {"apiname": "r5n.metal", "architecture": "64-bit", "clock_speed_ghz": "unknown", "computeunits": 0.0, "ebs_iops": 0.0, "ebs_max_bandwidth": 0.0, "ebs_throughput": 0.0, "ecu_per_vcpu": 0.0, "enhanced_networking": false, "fpga": 0, "gpus": 0, "intel_avx": "unknown", "intel_avx2": "unknown", "intel_turbo": "unknown", "ipv6_support": true, "linux_virtualization": "Unknown", "memory": 768.0, "name": "R5N Metal", "network_perf": 26.0, "physical_processor": "Intel Xeon Platinum 8259 (Cascade Lake)", "placement_group_support": false, "storage": 0.0, "vcpus": 96.0, "vpc_only": true}, "r5n.xlarge": {"apiname": "r5n.xlarge", "architecture": "64-bit", "clock_speed_ghz": "unknown", "computeunits": 0.0, "ebs_iops": 18750.0, "ebs_max_bandwidth": 3500.0, "ebs_throughput": 437.5, "ecu_per_vcpu": 0.0, "enhanced_networking": false, "fpga": 0, "gpus": 0, "intel_avx": "unknown", "intel_avx2": "unknown", "intel_turbo": "unknown", "ipv6_support": true, "linux_virtualization": "Unknown", "max_ips": 60, "memory": 32.0, "name": "R5N Extra Large", "network_perf": 18.0, "physical_processor": "Intel Xeon Platinum 8259 (Cascade Lake)", "placement_group_support": false, "storage": 0.0, "vcpus": 4.0, "vpc_only": true}, "t1.micro": {"apiname": "t1.micro", "architecture": "32/64-bit", "clock_speed_ghz": "unknown", "computeunits": 0.0, "ebs_iops": 0.0, "ebs_max_bandwidth": 0.0, "ebs_throughput": 0.0, "ecu_per_vcpu": 0.0, "enhanced_networking": false, "fpga": 0, "gpus": 0, "intel_avx": "unknown", "intel_avx2": "unknown", "intel_turbo": "unknown", "ipv6_support": false, "linux_virtualization": "PV", "max_ips": 4, "memory": 0.613, "name": "T1 Micro", "network_perf": 0.0, "physical_processor": "Variable", "placement_group_support": false, "storage": 0.0, "vcpus": 1.0, "vpc_only": false}, "t2.2xlarge": {"apiname": "t2.2xlarge", "architecture": "64-bit", "clock_speed_ghz": "Up to 3.0 GHz", "computeunits": 1.3599999999999999, "ebs_iops": 0.0, "ebs_max_bandwidth": 0.0, "ebs_throughput": 0.0, "ecu_per_vcpu": 0.0, "enhanced_networking": false, "fpga": 0, "gpus": 0, "intel_avx": "Yes", "intel_avx2": "unknown", "intel_turbo": "Yes", "ipv6_support": true, "linux_virtualization": "HVM", "max_ips": 45, "memory": 32.0, "name": "T2 Double Extra Large", "network_perf": 6.0, "physical_processor": "Intel Xeon Family", "placement_group_support": false, "storage": 0.0, "vcpus": 8.0, "vpc_only": true}, "t2.large": {"apiname": "t2.large", "architecture": "64-bit", "clock_speed_ghz": "Up to 3.0 GHz", "computeunits": 0.6, "ebs_iops": 0.0, "ebs_max_bandwidth": 0.0, "ebs_throughput": 0.0, "ecu_per_vcpu": 0.0, "enhanced_networking": false, "fpga": 0, "gpus": 0, "intel_avx": "Yes", "intel_avx2": "unknown", "intel_turbo": "Yes", "ipv6_support": true, 
"linux_virtualization": "HVM", "max_ips": 36, "memory": 8.0, "name": "T2 Large", "network_perf": 4.0, "physical_processor": "Intel Xeon Family", "placement_group_support": false, "storage": 0.0, "vcpus": 2.0, "vpc_only": true}, "t2.medium": {"apiname": "t2.medium", "architecture": "32/64-bit", "clock_speed_ghz": "Up to 3.3 GHz", "computeunits": 0.4, "ebs_iops": 0.0, "ebs_max_bandwidth": 0.0, "ebs_throughput": 0.0, "ecu_per_vcpu": 0.0, "enhanced_networking": false, "fpga": 0, "gpus": 0, "intel_avx": "Yes", "intel_avx2": "unknown", "intel_turbo": "Yes", "ipv6_support": true, "linux_virtualization": "HVM", "max_ips": 18, "memory": 4.0, "name": "T2 Medium", "network_perf": 4.0, "physical_processor": "Intel Xeon Family", "placement_group_support": false, "storage": 0.0, "vcpus": 2.0, "vpc_only": true}, "t2.micro": {"apiname": "t2.micro", "architecture": "32/64-bit", "clock_speed_ghz": "Up to 3.3 GHz", "computeunits": 0.1, "ebs_iops": 0.0, "ebs_max_bandwidth": 0.0, "ebs_throughput": 0.0, "ecu_per_vcpu": 0.0, "enhanced_networking": false, "fpga": 0, "gpus": 0, "intel_avx": "Yes", "intel_avx2": "unknown", "intel_turbo": "Yes", "ipv6_support": true, "linux_virtualization": "HVM", "max_ips": 4, "memory": 1.0, "name": "T2 Micro", "network_perf": 4.0, "physical_processor": "Intel Xeon Family", "placement_group_support": false, "storage": 0.0, "vcpus": 1.0, "vpc_only": true}, "t2.nano": {"apiname": "t2.nano", "architecture": "32/64-bit", "clock_speed_ghz": "Up to 3.3 GHz", "computeunits": 0.05, "ebs_iops": 0.0, "ebs_max_bandwidth": 0.0, "ebs_throughput": 0.0, "ecu_per_vcpu": 0.0, "enhanced_networking": false, "fpga": 0, "gpus": 0, "intel_avx": "Yes", "intel_avx2": "unknown", "intel_turbo": "Yes", "ipv6_support": true, "linux_virtualization": "HVM", "max_ips": 4, "memory": 0.5, "name": "T2 Nano", "network_perf": 2.0, "physical_processor": "Intel Xeon Family", "placement_group_support": false, "storage": 0.0, "vcpus": 1.0, "vpc_only": true}, "t2.small": {"apiname": "t2.small", "architecture": "32/64-bit", "clock_speed_ghz": "Up to 3.3 GHz", "computeunits": 0.2, "ebs_iops": 0.0, "ebs_max_bandwidth": 0.0, "ebs_throughput": 0.0, "ecu_per_vcpu": 0.0, "enhanced_networking": false, "fpga": 0, "gpus": 0, "intel_avx": "Yes", "intel_avx2": "unknown", "intel_turbo": "Yes", "ipv6_support": true, "linux_virtualization": "HVM", "max_ips": 12, "memory": 2.0, "name": "T2 Small", "network_perf": 4.0, "physical_processor": "Intel Xeon Family", "placement_group_support": false, "storage": 0.0, "vcpus": 1.0, "vpc_only": true}, "t2.xlarge": {"apiname": "t2.xlarge", "architecture": "64-bit", "clock_speed_ghz": "Up to 3.0 GHz", "computeunits": 0.9, "ebs_iops": 0.0, "ebs_max_bandwidth": 0.0, "ebs_throughput": 0.0, "ecu_per_vcpu": 0.0, "enhanced_networking": false, "fpga": 0, "gpus": 0, "intel_avx": "Yes", "intel_avx2": "unknown", "intel_turbo": "Yes", "ipv6_support": true, "linux_virtualization": "HVM", "max_ips": 45, "memory": 16.0, "name": "T2 Extra Large", "network_perf": 6.0, "physical_processor": "Intel Xeon Family", "placement_group_support": false, "storage": 0.0, "vcpus": 4.0, "vpc_only": true}, "t3.2xlarge": {"apiname": "t3.2xlarge", "architecture": "64-bit", "clock_speed_ghz": "unknown", "computeunits": 3.2, "ebs_iops": 15700.0, "ebs_max_bandwidth": 2048.0, "ebs_throughput": 256.0, "ecu_per_vcpu": 0.0, "enhanced_networking": false, "fpga": 0, "gpus": 0, "intel_avx": "unknown", "intel_avx2": "unknown", "intel_turbo": "unknown", "ipv6_support": true, "linux_virtualization": "Unknown", "max_ips": 60, "memory": 32.0, 
"name": "T3 Double Extra Large", "network_perf": 6.0, "physical_processor": "Intel Skylake E5 2686 v5 (2.5 GHz)", "placement_group_support": false, "storage": 0.0, "vcpus": 8.0, "vpc_only": true}, "t3.large": {"apiname": "t3.large", "architecture": "64-bit", "clock_speed_ghz": "unknown", "computeunits": 0.6, "ebs_iops": 15700.0, "ebs_max_bandwidth": 2048.0, "ebs_throughput": 256.0, "ecu_per_vcpu": 0.0, "enhanced_networking": false, "fpga": 0, "gpus": 0, "intel_avx": "unknown", "intel_avx2": "unknown", "intel_turbo": "unknown", "ipv6_support": true, "linux_virtualization": "Unknown", "max_ips": 36, "memory": 8.0, "name": "T3 Large", "network_perf": 4.0, "physical_processor": "Intel Skylake E5 2686 v5 (2.5 GHz)", "placement_group_support": false, "storage": 0.0, "vcpus": 2.0, "vpc_only": true}, "t3.medium": {"apiname": "t3.medium", "architecture": "64-bit", "clock_speed_ghz": "unknown", "computeunits": 0.4, "ebs_iops": 11800.0, "ebs_max_bandwidth": 1536.0, "ebs_throughput": 192.0, "ecu_per_vcpu": 0.0, "enhanced_networking": false, "fpga": 0, "gpus": 0, "intel_avx": "unknown", "intel_avx2": "unknown", "intel_turbo": "unknown", "ipv6_support": true, "linux_virtualization": "Unknown", "max_ips": 18, "memory": 4.0, "name": "T3 Medium", "network_perf": 4.0, "physical_processor": "Intel Skylake E5 2686 v5 (2.5 GHz)", "placement_group_support": false, "storage": 0.0, "vcpus": 2.0, "vpc_only": true}, "t3.micro": {"apiname": "t3.micro", "architecture": "64-bit", "clock_speed_ghz": "unknown", "computeunits": 0.2, "ebs_iops": 11800.0, "ebs_max_bandwidth": 1536.0, "ebs_throughput": 192.0, "ecu_per_vcpu": 0.0, "enhanced_networking": false, "fpga": 0, "gpus": 0, "intel_avx": "unknown", "intel_avx2": "unknown", "intel_turbo": "unknown", "ipv6_support": true, "linux_virtualization": "Unknown", "max_ips": 4, "memory": 1.0, "name": "T3 Micro", "network_perf": 4.0, "physical_processor": "Intel Skylake E5 2686 v5 (2.5 GHz)", "placement_group_support": false, "storage": 0.0, "vcpus": 2.0, "vpc_only": true}, "t3.nano": {"apiname": "t3.nano", "architecture": "64-bit", "clock_speed_ghz": "unknown", "computeunits": 0.1, "ebs_iops": 11800.0, "ebs_max_bandwidth": 1536.0, "ebs_throughput": 192.0, "ecu_per_vcpu": 0.0, "enhanced_networking": false, "fpga": 0, "gpus": 0, "intel_avx": "unknown", "intel_avx2": "unknown", "intel_turbo": "unknown", "ipv6_support": true, "linux_virtualization": "Unknown", "max_ips": 4, "memory": 0.5, "name": "T3 Nano", "network_perf": 2.0, "physical_processor": "Intel Skylake E5 2686 v5 (2.5 GHz)", "placement_group_support": false, "storage": 0.0, "vcpus": 2.0, "vpc_only": true}, "t3.small": {"apiname": "t3.small", "architecture": "64-bit", "clock_speed_ghz": "unknown", "computeunits": 0.4, "ebs_iops": 11800.0, "ebs_max_bandwidth": 1536.0, "ebs_throughput": 192.0, "ecu_per_vcpu": 0.0, "enhanced_networking": false, "fpga": 0, "gpus": 0, "intel_avx": "unknown", "intel_avx2": "unknown", "intel_turbo": "unknown", "ipv6_support": true, "linux_virtualization": "Unknown", "max_ips": 12, "memory": 2.0, "name": "T3 Small", "network_perf": 4.0, "physical_processor": "Intel Skylake E5 2686 v5 (2.5 GHz)", "placement_group_support": false, "storage": 0.0, "vcpus": 2.0, "vpc_only": true}, "t3.xlarge": {"apiname": "t3.xlarge", "architecture": "64-bit", "clock_speed_ghz": "unknown", "computeunits": 1.6, "ebs_iops": 15700.0, "ebs_max_bandwidth": 2048.0, "ebs_throughput": 256.0, "ecu_per_vcpu": 0.0, "enhanced_networking": false, "fpga": 0, "gpus": 0, "intel_avx": "unknown", "intel_avx2": "unknown", 
"intel_turbo": "unknown", "ipv6_support": true, "linux_virtualization": "Unknown", "max_ips": 60, "memory": 16.0, "name": "T3 Extra Large", "network_perf": 6.0, "physical_processor": "Intel Skylake E5 2686 v5 (2.5 GHz)", "placement_group_support": false, "storage": 0.0, "vcpus": 4.0, "vpc_only": true}, "t3a.2xlarge": {"apiname": "t3a.2xlarge", "architecture": "64-bit", "clock_speed_ghz": "2.5 GHz", "computeunits": 3.2, "ebs_iops": 15700.0, "ebs_max_bandwidth": 2048.0, "ebs_throughput": 256.0, "ecu_per_vcpu": 0.0, "enhanced_networking": true, "fpga": 0, "gpus": 0, "intel_avx": "unknown", "intel_avx2": "unknown", "intel_turbo": "unknown", "ipv6_support": true, "linux_virtualization": "Unknown", "max_ips": 60, "memory": 32.0, "name": "T3A Double Extra Large", "network_perf": 6.0, "physical_processor": "AMD EPYC 7571", "placement_group_support": false, "storage": 0.0, "vcpus": 8.0, "vpc_only": true}, "t3a.large": {"apiname": "t3a.large", "architecture": "64-bit", "clock_speed_ghz": "2.5 GHz", "computeunits": 0.6, "ebs_iops": 15700.0, "ebs_max_bandwidth": 2048.0, "ebs_throughput": 256.0, "ecu_per_vcpu": 0.0, "enhanced_networking": true, "fpga": 0, "gpus": 0, "intel_avx": "unknown", "intel_avx2": "unknown", "intel_turbo": "unknown", "ipv6_support": true, "linux_virtualization": "Unknown", "max_ips": 36, "memory": 8.0, "name": "T3A Large", "network_perf": 4.0, "physical_processor": "AMD EPYC 7571", "placement_group_support": false, "storage": 0.0, "vcpus": 2.0, "vpc_only": true}, "t3a.medium": {"apiname": "t3a.medium", "architecture": "64-bit", "clock_speed_ghz": "2.5 GHz", "computeunits": 0.4, "ebs_iops": 11800.0, "ebs_max_bandwidth": 1536.0, "ebs_throughput": 192.0, "ecu_per_vcpu": 0.0, "enhanced_networking": true, "fpga": 0, "gpus": 0, "intel_avx": "unknown", "intel_avx2": "unknown", "intel_turbo": "unknown", "ipv6_support": true, "linux_virtualization": "Unknown", "max_ips": 18, "memory": 4.0, "name": "T3A Medium", "network_perf": 4.0, "physical_processor": "AMD EPYC 7571", "placement_group_support": false, "storage": 0.0, "vcpus": 2.0, "vpc_only": true}, "t3a.micro": {"apiname": "t3a.micro", "architecture": "64-bit", "clock_speed_ghz": "2.5 GHz", "computeunits": 0.2, "ebs_iops": 11800.0, "ebs_max_bandwidth": 1536.0, "ebs_throughput": 192.0, "ecu_per_vcpu": 0.0, "enhanced_networking": true, "fpga": 0, "gpus": 0, "intel_avx": "unknown", "intel_avx2": "unknown", "intel_turbo": "unknown", "ipv6_support": true, "linux_virtualization": "Unknown", "max_ips": 4, "memory": 1.0, "name": "T3A Micro", "network_perf": 4.0, "physical_processor": "AMD EPYC 7571", "placement_group_support": false, "storage": 0.0, "vcpus": 2.0, "vpc_only": true}, "t3a.nano": {"apiname": "t3a.nano", "architecture": "64-bit", "clock_speed_ghz": "2.5 GHz", "computeunits": 0.1, "ebs_iops": 11800.0, "ebs_max_bandwidth": 1536.0, "ebs_throughput": 192.0, "ecu_per_vcpu": 0.0, "enhanced_networking": true, "fpga": 0, "gpus": 0, "intel_avx": "unknown", "intel_avx2": "unknown", "intel_turbo": "unknown", "ipv6_support": true, "linux_virtualization": "Unknown", "max_ips": 4, "memory": 0.5, "name": "T3A Nano", "network_perf": 2.0, "physical_processor": "AMD EPYC 7571", "placement_group_support": false, "storage": 0.0, "vcpus": 2.0, "vpc_only": true}, "t3a.small": {"apiname": "t3a.small", "architecture": "64-bit", "clock_speed_ghz": "2.5 GHz", "computeunits": 0.4, "ebs_iops": 11800.0, "ebs_max_bandwidth": 1536.0, "ebs_throughput": 192.0, "ecu_per_vcpu": 0.0, "enhanced_networking": true, "fpga": 0, "gpus": 0, "intel_avx": "unknown", 
"intel_avx2": "unknown", "intel_turbo": "unknown", "ipv6_support": true, "linux_virtualization": "Unknown", "max_ips": 8, "memory": 2.0, "name": "T3A Small", "network_perf": 4.0, "physical_processor": "AMD EPYC 7571", "placement_group_support": false, "storage": 0.0, "vcpus": 2.0, "vpc_only": true}, "t3a.xlarge": {"apiname": "t3a.xlarge", "architecture": "64-bit", "clock_speed_ghz": "2.5 GHz", "computeunits": 1.6, "ebs_iops": 15700.0, "ebs_max_bandwidth": 2048.0, "ebs_throughput": 256.0, "ecu_per_vcpu": 0.0, "enhanced_networking": true, "fpga": 0, "gpus": 0, "intel_avx": "unknown", "intel_avx2": "unknown", "intel_turbo": "unknown", "ipv6_support": true, "linux_virtualization": "Unknown", "max_ips": 60, "memory": 16.0, "name": "T3A Extra Large", "network_perf": 6.0, "physical_processor": "AMD EPYC 7571", "placement_group_support": false, "storage": 0.0, "vcpus": 4.0, "vpc_only": true}, "u-12tb1.metal": {"apiname": "u-12tb1.metal", "architecture": "64-bit", "clock_speed_ghz": "unknown", "computeunits": 0.0, "ebs_iops": 80000.0, "ebs_max_bandwidth": 14000.0, "ebs_throughput": 1750.0, "ecu_per_vcpu": 0.0, "enhanced_networking": false, "fpga": 0, "gpus": 0, "intel_avx": "unknown", "intel_avx2": "unknown", "intel_turbo": "unknown", "ipv6_support": true, "linux_virtualization": "Unknown", "max_ips": 150, "memory": 12288.0, "name": "U-12TB1 Metal", "network_perf": 20.0, "physical_processor": "Intel Xeon Scalable (Skylake) processors", "placement_group_support": false, "storage": 0.0, "vcpus": 448.0, "vpc_only": true}, "u-18tb1.metal": {"apiname": "u-18tb1.metal", "architecture": "64-bit", "clock_speed_ghz": "unknown", "computeunits": 0.0, "ebs_iops": 160000.0, "ebs_max_bandwidth": 28000.0, "ebs_throughput": 3500.0, "ecu_per_vcpu": 0.0, "enhanced_networking": false, "fpga": 0, "gpus": 0, "intel_avx": "unknown", "intel_avx2": "unknown", "intel_turbo": "unknown", "ipv6_support": true, "linux_virtualization": "Unknown", "max_ips": 750, "memory": 18432.0, "name": "U-18TB1 Metal", "network_perf": 26.0, "physical_processor": "Intel Xeon Platinum 8280L (Cascade Lake)", "placement_group_support": false, "storage": 0.0, "vcpus": 448.0, "vpc_only": true}, "u-24tb1.metal": {"apiname": "u-24tb1.metal", "architecture": "64-bit", "clock_speed_ghz": "unknown", "computeunits": 0.0, "ebs_iops": 160000.0, "ebs_max_bandwidth": 28000.0, "ebs_throughput": 3500.0, "ecu_per_vcpu": 0.0, "enhanced_networking": false, "fpga": 0, "gpus": 0, "intel_avx": "unknown", "intel_avx2": "unknown", "intel_turbo": "unknown", "ipv6_support": true, "linux_virtualization": "Unknown", "max_ips": 750, "memory": 24576.0, "name": "U-24TB1 Metal", "network_perf": 26.0, "physical_processor": "Intel Xeon Platinum 8280L (Cascade Lake)", "placement_group_support": false, "storage": 0.0, "vcpus": 448.0, "vpc_only": true}, "u-6tb1.metal": {"apiname": "u-6tb1.metal", "architecture": "64-bit", "clock_speed_ghz": "unknown", "computeunits": 0.0, "ebs_iops": 80000.0, "ebs_max_bandwidth": 14000.0, "ebs_throughput": 1750.0, "ecu_per_vcpu": 0.0, "enhanced_networking": false, "fpga": 0, "gpus": 0, "intel_avx": "unknown", "intel_avx2": "unknown", "intel_turbo": "unknown", "ipv6_support": true, "linux_virtualization": "Unknown", "max_ips": 150, "memory": 6144.0, "name": "U-6TB1 Metal", "network_perf": 20.0, "physical_processor": "Intel Xeon Scalable (Skylake) processors", "placement_group_support": false, "storage": 0.0, "vcpus": 448.0, "vpc_only": true}, "u-9tb1.metal": {"apiname": "u-9tb1.metal", "architecture": "64-bit", "clock_speed_ghz": "unknown", 
"computeunits": 0.0, "ebs_iops": 80000.0, "ebs_max_bandwidth": 14000.0, "ebs_throughput": 1750.0, "ecu_per_vcpu": 0.0, "enhanced_networking": false, "fpga": 0, "gpus": 0, "intel_avx": "unknown", "intel_avx2": "unknown", "intel_turbo": "unknown", "ipv6_support": true, "linux_virtualization": "Unknown", "max_ips": 150, "memory": 9216.0, "name": "U-9TB1 Metal", "network_perf": 20.0, "physical_processor": "Intel Xeon Scalable (Skylake) processors", "placement_group_support": false, "storage": 0.0, "vcpus": 448.0, "vpc_only": true}, "x1.16xlarge": {"apiname": "x1.16xlarge", "architecture": "64-bit", "clock_speed_ghz": "2.3 GHz", "computeunits": 174.5, "ebs_iops": 40000.0, "ebs_max_bandwidth": 7000.0, "ebs_throughput": 875.0, "ecu_per_vcpu": 2.7265625, "enhanced_networking": true, "fpga": 0, "gpus": 0, "intel_avx": "unknown", "intel_avx2": "unknown", "intel_turbo": "unknown", "ipv6_support": true, "linux_virtualization": "HVM", "max_ips": 240, "memory": 976.0, "name": "X1 Extra High-Memory 16xlarge", "network_perf": 8.0, "physical_processor": "High Frequency Intel Xeon E7-8880 v3 (Haswell)", "placement_group_support": false, "storage": 1920.0, "vcpus": 64.0, "vpc_only": true}, "x1.32xlarge": {"apiname": "x1.32xlarge", "architecture": "64-bit", "clock_speed_ghz": "2.3 GHz", "computeunits": 349.0, "ebs_iops": 80000.0, "ebs_max_bandwidth": 14000.0, "ebs_throughput": 1750.0, "ecu_per_vcpu": 2.7265625, "enhanced_networking": true, "fpga": 0, "gpus": 0, "intel_avx": "unknown", "intel_avx2": "unknown", "intel_turbo": "unknown", "ipv6_support": true, "linux_virtualization": "HVM", "max_ips": 240, "memory": 1952.0, "name": "X1 Extra High-Memory 32xlarge", "network_perf": 8.0, "physical_processor": "High Frequency Intel Xeon E7-8880 v3 (Haswell)", "placement_group_support": false, "storage": 3840.0, "vcpus": 128.0, "vpc_only": true}, "x1e.16xlarge": {"apiname": "x1e.16xlarge", "architecture": "64-bit", "clock_speed_ghz": "2.3 GHz", "computeunits": 179.0, "ebs_iops": 40000.0, "ebs_max_bandwidth": 7000.0, "ebs_throughput": 875.0, "ecu_per_vcpu": 2.796875, "enhanced_networking": true, "fpga": 0, "gpus": 0, "intel_avx": "Yes", "intel_avx2": "Yes", "intel_turbo": "Yes", "ipv6_support": true, "linux_virtualization": "HVM", "max_ips": 240, "memory": 1952.0, "name": "X1E 16xlarge", "network_perf": 12.0, "physical_processor": "High Frequency Intel Xeon E7-8880 v3 (Haswell)", "placement_group_support": false, "storage": 1920.0, "vcpus": 64.0, "vpc_only": true}, "x1e.2xlarge": {"apiname": "x1e.2xlarge", "architecture": "64-bit", "clock_speed_ghz": "2.3 GHz", "computeunits": 23.0, "ebs_iops": 7400.0, "ebs_max_bandwidth": 1000.0, "ebs_throughput": 125.0, "ecu_per_vcpu": 2.875, "enhanced_networking": true, "fpga": 0, "gpus": 0, "intel_avx": "Yes", "intel_avx2": "Yes", "intel_turbo": "unknown", "ipv6_support": true, "linux_virtualization": "HVM", "max_ips": 60, "memory": 244.0, "name": "X1E Double Extra Large", "network_perf": 10.0, "physical_processor": "High Frequency Intel Xeon E7-8880 v3 (Haswell)", "placement_group_support": false, "storage": 240.0, "vcpus": 8.0, "vpc_only": true}, "x1e.32xlarge": {"apiname": "x1e.32xlarge", "architecture": "64-bit", "clock_speed_ghz": "2.3 GHz", "computeunits": 340.0, "ebs_iops": 80000.0, "ebs_max_bandwidth": 14000.0, "ebs_throughput": 1750.0, "ecu_per_vcpu": 2.65625, "enhanced_networking": true, "fpga": 0, "gpus": 0, "intel_avx": "Yes", "intel_avx2": "Yes", "intel_turbo": "Yes", "ipv6_support": true, "linux_virtualization": "HVM", "max_ips": 240, "memory": 3904.0, "name": "X1E 
32xlarge", "network_perf": 20.0, "physical_processor": "High Frequency Intel Xeon E7-8880 v3 (Haswell)", "placement_group_support": false, "storage": 3840.0, "vcpus": 128.0, "vpc_only": true}, "x1e.4xlarge": {"apiname": "x1e.4xlarge", "architecture": "64-bit", "clock_speed_ghz": "2.3 GHz", "computeunits": 47.0, "ebs_iops": 10000.0, "ebs_max_bandwidth": 1750.0, "ebs_throughput": 218.75, "ecu_per_vcpu": 2.9375, "enhanced_networking": true, "fpga": 0, "gpus": 0, "intel_avx": "Yes", "intel_avx2": "Yes", "intel_turbo": "unknown", "ipv6_support": true, "linux_virtualization": "HVM", "max_ips": 60, "memory": 488.0, "name": "X1E Quadruple Extra Large", "network_perf": 10.0, "physical_processor": "High Frequency Intel Xeon E7-8880 v3 (Haswell)", "placement_group_support": false, "storage": 480.0, "vcpus": 16.0, "vpc_only": true}, "x1e.8xlarge": {"apiname": "x1e.8xlarge", "architecture": "64-bit", "clock_speed_ghz": "2.3 GHz", "computeunits": 91.0, "ebs_iops": 20000.0, "ebs_max_bandwidth": 3500.0, "ebs_throughput": 437.5, "ecu_per_vcpu": 2.84375, "enhanced_networking": true, "fpga": 0, "gpus": 0, "intel_avx": "Yes", "intel_avx2": "Yes", "intel_turbo": "Yes", "ipv6_support": true, "linux_virtualization": "HVM", "max_ips": 60, "memory": 976.0, "name": "X1E Eight Extra Large", "network_perf": 10.0, "physical_processor": "High Frequency Intel Xeon E7-8880 v3 (Haswell)", "placement_group_support": false, "storage": 960.0, "vcpus": 32.0, "vpc_only": true}, "x1e.xlarge": {"apiname": "x1e.xlarge", "architecture": "64-bit", "clock_speed_ghz": "2.3 GHz", "computeunits": 12.0, "ebs_iops": 3700.0, "ebs_max_bandwidth": 500.0, "ebs_throughput": 62.5, "ecu_per_vcpu": 3.0, "enhanced_networking": true, "fpga": 0, "gpus": 0, "intel_avx": "Yes", "intel_avx2": "Yes", "intel_turbo": "unknown", "ipv6_support": true, "linux_virtualization": "HVM", "max_ips": 30, "memory": 122.0, "name": "X1E Extra Large", "network_perf": 10.0, "physical_processor": "High Frequency Intel Xeon E7-8880 v3 (Haswell)", "placement_group_support": false, "storage": 120.0, "vcpus": 4.0, "vpc_only": true}, "z1d.12xlarge": {"apiname": "z1d.12xlarge", "architecture": "64-bit", "clock_speed_ghz": "unknown", "computeunits": 271.0, "ebs_iops": 80000.0, "ebs_max_bandwidth": 14000.0, "ebs_throughput": 1750.0, "ecu_per_vcpu": 5.645833333333333, "enhanced_networking": false, "fpga": 0, "gpus": 0, "intel_avx": "unknown", "intel_avx2": "unknown", "intel_turbo": "unknown", "ipv6_support": true, "linux_virtualization": "HVM", "max_ips": 750, "memory": 384.0, "name": "Z1D 12xlarge", "network_perf": 20.0, "physical_processor": "Intel Xeon Platinum 8151", "placement_group_support": false, "storage": 1800.0, "vcpus": 48.0, "vpc_only": true}, "z1d.2xlarge": {"apiname": "z1d.2xlarge", "architecture": "64-bit", "clock_speed_ghz": "unknown", "computeunits": 53.0, "ebs_iops": 13333.0, "ebs_max_bandwidth": 2333.0, "ebs_throughput": 292.0, "ecu_per_vcpu": 6.625, "enhanced_networking": false, "fpga": 0, "gpus": 0, "intel_avx": "unknown", "intel_avx2": "unknown", "intel_turbo": "unknown", "ipv6_support": true, "linux_virtualization": "HVM", "max_ips": 60, "memory": 64.0, "name": "Z1D Double Extra Large", "network_perf": 10.0, "physical_processor": "Intel Xeon Platinum 8151", "placement_group_support": false, "storage": 300.0, "vcpus": 8.0, "vpc_only": true}, "z1d.3xlarge": {"apiname": "z1d.3xlarge", "architecture": "64-bit", "clock_speed_ghz": "unknown", "computeunits": 75.0, "ebs_iops": 20000.0, "ebs_max_bandwidth": 3500.0, "ebs_throughput": 438.0, "ecu_per_vcpu": 6.25, 
"enhanced_networking": false, "fpga": 0, "gpus": 0, "intel_avx": "unknown", "intel_avx2": "unknown", "intel_turbo": "unknown", "ipv6_support": true, "linux_virtualization": "HVM", "max_ips": 240, "memory": 96.0, "name": "Z1D 3xlarge", "network_perf": 10.0, "physical_processor": "Intel Xeon Platinum 8151", "placement_group_support": false, "storage": 450.0, "vcpus": 12.0, "vpc_only": true}, "z1d.6xlarge": {"apiname": "z1d.6xlarge", "architecture": "64-bit", "clock_speed_ghz": "unknown", "computeunits": 134.0, "ebs_iops": 40000.0, "ebs_max_bandwidth": 7000.0, "ebs_throughput": 875.0, "ecu_per_vcpu": 5.583333333333333, "enhanced_networking": false, "fpga": 0, "gpus": 0, "intel_avx": "unknown", "intel_avx2": "unknown", "intel_turbo": "unknown", "ipv6_support": true, "linux_virtualization": "HVM", "max_ips": 240, "memory": 192.0, "name": "Z1D 6xlarge", "network_perf": 12.0, "physical_processor": "Intel Xeon Platinum 8151", "placement_group_support": false, "storage": 900.0, "vcpus": 24.0, "vpc_only": true}, "z1d.large": {"apiname": "z1d.large", "architecture": "64-bit", "clock_speed_ghz": "4.0 GHz", "computeunits": 15.0, "ebs_iops": 13333.0, "ebs_max_bandwidth": 2333.0, "ebs_throughput": 291.0, "ecu_per_vcpu": 7.5, "enhanced_networking": false, "fpga": 0, "gpus": 0, "intel_avx": "unknown", "intel_avx2": "unknown", "intel_turbo": "unknown", "ipv6_support": true, "linux_virtualization": "HVM", "max_ips": 30, "memory": 16.0, "name": "Z1D Large", "network_perf": 10.0, "physical_processor": "Intel Xeon Platinum 8151", "placement_group_support": false, "storage": 75.0, "vcpus": 2.0, "vpc_only": true}, "z1d.metal": {"apiname": "z1d.metal", "architecture": "64-bit", "clock_speed_ghz": "unknown", "computeunits": 271.0, "ebs_iops": 80000.0, "ebs_max_bandwidth": 14000.0, "ebs_throughput": 1750.0, "ecu_per_vcpu": 5.645833333333333, "enhanced_networking": false, "fpga": 0, "gpus": 0, "intel_avx": "unknown", "intel_avx2": "unknown", "intel_turbo": "unknown", "ipv6_support": true, "linux_virtualization": "HVM", "max_ips": 750, "memory": 384.0, "name": "Z1D Metal", "network_perf": 20.0, "physical_processor": "Intel Xeon Platinum 8151", "placement_group_support": false, "storage": 1800.0, "vcpus": 48.0, "vpc_only": true}, "z1d.xlarge": {"apiname": "z1d.xlarge", "architecture": "64-bit", "clock_speed_ghz": "unknown", "computeunits": 28.0, "ebs_iops": 13333.0, "ebs_max_bandwidth": 2333.0, "ebs_throughput": 291.0, "ecu_per_vcpu": 7.0, "enhanced_networking": false, "fpga": 0, "gpus": 0, "intel_avx": "unknown", "intel_avx2": "unknown", "intel_turbo": "unknown", "ipv6_support": true, "linux_virtualization": "HVM", "max_ips": 60, "memory": 32.0, "name": "Z1D Extra Large", "network_perf": 10.0, "physical_processor": "Intel Xeon Platinum 8151", "placement_group_support": false, "storage": 150.0, "vcpus": 4.0, "vpc_only": true}} \ No newline at end of file diff --git a/scripts/get_instance_info.py b/scripts/get_instance_info.py index f883c0cae753..7aea257f8e2c 100755 --- a/scripts/get_instance_info.py +++ b/scripts/get_instance_info.py @@ -1,4 +1,5 @@ #!/usr/bin/env python + import json import os import subprocess @@ -11,128 +12,142 @@ def __init__(self, instance): self.instance = instance def _get_td(self, td): - return self.instance.find('td', attrs={'class': td}) + return self.instance.find("td", attrs={"class": td}) def _get_sort(self, td): - return float(self.instance.find('td', attrs={'class': td}).find('span')['sort']) + return float(self.instance.find("td", attrs={"class": td}).find("span")["sort"]) @property def 
name(self): - return self._get_td('name').text.strip() + return self._get_td("name").text.strip() @property def apiname(self): - return self._get_td('apiname').text.strip() + return self._get_td("apiname").text.strip() @property def memory(self): - return self._get_sort('memory') + return self._get_sort("memory") @property def computeunits(self): - return self._get_sort('computeunits') + return self._get_sort("computeunits") @property def vcpus(self): - return self._get_sort('vcpus') + return self._get_sort("vcpus") @property def gpus(self): - return int(self._get_td('gpus').text.strip()) + return int(self._get_td("gpus").text.strip()) @property def fpga(self): - return int(self._get_td('fpga').text.strip()) + return int(self._get_td("fpga").text.strip()) @property def ecu_per_vcpu(self): - return self._get_sort('ecu-per-vcpu') + return self._get_sort("ecu-per-vcpu") @property def physical_processor(self): - return self._get_td('physical_processor').text.strip() + return self._get_td("physical_processor").text.strip() @property def clock_speed_ghz(self): - return self._get_td('clock_speed_ghz').text.strip() + return self._get_td("clock_speed_ghz").text.strip() @property def intel_avx(self): - return self._get_td('intel_avx').text.strip() + return self._get_td("intel_avx").text.strip() @property def intel_avx2(self): - return self._get_td('intel_avx2').text.strip() + return self._get_td("intel_avx2").text.strip() @property def intel_turbo(self): - return self._get_td('intel_turbo').text.strip() + return self._get_td("intel_turbo").text.strip() @property def storage(self): - return self._get_sort('storage') + return self._get_sort("storage") @property def architecture(self): - return self._get_td('architecture').text.strip() + return self._get_td("architecture").text.strip() @property def network_perf(self): # 2 == low - return self._get_sort('networkperf') + return self._get_sort("networkperf") @property def ebs_max_bandwidth(self): - return self._get_sort('ebs-max-bandwidth') + return self._get_sort("ebs-max-bandwidth") @property def ebs_throughput(self): - return self._get_sort('ebs-throughput') + return self._get_sort("ebs-throughput") @property def ebs_iops(self): - return self._get_sort('ebs-iops') + return self._get_sort("ebs-iops") @property def max_ips(self): - return int(self._get_td('maxips').text.strip()) + return int(self._get_td("maxips").text.strip()) @property def enhanced_networking(self): - return self._get_td('enhanced-networking').text.strip() != 'No' + return self._get_td("enhanced-networking").text.strip() != "No" @property def vpc_only(self): - return self._get_td('vpc-only').text.strip() != 'No' + return self._get_td("vpc-only").text.strip() != "No" @property def ipv6_support(self): - return self._get_td('ipv6-support').text.strip() != 'No' + return self._get_td("ipv6-support").text.strip() != "No" @property def placement_group_support(self): - return self._get_td('placement-group-support').text.strip() != 'No' + return self._get_td("placement-group-support").text.strip() != "No" @property def linux_virtualization(self): - return self._get_td('linux-virtualization').text.strip() + return self._get_td("linux-virtualization").text.strip() def to_dict(self): result = {} - for attr in [x for x in self.__class__.__dict__.keys() if not x.startswith('_') and x != 'to_dict']: - result[attr] = getattr(self, attr) + for attr in [ + x + for x in self.__class__.__dict__.keys() + if not x.startswith("_") and x != "to_dict" + ]: + try: + result[attr] = getattr(self, attr) + except 
ValueError as ex: + if "'N/A'" in str(ex): + print( + "Skipping attribute '{0}' for instance type '{1}' (not found)".format( + attr, self.name + ) + ) + else: + raise return self.apiname, result def main(): print("Getting HTML from http://www.ec2instances.info") - page_request = requests.get('http://www.ec2instances.info') - soup = BeautifulSoup(page_request.text, 'html.parser') - data_table = soup.find(id='data') + page_request = requests.get("http://www.ec2instances.info") + soup = BeautifulSoup(page_request.text, "html.parser") + data_table = soup.find(id="data") print("Finding data in table") - instances = data_table.find('tbody').find_all('tr') + instances = data_table.find("tbody").find_all("tr") print("Parsing data") result = {} @@ -140,11 +155,16 @@ def main(): instance_id, instance_data = Instance(instance).to_dict() result[instance_id] = instance_data - root_dir = subprocess.check_output(['git', 'rev-parse', '--show-toplevel']).decode().strip() - dest = os.path.join(root_dir, 'moto/ec2/resources/instance_types.json') + root_dir = ( + subprocess.check_output(["git", "rev-parse", "--show-toplevel"]) + .decode() + .strip() + ) + dest = os.path.join(root_dir, "moto/ec2/resources/instance_types.json") print("Writing data to {0}".format(dest)) - with open(dest, 'w') as open_file: - json.dump(result, open_file) + with open(dest, "w") as open_file: + json.dump(result, open_file, sort_keys=True) + -if __name__ == '__main__': +if __name__ == "__main__": main() From 870b34ba7693e88df38d2f2765b972cfda955cee Mon Sep 17 00:00:00 2001 From: Bert Blommers Date: Thu, 16 Apr 2020 07:09:50 +0100 Subject: [PATCH 230/658] Spacing --- tests/test_dynamodb2/test_dynamodb.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/test_dynamodb2/test_dynamodb.py b/tests/test_dynamodb2/test_dynamodb.py index 8a8c69a8c670..a0a5e6406724 100644 --- a/tests/test_dynamodb2/test_dynamodb.py +++ b/tests/test_dynamodb2/test_dynamodb.py @@ -4213,5 +4213,5 @@ def test_dynamodb_max_1mb_limit(): KeyConditionExpression=Key("partition_key").eq("partition_key_val") ) # We shouldn't get everything back - the total result set is well over 1MB - len(items).should.be.greater_than(response["Count"]) + len(items).should.be.greater_than(response["Count"]) response["LastEvaluatedKey"].shouldnt.be(None) From 92bbc3fbacbbe65fc6d9e134d15e78c97d3e256c Mon Sep 17 00:00:00 2001 From: Tim Date: Thu, 16 Apr 2020 08:20:43 -0700 Subject: [PATCH 231/658] Adds initial support for secretsmanager update_secret The support in this patch is preliminary and may or may not be feature complete. It provides the basic support for update_secret so that future work can build on it as needed. 
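For reference, here is a minimal sketch of how the new operation is driven through boto3 once this patch is applied. The sketch is illustrative and not part of the patch itself; the function name and the secret name/values are made up:

    import boto3
    from moto import mock_secretsmanager

    @mock_secretsmanager
    def update_secret_roundtrip():
        conn = boto3.client("secretsmanager", region_name="us-west-2")
        created = conn.create_secret(Name="demo-secret", SecretString="v1")
        # update_secret stores the new value under a fresh VersionId while
        # keeping the secret's name and ARN stable.
        updated = conn.update_secret(SecretId="demo-secret", SecretString="v2")
        assert updated["VersionId"] != created["VersionId"]
        assert conn.get_secret_value(SecretId="demo-secret")["SecretString"] == "v2"

    if __name__ == "__main__":
        update_secret_roundtrip()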
---
 moto/secretsmanager/models.py    | 28 +++++++
 moto/secretsmanager/responses.py | 10 +++
 .../test_secretsmanager.py       | 76 +++++++++++++++++++
 3 files changed, 114 insertions(+)

diff --git a/moto/secretsmanager/models.py b/moto/secretsmanager/models.py
index 294a6401e6a7..11a024be622c 100644
--- a/moto/secretsmanager/models.py
+++ b/moto/secretsmanager/models.py
@@ -107,6 +107,34 @@ def get_secret_value(self, secret_id, version_id, version_stage):
 
         return response
 
+    def update_secret(
+        self, secret_id, secret_string=None, secret_binary=None, **kwargs
+    ):
+
+        # error if secret does not exist
+        if secret_id not in self.secrets.keys():
+            raise SecretNotFoundException()
+
+        if "deleted_date" in self.secrets[secret_id]:
+            raise InvalidRequestException(
+                "An error occurred (InvalidRequestException) when calling the UpdateSecret operation: "
+                "You can't perform this operation on the secret because it was marked for deletion."
+            )
+
+        version_id = self._add_secret(
+            secret_id, secret_string=secret_string, secret_binary=secret_binary
+        )
+
+        response = json.dumps(
+            {
+                "ARN": secret_arn(self.region, secret_id),
+                "Name": secret_id,
+                "VersionId": version_id,
+            }
+        )
+
+        return response
+
     def create_secret(
         self, name, secret_string=None, secret_binary=None, tags=[], **kwargs
     ):
diff --git a/moto/secretsmanager/responses.py b/moto/secretsmanager/responses.py
index 28af7b91d72c..757b888a34c4 100644
--- a/moto/secretsmanager/responses.py
+++ b/moto/secretsmanager/responses.py
@@ -29,6 +29,16 @@ def create_secret(self):
             tags=tags,
         )
 
+    def update_secret(self):
+        secret_id = self._get_param("SecretId")
+        secret_string = self._get_param("SecretString")
+        secret_binary = self._get_param("SecretBinary")
+        return secretsmanager_backends[self.region].update_secret(
+            secret_id=secret_id,
+            secret_string=secret_string,
+            secret_binary=secret_binary,
+        )
+
     def get_random_password(self):
         password_length = self._get_param("PasswordLength", if_none=32)
         exclude_characters = self._get_param("ExcludeCharacters", if_none="")
diff --git a/tests/test_secretsmanager/test_secretsmanager.py b/tests/test_secretsmanager/test_secretsmanager.py
index 3b8c74e81ef0..49d1dc925117 100644
--- a/tests/test_secretsmanager/test_secretsmanager.py
+++ b/tests/test_secretsmanager/test_secretsmanager.py
@@ -711,3 +711,79 @@ def test_can_list_secret_version_ids():
     returned_version_ids = [v["VersionId"] for v in versions_list["Versions"]]
 
     assert [first_version_id, second_version_id].sort() == returned_version_ids.sort()
+
+
+@mock_secretsmanager
+def test_update_secret():
+    conn = boto3.client("secretsmanager", region_name="us-west-2")
+
+    created_secret = conn.create_secret(Name="test-secret", SecretString="foosecret")
+
+    assert created_secret["ARN"]
+    assert created_secret["Name"] == "test-secret"
+    assert created_secret["VersionId"] != ""
+
+    secret = conn.get_secret_value(SecretId="test-secret")
+    assert secret["SecretString"] == "foosecret"
+
+    updated_secret = conn.update_secret(
+        SecretId="test-secret", SecretString="barsecret"
+    )
+
+    assert updated_secret["ARN"]
+    assert updated_secret["Name"] == "test-secret"
+    assert updated_secret["VersionId"] != ""
+
+    secret = conn.get_secret_value(SecretId="test-secret")
+    assert secret["SecretString"] == "barsecret"
+    assert created_secret["VersionId"] != updated_secret["VersionId"]
+
+
+@mock_secretsmanager
+def test_update_secret_which_does_not_exist():
+    conn = boto3.client("secretsmanager", region_name="us-west-2")
+
+    with assert_raises(ClientError) as cm:
+        updated_secret =
conn.update_secret( + SecretId="test-secret", SecretString="barsecret" + ) + + assert_equal( + "Secrets Manager can't find the specified secret.", + cm.exception.response["Error"]["Message"], + ) + + +@mock_secretsmanager +def test_update_secret_marked_as_deleted(): + conn = boto3.client("secretsmanager", region_name="us-west-2") + + created_secret = conn.create_secret(Name="test-secret", SecretString="foosecret") + deleted_secret = conn.delete_secret(SecretId="test-secret") + + with assert_raises(ClientError) as cm: + updated_secret = conn.update_secret( + SecretId="test-secret", SecretString="barsecret" + ) + + assert ( + "because it was marked for deletion." + in cm.exception.response["Error"]["Message"] + ) + + +@mock_secretsmanager +def test_update_secret_marked_as_deleted_after_restoring(): + conn = boto3.client("secretsmanager", region_name="us-west-2") + + created_secret = conn.create_secret(Name="test-secret", SecretString="foosecret") + deleted_secret = conn.delete_secret(SecretId="test-secret") + restored_secret = conn.restore_secret(SecretId="test-secret") + + updated_secret = conn.update_secret( + SecretId="test-secret", SecretString="barsecret" + ) + + assert updated_secret["ARN"] + assert updated_secret["Name"] == "test-secret" + assert updated_secret["VersionId"] != "" From b7f4ae21d17cc16580295bb5d6741bffb243e6ed Mon Sep 17 00:00:00 2001 From: Erik Hovland Date: Wed, 15 Apr 2020 20:08:44 -0700 Subject: [PATCH 232/658] Add assume_role_with_saml to STSBackend. Add the assume_role_with_saml method to the STSBackend class. --- moto/sts/models.py | 21 +++++++++++++++++++++ 1 file changed, 21 insertions(+) diff --git a/moto/sts/models.py b/moto/sts/models.py index 12824b2ed1cb..b274b1acdcfe 100644 --- a/moto/sts/models.py +++ b/moto/sts/models.py @@ -1,5 +1,7 @@ from __future__ import unicode_literals +from base64 import b64decode import datetime +import xmltodict from moto.core import BaseBackend, BaseModel from moto.core.utils import iso_8601_datetime_with_milliseconds from moto.core import ACCOUNT_ID @@ -79,5 +81,24 @@ def get_assumed_role_from_access_key(self, access_key_id): def assume_role_with_web_identity(self, **kwargs): return self.assume_role(**kwargs) + def assume_role_with_saml(self, **kwargs): + del kwargs["principal_arn"] + saml_assertion_encoded = kwargs.pop("saml_assertion") + saml_assertion_decoded = b64decode(saml_assertion_encoded) + saml_assertion = xmltodict.parse(saml_assertion_decoded.decode("utf-8")) + kwargs["duration"] = int( + saml_assertion["samlp:Response"]["Assertion"]["AttributeStatement"][ + "Attribute" + ][2]["AttributeValue"] + ) + kwargs["role_session_name"] = saml_assertion["samlp:Response"]["Assertion"][ + "AttributeStatement" + ]["Attribute"][0]["AttributeValue"] + kwargs["external_id"] = None + kwargs["policy"] = None + role = AssumedRole(**kwargs) + self.assumed_roles.append(role) + return role + sts_backend = STSBackend() From b10718eea7fde315003c2e8ee83bd92a2a5d03fe Mon Sep 17 00:00:00 2001 From: Erik Hovland Date: Wed, 15 Apr 2020 20:10:22 -0700 Subject: [PATCH 233/658] Add AssumeRoleWithSAML response to responses.py. Add the AssumeRoleWithSAML response to the available STS responses. 
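To sketch the call path this response enables (building on the backend method from the previous commit; the ARNs below are placeholders, and saml_assertion_b64 stands for a base64-encoded SAML response document such as the fixture added in the test commit that follows):

import boto3
from moto import mock_sts


@mock_sts
def demo_assume_role_with_saml(saml_assertion_b64):
    client = boto3.client("sts", region_name="us-east-1")
    response = client.assume_role_with_saml(
        RoleArn="arn:aws:iam::123456789012:role/test-role",
        PrincipalArn="arn:aws:iam::123456789012:saml-provider/TestProvFed",
        SAMLAssertion=saml_assertion_b64,
    )
    # Credentials and AssumedRoleUser are rendered from the template below
    return response["Credentials"]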
--- moto/sts/responses.py | 37 +++++++++++++++++++++++++++++++++++++ 1 file changed, 37 insertions(+) diff --git a/moto/sts/responses.py b/moto/sts/responses.py index f36799b03267..9af2c3e12336 100644 --- a/moto/sts/responses.py +++ b/moto/sts/responses.py @@ -71,6 +71,19 @@ def assume_role_with_web_identity(self): template = self.response_template(ASSUME_ROLE_WITH_WEB_IDENTITY_RESPONSE) return template.render(role=role) + def assume_role_with_saml(self): + role_arn = self.querystring.get("RoleArn")[0] + principal_arn = self.querystring.get("PrincipalArn")[0] + saml_assertion = self.querystring.get("SAMLAssertion")[0] + + role = sts_backend.assume_role_with_saml( + role_arn=role_arn, + principal_arn=principal_arn, + saml_assertion=saml_assertion, + ) + template = self.response_template(ASSUME_ROLE_WITH_SAML_RESPONSE) + return template.render(role=role) + def get_caller_identity(self): template = self.response_template(GET_CALLER_IDENTITY_RESPONSE) @@ -168,6 +181,30 @@ def get_caller_identity(self): """ +ASSUME_ROLE_WITH_SAML_RESPONSE = """<AssumeRoleWithSAMLResponse xmlns="https://sts.amazonaws.com/doc/2011-06-15/"> + <AssumeRoleWithSAMLResult> + <Audience>https://signin.aws.amazon.com/saml</Audience> + <AssumedRoleUser> + <AssumedRoleId>{{ role.user_id }}</AssumedRoleId> + <Arn>{{ role.arn }}</Arn> + </AssumedRoleUser> + <Credentials> + <AccessKeyId>{{ role.access_key_id }}</AccessKeyId> + <SecretAccessKey>{{ role.secret_access_key }}</SecretAccessKey> + <SessionToken>{{ role.session_token }}</SessionToken> + <Expiration>{{ role.expiration_ISO8601 }}</Expiration> + </Credentials> + <Subject>{{ role.user_id }}</Subject> + <NameQualifier>B64EncodedStringOfHashOfIssuerAccountIdAndUserId=</NameQualifier> + <SubjectType>persistent</SubjectType> + <Issuer>http://localhost:3000/</Issuer> + </AssumeRoleWithSAMLResult> + <ResponseMetadata> + <RequestId>c6104cbe-af31-11e0-8154-cbc7ccf896c7</RequestId> + </ResponseMetadata> +</AssumeRoleWithSAMLResponse>""" + + GET_CALLER_IDENTITY_RESPONSE = """<GetCallerIdentityResponse xmlns="https://sts.amazonaws.com/doc/2011-06-15/"> <GetCallerIdentityResult> <Arn>{{ arn }}</Arn> From 88494c58f9a45a3d100837d74ad9b4bbc9e9d24e Mon Sep 17 00:00:00 2001 From: Erik Hovland Date: Wed, 15 Apr 2020 20:11:33 -0700 Subject: [PATCH 234/658] Add a test for assume_role_with_saml. Add a test with SAML assertion to test the assume_role_with_saml method in the STSBackend. --- tests/test_sts/test_sts.py | 123 +++++++++++++++++++++++++++++++++++++ 1 file changed, 123 insertions(+) diff --git a/tests/test_sts/test_sts.py b/tests/test_sts/test_sts.py index 4dee9184f856..efc04beb4e62 100644 --- a/tests/test_sts/test_sts.py +++ b/tests/test_sts/test_sts.py @@ -1,4 +1,5 @@ from __future__ import unicode_literals +from base64 import b64encode import json import boto @@ -103,6 +104,128 @@ def test_assume_role(): ) +@freeze_time("2012-01-01 12:00:00") +@mock_sts +def test_assume_role_with_saml(): + client = boto3.client("sts", region_name="us-east-1") + + session_name = "session-name" + policy = json.dumps( + { + "Statement": [ + { + "Sid": "Stmt13690092345534", + "Action": ["S3:ListBucket"], + "Effect": "Allow", + "Resource": ["arn:aws:s3:::foobar-tester"], + } + ] + } + ) + role_name = "test-role" + provider_name = "TestProvFed" + user_name = "testuser" + role_input = "arn:aws:iam::{account_id}:role/{role_name}".format( + account_id=ACCOUNT_ID, role_name=role_name ) + principal_role = "arn:aws:iam:{account_id}:saml-provider/{provider_name}".format( + account_id=ACCOUNT_ID, provider_name=provider_name ) + saml_assertion = """<?xml version="1.0"?> +<samlp:Response xmlns:samlp="urn:oasis:names:tc:SAML:2.0:protocol" ID="_00000000-0000-0000-0000-000000000000" Version="2.0" IssueInstant="2012-01-01T12:00:00.000Z" Destination="https://signin.aws.amazon.com/saml" Consent="urn:oasis:names:tc:SAML:2.0:consent:unspecified"> + <Issuer xmlns="urn:oasis:names:tc:SAML:2.0:assertion">http://localhost/</Issuer> + <samlp:Status> + <samlp:StatusCode Value="urn:oasis:names:tc:SAML:2.0:status:Success"/> + </samlp:Status> + <Assertion xmlns="urn:oasis:names:tc:SAML:2.0:assertion" ID="_00000000-0000-0000-0000-000000000000" IssueInstant="2012-01-01T12:00:00.000Z" Version="2.0"> + <Issuer>http://localhost:3000/</Issuer> + <ds:Signature xmlns:ds="http://www.w3.org/2000/09/xmldsig#"> + <ds:SignedInfo> + <ds:CanonicalizationMethod Algorithm="http://www.w3.org/2001/10/xml-exc-c14n#"/> + <ds:SignatureMethod Algorithm="http://www.w3.org/2001/04/xmldsig-more#rsa-sha256"/> + <ds:Reference URI="#_00000000-0000-0000-0000-000000000000"> + <ds:Transforms> + <ds:Transform Algorithm="http://www.w3.org/2000/09/xmldsig#enveloped-signature"/> + <ds:Transform Algorithm="http://www.w3.org/2001/10/xml-exc-c14n#"/> + </ds:Transforms> + <ds:DigestMethod Algorithm="http://www.w3.org/2001/04/xmlenc#sha256"/> + <ds:DigestValue>NTIyMzk0ZGI4MjI0ZjI5ZGNhYjkyOGQyZGQ1NTZjODViZjk5YTY4ODFjOWRjNjkyYzZmODY2ZDQ4NjlkZjY3YSAgLQo=</ds:DigestValue> + </ds:Reference> + </ds:SignedInfo> + <ds:SignatureValue>NTIyMzk0ZGI4MjI0ZjI5ZGNhYjkyOGQyZGQ1NTZjODViZjk5YTY4ODFjOWRjNjkyYzZmODY2ZDQ4NjlkZjY3YSAgLQo=</ds:SignatureValue> + <KeyInfo xmlns="http://www.w3.org/2000/09/xmldsig#"> + <ds:X509Data> + <ds:X509Certificate>NTIyMzk0ZGI4MjI0ZjI5ZGNhYjkyOGQyZGQ1NTZjODViZjk5YTY4ODFjOWRjNjkyYzZmODY2ZDQ4NjlkZjY3YSAgLQo=</ds:X509Certificate> + </ds:X509Data> + </KeyInfo> + </ds:Signature> + <Subject> + <NameID Format="urn:oasis:names:tc:SAML:2.0:nameid-format:persistent">{username}</NameID> + <SubjectConfirmation Method="urn:oasis:names:tc:SAML:2.0:cm:bearer"> + <SubjectConfirmationData NotOnOrAfter="2012-01-01T13:00:00.000Z" Recipient="https://signin.aws.amazon.com/saml"/> + </SubjectConfirmation> + </Subject> + <Conditions NotBefore="2012-01-01T12:00:00.000Z" NotOnOrAfter="2012-01-01T13:00:00.000Z"> + <AudienceRestriction> + <Audience>urn:amazon:webservices</Audience> + </AudienceRestriction> + </Conditions> + <AttributeStatement> + <Attribute Name="https://aws.amazon.com/SAML/Attributes/RoleSessionName"> + <AttributeValue>{username}@localhost</AttributeValue> + </Attribute> + <Attribute Name="https://aws.amazon.com/SAML/Attributes/Role"> + <AttributeValue>arn:aws:iam::{account_id}:saml-provider/{provider_name},arn:aws:iam::{account_id}:role/{role_name}</AttributeValue> + </Attribute> + <Attribute Name="https://aws.amazon.com/SAML/Attributes/SessionDuration"> + <AttributeValue>900</AttributeValue> + </Attribute> + </AttributeStatement> + <AuthnStatement AuthnInstant="2012-01-01T12:00:00.000Z" SessionIndex="_00000000-0000-0000-0000-000000000000"> + <AuthnContext> + <AuthnContextClassRef>urn:oasis:names:tc:SAML:2.0:ac:classes:PasswordProtectedTransport</AuthnContextClassRef> + </AuthnContext> + </AuthnStatement> + </Assertion> +</samlp:Response> +""".format( + account_id=ACCOUNT_ID, + role_name=role_name, + provider_name=provider_name, + username=user_name, + ).replace( + "\n", "" + ) + + assume_role_response = client.assume_role_with_saml( + RoleArn=role_input, + PrincipalArn=principal_role, + SAMLAssertion=b64encode(saml_assertion.encode("utf-8")).decode("utf-8"), + ) + + credentials = assume_role_response["Credentials"] + if not settings.TEST_SERVER_MODE: + credentials["Expiration"].isoformat().should.equal("2012-01-01T12:15:00+00:00") + credentials["SessionToken"].should.have.length_of(356) + assert credentials["SessionToken"].startswith("FQoGZXIvYXdzE") + credentials["AccessKeyId"].should.have.length_of(20) + assert credentials["AccessKeyId"].startswith("ASIA") + credentials["SecretAccessKey"].should.have.length_of(40) + + assume_role_response["AssumedRoleUser"]["Arn"].should.equal( + "arn:aws:sts::{account_id}:assumed-role/{role_name}/{fed_name}@localhost".format( + account_id=ACCOUNT_ID, role_name=role_name, fed_name=user_name + ) + ) + assert assume_role_response["AssumedRoleUser"]["AssumedRoleId"].startswith("AROA") + assert assume_role_response["AssumedRoleUser"]["AssumedRoleId"].endswith( + ":{fed_name}@localhost".format(fed_name=user_name) + ) + assume_role_response["AssumedRoleUser"]["AssumedRoleId"].should.have.length_of( + 21 + 1 + len("{fed_name}@localhost".format(fed_name=user_name)) + ) + + @freeze_time("2012-01-01 12:00:00") @mock_sts_deprecated def test_assume_role_with_web_identity(): From 4dc46a697d21eed1c22edcb2f8ffafbaa9e5445a Mon Sep 17 00:00:00 2001 From: Hugo Lopes Tavares Date: Thu, 16 Apr 2020 15:14:37 -0400 Subject: [PATCH 235/658] Bugfix: Allow stop_db_instance for compatible engines From the RDS documentation: You can stop and start a DB instance whether it is configured for a single Availability Zone or for Multi-AZ, for database engines that support Multi-AZ deployments. You can't stop an Amazon RDS for SQL Server DB instance in a Multi-AZ configuration. https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_StopInstance.html#USER_StopInstance.Limitations --- moto/rds2/models.py | 5 ++++- tests/test_rds2/test_rds2.py | 31 +++++++++++++++++++++++++++++-- 2 files changed, 33 insertions(+), 3 deletions(-) diff --git a/moto/rds2/models.py b/moto/rds2/models.py index 963af1c637da..722d7d4fd88d 100644 --- a/moto/rds2/models.py +++ b/moto/rds2/models.py @@ -865,7 +865,10 @@ def reboot_db_instance(self, db_instance_identifier): def stop_database(self, db_instance_identifier, db_snapshot_identifier=None): database = self.describe_databases(db_instance_identifier)[0] # todo: certain rds types not allowed to be stopped at this time. 
- if database.is_replica or database.multi_az: + # https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_StopInstance.html#USER_StopInstance.Limitations + if database.is_replica or ( + database.multi_az and database.engine.lower().startswith("sqlserver") + ): # todo: more db types not supported by stop/start instance api raise InvalidDBClusterStateFaultError(db_instance_identifier) if database.status != "available": diff --git a/tests/test_rds2/test_rds2.py b/tests/test_rds2/test_rds2.py index e93ff43e9225..13e35549a52d 100644 --- a/tests/test_rds2/test_rds2.py +++ b/tests/test_rds2/test_rds2.py @@ -183,12 +183,12 @@ def test_start_database(): @mock_rds2 -def test_fail_to_stop_multi_az(): +def test_fail_to_stop_multi_az_and_sqlserver(): conn = boto3.client("rds", region_name="us-west-2") database = conn.create_db_instance( DBInstanceIdentifier="db-master-1", AllocatedStorage=10, - Engine="postgres", + Engine="sqlserver-ee", DBName="staging-postgres", DBInstanceClass="db.m1.small", LicenseModel="license-included", @@ -213,6 +213,33 @@ def test_fail_to_stop_multi_az(): ).should.throw(ClientError) +@mock_rds2 +def test_stop_multi_az_postgres(): + conn = boto3.client("rds", region_name="us-west-2") + database = conn.create_db_instance( + DBInstanceIdentifier="db-master-1", + AllocatedStorage=10, + Engine="postgres", + DBName="staging-postgres", + DBInstanceClass="db.m1.small", + LicenseModel="license-included", + MasterUsername="root", + MasterUserPassword="hunter2", + Port=1234, + DBSecurityGroups=["my_sg"], + MultiAZ=True, + ) + + mydb = conn.describe_db_instances( + DBInstanceIdentifier=database["DBInstance"]["DBInstanceIdentifier"] + )["DBInstances"][0] + mydb["DBInstanceStatus"].should.equal("available") + + response = conn.stop_db_instance(DBInstanceIdentifier=mydb["DBInstanceIdentifier"]) + response["ResponseMetadata"]["HTTPStatusCode"].should.equal(200) + response["DBInstance"]["DBInstanceStatus"].should.equal("stopped") + + @mock_rds2 def test_fail_to_stop_readreplica(): conn = boto3.client("rds", region_name="us-west-2") From 76a249c0ecbc616588b5ccf26224ad9efa9a05c9 Mon Sep 17 00:00:00 2001 From: Andrey Kislyuk Date: Thu, 16 Apr 2020 21:28:27 -0700 Subject: [PATCH 236/658] awslambda: Do not assume X-Amz-Invocation-Type is set --- moto/awslambda/responses.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/moto/awslambda/responses.py b/moto/awslambda/responses.py index ce6c93f16acb..28b0e74fd1ae 100644 --- a/moto/awslambda/responses.py +++ b/moto/awslambda/responses.py @@ -184,9 +184,9 @@ def _invoke(self, request, full_url): function_name, qualifier, self.body, self.headers, response_headers ) if payload: - if request.headers["X-Amz-Invocation-Type"] == "Event": + if request.headers.get("X-Amz-Invocation-Type") == "Event": status_code = 202 - elif request.headers["X-Amz-Invocation-Type"] == "DryRun": + elif request.headers.get("X-Amz-Invocation-Type") == "DryRun": status_code = 204 else: status_code = 200 From 7ea419dd54f475660f4e927ad39d62d226a43513 Mon Sep 17 00:00:00 2001 From: pvbouwel Date: Sat, 11 Apr 2020 11:07:22 +0100 Subject: [PATCH 237/658] Better DDB expressions support1: TokenizationDDB Currently the mock for DynamoDB has adhoc code to implement its updateExpression functionality. 
This series will transform the logic such that Update Expressions are processed as follows: 1) Expression gets parsed into a tokenlist (tokenized) -> This commit 2) Tokenlist gets transformed to expression tree (AST) 3) The AST gets validated (full semantic correctness) 4) AST gets processed to perform the update This allows for more realistic mocking. It will throw exceptions much more aggressively, avoiding situations where a test passes against the mock but fails with an exception when running against AWS. Introduction of step 3 also allows treating the update expression as an atomic unit of work, so updates at the start of the expression are not performed if there is an error further down the expression. This specific commit tokenizes expressions, but the tokenlist is not yet used; this is purely to keep boundaries clear. It also does a minor refactoring of the exceptions to allow more re-use and to ease testing. This series of changes is meant to provide a long-term solution for https://github.com/spulec/moto/issues/2806. --- moto/dynamodb2/exceptions.py | 58 +++++- moto/dynamodb2/parsing/__init__.py | 0 moto/dynamodb2/parsing/tokens.py | 210 ++++++++++++++ moto/dynamodb2/responses.py | 14 +- .../test_dynamodb_expression_tokenizer.py | 259 ++++++++++++++++++ 5 files changed, 527 insertions(+), 14 deletions(-) create mode 100644 moto/dynamodb2/parsing/__init__.py create mode 100644 moto/dynamodb2/parsing/tokens.py create mode 100644 tests/test_dynamodb2/test_dynamodb_expression_tokenizer.py diff --git a/moto/dynamodb2/exceptions.py b/moto/dynamodb2/exceptions.py index 1f3b5f97477e..4c5dfd447a7a 100644 --- a/moto/dynamodb2/exceptions.py +++ b/moto/dynamodb2/exceptions.py @@ -2,9 +2,59 @@ class InvalidIndexNameError(ValueError): pass -class InvalidUpdateExpression(ValueError): - pass +class MockValidationException(ValueError): + def __init__(self, message): + self.exception_msg = message + + +class InvalidUpdateExpression(MockValidationException): + invalid_update_expression_msg = ( + "The document path provided in the update expression is invalid for update" + ) + + def __init__(self): + super(InvalidUpdateExpression, self).__init__( + self.invalid_update_expression_msg + ) + + +class UpdateExprSyntaxError(MockValidationException): + update_expr_syntax_error_msg = ( + "Invalid UpdateExpression: Syntax error; {error_detail}" + ) + + def __init__(self, error_detail): + self.error_detail = error_detail + super(UpdateExprSyntaxError, self).__init__( + self.update_expr_syntax_error_msg.format(error_detail=error_detail) + ) + + +class InvalidTokenException(UpdateExprSyntaxError): + token_detail_msg = 'token: "{token}", near: "{near}"' + + def __init__(self, token, near): + self.token = token + self.near = near + super(InvalidTokenException, self).__init__( + self.token_detail_msg.format(token=token, near=near) + ) + + +class InvalidExpressionAttributeNameKey(MockValidationException): + invalid_expr_attr_name_msg = ( + 'ExpressionAttributeNames contains invalid key: Syntax error; key: "{key}"' + ) + + def __init__(self, key): + self.key = key + super(InvalidExpressionAttributeNameKey, self).__init__( + self.invalid_expr_attr_name_msg.format(key=key) + ) + +class ItemSizeTooLarge(MockValidationException): + item_size_too_large_msg = "Item size has exceeded the maximum allowed size" -class ItemSizeTooLarge(Exception): - message = "Item size has exceeded the maximum allowed size" + def __init__(self): + super(ItemSizeTooLarge, self).__init__(self.item_size_too_large_msg) diff --git
a/moto/dynamodb2/parsing/__init__.py b/moto/dynamodb2/parsing/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/moto/dynamodb2/parsing/tokens.py b/moto/dynamodb2/parsing/tokens.py new file mode 100644 index 000000000000..07d65ae64d24 --- /dev/null +++ b/moto/dynamodb2/parsing/tokens.py @@ -0,0 +1,210 @@ +import re + +from moto.dynamodb2.exceptions import ( + InvalidTokenException, + InvalidExpressionAttributeNameKey, +) + + +class Token(object): + _TOKEN_INSTANCE = None + MINUS_SIGN = "-" + PLUS_SIGN = "+" + SPACE_SIGN = " " + EQUAL_SIGN = "=" + OPEN_ROUND_BRACKET = "(" + CLOSE_ROUND_BRACKET = ")" + COMMA = "," + SPACE = " " + DOT = "." + OPEN_SQUARE_BRACKET = "[" + CLOSE_SQUARE_BRACKET = "]" + + SPECIAL_CHARACTERS = [ + MINUS_SIGN, + PLUS_SIGN, + SPACE_SIGN, + EQUAL_SIGN, + OPEN_ROUND_BRACKET, + CLOSE_ROUND_BRACKET, + COMMA, + SPACE, + DOT, + OPEN_SQUARE_BRACKET, + CLOSE_SQUARE_BRACKET, + ] + + # Attribute: an identifier that is an attribute + ATTRIBUTE = 0 + # Place holder for attribute name + ATTRIBUTE_NAME = 1 + # Placeholder for attribute value starts with : + ATTRIBUTE_VALUE = 2 + # WhiteSpace shall be grouped together + WHITESPACE = 3 + # Placeholder for a number + NUMBER = 4 + + PLACEHOLDER_NAMES = { + ATTRIBUTE: "Attribute", + ATTRIBUTE_NAME: "AttributeName", + ATTRIBUTE_VALUE: "AttributeValue", + WHITESPACE: "Whitespace", + NUMBER: "Number", + } + + def __init__(self, token_type, value): + assert ( + token_type in self.SPECIAL_CHARACTERS + or token_type in self.PLACEHOLDER_NAMES + ) + self.type = token_type + self.value = value + + def __repr__(self): + if isinstance(self.type, int): + return 'Token("{tt}", "{tv}")'.format( + tt=self.PLACEHOLDER_NAMES[self.type], tv=self.value + ) + else: + return 'Token("{tt}", "{tv}")'.format(tt=self.type, tv=self.value) + + def __eq__(self, other): + return self.type == other.type and self.value == other.value + + +class ExpressionTokenizer(object): + """ + Takes a string and returns a list of tokens. While attribute names in DynamoDB must be between 1 and 255 characters + long there are no other restrictions for attribute names. For expressions however there are additional rules. If an + attribute name does not adhere then it must be passed via an ExpressionAttributeName. This tokenizer is aware of the + rules of Expression attributes. + + We consider a Token as a tuple which has the tokenType + + From https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.ExpressionAttributeNames.html + 1) If an attribute name begins with a number or contains a space, a special character, or a reserved word, you + must use an expression attribute name to replace that attribute's name in the expression. + => So spaces,+,- or other special characters do identify tokens in update expressions + + 2) When using a dot (.) in an attribute name you must use expression-attribute-names. A dot in an expression + will be interpreted as a separator in a document path + + 3) For a nested structure if you want to use expression_attribute_names you must specify one per part of the + path. Since for members of expression_attribute_names the . 
is part of the name + + """ + + @classmethod + def is_simple_token_character(cls, character): + return character.isalnum() or character in ("_", ":", "#") + + @classmethod + def is_possible_token_boundary(cls, character): + return ( + character in Token.SPECIAL_CHARACTERS + or not cls.is_simple_token_character(character) + ) + + @classmethod + def is_expression_attribute(cls, input_string): + return re.compile("^[a-zA-Z][a-zA-Z0-9_]*$").match(input_string) is not None + + @classmethod + def is_expression_attribute_name(cls, input_string): + """ + https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.ExpressionAttributeNames.html + An expression attribute name must begin with a pound sign (#), and be followed by one or more alphanumeric + characters. + """ + return input_string.startswith("#") and cls.is_expression_attribute( + input_string[1:] + ) + + @classmethod + def is_expression_attribute_value(cls, input_string): + return re.compile("^:[a-zA-Z0-9_]*$").match(input_string) is not None + + def raise_unexpected_token(self): + """If during parsing an unexpected token is encountered""" + if len(self.token_list) == 0: + near = "" + else: + if len(self.token_list) == 1: + near = self.token_list[-1].value + else: + if self.token_list[-1].type == Token.WHITESPACE: + # Last token was whitespace; take the 2nd-last token value as well to help the user orientate + near = self.token_list[-2].value + self.token_list[-1].value + else: + near = self.token_list[-1].value + + problematic_token = self.staged_characters[0] + raise InvalidTokenException(problematic_token, near + self.staged_characters) + + def __init__(self, input_expression_str): + self.input_expression_str = input_expression_str + self.token_list = [] + self.staged_characters = "" + + @classmethod + def make_list(cls, input_expression_str): + assert isinstance(input_expression_str, str) + return ExpressionTokenizer(input_expression_str)._make_list() + + def add_token(self, token_type, token_value): + self.token_list.append(Token(token_type, token_value)) + + def add_token_from_stage(self, token_type): + self.add_token(token_type, self.staged_characters) + self.staged_characters = "" + + def process_staged_characters(self): + if len(self.staged_characters) == 0: + return + if self.staged_characters.startswith("#"): + if self.is_expression_attribute_name(self.staged_characters): + self.add_token_from_stage(Token.ATTRIBUTE_NAME) + else: + raise InvalidExpressionAttributeNameKey(self.staged_characters) + elif self.staged_characters.isnumeric(): + self.add_token_from_stage(Token.NUMBER) + elif self.is_expression_attribute(self.staged_characters): + self.add_token_from_stage(Token.ATTRIBUTE) + elif self.is_expression_attribute_value(self.staged_characters): + self.add_token_from_stage(Token.ATTRIBUTE_VALUE) + else: + self.raise_unexpected_token() + + def _make_list(self): + """ + Walk through the characters: if a character is not a token boundary, stage it so it can later be added as part of a grouped token; if it is a token boundary, process the staged characters and then process the token boundary itself.
+ """ + for character in self.input_expression_str: + if not self.is_possible_token_boundary(character): + self.staged_characters += character + else: + self.process_staged_characters() + + if character == Token.SPACE: + if ( + len(self.token_list) > 0 + and self.token_list[-1].type == Token.WHITESPACE + ): + self.token_list[-1].value = ( + self.token_list[-1].value + character + ) + else: + self.add_token(Token.WHITESPACE, character) + elif character in Token.SPECIAL_CHARACTERS: + self.add_token(character, character) + elif not self.is_simple_token_character(character): + self.staged_characters += character + self.raise_unexpected_token() + else: + raise NotImplementedError( + "Encountered character which was not implemented : " + character + ) + self.process_staged_characters() + return self.token_list diff --git a/moto/dynamodb2/responses.py b/moto/dynamodb2/responses.py index 65484aa0818d..d21d1d756604 100644 --- a/moto/dynamodb2/responses.py +++ b/moto/dynamodb2/responses.py @@ -9,7 +9,7 @@ from moto.core.responses import BaseResponse from moto.core.utils import camelcase_to_underscores, amzn_request_id -from .exceptions import InvalidIndexNameError, InvalidUpdateExpression, ItemSizeTooLarge +from .exceptions import InvalidIndexNameError, InvalidUpdateExpression, ItemSizeTooLarge, MockValidationException from moto.dynamodb2.models import dynamodb_backends, dynamo_json_dump @@ -298,7 +298,7 @@ def put_item(self): ) except ItemSizeTooLarge: er = "com.amazonaws.dynamodb.v20111205#ValidationException" - return self.error(er, ItemSizeTooLarge.message) + return self.error(er, ItemSizeTooLarge.item_size_too_large_msg) except KeyError as ke: er = "com.amazonaws.dynamodb.v20111205#ValidationException" return self.error(er, ke.args[0]) @@ -764,15 +764,9 @@ def update_item(self): expected, condition_expression, ) - except InvalidUpdateExpression: + except MockValidationException as mve: er = "com.amazonaws.dynamodb.v20111205#ValidationException" - return self.error( - er, - "The document path provided in the update expression is invalid for update", - ) - except ItemSizeTooLarge: - er = "com.amazonaws.dynamodb.v20111205#ValidationException" - return self.error(er, ItemSizeTooLarge.message) + return self.error(er, mve.exception_msg) except ValueError: er = "com.amazonaws.dynamodb.v20111205#ConditionalCheckFailedException" return self.error( diff --git a/tests/test_dynamodb2/test_dynamodb_expression_tokenizer.py b/tests/test_dynamodb2/test_dynamodb_expression_tokenizer.py new file mode 100644 index 000000000000..3330d431ecd8 --- /dev/null +++ b/tests/test_dynamodb2/test_dynamodb_expression_tokenizer.py @@ -0,0 +1,259 @@ +from moto.dynamodb2.exceptions import ( + InvalidTokenException, + InvalidExpressionAttributeNameKey, +) +from moto.dynamodb2.parsing.tokens import ExpressionTokenizer, Token + + +def test_expression_tokenizer_single_set_action(): + set_action = "SET attrName = :attrValue" + token_list = ExpressionTokenizer.make_list(set_action) + assert token_list == [ + Token(Token.ATTRIBUTE, "SET"), + Token(Token.WHITESPACE, " "), + Token(Token.ATTRIBUTE, "attrName"), + Token(Token.WHITESPACE, " "), + Token(Token.EQUAL_SIGN, "="), + Token(Token.WHITESPACE, " "), + Token(Token.ATTRIBUTE_VALUE, ":attrValue"), + ] + + +def test_expression_tokenizer_single_set_action_leading_space(): + set_action = "Set attrName = :attrValue" + token_list = ExpressionTokenizer.make_list(set_action) + assert token_list == [ + Token(Token.ATTRIBUTE, "Set"), + Token(Token.WHITESPACE, " "), + Token(Token.ATTRIBUTE, 
"attrName"), + Token(Token.WHITESPACE, " "), + Token(Token.EQUAL_SIGN, "="), + Token(Token.WHITESPACE, " "), + Token(Token.ATTRIBUTE_VALUE, ":attrValue"), + ] + + +def test_expression_tokenizer_single_set_action_attribute_name_leading_space(): + set_action = "SET #a = :attrValue" + token_list = ExpressionTokenizer.make_list(set_action) + assert token_list == [ + Token(Token.ATTRIBUTE, "SET"), + Token(Token.WHITESPACE, " "), + Token(Token.ATTRIBUTE_NAME, "#a"), + Token(Token.WHITESPACE, " "), + Token(Token.EQUAL_SIGN, "="), + Token(Token.WHITESPACE, " "), + Token(Token.ATTRIBUTE_VALUE, ":attrValue"), + ] + + +def test_expression_tokenizer_single_set_action_trailing_space(): + set_action = "SET attrName = :attrValue " + token_list = ExpressionTokenizer.make_list(set_action) + assert token_list == [ + Token(Token.ATTRIBUTE, "SET"), + Token(Token.WHITESPACE, " "), + Token(Token.ATTRIBUTE, "attrName"), + Token(Token.WHITESPACE, " "), + Token(Token.EQUAL_SIGN, "="), + Token(Token.WHITESPACE, " "), + Token(Token.ATTRIBUTE_VALUE, ":attrValue"), + Token(Token.WHITESPACE, " "), + ] + + +def test_expression_tokenizer_single_set_action_multi_spaces(): + set_action = "SET attrName = :attrValue " + token_list = ExpressionTokenizer.make_list(set_action) + assert token_list == [ + Token(Token.ATTRIBUTE, "SET"), + Token(Token.WHITESPACE, " "), + Token(Token.ATTRIBUTE, "attrName"), + Token(Token.WHITESPACE, " "), + Token(Token.EQUAL_SIGN, "="), + Token(Token.WHITESPACE, " "), + Token(Token.ATTRIBUTE_VALUE, ":attrValue"), + Token(Token.WHITESPACE, " "), + ] + + +def test_expression_tokenizer_single_set_action_with_numbers_in_identifiers(): + set_action = "SET attrName3 = :attr3Value" + token_list = ExpressionTokenizer.make_list(set_action) + assert token_list == [ + Token(Token.ATTRIBUTE, "SET"), + Token(Token.WHITESPACE, " "), + Token(Token.ATTRIBUTE, "attrName3"), + Token(Token.WHITESPACE, " "), + Token(Token.EQUAL_SIGN, "="), + Token(Token.WHITESPACE, " "), + Token(Token.ATTRIBUTE_VALUE, ":attr3Value"), + ] + + +def test_expression_tokenizer_single_set_action_with_underscore_in_identifier(): + set_action = "SET attr_Name = :attr_Value" + token_list = ExpressionTokenizer.make_list(set_action) + assert token_list == [ + Token(Token.ATTRIBUTE, "SET"), + Token(Token.WHITESPACE, " "), + Token(Token.ATTRIBUTE, "attr_Name"), + Token(Token.WHITESPACE, " "), + Token(Token.EQUAL_SIGN, "="), + Token(Token.WHITESPACE, " "), + Token(Token.ATTRIBUTE_VALUE, ":attr_Value"), + ] + + +def test_expression_tokenizer_leading_underscore_in_attribute_name_expression(): + """Leading underscore is not allowed for an attribute name""" + set_action = "SET attrName = _idid" + try: + ExpressionTokenizer.make_list(set_action) + assert False, "Exception not raised correctly" + except InvalidTokenException as te: + assert te.token == "_" + assert te.near == "= _idid" + + +def test_expression_tokenizer_leading_underscore_in_attribute_value_expression(): + """Leading underscore is allowed in an attribute value""" + set_action = "SET attrName = :_attrValue" + token_list = ExpressionTokenizer.make_list(set_action) + assert token_list == [ + Token(Token.ATTRIBUTE, "SET"), + Token(Token.WHITESPACE, " "), + Token(Token.ATTRIBUTE, "attrName"), + Token(Token.WHITESPACE, " "), + Token(Token.EQUAL_SIGN, "="), + Token(Token.WHITESPACE, " "), + Token(Token.ATTRIBUTE_VALUE, ":_attrValue"), + ] + + +def test_expression_tokenizer_single_set_action_nested_attribute(): + set_action = "SET attrName.elem = :attrValue" + token_list = 
ExpressionTokenizer.make_list(set_action) + assert token_list == [ + Token(Token.ATTRIBUTE, "SET"), + Token(Token.WHITESPACE, " "), + Token(Token.ATTRIBUTE, "attrName"), + Token(Token.DOT, "."), + Token(Token.ATTRIBUTE, "elem"), + Token(Token.WHITESPACE, " "), + Token(Token.EQUAL_SIGN, "="), + Token(Token.WHITESPACE, " "), + Token(Token.ATTRIBUTE_VALUE, ":attrValue"), + ] + + +def test_expression_tokenizer_list_index_with_sub_attribute(): + set_action = "SET itemmap.itemlist[1].foos=:Item" + token_list = ExpressionTokenizer.make_list(set_action) + assert token_list == [ + Token(Token.ATTRIBUTE, "SET"), + Token(Token.WHITESPACE, " "), + Token(Token.ATTRIBUTE, "itemmap"), + Token(Token.DOT, "."), + Token(Token.ATTRIBUTE, "itemlist"), + Token(Token.OPEN_SQUARE_BRACKET, "["), + Token(Token.NUMBER, "1"), + Token(Token.CLOSE_SQUARE_BRACKET, "]"), + Token(Token.DOT, "."), + Token(Token.ATTRIBUTE, "foos"), + Token(Token.EQUAL_SIGN, "="), + Token(Token.ATTRIBUTE_VALUE, ":Item"), + ] + + +def test_expression_tokenizer_list_index_surrounded_with_whitespace(): + set_action = "SET itemlist[ 1 ]=:Item" + token_list = ExpressionTokenizer.make_list(set_action) + assert token_list == [ + Token(Token.ATTRIBUTE, "SET"), + Token(Token.WHITESPACE, " "), + Token(Token.ATTRIBUTE, "itemlist"), + Token(Token.OPEN_SQUARE_BRACKET, "["), + Token(Token.WHITESPACE, " "), + Token(Token.NUMBER, "1"), + Token(Token.WHITESPACE, " "), + Token(Token.CLOSE_SQUARE_BRACKET, "]"), + Token(Token.EQUAL_SIGN, "="), + Token(Token.ATTRIBUTE_VALUE, ":Item"), + ] + + +def test_expression_tokenizer_single_set_action_attribute_name_invalid_key(): + """ + ExpressionAttributeNames contains invalid key: Syntax error; key: "#va#l2" + """ + set_action = "SET #va#l2 = 3" + try: + ExpressionTokenizer.make_list(set_action) + assert False, "Exception not raised correctly" + except InvalidExpressionAttributeNameKey as e: + assert e.key == "#va#l2" + + +def test_expression_tokenizer_single_set_action_attribute_name_invalid_key_double_hash(): + """ + ExpressionAttributeNames contains invalid key: Syntax error; key: "#va#l" + """ + set_action = "SET #va#l = 3" + try: + ExpressionTokenizer.make_list(set_action) + assert False, "Exception not raised correctly" + except InvalidExpressionAttributeNameKey as e: + assert e.key == "#va#l" + + +def test_expression_tokenizer_single_set_action_attribute_name_valid_key(): + set_action = "SET attr=#val2" + token_list = ExpressionTokenizer.make_list(set_action) + assert token_list == [ + Token(Token.ATTRIBUTE, "SET"), + Token(Token.WHITESPACE, " "), + Token(Token.ATTRIBUTE, "attr"), + Token(Token.EQUAL_SIGN, "="), + Token(Token.ATTRIBUTE_NAME, "#val2"), + ] + + +def test_expression_tokenizer_just_a_pipe(): + set_action = "|" + try: + ExpressionTokenizer.make_list(set_action) + assert False, "Exception not raised correctly" + except InvalidTokenException as te: + assert te.token == "|" + assert te.near == "|" + + +def test_expression_tokenizer_just_a_pipe_with_leading_white_spaces(): + set_action = " |" + try: + ExpressionTokenizer.make_list(set_action) + assert False, "Exception not raised correctly" + except InvalidTokenException as te: + assert te.token == "|" + assert te.near == " |" + + +def test_expression_tokenizer_just_a_pipe_for_set_expression(): + set_action = "SET|" + try: + ExpressionTokenizer.make_list(set_action) + assert False, "Exception not raised correctly" + except InvalidTokenException as te: + assert te.token == "|" + assert te.near == "SET|" + + +def 
test_expression_tokenizer_just_an_attribute_and_a_pipe_for_set_expression(): + set_action = "SET a|" + try: + ExpressionTokenizer.make_list(set_action) + assert False, "Exception not raised correctly" + except InvalidTokenException as te: + assert te.token == "|" + assert te.near == "a|" From 9ed613e197e1d6e85f9631f7a15a2b8ce2f19b2e Mon Sep 17 00:00:00 2001 From: pvbouwel Date: Sat, 11 Apr 2020 21:17:16 +0100 Subject: [PATCH 238/658] Better DDB expressions support2: ExpressionTree Part of the structured approach for UpdateExpressions: 1) Expression gets parsed into a tokenlist (tokenized) 2) Tokenlist gets transformed to expression tree (AST) -> This commit 3) The AST gets validated (full semantic correctness) 4) AST gets processed to perform the update This commit uses the tokenlist to build an expression tree. This tree is not yet used. Still, it allows raising additional ValidationExceptions which previously were silently missed, and it therefore lets tests catch this type of ValidationException. For that reason DDB UpdateExpressions will already be parsed. It also makes sure we won't break existing tests. One of the existing tests had to be changed in order to still pass: - test_dynamodb_table_with_range_key.test_update_item_with_expression This test passed in a numeric literal, which is not supported by DynamoDB; with the current tokenization it gets the same error as in AWS DynamoDB.
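To illustrate the new entry point (a minimal sketch using the module paths introduced in this commit; the expression strings are illustrative):

from moto.dynamodb2.exceptions import MockValidationException
from moto.dynamodb2.parsing.expressions import UpdateExpressionParser


def is_valid_update_expression(expression):
    # For now parsing is done purely for its validation side effects;
    # the resulting AST is not consumed yet
    try:
        UpdateExpressionParser.make(expression)
        return True
    except MockValidationException:
        return False


assert is_valid_update_expression("SET a=:val")
assert not is_valid_update_expression("SET|")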
`a = b + c` -> `a=b+c` + if update_expression: + # Parse expression to get validation errors + UpdateExpressionParser.make(update_expression) + update_expression = re.sub(r"\s*([=\+-])\s*", "\\1", update_expression) + if all([table.hash_key_attr in key, table.range_key_attr in key]): # Covers cases where table has hash and range keys, ``key`` param # will be a dict diff --git a/moto/dynamodb2/parsing/ast_nodes.py b/moto/dynamodb2/parsing/ast_nodes.py new file mode 100644 index 000000000000..78c7b6b2bc8e --- /dev/null +++ b/moto/dynamodb2/parsing/ast_nodes.py @@ -0,0 +1,205 @@ +import abc +import six + + +@six.add_metaclass(abc.ABCMeta) +class Node: + def __init__(self, children=None): + self.type = self.__class__.__name__ + assert children is None or isinstance(children, list) + self.children = children + self.parent = None + + if isinstance(children, list): + for child in children: + if isinstance(child, Node): + child.set_parent(self) + + def set_parent(self, parent_node): + self.parent = parent_node + + +class LeafNode(Node): + """A LeafNode is a Node where none of the children are Nodes themselves.""" + + def __init__(self, children=None): + super(LeafNode, self).__init__(children) + + +@six.add_metaclass(abc.ABCMeta) +class Expression(Node): + """ + Abstract Syntax Tree representing the expression + + For the Grammar start here and jump down into the classes at the righ-hand side to look further. Nodes marked with + a star are abstract and won't appear in the final AST. + + Expression* => UpdateExpression + Expression* => ConditionExpression + """ + + +class UpdateExpression(Expression): + """ + UpdateExpression => UpdateExpressionClause* + UpdateExpression => UpdateExpressionClause* UpdateExpression + """ + + +@six.add_metaclass(abc.ABCMeta) +class UpdateExpressionClause(UpdateExpression): + """ + UpdateExpressionClause* => UpdateExpressionSetClause + UpdateExpressionClause* => UpdateExpressionRemoveClause + UpdateExpressionClause* => UpdateExpressionAddClause + UpdateExpressionClause* => UpdateExpressionDeleteClause + """ + + +class UpdateExpressionSetClause(UpdateExpressionClause): + """ + UpdateExpressionSetClause => SET SetActions + """ + + +class UpdateExpressionSetActions(UpdateExpressionClause): + """ + UpdateExpressionSetClause => SET SetActions + + SetActions => SetAction + SetActions => SetAction , SetActions + + """ + + +class UpdateExpressionSetAction(UpdateExpressionClause): + """ + SetAction => Path = Value + """ + + +class UpdateExpressionRemoveActions(UpdateExpressionClause): + """ + UpdateExpressionSetClause => REMOVE RemoveActions + + RemoveActions => RemoveAction + RemoveActions => RemoveAction , RemoveActions + """ + + +class UpdateExpressionRemoveAction(UpdateExpressionClause): + """ + RemoveAction => Path + """ + + +class UpdateExpressionAddActions(UpdateExpressionClause): + """ + UpdateExpressionAddClause => ADD RemoveActions + + AddActions => AddAction + AddActions => AddAction , AddActions + """ + + +class UpdateExpressionAddAction(UpdateExpressionClause): + """ + AddAction => Path Value + """ + + +class UpdateExpressionDeleteActions(UpdateExpressionClause): + """ + UpdateExpressionDeleteClause => DELETE RemoveActions + + DeleteActions => DeleteAction + DeleteActions => DeleteAction , DeleteActions + """ + + +class UpdateExpressionDeleteAction(UpdateExpressionClause): + """ + DeleteAction => Path Value + """ + + +class UpdateExpressionPath(UpdateExpressionClause): + pass + + +class UpdateExpressionValue(UpdateExpressionClause): + """ + Value => Operand + 
Value => Operand + Value + Value => Operand - Value + """ + + +class UpdateExpressionGroupedValue(UpdateExpressionClause): + """ + GroupedValue => ( Value ) + """ + + +class UpdateExpressionRemoveClause(UpdateExpressionClause): + """ + UpdateExpressionRemoveClause => REMOVE RemoveActions + """ + + +class UpdateExpressionAddClause(UpdateExpressionClause): + """ + UpdateExpressionAddClause => ADD AddActions + """ + + +class UpdateExpressionDeleteClause(UpdateExpressionClause): + """ + UpdateExpressionDeleteClause => DELETE DeleteActions + """ + + +class ExpressionPathDescender(Node): + """Node identifying descender into nested structure (.) in expression""" + + +class ExpressionSelector(LeafNode): + """Node identifying selector [selection_index] in expression""" + + def __init__(self, selection_index): + super(ExpressionSelector, self).__init__(children=[selection_index]) + + +class ExpressionAttribute(LeafNode): + """An attribute identifier as used in the DDB item""" + + def __init__(self, attribute): + super(ExpressionAttribute, self).__init__(children=[attribute]) + + +class ExpressionAttributeName(LeafNode): + """An ExpressionAttributeName is an alias for an attribute identifier""" + + def __init__(self, attribute_name): + super(ExpressionAttributeName, self).__init__(children=[attribute_name]) + + +class ExpressionAttributeValue(LeafNode): + """An ExpressionAttributeValue is an alias for a value""" + + def __init__(self, value): + super(ExpressionAttributeValue, self).__init__(children=[value]) + + +class ExpressionValueOperator(LeafNode): + """An ExpressionValueOperator is an operation that works on 2 values""" + + def __init__(self, value): + super(ExpressionValueOperator, self).__init__(children=[value]) + + +class UpdateExpressionFunction(Node): + """ + A Node representing a function of an Update Expression. The first child is the function name, the others are the + arguments. + """ diff --git a/moto/dynamodb2/parsing/expressions.py b/moto/dynamodb2/parsing/expressions.py new file mode 100644 index 000000000000..e418bb47ef00 --- /dev/null +++ b/moto/dynamodb2/parsing/expressions.py @@ -0,0 +1,1010 @@ +import logging +from abc import abstractmethod +import abc +import six +from collections import deque + +from moto.dynamodb2.parsing.ast_nodes import ( + UpdateExpression, + UpdateExpressionSetClause, + UpdateExpressionSetActions, + UpdateExpressionSetAction, + UpdateExpressionRemoveActions, + UpdateExpressionRemoveAction, + UpdateExpressionPath, + UpdateExpressionValue, + UpdateExpressionGroupedValue, + UpdateExpressionRemoveClause, + ExpressionPathDescender, + ExpressionSelector, + ExpressionAttribute, + ExpressionAttributeName, + ExpressionAttributeValue, + ExpressionValueOperator, + UpdateExpressionFunction, + UpdateExpressionAddClause, + UpdateExpressionAddActions, + UpdateExpressionAddAction, + UpdateExpressionDeleteAction, + UpdateExpressionDeleteActions, + UpdateExpressionDeleteClause, +) +from moto.dynamodb2.exceptions import InvalidTokenException +from moto.dynamodb2.parsing.tokens import Token, ExpressionTokenizer + + +class NestableExpressionParserMixin(object): + """ + For nodes that can be nested in themselves (recursive). Take for example UpdateExpression's grammar: + + UpdateExpression => UpdateExpressionClause* + UpdateExpression => UpdateExpressionClause* UpdateExpression + + If we consider it of structure + NestableExpression => TargetClause* + NestableExpression => TargetClause* NestableExpression + + This pattern comes back multiple times. 
This Mixin adds re-usability for that type of pattern. + + This approach is taken since it preserves the ordering of the Nodes, matching how the corresponding tokens were + ordered in the originating expression. + """ + + def __init__(self, *args, **kwargs): + self.target_clauses = deque() + + def _parse_target_clause(self, factory_class): + """ + + Args: + factory_class: The factory for the target clause e.g. UpdateExpressionSetClauseParser + + Returns: + + """ + logging.debug( + "Move token pos {pos} to continue parsing with specific factory class {fc}".format( + pos=self.token_pos, fc=factory_class.__class__.__name__ + ) + ) + # noinspection PyProtectedMember + ast, token_pos = factory_class(**self._initializer_args())._parse_with_pos() + self.target_clauses.append(ast) + logging.debug( + "Continue where previous parsing ended {token_pos}".format( + token_pos=token_pos + ) + ) + self.token_pos = token_pos + + @abstractmethod + def _initializer_args(self): + """ + Get the arguments of the initializer. This is implemented by the calling class. See ExpressionParser for an + example. + + Returns: + dict: A dictionary of the initializer arguments + """ + + @classmethod + @abstractmethod + def _nestable_class(cls): + """ + Get the class of the Node that will be created and that would be nested. For the example in the docstring this + would be UpdateExpression + + Returns: + class: The class of the Nodes that will be created. + """ + + def _create_node(self): + """ + target_clauses has the nodes in order of encounter. Go through them backwards and build the tree bottom up. + + This way left-deep-descending traversal will process nodes in order. + + Continuing the example of an UpdateExpression: + For example SET a=3 REMOVE b + UpdateExpression + / \ + SET a=3 UpdateExpression + | + REMOVE b + + self.target_clauses looks like: ( SET a=3 >> REMOVE b ) + Returns: + moto.dynamodb2.ast_nodes.Node: Node of an AST representing the Expression as produced by the factory. + """ + assert len(self.target_clauses) > 0, "No nodes for {cn}".format( + cn=self.__class__.__name__ + ) + target_node = self._nestable_class()(children=[self.target_clauses.pop()]) + while len(self.target_clauses) > 0: + target_node = self._nestable_class()( + children=[self.target_clauses.pop(), target_node] + ) + return target_node + + +@six.add_metaclass(abc.ABCMeta) +class ExpressionParser: + """Abstract class""" + + def __init__(self, expression_token_list, token_pos=0): + """ + + Args: + expression_token_list: + token_pos(int): Location where parsing is + """ + self.token_list = expression_token_list + self.token_pos = token_pos + + def _initializer_args(self): + return {"expression_token_list": self.token_list, "token_pos": self.token_pos} + + @abstractmethod + def _parse(self): + """ + Start parsing the token_list from token_pos for the factory type. + + Returns: + moto.dynamodb2.ast_nodes.Node: AST which is root node of resulting abstract syntax tree + """ + + @classmethod + def is_possible_start(cls, token): + return token is not None and cls._is_possible_start(token) + + @classmethod + @abstractmethod + def _is_possible_start(cls, token): + """ + + Args: + token(moto.dynamodb2.tokens.Token): + + Returns: + bool: True if token is a possible start for entries processed by `cls` + """ + + def _parse_with_pos(self): + """ + Start parsing the token_list from token_pos for the factory type and also return the resulting token_pos. 
+ + Returns: + (ast, token_pos): tuple of the AST root node of the resulting abstract syntax tree and token_pos, the + position in the tokenlist. + """ + return self._parse(), self.token_pos + + def parse(self): + return self._parse() + + def get_next_token_type(self): + """ + Get the type of the next token to be processed + + Returns: + str: Token type or None if no more next token + """ + try: + return self.get_next_token().type + except AttributeError: + return None + + def get_next_token(self): + """ + Get the next token to be processed + + Returns: + moto.dynamodb2.tokens.Token: or None if no more next token + """ + try: + return self.token_list[self.token_pos] + except IndexError: + return None + + def get_next_token_value(self): + """ + Get the value of the next token to be processed + + Returns: + str: value or None if no more next token + """ + try: + return self.get_next_token().value + except AttributeError: + return None + + def is_at_end(self): + """Return boolean indicating whether we are at end of the parsing""" + return self.token_pos == len(self.token_list) + + def is_at_start(self): + """Return boolean indicating whether we are at start of the parsing""" + return self.token_pos == 0 + + def get_last_token_value(self): + """Get the last token that was correctly parsed or return empty string""" + if self.token_pos > 0: + return self.token_list[self.token_pos - 1].value + else: + return "" + + def get_last_token_type(self): + """Get the last token type that was correctly parsed or return None""" + if self.token_pos > 0: + return self.token_list[self.token_pos - 1].type + else: + return None + + def get_2nd_last_token_value_if_last_was_whitespace(self): + """Get the 2nd last token that was correctly parsed if last one was whitespace or return empty string""" + if self.token_pos > 1 and self.get_last_token_type() == Token.WHITESPACE: + return self.token_list[self.token_pos - 2].value + else: + return "" + + def get_following_token_value(self): + """Get the token value after the one that is being parsed or empty string if non-existent.""" + try: + return self.token_list[self.token_pos + 1].value + except IndexError: + return "" + + def get_following_token_type(self): + """Get the token type after the one that is being parsed or None if non-existent.""" + try: + return self.token_list[self.token_pos + 1].type + except IndexError: + return None + + def get_2nd_following_token_value_if_following_was_whitespace(self): + """Get the 2nd following token that was correctly parsed if 1st one was whitespace or return empty string""" + if self.get_following_token_type() == Token.WHITESPACE: + try: + return self.token_list[self.token_pos + 2].value + except IndexError: + return "" + else: + return "" + + def skip_white_space(self): + try: + while self.get_next_token_type() == Token.WHITESPACE: + self.token_pos += 1 + except IndexError: + assert self.token_pos > 0, "We should always have positive indexes" + logging.debug("We are out of range so end is reached") + + def process_token_of_type(self, token_type): + """ + Make sure the next token is of type `token_type`; if not, raise an unexpected-token error + Args: + token_type: A token type + + Returns: + str: The value if the token is of type `token_type` + """ + if self.get_next_token_type() == token_type: + token_value = self.get_next_token_value() + self.goto_next_significant_token() + return token_value + else: + self.raise_unexpected_token() + + def goto_next_significant_token(self): + """Continue past current token and skip all 
whitespaces""" + self.token_pos += 1 + self.skip_white_space() + + def raise_unexpected_token(self): + if self.is_at_end(): + problematic_token = "" + problematic_token_in_near = "" + else: + problematic_token_in_near = problematic_token = self.get_next_token_value() + + near = "".join( + [ + self.get_2nd_last_token_value_if_last_was_whitespace(), + self.get_last_token_value(), + problematic_token_in_near, + self.get_following_token_value(), + self.get_2nd_following_token_value_if_following_was_whitespace(), + ] + ) + + raise InvalidTokenException(problematic_token, near) + + +class NestableBinExpressionParser(ExpressionParser): + """ + For nodes that can be nested in themselves (recursive) but with an operation. Take for example + UpdateExpressionValue's grammar: + + Value => Operand* + Value => Operand* + Value + Value => Operand* - Value + + If we consider it of structure + NestableBinExpression => TargetClause* + NestableBinExpression => TargetClause* BinOp NestableBinExpression + + This pattern comes back multiple times. This Mixin adds re-usability for that type of pattern. + + This approach is taken since it allows to remain the ordering of the Nodes as how the corresponding tokens where + in the originating expression. + """ + + def __init__(self, *args, **kwargs): + super(NestableBinExpressionParser, self).__init__(*args, **kwargs) + self.target_nodes = deque() + + def _parse_target_clause(self, factory_class): + """ + + Args: + factory_class: The factory for the target clause e.g. UpdateExpressionSetClauseParser + + Returns: + + """ + # noinspection PyProtectedMember + ast, self.token_pos = factory_class( + **self._initializer_args() + )._parse_with_pos() + self.target_nodes.append(ast) + logging.debug( + "Continue where previous parsing ended {token_pos}".format( + token_pos=self.token_pos + ) + ) + + def _parse(self): + self._parse_target_clause(self._operand_factory_class()) + while self._binop_factory_class().is_possible_start(self.get_next_token()): + self._parse_target_clause(self._binop_factory_class()) + if self._operand_factory_class().is_possible_start(self.get_next_token()): + self._parse_target_clause(self._operand_factory_class()) + else: + self.raise_unexpected_token() + + @abstractmethod + def _operand_factory_class(self): + """ + Get the Parser class of the Operands for the Binary operations/actions. + + Returns: + class: + """ + + @abstractmethod + def _binop_factory_class(self): + """ + Get a factory that gets the possible binary operation. + + Returns: + class: A class extending ExpressionParser + """ + + def _create_node(self): + """ + target_clauses has the nodes in order of encountering. Go through them forward and build the tree bottom up. + For simplicity docstring will use Operand Node rather than the specific node + + This way left-deep-descending traversal will process nodes in order. + + Continuing the example of an UpdateExpressionValue: + For example value => a + :val - :val2 + UpdateExpressionValue + / | \ + UpdateExpressionValue BinOp Operand + / | | | | + UpdateExpressionValue BinOp Operand - :val2 + / | | + Operand + :val + | + a + + self.target_nodes looks like: ( a >> + >> :val >> - >> :val2 ) + Returns: + moto.dynamodb2.ast_nodes.Node: Node of an AST representing the Expression as produced by the factory. 
+ """ + if len(self.target_nodes) == 1: + return UpdateExpressionValue(children=[self.target_nodes.popleft()]) + else: + target_node = UpdateExpressionValue( + children=[ + self.target_nodes.popleft(), + self.target_nodes.popleft(), + self.target_nodes.popleft(), + ] + ) + while len(self.target_nodes) >= 2: + target_node = UpdateExpressionValue( + children=[ + target_node, + self.target_nodes.popleft(), + self.target_nodes.popleft(), + ] + ) + assert len(self.target_nodes) == 0 + return target_node + + +class UpdateExpressionParser(ExpressionParser, NestableExpressionParserMixin): + """ + Parser to create update expressions + """ + + @classmethod + def _sub_factories(cls): + return [ + UpdateExpressionSetClauseParser, + UpdateExpressionAddClauseParser, + UpdateExpressionDeleteClauseParser, + UpdateExpressionRemoveClauseParser, + ] + + @classmethod + def _is_possible_start(cls, token): + pass + + def __init__(self, *args, **kwargs): + super(UpdateExpressionParser, self).__init__(*args, **kwargs) + NestableExpressionParserMixin.__init__(self) + + @classmethod + def _nestable_class(cls): + return UpdateExpression + + def _parse_expression_clause(self, factory_class): + return self._parse_target_clause(factory_class) + + def _parse_by_a_subfactory(self): + for sub_factory in self._sub_factories(): + if sub_factory.is_possible_start(self.get_next_token()): + self._parse_expression_clause(sub_factory) + return True + return False + + def _parse(self): + """ + Update Expression is the top-most node therefore it is expected to end up at the end of the expression. + """ + while True: + self.skip_white_space() + if self.is_at_end(): + logging.debug("End reached") + break + elif self._parse_by_a_subfactory(): + continue + else: + self.raise_unexpected_token() + + return self._create_node(), self.token_pos + + @classmethod + def make(cls, expression_str): + token_list = ExpressionTokenizer.make_list(expression_str) + return cls(token_list).parse() + + +class UpdateExpressionSetClauseParser(ExpressionParser): + """ + UpdateExpressionSetClause => SET SetActions + """ + + @classmethod + def _is_possible_start(cls, token): + return token.type == Token.ATTRIBUTE and token.value.upper() == "SET" + + def _parse(self): + assert self.is_possible_start(self.get_next_token()) + self.goto_next_significant_token() + ast, self.token_pos = UpdateExpressionSetActionsParser( + **self._initializer_args() + )._parse_with_pos() + # noinspection PyProtectedMember + return UpdateExpressionSetClause(children=[ast]) + + +class UpdateExpressionActionsParser(ExpressionParser, NestableExpressionParserMixin): + """ + UpdateExpressionSetActions + """ + + def __init__(self, *args, **kwargs): + super(UpdateExpressionActionsParser, self).__init__(*args, **kwargs) + NestableExpressionParserMixin.__init__(self) + + @classmethod + def _is_possible_start(cls, token): + raise RuntimeError( + "{class_name} cannot be identified by the next token.".format( + class_name=cls._nestable_class().__name__ + ) + ) + + @classmethod + @abstractmethod + def _nestable_class(cls): + return UpdateExpressionSetActions + + @classmethod + @abstractmethod + def _nested_expression_parser_class(cls): + """Returns the parser for the query part that creates the nested nodes""" + + def _parse(self): + """ + UpdateExpressionSetActions is inside the expression so it can be followed by others. Process SetActions one by + one until no more SetAction. 
+        """
+        self.skip_white_space()
+
+        while self._nested_expression_parser_class().is_possible_start(
+            self.get_next_token()
+        ):
+            self._parse_target_clause(self._nested_expression_parser_class())
+            self.skip_white_space()
+            if self.get_next_token_type() == Token.COMMA:
+                self.goto_next_significant_token()
+            else:
+                break
+
+        if len(self.target_clauses) == 0:
+            logging.debug(
+                "Didn't encounter a single {nc} in {nepc}.".format(
+                    nc=self._nestable_class().__name__,
+                    nepc=self._nested_expression_parser_class().__name__,
+                )
+            )
+            self.raise_unexpected_token()
+
+        return self._create_node()
+
+
+class UpdateExpressionSetActionsParser(UpdateExpressionActionsParser):
+    """
+    UpdateExpressionSetActions
+    """
+
+    @classmethod
+    def _nested_expression_parser_class(cls):
+        return UpdateExpressionSetActionParser
+
+    @classmethod
+    def _nestable_class(cls):
+        return UpdateExpressionSetActions
+
+
+class UpdateExpressionSetActionParser(ExpressionParser):
+    """
+    SetAction => Path = Value
+
+    So we create an UpdateExpressionSetAction Node that has 2 children. The left child is the Path, the right
+    child is the Value.
+    """
+
+    @classmethod
+    def _is_possible_start(cls, token):
+        return UpdateExpressionPathParser.is_possible_start(token)
+
+    def _parse(self):
+        """
+        UpdateExpressionSetActionParser only gets called when expecting a SetAction. So we should be aggressive on
+        raising invalid Tokens. We can thus do the following:
+            1) Process path
+            2) skip whitespace if any
+            3) Process the equal-sign token
+            4) skip whitespace if any
+            5) Process value
+
+        """
+        path, self.token_pos = UpdateExpressionPathParser(
+            **self._initializer_args()
+        )._parse_with_pos()
+        self.skip_white_space()
+        self.process_token_of_type(Token.EQUAL_SIGN)
+        self.skip_white_space()
+        value, self.token_pos = UpdateExpressionValueParser(
+            **self._initializer_args()
+        )._parse_with_pos()
+        return UpdateExpressionSetAction(children=[path, value])
+
+
+class UpdateExpressionPathParser(ExpressionParser):
+    """
+    Paths are selectors within items that specify a part of an Item. DynamoDB does not impose many restrictions
+    on the data it stores, but it does impose stricter restrictions on how that data is referenced in an
+    UpdateExpression.
+    """
+
+    def __init__(self, *args, **kwargs):
+        super(UpdateExpressionPathParser, self).__init__(*args, **kwargs)
+        self.path_nodes = []
+
+    @classmethod
+    def _is_possible_start(cls, token):
+        """
+        Args:
+            token(Token): the token to be checked
+
+        Returns:
+            bool: Whether the token could be the start of an UpdateExpressionPath
+        """
+        if token.type == Token.ATTRIBUTE_NAME:
+            return True
+        elif token.type == Token.ATTRIBUTE and token.value.upper() != "REMOVE":
+            """We have to make sure remove is not passed"""
+            return True
+        return False
+
+    def _parse(self):
+        return self.process_path()
+
+    def process_path(self):
+        self.parse_path()
+        return UpdateExpressionPath(children=self.path_nodes)
+
+    def parse_path(self):
+        """
+        A path is comprised of:
+          - Attribute: the name of an attribute as it is stored, which has no special characters
+          - ATTRIBUTE_NAME: A placeholder that has no special characters except a leading # to refer to attributes
+            that have a name that is not allowed in an UpdateExpression
+          - DOT's: These are used to descend into a nested structure. When a DOT is in a path expression it is
+            never part of an attribute name but always means to descend into a MAP.
+            We will call each such descent part of a path chain.
+          - SELECTORs: E.g.: [1] These are used to select an element in ordered datatypes like a list.
+
+        Whitespace may appear between all the elements that make up a path. For SELECTORs whitespace is also
+        allowed between the brackets and the number, but the number itself cannot be split up with spaces.
+
+        Attributes and attribute_names must be separated by DOTs.
+        Returns:
+            UpdateExpressionPath:
+        """
+        self.parse_path_chain()
+        while self.is_next_token_start_of_patch_chain():
+            self.process_dot()
+            self.parse_path_chain()
+
+    def is_next_token_start_of_patch_chain(self):
+        return self.get_next_token_type() == Token.DOT
+
+    def process_dot(self):
+        self.path_nodes.append(ExpressionPathDescender())
+        self.goto_next_significant_token()
+
+    def parse_path_chain(self):
+        self.process_attribute_identifying_token()
+        self.skip_white_space()
+        while self.is_next_token_start_of_selector():
+            self.process_selector()
+            self.skip_white_space()
+
+    def process_attribute_identifying_token(self):
+        if self.get_next_token_type() == Token.ATTRIBUTE:
+            self.path_nodes.append(ExpressionAttribute(self.get_next_token_value()))
+        elif self.get_next_token_type() == Token.ATTRIBUTE_NAME:
+            self.path_nodes.append(ExpressionAttributeName(self.get_next_token_value()))
+        else:
+            self.raise_unexpected_token()
+
+        self.goto_next_significant_token()
+
+    def is_next_token_start_of_selector(self):
+        return self.get_next_token_type() == Token.OPEN_SQUARE_BRACKET
+
+    def process_selector(self):
+        """
+        process_selector is only called when a selector must be processed. So do the following actions:
+         - skip opening bracket
+         - skip optional spaces
+         - read numeric literal
+         - skip optional spaces
+         - pass closing bracket
+        """
+        self.process_token_of_type(Token.OPEN_SQUARE_BRACKET)
+        selector_value = self.process_token_of_type(Token.NUMBER)
+        self.process_token_of_type(Token.CLOSE_SQUARE_BRACKET)
+        self.path_nodes.append(ExpressionSelector(selector_value))
+
+
+class UpdateExpressionValueParser(NestableBinExpressionParser):
+    @classmethod
+    def _is_possible_start(cls, token):
+        return UpdateExpressionOperandParser.is_possible_start(token)
+
+    def _operand_factory_class(self):
+        return UpdateExpressionOperandParser
+
+    def _binop_factory_class(self):
+        return UpdateExpressionValueOperatorParser
+
+
+class UpdateExpressionGroupedValueParser(ExpressionParser):
+    """
+    A grouped value is an Update Expression value clause that is surrounded by round brackets. Each Operand can be
+    a grouped value by itself.
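+
+    For example (illustrative), for "SET a = (:val1 + :val2)" the bracketed part is parsed by this class,
+    which wraps the inner UpdateExpressionValue in an UpdateExpressionGroupedValue node.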
+ """ + + def _parse(self): + self.process_token_of_type(Token.OPEN_ROUND_BRACKET) + value, self.token_pos = UpdateExpressionValueParser( + **self._initializer_args() + )._parse_with_pos() + self.process_token_of_type(Token.CLOSE_ROUND_BRACKET) + return UpdateExpressionGroupedValue(children=value) + + @classmethod + def _is_possible_start(cls, token): + return token.type == Token.OPEN_ROUND_BRACKET + + +class UpdateExpressionValueOperatorParser(ExpressionParser): + OPERATION_TOKENS = [Token.PLUS_SIGN, Token.MINUS_SIGN] + + @classmethod + def _is_possible_start(cls, token): + return token.type in cls.OPERATION_TOKENS + + def _parse(self): + operation_value = self.get_next_token_value() + assert operation_value in self.OPERATION_TOKENS + self.goto_next_significant_token() + return ExpressionValueOperator(operation_value) + + +class UpdateExpressionOperandParser(ExpressionParser): + """ + Grammar + Operand* => AttributeValue + Operand* => UpdateExpressionFunction + Operand* => Path + Operand* => GroupedValue + """ + + @classmethod + def _sub_factories(cls): + return [ + UpdateExpressionAttributeValueParser, + UpdateExpressionFunctionParser, + UpdateExpressionPathParser, + UpdateExpressionGroupedValueParser, + ] + + @classmethod + def _is_possible_start(cls, token): + return any(parser.is_possible_start(token) for parser in cls._sub_factories()) + + def _parse(self): + for factory in self._sub_factories(): + if factory.is_possible_start(self.get_next_token()): + node, self.token_pos = factory( + **self._initializer_args() + )._parse_with_pos() + return node + self.raise_unexpected_token() + + +class UpdateExpressionAttributeValueParser(ExpressionParser): + def _parse(self): + attr_value = ExpressionAttributeValue( + self.process_token_of_type(Token.ATTRIBUTE_VALUE) + ) + return attr_value + + @classmethod + def _is_possible_start(cls, token): + return token.type == Token.ATTRIBUTE_VALUE + + +class UpdateExpressionFunctionParser(ExpressionParser): + """ + A helper to process a function of an Update Expression + """ + + # TODO(pbbouwel): Function names are supposedly case sensitive according to doc add tests + # Map function to the factories for its elements + FUNCTIONS = { + "if_not_exists": [UpdateExpressionPathParser, UpdateExpressionValueParser], + "list_append": [UpdateExpressionOperandParser, UpdateExpressionOperandParser], + } + + @classmethod + def _is_possible_start(cls, token): + """ + Check whether a token is supposed to be a function + Args: + token(Token): the token to check + + Returns: + bool: True if token is the start of a function. 
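+
+        For example (illustrative), in "SET a = if_not_exists(a, :v)" the token "if_not_exists" is an
+        ATTRIBUTE token whose value is a key of FUNCTIONS, so this method returns True.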
+        """
+        if token.type == Token.ATTRIBUTE:
+            return token.value in cls.FUNCTIONS.keys()
+        else:
+            return False
+
+    def _parse(self):
+        function_name = self.get_next_token_value()
+        self.goto_next_significant_token()
+        self.process_token_of_type(Token.OPEN_ROUND_BRACKET)
+        function_elements = [function_name]
+        function_arguments = self.FUNCTIONS[function_name]
+        for i, func_elem_factory in enumerate(function_arguments):
+            func_elem, self.token_pos = func_elem_factory(
+                **self._initializer_args()
+            )._parse_with_pos()
+            function_elements.append(func_elem)
+            if i + 1 < len(function_arguments):
+                self.skip_white_space()
+                self.process_token_of_type(Token.COMMA)
+        self.process_token_of_type(Token.CLOSE_ROUND_BRACKET)
+        return UpdateExpressionFunction(children=function_elements)
+
+
+class UpdateExpressionRemoveClauseParser(ExpressionParser):
+    """
+    UpdateExpressionRemoveClause => REMOVE RemoveActions
+    """
+
+    def _parse(self):
+        assert self.is_possible_start(self.get_next_token())
+        self.goto_next_significant_token()
+        ast, self.token_pos = UpdateExpressionRemoveActionsParser(
+            **self._initializer_args()
+        )._parse_with_pos()
+        # noinspection PyProtectedMember
+        return UpdateExpressionRemoveClause(children=[ast])
+
+    @classmethod
+    def _is_possible_start(cls, token):
+        """REMOVE is not a reserved keyword, so it arrives as a regular ATTRIBUTE token."""
+        return token.type == Token.ATTRIBUTE and token.value.upper() == "REMOVE"
+
+
+class UpdateExpressionRemoveActionsParser(UpdateExpressionActionsParser):
+    """
+    UpdateExpressionRemoveActions
+    """
+
+    @classmethod
+    def _nested_expression_parser_class(cls):
+        return UpdateExpressionRemoveActionParser
+
+    @classmethod
+    def _nestable_class(cls):
+        return UpdateExpressionRemoveActions
+
+
+class UpdateExpressionRemoveActionParser(ExpressionParser):
+    """
+    RemoveAction => Path
+
+    So we create an UpdateExpressionRemoveAction Node that has a single child: the Path to remove.
+    """
+
+    @classmethod
+    def _is_possible_start(cls, token):
+        return UpdateExpressionPathParser.is_possible_start(token)
+
+    def _parse(self):
+        """
+        UpdateExpressionRemoveActionParser only gets called when expecting a RemoveAction. So we should be
+        aggressive on raising invalid Tokens. We can thus do the following:
+            1) Process path
+            2) skip whitespace if any
+
+        """
+        path, self.token_pos = UpdateExpressionPathParser(
+            **self._initializer_args()
+        )._parse_with_pos()
+        self.skip_white_space()
+        return UpdateExpressionRemoveAction(children=[path])
+
+
+class UpdateExpressionAddClauseParser(ExpressionParser):
+    def _parse(self):
+        assert self.is_possible_start(self.get_next_token())
+        self.goto_next_significant_token()
+        ast, self.token_pos = UpdateExpressionAddActionsParser(
+            **self._initializer_args()
+        )._parse_with_pos()
+        # noinspection PyProtectedMember
+        return UpdateExpressionAddClause(children=[ast])
+
+    @classmethod
+    def _is_possible_start(cls, token):
+        return token.type == Token.ATTRIBUTE and token.value.upper() == "ADD"
+
+
+class UpdateExpressionAddActionsParser(UpdateExpressionActionsParser):
+    """
+    UpdateExpressionAddActions
+    """
+
+    @classmethod
+    def _nested_expression_parser_class(cls):
+        return UpdateExpressionAddActionParser
+
+    @classmethod
+    def _nestable_class(cls):
+        return UpdateExpressionAddActions
+
+
+@six.add_metaclass(abc.ABCMeta)
+class UpdateExpressionPathValueParser(ExpressionParser):
+    def _parse_path_and_value(self):
+        """
+        This helper only gets called when expecting a path followed by a value, as in an AddAction or a
+        DeleteAction. So we should be aggressive on raising invalid Tokens.
+        We can thus do the following:
+            1) Process path
+            2) skip whitespace if any
+            3) Process a value
+            4) skip whitespace if any
+
+        Returns:
+            [path, value]: A list containing the Path node and the AttributeValue node
+        """
+        path, self.token_pos = UpdateExpressionPathParser(
+            **self._initializer_args()
+        )._parse_with_pos()
+        self.skip_white_space()
+        value, self.token_pos = UpdateExpressionAttributeValueParser(
+            **self._initializer_args()
+        )._parse_with_pos()
+        self.skip_white_space()
+        return [path, value]
+
+
+class UpdateExpressionAddActionParser(UpdateExpressionPathValueParser):
+    @classmethod
+    def _is_possible_start(cls, token):
+        return UpdateExpressionPathParser.is_possible_start(token)
+
+    def _parse(self):
+        return UpdateExpressionAddAction(children=self._parse_path_and_value())
+
+
+class UpdateExpressionDeleteClauseParser(ExpressionParser):
+    def _parse(self):
+        assert self.is_possible_start(self.get_next_token())
+        self.goto_next_significant_token()
+        ast, self.token_pos = UpdateExpressionDeleteActionsParser(
+            **self._initializer_args()
+        )._parse_with_pos()
+        # noinspection PyProtectedMember
+        return UpdateExpressionDeleteClause(children=[ast])
+
+    @classmethod
+    def _is_possible_start(cls, token):
+        return token.type == Token.ATTRIBUTE and token.value.upper() == "DELETE"
+
+
+class UpdateExpressionDeleteActionsParser(UpdateExpressionActionsParser):
+    """
+    UpdateExpressionDeleteActions
+    """
+
+    @classmethod
+    def _nested_expression_parser_class(cls):
+        return UpdateExpressionDeleteActionParser
+
+    @classmethod
+    def _nestable_class(cls):
+        return UpdateExpressionDeleteActions
+
+
+class UpdateExpressionDeleteActionParser(UpdateExpressionPathValueParser):
+    @classmethod
+    def _is_possible_start(cls, token):
+        return UpdateExpressionPathParser.is_possible_start(token)
+
+    def _parse(self):
+        return UpdateExpressionDeleteAction(children=self._parse_path_and_value())
diff --git a/moto/dynamodb2/parsing/reserved_keywords.py b/moto/dynamodb2/parsing/reserved_keywords.py
new file mode 100644
index 000000000000..d82b16e98deb
--- /dev/null
+++ b/moto/dynamodb2/parsing/reserved_keywords.py
@@ -0,0 +1,29 @@
+class ReservedKeywords(list):
+    """
+    DynamoDB has an extensive list of keywords. Keywords are considered when validating the expression Tree.
+ Not earlier since an update expression like "SET path = VALUE 1" fails with: + 'Invalid UpdateExpression: Syntax error; token: "1", near: "VALUE 1"' + """ + + KEYWORDS = None + + @classmethod + def get_reserved_keywords(cls): + if cls.KEYWORDS is None: + cls.KEYWORDS = cls._get_reserved_keywords() + return cls.KEYWORDS + + @classmethod + def _get_reserved_keywords(cls): + """ + Get a list of reserved keywords of DynamoDB + """ + try: + import importlib.resources as pkg_resources + except ImportError: + import importlib_resources as pkg_resources + + reserved_keywords = pkg_resources.read_text( + "moto.dynamodb2.parsing", "reserved_keywords.txt" + ) + return reserved_keywords.split() diff --git a/moto/dynamodb2/parsing/reserved_keywords.txt b/moto/dynamodb2/parsing/reserved_keywords.txt new file mode 100644 index 000000000000..7c0106127a3e --- /dev/null +++ b/moto/dynamodb2/parsing/reserved_keywords.txt @@ -0,0 +1,573 @@ +ABORT +ABSOLUTE +ACTION +ADD +AFTER +AGENT +AGGREGATE +ALL +ALLOCATE +ALTER +ANALYZE +AND +ANY +ARCHIVE +ARE +ARRAY +AS +ASC +ASCII +ASENSITIVE +ASSERTION +ASYMMETRIC +AT +ATOMIC +ATTACH +ATTRIBUTE +AUTH +AUTHORIZATION +AUTHORIZE +AUTO +AVG +BACK +BACKUP +BASE +BATCH +BEFORE +BEGIN +BETWEEN +BIGINT +BINARY +BIT +BLOB +BLOCK +BOOLEAN +BOTH +BREADTH +BUCKET +BULK +BY +BYTE +CALL +CALLED +CALLING +CAPACITY +CASCADE +CASCADED +CASE +CAST +CATALOG +CHAR +CHARACTER +CHECK +CLASS +CLOB +CLOSE +CLUSTER +CLUSTERED +CLUSTERING +CLUSTERS +COALESCE +COLLATE +COLLATION +COLLECTION +COLUMN +COLUMNS +COMBINE +COMMENT +COMMIT +COMPACT +COMPILE +COMPRESS +CONDITION +CONFLICT +CONNECT +CONNECTION +CONSISTENCY +CONSISTENT +CONSTRAINT +CONSTRAINTS +CONSTRUCTOR +CONSUMED +CONTINUE +CONVERT +COPY +CORRESPONDING +COUNT +COUNTER +CREATE +CROSS +CUBE +CURRENT +CURSOR +CYCLE +DATA +DATABASE +DATE +DATETIME +DAY +DEALLOCATE +DEC +DECIMAL +DECLARE +DEFAULT +DEFERRABLE +DEFERRED +DEFINE +DEFINED +DEFINITION +DELETE +DELIMITED +DEPTH +DEREF +DESC +DESCRIBE +DESCRIPTOR +DETACH +DETERMINISTIC +DIAGNOSTICS +DIRECTORIES +DISABLE +DISCONNECT +DISTINCT +DISTRIBUTE +DO +DOMAIN +DOUBLE +DROP +DUMP +DURATION +DYNAMIC +EACH +ELEMENT +ELSE +ELSEIF +EMPTY +ENABLE +END +EQUAL +EQUALS +ERROR +ESCAPE +ESCAPED +EVAL +EVALUATE +EXCEEDED +EXCEPT +EXCEPTION +EXCEPTIONS +EXCLUSIVE +EXEC +EXECUTE +EXISTS +EXIT +EXPLAIN +EXPLODE +EXPORT +EXPRESSION +EXTENDED +EXTERNAL +EXTRACT +FAIL +FALSE +FAMILY +FETCH +FIELDS +FILE +FILTER +FILTERING +FINAL +FINISH +FIRST +FIXED +FLATTERN +FLOAT +FOR +FORCE +FOREIGN +FORMAT +FORWARD +FOUND +FREE +FROM +FULL +FUNCTION +FUNCTIONS +GENERAL +GENERATE +GET +GLOB +GLOBAL +GO +GOTO +GRANT +GREATER +GROUP +GROUPING +HANDLER +HASH +HAVE +HAVING +HEAP +HIDDEN +HOLD +HOUR +IDENTIFIED +IDENTITY +IF +IGNORE +IMMEDIATE +IMPORT +IN +INCLUDING +INCLUSIVE +INCREMENT +INCREMENTAL +INDEX +INDEXED +INDEXES +INDICATOR +INFINITE +INITIALLY +INLINE +INNER +INNTER +INOUT +INPUT +INSENSITIVE +INSERT +INSTEAD +INT +INTEGER +INTERSECT +INTERVAL +INTO +INVALIDATE +IS +ISOLATION +ITEM +ITEMS +ITERATE +JOIN +KEY +KEYS +LAG +LANGUAGE +LARGE +LAST +LATERAL +LEAD +LEADING +LEAVE +LEFT +LENGTH +LESS +LEVEL +LIKE +LIMIT +LIMITED +LINES +LIST +LOAD +LOCAL +LOCALTIME +LOCALTIMESTAMP +LOCATION +LOCATOR +LOCK +LOCKS +LOG +LOGED +LONG +LOOP +LOWER +MAP +MATCH +MATERIALIZED +MAX +MAXLEN +MEMBER +MERGE +METHOD +METRICS +MIN +MINUS +MINUTE +MISSING +MOD +MODE +MODIFIES +MODIFY +MODULE +MONTH +MULTI +MULTISET +NAME +NAMES +NATIONAL +NATURAL +NCHAR +NCLOB +NEW +NEXT +NO +NONE +NOT +NULL +NULLIF +NUMBER +NUMERIC +OBJECT +OF 
+OFFLINE +OFFSET +OLD +ON +ONLINE +ONLY +OPAQUE +OPEN +OPERATOR +OPTION +OR +ORDER +ORDINALITY +OTHER +OTHERS +OUT +OUTER +OUTPUT +OVER +OVERLAPS +OVERRIDE +OWNER +PAD +PARALLEL +PARAMETER +PARAMETERS +PARTIAL +PARTITION +PARTITIONED +PARTITIONS +PATH +PERCENT +PERCENTILE +PERMISSION +PERMISSIONS +PIPE +PIPELINED +PLAN +POOL +POSITION +PRECISION +PREPARE +PRESERVE +PRIMARY +PRIOR +PRIVATE +PRIVILEGES +PROCEDURE +PROCESSED +PROJECT +PROJECTION +PROPERTY +PROVISIONING +PUBLIC +PUT +QUERY +QUIT +QUORUM +RAISE +RANDOM +RANGE +RANK +RAW +READ +READS +REAL +REBUILD +RECORD +RECURSIVE +REDUCE +REF +REFERENCE +REFERENCES +REFERENCING +REGEXP +REGION +REINDEX +RELATIVE +RELEASE +REMAINDER +RENAME +REPEAT +REPLACE +REQUEST +RESET +RESIGNAL +RESOURCE +RESPONSE +RESTORE +RESTRICT +RESULT +RETURN +RETURNING +RETURNS +REVERSE +REVOKE +RIGHT +ROLE +ROLES +ROLLBACK +ROLLUP +ROUTINE +ROW +ROWS +RULE +RULES +SAMPLE +SATISFIES +SAVE +SAVEPOINT +SCAN +SCHEMA +SCOPE +SCROLL +SEARCH +SECOND +SECTION +SEGMENT +SEGMENTS +SELECT +SELF +SEMI +SENSITIVE +SEPARATE +SEQUENCE +SERIALIZABLE +SESSION +SET +SETS +SHARD +SHARE +SHARED +SHORT +SHOW +SIGNAL +SIMILAR +SIZE +SKEWED +SMALLINT +SNAPSHOT +SOME +SOURCE +SPACE +SPACES +SPARSE +SPECIFIC +SPECIFICTYPE +SPLIT +SQL +SQLCODE +SQLERROR +SQLEXCEPTION +SQLSTATE +SQLWARNING +START +STATE +STATIC +STATUS +STORAGE +STORE +STORED +STREAM +STRING +STRUCT +STYLE +SUB +SUBMULTISET +SUBPARTITION +SUBSTRING +SUBTYPE +SUM +SUPER +SYMMETRIC +SYNONYM +SYSTEM +TABLE +TABLESAMPLE +TEMP +TEMPORARY +TERMINATED +TEXT +THAN +THEN +THROUGHPUT +TIME +TIMESTAMP +TIMEZONE +TINYINT +TO +TOKEN +TOTAL +TOUCH +TRAILING +TRANSACTION +TRANSFORM +TRANSLATE +TRANSLATION +TREAT +TRIGGER +TRIM +TRUE +TRUNCATE +TTL +TUPLE +TYPE +UNDER +UNDO +UNION +UNIQUE +UNIT +UNKNOWN +UNLOGGED +UNNEST +UNPROCESSED +UNSIGNED +UNTIL +UPDATE +UPPER +URL +USAGE +USE +USER +USERS +USING +UUID +VACUUM +VALUE +VALUED +VALUES +VARCHAR +VARIABLE +VARIANCE +VARINT +VARYING +VIEW +VIEWS +VIRTUAL +VOID +WAIT +WHEN +WHENEVER +WHERE +WHILE +WINDOW +WITH +WITHIN +WITHOUT +WORK +WRAPPED +WRITE +YEAR +ZONE diff --git a/moto/dynamodb2/parsing/tokens.py b/moto/dynamodb2/parsing/tokens.py index 07d65ae64d24..4fbb7883afa3 100644 --- a/moto/dynamodb2/parsing/tokens.py +++ b/moto/dynamodb2/parsing/tokens.py @@ -1,4 +1,5 @@ import re +import sys from moto.dynamodb2.exceptions import ( InvalidTokenException, @@ -147,9 +148,17 @@ def __init__(self, input_expression_str): self.token_list = [] self.staged_characters = "" + @classmethod + def is_py2(cls): + return sys.version_info[0] == 2 + @classmethod def make_list(cls, input_expression_str): - assert isinstance(input_expression_str, str) + if cls.is_py2(): + pass + else: + assert isinstance(input_expression_str, str) + return ExpressionTokenizer(input_expression_str)._make_list() def add_token(self, token_type, token_value): @@ -159,6 +168,10 @@ def add_token_from_stage(self, token_type): self.add_token(token_type, self.staged_characters) self.staged_characters = "" + @classmethod + def is_numeric(cls, input_str): + return re.compile("[0-9]+").match(input_str) is not None + def process_staged_characters(self): if len(self.staged_characters) == 0: return @@ -167,7 +180,7 @@ def process_staged_characters(self): self.add_token_from_stage(Token.ATTRIBUTE_NAME) else: raise InvalidExpressionAttributeNameKey(self.staged_characters) - elif self.staged_characters.isnumeric(): + elif self.is_numeric(self.staged_characters): self.add_token_from_stage(Token.NUMBER) elif 
self.is_expression_attribute(self.staged_characters): self.add_token_from_stage(Token.ATTRIBUTE) diff --git a/moto/dynamodb2/responses.py b/moto/dynamodb2/responses.py index d21d1d756604..a5aeeac70525 100644 --- a/moto/dynamodb2/responses.py +++ b/moto/dynamodb2/responses.py @@ -748,11 +748,6 @@ def update_item(self): expression_attribute_names = self.body.get("ExpressionAttributeNames", {}) expression_attribute_values = self.body.get("ExpressionAttributeValues", {}) - # Support spaces between operators in an update expression - # E.g. `a = b + c` -> `a=b+c` - if update_expression: - update_expression = re.sub(r"\s*([=\+-])\s*", "\\1", update_expression) - try: item = self.dynamodb_backend.update_item( name, diff --git a/tests/test_dynamodb2/test_dynamodb.py b/tests/test_dynamodb2/test_dynamodb.py index bec24c966ef2..09401d5626b7 100644 --- a/tests/test_dynamodb2/test_dynamodb.py +++ b/tests/test_dynamodb2/test_dynamodb.py @@ -1,5 +1,6 @@ from __future__ import unicode_literals, print_function +import re from decimal import Decimal import six @@ -4177,3 +4178,70 @@ def test_gsi_verify_negative_number_order(): [float(item["gsiK1SortKey"]) for item in resp["Items"]].should.equal( [-0.7, -0.6, 0.7] ) + + +def assert_raise_syntax_error(client_error, token, near): + """ + Assert whether a client_error is as expected Syntax error. Syntax error looks like: `syntax_error_template` + + Args: + client_error(ClientError): The ClientError exception that was raised + token(str): The token that ws unexpected + near(str): The part in the expression that shows where the error occurs it generally has the preceding token the + optional separation and the problematic token. + """ + syntax_error_template = ( + 'Invalid UpdateExpression: Syntax error; token: "{token}", near: "{near}"' + ) + expected_syntax_error = syntax_error_template.format(token=token, near=near) + assert client_error.response["Error"]["Code"] == "ValidationException" + assert expected_syntax_error == client_error.response["Error"]["Message"] + + +@mock_dynamodb2 +def test_update_expression_with_numeric_literal_instead_of_value(): + """ + DynamoDB requires literals to be passed in as values. If they are put literally in the expression a token error will + be raised + """ + dynamodb = boto3.client("dynamodb", region_name="eu-west-1") + + dynamodb.create_table( + TableName="moto-test", + KeySchema=[{"AttributeName": "id", "KeyType": "HASH"}], + AttributeDefinitions=[{"AttributeName": "id", "AttributeType": "S"}], + ) + + try: + dynamodb.update_item( + TableName="moto-test", + Key={"id": {"S": "1"}}, + UpdateExpression="SET MyStr = myNum + 1", + ) + assert False, "Validation exception not thrown" + except dynamodb.exceptions.ClientError as e: + assert_raise_syntax_error(e, "1", "+ 1") + + +@mock_dynamodb2 +def test_update_expression_with_multiple_set_clauses_must_be_comma_separated(): + """ + An UpdateExpression can have multiple set clauses but if they are passed in without the separating comma. 
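+    In that case a syntax error is raised that points at the second clause, as asserted below.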
+ """ + dynamodb = boto3.client("dynamodb", region_name="eu-west-1") + + dynamodb.create_table( + TableName="moto-test", + KeySchema=[{"AttributeName": "id", "KeyType": "HASH"}], + AttributeDefinitions=[{"AttributeName": "id", "AttributeType": "S"}], + ) + + try: + dynamodb.update_item( + TableName="moto-test", + Key={"id": {"S": "1"}}, + UpdateExpression="SET MyStr = myNum Mystr2 myNum2", + ) + assert False, "Validation exception not thrown" + except dynamodb.exceptions.ClientError as e: + assert_raise_syntax_error(e, "Mystr2", "myNum Mystr2 myNum2") diff --git a/tests/test_dynamodb2/test_dynamodb_expressions.py b/tests/test_dynamodb2/test_dynamodb_expressions.py new file mode 100644 index 000000000000..1066231af043 --- /dev/null +++ b/tests/test_dynamodb2/test_dynamodb_expressions.py @@ -0,0 +1,395 @@ +from moto.dynamodb2.exceptions import InvalidTokenException +from moto.dynamodb2.parsing.expressions import UpdateExpressionParser +from moto.dynamodb2.parsing.reserved_keywords import ReservedKeywords + + +def test_get_reserved_keywords(): + reserved_keywords = ReservedKeywords.get_reserved_keywords() + assert "SET" in reserved_keywords + assert "DELETE" in reserved_keywords + assert "ADD" in reserved_keywords + # REMOVE is not part of the list of reserved keywords. + assert "REMOVE" not in reserved_keywords + + +def test_update_expression_numeric_literal_in_expression(): + set_action = "SET attrName = 3" + try: + UpdateExpressionParser.make(set_action) + assert False, "Exception not raised correctly" + except InvalidTokenException as te: + assert te.token == "3" + assert te.near == "= 3" + + +def test_expression_tokenizer_multi_number_numeric_literal_in_expression(): + set_action = "SET attrName = 34" + try: + UpdateExpressionParser.make(set_action) + assert False, "Exception not raised correctly" + except InvalidTokenException as te: + assert te.token == "34" + assert te.near == "= 34" + + +def test_expression_tokenizer_numeric_literal_unclosed_square_bracket(): + set_action = "SET MyStr[ 3" + try: + UpdateExpressionParser.make(set_action) + assert False, "Exception not raised correctly" + except InvalidTokenException as te: + assert te.token == "" + assert te.near == "3" + + +def test_expression_tokenizer_wrong_closing_bracket_with_space(): + set_action = "SET MyStr[3 )" + try: + UpdateExpressionParser.make(set_action) + assert False, "Exception not raised correctly" + except InvalidTokenException as te: + assert te.token == ")" + assert te.near == "3 )" + + +def test_expression_tokenizer_wrong_closing_bracket(): + set_action = "SET MyStr[3)" + try: + UpdateExpressionParser.make(set_action) + assert False, "Exception not raised correctly" + except InvalidTokenException as te: + assert te.token == ")" + assert te.near == "3)" + + +def test_expression_tokenizer_only_numeric_literal_for_set(): + set_action = "SET 2" + try: + UpdateExpressionParser.make(set_action) + assert False, "Exception not raised correctly" + except InvalidTokenException as te: + assert te.token == "2" + assert te.near == "SET 2" + + +def test_expression_tokenizer_only_numeric_literal(): + set_action = "2" + try: + UpdateExpressionParser.make(set_action) + assert False, "Exception not raised correctly" + except InvalidTokenException as te: + assert te.token == "2" + assert te.near == "2" + + +def test_expression_tokenizer_set_closing_round_bracket(): + set_action = "SET )" + try: + UpdateExpressionParser.make(set_action) + assert False, "Exception not raised correctly" + except InvalidTokenException as te: + assert 
te.token == ")" + assert te.near == "SET )" + + +def test_expression_tokenizer_set_closing_followed_by_numeric_literal(): + set_action = "SET ) 3" + try: + UpdateExpressionParser.make(set_action) + assert False, "Exception not raised correctly" + except InvalidTokenException as te: + assert te.token == ")" + assert te.near == "SET ) 3" + + +def test_expression_tokenizer_numeric_literal_unclosed_square_bracket_trailing_space(): + set_action = "SET MyStr[ 3 " + try: + UpdateExpressionParser.make(set_action) + assert False, "Exception not raised correctly" + except InvalidTokenException as te: + assert te.token == "" + assert te.near == "3 " + + +def test_expression_tokenizer_unbalanced_round_brackets_only_opening(): + set_action = "SET MyStr = (:_val" + try: + UpdateExpressionParser.make(set_action) + assert False, "Exception not raised correctly" + except InvalidTokenException as te: + assert te.token == "" + assert te.near == ":_val" + + +def test_expression_tokenizer_unbalanced_round_brackets_only_opening_trailing_space(): + set_action = "SET MyStr = (:_val " + try: + UpdateExpressionParser.make(set_action) + assert False, "Exception not raised correctly" + except InvalidTokenException as te: + assert te.token == "" + assert te.near == ":_val " + + +def test_expression_tokenizer_unbalanced_square_brackets_only_opening(): + set_action = "SET MyStr = [:_val" + try: + UpdateExpressionParser.make(set_action) + assert False, "Exception not raised correctly" + except InvalidTokenException as te: + assert te.token == "[" + assert te.near == "= [:_val" + + +def test_expression_tokenizer_unbalanced_square_brackets_only_opening_trailing_spaces(): + set_action = "SET MyStr = [:_val " + try: + UpdateExpressionParser.make(set_action) + assert False, "Exception not raised correctly" + except InvalidTokenException as te: + assert te.token == "[" + assert te.near == "= [:_val" + + +def test_expression_tokenizer_unbalanced_round_brackets_multiple_opening(): + set_action = "SET MyStr = (:_val + (:val2" + try: + UpdateExpressionParser.make(set_action) + assert False, "Exception not raised correctly" + except InvalidTokenException as te: + assert te.token == "" + assert te.near == ":val2" + + +def test_expression_tokenizer_unbalanced_round_brackets_only_closing(): + set_action = "SET MyStr = ):_val" + try: + UpdateExpressionParser.make(set_action) + assert False, "Exception not raised correctly" + except InvalidTokenException as te: + assert te.token == ")" + assert te.near == "= ):_val" + + +def test_expression_tokenizer_unbalanced_square_brackets_only_closing(): + set_action = "SET MyStr = ]:_val" + try: + UpdateExpressionParser.make(set_action) + assert False, "Exception not raised correctly" + except InvalidTokenException as te: + assert te.token == "]" + assert te.near == "= ]:_val" + + +def test_expression_tokenizer_unbalanced_round_brackets_only_closing_followed_by_other_parts(): + set_action = "SET MyStr = ):_val + :val2" + try: + UpdateExpressionParser.make(set_action) + assert False, "Exception not raised correctly" + except InvalidTokenException as te: + assert te.token == ")" + assert te.near == "= ):_val" + + +def test_update_expression_starts_with_keyword_reset_followed_by_identifier(): + update_expression = "RESET NonExistent" + try: + UpdateExpressionParser.make(update_expression) + assert False, "Exception not raised correctly" + except InvalidTokenException as te: + assert te.token == "RESET" + assert te.near == "RESET NonExistent" + + +def 
test_update_expression_starts_with_keyword_reset_followed_by_identifier_and_value(): + update_expression = "RESET NonExistent value" + try: + UpdateExpressionParser.make(update_expression) + assert False, "Exception not raised correctly" + except InvalidTokenException as te: + assert te.token == "RESET" + assert te.near == "RESET NonExistent" + + +def test_update_expression_starts_with_leading_spaces_and_keyword_reset_followed_by_identifier_and_value(): + update_expression = " RESET NonExistent value" + try: + UpdateExpressionParser.make(update_expression) + assert False, "Exception not raised correctly" + except InvalidTokenException as te: + assert te.token == "RESET" + assert te.near == " RESET NonExistent" + + +def test_update_expression_with_only_keyword_reset(): + update_expression = "RESET" + try: + UpdateExpressionParser.make(update_expression) + assert False, "Exception not raised correctly" + except InvalidTokenException as te: + assert te.token == "RESET" + assert te.near == "RESET" + + +def test_update_nested_expression_with_selector_just_should_fail_parsing_at_numeric_literal_value(): + update_expression = "SET a[0].b = 5" + try: + UpdateExpressionParser.make(update_expression) + assert False, "Exception not raised correctly" + except InvalidTokenException as te: + assert te.token == "5" + assert te.near == "= 5" + + +def test_update_nested_expression_with_selector_and_spaces_should_only_fail_parsing_at_numeric_literal_value(): + update_expression = "SET a [ 2 ]. b = 5" + try: + UpdateExpressionParser.make(update_expression) + assert False, "Exception not raised correctly" + except InvalidTokenException as te: + assert te.token == "5" + assert te.near == "= 5" + + +def test_update_nested_expression_with_double_selector_and_spaces_should_only_fail_parsing_at_numeric_literal_value(): + update_expression = "SET a [2][ 3 ]. b = 5" + try: + UpdateExpressionParser.make(update_expression) + assert False, "Exception not raised correctly" + except InvalidTokenException as te: + assert te.token == "5" + assert te.near == "= 5" + + +def test_update_nested_expression_should_only_fail_parsing_at_numeric_literal_value(): + update_expression = "SET a . b = 5" + try: + UpdateExpressionParser.make(update_expression) + assert False, "Exception not raised correctly" + except InvalidTokenException as te: + assert te.token == "5" + assert te.near == "= 5" + + +def test_nested_selectors_in_update_expression_should_fail_at_nesting(): + update_expression = "SET a [ [2] ]. b = 5" + try: + UpdateExpressionParser.make(update_expression) + assert False, "Exception not raised correctly" + except InvalidTokenException as te: + assert te.token == "[" + assert te.near == "[ [2" + + +def test_update_expression_number_in_selector_cannot_be_splite(): + update_expression = "SET a [2 1]. 
b = 5" + try: + UpdateExpressionParser.make(update_expression) + assert False, "Exception not raised correctly" + except InvalidTokenException as te: + assert te.token == "1" + assert te.near == "2 1]" + + +def test_update_expression_cannot_have_successive_attributes(): + update_expression = "SET #a a = 5" + try: + UpdateExpressionParser.make(update_expression) + assert False, "Exception not raised correctly" + except InvalidTokenException as te: + assert te.token == "a" + assert te.near == "#a a =" + + +def test_update_expression_path_with_both_attribute_and_attribute_name_should_only_fail_at_numeric_value(): + update_expression = "SET #a.a = 5" + try: + UpdateExpressionParser.make(update_expression) + assert False, "Exception not raised correctly" + except InvalidTokenException as te: + assert te.token == "5" + assert te.near == "= 5" + + +def test_expression_tokenizer_2_same_operators_back_to_back(): + set_action = "SET MyStr = NoExist + + :_val " + try: + UpdateExpressionParser.make(set_action) + assert False, "Exception not raised correctly" + except InvalidTokenException as te: + assert te.token == "+" + assert te.near == "+ + :_val" + + +def test_expression_tokenizer_2_different_operators_back_to_back(): + set_action = "SET MyStr = NoExist + - :_val " + try: + UpdateExpressionParser.make(set_action) + assert False, "Exception not raised correctly" + except InvalidTokenException as te: + assert te.token == "-" + assert te.near == "+ - :_val" + + +def test_update_expression_remove_does_not_allow_operations(): + remove_action = "REMOVE NoExist + " + try: + UpdateExpressionParser.make(remove_action) + assert False, "Exception not raised correctly" + except InvalidTokenException as te: + assert te.token == "+" + assert te.near == "NoExist + " + + +def test_update_expression_add_does_not_allow_attribute_after_path(): + """value here is not really a value since a value starts with a colon (:)""" + add_expr = "ADD attr val foobar" + try: + UpdateExpressionParser.make(add_expr) + assert False, "Exception not raised correctly" + except InvalidTokenException as te: + assert te.token == "val" + assert te.near == "attr val foobar" + + +def test_update_expression_add_does_not_allow_attribute_foobar_after_value(): + add_expr = "ADD attr :val foobar" + try: + UpdateExpressionParser.make(add_expr) + assert False, "Exception not raised correctly" + except InvalidTokenException as te: + assert te.token == "foobar" + assert te.near == ":val foobar" + + +def test_update_expression_delete_does_not_allow_attribute_after_path(): + """value here is not really a value since a value starts with a colon (:)""" + delete_expr = "DELETE attr val" + try: + UpdateExpressionParser.make(delete_expr) + assert False, "Exception not raised correctly" + except InvalidTokenException as te: + assert te.token == "val" + assert te.near == "attr val" + + +def test_update_expression_delete_does_not_allow_attribute_foobar_after_value(): + delete_expr = "DELETE attr :val foobar" + try: + UpdateExpressionParser.make(delete_expr) + assert False, "Exception not raised correctly" + except InvalidTokenException as te: + assert te.token == "foobar" + assert te.near == ":val foobar" + + +def test_update_expression_parsing_is_not_keyword_aware(): + """path and VALUE are keywords. 
Yet a token error will be thrown for the numeric literal 1.""" + delete_expr = "SET path = VALUE 1" + try: + UpdateExpressionParser.make(delete_expr) + assert False, "Exception not raised correctly" + except InvalidTokenException as te: + assert te.token == "1" + assert te.near == "VALUE 1" diff --git a/tests/test_dynamodb2/test_dynamodb_table_with_range_key.py b/tests/test_dynamodb2/test_dynamodb_table_with_range_key.py index c433a3a31f64..1aa2175c16ce 100644 --- a/tests/test_dynamodb2/test_dynamodb_table_with_range_key.py +++ b/tests/test_dynamodb2/test_dynamodb_table_with_range_key.py @@ -1254,14 +1254,22 @@ def test_update_item_with_expression(): item_key = {"forum_name": "the-key", "subject": "123"} - table.update_item(Key=item_key, UpdateExpression="SET field=2") + table.update_item( + Key=item_key, + UpdateExpression="SET field = :field_value", + ExpressionAttributeValues={":field_value": 2}, + ) dict(table.get_item(Key=item_key)["Item"]).should.equal( - {"field": "2", "forum_name": "the-key", "subject": "123"} + {"field": Decimal("2"), "forum_name": "the-key", "subject": "123"} ) - table.update_item(Key=item_key, UpdateExpression="SET field = 3") + table.update_item( + Key=item_key, + UpdateExpression="SET field = :field_value", + ExpressionAttributeValues={":field_value": 3}, + ) dict(table.get_item(Key=item_key)["Item"]).should.equal( - {"field": "3", "forum_name": "the-key", "subject": "123"} + {"field": Decimal("3"), "forum_name": "the-key", "subject": "123"} ) From 891801d5697f80ab44afe8a20e5896e8807237b6 Mon Sep 17 00:00:00 2001 From: Bob Wombat Hogg Date: Sat, 18 Apr 2020 07:46:28 -0400 Subject: [PATCH 239/658] Use ISO 8601 format for ELB DescribeLoadBalancers --- moto/elb/models.py | 5 ++++- moto/elb/responses.py | 2 +- 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/moto/elb/models.py b/moto/elb/models.py index f77811623def..4991b0754421 100644 --- a/moto/elb/models.py +++ b/moto/elb/models.py @@ -1,6 +1,9 @@ from __future__ import unicode_literals import datetime + +import pytz + from boto.ec2.elb.attributes import ( LbAttributes, ConnectionSettingAttribute, @@ -83,7 +86,7 @@ def __init__( self.zones = zones self.listeners = [] self.backends = [] - self.created_time = datetime.datetime.now() + self.created_time = datetime.datetime.now(pytz.utc) self.scheme = scheme self.attributes = FakeLoadBalancer.get_default_attributes() self.policies = Policies() diff --git a/moto/elb/responses.py b/moto/elb/responses.py index de21f23e799d..79db5a788793 100644 --- a/moto/elb/responses.py +++ b/moto/elb/responses.py @@ -442,7 +442,7 @@ def _add_tags(self, elb): {% endfor %} {{ load_balancer.name }} - {{ load_balancer.created_time }} + {{ load_balancer.created_time.isoformat() }} {% if load_balancer.health_check %} {{ load_balancer.health_check.interval }} From fc4d88401d3d3822f71d96588a3304cea2d784aa Mon Sep 17 00:00:00 2001 From: pvbouwel Date: Sun, 19 Apr 2020 16:38:29 +0100 Subject: [PATCH 240/658] Improve DDB expressions support3: AST Validation Part of structured approach for UpdateExpressions: 1) Expression gets parsed into a tokenlist (tokenized) 2) Tokenlist get transformed to expression tree (AST) 3) The AST gets validated (full semantic correctness) -> this commit 4) AST gets processed to perform the update This commit uses the AST to perform validation. Validation makes sure the nodes encounterd have valid values and they will also resolve values for references that refer to item state or values passed into the expression. 
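
As an illustration (a hypothetical usage sketch, not part of the diff below):

    from moto.dynamodb2.parsing.expressions import UpdateExpressionParser
    from moto.dynamodb2.parsing.validators import ExpressionAttributeValueProcessor

    ast = UpdateExpressionParser.make("SET a = :val")
    # Replaces the ":val" reference in the AST with a DDBTypedValue wrapping
    # DynamoType({"N": "3"}); raises ExpressionAttributeValueNotDefined if
    # ":val" was not supplied.
    ast = ExpressionAttributeValueProcessor({":val": {"N": "3"}}).traverse(ast)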
--- moto/dynamodb2/exceptions.py | 72 ++- moto/dynamodb2/models/dynamo_type.py | 31 ++ moto/dynamodb2/parsing/README.md | 23 + moto/dynamodb2/parsing/ast_nodes.py | 157 +++++- moto/dynamodb2/parsing/expressions.py | 38 +- moto/dynamodb2/parsing/validators.py | 341 +++++++++++++ moto/dynamodb2/responses.py | 2 +- .../test_dynamodb_expressions.py | 10 + .../test_dynamodb_validation.py | 464 ++++++++++++++++++ 9 files changed, 1127 insertions(+), 11 deletions(-) create mode 100644 moto/dynamodb2/parsing/README.md create mode 100644 moto/dynamodb2/parsing/validators.py create mode 100644 tests/test_dynamodb2/test_dynamodb_validation.py diff --git a/moto/dynamodb2/exceptions.py b/moto/dynamodb2/exceptions.py index 4c5dfd447a7a..a6acae071eab 100644 --- a/moto/dynamodb2/exceptions.py +++ b/moto/dynamodb2/exceptions.py @@ -7,22 +7,73 @@ def __init__(self, message): self.exception_msg = message -class InvalidUpdateExpression(MockValidationException): +class InvalidUpdateExpressionInvalidDocumentPath(MockValidationException): invalid_update_expression_msg = ( "The document path provided in the update expression is invalid for update" ) def __init__(self): - super(InvalidUpdateExpression, self).__init__( + super(InvalidUpdateExpressionInvalidDocumentPath, self).__init__( self.invalid_update_expression_msg ) -class UpdateExprSyntaxError(MockValidationException): - update_expr_syntax_error_msg = ( - "Invalid UpdateExpression: Syntax error; {error_detail}" +class InvalidUpdateExpression(MockValidationException): + invalid_update_expr_msg = "Invalid UpdateExpression: {update_expression_error}" + + def __init__(self, update_expression_error): + self.update_expression_error = update_expression_error + super(InvalidUpdateExpression, self).__init__( + self.invalid_update_expr_msg.format( + update_expression_error=update_expression_error + ) + ) + + +class AttributeDoesNotExist(MockValidationException): + attr_does_not_exist_msg = ( + "The provided expression refers to an attribute that does not exist in the item" ) + def __init__(self): + super(AttributeDoesNotExist, self).__init__(self.attr_does_not_exist_msg) + + +class ExpressionAttributeNameNotDefined(InvalidUpdateExpression): + name_not_defined_msg = "An expression attribute name used in the document path is not defined; attribute name: {n}" + + def __init__(self, attribute_name): + self.not_defined_attribute_name = attribute_name + super(ExpressionAttributeNameNotDefined, self).__init__( + self.name_not_defined_msg.format(n=attribute_name) + ) + + +class AttributeIsReservedKeyword(InvalidUpdateExpression): + attribute_is_keyword_msg = ( + "Attribute name is a reserved keyword; reserved keyword: {keyword}" + ) + + def __init__(self, keyword): + self.keyword = keyword + super(AttributeIsReservedKeyword, self).__init__( + self.attribute_is_keyword_msg.format(keyword=keyword) + ) + + +class ExpressionAttributeValueNotDefined(InvalidUpdateExpression): + attr_value_not_defined_msg = "An expression attribute value used in expression is not defined; attribute value: {attribute_value}" + + def __init__(self, attribute_value): + self.attribute_value = attribute_value + super(ExpressionAttributeValueNotDefined, self).__init__( + self.attr_value_not_defined_msg.format(attribute_value=attribute_value) + ) + + +class UpdateExprSyntaxError(InvalidUpdateExpression): + update_expr_syntax_error_msg = "Syntax error; {error_detail}" + def __init__(self, error_detail): self.error_detail = error_detail super(UpdateExprSyntaxError, self).__init__( @@ -58,3 +109,14 @@ class 
ItemSizeTooLarge(MockValidationException): def __init__(self): super(ItemSizeTooLarge, self).__init__(self.item_size_too_large_msg) + + +class IncorrectOperandType(InvalidUpdateExpression): + inv_operand_msg = "Incorrect operand type for operator or function; operator or function: {f}, operand type: {t}" + + def __init__(self, operator_or_function, operand_type): + self.operator_or_function = operator_or_function + self.operand_type = operand_type + super(IncorrectOperandType, self).__init__( + self.inv_operand_msg.format(f=operator_or_function, t=operand_type) + ) diff --git a/moto/dynamodb2/models/dynamo_type.py b/moto/dynamodb2/models/dynamo_type.py index 300804c1e8a9..a3199dcaa2ea 100644 --- a/moto/dynamodb2/models/dynamo_type.py +++ b/moto/dynamodb2/models/dynamo_type.py @@ -123,6 +123,37 @@ def __ge__(self, other): def __repr__(self): return "DynamoType: {0}".format(self.to_json()) + def __add__(self, other): + if self.type != other.type: + raise TypeError("Different types of operandi is not allowed.") + if self.type == "N": + return DynamoType({"N": "{v}".format(v=int(self.value) + int(other.value))}) + else: + raise TypeError("Sum only supported for Numbers.") + + def __sub__(self, other): + if self.type != other.type: + raise TypeError("Different types of operandi is not allowed.") + if self.type == "N": + return DynamoType({"N": "{v}".format(v=int(self.value) - int(other.value))}) + else: + raise TypeError("Sum only supported for Numbers.") + + def __getitem__(self, item): + if isinstance(item, six.string_types): + # If our DynamoType is a map it should be subscriptable with a key + if self.type == "M": + return self.value[item] + elif isinstance(item, int): + # If our DynamoType is a list is should be subscriptable with an index + if self.type == "L": + return self.value[item] + raise TypeError( + "This DynamoType {dt} is not subscriptable by a {it}".format( + dt=self.type, it=type(item) + ) + ) + @property def cast_value(self): if self.is_number(): diff --git a/moto/dynamodb2/parsing/README.md b/moto/dynamodb2/parsing/README.md new file mode 100644 index 000000000000..6c4390d0207a --- /dev/null +++ b/moto/dynamodb2/parsing/README.md @@ -0,0 +1,23 @@ +# Parsing dev documentation + +Parsing happens in a structured manner and happens in different phases. +This document explains these phases. + + +## 1) Expression gets parsed into a tokenlist (tokenized) +A string gets parsed from left to right and gets converted into a list of tokens. +The tokens are available in `tokens.py`. + +## 2) Tokenlist get transformed to expression tree (AST) +This is the parsing of the token list. This parsing will result in an Abstract Syntax Tree (AST). +The different node types are available in `ast_nodes.py`. The AST is a representation that has all +the information that is in the expression but its tree form allows processing it in a structured manner. + +## 3) The AST gets validated (full semantic correctness) +The AST is used for validation. The paths and attributes are validated to be correct. At the end of the +validation all the values will be resolved. + +## 4) Update Expression gets executed using the validated AST +Finally the AST is used to execute the update expression. There should be no reason for this step to fail +since validation has completed. Due to this we have the update expressions behaving atomically (i.e. all the +actions of the update expresion are performed or none of them are performed). 
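+
+As a quick illustration of phases 1 and 2 (a hypothetical usage sketch):
+
+```python
+from moto.dynamodb2.parsing.tokens import ExpressionTokenizer
+from moto.dynamodb2.parsing.expressions import UpdateExpressionParser
+
+# Phase 1: tokenize the expression string into a token list.
+token_list = ExpressionTokenizer.make_list("SET a = :val")
+# Phases 1+2: UpdateExpressionParser.make tokenizes and parses in one go,
+# returning the root UpdateExpression node of the AST.
+ast = UpdateExpressionParser.make("SET a = :val")
+```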
\ No newline at end of file diff --git a/moto/dynamodb2/parsing/ast_nodes.py b/moto/dynamodb2/parsing/ast_nodes.py index 78c7b6b2bc8e..81735a8c9642 100644 --- a/moto/dynamodb2/parsing/ast_nodes.py +++ b/moto/dynamodb2/parsing/ast_nodes.py @@ -1,6 +1,11 @@ import abc +from abc import abstractmethod +from collections import deque + import six +from moto.dynamodb2.models import DynamoType + @six.add_metaclass(abc.ABCMeta) class Node: @@ -167,7 +172,15 @@ class ExpressionSelector(LeafNode): """Node identifying selector [selection_index] in expresion""" def __init__(self, selection_index): - super(ExpressionSelector, self).__init__(children=[selection_index]) + try: + super(ExpressionSelector, self).__init__(children=[int(selection_index)]) + except ValueError: + assert ( + False + ), "Expression selector must be an int, this is a bug in the moto library." + + def get_index(self): + return self.children[0] class ExpressionAttribute(LeafNode): @@ -176,6 +189,9 @@ class ExpressionAttribute(LeafNode): def __init__(self, attribute): super(ExpressionAttribute, self).__init__(children=[attribute]) + def get_attribute_name(self): + return self.children[0] + class ExpressionAttributeName(LeafNode): """An ExpressionAttributeName is an alias for an attribute identifier""" @@ -183,6 +199,9 @@ class ExpressionAttributeName(LeafNode): def __init__(self, attribute_name): super(ExpressionAttributeName, self).__init__(children=[attribute_name]) + def get_attribute_name_placeholder(self): + return self.children[0] + class ExpressionAttributeValue(LeafNode): """An ExpressionAttributeValue is an alias for an value""" @@ -190,6 +209,9 @@ class ExpressionAttributeValue(LeafNode): def __init__(self, value): super(ExpressionAttributeValue, self).__init__(children=[value]) + def get_value_name(self): + return self.children[0] + class ExpressionValueOperator(LeafNode): """An ExpressionValueOperator is an operation that works on 2 values""" @@ -197,9 +219,142 @@ class ExpressionValueOperator(LeafNode): def __init__(self, value): super(ExpressionValueOperator, self).__init__(children=[value]) + def get_operator(self): + return self.children[0] + class UpdateExpressionFunction(Node): """ A Node representing a function of an Update Expression. The first child is the function name the others are the arguments. """ + + def get_function_name(self): + return self.children[0] + + def get_nth_argument(self, n=1): + """Return nth element where n is a 1-based index.""" + assert n >= 1 + return self.children[n] + + +class DDBTypedValue(Node): + """ + A node representing a DDBTyped value. This can be any structure as supported by DyanmoDB. The node only has 1 child + which is the value of type `DynamoType`. + """ + + def __init__(self, value): + assert isinstance(value, DynamoType), "DDBTypedValue must be of DynamoType" + super(DDBTypedValue, self).__init__(children=[value]) + + def get_value(self): + return self.children[0] + + +class NoneExistingPath(LeafNode): + """A placeholder for Paths that did not exist in the Item.""" + + def __init__(self, creatable=False): + super(NoneExistingPath, self).__init__(children=[creatable]) + + def is_creatable(self): + """Can this path be created if need be. For example path creating element in a dictionary or creating a new + attribute under root level of an item.""" + return self.children[0] + + +class DepthFirstTraverser(object): + """ + Helper class that allows depth first traversal and to implement custom processing for certain AST nodes. 
The + processor of a node must return the new resulting node. This node will be placed in the tree. Processing of a + node using this traverser should therefore only transform child nodes. The returned node will get the same parent + as the node before processing had. + """ + + @abstractmethod + def _processing_map(self): + """ + A map providing a processing function per node class type to a function that takes in a Node object and + processes it. A Node can only be processed by a single function and they are considered in order. Therefore if + multiple classes from a single class hierarchy strain are used the more specific classes have to be put before + the less specific ones. That requires overriding `nodes_to_be_processed`. If no multiple classes form a single + class hierarchy strain are used the default implementation of `nodes_to_be_processed` should be OK. + Returns: + dict: Mapping a Node Class to a processing function. + """ + pass + + def nodes_to_be_processed(self): + """Cached accessor for getting Node types that need to be processed.""" + return tuple(k for k in self._processing_map().keys()) + + def process(self, node): + """Process a Node""" + for class_key, processor in self._processing_map().items(): + if isinstance(node, class_key): + return processor(node) + + def pre_processing_of_child(self, parent_node, child_id): + """Hook that is called pre-processing of the child at position `child_id`""" + pass + + def traverse_node_recursively(self, node, child_id=-1): + """ + Traverse nodes depth first processing nodes bottom up (if root node is considered the top). + + Args: + node(Node): The node which is the last node to be processed but which allows to identify all the + work (which is in the children) + child_id(int): The index in the list of children from the parent that this node corresponds to + + Returns: + Node: The node of the new processed AST + """ + if isinstance(node, Node): + parent_node = node.parent + if node.children is not None: + for i, child_node in enumerate(node.children): + self.pre_processing_of_child(node, i) + self.traverse_node_recursively(child_node, i) + # noinspection PyTypeChecker + if isinstance(node, self.nodes_to_be_processed()): + node = self.process(node) + node.parent = parent_node + parent_node.children[child_id] = node + return node + + def traverse(self, node): + return self.traverse_node_recursively(node) + + +class NodeDepthLeftTypeFetcher(object): + """Helper class to fetch a node of a specific type. 
Depth left-first traversal""" + + def __init__(self, node_type, root_node): + assert issubclass(node_type, Node) + self.node_type = node_type + self.root_node = root_node + self.queue = deque() + self.add_nodes_left_to_right_depth_first(self.root_node) + + def add_nodes_left_to_right_depth_first(self, node): + if isinstance(node, Node) and node.children is not None: + for child_node in node.children: + self.add_nodes_left_to_right_depth_first(child_node) + self.queue.append(child_node) + self.queue.append(node) + + def __iter__(self): + return self + + def next(self): + return self.__next__() + + def __next__(self): + while len(self.queue) > 0: + candidate = self.queue.popleft() + if isinstance(candidate, self.node_type): + return candidate + else: + raise StopIteration diff --git a/moto/dynamodb2/parsing/expressions.py b/moto/dynamodb2/parsing/expressions.py index e418bb47ef00..4c1d42a55dc8 100644 --- a/moto/dynamodb2/parsing/expressions.py +++ b/moto/dynamodb2/parsing/expressions.py @@ -29,7 +29,7 @@ UpdateExpressionDeleteActions, UpdateExpressionDeleteClause, ) -from moto.dynamodb2.exceptions import InvalidTokenException +from moto.dynamodb2.exceptions import InvalidTokenException, InvalidUpdateExpression from moto.dynamodb2.parsing.tokens import Token, ExpressionTokenizer @@ -371,6 +371,7 @@ def _parse(self): self._parse_target_clause(self._operand_factory_class()) else: self.raise_unexpected_token() + return self._create_node() @abstractmethod def _operand_factory_class(self): @@ -485,7 +486,7 @@ def _parse(self): else: self.raise_unexpected_token() - return self._create_node(), self.token_pos + return self._create_node() @classmethod def make(cls, expression_str): @@ -804,15 +805,41 @@ def _is_possible_start(cls, token): return token.type == Token.ATTRIBUTE_VALUE +class UpdateExpressionAttributeValueOrPathParser(ExpressionParser): + def _parse(self): + if UpdateExpressionAttributeValueParser.is_possible_start( + self.get_next_token() + ): + token, self.token_pos = UpdateExpressionAttributeValueParser( + **self._initializer_args() + )._parse_with_pos() + else: + token, self.token_pos = UpdateExpressionPathParser( + **self._initializer_args() + )._parse_with_pos() + return token + + @classmethod + def _is_possible_start(cls, token): + return any( + [ + UpdateExpressionAttributeValueParser.is_possible_start(token), + UpdateExpressionPathParser.is_possible_start(token), + ] + ) + + class UpdateExpressionFunctionParser(ExpressionParser): """ A helper to process a function of an Update Expression """ - # TODO(pbbouwel): Function names are supposedly case sensitive according to doc add tests # Map function to the factories for its elements FUNCTIONS = { - "if_not_exists": [UpdateExpressionPathParser, UpdateExpressionValueParser], + "if_not_exists": [ + UpdateExpressionPathParser, + UpdateExpressionAttributeValueOrPathParser, + ], "list_append": [UpdateExpressionOperandParser, UpdateExpressionOperandParser], } @@ -833,6 +860,9 @@ def _is_possible_start(cls, token): def _parse(self): function_name = self.get_next_token_value() + if function_name not in self.FUNCTIONS.keys(): + # Function names are case sensitive + raise InvalidUpdateExpression(function_name) self.goto_next_significant_token() self.process_token_of_type(Token.OPEN_ROUND_BRACKET) function_elements = [function_name] diff --git a/moto/dynamodb2/parsing/validators.py b/moto/dynamodb2/parsing/validators.py new file mode 100644 index 000000000000..180c7a874831 --- /dev/null +++ b/moto/dynamodb2/parsing/validators.py @@ -0,0 +1,341 
@@
+"""
+See the docstring of the Validator class below for more details on validation
+"""
+from abc import abstractmethod
+from copy import deepcopy
+
+from moto.dynamodb2.exceptions import (
+    AttributeIsReservedKeyword,
+    ExpressionAttributeValueNotDefined,
+    AttributeDoesNotExist,
+    ExpressionAttributeNameNotDefined,
+    IncorrectOperandType,
+    InvalidUpdateExpressionInvalidDocumentPath,
+)
+from moto.dynamodb2.models import DynamoType
+from moto.dynamodb2.parsing.ast_nodes import (
+    ExpressionAttribute,
+    UpdateExpressionPath,
+    UpdateExpressionSetAction,
+    UpdateExpressionAddAction,
+    UpdateExpressionDeleteAction,
+    UpdateExpressionRemoveAction,
+    DDBTypedValue,
+    ExpressionAttributeValue,
+    ExpressionAttributeName,
+    DepthFirstTraverser,
+    NoneExistingPath,
+    UpdateExpressionFunction,
+    ExpressionPathDescender,
+    UpdateExpressionValue,
+    ExpressionValueOperator,
+    ExpressionSelector,
+)
+from moto.dynamodb2.parsing.reserved_keywords import ReservedKeywords
+
+
+class ExpressionAttributeValueProcessor(DepthFirstTraverser):
+    def __init__(self, expression_attribute_values):
+        self.expression_attribute_values = expression_attribute_values
+
+    def _processing_map(self):
+        return {
+            ExpressionAttributeValue: self.replace_expression_attribute_value_with_value
+        }
+
+    def replace_expression_attribute_value_with_value(self, node):
+        """A node representing an Expression Attribute Value. Resolve and replace the value."""
+        assert isinstance(node, ExpressionAttributeValue)
+        attribute_value_name = node.get_value_name()
+        try:
+            target = self.expression_attribute_values[attribute_value_name]
+        except KeyError:
+            raise ExpressionAttributeValueNotDefined(
+                attribute_value=attribute_value_name
+            )
+        return DDBTypedValue(DynamoType(target))
+
+
+class ExpressionAttributeResolvingProcessor(DepthFirstTraverser):
+    def _processing_map(self):
+        return {
+            UpdateExpressionSetAction: self.disable_resolving,
+            UpdateExpressionPath: self.process_expression_path_node,
+        }
+
+    def __init__(self, expression_attribute_names, item):
+        self.expression_attribute_names = expression_attribute_names
+        self.item = item
+        self.resolving = False
+
+    def pre_processing_of_child(self, parent_node, child_id):
+        """
+        We have to enable resolving if we are processing a child of UpdateExpressionSetAction that is not the
+        first one, because the first argument is the path to be set and the 2nd argument is the value.
+        """
+        if isinstance(
+            parent_node,
+            (
+                UpdateExpressionSetAction,
+                UpdateExpressionRemoveAction,
+                UpdateExpressionDeleteAction,
+                UpdateExpressionAddAction,
+            ),
+        ):
+            if child_id == 0:
+                self.resolving = False
+            else:
+                self.resolving = True
+
+    def disable_resolving(self, node=None):
+        self.resolving = False
+        return node
+
+    def process_expression_path_node(self, node):
+        """Resolve ExpressionAttribute if not part of a path and resolving is enabled."""
+        if self.resolving:
+            return self.resolve_expression_path(node)
+        else:
+            # Still resolve, but return the original node so the path is kept as-is; just make sure the
+            # nodes are creatable.
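+            # A sketch of the two cases this guards (mirroring the validator tests added below):
+            #   "SET d = a"   resolves d to a creatable NoneExistingPath, which is accepted;
+            #   "SET d.e = a" resolves d.e to a non-creatable one and must raise
+            #   InvalidUpdateExpressionInvalidDocumentPath.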
+                result_node = self.resolve_expression_path(node)
+                if (
+                    isinstance(result_node, NoneExistingPath)
+                    and not result_node.is_creatable()
+                ):
+                    raise InvalidUpdateExpressionInvalidDocumentPath()
+
+            return node
+
+    def resolve_expression_path(self, node):
+        assert isinstance(node, UpdateExpressionPath)
+
+        target = deepcopy(self.item.attrs)
+        for child in node.children:
+            # First replace placeholder with attribute_name
+            attr_name = None
+            if isinstance(child, ExpressionAttributeName):
+                attr_placeholder = child.get_attribute_name_placeholder()
+                try:
+                    attr_name = self.expression_attribute_names[attr_placeholder]
+                except KeyError:
+                    raise ExpressionAttributeNameNotDefined(attr_placeholder)
+            elif isinstance(child, ExpressionAttribute):
+                attr_name = child.get_attribute_name()
+                self.raise_exception_if_keyword(attr_name)
+            if attr_name is not None:
+                # Resolve attribute_name
+                try:
+                    target = target[attr_name]
+                except (KeyError, TypeError):
+                    if child == node.children[-1]:
+                        return NoneExistingPath(creatable=True)
+                    return NoneExistingPath()
+            else:
+                if isinstance(child, ExpressionPathDescender):
+                    continue
+                elif isinstance(child, ExpressionSelector):
+                    index = child.get_index()
+                    if target.is_list():
+                        try:
+                            target = target[index]
+                        except IndexError:
+                            # Going out of bounds is not a problem on the assignment side;
+                            # the value will simply be appended to the list.
+                            if child == node.children[-1]:
+                                return NoneExistingPath(creatable=True)
+                            return NoneExistingPath()
+                    else:
+                        raise InvalidUpdateExpressionInvalidDocumentPath
+                else:
+                    raise NotImplementedError(
+                        "Path resolution for {t}".format(t=type(child))
+                    )
+        return DDBTypedValue(DynamoType(target))
+
+    @classmethod
+    def raise_exception_if_keyword(cls, attribute):
+        if attribute.upper() in ReservedKeywords.get_reserved_keywords():
+            raise AttributeIsReservedKeyword(attribute)
+
+
+class UpdateExpressionFunctionEvaluator(DepthFirstTraverser):
+    """
+    At the time of writing there are only 2 functions for DDB UpdateExpressions.
+    They both are specific to the SET expression, as per the official AWS docs:
+    https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/
+    Expressions.UpdateExpressions.html#Expressions.UpdateExpressions.SET
+    """
+
+    def _processing_map(self):
+        return {UpdateExpressionFunction: self.process_function}
+
+    def process_function(self, node):
+        assert isinstance(node, UpdateExpressionFunction)
+        function_name = node.get_function_name()
+        first_arg = node.get_nth_argument(1)
+        second_arg = node.get_nth_argument(2)
+
+        if function_name == "if_not_exists":
+            if isinstance(first_arg, NoneExistingPath):
+                result = second_arg
+            else:
+                result = first_arg
+            assert isinstance(result, (DDBTypedValue, NoneExistingPath))
+            return result
+        elif function_name == "list_append":
+            first_arg = self.get_list_from_ddb_typed_value(first_arg, function_name)
+            second_arg = self.get_list_from_ddb_typed_value(second_arg, function_name)
+            for list_element in second_arg.value:
+                first_arg.value.append(list_element)
+            return DDBTypedValue(first_arg)
+        else:
+            raise NotImplementedError(
+                "Unsupported function for moto {name}".format(name=function_name)
+            )
+
+    @classmethod
+    def get_list_from_ddb_typed_value(cls, node, function_name):
+        assert isinstance(node, DDBTypedValue)
+        dynamo_value = node.get_value()
+        assert isinstance(dynamo_value, DynamoType)
+        if not dynamo_value.is_list():
+            raise IncorrectOperandType(function_name, dynamo_value.type)
+        return dynamo_value
+
+
+class NoneExistingPathChecker(DepthFirstTraverser):
+    """
+    Pass through the AST and make sure there are no non-existing paths.
+    """
+
+    def _processing_map(self):
+        return {NoneExistingPath: self.raise_none_existing_path}
+
+    def raise_none_existing_path(self, node):
+        raise AttributeDoesNotExist
+
+
+class ExecuteOperations(DepthFirstTraverser):
+    def _processing_map(self):
+        return {UpdateExpressionValue: self.process_update_expression_value}
+
+    def process_update_expression_value(self, node):
+        """
+        If an UpdateExpressionValue has only a single child, the node will be replaced with that child.
+        Otherwise it has 3 children and the middle one is an ExpressionValueOperator which details how to
+        combine them.
+        Args:
+            node(Node):
+
+        Returns:
+            Node: The resulting node of the operation if present or the child.
+        """
+        assert isinstance(node, UpdateExpressionValue)
+        if len(node.children) == 1:
+            return node.children[0]
+        elif len(node.children) == 3:
+            operator_node = node.children[1]
+            assert isinstance(operator_node, ExpressionValueOperator)
+            operator = operator_node.get_operator()
+            left_operand = self.get_dynamo_value_from_ddb_typed_value(node.children[0])
+            right_operand = self.get_dynamo_value_from_ddb_typed_value(node.children[2])
+            if operator == "+":
+                return self.get_sum(left_operand, right_operand)
+            elif operator == "-":
+                return self.get_subtraction(left_operand, right_operand)
+            else:
+                raise NotImplementedError(
+                    "Moto does not support operator {operator}".format(
+                        operator=operator
+                    )
+                )
+        else:
+            raise NotImplementedError(
+                "UpdateExpressionValue only has implementations for 1 or 3 children."
+ ) + + @classmethod + def get_dynamo_value_from_ddb_typed_value(cls, node): + assert isinstance(node, DDBTypedValue) + dynamo_value = node.get_value() + assert isinstance(dynamo_value, DynamoType) + return dynamo_value + + @classmethod + def get_sum(cls, left_operand, right_operand): + """ + Args: + left_operand(DynamoType): + right_operand(DynamoType): + + Returns: + DDBTypedValue: + """ + try: + return DDBTypedValue(left_operand + right_operand) + except TypeError: + raise IncorrectOperandType("+", left_operand.type) + + @classmethod + def get_subtraction(cls, left_operand, right_operand): + """ + Args: + left_operand(DynamoType): + right_operand(DynamoType): + + Returns: + DDBTypedValue: + """ + try: + return DDBTypedValue(left_operand - right_operand) + except TypeError: + raise IncorrectOperandType("-", left_operand.type) + + +class Validator(object): + """ + A validator is used to validate expressions which are passed in as an AST. + """ + + def __init__( + self, expression, expression_attribute_names, expression_attribute_values, item + ): + """ + Besides validation the Validator should also replace referenced parts of an item which is cheapest upon + validation. + + Args: + expression(Node): The root node of the AST representing the expression to be validated + expression_attribute_names(ExpressionAttributeNames): + expression_attribute_values(ExpressionAttributeValues): + item(Item): The item which will be updated (pointed to by Key of update_item) + """ + self.expression_attribute_names = expression_attribute_names + self.expression_attribute_values = expression_attribute_values + self.item = item + self.processors = self.get_ast_processors() + self.node_to_validate = deepcopy(expression) + + @abstractmethod + def get_ast_processors(self): + """Get the different processors that go through the AST tree and processes the nodes.""" + + def validate(self): + n = self.node_to_validate + for processor in self.processors: + n = processor.traverse(n) + return n + + +class UpdateExpressionValidator(Validator): + def get_ast_processors(self): + """Get the different processors that go through the AST tree and processes the nodes.""" + processors = [ + ExpressionAttributeValueProcessor(self.expression_attribute_values), + ExpressionAttributeResolvingProcessor( + self.expression_attribute_names, self.item + ), + UpdateExpressionFunctionEvaluator(), + NoneExistingPathChecker(), + ExecuteOperations(), + ] + return processors diff --git a/moto/dynamodb2/responses.py b/moto/dynamodb2/responses.py index a5aeeac70525..d14a54873713 100644 --- a/moto/dynamodb2/responses.py +++ b/moto/dynamodb2/responses.py @@ -9,7 +9,7 @@ from moto.core.responses import BaseResponse from moto.core.utils import camelcase_to_underscores, amzn_request_id -from .exceptions import InvalidIndexNameError, InvalidUpdateExpression, ItemSizeTooLarge, MockValidationException +from .exceptions import InvalidIndexNameError, ItemSizeTooLarge, MockValidationException from moto.dynamodb2.models import dynamodb_backends, dynamo_json_dump diff --git a/tests/test_dynamodb2/test_dynamodb_expressions.py b/tests/test_dynamodb2/test_dynamodb_expressions.py index 1066231af043..2c82d8bc4873 100644 --- a/tests/test_dynamodb2/test_dynamodb_expressions.py +++ b/tests/test_dynamodb2/test_dynamodb_expressions.py @@ -393,3 +393,13 @@ def test_update_expression_parsing_is_not_keyword_aware(): except InvalidTokenException as te: assert te.token == "1" assert te.near == "VALUE 1" + + +def 
test_expression_if_not_exists_is_not_valid_in_remove_statement(): + set_action = "REMOVE if_not_exists(a,b)" + try: + UpdateExpressionParser.make(set_action) + assert False, "Exception not raised correctly" + except InvalidTokenException as te: + assert te.token == "(" + assert te.near == "if_not_exists(a" diff --git a/tests/test_dynamodb2/test_dynamodb_validation.py b/tests/test_dynamodb2/test_dynamodb_validation.py new file mode 100644 index 000000000000..d60dd48f6c15 --- /dev/null +++ b/tests/test_dynamodb2/test_dynamodb_validation.py @@ -0,0 +1,464 @@ +from moto.dynamodb2.exceptions import ( + AttributeIsReservedKeyword, + ExpressionAttributeValueNotDefined, + AttributeDoesNotExist, + ExpressionAttributeNameNotDefined, + IncorrectOperandType, + InvalidUpdateExpressionInvalidDocumentPath, +) +from moto.dynamodb2.models import Item, DynamoType +from moto.dynamodb2.parsing.ast_nodes import ( + NodeDepthLeftTypeFetcher, + UpdateExpressionSetAction, + UpdateExpressionValue, + DDBTypedValue, +) +from moto.dynamodb2.parsing.expressions import UpdateExpressionParser +from moto.dynamodb2.parsing.validators import UpdateExpressionValidator +from parameterized import parameterized + + +def test_validation_of_update_expression_with_keyword(): + try: + update_expression = "SET myNum = path + :val" + update_expression_values = {":val": {"N": "3"}} + update_expression_ast = UpdateExpressionParser.make(update_expression) + item = Item( + hash_key=DynamoType({"S": "id"}), + hash_key_type="TYPE", + range_key=None, + range_key_type=None, + attrs={"id": {"S": "1"}, "path": {"N": "3"}}, + ) + UpdateExpressionValidator( + update_expression_ast, + expression_attribute_names=None, + expression_attribute_values=update_expression_values, + item=item, + ).validate() + assert False, "No exception raised" + except AttributeIsReservedKeyword as e: + assert e.keyword == "path" + + +@parameterized( + ["SET a = #b + :val2", "SET a = :val2 + #b",] +) +def test_validation_of_a_set_statement_with_incorrect_passed_value(update_expression): + """ + By running permutations it shows that values are replaced prior to resolving attributes. + + An error occurred (ValidationException) when calling the UpdateItem operation: Invalid UpdateExpression: + An expression attribute value used in expression is not defined; attribute value: :val2 + """ + update_expression_ast = UpdateExpressionParser.make(update_expression) + item = Item( + hash_key=DynamoType({"S": "id"}), + hash_key_type="TYPE", + range_key=None, + range_key_type=None, + attrs={"id": {"S": "1"}, "b": {"N": "3"}}, + ) + try: + UpdateExpressionValidator( + update_expression_ast, + expression_attribute_names={"#b": "ok"}, + expression_attribute_values={":val": {"N": "3"}}, + item=item, + ).validate() + except ExpressionAttributeValueNotDefined as e: + assert e.attribute_value == ":val2" + + +def test_validation_of_update_expression_with_attribute_that_does_not_exist_in_item(): + """ + When an update expression tries to get an attribute that does not exist it must throw the appropriate exception. 
+ + An error occurred (ValidationException) when calling the UpdateItem operation: + The provided expression refers to an attribute that does not exist in the item + """ + try: + update_expression = "SET a = nonexistent" + update_expression_ast = UpdateExpressionParser.make(update_expression) + item = Item( + hash_key=DynamoType({"S": "id"}), + hash_key_type="TYPE", + range_key=None, + range_key_type=None, + attrs={"id": {"S": "1"}, "path": {"N": "3"}}, + ) + UpdateExpressionValidator( + update_expression_ast, + expression_attribute_names=None, + expression_attribute_values=None, + item=item, + ).validate() + assert False, "No exception raised" + except AttributeDoesNotExist: + assert True + + +@parameterized( + ["SET a = #c", "SET a = #c + #d",] +) +def test_validation_of_update_expression_with_attribute_name_that_is_not_defined( + update_expression, +): + """ + When an update expression tries to get an attribute name that is not provided it must throw an exception. + + An error occurred (ValidationException) when calling the UpdateItem operation: Invalid UpdateExpression: + An expression attribute name used in the document path is not defined; attribute name: #c + """ + try: + update_expression_ast = UpdateExpressionParser.make(update_expression) + item = Item( + hash_key=DynamoType({"S": "id"}), + hash_key_type="TYPE", + range_key=None, + range_key_type=None, + attrs={"id": {"S": "1"}, "path": {"N": "3"}}, + ) + UpdateExpressionValidator( + update_expression_ast, + expression_attribute_names={"#b": "ok"}, + expression_attribute_values=None, + item=item, + ).validate() + assert False, "No exception raised" + except ExpressionAttributeNameNotDefined as e: + assert e.not_defined_attribute_name == "#c" + + +def test_validation_of_if_not_exists_not_existing_invalid_replace_value(): + try: + update_expression = "SET a = if_not_exists(b, a.c)" + update_expression_ast = UpdateExpressionParser.make(update_expression) + item = Item( + hash_key=DynamoType({"S": "id"}), + hash_key_type="TYPE", + range_key=None, + range_key_type=None, + attrs={"id": {"S": "1"}, "a": {"S": "A"}}, + ) + UpdateExpressionValidator( + update_expression_ast, + expression_attribute_names=None, + expression_attribute_values=None, + item=item, + ).validate() + assert False, "No exception raised" + except AttributeDoesNotExist: + assert True + + +def get_first_node_of_type(ast, node_type): + return next(NodeDepthLeftTypeFetcher(node_type, ast)) + + +def get_set_action_value(ast): + """ + Helper that takes an AST and gets the first UpdateExpressionSetAction and retrieves the value of that action. + This should only be called on validated expressions. + Args: + ast(Node): + + Returns: + DynamoType: The DynamoType object representing the Dynamo value. 
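+
+    Example (illustrative): after validating "SET a = :val" with :val == {"N": "4"},
+    this helper returns DynamoType({"N": "4"}).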
+ """ + set_action = get_first_node_of_type(ast, UpdateExpressionSetAction) + typed_value = set_action.children[1] + assert isinstance(typed_value, DDBTypedValue) + dynamo_value = typed_value.children[0] + assert isinstance(dynamo_value, DynamoType) + return dynamo_value + + +def test_validation_of_if_not_exists_not_existing_value(): + update_expression = "SET a = if_not_exists(b, a)" + update_expression_ast = UpdateExpressionParser.make(update_expression) + item = Item( + hash_key=DynamoType({"S": "id"}), + hash_key_type="TYPE", + range_key=None, + range_key_type=None, + attrs={"id": {"S": "1"}, "a": {"S": "A"}}, + ) + validated_ast = UpdateExpressionValidator( + update_expression_ast, + expression_attribute_names=None, + expression_attribute_values=None, + item=item, + ).validate() + dynamo_value = get_set_action_value(validated_ast) + assert dynamo_value == DynamoType({"S": "A"}) + + +def test_validation_of_if_not_exists_with_existing_attribute_should_return_attribute(): + update_expression = "SET a = if_not_exists(b, a)" + update_expression_ast = UpdateExpressionParser.make(update_expression) + item = Item( + hash_key=DynamoType({"S": "id"}), + hash_key_type="TYPE", + range_key=None, + range_key_type=None, + attrs={"id": {"S": "1"}, "a": {"S": "A"}, "b": {"S": "B"}}, + ) + validated_ast = UpdateExpressionValidator( + update_expression_ast, + expression_attribute_names=None, + expression_attribute_values=None, + item=item, + ).validate() + dynamo_value = get_set_action_value(validated_ast) + assert dynamo_value == DynamoType({"S": "B"}) + + +def test_validation_of_if_not_exists_with_existing_attribute_should_return_value(): + update_expression = "SET a = if_not_exists(b, :val)" + update_expression_values = {":val": {"N": "4"}} + update_expression_ast = UpdateExpressionParser.make(update_expression) + item = Item( + hash_key=DynamoType({"S": "id"}), + hash_key_type="TYPE", + range_key=None, + range_key_type=None, + attrs={"id": {"S": "1"}, "b": {"N": "3"}}, + ) + validated_ast = UpdateExpressionValidator( + update_expression_ast, + expression_attribute_names=None, + expression_attribute_values=update_expression_values, + item=item, + ).validate() + dynamo_value = get_set_action_value(validated_ast) + assert dynamo_value == DynamoType({"N": "3"}) + + +def test_validation_of_if_not_exists_with_non_existing_attribute_should_return_value(): + update_expression = "SET a = if_not_exists(b, :val)" + update_expression_values = {":val": {"N": "4"}} + update_expression_ast = UpdateExpressionParser.make(update_expression) + item = Item( + hash_key=DynamoType({"S": "id"}), + hash_key_type="TYPE", + range_key=None, + range_key_type=None, + attrs={"id": {"S": "1"}}, + ) + validated_ast = UpdateExpressionValidator( + update_expression_ast, + expression_attribute_names=None, + expression_attribute_values=update_expression_values, + item=item, + ).validate() + dynamo_value = get_set_action_value(validated_ast) + assert dynamo_value == DynamoType({"N": "4"}) + + +def test_validation_of_sum_operation(): + update_expression = "SET a = a + b" + update_expression_ast = UpdateExpressionParser.make(update_expression) + item = Item( + hash_key=DynamoType({"S": "id"}), + hash_key_type="TYPE", + range_key=None, + range_key_type=None, + attrs={"id": {"S": "1"}, "a": {"N": "3"}, "b": {"N": "4"}}, + ) + validated_ast = UpdateExpressionValidator( + update_expression_ast, + expression_attribute_names=None, + expression_attribute_values=None, + item=item, + ).validate() + dynamo_value = 
get_set_action_value(validated_ast) + assert dynamo_value == DynamoType({"N": "7"}) + + +def test_validation_homogeneous_list_append_function(): + update_expression = "SET ri = list_append(ri, :vals)" + update_expression_ast = UpdateExpressionParser.make(update_expression) + item = Item( + hash_key=DynamoType({"S": "id"}), + hash_key_type="TYPE", + range_key=None, + range_key_type=None, + attrs={"id": {"S": "1"}, "ri": {"L": [{"S": "i1"}, {"S": "i2"}]}}, + ) + validated_ast = UpdateExpressionValidator( + update_expression_ast, + expression_attribute_names=None, + expression_attribute_values={":vals": {"L": [{"S": "i3"}, {"S": "i4"}]}}, + item=item, + ).validate() + dynamo_value = get_set_action_value(validated_ast) + assert dynamo_value == DynamoType( + {"L": [{"S": "i1"}, {"S": "i2"}, {"S": "i3"}, {"S": "i4"}]} + ) + + +def test_validation_hetereogenous_list_append_function(): + update_expression = "SET ri = list_append(ri, :vals)" + update_expression_ast = UpdateExpressionParser.make(update_expression) + item = Item( + hash_key=DynamoType({"S": "id"}), + hash_key_type="TYPE", + range_key=None, + range_key_type=None, + attrs={"id": {"S": "1"}, "ri": {"L": [{"S": "i1"}, {"S": "i2"}]}}, + ) + validated_ast = UpdateExpressionValidator( + update_expression_ast, + expression_attribute_names=None, + expression_attribute_values={":vals": {"L": [{"N": "3"}]}}, + item=item, + ).validate() + dynamo_value = get_set_action_value(validated_ast) + assert dynamo_value == DynamoType({"L": [{"S": "i1"}, {"S": "i2"}, {"N": "3"}]}) + + +def test_validation_list_append_function_with_non_list_arg(): + """ + Must error out: + Invalid UpdateExpression: Incorrect operand type for operator or function; + operator or function: list_append, operand type: S' + Returns: + + """ + try: + update_expression = "SET ri = list_append(ri, :vals)" + update_expression_ast = UpdateExpressionParser.make(update_expression) + item = Item( + hash_key=DynamoType({"S": "id"}), + hash_key_type="TYPE", + range_key=None, + range_key_type=None, + attrs={"id": {"S": "1"}, "ri": {"L": [{"S": "i1"}, {"S": "i2"}]}}, + ) + UpdateExpressionValidator( + update_expression_ast, + expression_attribute_names=None, + expression_attribute_values={":vals": {"S": "N"}}, + item=item, + ).validate() + except IncorrectOperandType as e: + assert e.operand_type == "S" + assert e.operator_or_function == "list_append" + + +def test_sum_with_incompatible_types(): + """ + Must error out: + Invalid UpdateExpression: Incorrect operand type for operator or function; operator or function: +, operand type: S' + Returns: + + """ + try: + update_expression = "SET ri = :val + :val2" + update_expression_ast = UpdateExpressionParser.make(update_expression) + item = Item( + hash_key=DynamoType({"S": "id"}), + hash_key_type="TYPE", + range_key=None, + range_key_type=None, + attrs={"id": {"S": "1"}, "ri": {"L": [{"S": "i1"}, {"S": "i2"}]}}, + ) + UpdateExpressionValidator( + update_expression_ast, + expression_attribute_names=None, + expression_attribute_values={":val": {"S": "N"}, ":val2": {"N": "3"}}, + item=item, + ).validate() + except IncorrectOperandType as e: + assert e.operand_type == "S" + assert e.operator_or_function == "+" + + +def test_validation_of_subraction_operation(): + update_expression = "SET ri = :val - :val2" + update_expression_ast = UpdateExpressionParser.make(update_expression) + item = Item( + hash_key=DynamoType({"S": "id"}), + hash_key_type="TYPE", + range_key=None, + range_key_type=None, + attrs={"id": {"S": "1"}, "a": {"N": "3"}, "b": {"N": 
"4"}}, + ) + validated_ast = UpdateExpressionValidator( + update_expression_ast, + expression_attribute_names=None, + expression_attribute_values={":val": {"N": "1"}, ":val2": {"N": "3"}}, + item=item, + ).validate() + dynamo_value = get_set_action_value(validated_ast) + assert dynamo_value == DynamoType({"N": "-2"}) + + +def test_cannot_index_into_a_string(): + """ + Must error out: + The document path provided in the update expression is invalid for update' + """ + try: + update_expression = "set itemstr[1]=:Item" + update_expression_ast = UpdateExpressionParser.make(update_expression) + item = Item( + hash_key=DynamoType({"S": "id"}), + hash_key_type="TYPE", + range_key=None, + range_key_type=None, + attrs={"id": {"S": "foo2"}, "itemstr": {"S": "somestring"}}, + ) + UpdateExpressionValidator( + update_expression_ast, + expression_attribute_names=None, + expression_attribute_values={":Item": {"S": "string_update"}}, + item=item, + ).validate() + assert False, "Must raise exception" + except InvalidUpdateExpressionInvalidDocumentPath: + assert True + + +def test_validation_set_path_does_not_need_to_be_resolvable_when_setting_a_new_attribute(): + """If this step just passes we are happy enough""" + update_expression = "set d=a" + update_expression_ast = UpdateExpressionParser.make(update_expression) + item = Item( + hash_key=DynamoType({"S": "id"}), + hash_key_type="TYPE", + range_key=None, + range_key_type=None, + attrs={"id": {"S": "foo2"}, "a": {"N": "3"}}, + ) + validated_ast = UpdateExpressionValidator( + update_expression_ast, + expression_attribute_names=None, + expression_attribute_values=None, + item=item, + ).validate() + dynamo_value = get_set_action_value(validated_ast) + assert dynamo_value == DynamoType({"N": "3"}) + + +def test_validation_set_path_does_not_need_to_be_resolvable_but_must_be_creatable_when_setting_a_new_attribute(): + try: + update_expression = "set d.e=a" + update_expression_ast = UpdateExpressionParser.make(update_expression) + item = Item( + hash_key=DynamoType({"S": "id"}), + hash_key_type="TYPE", + range_key=None, + range_key_type=None, + attrs={"id": {"S": "foo2"}, "a": {"N": "3"}}, + ) + UpdateExpressionValidator( + update_expression_ast, + expression_attribute_names=None, + expression_attribute_values=None, + item=item, + ).validate() + assert False, "Must raise exception" + except InvalidUpdateExpressionInvalidDocumentPath: + assert True From e6b51a28ee884697cba89a68e5f9948880c25199 Mon Sep 17 00:00:00 2001 From: pvbouwel Date: Sun, 19 Apr 2020 16:50:53 +0100 Subject: [PATCH 241/658] Enable AST Validation This commit puts AST validation on the execution path. This means updates get validated prior to being executed. There were quite a few tests that were not working against Amazon DDB. These tests I considered broken and as such this commit adapts them such that they pass against Amazon DDB. test_update_item_on_map() => One of the SET actions would try to set a nested element by specifying the nesting on the path rather than by putting a map as a value for a non-existent key. This got changed. test_item_size_is_under_400KB => Used the keyword "item" which DDB doesn't like. Change to cont in order to keep the same sizings. => Secondly the size error messages differs a bit depending whether it is part of the update or part of a put_item. For an update it should be: Item size to update has exceeded the maximum allowed size otherwise it is Item size has exceeded the maximum allowed size' test_remove_top_level_attribute => Used a keyword item. 
Use ExpressionAttributeNames test_update_item_double_nested_remove => Used keywords name & first. Migrated to non-deprecated API and use ExpressionAttributeNames test_update_item_set & test_boto3_update_item_conditions_pass & test_boto3_update_item_conditions_pass_because_expect_not_exists & test_boto3_update_item_conditions_pass_because_expect_not_exists_by_compare_to_null & test_boto3_update_item_conditions_pass_because_expect_exists_by_compare_to_not_null & test_boto3_update_item_conditions_fail & test_boto3_update_item_conditions_fail_because_expect_not_exists & test_boto3_update_item_conditions_fail_because_expect_not_exists_by_compare_to_null => Were broken tests which had string literal instead of value placeholder --- moto/dynamodb2/exceptions.py | 11 ++++ moto/dynamodb2/models/__init__.py | 22 +++++-- tests/test_dynamodb2/test_dynamodb.py | 54 ++++++++++++---- .../test_dynamodb_table_without_range_key.py | 63 +++++++++++++------ 4 files changed, 117 insertions(+), 33 deletions(-) diff --git a/moto/dynamodb2/exceptions.py b/moto/dynamodb2/exceptions.py index a6acae071eab..5dd87ef6beba 100644 --- a/moto/dynamodb2/exceptions.py +++ b/moto/dynamodb2/exceptions.py @@ -111,6 +111,17 @@ def __init__(self): super(ItemSizeTooLarge, self).__init__(self.item_size_too_large_msg) +class ItemSizeToUpdateTooLarge(MockValidationException): + item_size_to_update_too_large_msg = ( + "Item size to update has exceeded the maximum allowed size" + ) + + def __init__(self): + super(ItemSizeToUpdateTooLarge, self).__init__( + self.item_size_to_update_too_large_msg + ) + + class IncorrectOperandType(InvalidUpdateExpression): inv_operand_msg = "Incorrect operand type for operator or function; operator or function: {f}, operand type: {t}" diff --git a/moto/dynamodb2/models/__init__.py b/moto/dynamodb2/models/__init__.py index 1f448f288b54..00825e06a142 100644 --- a/moto/dynamodb2/models/__init__.py +++ b/moto/dynamodb2/models/__init__.py @@ -14,11 +14,16 @@ from moto.core.utils import unix_time from moto.core.exceptions import JsonRESTError from moto.dynamodb2.comparisons import get_filter_expression -from moto.dynamodb2.comparisons import get_expected, get_comparison_func -from moto.dynamodb2.exceptions import InvalidIndexNameError, ItemSizeTooLarge, InvalidUpdateExpression +from moto.dynamodb2.comparisons import get_expected +from moto.dynamodb2.exceptions import ( + InvalidIndexNameError, + ItemSizeTooLarge, + ItemSizeToUpdateTooLarge, +) from moto.dynamodb2.models.utilities import bytesize, attribute_is_list from moto.dynamodb2.models.dynamo_type import DynamoType from moto.dynamodb2.parsing.expressions import UpdateExpressionParser +from moto.dynamodb2.parsing.validators import UpdateExpressionValidator class DynamoJsonEncoder(json.JSONEncoder): @@ -151,7 +156,10 @@ def update( if "." in key and attr not in self.attrs: raise ValueError # Setting nested attr not allowed if first attr does not exist yet elif attr not in self.attrs: - self.attrs[attr] = dyn_value # set new top-level attribute + try: + self.attrs[attr] = dyn_value # set new top-level attribute + except ItemSizeTooLarge: + raise ItemSizeToUpdateTooLarge() else: self.attrs[attr].set( ".".join(key.split(".")[1:]), dyn_value, list_index @@ -1202,7 +1210,7 @@ def update_item( # E.g. 
`a = b + c` -> `a=b+c` if update_expression: # Parse expression to get validation errors - UpdateExpressionParser.make(update_expression) + update_expression_ast = UpdateExpressionParser.make(update_expression) update_expression = re.sub(r"\s*([=\+-])\s*", "\\1", update_expression) if all([table.hash_key_attr in key, table.range_key_attr in key]): @@ -1247,6 +1255,12 @@ def update_item( item = table.get_item(hash_value, range_value) if update_expression: + UpdateExpressionValidator( + update_expression_ast, + expression_attribute_names=expression_attribute_names, + expression_attribute_values=expression_attribute_values, + item=item, + ).validate() item.update( update_expression, expression_attribute_names, diff --git a/tests/test_dynamodb2/test_dynamodb.py b/tests/test_dynamodb2/test_dynamodb.py index 09401d5626b7..0004001bc354 100644 --- a/tests/test_dynamodb2/test_dynamodb.py +++ b/tests/test_dynamodb2/test_dynamodb.py @@ -2147,13 +2147,33 @@ def test_update_item_on_map(): # Nonexistent nested attributes are supported for existing top-level attributes. table.update_item( Key={"forum_name": "the-key", "subject": "123"}, - UpdateExpression="SET body.#nested.#data = :tb, body.nested.#nonexistentnested.#data = :tb2", + UpdateExpression="SET body.#nested.#data = :tb", + ExpressionAttributeNames={"#nested": "nested", "#data": "data",}, + ExpressionAttributeValues={":tb": "new_value"}, + ) + # Running this against AWS DDB gives an exception so make sure it also fails.: + with assert_raises(client.exceptions.ClientError): + # botocore.exceptions.ClientError: An error occurred (ValidationException) when calling the UpdateItem + # operation: The document path provided in the update expression is invalid for update + table.update_item( + Key={"forum_name": "the-key", "subject": "123"}, + UpdateExpression="SET body.#nested.#nonexistentnested.#data = :tb2", + ExpressionAttributeNames={ + "#nested": "nested", + "#nonexistentnested": "nonexistentnested", + "#data": "data", + }, + ExpressionAttributeValues={":tb2": "other_value"}, + ) + + table.update_item( + Key={"forum_name": "the-key", "subject": "123"}, + UpdateExpression="SET body.#nested.#nonexistentnested = :tb2", ExpressionAttributeNames={ "#nested": "nested", "#nonexistentnested": "nonexistentnested", - "#data": "data", }, - ExpressionAttributeValues={":tb": "new_value", ":tb2": "other_value"}, + ExpressionAttributeValues={":tb2": {"data": "other_value"}}, ) resp = table.scan() @@ -2161,8 +2181,8 @@ def test_update_item_on_map(): {"nested": {"data": "new_value", "nonexistentnested": {"data": "other_value"}}} ) - # Test nested value for a nonexistent attribute. - with assert_raises(client.exceptions.ConditionalCheckFailedException): + # Test nested value for a nonexistent attribute throws a ClientError. 
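+    # Against live DDB this raises the same ValidationException as above: the
+    # document path provided in the update expression is invalid for update.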
+ with assert_raises(client.exceptions.ClientError): table.update_item( Key={"forum_name": "the-key", "subject": "123"}, UpdateExpression="SET nonexistent.#nested = :tb", @@ -3184,7 +3204,10 @@ def test_remove_top_level_attribute(): TableName=table_name, Item={"id": {"S": "foo"}, "item": {"S": "bar"}} ) client.update_item( - TableName=table_name, Key={"id": {"S": "foo"}}, UpdateExpression="REMOVE item" + TableName=table_name, + Key={"id": {"S": "foo"}}, + UpdateExpression="REMOVE #i", + ExpressionAttributeNames={"#i": "item"}, ) # result = client.get_item(TableName=table_name, Key={"id": {"S": "foo"}})["Item"] @@ -3359,21 +3382,21 @@ def test_item_size_is_under_400KB(): assert_failure_due_to_item_size( func=client.put_item, TableName="moto-test", - Item={"id": {"S": "foo"}, "item": {"S": large_item}}, + Item={"id": {"S": "foo"}, "cont": {"S": large_item}}, ) assert_failure_due_to_item_size( - func=table.put_item, Item={"id": "bar", "item": large_item} + func=table.put_item, Item={"id": "bar", "cont": large_item} ) - assert_failure_due_to_item_size( + assert_failure_due_to_item_size_to_update( func=client.update_item, TableName="moto-test", Key={"id": {"S": "foo2"}}, - UpdateExpression="set item=:Item", + UpdateExpression="set cont=:Item", ExpressionAttributeValues={":Item": {"S": large_item}}, ) # Assert op fails when updating a nested item assert_failure_due_to_item_size( - func=table.put_item, Item={"id": "bar", "itemlist": [{"item": large_item}]} + func=table.put_item, Item={"id": "bar", "itemlist": [{"cont": large_item}]} ) assert_failure_due_to_item_size( func=client.put_item, @@ -3394,6 +3417,15 @@ def assert_failure_due_to_item_size(func, **kwargs): ) +def assert_failure_due_to_item_size_to_update(func, **kwargs): + with assert_raises(ClientError) as ex: + func(**kwargs) + ex.exception.response["Error"]["Code"].should.equal("ValidationException") + ex.exception.response["Error"]["Message"].should.equal( + "Item size to update has exceeded the maximum allowed size" + ) + + @mock_dynamodb2 # https://docs.aws.amazon.com/amazondynamodb/latest/APIReference/API_Query.html#DDB-Query-request-KeyConditionExpression def test_hash_key_cannot_use_begins_with_operations(): diff --git a/tests/test_dynamodb2/test_dynamodb_table_without_range_key.py b/tests/test_dynamodb2/test_dynamodb_table_without_range_key.py index 08d7724f8546..b5cc01c84918 100644 --- a/tests/test_dynamodb2/test_dynamodb_table_without_range_key.py +++ b/tests/test_dynamodb2/test_dynamodb_table_without_range_key.py @@ -443,23 +443,40 @@ def test_update_item_nested_remove(): dict(returned_item).should.equal({"username": "steve", "Meta": {}}) -@mock_dynamodb2_deprecated +@mock_dynamodb2 def test_update_item_double_nested_remove(): - conn = boto.dynamodb2.connect_to_region("us-east-1") - table = Table.create("messages", schema=[HashKey("username")]) + conn = boto3.client("dynamodb", region_name="us-east-1") + conn.create_table( + TableName="messages", + KeySchema=[{"AttributeName": "username", "KeyType": "HASH"}], + AttributeDefinitions=[{"AttributeName": "username", "AttributeType": "S"}], + BillingMode="PAY_PER_REQUEST", + ) - data = {"username": "steve", "Meta": {"Name": {"First": "Steve", "Last": "Urkel"}}} - table.put_item(data=data) + item = { + "username": {"S": "steve"}, + "Meta": { + "M": {"Name": {"M": {"First": {"S": "Steve"}, "Last": {"S": "Urkel"}}}} + }, + } + conn.put_item(TableName="messages", Item=item) key_map = {"username": {"S": "steve"}} # Then remove the Meta.FullName field - conn.update_item("messages", 
key_map, update_expression="REMOVE Meta.Name.First") - - returned_item = table.get_item(username="steve") - dict(returned_item).should.equal( - {"username": "steve", "Meta": {"Name": {"Last": "Urkel"}}} + conn.update_item( + TableName="messages", + Key=key_map, + UpdateExpression="REMOVE Meta.#N.#F", + ExpressionAttributeNames={"#N": "Name", "#F": "First"}, ) + returned_item = conn.get_item(TableName="messages", Key=key_map) + expected_item = { + "username": {"S": "steve"}, + "Meta": {"M": {"Name": {"M": {"Last": {"S": "Urkel"}}}}}, + } + dict(returned_item["Item"]).should.equal(expected_item) + @mock_dynamodb2_deprecated def test_update_item_set(): @@ -471,7 +488,10 @@ def test_update_item_set(): key_map = {"username": {"S": "steve"}} conn.update_item( - "messages", key_map, update_expression="SET foo=bar, blah=baz REMOVE SentBy" + "messages", + key_map, + update_expression="SET foo=:bar, blah=:baz REMOVE SentBy", + expression_attribute_values={":bar": {"S": "bar"}, ":baz": {"S": "baz"}}, ) returned_item = table.get_item(username="steve") @@ -616,8 +636,9 @@ def test_boto3_update_item_conditions_fail(): table.put_item(Item={"username": "johndoe", "foo": "baz"}) table.update_item.when.called_with( Key={"username": "johndoe"}, - UpdateExpression="SET foo=bar", + UpdateExpression="SET foo=:bar", Expected={"foo": {"Value": "bar"}}, + ExpressionAttributeValues={":bar": "bar"}, ).should.throw(botocore.client.ClientError) @@ -627,8 +648,9 @@ def test_boto3_update_item_conditions_fail_because_expect_not_exists(): table.put_item(Item={"username": "johndoe", "foo": "baz"}) table.update_item.when.called_with( Key={"username": "johndoe"}, - UpdateExpression="SET foo=bar", + UpdateExpression="SET foo=:bar", Expected={"foo": {"Exists": False}}, + ExpressionAttributeValues={":bar": "bar"}, ).should.throw(botocore.client.ClientError) @@ -638,8 +660,9 @@ def test_boto3_update_item_conditions_fail_because_expect_not_exists_by_compare_ table.put_item(Item={"username": "johndoe", "foo": "baz"}) table.update_item.when.called_with( Key={"username": "johndoe"}, - UpdateExpression="SET foo=bar", + UpdateExpression="SET foo=:bar", Expected={"foo": {"ComparisonOperator": "NULL"}}, + ExpressionAttributeValues={":bar": "bar"}, ).should.throw(botocore.client.ClientError) @@ -649,8 +672,9 @@ def test_boto3_update_item_conditions_pass(): table.put_item(Item={"username": "johndoe", "foo": "bar"}) table.update_item( Key={"username": "johndoe"}, - UpdateExpression="SET foo=baz", + UpdateExpression="SET foo=:baz", Expected={"foo": {"Value": "bar"}}, + ExpressionAttributeValues={":baz": "baz"}, ) returned_item = table.get_item(Key={"username": "johndoe"}) assert dict(returned_item)["Item"]["foo"].should.equal("baz") @@ -662,8 +686,9 @@ def test_boto3_update_item_conditions_pass_because_expect_not_exists(): table.put_item(Item={"username": "johndoe", "foo": "bar"}) table.update_item( Key={"username": "johndoe"}, - UpdateExpression="SET foo=baz", + UpdateExpression="SET foo=:baz", Expected={"whatever": {"Exists": False}}, + ExpressionAttributeValues={":baz": "baz"}, ) returned_item = table.get_item(Key={"username": "johndoe"}) assert dict(returned_item)["Item"]["foo"].should.equal("baz") @@ -675,8 +700,9 @@ def test_boto3_update_item_conditions_pass_because_expect_not_exists_by_compare_ table.put_item(Item={"username": "johndoe", "foo": "bar"}) table.update_item( Key={"username": "johndoe"}, - UpdateExpression="SET foo=baz", + UpdateExpression="SET foo=:baz", Expected={"whatever": {"ComparisonOperator": "NULL"}}, + 
ExpressionAttributeValues={":baz": "baz"}, ) returned_item = table.get_item(Key={"username": "johndoe"}) assert dict(returned_item)["Item"]["foo"].should.equal("baz") @@ -688,8 +714,9 @@ def test_boto3_update_item_conditions_pass_because_expect_exists_by_compare_to_n table.put_item(Item={"username": "johndoe", "foo": "bar"}) table.update_item( Key={"username": "johndoe"}, - UpdateExpression="SET foo=baz", + UpdateExpression="SET foo=:baz", Expected={"foo": {"ComparisonOperator": "NOT_NULL"}}, + ExpressionAttributeValues={":baz": "baz"}, ) returned_item = table.get_item(Key={"username": "johndoe"}) assert dict(returned_item)["Item"]["foo"].should.equal("baz") From 3a774ed0e0b22a42cf206533a9c4e6952089937f Mon Sep 17 00:00:00 2001 From: pvbouwel Date: Sun, 19 Apr 2020 17:55:00 +0100 Subject: [PATCH 242/658] Make sure reserved_keywords.txt is packaged with the library. --- setup.py | 1 + 1 file changed, 1 insertion(+) diff --git a/setup.py b/setup.py index 79b9875ee4bf..b142f3203ce3 100755 --- a/setup.py +++ b/setup.py @@ -100,4 +100,5 @@ def get_version(): project_urls={ "Documentation": "http://docs.getmoto.org/en/latest/", }, + data_files=[('', ['moto/dynamodb2/parsing/reserved_keywords.txt'])], ) From ed5e0b586c50955515fb2ed72a7c028ece91a9d3 Mon Sep 17 00:00:00 2001 From: Dmitry Ryzhikov Date: Mon, 20 Apr 2020 00:15:00 +0300 Subject: [PATCH 243/658] Handle ValueError raised on missing table name --- moto/dynamodb2/responses.py | 16 ++++++++++++---- 1 file changed, 12 insertions(+), 4 deletions(-) diff --git a/moto/dynamodb2/responses.py b/moto/dynamodb2/responses.py index 65484aa0818d..32f10abc02ed 100644 --- a/moto/dynamodb2/responses.py +++ b/moto/dynamodb2/responses.py @@ -92,16 +92,24 @@ def call_action(self): def list_tables(self): body = self.body limit = body.get("Limit", 100) - if body.get("ExclusiveStartTableName"): - last = body.get("ExclusiveStartTableName") - start = list(self.dynamodb_backend.tables.keys()).index(last) + 1 + all_tables = list(self.dynamodb_backend.tables.keys()) + + exclusive_start_table_name = body.get("ExclusiveStartTableName") + if exclusive_start_table_name: + try: + last_table_index = all_tables.index(exclusive_start_table_name) + except ValueError: + start = len(all_tables) + else: + start = last_table_index + 1 else: start = 0 - all_tables = list(self.dynamodb_backend.tables.keys()) + if limit: tables = all_tables[start : start + limit] else: tables = all_tables[start:] + response = {"TableNames": tables} if limit and len(all_tables) > start + limit: response["LastEvaluatedTableName"] = tables[-1] From 0d04306861f82cb7489b1d6d261dbaf3d6c745dd Mon Sep 17 00:00:00 2001 From: Asher Foa <1268088+asherf@users.noreply.github.com> Date: Sun, 19 Apr 2020 19:12:40 -0700 Subject: [PATCH 244/658] Fix deprecation warning. 
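The warnings being fixed come from Python flagging unrecognised escape sequences
(such as "\." outside a raw string) when it compiles a plain string literal. A
minimal sketch of the difference, illustrative only and not part of the patch:

    import re
    import warnings

    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter("always")
        # The flag is raised when the literal is compiled, so compile explicitly:
        compile('pattern = "^[a-zA-z0-9_\\.\\-]*$"', "<non-raw>", "exec")
        compile('pattern = r"^[a-zA-z0-9_\\.\\-]*$"', "<raw>", "exec")

    # Only the non-raw variant warns about an invalid escape sequence; the regex
    # itself behaves identically either way:
    print([str(w.message) for w in caught])
    assert re.match(r"^[a-zA-z0-9_\.\-]*$", "some_label-1.0")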
--- moto/ec2/urls.py | 2 +- moto/ssm/models.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/moto/ec2/urls.py b/moto/ec2/urls.py index 4d85b2f56516..78f234320d94 100644 --- a/moto/ec2/urls.py +++ b/moto/ec2/urls.py @@ -2,6 +2,6 @@ from .responses import EC2Response -url_bases = ["https?://ec2\.(.+)\.amazonaws\.com(|\.cn)"] +url_bases = [r"https?://ec2\.(.+)\.amazonaws\.com(|\.cn)"] url_paths = {"{0}/": EC2Response.dispatch} diff --git a/moto/ssm/models.py b/moto/ssm/models.py index 201f43c5a47a..3ce3b3a227cc 100644 --- a/moto/ssm/models.py +++ b/moto/ssm/models.py @@ -651,7 +651,7 @@ def label_parameter_version(self, name, version, labels): label.startswith("aws") or label.startswith("ssm") or label[:1].isdigit() - or not re.match("^[a-zA-z0-9_\.\-]*$", label) + or not re.match(r"^[a-zA-z0-9_\.\-]*$", label) ): invalid_labels.append(label) continue From ad0805de0ec3546767f5c13141e3d072b8c6f496 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Michael=20K=C3=A4ufl?= Date: Mon, 20 Apr 2020 09:19:24 +0200 Subject: [PATCH 245/658] Add Python 3.8 to trove classifiers --- setup.py | 1 + 1 file changed, 1 insertion(+) diff --git a/setup.py b/setup.py index 79b9875ee4bf..684c0dcea6db 100755 --- a/setup.py +++ b/setup.py @@ -94,6 +94,7 @@ def get_version(): "Programming Language :: Python :: 3.5", "Programming Language :: Python :: 3.6", "Programming Language :: Python :: 3.7", + "Programming Language :: Python :: 3.8", "License :: OSI Approved :: Apache Software License", "Topic :: Software Development :: Testing", ], From 1a3a7d6a92619ec4142f5d4263f4b308b15e4209 Mon Sep 17 00:00:00 2001 From: Dmitry Ryzhikov Date: Mon, 20 Apr 2020 20:23:37 +0300 Subject: [PATCH 246/658] Add test for missing table name --- tests/test_dynamodb2/test_dynamodb.py | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/tests/test_dynamodb2/test_dynamodb.py b/tests/test_dynamodb2/test_dynamodb.py index bec24c966ef2..cb9230a4a0d9 100644 --- a/tests/test_dynamodb2/test_dynamodb.py +++ b/tests/test_dynamodb2/test_dynamodb.py @@ -4177,3 +4177,12 @@ def test_gsi_verify_negative_number_order(): [float(item["gsiK1SortKey"]) for item in resp["Items"]].should.equal( [-0.7, -0.6, 0.7] ) + + +@mock_dynamodb2 +def test_list_tables_exclusive_start_table_name_empty(): + client = boto3.client("dynamodb", region_name="us-east-1") + + resp = client.list_tables(Limit=1, ExclusiveStartTableName="whatever") + + len(resp["TableNames"]).should.equal(0) From b6789a2cc7d7ab9c2bd3fdd5b95d00e6fa20758d Mon Sep 17 00:00:00 2001 From: Tomoya Iwata Date: Tue, 21 Apr 2020 14:11:53 +0900 Subject: [PATCH 247/658] Added existence check of target thing to IoT ListThingPrincipals fix #2910 --- moto/iot/exceptions.py | 4 ++-- moto/iot/models.py | 8 ++++++++ tests/test_iot/test_iot.py | 7 +++++++ 3 files changed, 17 insertions(+), 2 deletions(-) diff --git a/moto/iot/exceptions.py b/moto/iot/exceptions.py index d114a12ad557..7a578c22173d 100644 --- a/moto/iot/exceptions.py +++ b/moto/iot/exceptions.py @@ -7,10 +7,10 @@ class IoTClientError(JsonRESTError): class ResourceNotFoundException(IoTClientError): - def __init__(self): + def __init__(self, msg=None): self.code = 404 super(ResourceNotFoundException, self).__init__( - "ResourceNotFoundException", "The specified resource does not exist" + "ResourceNotFoundException", msg or "The specified resource does not exist" ) diff --git a/moto/iot/models.py b/moto/iot/models.py index de4383b964c1..51a23b6c6e22 100644 --- a/moto/iot/models.py +++ b/moto/iot/models.py @@ -805,6 +805,14 @@ def 
list_principal_things(self, principal_arn): return thing_names def list_thing_principals(self, thing_name): + + things = [_ for _ in self.things.values() if _.thing_name == thing_name] + if len(things) == 0: + raise ResourceNotFoundException( + "Failed to list principals for thing %s because the thing does not exist in your account" + % thing_name + ) + principals = [ k[0] for k, v in self.principal_things.items() if k[1] == thing_name ] diff --git a/tests/test_iot/test_iot.py b/tests/test_iot/test_iot.py index f8c4f579c0ce..f3c1517148a7 100644 --- a/tests/test_iot/test_iot.py +++ b/tests/test_iot/test_iot.py @@ -728,6 +728,13 @@ def test_principal_thing(): res = client.list_thing_principals(thingName=thing_name) res.should.have.key("principals").which.should.have.length_of(0) + with assert_raises(ClientError) as e: + client.list_thing_principals(thingName='xxx') + + e.exception.response["Error"]["Code"].should.equal("ResourceNotFoundException") + e.exception.response["Error"]["Message"].should.equal( + "Failed to list principals for thing xxx because the thing does not exist in your account" + ) @mock_iot def test_delete_principal_thing(): From d9b782be0a6944426347378345b2289732a2c7d9 Mon Sep 17 00:00:00 2001 From: Tomoya Iwata Date: Tue, 21 Apr 2020 14:43:04 +0900 Subject: [PATCH 248/658] fix lint --- tests/test_iot/test_iot.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tests/test_iot/test_iot.py b/tests/test_iot/test_iot.py index f3c1517148a7..2f43de5b9729 100644 --- a/tests/test_iot/test_iot.py +++ b/tests/test_iot/test_iot.py @@ -729,13 +729,14 @@ def test_principal_thing(): res.should.have.key("principals").which.should.have.length_of(0) with assert_raises(ClientError) as e: - client.list_thing_principals(thingName='xxx') + client.list_thing_principals(thingName="xxx") e.exception.response["Error"]["Code"].should.equal("ResourceNotFoundException") e.exception.response["Error"]["Message"].should.equal( "Failed to list principals for thing xxx because the thing does not exist in your account" ) + @mock_iot def test_delete_principal_thing(): client = boto3.client("iot", region_name="ap-northeast-1") From 12669400b715ba9a3eb7759407fbf61f1283874c Mon Sep 17 00:00:00 2001 From: thatguysimon Date: Tue, 21 Apr 2020 16:53:22 +0300 Subject: [PATCH 249/658] Mark sts>get_caller_identity as implemented Seems like it's implemented but not marked --- IMPLEMENTATION_COVERAGE.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/IMPLEMENTATION_COVERAGE.md b/IMPLEMENTATION_COVERAGE.md index 705618524723..36caec17591d 100644 --- a/IMPLEMENTATION_COVERAGE.md +++ b/IMPLEMENTATION_COVERAGE.md @@ -7210,13 +7210,13 @@ - [ ] update_vtl_device_type ## sts -50% implemented +62% implemented - [X] assume_role - [ ] assume_role_with_saml - [X] assume_role_with_web_identity - [ ] decode_authorization_message - [ ] get_access_key_info -- [ ] get_caller_identity +- [X] get_caller_identity - [X] get_federation_token - [X] get_session_token From 753a39ed0d195c3f3092d2f22fa361f93711f57b Mon Sep 17 00:00:00 2001 From: MarcosBernal Date: Tue, 21 Apr 2020 20:10:39 +0200 Subject: [PATCH 250/658] Add get_databases method to glue moto client. 
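A short usage sketch of the new endpoint (the database names here are
illustrative; mock_glue is moto's existing glue decorator):

    import boto3
    from moto import mock_glue

    @mock_glue
    def show_get_databases():
        client = boto3.client("glue", region_name="us-east-1")
        client.create_database(DatabaseInput={"Name": "db1"})
        client.create_database(DatabaseInput={"Name": "db2"})
        names = [db["Name"] for db in client.get_databases()["DatabaseList"]]
        assert sorted(names) == ["db1", "db2"]

    show_get_databases()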
Update IMPLEMENTATION_COVERAGE.md with methods that were covered previously --- IMPLEMENTATION_COVERAGE.md | 18 +++++++++--------- moto/glue/models.py | 3 +++ moto/glue/responses.py | 4 ++++ tests/test_glue/test_datacatalog.py | 21 +++++++++++++++++++++ 4 files changed, 37 insertions(+), 9 deletions(-) diff --git a/IMPLEMENTATION_COVERAGE.md b/IMPLEMENTATION_COVERAGE.md index 705618524723..82ee2f0462e8 100644 --- a/IMPLEMENTATION_COVERAGE.md +++ b/IMPLEMENTATION_COVERAGE.md @@ -3351,11 +3351,11 @@ - [ ] update_listener ## glue -4% implemented -- [ ] batch_create_partition +14/123 = 11% implemented +- [X] batch_create_partition - [ ] batch_delete_connection -- [ ] batch_delete_partition -- [ ] batch_delete_table +- [X] batch_delete_partition +- [X] batch_delete_table - [ ] batch_delete_table_version - [ ] batch_get_crawlers - [ ] batch_get_dev_endpoints @@ -3372,7 +3372,7 @@ - [ ] create_dev_endpoint - [ ] create_job - [ ] create_ml_transform -- [ ] create_partition +- [X] create_partition - [ ] create_script - [ ] create_security_configuration - [X] create_table @@ -3404,7 +3404,7 @@ - [ ] get_crawlers - [ ] get_data_catalog_encryption_settings - [X] get_database -- [ ] get_databases +- [X] get_databases - [ ] get_dataflow_graph - [ ] get_dev_endpoint - [ ] get_dev_endpoints @@ -3418,7 +3418,7 @@ - [ ] get_ml_task_runs - [ ] get_ml_transform - [ ] get_ml_transforms -- [ ] get_partition +- [X] get_partition - [ ] get_partitions - [ ] get_plan - [ ] get_resource_policy @@ -3470,8 +3470,8 @@ - [ ] update_dev_endpoint - [ ] update_job - [ ] update_ml_transform -- [ ] update_partition -- [ ] update_table +- [X] update_partition +- [X] update_table - [ ] update_trigger - [ ] update_user_defined_function - [ ] update_workflow diff --git a/moto/glue/models.py b/moto/glue/models.py index 8f3396d9ac16..cf930cfb296e 100644 --- a/moto/glue/models.py +++ b/moto/glue/models.py @@ -34,6 +34,9 @@ def get_database(self, database_name): except KeyError: raise DatabaseNotFoundException(database_name) + def get_databases(self): + return [self.databases[key] for key in self.databases] if self.databases else [] + def create_table(self, database_name, table_name, table_input): database = self.get_database(database_name) diff --git a/moto/glue/responses.py b/moto/glue/responses.py index bf7b5776b41b..4fb144bba47e 100644 --- a/moto/glue/responses.py +++ b/moto/glue/responses.py @@ -30,6 +30,10 @@ def get_database(self): database = self.glue_backend.get_database(database_name) return json.dumps({"Database": {"Name": database.name}}) + def get_databases(self): + database_list = self.glue_backend.get_databases() + return json.dumps({"DatabaseList": [{"Name": database.name} for database in database_list]}) + def create_table(self): database_name = self.parameters.get("DatabaseName") table_input = self.parameters.get("TableInput") diff --git a/tests/test_glue/test_datacatalog.py b/tests/test_glue/test_datacatalog.py index 31731e5980fe..54fb17451367 100644 --- a/tests/test_glue/test_datacatalog.py +++ b/tests/test_glue/test_datacatalog.py @@ -52,6 +52,27 @@ def test_get_database_not_exits(): ) +@mock_glue +def test_get_databases_empty(): + client = boto3.client("glue", region_name="us-east-1") + response = client.get_databases() + response["DatabaseList"].should.have.length_of(0) + + +@mock_glue +def test_get_databases_several_items(): + client = boto3.client("glue", region_name="us-east-1") + database_name_1, database_name_2 = "firstdatabase", "seconddatabase" + + helpers.create_database(client, database_name_1) + 
helpers.create_database(client, database_name_2) + + database_list = sorted(client.get_databases()["DatabaseList"], key=lambda x: x["Name"]) + database_list.should.have.length_of(2) + database_list[0].should.equal({"Name": database_name_1}) + database_list[1].should.equal({"Name": database_name_2}) + + @mock_glue def test_create_table(): client = boto3.client("glue", region_name="us-east-1") From 9381c670ab5d9ab169b071c4cf7580d04dfb4636 Mon Sep 17 00:00:00 2001 From: MarcosBernal Date: Tue, 21 Apr 2020 22:33:55 +0200 Subject: [PATCH 251/658] change code style to pass black --check --- moto/glue/responses.py | 4 +++- tests/test_glue/test_datacatalog.py | 4 +++- 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/moto/glue/responses.py b/moto/glue/responses.py index 4fb144bba47e..66185e099859 100644 --- a/moto/glue/responses.py +++ b/moto/glue/responses.py @@ -32,7 +32,9 @@ def get_database(self): def get_databases(self): database_list = self.glue_backend.get_databases() - return json.dumps({"DatabaseList": [{"Name": database.name} for database in database_list]}) + return json.dumps( + {"DatabaseList": [{"Name": database.name} for database in database_list]} + ) def create_table(self): database_name = self.parameters.get("DatabaseName") diff --git a/tests/test_glue/test_datacatalog.py b/tests/test_glue/test_datacatalog.py index 54fb17451367..bc68b48f6a15 100644 --- a/tests/test_glue/test_datacatalog.py +++ b/tests/test_glue/test_datacatalog.py @@ -67,7 +67,9 @@ def test_get_databases_several_items(): helpers.create_database(client, database_name_1) helpers.create_database(client, database_name_2) - database_list = sorted(client.get_databases()["DatabaseList"], key=lambda x: x["Name"]) + database_list = sorted( + client.get_databases()["DatabaseList"], key=lambda x: x["Name"] + ) database_list.should.have.length_of(2) database_list[0].should.equal({"Name": database_name_1}) database_list[1].should.equal({"Name": database_name_2}) From 156ba56fdc94414e45ff83763d04fca91b111513 Mon Sep 17 00:00:00 2001 From: Daniel Wallace Date: Mon, 15 Apr 2019 19:57:42 -0500 Subject: [PATCH 252/658] set default status for s3 posts and add support for success_action_redirect. 
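For context, this mirrors S3's browser-based POST semantics: a
success_action_redirect form field makes S3 answer the upload with a redirect
(Location header) to that URL, success_action_status lets the client pick the
status code, and 204 is the fallback. A client-side sketch (names are
illustrative only):

    import boto3
    import requests

    s3 = boto3.client("s3", region_name="us-east-1")
    post = s3.generate_presigned_post(
        Bucket="some-bucket",
        Key="upload.txt",
        Fields={"success_action_redirect": "http://localhost/done"},
        Conditions=[{"success_action_redirect": "http://localhost/done"}],
    )
    resp = requests.post(
        post["url"], data=post["fields"], files={"file": b"data"},
        allow_redirects=False,
    )
    assert resp.headers["Location"] == "http://localhost/done"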
--- moto/s3/responses.py | 15 ++++++++++++--- 1 file changed, 12 insertions(+), 3 deletions(-) diff --git a/moto/s3/responses.py b/moto/s3/responses.py index 22cd45c0834d..2f52e0d4ad8c 100644 --- a/moto/s3/responses.py +++ b/moto/s3/responses.py @@ -776,8 +776,9 @@ def _bucket_response_delete(self, body, bucket_name, querystring): template = self.response_template(S3_DELETE_BUCKET_WITH_ITEMS_ERROR) return 409, {}, template.render(bucket=removed_bucket) - def _bucket_response_post(self, request, body, bucket_name): - if not request.headers.get("Content-Length"): + def _bucket_response_post(self, request, body, bucket_name, headers): + response_headers = {} + if not request.headers.get('Content-Length'): return 411, {}, "Content-Length required" path = self._get_path(request) @@ -810,13 +811,21 @@ def _bucket_response_post(self, request, body, bucket_name): else: f = request.files["file"].stream.read() + if 'success_action_redirect' in form: + response_headers['Location'] = form['success_action_redirect'] + + if 'success_action_status' in form: + status_code = form['success_action_status'] + else: + status_code = 204 + new_key = self.backend.set_key(bucket_name, key, f) # Metadata metadata = metadata_from_headers(form) new_key.set_metadata(metadata) - return 200, {}, "" + return status_code, response_headers, "" @staticmethod def _get_path(request): From b3f6e5ab2fed73cfc9f66de92b16cfa52e3602bc Mon Sep 17 00:00:00 2001 From: Daniel Wallace Date: Wed, 29 May 2019 15:22:29 -0500 Subject: [PATCH 253/658] add test --- moto/s3/responses.py | 2 ++ tests/test_s3/test_s3.py | 32 ++++++++++++++++++++++++++++++++ 2 files changed, 34 insertions(+) diff --git a/moto/s3/responses.py b/moto/s3/responses.py index 2f52e0d4ad8c..5526646a3229 100644 --- a/moto/s3/responses.py +++ b/moto/s3/responses.py @@ -816,6 +816,8 @@ def _bucket_response_post(self, request, body, bucket_name, headers): if 'success_action_status' in form: status_code = form['success_action_status'] + elif 'success_action_redirect' in form: + status_code = 303 else: status_code = 204 diff --git a/tests/test_s3/test_s3.py b/tests/test_s3/test_s3.py index 303ed523d924..f7040e006cbd 100644 --- a/tests/test_s3/test_s3.py +++ b/tests/test_s3/test_s3.py @@ -14,6 +14,7 @@ import mimetypes import zlib import pickle +import uuid import json import boto @@ -4428,3 +4429,34 @@ def test_s3_config_dict(): assert not logging_bucket["supplementaryConfiguration"].get( "BucketTaggingConfiguration" ) + + +@mock_s3 +def test_creating_presigned_post(): + bucket = 'presigned-test' + s3 = boto3.client('s3', region_name='us-east-1') + s3.create_bucket(Bucket=bucket) + success_url = 'http://localhost/completed' + fdata = b'test data\n' + file_uid = uuid.uuid4() + conditions = [ + {"Content-Type": 'text/plain'}, + {"x-amz-server-side-encryption": "AES256"}, + {'success_action_redirect': success_url}, + ] + conditions.append(["content-length-range", 1, 30]) + data = s3.generate_presigned_post( + Bucket=bucket, + Key='{file_uid}.txt'.format(file_uid=file_uid), + Fields={ + 'content-type': 'text/plain', + 'success_action_redirect': success_url, + 'x-amz-server-side-encryption': 'AES256' + }, + Conditions=conditions, + ExpiresIn=1000, + ) + resp = requests.post(data['url'], data=data['fields'], files={'file': fdata}, allow_redirects=False) + assert resp.headers['Location'] == url + assert resp.status_code == 303 + assert s3.get_object(Bucket=bucket, Key='{file_uuid}.txt'.format(file_uid=file_uid))['Body'].read() == fdata From 49b056563a2396727e17253d09e6924ce24ef09e 
Mon Sep 17 00:00:00 2001 From: Daniel Wallace Date: Tue, 21 Apr 2020 19:51:48 -0500 Subject: [PATCH 254/658] process multipart form --- moto/s3/responses.py | 49 +++++++++++++++++++++++++++++----------- tests/test_s3/test_s3.py | 4 ++-- 2 files changed, 38 insertions(+), 15 deletions(-) diff --git a/moto/s3/responses.py b/moto/s3/responses.py index 5526646a3229..92a82e4ff0f9 100644 --- a/moto/s3/responses.py +++ b/moto/s3/responses.py @@ -7,7 +7,7 @@ from botocore.awsrequest import AWSPreparedRequest from moto.core.utils import str_to_rfc_1123_datetime, py2_strip_unicode_keys -from six.moves.urllib.parse import parse_qs, urlparse, unquote +from six.moves.urllib.parse import parse_qs, urlparse, unquote, parse_qsl import xmltodict @@ -143,6 +143,31 @@ def is_delete_keys(request, path, bucket_name): ) +def _process_multipart_formdata(request): + """ + When not using the live server, the request does not pass through flask, so it is not processed. + This will only be used in places where we end up with a requests PreparedRequest. + """ + form = {} + boundkey = request.headers['Content-Type'][len('multipart/form-data; boundary='):] + boundary = f'--{boundkey}' + data = request.body.decode().split(boundary) + fields = [field.split('\r\n\r\n') for field in data][1:-1] + for key, value in fields: + key, value = key.replace('\r\n', ''), value.replace('\r\n', '') + key = key.split('; ') + if len(key) == 2: + disposition, name = key + filename = None + else: + disposition, name, filename = key + name = name[len('name='):].strip('"') + if disposition.endswith('form-data'): + form[name] = value + import code; code.interact(local=locals()) + return form + + class ResponseObject(_TemplateEnvironmentMixin, ActionAuthenticatorMixin): def __init__(self, backend): super(ResponseObject, self).__init__() @@ -776,9 +801,9 @@ def _bucket_response_delete(self, body, bucket_name, querystring): template = self.response_template(S3_DELETE_BUCKET_WITH_ITEMS_ERROR) return 409, {}, template.render(bucket=removed_bucket) - def _bucket_response_post(self, request, body, bucket_name, headers): + def _bucket_response_post(self, request, body, bucket_name): response_headers = {} - if not request.headers.get('Content-Length'): + if not request.headers.get("Content-Length"): return 411, {}, "Content-Length required" path = self._get_path(request) @@ -796,14 +821,12 @@ def _bucket_response_post(self, request, body, bucket_name, headers): if hasattr(request, "form"): # Not HTTPretty form = request.form + elif request.headers.get('Content-Type').startswith('multipart/form-data'): + form = _process_multipart_formdata(request) else: # HTTPretty, build new form object body = body.decode() - - form = {} - for kv in body.split("&"): - k, v = kv.split("=") - form[k] = v + form = dict(parse_qsl(body)) key = form["key"] if "file" in form: @@ -811,12 +834,12 @@ def _bucket_response_post(self, request, body, bucket_name, headers): else: f = request.files["file"].stream.read() - if 'success_action_redirect' in form: - response_headers['Location'] = form['success_action_redirect'] + if "success_action_redirect" in form: + response_headers["Location"] = form["success_action_redirect"] - if 'success_action_status' in form: - status_code = form['success_action_status'] - elif 'success_action_redirect' in form: + if "success_action_status" in form: + status_code = form["success_action_status"] + elif "success_action_redirect" in form: status_code = 303 else: status_code = 204 diff --git a/tests/test_s3/test_s3.py b/tests/test_s3/test_s3.py 
index f7040e006cbd..c226a7b3b0bf 100644 --- a/tests/test_s3/test_s3.py +++ b/tests/test_s3/test_s3.py @@ -4457,6 +4457,6 @@ def test_creating_presigned_post(): ExpiresIn=1000, ) resp = requests.post(data['url'], data=data['fields'], files={'file': fdata}, allow_redirects=False) - assert resp.headers['Location'] == url + assert resp.headers['Location'] == success_url assert resp.status_code == 303 - assert s3.get_object(Bucket=bucket, Key='{file_uuid}.txt'.format(file_uid=file_uid))['Body'].read() == fdata + assert s3.get_object(Bucket=bucket, Key='{file_uid}.txt'.format(file_uid=file_uid))['Body'].read() == fdata From 4b0ba7320433b4b66488fc851c828f2ec1b56836 Mon Sep 17 00:00:00 2001 From: Daniel Wallace Date: Tue, 21 Apr 2020 20:13:53 -0500 Subject: [PATCH 255/658] use werkzeug hooray, thanks pallets discord! --- moto/s3/responses.py | 34 ++++++++-------------------------- 1 file changed, 8 insertions(+), 26 deletions(-) diff --git a/moto/s3/responses.py b/moto/s3/responses.py index 92a82e4ff0f9..965d15f57b1f 100644 --- a/moto/s3/responses.py +++ b/moto/s3/responses.py @@ -5,6 +5,7 @@ import six from botocore.awsrequest import AWSPreparedRequest +from werkzeug.wrappers import Request from moto.core.utils import str_to_rfc_1123_datetime, py2_strip_unicode_keys from six.moves.urllib.parse import parse_qs, urlparse, unquote, parse_qsl @@ -143,31 +144,6 @@ def is_delete_keys(request, path, bucket_name): ) -def _process_multipart_formdata(request): - """ - When not using the live server, the request does not pass through flask, so it is not processed. - This will only be used in places where we end up with a requests PreparedRequest. - """ - form = {} - boundkey = request.headers['Content-Type'][len('multipart/form-data; boundary='):] - boundary = f'--{boundkey}' - data = request.body.decode().split(boundary) - fields = [field.split('\r\n\r\n') for field in data][1:-1] - for key, value in fields: - key, value = key.replace('\r\n', ''), value.replace('\r\n', '') - key = key.split('; ') - if len(key) == 2: - disposition, name = key - filename = None - else: - disposition, name, filename = key - name = name[len('name='):].strip('"') - if disposition.endswith('form-data'): - form[name] = value - import code; code.interact(local=locals()) - return form - - class ResponseObject(_TemplateEnvironmentMixin, ActionAuthenticatorMixin): def __init__(self, backend): super(ResponseObject, self).__init__() @@ -822,7 +798,13 @@ def _bucket_response_post(self, request, body, bucket_name): # Not HTTPretty form = request.form elif request.headers.get('Content-Type').startswith('multipart/form-data'): - form = _process_multipart_formdata(request) + request = Request.from_values( + input_stream=six.BytesIO(request.body), + content_length=request.headers['Content-Length'], + content_type=request.headers['Content-Type'], + method='POST', + ) + form = request.form else: # HTTPretty, build new form object body = body.decode() From 80b27a6b93d0c52d8f9f5349ec87efd036a66247 Mon Sep 17 00:00:00 2001 From: Daniel Wallace Date: Tue, 21 Apr 2020 21:43:32 -0500 Subject: [PATCH 256/658] blacken --- moto/s3/responses.py | 8 ++++---- tests/test_s3/test_s3.py | 33 ++++++++++++++++++++------------- 2 files changed, 24 insertions(+), 17 deletions(-) diff --git a/moto/s3/responses.py b/moto/s3/responses.py index 965d15f57b1f..6ac139a14dc0 100644 --- a/moto/s3/responses.py +++ b/moto/s3/responses.py @@ -797,12 +797,12 @@ def _bucket_response_post(self, request, body, bucket_name): if hasattr(request, "form"): # Not HTTPretty form = 
request.form - elif request.headers.get('Content-Type').startswith('multipart/form-data'): + elif request.headers.get("Content-Type").startswith("multipart/form-data"): request = Request.from_values( input_stream=six.BytesIO(request.body), - content_length=request.headers['Content-Length'], - content_type=request.headers['Content-Type'], - method='POST', + content_length=request.headers["Content-Length"], + content_type=request.headers["Content-Type"], + method="POST", ) form = request.form else: diff --git a/tests/test_s3/test_s3.py b/tests/test_s3/test_s3.py index c226a7b3b0bf..ffbd73966b04 100644 --- a/tests/test_s3/test_s3.py +++ b/tests/test_s3/test_s3.py @@ -4433,30 +4433,37 @@ def test_s3_config_dict(): @mock_s3 def test_creating_presigned_post(): - bucket = 'presigned-test' - s3 = boto3.client('s3', region_name='us-east-1') + bucket = "presigned-test" + s3 = boto3.client("s3", region_name="us-east-1") s3.create_bucket(Bucket=bucket) - success_url = 'http://localhost/completed' - fdata = b'test data\n' + success_url = "http://localhost/completed" + fdata = b"test data\n" file_uid = uuid.uuid4() conditions = [ - {"Content-Type": 'text/plain'}, + {"Content-Type": "text/plain"}, {"x-amz-server-side-encryption": "AES256"}, - {'success_action_redirect': success_url}, + {"success_action_redirect": success_url}, ] conditions.append(["content-length-range", 1, 30]) data = s3.generate_presigned_post( Bucket=bucket, - Key='{file_uid}.txt'.format(file_uid=file_uid), + Key="{file_uid}.txt".format(file_uid=file_uid), Fields={ - 'content-type': 'text/plain', - 'success_action_redirect': success_url, - 'x-amz-server-side-encryption': 'AES256' + "content-type": "text/plain", + "success_action_redirect": success_url, + "x-amz-server-side-encryption": "AES256", }, Conditions=conditions, ExpiresIn=1000, ) - resp = requests.post(data['url'], data=data['fields'], files={'file': fdata}, allow_redirects=False) - assert resp.headers['Location'] == success_url + resp = requests.post( + data["url"], data=data["fields"], files={"file": fdata}, allow_redirects=False + ) + assert resp.headers["Location"] == success_url assert resp.status_code == 303 - assert s3.get_object(Bucket=bucket, Key='{file_uid}.txt'.format(file_uid=file_uid))['Body'].read() == fdata + assert ( + s3.get_object(Bucket=bucket, Key="{file_uid}.txt".format(file_uid=file_uid))[ + "Body" + ].read() + == fdata + ) From 50111929cc16ea270b6c7d266c934777c15c9ad5 Mon Sep 17 00:00:00 2001 From: Bert Blommers Date: Wed, 22 Apr 2020 12:18:27 +0100 Subject: [PATCH 257/658] STS - Handle AssumeRoleWithSAML as an unsigned request --- moto/server.py | 22 ++++++++++++++++++++++ 1 file changed, 22 insertions(+) diff --git a/moto/server.py b/moto/server.py index 92fe6f229924..7987a629d5c3 100644 --- a/moto/server.py +++ b/moto/server.py @@ -1,6 +1,7 @@ from __future__ import unicode_literals import argparse +import io import json import re import sys @@ -29,6 +30,7 @@ "AWSCognitoIdentityService": ("cognito-identity", "us-east-1"), "AWSCognitoIdentityProviderService": ("cognito-idp", "us-east-1"), } +UNSIGNED_ACTIONS = {"AssumeRoleWithSAML": ("sts", "us-east-1")} class DomainDispatcherApplication(object): @@ -77,9 +79,13 @@ def infer_service_region_host(self, environ): else: # Unsigned request target = environ.get("HTTP_X_AMZ_TARGET") + action = self.get_action_from_body(environ) if target: service, _ = target.split(".", 1) service, region = UNSIGNED_REQUESTS.get(service, DEFAULT_SERVICE_REGION) + elif action and action in UNSIGNED_ACTIONS: + # See if we can 
match the Action to a known service + service, region = UNSIGNED_ACTIONS.get(action) else: # S3 is the last resort when the target is also unknown service, region = DEFAULT_SERVICE_REGION @@ -130,6 +136,22 @@ def get_application(self, environ): self.app_instances[backend] = app return app + def get_action_from_body(self, environ): + body = None + try: + request_body_size = int(environ.get("CONTENT_LENGTH", 0)) + if "wsgi.input" in environ: + body = environ["wsgi.input"].read(request_body_size).decode("utf-8") + body_dict = dict(x.split("=") for x in str(body).split("&")) + return body_dict["Action"] + except ValueError: + pass + finally: + if body: + # We've consumed the body = need to reset it + environ["wsgi.input"] = io.StringIO(body) + return None + def __call__(self, environ, start_response): backend_app = self.get_application(environ) return backend_app(environ, start_response)

From 25d1e1059e6ad28050147dc2257e6a12846396a9 Mon Sep 17 00:00:00 2001 From: Bert Blommers Date: Wed, 22 Apr 2020 14:07:19 +0100 Subject: [PATCH 258/658] STS - Only check request-body of eligible requests for Actions

--- moto/server.py | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-)

diff --git a/moto/server.py b/moto/server.py index 7987a629d5c3..498f6c504dce 100644 --- a/moto/server.py +++ b/moto/server.py @@ -139,12 +139,16 @@ def get_application(self, environ): def get_action_from_body(self, environ): body = None try: - request_body_size = int(environ.get("CONTENT_LENGTH", 0)) - if "wsgi.input" in environ: + # AWS requests use querystrings as the body (Action=x&Data=y&...) + simple_form = environ["CONTENT_TYPE"].startswith( + "application/x-www-form-urlencoded" + ) + request_body_size = int(environ["CONTENT_LENGTH"]) + if simple_form and request_body_size: body = environ["wsgi.input"].read(request_body_size).decode("utf-8") - body_dict = dict(x.split("=") for x in str(body).split("&")) + body_dict = dict(x.split("=") for x in body.split("&")) return body_dict["Action"] - except ValueError: + except (KeyError, ValueError): pass finally: if body:

From 50a147592debbbb5e887d40d34ae146dbb266cdd Mon Sep 17 00:00:00 2001 From: Daniel Wallace Date: Wed, 22 Apr 2020 09:08:30 -0500 Subject: [PATCH 259/658] Make all CallbackResponse requests into a Werkzeug Request

The "request" object in CallbackResponse is the PreparedRequest sent by whatever client is used to contact the mocked moto service. This can end up with unparsed bodies, as we added for processing presigned post requests in #2155. This will make sure that all of the requests coming in from mocked functions also get processed by werkzeug as if it was running a live server.
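For illustration, the conversion below is roughly what the diff that follows adds to CallbackResponse.get_response — a standalone sketch (Python 3 syntax for brevity; the patch itself stays six-compatible) that parses a PreparedRequest into a werkzeug Request so multipart bodies become available via `request.form`:

```python
import io
from urllib.parse import urlparse

from werkzeug.wrappers import Request


def to_werkzeug_request(prepared):
    # `prepared` is a requests/botocore PreparedRequest captured by the mock.
    url = urlparse(prepared.url)
    body = prepared.body
    if isinstance(body, str):
        body = body.encode("utf-8")
    return Request.from_values(
        path="?".join([url.path, url.query]),
        input_stream=io.BytesIO(body or b""),
        content_length=prepared.headers.get("Content-Length"),
        content_type=prepared.headers.get("Content-Type"),
        method=prepared.method,
        base_url="{0}://{1}".format(url.scheme, url.netloc),
        headers=list(prepared.headers.items()),
    )
```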
--- moto/core/models.py | 20 ++++++++++++++++++++ moto/s3/responses.py | 9 --------- 2 files changed, 20 insertions(+), 9 deletions(-) diff --git a/moto/core/models.py b/moto/core/models.py index 73942c669b47..460823bd6b33 100644 --- a/moto/core/models.py +++ b/moto/core/models.py @@ -12,6 +12,8 @@ from collections import defaultdict from botocore.handlers import BUILTIN_HANDLERS from botocore.awsrequest import AWSResponse +from six.moves.urllib.parse import urlparse +from werkzeug.wrappers import Request import mock from moto import settings @@ -175,6 +177,24 @@ def get_response(self, request): """ Need to override this so we can pass decode_content=False """ + if not isinstance(request, Request): + url = urlparse(request.url) + if request.body is None: + body = None + elif isinstance(request.body, six.text_type): + body = six.BytesIO(six.b(request.body)) + else: + body = six.BytesIO(request.body) + req = Request.from_values( + path='?'.join([url.path, url.query]), + input_stream=body, + content_length=request.headers.get("Content-Length"), + content_type=request.headers.get("Content-Type"), + method=request.method, + base_url='{scheme}://{netloc}'.format(scheme=url.scheme, netloc=url.netloc), + headers=[(k, v) for k, v in six.iteritems(request.headers)] + ) + request = req headers = self.get_headers() result = self.callback(request) diff --git a/moto/s3/responses.py b/moto/s3/responses.py index 6ac139a14dc0..442489a8a946 100644 --- a/moto/s3/responses.py +++ b/moto/s3/responses.py @@ -5,7 +5,6 @@ import six from botocore.awsrequest import AWSPreparedRequest -from werkzeug.wrappers import Request from moto.core.utils import str_to_rfc_1123_datetime, py2_strip_unicode_keys from six.moves.urllib.parse import parse_qs, urlparse, unquote, parse_qsl @@ -797,14 +796,6 @@ def _bucket_response_post(self, request, body, bucket_name): if hasattr(request, "form"): # Not HTTPretty form = request.form - elif request.headers.get("Content-Type").startswith("multipart/form-data"): - request = Request.from_values( - input_stream=six.BytesIO(request.body), - content_length=request.headers["Content-Length"], - content_type=request.headers["Content-Type"], - method="POST", - ) - form = request.form else: # HTTPretty, build new form object body = body.decode() From 4cd2b201b5cb07165816b1e3a0c453e7a5410d1e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marcos=20Bernal=20Espa=C3=B1a?= Date: Wed, 22 Apr 2020 16:44:25 +0200 Subject: [PATCH 260/658] Update IMPLEMENTATION_COVERAGE.md Co-Authored-By: Bert Blommers --- IMPLEMENTATION_COVERAGE.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/IMPLEMENTATION_COVERAGE.md b/IMPLEMENTATION_COVERAGE.md index 82ee2f0462e8..78c7ba0e4eb8 100644 --- a/IMPLEMENTATION_COVERAGE.md +++ b/IMPLEMENTATION_COVERAGE.md @@ -3351,7 +3351,7 @@ - [ ] update_listener ## glue -14/123 = 11% implemented +11% implemented - [X] batch_create_partition - [ ] batch_delete_connection - [X] batch_delete_partition From d9e2aeed5856ef762779d8920572398a1ed6c4c1 Mon Sep 17 00:00:00 2001 From: Daniel Wallace Date: Wed, 22 Apr 2020 10:02:25 -0500 Subject: [PATCH 261/658] blacken --- moto/core/models.py | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/moto/core/models.py b/moto/core/models.py index 460823bd6b33..1ee11607ab36 100644 --- a/moto/core/models.py +++ b/moto/core/models.py @@ -186,13 +186,15 @@ def get_response(self, request): else: body = six.BytesIO(request.body) req = Request.from_values( - path='?'.join([url.path, url.query]), + path="?".join([url.path, 
url.query]), input_stream=body, content_length=request.headers.get("Content-Length"), content_type=request.headers.get("Content-Type"), method=request.method, - base_url='{scheme}://{netloc}'.format(scheme=url.scheme, netloc=url.netloc), - headers=[(k, v) for k, v in six.iteritems(request.headers)] + base_url="{scheme}://{netloc}".format( + scheme=url.scheme, netloc=url.netloc + ), + headers=[(k, v) for k, v in six.iteritems(request.headers)], ) request = req headers = self.get_headers()

From 343b20a5fbfebac4cebf3f1dbd6e794084fb65fe Mon Sep 17 00:00:00 2001 From: Bert Blommers Date: Wed, 22 Apr 2020 16:36:41 +0100 Subject: [PATCH 262/658] Update CONTRIBUTING to add Linting info

--- CONTRIBUTING.md | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 40da55ccf72c..941fc0624e58 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -4,7 +4,10 @@ Moto has a [Code of Conduct](https://github.com/spulec/moto/blob/master/CODE_OF_ ## Running the tests locally -Moto has a Makefile which has some helpful commands for getting setup. You should be able to run `make init` to install the dependencies and then `make test` to run the tests. +Moto has a Makefile which has some helpful commands for getting setup. You should be able to run `make init` to install the dependencies and then `make test` to run the tests. + +## Linting +Run `make lint` or `black --check moto tests` to verify whether your code conforms to the guidelines. ## Is there a missing feature?

From 194de2b6eaf5d884b6bbce8856d6f0e21eb45149 Mon Sep 17 00:00:00 2001 From: Asher Foa <1268088+asherf@users.noreply.github.com> Date: Wed, 22 Apr 2020 13:32:12 -0700 Subject: [PATCH 263/658] Add af-south-1

The new version of botocore adds this region: https://github.com/boto/botocore/commit/f7dc4730ad34c6c3322da7d43ba64452bb3ae0d8#diff-9dfab05d4ba739e097a193e8b5fa61caR13 Which in turn, breaks moto: ``` /moto-1.3.15.dev640-py2.py3-none-any.whl/moto/__init__.py:3: in from .acm import mock_acm # noqa /moto-1.3.15.dev640-py2.py3-none-any.whl/moto/acm/__init__.py:2: in from .models import acm_backends /moto-1.3.15.dev640-py2.py3-none-any.whl/moto/acm/models.py:7: in from moto.ec2 import ec2_backends /moto-1.3.15.dev640-py2.py3-none-any.whl/moto/ec2/__init__.py:2: in from .models import ec2_backends /moto-1.3.15.dev640-py2.py3-none-any.whl/moto/ec2/models.py:5169: in ec2_backends = { /moto-1.3.15.dev640-py2.py3-none-any.whl/moto/ec2/models.py:5170: in region.name: EC2Backend(region.name) for region in RegionsAndZonesBackend.regions /moto-1.3.15.dev640-py2.py3-none-any.whl/moto/ec2/models.py:5083: in __init__ super(EC2Backend, self).__init__() /moto-1.3.15.dev640-py2.py3-none-any.whl/moto/ec2/models.py:852: in __init__ super(InstanceBackend, self).__init__() /moto-1.3.15.dev640-py2.py3-none-any.whl/moto/ec2/models.py:1127: in __init__ super(TagBackend, self).__init__() /moto-1.3.15.dev640-py2.py3-none-any.whl/moto/ec2/models.py:2368: in __init__ super(EBSBackend, self).__init__() /moto-1.3.15.dev640-py2.py3-none-any.whl/moto/ec2/models.py:1904: in __init__ super(SecurityGroupBackend, self).__init__() /moto-1.3.15.dev640-py2.py3-none-any.whl/moto/ec2/models.py:1355: in __init__ self._load_amis() /moto-1.3.15.dev640-py2.py3-none-any.whl/moto/ec2/models.py:1362: in _load_amis self.amis[ami_id] = Ami(self, **ami) /moto-1.3.15.dev640-py2.py3-none-any.whl/moto/ec2/models.py:1313: in __init__ volume = self.ec2_backend.create_volume(15, region_name)
/moto-1.3.15.dev640-py2.py3-none-any.whl/moto/ec2/models.py:2372: in create_volume zone = self.get_zone_by_name(zone_name) /moto-1.3.15.dev640-py2.py3-none-any.whl/moto/ec2/models.py:1713: in get_zone_by_name for zone in self.zones[self.region_name]: E KeyError: 'af-south-1' ``` --- moto/ec2/models.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/moto/ec2/models.py b/moto/ec2/models.py index ee9b0fcc464c..dc8e617e0547 100644 --- a/moto/ec2/models.py +++ b/moto/ec2/models.py @@ -1512,6 +1512,9 @@ class RegionsAndZonesBackend(object): regions.append(Region(region, "ec2.{}.amazonaws.com.cn".format(region))) zones = { + "af-south-1": [ + Zone(region_name="af-south-1", name="af-south-1a", zone_id="afs1-az1"), + ], "ap-south-1": [ Zone(region_name="ap-south-1", name="ap-south-1a", zone_id="aps1-az1"), Zone(region_name="ap-south-1", name="ap-south-1b", zone_id="aps1-az3"), From 1d31ea6397ef2349ea89ba481b1c2af23bde9d05 Mon Sep 17 00:00:00 2001 From: Asher Foa <1268088+asherf@users.noreply.github.com> Date: Thu, 23 Apr 2020 08:25:14 -0700 Subject: [PATCH 264/658] add two more zones. --- moto/ec2/models.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/moto/ec2/models.py b/moto/ec2/models.py index dc8e617e0547..332c8f0304f4 100644 --- a/moto/ec2/models.py +++ b/moto/ec2/models.py @@ -1514,6 +1514,8 @@ class RegionsAndZonesBackend(object): zones = { "af-south-1": [ Zone(region_name="af-south-1", name="af-south-1a", zone_id="afs1-az1"), + Zone(region_name="af-south-1", name="af-south-1b", zone_id="afs1-az2"), + Zone(region_name="af-south-1", name="af-south-1c", zone_id="afs1-az3"), ], "ap-south-1": [ Zone(region_name="ap-south-1", name="ap-south-1a", zone_id="aps1-az1"), From 9ed6e52d0ab86a0bd7b00caeb4af2c2cfb54bf42 Mon Sep 17 00:00:00 2001 From: Antoine Wendlinger Date: Wed, 22 Apr 2020 19:31:43 +0200 Subject: [PATCH 265/658] Handle VersionId in S3:delete_objects VersionId is not read in delete_objects requests, and the behavior differs from its singular counterpart delete_object. This fixes the issue. 
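Restated as standalone usage, this is the behaviour being added (bucket and key are hypothetical; assumes a versioned bucket holding at least two versions of the key, e.g. inside `@mock_s3`):

```python
import boto3

s3 = boto3.client("s3", region_name="us-east-1")
versions = s3.list_object_versions(Bucket="demo-bucket", Prefix="test1")
latest_id = [v["VersionId"] for v in versions["Versions"] if v["IsLatest"]][0]
# delete_objects now honours per-object VersionId entries, just like
# delete_object does for a single key.
resp = s3.delete_objects(
    Bucket="demo-bucket",
    Delete={"Objects": [{"Key": "test1", "VersionId": latest_id}]},
)
assert resp["Deleted"] == [{"Key": "test1", "VersionId": latest_id}]
```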
--- moto/s3/responses.py | 26 +++++++++++++++++--------- tests/test_s3/test_s3.py | 23 +++++++++++++++++++++++ 2 files changed, 40 insertions(+), 9 deletions(-) diff --git a/moto/s3/responses.py b/moto/s3/responses.py index 442489a8a946..ec6015f7ae6b 100644 --- a/moto/s3/responses.py +++ b/moto/s3/responses.py @@ -840,26 +840,33 @@ def _get_path(request): def _bucket_response_delete_keys(self, request, body, bucket_name): template = self.response_template(S3_DELETE_KEYS_RESPONSE) - keys = minidom.parseString(body).getElementsByTagName("Key") - deleted_names = [] + objects = minidom.parseString(body).getElementsByTagName("Object") + + deleted_objects = [] error_names = [] - if len(keys) == 0: + if len(objects) == 0: raise MalformedXML() - for k in keys: - key_name = k.firstChild.nodeValue + for object_ in objects: + key_name = object_.getElementsByTagName("Key")[0].firstChild.nodeValue + version_id_node = object_.getElementsByTagName("VersionId") + if version_id_node: + version_id = version_id_node[0].firstChild.nodeValue + else: + version_id = None + success = self.backend.delete_key( - bucket_name, undo_clean_key_name(key_name) + bucket_name, undo_clean_key_name(key_name), version_id=version_id ) if success: - deleted_names.append(key_name) + deleted_objects.append((key_name, version_id)) else: error_names.append(key_name) return ( 200, {}, - template.render(deleted=deleted_names, delete_errors=error_names), + template.render(deleted=deleted_objects, delete_errors=error_names), ) def _handle_range_header(self, request, headers, response_content): @@ -1861,9 +1868,10 @@ def _key_response_post(self, request, body, bucket_name, query, key_name): S3_DELETE_KEYS_RESPONSE = """ -{% for k in deleted %} +{% for k, v in deleted %} {{k}} +{% if v %}{{v}}{% endif %} {% endfor %} {% for k in delete_errors %} diff --git a/tests/test_s3/test_s3.py b/tests/test_s3/test_s3.py index ffbd73966b04..4a94c9c38aec 100644 --- a/tests/test_s3/test_s3.py +++ b/tests/test_s3/test_s3.py @@ -2218,6 +2218,29 @@ def test_boto3_deleted_versionings_list(): assert len(listed["Contents"]) == 1 +@mock_s3 +def test_boto3_delete_objects_for_specific_version_id(): + client = boto3.client("s3", region_name=DEFAULT_REGION_NAME) + client.create_bucket(Bucket="blah") + client.put_bucket_versioning( + Bucket="blah", VersioningConfiguration={"Status": "Enabled"} + ) + + client.put_object(Bucket="blah", Key="test1", Body=b"test1a") + client.put_object(Bucket="blah", Key="test1", Body=b"test1b") + + response = client.list_object_versions(Bucket="blah", Prefix="test1") + id_to_delete = [v["VersionId"] for v in response["Versions"] if v["IsLatest"]][0] + + response = client.delete_objects( + Bucket="blah", Delete={"Objects": [{"Key": "test1", "VersionId": id_to_delete}]} + ) + assert response["Deleted"] == [{"Key": "test1", "VersionId": id_to_delete}] + + listed = client.list_objects_v2(Bucket="blah") + assert len(listed["Contents"]) == 1 + + @mock_s3 def test_boto3_delete_versioned_bucket(): client = boto3.client("s3", region_name=DEFAULT_REGION_NAME) From 3e145ef8df0433141a8c17bd33505991a25bf4be Mon Sep 17 00:00:00 2001 From: = Date: Fri, 24 Apr 2020 16:12:55 +0200 Subject: [PATCH 266/658] Do not remove tags after secret update, handle description --- moto/secretsmanager/models.py | 21 ++++++++++++++++----- moto/secretsmanager/responses.py | 2 ++ 2 files changed, 18 insertions(+), 5 deletions(-) diff --git a/moto/secretsmanager/models.py b/moto/secretsmanager/models.py index 11a024be622c..3a13d1119b01 100644 --- 
a/moto/secretsmanager/models.py +++ b/moto/secretsmanager/models.py @@ -121,8 +121,12 @@ def update_secret( "You can't perform this operation on the secret because it was marked for deletion." ) + secret = self.secrets[secret_id] + tags = secret["tags"] + description = secret["description"] + version_id = self._add_secret( - secret_id, secret_string=secret_string, secret_binary=secret_binary + secret_id, secret_string=secret_string, secret_binary=secret_binary, description=description, tags=tags ) response = json.dumps( @@ -136,7 +140,7 @@ def update_secret( return response def create_secret( - self, name, secret_string=None, secret_binary=None, tags=[], **kwargs + self, name, secret_string=None, secret_binary=None, description=None, tags=[], **kwargs ): # error if secret exists @@ -146,7 +150,7 @@ def create_secret( ) version_id = self._add_secret( - name, secret_string=secret_string, secret_binary=secret_binary, tags=tags + name, secret_string=secret_string, secret_binary=secret_binary, description=description, tags=tags ) response = json.dumps( @@ -164,6 +168,7 @@ def _add_secret( secret_id, secret_string=None, secret_binary=None, + description=None, tags=[], version_id=None, version_stages=None, @@ -216,13 +221,18 @@ def _add_secret( secret["rotation_lambda_arn"] = "" secret["auto_rotate_after_days"] = 0 secret["tags"] = tags + secret["description"] = description return version_id def put_secret_value(self, secret_id, secret_string, secret_binary, version_stages): + secret = self.secrets[secret_id] + tags = secret["tags"] + description = secret["description"] + version_id = self._add_secret( - secret_id, secret_string, secret_binary, version_stages=version_stages + secret_id, secret_string, secret_binary, description=description, tags=tags, version_stages=version_stages ) response = json.dumps( @@ -310,6 +320,7 @@ def rotate_secret( self._add_secret( secret_id, old_secret_version["secret_string"], + secret["description"], secret["tags"], version_id=new_version_id, version_stages=["AWSCURRENT"], @@ -416,7 +427,7 @@ def list_secrets(self, max_results, next_token): { "ARN": secret_arn(self.region, secret["secret_id"]), "DeletedDate": secret.get("deleted_date", None), - "Description": "", + "Description": secret.get["description"], "KmsKeyId": "", "LastAccessedDate": None, "LastChangedDate": None, diff --git a/moto/secretsmanager/responses.py b/moto/secretsmanager/responses.py index 757b888a34c4..9a899c90dac8 100644 --- a/moto/secretsmanager/responses.py +++ b/moto/secretsmanager/responses.py @@ -21,11 +21,13 @@ def create_secret(self): name = self._get_param("Name") secret_string = self._get_param("SecretString") secret_binary = self._get_param("SecretBinary") + description = self._get_param("Description", if_none="") tags = self._get_param("Tags", if_none=[]) return secretsmanager_backends[self.region].create_secret( name=name, secret_string=secret_string, secret_binary=secret_binary, + description=description, tags=tags, ) From 6483e3be806f25f02632f0f53f8810c8ae212468 Mon Sep 17 00:00:00 2001 From: = Date: Fri, 24 Apr 2020 18:17:03 +0200 Subject: [PATCH 267/658] do not require secret to exist on PutSecretValue operation --- moto/secretsmanager/models.py | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/moto/secretsmanager/models.py b/moto/secretsmanager/models.py index 3a13d1119b01..07a112fbcc46 100644 --- a/moto/secretsmanager/models.py +++ b/moto/secretsmanager/models.py @@ -227,9 +227,13 @@ def _add_secret( def put_secret_value(self, secret_id, 
secret_string, secret_binary, version_stages): - secret = self.secrets[secret_id] - tags = secret["tags"] - description = secret["description"] + if secret_id in self.secrets.keys(): + secret = self.secrets[secret_id] + tags = secret["tags"] + description = secret["description"] + else: + tags = [] + description = "" version_id = self._add_secret( secret_id, secret_string, secret_binary, description=description, tags=tags, version_stages=version_stages @@ -427,7 +431,7 @@ def list_secrets(self, max_results, next_token): { "ARN": secret_arn(self.region, secret["secret_id"]), "DeletedDate": secret.get("deleted_date", None), - "Description": secret.get["description"], + "Description": secret.get("description", ""), "KmsKeyId": "", "LastAccessedDate": None, "LastChangedDate": None, From ef67aee1a38e7b722d395f424aff206bc63af0dd Mon Sep 17 00:00:00 2001 From: = Date: Fri, 24 Apr 2020 18:53:24 +0200 Subject: [PATCH 268/658] apply black formatting --- moto/secretsmanager/models.py | 27 +++++++++++++++++++++++---- 1 file changed, 23 insertions(+), 4 deletions(-) diff --git a/moto/secretsmanager/models.py b/moto/secretsmanager/models.py index 07a112fbcc46..7762d41bc9d6 100644 --- a/moto/secretsmanager/models.py +++ b/moto/secretsmanager/models.py @@ -126,7 +126,11 @@ def update_secret( description = secret["description"] version_id = self._add_secret( - secret_id, secret_string=secret_string, secret_binary=secret_binary, description=description, tags=tags + secret_id, + secret_string=secret_string, + secret_binary=secret_binary, + description=description, + tags=tags, ) response = json.dumps( @@ -140,7 +144,13 @@ def update_secret( return response def create_secret( - self, name, secret_string=None, secret_binary=None, description=None, tags=[], **kwargs + self, + name, + secret_string=None, + secret_binary=None, + description=None, + tags=[], + **kwargs ): # error if secret exists @@ -150,7 +160,11 @@ def create_secret( ) version_id = self._add_secret( - name, secret_string=secret_string, secret_binary=secret_binary, description=description, tags=tags + name, + secret_string=secret_string, + secret_binary=secret_binary, + description=description, + tags=tags, ) response = json.dumps( @@ -236,7 +250,12 @@ def put_secret_value(self, secret_id, secret_string, secret_binary, version_stag description = "" version_id = self._add_secret( - secret_id, secret_string, secret_binary, description=description, tags=tags, version_stages=version_stages + secret_id, + secret_string, + secret_binary, + description=description, + tags=tags, + version_stages=version_stages, ) response = json.dumps( From 908468edb6e9151d1607972c325f5e9dd22b6681 Mon Sep 17 00:00:00 2001 From: Matthew Gladney Date: Fri, 24 Apr 2020 14:15:22 -0400 Subject: [PATCH 269/658] Support OptInStatus for EC2 describe_region calls --- moto/ec2/models.py | 17 +++++++++++++---- .../responses/availability_zones_and_regions.py | 1 + .../test_availability_zones_and_regions.py | 7 +++++++ 3 files changed, 21 insertions(+), 4 deletions(-) diff --git a/moto/ec2/models.py b/moto/ec2/models.py index 332c8f0304f4..7a935120954e 100644 --- a/moto/ec2/models.py +++ b/moto/ec2/models.py @@ -1490,9 +1490,10 @@ def remove_launch_permission(self, ami_id, user_ids=None, group=None): class Region(object): - def __init__(self, name, endpoint): + def __init__(self, name, endpoint, opt_in_status): self.name = name self.endpoint = endpoint + self.opt_in_status = opt_in_status class Zone(object): @@ -1503,13 +1504,21 @@ def __init__(self, name, region_name, zone_id): 
class RegionsAndZonesBackend(object): + regions_not_enabled_by_default = [ + 'ap-east-1', + 'me-south-1' + ] + regions = [] for region in Session().get_available_regions("ec2"): - regions.append(Region(region, "ec2.{}.amazonaws.com".format(region))) + if region in regions_not_enabled_by_default: + regions.append(Region(region, "ec2.{}.amazonaws.com".format(region), "not-opted-in")) + else: + regions.append(Region(region, "ec2.{}.amazonaws.com".format(region), "opt-in-not-required")) for region in Session().get_available_regions("ec2", partition_name="aws-us-gov"): - regions.append(Region(region, "ec2.{}.amazonaws.com".format(region))) + regions.append(Region(region, "ec2.{}.amazonaws.com".format(region), "opt-in-not-required")) for region in Session().get_available_regions("ec2", partition_name="aws-cn"): - regions.append(Region(region, "ec2.{}.amazonaws.com.cn".format(region))) + regions.append(Region(region, "ec2.{}.amazonaws.com.cn".format(region), "opt-in-not-required")) zones = { "af-south-1": [ diff --git a/moto/ec2/responses/availability_zones_and_regions.py b/moto/ec2/responses/availability_zones_and_regions.py index 28cc3a495673..61d4eb1ae49f 100644 --- a/moto/ec2/responses/availability_zones_and_regions.py +++ b/moto/ec2/responses/availability_zones_and_regions.py @@ -22,6 +22,7 @@ def describe_regions(self): {{ region.name }} {{ region.endpoint }} + {{ region.opt_in_status }} {% endfor %} diff --git a/tests/test_ec2/test_availability_zones_and_regions.py b/tests/test_ec2/test_availability_zones_and_regions.py index bec9459e8a15..92fb2b6578c6 100644 --- a/tests/test_ec2/test_availability_zones_and_regions.py +++ b/tests/test_ec2/test_availability_zones_and_regions.py @@ -40,6 +40,13 @@ def test_boto3_describe_regions(): resp = ec2.describe_regions(RegionNames=[test_region]) resp["Regions"].should.have.length_of(1) resp["Regions"][0].should.have.key("RegionName").which.should.equal(test_region) + resp["Regions"][0].should.have.key("OptInStatus").which.should.equal("opt-in-not-required") + + test_region = "ap-east-1" + resp = ec2.describe_regions(RegionNames=[test_region]) + resp["Regions"].should.have.length_of(1) + resp["Regions"][0].should.have.key("RegionName").which.should.equal(test_region) + resp["Regions"][0].should.have.key("OptInStatus").which.should.equal("not-opted-in") @mock_ec2 From 59c1f0e1313ae362b192dfbc98b698129157a660 Mon Sep 17 00:00:00 2001 From: Matthew Gladney Date: Fri, 24 Apr 2020 14:20:58 -0400 Subject: [PATCH 270/658] black reformatting changes --- moto/ec2/models.py | 25 +++++++++++++------ .../test_availability_zones_and_regions.py | 4 ++- 2 files changed, 20 insertions(+), 9 deletions(-) diff --git a/moto/ec2/models.py b/moto/ec2/models.py index 7a935120954e..7f7b6369ecc3 100644 --- a/moto/ec2/models.py +++ b/moto/ec2/models.py @@ -1504,21 +1504,30 @@ def __init__(self, name, region_name, zone_id): class RegionsAndZonesBackend(object): - regions_not_enabled_by_default = [ - 'ap-east-1', - 'me-south-1' - ] + regions_not_enabled_by_default = ["ap-east-1", "me-south-1"] regions = [] for region in Session().get_available_regions("ec2"): if region in regions_not_enabled_by_default: - regions.append(Region(region, "ec2.{}.amazonaws.com".format(region), "not-opted-in")) + regions.append( + Region(region, "ec2.{}.amazonaws.com".format(region), "not-opted-in") + ) else: - regions.append(Region(region, "ec2.{}.amazonaws.com".format(region), "opt-in-not-required")) + regions.append( + Region( + region, "ec2.{}.amazonaws.com".format(region), "opt-in-not-required" 
+ ) + ) for region in Session().get_available_regions("ec2", partition_name="aws-us-gov"): - regions.append(Region(region, "ec2.{}.amazonaws.com".format(region), "opt-in-not-required")) + regions.append( + Region(region, "ec2.{}.amazonaws.com".format(region), "opt-in-not-required") + ) for region in Session().get_available_regions("ec2", partition_name="aws-cn"): - regions.append(Region(region, "ec2.{}.amazonaws.com.cn".format(region), "opt-in-not-required")) + regions.append( + Region( + region, "ec2.{}.amazonaws.com.cn".format(region), "opt-in-not-required" + ) + ) zones = { "af-south-1": [ diff --git a/tests/test_ec2/test_availability_zones_and_regions.py b/tests/test_ec2/test_availability_zones_and_regions.py index 92fb2b6578c6..830d4c2bf85f 100644 --- a/tests/test_ec2/test_availability_zones_and_regions.py +++ b/tests/test_ec2/test_availability_zones_and_regions.py @@ -40,7 +40,9 @@ def test_boto3_describe_regions(): resp = ec2.describe_regions(RegionNames=[test_region]) resp["Regions"].should.have.length_of(1) resp["Regions"][0].should.have.key("RegionName").which.should.equal(test_region) - resp["Regions"][0].should.have.key("OptInStatus").which.should.equal("opt-in-not-required") + resp["Regions"][0].should.have.key("OptInStatus").which.should.equal( + "opt-in-not-required" + ) test_region = "ap-east-1" resp = ec2.describe_regions(RegionNames=[test_region]) From b63110be9e7fc249eda1528a9161fa1870f0484a Mon Sep 17 00:00:00 2001 From: = Date: Fri, 24 Apr 2020 21:47:11 +0200 Subject: [PATCH 271/658] handle description in describe secret operation, add tests --- moto/secretsmanager/models.py | 2 +- .../test_secretsmanager.py | 101 ++++++++++++++++++ 2 files changed, 102 insertions(+), 1 deletion(-) diff --git a/moto/secretsmanager/models.py b/moto/secretsmanager/models.py index 7762d41bc9d6..29bd6c96e1c0 100644 --- a/moto/secretsmanager/models.py +++ b/moto/secretsmanager/models.py @@ -279,7 +279,7 @@ def describe_secret(self, secret_id): { "ARN": secret_arn(self.region, secret["secret_id"]), "Name": secret["name"], - "Description": "", + "Description": secret.get("description", ""), "KmsKeyId": "", "RotationEnabled": secret["rotation_enabled"], "RotationLambdaARN": secret["rotation_lambda_arn"], diff --git a/tests/test_secretsmanager/test_secretsmanager.py b/tests/test_secretsmanager/test_secretsmanager.py index 49d1dc925117..6ec53460a8b6 100644 --- a/tests/test_secretsmanager/test_secretsmanager.py +++ b/tests/test_secretsmanager/test_secretsmanager.py @@ -137,6 +137,45 @@ def test_create_secret_with_tags(): ] +@mock_secretsmanager +def test_create_secret_with_description(): + conn = boto3.client("secretsmanager", region_name="us-east-1") + secret_name = "test-secret-with-tags" + + result = conn.create_secret( + Name=secret_name, SecretString="foosecret", Description="desc" + ) + assert result["ARN"] + assert result["Name"] == secret_name + secret_value = conn.get_secret_value(SecretId=secret_name) + assert secret_value["SecretString"] == "foosecret" + secret_details = conn.describe_secret(SecretId=secret_name) + assert secret_details["Description"] == "desc" + + +@mock_secretsmanager +def test_create_secret_with_tags_and_description(): + conn = boto3.client("secretsmanager", region_name="us-east-1") + secret_name = "test-secret-with-tags" + + result = conn.create_secret( + Name=secret_name, + SecretString="foosecret", + Description="desc", + Tags=[{"Key": "Foo", "Value": "Bar"}, {"Key": "Mykey", "Value": "Myvalue"}], + ) + assert result["ARN"] + assert result["Name"] == secret_name 
+ secret_value = conn.get_secret_value(SecretId=secret_name) + assert secret_value["SecretString"] == "foosecret" + secret_details = conn.describe_secret(SecretId=secret_name) + assert secret_details["Tags"] == [ + {"Key": "Foo", "Value": "Bar"}, + {"Key": "Mykey", "Value": "Myvalue"}, + ] + assert secret_details["Description"] == "desc" + + @mock_secretsmanager def test_delete_secret(): conn = boto3.client("secretsmanager", region_name="us-west-2") @@ -690,6 +729,31 @@ def test_put_secret_value_versions_differ_if_same_secret_put_twice(): assert first_version_id != second_version_id +@mock_secretsmanager +def test_put_secret_value_maintains_description_and_tags(): + conn = boto3.client("secretsmanager", region_name="us-west-2") + + conn.create_secret( + Name=DEFAULT_SECRET_NAME, + SecretString="foosecret", + Description="desc", + Tags=[{"Key": "Foo", "Value": "Bar"}, {"Key": "Mykey", "Value": "Myvalue"}], + ) + + conn = boto3.client("secretsmanager", region_name="us-west-2") + conn.put_secret_value( + SecretId=DEFAULT_SECRET_NAME, + SecretString="dupe_secret", + VersionStages=["AWSCURRENT"], + ) + secret_details = conn.describe_secret(SecretId=DEFAULT_SECRET_NAME) + assert secret_details["Tags"] == [ + {"Key": "Foo", "Value": "Bar"}, + {"Key": "Mykey", "Value": "Myvalue"}, + ] + assert secret_details["Description"] == "desc" + + @mock_secretsmanager def test_can_list_secret_version_ids(): conn = boto3.client("secretsmanager", region_name="us-west-2") @@ -739,6 +803,43 @@ def test_update_secret(): assert created_secret["VersionId"] != updated_secret["VersionId"] +@mock_secretsmanager +def test_update_secret_with_tags_and_description(): + conn = boto3.client("secretsmanager", region_name="us-west-2") + + created_secret = conn.create_secret( + Name="test-secret", + SecretString="foosecret", + Description="desc", + Tags=[{"Key": "Foo", "Value": "Bar"}, {"Key": "Mykey", "Value": "Myvalue"}], + ) + + assert created_secret["ARN"] + assert created_secret["Name"] == "test-secret" + assert created_secret["VersionId"] != "" + + secret = conn.get_secret_value(SecretId="test-secret") + assert secret["SecretString"] == "foosecret" + + updated_secret = conn.update_secret( + SecretId="test-secret", SecretString="barsecret" + ) + + assert updated_secret["ARN"] + assert updated_secret["Name"] == "test-secret" + assert updated_secret["VersionId"] != "" + + secret = conn.get_secret_value(SecretId="test-secret") + assert secret["SecretString"] == "barsecret" + assert created_secret["VersionId"] != updated_secret["VersionId"] + secret_details = conn.describe_secret(SecretId="test-secret") + assert secret_details["Tags"] == [ + {"Key": "Foo", "Value": "Bar"}, + {"Key": "Mykey", "Value": "Myvalue"}, + ] + assert secret_details["Description"] == "desc" + + @mock_secretsmanager def test_update_secret_which_does_not_exit(): conn = boto3.client("secretsmanager", region_name="us-west-2") From a658900d69ca4ae36a4b265161809a529aabb211 Mon Sep 17 00:00:00 2001 From: JohnWC Date: Sat, 25 Apr 2020 03:13:36 -0500 Subject: [PATCH 272/658] Add policy to apigateway --- moto/apigateway/models.py | 4 ++++ moto/apigateway/responses.py | 2 ++ 2 files changed, 6 insertions(+) diff --git a/moto/apigateway/models.py b/moto/apigateway/models.py index e5e5e3bfdd14..e011af60144d 100644 --- a/moto/apigateway/models.py +++ b/moto/apigateway/models.py @@ -461,6 +461,7 @@ def __init__(self, id, region_name, name, description, **kwargs): self.description = description self.create_date = int(time.time()) self.api_key_source = 
kwargs.get("api_key_source") or "HEADER" + self.policy = kwargs.get("policy") or None self.endpoint_configuration = kwargs.get("endpoint_configuration") or { "types": ["EDGE"] } @@ -485,6 +486,7 @@ def to_dict(self): "apiKeySource": self.api_key_source, "endpointConfiguration": self.endpoint_configuration, "tags": self.tags, + "policy": self.policy, } def add_child(self, path, parent_id=None): @@ -713,6 +715,7 @@ def create_rest_api( api_key_source=None, endpoint_configuration=None, tags=None, + policy=None, ): api_id = create_id() rest_api = RestAPI( @@ -723,6 +726,7 @@ def create_rest_api( api_key_source=api_key_source, endpoint_configuration=endpoint_configuration, tags=tags, + policy=policy, ) self.apis[api_id] = rest_api return rest_api diff --git a/moto/apigateway/responses.py b/moto/apigateway/responses.py index 822d4c0ce1a6..a3c41a6d4b48 100644 --- a/moto/apigateway/responses.py +++ b/moto/apigateway/responses.py @@ -59,6 +59,7 @@ def restapis(self, request, full_url, headers): api_key_source = self._get_param("apiKeySource") endpoint_configuration = self._get_param("endpointConfiguration") tags = self._get_param("tags") + policy = self._get_param("policy") # Param validation if api_key_source and api_key_source not in API_KEY_SOURCES: @@ -94,6 +95,7 @@ def restapis(self, request, full_url, headers): api_key_source=api_key_source, endpoint_configuration=endpoint_configuration, tags=tags, + policy=policy, ) return 200, {}, json.dumps(rest_api.to_dict()) From 0828c5af9dfff7430537cfb26cc62a8523d9cef3 Mon Sep 17 00:00:00 2001 From: JohnWC Date: Sat, 25 Apr 2020 03:27:59 -0500 Subject: [PATCH 273/658] Add unit test for add apigateway with policy --- tests/test_apigateway/test_apigateway.py | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/tests/test_apigateway/test_apigateway.py b/tests/test_apigateway/test_apigateway.py index 596ed2dd407a..107dc5d05dca 100644 --- a/tests/test_apigateway/test_apigateway.py +++ b/tests/test_apigateway/test_apigateway.py @@ -69,6 +69,24 @@ def test_create_rest_api_with_tags(): response["tags"].should.equal({"MY_TAG1": "MY_VALUE1"}) +@mock_apigateway +def test_create_rest_api_with_policy(): + client = boto3.client("apigateway", region_name="us-west-2") + + policy = "{\"Version\": \"2012-10-17\",\"Statement\": []}" + response = client.create_rest_api( + name="my_api", + description="this is my api", + policy=policy + ) + api_id = response["id"] + + response = client.get_rest_api(restApiId=api_id) + + assert "policy" in response + response["policy"].should.equal(policy) + + @mock_apigateway def test_create_rest_api_invalid_apikeysource(): client = boto3.client("apigateway", region_name="us-west-2") From 637e0188a2ab81bf3a72b7ddae2677f235c50973 Mon Sep 17 00:00:00 2001 From: Olivier Parent Colombel Date: Mon, 20 Apr 2020 20:54:31 +0200 Subject: [PATCH 274/658] Allow S3 keys to start with leading slashes. 
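A short sketch of the behaviour under test (hypothetical bucket name; run inside `@mock_s3` or against real S3): "/key" and "key" are distinct object keys, so the leading slash must survive URL parsing:

```python
import boto3
from botocore.exceptions import ClientError

s3 = boto3.client("s3", region_name="us-east-1")
s3.create_bucket(Bucket="demo-bucket")
s3.put_object(Bucket="demo-bucket", Key="/key", Body=b"some body")

try:
    # Looking the object up without the leading slash must miss.
    s3.get_object(Bucket="demo-bucket", Key="key")
except ClientError as err:
    assert err.response["Error"]["Code"] == "NoSuchKey"
```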
--- moto/s3/responses.py | 3 ++- moto/s3/urls.py | 2 +- tests/test_s3/test_s3.py | 22 ++++++++++++++++++++++ 3 files changed, 25 insertions(+), 2 deletions(-) diff --git a/moto/s3/responses.py b/moto/s3/responses.py index 442489a8a946..ce1e6128ddcd 100644 --- a/moto/s3/responses.py +++ b/moto/s3/responses.py @@ -134,7 +134,8 @@ def parse_key_name(pth): - return pth.lstrip("/") + # strip the first '/' left by urlparse + return pth[1:] if pth.startswith('/') else pth def is_delete_keys(request, path, bucket_name): diff --git a/moto/s3/urls.py b/moto/s3/urls.py index 752762184d09..4c4e9ea76858 100644 --- a/moto/s3/urls.py +++ b/moto/s3/urls.py @@ -15,5 +15,5 @@ # path-based bucket + key "{0}/(?P[^/]+)/(?P.+)": S3ResponseInstance.key_or_control_response, # subdomain bucket + key with empty first part of path - "{0}//(?P.*)$": S3ResponseInstance.key_or_control_response, + "{0}/(?P/.*)$": S3ResponseInstance.key_or_control_response, } diff --git a/tests/test_s3/test_s3.py b/tests/test_s3/test_s3.py index ffbd73966b04..3048f6507d18 100644 --- a/tests/test_s3/test_s3.py +++ b/tests/test_s3/test_s3.py @@ -3744,6 +3744,28 @@ def test_root_dir_with_empty_name_works(): store_and_read_back_a_key("/") +@parameterized(['mybucket', 'my.bucket']) +@mock_s3 +def test_leading_slashes_not_removed(bucket_name): + """Make sure that leading slashes are not removed internally.""" + s3 = boto3.client("s3", region_name=DEFAULT_REGION_NAME) + s3.create_bucket(Bucket=bucket_name) + + uploaded_key = '/key' + invalid_key_1 = 'key' + invalid_key_2 = '//key' + + s3.put_object(Bucket=bucket_name, Key=uploaded_key, Body=b'Some body') + + with assert_raises(ClientError) as e: + s3.get_object(Bucket=bucket_name, Key=invalid_key_1) + e.exception.response["Error"]["Code"].should.equal("NoSuchKey") + + with assert_raises(ClientError) as e: + s3.get_object(Bucket=bucket_name, Key=invalid_key_2) + e.exception.response["Error"]["Code"].should.equal("NoSuchKey") + + @parameterized( [("foo/bar/baz",), ("foo",), ("foo/run_dt%3D2019-01-01%252012%253A30%253A00",)] ) From d852f7dd063ae17cc1fa7f97bc3510e3daef55e9 Mon Sep 17 00:00:00 2001 From: Olivier Parent Colombel Date: Sat, 25 Apr 2020 15:10:23 +0200 Subject: [PATCH 275/658] Fixing lint errors. 
--- moto/s3/responses.py | 2 +- tests/test_s3/test_s3.py | 10 +++++----- 2 files changed, 6 insertions(+), 6 deletions(-)

diff --git a/moto/s3/responses.py b/moto/s3/responses.py index ce1e6128ddcd..71c424244f58 100644 --- a/moto/s3/responses.py +++ b/moto/s3/responses.py @@ -135,7 +135,7 @@ def parse_key_name(pth): # strip the first '/' left by urlparse - return pth[1:] if pth.startswith('/') else pth + return pth[1:] if pth.startswith("/") else pth def is_delete_keys(request, path, bucket_name): diff --git a/tests/test_s3/test_s3.py b/tests/test_s3/test_s3.py index 3048f6507d18..fea76b9e3cc6 100644 --- a/tests/test_s3/test_s3.py +++ b/tests/test_s3/test_s3.py @@ -3744,18 +3744,18 @@ def test_root_dir_with_empty_name_works(): store_and_read_back_a_key("/") -@parameterized(['mybucket', 'my.bucket']) +@parameterized(["mybucket", "my.bucket"]) @mock_s3 def test_leading_slashes_not_removed(bucket_name): """Make sure that leading slashes are not removed internally.""" s3 = boto3.client("s3", region_name=DEFAULT_REGION_NAME) s3.create_bucket(Bucket=bucket_name) - uploaded_key = '/key' - invalid_key_1 = 'key' - invalid_key_2 = '//key' + uploaded_key = "/key" + invalid_key_1 = "key" + invalid_key_2 = "//key" - s3.put_object(Bucket=bucket_name, Key=uploaded_key, Body=b'Some body') + s3.put_object(Bucket=bucket_name, Key=uploaded_key, Body=b"Some body") with assert_raises(ClientError) as e: s3.get_object(Bucket=bucket_name, Key=invalid_key_1) e.exception.response["Error"]["Code"].should.equal("NoSuchKey")

From 4a800d8f2c8677b098ea0a2c41deface8236c267 Mon Sep 17 00:00:00 2001 From: JohnWC Date: Sat, 25 Apr 2020 11:24:54 -0500 Subject: [PATCH 276/658] Updated for black

--- tests/test_apigateway/test_apigateway.py | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-)

diff --git a/tests/test_apigateway/test_apigateway.py b/tests/test_apigateway/test_apigateway.py index 107dc5d05dca..b04328a03799 100644 --- a/tests/test_apigateway/test_apigateway.py +++ b/tests/test_apigateway/test_apigateway.py @@ -73,11 +73,9 @@ def test_create_rest_api_with_tags(): def test_create_rest_api_with_policy(): client = boto3.client("apigateway", region_name="us-west-2") - policy = "{\"Version\": \"2012-10-17\",\"Statement\": []}" + policy = '{"Version": "2012-10-17","Statement": []}' response = client.create_rest_api( - name="my_api", - description="this is my api", - policy=policy + name="my_api", description="this is my api", policy=policy ) api_id = response["id"]

From ec731ac901563d256d8b24779e35050f06a9bfba Mon Sep 17 00:00:00 2001 From: pvbouwel Date: Sun, 26 Apr 2020 15:12:33 +0100 Subject: [PATCH 277/658] Improve DDB expressions support4: Execution using AST

Part of a structured approach for UpdateExpressions: 1) Expression gets parsed into a tokenlist (tokenized) 2) Tokenlist gets transformed to expression tree (AST) 3) The AST gets validated (full semantic correctness) 4) AST gets processed to perform the update -> this commit

This commit uses the AST to execute the UpdateExpression. All the existing tests pass. The only tests that have been updated are in test_dynamodb_table_with_range_key.py because they wrongly allow adding a set to a path that doesn't exist. This has been aligned to correspond to the behavior of AWS DynamoDB.

This commit will resolve https://github.com/spulec/moto/issues/2806 Multiple tests have been implemented that verify this.
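A hedged sketch of the behaviour change called out above (table, key and attribute names are hypothetical; assumes a table "demo-table" with an existing plain item, e.g. inside `@mock_dynamodb2`): ADDing a set at a document path that does not exist is now rejected, matching AWS DynamoDB:

```python
import boto3
from botocore.exceptions import ClientError

ddb = boto3.client("dynamodb", region_name="us-east-1")
try:
    ddb.update_item(
        TableName="demo-table",
        Key={"id": {"S": "item1"}},
        # The nested path itemmap.itemlist[1].itemsets does not exist yet.
        UpdateExpression="ADD itemmap.itemlist[1].itemsets :item_set",
        ExpressionAttributeValues={":item_set": {"SS": ["new_item"]}},
    )
except ClientError as err:
    assert err.response["Error"]["Code"] == "ValidationException"
```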
--- moto/dynamodb2/exceptions.py | 18 + moto/dynamodb2/models/__init__.py | 214 +-------- moto/dynamodb2/models/dynamo_type.py | 106 ++++- moto/dynamodb2/parsing/executors.py | 262 ++++++++++ moto/dynamodb2/parsing/validators.py | 127 +++-- tests/test_dynamodb2/test_dynamodb.py | 271 ++++++++++- .../test_dynamodb2/test_dynamodb_executor.py | 446 ++++++++++++++++++ .../test_dynamodb_table_with_range_key.py | 25 +- 8 files changed, 1200 insertions(+), 269 deletions(-) create mode 100644 moto/dynamodb2/parsing/executors.py create mode 100644 tests/test_dynamodb2/test_dynamodb_executor.py diff --git a/moto/dynamodb2/exceptions.py b/moto/dynamodb2/exceptions.py index 5dd87ef6beba..18e498a90553 100644 --- a/moto/dynamodb2/exceptions.py +++ b/moto/dynamodb2/exceptions.py @@ -39,6 +39,17 @@ def __init__(self): super(AttributeDoesNotExist, self).__init__(self.attr_does_not_exist_msg) +class ProvidedKeyDoesNotExist(MockValidationException): + provided_key_does_not_exist_msg = ( + "The provided key element does not match the schema" + ) + + def __init__(self): + super(ProvidedKeyDoesNotExist, self).__init__( + self.provided_key_does_not_exist_msg + ) + + class ExpressionAttributeNameNotDefined(InvalidUpdateExpression): name_not_defined_msg = "An expression attribute name used in the document path is not defined; attribute name: {n}" @@ -131,3 +142,10 @@ def __init__(self, operator_or_function, operand_type): super(IncorrectOperandType, self).__init__( self.inv_operand_msg.format(f=operator_or_function, t=operand_type) ) + + +class IncorrectDataType(MockValidationException): + inc_data_type_msg = "An operand in the update expression has an incorrect data type" + + def __init__(self): + super(IncorrectDataType, self).__init__(self.inc_data_type_msg) diff --git a/moto/dynamodb2/models/__init__.py b/moto/dynamodb2/models/__init__.py index 3ddbcbc54124..33ee1747dda1 100644 --- a/moto/dynamodb2/models/__init__.py +++ b/moto/dynamodb2/models/__init__.py @@ -8,7 +8,6 @@ import uuid from boto3 import Session -from botocore.exceptions import ParamValidationError from moto.compat import OrderedDict from moto.core import BaseBackend, BaseModel from moto.core.utils import unix_time @@ -20,8 +19,9 @@ ItemSizeTooLarge, ItemSizeToUpdateTooLarge, ) -from moto.dynamodb2.models.utilities import bytesize, attribute_is_list +from moto.dynamodb2.models.utilities import bytesize from moto.dynamodb2.models.dynamo_type import DynamoType +from moto.dynamodb2.parsing.executors import UpdateExpressionExecutor from moto.dynamodb2.parsing.expressions import UpdateExpressionParser from moto.dynamodb2.parsing.validators import UpdateExpressionValidator @@ -71,6 +71,17 @@ def __init__(self, hash_key, hash_key_type, range_key, range_key_type, attrs): for key, value in attrs.items(): self.attrs[key] = DynamoType(value) + def __eq__(self, other): + return all( + [ + self.hash_key == other.hash_key, + self.hash_key_type == other.hash_key_type, + self.range_key == other.range_key, + self.range_key_type == other.range_key_type, + self.attrs == other.attrs, + ] + ) + def __repr__(self): return "Item: {0}".format(self.to_json()) @@ -94,192 +105,6 @@ def describe_attrs(self, attributes): included = self.attrs return {"Item": included} - def update( - self, update_expression, expression_attribute_names, expression_attribute_values - ): - # Update subexpressions are identifiable by the operator keyword, so split on that and - # get rid of the empty leading string. 
- parts = [ - p - for p in re.split( - r"\b(SET|REMOVE|ADD|DELETE)\b", update_expression, flags=re.I - ) - if p - ] - # make sure that we correctly found only operator/value pairs - assert ( - len(parts) % 2 == 0 - ), "Mismatched operators and values in update expression: '{}'".format( - update_expression - ) - for action, valstr in zip(parts[:-1:2], parts[1::2]): - action = action.upper() - - # "Should" retain arguments in side (...) - values = re.split(r",(?![^(]*\))", valstr) - for value in values: - # A Real value - value = value.lstrip(":").rstrip(",").strip() - for k, v in expression_attribute_names.items(): - value = re.sub(r"{0}\b".format(k), v, value) - - if action == "REMOVE": - key = value - attr, list_index = attribute_is_list(key.split(".")[0]) - if "." not in key: - if list_index: - new_list = DynamoType(self.attrs[attr]) - new_list.delete(None, list_index) - self.attrs[attr] = new_list - else: - self.attrs.pop(value, None) - else: - # Handle nested dict updates - self.attrs[attr].delete(".".join(key.split(".")[1:])) - elif action == "SET": - key, value = value.split("=", 1) - key = key.strip() - value = value.strip() - - # check whether key is a list - attr, list_index = attribute_is_list(key.split(".")[0]) - # If value not exists, changes value to a default if needed, else its the same as it was - value = self._get_default(value) - # If operation == list_append, get the original value and append it - value = self._get_appended_list(value, expression_attribute_values) - - if type(value) != DynamoType: - if value in expression_attribute_values: - dyn_value = DynamoType(expression_attribute_values[value]) - else: - dyn_value = DynamoType({"S": value}) - else: - dyn_value = value - - if "." in key and attr not in self.attrs: - raise ValueError # Setting nested attr not allowed if first attr does not exist yet - elif attr not in self.attrs: - try: - self.attrs[attr] = dyn_value # set new top-level attribute - except ItemSizeTooLarge: - raise ItemSizeToUpdateTooLarge() - else: - self.attrs[attr].set( - ".".join(key.split(".")[1:]), dyn_value, list_index - ) # set value recursively - - elif action == "ADD": - key, value = value.split(" ", 1) - key = key.strip() - value_str = value.strip() - if value_str in expression_attribute_values: - dyn_value = DynamoType(expression_attribute_values[value]) - else: - raise TypeError - - # Handle adding numbers - value gets added to existing value, - # or added to 0 if it doesn't exist yet - if dyn_value.is_number(): - existing = self.attrs.get(key, DynamoType({"N": "0"})) - if not existing.same_type(dyn_value): - raise TypeError() - self.attrs[key] = DynamoType( - { - "N": str( - decimal.Decimal(existing.value) - + decimal.Decimal(dyn_value.value) - ) - } - ) - - # Handle adding sets - value is added to the set, or set is - # created with only this value if it doesn't exist yet - # New value must be of same set type as previous value - elif dyn_value.is_set(): - key_head = key.split(".")[0] - key_tail = ".".join(key.split(".")[1:]) - if key_head not in self.attrs: - self.attrs[key_head] = DynamoType({dyn_value.type: {}}) - existing = self.attrs.get(key_head) - existing = existing.get(key_tail) - if existing.value and not existing.same_type(dyn_value): - raise TypeError() - new_set = set(existing.value or []).union(dyn_value.value) - existing.set( - key=None, - new_value=DynamoType({dyn_value.type: list(new_set)}), - ) - else: # Number and Sets are the only supported types for ADD - raise TypeError - - elif action == "DELETE": - key, value = 
value.split(" ", 1) - key = key.strip() - value_str = value.strip() - if value_str in expression_attribute_values: - dyn_value = DynamoType(expression_attribute_values[value]) - else: - raise TypeError - - if not dyn_value.is_set(): - raise TypeError - key_head = key.split(".")[0] - key_tail = ".".join(key.split(".")[1:]) - existing = self.attrs.get(key_head) - existing = existing.get(key_tail) - if existing: - if not existing.same_type(dyn_value): - raise TypeError - new_set = set(existing.value).difference(dyn_value.value) - existing.set( - key=None, - new_value=DynamoType({existing.type: list(new_set)}), - ) - else: - raise NotImplementedError( - "{} update action not yet supported".format(action) - ) - - def _get_appended_list(self, value, expression_attribute_values): - if type(value) != DynamoType: - list_append_re = re.match("list_append\\((.+),(.+)\\)", value) - if list_append_re: - new_value = expression_attribute_values[list_append_re.group(2).strip()] - old_list_key = list_append_re.group(1) - # old_key could be a function itself (if_not_exists) - if old_list_key.startswith("if_not_exists"): - old_list = self._get_default(old_list_key) - if not isinstance(old_list, DynamoType): - old_list = DynamoType(expression_attribute_values[old_list]) - else: - old_list = self.attrs[old_list_key.split(".")[0]] - if "." in old_list_key: - # Value is nested inside a map - find the appropriate child attr - old_list = old_list.child_attr( - ".".join(old_list_key.split(".")[1:]) - ) - if not old_list.is_list(): - raise ParamValidationError - old_list.value.extend([DynamoType(v) for v in new_value["L"]]) - value = old_list - return value - - def _get_default(self, value): - if value.startswith("if_not_exists"): - # Function signature - match = re.match( - r".*if_not_exists\s*\((?P.+),\s*(?P.+)\).*", value - ) - if not match: - raise TypeError - - path, value = match.groups() - - # If it already exists, get its value so we dont overwrite it - if path in self.attrs: - value = self.attrs[path] - return value - def update_with_attribute_updates(self, attribute_updates): for attribute_name, update_action in attribute_updates.items(): action = update_action["Action"] @@ -1266,17 +1091,18 @@ def update_item( item = table.get_item(hash_value, range_value) if update_expression: - UpdateExpressionValidator( + validated_ast = UpdateExpressionValidator( update_expression_ast, expression_attribute_names=expression_attribute_names, expression_attribute_values=expression_attribute_values, item=item, ).validate() - item.update( - update_expression, - expression_attribute_names, - expression_attribute_values, - ) + try: + UpdateExpressionExecutor( + validated_ast, item, expression_attribute_names + ).execute() + except ItemSizeTooLarge: + raise ItemSizeToUpdateTooLarge() else: item.update_with_attribute_updates(attribute_updates) if table.stream_shard is not None: diff --git a/moto/dynamodb2/models/dynamo_type.py b/moto/dynamodb2/models/dynamo_type.py index a3199dcaa2ea..1fc1bcef39b2 100644 --- a/moto/dynamodb2/models/dynamo_type.py +++ b/moto/dynamodb2/models/dynamo_type.py @@ -1,10 +1,53 @@ import six from moto.dynamodb2.comparisons import get_comparison_func -from moto.dynamodb2.exceptions import InvalidUpdateExpression +from moto.dynamodb2.exceptions import InvalidUpdateExpression, IncorrectDataType from moto.dynamodb2.models.utilities import attribute_is_list, bytesize +class DDBType(object): + """ + Official documentation at 
https://docs.aws.amazon.com/amazondynamodb/latest/APIReference/API_AttributeValue.html + """ + + BINARY_SET = "BS" + NUMBER_SET = "NS" + STRING_SET = "SS" + STRING = "S" + NUMBER = "N" + MAP = "M" + LIST = "L" + BOOLEAN = "BOOL" + BINARY = "B" + NULL = "NULL" + + +class DDBTypeConversion(object): + _human_type_mapping = { + val: key.replace("_", " ") + for key, val in DDBType.__dict__.items() + if key.upper() == key + } + + @classmethod + def get_human_type(cls, abbreviated_type): + """ + Args: + abbreviated_type(str): An attribute of DDBType + + Returns: + str: The human-readable form of the DDBType. + """ + try: + human_type_str = cls._human_type_mapping[abbreviated_type] + except KeyError: + raise ValueError( + "Invalid abbreviated_type {at}".format(at=abbreviated_type) + ) + + return human_type_str + + class DynamoType(object): """ http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/DataModel.html#DataModelDataTypes @@ -50,13 +93,22 @@ def set(self, key, new_value, index=None): self.value = new_value.value else: if attr not in self.value: # nonexistingattribute - type_of_new_attr = "M" if "." in key else new_value.type + type_of_new_attr = DDBType.MAP if "." in key else new_value.type self.value[attr] = DynamoType({type_of_new_attr: {}}) # {'M': {'foo': DynamoType}} ==> DynamoType.set(new_value) self.value[attr].set( ".".join(key.split(".")[1:]), new_value, list_index ) + def __contains__(self, item): + if self.type == DDBType.STRING: + return False + try: + self.__getitem__(item) + return True + except KeyError: + return False + def delete(self, key, index=None): if index: if not key: @@ -126,27 +178,35 @@ def __repr__(self): def __add__(self, other): if self.type != other.type: raise TypeError("Different types of operandi is not allowed.") - if self.type == "N": - return DynamoType({"N": "{v}".format(v=int(self.value) + int(other.value))}) + if self.is_number(): + self_value = float(self.value) if "." in self.value else int(self.value) + other_value = float(other.value) if "." in other.value else int(other.value) + return DynamoType( + {DDBType.NUMBER: "{v}".format(v=self_value + other_value)} + ) else: - raise TypeError("Sum only supported for Numbers.") + raise IncorrectDataType() def __sub__(self, other): if self.type != other.type: raise TypeError("Different types of operandi is not allowed.") - if self.type == "N": - return DynamoType({"N": "{v}".format(v=int(self.value) - int(other.value))}) + if self.type == DDBType.NUMBER: + self_value = float(self.value) if "." in self.value else int(self.value) + other_value = float(other.value) if "." in other.value else int(other.value) + return DynamoType( + {DDBType.NUMBER: "{v}".format(v=self_value - other_value)} + ) else: raise TypeError("Sum only supported for Numbers.") def __getitem__(self, item): if isinstance(item, six.string_types): # If our DynamoType is a map it should be subscriptable with a key - if self.type == "M": + if self.type == DDBType.MAP: return self.value[item] elif isinstance(item, int): # If our DynamoType is a list it should be subscriptable with an index - if self.type == "L": + if self.type == DDBType.LIST: return self.value[item] raise TypeError( "This DynamoType {dt} is not subscriptable by a {it}".format( @@ -154,6 +214,20 @@ def __getitem__(self, item): ) ) + def __setitem__(self, key, value): + if isinstance(key, int): + if self.is_list(): + if key >= len(self.value): + # DynamoDB doesn't care if the index is out of bounds; just add it to the end.
+ self.value.append(value) + else: + self.value[key] = value + elif isinstance(key, six.string_types): + if self.is_map(): + self.value[key] = value + else: + raise NotImplementedError("No set_item for {t}".format(t=type(key))) + @property def cast_value(self): if self.is_number(): @@ -222,16 +296,22 @@ def compare(self, range_comparison, range_objs): return comparison_func(self.cast_value, *range_values) def is_number(self): - return self.type == "N" + return self.type == DDBType.NUMBER def is_set(self): - return self.type == "SS" or self.type == "NS" or self.type == "BS" + return self.type in (DDBType.STRING_SET, DDBType.NUMBER_SET, DDBType.BINARY_SET) def is_list(self): - return self.type == "L" + return self.type == DDBType.LIST def is_map(self): - return self.type == "M" + return self.type == DDBType.MAP def same_type(self, other): return self.type == other.type + + def pop(self, key, *args, **kwargs): + if self.is_map() or self.is_list(): + self.value.pop(key, *args, **kwargs) + else: + raise TypeError("pop not supported for DynamoType {t}".format(t=self.type)) diff --git a/moto/dynamodb2/parsing/executors.py b/moto/dynamodb2/parsing/executors.py new file mode 100644 index 000000000000..8c51c9cec87a --- /dev/null +++ b/moto/dynamodb2/parsing/executors.py @@ -0,0 +1,262 @@ +from abc import abstractmethod + +from moto.dynamodb2.exceptions import IncorrectOperandType, IncorrectDataType +from moto.dynamodb2.models import DynamoType +from moto.dynamodb2.models.dynamo_type import DDBTypeConversion, DDBType +from moto.dynamodb2.parsing.ast_nodes import ( + UpdateExpressionSetAction, + UpdateExpressionDeleteAction, + UpdateExpressionRemoveAction, + UpdateExpressionAddAction, + UpdateExpressionPath, + DDBTypedValue, + ExpressionAttribute, + ExpressionSelector, + ExpressionAttributeName, +) +from moto.dynamodb2.parsing.validators import ExpressionPathResolver + + +class NodeExecutor(object): + def __init__(self, ast_node, expression_attribute_names): + self.node = ast_node + self.expression_attribute_names = expression_attribute_names + + @abstractmethod + def execute(self, item): + pass + + def get_item_part_for_path_nodes(self, item, path_nodes): + """ + For a list of path nodes, traverse the item by following the path_nodes + Args: + item(Item): + path_nodes(list): + + Returns: + + """ + if len(path_nodes) == 0: + return item.attrs + else: + return ExpressionPathResolver( + self.expression_attribute_names + ).resolve_expression_path_nodes_to_dynamo_type(item, path_nodes) + + def get_item_before_end_of_path(self, item): + """ + Get the part of the item on which the action will be performed. For most actions this should be the parent, as + that element will need to be modified by the action. + Args: + item(Item): + + Returns: + DynamoType or dict: The path to be set + """ + return self.get_item_part_for_path_nodes( + item, self.get_path_expression_nodes()[:-1] + ) + + def get_item_at_end_of_path(self, item): + """ + For a DELETE the path points at the string set so we need to evaluate the full path. + Args: + item(Item): + + Returns: + DynamoType or dict: The path to be set + """ + return self.get_item_part_for_path_nodes(item, self.get_path_expression_nodes()) + + # Get the part of the item on which the action will be performed. For most actions this should be the parent, as + # that element will need to be modified by the action.
+ get_item_part_in_which_to_perform_action = get_item_before_end_of_path + + def get_path_expression_nodes(self): + update_expression_path = self.node.children[0] + assert isinstance(update_expression_path, UpdateExpressionPath) + return update_expression_path.children + + def get_element_to_action(self): + return self.get_path_expression_nodes()[-1] + + def get_action_value(self): + """ + + Returns: + DynamoType: The value to be set + """ + ddb_typed_value = self.node.children[1] + assert isinstance(ddb_typed_value, DDBTypedValue) + dynamo_type_value = ddb_typed_value.children[0] + assert isinstance(dynamo_type_value, DynamoType) + return dynamo_type_value + + +class SetExecutor(NodeExecutor): + def execute(self, item): + self.set( + item_part_to_modify_with_set=self.get_item_part_in_which_to_perform_action( + item + ), + element_to_set=self.get_element_to_action(), + value_to_set=self.get_action_value(), + expression_attribute_names=self.expression_attribute_names, + ) + + @classmethod + def set( + cls, + item_part_to_modify_with_set, + element_to_set, + value_to_set, + expression_attribute_names, + ): + if isinstance(element_to_set, ExpressionAttribute): + attribute_name = element_to_set.get_attribute_name() + item_part_to_modify_with_set[attribute_name] = value_to_set + elif isinstance(element_to_set, ExpressionSelector): + index = element_to_set.get_index() + item_part_to_modify_with_set[index] = value_to_set + elif isinstance(element_to_set, ExpressionAttributeName): + attribute_name = expression_attribute_names[ + element_to_set.get_attribute_name_placeholder() + ] + item_part_to_modify_with_set[attribute_name] = value_to_set + else: + raise NotImplementedError( + "Moto does not support setting {t} yet".format(t=type(element_to_set)) + ) + + +class DeleteExecutor(NodeExecutor): + operator = "operator: DELETE" + + def execute(self, item): + string_set_to_remove = self.get_action_value() + assert isinstance(string_set_to_remove, DynamoType) + if not string_set_to_remove.is_set(): + raise IncorrectOperandType( + self.operator, + DDBTypeConversion.get_human_type(string_set_to_remove.type), + ) + + string_set = self.get_item_at_end_of_path(item) + assert isinstance(string_set, DynamoType) + if string_set.type != string_set_to_remove.type: + raise IncorrectDataType() + # String set is currently implemented as a list + string_set_list = string_set.value + + stringset_to_remove_list = string_set_to_remove.value + + for value in stringset_to_remove_list: + try: + string_set_list.remove(value) + except (KeyError, ValueError): + # DynamoDB does not mind if value is not present + pass + + +class RemoveExecutor(NodeExecutor): + def execute(self, item): + element_to_remove = self.get_element_to_action() + if isinstance(element_to_remove, ExpressionAttribute): + attribute_name = element_to_remove.get_attribute_name() + self.get_item_part_in_which_to_perform_action(item).pop( + attribute_name, None + ) + elif isinstance(element_to_remove, ExpressionAttributeName): + attribute_name = self.expression_attribute_names[ + element_to_remove.get_attribute_name_placeholder() + ] + self.get_item_part_in_which_to_perform_action(item).pop( + attribute_name, None + ) + elif isinstance(element_to_remove, ExpressionSelector): + index = element_to_remove.get_index() + try: + self.get_item_part_in_which_to_perform_action(item).pop(index) + except IndexError: + # DynamoDB does not care that index is out of bounds, it will just do nothing. 
pass + else: + raise NotImplementedError( + "Moto does not support removing {t} yet".format( + t=type(element_to_remove) + ) + ) + + +class AddExecutor(NodeExecutor): + def execute(self, item): + value_to_add = self.get_action_value() + if isinstance(value_to_add, DynamoType): + if value_to_add.is_set(): + current_string_set = self.get_item_at_end_of_path(item) + assert isinstance(current_string_set, DynamoType) + if not current_string_set.type == value_to_add.type: + raise IncorrectDataType() + # Sets are implemented as a list + for value in value_to_add.value: + if value in current_string_set.value: + continue + else: + current_string_set.value.append(value) + elif value_to_add.type == DDBType.NUMBER: + existing_value = self.get_item_at_end_of_path(item) + assert isinstance(existing_value, DynamoType) + if not existing_value.type == DDBType.NUMBER: + raise IncorrectDataType() + new_value = existing_value + value_to_add + SetExecutor.set( + item_part_to_modify_with_set=self.get_item_before_end_of_path(item), + element_to_set=self.get_element_to_action(), + value_to_set=new_value, + expression_attribute_names=self.expression_attribute_names, + ) + else: + raise IncorrectDataType() + + +class UpdateExpressionExecutor(object): + execution_map = { + UpdateExpressionSetAction: SetExecutor, + UpdateExpressionAddAction: AddExecutor, + UpdateExpressionRemoveAction: RemoveExecutor, + UpdateExpressionDeleteAction: DeleteExecutor, + } + + def __init__(self, update_ast, item, expression_attribute_names): + self.update_ast = update_ast + self.item = item + self.expression_attribute_names = expression_attribute_names + + def execute(self, node=None): + """ + As explained in moto.dynamodb2.parsing.expressions.NestableExpressionParserMixin._create_node, the order of nodes + in the AST can be translated to the order of statements in the expression. As such we can start at the root node + and process the nodes 1-by-1. If no specific execution for the node type is defined we can execute the children + in order since it will be a container node that is expandable and the left child will be first in the statement.
+ + Args: + node(Node): + + Returns: + None + """ + if node is None: + node = self.update_ast + + node_executor = self.get_specific_execution(node) + if node_executor is None: + for node in node.children: + self.execute(node) + else: + node_executor(node, self.expression_attribute_names).execute(self.item) + + def get_specific_execution(self, node): + for node_class in self.execution_map: + if isinstance(node, node_class): + return self.execution_map[node_class] + return None diff --git a/moto/dynamodb2/parsing/validators.py b/moto/dynamodb2/parsing/validators.py index 180c7a874831..f924a713c309 100644 --- a/moto/dynamodb2/parsing/validators.py +++ b/moto/dynamodb2/parsing/validators.py @@ -11,6 +11,7 @@ ExpressionAttributeNameNotDefined, IncorrectOperandType, InvalidUpdateExpressionInvalidDocumentPath, + ProvidedKeyDoesNotExist, ) from moto.dynamodb2.models import DynamoType from moto.dynamodb2.parsing.ast_nodes import ( @@ -56,6 +57,76 @@ def replace_expression_attribute_value_with_value(self, node): return DDBTypedValue(DynamoType(target)) + +class ExpressionPathResolver(object): + def __init__(self, expression_attribute_names): + self.expression_attribute_names = expression_attribute_names + + @classmethod + def raise_exception_if_keyword(cls, attribute): + if attribute.upper() in ReservedKeywords.get_reserved_keywords(): + raise AttributeIsReservedKeyword(attribute) + + def resolve_expression_path(self, item, update_expression_path): + assert isinstance(update_expression_path, UpdateExpressionPath) + return self.resolve_expression_path_nodes(item, update_expression_path.children) + + def resolve_expression_path_nodes(self, item, update_expression_path_nodes): + target = item.attrs + + for child in update_expression_path_nodes: + # First replace placeholder with attribute_name + attr_name = None + if isinstance(child, ExpressionAttributeName): + attr_placeholder = child.get_attribute_name_placeholder() + try: + attr_name = self.expression_attribute_names[attr_placeholder] + except KeyError: + raise ExpressionAttributeNameNotDefined(attr_placeholder) + elif isinstance(child, ExpressionAttribute): + attr_name = child.get_attribute_name() + self.raise_exception_if_keyword(attr_name) + if attr_name is not None: + # Resolve attribute_name + try: + target = target[attr_name] + except (KeyError, TypeError): + if child == update_expression_path_nodes[-1]: + return NoneExistingPath(creatable=True) + return NoneExistingPath() + else: + if isinstance(child, ExpressionPathDescender): + continue + elif isinstance(child, ExpressionSelector): + index = child.get_index() + if target.is_list(): + try: + target = target[index] + except IndexError: + # When a list index goes out of bounds on the assignment side that is no problem: + # it will just append to the list.
+ if child == update_expression_path_nodes[-1]: + return NoneExistingPath(creatable=True) + return NoneExistingPath() + else: + raise InvalidUpdateExpressionInvalidDocumentPath + else: + raise NotImplementedError( + "Path resolution for {t}".format(t=type(child)) + ) + return DDBTypedValue(target) + + def resolve_expression_path_nodes_to_dynamo_type( + self, item, update_expression_path_nodes + ): + node = self.resolve_expression_path_nodes(item, update_expression_path_nodes) + if isinstance(node, NoneExistingPath): + raise ProvidedKeyDoesNotExist() + assert isinstance(node, DDBTypedValue) + return node.get_value() + + class ExpressionAttributeResolvingProcessor(DepthFirstTraverser): def _processing_map(self): return { @@ -107,55 +178,9 @@ def process_expression_path_node(self, node): return node def resolve_expression_path(self, node): - assert isinstance(node, UpdateExpressionPath) - - target = deepcopy(self.item.attrs) - for child in node.children: - # First replace placeholder with attribute_name - attr_name = None - if isinstance(child, ExpressionAttributeName): - attr_placeholder = child.get_attribute_name_placeholder() - try: - attr_name = self.expression_attribute_names[attr_placeholder] - except KeyError: - raise ExpressionAttributeNameNotDefined(attr_placeholder) - elif isinstance(child, ExpressionAttribute): - attr_name = child.get_attribute_name() - self.raise_exception_if_keyword(attr_name) - if attr_name is not None: - # Resolv attribute_name - try: - target = target[attr_name] - except (KeyError, TypeError): - if child == node.children[-1]: - return NoneExistingPath(creatable=True) - return NoneExistingPath() - else: - if isinstance(child, ExpressionPathDescender): - continue - elif isinstance(child, ExpressionSelector): - index = child.get_index() - if target.is_list(): - try: - target = target[index] - except IndexError: - # When a list goes out of bounds when assigning that is no problem when at the assignment - # side. It will just append to the list.
- if child == node.children[-1]: - return NoneExistingPath(creatable=True) - return NoneExistingPath() - else: - raise InvalidUpdateExpressionInvalidDocumentPath - else: - raise NotImplementedError( - "Path resolution for {t}".format(t=type(child)) - ) - return DDBTypedValue(DynamoType(target)) - - @classmethod - def raise_exception_if_keyword(cls, attribute): - if attribute.upper() in ReservedKeywords.get_reserved_keywords(): - raise AttributeIsReservedKeyword(attribute) + return ExpressionPathResolver( + self.expression_attribute_names + ).resolve_expression_path(self.item, node) class UpdateExpressionFunctionEvaluator(DepthFirstTraverser): @@ -183,7 +208,9 @@ def process_function(self, node): assert isinstance(result, (DDBTypedValue, NoneExistingPath)) return result elif function_name == "list_append": - first_arg = self.get_list_from_ddb_typed_value(first_arg, function_name) + first_arg = deepcopy( + self.get_list_from_ddb_typed_value(first_arg, function_name) + ) second_arg = self.get_list_from_ddb_typed_value(second_arg, function_name) for list_element in second_arg.value: first_arg.value.append(list_element) diff --git a/tests/test_dynamodb2/test_dynamodb.py b/tests/test_dynamodb2/test_dynamodb.py index 089782e7757c..b1bf18f0a594 100644 --- a/tests/test_dynamodb2/test_dynamodb.py +++ b/tests/test_dynamodb2/test_dynamodb.py @@ -1,21 +1,17 @@ from __future__ import unicode_literals, print_function -import re from decimal import Decimal -import six import boto import boto3 from boto3.dynamodb.conditions import Attr, Key import re -import requests import sure # noqa from moto import mock_dynamodb2, mock_dynamodb2_deprecated from moto.dynamodb2 import dynamodb_backend2, dynamodb_backends2 from boto.exception import JSONResponseError from botocore.exceptions import ClientError, ParamValidationError from tests.helpers import requires_boto_gte -import tests.backport_assert_raises import moto.dynamodb2.comparisons import moto.dynamodb2.models @@ -3221,6 +3217,25 @@ def test_remove_top_level_attribute(): result.should.equal({"id": {"S": "foo"}}) +@mock_dynamodb2 +def test_remove_top_level_attribute_non_existent(): + """ + Remove statements do not require attribute to exist they silently pass + """ + table_name = "test_remove" + client = create_table_with_list(table_name) + ddb_item = {"id": {"S": "foo"}, "item": {"S": "bar"}} + client.put_item(TableName=table_name, Item=ddb_item) + client.update_item( + TableName=table_name, + Key={"id": {"S": "foo"}}, + UpdateExpression="REMOVE non_existent_attribute", + ExpressionAttributeNames={"#i": "item"}, + ) + result = client.get_item(TableName=table_name, Key={"id": {"S": "foo"}})["Item"] + result.should.equal(ddb_item) + + @mock_dynamodb2 def test_remove_list_index__remove_existing_index(): table_name = "test_list_index_access" @@ -4331,3 +4346,251 @@ def test_list_tables_exclusive_start_table_name_empty(): resp = client.list_tables(Limit=1, ExclusiveStartTableName="whatever") len(resp["TableNames"]).should.equal(0) + + +def assert_correct_client_error( + client_error, code, message_template, message_values=None, braces=None +): + """ + Assert whether a client_error is as expected. Allow for a list of values to be passed into the message + + Args: + client_error(ClientError): The ClientError exception that was raised + code(str): The code for the error (e.g. ValidationException) + message_template(str): Error message template. if message_values is not None then this template has a {values} + as placeholder. 
For example: + 'Value provided in ExpressionAttributeValues unused in expressions: keys: {values}' + message_values(list of str|None): The values that are passed in the error message + braces(list of str|None): List of length 2 with opening and closing brace for the values. By default it will be + surrounded by curly brackets + """ + braces = braces or ["{", "}"] + assert client_error.response["Error"]["Code"] == code + if message_values is not None: + values_string = "{open_brace}(?P<values>.*){close_brace}".format( + open_brace=braces[0], close_brace=braces[1] + ) + re_msg = re.compile(message_template.format(values=values_string)) + match_result = re_msg.match(client_error.response["Error"]["Message"]) + assert match_result is not None + values_string = match_result.groupdict()["values"] + values = [key for key in values_string.split(", ")] + assert len(message_values) == len(values) + for value in message_values: + assert value in values + else: + assert client_error.response["Error"]["Message"] == message_template + + +def create_simple_table_and_return_client(): + dynamodb = boto3.client("dynamodb", region_name="eu-west-1") + dynamodb.create_table( + TableName="moto-test", + KeySchema=[{"AttributeName": "id", "KeyType": "HASH"}], + AttributeDefinitions=[{"AttributeName": "id", "AttributeType": "S"},], + ProvisionedThroughput={"ReadCapacityUnits": 1, "WriteCapacityUnits": 1}, + ) + dynamodb.put_item( + TableName="moto-test", + Item={"id": {"S": "1"}, "myNum": {"N": "1"}, "MyStr": {"S": "1"},}, + ) + return dynamodb + + +# https://github.com/spulec/moto/issues/2806 +# https://docs.aws.amazon.com/amazondynamodb/latest/APIReference/API_UpdateItem.html +# #DDB-UpdateItem-request-UpdateExpression +@mock_dynamodb2 +def test_update_item_with_attribute_in_right_hand_side_and_operation(): + dynamodb = create_simple_table_and_return_client() + + dynamodb.update_item( + TableName="moto-test", + Key={"id": {"S": "1"}}, + UpdateExpression="SET myNum = myNum+:val", + ExpressionAttributeValues={":val": {"N": "3"}}, + ) + + result = dynamodb.get_item(TableName="moto-test", Key={"id": {"S": "1"}}) + assert result["Item"]["myNum"]["N"] == "4" + + dynamodb.update_item( + TableName="moto-test", + Key={"id": {"S": "1"}}, + UpdateExpression="SET myNum = myNum - :val", + ExpressionAttributeValues={":val": {"N": "1"}}, + ) + result = dynamodb.get_item(TableName="moto-test", Key={"id": {"S": "1"}}) + assert result["Item"]["myNum"]["N"] == "3" + + +@mock_dynamodb2 +def test_non_existing_attribute_should_raise_exception(): + """ + Is the error message correctly raised if an attribute is referenced but does not exist for the item? + """ + dynamodb = create_simple_table_and_return_client() + + try: + dynamodb.update_item( + TableName="moto-test", + Key={"id": {"S": "1"}}, + UpdateExpression="SET MyStr = no_attr + MyStr", + ) + assert False, "Validation exception not thrown" + except dynamodb.exceptions.ClientError as e: + assert_correct_client_error( + e, + "ValidationException", + "The provided expression refers to an attribute that does not exist in the item", + ) + + +@mock_dynamodb2 +def test_update_expression_with_plus_in_attribute_name(): + """ + Is the error message correctly raised if an attribute contains a plus and is passed in without an AttributeName, and + lhs & rhs are not attribute IDs by themselves.
+ """ + dynamodb = create_simple_table_and_return_client() + + dynamodb.put_item( + TableName="moto-test", + Item={"id": {"S": "1"}, "my+Num": {"S": "1"}, "MyStr": {"S": "aaa"},}, + ) + try: + dynamodb.update_item( + TableName="moto-test", + Key={"id": {"S": "1"}}, + UpdateExpression="SET MyStr = my+Num", + ) + assert False, "Validation exception not thrown" + except dynamodb.exceptions.ClientError as e: + assert_correct_client_error( + e, + "ValidationException", + "The provided expression refers to an attribute that does not exist in the item", + ) + + +@mock_dynamodb2 +def test_update_expression_with_minus_in_attribute_name(): + """ + Does error message get correctly raised if attribute contains a minus and is passed in without an AttributeName. And + lhs & rhs are not attribute IDs by themselve. + """ + dynamodb = create_simple_table_and_return_client() + + dynamodb.put_item( + TableName="moto-test", + Item={"id": {"S": "1"}, "my-Num": {"S": "1"}, "MyStr": {"S": "aaa"},}, + ) + try: + dynamodb.update_item( + TableName="moto-test", + Key={"id": {"S": "1"}}, + UpdateExpression="SET MyStr = my-Num", + ) + assert False, "Validation exception not thrown" + except dynamodb.exceptions.ClientError as e: + assert_correct_client_error( + e, + "ValidationException", + "The provided expression refers to an attribute that does not exist in the item", + ) + + +@mock_dynamodb2 +def test_update_expression_with_space_in_attribute_name(): + """ + Does error message get correctly raised if attribute contains a space and is passed in without an AttributeName. And + lhs & rhs are not attribute IDs by themselves. + """ + dynamodb = create_simple_table_and_return_client() + + dynamodb.put_item( + TableName="moto-test", + Item={"id": {"S": "1"}, "my Num": {"S": "1"}, "MyStr": {"S": "aaa"},}, + ) + + try: + dynamodb.update_item( + TableName="moto-test", + Key={"id": {"S": "1"}}, + UpdateExpression="SET MyStr = my Num", + ) + assert False, "Validation exception not thrown" + except dynamodb.exceptions.ClientError as e: + assert_raise_syntax_error(e, "Num", "my Num") + + +@mock_dynamodb2 +def test_summing_up_2_strings_raises_exception(): + """ + Update set supports different DynamoDB types but some operations are not supported. For example summing up 2 strings + raises an exception. 
It results in ClientError with code ValidationException: + Saying An operand in the update expression has an incorrect data type + """ + dynamodb = create_simple_table_and_return_client() + + try: + dynamodb.update_item( + TableName="moto-test", + Key={"id": {"S": "1"}}, + UpdateExpression="SET MyStr = MyStr + MyStr", + ) + assert False, "Validation exception not thrown" + except dynamodb.exceptions.ClientError as e: + assert_correct_client_error( + e, + "ValidationException", + "An operand in the update expression has an incorrect data type", + ) + + +# https://github.com/spulec/moto/issues/2806 +@mock_dynamodb2 +def test_update_item_with_attribute_in_right_hand_side(): + """ + After tokenization and building expression make sure referenced attributes are replaced with their current value + """ + dynamodb = create_simple_table_and_return_client() + + # Make sure there are 2 values + dynamodb.put_item( + TableName="moto-test", + Item={"id": {"S": "1"}, "myVal1": {"S": "Value1"}, "myVal2": {"S": "Value2"}}, + ) + + dynamodb.update_item( + TableName="moto-test", + Key={"id": {"S": "1"}}, + UpdateExpression="SET myVal1 = myVal2", + ) + + result = dynamodb.get_item(TableName="moto-test", Key={"id": {"S": "1"}}) + assert result["Item"]["myVal1"]["S"] == result["Item"]["myVal2"]["S"] == "Value2" + + +@mock_dynamodb2 +def test_multiple_updates(): + dynamodb = create_simple_table_and_return_client() + dynamodb.put_item( + TableName="moto-test", + Item={"id": {"S": "1"}, "myNum": {"N": "1"}, "path": {"N": "6"}}, + ) + dynamodb.update_item( + TableName="moto-test", + Key={"id": {"S": "1"}}, + UpdateExpression="SET myNum = #p + :val, newAttr = myNum", + ExpressionAttributeValues={":val": {"N": "1"}}, + ExpressionAttributeNames={"#p": "path"}, + ) + result = dynamodb.get_item(TableName="moto-test", Key={"id": {"S": "1"}})["Item"] + expected_result = { + "myNum": {"N": "7"}, + "newAttr": {"N": "1"}, + "path": {"N": "6"}, + "id": {"S": "1"}, + } + assert result == expected_result diff --git a/tests/test_dynamodb2/test_dynamodb_executor.py b/tests/test_dynamodb2/test_dynamodb_executor.py new file mode 100644 index 000000000000..4ef0bb423285 --- /dev/null +++ b/tests/test_dynamodb2/test_dynamodb_executor.py @@ -0,0 +1,446 @@ +from moto.dynamodb2.exceptions import IncorrectOperandType, IncorrectDataType +from moto.dynamodb2.models import Item, DynamoType +from moto.dynamodb2.parsing.executors import UpdateExpressionExecutor +from moto.dynamodb2.parsing.expressions import UpdateExpressionParser +from moto.dynamodb2.parsing.validators import UpdateExpressionValidator +from parameterized import parameterized + + +def test_execution_of_if_not_exists_not_existing_value(): + update_expression = "SET a = if_not_exists(b, a)" + update_expression_ast = UpdateExpressionParser.make(update_expression) + item = Item( + hash_key=DynamoType({"S": "id"}), + hash_key_type="TYPE", + range_key=None, + range_key_type=None, + attrs={"id": {"S": "1"}, "a": {"S": "A"}}, + ) + validated_ast = UpdateExpressionValidator( + update_expression_ast, + expression_attribute_names=None, + expression_attribute_values=None, + item=item, + ).validate() + UpdateExpressionExecutor(validated_ast, item, None).execute() + expected_item = Item( + hash_key=DynamoType({"S": "id"}), + hash_key_type="TYPE", + range_key=None, + range_key_type=None, + attrs={"id": {"S": "1"}, "a": {"S": "A"}}, + ) + assert expected_item == item + + +def test_execution_of_if_not_exists_with_existing_attribute_should_return_attribute(): + update_expression = "SET a = 
if_not_exists(b, a)" + update_expression_ast = UpdateExpressionParser.make(update_expression) + item = Item( + hash_key=DynamoType({"S": "id"}), + hash_key_type="TYPE", + range_key=None, + range_key_type=None, + attrs={"id": {"S": "1"}, "a": {"S": "A"}, "b": {"S": "B"}}, + ) + validated_ast = UpdateExpressionValidator( + update_expression_ast, + expression_attribute_names=None, + expression_attribute_values=None, + item=item, + ).validate() + UpdateExpressionExecutor(validated_ast, item, None).execute() + expected_item = Item( + hash_key=DynamoType({"S": "id"}), + hash_key_type="TYPE", + range_key=None, + range_key_type=None, + attrs={"id": {"S": "1"}, "a": {"S": "B"}, "b": {"S": "B"}}, + ) + assert expected_item == item + + +def test_execution_of_if_not_exists_with_existing_attribute_should_return_value(): + update_expression = "SET a = if_not_exists(b, :val)" + update_expression_values = {":val": {"N": "4"}} + update_expression_ast = UpdateExpressionParser.make(update_expression) + item = Item( + hash_key=DynamoType({"S": "id"}), + hash_key_type="TYPE", + range_key=None, + range_key_type=None, + attrs={"id": {"S": "1"}, "b": {"N": "3"}}, + ) + validated_ast = UpdateExpressionValidator( + update_expression_ast, + expression_attribute_names=None, + expression_attribute_values=update_expression_values, + item=item, + ).validate() + UpdateExpressionExecutor(validated_ast, item, None).execute() + expected_item = Item( + hash_key=DynamoType({"S": "id"}), + hash_key_type="TYPE", + range_key=None, + range_key_type=None, + attrs={"id": {"S": "1"}, "b": {"N": "3"}, "a": {"N": "3"}}, + ) + assert expected_item == item + + +def test_execution_of_if_not_exists_with_non_existing_attribute_should_return_value(): + update_expression = "SET a = if_not_exists(b, :val)" + update_expression_values = {":val": {"N": "4"}} + update_expression_ast = UpdateExpressionParser.make(update_expression) + item = Item( + hash_key=DynamoType({"S": "id"}), + hash_key_type="TYPE", + range_key=None, + range_key_type=None, + attrs={"id": {"S": "1"}}, + ) + validated_ast = UpdateExpressionValidator( + update_expression_ast, + expression_attribute_names=None, + expression_attribute_values=update_expression_values, + item=item, + ).validate() + UpdateExpressionExecutor(validated_ast, item, None).execute() + expected_item = Item( + hash_key=DynamoType({"S": "id"}), + hash_key_type="TYPE", + range_key=None, + range_key_type=None, + attrs={"id": {"S": "1"}, "a": {"N": "4"}}, + ) + assert expected_item == item + + +def test_execution_of_sum_operation(): + update_expression = "SET a = a + b" + update_expression_ast = UpdateExpressionParser.make(update_expression) + item = Item( + hash_key=DynamoType({"S": "id"}), + hash_key_type="TYPE", + range_key=None, + range_key_type=None, + attrs={"id": {"S": "1"}, "a": {"N": "3"}, "b": {"N": "4"}}, + ) + validated_ast = UpdateExpressionValidator( + update_expression_ast, + expression_attribute_names=None, + expression_attribute_values=None, + item=item, + ).validate() + UpdateExpressionExecutor(validated_ast, item, None).execute() + expected_item = Item( + hash_key=DynamoType({"S": "id"}), + hash_key_type="TYPE", + range_key=None, + range_key_type=None, + attrs={"id": {"S": "1"}, "a": {"N": "7"}, "b": {"N": "4"}}, + ) + assert expected_item == item + + +def test_execution_of_remove(): + update_expression = "Remove a" + update_expression_ast = UpdateExpressionParser.make(update_expression) + item = Item( + hash_key=DynamoType({"S": "id"}), + hash_key_type="TYPE", + range_key=None, + 
range_key_type=None, + attrs={"id": {"S": "1"}, "a": {"N": "3"}, "b": {"N": "4"}}, + ) + validated_ast = UpdateExpressionValidator( + update_expression_ast, + expression_attribute_names=None, + expression_attribute_values=None, + item=item, + ).validate() + UpdateExpressionExecutor(validated_ast, item, None).execute() + expected_item = Item( + hash_key=DynamoType({"S": "id"}), + hash_key_type="TYPE", + range_key=None, + range_key_type=None, + attrs={"id": {"S": "1"}, "b": {"N": "4"}}, + ) + assert expected_item == item + + +def test_execution_of_remove_in_map(): + update_expression = "Remove itemmap.itemlist[1].foo11" + update_expression_ast = UpdateExpressionParser.make(update_expression) + item = Item( + hash_key=DynamoType({"S": "id"}), + hash_key_type="TYPE", + range_key=None, + range_key_type=None, + attrs={ + "id": {"S": "foo2"}, + "itemmap": { + "M": { + "itemlist": { + "L": [ + {"M": {"foo00": {"S": "bar1"}, "foo01": {"S": "bar2"}}}, + {"M": {"foo10": {"S": "bar1"}, "foo11": {"S": "bar2"}}}, + ] + } + } + }, + }, + ) + validated_ast = UpdateExpressionValidator( + update_expression_ast, + expression_attribute_names=None, + expression_attribute_values=None, + item=item, + ).validate() + UpdateExpressionExecutor(validated_ast, item, None).execute() + expected_item = Item( + hash_key=DynamoType({"S": "id"}), + hash_key_type="TYPE", + range_key=None, + range_key_type=None, + attrs={ + "id": {"S": "foo2"}, + "itemmap": { + "M": { + "itemlist": { + "L": [ + {"M": {"foo00": {"S": "bar1"}, "foo01": {"S": "bar2"}}}, + {"M": {"foo10": {"S": "bar1"},}}, + ] + } + } + }, + }, + ) + assert expected_item == item + + +def test_execution_of_remove_in_list(): + update_expression = "Remove itemmap.itemlist[1]" + update_expression_ast = UpdateExpressionParser.make(update_expression) + item = Item( + hash_key=DynamoType({"S": "id"}), + hash_key_type="TYPE", + range_key=None, + range_key_type=None, + attrs={ + "id": {"S": "foo2"}, + "itemmap": { + "M": { + "itemlist": { + "L": [ + {"M": {"foo00": {"S": "bar1"}, "foo01": {"S": "bar2"}}}, + {"M": {"foo10": {"S": "bar1"}, "foo11": {"S": "bar2"}}}, + ] + } + } + }, + }, + ) + validated_ast = UpdateExpressionValidator( + update_expression_ast, + expression_attribute_names=None, + expression_attribute_values=None, + item=item, + ).validate() + UpdateExpressionExecutor(validated_ast, item, None).execute() + expected_item = Item( + hash_key=DynamoType({"S": "id"}), + hash_key_type="TYPE", + range_key=None, + range_key_type=None, + attrs={ + "id": {"S": "foo2"}, + "itemmap": { + "M": { + "itemlist": { + "L": [{"M": {"foo00": {"S": "bar1"}, "foo01": {"S": "bar2"}}},] + } + } + }, + }, + ) + assert expected_item == item + + +def test_execution_of_delete_element_from_set(): + update_expression = "delete s :value" + update_expression_ast = UpdateExpressionParser.make(update_expression) + item = Item( + hash_key=DynamoType({"S": "id"}), + hash_key_type="TYPE", + range_key=None, + range_key_type=None, + attrs={"id": {"S": "foo2"}, "s": {"SS": ["value1", "value2", "value3"]},}, + ) + validated_ast = UpdateExpressionValidator( + update_expression_ast, + expression_attribute_names=None, + expression_attribute_values={":value": {"SS": ["value2", "value5"]}}, + item=item, + ).validate() + UpdateExpressionExecutor(validated_ast, item, None).execute() + expected_item = Item( + hash_key=DynamoType({"S": "id"}), + hash_key_type="TYPE", + range_key=None, + range_key_type=None, + attrs={"id": {"S": "foo2"}, "s": {"SS": ["value1", "value3"]},}, + ) + assert expected_item == item 
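The executor tests above drive the DELETE action straight through the parser/validator/executor pipeline; the same semantics can also be observed through the public client API. A minimal client-level sketch (not part of the patch; table and attribute names are illustrative, and it assumes moto's mock_dynamodb2 decorator):

import boto3
from moto import mock_dynamodb2

@mock_dynamodb2
def demo_delete_from_set():
    client = boto3.client("dynamodb", region_name="us-east-1")
    client.create_table(
        TableName="demo",
        KeySchema=[{"AttributeName": "id", "KeyType": "HASH"}],
        AttributeDefinitions=[{"AttributeName": "id", "AttributeType": "S"}],
        ProvisionedThroughput={"ReadCapacityUnits": 1, "WriteCapacityUnits": 1},
    )
    client.put_item(
        TableName="demo",
        Item={"id": {"S": "1"}, "s": {"SS": ["value1", "value2", "value3"]}},
    )
    # DELETE removes the matching elements; "value5" is absent from the set
    # and is silently ignored, mirroring the executor test above.
    client.update_item(
        TableName="demo",
        Key={"id": {"S": "1"}},
        UpdateExpression="DELETE s :value",
        ExpressionAttributeValues={":value": {"SS": ["value2", "value5"]}},
    )
    item = client.get_item(TableName="demo", Key={"id": {"S": "1"}})["Item"]
    assert set(item["s"]["SS"]) == {"value1", "value3"}

demo_delete_from_set()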
+ + +def test_execution_of_add_number(): + update_expression = "add s :value" + update_expression_ast = UpdateExpressionParser.make(update_expression) + item = Item( + hash_key=DynamoType({"S": "id"}), + hash_key_type="TYPE", + range_key=None, + range_key_type=None, + attrs={"id": {"S": "foo2"}, "s": {"N": "5"},}, + ) + validated_ast = UpdateExpressionValidator( + update_expression_ast, + expression_attribute_names=None, + expression_attribute_values={":value": {"N": "10"}}, + item=item, + ).validate() + UpdateExpressionExecutor(validated_ast, item, None).execute() + expected_item = Item( + hash_key=DynamoType({"S": "id"}), + hash_key_type="TYPE", + range_key=None, + range_key_type=None, + attrs={"id": {"S": "foo2"}, "s": {"N": "15"}}, + ) + assert expected_item == item + + +def test_execution_of_add_set_to_a_number(): + update_expression = "add s :value" + update_expression_ast = UpdateExpressionParser.make(update_expression) + item = Item( + hash_key=DynamoType({"S": "id"}), + hash_key_type="TYPE", + range_key=None, + range_key_type=None, + attrs={"id": {"S": "foo2"}, "s": {"N": "5"},}, + ) + try: + validated_ast = UpdateExpressionValidator( + update_expression_ast, + expression_attribute_names=None, + expression_attribute_values={":value": {"SS": ["s1"]}}, + item=item, + ).validate() + UpdateExpressionExecutor(validated_ast, item, None).execute() + expected_item = Item( + hash_key=DynamoType({"S": "id"}), + hash_key_type="TYPE", + range_key=None, + range_key_type=None, + attrs={"id": {"S": "foo2"}, "s": {"N": "15"}}, + ) + assert expected_item == item + assert False + except IncorrectDataType: + assert True + + +def test_execution_of_add_to_a_set(): + update_expression = "ADD s :value" + update_expression_ast = UpdateExpressionParser.make(update_expression) + item = Item( + hash_key=DynamoType({"S": "id"}), + hash_key_type="TYPE", + range_key=None, + range_key_type=None, + attrs={"id": {"S": "foo2"}, "s": {"SS": ["value1", "value2", "value3"]},}, + ) + validated_ast = UpdateExpressionValidator( + update_expression_ast, + expression_attribute_names=None, + expression_attribute_values={":value": {"SS": ["value2", "value5"]}}, + item=item, + ).validate() + UpdateExpressionExecutor(validated_ast, item, None).execute() + expected_item = Item( + hash_key=DynamoType({"S": "id"}), + hash_key_type="TYPE", + range_key=None, + range_key_type=None, + attrs={ + "id": {"S": "foo2"}, + "s": {"SS": ["value1", "value2", "value3", "value5"]}, + }, + ) + assert expected_item == item + + +@parameterized( + [ + ({":value": {"S": "10"}}, "STRING",), + ({":value": {"N": "10"}}, "NUMBER",), + ({":value": {"B": "10"}}, "BINARY",), + ({":value": {"BOOL": True}}, "BOOLEAN",), + ({":value": {"NULL": True}}, "NULL",), + ({":value": {"M": {"el0": {"S": "10"}}}}, "MAP",), + ({":value": {"L": []}}, "LIST",), + ] +) +def test_execution_of__delete_element_from_set_invalid_value( + expression_attribute_values, unexpected_data_type +): + """A delete statement must use a value of type SS in order to delete elements from a set.""" + update_expression = "delete s :value" + update_expression_ast = UpdateExpressionParser.make(update_expression) + item = Item( + hash_key=DynamoType({"S": "id"}), + hash_key_type="TYPE", + range_key=None, + range_key_type=None, + attrs={"id": {"S": "foo2"}, "s": {"SS": ["value1", "value2", "value3"]},}, + ) + try: + validated_ast = UpdateExpressionValidator( + update_expression_ast, + expression_attribute_names=None, + expression_attribute_values=expression_attribute_values, + item=item, + 
).validate() + UpdateExpressionExecutor(validated_ast, item, None).execute() + assert False, "Must raise exception" + except IncorrectOperandType as e: + assert e.operator_or_function == "operator: DELETE" + assert e.operand_type == unexpected_data_type + + +def test_execution_of_delete_element_from_a_string_attribute(): + """A delete statement must use a value of type SS in order to delete elements from a set.""" + update_expression = "delete s :value" + update_expression_ast = UpdateExpressionParser.make(update_expression) + item = Item( + hash_key=DynamoType({"S": "id"}), + hash_key_type="TYPE", + range_key=None, + range_key_type=None, + attrs={"id": {"S": "foo2"}, "s": {"S": "5"},}, + ) + try: + validated_ast = UpdateExpressionValidator( + update_expression_ast, + expression_attribute_names=None, + expression_attribute_values={":value": {"SS": ["value2"]}}, + item=item, + ).validate() + UpdateExpressionExecutor(validated_ast, item, None).execute() + assert False, "Must raise exception" + except IncorrectDataType: + assert True diff --git a/tests/test_dynamodb2/test_dynamodb_table_with_range_key.py b/tests/test_dynamodb2/test_dynamodb_table_with_range_key.py index 1aa2175c16ce..6fba713ec4dc 100644 --- a/tests/test_dynamodb2/test_dynamodb_table_with_range_key.py +++ b/tests/test_dynamodb2/test_dynamodb_table_with_range_key.py @@ -8,6 +8,8 @@ from botocore.exceptions import ClientError import sure # noqa from freezegun import freeze_time +from nose.tools import assert_raises + from moto import mock_dynamodb2, mock_dynamodb2_deprecated from boto.exception import JSONResponseError from tests.helpers import requires_boto_gte @@ -1273,6 +1275,15 @@ def test_update_item_with_expression(): ) +def assert_failure_due_to_key_not_in_schema(func, **kwargs): + with assert_raises(ClientError) as ex: + func(**kwargs) + ex.exception.response["Error"]["Code"].should.equal("ValidationException") + ex.exception.response["Error"]["Message"].should.equal( + "The provided key element does not match the schema" + ) + + @mock_dynamodb2 def test_update_item_add_with_expression(): table = _create_table_with_range_key() @@ -1299,14 +1310,13 @@ def test_update_item_add_with_expression(): dict(table.get_item(Key=item_key)["Item"]).should.equal(current_item) # Update item to add a string value to a non-existing set - # Should just create the set in the background - table.update_item( + # Should throw: 'The provided key element does not match the schema' + assert_failure_due_to_key_not_in_schema( + table.update_item, Key=item_key, UpdateExpression="ADD non_existing_str_set :v", ExpressionAttributeValues={":v": {"item4"}}, ) - current_item["non_existing_str_set"] = {"item4"} - dict(table.get_item(Key=item_key)["Item"]).should.equal(current_item) # Update item to add a num value to a num set table.update_item( @@ -1381,15 +1391,14 @@ def test_update_item_add_with_nested_sets(): dict(table.get_item(Key=item_key)["Item"]).should.equal(current_item) # Update item to add a string value to a non-existing set - # Should just create the set in the background - table.update_item( + # Should raise + assert_failure_due_to_key_not_in_schema( + table.update_item, Key=item_key, UpdateExpression="ADD #ns.#ne :v", ExpressionAttributeNames={"#ns": "nested", "#ne": "non_existing_str_set"}, ExpressionAttributeValues={":v": {"new_item"}}, ) - current_item["nested"]["non_existing_str_set"] = {"new_item"} - dict(table.get_item(Key=item_key)["Item"]).should.equal(current_item) @mock_dynamodb2 From 0bd586eb67323b501752023050cbc69854de9805 Mon 
Sep 17 00:00:00 2001 From: pvbouwel Date: Sun, 26 Apr 2020 16:19:24 +0100 Subject: [PATCH 278/658] Place reserved_keywords.txt not in root. Do not use data_files in setup.py but MANIFEST.in Otherwise some environments throw errors when trying to create the data file. This was raised in: https://github.com/spulec/moto/pull/2885#discussion_r415150276 --- MANIFEST.in | 1 + setup.py | 1 - 2 files changed, 1 insertion(+), 1 deletion(-) diff --git a/MANIFEST.in b/MANIFEST.in index bd7eb968a939..51d1b223ccbf 100644 --- a/MANIFEST.in +++ b/MANIFEST.in @@ -3,5 +3,6 @@ include requirements.txt requirements-dev.txt tox.ini include moto/ec2/resources/instance_types.json include moto/ec2/resources/amis.json include moto/cognitoidp/resources/*.json +include moto/dynamodb2/parsing/reserved_keywords.txt recursive-include moto/templates * recursive-include tests * diff --git a/setup.py b/setup.py index adc5e4bb9de9..684c0dcea6db 100755 --- a/setup.py +++ b/setup.py @@ -101,5 +101,4 @@ def get_version(): project_urls={ "Documentation": "http://docs.getmoto.org/en/latest/", }, - data_files=[('', ['moto/dynamodb2/parsing/reserved_keywords.txt'])], ) From 41abd4344bdcb2dfd5dd4fcebf9bc6be325c052e Mon Sep 17 00:00:00 2001 From: Antoine Wendlinger Date: Mon, 27 Apr 2020 11:42:27 +0200 Subject: [PATCH 279/658] Use xmltodict for parsing --- moto/s3/responses.py | 19 ++++++++++--------- 1 file changed, 10 insertions(+), 9 deletions(-) diff --git a/moto/s3/responses.py b/moto/s3/responses.py index ec6015f7ae6b..fa6f8e56817d 100644 --- a/moto/s3/responses.py +++ b/moto/s3/responses.py @@ -839,21 +839,22 @@ def _get_path(request): def _bucket_response_delete_keys(self, request, body, bucket_name): template = self.response_template(S3_DELETE_KEYS_RESPONSE) + body_dict = xmltodict.parse(body) - objects = minidom.parseString(body).getElementsByTagName("Object") + objects = body_dict["Delete"].get("Object", []) + if not isinstance(objects, list): + # We expect a list of objects, but when there is a single node xmltodict does not + # return a list.
+ objects = [objects] + if len(objects) == 0: + raise MalformedXML() deleted_objects = [] error_names = [] - if len(objects) == 0: - raise MalformedXML() for object_ in objects: - key_name = object_.getElementsByTagName("Key")[0].firstChild.nodeValue - version_id_node = object_.getElementsByTagName("VersionId") - if version_id_node: - version_id = version_id_node[0].firstChild.nodeValue - else: - version_id = None + key_name = object_["Key"] + version_id = object_.get("VersionId", None) success = self.backend.delete_key( bucket_name, undo_clean_key_name(key_name), version_id=version_id From f8cabf0729e72c9f295f52dd73f9af19148b72c0 Mon Sep 17 00:00:00 2001 From: Matthew Gladney Date: Mon, 27 Apr 2020 11:52:47 -0400 Subject: [PATCH 280/658] static list of preexisting regions --- moto/ec2/models.py | 31 +++++++++++++++++++++++++------ 1 file changed, 25 insertions(+), 6 deletions(-) diff --git a/moto/ec2/models.py b/moto/ec2/models.py index 7f7b6369ecc3..9ceb06f6f397 100644 --- a/moto/ec2/models.py +++ b/moto/ec2/models.py @@ -1504,20 +1504,39 @@ def __init__(self, name, region_name, zone_id): class RegionsAndZonesBackend(object): - regions_not_enabled_by_default = ["ap-east-1", "me-south-1"] + regions_opt_in_not_required = [ + "af-south-1", + "ap-northeast-1", + "ap-northeast-2", + "ap-northeast-3", + "ap-south-1", + "ap-southeast-1", + "ap-southeast-2", + "ca-central-1", + "eu-central-1", + "eu-north-1", + "eu-west-1", + "eu-west-2", + "eu-west-3", + "sa-east-1", + "us-east-1", + "us-east-2", + "us-west-1", + "us-west-2", + ] regions = [] for region in Session().get_available_regions("ec2"): - if region in regions_not_enabled_by_default: - regions.append( - Region(region, "ec2.{}.amazonaws.com".format(region), "not-opted-in") - ) - else: + if region in regions_opt_in_not_required: regions.append( Region( region, "ec2.{}.amazonaws.com".format(region), "opt-in-not-required" ) ) + else: + regions.append( + Region(region, "ec2.{}.amazonaws.com".format(region), "not-opted-in") + ) for region in Session().get_available_regions("ec2", partition_name="aws-us-gov"): regions.append( Region(region, "ec2.{}.amazonaws.com".format(region), "opt-in-not-required") From dd22e7855a2d28b969a955ec940c30bf5d141804 Mon Sep 17 00:00:00 2001 From: Mike Grima Date: Mon, 27 Apr 2020 12:48:23 -0700 Subject: [PATCH 281/658] Fixed a regression with CloudWatch --- moto/cloudwatch/responses.py | 2 +- moto/s3/models.py | 6 +- tests/test_cloudwatch/test_cloudwatch.py | 63 ++++++++++--------- .../test_cloudwatch/test_cloudwatch_boto3.py | 8 +-- 4 files changed, 41 insertions(+), 38 deletions(-) diff --git a/moto/cloudwatch/responses.py b/moto/cloudwatch/responses.py index 93abb8b95bd4..56ba68bb9305 100644 --- a/moto/cloudwatch/responses.py +++ b/moto/cloudwatch/responses.py @@ -384,7 +384,7 @@ def set_alarm_state(self): {% endfor %} - Metric:{{ metric.name }} + {{ metric.name }} {{ metric.namespace }} {% endfor %} diff --git a/moto/s3/models.py b/moto/s3/models.py index c7693fe14d93..7373dc9e33ac 100644 --- a/moto/s3/models.py +++ b/moto/s3/models.py @@ -22,7 +22,7 @@ from bisect import insort from moto.core import ACCOUNT_ID, BaseBackend, BaseModel from moto.core.utils import iso_8601_datetime_with_milliseconds, rfc_1123_datetime -from moto.cloudwatch.models import metric_providers, MetricDatum +from moto.cloudwatch.models import MetricDatum from moto.utilities.tagging_service import TaggingService from .exceptions import ( BucketAlreadyExists, @@ -1159,9 +1159,11 @@ def __init__(self): self.account_public_access_block = 
None self.tagger = TaggingService() + # TODO: This is broken! DO NOT IMPORT MUTABLE DATA TYPES FROM OTHER AREAS -- THIS BREAKS UNMOCKING! + # WRAP WITH A GETTER/SETTER FUNCTION # Register this class as a CloudWatch Metric Provider # Must provide a method 'get_cloudwatch_metrics' that will return a list of metrics, based on the data available - metric_providers["S3"] = self + # metric_providers["S3"] = self def get_cloudwatch_metrics(self): metrics = [] diff --git a/tests/test_cloudwatch/test_cloudwatch.py b/tests/test_cloudwatch/test_cloudwatch.py index 2d338cf35298..60b6898bd5fe 100644 --- a/tests/test_cloudwatch/test_cloudwatch.py +++ b/tests/test_cloudwatch/test_cloudwatch.py @@ -88,7 +88,7 @@ def test_put_metric_data(): metric_names.should.have(1) metric = metrics[0] metric.namespace.should.equal("tester") - metric.name.should.equal("Metric:metric") + metric.name.should.equal("metric") dict(metric.dimensions).should.equal({"InstanceId": ["i-0123456,i-0123457"]}) @@ -157,33 +157,34 @@ def test_get_metric_statistics(): datapoint.should.have.key("Timestamp").which.should.equal(metric_timestamp) -@mock_s3_deprecated -@mock_cloudwatch_deprecated -def test_cloudwatch_return_s3_metrics(): - - region = "us-east-1" - - cw = boto.ec2.cloudwatch.connect_to_region(region) - s3 = boto.s3.connect_to_region(region) - - bucket_name_1 = "test-bucket-1" - bucket_name_2 = "test-bucket-2" - - bucket1 = s3.create_bucket(bucket_name=bucket_name_1) - key = Key(bucket1) - key.key = "the-key" - key.set_contents_from_string("foobar" * 4) - s3.create_bucket(bucket_name=bucket_name_2) - - metrics_s3_bucket_1 = cw.list_metrics(dimensions={"BucketName": bucket_name_1}) - # Verify that the OOTB S3 metrics are available for the created buckets - len(metrics_s3_bucket_1).should.be(2) - metric_names = [m.name for m in metrics_s3_bucket_1] - sorted(metric_names).should.equal( - ["Metric:BucketSizeBytes", "Metric:NumberOfObjects"] - ) - - # Explicit clean up - the metrics for these buckets are messing with subsequent tests - key.delete() - s3.delete_bucket(bucket_name_1) - s3.delete_bucket(bucket_name_2) +# TODO: THIS IS CURRENTLY BROKEN! 
+# @mock_s3_deprecated +# @mock_cloudwatch_deprecated +# def test_cloudwatch_return_s3_metrics(): +# +# region = "us-east-1" +# +# cw = boto.ec2.cloudwatch.connect_to_region(region) +# s3 = boto.s3.connect_to_region(region) +# +# bucket_name_1 = "test-bucket-1" +# bucket_name_2 = "test-bucket-2" +# +# bucket1 = s3.create_bucket(bucket_name=bucket_name_1) +# key = Key(bucket1) +# key.key = "the-key" +# key.set_contents_from_string("foobar" * 4) +# s3.create_bucket(bucket_name=bucket_name_2) +# +# metrics_s3_bucket_1 = cw.list_metrics(dimensions={"BucketName": bucket_name_1}) +# # Verify that the OOTB S3 metrics are available for the created buckets +# len(metrics_s3_bucket_1).should.be(2) +# metric_names = [m.name for m in metrics_s3_bucket_1] +# sorted(metric_names).should.equal( +# ["Metric:BucketSizeBytes", "Metric:NumberOfObjects"] +# ) +# +# # Explicit clean up - the metrics for these buckets are messing with subsequent tests +# key.delete() +# s3.delete_bucket(bucket_name_1) +# s3.delete_bucket(bucket_name_2) diff --git a/tests/test_cloudwatch/test_cloudwatch_boto3.py b/tests/test_cloudwatch/test_cloudwatch_boto3.py index 12c875a9219b..0c814ee442c1 100644 --- a/tests/test_cloudwatch/test_cloudwatch_boto3.py +++ b/tests/test_cloudwatch/test_cloudwatch_boto3.py @@ -155,7 +155,7 @@ def test_put_metric_data_no_dimensions(): metrics.should.have.length_of(1) metric = metrics[0] metric["Namespace"].should.equal("tester") - metric["MetricName"].should.equal("Metric:metric") + metric["MetricName"].should.equal("metric") @mock_cloudwatch @@ -183,7 +183,7 @@ def test_put_metric_data_with_statistics(): metrics.should.have.length_of(1) metric = metrics[0] metric["Namespace"].should.equal("tester") - metric["MetricName"].should.equal("Metric:statmetric") + metric["MetricName"].should.equal("statmetric") # TODO: test statistics - https://github.com/spulec/moto/issues/1615 @@ -266,12 +266,12 @@ def test_list_metrics(): { u"Namespace": "list_test_1/", u"Dimensions": [], - u"MetricName": "Metric:metric1", + u"MetricName": "metric1", }, { u"Namespace": "list_test_1/", u"Dimensions": [], - u"MetricName": "Metric:metric1", + u"MetricName": "metric1", }, ] ) From 05860fcdd1bf0135df9aeeae626e6229ba9cdbda Mon Sep 17 00:00:00 2001 From: Alessandro Palumbo Date: Mon, 27 Apr 2020 19:39:33 +0200 Subject: [PATCH 282/658] Fixed apigateway usage plan api when dealing with non existing usage plans and non existing api keys --- moto/apigateway/exceptions.py | 9 +++++++ moto/apigateway/models.py | 15 +++++++++++ moto/apigateway/responses.py | 25 ++++++++++++++++-- tests/test_apigateway/test_apigateway.py | 32 ++++++++++++++++++++++++ tests/test_apigateway/test_server.py | 22 ++++++++++++++++ 5 files changed, 101 insertions(+), 2 deletions(-) diff --git a/moto/apigateway/exceptions.py b/moto/apigateway/exceptions.py index 8f6d21aa0646..4d3475d0e43b 100644 --- a/moto/apigateway/exceptions.py +++ b/moto/apigateway/exceptions.py @@ -112,6 +112,15 @@ def __init__(self): ) +class UsagePlanNotFoundException(RESTError): + code = 404 + + def __init__(self): + super(UsagePlanNotFoundException, self).__init__( + "NotFoundException", "Invalid Usage Plan ID specified" + ) + + class ApiKeyAlreadyExists(RESTError): code = 409 diff --git a/moto/apigateway/models.py b/moto/apigateway/models.py index e011af60144d..d39b719d66f6 100644 --- a/moto/apigateway/models.py +++ b/moto/apigateway/models.py @@ -20,6 +20,7 @@ from moto.sts.models import ACCOUNT_ID from .exceptions import ( ApiKeyNotFoundException, + UsagePlanNotFoundException, 
    AwsProxyNotAllowed,
    CrossAccountNotAllowed,
    IntegrationMethodNotDefined,
@@ -1045,6 +1046,9 @@ def get_usage_plans(self, api_key_id=None):
         return plans
 
     def get_usage_plan(self, usage_plan_id):
+        if usage_plan_id not in self.usage_plans:
+            raise UsagePlanNotFoundException()
+
         return self.usage_plans[usage_plan_id]
 
     def delete_usage_plan(self, usage_plan_id):
@@ -1077,6 +1081,17 @@ def get_usage_plan_keys(self, usage_plan_id):
         return list(self.usage_plan_keys[usage_plan_id].values())
 
     def get_usage_plan_key(self, usage_plan_id, key_id):
+        # first check that the api key exists
+        if key_id not in self.keys:
+            raise ApiKeyNotFoundException()
+
+        # then check that the usage plan exists and that the key is attached to it
+        if (
+            usage_plan_id not in self.usage_plan_keys
+            or key_id not in self.usage_plan_keys[usage_plan_id]
+        ):
+            raise UsagePlanNotFoundException()
+
         return self.usage_plan_keys[usage_plan_id][key_id]
 
     def delete_usage_plan_key(self, usage_plan_id, key_id):
diff --git a/moto/apigateway/responses.py b/moto/apigateway/responses.py
index a3c41a6d4b48..1a7689d286e9 100644
--- a/moto/apigateway/responses.py
+++ b/moto/apigateway/responses.py
@@ -6,6 +6,7 @@
 from .models import apigateway_backends
 from .exceptions import (
     ApiKeyNotFoundException,
+    UsagePlanNotFoundException,
     BadRequestException,
     CrossAccountNotAllowed,
     AuthorizerNotFoundException,
@@ -490,7 +491,16 @@ def usage_plan_individual(self, request, full_url, headers):
         usage_plan = url_path_parts[2]
 
         if self.method == "GET":
-            usage_plan_response = self.backend.get_usage_plan(usage_plan)
+            try:
+                usage_plan_response = self.backend.get_usage_plan(usage_plan)
+            except (UsagePlanNotFoundException) as error:
+                return (
+                    error.code,
+                    {},
+                    '{{"message":"{0}","code":"{1}"}}'.format(
+                        error.message, error.error_type
+                    ),
+                )
         elif self.method == "DELETE":
             usage_plan_response = self.backend.delete_usage_plan(usage_plan)
         return 200, {}, json.dumps(usage_plan_response)
@@ -529,7 +539,18 @@ def usage_plan_key_individual(self, request, full_url, headers):
         key_id = url_path_parts[4]
 
         if self.method == "GET":
-            usage_plan_response = self.backend.get_usage_plan_key(usage_plan_id, key_id)
+            try:
+                usage_plan_response = self.backend.get_usage_plan_key(
+                    usage_plan_id, key_id
+                )
+            except (UsagePlanNotFoundException, ApiKeyNotFoundException) as error:
+                return (
+                    error.code,
+                    {},
+                    '{{"message":"{0}","code":"{1}"}}'.format(
+                        error.message, error.error_type
+                    ),
+                )
         elif self.method == "DELETE":
             usage_plan_response = self.backend.delete_usage_plan_key(
                 usage_plan_id, key_id
diff --git a/tests/test_apigateway/test_apigateway.py b/tests/test_apigateway/test_apigateway.py
index b04328a03799..7495372d263f 100644
--- a/tests/test_apigateway/test_apigateway.py
+++ b/tests/test_apigateway/test_apigateway.py
@@ -1797,6 +1797,14 @@ def test_usage_plans():
     response = client.get_usage_plans()
     len(response["items"]).should.equal(0)
 
+    # Try to get info about a non existing usage plan
+    with assert_raises(ClientError) as ex:
+        client.get_usage_plan(usagePlanId="not_existing")
+    ex.exception.response["Error"]["Code"].should.equal("NotFoundException")
+    ex.exception.response["Error"]["Message"].should.equal(
+        "Invalid Usage Plan ID specified"
+    )
+
     usage_plan_name = "TEST-PLAN"
     payload = {"name": usage_plan_name}
     response = client.create_usage_plan(**payload)
@@ -1879,6 +1887,30 @@ def test_usage_plan_keys():
     response = client.get_usage_plan_keys(usagePlanId=usage_plan_id)
     len(response["items"]).should.equal(0)
 
+    # Try to get info about a non existing api key
+    with assert_raises(ClientError) as ex:
+        client.get_usage_plan_key(usagePlanId=usage_plan_id, keyId="not_existing_key")
+    ex.exception.response["Error"]["Code"].should.equal("NotFoundException")
+    ex.exception.response["Error"]["Message"].should.equal(
+        "Invalid API Key identifier specified"
+    )
+
+    # Try to get info about an existing api key that has not yet been added to a valid usage plan
+    with assert_raises(ClientError) as ex:
+        client.get_usage_plan_key(usagePlanId=usage_plan_id, keyId=key_id)
+    ex.exception.response["Error"]["Code"].should.equal("NotFoundException")
+    ex.exception.response["Error"]["Message"].should.equal(
+        "Invalid Usage Plan ID specified"
+    )
+
+    # Try to get info about an existing api key under a non existing usage plan
+    with assert_raises(ClientError) as ex:
+        client.get_usage_plan_key(usagePlanId="not_existing_plan_id", keyId=key_id)
+    ex.exception.response["Error"]["Code"].should.equal("NotFoundException")
+    ex.exception.response["Error"]["Message"].should.equal(
+        "Invalid Usage Plan ID specified"
+    )
+
 
 @mock_apigateway
 def test_create_usage_plan_key_non_existent_api_key():
diff --git a/tests/test_apigateway/test_server.py b/tests/test_apigateway/test_server.py
index 08b20cc615a1..9be948ef64a7 100644
--- a/tests/test_apigateway/test_server.py
+++ b/tests/test_apigateway/test_server.py
@@ -39,6 +39,10 @@ def test_usage_plans_apis():
     fetched_plan = json.loads(res.data)
     fetched_plan.should.equal(created_plan)
 
+    # Not existing usage plan
+    res = test_client.get("/usageplans/{0}".format("not_existing"))
+    res.status_code.should.equal(404)
+
     # Delete usage plan
     res = test_client.delete("/usageplans/{0}".format(created_plan["id"]))
     res.data.should.equal(b"{}")
@@ -61,6 +65,24 @@ def test_usage_plans_keys():
     res = test_client.get("/usageplans/{0}/keys".format(usage_plan_id))
     json.loads(res.data)["item"].should.have.length_of(0)
 
+    # Invalid api key (does not exist at all)
+    res = test_client.get(
+        "/usageplans/{0}/keys/{1}".format(usage_plan_id, "not_existing")
+    )
+    res.status_code.should.equal(404)
+
+    # not existing usage plan with existing api key
+    res = test_client.get(
+        "/usageplans/{0}/keys/{1}".format("not_existing", created_api_key["id"])
+    )
+    res.status_code.should.equal(404)
+
+    # not yet added api key
+    res = test_client.get(
+        "/usageplans/{0}/keys/{1}".format(usage_plan_id, created_api_key["id"])
+    )
+    res.status_code.should.equal(404)
+
     # Create usage plan key
     res = test_client.post(
         "/usageplans/{0}/keys".format(usage_plan_id),
From 1fbf76b95a6f48768d681d1156fab68fed9bdd97 Mon Sep 17 00:00:00 2001
From: Stijn Seghers
Date: Thu, 16 Jan 2020 12:48:48 +1300
Subject: [PATCH 283/658] Keep cfnlint import function-local (~1s)

Saves about 1s of startup time.
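A crude way to reproduce the number (a rough sketch; exact timings vary by machine and cache state):

    python -c "import time; t0 = time.time(); import moto; print('%.2fs' % (time.time() - t0))"

On Python 3.7+, `python -X importtime -c "import moto"` also gives a per-module breakdown; before this change the cfnlint subtree accounts for roughly a second of the total, which is why the import is moved inside validate_template_cfn_lint and the cost is only paid when a template is actually validated.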
--- moto/cloudformation/utils.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/moto/cloudformation/utils.py b/moto/cloudformation/utils.py index cd84810028f2..54c338b9b157 100644 --- a/moto/cloudformation/utils.py +++ b/moto/cloudformation/utils.py @@ -6,7 +6,6 @@ import os import string -from cfnlint import decode, core from moto.core import ACCOUNT_ID @@ -62,6 +61,8 @@ def _f(loader, tag, node): def validate_template_cfn_lint(template): + # Importing cfnlint adds a significant overhead, so we keep it local + from cfnlint import decode, core # Save the template to a temporary file -- cfn-lint requires a file filename = "file.tmp" From e2af07df446850483fd9f3c1edd0a7340d3510a8 Mon Sep 17 00:00:00 2001 From: Stijn Seghers Date: Thu, 16 Jan 2020 17:30:50 +1300 Subject: [PATCH 284/658] Keep pkg_resources import function-local (~0.06s) --- moto/core/responses.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/moto/core/responses.py b/moto/core/responses.py index c708edb8bd02..9a46f8ac5a49 100644 --- a/moto/core/responses.py +++ b/moto/core/responses.py @@ -20,7 +20,6 @@ from six.moves.urllib.parse import parse_qs, urlparse import xmltodict -from pkg_resources import resource_filename from werkzeug.exceptions import HTTPException import boto3 @@ -766,6 +765,9 @@ class AWSServiceSpec(object): """ def __init__(self, path): + # Importing pkg_resources takes ~60ms; keep it local + from pkg_resources import resource_filename # noqa + self.path = resource_filename("botocore", path) with io.open(self.path, "r", encoding="utf-8") as f: spec = json.load(f) From b8820009e811be23fdef99e7ce358ebc91493628 Mon Sep 17 00:00:00 2001 From: Stijn Seghers Date: Fri, 17 Jan 2020 14:08:06 +1300 Subject: [PATCH 285/658] Lazily import submodules --- moto/__init__.py | 166 +++++++++++++++++++++++------------- moto/backends.py | 201 +++++++++++++++++++------------------------- moto/core/models.py | 15 ++-- moto/server.py | 16 ++-- 4 files changed, 214 insertions(+), 184 deletions(-) diff --git a/moto/__init__.py b/moto/__init__.py index 4c9d4753c360..79c1555d3088 100644 --- a/moto/__init__.py +++ b/moto/__init__.py @@ -1,62 +1,114 @@ from __future__ import unicode_literals -from .acm import mock_acm # noqa -from .apigateway import mock_apigateway, mock_apigateway_deprecated # noqa -from .athena import mock_athena # noqa -from .autoscaling import mock_autoscaling, mock_autoscaling_deprecated # noqa -from .awslambda import mock_lambda, mock_lambda_deprecated # noqa -from .batch import mock_batch # noqa -from .cloudformation import mock_cloudformation # noqa -from .cloudformation import mock_cloudformation_deprecated # noqa -from .cloudwatch import mock_cloudwatch, mock_cloudwatch_deprecated # noqa -from .codecommit import mock_codecommit # noqa -from .codepipeline import mock_codepipeline # noqa -from .cognitoidentity import mock_cognitoidentity # noqa -from .cognitoidentity import mock_cognitoidentity_deprecated # noqa -from .cognitoidp import mock_cognitoidp, mock_cognitoidp_deprecated # noqa -from .config import mock_config # noqa -from .datapipeline import mock_datapipeline # noqa -from .datapipeline import mock_datapipeline_deprecated # noqa -from .datasync import mock_datasync # noqa -from .dynamodb import mock_dynamodb, mock_dynamodb_deprecated # noqa -from .dynamodb2 import mock_dynamodb2, mock_dynamodb2_deprecated # noqa -from .dynamodbstreams import mock_dynamodbstreams # noqa -from .elasticbeanstalk import mock_elasticbeanstalk # noqa -from .ec2 import mock_ec2, 
mock_ec2_deprecated # noqa -from .ec2_instance_connect import mock_ec2_instance_connect # noqa -from .ecr import mock_ecr, mock_ecr_deprecated # noqa -from .ecs import mock_ecs, mock_ecs_deprecated # noqa -from .elb import mock_elb, mock_elb_deprecated # noqa -from .elbv2 import mock_elbv2 # noqa -from .emr import mock_emr, mock_emr_deprecated # noqa -from .events import mock_events # noqa -from .glacier import mock_glacier, mock_glacier_deprecated # noqa -from .glue import mock_glue # noqa -from .iam import mock_iam, mock_iam_deprecated # noqa -from .iot import mock_iot # noqa -from .iotdata import mock_iotdata # noqa -from .kinesis import mock_kinesis, mock_kinesis_deprecated # noqa -from .kms import mock_kms, mock_kms_deprecated # noqa -from .logs import mock_logs, mock_logs_deprecated # noqa -from .opsworks import mock_opsworks, mock_opsworks_deprecated # noqa -from .organizations import mock_organizations # noqa -from .polly import mock_polly # noqa -from .rds import mock_rds, mock_rds_deprecated # noqa -from .rds2 import mock_rds2, mock_rds2_deprecated # noqa -from .redshift import mock_redshift, mock_redshift_deprecated # noqa -from .resourcegroups import mock_resourcegroups # noqa -from .resourcegroupstaggingapi import mock_resourcegroupstaggingapi # noqa -from .route53 import mock_route53, mock_route53_deprecated # noqa -from .s3 import mock_s3, mock_s3_deprecated # noqa -from .secretsmanager import mock_secretsmanager # noqa -from .ses import mock_ses, mock_ses_deprecated # noqa -from .sns import mock_sns, mock_sns_deprecated # noqa -from .sqs import mock_sqs, mock_sqs_deprecated # noqa -from .ssm import mock_ssm # noqa -from .stepfunctions import mock_stepfunctions # noqa -from .sts import mock_sts, mock_sts_deprecated # noqa -from .swf import mock_swf, mock_swf_deprecated # noqa -from .xray import XRaySegment, mock_xray, mock_xray_client # noqa +import importlib + + +def lazy_load(module_name, element): + def f(*args, **kwargs): + module = importlib.import_module(module_name, "moto") + return getattr(module, element)(*args, **kwargs) + + return f + + +mock_acm = lazy_load(".acm", "mock_acm") +mock_apigateway = lazy_load(".apigateway", "mock_apigateway") +mock_apigateway_deprecated = lazy_load(".apigateway", "mock_apigateway_deprecated") +mock_athena = lazy_load(".athena", "mock_athena") +mock_autoscaling = lazy_load(".autoscaling", "mock_autoscaling") +mock_autoscaling_deprecated = lazy_load(".autoscaling", "mock_autoscaling_deprecated") +mock_lambda = lazy_load(".awslambda", "mock_lambda") +mock_lambda_deprecated = lazy_load(".awslambda", "mock_lambda_deprecated") +mock_batch = lazy_load(".batch", "mock_batch") +mock_batch = lazy_load(".batch", "mock_batch") +mock_cloudformation = lazy_load(".cloudformation", "mock_cloudformation") +mock_cloudformation_deprecated = lazy_load( + ".cloudformation", "mock_cloudformation_deprecated" +) +mock_cloudwatch = lazy_load(".cloudwatch", "mock_cloudwatch") +mock_cloudwatch_deprecated = lazy_load(".cloudwatch", "mock_cloudwatch_deprecated") +mock_codecommit = lazy_load(".codecommit", "mock_codecommit") +mock_codepipeline = lazy_load(".codepipeline", "mock_codepipeline") +mock_cognitoidentity = lazy_load(".cognitoidentity", "mock_cognitoidentity") +mock_cognitoidentity_deprecated = lazy_load( + ".cognitoidentity", "mock_cognitoidentity_deprecated" +) +mock_cognitoidp = lazy_load(".cognitoidp", "mock_cognitoidp") +mock_cognitoidp_deprecated = lazy_load(".cognitoidp", "mock_cognitoidp_deprecated") +mock_config = lazy_load(".config", 
"mock_config") +mock_datapipeline = lazy_load(".datapipeline", "mock_datapipeline") +mock_datapipeline_deprecated = lazy_load( + ".datapipeline", "mock_datapipeline_deprecated" +) +mock_datasync = lazy_load(".datasync", "mock_datasync") +mock_dynamodb = lazy_load(".dynamodb", "mock_dynamodb") +mock_dynamodb_deprecated = lazy_load(".dynamodb", "mock_dynamodb_deprecated") +mock_dynamodb2 = lazy_load(".dynamodb2", "mock_dynamodb2") +mock_dynamodb2_deprecated = lazy_load(".dynamodb2", "mock_dynamodb2_deprecated") +mock_dynamodbstreams = lazy_load(".dynamodbstreams", "mock_dynamodbstreams") +mock_elasticbeanstalk = lazy_load(".elasticbeanstalk", "mock_elasticbeanstalk") +mock_ec2 = lazy_load(".ec2", "mock_ec2") +mock_ec2_deprecated = lazy_load(".ec2", "mock_ec2_deprecated") +mock_ec2_instance_connect = lazy_load( + ".ec2_instance_connect", "mock_ec2_instance_connect" +) +mock_ecr = lazy_load(".ecr", "mock_ecr") +mock_ecr_deprecated = lazy_load(".ecr", "mock_ecr_deprecated") +mock_ecs = lazy_load(".ecs", "mock_ecs") +mock_ecs_deprecated = lazy_load(".ecs", "mock_ecs_deprecated") +mock_elb = lazy_load(".elb", "mock_elb") +mock_elb_deprecated = lazy_load(".elb", "mock_elb_deprecated") +mock_elbv2 = lazy_load(".elbv2", "mock_elbv2") +mock_emr = lazy_load(".emr", "mock_emr") +mock_emr_deprecated = lazy_load(".emr", "mock_emr_deprecated") +mock_events = lazy_load(".events", "mock_events") +mock_glacier = lazy_load(".glacier", "mock_glacier") +mock_glacier_deprecated = lazy_load(".glacier", "mock_glacier_deprecated") +mock_glue = lazy_load(".glue", "mock_glue") +mock_iam = lazy_load(".iam", "mock_iam") +mock_iam_deprecated = lazy_load(".iam", "mock_iam_deprecated") +mock_iot = lazy_load(".iot", "mock_iot") +mock_iotdata = lazy_load(".iotdata", "mock_iotdata") +mock_kinesis = lazy_load(".kinesis", "mock_kinesis") +mock_kinesis_deprecated = lazy_load(".kinesis", "mock_kinesis_deprecated") +mock_kms = lazy_load(".kms", "mock_kms") +mock_kms_deprecated = lazy_load(".kms", "mock_kms_deprecated") +mock_logs = lazy_load(".logs", "mock_logs") +mock_logs_deprecated = lazy_load(".logs", "mock_logs_deprecated") +mock_opsworks = lazy_load(".opsworks", "mock_opsworks") +mock_opsworks_deprecated = lazy_load(".opsworks", "mock_opsworks_deprecated") +mock_organizations = lazy_load(".organizations", "mock_organizations") +mock_polly = lazy_load(".polly", "mock_polly") +mock_rds = lazy_load(".rds", "mock_rds") +mock_rds_deprecated = lazy_load(".rds", "mock_rds_deprecated") +mock_rds2 = lazy_load(".rds2", "mock_rds2") +mock_rds2_deprecated = lazy_load(".rds2", "mock_rds2_deprecated") +mock_redshift = lazy_load(".redshift", "mock_redshift") +mock_redshift_deprecated = lazy_load(".redshift", "mock_redshift_deprecated") +mock_resourcegroups = lazy_load(".resourcegroups", "mock_resourcegroups") +mock_resourcegroupstaggingapi = lazy_load( + ".resourcegroupstaggingapi", "mock_resourcegroupstaggingapi" +) +mock_route53 = lazy_load(".route53", "mock_route53") +mock_route53_deprecated = lazy_load(".route53", "mock_route53_deprecated") +mock_s3 = lazy_load(".s3", "mock_s3") +mock_s3_deprecated = lazy_load(".s3", "mock_s3_deprecated") +mock_secretsmanager = lazy_load(".secretsmanager", "mock_secretsmanager") +mock_ses = lazy_load(".ses", "mock_ses") +mock_ses_deprecated = lazy_load(".ses", "mock_ses_deprecated") +mock_sns = lazy_load(".sns", "mock_sns") +mock_sns_deprecated = lazy_load(".sns", "mock_sns_deprecated") +mock_sqs = lazy_load(".sqs", "mock_sqs") +mock_sqs_deprecated = lazy_load(".sqs", "mock_sqs_deprecated") +mock_ssm 
= lazy_load(".ssm", "mock_ssm") +mock_stepfunctions = lazy_load(".stepfunctions", "mock_stepfunctions") +mock_sts = lazy_load(".sts", "mock_sts") +mock_sts_deprecated = lazy_load(".sts", "mock_sts_deprecated") +mock_swf = lazy_load(".swf", "mock_swf") +mock_swf_deprecated = lazy_load(".swf", "mock_swf_deprecated") +XRaySegment = lazy_load(".xray", "XRaySegment") +mock_xray = lazy_load(".xray", "mock_xray") +mock_xray_client = lazy_load(".xray", "mock_xray_client") # import logging # logging.getLogger('boto').setLevel(logging.CRITICAL) diff --git a/moto/backends.py b/moto/backends.py index a48df74a4586..bb71429eb940 100644 --- a/moto/backends.py +++ b/moto/backends.py @@ -1,124 +1,99 @@ from __future__ import unicode_literals -from moto.acm import acm_backends -from moto.apigateway import apigateway_backends -from moto.athena import athena_backends -from moto.autoscaling import autoscaling_backends -from moto.awslambda import lambda_backends -from moto.batch import batch_backends -from moto.cloudformation import cloudformation_backends -from moto.cloudwatch import cloudwatch_backends -from moto.codecommit import codecommit_backends -from moto.codepipeline import codepipeline_backends -from moto.cognitoidentity import cognitoidentity_backends -from moto.cognitoidp import cognitoidp_backends -from moto.config import config_backends -from moto.core import moto_api_backends -from moto.datapipeline import datapipeline_backends -from moto.datasync import datasync_backends -from moto.dynamodb import dynamodb_backends -from moto.dynamodb2 import dynamodb_backends2 -from moto.dynamodbstreams import dynamodbstreams_backends -from moto.ec2 import ec2_backends -from moto.ec2_instance_connect import ec2_instance_connect_backends -from moto.ecr import ecr_backends -from moto.ecs import ecs_backends -from moto.elasticbeanstalk import eb_backends -from moto.elb import elb_backends -from moto.elbv2 import elbv2_backends -from moto.emr import emr_backends -from moto.events import events_backends -from moto.glacier import glacier_backends -from moto.glue import glue_backends -from moto.iam import iam_backends -from moto.instance_metadata import instance_metadata_backends -from moto.iot import iot_backends -from moto.iotdata import iotdata_backends -from moto.kinesis import kinesis_backends -from moto.kms import kms_backends -from moto.logs import logs_backends -from moto.opsworks import opsworks_backends -from moto.organizations import organizations_backends -from moto.polly import polly_backends -from moto.rds2 import rds2_backends -from moto.redshift import redshift_backends -from moto.resourcegroups import resourcegroups_backends -from moto.resourcegroupstaggingapi import resourcegroupstaggingapi_backends -from moto.route53 import route53_backends -from moto.s3 import s3_backends -from moto.secretsmanager import secretsmanager_backends -from moto.ses import ses_backends -from moto.sns import sns_backends -from moto.sqs import sqs_backends -from moto.ssm import ssm_backends -from moto.stepfunctions import stepfunction_backends -from moto.sts import sts_backends -from moto.swf import swf_backends -from moto.xray import xray_backends +import importlib BACKENDS = { - "acm": acm_backends, - "apigateway": apigateway_backends, - "athena": athena_backends, - "autoscaling": autoscaling_backends, - "batch": batch_backends, - "cloudformation": cloudformation_backends, - "cloudwatch": cloudwatch_backends, - "codecommit": codecommit_backends, - "codepipeline": codepipeline_backends, - "cognito-identity": 
cognitoidentity_backends, - "cognito-idp": cognitoidp_backends, - "config": config_backends, - "datapipeline": datapipeline_backends, - "datasync": datasync_backends, - "dynamodb": dynamodb_backends, - "dynamodb2": dynamodb_backends2, - "dynamodbstreams": dynamodbstreams_backends, - "ec2": ec2_backends, - "ec2_instance_connect": ec2_instance_connect_backends, - "ecr": ecr_backends, - "ecs": ecs_backends, - "elasticbeanstalk": eb_backends, - "elb": elb_backends, - "elbv2": elbv2_backends, - "events": events_backends, - "emr": emr_backends, - "glacier": glacier_backends, - "glue": glue_backends, - "iam": iam_backends, - "moto_api": moto_api_backends, - "instance_metadata": instance_metadata_backends, - "logs": logs_backends, - "kinesis": kinesis_backends, - "kms": kms_backends, - "opsworks": opsworks_backends, - "organizations": organizations_backends, - "polly": polly_backends, - "redshift": redshift_backends, - "resource-groups": resourcegroups_backends, - "rds": rds2_backends, - "s3": s3_backends, - "s3bucket_path": s3_backends, - "ses": ses_backends, - "secretsmanager": secretsmanager_backends, - "sns": sns_backends, - "sqs": sqs_backends, - "ssm": ssm_backends, - "stepfunctions": stepfunction_backends, - "sts": sts_backends, - "swf": swf_backends, - "route53": route53_backends, - "lambda": lambda_backends, - "xray": xray_backends, - "resourcegroupstaggingapi": resourcegroupstaggingapi_backends, - "iot": iot_backends, - "iot-data": iotdata_backends, + "acm": ("acm", "acm_backends"), + "apigateway": ("apigateway", "apigateway_backends"), + "athena": ("athena", "athena_backends"), + "autoscaling": ("autoscaling", "autoscaling_backends"), + "batch": ("batch", "batch_backends"), + "cloudformation": ("cloudformation", "cloudformation_backends"), + "cloudwatch": ("cloudwatch", "cloudwatch_backends"), + "codecommit": ("codecommit", "codecommit_backends"), + "codepipeline": ("codepipeline", "codepipeline_backends"), + "cognito-identity": ("cognitoidentity", "cognitoidentity_backends"), + "cognito-idp": ("cognitoidp", "cognitoidp_backends"), + "config": ("config", "config_backends"), + "datapipeline": ("datapipeline", "datapipeline_backends"), + "datasync": ("datasync", "datasync_backends"), + "dynamodb": ("dynamodb", "dynamodb_backends"), + "dynamodb2": ("dynamodb2", "dynamodb_backends2"), + "dynamodbstreams": ("dynamodbstreams", "dynamodbstreams_backends"), + "ec2": ("ec2", "ec2_backends"), + "ec2_instance_connect": ("ec2_instance_connect", "ec2_instance_connect_backends"), + "ecr": ("ecr", "ecr_backends"), + "ecs": ("ecs", "ecs_backends"), + "elasticbeanstalk": ("elasticbeanstalk", "eb_backends"), + "elb": ("elb", "elb_backends"), + "elbv2": ("elbv2", "elbv2_backends"), + "emr": ("emr", "emr_backends"), + "events": ("events", "events_backends"), + "glacier": ("glacier", "glacier_backends"), + "glue": ("glue", "glue_backends"), + "iam": ("iam", "iam_backends"), + "instance_metadata": ("instance_metadata", "instance_metadata_backends"), + "iot": ("iot", "iot_backends"), + "iot-data": ("iotdata", "iotdata_backends"), + "kinesis": ("kinesis", "kinesis_backends"), + "kms": ("kms", "kms_backends"), + "lambda": ("awslambda", "lambda_backends"), + "logs": ("logs", "logs_backends"), + "moto_api": ("core", "moto_api_backends"), + "opsworks": ("opsworks", "opsworks_backends"), + "organizations": ("organizations", "organizations_backends"), + "polly": ("polly", "polly_backends"), + "rds": ("rds2", "rds2_backends"), + "redshift": ("redshift", "redshift_backends"), + "resource-groups": ("resourcegroups", 
"resourcegroups_backends"), + "resourcegroupstaggingapi": ( + "resourcegroupstaggingapi", + "resourcegroupstaggingapi_backends", + ), + "route53": ("route53", "route53_backends"), + "s3": ("s3", "s3_backends"), + "s3bucket_path": ("s3", "s3_backends"), + "secretsmanager": ("secretsmanager", "secretsmanager_backends"), + "ses": ("ses", "ses_backends"), + "sns": ("sns", "sns_backends"), + "sqs": ("sqs", "sqs_backends"), + "ssm": ("ssm", "ssm_backends"), + "stepfunctions": ("stepfunctions", "stepfunction_backends"), + "sts": ("sts", "sts_backends"), + "swf": ("swf", "swf_backends"), + "xray": ("xray", "xray_backends"), } +def _import_backend(module_name, backends_name): + module = importlib.import_module("moto." + module_name) + return getattr(module, backends_name) + + +def backends(): + for module_name, backends_name in BACKENDS.values(): + yield _import_backend(module_name, backends_name) + + +def named_backends(): + for name, (module_name, backends_name) in BACKENDS.items(): + yield name, _import_backend(module_name, backends_name) + + +def get_backend(name): + module_name, backends_name = BACKENDS[name] + return _import_backend(module_name, backends_name) + + +def search_backend(predicate): + for name, backend in named_backends(): + if predicate(backend): + return name + + def get_model(name, region_name): - for backends in BACKENDS.values(): - for region, backend in backends.items(): + for backends_ in backends(): + for region, backend in backends_.items(): if region == region_name: models = getattr(backend.__class__, "__models__", {}) if name in models: diff --git a/moto/core/models.py b/moto/core/models.py index 1ee11607ab36..1597efc7bdbf 100644 --- a/moto/core/models.py +++ b/moto/core/models.py @@ -33,14 +33,15 @@ class BaseMockAWS(object): nested_count = 0 def __init__(self, backends): + from moto.instance_metadata import instance_metadata_backend + from moto.core import moto_api_backend + self.backends = backends self.backends_for_urls = {} - from moto.backends import BACKENDS - default_backends = { - "instance_metadata": BACKENDS["instance_metadata"]["global"], - "moto_api": BACKENDS["moto_api"]["global"], + "instance_metadata": instance_metadata_backend, + "moto_api": moto_api_backend, } self.backends_for_urls.update(self.backends) self.backends_for_urls.update(default_backends) @@ -721,12 +722,12 @@ class deprecated_base_decorator(base_decorator): class MotoAPIBackend(BaseBackend): def reset(self): - from moto.backends import BACKENDS + import moto.backends as backends - for name, backends in BACKENDS.items(): + for name, backends_ in backends.named_backends(): if name == "moto_api": continue - for region_name, backend in backends.items(): + for region_name, backend in backends_.items(): backend.reset() self.__init__() diff --git a/moto/server.py b/moto/server.py index 498f6c504dce..46e37d921d9e 100644 --- a/moto/server.py +++ b/moto/server.py @@ -15,7 +15,7 @@ from werkzeug.routing import BaseConverter from werkzeug.serving import run_simple -from moto.backends import BACKENDS +import moto.backends as backends from moto.core.utils import convert_flask_to_httpretty_response @@ -52,13 +52,15 @@ def get_backend_for_host(self, host): if self.service: return self.service - if host in BACKENDS: + if host in backends.BACKENDS: return host - for backend_name, backend in BACKENDS.items(): - for url_base in list(backend.values())[0].url_bases: - if re.match(url_base, "http://%s" % host): - return backend_name + return backends.search_backend( + lambda backend: any( + re.match(url_base, 
"http://%s" % host) + for url_base in list(backend.values())[0].url_bases + ) + ) def infer_service_region_host(self, environ): auth = environ.get("HTTP_AUTHORIZATION") @@ -204,7 +206,7 @@ def create_backend_app(service): backend_app.view_functions = {} backend_app.url_map = Map() backend_app.url_map.converters["regex"] = RegexConverter - backend = list(BACKENDS[service].values())[0] + backend = list(backends.get_backend(service).values())[0] for url_path, handler in backend.flask_paths.items(): view_func = convert_flask_to_httpretty_response(handler) if handler.__name__ == "dispatch": From 8ff248456ba0fefc73655bb25939dd791407448c Mon Sep 17 00:00:00 2001 From: Stijn Seghers Date: Fri, 17 Jan 2020 14:09:07 +1300 Subject: [PATCH 286/658] Keep sshpubkeys import function-local (~0.5s) --- moto/ec2/utils.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/moto/ec2/utils.py b/moto/ec2/utils.py index 61d22d8b227f..3b363e45de9b 100644 --- a/moto/ec2/utils.py +++ b/moto/ec2/utils.py @@ -10,8 +10,6 @@ from cryptography.hazmat.primitives import serialization from cryptography.hazmat.backends import default_backend from cryptography.hazmat.primitives.asymmetric import rsa -import sshpubkeys.exceptions -from sshpubkeys.keys import SSHKey EC2_RESOURCE_TO_PREFIX = { @@ -544,6 +542,10 @@ def generate_instance_identity_document(instance): def rsa_public_key_parse(key_material): + # These imports take ~.5s; let's keep them local + import sshpubkeys.exceptions + from sshpubkeys.keys import SSHKey + try: if not isinstance(key_material, six.binary_type): key_material = key_material.encode("ascii") From 1fc208e52ccadbc4e7c15d2140eef0970a4c1fd5 Mon Sep 17 00:00:00 2001 From: Stijn Seghers Date: Fri, 17 Jan 2020 14:09:40 +1300 Subject: [PATCH 287/658] Add FIXME about import time overhead --- moto/iam/models.py | 1 + 1 file changed, 1 insertion(+) diff --git a/moto/iam/models.py b/moto/iam/models.py index e34ca7cf86bb..08a1eb36a83a 100755 --- a/moto/iam/models.py +++ b/moto/iam/models.py @@ -288,6 +288,7 @@ def arn(self): # AWS defines some of its own managed policies and we periodically # import them via `make aws_managed_policies` +# FIXME: Takes about 40ms at import time aws_managed_policies = [ AWSManagedPolicy.from_data(name, d) for name, d in json.loads(aws_managed_policies_data).items() From 9c13798f785b1a920eb7c703f659c94c45417b6f Mon Sep 17 00:00:00 2001 From: Stijn Seghers Date: Fri, 27 Mar 2020 18:55:13 +1300 Subject: [PATCH 288/658] Fix import errors For Python 2, the lazy importing style uncovered some importing mistakes. I can't quite figure out how it was working before. 
--- moto/ec2_instance_connect/models.py | 2 +- moto/rds2/models.py | 4 +++- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/moto/ec2_instance_connect/models.py b/moto/ec2_instance_connect/models.py index cc8cc3f33cb7..f3dbbe9f8269 100644 --- a/moto/ec2_instance_connect/models.py +++ b/moto/ec2_instance_connect/models.py @@ -1,4 +1,4 @@ -import boto +import boto.ec2 from moto.core import BaseBackend diff --git a/moto/rds2/models.py b/moto/rds2/models.py index 722d7d4fd88d..7fa4f3316f06 100644 --- a/moto/rds2/models.py +++ b/moto/rds2/models.py @@ -8,7 +8,6 @@ from boto3 import Session from jinja2 import Template from re import compile as re_compile -from moto.cloudformation.exceptions import UnformattedGetAttTemplateException from moto.compat import OrderedDict from moto.core import BaseBackend, BaseModel from moto.core.utils import get_random_hex @@ -308,6 +307,9 @@ def update(self, db_kwargs): setattr(self, key, value) def get_cfn_attribute(self, attribute_name): + # Local import to avoid circular dependency with cloudformation.parsing + from moto.cloudformation.exceptions import UnformattedGetAttTemplateException + if attribute_name == "Endpoint.Address": return self.address elif attribute_name == "Endpoint.Port": From 84100c44831a2060ceb33a36ecdab339bbf68d0b Mon Sep 17 00:00:00 2001 From: usmankb Date: Wed, 29 Apr 2020 00:28:19 +0530 Subject: [PATCH 289/658] enhancement Create-VPC-endpoint --- moto/ec2/models.py | 98 +++++++++++++++++++++++++++++ moto/ec2/responses/vpcs.py | 65 +++++++++++++++++++ moto/ec2/utils.py | 16 +++++ tests/test_ec2/test_route_tables.py | 36 +++++++++++ 4 files changed, 215 insertions(+) diff --git a/moto/ec2/models.py b/moto/ec2/models.py index ce9c3ef5bb5a..118fc6804a23 100644 --- a/moto/ec2/models.py +++ b/moto/ec2/models.py @@ -104,6 +104,7 @@ random_internet_gateway_id, random_ip, random_ipv6_cidr, + randor_ipv4_cidr, random_launch_template_id, random_nat_gateway_id, random_key_pair, @@ -112,6 +113,8 @@ random_reservation_id, random_route_table_id, generate_route_id, + generate_vpc_end_point_id, + create_dns_entries, split_route_id, random_security_group_id, random_snapshot_id, @@ -2735,6 +2738,7 @@ class VPCBackend(object): def __init__(self): self.vpcs = {} + self.vpc_end_points = {} self.vpc_refs[self.__class__].add(weakref.ref(self)) super(VPCBackend, self).__init__() @@ -2877,6 +2881,66 @@ def associate_vpc_cidr_block( vpc = self.get_vpc(vpc_id) return vpc.associate_vpc_cidr_block(cidr_block, amazon_provided_ipv6_cidr_block) + def create_vpc_endpoint(self, + vpc_id, + service_name, + type=None, + policy_document=False, + route_table_ids=None, + subnet_ids=[], + network_interface_ids=[], + dns_entries=None, + client_token=None, + security_group=None, + tag_specifications=None, + private_dns_enabled=None + ): + + vpc_endpoint_id = generate_vpc_end_point_id(vpc_id) + + #validates if vpc is present or not. + self.get_vpc(vpc_id) + + if type == "interface" or "Interface ": + + network_interface_ids = [] + for subnet_id in subnet_ids: + self.get_subnet(subnet_id) + eni = self.create_network_interface(subnet_id, random_private_ip()) + network_interface_ids.append(eni.id) + + dns_entries = create_dns_entries(service_name, vpc_endpoint_id) + + else : + # considering gateway if type is not mentioned. 
+                service_destination_cidr = randor_ipv4_cidr()
+
+                for route_table_id in route_table_ids:
+                    self.create_route(
+                        route_table_id,
+                        service_destination_cidr
+                    )
+
+        vpc_end_point = VPCEndPoint(
+            vpc_endpoint_id,
+            vpc_id,
+            service_name,
+            type,
+            policy_document,
+            route_table_ids,
+            subnet_ids,
+            network_interface_ids,
+            [dns_entries],
+            client_token,
+            security_group,
+            tag_specifications,
+            private_dns_enabled
+        )
+
+        self.vpc_end_points[vpc_endpoint_id] = vpc_end_point
+
+        return vpc_end_point
+
 
 class VPCPeeringConnectionStatus(object):
     def __init__(self, code="initiating-request", message=""):
@@ -3485,6 +3549,40 @@ def create_from_cloudformation_json(
         return route_table
 
 
+class VPCEndPoint(TaggedEC2Resource):
+    def __init__(
+        self,
+        id,
+        vpc_id,
+        service_name,
+        type=None,
+        policy_document=False,
+        route_table_ids=None,
+        subnet_ids =None,
+        network_interface_ids=None,
+        dns_entries=None,
+        client_token=None,
+        security_group=None,
+        tag_specifications=None,
+        private_dns_enabled=None,
+    ):
+
+        self.id = id
+        self.vpc_id = vpc_id
+        self.service_name = service_name
+        self.type = type
+        self.policy_document = policy_document
+        self.route_table_ids = route_table_ids
+        self.network_interface_ids = network_interface_ids
+        self.subnet_ids = subnet_ids
+        self.client_token = client_token
+        self.security_group = security_group
+        self.tag_specifications = tag_specifications
+        self.private_dns_enabled = private_dns_enabled
+        self.created_at = datetime.utcnow()
+        self.dns_entries = dns_entries
+
+
 class RouteBackend(object):
     def __init__(self):
         super(RouteBackend, self).__init__()
diff --git a/moto/ec2/responses/vpcs.py b/moto/ec2/responses/vpcs.py
index 0fd19837872e..2af4c0b29e41 100644
--- a/moto/ec2/responses/vpcs.py
+++ b/moto/ec2/responses/vpcs.py
@@ -163,6 +163,36 @@ def disassociate_vpc_cidr_block(self):
             cidr_block_state="disassociating",
         )
 
+    def create_vpc_endpoint(self):
+        vpc_id = self._get_param("VpcId")
+        service_name = self._get_param("ServiceName")
+        route_table_ids = self._get_multi_param("RouteTableId")
+        subnet_ids = self._get_multi_param("SubnetId")
+        type = self._get_param("VpcEndpointType")
+        policy_document = self._get_param("PolicyDocument")
+        client_token = self._get_param("ClientToken")
+        tag_specifications = self._get_param("TagSpecifications")
+        private_dns_enabled = self._get_param("PrivateDNSEnabled")
+        security_group = self._get_param("SecurityGroup")
+
+        vpc_end_point = self.ec2_backend.create_vpc_endpoint(
+            vpc_id=vpc_id,
+            service_name=service_name,
+            type=type,
+            policy_document=policy_document,
+            route_table_ids=route_table_ids,
+            subnet_ids=subnet_ids,
+            client_token=client_token,
+            security_group=security_group,
+            tag_specifications=tag_specifications,
+            private_dns_enabled=private_dns_enabled
+        )
+
+        template = self.response_template(CREATE_VPC_END_POINT)
+        return template.render(
+            vpc_end_point=vpc_end_point
+        )
+
 
 CREATE_VPC_RESPONSE = """
@@ -384,3 +414,38 @@ def disassociate_vpc_cidr_block(self):
 
 
 """
+
+CREATE_VPC_END_POINT = """<CreateVpcEndpointResponse xmlns="http://ec2.amazonaws.com/doc/2016-11-15/">
+    <vpcEndpoint>
+        <policyDocument>{{ vpc_end_point.policy_document }}</policyDocument>
+        <state>available</state>
+        <vpcEndpointPolicySupported>false</vpcEndpointPolicySupported>
+        <serviceName>{{ vpc_end_point.service_name }}</serviceName>
+        <vpcId>{{ vpc_end_point.vpc_id }}</vpcId>
+        <vpcEndpointId>{{ vpc_end_point.id }}</vpcEndpointId>
+        <routeTableIdSet>
+            {% for routeid in vpc_end_point.route_table_ids %}
+                <item>{{ routeid }}</item>
+            {% endfor %}
+        </routeTableIdSet>
+        <networkInterfaceIdSet>
+            {% for network_interface_id in vpc_end_point.network_interface_ids %}
+                <item>{{ network_interface_id }}</item>
+            {% endfor %}
+        </networkInterfaceIdSet>
+        <subnetIdSet>
+            {% for subnetId in vpc_end_point.subnet_ids %}
+                <item>{{ subnetId }}</item>
+            {% endfor %}
+        </subnetIdSet>
+        <dnsEntrySet>
+            {% for entry in vpc_end_point.dns_entries %}
+                <item>
+                    <hostedZoneId>{{ entry["hosted_zone_id"] }}</hostedZoneId>
+                    <dnsName>{{ entry["dns_name"] }}</dnsName>
+                </item>
+            {% endfor %}
+        </dnsEntrySet>
+        <creationTimestamp>{{ vpc_end_point.created_at }}</creationTimestamp>
+    </vpcEndpoint>
+</CreateVpcEndpointResponse>"""
\ No newline at end of file
diff --git a/moto/ec2/utils.py b/moto/ec2/utils.py
index 3b363e45de9b..408ee9be54d1 100644
--- a/moto/ec2/utils.py
+++ b/moto/ec2/utils.py
@@ -181,6 +181,10 @@ def random_ip():
     )
 
 
+def randor_ipv4_cidr():
+    return "10.0.{}.{}/16".format(random.randint(0, 255), random.randint(0, 255))
+
+
 def random_ipv6_cidr():
     return "2400:6500:{}:{}::/56".format(random_resource_id(4), random_resource_id(4))
 
@@ -189,6 +193,18 @@ def generate_route_id(route_table_id, cidr_block):
     return "%s~%s" % (route_table_id, cidr_block)
 
 
+def generate_vpc_end_point_id(vpc_id):
+    return "%s-%s" % ('vpce', vpc_id[4:])
+
+
+def create_dns_entries(service_name, vpc_endpoint_id):
+    dns_entries = {}
+    dns_entries["dns_name"] = "{}-{}.{}".format(vpc_endpoint_id,
+                                                random_resource_id(8), service_name)
+    dns_entries["hosted_zone_id"] = random_resource_id(13).upper()
+    return dns_entries
+
+
 def split_route_id(route_id):
     values = route_id.split("~")
     return values[0], values[1]
diff --git a/tests/test_ec2/test_route_tables.py b/tests/test_ec2/test_route_tables.py
index 34746469108d..182776f9584d 100644
--- a/tests/test_ec2/test_route_tables.py
+++ b/tests/test_ec2/test_route_tables.py
@@ -618,3 +618,39 @@ def test_describe_route_tables_with_nat_gateway():
     nat_gw_routes[0]["DestinationCidrBlock"].should.equal("0.0.0.0/0")
     nat_gw_routes[0]["NatGatewayId"].should.equal(nat_gw_id)
     nat_gw_routes[0]["State"].should.equal("active")
+
+
+@mock_ec2
+def test_create_vpc_end_point():
+
+    ec2 = boto3.client("ec2", region_name="us-west-1")
+    vpc = ec2.create_vpc(CidrBlock="10.0.0.0/16")
+    subnet = ec2.create_subnet(VpcId=vpc["Vpc"]["VpcId"],
+                               CidrBlock="10.0.0.0/24")
+
+    route_table = ec2.create_route_table(VpcId=vpc["Vpc"]["VpcId"])
+
+    vpc_end_point = ec2.create_vpc_endpoint(
+        VpcId=vpc["Vpc"]["VpcId"],
+        ServiceName="com.amazonaws.us-east-1.s3",
+        RouteTableIds=[route_table["RouteTable"]["RouteTableId"]]
+    )
+
+    vpc_end_point["VpcEndpoint"]["ServiceName"].\
+        should.equal("com.amazonaws.us-east-1.s3")
+    vpc_end_point["VpcEndpoint"]["RouteTableIds"][0].\
+        should.equal(route_table["RouteTable"]["RouteTableId"])
+    vpc_end_point["VpcEndpoint"]["VpcId"].should.equal(vpc["Vpc"]["VpcId"])
+
+    vpc_end_point = ec2.create_vpc_endpoint(
+        VpcId=vpc["Vpc"]["VpcId"],
+        ServiceName="com.amazonaws.us-east-1.s3",
+        SubnetIds=[subnet["Subnet"]["SubnetId"]],
+        VpcEndpointType="interface"
+    )
+
+    vpc_end_point["VpcEndpoint"]["ServiceName"].\
+        should.equal("com.amazonaws.us-east-1.s3")
+    vpc_end_point["VpcEndpoint"]["SubnetIds"][0].\
+        should.equal(subnet["Subnet"]["SubnetId"])
+    vpc_end_point["VpcEndpoint"]["VpcId"].should.equal(vpc["Vpc"]["VpcId"])
From 8e2a14ba502146acb94a51f67294aa2006d1b02b Mon Sep 17 00:00:00 2001
From: Asher Foa <1268088+asherf@users.noreply.github.com>
Date: Tue, 28 Apr 2020 15:11:28 -0700
Subject: [PATCH 290/658] Add eu-south-1

---
 moto/ec2/models.py | 6 ++++++
 1 file changed, 6 insertions(+)

diff --git a/moto/ec2/models.py b/moto/ec2/models.py
index ce9c3ef5bb5a..120cda7e4c8a 100644
--- a/moto/ec2/models.py
+++ b/moto/ec2/models.py
@@ -1528,6 +1528,7 @@ class RegionsAndZonesBackend(object):
         "ca-central-1",
         "eu-central-1",
         "eu-north-1",
+        "eu-south-1",
         "eu-west-1",
         "eu-west-2",
         "eu-west-3",
@@ -1679,6 +1680,11 @@ class RegionsAndZonesBackend(object):
         Zone(region_name="eu-central-1", name="eu-central-1b", zone_id="euc1-az3"),
         Zone(region_name="eu-central-1", name="eu-central-1c", zone_id="euc1-az1"),
     ],
+    "eu-south-1": [
+
Zone(region_name="eu-south-1", name="eu-south-1a", zone_id="eus1-az1"), + Zone(region_name="eu-south-1", name="eu-south-1b", zone_id="eus1-az2"), + Zone(region_name="eu-south-1", name="eu-south-1c", zone_id="eus1-az3"), + ], "us-east-1": [ Zone(region_name="us-east-1", name="us-east-1a", zone_id="use1-az6"), Zone(region_name="us-east-1", name="us-east-1b", zone_id="use1-az1"), From 4a7b5adbaedb30c34e3a8ff703cb2669fc0f9f79 Mon Sep 17 00:00:00 2001 From: Michael Penkov Date: Wed, 29 Apr 2020 12:38:33 +0900 Subject: [PATCH 291/658] relax version pins in setup.py for non-Py2 users --- setup.py | 34 ++++++++++++++++++++++++++++------ 1 file changed, 28 insertions(+), 6 deletions(-) diff --git a/setup.py b/setup.py index 684c0dcea6db..05ae661d251d 100755 --- a/setup.py +++ b/setup.py @@ -8,6 +8,8 @@ from setuptools import setup, find_packages import sys +PY2 = sys.version_info[0] == 2 + # Borrowed from pip at https://github.com/pypa/pip/blob/62c27dee45625e1b63d1e023b0656310f276e050/setup.py#L11-L15 here = os.path.abspath(os.path.dirname(__file__)) @@ -28,8 +30,6 @@ def get_version(): install_requires = [ - "setuptools==44.0.0", - "Jinja2<3.0.0,>=2.10.1", "boto>=2.36.0", "boto3>=1.9.201", "botocore>=1.12.201", @@ -42,18 +42,40 @@ def get_version(): "pytz", "python-dateutil<3.0.0,>=2.1", "python-jose<4.0.0", - "mock<=3.0.5", "docker>=2.5.1", "jsondiff>=1.1.2", "aws-xray-sdk!=0.96,>=0.93", "responses>=0.9.0", "idna<3,>=2.5", "cfn-lint>=0.4.0", - "sshpubkeys>=3.1.0,<4.0", - "zipp==0.6.0", - "more-itertools==5.0.0" ] +# +# Avoid pins where they are not necessary. These pins were introduced by the +# following commit for Py2 compatibility. They are not required for non-Py2 +# users. +# +# https://github.com/mpenkov/moto/commit/00134d2df37bb4dcd5f447ef951d383bfec0903c +# +if PY2: + install_requires += [ + "Jinja2<3.0.0,>=2.10.1", + "mock<=3.0.5", + "more-itertools==5.0.0", + "setuptools==44.0.0", + "sshpubkeys>=3.1.0,<4.0", + "zipp==0.6.0", + ] +else: + install_requires += [ + "Jinja2>=2.10.1", + "mock", + "more-itertools", + "setuptools", + "sshpubkeys>=3.1.0", + "zipp", + ] + extras_require = { 'server': ['flask'], } From 179f5170d4c721be9121b35ae24130f44f05cffe Mon Sep 17 00:00:00 2001 From: Michael Penkov Date: Wed, 29 Apr 2020 13:02:53 +0900 Subject: [PATCH 292/658] more pins for Py2 --- setup.py | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/setup.py b/setup.py index 05ae661d251d..994e55300a89 100755 --- a/setup.py +++ b/setup.py @@ -48,6 +48,7 @@ def get_version(): "responses>=0.9.0", "idna<3,>=2.5", "cfn-lint>=0.4.0", + "MarkupSafe<2.0", # This is a Jinja2 dependency, 2.0.0a1 currently seems broken ] # @@ -59,6 +60,13 @@ def get_version(): # if PY2: install_requires += [ + # + # This is an indirect dependency. Version 5.0.0 claims to be for + # Py2.6+, but it really isn't. 
+ # + # https://github.com/jaraco/configparser/issues/51 + # + "configparser<5.0", "Jinja2<3.0.0,>=2.10.1", "mock<=3.0.5", "more-itertools==5.0.0", From 45d99aef690c0c7aeea403ead07064d0fda85497 Mon Sep 17 00:00:00 2001 From: Michael Penkov Date: Wed, 29 Apr 2020 13:03:03 +0900 Subject: [PATCH 293/658] fix list comprehension --- moto/s3/models.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/moto/s3/models.py b/moto/s3/models.py index 7373dc9e33ac..866c5d007c51 100644 --- a/moto/s3/models.py +++ b/moto/s3/models.py @@ -1370,7 +1370,7 @@ def set_key_tags(self, key, tags, key_name=None): raise MissingKey(key_name) self.tagger.delete_all_tags_for_resource(key.arn) self.tagger.tag_resource( - key.arn, [{"Key": key, "Value": value} for key, value in tags.items()], + key.arn, [{"Key": k, "Value": v} for (k, v) in tags.items()], ) return key From 79b022fbc130bdfc461f773894d4e14f0e921def Mon Sep 17 00:00:00 2001 From: Michael Penkov Date: Wed, 29 Apr 2020 15:42:40 +0900 Subject: [PATCH 294/658] update requirements-dev.txt --- requirements-dev.txt | 19 ++++++++++++++++++- 1 file changed, 18 insertions(+), 1 deletion(-) diff --git a/requirements-dev.txt b/requirements-dev.txt index 2b43bcf9d863..313f2dfb629d 100644 --- a/requirements-dev.txt +++ b/requirements-dev.txt @@ -1,5 +1,4 @@ -r requirements.txt -mock<=3.0.5 # Last version compatible with Python 2.7 nose black; python_version >= '3.6' regex==2019.11.1; python_version >= '3.6' # Needed for black @@ -18,3 +17,21 @@ click==6.7 inflection==0.3.1 lxml==4.2.3 beautifulsoup4==4.6.0 + +# +# The below pins mirror the Python version-conditional pins in setup.py +# +Jinja2>=2.10.1; python_version >= '3.6' +mock; python_version >= '3.6' +more-itertools; python_version >= '3.6' +setuptools; python_version >= '3.6' +sshpubkeys>=3.1.0; python_version >= '3.6' +zipp; python_version >= '3.6' + +configparser<5.0; python_version == '2.7' +Jinja2<3.0.0,>=2.10.1; python_version == '2.7' +mock<=3.0.5; python_version == '2.7' +more-itertools==5.0.0; python_version == '2.7' +setuptools==44.0.0; python_version == '2.7' +sshpubkeys>=3.1.0,<4.0; python_version == '2.7' +zipp==0.6.0; python_version == '2.7' From f4888da33498592d6a1e25a073ddf4673947afa6 Mon Sep 17 00:00:00 2001 From: usmankb Date: Wed, 29 Apr 2020 18:02:02 +0530 Subject: [PATCH 295/658] added test asserts and review comments --- moto/ec2/models.py | 6 ++++-- moto/ec2/responses/vpcs.py | 2 ++ tests/test_ec2/test_route_tables.py | 19 +++++++++++++++++++ 3 files changed, 25 insertions(+), 2 deletions(-) diff --git a/moto/ec2/models.py b/moto/ec2/models.py index 118fc6804a23..c35fa339d7f2 100644 --- a/moto/ec2/models.py +++ b/moto/ec2/models.py @@ -2901,7 +2901,7 @@ def create_vpc_endpoint(self, #validates if vpc is present or not. 
         self.get_vpc(vpc_id)
 
-        if type == "interface" or "Interface ":
+        if type and type.lower() == "interface":
 
             network_interface_ids = []
             for subnet_id in subnet_ids:
@@ -2920,6 +2920,8 @@ def create_vpc_endpoint(self,
                         route_table_id,
                         service_destination_cidr
                     )
+        if dns_entries:
+            dns_entries = [dns_entries]
 
         vpc_end_point = VPCEndPoint(
             vpc_endpoint_id,
@@ -2930,7 +2932,7 @@ def create_vpc_endpoint(self,
             route_table_ids,
             subnet_ids,
             network_interface_ids,
-            [dns_entries],
+            dns_entries,
             client_token,
             security_group,
             tag_specifications,
diff --git a/moto/ec2/responses/vpcs.py b/moto/ec2/responses/vpcs.py
index 2af4c0b29e41..4b0aa76d85d4 100644
--- a/moto/ec2/responses/vpcs.py
+++ b/moto/ec2/responses/vpcs.py
@@ -439,12 +439,14 @@ def create_vpc_endpoint(self):
             {% endfor %}
         </subnetIdSet>
         <dnsEntrySet>
+        {% if vpc_end_point.dns_entries %}
             {% for entry in vpc_end_point.dns_entries %}
                 <item>
                     <hostedZoneId>{{ entry["hosted_zone_id"] }}</hostedZoneId>
                     <dnsName>{{ entry["dns_name"] }}</dnsName>
                 </item>
             {% endfor %}
+        {% endif %}
         </dnsEntrySet>
         <creationTimestamp>{{ vpc_end_point.created_at }}</creationTimestamp>
     </vpcEndpoint>
diff --git a/tests/test_ec2/test_route_tables.py b/tests/test_ec2/test_route_tables.py
index 182776f9584d..f8be8dc802f5 100644
--- a/tests/test_ec2/test_route_tables.py
+++ b/tests/test_ec2/test_route_tables.py
@@ -630,6 +630,7 @@ def test_create_vpc_end_point():
 
     route_table = ec2.create_route_table(VpcId=vpc["Vpc"]["VpcId"])
 
+    # test without any end point type specified
     vpc_end_point = ec2.create_vpc_endpoint(
         VpcId=vpc["Vpc"]["VpcId"],
         ServiceName="com.amazonaws.us-east-1.s3",
@@ -641,7 +642,24 @@ def test_create_vpc_end_point():
     vpc_end_point["VpcEndpoint"]["RouteTableIds"][0].\
         should.equal(route_table["RouteTable"]["RouteTableId"])
     vpc_end_point["VpcEndpoint"]["VpcId"].should.equal(vpc["Vpc"]["VpcId"])
+    vpc_end_point["VpcEndpoint"]["DnsEntries"].should.have.length_of(0)
 
+    # test with any end point type as gateway
+    vpc_end_point = ec2.create_vpc_endpoint(
+        VpcId=vpc["Vpc"]["VpcId"],
+        ServiceName="com.amazonaws.us-east-1.s3",
+        RouteTableIds=[route_table["RouteTable"]["RouteTableId"]],
+        VpcEndpointType="gateway"
+    )
+
+    vpc_end_point["VpcEndpoint"]["ServiceName"]. \
+        should.equal("com.amazonaws.us-east-1.s3")
+    vpc_end_point["VpcEndpoint"]["RouteTableIds"][0]. \
+        should.equal(route_table["RouteTable"]["RouteTableId"])
+    vpc_end_point["VpcEndpoint"]["VpcId"].should.equal(vpc["Vpc"]["VpcId"])
+    vpc_end_point["VpcEndpoint"]["DnsEntries"].should.have.length_of(0)
+
+    # test with end point type as interface
     vpc_end_point = ec2.create_vpc_endpoint(
         VpcId=vpc["Vpc"]["VpcId"],
         ServiceName="com.amazonaws.us-east-1.s3",
         SubnetIds=[subnet["Subnet"]["SubnetId"]],
         VpcEndpointType="interface"
     )
@@ -654,3 +672,4 @@ def test_create_vpc_end_point():
     vpc_end_point["VpcEndpoint"]["SubnetIds"][0].\
         should.equal(subnet["Subnet"]["SubnetId"])
     vpc_end_point["VpcEndpoint"]["VpcId"].should.equal(vpc["Vpc"]["VpcId"])
+    len(vpc_end_point["VpcEndpoint"]["DnsEntries"]).should.be.greater_than(0)
\ No newline at end of file
From 2d0087d500ad392fd017b603f94b9030a63f5a52 Mon Sep 17 00:00:00 2001
From: Bert Blommers
Date: Wed, 29 Apr 2020 16:29:25 +0100
Subject: [PATCH 296/658] Linting

---
 moto/ec2/models.py                  | 42 ++++++++++++++--------------
 moto/ec2/responses/vpcs.py          | 26 ++++++++---------
 moto/ec2/utils.py                   |  7 +++--
 tests/test_ec2/test_route_tables.py | 43 ++++++++++++++++-------------
 4 files changed, 60 insertions(+), 58 deletions(-)

diff --git a/moto/ec2/models.py b/moto/ec2/models.py
index c35fa339d7f2..edc216eb32a7 100644
--- a/moto/ec2/models.py
+++ b/moto/ec2/models.py
@@ -2881,24 +2881,25 @@ def associate_vpc_cidr_block(
         vpc = self.get_vpc(vpc_id)
         return vpc.associate_vpc_cidr_block(cidr_block, amazon_provided_ipv6_cidr_block)
 
-    def create_vpc_endpoint(self,
-                            vpc_id,
-                            service_name,
-                            type=None,
-                            policy_document=False,
-                            route_table_ids=None,
-                            subnet_ids=[],
-                            network_interface_ids=[],
-                            dns_entries=None,
-                            client_token=None,
-                            security_group=None,
-                            tag_specifications=None,
-                            private_dns_enabled=None
-                            ):
+    def create_vpc_endpoint(
+        self,
+        vpc_id,
+        service_name,
+        type=None,
+        policy_document=False,
+        route_table_ids=None,
+        subnet_ids=[],
+        network_interface_ids=[],
+        dns_entries=None,
+        client_token=None,
+        security_group=None,
+        tag_specifications=None,
+        private_dns_enabled=None,
+    ):
 
         vpc_endpoint_id = generate_vpc_end_point_id(vpc_id)
 
-        #validates if vpc is present or not.
+        # validates if vpc is present or not.
         self.get_vpc(vpc_id)
 
         if type and type.lower() == "interface":
@@ -2911,15 +2912,12 @@ def create_vpc_endpoint(self,
 
             dns_entries = create_dns_entries(service_name, vpc_endpoint_id)
 
-        else :
+        else:
             # considering gateway if type is not mentioned.
\ + should.equal(route_table["RouteTable"]["RouteTableId"]) + vpc_end_point["VpcEndpoint"]["VpcId"].should.equal(vpc["Vpc"]["VpcId"]) + vpc_end_point["VpcEndpoint"]["DnsEntries"].should.have.length_of(0) + + # test with end point type as interface vpc_end_point = ec2.create_vpc_endpoint( VpcId=vpc["Vpc"]["VpcId"], ServiceName="com.amazonaws.us-east-1.s3", @@ -654,3 +672,4 @@ def test_create_vpc_end_point(): vpc_end_point["VpcEndpoint"]["SubnetIds"][0].\ should.equal(subnet["Subnet"]["SubnetId"]) vpc_end_point["VpcEndpoint"]["VpcId"].should.equal(vpc["Vpc"]["VpcId"]) + len(vpc_end_point["VpcEndpoint"]["DnsEntries"]).should.be.greater_than(0) \ No newline at end of file From 2d0087d500ad392fd017b603f94b9030a63f5a52 Mon Sep 17 00:00:00 2001 From: Bert Blommers Date: Wed, 29 Apr 2020 16:29:25 +0100 Subject: [PATCH 296/658] Linting --- moto/ec2/models.py | 42 ++++++++++++++-------------- moto/ec2/responses/vpcs.py | 26 ++++++++--------- moto/ec2/utils.py | 7 +++-- tests/test_ec2/test_route_tables.py | 43 ++++++++++++++++------------- 4 files changed, 60 insertions(+), 58 deletions(-) diff --git a/moto/ec2/models.py b/moto/ec2/models.py index c35fa339d7f2..edc216eb32a7 100644 --- a/moto/ec2/models.py +++ b/moto/ec2/models.py @@ -2881,24 +2881,25 @@ def associate_vpc_cidr_block( vpc = self.get_vpc(vpc_id) return vpc.associate_vpc_cidr_block(cidr_block, amazon_provided_ipv6_cidr_block) - def create_vpc_endpoint(self, - vpc_id, - service_name, - type=None, - policy_document=False, - route_table_ids=None, - subnet_ids=[], - network_interface_ids=[], - dns_entries=None, - client_token=None, - security_group=None, - tag_specifications=None, - private_dns_enabled=None - ): + def create_vpc_endpoint( + self, + vpc_id, + service_name, + type=None, + policy_document=False, + route_table_ids=None, + subnet_ids=[], + network_interface_ids=[], + dns_entries=None, + client_token=None, + security_group=None, + tag_specifications=None, + private_dns_enabled=None, + ): vpc_endpoint_id = generate_vpc_end_point_id(vpc_id) - #validates if vpc is present or not. + # validates if vpc is present or not. self.get_vpc(vpc_id) if type and type.lower() == "interface": @@ -2911,15 +2912,12 @@ def create_vpc_endpoint(self, dns_entries = create_dns_entries(service_name, vpc_endpoint_id) - else : + else: # considering gateway if type is not mentioned. 
service_destination_cidr = randor_ipv4_cidr() for route_table_id in route_table_ids: - self.create_route( - route_table_id, - service_destination_cidr - ) + self.create_route(route_table_id, service_destination_cidr) if dns_entries: dns_entries = [dns_entries] @@ -2936,7 +2934,7 @@ def create_vpc_endpoint(self, client_token, security_group, tag_specifications, - private_dns_enabled + private_dns_enabled, ) self.vpc_end_points[vpc_endpoint_id] = vpc_end_point @@ -3560,7 +3558,7 @@ def __init__( type=None, policy_document=False, route_table_ids=None, - subnet_ids =None, + subnet_ids=None, network_interface_ids=None, dns_entries=None, client_token=None, diff --git a/moto/ec2/responses/vpcs.py b/moto/ec2/responses/vpcs.py index 4b0aa76d85d4..59222207dd56 100644 --- a/moto/ec2/responses/vpcs.py +++ b/moto/ec2/responses/vpcs.py @@ -176,22 +176,20 @@ def create_vpc_endpoint(self): security_group = self._get_param("SecurityGroup") vpc_end_point = self.ec2_backend.create_vpc_endpoint( - vpc_id=vpc_id, - service_name=service_name, - type=type, - policy_document=policy_document, - route_table_ids=route_table_ids, - subnet_ids=subnet_ids, - client_token=client_token, - security_group=security_group, - tag_specifications=tag_specifications, - private_dns_enabled=private_dns_enabled + vpc_id=vpc_id, + service_name=service_name, + type=type, + policy_document=policy_document, + route_table_ids=route_table_ids, + subnet_ids=subnet_ids, + client_token=client_token, + security_group=security_group, + tag_specifications=tag_specifications, + private_dns_enabled=private_dns_enabled, ) template = self.response_template(CREATE_VPC_END_POINT) - return template.render( - vpc_end_point=vpc_end_point - ) + return template.render(vpc_end_point=vpc_end_point) CREATE_VPC_RESPONSE = """ @@ -450,4 +448,4 @@ def create_vpc_endpoint(self): {{ vpc_end_point.created_at }} -""" \ No newline at end of file +""" diff --git a/moto/ec2/utils.py b/moto/ec2/utils.py index 408ee9be54d1..c07c470a9e24 100644 --- a/moto/ec2/utils.py +++ b/moto/ec2/utils.py @@ -194,13 +194,14 @@ def generate_route_id(route_table_id, cidr_block): def generate_vpc_end_point_id(vpc_id): - return "%s-%s" % ('vpce', vpc_id[4:]) + return "%s-%s" % ("vpce", vpc_id[4:]) def create_dns_entries(service_name, vpc_endpoint_id): dns_entries = {} - dns_entries["dns_name"] = "{}-{}.{}".format(vpc_endpoint_id, - random_resource_id(8), service_name) + dns_entries["dns_name"] = "{}-{}.{}".format( + vpc_endpoint_id, random_resource_id(8), service_name + ) dns_entries["hosted_zone_id"] = random_resource_id(13).upper() return dns_entries diff --git a/tests/test_ec2/test_route_tables.py b/tests/test_ec2/test_route_tables.py index f8be8dc802f5..a64fbae1af7c 100644 --- a/tests/test_ec2/test_route_tables.py +++ b/tests/test_ec2/test_route_tables.py @@ -625,8 +625,7 @@ def test_create_vpc_end_point(): ec2 = boto3.client("ec2", region_name="us-west-1") vpc = ec2.create_vpc(CidrBlock="10.0.0.0/16") - subnet = ec2.create_subnet(VpcId=vpc["Vpc"]["VpcId"], - CidrBlock="10.0.0.0/24") + subnet = ec2.create_subnet(VpcId=vpc["Vpc"]["VpcId"], CidrBlock="10.0.0.0/24") route_table = ec2.create_route_table(VpcId=vpc["Vpc"]["VpcId"]) @@ -634,13 +633,15 @@ def test_create_vpc_end_point(): vpc_end_point = ec2.create_vpc_endpoint( VpcId=vpc["Vpc"]["VpcId"], ServiceName="com.amazonaws.us-east-1.s3", - RouteTableIds=[route_table["RouteTable"]["RouteTableId"]] - ) + RouteTableIds=[route_table["RouteTable"]["RouteTableId"]], + ) - vpc_end_point["VpcEndpoint"]["ServiceName"].\ - 
should.equal("com.amazonaws.us-east-1.s3") - vpc_end_point["VpcEndpoint"]["RouteTableIds"][0].\ - should.equal(route_table["RouteTable"]["RouteTableId"]) + vpc_end_point["VpcEndpoint"]["ServiceName"].should.equal( + "com.amazonaws.us-east-1.s3" + ) + vpc_end_point["VpcEndpoint"]["RouteTableIds"][0].should.equal( + route_table["RouteTable"]["RouteTableId"] + ) vpc_end_point["VpcEndpoint"]["VpcId"].should.equal(vpc["Vpc"]["VpcId"]) vpc_end_point["VpcEndpoint"]["DnsEntries"].should.have.length_of(0) @@ -649,13 +650,15 @@ def test_create_vpc_end_point(): VpcId=vpc["Vpc"]["VpcId"], ServiceName="com.amazonaws.us-east-1.s3", RouteTableIds=[route_table["RouteTable"]["RouteTableId"]], - VpcEndpointType="gateway" + VpcEndpointType="gateway", ) - vpc_end_point["VpcEndpoint"]["ServiceName"]. \ - should.equal("com.amazonaws.us-east-1.s3") - vpc_end_point["VpcEndpoint"]["RouteTableIds"][0]. \ - should.equal(route_table["RouteTable"]["RouteTableId"]) + vpc_end_point["VpcEndpoint"]["ServiceName"].should.equal( + "com.amazonaws.us-east-1.s3" + ) + vpc_end_point["VpcEndpoint"]["RouteTableIds"][0].should.equal( + route_table["RouteTable"]["RouteTableId"] + ) vpc_end_point["VpcEndpoint"]["VpcId"].should.equal(vpc["Vpc"]["VpcId"]) vpc_end_point["VpcEndpoint"]["DnsEntries"].should.have.length_of(0) @@ -664,12 +667,14 @@ def test_create_vpc_end_point(): VpcId=vpc["Vpc"]["VpcId"], ServiceName="com.amazonaws.us-east-1.s3", SubnetIds=[subnet["Subnet"]["SubnetId"]], - VpcEndpointType="interface" + VpcEndpointType="interface", ) - vpc_end_point["VpcEndpoint"]["ServiceName"].\ - should.equal("com.amazonaws.us-east-1.s3") - vpc_end_point["VpcEndpoint"]["SubnetIds"][0].\ - should.equal(subnet["Subnet"]["SubnetId"]) + vpc_end_point["VpcEndpoint"]["ServiceName"].should.equal( + "com.amazonaws.us-east-1.s3" + ) + vpc_end_point["VpcEndpoint"]["SubnetIds"][0].should.equal( + subnet["Subnet"]["SubnetId"] + ) vpc_end_point["VpcEndpoint"]["VpcId"].should.equal(vpc["Vpc"]["VpcId"]) - len(vpc_end_point["VpcEndpoint"]["DnsEntries"]).should.be.greater_than(0) \ No newline at end of file + len(vpc_end_point["VpcEndpoint"]["DnsEntries"]).should.be.greater_than(0) From d6d2a38c76ba15051424c237ece0b12b6c1d5d11 Mon Sep 17 00:00:00 2001 From: Bert Blommers Date: Thu, 30 Apr 2020 12:11:33 +0100 Subject: [PATCH 297/658] Fix circular import issue --- moto/core/responses.py | 5 ++++- moto/{core => iam}/access_control.py | 3 +-- 2 files changed, 5 insertions(+), 3 deletions(-) rename moto/{core => iam}/access_control.py (99%) diff --git a/moto/core/responses.py b/moto/core/responses.py index 9a46f8ac5a49..508bd8c59b31 100644 --- a/moto/core/responses.py +++ b/moto/core/responses.py @@ -11,7 +11,6 @@ import pytz -from moto.core.access_control import IAMRequest, S3IAMRequest from moto.core.exceptions import DryRunClientError from jinja2 import Environment, DictLoader, TemplateNotFound @@ -134,9 +133,13 @@ def _authenticate_and_authorize_action(self, iam_request_cls): ActionAuthenticatorMixin.request_count += 1 def _authenticate_and_authorize_normal_action(self): + from moto.iam.access_control import IAMRequest + self._authenticate_and_authorize_action(IAMRequest) def _authenticate_and_authorize_s3_action(self): + from moto.iam.access_control import S3IAMRequest + self._authenticate_and_authorize_action(S3IAMRequest) @staticmethod diff --git a/moto/core/access_control.py b/moto/iam/access_control.py similarity index 99% rename from moto/core/access_control.py rename to moto/iam/access_control.py index 8ba0c3ba1933..bcde25d9ea0f 100644 --- 
a/moto/core/access_control.py +++ b/moto/iam/access_control.py @@ -25,8 +25,6 @@ from six import string_types from moto.core import ACCOUNT_ID -from moto.iam.models import Policy -from moto.iam import iam_backend from moto.core.exceptions import ( SignatureDoesNotMatchError, AccessDeniedError, @@ -44,6 +42,7 @@ S3SignatureDoesNotMatchError, ) from moto.sts import sts_backend +from .models import iam_backend, Policy log = logging.getLogger(__name__) From 72bc07f1129f87c89166fec37c953509ebac9948 Mon Sep 17 00:00:00 2001 From: zscholl Date: Wed, 11 Mar 2020 14:54:58 -0500 Subject: [PATCH 298/658] get access key create date for cred report --- .gitignore | 1 + moto/iam/models.py | 6 +++--- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/.gitignore b/.gitignore index fb9bd51de2b9..deb9d984076b 100644 --- a/.gitignore +++ b/.gitignore @@ -22,3 +22,4 @@ tests/file.tmp .eggs/ .mypy_cache/ *.tmp +.venv/ \ No newline at end of file diff --git a/moto/iam/models.py b/moto/iam/models.py index 08a1eb36a83a..dd197c87258f 100755 --- a/moto/iam/models.py +++ b/moto/iam/models.py @@ -680,14 +680,14 @@ def to_csv(self): access_key_2_last_rotated = "N/A" elif len(self.access_keys) == 1: access_key_1_active = "true" - access_key_1_last_rotated = date_created.strftime(date_format) + access_key_1_last_rotated = self.access_keys[0].create_date.strftime(date_format) access_key_2_active = "false" access_key_2_last_rotated = "N/A" else: access_key_1_active = "true" - access_key_1_last_rotated = date_created.strftime(date_format) + access_key_1_last_rotated = self.access_keys[0].create_date.strftime(date_format) access_key_2_active = "true" - access_key_2_last_rotated = date_created.strftime(date_format) + access_key_2_last_rotated = self.access_keys[1].create_date.strftime(date_format) return "{0},{1},{2},{3},{4},{5},not_supported,false,{6},{7},{8},{9},false,N/A,false,N/A".format( self.name, From fc5e6ebf512694a78228347b43024e7038cd3431 Mon Sep 17 00:00:00 2001 From: zscholl Date: Wed, 11 Mar 2020 15:00:47 -0500 Subject: [PATCH 299/658] formatting --- moto/iam/models.py | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/moto/iam/models.py b/moto/iam/models.py index dd197c87258f..dfa6fd36ae2c 100755 --- a/moto/iam/models.py +++ b/moto/iam/models.py @@ -680,14 +680,20 @@ def to_csv(self): access_key_2_last_rotated = "N/A" elif len(self.access_keys) == 1: access_key_1_active = "true" - access_key_1_last_rotated = self.access_keys[0].create_date.strftime(date_format) + access_key_1_last_rotated = self.access_keys[0].create_date.strftime( + date_format + ) access_key_2_active = "false" access_key_2_last_rotated = "N/A" else: access_key_1_active = "true" - access_key_1_last_rotated = self.access_keys[0].create_date.strftime(date_format) + access_key_1_last_rotated = self.access_keys[0].create_date.strftime( + date_format + ) access_key_2_active = "true" - access_key_2_last_rotated = self.access_keys[1].create_date.strftime(date_format) + access_key_2_last_rotated = self.access_keys[1].create_date.strftime( + date_format + ) return "{0},{1},{2},{3},{4},{5},not_supported,false,{6},{7},{8},{9},false,N/A,false,N/A".format( self.name, From 2f2d6dc3fecb30e7420678d7731ac2bfc9ac336d Mon Sep 17 00:00:00 2001 From: zscholl Date: Wed, 11 Mar 2020 15:07:08 -0500 Subject: [PATCH 300/658] newline --- .gitignore | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.gitignore b/.gitignore index deb9d984076b..02e812c5b2f6 100644 --- a/.gitignore +++ b/.gitignore @@ -22,4 +22,4 @@ 
tests/file.tmp .eggs/ .mypy_cache/ *.tmp -.venv/ \ No newline at end of file +.venv/ From 35fde06381a910331410105be05bf4b28a37eb49 Mon Sep 17 00:00:00 2001 From: zscholl Date: Thu, 12 Mar 2020 13:07:30 -0500 Subject: [PATCH 301/658] update last_used for access keys --- moto/core/utils.py | 2 +- moto/iam/models.py | 23 ++++++++++++++++++++--- moto/iam/responses.py | 5 +++++ 3 files changed, 26 insertions(+), 4 deletions(-) diff --git a/moto/core/utils.py b/moto/core/utils.py index dce9f675c78b..921f64be2f0c 100644 --- a/moto/core/utils.py +++ b/moto/core/utils.py @@ -187,7 +187,7 @@ def iso_8601_datetime_with_milliseconds(datetime): def iso_8601_datetime_without_milliseconds(datetime): - return datetime.strftime("%Y-%m-%dT%H:%M:%S") + "Z" + return None if datetime is None else datetime.strftime("%Y-%m-%dT%H:%M:%S") + "Z" RFC1123 = "%a, %d %b %Y %H:%M:%S GMT" diff --git a/moto/iam/models.py b/moto/iam/models.py index dfa6fd36ae2c..c84e664c3047 100755 --- a/moto/iam/models.py +++ b/moto/iam/models.py @@ -464,7 +464,7 @@ def __init__(self, user_name): self.secret_access_key = random_alphanumeric(40) self.status = "Active" self.create_date = datetime.utcnow() - self.last_used = datetime.utcnow() + self.last_used = None @property def created_iso_8601(self): @@ -683,6 +683,11 @@ def to_csv(self): access_key_1_last_rotated = self.access_keys[0].create_date.strftime( date_format ) + access_key_2_last_rotated = ( + "N/A" + if self.access_key[0].last_used is None + else self.access_key[0].last_used.strftime(date_format) + ) access_key_2_active = "false" access_key_2_last_rotated = "N/A" else: @@ -690,12 +695,22 @@ def to_csv(self): access_key_1_last_rotated = self.access_keys[0].create_date.strftime( date_format ) + access_key_1_last_used = ( + "N/A" + if self.access_key[0].last_used is None + else self.access_key[0].last_used.strftime(date_format) + ) access_key_2_active = "true" access_key_2_last_rotated = self.access_keys[1].create_date.strftime( date_format ) + access_key_2_last_used = ( + "N/A" + if self.access_key[1].last_used is None + else self.access_key[1].last_used.strftime(date_format) + ) - return "{0},{1},{2},{3},{4},{5},not_supported,false,{6},{7},{8},{9},false,N/A,false,N/A".format( + return "{0},{1},{2},{3},{4},{5},not_supported,false,{6},{7},{8},not_supported,not_supported,{9},{10},{11},false,N/A,false,N/A".format( self.name, self.arn, date_created.strftime(date_format), @@ -704,8 +719,10 @@ def to_csv(self): date_created.strftime(date_format), access_key_1_active, access_key_1_last_rotated, + access_key_1_last_used, access_key_2_active, access_key_2_last_rotated, + access_key_2_last_used, ) @@ -1805,7 +1822,7 @@ def generate_report(self): def get_credential_report(self): if not self.credential_report: raise IAMReportNotPresentException("Credential report not present") - report = "user,arn,user_creation_time,password_enabled,password_last_used,password_last_changed,password_next_rotation,mfa_active,access_key_1_active,access_key_1_last_rotated,access_key_2_active,access_key_2_last_rotated,cert_1_active,cert_1_last_rotated,cert_2_active,cert_2_last_rotated\n" + report = 
"user,arn,user_creation_time,password_enabled,password_last_used,password_last_changed,password_next_rotation,mfa_active,access_key_1_active,access_key_1_last_rotated,access_key_1_last_used_date,access_key_1_last_used_region,access_key_1_last_used_service,access_key_2_active,access_key_2_last_rotated,access_key_2_last_used_date,access_key_2_last_used_region,access_key_2_last_used_service,cert_1_active,cert_1_last_rotated,cert_2_active,cert_2_last_rotated\n" for user in self.users: report += self.users[user].to_csv() return base64.b64encode(report.encode("ascii")).decode("ascii") diff --git a/moto/iam/responses.py b/moto/iam/responses.py index 12501769e182..947cccf33d31 100644 --- a/moto/iam/responses.py +++ b/moto/iam/responses.py @@ -1779,7 +1779,12 @@ def get_account_summary(self): {{ user_name }} + {{% if last_used % }} {{ last_used }} + {{% else % }} + N/A + N/A + {{% endif %}} From 54d816f09fbc1ca9e883be02249e66e6c5dbf120 Mon Sep 17 00:00:00 2001 From: zscholl Date: Thu, 12 Mar 2020 13:33:20 -0500 Subject: [PATCH 302/658] fix typo --- moto/iam/models.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/moto/iam/models.py b/moto/iam/models.py index c84e664c3047..6256d437a029 100755 --- a/moto/iam/models.py +++ b/moto/iam/models.py @@ -697,8 +697,8 @@ def to_csv(self): ) access_key_1_last_used = ( "N/A" - if self.access_key[0].last_used is None - else self.access_key[0].last_used.strftime(date_format) + if self.access_keys[0].last_used is None + else self.access_keys[0].last_used.strftime(date_format) ) access_key_2_active = "true" access_key_2_last_rotated = self.access_keys[1].create_date.strftime( @@ -706,8 +706,8 @@ def to_csv(self): ) access_key_2_last_used = ( "N/A" - if self.access_key[1].last_used is None - else self.access_key[1].last_used.strftime(date_format) + if self.access_keys[1].last_used is None + else self.access_keys[1].last_used.strftime(date_format) ) return "{0},{1},{2},{3},{4},{5},not_supported,false,{6},{7},{8},not_supported,not_supported,{9},{10},{11},false,N/A,false,N/A".format( From 9821eff1284109e0135b2486ce55acd418ca899d Mon Sep 17 00:00:00 2001 From: zscholl Date: Thu, 12 Mar 2020 15:06:40 -0500 Subject: [PATCH 303/658] add newline --- moto/iam/models.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/moto/iam/models.py b/moto/iam/models.py index 6256d437a029..304a0650418d 100755 --- a/moto/iam/models.py +++ b/moto/iam/models.py @@ -710,7 +710,7 @@ def to_csv(self): else self.access_keys[1].last_used.strftime(date_format) ) - return "{0},{1},{2},{3},{4},{5},not_supported,false,{6},{7},{8},not_supported,not_supported,{9},{10},{11},false,N/A,false,N/A".format( + return "{0},{1},{2},{3},{4},{5},not_supported,false,{6},{7},{8},not_supported,not_supported,{9},{10},{11},false,N/A,false,N/A\n".format( self.name, self.arn, date_created.strftime(date_format), From b342a96cb0079de8ce27f5acfaba5c4496211bad Mon Sep 17 00:00:00 2001 From: zscholl Date: Thu, 12 Mar 2020 15:15:14 -0500 Subject: [PATCH 304/658] add fields --- moto/iam/models.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/moto/iam/models.py b/moto/iam/models.py index 304a0650418d..b92c1f293384 100755 --- a/moto/iam/models.py +++ b/moto/iam/models.py @@ -710,7 +710,7 @@ def to_csv(self): else self.access_keys[1].last_used.strftime(date_format) ) - return "{0},{1},{2},{3},{4},{5},not_supported,false,{6},{7},{8},not_supported,not_supported,{9},{10},{11},false,N/A,false,N/A\n".format( + return 
"{0},{1},{2},{3},{4},{5},not_supported,false,{6},{7},{8},not_supported,not_supported,{9},{10},{11},not_supported,not_supported,false,N/A,false,N/A\n".format( self.name, self.arn, date_created.strftime(date_format), From 09109f336c14285a3ea805127e994d7851f1d84f Mon Sep 17 00:00:00 2001 From: zscholl Date: Thu, 12 Mar 2020 15:57:54 -0500 Subject: [PATCH 305/658] more fixes --- moto/iam/models.py | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/moto/iam/models.py b/moto/iam/models.py index b92c1f293384..a01b8655af08 100755 --- a/moto/iam/models.py +++ b/moto/iam/models.py @@ -676,20 +676,23 @@ def to_csv(self): if len(self.access_keys) == 0: access_key_1_active = "false" access_key_1_last_rotated = "N/A" + access_key_1_last_used = "N/A" access_key_2_active = "false" access_key_2_last_rotated = "N/A" + access_key_2_last_used = "N/A" elif len(self.access_keys) == 1: access_key_1_active = "true" access_key_1_last_rotated = self.access_keys[0].create_date.strftime( date_format ) - access_key_2_last_rotated = ( + access_key_1_last_used = ( "N/A" - if self.access_key[0].last_used is None - else self.access_key[0].last_used.strftime(date_format) + if self.access_keys[0].last_used is None + else self.access_keys[0].last_used.strftime(date_format) ) access_key_2_active = "false" access_key_2_last_rotated = "N/A" + access_key_2_last_used = "N/A" else: access_key_1_active = "true" access_key_1_last_rotated = self.access_keys[0].create_date.strftime( From 1abff5727581286ac0fec63e96092aed65795e3e Mon Sep 17 00:00:00 2001 From: zscholl Date: Mon, 23 Mar 2020 14:46:46 -0500 Subject: [PATCH 306/658] add status to credential report --- moto/iam/models.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/moto/iam/models.py b/moto/iam/models.py index a01b8655af08..6da6c6742d93 100755 --- a/moto/iam/models.py +++ b/moto/iam/models.py @@ -681,7 +681,7 @@ def to_csv(self): access_key_2_last_rotated = "N/A" access_key_2_last_used = "N/A" elif len(self.access_keys) == 1: - access_key_1_active = "true" + access_key_1_active = "true" if self.access_keys[0].status == "Active" else "false" access_key_1_last_rotated = self.access_keys[0].create_date.strftime( date_format ) @@ -694,7 +694,7 @@ def to_csv(self): access_key_2_last_rotated = "N/A" access_key_2_last_used = "N/A" else: - access_key_1_active = "true" + access_key_1_active = "true" if self.access_keys[0].status == "Active" else "false" access_key_1_last_rotated = self.access_keys[0].create_date.strftime( date_format ) @@ -703,7 +703,7 @@ def to_csv(self): if self.access_keys[0].last_used is None else self.access_keys[0].last_used.strftime(date_format) ) - access_key_2_active = "true" + access_key_2_active = "true" if self.access_keys[1].status == "Active" else "false" access_key_2_last_rotated = self.access_keys[1].create_date.strftime( date_format ) From 48304f81b18fbe75f306c90068796ef5653fddfe Mon Sep 17 00:00:00 2001 From: zscholl Date: Fri, 24 Apr 2020 13:16:13 -0500 Subject: [PATCH 307/658] fix last_used template --- moto/iam/responses.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/moto/iam/responses.py b/moto/iam/responses.py index 947cccf33d31..086ba508bfe6 100644 --- a/moto/iam/responses.py +++ b/moto/iam/responses.py @@ -1779,12 +1779,12 @@ def get_account_summary(self): {{ user_name }} - {{% if last_used % }} + {% if last_used is defined %} {{ last_used }} - {{% else % }} + {% else %} N/A N/A - {{% endif %}} + {% endif %} From 51e7002cbb84c9524372f216115461e2ce5c3f12 Mon Sep 17 
00:00:00 2001 From: zscholl Date: Wed, 29 Apr 2020 15:49:14 -0500 Subject: [PATCH 308/658] add tests --- moto/iam/models.py | 12 +++++-- moto/iam/responses.py | 9 +++--- tests/test_iam/test_iam.py | 64 ++++++++++++++++++++++++++++++++++++-- 3 files changed, 74 insertions(+), 11 deletions(-) diff --git a/moto/iam/models.py b/moto/iam/models.py index 6da6c6742d93..d3907da266a9 100755 --- a/moto/iam/models.py +++ b/moto/iam/models.py @@ -681,7 +681,9 @@ def to_csv(self): access_key_2_last_rotated = "N/A" access_key_2_last_used = "N/A" elif len(self.access_keys) == 1: - access_key_1_active = "true" if self.access_keys[0].status == "Active" else "false" + access_key_1_active = ( + "true" if self.access_keys[0].status == "Active" else "false" + ) access_key_1_last_rotated = self.access_keys[0].create_date.strftime( date_format ) @@ -694,7 +696,9 @@ def to_csv(self): access_key_2_last_rotated = "N/A" access_key_2_last_used = "N/A" else: - access_key_1_active = "true" if self.access_keys[0].status == "Active" else "false" + access_key_1_active = ( + "true" if self.access_keys[0].status == "Active" else "false" + ) access_key_1_last_rotated = self.access_keys[0].create_date.strftime( date_format ) @@ -703,7 +707,9 @@ def to_csv(self): if self.access_keys[0].last_used is None else self.access_keys[0].last_used.strftime(date_format) ) - access_key_2_active = "true" if self.access_keys[1].status == "Active" else "false" + access_key_2_active = ( + "true" if self.access_keys[1].status == "Active" else "false" + ) access_key_2_last_rotated = self.access_keys[1].create_date.strftime( date_format ) diff --git a/moto/iam/responses.py b/moto/iam/responses.py index 086ba508bfe6..667a6d13b851 100644 --- a/moto/iam/responses.py +++ b/moto/iam/responses.py @@ -1779,12 +1779,11 @@ def get_account_summary(self): {{ user_name }} - {% if last_used is defined %} - {{ last_used }} - {% else %} - N/A - N/A + {% if last_used %} + {{ last_used }} {% endif %} + N/A + N/A diff --git a/tests/test_iam/test_iam.py b/tests/test_iam/test_iam.py index 9958954373b2..986809bd58a9 100644 --- a/tests/test_iam/test_iam.py +++ b/tests/test_iam/test_iam.py @@ -4,6 +4,7 @@ import boto import boto3 +import csv import os import sure # noqa import sys @@ -12,12 +13,14 @@ from dateutil.tz import tzutc from moto import mock_iam, mock_iam_deprecated -from moto.iam.models import aws_managed_policies from moto.core import ACCOUNT_ID +from moto.iam.models import aws_managed_policies +from moto.backends import get_backend from nose.tools import assert_raises, assert_equals from nose.tools import raises from datetime import datetime +from datetime import timezone from tests.helpers import requires_boto_gte from uuid import uuid4 @@ -1215,6 +1218,44 @@ def test_boto3_get_credential_report(): report.should.match(r".*my-user.*") +@mock_iam +def test_boto3_get_credential_report_content(): + conn = boto3.client("iam", region_name="us-east-1") + username = "my-user" + conn.create_user(UserName=username) + key1 = conn.create_access_key(UserName=username)["AccessKey"] + conn.update_access_key( + UserName=username, AccessKeyId=key1["AccessKeyId"], Status="Inactive" + ) + key1 = conn.create_access_key(UserName=username)["AccessKey"] + iam_backend = get_backend("iam")["global"] + timestamp = datetime.now(tz=timezone.utc) + iam_backend.users[username].access_keys[1].last_used = timestamp + with assert_raises(ClientError): + conn.get_credential_report() + result = conn.generate_credential_report() + while result["State"] != "COMPLETE": + result = 
conn.generate_credential_report() + result = conn.get_credential_report() + report = result["Content"].decode("utf-8") + header = report.split("\n")[0] + header.should.equal( + "user,arn,user_creation_time,password_enabled,password_last_used,password_last_changed,password_next_rotation,mfa_active,access_key_1_active,access_key_1_last_rotated,access_key_1_last_used_date,access_key_1_last_used_region,access_key_1_last_used_service,access_key_2_active,access_key_2_last_rotated,access_key_2_last_used_date,access_key_2_last_used_region,access_key_2_last_used_service,cert_1_active,cert_1_last_rotated,cert_2_active,cert_2_last_rotated" + ) + report_dict = csv.DictReader(report.split("\n")) + user = next(report_dict) + user["user"].should.equal("my-user") + user["access_key_1_active"].should.equal("false") + user["access_key_1_last_rotated"].should.equal( + timestamp.isoformat(timespec="seconds") + ) + user["access_key_1_last_used_date"].should.equal("N/A") + user["access_key_2_active"].should.equal("true") + user["access_key_2_last_used_date"].should.equal( + timestamp.isoformat(timespec="seconds") + ) + + @requires_boto_gte("2.39") @mock_iam_deprecated() def test_managed_policy(): @@ -1382,7 +1423,7 @@ def test_update_access_key(): @mock_iam -def test_get_access_key_last_used(): +def test_get_access_key_last_used_when_unused(): iam = boto3.resource("iam", region_name="us-east-1") client = iam.meta.client username = "test-user" @@ -1393,11 +1434,28 @@ def test_get_access_key_last_used(): resp = client.get_access_key_last_used( AccessKeyId=create_key_response["AccessKeyId"] ) + resp["AccessKeyLastUsed"].should_not.contain("LastUsedDate") + resp["UserName"].should.equal(create_key_response["UserName"]) + +@mock_iam +def test_get_access_key_last_used_when_used(): + iam = boto3.resource("iam", region_name="us-east-1") + client = iam.meta.client + username = "test-user" + iam.create_user(UserName=username) + with assert_raises(ClientError): + client.get_access_key_last_used(AccessKeyId="non-existent-key-id") + create_key_response = client.create_access_key(UserName=username)["AccessKey"] + # Set last used date using the IAM backend. 
Moto currently does not have a mechanism for tracking usage of access keys + iam_backend = get_backend("iam")["global"] + iam_backend.users[username].access_keys[0].last_used = datetime.utcnow() + resp = client.get_access_key_last_used( + AccessKeyId=create_key_response["AccessKeyId"] + ) datetime.strftime( resp["AccessKeyLastUsed"]["LastUsedDate"], "%Y-%m-%d" ).should.equal(datetime.strftime(datetime.utcnow(), "%Y-%m-%d")) - resp["UserName"].should.equal(create_key_response["UserName"]) @mock_iam From 0423be259ada80786ea6b7b1d272470e5edf79b5 Mon Sep 17 00:00:00 2001 From: zscholl Date: Thu, 30 Apr 2020 08:44:45 -0500 Subject: [PATCH 309/658] remove backend logic & py27 incompatible timezone --- tests/test_iam/test_iam.py | 34 ++++------------------------------ 1 file changed, 4 insertions(+), 30 deletions(-) diff --git a/tests/test_iam/test_iam.py b/tests/test_iam/test_iam.py index 986809bd58a9..5a8ffe709c25 100644 --- a/tests/test_iam/test_iam.py +++ b/tests/test_iam/test_iam.py @@ -15,12 +15,10 @@ from moto import mock_iam, mock_iam_deprecated from moto.core import ACCOUNT_ID from moto.iam.models import aws_managed_policies -from moto.backends import get_backend from nose.tools import assert_raises, assert_equals from nose.tools import raises from datetime import datetime -from datetime import timezone from tests.helpers import requires_boto_gte from uuid import uuid4 @@ -1228,9 +1226,7 @@ def test_boto3_get_credential_report_content(): UserName=username, AccessKeyId=key1["AccessKeyId"], Status="Inactive" ) key1 = conn.create_access_key(UserName=username)["AccessKey"] - iam_backend = get_backend("iam")["global"] - timestamp = datetime.now(tz=timezone.utc) - iam_backend.users[username].access_keys[1].last_used = timestamp + timestamp = datetime.utcnow() with assert_raises(ClientError): conn.get_credential_report() result = conn.generate_credential_report() @@ -1246,14 +1242,12 @@ def test_boto3_get_credential_report_content(): user = next(report_dict) user["user"].should.equal("my-user") user["access_key_1_active"].should.equal("false") - user["access_key_1_last_rotated"].should.equal( - timestamp.isoformat(timespec="seconds") + user["access_key_1_last_rotated"].should.match( + timestamp.strftime(datetime.utcnow(), "%Y-%m-%d") ) user["access_key_1_last_used_date"].should.equal("N/A") user["access_key_2_active"].should.equal("true") - user["access_key_2_last_used_date"].should.equal( - timestamp.isoformat(timespec="seconds") - ) + user["access_key_2_last_used_date"].should.equal("N/A") @requires_boto_gte("2.39") @@ -1438,26 +1432,6 @@ def test_get_access_key_last_used_when_unused(): resp["UserName"].should.equal(create_key_response["UserName"]) -@mock_iam -def test_get_access_key_last_used_when_used(): - iam = boto3.resource("iam", region_name="us-east-1") - client = iam.meta.client - username = "test-user" - iam.create_user(UserName=username) - with assert_raises(ClientError): - client.get_access_key_last_used(AccessKeyId="non-existent-key-id") - create_key_response = client.create_access_key(UserName=username)["AccessKey"] - # Set last used date using the IAM backend. 
Moto currently does not have a mechanism for tracking usage of access keys - iam_backend = get_backend("iam")["global"] - iam_backend.users[username].access_keys[0].last_used = datetime.utcnow() - resp = client.get_access_key_last_used( - AccessKeyId=create_key_response["AccessKeyId"] - ) - datetime.strftime( - resp["AccessKeyLastUsed"]["LastUsedDate"], "%Y-%m-%d" - ).should.equal(datetime.strftime(datetime.utcnow(), "%Y-%m-%d")) - - @mock_iam def test_upload_ssh_public_key(): iam = boto3.resource("iam", region_name="us-east-1") From 1f1404352e91a6e7f13801f9233a10299dcdece4 Mon Sep 17 00:00:00 2001 From: zscholl Date: Thu, 30 Apr 2020 09:42:22 -0500 Subject: [PATCH 310/658] use conditional TEST_SERVER_MODE for backend tests --- tests/test_iam/test_iam.py | 40 +++++++++++++++++++++++++++++++++----- 1 file changed, 35 insertions(+), 5 deletions(-) diff --git a/tests/test_iam/test_iam.py b/tests/test_iam/test_iam.py index 5a8ffe709c25..6792d8f52ce4 100644 --- a/tests/test_iam/test_iam.py +++ b/tests/test_iam/test_iam.py @@ -12,9 +12,10 @@ from botocore.exceptions import ClientError from dateutil.tz import tzutc -from moto import mock_iam, mock_iam_deprecated +from moto import mock_iam, mock_iam_deprecated, settings from moto.core import ACCOUNT_ID from moto.iam.models import aws_managed_policies +from moto.backends import get_backend from nose.tools import assert_raises, assert_equals from nose.tools import raises @@ -1227,6 +1228,9 @@ def test_boto3_get_credential_report_content(): ) key1 = conn.create_access_key(UserName=username)["AccessKey"] timestamp = datetime.utcnow() + if not settings.TEST_SERVER_MODE: + iam_backend = get_backend("iam")["global"] + iam_backend.users[username].access_keys[1].last_used = timestamp with assert_raises(ClientError): conn.get_credential_report() result = conn.generate_credential_report() @@ -1242,12 +1246,38 @@ def test_boto3_get_credential_report_content(): user = next(report_dict) user["user"].should.equal("my-user") user["access_key_1_active"].should.equal("false") - user["access_key_1_last_rotated"].should.match( - timestamp.strftime(datetime.utcnow(), "%Y-%m-%d") - ) + user["access_key_1_last_rotated"].should.match(timestamp.strftime("%Y-%m-%d")) user["access_key_1_last_used_date"].should.equal("N/A") user["access_key_2_active"].should.equal("true") - user["access_key_2_last_used_date"].should.equal("N/A") + if not settings.TEST_SERVER_MODE: + user["access_key_2_last_used_date"].should.match(timestamp.strftime("%Y-%m-%d")) + else: + user["access_key_2_last_used_date"].should.equal("N/A") + + +@mock_iam +def test_get_access_key_last_used_when_used(): + iam = boto3.resource("iam", region_name="us-east-1") + client = iam.meta.client + username = "test-user" + iam.create_user(UserName=username) + with assert_raises(ClientError): + client.get_access_key_last_used(AccessKeyId="non-existent-key-id") + create_key_response = client.create_access_key(UserName=username)["AccessKey"] + # Set last used date using the IAM backend. 
Moto currently does not have a mechanism for tracking usage of access keys + if not settings.TEST_SERVER_MODE: + timestamp = datetime.utcnow() + iam_backend = get_backend("iam")["global"] + iam_backend.users[username].access_keys[0].last_used = timestamp + resp = client.get_access_key_last_used( + AccessKeyId=create_key_response["AccessKeyId"] + ) + if not settings.TEST_SERVER_MODE: + datetime.strftime( + resp["AccessKeyLastUsed"]["LastUsedDate"], "%Y-%m-%d" + ).should.equal(timestamp.strftime("%Y-%m-%d")) + else: + resp["AccessKeyLastUsed"].should_not.contain("LastUsedDate") @requires_boto_gte("2.39") From 7be39844bb5ab7312ae81d86ad84a31a6876321b Mon Sep 17 00:00:00 2001 From: redyvon Date: Thu, 30 Apr 2020 19:18:08 +0200 Subject: [PATCH 311/658] Update Dockerfile use python:3.7-slim image to avoid packages building steps for alpine and have a smaller image size --- Dockerfile | 16 +++------------- 1 file changed, 3 insertions(+), 13 deletions(-) diff --git a/Dockerfile b/Dockerfile index 24d7c34ff58b..3c159633e669 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,22 +1,12 @@ -FROM alpine:3.6 - -RUN apk add --no-cache --update \ - gcc \ - musl-dev \ - python3-dev \ - libffi-dev \ - openssl-dev \ - python3 +FROM python:3.7-slim ADD . /moto/ ENV PYTHONUNBUFFERED 1 WORKDIR /moto/ -RUN python3 -m ensurepip && \ - rm -r /usr/lib/python*/ensurepip && \ - pip3 --no-cache-dir install --upgrade pip setuptools && \ +RUN pip3 --no-cache-dir install --upgrade pip setuptools && \ pip3 --no-cache-dir install ".[server]" -ENTRYPOINT ["/usr/bin/moto_server", "-H", "0.0.0.0"] +ENTRYPOINT ["/usr/local/bin/moto_server", "-H", "0.0.0.0"] EXPOSE 5000 From 95c459a86de317b390fdd3a9038bfb859b98e7d0 Mon Sep 17 00:00:00 2001 From: Chagui- Date: Thu, 30 Apr 2020 17:21:45 -0400 Subject: [PATCH 312/658] Added exception for deleting a group which has childs. 
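(For reference, a minimal sketch of the behaviour this change enforces, written against the same moto IoT API that the diff and tests below exercise; the group names "parent" and "child" are illustrative only.)

    import boto3
    from moto import mock_iot

    @mock_iot
    def delete_group_tree():
        client = boto3.client("iot", region_name="ap-northeast-1")
        client.create_thing_group(thingGroupName="parent")
        client.create_thing_group(thingGroupName="child", parentGroupName="parent")
        try:
            # Rejected: "parent" still has a child group attached.
            client.delete_thing_group(thingGroupName="parent")
        except client.exceptions.InvalidRequestException:
            # Delete the leaf group first, then deleting the parent succeeds.
            client.delete_thing_group(thingGroupName="child")
            client.delete_thing_group(thingGroupName="parent")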
Added better tests for delete_thing_group --- moto/iot/models.py | 9 +++++++++ tests/test_iot/test_iot.py | 41 ++++++++++++++++++++++++++++++++++++++ 2 files changed, 50 insertions(+) diff --git a/moto/iot/models.py b/moto/iot/models.py index 51a23b6c6e22..1f0623eb33db 100644 --- a/moto/iot/models.py +++ b/moto/iot/models.py @@ -842,6 +842,15 @@ def create_thing_group( return thing_group.thing_group_name, thing_group.arn, thing_group.thing_group_id def delete_thing_group(self, thing_group_name, expected_version): + child_groups = [ + thing_group + for _, thing_group in self.thing_groups.items() + if thing_group.parent_group_name == thing_group_name + ] + if len(child_groups) > 0: + raise InvalidRequestException( + f" Cannot delete thing group : {thing_group_name} when there are still child groups attached to it" + ) thing_group = self.describe_thing_group(thing_group_name) del self.thing_groups[thing_group.arn] diff --git a/tests/test_iot/test_iot.py b/tests/test_iot/test_iot.py index 2f43de5b9729..58a820fee6f4 100644 --- a/tests/test_iot/test_iot.py +++ b/tests/test_iot/test_iot.py @@ -756,6 +756,47 @@ def test_delete_principal_thing(): client.delete_certificate(certificateId=cert_id) +@mock_iot +def test_delete_thing_group(): + client = boto3.client("iot", region_name="ap-northeast-1") + group_name_1a = "my-group-name-1a" + group_name_2a = "my-group-name-2a" + # --1a + # |--2a + + # create thing groups tree + # 1 + thing_group1a = client.create_thing_group(thingGroupName=group_name_1a) + thing_group1a.should.have.key("thingGroupName").which.should.equal(group_name_1a) + thing_group1a.should.have.key("thingGroupArn") + # 2 + thing_group2a = client.create_thing_group( + thingGroupName=group_name_2a, parentGroupName=group_name_1a + ) + thing_group2a.should.have.key("thingGroupName").which.should.equal(group_name_2a) + thing_group2a.should.have.key("thingGroupArn") + + # delete group with child + try: + client.delete_thing_group(thingGroupName=group_name_1a) + except client.exceptions.InvalidRequestException as exc: + error_code = exc.response["Error"]["Code"] + error_code.should.equal("InvalidRequestException") + else: + raise Exception("Should have raised error") + + # delete child group + client.delete_thing_group(thingGroupName=group_name_2a) + res = client.list_thing_groups() + res.should.have.key("thingGroups").which.should.have.length_of(1) + res["thingGroups"].should_not.have.key(group_name_2a) + + # now that there is no child group, we can delete the previus group safely + client.delete_thing_group(thingGroupName=group_name_1a) + res = client.list_thing_groups() + res.should.have.key("thingGroups").which.should.have.length_of(0) + + @mock_iot def test_describe_thing_group_metadata_hierarchy(): client = boto3.client("iot", region_name="ap-northeast-1") From 5e4451b0d50b90c36ccbd0fdedea75406bf78ade Mon Sep 17 00:00:00 2001 From: Chagui- Date: Thu, 30 Apr 2020 20:54:51 -0400 Subject: [PATCH 313/658] Removed f string so python 2 can work correctly --- moto/iot/models.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/moto/iot/models.py b/moto/iot/models.py index 1f0623eb33db..2e9979bda203 100644 --- a/moto/iot/models.py +++ b/moto/iot/models.py @@ -849,7 +849,9 @@ def delete_thing_group(self, thing_group_name, expected_version): ] if len(child_groups) > 0: raise InvalidRequestException( - f" Cannot delete thing group : {thing_group_name} when there are still child groups attached to it" + " Cannot delete thing group : " + + thing_group_name + + " when there are 
still child groups attached to it" ) thing_group = self.describe_thing_group(thing_group_name) del self.thing_groups[thing_group.arn] From 49b00942c3b8cc6b71a600db41c014396422c0e6 Mon Sep 17 00:00:00 2001 From: Bert Blommers Date: Fri, 1 May 2020 13:50:11 +0100 Subject: [PATCH 314/658] S3 DeleteObjects - Allow multiple querystring formats --- moto/s3/responses.py | 9 +++++++-- tests/test_s3/test_s3.py | 2 +- 2 files changed, 8 insertions(+), 3 deletions(-) diff --git a/moto/s3/responses.py b/moto/s3/responses.py index e1ab93860c76..98f28f012679 100644 --- a/moto/s3/responses.py +++ b/moto/s3/responses.py @@ -137,8 +137,13 @@ def parse_key_name(pth): def is_delete_keys(request, path, bucket_name): - return path == "/?delete" or ( - path == "/" and getattr(request, "query_string", "") == "delete" + # GOlang sends a request as url/?delete= (treating it as a normal key=value, even if the value is empty) + # Python sends a request as url/?delete (treating it as a flag) + # https://github.com/spulec/moto/issues/2937 + return ( + path == "/?delete" + or path == "/?delete=" + or (path == "/" and getattr(request, "query_string", "") == "delete") ) diff --git a/tests/test_s3/test_s3.py b/tests/test_s3/test_s3.py index 86b892315c83..f60e0293e231 100644 --- a/tests/test_s3/test_s3.py +++ b/tests/test_s3/test_s3.py @@ -3765,7 +3765,7 @@ def test_paths_with_leading_slashes_work(): @mock_s3 def test_root_dir_with_empty_name_works(): - if os.environ.get("TEST_SERVER_MODE", "false").lower() == "true": + if settings.TEST_SERVER_MODE: raise SkipTest("Does not work in server mode due to error in Workzeug") store_and_read_back_a_key("/") From 2e737a61026a84171927e9eece10de6ed1bfa02c Mon Sep 17 00:00:00 2001 From: Bert Blommers Date: Fri, 1 May 2020 14:19:20 +0100 Subject: [PATCH 315/658] DynamoDB - Add tests to verify Atomic Counter functionality --- tests/test_dynamodb2/test_dynamodb.py | 83 +++++++++++++++++++++++++++ 1 file changed, 83 insertions(+) diff --git a/tests/test_dynamodb2/test_dynamodb.py b/tests/test_dynamodb2/test_dynamodb.py index 0f45e0244425..470c5f8ffb40 100644 --- a/tests/test_dynamodb2/test_dynamodb.py +++ b/tests/test_dynamodb2/test_dynamodb.py @@ -4946,3 +4946,86 @@ def test_multiple_updates(): "id": {"S": "1"}, } assert result == expected_result + + +@mock_dynamodb2 +def test_update_item_atomic_counter(): + table = "table_t" + ddb_mock = boto3.client("dynamodb", region_name="eu-west-3") + ddb_mock.create_table( + TableName=table, + KeySchema=[{"AttributeName": "t_id", "KeyType": "HASH"}], + AttributeDefinitions=[{"AttributeName": "t_id", "AttributeType": "S"}], + BillingMode="PAY_PER_REQUEST", + ) + + key = {"t_id": {"S": "item1"}} + + ddb_mock.put_item( + TableName=table, + Item={"t_id": {"S": "item1"}, "n_i": {"N": "5"}, "n_f": {"N": "5.3"}}, + ) + + ddb_mock.update_item( + TableName=table, + Key=key, + UpdateExpression="set n_i = n_i + :inc1, n_f = n_f + :inc2", + ExpressionAttributeValues={":inc1": {"N": "1.2"}, ":inc2": {"N": "0.05"}}, + ) + updated_item = ddb_mock.get_item(TableName=table, Key=key)["Item"] + updated_item["n_i"]["N"].should.equal("6.2") + updated_item["n_f"]["N"].should.equal("5.35") + + +@mock_dynamodb2 +def test_update_item_atomic_counter_return_values(): + table = "table_t" + ddb_mock = boto3.client("dynamodb", region_name="eu-west-3") + ddb_mock.create_table( + TableName=table, + KeySchema=[{"AttributeName": "t_id", "KeyType": "HASH"}], + AttributeDefinitions=[{"AttributeName": "t_id", "AttributeType": "S"}], + BillingMode="PAY_PER_REQUEST", + ) + + key = 
{"t_id": {"S": "item1"}} + + ddb_mock.put_item(TableName=table, Item={"t_id": {"S": "item1"}, "v": {"N": "5"}}) + + response = ddb_mock.update_item( + TableName=table, + Key=key, + UpdateExpression="set v = v + :inc", + ExpressionAttributeValues={":inc": {"N": "1"}}, + ReturnValues="UPDATED_OLD", + ) + assert ( + "v" in response["Attributes"] + ), "v has been updated, and should be returned here" + response["Attributes"]["v"]["N"].should.equal("5") + + # second update + response = ddb_mock.update_item( + TableName=table, + Key=key, + UpdateExpression="set v = v + :inc", + ExpressionAttributeValues={":inc": {"N": "1"}}, + ReturnValues="UPDATED_OLD", + ) + assert ( + "v" in response["Attributes"] + ), "v has been updated, and should be returned here" + response["Attributes"]["v"]["N"].should.equal("6") + + # third update + response = ddb_mock.update_item( + TableName=table, + Key=key, + UpdateExpression="set v = v + :inc", + ExpressionAttributeValues={":inc": {"N": "1"}}, + ReturnValues="UPDATED_NEW", + ) + assert ( + "v" in response["Attributes"] + ), "v has been updated, and should be returned here" + response["Attributes"]["v"]["N"].should.equal("8") From 440213f854c2f77ce6e382cc0dd3edb6631cdae1 Mon Sep 17 00:00:00 2001 From: usmankb Date: Fri, 1 May 2020 21:16:33 +0530 Subject: [PATCH 316/658] Enhancement Adding SES get_send_statistics,create_configuration_set functions --- moto/ses/exceptions.py | 13 +++++++ moto/ses/models.py | 36 ++++++++++++++++- moto/ses/responses.py | 66 ++++++++++++++++++++++++++++++++ tests/test_ses/test_ses.py | 36 +++++++++++++++++ tests/test_ses/test_ses_boto3.py | 52 +++++++++++++++++++++++++ 5 files changed, 202 insertions(+), 1 deletion(-) diff --git a/moto/ses/exceptions.py b/moto/ses/exceptions.py index a905039e2184..f57eadf770d9 100644 --- a/moto/ses/exceptions.py +++ b/moto/ses/exceptions.py @@ -7,3 +7,16 @@ class MessageRejectedError(RESTError): def __init__(self, message): super(MessageRejectedError, self).__init__("MessageRejected", message) + +class ConfigurationSetDoesNotExist(RESTError): + code = 400 + + def __init__(self, message): + super(ConfigurationSetDoesNotExist, self).__init__("ConfigurationSetDoesNotExist", message) + + +class EventDestinationAlreadyExists(RESTError): + code = 400 + + def __init__(self, message): + super(EventDestinationAlreadyExists, self).__init__("EventDestinationAlreadyExists", message) diff --git a/moto/ses/models.py b/moto/ses/models.py index 91241f70629c..62068e5a980f 100644 --- a/moto/ses/models.py +++ b/moto/ses/models.py @@ -1,11 +1,12 @@ from __future__ import unicode_literals +import datetime import email from email.utils import parseaddr from moto.core import BaseBackend, BaseModel from moto.sns.models import sns_backends -from .exceptions import MessageRejectedError +from .exceptions import MessageRejectedError,ConfigurationSetDoesNotExist,EventDestinationAlreadyExists from .utils import get_random_message_id from .feedback import COMMON_MAIL, BOUNCE, COMPLAINT, DELIVERY @@ -81,7 +82,11 @@ def __init__(self): self.domains = [] self.sent_messages = [] self.sent_message_count = 0 + self.rejected_messages_count = 0 self.sns_topics = {} + self.config_set = {} + self.config_set_event_destination = {} + self.event_destinations = {} def _is_verified_address(self, source): _, address = parseaddr(source) @@ -118,6 +123,7 @@ def send_email(self, source, subject, body, destinations, region): if recipient_count > RECIPIENT_LIMIT: raise MessageRejectedError("Too many recipients.") if not 
self._is_verified_address(source): + self.rejected_messages_count+=1 raise MessageRejectedError("Email address not verified %s" % source) self.__process_sns_feedback__(source, destinations, region) @@ -135,6 +141,7 @@ def send_templated_email( if recipient_count > RECIPIENT_LIMIT: raise MessageRejectedError("Too many recipients.") if not self._is_verified_address(source): + self.rejected_messages_count += 1 raise MessageRejectedError("Email address not verified %s" % source) self.__process_sns_feedback__(source, destinations, region) @@ -237,5 +244,32 @@ def set_identity_notification_topic(self, identity, notification_type, sns_topic return {} + def create_configuration_set(self, configuration_set_name): + self.config_set[configuration_set_name] = 1 + return {} + + def create_configuration_set_event_destination(self,configuration_set_name, event_destination): + + if self.config_set.get(configuration_set_name) is None: + raise ConfigurationSetDoesNotExist("Invalid Configuration Set Name.") + + if self.event_destinations.get(event_destination["Name"]): + raise EventDestinationAlreadyExists("Duplicate Event destination Name.") + + self.config_set_event_destination[configuration_set_name] = event_destination + self.event_destinations[event_destination["Name"]] = 1 + + return {} + + def get_send_statistics(self): + + statistics = {} + statistics["DeliveryAttempts"] = self.sent_message_count + statistics["Rejects"] = self.rejected_messages_count + statistics["Complaints"] = 0 + statistics["Bounces"] = 0 + statistics["Timestamp"] = datetime.datetime.utcnow() + return statistics + ses_backend = SESBackend() diff --git a/moto/ses/responses.py b/moto/ses/responses.py index 1034aeb0df59..8bf7bd9420b6 100644 --- a/moto/ses/responses.py +++ b/moto/ses/responses.py @@ -133,6 +133,40 @@ def set_identity_notification_topic(self): template = self.response_template(SET_IDENTITY_NOTIFICATION_TOPIC_RESPONSE) return template.render() + def get_send_statistics(self): + statistics = ses_backend.get_send_statistics() + template = self.response_template(GET_SEND_STATISTICS) + return template.render(all_statistics=[statistics]) + + def create_configuration_set(self): + configuration_set_name = self.querystring.get("ConfigurationSet.Name")[0] + ses_backend.create_configuration_set(configuration_set_name=configuration_set_name) + template = self.response_template(CREATE_CONFIGURATION_SET) + return template.render() + + def create_configuration_set_event_destination(self): + + configuration_set_name = self._get_param('ConfigurationSetName') + is_configuration_event_enabled = self.querystring.get("EventDestination.Enabled")[0] + configuration_event_name = self.querystring.get("EventDestination.Name")[0] + event_topic_arn = self.querystring.get("EventDestination.SNSDestination.TopicARN")[0] + event_matching_types = self._get_multi_param("EventDestination.MatchingEventTypes.member") + + event_destination = {"Name":configuration_event_name, + "Enabled":is_configuration_event_enabled, + "EventMatchingTypes":event_matching_types, + "SNSDestination":event_topic_arn + } + + ses_backend.create_configuration_set_event_destination( + configuration_set_name=configuration_set_name, + event_destination=event_destination + ) + + template = self.response_template(CREATE_CONFIGURATION_SET_EVENT_DESTINATION) + return template.render() + + VERIFY_EMAIL_IDENTITY = """ @@ -248,3 +282,35 @@ def set_identity_notification_topic(self): 47e0ef1a-9bf2-11e1-9279-0100e8cf109a """ + +GET_SEND_STATISTICS = """ + + {% for statistics in 
all_statistics %} + + {{ statistics["DeliveryAttempts"] }} + {{ statistics["Rejects"] }} + {{ statistics["Bounces"] }} + {{ statistics["Complaints"] }} + {{ statistics["Timestamp"] }} + + {% endfor %} + + + e0abcdfa-c866-11e0-b6d0-273d09173z49 + +""" + +CREATE_CONFIGURATION_SET = """ + + + 47e0ef1a-9bf2-11e1-9279-0100e8cf109a + +""" + + +CREATE_CONFIGURATION_SET_EVENT_DESTINATION = """ + + + 67e0ef1a-9bf2-11e1-9279-0100e8cf109a + +""" diff --git a/tests/test_ses/test_ses.py b/tests/test_ses/test_ses.py index 851327b9dcc2..637931572a5c 100644 --- a/tests/test_ses/test_ses.py +++ b/tests/test_ses/test_ses.py @@ -127,3 +127,39 @@ def test_send_raw_email(): send_quota["GetSendQuotaResponse"]["GetSendQuotaResult"]["SentLast24Hours"] ) sent_count.should.equal(1) + + +@mock_ses_deprecated +def test_get_send_statistics(): + conn = boto.connect_ses("the_key", "the_secret") + + conn.send_email.when.called_with( + "test@example.com", + "test subject", + "test body", + "test_to@example.com", + format="html", + ).should.throw(BotoServerError) + + # tests to verify rejects in get_send_statistics + result = conn.get_send_statistics() + + reject_count = int(result["GetSendStatisticsResponse"]["SendDataPoints"][0]["Rejects"]) + delivery_count = int(result["GetSendStatisticsResponse"]["SendDataPoints"][0]["DeliveryAttempts"]) + reject_count.should.equal(1) + delivery_count.should.equal(0) + + conn.verify_email_identity("test@example.com") + conn.send_email( + "test@example.com", "test subject", "test body", "test_to@example.com" + ) + + # tests to delivery attempts in get_send_statistics + result = conn.get_send_statistics() + + reject_count = int(result["GetSendStatisticsResponse"]["SendDataPoints"][0]["Rejects"]) + delivery_count = int(result["GetSendStatisticsResponse"]["SendDataPoints"][0]["DeliveryAttempts"]) + reject_count.should.equal(1) + delivery_count.should.equal(1) + + diff --git a/tests/test_ses/test_ses_boto3.py b/tests/test_ses/test_ses_boto3.py index de8aa0813dae..e14abda3f449 100644 --- a/tests/test_ses/test_ses_boto3.py +++ b/tests/test_ses/test_ses_boto3.py @@ -4,6 +4,8 @@ from botocore.exceptions import ClientError from six.moves.email_mime_multipart import MIMEMultipart from six.moves.email_mime_text import MIMEText +from nose.tools import assert_raises + import sure # noqa @@ -227,3 +229,53 @@ def test_send_email_notification_with_encoded_sender(): Message={"Subject": {"Data": "hi",}, "Body": {"Text": {"Data": "there",}}}, ) response["ResponseMetadata"]["HTTPStatusCode"].should.equal(200) + +@mock_ses +def test_create_configuration_set(): + conn = boto3.client("ses", region_name="us-east-1") + conn.create_configuration_set(ConfigurationSet=dict({"Name": "test"})) + + conn.create_configuration_set_event_destination( + ConfigurationSetName='test', + EventDestination={ + 'Name': 'snsEvent', + 'Enabled': True, + 'MatchingEventTypes': [ + 'send', + ], + 'SNSDestination': { + 'TopicARN': 'arn:aws:sns:us-east-1:123456789012:myTopic' + } + }) + + with assert_raises(ClientError) as ex: + conn.create_configuration_set_event_destination( + ConfigurationSetName='failtest', + EventDestination={ + 'Name': 'snsEvent', + 'Enabled': True, + 'MatchingEventTypes': [ + 'send', + ], + 'SNSDestination': { + 'TopicARN': 'arn:aws:sns:us-east-1:123456789012:myTopic' + } + }) + + ex.exception.response["Error"]["Code"].should.equal("ConfigurationSetDoesNotExist") + + with assert_raises(ClientError) as ex: + conn.create_configuration_set_event_destination( + ConfigurationSetName='test', + EventDestination={ + 
'Name': 'snsEvent', + 'Enabled': True, + 'MatchingEventTypes': [ + 'send', + ], + 'SNSDestination': { + 'TopicARN': 'arn:aws:sns:us-east-1:123456789012:myTopic' + } + }) + + ex.exception.response["Error"]["Code"].should.equal("EventDestinationAlreadyExists") \ No newline at end of file From 143134816155bc70e4c17a2a594394b2b4335737 Mon Sep 17 00:00:00 2001 From: usmankb Date: Sat, 2 May 2020 01:33:58 +0530 Subject: [PATCH 317/658] Fix SQS send_message_batch empty array Exception handling --- moto/sqs/responses.py | 3 +++ tests/test_sqs/test_sqs.py | 15 +++++++++++++++ 2 files changed, 18 insertions(+) diff --git a/moto/sqs/responses.py b/moto/sqs/responses.py index 8acea079971e..f5481cc10288 100644 --- a/moto/sqs/responses.py +++ b/moto/sqs/responses.py @@ -285,6 +285,9 @@ def send_message_batch(self): "MessageAttributes": message_attributes, } + if entries == {}: + raise EmptyBatchRequest() + messages = self.sqs_backend.send_message_batch(queue_name, entries) template = self.response_template(SEND_MESSAGE_BATCH_RESPONSE) diff --git a/tests/test_sqs/test_sqs.py b/tests/test_sqs/test_sqs.py index f2ab8c37c09b..9deb8d8e2d0f 100644 --- a/tests/test_sqs/test_sqs.py +++ b/tests/test_sqs/test_sqs.py @@ -1147,6 +1147,21 @@ def test_send_message_batch_errors(): ) +@mock_sqs +def test_send_message_batch_with_empty_list(): + client = boto3.client("sqs", region_name="us-east-1") + + response = client.create_queue(QueueName="test-queue") + queue_url = response["QueueUrl"] + + client.send_message_batch.when.called_with( + QueueUrl=queue_url, Entries=[] + ).should.throw( + ClientError, + "There should be at least one SendMessageBatchRequestEntry in the request.", + ) + + @mock_sqs def test_batch_change_message_visibility(): if os.environ.get("TEST_SERVER_MODE", "false").lower() == "true": From f23f7068255a0a3816393f49c5f6a61288f76050 Mon Sep 17 00:00:00 2001 From: Rifqi Al Fatih Date: Sat, 2 May 2020 18:47:59 +0200 Subject: [PATCH 318/658] Implement placement constraints model --- moto/ecs/models.py | 13 ++++++++++++- moto/ecs/responses.py | 2 ++ tests/test_ecs/test_ecs_boto3.py | 33 ++++++++++++++++++++++++++++++++ 3 files changed, 47 insertions(+), 1 deletion(-) diff --git a/moto/ecs/models.py b/moto/ecs/models.py index 33d4dcf721cf..1a385226bc91 100644 --- a/moto/ecs/models.py +++ b/moto/ecs/models.py @@ -121,6 +121,7 @@ def __init__( network_mode=None, volumes=None, tags=None, + placement_constraints=None, ): self.family = family self.revision = revision @@ -137,6 +138,9 @@ def __init__( self.network_mode = "bridge" else: self.network_mode = network_mode + self.placement_constraints = ( + placement_constraints if placement_constraints is not None else [] + ) @property def response_object(self): @@ -558,7 +562,13 @@ def delete_cluster(self, cluster_str): raise Exception("{0} is not a cluster".format(cluster_name)) def register_task_definition( - self, family, container_definitions, volumes=None, network_mode=None, tags=None + self, + family, + container_definitions, + volumes=None, + network_mode=None, + tags=None, + placement_constraints=None, ): if family in self.task_definitions: last_id = self._get_last_task_definition_revision_id(family) @@ -574,6 +584,7 @@ def register_task_definition( volumes=volumes, network_mode=network_mode, tags=tags, + placement_constraints=placement_constraints, ) self.task_definitions[family][revision] = task_definition diff --git a/moto/ecs/responses.py b/moto/ecs/responses.py index 49bf022b485b..c8f1e06ce632 100644 --- a/moto/ecs/responses.py +++ 
b/moto/ecs/responses.py @@ -63,12 +63,14 @@ def register_task_definition(self): volumes = self._get_param("volumes") tags = self._get_param("tags") network_mode = self._get_param("networkMode") + placement_constraints = self._get_param("placementConstraints") task_definition = self.ecs_backend.register_task_definition( family, container_definitions, volumes=volumes, network_mode=network_mode, tags=tags, + placement_constraints=placement_constraints, ) return json.dumps({"taskDefinition": task_definition.response_object}) diff --git a/tests/test_ecs/test_ecs_boto3.py b/tests/test_ecs/test_ecs_boto3.py index 7fd90b412de3..f6de595974a4 100644 --- a/tests/test_ecs/test_ecs_boto3.py +++ b/tests/test_ecs/test_ecs_boto3.py @@ -2604,3 +2604,36 @@ def test_ecs_service_untag_resource_multiple_tags(): resourceArn=response["service"]["serviceArn"] ) response["tags"].should.equal([{"key": "hello", "value": "world"}]) + + +@mock_ecs +def test_ecs_task_definition_placement_constraints(): + client = boto3.client("ecs", region_name="us-east-1") + response = client.register_task_definition( + family="test_ecs_task", + containerDefinitions=[ + { + "name": "hello_world", + "image": "docker/hello-world:latest", + "cpu": 1024, + "memory": 400, + "essential": True, + "environment": [ + {"name": "AWS_ACCESS_KEY_ID", "value": "SOME_ACCESS_KEY"} + ], + "logConfiguration": {"logDriver": "json-file"}, + } + ], + networkMode="bridge", + tags=[ + {"key": "createdBy", "value": "moto-unittest"}, + {"key": "foo", "value": "bar"}, + ], + placementConstraints=[ + {"type": "memberOf", "expression": "attribute:ecs.instance-type =~ t2.*"} + ], + ) + type(response["taskDefinition"]["placementConstraints"]).should.be(list) + response["taskDefinition"]["placementConstraints"].should.equal( + [{"type": "memberOf", "expression": "attribute:ecs.instance-type =~ t2.*"}] + ) From 52cbdd72e75963f2879659a6a4bd60a210a2174d Mon Sep 17 00:00:00 2001 From: Kevin Neal Date: Sat, 2 May 2020 16:40:52 -0700 Subject: [PATCH 319/658] update SQS MaximumMessageSize from 64K to 256K --- moto/sqs/models.py | 2 +- tests/test_sqs/test_sqs.py | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/moto/sqs/models.py b/moto/sqs/models.py index a54d91c43bcc..f88d906b945d 100644 --- a/moto/sqs/models.py +++ b/moto/sqs/models.py @@ -230,7 +230,7 @@ def __init__(self, name, region, **kwargs): "FifoQueue": "false", "KmsDataKeyReusePeriodSeconds": 300, # five minutes "KmsMasterKeyId": None, - "MaximumMessageSize": int(64 << 10), + "MaximumMessageSize": int(64 << 12), "MessageRetentionPeriod": 86400 * 4, # four days "Policy": None, "ReceiveMessageWaitTimeSeconds": 0, diff --git a/tests/test_sqs/test_sqs.py b/tests/test_sqs/test_sqs.py index 9deb8d8e2d0f..01e34de0b61c 100644 --- a/tests/test_sqs/test_sqs.py +++ b/tests/test_sqs/test_sqs.py @@ -384,7 +384,7 @@ def test_get_queue_attributes(): response["Attributes"]["CreatedTimestamp"].should.be.a(six.string_types) response["Attributes"]["DelaySeconds"].should.equal("0") response["Attributes"]["LastModifiedTimestamp"].should.be.a(six.string_types) - response["Attributes"]["MaximumMessageSize"].should.equal("65536") + response["Attributes"]["MaximumMessageSize"].should.equal("262144") response["Attributes"]["MessageRetentionPeriod"].should.equal("345600") response["Attributes"]["QueueArn"].should.equal( "arn:aws:sqs:us-east-1:{}:test-queue".format(ACCOUNT_ID) @@ -406,7 +406,7 @@ def test_get_queue_attributes(): response["Attributes"].should.equal( { "ApproximateNumberOfMessages": "0", - 
"MaximumMessageSize": "65536", + "MaximumMessageSize": "262144", "QueueArn": "arn:aws:sqs:us-east-1:{}:test-queue".format(ACCOUNT_ID), "VisibilityTimeout": "30", "RedrivePolicy": json.dumps( From b8aa6ddaea81762e5c8f574f915d31ae50171579 Mon Sep 17 00:00:00 2001 From: usmankb Date: Sun, 3 May 2020 08:28:20 +0530 Subject: [PATCH 320/658] Fix response_parameter being ignored in put_integration_response --- moto/apigateway/models.py | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git a/moto/apigateway/models.py b/moto/apigateway/models.py index d39b719d66f6..d1b4300681b4 100644 --- a/moto/apigateway/models.py +++ b/moto/apigateway/models.py @@ -56,8 +56,10 @@ def __init__(self, deployment_id, name, description=""): class IntegrationResponse(BaseModel, dict): - def __init__(self, status_code, selection_pattern=None): - self["responseTemplates"] = {"application/json": None} + def __init__(self, status_code, selection_pattern=None, response_templates=None): + if response_templates == None: + response_templates = {"application/json": None} + self["responseTemplates"] = response_templates self["statusCode"] = status_code if selection_pattern: self["selectionPattern"] = selection_pattern @@ -72,8 +74,10 @@ def __init__(self, integration_type, uri, http_method, request_templates=None): self["requestTemplates"] = request_templates self["integrationResponses"] = {"200": IntegrationResponse(200)} - def create_integration_response(self, status_code, selection_pattern): - integration_response = IntegrationResponse(status_code, selection_pattern) + def create_integration_response(self, status_code, selection_pattern, response_templates): + if response_templates == {}: + response_templates = None + integration_response = IntegrationResponse(status_code, selection_pattern, response_templates) self["integrationResponses"][status_code] = integration_response return integration_response @@ -956,7 +960,7 @@ def create_integration_response( raise InvalidRequestInput() integration = self.get_integration(function_id, resource_id, method_type) integration_response = integration.create_integration_response( - status_code, selection_pattern + status_code, selection_pattern, response_templates ) return integration_response From 1cda64e9a3a190a5caa7f08b5af7b783d335c562 Mon Sep 17 00:00:00 2001 From: usmankb Date: Sun, 3 May 2020 08:31:46 +0530 Subject: [PATCH 321/658] Added tests --- tests/test_apigateway/test_apigateway.py | 58 ++++++++++++++++++++++++ 1 file changed, 58 insertions(+) diff --git a/tests/test_apigateway/test_apigateway.py b/tests/test_apigateway/test_apigateway.py index 7495372d263f..0ad815972873 100644 --- a/tests/test_apigateway/test_apigateway.py +++ b/tests/test_apigateway/test_apigateway.py @@ -1,5 +1,6 @@ from __future__ import unicode_literals +import json import boto3 from freezegun import freeze_time @@ -1229,6 +1230,63 @@ def test_put_integration_response_requires_responseTemplate(): responseTemplates={}, ) +@mock_apigateway +def test_put_integration_response_with_response_template(): + client = boto3.client("apigateway", region_name="us-west-2") + response = client.create_rest_api(name="my_api", description="this is my api") + api_id = response["id"] + resources = client.get_resources(restApiId=api_id) + root_id = [resource for resource in resources["items"] if resource["path"] == "/"][ + 0 + ]["id"] + + client.put_method( + restApiId=api_id, resourceId=root_id, httpMethod="GET", authorizationType="NONE" + ) + client.put_method_response( + restApiId=api_id, 
resourceId=root_id, httpMethod="GET", statusCode="200" + ) + client.put_integration( + restApiId=api_id, + resourceId=root_id, + httpMethod="GET", + type="HTTP", + uri="http://httpbin.org/robots.txt", + integrationHttpMethod="POST", + ) + + with assert_raises(ClientError) as ex: + client.put_integration_response( + restApiId=api_id, resourceId=root_id, httpMethod="GET", statusCode="200" + ) + + ex.exception.response["Error"]["Code"].should.equal("BadRequestException") + ex.exception.response["Error"]["Message"].should.equal("Invalid request input") + + client.put_integration_response( + restApiId=api_id, + resourceId=root_id, + httpMethod="GET", + statusCode="200", + selectionPattern= "foobar", + responseTemplates={"application/json": json.dumps({"data":"test"})}) + + + response = client.get_integration_response( + restApiId=api_id, resourceId=root_id, httpMethod="GET", statusCode="200" + ) + + # this is hard to match against, so remove it + response["ResponseMetadata"].pop("HTTPHeaders", None) + response["ResponseMetadata"].pop("RetryAttempts", None) + response.should.equal( + { + "statusCode": "200", + "selectionPattern": "foobar", + "ResponseMetadata": {"HTTPStatusCode": 200}, + "responseTemplates": {"application/json": json.dumps({"data":"test"})}, + } + ) @mock_apigateway def test_put_integration_validation(): From 4365c2bd4ed6bad601d2af6a4b6b5531efe896c6 Mon Sep 17 00:00:00 2001 From: James Belleau Date: Sun, 3 May 2020 18:13:40 -0500 Subject: [PATCH 322/658] Added network functions --- moto/__init__.py | 1 + moto/managedblockchain/__init__.py | 9 ++ moto/managedblockchain/exceptions.py | 16 ++ moto/managedblockchain/models.py | 151 ++++++++++++++++++ moto/managedblockchain/responses.py | 67 ++++++++ moto/managedblockchain/urls.py | 9 ++ moto/managedblockchain/utils.py | 23 +++ .../test_managedblockchain_networks.py | 53 ++++++ 8 files changed, 329 insertions(+) create mode 100644 moto/managedblockchain/__init__.py create mode 100644 moto/managedblockchain/exceptions.py create mode 100644 moto/managedblockchain/models.py create mode 100644 moto/managedblockchain/responses.py create mode 100644 moto/managedblockchain/urls.py create mode 100644 moto/managedblockchain/utils.py create mode 100644 tests/test_managedblockchain/test_managedblockchain_networks.py diff --git a/moto/__init__.py b/moto/__init__.py index 79c1555d3088..d3fa7b8aad09 100644 --- a/moto/__init__.py +++ b/moto/__init__.py @@ -75,6 +75,7 @@ def f(*args, **kwargs): mock_kms_deprecated = lazy_load(".kms", "mock_kms_deprecated") mock_logs = lazy_load(".logs", "mock_logs") mock_logs_deprecated = lazy_load(".logs", "mock_logs_deprecated") +mock_managedblockchain = lazy_load(".managedblockchain", "mock_managedblockchain") mock_opsworks = lazy_load(".opsworks", "mock_opsworks") mock_opsworks_deprecated = lazy_load(".opsworks", "mock_opsworks_deprecated") mock_organizations = lazy_load(".organizations", "mock_organizations") diff --git a/moto/managedblockchain/__init__.py b/moto/managedblockchain/__init__.py new file mode 100644 index 000000000000..a95fa73515de --- /dev/null +++ b/moto/managedblockchain/__init__.py @@ -0,0 +1,9 @@ +from __future__ import unicode_literals +from .models import managedblockchain_backends +from ..core.models import base_decorator, deprecated_base_decorator + +managedblockchain_backend = managedblockchain_backends["us-east-1"] +mock_managedblockchain = base_decorator(managedblockchain_backends) +mock_managedblockchain_deprecated = deprecated_base_decorator( + managedblockchain_backends +) diff 
--git a/moto/managedblockchain/exceptions.py b/moto/managedblockchain/exceptions.py new file mode 100644 index 000000000000..3195d7c34c3d --- /dev/null +++ b/moto/managedblockchain/exceptions.py @@ -0,0 +1,16 @@ +from __future__ import unicode_literals +from moto.core.exceptions import RESTError + + +class ManagedBlockchainClientError(RESTError): + code = 400 + + +class BadRequestException(ManagedBlockchainClientError): + def __init__(self, pretty_called_method, operation_error): + super(BadRequestException, self).__init__( + "BadRequestException", + "An error occurred (BadRequestException) when calling the {0} operation: {1}".format( + pretty_called_method, operation_error + ), + ) diff --git a/moto/managedblockchain/models.py b/moto/managedblockchain/models.py new file mode 100644 index 000000000000..32e9ebbb58ed --- /dev/null +++ b/moto/managedblockchain/models.py @@ -0,0 +1,151 @@ +from __future__ import unicode_literals + +import datetime + +from boto3 import Session + +from moto.core import BaseBackend, BaseModel + +from .exceptions import BadRequestException + +from .utils import get_network_id + +FRAMEWORKS = [ + "HYPERLEDGER_FABRIC", +] + +FRAMEWORKVERSIONS = [ + "1.2", +] + +EDITIONS = [ + "STARTER", + "STANDARD", +] + + +class ManagedBlockchainNetwork(BaseModel): + def __init__( + self, + id, + name, + framework, + frameworkversion, + frameworkconfiguration, + voting_policy, + member_configuration, + region, + description=None, + ): + self.st = datetime.datetime.now(datetime.timezone.utc) + self.id = id + self.name = name + self.description = description + self.framework = framework + self.frameworkversion = frameworkversion + self.frameworkconfiguration = frameworkconfiguration + self.voting_policy = voting_policy + self.member_configuration = member_configuration + self.region = region + + def to_dict(self): + frameworkattributes = { + "Fabric": { + "OrderingServiceEndpoint": "orderer.{0}.managedblockchain.{1}.amazonaws.com:30001".format( + self.id, self.region + ), + "Edition": self.frameworkconfiguration["Fabric"]["Edition"], + } + } + + vpcendpointname = "com.amazonaws.{0}.managedblockchain.{1}".format( + self.region, self.id + ) + # Use iso_8601_datetime_with_milliseconds ? 
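# Aside: the question in the comment above, checked standalone -- %z on a
# naive datetime (utcnow() carries no tzinfo) renders as an empty string, so
# no UTC offset is emitted:
import datetime as _dt
_ts = _dt.datetime(2020, 5, 3, 18, 13, 40, 123456)
assert _ts.strftime("%Y-%m-%dT%H:%M:%S.%f%z") == "2020-05-03T18:13:40.123456"
# Assumption, based on its use elsewhere in moto: iso_8601_datetime_with_milliseconds
# would instead yield the "...T18:13:40.123Z" millisecond form.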
+ d = { + "Id": self.id, + "Name": self.name, + "Framework": self.framework, + "FrameworkVersion": self.frameworkversion, + "FrameworkAttributes": frameworkattributes, + "VpcEndpointServiceName": vpcendpointname, + "VotingPolicy": self.voting_policy, + "Status": "AVAILABLE", + "CreationDate": self.st.strftime("%Y-%m-%dT%H:%M:%S.%f%z"), + } + if self.description is not None: + d["Description"] = self.description + return d + + +class ManagedBlockchainBackend(BaseBackend): + def __init__(self, region_name): + self.networks = {} + self.region_name = region_name + + def reset(self): + region_name = self.region_name + self.__dict__ = {} + self.__init__(region_name) + + def create_network( + self, + json_body, + ): + name = json_body["Name"] + framework = json_body["Framework"] + frameworkversion = json_body["FrameworkVersion"] + frameworkconfiguration = json_body["FrameworkConfiguration"] + voting_policy = json_body["VotingPolicy"] + member_configuration = json_body["MemberConfiguration"] + + # Check framework + if framework not in FRAMEWORKS: + raise BadRequestException("CreateNetwork", "Invalid request body") + + # Check framework version + if frameworkversion not in FRAMEWORKVERSIONS: + raise BadRequestException( + "CreateNetwork", + "Invalid version {0} requested for framework HYPERLEDGER_FABRIC".format( + frameworkversion + ), + ) + + # Check edition + if frameworkconfiguration["Fabric"]["Edition"] not in EDITIONS: + raise BadRequestException("CreateNetwork", "Invalid request body") + + ## Generate network ID + network_id = get_network_id() + + self.networks[network_id] = ManagedBlockchainNetwork( + id=network_id, + name=name, + framework=framework, + frameworkversion=frameworkversion, + frameworkconfiguration=frameworkconfiguration, + voting_policy=voting_policy, + member_configuration=member_configuration, + region=self.region_name, + ) + + def list_networks(self): + return self.networks.values() + + def get_network(self, network_id): + return self.networks[network_id] + + + +managedblockchain_backends = {} +for region in Session().get_available_regions("managedblockchain"): + managedblockchain_backends[region] = ManagedBlockchainBackend(region) +for region in Session().get_available_regions( + "managedblockchain", partition_name="aws-us-gov" +): + managedblockchain_backends[region] = ManagedBlockchainBackend(region) +for region in Session().get_available_regions( + "managedblockchain", partition_name="aws-cn" +): + managedblockchain_backends[region] = ManagedBlockchainBackend(region) diff --git a/moto/managedblockchain/responses.py b/moto/managedblockchain/responses.py new file mode 100644 index 000000000000..ff7c5ff5c0fc --- /dev/null +++ b/moto/managedblockchain/responses.py @@ -0,0 +1,67 @@ +from __future__ import unicode_literals + +import json +from six.moves.urllib.parse import urlparse, parse_qs + +from moto.core.responses import BaseResponse +from .models import managedblockchain_backends +from .utils import region_from_managedblckchain_url, networkid_from_managedblockchain_url + + +class ManagedBlockchainResponse(BaseResponse): + def __init__(self, backend): + super(ManagedBlockchainResponse, self).__init__() + self.backend = backend + + @classmethod + def network_response(clazz, request, full_url, headers): + region_name = region_from_managedblckchain_url(full_url) + response_instance = ManagedBlockchainResponse(managedblockchain_backends[region_name]) + return response_instance._network_response(request, full_url, headers) + + def _network_response(self, request, full_url, 
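# Aside: the request validation in create_network above boils down to
# membership checks against the module constants; a standalone sketch
# (ValueError stands in for BadRequestException):
FRAMEWORKS_ = ["HYPERLEDGER_FABRIC"]
FRAMEWORKVERSIONS_ = ["1.2"]
EDITIONS_ = ["STARTER", "STANDARD"]

def validate_network(framework, version, edition):
    if framework not in FRAMEWORKS_:
        raise ValueError("Invalid request body")
    if version not in FRAMEWORKVERSIONS_:
        raise ValueError(
            "Invalid version {0} requested for framework HYPERLEDGER_FABRIC".format(version)
        )
    if edition not in EDITIONS_:
        raise ValueError("Invalid request body")

validate_network("HYPERLEDGER_FABRIC", "1.2", "STARTER")  # passes silently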
headers):
+        method = request.method
+        if hasattr(request, "body"):
+            body = request.body
+        else:
+            body = request.data
+        parsed_url = urlparse(full_url)
+        querystring = parse_qs(parsed_url.query, keep_blank_values=True)
+        if method == "GET":
+            return self._all_networks_response(request, full_url, headers)
+        elif method == "POST":
+            json_body = json.loads(body.decode("utf-8"))
+            return self._network_response_post(json_body, querystring, headers)
+
+    def _all_networks_response(self, request, full_url, headers):
+        mbcnetworks = self.backend.list_networks()
+        response = json.dumps(
+            {"Networks": [mbcnetwork.to_dict() for mbcnetwork in mbcnetworks]}
+        )
+        headers["content-type"] = "application/json"
+        return 200, headers, response
+
+    def _network_response_post(self, json_body, querystring, headers):
+        self.backend.create_network(json_body)
+        return 201, headers, ""
+
+    @classmethod
+    def networkid_response(clazz, request, full_url, headers):
+        region_name = region_from_managedblckchain_url(full_url)
+        response_instance = ManagedBlockchainResponse(managedblockchain_backends[region_name])
+        return response_instance._networkid_response(request, full_url, headers)
+
+    def _networkid_response(self, request, full_url, headers):
+        method = request.method
+
+        if method == "GET":
+            network_id = networkid_from_managedblockchain_url(full_url)
+            return self._networkid_response_get(network_id, headers)
+
+    def _networkid_response_get(self, network_id, headers):
+        mbcnetwork = self.backend.get_network(network_id)
+        response = json.dumps(
+            {"Network": mbcnetwork.to_dict()}
+        )
+        headers["content-type"] = "application/json"
+        return 200, headers, response
diff --git a/moto/managedblockchain/urls.py
new file mode 100644
index 000000000000..806d11926bc9
--- /dev/null
+++ b/moto/managedblockchain/urls.py
@@ -0,0 +1,9 @@
+from __future__ import unicode_literals
+from .responses import ManagedBlockchainResponse
+
+url_bases = ["https?://managedblockchain.(.+).amazonaws.com"]
+
+url_paths = {
+    "{0}/networks$": ManagedBlockchainResponse.network_response,
+    "{0}/networks/(?P<networkid>[^/.]+)$": ManagedBlockchainResponse.networkid_response,
+}
diff --git a/moto/managedblockchain/utils.py
new file mode 100644
index 000000000000..687b7990b420
--- /dev/null
+++ b/moto/managedblockchain/utils.py
@@ -0,0 +1,23 @@
+import random
+import string
+
+from six.moves.urllib.parse import urlparse
+
+
+def region_from_managedblckchain_url(url):
+    domain = urlparse(url).netloc
+
+    if "."
in domain: + return domain.split(".")[1] + else: + return "us-east-1" + + +def networkid_from_managedblockchain_url(full_url): + return full_url.split("/")[-1] + + +def get_network_id(): + return "n-" + "".join( + random.choice(string.ascii_uppercase + string.digits) for _ in range(26) + ) diff --git a/tests/test_managedblockchain/test_managedblockchain_networks.py b/tests/test_managedblockchain/test_managedblockchain_networks.py new file mode 100644 index 000000000000..7bdc0ec59355 --- /dev/null +++ b/tests/test_managedblockchain/test_managedblockchain_networks.py @@ -0,0 +1,53 @@ +from __future__ import unicode_literals + +import boto3 +import sure # noqa + +from moto import mock_managedblockchain + + +@mock_managedblockchain +def test_create_network(): + conn = boto3.client("managedblockchain", region_name="us-east-1") + + frameworkconfiguration = {"Fabric": {"Edition": "STARTER"}} + + votingpolicy = { + "ApprovalThresholdPolicy": { + "ThresholdPercentage": 50, + "ProposalDurationInHours": 24, + "ThresholdComparator": "GREATER_THAN_OR_EQUAL_TO", + } + } + + memberconfiguration = { + "Name": "testmember1", + "Description": "Test Member 1", + "FrameworkConfiguration": { + "Fabric": {"AdminUsername": "admin", "AdminPassword": "Admin12345"} + }, + "LogPublishingConfiguration": { + "Fabric": {"CaLogs": {"Cloudwatch": {"Enabled": False}}} + }, + } + + conn.create_network( + Name="testnetwork1", + Description="Test Network 1", + Framework="HYPERLEDGER_FABRIC", + FrameworkVersion="1.2", + FrameworkConfiguration=frameworkconfiguration, + VotingPolicy=votingpolicy, + MemberConfiguration=memberconfiguration, + ) + + # Find in full list + response = conn.list_networks() + mbcnetworks = response["Networks"] + mbcnetworks.should.have.length_of(1) + mbcnetworks[0]["Name"].should.equal("testnetwork1") + + # Get network details + network_id = mbcnetworks[0]["Id"] + response = conn.get_network(NetworkId=network_id) + response["Network"]["Name"].should.equal("testnetwork1") From 353bc08ac2f4a4af82987f1fa82ef28d8d4b4584 Mon Sep 17 00:00:00 2001 From: Bert Blommers Date: Mon, 4 May 2020 09:24:46 +0100 Subject: [PATCH 323/658] Linting --- moto/ses/exceptions.py | 9 ++++- moto/ses/models.py | 12 ++++-- moto/ses/responses.py | 36 +++++++++++------- tests/test_ses/test_ses.py | 18 ++++++--- tests/test_ses/test_ses_boto3.py | 64 ++++++++++++++++---------------- 5 files changed, 81 insertions(+), 58 deletions(-) diff --git a/moto/ses/exceptions.py b/moto/ses/exceptions.py index f57eadf770d9..c154731883ff 100644 --- a/moto/ses/exceptions.py +++ b/moto/ses/exceptions.py @@ -8,15 +8,20 @@ class MessageRejectedError(RESTError): def __init__(self, message): super(MessageRejectedError, self).__init__("MessageRejected", message) + class ConfigurationSetDoesNotExist(RESTError): code = 400 def __init__(self, message): - super(ConfigurationSetDoesNotExist, self).__init__("ConfigurationSetDoesNotExist", message) + super(ConfigurationSetDoesNotExist, self).__init__( + "ConfigurationSetDoesNotExist", message + ) class EventDestinationAlreadyExists(RESTError): code = 400 def __init__(self, message): - super(EventDestinationAlreadyExists, self).__init__("EventDestinationAlreadyExists", message) + super(EventDestinationAlreadyExists, self).__init__( + "EventDestinationAlreadyExists", message + ) diff --git a/moto/ses/models.py b/moto/ses/models.py index 62068e5a980f..d141e25ae8a9 100644 --- a/moto/ses/models.py +++ b/moto/ses/models.py @@ -6,7 +6,11 @@ from moto.core import BaseBackend, BaseModel from moto.sns.models import 
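# Aside: get_network_id() above produces "n-" plus 26 characters drawn from
# [A-Z0-9]; a quick standalone check of that shape:
import random, re, string
_network_id = "n-" + "".join(
    random.choice(string.ascii_uppercase + string.digits) for _ in range(26)
)
assert re.fullmatch(r"n-[A-Z0-9]{26}", _network_id) is not None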
sns_backends -from .exceptions import MessageRejectedError,ConfigurationSetDoesNotExist,EventDestinationAlreadyExists +from .exceptions import ( + MessageRejectedError, + ConfigurationSetDoesNotExist, + EventDestinationAlreadyExists, +) from .utils import get_random_message_id from .feedback import COMMON_MAIL, BOUNCE, COMPLAINT, DELIVERY @@ -123,7 +127,7 @@ def send_email(self, source, subject, body, destinations, region): if recipient_count > RECIPIENT_LIMIT: raise MessageRejectedError("Too many recipients.") if not self._is_verified_address(source): - self.rejected_messages_count+=1 + self.rejected_messages_count += 1 raise MessageRejectedError("Email address not verified %s" % source) self.__process_sns_feedback__(source, destinations, region) @@ -248,7 +252,9 @@ def create_configuration_set(self, configuration_set_name): self.config_set[configuration_set_name] = 1 return {} - def create_configuration_set_event_destination(self,configuration_set_name, event_destination): + def create_configuration_set_event_destination( + self, configuration_set_name, event_destination + ): if self.config_set.get(configuration_set_name) is None: raise ConfigurationSetDoesNotExist("Invalid Configuration Set Name.") diff --git a/moto/ses/responses.py b/moto/ses/responses.py index 8bf7bd9420b6..62893094a036 100644 --- a/moto/ses/responses.py +++ b/moto/ses/responses.py @@ -140,34 +140,42 @@ def get_send_statistics(self): def create_configuration_set(self): configuration_set_name = self.querystring.get("ConfigurationSet.Name")[0] - ses_backend.create_configuration_set(configuration_set_name=configuration_set_name) + ses_backend.create_configuration_set( + configuration_set_name=configuration_set_name + ) template = self.response_template(CREATE_CONFIGURATION_SET) return template.render() def create_configuration_set_event_destination(self): - configuration_set_name = self._get_param('ConfigurationSetName') - is_configuration_event_enabled = self.querystring.get("EventDestination.Enabled")[0] + configuration_set_name = self._get_param("ConfigurationSetName") + is_configuration_event_enabled = self.querystring.get( + "EventDestination.Enabled" + )[0] configuration_event_name = self.querystring.get("EventDestination.Name")[0] - event_topic_arn = self.querystring.get("EventDestination.SNSDestination.TopicARN")[0] - event_matching_types = self._get_multi_param("EventDestination.MatchingEventTypes.member") + event_topic_arn = self.querystring.get( + "EventDestination.SNSDestination.TopicARN" + )[0] + event_matching_types = self._get_multi_param( + "EventDestination.MatchingEventTypes.member" + ) - event_destination = {"Name":configuration_event_name, - "Enabled":is_configuration_event_enabled, - "EventMatchingTypes":event_matching_types, - "SNSDestination":event_topic_arn - } + event_destination = { + "Name": configuration_event_name, + "Enabled": is_configuration_event_enabled, + "EventMatchingTypes": event_matching_types, + "SNSDestination": event_topic_arn, + } ses_backend.create_configuration_set_event_destination( - configuration_set_name=configuration_set_name, - event_destination=event_destination - ) + configuration_set_name=configuration_set_name, + event_destination=event_destination, + ) template = self.response_template(CREATE_CONFIGURATION_SET_EVENT_DESTINATION) return template.render() - VERIFY_EMAIL_IDENTITY = """ diff --git a/tests/test_ses/test_ses.py b/tests/test_ses/test_ses.py index 637931572a5c..719e4ede96a1 100644 --- a/tests/test_ses/test_ses.py +++ b/tests/test_ses/test_ses.py @@ -144,8 
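# Aside: note the key rename the SES handler above performs -- the boto3
# request field is MatchingEventTypes, while the backend stores
# EventMatchingTypes and flattens SNSDestination down to the bare TopicARN.
# A standalone sketch of that translation (ARN value illustrative):
_request = {
    "Name": "snsEvent",
    "Enabled": True,
    "MatchingEventTypes": ["send"],
    "SNSDestination": {"TopicARN": "arn:aws:sns:us-east-1:123456789012:myTopic"},
}
_stored = {
    "Name": _request["Name"],
    "Enabled": _request["Enabled"],
    "EventMatchingTypes": _request["MatchingEventTypes"],
    "SNSDestination": _request["SNSDestination"]["TopicARN"],
}
assert _stored["SNSDestination"].endswith(":myTopic")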
+144,12 @@ def test_get_send_statistics(): # tests to verify rejects in get_send_statistics result = conn.get_send_statistics() - reject_count = int(result["GetSendStatisticsResponse"]["SendDataPoints"][0]["Rejects"]) - delivery_count = int(result["GetSendStatisticsResponse"]["SendDataPoints"][0]["DeliveryAttempts"]) + reject_count = int( + result["GetSendStatisticsResponse"]["SendDataPoints"][0]["Rejects"] + ) + delivery_count = int( + result["GetSendStatisticsResponse"]["SendDataPoints"][0]["DeliveryAttempts"] + ) reject_count.should.equal(1) delivery_count.should.equal(0) @@ -157,9 +161,11 @@ def test_get_send_statistics(): # tests to delivery attempts in get_send_statistics result = conn.get_send_statistics() - reject_count = int(result["GetSendStatisticsResponse"]["SendDataPoints"][0]["Rejects"]) - delivery_count = int(result["GetSendStatisticsResponse"]["SendDataPoints"][0]["DeliveryAttempts"]) + reject_count = int( + result["GetSendStatisticsResponse"]["SendDataPoints"][0]["Rejects"] + ) + delivery_count = int( + result["GetSendStatisticsResponse"]["SendDataPoints"][0]["DeliveryAttempts"] + ) reject_count.should.equal(1) delivery_count.should.equal(1) - - diff --git a/tests/test_ses/test_ses_boto3.py b/tests/test_ses/test_ses_boto3.py index e14abda3f449..0e6bb9bea18f 100644 --- a/tests/test_ses/test_ses_boto3.py +++ b/tests/test_ses/test_ses_boto3.py @@ -230,52 +230,50 @@ def test_send_email_notification_with_encoded_sender(): ) response["ResponseMetadata"]["HTTPStatusCode"].should.equal(200) + @mock_ses def test_create_configuration_set(): conn = boto3.client("ses", region_name="us-east-1") conn.create_configuration_set(ConfigurationSet=dict({"Name": "test"})) conn.create_configuration_set_event_destination( - ConfigurationSetName='test', - EventDestination={ - 'Name': 'snsEvent', - 'Enabled': True, - 'MatchingEventTypes': [ - 'send', - ], - 'SNSDestination': { - 'TopicARN': 'arn:aws:sns:us-east-1:123456789012:myTopic' - } - }) + ConfigurationSetName="test", + EventDestination={ + "Name": "snsEvent", + "Enabled": True, + "MatchingEventTypes": ["send",], + "SNSDestination": { + "TopicARN": "arn:aws:sns:us-east-1:123456789012:myTopic" + }, + }, + ) with assert_raises(ClientError) as ex: conn.create_configuration_set_event_destination( - ConfigurationSetName='failtest', + ConfigurationSetName="failtest", EventDestination={ - 'Name': 'snsEvent', - 'Enabled': True, - 'MatchingEventTypes': [ - 'send', - ], - 'SNSDestination': { - 'TopicARN': 'arn:aws:sns:us-east-1:123456789012:myTopic' - } - }) + "Name": "snsEvent", + "Enabled": True, + "MatchingEventTypes": ["send",], + "SNSDestination": { + "TopicARN": "arn:aws:sns:us-east-1:123456789012:myTopic" + }, + }, + ) ex.exception.response["Error"]["Code"].should.equal("ConfigurationSetDoesNotExist") with assert_raises(ClientError) as ex: conn.create_configuration_set_event_destination( - ConfigurationSetName='test', + ConfigurationSetName="test", EventDestination={ - 'Name': 'snsEvent', - 'Enabled': True, - 'MatchingEventTypes': [ - 'send', - ], - 'SNSDestination': { - 'TopicARN': 'arn:aws:sns:us-east-1:123456789012:myTopic' - } - }) - - ex.exception.response["Error"]["Code"].should.equal("EventDestinationAlreadyExists") \ No newline at end of file + "Name": "snsEvent", + "Enabled": True, + "MatchingEventTypes": ["send",], + "SNSDestination": { + "TopicARN": "arn:aws:sns:us-east-1:123456789012:myTopic" + }, + }, + ) + + ex.exception.response["Error"]["Code"].should.equal("EventDestinationAlreadyExists") From 
d6875c25cc369c6704e3d5560045d2d6e080b7f8 Mon Sep 17 00:00:00 2001 From: Bert Blommers Date: Mon, 4 May 2020 09:27:57 +0100 Subject: [PATCH 324/658] Linting --- moto/apigateway/models.py | 10 +++++++--- tests/test_apigateway/test_apigateway.py | 10 ++++++---- 2 files changed, 13 insertions(+), 7 deletions(-) diff --git a/moto/apigateway/models.py b/moto/apigateway/models.py index d1b4300681b4..4513c75abed7 100644 --- a/moto/apigateway/models.py +++ b/moto/apigateway/models.py @@ -57,7 +57,7 @@ def __init__(self, deployment_id, name, description=""): class IntegrationResponse(BaseModel, dict): def __init__(self, status_code, selection_pattern=None, response_templates=None): - if response_templates == None: + if response_templates is None: response_templates = {"application/json": None} self["responseTemplates"] = response_templates self["statusCode"] = status_code @@ -74,10 +74,14 @@ def __init__(self, integration_type, uri, http_method, request_templates=None): self["requestTemplates"] = request_templates self["integrationResponses"] = {"200": IntegrationResponse(200)} - def create_integration_response(self, status_code, selection_pattern, response_templates): + def create_integration_response( + self, status_code, selection_pattern, response_templates + ): if response_templates == {}: response_templates = None - integration_response = IntegrationResponse(status_code, selection_pattern, response_templates) + integration_response = IntegrationResponse( + status_code, selection_pattern, response_templates + ) self["integrationResponses"][status_code] = integration_response return integration_response diff --git a/tests/test_apigateway/test_apigateway.py b/tests/test_apigateway/test_apigateway.py index 0ad815972873..295cd1c54a5f 100644 --- a/tests/test_apigateway/test_apigateway.py +++ b/tests/test_apigateway/test_apigateway.py @@ -1230,6 +1230,7 @@ def test_put_integration_response_requires_responseTemplate(): responseTemplates={}, ) + @mock_apigateway def test_put_integration_response_with_response_template(): client = boto3.client("apigateway", region_name="us-west-2") @@ -1268,9 +1269,9 @@ def test_put_integration_response_with_response_template(): resourceId=root_id, httpMethod="GET", statusCode="200", - selectionPattern= "foobar", - responseTemplates={"application/json": json.dumps({"data":"test"})}) - + selectionPattern="foobar", + responseTemplates={"application/json": json.dumps({"data": "test"})}, + ) response = client.get_integration_response( restApiId=api_id, resourceId=root_id, httpMethod="GET", statusCode="200" @@ -1284,10 +1285,11 @@ def test_put_integration_response_with_response_template(): "statusCode": "200", "selectionPattern": "foobar", "ResponseMetadata": {"HTTPStatusCode": 200}, - "responseTemplates": {"application/json": json.dumps({"data":"test"})}, + "responseTemplates": {"application/json": json.dumps({"data": "test"})}, } ) + @mock_apigateway def test_put_integration_validation(): client = boto3.client("apigateway", region_name="us-west-2") From e1baca1569c538cd4771c066d73c2560b8cb60c3 Mon Sep 17 00:00:00 2001 From: Chagui- Date: Tue, 5 May 2020 18:08:28 -0400 Subject: [PATCH 325/658] Implemented parent_group, recursive and name_prefix_filter for function list_thing_groups() --- moto/iot/models.py | 26 ++++++++++++++++++++++++-- moto/iot/responses.py | 2 +- 2 files changed, 25 insertions(+), 3 deletions(-) diff --git a/moto/iot/models.py b/moto/iot/models.py index 2e9979bda203..5b74b353cb21 100644 --- a/moto/iot/models.py +++ b/moto/iot/models.py @@ -857,8 +857,30 
@@ def delete_thing_group(self, thing_group_name, expected_version): del self.thing_groups[thing_group.arn] def list_thing_groups(self, parent_group, name_prefix_filter, recursive): - thing_groups = self.thing_groups.values() - return thing_groups + if recursive is None: + recursive = True + if name_prefix_filter is None: + name_prefix_filter = "" + if parent_group and parent_group not in [ + _.thing_group_name for _ in self.thing_groups.values() + ]: + raise ResourceNotFoundException() + thing_groups = [ + _ for _ in self.thing_groups.values() if _.parent_group_name == parent_group + ] + if recursive: + for g in thing_groups: + thing_groups.extend( + self.list_thing_groups( + parent_group=g.thing_group_name, + name_prefix_filter=None, + recursive=False, + ) + ) + # thing_groups = groups_to_process.values() + return [ + _ for _ in thing_groups if _.thing_group_name.startswith(name_prefix_filter) + ] def update_thing_group( self, thing_group_name, thing_group_properties, expected_version diff --git a/moto/iot/responses.py b/moto/iot/responses.py index c12d4b5c5ebb..07a8c10c2299 100644 --- a/moto/iot/responses.py +++ b/moto/iot/responses.py @@ -535,7 +535,7 @@ def list_thing_groups(self): # max_results = self._get_int_param("maxResults") parent_group = self._get_param("parentGroup") name_prefix_filter = self._get_param("namePrefixFilter") - recursive = self._get_param("recursive") + recursive = self._get_bool_param("recursive") thing_groups = self.iot_backend.list_thing_groups( parent_group=parent_group, name_prefix_filter=name_prefix_filter, From e114eb9383e84099951ffad49af4d172d12863b1 Mon Sep 17 00:00:00 2001 From: Chagui- Date: Tue, 5 May 2020 18:08:56 -0400 Subject: [PATCH 326/658] Added test test_list_thing_groups() --- tests/test_iot/test_iot.py | 134 +++++++++++++++++++++++++++++++++++++ 1 file changed, 134 insertions(+) diff --git a/tests/test_iot/test_iot.py b/tests/test_iot/test_iot.py index 58a820fee6f4..edf6235329ab 100644 --- a/tests/test_iot/test_iot.py +++ b/tests/test_iot/test_iot.py @@ -756,6 +756,140 @@ def test_delete_principal_thing(): client.delete_certificate(certificateId=cert_id) +@mock_iot +def test_list_thing_groups(): + client = boto3.client("iot", region_name="ap-northeast-1") + group_name_1a = "my-group-name-1a" + group_name_1b = "my-group-name-1b" + group_name_2a = "my-group-name-2a" + group_name_2b = "my-group-name-2b" + group_name_3a = "my-group-name-3a" + group_name_3b = "my-group-name-3b" + group_name_3c = "my-group-name-3c" + group_name_3d = "my-group-name-3d" + + # --1a + # |--2a + # | |--3a + # | |--3b + # | + # |--2b + # |--3c + # |--3d + # --1b + + # create thing groups tree + # 1 + thing_group1a = client.create_thing_group(thingGroupName=group_name_1a) + thing_group1a.should.have.key("thingGroupName").which.should.equal(group_name_1a) + thing_group1a.should.have.key("thingGroupArn") + thing_group1b = client.create_thing_group(thingGroupName=group_name_1b) + thing_group1b.should.have.key("thingGroupName").which.should.equal(group_name_1b) + thing_group1b.should.have.key("thingGroupArn") + # 2 + thing_group2a = client.create_thing_group( + thingGroupName=group_name_2a, parentGroupName=group_name_1a + ) + thing_group2a.should.have.key("thingGroupName").which.should.equal(group_name_2a) + thing_group2a.should.have.key("thingGroupArn") + thing_group2b = client.create_thing_group( + thingGroupName=group_name_2b, parentGroupName=group_name_1a + ) + thing_group2b.should.have.key("thingGroupName").which.should.equal(group_name_2b) + 
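# Aside: a standalone model of the recursive listing implemented above. The
# loop extends the list it is iterating over, so grandchildren appended
# mid-loop are themselves visited, and the whole subtree is walked even
# though each recursive call passes recursive=False:
_groups = {  # name -> parent (None = root), mirroring the test tree below
    "1a": None, "1b": None,
    "2a": "1a", "2b": "1a",
    "3a": "2a", "3b": "2a", "3c": "2b", "3d": "2b",
}

def _list_groups(parent=None, prefix="", recursive=True):
    found = [g for g, p in _groups.items() if p == parent]
    if recursive:
        for g in found:  # found grows while we iterate
            found.extend(_list_groups(parent=g, recursive=False))
    return [g for g in found if g.startswith(prefix)]

assert len(_list_groups()) == 8
assert sorted(_list_groups(parent="1a")) == ["2a", "2b", "3a", "3b", "3c", "3d"]
assert _list_groups(prefix="3") == ["3a", "3b", "3c", "3d"]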
thing_group2b.should.have.key("thingGroupArn") + # 3 + thing_group3a = client.create_thing_group( + thingGroupName=group_name_3a, parentGroupName=group_name_2a + ) + thing_group3a.should.have.key("thingGroupName").which.should.equal(group_name_3a) + thing_group3a.should.have.key("thingGroupArn") + thing_group3b = client.create_thing_group( + thingGroupName=group_name_3b, parentGroupName=group_name_2a + ) + thing_group3b.should.have.key("thingGroupName").which.should.equal(group_name_3b) + thing_group3b.should.have.key("thingGroupArn") + thing_group3c = client.create_thing_group( + thingGroupName=group_name_3c, parentGroupName=group_name_2b + ) + thing_group3c.should.have.key("thingGroupName").which.should.equal(group_name_3c) + thing_group3c.should.have.key("thingGroupArn") + thing_group3d = client.create_thing_group( + thingGroupName=group_name_3d, parentGroupName=group_name_2b + ) + thing_group3d.should.have.key("thingGroupName").which.should.equal(group_name_3d) + thing_group3d.should.have.key("thingGroupArn") + + # begin tests + # should list all groups + resp = client.list_thing_groups() + resp.should.have.key("thingGroups") + resp["thingGroups"].should.have.length_of(8) + # should list all groups non-recursively + resp = client.list_thing_groups(recursive=False) + resp.should.have.key("thingGroups") + resp["thingGroups"].should.have.length_of(2) + + # should list all groups filtered by parent + resp = client.list_thing_groups(parentGroup=group_name_1a) + resp.should.have.key("thingGroups") + resp["thingGroups"].should.have.length_of(6) + resp = client.list_thing_groups(parentGroup=group_name_2a) + resp.should.have.key("thingGroups") + resp["thingGroups"].should.have.length_of(2) + resp = client.list_thing_groups(parentGroup=group_name_1b) + resp.should.have.key("thingGroups") + resp["thingGroups"].should.have.length_of(0) + try: + client.list_thing_groups(parentGroup="inexistant-group-name") + except client.exceptions.ResourceNotFoundException as exc: + error_code = exc.response["Error"]["Code"] + error_code.should.equal("ResourceNotFoundException") + else: + raise Exception("Should have raised error") + # should list all groups filtered by parent non-recursively + resp = client.list_thing_groups(parentGroup=group_name_1a, recursive=False) + resp.should.have.key("thingGroups") + resp["thingGroups"].should.have.length_of(2) + resp = client.list_thing_groups(parentGroup=group_name_2a, recursive=False) + resp.should.have.key("thingGroups") + resp["thingGroups"].should.have.length_of(2) + + # should list all groups filtered by name prefix + resp = client.list_thing_groups(namePrefixFilter="my-group-name-1") + resp.should.have.key("thingGroups") + resp["thingGroups"].should.have.length_of(2) + resp = client.list_thing_groups(namePrefixFilter="my-group-name-3") + resp.should.have.key("thingGroups") + resp["thingGroups"].should.have.length_of(4) + resp = client.list_thing_groups(namePrefixFilter="prefix-which-doesn-not-match") + resp.should.have.key("thingGroups") + resp["thingGroups"].should.have.length_of(0) + # should list all groups filtered by name prefix non-recursively + resp = client.list_thing_groups(namePrefixFilter="my-group-name-1", recursive=False) + resp.should.have.key("thingGroups") + resp["thingGroups"].should.have.length_of(2) + resp = client.list_thing_groups(namePrefixFilter="my-group-name-3", recursive=False) + resp.should.have.key("thingGroups") + resp["thingGroups"].should.have.length_of(0) + + # should list all groups filtered by name prefix and parent + resp = 
client.list_thing_groups( + namePrefixFilter="my-group-name-2", parentGroup=group_name_1a + ) + resp.should.have.key("thingGroups") + resp["thingGroups"].should.have.length_of(2) + resp = client.list_thing_groups( + namePrefixFilter="my-group-name-3", parentGroup=group_name_1a + ) + resp.should.have.key("thingGroups") + resp["thingGroups"].should.have.length_of(4) + resp = client.list_thing_groups( + namePrefixFilter="prefix-which-doesn-not-match", parentGroup=group_name_1a + ) + resp.should.have.key("thingGroups") + resp["thingGroups"].should.have.length_of(0) + + @mock_iot def test_delete_thing_group(): client = boto3.client("iot", region_name="ap-northeast-1") From 3b8c8fafe2a4c3ee1b1c70a0763fe1233b28086d Mon Sep 17 00:00:00 2001 From: gruebel Date: Wed, 6 May 2020 14:38:25 +0200 Subject: [PATCH 327/658] Fix ssm.get_parameters missing validation --- moto/ssm/models.py | 10 ++++++++++ tests/test_ssm/test_ssm_boto3.py | 27 +++++++++++++++++++++++++++ 2 files changed, 37 insertions(+) diff --git a/moto/ssm/models.py b/moto/ssm/models.py index 3ce3b3a227cc..67216972ed05 100644 --- a/moto/ssm/models.py +++ b/moto/ssm/models.py @@ -514,6 +514,16 @@ def get_all_parameters(self): def get_parameters(self, names, with_decryption): result = [] + + if len(names) > 10: + raise ValidationException( + "1 validation error detected: " + "Value '[{}]' at 'names' failed to satisfy constraint: " + "Member must have length less than or equal to 10.".format( + ", ".join(names) + ) + ) + for name in names: if name in self._parameters: result.append(self.get_parameter(name, with_decryption)) diff --git a/tests/test_ssm/test_ssm_boto3.py b/tests/test_ssm/test_ssm_boto3.py index 170cd8a3e51e..e757a4006536 100644 --- a/tests/test_ssm/test_ssm_boto3.py +++ b/tests/test_ssm/test_ssm_boto3.py @@ -1,5 +1,7 @@ from __future__ import unicode_literals +import string + import boto3 import botocore.exceptions import sure # noqa @@ -300,6 +302,31 @@ def test_get_parameter(): ) +@mock_ssm +def test_get_parameters_errors(): + client = boto3.client("ssm", region_name="us-east-1") + + ssm_parameters = {name: "value" for name in string.ascii_lowercase[:11]} + + for name, value in ssm_parameters.items(): + client.put_parameter(Name=name, Value=value, Type="String") + + with assert_raises(ClientError) as e: + client.get_parameters(Names=list(ssm_parameters.keys())) + ex = e.exception + ex.operation_name.should.equal("GetParameters") + ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) + ex.response["Error"]["Code"].should.contain("ValidationException") + ex.response["Error"]["Message"].should.equal( + "1 validation error detected: " + "Value '[{}]' at 'names' failed to satisfy constraint: " + "Member must have length less than or equal to 10.".format( + ", ".join(ssm_parameters.keys()) + ) + ) + print(ex.response["Error"]["Message"]) + + @mock_ssm def test_get_nonexistant_parameter(): client = boto3.client("ssm", region_name="us-east-1") From 40d1c8c9b9563a50a91dfcb9160073630772c990 Mon Sep 17 00:00:00 2001 From: Chagui- Date: Wed, 6 May 2020 09:10:42 -0400 Subject: [PATCH 328/658] Added generate_thing_group_tree function --- tests/test_iot/test_iot.py | 90 +++++++++++++++++--------------------- 1 file changed, 39 insertions(+), 51 deletions(-) diff --git a/tests/test_iot/test_iot.py b/tests/test_iot/test_iot.py index edf6235329ab..394317fc6d78 100644 --- a/tests/test_iot/test_iot.py +++ b/tests/test_iot/test_iot.py @@ -9,6 +9,37 @@ from nose.tools import assert_raises +def generate_thing_group_tree(iot_client, 
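# Aside: the SSM guard above, reduced to a standalone check (ValueError
# stands in for ValidationException):
def _check_names(names):
    if len(names) > 10:
        raise ValueError(
            "1 validation error detected: Value '[{}]' at 'names' failed to "
            "satisfy constraint: Member must have length less than or equal "
            "to 10.".format(", ".join(names))
        )

_check_names(list("abcdefghij"))  # ten names: accepted
try:
    _check_names(list("abcdefghijk"))  # eleven names: rejected
except ValueError as err:
    assert "less than or equal to 10" in str(err)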
tree_dict, _parent=None): + """ + Generates a thing group tree given the input tree structure. + :param iot_client: the iot client for boto3 + :param tree_dict: dictionary with the key being the group_name, and the value being a sub tree. + tree_dict = { + "group_name_1a":{ + "group_name_2a":{ + "group_name_3a":{} or None + }, + }, + "group_name_1b":{} + } + :return: a dictionary of created groups, keyed by group name + """ + if tree_dict is None: + tree_dict = {} + created_dict = {} + for group_name in tree_dict.keys(): + params = {"thingGroupName": group_name} + if _parent: + params["parentGroupName"] = _parent + created_group = iot_client.create_thing_group(**params) + created_dict[group_name] = created_group + subtree_dict = generate_thing_group_tree( + iot_client=iot_client, tree_dict=tree_dict[group_name], _parent=group_name + ) + created_dict = {**created_dict, **subtree_dict} + return created_dict + + @mock_iot def test_attach_policy(): client = boto3.client("iot", region_name="ap-northeast-1") @@ -767,57 +798,14 @@ def test_list_thing_groups(): group_name_3b = "my-group-name-3b" group_name_3c = "my-group-name-3c" group_name_3d = "my-group-name-3d" - - # --1a - # |--2a - # | |--3a - # | |--3b - # | - # |--2b - # |--3c - # |--3d - # --1b - - # create thing groups tree - # 1 - thing_group1a = client.create_thing_group(thingGroupName=group_name_1a) - thing_group1a.should.have.key("thingGroupName").which.should.equal(group_name_1a) - thing_group1a.should.have.key("thingGroupArn") - thing_group1b = client.create_thing_group(thingGroupName=group_name_1b) - thing_group1b.should.have.key("thingGroupName").which.should.equal(group_name_1b) - thing_group1b.should.have.key("thingGroupArn") - # 2 - thing_group2a = client.create_thing_group( - thingGroupName=group_name_2a, parentGroupName=group_name_1a - ) - thing_group2a.should.have.key("thingGroupName").which.should.equal(group_name_2a) - thing_group2a.should.have.key("thingGroupArn") - thing_group2b = client.create_thing_group( - thingGroupName=group_name_2b, parentGroupName=group_name_1a - ) - thing_group2b.should.have.key("thingGroupName").which.should.equal(group_name_2b) - thing_group2b.should.have.key("thingGroupArn") - # 3 - thing_group3a = client.create_thing_group( - thingGroupName=group_name_3a, parentGroupName=group_name_2a - ) - thing_group3a.should.have.key("thingGroupName").which.should.equal(group_name_3a) - thing_group3a.should.have.key("thingGroupArn") - thing_group3b = client.create_thing_group( - thingGroupName=group_name_3b, parentGroupName=group_name_2a - ) - thing_group3b.should.have.key("thingGroupName").which.should.equal(group_name_3b) - thing_group3b.should.have.key("thingGroupArn") - thing_group3c = client.create_thing_group( - thingGroupName=group_name_3c, parentGroupName=group_name_2b - ) - thing_group3c.should.have.key("thingGroupName").which.should.equal(group_name_3c) - thing_group3c.should.have.key("thingGroupArn") - thing_group3d = client.create_thing_group( - thingGroupName=group_name_3d, parentGroupName=group_name_2b - ) - thing_group3d.should.have.key("thingGroupName").which.should.equal(group_name_3d) - thing_group3d.should.have.key("thingGroupArn") + tree_dict = { + group_name_1a: { + group_name_2a: {group_name_3a: {} or None, group_name_3b: {} or None}, + group_name_2b: {group_name_3c: {} or None, group_name_3d: {} or None}, + }, + group_name_1b: {}, + } + group_catalog = generate_thing_group_tree(client, tree_dict) # begin tests # should list all groups From 5fd817965326ad308e28064267d44d988619d562 Mon 
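# Aside: generate_thing_group_tree above walks a nested dict of group names;
# a standalone illustration with the iot client replaced by a recording stub:
class _StubClient:
    def __init__(self):
        self.calls = []

    def create_thing_group(self, **params):
        self.calls.append(params)
        return params

def _build_tree(client, tree, _parent=None):
    created = {}
    for name, subtree in (tree or {}).items():
        params = {"thingGroupName": name}
        if _parent:
            params["parentGroupName"] = _parent
        created[name] = client.create_thing_group(**params)
        created.update(_build_tree(client, subtree, _parent=name))
    return created

_stub = _StubClient()
_created = _build_tree(_stub, {"1a": {"2a": {}}, "1b": {}})
assert sorted(_created) == ["1a", "1b", "2a"]
assert _stub.calls[1] == {"thingGroupName": "2a", "parentGroupName": "1a"}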
Sep 17 00:00:00 2001 From: Chagui- Date: Wed, 6 May 2020 09:29:16 -0400 Subject: [PATCH 329/658] Refactored test_list_thing_groups into class TestListThingGroup --- tests/test_iot/test_iot.py | 183 ++++++++++++++++++++++--------------- 1 file changed, 109 insertions(+), 74 deletions(-) diff --git a/tests/test_iot/test_iot.py b/tests/test_iot/test_iot.py index 394317fc6d78..40eb19628748 100644 --- a/tests/test_iot/test_iot.py +++ b/tests/test_iot/test_iot.py @@ -787,9 +787,7 @@ def test_delete_principal_thing(): client.delete_certificate(certificateId=cert_id) -@mock_iot -def test_list_thing_groups(): - client = boto3.client("iot", region_name="ap-northeast-1") +class TestListThingGroup: group_name_1a = "my-group-name-1a" group_name_1b = "my-group-name-1b" group_name_2a = "my-group-name-2a" @@ -805,77 +803,114 @@ def test_list_thing_groups(): }, group_name_1b: {}, } - group_catalog = generate_thing_group_tree(client, tree_dict) - - # begin tests - # should list all groups - resp = client.list_thing_groups() - resp.should.have.key("thingGroups") - resp["thingGroups"].should.have.length_of(8) - # should list all groups non-recursively - resp = client.list_thing_groups(recursive=False) - resp.should.have.key("thingGroups") - resp["thingGroups"].should.have.length_of(2) - - # should list all groups filtered by parent - resp = client.list_thing_groups(parentGroup=group_name_1a) - resp.should.have.key("thingGroups") - resp["thingGroups"].should.have.length_of(6) - resp = client.list_thing_groups(parentGroup=group_name_2a) - resp.should.have.key("thingGroups") - resp["thingGroups"].should.have.length_of(2) - resp = client.list_thing_groups(parentGroup=group_name_1b) - resp.should.have.key("thingGroups") - resp["thingGroups"].should.have.length_of(0) - try: - client.list_thing_groups(parentGroup="inexistant-group-name") - except client.exceptions.ResourceNotFoundException as exc: - error_code = exc.response["Error"]["Code"] - error_code.should.equal("ResourceNotFoundException") - else: - raise Exception("Should have raised error") - # should list all groups filtered by parent non-recursively - resp = client.list_thing_groups(parentGroup=group_name_1a, recursive=False) - resp.should.have.key("thingGroups") - resp["thingGroups"].should.have.length_of(2) - resp = client.list_thing_groups(parentGroup=group_name_2a, recursive=False) - resp.should.have.key("thingGroups") - resp["thingGroups"].should.have.length_of(2) - - # should list all groups filtered by name prefix - resp = client.list_thing_groups(namePrefixFilter="my-group-name-1") - resp.should.have.key("thingGroups") - resp["thingGroups"].should.have.length_of(2) - resp = client.list_thing_groups(namePrefixFilter="my-group-name-3") - resp.should.have.key("thingGroups") - resp["thingGroups"].should.have.length_of(4) - resp = client.list_thing_groups(namePrefixFilter="prefix-which-doesn-not-match") - resp.should.have.key("thingGroups") - resp["thingGroups"].should.have.length_of(0) - # should list all groups filtered by name prefix non-recursively - resp = client.list_thing_groups(namePrefixFilter="my-group-name-1", recursive=False) - resp.should.have.key("thingGroups") - resp["thingGroups"].should.have.length_of(2) - resp = client.list_thing_groups(namePrefixFilter="my-group-name-3", recursive=False) - resp.should.have.key("thingGroups") - resp["thingGroups"].should.have.length_of(0) - - # should list all groups filtered by name prefix and parent - resp = client.list_thing_groups( - namePrefixFilter="my-group-name-2", parentGroup=group_name_1a - ) 
- resp.should.have.key("thingGroups") - resp["thingGroups"].should.have.length_of(2) - resp = client.list_thing_groups( - namePrefixFilter="my-group-name-3", parentGroup=group_name_1a - ) - resp.should.have.key("thingGroups") - resp["thingGroups"].should.have.length_of(4) - resp = client.list_thing_groups( - namePrefixFilter="prefix-which-doesn-not-match", parentGroup=group_name_1a - ) - resp.should.have.key("thingGroups") - resp["thingGroups"].should.have.length_of(0) + + @mock_iot + def test_should_list_all_groups(self): + # setup + client = boto3.client("iot", region_name="ap-northeast-1") + group_catalog = generate_thing_group_tree(client, self.tree_dict) + # test + resp = client.list_thing_groups() + resp.should.have.key("thingGroups") + resp["thingGroups"].should.have.length_of(8) + + + @mock_iot + def test_should_list_all_groups_non_recursively(self): + # setup + client = boto3.client("iot", region_name="ap-northeast-1") + group_catalog = generate_thing_group_tree(client, self.tree_dict) + # test + resp = client.list_thing_groups(recursive=False) + resp.should.have.key("thingGroups") + resp["thingGroups"].should.have.length_of(2) + + + @mock_iot + def test_should_list_all_groups_filtered_by_parent(self): + # setup + client = boto3.client("iot", region_name="ap-northeast-1") + group_catalog = generate_thing_group_tree(client, self.tree_dict) + # test + resp = client.list_thing_groups(parentGroup=self.group_name_1a) + resp.should.have.key("thingGroups") + resp["thingGroups"].should.have.length_of(6) + resp = client.list_thing_groups(parentGroup=self.group_name_2a) + resp.should.have.key("thingGroups") + resp["thingGroups"].should.have.length_of(2) + resp = client.list_thing_groups(parentGroup=self.group_name_1b) + resp.should.have.key("thingGroups") + resp["thingGroups"].should.have.length_of(0) + with assert_raises(ClientError) as e: + client.list_thing_groups(parentGroup="inexistant-group-name") + e.exception.response["Error"]["Code"].should.equal("ResourceNotFoundException") + + @mock_iot + def test_should_list_all_groups_filtered_by_parent_non_recursively(self): + # setup + client = boto3.client("iot", region_name="ap-northeast-1") + group_catalog = generate_thing_group_tree(client, self.tree_dict) + # test + resp = client.list_thing_groups(parentGroup=self.group_name_1a, recursive=False) + resp.should.have.key("thingGroups") + resp["thingGroups"].should.have.length_of(2) + resp = client.list_thing_groups(parentGroup=self.group_name_2a, recursive=False) + resp.should.have.key("thingGroups") + resp["thingGroups"].should.have.length_of(2) + + + @mock_iot + def test_should_list_all_groups_filtered_by_name_prefix(self): + # setup + client = boto3.client("iot", region_name="ap-northeast-1") + group_catalog = generate_thing_group_tree(client, self.tree_dict) + # test + resp = client.list_thing_groups(namePrefixFilter="my-group-name-1") + resp.should.have.key("thingGroups") + resp["thingGroups"].should.have.length_of(2) + resp = client.list_thing_groups(namePrefixFilter="my-group-name-3") + resp.should.have.key("thingGroups") + resp["thingGroups"].should.have.length_of(4) + resp = client.list_thing_groups(namePrefixFilter="prefix-which-doesn-not-match") + resp.should.have.key("thingGroups") + resp["thingGroups"].should.have.length_of(0) + + + @mock_iot + def test_should_list_all_groups_filtered_by_name_prefix_non_recursively(self): + # setup + client = boto3.client("iot", region_name="ap-northeast-1") + group_catalog = generate_thing_group_tree(client, self.tree_dict) + # test + resp = 
client.list_thing_groups(namePrefixFilter="my-group-name-1", recursive=False) + resp.should.have.key("thingGroups") + resp["thingGroups"].should.have.length_of(2) + resp = client.list_thing_groups(namePrefixFilter="my-group-name-3", recursive=False) + resp.should.have.key("thingGroups") + resp["thingGroups"].should.have.length_of(0) + + + @mock_iot + def test_should_list_all_groups_filtered_by_name_prefix_and_parent(self): + # setup + client = boto3.client("iot", region_name="ap-northeast-1") + group_catalog = generate_thing_group_tree(client, self.tree_dict) + # test + resp = client.list_thing_groups( + namePrefixFilter="my-group-name-2", parentGroup=self.group_name_1a + ) + resp.should.have.key("thingGroups") + resp["thingGroups"].should.have.length_of(2) + resp = client.list_thing_groups( + namePrefixFilter="my-group-name-3", parentGroup=self.group_name_1a + ) + resp.should.have.key("thingGroups") + resp["thingGroups"].should.have.length_of(4) + resp = client.list_thing_groups( + namePrefixFilter="prefix-which-doesn-not-match", parentGroup=self.group_name_1a + ) + resp.should.have.key("thingGroups") + resp["thingGroups"].should.have.length_of(0) @mock_iot From 0869c83ea5415cf85ba5e516dec4ee9528c55aa3 Mon Sep 17 00:00:00 2001 From: Chagui- Date: Wed, 6 May 2020 09:32:47 -0400 Subject: [PATCH 330/658] Refactored test_delete_thing_group to use generate_thing_group_tree --- tests/test_iot/test_iot.py | 24 ++++++++---------------- 1 file changed, 8 insertions(+), 16 deletions(-) diff --git a/tests/test_iot/test_iot.py b/tests/test_iot/test_iot.py index 40eb19628748..af7abfdcd1e1 100644 --- a/tests/test_iot/test_iot.py +++ b/tests/test_iot/test_iot.py @@ -798,8 +798,8 @@ class TestListThingGroup: group_name_3d = "my-group-name-3d" tree_dict = { group_name_1a: { - group_name_2a: {group_name_3a: {} or None, group_name_3b: {} or None}, - group_name_2b: {group_name_3c: {} or None, group_name_3d: {} or None}, + group_name_2a: {group_name_3a: {}, group_name_3b: {}}, + group_name_2b: {group_name_3c: {}, group_name_3d: {}}, }, group_name_1b: {}, } @@ -918,20 +918,12 @@ def test_delete_thing_group(): client = boto3.client("iot", region_name="ap-northeast-1") group_name_1a = "my-group-name-1a" group_name_2a = "my-group-name-2a" - # --1a - # |--2a - - # create thing groups tree - # 1 - thing_group1a = client.create_thing_group(thingGroupName=group_name_1a) - thing_group1a.should.have.key("thingGroupName").which.should.equal(group_name_1a) - thing_group1a.should.have.key("thingGroupArn") - # 2 - thing_group2a = client.create_thing_group( - thingGroupName=group_name_2a, parentGroupName=group_name_1a - ) - thing_group2a.should.have.key("thingGroupName").which.should.equal(group_name_2a) - thing_group2a.should.have.key("thingGroupArn") + tree_dict = { + group_name_1a: { + group_name_2a: {}, + }, + } + group_catalog = generate_thing_group_tree(client, tree_dict) # delete group with child try: From f7b048442822fd2c2a63ea73d2e75eb39c592961 Mon Sep 17 00:00:00 2001 From: Chagui- Date: Wed, 6 May 2020 09:37:43 -0400 Subject: [PATCH 331/658] Refactored test_describe_thing_group_metadata_hierarchy to use generate_thing_group_tree --- tests/test_iot/test_iot.py | 78 +++++++++----------------------------- 1 file changed, 18 insertions(+), 60 deletions(-) diff --git a/tests/test_iot/test_iot.py b/tests/test_iot/test_iot.py index af7abfdcd1e1..8524bcbc1b13 100644 --- a/tests/test_iot/test_iot.py +++ b/tests/test_iot/test_iot.py @@ -958,56 +958,14 @@ def test_describe_thing_group_metadata_hierarchy(): group_name_3c = 
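# Aside: the "{} or None" literals removed above are equivalent to plain None,
# because an empty dict is falsy; the helper's "if tree_dict is None" guard is
# what made both spellings behave the same:
assert ({} or None) is None
assert ({"leaf": {}} or None) == {"leaf": {}}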
"my-group-name-3c" group_name_3d = "my-group-name-3d" - # --1a - # |--2a - # | |--3a - # | |--3b - # | - # |--2b - # |--3c - # |--3d - # --1b - - # create thing groups tree - # 1 - thing_group1a = client.create_thing_group(thingGroupName=group_name_1a) - thing_group1a.should.have.key("thingGroupName").which.should.equal(group_name_1a) - thing_group1a.should.have.key("thingGroupArn") - thing_group1b = client.create_thing_group(thingGroupName=group_name_1b) - thing_group1b.should.have.key("thingGroupName").which.should.equal(group_name_1b) - thing_group1b.should.have.key("thingGroupArn") - # 2 - thing_group2a = client.create_thing_group( - thingGroupName=group_name_2a, parentGroupName=group_name_1a - ) - thing_group2a.should.have.key("thingGroupName").which.should.equal(group_name_2a) - thing_group2a.should.have.key("thingGroupArn") - thing_group2b = client.create_thing_group( - thingGroupName=group_name_2b, parentGroupName=group_name_1a - ) - thing_group2b.should.have.key("thingGroupName").which.should.equal(group_name_2b) - thing_group2b.should.have.key("thingGroupArn") - # 3 - thing_group3a = client.create_thing_group( - thingGroupName=group_name_3a, parentGroupName=group_name_2a - ) - thing_group3a.should.have.key("thingGroupName").which.should.equal(group_name_3a) - thing_group3a.should.have.key("thingGroupArn") - thing_group3b = client.create_thing_group( - thingGroupName=group_name_3b, parentGroupName=group_name_2a - ) - thing_group3b.should.have.key("thingGroupName").which.should.equal(group_name_3b) - thing_group3b.should.have.key("thingGroupArn") - thing_group3c = client.create_thing_group( - thingGroupName=group_name_3c, parentGroupName=group_name_2b - ) - thing_group3c.should.have.key("thingGroupName").which.should.equal(group_name_3c) - thing_group3c.should.have.key("thingGroupArn") - thing_group3d = client.create_thing_group( - thingGroupName=group_name_3d, parentGroupName=group_name_2b - ) - thing_group3d.should.have.key("thingGroupName").which.should.equal(group_name_3d) - thing_group3d.should.have.key("thingGroupArn") + tree_dict = { + group_name_1a: { + group_name_2a: {group_name_3a: {}, group_name_3b: {}}, + group_name_2b: {group_name_3c: {}, group_name_3d: {}}, + }, + group_name_1b: {}, + } + group_catalog = generate_thing_group_tree(client, tree_dict) # describe groups # groups level 1 @@ -1059,7 +1017,7 @@ def test_describe_thing_group_metadata_hierarchy(): ].should.match(group_name_1a) thing_group_description2a["thingGroupMetadata"]["rootToParentThingGroups"][0][ "groupArn" - ].should.match(thing_group1a["thingGroupArn"]) + ].should.match(group_catalog[group_name_1a]["thingGroupArn"]) thing_group_description2a.should.have.key("version") # 2b thing_group_description2b = client.describe_thing_group( @@ -1085,7 +1043,7 @@ def test_describe_thing_group_metadata_hierarchy(): ].should.match(group_name_1a) thing_group_description2b["thingGroupMetadata"]["rootToParentThingGroups"][0][ "groupArn" - ].should.match(thing_group1a["thingGroupArn"]) + ].should.match(group_catalog[group_name_1a]["thingGroupArn"]) thing_group_description2b.should.have.key("version") # groups level 3 # 3a @@ -1112,13 +1070,13 @@ def test_describe_thing_group_metadata_hierarchy(): ].should.match(group_name_1a) thing_group_description3a["thingGroupMetadata"]["rootToParentThingGroups"][0][ "groupArn" - ].should.match(thing_group1a["thingGroupArn"]) + ].should.match(group_catalog[group_name_1a]["thingGroupArn"]) thing_group_description3a["thingGroupMetadata"]["rootToParentThingGroups"][1][ "groupName" 
].should.match(group_name_2a) thing_group_description3a["thingGroupMetadata"]["rootToParentThingGroups"][1][ "groupArn" - ].should.match(thing_group2a["thingGroupArn"]) + ].should.match(group_catalog[group_name_2a]["thingGroupArn"]) thing_group_description3a.should.have.key("version") # 3b thing_group_description3b = client.describe_thing_group( @@ -1144,13 +1102,13 @@ def test_describe_thing_group_metadata_hierarchy(): ].should.match(group_name_1a) thing_group_description3b["thingGroupMetadata"]["rootToParentThingGroups"][0][ "groupArn" - ].should.match(thing_group1a["thingGroupArn"]) + ].should.match(group_catalog[group_name_1a]["thingGroupArn"]) thing_group_description3b["thingGroupMetadata"]["rootToParentThingGroups"][1][ "groupName" ].should.match(group_name_2a) thing_group_description3b["thingGroupMetadata"]["rootToParentThingGroups"][1][ "groupArn" - ].should.match(thing_group2a["thingGroupArn"]) + ].should.match(group_catalog[group_name_2a]["thingGroupArn"]) thing_group_description3b.should.have.key("version") # 3c thing_group_description3c = client.describe_thing_group( @@ -1176,13 +1134,13 @@ def test_describe_thing_group_metadata_hierarchy(): ].should.match(group_name_1a) thing_group_description3c["thingGroupMetadata"]["rootToParentThingGroups"][0][ "groupArn" - ].should.match(thing_group1a["thingGroupArn"]) + ].should.match(group_catalog[group_name_1a]["thingGroupArn"]) thing_group_description3c["thingGroupMetadata"]["rootToParentThingGroups"][1][ "groupName" ].should.match(group_name_2b) thing_group_description3c["thingGroupMetadata"]["rootToParentThingGroups"][1][ "groupArn" - ].should.match(thing_group2b["thingGroupArn"]) + ].should.match(group_catalog[group_name_2b]["thingGroupArn"]) thing_group_description3c.should.have.key("version") # 3d thing_group_description3d = client.describe_thing_group( @@ -1208,13 +1166,13 @@ def test_describe_thing_group_metadata_hierarchy(): ].should.match(group_name_1a) thing_group_description3d["thingGroupMetadata"]["rootToParentThingGroups"][0][ "groupArn" - ].should.match(thing_group1a["thingGroupArn"]) + ].should.match(group_catalog[group_name_1a]["thingGroupArn"]) thing_group_description3d["thingGroupMetadata"]["rootToParentThingGroups"][1][ "groupName" ].should.match(group_name_2b) thing_group_description3d["thingGroupMetadata"]["rootToParentThingGroups"][1][ "groupArn" - ].should.match(thing_group2b["thingGroupArn"]) + ].should.match(group_catalog[group_name_2b]["thingGroupArn"]) thing_group_description3d.should.have.key("version") From c51ef87f710a42df42ad847cb048cbfd109b757b Mon Sep 17 00:00:00 2001 From: Chagui- Date: Wed, 6 May 2020 09:43:34 -0400 Subject: [PATCH 332/658] black --- tests/test_iot/test_iot.py | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/tests/test_iot/test_iot.py b/tests/test_iot/test_iot.py index 8524bcbc1b13..6fe43edc2ccc 100644 --- a/tests/test_iot/test_iot.py +++ b/tests/test_iot/test_iot.py @@ -814,7 +814,6 @@ def test_should_list_all_groups(self): resp.should.have.key("thingGroups") resp["thingGroups"].should.have.length_of(8) - @mock_iot def test_should_list_all_groups_non_recursively(self): # setup @@ -825,7 +824,6 @@ def test_should_list_all_groups_non_recursively(self): resp.should.have.key("thingGroups") resp["thingGroups"].should.have.length_of(2) - @mock_iot def test_should_list_all_groups_filtered_by_parent(self): # setup @@ -843,7 +841,9 @@ def test_should_list_all_groups_filtered_by_parent(self): resp["thingGroups"].should.have.length_of(0) with 
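# Aside: the rootToParentThingGroups assertions above pin an ordering of
# root first, immediate parent last; a standalone sketch of building that
# chain from a child -> parent mapping:
def _root_to_parent(parents, group):
    chain = []
    node = parents.get(group)
    while node:
        chain.append(node)
        node = parents.get(node)
    return list(reversed(chain))

assert _root_to_parent({"3a": "2a", "2a": "1a"}, "3a") == ["1a", "2a"]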
assert_raises(ClientError) as e: client.list_thing_groups(parentGroup="inexistant-group-name") - e.exception.response["Error"]["Code"].should.equal("ResourceNotFoundException") + e.exception.response["Error"]["Code"].should.equal( + "ResourceNotFoundException" + ) @mock_iot def test_should_list_all_groups_filtered_by_parent_non_recursively(self): @@ -858,7 +858,6 @@ def test_should_list_all_groups_filtered_by_parent_non_recursively(self): resp.should.have.key("thingGroups") resp["thingGroups"].should.have.length_of(2) - @mock_iot def test_should_list_all_groups_filtered_by_name_prefix(self): # setup @@ -875,21 +874,23 @@ def test_should_list_all_groups_filtered_by_name_prefix(self): resp.should.have.key("thingGroups") resp["thingGroups"].should.have.length_of(0) - @mock_iot def test_should_list_all_groups_filtered_by_name_prefix_non_recursively(self): # setup client = boto3.client("iot", region_name="ap-northeast-1") group_catalog = generate_thing_group_tree(client, self.tree_dict) # test - resp = client.list_thing_groups(namePrefixFilter="my-group-name-1", recursive=False) + resp = client.list_thing_groups( + namePrefixFilter="my-group-name-1", recursive=False + ) resp.should.have.key("thingGroups") resp["thingGroups"].should.have.length_of(2) - resp = client.list_thing_groups(namePrefixFilter="my-group-name-3", recursive=False) + resp = client.list_thing_groups( + namePrefixFilter="my-group-name-3", recursive=False + ) resp.should.have.key("thingGroups") resp["thingGroups"].should.have.length_of(0) - @mock_iot def test_should_list_all_groups_filtered_by_name_prefix_and_parent(self): # setup @@ -907,7 +908,8 @@ def test_should_list_all_groups_filtered_by_name_prefix_and_parent(self): resp.should.have.key("thingGroups") resp["thingGroups"].should.have.length_of(4) resp = client.list_thing_groups( - namePrefixFilter="prefix-which-doesn-not-match", parentGroup=self.group_name_1a + namePrefixFilter="prefix-which-doesn-not-match", + parentGroup=self.group_name_1a, ) resp.should.have.key("thingGroups") resp["thingGroups"].should.have.length_of(0) @@ -919,9 +921,7 @@ def test_delete_thing_group(): group_name_1a = "my-group-name-1a" group_name_2a = "my-group-name-2a" tree_dict = { - group_name_1a: { - group_name_2a: {}, - }, + group_name_1a: {group_name_2a: {},}, } group_catalog = generate_thing_group_tree(client, tree_dict) From 8bfc7ed76056c35528e306da383b3e0a1c270978 Mon Sep 17 00:00:00 2001 From: Chagui- Date: Wed, 6 May 2020 10:28:13 -0400 Subject: [PATCH 333/658] Fixed python2 --- tests/test_iot/test_iot.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tests/test_iot/test_iot.py b/tests/test_iot/test_iot.py index 6fe43edc2ccc..c3ee4c96d87c 100644 --- a/tests/test_iot/test_iot.py +++ b/tests/test_iot/test_iot.py @@ -36,7 +36,8 @@ def generate_thing_group_tree(iot_client, tree_dict, _parent=None): subtree_dict = generate_thing_group_tree( iot_client=iot_client, tree_dict=tree_dict[group_name], _parent=group_name ) - created_dict = {**created_dict, **subtree_dict} + created_dict.update(created_dict) + created_dict.update(subtree_dict) return created_dict From 2b0e7da9985700f72904a5a3b7130b4f436250b0 Mon Sep 17 00:00:00 2001 From: usmankb Date: Wed, 6 May 2020 20:28:50 +0530 Subject: [PATCH 334/658] SES get send statistics response modification --- moto/ses/responses.py | 30 ++++++++++++++++-------------- tests/test_ses/test_ses.py | 8 ++++---- 2 files changed, 20 insertions(+), 18 deletions(-) diff --git a/moto/ses/responses.py b/moto/ses/responses.py index 
62893094a036..8c9dc8f75abb 100644 --- a/moto/ses/responses.py +++ b/moto/ses/responses.py @@ -292,20 +292,22 @@ def create_configuration_set_event_destination(self): """ GET_SEND_STATISTICS = """ - - {% for statistics in all_statistics %} - - {{ statistics["DeliveryAttempts"] }} - {{ statistics["Rejects"] }} - {{ statistics["Bounces"] }} - {{ statistics["Complaints"] }} - {{ statistics["Timestamp"] }} - - {% endfor %} - - - e0abcdfa-c866-11e0-b6d0-273d09173z49 - + + + {% for statistics in all_statistics %} + + {{ statistics["DeliveryAttempts"] }} + {{ statistics["Rejects"] }} + {{ statistics["Bounces"] }} + {{ statistics["Complaints"] }} + {{ statistics["Timestamp"] }} + + {% endfor %} + + + e0abcdfa-c866-11e0-b6d0-273d09173z49 + + """ CREATE_CONFIGURATION_SET = """ diff --git a/tests/test_ses/test_ses.py b/tests/test_ses/test_ses.py index 719e4ede96a1..7d7674bea166 100644 --- a/tests/test_ses/test_ses.py +++ b/tests/test_ses/test_ses.py @@ -145,10 +145,10 @@ def test_get_send_statistics(): result = conn.get_send_statistics() reject_count = int( - result["GetSendStatisticsResponse"]["SendDataPoints"][0]["Rejects"] + result["GetSendStatisticsResponse"]["GetSendStatisticsResult"]["SendDataPoints"][0]["Rejects"] ) delivery_count = int( - result["GetSendStatisticsResponse"]["SendDataPoints"][0]["DeliveryAttempts"] + result["GetSendStatisticsResponse"]["GetSendStatisticsResult"]["SendDataPoints"][0]["DeliveryAttempts"] ) reject_count.should.equal(1) delivery_count.should.equal(0) @@ -162,10 +162,10 @@ def test_get_send_statistics(): result = conn.get_send_statistics() reject_count = int( - result["GetSendStatisticsResponse"]["SendDataPoints"][0]["Rejects"] + result["GetSendStatisticsResponse"]["GetSendStatisticsResult"]["SendDataPoints"][0]["Rejects"] ) delivery_count = int( - result["GetSendStatisticsResponse"]["SendDataPoints"][0]["DeliveryAttempts"] + result["GetSendStatisticsResponse"]["GetSendStatisticsResult"]["SendDataPoints"][0]["DeliveryAttempts"] ) reject_count.should.equal(1) delivery_count.should.equal(1) From 55f207050ef133888ac8dd231e3d124953096391 Mon Sep 17 00:00:00 2001 From: Brian Pandola Date: Wed, 6 May 2020 14:28:40 -0700 Subject: [PATCH 335/658] Add `Redshift.ClusterAlreadyExists` Error Closes #2967 --- moto/redshift/exceptions.py | 7 +++++++ moto/redshift/models.py | 3 +++ tests/test_redshift/test_redshift.py | 22 ++++++++++++++++++++++ 3 files changed, 32 insertions(+) diff --git a/moto/redshift/exceptions.py b/moto/redshift/exceptions.py index 0a17e8aab4c4..b5f83d3bc25f 100644 --- a/moto/redshift/exceptions.py +++ b/moto/redshift/exceptions.py @@ -136,3 +136,10 @@ def __init__(self, cluster_identifier): cluster_identifier ), ) + + +class ClusterAlreadyExistsFaultError(RedshiftClientError): + def __init__(self): + super(ClusterAlreadyExistsFaultError, self).__init__( + "ClusterAlreadyExists", "Cluster already exists" + ) diff --git a/moto/redshift/models.py b/moto/redshift/models.py index 17840fb86f0f..07baf18c0c3c 100644 --- a/moto/redshift/models.py +++ b/moto/redshift/models.py @@ -10,6 +10,7 @@ from moto.core.utils import iso_8601_datetime_with_milliseconds from moto.ec2 import ec2_backends from .exceptions import ( + ClusterAlreadyExistsFaultError, ClusterNotFoundError, ClusterParameterGroupNotFoundError, ClusterSecurityGroupNotFoundError, @@ -580,6 +581,8 @@ def modify_snapshot_copy_retention_period( def create_cluster(self, **cluster_kwargs): cluster_identifier = cluster_kwargs["cluster_identifier"] + if cluster_identifier in self.clusters: + raise 
ClusterAlreadyExistsFaultError() cluster = Cluster(self, **cluster_kwargs) self.clusters[cluster_identifier] = cluster return cluster diff --git a/tests/test_redshift/test_redshift.py b/tests/test_redshift/test_redshift.py index 6bb3b1396162..cf96ee15ffaa 100644 --- a/tests/test_redshift/test_redshift.py +++ b/tests/test_redshift/test_redshift.py @@ -915,6 +915,11 @@ def test_create_cluster_from_snapshot(): ClusterIdentifier=original_cluster_identifier, ) + client.restore_from_cluster_snapshot.when.called_with( + ClusterIdentifier=original_cluster_identifier, + SnapshotIdentifier=original_snapshot_identifier, + ).should.throw(ClientError, "ClusterAlreadyExists") + response = client.restore_from_cluster_snapshot( ClusterIdentifier=new_cluster_identifier, SnapshotIdentifier=original_snapshot_identifier, @@ -1333,3 +1338,20 @@ def test_modify_snapshot_copy_retention_period(): response = client.describe_clusters(ClusterIdentifier="test") cluster_snapshot_copy_status = response["Clusters"][0]["ClusterSnapshotCopyStatus"] cluster_snapshot_copy_status["RetentionPeriod"].should.equal(5) + + +@mock_redshift +def test_create_duplicate_cluster_fails(): + kwargs = { + "ClusterIdentifier": "test", + "ClusterType": "single-node", + "DBName": "test", + "MasterUsername": "user", + "MasterUserPassword": "password", + "NodeType": "ds2.xlarge", + } + client = boto3.client("redshift", region_name="us-east-1") + client.create_cluster(**kwargs) + client.create_cluster.when.called_with(**kwargs).should.throw( + ClientError, "ClusterAlreadyExists" + ) From 5ec814a6042b73f000b62d3baa3fd74afc27c992 Mon Sep 17 00:00:00 2001 From: James Belleau Date: Wed, 6 May 2020 21:12:48 -0500 Subject: [PATCH 336/658] Fixes and additional tests --- moto/backends.py | 1 + moto/managedblockchain/models.py | 77 +++++++---- moto/managedblockchain/responses.py | 41 ++++-- moto/managedblockchain/utils.py | 6 + .../test_managedblockchain_networks.py | 128 ++++++++++++++---- 5 files changed, 193 insertions(+), 60 deletions(-) diff --git a/moto/backends.py b/moto/backends.py index bb71429eb940..3934afa67171 100644 --- a/moto/backends.py +++ b/moto/backends.py @@ -39,6 +39,7 @@ "kms": ("kms", "kms_backends"), "lambda": ("awslambda", "lambda_backends"), "logs": ("logs", "logs_backends"), + "managedblockchain": ("managedblockchain", "managedblockchain_backends"), "moto_api": ("core", "moto_api_backends"), "opsworks": ("opsworks", "opsworks_backends"), "organizations": ("organizations", "organizations_backends"), diff --git a/moto/managedblockchain/models.py b/moto/managedblockchain/models.py index 32e9ebbb58ed..475a19bbd610 100644 --- a/moto/managedblockchain/models.py +++ b/moto/managedblockchain/models.py @@ -8,7 +8,7 @@ from .exceptions import BadRequestException -from .utils import get_network_id +from .utils import get_network_id, get_member_id FRAMEWORKS = [ "HYPERLEDGER_FABRIC", @@ -37,7 +37,7 @@ def __init__( region, description=None, ): - self.st = datetime.datetime.now(datetime.timezone.utc) + self.creationdate = datetime.datetime.utcnow() self.id = id self.name = name self.description = description @@ -49,19 +49,34 @@ def __init__( self.region = region def to_dict(self): + # Format for list_networks + d = { + "Id": self.id, + "Name": self.name, + "Framework": self.framework, + "FrameworkVersion": self.frameworkversion, + "Status": "AVAILABLE", + "CreationDate": self.creationdate.strftime("%Y-%m-%dT%H:%M:%S.%f%z"), + } + if self.description is not None: + d["Description"] = self.description + return d + + def get_format(self): + # 
Format for get_networks frameworkattributes = { "Fabric": { "OrderingServiceEndpoint": "orderer.{0}.managedblockchain.{1}.amazonaws.com:30001".format( - self.id, self.region + self.id.lower(), self.region ), "Edition": self.frameworkconfiguration["Fabric"]["Edition"], } } vpcendpointname = "com.amazonaws.{0}.managedblockchain.{1}".format( - self.region, self.id + self.region, self.id.lower() ) - # Use iso_8601_datetime_with_milliseconds ? + d = { "Id": self.id, "Name": self.name, @@ -71,7 +86,7 @@ def to_dict(self): "VpcEndpointServiceName": vpcendpointname, "VotingPolicy": self.voting_policy, "Status": "AVAILABLE", - "CreationDate": self.st.strftime("%Y-%m-%dT%H:%M:%S.%f%z"), + "CreationDate": self.creationdate.strftime("%Y-%m-%dT%H:%M:%S.%f%z"), } if self.description is not None: d["Description"] = self.description @@ -90,14 +105,21 @@ def reset(self): def create_network( self, - json_body, + name, + framework, + frameworkversion, + frameworkconfiguration, + voting_policy, + member_configuration, + description=None, ): - name = json_body["Name"] - framework = json_body["Framework"] - frameworkversion = json_body["FrameworkVersion"] - frameworkconfiguration = json_body["FrameworkConfiguration"] - voting_policy = json_body["VotingPolicy"] - member_configuration = json_body["MemberConfiguration"] + self.name = name + self.framework = framework + self.frameworkversion = frameworkversion + self.frameworkconfiguration = frameworkconfiguration + self.voting_policy = voting_policy + self.member_configuration = member_configuration + self.description = description # Check framework if framework not in FRAMEWORKS: @@ -119,33 +141,32 @@ def create_network( ## Generate network ID network_id = get_network_id() + ## Generate memberid ID - will need to actually create member + member_id = get_member_id() + self.networks[network_id] = ManagedBlockchainNetwork( id=network_id, name=name, - framework=framework, - frameworkversion=frameworkversion, - frameworkconfiguration=frameworkconfiguration, - voting_policy=voting_policy, - member_configuration=member_configuration, + framework=self.framework, + frameworkversion=self.frameworkversion, + frameworkconfiguration=self.frameworkconfiguration, + voting_policy=self.voting_policy, + member_configuration=self.member_configuration, region=self.region_name, + description=self.description, ) + # Return the network and member ID + d = {"NetworkId": network_id, "MemberId": member_id} + return d + def list_networks(self): return self.networks.values() def get_network(self, network_id): - return self.networks[network_id] - + return self.networks.get(network_id) managedblockchain_backends = {} for region in Session().get_available_regions("managedblockchain"): managedblockchain_backends[region] = ManagedBlockchainBackend(region) -for region in Session().get_available_regions( - "managedblockchain", partition_name="aws-us-gov" -): - managedblockchain_backends[region] = ManagedBlockchainBackend(region) -for region in Session().get_available_regions( - "managedblockchain", partition_name="aws-cn" -): - managedblockchain_backends[region] = ManagedBlockchainBackend(region) diff --git a/moto/managedblockchain/responses.py b/moto/managedblockchain/responses.py index ff7c5ff5c0fc..93084581de16 100644 --- a/moto/managedblockchain/responses.py +++ b/moto/managedblockchain/responses.py @@ -5,7 +5,10 @@ from moto.core.responses import BaseResponse from .models import managedblockchain_backends -from .utils import region_from_managedblckchain_url, 
networkid_from_managedblockchain_url +from .utils import ( + region_from_managedblckchain_url, + networkid_from_managedblockchain_url, +) class ManagedBlockchainResponse(BaseResponse): @@ -16,7 +19,9 @@ def __init__(self, backend): @classmethod def network_response(clazz, request, full_url, headers): region_name = region_from_managedblckchain_url(full_url) - response_instance = ManagedBlockchainResponse(managedblockchain_backends[region_name]) + response_instance = ManagedBlockchainResponse( + managedblockchain_backends[region_name] + ) return response_instance._network_response(request, full_url, headers) def _network_response(self, request, full_url, headers): @@ -42,13 +47,35 @@ def _all_networks_response(self, request, full_url, headers): return 200, headers, response def _network_response_post(self, json_body, querystring, headers): - self.backend.create_network(json_body) - return 201, headers, "" + name = json_body["Name"] + framework = json_body["Framework"] + frameworkversion = json_body["FrameworkVersion"] + frameworkconfiguration = json_body["FrameworkConfiguration"] + voting_policy = json_body["VotingPolicy"] + member_configuration = json_body["MemberConfiguration"] + + # Optional + description = None + if "Description" in json_body: + description = json_body["Description"] + + response = self.backend.create_network( + name, + framework, + frameworkversion, + frameworkconfiguration, + voting_policy, + member_configuration, + description, + ) + return 201, headers, json.dumps(response) @classmethod def networkid_response(clazz, request, full_url, headers): region_name = region_from_managedblckchain_url(full_url) - response_instance = ManagedBlockchainResponse(managedblockchain_backends[region_name]) + response_instance = ManagedBlockchainResponse( + managedblockchain_backends[region_name] + ) return response_instance._networkid_response(request, full_url, headers) def _networkid_response(self, request, full_url, headers): @@ -60,8 +87,6 @@ def _networkid_response(self, request, full_url, headers): def _networkid_response_get(self, network_id, headers): mbcnetwork = self.backend.get_network(network_id) - response = json.dumps( - {"Network": mbcnetwork.to_dict()} - ) + response = json.dumps({"Network": mbcnetwork.get_format()}) headers["content-type"] = "application/json" return 200, headers, response diff --git a/moto/managedblockchain/utils.py b/moto/managedblockchain/utils.py index 687b7990b420..2a93d93f40e9 100644 --- a/moto/managedblockchain/utils.py +++ b/moto/managedblockchain/utils.py @@ -21,3 +21,9 @@ def get_network_id(): return "n-" + "".join( random.choice(string.ascii_uppercase + string.digits) for _ in range(26) ) + + +def get_member_id(): + return "m-" + "".join( + random.choice(string.ascii_uppercase + string.digits) for _ in range(26) + ) diff --git a/tests/test_managedblockchain/test_managedblockchain_networks.py b/tests/test_managedblockchain/test_managedblockchain_networks.py index 7bdc0ec59355..f9c98676e7f8 100644 --- a/tests/test_managedblockchain/test_managedblockchain_networks.py +++ b/tests/test_managedblockchain/test_managedblockchain_networks.py @@ -3,51 +3,131 @@ import boto3 import sure # noqa +from moto.managedblockchain.exceptions import BadRequestException from moto import mock_managedblockchain +default_frameworkconfiguration = {"Fabric": {"Edition": "STARTER"}} + +default_votingpolicy = { + "ApprovalThresholdPolicy": { + "ThresholdPercentage": 50, + "ProposalDurationInHours": 24, + "ThresholdComparator": "GREATER_THAN_OR_EQUAL_TO", + } +} + 
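# A hedged aside on the ID helpers this patch adds to
# moto/managedblockchain/utils.py above: each returns a fixed prefix plus 26
# characters drawn from uppercase letters and digits. A minimal standalone
# sanity check (assuming this revision of moto is importable), mirroring the
# n-/m- patterns the tests below assert:
#
#     import re
#     from moto.managedblockchain.utils import get_member_id, get_network_id
#
#     assert re.match(r"^n-[A-Z0-9]{26}$", get_network_id())
#     assert re.match(r"^m-[A-Z0-9]{26}$", get_member_id())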
+default_memberconfiguration = { + "Name": "testmember1", + "Description": "Test Member 1", + "FrameworkConfiguration": { + "Fabric": {"AdminUsername": "admin", "AdminPassword": "Admin12345"} + }, + "LogPublishingConfiguration": { + "Fabric": {"CaLogs": {"Cloudwatch": {"Enabled": False}}} + }, +} + + @mock_managedblockchain def test_create_network(): conn = boto3.client("managedblockchain", region_name="us-east-1") - frameworkconfiguration = {"Fabric": {"Edition": "STARTER"}} + response = conn.create_network( + Name="testnetwork1", + Framework="HYPERLEDGER_FABRIC", + FrameworkVersion="1.2", + FrameworkConfiguration=default_frameworkconfiguration, + VotingPolicy=default_votingpolicy, + MemberConfiguration=default_memberconfiguration, + ) + response["NetworkId"].should.match("n-[A-Z0-9]{26}") + response["MemberId"].should.match("m-[A-Z0-9]{26}") - votingpolicy = { - "ApprovalThresholdPolicy": { - "ThresholdPercentage": 50, - "ProposalDurationInHours": 24, - "ThresholdComparator": "GREATER_THAN_OR_EQUAL_TO", - } - } + # Find in full list + response = conn.list_networks() + mbcnetworks = response["Networks"] + mbcnetworks.should.have.length_of(1) + mbcnetworks[0]["Name"].should.equal("testnetwork1") + + # Get network details + network_id = mbcnetworks[0]["Id"] + response = conn.get_network(NetworkId=network_id) + response["Network"]["Name"].should.equal("testnetwork1") - memberconfiguration = { - "Name": "testmember1", - "Description": "Test Member 1", - "FrameworkConfiguration": { - "Fabric": {"AdminUsername": "admin", "AdminPassword": "Admin12345"} - }, - "LogPublishingConfiguration": { - "Fabric": {"CaLogs": {"Cloudwatch": {"Enabled": False}}} - }, - } - conn.create_network( +@mock_managedblockchain +def test_create_network_withopts(): + conn = boto3.client("managedblockchain", region_name="us-east-1") + + response = conn.create_network( Name="testnetwork1", Description="Test Network 1", Framework="HYPERLEDGER_FABRIC", FrameworkVersion="1.2", - FrameworkConfiguration=frameworkconfiguration, - VotingPolicy=votingpolicy, - MemberConfiguration=memberconfiguration, + FrameworkConfiguration=default_frameworkconfiguration, + VotingPolicy=default_votingpolicy, + MemberConfiguration=default_memberconfiguration, ) + response["NetworkId"].should.match("n-[A-Z0-9]{26}") + response["MemberId"].should.match("m-[A-Z0-9]{26}") # Find in full list response = conn.list_networks() mbcnetworks = response["Networks"] mbcnetworks.should.have.length_of(1) - mbcnetworks[0]["Name"].should.equal("testnetwork1") + mbcnetworks[0]["Description"].should.equal("Test Network 1") # Get network details network_id = mbcnetworks[0]["Id"] response = conn.get_network(NetworkId=network_id) - response["Network"]["Name"].should.equal("testnetwork1") + response["Network"]["Description"].should.equal("Test Network 1") + + +@mock_managedblockchain +def test_create_network_noframework(): + conn = boto3.client("managedblockchain", region_name="us-east-1") + + response = conn.create_network.when.called_with( + Name="testnetwork1", + Description="Test Network 1", + Framework="HYPERLEDGER_VINYL", + FrameworkVersion="1.2", + FrameworkConfiguration=default_frameworkconfiguration, + VotingPolicy=default_votingpolicy, + MemberConfiguration=default_memberconfiguration, + ).should.throw(Exception, "Invalid request body") + + +@mock_managedblockchain +def test_create_network_badframeworkver(): + conn = boto3.client("managedblockchain", region_name="us-east-1") + + response = conn.create_network.when.called_with( + Name="testnetwork1", + 
Description="Test Network 1", + Framework="HYPERLEDGER_FABRIC", + FrameworkVersion="1.X", + FrameworkConfiguration=default_frameworkconfiguration, + VotingPolicy=default_votingpolicy, + MemberConfiguration=default_memberconfiguration, + ).should.throw( + Exception, "Invalid version 1.X requested for framework HYPERLEDGER_FABRIC" + ) + + +@mock_managedblockchain +def test_create_network_badedition(): + conn = boto3.client("managedblockchain", region_name="us-east-1") + + frameworkconfiguration = {"Fabric": {"Edition": "SUPER"}} + + response = conn.create_network.when.called_with( + Name="testnetwork1", + Description="Test Network 1", + Framework="HYPERLEDGER_FABRIC", + FrameworkVersion="1.2", + FrameworkConfiguration=frameworkconfiguration, + VotingPolicy=default_votingpolicy, + MemberConfiguration=default_memberconfiguration, + ).should.throw(Exception, "Invalid request body") From 811ec3bd2a6921b24ac0d6133b58ed713bd58b38 Mon Sep 17 00:00:00 2001 From: James Belleau Date: Wed, 6 May 2020 21:54:59 -0500 Subject: [PATCH 337/658] Added get network test --- moto/managedblockchain/exceptions.py | 11 +++++++++++ moto/managedblockchain/models.py | 6 +++++- .../test_managedblockchain_networks.py | 9 +++++++++ 3 files changed, 25 insertions(+), 1 deletion(-) diff --git a/moto/managedblockchain/exceptions.py b/moto/managedblockchain/exceptions.py index 3195d7c34c3d..265d8eaeab2c 100644 --- a/moto/managedblockchain/exceptions.py +++ b/moto/managedblockchain/exceptions.py @@ -14,3 +14,14 @@ def __init__(self, pretty_called_method, operation_error): pretty_called_method, operation_error ), ) + + +class ResourceNotFoundException(ManagedBlockchainClientError): + def __init__(self, pretty_called_method, operation_error): + self.code = 404 + super(ResourceNotFoundException, self).__init__( + "ResourceNotFoundException", + "An error occurred (BadRequestException) when calling the {0} operation: {1}".format( + pretty_called_method, operation_error + ), + ) diff --git a/moto/managedblockchain/models.py b/moto/managedblockchain/models.py index 475a19bbd610..96f411a87854 100644 --- a/moto/managedblockchain/models.py +++ b/moto/managedblockchain/models.py @@ -6,7 +6,7 @@ from moto.core import BaseBackend, BaseModel -from .exceptions import BadRequestException +from .exceptions import BadRequestException, ResourceNotFoundException from .utils import get_network_id, get_member_id @@ -164,6 +164,10 @@ def list_networks(self): return self.networks.values() def get_network(self, network_id): + if network_id not in self.networks: + raise ResourceNotFoundException( + "CreateNetwork", "Network {0} not found".format(network_id) + ) return self.networks.get(network_id) diff --git a/tests/test_managedblockchain/test_managedblockchain_networks.py b/tests/test_managedblockchain/test_managedblockchain_networks.py index f9c98676e7f8..a3256a3fe03c 100644 --- a/tests/test_managedblockchain/test_managedblockchain_networks.py +++ b/tests/test_managedblockchain/test_managedblockchain_networks.py @@ -131,3 +131,12 @@ def test_create_network_badedition(): VotingPolicy=default_votingpolicy, MemberConfiguration=default_memberconfiguration, ).should.throw(Exception, "Invalid request body") + + +@mock_managedblockchain +def test_get_network_badnetwork(): + conn = boto3.client("managedblockchain", region_name="us-east-1") + + response = conn.get_network.when.called_with( + NetworkId="n-BADNETWORK", + ).should.throw(Exception, "Network n-BADNETWORK not found") From 4abd88f95cc7fb5abcca0f18191f89b581b9d319 Mon Sep 17 00:00:00 2001 From: 
Rigas Papathanasopoulos Date: Wed, 6 May 2020 23:12:32 +0300 Subject: [PATCH 338/658] Fix the online status in OpsWorks When an instance is running, OpsWorks reports its status as "online" [1], while EC2 reports it as "running". Until now, moto copied the EC2 instance's status as is. This commit converts the running status to online when it is returned by OpsWorks. [1]: https://docs.aws.amazon.com/cli/latest/reference/opsworks/describe-instances.html --- moto/opsworks/models.py | 3 +++ tests/test_opsworks/test_instances.py | 6 ++++++ 2 files changed, 9 insertions(+) diff --git a/moto/opsworks/models.py b/moto/opsworks/models.py index 96d918cc9c30..84bd3b103fa9 100644 --- a/moto/opsworks/models.py +++ b/moto/opsworks/models.py @@ -125,6 +125,9 @@ def start(self): def status(self): if self.instance is None: return "stopped" + # OpsWorks reports the "running" state as "online" + elif self.instance._state.name == "running": + return "online" return self.instance._state.name def to_dict(self): diff --git a/tests/test_opsworks/test_instances.py b/tests/test_opsworks/test_instances.py index 5f0dc2040a76..93935d20fcc8 100644 --- a/tests/test_opsworks/test_instances.py +++ b/tests/test_opsworks/test_instances.py @@ -195,6 +195,10 @@ def test_ec2_integration(): reservations = ec2.describe_instances()["Reservations"] assert reservations.should.be.empty + # Before starting the instance, its status should be "stopped" + opsworks_instance = opsworks.describe_instances(StackId=stack_id)["Instances"][0] + opsworks_instance["Status"].should.equal("stopped") + # After starting the instance, it should be discoverable via ec2 opsworks.start_instance(InstanceId=instance_id) reservations = ec2.describe_instances()["Reservations"] @@ -204,3 +208,5 @@ def test_ec2_integration(): instance["InstanceId"].should.equal(opsworks_instance["Ec2InstanceId"]) instance["PrivateIpAddress"].should.equal(opsworks_instance["PrivateIp"]) + # After starting the instance, its status should be "online" + opsworks_instance["Status"].should.equal("online") From 691e2068541f6fab161daddbfe8b3e17418f0f80 Mon Sep 17 00:00:00 2001 From: Bert Blommers Date: Thu, 7 May 2020 09:49:37 +0100 Subject: [PATCH 339/658] SES - Validate domain before send_raw_email --- moto/ses/models.py | 4 +-- tests/test_ses/test_ses_boto3.py | 45 +++++++++++++++++++++++--------- 2 files changed, 35 insertions(+), 14 deletions(-) diff --git a/moto/ses/models.py b/moto/ses/models.py index 91241f70629c..75c25a0a36bc 100644 --- a/moto/ses/models.py +++ b/moto/ses/models.py @@ -189,7 +189,7 @@ def __process_sns_feedback__(self, source, destinations, region): def send_raw_email(self, source, destinations, raw_data, region): if source is not None: _, source_email_address = parseaddr(source) - if source_email_address not in self.addresses: + if not self._is_verified_address(source_email_address): raise MessageRejectedError( "Did not have authority to send from email %s" % source_email_address ) @@ -202,7 +202,7 @@ def send_raw_email(self, source, destinations, raw_data, region): raise MessageRejectedError("Source not specified") _, source_email_address = parseaddr(message["from"]) - if source_email_address not in self.addresses: + if not self._is_verified_address(source_email_address): raise MessageRejectedError( "Did not have authority to send from email %s" % source_email_address diff --git a/tests/test_ses/test_ses_boto3.py b/tests/test_ses/test_ses_boto3.py index de8aa0813dae..7f64e5f71db2 100644 --- a/tests/test_ses/test_ses_boto3.py +++ 
b/tests/test_ses/test_ses_boto3.py @@ -139,25 +139,31 @@ def test_send_html_email(): def test_send_raw_email(): conn = boto3.client("ses", region_name="us-east-1") - message = MIMEMultipart() - message["Subject"] = "Test" - message["From"] = "test@example.com" - message["To"] = "to@example.com, foo@example.com" + message = get_raw_email() - # Message body - part = MIMEText("test file attached") - message.attach(part) + kwargs = dict(Source=message["From"], RawMessage={"Data": message.as_string()}) - # Attachment - part = MIMEText("contents of test file here") - part.add_header("Content-Disposition", "attachment; filename=test.txt") - message.attach(part) + conn.send_raw_email.when.called_with(**kwargs).should.throw(ClientError) + + conn.verify_email_identity(EmailAddress="test@example.com") + conn.send_raw_email(**kwargs) + + send_quota = conn.get_send_quota() + sent_count = int(send_quota["SentLast24Hours"]) + sent_count.should.equal(2) + + +@mock_ses +def test_send_raw_email_validate_domain(): + conn = boto3.client("ses", region_name="us-east-1") + + message = get_raw_email() kwargs = dict(Source=message["From"], RawMessage={"Data": message.as_string()}) conn.send_raw_email.when.called_with(**kwargs).should.throw(ClientError) - conn.verify_email_identity(EmailAddress="test@example.com") + conn.verify_domain_identity(Domain="example.com") conn.send_raw_email(**kwargs) send_quota = conn.get_send_quota() @@ -165,6 +171,21 @@ def test_send_raw_email(): sent_count.should.equal(2) +def get_raw_email(): + message = MIMEMultipart() + message["Subject"] = "Test" + message["From"] = "test@example.com" + message["To"] = "to@example.com, foo@example.com" + # Message body + part = MIMEText("test file attached") + message.attach(part) + # Attachment + part = MIMEText("contents of test file here") + part.add_header("Content-Disposition", "attachment; filename=test.txt") + message.attach(part) + return message + + @mock_ses def test_send_raw_email_without_source(): conn = boto3.client("ses", region_name="us-east-1") From dcb122076fc3c6e634cd939c6e9ea1b2433b777c Mon Sep 17 00:00:00 2001 From: Bert Blommers Date: Thu, 7 May 2020 09:53:07 +0100 Subject: [PATCH 340/658] Linting --- tests/test_ses/test_ses.py | 16 ++++++++++++---- 1 file changed, 12 insertions(+), 4 deletions(-) diff --git a/tests/test_ses/test_ses.py b/tests/test_ses/test_ses.py index 7d7674bea166..ce00629742fa 100644 --- a/tests/test_ses/test_ses.py +++ b/tests/test_ses/test_ses.py @@ -145,10 +145,14 @@ def test_get_send_statistics(): result = conn.get_send_statistics() reject_count = int( - result["GetSendStatisticsResponse"]["GetSendStatisticsResult"]["SendDataPoints"][0]["Rejects"] + result["GetSendStatisticsResponse"]["GetSendStatisticsResult"][ + "SendDataPoints" + ][0]["Rejects"] ) delivery_count = int( - result["GetSendStatisticsResponse"]["GetSendStatisticsResult"]["SendDataPoints"][0]["DeliveryAttempts"] + result["GetSendStatisticsResponse"]["GetSendStatisticsResult"][ + "SendDataPoints" + ][0]["DeliveryAttempts"] ) reject_count.should.equal(1) delivery_count.should.equal(0) @@ -162,10 +166,14 @@ def test_get_send_statistics(): result = conn.get_send_statistics() reject_count = int( - result["GetSendStatisticsResponse"]["GetSendStatisticsResult"]["SendDataPoints"][0]["Rejects"] + result["GetSendStatisticsResponse"]["GetSendStatisticsResult"][ + "SendDataPoints" + ][0]["Rejects"] ) delivery_count = int( - result["GetSendStatisticsResponse"]["GetSendStatisticsResult"]["SendDataPoints"][0]["DeliveryAttempts"] + 
result["GetSendStatisticsResponse"]["GetSendStatisticsResult"][ + "SendDataPoints" + ][0]["DeliveryAttempts"] ) reject_count.should.equal(1) delivery_count.should.equal(1) From 9881306ef2ea53564e26d886ce6dccbbee1ce6c0 Mon Sep 17 00:00:00 2001 From: James Belleau Date: Thu, 7 May 2020 04:33:31 -0500 Subject: [PATCH 341/658] Simplified optional attribute get --- moto/managedblockchain/responses.py | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/moto/managedblockchain/responses.py b/moto/managedblockchain/responses.py index 93084581de16..081f301d553c 100644 --- a/moto/managedblockchain/responses.py +++ b/moto/managedblockchain/responses.py @@ -55,9 +55,7 @@ def _network_response_post(self, json_body, querystring, headers): member_configuration = json_body["MemberConfiguration"] # Optional - description = None - if "Description" in json_body: - description = json_body["Description"] + description = json_body.get("Description", None) response = self.backend.create_network( name, From be5b1c592fbeb6432d8a7d19d8f2925140cd76f9 Mon Sep 17 00:00:00 2001 From: Bert Blommers Date: Thu, 7 May 2020 10:40:24 +0100 Subject: [PATCH 342/658] Lambda - Add actual logs to LogResult, instead of replicating the response --- moto/awslambda/models.py | 21 +++++++++++++-------- tests/test_awslambda/test_lambda.py | 14 +++++--------- 2 files changed, 18 insertions(+), 17 deletions(-) diff --git a/moto/awslambda/models.py b/moto/awslambda/models.py index 589a790ae7c6..28cbe61aff87 100644 --- a/moto/awslambda/models.py +++ b/moto/awslambda/models.py @@ -379,6 +379,7 @@ def _invoke_lambda(self, code, event=None, context=None): event = dict() if context is None: context = {} + output = None try: # TODO: I believe we can keep the container running and feed events as needed @@ -394,7 +395,7 @@ def _invoke_lambda(self, code, event=None, context=None): env_vars.update(self.environment_vars) - container = output = exit_code = None + container = exit_code = None log_config = docker.types.LogConfig(type=docker.types.LogConfig.types.JSON) with _DockerDataVolumeContext(self) as data_vol: try: @@ -455,24 +456,28 @@ def _invoke_lambda(self, code, event=None, context=None): # We only care about the response from the lambda # Which is the last line of the output, according to https://github.com/lambci/docker-lambda/issues/25 - output = output.splitlines()[-1] - return output, False + resp = output.splitlines()[-1] + logs = os.linesep.join( + [line for line in self.convert(output).splitlines()[:-1]] + ) + return resp, False, logs except BaseException as e: traceback.print_exc() - return "error running lambda: {}".format(e), True + logs = os.linesep.join( + [line for line in self.convert(output).splitlines()[:-1]] + ) + return "error running lambda: {}".format(e), True, logs def invoke(self, body, request_headers, response_headers): - payload = dict() if body: body = json.loads(body) # Get the invocation type: - res, errored = self._invoke_lambda(code=self.code, event=body) + res, errored, logs = self._invoke_lambda(code=self.code, event=body) if request_headers.get("x-amz-invocation-type") == "RequestResponse": - encoded = base64.b64encode(res.encode("utf-8")) + encoded = base64.b64encode(logs.encode("utf-8")) response_headers["x-amz-log-result"] = encoded.decode("utf-8") - payload["result"] = response_headers["x-amz-log-result"] result = res.encode("utf-8") else: result = res diff --git a/tests/test_awslambda/test_lambda.py b/tests/test_awslambda/test_lambda.py index e67576518a13..62e2bcef65ec 100644 --- 
a/tests/test_awslambda/test_lambda.py +++ b/tests/test_awslambda/test_lambda.py @@ -43,6 +43,7 @@ def _process_lambda(func_str): def get_test_zip_file1(): pfunc = """ def lambda_handler(event, context): + print("custom log event") return event """ return _process_lambda(pfunc) @@ -115,11 +116,11 @@ def test_invoke_requestresponse_function(): ) success_result["StatusCode"].should.equal(200) - result_obj = json.loads( - base64.b64decode(success_result["LogResult"]).decode("utf-8") - ) + logs = base64.b64decode(success_result["LogResult"]).decode("utf-8") - result_obj.should.equal(in_data) + logs.should.contain("START RequestId:") + logs.should.contain("custom log event") + logs.should.contain("END RequestId:") payload = success_result["Payload"].read().decode("utf-8") json.loads(payload).should.equal(in_data) @@ -152,11 +153,6 @@ def test_invoke_requestresponse_function_with_arn(): ) success_result["StatusCode"].should.equal(200) - result_obj = json.loads( - base64.b64decode(success_result["LogResult"]).decode("utf-8") - ) - - result_obj.should.equal(in_data) payload = success_result["Payload"].read().decode("utf-8") json.loads(payload).should.equal(in_data) From f82e834225809af1d3a56769baec8960a4b90481 Mon Sep 17 00:00:00 2001 From: Bert Blommers Date: Thu, 7 May 2020 10:55:15 +0100 Subject: [PATCH 343/658] Lambda - Only return Logs if LogType=Tail --- moto/awslambda/models.py | 4 ++-- moto/awslambda/responses.py | 4 +++- tests/test_awslambda/test_lambda.py | 11 +++++++++++ 3 files changed, 16 insertions(+), 3 deletions(-) diff --git a/moto/awslambda/models.py b/moto/awslambda/models.py index 28cbe61aff87..829aa76d2a18 100644 --- a/moto/awslambda/models.py +++ b/moto/awslambda/models.py @@ -1049,9 +1049,9 @@ def invoke(self, function_name, qualifier, body, headers, response_headers): if fn: payload = fn.invoke(body, headers, response_headers) response_headers["Content-Length"] = str(len(payload)) - return response_headers, payload + return payload else: - return response_headers, None + return None def do_validate_s3(): diff --git a/moto/awslambda/responses.py b/moto/awslambda/responses.py index d81bd55d9971..a4f559fc2076 100644 --- a/moto/awslambda/responses.py +++ b/moto/awslambda/responses.py @@ -178,7 +178,7 @@ def _invoke(self, request, full_url): function_name = unquote(self.path.rsplit("/", 2)[-2]) qualifier = self._get_param("qualifier") - response_header, payload = self.lambda_backend.invoke( + payload = self.lambda_backend.invoke( function_name, qualifier, self.body, self.headers, response_headers ) if payload: @@ -187,6 +187,8 @@ def _invoke(self, request, full_url): elif request.headers.get("X-Amz-Invocation-Type") == "DryRun": status_code = 204 else: + if request.headers.get("X-Amz-Log-Type") != "Tail": + del response_headers["x-amz-log-result"] status_code = 200 return status_code, response_headers, payload else: diff --git a/tests/test_awslambda/test_lambda.py b/tests/test_awslambda/test_lambda.py index 62e2bcef65ec..be5835b5fc78 100644 --- a/tests/test_awslambda/test_lambda.py +++ b/tests/test_awslambda/test_lambda.py @@ -113,6 +113,7 @@ def test_invoke_requestresponse_function(): FunctionName="testFunction", InvocationType="RequestResponse", Payload=json.dumps(in_data), + LogType="Tail" ) success_result["StatusCode"].should.equal(200) @@ -125,6 +126,16 @@ def test_invoke_requestresponse_function(): payload = success_result["Payload"].read().decode("utf-8") json.loads(payload).should.equal(in_data) + # Logs should not be returned by default, only when the LogType-param is 
supplied + success_result = conn.invoke( + FunctionName="testFunction", + InvocationType="RequestResponse", + Payload=json.dumps(in_data), + ) + + success_result["StatusCode"].should.equal(200) + assert "LogResult" not in success_result + @mock_lambda def test_invoke_requestresponse_function_with_arn(): From 0718525a2a732ba2db060a8c55c3fc6b9b1b9763 Mon Sep 17 00:00:00 2001 From: Bert Blommers Date: Thu, 7 May 2020 12:29:18 +0100 Subject: [PATCH 344/658] Linting --- tests/test_awslambda/test_lambda.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/test_awslambda/test_lambda.py b/tests/test_awslambda/test_lambda.py index be5835b5fc78..8879ad7e34a4 100644 --- a/tests/test_awslambda/test_lambda.py +++ b/tests/test_awslambda/test_lambda.py @@ -113,7 +113,7 @@ def test_invoke_requestresponse_function(): FunctionName="testFunction", InvocationType="RequestResponse", Payload=json.dumps(in_data), - LogType="Tail" + LogType="Tail", ) success_result["StatusCode"].should.equal(200) From 9e7803dc3601427b8f195f715f07a76a23216749 Mon Sep 17 00:00:00 2001 From: pvbouwel Date: Thu, 7 May 2020 21:29:20 +0100 Subject: [PATCH 345/658] [Bugfix] UpdateExpression using ADD from zero (#2975) When using the ADD syntax to sum up different components the path that is provided is allowed to be non-existent. In such a case DynamoDB will initialize it depending on the type of the value. If it is a number it will be initialized with 0. If it is a set it will be initialized with an empty set. --- moto/dynamodb2/parsing/executors.py | 25 +++++- tests/test_dynamodb2/test_dynamodb.py | 78 +++++++++++++++++++ .../test_dynamodb_table_with_range_key.py | 23 +++--- 3 files changed, 112 insertions(+), 14 deletions(-) diff --git a/moto/dynamodb2/parsing/executors.py b/moto/dynamodb2/parsing/executors.py index 8c51c9cec87a..2f2f2bb8219c 100644 --- a/moto/dynamodb2/parsing/executors.py +++ b/moto/dynamodb2/parsing/executors.py @@ -1,6 +1,10 @@ from abc import abstractmethod -from moto.dynamodb2.exceptions import IncorrectOperandType, IncorrectDataType +from moto.dynamodb2.exceptions import ( + IncorrectOperandType, + IncorrectDataType, + ProvidedKeyDoesNotExist, +) from moto.dynamodb2.models import DynamoType from moto.dynamodb2.models.dynamo_type import DDBTypeConversion, DDBType from moto.dynamodb2.parsing.ast_nodes import ( @@ -193,7 +197,18 @@ def execute(self, item): value_to_add = self.get_action_value() if isinstance(value_to_add, DynamoType): if value_to_add.is_set(): - current_string_set = self.get_item_at_end_of_path(item) + try: + current_string_set = self.get_item_at_end_of_path(item) + except ProvidedKeyDoesNotExist: + current_string_set = DynamoType({value_to_add.type: []}) + SetExecutor.set( + item_part_to_modify_with_set=self.get_item_before_end_of_path( + item + ), + element_to_set=self.get_element_to_action(), + value_to_set=current_string_set, + expression_attribute_names=self.expression_attribute_names, + ) assert isinstance(current_string_set, DynamoType) if not current_string_set.type == value_to_add.type: raise IncorrectDataType() @@ -204,7 +219,11 @@ def execute(self, item): else: current_string_set.value.append(value) elif value_to_add.type == DDBType.NUMBER: - existing_value = self.get_item_at_end_of_path(item) + try: + existing_value = self.get_item_at_end_of_path(item) + except ProvidedKeyDoesNotExist: + existing_value = DynamoType({DDBType.NUMBER: "0"}) + assert isinstance(existing_value, DynamoType) if not existing_value.type == DDBType.NUMBER: raise IncorrectDataType() diff 
--git a/tests/test_dynamodb2/test_dynamodb.py b/tests/test_dynamodb2/test_dynamodb.py index 470c5f8ffb40..9f917a7aeee3 100644 --- a/tests/test_dynamodb2/test_dynamodb.py +++ b/tests/test_dynamodb2/test_dynamodb.py @@ -5029,3 +5029,81 @@ def test_update_item_atomic_counter_return_values(): "v" in response["Attributes"] ), "v has been updated, and should be returned here" response["Attributes"]["v"]["N"].should.equal("8") + + +@mock_dynamodb2 +def test_update_item_atomic_counter_from_zero(): + table = "table_t" + ddb_mock = boto3.client("dynamodb", region_name="eu-west-1") + ddb_mock.create_table( + TableName=table, + KeySchema=[{"AttributeName": "t_id", "KeyType": "HASH"}], + AttributeDefinitions=[{"AttributeName": "t_id", "AttributeType": "S"}], + BillingMode="PAY_PER_REQUEST", + ) + + key = {"t_id": {"S": "item1"}} + + ddb_mock.put_item( + TableName=table, Item=key, + ) + + ddb_mock.update_item( + TableName=table, + Key=key, + UpdateExpression="add n_i :inc1, n_f :inc2", + ExpressionAttributeValues={":inc1": {"N": "1.2"}, ":inc2": {"N": "-0.5"}}, + ) + updated_item = ddb_mock.get_item(TableName=table, Key=key)["Item"] + assert updated_item["n_i"]["N"] == "1.2" + assert updated_item["n_f"]["N"] == "-0.5" + + +@mock_dynamodb2 +def test_update_item_add_to_non_existent_set(): + table = "table_t" + ddb_mock = boto3.client("dynamodb", region_name="eu-west-1") + ddb_mock.create_table( + TableName=table, + KeySchema=[{"AttributeName": "t_id", "KeyType": "HASH"}], + AttributeDefinitions=[{"AttributeName": "t_id", "AttributeType": "S"}], + BillingMode="PAY_PER_REQUEST", + ) + key = {"t_id": {"S": "item1"}} + ddb_mock.put_item( + TableName=table, Item=key, + ) + + ddb_mock.update_item( + TableName=table, + Key=key, + UpdateExpression="add s_i :s1", + ExpressionAttributeValues={":s1": {"SS": ["hello"]}}, + ) + updated_item = ddb_mock.get_item(TableName=table, Key=key)["Item"] + assert updated_item["s_i"]["SS"] == ["hello"] + + +@mock_dynamodb2 +def test_update_item_add_to_non_existent_number_set(): + table = "table_t" + ddb_mock = boto3.client("dynamodb", region_name="eu-west-1") + ddb_mock.create_table( + TableName=table, + KeySchema=[{"AttributeName": "t_id", "KeyType": "HASH"}], + AttributeDefinitions=[{"AttributeName": "t_id", "AttributeType": "S"}], + BillingMode="PAY_PER_REQUEST", + ) + key = {"t_id": {"S": "item1"}} + ddb_mock.put_item( + TableName=table, Item=key, + ) + + ddb_mock.update_item( + TableName=table, + Key=key, + UpdateExpression="add s_i :s1", + ExpressionAttributeValues={":s1": {"NS": ["3"]}}, + ) + updated_item = ddb_mock.get_item(TableName=table, Key=key)["Item"] + assert updated_item["s_i"]["NS"] == ["3"] diff --git a/tests/test_dynamodb2/test_dynamodb_table_with_range_key.py b/tests/test_dynamodb2/test_dynamodb_table_with_range_key.py index 6fba713ec4dc..33f65d5ec135 100644 --- a/tests/test_dynamodb2/test_dynamodb_table_with_range_key.py +++ b/tests/test_dynamodb2/test_dynamodb_table_with_range_key.py @@ -1307,16 +1307,16 @@ def test_update_item_add_with_expression(): ExpressionAttributeValues={":v": {"item4"}}, ) current_item["str_set"] = current_item["str_set"].union({"item4"}) - dict(table.get_item(Key=item_key)["Item"]).should.equal(current_item) + assert dict(table.get_item(Key=item_key)["Item"]) == current_item # Update item to add a string value to a non-existing set - # Should throw: 'The provided key element does not match the schema' - assert_failure_due_to_key_not_in_schema( - table.update_item, + table.update_item( Key=item_key, UpdateExpression="ADD 
non_existing_str_set :v", ExpressionAttributeValues={":v": {"item4"}}, ) + current_item["non_existing_str_set"] = {"item4"} + assert dict(table.get_item(Key=item_key)["Item"]) == current_item # Update item to add a num value to a num set table.update_item( @@ -1325,7 +1325,7 @@ def test_update_item_add_with_expression(): ExpressionAttributeValues={":v": {6}}, ) current_item["num_set"] = current_item["num_set"].union({6}) - dict(table.get_item(Key=item_key)["Item"]).should.equal(current_item) + assert dict(table.get_item(Key=item_key)["Item"]) == current_item # Update item to add a value to a number value table.update_item( @@ -1334,7 +1334,7 @@ def test_update_item_add_with_expression(): ExpressionAttributeValues={":v": 20}, ) current_item["num_val"] = current_item["num_val"] + 20 - dict(table.get_item(Key=item_key)["Item"]).should.equal(current_item) + assert dict(table.get_item(Key=item_key)["Item"]) == current_item # Attempt to add a number value to a string set, should raise Client Error table.update_item.when.called_with( @@ -1342,7 +1342,7 @@ def test_update_item_add_with_expression(): UpdateExpression="ADD str_set :v", ExpressionAttributeValues={":v": 20}, ).should.have.raised(ClientError) - dict(table.get_item(Key=item_key)["Item"]).should.equal(current_item) + assert dict(table.get_item(Key=item_key)["Item"]) == current_item # Attempt to add a number set to the string set, should raise a ClientError table.update_item.when.called_with( @@ -1350,7 +1350,7 @@ def test_update_item_add_with_expression(): UpdateExpression="ADD str_set :v", ExpressionAttributeValues={":v": {20}}, ).should.have.raised(ClientError) - dict(table.get_item(Key=item_key)["Item"]).should.equal(current_item) + assert dict(table.get_item(Key=item_key)["Item"]) == current_item # Attempt to update with a bad expression table.update_item.when.called_with( @@ -1388,17 +1388,18 @@ def test_update_item_add_with_nested_sets(): current_item["nested"]["str_set"] = current_item["nested"]["str_set"].union( {"item4"} ) - dict(table.get_item(Key=item_key)["Item"]).should.equal(current_item) + assert dict(table.get_item(Key=item_key)["Item"]) == current_item # Update item to add a string value to a non-existing set # Should raise - assert_failure_due_to_key_not_in_schema( - table.update_item, + table.update_item( Key=item_key, UpdateExpression="ADD #ns.#ne :v", ExpressionAttributeNames={"#ns": "nested", "#ne": "non_existing_str_set"}, ExpressionAttributeValues={":v": {"new_item"}}, ) + current_item["nested"]["non_existing_str_set"] = {"new_item"} + assert dict(table.get_item(Key=item_key)["Item"]) == current_item @mock_dynamodb2 From 65e790c4eb6928c76797ab2985f2935f9196d46d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Anton=20Gr=C3=BCbel?= <33207684+gruebel@users.noreply.github.com> Date: Fri, 8 May 2020 16:57:48 +0200 Subject: [PATCH 346/658] Add dynamodb continuous backups (#2976) * remove print statement * Add dynamodb.describe_continuous_backups * Add dynamodb.update_continuous_backups * Fix Python 2 timestamp error --- moto/dynamodb2/models/__init__.py | 33 +++++++ moto/dynamodb2/responses.py | 29 ++++++ tests/test_dynamodb2/test_dynamodb.py | 136 ++++++++++++++++++++++++++ tests/test_ssm/test_ssm_boto3.py | 1 - 4 files changed, 198 insertions(+), 1 deletion(-) diff --git a/moto/dynamodb2/models/__init__.py b/moto/dynamodb2/models/__init__.py index ea16f456f142..f459cd0433c8 100644 --- a/moto/dynamodb2/models/__init__.py +++ b/moto/dynamodb2/models/__init__.py @@ -316,6 +316,12 @@ def __init__( } 
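# A hedged aside on the continuous-backups support this patch adds below:
# the table default keeps ContinuousBackupsStatus at ENABLED (as in real
# DynamoDB, where continuous backups cannot be switched off), while
# point-in-time recovery starts DISABLED until a caller opts in, e.g.:
#
#     client = boto3.client("dynamodb", region_name="us-east-1")
#     client.update_continuous_backups(
#         TableName="test",
#         PointInTimeRecoverySpecification={"PointInTimeRecoveryEnabled": True},
#     )
#
# after which update_continuous_backups stamps Earliest/LatestRestorableDateTime
# with unix_time() values, as exercised by the tests further down.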
self.set_stream_specification(streams) self.lambda_event_source_mappings = {} + self.continuous_backups = { + "ContinuousBackupsStatus": "ENABLED", # One of 'ENABLED'|'DISABLED', it's enabled by default + "PointInTimeRecoveryDescription": { + "PointInTimeRecoveryStatus": "DISABLED" # One of 'ENABLED'|'DISABLED' + }, + } @classmethod def create_from_cloudformation_json( @@ -1246,6 +1252,33 @@ def transact_write_items(self, transact_items): self.tables = original_table_state raise + def describe_continuous_backups(self, table_name): + table = self.get_table(table_name) + + return table.continuous_backups + + def update_continuous_backups(self, table_name, point_in_time_spec): + table = self.get_table(table_name) + + if ( + point_in_time_spec["PointInTimeRecoveryEnabled"] + and table.continuous_backups["PointInTimeRecoveryDescription"][ + "PointInTimeRecoveryStatus" + ] + == "DISABLED" + ): + table.continuous_backups["PointInTimeRecoveryDescription"] = { + "PointInTimeRecoveryStatus": "ENABLED", + "EarliestRestorableDateTime": unix_time(), + "LatestRestorableDateTime": unix_time(), + } + elif not point_in_time_spec["PointInTimeRecoveryEnabled"]: + table.continuous_backups["PointInTimeRecoveryDescription"] = { + "PointInTimeRecoveryStatus": "DISABLED" + } + + return table.continuous_backups + dynamodb_backends = {} for region in Session().get_available_regions("dynamodb"): diff --git a/moto/dynamodb2/responses.py b/moto/dynamodb2/responses.py index b703f2935b13..02c4749d357e 100644 --- a/moto/dynamodb2/responses.py +++ b/moto/dynamodb2/responses.py @@ -936,3 +936,32 @@ def transact_write_items(self): ) response = {"ConsumedCapacity": [], "ItemCollectionMetrics": {}} return dynamo_json_dump(response) + + def describe_continuous_backups(self): + name = self.body["TableName"] + + if self.dynamodb_backend.get_table(name) is None: + return self.error( + "com.amazonaws.dynamodb.v20111205#TableNotFoundException", + "Table not found: {}".format(name), + ) + + response = self.dynamodb_backend.describe_continuous_backups(name) + + return json.dumps({"ContinuousBackupsDescription": response}) + + def update_continuous_backups(self): + name = self.body["TableName"] + point_in_time_spec = self.body["PointInTimeRecoverySpecification"] + + if self.dynamodb_backend.get_table(name) is None: + return self.error( + "com.amazonaws.dynamodb.v20111205#TableNotFoundException", + "Table not found: {}".format(name), + ) + + response = self.dynamodb_backend.update_continuous_backups( + name, point_in_time_spec + ) + + return json.dumps({"ContinuousBackupsDescription": response}) diff --git a/tests/test_dynamodb2/test_dynamodb.py b/tests/test_dynamodb2/test_dynamodb.py index 9f917a7aeee3..8774c3e88aa2 100644 --- a/tests/test_dynamodb2/test_dynamodb.py +++ b/tests/test_dynamodb2/test_dynamodb.py @@ -1,5 +1,6 @@ from __future__ import unicode_literals, print_function +from datetime import datetime from decimal import Decimal import boto @@ -2049,6 +2050,141 @@ def test_set_ttl(): resp["TimeToLiveDescription"]["TimeToLiveStatus"].should.equal("DISABLED") +@mock_dynamodb2 +def test_describe_continuous_backups(): + # given + client = boto3.client("dynamodb", region_name="us-east-1") + table_name = client.create_table( + TableName="test", + AttributeDefinitions=[ + {"AttributeName": "client", "AttributeType": "S"}, + {"AttributeName": "app", "AttributeType": "S"}, + ], + KeySchema=[ + {"AttributeName": "client", "KeyType": "HASH"}, + {"AttributeName": "app", "KeyType": "RANGE"}, + ], + BillingMode="PAY_PER_REQUEST", + 
)["TableDescription"]["TableName"] + + # when + response = client.describe_continuous_backups(TableName=table_name) + + # then + response["ContinuousBackupsDescription"].should.equal( + { + "ContinuousBackupsStatus": "ENABLED", + "PointInTimeRecoveryDescription": {"PointInTimeRecoveryStatus": "DISABLED"}, + } + ) + + +@mock_dynamodb2 +def test_describe_continuous_backups_errors(): + # given + client = boto3.client("dynamodb", region_name="us-east-1") + + # when + with assert_raises(Exception) as e: + client.describe_continuous_backups(TableName="not-existing-table") + + # then + ex = e.exception + ex.operation_name.should.equal("DescribeContinuousBackups") + ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) + ex.response["Error"]["Code"].should.contain("TableNotFoundException") + ex.response["Error"]["Message"].should.equal("Table not found: not-existing-table") + + +@mock_dynamodb2 +def test_update_continuous_backups(): + # given + client = boto3.client("dynamodb", region_name="us-east-1") + table_name = client.create_table( + TableName="test", + AttributeDefinitions=[ + {"AttributeName": "client", "AttributeType": "S"}, + {"AttributeName": "app", "AttributeType": "S"}, + ], + KeySchema=[ + {"AttributeName": "client", "KeyType": "HASH"}, + {"AttributeName": "app", "KeyType": "RANGE"}, + ], + BillingMode="PAY_PER_REQUEST", + )["TableDescription"]["TableName"] + + # when + response = client.update_continuous_backups( + TableName=table_name, + PointInTimeRecoverySpecification={"PointInTimeRecoveryEnabled": True}, + ) + + # then + response["ContinuousBackupsDescription"]["ContinuousBackupsStatus"].should.equal( + "ENABLED" + ) + point_in_time = response["ContinuousBackupsDescription"][ + "PointInTimeRecoveryDescription" + ] + earliest_datetime = point_in_time["EarliestRestorableDateTime"] + earliest_datetime.should.be.a(datetime) + latest_datetime = point_in_time["LatestRestorableDateTime"] + latest_datetime.should.be.a(datetime) + point_in_time["PointInTimeRecoveryStatus"].should.equal("ENABLED") + + # when + # a second update should not change anything + response = client.update_continuous_backups( + TableName=table_name, + PointInTimeRecoverySpecification={"PointInTimeRecoveryEnabled": True}, + ) + + # then + response["ContinuousBackupsDescription"]["ContinuousBackupsStatus"].should.equal( + "ENABLED" + ) + point_in_time = response["ContinuousBackupsDescription"][ + "PointInTimeRecoveryDescription" + ] + point_in_time["EarliestRestorableDateTime"].should.equal(earliest_datetime) + point_in_time["LatestRestorableDateTime"].should.equal(latest_datetime) + point_in_time["PointInTimeRecoveryStatus"].should.equal("ENABLED") + + # when + response = client.update_continuous_backups( + TableName=table_name, + PointInTimeRecoverySpecification={"PointInTimeRecoveryEnabled": False}, + ) + + # then + response["ContinuousBackupsDescription"].should.equal( + { + "ContinuousBackupsStatus": "ENABLED", + "PointInTimeRecoveryDescription": {"PointInTimeRecoveryStatus": "DISABLED"}, + } + ) + + +@mock_dynamodb2 +def test_update_continuous_backups_errors(): + # given + client = boto3.client("dynamodb", region_name="us-east-1") + + # when + with assert_raises(Exception) as e: + client.update_continuous_backups( + TableName="not-existing-table", + PointInTimeRecoverySpecification={"PointInTimeRecoveryEnabled": True}, + ) + + # then + ex = e.exception + ex.operation_name.should.equal("UpdateContinuousBackups") + ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) + 
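# Hedged note on the next assertion: the backend reports the not-found case
# with the namespaced type "com.amazonaws.dynamodb.v20111205#TableNotFoundException"
# (see the responses.py hunk above), so the test checks the Code field with
# should.contain() rather than should.equal(), which holds whether or not the
# client strips the namespace prefix.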
ex.response["Error"]["Code"].should.contain("TableNotFoundException") + ex.response["Error"]["Message"].should.equal("Table not found: not-existing-table") + + # https://github.com/spulec/moto/issues/1043 @mock_dynamodb2 def test_query_missing_expr_names(): diff --git a/tests/test_ssm/test_ssm_boto3.py b/tests/test_ssm/test_ssm_boto3.py index e757a4006536..837f81bf5529 100644 --- a/tests/test_ssm/test_ssm_boto3.py +++ b/tests/test_ssm/test_ssm_boto3.py @@ -324,7 +324,6 @@ def test_get_parameters_errors(): ", ".join(ssm_parameters.keys()) ) ) - print(ex.response["Error"]["Message"]) @mock_ssm From a2f5c41372f7bbad0f3bb075eb94b5fa5792c2f6 Mon Sep 17 00:00:00 2001 From: Erik Hovland Date: Fri, 8 May 2020 09:07:28 -0700 Subject: [PATCH 347/658] Check off assume_role_with_saml since it is implemented now. (#2977) --- IMPLEMENTATION_COVERAGE.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/IMPLEMENTATION_COVERAGE.md b/IMPLEMENTATION_COVERAGE.md index f99d86df3250..ef67b1cc34ff 100644 --- a/IMPLEMENTATION_COVERAGE.md +++ b/IMPLEMENTATION_COVERAGE.md @@ -7212,7 +7212,7 @@ ## sts 62% implemented - [X] assume_role -- [ ] assume_role_with_saml +- [X] assume_role_with_saml - [X] assume_role_with_web_identity - [ ] decode_authorization_message - [ ] get_access_key_info From 9618e29ba9bc8a4959a62df1cfc7721fafa308c8 Mon Sep 17 00:00:00 2001 From: Denver Janke Date: Mon, 11 May 2020 16:44:26 +1000 Subject: [PATCH 348/658] Always call update ELBs for ASGs (#2980) --- moto/autoscaling/models.py | 13 +- tests/test_autoscaling/test_autoscaling.py | 232 +++++++++++++++++++++ 2 files changed, 238 insertions(+), 7 deletions(-) diff --git a/moto/autoscaling/models.py b/moto/autoscaling/models.py index b757672d0821..1da12a09cd5a 100644 --- a/moto/autoscaling/models.py +++ b/moto/autoscaling/models.py @@ -419,11 +419,8 @@ def set_desired_capacity(self, new_capacity): curr_instance_count = len(self.active_instances()) if self.desired_capacity == curr_instance_count: - self.autoscaling_backend.update_attached_elbs(self.name) - self.autoscaling_backend.update_attached_target_groups(self.name) - return - - if self.desired_capacity > curr_instance_count: + pass # Nothing to do here + elif self.desired_capacity > curr_instance_count: # Need more instances count_needed = int(self.desired_capacity) - int(curr_instance_count) @@ -447,6 +444,7 @@ def set_desired_capacity(self, new_capacity): self.instance_states = list( set(self.instance_states) - set(instances_to_remove) ) + if self.name in self.autoscaling_backend.autoscaling_groups: self.autoscaling_backend.update_attached_elbs(self.name) self.autoscaling_backend.update_attached_target_groups(self.name) @@ -695,6 +693,7 @@ def attach_instances(self, group_name, instance_ids): ) group.instance_states.extend(new_instances) self.update_attached_elbs(group.name) + self.update_attached_target_groups(group.name) def set_instance_health( self, instance_id, health_status, should_respect_grace_period @@ -938,8 +937,7 @@ def enter_standby_instances(self, group_name, instance_ids, should_decrement): standby_instances.append(instance_state) if should_decrement: group.desired_capacity = group.desired_capacity - len(instance_ids) - else: - group.set_desired_capacity(group.desired_capacity) + group.set_desired_capacity(group.desired_capacity) return standby_instances, original_size, group.desired_capacity def exit_standby_instances(self, group_name, instance_ids): @@ -951,6 +949,7 @@ def exit_standby_instances(self, group_name, instance_ids): 
instance_state.lifecycle_state = "InService" standby_instances.append(instance_state) group.desired_capacity = group.desired_capacity + len(instance_ids) + group.set_desired_capacity(group.desired_capacity) return standby_instances, original_size, group.desired_capacity def terminate_instance(self, instance_id, should_decrement): diff --git a/tests/test_autoscaling/test_autoscaling.py b/tests/test_autoscaling/test_autoscaling.py index 3a10f20ffe9a..93a8c5a4894c 100644 --- a/tests/test_autoscaling/test_autoscaling.py +++ b/tests/test_autoscaling/test_autoscaling.py @@ -1071,6 +1071,7 @@ def test_autoscaling_describe_policies_boto3(): response["ScalingPolicies"][0]["PolicyName"].should.equal("test_policy_down") +@mock_elb @mock_autoscaling @mock_ec2 def test_detach_one_instance_decrement(): @@ -1096,6 +1097,19 @@ def test_detach_one_instance_decrement(): ], VPCZoneIdentifier=mocked_networking["subnet1"], ) + + elb_client = boto3.client("elb", region_name="us-east-1") + elb_client.create_load_balancer( + LoadBalancerName="my-lb", + Listeners=[{"Protocol": "tcp", "LoadBalancerPort": 80, "InstancePort": 8080}], + AvailabilityZones=["us-east-1a", "us-east-1b"], + ) + + response = client.attach_load_balancers( + AutoScalingGroupName="test_asg", LoadBalancerNames=["my-lb"] + ) + response["ResponseMetadata"]["HTTPStatusCode"].should.equal(200) + response = client.describe_auto_scaling_groups(AutoScalingGroupNames=["test_asg"]) instance_to_detach = response["AutoScalingGroups"][0]["Instances"][0]["InstanceId"] instance_to_keep = response["AutoScalingGroups"][0]["Instances"][1]["InstanceId"] @@ -1111,6 +1125,9 @@ def test_detach_one_instance_decrement(): response = client.describe_auto_scaling_groups(AutoScalingGroupNames=["test_asg"]) response["AutoScalingGroups"][0]["Instances"].should.have.length_of(1) + instance_to_detach.shouldnt.be.within( + [x["InstanceId"] for x in response["AutoScalingGroups"][0]["Instances"]] + ) # test to ensure tag has been removed response = ec2_client.describe_instances(InstanceIds=[instance_to_detach]) @@ -1122,7 +1139,14 @@ def test_detach_one_instance_decrement(): tags = response["Reservations"][0]["Instances"][0]["Tags"] tags.should.have.length_of(2) + response = elb_client.describe_load_balancers(LoadBalancerNames=["my-lb"]) + list(response["LoadBalancerDescriptions"][0]["Instances"]).should.have.length_of(1) + instance_to_detach.shouldnt.be.within( + [x["InstanceId"] for x in response["LoadBalancerDescriptions"][0]["Instances"]] + ) + +@mock_elb @mock_autoscaling @mock_ec2 def test_detach_one_instance(): @@ -1148,6 +1172,19 @@ def test_detach_one_instance(): ], VPCZoneIdentifier=mocked_networking["subnet1"], ) + + elb_client = boto3.client("elb", region_name="us-east-1") + elb_client.create_load_balancer( + LoadBalancerName="my-lb", + Listeners=[{"Protocol": "tcp", "LoadBalancerPort": 80, "InstancePort": 8080}], + AvailabilityZones=["us-east-1a", "us-east-1b"], + ) + + response = client.attach_load_balancers( + AutoScalingGroupName="test_asg", LoadBalancerNames=["my-lb"] + ) + response["ResponseMetadata"]["HTTPStatusCode"].should.equal(200) + response = client.describe_auto_scaling_groups(AutoScalingGroupNames=["test_asg"]) instance_to_detach = response["AutoScalingGroups"][0]["Instances"][0]["InstanceId"] instance_to_keep = response["AutoScalingGroups"][0]["Instances"][1]["InstanceId"] @@ -1173,7 +1210,14 @@ def test_detach_one_instance(): tags = response["Reservations"][0]["Instances"][0]["Tags"] tags.should.have.length_of(2) + response = 
elb_client.describe_load_balancers(LoadBalancerNames=["my-lb"]) + list(response["LoadBalancerDescriptions"][0]["Instances"]).should.have.length_of(2) + instance_to_detach.shouldnt.be.within( + [x["InstanceId"] for x in response["LoadBalancerDescriptions"][0]["Instances"]] + ) + +@mock_elb @mock_autoscaling @mock_ec2 def test_standby_one_instance_decrement(): @@ -1199,6 +1243,19 @@ def test_standby_one_instance_decrement(): ], VPCZoneIdentifier=mocked_networking["subnet1"], ) + + elb_client = boto3.client("elb", region_name="us-east-1") + elb_client.create_load_balancer( + LoadBalancerName="my-lb", + Listeners=[{"Protocol": "tcp", "LoadBalancerPort": 80, "InstancePort": 8080}], + AvailabilityZones=["us-east-1a", "us-east-1b"], + ) + + response = client.attach_load_balancers( + AutoScalingGroupName="test_asg", LoadBalancerNames=["my-lb"] + ) + response["ResponseMetadata"]["HTTPStatusCode"].should.equal(200) + response = client.describe_auto_scaling_groups(AutoScalingGroupNames=["test_asg"]) instance_to_standby = response["AutoScalingGroups"][0]["Instances"][0]["InstanceId"] instance_to_keep = response["AutoScalingGroups"][0]["Instances"][1]["InstanceId"] @@ -1226,7 +1283,14 @@ def test_standby_one_instance_decrement(): tags = instance["Tags"] tags.should.have.length_of(2) + response = elb_client.describe_load_balancers(LoadBalancerNames=["my-lb"]) + list(response["LoadBalancerDescriptions"][0]["Instances"]).should.have.length_of(1) + instance_to_standby.shouldnt.be.within( + [x["InstanceId"] for x in response["LoadBalancerDescriptions"][0]["Instances"]] + ) + +@mock_elb @mock_autoscaling @mock_ec2 def test_standby_one_instance(): @@ -1252,6 +1316,19 @@ def test_standby_one_instance(): ], VPCZoneIdentifier=mocked_networking["subnet1"], ) + + elb_client = boto3.client("elb", region_name="us-east-1") + elb_client.create_load_balancer( + LoadBalancerName="my-lb", + Listeners=[{"Protocol": "tcp", "LoadBalancerPort": 80, "InstancePort": 8080}], + AvailabilityZones=["us-east-1a", "us-east-1b"], + ) + + response = client.attach_load_balancers( + AutoScalingGroupName="test_asg", LoadBalancerNames=["my-lb"] + ) + response["ResponseMetadata"]["HTTPStatusCode"].should.equal(200) + response = client.describe_auto_scaling_groups(AutoScalingGroupNames=["test_asg"]) instance_to_standby = response["AutoScalingGroups"][0]["Instances"][0]["InstanceId"] instance_to_keep = response["AutoScalingGroups"][0]["Instances"][1]["InstanceId"] @@ -1279,6 +1356,12 @@ def test_standby_one_instance(): tags = instance["Tags"] tags.should.have.length_of(2) + response = elb_client.describe_load_balancers(LoadBalancerNames=["my-lb"]) + list(response["LoadBalancerDescriptions"][0]["Instances"]).should.have.length_of(2) + instance_to_standby.shouldnt.be.within( + [x["InstanceId"] for x in response["LoadBalancerDescriptions"][0]["Instances"]] + ) + @mock_elb @mock_autoscaling @@ -1338,8 +1421,12 @@ def test_standby_elb_update(): response = elb_client.describe_load_balancers(LoadBalancerNames=["my-lb"]) list(response["LoadBalancerDescriptions"][0]["Instances"]).should.have.length_of(2) + instance_to_standby.shouldnt.be.within( + [x["InstanceId"] for x in response["LoadBalancerDescriptions"][0]["Instances"]] + ) +@mock_elb @mock_autoscaling @mock_ec2 def test_standby_terminate_instance_decrement(): @@ -1366,6 +1453,18 @@ def test_standby_terminate_instance_decrement(): VPCZoneIdentifier=mocked_networking["subnet1"], ) + elb_client = boto3.client("elb", region_name="us-east-1") + elb_client.create_load_balancer( + 
LoadBalancerName="my-lb", + Listeners=[{"Protocol": "tcp", "LoadBalancerPort": 80, "InstancePort": 8080}], + AvailabilityZones=["us-east-1a", "us-east-1b"], + ) + + response = client.attach_load_balancers( + AutoScalingGroupName="test_asg", LoadBalancerNames=["my-lb"] + ) + response["ResponseMetadata"]["HTTPStatusCode"].should.equal(200) + response = client.describe_auto_scaling_groups(AutoScalingGroupNames=["test_asg"]) instance_to_standby_terminate = response["AutoScalingGroups"][0]["Instances"][0][ "InstanceId" @@ -1409,7 +1508,14 @@ def test_standby_terminate_instance_decrement(): "terminated" ) + response = elb_client.describe_load_balancers(LoadBalancerNames=["my-lb"]) + list(response["LoadBalancerDescriptions"][0]["Instances"]).should.have.length_of(1) + instance_to_standby_terminate.shouldnt.be.within( + [x["InstanceId"] for x in response["LoadBalancerDescriptions"][0]["Instances"]] + ) + +@mock_elb @mock_autoscaling @mock_ec2 def test_standby_terminate_instance_no_decrement(): @@ -1436,6 +1542,18 @@ def test_standby_terminate_instance_no_decrement(): VPCZoneIdentifier=mocked_networking["subnet1"], ) + elb_client = boto3.client("elb", region_name="us-east-1") + elb_client.create_load_balancer( + LoadBalancerName="my-lb", + Listeners=[{"Protocol": "tcp", "LoadBalancerPort": 80, "InstancePort": 8080}], + AvailabilityZones=["us-east-1a", "us-east-1b"], + ) + + response = client.attach_load_balancers( + AutoScalingGroupName="test_asg", LoadBalancerNames=["my-lb"] + ) + response["ResponseMetadata"]["HTTPStatusCode"].should.equal(200) + response = client.describe_auto_scaling_groups(AutoScalingGroupNames=["test_asg"]) instance_to_standby_terminate = response["AutoScalingGroups"][0]["Instances"][0][ "InstanceId" @@ -1479,7 +1597,14 @@ def test_standby_terminate_instance_no_decrement(): "terminated" ) + response = elb_client.describe_load_balancers(LoadBalancerNames=["my-lb"]) + list(response["LoadBalancerDescriptions"][0]["Instances"]).should.have.length_of(2) + instance_to_standby_terminate.shouldnt.be.within( + [x["InstanceId"] for x in response["LoadBalancerDescriptions"][0]["Instances"]] + ) + +@mock_elb @mock_autoscaling @mock_ec2 def test_standby_detach_instance_decrement(): @@ -1506,6 +1631,18 @@ def test_standby_detach_instance_decrement(): VPCZoneIdentifier=mocked_networking["subnet1"], ) + elb_client = boto3.client("elb", region_name="us-east-1") + elb_client.create_load_balancer( + LoadBalancerName="my-lb", + Listeners=[{"Protocol": "tcp", "LoadBalancerPort": 80, "InstancePort": 8080}], + AvailabilityZones=["us-east-1a", "us-east-1b"], + ) + + response = client.attach_load_balancers( + AutoScalingGroupName="test_asg", LoadBalancerNames=["my-lb"] + ) + response["ResponseMetadata"]["HTTPStatusCode"].should.equal(200) + response = client.describe_auto_scaling_groups(AutoScalingGroupNames=["test_asg"]) instance_to_standby_detach = response["AutoScalingGroups"][0]["Instances"][0][ "InstanceId" @@ -1547,7 +1684,14 @@ def test_standby_detach_instance_decrement(): response = ec2_client.describe_instances(InstanceIds=[instance_to_standby_detach]) response["Reservations"][0]["Instances"][0]["State"]["Name"].should.equal("running") + response = elb_client.describe_load_balancers(LoadBalancerNames=["my-lb"]) + list(response["LoadBalancerDescriptions"][0]["Instances"]).should.have.length_of(1) + instance_to_standby_detach.shouldnt.be.within( + [x["InstanceId"] for x in response["LoadBalancerDescriptions"][0]["Instances"]] + ) + +@mock_elb @mock_autoscaling @mock_ec2 def 
test_standby_detach_instance_no_decrement(): @@ -1574,6 +1718,18 @@ def test_standby_detach_instance_no_decrement(): VPCZoneIdentifier=mocked_networking["subnet1"], ) + elb_client = boto3.client("elb", region_name="us-east-1") + elb_client.create_load_balancer( + LoadBalancerName="my-lb", + Listeners=[{"Protocol": "tcp", "LoadBalancerPort": 80, "InstancePort": 8080}], + AvailabilityZones=["us-east-1a", "us-east-1b"], + ) + + response = client.attach_load_balancers( + AutoScalingGroupName="test_asg", LoadBalancerNames=["my-lb"] + ) + response["ResponseMetadata"]["HTTPStatusCode"].should.equal(200) + response = client.describe_auto_scaling_groups(AutoScalingGroupNames=["test_asg"]) instance_to_standby_detach = response["AutoScalingGroups"][0]["Instances"][0][ "InstanceId" @@ -1615,7 +1771,14 @@ def test_standby_detach_instance_no_decrement(): response = ec2_client.describe_instances(InstanceIds=[instance_to_standby_detach]) response["Reservations"][0]["Instances"][0]["State"]["Name"].should.equal("running") + response = elb_client.describe_load_balancers(LoadBalancerNames=["my-lb"]) + list(response["LoadBalancerDescriptions"][0]["Instances"]).should.have.length_of(2) + instance_to_standby_detach.shouldnt.be.within( + [x["InstanceId"] for x in response["LoadBalancerDescriptions"][0]["Instances"]] + ) + +@mock_elb @mock_autoscaling @mock_ec2 def test_standby_exit_standby(): @@ -1642,6 +1805,18 @@ def test_standby_exit_standby(): VPCZoneIdentifier=mocked_networking["subnet1"], ) + elb_client = boto3.client("elb", region_name="us-east-1") + elb_client.create_load_balancer( + LoadBalancerName="my-lb", + Listeners=[{"Protocol": "tcp", "LoadBalancerPort": 80, "InstancePort": 8080}], + AvailabilityZones=["us-east-1a", "us-east-1b"], + ) + + response = client.attach_load_balancers( + AutoScalingGroupName="test_asg", LoadBalancerNames=["my-lb"] + ) + response["ResponseMetadata"]["HTTPStatusCode"].should.equal(200) + response = client.describe_auto_scaling_groups(AutoScalingGroupNames=["test_asg"]) instance_to_standby_exit_standby = response["AutoScalingGroups"][0]["Instances"][0][ "InstanceId" @@ -1683,7 +1858,14 @@ def test_standby_exit_standby(): ) response["Reservations"][0]["Instances"][0]["State"]["Name"].should.equal("running") + response = elb_client.describe_load_balancers(LoadBalancerNames=["my-lb"]) + list(response["LoadBalancerDescriptions"][0]["Instances"]).should.have.length_of(3) + instance_to_standby_exit_standby.should.be.within( + [x["InstanceId"] for x in response["LoadBalancerDescriptions"][0]["Instances"]] + ) + +@mock_elb @mock_autoscaling @mock_ec2 def test_attach_one_instance(): @@ -1711,6 +1893,18 @@ def test_attach_one_instance(): NewInstancesProtectedFromScaleIn=True, ) + elb_client = boto3.client("elb", region_name="us-east-1") + elb_client.create_load_balancer( + LoadBalancerName="my-lb", + Listeners=[{"Protocol": "tcp", "LoadBalancerPort": 80, "InstancePort": 8080}], + AvailabilityZones=["us-east-1a", "us-east-1b"], + ) + + response = client.attach_load_balancers( + AutoScalingGroupName="test_asg", LoadBalancerNames=["my-lb"] + ) + response["ResponseMetadata"]["HTTPStatusCode"].should.equal(200) + ec2 = boto3.resource("ec2", "us-east-1") instances_to_add = [ x.id for x in ec2.create_instances(ImageId="", MinCount=1, MaxCount=1) @@ -1727,6 +1921,9 @@ def test_attach_one_instance(): for instance in instances: instance["ProtectedFromScaleIn"].should.equal(True) + response = elb_client.describe_load_balancers(LoadBalancerNames=["my-lb"]) + 
list(response["LoadBalancerDescriptions"][0]["Instances"]).should.have.length_of(3) + @mock_autoscaling @mock_ec2 @@ -1948,6 +2145,7 @@ def test_terminate_instance_via_ec2_in_autoscaling_group(): replaced_instance_id.should_not.equal(original_instance_id) +@mock_elb @mock_autoscaling @mock_ec2 def test_terminate_instance_in_auto_scaling_group_decrement(): @@ -1966,6 +2164,18 @@ def test_terminate_instance_in_auto_scaling_group_decrement(): NewInstancesProtectedFromScaleIn=False, ) + elb_client = boto3.client("elb", region_name="us-east-1") + elb_client.create_load_balancer( + LoadBalancerName="my-lb", + Listeners=[{"Protocol": "tcp", "LoadBalancerPort": 80, "InstancePort": 8080}], + AvailabilityZones=["us-east-1a", "us-east-1b"], + ) + + response = client.attach_load_balancers( + AutoScalingGroupName="test_asg", LoadBalancerNames=["my-lb"] + ) + response["ResponseMetadata"]["HTTPStatusCode"].should.equal(200) + response = client.describe_auto_scaling_groups(AutoScalingGroupNames=["test_asg"]) original_instance_id = next( instance["InstanceId"] @@ -1979,7 +2189,11 @@ def test_terminate_instance_in_auto_scaling_group_decrement(): response["AutoScalingGroups"][0]["Instances"].should.equal([]) response["AutoScalingGroups"][0]["DesiredCapacity"].should.equal(0) + response = elb_client.describe_load_balancers(LoadBalancerNames=["my-lb"]) + list(response["LoadBalancerDescriptions"][0]["Instances"]).should.have.length_of(0) + +@mock_elb @mock_autoscaling @mock_ec2 def test_terminate_instance_in_auto_scaling_group_no_decrement(): @@ -1998,6 +2212,18 @@ def test_terminate_instance_in_auto_scaling_group_no_decrement(): NewInstancesProtectedFromScaleIn=False, ) + elb_client = boto3.client("elb", region_name="us-east-1") + elb_client.create_load_balancer( + LoadBalancerName="my-lb", + Listeners=[{"Protocol": "tcp", "LoadBalancerPort": 80, "InstancePort": 8080}], + AvailabilityZones=["us-east-1a", "us-east-1b"], + ) + + response = client.attach_load_balancers( + AutoScalingGroupName="test_asg", LoadBalancerNames=["my-lb"] + ) + response["ResponseMetadata"]["HTTPStatusCode"].should.equal(200) + response = client.describe_auto_scaling_groups(AutoScalingGroupNames=["test_asg"]) original_instance_id = next( instance["InstanceId"] @@ -2014,3 +2240,9 @@ def test_terminate_instance_in_auto_scaling_group_no_decrement(): ) replaced_instance_id.should_not.equal(original_instance_id) response["AutoScalingGroups"][0]["DesiredCapacity"].should.equal(1) + + response = elb_client.describe_load_balancers(LoadBalancerNames=["my-lb"]) + list(response["LoadBalancerDescriptions"][0]["Instances"]).should.have.length_of(1) + original_instance_id.shouldnt.be.within( + [x["InstanceId"] for x in response["LoadBalancerDescriptions"][0]["Instances"]] + ) From 1e0a7380d5ac219f6ead8b1fb2b2f1d243322102 Mon Sep 17 00:00:00 2001 From: Maxim Kirilov Date: Mon, 11 May 2020 15:23:45 +0300 Subject: [PATCH 349/658] Add support for BlockDeviceMappings argument (#2949) * Add support for BlockDeviceMappings argument upon run_instances execution * Remove redundant check for Ebs existence --- moto/ec2/models.py | 12 +++- moto/ec2/responses/instances.py | 113 ++++++++++++++++++++++--------- tests/test_ec2/test_instances.py | 105 ++++++++++++++++++++++++++++ 3 files changed, 195 insertions(+), 35 deletions(-) diff --git a/moto/ec2/models.py b/moto/ec2/models.py index e94d2877c990..bab4636af731 100644 --- a/moto/ec2/models.py +++ b/moto/ec2/models.py @@ -560,8 +560,10 @@ def __del__(self): # worst case we'll get IP address exaustion... 
rarely pass - def add_block_device(self, size, device_path): - volume = self.ec2_backend.create_volume(size, self.region_name) + def add_block_device(self, size, device_path, snapshot_id=None, encrypted=False): + volume = self.ec2_backend.create_volume( + size, self.region_name, snapshot_id, encrypted + ) self.ec2_backend.attach_volume(volume.id, self.id, device_path) def setup_defaults(self): @@ -891,8 +893,12 @@ def add_instances(self, image_id, count, user_data, security_group_names, **kwar new_instance.add_tags(instance_tags) if "block_device_mappings" in kwargs: for block_device in kwargs["block_device_mappings"]: + device_name = block_device["DeviceName"] + volume_size = block_device["Ebs"].get("VolumeSize") + snapshot_id = block_device["Ebs"].get("SnapshotId") + encrypted = block_device["Ebs"].get("Encrypted", False) new_instance.add_block_device( - block_device["Ebs"]["VolumeSize"], block_device["DeviceName"] + volume_size, device_name, snapshot_id, encrypted ) else: new_instance.setup_defaults() diff --git a/moto/ec2/responses/instances.py b/moto/ec2/responses/instances.py index de17f060931c..adcbfa741738 100644 --- a/moto/ec2/responses/instances.py +++ b/moto/ec2/responses/instances.py @@ -4,10 +4,16 @@ from moto.autoscaling import autoscaling_backends from moto.core.responses import BaseResponse from moto.core.utils import camelcase_to_underscores -from moto.ec2.utils import filters_from_querystring, dict_from_querystring +from moto.ec2.exceptions import MissingParameterError +from moto.ec2.utils import ( + filters_from_querystring, + dict_from_querystring, +) from moto.elbv2 import elbv2_backends from moto.core import ACCOUNT_ID +from copy import deepcopy + class InstanceResponse(BaseResponse): def describe_instances(self): @@ -44,40 +50,31 @@ def run_instances(self): owner_id = self._get_param("OwnerId") user_data = self._get_param("UserData") security_group_names = self._get_multi_param("SecurityGroup") - security_group_ids = self._get_multi_param("SecurityGroupId") - nics = dict_from_querystring("NetworkInterface", self.querystring) - instance_type = self._get_param("InstanceType", if_none="m1.small") - placement = self._get_param("Placement.AvailabilityZone") - subnet_id = self._get_param("SubnetId") - private_ip = self._get_param("PrivateIpAddress") - associate_public_ip = self._get_param("AssociatePublicIpAddress") - key_name = self._get_param("KeyName") - ebs_optimized = self._get_param("EbsOptimized") or False - instance_initiated_shutdown_behavior = self._get_param( - "InstanceInitiatedShutdownBehavior" - ) - tags = self._parse_tag_specification("TagSpecification") - region_name = self.region + kwargs = { + "instance_type": self._get_param("InstanceType", if_none="m1.small"), + "placement": self._get_param("Placement.AvailabilityZone"), + "region_name": self.region, + "subnet_id": self._get_param("SubnetId"), + "owner_id": owner_id, + "key_name": self._get_param("KeyName"), + "security_group_ids": self._get_multi_param("SecurityGroupId"), + "nics": dict_from_querystring("NetworkInterface", self.querystring), + "private_ip": self._get_param("PrivateIpAddress"), + "associate_public_ip": self._get_param("AssociatePublicIpAddress"), + "tags": self._parse_tag_specification("TagSpecification"), + "ebs_optimized": self._get_param("EbsOptimized") or False, + "instance_initiated_shutdown_behavior": self._get_param( + "InstanceInitiatedShutdownBehavior" + ), + } + + mappings = self._parse_block_device_mapping() + if mappings: + kwargs["block_device_mappings"] = mappings if 
self.is_not_dryrun("RunInstance"): new_reservation = self.ec2_backend.add_instances( - image_id, - min_count, - user_data, - security_group_names, - instance_type=instance_type, - placement=placement, - region_name=region_name, - subnet_id=subnet_id, - owner_id=owner_id, - key_name=key_name, - security_group_ids=security_group_ids, - nics=nics, - private_ip=private_ip, - associate_public_ip=associate_public_ip, - tags=tags, - ebs_optimized=ebs_optimized, - instance_initiated_shutdown_behavior=instance_initiated_shutdown_behavior, + image_id, min_count, user_data, security_group_names, **kwargs ) template = self.response_template(EC2_RUN_INSTANCES) @@ -272,6 +269,58 @@ def _security_grp_instance_attribute_handler(self): ) return EC2_MODIFY_INSTANCE_ATTRIBUTE + def _parse_block_device_mapping(self): + device_mappings = self._get_list_prefix("BlockDeviceMapping") + mappings = [] + for device_mapping in device_mappings: + self._validate_block_device_mapping(device_mapping) + device_template = deepcopy(BLOCK_DEVICE_MAPPING_TEMPLATE) + device_template["VirtualName"] = device_mapping.get("virtual_name") + device_template["DeviceName"] = device_mapping.get("device_name") + device_template["Ebs"]["SnapshotId"] = device_mapping.get( + "ebs._snapshot_id" + ) + device_template["Ebs"]["VolumeSize"] = device_mapping.get( + "ebs._volume_size" + ) + device_template["Ebs"]["DeleteOnTermination"] = device_mapping.get( + "ebs._delete_on_termination", False + ) + device_template["Ebs"]["VolumeType"] = device_mapping.get( + "ebs._volume_type" + ) + device_template["Ebs"]["Iops"] = device_mapping.get("ebs._iops") + device_template["Ebs"]["Encrypted"] = device_mapping.get( + "ebs._encrypted", False + ) + mappings.append(device_template) + + return mappings + + @staticmethod + def _validate_block_device_mapping(device_mapping): + + if not any(mapping for mapping in device_mapping if mapping.startswith("ebs.")): + raise MissingParameterError("ebs") + if ( + "ebs._volume_size" not in device_mapping + and "ebs._snapshot_id" not in device_mapping + ): + raise MissingParameterError("size or snapshotId") + + +BLOCK_DEVICE_MAPPING_TEMPLATE = { + "VirtualName": None, + "DeviceName": None, + "Ebs": { + "SnapshotId": None, + "VolumeSize": None, + "DeleteOnTermination": None, + "VolumeType": None, + "Iops": None, + "Encrypted": None, + }, +} EC2_RUN_INSTANCES = ( """ diff --git a/tests/test_ec2/test_instances.py b/tests/test_ec2/test_instances.py index 0509e1a45115..d53bd14aaa3d 100644 --- a/tests/test_ec2/test_instances.py +++ b/tests/test_ec2/test_instances.py @@ -1126,6 +1126,111 @@ def test_run_instance_with_keypair(): instance.key_name.should.equal("keypair_name") +@mock_ec2 +def test_run_instance_with_block_device_mappings(): + ec2_client = boto3.client("ec2", region_name="us-east-1") + + kwargs = { + "MinCount": 1, + "MaxCount": 1, + "ImageId": "ami-d3adb33f", + "KeyName": "the_key", + "InstanceType": "t1.micro", + "BlockDeviceMappings": [{"DeviceName": "/dev/sda2", "Ebs": {"VolumeSize": 50}}], + } + + ec2_client.run_instances(**kwargs) + + instances = ec2_client.describe_instances() + volume = instances["Reservations"][0]["Instances"][0]["BlockDeviceMappings"][0][ + "Ebs" + ] + + volumes = ec2_client.describe_volumes(VolumeIds=[volume["VolumeId"]]) + volumes["Volumes"][0]["Size"].should.equal(50) + + +@mock_ec2 +def test_run_instance_with_block_device_mappings_missing_ebs(): + ec2_client = boto3.client("ec2", region_name="us-east-1") + + kwargs = { + "MinCount": 1, + "MaxCount": 1, + "ImageId": "ami-d3adb33f", + 
"KeyName": "the_key", + "InstanceType": "t1.micro", + "BlockDeviceMappings": [{"DeviceName": "/dev/sda2"}], + } + with assert_raises(ClientError) as ex: + ec2_client.run_instances(**kwargs) + + ex.exception.response["Error"]["Code"].should.equal("MissingParameter") + ex.exception.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) + ex.exception.response["Error"]["Message"].should.equal( + "The request must contain the parameter ebs" + ) + + +@mock_ec2 +def test_run_instance_with_block_device_mappings_missing_size(): + ec2_client = boto3.client("ec2", region_name="us-east-1") + + kwargs = { + "MinCount": 1, + "MaxCount": 1, + "ImageId": "ami-d3adb33f", + "KeyName": "the_key", + "InstanceType": "t1.micro", + "BlockDeviceMappings": [ + {"DeviceName": "/dev/sda2", "Ebs": {"VolumeType": "standard"}} + ], + } + with assert_raises(ClientError) as ex: + ec2_client.run_instances(**kwargs) + + ex.exception.response["Error"]["Code"].should.equal("MissingParameter") + ex.exception.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) + ex.exception.response["Error"]["Message"].should.equal( + "The request must contain the parameter size or snapshotId" + ) + + +@mock_ec2 +def test_run_instance_with_block_device_mappings_from_snapshot(): + ec2_client = boto3.client("ec2", region_name="us-east-1") + ec2_resource = boto3.resource("ec2", region_name="us-east-1") + volume_details = { + "AvailabilityZone": "1a", + "Size": 30, + } + + volume = ec2_resource.create_volume(**volume_details) + snapshot = volume.create_snapshot() + kwargs = { + "MinCount": 1, + "MaxCount": 1, + "ImageId": "ami-d3adb33f", + "KeyName": "the_key", + "InstanceType": "t1.micro", + "BlockDeviceMappings": [ + {"DeviceName": "/dev/sda2", "Ebs": {"SnapshotId": snapshot.snapshot_id}} + ], + } + + ec2_client.run_instances(**kwargs) + + instances = ec2_client.describe_instances() + volume = instances["Reservations"][0]["Instances"][0]["BlockDeviceMappings"][0][ + "Ebs" + ] + + volumes = ec2_client.describe_volumes(VolumeIds=[volume["VolumeId"]]) + + volumes["Volumes"][0]["Size"].should.equal(30) + volumes["Volumes"][0]["SnapshotId"].should.equal(snapshot.snapshot_id) + + @mock_ec2_deprecated def test_describe_instance_status_no_instances(): conn = boto.connect_ec2("the_key", "the_secret") From 48aa8ec3f9326e1fdceb2b3923214fe41ab92b3a Mon Sep 17 00:00:00 2001 From: Bert Blommers Date: Mon, 11 May 2020 15:29:21 +0100 Subject: [PATCH 350/658] #2985 - DynamoDB - TransactWriteItems - Fix error-type returned --- moto/dynamodb2/exceptions.py | 15 ++++++ moto/dynamodb2/models/__init__.py | 28 ++++++---- moto/dynamodb2/responses.py | 15 +++--- tests/test_dynamodb2/test_dynamodb.py | 73 ++++++++++++++++++--------- 4 files changed, 90 insertions(+), 41 deletions(-) diff --git a/moto/dynamodb2/exceptions.py b/moto/dynamodb2/exceptions.py index 18e498a90553..334cd913a22f 100644 --- a/moto/dynamodb2/exceptions.py +++ b/moto/dynamodb2/exceptions.py @@ -149,3 +149,18 @@ class IncorrectDataType(MockValidationException): def __init__(self): super(IncorrectDataType, self).__init__(self.inc_data_type_msg) + + +class ConditionalCheckFailed(ValueError): + msg = "The conditional request failed" + + def __init__(self): + super(ConditionalCheckFailed, self).__init__(self.msg) + + +class TransactionCanceledException(ValueError): + cancel_reason_msg = "Transaction cancelled, please refer cancellation reasons for specific reasons [{}]" + + def __init__(self, errors): + msg = self.cancel_reason_msg.format(", ".join([str(err) for err in errors])) 
+ super(TransactionCanceledException, self).__init__(msg) diff --git a/moto/dynamodb2/models/__init__.py b/moto/dynamodb2/models/__init__.py index f459cd0433c8..40eefed4e127 100644 --- a/moto/dynamodb2/models/__init__.py +++ b/moto/dynamodb2/models/__init__.py @@ -18,6 +18,8 @@ InvalidIndexNameError, ItemSizeTooLarge, ItemSizeToUpdateTooLarge, + ConditionalCheckFailed, + TransactionCanceledException, ) from moto.dynamodb2.models.utilities import bytesize from moto.dynamodb2.models.dynamo_type import DynamoType @@ -459,14 +461,14 @@ def put_item( if not overwrite: if not get_expected(expected).expr(current): - raise ValueError("The conditional request failed") + raise ConditionalCheckFailed condition_op = get_filter_expression( condition_expression, expression_attribute_names, expression_attribute_values, ) if not condition_op.expr(current): - raise ValueError("The conditional request failed") + raise ConditionalCheckFailed if range_value: self.items[hash_value][range_value] = item @@ -1076,14 +1078,14 @@ def update_item( expected = {} if not get_expected(expected).expr(item): - raise ValueError("The conditional request failed") + raise ConditionalCheckFailed condition_op = get_filter_expression( condition_expression, expression_attribute_names, expression_attribute_values, ) if not condition_op.expr(item): - raise ValueError("The conditional request failed") + raise ConditionalCheckFailed # Update does not fail on new items, so create one if item is None: @@ -1136,7 +1138,7 @@ def delete_item( expression_attribute_values, ) if not condition_op.expr(item): - raise ValueError("The conditional request failed") + raise ConditionalCheckFailed return table.delete_item(hash_value, range_value) @@ -1167,8 +1169,9 @@ def describe_ttl(self, table_name): def transact_write_items(self, transact_items): # Create a backup in case any of the transactions fail original_table_state = copy.deepcopy(self.tables) - try: - for item in transact_items: + errors = [] + for item in transact_items: + try: if "ConditionCheck" in item: item = item["ConditionCheck"] key = item["Key"] @@ -1188,7 +1191,7 @@ def transact_write_items(self, transact_items): expression_attribute_values, ) if not condition_op.expr(current): - raise ValueError("The conditional request failed") + raise ConditionalCheckFailed() elif "Put" in item: item = item["Put"] attrs = item["Item"] @@ -1247,10 +1250,13 @@ def transact_write_items(self, transact_items): ) else: raise ValueError - except: # noqa: E722 Do not use bare except - # Rollback to the original state, and reraise the error + errors.append(None) + except Exception as e: # noqa: E722 Do not use bare except + errors.append(type(e).__name__) + if any(errors): + # Rollback to the original state, and reraise the errors self.tables = original_table_state - raise + raise TransactionCanceledException(errors) def describe_continuous_backups(self, table_name): table = self.get_table(table_name) diff --git a/moto/dynamodb2/responses.py b/moto/dynamodb2/responses.py index 02c4749d357e..97c7ee286422 100644 --- a/moto/dynamodb2/responses.py +++ b/moto/dynamodb2/responses.py @@ -9,7 +9,12 @@ from moto.core.responses import BaseResponse from moto.core.utils import camelcase_to_underscores, amzn_request_id -from .exceptions import InvalidIndexNameError, ItemSizeTooLarge, MockValidationException +from .exceptions import ( + InvalidIndexNameError, + ItemSizeTooLarge, + MockValidationException, + TransactionCanceledException, +) from moto.dynamodb2.models import dynamodb_backends, dynamo_json_dump @@ 
-929,11 +934,9 @@ def transact_write_items(self): transact_items = self.body["TransactItems"] try: self.dynamodb_backend.transact_write_items(transact_items) - except ValueError: - er = "com.amazonaws.dynamodb.v20111205#ConditionalCheckFailedException" - return self.error( - er, "A condition specified in the operation could not be evaluated." - ) + except TransactionCanceledException as e: + er = "com.amazonaws.dynamodb.v20111205#TransactionCanceledException" + return self.error(er, str(e)) response = {"ConsumedCapacity": [], "ItemCollectionMetrics": {}} return dynamo_json_dump(response) diff --git a/tests/test_dynamodb2/test_dynamodb.py b/tests/test_dynamodb2/test_dynamodb.py index 8774c3e88aa2..50fd4fd6c4dd 100644 --- a/tests/test_dynamodb2/test_dynamodb.py +++ b/tests/test_dynamodb2/test_dynamodb.py @@ -4434,13 +4434,8 @@ def test_transact_write_items_put_conditional_expressions(): ] ) # Assert the exception is correct - ex.exception.response["Error"]["Code"].should.equal( - "ConditionalCheckFailedException" - ) + ex.exception.response["Error"]["Code"].should.equal("TransactionCanceledException") ex.exception.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) - ex.exception.response["Error"]["Message"].should.equal( - "A condition specified in the operation could not be evaluated." - ) # Assert all are present items = dynamodb.scan(TableName="test-table")["Items"] items.should.have.length_of(1) @@ -4529,13 +4524,8 @@ def test_transact_write_items_conditioncheck_fails(): ] ) # Assert the exception is correct - ex.exception.response["Error"]["Code"].should.equal( - "ConditionalCheckFailedException" - ) + ex.exception.response["Error"]["Code"].should.equal("TransactionCanceledException") ex.exception.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) - ex.exception.response["Error"]["Message"].should.equal( - "A condition specified in the operation could not be evaluated." - ) # Assert the original email address is still present items = dynamodb.scan(TableName="test-table")["Items"] @@ -4631,13 +4621,8 @@ def test_transact_write_items_delete_with_failed_condition_expression(): ] ) # Assert the exception is correct - ex.exception.response["Error"]["Code"].should.equal( - "ConditionalCheckFailedException" - ) + ex.exception.response["Error"]["Code"].should.equal("TransactionCanceledException") ex.exception.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) - ex.exception.response["Error"]["Message"].should.equal( - "A condition specified in the operation could not be evaluated." - ) # Assert the original item is still present items = dynamodb.scan(TableName="test-table")["Items"] items.should.have.length_of(1) @@ -4709,13 +4694,8 @@ def test_transact_write_items_update_with_failed_condition_expression(): ] ) # Assert the exception is correct - ex.exception.response["Error"]["Code"].should.equal( - "ConditionalCheckFailedException" - ) + ex.exception.response["Error"]["Code"].should.equal("TransactionCanceledException") ex.exception.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) - ex.exception.response["Error"]["Message"].should.equal( - "A condition specified in the operation could not be evaluated." 
- ) # Assert the original item is still present items = dynamodb.scan(TableName="test-table")["Items"] items.should.have.length_of(1) @@ -5243,3 +5223,48 @@ def test_update_item_add_to_non_existent_number_set(): ) updated_item = ddb_mock.get_item(TableName=table, Key=key)["Item"] assert updated_item["s_i"]["NS"] == ["3"] + + +@mock_dynamodb2 +def test_transact_write_items_fails_with_transaction_canceled_exception(): + table_schema = { + "KeySchema": [{"AttributeName": "id", "KeyType": "HASH"}], + "AttributeDefinitions": [{"AttributeName": "id", "AttributeType": "S"},], + } + dynamodb = boto3.client("dynamodb", region_name="us-east-1") + dynamodb.create_table( + TableName="test-table", BillingMode="PAY_PER_REQUEST", **table_schema + ) + # Insert one item + dynamodb.put_item(TableName="test-table", Item={"id": {"S": "foo"}}) + # Update two items, the one that exists and another that doesn't + with assert_raises(ClientError) as ex: + dynamodb.transact_write_items( + TransactItems=[ + { + "Update": { + "Key": {"id": {"S": "foo"}}, + "TableName": "test-table", + "UpdateExpression": "SET #k = :v", + "ConditionExpression": "attribute_exists(id)", + "ExpressionAttributeNames": {"#k": "key"}, + "ExpressionAttributeValues": {":v": {"S": "value"}}, + } + }, + { + "Update": { + "Key": {"id": {"S": "doesnotexist"}}, + "TableName": "test-table", + "UpdateExpression": "SET #e = :v", + "ConditionExpression": "attribute_exists(id)", + "ExpressionAttributeNames": {"#e": "key"}, + "ExpressionAttributeValues": {":v": {"S": "value"}}, + } + }, + ] + ) + ex.exception.response["Error"]["Code"].should.equal("TransactionCanceledException") + ex.exception.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) + ex.exception.response["Error"]["Message"].should.equal( + "Transaction cancelled, please refer cancellation reasons for specific reasons [None, ConditionalCheckFailed]" + ) From e73a69421952eb65da583fadf86af4efa6dd0c93 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Anton=20Gr=C3=BCbel?= <33207684+gruebel@users.noreply.github.com> Date: Tue, 12 May 2020 14:34:10 +0200 Subject: [PATCH 351/658] Add CloudWatch logs subscription filters (#2982) * Add logs.describe_subscription_filters * Add logs.put_subscription_filter * Add logs.delete_subscription_filter * Change to usage of ACCOUNT_ID --- moto/awslambda/models.py | 24 ++ moto/logs/exceptions.py | 12 +- moto/logs/models.py | 109 ++++++++- moto/logs/responses.py | 30 +++ tests/test_logs/test_logs.py | 414 ++++++++++++++++++++++++++++++++++- 5 files changed, 585 insertions(+), 4 deletions(-) diff --git a/moto/awslambda/models.py b/moto/awslambda/models.py index 589a790ae7c6..7641ce067dff 100644 --- a/moto/awslambda/models.py +++ b/moto/awslambda/models.py @@ -5,6 +5,8 @@ from collections import defaultdict import copy import datetime +from gzip import GzipFile + import docker import docker.errors import hashlib @@ -983,6 +985,28 @@ def send_dynamodb_items(self, function_arn, items, source): func = self._lambdas.get_arn(function_arn) return func.invoke(json.dumps(event), {}, {}) + def send_log_event( + self, function_arn, filter_name, log_group_name, log_stream_name, log_events + ): + data = { + "messageType": "DATA_MESSAGE", + "owner": ACCOUNT_ID, + "logGroup": log_group_name, + "logStream": log_stream_name, + "subscriptionFilters": [filter_name], + "logEvents": log_events, + } + + output = io.BytesIO() + with GzipFile(fileobj=output, mode="w") as f: + f.write(json.dumps(data, separators=(",", ":")).encode("utf-8")) + payload_gz_encoded = 
base64.b64encode(output.getvalue()).decode("utf-8") + + event = {"awslogs": {"data": payload_gz_encoded}} + + func = self._lambdas.get_arn(function_arn) + return func.invoke(json.dumps(event), {}, {}) + def list_tags(self, resource): return self.get_function_by_arn(resource).tags diff --git a/moto/logs/exceptions.py b/moto/logs/exceptions.py index 9f6628b0fc6c..022b3a41116f 100644 --- a/moto/logs/exceptions.py +++ b/moto/logs/exceptions.py @@ -7,10 +7,10 @@ class LogsClientError(JsonRESTError): class ResourceNotFoundException(LogsClientError): - def __init__(self): + def __init__(self, msg=None): self.code = 400 super(ResourceNotFoundException, self).__init__( - "ResourceNotFoundException", "The specified resource does not exist" + "ResourceNotFoundException", msg or "The specified log group does not exist" ) @@ -28,3 +28,11 @@ def __init__(self): super(ResourceAlreadyExistsException, self).__init__( "ResourceAlreadyExistsException", "The specified log group already exists" ) + + +class LimitExceededException(LogsClientError): + def __init__(self): + self.code = 400 + super(LimitExceededException, self).__init__( + "LimitExceededException", "Resource limit exceeded." + ) diff --git a/moto/logs/models.py b/moto/logs/models.py index 755605734260..dcc0e85e1bea 100644 --- a/moto/logs/models.py +++ b/moto/logs/models.py @@ -6,6 +6,7 @@ ResourceNotFoundException, ResourceAlreadyExistsException, InvalidParameterException, + LimitExceededException, ) @@ -57,6 +58,8 @@ def __init__(self, region, log_group, name): 0 # I'm guessing this is token needed for sequenceToken by put_events ) self.events = [] + self.destination_arn = None + self.filter_name = None self.__class__._log_ids += 1 @@ -97,11 +100,32 @@ def put_log_events( self.lastIngestionTime = int(unix_time_millis()) # TODO: make this match AWS if possible self.storedBytes += sum([len(log_event["message"]) for log_event in log_events]) - self.events += [ + events = [ LogEvent(self.lastIngestionTime, log_event) for log_event in log_events ] + self.events += events self.uploadSequenceToken += 1 + if self.destination_arn and self.destination_arn.split(":")[2] == "lambda": + from moto.awslambda import lambda_backends # due to circular dependency + + lambda_log_events = [ + { + "id": event.eventId, + "timestamp": event.timestamp, + "message": event.message, + } + for event in events + ] + + lambda_backends[self.region].send_log_event( + self.destination_arn, + self.filter_name, + log_group_name, + log_stream_name, + lambda_log_events, + ) + return "{:056d}".format(self.uploadSequenceToken) def get_log_events( @@ -227,6 +251,7 @@ def __init__(self, region, name, tags, **kwargs): self.retention_in_days = kwargs.get( "RetentionInDays" ) # AWS defaults to Never Expire for log group retention + self.subscription_filters = [] def create_log_stream(self, log_stream_name): if log_stream_name in self.streams: @@ -386,6 +411,48 @@ def untag(self, tags_to_remove): k: v for (k, v) in self.tags.items() if k not in tags_to_remove } + def describe_subscription_filters(self): + return self.subscription_filters + + def put_subscription_filter( + self, filter_name, filter_pattern, destination_arn, role_arn + ): + creation_time = int(unix_time_millis()) + + # only one subscription filter can be associated with a log group + if self.subscription_filters: + if self.subscription_filters[0]["filterName"] == filter_name: + creation_time = self.subscription_filters[0]["creationTime"] + else: + raise LimitExceededException + + for stream in self.streams.values(): + 
stream.destination_arn = destination_arn + stream.filter_name = filter_name + + self.subscription_filters = [ + { + "filterName": filter_name, + "logGroupName": self.name, + "filterPattern": filter_pattern, + "destinationArn": destination_arn, + "roleArn": role_arn, + "distribution": "ByLogStream", + "creationTime": creation_time, + } + ] + + def delete_subscription_filter(self, filter_name): + if ( + not self.subscription_filters + or self.subscription_filters[0]["filterName"] != filter_name + ): + raise ResourceNotFoundException( + "The specified subscription filter does not exist." + ) + + self.subscription_filters = [] + class LogsBackend(BaseBackend): def __init__(self, region_name): @@ -557,6 +624,46 @@ def untag_log_group(self, log_group_name, tags): log_group = self.groups[log_group_name] log_group.untag(tags) + def describe_subscription_filters(self, log_group_name): + log_group = self.groups.get(log_group_name) + + if not log_group: + raise ResourceNotFoundException() + + return log_group.describe_subscription_filters() + + def put_subscription_filter( + self, log_group_name, filter_name, filter_pattern, destination_arn, role_arn + ): + # TODO: support other destinations like Kinesis stream + from moto.awslambda import lambda_backends # due to circular dependency + + log_group = self.groups.get(log_group_name) + + if not log_group: + raise ResourceNotFoundException() + + lambda_func = lambda_backends[self.region_name].get_function(destination_arn) + + # no specific permission check implemented + if not lambda_func: + raise InvalidParameterException( + "Could not execute the lambda function. " + "Make sure you have given CloudWatch Logs permission to execute your function." + ) + + log_group.put_subscription_filter( + filter_name, filter_pattern, destination_arn, role_arn + ) + + def delete_subscription_filter(self, log_group_name, filter_name): + log_group = self.groups.get(log_group_name) + + if not log_group: + raise ResourceNotFoundException() + + log_group.delete_subscription_filter(filter_name) + logs_backends = {} for region in Session().get_available_regions("logs"): diff --git a/moto/logs/responses.py b/moto/logs/responses.py index 4631da2f9a95..9e6886a42647 100644 --- a/moto/logs/responses.py +++ b/moto/logs/responses.py @@ -178,3 +178,33 @@ def untag_log_group(self): tags = self._get_param("tags") self.logs_backend.untag_log_group(log_group_name, tags) return "" + + def describe_subscription_filters(self): + log_group_name = self._get_param("logGroupName") + + subscription_filters = self.logs_backend.describe_subscription_filters( + log_group_name + ) + + return json.dumps({"subscriptionFilters": subscription_filters}) + + def put_subscription_filter(self): + log_group_name = self._get_param("logGroupName") + filter_name = self._get_param("filterName") + filter_pattern = self._get_param("filterPattern") + destination_arn = self._get_param("destinationArn") + role_arn = self._get_param("roleArn") + + self.logs_backend.put_subscription_filter( + log_group_name, filter_name, filter_pattern, destination_arn, role_arn + ) + + return "" + + def delete_subscription_filter(self): + log_group_name = self._get_param("logGroupName") + filter_name = self._get_param("filterName") + + self.logs_backend.delete_subscription_filter(log_group_name, filter_name) + + return "" diff --git a/tests/test_logs/test_logs.py b/tests/test_logs/test_logs.py index 2429d7e93b03..675948150410 100644 --- a/tests/test_logs/test_logs.py +++ b/tests/test_logs/test_logs.py @@ -1,10 +1,17 @@ +import base64 
+import json +import time +import zlib +from io import BytesIO +from zipfile import ZipFile, ZIP_DEFLATED + import boto3 import os import sure # noqa import six from botocore.exceptions import ClientError -from moto import mock_logs, settings +from moto import mock_logs, settings, mock_lambda, mock_iam from nose.tools import assert_raises from nose import SkipTest @@ -425,3 +432,408 @@ def test_untag_log_group(): assert response["tags"] == remaining_tags response = conn.delete_log_group(logGroupName=log_group_name) + + +@mock_logs +def test_describe_subscription_filters(): + # given + client = boto3.client("logs", "us-east-1") + log_group_name = "/test" + client.create_log_group(logGroupName=log_group_name) + + # when + response = client.describe_subscription_filters(logGroupName=log_group_name) + + # then + response["subscriptionFilters"].should.have.length_of(0) + + +@mock_logs +def test_describe_subscription_filters_errors(): + # given + client = boto3.client("logs", "us-east-1") + + # when + with assert_raises(ClientError) as e: + client.describe_subscription_filters(logGroupName="not-existing-log-group",) + + # then + ex = e.exception + ex.operation_name.should.equal("DescribeSubscriptionFilters") + ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) + ex.response["Error"]["Code"].should.contain("ResourceNotFoundException") + ex.response["Error"]["Message"].should.equal( + "The specified log group does not exist" + ) + + +@mock_lambda +@mock_logs +def test_put_subscription_filter_update(): + # given + region_name = "us-east-1" + client_lambda = boto3.client("lambda", region_name) + client_logs = boto3.client("logs", region_name) + log_group_name = "/test" + log_stream_name = "stream" + client_logs.create_log_group(logGroupName=log_group_name) + client_logs.create_log_stream( + logGroupName=log_group_name, logStreamName=log_stream_name + ) + function_arn = client_lambda.create_function( + FunctionName="test", + Runtime="python3.8", + Role=_get_role_name(region_name), + Handler="lambda_function.lambda_handler", + Code={"ZipFile": _get_test_zip_file()}, + Description="test lambda function", + Timeout=3, + MemorySize=128, + Publish=True, + )["FunctionArn"] + + # when + client_logs.put_subscription_filter( + logGroupName=log_group_name, + filterName="test", + filterPattern="", + destinationArn=function_arn, + ) + + # then + response = client_logs.describe_subscription_filters(logGroupName=log_group_name) + response["subscriptionFilters"].should.have.length_of(1) + filter = response["subscriptionFilters"][0] + creation_time = filter["creationTime"] + creation_time.should.be.a(int) + filter["destinationArn"] = "arn:aws:lambda:us-east-1:123456789012:function:test" + filter["distribution"] = "ByLogStream" + filter["logGroupName"] = "/test" + filter["filterName"] = "test" + filter["filterPattern"] = "" + + # when + # to update an existing subscription filter the 'filerName' must be identical + client_logs.put_subscription_filter( + logGroupName=log_group_name, + filterName="test", + filterPattern="[]", + destinationArn=function_arn, + ) + + # then + response = client_logs.describe_subscription_filters(logGroupName=log_group_name) + response["subscriptionFilters"].should.have.length_of(1) + filter = response["subscriptionFilters"][0] + filter["creationTime"].should.equal(creation_time) + filter["destinationArn"] = "arn:aws:lambda:us-east-1:123456789012:function:test" + filter["distribution"] = "ByLogStream" + filter["logGroupName"] = "/test" + filter["filterName"] = "test" + 
filter["filterPattern"] = "[]" + + # when + # only one subscription filter can be associated with a log group + with assert_raises(ClientError) as e: + client_logs.put_subscription_filter( + logGroupName=log_group_name, + filterName="test-2", + filterPattern="", + destinationArn=function_arn, + ) + + # then + ex = e.exception + ex.operation_name.should.equal("PutSubscriptionFilter") + ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) + ex.response["Error"]["Code"].should.contain("LimitExceededException") + ex.response["Error"]["Message"].should.equal("Resource limit exceeded.") + + +@mock_lambda +@mock_logs +def test_put_subscription_filter_with_lambda(): + # given + region_name = "us-east-1" + client_lambda = boto3.client("lambda", region_name) + client_logs = boto3.client("logs", region_name) + log_group_name = "/test" + log_stream_name = "stream" + client_logs.create_log_group(logGroupName=log_group_name) + client_logs.create_log_stream( + logGroupName=log_group_name, logStreamName=log_stream_name + ) + function_arn = client_lambda.create_function( + FunctionName="test", + Runtime="python3.8", + Role=_get_role_name(region_name), + Handler="lambda_function.lambda_handler", + Code={"ZipFile": _get_test_zip_file()}, + Description="test lambda function", + Timeout=3, + MemorySize=128, + Publish=True, + )["FunctionArn"] + + # when + client_logs.put_subscription_filter( + logGroupName=log_group_name, + filterName="test", + filterPattern="", + destinationArn=function_arn, + ) + + # then + response = client_logs.describe_subscription_filters(logGroupName=log_group_name) + response["subscriptionFilters"].should.have.length_of(1) + filter = response["subscriptionFilters"][0] + filter["creationTime"].should.be.a(int) + filter["destinationArn"] = "arn:aws:lambda:us-east-1:123456789012:function:test" + filter["distribution"] = "ByLogStream" + filter["logGroupName"] = "/test" + filter["filterName"] = "test" + filter["filterPattern"] = "" + + # when + client_logs.put_log_events( + logGroupName=log_group_name, + logStreamName=log_stream_name, + logEvents=[ + {"timestamp": 0, "message": "test"}, + {"timestamp": 0, "message": "test 2"}, + ], + ) + + # then + msg_showed_up, received_message = _wait_for_log_msg( + client_logs, "/aws/lambda/test", "awslogs" + ) + assert msg_showed_up, "CloudWatch log event was not found. 
All logs: {}".format( + received_message + ) + + data = json.loads(received_message)["awslogs"]["data"] + response = json.loads( + zlib.decompress(base64.b64decode(data), 16 + zlib.MAX_WBITS).decode("utf-8") + ) + response["messageType"].should.equal("DATA_MESSAGE") + response["owner"].should.equal("123456789012") + response["logGroup"].should.equal("/test") + response["logStream"].should.equal("stream") + response["subscriptionFilters"].should.equal(["test"]) + log_events = sorted(response["logEvents"], key=lambda log_event: log_event["id"]) + log_events.should.have.length_of(2) + log_events[0]["id"].should.be.a(int) + log_events[0]["message"].should.equal("test") + log_events[0]["timestamp"].should.equal(0) + log_events[1]["id"].should.be.a(int) + log_events[1]["message"].should.equal("test 2") + log_events[1]["timestamp"].should.equal(0) + + +@mock_logs +def test_put_subscription_filter_errors(): + # given + client = boto3.client("logs", "us-east-1") + log_group_name = "/test" + client.create_log_group(logGroupName=log_group_name) + + # when + with assert_raises(ClientError) as e: + client.put_subscription_filter( + logGroupName="not-existing-log-group", + filterName="test", + filterPattern="", + destinationArn="arn:aws:lambda:us-east-1:123456789012:function:test", + ) + + # then + ex = e.exception + ex.operation_name.should.equal("PutSubscriptionFilter") + ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) + ex.response["Error"]["Code"].should.contain("ResourceNotFoundException") + ex.response["Error"]["Message"].should.equal( + "The specified log group does not exist" + ) + + # when + with assert_raises(ClientError) as e: + client.put_subscription_filter( + logGroupName="/test", + filterName="test", + filterPattern="", + destinationArn="arn:aws:lambda:us-east-1:123456789012:function:not-existing", + ) + + # then + ex = e.exception + ex.operation_name.should.equal("PutSubscriptionFilter") + ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) + ex.response["Error"]["Code"].should.contain("InvalidParameterException") + ex.response["Error"]["Message"].should.equal( + "Could not execute the lambda function. " + "Make sure you have given CloudWatch Logs permission to execute your function." + ) + + # when + with assert_raises(ClientError) as e: + client.put_subscription_filter( + logGroupName="/test", + filterName="test", + filterPattern="", + destinationArn="arn:aws:lambda:us-east-1:123456789012:function:not-existing", + ) + + # then + ex = e.exception + ex.operation_name.should.equal("PutSubscriptionFilter") + ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) + ex.response["Error"]["Code"].should.contain("InvalidParameterException") + ex.response["Error"]["Message"].should.equal( + "Could not execute the lambda function. " + "Make sure you have given CloudWatch Logs permission to execute your function." 
+    )
+
+
+@mock_lambda
+@mock_logs
+def test_delete_subscription_filter():
+    # given
+    region_name = "us-east-1"
+    client_lambda = boto3.client("lambda", region_name)
+    client_logs = boto3.client("logs", region_name)
+    log_group_name = "/test"
+    client_logs.create_log_group(logGroupName=log_group_name)
+    function_arn = client_lambda.create_function(
+        FunctionName="test",
+        Runtime="python3.8",
+        Role=_get_role_name(region_name),
+        Handler="lambda_function.lambda_handler",
+        Code={"ZipFile": _get_test_zip_file()},
+        Description="test lambda function",
+        Timeout=3,
+        MemorySize=128,
+        Publish=True,
+    )["FunctionArn"]
+    client_logs.put_subscription_filter(
+        logGroupName=log_group_name,
+        filterName="test",
+        filterPattern="",
+        destinationArn=function_arn,
+    )
+
+    # when
+    client_logs.delete_subscription_filter(
+        logGroupName="/test", filterName="test",
+    )
+
+    # then
+    response = client_logs.describe_subscription_filters(logGroupName=log_group_name)
+    response["subscriptionFilters"].should.have.length_of(0)
+
+
+@mock_lambda
+@mock_logs
+def test_delete_subscription_filter_errors():
+    # given
+    region_name = "us-east-1"
+    client_lambda = boto3.client("lambda", region_name)
+    client_logs = boto3.client("logs", region_name)
+    log_group_name = "/test"
+    client_logs.create_log_group(logGroupName=log_group_name)
+    function_arn = client_lambda.create_function(
+        FunctionName="test",
+        Runtime="python3.8",
+        Role=_get_role_name(region_name),
+        Handler="lambda_function.lambda_handler",
+        Code={"ZipFile": _get_test_zip_file()},
+        Description="test lambda function",
+        Timeout=3,
+        MemorySize=128,
+        Publish=True,
+    )["FunctionArn"]
+    client_logs.put_subscription_filter(
+        logGroupName=log_group_name,
+        filterName="test",
+        filterPattern="",
+        destinationArn=function_arn,
+    )
+
+    # when
+    with assert_raises(ClientError) as e:
+        client_logs.delete_subscription_filter(
+            logGroupName="not-existing-log-group", filterName="test",
+        )
+
+    # then
+    ex = e.exception
+    ex.operation_name.should.equal("DeleteSubscriptionFilter")
+    ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400)
+    ex.response["Error"]["Code"].should.contain("ResourceNotFoundException")
+    ex.response["Error"]["Message"].should.equal(
+        "The specified log group does not exist"
+    )
+
+    # when
+    with assert_raises(ClientError) as e:
+        client_logs.delete_subscription_filter(
+            logGroupName="/test", filterName="wrong-filter-name",
+        )
+
+    # then
+    ex = e.exception
+    ex.operation_name.should.equal("DeleteSubscriptionFilter")
+    ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400)
+    ex.response["Error"]["Code"].should.contain("ResourceNotFoundException")
+    ex.response["Error"]["Message"].should.equal(
+        "The specified subscription filter does not exist."
+    )
+
+
+def _get_role_name(region_name):
+    with mock_iam():
+        iam = boto3.client("iam", region_name=region_name)
+        try:
+            return iam.get_role(RoleName="test-role")["Role"]["Arn"]
+        except ClientError:
+            return iam.create_role(
+                RoleName="test-role", AssumeRolePolicyDocument="test policy", Path="/",
+            )["Role"]["Arn"]
+
+
+def _get_test_zip_file():
+    func_str = """
+def lambda_handler(event, context):
+    return event
+"""
+
+    zip_output = BytesIO()
+    zip_file = ZipFile(zip_output, "w", ZIP_DEFLATED)
+    zip_file.writestr("lambda_function.py", func_str)
+    zip_file.close()
+    zip_output.seek(0)
+    return zip_output.read()
+
+
+def _wait_for_log_msg(client, log_group_name, expected_msg_part):
+    received_messages = []
+    start = time.time()
+    while (time.time() - start) < 10:
+        result = client.describe_log_streams(logGroupName=log_group_name)
+        log_streams = result.get("logStreams")
+        if not log_streams:
+            time.sleep(1)
+            continue
+
+        for log_stream in log_streams:
+            result = client.get_log_events(
+                logGroupName=log_group_name, logStreamName=log_stream["logStreamName"],
+            )
+            received_messages.extend(
+                [event["message"] for event in result.get("events")]
+            )
+        for message in received_messages:
+            if expected_msg_part in message:
+                return True, message
+        time.sleep(1)
+    return False, received_messages
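For readers wiring this feature into their own tests, a minimal self-contained sketch of the subscription-filter flow added by this patch is shown below. It is illustrative only (the group, stream, role, and filter names are invented for the example), and it assumes Docker is available, since moto executes the mocked Lambda for real when forwarded log events arrive:

import io
import zipfile

import boto3
from moto import mock_iam, mock_lambda, mock_logs


@mock_iam
@mock_lambda
@mock_logs
def subscription_filter_demo():
    region = "us-east-1"

    # A throwaway role and function; moto only checks that the destination exists.
    iam = boto3.client("iam", region_name=region)
    role_arn = iam.create_role(
        RoleName="demo-role", AssumeRolePolicyDocument="{}"
    )["Role"]["Arn"]

    zip_output = io.BytesIO()
    with zipfile.ZipFile(zip_output, "w") as zip_file:
        zip_file.writestr(
            "lambda_function.py",
            "def lambda_handler(event, context):\n    return event\n",
        )
    zip_output.seek(0)

    function_arn = boto3.client("lambda", region_name=region).create_function(
        FunctionName="demo",
        Runtime="python3.8",
        Role=role_arn,
        Handler="lambda_function.lambda_handler",
        Code={"ZipFile": zip_output.read()},
    )["FunctionArn"]

    logs = boto3.client("logs", region_name=region)
    logs.create_log_group(logGroupName="/demo")
    logs.create_log_stream(logGroupName="/demo", logStreamName="stream")
    logs.put_subscription_filter(
        logGroupName="/demo",
        filterName="demo-filter",
        filterPattern="",
        destinationArn=function_arn,
    )

    # put_log_events now also gzips and base64-encodes the events and hands
    # them to the Lambda in the standard "awslogs" envelope (see
    # send_log_event in moto/awslambda/models.py above).
    logs.put_log_events(
        logGroupName="/demo",
        logStreamName="stream",
        logEvents=[{"timestamp": 0, "message": "hello"}],
    )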
From ddb5c30d343c2a4d7ebe5373504e69ed23a6db42 Mon Sep 17 00:00:00 2001
From: Bert Blommers
Date: Tue, 12 May 2020 14:58:35 +0100
Subject: [PATCH 352/658] Improve implementation coverage (and layout)

---
 IMPLEMENTATION_COVERAGE.md | 1432 +++++++++++++----
 moto/__init__.py | 4 +-
 moto/backends.py | 2 +-
 moto/dynamodb2/models/__init__.py | 55 +-
 moto/dynamodb2/responses.py | 61 +-
 moto/ec2_instance_connect/__init__.py | 4 -
 moto/ec2_instance_connect/models.py | 11 -
 moto/ec2_instance_connect/responses.py | 9 -
 moto/ec2instanceconnect/__init__.py | 4 +
 moto/ec2instanceconnect/models.py | 15 +
 moto/ec2instanceconnect/responses.py | 11 +
 .../urls.py | 0
 scripts/implementation_coverage.py | 18 +-
 .../test_ec2instanceconnect_boto3.py} | 4 +-
 14 files changed, 1243 insertions(+), 387 deletions(-)
 delete mode 100644 moto/ec2_instance_connect/__init__.py
 delete mode 100644 moto/ec2_instance_connect/models.py
 delete mode 100644 moto/ec2_instance_connect/responses.py
 create mode 100644 moto/ec2instanceconnect/__init__.py
 create mode 100644 moto/ec2instanceconnect/models.py
 create mode 100644 moto/ec2instanceconnect/responses.py
 rename moto/{ec2_instance_connect => ec2instanceconnect}/urls.py (100%)
 rename tests/{test_ec2_instance_connect/test_ec2_instance_connect_boto3.py => test_ec2instanceconnect/test_ec2instanceconnect_boto3.py} (92%)

diff --git a/IMPLEMENTATION_COVERAGE.md b/IMPLEMENTATION_COVERAGE.md
index f99d86df3250..a8c32080da1b 100644
--- a/IMPLEMENTATION_COVERAGE.md
+++ b/IMPLEMENTATION_COVERAGE.md
@@ -1,6 +1,8 @@
## accessanalyzer
-0% implemented
+<details>
+<summary>0% implemented</summary>
+
- [ ] create_analyzer
- [ ] create_archive_rule
- [ ] delete_analyzer
- [ ] delete_archive_rule
- [ ] get_analyzed_resource
- [ ] get_analyzer
- [ ] get_archive_rule
- [ ] get_finding
- [ ] list_analyzed_resources
- [ ] list_analyzers
- [ ] list_archive_rules
- [ ] list_findings
- [ ] list_tags_for_resource
- [ ] start_resource_scan
- [ ] tag_resource
- [ ] untag_resource
- [ ] update_archive_rule
- [ ] update_findings
+</details>

## acm
-38% implemented
+<details>
+38% implemented + - [X] add_tags_to_certificate - [X] delete_certificate - [ ] describe_certificate @@ -35,9 +40,12 @@ - [X] request_certificate - [ ] resend_validation_email - [ ] update_certificate_options +
## acm-pca -0% implemented +
+0% implemented + - [ ] create_certificate_authority - [ ] create_certificate_authority_audit_report - [ ] create_permission @@ -58,9 +66,12 @@ - [ ] tag_certificate_authority - [ ] untag_certificate_authority - [ ] update_certificate_authority +
## alexaforbusiness -0% implemented +
+0% implemented + - [ ] approve_skill - [ ] associate_contact_with_address_book - [ ] associate_device_with_network_profile @@ -154,9 +165,12 @@ - [ ] update_profile - [ ] update_room - [ ] update_skill_group +
## amplify -0% implemented +
+0% implemented + - [ ] create_app - [ ] create_backend_environment - [ ] create_branch @@ -194,17 +208,20 @@ - [ ] update_branch - [ ] update_domain_association - [ ] update_webhook +
## apigateway -25% implemented +
+34% implemented + - [ ] create_api_key -- [ ] create_authorizer +- [X] create_authorizer - [ ] create_base_path_mapping - [X] create_deployment - [ ] create_documentation_part - [ ] create_documentation_version -- [ ] create_domain_name -- [ ] create_model +- [X] create_domain_name +- [X] create_model - [ ] create_request_validator - [X] create_resource - [X] create_rest_api @@ -213,7 +230,7 @@ - [X] create_usage_plan_key - [ ] create_vpc_link - [ ] delete_api_key -- [ ] delete_authorizer +- [X] delete_authorizer - [ ] delete_base_path_mapping - [ ] delete_client_certificate - [X] delete_deployment @@ -239,8 +256,8 @@ - [ ] get_account - [ ] get_api_key - [ ] get_api_keys -- [ ] get_authorizer -- [ ] get_authorizers +- [X] get_authorizer +- [X] get_authorizers - [ ] get_base_path_mapping - [ ] get_base_path_mappings - [ ] get_client_certificate @@ -251,8 +268,8 @@ - [ ] get_documentation_parts - [ ] get_documentation_version - [ ] get_documentation_versions -- [ ] get_domain_name -- [ ] get_domain_names +- [X] get_domain_name +- [X] get_domain_names - [ ] get_export - [ ] get_gateway_response - [ ] get_gateway_responses @@ -260,9 +277,9 @@ - [X] get_integration_response - [X] get_method - [X] get_method_response -- [ ] get_model +- [X] get_model - [ ] get_model_template -- [ ] get_models +- [X] get_models - [ ] get_request_validator - [ ] get_request_validators - [X] get_resource @@ -297,7 +314,7 @@ - [ ] untag_resource - [ ] update_account - [ ] update_api_key -- [ ] update_authorizer +- [X] update_authorizer - [ ] update_base_path_mapping - [ ] update_client_certificate - [ ] update_deployment @@ -317,15 +334,21 @@ - [ ] update_usage - [ ] update_usage_plan - [ ] update_vpc_link +
## apigatewaymanagementapi -0% implemented +
+0% implemented + - [ ] delete_connection - [ ] get_connection - [ ] post_to_connection +
## apigatewayv2 -0% implemented +
+0% implemented + - [ ] create_api - [ ] create_api_mapping - [ ] create_authorizer @@ -337,6 +360,8 @@ - [ ] create_route - [ ] create_route_response - [ ] create_stage +- [ ] create_vpc_link +- [ ] delete_access_log_settings - [ ] delete_api - [ ] delete_api_mapping - [ ] delete_authorizer @@ -347,9 +372,11 @@ - [ ] delete_integration_response - [ ] delete_model - [ ] delete_route +- [ ] delete_route_request_parameter - [ ] delete_route_response - [ ] delete_route_settings - [ ] delete_stage +- [ ] delete_vpc_link - [ ] get_api - [ ] get_api_mapping - [ ] get_api_mappings @@ -374,6 +401,8 @@ - [ ] get_stage - [ ] get_stages - [ ] get_tags +- [ ] get_vpc_link +- [ ] get_vpc_links - [ ] import_api - [ ] reimport_api - [ ] tag_resource @@ -389,9 +418,13 @@ - [ ] update_route - [ ] update_route_response - [ ] update_stage +- [ ] update_vpc_link +
## appconfig -0% implemented +
+0% implemented + - [ ] create_application - [ ] create_configuration_profile - [ ] create_deployment_strategy @@ -421,9 +454,12 @@ - [ ] update_deployment_strategy - [ ] update_environment - [ ] validate_configuration +
## application-autoscaling -0% implemented +
+0% implemented + - [ ] delete_scaling_policy - [ ] delete_scheduled_action - [ ] deregister_scalable_target @@ -434,9 +470,12 @@ - [ ] put_scaling_policy - [ ] put_scheduled_action - [ ] register_scalable_target +
## application-insights -0% implemented +
+0% implemented + - [ ] create_application - [ ] create_component - [ ] create_log_pattern @@ -453,6 +492,7 @@ - [ ] describe_problem_observations - [ ] list_applications - [ ] list_components +- [ ] list_configuration_history - [ ] list_log_pattern_sets - [ ] list_log_patterns - [ ] list_problems @@ -463,9 +503,12 @@ - [ ] update_component - [ ] update_component_configuration - [ ] update_log_pattern +
## appmesh -0% implemented +
+0% implemented + - [ ] create_mesh - [ ] create_route - [ ] create_virtual_node @@ -494,9 +537,12 @@ - [ ] update_virtual_node - [ ] update_virtual_router - [ ] update_virtual_service +
## appstream -0% implemented +
+0% implemented + - [ ] associate_fleet - [ ] batch_associate_user_stack - [ ] batch_disassociate_user_stack @@ -544,9 +590,12 @@ - [ ] update_fleet - [ ] update_image_permissions - [ ] update_stack +
## appsync -0% implemented +
+0% implemented + - [ ] create_api_cache - [ ] create_api_key - [ ] create_data_source @@ -588,9 +637,12 @@ - [ ] update_graphql_api - [ ] update_resolver - [ ] update_type +
## athena -10% implemented +
+10% implemented + - [ ] batch_get_named_query - [ ] batch_get_query_execution - [ ] create_named_query @@ -610,9 +662,12 @@ - [ ] tag_resource - [ ] untag_resource - [ ] update_work_group +
## autoscaling -44% implemented +
+44% implemented + - [X] attach_instances - [X] attach_load_balancer_target_groups - [X] attach_load_balancers @@ -667,18 +722,24 @@ - [X] suspend_processes - [ ] terminate_instance_in_auto_scaling_group - [X] update_auto_scaling_group +
## autoscaling-plans -0% implemented +
+0% implemented + - [ ] create_scaling_plan - [ ] delete_scaling_plan - [ ] describe_scaling_plan_resources - [ ] describe_scaling_plans - [ ] get_scaling_plan_resource_forecast_data - [ ] update_scaling_plan +
## backup -0% implemented +
+0% implemented + - [ ] create_backup_plan - [ ] create_backup_selection - [ ] create_backup_vault @@ -690,6 +751,7 @@ - [ ] delete_recovery_point - [ ] describe_backup_job - [ ] describe_backup_vault +- [ ] describe_copy_job - [ ] describe_protected_resource - [ ] describe_recovery_point - [ ] describe_restore_job @@ -708,6 +770,7 @@ - [ ] list_backup_plans - [ ] list_backup_selections - [ ] list_backup_vaults +- [ ] list_copy_jobs - [ ] list_protected_resources - [ ] list_recovery_points_by_backup_vault - [ ] list_recovery_points_by_resource @@ -716,15 +779,19 @@ - [ ] put_backup_vault_access_policy - [ ] put_backup_vault_notifications - [ ] start_backup_job +- [ ] start_copy_job - [ ] start_restore_job - [ ] stop_backup_job - [ ] tag_resource - [ ] untag_resource - [ ] update_backup_plan - [ ] update_recovery_point_lifecycle +
## batch -93% implemented +
+93% implemented + - [ ] cancel_job - [X] create_compute_environment - [X] create_job_queue @@ -741,9 +808,12 @@ - [X] terminate_job - [X] update_compute_environment - [X] update_job_queue +
## budgets -0% implemented +
+0% implemented + - [ ] create_budget - [ ] create_notification - [ ] create_subscriber @@ -758,9 +828,12 @@ - [ ] update_budget - [ ] update_notification - [ ] update_subscriber +
## ce -0% implemented +
+0% implemented + - [ ] create_cost_category_definition - [ ] delete_cost_category_definition - [ ] describe_cost_category_definition @@ -780,12 +853,16 @@ - [ ] get_usage_forecast - [ ] list_cost_category_definitions - [ ] update_cost_category_definition +
## chime -0% implemented +
+0% implemented + - [ ] associate_phone_number_with_user - [ ] associate_phone_numbers_with_voice_connector - [ ] associate_phone_numbers_with_voice_connector_group +- [ ] associate_signin_delegate_groups_with_account - [ ] batch_create_attendee - [ ] batch_create_room_membership - [ ] batch_delete_phone_number @@ -800,6 +877,7 @@ - [ ] create_phone_number_order - [ ] create_room - [ ] create_room_membership +- [ ] create_user - [ ] create_voice_connector - [ ] create_voice_connector_group - [ ] delete_account @@ -818,6 +896,7 @@ - [ ] disassociate_phone_number_from_user - [ ] disassociate_phone_numbers_from_voice_connector - [ ] disassociate_phone_numbers_from_voice_connector_group +- [ ] disassociate_signin_delegate_groups_from_account - [ ] get_account - [ ] get_account_settings - [ ] get_attendee @@ -874,9 +953,12 @@ - [ ] update_user_settings - [ ] update_voice_connector - [ ] update_voice_connector_group +
## cloud9 -0% implemented +
+0% implemented + - [ ] create_environment_ec2 - [ ] create_environment_membership - [ ] delete_environment @@ -885,11 +967,17 @@ - [ ] describe_environment_status - [ ] describe_environments - [ ] list_environments +- [ ] list_tags_for_resource +- [ ] tag_resource +- [ ] untag_resource - [ ] update_environment - [ ] update_environment_membership +
## clouddirectory -0% implemented +
+0% implemented + - [ ] add_facet_to_object - [ ] apply_schema - [ ] attach_object @@ -956,9 +1044,12 @@ - [ ] update_typed_link_facet - [ ] upgrade_applied_schema - [ ] upgrade_published_schema +
## cloudformation -32% implemented +
+32% implemented + - [ ] cancel_update_stack - [ ] continue_update_rollback - [X] create_change_set @@ -1014,9 +1105,12 @@ - [X] update_stack_set - [ ] update_termination_protection - [X] validate_template +
## cloudfront -0% implemented +
+0% implemented + - [ ] create_cloud_front_origin_access_identity - [ ] create_distribution - [ ] create_distribution_with_tags @@ -1062,9 +1156,12 @@ - [ ] update_field_level_encryption_profile - [ ] update_public_key - [ ] update_streaming_distribution +
## cloudhsm -0% implemented +
+0% implemented + - [ ] add_tags_to_resource - [ ] create_hapg - [ ] create_hsm @@ -1085,9 +1182,12 @@ - [ ] modify_hsm - [ ] modify_luna_client - [ ] remove_tags_from_resource +
## cloudhsmv2 -0% implemented +
+0% implemented + - [ ] copy_backup_to_region - [ ] create_cluster - [ ] create_hsm @@ -1101,9 +1201,12 @@ - [ ] restore_backup - [ ] tag_resource - [ ] untag_resource +
## cloudsearch -0% implemented +
+0% implemented + - [ ] build_suggesters - [ ] create_domain - [ ] define_analysis_scheme @@ -1130,15 +1233,21 @@ - [ ] update_domain_endpoint_options - [ ] update_scaling_parameters - [ ] update_service_access_policies +
## cloudsearchdomain -0% implemented +
+0% implemented + - [ ] search - [ ] suggest - [ ] upload_documents +
## cloudtrail -0% implemented +
+0% implemented + - [ ] add_tags - [ ] create_trail - [ ] delete_trail @@ -1157,9 +1266,12 @@ - [ ] start_logging - [ ] stop_logging - [ ] update_trail +
## cloudwatch -34% implemented +
+36% implemented + - [X] delete_alarms - [ ] delete_anomaly_detector - [X] delete_dashboards @@ -1175,13 +1287,14 @@ - [ ] enable_insight_rules - [X] get_dashboard - [ ] get_insight_rule_report -- [ ] get_metric_data +- [X] get_metric_data - [X] get_metric_statistics - [ ] get_metric_widget_image - [X] list_dashboards - [X] list_metrics - [ ] list_tags_for_resource - [ ] put_anomaly_detector +- [ ] put_composite_alarm - [X] put_dashboard - [ ] put_insight_rule - [X] put_metric_alarm @@ -1189,9 +1302,12 @@ - [X] set_alarm_state - [ ] tag_resource - [ ] untag_resource +
## codebuild -0% implemented +
+0% implemented + - [ ] batch_delete_builds - [ ] batch_get_builds - [ ] batch_get_projects @@ -1226,9 +1342,12 @@ - [ ] update_project - [ ] update_report_group - [ ] update_webhook +
## codecommit -0% implemented +
+4% implemented + - [ ] associate_approval_rule_template_with_repository - [ ] batch_associate_approval_rule_template_with_repositories - [ ] batch_describe_merge_conflicts @@ -1304,9 +1423,12 @@ - [ ] update_pull_request_title - [ ] update_repository_description - [ ] update_repository_name +
## codedeploy -0% implemented +
+0% implemented + - [ ] add_tags_to_on_premises_instances - [ ] batch_get_application_revisions - [ ] batch_get_applications @@ -1353,16 +1475,22 @@ - [ ] untag_resource - [ ] update_application - [ ] update_deployment_group +
## codeguru-reviewer -0% implemented +
+0% implemented + - [ ] associate_repository - [ ] describe_repository_association - [ ] disassociate_repository - [ ] list_repository_associations +
## codeguruprofiler -0% implemented +
+0% implemented + - [ ] configure_agent - [ ] create_profiling_group - [ ] delete_profiling_group @@ -1372,9 +1500,12 @@ - [ ] list_profiling_groups - [ ] post_agent_profile - [ ] update_profiling_group +
## codepipeline -22% implemented +
+21% implemented + - [ ] acknowledge_job - [ ] acknowledge_third_party_job - [ ] create_custom_action_type @@ -1408,12 +1539,16 @@ - [ ] register_webhook_with_third_party - [ ] retry_stage_execution - [ ] start_pipeline_execution +- [ ] stop_pipeline_execution - [X] tag_resource - [X] untag_resource - [X] update_pipeline +
## codestar -0% implemented +
+0% implemented + - [ ] associate_team_member - [ ] create_project - [ ] create_user_profile @@ -1432,9 +1567,22 @@ - [ ] update_project - [ ] update_team_member - [ ] update_user_profile +
+ +## codestar-connections +
+0% implemented + +- [ ] create_connection +- [ ] delete_connection +- [ ] get_connection +- [ ] list_connections +
## codestar-notifications -0% implemented +
+0% implemented + - [ ] create_notification_rule - [ ] delete_notification_rule - [ ] delete_target @@ -1448,9 +1596,12 @@ - [ ] unsubscribe - [ ] untag_resource - [ ] update_notification_rule +
## cognito-identity -28% implemented +
+28% implemented + - [X] create_identity_pool - [ ] delete_identities - [ ] delete_identity_pool @@ -1472,9 +1623,12 @@ - [ ] unlink_identity - [ ] untag_resource - [ ] update_identity_pool +
## cognito-idp -37% implemented +
+37% implemented + - [ ] add_custom_attributes - [X] admin_add_user_to_group - [ ] admin_confirm_sign_up @@ -1575,9 +1729,12 @@ - [X] update_user_pool_domain - [ ] verify_software_token - [ ] verify_user_attribute +
## cognito-sync -0% implemented +
+0% implemented + - [ ] bulk_publish - [ ] delete_dataset - [ ] describe_dataset @@ -1595,9 +1752,12 @@ - [ ] subscribe_to_dataset - [ ] unsubscribe_from_dataset - [ ] update_records +
## comprehend -0% implemented +
+0% implemented + - [ ] batch_detect_dominant_language - [ ] batch_detect_entities - [ ] batch_detect_key_phrases @@ -1649,32 +1809,43 @@ - [ ] tag_resource - [ ] untag_resource - [ ] update_endpoint +
## comprehendmedical -0% implemented +
+0% implemented + - [ ] describe_entities_detection_v2_job - [ ] describe_phi_detection_job - [ ] detect_entities - [ ] detect_entities_v2 - [ ] detect_phi +- [ ] infer_icd10_cm +- [ ] infer_rx_norm - [ ] list_entities_detection_v2_jobs - [ ] list_phi_detection_jobs - [ ] start_entities_detection_v2_job - [ ] start_phi_detection_job - [ ] stop_entities_detection_v2_job - [ ] stop_phi_detection_job +
## compute-optimizer -0% implemented +
+0% implemented + - [ ] get_auto_scaling_group_recommendations - [ ] get_ec2_instance_recommendations - [ ] get_ec2_recommendation_projected_metrics - [ ] get_enrollment_status - [ ] get_recommendation_summaries - [ ] update_enrollment_status +
## config -25% implemented +
+26% implemented + - [X] batch_get_aggregate_resource_config - [X] batch_get_resource_config - [X] delete_aggregation_authorization @@ -1739,13 +1910,14 @@ - [X] put_configuration_recorder - [ ] put_conformance_pack - [X] put_delivery_channel -- [ ] put_evaluations +- [X] put_evaluations - [ ] put_organization_config_rule - [ ] put_organization_conformance_pack - [ ] put_remediation_configurations - [ ] put_remediation_exceptions - [ ] put_resource_config - [ ] put_retention_configuration +- [ ] select_aggregate_resource_config - [ ] select_resource_config - [ ] start_config_rules_evaluation - [X] start_configuration_recorder @@ -1753,9 +1925,12 @@ - [X] stop_configuration_recorder - [ ] tag_resource - [ ] untag_resource +
## connect -0% implemented +
+0% implemented + - [ ] create_user - [ ] delete_user - [ ] describe_user @@ -1785,24 +1960,33 @@ - [ ] update_user_phone_config - [ ] update_user_routing_profile - [ ] update_user_security_profiles +
## connectparticipant -0% implemented +
+0% implemented + - [ ] create_participant_connection - [ ] disconnect_participant - [ ] get_transcript - [ ] send_event - [ ] send_message +
## cur -0% implemented +
+0% implemented + - [ ] delete_report_definition - [ ] describe_report_definitions - [ ] modify_report_definition - [ ] put_report_definition +
## dataexchange -0% implemented +
+0% implemented + - [ ] cancel_job - [ ] create_data_set - [ ] create_job @@ -1825,9 +2009,12 @@ - [ ] update_asset - [ ] update_data_set - [ ] update_revision +
## datapipeline -42% implemented +
+42% implemented + - [X] activate_pipeline - [ ] add_tags - [X] create_pipeline @@ -1847,12 +2034,16 @@ - [ ] set_status - [ ] set_task_status - [ ] validate_pipeline_definition +
## datasync -22% implemented +
+20% implemented + - [X] cancel_task_execution - [ ] create_agent - [ ] create_location_efs +- [ ] create_location_fsx_windows - [ ] create_location_nfs - [ ] create_location_s3 - [ ] create_location_smb @@ -1862,6 +2053,7 @@ - [X] delete_task - [ ] describe_agent - [ ] describe_location_efs +- [ ] describe_location_fsx_windows - [ ] describe_location_nfs - [ ] describe_location_s3 - [ ] describe_location_smb @@ -1877,9 +2069,12 @@ - [ ] untag_resource - [ ] update_agent - [X] update_task +
## dax -0% implemented +
+0% implemented + - [ ] create_cluster - [ ] create_parameter_group - [ ] create_subnet_group @@ -1901,9 +2096,12 @@ - [ ] update_cluster - [ ] update_parameter_group - [ ] update_subnet_group +
## detective -0% implemented +
+0% implemented + - [ ] accept_invitation - [ ] create_graph - [ ] create_members @@ -1915,14 +2113,19 @@ - [ ] list_invitations - [ ] list_members - [ ] reject_invitation +
## devicefarm -0% implemented +
+0% implemented + - [ ] create_device_pool - [ ] create_instance_profile - [ ] create_network_profile - [ ] create_project - [ ] create_remote_access_session +- [ ] create_test_grid_project +- [ ] create_test_grid_url - [ ] create_upload - [ ] create_vpce_configuration - [ ] delete_device_pool @@ -1931,6 +2134,7 @@ - [ ] delete_project - [ ] delete_remote_access_session - [ ] delete_run +- [ ] delete_test_grid_project - [ ] delete_upload - [ ] delete_vpce_configuration - [ ] get_account_settings @@ -1947,6 +2151,8 @@ - [ ] get_run - [ ] get_suite - [ ] get_test +- [ ] get_test_grid_project +- [ ] get_test_grid_session - [ ] get_upload - [ ] get_vpce_configuration - [ ] install_to_remote_access_session @@ -1966,6 +2172,10 @@ - [ ] list_samples - [ ] list_suites - [ ] list_tags_for_resource +- [ ] list_test_grid_projects +- [ ] list_test_grid_session_actions +- [ ] list_test_grid_session_artifacts +- [ ] list_test_grid_sessions - [ ] list_tests - [ ] list_unique_problems - [ ] list_uploads @@ -1983,11 +2193,15 @@ - [ ] update_instance_profile - [ ] update_network_profile - [ ] update_project +- [ ] update_test_grid_project - [ ] update_upload - [ ] update_vpce_configuration +
## directconnect -0% implemented +
+0% implemented + - [ ] accept_direct_connect_gateway_association_proposal - [ ] allocate_connection_on_interconnect - [ ] allocate_hosted_connection @@ -2041,9 +2255,12 @@ - [ ] update_direct_connect_gateway_association - [ ] update_lag - [ ] update_virtual_interface_attributes +
## discovery -0% implemented +
+0% implemented + - [ ] associate_configuration_items_to_application - [ ] batch_delete_import_data - [ ] create_application @@ -2069,9 +2286,12 @@ - [ ] stop_continuous_export - [ ] stop_data_collection_by_agent_ids - [ ] update_application +
## dlm -0% implemented +
+0% implemented + - [ ] create_lifecycle_policy - [ ] delete_lifecycle_policy - [ ] get_lifecycle_policies @@ -2080,9 +2300,12 @@ - [ ] tag_resource - [ ] untag_resource - [ ] update_lifecycle_policy +
## dms -0% implemented +
+0% implemented + - [ ] add_tags_to_resource - [ ] apply_pending_maintenance_action - [ ] create_endpoint @@ -2130,9 +2353,12 @@ - [ ] start_replication_task_assessment - [ ] stop_replication_task - [ ] test_connection +
## docdb -0% implemented +
+0% implemented + - [ ] add_tags_to_resource - [ ] apply_pending_maintenance_action - [ ] copy_db_cluster_parameter_group @@ -2175,9 +2401,12 @@ - [ ] restore_db_cluster_to_point_in_time - [ ] start_db_cluster - [ ] stop_db_cluster +
## ds -0% implemented +
+0% implemented + - [ ] accept_shared_directory - [ ] add_ip_routes - [ ] add_tags_to_resource @@ -2235,11 +2464,14 @@ - [ ] update_radius - [ ] update_trust - [ ] verify_trust +
## dynamodb -24% implemented -- [ ] batch_get_item -- [ ] batch_write_item +
+46% implemented + +- [X] batch_get_item +- [X] batch_write_item - [ ] create_backup - [ ] create_global_table - [X] create_table @@ -2253,48 +2485,57 @@ - [ ] describe_global_table - [ ] describe_global_table_settings - [ ] describe_limits -- [ ] describe_table +- [X] describe_table - [ ] describe_table_replica_auto_scaling -- [ ] describe_time_to_live +- [X] describe_time_to_live - [X] get_item - [ ] list_backups - [ ] list_contributor_insights - [ ] list_global_tables -- [ ] list_tables -- [ ] list_tags_of_resource +- [X] list_tables +- [X] list_tags_of_resource - [X] put_item - [X] query - [ ] restore_table_from_backup - [ ] restore_table_to_point_in_time - [X] scan -- [ ] tag_resource -- [X] transact_get_items -- [ ] transact_write_items -- [ ] untag_resource +- [X] tag_resource +- [ ] transact_get_items +- [X] transact_write_items +- [X] untag_resource - [ ] update_continuous_backups - [ ] update_contributor_insights - [ ] update_global_table - [ ] update_global_table_settings -- [ ] update_item -- [ ] update_table +- [X] update_item +- [X] update_table - [ ] update_table_replica_auto_scaling -- [ ] update_time_to_live +- [X] update_time_to_live +
## dynamodbstreams -100% implemented +
+100% implemented + - [X] describe_stream - [X] get_records - [X] get_shard_iterator - [X] list_streams +
## ebs -0% implemented +
+0% implemented + - [ ] get_snapshot_block - [ ] list_changed_blocks - [ ] list_snapshot_blocks +
## ec2 -26% implemented +
+26% implemented + - [ ] accept_reserved_instances_exchange_quote - [ ] accept_transit_gateway_peering_attachment - [ ] accept_transit_gateway_vpc_attachment @@ -2382,7 +2623,7 @@ - [ ] create_transit_gateway_vpc_attachment - [X] create_volume - [X] create_vpc -- [ ] create_vpc_endpoint +- [X] create_vpc_endpoint - [ ] create_vpc_endpoint_connection_notification - [ ] create_vpc_endpoint_service_configuration - [X] create_vpc_peering_connection @@ -2479,12 +2720,13 @@ - [ ] describe_import_image_tasks - [ ] describe_import_snapshot_tasks - [X] describe_instance_attribute -- [ ] describe_instance_credit_specifications +- [X] describe_instance_credit_specifications - [ ] describe_instance_status - [ ] describe_instance_type_offerings - [ ] describe_instance_types - [ ] describe_instances - [X] describe_internet_gateways +- [ ] describe_ipv6_pools - [X] describe_key_pairs - [ ] describe_launch_template_versions - [ ] describe_launch_templates @@ -2581,6 +2823,7 @@ - [ ] export_client_vpn_client_configuration - [ ] export_image - [ ] export_transit_gateway_routes +- [ ] get_associated_ipv6_pool_cidrs - [ ] get_capacity_reservation_usage - [ ] get_coip_pool_usage - [ ] get_console_output @@ -2602,6 +2845,7 @@ - [X] import_key_pair - [ ] import_snapshot - [ ] import_volume +- [ ] modify_availability_zone_group - [ ] modify_capacity_reservation - [ ] modify_client_vpn_endpoint - [ ] modify_default_credit_specification @@ -2682,6 +2926,7 @@ - [ ] search_transit_gateway_routes - [ ] send_diagnostic_interrupt - [X] start_instances +- [ ] start_vpc_endpoint_service_private_dns_verification - [X] stop_instances - [ ] terminate_client_vpn_connections - [X] terminate_instances @@ -2691,13 +2936,19 @@ - [ ] update_security_group_rule_descriptions_egress - [ ] update_security_group_rule_descriptions_ingress - [ ] withdraw_byoip_cidr +
## ec2-instance-connect -0% implemented -- [x] send_ssh_public_key +
+100% implemented + +- [X] send_ssh_public_key +
## ecr -27% implemented +
+27% implemented + - [ ] batch_check_layer_availability - [X] batch_delete_image - [X] batch_get_image @@ -2727,9 +2978,12 @@ - [ ] tag_resource - [ ] untag_resource - [ ] upload_layer_part +
## ecs -62% implemented +
+62% implemented + - [ ] create_capacity_provider - [X] create_cluster - [X] create_service @@ -2778,26 +3032,41 @@ - [X] update_service - [ ] update_service_primary_task_set - [ ] update_task_set +
## efs -0% implemented +
+0% implemented + +- [ ] create_access_point - [ ] create_file_system - [ ] create_mount_target - [ ] create_tags +- [ ] delete_access_point - [ ] delete_file_system +- [ ] delete_file_system_policy - [ ] delete_mount_target - [ ] delete_tags +- [ ] describe_access_points +- [ ] describe_file_system_policy - [ ] describe_file_systems - [ ] describe_lifecycle_configuration - [ ] describe_mount_target_security_groups - [ ] describe_mount_targets - [ ] describe_tags +- [ ] list_tags_for_resource - [ ] modify_mount_target_security_groups +- [ ] put_file_system_policy - [ ] put_lifecycle_configuration +- [ ] tag_resource +- [ ] untag_resource - [ ] update_file_system +
## eks -0% implemented +
+0% implemented + - [ ] create_cluster - [ ] create_fargate_profile - [ ] create_nodegroup @@ -2819,15 +3088,21 @@ - [ ] update_cluster_version - [ ] update_nodegroup_config - [ ] update_nodegroup_version +
## elastic-inference -0% implemented +
+0% implemented + - [ ] list_tags_for_resource - [ ] tag_resource - [ ] untag_resource +
## elasticache -0% implemented +
+0% implemented + - [ ] add_tags_to_resource - [ ] authorize_cache_security_group_ingress - [ ] batch_apply_update_action @@ -2838,13 +3113,16 @@ - [ ] create_cache_parameter_group - [ ] create_cache_security_group - [ ] create_cache_subnet_group +- [ ] create_global_replication_group - [ ] create_replication_group - [ ] create_snapshot +- [ ] decrease_node_groups_in_global_replication_group - [ ] decrease_replica_count - [ ] delete_cache_cluster - [ ] delete_cache_parameter_group - [ ] delete_cache_security_group - [ ] delete_cache_subnet_group +- [ ] delete_global_replication_group - [ ] delete_replication_group - [ ] delete_snapshot - [ ] describe_cache_clusters @@ -2855,30 +3133,39 @@ - [ ] describe_cache_subnet_groups - [ ] describe_engine_default_parameters - [ ] describe_events +- [ ] describe_global_replication_groups - [ ] describe_replication_groups - [ ] describe_reserved_cache_nodes - [ ] describe_reserved_cache_nodes_offerings - [ ] describe_service_updates - [ ] describe_snapshots - [ ] describe_update_actions +- [ ] disassociate_global_replication_group +- [ ] failover_global_replication_group +- [ ] increase_node_groups_in_global_replication_group - [ ] increase_replica_count - [ ] list_allowed_node_type_modifications - [ ] list_tags_for_resource - [ ] modify_cache_cluster - [ ] modify_cache_parameter_group - [ ] modify_cache_subnet_group +- [ ] modify_global_replication_group - [ ] modify_replication_group - [ ] modify_replication_group_shard_configuration - [ ] purchase_reserved_cache_nodes_offering +- [ ] rebalance_slots_in_global_replication_group - [ ] reboot_cache_cluster - [ ] remove_tags_from_resource - [ ] reset_cache_parameter_group - [ ] revoke_cache_security_group_ingress - [ ] start_migration - [ ] test_failover +
## elasticbeanstalk -13% implemented +
+13% implemented + - [ ] abort_environment_update - [ ] apply_environment_managed_action - [ ] check_dns_availability @@ -2923,9 +3210,12 @@ - [ ] update_environment - [X] update_tags_for_resource - [ ] validate_configuration_settings +
## elastictranscoder -0% implemented +
+0% implemented + - [ ] cancel_job - [ ] create_job - [ ] create_pipeline @@ -2943,9 +3233,12 @@ - [ ] update_pipeline - [ ] update_pipeline_notifications - [ ] update_pipeline_status +
## elb -34% implemented +
+34% implemented + - [ ] add_tags - [X] apply_security_groups_to_load_balancer - [ ] attach_load_balancer_to_subnets @@ -2975,9 +3268,12 @@ - [ ] set_load_balancer_listener_ssl_certificate - [ ] set_load_balancer_policies_for_backend_server - [X] set_load_balancer_policies_of_listener +
## elbv2 -70% implemented +
+70% implemented + - [ ] add_listener_certificates - [ ] add_tags - [X] create_listener @@ -3012,9 +3308,12 @@ - [X] set_rule_priorities - [X] set_security_groups - [X] set_subnets +
## emr -50% implemented +
+50% implemented + - [ ] add_instance_fleet - [X] add_instance_groups - [X] add_job_flow_steps @@ -3045,9 +3344,12 @@ - [X] set_termination_protection - [X] set_visible_to_all_users - [X] terminate_job_flows +
## es -0% implemented +
+0% implemented + - [ ] add_tags - [ ] cancel_elasticsearch_service_software_update - [ ] create_elasticsearch_domain @@ -3071,9 +3373,12 @@ - [ ] start_elasticsearch_service_software_update - [ ] update_elasticsearch_domain_config - [ ] upgrade_elasticsearch_domain +
## events -58% implemented +
+67% implemented + - [ ] activate_event_source - [X] create_event_bus - [ ] create_partner_event_source @@ -3093,7 +3398,7 @@ - [ ] list_partner_event_sources - [X] list_rule_names_by_target - [X] list_rules -- [ ] list_tags_for_resource +- [X] list_tags_for_resource - [X] list_targets_by_rule - [X] put_events - [ ] put_partner_events @@ -3102,12 +3407,15 @@ - [X] put_targets - [X] remove_permission - [X] remove_targets -- [ ] tag_resource +- [X] tag_resource - [X] test_event_pattern -- [ ] untag_resource +- [X] untag_resource +
## firehose -0% implemented +
+0% implemented + - [ ] create_delivery_stream - [ ] delete_delivery_stream - [ ] describe_delivery_stream @@ -3120,9 +3428,12 @@ - [ ] tag_delivery_stream - [ ] untag_delivery_stream - [ ] update_destination +
## fms -0% implemented +
+0% implemented + - [ ] associate_admin_account - [ ] delete_notification_channel - [ ] delete_policy @@ -3135,11 +3446,17 @@ - [ ] list_compliance_status - [ ] list_member_accounts - [ ] list_policies +- [ ] list_tags_for_resource - [ ] put_notification_channel - [ ] put_policy +- [ ] tag_resource +- [ ] untag_resource +
## forecast -0% implemented +
+0% implemented + - [ ] create_dataset - [ ] create_dataset_group - [ ] create_dataset_import_job @@ -3166,13 +3483,19 @@ - [ ] list_forecasts - [ ] list_predictors - [ ] update_dataset_group +
## forecastquery -0% implemented +
+0% implemented + - [ ] query_forecast +
## frauddetector -0% implemented +
+0% implemented + - [ ] batch_create_variable - [ ] batch_get_variable - [ ] create_detector_version @@ -3203,23 +3526,32 @@ - [ ] update_rule_metadata - [ ] update_rule_version - [ ] update_variable +
## fsx -0% implemented +
+0% implemented + +- [ ] cancel_data_repository_task - [ ] create_backup +- [ ] create_data_repository_task - [ ] create_file_system - [ ] create_file_system_from_backup - [ ] delete_backup - [ ] delete_file_system - [ ] describe_backups +- [ ] describe_data_repository_tasks - [ ] describe_file_systems - [ ] list_tags_for_resource - [ ] tag_resource - [ ] untag_resource - [ ] update_file_system +
## gamelift -0% implemented +
+0% implemented + - [ ] accept_match - [ ] create_alias - [ ] create_build @@ -3271,6 +3603,7 @@ - [ ] list_builds - [ ] list_fleets - [ ] list_scripts +- [ ] list_tags_for_resource - [ ] put_scaling_policy - [ ] request_upload_credentials - [ ] resolve_alias @@ -3282,6 +3615,8 @@ - [ ] stop_fleet_actions - [ ] stop_game_session_placement - [ ] stop_matchmaking +- [ ] tag_resource +- [ ] untag_resource - [ ] update_alias - [ ] update_build - [ ] update_fleet_attributes @@ -3293,9 +3628,12 @@ - [ ] update_runtime_configuration - [ ] update_script - [ ] validate_matchmaking_rule_set +
## glacier -12% implemented +
+12% implemented + - [ ] abort_multipart_upload - [ ] abort_vault_lock - [ ] add_tags_to_vault @@ -3329,33 +3667,47 @@ - [ ] set_vault_notifications - [ ] upload_archive - [ ] upload_multipart_part +
## globalaccelerator -0% implemented +
+0% implemented + +- [ ] advertise_byoip_cidr - [ ] create_accelerator - [ ] create_endpoint_group - [ ] create_listener - [ ] delete_accelerator - [ ] delete_endpoint_group - [ ] delete_listener +- [ ] deprovision_byoip_cidr - [ ] describe_accelerator - [ ] describe_accelerator_attributes - [ ] describe_endpoint_group - [ ] describe_listener - [ ] list_accelerators +- [ ] list_byoip_cidrs - [ ] list_endpoint_groups - [ ] list_listeners +- [ ] list_tags_for_resource +- [ ] provision_byoip_cidr +- [ ] tag_resource +- [ ] untag_resource - [ ] update_accelerator - [ ] update_accelerator_attributes - [ ] update_endpoint_group - [ ] update_listener +- [ ] withdraw_byoip_cidr +
## glue -11% implemented -- [X] batch_create_partition +
+5% implemented + +- [ ] batch_create_partition - [ ] batch_delete_connection -- [X] batch_delete_partition -- [X] batch_delete_table +- [ ] batch_delete_partition +- [ ] batch_delete_table - [ ] batch_delete_table_version - [ ] batch_get_crawlers - [ ] batch_get_dev_endpoints @@ -3372,7 +3724,7 @@ - [ ] create_dev_endpoint - [ ] create_job - [ ] create_ml_transform -- [X] create_partition +- [ ] create_partition - [ ] create_script - [ ] create_security_configuration - [X] create_table @@ -3418,7 +3770,7 @@ - [ ] get_ml_task_runs - [ ] get_ml_transform - [ ] get_ml_transforms -- [X] get_partition +- [ ] get_partition - [ ] get_partitions - [ ] get_plan - [ ] get_resource_policy @@ -3441,6 +3793,7 @@ - [ ] list_crawlers - [ ] list_dev_endpoints - [ ] list_jobs +- [ ] list_ml_transforms - [ ] list_triggers - [ ] list_workflows - [ ] put_data_catalog_encryption_settings @@ -3470,14 +3823,17 @@ - [ ] update_dev_endpoint - [ ] update_job - [ ] update_ml_transform -- [X] update_partition -- [X] update_table +- [ ] update_partition +- [ ] update_table - [ ] update_trigger - [ ] update_user_defined_function - [ ] update_workflow +
## greengrass -0% implemented +
+0% implemented + - [ ] associate_role_to_group - [ ] associate_service_role_to_account - [ ] create_connector_definition @@ -3568,9 +3924,12 @@ - [ ] update_logger_definition - [ ] update_resource_definition - [ ] update_subscription_definition +
## groundstation -0% implemented +
+0% implemented + - [ ] cancel_contact - [ ] create_config - [ ] create_dataflow_endpoint_group @@ -3596,9 +3955,12 @@ - [ ] untag_resource - [ ] update_config - [ ] update_mission_profile +
## guardduty -0% implemented +
+0% implemented + - [ ] accept_invitation - [ ] archive_findings - [ ] create_detector @@ -3649,18 +4011,31 @@ - [ ] update_ip_set - [ ] update_publishing_destination - [ ] update_threat_intel_set +
## health -0% implemented +
+0% implemented + +- [ ] describe_affected_accounts_for_organization - [ ] describe_affected_entities +- [ ] describe_affected_entities_for_organization - [ ] describe_entity_aggregates - [ ] describe_event_aggregates - [ ] describe_event_details +- [ ] describe_event_details_for_organization - [ ] describe_event_types - [ ] describe_events +- [ ] describe_events_for_organization +- [ ] describe_health_service_status_for_organization +- [ ] disable_health_service_access_for_organization +- [ ] enable_health_service_access_for_organization +
## iam -67% implemented +
+68% implemented + - [ ] add_client_id_to_open_id_connect_provider - [X] add_role_to_instance_profile - [X] add_user_to_group @@ -3801,9 +4176,12 @@ - [X] upload_server_certificate - [X] upload_signing_certificate - [X] upload_ssh_public_key +
## imagebuilder -0% implemented +
+0% implemented + - [ ] cancel_image_creation - [ ] create_component - [ ] create_distribution_configuration @@ -3846,18 +4224,24 @@ - [ ] update_distribution_configuration - [ ] update_image_pipeline - [ ] update_infrastructure_configuration +
## importexport -0% implemented +
+0% implemented + - [ ] cancel_job - [ ] create_job - [ ] get_shipping_label - [ ] get_status - [ ] list_jobs - [ ] update_job +
## inspector -0% implemented +
+0% implemented + - [ ] add_attributes_to_findings - [ ] create_assessment_target - [ ] create_assessment_template @@ -3895,9 +4279,12 @@ - [ ] subscribe_to_event - [ ] unsubscribe_from_event - [ ] update_assessment_target +
## iot -20% implemented +
+27% implemented + - [ ] accept_certificate_transfer - [ ] add_thing_to_billing_group - [X] add_thing_to_thing_group @@ -3909,8 +4296,8 @@ - [ ] cancel_audit_mitigation_actions_task - [ ] cancel_audit_task - [ ] cancel_certificate_transfer -- [ ] cancel_job -- [ ] cancel_job_execution +- [X] cancel_job +- [X] cancel_job_execution - [ ] clear_default_authorizer - [ ] confirm_topic_rule_destination - [ ] create_authorizer @@ -3923,7 +4310,7 @@ - [ ] create_mitigation_action - [ ] create_ota_update - [X] create_policy -- [ ] create_policy_version +- [X] create_policy_version - [ ] create_provisioning_claim - [ ] create_provisioning_template - [ ] create_provisioning_template_version @@ -3943,12 +4330,12 @@ - [X] delete_certificate - [ ] delete_domain_configuration - [ ] delete_dynamic_thing_group -- [ ] delete_job -- [ ] delete_job_execution +- [X] delete_job +- [X] delete_job_execution - [ ] delete_mitigation_action - [ ] delete_ota_update - [X] delete_policy -- [ ] delete_policy_version +- [X] delete_policy_version - [ ] delete_provisioning_template - [ ] delete_provisioning_template_version - [ ] delete_registration_code @@ -3977,7 +4364,7 @@ - [ ] describe_event_configurations - [ ] describe_index - [X] describe_job -- [ ] describe_job_execution +- [X] describe_job_execution - [ ] describe_mitigation_action - [ ] describe_provisioning_template - [ ] describe_provisioning_template_version @@ -3998,19 +4385,19 @@ - [ ] get_cardinality - [ ] get_effective_policies - [ ] get_indexing_configuration -- [ ] get_job_document +- [X] get_job_document - [ ] get_logging_options - [ ] get_ota_update - [ ] get_percentiles - [X] get_policy -- [ ] get_policy_version +- [X] get_policy_version - [ ] get_registration_code - [ ] get_statistics - [ ] get_topic_rule - [ ] get_topic_rule_destination - [ ] get_v2_logging_options - [ ] list_active_violations -- [ ] list_attached_policies +- [X] list_attached_policies - [ ] list_audit_findings - [ ] list_audit_mitigation_actions_executions - [ ] list_audit_mitigation_actions_tasks @@ -4022,15 +4409,15 @@ - [ ] list_certificates_by_ca - [ ] list_domain_configurations - [ ] list_indices -- [ ] list_job_executions_for_job -- [ ] list_job_executions_for_thing -- [ ] list_jobs +- [X] list_job_executions_for_job +- [X] list_job_executions_for_thing +- [X] list_jobs - [ ] list_mitigation_actions - [ ] list_ota_updates - [ ] list_outgoing_certificates - [X] list_policies - [X] list_policy_principals -- [ ] list_policy_versions +- [X] list_policy_versions - [X] list_principal_policies - [X] list_principal_things - [ ] list_provisioning_template_versions @@ -4065,7 +4452,7 @@ - [ ] replace_topic_rule - [ ] search_index - [ ] set_default_authorizer -- [ ] set_default_policy_version +- [X] set_default_policy_version - [ ] set_logging_options - [ ] set_v2_logging_level - [ ] set_v2_logging_options @@ -4099,23 +4486,32 @@ - [X] update_thing_groups_for_thing - [ ] update_topic_rule_destination - [ ] validate_security_profile_behaviors +
## iot-data -100% implemented +
+100% implemented + - [X] delete_thing_shadow - [X] get_thing_shadow - [X] publish - [X] update_thing_shadow +
## iot-jobs-data -0% implemented +
+0% implemented + - [ ] describe_job_execution - [ ] get_pending_job_executions - [ ] start_next_pending_job_execution - [ ] update_job_execution +
## iot1click-devices -0% implemented +
+0% implemented + - [ ] claim_devices_by_claim_code - [ ] describe_device - [ ] finalize_device_claim @@ -4129,9 +4525,12 @@ - [ ] unclaim_device - [ ] untag_resource - [ ] update_device_state +
## iot1click-projects -0% implemented +
+0% implemented + - [ ] associate_device_with_placement - [ ] create_placement - [ ] create_project @@ -4148,9 +4547,12 @@ - [ ] untag_resource - [ ] update_placement - [ ] update_project +
## iotanalytics -0% implemented +
+0% implemented + - [ ] batch_put_message - [ ] cancel_pipeline_reprocessing - [ ] create_channel @@ -4185,9 +4587,12 @@ - [ ] update_dataset - [ ] update_datastore - [ ] update_pipeline +
## iotevents -0% implemented +
+0% implemented + - [ ] create_detector_model - [ ] create_input - [ ] delete_detector_model @@ -4204,16 +4609,22 @@ - [ ] untag_resource - [ ] update_detector_model - [ ] update_input +
## iotevents-data -0% implemented +
+0% implemented + - [ ] batch_put_message - [ ] batch_update_detector - [ ] describe_detector - [ ] list_detectors +
## iotsecuretunneling -0% implemented +
+0% implemented + - [ ] close_tunnel - [ ] describe_tunnel - [ ] list_tags_for_resource @@ -4221,9 +4632,12 @@ - [ ] open_tunnel - [ ] tag_resource - [ ] untag_resource +
## iotthingsgraph -0% implemented +
+0% implemented + - [ ] associate_entity_to_thing - [ ] create_flow_template - [ ] create_system_instance @@ -4259,9 +4673,12 @@ - [ ] update_flow_template - [ ] update_system_template - [ ] upload_entity_definitions +
## kafka -0% implemented +
+0% implemented + - [ ] create_cluster - [ ] create_configuration - [ ] delete_cluster @@ -4274,6 +4691,7 @@ - [ ] list_clusters - [ ] list_configuration_revisions - [ ] list_configurations +- [ ] list_kafka_versions - [ ] list_nodes - [ ] list_tags_for_resource - [ ] tag_resource @@ -4282,9 +4700,12 @@ - [ ] update_broker_storage - [ ] update_cluster_configuration - [ ] update_monitoring +
## kendra -0% implemented +
+0% implemented + - [ ] batch_delete_document - [ ] batch_put_document - [ ] create_data_source @@ -4305,9 +4726,12 @@ - [ ] submit_feedback - [ ] update_data_source - [ ] update_index +
## kinesis -50% implemented +
+50% implemented + - [X] add_tags_to_stream - [X] create_stream - [ ] decrease_stream_retention_period @@ -4336,25 +4760,37 @@ - [ ] stop_stream_encryption - [ ] subscribe_to_shard - [ ] update_shard_count +
## kinesis-video-archived-media -0% implemented +
+0% implemented + - [ ] get_dash_streaming_session_url - [ ] get_hls_streaming_session_url - [ ] get_media_for_fragment_list - [ ] list_fragments +
## kinesis-video-media -0% implemented +
+0% implemented + - [ ] get_media +
## kinesis-video-signaling -0% implemented +
+0% implemented + - [ ] get_ice_server_config - [ ] send_alexa_offer_to_master +
## kinesisanalytics -0% implemented +
+0% implemented + - [ ] add_application_cloud_watch_logging_option - [ ] add_application_input - [ ] add_application_input_processing_configuration @@ -4375,9 +4811,12 @@ - [ ] tag_resource - [ ] untag_resource - [ ] update_application +
## kinesisanalyticsv2 -0% implemented +
+0% implemented + - [ ] add_application_cloud_watch_logging_option - [ ] add_application_input - [ ] add_application_input_processing_configuration @@ -4404,9 +4843,12 @@ - [ ] tag_resource - [ ] untag_resource - [ ] update_application +
## kinesisvideo -0% implemented +
+0% implemented + - [ ] create_signaling_channel - [ ] create_stream - [ ] delete_signaling_channel @@ -4426,9 +4868,12 @@ - [ ] update_data_retention - [ ] update_signaling_channel - [ ] update_stream +
## kms -43% implemented +
+45% implemented + - [X] cancel_key_deletion - [ ] connect_custom_key_store - [ ] create_alias @@ -4470,14 +4915,17 @@ - [X] schedule_key_deletion - [ ] sign - [X] tag_resource -- [ ] untag_resource +- [X] untag_resource - [ ] update_alias - [ ] update_custom_key_store - [X] update_key_description - [ ] verify +
## lakeformation -0% implemented +
+0% implemented + - [ ] batch_grant_permissions - [ ] batch_revoke_permissions - [ ] deregister_resource @@ -4491,11 +4939,14 @@ - [ ] register_resource - [ ] revoke_permissions - [ ] update_resource +
## lambda -32% implemented +
+38% implemented + - [ ] add_layer_version_permission -- [ ] add_permission +- [X] add_permission - [ ] create_alias - [X] create_event_source_mapping - [X] create_function @@ -4516,7 +4967,7 @@ - [ ] get_layer_version - [ ] get_layer_version_by_arn - [ ] get_layer_version_policy -- [ ] get_policy +- [X] get_policy - [ ] get_provisioned_concurrency_config - [X] invoke - [ ] invoke_async @@ -4535,7 +4986,7 @@ - [ ] put_function_event_invoke_config - [ ] put_provisioned_concurrency_config - [ ] remove_layer_version_permission -- [ ] remove_permission +- [X] remove_permission - [X] tag_resource - [X] untag_resource - [ ] update_alias @@ -4543,9 +4994,12 @@ - [X] update_function_code - [X] update_function_configuration - [ ] update_function_event_invoke_config +
## lex-models -0% implemented +
+0% implemented + - [ ] create_bot_version - [ ] create_intent_version - [ ] create_slot_type_version @@ -4577,22 +5031,31 @@ - [ ] get_slot_type_versions - [ ] get_slot_types - [ ] get_utterances_view +- [ ] list_tags_for_resource - [ ] put_bot - [ ] put_bot_alias - [ ] put_intent - [ ] put_slot_type - [ ] start_import +- [ ] tag_resource +- [ ] untag_resource +
## lex-runtime -0% implemented +
+0% implemented + - [ ] delete_session - [ ] get_session - [ ] post_content - [ ] post_text - [ ] put_session +
## license-manager -0% implemented +
+0% implemented + - [ ] create_license_configuration - [ ] delete_license_configuration - [ ] get_license_configuration @@ -4609,9 +5072,12 @@ - [ ] update_license_configuration - [ ] update_license_specifications_for_resource - [ ] update_service_settings +
## lightsail -0% implemented +
+0% implemented + - [ ] allocate_static_ip - [ ] attach_disk - [ ] attach_instances_to_load_balancer @@ -4620,6 +5086,7 @@ - [ ] close_instance_public_ports - [ ] copy_snapshot - [ ] create_cloud_formation_stack +- [ ] create_contact_method - [ ] create_disk - [ ] create_disk_from_snapshot - [ ] create_disk_snapshot @@ -4634,7 +5101,9 @@ - [ ] create_relational_database - [ ] create_relational_database_from_snapshot - [ ] create_relational_database_snapshot +- [ ] delete_alarm - [ ] delete_auto_snapshot +- [ ] delete_contact_method - [ ] delete_disk - [ ] delete_disk_snapshot - [ ] delete_domain @@ -4655,10 +5124,12 @@ - [ ] enable_add_on - [ ] export_snapshot - [ ] get_active_names +- [ ] get_alarms - [ ] get_auto_snapshots - [ ] get_blueprints - [ ] get_bundles - [ ] get_cloud_formation_stack_records +- [ ] get_contact_methods - [ ] get_disk - [ ] get_disk_snapshot - [ ] get_disk_snapshots @@ -4702,24 +5173,30 @@ - [ ] is_vpc_peered - [ ] open_instance_public_ports - [ ] peer_vpc +- [ ] put_alarm - [ ] put_instance_public_ports - [ ] reboot_instance - [ ] reboot_relational_database - [ ] release_static_ip +- [ ] send_contact_method_verification - [ ] start_instance - [ ] start_relational_database - [ ] stop_instance - [ ] stop_relational_database - [ ] tag_resource +- [ ] test_alarm - [ ] unpeer_vpc - [ ] untag_resource - [ ] update_domain_entry - [ ] update_load_balancer_attribute - [ ] update_relational_database - [ ] update_relational_database_parameters +
## logs -35% implemented +
+35% implemented + - [ ] associate_kms_key - [ ] cancel_export_task - [ ] create_export_task @@ -4759,9 +5236,12 @@ - [X] tag_log_group - [ ] test_metric_filter - [X] untag_log_group +
## machinelearning -0% implemented +
+0% implemented + - [ ] add_tags - [ ] create_batch_prediction - [ ] create_data_source_from_rds @@ -4790,9 +5270,12 @@ - [ ] update_data_source - [ ] update_evaluation - [ ] update_ml_model +
## macie -0% implemented +
+0% implemented + - [ ] associate_member_account - [ ] associate_s3_resources - [ ] disassociate_member_account @@ -4800,9 +5283,12 @@ - [ ] list_member_accounts - [ ] list_s3_resources - [ ] update_s3_resources +
## managedblockchain -0% implemented +
+0% implemented + - [ ] create_member - [ ] create_network - [ ] create_node @@ -4821,28 +5307,41 @@ - [ ] list_proposals - [ ] reject_invitation - [ ] vote_on_proposal +
## marketplace-catalog -0% implemented +
+0% implemented + - [ ] cancel_change_set - [ ] describe_change_set - [ ] describe_entity - [ ] list_change_sets - [ ] list_entities - [ ] start_change_set +
## marketplace-entitlement -0% implemented +
+0% implemented + - [ ] get_entitlements +
## marketplacecommerceanalytics -0% implemented +
+0% implemented + - [ ] generate_data_set - [ ] start_support_data_export +
## mediaconnect -0% implemented +
+0% implemented + - [ ] add_flow_outputs +- [ ] add_flow_sources - [ ] create_flow - [ ] delete_flow - [ ] describe_flow @@ -4851,17 +5350,22 @@ - [ ] list_flows - [ ] list_tags_for_resource - [ ] remove_flow_output +- [ ] remove_flow_source - [ ] revoke_flow_entitlement - [ ] start_flow - [ ] stop_flow - [ ] tag_resource - [ ] untag_resource +- [ ] update_flow - [ ] update_flow_entitlement - [ ] update_flow_output - [ ] update_flow_source +
## mediaconvert -0% implemented +
+0% implemented + - [ ] associate_certificate - [ ] cancel_job - [ ] create_job @@ -4887,9 +5391,12 @@ - [ ] update_job_template - [ ] update_preset - [ ] update_queue +
## medialive -0% implemented +
+0% implemented + - [ ] batch_update_schedule - [ ] create_channel - [ ] create_input @@ -4933,9 +5440,12 @@ - [ ] update_multiplex - [ ] update_multiplex_program - [ ] update_reservation +
## mediapackage -0% implemented +
+0% implemented + - [ ] create_channel - [ ] create_harvest_job - [ ] create_origin_endpoint @@ -4954,9 +5464,12 @@ - [ ] untag_resource - [ ] update_channel - [ ] update_origin_endpoint +
## mediapackage-vod -0% implemented +
+0% implemented + - [ ] create_asset - [ ] create_packaging_configuration - [ ] create_packaging_group @@ -4969,9 +5482,12 @@ - [ ] list_assets - [ ] list_packaging_configurations - [ ] list_packaging_groups +
## mediastore -0% implemented +
+0% implemented + - [ ] create_container - [ ] delete_container - [ ] delete_container_policy @@ -4990,17 +5506,23 @@ - [ ] stop_access_logging - [ ] tag_resource - [ ] untag_resource +
## mediastore-data -0% implemented +
+0% implemented + - [ ] delete_object - [ ] describe_object - [ ] get_object - [ ] list_items - [ ] put_object +
## mediatailor -0% implemented +
+0% implemented + - [ ] delete_playback_configuration - [ ] get_playback_configuration - [ ] list_playback_configurations @@ -5008,16 +5530,22 @@ - [ ] put_playback_configuration - [ ] tag_resource - [ ] untag_resource +
## meteringmarketplace -0% implemented +
+0% implemented + - [ ] batch_meter_usage - [ ] meter_usage - [ ] register_usage - [ ] resolve_customer +
## mgh -0% implemented +
+0% implemented + - [ ] associate_created_artifact - [ ] associate_discovered_resource - [ ] create_progress_update_stream @@ -5027,6 +5555,7 @@ - [ ] disassociate_created_artifact - [ ] disassociate_discovered_resource - [ ] import_migration_task +- [ ] list_application_states - [ ] list_created_artifacts - [ ] list_discovered_resources - [ ] list_migration_tasks @@ -5034,15 +5563,21 @@ - [ ] notify_application_state - [ ] notify_migration_task_state - [ ] put_resource_attributes +
## migrationhub-config -0% implemented +
+0% implemented + - [ ] create_home_region_control - [ ] describe_home_region_controls - [ ] get_home_region +
## mobile -0% implemented +
+0% implemented + - [ ] create_project - [ ] delete_project - [ ] describe_bundle @@ -5052,9 +5587,12 @@ - [ ] list_bundles - [ ] list_projects - [ ] update_project +
## mq -0% implemented +
+0% implemented + - [ ] create_broker - [ ] create_configuration - [ ] create_tags @@ -5077,9 +5615,12 @@ - [ ] update_broker - [ ] update_configuration - [ ] update_user +
## mturk -0% implemented +
+0% implemented + - [ ] accept_qualification_request - [ ] approve_assignment - [ ] associate_qualification_with_worker @@ -5119,9 +5660,12 @@ - [ ] update_hit_type_of_hit - [ ] update_notification_settings - [ ] update_qualification_type +
## neptune -0% implemented +
+0% implemented + - [ ] add_role_to_db_cluster - [ ] add_source_identifier_to_subscription - [ ] add_tags_to_resource @@ -5179,9 +5723,14 @@ - [ ] reset_db_parameter_group - [ ] restore_db_cluster_from_snapshot - [ ] restore_db_cluster_to_point_in_time +- [ ] start_db_cluster +- [ ] stop_db_cluster +
## networkmanager -0% implemented +
+0% implemented + - [ ] associate_customer_gateway - [ ] associate_link - [ ] create_device @@ -5210,9 +5759,12 @@ - [ ] update_global_network - [ ] update_link - [ ] update_site +
## opsworks -12% implemented +
+12% implemented + - [ ] assign_instance - [ ] assign_volume - [ ] associate_elastic_ip @@ -5287,9 +5839,12 @@ - [ ] update_stack - [ ] update_user_profile - [ ] update_volume +
## opsworkscm -0% implemented +
+0% implemented + - [ ] associate_node - [ ] create_backup - [ ] create_server @@ -5302,13 +5857,19 @@ - [ ] describe_servers - [ ] disassociate_node - [ ] export_server_engine_attribute +- [ ] list_tags_for_resource - [ ] restore_server - [ ] start_maintenance +- [ ] tag_resource +- [ ] untag_resource - [ ] update_server - [ ] update_server_engine_attributes +
## organizations -48% implemented +
+51% implemented + - [ ] accept_handshake - [X] attach_policy - [ ] cancel_handshake @@ -5354,19 +5915,27 @@ - [ ] remove_account_from_organization - [X] tag_resource - [X] untag_resource -- [ ] update_organizational_unit +- [X] update_organizational_unit - [ ] update_policy +
## outposts -0% implemented +
+0% implemented + - [ ] create_outpost +- [ ] delete_outpost +- [ ] delete_site - [ ] get_outpost - [ ] get_outpost_instance_types - [ ] list_outposts - [ ] list_sites +
## personalize -0% implemented +
+0% implemented + - [ ] create_batch_inference_job - [ ] create_campaign - [ ] create_dataset @@ -5406,23 +5975,35 @@ - [ ] list_solution_versions - [ ] list_solutions - [ ] update_campaign +
## personalize-events -0% implemented +
+0% implemented + - [ ] put_events +
## personalize-runtime -0% implemented +
+0% implemented + - [ ] get_personalized_ranking - [ ] get_recommendations +
## pi -0% implemented +
+0% implemented + - [ ] describe_dimension_keys - [ ] get_resource_metrics +
## pinpoint -0% implemented +
+0% implemented + - [ ] create_app - [ ] create_campaign - [ ] create_email_template @@ -5430,6 +6011,7 @@ - [ ] create_import_job - [ ] create_journey - [ ] create_push_template +- [ ] create_recommender_configuration - [ ] create_segment - [ ] create_sms_template - [ ] create_voice_template @@ -5448,6 +6030,7 @@ - [ ] delete_gcm_channel - [ ] delete_journey - [ ] delete_push_template +- [ ] delete_recommender_configuration - [ ] delete_segment - [ ] delete_sms_channel - [ ] delete_sms_template @@ -5485,6 +6068,8 @@ - [ ] get_journey_execution_activity_metrics - [ ] get_journey_execution_metrics - [ ] get_push_template +- [ ] get_recommender_configuration +- [ ] get_recommender_configurations - [ ] get_segment - [ ] get_segment_export_jobs - [ ] get_segment_import_jobs @@ -5498,6 +6083,7 @@ - [ ] get_voice_template - [ ] list_journeys - [ ] list_tags_for_resource +- [ ] list_template_versions - [ ] list_templates - [ ] phone_number_validate - [ ] put_event_stream @@ -5523,14 +6109,19 @@ - [ ] update_journey - [ ] update_journey_state - [ ] update_push_template +- [ ] update_recommender_configuration - [ ] update_segment - [ ] update_sms_channel - [ ] update_sms_template +- [ ] update_template_active_version - [ ] update_voice_channel - [ ] update_voice_template +
## pinpoint-email -0% implemented +
+0% implemented + - [ ] create_configuration_set - [ ] create_configuration_set_event_destination - [ ] create_dedicated_ip_pool @@ -5573,9 +6164,12 @@ - [ ] tag_resource - [ ] untag_resource - [ ] update_configuration_set_event_destination +
## pinpoint-sms-voice -0% implemented +
+0% implemented + - [ ] create_configuration_set - [ ] create_configuration_set_event_destination - [ ] delete_configuration_set @@ -5583,9 +6177,12 @@ - [ ] get_configuration_set_event_destinations - [ ] send_voice_message - [ ] update_configuration_set_event_destination +
## polly -55% implemented +
+55% implemented + - [X] delete_lexicon - [X] describe_voices - [X] get_lexicon @@ -5595,15 +6192,21 @@ - [X] put_lexicon - [ ] start_speech_synthesis_task - [ ] synthesize_speech +
## pricing -0% implemented +
+0% implemented + - [ ] describe_services - [ ] get_attribute_values - [ ] get_products +
## qldb -0% implemented +
+0% implemented + - [ ] create_ledger - [ ] delete_ledger - [ ] describe_journal_s3_export @@ -5619,13 +6222,19 @@ - [ ] tag_resource - [ ] untag_resource - [ ] update_ledger +
## qldb-session -0% implemented +
+0% implemented + - [ ] send_command +
## quicksight -0% implemented +
+0% implemented + - [ ] cancel_ingestion - [ ] create_dashboard - [ ] create_data_set @@ -5676,6 +6285,7 @@ - [ ] list_user_groups - [ ] list_users - [ ] register_user +- [ ] search_dashboards - [ ] tag_resource - [ ] untag_resource - [ ] update_dashboard @@ -5691,9 +6301,12 @@ - [ ] update_template_alias - [ ] update_template_permissions - [ ] update_user +
## ram -0% implemented +
+0% implemented + - [ ] accept_resource_share_invitation - [ ] associate_resource_share - [ ] associate_resource_share_permission @@ -5717,9 +6330,12 @@ - [ ] tag_resource - [ ] untag_resource - [ ] update_resource_share +
## rds -0% implemented +
+0% implemented + - [ ] add_role_to_db_cluster - [ ] add_role_to_db_instance - [ ] add_source_identifier_to_subscription @@ -5727,6 +6343,7 @@ - [ ] apply_pending_maintenance_action - [ ] authorize_db_security_group_ingress - [ ] backtrack_db_cluster +- [ ] cancel_export_task - [ ] copy_db_cluster_parameter_group - [ ] copy_db_cluster_snapshot - [ ] copy_db_parameter_group @@ -5792,6 +6409,7 @@ - [ ] describe_event_categories - [ ] describe_event_subscriptions - [ ] describe_events +- [ ] describe_export_tasks - [ ] describe_global_clusters - [ ] describe_installation_media - [ ] describe_option_group_options @@ -5806,6 +6424,7 @@ - [ ] failover_db_cluster - [ ] import_installation_media - [ ] list_tags_for_resource +- [ ] modify_certificates - [ ] modify_current_db_cluster_capacity - [ ] modify_db_cluster - [ ] modify_db_cluster_endpoint @@ -5843,21 +6462,28 @@ - [ ] start_activity_stream - [ ] start_db_cluster - [ ] start_db_instance +- [ ] start_export_task - [ ] stop_activity_stream - [ ] stop_db_cluster - [ ] stop_db_instance +
## rds-data -0% implemented +
+0% implemented + - [ ] batch_execute_statement - [ ] begin_transaction - [ ] commit_transaction - [ ] execute_sql - [ ] execute_statement - [ ] rollback_transaction +
## redshift -30% implemented +
+29% implemented + - [ ] accept_reserved_node_exchange - [ ] authorize_cluster_security_group_ingress - [ ] authorize_snapshot_access @@ -5935,18 +6561,23 @@ - [ ] modify_scheduled_action - [X] modify_snapshot_copy_retention_period - [ ] modify_snapshot_schedule +- [ ] pause_cluster - [ ] purchase_reserved_node_offering - [ ] reboot_cluster - [ ] reset_cluster_parameter_group - [ ] resize_cluster - [X] restore_from_cluster_snapshot - [ ] restore_table_from_cluster_snapshot +- [ ] resume_cluster - [ ] revoke_cluster_security_group_ingress - [ ] revoke_snapshot_access - [ ] rotate_encryption_key +
## rekognition -0% implemented +
+0% implemented + - [ ] compare_faces - [ ] create_collection - [ ] create_project @@ -5971,6 +6602,7 @@ - [ ] get_face_search - [ ] get_label_detection - [ ] get_person_tracking +- [ ] get_text_detection - [ ] index_faces - [ ] list_collections - [ ] list_faces @@ -5986,11 +6618,15 @@ - [ ] start_person_tracking - [ ] start_project_version - [ ] start_stream_processor +- [ ] start_text_detection - [ ] stop_project_version - [ ] stop_stream_processor +
## resource-groups -75% implemented +
+75% implemented + - [X] create_group - [X] delete_group - [X] get_group @@ -6003,9 +6639,12 @@ - [X] untag - [X] update_group - [X] update_group_query +
## resourcegroupstaggingapi -37% implemented +
+37% implemented + - [ ] describe_report_creation - [ ] get_compliance_summary - [X] get_resources @@ -6014,12 +6653,16 @@ - [ ] start_report_creation - [ ] tag_resources - [ ] untag_resources +
## robomaker -0% implemented +
+0% implemented + - [ ] batch_describe_simulation_job - [ ] cancel_deployment_job - [ ] cancel_simulation_job +- [ ] cancel_simulation_job_batch - [ ] create_deployment_job - [ ] create_fleet - [ ] create_robot @@ -6039,23 +6682,29 @@ - [ ] describe_robot_application - [ ] describe_simulation_application - [ ] describe_simulation_job +- [ ] describe_simulation_job_batch - [ ] list_deployment_jobs - [ ] list_fleets - [ ] list_robot_applications - [ ] list_robots - [ ] list_simulation_applications +- [ ] list_simulation_job_batches - [ ] list_simulation_jobs - [ ] list_tags_for_resource - [ ] register_robot - [ ] restart_simulation_job +- [ ] start_simulation_job_batch - [ ] sync_deployment_job - [ ] tag_resource - [ ] untag_resource - [ ] update_robot_application - [ ] update_simulation_application +
## route53 -12% implemented +
+12% implemented + - [ ] associate_vpc_with_hosted_zone - [ ] change_resource_record_sets - [X] change_tags_for_resource @@ -6112,9 +6761,12 @@ - [ ] update_hosted_zone_comment - [ ] update_traffic_policy_comment - [ ] update_traffic_policy_instance +
## route53domains -0% implemented +
+0% implemented + - [ ] check_domain_availability - [ ] check_domain_transferability - [ ] delete_tags_for_domain @@ -6139,9 +6791,12 @@ - [ ] update_domain_nameservers - [ ] update_tags_for_domain - [ ] view_billing +
## route53resolver -0% implemented +
+0% implemented + - [ ] associate_resolver_endpoint_ip_address - [ ] associate_resolver_rule - [ ] create_resolver_endpoint @@ -6164,9 +6819,12 @@ - [ ] untag_resource - [ ] update_resolver_endpoint - [ ] update_resolver_rule +
## s3 -14% implemented +
+13% implemented + - [ ] abort_multipart_upload - [ ] complete_multipart_upload - [ ] copy_object @@ -6177,63 +6835,63 @@ - [X] delete_bucket_cors - [ ] delete_bucket_encryption - [ ] delete_bucket_inventory_configuration -- [X] delete_bucket_lifecycle +- [ ] delete_bucket_lifecycle - [ ] delete_bucket_metrics_configuration - [X] delete_bucket_policy - [ ] delete_bucket_replication - [X] delete_bucket_tagging - [ ] delete_bucket_website -- [X] delete_object +- [ ] delete_object - [ ] delete_object_tagging -- [X] delete_objects -- [X] delete_public_access_block +- [ ] delete_objects +- [ ] delete_public_access_block - [ ] get_bucket_accelerate_configuration - [X] get_bucket_acl - [ ] get_bucket_analytics_configuration -- [X] get_bucket_cors +- [ ] get_bucket_cors - [ ] get_bucket_encryption - [ ] get_bucket_inventory_configuration -- [X] get_bucket_lifecycle -- [X] get_bucket_lifecycle_configuration -- [X] get_bucket_location -- [X] get_bucket_logging +- [ ] get_bucket_lifecycle +- [ ] get_bucket_lifecycle_configuration +- [ ] get_bucket_location +- [ ] get_bucket_logging - [ ] get_bucket_metrics_configuration - [ ] get_bucket_notification - [ ] get_bucket_notification_configuration - [X] get_bucket_policy -- [X] get_bucket_policy_status +- [ ] get_bucket_policy_status - [ ] get_bucket_replication - [ ] get_bucket_request_payment -- [X] get_bucket_tagging +- [ ] get_bucket_tagging - [X] get_bucket_versioning - [ ] get_bucket_website -- [X] get_object -- [X] get_object_acl +- [ ] get_object +- [ ] get_object_acl - [ ] get_object_legal_hold - [ ] get_object_lock_configuration - [ ] get_object_retention - [ ] get_object_tagging - [ ] get_object_torrent -- [X] get_public_access_block +- [ ] get_public_access_block - [ ] head_bucket - [ ] head_object - [ ] list_bucket_analytics_configurations - [ ] list_bucket_inventory_configurations - [ ] list_bucket_metrics_configurations -- [X] list_buckets -- [X] list_multipart_uploads +- [ ] list_buckets +- [ ] list_multipart_uploads - [ ] list_object_versions -- [X] list_objects -- [X] list_objects_v2 +- [ ] list_objects +- [ ] list_objects_v2 - [ ] list_parts - [X] put_bucket_accelerate_configuration -- [X] put_bucket_acl +- [ ] put_bucket_acl - [ ] put_bucket_analytics_configuration - [X] put_bucket_cors - [ ] put_bucket_encryption - [ ] put_bucket_inventory_configuration -- [X] put_bucket_lifecycle -- [X] put_bucket_lifecycle_configuration +- [ ] put_bucket_lifecycle +- [ ] put_bucket_lifecycle_configuration - [X] put_bucket_logging - [ ] put_bucket_metrics_configuration - [ ] put_bucket_notification @@ -6241,42 +6899,51 @@ - [ ] put_bucket_policy - [ ] put_bucket_replication - [ ] put_bucket_request_payment -- [X] put_bucket_tagging -- [X] put_bucket_versioning +- [ ] put_bucket_tagging +- [ ] put_bucket_versioning - [ ] put_bucket_website -- [X] put_object +- [ ] put_object - [ ] put_object_acl - [ ] put_object_legal_hold - [ ] put_object_lock_configuration - [ ] put_object_retention - [ ] put_object_tagging -- [X] put_public_access_block +- [ ] put_public_access_block - [ ] restore_object - [ ] select_object_content - [ ] upload_part - [ ] upload_part_copy +
## s3control -0% implemented +
+0% implemented + - [ ] create_access_point - [ ] create_job - [ ] delete_access_point - [ ] delete_access_point_policy +- [ ] delete_job_tagging - [ ] delete_public_access_block - [ ] describe_job - [ ] get_access_point - [ ] get_access_point_policy - [ ] get_access_point_policy_status +- [ ] get_job_tagging - [ ] get_public_access_block - [ ] list_access_points - [ ] list_jobs - [ ] put_access_point_policy +- [ ] put_job_tagging - [ ] put_public_access_block - [ ] update_job_priority - [ ] update_job_status +
## sagemaker -0% implemented +
+0% implemented + - [ ] add_tags - [ ] associate_trial_component - [ ] create_algorithm @@ -6349,6 +7016,7 @@ - [ ] describe_trial - [ ] describe_trial_component - [ ] describe_user_profile +- [ ] describe_workforce - [ ] describe_workteam - [ ] disassociate_trial_component - [ ] get_search_suggestions @@ -6407,22 +7075,32 @@ - [ ] update_trial - [ ] update_trial_component - [ ] update_user_profile +- [ ] update_workforce - [ ] update_workteam +
## sagemaker-a2i-runtime -0% implemented +
+0% implemented + - [ ] delete_human_loop - [ ] describe_human_loop - [ ] list_human_loops - [ ] start_human_loop - [ ] stop_human_loop +
## sagemaker-runtime -0% implemented +
+0% implemented + - [ ] invoke_endpoint +
## savingsplans -0% implemented +
+0% implemented + - [ ] create_savings_plan - [ ] describe_savings_plan_rates - [ ] describe_savings_plans @@ -6431,9 +7109,12 @@ - [ ] list_tags_for_resource - [ ] tag_resource - [ ] untag_resource +
## schemas -0% implemented +
+0% implemented + - [ ] create_discoverer - [ ] create_registry - [ ] create_schema @@ -6463,9 +7144,12 @@ - [ ] update_discoverer - [ ] update_registry - [ ] update_schema +
## sdb -0% implemented +
+0% implemented + - [ ] batch_delete_attributes - [ ] batch_put_attributes - [ ] create_domain @@ -6476,9 +7160,12 @@ - [ ] list_domains - [ ] put_attributes - [ ] select +
## secretsmanager -61% implemented +
+66% implemented + - [ ] cancel_rotate_secret - [X] create_secret - [ ] delete_resource_policy @@ -6495,11 +7182,14 @@ - [X] rotate_secret - [ ] tag_resource - [ ] untag_resource -- [ ] update_secret +- [X] update_secret - [ ] update_secret_version_stage +
## securityhub -0% implemented +
+0% implemented + - [ ] accept_invitation - [ ] batch_disable_standards - [ ] batch_enable_standards @@ -6515,6 +7205,8 @@ - [ ] describe_action_targets - [ ] describe_hub - [ ] describe_products +- [ ] describe_standards +- [ ] describe_standards_controls - [ ] disable_import_findings_for_product - [ ] disable_security_hub - [ ] disassociate_from_master_account @@ -6538,9 +7230,13 @@ - [ ] update_action_target - [ ] update_findings - [ ] update_insight +- [ ] update_standards_control +
## serverlessrepo -0% implemented +
+0% implemented + - [ ] create_application - [ ] create_application_version - [ ] create_cloud_formation_change_set @@ -6553,10 +7249,14 @@ - [ ] list_application_versions - [ ] list_applications - [ ] put_application_policy +- [ ] unshare_application - [ ] update_application +
## service-quotas -0% implemented +
+0% implemented + - [ ] associate_service_quota_template - [ ] delete_service_quota_increase_request_from_template - [ ] disassociate_service_quota_template @@ -6573,9 +7273,12 @@ - [ ] list_services - [ ] put_service_quota_increase_request_into_template - [ ] request_service_quota_increase +
## servicecatalog -0% implemented +
+0% implemented + - [ ] accept_portfolio_share - [ ] associate_budget_with_resource - [ ] associate_principal_with_portfolio @@ -6659,9 +7362,12 @@ - [ ] update_provisioning_artifact - [ ] update_service_action - [ ] update_tag_option +
## servicediscovery -0% implemented +
+0% implemented + - [ ] create_http_namespace - [ ] create_private_dns_namespace - [ ] create_public_dns_namespace @@ -6682,9 +7388,12 @@ - [ ] register_instance - [ ] update_instance_custom_health_status - [ ] update_service +
## ses -14% implemented +
+14% implemented + - [ ] clone_receipt_rule_set - [ ] create_configuration_set - [ ] create_configuration_set_event_destination @@ -6756,9 +7465,12 @@ - [ ] verify_domain_identity - [X] verify_email_address - [X] verify_email_identity +
## sesv2 -0% implemented +
+0% implemented + - [ ] create_configuration_set - [ ] create_configuration_set_event_destination - [ ] create_dedicated_ip_pool @@ -6808,11 +7520,15 @@ - [ ] tag_resource - [ ] untag_resource - [ ] update_configuration_set_event_destination +
## shield -0% implemented +
+0% implemented + - [ ] associate_drt_log_bucket - [ ] associate_drt_role +- [ ] associate_health_check - [ ] create_protection - [ ] create_subscription - [ ] delete_protection @@ -6824,14 +7540,18 @@ - [ ] describe_subscription - [ ] disassociate_drt_log_bucket - [ ] disassociate_drt_role +- [ ] disassociate_health_check - [ ] get_subscription_state - [ ] list_attacks - [ ] list_protections - [ ] update_emergency_contact_settings - [ ] update_subscription +
## signer -0% implemented +
+0% implemented + - [ ] cancel_signing_profile - [ ] describe_signing_job - [ ] get_signing_platform @@ -6844,9 +7564,12 @@ - [ ] start_signing_job - [ ] tag_resource - [ ] untag_resource +
## sms -0% implemented +
+0% implemented + - [ ] create_app - [ ] create_replication_job - [ ] delete_app @@ -6875,9 +7598,12 @@ - [ ] terminate_app - [ ] update_app - [ ] update_replication_job +
## sms-voice -0% implemented +
+0% implemented + - [ ] create_configuration_set - [ ] create_configuration_set_event_destination - [ ] delete_configuration_set @@ -6886,9 +7612,12 @@ - [ ] list_configuration_sets - [ ] send_voice_message - [ ] update_configuration_set_event_destination +
## snowball -0% implemented +
+0% implemented + - [ ] cancel_cluster - [ ] cancel_job - [ ] create_address @@ -6908,9 +7637,12 @@ - [ ] list_jobs - [ ] update_cluster - [ ] update_job +
## sns -63% implemented +
+63% implemented + - [X] add_permission - [ ] check_if_phone_number_is_opted_out - [ ] confirm_subscription @@ -6944,9 +7676,12 @@ - [X] tag_resource - [X] unsubscribe - [X] untag_resource +
## sqs -85% implemented +
+85% implemented + - [X] add_permission - [X] change_message_visibility - [ ] change_message_visibility_batch @@ -6967,9 +7702,12 @@ - [X] set_queue_attributes - [X] tag_queue - [X] untag_queue +
## ssm -11% implemented +
+12% implemented + - [X] add_tags_to_resource - [ ] cancel_command - [ ] cancel_maintenance_window_execution @@ -7049,7 +7787,7 @@ - [ ] get_patch_baseline - [ ] get_patch_baseline_for_patch_group - [ ] get_service_setting -- [ ] label_parameter_version +- [X] label_parameter_version - [ ] list_association_versions - [ ] list_associations - [ ] list_command_invocations @@ -7092,22 +7830,31 @@ - [ ] update_patch_baseline - [ ] update_resource_data_sync - [ ] update_service_setting +
## sso -0% implemented +
+0% implemented + - [ ] get_role_credentials - [ ] list_account_roles - [ ] list_accounts - [ ] logout +
## sso-oidc -0% implemented +
+0% implemented + - [ ] create_token - [ ] register_client - [ ] start_device_authorization +
## stepfunctions -36% implemented +
+36% implemented + - [ ] create_activity - [X] create_state_machine - [ ] delete_activity @@ -7130,9 +7877,12 @@ - [ ] tag_resource - [ ] untag_resource - [ ] update_state_machine +
## storagegateway -0% implemented +
+0% implemented + - [ ] activate_gateway - [ ] add_cache - [ ] add_tags_to_resource @@ -7208,20 +7958,26 @@ - [ ] update_smb_security_strategy - [ ] update_snapshot_schedule - [ ] update_vtl_device_type +
## sts -62% implemented +
+62% implemented + - [X] assume_role -- [ ] assume_role_with_saml +- [X] assume_role_with_saml - [X] assume_role_with_web_identity - [ ] decode_authorization_message - [ ] get_access_key_info -- [X] get_caller_identity +- [ ] get_caller_identity - [X] get_federation_token - [X] get_session_token +
## support -0% implemented +
+0% implemented + - [ ] add_attachments_to_set - [ ] add_communication_to_case - [ ] create_case @@ -7236,9 +7992,12 @@ - [ ] describe_trusted_advisor_checks - [ ] refresh_trusted_advisor_check - [ ] resolve_case +
## swf -48% implemented +
+51% implemented + - [ ] count_closed_workflow_executions - [ ] count_open_workflow_executions - [X] count_pending_activity_tasks @@ -7272,34 +8031,48 @@ - [X] start_workflow_execution - [ ] tag_resource - [X] terminate_workflow_execution -- [X] undeprecate_activity_type +- [ ] undeprecate_activity_type - [X] undeprecate_domain -- [X] undeprecate_workflow_type +- [ ] undeprecate_workflow_type - [ ] untag_resource +
## textract -0% implemented +
+0% implemented + - [ ] analyze_document - [ ] detect_document_text - [ ] get_document_analysis - [ ] get_document_text_detection - [ ] start_document_analysis - [ ] start_document_text_detection +
## transcribe -0% implemented +
+0% implemented + - [ ] create_vocabulary +- [ ] create_vocabulary_filter - [ ] delete_transcription_job - [ ] delete_vocabulary +- [ ] delete_vocabulary_filter - [ ] get_transcription_job - [ ] get_vocabulary +- [ ] get_vocabulary_filter - [ ] list_transcription_jobs - [ ] list_vocabularies +- [ ] list_vocabulary_filters - [ ] start_transcription_job - [ ] update_vocabulary +- [ ] update_vocabulary_filter +
## transfer -0% implemented +
+0% implemented + - [ ] create_server - [ ] create_user - [ ] delete_server @@ -7318,17 +8091,27 @@ - [ ] untag_resource - [ ] update_server - [ ] update_user +
## translate -0% implemented +
+0% implemented + - [ ] delete_terminology +- [ ] describe_text_translation_job - [ ] get_terminology - [ ] import_terminology - [ ] list_terminologies +- [ ] list_text_translation_jobs +- [ ] start_text_translation_job +- [ ] stop_text_translation_job - [ ] translate_text +
## waf -0% implemented +
+0% implemented + - [ ] create_byte_match_set - [ ] create_geo_match_set - [ ] create_ip_set @@ -7405,9 +8188,12 @@ - [ ] update_sql_injection_match_set - [ ] update_web_acl - [ ] update_xss_match_set +
## waf-regional -0% implemented +
+0% implemented + - [ ] associate_web_acl - [ ] create_byte_match_set - [ ] create_geo_match_set @@ -7488,9 +8274,12 @@ - [ ] update_sql_injection_match_set - [ ] update_web_acl - [ ] update_xss_match_set +
## wafv2 -0% implemented +
+0% implemented + - [ ] associate_web_acl - [ ] check_capacity - [ ] create_ip_set @@ -7527,9 +8316,12 @@ - [ ] update_regex_pattern_set - [ ] update_rule_group - [ ] update_web_acl +
## workdocs -0% implemented +
+0% implemented + - [ ] abort_document_version_upload - [ ] activate_user - [ ] add_resource_permissions @@ -7571,9 +8363,12 @@ - [ ] update_document_version - [ ] update_folder - [ ] update_user +
## worklink -0% implemented +
+0% implemented + - [ ] associate_domain - [ ] associate_website_authorization_provider - [ ] associate_website_certificate_authority @@ -7604,15 +8399,19 @@ - [ ] update_domain_metadata - [ ] update_fleet_metadata - [ ] update_identity_provider_configuration +
## workmail -0% implemented +
+0% implemented + - [ ] associate_delegate_to_resource - [ ] associate_member_to_group - [ ] create_alias - [ ] create_group - [ ] create_resource - [ ] create_user +- [ ] delete_access_control_rule - [ ] delete_alias - [ ] delete_group - [ ] delete_mailbox_permissions @@ -7625,7 +8424,9 @@ - [ ] describe_user - [ ] disassociate_delegate_from_resource - [ ] disassociate_member_from_group +- [ ] get_access_control_effect - [ ] get_mailbox_details +- [ ] list_access_control_rules - [ ] list_aliases - [ ] list_group_members - [ ] list_groups @@ -7633,20 +8434,30 @@ - [ ] list_organizations - [ ] list_resource_delegates - [ ] list_resources +- [ ] list_tags_for_resource - [ ] list_users +- [ ] put_access_control_rule - [ ] put_mailbox_permissions - [ ] register_to_work_mail - [ ] reset_password +- [ ] tag_resource +- [ ] untag_resource - [ ] update_mailbox_quota - [ ] update_primary_email_address - [ ] update_resource +
## workmailmessageflow -0% implemented +
+0% implemented + - [ ] get_raw_message_content +
## workspaces -0% implemented +
+0% implemented + - [ ] associate_ip_groups - [ ] authorize_ip_rules - [ ] copy_workspace_image @@ -7671,6 +8482,7 @@ - [ ] disassociate_ip_groups - [ ] import_workspace_image - [ ] list_available_management_cidr_ranges +- [ ] migrate_workspace - [ ] modify_account - [ ] modify_client_properties - [ ] modify_selfservice_permissions @@ -7687,9 +8499,12 @@ - [ ] stop_workspaces - [ ] terminate_workspaces - [ ] update_rules_of_ip_group +
## xray -0% implemented +
+0% implemented + - [ ] batch_get_traces - [ ] create_group - [ ] create_sampling_rule @@ -7710,3 +8525,4 @@ - [ ] put_trace_segments - [ ] update_group - [ ] update_sampling_rule +
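Each service section in the regenerated IMPLEMENTATION_COVERAGE.md above is wrapped in a collapsible HTML block, emitted by write_implementation_coverage_to_file in scripts/implementation_coverage.py later in this patch. A minimal sketch of one emitted section, reusing the polly figures from the list above (service and operations illustrative only):

    ## polly
    <details>
    <summary>55% implemented</summary>

    - [X] delete_lexicon
    - [ ] start_speech_synthesis_task
    </details>
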
diff --git a/moto/__init__.py b/moto/__init__.py index 79c1555d3088..a9a61fff0d0a 100644 --- a/moto/__init__.py +++ b/moto/__init__.py @@ -49,9 +49,7 @@ def f(*args, **kwargs): mock_elasticbeanstalk = lazy_load(".elasticbeanstalk", "mock_elasticbeanstalk") mock_ec2 = lazy_load(".ec2", "mock_ec2") mock_ec2_deprecated = lazy_load(".ec2", "mock_ec2_deprecated") -mock_ec2_instance_connect = lazy_load( - ".ec2_instance_connect", "mock_ec2_instance_connect" -) +mock_ec2instanceconnect = lazy_load(".ec2instanceconnect", "mock_ec2instanceconnect") mock_ecr = lazy_load(".ecr", "mock_ecr") mock_ecr_deprecated = lazy_load(".ecr", "mock_ecr_deprecated") mock_ecs = lazy_load(".ecs", "mock_ecs") diff --git a/moto/backends.py b/moto/backends.py index bb71429eb940..d7abe8e13de2 100644 --- a/moto/backends.py +++ b/moto/backends.py @@ -21,7 +21,7 @@ "dynamodb2": ("dynamodb2", "dynamodb_backends2"), "dynamodbstreams": ("dynamodbstreams", "dynamodbstreams_backends"), "ec2": ("ec2", "ec2_backends"), - "ec2_instance_connect": ("ec2_instance_connect", "ec2_instance_connect_backends"), + "ec2instanceconnect": ("ec2instanceconnect", "ec2instanceconnect_backends"), "ecr": ("ecr", "ecr_backends"), "ecs": ("ecs", "ecs_backends"), "elasticbeanstalk": ("elasticbeanstalk", "eb_backends"), diff --git a/moto/dynamodb2/models/__init__.py b/moto/dynamodb2/models/__init__.py index ea16f456f142..f8aeaf1f1a15 100644 --- a/moto/dynamodb2/models/__init__.py +++ b/moto/dynamodb2/models/__init__.py @@ -824,6 +824,42 @@ def list_tags_of_resource(self, table_arn): required_table = self.tables[table] return required_table.tags + def list_tables(self, limit, exclusive_start_table_name): + all_tables = list(self.tables.keys()) + + if exclusive_start_table_name: + try: + last_table_index = all_tables.index(exclusive_start_table_name) + except ValueError: + start = len(all_tables) + else: + start = last_table_index + 1 + else: + start = 0 + + if limit: + tables = all_tables[start : start + limit] + else: + tables = all_tables[start:] + + if limit and len(all_tables) > start + limit: + return tables, tables[-1] + return tables, None + + def describe_table(self, name): + table = self.tables[name] + return table.describe(base_key="Table") + + def update_table(self, name, global_index, throughput, stream_spec): + table = self.get_table(name) + if global_index: + table = self.update_table_global_indexes(name, global_index) + if throughput: + table = self.update_table_throughput(name, throughput) + if stream_spec: + table = self.update_table_streams(name, stream_spec) + return table + def update_table_throughput(self, name, throughput): table = self.tables[name] table.throughput = throughput @@ -1134,7 +1170,7 @@ def delete_item( return table.delete_item(hash_value, range_value) - def update_ttl(self, table_name, ttl_spec): + def update_time_to_live(self, table_name, ttl_spec): table = self.tables.get(table_name) if table is None: raise JsonRESTError("ResourceNotFound", "Table not found") @@ -1151,7 +1187,7 @@ def update_ttl(self, table_name, ttl_spec): table.ttl["TimeToLiveStatus"] = "DISABLED" table.ttl["AttributeName"] = ttl_spec["AttributeName"] - def describe_ttl(self, table_name): + def describe_time_to_live(self, table_name): table = self.tables.get(table_name) if table is None: raise JsonRESTError("ResourceNotFound", "Table not found") @@ -1246,6 +1282,21 @@ def transact_write_items(self, transact_items): self.tables = original_table_state raise + ###################### + # LIST of methods where the logic completely resides in 
responses.py + # Duplicated here so that the implementation coverage script is aware + # TODO: Move logic here + ###################### + + def batch_get_item(self): + pass + + def batch_write_item(self): + pass + + def transact_get_items(self): + pass + dynamodb_backends = {} for region in Session().get_available_regions("dynamodb"): diff --git a/moto/dynamodb2/responses.py b/moto/dynamodb2/responses.py index b703f2935b13..2c9f1724004d 100644 --- a/moto/dynamodb2/responses.py +++ b/moto/dynamodb2/responses.py @@ -92,27 +92,14 @@ def call_action(self): def list_tables(self): body = self.body limit = body.get("Limit", 100) - all_tables = list(self.dynamodb_backend.tables.keys()) - exclusive_start_table_name = body.get("ExclusiveStartTableName") - if exclusive_start_table_name: - try: - last_table_index = all_tables.index(exclusive_start_table_name) - except ValueError: - start = len(all_tables) - else: - start = last_table_index + 1 - else: - start = 0 - - if limit: - tables = all_tables[start : start + limit] - else: - tables = all_tables[start:] + tables, last_eval = self.dynamodb_backend.list_tables( + limit, exclusive_start_table_name + ) response = {"TableNames": tables} - if limit and len(all_tables) > start + limit: - response["LastEvaluatedTableName"] = tables[-1] + if last_eval: + response["LastEvaluatedTableName"] = last_eval return dynamo_json_dump(response) @@ -232,33 +219,29 @@ def list_tags_of_resource(self): def update_table(self): name = self.body["TableName"] - table = self.dynamodb_backend.get_table(name) - if "GlobalSecondaryIndexUpdates" in self.body: - table = self.dynamodb_backend.update_table_global_indexes( - name, self.body["GlobalSecondaryIndexUpdates"] + global_index = self.body.get("GlobalSecondaryIndexUpdates", None) + throughput = self.body.get("ProvisionedThroughput", None) + stream_spec = self.body.get("StreamSpecification", None) + try: + table = self.dynamodb_backend.update_table( + name=name, + global_index=global_index, + throughput=throughput, + stream_spec=stream_spec, ) - if "ProvisionedThroughput" in self.body: - throughput = self.body["ProvisionedThroughput"] - table = self.dynamodb_backend.update_table_throughput(name, throughput) - if "StreamSpecification" in self.body: - try: - table = self.dynamodb_backend.update_table_streams( - name, self.body["StreamSpecification"] - ) - except ValueError: - er = "com.amazonaws.dynamodb.v20111205#ResourceInUseException" - return self.error(er, "Cannot enable stream") - - return dynamo_json_dump(table.describe()) + return dynamo_json_dump(table.describe()) + except ValueError: + er = "com.amazonaws.dynamodb.v20111205#ResourceInUseException" + return self.error(er, "Cannot enable stream") def describe_table(self): name = self.body["TableName"] try: - table = self.dynamodb_backend.tables[name] + table = self.dynamodb_backend.describe_table(name) + return dynamo_json_dump(table) except KeyError: er = "com.amazonaws.dynamodb.v20111205#ResourceNotFoundException" return self.error(er, "Requested resource not found") - return dynamo_json_dump(table.describe(base_key="Table")) def put_item(self): name = self.body["TableName"] @@ -850,14 +833,14 @@ def update_time_to_live(self): name = self.body["TableName"] ttl_spec = self.body["TimeToLiveSpecification"] - self.dynamodb_backend.update_ttl(name, ttl_spec) + self.dynamodb_backend.update_time_to_live(name, ttl_spec) return json.dumps({"TimeToLiveSpecification": ttl_spec}) def describe_time_to_live(self): name = self.body["TableName"] - ttl_spec = 
self.dynamodb_backend.describe_ttl(name) + ttl_spec = self.dynamodb_backend.describe_time_to_live(name) return json.dumps({"TimeToLiveDescription": ttl_spec}) diff --git a/moto/ec2_instance_connect/__init__.py b/moto/ec2_instance_connect/__init__.py deleted file mode 100644 index c20d59cfa586..000000000000 --- a/moto/ec2_instance_connect/__init__.py +++ /dev/null @@ -1,4 +0,0 @@ -from ..core.models import base_decorator -from .models import ec2_instance_connect_backends - -mock_ec2_instance_connect = base_decorator(ec2_instance_connect_backends) diff --git a/moto/ec2_instance_connect/models.py b/moto/ec2_instance_connect/models.py deleted file mode 100644 index f3dbbe9f8269..000000000000 --- a/moto/ec2_instance_connect/models.py +++ /dev/null @@ -1,11 +0,0 @@ -import boto.ec2 -from moto.core import BaseBackend - - -class Ec2InstanceConnectBackend(BaseBackend): - pass - - -ec2_instance_connect_backends = {} -for region in boto.ec2.regions(): - ec2_instance_connect_backends[region.name] = Ec2InstanceConnectBackend() diff --git a/moto/ec2_instance_connect/responses.py b/moto/ec2_instance_connect/responses.py deleted file mode 100644 index 462f1fddcf71..000000000000 --- a/moto/ec2_instance_connect/responses.py +++ /dev/null @@ -1,9 +0,0 @@ -import json -from moto.core.responses import BaseResponse - - -class Ec2InstanceConnectResponse(BaseResponse): - def send_ssh_public_key(self): - return json.dumps( - {"RequestId": "example-2a47-4c91-9700-e37e85162cb6", "Success": True} - ) diff --git a/moto/ec2instanceconnect/__init__.py b/moto/ec2instanceconnect/__init__.py new file mode 100644 index 000000000000..c53958f7e7f4 --- /dev/null +++ b/moto/ec2instanceconnect/__init__.py @@ -0,0 +1,4 @@ +from ..core.models import base_decorator +from .models import ec2instanceconnect_backends + +mock_ec2instanceconnect = base_decorator(ec2instanceconnect_backends) diff --git a/moto/ec2instanceconnect/models.py b/moto/ec2instanceconnect/models.py new file mode 100644 index 000000000000..43c01e7f275b --- /dev/null +++ b/moto/ec2instanceconnect/models.py @@ -0,0 +1,15 @@ +import boto.ec2 +import json +from moto.core import BaseBackend + + +class Ec2InstanceConnectBackend(BaseBackend): + def send_ssh_public_key(self): + return json.dumps( + {"RequestId": "example-2a47-4c91-9700-e37e85162cb6", "Success": True} + ) + + +ec2instanceconnect_backends = {} +for region in boto.ec2.regions(): + ec2instanceconnect_backends[region.name] = Ec2InstanceConnectBackend() diff --git a/moto/ec2instanceconnect/responses.py b/moto/ec2instanceconnect/responses.py new file mode 100644 index 000000000000..9fce11aa21fe --- /dev/null +++ b/moto/ec2instanceconnect/responses.py @@ -0,0 +1,11 @@ +from moto.core.responses import BaseResponse +from .models import ec2instanceconnect_backends + + +class Ec2InstanceConnectResponse(BaseResponse): + @property + def ec2instanceconnect_backend(self): + return ec2instanceconnect_backends[self.region] + + def send_ssh_public_key(self): + return self.ec2instanceconnect_backend.send_ssh_public_key() diff --git a/moto/ec2_instance_connect/urls.py b/moto/ec2instanceconnect/urls.py similarity index 100% rename from moto/ec2_instance_connect/urls.py rename to moto/ec2instanceconnect/urls.py diff --git a/scripts/implementation_coverage.py b/scripts/implementation_coverage.py index 4552ec18ec64..57f978ff9b5a 100755 --- a/scripts/implementation_coverage.py +++ b/scripts/implementation_coverage.py @@ -7,18 +7,18 @@ script_dir = os.path.dirname(os.path.abspath(__file__)) -alternative_service_names = {'lambda': 
'awslambda'}
+alternative_service_names = {'lambda': 'awslambda', 'dynamodb': 'dynamodb2'}
 
 
 def get_moto_implementation(service_name):
     service_name = service_name.replace("-", "") if "-" in service_name else service_name
     alt_service_name = alternative_service_names[service_name] if service_name in alternative_service_names else service_name
-    if not hasattr(moto, alt_service_name):
-        return None
-    module = getattr(moto, alt_service_name)
-    if module is None:
-        return None
-    mock = getattr(module, "mock_{}".format(service_name))
+    if hasattr(moto, "mock_{}".format(alt_service_name)):
+        mock = getattr(moto, "mock_{}".format(alt_service_name))
+    elif hasattr(moto, "mock_{}".format(service_name)):
+        mock = getattr(moto, "mock_{}".format(service_name))
+    else:
+        mock = None
     if mock is None:
         return None
     backends = list(mock().backends.values())
@@ -97,12 +97,14 @@ def write_implementation_coverage_to_file(coverage):
 
         file.write("\n")
         file.write("## {}\n".format(service_name))
-        file.write("{}% implemented\n".format(percentage_implemented))
+        file.write("<details>\n")
\n") + file.write("{}% implemented\n\n".format(percentage_implemented)) for op in operations: if op in implemented: file.write("- [X] {}\n".format(op)) else: file.write("- [ ] {}\n".format(op)) + file.write("
\n") if __name__ == '__main__': diff --git a/tests/test_ec2_instance_connect/test_ec2_instance_connect_boto3.py b/tests/test_ec2instanceconnect/test_ec2instanceconnect_boto3.py similarity index 92% rename from tests/test_ec2_instance_connect/test_ec2_instance_connect_boto3.py rename to tests/test_ec2instanceconnect/test_ec2instanceconnect_boto3.py index eb685d80ad5e..3f676af961db 100644 --- a/tests/test_ec2_instance_connect/test_ec2_instance_connect_boto3.py +++ b/tests/test_ec2instanceconnect/test_ec2instanceconnect_boto3.py @@ -1,6 +1,6 @@ import boto3 -from moto import mock_ec2_instance_connect +from moto import mock_ec2instanceconnect pubkey = """ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDV5+voluw2zmzqpqCAqtsyoP01TQ8Ydx1eS1yD6wUsHcPqMIqpo57YxiC8XPwrdeKQ6GG6MC3bHsgXoPypGP0LyixbiuLTU31DnnqorcHt4bWs6rQa7dK2pCCflz2fhYRt5ZjqSNsAKivIbqkH66JozN0SySIka3kEV79GdB0BicioKeEJlCwM9vvxafyzjWf/z8E0lh4ni3vkLpIVJ0t5l+Qd9QMJrT6Is0SCQPVagTYZoi8+fWDoGsBa8vyRwDjEzBl28ZplKh9tSyDkRIYszWTpmK8qHiqjLYZBfAxXjGJbEYL1iig4ZxvbYzKEiKSBi1ZMW9iWjHfZDZuxXAmB @@ -8,7 +8,7 @@ """ -@mock_ec2_instance_connect +@mock_ec2instanceconnect def test_send_ssh_public_key(): client = boto3.client("ec2-instance-connect", region_name="us-east-1") fake_request_id = "example-2a47-4c91-9700-e37e85162cb6" From 774a764b698fbf50dffd58f6518a344fef73af76 Mon Sep 17 00:00:00 2001 From: usmangani1 Date: Tue, 12 May 2020 19:29:07 +0530 Subject: [PATCH 353/658] Fix s3 Added Error handling in case of invalid uploadID (#2979) * Added Error handling in case of invalid uploadID * Linting * added assertions * Linting Co-authored-by: usmankb Co-authored-by: Bert Blommers --- moto/s3/exceptions.py | 9 +++++++++ moto/s3/models.py | 4 ++++ tests/test_s3/test_s3.py | 13 +++++++++++++ 3 files changed, 26 insertions(+) diff --git a/moto/s3/exceptions.py b/moto/s3/exceptions.py index c38a4f467d0a..3ed385f1cea9 100644 --- a/moto/s3/exceptions.py +++ b/moto/s3/exceptions.py @@ -377,3 +377,12 @@ def __init__(self): super(NoSystemTags, self).__init__( "InvalidTag", "System tags cannot be added/updated by requester" ) + + +class NoSuchUpload(S3ClientError): + code = 404 + + def __init__(self): + super(NoSuchUpload, self).__init__( + "NoSuchUpload", "The specified multipart upload does not exist." 
+ ) diff --git a/moto/s3/models.py b/moto/s3/models.py index 866c5d007c51..3020fd45e81c 100644 --- a/moto/s3/models.py +++ b/moto/s3/models.py @@ -40,6 +40,7 @@ NoSuchPublicAccessBlockConfiguration, InvalidPublicAccessBlockConfiguration, WrongPublicAccessBlockAccountIdError, + NoSuchUpload, ) from .utils import clean_key_name, _VersionedKeyStore @@ -1478,6 +1479,9 @@ def complete_multipart(self, bucket_name, multipart_id, body): def cancel_multipart(self, bucket_name, multipart_id): bucket = self.get_bucket(bucket_name) + multipart_data = bucket.multiparts.get(multipart_id, None) + if not multipart_data: + raise NoSuchUpload() del bucket.multiparts[multipart_id] def list_multipart(self, bucket_name, multipart_id): diff --git a/tests/test_s3/test_s3.py b/tests/test_s3/test_s3.py index f60e0293e231..bcb9da87f33c 100644 --- a/tests/test_s3/test_s3.py +++ b/tests/test_s3/test_s3.py @@ -2149,6 +2149,19 @@ def test_boto3_copy_object_with_versioning(): data.should.equal(b"test2") +@mock_s3 +def test_s3_abort_multipart_data_with_invalid_upload_and_key(): + client = boto3.client("s3", region_name=DEFAULT_REGION_NAME) + + client.create_bucket(Bucket="blah") + + with assert_raises(Exception) as err: + client.abort_multipart_upload( + Bucket="blah", Key="foobar", UploadId="dummy_upload_id" + ) + err.exception.response["Error"]["Code"].should.equal("NoSuchUpload") + + @mock_s3 def test_boto3_copy_object_from_unversioned_to_versioned_bucket(): client = boto3.client("s3", region_name=DEFAULT_REGION_NAME) From 9bc393801f24e651f6600b3f8ea986e1f345e866 Mon Sep 17 00:00:00 2001 From: James Belleau Date: Wed, 13 May 2020 06:28:22 -0500 Subject: [PATCH 354/658] Managedblockchain member additions (#2983) * Added some member and proposal functions * Added additional member and proposal functions * Fixed admin password return and added update_member along with tests * Added network removal and member removal proposal * Fixed failing test * Fixed Python 2.7 test --- moto/managedblockchain/exceptions.py | 21 + moto/managedblockchain/models.py | 683 +++++++++++++++++- moto/managedblockchain/responses.py | 238 +++++- moto/managedblockchain/urls.py | 7 + moto/managedblockchain/utils.py | 87 ++- tests/test_managedblockchain/__init__.py | 1 + tests/test_managedblockchain/helpers.py | 67 ++ .../test_managedblockchain_invitations.py | 142 ++++ .../test_managedblockchain_members.py | 669 +++++++++++++++++ .../test_managedblockchain_networks.py | 69 +- .../test_managedblockchain_proposals.py | 199 +++++ .../test_managedblockchain_proposalvotes.py | 529 ++++++++++++++ 12 files changed, 2638 insertions(+), 74 deletions(-) create mode 100644 tests/test_managedblockchain/__init__.py create mode 100644 tests/test_managedblockchain/helpers.py create mode 100644 tests/test_managedblockchain/test_managedblockchain_invitations.py create mode 100644 tests/test_managedblockchain/test_managedblockchain_members.py create mode 100644 tests/test_managedblockchain/test_managedblockchain_proposals.py create mode 100644 tests/test_managedblockchain/test_managedblockchain_proposalvotes.py diff --git a/moto/managedblockchain/exceptions.py b/moto/managedblockchain/exceptions.py index 265d8eaeab2c..456eabc0581b 100644 --- a/moto/managedblockchain/exceptions.py +++ b/moto/managedblockchain/exceptions.py @@ -16,6 +16,16 @@ def __init__(self, pretty_called_method, operation_error): ) +class InvalidRequestException(ManagedBlockchainClientError): + def __init__(self, pretty_called_method, operation_error): + super(InvalidRequestException, 
self).__init__( + "InvalidRequestException", + "An error occurred (InvalidRequestException) when calling the {0} operation: {1}".format( + pretty_called_method, operation_error + ), + ) + + class ResourceNotFoundException(ManagedBlockchainClientError): def __init__(self, pretty_called_method, operation_error): self.code = 404 @@ -25,3 +35,14 @@ def __init__(self, pretty_called_method, operation_error): pretty_called_method, operation_error ), ) + + +class ResourceLimitExceededException(ManagedBlockchainClientError): + def __init__(self, pretty_called_method, operation_error): + self.code = 429 + super(ResourceLimitExceededException, self).__init__( + "ResourceLimitExceededException", + "An error occurred (ResourceLimitExceededException) when calling the {0} operation: {1}".format( + pretty_called_method, operation_error + ), + ) diff --git a/moto/managedblockchain/models.py b/moto/managedblockchain/models.py index 96f411a87854..034e45d350ec 100644 --- a/moto/managedblockchain/models.py +++ b/moto/managedblockchain/models.py @@ -1,14 +1,28 @@ -from __future__ import unicode_literals +from __future__ import unicode_literals, division import datetime +import re from boto3 import Session from moto.core import BaseBackend, BaseModel -from .exceptions import BadRequestException, ResourceNotFoundException +from .exceptions import ( + BadRequestException, + ResourceNotFoundException, + InvalidRequestException, + ResourceLimitExceededException, +) -from .utils import get_network_id, get_member_id +from .utils import ( + get_network_id, + get_member_id, + get_proposal_id, + get_invitation_id, + member_name_exist_in_network, + number_of_members_in_network, + admin_password_ok, +) FRAMEWORKS = [ "HYPERLEDGER_FABRIC", @@ -18,10 +32,20 @@ "1.2", ] -EDITIONS = [ - "STARTER", - "STANDARD", -] +EDITIONS = { + "STARTER": { + "MaxMembers": 5, + "MaxNodesPerMember": 2, + "AllowedNodeInstanceTypes": ["bc.t3.small", "bc.t3.medium"], + }, + "STANDARD": { + "MaxMembers": 14, + "MaxNodesPerMember": 3, + "AllowedNodeInstanceTypes": ["bc.t3", "bc.m5", "bc.c5"], + }, +} + +VOTEVALUES = ["YES", "NO"] class ManagedBlockchainNetwork(BaseModel): @@ -48,6 +72,42 @@ def __init__( self.member_configuration = member_configuration self.region = region + @property + def network_name(self): + return self.name + + @property + def network_framework(self): + return self.framework + + @property + def network_framework_version(self): + return self.frameworkversion + + @property + def network_creationdate(self): + return self.creationdate.strftime("%Y-%m-%dT%H:%M:%S.%f%z") + + @property + def network_description(self): + return self.description + + @property + def network_edition(self): + return self.frameworkconfiguration["Fabric"]["Edition"] + + @property + def vote_pol_proposal_duration(self): + return self.voting_policy["ApprovalThresholdPolicy"]["ProposalDurationInHours"] + + @property + def vote_pol_threshold_percentage(self): + return self.voting_policy["ApprovalThresholdPolicy"]["ThresholdPercentage"] + + @property + def vote_pol_threshold_comparator(self): + return self.voting_policy["ApprovalThresholdPolicy"]["ThresholdComparator"] + def to_dict(self): # Format for list_networks d = { @@ -63,7 +123,7 @@ def to_dict(self): return d def get_format(self): - # Format for get_networks + # Format for get_network frameworkattributes = { "Fabric": { "OrderingServiceEndpoint": "orderer.{0}.managedblockchain.{1}.amazonaws.com:30001".format( @@ -93,9 +153,272 @@ def get_format(self): return d +class 
ManagedBlockchainProposal(BaseModel): + def __init__( + self, + id, + networkid, + memberid, + membername, + numofmembers, + actions, + network_expirtation, + network_threshold, + network_threshold_comp, + description=None, + ): + # In general, passing all values instead of creating + # an apparatus to look them up + self.id = id + self.networkid = networkid + self.memberid = memberid + self.membername = membername + self.numofmembers = numofmembers + self.actions = actions + self.network_expirtation = network_expirtation + self.network_threshold = network_threshold + self.network_threshold_comp = network_threshold_comp + self.description = description + + self.creationdate = datetime.datetime.utcnow() + self.expirtationdate = self.creationdate + datetime.timedelta( + hours=network_expirtation + ) + self.yes_vote_count = 0 + self.no_vote_count = 0 + self.outstanding_vote_count = self.numofmembers + self.status = "IN_PROGRESS" + self.votes = {} + + @property + def network_id(self): + return self.networkid + + @property + def proposal_status(self): + return self.status + + @property + def proposal_votes(self): + return self.votes + + def proposal_actions(self, action_type): + default_return = [] + if action_type.lower() == "invitations": + if "Invitations" in self.actions: + return self.actions["Invitations"] + elif action_type.lower() == "removals": + if "Removals" in self.actions: + return self.actions["Removals"] + return default_return + + def to_dict(self): + # Format for list_proposals + d = { + "ProposalId": self.id, + "ProposedByMemberId": self.memberid, + "ProposedByMemberName": self.membername, + "Status": self.status, + "CreationDate": self.creationdate.strftime("%Y-%m-%dT%H:%M:%S.%f%z"), + "ExpirationDate": self.expirtationdate.strftime("%Y-%m-%dT%H:%M:%S.%f%z"), + } + return d + + def get_format(self): + # Format for get_proposal + d = { + "ProposalId": self.id, + "NetworkId": self.networkid, + "Actions": self.actions, + "ProposedByMemberId": self.memberid, + "ProposedByMemberName": self.membername, + "Status": self.status, + "CreationDate": self.creationdate.strftime("%Y-%m-%dT%H:%M:%S.%f%z"), + "ExpirationDate": self.expirtationdate.strftime("%Y-%m-%dT%H:%M:%S.%f%z"), + "YesVoteCount": self.yes_vote_count, + "NoVoteCount": self.no_vote_count, + "OutstandingVoteCount": self.outstanding_vote_count, + } + if self.description is not None: + d["Description"] = self.description + return d + + def set_vote(self, votermemberid, votermembername, vote): + if datetime.datetime.utcnow() > self.expirtationdate: + self.status = "EXPIRED" + return False + + if vote.upper() == "YES": + self.yes_vote_count += 1 + else: + self.no_vote_count += 1 + self.outstanding_vote_count -= 1 + + perct_yes = (self.yes_vote_count / self.numofmembers) * 100 + perct_no = (self.no_vote_count / self.numofmembers) * 100 + self.votes[votermemberid] = { + "MemberId": votermemberid, + "MemberName": votermembername, + "Vote": vote.upper(), + } + + if self.network_threshold_comp == "GREATER_THAN_OR_EQUAL_TO": + if perct_yes >= self.network_threshold: + self.status = "APPROVED" + elif perct_no >= self.network_threshold: + self.status = "REJECTED" + else: + if perct_yes > self.network_threshold: + self.status = "APPROVED" + elif perct_no > self.network_threshold: + self.status = "REJECTED" + + return True + + +class ManagedBlockchainInvitation(BaseModel): + def __init__( + self, + id, + networkid, + networkname, + networkframework, + networkframeworkversion, + networkcreationdate, + region, + networkdescription=None, + 
): + self.id = id + self.networkid = networkid + self.networkname = networkname + self.networkdescription = networkdescription + self.networkframework = networkframework + self.networkframeworkversion = networkframeworkversion + self.networkstatus = "AVAILABLE" + self.networkcreationdate = networkcreationdate + self.status = "PENDING" + self.region = region + + self.creationdate = datetime.datetime.utcnow() + self.expirtationdate = self.creationdate + datetime.timedelta(days=7) + + @property + def invitation_status(self): + return self.status + + @property + def invitation_networkid(self): + return self.networkid + + def to_dict(self): + d = { + "InvitationId": self.id, + "CreationDate": self.creationdate.strftime("%Y-%m-%dT%H:%M:%S.%f%z"), + "ExpirationDate": self.expirtationdate.strftime("%Y-%m-%dT%H:%M:%S.%f%z"), + "Status": self.status, + "NetworkSummary": { + "Id": self.networkid, + "Name": self.networkname, + "Framework": self.networkframework, + "FrameworkVersion": self.networkframeworkversion, + "Status": self.networkstatus, + "CreationDate": self.networkcreationdate, + }, + } + if self.networkdescription is not None: + d["NetworkSummary"]["Description"] = self.networkdescription + return d + + def accept_invitation(self): + self.status = "ACCEPTED" + + def reject_invitation(self): + self.status = "REJECTED" + + def set_network_status(self, network_status): + self.networkstatus = network_status + + +class ManagedBlockchainMember(BaseModel): + def __init__( + self, id, networkid, member_configuration, region, + ): + self.creationdate = datetime.datetime.utcnow() + self.id = id + self.networkid = networkid + self.member_configuration = member_configuration + self.status = "AVAILABLE" + self.region = region + self.description = None + + @property + def network_id(self): + return self.networkid + + @property + def name(self): + return self.member_configuration["Name"] + + @property + def member_status(self): + return self.status + + def to_dict(self): + # Format for list_members + d = { + "Id": self.id, + "Name": self.member_configuration["Name"], + "Status": self.status, + "CreationDate": self.creationdate.strftime("%Y-%m-%dT%H:%M:%S.%f%z"), + "IsOwned": True, + } + if "Description" in self.member_configuration: + self.description = self.member_configuration["Description"] + return d + + def get_format(self): + # Format for get_member + frameworkattributes = { + "Fabric": { + "AdminUsername": self.member_configuration["FrameworkConfiguration"][ + "Fabric" + ]["AdminUsername"], + "CaEndpoint": "ca.{0}.{1}.managedblockchain.{2}.amazonaws.com:30002".format( + self.id.lower(), self.networkid.lower(), self.region + ), + } + } + + d = { + "NetworkId": self.networkid, + "Id": self.id, + "Name": self.name, + "FrameworkAttributes": frameworkattributes, + "LogPublishingConfiguration": self.member_configuration[ + "LogPublishingConfiguration" + ], + "Status": self.status, + "CreationDate": self.creationdate.strftime("%Y-%m-%dT%H:%M:%S.%f%z"), + } + if "Description" in self.member_configuration: + d["Description"] = self.description + return d + + def delete(self): + self.status = "DELETED" + + def update(self, logpublishingconfiguration): + self.member_configuration[ + "LogPublishingConfiguration" + ] = logpublishingconfiguration + + class ManagedBlockchainBackend(BaseBackend): def __init__(self, region_name): self.networks = {} + self.members = {} + self.proposals = {} + self.invitations = {} self.region_name = region_name def reset(self): @@ -113,14 +436,6 @@ def create_network( 
member_configuration, description=None, ): - self.name = name - self.framework = framework - self.frameworkversion = frameworkversion - self.frameworkconfiguration = frameworkconfiguration - self.voting_policy = voting_policy - self.member_configuration = member_configuration - self.description = description - # Check framework if framework not in FRAMEWORKS: raise BadRequestException("CreateNetwork", "Invalid request body") @@ -141,19 +456,25 @@ def create_network( ## Generate network ID network_id = get_network_id() - ## Generate memberid ID - will need to actually create member + ## Generate memberid ID and initial member member_id = get_member_id() + self.members[member_id] = ManagedBlockchainMember( + id=member_id, + networkid=network_id, + member_configuration=member_configuration, + region=self.region_name, + ) self.networks[network_id] = ManagedBlockchainNetwork( id=network_id, name=name, - framework=self.framework, - frameworkversion=self.frameworkversion, - frameworkconfiguration=self.frameworkconfiguration, - voting_policy=self.voting_policy, - member_configuration=self.member_configuration, + framework=framework, + frameworkversion=frameworkversion, + frameworkconfiguration=frameworkconfiguration, + voting_policy=voting_policy, + member_configuration=member_configuration, region=self.region_name, - description=self.description, + description=description, ) # Return the network and member ID @@ -166,10 +487,324 @@ def list_networks(self): def get_network(self, network_id): if network_id not in self.networks: raise ResourceNotFoundException( - "CreateNetwork", "Network {0} not found".format(network_id) + "GetNetwork", "Network {0} not found.".format(network_id) ) return self.networks.get(network_id) + def create_proposal( + self, networkid, memberid, actions, description=None, + ): + # Check if network exists + if networkid not in self.networks: + raise ResourceNotFoundException( + "CreateProposal", "Network {0} not found.".format(networkid) + ) + + # Check if member exists + if memberid not in self.members: + raise ResourceNotFoundException( + "CreateProposal", "Member {0} not found.".format(memberid) + ) + + # CLI docs say that Invitations and Removals cannot both be passed - but it does + # not throw an error and can be performed + if "Invitations" in actions: + for propinvitation in actions["Invitations"]: + if re.match("[0-9]{12}", propinvitation["Principal"]) is None: + raise InvalidRequestException( + "CreateProposal", + "Account ID format specified in proposal is not valid.", + ) + + if "Removals" in actions: + for propmember in actions["Removals"]: + if propmember["MemberId"] not in self.members: + raise InvalidRequestException( + "CreateProposal", + "Member ID format specified in proposal is not valid.", + ) + + ## Generate proposal ID + proposal_id = get_proposal_id() + + self.proposals[proposal_id] = ManagedBlockchainProposal( + id=proposal_id, + networkid=networkid, + memberid=memberid, + membername=self.members.get(memberid).name, + numofmembers=number_of_members_in_network(self.members, networkid), + actions=actions, + network_expirtation=self.networks.get(networkid).vote_pol_proposal_duration, + network_threshold=self.networks.get( + networkid + ).vote_pol_threshold_percentage, + network_threshold_comp=self.networks.get( + networkid + ).vote_pol_threshold_comparator, + description=description, + ) + + # Return the proposal ID + d = {"ProposalId": proposal_id} + return d + + def list_proposals(self, networkid): + # Check if network exists + if networkid not in 
self.networks: + raise ResourceNotFoundException( + "ListProposals", "Network {0} not found.".format(networkid) + ) + + proposalsfornetwork = [] + for proposal_id in self.proposals: + if self.proposals.get(proposal_id).network_id == networkid: + proposalsfornetwork.append(self.proposals[proposal_id]) + return proposalsfornetwork + + def get_proposal(self, networkid, proposalid): + # Check if network exists + if networkid not in self.networks: + raise ResourceNotFoundException( + "GetProposal", "Network {0} not found.".format(networkid) + ) + + if proposalid not in self.proposals: + raise ResourceNotFoundException( + "GetProposal", "Proposal {0} not found.".format(proposalid) + ) + return self.proposals.get(proposalid) + + def vote_on_proposal(self, networkid, proposalid, votermemberid, vote): + # Check if network exists + if networkid not in self.networks: + raise ResourceNotFoundException( + "VoteOnProposal", "Network {0} not found.".format(networkid) + ) + + if proposalid not in self.proposals: + raise ResourceNotFoundException( + "VoteOnProposal", "Proposal {0} not found.".format(proposalid) + ) + + if votermemberid not in self.members: + raise ResourceNotFoundException( + "VoteOnProposal", "Member {0} not found.".format(votermemberid) + ) + + if vote.upper() not in VOTEVALUES: + raise BadRequestException("VoteOnProposal", "Invalid request body") + + # Check to see if this member already voted + # TODO Verify exception + if votermemberid in self.proposals.get(proposalid).proposal_votes: + raise BadRequestException("VoteOnProposal", "Invalid request body") + + # Will return false if vote was not cast (e.g., status wrong) + if self.proposals.get(proposalid).set_vote( + votermemberid, self.members.get(votermemberid).name, vote.upper() + ): + if self.proposals.get(proposalid).proposal_status == "APPROVED": + ## Generate invitations + for propinvitation in self.proposals.get(proposalid).proposal_actions( + "Invitations" + ): + invitation_id = get_invitation_id() + self.invitations[invitation_id] = ManagedBlockchainInvitation( + id=invitation_id, + networkid=networkid, + networkname=self.networks.get(networkid).network_name, + networkframework=self.networks.get(networkid).network_framework, + networkframeworkversion=self.networks.get( + networkid + ).network_framework_version, + networkcreationdate=self.networks.get( + networkid + ).network_creationdate, + region=self.region_name, + networkdescription=self.networks.get( + networkid + ).network_description, + ) + + ## Delete members + for propmember in self.proposals.get(proposalid).proposal_actions( + "Removals" + ): + self.delete_member(networkid, propmember["MemberId"]) + + def list_proposal_votes(self, networkid, proposalid): + # Check if network exists + if networkid not in self.networks: + raise ResourceNotFoundException( + "ListProposalVotes", "Network {0} not found.".format(networkid) + ) + + if proposalid not in self.proposals: + raise ResourceNotFoundException( + "ListProposalVotes", "Proposal {0} not found.".format(proposalid) + ) + + # Output the vote summaries + proposalvotesfornetwork = [] + for proposal_id in self.proposals: + if self.proposals.get(proposal_id).network_id == networkid: + for pvmemberid in self.proposals.get(proposal_id).proposal_votes: + proposalvotesfornetwork.append( + self.proposals.get(proposal_id).proposal_votes[pvmemberid] + ) + return proposalvotesfornetwork + + def list_invitations(self): + return self.invitations.values() + + def reject_invitation(self, invitationid): + if invitationid not in 
self.invitations: + raise ResourceNotFoundException( + "RejectInvitation", "InvitationId {0} not found.".format(invitationid) + ) + self.invitations.get(invitationid).reject_invitation() + + def create_member( + self, invitationid, networkid, member_configuration, + ): + # Check if network exists + if networkid not in self.networks: + raise ResourceNotFoundException( + "CreateMember", "Network {0} not found.".format(networkid) + ) + + if invitationid not in self.invitations: + raise InvalidRequestException( + "CreateMember", "Invitation {0} not valid".format(invitationid) + ) + + if self.invitations.get(invitationid).invitation_status != "PENDING": + raise InvalidRequestException( + "CreateMember", "Invitation {0} not valid".format(invitationid) + ) + + if ( + member_name_exist_in_network( + self.members, networkid, member_configuration["Name"] + ) + is True + ): + raise InvalidRequestException( + "CreateMember", + "Member name {0} already exists in network {1}.".format( + member_configuration["Name"], networkid + ), + ) + + networkedition = self.networks.get(networkid).network_edition + if ( + number_of_members_in_network(self.members, networkid) + >= EDITIONS[networkedition]["MaxMembers"] + ): + raise ResourceLimitExceededException( + "CreateMember", + "You cannot create a member in network {0}.{1} is the maximum number of members allowed in a {2} Edition network.".format( + networkid, EDITIONS[networkedition]["MaxMembers"], networkedition + ), + ) + + memberadminpassword = member_configuration["FrameworkConfiguration"]["Fabric"][ + "AdminPassword" + ] + if admin_password_ok(memberadminpassword) is False: + raise BadRequestException("CreateMember", "Invalid request body") + + member_id = get_member_id() + self.members[member_id] = ManagedBlockchainMember( + id=member_id, + networkid=networkid, + member_configuration=member_configuration, + region=self.region_name, + ) + + # Accept the invitaiton + self.invitations.get(invitationid).accept_invitation() + + # Return the member ID + d = {"MemberId": member_id} + return d + + def list_members(self, networkid): + # Check if network exists + if networkid not in self.networks: + raise ResourceNotFoundException( + "ListMembers", "Network {0} not found.".format(networkid) + ) + + membersfornetwork = [] + for member_id in self.members: + if self.members.get(member_id).network_id == networkid: + membersfornetwork.append(self.members[member_id]) + return membersfornetwork + + def get_member(self, networkid, memberid): + # Check if network exists + if networkid not in self.networks: + raise ResourceNotFoundException( + "GetMember", "Network {0} not found.".format(networkid) + ) + + if memberid not in self.members: + raise ResourceNotFoundException( + "GetMember", "Member {0} not found.".format(memberid) + ) + + ## Cannot get a member than has been delted (it does show up in the list) + if self.members.get(memberid).member_status == "DELETED": + raise ResourceNotFoundException( + "GetMember", "Member {0} not found.".format(memberid) + ) + + return self.members.get(memberid) + + def delete_member(self, networkid, memberid): + # Check if network exists + if networkid not in self.networks: + raise ResourceNotFoundException( + "DeleteMember", "Network {0} not found.".format(networkid) + ) + + if memberid not in self.members: + raise ResourceNotFoundException( + "DeleteMember", "Member {0} not found.".format(memberid) + ) + + self.members.get(memberid).delete() + + # Is this the last member in the network? 
(all set to DELETED) + if number_of_members_in_network( + self.members, networkid, member_status="DELETED" + ) == number_of_members_in_network(self.members, networkid): + # Set network status to DELETED for all invitations + for invitation_id in self.invitations: + if ( + self.invitations.get(invitation_id).invitation_networkid + == networkid + ): + self.invitations.get(invitation_id).set_network_status("DELETED") + + # Remove network + del self.networks[networkid] + + def update_member(self, networkid, memberid, logpublishingconfiguration): + # Check if network exists + if networkid not in self.networks: + raise ResourceNotFoundException( + "UpdateMember", "Network {0} not found.".format(networkid) + ) + + if memberid not in self.members: + raise ResourceNotFoundException( + "UpdateMember", "Member {0} not found.".format(memberid) + ) + + self.members.get(memberid).update(logpublishingconfiguration) + managedblockchain_backends = {} for region in Session().get_available_regions("managedblockchain"): diff --git a/moto/managedblockchain/responses.py b/moto/managedblockchain/responses.py index 081f301d553c..34206b3c444b 100644 --- a/moto/managedblockchain/responses.py +++ b/moto/managedblockchain/responses.py @@ -8,6 +8,9 @@ from .utils import ( region_from_managedblckchain_url, networkid_from_managedblockchain_url, + proposalid_from_managedblockchain_url, + invitationid_from_managedblockchain_url, + memberid_from_managedblockchain_url, ) @@ -66,7 +69,7 @@ def _network_response_post(self, json_body, querystring, headers): member_configuration, description, ) - return 201, headers, json.dumps(response) + return 200, headers, json.dumps(response) @classmethod def networkid_response(clazz, request, full_url, headers): @@ -88,3 +91,236 @@ def _networkid_response_get(self, network_id, headers): response = json.dumps({"Network": mbcnetwork.get_format()}) headers["content-type"] = "application/json" return 200, headers, response + + @classmethod + def proposal_response(clazz, request, full_url, headers): + region_name = region_from_managedblckchain_url(full_url) + response_instance = ManagedBlockchainResponse( + managedblockchain_backends[region_name] + ) + return response_instance._proposal_response(request, full_url, headers) + + def _proposal_response(self, request, full_url, headers): + method = request.method + if hasattr(request, "body"): + body = request.body + else: + body = request.data + parsed_url = urlparse(full_url) + querystring = parse_qs(parsed_url.query, keep_blank_values=True) + network_id = networkid_from_managedblockchain_url(full_url) + if method == "GET": + return self._all_proposals_response(network_id, headers) + elif method == "POST": + json_body = json.loads(body.decode("utf-8")) + return self._proposal_response_post( + network_id, json_body, querystring, headers + ) + + def _all_proposals_response(self, network_id, headers): + proposals = self.backend.list_proposals(network_id) + response = json.dumps( + {"Proposals": [proposal.to_dict() for proposal in proposals]} + ) + headers["content-type"] = "application/json" + return 200, headers, response + + def _proposal_response_post(self, network_id, json_body, querystring, headers): + memberid = json_body["MemberId"] + actions = json_body["Actions"] + + # Optional + description = json_body.get("Description", None) + + response = self.backend.create_proposal( + network_id, memberid, actions, description, + ) + return 200, headers, json.dumps(response) + + @classmethod + def proposalid_response(clazz, request, full_url, headers): + region_name =
region_from_managedblckchain_url(full_url) + response_instance = ManagedBlockchainResponse( + managedblockchain_backends[region_name] + ) + return response_instance._proposalid_response(request, full_url, headers) + + def _proposalid_response(self, request, full_url, headers): + method = request.method + network_id = networkid_from_managedblockchain_url(full_url) + if method == "GET": + proposal_id = proposalid_from_managedblockchain_url(full_url) + return self._proposalid_response_get(network_id, proposal_id, headers) + + def _proposalid_response_get(self, network_id, proposal_id, headers): + proposal = self.backend.get_proposal(network_id, proposal_id) + response = json.dumps({"Proposal": proposal.get_format()}) + headers["content-type"] = "application/json" + return 200, headers, response + + @classmethod + def proposal_votes_response(clazz, request, full_url, headers): + region_name = region_from_managedblckchain_url(full_url) + response_instance = ManagedBlockchainResponse( + managedblockchain_backends[region_name] + ) + return response_instance._proposal_votes_response(request, full_url, headers) + + def _proposal_votes_response(self, request, full_url, headers): + method = request.method + if hasattr(request, "body"): + body = request.body + else: + body = request.data + parsed_url = urlparse(full_url) + querystring = parse_qs(parsed_url.query, keep_blank_values=True) + network_id = networkid_from_managedblockchain_url(full_url) + proposal_id = proposalid_from_managedblockchain_url(full_url) + if method == "GET": + return self._all_proposal_votes_response(network_id, proposal_id, headers) + elif method == "POST": + json_body = json.loads(body.decode("utf-8")) + return self._proposal_votes_response_post( + network_id, proposal_id, json_body, querystring, headers + ) + + def _all_proposal_votes_response(self, network_id, proposal_id, headers): + proposalvotes = self.backend.list_proposal_votes(network_id, proposal_id) + response = json.dumps({"ProposalVotes": proposalvotes}) + headers["content-type"] = "application/json" + return 200, headers, response + + def _proposal_votes_response_post( + self, network_id, proposal_id, json_body, querystring, headers + ): + votermemberid = json_body["VoterMemberId"] + vote = json_body["Vote"] + + self.backend.vote_on_proposal( + network_id, proposal_id, votermemberid, vote, + ) + return 200, headers, "" + + @classmethod + def invitation_response(clazz, request, full_url, headers): + region_name = region_from_managedblckchain_url(full_url) + response_instance = ManagedBlockchainResponse( + managedblockchain_backends[region_name] + ) + return response_instance._invitation_response(request, full_url, headers) + + def _invitation_response(self, request, full_url, headers): + method = request.method + if method == "GET": + return self._all_invitation_response(request, full_url, headers) + + def _all_invitation_response(self, request, full_url, headers): + invitations = self.backend.list_invitations() + response = json.dumps( + {"Invitations": [invitation.to_dict() for invitation in invitations]} + ) + headers["content-type"] = "application/json" + return 200, headers, response + + @classmethod + def invitationid_response(clazz, request, full_url, headers): + region_name = region_from_managedblckchain_url(full_url) + response_instance = ManagedBlockchainResponse( + managedblockchain_backends[region_name] + ) + return response_instance._invitationid_response(request, full_url, headers) + + def _invitationid_response(self, request, full_url, headers): + 
method = request.method + if method == "DELETE": + invitation_id = invitationid_from_managedblockchain_url(full_url) + return self._invitationid_response_delete(invitation_id, headers) + + def _invitationid_response_delete(self, invitation_id, headers): + self.backend.reject_invitation(invitation_id) + headers["content-type"] = "application/json" + return 200, headers, "" + + @classmethod + def member_response(clazz, request, full_url, headers): + region_name = region_from_managedblckchain_url(full_url) + response_instance = ManagedBlockchainResponse( + managedblockchain_backends[region_name] + ) + return response_instance._member_response(request, full_url, headers) + + def _member_response(self, request, full_url, headers): + method = request.method + if hasattr(request, "body"): + body = request.body + else: + body = request.data + parsed_url = urlparse(full_url) + querystring = parse_qs(parsed_url.query, keep_blank_values=True) + network_id = networkid_from_managedblockchain_url(full_url) + if method == "GET": + return self._all_members_response(network_id, headers) + elif method == "POST": + json_body = json.loads(body.decode("utf-8")) + return self._member_response_post( + network_id, json_body, querystring, headers + ) + + def _all_members_response(self, network_id, headers): + members = self.backend.list_members(network_id) + response = json.dumps({"Members": [member.to_dict() for member in members]}) + headers["content-type"] = "application/json" + return 200, headers, response + + def _member_response_post(self, network_id, json_body, querystring, headers): + invitationid = json_body["InvitationId"] + member_configuration = json_body["MemberConfiguration"] + + response = self.backend.create_member( + invitationid, network_id, member_configuration, + ) + return 200, headers, json.dumps(response) + + @classmethod + def memberid_response(clazz, request, full_url, headers): + region_name = region_from_managedblckchain_url(full_url) + response_instance = ManagedBlockchainResponse( + managedblockchain_backends[region_name] + ) + return response_instance._memberid_response(request, full_url, headers) + + def _memberid_response(self, request, full_url, headers): + method = request.method + if hasattr(request, "body"): + body = request.body + else: + body = request.data + network_id = networkid_from_managedblockchain_url(full_url) + member_id = memberid_from_managedblockchain_url(full_url) + if method == "GET": + return self._memberid_response_get(network_id, member_id, headers) + elif method == "PATCH": + json_body = json.loads(body.decode("utf-8")) + return self._memberid_response_patch( + network_id, member_id, json_body, headers + ) + elif method == "DELETE": + return self._memberid_response_delete(network_id, member_id, headers) + + def _memberid_response_get(self, network_id, member_id, headers): + member = self.backend.get_member(network_id, member_id) + response = json.dumps({"Member": member.get_format()}) + headers["content-type"] = "application/json" + return 200, headers, response + + def _memberid_response_patch(self, network_id, member_id, json_body, headers): + logpublishingconfiguration = json_body["LogPublishingConfiguration"] + self.backend.update_member( + network_id, member_id, logpublishingconfiguration, + ) + return 200, headers, "" + + def _memberid_response_delete(self, network_id, member_id, headers): + self.backend.delete_member(network_id, member_id) + headers["content-type"] = "application/json" + return 200, headers, "" diff --git 
a/moto/managedblockchain/urls.py b/moto/managedblockchain/urls.py index 806d11926bc9..c7d191aab9f0 100644 --- a/moto/managedblockchain/urls.py +++ b/moto/managedblockchain/urls.py @@ -6,4 +6,11 @@ url_paths = { "{0}/networks$": ManagedBlockchainResponse.network_response, "{0}/networks/(?P<networkid>[^/.]+)$": ManagedBlockchainResponse.networkid_response, + "{0}/networks/(?P<networkid>[^/.]+)/proposals$": ManagedBlockchainResponse.proposal_response, + "{0}/networks/(?P<networkid>[^/.]+)/proposals/(?P<proposalid>[^/.]+)$": ManagedBlockchainResponse.proposalid_response, + "{0}/networks/(?P<networkid>[^/.]+)/proposals/(?P<proposalid>[^/.]+)/votes$": ManagedBlockchainResponse.proposal_votes_response, + "{0}/invitations$": ManagedBlockchainResponse.invitation_response, + "{0}/invitations/(?P<invitationid>[^/.]+)$": ManagedBlockchainResponse.invitationid_response, + "{0}/networks/(?P<networkid>[^/.]+)/members$": ManagedBlockchainResponse.member_response, + "{0}/networks/(?P<networkid>[^/.]+)/members/(?P<memberid>[^/.]+)$": ManagedBlockchainResponse.memberid_response, } diff --git a/moto/managedblockchain/utils.py b/moto/managedblockchain/utils.py index 2a93d93f40e9..ea8f505135e5 100644 --- a/moto/managedblockchain/utils.py +++ b/moto/managedblockchain/utils.py @@ -1,4 +1,5 @@ import random +import re import string from six.moves.urllib.parse import urlparse @@ -6,15 +7,18 @@ def region_from_managedblckchain_url(url): domain = urlparse(url).netloc - + region = "us-east-1" if "." in domain: - return domain.split(".")[1] - else: - return "us-east-1" + region = domain.split(".")[1] + return region def networkid_from_managedblockchain_url(full_url): - return full_url.split("/")[-1] + id_search = re.search("\/n-[A-Z0-9]{26}", full_url, re.IGNORECASE) + return_id = None + if id_search: + return_id = id_search.group(0).replace("/", "") + return return_id def get_network_id(): @@ -23,7 +27,80 @@ def get_network_id(): ) +def memberid_from_managedblockchain_url(full_url): + id_search = re.search("\/m-[A-Z0-9]{26}", full_url, re.IGNORECASE) + return_id = None + if id_search: + return_id = id_search.group(0).replace("/", "") + return return_id + + def get_member_id(): return "m-" + "".join( random.choice(string.ascii_uppercase + string.digits) for _ in range(26) ) + + +def proposalid_from_managedblockchain_url(full_url): + id_search = re.search("\/p-[A-Z0-9]{26}", full_url, re.IGNORECASE) + return_id = None + if id_search: + return_id = id_search.group(0).replace("/", "") + return return_id + + +def get_proposal_id(): + return "p-" + "".join( + random.choice(string.ascii_uppercase + string.digits) for _ in range(26) + ) + + +def invitationid_from_managedblockchain_url(full_url): + id_search = re.search("\/in-[A-Z0-9]{26}", full_url, re.IGNORECASE) + return_id = None + if id_search: + return_id = id_search.group(0).replace("/", "") + return return_id + + +def get_invitation_id(): + return "in-" + "".join( + random.choice(string.ascii_uppercase + string.digits) for _ in range(26) + ) + + +def member_name_exist_in_network(members, networkid, membername): + membernameexists = False + for member_id in members: + if members.get(member_id).network_id == networkid: + if members.get(member_id).name == membername: + membernameexists = True + break + return membernameexists + + +def number_of_members_in_network(members, networkid, member_status=None): + return len( + [ + membid + for membid in members + if members.get(membid).network_id == networkid + and ( + member_status is None + or members.get(membid).member_status == member_status + ) + ] + ) + + +def admin_password_ok(password): + if not re.search("[a-z]", password): + return False + elif not re.search("[A-Z]", password): + return False + elif not re.search("[0-9]", password): + return False + elif re.search("['\"@\\/]", password): + return False + else: + return True
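The admin password check above requires at least one lowercase letter, one uppercase letter and one digit, and rejects quote, at-sign and slash characters; minimum length is enforced client-side by botocore rather than here. A minimal sketch of the helper's behaviour, assuming it is imported from moto.managedblockchain.utils:

    from moto.managedblockchain.utils import admin_password_ok

    assert admin_password_ok("Admin12345")        # upper + lower + digit -> accepted
    assert not admin_password_ok("badadminpwd")   # no uppercase, no digit
    assert not admin_password_ok("BADADMINPWD1")  # no lowercase
    assert not admin_password_ok("Admin@12345")   # '@' is a rejected character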
diff --git a/tests/test_managedblockchain/__init__.py b/tests/test_managedblockchain/__init__.py new file mode 100644 index 000000000000..baffc4882521 --- /dev/null +++ b/tests/test_managedblockchain/__init__.py @@ -0,0 +1 @@ +from __future__ import unicode_literals diff --git a/tests/test_managedblockchain/helpers.py b/tests/test_managedblockchain/helpers.py new file mode 100644 index 000000000000..38c13b512d66 --- /dev/null +++ b/tests/test_managedblockchain/helpers.py @@ -0,0 +1,67 @@ +from __future__ import unicode_literals + + +default_frameworkconfiguration = {"Fabric": {"Edition": "STARTER"}} + +default_votingpolicy = { + "ApprovalThresholdPolicy": { + "ThresholdPercentage": 50, + "ProposalDurationInHours": 24, + "ThresholdComparator": "GREATER_THAN_OR_EQUAL_TO", + } +} + +default_memberconfiguration = { + "Name": "testmember1", + "Description": "Test Member 1", + "FrameworkConfiguration": { + "Fabric": {"AdminUsername": "admin", "AdminPassword": "Admin12345"} + }, + "LogPublishingConfiguration": { + "Fabric": {"CaLogs": {"Cloudwatch": {"Enabled": False}}} + }, +} + +default_policy_actions = {"Invitations": [{"Principal": "123456789012"}]} + +multiple_policy_actions = { + "Invitations": [{"Principal": "123456789012"}, {"Principal": "123456789013"}] +} + + +def member_id_exist_in_list(members, memberid): + memberidexists = False + for member in members: + if member["Id"] == memberid: + memberidexists = True + break + return memberidexists + + +def create_member_configuration( + name, adminuser, adminpass, cloudwatchenabled, description=None +): + d = { + "Name": name, + "FrameworkConfiguration": { + "Fabric": {"AdminUsername": adminuser, "AdminPassword": adminpass} + }, + "LogPublishingConfiguration": { + "Fabric": {"CaLogs": {"Cloudwatch": {"Enabled": cloudwatchenabled}}} + }, + } + + if description is not None: + d["Description"] = description + + return d + + +def select_invitation_id_for_network(invitations, networkid, status=None): + # Get invitations based on network and maybe status + invitationsfornetwork = [] + for invitation in invitations: + if invitation["NetworkSummary"]["Id"] == networkid: + if status is None or invitation["Status"] == status: + invitationsfornetwork.append(invitation["InvitationId"]) + return invitationsfornetwork
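For reference, a short sketch of how the shared helpers above are used by the test modules that follow (the import path assumes the tests package layout introduced here):

    from tests.test_managedblockchain import helpers

    conf = helpers.create_member_configuration(
        "testmember2", "admin", "Admin12345", False, "Test Member 2"
    )
    assert conf["Name"] == "testmember2"
    assert conf["Description"] == "Test Member 2"
    assert conf["FrameworkConfiguration"]["Fabric"]["AdminPassword"] == "Admin12345"

diff --git a/tests/test_managedblockchain/test_managedblockchain_invitations.py b/tests/test_managedblockchain/test_managedblockchain_invitations.py new file mode 100644 index 000000000000..81b20a9ba1cf --- /dev/null +++ b/tests/test_managedblockchain/test_managedblockchain_invitations.py @@ -0,0 +1,142 @@ +from __future__ import unicode_literals + +import boto3 +import sure # noqa + +from moto.managedblockchain.exceptions import BadRequestException +from moto import mock_managedblockchain +from .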
import helpers + + +@mock_managedblockchain +def test_create_2_invitations(): + conn = boto3.client("managedblockchain", region_name="us-east-1") + + # Create network + response = conn.create_network( + Name="testnetwork1", + Framework="HYPERLEDGER_FABRIC", + FrameworkVersion="1.2", + FrameworkConfiguration=helpers.default_frameworkconfiguration, + VotingPolicy=helpers.default_votingpolicy, + MemberConfiguration=helpers.default_memberconfiguration, + ) + network_id = response["NetworkId"] + member_id = response["MemberId"] + + # Create proposal + response = conn.create_proposal( + NetworkId=network_id, + MemberId=member_id, + Actions=helpers.multiple_policy_actions, + ) + proposal_id = response["ProposalId"] + + # Get proposal details + response = conn.get_proposal(NetworkId=network_id, ProposalId=proposal_id) + response["Proposal"]["NetworkId"].should.equal(network_id) + response["Proposal"]["Status"].should.equal("IN_PROGRESS") + + # Vote yes + response = conn.vote_on_proposal( + NetworkId=network_id, + ProposalId=proposal_id, + VoterMemberId=member_id, + Vote="YES", + ) + + # Get the invitation + response = conn.list_invitations() + response["Invitations"].should.have.length_of(2) + response["Invitations"][0]["NetworkSummary"]["Id"].should.equal(network_id) + response["Invitations"][0]["Status"].should.equal("PENDING") + response["Invitations"][1]["NetworkSummary"]["Id"].should.equal(network_id) + response["Invitations"][1]["Status"].should.equal("PENDING") + + +@mock_managedblockchain +def test_reject_invitation(): + conn = boto3.client("managedblockchain", region_name="us-east-1") + + # Create network + response = conn.create_network( + Name="testnetwork1", + Framework="HYPERLEDGER_FABRIC", + FrameworkVersion="1.2", + FrameworkConfiguration=helpers.default_frameworkconfiguration, + VotingPolicy=helpers.default_votingpolicy, + MemberConfiguration=helpers.default_memberconfiguration, + ) + network_id = response["NetworkId"] + member_id = response["MemberId"] + + # Create proposal + response = conn.create_proposal( + NetworkId=network_id, + MemberId=member_id, + Actions=helpers.default_policy_actions, + ) + proposal_id = response["ProposalId"] + + # Get proposal details + response = conn.get_proposal(NetworkId=network_id, ProposalId=proposal_id) + response["Proposal"]["NetworkId"].should.equal(network_id) + response["Proposal"]["Status"].should.equal("IN_PROGRESS") + + # Vote yes + response = conn.vote_on_proposal( + NetworkId=network_id, + ProposalId=proposal_id, + VoterMemberId=member_id, + Vote="YES", + ) + + # Get the invitation + response = conn.list_invitations() + response["Invitations"][0]["NetworkSummary"]["Id"].should.equal(network_id) + response["Invitations"][0]["Status"].should.equal("PENDING") + invitation_id = response["Invitations"][0]["InvitationId"] + + # Reject - thanks but no thanks + response = conn.reject_invitation(InvitationId=invitation_id) + + # Check the invitation status + response = conn.list_invitations() + response["Invitations"][0]["InvitationId"].should.equal(invitation_id) + response["Invitations"][0]["Status"].should.equal("REJECTED") + + +@mock_managedblockchain +def test_reject_invitation_badinvitation(): + conn = boto3.client("managedblockchain", region_name="us-east-1") + + # Create network - need a good network + response = conn.create_network( + Name="testnetwork1", + Framework="HYPERLEDGER_FABRIC", + FrameworkVersion="1.2", + FrameworkConfiguration=helpers.default_frameworkconfiguration, + VotingPolicy=helpers.default_votingpolicy, + 
MemberConfiguration=helpers.default_memberconfiguration, + ) + network_id = response["NetworkId"] + member_id = response["MemberId"] + + response = conn.create_proposal( + NetworkId=network_id, + MemberId=member_id, + Actions=helpers.default_policy_actions, + ) + + proposal_id = response["ProposalId"] + + response = conn.vote_on_proposal( + NetworkId=network_id, + ProposalId=proposal_id, + VoterMemberId=member_id, + Vote="YES", + ) + + response = conn.reject_invitation.when.called_with( + InvitationId="in-ABCDEFGHIJKLMNOP0123456789", + ).should.throw(Exception, "InvitationId in-ABCDEFGHIJKLMNOP0123456789 not found.") diff --git a/tests/test_managedblockchain/test_managedblockchain_members.py b/tests/test_managedblockchain/test_managedblockchain_members.py new file mode 100644 index 000000000000..76d29dd5509d --- /dev/null +++ b/tests/test_managedblockchain/test_managedblockchain_members.py @@ -0,0 +1,669 @@ +from __future__ import unicode_literals + +import boto3 +import sure # noqa + +from moto.managedblockchain.exceptions import BadRequestException +from moto import mock_managedblockchain +from . import helpers + + +@mock_managedblockchain +def test_create_another_member(): + conn = boto3.client("managedblockchain", region_name="us-east-1") + + # Create network + response = conn.create_network( + Name="testnetwork1", + Description="Test Network 1", + Framework="HYPERLEDGER_FABRIC", + FrameworkVersion="1.2", + FrameworkConfiguration=helpers.default_frameworkconfiguration, + VotingPolicy=helpers.default_votingpolicy, + MemberConfiguration=helpers.default_memberconfiguration, + ) + network_id = response["NetworkId"] + member_id = response["MemberId"] + + # Create proposal + response = conn.create_proposal( + NetworkId=network_id, + MemberId=member_id, + Actions=helpers.default_policy_actions, + ) + proposal_id = response["ProposalId"] + + # Get proposal details + response = conn.get_proposal(NetworkId=network_id, ProposalId=proposal_id) + response["Proposal"]["NetworkId"].should.equal(network_id) + response["Proposal"]["Status"].should.equal("IN_PROGRESS") + + # Vote yes + response = conn.vote_on_proposal( + NetworkId=network_id, + ProposalId=proposal_id, + VoterMemberId=member_id, + Vote="YES", + ) + + # Get the invitation + response = conn.list_invitations() + response["Invitations"][0]["NetworkSummary"]["Id"].should.equal(network_id) + response["Invitations"][0]["Status"].should.equal("PENDING") + invitation_id = response["Invitations"][0]["InvitationId"] + + # Create the member + response = conn.create_member( + InvitationId=invitation_id, + NetworkId=network_id, + MemberConfiguration=helpers.create_member_configuration( + "testmember2", "admin", "Admin12345", False + ), + ) + member_id2 = response["MemberId"] + + # Check the invitation status + response = conn.list_invitations() + response["Invitations"][0]["InvitationId"].should.equal(invitation_id) + response["Invitations"][0]["Status"].should.equal("ACCEPTED") + + # Find member in full list + response = conn.list_members(NetworkId=network_id) + members = response["Members"] + members.should.have.length_of(2) + helpers.member_id_exist_in_list(members, member_id2).should.equal(True) + + # Get member 2 details + response = conn.get_member(NetworkId=network_id, MemberId=member_id2) + response["Member"]["Name"].should.equal("testmember2") + + # Update member + logconfignewenabled = not helpers.default_memberconfiguration[ + "LogPublishingConfiguration" + ]["Fabric"]["CaLogs"]["Cloudwatch"]["Enabled"] + logconfignew = { + "Fabric": 
{"CaLogs": {"Cloudwatch": {"Enabled": logconfignewenabled}}} + } + conn.update_member( + NetworkId=network_id, + MemberId=member_id2, + LogPublishingConfiguration=logconfignew, + ) + + # Get member 2 details + response = conn.get_member(NetworkId=network_id, MemberId=member_id2) + response["Member"]["LogPublishingConfiguration"]["Fabric"]["CaLogs"]["Cloudwatch"][ + "Enabled" + ].should.equal(logconfignewenabled) + + +@mock_managedblockchain +def test_create_another_member_withopts(): + conn = boto3.client("managedblockchain", region_name="us-east-1") + + # Create network + response = conn.create_network( + Name="testnetwork1", + Framework="HYPERLEDGER_FABRIC", + FrameworkVersion="1.2", + FrameworkConfiguration=helpers.default_frameworkconfiguration, + VotingPolicy=helpers.default_votingpolicy, + MemberConfiguration=helpers.default_memberconfiguration, + ) + network_id = response["NetworkId"] + member_id = response["MemberId"] + + # Create proposal + response = conn.create_proposal( + NetworkId=network_id, + MemberId=member_id, + Actions=helpers.default_policy_actions, + ) + proposal_id = response["ProposalId"] + + # Get proposal details + response = conn.get_proposal(NetworkId=network_id, ProposalId=proposal_id) + response["Proposal"]["NetworkId"].should.equal(network_id) + response["Proposal"]["Status"].should.equal("IN_PROGRESS") + + # Vote yes + response = conn.vote_on_proposal( + NetworkId=network_id, + ProposalId=proposal_id, + VoterMemberId=member_id, + Vote="YES", + ) + + # Get the invitation + response = conn.list_invitations() + response["Invitations"][0]["NetworkSummary"]["Id"].should.equal(network_id) + response["Invitations"][0]["Status"].should.equal("PENDING") + invitation_id = response["Invitations"][0]["InvitationId"] + + # Create the member + response = conn.create_member( + InvitationId=invitation_id, + NetworkId=network_id, + MemberConfiguration=helpers.create_member_configuration( + "testmember2", "admin", "Admin12345", False, "Test Member 2" + ), + ) + member_id2 = response["MemberId"] + + # Check the invitation status + response = conn.list_invitations() + response["Invitations"][0]["InvitationId"].should.equal(invitation_id) + response["Invitations"][0]["Status"].should.equal("ACCEPTED") + + # Find member in full list + response = conn.list_members(NetworkId=network_id) + members = response["Members"] + members.should.have.length_of(2) + helpers.member_id_exist_in_list(members, member_id2).should.equal(True) + + # Get member 2 details + response = conn.get_member(NetworkId=network_id, MemberId=member_id2) + response["Member"]["Description"].should.equal("Test Member 2") + + # Try to create member with already used invitation + response = conn.create_member.when.called_with( + InvitationId=invitation_id, + NetworkId=network_id, + MemberConfiguration=helpers.create_member_configuration( + "testmember2", "admin", "Admin12345", False, "Test Member 2 Duplicate" + ), + ).should.throw(Exception, "Invitation {0} not valid".format(invitation_id)) + + # Delete member 2 + conn.delete_member(NetworkId=network_id, MemberId=member_id2) + + # Member is still in the list + response = conn.list_members(NetworkId=network_id) + members = response["Members"] + members.should.have.length_of(2) + + # But cannot get + response = conn.get_member.when.called_with( + NetworkId=network_id, MemberId=member_id2, + ).should.throw(Exception, "Member {0} not found".format(member_id2)) + + # Delete member 1 + conn.delete_member(NetworkId=network_id, MemberId=member_id) + + # Network should be gone + 
response = conn.list_networks() + mbcnetworks = response["Networks"] + mbcnetworks.should.have.length_of(0) + + # Verify the invitation network status is DELETED + # Get the invitation + response = conn.list_invitations() + response["Invitations"].should.have.length_of(1) + response["Invitations"][0]["NetworkSummary"]["Id"].should.equal(network_id) + response["Invitations"][0]["NetworkSummary"]["Status"].should.equal("DELETED") + + +@mock_managedblockchain +def test_create_and_delete_member(): + conn = boto3.client("managedblockchain", region_name="us-east-1") + + # Create network + response = conn.create_network( + Name="testnetwork1", + Framework="HYPERLEDGER_FABRIC", + FrameworkVersion="1.2", + FrameworkConfiguration=helpers.default_frameworkconfiguration, + VotingPolicy=helpers.default_votingpolicy, + MemberConfiguration=helpers.default_memberconfiguration, + ) + network_id = response["NetworkId"] + member_id = response["MemberId"] + + # Create proposal (create additional member) + response = conn.create_proposal( + NetworkId=network_id, + MemberId=member_id, + Actions=helpers.default_policy_actions, + ) + proposal_id = response["ProposalId"] + + # Vote yes + response = conn.vote_on_proposal( + NetworkId=network_id, + ProposalId=proposal_id, + VoterMemberId=member_id, + Vote="YES", + ) + + # Get the invitation + response = conn.list_invitations() + invitation_id = response["Invitations"][0]["InvitationId"] + + # Create the member + response = conn.create_member( + InvitationId=invitation_id, + NetworkId=network_id, + MemberConfiguration=helpers.create_member_configuration( + "testmember2", "admin", "Admin12345", False, "Test Member 2" + ), + ) + member_id2 = response["MemberId"] + + both_policy_actions = { + "Invitations": [{"Principal": "123456789012"}], + "Removals": [{"MemberId": member_id2}], + } + + # Create proposal (invite and remove member) + response = conn.create_proposal( + NetworkId=network_id, MemberId=member_id, Actions=both_policy_actions, + ) + proposal_id2 = response["ProposalId"] + + # Get proposal details + response = conn.get_proposal(NetworkId=network_id, ProposalId=proposal_id2) + response["Proposal"]["NetworkId"].should.equal(network_id) + response["Proposal"]["Status"].should.equal("IN_PROGRESS") + + # Vote yes + response = conn.vote_on_proposal( + NetworkId=network_id, + ProposalId=proposal_id2, + VoterMemberId=member_id, + Vote="YES", + ) + + # Check the invitation status + response = conn.list_invitations() + invitations = helpers.select_invitation_id_for_network( + response["Invitations"], network_id, "PENDING" + ) + invitations.should.have.length_of(1) + + # Member is still in the list + response = conn.list_members(NetworkId=network_id) + members = response["Members"] + members.should.have.length_of(2) + foundmember2 = False + for member in members: + if member["Id"] == member_id2 and member["Status"] == "DELETED": + foundmember2 = True + foundmember2.should.equal(True) + + +@mock_managedblockchain +def test_create_too_many_members(): + conn = boto3.client("managedblockchain", region_name="us-east-1") + + # Create network + response = conn.create_network( + Name="testnetwork1", + Framework="HYPERLEDGER_FABRIC", + FrameworkVersion="1.2", + FrameworkConfiguration=helpers.default_frameworkconfiguration, + VotingPolicy=helpers.default_votingpolicy, + MemberConfiguration=helpers.default_memberconfiguration, + ) + network_id = response["NetworkId"] + member_id = response["MemberId"] + + # Create 4 more members - create invitations for 5 + for counter in range(2, 
7): + # Create proposal + response = conn.create_proposal( + NetworkId=network_id, + MemberId=member_id, + Actions=helpers.default_policy_actions, + ) + proposal_id = response["ProposalId"] + + # Vote yes + response = conn.vote_on_proposal( + NetworkId=network_id, + ProposalId=proposal_id, + VoterMemberId=member_id, + Vote="YES", + ) + + for counter in range(2, 6): + # Get the invitation + response = conn.list_invitations() + invitation_id = helpers.select_invitation_id_for_network( + response["Invitations"], network_id, "PENDING" + )[0] + + # Create the member + response = conn.create_member( + InvitationId=invitation_id, + NetworkId=network_id, + MemberConfiguration=helpers.create_member_configuration( + "testmember" + str(counter), + "admin", + "Admin12345", + False, + "Test Member " + str(counter), + ), + ) + member_id = response["MemberId"] + + # Find member in full list + response = conn.list_members(NetworkId=network_id) + members = response["Members"] + members.should.have.length_of(counter) + helpers.member_id_exist_in_list(members, member_id).should.equal(True) + + # Get member details + response = conn.get_member(NetworkId=network_id, MemberId=member_id) + response["Member"]["Description"].should.equal("Test Member " + str(counter)) + + # Try to create a sixth member + response = conn.list_invitations() + invitation_id = helpers.select_invitation_id_for_network( + response["Invitations"], network_id, "PENDING" + )[0] + + # A sixth member exceeds the STARTER edition member limit + response = conn.create_member.when.called_with( + InvitationId=invitation_id, + NetworkId=network_id, + MemberConfiguration=helpers.create_member_configuration( + "testmember6", "admin", "Admin12345", False, "Test Member 6" + ), + ).should.throw( + Exception, + "5 is the maximum number of members allowed in a STARTER Edition network", + ) + + +@mock_managedblockchain +def test_create_another_member_alreadyhave(): + conn = boto3.client("managedblockchain", region_name="us-east-1") + + # Create network + response = conn.create_network( + Name="testnetwork1", + Description="Test Network 1", + Framework="HYPERLEDGER_FABRIC", + FrameworkVersion="1.2", + FrameworkConfiguration=helpers.default_frameworkconfiguration, + VotingPolicy=helpers.default_votingpolicy, + MemberConfiguration=helpers.default_memberconfiguration, + ) + network_id = response["NetworkId"] + member_id = response["MemberId"] + + # Create proposal + response = conn.create_proposal( + NetworkId=network_id, + MemberId=member_id, + Actions=helpers.default_policy_actions, + ) + proposal_id = response["ProposalId"] + + # Vote yes + response = conn.vote_on_proposal( + NetworkId=network_id, + ProposalId=proposal_id, + VoterMemberId=member_id, + Vote="YES", + ) + + # Get the invitation + response = conn.list_invitations() + invitation_id = response["Invitations"][0]["InvitationId"] + + # Should fail trying to create with same name + response = conn.create_member.when.called_with( + NetworkId=network_id, + InvitationId=invitation_id, + MemberConfiguration=helpers.create_member_configuration( + "testmember1", "admin", "Admin12345", False + ), + ).should.throw( + Exception, + "Member name {0} already exists in network {1}".format( + "testmember1", network_id + ), + ) + + +@mock_managedblockchain +def test_create_another_member_badnetwork(): + conn = boto3.client("managedblockchain", region_name="us-east-1") + + response = conn.create_member.when.called_with( + NetworkId="n-ABCDEFGHIJKLMNOP0123456789", + InvitationId="in-ABCDEFGHIJKLMNOP0123456789", +
MemberConfiguration=helpers.create_member_configuration( + "testmember2", "admin", "Admin12345", False + ), + ).should.throw(Exception, "Network n-ABCDEFGHIJKLMNOP0123456789 not found") + + +@mock_managedblockchain +def test_create_another_member_badinvitation(): + conn = boto3.client("managedblockchain", region_name="us-east-1") + + # Create network - need a good network + response = conn.create_network( + Name="testnetwork1", + Framework="HYPERLEDGER_FABRIC", + FrameworkVersion="1.2", + FrameworkConfiguration=helpers.default_frameworkconfiguration, + VotingPolicy=helpers.default_votingpolicy, + MemberConfiguration=helpers.default_memberconfiguration, + ) + network_id = response["NetworkId"] + + response = conn.create_member.when.called_with( + NetworkId=network_id, + InvitationId="in-ABCDEFGHIJKLMNOP0123456789", + MemberConfiguration=helpers.create_member_configuration( + "testmember2", "admin", "Admin12345", False + ), + ).should.throw(Exception, "Invitation in-ABCDEFGHIJKLMNOP0123456789 not valid") + + +@mock_managedblockchain +def test_create_another_member_adminpassword(): + conn = boto3.client("managedblockchain", region_name="us-east-1") + + # Create network - need a good network + response = conn.create_network( + Name="testnetwork1", + Framework="HYPERLEDGER_FABRIC", + FrameworkVersion="1.2", + FrameworkConfiguration=helpers.default_frameworkconfiguration, + VotingPolicy=helpers.default_votingpolicy, + MemberConfiguration=helpers.default_memberconfiguration, + ) + network_id = response["NetworkId"] + member_id = response["MemberId"] + + # Create proposal + response = conn.create_proposal( + NetworkId=network_id, + MemberId=member_id, + Actions=helpers.default_policy_actions, + ) + proposal_id = response["ProposalId"] + + # Get proposal details + response = conn.get_proposal(NetworkId=network_id, ProposalId=proposal_id) + response["Proposal"]["NetworkId"].should.equal(network_id) + response["Proposal"]["Status"].should.equal("IN_PROGRESS") + + # Vote yes + response = conn.vote_on_proposal( + NetworkId=network_id, + ProposalId=proposal_id, + VoterMemberId=member_id, + Vote="YES", + ) + + # Get the invitation + response = conn.list_invitations() + invitation_id = response["Invitations"][0]["InvitationId"] + + badadminpassmemberconf = helpers.create_member_configuration( + "testmember2", "admin", "Admin12345", False + ) + + # Too short + badadminpassmemberconf["FrameworkConfiguration"]["Fabric"][ + "AdminPassword" + ] = "badap" + response = conn.create_member.when.called_with( + NetworkId=network_id, + InvitationId=invitation_id, + MemberConfiguration=badadminpassmemberconf, + ).should.throw( + Exception, + "Invalid length for parameter MemberConfiguration.FrameworkConfiguration.Fabric.AdminPassword", + ) + + # No uppercase or numbers + badadminpassmemberconf["FrameworkConfiguration"]["Fabric"][ + "AdminPassword" + ] = "badadminpwd" + response = conn.create_member.when.called_with( + NetworkId=network_id, + InvitationId=invitation_id, + MemberConfiguration=badadminpassmemberconf, + ).should.throw(Exception, "Invalid request body") + + # No lowercase or numbers + badadminpassmemberconf["FrameworkConfiguration"]["Fabric"][ + "AdminPassword" + ] = "BADADMINPWD" + response = conn.create_member.when.called_with( + NetworkId=network_id, + InvitationId=invitation_id, + MemberConfiguration=badadminpassmemberconf, + ).should.throw(Exception, "Invalid request body") + + # No numbers + badadminpassmemberconf["FrameworkConfiguration"]["Fabric"][ + "AdminPassword" + ] = "badAdminpwd" + response = 
conn.create_member.when.called_with( + NetworkId=network_id, + InvitationId=invitation_id, + MemberConfiguration=badadminpassmemberconf, + ).should.throw(Exception, "Invalid request body") + + # Invalid character + badadminpassmemberconf["FrameworkConfiguration"]["Fabric"][ + "AdminPassword" + ] = "badAdmin@pwd1" + response = conn.create_member.when.called_with( + NetworkId=network_id, + InvitationId=invitation_id, + MemberConfiguration=badadminpassmemberconf, + ).should.throw(Exception, "Invalid request body") + + +@mock_managedblockchain +def test_list_members_badnetwork(): + conn = boto3.client("managedblockchain", region_name="us-east-1") + + response = conn.list_members.when.called_with( + NetworkId="n-ABCDEFGHIJKLMNOP0123456789", + ).should.throw(Exception, "Network n-ABCDEFGHIJKLMNOP0123456789 not found") + + +@mock_managedblockchain +def test_get_member_badnetwork(): + conn = boto3.client("managedblockchain", region_name="us-east-1") + + response = conn.get_member.when.called_with( + NetworkId="n-ABCDEFGHIJKLMNOP0123456789", + MemberId="m-ABCDEFGHIJKLMNOP0123456789", + ).should.throw(Exception, "Network n-ABCDEFGHIJKLMNOP0123456789 not found") + + +@mock_managedblockchain +def test_get_member_badmember(): + conn = boto3.client("managedblockchain", region_name="us-east-1") + + # Create network - need a good network + response = conn.create_network( + Name="testnetwork1", + Framework="HYPERLEDGER_FABRIC", + FrameworkVersion="1.2", + FrameworkConfiguration=helpers.default_frameworkconfiguration, + VotingPolicy=helpers.default_votingpolicy, + MemberConfiguration=helpers.default_memberconfiguration, + ) + network_id = response["NetworkId"] + + response = conn.get_member.when.called_with( + NetworkId=network_id, MemberId="m-ABCDEFGHIJKLMNOP0123456789", + ).should.throw(Exception, "Member m-ABCDEFGHIJKLMNOP0123456789 not found") + + +@mock_managedblockchain +def test_delete_member_badnetwork(): + conn = boto3.client("managedblockchain", region_name="us-east-1") + + response = conn.delete_member.when.called_with( + NetworkId="n-ABCDEFGHIJKLMNOP0123456789", + MemberId="m-ABCDEFGHIJKLMNOP0123456789", + ).should.throw(Exception, "Network n-ABCDEFGHIJKLMNOP0123456789 not found") + + +@mock_managedblockchain +def test_delete_member_badmember(): + conn = boto3.client("managedblockchain", region_name="us-east-1") + + # Create network - need a good network + response = conn.create_network( + Name="testnetwork1", + Framework="HYPERLEDGER_FABRIC", + FrameworkVersion="1.2", + FrameworkConfiguration=helpers.default_frameworkconfiguration, + VotingPolicy=helpers.default_votingpolicy, + MemberConfiguration=helpers.default_memberconfiguration, + ) + network_id = response["NetworkId"] + + response = conn.delete_member.when.called_with( + NetworkId=network_id, MemberId="m-ABCDEFGHIJKLMNOP0123456789", + ).should.throw(Exception, "Member m-ABCDEFGHIJKLMNOP0123456789 not found") + + +@mock_managedblockchain +def test_update_member_badnetwork(): + conn = boto3.client("managedblockchain", region_name="us-east-1") + + response = conn.update_member.when.called_with( + NetworkId="n-ABCDEFGHIJKLMNOP0123456789", + MemberId="m-ABCDEFGHIJKLMNOP0123456789", + LogPublishingConfiguration=helpers.default_memberconfiguration[ + "LogPublishingConfiguration" + ], + ).should.throw(Exception, "Network n-ABCDEFGHIJKLMNOP0123456789 not found") + + +@mock_managedblockchain +def test_update_member_badmember(): + conn = boto3.client("managedblockchain", region_name="us-east-1") + + # Create network - need a good network + response 
= conn.create_network( + Name="testnetwork1", + Framework="HYPERLEDGER_FABRIC", + FrameworkVersion="1.2", + FrameworkConfiguration=helpers.default_frameworkconfiguration, + VotingPolicy=helpers.default_votingpolicy, + MemberConfiguration=helpers.default_memberconfiguration, + ) + network_id = response["NetworkId"] + + response = conn.update_member.when.called_with( + NetworkId=network_id, + MemberId="m-ABCDEFGHIJKLMNOP0123456789", + LogPublishingConfiguration=helpers.default_memberconfiguration[ + "LogPublishingConfiguration" + ], + ).should.throw(Exception, "Member m-ABCDEFGHIJKLMNOP0123456789 not found") diff --git a/tests/test_managedblockchain/test_managedblockchain_networks.py b/tests/test_managedblockchain/test_managedblockchain_networks.py index a3256a3fe03c..4e15790175ea 100644 --- a/tests/test_managedblockchain/test_managedblockchain_networks.py +++ b/tests/test_managedblockchain/test_managedblockchain_networks.py @@ -5,28 +5,7 @@ from moto.managedblockchain.exceptions import BadRequestException from moto import mock_managedblockchain - - -default_frameworkconfiguration = {"Fabric": {"Edition": "STARTER"}} - -default_votingpolicy = { - "ApprovalThresholdPolicy": { - "ThresholdPercentage": 50, - "ProposalDurationInHours": 24, - "ThresholdComparator": "GREATER_THAN_OR_EQUAL_TO", - } -} - -default_memberconfiguration = { - "Name": "testmember1", - "Description": "Test Member 1", - "FrameworkConfiguration": { - "Fabric": {"AdminUsername": "admin", "AdminPassword": "Admin12345"} - }, - "LogPublishingConfiguration": { - "Fabric": {"CaLogs": {"Cloudwatch": {"Enabled": False}}} - }, -} +from . import helpers @mock_managedblockchain @@ -37,12 +16,14 @@ def test_create_network(): Name="testnetwork1", Framework="HYPERLEDGER_FABRIC", FrameworkVersion="1.2", - FrameworkConfiguration=default_frameworkconfiguration, - VotingPolicy=default_votingpolicy, - MemberConfiguration=default_memberconfiguration, + FrameworkConfiguration=helpers.default_frameworkconfiguration, + VotingPolicy=helpers.default_votingpolicy, + MemberConfiguration=helpers.default_memberconfiguration, ) - response["NetworkId"].should.match("n-[A-Z0-9]{26}") - response["MemberId"].should.match("m-[A-Z0-9]{26}") + network_id = response["NetworkId"] + member_id = response["MemberId"] + network_id.should.match("n-[A-Z0-9]{26}") + member_id.should.match("m-[A-Z0-9]{26}") # Find in full list response = conn.list_networks() @@ -51,7 +32,6 @@ def test_create_network(): mbcnetworks[0]["Name"].should.equal("testnetwork1") # Get network details - network_id = mbcnetworks[0]["Id"] response = conn.get_network(NetworkId=network_id) response["Network"]["Name"].should.equal("testnetwork1") @@ -65,12 +45,14 @@ def test_create_network_withopts(): Description="Test Network 1", Framework="HYPERLEDGER_FABRIC", FrameworkVersion="1.2", - FrameworkConfiguration=default_frameworkconfiguration, - VotingPolicy=default_votingpolicy, - MemberConfiguration=default_memberconfiguration, + FrameworkConfiguration=helpers.default_frameworkconfiguration, + VotingPolicy=helpers.default_votingpolicy, + MemberConfiguration=helpers.default_memberconfiguration, ) - response["NetworkId"].should.match("n-[A-Z0-9]{26}") - response["MemberId"].should.match("m-[A-Z0-9]{26}") + network_id = response["NetworkId"] + member_id = response["MemberId"] + network_id.should.match("n-[A-Z0-9]{26}") + member_id.should.match("m-[A-Z0-9]{26}") # Find in full list response = conn.list_networks() @@ -79,7 +61,6 @@ def test_create_network_withopts(): 
mbcnetworks[0]["Description"].should.equal("Test Network 1") # Get network details - network_id = mbcnetworks[0]["Id"] response = conn.get_network(NetworkId=network_id) response["Network"]["Description"].should.equal("Test Network 1") @@ -93,9 +74,9 @@ def test_create_network_noframework(): Description="Test Network 1", Framework="HYPERLEDGER_VINYL", FrameworkVersion="1.2", - FrameworkConfiguration=default_frameworkconfiguration, - VotingPolicy=default_votingpolicy, - MemberConfiguration=default_memberconfiguration, + FrameworkConfiguration=helpers.default_frameworkconfiguration, + VotingPolicy=helpers.default_votingpolicy, + MemberConfiguration=helpers.default_memberconfiguration, ).should.throw(Exception, "Invalid request body") @@ -108,9 +89,9 @@ def test_create_network_badframeworkver(): Description="Test Network 1", Framework="HYPERLEDGER_FABRIC", FrameworkVersion="1.X", - FrameworkConfiguration=default_frameworkconfiguration, - VotingPolicy=default_votingpolicy, - MemberConfiguration=default_memberconfiguration, + FrameworkConfiguration=helpers.default_frameworkconfiguration, + VotingPolicy=helpers.default_votingpolicy, + MemberConfiguration=helpers.default_memberconfiguration, ).should.throw( Exception, "Invalid version 1.X requested for framework HYPERLEDGER_FABRIC" ) @@ -128,8 +109,8 @@ def test_create_network_badedition(): Framework="HYPERLEDGER_FABRIC", FrameworkVersion="1.2", FrameworkConfiguration=frameworkconfiguration, - VotingPolicy=default_votingpolicy, - MemberConfiguration=default_memberconfiguration, + VotingPolicy=helpers.default_votingpolicy, + MemberConfiguration=helpers.default_memberconfiguration, ).should.throw(Exception, "Invalid request body") @@ -138,5 +119,5 @@ def test_get_network_badnetwork(): conn = boto3.client("managedblockchain", region_name="us-east-1") response = conn.get_network.when.called_with( - NetworkId="n-BADNETWORK", - ).should.throw(Exception, "Network n-BADNETWORK not found") + NetworkId="n-ABCDEFGHIJKLMNOP0123456789", + ).should.throw(Exception, "Network n-ABCDEFGHIJKLMNOP0123456789 not found") diff --git a/tests/test_managedblockchain/test_managedblockchain_proposals.py b/tests/test_managedblockchain/test_managedblockchain_proposals.py new file mode 100644 index 000000000000..407d26246c81 --- /dev/null +++ b/tests/test_managedblockchain/test_managedblockchain_proposals.py @@ -0,0 +1,199 @@ +from __future__ import unicode_literals + +import boto3 +import sure # noqa + +from moto.managedblockchain.exceptions import BadRequestException +from moto import mock_managedblockchain +from . 
import helpers + + +@mock_managedblockchain +def test_create_proposal(): + conn = boto3.client("managedblockchain", region_name="us-east-1") + + # Create network + response = conn.create_network( + Name="testnetwork1", + Framework="HYPERLEDGER_FABRIC", + FrameworkVersion="1.2", + FrameworkConfiguration=helpers.default_frameworkconfiguration, + VotingPolicy=helpers.default_votingpolicy, + MemberConfiguration=helpers.default_memberconfiguration, + ) + network_id = response["NetworkId"] + member_id = response["MemberId"] + network_id.should.match("n-[A-Z0-9]{26}") + member_id.should.match("m-[A-Z0-9]{26}") + + # Create proposal + response = conn.create_proposal( + NetworkId=network_id, + MemberId=member_id, + Actions=helpers.default_policy_actions, + ) + proposal_id = response["ProposalId"] + proposal_id.should.match("p-[A-Z0-9]{26}") + + # Find in full list + response = conn.list_proposals(NetworkId=network_id) + proposals = response["Proposals"] + proposals.should.have.length_of(1) + proposals[0]["ProposalId"].should.equal(proposal_id) + + # Get proposal details + response = conn.get_proposal(NetworkId=network_id, ProposalId=proposal_id) + response["Proposal"]["NetworkId"].should.equal(network_id) + + +@mock_managedblockchain +def test_create_proposal_withopts(): + conn = boto3.client("managedblockchain", region_name="us-east-1") + + # Create network + response = conn.create_network( + Name="testnetwork1", + Framework="HYPERLEDGER_FABRIC", + FrameworkVersion="1.2", + FrameworkConfiguration=helpers.default_frameworkconfiguration, + VotingPolicy=helpers.default_votingpolicy, + MemberConfiguration=helpers.default_memberconfiguration, + ) + network_id = response["NetworkId"] + member_id = response["MemberId"] + network_id.should.match("n-[A-Z0-9]{26}") + member_id.should.match("m-[A-Z0-9]{26}") + + # Create proposal + response = conn.create_proposal( + NetworkId=network_id, + MemberId=member_id, + Actions=helpers.default_policy_actions, + Description="Adding a new member", + ) + proposal_id = response["ProposalId"] + proposal_id.should.match("p-[A-Z0-9]{26}") + + # Get proposal details + response = conn.get_proposal(NetworkId=network_id, ProposalId=proposal_id) + response["Proposal"]["Description"].should.equal("Adding a new member") + + +@mock_managedblockchain +def test_create_proposal_badnetwork(): + conn = boto3.client("managedblockchain", region_name="us-east-1") + + response = conn.create_proposal.when.called_with( + NetworkId="n-ABCDEFGHIJKLMNOP0123456789", + MemberId="m-ABCDEFGHIJKLMNOP0123456789", + Actions=helpers.default_policy_actions, + ).should.throw(Exception, "Network n-ABCDEFGHIJKLMNOP0123456789 not found") + + +@mock_managedblockchain +def test_create_proposal_badmember(): + conn = boto3.client("managedblockchain", region_name="us-east-1") + + # Create network - need a good network + response = conn.create_network( + Name="testnetwork1", + Framework="HYPERLEDGER_FABRIC", + FrameworkVersion="1.2", + FrameworkConfiguration=helpers.default_frameworkconfiguration, + VotingPolicy=helpers.default_votingpolicy, + MemberConfiguration=helpers.default_memberconfiguration, + ) + network_id = response["NetworkId"] + + response = conn.create_proposal.when.called_with( + NetworkId=network_id, + MemberId="m-ABCDEFGHIJKLMNOP0123456789", + Actions=helpers.default_policy_actions, + ).should.throw(Exception, "Member m-ABCDEFGHIJKLMNOP0123456789 not found") + + +@mock_managedblockchain +def test_create_proposal_badinvitationacctid(): + conn = boto3.client("managedblockchain", 
region_name="us-east-1") + + # Must be 12 digits + actions = {"Invitations": [{"Principal": "1234567890"}]} + + # Create network - need a good network + response = conn.create_network( + Name="testnetwork1", + Framework="HYPERLEDGER_FABRIC", + FrameworkVersion="1.2", + FrameworkConfiguration=helpers.default_frameworkconfiguration, + VotingPolicy=helpers.default_votingpolicy, + MemberConfiguration=helpers.default_memberconfiguration, + ) + network_id = response["NetworkId"] + member_id = response["MemberId"] + + response = conn.create_proposal.when.called_with( + NetworkId=network_id, MemberId=member_id, Actions=actions, + ).should.throw(Exception, "Account ID format specified in proposal is not valid") + + +@mock_managedblockchain +def test_create_proposal_badremovalmemid(): + conn = boto3.client("managedblockchain", region_name="us-east-1") + + # Must be a valid member ID + actions = {"Removals": [{"MemberId": "m-ABCDEFGHIJKLMNOP0123456789"}]} + + # Create network - need a good network + response = conn.create_network( + Name="testnetwork1", + Framework="HYPERLEDGER_FABRIC", + FrameworkVersion="1.2", + FrameworkConfiguration=helpers.default_frameworkconfiguration, + VotingPolicy=helpers.default_votingpolicy, + MemberConfiguration=helpers.default_memberconfiguration, + ) + network_id = response["NetworkId"] + member_id = response["MemberId"] + + response = conn.create_proposal.when.called_with( + NetworkId=network_id, MemberId=member_id, Actions=actions, + ).should.throw(Exception, "Member ID format specified in proposal is not valid") + + +@mock_managedblockchain +def test_list_proposal_badnetwork(): + conn = boto3.client("managedblockchain", region_name="us-east-1") + + response = conn.list_proposals.when.called_with( + NetworkId="n-ABCDEFGHIJKLMNOP0123456789", + ).should.throw(Exception, "Network n-ABCDEFGHIJKLMNOP0123456789 not found") + + +@mock_managedblockchain +def test_get_proposal_badnetwork(): + conn = boto3.client("managedblockchain", region_name="us-east-1") + + response = conn.get_proposal.when.called_with( + NetworkId="n-ABCDEFGHIJKLMNOP0123456789", + ProposalId="p-ABCDEFGHIJKLMNOP0123456789", + ).should.throw(Exception, "Network n-ABCDEFGHIJKLMNOP0123456789 not found") + + +@mock_managedblockchain +def test_get_proposal_badproposal(): + conn = boto3.client("managedblockchain", region_name="us-east-1") + + # Create network - need a good network + response = conn.create_network( + Name="testnetwork1", + Framework="HYPERLEDGER_FABRIC", + FrameworkVersion="1.2", + FrameworkConfiguration=helpers.default_frameworkconfiguration, + VotingPolicy=helpers.default_votingpolicy, + MemberConfiguration=helpers.default_memberconfiguration, + ) + network_id = response["NetworkId"] + + response = conn.get_proposal.when.called_with( + NetworkId=network_id, ProposalId="p-ABCDEFGHIJKLMNOP0123456789", + ).should.throw(Exception, "Proposal p-ABCDEFGHIJKLMNOP0123456789 not found")
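The proposal-vote tests that follow exercise the ApprovalThresholdPolicy arithmetic: the yes votes are compared, as a percentage of the network's members, against ThresholdPercentage using ThresholdComparator. A rough sketch of that comparison (an illustrative helper, not part of moto's API):

    def proposal_vote_outcome(yes_votes, total_members, threshold_pct, comparator):
        # Percentage of all members that have voted yes so far.
        pct_yes = 100.0 * yes_votes / total_members
        if comparator == "GREATER_THAN":
            return "APPROVED" if pct_yes > threshold_pct else "IN_PROGRESS"
        return "APPROVED" if pct_yes >= threshold_pct else "IN_PROGRESS"

    # One yes out of two members is exactly 50%: GREATER_THAN_OR_EQUAL_TO
    # approves, while GREATER_THAN leaves the proposal IN_PROGRESS.
    assert proposal_vote_outcome(1, 2, 50, "GREATER_THAN_OR_EQUAL_TO") == "APPROVED"
    assert proposal_vote_outcome(1, 2, 50, "GREATER_THAN") == "IN_PROGRESS"

diff --git a/tests/test_managedblockchain/test_managedblockchain_proposalvotes.py b/tests/test_managedblockchain/test_managedblockchain_proposalvotes.py new file mode 100644 index 000000000000..a026b496f99e --- /dev/null +++ b/tests/test_managedblockchain/test_managedblockchain_proposalvotes.py @@ -0,0 +1,529 @@ +from __future__ import unicode_literals + +import os + +import boto3 +import sure # noqa +from freezegun import freeze_time +from nose import SkipTest + +from moto.managedblockchain.exceptions import BadRequestException +from moto import mock_managedblockchain, settings +from .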
import helpers + + +@mock_managedblockchain +def test_vote_on_proposal_one_member_total_yes(): + conn = boto3.client("managedblockchain", region_name="us-east-1") + + # Create network + response = conn.create_network( + Name="testnetwork1", + Framework="HYPERLEDGER_FABRIC", + FrameworkVersion="1.2", + FrameworkConfiguration=helpers.default_frameworkconfiguration, + VotingPolicy=helpers.default_votingpolicy, + MemberConfiguration=helpers.default_memberconfiguration, + ) + network_id = response["NetworkId"] + member_id = response["MemberId"] + + # Create proposal + response = conn.create_proposal( + NetworkId=network_id, + MemberId=member_id, + Actions=helpers.default_policy_actions, + ) + proposal_id = response["ProposalId"] + + # Get proposal details + response = conn.get_proposal(NetworkId=network_id, ProposalId=proposal_id) + response["Proposal"]["NetworkId"].should.equal(network_id) + response["Proposal"]["Status"].should.equal("IN_PROGRESS") + + # Vote yes + response = conn.vote_on_proposal( + NetworkId=network_id, + ProposalId=proposal_id, + VoterMemberId=member_id, + Vote="YES", + ) + + # List proposal votes + response = conn.list_proposal_votes(NetworkId=network_id, ProposalId=proposal_id) + response["ProposalVotes"][0]["MemberId"].should.equal(member_id) + + # Get proposal details - should be APPROVED + response = conn.get_proposal(NetworkId=network_id, ProposalId=proposal_id) + response["Proposal"]["Status"].should.equal("APPROVED") + response["Proposal"]["YesVoteCount"].should.equal(1) + response["Proposal"]["NoVoteCount"].should.equal(0) + response["Proposal"]["OutstandingVoteCount"].should.equal(0) + + +@mock_managedblockchain +def test_vote_on_proposal_one_member_total_no(): + conn = boto3.client("managedblockchain", region_name="us-east-1") + + # Create network + response = conn.create_network( + Name="testnetwork1", + Framework="HYPERLEDGER_FABRIC", + FrameworkVersion="1.2", + FrameworkConfiguration=helpers.default_frameworkconfiguration, + VotingPolicy=helpers.default_votingpolicy, + MemberConfiguration=helpers.default_memberconfiguration, + ) + network_id = response["NetworkId"] + member_id = response["MemberId"] + + # Create proposal + response = conn.create_proposal( + NetworkId=network_id, + MemberId=member_id, + Actions=helpers.default_policy_actions, + ) + proposal_id = response["ProposalId"] + + # Get proposal details + response = conn.get_proposal(NetworkId=network_id, ProposalId=proposal_id) + response["Proposal"]["NetworkId"].should.equal(network_id) + response["Proposal"]["Status"].should.equal("IN_PROGRESS") + + # Vote no + response = conn.vote_on_proposal( + NetworkId=network_id, + ProposalId=proposal_id, + VoterMemberId=member_id, + Vote="NO", + ) + + # List proposal votes + response = conn.list_proposal_votes(NetworkId=network_id, ProposalId=proposal_id) + response["ProposalVotes"][0]["MemberId"].should.equal(member_id) + + # Get proposal details - should be REJECTED + response = conn.get_proposal(NetworkId=network_id, ProposalId=proposal_id) + response["Proposal"]["Status"].should.equal("REJECTED") + response["Proposal"]["YesVoteCount"].should.equal(0) + response["Proposal"]["NoVoteCount"].should.equal(1) + response["Proposal"]["OutstandingVoteCount"].should.equal(0) + + +@mock_managedblockchain +def test_vote_on_proposal_yes_greater_than(): + conn = boto3.client("managedblockchain", region_name="us-east-1") + + votingpolicy = { + "ApprovalThresholdPolicy": { + "ThresholdPercentage": 50, + "ProposalDurationInHours": 24, + "ThresholdComparator": 
"GREATER_THAN", + } + } + + # Create network + response = conn.create_network( + Name="testnetwork1", + Framework="HYPERLEDGER_FABRIC", + FrameworkVersion="1.2", + FrameworkConfiguration=helpers.default_frameworkconfiguration, + VotingPolicy=votingpolicy, + MemberConfiguration=helpers.default_memberconfiguration, + ) + network_id = response["NetworkId"] + member_id = response["MemberId"] + + response = conn.create_proposal( + NetworkId=network_id, + MemberId=member_id, + Actions=helpers.default_policy_actions, + ) + + proposal_id = response["ProposalId"] + + # Vote yes + response = conn.vote_on_proposal( + NetworkId=network_id, + ProposalId=proposal_id, + VoterMemberId=member_id, + Vote="YES", + ) + + # Get the invitation + response = conn.list_invitations() + invitation_id = response["Invitations"][0]["InvitationId"] + + # Create the member + response = conn.create_member( + InvitationId=invitation_id, + NetworkId=network_id, + MemberConfiguration=helpers.create_member_configuration( + "testmember2", "admin", "Admin12345", False, "Test Member 2" + ), + ) + member_id2 = response["MemberId"] + + # Create another proposal + response = conn.create_proposal( + NetworkId=network_id, + MemberId=member_id, + Actions=helpers.default_policy_actions, + ) + + proposal_id = response["ProposalId"] + + # Vote yes with member 1 + response = conn.vote_on_proposal( + NetworkId=network_id, + ProposalId=proposal_id, + VoterMemberId=member_id, + Vote="YES", + ) + + # Get proposal details + response = conn.get_proposal(NetworkId=network_id, ProposalId=proposal_id) + response["Proposal"]["NetworkId"].should.equal(network_id) + response["Proposal"]["Status"].should.equal("IN_PROGRESS") + + +@mock_managedblockchain +def test_vote_on_proposal_no_greater_than(): + conn = boto3.client("managedblockchain", region_name="us-east-1") + + votingpolicy = { + "ApprovalThresholdPolicy": { + "ThresholdPercentage": 50, + "ProposalDurationInHours": 24, + "ThresholdComparator": "GREATER_THAN", + } + } + + # Create network + response = conn.create_network( + Name="testnetwork1", + Framework="HYPERLEDGER_FABRIC", + FrameworkVersion="1.2", + FrameworkConfiguration=helpers.default_frameworkconfiguration, + VotingPolicy=votingpolicy, + MemberConfiguration=helpers.default_memberconfiguration, + ) + network_id = response["NetworkId"] + member_id = response["MemberId"] + + response = conn.create_proposal( + NetworkId=network_id, + MemberId=member_id, + Actions=helpers.default_policy_actions, + ) + + proposal_id = response["ProposalId"] + + # Vote yes + response = conn.vote_on_proposal( + NetworkId=network_id, + ProposalId=proposal_id, + VoterMemberId=member_id, + Vote="YES", + ) + + # Get the invitation + response = conn.list_invitations() + invitation_id = response["Invitations"][0]["InvitationId"] + + # Create the member + response = conn.create_member( + InvitationId=invitation_id, + NetworkId=network_id, + MemberConfiguration=helpers.create_member_configuration( + "testmember2", "admin", "Admin12345", False, "Test Member 2" + ), + ) + member_id2 = response["MemberId"] + + # Create another proposal + response = conn.create_proposal( + NetworkId=network_id, + MemberId=member_id, + Actions=helpers.default_policy_actions, + ) + + proposal_id = response["ProposalId"] + + # Vote no with member 1 + response = conn.vote_on_proposal( + NetworkId=network_id, + ProposalId=proposal_id, + VoterMemberId=member_id, + Vote="NO", + ) + + # Vote no with member 2 + response = conn.vote_on_proposal( + NetworkId=network_id, + ProposalId=proposal_id, + 
VoterMemberId=member_id2, + Vote="NO", + ) + + # Get proposal details + response = conn.get_proposal(NetworkId=network_id, ProposalId=proposal_id) + response["Proposal"]["NetworkId"].should.equal(network_id) + response["Proposal"]["Status"].should.equal("REJECTED") + + +@mock_managedblockchain +def test_vote_on_proposal_expiredproposal(): + if os.environ.get("TEST_SERVER_MODE", "false").lower() == "true": + raise SkipTest("Cant manipulate time in server mode") + + votingpolicy = { + "ApprovalThresholdPolicy": { + "ThresholdPercentage": 50, + "ProposalDurationInHours": 1, + "ThresholdComparator": "GREATER_THAN_OR_EQUAL_TO", + } + } + + conn = boto3.client("managedblockchain", region_name="us-east-1") + + with freeze_time("2015-01-01 12:00:00"): + # Create network - need a good network + response = conn.create_network( + Name="testnetwork1", + Framework="HYPERLEDGER_FABRIC", + FrameworkVersion="1.2", + FrameworkConfiguration=helpers.default_frameworkconfiguration, + VotingPolicy=votingpolicy, + MemberConfiguration=helpers.default_memberconfiguration, + ) + network_id = response["NetworkId"] + member_id = response["MemberId"] + + response = conn.create_proposal( + NetworkId=network_id, + MemberId=member_id, + Actions=helpers.default_policy_actions, + ) + + proposal_id = response["ProposalId"] + + with freeze_time("2015-02-01 12:00:00"): + # Vote yes - should set status to expired + response = conn.vote_on_proposal( + NetworkId=network_id, + ProposalId=proposal_id, + VoterMemberId=member_id, + Vote="YES", + ) + + # Get proposal details - should be EXPIRED + response = conn.get_proposal(NetworkId=network_id, ProposalId=proposal_id) + response["Proposal"]["Status"].should.equal("EXPIRED") + + +@mock_managedblockchain +def test_vote_on_proposal_badnetwork(): + conn = boto3.client("managedblockchain", region_name="us-east-1") + + response = conn.vote_on_proposal.when.called_with( + NetworkId="n-ABCDEFGHIJKLMNOP0123456789", + ProposalId="p-ABCDEFGHIJKLMNOP0123456789", + VoterMemberId="m-ABCDEFGHIJKLMNOP0123456789", + Vote="YES", + ).should.throw(Exception, "Network n-ABCDEFGHIJKLMNOP0123456789 not found") + + +@mock_managedblockchain +def test_vote_on_proposal_badproposal(): + conn = boto3.client("managedblockchain", region_name="us-east-1") + + # Create network - need a good network + response = conn.create_network( + Name="testnetwork1", + Framework="HYPERLEDGER_FABRIC", + FrameworkVersion="1.2", + FrameworkConfiguration=helpers.default_frameworkconfiguration, + VotingPolicy=helpers.default_votingpolicy, + MemberConfiguration=helpers.default_memberconfiguration, + ) + network_id = response["NetworkId"] + + response = conn.vote_on_proposal.when.called_with( + NetworkId=network_id, + ProposalId="p-ABCDEFGHIJKLMNOP0123456789", + VoterMemberId="m-ABCDEFGHIJKLMNOP0123456789", + Vote="YES", + ).should.throw(Exception, "Proposal p-ABCDEFGHIJKLMNOP0123456789 not found") + + +@mock_managedblockchain +def test_vote_on_proposal_badmember(): + conn = boto3.client("managedblockchain", region_name="us-east-1") + + # Create network - need a good network + response = conn.create_network( + Name="testnetwork1", + Framework="HYPERLEDGER_FABRIC", + FrameworkVersion="1.2", + FrameworkConfiguration=helpers.default_frameworkconfiguration, + VotingPolicy=helpers.default_votingpolicy, + MemberConfiguration=helpers.default_memberconfiguration, + ) + network_id = response["NetworkId"] + member_id = response["MemberId"] + + response = conn.create_proposal( + NetworkId=network_id, + MemberId=member_id, + 
Actions=helpers.default_policy_actions, + ) + + proposal_id = response["ProposalId"] + + response = conn.vote_on_proposal.when.called_with( + NetworkId=network_id, + ProposalId=proposal_id, + VoterMemberId="m-ABCDEFGHIJKLMNOP0123456789", + Vote="YES", + ).should.throw(Exception, "Member m-ABCDEFGHIJKLMNOP0123456789 not found") + + +@mock_managedblockchain +def test_vote_on_proposal_badvote(): + conn = boto3.client("managedblockchain", region_name="us-east-1") + + # Create network - need a good network + response = conn.create_network( + Name="testnetwork1", + Framework="HYPERLEDGER_FABRIC", + FrameworkVersion="1.2", + FrameworkConfiguration=helpers.default_frameworkconfiguration, + VotingPolicy=helpers.default_votingpolicy, + MemberConfiguration=helpers.default_memberconfiguration, + ) + network_id = response["NetworkId"] + member_id = response["MemberId"] + + response = conn.create_proposal( + NetworkId=network_id, + MemberId=member_id, + Actions=helpers.default_policy_actions, + ) + + proposal_id = response["ProposalId"] + + response = conn.vote_on_proposal.when.called_with( + NetworkId=network_id, + ProposalId=proposal_id, + VoterMemberId=member_id, + Vote="FOO", + ).should.throw(Exception, "Invalid request body") + + +@mock_managedblockchain +def test_vote_on_proposal_alreadyvoted(): + conn = boto3.client("managedblockchain", region_name="us-east-1") + + # Create network - need a good network + response = conn.create_network( + Name="testnetwork1", + Framework="HYPERLEDGER_FABRIC", + FrameworkVersion="1.2", + FrameworkConfiguration=helpers.default_frameworkconfiguration, + VotingPolicy=helpers.default_votingpolicy, + MemberConfiguration=helpers.default_memberconfiguration, + ) + network_id = response["NetworkId"] + member_id = response["MemberId"] + + response = conn.create_proposal( + NetworkId=network_id, + MemberId=member_id, + Actions=helpers.default_policy_actions, + ) + + proposal_id = response["ProposalId"] + + # Vote yes + response = conn.vote_on_proposal( + NetworkId=network_id, + ProposalId=proposal_id, + VoterMemberId=member_id, + Vote="YES", + ) + + # Get the invitation + response = conn.list_invitations() + invitation_id = response["Invitations"][0]["InvitationId"] + + # Create the member + response = conn.create_member( + InvitationId=invitation_id, + NetworkId=network_id, + MemberConfiguration=helpers.create_member_configuration( + "testmember2", "admin", "Admin12345", False, "Test Member 2" + ), + ) + member_id2 = response["MemberId"] + + # Create another proposal + response = conn.create_proposal( + NetworkId=network_id, + MemberId=member_id, + Actions=helpers.default_policy_actions, + ) + + proposal_id = response["ProposalId"] + + # Get proposal details + response = conn.get_proposal(NetworkId=network_id, ProposalId=proposal_id) + response["Proposal"]["NetworkId"].should.equal(network_id) + response["Proposal"]["Status"].should.equal("IN_PROGRESS") + + # Vote yes with member 1 + response = conn.vote_on_proposal( + NetworkId=network_id, + ProposalId=proposal_id, + VoterMemberId=member_id, + Vote="YES", + ) + + # Vote yes with member 1 again + response = conn.vote_on_proposal.when.called_with( + NetworkId=network_id, + ProposalId=proposal_id, + VoterMemberId=member_id, + Vote="YES", + ).should.throw(Exception, "Invalid request body") + + +@mock_managedblockchain +def test_list_proposal_votes_badnetwork(): + conn = boto3.client("managedblockchain", region_name="us-east-1") + + response = conn.list_proposal_votes.when.called_with( + 
NetworkId="n-ABCDEFGHIJKLMNOP0123456789", + ProposalId="p-ABCDEFGHIJKLMNOP0123456789", + ).should.throw(Exception, "Network n-ABCDEFGHIJKLMNOP0123456789 not found") + + +@mock_managedblockchain +def test_list_proposal_votes_badproposal(): + conn = boto3.client("managedblockchain", region_name="us-east-1") + + # Create network + response = conn.create_network( + Name="testnetwork1", + Framework="HYPERLEDGER_FABRIC", + FrameworkVersion="1.2", + FrameworkConfiguration=helpers.default_frameworkconfiguration, + VotingPolicy=helpers.default_votingpolicy, + MemberConfiguration=helpers.default_memberconfiguration, + ) + network_id = response["NetworkId"] + member_id = response["MemberId"] + + response = conn.list_proposal_votes.when.called_with( + NetworkId=network_id, ProposalId="p-ABCDEFGHIJKLMNOP0123456789", + ).should.throw(Exception, "Proposal p-ABCDEFGHIJKLMNOP0123456789 not found") From 8d3d43da90be101216d16330aeacaf7bd1fff6f4 Mon Sep 17 00:00:00 2001 From: usmangani1 Date: Wed, 13 May 2020 16:59:34 +0530 Subject: [PATCH 355/658] =?UTF-8?q?Enhancement=20Adding=20SES=20Functional?= =?UTF-8?q?ities=20CreateTemplate,GetTemplate,Lis=E2=80=A6=20(#2987)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Enhancement Adding SES Functionalities CreateTemplate,GetTemplate,ListTemplates * Linting Co-authored-by: usmankb Co-authored-by: Bert Blommers --- moto/ses/exceptions.py | 16 +++++++++ moto/ses/models.py | 17 +++++++++ moto/ses/responses.py | 61 ++++++++++++++++++++++++++++++++ tests/test_ses/test_ses_boto3.py | 43 ++++++++++++++++++++++ 4 files changed, 137 insertions(+) diff --git a/moto/ses/exceptions.py b/moto/ses/exceptions.py index c154731883ff..7a4ef1b03925 100644 --- a/moto/ses/exceptions.py +++ b/moto/ses/exceptions.py @@ -25,3 +25,19 @@ def __init__(self, message): super(EventDestinationAlreadyExists, self).__init__( "EventDestinationAlreadyExists", message ) + + +class TemplateNameAlreadyExists(RESTError): + code = 400 + + def __init__(self, message): + super(TemplateNameAlreadyExists, self).__init__( + "TemplateNameAlreadyExists", message + ) + + +class TemplateDoesNotExist(RESTError): + code = 400 + + def __init__(self, message): + super(TemplateDoesNotExist, self).__init__("TemplateDoesNotExist", message) diff --git a/moto/ses/models.py b/moto/ses/models.py index d141e25ae8a9..6c3eb219ae3e 100644 --- a/moto/ses/models.py +++ b/moto/ses/models.py @@ -10,6 +10,8 @@ MessageRejectedError, ConfigurationSetDoesNotExist, EventDestinationAlreadyExists, + TemplateNameAlreadyExists, + TemplateDoesNotExist, ) from .utils import get_random_message_id from .feedback import COMMON_MAIL, BOUNCE, COMPLAINT, DELIVERY @@ -91,6 +93,7 @@ def __init__(self): self.config_set = {} self.config_set_event_destination = {} self.event_destinations = {} + self.templates = {} def _is_verified_address(self, source): _, address = parseaddr(source) @@ -277,5 +280,19 @@ def get_send_statistics(self): statistics["Timestamp"] = datetime.datetime.utcnow() return statistics + def add_template(self, template_info): + template_name = template_info["template_name"] + if self.templates.get(template_name, None): + raise TemplateNameAlreadyExists("Duplicate Template Name.") + self.templates[template_name] = template_info + + def get_template(self, template_name): + if not self.templates.get(template_name, None): + raise TemplateDoesNotExist("Invalid Template Name.") + return self.templates[template_name] + + def list_templates(self): + return list(self.templates.values()) + 
 ses_backend = SESBackend()
diff --git a/moto/ses/responses.py b/moto/ses/responses.py
index 8c9dc8f75abb..f0780e98a3c3 100644
--- a/moto/ses/responses.py
+++ b/moto/ses/responses.py
@@ -5,6 +5,7 @@
 from moto.core.responses import BaseResponse
 from .models import ses_backend
+from datetime import datetime


 class EmailResponse(BaseResponse):
@@ -175,6 +176,29 @@ def create_configuration_set_event_destination(self):
         template = self.response_template(CREATE_CONFIGURATION_SET_EVENT_DESTINATION)
         return template.render()

+    def create_template(self):
+        template_data = self._get_dict_param("Template")
+        template_info = {}
+        template_info["text_part"] = template_data["._text_part"]
+        template_info["html_part"] = template_data["._html_part"]
+        template_info["template_name"] = template_data["._name"]
+        template_info["subject_part"] = template_data["._subject_part"]
+        template_info["Timestamp"] = datetime.utcnow()
+        ses_backend.add_template(template_info=template_info)
+        template = self.response_template(CREATE_TEMPLATE)
+        return template.render()
+
+    def get_template(self):
+        template_name = self._get_param("TemplateName")
+        template_data = ses_backend.get_template(template_name)
+        template = self.response_template(GET_TEMPLATE)
+        return template.render(template_data=template_data)
+
+    def list_templates(self):
+        email_templates = ses_backend.list_templates()
+        template = self.response_template(LIST_TEMPLATES)
+        return template.render(templates=email_templates)
+

 VERIFY_EMAIL_IDENTITY = """<VerifyEmailIdentityResponse xmlns="http://ses.amazonaws.com/doc/2010-12-01/">
@@ -324,3 +348,40 @@ def create_configuration_set_event_destination(self):
     <RequestId>67e0ef1a-9bf2-11e1-9279-0100e8cf109a</RequestId>
   </ResponseMetadata>
 </CreateConfigurationSetEventDestinationResponse>"""
+
+CREATE_TEMPLATE = """<CreateTemplateResponse xmlns="http://ses.amazonaws.com/doc/2010-12-01/">
+    <CreateTemplateResult/>
+    <ResponseMetadata>
+        <RequestId>47e0ef1a-9bf2-11e1-9279-0100e8cf12ba</RequestId>
+    </ResponseMetadata>
+</CreateTemplateResponse>"""
+
+GET_TEMPLATE = """<GetTemplateResponse xmlns="http://ses.amazonaws.com/doc/2010-12-01/">
+    <GetTemplateResult>
+        <Template>
+            <TemplateName>{{ template_data["template_name"] }}</TemplateName>
+            <SubjectPart>{{ template_data["subject_part"] }}</SubjectPart>
+        </Template>
+    </GetTemplateResult>
+    <ResponseMetadata>
+        <RequestId>47e0ef1a-9bf2-11e1-9279-0100e8cf12ba</RequestId>
+    </ResponseMetadata>
+</GetTemplateResponse>"""
+
+LIST_TEMPLATES = """<ListTemplatesResponse xmlns="http://ses.amazonaws.com/doc/2010-12-01/">
+    <ListTemplatesResult>
+        <TemplatesMetadata>
+            {% for template in templates %}
+            <member>
+                <Name>{{ template["template_name"] }}</Name>
+                <CreatedTimestamp>{{ template["Timestamp"] }}</CreatedTimestamp>
+            </member>
+            {% endfor %}
+        </TemplatesMetadata>
+    </ListTemplatesResult>
+    <ResponseMetadata>
+        <RequestId>47e0ef1a-9bf2-11e1-9279-0100e8cf12ba</RequestId>
+    </ResponseMetadata>
+</ListTemplatesResponse>"""
diff --git a/tests/test_ses/test_ses_boto3.py b/tests/test_ses/test_ses_boto3.py
index 0e6bb9bea18f..a94612077b9b 100644
--- a/tests/test_ses/test_ses_boto3.py
+++ b/tests/test_ses/test_ses_boto3.py
@@ -277,3 +277,46 @@ def test_create_configuration_set():
     )

     ex.exception.response["Error"]["Code"].should.equal("EventDestinationAlreadyExists")
+
+
+@mock_ses
+def test_create_ses_template():
+    conn = boto3.client("ses", region_name="us-east-1")
+
+    conn.create_template(
+        Template={
+            "TemplateName": "MyTemplate",
+            "SubjectPart": "Greetings, {{name}}!",
+            "TextPart": "Dear {{name}},"
+            "\r\nYour favorite animal is {{favoriteanimal}}.",
+            "HtmlPart": "<h1>Hello {{name}},"
+            "</h1><p>Your favorite animal is {{favoriteanimal}}.</p>",
+        }
+    )
+    with assert_raises(ClientError) as ex:
+        conn.create_template(
+            Template={
+                "TemplateName": "MyTemplate",
+                "SubjectPart": "Greetings, {{name}}!",
+                "TextPart": "Dear {{name}},"
+                "\r\nYour favorite animal is {{favoriteanimal}}.",
+                "HtmlPart": "<h1>Hello {{name}},"
+                "</h1><p>Your favorite animal is {{favoriteanimal}}.</p>
", + } + ) + + ex.exception.response["Error"]["Code"].should.equal("TemplateNameAlreadyExists") + + # get a template which is already added + result = conn.get_template(TemplateName="MyTemplate") + result["Template"]["TemplateName"].should.equal("MyTemplate") + result["Template"]["SubjectPart"].should.equal("Greetings, {{name}}!") + + # get a template which is not present + with assert_raises(ClientError) as ex: + conn.get_template(TemplateName="MyFakeTemplate") + + ex.exception.response["Error"]["Code"].should.equal("TemplateDoesNotExist") + + result = conn.list_templates() + result["TemplatesMetadata"][0]["Name"].should.equal("MyTemplate") From 93311dbd4be7f216d9ba019b238d11324bb1f5af Mon Sep 17 00:00:00 2001 From: James Belleau Date: Fri, 15 May 2020 19:38:19 -0500 Subject: [PATCH 356/658] Added node actions and other fixes --- moto/managedblockchain/exceptions.py | 13 +- moto/managedblockchain/models.py | 371 ++++++++++++-- moto/managedblockchain/responses.py | 101 ++++ moto/managedblockchain/urls.py | 3 + moto/managedblockchain/utils.py | 29 ++ tests/test_managedblockchain/helpers.py | 20 + .../test_managedblockchain_invitations.py | 1 - .../test_managedblockchain_members.py | 10 +- .../test_managedblockchain_networks.py | 1 - .../test_managedblockchain_nodes.py | 477 ++++++++++++++++++ .../test_managedblockchain_proposals.py | 1 - .../test_managedblockchain_proposalvotes.py | 151 +++++- 12 files changed, 1121 insertions(+), 57 deletions(-) create mode 100644 tests/test_managedblockchain/test_managedblockchain_nodes.py diff --git a/moto/managedblockchain/exceptions.py b/moto/managedblockchain/exceptions.py index 456eabc0581b..4735389ae89c 100644 --- a/moto/managedblockchain/exceptions.py +++ b/moto/managedblockchain/exceptions.py @@ -31,7 +31,18 @@ def __init__(self, pretty_called_method, operation_error): self.code = 404 super(ResourceNotFoundException, self).__init__( "ResourceNotFoundException", - "An error occurred (BadRequestException) when calling the {0} operation: {1}".format( + "An error occurred (ResourceNotFoundException) when calling the {0} operation: {1}".format( + pretty_called_method, operation_error + ), + ) + + +class ResourceAlreadyExistsException(ManagedBlockchainClientError): + def __init__(self, pretty_called_method, operation_error): + self.code = 409 + super(ResourceAlreadyExistsException, self).__init__( + "ResourceAlreadyExistsException", + "An error occurred (ResourceAlreadyExistsException) when calling the {0} operation: {1}".format( pretty_called_method, operation_error ), ) diff --git a/moto/managedblockchain/models.py b/moto/managedblockchain/models.py index 034e45d350ec..233e875c3203 100644 --- a/moto/managedblockchain/models.py +++ b/moto/managedblockchain/models.py @@ -12,6 +12,7 @@ ResourceNotFoundException, InvalidRequestException, ResourceLimitExceededException, + ResourceAlreadyExistsException, ) from .utils import ( @@ -22,6 +23,9 @@ member_name_exist_in_network, number_of_members_in_network, admin_password_ok, + get_node_id, + number_of_nodes_in_member, + nodes_in_member, ) FRAMEWORKS = [ @@ -212,6 +216,10 @@ def proposal_actions(self, action_type): return self.actions["Removals"] return default_return + def check_to_expire_proposal(self): + if datetime.datetime.utcnow() > self.expirtationdate: + self.status = "EXPIRED" + def to_dict(self): # Format for list_proposals d = { @@ -244,10 +252,6 @@ def get_format(self): return d def set_vote(self, votermemberid, votermembername, vote): - if datetime.datetime.utcnow() > self.expirtationdate: - 
self.status = "EXPIRED" - return False - if vote.upper() == "YES": self.yes_vote_count += 1 else: @@ -273,7 +277,14 @@ def set_vote(self, votermemberid, votermembername, vote): elif perct_no > self.network_threshold: self.status = "REJECTED" - return True + # It is a tie - reject + if ( + self.status == "IN_PROGRESS" + and self.network_threshold_comp == "GREATER_THAN" + and self.outstanding_vote_count == 0 + and perct_yes == perct_no + ): + self.status = "REJECTED" class ManagedBlockchainInvitation(BaseModel): @@ -413,12 +424,92 @@ def update(self, logpublishingconfiguration): ] = logpublishingconfiguration +class ManagedBlockchainNode(BaseModel): + def __init__( + self, + id, + networkid, + memberid, + availabilityzone, + instancetype, + logpublishingconfiguration, + region, + ): + self.creationdate = datetime.datetime.utcnow() + self.id = id + self.instancetype = instancetype + self.networkid = networkid + self.memberid = memberid + self.logpublishingconfiguration = logpublishingconfiguration + self.region = region + self.status = "AVAILABLE" + self.availabilityzone = availabilityzone + + @property + def member_id(self): + return self.memberid + + @property + def node_status(self): + return self.status + + def to_dict(self): + # Format for list_nodes + d = { + "Id": self.id, + "Status": self.status, + "CreationDate": self.creationdate.strftime("%Y-%m-%dT%H:%M:%S.%f%z"), + "AvailabilityZone": self.availabilityzone, + "InstanceType": self.instancetype, + } + return d + + def get_format(self): + # Format for get_node + frameworkattributes = { + "Fabric": { + "PeerEndpoint": "{0}.{1}.{2}.managedblockchain.{3}.amazonaws.com:30003".format( + self.id.lower(), + self.networkid.lower(), + self.memberid.lower(), + self.region, + ), + "PeerEventEndpoint": "{0}.{1}.{2}.managedblockchain.{3}.amazonaws.com:30004".format( + self.id.lower(), + self.networkid.lower(), + self.memberid.lower(), + self.region, + ), + } + } + + d = { + "NetworkId": self.networkid, + "MemberId": self.memberid, + "Id": self.id, + "InstanceType": self.instancetype, + "AvailabilityZone": self.availabilityzone, + "FrameworkAttributes": frameworkattributes, + "LogPublishingConfiguration": self.logpublishingconfiguration, + "Status": self.status, + "CreationDate": self.creationdate.strftime("%Y-%m-%dT%H:%M:%S.%f%z"), + } + return d + + def delete(self): + self.status = "DELETED" + + def update(self, logpublishingconfiguration): + self.logpublishingconfiguration = logpublishingconfiguration + + class ManagedBlockchainBackend(BaseBackend): def __init__(self, region_name): self.networks = {} self.members = {} self.proposals = {} self.invitations = {} + self.nodes = {} self.region_name = region_name def reset(self): @@ -453,10 +544,10 @@ def create_network( if frameworkconfiguration["Fabric"]["Edition"] not in EDITIONS: raise BadRequestException("CreateNetwork", "Invalid request body") - ## Generate network ID + # Generate network ID network_id = get_network_id() - ## Generate memberid ID and initial member + # Generate memberid ID and initial member member_id = get_member_id() self.members[member_id] = ManagedBlockchainMember( id=member_id, @@ -524,7 +615,7 @@ def create_proposal( "Member ID format specified in proposal is not valid.", ) - ## Generate proposal ID + # Generate proposal ID proposal_id = get_proposal_id() self.proposals[proposal_id] = ManagedBlockchainProposal( @@ -558,6 +649,8 @@ def list_proposals(self, networkid): proposalsfornetwork = [] for proposal_id in self.proposals: if self.proposals.get(proposal_id).network_id 
== networkid: + # See if any are expired + self.proposals.get(proposal_id).check_to_expire_proposal() + proposalsfornetwork.append(self.proposals[proposal_id]) return proposalsfornetwork @@ -572,6 +665,9 @@ raise ResourceNotFoundException( "GetProposal", "Proposal {0} not found.".format(proposalid) ) + + # See if it needs to be set to expired + self.proposals.get(proposalid).check_to_expire_proposal() return self.proposals.get(proposalid) def vote_on_proposal(self, networkid, proposalid, votermemberid, vote): @@ -594,43 +690,65 @@ if vote.upper() not in VOTEVALUES: raise BadRequestException("VoteOnProposal", "Invalid request body") + # See if it needs to be set to expired + self.proposals.get(proposalid).check_to_expire_proposal() + + # Exception if EXPIRED + if self.proposals.get(proposalid).proposal_status == "EXPIRED": + raise InvalidRequestException( + "VoteOnProposal", + "Proposal {0} is expired and you cannot vote on it.".format(proposalid), + ) + + # Check if IN_PROGRESS + if self.proposals.get(proposalid).proposal_status != "IN_PROGRESS": + raise InvalidRequestException( + "VoteOnProposal", + "Proposal {0} has status {1} and you cannot vote on it.".format( + proposalid, self.proposals.get(proposalid).proposal_status + ), + ) + # Check to see if this member already voted - # TODO Verify exception if votermemberid in self.proposals.get(proposalid).proposal_votes: - raise BadRequestException("VoteOnProposal", "Invalid request body") + raise ResourceAlreadyExistsException( + "VoteOnProposal", + "Member {0} has already voted on proposal {1}.".format( + votermemberid, proposalid + ), + ) - # Will return false if vote was not cast (e.g., status wrong) - if self.proposals.get(proposalid).set_vote( + # Cast vote + self.proposals.get(proposalid).set_vote( votermemberid, self.members.get(votermemberid).name, vote.upper() - ): - if self.proposals.get(proposalid).proposal_status == "APPROVED": - ## Generate invitations - for propinvitation in self.proposals.get(proposalid).proposal_actions( - "Invitations" - ): - invitation_id = get_invitation_id() - self.invitations[invitation_id] = ManagedBlockchainInvitation( - id=invitation_id, - networkid=networkid, - networkname=self.networks.get(networkid).network_name, - networkframework=self.networks.get(networkid).network_framework, - networkframeworkversion=self.networks.get( - networkid - ).network_framework_version, - networkcreationdate=self.networks.get( - networkid - ).network_creationdate, - region=self.region_name, - networkdescription=self.networks.get( - networkid - ).network_description, - ) + ) - ## Delete members - for propmember in self.proposals.get(proposalid).proposal_actions( - "Removals" - ): - self.delete_member(networkid, propmember["MemberId"]) + if self.proposals.get(proposalid).proposal_status == "APPROVED": + # Generate invitations + for propinvitation in self.proposals.get(proposalid).proposal_actions( + "Invitations" + ): + invitation_id = get_invitation_id() + self.invitations[invitation_id] = ManagedBlockchainInvitation( + id=invitation_id, + networkid=networkid, + networkname=self.networks.get(networkid).network_name, + networkframework=self.networks.get(networkid).network_framework, + networkframeworkversion=self.networks.get( + networkid + ).network_framework_version, + networkcreationdate=self.networks.get( + networkid + ).network_creationdate, + region=self.region_name, + 
networkdescription=self.networks.get(networkid).network_description, + ) + + # Delete members + for propmember in self.proposals.get(proposalid).proposal_actions( + "Removals" + ): + self.delete_member(networkid, propmember["MemberId"]) def list_proposal_votes(self, networkid, proposalid): # Check if network exists @@ -754,7 +872,7 @@ def get_member(self, networkid, memberid): "GetMember", "Member {0} not found.".format(memberid) ) - ## Cannot get a member than has been delted (it does show up in the list) + # Cannot get a member that has been deleted (it does show up in the list) @@ -791,6 +909,10 @@ def delete_member(self, networkid, memberid): # Remove network del self.networks[networkid] + # Remove any nodes associated + for nodeid in nodes_in_member(self.nodes, memberid): + del self.nodes[nodeid] + def update_member(self, networkid, memberid, logpublishingconfiguration): # Check if network exists @@ -805,6 +927,173 @@ self.members.get(memberid).update(logpublishingconfiguration) + def create_node( + self, + networkid, + memberid, + availabilityzone, + instancetype, + logpublishingconfiguration, + ): + # Check if network exists + if networkid not in self.networks: + raise ResourceNotFoundException( + "CreateNode", "Network {0} not found.".format(networkid) + ) + + if memberid not in self.members: + raise ResourceNotFoundException( + "CreateNode", "Member {0} not found.".format(memberid) + ) + + networkedition = self.networks.get(networkid).network_edition + if ( + number_of_nodes_in_member(self.nodes, memberid) + >= EDITIONS[networkedition]["MaxNodesPerMember"] + ): + raise ResourceLimitExceededException( + "CreateNode", + "Maximum number of nodes exceeded in member {0}. 
The maximum number of nodes you can have in a member in a {1} Edition network is {2}".format( + memberid, + networkedition, + EDITIONS[networkedition]["MaxNodesPerMember"], + ), + ) + + # See if the instance family is correct + correctinstancefamily = False + for chkinsttypepre in EDITIONS["STANDARD"]["AllowedNodeInstanceTypes"]: + chkinsttypepreregex = chkinsttypepre + ".*" + if re.match(chkinsttypepreregex, instancetype, re.IGNORECASE): + correctinstancefamily = True + break + + if correctinstancefamily is False: + raise InvalidRequestException( + "CreateNode", + "Requested instance {0} isn't supported.".format(instancetype), + ) + + # Check for specific types for starter + if networkedition == "STARTER": + if instancetype not in EDITIONS["STARTER"]["AllowedNodeInstanceTypes"]: + raise InvalidRequestException( + "CreateNode", + "Instance type {0} is not supported with STARTER Edition networks.".format( + instancetype + ), + ) + + # Simple availability zone check + chkregionpreregex = self.region_name + "[a-z]" + if re.match(chkregionpreregex, availabilityzone, re.IGNORECASE) is None: + raise InvalidRequestException( + "CreateNode", "Availability Zone is not valid", + ) + + node_id = get_node_id() + self.nodes[node_id] = ManagedBlockchainNode( + id=node_id, + networkid=networkid, + memberid=memberid, + availabilityzone=availabilityzone, + instancetype=instancetype, + logpublishingconfiguration=logpublishingconfiguration, + region=self.region_name, + ) + + # Return the node ID + d = {"NodeId": node_id} + return d + + def list_nodes(self, networkid, memberid, status=None): + if networkid not in self.networks: + raise ResourceNotFoundException( + "ListNodes", "Network {0} not found.".format(networkid) + ) + + if memberid not in self.members: + raise ResourceNotFoundException( + "ListNodes", "Member {0} not found.".format(memberid) + ) + + # If member is deleted, cannot list nodes + if self.members.get(memberid).member_status == "DELETED": + raise ResourceNotFoundException( + "ListNodes", "Member {0} not found.".format(memberid) + ) + + nodesformember = [] + for node_id in self.nodes: + if self.nodes.get(node_id).member_id == memberid and ( + status is None or self.nodes.get(node_id).node_status == status + ): + nodesformember.append(self.nodes[node_id]) + return nodesformember + + def get_node(self, networkid, memberid, nodeid): + # Check if network exists + if networkid not in self.networks: + raise ResourceNotFoundException( + "GetNode", "Network {0} not found.".format(networkid) + ) + + if memberid not in self.members: + raise ResourceNotFoundException( + "GetNode", "Member {0} not found.".format(memberid) + ) + + if nodeid not in self.nodes: + raise ResourceNotFoundException( + "GetNode", "Node {0} not found.".format(nodeid) + ) + + # Cannot get a node that has been deleted (it does show up in the list) + if self.nodes.get(nodeid).node_status == "DELETED": + raise ResourceNotFoundException( + "GetNode", "Node {0} not found.".format(nodeid) + ) + + return self.nodes.get(nodeid) + + def delete_node(self, networkid, memberid, nodeid): + # Check if network exists + if networkid not in self.networks: + raise ResourceNotFoundException( + "DeleteNode", "Network {0} not found.".format(networkid) + ) + + if memberid not in self.members: + raise ResourceNotFoundException( + "DeleteNode", "Member {0} not found.".format(memberid) + ) + + if nodeid not in self.nodes: + raise ResourceNotFoundException( + "DeleteNode", "Node {0} not found.".format(nodeid) + ) + + self.nodes.get(nodeid).delete() + + def 
update_node(self, networkid, memberid, nodeid, logpublishingconfiguration): + # Check if network exists + if networkid not in self.networks: + raise ResourceNotFoundException( + "UpdateNode", "Network {0} not found.".format(networkid) + ) + + if memberid not in self.members: + raise ResourceNotFoundException( + "UpdateNode", "Member {0} not found.".format(memberid) + ) + + if nodeid not in self.nodes: + raise ResourceNotFoundException( + "UpdateNode", "Node {0} not found.".format(nodeid) + ) + + self.nodes.get(nodeid).update(logpublishingconfiguration) + managedblockchain_backends = {} for region in Session().get_available_regions("managedblockchain"): diff --git a/moto/managedblockchain/responses.py b/moto/managedblockchain/responses.py index 34206b3c444b..7dd628eba4ba 100644 --- a/moto/managedblockchain/responses.py +++ b/moto/managedblockchain/responses.py @@ -11,6 +11,7 @@ proposalid_from_managedblockchain_url, invitationid_from_managedblockchain_url, memberid_from_managedblockchain_url, + nodeid_from_managedblockchain_url, ) @@ -324,3 +325,103 @@ def _memberid_response_delete(self, network_id, member_id, headers): self.backend.delete_member(network_id, member_id) headers["content-type"] = "application/json" return 200, headers, "" + + @classmethod + def node_response(clazz, request, full_url, headers): + region_name = region_from_managedblckchain_url(full_url) + response_instance = ManagedBlockchainResponse( + managedblockchain_backends[region_name] + ) + return response_instance._node_response(request, full_url, headers) + + def _node_response(self, request, full_url, headers): + method = request.method + if hasattr(request, "body"): + body = request.body + else: + body = request.data + parsed_url = urlparse(full_url) + querystring = parse_qs(parsed_url.query, keep_blank_values=True) + network_id = networkid_from_managedblockchain_url(full_url) + member_id = memberid_from_managedblockchain_url(full_url) + if method == "GET": + status = None + if "status" in querystring: + status = querystring["status"][0] + return self._all_nodes_response(network_id, member_id, status, headers) + elif method == "POST": + json_body = json.loads(body.decode("utf-8")) + return self._node_response_post( + network_id, member_id, json_body, querystring, headers + ) + + def _all_nodes_response(self, network_id, member_id, status, headers): + nodes = self.backend.list_nodes(network_id, member_id, status) + response = json.dumps({"Nodes": [node.to_dict() for node in nodes]}) + headers["content-type"] = "application/json" + return 200, headers, response + + def _node_response_post( + self, network_id, member_id, json_body, querystring, headers + ): + instancetype = json_body["NodeConfiguration"]["InstanceType"] + availabilityzone = json_body["NodeConfiguration"]["AvailabilityZone"] + logpublishingconfiguration = json_body["NodeConfiguration"][ + "LogPublishingConfiguration" + ] + + response = self.backend.create_node( + network_id, + member_id, + availabilityzone, + instancetype, + logpublishingconfiguration, + ) + return 200, headers, json.dumps(response) + + @classmethod + def nodeid_response(clazz, request, full_url, headers): + region_name = region_from_managedblckchain_url(full_url) + response_instance = ManagedBlockchainResponse( + managedblockchain_backends[region_name] + ) + return response_instance._nodeid_response(request, full_url, headers) + + def _nodeid_response(self, request, full_url, headers): + method = request.method + if hasattr(request, "body"): + body = request.body + else: + body = 
request.data + network_id = networkid_from_managedblockchain_url(full_url) + member_id = memberid_from_managedblockchain_url(full_url) + node_id = nodeid_from_managedblockchain_url(full_url) + if method == "GET": + return self._nodeid_response_get(network_id, member_id, node_id, headers) + elif method == "PATCH": + json_body = json.loads(body.decode("utf-8")) + return self._nodeid_response_patch( + network_id, member_id, node_id, json_body, headers + ) + elif method == "DELETE": + return self._nodeid_response_delete(network_id, member_id, node_id, headers) + + def _nodeid_response_get(self, network_id, member_id, node_id, headers): + node = self.backend.get_node(network_id, member_id, node_id) + response = json.dumps({"Node": node.get_format()}) + headers["content-type"] = "application/json" + return 200, headers, response + + def _nodeid_response_patch( + self, network_id, member_id, node_id, json_body, headers + ): + logpublishingconfiguration = json_body + self.backend.update_node( + network_id, member_id, node_id, logpublishingconfiguration, + ) + return 200, headers, "" + + def _nodeid_response_delete(self, network_id, member_id, node_id, headers): + self.backend.delete_node(network_id, member_id, node_id) + headers["content-type"] = "application/json" + return 200, headers, "" diff --git a/moto/managedblockchain/urls.py b/moto/managedblockchain/urls.py index c7d191aab9f0..442a732335d9 100644 --- a/moto/managedblockchain/urls.py +++ b/moto/managedblockchain/urls.py @@ -13,4 +13,7 @@ "{0}/invitations/(?P<invitationid>[^/.]+)$": ManagedBlockchainResponse.invitationid_response, "{0}/networks/(?P<networkid>[^/.]+)/members$": ManagedBlockchainResponse.member_response, "{0}/networks/(?P<networkid>[^/.]+)/members/(?P<memberid>[^/.]+)$": ManagedBlockchainResponse.memberid_response, + "{0}/networks/(?P<networkid>[^/.]+)/members/(?P<memberid>[^/.]+)/nodes$": ManagedBlockchainResponse.node_response, + "{0}/networks/(?P<networkid>[^/.]+)/members/(?P<memberid>[^/.]+)/nodes?(?P<querys>[^/.]+)$": ManagedBlockchainResponse.node_response, + "{0}/networks/(?P<networkid>[^/.]+)/members/(?P<memberid>[^/.]+)/nodes/(?P<nodeid>[^/.]+)$": ManagedBlockchainResponse.nodeid_response, } diff --git a/moto/managedblockchain/utils.py b/moto/managedblockchain/utils.py index ea8f505135e5..c8118619eadb 100644 --- a/moto/managedblockchain/utils.py +++ b/moto/managedblockchain/utils.py @@ -104,3 +104,32 @@ def admin_password_ok(password): return False else: return True + + +def nodeid_from_managedblockchain_url(full_url): + id_search = re.search("\/nd-[A-Z0-9]{26}", full_url, re.IGNORECASE) + return_id = None + if id_search: + return_id = id_search.group(0).replace("/", "") + return return_id + + +def get_node_id(): + return "nd-" + "".join( + random.choice(string.ascii_uppercase + string.digits) for _ in range(26) + ) + + +def number_of_nodes_in_member(nodes, memberid, node_status=None): + return len( + [ + nodid + for nodid in nodes + if nodes.get(nodid).member_id == memberid + and (node_status is None or nodes.get(nodid).node_status == node_status) + ] + ) + + +def nodes_in_member(nodes, memberid): + return [nodid for nodid in nodes if nodes.get(nodid).member_id == memberid] diff --git a/tests/test_managedblockchain/helpers.py b/tests/test_managedblockchain/helpers.py index 38c13b512d66..f8c6d29b99cd 100644 --- a/tests/test_managedblockchain/helpers.py +++ b/tests/test_managedblockchain/helpers.py @@ -28,6 +28,17 @@ "Invitations": [{"Principal": "123456789012"}, {"Principal": "123456789013"}] } +default_nodeconfiguration = { + "InstanceType": "bc.t3.small", + "AvailabilityZone": "us-east-1a", + "LogPublishingConfiguration": { + "Fabric": { 
+ "ChaincodeLogs": {"Cloudwatch": {"Enabled": False}}, + "PeerLogs": {"Cloudwatch": {"Enabled": False}}, + } + }, +} + def member_id_exist_in_list(members, memberid): memberidxists = False @@ -65,3 +76,12 @@ def select_invitation_id_for_network(invitations, networkid, status=None): if status is None or invitation["Status"] == status: invitationsfornetwork.append(invitation["InvitationId"]) return invitationsfornetwork + + +def node_id_exist_in_list(nodes, nodeid): + nodeidxists = False + for node in nodes: + if node["Id"] == nodeid: + nodeidxists = True + break + return nodeidxists diff --git a/tests/test_managedblockchain/test_managedblockchain_invitations.py b/tests/test_managedblockchain/test_managedblockchain_invitations.py index 81b20a9ba1cf..0f70d7f886a7 100644 --- a/tests/test_managedblockchain/test_managedblockchain_invitations.py +++ b/tests/test_managedblockchain/test_managedblockchain_invitations.py @@ -3,7 +3,6 @@ import boto3 import sure # noqa -from moto.managedblockchain.exceptions import BadRequestException from moto import mock_managedblockchain from . import helpers diff --git a/tests/test_managedblockchain/test_managedblockchain_members.py b/tests/test_managedblockchain/test_managedblockchain_members.py index 76d29dd5509d..9120e4aee42c 100644 --- a/tests/test_managedblockchain/test_managedblockchain_members.py +++ b/tests/test_managedblockchain/test_managedblockchain_members.py @@ -3,7 +3,6 @@ import boto3 import sure # noqa -from moto.managedblockchain.exceptions import BadRequestException from moto import mock_managedblockchain from . import helpers @@ -204,7 +203,7 @@ def test_create_another_member_withopts(): @mock_managedblockchain -def test_create_and_delete_member(): +def test_invite_and_remove_member(): conn = boto3.client("managedblockchain", region_name="us-east-1") # Create network @@ -362,17 +361,14 @@ def test_create_too_many_members(): response["Invitations"], network_id, "PENDING" )[0] - # Try to create member with already used invitation + # Try to create one too many members response = conn.create_member.when.called_with( InvitationId=invitation_id, NetworkId=network_id, MemberConfiguration=helpers.create_member_configuration( "testmember6", "admin", "Admin12345", False, "Test Member 6" ), - ).should.throw( - Exception, - "5 is the maximum number of members allowed in a STARTER Edition network", - ) + ).should.throw(Exception, "is the maximum number of members allowed in a",) @mock_managedblockchain diff --git a/tests/test_managedblockchain/test_managedblockchain_networks.py b/tests/test_managedblockchain/test_managedblockchain_networks.py index 4e15790175ea..c2a33298385f 100644 --- a/tests/test_managedblockchain/test_managedblockchain_networks.py +++ b/tests/test_managedblockchain/test_managedblockchain_networks.py @@ -3,7 +3,6 @@ import boto3 import sure # noqa -from moto.managedblockchain.exceptions import BadRequestException from moto import mock_managedblockchain from . import helpers diff --git a/tests/test_managedblockchain/test_managedblockchain_nodes.py b/tests/test_managedblockchain/test_managedblockchain_nodes.py new file mode 100644 index 000000000000..32a5bc62c5d2 --- /dev/null +++ b/tests/test_managedblockchain/test_managedblockchain_nodes.py @@ -0,0 +1,477 @@ +from __future__ import unicode_literals + +import boto3 +import sure # noqa + +from moto import mock_managedblockchain +from . 
import helpers + + +@mock_managedblockchain +def test_create_node(): + conn = boto3.client("managedblockchain", region_name="us-east-1") + + # Create network + response = conn.create_network( + Name="testnetwork1", + Description="Test Network 1", + Framework="HYPERLEDGER_FABRIC", + FrameworkVersion="1.2", + FrameworkConfiguration=helpers.default_frameworkconfiguration, + VotingPolicy=helpers.default_votingpolicy, + MemberConfiguration=helpers.default_memberconfiguration, + ) + network_id = response["NetworkId"] + member_id = response["MemberId"] + + # Create a node + response = conn.create_node( + NetworkId=network_id, + MemberId=member_id, + NodeConfiguration=helpers.default_nodeconfiguration, + ) + node_id = response["NodeId"] + + # Find node in full list + response = conn.list_nodes(NetworkId=network_id, MemberId=member_id) + nodes = response["Nodes"] + nodes.should.have.length_of(1) + helpers.node_id_exist_in_list(nodes, node_id).should.equal(True) + + # Get node details + response = conn.get_node(NetworkId=network_id, MemberId=member_id, NodeId=node_id) + response["Node"]["AvailabilityZone"].should.equal("us-east-1a") + + # Update node + logconfignewenabled = not helpers.default_nodeconfiguration[ + "LogPublishingConfiguration" + ]["Fabric"]["ChaincodeLogs"]["Cloudwatch"]["Enabled"] + logconfignew = { + "Fabric": {"ChaincodeLogs": {"Cloudwatch": {"Enabled": logconfignewenabled}}} + } + conn.update_node( + NetworkId=network_id, + MemberId=member_id, + NodeId=node_id, + LogPublishingConfiguration=logconfignew, + ) + + # Delete node + conn.delete_node( + NetworkId=network_id, MemberId=member_id, NodeId=node_id, + ) + + # Find node in full list + response = conn.list_nodes(NetworkId=network_id, MemberId=member_id) + nodes = response["Nodes"] + nodes.should.have.length_of(1) + helpers.node_id_exist_in_list(nodes, node_id).should.equal(True) + + # Find node in full list - only DELETED + response = conn.list_nodes( + NetworkId=network_id, MemberId=member_id, Status="DELETED" + ) + nodes = response["Nodes"] + nodes.should.have.length_of(1) + helpers.node_id_exist_in_list(nodes, node_id).should.equal(True) + + # But cannot get + response = conn.get_node.when.called_with( + NetworkId=network_id, MemberId=member_id, NodeId=node_id, + ).should.throw(Exception, "Node {0} not found".format(node_id)) + + +@mock_managedblockchain +def test_create_node_standard_edition(): + conn = boto3.client("managedblockchain", region_name="us-east-1") + + frameworkconfiguration = {"Fabric": {"Edition": "STANDARD"}} + + response = conn.create_network( + Name="testnetwork1", + Description="Test Network 1", + Framework="HYPERLEDGER_FABRIC", + FrameworkVersion="1.2", + FrameworkConfiguration=frameworkconfiguration, + VotingPolicy=helpers.default_votingpolicy, + MemberConfiguration=helpers.default_memberconfiguration, + ) + network_id = response["NetworkId"] + member_id = response["MemberId"] + + # Instance type only allowed with standard edition + logconfigbad = dict(helpers.default_nodeconfiguration) + logconfigbad["InstanceType"] = "bc.t3.large" + response = conn.create_node( + NetworkId=network_id, MemberId=member_id, NodeConfiguration=logconfigbad, + ) + node_id = response["NodeId"] + + # Get node details + response = conn.get_node(NetworkId=network_id, MemberId=member_id, NodeId=node_id) + response["Node"]["InstanceType"].should.equal("bc.t3.large") + + # Need another member so the network does not get deleted + # Create proposal + response = conn.create_proposal( + NetworkId=network_id, + MemberId=member_id, + 
Actions=helpers.default_policy_actions, + ) + proposal_id = response["ProposalId"] + + # Vote yes + response = conn.vote_on_proposal( + NetworkId=network_id, + ProposalId=proposal_id, + VoterMemberId=member_id, + Vote="YES", + ) + + # Get the invitation + response = conn.list_invitations() + invitation_id = response["Invitations"][0]["InvitationId"] + + # Create the member + response = conn.create_member( + InvitationId=invitation_id, + NetworkId=network_id, + MemberConfiguration=helpers.create_member_configuration( + "testmember2", "admin", "Admin12345", False, "Test Member 2" + ), + ) + + # Remove member 1 - should remove nodes + conn.delete_member(NetworkId=network_id, MemberId=member_id) + + # Should now be an exception + response = conn.list_nodes.when.called_with( + NetworkId=network_id, MemberId=member_id, + ).should.throw(Exception, "Member {0} not found".format(member_id)) + + +@mock_managedblockchain +def test_create_too_many_nodes(): + conn = boto3.client("managedblockchain", region_name="us-east-1") + + # Create network + response = conn.create_network( + Name="testnetwork1", + Description="Test Network 1", + Framework="HYPERLEDGER_FABRIC", + FrameworkVersion="1.2", + FrameworkConfiguration=helpers.default_frameworkconfiguration, + VotingPolicy=helpers.default_votingpolicy, + MemberConfiguration=helpers.default_memberconfiguration, + ) + network_id = response["NetworkId"] + member_id = response["MemberId"] + + # Create a node + response = conn.create_node( + NetworkId=network_id, + MemberId=member_id, + NodeConfiguration=helpers.default_nodeconfiguration, + ) + + # Create another node + response = conn.create_node( + NetworkId=network_id, + MemberId=member_id, + NodeConfiguration=helpers.default_nodeconfiguration, + ) + + # Find node in full list + response = conn.list_nodes(NetworkId=network_id, MemberId=member_id) + nodes = response["Nodes"] + nodes.should.have.length_of(2) + + # Try to create one too many nodes + response = conn.create_node.when.called_with( + NetworkId=network_id, + MemberId=member_id, + NodeConfiguration=helpers.default_nodeconfiguration, + ).should.throw( + Exception, "Maximum number of nodes exceeded in member {0}".format(member_id), + ) + + +@mock_managedblockchain +def test_create_node_badnetwork(): + conn = boto3.client("managedblockchain", region_name="us-east-1") + + response = conn.create_node.when.called_with( + NetworkId="n-ABCDEFGHIJKLMNOP0123456789", + MemberId="m-ABCDEFGHIJKLMNOP0123456789", + NodeConfiguration=helpers.default_nodeconfiguration, + ).should.throw(Exception, "Network n-ABCDEFGHIJKLMNOP0123456789 not found") + + +@mock_managedblockchain +def test_create_node_badmember(): + conn = boto3.client("managedblockchain", region_name="us-east-1") + + response = conn.create_network( + Name="testnetwork1", + Description="Test Network 1", + Framework="HYPERLEDGER_FABRIC", + FrameworkVersion="1.2", + FrameworkConfiguration=helpers.default_frameworkconfiguration, + VotingPolicy=helpers.default_votingpolicy, + MemberConfiguration=helpers.default_memberconfiguration, + ) + network_id = response["NetworkId"] + + response = conn.create_node.when.called_with( + NetworkId=network_id, + MemberId="m-ABCDEFGHIJKLMNOP0123456789", + NodeConfiguration=helpers.default_nodeconfiguration, + ).should.throw(Exception, "Member m-ABCDEFGHIJKLMNOP0123456789 not found") + + +@mock_managedblockchain +def test_create_node_badnodeconfig(): + conn = boto3.client("managedblockchain", region_name="us-east-1") + + response = conn.create_network( + Name="testnetwork1", + 
Description="Test Network 1", + Framework="HYPERLEDGER_FABRIC", + FrameworkVersion="1.2", + FrameworkConfiguration=helpers.default_frameworkconfiguration, + VotingPolicy=helpers.default_votingpolicy, + MemberConfiguration=helpers.default_memberconfiguration, + ) + network_id = response["NetworkId"] + member_id = response["MemberId"] + + # Incorrect instance type + logconfigbad = dict(helpers.default_nodeconfiguration) + logconfigbad["InstanceType"] = "foo" + response = conn.create_node.when.called_with( + NetworkId=network_id, MemberId=member_id, NodeConfiguration=logconfigbad, + ).should.throw(Exception, "Requested instance foo isn't supported.") + + # Incorrect instance type for edition + logconfigbad = dict(helpers.default_nodeconfiguration) + logconfigbad["InstanceType"] = "bc.t3.large" + response = conn.create_node.when.called_with( + NetworkId=network_id, MemberId=member_id, NodeConfiguration=logconfigbad, + ).should.throw( + Exception, + "Instance type bc.t3.large is not supported with STARTER Edition networks", + ) + + # Incorrect availability zone + logconfigbad = dict(helpers.default_nodeconfiguration) + logconfigbad["AvailabilityZone"] = "us-east-11" + response = conn.create_node.when.called_with( + NetworkId=network_id, MemberId=member_id, NodeConfiguration=logconfigbad, + ).should.throw(Exception, "Availability Zone is not valid") + + +@mock_managedblockchain +def test_list_nodes_badnetwork(): + conn = boto3.client("managedblockchain", region_name="us-east-1") + + response = conn.list_nodes.when.called_with( + NetworkId="n-ABCDEFGHIJKLMNOP0123456789", + MemberId="m-ABCDEFGHIJKLMNOP0123456789", + ).should.throw(Exception, "Network n-ABCDEFGHIJKLMNOP0123456789 not found") + + +@mock_managedblockchain +def test_list_nodes_badmember(): + conn = boto3.client("managedblockchain", region_name="us-east-1") + + response = conn.create_network( + Name="testnetwork1", + Description="Test Network 1", + Framework="HYPERLEDGER_FABRIC", + FrameworkVersion="1.2", + FrameworkConfiguration=helpers.default_frameworkconfiguration, + VotingPolicy=helpers.default_votingpolicy, + MemberConfiguration=helpers.default_memberconfiguration, + ) + network_id = response["NetworkId"] + + response = conn.list_nodes.when.called_with( + NetworkId=network_id, MemberId="m-ABCDEFGHIJKLMNOP0123456789", + ).should.throw(Exception, "Member m-ABCDEFGHIJKLMNOP0123456789 not found") + + +@mock_managedblockchain +def test_get_node_badnetwork(): + conn = boto3.client("managedblockchain", region_name="us-east-1") + + response = conn.get_node.when.called_with( + NetworkId="n-ABCDEFGHIJKLMNOP0123456789", + MemberId="m-ABCDEFGHIJKLMNOP0123456789", + NodeId="nd-ABCDEFGHIJKLMNOP0123456789", + ).should.throw(Exception, "Network n-ABCDEFGHIJKLMNOP0123456789 not found") + + +@mock_managedblockchain +def test_get_node_badmember(): + conn = boto3.client("managedblockchain", region_name="us-east-1") + + response = conn.create_network( + Name="testnetwork1", + Description="Test Network 1", + Framework="HYPERLEDGER_FABRIC", + FrameworkVersion="1.2", + FrameworkConfiguration=helpers.default_frameworkconfiguration, + VotingPolicy=helpers.default_votingpolicy, + MemberConfiguration=helpers.default_memberconfiguration, + ) + network_id = response["NetworkId"] + + response = conn.get_node.when.called_with( + NetworkId=network_id, + MemberId="m-ABCDEFGHIJKLMNOP0123456789", + NodeId="nd-ABCDEFGHIJKLMNOP0123456789", + ).should.throw(Exception, "Member m-ABCDEFGHIJKLMNOP0123456789 not found") + + +@mock_managedblockchain +def 
test_get_node_badnode(): + conn = boto3.client("managedblockchain", region_name="us-east-1") + + response = conn.create_network( + Name="testnetwork1", + Description="Test Network 1", + Framework="HYPERLEDGER_FABRIC", + FrameworkVersion="1.2", + FrameworkConfiguration=helpers.default_frameworkconfiguration, + VotingPolicy=helpers.default_votingpolicy, + MemberConfiguration=helpers.default_memberconfiguration, + ) + network_id = response["NetworkId"] + member_id = response["MemberId"] + + response = conn.get_node.when.called_with( + NetworkId=network_id, + MemberId=member_id, + NodeId="nd-ABCDEFGHIJKLMNOP0123456789", + ).should.throw(Exception, "Node nd-ABCDEFGHIJKLMNOP0123456789 not found") + + +@mock_managedblockchain +def test_delete_node_badnetwork(): + conn = boto3.client("managedblockchain", region_name="us-east-1") + + response = conn.delete_node.when.called_with( + NetworkId="n-ABCDEFGHIJKLMNOP0123456789", + MemberId="m-ABCDEFGHIJKLMNOP0123456789", + NodeId="nd-ABCDEFGHIJKLMNOP0123456789", + ).should.throw(Exception, "Network n-ABCDEFGHIJKLMNOP0123456789 not found") + + +@mock_managedblockchain +def test_delete_node_badmember(): + conn = boto3.client("managedblockchain", region_name="us-east-1") + + response = conn.create_network( + Name="testnetwork1", + Description="Test Network 1", + Framework="HYPERLEDGER_FABRIC", + FrameworkVersion="1.2", + FrameworkConfiguration=helpers.default_frameworkconfiguration, + VotingPolicy=helpers.default_votingpolicy, + MemberConfiguration=helpers.default_memberconfiguration, + ) + network_id = response["NetworkId"] + + response = conn.delete_node.when.called_with( + NetworkId=network_id, + MemberId="m-ABCDEFGHIJKLMNOP0123456789", + NodeId="nd-ABCDEFGHIJKLMNOP0123456789", + ).should.throw(Exception, "Member m-ABCDEFGHIJKLMNOP0123456789 not found") + + +@mock_managedblockchain +def test_delete_node_badnode(): + conn = boto3.client("managedblockchain", region_name="us-east-1") + + response = conn.create_network( + Name="testnetwork1", + Description="Test Network 1", + Framework="HYPERLEDGER_FABRIC", + FrameworkVersion="1.2", + FrameworkConfiguration=helpers.default_frameworkconfiguration, + VotingPolicy=helpers.default_votingpolicy, + MemberConfiguration=helpers.default_memberconfiguration, + ) + network_id = response["NetworkId"] + member_id = response["MemberId"] + + response = conn.delete_node.when.called_with( + NetworkId=network_id, + MemberId=member_id, + NodeId="nd-ABCDEFGHIJKLMNOP0123456789", + ).should.throw(Exception, "Node nd-ABCDEFGHIJKLMNOP0123456789 not found") + + +@mock_managedblockchain +def test_update_node_badnetwork(): + conn = boto3.client("managedblockchain", region_name="us-east-1") + + response = conn.update_node.when.called_with( + NetworkId="n-ABCDEFGHIJKLMNOP0123456789", + MemberId="m-ABCDEFGHIJKLMNOP0123456789", + NodeId="nd-ABCDEFGHIJKLMNOP0123456789", + LogPublishingConfiguration=helpers.default_nodeconfiguration[ + "LogPublishingConfiguration" + ], + ).should.throw(Exception, "Network n-ABCDEFGHIJKLMNOP0123456789 not found") + + +@mock_managedblockchain +def test_update_node_badmember(): + conn = boto3.client("managedblockchain", region_name="us-east-1") + + response = conn.create_network( + Name="testnetwork1", + Description="Test Network 1", + Framework="HYPERLEDGER_FABRIC", + FrameworkVersion="1.2", + FrameworkConfiguration=helpers.default_frameworkconfiguration, + VotingPolicy=helpers.default_votingpolicy, + MemberConfiguration=helpers.default_memberconfiguration, + ) + network_id = response["NetworkId"] + + response 
= conn.update_node.when.called_with( + NetworkId=network_id, + MemberId="m-ABCDEFGHIJKLMNOP0123456789", + NodeId="nd-ABCDEFGHIJKLMNOP0123456789", + LogPublishingConfiguration=helpers.default_nodeconfiguration[ + "LogPublishingConfiguration" + ], + ).should.throw(Exception, "Member m-ABCDEFGHIJKLMNOP0123456789 not found") + + +@mock_managedblockchain +def test_update_node_badnode(): + conn = boto3.client("managedblockchain", region_name="us-east-1") + + response = conn.create_network( + Name="testnetwork1", + Description="Test Network 1", + Framework="HYPERLEDGER_FABRIC", + FrameworkVersion="1.2", + FrameworkConfiguration=helpers.default_frameworkconfiguration, + VotingPolicy=helpers.default_votingpolicy, + MemberConfiguration=helpers.default_memberconfiguration, + ) + network_id = response["NetworkId"] + member_id = response["MemberId"] + + response = conn.update_node.when.called_with( + NetworkId=network_id, + MemberId=member_id, + NodeId="nd-ABCDEFGHIJKLMNOP0123456789", + LogPublishingConfiguration=helpers.default_nodeconfiguration[ + "LogPublishingConfiguration" + ], + ).should.throw(Exception, "Node nd-ABCDEFGHIJKLMNOP0123456789 not found") diff --git a/tests/test_managedblockchain/test_managedblockchain_proposals.py b/tests/test_managedblockchain/test_managedblockchain_proposals.py index 407d26246c81..aa899e3a1623 100644 --- a/tests/test_managedblockchain/test_managedblockchain_proposals.py +++ b/tests/test_managedblockchain/test_managedblockchain_proposals.py @@ -3,7 +3,6 @@ import boto3 import sure # noqa -from moto.managedblockchain.exceptions import BadRequestException from moto import mock_managedblockchain from . import helpers diff --git a/tests/test_managedblockchain/test_managedblockchain_proposalvotes.py b/tests/test_managedblockchain/test_managedblockchain_proposalvotes.py index a026b496f99e..eda72839843b 100644 --- a/tests/test_managedblockchain/test_managedblockchain_proposalvotes.py +++ b/tests/test_managedblockchain/test_managedblockchain_proposalvotes.py @@ -7,7 +7,6 @@ from freezegun import freeze_time from nose import SkipTest -from moto.managedblockchain.exceptions import BadRequestException from moto import mock_managedblockchain, settings from . 
import helpers @@ -186,6 +185,18 @@ def test_vote_on_proposal_yes_greater_than(): response["Proposal"]["NetworkId"].should.equal(network_id) response["Proposal"]["Status"].should.equal("IN_PROGRESS") + # Vote no with member 2 + response = conn.vote_on_proposal( + NetworkId=network_id, + ProposalId=proposal_id, + VoterMemberId=member_id2, + Vote="NO", + ) + + # Get proposal details + response = conn.get_proposal(NetworkId=network_id, ProposalId=proposal_id) + response["Proposal"]["Status"].should.equal("REJECTED") + @mock_managedblockchain def test_vote_on_proposal_no_greater_than(): @@ -310,11 +321,14 @@ def test_vote_on_proposal_expiredproposal(): with freeze_time("2015-02-01 12:00:00"): # Vote yes - should set status to expired - response = conn.vote_on_proposal( + response = conn.vote_on_proposal.when.called_with( NetworkId=network_id, ProposalId=proposal_id, VoterMemberId=member_id, Vote="YES", + ).should.throw( + Exception, + "Proposal {0} is expired and you cannot vote on it.".format(proposal_id), ) # Get proposal details - should be EXPIRED @@ -322,6 +336,123 @@ def test_vote_on_proposal_expiredproposal(): response["Proposal"]["Status"].should.equal("EXPIRED") +@mock_managedblockchain +def test_vote_on_proposal_status_check(): + conn = boto3.client("managedblockchain", region_name="us-east-1") + + # Create network + response = conn.create_network( + Name="testnetwork1", + Framework="HYPERLEDGER_FABRIC", + FrameworkVersion="1.2", + FrameworkConfiguration=helpers.default_frameworkconfiguration, + VotingPolicy=helpers.default_votingpolicy, + MemberConfiguration=helpers.default_memberconfiguration, + ) + network_id = response["NetworkId"] + member_id = response["MemberId"] + + # Create 2 more members + for counter in range(2, 4): + response = conn.create_proposal( + NetworkId=network_id, + MemberId=member_id, + Actions=helpers.default_policy_actions, + ) + proposal_id = response["ProposalId"] + + # Vote yes + response = conn.vote_on_proposal( + NetworkId=network_id, + ProposalId=proposal_id, + VoterMemberId=member_id, + Vote="YES", + ) + + memberidlist = [None, None, None] + memberidlist[0] = member_id + for counter in range(2, 4): + # Get the invitation + response = conn.list_invitations() + invitation_id = helpers.select_invitation_id_for_network( + response["Invitations"], network_id, "PENDING" + )[0] + + # Create the member + response = conn.create_member( + InvitationId=invitation_id, + NetworkId=network_id, + MemberConfiguration=helpers.create_member_configuration( + "testmember" + str(counter), + "admin", + "Admin12345", + False, + "Test Member " + str(counter), + ), + ) + member_id = response["MemberId"] + memberidlist[counter - 1] = member_id + + # Should be no more pending invitations + response = conn.list_invitations() + pendinginvs = helpers.select_invitation_id_for_network( + response["Invitations"], network_id, "PENDING" + ) + pendinginvs.should.have.length_of(0) + + # Create another proposal + response = conn.create_proposal( + NetworkId=network_id, + MemberId=member_id, + Actions=helpers.default_policy_actions, + ) + + proposal_id = response["ProposalId"] + + # Vote yes with member 1 + response = conn.vote_on_proposal( + NetworkId=network_id, + ProposalId=proposal_id, + VoterMemberId=memberidlist[0], + Vote="YES", + ) + + # Vote yes with member 2 + response = conn.vote_on_proposal( + NetworkId=network_id, + ProposalId=proposal_id, + VoterMemberId=memberidlist[1], + Vote="YES", + ) + + # Get proposal details - now approved (2 yes, 1 outstanding) + response = 
conn.get_proposal(NetworkId=network_id, ProposalId=proposal_id) + response["Proposal"]["NetworkId"].should.equal(network_id) + response["Proposal"]["Status"].should.equal("APPROVED") + + # Should be one pending invitation + response = conn.list_invitations() + pendinginvs = helpers.select_invitation_id_for_network( + response["Invitations"], network_id, "PENDING" + ) + pendinginvs.should.have.length_of(1) + + # Vote with member 3 - should throw an exception and not create a new invitation + response = conn.vote_on_proposal.when.called_with( + NetworkId=network_id, + ProposalId=proposal_id, + VoterMemberId=memberidlist[2], + Vote="YES", + ).should.throw(Exception, "and you cannot vote on it") + + # Should still be one pending invitation + response = conn.list_invitations() + pendinginvs = helpers.select_invitation_id_for_network( + response["Invitations"], network_id, "PENDING" + ) + pendinginvs.should.have.length_of(1) + + @mock_managedblockchain def test_vote_on_proposal_badnetwork(): conn = boto3.client("managedblockchain", region_name="us-east-1") @@ -425,13 +556,21 @@ def test_vote_on_proposal_badvote(): def test_vote_on_proposal_alreadyvoted(): conn = boto3.client("managedblockchain", region_name="us-east-1") + votingpolicy = { + "ApprovalThresholdPolicy": { + "ThresholdPercentage": 50, + "ProposalDurationInHours": 24, + "ThresholdComparator": "GREATER_THAN", + } + } + # Create network - need a good network response = conn.create_network( Name="testnetwork1", Framework="HYPERLEDGER_FABRIC", FrameworkVersion="1.2", FrameworkConfiguration=helpers.default_frameworkconfiguration, - VotingPolicy=helpers.default_votingpolicy, + VotingPolicy=votingpolicy, MemberConfiguration=helpers.default_memberconfiguration, ) network_id = response["NetworkId"] @@ -465,7 +604,6 @@ def test_vote_on_proposal_alreadyvoted(): "testmember2", "admin", "Admin12345", False, "Test Member 2" ), ) - member_id2 = response["MemberId"] # Create another proposal response = conn.create_proposal( @@ -495,7 +633,10 @@ def test_vote_on_proposal_alreadyvoted(): ProposalId=proposal_id, VoterMemberId=member_id, Vote="YES", - ).should.throw(Exception, "Invalid request body") + ).should.throw( + Exception, + "Member {0} has already voted on proposal {1}.".format(member_id, proposal_id), + ) @mock_managedblockchain From dd20fec9f35ed508ca6f4a2ecacc6c0c95acbedf Mon Sep 17 00:00:00 2001 From: Bert Blommers Date: Sat, 16 May 2020 15:00:06 +0100 Subject: [PATCH 357/658] Athena - Start/stop executions --- IMPLEMENTATION_COVERAGE.md | 38 +++++----- moto/athena/models.py | 42 ++++++++++- moto/athena/responses.py | 67 +++++++++++++++--- tests/test_athena/test_athena.py | 115 ++++++++++++++++++++++++++++++- 4 files changed, 229 insertions(+), 33 deletions(-) diff --git a/IMPLEMENTATION_COVERAGE.md b/IMPLEMENTATION_COVERAGE.md index f56385b2527f..1555da1c8748 100644 --- a/IMPLEMENTATION_COVERAGE.md +++ b/IMPLEMENTATION_COVERAGE.md @@ -641,7 +641,7 @@ ## athena
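For orientation, a minimal sketch (not part of the patch) of the start/stop flow this commit mocks; the call shapes and the `QUEUED`/`CANCELLED` states mirror the tests added below:

```python
import boto3
from moto import mock_athena


@mock_athena
def sketch_start_and_stop_query():
    client = boto3.client("athena", region_name="us-east-1")
    # New executions are stored with status QUEUED
    exec_id = client.start_query_execution(
        QueryString="SELECT 1",
        QueryExecutionContext={"Database": "database"},
        ResultConfiguration={"OutputLocation": "s3://bucket-name/prefix/"},
    )["QueryExecutionId"]
    # stop_query_execution flips the stored status to CANCELLED
    client.stop_query_execution(QueryExecutionId=exec_id)
    details = client.get_query_execution(QueryExecutionId=exec_id)["QueryExecution"]
    assert details["Status"]["State"] == "CANCELLED"
```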
-10% implemented +26% implemented - [ ] batch_get_named_query - [ ] batch_get_query_execution @@ -652,13 +652,13 @@ - [ ] get_named_query - [ ] get_query_execution - [ ] get_query_results -- [ ] get_work_group +- [X] get_work_group - [ ] list_named_queries - [ ] list_query_executions - [ ] list_tags_for_resource - [X] list_work_groups -- [ ] start_query_execution -- [ ] stop_query_execution +- [X] start_query_execution +- [X] stop_query_execution - [ ] tag_resource - [ ] untag_resource - [ ] update_work_group @@ -5287,26 +5287,26 @@ ## managedblockchain
-16% implemented +77% implemented -- [ ] create_member +- [X] create_member - [X] create_network - [ ] create_node -- [ ] create_proposal -- [ ] delete_member +- [X] create_proposal +- [X] delete_member - [ ] delete_node -- [ ] get_member +- [X] get_member - [X] get_network - [ ] get_node -- [ ] get_proposal -- [ ] list_invitations -- [ ] list_members +- [X] get_proposal +- [X] list_invitations +- [X] list_members - [X] list_networks - [ ] list_nodes -- [ ] list_proposal_votes -- [ ] list_proposals -- [ ] reject_invitation -- [ ] vote_on_proposal +- [X] list_proposal_votes +- [X] list_proposals +- [X] reject_invitation +- [X] vote_on_proposal
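A hedged sketch of how the operations just marked `[X]` fit together; the configuration dicts follow the shapes used by the test helpers in this series, but the exact values here are illustrative assumptions:

```python
import boto3
from moto import mock_managedblockchain


@mock_managedblockchain
def sketch_proposal_vote():
    conn = boto3.client("managedblockchain", region_name="us-east-1")
    network = conn.create_network(
        Name="testnetwork1",
        Framework="HYPERLEDGER_FABRIC",
        FrameworkVersion="1.2",
        FrameworkConfiguration={"Fabric": {"Edition": "STARTER"}},  # assumed helper value
        VotingPolicy={
            "ApprovalThresholdPolicy": {
                "ThresholdPercentage": 50,
                "ProposalDurationInHours": 24,
                "ThresholdComparator": "GREATER_THAN",
            }
        },
        MemberConfiguration={
            "Name": "testmember1",
            "FrameworkConfiguration": {
                "Fabric": {"AdminUsername": "admin", "AdminPassword": "Admin12345"}
            },
        },
    )
    # Propose inviting another account, then vote as the founding member
    proposal_id = conn.create_proposal(
        NetworkId=network["NetworkId"],
        MemberId=network["MemberId"],
        Actions={"Invitations": [{"Principal": "123456789012"}]},  # assumed helper value
    )["ProposalId"]
    conn.vote_on_proposal(
        NetworkId=network["NetworkId"],
        ProposalId=proposal_id,
        VoterMemberId=network["MemberId"],
        Vote="YES",
    )
```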
## marketplace-catalog @@ -7392,7 +7392,7 @@ ## ses
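A small sketch of the template calls that flip to `[X]` in the list below; it assumes `create_template` is already mocked, and the template fields here are illustrative:

```python
import boto3
from moto import mock_ses


@mock_ses
def sketch_ses_templates():
    client = boto3.client("ses", region_name="us-east-1")
    client.create_template(
        Template={
            "TemplateName": "MyTemplate",
            "SubjectPart": "Greetings, {{name}}!",
            "TextPart": "Dear {{name}},",
            "HtmlPart": "<h1>Hello {{name}}</h1>",
        }
    )
    template = client.get_template(TemplateName="MyTemplate")["Template"]
    assert template["TemplateName"] == "MyTemplate"
    assert len(client.list_templates()["TemplatesMetadata"]) == 1
```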
-18% implemented +21% implemented - [ ] clone_receipt_rule_set - [X] create_configuration_set @@ -7427,14 +7427,14 @@ - [ ] get_identity_verification_attributes - [X] get_send_quota - [X] get_send_statistics -- [ ] get_template +- [X] get_template - [ ] list_configuration_sets - [ ] list_custom_verification_email_templates - [X] list_identities - [ ] list_identity_policies - [ ] list_receipt_filters - [ ] list_receipt_rule_sets -- [ ] list_templates +- [X] list_templates - [X] list_verified_email_addresses - [ ] put_configuration_set_delivery_options - [ ] put_identity_policy diff --git a/moto/athena/models.py b/moto/athena/models.py index 6aeca0ffad8c..20d180d74103 100644 --- a/moto/athena/models.py +++ b/moto/athena/models.py @@ -2,10 +2,9 @@ import time from boto3 import Session +from moto.core import BaseBackend, BaseModel, ACCOUNT_ID -from moto.core import BaseBackend, BaseModel - -from moto.core import ACCOUNT_ID +from uuid import uuid4 class TaggableResourceMixin(object): @@ -50,6 +49,18 @@ def __init__(self, athena_backend, name, configuration, description, tags): self.configuration = configuration +class Execution(BaseModel): + + def __init__(self, query, context, config, workgroup): + self.id = str(uuid4()) + self.query = query + self.context = context + self.config = config + self.workgroup = workgroup + self.start_time = time.time() + self.status = "QUEUED" + + class AthenaBackend(BaseBackend): region_name = None @@ -57,6 +68,7 @@ def __init__(self, region_name=None): if region_name is not None: self.region_name = region_name self.work_groups = {} + self.executions = {} def create_work_group(self, name, configuration, description, tags): if name in self.work_groups: @@ -76,6 +88,30 @@ def list_work_groups(self): for wg in self.work_groups.values() ] + def get_work_group(self, name): + if name not in self.work_groups: + return None + wg = self.work_groups[name] + return { + "Name": wg.name, + "State": wg.state, + "Configuration": wg.configuration, + "Description": wg.description, + "CreationTime": time.time() + } + + def start_query_execution(self, query, context, config, workgroup): + execution = Execution(query=query, context=context, config=config, workgroup=workgroup) + self.executions[execution.id] = execution + return execution.id + + def get_execution(self, exec_id): + return self.executions[exec_id] + + def stop_query_execution(self, exec_id): + execution = self.executions[exec_id] + execution.status = "CANCELLED" + athena_backends = {} for region in Session().get_available_regions("athena"): diff --git a/moto/athena/responses.py b/moto/athena/responses.py index 80cac5d62e5e..c572cea0bd96 100644 --- a/moto/athena/responses.py +++ b/moto/athena/responses.py @@ -18,15 +18,7 @@ def create_work_group(self): name, configuration, description, tags ) if not work_group: - return ( - json.dumps( - { - "__type": "InvalidRequestException", - "Message": "WorkGroup already exists", - } - ), - dict(status=400), - ) + return self.error("WorkGroup already exists", 400) return json.dumps( { "CreateWorkGroupResponse": { @@ -39,3 +31,60 @@ def create_work_group(self): def list_work_groups(self): return json.dumps({"WorkGroups": self.athena_backend.list_work_groups()}) + + def get_work_group(self): + name = self._get_param("WorkGroup") + return json.dumps({"WorkGroup": self.athena_backend.get_work_group(name)}) + + def start_query_execution(self): + query = self._get_param("QueryString") + context = self._get_param("QueryExecutionContext") + config = self._get_param("ResultConfiguration") + 
workgroup = self._get_param("WorkGroup") + if workgroup and not self.athena_backend.get_work_group(workgroup): + return self.error("WorkGroup does not exist", 400) + id = self.athena_backend.start_query_execution(query=query, context=context, config=config, workgroup=workgroup) + return json.dumps({"QueryExecutionId": id}) + + def get_query_execution(self): + exec_id = self._get_param("QueryExecutionId") + execution = self.athena_backend.get_execution(exec_id) + result = { + 'QueryExecution': { + 'QueryExecutionId': exec_id, + 'Query': execution.query, + 'StatementType': 'DDL', + 'ResultConfiguration': execution.config, + 'QueryExecutionContext': execution.context, + 'Status': { + 'State': execution.status, + 'SubmissionDateTime': execution.start_time + }, + 'Statistics': { + 'EngineExecutionTimeInMillis': 0, + 'DataScannedInBytes': 0, + 'TotalExecutionTimeInMillis': 0, + 'QueryQueueTimeInMillis': 0, + 'QueryPlanningTimeInMillis': 0, + 'ServiceProcessingTimeInMillis': 0 + }, + 'WorkGroup': execution.workgroup + } + } + return json.dumps(result) + + def stop_query_execution(self): + exec_id = self._get_param("QueryExecutionId") + self.athena_backend.stop_query_execution(exec_id) + return json.dumps({}) + + def error(self, msg, status): + return ( + json.dumps( + { + "__type": "InvalidRequestException", + "Message": msg, + } + ), + dict(status=status), + ) diff --git a/tests/test_athena/test_athena.py b/tests/test_athena/test_athena.py index d36653910da8..597361b1deb5 100644 --- a/tests/test_athena/test_athena.py +++ b/tests/test_athena/test_athena.py @@ -1,8 +1,7 @@ from __future__ import unicode_literals -import datetime - from botocore.exceptions import ClientError +from nose.tools import assert_raises import boto3 import sure # noqa @@ -57,3 +56,115 @@ def test_create_work_group(): work_group["Name"].should.equal("athena_workgroup") work_group["Description"].should.equal("Test work group") work_group["State"].should.equal("ENABLED") + + +@mock_athena +def test_create_and_get_workgroup(): + client = boto3.client("athena", region_name="us-east-1") + + create_basic_workgroup(client=client, name="athena_workgroup") + + work_group = client.get_work_group(WorkGroup='athena_workgroup')['WorkGroup'] + del work_group["CreationTime"] # Were not testing creationtime atm + work_group.should.equal({ + 'Name': 'athena_workgroup', + 'State': 'ENABLED', + 'Configuration': { + 'ResultConfiguration': { + 'OutputLocation': 's3://bucket-name/prefix/' + } + }, + 'Description': 'Test work group' + }) + + +@mock_athena +def test_start_query_execution(): + client = boto3.client("athena", region_name="us-east-1") + + create_basic_workgroup(client=client, name="athena_workgroup") + response = client.start_query_execution(QueryString='query1', + QueryExecutionContext={'Database': 'string'}, + ResultConfiguration={'OutputLocation': 'string'}, + WorkGroup='athena_workgroup') + assert 'QueryExecutionId' in response + + sec_response = client.start_query_execution(QueryString='query2', + QueryExecutionContext={'Database': 'string'}, + ResultConfiguration={'OutputLocation': 'string'}) + assert 'QueryExecutionId' in sec_response + response["QueryExecutionId"].shouldnt.equal(sec_response["QueryExecutionId"]) + + +@mock_athena +def test_start_query_validate_workgroup(): + client = boto3.client("athena", region_name="us-east-1") + + with assert_raises(ClientError) as err: + client.start_query_execution(QueryString='query1', + QueryExecutionContext={'Database': 'string'}, + ResultConfiguration={'OutputLocation': 'string'}, + 
WorkGroup='unknown_workgroup') + err.exception.response["Error"]["Code"].should.equal("InvalidRequestException") + err.exception.response["Error"]["Message"].should.equal("WorkGroup does not exist") + + +@mock_athena +def test_get_query_execution(): + client = boto3.client("athena", region_name="us-east-1") + + query = "SELECT stuff" + location = "s3://bucket-name/prefix/" + database = "database" + # Start Query + exex_id = client.start_query_execution(QueryString=query, + QueryExecutionContext={'Database': database}, + ResultConfiguration={'OutputLocation': location})["QueryExecutionId"] + # + details = client.get_query_execution(QueryExecutionId=exex_id)["QueryExecution"] + # + details["QueryExecutionId"].should.equal(exex_id) + details["Query"].should.equal(query) + details["StatementType"].should.equal("DDL") + details["ResultConfiguration"]["OutputLocation"].should.equal(location) + details["QueryExecutionContext"]["Database"].should.equal(database) + details["Status"]["State"].should.equal("QUEUED") + details["Statistics"].should.equal({'EngineExecutionTimeInMillis': 0, + 'DataScannedInBytes': 0, + 'TotalExecutionTimeInMillis': 0, + 'QueryQueueTimeInMillis': 0, + 'QueryPlanningTimeInMillis': 0, + 'ServiceProcessingTimeInMillis': 0}) + assert "WorkGroup" not in details + + +@mock_athena +def test_stop_query_execution(): + client = boto3.client("athena", region_name="us-east-1") + + query = "SELECT stuff" + location = "s3://bucket-name/prefix/" + database = "database" + # Start Query + exex_id = client.start_query_execution(QueryString=query, + QueryExecutionContext={'Database': database}, + ResultConfiguration={'OutputLocation': location})["QueryExecutionId"] + # Stop Query + client.stop_query_execution(QueryExecutionId=exex_id) + # Verify status + details = client.get_query_execution(QueryExecutionId=exex_id)["QueryExecution"] + # + details["QueryExecutionId"].should.equal(exex_id) + details["Status"]["State"].should.equal("CANCELLED") + + +def create_basic_workgroup(client, name): + client.create_work_group( + Name=name, + Description="Test work group", + Configuration={ + "ResultConfiguration": { + "OutputLocation": "s3://bucket-name/prefix/", + } + } + ) From ffb521f86b2dc793e0c4a5bc953e1ae7aadb5195 Mon Sep 17 00:00:00 2001 From: Bert Blommers Date: Sat, 16 May 2020 15:03:26 +0100 Subject: [PATCH 358/658] Linting --- moto/athena/models.py | 7 ++- moto/athena/responses.py | 45 +++++++-------- tests/test_athena/test_athena.py | 96 ++++++++++++++++++-------------- 3 files changed, 79 insertions(+), 69 deletions(-) diff --git a/moto/athena/models.py b/moto/athena/models.py index 20d180d74103..c39c13817d73 100644 --- a/moto/athena/models.py +++ b/moto/athena/models.py @@ -50,7 +50,6 @@ def __init__(self, athena_backend, name, configuration, description, tags): class Execution(BaseModel): - def __init__(self, query, context, config, workgroup): self.id = str(uuid4()) self.query = query @@ -97,11 +96,13 @@ def get_work_group(self, name): "State": wg.state, "Configuration": wg.configuration, "Description": wg.description, - "CreationTime": time.time() + "CreationTime": time.time(), } def start_query_execution(self, query, context, config, workgroup): - execution = Execution(query=query, context=context, config=config, workgroup=workgroup) + execution = Execution( + query=query, context=context, config=config, workgroup=workgroup + ) self.executions[execution.id] = execution return execution.id diff --git a/moto/athena/responses.py b/moto/athena/responses.py index 
c572cea0bd96..b52e0beedb8d 100644 --- a/moto/athena/responses.py +++ b/moto/athena/responses.py @@ -43,32 +43,34 @@ def start_query_execution(self): workgroup = self._get_param("WorkGroup") if workgroup and not self.athena_backend.get_work_group(workgroup): return self.error("WorkGroup does not exist", 400) - id = self.athena_backend.start_query_execution(query=query, context=context, config=config, workgroup=workgroup) + id = self.athena_backend.start_query_execution( + query=query, context=context, config=config, workgroup=workgroup + ) return json.dumps({"QueryExecutionId": id}) def get_query_execution(self): exec_id = self._get_param("QueryExecutionId") execution = self.athena_backend.get_execution(exec_id) result = { - 'QueryExecution': { - 'QueryExecutionId': exec_id, - 'Query': execution.query, - 'StatementType': 'DDL', - 'ResultConfiguration': execution.config, - 'QueryExecutionContext': execution.context, - 'Status': { - 'State': execution.status, - 'SubmissionDateTime': execution.start_time + "QueryExecution": { + "QueryExecutionId": exec_id, + "Query": execution.query, + "StatementType": "DDL", + "ResultConfiguration": execution.config, + "QueryExecutionContext": execution.context, + "Status": { + "State": execution.status, + "SubmissionDateTime": execution.start_time, }, - 'Statistics': { - 'EngineExecutionTimeInMillis': 0, - 'DataScannedInBytes': 0, - 'TotalExecutionTimeInMillis': 0, - 'QueryQueueTimeInMillis': 0, - 'QueryPlanningTimeInMillis': 0, - 'ServiceProcessingTimeInMillis': 0 + "Statistics": { + "EngineExecutionTimeInMillis": 0, + "DataScannedInBytes": 0, + "TotalExecutionTimeInMillis": 0, + "QueryQueueTimeInMillis": 0, + "QueryPlanningTimeInMillis": 0, + "ServiceProcessingTimeInMillis": 0, }, - 'WorkGroup': execution.workgroup + "WorkGroup": execution.workgroup, } } return json.dumps(result) @@ -80,11 +82,6 @@ def stop_query_execution(self): def error(self, msg, status): return ( - json.dumps( - { - "__type": "InvalidRequestException", - "Message": msg, - } - ), + json.dumps({"__type": "InvalidRequestException", "Message": msg,}), dict(status=status), ) diff --git a/tests/test_athena/test_athena.py b/tests/test_athena/test_athena.py index 597361b1deb5..93ca436aa0f8 100644 --- a/tests/test_athena/test_athena.py +++ b/tests/test_athena/test_athena.py @@ -64,18 +64,18 @@ def test_create_and_get_workgroup(): create_basic_workgroup(client=client, name="athena_workgroup") - work_group = client.get_work_group(WorkGroup='athena_workgroup')['WorkGroup'] - del work_group["CreationTime"] # Were not testing creationtime atm - work_group.should.equal({ - 'Name': 'athena_workgroup', - 'State': 'ENABLED', - 'Configuration': { - 'ResultConfiguration': { - 'OutputLocation': 's3://bucket-name/prefix/' - } - }, - 'Description': 'Test work group' - }) + work_group = client.get_work_group(WorkGroup="athena_workgroup")["WorkGroup"] + del work_group["CreationTime"] # Were not testing creationtime atm + work_group.should.equal( + { + "Name": "athena_workgroup", + "State": "ENABLED", + "Configuration": { + "ResultConfiguration": {"OutputLocation": "s3://bucket-name/prefix/"} + }, + "Description": "Test work group", + } + ) @mock_athena @@ -83,16 +83,20 @@ def test_start_query_execution(): client = boto3.client("athena", region_name="us-east-1") create_basic_workgroup(client=client, name="athena_workgroup") - response = client.start_query_execution(QueryString='query1', - QueryExecutionContext={'Database': 'string'}, - ResultConfiguration={'OutputLocation': 'string'}, - 
WorkGroup='athena_workgroup') - assert 'QueryExecutionId' in response - - sec_response = client.start_query_execution(QueryString='query2', - QueryExecutionContext={'Database': 'string'}, - ResultConfiguration={'OutputLocation': 'string'}) - assert 'QueryExecutionId' in sec_response + response = client.start_query_execution( + QueryString="query1", + QueryExecutionContext={"Database": "string"}, + ResultConfiguration={"OutputLocation": "string"}, + WorkGroup="athena_workgroup", + ) + assert "QueryExecutionId" in response + + sec_response = client.start_query_execution( + QueryString="query2", + QueryExecutionContext={"Database": "string"}, + ResultConfiguration={"OutputLocation": "string"}, + ) + assert "QueryExecutionId" in sec_response response["QueryExecutionId"].shouldnt.equal(sec_response["QueryExecutionId"]) @@ -101,10 +105,12 @@ def test_start_query_validate_workgroup(): client = boto3.client("athena", region_name="us-east-1") with assert_raises(ClientError) as err: - client.start_query_execution(QueryString='query1', - QueryExecutionContext={'Database': 'string'}, - ResultConfiguration={'OutputLocation': 'string'}, - WorkGroup='unknown_workgroup') + client.start_query_execution( + QueryString="query1", + QueryExecutionContext={"Database": "string"}, + ResultConfiguration={"OutputLocation": "string"}, + WorkGroup="unknown_workgroup", + ) err.exception.response["Error"]["Code"].should.equal("InvalidRequestException") err.exception.response["Error"]["Message"].should.equal("WorkGroup does not exist") @@ -117,9 +123,11 @@ def test_get_query_execution(): location = "s3://bucket-name/prefix/" database = "database" # Start Query - exex_id = client.start_query_execution(QueryString=query, - QueryExecutionContext={'Database': database}, - ResultConfiguration={'OutputLocation': location})["QueryExecutionId"] + exex_id = client.start_query_execution( + QueryString=query, + QueryExecutionContext={"Database": database}, + ResultConfiguration={"OutputLocation": location}, + )["QueryExecutionId"] # details = client.get_query_execution(QueryExecutionId=exex_id)["QueryExecution"] # @@ -129,12 +137,16 @@ def test_get_query_execution(): details["ResultConfiguration"]["OutputLocation"].should.equal(location) details["QueryExecutionContext"]["Database"].should.equal(database) details["Status"]["State"].should.equal("QUEUED") - details["Statistics"].should.equal({'EngineExecutionTimeInMillis': 0, - 'DataScannedInBytes': 0, - 'TotalExecutionTimeInMillis': 0, - 'QueryQueueTimeInMillis': 0, - 'QueryPlanningTimeInMillis': 0, - 'ServiceProcessingTimeInMillis': 0}) + details["Statistics"].should.equal( + { + "EngineExecutionTimeInMillis": 0, + "DataScannedInBytes": 0, + "TotalExecutionTimeInMillis": 0, + "QueryQueueTimeInMillis": 0, + "QueryPlanningTimeInMillis": 0, + "ServiceProcessingTimeInMillis": 0, + } + ) assert "WorkGroup" not in details @@ -146,9 +158,11 @@ def test_stop_query_execution(): location = "s3://bucket-name/prefix/" database = "database" # Start Query - exex_id = client.start_query_execution(QueryString=query, - QueryExecutionContext={'Database': database}, - ResultConfiguration={'OutputLocation': location})["QueryExecutionId"] + exex_id = client.start_query_execution( + QueryString=query, + QueryExecutionContext={"Database": database}, + ResultConfiguration={"OutputLocation": location}, + )["QueryExecutionId"] # Stop Query client.stop_query_execution(QueryExecutionId=exex_id) # Verify status @@ -163,8 +177,6 @@ def create_basic_workgroup(client, name): Name=name, Description="Test work 
group", Configuration={ - "ResultConfiguration": { - "OutputLocation": "s3://bucket-name/prefix/", - } - } + "ResultConfiguration": {"OutputLocation": "s3://bucket-name/prefix/",} + }, ) From 80b64f9b3ff57515db1fc07329bf8e5f519597aa Mon Sep 17 00:00:00 2001 From: Zach Brookler <39153813+zbrookle@users.noreply.github.com> Date: Mon, 18 May 2020 04:47:18 -0400 Subject: [PATCH 359/658] Cloud formation "depends_on" #2845 Add depends on and update name type mapping (#2994) * ENH: Add unit test for cloudformation DependsOn * ENH: Add implementation of retrieving list of resources that account for dependencies * ENH: Update the name mappings so that they are consistent with the latest cloudformation names * ENH: Add launch configuration to type names * ENH: Create subnet for test and test creation with dependencies * CLN: Code reformatting * CLN: Remove print statements * BUG: Fix error resulting in possible infinite loop * CLN: Remove commented out fixture decorator * BUG: Remove subnet creation * CLN: Remove main and ec2 dependencies * BUG: Add back in instance profile name type * CLN: Remove print * BUG: Fix broken unit test * CLN: Code reformatting * CLN: Remove main * ENH: Add autoscaling group name to type names * ENH: Add unit test for string only dependency and add assertions to unit tests * ENH: Add unit test for chained depends_on in cloudformation stack * BUG: Remove f strings for python 2.7 compatibility * BUG: List needs to be sorted for python2.7 * CLN: Fix code formatting --- moto/cloudformation/parsing.py | 61 +++++++- .../test_cloudformation_depends_on.py | 143 ++++++++++++++++++ .../test_cloudformation_stack_integration.py | 8 +- 3 files changed, 204 insertions(+), 8 deletions(-) create mode 100644 tests/test_cloudformation/test_cloudformation_depends_on.py diff --git a/moto/cloudformation/parsing.py b/moto/cloudformation/parsing.py index a32ff6736ebd..d59b21b82898 100644 --- a/moto/cloudformation/parsing.py +++ b/moto/cloudformation/parsing.py @@ -98,20 +98,46 @@ "AWS::Events::Rule": events_models.Rule, } +UNDOCUMENTED_NAME_TYPE_MAP = { + "AWS::AutoScaling::AutoScalingGroup": "AutoScalingGroupName", + "AWS::AutoScaling::LaunchConfiguration": "LaunchConfigurationName", + "AWS::IAM::InstanceProfile": "InstanceProfileName", +} + # http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-name.html NAME_TYPE_MAP = { - "AWS::CloudWatch::Alarm": "Alarm", + "AWS::ApiGateway::ApiKey": "Name", + "AWS::ApiGateway::Model": "Name", + "AWS::CloudWatch::Alarm": "AlarmName", "AWS::DynamoDB::Table": "TableName", - "AWS::ElastiCache::CacheCluster": "ClusterName", "AWS::ElasticBeanstalk::Application": "ApplicationName", "AWS::ElasticBeanstalk::Environment": "EnvironmentName", + "AWS::CodeDeploy::Application": "ApplicationName", + "AWS::CodeDeploy::DeploymentConfig": "DeploymentConfigName", + "AWS::CodeDeploy::DeploymentGroup": "DeploymentGroupName", + "AWS::Config::ConfigRule": "ConfigRuleName", + "AWS::Config::DeliveryChannel": "Name", + "AWS::Config::ConfigurationRecorder": "Name", "AWS::ElasticLoadBalancing::LoadBalancer": "LoadBalancerName", + "AWS::ElasticLoadBalancingV2::LoadBalancer": "Name", "AWS::ElasticLoadBalancingV2::TargetGroup": "Name", + "AWS::EC2::SecurityGroup": "GroupName", + "AWS::ElastiCache::CacheCluster": "ClusterName", + "AWS::ECR::Repository": "RepositoryName", + "AWS::ECS::Cluster": "ClusterName", + "AWS::Elasticsearch::Domain": "DomainName", + "AWS::Events::Rule": "Name", + "AWS::IAM::Group": "GroupName", + "AWS::IAM::ManagedPolicy": 
"ManagedPolicyName", + "AWS::IAM::Role": "RoleName", + "AWS::IAM::User": "UserName", + "AWS::Lambda::Function": "FunctionName", "AWS::RDS::DBInstance": "DBInstanceIdentifier", "AWS::S3::Bucket": "BucketName", "AWS::SNS::Topic": "TopicName", "AWS::SQS::Queue": "QueueName", } +NAME_TYPE_MAP.update(UNDOCUMENTED_NAME_TYPE_MAP) # Just ignore these models types for now NULL_MODELS = [ @@ -455,6 +481,7 @@ def __getitem__(self, key): return self._parsed_resources[resource_logical_id] else: resource_json = self._resource_json_map.get(resource_logical_id) + if not resource_json: raise KeyError(resource_logical_id) new_resource = parse_and_create_resource( @@ -470,6 +497,34 @@ def __iter__(self): def __len__(self): return len(self._resource_json_map) + def __get_resources_in_dependency_order(self): + resource_map = copy.deepcopy(self._resource_json_map) + resources_in_dependency_order = [] + + def recursively_get_dependencies(resource): + resource_info = resource_map[resource] + + if "DependsOn" not in resource_info: + resources_in_dependency_order.append(resource) + del resource_map[resource] + return + + dependencies = resource_info["DependsOn"] + if isinstance(dependencies, str): # Dependencies may be a string or list + dependencies = [dependencies] + + for dependency in dependencies: + if dependency in resource_map: + recursively_get_dependencies(dependency) + + resources_in_dependency_order.append(resource) + del resource_map[resource] + + while resource_map: + recursively_get_dependencies(list(resource_map.keys())[0]) + + return resources_in_dependency_order + @property def resources(self): return self._resource_json_map.keys() @@ -547,7 +602,7 @@ def create(self): "aws:cloudformation:stack-id": self.get("AWS::StackId"), } ) - for resource in self.resources: + for resource in self.__get_resources_in_dependency_order(): if isinstance(self[resource], ec2_models.TaggedEC2Resource): self.tags["aws:cloudformation:logical-id"] = resource ec2_models.ec2_backends[self._region_name].create_tags( diff --git a/tests/test_cloudformation/test_cloudformation_depends_on.py b/tests/test_cloudformation/test_cloudformation_depends_on.py new file mode 100644 index 000000000000..1b47b40648fb --- /dev/null +++ b/tests/test_cloudformation/test_cloudformation_depends_on.py @@ -0,0 +1,143 @@ +import boto3 +from moto import mock_cloudformation, mock_ecs, mock_autoscaling, mock_s3 +import json + +depends_on_template_list = { + "AWSTemplateFormatVersion": "2010-09-09", + "Resources": { + "ECSCluster": { + "Type": "AWS::ECS::Cluster", + "Properties": {"ClusterName": "test-cluster"}, + }, + "AutoScalingGroup": { + "Type": "AWS::AutoScaling::AutoScalingGroup", + "Properties": { + "AutoScalingGroupName": "test-scaling-group", + "DesiredCapacity": 1, + "MinSize": 1, + "MaxSize": 50, + "LaunchConfigurationName": "test-launch-config", + "AvailabilityZones": ["us-east-1a"], + }, + "DependsOn": ["ECSCluster", "LaunchConfig"], + }, + "LaunchConfig": { + "Type": "AWS::AutoScaling::LaunchConfiguration", + "Properties": {"LaunchConfigurationName": "test-launch-config",}, + }, + }, +} + +depends_on_template_string = { + "AWSTemplateFormatVersion": "2010-09-09", + "Resources": { + "AutoScalingGroup": { + "Type": "AWS::AutoScaling::AutoScalingGroup", + "Properties": { + "AutoScalingGroupName": "test-scaling-group", + "DesiredCapacity": 1, + "MinSize": 1, + "MaxSize": 50, + "LaunchConfigurationName": "test-launch-config", + "AvailabilityZones": ["us-east-1a"], + }, + "DependsOn": "LaunchConfig", + }, + "LaunchConfig": { + "Type": 
"AWS::AutoScaling::LaunchConfiguration", + "Properties": {"LaunchConfigurationName": "test-launch-config",}, + }, + }, +} + + +def make_chained_depends_on_template(): + depends_on_template_linked_dependencies = { + "AWSTemplateFormatVersion": "2010-09-09", + "Resources": { + "Bucket1": { + "Type": "AWS::S3::Bucket", + "Properties": {"BucketName": "test-bucket-0-us-east-1"}, + }, + }, + } + + for i in range(1, 10): + depends_on_template_linked_dependencies["Resources"]["Bucket" + str(i)] = { + "Type": "AWS::S3::Bucket", + "Properties": {"BucketName": "test-bucket-" + str(i) + "-us-east-1"}, + "DependsOn": ["Bucket" + str(i - 1)], + } + + return json.dumps(depends_on_template_linked_dependencies) + + +depends_on_template_list_json = json.dumps(depends_on_template_list) +depends_on_template_string_json = json.dumps(depends_on_template_string) + + +@mock_cloudformation +@mock_autoscaling +@mock_ecs +def test_create_stack_with_depends_on(): + boto3.client("cloudformation", region_name="us-east-1").create_stack( + StackName="depends_on_test", TemplateBody=depends_on_template_list_json + ) + + autoscaling = boto3.client("autoscaling", region_name="us-east-1") + autoscaling_group = autoscaling.describe_auto_scaling_groups()["AutoScalingGroups"][ + 0 + ] + assert autoscaling_group["AutoScalingGroupName"] == "test-scaling-group" + assert autoscaling_group["DesiredCapacity"] == 1 + assert autoscaling_group["MinSize"] == 1 + assert autoscaling_group["MaxSize"] == 50 + assert autoscaling_group["AvailabilityZones"] == ["us-east-1a"] + + launch_configuration = autoscaling.describe_launch_configurations()[ + "LaunchConfigurations" + ][0] + assert launch_configuration["LaunchConfigurationName"] == "test-launch-config" + + ecs = boto3.client("ecs", region_name="us-east-1") + cluster_arn = ecs.list_clusters()["clusterArns"][0] + assert cluster_arn == "arn:aws:ecs:us-east-1:012345678910:cluster/test-cluster" + + +@mock_cloudformation +@mock_autoscaling +def test_create_stack_with_depends_on_string(): + boto3.client("cloudformation", region_name="us-east-1").create_stack( + StackName="depends_on_string_test", TemplateBody=depends_on_template_string_json + ) + + autoscaling = boto3.client("autoscaling", region_name="us-east-1") + autoscaling_group = autoscaling.describe_auto_scaling_groups()["AutoScalingGroups"][ + 0 + ] + assert autoscaling_group["AutoScalingGroupName"] == "test-scaling-group" + assert autoscaling_group["DesiredCapacity"] == 1 + assert autoscaling_group["MinSize"] == 1 + assert autoscaling_group["MaxSize"] == 50 + assert autoscaling_group["AvailabilityZones"] == ["us-east-1a"] + + launch_configuration = autoscaling.describe_launch_configurations()[ + "LaunchConfigurations" + ][0] + assert launch_configuration["LaunchConfigurationName"] == "test-launch-config" + + +@mock_cloudformation +@mock_s3 +def test_create_chained_depends_on_stack(): + boto3.client("cloudformation", region_name="us-east-1").create_stack( + StackName="linked_depends_on_test", + TemplateBody=make_chained_depends_on_template(), + ) + + s3 = boto3.client("s3", region_name="us-east-1") + bucket_response = s3.list_buckets()["Buckets"] + + assert sorted([bucket["Name"] for bucket in bucket_response]) == [ + "test-bucket-" + str(i) + "-us-east-1" for i in range(1, 10) + ] diff --git a/tests/test_cloudformation/test_cloudformation_stack_integration.py b/tests/test_cloudformation/test_cloudformation_stack_integration.py index 3abbab02d705..27bac5e57a4a 100644 --- a/tests/test_cloudformation/test_cloudformation_stack_integration.py 
+++ b/tests/test_cloudformation/test_cloudformation_stack_integration.py @@ -49,7 +49,7 @@ from moto.core import ACCOUNT_ID from moto.dynamodb2.models import Table -from .fixtures import ( +from tests.test_cloudformation.fixtures import ( ec2_classic_eip, fn_join, rds_mysql_with_db_parameter_group, @@ -940,12 +940,10 @@ def test_iam_roles(): role_name_to_id = {} for role_result in role_results: role = iam_conn.get_role(role_result.role_name) - if "my-role" not in role.role_name: + # Role name is not specified, so randomly generated - can't check exact name + if "with-path" in role.role_name: role_name_to_id["with-path"] = role.role_id role.path.should.equal("my-path") - len(role.role_name).should.equal( - 5 - ) # Role name is not specified, so randomly generated - can't check exact name else: role_name_to_id["no-path"] = role.role_id role.role_name.should.equal("my-role-no-path-name") From 59c71760ff96abb8485efecca697335156158e20 Mon Sep 17 00:00:00 2001 From: Steve Pulec Date: Sun, 24 May 2020 02:51:45 -0500 Subject: [PATCH 360/658] Keep order in request body to ensure auth signing works. (#3024) * Keep order in request body to ensure auth signing works. * Lint. * More OrderedDict to ensure data parameter order. * Lint. * Improve CF test assertions. * Fix syntax error. * Cleanup CF test. --- moto/core/responses.py | 17 ++++++++++++----- tests/test_cloudformation/test_validate.py | 14 ++++++-------- 2 files changed, 18 insertions(+), 13 deletions(-) diff --git a/moto/core/responses.py b/moto/core/responses.py index 508bd8c59b31..c52e898982ca 100644 --- a/moto/core/responses.py +++ b/moto/core/responses.py @@ -16,7 +16,7 @@ from jinja2 import Environment, DictLoader, TemplateNotFound import six -from six.moves.urllib.parse import parse_qs, urlparse +from six.moves.urllib.parse import parse_qs, parse_qsl, urlparse import xmltodict from werkzeug.exceptions import HTTPException @@ -30,7 +30,7 @@ def _decode_dict(d): - decoded = {} + decoded = OrderedDict() for key, value in d.items(): if isinstance(key, six.binary_type): newkey = key.decode("utf-8") @@ -199,7 +199,7 @@ def dispatch(cls, *args, **kwargs): return cls()._dispatch(*args, **kwargs) def setup_class(self, request, full_url, headers): - querystring = {} + querystring = OrderedDict() if hasattr(request, "body"): # Boto self.body = request.body @@ -211,7 +211,7 @@ def setup_class(self, request, full_url, headers): # definition for back-compatibility self.body = request.data - querystring = {} + querystring = OrderedDict() for key, value in request.form.items(): querystring[key] = [value] @@ -240,7 +240,14 @@ def setup_class(self, request, full_url, headers): querystring[key] = [value] elif self.body: try: - querystring.update(parse_qs(raw_body, keep_blank_values=True)) + querystring.update( + OrderedDict( + (key, [value]) + for key, value in parse_qsl( + raw_body, keep_blank_values=True + ) + ) + ) except UnicodeEncodeError: pass # ignore encoding errors, as the body may not contain a legitimate querystring if not querystring: diff --git a/tests/test_cloudformation/test_validate.py b/tests/test_cloudformation/test_validate.py index 4dd4d7e08e67..a4278b559771 100644 --- a/tests/test_cloudformation/test_validate.py +++ b/tests/test_cloudformation/test_validate.py @@ -62,10 +62,9 @@ def test_boto3_json_invalid_missing_resource(): cf_conn.validate_template(TemplateBody=dummy_bad_template_json) assert False except botocore.exceptions.ClientError as e: - assert ( - str(e) - == "An error occurred (ValidationError) when calling the 
ValidateTemplate operation: Stack" - " with id Missing top level item Resources to file module does not exist" + str(e).should.contain( + "An error occurred (ValidationError) when calling the ValidateTemplate operation: Stack" + " with id Missing top level" ) assert True @@ -103,9 +102,8 @@ def test_boto3_yaml_invalid_missing_resource(): cf_conn.validate_template(TemplateBody=yaml_bad_template) assert False except botocore.exceptions.ClientError as e: - assert ( - str(e) - == "An error occurred (ValidationError) when calling the ValidateTemplate operation: Stack" - " with id Missing top level item Resources to file module does not exist" + str(e).should.contain( + "An error occurred (ValidationError) when calling the ValidateTemplate operation: Stack" + " with id Missing top level" ) assert True From 8daafaec584ac8f56482198f88f9ae08be792fe7 Mon Sep 17 00:00:00 2001 From: Ben <13878060+ben-nz@users.noreply.github.com> Date: Sun, 24 May 2020 05:25:38 -0400 Subject: [PATCH 361/658] Add tag get_resource support for target groups (#3012) --- moto/resourcegroupstaggingapi/models.py | 28 +++- .../test_resourcegroupstaggingapi.py | 127 ++++++++++++------ 2 files changed, 110 insertions(+), 45 deletions(-) diff --git a/moto/resourcegroupstaggingapi/models.py b/moto/resourcegroupstaggingapi/models.py index b6e35d58606c..4cdf73cc7b92 100644 --- a/moto/resourcegroupstaggingapi/models.py +++ b/moto/resourcegroupstaggingapi/models.py @@ -286,8 +286,7 @@ def get_ec2_tags(res_id): } # TODO add these to the keys and values functions / combine functions - # ELB - + # ELB, resource type elasticloadbalancing:loadbalancer def get_elbv2_tags(arn): result = [] for key, value in self.elbv2_backend.load_balancers[elb.arn].tags.items(): @@ -296,8 +295,8 @@ def get_elbv2_tags(arn): if ( not resource_type_filters - or "elasticloadbalancer" in resource_type_filters - or "elasticloadbalancer:loadbalancer" in resource_type_filters + or "elasticloadbalancing" in resource_type_filters + or "elasticloadbalancing:loadbalancer" in resource_type_filters ): for elb in self.elbv2_backend.load_balancers.values(): tags = get_elbv2_tags(elb.arn) @@ -306,6 +305,27 @@ def get_elbv2_tags(arn): yield {"ResourceARN": "{0}".format(elb.arn), "Tags": tags} + # ELB Target Group, resource type elasticloadbalancing:targetgroup + def get_target_group_tags(arn): + result = [] + for key, value in self.elbv2_backend.target_groups[ + target_group.arn + ].tags.items(): + result.append({"Key": key, "Value": value}) + return result + + if ( + not resource_type_filters + or "elasticloadbalancing" in resource_type_filters + or "elasticloadbalancing:targetgroup" in resource_type_filters + ): + for target_group in self.elbv2_backend.target_groups.values(): + tags = get_target_group_tags(target_group.arn) + if not tag_filter(tags): # Skip if no tags, or invalid filter + continue + + yield {"ResourceARN": "{0}".format(target_group.arn), "Tags": tags} + # EMR Cluster # Glacier Vault diff --git a/tests/test_resourcegroupstaggingapi/test_resourcegroupstaggingapi.py b/tests/test_resourcegroupstaggingapi/test_resourcegroupstaggingapi.py index 3ee517ce8a4c..c14636fff258 100644 --- a/tests/test_resourcegroupstaggingapi/test_resourcegroupstaggingapi.py +++ b/tests/test_resourcegroupstaggingapi/test_resourcegroupstaggingapi.py @@ -9,44 +9,6 @@ from moto import mock_s3 -@mock_s3 -@mock_resourcegroupstaggingapi -def test_get_resources_s3(): - # Tests pagination - s3_client = boto3.client("s3", region_name="eu-central-1") - - # Will end up having key1,key2,key3,key4 
- response_keys = set() - - # Create 4 buckets - for i in range(1, 5): - i_str = str(i) - s3_client.create_bucket( - Bucket="test_bucket" + i_str, - CreateBucketConfiguration={"LocationConstraint": "eu-central-1"}, - ) - s3_client.put_bucket_tagging( - Bucket="test_bucket" + i_str, - Tagging={"TagSet": [{"Key": "key" + i_str, "Value": "value" + i_str}]}, - ) - response_keys.add("key" + i_str) - - rtapi = boto3.client("resourcegroupstaggingapi", region_name="eu-central-1") - resp = rtapi.get_resources(ResourcesPerPage=2) - for resource in resp["ResourceTagMappingList"]: - response_keys.remove(resource["Tags"][0]["Key"]) - - response_keys.should.have.length_of(2) - - resp = rtapi.get_resources( - ResourcesPerPage=2, PaginationToken=resp["PaginationToken"] - ) - for resource in resp["ResourceTagMappingList"]: - response_keys.remove(resource["Tags"][0]["Key"]) - - response_keys.should.have.length_of(0) - - @mock_ec2 @mock_resourcegroupstaggingapi def test_get_resources_ec2(): @@ -233,12 +195,14 @@ def test_get_many_resources(): rtapi = boto3.client("resourcegroupstaggingapi", region_name="us-east-1") - resp = rtapi.get_resources(ResourceTypeFilters=["elasticloadbalancer:loadbalancer"]) + resp = rtapi.get_resources( + ResourceTypeFilters=["elasticloadbalancing:loadbalancer"] + ) resp["ResourceTagMappingList"].should.have.length_of(2) resp["ResourceTagMappingList"][0]["ResourceARN"].should.contain("loadbalancer/") resp = rtapi.get_resources( - ResourceTypeFilters=["elasticloadbalancer:loadbalancer"], + ResourceTypeFilters=["elasticloadbalancing:loadbalancer"], TagFilters=[{"Key": "key_name"}], ) @@ -247,4 +211,85 @@ def test_get_many_resources(): {"Key": "key_name", "Value": "a_value"} ) - # TODO test pagenation + # TODO test pagination + + +@mock_ec2 +@mock_elbv2 +@mock_resourcegroupstaggingapi +def test_get_resources_target_group(): + ec2 = boto3.resource("ec2", region_name="eu-central-1") + elbv2 = boto3.client("elbv2", region_name="eu-central-1") + + vpc = ec2.create_vpc(CidrBlock="172.28.7.0/24", InstanceTenancy="default") + + # Create two tagged target groups + for i in range(1, 3): + i_str = str(i) + + target_group = elbv2.create_target_group( + Name="test" + i_str, + Protocol="HTTP", + Port=8080, + VpcId=vpc.id, + TargetType="instance", + )["TargetGroups"][0] + + elbv2.add_tags( + ResourceArns=[target_group["TargetGroupArn"]], + Tags=[{"Key": "Test", "Value": i_str}], + ) + + rtapi = boto3.client("resourcegroupstaggingapi", region_name="eu-central-1") + + # Basic test + resp = rtapi.get_resources(ResourceTypeFilters=["elasticloadbalancing:targetgroup"]) + resp["ResourceTagMappingList"].should.have.length_of(2) + + # Test tag filtering + resp = rtapi.get_resources( + ResourceTypeFilters=["elasticloadbalancing:targetgroup"], + TagFilters=[{"Key": "Test", "Values": ["1"]}], + ) + resp["ResourceTagMappingList"].should.have.length_of(1) + resp["ResourceTagMappingList"][0]["Tags"].should.contain( + {"Key": "Test", "Value": "1"} + ) + + +@mock_s3 +@mock_resourcegroupstaggingapi +def test_get_resources_s3(): + # Tests pagination + s3_client = boto3.client("s3", region_name="eu-central-1") + + # Will end up having key1,key2,key3,key4 + response_keys = set() + + # Create 4 buckets + for i in range(1, 5): + i_str = str(i) + s3_client.create_bucket( + Bucket="test_bucket" + i_str, + CreateBucketConfiguration={"LocationConstraint": "eu-central-1"}, + ) + s3_client.put_bucket_tagging( + Bucket="test_bucket" + i_str, + Tagging={"TagSet": [{"Key": "key" + i_str, "Value": "value" + i_str}]}, + ) + 
response_keys.add("key" + i_str) + + rtapi = boto3.client("resourcegroupstaggingapi", region_name="eu-central-1") + resp = rtapi.get_resources(ResourcesPerPage=2) + for resource in resp["ResourceTagMappingList"]: + response_keys.remove(resource["Tags"][0]["Key"]) + + response_keys.should.have.length_of(2) + + resp = rtapi.get_resources( + ResourcesPerPage=2, PaginationToken=resp["PaginationToken"] + ) + for resource in resp["ResourceTagMappingList"]: + response_keys.remove(resource["Tags"][0]["Key"]) + + response_keys.should.have.length_of(0) From 1ef3094e45dbf87f7359145b3c4c8d01818bb8eb Mon Sep 17 00:00:00 2001 From: Bert Blommers Date: Sun, 24 May 2020 12:12:35 +0100 Subject: [PATCH 362/658] SQS - Return multiple group-messages in the same request --- moto/sqs/models.py | 9 ++--- moto/sqs/responses.py | 8 +++++ tests/test_sqs/test_sqs.py | 73 +++++++++++++++++++++++++++----------- 3 files changed, 65 insertions(+), 25 deletions(-) diff --git a/moto/sqs/models.py b/moto/sqs/models.py index f88d906b945d..ea3b89f049b6 100644 --- a/moto/sqs/models.py +++ b/moto/sqs/models.py @@ -6,6 +6,7 @@ import re import six import struct +from copy import deepcopy from xml.sax.saxutils import escape from boto3 import Session @@ -101,7 +102,6 @@ def utf8(str): if data_type == "String" or data_type == "Number": value = attr["string_value"] elif data_type == "Binary": - print(data_type, attr["binary_value"], type(attr["binary_value"])) value = base64.b64decode(attr["binary_value"]) else: print( @@ -722,6 +722,7 @@ def receive_messages( previous_result_count = len(result) polling_end = unix_time() + wait_seconds_timeout + currently_pending_groups = deepcopy(queue.pending_message_groups) # queue.messages only contains visible messages while True: @@ -739,11 +740,11 @@ def receive_messages( # The message is pending but is visible again, so the # consumer must have timed out. queue.pending_messages.remove(message) + currently_pending_groups = deepcopy(queue.pending_message_groups) if message.group_id and queue.fifo_queue: - if message.group_id in queue.pending_message_groups: - # There is already one active message with the same - # group, so we cannot deliver this one. + if message.group_id in currently_pending_groups: + # A previous call is still processing messages in this group, so we cannot deliver this one. 
continue queue.pending_messages.add(message) diff --git a/moto/sqs/responses.py b/moto/sqs/responses.py index f5481cc10288..eed50a527a2d 100644 --- a/moto/sqs/responses.py +++ b/moto/sqs/responses.py @@ -232,6 +232,14 @@ def send_message(self): queue_name = self._get_queue_name() + if not message_group_id: + queue = self.sqs_backend.get_queue(queue_name) + if queue.attributes.get("FifoQueue", False): + return self._error( + "MissingParameter", + "The request must contain the parameter MessageGroupId.", + ) + message = self.sqs_backend.send_message( queue_name, message, diff --git a/tests/test_sqs/test_sqs.py b/tests/test_sqs/test_sqs.py index 01e34de0b61c..31bbafffb1b8 100644 --- a/tests/test_sqs/test_sqs.py +++ b/tests/test_sqs/test_sqs.py @@ -1164,7 +1164,7 @@ def test_send_message_batch_with_empty_list(): @mock_sqs def test_batch_change_message_visibility(): - if os.environ.get("TEST_SERVER_MODE", "false").lower() == "true": + if settings.TEST_SERVER_MODE: raise SkipTest("Cant manipulate time in server mode") with freeze_time("2015-01-01 12:00:00"): @@ -1174,9 +1174,15 @@ def test_batch_change_message_visibility(): ) queue_url = resp["QueueUrl"] - sqs.send_message(QueueUrl=queue_url, MessageBody="msg1") - sqs.send_message(QueueUrl=queue_url, MessageBody="msg2") - sqs.send_message(QueueUrl=queue_url, MessageBody="msg3") + sqs.send_message( + QueueUrl=queue_url, MessageBody="msg1", MessageGroupId="group1" + ) + sqs.send_message( + QueueUrl=queue_url, MessageBody="msg2", MessageGroupId="group2" + ) + sqs.send_message( + QueueUrl=queue_url, MessageBody="msg3", MessageGroupId="group3" + ) with freeze_time("2015-01-01 12:01:00"): receive_resp = sqs.receive_message(QueueUrl=queue_url, MaxNumberOfMessages=2) @@ -1529,7 +1535,7 @@ def test_create_fifo_queue_with_dlq(): @mock_sqs def test_queue_with_dlq(): - if os.environ.get("TEST_SERVER_MODE", "false").lower() == "true": + if settings.TEST_SERVER_MODE: raise SkipTest("Cant manipulate time in server mode") sqs = boto3.client("sqs", region_name="us-east-1") @@ -1554,8 +1560,12 @@ def test_queue_with_dlq(): ) queue_url2 = resp["QueueUrl"] - sqs.send_message(QueueUrl=queue_url2, MessageBody="msg1") - sqs.send_message(QueueUrl=queue_url2, MessageBody="msg2") + sqs.send_message( + QueueUrl=queue_url2, MessageBody="msg1", MessageGroupId="group" + ) + sqs.send_message( + QueueUrl=queue_url2, MessageBody="msg2", MessageGroupId="group" + ) with freeze_time("2015-01-01 13:00:00"): resp = sqs.receive_message( @@ -1686,20 +1696,24 @@ def test_receive_messages_with_message_group_id(): queue.set_attributes(Attributes={"VisibilityTimeout": "3600"}) queue.send_message(MessageBody="message-1", MessageGroupId="group") queue.send_message(MessageBody="message-2", MessageGroupId="group") + queue.send_message(MessageBody="message-3", MessageGroupId="group") + queue.send_message(MessageBody="separate-message", MessageGroupId="anothergroup") - messages = queue.receive_messages() - messages.should.have.length_of(1) - message = messages[0] + messages = queue.receive_messages(MaxNumberOfMessages=2) + messages.should.have.length_of(2) + messages[0].attributes["MessageGroupId"].should.equal("group") - # received message is not deleted! 
- - messages = queue.receive_messages(WaitTimeSeconds=0) - messages.should.have.length_of(0) + # Different client can not 'see' messages from the group until they are processed + messages_for_client_2 = queue.receive_messages(WaitTimeSeconds=0) + messages_for_client_2.should.have.length_of(1) + messages_for_client_2[0].body.should.equal("separate-message") # message is now processed, next one should be available - message.delete() + for message in messages: + message.delete() messages = queue.receive_messages() messages.should.have.length_of(1) + messages[0].body.should.equal("message-3") @mock_sqs @@ -1730,7 +1744,7 @@ def test_receive_messages_with_message_group_id_on_requeue(): @mock_sqs def test_receive_messages_with_message_group_id_on_visibility_timeout(): - if os.environ.get("TEST_SERVER_MODE", "false").lower() == "true": + if settings.TEST_SERVER_MODE: raise SkipTest("Cant manipulate time in server mode") with freeze_time("2015-01-01 12:00:00"): @@ -1746,12 +1760,12 @@ def test_receive_messages_with_message_group_id_on_visibility_timeout(): messages.should.have.length_of(1) message = messages[0] - # received message is not deleted! - - messages = queue.receive_messages(WaitTimeSeconds=0) - messages.should.have.length_of(0) + # received message is not processed yet + messages_for_second_client = queue.receive_messages(WaitTimeSeconds=0) + messages_for_second_client.should.have.length_of(0) - message.change_visibility(VisibilityTimeout=10) + for message in messages: + message.change_visibility(VisibilityTimeout=10) with freeze_time("2015-01-01 12:00:05"): # no timeout yet @@ -1794,3 +1808,20 @@ def test_list_queues_limits_to_1000_queues(): list(resource.queues.filter(QueueNamePrefix="test-queue")).should.have.length_of( 1000 ) + + +@mock_sqs +def test_send_messages_to_fifo_without_message_group_id(): + sqs = boto3.resource("sqs", region_name="eu-west-3") + queue = sqs.create_queue( + QueueName="blah.fifo", + Attributes={"FifoQueue": "true", "ContentBasedDeduplication": "true"}, + ) + + with assert_raises(Exception) as e: + queue.send_message(MessageBody="message-1") + ex = e.exception + ex.response["Error"]["Code"].should.equal("MissingParameter") + ex.response["Error"]["Message"].should.equal( + "The request must contain the parameter MessageGroupId." 
+ ) From 31ce74a842c3a0d5a82afb431dca0afed7b89fe5 Mon Sep 17 00:00:00 2001 From: Zach Brookler <39153813+zbrookle@users.noreply.github.com> Date: Sun, 24 May 2020 07:21:29 -0400 Subject: [PATCH 363/658] Fix autoscaling tags (#3010) * ENH: Add unit test for propagation tags * BUG: Add missing translation of boolean PropagateAtLaunch tag values to strings * BUG: Should really be checking for "true" and not True * CLN: Black formatting --- moto/autoscaling/models.py | 14 ++++- .../test_cloudformation_stack_integration.py | 55 +++++++++++++++++++ 2 files changed, 67 insertions(+), 2 deletions(-) diff --git a/moto/autoscaling/models.py b/moto/autoscaling/models.py index 1da12a09cd5a..f4185da6ce88 100644 --- a/moto/autoscaling/models.py +++ b/moto/autoscaling/models.py @@ -301,6 +301,14 @@ def _set_azs_and_vpcs(self, availability_zones, vpc_zone_identifier, update=Fals self.availability_zones = availability_zones self.vpc_zone_identifier = vpc_zone_identifier + @staticmethod + def __set_string_propagate_at_launch_booleans_on_tags(tags): + bool_to_string = {True: "true", False: "false"} + for tag in tags: + if "PropagateAtLaunch" in tag: + tag["PropagateAtLaunch"] = bool_to_string[tag["PropagateAtLaunch"]] + return tags + @classmethod def create_from_cloudformation_json( cls, resource_name, cloudformation_json, region_name @@ -329,7 +337,9 @@ def create_from_cloudformation_json( target_group_arns=target_group_arns, placement_group=None, termination_policies=properties.get("TerminationPolicies", []), - tags=properties.get("Tags", []), + tags=cls.__set_string_propagate_at_launch_booleans_on_tags( + properties.get("Tags", []) + ), new_instances_protected_from_scale_in=properties.get( "NewInstancesProtectedFromScaleIn", False ), @@ -455,7 +465,7 @@ def get_propagated_tags(self): # boto3 and cloudformation use PropagateAtLaunch if "propagate_at_launch" in tag and tag["propagate_at_launch"] == "true": propagated_tags[tag["key"]] = tag["value"] - if "PropagateAtLaunch" in tag and tag["PropagateAtLaunch"]: + if "PropagateAtLaunch" in tag and tag["PropagateAtLaunch"] == "true": propagated_tags[tag["Key"]] = tag["Value"] return propagated_tags diff --git a/tests/test_cloudformation/test_cloudformation_stack_integration.py b/tests/test_cloudformation/test_cloudformation_stack_integration.py index 27bac5e57a4a..3abb3373da87 100644 --- a/tests/test_cloudformation/test_cloudformation_stack_integration.py +++ b/tests/test_cloudformation/test_cloudformation_stack_integration.py @@ -21,6 +21,7 @@ from moto import ( mock_autoscaling_deprecated, + mock_autoscaling, mock_cloudformation, mock_cloudformation_deprecated, mock_datapipeline_deprecated, @@ -2496,3 +2497,57 @@ def test_stack_events_create_rule_as_target(): log_groups["logGroups"][0]["logGroupName"].should.equal(rules["Rules"][0]["Arn"]) log_groups["logGroups"][0]["retentionInDays"].should.equal(3) + + +@mock_cloudformation +@mock_autoscaling +def test_autoscaling_propagate_tags(): + autoscaling_group_with_tags = { + "AWSTemplateFormatVersion": "2010-09-09", + "Resources": { + "AutoScalingGroup": { + "Type": "AWS::AutoScaling::AutoScalingGroup", + "Properties": { + "AutoScalingGroupName": "test-scaling-group", + "DesiredCapacity": 1, + "MinSize": 1, + "MaxSize": 50, + "LaunchConfigurationName": "test-launch-config", + "AvailabilityZones": ["us-east-1a"], + "Tags": [ + { + "Key": "test-key-propagate", + "Value": "test", + "PropagateAtLaunch": True, + }, + { + "Key": "test-key-no-propagate", + "Value": "test", + "PropagateAtLaunch": False, + }, + ], + }, + 
"DependsOn": "LaunchConfig", + }, + "LaunchConfig": { + "Type": "AWS::AutoScaling::LaunchConfiguration", + "Properties": {"LaunchConfigurationName": "test-launch-config"}, + }, + }, + } + boto3.client("cloudformation", "us-east-1").create_stack( + StackName="propagate_tags_test", + TemplateBody=json.dumps(autoscaling_group_with_tags), + ) + + autoscaling = boto3.client("autoscaling", "us-east-1") + + autoscaling_group_tags = autoscaling.describe_auto_scaling_groups()[ + "AutoScalingGroups" + ][0]["Tags"] + propagation_dict = { + tag["Key"]: tag["PropagateAtLaunch"] for tag in autoscaling_group_tags + } + + assert propagation_dict["test-key-propagate"] + assert not propagation_dict["test-key-no-propagate"] From 2320e8264796c4b1e62382af1a7e176b314ca780 Mon Sep 17 00:00:00 2001 From: Maxim Kirilov Date: Sun, 24 May 2020 14:22:45 +0300 Subject: [PATCH 364/658] Add support for detaching volumes upon instance termination (#2999) --- moto/ec2/exceptions.py | 10 +++ moto/ec2/models.py | 49 +++++++++++--- tests/test_ec2/test_instances.py | 106 ++++++++++++++++++++++++++++++- 3 files changed, 152 insertions(+), 13 deletions(-) diff --git a/moto/ec2/exceptions.py b/moto/ec2/exceptions.py index 5af4690aea35..4c47adbb9129 100644 --- a/moto/ec2/exceptions.py +++ b/moto/ec2/exceptions.py @@ -231,6 +231,16 @@ def __init__(self, volume_id, instance_id): ) +class InvalidVolumeDetachmentError(EC2ClientError): + def __init__(self, volume_id, instance_id, device): + super(InvalidVolumeDetachmentError, self).__init__( + "InvalidAttachment.NotFound", + "The volume {0} is not attached to instance {1} as device {2}".format( + volume_id, instance_id, device + ), + ) + + class VolumeInUseError(EC2ClientError): def __init__(self, volume_id, instance_id): super(VolumeInUseError, self).__init__( diff --git a/moto/ec2/models.py b/moto/ec2/models.py index bab4636af731..78e743540b43 100644 --- a/moto/ec2/models.py +++ b/moto/ec2/models.py @@ -72,6 +72,7 @@ InvalidVolumeIdError, VolumeInUseError, InvalidVolumeAttachmentError, + InvalidVolumeDetachmentError, InvalidVpcCidrBlockAssociationIdError, InvalidVPCPeeringConnectionIdError, InvalidVPCPeeringConnectionStateTransitionError, @@ -560,23 +561,34 @@ def __del__(self): # worst case we'll get IP address exaustion... rarely pass - def add_block_device(self, size, device_path, snapshot_id=None, encrypted=False): + def add_block_device( + self, + size, + device_path, + snapshot_id=None, + encrypted=False, + delete_on_termination=False, + ): volume = self.ec2_backend.create_volume( size, self.region_name, snapshot_id, encrypted ) - self.ec2_backend.attach_volume(volume.id, self.id, device_path) + self.ec2_backend.attach_volume( + volume.id, self.id, device_path, delete_on_termination + ) def setup_defaults(self): # Default have an instance with root volume should you not wish to # override with attach volume cmd. 
volume = self.ec2_backend.create_volume(8, "us-east-1a") - self.ec2_backend.attach_volume(volume.id, self.id, "/dev/sda1") + self.ec2_backend.attach_volume(volume.id, self.id, "/dev/sda1", True) def teardown_defaults(self): - if "/dev/sda1" in self.block_device_mapping: - volume_id = self.block_device_mapping["/dev/sda1"].volume_id - self.ec2_backend.detach_volume(volume_id, self.id, "/dev/sda1") - self.ec2_backend.delete_volume(volume_id) + for device_path in list(self.block_device_mapping.keys()): + volume = self.block_device_mapping[device_path] + volume_id = volume.volume_id + self.ec2_backend.detach_volume(volume_id, self.id, device_path) + if volume.delete_on_termination: + self.ec2_backend.delete_volume(volume_id) @property def get_block_device_mapping(self): @@ -897,8 +909,15 @@ def add_instances(self, image_id, count, user_data, security_group_names, **kwar volume_size = block_device["Ebs"].get("VolumeSize") snapshot_id = block_device["Ebs"].get("SnapshotId") encrypted = block_device["Ebs"].get("Encrypted", False) + delete_on_termination = block_device["Ebs"].get( + "DeleteOnTermination", False + ) new_instance.add_block_device( - volume_size, device_name, snapshot_id, encrypted + volume_size, + device_name, + snapshot_id, + encrypted, + delete_on_termination, ) else: new_instance.setup_defaults() @@ -2475,7 +2494,9 @@ def delete_volume(self, volume_id): return self.volumes.pop(volume_id) raise InvalidVolumeIdError(volume_id) - def attach_volume(self, volume_id, instance_id, device_path): + def attach_volume( + self, volume_id, instance_id, device_path, delete_on_termination=False + ): volume = self.get_volume(volume_id) instance = self.get_instance(instance_id) @@ -2489,17 +2510,25 @@ def attach_volume(self, volume_id, instance_id, device_path): status=volume.status, size=volume.size, attach_time=utc_date_and_time(), + delete_on_termination=delete_on_termination, ) instance.block_device_mapping[device_path] = bdt return volume.attachment def detach_volume(self, volume_id, instance_id, device_path): volume = self.get_volume(volume_id) - self.get_instance(instance_id) + instance = self.get_instance(instance_id) old_attachment = volume.attachment if not old_attachment: raise InvalidVolumeAttachmentError(volume_id, instance_id) + device_path = device_path or old_attachment.device + + try: + del instance.block_device_mapping[device_path] + except KeyError: + raise InvalidVolumeDetachmentError(volume_id, instance_id, device_path) + old_attachment.status = "detached" volume.attachment = None diff --git a/tests/test_ec2/test_instances.py b/tests/test_ec2/test_instances.py index d53bd14aaa3d..d25880975af1 100644 --- a/tests/test_ec2/test_instances.py +++ b/tests/test_ec2/test_instances.py @@ -99,6 +99,106 @@ def test_instance_launch_and_terminate(): instance.state.should.equal("terminated") +@mock_ec2 +def test_instance_terminate_discard_volumes(): + + ec2_resource = boto3.resource("ec2", "us-west-1") + + result = ec2_resource.create_instances( + ImageId="ami-d3adb33f", + MinCount=1, + MaxCount=1, + BlockDeviceMappings=[ + { + "DeviceName": "/dev/sda1", + "Ebs": {"VolumeSize": 50, "DeleteOnTermination": True}, + } + ], + ) + instance = result[0] + + instance_volume_ids = [] + for volume in instance.volumes.all(): + instance_volume_ids.append(volume.volume_id) + + instance.terminate() + instance.wait_until_terminated() + + assert not list(ec2_resource.volumes.all()) + + +@mock_ec2 +def test_instance_terminate_keep_volumes(): + ec2_resource = boto3.resource("ec2", "us-west-1") + + result = 
ec2_resource.create_instances( + ImageId="ami-d3adb33f", + MinCount=1, + MaxCount=1, + BlockDeviceMappings=[{"DeviceName": "/dev/sda1", "Ebs": {"VolumeSize": 50}}], + ) + instance = result[0] + + instance_volume_ids = [] + for volume in instance.volumes.all(): + instance_volume_ids.append(volume.volume_id) + + instance.terminate() + instance.wait_until_terminated() + + assert len(instance_volume_ids) == 1 + volume = ec2_resource.Volume(instance_volume_ids[0]) + volume.state.should.equal("available") + + +@mock_ec2 +def test_instance_terminate_detach_volumes(): + ec2_resource = boto3.resource("ec2", "us-west-1") + result = ec2_resource.create_instances( + ImageId="ami-d3adb33f", + MinCount=1, + MaxCount=1, + BlockDeviceMappings=[ + {"DeviceName": "/dev/sda1", "Ebs": {"VolumeSize": 50}}, + {"DeviceName": "/dev/sda2", "Ebs": {"VolumeSize": 50}}, + ], + ) + instance = result[0] + for volume in instance.volumes.all(): + response = instance.detach_volume(VolumeId=volume.volume_id) + response["State"].should.equal("detaching") + + instance.terminate() + instance.wait_until_terminated() + + assert len(list(ec2_resource.volumes.all())) == 2 + + +@mock_ec2 +def test_instance_detach_volume_wrong_path(): + ec2_resource = boto3.resource("ec2", "us-west-1") + result = ec2_resource.create_instances( + ImageId="ami-d3adb33f", + MinCount=1, + MaxCount=1, + BlockDeviceMappings=[{"DeviceName": "/dev/sda1", "Ebs": {"VolumeSize": 50}},], + ) + instance = result[0] + for volume in instance.volumes.all(): + with assert_raises(ClientError) as ex: + instance.detach_volume(VolumeId=volume.volume_id, Device="/dev/sdf") + + ex.exception.response["Error"]["Code"].should.equal( + "InvalidAttachment.NotFound" + ) + ex.exception.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) + ex.exception.response["Error"]["Message"].should.equal( + "The volume {0} is not attached to instance {1} as device {2}".format( + volume.volume_id, instance.instance_id, "/dev/sdf" + ) + ) + + @mock_ec2_deprecated def test_terminate_empty_instances(): conn = boto.connect_ec2("the_key", "the_secret") @@ -1416,14 +1516,14 @@ def test_modify_delete_on_termination(): result = ec2_client.create_instances(ImageId="ami-12345678", MinCount=1, MaxCount=1) instance = result[0] instance.load() - instance.block_device_mappings[0]["Ebs"]["DeleteOnTermination"].should.be(False) + instance.block_device_mappings[0]["Ebs"]["DeleteOnTermination"].should.be(True) instance.modify_attribute( BlockDeviceMappings=[ - {"DeviceName": "/dev/sda1", "Ebs": {"DeleteOnTermination": True}} + {"DeviceName": "/dev/sda1", "Ebs": {"DeleteOnTermination": False}} ] ) instance.load() - instance.block_device_mappings[0]["Ebs"]["DeleteOnTermination"].should.be(True) + instance.block_device_mappings[0]["Ebs"]["DeleteOnTermination"].should.be(False) @mock_ec2 From 93feeec1b7fa6d71d1507b585c0266f0960d560d Mon Sep 17 00:00:00 2001 From: Aidan Rowe Date: Mon, 25 May 2020 00:06:02 +1000 Subject: [PATCH 365/658] SFN - fix InvalidARN exception on start_execution (#3007) --- moto/stepfunctions/responses.py | 5 ++++- .../test_stepfunctions/test_stepfunctions.py | 20 ++++++++++++++++++- 2 files changed, 23 insertions(+), 2 deletions(-) diff --git a/moto/stepfunctions/responses.py b/moto/stepfunctions/responses.py index 689961d5aeb0..7083167b6ecf 100644 --- a/moto/stepfunctions/responses.py +++ b/moto/stepfunctions/responses.py @@ -95,7 +95,10 @@ def list_tags_for_resource(self): def start_execution(self): arn = self._get_param("stateMachineArn") name = self._get_param("name") - 
execution = self.stepfunction_backend.start_execution(arn, name) + try: + execution = self.stepfunction_backend.start_execution(arn, name) + except AWSError as err: + return err.response() response = { "executionArn": execution.execution_arn, "startDate": execution.start_date, diff --git a/tests/test_stepfunctions/test_stepfunctions.py b/tests/test_stepfunctions/test_stepfunctions.py index eb2ace53de8d..4324964d8b36 100644 --- a/tests/test_stepfunctions/test_stepfunctions.py +++ b/tests/test_stepfunctions/test_stepfunctions.py @@ -253,6 +253,15 @@ def test_state_machine_throws_error_when_describing_unknown_machine(): client.describe_state_machine(stateMachineArn=unknown_state_machine) +@mock_stepfunctions +@mock_sts +def test_state_machine_throws_error_when_describing_bad_arn(): + client = boto3.client("stepfunctions", region_name=region) + # + with assert_raises(ClientError) as exc: + client.describe_state_machine(stateMachineArn="bad") + + @mock_stepfunctions @mock_sts def test_state_machine_throws_error_when_describing_machine_in_different_account(): @@ -362,6 +371,15 @@ def test_state_machine_start_execution(): execution["startDate"].should.be.a(datetime) +@mock_stepfunctions +@mock_sts +def test_state_machine_start_execution_bad_arn_raises_exception(): + client = boto3.client("stepfunctions", region_name=region) + # + with assert_raises(ClientError) as exc: + client.start_execution(stateMachineArn="bad") + + @mock_stepfunctions @mock_sts def test_state_machine_start_execution_with_custom_name(): @@ -446,7 +464,7 @@ def test_state_machine_describe_execution(): @mock_stepfunctions @mock_sts -def test_state_machine_throws_error_when_describing_unknown_machine(): +def test_execution_throws_error_when_describing_unknown_execution(): client = boto3.client("stepfunctions", region_name=region) # with assert_raises(ClientError) as exc: From 97a6e8d9e8635e5ebe34bbfbbd9e75ce37a58e83 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andr=C3=A9=20Nardy?= Date: Tue, 26 May 2020 07:04:59 -0300 Subject: [PATCH 366/658] Enhancement/describe network acls (#3003) * update describe_network_acls and create unit test * add fail test case * adjustment after feedback * fix result test --- IMPLEMENTATION_COVERAGE.md | 2 +- moto/ec2/models.py | 37 ++++++++++++++++------------- moto/ec2/responses/network_acls.py | 2 +- tests/test_ec2/test_network_acls.py | 29 ++++++++++++++++++++++ 4 files changed, 51 insertions(+), 19 deletions(-) diff --git a/IMPLEMENTATION_COVERAGE.md b/IMPLEMENTATION_COVERAGE.md index 1555da1c8748..bfcdd316797c 100644 --- a/IMPLEMENTATION_COVERAGE.md +++ b/IMPLEMENTATION_COVERAGE.md @@ -2738,7 +2738,7 @@ - [ ] describe_local_gateways - [ ] describe_moving_addresses - [ ] describe_nat_gateways -- [ ] describe_network_acls +- [X] describe_network_acls - [ ] describe_network_interface_attribute - [ ] describe_network_interface_permissions - [X] describe_network_interfaces diff --git a/moto/ec2/models.py b/moto/ec2/models.py index 78e743540b43..f8ebd02ec2b2 100644 --- a/moto/ec2/models.py +++ b/moto/ec2/models.py @@ -4750,23 +4750,7 @@ def add_default_entries(self, network_acl_id): ) def get_all_network_acls(self, network_acl_ids=None, filters=None): - network_acls = self.network_acls.values() - - if network_acl_ids: - network_acls = [ - network_acl - for network_acl in network_acls - if network_acl.id in network_acl_ids - ] - if len(network_acls) != len(network_acl_ids): - invalid_id = list( - set(network_acl_ids).difference( - set([network_acl.id for network_acl in network_acls]) - ) - )[0] - raise 
InvalidRouteTableIdError(invalid_id) - - return generic_filter(filters, network_acls) + self.describe_network_acls(network_acl_ids, filters) def delete_network_acl(self, network_acl_id): deleted = self.network_acls.pop(network_acl_id, None) @@ -4886,6 +4870,25 @@ def associate_default_network_acl_with_subnet(self, subnet_id, vpc_id): self, association_id, subnet_id, acl.id ) + def describe_network_acls(self, network_acl_ids=None, filters=None): + network_acls = self.network_acls.values() + + if network_acl_ids: + network_acls = [ + network_acl + for network_acl in network_acls + if network_acl.id in network_acl_ids + ] + if len(network_acls) != len(network_acl_ids): + invalid_id = list( + set(network_acl_ids).difference( + set([network_acl.id for network_acl in network_acls]) + ) + )[0] + raise InvalidRouteTableIdError(invalid_id) + + return generic_filter(filters, network_acls) + class NetworkAclAssociation(object): def __init__(self, ec2_backend, new_association_id, subnet_id, network_acl_id): diff --git a/moto/ec2/responses/network_acls.py b/moto/ec2/responses/network_acls.py index 8d89e6065fcf..c0a9c7c9006e 100644 --- a/moto/ec2/responses/network_acls.py +++ b/moto/ec2/responses/network_acls.py @@ -83,7 +83,7 @@ def replace_network_acl_entry(self): def describe_network_acls(self): network_acl_ids = self._get_multi_param("NetworkAclId") filters = filters_from_querystring(self.querystring) - network_acls = self.ec2_backend.get_all_network_acls(network_acl_ids, filters) + network_acls = self.ec2_backend.describe_network_acls(network_acl_ids, filters) template = self.response_template(DESCRIBE_NETWORK_ACL_RESPONSE) return template.render(network_acls=network_acls) diff --git a/tests/test_ec2/test_network_acls.py b/tests/test_ec2/test_network_acls.py index fb62f717848a..f255fa67fe4a 100644 --- a/tests/test_ec2/test_network_acls.py +++ b/tests/test_ec2/test_network_acls.py @@ -275,3 +275,32 @@ def test_duplicate_network_acl_entry(): rule_number ) ) + + +@mock_ec2 +def test_describe_network_acls(): + conn = boto3.client("ec2", region_name="us-west-2") + + vpc = conn.create_vpc(CidrBlock="10.0.0.0/16") + vpc_id = vpc["Vpc"]["VpcId"] + + network_acl = conn.create_network_acl(VpcId=vpc_id) + + network_acl_id = network_acl["NetworkAcl"]["NetworkAclId"] + + resp = conn.describe_network_acls(NetworkAclIds=[network_acl_id]) + result = resp["NetworkAcls"] + + result.should.have.length_of(1) + result[0]["NetworkAclId"].should.equal(network_acl_id) + + resp2 = conn.describe_network_acls()["NetworkAcls"] + resp2.should.have.length_of(3) + + with assert_raises(ClientError) as ex: + conn.describe_network_acls(NetworkAclIds=["1"]) + + str(ex.exception).should.equal( + "An error occurred (InvalidRouteTableID.NotFound) when calling the " + "DescribeNetworkAcls operation: The routeTable ID '1' does not exist" + ) From b7a1b666a8cb2118b22cfd3508eba10763c1c598 Mon Sep 17 00:00:00 2001 From: jweite Date: Wed, 27 May 2020 12:00:28 -0400 Subject: [PATCH 367/658] =?UTF-8?q?Corrected=20bug=20in=20IAM=20delete=5Fr?= =?UTF-8?q?ole()=20due=20to=20overloading=20of=20name=20'role'=20=E2=80=A6?= =?UTF-8?q?=20(#3019)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Corrected bug in IAM delete_role() due to overloading of name 'role' in function * PR-requested fixes: added region to tests boto client create, reformatted with black Co-authored-by: Joseph Weitekamp --- moto/iam/models.py | 4 ++-- tests/test_iam/test_iam.py | 33 +++++++++++++++++++++++++++++++++ 2 files changed, 35 
insertions(+), 2 deletions(-) diff --git a/moto/iam/models.py b/moto/iam/models.py index d3907da266a9..41484add214b 100755 --- a/moto/iam/models.py +++ b/moto/iam/models.py @@ -1148,8 +1148,8 @@ def get_role_by_arn(self, arn): def delete_role(self, role_name): role = self.get_role(role_name) for instance_profile in self.get_instance_profiles(): - for role in instance_profile.roles: - if role.name == role_name: + for profile_role in instance_profile.roles: + if profile_role.name == role_name: raise IAMConflictException( code="DeleteConflict", message="Cannot delete entity, must remove roles from instance profile first.", diff --git a/tests/test_iam/test_iam.py b/tests/test_iam/test_iam.py index 6792d8f52ce4..825e12fe0082 100644 --- a/tests/test_iam/test_iam.py +++ b/tests/test_iam/test_iam.py @@ -2815,3 +2815,36 @@ def test_list_user_tags(): [{"Key": "Stan", "Value": "The Caddy"}, {"Key": "like-a", "Value": "glove"}] ) response["IsTruncated"].should_not.be.ok + + +@mock_iam() +def test_delete_role_with_instance_profiles_present(): + iam = boto3.client("iam", region_name="us-east-1") + + trust_policy = """ + { + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Principal": { + "Service": "ec2.amazonaws.com" + }, + "Action": "sts:AssumeRole" + } + ] + } + """ + trust_policy = trust_policy.strip() + + iam.create_role(RoleName="Role1", AssumeRolePolicyDocument=trust_policy) + iam.create_instance_profile(InstanceProfileName="IP1") + iam.add_role_to_instance_profile(InstanceProfileName="IP1", RoleName="Role1") + + iam.create_role(RoleName="Role2", AssumeRolePolicyDocument=trust_policy) + + iam.delete_role(RoleName="Role2") + + role_names = [role["RoleName"] for role in iam.list_roles()["Roles"]] + assert "Role1" in role_names + assert "Role2" not in role_names From 4d3e3c8c5e7737e1f2d050d441d2d2e399de2384 Mon Sep 17 00:00:00 2001 From: jweite Date: Wed, 27 May 2020 12:21:03 -0400 Subject: [PATCH 368/658] implemented s3 default encryption methods (#3022) * implemented s3 default encryption methods * PR adjustments: moved logic for retrieving bucket's encrypted status to the backend. 
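
* Usage sketch (illustrative only, not part of this diff; the bucket name is a
  placeholder): with the new handlers in place, a full boto3 round-trip against
  the mock looks roughly like

    import boto3
    from moto import mock_s3

    @mock_s3
    def encryption_roundtrip():
        s3 = boto3.client("s3", region_name="us-east-1")
        s3.create_bucket(Bucket="example-bucket")
        # set a default AES256 policy on the bucket
        s3.put_bucket_encryption(
            Bucket="example-bucket",
            ServerSideEncryptionConfiguration={
                "Rules": [
                    {"ApplyServerSideEncryptionByDefault": {"SSEAlgorithm": "AES256"}}
                ]
            },
        )
        # read the configuration back, then remove it again
        resp = s3.get_bucket_encryption(Bucket="example-bucket")
        assert "ServerSideEncryptionConfiguration" in resp
        s3.delete_bucket_encryption(Bucket="example-bucket")

    encryption_roundtrip()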
Co-authored-by: Joseph Weitekamp --- moto/s3/models.py | 10 +++++++ moto/s3/responses.py | 62 +++++++++++++++++++++++++++++++++++++++- tests/test_s3/test_s3.py | 33 +++++++++++++++++++++ 3 files changed, 104 insertions(+), 1 deletion(-) diff --git a/moto/s3/models.py b/moto/s3/models.py index 3020fd45e81c..25ead4f5ec3b 100644 --- a/moto/s3/models.py +++ b/moto/s3/models.py @@ -778,6 +778,7 @@ def __init__(self, name, region_name): self.payer = "BucketOwner" self.creation_date = datetime.datetime.utcnow() self.public_access_block = None + self.encryption = None @property def location(self): @@ -1227,6 +1228,9 @@ def set_bucket_versioning(self, bucket_name, status): def get_bucket_versioning(self, bucket_name): return self.get_bucket(bucket_name).versioning_status + def get_bucket_encryption(self, bucket_name): + return self.get_bucket(bucket_name).encryption + def get_bucket_latest_versions(self, bucket_name): versions = self.get_bucket_versions(bucket_name) latest_modified_per_key = {} @@ -1275,6 +1279,12 @@ def delete_bucket_policy(self, bucket_name, body): bucket = self.get_bucket(bucket_name) bucket.policy = None + def put_bucket_encryption(self, bucket_name, encryption): + self.get_bucket(bucket_name).encryption = encryption + + def delete_bucket_encryption(self, bucket_name): + self.get_bucket(bucket_name).encryption = None + def set_bucket_lifecycle(self, bucket_name, rules): bucket = self.get_bucket(bucket_name) bucket.set_lifecycle(rules) diff --git a/moto/s3/responses.py b/moto/s3/responses.py index 98f28f012679..4aaba1fcd548 100644 --- a/moto/s3/responses.py +++ b/moto/s3/responses.py @@ -466,6 +466,13 @@ def _bucket_response_get(self, bucket_name, querystring): is_truncated="false", ), ) + elif "encryption" in querystring: + encryption = self.backend.get_bucket_encryption(bucket_name) + if not encryption: + template = self.response_template(S3_NO_ENCRYPTION) + return 404, {}, template.render(bucket_name=bucket_name) + template = self.response_template(S3_ENCRYPTION_CONFIG) + return 200, {}, template.render(encryption=encryption) elif querystring.get("list-type", [None])[0] == "2": return 200, {}, self._handle_list_objects_v2(bucket_name, querystring) @@ -703,7 +710,16 @@ def _bucket_response_put( bucket_name, pab_config["PublicAccessBlockConfiguration"] ) return "" - + elif "encryption" in querystring: + try: + self.backend.put_bucket_encryption( + bucket_name, self._encryption_config_from_xml(body) + ) + return "" + except KeyError: + raise MalformedXML() + except Exception as e: + raise e else: # us-east-1, the default AWS region behaves a bit differently # - you should not use it as a location constraint --> it fails @@ -768,6 +784,9 @@ def _bucket_response_delete(self, body, bucket_name, querystring): elif "publicAccessBlock" in querystring: self.backend.delete_bucket_public_access_block(bucket_name) return 204, {}, "" + elif "encryption" in querystring: + bucket = self.backend.delete_bucket_encryption(bucket_name) + return 204, {}, "" removed_bucket = self.backend.delete_bucket(bucket_name) @@ -1427,6 +1446,22 @@ def _cors_from_xml(self, xml): return [parsed_xml["CORSConfiguration"]["CORSRule"]] + def _encryption_config_from_xml(self, xml): + parsed_xml = xmltodict.parse(xml) + + if ( + not parsed_xml["ServerSideEncryptionConfiguration"].get("Rule") + or not parsed_xml["ServerSideEncryptionConfiguration"]["Rule"].get( + "ApplyServerSideEncryptionByDefault" + ) + or not parsed_xml["ServerSideEncryptionConfiguration"]["Rule"][ + "ApplyServerSideEncryptionByDefault" + 
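+            # all three levels (Rule -> ApplyServerSideEncryptionByDefault -> SSEAlgorithm)
+            # must be present; anything less is rejected as MalformedXML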
].get("SSEAlgorithm") + ): + raise MalformedXML() + + return [parsed_xml["ServerSideEncryptionConfiguration"]] + def _logging_from_xml(self, xml): parsed_xml = xmltodict.parse(xml) @@ -2130,6 +2165,31 @@ def _key_response_post(self, request, body, bucket_name, query, key_name): """ +S3_ENCRYPTION_CONFIG = """ + + {% for entry in encryption %} + + + {{ entry["Rule"]["ApplyServerSideEncryptionByDefault"]["SSEAlgorithm"] }} + {% if entry["Rule"]["ApplyServerSideEncryptionByDefault"].get("KMSMasterKeyID") %} + {{ entry["Rule"]["ApplyServerSideEncryptionByDefault"]["KMSMasterKeyID"] }} + {% endif %} + + + {% endfor %} + +""" + +S3_NO_ENCRYPTION = """ + + ServerSideEncryptionConfigurationNotFoundError + The server side encryption configuration was not found + {{ bucket_name }} + 0D68A23BB2E2215B + 9Gjjt1m+cjU4OPvX9O9/8RuvnG41MRb/18Oux2o5H5MY7ISNTlXN+Dz9IG62/ILVxhAGI0qyPfg= + +""" + S3_GET_BUCKET_NOTIFICATION_CONFIG = """ {% for topic in bucket.notification_configuration.topic %} diff --git a/tests/test_s3/test_s3.py b/tests/test_s3/test_s3.py index bcb9da87f33c..363ccc02db77 100644 --- a/tests/test_s3/test_s3.py +++ b/tests/test_s3/test_s3.py @@ -4521,3 +4521,36 @@ def test_creating_presigned_post(): ].read() == fdata ) + + +@mock_s3 +def test_encryption(): + # Create Bucket so that test can run + conn = boto3.client("s3", region_name="us-east-1") + conn.create_bucket(Bucket="mybucket") + + with assert_raises(ClientError) as exc: + conn.get_bucket_encryption(Bucket="mybucket") + + sse_config = { + "Rules": [ + { + "ApplyServerSideEncryptionByDefault": { + "SSEAlgorithm": "aws:kms", + "KMSMasterKeyID": "12345678", + } + } + ] + } + + conn.put_bucket_encryption( + Bucket="mybucket", ServerSideEncryptionConfiguration=sse_config + ) + + resp = conn.get_bucket_encryption(Bucket="mybucket") + assert "ServerSideEncryptionConfiguration" in resp + assert resp["ServerSideEncryptionConfiguration"] == sse_config + + conn.delete_bucket_encryption(Bucket="mybucket") + with assert_raises(ClientError) as exc: + conn.get_bucket_encryption(Bucket="mybucket") From 4303123312e9bd878e08b925f671438874ea4054 Mon Sep 17 00:00:00 2001 From: jweite Date: Wed, 27 May 2020 13:22:06 -0400 Subject: [PATCH 369/658] Implemented IAM delete_instance_profile (#3020) * Implemented IAM delete_instance_profile * PR adjustment: positively verifying instance profile deletion in test case. 
Co-authored-by: Joseph Weitekamp --- moto/iam/models.py | 9 +++++++++ moto/iam/responses.py | 13 +++++++++++++ tests/test_iam/test_iam.py | 20 ++++++++++++++++++++ 3 files changed, 42 insertions(+) diff --git a/moto/iam/models.py b/moto/iam/models.py index 41484add214b..82dc84be5afb 100755 --- a/moto/iam/models.py +++ b/moto/iam/models.py @@ -1341,6 +1341,15 @@ def create_instance_profile(self, name, path, role_ids): self.instance_profiles[name] = instance_profile return instance_profile + def delete_instance_profile(self, name): + instance_profile = self.get_instance_profile(name) + if len(instance_profile.roles) > 0: + raise IAMConflictException( + code="DeleteConflict", + message="Cannot delete entity, must remove roles from instance profile first.", + ) + del self.instance_profiles[name] + def get_instance_profile(self, profile_name): for profile in self.get_instance_profiles(): if profile.name == profile_name: diff --git a/moto/iam/responses.py b/moto/iam/responses.py index 667a6d13b851..60ab46069456 100644 --- a/moto/iam/responses.py +++ b/moto/iam/responses.py @@ -305,6 +305,13 @@ def create_instance_profile(self): template = self.response_template(CREATE_INSTANCE_PROFILE_TEMPLATE) return template.render(profile=profile) + def delete_instance_profile(self): + profile_name = self._get_param("InstanceProfileName") + + profile = iam_backend.delete_instance_profile(profile_name) + template = self.response_template(DELETE_INSTANCE_PROFILE_TEMPLATE) + return template.render(profile=profile) + def get_instance_profile(self): profile_name = self._get_param("InstanceProfileName") profile = iam_backend.get_instance_profile(profile_name) @@ -1180,6 +1187,12 @@ def get_account_summary(self): """ +DELETE_INSTANCE_PROFILE_TEMPLATE = """ + + 786dff92-6cfd-4fa4-b1eb-27EXAMPLE804 + +""" + GET_INSTANCE_PROFILE_TEMPLATE = """ diff --git a/tests/test_iam/test_iam.py b/tests/test_iam/test_iam.py index 825e12fe0082..7b59a57268cd 100644 --- a/tests/test_iam/test_iam.py +++ b/tests/test_iam/test_iam.py @@ -206,6 +206,26 @@ def test_remove_role_from_instance_profile(): dict(profile.roles).should.be.empty +@mock_iam() +def test_delete_instance_profile(): + conn = boto3.client("iam", region_name="us-east-1") + conn.create_role( + RoleName="my-role", AssumeRolePolicyDocument="some policy", Path="/my-path/" + ) + conn.create_instance_profile(InstanceProfileName="my-profile") + conn.add_role_to_instance_profile( + InstanceProfileName="my-profile", RoleName="my-role" + ) + with assert_raises(conn.exceptions.DeleteConflictException): + conn.delete_instance_profile(InstanceProfileName="my-profile") + conn.remove_role_from_instance_profile( + InstanceProfileName="my-profile", RoleName="my-role" + ) + conn.delete_instance_profile(InstanceProfileName="my-profile") + with assert_raises(conn.exceptions.NoSuchEntityException): + profile = conn.get_instance_profile(InstanceProfileName="my-profile") + + @mock_iam() def test_get_login_profile(): conn = boto3.client("iam", region_name="us-east-1") From 162a38bb10e1808d53ca9f88d95b39fe2bfc6249 Mon Sep 17 00:00:00 2001 From: Jeremie Tharaud <46786750+jeremietharaud@users.noreply.github.com> Date: Thu, 28 May 2020 15:14:09 +0200 Subject: [PATCH 370/658] fix missing sure package and region_name (#3031) --- .../test_cloudformation_stack_crud_boto3.py | 8 ++++---- tests/test_cloudformation/test_validate.py | 1 + 2 files changed, 5 insertions(+), 4 deletions(-) diff --git a/tests/test_cloudformation/test_cloudformation_stack_crud_boto3.py 
b/tests/test_cloudformation/test_cloudformation_stack_crud_boto3.py index 4df1ff5d2228..58d505d8979a 100644 --- a/tests/test_cloudformation/test_cloudformation_stack_crud_boto3.py +++ b/tests/test_cloudformation/test_cloudformation_stack_crud_boto3.py @@ -572,7 +572,7 @@ def test_boto3_create_stack_set_with_yaml(): @mock_s3 def test_create_stack_set_from_s3_url(): s3 = boto3.client("s3") - s3_conn = boto3.resource("s3") + s3_conn = boto3.resource("s3", region_name="us-east-1") bucket = s3_conn.create_bucket(Bucket="foobar") key = s3_conn.Object("foobar", "template-key").put(Body=dummy_template_json) @@ -715,7 +715,7 @@ def test_create_stack_with_role_arn(): @mock_s3 def test_create_stack_from_s3_url(): s3 = boto3.client("s3") - s3_conn = boto3.resource("s3") + s3_conn = boto3.resource("s3", region_name="us-east-1") bucket = s3_conn.create_bucket(Bucket="foobar") key = s3_conn.Object("foobar", "template-key").put(Body=dummy_template_json) @@ -770,7 +770,7 @@ def test_update_stack_with_previous_value(): @mock_ec2 def test_update_stack_from_s3_url(): s3 = boto3.client("s3") - s3_conn = boto3.resource("s3") + s3_conn = boto3.resource("s3", region_name="us-east-1") cf_conn = boto3.client("cloudformation", region_name="us-east-1") cf_conn.create_stack( @@ -799,7 +799,7 @@ def test_update_stack_from_s3_url(): @mock_s3 def test_create_change_set_from_s3_url(): s3 = boto3.client("s3") - s3_conn = boto3.resource("s3") + s3_conn = boto3.resource("s3", region_name="us-east-1") bucket = s3_conn.create_bucket(Bucket="foobar") key = s3_conn.Object("foobar", "template-key").put(Body=dummy_template_json) diff --git a/tests/test_cloudformation/test_validate.py b/tests/test_cloudformation/test_validate.py index a4278b559771..5ffaeafb9b49 100644 --- a/tests/test_cloudformation/test_validate.py +++ b/tests/test_cloudformation/test_validate.py @@ -5,6 +5,7 @@ import boto3 from nose.tools import raises import botocore +import sure # noqa from moto.cloudformation.exceptions import ValidationError From 7a6d78afde02c9dc70c6db73f8729d0d7d6a0883 Mon Sep 17 00:00:00 2001 From: usmangani1 Date: Thu, 28 May 2020 20:22:56 +0530 Subject: [PATCH 371/658] Fix: Cloudwatch delete Alarm status code handling on invalid alarm name (#3028) * CloudWwatch delete Alarm status code handling on invalid alarm Name * Handled cases where a mix of existent and non existent alarms are tried to delete * Linting Co-authored-by: usmankb Co-authored-by: Bert Blommers --- moto/cloudwatch/models.py | 7 +++++ .../test_cloudwatch/test_cloudwatch_boto3.py | 31 +++++++++++++++++++ 2 files changed, 38 insertions(+) diff --git a/moto/cloudwatch/models.py b/moto/cloudwatch/models.py index 19b1efa5fb73..f089acb1414c 100644 --- a/moto/cloudwatch/models.py +++ b/moto/cloudwatch/models.py @@ -304,6 +304,13 @@ def get_alarms_by_state_value(self, target_state): ) def delete_alarms(self, alarm_names): + for alarm_name in alarm_names: + if alarm_name not in self.alarms: + raise RESTError( + "ResourceNotFound", + "Alarm {0} not found".format(alarm_name), + status=404, + ) for alarm_name in alarm_names: self.alarms.pop(alarm_name, None) diff --git a/tests/test_cloudwatch/test_cloudwatch_boto3.py b/tests/test_cloudwatch/test_cloudwatch_boto3.py index 0c814ee442c1..926c321ba9c2 100644 --- a/tests/test_cloudwatch/test_cloudwatch_boto3.py +++ b/tests/test_cloudwatch/test_cloudwatch_boto3.py @@ -92,6 +92,37 @@ def test_get_dashboard_fail(): raise RuntimeError("Should of raised error") +@mock_cloudwatch +def test_delete_invalid_alarm(): + cloudwatch = 
boto3.client("cloudwatch", "eu-west-1") + + cloudwatch.put_metric_alarm( + AlarmName="testalarm1", + MetricName="cpu", + Namespace="blah", + Period=10, + EvaluationPeriods=5, + Statistic="Average", + Threshold=2, + ComparisonOperator="GreaterThanThreshold", + ActionsEnabled=True, + ) + + # trying to delete an alarm which is not created along with valid alarm. + with assert_raises(ClientError) as e: + cloudwatch.delete_alarms(AlarmNames=["InvalidAlarmName", "testalarm1"]) + e.exception.response["Error"]["Code"].should.equal("ResourceNotFound") + + resp = cloudwatch.describe_alarms(AlarmNames=["testalarm1"]) + # making sure other alarms are not deleted in case of an error. + len(resp["MetricAlarms"]).should.equal(1) + + # test to check if the error raises if only one invalid alarm is tried to delete. + with assert_raises(ClientError) as e: + cloudwatch.delete_alarms(AlarmNames=["InvalidAlarmName"]) + e.exception.response["Error"]["Code"].should.equal("ResourceNotFound") + + @mock_cloudwatch def test_alarm_state(): client = boto3.client("cloudwatch", region_name="eu-central-1") From 8fa625c3def8287882c8193bf72bf778510db3d2 Mon Sep 17 00:00:00 2001 From: Jeremie Tharaud <46786750+jeremietharaud@users.noreply.github.com> Date: Fri, 29 May 2020 08:33:24 +0200 Subject: [PATCH 372/658] Cfn change set fix outputs (#3033) * set creation time of the change set * fix status, execution status, stak id, creation time and update tests --- moto/cloudformation/models.py | 6 ++++-- moto/cloudformation/responses.py | 2 +- .../test_cloudformation_stack_crud_boto3.py | 11 ++++++++--- 3 files changed, 13 insertions(+), 6 deletions(-) diff --git a/moto/cloudformation/models.py b/moto/cloudformation/models.py index 281ab5e19a90..16ceafdb8219 100644 --- a/moto/cloudformation/models.py +++ b/moto/cloudformation/models.py @@ -240,7 +240,8 @@ def __init__( self.resource_map = self._create_resource_map() self.output_map = self._create_output_map() if create_change_set: - self.status = "REVIEW_IN_PROGRESS" + self.status = "CREATE_COMPLETE" + self.execution_status = "AVAILABLE" else: self.create_resources() self._add_stack_event("CREATE_COMPLETE") @@ -397,6 +398,7 @@ def __init__( self.change_set_id = change_set_id self.change_set_name = change_set_name self.changes = self.diff(template=template, parameters=parameters) + self.creation_time = datetime.utcnow() def diff(self, template, parameters=None): self.template = template @@ -587,7 +589,7 @@ def create_change_set( if stack is None: raise ValidationError(stack_name) else: - stack_id = generate_stack_id(stack_name) + stack_id = generate_stack_id(stack_name, region_name) stack_template = template change_set_id = generate_changeset_id(change_set_name, region_name) diff --git a/moto/cloudformation/responses.py b/moto/cloudformation/responses.py index 782d68946317..c028421caa63 100644 --- a/moto/cloudformation/responses.py +++ b/moto/cloudformation/responses.py @@ -609,7 +609,7 @@ def update_stack_instances(self): {% endfor %} - 2011-05-23T15:47:44Z + {{ change_set.creation_time_iso_8601 }} {{ change_set.execution_status }} {{ change_set.status }} {{ change_set.status_reason }} diff --git a/tests/test_cloudformation/test_cloudformation_stack_crud_boto3.py b/tests/test_cloudformation/test_cloudformation_stack_crud_boto3.py index 58d505d8979a..c4fddcad0ea9 100644 --- a/tests/test_cloudformation/test_cloudformation_stack_crud_boto3.py +++ b/tests/test_cloudformation/test_cloudformation_stack_crud_boto3.py @@ -819,7 +819,7 @@ def test_create_change_set_from_s3_url(): in 
response["Id"] ) assert ( - "arn:aws:cloudformation:us-east-1:123456789:stack/NewStack" + "arn:aws:cloudformation:us-west-1:123456789:stack/NewStack" in response["StackId"] ) @@ -838,7 +838,12 @@ def test_describe_change_set(): stack["ChangeSetName"].should.equal("NewChangeSet") stack["StackName"].should.equal("NewStack") - stack["Status"].should.equal("REVIEW_IN_PROGRESS") + stack["Status"].should.equal("CREATE_COMPLETE") + stack["ExecutionStatus"].should.equal("AVAILABLE") + two_secs_ago = datetime.now(tz=pytz.UTC) - timedelta(seconds=2) + assert ( + two_secs_ago < stack["CreationTime"] < datetime.now(tz=pytz.UTC) + ), "Change set should have been created recently" cf_conn.create_change_set( StackName="NewStack", @@ -868,7 +873,7 @@ def test_execute_change_set_w_arn(): ) ec2.describe_instances()["Reservations"].should.have.length_of(0) cf_conn.describe_change_set(ChangeSetName="NewChangeSet")["Status"].should.equal( - "REVIEW_IN_PROGRESS" + "CREATE_COMPLETE" ) # Execute change set cf_conn.execute_change_set(ChangeSetName=change_set["Id"]) From 2433d64fe264f41481f6266a06d2ead2bef3dbab Mon Sep 17 00:00:00 2001 From: usmangani1 Date: Fri, 29 May 2020 17:01:41 +0530 Subject: [PATCH 373/658] Fix: SecretsManager Added VersionIdsToStages key in describe_secret function (#3029) * Fix: SecretsManager Added VersionIdsToStages key in describe_secret function * Added more assertions * Linting Co-authored-by: usmankb Co-authored-by: Bert Blommers --- moto/secretsmanager/models.py | 10 ++++++++++ tests/test_secretsmanager/test_secretsmanager.py | 12 ++++++++++-- 2 files changed, 20 insertions(+), 2 deletions(-) diff --git a/moto/secretsmanager/models.py b/moto/secretsmanager/models.py index 29bd6c96e1c0..01acf2dbb59b 100644 --- a/moto/secretsmanager/models.py +++ b/moto/secretsmanager/models.py @@ -274,6 +274,7 @@ def describe_secret(self, secret_id): raise SecretNotFoundException() secret = self.secrets[secret_id] + version_id_to_stages = self.form_version_ids_to_stages(secret["versions"]) response = json.dumps( { @@ -291,6 +292,7 @@ def describe_secret(self, secret_id): "LastAccessedDate": None, "DeletedDate": secret.get("deleted_date", None), "Tags": secret["tags"], + "VersionIdsToStages": version_id_to_stages, } ) @@ -552,6 +554,14 @@ def get_resource_policy(secret_id): } ) + @staticmethod + def form_version_ids_to_stages(secret): + version_id_to_stages = {} + for key, value in secret.items(): + version_id_to_stages[key] = value["version_stages"] + + return version_id_to_stages + secretsmanager_backends = {} for region in Session().get_available_regions("secretsmanager"): diff --git a/tests/test_secretsmanager/test_secretsmanager.py b/tests/test_secretsmanager/test_secretsmanager.py index 6ec53460a8b6..0fe23fd7f2cc 100644 --- a/tests/test_secretsmanager/test_secretsmanager.py +++ b/tests/test_secretsmanager/test_secretsmanager.py @@ -733,25 +733,33 @@ def test_put_secret_value_versions_differ_if_same_secret_put_twice(): def test_put_secret_value_maintains_description_and_tags(): conn = boto3.client("secretsmanager", region_name="us-west-2") - conn.create_secret( + previous_response = conn.create_secret( Name=DEFAULT_SECRET_NAME, SecretString="foosecret", Description="desc", Tags=[{"Key": "Foo", "Value": "Bar"}, {"Key": "Mykey", "Value": "Myvalue"}], ) + previous_version_id = previous_response["VersionId"] conn = boto3.client("secretsmanager", region_name="us-west-2") - conn.put_secret_value( + current_response = conn.put_secret_value( SecretId=DEFAULT_SECRET_NAME, SecretString="dupe_secret", 
VersionStages=["AWSCURRENT"], ) + current_version_id = current_response["VersionId"] + secret_details = conn.describe_secret(SecretId=DEFAULT_SECRET_NAME) assert secret_details["Tags"] == [ {"Key": "Foo", "Value": "Bar"}, {"Key": "Mykey", "Value": "Myvalue"}, ] assert secret_details["Description"] == "desc" + assert secret_details["VersionIdsToStages"] is not None + assert previous_version_id in secret_details["VersionIdsToStages"] + assert current_version_id in secret_details["VersionIdsToStages"] + assert secret_details["VersionIdsToStages"][previous_version_id] == ["AWSPREVIOUS"] + assert secret_details["VersionIdsToStages"][current_version_id] == ["AWSCURRENT"] @mock_secretsmanager From ca49b415997c687e9f472d88ec4fd5d6b1266d23 Mon Sep 17 00:00:00 2001 From: Tim Van Laer Date: Tue, 2 Jun 2020 11:32:47 +0200 Subject: [PATCH 374/658] Make sure the UTC tz is included in the bucket creation timestamp --- moto/s3/models.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/moto/s3/models.py b/moto/s3/models.py index 25ead4f5ec3b..36252bc1787e 100644 --- a/moto/s3/models.py +++ b/moto/s3/models.py @@ -5,6 +5,7 @@ import os import base64 import datetime +import pytz import hashlib import copy import itertools @@ -776,7 +777,7 @@ def __init__(self, name, region_name): self.notification_configuration = None self.accelerate_configuration = None self.payer = "BucketOwner" - self.creation_date = datetime.datetime.utcnow() + self.creation_date = datetime.datetime.now(tz=pytz.utc) self.public_access_block = None self.encryption = None From cb600377b48ca676fc6a29a6690aeb51e702da43 Mon Sep 17 00:00:00 2001 From: Victor Le Fichant Date: Tue, 2 Jun 2020 17:31:42 +0200 Subject: [PATCH 375/658] Fix incorrect response for put-targets action (#3037) --- moto/events/responses.py | 5 ++++- tests/test_events/test_events.py | 30 ++++++++++++++++++++++++++++++ 2 files changed, 34 insertions(+), 1 deletion(-) diff --git a/moto/events/responses.py b/moto/events/responses.py index 55a664b24e99..73db00bdd1de 100644 --- a/moto/events/responses.py +++ b/moto/events/responses.py @@ -217,7 +217,10 @@ def put_targets(self): "ResourceNotFoundException", "Rule " + rule_name + " does not exist." 
) - return "", self.response_headers + return ( + json.dumps({"FailedEntryCount": 0, "FailedEntries": []}), + self.response_headers, + ) def remove_targets(self): rule_name = self._get_param("Rule") diff --git a/tests/test_events/test_events.py b/tests/test_events/test_events.py index 5b4e958d6a57..f83c607649ee 100644 --- a/tests/test_events/test_events.py +++ b/tests/test_events/test_events.py @@ -4,6 +4,7 @@ import boto3 import sure # noqa + from botocore.exceptions import ClientError from nose.tools import assert_raises @@ -201,6 +202,35 @@ def test_remove_targets(): assert targets_before - 1 == targets_after +@mock_events +def test_put_targets(): + client = boto3.client("events", "us-west-2") + rule_name = "my-event" + rule_data = { + "Name": rule_name, + "ScheduleExpression": "rate(5 minutes)", + "EventPattern": '{"source": ["test-source"]}', + } + + client.put_rule(**rule_data) + + targets = client.list_targets_by_rule(Rule=rule_name)["Targets"] + targets_before = len(targets) + assert targets_before == 0 + + targets_data = [{"Arn": "test_arn", "Id": "test_id"}] + resp = client.put_targets(Rule=rule_name, Targets=targets_data) + assert resp["FailedEntryCount"] == 0 + assert len(resp["FailedEntries"]) == 0 + + targets = client.list_targets_by_rule(Rule=rule_name)["Targets"] + targets_after = len(targets) + assert targets_before + 1 == targets_after + + assert targets[0]["Arn"] == "test_arn" + assert targets[0]["Id"] == "test_id" + + @mock_events def test_permissions(): client = boto3.client("events", "eu-central-1") From 90e200f0f6e0936c0ccb59c33990831a4eb1cda7 Mon Sep 17 00:00:00 2001 From: Jeremie Tharaud <46786750+jeremietharaud@users.noreply.github.com> Date: Wed, 3 Jun 2020 07:08:35 +0200 Subject: [PATCH 376/658] Add missing changes when creating a change set (#3039) * Display changes when creating a change set * add change set id and description when describing stack * fix lint with flake8 and black --- moto/cloudformation/models.py | 19 +++++++-- moto/cloudformation/parsing.py | 6 ++- moto/cloudformation/responses.py | 4 ++ .../test_cloudformation_stack_crud_boto3.py | 41 +++++++++++++------ .../test_cloudformation_stack_integration.py | 6 --- 5 files changed, 51 insertions(+), 25 deletions(-) diff --git a/moto/cloudformation/models.py b/moto/cloudformation/models.py index 16ceafdb8219..8c14f55b8b4d 100644 --- a/moto/cloudformation/models.py +++ b/moto/cloudformation/models.py @@ -219,7 +219,12 @@ def __init__( self.stack_id = stack_id self.name = name self.template = template - self._parse_template() + if template != {}: + self._parse_template() + self.description = self.template_dict.get("Description") + else: + self.template_dict = {} + self.description = None self.parameters = parameters self.region_name = region_name self.notification_arns = notification_arns if notification_arns else [] @@ -235,7 +240,6 @@ def __init__( "CREATE_IN_PROGRESS", resource_status_reason="User Initiated" ) - self.description = self.template_dict.get("Description") self.cross_stack_resources = cross_stack_resources or {} self.resource_map = self._create_resource_map() self.output_map = self._create_output_map() @@ -331,7 +335,9 @@ def exports(self): return self.output_map.exports def create_resources(self): - self.resource_map.create() + self.resource_map.create(self.template_dict) + # Set the description of the stack + self.description = self.template_dict.get("Description") self.status = "CREATE_COMPLETE" def update(self, template, role_arn=None, parameters=None, tags=None): @@ -398,6 +404,8 @@ 
def __init__( self.change_set_id = change_set_id self.change_set_name = change_set_name self.changes = self.diff(template=template, parameters=parameters) + if self.description is None: + self.description = self.template_dict.get("Description") self.creation_time = datetime.utcnow() def diff(self, template, parameters=None): @@ -590,7 +598,7 @@ def create_change_set( raise ValidationError(stack_name) else: stack_id = generate_stack_id(stack_name, region_name) - stack_template = template + stack_template = {} change_set_id = generate_changeset_id(change_set_name, region_name) new_change_set = FakeChangeSet( @@ -645,6 +653,9 @@ def execute_change_set(self, change_set_name, stack_name=None): if stack is None: raise ValidationError(stack_name) if stack.events[-1].resource_status == "REVIEW_IN_PROGRESS": + stack._add_stack_event( + "CREATE_IN_PROGRESS", resource_status_reason="User Initiated" + ) stack._add_stack_event("CREATE_COMPLETE") else: stack._add_stack_event("UPDATE_IN_PROGRESS") diff --git a/moto/cloudformation/parsing.py b/moto/cloudformation/parsing.py index d59b21b82898..81d4d1c7d86b 100644 --- a/moto/cloudformation/parsing.py +++ b/moto/cloudformation/parsing.py @@ -456,7 +456,7 @@ def __init__( cross_stack_resources, ): self._template = template - self._resource_json_map = template["Resources"] + self._resource_json_map = template["Resources"] if template != {} else {} self._region_name = region_name self.input_parameters = parameters self.tags = copy.deepcopy(tags) @@ -592,10 +592,12 @@ def load(self): self.load_parameters() self.load_conditions() - def create(self): + def create(self, template): # Since this is a lazy map, to create every object we just need to # iterate through self. # Assumes that self.load() has been called before + self._template = template + self._resource_json_map = template["Resources"] self.tags.update( { "aws:cloudformation:stack-name": self.get("AWS::StackName"), diff --git a/moto/cloudformation/responses.py b/moto/cloudformation/responses.py index c028421caa63..302849481e5d 100644 --- a/moto/cloudformation/responses.py +++ b/moto/cloudformation/responses.py @@ -662,6 +662,10 @@ def update_stack_instances(self): {{ stack.name }} {{ stack.stack_id }} + {% if stack.change_set_id %} + {{ stack.change_set_id }} + {% endif %} + {{ stack.description }} {{ stack.creation_time_iso_8601 }} {{ stack.status }} {% if stack.notification_arns %} diff --git a/tests/test_cloudformation/test_cloudformation_stack_crud_boto3.py b/tests/test_cloudformation/test_cloudformation_stack_crud_boto3.py index c4fddcad0ea9..cd76743ddcf6 100644 --- a/tests/test_cloudformation/test_cloudformation_stack_crud_boto3.py +++ b/tests/test_cloudformation/test_cloudformation_stack_crud_boto3.py @@ -573,9 +573,9 @@ def test_boto3_create_stack_set_with_yaml(): def test_create_stack_set_from_s3_url(): s3 = boto3.client("s3") s3_conn = boto3.resource("s3", region_name="us-east-1") - bucket = s3_conn.create_bucket(Bucket="foobar") + s3_conn.create_bucket(Bucket="foobar") - key = s3_conn.Object("foobar", "template-key").put(Body=dummy_template_json) + s3_conn.Object("foobar", "template-key").put(Body=dummy_template_json) key_url = s3.generate_presigned_url( ClientMethod="get_object", Params={"Bucket": "foobar", "Key": "template-key"} ) @@ -716,9 +716,9 @@ def test_create_stack_with_role_arn(): def test_create_stack_from_s3_url(): s3 = boto3.client("s3") s3_conn = boto3.resource("s3", region_name="us-east-1") - bucket = s3_conn.create_bucket(Bucket="foobar") + 
s3_conn.create_bucket(Bucket="foobar") - key = s3_conn.Object("foobar", "template-key").put(Body=dummy_template_json) + s3_conn.Object("foobar", "template-key").put(Body=dummy_template_json) key_url = s3.generate_presigned_url( ClientMethod="get_object", Params={"Bucket": "foobar", "Key": "template-key"} ) @@ -800,9 +800,9 @@ def test_update_stack_from_s3_url(): def test_create_change_set_from_s3_url(): s3 = boto3.client("s3") s3_conn = boto3.resource("s3", region_name="us-east-1") - bucket = s3_conn.create_bucket(Bucket="foobar") + s3_conn.create_bucket(Bucket="foobar") - key = s3_conn.Object("foobar", "template-key").put(Body=dummy_template_json) + s3_conn.Object("foobar", "template-key").put(Body=dummy_template_json) key_url = s3.generate_presigned_url( ClientMethod="get_object", Params={"Bucket": "foobar", "Key": "template-key"} ) @@ -844,6 +844,25 @@ def test_describe_change_set(): assert ( two_secs_ago < stack["CreationTime"] < datetime.now(tz=pytz.UTC) ), "Change set should have been created recently" + stack["Changes"].should.have.length_of(1) + stack["Changes"][0].should.equal( + dict( + { + "Type": "Resource", + "ResourceChange": { + "Action": "Add", + "LogicalResourceId": "EC2Instance1", + "ResourceType": "AWS::EC2::Instance", + }, + } + ) + ) + + # Execute change set + cf_conn.execute_change_set(ChangeSetName="NewChangeSet") + # Verify that the changes have been applied + stack = cf_conn.describe_change_set(ChangeSetName="NewChangeSet") + stack["Changes"].should.have.length_of(1) cf_conn.create_change_set( StackName="NewStack", @@ -887,7 +906,7 @@ def test_execute_change_set_w_arn(): @mock_cloudformation def test_execute_change_set_w_name(): cf_conn = boto3.client("cloudformation", region_name="us-east-1") - change_set = cf_conn.create_change_set( + cf_conn.create_change_set( StackName="NewStack", TemplateBody=dummy_template_json, ChangeSetName="NewChangeSet", @@ -1221,9 +1240,7 @@ def test_delete_stack_with_export(): @mock_cloudformation def test_export_names_must_be_unique(): cf = boto3.resource("cloudformation", region_name="us-east-1") - first_stack = cf.create_stack( - StackName="test_stack", TemplateBody=dummy_output_template_json - ) + cf.create_stack(StackName="test_stack", TemplateBody=dummy_output_template_json) with assert_raises(ClientError): cf.create_stack(StackName="test_stack", TemplateBody=dummy_output_template_json) @@ -1237,9 +1254,7 @@ def test_stack_with_imports(): output_stack = cf.create_stack( StackName="test_stack1", TemplateBody=dummy_output_template_json ) - import_stack = cf.create_stack( - StackName="test_stack2", TemplateBody=dummy_import_template_json - ) + cf.create_stack(StackName="test_stack2", TemplateBody=dummy_import_template_json) output_stack.outputs.should.have.length_of(1) output = output_stack.outputs[0]["OutputValue"] diff --git a/tests/test_cloudformation/test_cloudformation_stack_integration.py b/tests/test_cloudformation/test_cloudformation_stack_integration.py index 3abb3373da87..a49c4a1f4066 100644 --- a/tests/test_cloudformation/test_cloudformation_stack_integration.py +++ b/tests/test_cloudformation/test_cloudformation_stack_integration.py @@ -1,7 +1,6 @@ from __future__ import unicode_literals import json -import base64 from decimal import Decimal import boto @@ -28,7 +27,6 @@ mock_dynamodb2, mock_ec2, mock_ec2_deprecated, - mock_elb, mock_elb_deprecated, mock_events, mock_iam_deprecated, @@ -37,18 +35,14 @@ mock_logs, mock_rds_deprecated, mock_rds2, - mock_rds2_deprecated, - mock_redshift, mock_redshift_deprecated, 
mock_route53_deprecated, mock_s3, mock_sns_deprecated, - mock_sqs, mock_sqs_deprecated, mock_elbv2, ) from moto.core import ACCOUNT_ID -from moto.dynamodb2.models import Table from tests.test_cloudformation.fixtures import ( ec2_classic_eip, From 149e307bc9421de5780febf9ab4a734e72cbee9d Mon Sep 17 00:00:00 2001 From: Guilherme Martins Crocetti Date: Wed, 3 Jun 2020 02:54:01 -0300 Subject: [PATCH 377/658] Rule's cloudformation support for updates (#3043) * add support to update stack using cloudformation * blacked test file --- moto/events/models.py | 9 +++++ .../test_cloudformation_stack_integration.py | 40 +++++++++++++++++++ 2 files changed, 49 insertions(+) diff --git a/moto/events/models.py b/moto/events/models.py index e1224242e49f..5397f28ca7ad 100644 --- a/moto/events/models.py +++ b/moto/events/models.py @@ -80,6 +80,15 @@ def create_from_cloudformation_json( event_name = properties.get("Name") or resource_name return event_backend.put_rule(name=event_name, **properties) + @classmethod + def update_from_cloudformation_json( + cls, original_resource, new_resource_name, cloudformation_json, region_name + ): + original_resource.delete(region_name) + return cls.create_from_cloudformation_json( + new_resource_name, cloudformation_json, region_name + ) + @classmethod def delete_from_cloudformation_json( cls, resource_name, cloudformation_json, region_name diff --git a/tests/test_cloudformation/test_cloudformation_stack_integration.py b/tests/test_cloudformation/test_cloudformation_stack_integration.py index a49c4a1f4066..082a20e01e86 100644 --- a/tests/test_cloudformation/test_cloudformation_stack_integration.py +++ b/tests/test_cloudformation/test_cloudformation_stack_integration.py @@ -17,6 +17,7 @@ import boto.vpc import boto3 import sure # noqa +from string import Template from moto import ( mock_autoscaling_deprecated, @@ -2493,6 +2494,45 @@ def test_stack_events_create_rule_as_target(): log_groups["logGroups"][0]["retentionInDays"].should.equal(3) +@mock_cloudformation +@mock_events +def test_stack_events_update_rule_integration(): + events_template = Template( + """{ + "AWSTemplateFormatVersion": "2010-09-09", + "Resources": { + "Event": { + "Type": "AWS::Events::Rule", + "Properties": { + "Name": "$Name", + "State": "$State", + "ScheduleExpression": "rate(5 minutes)", + }, + } + }, + } """ + ) + + cf_conn = boto3.client("cloudformation", "us-west-2") + + original_template = events_template.substitute(Name="Foo", State="ENABLED") + cf_conn.create_stack(StackName="test_stack", TemplateBody=original_template) + + rules = boto3.client("events", "us-west-2").list_rules() + rules["Rules"].should.have.length_of(1) + rules["Rules"][0]["Name"].should.equal("Foo") + rules["Rules"][0]["State"].should.equal("ENABLED") + + update_template = events_template.substitute(Name="Bar", State="DISABLED") + cf_conn.update_stack(StackName="test_stack", TemplateBody=update_template) + + rules = boto3.client("events", "us-west-2").list_rules() + + rules["Rules"].should.have.length_of(1) + rules["Rules"][0]["Name"].should.equal("Bar") + rules["Rules"][0]["State"].should.equal("DISABLED") + + @mock_cloudformation @mock_autoscaling def test_autoscaling_propagate_tags(): From 9ca10e36301c759870276fda3910aa817995c8c2 Mon Sep 17 00:00:00 2001 From: Bert Blommers Date: Wed, 3 Jun 2020 15:36:32 +0100 Subject: [PATCH 378/658] #3046 - DynamoDB - Add Key-size Validation for BatchGetItem --- moto/dynamodb2/responses.py | 20 ++++++++++++ tests/test_dynamodb2/test_dynamodb.py | 46 +++++++++++++++++++++++++++ 2 files 
changed, 66 insertions(+) diff --git a/moto/dynamodb2/responses.py b/moto/dynamodb2/responses.py index aec7c7560a4f..199a09b944a8 100644 --- a/moto/dynamodb2/responses.py +++ b/moto/dynamodb2/responses.py @@ -371,6 +371,26 @@ def batch_get_item(self): results = {"ConsumedCapacity": [], "Responses": {}, "UnprocessedKeys": {}} + # Validation: Can only request up to 100 items at the same time + # Scenario 1: We're requesting more than a 100 keys from a single table + for table_name, table_request in table_batches.items(): + if len(table_request["Keys"]) > 100: + return self.error( + "com.amazonaws.dynamodb.v20111205#ValidationException", + "1 validation error detected: Value at 'requestItems." + + table_name + + ".member.keys' failed to satisfy constraint: Member must have length less than or equal to 100", + ) + # Scenario 2: We're requesting more than a 100 keys across all tables + nr_of_keys_across_all_tables = sum( + [len(req["Keys"]) for _, req in table_batches.items()] + ) + if nr_of_keys_across_all_tables > 100: + return self.error( + "com.amazonaws.dynamodb.v20111205#ValidationException", + "Too many items requested for the BatchGetItem call", + ) + for table_name, table_request in table_batches.items(): keys = table_request["Keys"] if self._contains_duplicates(keys): diff --git a/tests/test_dynamodb2/test_dynamodb.py b/tests/test_dynamodb2/test_dynamodb.py index 50fd4fd6c4dd..19c585bfaeef 100644 --- a/tests/test_dynamodb2/test_dynamodb.py +++ b/tests/test_dynamodb2/test_dynamodb.py @@ -3038,6 +3038,52 @@ def test_batch_items_returns_all(): ] +@mock_dynamodb2 +def test_batch_items_throws_exception_when_requesting_100_items_for_single_table(): + dynamodb = _create_user_table() + with assert_raises(ClientError) as ex: + dynamodb.batch_get_item( + RequestItems={ + "users": { + "Keys": [{"username": {"S": f"user{i}"}} for i in range(0, 104)], + "ConsistentRead": True, + } + } + ) + ex.exception.response["Error"]["Code"].should.equal("ValidationException") + msg = ex.exception.response["Error"]["Message"] + msg.should.contain("1 validation error detected: Value") + msg.should.contain( + "at 'requestItems.users.member.keys' failed to satisfy constraint: Member must have length less than or equal to 100" + ) + + +@mock_dynamodb2 +def test_batch_items_throws_exception_when_requesting_100_items_across_all_tables(): + dynamodb = _create_user_table() + with assert_raises(ClientError) as ex: + dynamodb.batch_get_item( + RequestItems={ + "users": { + "Keys": [ + {"username": {"S": "user" + str(i)}} for i in range(0, 75) + ], + "ConsistentRead": True, + }, + "users2": { + "Keys": [ + {"username": {"S": "user" + str(i)}} for i in range(0, 75) + ], + "ConsistentRead": True, + }, + } + ) + ex.exception.response["Error"]["Code"].should.equal("ValidationException") + ex.exception.response["Error"]["Message"].should.equal( + "Too many items requested for the BatchGetItem call" + ) + + @mock_dynamodb2 def test_batch_items_with_basic_projection_expression(): dynamodb = _create_user_table() From b0da78c29de801dde9e8757f0e29e5044112e03b Mon Sep 17 00:00:00 2001 From: Bert Blommers Date: Wed, 3 Jun 2020 16:15:46 +0100 Subject: [PATCH 379/658] Update test_dynamodb.py --- tests/test_dynamodb2/test_dynamodb.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/test_dynamodb2/test_dynamodb.py b/tests/test_dynamodb2/test_dynamodb.py index 19c585bfaeef..8071a4d8dfee 100644 --- a/tests/test_dynamodb2/test_dynamodb.py +++ b/tests/test_dynamodb2/test_dynamodb.py @@ -3045,7 +3045,7 @@ def 
test_batch_items_throws_exception_when_requesting_100_items_for_single_table dynamodb.batch_get_item( RequestItems={ "users": { - "Keys": [{"username": {"S": f"user{i}"}} for i in range(0, 104)], + "Keys": [{"username": {"S": "user" + str(i)}} for i in range(0, 104)], "ConsistentRead": True, } } From d21088699e9224b254f164d5654fa02759c9a5e1 Mon Sep 17 00:00:00 2001 From: Bert Blommers Date: Wed, 3 Jun 2020 17:14:48 +0100 Subject: [PATCH 380/658] Linting --- tests/test_dynamodb2/test_dynamodb.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/tests/test_dynamodb2/test_dynamodb.py b/tests/test_dynamodb2/test_dynamodb.py index 8071a4d8dfee..3709991166b1 100644 --- a/tests/test_dynamodb2/test_dynamodb.py +++ b/tests/test_dynamodb2/test_dynamodb.py @@ -3045,7 +3045,9 @@ def test_batch_items_throws_exception_when_requesting_100_items_for_single_table dynamodb.batch_get_item( RequestItems={ "users": { - "Keys": [{"username": {"S": "user" + str(i)}} for i in range(0, 104)], + "Keys": [ + {"username": {"S": "user" + str(i)}} for i in range(0, 104) + ], "ConsistentRead": True, } } From a66b0e5b1a5087b206b9e6bb384e2de993c96f2e Mon Sep 17 00:00:00 2001 From: Bert Blommers Date: Thu, 4 Jun 2020 07:45:00 +0100 Subject: [PATCH 381/658] CloudFormation - Support DynamoDB Streams --- moto/dynamodb2/models/__init__.py | 2 ++ .../test_cloudformation_stack_integration.py | 7 +++++++ 2 files changed, 9 insertions(+) diff --git a/moto/dynamodb2/models/__init__.py b/moto/dynamodb2/models/__init__.py index a5277800ffe4..ff4ad3594235 100644 --- a/moto/dynamodb2/models/__init__.py +++ b/moto/dynamodb2/models/__init__.py @@ -342,6 +342,8 @@ def create_from_cloudformation_json( params["throughput"] = properties["ProvisionedThroughput"] if "LocalSecondaryIndexes" in properties: params["indexes"] = properties["LocalSecondaryIndexes"] + if "StreamSpecification" in properties: + params["streams"] = properties["StreamSpecification"] table = dynamodb_backends[region_name].create_table( name=properties["TableName"], **params diff --git a/tests/test_cloudformation/test_cloudformation_stack_integration.py b/tests/test_cloudformation/test_cloudformation_stack_integration.py index 27bac5e57a4a..ad2436696eb5 100644 --- a/tests/test_cloudformation/test_cloudformation_stack_integration.py +++ b/tests/test_cloudformation/test_cloudformation_stack_integration.py @@ -2307,6 +2307,7 @@ def test_stack_dynamodb_resources_integration(): }, } ], + "StreamSpecification": {"StreamViewType": "KEYS_ONLY"}, }, } }, @@ -2319,6 +2320,12 @@ def test_stack_dynamodb_resources_integration(): StackName="dynamodb_stack", TemplateBody=dynamodb_template_json ) + dynamodb_client = boto3.client("dynamodb", region_name="us-east-1") + table_desc = dynamodb_client.describe_table(TableName="myTableName")["Table"] + table_desc["StreamSpecification"].should.equal( + {"StreamEnabled": True, "StreamViewType": "KEYS_ONLY",} + ) + dynamodb_conn = boto3.resource("dynamodb", region_name="us-east-1") table = dynamodb_conn.Table("myTableName") table.name.should.equal("myTableName") From 029b2a9751817ed34bb43027c196a76cc97edadc Mon Sep 17 00:00:00 2001 From: Matt Bullock Date: Thu, 4 Jun 2020 06:04:53 -0700 Subject: [PATCH 382/658] chore: refine python-jose dependency (#3049) --- setup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup.py b/setup.py index 994e55300a89..707a56212925 100755 --- a/setup.py +++ b/setup.py @@ -41,7 +41,7 @@ def get_version(): "PyYAML>=5.1", "pytz", "python-dateutil<3.0.0,>=2.1", - 
"python-jose<4.0.0", + "python-jose[cryptography]>=3.1.0,<4.0.0", "docker>=2.5.1", "jsondiff>=1.1.2", "aws-xray-sdk!=0.96,>=0.93", From e32a60185fb7369d8916ee077fbabbc1bf9747ed Mon Sep 17 00:00:00 2001 From: Guilherme Martins Crocetti Date: Sat, 6 Jun 2020 07:31:14 -0300 Subject: [PATCH 383/658] Cloudformation - EventBus support (#3052) * add EventBus to model's map * add support for creation of EventBus through cloudformation's api * add cloudformation's delete * add cloudformation's update * add cloudformation's attribute --- moto/cloudformation/parsing.py | 1 + moto/events/models.py | 49 ++++- .../test_cloudformation_stack_integration.py | 175 ++++++++++++++++++ 3 files changed, 223 insertions(+), 2 deletions(-) diff --git a/moto/cloudformation/parsing.py b/moto/cloudformation/parsing.py index 81d4d1c7d86b..05ebdace8e7a 100644 --- a/moto/cloudformation/parsing.py +++ b/moto/cloudformation/parsing.py @@ -96,6 +96,7 @@ "AWS::S3::Bucket": s3_models.FakeBucket, "AWS::SQS::Queue": sqs_models.Queue, "AWS::Events::Rule": events_models.Rule, + "AWS::Events::EventBus": events_models.EventBus, } UNDOCUMENTED_NAME_TYPE_MAP = { diff --git a/moto/events/models.py b/moto/events/models.py index 5397f28ca7ad..360c8d63166c 100644 --- a/moto/events/models.py +++ b/moto/events/models.py @@ -134,6 +134,52 @@ def policy(self): return json.dumps(policy) + def delete(self, region_name): + event_backend = events_backends[region_name] + event_backend.delete_event_bus(name=self.name) + + def get_cfn_attribute(self, attribute_name): + from moto.cloudformation.exceptions import UnformattedGetAttTemplateException + + if attribute_name == "Arn": + return self.arn + elif attribute_name == "Name": + return self.name + elif attribute_name == "Policy": + return self.policy + + raise UnformattedGetAttTemplateException() + + @classmethod + def create_from_cloudformation_json( + cls, resource_name, cloudformation_json, region_name + ): + properties = cloudformation_json["Properties"] + event_backend = events_backends[region_name] + event_name = properties["Name"] + event_source_name = properties.get("EventSourceName") + return event_backend.create_event_bus( + name=event_name, event_source_name=event_source_name + ) + + @classmethod + def update_from_cloudformation_json( + cls, original_resource, new_resource_name, cloudformation_json, region_name + ): + original_resource.delete(region_name) + return cls.create_from_cloudformation_json( + new_resource_name, cloudformation_json, region_name + ) + + @classmethod + def delete_from_cloudformation_json( + cls, resource_name, cloudformation_json, region_name + ): + properties = cloudformation_json["Properties"] + event_backend = events_backends[region_name] + event_bus_name = properties["Name"] + event_backend.delete_event_bus(event_bus_name) + class EventsBackend(BaseBackend): ACCOUNT_ID = re.compile(r"^(\d{1,12}|\*)$") @@ -369,7 +415,7 @@ def describe_event_bus(self, name): return event_bus - def create_event_bus(self, name, event_source_name): + def create_event_bus(self, name, event_source_name=None): if name in self.event_buses: raise JsonRESTError( "ResourceAlreadyExistsException", @@ -406,7 +452,6 @@ def delete_event_bus(self, name): raise JsonRESTError( "ValidationException", "Cannot delete event bus default." 
) - self.event_buses.pop(name, None) def list_tags_for_resource(self, arn): diff --git a/tests/test_cloudformation/test_cloudformation_stack_integration.py b/tests/test_cloudformation/test_cloudformation_stack_integration.py index 082a20e01e86..fec9891ad3b5 100644 --- a/tests/test_cloudformation/test_cloudformation_stack_integration.py +++ b/tests/test_cloudformation/test_cloudformation_stack_integration.py @@ -2585,3 +2585,178 @@ def test_autoscaling_propagate_tags(): assert propagation_dict["test-key-propagate"] assert not propagation_dict["test-key-no-propagate"] + + +@mock_cloudformation +@mock_events +def test_stack_eventbus_create_from_cfn_integration(): + eventbus_template = """{ + "AWSTemplateFormatVersion": "2010-09-09", + "Resources": { + "EventBus": { + "Type": "AWS::Events::EventBus", + "Properties": { + "Name": "MyCustomEventBus" + }, + } + }, + }""" + + cf_conn = boto3.client("cloudformation", "us-west-2") + cf_conn.create_stack(StackName="test_stack", TemplateBody=eventbus_template) + + event_buses = boto3.client("events", "us-west-2").list_event_buses( + NamePrefix="MyCustom" + ) + + event_buses["EventBuses"].should.have.length_of(1) + event_buses["EventBuses"][0]["Name"].should.equal("MyCustomEventBus") + + +@mock_cloudformation +@mock_events +def test_stack_events_delete_eventbus_integration(): + eventbus_template = """{ + "AWSTemplateFormatVersion": "2010-09-09", + "Resources": { + "EventBus": { + "Type": "AWS::Events::EventBus", + "Properties": { + "Name": "MyCustomEventBus" + }, + } + }, + }""" + cf_conn = boto3.client("cloudformation", "us-west-2") + cf_conn.create_stack(StackName="test_stack", TemplateBody=eventbus_template) + + event_buses = boto3.client("events", "us-west-2").list_event_buses( + NamePrefix="MyCustom" + ) + event_buses["EventBuses"].should.have.length_of(1) + + cf_conn.delete_stack(StackName="test_stack") + + event_buses = boto3.client("events", "us-west-2").list_event_buses( + NamePrefix="MyCustom" + ) + event_buses["EventBuses"].should.have.length_of(0) + + +@mock_cloudformation +@mock_events +def test_stack_events_delete_from_cfn_integration(): + eventbus_template = Template( + """{ + "AWSTemplateFormatVersion": "2010-09-09", + "Resources": { + "$resource_name": { + "Type": "AWS::Events::EventBus", + "Properties": { + "Name": "$name" + }, + } + }, + }""" + ) + + cf_conn = boto3.client("cloudformation", "us-west-2") + + original_template = eventbus_template.substitute( + {"resource_name": "original", "name": "MyCustomEventBus"} + ) + cf_conn.create_stack(StackName="test_stack", TemplateBody=original_template) + + original_event_buses = boto3.client("events", "us-west-2").list_event_buses( + NamePrefix="MyCustom" + ) + original_event_buses["EventBuses"].should.have.length_of(1) + + original_eventbus = original_event_buses["EventBuses"][0] + + updated_template = eventbus_template.substitute( + {"resource_name": "updated", "name": "AnotherEventBus"} + ) + cf_conn.update_stack(StackName="test_stack", TemplateBody=updated_template) + + update_event_buses = boto3.client("events", "us-west-2").list_event_buses( + NamePrefix="AnotherEventBus" + ) + update_event_buses["EventBuses"].should.have.length_of(1) + update_event_buses["EventBuses"][0]["Arn"].shouldnt.equal(original_eventbus["Arn"]) + + +@mock_cloudformation +@mock_events +def test_stack_events_update_from_cfn_integration(): + eventbus_template = Template( + """{ + "AWSTemplateFormatVersion": "2010-09-09", + "Resources": { + "EventBus": { + "Type": "AWS::Events::EventBus", + "Properties": { + 
"Name": "$name" + }, + } + }, + }""" + ) + + cf_conn = boto3.client("cloudformation", "us-west-2") + + original_template = eventbus_template.substitute({"name": "MyCustomEventBus"}) + cf_conn.create_stack(StackName="test_stack", TemplateBody=original_template) + + original_event_buses = boto3.client("events", "us-west-2").list_event_buses( + NamePrefix="MyCustom" + ) + original_event_buses["EventBuses"].should.have.length_of(1) + + original_eventbus = original_event_buses["EventBuses"][0] + + updated_template = eventbus_template.substitute({"name": "NewEventBus"}) + cf_conn.update_stack(StackName="test_stack", TemplateBody=updated_template) + + update_event_buses = boto3.client("events", "us-west-2").list_event_buses( + NamePrefix="NewEventBus" + ) + update_event_buses["EventBuses"].should.have.length_of(1) + update_event_buses["EventBuses"][0]["Name"].should.equal("NewEventBus") + update_event_buses["EventBuses"][0]["Arn"].shouldnt.equal(original_eventbus["Arn"]) + + +@mock_cloudformation +@mock_events +def test_stack_events_get_attribute_integration(): + eventbus_template = """{ + "AWSTemplateFormatVersion": "2010-09-09", + "Resources": { + "EventBus": { + "Type": "AWS::Events::EventBus", + "Properties": { + "Name": "MyEventBus" + }, + } + }, + "Outputs": { + "bus_arn": {"Value": {"Fn::GetAtt": ["EventBus", "Arn"]}}, + "bus_name": {"Value": {"Fn::GetAtt": ["EventBus", "Name"]}}, + } + }""" + + cf = boto3.client("cloudformation", "us-west-2") + events = boto3.client("events", "us-west-2") + + cf.create_stack(StackName="test_stack", TemplateBody=eventbus_template) + + stack = cf.describe_stacks(StackName="test_stack")["Stacks"][0] + outputs = stack["Outputs"] + + output_arn = list(filter(lambda item: item["OutputKey"] == "bus_arn", outputs))[0] + output_name = list(filter(lambda item: item["OutputKey"] == "bus_name", outputs))[0] + + event_bus = events.list_event_buses(NamePrefix="MyEventBus")["EventBuses"][0] + + output_arn["OutputValue"].should.equal(event_bus["Arn"]) + output_name["OutputValue"].should.equal(event_bus["Name"]) From 20784a2d67f791f0c01a576c3eda88ca2036e235 Mon Sep 17 00:00:00 2001 From: Bert Blommers Date: Sat, 6 Jun 2020 13:15:50 +0100 Subject: [PATCH 384/658] Improve implementation coverage --- IMPLEMENTATION_COVERAGE.md | 341 ++++++++++++++++++++++++++++--- moto/awslambda/models.py | 4 +- moto/cloudformation/parsing.py | 2 +- moto/cloudformation/responses.py | 2 +- moto/s3/models.py | 30 ++- moto/s3/responses.py | 64 +++--- tests/test_s3/test_s3.py | 2 +- 7 files changed, 378 insertions(+), 67 deletions(-) diff --git a/IMPLEMENTATION_COVERAGE.md b/IMPLEMENTATION_COVERAGE.md index bfcdd316797c..43983d912c46 100644 --- a/IMPLEMENTATION_COVERAGE.md +++ b/IMPLEMENTATION_COVERAGE.md @@ -377,6 +377,7 @@ - [ ] delete_route_settings - [ ] delete_stage - [ ] delete_vpc_link +- [ ] export_api - [ ] get_api - [ ] get_api_mapping - [ ] get_api_mappings @@ -754,6 +755,7 @@ - [ ] describe_copy_job - [ ] describe_protected_resource - [ ] describe_recovery_point +- [ ] describe_region_settings - [ ] describe_restore_job - [ ] export_backup_plan_template - [ ] get_backup_plan @@ -786,6 +788,7 @@ - [ ] untag_resource - [ ] update_backup_plan - [ ] update_recovery_point_lifecycle +- [ ] update_region_settings
## batch @@ -875,6 +878,7 @@ - [ ] create_bot - [ ] create_meeting - [ ] create_phone_number_order +- [ ] create_proxy_session - [ ] create_room - [ ] create_room_membership - [ ] create_user @@ -885,11 +889,13 @@ - [ ] delete_events_configuration - [ ] delete_meeting - [ ] delete_phone_number +- [ ] delete_proxy_session - [ ] delete_room - [ ] delete_room_membership - [ ] delete_voice_connector - [ ] delete_voice_connector_group - [ ] delete_voice_connector_origination +- [ ] delete_voice_connector_proxy - [ ] delete_voice_connector_streaming_configuration - [ ] delete_voice_connector_termination - [ ] delete_voice_connector_termination_credentials @@ -907,6 +913,8 @@ - [ ] get_phone_number - [ ] get_phone_number_order - [ ] get_phone_number_settings +- [ ] get_proxy_session +- [ ] get_retention_settings - [ ] get_room - [ ] get_user - [ ] get_user_settings @@ -914,39 +922,55 @@ - [ ] get_voice_connector_group - [ ] get_voice_connector_logging_configuration - [ ] get_voice_connector_origination +- [ ] get_voice_connector_proxy - [ ] get_voice_connector_streaming_configuration - [ ] get_voice_connector_termination - [ ] get_voice_connector_termination_health - [ ] invite_users - [ ] list_accounts +- [ ] list_attendee_tags - [ ] list_attendees - [ ] list_bots +- [ ] list_meeting_tags - [ ] list_meetings - [ ] list_phone_number_orders - [ ] list_phone_numbers +- [ ] list_proxy_sessions - [ ] list_room_memberships - [ ] list_rooms +- [ ] list_tags_for_resource - [ ] list_users - [ ] list_voice_connector_groups - [ ] list_voice_connector_termination_credentials - [ ] list_voice_connectors - [ ] logout_user - [ ] put_events_configuration +- [ ] put_retention_settings - [ ] put_voice_connector_logging_configuration - [ ] put_voice_connector_origination +- [ ] put_voice_connector_proxy - [ ] put_voice_connector_streaming_configuration - [ ] put_voice_connector_termination - [ ] put_voice_connector_termination_credentials +- [ ] redact_conversation_message +- [ ] redact_room_message - [ ] regenerate_security_token - [ ] reset_personal_pin - [ ] restore_phone_number - [ ] search_available_phone_numbers +- [ ] tag_attendee +- [ ] tag_meeting +- [ ] tag_resource +- [ ] untag_attendee +- [ ] untag_meeting +- [ ] untag_resource - [ ] update_account - [ ] update_account_settings - [ ] update_bot - [ ] update_global_settings - [ ] update_phone_number - [ ] update_phone_number_settings +- [ ] update_proxy_session - [ ] update_room - [ ] update_room_membership - [ ] update_user @@ -1446,6 +1470,7 @@ - [ ] delete_deployment_config - [ ] delete_deployment_group - [ ] delete_git_hub_account_token +- [ ] delete_resources_by_external_id - [ ] deregister_on_premises_instance - [ ] get_application - [ ] get_application_revision @@ -1482,9 +1507,15 @@ 0% implemented - [ ] associate_repository +- [ ] describe_code_review +- [ ] describe_recommendation_feedback - [ ] describe_repository_association - [ ] disassociate_repository +- [ ] list_code_reviews +- [ ] list_recommendation_feedback +- [ ] list_recommendations - [ ] list_repository_associations +- [ ] put_recommendation_feedback
## codeguruprofiler @@ -1495,10 +1526,13 @@ - [ ] create_profiling_group - [ ] delete_profiling_group - [ ] describe_profiling_group +- [ ] get_policy - [ ] get_profile - [ ] list_profile_times - [ ] list_profiling_groups - [ ] post_agent_profile +- [ ] put_permission +- [ ] remove_permission - [ ] update_profiling_group @@ -1577,6 +1611,9 @@ - [ ] delete_connection - [ ] get_connection - [ ] list_connections +- [ ] list_tags_for_resource +- [ ] tag_resource +- [ ] untag_resource ## codestar-notifications @@ -1816,18 +1853,26 @@ 0% implemented - [ ] describe_entities_detection_v2_job +- [ ] describe_icd10_cm_inference_job - [ ] describe_phi_detection_job +- [ ] describe_rx_norm_inference_job - [ ] detect_entities - [ ] detect_entities_v2 - [ ] detect_phi - [ ] infer_icd10_cm - [ ] infer_rx_norm - [ ] list_entities_detection_v2_jobs +- [ ] list_icd10_cm_inference_jobs - [ ] list_phi_detection_jobs +- [ ] list_rx_norm_inference_jobs - [ ] start_entities_detection_v2_job +- [ ] start_icd10_cm_inference_job - [ ] start_phi_detection_job +- [ ] start_rx_norm_inference_job - [ ] stop_entities_detection_v2_job +- [ ] stop_icd10_cm_inference_job - [ ] stop_phi_detection_job +- [ ] stop_rx_norm_inference_job ## compute-optimizer @@ -2113,6 +2158,7 @@ - [ ] list_invitations - [ ] list_members - [ ] reject_invitation +- [ ] start_monitoring_member ## devicefarm @@ -2679,6 +2725,7 @@ - [X] delete_vpn_gateway - [ ] deprovision_byoip_cidr - [X] deregister_image +- [ ] deregister_instance_event_notification_attributes - [ ] deregister_transit_gateway_multicast_group_members - [ ] deregister_transit_gateway_multicast_group_sources - [ ] describe_account_attributes @@ -2721,6 +2768,7 @@ - [ ] describe_import_snapshot_tasks - [X] describe_instance_attribute - [X] describe_instance_credit_specifications +- [ ] describe_instance_event_notification_attributes - [ ] describe_instance_status - [ ] describe_instance_type_offerings - [ ] describe_instance_types @@ -2892,6 +2940,7 @@ - [ ] purchase_scheduled_instances - [X] reboot_instances - [ ] register_image +- [ ] register_instance_event_notification_attributes - [ ] register_transit_gateway_multicast_group_members - [ ] register_transit_gateway_multicast_group_sources - [ ] reject_transit_gateway_peering_attachment @@ -3094,6 +3143,9 @@
0% implemented +- [ ] describe_accelerator_offerings +- [ ] describe_accelerator_types +- [ ] describe_accelerators - [ ] list_tags_for_resource - [ ] tag_resource - [ ] untag_resource @@ -3195,6 +3247,7 @@ - [ ] describe_instances_health - [ ] describe_platform_version - [X] list_available_solution_stacks +- [ ] list_platform_branches - [ ] list_platform_versions - [X] list_tags_for_resource - [ ] rebuild_environment @@ -3312,7 +3365,7 @@ ## emr
-50% implemented +45% implemented - [ ] add_instance_fleet - [X] add_instance_groups @@ -3326,6 +3379,7 @@ - [ ] describe_security_configuration - [X] describe_step - [ ] get_block_public_access_configuration +- [ ] get_managed_scaling_policy - [X] list_bootstrap_actions - [X] list_clusters - [ ] list_instance_fleets @@ -3338,7 +3392,9 @@ - [X] modify_instance_groups - [ ] put_auto_scaling_policy - [ ] put_block_public_access_configuration +- [ ] put_managed_scaling_policy - [ ] remove_auto_scaling_policy +- [ ] remove_managed_scaling_policy - [X] remove_tags - [X] run_job_flow - [X] set_termination_protection @@ -3351,22 +3407,29 @@ 0% implemented - [ ] add_tags +- [ ] associate_package - [ ] cancel_elasticsearch_service_software_update - [ ] create_elasticsearch_domain +- [ ] create_package - [ ] delete_elasticsearch_domain - [ ] delete_elasticsearch_service_role +- [ ] delete_package - [ ] describe_elasticsearch_domain - [ ] describe_elasticsearch_domain_config - [ ] describe_elasticsearch_domains - [ ] describe_elasticsearch_instance_type_limits +- [ ] describe_packages - [ ] describe_reserved_elasticsearch_instance_offerings - [ ] describe_reserved_elasticsearch_instances +- [ ] dissociate_package - [ ] get_compatible_elasticsearch_versions - [ ] get_upgrade_history - [ ] get_upgrade_status - [ ] list_domain_names +- [ ] list_domains_for_package - [ ] list_elasticsearch_instance_types - [ ] list_elasticsearch_versions +- [ ] list_packages_for_domain - [ ] list_tags - [ ] purchase_reserved_elasticsearch_instance_offering - [ ] remove_tags @@ -3502,8 +3565,10 @@ - [ ] create_model_version - [ ] create_rule - [ ] create_variable +- [ ] delete_detector - [ ] delete_detector_version - [ ] delete_event +- [ ] delete_rule_version - [ ] describe_detector - [ ] describe_model_versions - [ ] get_detector_version @@ -3553,9 +3618,11 @@ 0% implemented - [ ] accept_match +- [ ] claim_game_server - [ ] create_alias - [ ] create_build - [ ] create_fleet +- [ ] create_game_server_group - [ ] create_game_session - [ ] create_game_session_queue - [ ] create_matchmaking_configuration @@ -3568,6 +3635,7 @@ - [ ] delete_alias - [ ] delete_build - [ ] delete_fleet +- [ ] delete_game_server_group - [ ] delete_game_session_queue - [ ] delete_matchmaking_configuration - [ ] delete_matchmaking_rule_set @@ -3575,6 +3643,7 @@ - [ ] delete_script - [ ] delete_vpc_peering_authorization - [ ] delete_vpc_peering_connection +- [ ] deregister_game_server - [ ] describe_alias - [ ] describe_build - [ ] describe_ec2_instance_limits @@ -3583,6 +3652,8 @@ - [ ] describe_fleet_events - [ ] describe_fleet_port_settings - [ ] describe_fleet_utilization +- [ ] describe_game_server +- [ ] describe_game_server_group - [ ] describe_game_session_details - [ ] describe_game_session_placement - [ ] describe_game_session_queues @@ -3602,11 +3673,15 @@ - [ ] list_aliases - [ ] list_builds - [ ] list_fleets +- [ ] list_game_server_groups +- [ ] list_game_servers - [ ] list_scripts - [ ] list_tags_for_resource - [ ] put_scaling_policy +- [ ] register_game_server - [ ] request_upload_credentials - [ ] resolve_alias +- [ ] resume_game_server_group - [ ] search_game_sessions - [ ] start_fleet_actions - [ ] start_game_session_placement @@ -3615,6 +3690,7 @@ - [ ] stop_fleet_actions - [ ] stop_game_session_placement - [ ] stop_matchmaking +- [ ] suspend_game_server_group - [ ] tag_resource - [ ] untag_resource - [ ] update_alias @@ -3622,6 +3698,8 @@ - [ ] update_fleet_attributes - [ ] update_fleet_capacity - [ ] update_fleet_port_settings 
+- [ ] update_game_server +- [ ] update_game_server_group - [ ] update_game_session - [ ] update_game_session_queue - [ ] update_matchmaking_configuration @@ -3813,6 +3891,7 @@ - [ ] stop_crawler - [ ] stop_crawler_schedule - [ ] stop_trigger +- [ ] stop_workflow_run - [ ] tag_resource - [ ] untag_resource - [ ] update_classifier @@ -3978,9 +4057,12 @@ - [ ] delete_members - [ ] delete_publishing_destination - [ ] delete_threat_intel_set +- [ ] describe_organization_configuration - [ ] describe_publishing_destination +- [ ] disable_organization_admin_account - [ ] disassociate_from_master_account - [ ] disassociate_members +- [ ] enable_organization_admin_account - [ ] get_detector - [ ] get_filter - [ ] get_findings @@ -3997,6 +4079,7 @@ - [ ] list_invitations - [ ] list_ip_sets - [ ] list_members +- [ ] list_organization_admin_accounts - [ ] list_publishing_destinations - [ ] list_tags_for_resource - [ ] list_threat_intel_sets @@ -4009,6 +4092,7 @@ - [ ] update_filter - [ ] update_findings_feedback - [ ] update_ip_set +- [ ] update_organization_configuration - [ ] update_publishing_destination - [ ] update_threat_intel_set
@@ -4034,7 +4118,7 @@ ## iam
-68% implemented +69% implemented - [ ] add_client_id_to_open_id_connect_provider - [X] add_role_to_instance_profile @@ -4063,7 +4147,7 @@ - [X] delete_account_password_policy - [X] delete_group - [ ] delete_group_policy -- [ ] delete_instance_profile +- [X] delete_instance_profile - [X] delete_login_profile - [X] delete_open_id_connect_provider - [X] delete_policy @@ -4303,6 +4387,7 @@ - [ ] create_authorizer - [ ] create_billing_group - [ ] create_certificate_from_csr +- [ ] create_dimension - [ ] create_domain_configuration - [ ] create_dynamic_thing_group - [X] create_job @@ -4328,6 +4413,7 @@ - [ ] delete_billing_group - [ ] delete_ca_certificate - [X] delete_certificate +- [ ] delete_dimension - [ ] delete_domain_configuration - [ ] delete_dynamic_thing_group - [X] delete_job @@ -4359,6 +4445,7 @@ - [ ] describe_ca_certificate - [X] describe_certificate - [ ] describe_default_authorizer +- [ ] describe_dimension - [ ] describe_domain_configuration - [ ] describe_endpoint - [ ] describe_event_configurations @@ -4407,6 +4494,7 @@ - [ ] list_ca_certificates - [X] list_certificates - [ ] list_certificates_by_ca +- [ ] list_dimensions - [ ] list_domain_configurations - [ ] list_indices - [X] list_job_executions_for_job @@ -4445,6 +4533,7 @@ - [ ] list_violation_events - [ ] register_ca_certificate - [X] register_certificate +- [ ] register_certificate_without_ca - [ ] register_thing - [ ] reject_certificate_transfer - [ ] remove_thing_from_billing_group @@ -4470,6 +4559,7 @@ - [ ] update_billing_group - [ ] update_ca_certificate - [X] update_certificate +- [ ] update_dimension - [ ] update_domain_configuration - [ ] update_dynamic_thing_group - [ ] update_event_configurations @@ -4634,6 +4724,66 @@ - [ ] untag_resource
+## iotsitewise +
+0% implemented + +- [ ] associate_assets +- [ ] batch_associate_project_assets +- [ ] batch_disassociate_project_assets +- [ ] batch_put_asset_property_value +- [ ] create_access_policy +- [ ] create_asset +- [ ] create_asset_model +- [ ] create_dashboard +- [ ] create_gateway +- [ ] create_portal +- [ ] create_project +- [ ] delete_access_policy +- [ ] delete_asset +- [ ] delete_asset_model +- [ ] delete_dashboard +- [ ] delete_gateway +- [ ] delete_portal +- [ ] delete_project +- [ ] describe_access_policy +- [ ] describe_asset +- [ ] describe_asset_model +- [ ] describe_asset_property +- [ ] describe_dashboard +- [ ] describe_gateway +- [ ] describe_gateway_capability_configuration +- [ ] describe_logging_options +- [ ] describe_portal +- [ ] describe_project +- [ ] disassociate_assets +- [ ] get_asset_property_aggregates +- [ ] get_asset_property_value +- [ ] get_asset_property_value_history +- [ ] list_access_policies +- [ ] list_asset_models +- [ ] list_assets +- [ ] list_associated_assets +- [ ] list_dashboards +- [ ] list_gateways +- [ ] list_portals +- [ ] list_project_assets +- [ ] list_projects +- [ ] list_tags_for_resource +- [ ] put_logging_options +- [ ] tag_resource +- [ ] untag_resource +- [ ] update_access_policy +- [ ] update_asset +- [ ] update_asset_model +- [ ] update_asset_property +- [ ] update_dashboard +- [ ] update_gateway +- [ ] update_gateway_capability_configuration +- [ ] update_portal +- [ ] update_project +
+ ## iotthingsgraph
0% implemented @@ -4687,6 +4837,7 @@ - [ ] describe_configuration - [ ] describe_configuration_revision - [ ] get_bootstrap_brokers +- [ ] get_compatible_kafka_versions - [ ] list_cluster_operations - [ ] list_clusters - [ ] list_configuration_revisions @@ -4699,6 +4850,7 @@ - [ ] update_broker_count - [ ] update_broker_storage - [ ] update_cluster_configuration +- [ ] update_cluster_kafka_version - [ ] update_monitoring
@@ -4711,6 +4863,7 @@ - [ ] create_data_source - [ ] create_faq - [ ] create_index +- [ ] delete_data_source - [ ] delete_faq - [ ] delete_index - [ ] describe_data_source @@ -4720,10 +4873,13 @@ - [ ] list_data_sources - [ ] list_faqs - [ ] list_indices +- [ ] list_tags_for_resource - [ ] query - [ ] start_data_source_sync_job - [ ] stop_data_source_sync_job - [ ] submit_feedback +- [ ] tag_resource +- [ ] untag_resource - [ ] update_data_source - [ ] update_index
@@ -4766,6 +4922,7 @@
0% implemented +- [ ] get_clip - [ ] get_dash_streaming_session_url - [ ] get_hls_streaming_session_url - [ ] get_media_for_fragment_list @@ -5195,7 +5352,7 @@ ## logs
-43% implemented +40% implemented - [ ] associate_kms_key - [ ] cancel_export_task @@ -5206,6 +5363,7 @@ - [X] delete_log_group - [X] delete_log_stream - [ ] delete_metric_filter +- [ ] delete_query_definition - [ ] delete_resource_policy - [X] delete_retention_policy - [X] delete_subscription_filter @@ -5215,6 +5373,7 @@ - [X] describe_log_streams - [ ] describe_metric_filters - [ ] describe_queries +- [ ] describe_query_definitions - [ ] describe_resource_policies - [X] describe_subscription_filters - [ ] disassociate_kms_key @@ -5228,6 +5387,7 @@ - [ ] put_destination_policy - [X] put_log_events - [ ] put_metric_filter +- [ ] put_query_definition - [ ] put_resource_policy - [X] put_retention_policy - [X] put_subscription_filter @@ -5285,27 +5445,88 @@ - [ ] update_s3_resources
+## macie2 +
+0% implemented + +- [ ] accept_invitation +- [ ] archive_findings +- [ ] batch_get_custom_data_identifiers +- [ ] create_classification_job +- [ ] create_custom_data_identifier +- [ ] create_findings_filter +- [ ] create_invitations +- [ ] create_member +- [ ] create_sample_findings +- [ ] decline_invitations +- [ ] delete_custom_data_identifier +- [ ] delete_findings_filter +- [ ] delete_invitations +- [ ] delete_member +- [ ] describe_buckets +- [ ] describe_classification_job +- [ ] describe_organization_configuration +- [ ] disable_macie +- [ ] disable_organization_admin_account +- [ ] disassociate_from_master_account +- [ ] disassociate_member +- [ ] enable_macie +- [ ] enable_organization_admin_account +- [ ] get_bucket_statistics +- [ ] get_classification_export_configuration +- [ ] get_custom_data_identifier +- [ ] get_finding_statistics +- [ ] get_findings +- [ ] get_findings_filter +- [ ] get_invitations_count +- [ ] get_macie_session +- [ ] get_master_account +- [ ] get_member +- [ ] get_usage_statistics +- [ ] get_usage_totals +- [ ] list_classification_jobs +- [ ] list_custom_data_identifiers +- [ ] list_findings +- [ ] list_findings_filters +- [ ] list_invitations +- [ ] list_members +- [ ] list_organization_admin_accounts +- [ ] list_tags_for_resource +- [ ] put_classification_export_configuration +- [ ] tag_resource +- [ ] test_custom_data_identifier +- [ ] unarchive_findings +- [ ] untag_resource +- [ ] update_classification_job +- [ ] update_findings_filter +- [ ] update_macie_session +- [ ] update_member_session +- [ ] update_organization_configuration +
+ ## managedblockchain
-77% implemented +100% implemented - [X] create_member - [X] create_network -- [ ] create_node +- [X] create_node - [X] create_proposal - [X] delete_member -- [ ] delete_node +- [X] delete_node - [X] get_member - [X] get_network -- [ ] get_node +- [X] get_node - [X] get_proposal - [X] list_invitations - [X] list_members - [X] list_networks -- [ ] list_nodes +- [X] list_nodes - [X] list_proposal_votes - [X] list_proposals - [X] reject_invitation +- [X] update_member +- [X] update_node - [X] vote_on_proposal
@@ -5342,6 +5563,7 @@ - [ ] add_flow_outputs - [ ] add_flow_sources +- [ ] add_flow_vpc_interfaces - [ ] create_flow - [ ] delete_flow - [ ] describe_flow @@ -5351,6 +5573,7 @@ - [ ] list_tags_for_resource - [ ] remove_flow_output - [ ] remove_flow_source +- [ ] remove_flow_vpc_interface - [ ] revoke_flow_entitlement - [ ] start_flow - [ ] stop_flow @@ -5414,6 +5637,7 @@ - [ ] delete_tags - [ ] describe_channel - [ ] describe_input +- [ ] describe_input_device - [ ] describe_input_security_group - [ ] describe_multiplex - [ ] describe_multiplex_program @@ -5421,6 +5645,7 @@ - [ ] describe_reservation - [ ] describe_schedule - [ ] list_channels +- [ ] list_input_devices - [ ] list_input_security_groups - [ ] list_inputs - [ ] list_multiplex_programs @@ -5436,6 +5661,7 @@ - [ ] update_channel - [ ] update_channel_class - [ ] update_input +- [ ] update_input_device - [ ] update_input_security_group - [ ] update_multiplex - [ ] update_multiplex_program @@ -5482,6 +5708,9 @@ - [ ] list_assets - [ ] list_packaging_configurations - [ ] list_packaging_groups +- [ ] list_tags_for_resource +- [ ] tag_resource +- [ ] untag_resource
## mediastore @@ -5493,15 +5722,18 @@ - [ ] delete_container_policy - [ ] delete_cors_policy - [ ] delete_lifecycle_policy +- [ ] delete_metric_policy - [ ] describe_container - [ ] get_container_policy - [ ] get_cors_policy - [ ] get_lifecycle_policy +- [ ] get_metric_policy - [ ] list_containers - [ ] list_tags_for_resource - [ ] put_container_policy - [ ] put_cors_policy - [ ] put_lifecycle_policy +- [ ] put_metric_policy - [ ] start_access_logging - [ ] stop_access_logging - [ ] tag_resource @@ -5868,7 +6100,7 @@ ## organizations
-51% implemented +47% implemented - [ ] accept_handshake - [X] attach_policy @@ -5882,6 +6114,7 @@ - [ ] delete_organization - [ ] delete_organizational_unit - [ ] delete_policy +- [ ] deregister_delegated_administrator - [X] describe_account - [X] describe_create_account_status - [ ] describe_effective_policy @@ -5902,6 +6135,8 @@ - [ ] list_aws_service_access_for_organization - [X] list_children - [ ] list_create_account_status +- [ ] list_delegated_administrators +- [ ] list_delegated_services_for_account - [ ] list_handshakes_for_account - [ ] list_handshakes_for_organization - [X] list_organizational_units_for_parent @@ -5912,6 +6147,7 @@ - [X] list_tags_for_resource - [X] list_targets_for_policy - [X] move_account +- [ ] register_delegated_administrator - [ ] remove_account_from_organization - [X] tag_resource - [X] untag_resource @@ -6207,18 +6443,22 @@
0% implemented +- [ ] cancel_journal_kinesis_stream - [ ] create_ledger - [ ] delete_ledger +- [ ] describe_journal_kinesis_stream - [ ] describe_journal_s3_export - [ ] describe_ledger - [ ] export_journal_to_s3 - [ ] get_block - [ ] get_digest - [ ] get_revision +- [ ] list_journal_kinesis_streams_for_ledger - [ ] list_journal_s3_exports - [ ] list_journal_s3_exports_for_ledger - [ ] list_ledgers - [ ] list_tags_for_resource +- [ ] stream_journal_to_kinesis - [ ] tag_resource - [ ] untag_resource - [ ] update_ledger @@ -6324,6 +6564,7 @@ - [ ] list_permissions - [ ] list_principals - [ ] list_resource_share_permissions +- [ ] list_resource_types - [ ] list_resources - [ ] promote_resource_share_created_from_policy - [ ] reject_resource_share_invitation @@ -6482,7 +6723,7 @@ ## redshift
-29% implemented +28% implemented - [ ] accept_reserved_node_exchange - [ ] authorize_cluster_security_group_ingress @@ -6503,6 +6744,7 @@ - [X] create_snapshot_copy_grant - [ ] create_snapshot_schedule - [X] create_tags +- [ ] create_usage_limit - [X] delete_cluster - [X] delete_cluster_parameter_group - [X] delete_cluster_security_group @@ -6515,6 +6757,7 @@ - [X] delete_snapshot_copy_grant - [ ] delete_snapshot_schedule - [X] delete_tags +- [ ] delete_usage_limit - [ ] describe_account_attributes - [ ] describe_cluster_db_revisions - [X] describe_cluster_parameter_groups @@ -6543,6 +6786,7 @@ - [ ] describe_storage - [ ] describe_table_restore_status - [X] describe_tags +- [ ] describe_usage_limits - [ ] disable_logging - [X] disable_snapshot_copy - [ ] enable_logging @@ -6561,6 +6805,7 @@ - [ ] modify_scheduled_action - [X] modify_snapshot_copy_retention_period - [ ] modify_snapshot_schedule +- [ ] modify_usage_limit - [ ] pause_cluster - [ ] purchase_reserved_node_offering - [ ] reboot_cluster @@ -6585,6 +6830,8 @@ - [ ] create_stream_processor - [ ] delete_collection - [ ] delete_faces +- [ ] delete_project +- [ ] delete_project_version - [ ] delete_stream_processor - [ ] describe_collection - [ ] describe_project_versions @@ -6767,6 +7014,8 @@
0% implemented +- [ ] accept_domain_transfer_from_another_aws_account +- [ ] cancel_domain_transfer_to_another_aws_account - [ ] check_domain_availability - [ ] check_domain_transferability - [ ] delete_tags_for_domain @@ -6782,10 +7031,12 @@ - [ ] list_operations - [ ] list_tags_for_domain - [ ] register_domain +- [ ] reject_domain_transfer_from_another_aws_account - [ ] renew_domain - [ ] resend_contact_reachability_email - [ ] retrieve_domain_auth_code - [ ] transfer_domain +- [ ] transfer_domain_to_another_aws_account - [ ] update_domain_contact - [ ] update_domain_contact_privacy - [ ] update_domain_nameservers @@ -6823,7 +7074,7 @@ ## s3
-13% implemented +25% implemented - [ ] abort_multipart_upload - [ ] complete_multipart_upload @@ -6833,7 +7084,7 @@ - [X] delete_bucket - [ ] delete_bucket_analytics_configuration - [X] delete_bucket_cors -- [ ] delete_bucket_encryption +- [X] delete_bucket_encryption - [ ] delete_bucket_inventory_configuration - [ ] delete_bucket_lifecycle - [ ] delete_bucket_metrics_configuration @@ -6841,31 +7092,31 @@ - [ ] delete_bucket_replication - [X] delete_bucket_tagging - [ ] delete_bucket_website -- [ ] delete_object +- [X] delete_object - [ ] delete_object_tagging - [ ] delete_objects - [ ] delete_public_access_block - [ ] get_bucket_accelerate_configuration - [X] get_bucket_acl - [ ] get_bucket_analytics_configuration -- [ ] get_bucket_cors -- [ ] get_bucket_encryption +- [X] get_bucket_cors +- [X] get_bucket_encryption - [ ] get_bucket_inventory_configuration - [ ] get_bucket_lifecycle - [ ] get_bucket_lifecycle_configuration - [ ] get_bucket_location -- [ ] get_bucket_logging +- [X] get_bucket_logging - [ ] get_bucket_metrics_configuration - [ ] get_bucket_notification -- [ ] get_bucket_notification_configuration +- [X] get_bucket_notification_configuration - [X] get_bucket_policy - [ ] get_bucket_policy_status - [ ] get_bucket_replication - [ ] get_bucket_request_payment -- [ ] get_bucket_tagging +- [X] get_bucket_tagging - [X] get_bucket_versioning - [ ] get_bucket_website -- [ ] get_object +- [X] get_object - [ ] get_object_acl - [ ] get_object_legal_hold - [ ] get_object_lock_configuration @@ -6888,7 +7139,7 @@ - [ ] put_bucket_acl - [ ] put_bucket_analytics_configuration - [X] put_bucket_cors -- [ ] put_bucket_encryption +- [X] put_bucket_encryption - [ ] put_bucket_inventory_configuration - [ ] put_bucket_lifecycle - [ ] put_bucket_lifecycle_configuration @@ -6899,7 +7150,7 @@ - [ ] put_bucket_policy - [ ] put_bucket_replication - [ ] put_bucket_request_payment -- [ ] put_bucket_tagging +- [X] put_bucket_tagging - [ ] put_bucket_versioning - [ ] put_bucket_website - [ ] put_object @@ -7120,6 +7371,7 @@ - [ ] create_schema - [ ] delete_discoverer - [ ] delete_registry +- [ ] delete_resource_policy - [ ] delete_schema - [ ] delete_schema_version - [ ] describe_code_binding @@ -7128,18 +7380,18 @@ - [ ] describe_schema - [ ] get_code_binding_source - [ ] get_discovered_schema +- [ ] get_resource_policy - [ ] list_discoverers - [ ] list_registries - [ ] list_schema_versions - [ ] list_schemas - [ ] list_tags_for_resource -- [ ] lock_service_linked_role - [ ] put_code_binding +- [ ] put_resource_policy - [ ] search_schemas - [ ] start_discoverer - [ ] stop_discoverer - [ ] tag_resource -- [ ] unlock_service_linked_role - [ ] untag_resource - [ ] update_discoverer - [ ] update_registry @@ -7194,6 +7446,7 @@ - [ ] batch_disable_standards - [ ] batch_enable_standards - [ ] batch_import_findings +- [ ] batch_update_findings - [ ] create_action_target - [ ] create_insight - [ ] create_members @@ -7900,6 +8153,7 @@ - [ ] create_stored_iscsi_volume - [ ] create_tape_with_barcode - [ ] create_tapes +- [ ] delete_automatic_tape_creation_policy - [ ] delete_bandwidth_rate_limit - [ ] delete_chap_credentials - [ ] delete_file_share @@ -7929,6 +8183,7 @@ - [ ] detach_volume - [ ] disable_gateway - [ ] join_domain +- [ ] list_automatic_tape_creation_policies - [ ] list_file_shares - [ ] list_gateways - [ ] list_local_disks @@ -7948,6 +8203,7 @@ - [ ] shutdown_gateway - [ ] start_availability_monitor_test - [ ] start_gateway +- [ ] update_automatic_tape_creation_policy - [ ] update_bandwidth_rate_limit 
- [ ] update_chap_credentials - [ ] update_gateway_information @@ -8037,6 +8293,25 @@ - [ ] untag_resource
+## synthetics +
+0% implemented + +- [ ] create_canary +- [ ] delete_canary +- [ ] describe_canaries +- [ ] describe_canaries_last_run +- [ ] describe_runtime_versions +- [ ] get_canary +- [ ] get_canary_runs +- [ ] list_tags_for_resource +- [ ] start_canary +- [ ] stop_canary +- [ ] tag_resource +- [ ] untag_resource +- [ ] update_canary +
+ ## textract
0% implemented @@ -8053,18 +8328,27 @@
0% implemented +- [ ] create_medical_vocabulary - [ ] create_vocabulary - [ ] create_vocabulary_filter +- [ ] delete_medical_transcription_job +- [ ] delete_medical_vocabulary - [ ] delete_transcription_job - [ ] delete_vocabulary - [ ] delete_vocabulary_filter +- [ ] get_medical_transcription_job +- [ ] get_medical_vocabulary - [ ] get_transcription_job - [ ] get_vocabulary - [ ] get_vocabulary_filter +- [ ] list_medical_transcription_jobs +- [ ] list_medical_vocabularies - [ ] list_transcription_jobs - [ ] list_vocabularies - [ ] list_vocabulary_filters +- [ ] start_medical_transcription_job - [ ] start_transcription_job +- [ ] update_medical_vocabulary - [ ] update_vocabulary - [ ] update_vocabulary_filter
@@ -8123,6 +8407,7 @@ - [ ] create_size_constraint_set - [ ] create_sql_injection_match_set - [ ] create_web_acl +- [ ] create_web_acl_migration_stack - [ ] create_xss_match_set - [ ] delete_byte_match_set - [ ] delete_geo_match_set @@ -8206,6 +8491,7 @@ - [ ] create_size_constraint_set - [ ] create_sql_injection_match_set - [ ] create_web_acl +- [ ] create_web_acl_migration_stack - [ ] create_xss_match_set - [ ] delete_byte_match_set - [ ] delete_geo_match_set @@ -8286,8 +8572,10 @@ - [ ] create_regex_pattern_set - [ ] create_rule_group - [ ] create_web_acl +- [ ] delete_firewall_manager_rule_groups - [ ] delete_ip_set - [ ] delete_logging_configuration +- [ ] delete_permission_policy - [ ] delete_regex_pattern_set - [ ] delete_rule_group - [ ] delete_web_acl @@ -8295,6 +8583,7 @@ - [ ] disassociate_web_acl - [ ] get_ip_set - [ ] get_logging_configuration +- [ ] get_permission_policy - [ ] get_rate_based_statement_managed_keys - [ ] get_regex_pattern_set - [ ] get_rule_group @@ -8310,6 +8599,7 @@ - [ ] list_tags_for_resource - [ ] list_web_acls - [ ] put_logging_configuration +- [ ] put_permission_policy - [ ] tag_resource - [ ] untag_resource - [ ] update_ip_set @@ -8416,6 +8706,7 @@ - [ ] delete_group - [ ] delete_mailbox_permissions - [ ] delete_resource +- [ ] delete_retention_policy - [ ] delete_user - [ ] deregister_from_work_mail - [ ] describe_group @@ -8425,6 +8716,7 @@ - [ ] disassociate_delegate_from_resource - [ ] disassociate_member_from_group - [ ] get_access_control_effect +- [ ] get_default_retention_policy - [ ] get_mailbox_details - [ ] list_access_control_rules - [ ] list_aliases @@ -8438,6 +8730,7 @@ - [ ] list_users - [ ] put_access_control_rule - [ ] put_mailbox_permissions +- [ ] put_retention_policy - [ ] register_to_work_mail - [ ] reset_password - [ ] tag_resource diff --git a/moto/awslambda/models.py b/moto/awslambda/models.py index 360c47528884..967944b9106c 100644 --- a/moto/awslambda/models.py +++ b/moto/awslambda/models.py @@ -218,7 +218,7 @@ def replace_adapter_send(*args, **kwargs): key = None try: # FIXME: does not validate bucket region - key = s3_backend.get_key(self.code["S3Bucket"], self.code["S3Key"]) + key = s3_backend.get_object(self.code["S3Bucket"], self.code["S3Key"]) except MissingBucket: if do_validate_s3(): raise InvalidParameterValueException( @@ -344,7 +344,7 @@ def update_function_code(self, updated_spec): key = None try: # FIXME: does not validate bucket region - key = s3_backend.get_key( + key = s3_backend.get_object( updated_spec["S3Bucket"], updated_spec["S3Key"] ) except MissingBucket: diff --git a/moto/cloudformation/parsing.py b/moto/cloudformation/parsing.py index 05ebdace8e7a..a489f54febd1 100644 --- a/moto/cloudformation/parsing.py +++ b/moto/cloudformation/parsing.py @@ -541,7 +541,7 @@ def transform_mapping(self): if name == "AWS::Include": location = params["Location"] bucket_name, name = bucket_and_name_from_url(location) - key = s3_backend.get_key(bucket_name, name) + key = s3_backend.get_object(bucket_name, name) self._parsed_resources.update(json.loads(key.value)) def load_parameters(self): diff --git a/moto/cloudformation/responses.py b/moto/cloudformation/responses.py index 302849481e5d..17b76854a348 100644 --- a/moto/cloudformation/responses.py +++ b/moto/cloudformation/responses.py @@ -36,7 +36,7 @@ def _get_stack_from_s3_url(self, template_url): bucket_name = template_url_parts.netloc.split(".")[0] key_name = template_url_parts.path.lstrip("/") - key = s3_backend.get_key(bucket_name, key_name) + key = 
s3_backend.get_object(bucket_name, key_name) return key.value.decode("utf-8") def create_stack(self): diff --git a/moto/s3/models.py b/moto/s3/models.py index 25ead4f5ec3b..c998a2bb63d0 100644 --- a/moto/s3/models.py +++ b/moto/s3/models.py @@ -1315,7 +1315,7 @@ def get_account_public_access_block(self, account_id): return self.account_public_access_block - def set_key( + def set_object( self, bucket_name, key_name, value, storage=None, etag=None, multipart=None ): key_name = clean_key_name(key_name) @@ -1346,11 +1346,11 @@ def set_key( def append_to_key(self, bucket_name, key_name, value): key_name = clean_key_name(key_name) - key = self.get_key(bucket_name, key_name) + key = self.get_object(bucket_name, key_name) key.append_to_value(value) return key - def get_key(self, bucket_name, key_name, version_id=None, part_number=None): + def get_object(self, bucket_name, key_name, version_id=None, part_number=None): key_name = clean_key_name(key_name) bucket = self.get_bucket(bucket_name) key = None @@ -1385,11 +1385,11 @@ def set_key_tags(self, key, tags, key_name=None): ) return key - def get_bucket_tags(self, bucket_name): + def get_bucket_tagging(self, bucket_name): bucket = self.get_bucket(bucket_name) return self.tagger.list_tags_for_resource(bucket.arn) - def put_bucket_tags(self, bucket_name, tags): + def put_bucket_tagging(self, bucket_name, tags): bucket = self.get_bucket(bucket_name) self.tagger.delete_all_tags_for_resource(bucket.arn) self.tagger.tag_resource( @@ -1481,7 +1481,7 @@ def complete_multipart(self, bucket_name, multipart_id, body): return del bucket.multiparts[multipart_id] - key = self.set_key( + key = self.set_object( bucket_name, multipart.key_name, value, etag=etag, multipart=multipart ) key.set_metadata(multipart.metadata) @@ -1521,7 +1521,7 @@ def copy_part( dest_bucket = self.get_bucket(dest_bucket_name) multipart = dest_bucket.multiparts[multipart_id] - src_value = self.get_key( + src_value = self.get_object( src_bucket_name, src_key_name, version_id=src_version_id ).value if start_byte is not None: @@ -1565,7 +1565,7 @@ def _set_delete_marker(self, bucket_name, key_name): bucket = self.get_bucket(bucket_name) bucket.keys[key_name] = FakeDeleteMarker(key=bucket.keys[key_name]) - def delete_key(self, bucket_name, key_name, version_id=None): + def delete_object(self, bucket_name, key_name, version_id=None): key_name = clean_key_name(key_name) bucket = self.get_bucket(bucket_name) @@ -1606,7 +1606,7 @@ def copy_key( src_key_name = clean_key_name(src_key_name) dest_key_name = clean_key_name(dest_key_name) dest_bucket = self.get_bucket(dest_bucket_name) - key = self.get_key(src_bucket_name, src_key_name, version_id=src_version_id) + key = self.get_object(src_bucket_name, src_key_name, version_id=src_version_id) new_key = key.copy(dest_key_name, dest_bucket.is_versioned) self.tagger.copy_tags(key.arn, new_key.arn) @@ -1626,5 +1626,17 @@ def get_bucket_acl(self, bucket_name): bucket = self.get_bucket(bucket_name) return bucket.acl + def get_bucket_cors(self, bucket_name): + bucket = self.get_bucket(bucket_name) + return bucket.cors + + def get_bucket_logging(self, bucket_name): + bucket = self.get_bucket(bucket_name) + return bucket.logging + + def get_bucket_notification_configuration(self, bucket_name): + bucket = self.get_bucket(bucket_name) + return bucket.notification_configuration + s3_backend = S3Backend() diff --git a/moto/s3/responses.py b/moto/s3/responses.py index 4aaba1fcd548..41db43af7ca2 100644 --- a/moto/s3/responses.py +++ b/moto/s3/responses.py @@ -382,7 
+382,7 @@ def _bucket_response_get(self, bucket_name, querystring): template = self.response_template(S3_OBJECT_ACL_RESPONSE) return template.render(obj=bucket) elif "tagging" in querystring: - tags = self.backend.get_bucket_tags(bucket_name)["Tags"] + tags = self.backend.get_bucket_tagging(bucket_name)["Tags"] # "Special Error" if no tags: if len(tags) == 0: template = self.response_template(S3_NO_BUCKET_TAGGING) @@ -390,25 +390,27 @@ def _bucket_response_get(self, bucket_name, querystring): template = self.response_template(S3_OBJECT_TAGGING_RESPONSE) return template.render(tags=tags) elif "logging" in querystring: - bucket = self.backend.get_bucket(bucket_name) - if not bucket.logging: + logging = self.backend.get_bucket_logging(bucket_name) + if not logging: template = self.response_template(S3_NO_LOGGING_CONFIG) return 200, {}, template.render() template = self.response_template(S3_LOGGING_CONFIG) - return 200, {}, template.render(logging=bucket.logging) + return 200, {}, template.render(logging=logging) elif "cors" in querystring: - bucket = self.backend.get_bucket(bucket_name) - if len(bucket.cors) == 0: + cors = self.backend.get_bucket_cors(bucket_name) + if len(cors) == 0: template = self.response_template(S3_NO_CORS_CONFIG) return 404, {}, template.render(bucket_name=bucket_name) template = self.response_template(S3_BUCKET_CORS_RESPONSE) - return template.render(bucket=bucket) + return template.render(cors=cors) elif "notification" in querystring: - bucket = self.backend.get_bucket(bucket_name) - if not bucket.notification_configuration: + notification_configuration = self.backend.get_bucket_notification_configuration( + bucket_name + ) + if not notification_configuration: return 200, {}, "" template = self.response_template(S3_GET_BUCKET_NOTIFICATION_CONFIG) - return template.render(bucket=bucket) + return template.render(config=notification_configuration) elif "accelerate" in querystring: bucket = self.backend.get_bucket(bucket_name) if bucket.accelerate_configuration is None: @@ -663,7 +665,7 @@ def _bucket_response_put( return "" elif "tagging" in querystring: tagging = self._bucket_tagging_from_xml(body) - self.backend.put_bucket_tags(bucket_name, tagging) + self.backend.put_bucket_tagging(bucket_name, tagging) return "" elif "website" in querystring: self.backend.set_bucket_website_configuration(bucket_name, body) @@ -840,7 +842,7 @@ def _bucket_response_post(self, request, body, bucket_name): else: status_code = 204 - new_key = self.backend.set_key(bucket_name, key, f) + new_key = self.backend.set_object(bucket_name, key, f) # Metadata metadata = metadata_from_headers(form) @@ -879,7 +881,7 @@ def _bucket_response_delete_keys(self, request, body, bucket_name): key_name = object_["Key"] version_id = object_.get("VersionId", None) - success = self.backend.delete_key( + success = self.backend.delete_object( bucket_name, undo_clean_key_name(key_name), version_id=version_id ) if success: @@ -1056,7 +1058,7 @@ def _key_response(self, request, full_url, headers): signed_url = "Signature=" in request.url elif hasattr(request, "requestline"): signed_url = "Signature=" in request.path - key = self.backend.get_key(bucket_name, key_name) + key = self.backend.get_object(bucket_name, key_name) if key: if not key.acl.public_read and not signed_url: @@ -1118,7 +1120,7 @@ def _key_response_get(self, bucket_name, query, key_name, headers): ) version_id = query.get("versionId", [None])[0] if_modified_since = headers.get("If-Modified-Since", None) - key = self.backend.get_key(bucket_name, 
key_name, version_id=version_id) + key = self.backend.get_object(bucket_name, key_name, version_id=version_id) if key is None: raise MissingKey(key_name) if if_modified_since: @@ -1164,7 +1166,9 @@ def _key_response_put(self, request, body, bucket_name, query, key_name, headers except ValueError: start_byte, end_byte = None, None - if self.backend.get_key(src_bucket, src_key, version_id=src_version_id): + if self.backend.get_object( + src_bucket, src_key, version_id=src_version_id + ): key = self.backend.copy_part( bucket_name, upload_id, @@ -1193,7 +1197,7 @@ def _key_response_put(self, request, body, bucket_name, query, key_name, headers tagging = self._tagging_from_headers(request.headers) if "acl" in query: - key = self.backend.get_key(bucket_name, key_name) + key = self.backend.get_object(bucket_name, key_name) # TODO: Support the XML-based ACL format key.set_acl(acl) return 200, response_headers, "" @@ -1203,7 +1207,7 @@ def _key_response_put(self, request, body, bucket_name, query, key_name, headers version_id = query["versionId"][0] else: version_id = None - key = self.backend.get_key(bucket_name, key_name, version_id=version_id) + key = self.backend.get_object(bucket_name, key_name, version_id=version_id) tagging = self._tagging_from_xml(body) self.backend.set_key_tags(key, tagging, key_name) return 200, response_headers, "" @@ -1221,7 +1225,9 @@ def _key_response_put(self, request, body, bucket_name, query, key_name, headers ) src_version_id = parse_qs(src_key_parsed.query).get("versionId", [None])[0] - key = self.backend.get_key(src_bucket, src_key, version_id=src_version_id) + key = self.backend.get_object( + src_bucket, src_key, version_id=src_version_id + ) if key is not None: if key.storage_class in ["GLACIER", "DEEP_ARCHIVE"]: @@ -1238,7 +1244,7 @@ def _key_response_put(self, request, body, bucket_name, query, key_name, headers else: return 404, response_headers, "" - new_key = self.backend.get_key(bucket_name, key_name) + new_key = self.backend.get_object(bucket_name, key_name) mdirective = request.headers.get("x-amz-metadata-directive") if mdirective is not None and mdirective == "REPLACE": metadata = metadata_from_headers(request.headers) @@ -1254,13 +1260,13 @@ def _key_response_put(self, request, body, bucket_name, query, key_name, headers closing_connection = headers.get("connection") == "close" if closing_connection and streaming_request: # Closing the connection of a streaming request. 
No more data - new_key = self.backend.get_key(bucket_name, key_name) + new_key = self.backend.get_object(bucket_name, key_name) elif streaming_request: # Streaming request, more data new_key = self.backend.append_to_key(bucket_name, key_name, body) else: # Initial data - new_key = self.backend.set_key( + new_key = self.backend.set_object( bucket_name, key_name, body, storage=storage_class ) request.streaming = True @@ -1286,7 +1292,7 @@ def _key_response_head(self, bucket_name, query, key_name, headers): if if_modified_since: if_modified_since = str_to_rfc_1123_datetime(if_modified_since) - key = self.backend.get_key( + key = self.backend.get_object( bucket_name, key_name, version_id=version_id, part_number=part_number ) if key: @@ -1596,7 +1602,7 @@ def _key_response_delete(self, bucket_name, query, key_name): self.backend.cancel_multipart(bucket_name, upload_id) return 204, {}, "" version_id = query.get("versionId", [None])[0] - self.backend.delete_key(bucket_name, key_name, version_id=version_id) + self.backend.delete_object(bucket_name, key_name, version_id=version_id) return 204, {}, "" def _complete_multipart_body(self, body): @@ -1633,7 +1639,7 @@ def _key_response_post(self, request, body, bucket_name, query, key_name): elif "restore" in query: es = minidom.parseString(body).getElementsByTagName("Days") days = es[0].childNodes[0].wholeText - key = self.backend.get_key(bucket_name, key_name) + key = self.backend.get_object(bucket_name, key_name) r = 202 if key.expiry_date is not None: r = 200 @@ -1959,7 +1965,7 @@ def _key_response_post(self, request, body, bucket_name, query, key_name): S3_BUCKET_CORS_RESPONSE = """ - {% for cors in bucket.cors %} + {% for cors in cors %} {% for origin in cors.allowed_origins %} {{ origin }} @@ -2192,7 +2198,7 @@ def _key_response_post(self, request, body, bucket_name, query, key_name): S3_GET_BUCKET_NOTIFICATION_CONFIG = """ - {% for topic in bucket.notification_configuration.topic %} + {% for topic in config.topic %} {{ topic.id }} {{ topic.arn }} @@ -2213,7 +2219,7 @@ def _key_response_post(self, request, body, bucket_name, query, key_name): {% endif %} {% endfor %} - {% for queue in bucket.notification_configuration.queue %} + {% for queue in config.queue %} {{ queue.id }} {{ queue.arn }} @@ -2234,7 +2240,7 @@ def _key_response_post(self, request, body, bucket_name, query, key_name): {% endif %} {% endfor %} - {% for cf in bucket.notification_configuration.cloud_function %} + {% for cf in config.cloud_function %} {{ cf.id }} {{ cf.arn }} diff --git a/tests/test_s3/test_s3.py b/tests/test_s3/test_s3.py index 363ccc02db77..b91eb9983ea6 100644 --- a/tests/test_s3/test_s3.py +++ b/tests/test_s3/test_s3.py @@ -4364,7 +4364,7 @@ def test_s3_config_dict(): # With 1 bucket in us-west-2: s3_config_query.backends["global"].create_bucket("bucket1", "us-west-2") - s3_config_query.backends["global"].put_bucket_tags("bucket1", tags) + s3_config_query.backends["global"].put_bucket_tagging("bucket1", tags) # With a log bucket: s3_config_query.backends["global"].create_bucket("logbucket", "us-west-2") From b1d515c9295311170e240cf583251b93e9989b48 Mon Sep 17 00:00:00 2001 From: usmangani1 Date: Wed, 10 Jun 2020 12:28:40 +0530 Subject: [PATCH 385/658] =?UTF-8?q?Enhancement=20:=20API-Gateway=20Put=20I?= =?UTF-8?q?ntegration=20Response=20-=20Adding=20support=20f=E2=80=A6=20(#3?= =?UTF-8?q?058)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Enhancement : API-Gateway Put Integration Response - Adding support for 
contentHandling. * Added tests covering the case where contentHandling is None. * Linting Co-authored-by: usmankb Co-authored-by: Bert Blommers --- moto/apigateway/models.py | 17 +++++-- moto/apigateway/responses.py | 2 + tests/test_apigateway/test_apigateway.py | 58 ++++++++++++++++++++++++ 3 files changed, 73 insertions(+), 4 deletions(-) diff --git a/moto/apigateway/models.py b/moto/apigateway/models.py index 4513c75abed7..fbd525df154e 100644 --- a/moto/apigateway/models.py +++ b/moto/apigateway/models.py @@ -56,13 +56,21 @@ def __init__(self, deployment_id, name, description=""): class IntegrationResponse(BaseModel, dict): - def __init__(self, status_code, selection_pattern=None, response_templates=None): + def __init__( + self, + status_code, + selection_pattern=None, + response_templates=None, + content_handling=None, + ): if response_templates is None: response_templates = {"application/json": None} self["responseTemplates"] = response_templates self["statusCode"] = status_code if selection_pattern: self["selectionPattern"] = selection_pattern + if content_handling: + self["contentHandling"] = content_handling class Integration(BaseModel, dict): @@ -75,12 +83,12 @@ def __init__(self, integration_type, uri, http_method, request_templates=None): self["integrationResponses"] = {"200": IntegrationResponse(200)} def create_integration_response( - self, status_code, selection_pattern, response_templates + self, status_code, selection_pattern, response_templates, content_handling ): if response_templates == {}: response_templates = None integration_response = IntegrationResponse( - status_code, selection_pattern, response_templates + status_code, selection_pattern, response_templates, content_handling ) self["integrationResponses"][status_code] = integration_response return integration_response @@ -959,12 +967,13 @@ def create_integration_response( status_code, selection_pattern, response_templates, + content_handling, ): if response_templates is None: raise InvalidRequestInput() integration = self.get_integration(function_id, resource_id, method_type) integration_response = integration.create_integration_response( - status_code, selection_pattern, response_templates + status_code, selection_pattern, response_templates, content_handling ) return integration_response diff --git a/moto/apigateway/responses.py b/moto/apigateway/responses.py index 1a7689d286e9..e3951192b186 100644 --- a/moto/apigateway/responses.py +++ b/moto/apigateway/responses.py @@ -387,6 +387,7 @@ def integration_responses(self, request, full_url, headers): elif self.method == "PUT": selection_pattern = self._get_param("selectionPattern") response_templates = self._get_param("responseTemplates") + content_handling = self._get_param("contentHandling") integration_response = self.backend.create_integration_response( function_id, resource_id, @@ -394,6 +395,7 @@ def integration_responses(self, request, full_url, headers): status_code, selection_pattern, response_templates, + content_handling, ) elif self.method == "DELETE": integration_response = self.backend.delete_integration_response( diff --git a/tests/test_apigateway/test_apigateway.py b/tests/test_apigateway/test_apigateway.py index 295cd1c54a5f..1c7f6d3850f7 100644 --- a/tests/test_apigateway/test_apigateway.py +++ b/tests/test_apigateway/test_apigateway.py @@ -544,6 +544,7 @@ def test_integration_response(): selectionPattern="foobar", responseTemplates={}, ) + # this is hard to match against, so remove it
response["ResponseMetadata"].pop("RetryAttempts", None) @@ -592,6 +593,63 @@ def test_integration_response(): response = client.get_method(restApiId=api_id, resourceId=root_id, httpMethod="GET") response["methodIntegration"]["integrationResponses"].should.equal({}) + # adding a new method and performing put integration with contentHandling as CONVERT_TO_BINARY + client.put_method( + restApiId=api_id, resourceId=root_id, httpMethod="PUT", authorizationType="none" + ) + + client.put_method_response( + restApiId=api_id, resourceId=root_id, httpMethod="PUT", statusCode="200" + ) + + client.put_integration( + restApiId=api_id, + resourceId=root_id, + httpMethod="PUT", + type="HTTP", + uri="http://httpbin.org/robots.txt", + integrationHttpMethod="POST", + ) + + response = client.put_integration_response( + restApiId=api_id, + resourceId=root_id, + httpMethod="PUT", + statusCode="200", + selectionPattern="foobar", + responseTemplates={}, + contentHandling="CONVERT_TO_BINARY", + ) + + # this is hard to match against, so remove it + response["ResponseMetadata"].pop("HTTPHeaders", None) + response["ResponseMetadata"].pop("RetryAttempts", None) + response.should.equal( + { + "statusCode": "200", + "selectionPattern": "foobar", + "ResponseMetadata": {"HTTPStatusCode": 200}, + "responseTemplates": {"application/json": None}, + "contentHandling": "CONVERT_TO_BINARY", + } + ) + + response = client.get_integration_response( + restApiId=api_id, resourceId=root_id, httpMethod="PUT", statusCode="200" + ) + # this is hard to match against, so remove it + response["ResponseMetadata"].pop("HTTPHeaders", None) + response["ResponseMetadata"].pop("RetryAttempts", None) + response.should.equal( + { + "statusCode": "200", + "selectionPattern": "foobar", + "ResponseMetadata": {"HTTPStatusCode": 200}, + "responseTemplates": {"application/json": None}, + "contentHandling": "CONVERT_TO_BINARY", + } + ) + @mock_apigateway @mock_cognitoidp From fbc5769b745ed9be4b77a64f2159ed0140b29330 Mon Sep 17 00:00:00 2001 From: Kristopher Chun Date: Wed, 10 Jun 2020 00:54:03 -0700 Subject: [PATCH 386/658] =?UTF-8?q?Fix:=20SecretsManager=20-=20Added=20mis?= =?UTF-8?q?sing=20pop()=20override=20to=20get=5Fsecret=5Fname=E2=80=A6=20(?= =?UTF-8?q?#3057)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Fix: SecretsManager - Added missing pop() override to get_secret_name_from_arn (#3056) * Added test case for delete_secret_force_with_arn (#3057) * Fixed lint for test_delete_secret_force_with_arn (#3057) --- moto/secretsmanager/models.py | 4 ++++ .../test_secretsmanager/test_secretsmanager.py | 18 ++++++++++++++++++ 2 files changed, 22 insertions(+) diff --git a/moto/secretsmanager/models.py b/moto/secretsmanager/models.py index 01acf2dbb59b..8641916a7d3b 100644 --- a/moto/secretsmanager/models.py +++ b/moto/secretsmanager/models.py @@ -38,6 +38,10 @@ def __contains__(self, key): new_key = get_secret_name_from_arn(key) return dict.__contains__(self, new_key) + def pop(self, key, *args, **kwargs): + new_key = get_secret_name_from_arn(key) + return super(SecretsStore, self).pop(new_key, *args, **kwargs) + class SecretsManagerBackend(BaseBackend): def __init__(self, region_name=None, **kwargs): diff --git a/tests/test_secretsmanager/test_secretsmanager.py b/tests/test_secretsmanager/test_secretsmanager.py index 0fe23fd7f2cc..59992e094b20 100644 --- a/tests/test_secretsmanager/test_secretsmanager.py +++ b/tests/test_secretsmanager/test_secretsmanager.py @@ -211,6 +211,24 @@ def
test_delete_secret_force(): result = conn.get_secret_value(SecretId="test-secret") +@mock_secretsmanager +def test_delete_secret_force_with_arn(): + conn = boto3.client("secretsmanager", region_name="us-west-2") + + create_secret = conn.create_secret(Name="test-secret", SecretString="foosecret") + + result = conn.delete_secret( + SecretId=create_secret["ARN"], ForceDeleteWithoutRecovery=True + ) + + assert result["ARN"] + assert result["DeletionDate"] > datetime.fromtimestamp(1, pytz.utc) + assert result["Name"] == "test-secret" + + with assert_raises(ClientError): + result = conn.get_secret_value(SecretId="test-secret") + + @mock_secretsmanager def test_delete_secret_that_does_not_exist(): conn = boto3.client("secretsmanager", region_name="us-west-2") From dcde2570b16cbe5183b358531faddcfe2c2b5a56 Mon Sep 17 00:00:00 2001 From: usmangani1 Date: Wed, 10 Jun 2020 14:53:43 +0530 Subject: [PATCH 387/658] =?UTF-8?q?Enhancement=20:=20SES=20-=20Added=20cre?= =?UTF-8?q?ate-receipt-rule-set,=20create-receipt-rul=E2=80=A6=20(#3059)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Enhancement : SES - Added create-receipt-rule-set, create-receipt-rule functionalities. * Linting Co-authored-by: usmankb Co-authored-by: Bert Blommers --- moto/ses/exceptions.py | 23 +++++++ moto/ses/models.py | 18 +++++ moto/ses/responses.py | 27 ++++++++ tests/test_ses/test_ses_boto3.py | 112 +++++++++++++++++++++++++++++++ 4 files changed, 180 insertions(+) diff --git a/moto/ses/exceptions.py b/moto/ses/exceptions.py index 7a4ef1b03925..d3e60aef53cb 100644 --- a/moto/ses/exceptions.py +++ b/moto/ses/exceptions.py @@ -41,3 +41,26 @@ class TemplateDoesNotExist(RESTError): def __init__(self, message): super(TemplateDoesNotExist, self).__init__("TemplateDoesNotExist", message) + + +class RuleSetNameAlreadyExists(RESTError): + code = 400 + + def __init__(self, message): + super(RuleSetNameAlreadyExists, self).__init__( + "RuleSetNameAlreadyExists", message + ) + + +class RuleAlreadyExists(RESTError): + code = 400 + + def __init__(self, message): + super(RuleAlreadyExists, self).__init__("RuleAlreadyExists", message) + + +class RuleSetDoesNotExist(RESTError): + code = 400 + + def __init__(self, message): + super(RuleSetDoesNotExist, self).__init__("RuleSetDoesNotExist", message) diff --git a/moto/ses/models.py b/moto/ses/models.py index f918d9021485..e90f66fa8627 100644 --- a/moto/ses/models.py +++ b/moto/ses/models.py @@ -12,6 +12,9 @@ EventDestinationAlreadyExists, TemplateNameAlreadyExists, TemplateDoesNotExist, + RuleSetNameAlreadyExists, + RuleSetDoesNotExist, + RuleAlreadyExists, ) from .utils import get_random_message_id from .feedback import COMMON_MAIL, BOUNCE, COMPLAINT, DELIVERY @@ -94,6 +97,7 @@ def __init__(self): self.config_set_event_destination = {} self.event_destinations = {} self.templates = {} + self.receipt_rule_set = {} def _is_verified_address(self, source): _, address = parseaddr(source) @@ -294,5 +298,19 @@ def get_template(self, template_name): def list_templates(self): return list(self.templates.values()) + def create_receipt_rule_set(self, rule_set_name): + if self.receipt_rule_set.get(rule_set_name) is not None: + raise RuleSetNameAlreadyExists("Duplicate receipt rule set Name.") + self.receipt_rule_set[rule_set_name] = [] + + def create_receipt_rule(self, rule_set_name, rule): + rule_set = self.receipt_rule_set.get(rule_set_name) + if rule_set is None: + raise RuleSetDoesNotExist("Invalid Rule Set Name.") + if rule in rule_set: + raise 
RuleAlreadyExists("Duplicate Rule Name.") + rule_set.append(rule) + self.receipt_rule_set[rule_set_name] = rule_set + ses_backend = SESBackend() diff --git a/moto/ses/responses.py b/moto/ses/responses.py index f0780e98a3c3..9702c724d3d5 100644 --- a/moto/ses/responses.py +++ b/moto/ses/responses.py @@ -199,6 +199,19 @@ def list_templates(self): template = self.response_template(LIST_TEMPLATES) return template.render(templates=email_templates) + def create_receipt_rule_set(self): + rule_set_name = self._get_param("RuleSetName") + ses_backend.create_receipt_rule_set(rule_set_name) + template = self.response_template(CREATE_RECEIPT_RULE_SET) + return template.render() + + def create_receipt_rule(self): + rule_set_name = self._get_param("RuleSetName") + rule = self._get_dict_param("Rule") + ses_backend.create_receipt_rule(rule_set_name, rule) + template = self.response_template(CREATE_RECEIPT_RULE) + return template.render() + VERIFY_EMAIL_IDENTITY = """ @@ -385,3 +398,17 @@ def list_templates(self): 47e0ef1a-9bf2-11e1-9279-0100e8cf12ba """ + +CREATE_RECEIPT_RULE_SET = """ + + + 47e0ef1a-9bf2-11e1-9279-01ab88cf109a + +""" + +CREATE_RECEIPT_RULE = """ + + + 15e0ef1a-9bf2-11e1-9279-01ab88cf109a + +""" diff --git a/tests/test_ses/test_ses_boto3.py b/tests/test_ses/test_ses_boto3.py index 707afe8fb853..de8ec7261e20 100644 --- a/tests/test_ses/test_ses_boto3.py +++ b/tests/test_ses/test_ses_boto3.py @@ -300,6 +300,118 @@ def test_create_configuration_set(): ex.exception.response["Error"]["Code"].should.equal("EventDestinationAlreadyExists") +@mock_ses +def test_create_receipt_rule_set(): + conn = boto3.client("ses", region_name="us-east-1") + result = conn.create_receipt_rule_set(RuleSetName="testRuleSet") + + result["ResponseMetadata"]["HTTPStatusCode"].should.equal(200) + + with assert_raises(ClientError) as ex: + conn.create_receipt_rule_set(RuleSetName="testRuleSet") + + ex.exception.response["Error"]["Code"].should.equal("RuleSetNameAlreadyExists") + + +@mock_ses +def test_create_receipt_rule(): + conn = boto3.client("ses", region_name="us-east-1") + rule_set_name = "testRuleSet" + conn.create_receipt_rule_set(RuleSetName=rule_set_name) + + result = conn.create_receipt_rule( + RuleSetName=rule_set_name, + Rule={ + "Name": "testRule", + "Enabled": False, + "TlsPolicy": "Optional", + "Recipients": ["string"], + "Actions": [ + { + "S3Action": { + "TopicArn": "string", + "BucketName": "string", + "ObjectKeyPrefix": "string", + "KmsKeyArn": "string", + }, + "BounceAction": { + "TopicArn": "string", + "SmtpReplyCode": "string", + "StatusCode": "string", + "Message": "string", + "Sender": "string", + }, + } + ], + "ScanEnabled": False, + }, + ) + + result["ResponseMetadata"]["HTTPStatusCode"].should.equal(200) + + with assert_raises(ClientError) as ex: + conn.create_receipt_rule( + RuleSetName=rule_set_name, + Rule={ + "Name": "testRule", + "Enabled": False, + "TlsPolicy": "Optional", + "Recipients": ["string"], + "Actions": [ + { + "S3Action": { + "TopicArn": "string", + "BucketName": "string", + "ObjectKeyPrefix": "string", + "KmsKeyArn": "string", + }, + "BounceAction": { + "TopicArn": "string", + "SmtpReplyCode": "string", + "StatusCode": "string", + "Message": "string", + "Sender": "string", + }, + } + ], + "ScanEnabled": False, + }, + ) + + ex.exception.response["Error"]["Code"].should.equal("RuleAlreadyExists") + + with assert_raises(ClientError) as ex: + conn.create_receipt_rule( + RuleSetName="InvalidRuleSetaName", + Rule={ + "Name": "testRule", + "Enabled": False, + "TlsPolicy": "Optional", 
+ "Recipients": ["string"], + "Actions": [ + { + "S3Action": { + "TopicArn": "string", + "BucketName": "string", + "ObjectKeyPrefix": "string", + "KmsKeyArn": "string", + }, + "BounceAction": { + "TopicArn": "string", + "SmtpReplyCode": "string", + "StatusCode": "string", + "Message": "string", + "Sender": "string", + }, + } + ], + "ScanEnabled": False, + }, + ) + + ex.exception.response["Error"]["Code"].should.equal("RuleSetDoesNotExist") + + @mock_ses def test_create_ses_template(): conn = boto3.client("ses", region_name="us-east-1") From b88f1660991ebd0118839a48948cab943913d317 Mon Sep 17 00:00:00 2001 From: Gordon Cassie Date: Thu, 11 Jun 2020 01:50:50 -0400 Subject: [PATCH 388/658] Fix: Support streaming upload from requests. (#3062) * Fix: Support streaming upload from requests. * [FIX] style. Co-authored-by: Gordon Cassie --- moto/core/models.py | 2 ++ tests/test_s3/test_s3.py | 16 ++++++++++++++++ 2 files changed, 18 insertions(+) diff --git a/moto/core/models.py b/moto/core/models.py index 1597efc7bdbf..ba4564e4aad3 100644 --- a/moto/core/models.py +++ b/moto/core/models.py @@ -184,6 +184,8 @@ def get_response(self, request): body = None elif isinstance(request.body, six.text_type): body = six.BytesIO(six.b(request.body)) + elif hasattr(request.body, "read"): + body = six.BytesIO(request.body.read()) else: body = six.BytesIO(request.body) req = Request.from_values( diff --git a/tests/test_s3/test_s3.py b/tests/test_s3/test_s3.py index 363ccc02db77..1d1e834780c3 100644 --- a/tests/test_s3/test_s3.py +++ b/tests/test_s3/test_s3.py @@ -1040,6 +1040,22 @@ def test_s3_object_in_public_bucket_using_multiple_presigned_urls(): assert response.status_code == 200, "Failed on req number {}".format(i) +@mock_s3 +def test_streaming_upload_from_file_to_presigned_url(): + s3 = boto3.resource("s3") + bucket = s3.Bucket("test-bucket") + bucket.create() + bucket.put_object(Body=b"ABCD", Key="file.txt") + + params = {"Bucket": "test-bucket", "Key": "file.txt"} + presigned_url = boto3.client("s3").generate_presigned_url( + "put_object", params, ExpiresIn=900 + ) + with open(__file__, "rb") as f: + response = requests.get(presigned_url, data=f) + assert response.status_code == 200 + + @mock_s3 def test_s3_object_in_private_bucket(): s3 = boto3.resource("s3") From 5880d31f7e746388a64ff42b7f90077a0d666a82 Mon Sep 17 00:00:00 2001 From: ktrueda Date: Fri, 12 Jun 2020 01:27:29 +0900 Subject: [PATCH 389/658] Implemented Athena create_named_query, get_named_query (#1524) (#3065) * Implemented Athena create_named_query, get_named_query --- moto/athena/models.py | 25 +++++++++++++++++++++ moto/athena/responses.py | 29 ++++++++++++++++++++++++ tests/test_athena/test_athena.py | 38 ++++++++++++++++++++++++++++++++ 3 files changed, 92 insertions(+) diff --git a/moto/athena/models.py b/moto/athena/models.py index c39c13817d73..24ad73ab901e 100644 --- a/moto/athena/models.py +++ b/moto/athena/models.py @@ -60,6 +60,16 @@ def __init__(self, query, context, config, workgroup): self.status = "QUEUED" +class NamedQuery(BaseModel): + def __init__(self, name, description, database, query_string, workgroup): + self.id = str(uuid4()) + self.name = name + self.description = description + self.database = database + self.query_string = query_string + self.workgroup = workgroup + + class AthenaBackend(BaseBackend): region_name = None @@ -68,6 +78,7 @@ def __init__(self, region_name=None): self.region_name = region_name self.work_groups = {} self.executions = {} + self.named_queries = {} def create_work_group(self, name, 
configuration, description, tags): if name in self.work_groups: @@ -113,6 +124,20 @@ def stop_query_execution(self, exec_id): execution = self.executions[exec_id] execution.status = "CANCELLED" + def create_named_query(self, name, description, database, query_string, workgroup): + nq = NamedQuery( + name=name, + description=description, + database=database, + query_string=query_string, + workgroup=workgroup, + ) + self.named_queries[nq.id] = nq + return nq.id + + def get_named_query(self, query_id): + return self.named_queries[query_id] if query_id in self.named_queries else None + athena_backends = {} for region in Session().get_available_regions("athena"): diff --git a/moto/athena/responses.py b/moto/athena/responses.py index b52e0beedb8d..b5e6d6a95777 100644 --- a/moto/athena/responses.py +++ b/moto/athena/responses.py @@ -85,3 +85,32 @@ def error(self, msg, status): json.dumps({"__type": "InvalidRequestException", "Message": msg,}), dict(status=status), ) + + def create_named_query(self): + name = self._get_param("Name") + description = self._get_param("Description") + database = self._get_param("Database") + query_string = self._get_param("QueryString") + workgroup = self._get_param("WorkGroup") + if workgroup and not self.athena_backend.get_work_group(workgroup): + return self.error("WorkGroup does not exist", 400) + query_id = self.athena_backend.create_named_query( + name, description, database, query_string, workgroup + ) + return json.dumps({"NamedQueryId": query_id}) + + def get_named_query(self): + query_id = self._get_param("NamedQueryId") + nq = self.athena_backend.get_named_query(query_id) + return json.dumps( + { + "NamedQuery": { + "Name": nq.name, + "Description": nq.description, + "Database": nq.database, + "QueryString": nq.query_string, + "NamedQueryId": nq.id, + "WorkGroup": nq.workgroup, + } + } + ) diff --git a/tests/test_athena/test_athena.py b/tests/test_athena/test_athena.py index 93ca436aa0f8..805a653e3e58 100644 --- a/tests/test_athena/test_athena.py +++ b/tests/test_athena/test_athena.py @@ -172,6 +172,44 @@ def test_stop_query_execution(): details["Status"]["State"].should.equal("CANCELLED") +@mock_athena +def test_create_named_query(): + client = boto3.client("athena", region_name="us-east-1") + + # create named query + res = client.create_named_query( + Name="query-name", Database="target_db", QueryString="SELECT * FROM table1", + ) + + assert "NamedQueryId" in res + + +@mock_athena +def test_get_named_query(): + client = boto3.client("athena", region_name="us-east-1") + query_name = "query-name" + database = "target_db" + query_string = "SELECT * FROM tbl1" + description = "description of this query" + + # create named query + res_create = client.create_named_query( + Name=query_name, + Database=database, + QueryString=query_string, + Description=description, + ) + query_id = res_create["NamedQueryId"] + + # get named query + res_get = client.get_named_query(NamedQueryId=query_id)["NamedQuery"] + res_get["Name"].should.equal(query_name) + res_get["Description"].should.equal(description) + res_get["Database"].should.equal(database) + res_get["QueryString"].should.equal(query_string) + res_get["NamedQueryId"].should.equal(query_id) + + def create_basic_workgroup(client, name): client.create_work_group( Name=name,
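
A minimal usage sketch (not part of the patch series) of the WorkGroup check that the create_named_query handler above performs: a named query is only stored when the referenced WorkGroup exists, otherwise the backend answers with a 400 InvalidRequestException. The client name, region and query text below are illustrative.

import boto3
from botocore.exceptions import ClientError
from moto import mock_athena


@mock_athena
def named_query_requires_existing_workgroup():
    client = boto3.client("athena", region_name="us-east-1")
    try:
        client.create_named_query(
            Name="query-name",
            Database="target_db",
            QueryString="SELECT * FROM table1",
            WorkGroup="unknown-workgroup",  # never created, so the backend rejects it
        )
    except ClientError as err:
        # the handler above returns a 400 with "WorkGroup does not exist"
        print(err.response["Error"]["Message"])


named_query_requires_existing_workgroup()
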
From 475f022b7839420ffacb940abe6b0214bfa10279 Mon Sep 17 00:00:00 2001 From: usmangani1 Date: Fri, 12 Jun 2020 20:46:55 +0530 Subject: [PATCH 390/658] Enhancement: EC2 added create route with networkInterfaceId (#3063) * Enhancement: EC2 - create route with network interfaceID * modifying existing test case * Linting Co-authored-by: usmankb Co-authored-by: Bert Blommers --- moto/ec2/models.py | 35 ++++++++++++++++------------- tests/test_ec2/test_route_tables.py | 28 ++++++++++++++++++++++- 2 files changed, 47 insertions(+), 16 deletions(-) diff --git a/moto/ec2/models.py b/moto/ec2/models.py index f8ebd02ec2b2..cb7ba0ff2b47 100644 --- a/moto/ec2/models.py +++ b/moto/ec2/models.py @@ -3639,26 +3639,31 @@ def create_route( interface_id=None, vpc_peering_connection_id=None, ): + gateway = None + nat_gateway = None + route_table = self.get_route_table(route_table_id) if interface_id: - self.raise_not_implemented_error("CreateRoute to NetworkInterfaceId") + # validate that the interface id refers to an existing network interface + self.get_network_interface(interface_id) - gateway = None - if gateway_id: - if EC2_RESOURCE_TO_PREFIX["vpn-gateway"] in gateway_id: - gateway = self.get_vpn_gateway(gateway_id) - elif EC2_RESOURCE_TO_PREFIX["internet-gateway"] in gateway_id: - gateway = self.get_internet_gateway(gateway_id) - - try: - ipaddress.IPv4Network(six.text_type(destination_cidr_block), strict=False) - except ValueError: - raise InvalidDestinationCIDRBlockParameterError(destination_cidr_block) + else: + if gateway_id: + if EC2_RESOURCE_TO_PREFIX["vpn-gateway"] in gateway_id: + gateway = self.get_vpn_gateway(gateway_id) + elif EC2_RESOURCE_TO_PREFIX["internet-gateway"] in gateway_id: + gateway = self.get_internet_gateway(gateway_id) + + try: + ipaddress.IPv4Network( + six.text_type(destination_cidr_block), strict=False + ) + except ValueError: + raise InvalidDestinationCIDRBlockParameterError(destination_cidr_block) - nat_gateway = None - if nat_gateway_id is not None: - nat_gateway = self.nat_gateways.get(nat_gateway_id) + if nat_gateway_id is not None: + nat_gateway = self.nat_gateways.get(nat_gateway_id) route = Route( route_table, diff --git a/tests/test_ec2/test_route_tables.py b/tests/test_ec2/test_route_tables.py index a64fbae1af7c..61fb33f90b6f 100644 --- a/tests/test_ec2/test_route_tables.py +++ b/tests/test_ec2/test_route_tables.py @@ -462,7 +462,7 @@ def test_routes_not_supported(): # Create conn.create_route.when.called_with( main_route_table.id, ROUTE_CIDR, interface_id="eni-1234abcd" - ).should.throw(NotImplementedError) + ).should.throw("InvalidNetworkInterfaceID.NotFound") # Replace igw = conn.create_internet_gateway() @@ -583,6 +583,32 @@ def test_create_route_with_invalid_destination_cidr_block_parameter(): ) +@mock_ec2 +def test_create_route_with_network_interface_id(): + ec2 = boto3.resource("ec2", region_name="us-west-2") + ec2_client = boto3.client("ec2", region_name="us-west-2") + + vpc = ec2.create_vpc(CidrBlock="10.0.0.0/16") + subnet = ec2.create_subnet( + VpcId=vpc.id, CidrBlock="10.0.0.0/24", AvailabilityZone="us-west-2a" + ) + + route_table = ec2_client.create_route_table(VpcId=vpc.id) + + route_table_id = route_table["RouteTable"]["RouteTableId"] + + eni1 = ec2_client.create_network_interface( + SubnetId=subnet.id, PrivateIpAddress="10.0.10.5" + ) + + route = ec2_client.create_route( + NetworkInterfaceId=eni1["NetworkInterface"]["NetworkInterfaceId"], + RouteTableId=route_table_id, + ) + + route["ResponseMetadata"]["HTTPStatusCode"].should.equal(200) + + @mock_ec2 def test_describe_route_tables_with_nat_gateway(): ec2 = boto3.client("ec2", region_name="us-west-1") From b88f1660991ebd0118839a48948cab943913d317 Mon Sep 17 00:00:00 2001 From: Bert Blommers Date: Sat, 13 Jun 2020 20:27:05 +0100 Subject: [PATCH 391/658]
DynamoDB - Add default GSI throughput --- moto/dynamodb2/models/__init__.py | 109 ++++++++++++++---- moto/dynamodb2/responses.py | 5 +- .../test_dynamodb_table_with_range_key.py | 77 +++++++++++++ 3 files changed, 166 insertions(+), 25 deletions(-) diff --git a/moto/dynamodb2/models/__init__.py b/moto/dynamodb2/models/__init__.py index a5277800ffe4..60bc1b2fe5e1 100644 --- a/moto/dynamodb2/models/__init__.py +++ b/moto/dynamodb2/models/__init__.py @@ -272,6 +272,66 @@ def get(self, start, quantity): return [i.to_json() for i in self.items[start:end]] +class LocalSecondaryIndex(BaseModel): + def __init__(self, index_name, schema, projection): + self.name = index_name + self.schema = schema + self.projection = projection + + def describe(self): + return { + "IndexName": self.name, + "KeySchema": self.schema, + "Projection": self.projection, + } + + @staticmethod + def create(dct): + return LocalSecondaryIndex( + index_name=dct["IndexName"], + schema=dct["KeySchema"], + projection=dct["Projection"], + ) + + +class GlobalSecondaryIndex(BaseModel): + def __init__( + self, index_name, schema, projection, status="ACTIVE", throughput=None + ): + self.name = index_name + self.schema = schema + self.projection = projection + self.status = status + self.throughput = throughput or { + "ReadCapacityUnits": 0, + "WriteCapacityUnits": 0, + } + + def describe(self): + return { + "IndexName": self.name, + "KeySchema": self.schema, + "Projection": self.projection, + "IndexStatus": self.status, + "ProvisionedThroughput": self.throughput, + } + + @staticmethod + def create(dct): + return GlobalSecondaryIndex( + index_name=dct["IndexName"], + schema=dct["KeySchema"], + projection=dct["Projection"], + throughput=dct.get("ProvisionedThroughput", None), + ) + + def update(self, u): + self.name = u.get("IndexName", self.name) + self.schema = u.get("KeySchema", self.schema) + self.projection = u.get("Projection", self.projection) + self.throughput = u.get("ProvisionedThroughput", self.throughput) + + class Table(BaseModel): def __init__( self, @@ -302,12 +362,13 @@ def __init__( else: self.throughput = throughput self.throughput["NumberOfDecreasesToday"] = 0 - self.indexes = indexes - self.global_indexes = global_indexes if global_indexes else [] - for index in self.global_indexes: - index[ - "IndexStatus" - ] = "ACTIVE" # One of 'CREATING'|'UPDATING'|'DELETING'|'ACTIVE' + self.indexes = [ + LocalSecondaryIndex.create(i) for i in (indexes if indexes else []) + ] + self.global_indexes = [ + GlobalSecondaryIndex.create(i) + for i in (global_indexes if global_indexes else []) + ] self.created_at = datetime.datetime.utcnow() self.items = defaultdict(dict) self.table_arn = self._generate_arn(table_name) @@ -374,8 +435,10 @@ def describe(self, base_key="TableDescription"): "KeySchema": self.schema, "ItemCount": len(self), "CreationDateTime": unix_time(self.created_at), - "GlobalSecondaryIndexes": [index for index in self.global_indexes], - "LocalSecondaryIndexes": [index for index in self.indexes], + "GlobalSecondaryIndexes": [ + index.describe() for index in self.global_indexes + ], + "LocalSecondaryIndexes": [index.describe() for index in self.indexes], } } if self.stream_specification and self.stream_specification["StreamEnabled"]: @@ -401,7 +464,7 @@ def hash_key_names(self): keys = [self.hash_key_attr] for index in self.global_indexes: hash_key = None - for key in index["KeySchema"]: + for key in index.schema: if key["KeyType"] == "HASH": hash_key = key["AttributeName"] keys.append(hash_key) @@ -412,7 +475,7 @@ def 
range_key_names(self): keys = [self.range_key_attr] for index in self.global_indexes: range_key = None - for key in index["KeySchema"]: + for key in index.schema: if key["KeyType"] == "RANGE": range_key = keys.append(key["AttributeName"]) keys.append(range_key) @@ -545,7 +608,7 @@ def query( if index_name: all_indexes = self.all_indexes() - indexes_by_name = dict((i["IndexName"], i) for i in all_indexes) + indexes_by_name = dict((i.name, i) for i in all_indexes) if index_name not in indexes_by_name: raise ValueError( "Invalid index: %s for table: %s. Available indexes are: %s" @@ -555,14 +618,14 @@ def query( index = indexes_by_name[index_name] try: index_hash_key = [ - key for key in index["KeySchema"] if key["KeyType"] == "HASH" + key for key in index.schema if key["KeyType"] == "HASH" ][0] except IndexError: - raise ValueError("Missing Hash Key. KeySchema: %s" % index["KeySchema"]) + raise ValueError("Missing Hash Key. KeySchema: %s" % index.name) try: index_range_key = [ - key for key in index["KeySchema"] if key["KeyType"] == "RANGE" + key for key in index.schema if key["KeyType"] == "RANGE" ][0] except IndexError: index_range_key = None @@ -667,9 +730,9 @@ def all_indexes(self): def has_idx_items(self, index_name): all_indexes = self.all_indexes() - indexes_by_name = dict((i["IndexName"], i) for i in all_indexes) + indexes_by_name = dict((i.name, i) for i in all_indexes) idx = indexes_by_name[index_name] - idx_col_set = set([i["AttributeName"] for i in idx["KeySchema"]]) + idx_col_set = set([i["AttributeName"] for i in idx.schema]) for hash_set in self.items.values(): if self.range_key_attr: @@ -692,7 +755,7 @@ def scan( results = [] scanned_count = 0 all_indexes = self.all_indexes() - indexes_by_name = dict((i["IndexName"], i) for i in all_indexes) + indexes_by_name = dict((i.name, i) for i in all_indexes) if index_name: if index_name not in indexes_by_name: @@ -773,9 +836,9 @@ def _trim_results(self, results, limit, exclusive_start_key, scanned_index=None) if scanned_index: all_indexes = self.all_indexes() - indexes_by_name = dict((i["IndexName"], i) for i in all_indexes) + indexes_by_name = dict((i.name, i) for i in all_indexes) idx = indexes_by_name[scanned_index] - idx_col_list = [i["AttributeName"] for i in idx["KeySchema"]] + idx_col_list = [i["AttributeName"] for i in idx.schema] for col in idx_col_list: last_evaluated_key[col] = results[-1].attrs[col] @@ -885,7 +948,7 @@ def update_table_streams(self, name, stream_specification): def update_table_global_indexes(self, name, global_index_updates): table = self.tables[name] - gsis_by_name = dict((i["IndexName"], i) for i in table.global_indexes) + gsis_by_name = dict((i.name, i) for i in table.global_indexes) for gsi_update in global_index_updates: gsi_to_create = gsi_update.get("Create") gsi_to_update = gsi_update.get("Update") @@ -906,7 +969,7 @@ def update_table_global_indexes(self, name, global_index_updates): if index_name not in gsis_by_name: raise ValueError( "Global Secondary Index does not exist, but tried to update: %s" - % gsi_to_update["IndexName"] + % index_name ) gsis_by_name[index_name].update(gsi_to_update) @@ -917,7 +980,9 @@ def update_table_global_indexes(self, name, global_index_updates): % gsi_to_create["IndexName"] ) - gsis_by_name[gsi_to_create["IndexName"]] = gsi_to_create + gsis_by_name[gsi_to_create["IndexName"]] = GlobalSecondaryIndex.create( + gsi_to_create + ) # in python 3.6, dict.values() returns a dict_values object, but we expect it to be a list in other # parts of the codebase diff --git 
a/moto/dynamodb2/responses.py b/moto/dynamodb2/responses.py index aec7c7560a4f..6500a0a63931 100644 --- a/moto/dynamodb2/responses.py +++ b/moto/dynamodb2/responses.py @@ -411,7 +411,6 @@ def _contains_duplicates(self, keys): def query(self): name = self.body["TableName"] - # {u'KeyConditionExpression': u'#n0 = :v0', u'ExpressionAttributeValues': {u':v0': {u'S': u'johndoe'}}, u'ExpressionAttributeNames': {u'#n0': u'username'}} key_condition_expression = self.body.get("KeyConditionExpression") projection_expression = self.body.get("ProjectionExpression") expression_attribute_names = self.body.get("ExpressionAttributeNames", {}) @@ -439,7 +438,7 @@ def query(self): index_name = self.body.get("IndexName") if index_name: all_indexes = (table.global_indexes or []) + (table.indexes or []) - indexes_by_name = dict((i["IndexName"], i) for i in all_indexes) + indexes_by_name = dict((i.name, i) for i in all_indexes) if index_name not in indexes_by_name: er = "com.amazonaws.dynamodb.v20120810#ResourceNotFoundException" return self.error( @@ -449,7 +448,7 @@ def query(self): ), ) - index = indexes_by_name[index_name]["KeySchema"] + index = indexes_by_name[index_name].schema else: index = table.schema diff --git a/tests/test_dynamodb2/test_dynamodb_table_with_range_key.py b/tests/test_dynamodb2/test_dynamodb_table_with_range_key.py index 33f65d5ec135..12e75a73e0b3 100644 --- a/tests/test_dynamodb2/test_dynamodb_table_with_range_key.py +++ b/tests/test_dynamodb2/test_dynamodb_table_with_range_key.py @@ -931,6 +931,83 @@ def test_conflicting_writes(): """ +@mock_dynamodb2 +def test_boto3_create_table_with_gsi(): + dynamodb = boto3.client("dynamodb", region_name="us-east-1") + + table = dynamodb.create_table( + TableName="users", + KeySchema=[ + {"AttributeName": "forum_name", "KeyType": "HASH"}, + {"AttributeName": "subject", "KeyType": "RANGE"}, + ], + AttributeDefinitions=[ + {"AttributeName": "forum_name", "AttributeType": "S"}, + {"AttributeName": "subject", "AttributeType": "S"}, + ], + BillingMode="PAY_PER_REQUEST", + GlobalSecondaryIndexes=[ + { + "IndexName": "test_gsi", + "KeySchema": [{"AttributeName": "subject", "KeyType": "HASH"}], + "Projection": {"ProjectionType": "ALL"}, + } + ], + ) + table["TableDescription"]["GlobalSecondaryIndexes"].should.equal( + [ + { + "KeySchema": [{"KeyType": "HASH", "AttributeName": "subject"}], + "IndexName": "test_gsi", + "Projection": {"ProjectionType": "ALL"}, + "IndexStatus": "ACTIVE", + "ProvisionedThroughput": { + "ReadCapacityUnits": 0, + "WriteCapacityUnits": 0, + }, + } + ] + ) + + table = dynamodb.create_table( + TableName="users2", + KeySchema=[ + {"AttributeName": "forum_name", "KeyType": "HASH"}, + {"AttributeName": "subject", "KeyType": "RANGE"}, + ], + AttributeDefinitions=[ + {"AttributeName": "forum_name", "AttributeType": "S"}, + {"AttributeName": "subject", "AttributeType": "S"}, + ], + BillingMode="PAY_PER_REQUEST", + GlobalSecondaryIndexes=[ + { + "IndexName": "test_gsi", + "KeySchema": [{"AttributeName": "subject", "KeyType": "HASH"}], + "Projection": {"ProjectionType": "ALL"}, + "ProvisionedThroughput": { + "ReadCapacityUnits": 3, + "WriteCapacityUnits": 5, + }, + } + ], + ) + table["TableDescription"]["GlobalSecondaryIndexes"].should.equal( + [ + { + "KeySchema": [{"KeyType": "HASH", "AttributeName": "subject"}], + "IndexName": "test_gsi", + "Projection": {"ProjectionType": "ALL"}, + "IndexStatus": "ACTIVE", + "ProvisionedThroughput": { + "ReadCapacityUnits": 3, + "WriteCapacityUnits": 5, + }, + } + ] + ) + + @mock_dynamodb2 def 
test_boto3_conditions(): dynamodb = boto3.resource("dynamodb", region_name="us-east-1") From 849f16ff2da596fa20a3b54e04fb24c26b2e7b14 Mon Sep 17 00:00:00 2001 From: Tomoya Kabe Date: Sun, 14 Jun 2020 17:23:52 +0900 Subject: [PATCH 392/658] Correct group inline policy rendering (#3069) * Correct group inline policy rendering in iam:GetAccountAuthorizationDetails response * Include user inline policy if exists * Add tests for IAM inline policies * Remove unnecessary print stmts --- moto/iam/responses.py | 12 +++++++++++- tests/test_iam/test_iam.py | 10 ++++++++++ 2 files changed, 21 insertions(+), 1 deletion(-) diff --git a/moto/iam/responses.py b/moto/iam/responses.py index 60ab46069456..3a8296760e9e 100644 --- a/moto/iam/responses.py +++ b/moto/iam/responses.py @@ -2083,6 +2083,16 @@ def get_account_summary(self): {{ user.name }} {{ user.arn }} {{ user.created_iso_8601 }} + {% if user.policies %} + + {% for policy in user.policies %} + + {{ policy }} + {{ user.policies[policy] }} + + {% endfor %} + + {% endif %} {% endfor %} @@ -2106,7 +2116,7 @@ def get_account_summary(self): {% for policy in group.policies %} {{ policy }} - {{ group.get_policy(policy) }} + {{ group.policies[policy] }} {% endfor %} diff --git a/tests/test_iam/test_iam.py b/tests/test_iam/test_iam.py index 7b59a57268cd..a749a37e7b58 100644 --- a/tests/test_iam/test_iam.py +++ b/tests/test_iam/test_iam.py @@ -1690,11 +1690,15 @@ def test_get_account_authorization_details(): assert result["RoleDetailList"][0]["AttachedManagedPolicies"][0][ "PolicyArn" ] == "arn:aws:iam::{}:policy/testPolicy".format(ACCOUNT_ID) + assert result["RoleDetailList"][0]["RolePolicyList"][0][ + "PolicyDocument" + ] == json.loads(test_policy) result = conn.get_account_authorization_details(Filter=["User"]) assert len(result["RoleDetailList"]) == 0 assert len(result["UserDetailList"]) == 1 assert len(result["UserDetailList"][0]["GroupList"]) == 1 + assert len(result["UserDetailList"][0]["UserPolicyList"]) == 1 assert len(result["UserDetailList"][0]["AttachedManagedPolicies"]) == 1 assert len(result["GroupDetailList"]) == 0 assert len(result["Policies"]) == 0 @@ -1705,6 +1709,9 @@ def test_get_account_authorization_details(): assert result["UserDetailList"][0]["AttachedManagedPolicies"][0][ "PolicyArn" ] == "arn:aws:iam::{}:policy/testPolicy".format(ACCOUNT_ID) + assert result["UserDetailList"][0]["UserPolicyList"][0][ + "PolicyDocument" + ] == json.loads(test_policy) result = conn.get_account_authorization_details(Filter=["Group"]) assert len(result["RoleDetailList"]) == 0 @@ -1720,6 +1727,9 @@ def test_get_account_authorization_details(): assert result["GroupDetailList"][0]["AttachedManagedPolicies"][0][ "PolicyArn" ] == "arn:aws:iam::{}:policy/testPolicy".format(ACCOUNT_ID) + assert result["GroupDetailList"][0]["GroupPolicyList"][0][ + "PolicyDocument" + ] == json.loads(test_policy) result = conn.get_account_authorization_details(Filter=["LocalManagedPolicy"]) assert len(result["RoleDetailList"]) == 0 From 4556a2f96f2820c6123004fe3139b61036c97910 Mon Sep 17 00:00:00 2001 From: Bert Blommers Date: Sun, 14 Jun 2020 11:31:44 +0100 Subject: [PATCH 393/658] #1954 - CF - Check stack name in use --- moto/cloudformation/responses.py | 21 +++++++++++++++++++ .../test_cloudformation_stack_crud.py | 4 ++-- .../test_cloudformation_stack_crud_boto3.py | 20 ++++++++++++++++-- 3 files changed, 41 insertions(+), 4 deletions(-) diff --git a/moto/cloudformation/responses.py b/moto/cloudformation/responses.py index 302849481e5d..e503a1d1926b 100644 --- 
a/moto/cloudformation/responses.py +++ b/moto/cloudformation/responses.py @@ -50,6 +50,12 @@ def create_stack(self): for item in self._get_list_prefix("Tags.member") ) + if self.stack_name_exists(new_stack_name=stack_name): + template = self.response_template( + CREATE_STACK_NAME_EXISTS_RESPONSE_TEMPLATE + ) + return 400, {"status": 400}, template.render(name=stack_name) + # Hack dict-comprehension parameters = dict( [ @@ -82,6 +88,12 @@ def create_stack(self): template = self.response_template(CREATE_STACK_RESPONSE_TEMPLATE) return template.render(stack=stack) + def stack_name_exists(self, new_stack_name): + for stack in self.cloudformation_backend.stacks.values(): + if stack.name == new_stack_name: + return True + return False + @amzn_request_id def create_change_set(self): stack_name = self._get_param("StackName") @@ -564,6 +576,15 @@ def update_stack_instances(self): """ +CREATE_STACK_NAME_EXISTS_RESPONSE_TEMPLATE = """ + + Sender + AlreadyExistsException + Stack [{{ name }}] already exists + + 950ff8d7-812a-44b3-bb0c-9b271b954104 +""" + UPDATE_STACK_RESPONSE_TEMPLATE = """ {{ stack.stack_id }} diff --git a/tests/test_cloudformation/test_cloudformation_stack_crud.py b/tests/test_cloudformation/test_cloudformation_stack_crud.py index 3d1b2ab8c68c..800362ad21ff 100644 --- a/tests/test_cloudformation/test_cloudformation_stack_crud.py +++ b/tests/test_cloudformation/test_cloudformation_stack_crud.py @@ -98,12 +98,12 @@ def test_create_stack_hosted_zone_by_id(): }, } conn.create_stack( - "test_stack", template_body=json.dumps(dummy_template), parameters={}.items() + "test_stack1", template_body=json.dumps(dummy_template), parameters={}.items() ) r53_conn = boto.connect_route53() zone_id = r53_conn.get_zones()[0].id conn.create_stack( - "test_stack", + "test_stack2", template_body=json.dumps(dummy_template2), parameters={"ZoneId": zone_id}.items(), ) diff --git a/tests/test_cloudformation/test_cloudformation_stack_crud_boto3.py b/tests/test_cloudformation/test_cloudformation_stack_crud_boto3.py index cd76743ddcf6..43f63dca268a 100644 --- a/tests/test_cloudformation/test_cloudformation_stack_crud_boto3.py +++ b/tests/test_cloudformation/test_cloudformation_stack_crud_boto3.py @@ -919,7 +919,9 @@ def test_execute_change_set_w_name(): def test_describe_stack_pagination(): conn = boto3.client("cloudformation", region_name="us-east-1") for i in range(100): - conn.create_stack(StackName="test_stack", TemplateBody=dummy_template_json) + conn.create_stack( + StackName="test_stack_{}".format(i), TemplateBody=dummy_template_json + ) resp = conn.describe_stacks() stacks = resp["Stacks"] @@ -1211,7 +1213,8 @@ def test_list_exports_with_token(): # Add index to ensure name is unique dummy_output_template["Outputs"]["StackVPC"]["Export"]["Name"] += str(i) cf.create_stack( - StackName="test_stack", TemplateBody=json.dumps(dummy_output_template) + StackName="test_stack_{}".format(i), + TemplateBody=json.dumps(dummy_output_template), ) exports = cf.list_exports() exports["Exports"].should.have.length_of(100) @@ -1273,3 +1276,16 @@ def test_non_json_redrive_policy(): stack.Resource("MainQueue").resource_status.should.equal("CREATE_COMPLETE") stack.Resource("DeadLetterQueue").resource_status.should.equal("CREATE_COMPLETE") + + +@mock_cloudformation +def test_boto3_create_duplicate_stack(): + cf_conn = boto3.client("cloudformation", region_name="us-east-1") + cf_conn.create_stack( + StackName="test_stack", TemplateBody=dummy_template_json, + ) + + with assert_raises(ClientError): + cf_conn.create_stack( + 
StackName="test_stack", TemplateBody=dummy_template_json, + ) From 0dd41d4c32e5ae5f5a8a1c2dccc0a271d883b139 Mon Sep 17 00:00:00 2001 From: Guilherme Martins Crocetti Date: Sun, 14 Jun 2020 12:03:00 -0300 Subject: [PATCH 394/658] Cloudformation support for EventSourceMapping (#3045) * change line position for uuid and last_modified because they're not input parameters * add event_source_arn validator and setter * refactor batch_size as setter * add helper function to parse arn and return source service * fix for EventSource's create_from_cfn, there was no reference in the lambda object for the esm if created by cfn * add esm deletion by cloudformation * remove unused variable in test * add cfn's update * add complete implementation of delete_from_cfn * blacked changed files * fix test with invalid batchsize for sqs * Dynamodb2 Table - Bugfix for localindex and implemented get_cfn_attributes * Dynamodb2 eventsource - fix test to use StreamArn attribute * Lambda Test - fix test_update_event_source_mapping --- moto/awslambda/models.py | 142 +++++++----- moto/dynamodb2/models/__init__.py | 10 + tests/test_awslambda/test_lambda.py | 3 +- .../test_lambda_cloudformation.py | 207 +++++++++++++++++- .../test_cloudformation_stack_crud.py | 3 +- 5 files changed, 313 insertions(+), 52 deletions(-) diff --git a/moto/awslambda/models.py b/moto/awslambda/models.py index 967944b9106c..91ecc42872aa 100644 --- a/moto/awslambda/models.py +++ b/moto/awslambda/models.py @@ -555,40 +555,63 @@ def delete(self, region): class EventSourceMapping(BaseModel): def __init__(self, spec): # required - self.function_arn = spec["FunctionArn"] + self.function_name = spec["FunctionName"] self.event_source_arn = spec["EventSourceArn"] + + # optional + self.batch_size = spec.get("BatchSize") + self.starting_position = spec.get("StartingPosition", "TRIM_HORIZON") + self.enabled = spec.get("Enabled", True) + self.starting_position_timestamp = spec.get("StartingPositionTimestamp", None) + + self.function_arn = spec["FunctionArn"] self.uuid = str(uuid.uuid4()) self.last_modified = time.mktime(datetime.datetime.utcnow().timetuple()) - # BatchSize service default/max mapping - batch_size_map = { + def _get_service_source_from_arn(self, event_source_arn): + return event_source_arn.split(":")[2].lower() + + def _validate_event_source(self, event_source_arn): + valid_services = ("dynamodb", "kinesis", "sqs") + service = self._get_service_source_from_arn(event_source_arn) + return True if service in valid_services else False + + @property + def event_source_arn(self): + return self._event_source_arn + + @event_source_arn.setter + def event_source_arn(self, event_source_arn): + if not self._validate_event_source(event_source_arn): + raise ValueError( + "InvalidParameterValueException", "Unsupported event source type" + ) + self._event_source_arn = event_source_arn + + @property + def batch_size(self): + return self._batch_size + + @batch_size.setter + def batch_size(self, batch_size): + batch_size_service_map = { "kinesis": (100, 10000), "dynamodb": (100, 1000), "sqs": (10, 10), } - source_type = self.event_source_arn.split(":")[2].lower() - batch_size_entry = batch_size_map.get(source_type) - if batch_size_entry: - # Use service default if not provided - batch_size = int(spec.get("BatchSize", batch_size_entry[0])) - if batch_size > batch_size_entry[1]: - raise ValueError( - "InvalidParameterValueException", - "BatchSize {} exceeds the max of {}".format( - batch_size, batch_size_entry[1] - ), - ) - else: - self.batch_size = batch_size - 
else: - raise ValueError( - "InvalidParameterValueException", "Unsupported event source type" - ) - # optional - self.starting_position = spec.get("StartingPosition", "TRIM_HORIZON") - self.enabled = spec.get("Enabled", True) - self.starting_position_timestamp = spec.get("StartingPositionTimestamp", None) + source_type = self._get_service_source_from_arn(self.event_source_arn) + batch_size_for_source = batch_size_service_map[source_type] + + if batch_size is None: + self._batch_size = batch_size_for_source[0] + elif batch_size > batch_size_for_source[1]: + error_message = "BatchSize {} exceeds the max of {}".format( + batch_size, batch_size_for_source[1] + ) + raise ValueError("InvalidParameterValueException", error_message) + else: + self._batch_size = int(batch_size) def get_configuration(self): return { @@ -602,23 +625,42 @@ def get_configuration(self): "StateTransitionReason": "User initiated", } + def delete(self, region_name): + lambda_backend = lambda_backends[region_name] + lambda_backend.delete_event_source_mapping(self.uuid) + @classmethod def create_from_cloudformation_json( cls, resource_name, cloudformation_json, region_name ): properties = cloudformation_json["Properties"] - func = lambda_backends[region_name].get_function(properties["FunctionName"]) - spec = { - "FunctionArn": func.function_arn, - "EventSourceArn": properties["EventSourceArn"], - "StartingPosition": properties["StartingPosition"], - "BatchSize": properties.get("BatchSize", 100), - } - optional_properties = "BatchSize Enabled StartingPositionTimestamp".split() - for prop in optional_properties: - if prop in properties: - spec[prop] = properties[prop] - return EventSourceMapping(spec) + lambda_backend = lambda_backends[region_name] + return lambda_backend.create_event_source_mapping(properties) + + @classmethod + def update_from_cloudformation_json( + cls, new_resource_name, cloudformation_json, original_resource, region_name + ): + properties = cloudformation_json["Properties"] + event_source_uuid = original_resource.uuid + lambda_backend = lambda_backends[region_name] + return lambda_backend.update_event_source_mapping(event_source_uuid, properties) + + @classmethod + def delete_from_cloudformation_json( + cls, resource_name, cloudformation_json, region_name + ): + properties = cloudformation_json["Properties"] + lambda_backend = lambda_backends[region_name] + esms = lambda_backend.list_event_source_mappings( + event_source_arn=properties["EventSourceArn"], + function_name=properties["FunctionName"], + ) + + for esm in esms: + if esm.logical_resource_id in resource_name: + lambda_backend.delete_event_source_mapping + esm.delete(region_name) class LambdaVersion(BaseModel): @@ -819,7 +861,7 @@ def create_event_source_mapping(self, spec): ) # Validate function name - func = self._lambdas.get_function_by_name_or_arn(spec.pop("FunctionName", "")) + func = self._lambdas.get_function_by_name_or_arn(spec.get("FunctionName", "")) if not func: raise RESTError("ResourceNotFoundException", "Invalid FunctionName") @@ -877,18 +919,20 @@ def delete_event_source_mapping(self, uuid): def update_event_source_mapping(self, uuid, spec): esm = self.get_event_source_mapping(uuid) - if esm: - if spec.get("FunctionName"): - func = self._lambdas.get_function_by_name_or_arn( - spec.get("FunctionName") - ) + if not esm: + return False + + for key, value in spec.items(): + if key == "FunctionName": + func = self._lambdas.get_function_by_name_or_arn(spec[key]) esm.function_arn = func.function_arn - if "BatchSize" in spec: - 
esm.batch_size = spec["BatchSize"] - if "Enabled" in spec: - esm.enabled = spec["Enabled"] - return esm - return False + elif key == "BatchSize": + esm.batch_size = spec[key] + elif key == "Enabled": + esm.enabled = spec[key] + + esm.last_modified = time.mktime(datetime.datetime.utcnow().timetuple()) + return esm def list_event_source_mappings(self, event_source_arn, function_name): esms = list(self._event_source_mappings.values()) diff --git a/moto/dynamodb2/models/__init__.py b/moto/dynamodb2/models/__init__.py index 48b4bbbfd19d..13ee94948765 100644 --- a/moto/dynamodb2/models/__init__.py +++ b/moto/dynamodb2/models/__init__.py @@ -386,6 +386,16 @@ def __init__( }, } + def get_cfn_attribute(self, attribute_name): + from moto.cloudformation.exceptions import UnformattedGetAttTemplateException + + if attribute_name == "Arn": + return self.table_arn + elif attribute_name == "StreamArn" and self.stream_specification: + return self.describe()["TableDescription"]["LatestStreamArn"] + + raise UnformattedGetAttTemplateException() + @classmethod def create_from_cloudformation_json( cls, resource_name, cloudformation_json, region_name diff --git a/tests/test_awslambda/test_lambda.py b/tests/test_awslambda/test_lambda.py index 8879ad7e34a4..1cd943f04192 100644 --- a/tests/test_awslambda/test_lambda.py +++ b/tests/test_awslambda/test_lambda.py @@ -1446,11 +1446,12 @@ def test_update_event_source_mapping(): assert response["State"] == "Enabled" mapping = conn.update_event_source_mapping( - UUID=response["UUID"], Enabled=False, BatchSize=15, FunctionName="testFunction2" + UUID=response["UUID"], Enabled=False, BatchSize=2, FunctionName="testFunction2" ) assert mapping["UUID"] == response["UUID"] assert mapping["FunctionArn"] == func2["FunctionArn"] assert mapping["State"] == "Disabled" + assert mapping["BatchSize"] == 2 @mock_lambda diff --git a/tests/test_awslambda/test_lambda_cloudformation.py b/tests/test_awslambda/test_lambda_cloudformation.py index f57354d69531..c3061ff3a0e5 100644 --- a/tests/test_awslambda/test_lambda_cloudformation.py +++ b/tests/test_awslambda/test_lambda_cloudformation.py @@ -3,7 +3,7 @@ import sure # noqa import zipfile from botocore.exceptions import ClientError -from moto import mock_cloudformation, mock_iam, mock_lambda, mock_s3 +from moto import mock_cloudformation, mock_iam, mock_lambda, mock_s3, mock_sqs from nose.tools import assert_raises from string import Template from uuid import uuid4 @@ -48,6 +48,23 @@ def lambda_handler2(event, context): }""" ) +event_source_mapping_template = Template( + """{ + "AWSTemplateFormatVersion": "2010-09-09", + "Resources": { + "$resource_name": { + "Type": "AWS::Lambda::EventSourceMapping", + "Properties": { + "BatchSize": $batch_size, + "EventSourceArn": $event_source_arn, + "FunctionName": $function_name, + "Enabled": $enabled + } + } + } +}""" +) + @mock_cloudformation @mock_lambda @@ -97,6 +114,194 @@ def test_lambda_can_be_deleted_by_cloudformation(): e.exception.response["Error"]["Code"].should.equal("ResourceNotFoundException") +@mock_cloudformation +@mock_lambda +@mock_s3 +@mock_sqs +def test_event_source_mapping_create_from_cloudformation_json(): + sqs = boto3.resource("sqs", region_name="us-east-1") + s3 = boto3.client("s3", "us-east-1") + cf = boto3.client("cloudformation", region_name="us-east-1") + lmbda = boto3.client("lambda", region_name="us-east-1") + + queue = sqs.create_queue(QueueName="test-sqs-queue1") + + # Creates lambda + _, lambda_stack = create_stack(cf, s3) + created_fn_name = 
get_created_function_name(cf, lambda_stack) + created_fn_arn = lmbda.get_function(FunctionName=created_fn_name)["Configuration"][ + "FunctionArn" + ] + + template = event_source_mapping_template.substitute( + { + "resource_name": "Foo", + "batch_size": 1, + "event_source_arn": queue.attributes["QueueArn"], + "function_name": created_fn_name, + "enabled": True, + } + ) + + cf.create_stack(StackName="test-event-source", TemplateBody=template) + event_sources = lmbda.list_event_source_mappings(FunctionName=created_fn_name) + + event_sources["EventSourceMappings"].should.have.length_of(1) + event_source = event_sources["EventSourceMappings"][0] + event_source["EventSourceArn"].should.be.equal(queue.attributes["QueueArn"]) + event_source["FunctionArn"].should.be.equal(created_fn_arn) + + +@mock_cloudformation +@mock_lambda +@mock_s3 +@mock_sqs +def test_event_source_mapping_delete_stack(): + sqs = boto3.resource("sqs", region_name="us-east-1") + s3 = boto3.client("s3", "us-east-1") + cf = boto3.client("cloudformation", region_name="us-east-1") + lmbda = boto3.client("lambda", region_name="us-east-1") + + queue = sqs.create_queue(QueueName="test-sqs-queue1") + + # Creates lambda + _, lambda_stack = create_stack(cf, s3) + created_fn_name = get_created_function_name(cf, lambda_stack) + + template = event_source_mapping_template.substitute( + { + "resource_name": "Foo", + "batch_size": 1, + "event_source_arn": queue.attributes["QueueArn"], + "function_name": created_fn_name, + "enabled": True, + } + ) + + esm_stack = cf.create_stack(StackName="test-event-source", TemplateBody=template) + event_sources = lmbda.list_event_source_mappings(FunctionName=created_fn_name) + + event_sources["EventSourceMappings"].should.have.length_of(1) + + cf.delete_stack(StackName=esm_stack["StackId"]) + event_sources = lmbda.list_event_source_mappings(FunctionName=created_fn_name) + + event_sources["EventSourceMappings"].should.have.length_of(0) + + +@mock_cloudformation +@mock_lambda +@mock_s3 +@mock_sqs +def test_event_source_mapping_update_from_cloudformation_json(): + sqs = boto3.resource("sqs", region_name="us-east-1") + s3 = boto3.client("s3", "us-east-1") + cf = boto3.client("cloudformation", region_name="us-east-1") + lmbda = boto3.client("lambda", region_name="us-east-1") + + queue = sqs.create_queue(QueueName="test-sqs-queue1") + + # Creates lambda + _, lambda_stack = create_stack(cf, s3) + created_fn_name = get_created_function_name(cf, lambda_stack) + created_fn_arn = lmbda.get_function(FunctionName=created_fn_name)["Configuration"][ + "FunctionArn" + ] + + original_template = event_source_mapping_template.substitute( + { + "resource_name": "Foo", + "batch_size": 1, + "event_source_arn": queue.attributes["QueueArn"], + "function_name": created_fn_name, + "enabled": True, + } + ) + + cf.create_stack(StackName="test-event-source", TemplateBody=original_template) + event_sources = lmbda.list_event_source_mappings(FunctionName=created_fn_name) + original_esm = event_sources["EventSourceMappings"][0] + + original_esm["State"].should.equal("Enabled") + original_esm["BatchSize"].should.equal(1) + + # Update + new_template = event_source_mapping_template.substitute( + { + "resource_name": "Foo", + "batch_size": 10, + "event_source_arn": queue.attributes["QueueArn"], + "function_name": created_fn_name, + "enabled": False, + } + ) + + cf.update_stack(StackName="test-event-source", TemplateBody=new_template) + event_sources = lmbda.list_event_source_mappings(FunctionName=created_fn_name) + updated_esm = 
event_sources["EventSourceMappings"][0] + + updated_esm["State"].should.equal("Disabled") + updated_esm["BatchSize"].should.equal(10) + + +@mock_cloudformation +@mock_lambda +@mock_s3 +@mock_sqs +def test_event_source_mapping_delete_from_cloudformation_json(): + sqs = boto3.resource("sqs", region_name="us-east-1") + s3 = boto3.client("s3", "us-east-1") + cf = boto3.client("cloudformation", region_name="us-east-1") + lmbda = boto3.client("lambda", region_name="us-east-1") + + queue = sqs.create_queue(QueueName="test-sqs-queue1") + + # Creates lambda + _, lambda_stack = create_stack(cf, s3) + created_fn_name = get_created_function_name(cf, lambda_stack) + created_fn_arn = lmbda.get_function(FunctionName=created_fn_name)["Configuration"][ + "FunctionArn" + ] + + original_template = event_source_mapping_template.substitute( + { + "resource_name": "Foo", + "batch_size": 1, + "event_source_arn": queue.attributes["QueueArn"], + "function_name": created_fn_name, + "enabled": True, + } + ) + + cf.create_stack(StackName="test-event-source", TemplateBody=original_template) + event_sources = lmbda.list_event_source_mappings(FunctionName=created_fn_name) + original_esm = event_sources["EventSourceMappings"][0] + + original_esm["State"].should.equal("Enabled") + original_esm["BatchSize"].should.equal(1) + + # Update with deletion of old resources + new_template = event_source_mapping_template.substitute( + { + "resource_name": "Bar", # changed name + "batch_size": 10, + "event_source_arn": queue.attributes["QueueArn"], + "function_name": created_fn_name, + "enabled": False, + } + ) + + cf.update_stack(StackName="test-event-source", TemplateBody=new_template) + event_sources = lmbda.list_event_source_mappings(FunctionName=created_fn_name) + + event_sources["EventSourceMappings"].should.have.length_of(1) + updated_esm = event_sources["EventSourceMappings"][0] + + updated_esm["State"].should.equal("Disabled") + updated_esm["BatchSize"].should.equal(10) + updated_esm["UUID"].shouldnt.equal(original_esm["UUID"]) + + def create_stack(cf, s3): bucket_name = str(uuid4()) s3.create_bucket(Bucket=bucket_name) diff --git a/tests/test_cloudformation/test_cloudformation_stack_crud.py b/tests/test_cloudformation/test_cloudformation_stack_crud.py index 3d1b2ab8c68c..8a0a0b11c67d 100644 --- a/tests/test_cloudformation/test_cloudformation_stack_crud.py +++ b/tests/test_cloudformation/test_cloudformation_stack_crud.py @@ -541,13 +541,14 @@ def test_create_stack_lambda_and_dynamodb(): "ReadCapacityUnits": 10, "WriteCapacityUnits": 10, }, + "StreamSpecification": {"StreamViewType": "KEYS_ONLY"}, }, }, "func1mapping": { "Type": "AWS::Lambda::EventSourceMapping", "Properties": { "FunctionName": {"Ref": "func1"}, - "EventSourceArn": "arn:aws:dynamodb:region:XXXXXX:table/tab1/stream/2000T00:00:00.000", + "EventSourceArn": {"Fn::GetAtt": ["tab1", "StreamArn"]}, "StartingPosition": "0", "BatchSize": 100, "Enabled": True,
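
A standalone sketch (re-implemented here for illustration, not imported from moto) of the per-service BatchSize rule that the new batch_size setter in the EventSourceMapping patch above enforces: each event source type has a default and a maximum, so an SQS-backed mapping cannot exceed a batch size of 10. The queue ARN below is illustrative.

# (default, maximum) per event source service, mirroring the setter above
BATCH_SIZE_SERVICE_MAP = {
    "kinesis": (100, 10000),
    "dynamodb": (100, 1000),
    "sqs": (10, 10),
}


def resolve_batch_size(event_source_arn, batch_size=None):
    # the service name is the third ARN segment, e.g. arn:aws:sqs:...
    source = event_source_arn.split(":")[2].lower()
    default, maximum = BATCH_SIZE_SERVICE_MAP[source]
    if batch_size is None:
        return default  # fall back to the service default
    if batch_size > maximum:
        raise ValueError("BatchSize {} exceeds the max of {}".format(batch_size, maximum))
    return int(batch_size)


queue_arn = "arn:aws:sqs:us-east-1:123456789012:test-sqs-queue1"
print(resolve_batch_size(queue_arn))      # 10 (SQS default)
print(resolve_batch_size(queue_arn, 10))  # 10
# resolve_batch_size(queue_arn, 15) raises: BatchSize 15 exceeds the max of 10
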
(#3074)

* "modified volume type in ec2 describe images"

* removed unnecessary comments

* Linting

Co-authored-by: usmankb
Co-authored-by: Bert Blommers
---
 moto/ec2/responses/amis.py  | 2 +-
 tests/test_ec2/test_amis.py | 6 +++++-
 2 files changed, 6 insertions(+), 2 deletions(-)

diff --git a/moto/ec2/responses/amis.py b/moto/ec2/responses/amis.py
index 6736a7175757..0e70182bb155 100755
--- a/moto/ec2/responses/amis.py
+++ b/moto/ec2/responses/amis.py
@@ -125,7 +125,7 @@ def reset_image_attribute(self):
             <snapshotId>{{ image.ebs_snapshot.id }}</snapshotId>
             <volumeSize>15</volumeSize>
             <deleteOnTermination>false</deleteOnTermination>
-            <volumeType>{{ image.root_device_type }}</volumeType>
+            <volumeType>standard</volumeType>
           </ebs>
         </item>
       </blockDeviceMapping>
diff --git a/tests/test_ec2/test_amis.py b/tests/test_ec2/test_amis.py
index ad432bb78ebf..275b12905b32 100644
--- a/tests/test_ec2/test_amis.py
+++ b/tests/test_ec2/test_amis.py
@@ -843,7 +843,11 @@ def test_ami_snapshots_have_correct_owner():
         ]
         existing_snapshot_ids = owner_id_to_snapshot_ids.get(owner_id, [])
         owner_id_to_snapshot_ids[owner_id] = existing_snapshot_ids + snapshot_ids
-
+        # assert the volume type reported for the image's block device mapping
+        assert (
+            image.get("BlockDeviceMappings", {})[0].get("Ebs", {}).get("VolumeType")
+            == "standard"
+        )
     for owner_id in owner_id_to_snapshot_ids:
         snapshots_rseponse = ec2_client.describe_snapshots(
             SnapshotIds=owner_id_to_snapshot_ids[owner_id]

From 610bf36f3bde597fc474b0eca3ac522032a08afe Mon Sep 17 00:00:00 2001
From: Maxim Kirilov
Date: Tue, 16 Jun 2020 08:03:02 +0300
Subject: [PATCH 396/658] Improve parsing of string values that represent
 booleans during block device mapping construction (#3073)

* convert str into bool

* Fix python2

* Fix python2

* pylint
---
 moto/ec2/responses/instances.py  | 19 +++++++++++++++----
 tests/test_ec2/test_instances.py | 30 +++++++++++++++++++++++++++++-
 2 files changed, 44 insertions(+), 5 deletions(-)

diff --git a/moto/ec2/responses/instances.py b/moto/ec2/responses/instances.py
index adcbfa741738..9090847be45b 100644
--- a/moto/ec2/responses/instances.py
+++ b/moto/ec2/responses/instances.py
@@ -13,6 +13,7 @@
 from moto.core import ACCOUNT_ID

 from copy import deepcopy
+import six


 class InstanceResponse(BaseResponse):
@@ -283,15 +284,15 @@ def _parse_block_device_mapping(self):
                 device_template["Ebs"]["VolumeSize"] = device_mapping.get(
                     "ebs._volume_size"
                 )
-                device_template["Ebs"]["DeleteOnTermination"] = device_mapping.get(
-                    "ebs._delete_on_termination", False
+                device_template["Ebs"]["DeleteOnTermination"] = self._convert_to_bool(
+                    device_mapping.get("ebs._delete_on_termination", False)
                 )
                 device_template["Ebs"]["VolumeType"] = device_mapping.get(
                     "ebs._volume_type"
                 )
                 device_template["Ebs"]["Iops"] = device_mapping.get("ebs._iops")
-                device_template["Ebs"]["Encrypted"] = device_mapping.get(
-                    "ebs._encrypted", False
+                device_template["Ebs"]["Encrypted"] = self._convert_to_bool(
+                    device_mapping.get("ebs._encrypted", False)
                 )

                 mappings.append(device_template)
@@ -308,6 +309,16 @@ def _validate_block_device_mapping(device_mapping):
         ):
             raise MissingParameterError("size or snapshotId")

+    @staticmethod
+    def _convert_to_bool(bool_str):
+        if isinstance(bool_str, bool):
+            return bool_str
+
+        if isinstance(bool_str, six.text_type):
+            return str(bool_str).lower() == "true"
+
+        return False
+

 BLOCK_DEVICE_MAPPING_TEMPLATE = {
     "VirtualName": None,
diff --git a/tests/test_ec2/test_instances.py b/tests/test_ec2/test_instances.py
index d25880975af1..c775ab0abec6 100644
--- a/tests/test_ec2/test_instances.py
+++ b/tests/test_ec2/test_instances.py
@@ -128,7 +128,35 @@ def test_instance_terminate_discard_volumes():


 @mock_ec2
-def test_instance_terminate_keep_volumes():
+def 
test_instance_terminate_keep_volumes_explicit(): + + ec2_resource = boto3.resource("ec2", "us-west-1") + + result = ec2_resource.create_instances( + ImageId="ami-d3adb33f", + MinCount=1, + MaxCount=1, + BlockDeviceMappings=[ + { + "DeviceName": "/dev/sda1", + "Ebs": {"VolumeSize": 50, "DeleteOnTermination": False}, + } + ], + ) + instance = result[0] + + instance_volume_ids = [] + for volume in instance.volumes.all(): + instance_volume_ids.append(volume.volume_id) + + instance.terminate() + instance.wait_until_terminated() + + assert len(list(ec2_resource.volumes.all())) == 1 + + +@mock_ec2 +def test_instance_terminate_keep_volumes_implicit(): ec2_resource = boto3.resource("ec2", "us-west-1") result = ec2_resource.create_instances( From 6305f707d28d11000fda0336fe628a3d4743f4ba Mon Sep 17 00:00:00 2001 From: Shane Dowling Date: Thu, 18 Jun 2020 09:50:58 +0100 Subject: [PATCH 397/658] fix to capture yaml scanner error (#3077) --- moto/cloudformation/models.py | 4 +-- .../test_cloudformation/test_stack_parsing.py | 25 +++++++++++++++++++ 2 files changed, 27 insertions(+), 2 deletions(-) diff --git a/moto/cloudformation/models.py b/moto/cloudformation/models.py index 8c14f55b8b4d..23cdc0925b64 100644 --- a/moto/cloudformation/models.py +++ b/moto/cloudformation/models.py @@ -315,8 +315,8 @@ def _parse_template(self): yaml.add_multi_constructor("", yaml_tag_constructor) try: self.template_dict = yaml.load(self.template, Loader=yaml.Loader) - except yaml.parser.ParserError: - self.template_dict = json.loads(self.template, Loader=yaml.Loader) + except (yaml.parser.ParserError, yaml.scanner.ScannerError): + self.template_dict = json.loads(self.template) @property def stack_parameters(self): diff --git a/tests/test_cloudformation/test_stack_parsing.py b/tests/test_cloudformation/test_stack_parsing.py index 85df765926b4..116287162110 100644 --- a/tests/test_cloudformation/test_stack_parsing.py +++ b/tests/test_cloudformation/test_stack_parsing.py @@ -38,6 +38,16 @@ }, } +name_type_template_with_tabs_json = """ +\t{ +\t\t"AWSTemplateFormatVersion": "2010-09-09", +\t\t"Description": "Create a multi-az, load balanced, Auto Scaled sample web site. The Auto Scaling trigger is based on the CPU utilization of the web servers. The AMI is chosen based on the region in which the stack is run. This example creates a web service running across all availability zones in a region. The instances are load balanced with a simple health check. The web site is available on port 80, however, the instances can be configured to listen on any port (8888 by default). **WARNING** This template creates one or more Amazon EC2 instances. 
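The `_parse_template` change above boils down to the following fallback; this is a minimal, standalone sketch assuming only PyYAML and the standard library. YAML forbids tabs as indentation, so a tab-indented JSON template (like the test template below) raises a `ScannerError` rather than a `ParserError` — exactly the case the new except clause catches:

```python
import json
import yaml

def parse_template(template):
    # YAML cannot tokenise tab-indented input, so fall back to plain JSON
    # whenever the scanner (not just the parser) rejects the template.
    try:
        return yaml.load(template, Loader=yaml.Loader)
    except (yaml.parser.ParserError, yaml.scanner.ScannerError):
        return json.loads(template)

tabbed = '\t{\n\t\t"Resources": {}\n\t}'
assert parse_template(tabbed) == {"Resources": {}}
```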
You will be billed for the AWS resources used if you create a stack from this template.", +\t\t"Resources": { +\t\t\t"Queue": {"Type": "AWS::SQS::Queue", "Properties": {"VisibilityTimeout": 60}} +\t\t} +\t} +""" + output_dict = { "Outputs": { "Output1": {"Value": {"Ref": "Queue"}, "Description": "This is a description."} @@ -186,6 +196,21 @@ def test_parse_stack_with_name_type_resource(): queue.should.be.a(Queue) +def test_parse_stack_with_tabbed_json_template(): + stack = FakeStack( + stack_id="test_id", + name="test_stack", + template=name_type_template_with_tabs_json, + parameters={}, + region_name="us-west-1", + ) + + stack.resource_map.should.have.length_of(1) + list(stack.resource_map.keys())[0].should.equal("Queue") + queue = list(stack.resource_map.values())[0] + queue.should.be.a(Queue) + + def test_parse_stack_with_yaml_template(): stack = FakeStack( stack_id="test_id", From 8ce12027dd2d89e00fa3ecfd295357471957ba3d Mon Sep 17 00:00:00 2001 From: Dawn James Date: Fri, 19 Jun 2020 11:44:43 +0100 Subject: [PATCH 398/658] Return correct error when creating a bucket with empty CreateBucketConfiguration (#3079) * Several updates to the contributor documentation with extra information. * Fix failing test by providing a region. * Create test for issue 2210. * Check if CreateBucketConfiguration is supplied and empty; raise MalformedXML error if so. --- CONTRIBUTING.md | 25 +++++++++++++++++-------- moto/s3/responses.py | 16 ++++++++++++++++ tests/test_s3/test_s3.py | 11 ++++++++++- 3 files changed, 43 insertions(+), 9 deletions(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 941fc0624e58..7e54236bdf42 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -1,32 +1,41 @@ -### Contributing code +# Contributing code Moto has a [Code of Conduct](https://github.com/spulec/moto/blob/master/CODE_OF_CONDUCT.md), you can expect to be treated with respect at all times when interacting with this project. ## Running the tests locally -Moto has a Makefile which has some helpful commands for getting setup. You should be able to run `make init` to install the dependencies and then `make test` to run the tests. +Moto has a [Makefile](./Makefile) which has some helpful commands for getting set up. +You should be able to run `make init` to install the dependencies and then `make test` to run the tests. + +*NB. On first run, some tests might take a while to execute, especially the Lambda ones, because they may need to download a Docker image before they can execute.* ## Linting + Run `make lint` or `black --check moto tests` to verify whether your code confirms to the guidelines. -## Is there a missing feature? +## Getting to grips with the codebase + +Moto maintains a list of [good first issues](https://github.com/spulec/moto/contribute) which you may want to look at before +implementing a whole new endpoint. + +## Missing features Moto is easier to contribute to than you probably think. There's [a list of which endpoints have been implemented](https://github.com/spulec/moto/blob/master/IMPLEMENTATION_COVERAGE.md) and we invite you to add new endpoints to existing services or to add new services. How to teach Moto to support a new AWS endpoint: -* Create an issue describing what's missing. This is where we'll all talk about the new addition and help you get it done. +* Search for an existing [issue](https://github.com/spulec/moto/issues) that matches what you want to achieve. +* If one doesn't already exist, create a new issue describing what's missing. 
This is where we'll all talk about the new addition and help you get it done. * Create a [pull request](https://help.github.com/articles/using-pull-requests/) and mention the issue # in the PR description. * Try to add a failing test case. For example, if you're trying to implement `boto3.client('acm').import_certificate()` you'll want to add a new method called `def test_import_certificate` to `tests/test_acm/test_acm.py`. * If you can also implement the code that gets that test passing that's great. If not, just ask the community for a hand and somebody will assist you. -# Maintainers +## Maintainers -## Releasing a new version of Moto +### Releasing a new version of Moto -You'll need a PyPi account and a Dockerhub account to release Moto. After we release a new PyPi package we build and push the [motoserver/moto](https://hub.docker.com/r/motoserver/moto/) Docker image. +You'll need a PyPi account and a DockerHub account to release Moto. After we release a new PyPi package we build and push the [motoserver/moto](https://hub.docker.com/r/motoserver/moto/) Docker image. * First, `scripts/bump_version` modifies the version and opens a PR * Then, merge the new pull request * Finally, generate and ship the new artifacts with `make publish` - diff --git a/moto/s3/responses.py b/moto/s3/responses.py index 41db43af7ca2..d4d872a8d0ef 100644 --- a/moto/s3/responses.py +++ b/moto/s3/responses.py @@ -615,6 +615,19 @@ def _body_contains_location_constraint(self, body): pass return False + def _create_bucket_configuration_is_empty(self, body): + if body: + try: + create_bucket_configuration = xmltodict.parse(body)[ + "CreateBucketConfiguration" + ] + del create_bucket_configuration["@xmlns"] + if len(create_bucket_configuration) == 0: + return True + except KeyError: + pass + return False + def _parse_pab_config(self, body): parsed_xml = xmltodict.parse(body) parsed_xml["PublicAccessBlockConfiguration"].pop("@xmlns", None) @@ -733,6 +746,9 @@ def _bucket_response_put( ): raise IllegalLocationConstraintException() if body: + if self._create_bucket_configuration_is_empty(body): + raise MalformedXML() + try: forced_region = xmltodict.parse(body)["CreateBucketConfiguration"][ "LocationConstraint" diff --git a/tests/test_s3/test_s3.py b/tests/test_s3/test_s3.py index 010a23d50901..dbdd1b90c3fa 100644 --- a/tests/test_s3/test_s3.py +++ b/tests/test_s3/test_s3.py @@ -1042,7 +1042,7 @@ def test_s3_object_in_public_bucket_using_multiple_presigned_urls(): @mock_s3 def test_streaming_upload_from_file_to_presigned_url(): - s3 = boto3.resource("s3") + s3 = boto3.resource("s3", region_name="us-east-1") bucket = s3.Bucket("test-bucket") bucket.create() bucket.put_object(Body=b"ABCD", Key="file.txt") @@ -1976,6 +1976,15 @@ def test_boto3_bucket_create_eu_central(): ) +@mock_s3 +def test_bucket_create_empty_bucket_configuration_should_return_malformed_xml_error(): + s3 = boto3.resource("s3", region_name="us-east-1") + with assert_raises(ClientError) as e: + s3.create_bucket(Bucket="whatever", CreateBucketConfiguration={}) + e.exception.response["Error"]["Code"].should.equal("MalformedXML") + e.exception.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) + + @mock_s3 def test_boto3_head_object(): s3 = boto3.resource("s3", region_name=DEFAULT_REGION_NAME) From dd556a66c6f33d75a0bde70722ee0a04b06619fb Mon Sep 17 00:00:00 2001 From: Bert Blommers Date: Sat, 20 Jun 2020 10:43:02 +0100 Subject: [PATCH 399/658] CognitoIDP - Return KID in headers of ID token --- moto/cognitoidp/models.py | 6 +- 
 moto/cognitoidp/urls.py                  |  2 +-
 tests/test_cognitoidp/test_cognitoidp.py | 75 +++++++++++++++++++++++-
 3 files changed, 80 insertions(+), 3 deletions(-)

diff --git a/moto/cognitoidp/models.py b/moto/cognitoidp/models.py
index 93e29755134a..4b4e0a8b1ba4 100644
--- a/moto/cognitoidp/models.py
+++ b/moto/cognitoidp/models.py
@@ -128,8 +128,12 @@ def create_jwt(
             "exp": now + expires_in,
         }
         payload.update(extra_data)
+        headers = {"kid": "dummy"}  # KID as present in jwks-public.json

-        return jws.sign(payload, self.json_web_key, algorithm="RS256"), expires_in
+        return (
+            jws.sign(payload, self.json_web_key, headers, algorithm="RS256"),
+            expires_in,
+        )

     def create_id_token(self, client_id, username):
         extra_data = self.get_user_extra_data_by_client_id(client_id, username)
diff --git a/moto/cognitoidp/urls.py b/moto/cognitoidp/urls.py
index 5d1dff1d0406..09e675e7017a 100644
--- a/moto/cognitoidp/urls.py
+++ b/moto/cognitoidp/urls.py
@@ -5,5 +5,5 @@

 url_paths = {
     "{0}/$": CognitoIdpResponse.dispatch,
-    "{0}/<user_pool_id>/.well-known/jwks.json$": CognitoIdpJsonWebKeyResponse().serve_json_web_key,
+    "{0}/(?P<user_pool_id>[^/]+)/.well-known/jwks.json$": CognitoIdpJsonWebKeyResponse().serve_json_web_key,
 }
diff --git a/tests/test_cognitoidp/test_cognitoidp.py b/tests/test_cognitoidp/test_cognitoidp.py
index 37e1a56a3540..aefa573ef829 100644
--- a/tests/test_cognitoidp/test_cognitoidp.py
+++ b/tests/test_cognitoidp/test_cognitoidp.py
@@ -3,6 +3,7 @@
 import json
 import os
 import random
+import requests
 import uuid

 import boto3
@@ -10,7 +11,7 @@
 # noinspection PyUnresolvedReferences
 import sure  # noqa
 from botocore.exceptions import ClientError
-from jose import jws
+from jose import jws, jwk, jwt
 from nose.tools import assert_raises

 from moto import mock_cognitoidp
@@ -1309,3 +1310,75 @@ def test_admin_update_user_attributes():
             val.should.equal("Doe")
         elif attr["Name"] == "given_name":
             val.should.equal("Jane")
+
+
+@mock_cognitoidp
+def test_idtoken_contains_kid_header():
+    # https://github.com/spulec/moto/issues/3078
+    # Setup
+    cognito = boto3.client("cognito-idp", "us-west-2")
+    user_pool_id = cognito.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"][
+        "Id"
+    ]
+    client = cognito.create_user_pool_client(
+        UserPoolId=user_pool_id,
+        ExplicitAuthFlows=[
+            "ALLOW_ADMIN_USER_PASSWORD_AUTH",
+            "ALLOW_REFRESH_TOKEN_AUTH",
+            "ALLOW_ADMIN_NO_SRP_AUTH",
+        ],
+        AllowedOAuthFlows=["code", "implicit"],
+        ClientName=str(uuid.uuid4()),
+        CallbackURLs=["https://example.com"],
+    )
+    client_id = client["UserPoolClient"]["ClientId"]
+    username = str(uuid.uuid4())
+    temporary_password = "1TemporaryP@ssword"
+    cognito.admin_create_user(
+        UserPoolId=user_pool_id, Username=username, TemporaryPassword=temporary_password
+    )
+    result = cognito.admin_initiate_auth(
+        UserPoolId=user_pool_id,
+        ClientId=client_id,
+        AuthFlow="ADMIN_NO_SRP_AUTH",
+        AuthParameters={"USERNAME": username, "PASSWORD": temporary_password},
+    )
+
+    # A newly created user is forced to set a new password
+    # This sets a new password and logs the user in (creates tokens)
+    password = "1F@kePassword"
+    result = cognito.respond_to_auth_challenge(
+        Session=result["Session"],
+        ClientId=client_id,
+        ChallengeName="NEW_PASSWORD_REQUIRED",
+        ChallengeResponses={"USERNAME": username, "NEW_PASSWORD": password},
+    )
+    #
+    id_token = result["AuthenticationResult"]["IdToken"]
+
+    # Verify the KID header is present in the token, and corresponds to the KID supplied by the public JWT
+    verify_kid_header(id_token)
+
+
+def verify_kid_header(token):
+    """Verify that the kid header 
corresponds with the public key""" + headers = jwt.get_unverified_headers(token) + kid = headers["kid"] + + key_index = -1 + keys = fetch_public_keys() + for i in range(len(keys)): + if kid == keys[i]["kid"]: + key_index = i + break + if key_index == -1: + raise Exception("Public key (kid) not found in jwks.json") + + +def fetch_public_keys(): + keys_url = "https://cognito-idp.{}.amazonaws.com/{}/.well-known/jwks.json".format( + "us-west-2", "someuserpoolid" + ) + response = requests.get(keys_url).text + my_keys = json.loads(response.decode("utf-8"))["keys"] + return my_keys From 655b92a2a4288407705f07ae7cd468ca5b14081f Mon Sep 17 00:00:00 2001 From: Bert Blommers Date: Sat, 20 Jun 2020 11:05:06 +0100 Subject: [PATCH 400/658] Simplify Cognito test - auto decode JSON --- tests/test_cognitoidp/test_cognitoidp.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/tests/test_cognitoidp/test_cognitoidp.py b/tests/test_cognitoidp/test_cognitoidp.py index aefa573ef829..5eb529e285bb 100644 --- a/tests/test_cognitoidp/test_cognitoidp.py +++ b/tests/test_cognitoidp/test_cognitoidp.py @@ -1379,6 +1379,5 @@ def fetch_public_keys(): keys_url = "https://cognito-idp.{}.amazonaws.com/{}/.well-known/jwks.json".format( "us-west-2", "someuserpoolid" ) - response = requests.get(keys_url).text - my_keys = json.loads(response.decode("utf-8"))["keys"] - return my_keys + response = requests.get(keys_url).json() + return response["keys"] From 9ed7ba58df31c01c2518c724ae0d13f6070c98d7 Mon Sep 17 00:00:00 2001 From: Bert Blommers Date: Sat, 20 Jun 2020 12:15:29 +0100 Subject: [PATCH 401/658] S3 - Implement delete_object_tagging --- IMPLEMENTATION_COVERAGE.md | 2 +- moto/s3/models.py | 4 ++++ moto/s3/responses.py | 12 ++++++++++++ tests/test_s3/test_s3.py | 8 ++++++-- 4 files changed, 23 insertions(+), 3 deletions(-) diff --git a/IMPLEMENTATION_COVERAGE.md b/IMPLEMENTATION_COVERAGE.md index 43983d912c46..8db762945ed2 100644 --- a/IMPLEMENTATION_COVERAGE.md +++ b/IMPLEMENTATION_COVERAGE.md @@ -7093,7 +7093,7 @@ - [X] delete_bucket_tagging - [ ] delete_bucket_website - [X] delete_object -- [ ] delete_object_tagging +- [x] delete_object_tagging - [ ] delete_objects - [ ] delete_public_access_block - [ ] get_bucket_accelerate_configuration diff --git a/moto/s3/models.py b/moto/s3/models.py index 350a4fd153af..b809c0fc278b 100644 --- a/moto/s3/models.py +++ b/moto/s3/models.py @@ -1566,6 +1566,10 @@ def _set_delete_marker(self, bucket_name, key_name): bucket = self.get_bucket(bucket_name) bucket.keys[key_name] = FakeDeleteMarker(key=bucket.keys[key_name]) + def delete_object_tagging(self, bucket_name, key_name, version_id=None): + key = self.get_object(bucket_name, key_name, version_id=version_id) + self.tagger.delete_all_tags_for_resource(key.arn) + def delete_object(self, bucket_name, key_name, version_id=None): key_name = clean_key_name(key_name) bucket = self.get_bucket(bucket_name) diff --git a/moto/s3/responses.py b/moto/s3/responses.py index d4d872a8d0ef..10e68d56975e 100644 --- a/moto/s3/responses.py +++ b/moto/s3/responses.py @@ -1618,6 +1618,12 @@ def _key_response_delete(self, bucket_name, query, key_name): self.backend.cancel_multipart(bucket_name, upload_id) return 204, {}, "" version_id = query.get("versionId", [None])[0] + if "tagging" in query: + self.backend.delete_object_tagging( + bucket_name, key_name, version_id=version_id + ) + template = self.response_template(S3_DELETE_KEY_TAGGING_RESPONSE) + return 204, {}, template.render(version_id=version_id) 
self.backend.delete_object(bucket_name, key_name, version_id=version_id) return 204, {}, "" @@ -1935,6 +1941,12 @@ def _key_response_post(self, request, body, bucket_name, query, key_name): {% endfor %} """ +S3_DELETE_KEY_TAGGING_RESPONSE = """ + +{{version_id}} + +""" + S3_OBJECT_ACL_RESPONSE = """ diff --git a/tests/test_s3/test_s3.py b/tests/test_s3/test_s3.py index dbdd1b90c3fa..8ac227f4fde3 100644 --- a/tests/test_s3/test_s3.py +++ b/tests/test_s3/test_s3.py @@ -2424,9 +2424,13 @@ def test_boto3_put_object_with_tagging(): s3.put_object(Bucket=bucket_name, Key=key, Body="test", Tagging="foo=bar") - resp = s3.get_object_tagging(Bucket=bucket_name, Key=key) + s3.get_object_tagging(Bucket=bucket_name, Key=key)["TagSet"].should.contain( + {"Key": "foo", "Value": "bar"} + ) + + s3.delete_object_tagging(Bucket=bucket_name, Key=key) - resp["TagSet"].should.contain({"Key": "foo", "Value": "bar"}) + s3.get_object_tagging(Bucket=bucket_name, Key=key)["TagSet"].should.equal([]) @mock_s3 From f27e29e04d51b800a87be244bbe9c86231f59dea Mon Sep 17 00:00:00 2001 From: Bert Blommers Date: Sat, 20 Jun 2020 12:48:10 +0100 Subject: [PATCH 402/658] Cognito - Dont run test in ServerMode --- tests/test_cognitoidp/test_cognitoidp.py | 96 +++++++++++++----------- 1 file changed, 51 insertions(+), 45 deletions(-) diff --git a/tests/test_cognitoidp/test_cognitoidp.py b/tests/test_cognitoidp/test_cognitoidp.py index 5eb529e285bb..3b7037889910 100644 --- a/tests/test_cognitoidp/test_cognitoidp.py +++ b/tests/test_cognitoidp/test_cognitoidp.py @@ -14,7 +14,7 @@ from jose import jws, jwk, jwt from nose.tools import assert_raises -from moto import mock_cognitoidp +from moto import mock_cognitoidp, settings from moto.core import ACCOUNT_ID @@ -1312,52 +1312,58 @@ def test_admin_update_user_attributes(): val.should.equal("Jane") -@mock_cognitoidp -def test_idtoken_contains_kid_header(): - # https://github.com/spulec/moto/issues/3078 - # Setup - cognito = boto3.client("cognito-idp", "us-west-2") - user_pool_id = cognito.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"][ - "Id" - ] - client = cognito.create_user_pool_client( - UserPoolId=user_pool_id, - ExplicitAuthFlows=[ - "ALLOW_ADMIN_USER_PASSWORD_AUTH", - "ALLOW_REFRESH_TOKEN_AUTH", - "ALLOW_ADMIN_NO_SRP_AUTH", - ], - AllowedOAuthFlows=["code", "implicit"], - ClientName=str(uuid.uuid4()), - CallbackURLs=["https://example.com"], - ) - client_id = client["UserPoolClient"]["ClientId"] - username = str(uuid.uuid4()) - temporary_password = "1TemporaryP@ssword" - cognito.admin_create_user( - UserPoolId=user_pool_id, Username=username, TemporaryPassword=temporary_password - ) - result = cognito.admin_initiate_auth( - UserPoolId=user_pool_id, - ClientId=client_id, - AuthFlow="ADMIN_NO_SRP_AUTH", - AuthParameters={"USERNAME": username, "PASSWORD": temporary_password}, - ) +# Test will retrieve public key from cognito.amazonaws.com/.well-known/jwks.json, +# which isnt mocked in ServerMode +if not settings.TEST_SERVER_MODE: - # A newly created user is forced to set a new password - # This sets a new password and logs the user in (creates tokens) - password = "1F@kePassword" - result = cognito.respond_to_auth_challenge( - Session=result["Session"], - ClientId=client_id, - ChallengeName="NEW_PASSWORD_REQUIRED", - ChallengeResponses={"USERNAME": username, "NEW_PASSWORD": password}, - ) - # - id_token = result["AuthenticationResult"]["IdToken"] + @mock_cognitoidp + def test_idtoken_contains_kid_header(): + # https://github.com/spulec/moto/issues/3078 + # Setup + 
cognito = boto3.client("cognito-idp", "us-west-2") + user_pool_id = cognito.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"][ + "Id" + ] + client = cognito.create_user_pool_client( + UserPoolId=user_pool_id, + ExplicitAuthFlows=[ + "ALLOW_ADMIN_USER_PASSWORD_AUTH", + "ALLOW_REFRESH_TOKEN_AUTH", + "ALLOW_ADMIN_NO_SRP_AUTH", + ], + AllowedOAuthFlows=["code", "implicit"], + ClientName=str(uuid.uuid4()), + CallbackURLs=["https://example.com"], + ) + client_id = client["UserPoolClient"]["ClientId"] + username = str(uuid.uuid4()) + temporary_password = "1TemporaryP@ssword" + cognito.admin_create_user( + UserPoolId=user_pool_id, + Username=username, + TemporaryPassword=temporary_password, + ) + result = cognito.admin_initiate_auth( + UserPoolId=user_pool_id, + ClientId=client_id, + AuthFlow="ADMIN_NO_SRP_AUTH", + AuthParameters={"USERNAME": username, "PASSWORD": temporary_password}, + ) + + # A newly created user is forced to set a new password + # This sets a new password and logs the user in (creates tokens) + password = "1F@kePassword" + result = cognito.respond_to_auth_challenge( + Session=result["Session"], + ClientId=client_id, + ChallengeName="NEW_PASSWORD_REQUIRED", + ChallengeResponses={"USERNAME": username, "NEW_PASSWORD": password}, + ) + # + id_token = result["AuthenticationResult"]["IdToken"] - # Verify the KID header is present in the token, and corresponds to the KID supplied by the public JWT - verify_kid_header(id_token) + # Verify the KID header is present in the token, and corresponds to the KID supplied by the public JWT + verify_kid_header(id_token) def verify_kid_header(token): From e033d6cd68bdabba7710ebea62822ca2f1b4dd5d Mon Sep 17 00:00:00 2001 From: Bert Blommers Date: Wed, 24 Jun 2020 09:50:55 +0100 Subject: [PATCH 403/658] Update README -'mock_dynamodb' is deprecated --- README.md | 178 +++++++++++++++++++++++++++--------------------------- 1 file changed, 89 insertions(+), 89 deletions(-) diff --git a/README.md b/README.md index 6fb942aefdd5..7a2862744d05 100644 --- a/README.md +++ b/README.md @@ -58,95 +58,95 @@ With the decorator wrapping the test, all the calls to s3 are automatically mock It gets even better! Moto isn't just for Python code and it isn't just for S3. Look at the [standalone server mode](https://github.com/spulec/moto#stand-alone-server-mode) for more information about running Moto with other languages. 
Here's the status of the other AWS services implemented: ```gherkin -|-------------------------------------------------------------------------------------| -| Service Name | Decorator | Development Status | -|-------------------------------------------------------------------------------------| -| ACM | @mock_acm | all endpoints done | -|-------------------------------------------------------------------------------------| -| API Gateway | @mock_apigateway | core endpoints done | -|-------------------------------------------------------------------------------------| -| Autoscaling | @mock_autoscaling | core endpoints done | -|-------------------------------------------------------------------------------------| -| Cloudformation | @mock_cloudformation | core endpoints done | -|-------------------------------------------------------------------------------------| -| Cloudwatch | @mock_cloudwatch | basic endpoints done | -|-------------------------------------------------------------------------------------| -| CloudwatchEvents | @mock_events | all endpoints done | -|-------------------------------------------------------------------------------------| -| Cognito Identity | @mock_cognitoidentity | basic endpoints done | -|-------------------------------------------------------------------------------------| -| Cognito Identity Provider | @mock_cognitoidp | basic endpoints done | -|-------------------------------------------------------------------------------------| -| Config | @mock_config | basic endpoints done | -| | | core endpoints done | -|-------------------------------------------------------------------------------------| -| Data Pipeline | @mock_datapipeline | basic endpoints done | -|-------------------------------------------------------------------------------------| -| DynamoDB | @mock_dynamodb | core endpoints done | -| DynamoDB2 | @mock_dynamodb2 | all endpoints + partial indexes | -|-------------------------------------------------------------------------------------| -| EC2 | @mock_ec2 | core endpoints done | -| - AMI | | core endpoints done | -| - EBS | | core endpoints done | -| - Instances | | all endpoints done | -| - Security Groups | | core endpoints done | -| - Tags | | all endpoints done | -|-------------------------------------------------------------------------------------| -| ECR | @mock_ecr | basic endpoints done | -|-------------------------------------------------------------------------------------| -| ECS | @mock_ecs | basic endpoints done | -|-------------------------------------------------------------------------------------| -| ELB | @mock_elb | core endpoints done | -|-------------------------------------------------------------------------------------| -| ELBv2 | @mock_elbv2 | all endpoints done | -|-------------------------------------------------------------------------------------| -| EMR | @mock_emr | core endpoints done | -|-------------------------------------------------------------------------------------| -| Glacier | @mock_glacier | core endpoints done | -|-------------------------------------------------------------------------------------| -| IAM | @mock_iam | core endpoints done | -|-------------------------------------------------------------------------------------| -| IoT | @mock_iot | core endpoints done | -| | @mock_iotdata | core endpoints done | -|-------------------------------------------------------------------------------------| -| Kinesis | @mock_kinesis | core endpoints done | 
-|-------------------------------------------------------------------------------------| -| KMS | @mock_kms | basic endpoints done | -|-------------------------------------------------------------------------------------| -| Lambda | @mock_lambda | basic endpoints done, requires | -| | | docker | -|-------------------------------------------------------------------------------------| -| Logs | @mock_logs | basic endpoints done | -|-------------------------------------------------------------------------------------| -| Organizations | @mock_organizations | some core endpoints done | -|-------------------------------------------------------------------------------------| -| Polly | @mock_polly | all endpoints done | -|-------------------------------------------------------------------------------------| -| RDS | @mock_rds | core endpoints done | -|-------------------------------------------------------------------------------------| -| RDS2 | @mock_rds2 | core endpoints done | -|-------------------------------------------------------------------------------------| -| Redshift | @mock_redshift | core endpoints done | -|-------------------------------------------------------------------------------------| -| Route53 | @mock_route53 | core endpoints done | -|-------------------------------------------------------------------------------------| -| S3 | @mock_s3 | core endpoints done | -|-------------------------------------------------------------------------------------| -| SecretsManager | @mock_secretsmanager | basic endpoints done | -|-------------------------------------------------------------------------------------| -| SES | @mock_ses | all endpoints done | -|-------------------------------------------------------------------------------------| -| SNS | @mock_sns | all endpoints done | -|-------------------------------------------------------------------------------------| -| SQS | @mock_sqs | core endpoints done | -|-------------------------------------------------------------------------------------| -| SSM | @mock_ssm | core endpoints done | -|-------------------------------------------------------------------------------------| -| STS | @mock_sts | core endpoints done | -|-------------------------------------------------------------------------------------| -| SWF | @mock_swf | basic endpoints done | -|-------------------------------------------------------------------------------------| -| X-Ray | @mock_xray | all endpoints done | +|-------------------------------------------------------------------------------------|-----------------------------| +| Service Name | Decorator | Development Status | Comment | +|-------------------------------------------------------------------------------------| | +| ACM | @mock_acm | all endpoints done | | +|-------------------------------------------------------------------------------------| | +| API Gateway | @mock_apigateway | core endpoints done | | +|-------------------------------------------------------------------------------------| | +| Autoscaling | @mock_autoscaling | core endpoints done | | +|-------------------------------------------------------------------------------------| | +| Cloudformation | @mock_cloudformation | core endpoints done | | +|-------------------------------------------------------------------------------------| | +| Cloudwatch | @mock_cloudwatch | basic endpoints done | | +|-------------------------------------------------------------------------------------| | +| CloudwatchEvents | @mock_events | all endpoints 
done | | +|-------------------------------------------------------------------------------------| | +| Cognito Identity | @mock_cognitoidentity | basic endpoints done | | +|-------------------------------------------------------------------------------------| | +| Cognito Identity Provider | @mock_cognitoidp | basic endpoints done | | +|-------------------------------------------------------------------------------------| | +| Config | @mock_config | basic endpoints done | | +| | | core endpoints done | | +|-------------------------------------------------------------------------------------| | +| Data Pipeline | @mock_datapipeline | basic endpoints done | | +|-------------------------------------------------------------------------------------| | +| DynamoDB | @mock_dynamodb | core endpoints done | API 20111205. Deprecated. | +| DynamoDB2 | @mock_dynamodb2 | all endpoints + partial indexes | API 20120810 (Latest) | +|-------------------------------------------------------------------------------------| | +| EC2 | @mock_ec2 | core endpoints done | | +| - AMI | | core endpoints done | | +| - EBS | | core endpoints done | | +| - Instances | | all endpoints done | | +| - Security Groups | | core endpoints done | | +| - Tags | | all endpoints done | | +|-------------------------------------------------------------------------------------| | +| ECR | @mock_ecr | basic endpoints done | | +|-------------------------------------------------------------------------------------| | +| ECS | @mock_ecs | basic endpoints done | | +|-------------------------------------------------------------------------------------| | +| ELB | @mock_elb | core endpoints done | | +|-------------------------------------------------------------------------------------| | +| ELBv2 | @mock_elbv2 | all endpoints done | | +|-------------------------------------------------------------------------------------| | +| EMR | @mock_emr | core endpoints done | | +|-------------------------------------------------------------------------------------| | +| Glacier | @mock_glacier | core endpoints done | | +|-------------------------------------------------------------------------------------| | +| IAM | @mock_iam | core endpoints done | | +|-------------------------------------------------------------------------------------| | +| IoT | @mock_iot | core endpoints done | | +| | @mock_iotdata | core endpoints done | | +|-------------------------------------------------------------------------------------| | +| Kinesis | @mock_kinesis | core endpoints done | | +|-------------------------------------------------------------------------------------| | +| KMS | @mock_kms | basic endpoints done | | +|-------------------------------------------------------------------------------------| | +| Lambda | @mock_lambda | basic endpoints done, requires | | +| | | docker | | +|-------------------------------------------------------------------------------------| | +| Logs | @mock_logs | basic endpoints done | | +|-------------------------------------------------------------------------------------| | +| Organizations | @mock_organizations | some core endpoints done | | +|-------------------------------------------------------------------------------------| | +| Polly | @mock_polly | all endpoints done | | +|-------------------------------------------------------------------------------------| | +| RDS | @mock_rds | core endpoints done | | +|-------------------------------------------------------------------------------------| | +| RDS2 | @mock_rds2 | 
core endpoints done | | +|-------------------------------------------------------------------------------------| | +| Redshift | @mock_redshift | core endpoints done | | +|-------------------------------------------------------------------------------------| | +| Route53 | @mock_route53 | core endpoints done | | +|-------------------------------------------------------------------------------------| | +| S3 | @mock_s3 | core endpoints done | | +|-------------------------------------------------------------------------------------| | +| SecretsManager | @mock_secretsmanager | basic endpoints done | | +|-------------------------------------------------------------------------------------| | +| SES | @mock_ses | all endpoints done | | +|-------------------------------------------------------------------------------------| | +| SNS | @mock_sns | all endpoints done | | +|-------------------------------------------------------------------------------------| | +| SQS | @mock_sqs | core endpoints done | | +|-------------------------------------------------------------------------------------| | +| SSM | @mock_ssm | core endpoints done | | +|-------------------------------------------------------------------------------------| | +| STS | @mock_sts | core endpoints done | | +|-------------------------------------------------------------------------------------| | +| SWF | @mock_swf | basic endpoints done | | +|-------------------------------------------------------------------------------------| | +| X-Ray | @mock_xray | all endpoints done | | |-------------------------------------------------------------------------------------| ``` From 80c53d8b5a10f5e01d3fd16d9f31218bb69c05bb Mon Sep 17 00:00:00 2001 From: Shane Dowling Date: Fri, 26 Jun 2020 14:01:57 +0100 Subject: [PATCH 404/658] Add support for template urls in cfn validation (#3089) Added as boto supports both TemplateBody and TemplateUrl * Adds TemplateURL as a validate_template option * Adds a test to validate this --- moto/cloudformation/responses.py | 13 ++++++++----- tests/test_cloudformation/test_validate.py | 19 +++++++++++++++++++ 2 files changed, 27 insertions(+), 5 deletions(-) diff --git a/moto/cloudformation/responses.py b/moto/cloudformation/responses.py index cceedc86e256..c4a085705534 100644 --- a/moto/cloudformation/responses.py +++ b/moto/cloudformation/responses.py @@ -351,18 +351,21 @@ def list_exports(self): return template.render(exports=exports, next_token=next_token) def validate_template(self): - cfn_lint = self.cloudformation_backend.validate_template( - self._get_param("TemplateBody") - ) + template_body = self._get_param("TemplateBody") + template_url = self._get_param("TemplateURL") + if template_url: + template_body = self._get_stack_from_s3_url(template_url) + + cfn_lint = self.cloudformation_backend.validate_template(template_body) if cfn_lint: raise ValidationError(cfn_lint[0].message) description = "" try: - description = json.loads(self._get_param("TemplateBody"))["Description"] + description = json.loads(template_body)["Description"] except (ValueError, KeyError): pass try: - description = yaml.load(self._get_param("TemplateBody"))["Description"] + description = yaml.load(template_body)["Description"] except (yaml.ParserError, KeyError): pass template = self.response_template(VALIDATE_STACK_RESPONSE_TEMPLATE) diff --git a/tests/test_cloudformation/test_validate.py b/tests/test_cloudformation/test_validate.py index 5ffaeafb9b49..19dec46ef7eb 100644 --- a/tests/test_cloudformation/test_validate.py +++ 
b/tests/test_cloudformation/test_validate.py @@ -96,6 +96,25 @@ def test_boto3_yaml_validate_successful(): assert response["ResponseMetadata"]["HTTPStatusCode"] == 200 +@mock_cloudformation +@mock_s3 +def test_boto3_yaml_validate_template_url_successful(): + s3 = boto3.client("s3") + s3_conn = boto3.resource("s3", region_name="us-east-1") + s3_conn.create_bucket(Bucket="foobar") + + s3_conn.Object("foobar", "template-key").put(Body=yaml_template) + key_url = s3.generate_presigned_url( + ClientMethod="get_object", Params={"Bucket": "foobar", "Key": "template-key"} + ) + + cf_conn = boto3.client("cloudformation", region_name="us-east-1") + response = cf_conn.validate_template(TemplateURL=key_url) + assert response["Description"] == "Simple CloudFormation Test Template" + assert response["Parameters"] == [] + assert response["ResponseMetadata"]["HTTPStatusCode"] == 200 + + @mock_cloudformation def test_boto3_yaml_invalid_missing_resource(): cf_conn = boto3.client("cloudformation", region_name="us-east-1") From e2f6544228b9ee81324840e638ab062caa68b6e3 Mon Sep 17 00:00:00 2001 From: Alex Bainbridge Date: Fri, 26 Jun 2020 10:47:28 -0400 Subject: [PATCH 405/658] ssm document code done, testing now --- moto/ssm/exceptions.py | 50 ++++ moto/ssm/models.py | 470 ++++++++++++++++++++++++++++---- moto/ssm/responses.py | 92 +++++++ tests/test_ssm/test_ssm_docs.py | 0 4 files changed, 563 insertions(+), 49 deletions(-) create mode 100644 tests/test_ssm/test_ssm_docs.py diff --git a/moto/ssm/exceptions.py b/moto/ssm/exceptions.py index 83ae26b6cf3e..a1e1290028d0 100644 --- a/moto/ssm/exceptions.py +++ b/moto/ssm/exceptions.py @@ -53,3 +53,53 @@ class ValidationException(JsonRESTError): def __init__(self, message): super(ValidationException, self).__init__("ValidationException", message) + + +class DocumentAlreadyExists(JsonRESTError): + code = 400 + + def __init__(self, message): + super(DocumentAlreadyExists, self).__init__("DocumentAlreadyExists", message) + + +class InvalidDocument(JsonRESTError): + code = 400 + + def __init__(self, message): + super(InvalidDocument, self).__init__("InvalidDocument", message) + + +class InvalidDocumentOperation(JsonRESTError): + code = 400 + + def __init__(self, message): + super(InvalidDocumentOperation, self).__init__("InvalidDocumentOperation", message) + + +class InvalidDocumentContent(JsonRESTError): + code = 400 + + def __init__(self, message): + super(InvalidDocumentContent, self).__init__("InvalidDocumentContent", message) + + +class InvalidDocumentVersion(JsonRESTError): + code = 400 + + def __init__(self, message): + super(InvalidDocumentVersion, self).__init__("InvalidDocumentVersion", message) + + +class DuplicateDocumentVersionName(JsonRESTError): + code = 400 + + def __init__(self, message): + super(DuplicateDocumentVersionName, self).__init__("DuplicateDocumentVersionName", message) + + +class DuplicateDocumentContent(JsonRESTError): + code = 400 + + def __init__(self, message): + super(DuplicateDocumentContent, self).__init__("DuplicateDocumentContent", message) + diff --git a/moto/ssm/models.py b/moto/ssm/models.py index 67216972ed05..713cbd6287e5 100644 --- a/moto/ssm/models.py +++ b/moto/ssm/models.py @@ -3,7 +3,7 @@ import re from collections import defaultdict -from moto.core import BaseBackend, BaseModel +from moto.core import ACCOUNT_ID, BaseBackend, BaseModel from moto.core.exceptions import RESTError from moto.ec2 import ec2_backends from moto.cloudformation import cloudformation_backends @@ -12,6 +12,8 @@ import time import uuid import 
itertools +import json +import yaml from .utils import parameter_arn from .exceptions import ( @@ -22,20 +24,27 @@ ParameterVersionLabelLimitExceeded, ParameterVersionNotFound, ParameterNotFound, + DocumentAlreadyExists, + InvalidDocumentOperation, + InvalidDocument, + InvalidDocumentContent, + InvalidDocumentVersion, + DuplicateDocumentVersionName, + DuplicateDocumentContent ) class Parameter(BaseModel): def __init__( - self, - name, - value, - type, - description, - allowed_pattern, - keyid, - last_modified_date, - version, + self, + name, + value, + type, + description, + allowed_pattern, + keyid, + last_modified_date, + version, ): self.name = name self.type = type @@ -63,7 +72,7 @@ def decrypt(self, value): prefix = "kms:{}:".format(self.keyid or "default") if value.startswith(prefix): - return value[len(prefix) :] + return value[len(prefix):] def response_object(self, decrypt=False, region=None): r = { @@ -102,23 +111,86 @@ def describe_response_object(self, decrypt=False, include_labels=False): MAX_TIMEOUT_SECONDS = 3600 +def generate_ssm_doc_param_list(parameters): + if not parameters: + return None + param_list = [] + for param_name, param_info in parameters.items(): + param_info["Name"] = param_name + param_list.append(param_info) + return param_list + + +class Document(BaseModel): + def __init__(self, name, version_name, content, document_type, document_format, requires, attachments, + target_type, tags, document_version="1"): + self.name = name + self.version_name = version_name + self.content = content + self.document_type = document_type + self.document_format = document_format + self.requires = requires + self.attachments = attachments + self.target_type = target_type + self.tags = tags + + self.status = "Active" + self.document_version = document_version + self.owner = ACCOUNT_ID + self.created_date = datetime.datetime.now() + + if document_format == "JSON": + try: + content_json = json.loads(content) + except json.decoder.JSONDecodeError: + raise InvalidDocumentContent("The content for the document is not valid.") + elif document_format == "YAML": + try: + content_json = yaml.safe_load(content) + except yaml.YAMLError: + raise InvalidDocumentContent("The content for the document is not valid.") + else: + raise ValidationException(f'Invalid document format {document_format}') + + self.content_json = content_json + + try: + self.schema_version = content_json["schemaVersion"] + self.description = content_json.get("description") + self.outputs = content_json.get("outputs") + self.files = content_json.get("files") + # TODO add platformType + self.platform_types = "Not Implemented (moto)" + self.parameter_list = generate_ssm_doc_param_list(content_json.get("parameters")) + + if self.schema_version == "0.3" or self.schema_version == "2.0" or self.schema_version == "2.2": + self.mainSteps = content_json["mainSteps"] + elif self.schema_version == "1.2": + self.runtimeConfig = content_json.get("runtimeConfig") + + except KeyError: + raise InvalidDocumentContent("The content for the document is not valid.") + + + + class Command(BaseModel): def __init__( - self, - comment="", - document_name="", - timeout_seconds=MAX_TIMEOUT_SECONDS, - instance_ids=None, - max_concurrency="", - max_errors="", - notification_config=None, - output_s3_bucket_name="", - output_s3_key_prefix="", - output_s3_region="", - parameters=None, - service_role_arn="", - targets=None, - backend_region="us-east-1", + self, + comment="", + document_name="", + timeout_seconds=MAX_TIMEOUT_SECONDS, + instance_ids=None, 
+ max_concurrency="", + max_errors="", + notification_config=None, + output_s3_bucket_name="", + output_s3_key_prefix="", + output_s3_region="", + parameters=None, + service_role_arn="", + targets=None, + backend_region="us-east-1", ): if instance_ids is None: @@ -269,6 +341,75 @@ def get_invocation(self, instance_id, plugin_name): return invocation +def _validate_document_format(document_format): + aws_doc_formats = ["JSON", "YAML"] + if document_format not in aws_doc_formats: + raise ValidationException(f'Invalid document format {document_format}') + + +def _validate_document_info(content, name, document_type, document_format, strict=True): + aws_ssm_name_regex = r'^[a-zA-Z0-9_\-.]{3,128}$' + aws_name_reject_list = ["aws-", "amazon", "amzn"] + aws_doc_types = ["Command", "Policy", "Automation", "Session", "Package", "ApplicationConfiguration", + "ApplicationConfigurationSchema", "DeploymentStrategy", "ChangeCalendar"] + + _validate_document_format(document_format) + + if not content: + raise ValidationException("Content is required") + + if list(filter(name.startswith, aws_name_reject_list)): + raise ValidationException(f'Invalid document name {name}') + ssm_name_pattern = re.compile(aws_ssm_name_regex) + if not ssm_name_pattern.match(name): + raise ValidationException(f'Invalid document name {name}') + + if strict and document_type not in aws_doc_types: + # Update document doesn't use document type + raise ValidationException(f'Invalid document type {document_type}') + + +def _document_filter_equal_comparator(keyed_value, filter): + for v in filter["Values"]: + if keyed_value == v: + return True + return False + + +def _document_filter_list_includes_comparator(keyed_value_list, filter): + for v in filter["Values"]: + if v in keyed_value_list: + return True + return False + + +def _document_filter_match(filters, ssm_doc): + for filter in filters: + if filter["Key"] == "Name" and not _document_filter_equal_comparator(ssm_doc.name, filter): + return False + + elif filter["Key"] == "Owner": + if len(filter["Values"]) != 1: + raise ValidationException("Owner filter can only have one value.") + if filter["Values"][0] == "Self": + # Update to running account ID + filter["Values"][0] = ACCOUNT_ID + if not _document_filter_equal_comparator(ssm_doc.owner, filter): + return False + + elif filter["Key"] == "PlatformTypes" and not \ + _document_filter_list_includes_comparator(ssm_doc.platform_types, filter): + return False + + elif filter["Key"] == "DocumentType" and not _document_filter_equal_comparator(ssm_doc.document_type, filter): + return False + + elif filter["Key"] == "TargetType" and not _document_filter_equal_comparator(ssm_doc.target_type, filter): + return False + + return True + + class SimpleSystemManagerBackend(BaseBackend): def __init__(self): # each value is a list of all of the versions for a parameter @@ -278,12 +419,243 @@ def __init__(self): self._resource_tags = defaultdict(lambda: defaultdict(dict)) self._commands = [] self._errors = [] + self._documents = defaultdict(dict) # figure out what region we're in for region, backend in ssm_backends.items(): if backend == self: self._region = region + def _generate_document_description(self, document): + + latest = self._documents[document.name]['latest_version'] + default_version = self._documents[document.name]["default_version"] + + return { + "Hash": hash, + "HashType": "Sha256", + "Name": document.name, + "Owner": document.owner, + "CreatedDate": document.created_date, + "Status": document.status, + "DocumentVersion": 
document.document_version,
+            "Description": document.description,
+            "Parameters": document.parameter_list,
+            "PlatformTypes": document.platform_types,
+            "SchemaVersion": document.schema_version,
+            "LatestVersion": latest,
+            "DefaultVersion": default_version,
+            "DocumentFormat": document.document_format
+        }
+
+    def _generate_document_information(self, ssm_document, document_format):
+        base = {
+            "Name": ssm_document.name,
+            "DocumentVersion": ssm_document.document_version,
+            "Status": ssm_document.status,
+            "Content": ssm_document.content,
+            "DocumentType": ssm_document.document_type,
+            "DocumentFormat": ssm_document.document_format
+        }
+
+        if document_format == "JSON":
+            base["Content"] = json.dumps(ssm_document.content_json)
+        elif document_format == "YAML":
+            base["Content"] = yaml.dump(ssm_document.content_json)
+        else:
+            raise ValidationException(f'Invalid document format {document_format}')
+
+        if ssm_document.version_name:
+            base["VersionName"] = ssm_document.version_name
+        if ssm_document.requires:
+            base["Requires"] = ssm_document.requires
+        if ssm_document.attachments:
+            base["AttachmentsContent"] = ssm_document.attachments
+
+        return base
+
+    def _generate_document_list_information(self, ssm_document):
+        base = {
+            "Name": ssm_document.name,
+            "Owner": ssm_document.owner,
+            "DocumentVersion": ssm_document.document_version,
+            "DocumentType": ssm_document.document_type,
+            "SchemaVersion": ssm_document.schema_version,
+            "DocumentFormat": ssm_document.document_format
+        }
+        if ssm_document.version_name:
+            base["VersionName"] = ssm_document.version_name
+        if ssm_document.platform_types:
+            base["PlatformTypes"] = ssm_document.platform_types
+        if ssm_document.target_type:
+            base["TargetType"] = ssm_document.target_type
+        if ssm_document.tags:
+            base["Tags"] = ssm_document.tags
+        if ssm_document.requires:
+            base["Requires"] = ssm_document.requires
+
+        return base
+
+    def create_document(self, content, requires, attachments, name, version_name, document_type, document_format,
+                        target_type, tags):
+        ssm_document = Document(name=name, version_name=version_name, content=content, document_type=document_type,
+                                document_format=document_format, requires=requires, attachments=attachments,
+                                target_type=target_type, tags=tags)
+
+        _validate_document_info(content=content, name=name, document_type=document_type)
+
+        if self._documents.get(ssm_document.name):
+            raise DocumentAlreadyExists(f"Document with same name {name} already exists")
+
+        self._documents[ssm_document.name] = {
+            "documents": {
+                ssm_document.document_version: ssm_document
+            },
+            "default_version": ssm_document.document_version,
+            "latest_version": ssm_document.document_version
+        }
+
+        return self._generate_document_description(ssm_document)
+
+    def delete_document(self, name, document_version, version_name, force):
+        documents = self._documents.get(name, {}).get("documents", {})
+        keys_to_delete = set()
+
+        if documents:
+            # documents is keyed by version, so grab any entry to check the type
+            if next(iter(documents.values())).document_type == "ApplicationConfigurationSchema" and not force:
+                raise InvalidDocumentOperation("You attempted to delete a document while it is still shared. 
" + "You must stop sharing the document before you can delete it.") + if document_version and document_version == self._documents[name]["default_version"]: + raise InvalidDocumentOperation("Default version of the document can't be deleted.") + + if document_version or version_name: + for doc_version, document in documents.items(): + if document_version and doc_version == document_version: + keys_to_delete.add(document_version) + continue + if version_name and document.version_name == version_name: + keys_to_delete.add(document_version) + continue + else: + keys_to_delete = set(documents.keys()) + + for key in keys_to_delete: + self._documents[name]["documents"][key] = None + + if len(self._documents[name]["documents"].keys()) == 0: + self._documents[name] = None + else: + raise InvalidDocument("The specified document does not exist.") + + def _find_document(self, name, document_version=None, version_name=None, strict=True): + if not self._documents.get(name): + raise InvalidDocument(f"Document with name {name} does not exist.") + + documents = self._documents[name]["documents"] + ssm_document = None + + if not version_name and not document_version: + # Retrieve default version + default_version = self._documents[name]['default_version'] + ssm_document = documents.get(default_version) + + elif version_name and document_version: + for doc_version, document in documents.items(): + if doc_version == document_version and document.version_name == version_name: + ssm_document = document + break + + else: + for doc_version, document in documents.items(): + if document_version and doc_version == document_version : + ssm_document = document + break + if version_name and document.version_name == version_name: + ssm_document = document + break + + if strict and not ssm_document: + raise InvalidDocument(f"Document with name {name} does not exist.") + + return ssm_document + + def get_document(self, name, document_version, version_name, document_format): + _validate_document_format(document_format=document_format) + + ssm_document = self._find_document(name, document_version, version_name) + + return self._generate_document_information(ssm_document, document_format) + + def update_document_default_version(self, name, document_version): + ssm_document = self._find_document(name, document_version=document_version) + self._documents[name]["default_version"] = document_version + base = { + 'Name': ssm_document.name, + 'DefaultVersion': document_version, + } + + if ssm_document.version_name: + base['DefaultVersionName'] = ssm_document.version_name + + return base + + def update_document(self, content, attachments, name, version_name, document_version, document_format, target_type): + _validate_document_info(content=content, name=name, document_type=None, strict=False) + if not self._documents.get(name): + raise InvalidDocument("The specified document does not exist.") + if self._documents.get[name]['latest_version'] != document_version or document_version != "$LATEST": + raise InvalidDocumentVersion("The document version is not valid or does not exist.") + if self._find_document(name, version_name=version_name, strict=False): + raise DuplicateDocumentVersionName(f"The specified version name is a duplicate.") + + old_ssm_document = self._find_document(name) + + new_ssm_document = Document(name=name, version_name=version_name, content=content, + document_type=old_ssm_document.document_type, document_format=document_format, + requires=old_ssm_document.requires, attachments=attachments, + 
target_type=target_type, tags=old_ssm_document.tags, + document_version=self._documents.get[name]['latest_version']) + + for doc_version, document in self._documents[name].items(): + if document.content == new_ssm_document.content: + raise DuplicateDocumentContent("The content of the association document matches another document. " + "Change the content of the document and try again.") + + self._documents[name]["documents"][new_ssm_document.document_version] = new_ssm_document + + return self._generate_document_description(new_ssm_document) + + def describe_document(self, name, document_version, version_name): + ssm_document = self._find_document(name, document_version, version_name) + return self._generate_document_description(ssm_document) + + def list_documents(self, document_filter_list, filters, max_results=10, next_token=0): + if document_filter_list: + raise ValidationException( + "DocumentFilterList is deprecated. Instead use Filters." + ) + + results = [] + dummy_token_tracker = 0 + # Sort to maintain next token adjacency + for document_name, document_bundle in sorted(self._documents.items()): + if dummy_token_tracker < next_token: + dummy_token_tracker = dummy_token_tracker + 1 + continue + + default_version = document_bundle['default_version'] + ssm_doc = self._documents[document_name]['documents'][default_version] + if filters and not _document_filter_match(filters, ssm_doc): + # If we have filters enabled, and we don't match them, + continue + else: + results.append(self._generate_document_list_information(ssm_doc)) + + if len(results) == max_results: + return results, next_token + max_results + + return results + def delete_parameter(self, name): return self._parameters.pop(name, None) @@ -449,9 +821,9 @@ def _validate_parameter_filters(self, parameter_filters, by_path): "When using global parameters, please specify within a global namespace." ) if ( - "//" in value - or not value.startswith("/") - or not re.match("^[a-zA-Z0-9_.-/]*$", value) + "//" in value + or not value.startswith("/") + or not re.match("^[a-zA-Z0-9_.-/]*$", value) ): raise ValidationException( 'The parameter doesn\'t meet the parameter name requirements. The parameter name must begin with a forward slash "/". 
' @@ -530,13 +902,13 @@ def get_parameters(self, names, with_decryption): return result def get_parameters_by_path( - self, - path, - with_decryption, - recursive, - filters=None, - next_token=None, - max_results=10, + self, + path, + with_decryption, + recursive, + filters=None, + next_token=None, + max_results=10, ): """Implement the get-parameters-by-path-API in the backend.""" result = [] @@ -546,10 +918,10 @@ def get_parameters_by_path( for param_name in self._parameters: if path != "/" and not param_name.startswith(path): continue - if "/" in param_name[len(path) + 1 :] and not recursive: + if "/" in param_name[len(path) + 1:] and not recursive: continue if not self._match_filters( - self.get_parameter(param_name, with_decryption), filters + self.get_parameter(param_name, with_decryption), filters ): continue result.append(self.get_parameter(param_name, with_decryption)) @@ -561,7 +933,7 @@ def _get_values_nexttoken(self, values_list, max_results, next_token=None): next_token = 0 next_token = int(next_token) max_results = int(max_results) - values = values_list[next_token : next_token + max_results] + values = values_list[next_token: next_token + max_results] if len(values) == max_results: next_token = str(next_token + max_results) else: @@ -599,7 +971,7 @@ def _match_filters(self, parameter, filters=None): if what is None: return False elif option == "BeginsWith" and not any( - what.startswith(value) for value in values + what.startswith(value) for value in values ): return False elif option == "Equals" and not any(what == value for value in values): @@ -608,10 +980,10 @@ def _match_filters(self, parameter, filters=None): if any(value == "/" and len(what.split("/")) == 2 for value in values): continue elif any( - value != "/" - and what.startswith(value + "/") - and len(what.split("/")) - 1 == len(value.split("/")) - for value in values + value != "/" + and what.startswith(value + "/") + and len(what.split("/")) - 1 == len(value.split("/")) + for value in values ): continue else: @@ -658,10 +1030,10 @@ def label_parameter_version(self, name, version, labels): invalid_labels = [] for label in labels: if ( - label.startswith("aws") - or label.startswith("ssm") - or label[:1].isdigit() - or not re.match(r"^[a-zA-z0-9_\.\-]*$", label) + label.startswith("aws") + or label.startswith("ssm") + or label[:1].isdigit() + or not re.match(r"^[a-zA-z0-9_\.\-]*$", label) ): invalid_labels.append(label) continue @@ -691,7 +1063,7 @@ def label_parameter_version(self, name, version, labels): return [invalid_labels, version] def put_parameter( - self, name, description, value, type, allowed_pattern, keyid, overwrite + self, name, description, value, type, allowed_pattern, keyid, overwrite ): previous_parameter_versions = self._parameters[name] if len(previous_parameter_versions) == 0: diff --git a/moto/ssm/responses.py b/moto/ssm/responses.py index 45d2dec0ad27..c0e35b914ad7 100644 --- a/moto/ssm/responses.py +++ b/moto/ssm/responses.py @@ -17,6 +17,98 @@ def request_params(self): except ValueError: return {} + def create_document(self): + content = self._get_param("Content") + requires = self._get_param("Requires") + attachments = self._get_param("Attachments") + name = self._get_param("Name") + version_name = self._get_param("VersionName") + document_type = self._get_param("DocumentType") + document_format = self._get_param("DocumentFormat") + target_type = self._get_param("TargetType") + tags = self._get_param("Tags") + + result = self.ssm_backend.create_document(content=content, 
requires=requires, attachments=attachments, + name=name, version_name=version_name, document_type=document_type, + document_format=document_format, target_type=target_type, tags=tags) + + return { + 'DocumentDescription': result + } + + def delete_document(self): + name = self._get_param("Name") + document_version = self._get_param("DocumentVersion") + version_name = self._get_param("VersionName") + force = self._get_param("Force", False) + self.ssm_backend.delete_document(name=name, document_version=document_version, + version_name=version_name, force=force) + + return {} + + def get_document(self): + name = self._get_param("Name") + version_name = self._get_param("VersionName") + document_version = self._get_param("DocumentVersion") + document_format = self._get_param("DocumentFormat") + + document = self.ssm_backend.get_document(name=name, document_version=document_version, + document_format=document_format, version_name=version_name) + + return document + + def describe_document(self): + name = self._get_param("Name") + document_version = self._get_param("DocumentVersion") + version_name = self._get_param("VersionName") + + result = self.ssm_backend.describe_document(name=name, document_version=document_version, + version_name=version_name) + + return { + 'Document': result + } + + def update_document(self): + content = self._get_param("Content") + attachments = self._get_param("Attachments") + name = self._get_param("Name") + version_name = self._get_param("VersionName") + document_version = self._get_param("DocumentVersion") + document_format = self._get_param("DocumentFormat") + target_type = self._get_param("TargetType") + + result = self.ssm_backend.update_document(content=content, attachments=attachments, name=name, + version_name=version_name, document_version=document_version, + document_format=document_format, target_type=target_type) + + return { + 'DocumentDescription': result + } + + def update_document_default_version(self): + name = self._get_param("Name") + document_version = self._get_param("DocumentVersion") + + result = self.ssm_backend.update_document_default_version(name=name, document_version=document_version) + return { + 'Description': result + } + + def list_documents(self): + document_filter_list = self._get_param("DocumentFilterList") + filters = self._get_param("Filters") + max_results = self._get_param("MaxResults", 10) + next_token = self._get_param("NextToken") + + documents, token = self.ssm_backend.list_documents(document_filter_list=document_filter_list, filters=filters, + max_results=max_results, next_token=next_token) + + return { + "DocumentIdentifiers": documents, + "NextToken": token + } + def _get_param(self, param, default=None): return self.request_params.get(param, default) diff --git a/tests/test_ssm/test_ssm_docs.py b/tests/test_ssm/test_ssm_docs.py new file mode 100644 index 000000000000..e69de29bb2d1 From 73813460b66119dd2491f286a01bf07e0b18d193 Mon Sep 17 00:00:00 2001 From: Adrian <40185566+adriank-convoy@users.noreply.github.com> Date: Sat, 27 Jun 2020 01:42:32 -0700 Subject: [PATCH 406/658] Fix condition filtering bug in elbv2.create_rule() (#3092) * Fix condition filtering bug * Update test_handle_listener_rules unit test * Run black --- moto/elbv2/responses.py | 4 ++-- tests/test_elbv2/test_elbv2.py | 21 +++++++++++++++++++++ 2 files changed, 23 insertions(+), 2 deletions(-) diff --git a/moto/elbv2/responses.py b/moto/elbv2/responses.py index 922de96d4577..68fbc88165ea 100644 --- a/moto/elbv2/responses.py +++ b/moto/elbv2/responses.py 
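A note on the mechanics of the one-line fix below (a hedged sketch; the flattened key names are assumptions about how moto expands query strings, not lines taken from this patch): a rule condition's values can arrive either under top-level keys such as "values.member.1" or, when nested inside PathPatternConfig, under keys such as "path-pattern-config.values.member.1". A startswith("values.member") test only matches the first shape and silently drops the nested one; a substring test matches both:

    # Minimal, self-contained illustration with a made-up flattened condition
    flat_condition = {
        "field": "path-pattern",
        "path-pattern-config.values.member.1": "foobar2",
    }
    # e[0].startswith("values.member") would select nothing here
    values = sorted(
        (e for e in flat_condition.items() if "values.member" in e[0]),
        key=lambda x: x[0],
    )
    assert [e[1] for e in values] == ["foobar2"]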
@@ -158,7 +158,7 @@ def create_rule(self): condition = {} condition["field"] = _condition["field"] values = sorted( - [e for e in _condition.items() if e[0].startswith("values.member")], + [e for e in _condition.items() if "values.member" in e[0]], key=lambda x: x[0], ) condition["values"] = [e[1] for e in values] @@ -356,7 +356,7 @@ def modify_rule(self): condition = {} condition["field"] = _condition["field"] values = sorted( - [e for e in _condition.items() if e[0].startswith("values.member")], + [e for e in _condition.items() if "values.member" in e[0]], key=lambda x: x[0], ) condition["values"] = [e[1] for e in values] diff --git a/tests/test_elbv2/test_elbv2.py b/tests/test_elbv2/test_elbv2.py index af1b19f0962c..c155cba20107 100644 --- a/tests/test_elbv2/test_elbv2.py +++ b/tests/test_elbv2/test_elbv2.py @@ -994,12 +994,17 @@ def test_handle_listener_rules(): priority = 100 host = "xxx.example.com" path_pattern = "foobar" + pathpatternconfig_pattern = "foobar2" created_rule = conn.create_rule( ListenerArn=http_listener_arn, Priority=priority, Conditions=[ {"Field": "host-header", "Values": [host]}, {"Field": "path-pattern", "Values": [path_pattern]}, + { + "Field": "path-pattern", + "PathPatternConfig": {"Values": [pathpatternconfig_pattern]}, + }, ], Actions=[ {"TargetGroupArn": target_group.get("TargetGroupArn"), "Type": "forward"} @@ -1017,6 +1022,10 @@ def test_handle_listener_rules(): Conditions=[ {"Field": "host-header", "Values": [host]}, {"Field": "path-pattern", "Values": [path_pattern]}, + { + "Field": "path-pattern", + "PathPatternConfig": {"Values": [pathpatternconfig_pattern]}, + }, ], Actions=[ {"TargetGroupArn": target_group.get("TargetGroupArn"), "Type": "forward"} @@ -1031,6 +1040,10 @@ def test_handle_listener_rules(): Conditions=[ {"Field": "host-header", "Values": [host]}, {"Field": "path-pattern", "Values": [path_pattern]}, + { + "Field": "path-pattern", + "PathPatternConfig": {"Values": [pathpatternconfig_pattern]}, + }, ], Actions=[ { @@ -1079,11 +1092,16 @@ def test_handle_listener_rules(): # modify rule partially new_host = "new.example.com" new_path_pattern = "new_path" + new_pathpatternconfig_pattern = "new_path2" modified_rule = conn.modify_rule( RuleArn=first_rule["RuleArn"], Conditions=[ {"Field": "host-header", "Values": [new_host]}, {"Field": "path-pattern", "Values": [new_path_pattern]}, + { + "Field": "path-pattern", + "PathPatternConfig": {"Values": [new_pathpatternconfig_pattern]}, + }, ], )["Rules"][0] @@ -1092,6 +1110,9 @@ def test_handle_listener_rules(): modified_rule.should.equal(obtained_rule) obtained_rule["Conditions"][0]["Values"][0].should.equal(new_host) obtained_rule["Conditions"][1]["Values"][0].should.equal(new_path_pattern) + obtained_rule["Conditions"][2]["Values"][0].should.equal( + new_pathpatternconfig_pattern + ) obtained_rule["Actions"][0]["TargetGroupArn"].should.equal( target_group.get("TargetGroupArn") ) From 8a092c91ae9dcc4961754596b4398a7a2d1cf2ed Mon Sep 17 00:00:00 2001 From: Bert Blommers Date: Sat, 27 Jun 2020 11:07:15 +0100 Subject: [PATCH 407/658] DynamoDB - Add support for GSI's ProjectionType: KEYS_ONLY --- moto/dynamodb2/models/__init__.py | 44 +++++++++++++++++++-------- tests/test_dynamodb2/test_dynamodb.py | 44 +++++++++++++++++++++++++++ 2 files changed, 76 insertions(+), 12 deletions(-) diff --git a/moto/dynamodb2/models/__init__.py b/moto/dynamodb2/models/__init__.py index 13ee94948765..7e288bb9dec7 100644 --- a/moto/dynamodb2/models/__init__.py +++ b/moto/dynamodb2/models/__init__.py @@ -331,6 +331,21 @@ 
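# Context for the project() method added in this hunk (a hedged, stand-alone
# sketch with made-up names, not lines from the diff): a GSI declared with
# ProjectionType KEYS_ONLY must return only the index's key attributes, so
# projecting an item amounts to dropping every attribute outside the key
# schema.
def project_keys_only(item, key_schema):
    # Keep only the attributes named in the index key schema.
    allowed = {key["AttributeName"] for key in key_schema}
    return {k: v for k, v in item.items() if k in allowed}

schema = [
    {"AttributeName": "gsiK1PartitionKey", "KeyType": "HASH"},
    {"AttributeName": "gsiK1SortKey", "KeyType": "RANGE"},
]
item = {
    "gsiK1PartitionKey": "gsi-pk",
    "gsiK1SortKey": "gsi-sk",
    "someAttribute": "lore ipsum",
}
assert project_keys_only(item, schema) == {
    "gsiK1PartitionKey": "gsi-pk",
    "gsiK1SortKey": "gsi-sk",
}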
def update(self, u): self.projection = u.get("Projection", self.projection) self.throughput = u.get("ProvisionedThroughput", self.throughput) + def project(self, item): + """ + Enforces the ProjectionType of this GSI + Removes any non-wanted attributes from the item + :param item: + :return: + """ + if self.projection: + if self.projection.get("ProjectionType", None) == "KEYS_ONLY": + allowed_attributes = ",".join( + [key["AttributeName"] for key in self.schema] + ) + item.filter(allowed_attributes) + return item + class Table(BaseModel): def __init__( @@ -719,6 +734,10 @@ def conv(x): results = [item for item in results if filter_expression.expr(item)] results = copy.deepcopy(results) + if index_name: + index = self.get_index(index_name) + for result in results: + index.project(result) if projection_expression: for result in results: result.filter(projection_expression) @@ -739,11 +758,16 @@ def all_items(self): def all_indexes(self): return (self.global_indexes or []) + (self.indexes or []) - def has_idx_items(self, index_name): - + def get_index(self, index_name, err=None): all_indexes = self.all_indexes() indexes_by_name = dict((i.name, i) for i in all_indexes) - idx = indexes_by_name[index_name] + if err and index_name not in indexes_by_name: + raise err + return indexes_by_name[index_name] + + def has_idx_items(self, index_name): + + idx = self.get_index(index_name) idx_col_set = set([i["AttributeName"] for i in idx.schema]) for hash_set in self.items.values(): @@ -766,14 +790,12 @@ def scan( ): results = [] scanned_count = 0 - all_indexes = self.all_indexes() - indexes_by_name = dict((i.name, i) for i in all_indexes) if index_name: - if index_name not in indexes_by_name: - raise InvalidIndexNameError( - "The table does not have the specified index: %s" % index_name - ) + err = InvalidIndexNameError( + "The table does not have the specified index: %s" % index_name + ) + self.get_index(index_name, err) items = self.has_idx_items(index_name) else: items = self.all_items() @@ -847,9 +869,7 @@ def _trim_results(self, results, limit, exclusive_start_key, scanned_index=None) last_evaluated_key[self.range_key_attr] = results[-1].range_key if scanned_index: - all_indexes = self.all_indexes() - indexes_by_name = dict((i.name, i) for i in all_indexes) - idx = indexes_by_name[scanned_index] + idx = self.get_index(scanned_index) idx_col_list = [i["AttributeName"] for i in idx.schema] for col in idx_col_list: last_evaluated_key[col] = results[-1].attrs[col] diff --git a/tests/test_dynamodb2/test_dynamodb.py b/tests/test_dynamodb2/test_dynamodb.py index 3709991166b1..cf1548e03982 100644 --- a/tests/test_dynamodb2/test_dynamodb.py +++ b/tests/test_dynamodb2/test_dynamodb.py @@ -5316,3 +5316,47 @@ def test_transact_write_items_fails_with_transaction_canceled_exception(): ex.exception.response["Error"]["Message"].should.equal( "Transaction cancelled, please refer cancellation reasons for specific reasons [None, ConditionalCheckFailed]" ) + + +@mock_dynamodb2 +def test_gsi_projection_type_keys_only(): + table_schema = { + "KeySchema": [{"AttributeName": "partitionKey", "KeyType": "HASH"}], + "GlobalSecondaryIndexes": [ + { + "IndexName": "GSI-K1", + "KeySchema": [ + {"AttributeName": "gsiK1PartitionKey", "KeyType": "HASH"}, + {"AttributeName": "gsiK1SortKey", "KeyType": "RANGE"}, + ], + "Projection": {"ProjectionType": "KEYS_ONLY",}, + } + ], + "AttributeDefinitions": [ + {"AttributeName": "partitionKey", "AttributeType": "S"}, + {"AttributeName": "gsiK1PartitionKey", "AttributeType": "S"}, + 
{"AttributeName": "gsiK1SortKey", "AttributeType": "S"}, + ], + } + + item = { + "partitionKey": "pk-1", + "gsiK1PartitionKey": "gsi-pk", + "gsiK1SortKey": "gsi-sk", + "someAttribute": "lore ipsum", + } + + dynamodb = boto3.resource("dynamodb", region_name="us-east-1") + dynamodb.create_table( + TableName="test-table", BillingMode="PAY_PER_REQUEST", **table_schema + ) + table = dynamodb.Table("test-table") + table.put_item(Item=item) + + items = table.query( + KeyConditionExpression=Key("gsiK1PartitionKey").eq("gsi-pk"), + IndexName="GSI-K1", + )["Items"] + items.should.have.length_of(1) + # Item should only include GSI Keys, as per the ProjectionType + items[0].should.equal({"gsiK1PartitionKey": "gsi-pk", "gsiK1SortKey": "gsi-sk"}) From 7d43a1d23de705e346322e858032af643d018425 Mon Sep 17 00:00:00 2001 From: Bert Blommers Date: Sat, 27 Jun 2020 15:11:41 +0100 Subject: [PATCH 408/658] Store Region-info in UserAgent-header --- moto/core/models.py | 3 +++ moto/core/responses.py | 10 +++++++--- 2 files changed, 10 insertions(+), 3 deletions(-) diff --git a/moto/core/models.py b/moto/core/models.py index ba4564e4aad3..c8ee1709bf76 100644 --- a/moto/core/models.py +++ b/moto/core/models.py @@ -10,6 +10,7 @@ import types from io import BytesIO from collections import defaultdict +from botocore.config import Config from botocore.handlers import BUILTIN_HANDLERS from botocore.awsrequest import AWSResponse from six.moves.urllib.parse import urlparse @@ -416,6 +417,8 @@ def enable_patching(self): import mock def fake_boto3_client(*args, **kwargs): + service, region = args + kwargs["config"] = Config(user_agent_extra="region/"+region) if "endpoint_url" not in kwargs: kwargs["endpoint_url"] = "http://localhost:5000" return real_boto3_client(*args, **kwargs) diff --git a/moto/core/responses.py b/moto/core/responses.py index c52e898982ca..690964df0b3d 100644 --- a/moto/core/responses.py +++ b/moto/core/responses.py @@ -188,6 +188,7 @@ class BaseResponse(_TemplateEnvironmentMixin, ActionAuthenticatorMixin): default_region = "us-east-1" # to extract region, use [^.] region_regex = re.compile(r"\.(?P[a-z]{2}-[a-z]+-\d{1})\.amazonaws\.com") + region_from_useragent_regex = re.compile(r"region/(?P[a-z]{2}-[a-z]+-\d{1})") param_list_regex = re.compile(r"(.*)\.(\d+)\.") access_key_regex = re.compile( r"AWS.*(?P(? Date: Sat, 27 Jun 2020 19:05:34 +0100 Subject: [PATCH 409/658] Linting --- moto/core/models.py | 2 +- moto/core/responses.py | 8 ++++++-- 2 files changed, 7 insertions(+), 3 deletions(-) diff --git a/moto/core/models.py b/moto/core/models.py index c8ee1709bf76..8a8bd5110ef4 100644 --- a/moto/core/models.py +++ b/moto/core/models.py @@ -418,7 +418,7 @@ def enable_patching(self): def fake_boto3_client(*args, **kwargs): service, region = args - kwargs["config"] = Config(user_agent_extra="region/"+region) + kwargs["config"] = Config(user_agent_extra="region/" + region) if "endpoint_url" not in kwargs: kwargs["endpoint_url"] = "http://localhost:5000" return real_boto3_client(*args, **kwargs) diff --git a/moto/core/responses.py b/moto/core/responses.py index 690964df0b3d..676d7549d1de 100644 --- a/moto/core/responses.py +++ b/moto/core/responses.py @@ -188,7 +188,9 @@ class BaseResponse(_TemplateEnvironmentMixin, ActionAuthenticatorMixin): default_region = "us-east-1" # to extract region, use [^.] 
region_regex = re.compile(r"\.(?P[a-z]{2}-[a-z]+-\d{1})\.amazonaws\.com") - region_from_useragent_regex = re.compile(r"region/(?P[a-z]{2}-[a-z]+-\d{1})") + region_from_useragent_regex = re.compile( + r"region/(?P[a-z]{2}-[a-z]+-\d{1})" + ) param_list_regex = re.compile(r"(.*)\.(\d+)\.") access_key_regex = re.compile( r"AWS.*(?P(? Date: Sat, 27 Jun 2020 19:46:26 +0100 Subject: [PATCH 410/658] Get region from args or kwargs --- moto/core/models.py | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/moto/core/models.py b/moto/core/models.py index 8a8bd5110ef4..235ee8599c78 100644 --- a/moto/core/models.py +++ b/moto/core/models.py @@ -417,8 +417,9 @@ def enable_patching(self): import mock def fake_boto3_client(*args, **kwargs): - service, region = args - kwargs["config"] = Config(user_agent_extra="region/" + region) + region = self._get_region(*args, **kwargs) + if region: + kwargs["config"] = Config(user_agent_extra="region/" + region) if "endpoint_url" not in kwargs: kwargs["endpoint_url"] = "http://localhost:5000" return real_boto3_client(*args, **kwargs) @@ -466,6 +467,14 @@ def _convert_to_bytes(mixed_buffer): if six.PY2: self._httplib_patcher.start() + def _get_region(self, *args, **kwargs): + if "region_name" in kwargs: + return kwargs["region_name"] + if type(args) == tuple: + service, region = args + return region + return None + def disable_patching(self): if self._client_patcher: self._client_patcher.stop() From f963d2ebaab54c223ac78d6a56d60379f576d46b Mon Sep 17 00:00:00 2001 From: Bert Blommers Date: Sat, 27 Jun 2020 20:13:42 +0100 Subject: [PATCH 411/658] Allow service-invocations without region (S3, e.g.) --- moto/core/models.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/moto/core/models.py b/moto/core/models.py index 235ee8599c78..d7f96fe37942 100644 --- a/moto/core/models.py +++ b/moto/core/models.py @@ -470,7 +470,7 @@ def _convert_to_bytes(mixed_buffer): def _get_region(self, *args, **kwargs): if "region_name" in kwargs: return kwargs["region_name"] - if type(args) == tuple: + if type(args) == tuple and len(args) == 2: service, region = args return region return None From 96989bb645b69fe82e928271e4b4f69a73547a31 Mon Sep 17 00:00:00 2001 From: Bert Blommers Date: Mon, 29 Jun 2020 14:00:30 +0100 Subject: [PATCH 412/658] SSM: Use EC2 region --- moto/ssm/models.py | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) diff --git a/moto/ssm/models.py b/moto/ssm/models.py index 67216972ed05..8da0a97c58fd 100644 --- a/moto/ssm/models.py +++ b/moto/ssm/models.py @@ -270,7 +270,8 @@ def get_invocation(self, instance_id, plugin_name): class SimpleSystemManagerBackend(BaseBackend): - def __init__(self): + def __init__(self, region_name=None): + super(SimpleSystemManagerBackend, self).__init__() # each value is a list of all of the versions for a parameter # to get the current value, grab the last item of the list self._parameters = defaultdict(list) @@ -279,10 +280,12 @@ def __init__(self): self._commands = [] self._errors = [] - # figure out what region we're in - for region, backend in ssm_backends.items(): - if backend == self: - self._region = region + self._region = region_name + + def reset(self): + region_name = self._region + self.__dict__ = {} + self.__init__(region_name) def delete_parameter(self, name): return self._parameters.pop(name, None) @@ -805,4 +808,4 @@ def get_command_invocation(self, **kwargs): ssm_backends = {} for region, ec2_backend in ec2_backends.items(): - ssm_backends[region] = 
SimpleSystemManagerBackend() + ssm_backends[region] = SimpleSystemManagerBackend(region) From bdc1e93a4f2633198e5a6b089cc4aa51184d2008 Mon Sep 17 00:00:00 2001 From: Alex Bainbridge Date: Mon, 29 Jun 2020 18:20:57 -0400 Subject: [PATCH 413/658] most of testing is done --- moto/ssm/models.py | 109 ++++-- moto/ssm/responses.py | 32 +- tests/test_ssm/__init__.py | 0 tests/test_ssm/test_ssm_docs.py | 460 ++++++++++++++++++++++++ tests/test_ssm/test_templates/good.yaml | 47 +++ 5 files changed, 595 insertions(+), 53 deletions(-) create mode 100644 tests/test_ssm/__init__.py create mode 100644 tests/test_ssm/test_templates/good.yaml diff --git a/moto/ssm/models.py b/moto/ssm/models.py index 713cbd6287e5..45f89fd5c962 100644 --- a/moto/ssm/models.py +++ b/moto/ssm/models.py @@ -14,6 +14,7 @@ import itertools import json import yaml +import hashlib from .utils import parameter_arn from .exceptions import ( @@ -116,8 +117,19 @@ def generate_ssm_doc_param_list(parameters): return None param_list = [] for param_name, param_info in parameters.items(): - param_info["Name"] = param_name - param_list.append(param_info) + final_dict = {} + + final_dict["Name"] = param_name + final_dict["Type"] = param_info["type"] + final_dict["Description"] = param_info["description"] + + if param_info["type"] == "StringList" or param_info["type"] == "StringMap" or param_info["type"] == "MapList": + final_dict["DefaultValue"] = json.dumps(param_info["default"]) + else: + final_dict["DefaultValue"] = str(param_info["default"]) + + param_list.append(final_dict) + return param_list @@ -137,7 +149,7 @@ def __init__(self, name, version_name, content, document_type, document_format, self.status = "Active" self.document_version = document_version self.owner = ACCOUNT_ID - self.created_date = datetime.datetime.now() + self.created_date = datetime.datetime.utcnow().strftime("%Y-%m-%d %H:%M:%S") if document_format == "JSON": try: @@ -155,12 +167,12 @@ def __init__(self, name, version_name, content, document_type, document_format, self.content_json = content_json try: - self.schema_version = content_json["schemaVersion"] + self.schema_version = str(content_json["schemaVersion"]) self.description = content_json.get("description") self.outputs = content_json.get("outputs") self.files = content_json.get("files") # TODO add platformType - self.platform_types = "Not Implemented (moto)" + self.platform_types = ["Not Implemented (moto)"] self.parameter_list = generate_ssm_doc_param_list(content_json.get("parameters")) if self.schema_version == "0.3" or self.schema_version == "2.0" or self.schema_version == "2.2": @@ -430,9 +442,8 @@ def _generate_document_description(self, document): latest = self._documents[document.name]['latest_version'] default_version = self._documents[document.name]["default_version"] - - return { - "Hash": hash, + base = { + "Hash": hashlib.sha256(document.content.encode('utf-8')).hexdigest(), "HashType": "Sha256", "Name": document.name, "Owner": document.owner, @@ -442,11 +453,20 @@ def _generate_document_description(self, document): "Description": document.description, "Parameters": document.parameter_list, "PlatformTypes": document.platform_types, + "DocumentType": document.document_type, "SchemaVersion": document.schema_version, "LatestVersion": latest, "DefaultVersion": default_version, "DocumentFormat": document.document_format } + if document.version_name: + base["VersionName"] = document.version_name + if document.target_type: + base["TargetType"] = document.target_type + if document.tags: + base["Tags"] = 
document.tags + + return base def _generate_document_information(self, ssm_document, document_format): base = { @@ -502,12 +522,13 @@ def create_document(self, content, requires, attachments, name, version_name, do document_format=document_format, requires=requires, attachments=attachments, target_type=target_type, tags=tags) - _validate_document_info(content=content, name=name, document_type=document_type) + _validate_document_info(content=content, name=name, document_type=document_type, + document_format=document_format) - if self._documents.get(ssm_document.Name): - raise DocumentAlreadyExists(f"Document with same name {name} already exists") + if self._documents.get(ssm_document.name): + raise DocumentAlreadyExists(f"The specified document already exists.") - self._documents[ssm_document.Name] = { + self._documents[ssm_document.name] = { "documents": { ssm_document.document_version: ssm_document }, @@ -522,21 +543,24 @@ def delete_document(self, name, document_version, version_name, force): keys_to_delete = set() if documents: - if documents[0].document_type == "ApplicationConfigurationSchema" and not force: + default_version = self._documents[name]["default_version"] + + if documents[default_version].document_type == "ApplicationConfigurationSchema" and not force: raise InvalidDocumentOperation("You attempted to delete a document while it is still shared. " "You must stop sharing the document before you can delete it.") - if document_version and document_version == self._documents[name]["default_version"]: + + if document_version and document_version == default_version: raise InvalidDocumentOperation("Default version of the document can't be deleted.") if document_version or version_name: - for doc_version, document in documents.items(): - if document_version and doc_version == document_version: - keys_to_delete.add(document_version) - continue - if version_name and document.version_name == version_name: - keys_to_delete.add(document_version) - continue + # We delete only a specific version + delete_doc = self._find_document(name, document_version, version_name) + if delete_doc: + keys_to_delete.add(document_version) + else: + raise InvalidDocument("The specified document does not exist.") else: + # We are deleting all versions keys_to_delete = set(documents.keys()) for key in keys_to_delete: @@ -549,7 +573,7 @@ def delete_document(self, name, document_version, version_name, force): def _find_document(self, name, document_version=None, version_name=None, strict=True): if not self._documents.get(name): - raise InvalidDocument(f"Document with name {name} does not exist.") + raise InvalidDocument(f"The specified document does not exist.") documents = self._documents[name]["documents"] ssm_document = None @@ -575,37 +599,43 @@ def _find_document(self, name, document_version=None, version_name=None, strict= break if strict and not ssm_document: - raise InvalidDocument(f"Document with name {name} does not exist.") + raise InvalidDocument(f"The specified document does not exist.") return ssm_document def get_document(self, name, document_version, version_name, document_format): - _validate_document_format(document_format=document_format) ssm_document = self._find_document(name, document_version, version_name) + if not document_format: + document_format = ssm_document.document_format + else: + _validate_document_format(document_format=document_format) return self._generate_document_information(ssm_document, document_format) def update_document_default_version(self, name, document_version): + 
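        # Hedged aside (illustrative values, not lines from this patch):
        # versions are stored under *string* keys ("1", "2", ...), which is
        # why the delete path above recomputes the surviving latest version
        # numerically; a lexicographic max would rank "9" above "10":
        #   str(max(int(k) for k in {"1": 0, "9": 0, "10": 0}))  # -> "10"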
ssm_document = self._find_document(name, document_version=document_version) self._documents[name]["default_version"] = document_version base = { - 'Name': ssm_document.name, - 'DefaultVersion': document_version, + "Name": ssm_document.name, + "DefaultVersion": document_version, } if ssm_document.version_name: - base['DefaultVersionName'] = ssm_document.version_name + base["DefaultVersionName"] = ssm_document.version_name return base def update_document(self, content, attachments, name, version_name, document_version, document_format, target_type): - _validate_document_info(content=content, name=name, document_type=None, strict=False) + _validate_document_info(content=content, name=name, document_type=None, document_format=document_format, + strict=False) + if not self._documents.get(name): raise InvalidDocument("The specified document does not exist.") - if self._documents.get[name]['latest_version'] != document_version or document_version != "$LATEST": + if self._documents[name]['latest_version'] != document_version and document_version != "$LATEST": raise InvalidDocumentVersion("The document version is not valid or does not exist.") - if self._find_document(name, version_name=version_name, strict=False): + if version_name and self._find_document(name, version_name=version_name, strict=False): raise DuplicateDocumentVersionName(f"The specified version name is a duplicate.") old_ssm_document = self._find_document(name) @@ -614,13 +644,14 @@ def update_document(self, content, attachments, name, version_name, document_ver document_type=old_ssm_document.document_type, document_format=document_format, requires=old_ssm_document.requires, attachments=attachments, target_type=target_type, tags=old_ssm_document.tags, - document_version=self._documents.get[name]['latest_version']) + document_version=str(int(self._documents[name]['latest_version']) + 1)) - for doc_version, document in self._documents[name].items(): + for doc_version, document in self._documents[name]['documents'].items(): if document.content == new_ssm_document.content: raise DuplicateDocumentContent("The content of the association document matches another document. " "Change the content of the document and try again.") + self._documents[name]["latest_version"] = str(int(self._documents[name]["latest_version"]) + 1) self._documents[name]["documents"][new_ssm_document.document_version] = new_ssm_document return self._generate_document_description(new_ssm_document) @@ -629,16 +660,22 @@ def describe_document(self, name, document_version, version_name): ssm_document = self._find_document(name, document_version, version_name) return self._generate_document_description(ssm_document) - def list_documents(self, document_filter_list, filters, max_results=10, next_token=0): + def list_documents(self, document_filter_list, filters, max_results=10, next_token="0"): if document_filter_list: raise ValidationException( "DocumentFilterList is deprecated. Instead use Filters." 
) + next_token = int(next_token) results = [] dummy_token_tracker = 0 # Sort to maintain next token adjacency for document_name, document_bundle in sorted(self._documents.items()): + if len(results) == max_results: + # There's still more to go so we need a next token + return results, str(next_token + len(results)) + + if dummy_token_tracker < next_token: dummy_token_tracker = dummy_token_tracker + 1 continue @@ -651,10 +688,8 @@ def list_documents(self, document_filter_list, filters, max_results=10, next_tok else: results.append(self._generate_document_list_information(ssm_doc)) - if len(results) == max_results: - return results, next_token + max_results - - return results + # If we've fallen out of the loop, theres no more documents. No next token. + return results, "" def delete_parameter(self, name): return self._parameters.pop(name, None) diff --git a/moto/ssm/responses.py b/moto/ssm/responses.py index c0e35b914ad7..6d818b065d3b 100644 --- a/moto/ssm/responses.py +++ b/moto/ssm/responses.py @@ -24,7 +24,7 @@ def create_document(self): name = self._get_param("Name") version_name = self._get_param("VersionName") document_type = self._get_param("DocumentType") - document_format = self._get_param("DocumentFormat") + document_format = self._get_param("DocumentFormat", "JSON") target_type = self._get_param("TargetType") tags = self._get_param("Tags") @@ -32,9 +32,9 @@ def create_document(self): name=name, version_name=version_name, document_type=document_type, document_format=document_format, target_type=target_type, tags=tags) - return { + return json.dumps({ 'DocumentDescription': result - } + }) def delete_document(self): name = self._get_param("Name") @@ -44,18 +44,18 @@ def delete_document(self): self.ssm_backend.delete_document(name=name, document_version=document_version, version_name=version_name, force=force) - return {} + return json.dumps({}) def get_document(self): name = self._get_param("Name") version_name = self._get_param("VersionName") document_version = self._get_param("DocumentVersion") - document_format = self._get_param("DocumentFormat") + document_format = self._get_param("DocumentFormat", "JSON") document = self.ssm_backend.get_document(name=name, document_version=document_version, document_format=document_format, version_name=version_name) - return document + return json.dumps(document) def describe_document(self): name = self._get_param("Name") @@ -65,9 +65,9 @@ def describe_document(self): result = self.ssm_backend.describe_document(name=name, document_version=document_version, version_name=version_name) - return { + return json.dumps({ 'Document': result - } + }) def update_document(self): content = self._get_param("Content") @@ -75,39 +75,39 @@ def update_document(self): name = self._get_param("Name") version_name = self._get_param("VersionName") document_version = self._get_param("DocumentVersion") - document_format = self._get_param("DocumentFormat") + document_format = self._get_param("DocumentFormat", "JSON") target_type = self._get_param("TargetType") result = self.ssm_backend.update_document(content=content, attachments=attachments, name=name, version_name=version_name, document_version=document_version, document_format=document_format, target_type=target_type) - return { + return json.dumps({ 'DocumentDescription': result - } + }) def update_document_default_version(self): name = self._get_param("Name") document_version = self._get_param("DocumentVersion") result = self.ssm_backend.update_document_default_version(name=name, 
document_version=document_version) - return { + return json.dumps({ 'Description': result - } + }) def list_documents(self): document_filter_list = self._get_param("DocumentFilterList") filters = self._get_param("Filters") max_results = self._get_param("MaxResults", 10) - next_token = self._get_param("NextToken") + next_token = self._get_param("NextToken", "0") documents, token = self.ssm_backend.list_documents(document_filter_list=document_filter_list, filters=filters, max_results=max_results, next_token=next_token) - return { + return json.dumps({ "DocumentIdentifiers": documents, "NextToken": token - } + }) def _get_param(self, param, default=None): return self.request_params.get(param, default) diff --git a/tests/test_ssm/__init__.py b/tests/test_ssm/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/tests/test_ssm/test_ssm_docs.py b/tests/test_ssm/test_ssm_docs.py index e69de29bb2d1..d8cc90b1383a 100644 --- a/tests/test_ssm/test_ssm_docs.py +++ b/tests/test_ssm/test_ssm_docs.py @@ -0,0 +1,460 @@ +from __future__ import unicode_literals + +import string + +import boto3 +import botocore.exceptions +import sure # noqa +import datetime +import uuid +import json +import pkg_resources +import yaml +import hashlib +import copy +from moto.core import ACCOUNT_ID + +from botocore.exceptions import ClientError, ParamValidationError +from nose.tools import assert_raises + +from moto import mock_ssm, mock_cloudformation + + +def _get_yaml_template(): + template_path = '/'.join(['test_ssm', 'test_templates', 'good.yaml']) + resource_path = pkg_resources.resource_string('tests', template_path) + return resource_path + + +def _validate_document_description(doc_name, doc_description, json_doc, expected_document_version, + expected_latest_version, expected_default_version, expected_format): + + if expected_format == "JSON": + doc_description["Hash"].should.equal(hashlib.sha256(json.dumps(json_doc).encode('utf-8')).hexdigest()) + else: + doc_description["Hash"].should.equal(hashlib.sha256(yaml.dump(json_doc).encode('utf-8')).hexdigest()) + + doc_description["HashType"].should.equal("Sha256") + doc_description["Name"].should.equal(doc_name) + doc_description["Owner"].should.equal(ACCOUNT_ID) + + difference = datetime.datetime.utcnow() - doc_description["CreatedDate"] + if difference.min > datetime.timedelta(minutes=1): + assert False + + doc_description["Status"].should.equal("Active") + doc_description["DocumentVersion"].should.equal(expected_document_version) + doc_description["Description"].should.equal(json_doc["description"]) + + doc_description["Parameters"][0]["Name"].should.equal("Parameter1") + doc_description["Parameters"][0]["Type"].should.equal("Integer") + doc_description["Parameters"][0]["Description"].should.equal("Command Duration.") + doc_description["Parameters"][0]["DefaultValue"].should.equal("3") + + doc_description["Parameters"][1]["Name"].should.equal("Parameter2") + doc_description["Parameters"][1]["Type"].should.equal("String") + doc_description["Parameters"][1]["DefaultValue"].should.equal("def") + + doc_description["Parameters"][2]["Name"].should.equal("Parameter3") + doc_description["Parameters"][2]["Type"].should.equal("Boolean") + doc_description["Parameters"][2]["Description"].should.equal("A boolean") + doc_description["Parameters"][2]["DefaultValue"].should.equal("False") + + doc_description["Parameters"][3]["Name"].should.equal("Parameter4") + doc_description["Parameters"][3]["Type"].should.equal("StringList") + 
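    # Aside (hedged): DefaultValue is a plain string field, so the backend
    # json-encodes list- and map-typed defaults; that is why the assertions
    # below compare against JSON text or decode it first, e.g.
    #   json.loads('["abc", "def"]') == ["abc", "def"]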
doc_description["Parameters"][3]["Description"].should.equal("A string list") + doc_description["Parameters"][3]["DefaultValue"].should.equal("[\"abc\", \"def\"]") + + doc_description["Parameters"][4]["Name"].should.equal("Parameter5") + doc_description["Parameters"][4]["Type"].should.equal("StringMap") + + doc_description["Parameters"][5]["Name"].should.equal("Parameter6") + doc_description["Parameters"][5]["Type"].should.equal("MapList") + + if expected_format == "JSON": + # We have to replace single quotes from the response to package it back up + json.loads(doc_description["Parameters"][4]["DefaultValue"]).should.equal( + {'NotificationArn': '$dependency.topicArn', + 'NotificationEvents': ['Failed'], + 'NotificationType': 'Command'}) + + json.loads(doc_description["Parameters"][5]["DefaultValue"]).should.equal( + [{'DeviceName': '/dev/sda1', 'Ebs': {'VolumeSize': '50'}}, + {'DeviceName': '/dev/sdm', 'Ebs': {'VolumeSize': '100'}}] + ) + else: + yaml.safe_load(doc_description["Parameters"][4]["DefaultValue"]).should.equal( + {'NotificationArn': '$dependency.topicArn', + 'NotificationEvents': ['Failed'], + 'NotificationType': 'Command'}) + yaml.safe_load(doc_description["Parameters"][5]["DefaultValue"]).should.equal( + [{'DeviceName': '/dev/sda1', 'Ebs': {'VolumeSize': '50'}}, + {'DeviceName': '/dev/sdm', 'Ebs': {'VolumeSize': '100'}}] + ) + + doc_description["DocumentType"].should.equal("Command") + doc_description["SchemaVersion"].should.equal("2.2") + doc_description["LatestVersion"].should.equal(expected_latest_version) + doc_description["DefaultVersion"].should.equal(expected_default_version) + doc_description["DocumentFormat"].should.equal(expected_format) + +# Done +@mock_ssm +def test_create_document(): + template_file = _get_yaml_template() + json_doc = yaml.safe_load(template_file) + + client = boto3.client("ssm", region_name="us-east-1") + + response = client.create_document( + Content=yaml.dump(json_doc), Name="TestDocument", DocumentType="Command", DocumentFormat="YAML" + ) + doc_description = response["DocumentDescription"] + _validate_document_description("TestDocument", doc_description, json_doc, "1", "1", "1", "YAML") + + response = client.create_document( + Content=json.dumps(json_doc), Name="TestDocument2", DocumentType="Command", DocumentFormat="JSON" + ) + doc_description = response["DocumentDescription"] + _validate_document_description("TestDocument2", doc_description, json_doc, "1", "1", "1", "JSON") + + response = client.create_document( + Content=json.dumps(json_doc), Name="TestDocument3", DocumentType="Command", DocumentFormat="JSON", + VersionName="Base", TargetType="/AWS::EC2::Instance", Tags=[{'Key': 'testing', 'Value': 'testingValue'}] + ) + doc_description = response["DocumentDescription"] + doc_description["VersionName"].should.equal("Base") + doc_description["TargetType"].should.equal("/AWS::EC2::Instance") + doc_description["Tags"].should.equal([{'Key': 'testing', 'Value': 'testingValue'}]) + + _validate_document_description("TestDocument3", doc_description, json_doc, "1", "1", "1", "JSON") + + +@mock_ssm +def test_get_document(): + template_file = _get_yaml_template() + json_doc = yaml.safe_load(template_file) + + client = boto3.client("ssm", region_name="us-east-1") + + try: + client.get_document(Name="DNE") + raise RuntimeError("Should fail") + except botocore.exceptions.ClientError as err: + err.operation_name.should.equal("GetDocument") + err.response["Error"]["Message"].should.equal("The specified document does not exist.") + + 
client.create_document( + Content=yaml.dump(json_doc), Name="TestDocument3", DocumentType="Command", DocumentFormat="YAML", + VersionName="Base" + ) + + response = client.get_document(Name="TestDocument3") + response["Name"].should.equal("TestDocument3") + response["VersionName"].should.equal("Base") + response["DocumentVersion"].should.equal("1") + response["Status"].should.equal("Active") + response["Content"].should.equal(yaml.dump(json_doc)) + response["DocumentType"].should.equal("Command") + response["DocumentFormat"].should.equal("YAML") + + response = client.get_document(Name="TestDocument3", DocumentFormat="YAML") + response["Name"].should.equal("TestDocument3") + response["VersionName"].should.equal("Base") + response["DocumentVersion"].should.equal("1") + response["Status"].should.equal("Active") + response["Content"].should.equal(yaml.dump(json_doc)) + response["DocumentType"].should.equal("Command") + response["DocumentFormat"].should.equal("YAML") + + response = client.get_document(Name="TestDocument3", DocumentFormat="JSON") + response["Name"].should.equal("TestDocument3") + response["VersionName"].should.equal("Base") + response["DocumentVersion"].should.equal("1") + response["Status"].should.equal("Active") + response["Content"].should.equal(json.dumps(json_doc)) + response["DocumentType"].should.equal("Command") + response["DocumentFormat"].should.equal("JSON") + + # response = client.get_document(Name="TestDocument3", VersionName="Base") + # response = client.get_document(Name="TestDocument3", DocumentVersion="1") + + # response = client.get_document(Name="TestDocument3", DocumentVersion="2") + # response = client.get_document(Name="TestDocument3", VersionName="Base", DocumentVersion="2") + # response = client.get_document(Name="TestDocument3", DocumentFormat="YAML") + # response = client.get_document(Name="TestDocument3", DocumentFormat="JSON") + +@mock_ssm +def test_delete_document(): + template_file = _get_yaml_template() + json_doc = yaml.safe_load(template_file) + client = boto3.client("ssm", region_name="us-east-1") + + # Test simple + client.create_document( + Content=yaml.dump(json_doc), Name="TestDocument3", DocumentType="Command", DocumentFormat="YAML", + VersionName="Base", TargetType="/AWS::EC2::Instance" + ) + response = client.delete_document(Name="TestDocument3") + # response = client.get_document(Name="TestDocument3") + # + # # Test re-use + # client.create_document( + # Content=yaml.dump(json_doc), Name="TestDocument3", DocumentType="Command", DocumentFormat="YAML", + # VersionName="Base", TargetType="/AWS::EC2::Instance" + # ) + # response = client.get_document(Name="TestDocument3") + + # updates + + # We update default_version here to test some other cases around deleting specific versions + # response = client.update_document_default_version( + # Name="TestDocument3", + # DocumentVersion=2 + # ) + # + # response = client.delete_document(Name="TestDocument3", DocumentVersion="4") + # response = client.get_document(Name="TestDocument3") + # response = client.get_document(Name="TestDocument3", DocumentVersion="4") + # + # # Both filters should match in order to delete + # response = client.delete_document(Name="TestDocument3", DocumentVersion="1", VersionName="NotVersion") + # response = client.get_document(Name="TestDocument3") + # response = client.get_document(Name="TestDocument3", DocumentVersion="1") + # + # response = client.delete_document(Name="TestDocument3", DocumentVersion="1", VersionName="RealVersion") + # response = 
client.get_document(Name="TestDocument3") + # response = client.get_document(Name="TestDocument3", DocumentVersion="1") + # + # # AWS doesn't allow deletion of default version if other versions are left + # response = client.delete_document(Name="TestDocument3", DocumentVersion="2") + # + # response = client.delete_document(Name="TestDocument3") + # response = client.get_document(Name="TestDocument3") + # response = client.get_document(Name="TestDocument3", DocumentVersion="3") + +# Done +@mock_ssm +def test_update_document_default_version(): + template_file = _get_yaml_template() + json_doc = yaml.safe_load(template_file) + client = boto3.client("ssm", region_name="us-east-1") + + try: + client.update_document_default_version(Name="DNE", DocumentVersion="1") + raise RuntimeError("Should fail") + except botocore.exceptions.ClientError as err: + err.operation_name.should.equal("UpdateDocumentDefaultVersion") + err.response["Error"]["Message"].should.equal("The specified document does not exist.") + + client.create_document( + Content=json.dumps(json_doc), Name="TestDocument", DocumentType="Command", VersionName="Base" + ) + + json_doc['description'] = "a new description" + + client.update_document( + Content=json.dumps(json_doc), Name="TestDocument", DocumentVersion="$LATEST", + DocumentFormat="JSON" + ) + + json_doc['description'] = "a new description2" + + client.update_document( + Content=json.dumps(json_doc), Name="TestDocument", DocumentVersion="$LATEST" + ) + + response = client.update_document_default_version( + Name="TestDocument", + DocumentVersion="2" + ) + response["Description"]["Name"].should.equal("TestDocument") + response["Description"]["DefaultVersion"].should.equal("2") + + json_doc['description'] = "a new description3" + + client.update_document( + Content=json.dumps(json_doc), Name="TestDocument", DocumentVersion="$LATEST", VersionName="NewBase" + ) + + response = client.update_document_default_version( + Name="TestDocument", + DocumentVersion="4" + ) + response["Description"]["Name"].should.equal("TestDocument") + response["Description"]["DefaultVersion"].should.equal("4") + response["Description"]["DefaultVersionName"].should.equal("NewBase") + +# Done +@mock_ssm +def test_update_document(): + template_file = _get_yaml_template() + json_doc = yaml.safe_load(template_file) + + client = boto3.client("ssm", region_name="us-east-1") + + try: + client.update_document(Name="DNE", Content=json.dumps(json_doc), DocumentVersion="1", DocumentFormat="JSON") + raise RuntimeError("Should fail") + except botocore.exceptions.ClientError as err: + err.operation_name.should.equal("UpdateDocument") + err.response["Error"]["Message"].should.equal("The specified document does not exist.") + + client.create_document( + Content=json.dumps(json_doc), Name="TestDocument", DocumentType="Command", DocumentFormat="JSON", + VersionName="Base" + ) + + # Duplicate content throws an error + try: + client.update_document( + Content=json.dumps(json_doc), Name="TestDocument", DocumentVersion="1", DocumentFormat="JSON" + ) + raise RuntimeError("Should fail") + except botocore.exceptions.ClientError as err: + err.operation_name.should.equal("UpdateDocument") + err.response["Error"]["Message"].should.equal("The content of the association document matches another " + "document. 
Change the content of the document and try again.") + + json_doc['description'] = "a new description" + # Duplicate version name + try: + client.update_document( + Content=json.dumps(json_doc), Name="TestDocument", DocumentVersion="1", DocumentFormat="JSON", + VersionName="Base" + ) + raise RuntimeError("Should fail") + except botocore.exceptions.ClientError as err: + err.operation_name.should.equal("UpdateDocument") + err.response["Error"]["Message"].should.equal("The specified version name is a duplicate.") + + response = client.update_document( + Content=json.dumps(json_doc), Name="TestDocument", VersionName="Base2", DocumentVersion="1", + DocumentFormat="JSON" + ) + response["DocumentDescription"]["Description"].should.equal("a new description") + response["DocumentDescription"]["DocumentVersion"].should.equal("2") + response["DocumentDescription"]["LatestVersion"].should.equal("2") + response["DocumentDescription"]["DefaultVersion"].should.equal("1") + + json_doc['description'] = "a new description2" + + response = client.update_document( + Content=json.dumps(json_doc), Name="TestDocument", DocumentVersion="$LATEST", + DocumentFormat="JSON", VersionName="NewBase" + ) + response["DocumentDescription"]["Description"].should.equal("a new description2") + response["DocumentDescription"]["DocumentVersion"].should.equal("3") + response["DocumentDescription"]["LatestVersion"].should.equal("3") + response["DocumentDescription"]["DefaultVersion"].should.equal("1") + response["DocumentDescription"]["VersionName"].should.equal("NewBase") + +# Done +@mock_ssm +def test_describe_document(): + template_file = _get_yaml_template() + json_doc = yaml.safe_load(template_file) + client = boto3.client("ssm", region_name="us-east-1") + + try: + client.describe_document(Name="DNE") + raise RuntimeError("Should fail") + except botocore.exceptions.ClientError as err: + err.operation_name.should.equal("DescribeDocument") + err.response["Error"]["Message"].should.equal("The specified document does not exist.") + + client.create_document( + Content=yaml.dump(json_doc), Name="TestDocument", DocumentType="Command", DocumentFormat="YAML", + VersionName="Base", TargetType="/AWS::EC2::Instance", Tags=[{'Key': 'testing', 'Value': 'testingValue'}] + ) + response = client.describe_document(Name="TestDocument") + doc_description=response['Document'] + _validate_document_description("TestDocument", doc_description, json_doc, "1", "1", "1", "YAML") + + # Adding update to check for issues + new_json_doc = copy.copy(json_doc) + new_json_doc['description'] = "a new description2" + + client.update_document( + Content=json.dumps(new_json_doc), Name="TestDocument", DocumentVersion="$LATEST" + ) + response = client.describe_document(Name="TestDocument") + doc_description = response['Document'] + _validate_document_description("TestDocument", doc_description, json_doc, "1", "2", "1", "YAML") + +# Done +@mock_ssm +def test_list_documents(): + template_file = _get_yaml_template() + json_doc = yaml.safe_load(template_file) + + client = boto3.client("ssm", region_name="us-east-1") + + client.create_document( + Content=json.dumps(json_doc), Name="TestDocument", DocumentType="Command", DocumentFormat="JSON" + ) + client.create_document( + Content=json.dumps(json_doc), Name="TestDocument2", DocumentType="Command", DocumentFormat="JSON" + ) + client.create_document( + Content=json.dumps(json_doc), Name="TestDocument3", DocumentType="Command", DocumentFormat="JSON" + ) + + response = client.list_documents() + 
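    # Aside (hedged sketch, not executed here): the NextToken contract being
    # exercised below supports the usual paging loop, which stops on the
    # empty token:
    #   docs, token = [], "0"
    #   while True:
    #       page = client.list_documents(MaxResults=1, NextToken=token)
    #       docs += page["DocumentIdentifiers"]
    #       token = page["NextToken"]
    #       if not token:
    #           break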
len(response['DocumentIdentifiers']).should.equal(3) + response['DocumentIdentifiers'][0]["Name"].should.equal("TestDocument") + response['DocumentIdentifiers'][1]["Name"].should.equal("TestDocument2") + response['DocumentIdentifiers'][2]["Name"].should.equal("TestDocument3") + response['NextToken'].should.equal("") + + response = client.list_documents(MaxResults=1) + len(response['DocumentIdentifiers']).should.equal(1) + response['DocumentIdentifiers'][0]["Name"].should.equal("TestDocument") + response['DocumentIdentifiers'][0]["DocumentVersion"].should.equal("1") + response['NextToken'].should.equal("1") + + response = client.list_documents(MaxResults=1, NextToken=response['NextToken']) + len(response['DocumentIdentifiers']).should.equal(1) + response['DocumentIdentifiers'][0]["Name"].should.equal("TestDocument2") + response['DocumentIdentifiers'][0]["DocumentVersion"].should.equal("1") + response['NextToken'].should.equal("2") + + response = client.list_documents(MaxResults=1, NextToken=response['NextToken']) + len(response['DocumentIdentifiers']).should.equal(1) + response['DocumentIdentifiers'][0]["Name"].should.equal("TestDocument3") + response['DocumentIdentifiers'][0]["DocumentVersion"].should.equal("1") + response['NextToken'].should.equal("") + + # making sure no bad interactions with update + json_doc['description'] = "a new description" + client.update_document( + Content=json.dumps(json_doc), Name="TestDocument", DocumentVersion="$LATEST", + DocumentFormat="JSON" + ) + + client.update_document( + Content=json.dumps(json_doc), Name="TestDocument2", DocumentVersion="$LATEST", + DocumentFormat="JSON" + ) + + response = client.update_document_default_version( + Name="TestDocument", + DocumentVersion="2" + ) + + response = client.list_documents() + len(response['DocumentIdentifiers']).should.equal(3) + response['DocumentIdentifiers'][0]["Name"].should.equal("TestDocument") + response['DocumentIdentifiers'][0]["DocumentVersion"].should.equal("2") + + response['DocumentIdentifiers'][1]["Name"].should.equal("TestDocument2") + response['DocumentIdentifiers'][1]["DocumentVersion"].should.equal("1") + + response['DocumentIdentifiers'][2]["Name"].should.equal("TestDocument3") + response['DocumentIdentifiers'][2]["DocumentVersion"].should.equal("1") + response['NextToken'].should.equal("") + + + + + diff --git a/tests/test_ssm/test_templates/good.yaml b/tests/test_ssm/test_templates/good.yaml new file mode 100644 index 000000000000..7f0372f3a984 --- /dev/null +++ b/tests/test_ssm/test_templates/good.yaml @@ -0,0 +1,47 @@ +schemaVersion: "2.2" +description: "Sample Yaml" +parameters: + Parameter1: + type: "Integer" + default: 3 + description: "Command Duration." 
+    allowedValues: [1,2,3,4]
+  Parameter2:
+    type: "String"
+    default: "def"
+    description:
+    allowedValues: ["abc", "def", "ghi"]
+    allowedPattern: '^[a-zA-Z0-9_\-.]{3,128}$'
+  Parameter3:
+    type: "Boolean"
+    default: false
+    description: "A boolean"
+    allowedValues: [True, False]
+  Parameter4:
+    type: "StringList"
+    default: ["abc", "def"]
+    description: "A string list"
+  Parameter5:
+    type: "StringMap"
+    default:
+      NotificationType: Command
+      NotificationEvents:
+      - Failed
+      NotificationArn: "$dependency.topicArn"
+    description:
+  Parameter6:
+    type: "MapList"
+    default:
+    - DeviceName: "/dev/sda1"
+      Ebs:
+        VolumeSize: '50'
+    - DeviceName: "/dev/sdm"
+      Ebs:
+        VolumeSize: '100'
+    description:
+mainSteps:
+  - action: "aws:runShellScript"
+    name: "sampleCommand"
+    inputs:
+      runCommand:
+        - "echo hi"

From 8ff32bf4fab28b02877bce5e2b414805c3969ef4 Mon Sep 17 00:00:00 2001
From: Bert Blommers
Date: Tue, 30 Jun 2020 15:00:08 +0100
Subject: [PATCH 414/658] Append region-info to UserAgent-header, if it
 already exists

---
 moto/core/models.py                      |  6 ++++-
 tests/test_cognitoidp/test_cognitoidp.py | 32 ++++++++++++++++++++++++
 2 files changed, 37 insertions(+), 1 deletion(-)

diff --git a/moto/core/models.py b/moto/core/models.py
index d7f96fe37942..26ee1a1f5a29 100644
--- a/moto/core/models.py
+++ b/moto/core/models.py
@@ -419,7 +419,11 @@ def enable_patching(self):
         def fake_boto3_client(*args, **kwargs):
             region = self._get_region(*args, **kwargs)
             if region:
-                kwargs["config"] = Config(user_agent_extra="region/" + region)
+                if "config" in kwargs:
+                    kwargs["config"].__dict__["user_agent_extra"] += " region/" + region
+                else:
+                    config = Config(user_agent_extra="region/" + region)
+                    kwargs["config"] = config
             if "endpoint_url" not in kwargs:
                 kwargs["endpoint_url"] = "http://localhost:5000"
             return real_boto3_client(*args, **kwargs)
diff --git a/tests/test_cognitoidp/test_cognitoidp.py b/tests/test_cognitoidp/test_cognitoidp.py
index 37e1a56a3540..9c4b8de495d4 100644
--- a/tests/test_cognitoidp/test_cognitoidp.py
+++ b/tests/test_cognitoidp/test_cognitoidp.py
@@ -1243,6 +1243,38 @@ def test_change_password():
     result["AuthenticationResult"].should_not.be.none
 
 
+@mock_cognitoidp
+def test_change_password__using_custom_user_agent_header():
+    # https://github.com/spulec/moto/issues/3098
+    # As the admin_initiate_auth method is unauthenticated, we use the user-agent header to pass in the region.
+    # This test verifies that the region still arrives even when we pass in our own user-agent header.
+    from botocore.config import Config
+
+    my_config = Config(user_agent_extra="more/info", signature_version="v4")
+    conn = boto3.client("cognito-idp", "us-west-2", config=my_config)
+
+    outputs = authentication_flow(conn)
+
+    # Take this opportunity to test change_password, which requires an access token.
+    newer_password = str(uuid.uuid4())
+    conn.change_password(
+        AccessToken=outputs["access_token"],
+        PreviousPassword=outputs["password"],
+        ProposedPassword=newer_password,
+    )
+
+    # Log in again, which should succeed without a challenge because the user is no
+    # longer in the force-new-password state.
+ result = conn.admin_initiate_auth( + UserPoolId=outputs["user_pool_id"], + ClientId=outputs["client_id"], + AuthFlow="ADMIN_NO_SRP_AUTH", + AuthParameters={"USERNAME": outputs["username"], "PASSWORD": newer_password}, + ) + + result["AuthenticationResult"].should_not.be.none + + @mock_cognitoidp def test_forgot_password(): conn = boto3.client("cognito-idp", "us-west-2") From 82825787dbe5597c9e654448d6e8206ec541f6d3 Mon Sep 17 00:00:00 2001 From: Alex Bainbridge Date: Tue, 30 Jun 2020 12:39:52 -0400 Subject: [PATCH 415/658] all tests passing --- moto/ssm/models.py | 23 +++- tests/test_ssm/test_ssm_docs.py | 227 +++++++++++++++++++++++--------- 2 files changed, 181 insertions(+), 69 deletions(-) diff --git a/moto/ssm/models.py b/moto/ssm/models.py index 45f89fd5c962..3fa71b2aa623 100644 --- a/moto/ssm/models.py +++ b/moto/ssm/models.py @@ -475,7 +475,7 @@ def _generate_document_information(self, ssm_document, document_format): "Status": ssm_document.status, "Content": ssm_document.content, "DocumentType": ssm_document.document_type, - "DocumentFormat": ssm_document.document_format + "DocumentFormat": document_format } if document_format == "JSON": @@ -555,8 +555,13 @@ def delete_document(self, name, document_version, version_name, force): if document_version or version_name: # We delete only a specific version delete_doc = self._find_document(name, document_version, version_name) + + # we can't delete only the default version + if delete_doc and delete_doc.document_version == default_version and len(documents) != 1: + raise InvalidDocumentOperation("Default version of the document can't be deleted.") + if delete_doc: - keys_to_delete.add(document_version) + keys_to_delete.add(delete_doc.document_version) else: raise InvalidDocument("The specified document does not exist.") else: @@ -564,10 +569,20 @@ def delete_document(self, name, document_version, version_name, force): keys_to_delete = set(documents.keys()) for key in keys_to_delete: - self._documents[name]["documents"][key] = None + del self._documents[name]["documents"][key] + + keys = self._documents[name]["documents"].keys() if len(self._documents[name]["documents"].keys()) == 0: - self._documents[name] = None + del self._documents[name] + else: + old_latest = self._documents[name]["latest_version"] + if old_latest not in self._documents[name]["documents"].keys(): + leftover_keys = self._documents[name]["documents"].keys() + int_keys = [] + for key in leftover_keys: + int_keys.append(int(key)) + self._documents[name]["latest_version"] = str(sorted(int_keys)[-1]) else: raise InvalidDocument("The specified document does not exist.") diff --git a/tests/test_ssm/test_ssm_docs.py b/tests/test_ssm/test_ssm_docs.py index d8cc90b1383a..ac5460f9d387 100644 --- a/tests/test_ssm/test_ssm_docs.py +++ b/tests/test_ssm/test_ssm_docs.py @@ -98,6 +98,19 @@ def _validate_document_description(doc_name, doc_description, json_doc, expected doc_description["DefaultVersion"].should.equal(expected_default_version) doc_description["DocumentFormat"].should.equal(expected_format) +def _get_doc_validator(response, version_name, doc_version, json_doc_content, document_format): + response["Name"].should.equal("TestDocument3") + if version_name: + response["VersionName"].should.equal(version_name) + response["DocumentVersion"].should.equal(doc_version) + response["Status"].should.equal("Active") + if document_format == "JSON": + json.loads(response["Content"]).should.equal(json_doc_content) + else: + 
yaml.safe_load(response["Content"]).should.equal(json_doc_content) + response["DocumentType"].should.equal("Command") + response["DocumentFormat"].should.equal(document_format) + # Done @mock_ssm def test_create_document(): @@ -129,7 +142,7 @@ def test_create_document(): _validate_document_description("TestDocument3", doc_description, json_doc, "1", "1", "1", "JSON") - +# Done @mock_ssm def test_get_document(): template_file = _get_yaml_template() @@ -149,40 +162,59 @@ def test_get_document(): VersionName="Base" ) + new_json_doc = copy.copy(json_doc) + new_json_doc['description'] = "a new description" + + client.update_document( + Content=json.dumps(new_json_doc), Name="TestDocument3", DocumentVersion="$LATEST", VersionName="NewBase" + ) + response = client.get_document(Name="TestDocument3") - response["Name"].should.equal("TestDocument3") - response["VersionName"].should.equal("Base") - response["DocumentVersion"].should.equal("1") - response["Status"].should.equal("Active") - response["Content"].should.equal(yaml.dump(json_doc)) - response["DocumentType"].should.equal("Command") - response["DocumentFormat"].should.equal("YAML") + _get_doc_validator(response, "Base", "1", json_doc, "JSON") response = client.get_document(Name="TestDocument3", DocumentFormat="YAML") - response["Name"].should.equal("TestDocument3") - response["VersionName"].should.equal("Base") - response["DocumentVersion"].should.equal("1") - response["Status"].should.equal("Active") - response["Content"].should.equal(yaml.dump(json_doc)) - response["DocumentType"].should.equal("Command") - response["DocumentFormat"].should.equal("YAML") + _get_doc_validator(response, "Base", "1", json_doc, "YAML") response = client.get_document(Name="TestDocument3", DocumentFormat="JSON") - response["Name"].should.equal("TestDocument3") - response["VersionName"].should.equal("Base") - response["DocumentVersion"].should.equal("1") - response["Status"].should.equal("Active") - response["Content"].should.equal(json.dumps(json_doc)) - response["DocumentType"].should.equal("Command") - response["DocumentFormat"].should.equal("JSON") + _get_doc_validator(response, "Base", "1", json_doc, "JSON") - # response = client.get_document(Name="TestDocument3", VersionName="Base") - # response = client.get_document(Name="TestDocument3", DocumentVersion="1") + response = client.get_document(Name="TestDocument3", VersionName="Base") + _get_doc_validator(response, "Base", "1", json_doc, "JSON") - # response = client.get_document(Name="TestDocument3", DocumentVersion="2") - # response = client.get_document(Name="TestDocument3", VersionName="Base", DocumentVersion="2") - # response = client.get_document(Name="TestDocument3", DocumentFormat="YAML") - # response = client.get_document(Name="TestDocument3", DocumentFormat="JSON") + response = client.get_document(Name="TestDocument3", DocumentVersion="1") + _get_doc_validator(response, "Base", "1", json_doc, "JSON") + + response = client.get_document(Name="TestDocument3", DocumentVersion="2") + _get_doc_validator(response, "NewBase", "2", new_json_doc, "JSON") + + response = client.get_document(Name="TestDocument3", VersionName="NewBase") + _get_doc_validator(response, "NewBase", "2", new_json_doc, "JSON") + + response = client.get_document(Name="TestDocument3", VersionName="NewBase", DocumentVersion="2") + _get_doc_validator(response, "NewBase", "2", new_json_doc, "JSON") + + try: + response = client.get_document(Name="TestDocument3", VersionName="BadName", DocumentVersion="2") + raise RuntimeError("Should fail") + 
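    # When both VersionName and DocumentVersion are supplied, moto's
    # _find_document (see the models.py hunks in the linting patch below)
    # only matches a version that satisfies BOTH filters, so "BadName"
    # paired with an existing version "2" still raises InvalidDocument.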
except botocore.exceptions.ClientError as err: + err.operation_name.should.equal("GetDocument") + err.response["Error"]["Message"].should.equal("The specified document does not exist.") + + try: + response = client.get_document(Name="TestDocument3", DocumentVersion="3") + raise RuntimeError("Should fail") + except botocore.exceptions.ClientError as err: + err.operation_name.should.equal("GetDocument") + err.response["Error"]["Message"].should.equal("The specified document does not exist.") + + # Updating default should update normal get + client.update_document_default_version( + Name="TestDocument3", + DocumentVersion="2" + ) + + response = client.get_document(Name="TestDocument3", DocumentFormat="JSON") + _get_doc_validator(response, "NewBase", "2", new_json_doc, "JSON") @mock_ssm def test_delete_document(): @@ -190,48 +222,113 @@ def test_delete_document(): json_doc = yaml.safe_load(template_file) client = boto3.client("ssm", region_name="us-east-1") + try: + client.delete_document(Name="DNE") + raise RuntimeError("Should fail") + except botocore.exceptions.ClientError as err: + err.operation_name.should.equal("DeleteDocument") + err.response["Error"]["Message"].should.equal("The specified document does not exist.") + # Test simple client.create_document( Content=yaml.dump(json_doc), Name="TestDocument3", DocumentType="Command", DocumentFormat="YAML", VersionName="Base", TargetType="/AWS::EC2::Instance" ) - response = client.delete_document(Name="TestDocument3") - # response = client.get_document(Name="TestDocument3") - # - # # Test re-use - # client.create_document( - # Content=yaml.dump(json_doc), Name="TestDocument3", DocumentType="Command", DocumentFormat="YAML", - # VersionName="Base", TargetType="/AWS::EC2::Instance" - # ) - # response = client.get_document(Name="TestDocument3") - - # updates - - # We update default_version here to test some other cases around deleting specific versions - # response = client.update_document_default_version( - # Name="TestDocument3", - # DocumentVersion=2 - # ) - # - # response = client.delete_document(Name="TestDocument3", DocumentVersion="4") - # response = client.get_document(Name="TestDocument3") - # response = client.get_document(Name="TestDocument3", DocumentVersion="4") - # - # # Both filters should match in order to delete - # response = client.delete_document(Name="TestDocument3", DocumentVersion="1", VersionName="NotVersion") - # response = client.get_document(Name="TestDocument3") - # response = client.get_document(Name="TestDocument3", DocumentVersion="1") - # - # response = client.delete_document(Name="TestDocument3", DocumentVersion="1", VersionName="RealVersion") - # response = client.get_document(Name="TestDocument3") - # response = client.get_document(Name="TestDocument3", DocumentVersion="1") - # - # # AWS doesn't allow deletion of default version if other versions are left - # response = client.delete_document(Name="TestDocument3", DocumentVersion="2") - # - # response = client.delete_document(Name="TestDocument3") - # response = client.get_document(Name="TestDocument3") - # response = client.get_document(Name="TestDocument3", DocumentVersion="3") + client.delete_document(Name="TestDocument3") + + try: + client.get_document(Name="TestDocument3") + raise RuntimeError("Should fail") + except botocore.exceptions.ClientError as err: + err.operation_name.should.equal("GetDocument") + err.response["Error"]["Message"].should.equal("The specified document does not exist.") + + + # Delete default version with other version is bad + 
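    # The guard exercised below was added to SSMBackend.delete_document in
    # the patch above: deleting by DocumentVersion/VersionName is refused when
    # it targets the default version while other versions remain, so the test
    # first stacks four updated versions on top of default version "1".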
client.create_document( + Content=yaml.dump(json_doc), Name="TestDocument3", DocumentType="Command", DocumentFormat="YAML", + VersionName="Base", TargetType="/AWS::EC2::Instance" + ) + + new_json_doc = copy.copy(json_doc) + new_json_doc['description'] = "a new description" + + client.update_document( + Content=json.dumps(new_json_doc), Name="TestDocument3", DocumentVersion="$LATEST", VersionName="NewBase" + ) + + new_json_doc['description'] = "a new description2" + client.update_document( + Content=json.dumps(new_json_doc), Name="TestDocument3", DocumentVersion="$LATEST" + ) + + new_json_doc['description'] = "a new description3" + client.update_document( + Content=json.dumps(new_json_doc), Name="TestDocument3", DocumentVersion="$LATEST" + ) + + new_json_doc['description'] = "a new description4" + client.update_document( + Content=json.dumps(new_json_doc), Name="TestDocument3", DocumentVersion="$LATEST" + ) + + + try: + client.delete_document(Name="TestDocument3", DocumentVersion="1") + raise RuntimeError("Should fail") + except botocore.exceptions.ClientError as err: + err.operation_name.should.equal("DeleteDocument") + err.response["Error"]["Message"].should.equal("Default version of the document can't be deleted.") + + try: + client.delete_document(Name="TestDocument3", VersionName="Base") + raise RuntimeError("Should fail") + except botocore.exceptions.ClientError as err: + err.operation_name.should.equal("DeleteDocument") + err.response["Error"]["Message"].should.equal("Default version of the document can't be deleted.") + + # Make sure no ill side effects + response = client.get_document(Name="TestDocument3") + _get_doc_validator(response, "Base", "1", json_doc, "JSON") + + client.delete_document(Name="TestDocument3", DocumentVersion="5") + + # Check that latest version is changed + response = client.describe_document(Name="TestDocument3") + response["Document"]["LatestVersion"].should.equal("4") + + client.delete_document(Name="TestDocument3", VersionName="NewBase") + + # Make sure other versions okay + client.get_document(Name="TestDocument3", DocumentVersion="1") + client.get_document(Name="TestDocument3", DocumentVersion="3") + client.get_document(Name="TestDocument3", DocumentVersion="4") + + client.delete_document(Name="TestDocument3") + + try: + client.get_document(Name="TestDocument3", DocumentVersion="1") + raise RuntimeError("Should fail") + except botocore.exceptions.ClientError as err: + err.operation_name.should.equal("GetDocument") + err.response["Error"]["Message"].should.equal("The specified document does not exist.") + + try: + client.get_document(Name="TestDocument3", DocumentVersion="3") + raise RuntimeError("Should fail") + except botocore.exceptions.ClientError as err: + err.operation_name.should.equal("GetDocument") + err.response["Error"]["Message"].should.equal("The specified document does not exist.") + + try: + client.get_document(Name="TestDocument3", DocumentVersion="4") + raise RuntimeError("Should fail") + except botocore.exceptions.ClientError as err: + err.operation_name.should.equal("GetDocument") + err.response["Error"]["Message"].should.equal("The specified document does not exist.") + + response = client.list_documents() + len(response['DocumentIdentifiers']).should.equal(0) # Done @mock_ssm From c9b38e25b80b9127b6dfa006e9e2845491db0b47 Mon Sep 17 00:00:00 2001 From: Alex Bainbridge Date: Tue, 30 Jun 2020 12:43:42 -0400 Subject: [PATCH 416/658] black linting --- moto/ssm/exceptions.py | 13 +- moto/ssm/models.py | 359 ++++++++++++++++++--------- 
moto/ssm/responses.py | 80 +++--- tests/test_ssm/test_ssm_docs.py | 424 +++++++++++++++++++++----------- 4 files changed, 578 insertions(+), 298 deletions(-) diff --git a/moto/ssm/exceptions.py b/moto/ssm/exceptions.py index a1e1290028d0..2e715f16a24c 100644 --- a/moto/ssm/exceptions.py +++ b/moto/ssm/exceptions.py @@ -73,7 +73,9 @@ class InvalidDocumentOperation(JsonRESTError): code = 400 def __init__(self, message): - super(InvalidDocumentOperation, self).__init__("InvalidDocumentOperation", message) + super(InvalidDocumentOperation, self).__init__( + "InvalidDocumentOperation", message + ) class InvalidDocumentContent(JsonRESTError): @@ -94,12 +96,15 @@ class DuplicateDocumentVersionName(JsonRESTError): code = 400 def __init__(self, message): - super(DuplicateDocumentVersionName, self).__init__("DuplicateDocumentVersionName", message) + super(DuplicateDocumentVersionName, self).__init__( + "DuplicateDocumentVersionName", message + ) class DuplicateDocumentContent(JsonRESTError): code = 400 def __init__(self, message): - super(DuplicateDocumentContent, self).__init__("DuplicateDocumentContent", message) - + super(DuplicateDocumentContent, self).__init__( + "DuplicateDocumentContent", message + ) diff --git a/moto/ssm/models.py b/moto/ssm/models.py index 3fa71b2aa623..ad9806e9f583 100644 --- a/moto/ssm/models.py +++ b/moto/ssm/models.py @@ -31,21 +31,21 @@ InvalidDocumentContent, InvalidDocumentVersion, DuplicateDocumentVersionName, - DuplicateDocumentContent + DuplicateDocumentContent, ) class Parameter(BaseModel): def __init__( - self, - name, - value, - type, - description, - allowed_pattern, - keyid, - last_modified_date, - version, + self, + name, + value, + type, + description, + allowed_pattern, + keyid, + last_modified_date, + version, ): self.name = name self.type = type @@ -73,7 +73,7 @@ def decrypt(self, value): prefix = "kms:{}:".format(self.keyid or "default") if value.startswith(prefix): - return value[len(prefix):] + return value[len(prefix) :] def response_object(self, decrypt=False, region=None): r = { @@ -123,7 +123,11 @@ def generate_ssm_doc_param_list(parameters): final_dict["Type"] = param_info["type"] final_dict["Description"] = param_info["description"] - if param_info["type"] == "StringList" or param_info["type"] == "StringMap" or param_info["type"] == "MapList": + if ( + param_info["type"] == "StringList" + or param_info["type"] == "StringMap" + or param_info["type"] == "MapList" + ): final_dict["DefaultValue"] = json.dumps(param_info["default"]) else: final_dict["DefaultValue"] = str(param_info["default"]) @@ -134,8 +138,19 @@ def generate_ssm_doc_param_list(parameters): class Document(BaseModel): - def __init__(self, name, version_name, content, document_type, document_format, requires, attachments, - target_type, tags, document_version="1"): + def __init__( + self, + name, + version_name, + content, + document_type, + document_format, + requires, + attachments, + target_type, + tags, + document_version="1", + ): self.name = name self.version_name = version_name self.content = content @@ -155,14 +170,18 @@ def __init__(self, name, version_name, content, document_type, document_format, try: content_json = json.loads(content) except json.decoder.JSONDecodeError: - raise InvalidDocumentContent("The content for the document is not valid.") + raise InvalidDocumentContent( + "The content for the document is not valid." 
+ ) elif document_format == "YAML": try: content_json = yaml.safe_load(content) except yaml.YAMLError: - raise InvalidDocumentContent("The content for the document is not valid.") + raise InvalidDocumentContent( + "The content for the document is not valid." + ) else: - raise ValidationException(f'Invalid document format {document_format}') + raise ValidationException(f"Invalid document format {document_format}") self.content_json = content_json @@ -171,11 +190,17 @@ def __init__(self, name, version_name, content, document_type, document_format, self.description = content_json.get("description") self.outputs = content_json.get("outputs") self.files = content_json.get("files") - # TODO add platformType + # TODO add platformType (requires mapping the ssm actions to OS's this isn't well documented) self.platform_types = ["Not Implemented (moto)"] - self.parameter_list = generate_ssm_doc_param_list(content_json.get("parameters")) + self.parameter_list = generate_ssm_doc_param_list( + content_json.get("parameters") + ) - if self.schema_version == "0.3" or self.schema_version == "2.0" or self.schema_version == "2.2": + if ( + self.schema_version == "0.3" + or self.schema_version == "2.0" + or self.schema_version == "2.2" + ): self.mainSteps = content_json["mainSteps"] elif self.schema_version == "1.2": self.runtimeConfig = content_json.get("runtimeConfig") @@ -184,25 +209,23 @@ def __init__(self, name, version_name, content, document_type, document_format, raise InvalidDocumentContent("The content for the document is not valid.") - - class Command(BaseModel): def __init__( - self, - comment="", - document_name="", - timeout_seconds=MAX_TIMEOUT_SECONDS, - instance_ids=None, - max_concurrency="", - max_errors="", - notification_config=None, - output_s3_bucket_name="", - output_s3_key_prefix="", - output_s3_region="", - parameters=None, - service_role_arn="", - targets=None, - backend_region="us-east-1", + self, + comment="", + document_name="", + timeout_seconds=MAX_TIMEOUT_SECONDS, + instance_ids=None, + max_concurrency="", + max_errors="", + notification_config=None, + output_s3_bucket_name="", + output_s3_key_prefix="", + output_s3_region="", + parameters=None, + service_role_arn="", + targets=None, + backend_region="us-east-1", ): if instance_ids is None: @@ -356,14 +379,23 @@ def get_invocation(self, instance_id, plugin_name): def _validate_document_format(document_format): aws_doc_formats = ["JSON", "YAML"] if document_format not in aws_doc_formats: - raise ValidationException(f'Invalid document format {document_format}') + raise ValidationException(f"Invalid document format {document_format}") def _validate_document_info(content, name, document_type, document_format, strict=True): - aws_ssm_name_regex = r'^[a-zA-Z0-9_\-.]{3,128}$' + aws_ssm_name_regex = r"^[a-zA-Z0-9_\-.]{3,128}$" aws_name_reject_list = ["aws-", "amazon", "amzn"] - aws_doc_types = ["Command", "Policy", "Automation", "Session", "Package", "ApplicationConfiguration", - "ApplicationConfigurationSchema", "DeploymentStrategy", "ChangeCalendar"] + aws_doc_types = [ + "Command", + "Policy", + "Automation", + "Session", + "Package", + "ApplicationConfiguration", + "ApplicationConfigurationSchema", + "DeploymentStrategy", + "ChangeCalendar", + ] _validate_document_format(document_format) @@ -371,14 +403,14 @@ def _validate_document_info(content, name, document_type, document_format, stric raise ValidationException("Content is required") if list(filter(name.startswith, aws_name_reject_list)): - raise ValidationException(f'Invalid 
document name {name}') + raise ValidationException(f"Invalid document name {name}") ssm_name_pattern = re.compile(aws_ssm_name_regex) if not ssm_name_pattern.match(name): - raise ValidationException(f'Invalid document name {name}') + raise ValidationException(f"Invalid document name {name}") if strict and document_type not in aws_doc_types: # Update document doesn't use document type - raise ValidationException(f'Invalid document type {document_type}') + raise ValidationException(f"Invalid document type {document_type}") def _document_filter_equal_comparator(keyed_value, filter): @@ -397,7 +429,9 @@ def _document_filter_list_includes_comparator(keyed_value_list, filter): def _document_filter_match(filters, ssm_doc): for filter in filters: - if filter["Key"] == "Name" and not _document_filter_equal_comparator(ssm_doc.name, filter): + if filter["Key"] == "Name" and not _document_filter_equal_comparator( + ssm_doc.name, filter + ): return False elif filter["Key"] == "Owner": @@ -409,14 +443,21 @@ def _document_filter_match(filters, ssm_doc): if not _document_filter_equal_comparator(ssm_doc.owner, filter): return False - elif filter["Key"] == "PlatformTypes" and not \ - _document_filter_list_includes_comparator(ssm_doc.platform_types, filter): + elif filter[ + "Key" + ] == "PlatformTypes" and not _document_filter_list_includes_comparator( + ssm_doc.platform_types, filter + ): return False - elif filter["Key"] == "DocumentType" and not _document_filter_equal_comparator(ssm_doc.document_type, filter): + elif filter["Key"] == "DocumentType" and not _document_filter_equal_comparator( + ssm_doc.document_type, filter + ): return False - elif filter["Key"] == "TargetType" and not _document_filter_equal_comparator(ssm_doc.target_type, filter): + elif filter["Key"] == "TargetType" and not _document_filter_equal_comparator( + ssm_doc.target_type, filter + ): return False return True @@ -440,10 +481,10 @@ def __init__(self): def _generate_document_description(self, document): - latest = self._documents[document.name]['latest_version'] + latest = self._documents[document.name]["latest_version"] default_version = self._documents[document.name]["default_version"] base = { - "Hash": hashlib.sha256(document.content.encode('utf-8')).hexdigest(), + "Hash": hashlib.sha256(document.content.encode("utf-8")).hexdigest(), "HashType": "Sha256", "Name": document.name, "Owner": document.owner, @@ -457,7 +498,7 @@ def _generate_document_description(self, document): "SchemaVersion": document.schema_version, "LatestVersion": latest, "DefaultVersion": default_version, - "DocumentFormat": document.document_format + "DocumentFormat": document.document_format, } if document.version_name: base["VersionName"] = document.version_name @@ -475,7 +516,7 @@ def _generate_document_information(self, ssm_document, document_format): "Status": ssm_document.status, "Content": ssm_document.content, "DocumentType": ssm_document.document_type, - "DocumentFormat": document_format + "DocumentFormat": document_format, } if document_format == "JSON": @@ -483,7 +524,7 @@ def _generate_document_information(self, ssm_document, document_format): elif document_format == "YAML": base["Content"] = yaml.dump(ssm_document.content_json) else: - raise ValidationException(f'Invalid document format {document_format}') + raise ValidationException(f"Invalid document format {document_format}") if ssm_document.version_name: base["VersionName"] = ssm_document.version_name @@ -501,7 +542,7 @@ def _generate_document_list_information(self, ssm_document): 
"DocumentVersion": ssm_document.document_version, "DocumentType": ssm_document.document_type, "SchemaVersion": ssm_document.schema_version, - "DocumentFormat": ssm_document.document_format + "DocumentFormat": ssm_document.document_format, } if ssm_document.version_name: base["VersionName"] = ssm_document.version_name @@ -516,24 +557,44 @@ def _generate_document_list_information(self, ssm_document): return base - def create_document(self, content, requires, attachments, name, version_name, document_type, document_format, - target_type, tags): - ssm_document = Document(name=name, version_name=version_name, content=content, document_type=document_type, - document_format=document_format, requires=requires, attachments=attachments, - target_type=target_type, tags=tags) + def create_document( + self, + content, + requires, + attachments, + name, + version_name, + document_type, + document_format, + target_type, + tags, + ): + ssm_document = Document( + name=name, + version_name=version_name, + content=content, + document_type=document_type, + document_format=document_format, + requires=requires, + attachments=attachments, + target_type=target_type, + tags=tags, + ) - _validate_document_info(content=content, name=name, document_type=document_type, - document_format=document_format) + _validate_document_info( + content=content, + name=name, + document_type=document_type, + document_format=document_format, + ) if self._documents.get(ssm_document.name): raise DocumentAlreadyExists(f"The specified document already exists.") self._documents[ssm_document.name] = { - "documents": { - ssm_document.document_version: ssm_document - }, + "documents": {ssm_document.document_version: ssm_document}, "default_version": ssm_document.document_version, - "latest_version": ssm_document.document_version + "latest_version": ssm_document.document_version, } return self._generate_document_description(ssm_document) @@ -545,20 +606,34 @@ def delete_document(self, name, document_version, version_name, force): if documents: default_version = self._documents[name]["default_version"] - if documents[default_version].document_type == "ApplicationConfigurationSchema" and not force: - raise InvalidDocumentOperation("You attempted to delete a document while it is still shared. " - "You must stop sharing the document before you can delete it.") + if ( + documents[default_version].document_type + == "ApplicationConfigurationSchema" + and not force + ): + raise InvalidDocumentOperation( + "You attempted to delete a document while it is still shared. " + "You must stop sharing the document before you can delete it." + ) if document_version and document_version == default_version: - raise InvalidDocumentOperation("Default version of the document can't be deleted.") + raise InvalidDocumentOperation( + "Default version of the document can't be deleted." + ) if document_version or version_name: # We delete only a specific version delete_doc = self._find_document(name, document_version, version_name) # we can't delete only the default version - if delete_doc and delete_doc.document_version == default_version and len(documents) != 1: - raise InvalidDocumentOperation("Default version of the document can't be deleted.") + if ( + delete_doc + and delete_doc.document_version == default_version + and len(documents) != 1 + ): + raise InvalidDocumentOperation( + "Default version of the document can't be deleted." 
+ ) if delete_doc: keys_to_delete.add(delete_doc.document_version) @@ -571,8 +646,6 @@ def delete_document(self, name, document_version, version_name, force): for key in keys_to_delete: del self._documents[name]["documents"][key] - keys = self._documents[name]["documents"].keys() - if len(self._documents[name]["documents"].keys()) == 0: del self._documents[name] else: @@ -586,7 +659,9 @@ def delete_document(self, name, document_version, version_name, force): else: raise InvalidDocument("The specified document does not exist.") - def _find_document(self, name, document_version=None, version_name=None, strict=True): + def _find_document( + self, name, document_version=None, version_name=None, strict=True + ): if not self._documents.get(name): raise InvalidDocument(f"The specified document does not exist.") @@ -595,18 +670,21 @@ def _find_document(self, name, document_version=None, version_name=None, strict= if not version_name and not document_version: # Retrieve default version - default_version = self._documents[name]['default_version'] + default_version = self._documents[name]["default_version"] ssm_document = documents.get(default_version) elif version_name and document_version: for doc_version, document in documents.items(): - if doc_version == document_version and document.version_name == version_name: + if ( + doc_version == document_version + and document.version_name == version_name + ): ssm_document = document break else: for doc_version, document in documents.items(): - if document_version and doc_version == document_version : + if document_version and doc_version == document_version: ssm_document = document break if version_name and document.version_name == version_name: @@ -642,32 +720,68 @@ def update_document_default_version(self, name, document_version): return base - def update_document(self, content, attachments, name, version_name, document_version, document_format, target_type): - _validate_document_info(content=content, name=name, document_type=None, document_format=document_format, - strict=False) + def update_document( + self, + content, + attachments, + name, + version_name, + document_version, + document_format, + target_type, + ): + _validate_document_info( + content=content, + name=name, + document_type=None, + document_format=document_format, + strict=False, + ) if not self._documents.get(name): raise InvalidDocument("The specified document does not exist.") - if self._documents[name]['latest_version'] != document_version and document_version != "$LATEST": - raise InvalidDocumentVersion("The document version is not valid or does not exist.") - if version_name and self._find_document(name, version_name=version_name, strict=False): - raise DuplicateDocumentVersionName(f"The specified version name is a duplicate.") + if ( + self._documents[name]["latest_version"] != document_version + and document_version != "$LATEST" + ): + raise InvalidDocumentVersion( + "The document version is not valid or does not exist." + ) + if version_name and self._find_document( + name, version_name=version_name, strict=False + ): + raise DuplicateDocumentVersionName( + f"The specified version name is a duplicate." 
+ ) old_ssm_document = self._find_document(name) - new_ssm_document = Document(name=name, version_name=version_name, content=content, - document_type=old_ssm_document.document_type, document_format=document_format, - requires=old_ssm_document.requires, attachments=attachments, - target_type=target_type, tags=old_ssm_document.tags, - document_version=str(int(self._documents[name]['latest_version']) + 1)) + new_ssm_document = Document( + name=name, + version_name=version_name, + content=content, + document_type=old_ssm_document.document_type, + document_format=document_format, + requires=old_ssm_document.requires, + attachments=attachments, + target_type=target_type, + tags=old_ssm_document.tags, + document_version=str(int(self._documents[name]["latest_version"]) + 1), + ) - for doc_version, document in self._documents[name]['documents'].items(): + for doc_version, document in self._documents[name]["documents"].items(): if document.content == new_ssm_document.content: - raise DuplicateDocumentContent("The content of the association document matches another document. " - "Change the content of the document and try again.") + raise DuplicateDocumentContent( + "The content of the association document matches another document. " + "Change the content of the document and try again." + ) - self._documents[name]["latest_version"] = str(int(self._documents[name]["latest_version"]) + 1) - self._documents[name]["documents"][new_ssm_document.document_version] = new_ssm_document + self._documents[name]["latest_version"] = str( + int(self._documents[name]["latest_version"]) + 1 + ) + self._documents[name]["documents"][ + new_ssm_document.document_version + ] = new_ssm_document return self._generate_document_description(new_ssm_document) @@ -675,7 +789,9 @@ def describe_document(self, name, document_version, version_name): ssm_document = self._find_document(name, document_version, version_name) return self._generate_document_description(ssm_document) - def list_documents(self, document_filter_list, filters, max_results=10, next_token="0"): + def list_documents( + self, document_filter_list, filters, max_results=10, next_token="0" + ): if document_filter_list: raise ValidationException( "DocumentFilterList is deprecated. Instead use Filters." @@ -690,13 +806,12 @@ def list_documents(self, document_filter_list, filters, max_results=10, next_tok # There's still more to go so we need a next token return results, str(next_token + len(results)) - if dummy_token_tracker < next_token: dummy_token_tracker = dummy_token_tracker + 1 continue - default_version = document_bundle['default_version'] - ssm_doc = self._documents[document_name]['documents'][default_version] + default_version = document_bundle["default_version"] + ssm_doc = self._documents[document_name]["documents"][default_version] if filters and not _document_filter_match(filters, ssm_doc): # If we have filters enabled, and we don't match them, continue @@ -871,9 +986,9 @@ def _validate_parameter_filters(self, parameter_filters, by_path): "When using global parameters, please specify within a global namespace." ) if ( - "//" in value - or not value.startswith("/") - or not re.match("^[a-zA-Z0-9_.-/]*$", value) + "//" in value + or not value.startswith("/") + or not re.match("^[a-zA-Z0-9_.-/]*$", value) ): raise ValidationException( 'The parameter doesn\'t meet the parameter name requirements. The parameter name must begin with a forward slash "/". 
' @@ -952,13 +1067,13 @@ def get_parameters(self, names, with_decryption): return result def get_parameters_by_path( - self, - path, - with_decryption, - recursive, - filters=None, - next_token=None, - max_results=10, + self, + path, + with_decryption, + recursive, + filters=None, + next_token=None, + max_results=10, ): """Implement the get-parameters-by-path-API in the backend.""" result = [] @@ -968,10 +1083,10 @@ def get_parameters_by_path( for param_name in self._parameters: if path != "/" and not param_name.startswith(path): continue - if "/" in param_name[len(path) + 1:] and not recursive: + if "/" in param_name[len(path) + 1 :] and not recursive: continue if not self._match_filters( - self.get_parameter(param_name, with_decryption), filters + self.get_parameter(param_name, with_decryption), filters ): continue result.append(self.get_parameter(param_name, with_decryption)) @@ -983,7 +1098,7 @@ def _get_values_nexttoken(self, values_list, max_results, next_token=None): next_token = 0 next_token = int(next_token) max_results = int(max_results) - values = values_list[next_token: next_token + max_results] + values = values_list[next_token : next_token + max_results] if len(values) == max_results: next_token = str(next_token + max_results) else: @@ -1021,7 +1136,7 @@ def _match_filters(self, parameter, filters=None): if what is None: return False elif option == "BeginsWith" and not any( - what.startswith(value) for value in values + what.startswith(value) for value in values ): return False elif option == "Equals" and not any(what == value for value in values): @@ -1030,10 +1145,10 @@ def _match_filters(self, parameter, filters=None): if any(value == "/" and len(what.split("/")) == 2 for value in values): continue elif any( - value != "/" - and what.startswith(value + "/") - and len(what.split("/")) - 1 == len(value.split("/")) - for value in values + value != "/" + and what.startswith(value + "/") + and len(what.split("/")) - 1 == len(value.split("/")) + for value in values ): continue else: @@ -1080,10 +1195,10 @@ def label_parameter_version(self, name, version, labels): invalid_labels = [] for label in labels: if ( - label.startswith("aws") - or label.startswith("ssm") - or label[:1].isdigit() - or not re.match(r"^[a-zA-z0-9_\.\-]*$", label) + label.startswith("aws") + or label.startswith("ssm") + or label[:1].isdigit() + or not re.match(r"^[a-zA-z0-9_\.\-]*$", label) ): invalid_labels.append(label) continue @@ -1113,7 +1228,7 @@ def label_parameter_version(self, name, version, labels): return [invalid_labels, version] def put_parameter( - self, name, description, value, type, allowed_pattern, keyid, overwrite + self, name, description, value, type, allowed_pattern, keyid, overwrite ): previous_parameter_versions = self._parameters[name] if len(previous_parameter_versions) == 0: diff --git a/moto/ssm/responses.py b/moto/ssm/responses.py index 6d818b065d3b..66606c2838b0 100644 --- a/moto/ssm/responses.py +++ b/moto/ssm/responses.py @@ -28,21 +28,31 @@ def create_document(self): target_type = self._get_param("TargetType") tags = self._get_param("Tags") - result = self.ssm_backend.create_document(content=content, requires=requires, attachments=attachments, - name=name, version_name=version_name, document_type=document_type, - document_format=document_format, target_type=target_type, tags=tags) + result = self.ssm_backend.create_document( + content=content, + requires=requires, + attachments=attachments, + name=name, + version_name=version_name, + document_type=document_type, + 
document_format=document_format, + target_type=target_type, + tags=tags, + ) - return json.dumps({ - 'DocumentDescription': result - }) + return json.dumps({"DocumentDescription": result}) def delete_document(self): name = self._get_param("Name") document_version = self._get_param("DocumentVersion") version_name = self._get_param("VersionName") force = self._get_param("Force", False) - self.ssm_backend.delete_document(name=name, document_version=document_version, - version_name=version_name, force=force) + self.ssm_backend.delete_document( + name=name, + document_version=document_version, + version_name=version_name, + force=force, + ) return json.dumps({}) @@ -52,8 +62,12 @@ def get_document(self): document_version = self._get_param("DocumentVersion") document_format = self._get_param("DocumentFormat", "JSON") - document = self.ssm_backend.get_document(name=name, document_version=document_version, - document_format=document_format, version_name=version_name) + document = self.ssm_backend.get_document( + name=name, + document_version=document_version, + document_format=document_format, + version_name=version_name, + ) return json.dumps(document) @@ -62,12 +76,11 @@ def describe_document(self): document_version = self._get_param("DocumentVersion") version_name = self._get_param("VersionName") - result = self.ssm_backend.describe_document(name=name, document_version=document_version, - version_name=version_name) + result = self.ssm_backend.describe_document( + name=name, document_version=document_version, version_name=version_name + ) - return json.dumps({ - 'Document': result - }) + return json.dumps({"Document": result}) def update_document(self): content = self._get_param("Content") @@ -78,22 +91,26 @@ def update_document(self): document_format = self._get_param("DocumentFormat", "JSON") target_type = self._get_param("TargetType") - result = self.ssm_backend.update_document(content=content, attachments=attachments, name=name, - version_name=version_name, document_version=document_version, - document_format=document_format, target_type=target_type) + result = self.ssm_backend.update_document( + content=content, + attachments=attachments, + name=name, + version_name=version_name, + document_version=document_version, + document_format=document_format, + target_type=target_type, + ) - return json.dumps({ - 'DocumentDescription': result - }) + return json.dumps({"DocumentDescription": result}) def update_document_default_version(self): name = self._get_param("Name") document_version = self._get_param("DocumentVersion") - result = self.ssm_backend.update_document_default_version(name=name, document_version=document_version) - return json.dumps({ - 'Description': result - }) + result = self.ssm_backend.update_document_default_version( + name=name, document_version=document_version + ) + return json.dumps({"Description": result}) def list_documents(self): document_filter_list = self._get_param("DocumentFilterList") @@ -101,13 +118,14 @@ def list_documents(self): max_results = self._get_param("MaxResults", 10) next_token = self._get_param("NextToken", "0") - documents, token = self.ssm_backend.list_documents(document_filter_list=document_filter_list, filters=filters, - max_results=max_results, next_token=next_token) + documents, token = self.ssm_backend.list_documents( + document_filter_list=document_filter_list, + filters=filters, + max_results=max_results, + next_token=next_token, + ) - return json.dumps({ - "DocumentIdentifiers": documents, - "NextToken": token - }) + return 
json.dumps({"DocumentIdentifiers": documents, "NextToken": token}) def _get_param(self, param, default=None): return self.request_params.get(param, default) diff --git a/tests/test_ssm/test_ssm_docs.py b/tests/test_ssm/test_ssm_docs.py index ac5460f9d387..409a3bf959d0 100644 --- a/tests/test_ssm/test_ssm_docs.py +++ b/tests/test_ssm/test_ssm_docs.py @@ -21,18 +21,29 @@ def _get_yaml_template(): - template_path = '/'.join(['test_ssm', 'test_templates', 'good.yaml']) - resource_path = pkg_resources.resource_string('tests', template_path) + template_path = "/".join(["test_ssm", "test_templates", "good.yaml"]) + resource_path = pkg_resources.resource_string("tests", template_path) return resource_path -def _validate_document_description(doc_name, doc_description, json_doc, expected_document_version, - expected_latest_version, expected_default_version, expected_format): +def _validate_document_description( + doc_name, + doc_description, + json_doc, + expected_document_version, + expected_latest_version, + expected_default_version, + expected_format, +): if expected_format == "JSON": - doc_description["Hash"].should.equal(hashlib.sha256(json.dumps(json_doc).encode('utf-8')).hexdigest()) + doc_description["Hash"].should.equal( + hashlib.sha256(json.dumps(json_doc).encode("utf-8")).hexdigest() + ) else: - doc_description["Hash"].should.equal(hashlib.sha256(yaml.dump(json_doc).encode('utf-8')).hexdigest()) + doc_description["Hash"].should.equal( + hashlib.sha256(yaml.dump(json_doc).encode("utf-8")).hexdigest() + ) doc_description["HashType"].should.equal("Sha256") doc_description["Name"].should.equal(doc_name) @@ -63,7 +74,7 @@ def _validate_document_description(doc_name, doc_description, json_doc, expected doc_description["Parameters"][3]["Name"].should.equal("Parameter4") doc_description["Parameters"][3]["Type"].should.equal("StringList") doc_description["Parameters"][3]["Description"].should.equal("A string list") - doc_description["Parameters"][3]["DefaultValue"].should.equal("[\"abc\", \"def\"]") + doc_description["Parameters"][3]["DefaultValue"].should.equal('["abc", "def"]') doc_description["Parameters"][4]["Name"].should.equal("Parameter5") doc_description["Parameters"][4]["Type"].should.equal("StringMap") @@ -74,22 +85,32 @@ def _validate_document_description(doc_name, doc_description, json_doc, expected if expected_format == "JSON": # We have to replace single quotes from the response to package it back up json.loads(doc_description["Parameters"][4]["DefaultValue"]).should.equal( - {'NotificationArn': '$dependency.topicArn', - 'NotificationEvents': ['Failed'], - 'NotificationType': 'Command'}) + { + "NotificationArn": "$dependency.topicArn", + "NotificationEvents": ["Failed"], + "NotificationType": "Command", + } + ) json.loads(doc_description["Parameters"][5]["DefaultValue"]).should.equal( - [{'DeviceName': '/dev/sda1', 'Ebs': {'VolumeSize': '50'}}, - {'DeviceName': '/dev/sdm', 'Ebs': {'VolumeSize': '100'}}] + [ + {"DeviceName": "/dev/sda1", "Ebs": {"VolumeSize": "50"}}, + {"DeviceName": "/dev/sdm", "Ebs": {"VolumeSize": "100"}}, + ] ) else: yaml.safe_load(doc_description["Parameters"][4]["DefaultValue"]).should.equal( - {'NotificationArn': '$dependency.topicArn', - 'NotificationEvents': ['Failed'], - 'NotificationType': 'Command'}) + { + "NotificationArn": "$dependency.topicArn", + "NotificationEvents": ["Failed"], + "NotificationType": "Command", + } + ) yaml.safe_load(doc_description["Parameters"][5]["DefaultValue"]).should.equal( - [{'DeviceName': '/dev/sda1', 'Ebs': {'VolumeSize': 
'50'}}, - {'DeviceName': '/dev/sdm', 'Ebs': {'VolumeSize': '100'}}] + [ + {"DeviceName": "/dev/sda1", "Ebs": {"VolumeSize": "50"}}, + {"DeviceName": "/dev/sdm", "Ebs": {"VolumeSize": "100"}}, + ] ) doc_description["DocumentType"].should.equal("Command") @@ -98,7 +119,10 @@ def _validate_document_description(doc_name, doc_description, json_doc, expected doc_description["DefaultVersion"].should.equal(expected_default_version) doc_description["DocumentFormat"].should.equal(expected_format) -def _get_doc_validator(response, version_name, doc_version, json_doc_content, document_format): + +def _get_doc_validator( + response, version_name, doc_version, json_doc_content, document_format +): response["Name"].should.equal("TestDocument3") if version_name: response["VersionName"].should.equal(version_name) @@ -111,6 +135,7 @@ def _get_doc_validator(response, version_name, doc_version, json_doc_content, do response["DocumentType"].should.equal("Command") response["DocumentFormat"].should.equal(document_format) + # Done @mock_ssm def test_create_document(): @@ -120,27 +145,45 @@ def test_create_document(): client = boto3.client("ssm", region_name="us-east-1") response = client.create_document( - Content=yaml.dump(json_doc), Name="TestDocument", DocumentType="Command", DocumentFormat="YAML" + Content=yaml.dump(json_doc), + Name="TestDocument", + DocumentType="Command", + DocumentFormat="YAML", ) doc_description = response["DocumentDescription"] - _validate_document_description("TestDocument", doc_description, json_doc, "1", "1", "1", "YAML") + _validate_document_description( + "TestDocument", doc_description, json_doc, "1", "1", "1", "YAML" + ) response = client.create_document( - Content=json.dumps(json_doc), Name="TestDocument2", DocumentType="Command", DocumentFormat="JSON" + Content=json.dumps(json_doc), + Name="TestDocument2", + DocumentType="Command", + DocumentFormat="JSON", ) doc_description = response["DocumentDescription"] - _validate_document_description("TestDocument2", doc_description, json_doc, "1", "1", "1", "JSON") + _validate_document_description( + "TestDocument2", doc_description, json_doc, "1", "1", "1", "JSON" + ) response = client.create_document( - Content=json.dumps(json_doc), Name="TestDocument3", DocumentType="Command", DocumentFormat="JSON", - VersionName="Base", TargetType="/AWS::EC2::Instance", Tags=[{'Key': 'testing', 'Value': 'testingValue'}] + Content=json.dumps(json_doc), + Name="TestDocument3", + DocumentType="Command", + DocumentFormat="JSON", + VersionName="Base", + TargetType="/AWS::EC2::Instance", + Tags=[{"Key": "testing", "Value": "testingValue"}], ) doc_description = response["DocumentDescription"] doc_description["VersionName"].should.equal("Base") doc_description["TargetType"].should.equal("/AWS::EC2::Instance") - doc_description["Tags"].should.equal([{'Key': 'testing', 'Value': 'testingValue'}]) + doc_description["Tags"].should.equal([{"Key": "testing", "Value": "testingValue"}]) + + _validate_document_description( + "TestDocument3", doc_description, json_doc, "1", "1", "1", "JSON" + ) - _validate_document_description("TestDocument3", doc_description, json_doc, "1", "1", "1", "JSON") # Done @mock_ssm @@ -155,18 +198,26 @@ def test_get_document(): raise RuntimeError("Should fail") except botocore.exceptions.ClientError as err: err.operation_name.should.equal("GetDocument") - err.response["Error"]["Message"].should.equal("The specified document does not exist.") + err.response["Error"]["Message"].should.equal( + "The specified document does not exist." 
+ ) client.create_document( - Content=yaml.dump(json_doc), Name="TestDocument3", DocumentType="Command", DocumentFormat="YAML", - VersionName="Base" + Content=yaml.dump(json_doc), + Name="TestDocument3", + DocumentType="Command", + DocumentFormat="YAML", + VersionName="Base", ) new_json_doc = copy.copy(json_doc) - new_json_doc['description'] = "a new description" + new_json_doc["description"] = "a new description" client.update_document( - Content=json.dumps(new_json_doc), Name="TestDocument3", DocumentVersion="$LATEST", VersionName="NewBase" + Content=json.dumps(new_json_doc), + Name="TestDocument3", + DocumentVersion="$LATEST", + VersionName="NewBase", ) response = client.get_document(Name="TestDocument3") @@ -190,32 +241,38 @@ def test_get_document(): response = client.get_document(Name="TestDocument3", VersionName="NewBase") _get_doc_validator(response, "NewBase", "2", new_json_doc, "JSON") - response = client.get_document(Name="TestDocument3", VersionName="NewBase", DocumentVersion="2") + response = client.get_document( + Name="TestDocument3", VersionName="NewBase", DocumentVersion="2" + ) _get_doc_validator(response, "NewBase", "2", new_json_doc, "JSON") try: - response = client.get_document(Name="TestDocument3", VersionName="BadName", DocumentVersion="2") + response = client.get_document( + Name="TestDocument3", VersionName="BadName", DocumentVersion="2" + ) raise RuntimeError("Should fail") except botocore.exceptions.ClientError as err: err.operation_name.should.equal("GetDocument") - err.response["Error"]["Message"].should.equal("The specified document does not exist.") + err.response["Error"]["Message"].should.equal( + "The specified document does not exist." + ) try: response = client.get_document(Name="TestDocument3", DocumentVersion="3") raise RuntimeError("Should fail") except botocore.exceptions.ClientError as err: err.operation_name.should.equal("GetDocument") - err.response["Error"]["Message"].should.equal("The specified document does not exist.") + err.response["Error"]["Message"].should.equal( + "The specified document does not exist." + ) # Updating default should update normal get - client.update_document_default_version( - Name="TestDocument3", - DocumentVersion="2" - ) + client.update_document_default_version(Name="TestDocument3", DocumentVersion="2") response = client.get_document(Name="TestDocument3", DocumentFormat="JSON") _get_doc_validator(response, "NewBase", "2", new_json_doc, "JSON") + @mock_ssm def test_delete_document(): template_file = _get_yaml_template() @@ -227,12 +284,18 @@ def test_delete_document(): raise RuntimeError("Should fail") except botocore.exceptions.ClientError as err: err.operation_name.should.equal("DeleteDocument") - err.response["Error"]["Message"].should.equal("The specified document does not exist.") + err.response["Error"]["Message"].should.equal( + "The specified document does not exist." 
+ ) # Test simple client.create_document( - Content=yaml.dump(json_doc), Name="TestDocument3", DocumentType="Command", DocumentFormat="YAML", - VersionName="Base", TargetType="/AWS::EC2::Instance" + Content=yaml.dump(json_doc), + Name="TestDocument3", + DocumentType="Command", + DocumentFormat="YAML", + VersionName="Base", + TargetType="/AWS::EC2::Instance", ) client.delete_document(Name="TestDocument3") @@ -241,51 +304,68 @@ def test_delete_document(): raise RuntimeError("Should fail") except botocore.exceptions.ClientError as err: err.operation_name.should.equal("GetDocument") - err.response["Error"]["Message"].should.equal("The specified document does not exist.") - + err.response["Error"]["Message"].should.equal( + "The specified document does not exist." + ) # Delete default version with other version is bad client.create_document( - Content=yaml.dump(json_doc), Name="TestDocument3", DocumentType="Command", DocumentFormat="YAML", - VersionName="Base", TargetType="/AWS::EC2::Instance" + Content=yaml.dump(json_doc), + Name="TestDocument3", + DocumentType="Command", + DocumentFormat="YAML", + VersionName="Base", + TargetType="/AWS::EC2::Instance", ) new_json_doc = copy.copy(json_doc) - new_json_doc['description'] = "a new description" + new_json_doc["description"] = "a new description" client.update_document( - Content=json.dumps(new_json_doc), Name="TestDocument3", DocumentVersion="$LATEST", VersionName="NewBase" + Content=json.dumps(new_json_doc), + Name="TestDocument3", + DocumentVersion="$LATEST", + VersionName="NewBase", ) - new_json_doc['description'] = "a new description2" + new_json_doc["description"] = "a new description2" client.update_document( - Content=json.dumps(new_json_doc), Name="TestDocument3", DocumentVersion="$LATEST" + Content=json.dumps(new_json_doc), + Name="TestDocument3", + DocumentVersion="$LATEST", ) - new_json_doc['description'] = "a new description3" + new_json_doc["description"] = "a new description3" client.update_document( - Content=json.dumps(new_json_doc), Name="TestDocument3", DocumentVersion="$LATEST" + Content=json.dumps(new_json_doc), + Name="TestDocument3", + DocumentVersion="$LATEST", ) - new_json_doc['description'] = "a new description4" + new_json_doc["description"] = "a new description4" client.update_document( - Content=json.dumps(new_json_doc), Name="TestDocument3", DocumentVersion="$LATEST" + Content=json.dumps(new_json_doc), + Name="TestDocument3", + DocumentVersion="$LATEST", ) - try: client.delete_document(Name="TestDocument3", DocumentVersion="1") raise RuntimeError("Should fail") except botocore.exceptions.ClientError as err: err.operation_name.should.equal("DeleteDocument") - err.response["Error"]["Message"].should.equal("Default version of the document can't be deleted.") + err.response["Error"]["Message"].should.equal( + "Default version of the document can't be deleted." + ) try: client.delete_document(Name="TestDocument3", VersionName="Base") raise RuntimeError("Should fail") except botocore.exceptions.ClientError as err: err.operation_name.should.equal("DeleteDocument") - err.response["Error"]["Message"].should.equal("Default version of the document can't be deleted.") + err.response["Error"]["Message"].should.equal( + "Default version of the document can't be deleted." 
+ ) # Make sure no ill side effects response = client.get_document(Name="TestDocument3") @@ -311,24 +391,31 @@ def test_delete_document(): raise RuntimeError("Should fail") except botocore.exceptions.ClientError as err: err.operation_name.should.equal("GetDocument") - err.response["Error"]["Message"].should.equal("The specified document does not exist.") + err.response["Error"]["Message"].should.equal( + "The specified document does not exist." + ) try: client.get_document(Name="TestDocument3", DocumentVersion="3") raise RuntimeError("Should fail") except botocore.exceptions.ClientError as err: err.operation_name.should.equal("GetDocument") - err.response["Error"]["Message"].should.equal("The specified document does not exist.") + err.response["Error"]["Message"].should.equal( + "The specified document does not exist." + ) try: client.get_document(Name="TestDocument3", DocumentVersion="4") raise RuntimeError("Should fail") except botocore.exceptions.ClientError as err: err.operation_name.should.equal("GetDocument") - err.response["Error"]["Message"].should.equal("The specified document does not exist.") + err.response["Error"]["Message"].should.equal( + "The specified document does not exist." + ) response = client.list_documents() - len(response['DocumentIdentifiers']).should.equal(0) + len(response["DocumentIdentifiers"]).should.equal(0) + # Done @mock_ssm @@ -342,46 +429,55 @@ def test_update_document_default_version(): raise RuntimeError("Should fail") except botocore.exceptions.ClientError as err: err.operation_name.should.equal("UpdateDocumentDefaultVersion") - err.response["Error"]["Message"].should.equal("The specified document does not exist.") + err.response["Error"]["Message"].should.equal( + "The specified document does not exist." + ) client.create_document( - Content=json.dumps(json_doc), Name="TestDocument", DocumentType="Command", VersionName="Base" + Content=json.dumps(json_doc), + Name="TestDocument", + DocumentType="Command", + VersionName="Base", ) - json_doc['description'] = "a new description" + json_doc["description"] = "a new description" client.update_document( - Content=json.dumps(json_doc), Name="TestDocument", DocumentVersion="$LATEST", - DocumentFormat="JSON" + Content=json.dumps(json_doc), + Name="TestDocument", + DocumentVersion="$LATEST", + DocumentFormat="JSON", ) - json_doc['description'] = "a new description2" + json_doc["description"] = "a new description2" client.update_document( Content=json.dumps(json_doc), Name="TestDocument", DocumentVersion="$LATEST" ) response = client.update_document_default_version( - Name="TestDocument", - DocumentVersion="2" + Name="TestDocument", DocumentVersion="2" ) response["Description"]["Name"].should.equal("TestDocument") response["Description"]["DefaultVersion"].should.equal("2") - json_doc['description'] = "a new description3" + json_doc["description"] = "a new description3" client.update_document( - Content=json.dumps(json_doc), Name="TestDocument", DocumentVersion="$LATEST", VersionName="NewBase" + Content=json.dumps(json_doc), + Name="TestDocument", + DocumentVersion="$LATEST", + VersionName="NewBase", ) response = client.update_document_default_version( - Name="TestDocument", - DocumentVersion="4" + Name="TestDocument", DocumentVersion="4" ) response["Description"]["Name"].should.equal("TestDocument") response["Description"]["DefaultVersion"].should.equal("4") response["Description"]["DefaultVersionName"].should.equal("NewBase") + # Done @mock_ssm def test_update_document(): @@ -391,54 +487,80 @@ def 
test_update_document(): client = boto3.client("ssm", region_name="us-east-1") try: - client.update_document(Name="DNE", Content=json.dumps(json_doc), DocumentVersion="1", DocumentFormat="JSON") + client.update_document( + Name="DNE", + Content=json.dumps(json_doc), + DocumentVersion="1", + DocumentFormat="JSON", + ) raise RuntimeError("Should fail") except botocore.exceptions.ClientError as err: err.operation_name.should.equal("UpdateDocument") - err.response["Error"]["Message"].should.equal("The specified document does not exist.") + err.response["Error"]["Message"].should.equal( + "The specified document does not exist." + ) client.create_document( - Content=json.dumps(json_doc), Name="TestDocument", DocumentType="Command", DocumentFormat="JSON", - VersionName="Base" + Content=json.dumps(json_doc), + Name="TestDocument", + DocumentType="Command", + DocumentFormat="JSON", + VersionName="Base", ) # Duplicate content throws an error try: client.update_document( - Content=json.dumps(json_doc), Name="TestDocument", DocumentVersion="1", DocumentFormat="JSON" + Content=json.dumps(json_doc), + Name="TestDocument", + DocumentVersion="1", + DocumentFormat="JSON", ) raise RuntimeError("Should fail") except botocore.exceptions.ClientError as err: err.operation_name.should.equal("UpdateDocument") - err.response["Error"]["Message"].should.equal("The content of the association document matches another " - "document. Change the content of the document and try again.") + err.response["Error"]["Message"].should.equal( + "The content of the association document matches another " + "document. Change the content of the document and try again." + ) - json_doc['description'] = "a new description" + json_doc["description"] = "a new description" # Duplicate version name try: client.update_document( - Content=json.dumps(json_doc), Name="TestDocument", DocumentVersion="1", DocumentFormat="JSON", - VersionName="Base" + Content=json.dumps(json_doc), + Name="TestDocument", + DocumentVersion="1", + DocumentFormat="JSON", + VersionName="Base", ) raise RuntimeError("Should fail") except botocore.exceptions.ClientError as err: err.operation_name.should.equal("UpdateDocument") - err.response["Error"]["Message"].should.equal("The specified version name is a duplicate.") + err.response["Error"]["Message"].should.equal( + "The specified version name is a duplicate." 
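+            # A VersionName must be unique across every version of a document,
+            # so reusing "Base" is rejected even though the content changed.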
+ ) response = client.update_document( - Content=json.dumps(json_doc), Name="TestDocument", VersionName="Base2", DocumentVersion="1", - DocumentFormat="JSON" + Content=json.dumps(json_doc), + Name="TestDocument", + VersionName="Base2", + DocumentVersion="1", + DocumentFormat="JSON", ) response["DocumentDescription"]["Description"].should.equal("a new description") response["DocumentDescription"]["DocumentVersion"].should.equal("2") response["DocumentDescription"]["LatestVersion"].should.equal("2") response["DocumentDescription"]["DefaultVersion"].should.equal("1") - json_doc['description'] = "a new description2" + json_doc["description"] = "a new description2" response = client.update_document( - Content=json.dumps(json_doc), Name="TestDocument", DocumentVersion="$LATEST", - DocumentFormat="JSON", VersionName="NewBase" + Content=json.dumps(json_doc), + Name="TestDocument", + DocumentVersion="$LATEST", + DocumentFormat="JSON", + VersionName="NewBase", ) response["DocumentDescription"]["Description"].should.equal("a new description2") response["DocumentDescription"]["DocumentVersion"].should.equal("3") @@ -446,6 +568,7 @@ def test_update_document(): response["DocumentDescription"]["DefaultVersion"].should.equal("1") response["DocumentDescription"]["VersionName"].should.equal("NewBase") + # Done @mock_ssm def test_describe_document(): @@ -458,26 +581,38 @@ def test_describe_document(): raise RuntimeError("Should fail") except botocore.exceptions.ClientError as err: err.operation_name.should.equal("DescribeDocument") - err.response["Error"]["Message"].should.equal("The specified document does not exist.") + err.response["Error"]["Message"].should.equal( + "The specified document does not exist." + ) client.create_document( - Content=yaml.dump(json_doc), Name="TestDocument", DocumentType="Command", DocumentFormat="YAML", - VersionName="Base", TargetType="/AWS::EC2::Instance", Tags=[{'Key': 'testing', 'Value': 'testingValue'}] + Content=yaml.dump(json_doc), + Name="TestDocument", + DocumentType="Command", + DocumentFormat="YAML", + VersionName="Base", + TargetType="/AWS::EC2::Instance", + Tags=[{"Key": "testing", "Value": "testingValue"}], ) response = client.describe_document(Name="TestDocument") - doc_description=response['Document'] - _validate_document_description("TestDocument", doc_description, json_doc, "1", "1", "1", "YAML") + doc_description = response["Document"] + _validate_document_description( + "TestDocument", doc_description, json_doc, "1", "1", "1", "YAML" + ) # Adding update to check for issues new_json_doc = copy.copy(json_doc) - new_json_doc['description'] = "a new description2" + new_json_doc["description"] = "a new description2" client.update_document( Content=json.dumps(new_json_doc), Name="TestDocument", DocumentVersion="$LATEST" ) response = client.describe_document(Name="TestDocument") - doc_description = response['Document'] - _validate_document_description("TestDocument", doc_description, json_doc, "1", "2", "1", "YAML") + doc_description = response["Document"] + _validate_document_description( + "TestDocument", doc_description, json_doc, "1", "2", "1", "YAML" + ) + # Done @mock_ssm @@ -488,70 +623,77 @@ def test_list_documents(): client = boto3.client("ssm", region_name="us-east-1") client.create_document( - Content=json.dumps(json_doc), Name="TestDocument", DocumentType="Command", DocumentFormat="JSON" + Content=json.dumps(json_doc), + Name="TestDocument", + DocumentType="Command", + DocumentFormat="JSON", ) client.create_document( - Content=json.dumps(json_doc), 
Name="TestDocument2", DocumentType="Command", DocumentFormat="JSON" + Content=json.dumps(json_doc), + Name="TestDocument2", + DocumentType="Command", + DocumentFormat="JSON", ) client.create_document( - Content=json.dumps(json_doc), Name="TestDocument3", DocumentType="Command", DocumentFormat="JSON" + Content=json.dumps(json_doc), + Name="TestDocument3", + DocumentType="Command", + DocumentFormat="JSON", ) response = client.list_documents() - len(response['DocumentIdentifiers']).should.equal(3) - response['DocumentIdentifiers'][0]["Name"].should.equal("TestDocument") - response['DocumentIdentifiers'][1]["Name"].should.equal("TestDocument2") - response['DocumentIdentifiers'][2]["Name"].should.equal("TestDocument3") - response['NextToken'].should.equal("") + len(response["DocumentIdentifiers"]).should.equal(3) + response["DocumentIdentifiers"][0]["Name"].should.equal("TestDocument") + response["DocumentIdentifiers"][1]["Name"].should.equal("TestDocument2") + response["DocumentIdentifiers"][2]["Name"].should.equal("TestDocument3") + response["NextToken"].should.equal("") response = client.list_documents(MaxResults=1) - len(response['DocumentIdentifiers']).should.equal(1) - response['DocumentIdentifiers'][0]["Name"].should.equal("TestDocument") - response['DocumentIdentifiers'][0]["DocumentVersion"].should.equal("1") - response['NextToken'].should.equal("1") - - response = client.list_documents(MaxResults=1, NextToken=response['NextToken']) - len(response['DocumentIdentifiers']).should.equal(1) - response['DocumentIdentifiers'][0]["Name"].should.equal("TestDocument2") - response['DocumentIdentifiers'][0]["DocumentVersion"].should.equal("1") - response['NextToken'].should.equal("2") - - response = client.list_documents(MaxResults=1, NextToken=response['NextToken']) - len(response['DocumentIdentifiers']).should.equal(1) - response['DocumentIdentifiers'][0]["Name"].should.equal("TestDocument3") - response['DocumentIdentifiers'][0]["DocumentVersion"].should.equal("1") - response['NextToken'].should.equal("") + len(response["DocumentIdentifiers"]).should.equal(1) + response["DocumentIdentifiers"][0]["Name"].should.equal("TestDocument") + response["DocumentIdentifiers"][0]["DocumentVersion"].should.equal("1") + response["NextToken"].should.equal("1") + + response = client.list_documents(MaxResults=1, NextToken=response["NextToken"]) + len(response["DocumentIdentifiers"]).should.equal(1) + response["DocumentIdentifiers"][0]["Name"].should.equal("TestDocument2") + response["DocumentIdentifiers"][0]["DocumentVersion"].should.equal("1") + response["NextToken"].should.equal("2") + + response = client.list_documents(MaxResults=1, NextToken=response["NextToken"]) + len(response["DocumentIdentifiers"]).should.equal(1) + response["DocumentIdentifiers"][0]["Name"].should.equal("TestDocument3") + response["DocumentIdentifiers"][0]["DocumentVersion"].should.equal("1") + response["NextToken"].should.equal("") # making sure no bad interactions with update - json_doc['description'] = "a new description" + json_doc["description"] = "a new description" client.update_document( - Content=json.dumps(json_doc), Name="TestDocument", DocumentVersion="$LATEST", - DocumentFormat="JSON" + Content=json.dumps(json_doc), + Name="TestDocument", + DocumentVersion="$LATEST", + DocumentFormat="JSON", ) client.update_document( - Content=json.dumps(json_doc), Name="TestDocument2", DocumentVersion="$LATEST", - DocumentFormat="JSON" + Content=json.dumps(json_doc), + Name="TestDocument2", + DocumentVersion="$LATEST", + 
DocumentFormat="JSON", ) response = client.update_document_default_version( - Name="TestDocument", - DocumentVersion="2" + Name="TestDocument", DocumentVersion="2" ) response = client.list_documents() - len(response['DocumentIdentifiers']).should.equal(3) - response['DocumentIdentifiers'][0]["Name"].should.equal("TestDocument") - response['DocumentIdentifiers'][0]["DocumentVersion"].should.equal("2") - - response['DocumentIdentifiers'][1]["Name"].should.equal("TestDocument2") - response['DocumentIdentifiers'][1]["DocumentVersion"].should.equal("1") - - response['DocumentIdentifiers'][2]["Name"].should.equal("TestDocument3") - response['DocumentIdentifiers'][2]["DocumentVersion"].should.equal("1") - response['NextToken'].should.equal("") - - - + len(response["DocumentIdentifiers"]).should.equal(3) + response["DocumentIdentifiers"][0]["Name"].should.equal("TestDocument") + response["DocumentIdentifiers"][0]["DocumentVersion"].should.equal("2") + response["DocumentIdentifiers"][1]["Name"].should.equal("TestDocument2") + response["DocumentIdentifiers"][1]["DocumentVersion"].should.equal("1") + response["DocumentIdentifiers"][2]["Name"].should.equal("TestDocument3") + response["DocumentIdentifiers"][2]["DocumentVersion"].should.equal("1") + response["NextToken"].should.equal("") From 0f062f68ff1f6a48e3768c0068d201277c49a83a Mon Sep 17 00:00:00 2001 From: Shane Date: Tue, 30 Jun 2020 22:35:47 +0100 Subject: [PATCH 417/658] Cloudformation: Fix - validate template yml fixes This change fixes: * Replace call to non-existent exception yaml.ParserError * Catches yaml scanner error for valid json with tabs * Supply yaml loader to ensure yaml loading throws exception validly for json with tabs and doesn't try to load the json incorrectly --- moto/cloudformation/responses.py | 4 ++-- tests/test_cloudformation/test_validate.py | 19 +++++++++++++++++++ 2 files changed, 21 insertions(+), 2 deletions(-) diff --git a/moto/cloudformation/responses.py b/moto/cloudformation/responses.py index c4a085705534..92a8b1cabfd9 100644 --- a/moto/cloudformation/responses.py +++ b/moto/cloudformation/responses.py @@ -365,8 +365,8 @@ def validate_template(self): except (ValueError, KeyError): pass try: - description = yaml.load(template_body)["Description"] - except (yaml.ParserError, KeyError): + description = yaml.load(template_body, Loader=yaml.Loader)["Description"] + except (yaml.parser.ParserError, yaml.scanner.ScannerError, KeyError): pass template = self.response_template(VALIDATE_STACK_RESPONSE_TEMPLATE) return template.render(description=description) diff --git a/tests/test_cloudformation/test_validate.py b/tests/test_cloudformation/test_validate.py index 19dec46ef7eb..081ceee5415c 100644 --- a/tests/test_cloudformation/test_validate.py +++ b/tests/test_cloudformation/test_validate.py @@ -40,6 +40,16 @@ }, } +json_valid_template_with_tabs = """ +{ +\t"AWSTemplateFormatVersion": "2010-09-09", +\t"Description": "Stack 2", +\t"Resources": { +\t\t"Queue": {"Type": "AWS::SQS::Queue", "Properties": {"VisibilityTimeout": 60}} +\t} +} +""" + # One resource is required json_bad_template = {"AWSTemplateFormatVersion": "2010-09-09", "Description": "Stack 1"} @@ -56,6 +66,15 @@ def test_boto3_json_validate_successful(): assert response["ResponseMetadata"]["HTTPStatusCode"] == 200 +@mock_cloudformation +def test_boto3_json_with_tabs_validate_successful(): + cf_conn = boto3.client("cloudformation", region_name="us-east-1") + response = cf_conn.validate_template(TemplateBody=json_valid_template_with_tabs) + assert 
response["Description"] == "Stack 2" + assert response["Parameters"] == [] + assert response["ResponseMetadata"]["HTTPStatusCode"] == 200 + + @mock_cloudformation def test_boto3_json_invalid_missing_resource(): cf_conn = boto3.client("cloudformation", region_name="us-east-1") From 487829810faed0ffae683d2c9cd9bf61058048e0 Mon Sep 17 00:00:00 2001 From: Alex Bainbridge Date: Thu, 2 Jul 2020 13:43:14 -0400 Subject: [PATCH 418/658] passes python3 and 2.7. added additional few tests for coverage bump --- moto/ssm/models.py | 25 +++++---- tests/test_ssm/test_ssm_docs.py | 96 +++++++++++++++++++++++++++++---- 2 files changed, 101 insertions(+), 20 deletions(-) diff --git a/moto/ssm/models.py b/moto/ssm/models.py index ad9806e9f583..fc9cdd273831 100644 --- a/moto/ssm/models.py +++ b/moto/ssm/models.py @@ -169,6 +169,11 @@ def __init__( if document_format == "JSON": try: content_json = json.loads(content) + except ValueError: + # Python2 + raise InvalidDocumentContent( + "The content for the document is not valid." + ) except json.decoder.JSONDecodeError: raise InvalidDocumentContent( "The content for the document is not valid." @@ -181,7 +186,7 @@ def __init__( "The content for the document is not valid." ) else: - raise ValidationException(f"Invalid document format {document_format}") + raise ValidationException("Invalid document format " + str(document_format)) self.content_json = content_json @@ -379,7 +384,7 @@ def get_invocation(self, instance_id, plugin_name): def _validate_document_format(document_format): aws_doc_formats = ["JSON", "YAML"] if document_format not in aws_doc_formats: - raise ValidationException(f"Invalid document format {document_format}") + raise ValidationException("Invalid document format " + str(document_format)) def _validate_document_info(content, name, document_type, document_format, strict=True): @@ -403,14 +408,14 @@ def _validate_document_info(content, name, document_type, document_format, stric raise ValidationException("Content is required") if list(filter(name.startswith, aws_name_reject_list)): - raise ValidationException(f"Invalid document name {name}") + raise ValidationException("Invalid document name " + str(name)) ssm_name_pattern = re.compile(aws_ssm_name_regex) if not ssm_name_pattern.match(name): - raise ValidationException(f"Invalid document name {name}") + raise ValidationException("Invalid document name " + str(name)) if strict and document_type not in aws_doc_types: # Update document doesn't use document type - raise ValidationException(f"Invalid document type {document_type}") + raise ValidationException("Invalid document type " + str(document_type)) def _document_filter_equal_comparator(keyed_value, filter): @@ -524,7 +529,7 @@ def _generate_document_information(self, ssm_document, document_format): elif document_format == "YAML": base["Content"] = yaml.dump(ssm_document.content_json) else: - raise ValidationException(f"Invalid document format {document_format}") + raise ValidationException("Invalid document format " + str(document_format)) if ssm_document.version_name: base["VersionName"] = ssm_document.version_name @@ -589,7 +594,7 @@ def create_document( ) if self._documents.get(ssm_document.name): - raise DocumentAlreadyExists(f"The specified document already exists.") + raise DocumentAlreadyExists("The specified document already exists.") self._documents[ssm_document.name] = { "documents": {ssm_document.document_version: ssm_document}, @@ -663,7 +668,7 @@ def _find_document( self, name, document_version=None, version_name=None, strict=True ): 
if not self._documents.get(name): - raise InvalidDocument(f"The specified document does not exist.") + raise InvalidDocument("The specified document does not exist.") documents = self._documents[name]["documents"] ssm_document = None @@ -692,7 +697,7 @@ def _find_document( break if strict and not ssm_document: - raise InvalidDocument(f"The specified document does not exist.") + raise InvalidDocument("The specified document does not exist.") return ssm_document @@ -751,7 +756,7 @@ def update_document( name, version_name=version_name, strict=False ): raise DuplicateDocumentVersionName( - f"The specified version name is a duplicate." + "The specified version name is a duplicate." ) old_ssm_document = self._find_document(name) diff --git a/tests/test_ssm/test_ssm_docs.py b/tests/test_ssm/test_ssm_docs.py index 409a3bf959d0..d39fa12c6a59 100644 --- a/tests/test_ssm/test_ssm_docs.py +++ b/tests/test_ssm/test_ssm_docs.py @@ -1,12 +1,9 @@ from __future__ import unicode_literals -import string - import boto3 import botocore.exceptions import sure # noqa import datetime -import uuid import json import pkg_resources import yaml @@ -14,10 +11,7 @@ import copy from moto.core import ACCOUNT_ID -from botocore.exceptions import ClientError, ParamValidationError -from nose.tools import assert_raises - -from moto import mock_ssm, mock_cloudformation +from moto import mock_ssm def _get_yaml_template(): @@ -57,6 +51,10 @@ def _validate_document_description( doc_description["DocumentVersion"].should.equal(expected_document_version) doc_description["Description"].should.equal(json_doc["description"]) + doc_description["Parameters"] = sorted( + doc_description["Parameters"], key=lambda doc: doc["Name"] + ) + doc_description["Parameters"][0]["Name"].should.equal("Parameter1") doc_description["Parameters"][0]["Type"].should.equal("Integer") doc_description["Parameters"][0]["Description"].should.equal("Command Duration.") @@ -184,6 +182,63 @@ def test_create_document(): "TestDocument3", doc_description, json_doc, "1", "1", "1", "JSON" ) + try: + client.create_document( + Content=json.dumps(json_doc), + Name="TestDocument3", + DocumentType="Command", + DocumentFormat="JSON", + ) + raise RuntimeError("Should fail") + except botocore.exceptions.ClientError as err: + err.operation_name.should.equal("CreateDocument") + err.response["Error"]["Message"].should.equal( + "The specified document already exists." + ) + + try: + client.create_document( + Content=yaml.dump(json_doc), + Name="TestDocument4", + DocumentType="Command", + DocumentFormat="JSON", + ) + raise RuntimeError("Should fail") + except botocore.exceptions.ClientError as err: + err.operation_name.should.equal("CreateDocument") + err.response["Error"]["Message"].should.equal( + "The content for the document is not valid." 
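+            # yaml.dump produces block-style YAML that json.loads cannot
+            # parse, so DocumentFormat="JSON" with YAML content raises
+            # InvalidDocumentContent (the ValueError path in moto/ssm/models.py).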
+ ) + + del json_doc["parameters"] + response = client.create_document( + Content=yaml.dump(json_doc), + Name="EmptyParamDoc", + DocumentType="Command", + DocumentFormat="YAML", + ) + doc_description = response["DocumentDescription"] + + doc_description["Hash"].should.equal( + hashlib.sha256(yaml.dump(json_doc).encode("utf-8")).hexdigest() + ) + doc_description["HashType"].should.equal("Sha256") + doc_description["Name"].should.equal("EmptyParamDoc") + doc_description["Owner"].should.equal(ACCOUNT_ID) + + difference = datetime.datetime.utcnow() - doc_description["CreatedDate"] + if difference.min > datetime.timedelta(minutes=1): + assert False + + doc_description["Status"].should.equal("Active") + doc_description["DocumentVersion"].should.equal("1") + doc_description["Description"].should.equal(json_doc["description"]) + doc_description["DocumentType"].should.equal("Command") + doc_description["SchemaVersion"].should.equal("2.2") + doc_description["LatestVersion"].should.equal("1") + doc_description["DefaultVersion"].should.equal("1") + doc_description["DocumentFormat"].should.equal("YAML") + # Done @mock_ssm @@ -508,6 +563,20 @@ def test_update_document(): VersionName="Base", ) + try: + client.update_document( + Name="TestDocument", + Content=json.dumps(json_doc), + DocumentVersion="2", + DocumentFormat="JSON", + ) + raise RuntimeError("Should fail") + except botocore.exceptions.ClientError as err: + err.operation_name.should.equal("UpdateDocument") + err.response["Error"]["Message"].should.equal( + "The document version is not valid or does not exist." + ) + # Duplicate content throws an error try: client.update_document( @@ -639,6 +708,7 @@ def test_list_documents(): Name="TestDocument3", DocumentType="Command", DocumentFormat="JSON", + TargetType="/AWS::EC2::Instance", ) response = client.list_documents() @@ -682,9 +752,7 @@ def test_list_documents(): DocumentFormat="JSON", ) - response = client.update_document_default_version( - Name="TestDocument", DocumentVersion="2" - ) + client.update_document_default_version(Name="TestDocument", DocumentVersion="2") response = client.list_documents() len(response["DocumentIdentifiers"]).should.equal(3) @@ -697,3 +765,11 @@ def test_list_documents(): response["DocumentIdentifiers"][2]["Name"].should.equal("TestDocument3") response["DocumentIdentifiers"][2]["DocumentVersion"].should.equal("1") response["NextToken"].should.equal("") + + response = client.list_documents(Filters=[{"Key": "Owner", "Values": ["Self"]}]) + len(response["DocumentIdentifiers"]).should.equal(3) + + response = client.list_documents( + Filters=[{"Key": "TargetType", "Values": ["/AWS::EC2::Instance"]}] + ) + len(response["DocumentIdentifiers"]).should.equal(1) From 4e0d5883073b3c12c27e888b978dba530708035a Mon Sep 17 00:00:00 2001 From: Bert Blommers Date: Fri, 3 Jul 2020 14:20:04 +0100 Subject: [PATCH 419/658] DynamoDB - Allow ProjectionType to be set for LSIs --- moto/dynamodb2/models/__init__.py | 36 ++++++++++++----------- tests/test_dynamodb2/test_dynamodb.py | 41 +++++++++++++++++++++++++++ 2 files changed, 60 insertions(+), 17 deletions(-) diff --git a/moto/dynamodb2/models/__init__.py b/moto/dynamodb2/models/__init__.py index 7e288bb9dec7..eafa2743af4c 100644 --- a/moto/dynamodb2/models/__init__.py +++ b/moto/dynamodb2/models/__init__.py @@ -272,7 +272,24 @@ def get(self, start, quantity): return [i.to_json() for i in self.items[start:end]] -class LocalSecondaryIndex(BaseModel): +class SecondaryIndex(BaseModel): + def project(self, item): + """ + Enforces the 
ProjectionType of this Index (LSI/GSI) + Removes any non-wanted attributes from the item + :param item: + :return: + """ + if self.projection: + if self.projection.get("ProjectionType", None) == "KEYS_ONLY": + allowed_attributes = ",".join( + [key["AttributeName"] for key in self.schema] + ) + item.filter(allowed_attributes) + return item + + +class LocalSecondaryIndex(SecondaryIndex): def __init__(self, index_name, schema, projection): self.name = index_name self.schema = schema @@ -294,7 +311,7 @@ def create(dct): ) -class GlobalSecondaryIndex(BaseModel): +class GlobalSecondaryIndex(SecondaryIndex): def __init__( self, index_name, schema, projection, status="ACTIVE", throughput=None ): @@ -331,21 +348,6 @@ def update(self, u): self.projection = u.get("Projection", self.projection) self.throughput = u.get("ProvisionedThroughput", self.throughput) - def project(self, item): - """ - Enforces the ProjectionType of this GSI - Removes any non-wanted attributes from the item - :param item: - :return: - """ - if self.projection: - if self.projection.get("ProjectionType", None) == "KEYS_ONLY": - allowed_attributes = ",".join( - [key["AttributeName"] for key in self.schema] - ) - item.filter(allowed_attributes) - return item - class Table(BaseModel): def __init__( diff --git a/tests/test_dynamodb2/test_dynamodb.py b/tests/test_dynamodb2/test_dynamodb.py index cf1548e03982..2dfb8fd2ddd1 100644 --- a/tests/test_dynamodb2/test_dynamodb.py +++ b/tests/test_dynamodb2/test_dynamodb.py @@ -5360,3 +5360,44 @@ def test_gsi_projection_type_keys_only(): items.should.have.length_of(1) # Item should only include GSI Keys, as per the ProjectionType items[0].should.equal({"gsiK1PartitionKey": "gsi-pk", "gsiK1SortKey": "gsi-sk"}) + + +@mock_dynamodb2 +def test_lsi_projection_type_keys_only(): + table_schema = { + "KeySchema": [{"AttributeName": "partitionKey", "KeyType": "HASH"}], + "LocalSecondaryIndexes": [ + { + "IndexName": "LSI", + "KeySchema": [ + {"AttributeName": "partitionKey", "KeyType": "HASH"}, + {"AttributeName": "lsiK1SortKey", "KeyType": "RANGE"}, + ], + "Projection": {"ProjectionType": "KEYS_ONLY",}, + } + ], + "AttributeDefinitions": [ + {"AttributeName": "partitionKey", "AttributeType": "S"}, + {"AttributeName": "lsiK1SortKey", "AttributeType": "S"}, + ], + } + + item = { + "partitionKey": "pk-1", + "lsiK1SortKey": "lsi-sk", + "someAttribute": "lore ipsum", + } + + dynamodb = boto3.resource("dynamodb", region_name="us-east-1") + dynamodb.create_table( + TableName="test-table", BillingMode="PAY_PER_REQUEST", **table_schema + ) + table = dynamodb.Table("test-table") + table.put_item(Item=item) + + items = table.query( + KeyConditionExpression=Key("partitionKey").eq("pk-1"), IndexName="LSI", + )["Items"] + items.should.have.length_of(1) + # Item should only include GSI Keys, as per the ProjectionType + items[0].should.equal({"partitionKey": "pk-1", "lsiK1SortKey": "lsi-sk"}) From b225e96ae0de5a1bad477df78949073f9249fe4b Mon Sep 17 00:00:00 2001 From: Dawn James Date: Fri, 3 Jul 2020 14:23:17 +0100 Subject: [PATCH 420/658] Application Autoscaling basic features (#3082) * Placeholder to test Application Autoscaling. * Wire everything together and create a first passing test without any real functionality. * Get one test working properly. * Add some TODO items. * Reformat code with black * Second passing test for describe_scalable_targets. * New test for NextToken. * Add some tests for ParamValidationError and ValidationException. 
* black * Ensure scalable targets are being captured in an OrderedDict() for deterministic return later. * Add validation to describe_scalable_targets and register_scalable_target. * Fix tests. * Add creation_time, refactor, add ECS backend, and add failing test for checking that ecs service exists. * Add parameter validation. * Improved documentation for CONTRIBUTING.md Adds some details to give people an idea what's involved in adding new features/services * Integrate with ECS. * black * Refactor to allow implementation of SuspendedState. * Complete support for SuspendedState. * Bump up implementation coverage percentage. * Tidy up code; add comments. * Implement suggested changes from code review. * Minor refactorings for elegance. * README update Co-authored-by: Bert Blommers --- CONTRIBUTING.md | 22 +- IMPLEMENTATION_COVERAGE.md | 6 +- README.md | 2 + moto/__init__.py | 3 + moto/applicationautoscaling/__init__.py | 6 + moto/applicationautoscaling/exceptions.py | 22 ++ moto/applicationautoscaling/models.py | 179 +++++++++++++++++ moto/applicationautoscaling/responses.py | 97 +++++++++ moto/applicationautoscaling/urls.py | 8 + moto/applicationautoscaling/utils.py | 10 + moto/backends.py | 4 + tests/test_applicationautoscaling/__init__.py | 1 + .../test_applicationautoscaling.py | 189 ++++++++++++++++++ .../test_validation.py | 123 ++++++++++++ 14 files changed, 668 insertions(+), 4 deletions(-) create mode 100644 moto/applicationautoscaling/__init__.py create mode 100644 moto/applicationautoscaling/exceptions.py create mode 100644 moto/applicationautoscaling/models.py create mode 100644 moto/applicationautoscaling/responses.py create mode 100644 moto/applicationautoscaling/urls.py create mode 100644 moto/applicationautoscaling/utils.py create mode 100644 tests/test_applicationautoscaling/__init__.py create mode 100644 tests/test_applicationautoscaling/test_applicationautoscaling.py create mode 100644 tests/test_applicationautoscaling/test_validation.py diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 7e54236bdf42..edcc4656176f 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -28,7 +28,27 @@ How to teach Moto to support a new AWS endpoint: * If one doesn't already exist, create a new issue describing what's missing. This is where we'll all talk about the new addition and help you get it done. * Create a [pull request](https://help.github.com/articles/using-pull-requests/) and mention the issue # in the PR description. * Try to add a failing test case. For example, if you're trying to implement `boto3.client('acm').import_certificate()` you'll want to add a new method called `def test_import_certificate` to `tests/test_acm/test_acm.py`. -* If you can also implement the code that gets that test passing that's great. If not, just ask the community for a hand and somebody will assist you. +* Implementing the feature itself can be done by creating a method called `import_certificate` in `moto/acm/responses.py`. It's considered good practice to deal with input/output formatting and validation in `responses.py`, and create a method `import_certificate` in `moto/acm/models.py` that handles the actual import logic. +* If you can also implement the code that gets that test passing then great! If not, just ask the community for a hand and somebody will assist you. + +## Before pushing changes to GitHub + +1. Run `black moto/ tests/` over your code to ensure that it is properly formatted +1. 
Run `make test` to ensure your tests are passing + +## Python versions + +moto currently supports both Python 2 and 3, so make sure your tests pass against both major versions of Python. + +## Missing services + +Implementing a new service from scratch is more work, but still quite straightforward. All the code that intercepts network requests to `*.amazonaws.com` is already handled for you in `moto/core` - all that's necessary for new services to be recognized is to create a new decorator and determine which URLs should be intercepted. + +See this PR for an example of what's involved in creating a new service: https://github.com/spulec/moto/pull/2409/files + +Note the `urls.py` that redirects all incoming URL requests to a generic `dispatch` method, which in turn will call the appropriate method in `responses.py`. + +If you want more control over incoming requests or their bodies, it is possible to redirect specific requests to a custom method. See this PR for an example: https://github.com/spulec/moto/pull/2957/files ## Maintainers diff --git a/IMPLEMENTATION_COVERAGE.md b/IMPLEMENTATION_COVERAGE.md index 43983d912c46..1d5eb946a05b 100644 --- a/IMPLEMENTATION_COVERAGE.md +++ b/IMPLEMENTATION_COVERAGE.md @@ -459,18 +459,18 @@ ## application-autoscaling
-0% implemented
+20% implemented
- [ ] delete_scaling_policy
- [ ] delete_scheduled_action
- [ ] deregister_scalable_target
-- [ ] describe_scalable_targets
+- [x] describe_scalable_targets
- [ ] describe_scaling_activities
- [ ] describe_scaling_policies
- [ ] describe_scheduled_actions
- [ ] put_scaling_policy
- [ ] put_scheduled_action
-- [ ] register_scalable_target
+- [x] register_scalable_target
## application-insights diff --git a/README.md b/README.md index 7a2862744d05..956be5da15ea 100644 --- a/README.md +++ b/README.md @@ -65,6 +65,8 @@ It gets even better! Moto isn't just for Python code and it isn't just for S3. L |-------------------------------------------------------------------------------------| | | API Gateway | @mock_apigateway | core endpoints done | | |-------------------------------------------------------------------------------------| | +| Application Autoscaling | @mock_applicationautoscaling | basic endpoints done | | +|-------------------------------------------------------------------------------------| | | Autoscaling | @mock_autoscaling | core endpoints done | | |-------------------------------------------------------------------------------------| | | Cloudformation | @mock_cloudformation | core endpoints done | | diff --git a/moto/__init__.py b/moto/__init__.py index 4f8f08eda5f4..b4375bfc6d83 100644 --- a/moto/__init__.py +++ b/moto/__init__.py @@ -15,6 +15,9 @@ def f(*args, **kwargs): mock_apigateway = lazy_load(".apigateway", "mock_apigateway") mock_apigateway_deprecated = lazy_load(".apigateway", "mock_apigateway_deprecated") mock_athena = lazy_load(".athena", "mock_athena") +mock_applicationautoscaling = lazy_load( + ".applicationautoscaling", "mock_applicationautoscaling" +) mock_autoscaling = lazy_load(".autoscaling", "mock_autoscaling") mock_autoscaling_deprecated = lazy_load(".autoscaling", "mock_autoscaling_deprecated") mock_lambda = lazy_load(".awslambda", "mock_lambda") diff --git a/moto/applicationautoscaling/__init__.py b/moto/applicationautoscaling/__init__.py new file mode 100644 index 000000000000..6e3db1ccf56c --- /dev/null +++ b/moto/applicationautoscaling/__init__.py @@ -0,0 +1,6 @@ +from __future__ import unicode_literals +from .models import applicationautoscaling_backends +from ..core.models import base_decorator + +applicationautoscaling_backend = applicationautoscaling_backends["us-east-1"] +mock_applicationautoscaling = base_decorator(applicationautoscaling_backends) diff --git a/moto/applicationautoscaling/exceptions.py b/moto/applicationautoscaling/exceptions.py new file mode 100644 index 000000000000..2e2e0ef9f86d --- /dev/null +++ b/moto/applicationautoscaling/exceptions.py @@ -0,0 +1,22 @@ +from __future__ import unicode_literals +import json + + +class AWSError(Exception): + """ Copied from acm/models.py; this class now exists in >5 locations, + maybe this should be centralised for use by any module? 
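+    Subclasses set TYPE to the AWS error code they represent; response()
+    renders the standard {"__type": ..., "message": ...} JSON body together
+    with the HTTP status code.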
+ """ + + TYPE = None + STATUS = 400 + + def __init__(self, message): + self.message = message + + def response(self): + resp = {"__type": self.TYPE, "message": self.message} + return json.dumps(resp), dict(status=self.STATUS) + + +class AWSValidationException(AWSError): + TYPE = "ValidationException" diff --git a/moto/applicationautoscaling/models.py b/moto/applicationautoscaling/models.py new file mode 100644 index 000000000000..39bb497aa524 --- /dev/null +++ b/moto/applicationautoscaling/models.py @@ -0,0 +1,179 @@ +from __future__ import unicode_literals +from moto.core import BaseBackend, BaseModel +from moto.ecs import ecs_backends +from .exceptions import AWSValidationException +from collections import OrderedDict +from enum import Enum, unique +import time + + +@unique +class ServiceNamespaceValueSet(Enum): + APPSTREAM = "appstream" + RDS = "rds" + LAMBDA = "lambda" + CASSANDRA = "cassandra" + DYNAMODB = "dynamodb" + CUSTOM_RESOURCE = "custom-resource" + ELASTICMAPREDUCE = "elasticmapreduce" + EC2 = "ec2" + COMPREHEND = "comprehend" + ECS = "ecs" + SAGEMAKER = "sagemaker" + + +@unique +class ScalableDimensionValueSet(Enum): + CASSANDRA_TABLE_READ_CAPACITY_UNITS = "cassandra:table:ReadCapacityUnits" + CASSANDRA_TABLE_WRITE_CAPACITY_UNITS = "cassandra:table:WriteCapacityUnits" + DYNAMODB_INDEX_READ_CAPACITY_UNITS = "dynamodb:index:ReadCapacityUnits" + DYNAMODB_INDEX_WRITE_CAPACITY_UNITS = "dynamodb:index:WriteCapacityUnits" + DYNAMODB_TABLE_READ_CAPACITY_UNITS = "dynamodb:table:ReadCapacityUnits" + DYNAMODB_TABLE_WRITE_CAPACITY_UNITS = "dynamodb:table:WriteCapacityUnits" + RDS_CLUSTER_READ_REPLICA_COUNT = "rds:cluster:ReadReplicaCount" + RDS_CLUSTER_CAPACITY = "rds:cluster:Capacity" + COMPREHEND_DOCUMENT_CLASSIFIER_ENDPOINT_DESIRED_INFERENCE_UNITS = ( + "comprehend:document-classifier-endpoint:DesiredInferenceUnits" + ) + ELASTICMAPREDUCE_INSTANCE_FLEET_ON_DEMAND_CAPACITY = ( + "elasticmapreduce:instancefleet:OnDemandCapacity" + ) + ELASTICMAPREDUCE_INSTANCE_FLEET_SPOT_CAPACITY = ( + "elasticmapreduce:instancefleet:SpotCapacity" + ) + ELASTICMAPREDUCE_INSTANCE_GROUP_INSTANCE_COUNT = ( + "elasticmapreduce:instancegroup:InstanceCount" + ) + LAMBDA_FUNCTION_PROVISIONED_CONCURRENCY = "lambda:function:ProvisionedConcurrency" + APPSTREAM_FLEET_DESIRED_CAPACITY = "appstream:fleet:DesiredCapacity" + CUSTOM_RESOURCE_RESOURCE_TYPE_PROPERTY = "custom-resource:ResourceType:Property" + SAGEMAKER_VARIANT_DESIRED_INSTANCE_COUNT = "sagemaker:variant:DesiredInstanceCount" + EC2_SPOT_FLEET_REQUEST_TARGET_CAPACITY = "ec2:spot-fleet-request:TargetCapacity" + ECS_SERVICE_DESIRED_COUNT = "ecs:service:DesiredCount" + + +class ApplicationAutoscalingBackend(BaseBackend): + def __init__(self, region, ecs): + super(ApplicationAutoscalingBackend, self).__init__() + self.region = region + self.ecs_backend = ecs + self.targets = OrderedDict() + + def reset(self): + region = self.region + ecs = self.ecs_backend + self.__dict__ = {} + self.__init__(region, ecs) + + @property + def applicationautoscaling_backend(self): + return applicationautoscaling_backends[self.region] + + def describe_scalable_targets( + self, namespace, r_ids=None, dimension=None, + ): + """ Describe scalable targets. 
""" + if r_ids is None: + r_ids = [] + targets = self._flatten_scalable_targets(namespace) + if dimension is not None: + targets = [t for t in targets if t.scalable_dimension == dimension] + if len(r_ids) > 0: + targets = [t for t in targets if t.resource_id in r_ids] + return targets + + def _flatten_scalable_targets(self, namespace): + """ Flatten scalable targets for a given service namespace down to a list. """ + targets = [] + for dimension in self.targets.keys(): + for resource_id in self.targets[dimension].keys(): + targets.append(self.targets[dimension][resource_id]) + targets = [t for t in targets if t.service_namespace == namespace] + return targets + + def register_scalable_target(self, namespace, r_id, dimension, **kwargs): + """ Registers or updates a scalable target. """ + _ = _target_params_are_valid(namespace, r_id, dimension) + if namespace == ServiceNamespaceValueSet.ECS.value: + _ = self._ecs_service_exists_for_target(r_id) + if self._scalable_target_exists(r_id, dimension): + target = self.targets[dimension][r_id] + target.update(kwargs) + else: + target = FakeScalableTarget(self, namespace, r_id, dimension, **kwargs) + self._add_scalable_target(target) + return target + + def _scalable_target_exists(self, r_id, dimension): + return r_id in self.targets.get(dimension, []) + + def _ecs_service_exists_for_target(self, r_id): + """ Raises a ValidationException if an ECS service does not exist + for the specified resource ID. + """ + resource_type, cluster, service = r_id.split("/") + result = self.ecs_backend.describe_services(cluster, [service]) + if len(result) != 1: + raise AWSValidationException("ECS service doesn't exist: {}".format(r_id)) + return True + + def _add_scalable_target(self, target): + if target.scalable_dimension not in self.targets: + self.targets[target.scalable_dimension] = OrderedDict() + if target.resource_id not in self.targets[target.scalable_dimension]: + self.targets[target.scalable_dimension][target.resource_id] = target + return target + + +def _target_params_are_valid(namespace, r_id, dimension): + """ Check whether namespace, resource_id and dimension are valid and consistent with each other. 
""" + is_valid = True + valid_namespaces = [n.value for n in ServiceNamespaceValueSet] + if namespace not in valid_namespaces: + is_valid = False + if dimension is not None: + try: + valid_dimensions = [d.value for d in ScalableDimensionValueSet] + d_namespace, d_resource_type, scaling_property = dimension.split(":") + resource_type, cluster, service = r_id.split("/") + if ( + dimension not in valid_dimensions + or d_namespace != namespace + or resource_type != d_resource_type + ): + is_valid = False + except ValueError: + is_valid = False + if not is_valid: + raise AWSValidationException( + "Unsupported service namespace, resource type or scalable dimension" + ) + return is_valid + + +class FakeScalableTarget(BaseModel): + def __init__( + self, backend, service_namespace, resource_id, scalable_dimension, **kwargs + ): + self.applicationautoscaling_backend = backend + self.service_namespace = service_namespace + self.resource_id = resource_id + self.scalable_dimension = scalable_dimension + self.min_capacity = kwargs["min_capacity"] + self.max_capacity = kwargs["max_capacity"] + self.role_arn = kwargs["role_arn"] + self.suspended_state = kwargs["suspended_state"] + self.creation_time = time.time() + + def update(self, **kwargs): + if kwargs["min_capacity"] is not None: + self.min_capacity = kwargs["min_capacity"] + if kwargs["max_capacity"] is not None: + self.max_capacity = kwargs["max_capacity"] + + +applicationautoscaling_backends = {} +for region_name, ecs_backend in ecs_backends.items(): + applicationautoscaling_backends[region_name] = ApplicationAutoscalingBackend( + region_name, ecs_backend + ) diff --git a/moto/applicationautoscaling/responses.py b/moto/applicationautoscaling/responses.py new file mode 100644 index 000000000000..9a2905d79190 --- /dev/null +++ b/moto/applicationautoscaling/responses.py @@ -0,0 +1,97 @@ +from __future__ import unicode_literals +from moto.core.responses import BaseResponse +import json +from .models import ( + applicationautoscaling_backends, + ScalableDimensionValueSet, + ServiceNamespaceValueSet, +) +from .exceptions import AWSValidationException + + +class ApplicationAutoScalingResponse(BaseResponse): + @property + def applicationautoscaling_backend(self): + return applicationautoscaling_backends[self.region] + + def describe_scalable_targets(self): + try: + self._validate_params() + except AWSValidationException as e: + return e.response() + service_namespace = self._get_param("ServiceNamespace") + resource_ids = self._get_param("ResourceIds") + scalable_dimension = self._get_param("ScalableDimension") + max_results = self._get_int_param("MaxResults", 50) + marker = self._get_param("NextToken") + all_scalable_targets = self.applicationautoscaling_backend.describe_scalable_targets( + service_namespace, resource_ids, scalable_dimension + ) + start = int(marker) + 1 if marker else 0 + next_token = None + scalable_targets_resp = all_scalable_targets[start : start + max_results] + if len(all_scalable_targets) > start + max_results: + next_token = str(len(scalable_targets_resp) - 1) + targets = [_build_target(t) for t in scalable_targets_resp] + return json.dumps({"ScalableTargets": targets, "NextToken": next_token}) + + def register_scalable_target(self): + """ Registers or updates a scalable target. 
""" + try: + self._validate_params() + self.applicationautoscaling_backend.register_scalable_target( + self._get_param("ServiceNamespace"), + self._get_param("ResourceId"), + self._get_param("ScalableDimension"), + min_capacity=self._get_int_param("MinCapacity"), + max_capacity=self._get_int_param("MaxCapacity"), + role_arn=self._get_param("RoleARN"), + suspended_state=self._get_param("SuspendedState"), + ) + except AWSValidationException as e: + return e.response() + return json.dumps({}) + + def _validate_params(self): + """ Validate parameters. + TODO Integrate this validation with the validation in models.py + """ + namespace = self._get_param("ServiceNamespace") + dimension = self._get_param("ScalableDimension") + messages = [] + dimensions = [d.value for d in ScalableDimensionValueSet] + message = None + if dimension is not None and dimension not in dimensions: + messages.append( + "Value '{}' at 'scalableDimension' " + "failed to satisfy constraint: Member must satisfy enum value set: " + "{}".format(dimension, dimensions) + ) + namespaces = [n.value for n in ServiceNamespaceValueSet] + if namespace is not None and namespace not in namespaces: + messages.append( + "Value '{}' at 'serviceNamespace' " + "failed to satisfy constraint: Member must satisfy enum value set: " + "{}".format(namespace, namespaces) + ) + if len(messages) == 1: + message = "1 validation error detected: {}".format(messages[0]) + elif len(messages) > 1: + message = "{} validation errors detected: {}".format( + len(messages), "; ".join(messages) + ) + if message: + raise AWSValidationException(message) + + +def _build_target(t): + return { + "CreationTime": t.creation_time, + "ServiceNamespace": t.service_namespace, + "ResourceId": t.resource_id, + "RoleARN": t.role_arn, + "ScalableDimension": t.scalable_dimension, + "MaxCapacity": t.max_capacity, + "MinCapacity": t.min_capacity, + "SuspendedState": t.suspended_state, + } diff --git a/moto/applicationautoscaling/urls.py b/moto/applicationautoscaling/urls.py new file mode 100644 index 000000000000..8a608f954d62 --- /dev/null +++ b/moto/applicationautoscaling/urls.py @@ -0,0 +1,8 @@ +from __future__ import unicode_literals +from .responses import ApplicationAutoScalingResponse + +url_bases = ["https?://application-autoscaling.(.+).amazonaws.com"] + +url_paths = { + "{0}/$": ApplicationAutoScalingResponse.dispatch, +} diff --git a/moto/applicationautoscaling/utils.py b/moto/applicationautoscaling/utils.py new file mode 100644 index 000000000000..72330c508ffa --- /dev/null +++ b/moto/applicationautoscaling/utils.py @@ -0,0 +1,10 @@ +from six.moves.urllib.parse import urlparse + + +def region_from_applicationautoscaling_url(url): + domain = urlparse(url).netloc + + if "." 
in domain: + return domain.split(".")[1] + else: + return "us-east-1" diff --git a/moto/backends.py b/moto/backends.py index 44534d574a59..6f612bf1f65a 100644 --- a/moto/backends.py +++ b/moto/backends.py @@ -6,6 +6,10 @@ "acm": ("acm", "acm_backends"), "apigateway": ("apigateway", "apigateway_backends"), "athena": ("athena", "athena_backends"), + "applicationautoscaling": ( + "applicationautoscaling", + "applicationautoscaling_backends", + ), "autoscaling": ("autoscaling", "autoscaling_backends"), "batch": ("batch", "batch_backends"), "cloudformation": ("cloudformation", "cloudformation_backends"), diff --git a/tests/test_applicationautoscaling/__init__.py b/tests/test_applicationautoscaling/__init__.py new file mode 100644 index 000000000000..baffc4882521 --- /dev/null +++ b/tests/test_applicationautoscaling/__init__.py @@ -0,0 +1 @@ +from __future__ import unicode_literals diff --git a/tests/test_applicationautoscaling/test_applicationautoscaling.py b/tests/test_applicationautoscaling/test_applicationautoscaling.py new file mode 100644 index 000000000000..632804992f53 --- /dev/null +++ b/tests/test_applicationautoscaling/test_applicationautoscaling.py @@ -0,0 +1,189 @@ +from __future__ import unicode_literals +import boto3 +from moto import mock_applicationautoscaling, mock_ecs +import sure # noqa +from nose.tools import with_setup + +DEFAULT_REGION = "us-east-1" +DEFAULT_ECS_CLUSTER = "default" +DEFAULT_ECS_TASK = "test_ecs_task" +DEFAULT_ECS_SERVICE = "sample-webapp" +DEFAULT_SERVICE_NAMESPACE = "ecs" +DEFAULT_RESOURCE_ID = "service/{}/{}".format(DEFAULT_ECS_CLUSTER, DEFAULT_ECS_SERVICE) +DEFAULT_SCALABLE_DIMENSION = "ecs:service:DesiredCount" +DEFAULT_MIN_CAPACITY = 1 +DEFAULT_MAX_CAPACITY = 1 +DEFAULT_ROLE_ARN = "test:arn" +DEFAULT_SUSPENDED_STATE = { + "DynamicScalingInSuspended": True, + "DynamicScalingOutSuspended": True, + "ScheduledScalingSuspended": True, +} + + +def _create_ecs_defaults(ecs, create_service=True): + _ = ecs.create_cluster(clusterName=DEFAULT_ECS_CLUSTER) + _ = ecs.register_task_definition( + family=DEFAULT_ECS_TASK, + containerDefinitions=[ + { + "name": "hello_world", + "image": "docker/hello-world:latest", + "cpu": 1024, + "memory": 400, + "essential": True, + "environment": [ + {"name": "AWS_ACCESS_KEY_ID", "value": "SOME_ACCESS_KEY"} + ], + "logConfiguration": {"logDriver": "json-file"}, + } + ], + ) + if create_service: + _ = ecs.create_service( + cluster=DEFAULT_ECS_CLUSTER, + serviceName=DEFAULT_ECS_SERVICE, + taskDefinition=DEFAULT_ECS_TASK, + desiredCount=2, + ) + + +@mock_ecs +@mock_applicationautoscaling +def test_describe_scalable_targets_one_basic_ecs_success(): + ecs = boto3.client("ecs", region_name=DEFAULT_REGION) + _create_ecs_defaults(ecs) + client = boto3.client("application-autoscaling", region_name=DEFAULT_REGION) + client.register_scalable_target( + ServiceNamespace=DEFAULT_SERVICE_NAMESPACE, + ResourceId=DEFAULT_RESOURCE_ID, + ScalableDimension=DEFAULT_SCALABLE_DIMENSION, + ) + response = client.describe_scalable_targets( + ServiceNamespace=DEFAULT_SERVICE_NAMESPACE + ) + response["ResponseMetadata"]["HTTPStatusCode"].should.equal(200) + len(response["ScalableTargets"]).should.equal(1) + t = response["ScalableTargets"][0] + t.should.have.key("ServiceNamespace").which.should.equal(DEFAULT_SERVICE_NAMESPACE) + t.should.have.key("ResourceId").which.should.equal(DEFAULT_RESOURCE_ID) + t.should.have.key("ScalableDimension").which.should.equal( + DEFAULT_SCALABLE_DIMENSION + ) + 
t.should.have.key("CreationTime").which.should.be.a("datetime.datetime") + + +@mock_ecs +@mock_applicationautoscaling +def test_describe_scalable_targets_one_full_ecs_success(): + ecs = boto3.client("ecs", region_name=DEFAULT_REGION) + _create_ecs_defaults(ecs) + client = boto3.client("application-autoscaling", region_name=DEFAULT_REGION) + register_scalable_target(client) + response = client.describe_scalable_targets( + ServiceNamespace=DEFAULT_SERVICE_NAMESPACE + ) + response["ResponseMetadata"]["HTTPStatusCode"].should.equal(200) + len(response["ScalableTargets"]).should.equal(1) + t = response["ScalableTargets"][0] + t.should.have.key("ServiceNamespace").which.should.equal(DEFAULT_SERVICE_NAMESPACE) + t.should.have.key("ResourceId").which.should.equal(DEFAULT_RESOURCE_ID) + t.should.have.key("ScalableDimension").which.should.equal( + DEFAULT_SCALABLE_DIMENSION + ) + t.should.have.key("MinCapacity").which.should.equal(DEFAULT_MIN_CAPACITY) + t.should.have.key("MaxCapacity").which.should.equal(DEFAULT_MAX_CAPACITY) + t.should.have.key("RoleARN").which.should.equal(DEFAULT_ROLE_ARN) + t.should.have.key("CreationTime").which.should.be.a("datetime.datetime") + t.should.have.key("SuspendedState") + t["SuspendedState"]["DynamicScalingInSuspended"].should.equal( + DEFAULT_SUSPENDED_STATE["DynamicScalingInSuspended"] + ) + + +@mock_ecs +@mock_applicationautoscaling +def test_describe_scalable_targets_only_return_ecs_targets(): + ecs = boto3.client("ecs", region_name=DEFAULT_REGION) + _create_ecs_defaults(ecs, create_service=False) + _ = ecs.create_service( + cluster=DEFAULT_ECS_CLUSTER, + serviceName="test1", + taskDefinition=DEFAULT_ECS_TASK, + desiredCount=2, + ) + _ = ecs.create_service( + cluster=DEFAULT_ECS_CLUSTER, + serviceName="test2", + taskDefinition=DEFAULT_ECS_TASK, + desiredCount=2, + ) + client = boto3.client("application-autoscaling", region_name=DEFAULT_REGION) + register_scalable_target( + client, + ServiceNamespace="ecs", + ResourceId="service/{}/test1".format(DEFAULT_ECS_CLUSTER), + ) + register_scalable_target( + client, + ServiceNamespace="ecs", + ResourceId="service/{}/test2".format(DEFAULT_ECS_CLUSTER), + ) + register_scalable_target( + client, + ServiceNamespace="elasticmapreduce", + ResourceId="instancegroup/j-2EEZNYKUA1NTV/ig-1791Y4E1L8YI0", + ScalableDimension="elasticmapreduce:instancegroup:InstanceCount", + ) + response = client.describe_scalable_targets( + ServiceNamespace=DEFAULT_SERVICE_NAMESPACE + ) + response["ResponseMetadata"]["HTTPStatusCode"].should.equal(200) + len(response["ScalableTargets"]).should.equal(2) + + +@mock_ecs +@mock_applicationautoscaling +def test_describe_scalable_targets_next_token_success(): + ecs = boto3.client("ecs", region_name=DEFAULT_REGION) + _create_ecs_defaults(ecs, create_service=False) + client = boto3.client("application-autoscaling", region_name=DEFAULT_REGION) + for i in range(0, 100): + _ = ecs.create_service( + cluster=DEFAULT_ECS_CLUSTER, + serviceName=str(i), + taskDefinition=DEFAULT_ECS_TASK, + desiredCount=2, + ) + register_scalable_target( + client, + ServiceNamespace="ecs", + ResourceId="service/{}/{}".format(DEFAULT_ECS_CLUSTER, i), + ) + response = client.describe_scalable_targets( + ServiceNamespace=DEFAULT_SERVICE_NAMESPACE + ) + response["ResponseMetadata"]["HTTPStatusCode"].should.equal(200) + len(response["ScalableTargets"]).should.equal(50) + response["ScalableTargets"][0]["ResourceId"].should.equal("service/default/0") + response.should.have.key("NextToken").which.should.equal("49") + response = 
client.describe_scalable_targets( + ServiceNamespace=DEFAULT_SERVICE_NAMESPACE, NextToken=str(response["NextToken"]) + ) + response["ResponseMetadata"]["HTTPStatusCode"].should.equal(200) + len(response["ScalableTargets"]).should.equal(50) + response["ScalableTargets"][0]["ResourceId"].should.equal("service/default/50") + response.should_not.have.key("NextToken") + + +def register_scalable_target(client, **kwargs): + """ Build a default scalable target object for use in tests. """ + return client.register_scalable_target( + ServiceNamespace=kwargs.get("ServiceNamespace", DEFAULT_SERVICE_NAMESPACE), + ResourceId=kwargs.get("ResourceId", DEFAULT_RESOURCE_ID), + ScalableDimension=kwargs.get("ScalableDimension", DEFAULT_SCALABLE_DIMENSION), + MinCapacity=kwargs.get("MinCapacity", DEFAULT_MIN_CAPACITY), + MaxCapacity=kwargs.get("MaxCapacity", DEFAULT_MAX_CAPACITY), + RoleARN=kwargs.get("RoleARN", DEFAULT_ROLE_ARN), + SuspendedState=kwargs.get("SuspendedState", DEFAULT_SUSPENDED_STATE), + ) diff --git a/tests/test_applicationautoscaling/test_validation.py b/tests/test_applicationautoscaling/test_validation.py new file mode 100644 index 000000000000..02281ab05ad3 --- /dev/null +++ b/tests/test_applicationautoscaling/test_validation.py @@ -0,0 +1,123 @@ +from __future__ import unicode_literals +import boto3 +from moto import mock_applicationautoscaling, mock_ecs +from moto.applicationautoscaling import models +from moto.applicationautoscaling.exceptions import AWSValidationException +from botocore.exceptions import ParamValidationError +from nose.tools import assert_raises +import sure # noqa +from botocore.exceptions import ClientError +from parameterized import parameterized +from .test_applicationautoscaling import register_scalable_target + +DEFAULT_REGION = "us-east-1" +DEFAULT_ECS_CLUSTER = "default" +DEFAULT_ECS_TASK = "test_ecs_task" +DEFAULT_ECS_SERVICE = "sample-webapp" +DEFAULT_SERVICE_NAMESPACE = "ecs" +DEFAULT_RESOURCE_ID = "service/{}/{}".format(DEFAULT_ECS_CLUSTER, DEFAULT_ECS_SERVICE) +DEFAULT_SCALABLE_DIMENSION = "ecs:service:DesiredCount" +DEFAULT_MIN_CAPACITY = 1 +DEFAULT_MAX_CAPACITY = 1 +DEFAULT_ROLE_ARN = "test:arn" + + +@mock_applicationautoscaling +def test_describe_scalable_targets_no_params_should_raise_param_validation_errors(): + client = boto3.client("application-autoscaling", region_name=DEFAULT_REGION) + with assert_raises(ParamValidationError): + client.describe_scalable_targets() + + +@mock_applicationautoscaling +def test_register_scalable_target_no_params_should_raise_param_validation_errors(): + client = boto3.client("application-autoscaling", region_name=DEFAULT_REGION) + with assert_raises(ParamValidationError): + client.register_scalable_target() + + +@mock_applicationautoscaling +def test_register_scalable_target_with_none_service_namespace_should_raise_param_validation_errors(): + client = boto3.client("application-autoscaling", region_name=DEFAULT_REGION) + with assert_raises(ParamValidationError): + register_scalable_target(client, ServiceNamespace=None) + + +@mock_applicationautoscaling +def test_describe_scalable_targets_with_invalid_scalable_dimension_should_return_validation_exception(): + client = boto3.client("application-autoscaling", region_name=DEFAULT_REGION) + + with assert_raises(ClientError) as err: + response = client.describe_scalable_targets( + ServiceNamespace=DEFAULT_SERVICE_NAMESPACE, ScalableDimension="foo", + ) + err.response["Error"]["Code"].should.equal("ValidationException") + 
err.response["Error"]["Message"].split(":")[0].should.look_like( + "1 validation error detected" + ) + err.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) + + +@mock_applicationautoscaling +def test_describe_scalable_targets_with_invalid_service_namespace_should_return_validation_exception(): + client = boto3.client("application-autoscaling", region_name=DEFAULT_REGION) + + with assert_raises(ClientError) as err: + response = client.describe_scalable_targets( + ServiceNamespace="foo", ScalableDimension=DEFAULT_SCALABLE_DIMENSION, + ) + err.response["Error"]["Code"].should.equal("ValidationException") + err.response["Error"]["Message"].split(":")[0].should.look_like( + "1 validation error detected" + ) + err.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) + + +@mock_applicationautoscaling +def test_describe_scalable_targets_with_multiple_invalid_parameters_should_return_validation_exception(): + client = boto3.client("application-autoscaling", region_name=DEFAULT_REGION) + + with assert_raises(ClientError) as err: + response = client.describe_scalable_targets( + ServiceNamespace="foo", ScalableDimension="bar", + ) + err.response["Error"]["Code"].should.equal("ValidationException") + err.response["Error"]["Message"].split(":")[0].should.look_like( + "2 validation errors detected" + ) + err.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) + + +@mock_ecs +@mock_applicationautoscaling +def test_register_scalable_target_ecs_with_non_existent_service_should_return_validation_exception(): + client = boto3.client("application-autoscaling", region_name=DEFAULT_REGION) + resource_id = "service/{}/foo".format(DEFAULT_ECS_CLUSTER) + + with assert_raises(ClientError) as err: + register_scalable_target(client, ServiceNamespace="ecs", ResourceId=resource_id) + err.response["Error"]["Code"].should.equal("ValidationException") + err.response["Error"]["Message"].should.equal( + "ECS service doesn't exist: {}".format(resource_id) + ) + err.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) + + +@parameterized( + [ + ("ecs", "service/default/test-svc", "ecs:service:DesiredCount", True), + ("ecs", "banana/default/test-svc", "ecs:service:DesiredCount", False), + ("rds", "service/default/test-svc", "ecs:service:DesiredCount", False), + ] +) +def test_target_params_are_valid_success(namespace, r_id, dimension, expected): + if expected is True: + models._target_params_are_valid(namespace, r_id, dimension).should.equal( + expected + ) + else: + with assert_raises(AWSValidationException): + models._target_params_are_valid(namespace, r_id, dimension) + + +# TODO add a test for not-supplied MinCapacity or MaxCapacity (ValidationException) From c1326ed8ccf2312e5594bfc196a2f821bdae738e Mon Sep 17 00:00:00 2001 From: Alex Bainbridge Date: Fri, 3 Jul 2020 13:25:03 -0400 Subject: [PATCH 421/658] removed done comments --- tests/test_ssm/test_ssm_docs.py | 6 ------ 1 file changed, 6 deletions(-) diff --git a/tests/test_ssm/test_ssm_docs.py b/tests/test_ssm/test_ssm_docs.py index d39fa12c6a59..9a1fb7cf4749 100644 --- a/tests/test_ssm/test_ssm_docs.py +++ b/tests/test_ssm/test_ssm_docs.py @@ -134,7 +134,6 @@ def _get_doc_validator( response["DocumentFormat"].should.equal(document_format) -# Done @mock_ssm def test_create_document(): template_file = _get_yaml_template() @@ -240,7 +239,6 @@ def test_create_document(): doc_description["DocumentFormat"].should.equal("YAML") -# Done @mock_ssm def test_get_document(): template_file = _get_yaml_template() @@ -472,7 +470,6 
@@ def test_delete_document():
     len(response["DocumentIdentifiers"]).should.equal(0)
 
 
-# Done
 @mock_ssm
 def test_update_document_default_version():
     template_file = _get_yaml_template()
@@ -533,7 +530,6 @@ def test_update_document_default_version():
     response["Description"]["DefaultVersionName"].should.equal("NewBase")
 
 
-# Done
 @mock_ssm
 def test_update_document():
     template_file = _get_yaml_template()
@@ -638,7 +634,6 @@ def test_update_document():
     response["DocumentDescription"]["VersionName"].should.equal("NewBase")
 
 
-# Done
 @mock_ssm
 def test_describe_document():
     template_file = _get_yaml_template()
@@ -683,7 +678,6 @@ def test_describe_document():
     )
 
 
-# Done
 @mock_ssm
 def test_list_documents():
     template_file = _get_yaml_template()

From 81a5ae6ef4fb4042321820e3afd2ca5a5a4cdcc2 Mon Sep 17 00:00:00 2001
From: Bert Blommers
Date: Fri, 3 Jul 2020 18:35:03 +0100
Subject: [PATCH 422/658] SSM - Get your own regions, instead of relying on EC2

---
 moto/ssm/models.py | 8 ++++++--
 1 file changed, 6 insertions(+), 2 deletions(-)

diff --git a/moto/ssm/models.py b/moto/ssm/models.py
index 8da0a97c58fd..37750d94499a 100644
--- a/moto/ssm/models.py
+++ b/moto/ssm/models.py
@@ -1,11 +1,11 @@
 from __future__ import unicode_literals
 
 import re
+from boto3 import Session
 from collections import defaultdict
 
 from moto.core import BaseBackend, BaseModel
 from moto.core.exceptions import RESTError
-from moto.ec2 import ec2_backends
 from moto.cloudformation import cloudformation_backends
 
 import datetime
@@ -807,5 +807,9 @@ def get_command_invocation(self, **kwargs):
 
 
 ssm_backends = {}
-for region, ec2_backend in ec2_backends.items():
+for region in Session().get_available_regions("ssm"):
+    ssm_backends[region] = SimpleSystemManagerBackend(region)
+for region in Session().get_available_regions("ssm", partition_name="aws-us-gov"):
+    ssm_backends[region] = SimpleSystemManagerBackend(region)
+for region in Session().get_available_regions("ssm", partition_name="aws-cn"):
     ssm_backends[region] = SimpleSystemManagerBackend(region)

From 7a801a888e2b083c326803062ed8dcb0acbb06e2 Mon Sep 17 00:00:00 2001
From: Ninh Khong
Date: Sat, 4 Jul 2020 01:09:31 +0700
Subject: [PATCH 423/658] Add region information for requesterVpcInfo and
 accepterVpcInfo

---
 moto/ec2/responses/vpc_peering_connections.py | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/moto/ec2/responses/vpc_peering_connections.py b/moto/ec2/responses/vpc_peering_connections.py
index 3bf86af8a4f7..84dbf2bf5362 100644
--- a/moto/ec2/responses/vpc_peering_connections.py
+++ b/moto/ec2/responses/vpc_peering_connections.py
@@ -86,6 +86,7 @@ def reject_vpc_peering_connection(self):
         <ownerId>777788889999</ownerId>
         <vpcId>{{ vpc_pcx.vpc.id }}</vpcId>
         <cidrBlock>{{ vpc_pcx.vpc.cidr_block }}</cidrBlock>
+        <region>{{ vpc_pcx.vpc.ec2_backend.region_name }}</region>
       </requesterVpcInfo>
       <accepterVpcInfo>
         <ownerId>"""
@@ -98,6 +99,7 @@ def reject_vpc_peering_connection(self):
           <allowEgressFromLocalClassicLinkToRemoteVpc>true</allowEgressFromLocalClassicLinkToRemoteVpc>
           <allowEgressFromLocalVpcToRemoteClassicLink>false</allowEgressFromLocalVpcToRemoteClassicLink>
         </peeringOptions>
+        <region>{{ vpc_pcx.peer_vpc.ec2_backend.region_name }}</region>
       </accepterVpcInfo>
       <status>
         <code>{{ vpc_pcx._status.code }}</code>
@@ -128,6 +130,7 @@ def reject_vpc_peering_connection(self):
             <ownerId>777788889999</ownerId>
             <vpcId>{{ vpc_pcx.vpc.id }}</vpcId>
             <cidrBlock>{{ vpc_pcx.vpc.cidr_block }}</cidrBlock>
+            <region>{{ vpc_pcx.vpc.ec2_backend.region_name }}</region>
           </requesterVpcInfo>
           <accepterVpcInfo>
             <ownerId>"""
@@ -140,6 +143,7 @@ def reject_vpc_peering_connection(self):
               <allowEgressFromLocalClassicLinkToRemoteVpc>false</allowEgressFromLocalClassicLinkToRemoteVpc>
               <allowEgressFromLocalVpcToRemoteClassicLink>false</allowEgressFromLocalVpcToRemoteClassicLink>
             </peeringOptions>
+            <region>{{ vpc_pcx.peer_vpc.ec2_backend.region_name }}</region>
           </accepterVpcInfo>
           <status>
             <code>{{ vpc_pcx._status.code }}</code>

From 2a950f0da207179621b0517838064dc8a1aed439 Mon Sep 17 00:00:00 2001
From: Mike Grima
Date: Sat, 4 Jul 2020 12:36:14 -0700
Subject: [PATCH 424/658] Fixed circular import with RDS and CF

---
 moto/rds/exceptions.py | 10 ++++++++++
 moto/rds/models.py     |  2 +-
 2 files changed, 11 insertions(+), 1 deletion(-)

diff --git
a/moto/rds/exceptions.py b/moto/rds/exceptions.py index cf9b9aac6c21..6fe30878ba32 100644 --- a/moto/rds/exceptions.py +++ b/moto/rds/exceptions.py @@ -36,3 +36,13 @@ def __init__(self, subnet_group_name): "DBSubnetGroupNotFound", "Subnet Group {0} not found.".format(subnet_group_name), ) + + +class UnformattedGetAttTemplateException(Exception): + """Duplicated from CloudFormation to prevent circular deps.""" + + description = ( + "Template error: resource {0} does not support attribute type {1} in Fn::GetAtt" + ) + + status_code = 400 diff --git a/moto/rds/models.py b/moto/rds/models.py index 421f3784b706..40b1197b6326 100644 --- a/moto/rds/models.py +++ b/moto/rds/models.py @@ -3,10 +3,10 @@ import boto.rds from jinja2 import Template -from moto.cloudformation.exceptions import UnformattedGetAttTemplateException from moto.core import BaseBackend, BaseModel from moto.core.utils import get_random_hex from moto.ec2.models import ec2_backends +from moto.rds.exceptions import UnformattedGetAttTemplateException from moto.rds2.models import rds2_backends From 87eb8a21d6a472880484e3531144635aee4ff29b Mon Sep 17 00:00:00 2001 From: Ninh Khong Date: Sun, 5 Jul 2020 22:09:57 +0700 Subject: [PATCH 425/658] Update unittest checking region response in accept_vpc_peering_connection and describe_vpc_peering_connects functions --- tests/test_ec2/test_vpc_peering.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/tests/test_ec2/test_vpc_peering.py b/tests/test_ec2/test_vpc_peering.py index fc1646961a45..ce1c1e30f13f 100644 --- a/tests/test_ec2/test_vpc_peering.py +++ b/tests/test_ec2/test_vpc_peering.py @@ -160,8 +160,14 @@ def test_vpc_peering_connections_cross_region_accept(): VpcPeeringConnectionIds=[vpc_pcx_usw1.id] ) acp_pcx_apn1["VpcPeeringConnection"]["Status"]["Code"].should.equal("active") + acp_pcx_apn1["VpcPeeringConnection"]["AccepterVpcInfo"]["Region"].should.equal("ap-northeast-1") + acp_pcx_apn1["VpcPeeringConnection"]["RequesterVpcInfo"]["Region"].should.equal("us-west-1") des_pcx_apn1["VpcPeeringConnections"][0]["Status"]["Code"].should.equal("active") + des_pcx_apn1["VpcPeeringConnections"][0]["AccepterVpcInfo"]["Region"].should.equal("ap-northeast-1") + des_pcx_apn1["VpcPeeringConnections"][0]["RequesterVpcInfo"]["Region"].should.equal("us-west-1") des_pcx_usw1["VpcPeeringConnections"][0]["Status"]["Code"].should.equal("active") + des_pcx_usw1["VpcPeeringConnections"][0]["AccepterVpcInfo"]["Region"].should.equal("ap-northeast-1") + des_pcx_usw1["VpcPeeringConnections"][0]["RequesterVpcInfo"]["Region"].should.equal("us-west-1") @mock_ec2 From b7671819df08b8faadb8fdb36240862a25204cab Mon Sep 17 00:00:00 2001 From: Ninh Khong Date: Sun, 5 Jul 2020 23:04:34 +0700 Subject: [PATCH 426/658] Update code lint --- tests/test_ec2/test_vpc_peering.py | 24 ++++++++++++++++++------ 1 file changed, 18 insertions(+), 6 deletions(-) diff --git a/tests/test_ec2/test_vpc_peering.py b/tests/test_ec2/test_vpc_peering.py index ce1c1e30f13f..b535518dedd6 100644 --- a/tests/test_ec2/test_vpc_peering.py +++ b/tests/test_ec2/test_vpc_peering.py @@ -160,14 +160,26 @@ def test_vpc_peering_connections_cross_region_accept(): VpcPeeringConnectionIds=[vpc_pcx_usw1.id] ) acp_pcx_apn1["VpcPeeringConnection"]["Status"]["Code"].should.equal("active") - acp_pcx_apn1["VpcPeeringConnection"]["AccepterVpcInfo"]["Region"].should.equal("ap-northeast-1") - acp_pcx_apn1["VpcPeeringConnection"]["RequesterVpcInfo"]["Region"].should.equal("us-west-1") + 
acp_pcx_apn1["VpcPeeringConnection"]["AccepterVpcInfo"]["Region"].should.equal( + "ap-northeast-1" + ) + acp_pcx_apn1["VpcPeeringConnection"]["RequesterVpcInfo"]["Region"].should.equal( + "us-west-1" + ) des_pcx_apn1["VpcPeeringConnections"][0]["Status"]["Code"].should.equal("active") - des_pcx_apn1["VpcPeeringConnections"][0]["AccepterVpcInfo"]["Region"].should.equal("ap-northeast-1") - des_pcx_apn1["VpcPeeringConnections"][0]["RequesterVpcInfo"]["Region"].should.equal("us-west-1") + des_pcx_apn1["VpcPeeringConnections"][0]["AccepterVpcInfo"]["Region"].should.equal( + "ap-northeast-1" + ) + des_pcx_apn1["VpcPeeringConnections"][0]["RequesterVpcInfo"]["Region"].should.equal( + "us-west-1" + ) des_pcx_usw1["VpcPeeringConnections"][0]["Status"]["Code"].should.equal("active") - des_pcx_usw1["VpcPeeringConnections"][0]["AccepterVpcInfo"]["Region"].should.equal("ap-northeast-1") - des_pcx_usw1["VpcPeeringConnections"][0]["RequesterVpcInfo"]["Region"].should.equal("us-west-1") + des_pcx_usw1["VpcPeeringConnections"][0]["AccepterVpcInfo"]["Region"].should.equal( + "ap-northeast-1" + ) + des_pcx_usw1["VpcPeeringConnections"][0]["RequesterVpcInfo"]["Region"].should.equal( + "us-west-1" + ) @mock_ec2 From 81be4b37a125d62586ab8429e8d98bb002ce7154 Mon Sep 17 00:00:00 2001 From: usmangani1 Date: Tue, 7 Jul 2020 19:02:55 +0530 Subject: [PATCH 427/658] Fix: Ec2 - add destinationIpv6CIDR support. (#3106) * Fix: Ec2 - add destinationIpv6CIDR support. * removing unneccessary debug statements * modifying existing test case * Linting Co-authored-by: usmankb Co-authored-by: Bert Blommers --- moto/ec2/models.py | 15 +++++++++++---- moto/ec2/responses/route_tables.py | 2 ++ moto/ec2/utils.py | 4 +++- tests/test_ec2/test_route_tables.py | 11 +++++++++++ 4 files changed, 27 insertions(+), 5 deletions(-) diff --git a/moto/ec2/models.py b/moto/ec2/models.py index cb7ba0ff2b47..89dd753f949f 100644 --- a/moto/ec2/models.py +++ b/moto/ec2/models.py @@ -3547,6 +3547,7 @@ def __init__( self, route_table, destination_cidr_block, + destination_ipv6_cidr_block, local=False, gateway=None, instance=None, @@ -3554,9 +3555,12 @@ def __init__( interface=None, vpc_pcx=None, ): - self.id = generate_route_id(route_table.id, destination_cidr_block) + self.id = generate_route_id( + route_table.id, destination_cidr_block, destination_ipv6_cidr_block + ) self.route_table = route_table self.destination_cidr_block = destination_cidr_block + self.destination_ipv6_cidr_block = destination_ipv6_cidr_block self.local = local self.gateway = gateway self.instance = instance @@ -3632,6 +3636,7 @@ def create_route( self, route_table_id, destination_cidr_block, + destination_ipv6_cidr_block=None, local=False, gateway_id=None, instance_id=None, @@ -3656,9 +3661,10 @@ def create_route( gateway = self.get_internet_gateway(gateway_id) try: - ipaddress.IPv4Network( - six.text_type(destination_cidr_block), strict=False - ) + if destination_cidr_block: + ipaddress.IPv4Network( + six.text_type(destination_cidr_block), strict=False + ) except ValueError: raise InvalidDestinationCIDRBlockParameterError(destination_cidr_block) @@ -3668,6 +3674,7 @@ def create_route( route = Route( route_table, destination_cidr_block, + destination_ipv6_cidr_block, local=local, gateway=gateway, instance=self.get_instance(instance_id) if instance_id else None, diff --git a/moto/ec2/responses/route_tables.py b/moto/ec2/responses/route_tables.py index b5d65f83118f..a91d02317d00 100644 --- a/moto/ec2/responses/route_tables.py +++ b/moto/ec2/responses/route_tables.py @@ -16,6 
     def create_route(self):
         route_table_id = self._get_param("RouteTableId")
         destination_cidr_block = self._get_param("DestinationCidrBlock")
+        destination_ipv6_cidr_block = self._get_param("DestinationIpv6CidrBlock")
         gateway_id = self._get_param("GatewayId")
         instance_id = self._get_param("InstanceId")
         nat_gateway_id = self._get_param("NatGatewayId")
@@ -25,6 +26,7 @@ def create_route(self):
         self.ec2_backend.create_route(
             route_table_id,
             destination_cidr_block,
+            destination_ipv6_cidr_block,
             gateway_id=gateway_id,
             instance_id=instance_id,
             nat_gateway_id=nat_gateway_id,
diff --git a/moto/ec2/utils.py b/moto/ec2/utils.py
index c07c470a9e24..b8c19b580f19 100644
--- a/moto/ec2/utils.py
+++ b/moto/ec2/utils.py
@@ -189,7 +189,9 @@ def random_ipv6_cidr():
     return "2400:6500:{}:{}::/56".format(random_resource_id(4), random_resource_id(4))
 
 
-def generate_route_id(route_table_id, cidr_block):
+def generate_route_id(route_table_id, cidr_block, ipv6_cidr_block=None):
+    if ipv6_cidr_block and not cidr_block:
+        cidr_block = ipv6_cidr_block
     return "%s~%s" % (route_table_id, cidr_block)
 
 
diff --git a/tests/test_ec2/test_route_tables.py b/tests/test_ec2/test_route_tables.py
index 61fb33f90b6f..7bb4db6959bf 100644
--- a/tests/test_ec2/test_route_tables.py
+++ b/tests/test_ec2/test_route_tables.py
@@ -582,6 +582,17 @@ def test_create_route_with_invalid_destination_cidr_block_parameter():
             )
         )
 
+    route_table.create_route(
+        DestinationIpv6CidrBlock="2001:db8::/125", GatewayId=internet_gateway.id
+    )
+    new_routes = [
+        route
+        for route in route_table.routes
+        if route.destination_cidr_block != vpc.cidr_block
+    ]
+    new_routes.should.have.length_of(1)
+    new_routes[0].route_table_id.shouldnt.be.equal(None)
+
 
 @mock_ec2
 def test_create_route_with_network_interface_id():

From 766f527d379bf173c5fb6b4589ae6fa6af13d4fd Mon Sep 17 00:00:00 2001
From: Adam Richie-Halford
Date: Sat, 11 Jul 2020 00:43:45 -0700
Subject: [PATCH 428/658] Add NUMBER and LIST parsing to cloudformation/parsing.py (#3118)

* Add NUMBER and LIST parsing to cloudformation/parsing.py

* Fix black formatting error in test_stack_parsing.py
---
 moto/cloudformation/parsing.py                  | 17 +++++++++++++++
 tests/test_cloudformation/test_stack_parsing.py | 15 ++++++++++++-
 2 files changed, 31 insertions(+), 1 deletion(-)

diff --git a/moto/cloudformation/parsing.py b/moto/cloudformation/parsing.py
index a489f54febd1..0a3e0a0c296a 100644
--- a/moto/cloudformation/parsing.py
+++ b/moto/cloudformation/parsing.py
@@ -560,6 +560,23 @@ def load_parameters(self):
             if value_type == "CommaDelimitedList" or value_type.startswith("List"):
                 value = value.split(",")
 
+            def _parse_number_parameter(num_string):
+                """CloudFormation NUMBER types can be an int or float.
+                Try int first and then fall back to float if that fails
+                """
+                try:
+                    return int(num_string)
+                except ValueError:
+                    return float(num_string)
+
+            if value_type == "List<Number>":
+                # The if statement directly above already converted
+                # to a list. Now we convert each element to a number
+                value = [_parse_number_parameter(v) for v in value]
+
+            if value_type == "Number":
+                value = _parse_number_parameter(value)
+
             if parameter_slot.get("NoEcho"):
                 self.no_echo_parameter_keys.append(key)
 
diff --git a/tests/test_cloudformation/test_stack_parsing.py b/tests/test_cloudformation/test_stack_parsing.py
index 116287162110..4e51c5b1220b 100644
--- a/tests/test_cloudformation/test_stack_parsing.py
+++ b/tests/test_cloudformation/test_stack_parsing.py
@@ -67,6 +67,8 @@
 parameters = {
     "Parameters": {
         "Param": {"Type": "String"},
+        "NumberParam": {"Type": "Number"},
+        "NumberListParam": {"Type": "List<Number>"},
         "NoEchoParam": {"Type": "String", "NoEcho": True},
     }
 }
@@ -303,12 +305,23 @@ def test_parse_stack_with_parameters():
         stack_id="test_id",
         name="test_stack",
         template=parameters_template_json,
-        parameters={"Param": "visible value", "NoEchoParam": "hidden value"},
+        parameters={
+            "Param": "visible value",
+            "NumberParam": "42",
+            "NumberListParam": "42,3.14159",
+            "NoEchoParam": "hidden value",
+        },
         region_name="us-west-1",
     )
 
     stack.resource_map.no_echo_parameter_keys.should.have("NoEchoParam")
     stack.resource_map.no_echo_parameter_keys.should_not.have("Param")
+    stack.resource_map.no_echo_parameter_keys.should_not.have("NumberParam")
+    stack.resource_map.no_echo_parameter_keys.should_not.have("NumberListParam")
+    stack.resource_map.resolved_parameters["NumberParam"].should.equal(42)
+    stack.resource_map.resolved_parameters["NumberListParam"].should.equal(
+        [42, 3.14159]
+    )
 
 
 def test_parse_equals_condition():

From 069c159492b04202aa06e15bcdd3f1bcd9d93120 Mon Sep 17 00:00:00 2001
From: ngander64 <67654233+ngander64@users.noreply.github.com>
Date: Sat, 11 Jul 2020 03:38:33 -0500
Subject: [PATCH 429/658] Always return 'HardExpiry' for account password policy. (#3117)

Co-authored-by: Nick B Gander
---
 moto/iam/responses.py      | 2 --
 tests/test_iam/test_iam.py | 1 +
 2 files changed, 1 insertion(+), 2 deletions(-)

diff --git a/moto/iam/responses.py b/moto/iam/responses.py
index 3a8296760e9e..8eb1730ea0b2 100644
--- a/moto/iam/responses.py
+++ b/moto/iam/responses.py
@@ -2423,9 +2423,7 @@ def get_account_summary(self):
       <AllowUsersToChangePassword>{{ password_policy.allow_users_to_change_password | lower }}</AllowUsersToChangePassword>
       <ExpirePasswords>{{ password_policy.expire_passwords | lower }}</ExpirePasswords>
-      {% if password_policy.hard_expiry %}
       <HardExpiry>{{ password_policy.hard_expiry | lower }}</HardExpiry>
-      {% endif %}
       {% if password_policy.max_password_age %}
       <MaxPasswordAge>{{ password_policy.max_password_age }}</MaxPasswordAge>
       {% endif %}
diff --git a/tests/test_iam/test_iam.py b/tests/test_iam/test_iam.py
index a749a37e7b58..4ae5ad49e7ef 100644
--- a/tests/test_iam/test_iam.py
+++ b/tests/test_iam/test_iam.py
@@ -2608,6 +2608,7 @@ def test_update_account_password_policy():
             "RequireNumbers": False,
             "RequireSymbols": False,
             "RequireUppercaseCharacters": False,
+            "HardExpiry": False,
         }
     )

From 80761ebb3b24591b3d768298511da39b1d57409f Mon Sep 17 00:00:00 2001
From: Bert Blommers
Date: Sat, 11 Jul 2020 09:46:03 +0100
Subject: [PATCH 430/658] #3114 - Allow DynamoDB table as CFN output

---
 moto/dynamodb2/models/__init__.py             |  4 +++
 .../test_cloudformation_stack_integration.py  | 36 +++++++++++++++++++
 2 files changed, 40 insertions(+)

diff --git a/moto/dynamodb2/models/__init__.py b/moto/dynamodb2/models/__init__.py
index eafa2743af4c..428ccb01e03c 100644
--- a/moto/dynamodb2/models/__init__.py
+++ b/moto/dynamodb2/models/__init__.py
@@ -413,6 +413,10 @@ def get_cfn_attribute(self, attribute_name):
 
         raise UnformattedGetAttTemplateException()
 
+    @property
+    def physical_resource_id(self):
+        return self.name
+
     @classmethod
     def create_from_cloudformation_json(
         cls, resource_name, cloudformation_json, region_name
diff --git a/tests/test_cloudformation/test_cloudformation_stack_integration.py b/tests/test_cloudformation/test_cloudformation_stack_integration.py
index 9d639ed42946..5a8e9cd683ba 100644
--- a/tests/test_cloudformation/test_cloudformation_stack_integration.py
+++ b/tests/test_cloudformation/test_cloudformation_stack_integration.py
@@ -2767,3 +2767,39 @@ def test_stack_events_get_attribute_integration():
 
     output_arn["OutputValue"].should.equal(event_bus["Arn"])
     output_name["OutputValue"].should.equal(event_bus["Name"])
+
+
+@mock_cloudformation
+@mock_dynamodb2
+def test_dynamodb_table_creation():
+    CFN_TEMPLATE = {
+        "Outputs": {"MyTableName": {"Value": {"Ref": "MyTable"}},},
+        "Resources": {
+            "MyTable": {
+                "Type": "AWS::DynamoDB::Table",
+                "Properties": {
+                    "KeySchema": [{"AttributeName": "id", "KeyType": "HASH"}],
+                    "AttributeDefinitions": [
+                        {"AttributeName": "id", "AttributeType": "S"}
+                    ],
+                    "BillingMode": "PAY_PER_REQUEST",
+                },
+            },
+        },
+    }
+    stack_name = "foobar"
+    cfn = boto3.client("cloudformation", "us-west-2")
+    cfn.create_stack(StackName=stack_name, TemplateBody=json.dumps(CFN_TEMPLATE))
+    # Wait until moto creates the stack
+    waiter = cfn.get_waiter("stack_create_complete")
+    waiter.wait(StackName=stack_name)
+    # Verify the TableName is part of the outputs
+    stack = cfn.describe_stacks(StackName=stack_name)["Stacks"][0]
+    outputs = stack["Outputs"]
+    outputs.should.have.length_of(1)
+    outputs[0]["OutputKey"].should.equal("MyTableName")
+    outputs[0]["OutputValue"].should.contain("foobar")
+    # Assert the table is created
+    ddb = boto3.client("dynamodb", "us-west-2")
+    table_names = ddb.list_tables()["TableNames"]
+    table_names.should.equal([outputs[0]["OutputValue"]])
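A quick end-to-end illustration of what the physical_resource_id property above
enables: a Ref to a DynamoDB table in a stack output now resolves to the real,
generated table name. This is a minimal sketch, not part of the patch itself;
the stack name, region, and template are placeholder values, and only boto3 and
moto are assumed:

    import json

    import boto3
    from moto import mock_cloudformation, mock_dynamodb2

    @mock_cloudformation
    @mock_dynamodb2
    def table_name_from_stack_output():
        # One-resource stack; the output uses Ref, which now resolves via
        # the table's physical_resource_id to the generated table name.
        template = {
            "Outputs": {"TableName": {"Value": {"Ref": "Table"}}},
            "Resources": {
                "Table": {
                    "Type": "AWS::DynamoDB::Table",
                    "Properties": {
                        "KeySchema": [{"AttributeName": "id", "KeyType": "HASH"}],
                        "AttributeDefinitions": [
                            {"AttributeName": "id", "AttributeType": "S"}
                        ],
                        "BillingMode": "PAY_PER_REQUEST",
                    },
                }
            },
        }
        cfn = boto3.client("cloudformation", "us-west-2")
        cfn.create_stack(StackName="demo", TemplateBody=json.dumps(template))
        cfn.get_waiter("stack_create_complete").wait(StackName="demo")
        outputs = cfn.describe_stacks(StackName="demo")["Stacks"][0]["Outputs"]
        # The output value is the actual table name, so it can be fed
        # straight into the DynamoDB client.
        ddb = boto3.client("dynamodb", "us-west-2")
        return ddb.describe_table(TableName=outputs[0]["OutputValue"])

    table_name_from_stack_output()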
From b33c5dff0651cb8f9a72819275ce6ce1bf62b0f7 Mon Sep 17 00:00:00 2001
From: Bert Blommers
Date: Sun, 12 Jul 2020 13:33:46 +0100
Subject: [PATCH 431/658] #2104 - S3 - Persist metadata for Presigned URL

---
 moto/s3/responses.py     |  37 ++++++++++++++
 moto/s3/utils.py         |   6 ++-
 tests/test_s3/test_s3.py | 101 +++++++++++++++++++++++++++++++++++++++
 3 files changed, 143 insertions(+), 1 deletion(-)

diff --git a/moto/s3/responses.py b/moto/s3/responses.py
index 10e68d56975e..89719773e02e 100644
--- a/moto/s3/responses.py
+++ b/moto/s3/responses.py
@@ -1079,6 +1079,10 @@ def _key_response(self, request, full_url, headers):
         if key:
             if not key.acl.public_read and not signed_url:
                 return 403, {}, ""
+        elif signed_url:
+            # coming in from requests.get(s3.generate_presigned_url())
+            if self._invalid_headers(request.url, dict(headers)):
+                return 403, {}, S3_INVALID_PRESIGNED_PARAMETERS
 
         if hasattr(request, "body"):
             # Boto
@@ -1287,6 +1291,7 @@ def _key_response_put(self, request, body, bucket_name, query, key_name, headers
         )
         request.streaming = True
         metadata = metadata_from_headers(request.headers)
+        metadata.update(metadata_from_headers(query))
         new_key.set_metadata(metadata)
         new_key.set_acl(acl)
         new_key.website_redirect_location = request.headers.get(
@@ -1672,6 +1677,29 @@ def _key_response_post(self, request, body, bucket_name, query, key_name):
             "Method POST had only been implemented for multipart uploads and restore operations, so far"
         )
 
+    def _invalid_headers(self, url, headers):
+        """
+        Verify whether the provided metadata in the URL is also present in the headers
+        :param url: .../file.txt&content-type=app%2Fjson&Signature=..
+        :param headers: Content-Type=app/json
+        :return: True or False
+        """
+        metadata_to_check = {
+            "content-disposition": "Content-Disposition",
+            "content-encoding": "Content-Encoding",
+            "content-language": "Content-Language",
+            "content-length": "Content-Length",
+            "content-md5": "Content-MD5",
+            "content-type": "Content-Type",
+        }
+        for url_key, header_key in metadata_to_check.items():
+            metadata_in_url = re.search(url_key + "=(.+?)(&.+$|$)", url)
+            if metadata_in_url:
+                url_value = unquote(metadata_in_url.group(1))
+                if header_key not in headers or (url_value != headers[header_key]):
+                    return True
+        return False
+
 
 S3ResponseInstance = ResponseObject(s3_backend)
 
@@ -2214,6 +2242,15 @@ def _key_response_post(self, request, body, bucket_name, query, key_name):
 """
 
+S3_INVALID_PRESIGNED_PARAMETERS = """<?xml version="1.0" encoding="UTF-8"?>
+<Error>
+  <Code>SignatureDoesNotMatch</Code>
+  <Message>The request signature we calculated does not match the signature you provided. Check your key and signing method.</Message>
+  <RequestId>0D68A23BB2E2215B</RequestId>
+  <HostId>9Gjjt1m+cjU4OPvX9O9/8RuvnG41MRb/18Oux2o5H5MY7ISNTlXN+Dz9IG62/ILVxhAGI0qyPfg=</HostId>
+</Error>
+"""
+
 S3_NO_ENCRYPTION = """<?xml version="1.0" encoding="UTF-8"?>
 <Error>
   <Code>ServerSideEncryptionConfigurationNotFoundError</Code>
diff --git a/moto/s3/utils.py b/moto/s3/utils.py
index 014e98ca9c32..1ca31ce1bb9f 100644
--- a/moto/s3/utils.py
+++ b/moto/s3/utils.py
@@ -75,7 +75,11 @@ def metadata_from_headers(headers):
                 # Check for special metadata that doesn't start with x-amz-meta
                 meta_key = header
         if meta_key:
-            metadata[meta_key] = headers[header]
+            metadata[meta_key] = (
+                headers[header][0]
+                if type(headers[header]) == list
+                else headers[header]
+            )
 
     return metadata
 
diff --git a/tests/test_s3/test_s3.py b/tests/test_s3/test_s3.py
index 8ac227f4fde3..4c32e38759c5 100644
--- a/tests/test_s3/test_s3.py
+++ b/tests/test_s3/test_s3.py
@@ -4583,3 +4583,104 @@ def test_encryption():
     conn.delete_bucket_encryption(Bucket="mybucket")
     with assert_raises(ClientError) as exc:
         conn.get_bucket_encryption(Bucket="mybucket")
+
+
+@mock_s3
+def test_presigned_url_restrict_parameters():
+    # Only specific params can be set
+    # Ensure error is thrown when adding custom metadata this way
+    bucket = str(uuid.uuid4())
+    key = "file.txt"
+    conn = boto3.resource("s3", region_name="us-east-1")
+    conn.create_bucket(Bucket=bucket)
+    s3 = boto3.client("s3", region_name="us-east-1")
+
+    # Create a pre-signed url with some metadata.
+    with assert_raises(botocore.exceptions.ParamValidationError) as err:
+        s3.generate_presigned_url(
+            ClientMethod="put_object",
+            Params={"Bucket": bucket, "Key": key, "Unknown": "metadata"},
+        )
+    assert str(err.exception).should.equal(
+        'Parameter validation failed:\nUnknown parameter in input: "Unknown", must be one of: ACL, Body, Bucket, CacheControl, ContentDisposition, ContentEncoding, ContentLanguage, ContentLength, ContentMD5, ContentType, Expires, GrantFullControl, GrantRead, GrantReadACP, GrantWriteACP, Key, Metadata, ServerSideEncryption, StorageClass, WebsiteRedirectLocation, SSECustomerAlgorithm, SSECustomerKey, SSECustomerKeyMD5, SSEKMSKeyId, SSEKMSEncryptionContext, RequestPayer, Tagging, ObjectLockMode, ObjectLockRetainUntilDate, ObjectLockLegalHoldStatus'
+    )
+
+    s3.delete_bucket(Bucket=bucket)
+
+
+@mock_s3
+def test_presigned_put_url_with_approved_headers():
+    bucket = str(uuid.uuid4())
+    key = "file.txt"
+    expected_contenttype = "app/sth"
+    conn = boto3.resource("s3", region_name="us-east-1")
+    conn.create_bucket(Bucket=bucket)
+    s3 = boto3.client("s3", region_name="us-east-1")
+
+    # Create a pre-signed url with some metadata.
+    url = s3.generate_presigned_url(
+        ClientMethod="put_object",
+        Params={"Bucket": bucket, "Key": key, "ContentType": expected_contenttype},
+    )
+
+    # Verify S3 throws an error when the header is not provided
+    response = requests.put(url, data="filecontent")
+    response.status_code.should.equal(403)
+    response.content.should.contain("SignatureDoesNotMatch")
+    response.content.should.contain(
+        "The request signature we calculated does not match the signature you provided. Check your key and signing method."
+    )
+
+    # Verify S3 throws an error when the header has the wrong value
+    response = requests.put(
+        url, data="filecontent", headers={"Content-Type": "application/unknown"}
+    )
+    response.status_code.should.equal(403)
+    response.content.should.contain("SignatureDoesNotMatch")
+    response.content.should.contain(
+        "The request signature we calculated does not match the signature you provided. Check your key and signing method."
+ ) + + # Verify S3 uploads correctly when providing the meta data + response = requests.put( + url, data="filecontent", headers={"Content-Type": expected_contenttype} + ) + response.status_code.should.equal(200) + + # Assert the object exists + obj = s3.get_object(Bucket=bucket, Key=key) + obj["ContentType"].should.equal(expected_contenttype) + obj["ContentLength"].should.equal(11) + obj["Body"].read().should.equal("filecontent") + obj["Metadata"].should.equal({}) + + s3.delete_object(Bucket=bucket, Key=key) + s3.delete_bucket(Bucket=bucket) + + +@mock_s3 +def test_presigned_put_url_with_custom_headers(): + bucket = str(uuid.uuid4()) + key = "file.txt" + conn = boto3.resource("s3", region_name="us-east-1") + conn.create_bucket(Bucket=bucket) + s3 = boto3.client("s3", region_name="us-east-1") + + # Create a pre-signed url with some metadata. + url = s3.generate_presigned_url( + ClientMethod="put_object", + Params={"Bucket": bucket, "Key": key, "Metadata": {"venue": "123"}}, + ) + + # Verify S3 uploads correctly when providing the meta data + response = requests.put(url, data="filecontent") + response.status_code.should.equal(200) + + # Assert the object exists + obj = s3.get_object(Bucket=bucket, Key=key) + obj["ContentLength"].should.equal(11) + obj["Body"].read().should.equal("filecontent") + obj["Metadata"].should.equal({"venue": "123"}) + + s3.delete_object(Bucket=bucket, Key=key) + s3.delete_bucket(Bucket=bucket) From c5de56ce706ee6071d73a0b0d0edd3b8044d5136 Mon Sep 17 00:00:00 2001 From: usmangani1 Date: Sun, 12 Jul 2020 18:09:42 +0530 Subject: [PATCH 432/658] Fix: CloudFormation support status filter in list stacks (#3115) * Fix: CloudFormation support status filter in list stacks * Added test for non decorator Co-authored-by: usmankb --- moto/cloudformation/models.py | 15 +++++++++++++-- moto/cloudformation/responses.py | 3 ++- .../test_cloudformation_stack_crud.py | 13 +++++++++++++ .../test_cloudformation_stack_crud_boto3.py | 13 +++++++++++++ 4 files changed, 41 insertions(+), 3 deletions(-) diff --git a/moto/cloudformation/models.py b/moto/cloudformation/models.py index 23cdc0925b64..d3fb2870d9ac 100644 --- a/moto/cloudformation/models.py +++ b/moto/cloudformation/models.py @@ -449,6 +449,16 @@ def __init__( self.event_id = uuid.uuid4() +def filter_stacks(all_stacks, status_filter): + filtered_stacks = [] + if not status_filter: + return all_stacks + for stack in all_stacks: + if stack.status in status_filter: + filtered_stacks.append(stack) + return filtered_stacks + + class CloudFormationBackend(BaseBackend): def __init__(self): self.stacks = OrderedDict() @@ -681,10 +691,11 @@ def describe_stacks(self, name_or_stack_id): def list_change_sets(self): return self.change_sets.values() - def list_stacks(self): - return [v for v in self.stacks.values()] + [ + def list_stacks(self, status_filter=None): + total_stacks = [v for v in self.stacks.values()] + [ v for v in self.deleted_stacks.values() ] + return filter_stacks(total_stacks, status_filter) def get_stack(self, name_or_stack_id): all_stacks = dict(self.deleted_stacks, **self.stacks) diff --git a/moto/cloudformation/responses.py b/moto/cloudformation/responses.py index 92a8b1cabfd9..8672c706df4d 100644 --- a/moto/cloudformation/responses.py +++ b/moto/cloudformation/responses.py @@ -233,7 +233,8 @@ def list_change_sets(self): return template.render(change_sets=change_sets) def list_stacks(self): - stacks = self.cloudformation_backend.list_stacks() + status_filter = self._get_multi_param("StackStatusFilter.member") + stacks = 
self.cloudformation_backend.list_stacks(status_filter) template = self.response_template(LIST_STACKS_RESPONSE) return template.render(stacks=stacks) diff --git a/tests/test_cloudformation/test_cloudformation_stack_crud.py b/tests/test_cloudformation/test_cloudformation_stack_crud.py index 8749d4cfb02d..29faa11cfdaa 100644 --- a/tests/test_cloudformation/test_cloudformation_stack_crud.py +++ b/tests/test_cloudformation/test_cloudformation_stack_crud.py @@ -233,6 +233,19 @@ def test_list_stacks(): stacks[0].template_description.should.equal("Stack 1") +@mock_cloudformation_deprecated +def test_list_stacks_with_filter(): + conn = boto.connect_cloudformation() + conn.create_stack("test_stack", template_body=dummy_template_json) + conn.create_stack("test_stack2", template_body=dummy_template_json) + conn.update_stack("test_stack", template_body=dummy_template_json2) + stacks = conn.list_stacks("CREATE_COMPLETE") + stacks.should.have.length_of(1) + stacks[0].template_description.should.equal("Stack 1") + stacks = conn.list_stacks("UPDATE_COMPLETE") + stacks.should.have.length_of(1) + + @mock_cloudformation_deprecated def test_delete_stack_by_name(): conn = boto.connect_cloudformation() diff --git a/tests/test_cloudformation/test_cloudformation_stack_crud_boto3.py b/tests/test_cloudformation/test_cloudformation_stack_crud_boto3.py index 43f63dca268a..1ebce46d70cc 100644 --- a/tests/test_cloudformation/test_cloudformation_stack_crud_boto3.py +++ b/tests/test_cloudformation/test_cloudformation_stack_crud_boto3.py @@ -14,6 +14,7 @@ from moto import mock_cloudformation, mock_s3, mock_sqs, mock_ec2 from moto.core import ACCOUNT_ID +from .test_cloudformation_stack_crud import dummy_template_json2 dummy_template = { "AWSTemplateFormatVersion": "2010-09-09", @@ -218,6 +219,18 @@ def test_boto3_list_stacksets_length(): stacksets.should.have.length_of(2) +@mock_cloudformation +def test_boto3_filter_stacks(): + conn = boto3.client("cloudformation", region_name="us-east-1") + conn.create_stack(StackName="test_stack", TemplateBody=dummy_template_json) + conn.create_stack(StackName="test_stack2", TemplateBody=dummy_template_json) + conn.update_stack(StackName="test_stack", TemplateBody=dummy_template_json2) + stacks = conn.list_stacks(StackStatusFilter=["CREATE_COMPLETE"]) + stacks.get("StackSummaries").should.have.length_of(1) + stacks = conn.list_stacks(StackStatusFilter=["UPDATE_COMPLETE"]) + stacks.get("StackSummaries").should.have.length_of(1) + + @mock_cloudformation def test_boto3_list_stacksets_contents(): cf_conn = boto3.client("cloudformation", region_name="us-east-1") From 8b63421321f4d9426ea5f98d83114d10e8adca4c Mon Sep 17 00:00:00 2001 From: Bert Blommers Date: Sun, 12 Jul 2020 18:29:10 +0100 Subject: [PATCH 433/658] S3 - Only add metadata once; use binary file content --- moto/s3/responses.py | 2 +- moto/s3/utils.py | 3 ++- tests/test_s3/test_s3.py | 22 ++++++++++++---------- 3 files changed, 15 insertions(+), 12 deletions(-) diff --git a/moto/s3/responses.py b/moto/s3/responses.py index 89719773e02e..603571c0d852 100644 --- a/moto/s3/responses.py +++ b/moto/s3/responses.py @@ -1081,7 +1081,7 @@ def _key_response(self, request, full_url, headers): return 403, {}, "" elif signed_url: # coming in from requests.get(s3.generate_presigned_url()) - if self._invalid_headers(request.url, dict(headers)): + if self._invalid_headers(request.url, dict(request.headers)): return 403, {}, S3_INVALID_PRESIGNED_PARAMETERS if hasattr(request, "body"): diff --git a/moto/s3/utils.py b/moto/s3/utils.py index 
1ca31ce1bb9f..2cdb7e8623f8 100644 --- a/moto/s3/utils.py +++ b/moto/s3/utils.py @@ -6,6 +6,7 @@ import re import six from six.moves.urllib.parse import urlparse, unquote, quote +from requests.structures import CaseInsensitiveDict import sys @@ -62,7 +63,7 @@ def parse_region_from_url(url): def metadata_from_headers(headers): - metadata = {} + metadata = CaseInsensitiveDict() meta_regex = re.compile(r"^x-amz-meta-([a-zA-Z0-9\-_]+)$", flags=re.IGNORECASE) for header, value in headers.items(): if isinstance(header, six.string_types): diff --git a/tests/test_s3/test_s3.py b/tests/test_s3/test_s3.py index 4c32e38759c5..57f7454371ac 100644 --- a/tests/test_s3/test_s3.py +++ b/tests/test_s3/test_s3.py @@ -4612,6 +4612,7 @@ def test_presigned_url_restrict_parameters(): def test_presigned_put_url_with_approved_headers(): bucket = str(uuid.uuid4()) key = "file.txt" + content = b"filecontent" expected_contenttype = "app/sth" conn = boto3.resource("s3", region_name="us-east-1") conn.create_bucket(Bucket=bucket) @@ -4624,26 +4625,26 @@ def test_presigned_put_url_with_approved_headers(): ) # Verify S3 throws an error when the header is not provided - response = requests.put(url, data="filecontent") + response = requests.put(url, data=content) response.status_code.should.equal(403) - response.content.should.contain("SignatureDoesNotMatch") - response.content.should.contain( + str(response.content).should.contain("SignatureDoesNotMatch") + str(response.content).should.contain( "The request signature we calculated does not match the signature you provided. Check your key and signing method." ) # Verify S3 throws an error when the header has the wrong value response = requests.put( - url, data="filecontent", headers={"Content-Type": "application/unknown"} + url, data=content, headers={"Content-Type": "application/unknown"} ) response.status_code.should.equal(403) - response.content.should.contain("SignatureDoesNotMatch") - response.content.should.contain( + str(response.content).should.contain("SignatureDoesNotMatch") + str(response.content).should.contain( "The request signature we calculated does not match the signature you provided. Check your key and signing method." 
) # Verify S3 uploads correctly when providing the meta data response = requests.put( - url, data="filecontent", headers={"Content-Type": expected_contenttype} + url, data=content, headers={"Content-Type": expected_contenttype} ) response.status_code.should.equal(200) @@ -4651,7 +4652,7 @@ def test_presigned_put_url_with_approved_headers(): obj = s3.get_object(Bucket=bucket, Key=key) obj["ContentType"].should.equal(expected_contenttype) obj["ContentLength"].should.equal(11) - obj["Body"].read().should.equal("filecontent") + obj["Body"].read().should.equal(content) obj["Metadata"].should.equal({}) s3.delete_object(Bucket=bucket, Key=key) @@ -4662,6 +4663,7 @@ def test_presigned_put_url_with_approved_headers(): def test_presigned_put_url_with_custom_headers(): bucket = str(uuid.uuid4()) key = "file.txt" + content = b"filecontent" conn = boto3.resource("s3", region_name="us-east-1") conn.create_bucket(Bucket=bucket) s3 = boto3.client("s3", region_name="us-east-1") @@ -4673,13 +4675,13 @@ def test_presigned_put_url_with_custom_headers(): ) # Verify S3 uploads correctly when providing the meta data - response = requests.put(url, data="filecontent") + response = requests.put(url, data=content) response.status_code.should.equal(200) # Assert the object exists obj = s3.get_object(Bucket=bucket, Key=key) obj["ContentLength"].should.equal(11) - obj["Body"].read().should.equal("filecontent") + obj["Body"].read().should.equal(content) obj["Metadata"].should.equal({"venue": "123"}) s3.delete_object(Bucket=bucket, Key=key) From 55bb4eb08d5925cb53c90ad3c4069325c38e1f1d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Anton=20Gr=C3=BCbel?= Date: Mon, 13 Jul 2020 10:30:55 +0200 Subject: [PATCH 434/658] Config - implement Organization Conformance Pack functionality (#3116) * Add config.put_organization_conformance_pack * Add config.describe_organization_conformance_packs * Add config.get_organization_conformance_pack_detailed_status * Add config.describe_organization_conformance_pack_statuses * Add config.delete_organization_conformance_pack * Add an update method to OrganizationConformancePack --- moto/config/exceptions.py | 16 ++ moto/config/models.py | 177 +++++++++++++++++- moto/config/responses.py | 43 +++++ tests/test_config/test_config.py | 312 +++++++++++++++++++++++++++++++ 4 files changed, 547 insertions(+), 1 deletion(-) diff --git a/moto/config/exceptions.py b/moto/config/exceptions.py index 6b6498d342be..4030b87a3b45 100644 --- a/moto/config/exceptions.py +++ b/moto/config/exceptions.py @@ -376,3 +376,19 @@ def __init__(self): super(InvalidResultTokenException, self).__init__( "InvalidResultTokenException", message ) + + +class ValidationException(JsonRESTError): + code = 400 + + def __init__(self, message): + super(ValidationException, self).__init__("ValidationException", message) + + +class NoSuchOrganizationConformancePackException(JsonRESTError): + code = 400 + + def __init__(self, message): + super(NoSuchOrganizationConformancePackException, self).__init__( + "NoSuchOrganizationConformancePackException", message + ) diff --git a/moto/config/models.py b/moto/config/models.py index 242a219e4c65..b6dc4672d8ce 100644 --- a/moto/config/models.py +++ b/moto/config/models.py @@ -41,6 +41,8 @@ ResourceNotDiscoveredException, TooManyResourceKeys, InvalidResultTokenException, + ValidationException, + NoSuchOrganizationConformancePackException, ) from moto.core import BaseBackend, BaseModel @@ -159,7 +161,8 @@ def __init__(self, capitalize_start=False, capitalize_arn=True): def to_dict(self): data = {} for 
item, value in self.__dict__.items(): - if value is not None: + # ignore private attributes + if not item.startswith("_") and value is not None: if isinstance(value, ConfigEmptyDictable): data[ snake_to_camels( @@ -367,12 +370,56 @@ def __init__( self.tags = tags or {} +class OrganizationConformancePack(ConfigEmptyDictable): + def __init__( + self, + region, + name, + delivery_s3_bucket, + delivery_s3_key_prefix=None, + input_parameters=None, + excluded_accounts=None, + ): + super(OrganizationConformancePack, self).__init__( + capitalize_start=True, capitalize_arn=False + ) + + self._status = "CREATE_SUCCESSFUL" + self._unique_pack_name = "{0}-{1}".format(name, random_string()) + + self.conformance_pack_input_parameters = input_parameters or [] + self.delivery_s3_bucket = delivery_s3_bucket + self.delivery_s3_key_prefix = delivery_s3_key_prefix + self.excluded_accounts = excluded_accounts or [] + self.last_update_time = datetime2int(datetime.utcnow()) + self.organization_conformance_pack_arn = "arn:aws:config:{0}:{1}:organization-conformance-pack/{2}".format( + region, DEFAULT_ACCOUNT_ID, self._unique_pack_name + ) + self.organization_conformance_pack_name = name + + def update( + self, + delivery_s3_bucket, + delivery_s3_key_prefix, + input_parameters, + excluded_accounts, + ): + self._status = "UPDATE_SUCCESSFUL" + + self.conformance_pack_input_parameters = input_parameters + self.delivery_s3_bucket = delivery_s3_bucket + self.delivery_s3_key_prefix = delivery_s3_key_prefix + self.excluded_accounts = excluded_accounts + self.last_update_time = datetime2int(datetime.utcnow()) + + class ConfigBackend(BaseBackend): def __init__(self): self.recorders = {} self.delivery_channels = {} self.config_aggregators = {} self.aggregation_authorizations = {} + self.organization_conformance_packs = {} @staticmethod def _validate_resource_types(resource_list): @@ -1110,6 +1157,134 @@ def put_evaluations(self, evaluations=None, result_token=None, test_mode=False): "FailedEvaluations": [], } # At this time, moto is not adding failed evaluations. 
+ def put_organization_conformance_pack( + self, + region, + name, + template_s3_uri, + template_body, + delivery_s3_bucket, + delivery_s3_key_prefix, + input_parameters, + excluded_accounts, + ): + # a real validation of the content of the template is missing at the moment + if not template_s3_uri and not template_body: + raise ValidationException("Template body is invalid") + + if not re.match(r"s3://.*", template_s3_uri): + raise ValidationException( + "1 validation error detected: " + "Value '{}' at 'templateS3Uri' failed to satisfy constraint: " + "Member must satisfy regular expression pattern: " + "s3://.*".format(template_s3_uri) + ) + + pack = self.organization_conformance_packs.get(name) + + if pack: + pack.update( + delivery_s3_bucket=delivery_s3_bucket, + delivery_s3_key_prefix=delivery_s3_key_prefix, + input_parameters=input_parameters, + excluded_accounts=excluded_accounts, + ) + else: + pack = OrganizationConformancePack( + region=region, + name=name, + delivery_s3_bucket=delivery_s3_bucket, + delivery_s3_key_prefix=delivery_s3_key_prefix, + input_parameters=input_parameters, + excluded_accounts=excluded_accounts, + ) + + self.organization_conformance_packs[name] = pack + + return { + "OrganizationConformancePackArn": pack.organization_conformance_pack_arn + } + + def describe_organization_conformance_packs(self, names): + packs = [] + + for name in names: + pack = self.organization_conformance_packs.get(name) + + if not pack: + raise NoSuchOrganizationConformancePackException( + "One or more organization conformance packs with specified names are not present. " + "Ensure your names are correct and try your request again later." + ) + + packs.append(pack.to_dict()) + + return {"OrganizationConformancePacks": packs} + + def describe_organization_conformance_pack_statuses(self, names): + packs = [] + statuses = [] + + if names: + for name in names: + pack = self.organization_conformance_packs.get(name) + + if not pack: + raise NoSuchOrganizationConformancePackException( + "One or more organization conformance packs with specified names are not present. " + "Ensure your names are correct and try your request again later." + ) + + packs.append(pack) + else: + packs = list(self.organization_conformance_packs.values()) + + for pack in packs: + statuses.append( + { + "OrganizationConformancePackName": pack.organization_conformance_pack_name, + "Status": pack._status, + "LastUpdateTime": pack.last_update_time, + } + ) + + return {"OrganizationConformancePackStatuses": statuses} + + def get_organization_conformance_pack_detailed_status(self, name): + pack = self.organization_conformance_packs.get(name) + + if not pack: + raise NoSuchOrganizationConformancePackException( + "One or more organization conformance packs with specified names are not present. " + "Ensure your names are correct and try your request again later." 
+ ) + + # actually here would be a list of all accounts in the organization + statuses = [ + { + "AccountId": DEFAULT_ACCOUNT_ID, + "ConformancePackName": "OrgConformsPack-{0}".format( + pack._unique_pack_name + ), + "Status": pack._status, + "LastUpdateTime": datetime2int(datetime.utcnow()), + } + ] + + return {"OrganizationConformancePackDetailedStatuses": statuses} + + def delete_organization_conformance_pack(self, name): + pack = self.organization_conformance_packs.get(name) + + if not pack: + raise NoSuchOrganizationConformancePackException( + "Could not find an OrganizationConformancePack for given request with resourceName {}".format( + name + ) + ) + + self.organization_conformance_packs.pop(name) + config_backends = {} for region in Session().get_available_regions("config"): diff --git a/moto/config/responses.py b/moto/config/responses.py index 3b647b5bff7e..7dcc9a01bc6d 100644 --- a/moto/config/responses.py +++ b/moto/config/responses.py @@ -159,3 +159,46 @@ def put_evaluations(self): self._get_param("TestMode"), ) return json.dumps(evaluations) + + def put_organization_conformance_pack(self): + conformance_pack = self.config_backend.put_organization_conformance_pack( + region=self.region, + name=self._get_param("OrganizationConformancePackName"), + template_s3_uri=self._get_param("TemplateS3Uri"), + template_body=self._get_param("TemplateBody"), + delivery_s3_bucket=self._get_param("DeliveryS3Bucket"), + delivery_s3_key_prefix=self._get_param("DeliveryS3KeyPrefix"), + input_parameters=self._get_param("ConformancePackInputParameters"), + excluded_accounts=self._get_param("ExcludedAccounts"), + ) + + return json.dumps(conformance_pack) + + def describe_organization_conformance_packs(self): + conformance_packs = self.config_backend.describe_organization_conformance_packs( + self._get_param("OrganizationConformancePackNames") + ) + + return json.dumps(conformance_packs) + + def describe_organization_conformance_pack_statuses(self): + statuses = self.config_backend.describe_organization_conformance_pack_statuses( + self._get_param("OrganizationConformancePackNames") + ) + + return json.dumps(statuses) + + def get_organization_conformance_pack_detailed_status(self): + # 'Filters' parameter is not implemented yet + statuses = self.config_backend.get_organization_conformance_pack_detailed_status( + self._get_param("OrganizationConformancePackName") + ) + + return json.dumps(statuses) + + def delete_organization_conformance_pack(self): + self.config_backend.delete_organization_conformance_pack( + self._get_param("OrganizationConformancePackName") + ) + + return "" diff --git a/tests/test_config/test_config.py b/tests/test_config/test_config.py index 1bf39428e0e9..34462222150e 100644 --- a/tests/test_config/test_config.py +++ b/tests/test_config/test_config.py @@ -1,5 +1,6 @@ import json import os +import time from datetime import datetime, timedelta import boto3 @@ -1874,3 +1875,314 @@ def test_put_evaluations(): response.should.equal( {"FailedEvaluations": [], "ResponseMetadata": {"HTTPStatusCode": 200,},} ) + + +@mock_config +def test_put_organization_conformance_pack(): + # given + client = boto3.client("config", region_name="us-east-1") + + # when + response = client.put_organization_conformance_pack( + DeliveryS3Bucket="awsconfigconforms-test-bucket", + OrganizationConformancePackName="test-pack", + TemplateS3Uri="s3://test-bucket/test-pack.yaml", + ) + + # then + arn = response["OrganizationConformancePackArn"] + arn.should.match( + 
r"arn:aws:config:us-east-1:\d{12}:organization-conformance-pack/test-pack-\w{8}" + ) + + # putting an organization conformance pack with the same name should result in an update + # when + response = client.put_organization_conformance_pack( + DeliveryS3Bucket="awsconfigconforms-test-bucket", + OrganizationConformancePackName="test-pack", + TemplateS3Uri="s3://test-bucket/test-pack-2.yaml", + ) + + # then + response["OrganizationConformancePackArn"].should.equal(arn) + + +@mock_config +def test_put_organization_conformance_pack_errors(): + # given + client = boto3.client("config", region_name="us-east-1") + + # when + with assert_raises(ClientError) as e: + client.put_organization_conformance_pack( + DeliveryS3Bucket="awsconfigconforms-test-bucket", + OrganizationConformancePackName="test-pack", + ) + + # then + ex = e.exception + ex.operation_name.should.equal("PutOrganizationConformancePack") + ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) + ex.response["Error"]["Code"].should.contain("ValidationException") + ex.response["Error"]["Message"].should.equal("Template body is invalid") + + # when + with assert_raises(ClientError) as e: + client.put_organization_conformance_pack( + DeliveryS3Bucket="awsconfigconforms-test-bucket", + OrganizationConformancePackName="test-pack", + TemplateS3Uri="invalid-s3-uri", + ) + + # then + ex = e.exception + ex.operation_name.should.equal("PutOrganizationConformancePack") + ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) + ex.response["Error"]["Code"].should.contain("ValidationException") + ex.response["Error"]["Message"].should.equal( + "1 validation error detected: " + "Value 'invalid-s3-uri' at 'templateS3Uri' failed to satisfy constraint: " + "Member must satisfy regular expression pattern: " + "s3://.*" + ) + + +@mock_config +def test_describe_organization_conformance_packs(): + # given + client = boto3.client("config", region_name="us-east-1") + arn = client.put_organization_conformance_pack( + DeliveryS3Bucket="awsconfigconforms-test-bucket", + OrganizationConformancePackName="test-pack", + TemplateS3Uri="s3://test-bucket/test-pack.yaml", + )["OrganizationConformancePackArn"] + + # when + response = client.describe_organization_conformance_packs( + OrganizationConformancePackNames=["test-pack"] + ) + + # then + response["OrganizationConformancePacks"].should.have.length_of(1) + pack = response["OrganizationConformancePacks"][0] + pack["OrganizationConformancePackName"].should.equal("test-pack") + pack["OrganizationConformancePackArn"].should.equal(arn) + pack["DeliveryS3Bucket"].should.equal("awsconfigconforms-test-bucket") + pack["ConformancePackInputParameters"].should.have.length_of(0) + pack["ExcludedAccounts"].should.have.length_of(0) + pack["LastUpdateTime"].should.be.a("datetime.datetime") + + +@mock_config +def test_describe_organization_conformance_packs_errors(): + # given + client = boto3.client("config", region_name="us-east-1") + + # when + with assert_raises(ClientError) as e: + client.describe_organization_conformance_packs( + OrganizationConformancePackNames=["not-existing"] + ) + + # then + ex = e.exception + ex.operation_name.should.equal("DescribeOrganizationConformancePacks") + ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) + ex.response["Error"]["Code"].should.contain( + "NoSuchOrganizationConformancePackException" + ) + ex.response["Error"]["Message"].should.equal( + "One or more organization conformance packs with specified names are not present. 
" + "Ensure your names are correct and try your request again later." + ) + + +@mock_config +def test_describe_organization_conformance_pack_statuses(): + # given + client = boto3.client("config", region_name="us-east-1") + arn = client.put_organization_conformance_pack( + DeliveryS3Bucket="awsconfigconforms-test-bucket", + OrganizationConformancePackName="test-pack", + TemplateS3Uri="s3://test-bucket/test-pack.yaml", + )["OrganizationConformancePackArn"] + + # when + response = client.describe_organization_conformance_pack_statuses( + OrganizationConformancePackNames=["test-pack"] + ) + + # then + response["OrganizationConformancePackStatuses"].should.have.length_of(1) + status = response["OrganizationConformancePackStatuses"][0] + status["OrganizationConformancePackName"].should.equal("test-pack") + status["Status"].should.equal("CREATE_SUCCESSFUL") + update_time = status["LastUpdateTime"] + update_time.should.be.a("datetime.datetime") + + # when + response = client.describe_organization_conformance_pack_statuses() + + # then + response["OrganizationConformancePackStatuses"].should.have.length_of(1) + status = response["OrganizationConformancePackStatuses"][0] + status["OrganizationConformancePackName"].should.equal("test-pack") + status["Status"].should.equal("CREATE_SUCCESSFUL") + status["LastUpdateTime"].should.equal(update_time) + + # when + time.sleep(1) + client.put_organization_conformance_pack( + DeliveryS3Bucket="awsconfigconforms-test-bucket", + OrganizationConformancePackName="test-pack", + TemplateS3Uri="s3://test-bucket/test-pack-2.yaml", + ) + + # then + response = client.describe_organization_conformance_pack_statuses( + OrganizationConformancePackNames=["test-pack"] + ) + response["OrganizationConformancePackStatuses"].should.have.length_of(1) + status = response["OrganizationConformancePackStatuses"][0] + status["OrganizationConformancePackName"].should.equal("test-pack") + status["Status"].should.equal("UPDATE_SUCCESSFUL") + status["LastUpdateTime"].should.be.greater_than(update_time) + + +@mock_config +def test_describe_organization_conformance_pack_statuses_errors(): + # given + client = boto3.client("config", region_name="us-east-1") + + # when + with assert_raises(ClientError) as e: + client.describe_organization_conformance_pack_statuses( + OrganizationConformancePackNames=["not-existing"] + ) + + # then + ex = e.exception + ex.operation_name.should.equal("DescribeOrganizationConformancePackStatuses") + ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) + ex.response["Error"]["Code"].should.contain( + "NoSuchOrganizationConformancePackException" + ) + ex.response["Error"]["Message"].should.equal( + "One or more organization conformance packs with specified names are not present. " + "Ensure your names are correct and try your request again later." 
+ ) + + +@mock_config +def test_get_organization_conformance_pack_detailed_status(): + # given + client = boto3.client("config", region_name="us-east-1") + arn = client.put_organization_conformance_pack( + DeliveryS3Bucket="awsconfigconforms-test-bucket", + OrganizationConformancePackName="test-pack", + TemplateS3Uri="s3://test-bucket/test-pack.yaml", + )["OrganizationConformancePackArn"] + + # when + response = client.get_organization_conformance_pack_detailed_status( + OrganizationConformancePackName="test-pack" + ) + + # then + response["OrganizationConformancePackDetailedStatuses"].should.have.length_of(1) + status = response["OrganizationConformancePackDetailedStatuses"][0] + status["AccountId"].should.equal(ACCOUNT_ID) + status["ConformancePackName"].should.equal( + "OrgConformsPack-{}".format(arn[arn.rfind("/") + 1 :]) + ) + status["Status"].should.equal("CREATE_SUCCESSFUL") + update_time = status["LastUpdateTime"] + update_time.should.be.a("datetime.datetime") + + # when + time.sleep(1) + client.put_organization_conformance_pack( + DeliveryS3Bucket="awsconfigconforms-test-bucket", + OrganizationConformancePackName="test-pack", + TemplateS3Uri="s3://test-bucket/test-pack-2.yaml", + ) + + # then + response = client.get_organization_conformance_pack_detailed_status( + OrganizationConformancePackName="test-pack" + ) + response["OrganizationConformancePackDetailedStatuses"].should.have.length_of(1) + status = response["OrganizationConformancePackDetailedStatuses"][0] + status["AccountId"].should.equal(ACCOUNT_ID) + status["ConformancePackName"].should.equal( + "OrgConformsPack-{}".format(arn[arn.rfind("/") + 1 :]) + ) + status["Status"].should.equal("UPDATE_SUCCESSFUL") + status["LastUpdateTime"].should.be.greater_than(update_time) + + +@mock_config +def test_get_organization_conformance_pack_detailed_status_errors(): + # given + client = boto3.client("config", region_name="us-east-1") + + # when + with assert_raises(ClientError) as e: + client.get_organization_conformance_pack_detailed_status( + OrganizationConformancePackName="not-existing" + ) + + # then + ex = e.exception + ex.operation_name.should.equal("GetOrganizationConformancePackDetailedStatus") + ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) + ex.response["Error"]["Code"].should.contain( + "NoSuchOrganizationConformancePackException" + ) + ex.response["Error"]["Message"].should.equal( + "One or more organization conformance packs with specified names are not present. " + "Ensure your names are correct and try your request again later." 
+ ) + + +@mock_config +def test_delete_organization_conformance_pack(): + # given + client = boto3.client("config", region_name="us-east-1") + arn = client.put_organization_conformance_pack( + DeliveryS3Bucket="awsconfigconforms-test-bucket", + OrganizationConformancePackName="test-pack", + TemplateS3Uri="s3://test-bucket/test-pack.yaml", + )["OrganizationConformancePackArn"] + + # when + response = client.delete_organization_conformance_pack( + OrganizationConformancePackName="test-pack" + ) + + # then + response = client.describe_organization_conformance_pack_statuses() + response["OrganizationConformancePackStatuses"].should.have.length_of(0) + + +@mock_config +def test_delete_organization_conformance_pack_errors(): + # given + client = boto3.client("config", region_name="us-east-1") + + # when + with assert_raises(ClientError) as e: + client.delete_organization_conformance_pack( + OrganizationConformancePackName="not-existing" + ) + + # then + ex = e.exception + ex.operation_name.should.equal("DeleteOrganizationConformancePack") + ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) + ex.response["Error"]["Code"].should.contain( + "NoSuchOrganizationConformancePackException" + ) + ex.response["Error"]["Message"].should.equal( + "Could not find an OrganizationConformancePack for given request with resourceName not-existing" + ) From f31f8e08c1dc122ad51f58abe82970ca4671532c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Anton=20Gr=C3=BCbel?= Date: Tue, 14 Jul 2020 11:27:39 +0200 Subject: [PATCH 435/658] Organizations - implement AWS Service Access functionality (#3122) * Add organizations.enable_aws_service_access * Add organizations.list_aws_service_access_for_organization * Add organizations.disable_aws_service_access --- moto/organizations/exceptions.py | 7 +- moto/organizations/models.py | 93 ++++++++++++++- moto/organizations/responses.py | 15 +++ .../test_organizations_boto3.py | 108 ++++++++++++++++++ 4 files changed, 215 insertions(+), 8 deletions(-) diff --git a/moto/organizations/exceptions.py b/moto/organizations/exceptions.py index b40908862631..3649e3a13022 100644 --- a/moto/organizations/exceptions.py +++ b/moto/organizations/exceptions.py @@ -5,11 +5,8 @@ class InvalidInputException(JsonRESTError): code = 400 - def __init__(self): - super(InvalidInputException, self).__init__( - "InvalidInputException", - "You provided a value that does not match the required pattern.", - ) + def __init__(self, message): + super(InvalidInputException, self).__init__("InvalidInputException", message) class DuplicateOrganizationalUnitException(JsonRESTError): diff --git a/moto/organizations/models.py b/moto/organizations/models.py index 0db069f9afd9..d538ec1b8b95 100644 --- a/moto/organizations/models.py +++ b/moto/organizations/models.py @@ -173,12 +173,60 @@ def describe(self): } +class FakeServiceAccess(BaseModel): + # List of trusted services, which support trusted access with Organizations + # https://docs.aws.amazon.com/organizations/latest/userguide/orgs_integrated-services-list.html + TRUSTED_SERVICES = [ + "aws-artifact-account-sync.amazonaws.com", + "backup.amazonaws.com", + "member.org.stacksets.cloudformation.amazonaws.com", + "cloudtrail.amazonaws.com", + "compute-optimizer.amazonaws.com", + "config.amazonaws.com", + "config-multiaccountsetup.amazonaws.com", + "controltower.amazonaws.com", + "ds.amazonaws.com", + "fms.amazonaws.com", + "guardduty.amazonaws.com", + "access-analyzer.amazonaws.com", + "license-manager.amazonaws.com", + 
"license-manager.member-account.amazonaws.com.", + "macie.amazonaws.com", + "ram.amazonaws.com", + "servicecatalog.amazonaws.com", + "servicequotas.amazonaws.com", + "sso.amazonaws.com", + "ssm.amazonaws.com", + "tagpolicies.tag.amazonaws.com", + ] + + def __init__(self, **kwargs): + if not self.trusted_service(kwargs["ServicePrincipal"]): + raise InvalidInputException( + "You specified an unrecognized service principal." + ) + + self.service_principal = kwargs["ServicePrincipal"] + self.date_enabled = datetime.datetime.utcnow() + + def describe(self): + return { + "ServicePrincipal": self.service_principal, + "DateEnabled": unix_time(self.date_enabled), + } + + @staticmethod + def trusted_service(service_principal): + return service_principal in FakeServiceAccess.TRUSTED_SERVICES + + class OrganizationsBackend(BaseBackend): def __init__(self): self.org = None self.accounts = [] self.ou = [] self.policies = [] + self.services = [] def create_organization(self, **kwargs): self.org = FakeOrganization(kwargs["FeatureSet"]) @@ -459,7 +507,9 @@ def tag_resource(self, **kwargs): account = next((a for a in self.accounts if a.id == kwargs["ResourceId"]), None) if account is None: - raise InvalidInputException + raise InvalidInputException( + "You provided a value that does not match the required pattern." + ) new_tags = {tag["Key"]: tag["Value"] for tag in kwargs["Tags"]} account.tags.update(new_tags) @@ -468,7 +518,9 @@ def list_tags_for_resource(self, **kwargs): account = next((a for a in self.accounts if a.id == kwargs["ResourceId"]), None) if account is None: - raise InvalidInputException + raise InvalidInputException( + "You provided a value that does not match the required pattern." + ) tags = [{"Key": key, "Value": value} for key, value in account.tags.items()] return dict(Tags=tags) @@ -477,10 +529,45 @@ def untag_resource(self, **kwargs): account = next((a for a in self.accounts if a.id == kwargs["ResourceId"]), None) if account is None: - raise InvalidInputException + raise InvalidInputException( + "You provided a value that does not match the required pattern." + ) for key in kwargs["TagKeys"]: account.tags.pop(key, None) + def enable_aws_service_access(self, **kwargs): + service = FakeServiceAccess(**kwargs) + + # enabling an existing service results in no changes + if any( + service["ServicePrincipal"] == kwargs["ServicePrincipal"] + for service in self.services + ): + return + + self.services.append(service.describe()) + + def list_aws_service_access_for_organization(self): + return dict(EnabledServicePrincipals=self.services) + + def disable_aws_service_access(self, **kwargs): + if not FakeServiceAccess.trusted_service(kwargs["ServicePrincipal"]): + raise InvalidInputException( + "You specified an unrecognized service principal." 
+            )
+
+        service_principal = next(
+            (
+                service
+                for service in self.services
+                if service["ServicePrincipal"] == kwargs["ServicePrincipal"]
+            ),
+            None,
+        )
+
+        if service_principal:
+            self.services.remove(service_principal)
+
 
 organizations_backend = OrganizationsBackend()
diff --git a/moto/organizations/responses.py b/moto/organizations/responses.py
index ba7dd4453ea7..616deacbc84b 100644
--- a/moto/organizations/responses.py
+++ b/moto/organizations/responses.py
@@ -139,3 +139,18 @@ def untag_resource(self):
         return json.dumps(
             self.organizations_backend.untag_resource(**self.request_params)
         )
+
+    def enable_aws_service_access(self):
+        return json.dumps(
+            self.organizations_backend.enable_aws_service_access(**self.request_params)
+        )
+
+    def list_aws_service_access_for_organization(self):
+        return json.dumps(
+            self.organizations_backend.list_aws_service_access_for_organization()
+        )
+
+    def disable_aws_service_access(self):
+        return json.dumps(
+            self.organizations_backend.disable_aws_service_access(**self.request_params)
+        )
diff --git a/tests/test_organizations/test_organizations_boto3.py b/tests/test_organizations/test_organizations_boto3.py
index 876e83712766..c2327dc408e7 100644
--- a/tests/test_organizations/test_organizations_boto3.py
+++ b/tests/test_organizations/test_organizations_boto3.py
@@ -1,5 +1,7 @@
 from __future__ import unicode_literals
 
+from datetime import datetime
+
 import boto3
 import json
 import six
@@ -751,3 +753,109 @@ def test_update_organizational_unit_duplicate_error():
     exc.response["Error"]["Message"].should.equal(
         "An OU with the same name already exists."
     )
+
+
+@mock_organizations
+def test_enable_aws_service_access():
+    # given
+    client = boto3.client("organizations", region_name="us-east-1")
+    client.create_organization(FeatureSet="ALL")
+
+    # when
+    client.enable_aws_service_access(ServicePrincipal="config.amazonaws.com")
+
+    # then
+    response = client.list_aws_service_access_for_organization()
+    response["EnabledServicePrincipals"].should.have.length_of(1)
+    service = response["EnabledServicePrincipals"][0]
+    service["ServicePrincipal"].should.equal("config.amazonaws.com")
+    date_enabled = service["DateEnabled"]
+    date_enabled.should.be.a(datetime)
+
+    # enabling the same service again should not result in any error or change
+    # when
+    client.enable_aws_service_access(ServicePrincipal="config.amazonaws.com")
+
+    # then
+    response = client.list_aws_service_access_for_organization()
+    response["EnabledServicePrincipals"].should.have.length_of(1)
+    service = response["EnabledServicePrincipals"][0]
+    service["ServicePrincipal"].should.equal("config.amazonaws.com")
+    service["DateEnabled"].should.equal(date_enabled)
+
+
+@mock_organizations
+def test_enable_aws_service_access_errors():
+    client = boto3.client("organizations", region_name="us-east-1")
+    client.create_organization(FeatureSet="ALL")
+
+    with assert_raises(ClientError) as e:
+        client.enable_aws_service_access(ServicePrincipal="moto.amazonaws.com")
+    ex = e.exception
+    ex.operation_name.should.equal("EnableAWSServiceAccess")
+    ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400)
+    ex.response["Error"]["Code"].should.contain("InvalidInputException")
+    ex.response["Error"]["Message"].should.equal(
+        "You specified an unrecognized service principal."
+    )
+
+
+@mock_organizations
+def test_list_aws_service_access_for_organization():
+    # given
+    client = boto3.client("organizations", region_name="us-east-1")
+    client.create_organization(FeatureSet="ALL")
+    client.enable_aws_service_access(ServicePrincipal="config.amazonaws.com")
+    client.enable_aws_service_access(ServicePrincipal="ram.amazonaws.com")
+
+    # when
+    response = client.list_aws_service_access_for_organization()
+
+    # then
+    response["EnabledServicePrincipals"].should.have.length_of(2)
+    services = sorted(
+        response["EnabledServicePrincipals"], key=lambda i: i["ServicePrincipal"]
+    )
+    services[0]["ServicePrincipal"].should.equal("config.amazonaws.com")
+    services[0]["DateEnabled"].should.be.a(datetime)
+    services[1]["ServicePrincipal"].should.equal("ram.amazonaws.com")
+    services[1]["DateEnabled"].should.be.a(datetime)
+
+
+@mock_organizations
+def test_disable_aws_service_access():
+    # given
+    client = boto3.client("organizations", region_name="us-east-1")
+    client.create_organization(FeatureSet="ALL")
+    client.enable_aws_service_access(ServicePrincipal="config.amazonaws.com")
+
+    # when
+    client.disable_aws_service_access(ServicePrincipal="config.amazonaws.com")
+
+    # then
+    response = client.list_aws_service_access_for_organization()
+    response["EnabledServicePrincipals"].should.have.length_of(0)
+
+    # disabling the same service again should not result in any error
+    # when
+    client.disable_aws_service_access(ServicePrincipal="config.amazonaws.com")
+
+    # then
+    response = client.list_aws_service_access_for_organization()
+    response["EnabledServicePrincipals"].should.have.length_of(0)
+
+
+@mock_organizations
+def test_disable_aws_service_access_errors():
+    client = boto3.client("organizations", region_name="us-east-1")
+    client.create_organization(FeatureSet="ALL")
+
+    with assert_raises(ClientError) as e:
+        client.disable_aws_service_access(ServicePrincipal="moto.amazonaws.com")
+    ex = e.exception
+    ex.operation_name.should.equal("DisableAWSServiceAccess")
+    ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400)
+    ex.response["Error"]["Code"].should.contain("InvalidInputException")
+    ex.response["Error"]["Message"].should.equal(
+        "You specified an unrecognized service principal."
+    )
From 890c3b4954a864f5362dc8d8645b8d0751de9331 Mon Sep 17 00:00:00 2001
From: Adam Richie-Halford
Date: Tue, 14 Jul 2020 03:29:49 -0700
Subject: [PATCH 436/658] Make batch.utils.lowercase_first_key() recursive
 (#3124)

* Make batch.utils.lowercase_first_key() recursive

* Reformat using black

* Add test of recursive lowercase_first_key()

* Fix typo in tests/test_batch/test_cloudformation.py
---
 moto/batch/utils.py                     | 10 +++++++++-
 tests/test_batch/test_cloudformation.py | 15 +++++++++++++++
 2 files changed, 24 insertions(+), 1 deletion(-)

diff --git a/moto/batch/utils.py b/moto/batch/utils.py
index ce9b2ffe88bb..d9f79e23627c 100644
--- a/moto/batch/utils.py
+++ b/moto/batch/utils.py
@@ -21,6 +21,14 @@ def lowercase_first_key(some_dict):
     new_dict = {}
     for key, value in some_dict.items():
         new_key = key[0].lower() + key[1:]
-        new_dict[new_key] = value
+        try:
+            if isinstance(value, dict):
+                new_dict[new_key] = lowercase_first_key(value)
+            elif all([isinstance(v, dict) for v in value]):
+                new_dict[new_key] = [lowercase_first_key(v) for v in value]
+            else:
+                new_dict[new_key] = value
+        except TypeError:
+            new_dict[new_key] = value
 
     return new_dict
diff --git a/tests/test_batch/test_cloudformation.py b/tests/test_batch/test_cloudformation.py
index a6baedb38750..cc51b79f3ac1 100644
--- a/tests/test_batch/test_cloudformation.py
+++ b/tests/test_batch/test_cloudformation.py
@@ -234,6 +234,7 @@ def test_create_job_def_cf():
                     "Vcpus": 2,
                     "Memory": 2000,
                     "Command": ["echo", "Hello world"],
+                    "LinuxParameters": {"Devices": [{"HostPath": "test-path"}]},
                 },
                 "RetryStrategy": {"Attempts": 1},
             },
@@ -262,3 +263,17 @@ def test_create_job_def_cf():
     job_def_resource["PhysicalResourceId"].startswith("arn:aws:batch:")
     job_def_resource["PhysicalResourceId"].should.contain("test_stack-JobDef")
     job_def_resource["PhysicalResourceId"].should.contain("job-definition/")
+
+    # Test the linux parameter device host path
+    # This ensures that batch is parsing the parameter dictionaries
+    # correctly by recursively converting the first character of all
+    # dict keys to lowercase.
+    batch_conn = boto3.client("batch", DEFAULT_REGION)
+    response = batch_conn.describe_job_definitions(
+        jobDefinitions=[job_def_resource["PhysicalResourceId"]]
+    )
+    job_def_linux_device_host_path = response.get("jobDefinitions")[0][
+        "containerProperties"
+    ]["linuxParameters"]["devices"][0]["hostPath"]
+
+    job_def_linux_device_host_path.should.equal("test-path")
From 9072153474c9e4afbe271ab398a4c9fa7ec9aacd Mon Sep 17 00:00:00 2001
From: Mike Fogel
Date: Tue, 14 Jul 2020 09:42:13 -0300
Subject: [PATCH 437/658] Fix dynamodb2 KEYS_ONLY Indexes (#3125)

KEYS_ONLY indexes include table keys.
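For illustration, a minimal sketch of the behavior this change produces (an
editorial example, not part of the committed diff; the table, index, and
attribute names are invented): querying a GSI declared with ProjectionType
KEYS_ONLY should now return the table key alongside the index key, while
non-key attributes remain filtered out.

    import boto3
    from moto import mock_dynamodb2

    @mock_dynamodb2
    def keys_only_projection_sketch():
        client = boto3.client("dynamodb", region_name="us-east-1")
        client.create_table(
            TableName="example-table",
            KeySchema=[{"AttributeName": "pk", "KeyType": "HASH"}],
            AttributeDefinitions=[
                {"AttributeName": "pk", "AttributeType": "S"},
                {"AttributeName": "gsiPk", "AttributeType": "S"},
            ],
            GlobalSecondaryIndexes=[
                {
                    "IndexName": "GSI",
                    "KeySchema": [{"AttributeName": "gsiPk", "KeyType": "HASH"}],
                    "Projection": {"ProjectionType": "KEYS_ONLY"},
                }
            ],
            ProvisionedThroughput={"ReadCapacityUnits": 5, "WriteCapacityUnits": 5},
        )
        client.put_item(
            TableName="example-table",
            Item={
                "pk": {"S": "pk-1"},
                "gsiPk": {"S": "gsi-pk"},
                "other": {"S": "not projected"},
            },
        )
        items = client.query(
            TableName="example-table",
            IndexName="GSI",
            KeyConditionExpression="gsiPk = :v",
            ExpressionAttributeValues={":v": {"S": "gsi-pk"}},
        )["Items"]
        # After this fix the projected item carries the table key ("pk") in
        # addition to the index key ("gsiPk"); "other" is still filtered out.
        assert items == [{"pk": {"S": "pk-1"}, "gsiPk": {"S": "gsi-pk"}}]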
--- moto/dynamodb2/models/__init__.py | 30 ++++++++++++++++++++------- tests/test_dynamodb2/test_dynamodb.py | 23 +++++++++++++++----- 2 files changed, 40 insertions(+), 13 deletions(-) diff --git a/moto/dynamodb2/models/__init__.py b/moto/dynamodb2/models/__init__.py index eafa2743af4c..233c4001f100 100644 --- a/moto/dynamodb2/models/__init__.py +++ b/moto/dynamodb2/models/__init__.py @@ -283,17 +283,18 @@ def project(self, item): if self.projection: if self.projection.get("ProjectionType", None) == "KEYS_ONLY": allowed_attributes = ",".join( - [key["AttributeName"] for key in self.schema] + self.table_key_attrs + [key["AttributeName"] for key in self.schema] ) item.filter(allowed_attributes) return item class LocalSecondaryIndex(SecondaryIndex): - def __init__(self, index_name, schema, projection): + def __init__(self, index_name, schema, projection, table_key_attrs): self.name = index_name self.schema = schema self.projection = projection + self.table_key_attrs = table_key_attrs def describe(self): return { @@ -303,21 +304,29 @@ def describe(self): } @staticmethod - def create(dct): + def create(dct, table_key_attrs): return LocalSecondaryIndex( index_name=dct["IndexName"], schema=dct["KeySchema"], projection=dct["Projection"], + table_key_attrs=table_key_attrs, ) class GlobalSecondaryIndex(SecondaryIndex): def __init__( - self, index_name, schema, projection, status="ACTIVE", throughput=None + self, + index_name, + schema, + projection, + table_key_attrs, + status="ACTIVE", + throughput=None, ): self.name = index_name self.schema = schema self.projection = projection + self.table_key_attrs = table_key_attrs self.status = status self.throughput = throughput or { "ReadCapacityUnits": 0, @@ -334,11 +343,12 @@ def describe(self): } @staticmethod - def create(dct): + def create(dct, table_key_attrs): return GlobalSecondaryIndex( index_name=dct["IndexName"], schema=dct["KeySchema"], projection=dct["Projection"], + table_key_attrs=table_key_attrs, throughput=dct.get("ProvisionedThroughput", None), ) @@ -374,16 +384,20 @@ def __init__( else: self.range_key_attr = elem["AttributeName"] self.range_key_type = elem["KeyType"] + self.table_key_attrs = [ + key for key in (self.hash_key_attr, self.range_key_attr) if key + ] if throughput is None: self.throughput = {"WriteCapacityUnits": 10, "ReadCapacityUnits": 10} else: self.throughput = throughput self.throughput["NumberOfDecreasesToday"] = 0 self.indexes = [ - LocalSecondaryIndex.create(i) for i in (indexes if indexes else []) + LocalSecondaryIndex.create(i, self.table_key_attrs) + for i in (indexes if indexes else []) ] self.global_indexes = [ - GlobalSecondaryIndex.create(i) + GlobalSecondaryIndex.create(i, self.table_key_attrs) for i in (global_indexes if global_indexes else []) ] self.created_at = datetime.datetime.utcnow() @@ -1015,7 +1029,7 @@ def update_table_global_indexes(self, name, global_index_updates): ) gsis_by_name[gsi_to_create["IndexName"]] = GlobalSecondaryIndex.create( - gsi_to_create + gsi_to_create, table.table_key_attrs, ) # in python 3.6, dict.values() returns a dict_values object, but we expect it to be a list in other diff --git a/tests/test_dynamodb2/test_dynamodb.py b/tests/test_dynamodb2/test_dynamodb.py index 2dfb8fd2ddd1..d56fd3f11eb3 100644 --- a/tests/test_dynamodb2/test_dynamodb.py +++ b/tests/test_dynamodb2/test_dynamodb.py @@ -5358,14 +5358,23 @@ def test_gsi_projection_type_keys_only(): IndexName="GSI-K1", )["Items"] items.should.have.length_of(1) - # Item should only include GSI Keys, as per the ProjectionType 
- items[0].should.equal({"gsiK1PartitionKey": "gsi-pk", "gsiK1SortKey": "gsi-sk"}) + # Item should only include GSI Keys and Table Keys, as per the ProjectionType + items[0].should.equal( + { + "gsiK1PartitionKey": "gsi-pk", + "gsiK1SortKey": "gsi-sk", + "partitionKey": "pk-1", + } + ) @mock_dynamodb2 def test_lsi_projection_type_keys_only(): table_schema = { - "KeySchema": [{"AttributeName": "partitionKey", "KeyType": "HASH"}], + "KeySchema": [ + {"AttributeName": "partitionKey", "KeyType": "HASH"}, + {"AttributeName": "sortKey", "KeyType": "RANGE"}, + ], "LocalSecondaryIndexes": [ { "IndexName": "LSI", @@ -5378,12 +5387,14 @@ def test_lsi_projection_type_keys_only(): ], "AttributeDefinitions": [ {"AttributeName": "partitionKey", "AttributeType": "S"}, + {"AttributeName": "sortKey", "AttributeType": "S"}, {"AttributeName": "lsiK1SortKey", "AttributeType": "S"}, ], } item = { "partitionKey": "pk-1", + "sortKey": "sk-1", "lsiK1SortKey": "lsi-sk", "someAttribute": "lore ipsum", } @@ -5399,5 +5410,7 @@ def test_lsi_projection_type_keys_only(): KeyConditionExpression=Key("partitionKey").eq("pk-1"), IndexName="LSI", )["Items"] items.should.have.length_of(1) - # Item should only include GSI Keys, as per the ProjectionType - items[0].should.equal({"partitionKey": "pk-1", "lsiK1SortKey": "lsi-sk"}) + # Item should only include GSI Keys and Table Keys, as per the ProjectionType + items[0].should.equal( + {"partitionKey": "pk-1", "sortKey": "sk-1", "lsiK1SortKey": "lsi-sk"} + ) From b5c7356b20ec0bf5292566f6cf3ac9e68f1a689c Mon Sep 17 00:00:00 2001 From: cm-iwata <38879253+cm-iwata@users.noreply.github.com> Date: Wed, 15 Jul 2020 15:00:44 +0900 Subject: [PATCH 438/658] fix #3129 API Gateway create_api_key generate wrong default value (#3130) --- moto/apigateway/models.py | 4 ++-- tests/test_apigateway/test_apigateway.py | 6 +++++- 2 files changed, 7 insertions(+), 3 deletions(-) diff --git a/moto/apigateway/models.py b/moto/apigateway/models.py index fbd525df154e..e4cbac36281c 100644 --- a/moto/apigateway/models.py +++ b/moto/apigateway/models.py @@ -400,10 +400,10 @@ def __init__( self, name=None, description=None, - enabled=True, + enabled=False, generateDistinctId=False, value=None, - stageKeys=None, + stageKeys=[], tags=None, customerId=None, ): diff --git a/tests/test_apigateway/test_apigateway.py b/tests/test_apigateway/test_apigateway.py index 1c7f6d3850f7..0aadec3e1792 100644 --- a/tests/test_apigateway/test_apigateway.py +++ b/tests/test_apigateway/test_apigateway.py @@ -1845,7 +1845,11 @@ def test_create_api_key(): apikey_name = "TESTKEY1" payload = {"value": apikey_value, "name": apikey_name} - client.create_api_key(**payload) + response = client.create_api_key(**payload) + response["name"].should.equal(apikey_name) + response["value"].should.equal(apikey_value) + response["enabled"].should.equal(False) + response["stageKeys"].should.equal([]) response = client.get_api_keys() len(response["items"]).should.equal(1) From 1b355f7f06354cb31b71e549232da3aea7145da5 Mon Sep 17 00:00:00 2001 From: cm-iwata <38879253+cm-iwata@users.noreply.github.com> Date: Wed, 15 Jul 2020 17:41:41 +0900 Subject: [PATCH 439/658] fix #3131 fix API Gateway:delete_api_key return wrong status code (#3132) * fix #3131 fix API Gateway:delete_api_key return wrong status code * lint --- moto/apigateway/responses.py | 5 ++++- tests/test_apigateway/test_apigateway.py | 3 ++- 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/moto/apigateway/responses.py b/moto/apigateway/responses.py index 
e3951192b186..65a92276f062 100644 --- a/moto/apigateway/responses.py +++ b/moto/apigateway/responses.py @@ -466,6 +466,7 @@ def apikey_individual(self, request, full_url, headers): url_path_parts = self.path.split("/") apikey = url_path_parts[2] + status_code = 200 if self.method == "GET": apikey_response = self.backend.get_apikey(apikey) elif self.method == "PATCH": @@ -473,7 +474,9 @@ def apikey_individual(self, request, full_url, headers): apikey_response = self.backend.update_apikey(apikey, patch_operations) elif self.method == "DELETE": apikey_response = self.backend.delete_apikey(apikey) - return 200, {}, json.dumps(apikey_response) + status_code = 202 + + return status_code, {}, json.dumps(apikey_response) def usage_plans(self, request, full_url, headers): self.setup_class(request, full_url, headers) diff --git a/tests/test_apigateway/test_apigateway.py b/tests/test_apigateway/test_apigateway.py index 0aadec3e1792..210efd9f84b4 100644 --- a/tests/test_apigateway/test_apigateway.py +++ b/tests/test_apigateway/test_apigateway.py @@ -1906,7 +1906,8 @@ def test_api_keys(): response = client.get_api_keys() len(response["items"]).should.equal(2) - client.delete_api_key(apiKey=apikey_id) + response = client.delete_api_key(apiKey=apikey_id) + response["ResponseMetadata"]["HTTPStatusCode"].should.equal(202) response = client.get_api_keys() len(response["items"]).should.equal(1) From 4e4ce5f9f109026cc0b794b9f7f8d89d30fc5598 Mon Sep 17 00:00:00 2001 From: cm-iwata <38879253+cm-iwata@users.noreply.github.com> Date: Wed, 15 Jul 2020 20:21:11 +0900 Subject: [PATCH 440/658] fix API Gateway:create_api_key return wrong status code (#3136) --- moto/apigateway/responses.py | 3 +-- tests/test_apigateway/test_apigateway.py | 1 + 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/moto/apigateway/responses.py b/moto/apigateway/responses.py index 65a92276f062..93ea5d4ffa64 100644 --- a/moto/apigateway/responses.py +++ b/moto/apigateway/responses.py @@ -454,11 +454,10 @@ def apikeys(self, request, full_url, headers): error.message, error.error_type ), ) - + return 201, {}, json.dumps(apikey_response) elif self.method == "GET": apikeys_response = self.backend.get_apikeys() return 200, {}, json.dumps({"item": apikeys_response}) - return 200, {}, json.dumps(apikey_response) def apikey_individual(self, request, full_url, headers): self.setup_class(request, full_url, headers) diff --git a/tests/test_apigateway/test_apigateway.py b/tests/test_apigateway/test_apigateway.py index 210efd9f84b4..756da76e9f4e 100644 --- a/tests/test_apigateway/test_apigateway.py +++ b/tests/test_apigateway/test_apigateway.py @@ -1846,6 +1846,7 @@ def test_create_api_key(): payload = {"value": apikey_value, "name": apikey_name} response = client.create_api_key(**payload) + response["ResponseMetadata"]["HTTPStatusCode"].should.equal(201) response["name"].should.equal(apikey_name) response["value"].should.equal(apikey_value) response["enabled"].should.equal(False) From 419f3fba5a32d2c77a6e7a89abf347acb4314f5a Mon Sep 17 00:00:00 2001 From: cm-iwata <38879253+cm-iwata@users.noreply.github.com> Date: Wed, 15 Jul 2020 22:01:03 +0900 Subject: [PATCH 441/658] fix API Gateway:create_usage_plan_key return wrong status code (#3134) --- moto/apigateway/responses.py | 4 +--- tests/test_apigateway/test_apigateway.py | 1 + 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/moto/apigateway/responses.py b/moto/apigateway/responses.py index 93ea5d4ffa64..d8f3ed5051c2 100644 --- a/moto/apigateway/responses.py +++ 
b/moto/apigateway/responses.py @@ -528,13 +528,11 @@ def usage_plan_keys(self, request, full_url, headers): error.message, error.error_type ), ) - + return 201, {}, json.dumps(usage_plan_response) elif self.method == "GET": usage_plans_response = self.backend.get_usage_plan_keys(usage_plan_id) return 200, {}, json.dumps({"item": usage_plans_response}) - return 200, {}, json.dumps(usage_plan_response) - def usage_plan_key_individual(self, request, full_url, headers): self.setup_class(request, full_url, headers) diff --git a/tests/test_apigateway/test_apigateway.py b/tests/test_apigateway/test_apigateway.py index 756da76e9f4e..d79851ab0e2f 100644 --- a/tests/test_apigateway/test_apigateway.py +++ b/tests/test_apigateway/test_apigateway.py @@ -1989,6 +1989,7 @@ def test_usage_plan_keys(): key_type = "API_KEY" payload = {"usagePlanId": usage_plan_id, "keyId": key_id, "keyType": key_type} response = client.create_usage_plan_key(**payload) + response["ResponseMetadata"]["HTTPStatusCode"].should.equals(201) usage_plan_key_id = response["id"] # Get current plan keys (expect 1) From 3e2a5e7ee80e32feab1bdce30c7fd9bf9bd2569a Mon Sep 17 00:00:00 2001 From: Koichi Ogura <12413803+number09@users.noreply.github.com> Date: Thu, 16 Jul 2020 17:13:12 +0900 Subject: [PATCH 442/658] fix #3133 Cognito Identity Provider : create_user_pool_client `GenerateSecret=True` doesn't work (#3135) * fix #3133 Cognito Identity Provider : create_user_pool_client `GenerateSecret=True` doesn't work * add test for update_user_pool_client --- moto/cognitoidp/models.py | 11 +++-- moto/cognitoidp/responses.py | 3 +- tests/test_cognitoidp/test_cognitoidp.py | 54 ++++++++++++++++++++++++ 3 files changed, 64 insertions(+), 4 deletions(-) diff --git a/moto/cognitoidp/models.py b/moto/cognitoidp/models.py index 4b4e0a8b1ba4..c93563c2a59d 100644 --- a/moto/cognitoidp/models.py +++ b/moto/cognitoidp/models.py @@ -210,10 +210,11 @@ def to_json(self, extended=True): class CognitoIdpUserPoolClient(BaseModel): - def __init__(self, user_pool_id, extended_config): + def __init__(self, user_pool_id, generate_secret, extended_config): self.user_pool_id = user_pool_id self.id = str(uuid.uuid4()) self.secret = str(uuid.uuid4()) + self.generate_secret = generate_secret or False self.extended_config = extended_config or {} def _base_json(self): @@ -225,6 +226,8 @@ def _base_json(self): def to_json(self, extended=False): user_pool_client_json = self._base_json() + if self.generate_secret: + user_pool_client_json.update({"ClientSecret": self.secret}) if extended: user_pool_client_json.update(self.extended_config) @@ -402,12 +405,14 @@ def update_user_pool_domain(self, domain, custom_domain_config): return user_pool_domain # User pool client - def create_user_pool_client(self, user_pool_id, extended_config): + def create_user_pool_client(self, user_pool_id, generate_secret, extended_config): user_pool = self.user_pools.get(user_pool_id) if not user_pool: raise ResourceNotFoundError(user_pool_id) - user_pool_client = CognitoIdpUserPoolClient(user_pool_id, extended_config) + user_pool_client = CognitoIdpUserPoolClient( + user_pool_id, generate_secret, extended_config + ) user_pool.clients[user_pool_client.id] = user_pool_client return user_pool_client diff --git a/moto/cognitoidp/responses.py b/moto/cognitoidp/responses.py index 6c89c48068d7..1c945b23ed78 100644 --- a/moto/cognitoidp/responses.py +++ b/moto/cognitoidp/responses.py @@ -84,8 +84,9 @@ def update_user_pool_domain(self): # User pool client def create_user_pool_client(self): user_pool_id = 
self.parameters.pop("UserPoolId") + generate_secret = self.parameters.pop("GenerateSecret", False) user_pool_client = cognitoidp_backends[self.region].create_user_pool_client( - user_pool_id, self.parameters + user_pool_id, generate_secret, self.parameters ) return json.dumps({"UserPoolClient": user_pool_client.to_json(extended=True)}) diff --git a/tests/test_cognitoidp/test_cognitoidp.py b/tests/test_cognitoidp/test_cognitoidp.py index 1bd258c6b00f..d76587d1b022 100644 --- a/tests/test_cognitoidp/test_cognitoidp.py +++ b/tests/test_cognitoidp/test_cognitoidp.py @@ -213,6 +213,29 @@ def test_create_user_pool_client(): result["UserPoolClient"]["UserPoolId"].should.equal(user_pool_id) result["UserPoolClient"]["ClientId"].should_not.be.none result["UserPoolClient"]["ClientName"].should.equal(client_name) + result["UserPoolClient"].should_not.have.key("ClientSecret") + result["UserPoolClient"]["CallbackURLs"].should.have.length_of(1) + result["UserPoolClient"]["CallbackURLs"][0].should.equal(value) + + +@mock_cognitoidp +def test_create_user_pool_client_returns_secret(): + conn = boto3.client("cognito-idp", "us-west-2") + + client_name = str(uuid.uuid4()) + value = str(uuid.uuid4()) + user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] + result = conn.create_user_pool_client( + UserPoolId=user_pool_id, + ClientName=client_name, + GenerateSecret=True, + CallbackURLs=[value], + ) + + result["UserPoolClient"]["UserPoolId"].should.equal(user_pool_id) + result["UserPoolClient"]["ClientId"].should_not.be.none + result["UserPoolClient"]["ClientName"].should.equal(client_name) + result["UserPoolClient"]["ClientSecret"].should_not.be.none result["UserPoolClient"]["CallbackURLs"].should.have.length_of(1) result["UserPoolClient"]["CallbackURLs"][0].should.equal(value) @@ -331,6 +354,37 @@ def test_update_user_pool_client(): ) result["UserPoolClient"]["ClientName"].should.equal(new_client_name) + result["UserPoolClient"].should_not.have.key("ClientSecret") + result["UserPoolClient"]["CallbackURLs"].should.have.length_of(1) + result["UserPoolClient"]["CallbackURLs"][0].should.equal(new_value) + + +@mock_cognitoidp +def test_update_user_pool_client_returns_secret(): + conn = boto3.client("cognito-idp", "us-west-2") + + old_client_name = str(uuid.uuid4()) + new_client_name = str(uuid.uuid4()) + old_value = str(uuid.uuid4()) + new_value = str(uuid.uuid4()) + user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] + client_details = conn.create_user_pool_client( + UserPoolId=user_pool_id, + ClientName=old_client_name, + GenerateSecret=True, + CallbackURLs=[old_value], + ) + client_secret = client_details["UserPoolClient"]["ClientSecret"] + + result = conn.update_user_pool_client( + UserPoolId=user_pool_id, + ClientId=client_details["UserPoolClient"]["ClientId"], + ClientName=new_client_name, + CallbackURLs=[new_value], + ) + + result["UserPoolClient"]["ClientName"].should.equal(new_client_name) + result["UserPoolClient"]["ClientSecret"].should.equal(client_secret) result["UserPoolClient"]["CallbackURLs"].should.have.length_of(1) result["UserPoolClient"]["CallbackURLs"][0].should.equal(new_value) From 1b80b0a8109cf1e45199757b8c98f07f9f3d3107 Mon Sep 17 00:00:00 2001 From: jweite Date: Thu, 16 Jul 2020 08:12:25 -0400 Subject: [PATCH 443/658] Sagemaker models (#3105) * First failing test, and enough framework to run it. * Rudimentary passing test. * Sagemaker Notebook Support, take-1: create, describe, start, stop, delete. * Added list_tags. 
* Merged in model support from https://github.com/porthunt/moto/tree/sagemaker-support.

* Re-org'd

* Fixed up describe_model exception when no matching model.

* Segregated tests by Sagemaker entity. Model arn check by regex.

* Python2 compatibility changes.

* Added sagemaker to list of known backends. Corrected urls.

* Added sagemaker special case to moto.server.infer_service_region_host due to
irregular url format (use of 'api' subdomain) to support server mode.

* Changes for PR 3105 comments of July 10, 2020

* PR3105 July 10, 2020, 8:55 AM EDT comment: dropped unnecessary re-addition
of arn when formulating model list response.

* PR 3105 July 15, 2020 9:10 AM EDT Comment: clean-up
SageMakerModelBackend.describe_models logic for finding the model in the dict.

* Optimized imports

Co-authored-by: Joseph Weitekamp
---
 moto/__init__.py                              |   1 +
 moto/backends.py                              |   1 +
 moto/sagemaker/__init__.py                    |   5 +
 moto/sagemaker/exceptions.py                  |  47 +++
 moto/sagemaker/models.py                      | 398 ++++++++++++++++++
 moto/sagemaker/responses.py                   | 127 ++++++
 moto/sagemaker/urls.py                        |  11 +
 moto/sagemaker/validators.py                  |  20 +
 moto/server.py                                |   4 +
 tests/test_sagemaker/__init__.py              |   0
 tests/test_sagemaker/test_sagemaker_models.py | 122 ++++++
 .../test_sagemaker_notebooks.py               | 227 ++++++++++
 12 files changed, 963 insertions(+)
 create mode 100644 moto/sagemaker/__init__.py
 create mode 100644 moto/sagemaker/exceptions.py
 create mode 100644 moto/sagemaker/models.py
 create mode 100644 moto/sagemaker/responses.py
 create mode 100644 moto/sagemaker/urls.py
 create mode 100644 moto/sagemaker/validators.py
 create mode 100644 tests/test_sagemaker/__init__.py
 create mode 100644 tests/test_sagemaker/test_sagemaker_models.py
 create mode 100644 tests/test_sagemaker/test_sagemaker_notebooks.py

diff --git a/moto/__init__.py b/moto/__init__.py
index b4375bfc6d83..5143a4933652 100644
--- a/moto/__init__.py
+++ b/moto/__init__.py
@@ -95,6 +95,7 @@ def f(*args, **kwargs):
 mock_route53_deprecated = lazy_load(".route53", "mock_route53_deprecated")
 mock_s3 = lazy_load(".s3", "mock_s3")
 mock_s3_deprecated = lazy_load(".s3", "mock_s3_deprecated")
+mock_sagemaker = lazy_load(".sagemaker", "mock_sagemaker")
 mock_secretsmanager = lazy_load(".secretsmanager", "mock_secretsmanager")
 mock_ses = lazy_load(".ses", "mock_ses")
 mock_ses_deprecated = lazy_load(".ses", "mock_ses_deprecated")
diff --git a/moto/backends.py b/moto/backends.py
index 6f612bf1f65a..a73940909484 100644
--- a/moto/backends.py
+++ b/moto/backends.py
@@ -58,6 +58,7 @@
     "route53": ("route53", "route53_backends"),
     "s3": ("s3", "s3_backends"),
     "s3bucket_path": ("s3", "s3_backends"),
+    "sagemaker": ("sagemaker", "sagemaker_backends"),
     "secretsmanager": ("secretsmanager", "secretsmanager_backends"),
     "ses": ("ses", "ses_backends"),
     "sns": ("sns", "sns_backends"),
diff --git a/moto/sagemaker/__init__.py b/moto/sagemaker/__init__.py
new file mode 100644
index 000000000000..85e6353801d4
--- /dev/null
+++ b/moto/sagemaker/__init__.py
@@ -0,0 +1,5 @@
+from __future__ import unicode_literals
+from .models import sagemaker_backends
+
+sagemaker_backend = sagemaker_backends["us-east-1"]
+mock_sagemaker = sagemaker_backend.decorator
diff --git a/moto/sagemaker/exceptions.py b/moto/sagemaker/exceptions.py
new file mode 100644
index 000000000000..dc2ce915aca1
--- /dev/null
+++ b/moto/sagemaker/exceptions.py
@@ -0,0 +1,47 @@
+from __future__ import unicode_literals
+import json
+from moto.core.exceptions import RESTError
+
+
+ERROR_WITH_MODEL_NAME = """{% extends 'single_error' %}
+{% block extra %}{{ model }}{%
endblock %} +""" + + +class SagemakerClientError(RESTError): + def __init__(self, *args, **kwargs): + kwargs.setdefault("template", "single_error") + self.templates["model_error"] = ERROR_WITH_MODEL_NAME + super(SagemakerClientError, self).__init__(*args, **kwargs) + + +class ModelError(RESTError): + def __init__(self, *args, **kwargs): + kwargs.setdefault("template", "model_error") + self.templates["model_error"] = ERROR_WITH_MODEL_NAME + super(ModelError, self).__init__(*args, **kwargs) + + +class MissingModel(ModelError): + code = 404 + + def __init__(self, *args, **kwargs): + super(MissingModel, self).__init__( + "NoSuchModel", "Could not find model", *args, **kwargs + ) + + +class AWSError(Exception): + TYPE = None + STATUS = 400 + + def __init__(self, message, type=None, status=None): + self.message = message + self.type = type if type is not None else self.TYPE + self.status = status if status is not None else self.STATUS + + def response(self): + return ( + json.dumps({"__type": self.type, "message": self.message}), + dict(status=self.status), + ) diff --git a/moto/sagemaker/models.py b/moto/sagemaker/models.py new file mode 100644 index 000000000000..3e0dce87b680 --- /dev/null +++ b/moto/sagemaker/models.py @@ -0,0 +1,398 @@ +from __future__ import unicode_literals + +from copy import deepcopy +from datetime import datetime + +from moto.core import BaseBackend, BaseModel +from moto.core.exceptions import RESTError +from moto.ec2 import ec2_backends +from moto.sagemaker import validators +from moto.sts.models import ACCOUNT_ID +from .exceptions import MissingModel + + +class BaseObject(BaseModel): + def camelCase(self, key): + words = [] + for i, word in enumerate(key.split("_")): + words.append(word.title()) + return "".join(words) + + def gen_response_object(self): + response_object = dict() + for key, value in self.__dict__.items(): + if "_" in key: + response_object[self.camelCase(key)] = value + else: + response_object[key[0].upper() + key[1:]] = value + return response_object + + @property + def response_object(self): + return self.gen_response_object() + + +class Model(BaseObject): + def __init__( + self, + region_name, + model_name, + execution_role_arn, + primary_container, + vpc_config, + containers=[], + tags=[], + ): + self.model_name = model_name + self.creation_time = datetime.now().strftime("%Y-%m-%d %H:%M:%S") + self.containers = containers + self.tags = tags + self.enable_network_isolation = False + self.vpc_config = vpc_config + self.primary_container = primary_container + self.execution_role_arn = execution_role_arn or "arn:test" + self.model_arn = self.arn_for_model_name(self.model_name, region_name) + + @property + def response_object(self): + response_object = self.gen_response_object() + return { + k: v for k, v in response_object.items() if v is not None and v != [None] + } + + @property + def response_create(self): + return {"ModelArn": self.model_arn} + + @staticmethod + def arn_for_model_name(model_name, region_name): + return ( + "arn:aws:sagemaker:" + + region_name + + ":" + + str(ACCOUNT_ID) + + ":model/" + + model_name + ) + + +class VpcConfig(BaseObject): + def __init__(self, security_group_ids, subnets): + self.security_group_ids = security_group_ids + self.subnets = subnets + + @property + def response_object(self): + response_object = self.gen_response_object() + return { + k: v for k, v in response_object.items() if v is not None and v != [None] + } + + +class Container(BaseObject): + def __init__(self, **kwargs): + self.container_hostname = 
kwargs.get("container_hostname", "localhost") + self.model_data_url = kwargs.get("data_url", "") + self.model_package_name = kwargs.get("package_name", "pkg") + self.image = kwargs.get("image", "") + self.environment = kwargs.get("environment", {}) + + @property + def response_object(self): + response_object = self.gen_response_object() + return { + k: v for k, v in response_object.items() if v is not None and v != [None] + } + + +class FakeSagemakerNotebookInstance: + def __init__( + self, + region_name, + notebook_instance_name, + instance_type, + role_arn, + subnet_id, + security_group_ids, + kms_key_id, + tags, + lifecycle_config_name, + direct_internet_access, + volume_size_in_gb, + accelerator_types, + default_code_repository, + additional_code_repositories, + root_access, + ): + self.validate_volume_size_in_gb(volume_size_in_gb) + self.validate_instance_type(instance_type) + + self.region_name = region_name + self.notebook_instance_name = notebook_instance_name + self.instance_type = instance_type + self.role_arn = role_arn + self.subnet_id = subnet_id + self.security_group_ids = security_group_ids + self.kms_key_id = kms_key_id + self.tags = tags or [] + self.lifecycle_config_name = lifecycle_config_name + self.direct_internet_access = direct_internet_access + self.volume_size_in_gb = volume_size_in_gb + self.accelerator_types = accelerator_types + self.default_code_repository = default_code_repository + self.additional_code_repositories = additional_code_repositories + self.root_access = root_access + self.status = None + self.creation_time = self.last_modified_time = datetime.now() + self.start() + + def validate_volume_size_in_gb(self, volume_size_in_gb): + if not validators.is_integer_between(volume_size_in_gb, mn=5, optional=True): + message = "Invalid range for parameter VolumeSizeInGB, value: {}, valid range: 5-inf" + raise RESTError( + error_type="ValidationException", + message=message, + template="error_json", + ) + + def validate_instance_type(self, instance_type): + VALID_INSTANCE_TYPES = [ + "ml.p2.xlarge", + "ml.m5.4xlarge", + "ml.m4.16xlarge", + "ml.t3.xlarge", + "ml.p3.16xlarge", + "ml.t2.xlarge", + "ml.p2.16xlarge", + "ml.c4.2xlarge", + "ml.c5.2xlarge", + "ml.c4.4xlarge", + "ml.c5d.2xlarge", + "ml.c5.4xlarge", + "ml.c5d.4xlarge", + "ml.c4.8xlarge", + "ml.c5d.xlarge", + "ml.c5.9xlarge", + "ml.c5.xlarge", + "ml.c5d.9xlarge", + "ml.c4.xlarge", + "ml.t2.2xlarge", + "ml.c5d.18xlarge", + "ml.t3.2xlarge", + "ml.t3.medium", + "ml.t2.medium", + "ml.c5.18xlarge", + "ml.p3.2xlarge", + "ml.m5.xlarge", + "ml.m4.10xlarge", + "ml.t2.large", + "ml.m5.12xlarge", + "ml.m4.xlarge", + "ml.t3.large", + "ml.m5.24xlarge", + "ml.m4.2xlarge", + "ml.p2.8xlarge", + "ml.m5.2xlarge", + "ml.p3.8xlarge", + "ml.m4.4xlarge", + ] + if not validators.is_one_of(instance_type, VALID_INSTANCE_TYPES): + message = "Value '{}' at 'instanceType' failed to satisfy constraint: Member must satisfy enum value set: {}".format( + instance_type, VALID_INSTANCE_TYPES + ) + raise RESTError( + error_type="ValidationException", + message=message, + template="error_json", + ) + + @property + def arn(self): + return ( + "arn:aws:sagemaker:" + + self.region_name + + ":" + + str(ACCOUNT_ID) + + ":notebook-instance/" + + self.notebook_instance_name + ) + + @property + def url(self): + return "{}.notebook.{}.sagemaker.aws".format( + self.notebook_instance_name, self.region_name + ) + + def start(self): + self.status = "InService" + + @property + def is_deletable(self): + return self.status in ["Stopped", "Failed"] + + def 
stop(self): + self.status = "Stopped" + + +class SageMakerModelBackend(BaseBackend): + def __init__(self, region_name=None): + self._models = {} + self.notebook_instances = {} + self.region_name = region_name + + def reset(self): + region_name = self.region_name + self.__dict__ = {} + self.__init__(region_name) + + def create_model(self, **kwargs): + model_obj = Model( + region_name=self.region_name, + model_name=kwargs.get("ModelName"), + execution_role_arn=kwargs.get("ExecutionRoleArn"), + primary_container=kwargs.get("PrimaryContainer", {}), + vpc_config=kwargs.get("VpcConfig", {}), + containers=kwargs.get("Containers", []), + tags=kwargs.get("Tags", []), + ) + + self._models[kwargs.get("ModelName")] = model_obj + return model_obj.response_create + + def describe_model(self, model_name=None): + model = self._models.get(model_name) + if model: + return model.response_object + message = "Could not find model '{}'.".format( + Model.arn_for_model_name(model_name, self.region_name) + ) + raise RESTError( + error_type="ValidationException", message=message, template="error_json", + ) + + def list_models(self): + models = [] + for model in self._models.values(): + model_response = deepcopy(model.response_object) + models.append(model_response) + return {"Models": models} + + def delete_model(self, model_name=None): + for model in self._models.values(): + if model.model_name == model_name: + self._models.pop(model.model_name) + break + else: + raise MissingModel(model=model_name) + + def create_notebook_instance( + self, + notebook_instance_name, + instance_type, + role_arn, + subnet_id=None, + security_group_ids=None, + kms_key_id=None, + tags=None, + lifecycle_config_name=None, + direct_internet_access="Enabled", + volume_size_in_gb=5, + accelerator_types=None, + default_code_repository=None, + additional_code_repositories=None, + root_access=None, + ): + self._validate_unique_notebook_instance_name(notebook_instance_name) + + notebook_instance = FakeSagemakerNotebookInstance( + self.region_name, + notebook_instance_name, + instance_type, + role_arn, + subnet_id=subnet_id, + security_group_ids=security_group_ids, + kms_key_id=kms_key_id, + tags=tags, + lifecycle_config_name=lifecycle_config_name, + direct_internet_access=direct_internet_access + if direct_internet_access is not None + else "Enabled", + volume_size_in_gb=volume_size_in_gb if volume_size_in_gb is not None else 5, + accelerator_types=accelerator_types, + default_code_repository=default_code_repository, + additional_code_repositories=additional_code_repositories, + root_access=root_access, + ) + self.notebook_instances[notebook_instance_name] = notebook_instance + return notebook_instance + + def _validate_unique_notebook_instance_name(self, notebook_instance_name): + if notebook_instance_name in self.notebook_instances: + duplicate_arn = self.notebook_instances[notebook_instance_name].arn + message = "Cannot create a duplicate Notebook Instance ({})".format( + duplicate_arn + ) + raise RESTError( + error_type="ValidationException", + message=message, + template="error_json", + ) + + def get_notebook_instance(self, notebook_instance_name): + try: + return self.notebook_instances[notebook_instance_name] + except KeyError: + message = "RecordNotFound" + raise RESTError( + error_type="ValidationException", + message=message, + template="error_json", + ) + + def get_notebook_instance_by_arn(self, arn): + instances = [ + notebook_instance + for notebook_instance in self.notebook_instances.values() + if notebook_instance.arn == arn + ] 
+ if len(instances) == 0: + message = "RecordNotFound" + raise RESTError( + error_type="ValidationException", + message=message, + template="error_json", + ) + return instances[0] + + def start_notebook_instance(self, notebook_instance_name): + notebook_instance = self.get_notebook_instance(notebook_instance_name) + notebook_instance.start() + + def stop_notebook_instance(self, notebook_instance_name): + notebook_instance = self.get_notebook_instance(notebook_instance_name) + notebook_instance.stop() + + def delete_notebook_instance(self, notebook_instance_name): + notebook_instance = self.get_notebook_instance(notebook_instance_name) + if not notebook_instance.is_deletable: + message = "Status ({}) not in ([Stopped, Failed]). Unable to transition to (Deleting) for Notebook Instance ({})".format( + notebook_instance.status, notebook_instance.arn + ) + raise RESTError( + error_type="ValidationException", + message=message, + template="error_json", + ) + del self.notebook_instances[notebook_instance_name] + + def get_notebook_instance_tags(self, arn): + try: + notebook_instance = self.get_notebook_instance_by_arn(arn) + return notebook_instance.tags or [] + except RESTError: + return [] + + +sagemaker_backends = {} +for region, ec2_backend in ec2_backends.items(): + sagemaker_backends[region] = SageMakerModelBackend(region) diff --git a/moto/sagemaker/responses.py b/moto/sagemaker/responses.py new file mode 100644 index 000000000000..58e28ef01d3f --- /dev/null +++ b/moto/sagemaker/responses.py @@ -0,0 +1,127 @@ +from __future__ import unicode_literals + +import json + +from moto.core.responses import BaseResponse +from moto.core.utils import amzn_request_id +from .exceptions import AWSError +from .models import sagemaker_backends + + +class SageMakerResponse(BaseResponse): + @property + def sagemaker_backend(self): + return sagemaker_backends[self.region] + + @property + def request_params(self): + try: + return json.loads(self.body) + except ValueError: + return {} + + def describe_model(self): + model_name = self._get_param("ModelName") + response = self.sagemaker_backend.describe_model(model_name) + return json.dumps(response) + + def create_model(self): + response = self.sagemaker_backend.create_model(**self.request_params) + return json.dumps(response) + + def delete_model(self): + model_name = self._get_param("ModelName") + response = self.sagemaker_backend.delete_model(model_name) + return json.dumps(response) + + def list_models(self): + response = self.sagemaker_backend.list_models(**self.request_params) + return json.dumps(response) + + def _get_param(self, param, if_none=None): + return self.request_params.get(param, if_none) + + @amzn_request_id + def create_notebook_instance(self): + try: + sagemaker_notebook = self.sagemaker_backend.create_notebook_instance( + notebook_instance_name=self._get_param("NotebookInstanceName"), + instance_type=self._get_param("InstanceType"), + subnet_id=self._get_param("SubnetId"), + security_group_ids=self._get_param("SecurityGroupIds"), + role_arn=self._get_param("RoleArn"), + kms_key_id=self._get_param("KmsKeyId"), + tags=self._get_param("Tags"), + lifecycle_config_name=self._get_param("LifecycleConfigName"), + direct_internet_access=self._get_param("DirectInternetAccess"), + volume_size_in_gb=self._get_param("VolumeSizeInGB"), + accelerator_types=self._get_param("AcceleratorTypes"), + default_code_repository=self._get_param("DefaultCodeRepository"), + additional_code_repositories=self._get_param( + "AdditionalCodeRepositories" + ), + 
root_access=self._get_param("RootAccess"), + ) + response = { + "NotebookInstanceArn": sagemaker_notebook.arn, + } + return 200, {}, json.dumps(response) + except AWSError as err: + return err.response() + + @amzn_request_id + def describe_notebook_instance(self): + notebook_instance_name = self._get_param("NotebookInstanceName") + try: + notebook_instance = self.sagemaker_backend.get_notebook_instance( + notebook_instance_name + ) + response = { + "NotebookInstanceArn": notebook_instance.arn, + "NotebookInstanceName": notebook_instance.notebook_instance_name, + "NotebookInstanceStatus": notebook_instance.status, + "Url": notebook_instance.url, + "InstanceType": notebook_instance.instance_type, + "SubnetId": notebook_instance.subnet_id, + "SecurityGroups": notebook_instance.security_group_ids, + "RoleArn": notebook_instance.role_arn, + "KmsKeyId": notebook_instance.kms_key_id, + # ToDo: NetworkInterfaceId + "LastModifiedTime": str(notebook_instance.last_modified_time), + "CreationTime": str(notebook_instance.creation_time), + "NotebookInstanceLifecycleConfigName": notebook_instance.lifecycle_config_name, + "DirectInternetAccess": notebook_instance.direct_internet_access, + "VolumeSizeInGB": notebook_instance.volume_size_in_gb, + "AcceleratorTypes": notebook_instance.accelerator_types, + "DefaultCodeRepository": notebook_instance.default_code_repository, + "AdditionalCodeRepositories": notebook_instance.additional_code_repositories, + "RootAccess": notebook_instance.root_access, + } + return 200, {}, json.dumps(response) + except AWSError as err: + return err.response() + + @amzn_request_id + def start_notebook_instance(self): + notebook_instance_name = self._get_param("NotebookInstanceName") + self.sagemaker_backend.start_notebook_instance(notebook_instance_name) + return 200, {}, json.dumps("{}") + + @amzn_request_id + def stop_notebook_instance(self): + notebook_instance_name = self._get_param("NotebookInstanceName") + self.sagemaker_backend.stop_notebook_instance(notebook_instance_name) + return 200, {}, json.dumps("{}") + + @amzn_request_id + def delete_notebook_instance(self): + notebook_instance_name = self._get_param("NotebookInstanceName") + self.sagemaker_backend.delete_notebook_instance(notebook_instance_name) + return 200, {}, json.dumps("{}") + + @amzn_request_id + def list_tags(self): + arn = self._get_param("ResourceArn") + tags = self.sagemaker_backend.get_notebook_instance_tags(arn) + response = {"Tags": tags} + return 200, {}, json.dumps(response) diff --git a/moto/sagemaker/urls.py b/moto/sagemaker/urls.py new file mode 100644 index 000000000000..224342ce5408 --- /dev/null +++ b/moto/sagemaker/urls.py @@ -0,0 +1,11 @@ +from __future__ import unicode_literals +from .responses import SageMakerResponse + +url_bases = [ + "https?://api.sagemaker.(.+).amazonaws.com", + "https?://api-fips.sagemaker.(.+).amazonaws.com", +] + +url_paths = { + "{0}/$": SageMakerResponse.dispatch, +} diff --git a/moto/sagemaker/validators.py b/moto/sagemaker/validators.py new file mode 100644 index 000000000000..69cbee2a5be2 --- /dev/null +++ b/moto/sagemaker/validators.py @@ -0,0 +1,20 @@ +def is_integer_between(x, mn=None, mx=None, optional=False): + if optional and x is None: + return True + try: + if mn is not None and mx is not None: + return int(x) >= mn and int(x) < mx + elif mn is not None: + return int(x) >= mn + elif mx is not None: + return int(x) < mx + else: + return True + except ValueError: + return False + + +def is_one_of(x, choices, optional=False): + if optional and x is None: + 
return True + return x in choices diff --git a/moto/server.py b/moto/server.py index 46e37d921d9e..bf76095a67fe 100644 --- a/moto/server.py +++ b/moto/server.py @@ -102,6 +102,10 @@ def infer_service_region_host(self, environ): # If Newer API version, use dynamodb2 if dynamo_api_version > "20111205": host = "dynamodb2" + elif service == "sagemaker": + host = "api.sagemaker.{region}.amazonaws.com".format( + service=service, region=region + ) else: host = "{service}.{region}.amazonaws.com".format( service=service, region=region diff --git a/tests/test_sagemaker/__init__.py b/tests/test_sagemaker/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/tests/test_sagemaker/test_sagemaker_models.py b/tests/test_sagemaker/test_sagemaker_models.py new file mode 100644 index 000000000000..4139ca575480 --- /dev/null +++ b/tests/test_sagemaker/test_sagemaker_models.py @@ -0,0 +1,122 @@ +# -*- coding: utf-8 -*- +from __future__ import unicode_literals + +import boto3 +import tests.backport_assert_raises # noqa +from botocore.exceptions import ClientError +from nose.tools import assert_raises +from moto import mock_sagemaker + +import sure # noqa + +from moto.sagemaker.models import VpcConfig + + +class MySageMakerModel(object): + def __init__(self, name, arn, container=None, vpc_config=None): + self.name = name + self.arn = arn + self.container = container if container else {} + self.vpc_config = ( + vpc_config if vpc_config else {"sg-groups": ["sg-123"], "subnets": ["123"]} + ) + + def save(self): + client = boto3.client("sagemaker", region_name="us-east-1") + vpc_config = VpcConfig( + self.vpc_config.get("sg-groups"), self.vpc_config.get("subnets") + ) + client.create_model( + ModelName=self.name, + ExecutionRoleArn=self.arn, + VpcConfig=vpc_config.response_object, + ) + + +@mock_sagemaker +def test_describe_model(): + client = boto3.client("sagemaker", region_name="us-east-1") + test_model = MySageMakerModel( + name="blah", + arn="arn:aws:sagemaker:eu-west-1:000000000000:x-x/foobar", + vpc_config={"sg-groups": ["sg-123"], "subnets": ["123"]}, + ) + test_model.save() + model = client.describe_model(ModelName="blah") + assert model.get("ModelName").should.equal("blah") + + +@mock_sagemaker +def test_create_model(): + client = boto3.client("sagemaker", region_name="us-east-1") + vpc_config = VpcConfig(["sg-foobar"], ["subnet-xxx"]) + exec_role_arn = "arn:aws:sagemaker:eu-west-1:000000000000:x-x/foobar" + name = "blah" + model = client.create_model( + ModelName=name, + ExecutionRoleArn=exec_role_arn, + VpcConfig=vpc_config.response_object, + ) + + model["ModelArn"].should.match(r"^arn:aws:sagemaker:.*:.*:model/{}$".format(name)) + + +@mock_sagemaker +def test_delete_model(): + client = boto3.client("sagemaker", region_name="us-east-1") + name = "blah" + arn = "arn:aws:sagemaker:eu-west-1:000000000000:x-x/foobar" + test_model = MySageMakerModel(name=name, arn=arn) + test_model.save() + + assert len(client.list_models()["Models"]).should.equal(1) + client.delete_model(ModelName=name) + assert len(client.list_models()["Models"]).should.equal(0) + + +@mock_sagemaker +def test_delete_model_not_found(): + with assert_raises(ClientError) as err: + boto3.client("sagemaker", region_name="us-east-1").delete_model( + ModelName="blah" + ) + assert err.exception.response["Error"]["Code"].should.equal("404") + + +@mock_sagemaker +def test_list_models(): + client = boto3.client("sagemaker", region_name="us-east-1") + name = "blah" + arn = "arn:aws:sagemaker:eu-west-1:000000000000:x-x/foobar" + 
test_model = MySageMakerModel(name=name, arn=arn) + test_model.save() + models = client.list_models() + assert len(models["Models"]).should.equal(1) + assert models["Models"][0]["ModelName"].should.equal(name) + assert models["Models"][0]["ModelArn"].should.match( + r"^arn:aws:sagemaker:.*:.*:model/{}$".format(name) + ) + + +@mock_sagemaker +def test_list_models_multiple(): + client = boto3.client("sagemaker", region_name="us-east-1") + + name_model_1 = "blah" + arn_model_1 = "arn:aws:sagemaker:eu-west-1:000000000000:x-x/foobar" + test_model_1 = MySageMakerModel(name=name_model_1, arn=arn_model_1) + test_model_1.save() + + name_model_2 = "blah2" + arn_model_2 = "arn:aws:sagemaker:eu-west-1:000000000000:x-x/foobar2" + test_model_2 = MySageMakerModel(name=name_model_2, arn=arn_model_2) + test_model_2.save() + models = client.list_models() + assert len(models["Models"]).should.equal(2) + + +@mock_sagemaker +def test_list_models_none(): + client = boto3.client("sagemaker", region_name="us-east-1") + models = client.list_models() + assert len(models["Models"]).should.equal(0) diff --git a/tests/test_sagemaker/test_sagemaker_notebooks.py b/tests/test_sagemaker/test_sagemaker_notebooks.py new file mode 100644 index 000000000000..70cdc94234f9 --- /dev/null +++ b/tests/test_sagemaker/test_sagemaker_notebooks.py @@ -0,0 +1,227 @@ +# -*- coding: utf-8 -*- +from __future__ import unicode_literals + +import datetime +import boto3 +from botocore.exceptions import ClientError, ParamValidationError +import sure # noqa + +from moto import mock_sagemaker +from moto.sts.models import ACCOUNT_ID +from nose.tools import assert_true, assert_equal, assert_raises + +TEST_REGION_NAME = "us-east-1" +FAKE_SUBNET_ID = "subnet-012345678" +FAKE_SECURITY_GROUP_IDS = ["sg-0123456789abcdef0", "sg-0123456789abcdef1"] +FAKE_ROLE_ARN = "arn:aws:iam::{}:role/FakeRole".format(ACCOUNT_ID) +FAKE_KMS_KEY_ID = "62d4509a-9f96-446c-a9ba-6b1c353c8c58" +GENERIC_TAGS_PARAM = [ + {"Key": "newkey1", "Value": "newval1"}, + {"Key": "newkey2", "Value": "newval2"}, +] +FAKE_LIFECYCLE_CONFIG_NAME = "FakeLifecycleConfigName" +FAKE_DEFAULT_CODE_REPO = "https://github.com/user/repo1" +FAKE_ADDL_CODE_REPOS = [ + "https://github.com/user/repo2", + "https://github.com/user/repo2", +] + + +@mock_sagemaker +def test_create_notebook_instance_minimal_params(): + + sagemaker = boto3.client("sagemaker", region_name=TEST_REGION_NAME) + + NAME_PARAM = "MyNotebookInstance" + INSTANCE_TYPE_PARAM = "ml.t2.medium" + + args = { + "NotebookInstanceName": NAME_PARAM, + "InstanceType": INSTANCE_TYPE_PARAM, + "RoleArn": FAKE_ROLE_ARN, + } + resp = sagemaker.create_notebook_instance(**args) + assert_true(resp["NotebookInstanceArn"].startswith("arn:aws:sagemaker")) + assert_true(resp["NotebookInstanceArn"].endswith(args["NotebookInstanceName"])) + + resp = sagemaker.describe_notebook_instance(NotebookInstanceName=NAME_PARAM) + assert_true(resp["NotebookInstanceArn"].startswith("arn:aws:sagemaker")) + assert_true(resp["NotebookInstanceArn"].endswith(args["NotebookInstanceName"])) + assert_equal(resp["NotebookInstanceName"], NAME_PARAM) + assert_equal(resp["NotebookInstanceStatus"], "InService") + assert_equal( + resp["Url"], "{}.notebook.{}.sagemaker.aws".format(NAME_PARAM, TEST_REGION_NAME) + ) + assert_equal(resp["InstanceType"], INSTANCE_TYPE_PARAM) + assert_equal(resp["RoleArn"], FAKE_ROLE_ARN) + assert_true(isinstance(resp["LastModifiedTime"], datetime.datetime)) + assert_true(isinstance(resp["CreationTime"], datetime.datetime)) + 
assert_equal(resp["DirectInternetAccess"], "Enabled") + assert_equal(resp["VolumeSizeInGB"], 5) + + +# assert_equal(resp["RootAccess"], True) # ToDo: Not sure if this defaults... + + +@mock_sagemaker +def test_create_notebook_instance_params(): + + sagemaker = boto3.client("sagemaker", region_name="us-east-1") + + NAME_PARAM = "MyNotebookInstance" + INSTANCE_TYPE_PARAM = "ml.t2.medium" + DIRECT_INTERNET_ACCESS_PARAM = "Enabled" + VOLUME_SIZE_IN_GB_PARAM = 7 + ACCELERATOR_TYPES_PARAM = ["ml.eia1.medium", "ml.eia2.medium"] + ROOT_ACCESS_PARAM = "Disabled" + + args = { + "NotebookInstanceName": NAME_PARAM, + "InstanceType": INSTANCE_TYPE_PARAM, + "SubnetId": FAKE_SUBNET_ID, + "SecurityGroupIds": FAKE_SECURITY_GROUP_IDS, + "RoleArn": FAKE_ROLE_ARN, + "KmsKeyId": FAKE_KMS_KEY_ID, + "Tags": GENERIC_TAGS_PARAM, + "LifecycleConfigName": FAKE_LIFECYCLE_CONFIG_NAME, + "DirectInternetAccess": DIRECT_INTERNET_ACCESS_PARAM, + "VolumeSizeInGB": VOLUME_SIZE_IN_GB_PARAM, + "AcceleratorTypes": ACCELERATOR_TYPES_PARAM, + "DefaultCodeRepository": FAKE_DEFAULT_CODE_REPO, + "AdditionalCodeRepositories": FAKE_ADDL_CODE_REPOS, + "RootAccess": ROOT_ACCESS_PARAM, + } + resp = sagemaker.create_notebook_instance(**args) + assert_true(resp["NotebookInstanceArn"].startswith("arn:aws:sagemaker")) + assert_true(resp["NotebookInstanceArn"].endswith(args["NotebookInstanceName"])) + + resp = sagemaker.describe_notebook_instance(NotebookInstanceName=NAME_PARAM) + assert_true(resp["NotebookInstanceArn"].startswith("arn:aws:sagemaker")) + assert_true(resp["NotebookInstanceArn"].endswith(args["NotebookInstanceName"])) + assert_equal(resp["NotebookInstanceName"], NAME_PARAM) + assert_equal(resp["NotebookInstanceStatus"], "InService") + assert_equal( + resp["Url"], "{}.notebook.{}.sagemaker.aws".format(NAME_PARAM, TEST_REGION_NAME) + ) + assert_equal(resp["InstanceType"], INSTANCE_TYPE_PARAM) + assert_equal(resp["RoleArn"], FAKE_ROLE_ARN) + assert_true(isinstance(resp["LastModifiedTime"], datetime.datetime)) + assert_true(isinstance(resp["CreationTime"], datetime.datetime)) + assert_equal(resp["DirectInternetAccess"], "Enabled") + assert_equal(resp["VolumeSizeInGB"], VOLUME_SIZE_IN_GB_PARAM) + # assert_equal(resp["RootAccess"], True) # ToDo: Not sure if this defaults... 
+ assert_equal(resp["SubnetId"], FAKE_SUBNET_ID) + assert_equal(resp["SecurityGroups"], FAKE_SECURITY_GROUP_IDS) + assert_equal(resp["KmsKeyId"], FAKE_KMS_KEY_ID) + assert_equal( + resp["NotebookInstanceLifecycleConfigName"], FAKE_LIFECYCLE_CONFIG_NAME + ) + assert_equal(resp["AcceleratorTypes"], ACCELERATOR_TYPES_PARAM) + assert_equal(resp["DefaultCodeRepository"], FAKE_DEFAULT_CODE_REPO) + assert_equal(resp["AdditionalCodeRepositories"], FAKE_ADDL_CODE_REPOS) + + resp = sagemaker.list_tags(ResourceArn=resp["NotebookInstanceArn"]) + assert_equal(resp["Tags"], GENERIC_TAGS_PARAM) + + +@mock_sagemaker +def test_create_notebook_instance_bad_volume_size(): + + sagemaker = boto3.client("sagemaker", region_name="us-east-1") + + vol_size = 2 + args = { + "NotebookInstanceName": "MyNotebookInstance", + "InstanceType": "ml.t2.medium", + "RoleArn": FAKE_ROLE_ARN, + "VolumeSizeInGB": vol_size, + } + with assert_raises(ParamValidationError) as ex: + resp = sagemaker.create_notebook_instance(**args) + assert_equal( + ex.exception.args[0], + "Parameter validation failed:\nInvalid range for parameter VolumeSizeInGB, value: {}, valid range: 5-inf".format( + vol_size + ), + ) + + +@mock_sagemaker +def test_create_notebook_instance_invalid_instance_type(): + + sagemaker = boto3.client("sagemaker", region_name="us-east-1") + + instance_type = "undefined_instance_type" + args = { + "NotebookInstanceName": "MyNotebookInstance", + "InstanceType": instance_type, + "RoleArn": FAKE_ROLE_ARN, + } + with assert_raises(ClientError) as ex: + resp = sagemaker.create_notebook_instance(**args) + assert_equal(ex.exception.response["Error"]["Code"], "ValidationException") + expected_message = "Value '{}' at 'instanceType' failed to satisfy constraint: Member must satisfy enum value set: [".format( + instance_type + ) + + assert_true(expected_message in ex.exception.response["Error"]["Message"]) + + +@mock_sagemaker +def test_notebook_instance_lifecycle(): + sagemaker = boto3.client("sagemaker", region_name=TEST_REGION_NAME) + + NAME_PARAM = "MyNotebookInstance" + INSTANCE_TYPE_PARAM = "ml.t2.medium" + + args = { + "NotebookInstanceName": NAME_PARAM, + "InstanceType": INSTANCE_TYPE_PARAM, + "RoleArn": FAKE_ROLE_ARN, + } + resp = sagemaker.create_notebook_instance(**args) + assert_true(resp["NotebookInstanceArn"].startswith("arn:aws:sagemaker")) + assert_true(resp["NotebookInstanceArn"].endswith(args["NotebookInstanceName"])) + + resp = sagemaker.describe_notebook_instance(NotebookInstanceName=NAME_PARAM) + notebook_instance_arn = resp["NotebookInstanceArn"] + + with assert_raises(ClientError) as ex: + sagemaker.delete_notebook_instance(NotebookInstanceName=NAME_PARAM) + assert_equal(ex.exception.response["Error"]["Code"], "ValidationException") + expected_message = "Status (InService) not in ([Stopped, Failed]). 
Unable to transition to (Deleting) for Notebook Instance ({})".format(
+        notebook_instance_arn
+    )
+    assert_true(expected_message in ex.exception.response["Error"]["Message"])
+
+    sagemaker.stop_notebook_instance(NotebookInstanceName=NAME_PARAM)
+
+    resp = sagemaker.describe_notebook_instance(NotebookInstanceName=NAME_PARAM)
+    assert_equal(resp["NotebookInstanceStatus"], "Stopped")
+
+    sagemaker.start_notebook_instance(NotebookInstanceName=NAME_PARAM)
+
+    resp = sagemaker.describe_notebook_instance(NotebookInstanceName=NAME_PARAM)
+    assert_equal(resp["NotebookInstanceStatus"], "InService")
+
+    sagemaker.stop_notebook_instance(NotebookInstanceName=NAME_PARAM)
+
+    resp = sagemaker.describe_notebook_instance(NotebookInstanceName=NAME_PARAM)
+    assert_equal(resp["NotebookInstanceStatus"], "Stopped")
+
+    sagemaker.delete_notebook_instance(NotebookInstanceName=NAME_PARAM)
+
+    with assert_raises(ClientError) as ex:
+        sagemaker.describe_notebook_instance(NotebookInstanceName=NAME_PARAM)
+    assert_equal(ex.exception.response["Error"]["Message"], "RecordNotFound")
+
+
+@mock_sagemaker
+def test_describe_nonexistent_model():
+    sagemaker = boto3.client("sagemaker", region_name=TEST_REGION_NAME)
+
+    with assert_raises(ClientError) as e:
+        resp = sagemaker.describe_model(ModelName="Nonexistent")
+    assert_true(
+        e.exception.response["Error"]["Message"].startswith("Could not find model")
+    )

From bed769a387e667f7d6cb4dbf306963f70f54afbd Mon Sep 17 00:00:00 2001
From: Bert Blommers
Date: Fri, 17 Jul 2020 12:11:47 +0100
Subject: [PATCH 444/658] Tech debt - increase test timeouts to remove
 intermittent test failures (#3146)

---
 moto/batch/models.py           | 4 ++--
 tests/test_batch/test_batch.py | 5 +++--
 2 files changed, 5 insertions(+), 4 deletions(-)

diff --git a/moto/batch/models.py b/moto/batch/models.py
index 95ad6478961e..fde744911a81 100644
--- a/moto/batch/models.py
+++ b/moto/batch/models.py
@@ -443,8 +443,8 @@ def run(self):
             now = datetime.datetime.now()
             i = 1
             while container.status == "running" and not self.stop:
-                time.sleep(0.15)
-                if i % 10 == 0:
+                time.sleep(0.2)
+                if i % 5 == 0:
                     logs_stderr.extend(
                         container.logs(
                             stdout=False,
diff --git a/tests/test_batch/test_batch.py b/tests/test_batch/test_batch.py
index 4b75fb8577eb..566be6aca47b 100644
--- a/tests/test_batch/test_batch.py
+++ b/tests/test_batch/test_batch.py
@@ -6,6 +6,7 @@
 from botocore.exceptions import ClientError
 import sure  # noqa
 from moto import mock_batch, mock_iam, mock_ec2, mock_ecs, mock_logs
+
 import functools
 import nose
 
@@ -729,13 +730,13 @@ def test_submit_job():
 
     future = datetime.datetime.now() + datetime.timedelta(seconds=30)
     while datetime.datetime.now() < future:
+        time.sleep(1)
         resp = batch_client.describe_jobs(jobs=[job_id])
 
         if resp["jobs"][0]["status"] == "FAILED":
             raise RuntimeError("Batch job failed")
         if resp["jobs"][0]["status"] == "SUCCEEDED":
             break
-        time.sleep(0.5)
     else:
         raise RuntimeError("Batch job timed out")
 
@@ -872,7 +873,7 @@ def test_terminate_job():
 
     batch_client.terminate_job(jobId=job_id, reason="test_terminate")
 
-    time.sleep(1)
+    time.sleep(2)
 
     resp = batch_client.describe_jobs(jobs=[job_id])
     resp["jobs"][0]["jobName"].should.equal("test1")

From f69688b06492d806d413765942939c1daf980497 Mon Sep 17 00:00:00 2001
From: Bert Blommers
Date: Fri, 17 Jul 2020 12:50:06 +0100
Subject: [PATCH 445/658] ECS - UpdateService - Allow Service ARNs

---
 moto/ecs/models.py               | 3 ++-
 tests/test_ecs/test_ecs_boto3.py | 9 +++++++++
 2 files changed, 11 insertions(+), 1 deletion(-)

diff --git a/moto/ecs/models.py 
b/moto/ecs/models.py index 1a385226bc91..254f220d418a 100644 --- a/moto/ecs/models.py +++ b/moto/ecs/models.py @@ -931,9 +931,10 @@ def describe_services(self, cluster_str, service_names_or_arns): return result def update_service( - self, cluster_str, service_name, task_definition_str, desired_count + self, cluster_str, service_str, task_definition_str, desired_count ): cluster_name = cluster_str.split("/")[-1] + service_name = service_str.split("/")[-1] cluster_service_pair = "{0}:{1}".format(cluster_name, service_name) if cluster_service_pair in self.services: if task_definition_str is not None: diff --git a/tests/test_ecs/test_ecs_boto3.py b/tests/test_ecs/test_ecs_boto3.py index f6de595974a4..80ba304fde95 100644 --- a/tests/test_ecs/test_ecs_boto3.py +++ b/tests/test_ecs/test_ecs_boto3.py @@ -660,6 +660,15 @@ def test_update_service(): response["service"]["desiredCount"].should.equal(0) response["service"]["schedulingStrategy"].should.equal("REPLICA") + # Verify we can pass the ARNs of the cluster and service + response = client.update_service( + cluster=response["service"]["clusterArn"], + service=response["service"]["serviceArn"], + taskDefinition="test_ecs_task", + desiredCount=1, + ) + response["service"]["desiredCount"].should.equal(1) + @mock_ecs def test_update_missing_service(): From 09b764148cea354c790443a318dabc976a2b59df Mon Sep 17 00:00:00 2001 From: usmangani1 Date: Sat, 18 Jul 2020 15:17:53 +0530 Subject: [PATCH 446/658] Fix : Added implementation for CloudWatch Describe Metric for Alarm (#3148) * Fix : added implementation for CloudWatch Describe Metric for Alarm * Linting Co-authored-by: Bert Blommers --- moto/cloudwatch/responses.py | 67 ++++++++++++++++++- tests/test_cloudwatch/test_cloudwatch.py | 16 +++++ .../test_cloudwatch/test_cloudwatch_boto3.py | 18 +++++ 3 files changed, 100 insertions(+), 1 deletion(-) diff --git a/moto/cloudwatch/responses.py b/moto/cloudwatch/responses.py index 56ba68bb9305..f6e003ee24c0 100644 --- a/moto/cloudwatch/responses.py +++ b/moto/cloudwatch/responses.py @@ -161,9 +161,23 @@ def delete_dashboards(self): def describe_alarm_history(self): raise NotImplementedError() + @staticmethod + def filter_alarms(alarms, metric_name, namespace): + metric_filtered_alarms = [] + + for alarm in alarms: + if alarm.metric_name == metric_name and alarm.namespace == namespace: + metric_filtered_alarms.append(alarm) + return metric_filtered_alarms + @amzn_request_id def describe_alarms_for_metric(self): - raise NotImplementedError() + alarms = self.cloudwatch_backend.get_all_alarms() + namespace = self._get_param("Namespace") + metric_name = self._get_param("MetricName") + filtered_alarms = self.filter_alarms(alarms, metric_name, namespace) + template = self.response_template(DESCRIBE_METRIC_ALARMS_TEMPLATE) + return template.render(alarms=filtered_alarms) @amzn_request_id def disable_alarm_actions(self): @@ -282,6 +296,57 @@ def set_alarm_state(self): """ +DESCRIBE_METRIC_ALARMS_TEMPLATE = """ + + + {% for alarm in alarms %} + + {{ alarm.actions_enabled }} + + {% for action in alarm.alarm_actions %} + {{ action }} + {% endfor %} + + {{ alarm.arn }} + {{ alarm.configuration_updated_timestamp }} + {{ alarm.description }} + {{ alarm.name }} + {{ alarm.comparison_operator }} + + {% for dimension in alarm.dimensions %} + + {{ dimension.name }} + {{ dimension.value }} + + {% endfor %} + + {{ alarm.evaluation_periods }} + + {% for action in alarm.insufficient_data_actions %} + {{ action }} + {% endfor %} + + {{ alarm.metric_name }} + {{ alarm.namespace }} + + 
{% for action in alarm.ok_actions %} + {{ action }} + {% endfor %} + + {{ alarm.period }} + {{ alarm.state_reason }} + {{ alarm.state_reason_data }} + {{ alarm.state_updated_timestamp }} + {{ alarm.state_value }} + {{ alarm.statistic }} + {{ alarm.threshold }} + {{ alarm.unit }} + + {% endfor %} + + +""" + DELETE_METRIC_ALARMS_TEMPLATE = """ diff --git a/tests/test_cloudwatch/test_cloudwatch.py b/tests/test_cloudwatch/test_cloudwatch.py index 60b6898bd5fe..b1f84ff4bf37 100644 --- a/tests/test_cloudwatch/test_cloudwatch.py +++ b/tests/test_cloudwatch/test_cloudwatch.py @@ -127,6 +127,22 @@ def test_describe_alarms(): alarms.should.have.length_of(0) +@mock_cloudwatch_deprecated +def test_describe_alarms_for_metric(): + conn = boto.connect_cloudwatch() + + conn.create_alarm(alarm_fixture(name="nfoobar", action="afoobar")) + conn.create_alarm(alarm_fixture(name="nfoobaz", action="afoobaz")) + conn.create_alarm(alarm_fixture(name="nbarfoo", action="abarfoo")) + conn.create_alarm(alarm_fixture(name="nbazfoo", action="abazfoo")) + + alarms = conn.describe_alarms_for_metric("nbarfoo_metric", "nbarfoo_namespace") + alarms.should.have.length_of(1) + + alarms = conn.describe_alarms_for_metric("nbazfoo_metric", "nbazfoo_namespace") + alarms.should.have.length_of(1) + + @mock_cloudwatch_deprecated def test_get_metric_statistics(): conn = boto.connect_cloudwatch() diff --git a/tests/test_cloudwatch/test_cloudwatch_boto3.py b/tests/test_cloudwatch/test_cloudwatch_boto3.py index 926c321ba9c2..c38e2c77e3cb 100644 --- a/tests/test_cloudwatch/test_cloudwatch_boto3.py +++ b/tests/test_cloudwatch/test_cloudwatch_boto3.py @@ -123,6 +123,24 @@ def test_delete_invalid_alarm(): e.exception.response["Error"]["Code"].should.equal("ResourceNotFound") +@mock_cloudwatch +def test_describe_alarms_for_metric(): + conn = boto3.client("cloudwatch", region_name="eu-central-1") + conn.put_metric_alarm( + AlarmName="testalarm1", + MetricName="cpu", + Namespace="blah", + Period=10, + EvaluationPeriods=5, + Statistic="Average", + Threshold=2, + ComparisonOperator="GreaterThanThreshold", + ActionsEnabled=True, + ) + alarms = conn.describe_alarms_for_metric(MetricName="cpu", Namespace="blah") + alarms.get("MetricAlarms").should.have.length_of(1) + + @mock_cloudwatch def test_alarm_state(): client = boto3.client("cloudwatch", region_name="eu-central-1") From 6fb7867767c274c30be6d20b7b4a040dbbff59ad Mon Sep 17 00:00:00 2001 From: Logan Jones Date: Sun, 19 Jul 2020 05:00:24 -0400 Subject: [PATCH 447/658] Fix: Put Events API (#3145) * Fix: Put Events API * Update from code review. 
Co-authored-by: Logan Asher Jones --- moto/events/models.py | 4 +++- moto/events/responses.py | 13 +++++++------ tests/test_events/test_events.py | 4 +++- 3 files changed, 13 insertions(+), 8 deletions(-) diff --git a/moto/events/models.py b/moto/events/models.py index 360c8d63166c..d70898198e17 100644 --- a/moto/events/models.py +++ b/moto/events/models.py @@ -8,6 +8,8 @@ from moto.sts.models import ACCOUNT_ID from moto.utilities.tagging_service import TaggingService +from uuid import uuid4 + class Rule(BaseModel): def _generate_arn(self, name): @@ -344,7 +346,7 @@ def put_events(self, events): raise JsonRESTError("ValidationError", "Can only submit 10 events at once") # We dont really need to store the events yet - return [] + return [{"EventId": str(uuid4())} for _ in events] def remove_targets(self, name, ids): rule = self.rules.get(name) diff --git a/moto/events/responses.py b/moto/events/responses.py index 73db00bdd1de..76c590e16ba7 100644 --- a/moto/events/responses.py +++ b/moto/events/responses.py @@ -150,14 +150,15 @@ def list_targets_by_rule(self): def put_events(self): events = self._get_param("Entries") - failed_entries = self.events_backend.put_events(events) + entries = self.events_backend.put_events(events) - if failed_entries: - return json.dumps( - {"FailedEntryCount": len(failed_entries), "Entries": failed_entries} - ) + failed_count = len([e for e in entries if "ErrorCode" in e]) + response = { + "FailedEntryCount": failed_count, + "Entries": entries, + } - return "", self.response_headers + return json.dumps(response) def put_rule(self): name = self._get_param("Name") diff --git a/tests/test_events/test_events.py b/tests/test_events/test_events.py index f83c607649ee..678e0a6221c2 100644 --- a/tests/test_events/test_events.py +++ b/tests/test_events/test_events.py @@ -312,8 +312,10 @@ def test_put_events(): "DetailType": "myDetailType", } - client.put_events(Entries=[event]) + response = client.put_events(Entries=[event]) # Boto3 would error if it didn't return 200 OK + response["FailedEntryCount"].should.equal(0) + response["Entries"].should.have.length_of(1) with assert_raises(ClientError): client.put_events(Entries=[event] * 20) From 552b1294df3e26aad23eb4e6497d7cda55d7928e Mon Sep 17 00:00:00 2001 From: usmangani1 Date: Sun, 19 Jul 2020 15:14:58 +0530 Subject: [PATCH 448/658] Fix : EC2 - Added ownerId filter for describe instances (#3149) * Fix : EC2 - Added ownerId filter for describe instances * linting --- moto/ec2/utils.py | 6 +++++- tests/test_ec2/test_instances.py | 14 ++++++++++++++ 2 files changed, 19 insertions(+), 1 deletion(-) diff --git a/moto/ec2/utils.py b/moto/ec2/utils.py index b8c19b580f19..bc124bddf456 100644 --- a/moto/ec2/utils.py +++ b/moto/ec2/utils.py @@ -11,6 +11,7 @@ from cryptography.hazmat.backends import default_backend from cryptography.hazmat.primitives.asymmetric import rsa +from moto.iam.models import ACCOUNT_ID EC2_RESOURCE_TO_PREFIX = { "customer-gateway": "cgw", @@ -291,7 +292,9 @@ def get_object_value(obj, attr): keys = attr.split(".") val = obj for key in keys: - if hasattr(val, key): + if key == "owner_id": + return ACCOUNT_ID + elif hasattr(val, key): val = getattr(val, key) elif isinstance(val, dict): val = val[key] @@ -364,6 +367,7 @@ def tag_filter_matches(obj, filter_name, filter_values): "image-id": "image_id", "network-interface.private-dns-name": "private_dns", "private-dns-name": "private_dns", + "owner-id": "owner_id", } diff --git a/tests/test_ec2/test_instances.py b/tests/test_ec2/test_instances.py index 
c775ab0abec6..1310b3a1d181 100644 --- a/tests/test_ec2/test_instances.py +++ b/tests/test_ec2/test_instances.py @@ -536,6 +536,20 @@ def test_get_instances_filtering_by_image_id(): reservations[0]["Instances"].should.have.length_of(1) +@mock_ec2 +def test_get_instances_filtering_by_account_id(): + image_id = "ami-1234abcd" + client = boto3.client("ec2", region_name="us-east-1") + conn = boto3.resource("ec2", "us-east-1") + conn.create_instances(ImageId=image_id, MinCount=1, MaxCount=1) + + reservations = client.describe_instances( + Filters=[{"Name": "owner-id", "Values": ["123456789012"]}] + )["Reservations"] + + reservations[0]["Instances"].should.have.length_of(1) + + @mock_ec2 def test_get_instances_filtering_by_private_dns(): image_id = "ami-1234abcd" From a123a22eebc9a6b14e58ca54a3aa613a00749d09 Mon Sep 17 00:00:00 2001 From: usmangani1 Date: Sun, 19 Jul 2020 16:29:19 +0530 Subject: [PATCH 449/658] Fix : cloudFormation dynamodb : delete resource on delete stack (#3120) * Fix : cloudFormation dynamodb : delete resource on delete stack * Delete function for dynamodb * Added tests for delete stack using dynamodb. * Added tests for non decorator * Linting Co-authored-by: Bert Blommers --- moto/dynamodb2/models/__init__.py | 14 +++++ .../test_cloudformation_stack_crud.py | 55 +++++++++++++++++++ 2 files changed, 69 insertions(+) diff --git a/moto/dynamodb2/models/__init__.py b/moto/dynamodb2/models/__init__.py index 233c4001f100..fc178dd4ed31 100644 --- a/moto/dynamodb2/models/__init__.py +++ b/moto/dynamodb2/models/__init__.py @@ -452,6 +452,17 @@ def create_from_cloudformation_json( ) return table + @classmethod + def delete_from_cloudformation_json( + cls, resource_name, cloudformation_json, region_name + ): + properties = cloudformation_json["Properties"] + + table = dynamodb_backends[region_name].delete_table( + name=properties["TableName"] + ) + return table + def _generate_arn(self, name): return "arn:aws:dynamodb:us-east-1:123456789011:table/" + name @@ -902,6 +913,9 @@ def lookup(self, *args, **kwargs): return None return ret + def delete(self, region_name): + dynamodb_backends[region_name].delete_table(self.name) + class DynamoDBBackend(BaseBackend): def __init__(self, region_name=None): diff --git a/tests/test_cloudformation/test_cloudformation_stack_crud.py b/tests/test_cloudformation/test_cloudformation_stack_crud.py index 29faa11cfdaa..d7e26e85d737 100644 --- a/tests/test_cloudformation/test_cloudformation_stack_crud.py +++ b/tests/test_cloudformation/test_cloudformation_stack_crud.py @@ -4,6 +4,7 @@ import json import boto +import boto3 import boto.iam import boto.s3 import boto.s3.key @@ -21,6 +22,8 @@ mock_s3_deprecated, mock_route53_deprecated, mock_iam_deprecated, + mock_dynamodb2, + mock_cloudformation, ) from moto.cloudformation import cloudformation_backends @@ -45,6 +48,30 @@ }, } +dummy_template4 = { + "AWSTemplateFormatVersion": "2010-09-09", + "Resources": { + "myDynamoDBTable": { + "Type": "AWS::DynamoDB::Table", + "Properties": { + "AttributeDefinitions": [ + {"AttributeName": "Name", "AttributeType": "S"}, + {"AttributeName": "Age", "AttributeType": "S"}, + ], + "KeySchema": [ + {"AttributeName": "Name", "KeyType": "HASH"}, + {"AttributeName": "Age", "KeyType": "RANGE"}, + ], + "ProvisionedThroughput": { + "ReadCapacityUnits": 5, + "WriteCapacityUnits": 5, + }, + "TableName": "Person", + }, + } + }, +} + dummy_template_json = json.dumps(dummy_template) dummy_template_json2 = json.dumps(dummy_template2) dummy_template_json3 = json.dumps(dummy_template3) @@ 
-188,6 +215,34 @@ def test_describe_stack_by_stack_id():
     stack_by_id.stack_name.should.equal("test_stack")
 
 
+@mock_dynamodb2
+@mock_cloudformation_deprecated
+def test_delete_stack_dynamo_template():
+    conn = boto.connect_cloudformation()
+    dynamodb_client = boto3.client("dynamodb", region_name="us-east-1")
+    conn.create_stack("test_stack", template_body=dummy_template4)
+    table_desc = dynamodb_client.list_tables()
+    len(table_desc.get("TableNames")).should.equal(1)
+    conn.delete_stack("test_stack")
+    table_desc = dynamodb_client.list_tables()
+    len(table_desc.get("TableNames")).should.equal(0)
+    conn.create_stack("test_stack", template_body=dummy_template4)
+
+
+@mock_dynamodb2
+@mock_cloudformation
+def test_delete_stack_dynamo_template():
+    conn = boto3.client("cloudformation", region_name="us-east-1")
+    dynamodb_client = boto3.client("dynamodb", region_name="us-east-1")
+    conn.create_stack(StackName="test_stack", TemplateBody=json.dumps(dummy_template4))
+    table_desc = dynamodb_client.list_tables()
+    len(table_desc.get("TableNames")).should.equal(1)
+    conn.delete_stack(StackName="test_stack")
+    table_desc = dynamodb_client.list_tables()
+    len(table_desc.get("TableNames")).should.equal(0)
+    conn.create_stack(StackName="test_stack", TemplateBody=json.dumps(dummy_template4))
+
+
 @mock_cloudformation_deprecated
 def test_describe_deleted_stack():
     conn = boto.connect_cloudformation()

From ba99c61477d92d2d653e30ff5fb12ac06d7a554d Mon Sep 17 00:00:00 2001
From: jweite
Date: Sun, 19 Jul 2020 10:06:48 -0400
Subject: [PATCH 450/658] Basic Support for Endpoints, EndpointConfigs and
 TrainingJobs (#3142)

* Basic Support for Endpoints, EndpointConfigs and TrainingJobs

* Dropped extraneous pass statement.

Co-authored-by: Joseph Weitekamp

---
 moto/sagemaker/models.py                      | 523 +++++++++++++++++-
 moto/sagemaker/responses.py                   | 116 +++-
 moto/sagemaker/urls.py                        |   1 -
 .../test_sagemaker/test_sagemaker_endpoint.py | 246 ++++++++
 .../test_sagemaker/test_sagemaker_training.py | 127 +++++
 5 files changed, 1007 insertions(+), 6 deletions(-)
 create mode 100644 tests/test_sagemaker/test_sagemaker_endpoint.py
 create mode 100644 tests/test_sagemaker/test_sagemaker_training.py

diff --git a/moto/sagemaker/models.py b/moto/sagemaker/models.py
index 3e0dce87b680..6ff36249f1d8 100644
--- a/moto/sagemaker/models.py
+++ b/moto/sagemaker/models.py
@@ -1,5 +1,6 @@
 from __future__ import unicode_literals
 
+import os
 from copy import deepcopy
 from datetime import datetime
 
@@ -32,6 +33,288 @@ def response_object(self):
         return self.gen_response_object()
 
 
+class FakeTrainingJob(BaseObject):
+    def __init__(
+        self,
+        region_name,
+        training_job_name,
+        hyper_parameters,
+        algorithm_specification,
+        role_arn,
+        input_data_config,
+        output_data_config,
+        resource_config,
+        vpc_config,
+        stopping_condition,
+        tags,
+        enable_network_isolation,
+        enable_inter_container_traffic_encryption,
+        enable_managed_spot_training,
+        checkpoint_config,
+        debug_hook_config,
+        debug_rule_configurations,
+        tensor_board_output_config,
+        experiment_config,
+    ):
+        self.training_job_name = training_job_name
+        self.hyper_parameters = hyper_parameters
+        self.algorithm_specification = algorithm_specification
+        self.role_arn = role_arn
+        self.input_data_config = input_data_config
+        self.output_data_config = output_data_config
+        self.resource_config = resource_config
+        self.vpc_config = vpc_config
+        self.stopping_condition = stopping_condition
+        self.tags = tags
+        self.enable_network_isolation = enable_network_isolation
+        self.enable_inter_container_traffic_encryption 
= ( + enable_inter_container_traffic_encryption + ) + self.enable_managed_spot_training = enable_managed_spot_training + self.checkpoint_config = checkpoint_config + self.debug_hook_config = debug_hook_config + self.debug_rule_configurations = debug_rule_configurations + self.tensor_board_output_config = tensor_board_output_config + self.experiment_config = experiment_config + self.training_job_arn = FakeTrainingJob.arn_formatter( + training_job_name, region_name + ) + self.creation_time = self.last_modified_time = datetime.now().strftime( + "%Y-%m-%d %H:%M:%S" + ) + self.model_artifacts = { + "S3ModelArtifacts": os.path.join( + self.output_data_config["S3OutputPath"], + self.training_job_name, + "output", + "model.tar.gz", + ) + } + self.training_job_status = "Completed" + self.secondary_status = "Completed" + self.algorithm_specification["MetricDefinitions"] = [ + { + "Name": "test:dcg", + "Regex": "#quality_metric: host=\\S+, test dcg =(\\S+)", + } + ] + now_string = datetime.now().strftime("%Y-%m-%d %H:%M:%S") + self.creation_time = now_string + self.last_modified_time = now_string + self.training_start_time = now_string + self.training_end_time = now_string + self.secondary_status_transitions = [ + { + "Status": "Starting", + "StartTime": self.creation_time, + "EndTime": self.creation_time, + "StatusMessage": "Preparing the instances for training", + } + ] + self.final_metric_data_list = [ + { + "MetricName": "train:progress", + "Value": 100.0, + "Timestamp": self.creation_time, + } + ] + + @property + def response_object(self): + response_object = self.gen_response_object() + return { + k: v for k, v in response_object.items() if v is not None and v != [None] + } + + @property + def response_create(self): + return {"TrainingJobArn": self.training_job_arn} + + @staticmethod + def arn_formatter(endpoint_name, region_name): + return ( + "arn:aws:sagemaker:" + + region_name + + ":" + + str(ACCOUNT_ID) + + ":training-job/" + + endpoint_name + ) + + +class FakeEndpoint(BaseObject): + def __init__( + self, + region_name, + endpoint_name, + endpoint_config_name, + production_variants, + data_capture_config, + tags, + ): + self.endpoint_name = endpoint_name + self.endpoint_arn = FakeEndpoint.arn_formatter(endpoint_name, region_name) + self.endpoint_config_name = endpoint_config_name + self.production_variants = production_variants + self.data_capture_config = data_capture_config + self.tags = tags or [] + self.endpoint_status = "InService" + self.failure_reason = None + self.creation_time = self.last_modified_time = datetime.now().strftime( + "%Y-%m-%d %H:%M:%S" + ) + + @property + def response_object(self): + response_object = self.gen_response_object() + return { + k: v for k, v in response_object.items() if v is not None and v != [None] + } + + @property + def response_create(self): + return {"EndpointArn": self.endpoint_arn} + + @staticmethod + def arn_formatter(endpoint_name, region_name): + return ( + "arn:aws:sagemaker:" + + region_name + + ":" + + str(ACCOUNT_ID) + + ":endpoint/" + + endpoint_name + ) + + +class FakeEndpointConfig(BaseObject): + def __init__( + self, + region_name, + endpoint_config_name, + production_variants, + data_capture_config, + tags, + kms_key_id, + ): + self.validate_production_variants(production_variants) + + self.endpoint_config_name = endpoint_config_name + self.endpoint_config_arn = FakeEndpointConfig.arn_formatter( + endpoint_config_name, region_name + ) + self.production_variants = production_variants or [] + self.data_capture_config = 
data_capture_config or {} + self.tags = tags or [] + self.kms_key_id = kms_key_id + self.creation_time = datetime.now().strftime("%Y-%m-%d %H:%M:%S") + + def validate_production_variants(self, production_variants): + for production_variant in production_variants: + self.validate_instance_type(production_variant["InstanceType"]) + + def validate_instance_type(self, instance_type): + VALID_INSTANCE_TYPES = [ + "ml.r5d.12xlarge", + "ml.r5.12xlarge", + "ml.p2.xlarge", + "ml.m5.4xlarge", + "ml.m4.16xlarge", + "ml.r5d.24xlarge", + "ml.r5.24xlarge", + "ml.p3.16xlarge", + "ml.m5d.xlarge", + "ml.m5.large", + "ml.t2.xlarge", + "ml.p2.16xlarge", + "ml.m5d.12xlarge", + "ml.inf1.2xlarge", + "ml.m5d.24xlarge", + "ml.c4.2xlarge", + "ml.c5.2xlarge", + "ml.c4.4xlarge", + "ml.inf1.6xlarge", + "ml.c5d.2xlarge", + "ml.c5.4xlarge", + "ml.g4dn.xlarge", + "ml.g4dn.12xlarge", + "ml.c5d.4xlarge", + "ml.g4dn.2xlarge", + "ml.c4.8xlarge", + "ml.c4.large", + "ml.c5d.xlarge", + "ml.c5.large", + "ml.g4dn.4xlarge", + "ml.c5.9xlarge", + "ml.g4dn.16xlarge", + "ml.c5d.large", + "ml.c5.xlarge", + "ml.c5d.9xlarge", + "ml.c4.xlarge", + "ml.inf1.xlarge", + "ml.g4dn.8xlarge", + "ml.inf1.24xlarge", + "ml.m5d.2xlarge", + "ml.t2.2xlarge", + "ml.c5d.18xlarge", + "ml.m5d.4xlarge", + "ml.t2.medium", + "ml.c5.18xlarge", + "ml.r5d.2xlarge", + "ml.r5.2xlarge", + "ml.p3.2xlarge", + "ml.m5d.large", + "ml.m5.xlarge", + "ml.m4.10xlarge", + "ml.t2.large", + "ml.r5d.4xlarge", + "ml.r5.4xlarge", + "ml.m5.12xlarge", + "ml.m4.xlarge", + "ml.m5.24xlarge", + "ml.m4.2xlarge", + "ml.p2.8xlarge", + "ml.m5.2xlarge", + "ml.r5d.xlarge", + "ml.r5d.large", + "ml.r5.xlarge", + "ml.r5.large", + "ml.p3.8xlarge", + "ml.m4.4xlarge", + ] + if not validators.is_one_of(instance_type, VALID_INSTANCE_TYPES): + message = "Value '{}' at 'instanceType' failed to satisfy constraint: Member must satisfy enum value set: {}".format( + instance_type, VALID_INSTANCE_TYPES + ) + raise RESTError( + error_type="ValidationException", + message=message, + template="error_json", + ) + + @property + def response_object(self): + response_object = self.gen_response_object() + return { + k: v for k, v in response_object.items() if v is not None and v != [None] + } + + @property + def response_create(self): + return {"EndpointConfigArn": self.endpoint_config_arn} + + @staticmethod + def arn_formatter(model_name, region_name): + return ( + "arn:aws:sagemaker:" + + region_name + + ":" + + str(ACCOUNT_ID) + + ":endpoint-config/" + + model_name + ) + + class Model(BaseObject): def __init__( self, @@ -238,6 +521,9 @@ class SageMakerModelBackend(BaseBackend): def __init__(self, region_name=None): self._models = {} self.notebook_instances = {} + self.endpoint_configs = {} + self.endpoints = {} + self.training_jobs = {} self.region_name = region_name def reset(self): @@ -305,10 +591,10 @@ def create_notebook_instance( self._validate_unique_notebook_instance_name(notebook_instance_name) notebook_instance = FakeSagemakerNotebookInstance( - self.region_name, - notebook_instance_name, - instance_type, - role_arn, + region_name=self.region_name, + notebook_instance_name=notebook_instance_name, + instance_type=instance_type, + role_arn=role_arn, subnet_id=subnet_id, security_group_ids=security_group_ids, kms_key_id=kms_key_id, @@ -392,6 +678,235 @@ def get_notebook_instance_tags(self, arn): except RESTError: return [] + def create_endpoint_config( + self, + endpoint_config_name, + production_variants, + data_capture_config, + tags, + kms_key_id, + ): + endpoint_config = FakeEndpointConfig( + 
region_name=self.region_name, + endpoint_config_name=endpoint_config_name, + production_variants=production_variants, + data_capture_config=data_capture_config, + tags=tags, + kms_key_id=kms_key_id, + ) + self.validate_production_variants(production_variants) + + self.endpoint_configs[endpoint_config_name] = endpoint_config + return endpoint_config + + def validate_production_variants(self, production_variants): + for production_variant in production_variants: + if production_variant["ModelName"] not in self._models: + message = "Could not find model '{}'.".format( + Model.arn_for_model_name( + production_variant["ModelName"], self.region_name + ) + ) + raise RESTError( + error_type="ValidationException", + message=message, + template="error_json", + ) + + def describe_endpoint_config(self, endpoint_config_name): + try: + return self.endpoint_configs[endpoint_config_name].response_object + except KeyError: + message = "Could not find endpoint configuration '{}'.".format( + FakeEndpointConfig.arn_formatter(endpoint_config_name, self.region_name) + ) + raise RESTError( + error_type="ValidationException", + message=message, + template="error_json", + ) + + def delete_endpoint_config(self, endpoint_config_name): + try: + del self.endpoint_configs[endpoint_config_name] + except KeyError: + message = "Could not find endpoint configuration '{}'.".format( + FakeEndpointConfig.arn_formatter(endpoint_config_name, self.region_name) + ) + raise RESTError( + error_type="ValidationException", + message=message, + template="error_json", + ) + + def create_endpoint( + self, endpoint_name, endpoint_config_name, tags, + ): + try: + endpoint_config = self.describe_endpoint_config(endpoint_config_name) + except KeyError: + message = "Could not find endpoint_config '{}'.".format( + FakeEndpointConfig.arn_formatter(endpoint_config_name, self.region_name) + ) + raise RESTError( + error_type="ValidationException", + message=message, + template="error_json", + ) + + endpoint = FakeEndpoint( + region_name=self.region_name, + endpoint_name=endpoint_name, + endpoint_config_name=endpoint_config_name, + production_variants=endpoint_config["ProductionVariants"], + data_capture_config=endpoint_config["DataCaptureConfig"], + tags=tags, + ) + + self.endpoints[endpoint_name] = endpoint + return endpoint + + def describe_endpoint(self, endpoint_name): + try: + return self.endpoints[endpoint_name].response_object + except KeyError: + message = "Could not find endpoint configuration '{}'.".format( + FakeEndpoint.arn_formatter(endpoint_name, self.region_name) + ) + raise RESTError( + error_type="ValidationException", + message=message, + template="error_json", + ) + + def delete_endpoint(self, endpoint_name): + try: + del self.endpoints[endpoint_name] + except KeyError: + message = "Could not find endpoint configuration '{}'.".format( + FakeEndpoint.arn_formatter(endpoint_name, self.region_name) + ) + raise RESTError( + error_type="ValidationException", + message=message, + template="error_json", + ) + + def get_endpoint_by_arn(self, arn): + endpoints = [ + endpoint + for endpoint in self.endpoints.values() + if endpoint.endpoint_arn == arn + ] + if len(endpoints) == 0: + message = "RecordNotFound" + raise RESTError( + error_type="ValidationException", + message=message, + template="error_json", + ) + return endpoints[0] + + def get_endpoint_tags(self, arn): + try: + endpoint = self.get_endpoint_by_arn(arn) + return endpoint.tags or [] + except RESTError: + return [] + + def create_training_job( + self, + training_job_name, + 
hyper_parameters, + algorithm_specification, + role_arn, + input_data_config, + output_data_config, + resource_config, + vpc_config, + stopping_condition, + tags, + enable_network_isolation, + enable_inter_container_traffic_encryption, + enable_managed_spot_training, + checkpoint_config, + debug_hook_config, + debug_rule_configurations, + tensor_board_output_config, + experiment_config, + ): + training_job = FakeTrainingJob( + region_name=self.region_name, + training_job_name=training_job_name, + hyper_parameters=hyper_parameters, + algorithm_specification=algorithm_specification, + role_arn=role_arn, + input_data_config=input_data_config, + output_data_config=output_data_config, + resource_config=resource_config, + vpc_config=vpc_config, + stopping_condition=stopping_condition, + tags=tags, + enable_network_isolation=enable_network_isolation, + enable_inter_container_traffic_encryption=enable_inter_container_traffic_encryption, + enable_managed_spot_training=enable_managed_spot_training, + checkpoint_config=checkpoint_config, + debug_hook_config=debug_hook_config, + debug_rule_configurations=debug_rule_configurations, + tensor_board_output_config=tensor_board_output_config, + experiment_config=experiment_config, + ) + self.training_jobs[training_job_name] = training_job + return training_job + + def describe_training_job(self, training_job_name): + try: + return self.training_jobs[training_job_name].response_object + except KeyError: + message = "Could not find training job '{}'.".format( + FakeTrainingJob.arn_formatter(training_job_name, self.region_name) + ) + raise RESTError( + error_type="ValidationException", + message=message, + template="error_json", + ) + + def delete_training_job(self, training_job_name): + try: + del self.training_jobs[training_job_name] + except KeyError: + message = "Could not find endpoint configuration '{}'.".format( + FakeTrainingJob.arn_formatter(training_job_name, self.region_name) + ) + raise RESTError( + error_type="ValidationException", + message=message, + template="error_json", + ) + + def get_training_job_by_arn(self, arn): + training_jobs = [ + training_job + for training_job in self.training_jobs.values() + if training_job.training_job_arn == arn + ] + if len(training_jobs) == 0: + message = "RecordNotFound" + raise RESTError( + error_type="ValidationException", + message=message, + template="error_json", + ) + return training_jobs[0] + + def get_training_job_tags(self, arn): + try: + training_job = self.get_training_job_by_arn(arn) + return training_job.tags or [] + except RESTError: + return [] + sagemaker_backends = {} for region, ec2_backend in ec2_backends.items(): diff --git a/moto/sagemaker/responses.py b/moto/sagemaker/responses.py index 58e28ef01d3f..48a3a643207e 100644 --- a/moto/sagemaker/responses.py +++ b/moto/sagemaker/responses.py @@ -122,6 +122,120 @@ def delete_notebook_instance(self): @amzn_request_id def list_tags(self): arn = self._get_param("ResourceArn") - tags = self.sagemaker_backend.get_notebook_instance_tags(arn) + try: + if ":notebook-instance/" in arn: + tags = self.sagemaker_backend.get_notebook_instance_tags(arn) + elif ":endpoint/" in arn: + tags = self.sagemaker_backend.get_endpoint_tags(arn) + elif ":training-job/" in arn: + tags = self.sagemaker_backend.get_training_job_tags(arn) + else: + tags = [] + except AWSError: + tags = [] response = {"Tags": tags} return 200, {}, json.dumps(response) + + @amzn_request_id + def create_endpoint_config(self): + try: + endpoint_config = 
self.sagemaker_backend.create_endpoint_config( + endpoint_config_name=self._get_param("EndpointConfigName"), + production_variants=self._get_param("ProductionVariants"), + data_capture_config=self._get_param("DataCaptureConfig"), + tags=self._get_param("Tags"), + kms_key_id=self._get_param("KmsKeyId"), + ) + response = { + "EndpointConfigArn": endpoint_config.endpoint_config_arn, + } + return 200, {}, json.dumps(response) + except AWSError as err: + return err.response() + + @amzn_request_id + def describe_endpoint_config(self): + endpoint_config_name = self._get_param("EndpointConfigName") + response = self.sagemaker_backend.describe_endpoint_config(endpoint_config_name) + return json.dumps(response) + + @amzn_request_id + def delete_endpoint_config(self): + endpoint_config_name = self._get_param("EndpointConfigName") + self.sagemaker_backend.delete_endpoint_config(endpoint_config_name) + return 200, {}, json.dumps("{}") + + @amzn_request_id + def create_endpoint(self): + try: + endpoint = self.sagemaker_backend.create_endpoint( + endpoint_name=self._get_param("EndpointName"), + endpoint_config_name=self._get_param("EndpointConfigName"), + tags=self._get_param("Tags"), + ) + response = { + "EndpointArn": endpoint.endpoint_arn, + } + return 200, {}, json.dumps(response) + except AWSError as err: + return err.response() + + @amzn_request_id + def describe_endpoint(self): + endpoint_name = self._get_param("EndpointName") + response = self.sagemaker_backend.describe_endpoint(endpoint_name) + return json.dumps(response) + + @amzn_request_id + def delete_endpoint(self): + endpoint_name = self._get_param("EndpointName") + self.sagemaker_backend.delete_endpoint(endpoint_name) + return 200, {}, json.dumps("{}") + + @amzn_request_id + def create_training_job(self): + try: + training_job = self.sagemaker_backend.create_training_job( + training_job_name=self._get_param("TrainingJobName"), + hyper_parameters=self._get_param("HyperParameters"), + algorithm_specification=self._get_param("AlgorithmSpecification"), + role_arn=self._get_param("RoleArn"), + input_data_config=self._get_param("InputDataConfig"), + output_data_config=self._get_param("OutputDataConfig"), + resource_config=self._get_param("ResourceConfig"), + vpc_config=self._get_param("VpcConfig"), + stopping_condition=self._get_param("StoppingCondition"), + tags=self._get_param("Tags"), + enable_network_isolation=self._get_param( + "EnableNetworkIsolation", False + ), + enable_inter_container_traffic_encryption=self._get_param( + "EnableInterContainerTrafficEncryption", False + ), + enable_managed_spot_training=self._get_param( + "EnableManagedSpotTraining", False + ), + checkpoint_config=self._get_param("CheckpointConfig"), + debug_hook_config=self._get_param("DebugHookConfig"), + debug_rule_configurations=self._get_param("DebugRuleConfigurations"), + tensor_board_output_config=self._get_param("TensorBoardOutputConfig"), + experiment_config=self._get_param("ExperimentConfig"), + ) + response = { + "TrainingJobArn": training_job.training_job_arn, + } + return 200, {}, json.dumps(response) + except AWSError as err: + return err.response() + + @amzn_request_id + def describe_training_job(self): + training_job_name = self._get_param("TrainingJobName") + response = self.sagemaker_backend.describe_training_job(training_job_name) + return json.dumps(response) + + @amzn_request_id + def delete_training_job(self): + training_job_name = self._get_param("TrainingJobName") + self.sagemaker_backend.delete_training_job(training_job_name) + return 200, {}, 
json.dumps("{}") diff --git a/moto/sagemaker/urls.py b/moto/sagemaker/urls.py index 224342ce5408..9c039d899d95 100644 --- a/moto/sagemaker/urls.py +++ b/moto/sagemaker/urls.py @@ -3,7 +3,6 @@ url_bases = [ "https?://api.sagemaker.(.+).amazonaws.com", - "https?://api-fips.sagemaker.(.+).amazonaws.com", ] url_paths = { diff --git a/tests/test_sagemaker/test_sagemaker_endpoint.py b/tests/test_sagemaker/test_sagemaker_endpoint.py new file mode 100644 index 000000000000..b048439ff5db --- /dev/null +++ b/tests/test_sagemaker/test_sagemaker_endpoint.py @@ -0,0 +1,246 @@ +# -*- coding: utf-8 -*- +from __future__ import unicode_literals + +import datetime +import boto3 +from botocore.exceptions import ClientError, ParamValidationError +import sure # noqa + +from moto import mock_sagemaker +from moto.sts.models import ACCOUNT_ID +from nose.tools import assert_true, assert_equal, assert_raises + +TEST_REGION_NAME = "us-east-1" +FAKE_ROLE_ARN = "arn:aws:iam::{}:role/FakeRole".format(ACCOUNT_ID) +GENERIC_TAGS_PARAM = [ + {"Key": "newkey1", "Value": "newval1"}, + {"Key": "newkey2", "Value": "newval2"}, +] + + +@mock_sagemaker +def test_create_endpoint_config(): + sagemaker = boto3.client("sagemaker", region_name=TEST_REGION_NAME) + + model_name = "MyModel" + production_variants = [ + { + "VariantName": "MyProductionVariant", + "ModelName": model_name, + "InitialInstanceCount": 1, + "InstanceType": "ml.t2.medium", + }, + ] + + endpoint_config_name = "MyEndpointConfig" + with assert_raises(ClientError) as e: + sagemaker.create_endpoint_config( + EndpointConfigName=endpoint_config_name, + ProductionVariants=production_variants, + ) + assert_true( + e.exception.response["Error"]["Message"].startswith("Could not find model") + ) + + _create_model(sagemaker, model_name) + resp = sagemaker.create_endpoint_config( + EndpointConfigName=endpoint_config_name, ProductionVariants=production_variants + ) + resp["EndpointConfigArn"].should.match( + r"^arn:aws:sagemaker:.*:.*:endpoint-config/{}$".format(endpoint_config_name) + ) + + resp = sagemaker.describe_endpoint_config(EndpointConfigName=endpoint_config_name) + resp["EndpointConfigArn"].should.match( + r"^arn:aws:sagemaker:.*:.*:endpoint-config/{}$".format(endpoint_config_name) + ) + resp["EndpointConfigName"].should.equal(endpoint_config_name) + resp["ProductionVariants"].should.equal(production_variants) + + +@mock_sagemaker +def test_delete_endpoint_config(): + sagemaker = boto3.client("sagemaker", region_name=TEST_REGION_NAME) + + model_name = "MyModel" + _create_model(sagemaker, model_name) + + endpoint_config_name = "MyEndpointConfig" + production_variants = [ + { + "VariantName": "MyProductionVariant", + "ModelName": model_name, + "InitialInstanceCount": 1, + "InstanceType": "ml.t2.medium", + }, + ] + + resp = sagemaker.create_endpoint_config( + EndpointConfigName=endpoint_config_name, ProductionVariants=production_variants + ) + resp["EndpointConfigArn"].should.match( + r"^arn:aws:sagemaker:.*:.*:endpoint-config/{}$".format(endpoint_config_name) + ) + + resp = sagemaker.describe_endpoint_config(EndpointConfigName=endpoint_config_name) + resp["EndpointConfigArn"].should.match( + r"^arn:aws:sagemaker:.*:.*:endpoint-config/{}$".format(endpoint_config_name) + ) + + resp = sagemaker.delete_endpoint_config(EndpointConfigName=endpoint_config_name) + with assert_raises(ClientError) as e: + sagemaker.describe_endpoint_config(EndpointConfigName=endpoint_config_name) + assert_true( + e.exception.response["Error"]["Message"].startswith( + "Could not find endpoint 
configuration" + ) + ) + + with assert_raises(ClientError) as e: + sagemaker.delete_endpoint_config(EndpointConfigName=endpoint_config_name) + assert_true( + e.exception.response["Error"]["Message"].startswith( + "Could not find endpoint configuration" + ) + ) + pass + + +@mock_sagemaker +def test_create_endpoint_invalid_instance_type(): + sagemaker = boto3.client("sagemaker", region_name=TEST_REGION_NAME) + + model_name = "MyModel" + _create_model(sagemaker, model_name) + + instance_type = "InvalidInstanceType" + production_variants = [ + { + "VariantName": "MyProductionVariant", + "ModelName": model_name, + "InitialInstanceCount": 1, + "InstanceType": instance_type, + }, + ] + + endpoint_config_name = "MyEndpointConfig" + with assert_raises(ClientError) as e: + sagemaker.create_endpoint_config( + EndpointConfigName=endpoint_config_name, + ProductionVariants=production_variants, + ) + assert_equal(e.exception.response["Error"]["Code"], "ValidationException") + expected_message = "Value '{}' at 'instanceType' failed to satisfy constraint: Member must satisfy enum value set: [".format( + instance_type + ) + assert_true(expected_message in e.exception.response["Error"]["Message"]) + + +@mock_sagemaker +def test_create_endpoint(): + sagemaker = boto3.client("sagemaker", region_name=TEST_REGION_NAME) + + endpoint_name = "MyEndpoint" + with assert_raises(ClientError) as e: + sagemaker.create_endpoint( + EndpointName=endpoint_name, EndpointConfigName="NonexistentEndpointConfig" + ) + assert_true( + e.exception.response["Error"]["Message"].startswith( + "Could not find endpoint configuration" + ) + ) + + model_name = "MyModel" + _create_model(sagemaker, model_name) + + endpoint_config_name = "MyEndpointConfig" + _create_endpoint_config(sagemaker, endpoint_config_name, model_name) + + resp = sagemaker.create_endpoint( + EndpointName=endpoint_name, + EndpointConfigName=endpoint_config_name, + Tags=GENERIC_TAGS_PARAM, + ) + resp["EndpointArn"].should.match( + r"^arn:aws:sagemaker:.*:.*:endpoint/{}$".format(endpoint_name) + ) + + resp = sagemaker.describe_endpoint(EndpointName=endpoint_name) + resp["EndpointArn"].should.match( + r"^arn:aws:sagemaker:.*:.*:endpoint/{}$".format(endpoint_name) + ) + resp["EndpointName"].should.equal(endpoint_name) + resp["EndpointConfigName"].should.equal(endpoint_config_name) + resp["EndpointStatus"].should.equal("InService") + assert_true(isinstance(resp["CreationTime"], datetime.datetime)) + assert_true(isinstance(resp["LastModifiedTime"], datetime.datetime)) + resp["ProductionVariants"][0]["VariantName"].should.equal("MyProductionVariant") + + resp = sagemaker.list_tags(ResourceArn=resp["EndpointArn"]) + assert_equal(resp["Tags"], GENERIC_TAGS_PARAM) + + +@mock_sagemaker +def test_delete_endpoint(): + sagemaker = boto3.client("sagemaker", region_name=TEST_REGION_NAME) + + model_name = "MyModel" + _create_model(sagemaker, model_name) + + endpoint_config_name = "MyEndpointConfig" + _create_endpoint_config(sagemaker, endpoint_config_name, model_name) + + endpoint_name = "MyEndpoint" + _create_endpoint(sagemaker, endpoint_name, endpoint_config_name) + + sagemaker.delete_endpoint(EndpointName=endpoint_name) + with assert_raises(ClientError) as e: + sagemaker.describe_endpoint(EndpointName=endpoint_name) + assert_true( + e.exception.response["Error"]["Message"].startswith("Could not find endpoint") + ) + + with assert_raises(ClientError) as e: + sagemaker.delete_endpoint(EndpointName=endpoint_name) + assert_true( + 
e.exception.response["Error"]["Message"].startswith("Could not find endpoint") + ) + + +def _create_model(boto_client, model_name): + resp = boto_client.create_model( + ModelName=model_name, + PrimaryContainer={ + "Image": "382416733822.dkr.ecr.us-east-1.amazonaws.com/factorization-machines:1", + "ModelDataUrl": "s3://MyBucket/model.tar.gz", + }, + ExecutionRoleArn=FAKE_ROLE_ARN, + ) + assert_equal(resp["ResponseMetadata"]["HTTPStatusCode"], 200) + + +def _create_endpoint_config(boto_client, endpoint_config_name, model_name): + production_variants = [ + { + "VariantName": "MyProductionVariant", + "ModelName": model_name, + "InitialInstanceCount": 1, + "InstanceType": "ml.t2.medium", + }, + ] + resp = boto_client.create_endpoint_config( + EndpointConfigName=endpoint_config_name, ProductionVariants=production_variants + ) + resp["EndpointConfigArn"].should.match( + r"^arn:aws:sagemaker:.*:.*:endpoint-config/{}$".format(endpoint_config_name) + ) + + +def _create_endpoint(boto_client, endpoint_name, endpoint_config_name): + resp = boto_client.create_endpoint( + EndpointName=endpoint_name, EndpointConfigName=endpoint_config_name + ) + resp["EndpointArn"].should.match( + r"^arn:aws:sagemaker:.*:.*:endpoint/{}$".format(endpoint_name) + ) diff --git a/tests/test_sagemaker/test_sagemaker_training.py b/tests/test_sagemaker/test_sagemaker_training.py new file mode 100644 index 000000000000..feaf9f7136f4 --- /dev/null +++ b/tests/test_sagemaker/test_sagemaker_training.py @@ -0,0 +1,127 @@ +# -*- coding: utf-8 -*- +from __future__ import unicode_literals + +import boto3 +import datetime +import sure # noqa + +from moto import mock_sagemaker +from moto.sts.models import ACCOUNT_ID +from nose.tools import assert_true, assert_equal, assert_raises, assert_regexp_matches + +FAKE_ROLE_ARN = "arn:aws:iam::{}:role/FakeRole".format(ACCOUNT_ID) +TEST_REGION_NAME = "us-east-1" + + +@mock_sagemaker +def test_create_training_job(): + sagemaker = boto3.client("sagemaker", region_name=TEST_REGION_NAME) + + training_job_name = "MyTrainingJob" + container = "382416733822.dkr.ecr.us-east-1.amazonaws.com/linear-learner:1" + bucket = "my-bucket" + prefix = "sagemaker/DEMO-breast-cancer-prediction/" + + params = { + "RoleArn": FAKE_ROLE_ARN, + "TrainingJobName": training_job_name, + "AlgorithmSpecification": { + "TrainingImage": container, + "TrainingInputMode": "File", + }, + "ResourceConfig": { + "InstanceCount": 1, + "InstanceType": "ml.c4.2xlarge", + "VolumeSizeInGB": 10, + }, + "InputDataConfig": [ + { + "ChannelName": "train", + "DataSource": { + "S3DataSource": { + "S3DataType": "S3Prefix", + "S3Uri": "s3://{}/{}/train/".format(bucket, prefix), + "S3DataDistributionType": "ShardedByS3Key", + } + }, + "CompressionType": "None", + "RecordWrapperType": "None", + }, + { + "ChannelName": "validation", + "DataSource": { + "S3DataSource": { + "S3DataType": "S3Prefix", + "S3Uri": "s3://{}/{}/validation/".format(bucket, prefix), + "S3DataDistributionType": "FullyReplicated", + } + }, + "CompressionType": "None", + "RecordWrapperType": "None", + }, + ], + "OutputDataConfig": {"S3OutputPath": "s3://{}/{}/".format(bucket, prefix)}, + "HyperParameters": { + "feature_dim": "30", + "mini_batch_size": "100", + "predictor_type": "regressor", + "epochs": "10", + "num_models": "32", + "loss": "absolute_loss", + }, + "StoppingCondition": {"MaxRuntimeInSeconds": 60 * 60}, + } + + resp = sagemaker.create_training_job(**params) + resp["TrainingJobArn"].should.match( + r"^arn:aws:sagemaker:.*:.*:training-job/{}$".format(training_job_name) 
diff --git a/tests/test_sagemaker/test_sagemaker_training.py b/tests/test_sagemaker/test_sagemaker_training.py
new file mode 100644
index 000000000000..feaf9f7136f4
--- /dev/null
+++ b/tests/test_sagemaker/test_sagemaker_training.py
@@ -0,0 +1,127 @@
+# -*- coding: utf-8 -*-
+from __future__ import unicode_literals
+
+import boto3
+import datetime
+import sure  # noqa
+
+from moto import mock_sagemaker
+from moto.sts.models import ACCOUNT_ID
+from nose.tools import assert_true, assert_equal, assert_raises, assert_regexp_matches
+
+FAKE_ROLE_ARN = "arn:aws:iam::{}:role/FakeRole".format(ACCOUNT_ID)
+TEST_REGION_NAME = "us-east-1"
+
+
+@mock_sagemaker
+def test_create_training_job():
+    sagemaker = boto3.client("sagemaker", region_name=TEST_REGION_NAME)
+
+    training_job_name = "MyTrainingJob"
+    container = "382416733822.dkr.ecr.us-east-1.amazonaws.com/linear-learner:1"
+    bucket = "my-bucket"
+    prefix = "sagemaker/DEMO-breast-cancer-prediction/"
+
+    params = {
+        "RoleArn": FAKE_ROLE_ARN,
+        "TrainingJobName": training_job_name,
+        "AlgorithmSpecification": {
+            "TrainingImage": container,
+            "TrainingInputMode": "File",
+        },
+        "ResourceConfig": {
+            "InstanceCount": 1,
+            "InstanceType": "ml.c4.2xlarge",
+            "VolumeSizeInGB": 10,
+        },
+        "InputDataConfig": [
+            {
+                "ChannelName": "train",
+                "DataSource": {
+                    "S3DataSource": {
+                        "S3DataType": "S3Prefix",
+                        "S3Uri": "s3://{}/{}/train/".format(bucket, prefix),
+                        "S3DataDistributionType": "ShardedByS3Key",
+                    }
+                },
+                "CompressionType": "None",
+                "RecordWrapperType": "None",
+            },
+            {
+                "ChannelName": "validation",
+                "DataSource": {
+                    "S3DataSource": {
+                        "S3DataType": "S3Prefix",
+                        "S3Uri": "s3://{}/{}/validation/".format(bucket, prefix),
+                        "S3DataDistributionType": "FullyReplicated",
+                    }
+                },
+                "CompressionType": "None",
+                "RecordWrapperType": "None",
+            },
+        ],
+        "OutputDataConfig": {"S3OutputPath": "s3://{}/{}/".format(bucket, prefix)},
+        "HyperParameters": {
+            "feature_dim": "30",
+            "mini_batch_size": "100",
+            "predictor_type": "regressor",
+            "epochs": "10",
+            "num_models": "32",
+            "loss": "absolute_loss",
+        },
+        "StoppingCondition": {"MaxRuntimeInSeconds": 60 * 60},
+    }
+
+    resp = sagemaker.create_training_job(**params)
+    resp["TrainingJobArn"].should.match(
+        r"^arn:aws:sagemaker:.*:.*:training-job/{}$".format(training_job_name)
+    )
+
+    resp = sagemaker.describe_training_job(TrainingJobName=training_job_name)
+    resp["TrainingJobName"].should.equal(training_job_name)
+    resp["TrainingJobArn"].should.match(
+        r"^arn:aws:sagemaker:.*:.*:training-job/{}$".format(training_job_name)
+    )
+    assert_true(
+        resp["ModelArtifacts"]["S3ModelArtifacts"].startswith(
+            params["OutputDataConfig"]["S3OutputPath"]
+        )
+    )
+    assert_true(training_job_name in (resp["ModelArtifacts"]["S3ModelArtifacts"]))
+    assert_true(
+        resp["ModelArtifacts"]["S3ModelArtifacts"].endswith("output/model.tar.gz")
+    )
+    assert_equal(resp["TrainingJobStatus"], "Completed")
+    assert_equal(resp["SecondaryStatus"], "Completed")
+    assert_equal(resp["HyperParameters"], params["HyperParameters"])
+    assert_equal(
+        resp["AlgorithmSpecification"]["TrainingImage"],
+        params["AlgorithmSpecification"]["TrainingImage"],
+    )
+    assert_equal(
+        resp["AlgorithmSpecification"]["TrainingInputMode"],
+        params["AlgorithmSpecification"]["TrainingInputMode"],
+    )
+    assert_true("MetricDefinitions" in resp["AlgorithmSpecification"])
+    assert_true("Name" in resp["AlgorithmSpecification"]["MetricDefinitions"][0])
+    assert_true("Regex" in resp["AlgorithmSpecification"]["MetricDefinitions"][0])
+    assert_equal(resp["RoleArn"], FAKE_ROLE_ARN)
+    assert_equal(resp["InputDataConfig"], params["InputDataConfig"])
+    assert_equal(resp["OutputDataConfig"], params["OutputDataConfig"])
+    assert_equal(resp["ResourceConfig"], params["ResourceConfig"])
+    assert_equal(resp["StoppingCondition"], params["StoppingCondition"])
+    assert_true(isinstance(resp["CreationTime"], datetime.datetime))
+    assert_true(isinstance(resp["TrainingStartTime"], datetime.datetime))
+    assert_true(isinstance(resp["TrainingEndTime"], datetime.datetime))
+    assert_true(isinstance(resp["LastModifiedTime"], datetime.datetime))
+    assert_true("SecondaryStatusTransitions" in resp)
+    assert_true("Status" in resp["SecondaryStatusTransitions"][0])
+    assert_true("StartTime" in resp["SecondaryStatusTransitions"][0])
+    assert_true("EndTime" in resp["SecondaryStatusTransitions"][0])
+    assert_true("StatusMessage" in resp["SecondaryStatusTransitions"][0])
+    assert_true("FinalMetricDataList" in resp)
+    assert_true("MetricName" in resp["FinalMetricDataList"][0])
+    assert_true("Value" in resp["FinalMetricDataList"][0])
+    assert_true("Timestamp" in resp["FinalMetricDataList"][0])
+
+    pass

From f2f25060feb00e8d8db67cff3de3cbc66344f5ac Mon Sep 17 00:00:00 2001
From: Bert Blommers
Date: Mon, 20 Jul 2020 11:37:45 +0100
Subject: [PATCH 451/658] #3150 - AWSLambda - Verify Docker is running

---
 moto/awslambda/models.py | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/moto/awslambda/models.py b/moto/awslambda/models.py
index 91ecc42872aa..afbe9775a094 100644
--- a/moto/awslambda/models.py
+++ b/moto/awslambda/models.py
@@ -401,6 +401,7 @@ def _invoke_lambda(self, code, event=None, context=None):
         log_config = docker.types.LogConfig(type=docker.types.LogConfig.types.JSON)
         with _DockerDataVolumeContext(self) as data_vol:
             try:
+                self.docker_client.ping()  # Verify Docker is running
                 run_kwargs = (
                     dict(links={"motoserver": "motoserver"})
                     if settings.TEST_SERVER_MODE
@@ -463,6 +464,9 @@ def _invoke_lambda(self, code, event=None, context=None):
                     [line for line in self.convert(output).splitlines()[:-1]]
                 )
                 return resp, False, logs
+        except docker.errors.DockerException as e:
+            # Docker itself is probably not running - there will be no Lambda-logs to handle
+            return "error running docker: {}".format(e), True, ""
         except BaseException as e:
             traceback.print_exc()
             logs = os.linesep.join(
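The Docker check above ships without an accompanying test, so the contract it introduces is easiest to read off the return value of `_invoke_lambda`. A rough sketch of the new failure mode, assuming `fn` is some existing `LambdaFunction` instance and the Docker daemon is unreachable (the call arguments are placeholders; the message text comes from the handler above):

```python
# ping() raises docker.errors.DockerException when the daemon is down; the new
# handler turns that into an error result instead of letting the exception
# escape moto. The success path returns (resp, False, logs) as before.
resp, errored, logs = fn._invoke_lambda(code=None, event={}, context={})
assert errored is True
assert resp.startswith("error running docker:")
assert logs == ""
```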
From fdeee077626fa0964089c84f90850ea674db808a Mon Sep 17 00:00:00 2001
From: cm-iwata <38879253+cm-iwata@users.noreply.github.com>
Date: Mon, 20 Jul 2020 20:31:30 +0900
Subject: [PATCH 452/658] Add basic implementation for cognito-idp
 create_resource_server (#3153)

* Add basic implementation for cognito-idp create_resource_server
* lint
---
 IMPLEMENTATION_COVERAGE.md               |  4 +--
 moto/cognitoidp/exceptions.py            |  9 ++++++
 moto/cognitoidp/models.py                | 37 +++++++++++++++++++++++
 moto/cognitoidp/responses.py             | 11 +++++++
 tests/test_cognitoidp/test_cognitoidp.py | 38 ++++++++++++++++++++++++
 5 files changed, 97 insertions(+), 2 deletions(-)

diff --git a/IMPLEMENTATION_COVERAGE.md b/IMPLEMENTATION_COVERAGE.md
index 0c05e8b22dc3..db77f06e4c18 100644
--- a/IMPLEMENTATION_COVERAGE.md
+++ b/IMPLEMENTATION_COVERAGE.md
@@ -1664,7 +1664,7 @@
 
 ## cognito-idp
-37% implemented
+38% implemented
 - [ ] add_custom_attributes
 - [X] admin_add_user_to_group
@@ -1700,7 +1700,7 @@
 - [ ] confirm_sign_up
 - [X] create_group
 - [X] create_identity_provider
-- [ ] create_resource_server
+- [X] create_resource_server
 - [ ] create_user_import_job
 - [X] create_user_pool
 - [X] create_user_pool_client
diff --git a/moto/cognitoidp/exceptions.py b/moto/cognitoidp/exceptions.py
index e52b7c49fcb1..c9b6368caa5e 100644
--- a/moto/cognitoidp/exceptions.py
+++ b/moto/cognitoidp/exceptions.py
@@ -2,6 +2,7 @@
 import json
 
 from werkzeug.exceptions import BadRequest
+from moto.core.exceptions import JsonRESTError
 
 
 class ResourceNotFoundError(BadRequest):
@@ -42,3 +43,11 @@ def __init__(self, message):
         self.description = json.dumps(
             {"message": message, "__type": "NotAuthorizedException"}
         )
+
+
+class InvalidParameterException(JsonRESTError):
+    def __init__(self, msg=None):
+        self.code = 400
+        super(InvalidParameterException, self).__init__(
+            "InvalidParameterException", msg or "A parameter is specified incorrectly."
+        )
diff --git a/moto/cognitoidp/models.py b/moto/cognitoidp/models.py
index c93563c2a59d..4d3280272839 100644
--- a/moto/cognitoidp/models.py
+++ b/moto/cognitoidp/models.py
@@ -21,6 +21,7 @@
     ResourceNotFoundError,
     UserNotFoundError,
     UsernameExistsException,
+    InvalidParameterException,
 )
 
 UserStatus = {
@@ -83,6 +84,7 @@ def __init__(self, region, name, extended_config):
         self.identity_providers = OrderedDict()
         self.groups = OrderedDict()
         self.users = OrderedDict()
+        self.resource_servers = OrderedDict()
         self.refresh_tokens = {}
         self.access_tokens = {}
         self.id_tokens = {}
@@ -337,6 +339,27 @@ def expand_attrs(attrs):
         self.attributes = expand_attrs(flat_attributes)
 
 
+class CognitoResourceServer(BaseModel):
+    def __init__(self, user_pool_id, identifier, name, scopes):
+
+        self.user_pool_id = user_pool_id
+        self.identifier = identifier
+        self.name = name
+        self.scopes = scopes
+
+    def to_json(self):
+        res = {
+            "UserPoolId": self.user_pool_id,
+            "Identifier": self.identifier,
+            "Name": self.name,
+        }
+
+        if len(self.scopes) != 0:
+            res.update({"Scopes": self.scopes})
+
+        return res
+
+
 class CognitoIdpBackend(BaseBackend):
     def __init__(self, region):
         super(CognitoIdpBackend, self).__init__()
@@ -768,6 +791,20 @@ def admin_update_user_attributes(self, user_pool_id, username, attributes):
         user = user_pool.users[username]
         user.update_attributes(attributes)
 
+    def create_resource_server(self, user_pool_id, identifier, name, scopes):
+        user_pool = self.user_pools.get(user_pool_id)
+        if not user_pool:
+            raise ResourceNotFoundError(user_pool_id)
+
+        if identifier in user_pool.resource_servers:
+            raise InvalidParameterException(
+                "%s already exists in user pool %s." % (identifier, user_pool_id)
+            )
+
+        resource_server = CognitoResourceServer(user_pool_id, identifier, name, scopes)
+        user_pool.resource_servers[identifier] = resource_server
+        return resource_server
+
 
 cognitoidp_backends = {}
 for region in Session().get_available_regions("cognito-idp"):
diff --git a/moto/cognitoidp/responses.py b/moto/cognitoidp/responses.py
index 1c945b23ed78..972ba883acd8 100644
--- a/moto/cognitoidp/responses.py
+++ b/moto/cognitoidp/responses.py
@@ -379,6 +379,17 @@ def admin_update_user_attributes(self):
         )
         return ""
 
+    # Resource Server
+    def create_resource_server(self):
+        user_pool_id = self._get_param("UserPoolId")
+        identifier = self._get_param("Identifier")
+        name = self._get_param("Name")
+        scopes = self._get_param("Scopes")
+        resource_server = cognitoidp_backends[self.region].create_resource_server(
+            user_pool_id, identifier, name, scopes
+        )
+        return json.dumps({"ResourceServer": resource_server.to_json()})
+
 
 class CognitoIdpJsonWebKeyResponse(BaseResponse):
     def __init__(self):
diff --git a/tests/test_cognitoidp/test_cognitoidp.py b/tests/test_cognitoidp/test_cognitoidp.py
index d76587d1b022..e05f4b45735c 100644
--- a/tests/test_cognitoidp/test_cognitoidp.py
+++ b/tests/test_cognitoidp/test_cognitoidp.py
@@ -1398,6 +1398,44 @@ def test_admin_update_user_attributes():
             val.should.equal("Jane")
 
 
+@mock_cognitoidp
+def test_resource_server():
+
+    client = boto3.client("cognito-idp", "us-west-2")
+    name = str(uuid.uuid4())
+    value = str(uuid.uuid4())
+    res = client.create_user_pool(PoolName=name)
+
+    user_pool_id = res["UserPool"]["Id"]
+    identifier = "http://localhost.localdomain"
+    name = "local server"
+    scopes = [
+        {"ScopeName": "app:write", "ScopeDescription": "write scope"},
+        {"ScopeName": "app:read", "ScopeDescription": "read scope"},
+    ]
+
+    res = client.create_resource_server(
+        UserPoolId=user_pool_id, Identifier=identifier, Name=name, Scopes=scopes
+    )
+
+    res["ResourceServer"]["UserPoolId"].should.equal(user_pool_id)
+    res["ResourceServer"]["Identifier"].should.equal(identifier)
+    res["ResourceServer"]["Name"].should.equal(name)
+    res["ResourceServer"]["Scopes"].should.equal(scopes)
+
+    with assert_raises(ClientError) as ex:
+        client.create_resource_server(
+            UserPoolId=user_pool_id, Identifier=identifier, Name=name, Scopes=scopes
+        )
+
+    ex.exception.operation_name.should.equal("CreateResourceServer")
+    ex.exception.response["Error"]["Code"].should.equal("InvalidParameterException")
+    ex.exception.response["Error"]["Message"].should.equal(
+        "%s already exists in user pool %s." % (identifier, user_pool_id)
+    )
+    ex.exception.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400)
+
+
 # Test will retrieve public key from cognito.amazonaws.com/.well-known/jwks.json,
 # which isnt mocked in ServerMode
 if not settings.TEST_SERVER_MODE:
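One branch of `CognitoResourceServer.to_json` above is not exercised by the new test: when a resource server is created with an empty scope list, the `Scopes` key is omitted from the response entirely. A minimal sketch of that case, following the same test conventions (the pool name and identifier are illustrative, not part of the patch):

```python
@mock_cognitoidp
def test_resource_server_without_scopes_sketch():
    client = boto3.client("cognito-idp", "us-west-2")
    pool_id = client.create_user_pool(PoolName="example-pool")["UserPool"]["Id"]

    res = client.create_resource_server(
        UserPoolId=pool_id, Identifier="https://api.example.com", Name="api", Scopes=[]
    )

    # to_json() only adds "Scopes" when len(self.scopes) != 0.
    assert "Scopes" not in res["ResourceServer"]
```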
From 1e5b8acac6d67e8afb4b237cf221021b468aa79c Mon Sep 17 00:00:00 2001
From: Aaron Hill
Date: Mon, 20 Jul 2020 23:17:37 -0700
Subject: [PATCH 453/658] Implementation: ECS Task Sets (#3152)

* initial implementation of taskSets. Fixed a bug with ECS Service where task_definition was a required parameter.
* Added update_task_set and tests. DRYed up ClusterNotFoundException. General cleanup.
* Added support for filtering tags on include parameter to describe_task_sets. Added additional tests.
* Fix copy/pasta in ClusterNotFoundException
* styling updates
* Added TODO for delete_task_set force parameter
* Updated multiple function and constructor calls to use named variables. Updated tests to reference variables instead of hardcoded strings.
* Run black for formatting
* Updated create_service function call to use named variables
---
 IMPLEMENTATION_COVERAGE.md       |  12 +-
 moto/ecs/exceptions.py           |  19 ++
 moto/ecs/models.py               | 275 +++++++++++++++++++++++---
 moto/ecs/responses.py            |  82 +++++++-
 tests/test_ecs/test_ecs_boto3.py | 329 +++++++++++++++++++++++++++++++
 5 files changed, 683 insertions(+), 34 deletions(-)

diff --git a/IMPLEMENTATION_COVERAGE.md b/IMPLEMENTATION_COVERAGE.md
index db77f06e4c18..8744f47596ca 100644
--- a/IMPLEMENTATION_COVERAGE.md
+++ b/IMPLEMENTATION_COVERAGE.md
@@ -3031,17 +3031,17 @@
 
 ## ecs
-62% implemented
+73% implemented
 - [ ] create_capacity_provider
 - [X] create_cluster
 - [X] create_service
-- [ ] create_task_set
+- [X] create_task_set
 - [ ] delete_account_setting
 - [X] delete_attributes
 - [X] delete_cluster
 - [X] delete_service
-- [ ] delete_task_set
+- [X] delete_task_set
 - [X] deregister_container_instance
 - [X] deregister_task_definition
 - [ ] describe_capacity_providers
 - [X] describe_container_instances
 - [X] describe_services
 - [X] describe_task_definition
-- [ ] describe_task_sets
+- [X] describe_task_sets
 - [X] describe_tasks
 - [ ] discover_poll_endpoint
 - [ ] list_account_settings
 - [ ] update_container_agent
 - [X] update_container_instances_state
 - [X] update_service
-- [ ] update_service_primary_task_set
+- [X] update_service_primary_task_set
-- [ ] update_task_set
+- [X] update_task_set
 
 ## efs
diff --git a/moto/ecs/exceptions.py b/moto/ecs/exceptions.py
index d08066192f99..72129224ea37 100644
--- a/moto/ecs/exceptions.py
+++ b/moto/ecs/exceptions.py
@@ -21,3 +21,22 @@ def __init__(self):
             error_type="ClientException",
             message="The specified task definition does not exist.",
         )
+
+
+class TaskSetNotFoundException(JsonRESTError):
+    code = 400
+
+    def __init__(self):
+        super(TaskSetNotFoundException, self).__init__(
+            error_type="ClientException",
+            message="The specified task set does not exist.",
+        )
+
+
+class ClusterNotFoundException(JsonRESTError):
+    code = 400
+
+    def __init__(self):
+        super(ClusterNotFoundException, self).__init__(
+            error_type="ClientException", message="Cluster not found",
+        )
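Before the model changes below, note that all three exception classes serialize with error_type "ClientException", so a boto3 caller cannot tell them apart by error code, only by message. A rough sketch of what that looks like from the client side (the cluster name is illustrative, not part of the patch):

```python
import boto3
from botocore.exceptions import ClientError
from moto import mock_ecs

with mock_ecs():
    client = boto3.client("ecs", region_name="us-east-1")
    try:
        client.delete_attributes(
            cluster="no-such-cluster",
            attributes=[{"name": "my-attr", "targetId": "some-target"}],
        )
    except ClientError as e:
        # The shared code is "ClientException"; the message identifies
        # which condition actually fired.
        assert e.response["Error"]["Message"] == "Cluster not found"
```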
diff --git a/moto/ecs/models.py b/moto/ecs/models.py
index 1a385226bc91..36c7cd44a091 100644
--- a/moto/ecs/models.py
+++ b/moto/ecs/models.py
@@ -13,7 +13,12 @@
 from moto.ec2 import ec2_backends
 from copy import copy
 
-from .exceptions import ServiceNotFoundException, TaskDefinitionNotFoundException
+from .exceptions import (
+    ServiceNotFoundException,
+    TaskDefinitionNotFoundException,
+    TaskSetNotFoundException,
+    ClusterNotFoundException,
+)
 
 
 class BaseObject(BaseModel):
@@ -176,7 +181,6 @@ def update_from_cloudformation_json(
         cls, original_resource, new_resource_name, cloudformation_json, region_name
     ):
         properties = cloudformation_json["Properties"]
-
         family = properties.get(
             "Family", "task-definition-{0}".format(int(random() * 10 ** 6))
         )
@@ -236,11 +240,12 @@ def __init__(
         self,
         cluster,
         service_name,
-        task_definition,
         desired_count,
+        task_definition=None,
         load_balancers=None,
         scheduling_strategy=None,
         tags=None,
+        deployment_controller=None,
     ):
         self.cluster_arn = cluster.arn
         self.arn = "arn:aws:ecs:{0}:012345678910:service/{1}".format(
@@ -249,21 +254,29 @@ def __init__(
         self.name = service_name
         self.status = "ACTIVE"
         self.running_count = 0
-        self.task_definition = task_definition.arn
+        if task_definition:
+            self.task_definition = task_definition.arn
+        else:
+            self.task_definition = None
         self.desired_count = desired_count
+        self.task_sets = []
+        self.deployment_controller = deployment_controller or {"type": "ECS"}
         self.events = []
-        self.deployments = [
-            {
-                "createdAt": datetime.now(pytz.utc),
-                "desiredCount": self.desired_count,
-                "id": "ecs-svc/{}".format(randint(0, 32 ** 12)),
-                "pendingCount": self.desired_count,
-                "runningCount": 0,
-                "status": "PRIMARY",
-                "taskDefinition": task_definition.arn,
-                "updatedAt": datetime.now(pytz.utc),
-            }
-        ]
+        if self.deployment_controller["type"] == "ECS":
+            self.deployments = [
+                {
+                    "createdAt": datetime.now(pytz.utc),
+                    "desiredCount": self.desired_count,
+                    "id": "ecs-svc/{}".format(randint(0, 32 ** 12)),
+                    "pendingCount": self.desired_count,
+                    "runningCount": 0,
+                    "status": "PRIMARY",
+                    "taskDefinition": self.task_definition,
+                    "updatedAt": datetime.now(pytz.utc),
+                }
+            ]
+        else:
+            self.deployments = []
         self.load_balancers = load_balancers if load_balancers is not None else []
         self.scheduling_strategy = (
             scheduling_strategy if scheduling_strategy is not None else "REPLICA"
@@ -282,6 +295,13 @@ def response_object(self):
         response_object["serviceName"] = self.name
         response_object["serviceArn"] = self.arn
         response_object["schedulingStrategy"] = self.scheduling_strategy
+        if response_object["deploymentController"]["type"] == "ECS":
+            del response_object["deploymentController"]
+            del response_object["taskSets"]
+        else:
+            response_object["taskSets"] = [
+                t.response_object for t in response_object["taskSets"]
+            ]
 
         for deployment in response_object["deployments"]:
             if isinstance(deployment["createdAt"], datetime):
@@ -315,7 +335,7 @@ def create_from_cloudformation_json(
 
         ecs_backend = ecs_backends[region_name]
         return ecs_backend.create_service(
-            cluster, service_name, task_definition, desired_count
+            cluster, service_name, desired_count, task_definition_str=task_definition
        )
 
     @classmethod
@@ -343,7 +363,10 @@ def update_from_cloudformation_json(
                 cluster_name, int(random() * 10 ** 6)
             )
             return ecs_backend.create_service(
-                cluster_name, new_service_name, task_definition, desired_count
+                cluster_name,
+                new_service_name,
+                desired_count,
+                task_definition_str=task_definition,
             )
         else:
             return ecs_backend.update_service(
@@ -494,6 +517,73 @@ def response_object(self):
         return response_object
 
 
+class TaskSet(BaseObject):
+    def __init__(
+        self,
+        service,
+        cluster,
+        task_definition,
+        region_name,
+        external_id=None,
+        network_configuration=None,
+        load_balancers=None,
+        service_registries=None,
+        launch_type=None,
+        capacity_provider_strategy=None,
+        platform_version=None,
+        scale=None,
+        client_token=None,
+        tags=None,
+    ):
+        self.service = service
+        self.cluster = cluster
+        self.status = "ACTIVE"
+        self.task_definition = task_definition or ""
+        self.region_name = region_name
+        self.external_id = external_id or ""
+        self.network_configuration = network_configuration or {}
+        self.load_balancers = load_balancers or []
+        self.service_registries = service_registries or []
+        self.launch_type = launch_type
+        self.capacity_provider_strategy = capacity_provider_strategy or []
+        self.platform_version = platform_version or ""
+        self.scale = scale or {"value": 100.0, "unit": "PERCENT"}
+        self.client_token = client_token or ""
+        self.tags = tags or []
+        self.stabilityStatus = "STEADY_STATE"
+        self.createdAt = datetime.now(pytz.utc)
+        self.updatedAt = datetime.now(pytz.utc)
+        self.stabilityStatusAt = datetime.now(pytz.utc)
+        self.id = "ecs-svc/{}".format(randint(0, 32 ** 12))
+        self.service_arn = ""
+        self.cluster_arn = ""
+
+        cluster_name = self.cluster.split("/")[-1]
+        service_name = self.service.split("/")[-1]
+        self.task_set_arn = "arn:aws:ecs:{0}:012345678910:task-set/{1}/{2}/{3}".format(
+            region_name, cluster_name, service_name, self.id
+        )
+
+    @property
+    def response_object(self):
+        response_object = self.gen_response_object()
+        if isinstance(response_object["createdAt"], datetime):
+            response_object["createdAt"] = unix_time(
+                self.createdAt.replace(tzinfo=None)
+            )
+        if isinstance(response_object["updatedAt"], datetime):
+            response_object["updatedAt"] = unix_time(
+                self.updatedAt.replace(tzinfo=None)
+            )
+        if isinstance(response_object["stabilityStatusAt"], datetime):
+            response_object["stabilityStatusAt"] = unix_time(
+                self.stabilityStatusAt.replace(tzinfo=None)
+            )
+        del response_object["service"]
+        del response_object["cluster"]
+        return response_object
+
+
 class EC2ContainerServiceBackend(BaseBackend):
     def __init__(self, region_name):
         super(EC2ContainerServiceBackend, self).__init__()
@@ -502,6 +592,7 @@ def __init__(self, region_name):
         self.tasks = {}
         self.services = {}
         self.container_instances = {}
+        self.task_sets = {}
         self.region_name = region_name
 
     def reset(self):
@@ -871,28 +962,33 @@ def create_service(
         self,
         cluster_str,
         service_name,
-        task_definition_str,
         desired_count,
+        task_definition_str=None,
         load_balancers=None,
         scheduling_strategy=None,
         tags=None,
+        deployment_controller=None,
     ):
         cluster_name = cluster_str.split("/")[-1]
         if cluster_name in self.clusters:
             cluster = self.clusters[cluster_name]
         else:
             raise Exception("{0} is not a cluster".format(cluster_name))
-        task_definition = self.describe_task_definition(task_definition_str)
+        if task_definition_str is not None:
+            task_definition = self.describe_task_definition(task_definition_str)
+        else:
+            task_definition = None
         desired_count = desired_count if desired_count is not None else 0
 
         service = Service(
             cluster,
             service_name,
-            task_definition,
             desired_count,
+            task_definition,
             load_balancers,
             scheduling_strategy,
             tags,
+            deployment_controller,
         )
         cluster_service_pair = "{0}:{1}".format(cluster_name, service_name)
         self.services[cluster_service_pair] = service
@@ -928,6 +1024,7 @@ def describe_services(self, cluster_str, service_names_or_arns):
                     or existing_service_obj.arn == requested_name_or_arn
                 ):
                     result.append(existing_service_obj)
+
         return result
 
     def update_service(
@@ -1101,9 +1198,7 @@ def _respond_to_cluster_state_update(self, cluster_str):
 
     def put_attributes(self, cluster_name, attributes=None):
         if cluster_name is None or cluster_name not in self.clusters:
-            raise JsonRESTError(
-                "ClusterNotFoundException", "Cluster not found", status=400
-            )
+            raise ClusterNotFoundException
 
         if attributes is None:
             raise JsonRESTError(
@@ -1192,9 +1287,7 @@ def list_attributes(
 
     def delete_attributes(self, cluster_name, attributes=None):
         if cluster_name is None or cluster_name not in self.clusters:
-            raise JsonRESTError(
-                "ClusterNotFoundException", "Cluster not found", status=400
-            )
+            raise ClusterNotFoundException
 
         if attributes is None:
             raise JsonRESTError(
@@ -1327,6 +1420,134 @@ def untag_resource(self, resource_arn, tag_keys):
                 raise ServiceNotFoundException(service_name=parsed_arn["id"])
         raise NotImplementedError()
 
+    def create_task_set(
+        self,
+        service,
+        cluster,
+        task_definition,
+        external_id=None,
+        network_configuration=None,
+        load_balancers=None,
+        service_registries=None,
+        launch_type=None,
+        capacity_provider_strategy=None,
+        platform_version=None,
+        scale=None,
+        client_token=None,
+        tags=None,
+    ):
+        task_set = TaskSet(
+            service,
+            cluster,
+            task_definition,
+            self.region_name,
+            external_id=external_id,
+            network_configuration=network_configuration,
+            load_balancers=load_balancers,
+            service_registries=service_registries,
+            launch_type=launch_type,
+            capacity_provider_strategy=capacity_provider_strategy,
+            platform_version=platform_version,
+            scale=scale,
+            client_token=client_token,
+            tags=tags,
+        )
+
+        cluster_name = cluster.split("/")[-1]
+        service_name = service.split("/")[-1]
+
+        service_obj = self.services.get("{0}:{1}".format(cluster_name, service_name))
+        if not service_obj:
+            raise ServiceNotFoundException(service_name=service_name)
+
+        cluster_obj = self.clusters.get(cluster_name)
+        if not cluster_obj:
+            raise ClusterNotFoundException
+
+        task_set.task_definition = self.describe_task_definition(task_definition).arn
+        task_set.service_arn = service_obj.arn
+        task_set.cluster_arn = cluster_obj.arn
+
+        service_obj.task_sets.append(task_set)
+        # TODO: validate load balancers
+
+        return task_set
+
+    def describe_task_sets(self, cluster, service, task_sets=None, include=None):
+        task_sets = task_sets or []
+        include = include or []
+
+        cluster_name = cluster.split("/")[-1]
+        service_name = service.split("/")[-1]
+        service_key = "{0}:{1}".format(cluster_name, service_name)
+
+        service_obj = self.services.get(service_key)
+        if not service_obj:
+            raise ServiceNotFoundException(service_name=service_name)
+
+        cluster_obj = self.clusters.get(cluster_name)
+        if not cluster_obj:
+            raise ClusterNotFoundException
+
+        task_set_results = []
+        if task_sets:
+            for task_set in service_obj.task_sets:
+                if task_set.task_set_arn in task_sets:
+                    task_set_results.append(task_set)
+        else:
+            task_set_results = service_obj.task_sets
+
+        return task_set_results
+
+    def delete_task_set(self, cluster, service, task_set, force=False):
+        cluster_name = cluster.split("/")[-1]
+        service_name = service.split("/")[-1]
+
+        service_key = "{0}:{1}".format(cluster_name, service_name)
+        task_set_element = None
+        for i, ts in enumerate(self.services[service_key].task_sets):
+            if task_set == ts.task_set_arn:
+                task_set_element = i
+
+        if task_set_element is not None:
+            deleted_task_set = self.services[service_key].task_sets.pop(
+                task_set_element
+            )
+        else:
+            raise TaskSetNotFoundException
+
+        # TODO: add logic for `force` to raise an exception if `PRIMARY` task has not been scaled to 0.
+
+        return deleted_task_set
+
+    def update_task_set(self, cluster, service, task_set, scale):
+        cluster_name = cluster.split("/")[-1]
+        service_name = service.split("/")[-1]
+        task_set_obj = self.describe_task_sets(
+            cluster_name, service_name, task_sets=[task_set]
+        )[0]
+        task_set_obj.scale = scale
+        return task_set_obj
+
+    def update_service_primary_task_set(self, cluster, service, primary_task_set):
+        """ Updates task sets to be PRIMARY or ACTIVE for given cluster:service task sets """
+        cluster_name = cluster.split("/")[-1]
+        service_name = service.split("/")[-1]
+        task_set_obj = self.describe_task_sets(
+            cluster_name, service_name, task_sets=[primary_task_set]
+        )[0]
+
+        service_obj = self.describe_services(cluster, [service])[0]
+        service_obj.load_balancers = task_set_obj.load_balancers
+        service_obj.task_definition = task_set_obj.task_definition
+
+        for task_set in service_obj.task_sets:
+            if task_set.task_set_arn == primary_task_set:
+                task_set.status = "PRIMARY"
+            else:
+                task_set.status = "ACTIVE"
+        return task_set_obj
+
 
 ecs_backends = {}
 for region in Session().get_available_regions("ecs"):
diff --git a/moto/ecs/responses.py b/moto/ecs/responses.py
index c8f1e06ce632..e911bb9432ca 100644
--- a/moto/ecs/responses.py
+++ b/moto/ecs/responses.py
@@ -162,14 +162,16 @@ def create_service(self):
         load_balancers = self._get_param("loadBalancers")
         scheduling_strategy = self._get_param("schedulingStrategy")
         tags = self._get_param("tags")
+        deployment_controller = self._get_param("deploymentController")
         service = self.ecs_backend.create_service(
             cluster_str,
             service_name,
-            task_definition_str,
             desired_count,
+            task_definition_str,
             load_balancers,
             scheduling_strategy,
             tags,
+            deployment_controller,
         )
         return json.dumps({"service": service.response_object})
 
@@ -189,6 +191,7 @@ def describe_services(self):
         cluster_str = self._get_param("cluster")
         service_names = self._get_param("services")
         services = self.ecs_backend.describe_services(cluster_str, service_names)
+
         return json.dumps(
             {
                 "services": [service.response_object for service in services],
@@ -347,3 +350,80 @@ def untag_resource(self):
         tag_keys = self._get_param("tagKeys")
         results = self.ecs_backend.untag_resource(resource_arn, tag_keys)
         return json.dumps(results)
+
+    def create_task_set(self):
+        service_str = self._get_param("service")
+        cluster_str = self._get_param("cluster")
+        task_definition = self._get_param("taskDefinition")
+        external_id = self._get_param("externalId")
+        network_configuration = self._get_param("networkConfiguration")
+        load_balancers = self._get_param("loadBalancers")
+        service_registries = self._get_param("serviceRegistries")
+        launch_type = self._get_param("launchType")
+        capacity_provider_strategy = self._get_param("capacityProviderStrategy")
+        platform_version = self._get_param("platformVersion")
+        scale = self._get_param("scale")
+        client_token = self._get_param("clientToken")
+        tags = self._get_param("tags")
+        task_set = self.ecs_backend.create_task_set(
+            service_str,
+            cluster_str,
+            task_definition,
+            external_id=external_id,
+            network_configuration=network_configuration,
+            load_balancers=load_balancers,
+            service_registries=service_registries,
+            launch_type=launch_type,
+            capacity_provider_strategy=capacity_provider_strategy,
+            platform_version=platform_version,
+            scale=scale,
+            client_token=client_token,
+            tags=tags,
+        )
+        return json.dumps({"taskSet": task_set.response_object})
+
+    def describe_task_sets(self):
+        cluster_str = self._get_param("cluster")
+        service_str = self._get_param("service")
+        task_sets = self._get_param("taskSets")
+        include = self._get_param("include", [])
+        task_set_objs = self.ecs_backend.describe_task_sets(
+            cluster_str, service_str, task_sets, include
+        )
+
+        response_objs = [t.response_object for t in task_set_objs]
+        if "TAGS" not in include:
+            for ro in response_objs:
+                del ro["tags"]
+        return json.dumps({"taskSets": response_objs})
+
+    def delete_task_set(self):
+        cluster_str = self._get_param("cluster")
+        service_str = self._get_param("service")
+        task_set = self._get_param("taskSet")
+        force = self._get_param("force")
+        task_set = self.ecs_backend.delete_task_set(
+            cluster_str, service_str, task_set, force
+        )
+        return json.dumps({"taskSet": task_set.response_object})
+
+    def update_task_set(self):
+        cluster_str = self._get_param("cluster")
+        service_str = self._get_param("service")
+        task_set = self._get_param("taskSet")
+        scale = self._get_param("scale")
+
+        task_set = self.ecs_backend.update_task_set(
+            cluster_str, service_str, task_set, scale
+        )
+        return json.dumps({"taskSet": task_set.response_object})
+
+    def update_service_primary_task_set(self):
+        cluster_str = self._get_param("cluster")
+        service_str = self._get_param("service")
+        primary_task_set = self._get_param("primaryTaskSet")
+
+        task_set = self.ecs_backend.update_service_primary_task_set(
+            cluster_str, service_str, primary_task_set
+        )
+        return json.dumps({"taskSet": task_set.response_object})
diff --git a/tests/test_ecs/test_ecs_boto3.py b/tests/test_ecs/test_ecs_boto3.py
index f6de595974a4..3ef62582ebb9 100644
--- a/tests/test_ecs/test_ecs_boto3.py
+++ b/tests/test_ecs/test_ecs_boto3.py
@@ -2637,3 +2637,332 @@ def test_ecs_task_definition_placement_constraints():
     response["taskDefinition"]["placementConstraints"].should.equal(
         [{"type": "memberOf", "expression": "attribute:ecs.instance-type =~ t2.*"}]
     )
+
+
+@mock_ecs
+def test_create_task_set():
+    cluster_name = "test_ecs_cluster"
+    service_name = "test_ecs_service"
+    task_def_name = "test_ecs_task"
+
+    client = boto3.client("ecs", region_name="us-east-1")
+    _ = client.create_cluster(clusterName=cluster_name)
+    _ = client.register_task_definition(
+        family="test_ecs_task",
+        containerDefinitions=[
+            {
+                "name": "hello_world",
+                "image": "docker/hello-world:latest",
+                "cpu": 1024,
+                "memory": 400,
+                "essential": True,
+                "environment": [
+                    {"name": "AWS_ACCESS_KEY_ID", "value": "SOME_ACCESS_KEY"}
+                ],
+                "logConfiguration": {"logDriver": "json-file"},
+            }
+        ],
+    )
+    _ = client.create_service(
+        cluster=cluster_name,
+        serviceName=service_name,
+        taskDefinition=task_def_name,
+        desiredCount=2,
+        deploymentController={"type": "EXTERNAL"},
+    )
+    load_balancers = [
+        {
+            "targetGroupArn": "arn:aws:elasticloadbalancing:us-east-1:01234567890:targetgroup/c26b93c1bc35466ba792d5b08fe6a5bc/ec39113f8831453a",
+            "containerName": "hello_world",
+            "containerPort": 8080,
+        },
+    ]
+
+    task_set = client.create_task_set(
+        cluster=cluster_name,
+        service=service_name,
+        taskDefinition=task_def_name,
+        loadBalancers=load_balancers,
+    )["taskSet"]
+
+    cluster_arn = client.describe_clusters(clusters=[cluster_name])["clusters"][0][
+        "clusterArn"
+    ]
+    service_arn = client.describe_services(
+        cluster=cluster_name, services=[service_name]
+    )["services"][0]["serviceArn"]
+    assert task_set["clusterArn"] == cluster_arn
+    assert task_set["serviceArn"] == service_arn
+    assert task_set["taskDefinition"].endswith("{0}:1".format(task_def_name))
+    assert task_set["scale"] == {"value": 100.0, "unit": "PERCENT"}
+    assert (
+        task_set["loadBalancers"][0]["targetGroupArn"]
+        == "arn:aws:elasticloadbalancing:us-east-1:01234567890:targetgroup/c26b93c1bc35466ba792d5b08fe6a5bc/ec39113f8831453a"
+    )
+    assert task_set["loadBalancers"][0]["containerPort"] == 8080
+    assert task_set["loadBalancers"][0]["containerName"] == "hello_world"
+
+
+@mock_ecs
+def test_describe_task_sets():
+    cluster_name = "test_ecs_cluster"
+    service_name = "test_ecs_service"
+    task_def_name = "test_ecs_task"
+
+    client = boto3.client("ecs", region_name="us-east-1")
+    _ = client.create_cluster(clusterName=cluster_name)
+    _ = client.register_task_definition(
+        family=task_def_name,
+        containerDefinitions=[
+            {
+                "name": "hello_world",
+                "image": "docker/hello-world:latest",
+                "cpu": 1024,
+                "memory": 400,
+                "essential": True,
+                "environment": [
+                    {"name": "AWS_ACCESS_KEY_ID", "value": "SOME_ACCESS_KEY"}
+                ],
+                "logConfiguration": {"logDriver": "json-file"},
+            }
+        ],
+    )
+    _ = client.create_service(
+        cluster=cluster_name,
+        serviceName=service_name,
+        taskDefinition=task_def_name,
+        desiredCount=2,
+        deploymentController={"type": "EXTERNAL"},
+    )
+
+    load_balancers = [
+        {
+            "targetGroupArn": "arn:aws:elasticloadbalancing:us-east-1:01234567890:targetgroup/c26b93c1bc35466ba792d5b08fe6a5bc/ec39113f8831453a",
+            "containerName": "hello_world",
+            "containerPort": 8080,
+        }
+    ]
+
+    _ = client.create_task_set(
+        cluster=cluster_name,
+        service=service_name,
+        taskDefinition=task_def_name,
+        loadBalancers=load_balancers,
+    )
+    task_sets = client.describe_task_sets(cluster=cluster_name, service=service_name)[
+        "taskSets"
+    ]
+    assert "tags" not in task_sets[0]
+
+    task_sets = client.describe_task_sets(
+        cluster=cluster_name, service=service_name, include=["TAGS"],
+    )["taskSets"]
+
+    cluster_arn = client.describe_clusters(clusters=[cluster_name])["clusters"][0][
+        "clusterArn"
+    ]
+
+    service_arn = client.describe_services(
+        cluster=cluster_name, services=[service_name]
+    )["services"][0]["serviceArn"]
+
+    assert "tags" in task_sets[0]
+    assert len(task_sets) == 1
+    assert task_sets[0]["taskDefinition"].endswith("{0}:1".format(task_def_name))
+    assert task_sets[0]["clusterArn"] == cluster_arn
+    assert task_sets[0]["serviceArn"] == service_arn
+    assert task_sets[0]["serviceArn"].endswith(service_name)
+    assert task_sets[0]["scale"] == {"value": 100.0, "unit": "PERCENT"}
+    assert task_sets[0]["taskSetArn"].endswith(task_sets[0]["id"])
+    assert (
+        task_sets[0]["loadBalancers"][0]["targetGroupArn"]
+        == "arn:aws:elasticloadbalancing:us-east-1:01234567890:targetgroup/c26b93c1bc35466ba792d5b08fe6a5bc/ec39113f8831453a"
+    )
+    assert task_sets[0]["loadBalancers"][0]["containerPort"] == 8080
+    assert task_sets[0]["loadBalancers"][0]["containerName"] == "hello_world"
+
+
+@mock_ecs
+def test_delete_task_set():
+    cluster_name = "test_ecs_cluster"
+    service_name = "test_ecs_service"
+    task_def_name = "test_ecs_task"
+
+    client = boto3.client("ecs", region_name="us-east-1")
+    _ = client.create_cluster(clusterName=cluster_name)
+    _ = client.register_task_definition(
+        family=task_def_name,
+        containerDefinitions=[
+            {
+                "name": "hello_world",
+                "image": "docker/hello-world:latest",
+                "cpu": 1024,
+                "memory": 400,
+                "essential": True,
+                "environment": [
+                    {"name": "AWS_ACCESS_KEY_ID", "value": "SOME_ACCESS_KEY"}
+                ],
+                "logConfiguration": {"logDriver": "json-file"},
+            }
+        ],
+    )
+    _ = client.create_service(
+        cluster=cluster_name,
+        serviceName=service_name,
+        taskDefinition=task_def_name,
+        desiredCount=2,
+        deploymentController={"type": "EXTERNAL"},
+    )
+
+    task_set = client.create_task_set(
+        cluster=cluster_name, service=service_name, taskDefinition=task_def_name,
+    )["taskSet"]
+
+    task_sets = client.describe_task_sets(
+        cluster=cluster_name, service=service_name, taskSets=[task_set["taskSetArn"]],
+    )["taskSets"]
+
+    assert len(task_sets) == 1
+
+    response = client.delete_task_set(
+        cluster=cluster_name, service=service_name, taskSet=task_set["taskSetArn"],
+    )
+    assert response["taskSet"]["taskSetArn"] == task_set["taskSetArn"]
+
+    task_sets = client.describe_task_sets(
+        cluster=cluster_name, service=service_name, taskSets=[task_set["taskSetArn"]],
+    )["taskSets"]
+
+    assert len(task_sets) == 0
+
+    with assert_raises(ClientError):
+        _ = client.delete_task_set(
+            cluster=cluster_name, service=service_name, taskSet=task_set["taskSetArn"],
+        )
+
+
+@mock_ecs
+def test_update_service_primary_task_set():
+    cluster_name = "test_ecs_cluster"
+    service_name = "test_ecs_service"
+    task_def_name = "test_ecs_task"
+
+    client = boto3.client("ecs", region_name="us-east-1")
+    _ = client.create_cluster(clusterName=cluster_name)
+    _ = client.register_task_definition(
+        family="test_ecs_task",
+        containerDefinitions=[
+            {
+                "name": "hello_world",
+                "image": "docker/hello-world:latest",
+                "cpu": 1024,
+                "memory": 400,
+                "essential": True,
+                "environment": [
+                    {"name": "AWS_ACCESS_KEY_ID", "value": "SOME_ACCESS_KEY"}
+                ],
+                "logConfiguration": {"logDriver": "json-file"},
+            }
+        ],
+    )
+    _ = client.create_service(
+        cluster=cluster_name,
+        serviceName=service_name,
+        desiredCount=2,
+        deploymentController={"type": "EXTERNAL"},
+    )
+
+    task_set = client.create_task_set(
+        cluster=cluster_name, service=service_name, taskDefinition=task_def_name,
+    )["taskSet"]
+
+    service = client.describe_services(cluster=cluster_name, services=[service_name],)[
+        "services"
+    ][0]
+
+    _ = client.update_service_primary_task_set(
+        cluster=cluster_name,
+        service=service_name,
+        primaryTaskSet=task_set["taskSetArn"],
+    )
+
+    service = client.describe_services(cluster=cluster_name, services=[service_name],)[
+        "services"
+    ][0]
+    assert service["taskSets"][0]["status"] == "PRIMARY"
+    assert service["taskDefinition"] == service["taskSets"][0]["taskDefinition"]
+
+    another_task_set = client.create_task_set(
+        cluster=cluster_name, service=service_name, taskDefinition=task_def_name,
+    )["taskSet"]
+    service = client.describe_services(cluster=cluster_name, services=[service_name],)[
+        "services"
+    ][0]
+    assert service["taskSets"][1]["status"] == "ACTIVE"
+
+    _ = client.update_service_primary_task_set(
+        cluster=cluster_name,
+        service=service_name,
+        primaryTaskSet=another_task_set["taskSetArn"],
+    )
+    service = client.describe_services(cluster=cluster_name, services=[service_name],)[
+        "services"
+    ][0]
+    assert service["taskSets"][0]["status"] == "ACTIVE"
+    assert service["taskSets"][1]["status"] == "PRIMARY"
+    assert service["taskDefinition"] == service["taskSets"][1]["taskDefinition"]
+
+
+@mock_ecs
+def test_update_task_set():
+    cluster_name = "test_ecs_cluster"
+    service_name = "test_ecs_service"
+    task_def_name = "test_ecs_task"
+
+    client = boto3.client("ecs", region_name="us-east-1")
+    _ = client.create_cluster(clusterName=cluster_name)
+    _ = client.register_task_definition(
+        family=task_def_name,
+        containerDefinitions=[
+            {
+                "name": "hello_world",
+                "image": "docker/hello-world:latest",
+                "cpu": 1024,
+                "memory": 400,
+                "essential": True,
+                "environment": [
+                    {"name": "AWS_ACCESS_KEY_ID", "value": "SOME_ACCESS_KEY"}
+                ],
+                "logConfiguration": {"logDriver": "json-file"},
+            }
+        ],
+    )
+    _ = client.create_service(
+        cluster=cluster_name,
+        serviceName=service_name,
+        desiredCount=2,
+        deploymentController={"type": "EXTERNAL"},
+    )
+
+    task_set = client.create_task_set(
+        cluster=cluster_name, service=service_name, taskDefinition=task_def_name,
+    )["taskSet"]
+
+    another_task_set = client.create_task_set(
+        cluster=cluster_name, service=service_name, taskDefinition=task_def_name,
+    )["taskSet"]
+    assert another_task_set["scale"]["unit"] == "PERCENT"
+    assert another_task_set["scale"]["value"] == 100.0
+
+    client.update_task_set(
+        cluster=cluster_name,
+        service=service_name,
+        taskSet=task_set["taskSetArn"],
+        scale={"value": 25.0, "unit": "PERCENT"},
+    )
+
+    updated_task_set = client.describe_task_sets(
+        cluster=cluster_name, service=service_name, taskSets=[task_set["taskSetArn"]],
+    )["taskSets"][0]
+    assert updated_task_set["scale"]["value"] == 25.0
+    assert updated_task_set["scale"]["unit"] == "PERCENT"
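One detail of `Service.response_object` that the tests above only exercise from the EXTERNAL side: services on the default ECS deployment controller omit `deploymentController` and `taskSets` from describe responses entirely, while EXTERNAL services include both. A minimal sketch of the EXTERNAL case (cluster and service names are illustrative, not part of the patch):

```python
import boto3
from moto import mock_ecs

with mock_ecs():
    client = boto3.client("ecs", region_name="us-east-1")
    client.create_cluster(clusterName="c1")
    client.create_service(
        cluster="c1",
        serviceName="external-svc",
        desiredCount=1,
        deploymentController={"type": "EXTERNAL"},
    )
    svc = client.describe_services(cluster="c1", services=["external-svc"])[
        "services"
    ][0]
    # EXTERNAL services expose both keys; ECS-controller services drop them.
    assert svc["deploymentController"] == {"type": "EXTERNAL"}
    assert svc["taskSets"] == []
```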
"opsworks": ("opsworks", "opsworks_backends"), "organizations": ("organizations", "organizations_backends"), "polly": ("polly", "polly_backends"), + "ram": ("ram", "ram_backends"), "rds": ("rds2", "rds2_backends"), "redshift": ("redshift", "redshift_backends"), "resource-groups": ("resourcegroups", "resourcegroups_backends"), diff --git a/moto/ram/__init__.py b/moto/ram/__init__.py new file mode 100644 index 000000000000..a4925944f0bc --- /dev/null +++ b/moto/ram/__init__.py @@ -0,0 +1,5 @@ +from .models import ram_backends +from ..core.models import base_decorator + +ram_backend = ram_backends["us-east-1"] +mock_ram = base_decorator(ram_backends) diff --git a/moto/ram/exceptions.py b/moto/ram/exceptions.py new file mode 100644 index 000000000000..49e57a61a911 --- /dev/null +++ b/moto/ram/exceptions.py @@ -0,0 +1,39 @@ +from __future__ import unicode_literals +from moto.core.exceptions import JsonRESTError + + +class InvalidParameterException(JsonRESTError): + code = 400 + + def __init__(self, message): + super(InvalidParameterException, self).__init__( + "InvalidParameterException", message + ) + + +class MalformedArnException(JsonRESTError): + code = 400 + + def __init__(self, message): + super(MalformedArnException, self).__init__("MalformedArnException", message) + + +class OperationNotPermittedException(JsonRESTError): + code = 400 + + def __init__(self): + super(OperationNotPermittedException, self).__init__( + "OperationNotPermittedException", + "Unable to enable sharing with AWS Organizations. " + "Received AccessDeniedException from AWSOrganizations with the following error message: " + "You don't have permissions to access this resource.", + ) + + +class UnknownResourceException(JsonRESTError): + code = 400 + + def __init__(self, message): + super(UnknownResourceException, self).__init__( + "UnknownResourceException", message + ) diff --git a/moto/ram/models.py b/moto/ram/models.py new file mode 100644 index 000000000000..d38099374c62 --- /dev/null +++ b/moto/ram/models.py @@ -0,0 +1,247 @@ +import re +import string +from datetime import datetime +import random +from uuid import uuid4 + +from boto3 import Session +from moto.core import BaseBackend, BaseModel, ACCOUNT_ID +from moto.core.utils import unix_time +from moto.organizations import organizations_backends +from moto.ram.exceptions import ( + MalformedArnException, + InvalidParameterException, + UnknownResourceException, + OperationNotPermittedException, +) + + +def random_resource_id(size): + return "".join(random.choice(string.digits + "abcdef") for _ in range(size)) + + +class ResourceShare(BaseModel): + # List of shareable resources can be found here + # https://docs.aws.amazon.com/ram/latest/userguide/shareable.html + SHAREABLE_RESOURCES = [ + "cluster", # Amazon Aurora cluster + "component", # Amazon EC2 Image Builder component + "group", # AWS Resource Groups + "image", # Amazon EC2 Image Builder image + "image-recipe", # Amazon EC2 Image Builder image recipe + "license-configuration", # AWS License Manager configuration + "mesh", # AWS App Mesh + "prefix-list", # Amazon EC2 prefix list + "project", # AWS CodeBuild project + "report-group", # AWS CodeBuild report group + "resolver-rule", # Amazon Route 53 forwarding rule + "subnet", # Amazon EC2 subnet + "transit-gateway", # Amazon EC2 transit gateway + ] + + def __init__(self, region, **kwargs): + self.region = region + + self.allow_external_principals = kwargs.get("allowExternalPrincipals", True) + self.arn = "arn:aws:ram:{0}:{1}:resource-share/{2}".format( + 
self.region, ACCOUNT_ID, uuid4() + ) + self.creation_time = datetime.utcnow() + self.feature_set = "STANDARD" + self.last_updated_time = datetime.utcnow() + self.name = kwargs["name"] + self.owning_account_id = ACCOUNT_ID + self.principals = [] + self.resource_arns = [] + self.status = "ACTIVE" + + @property + def organizations_backend(self): + return organizations_backends["global"] + + def add_principals(self, principals): + for principal in principals: + match = re.search( + r"^arn:aws:organizations::\d{12}:organization/(o-\w+)$", principal + ) + if match: + organization = self.organizations_backend.describe_organization() + if principal == organization["Organization"]["Arn"]: + continue + else: + raise UnknownResourceException( + "Organization {} could not be found.".format(match.group(1)) + ) + + match = re.search( + r"^arn:aws:organizations::\d{12}:ou/(o-\w+)/(ou-[\w-]+)$", principal + ) + if match: + roots = self.organizations_backend.list_roots() + root_id = next( + ( + root["Id"] + for root in roots["Roots"] + if root["Name"] == "Root" and match.group(1) in root["Arn"] + ), + None, + ) + + if root_id: + ous = self.organizations_backend.list_organizational_units_for_parent( + ParentId=root_id + ) + if any(principal == ou["Arn"] for ou in ous["OrganizationalUnits"]): + continue + + raise UnknownResourceException( + "OrganizationalUnit {} in unknown organization could not be found.".format( + match.group(2) + ) + ) + + if not re.match(r"^\d{12}$", principal): + raise InvalidParameterException( + "Principal ID {} is malformed. " + "Verify the ID and try again.".format(principal) + ) + + for principal in principals: + self.principals.append(principal) + + def add_resources(self, resource_arns): + for resource in resource_arns: + match = re.search( + r"^arn:aws:[a-z0-9-]+:[a-z0-9-]*:[0-9]{12}:([a-z-]+)[/:].*$", resource + ) + if not match: + raise MalformedArnException( + "The specified resource ARN {} is not valid. " + "Verify the ARN and try again.".format(resource) + ) + + if match.group(1) not in self.SHAREABLE_RESOURCES: + raise MalformedArnException( + "You cannot share the selected resource type." 
+ ) + + for resource in resource_arns: + self.resource_arns.append(resource) + + def delete(self): + self.last_updated_time = datetime.utcnow() + self.status = "DELETED" + + def describe(self): + return { + "allowExternalPrincipals": self.allow_external_principals, + "creationTime": unix_time(self.creation_time), + "featureSet": self.feature_set, + "lastUpdatedTime": unix_time(self.last_updated_time), + "name": self.name, + "owningAccountId": self.owning_account_id, + "resourceShareArn": self.arn, + "status": self.status, + } + + def update(self, **kwargs): + self.allow_external_principals = kwargs.get( + "allowExternalPrincipals", self.allow_external_principals + ) + self.last_updated_time = datetime.utcnow() + self.name = kwargs.get("name", self.name) + + +class ResourceAccessManagerBackend(BaseBackend): + def __init__(self, region_name=None): + super(ResourceAccessManagerBackend, self).__init__() + self.region_name = region_name + self.resource_shares = [] + + @property + def organizations_backend(self): + return organizations_backends["global"] + + def reset(self): + region_name = self.region_name + self.__dict__ = {} + self.__init__(region_name) + + def create_resource_share(self, **kwargs): + resource = ResourceShare(self.region_name, **kwargs) + resource.add_principals(kwargs.get("principals", [])) + resource.add_resources(kwargs.get("resourceArns", [])) + + self.resource_shares.append(resource) + + response = resource.describe() + response.pop("featureSet") + + return dict(resourceShare=response) + + def get_resource_shares(self, **kwargs): + owner = kwargs["resourceOwner"] + + if owner not in ["SELF", "OTHER-ACCOUNTS"]: + raise InvalidParameterException( + "{} is not a valid resource owner. " + "Specify either SELF or OTHER-ACCOUNTS and try again.".format(owner) + ) + + if owner == "OTHER-ACCOUNTS": + raise NotImplementedError( + "Value 'OTHER-ACCOUNTS' for parameter 'resourceOwner' not implemented." 
+ ) + + resouces = [resource.describe() for resource in self.resource_shares] + + return dict(resourceShares=resouces) + + def update_resource_share(self, **kwargs): + arn = kwargs["resourceShareArn"] + + resource = next( + (resource for resource in self.resource_shares if arn == resource.arn), + None, + ) + + if not resource: + raise UnknownResourceException( + "ResourceShare {} could not be found.".format(arn) + ) + + resource.update(**kwargs) + response = resource.describe() + response.pop("featureSet") + + return dict(resourceShare=response) + + def delete_resource_share(self, arn): + resource = next( + (resource for resource in self.resource_shares if arn == resource.arn), + None, + ) + + if not resource: + raise UnknownResourceException( + "ResourceShare {} could not be found.".format(arn) + ) + + resource.delete() + + return dict(returnValue=True) + + def enable_sharing_with_aws_organization(self): + if not self.organizations_backend.org: + raise OperationNotPermittedException + + return dict(returnValue=True) + + +ram_backends = {} +for region in Session().get_available_regions("ram"): + ram_backends[region] = ResourceAccessManagerBackend(region) +for region in Session().get_available_regions("ram", partition_name="aws-us-gov"): + ram_backends[region] = ResourceAccessManagerBackend(region) +for region in Session().get_available_regions("ram", partition_name="aws-cn"): + ram_backends[region] = ResourceAccessManagerBackend(region) diff --git a/moto/ram/responses.py b/moto/ram/responses.py new file mode 100644 index 000000000000..b012540076d5 --- /dev/null +++ b/moto/ram/responses.py @@ -0,0 +1,39 @@ +from __future__ import unicode_literals +from moto.core.responses import BaseResponse +from .models import ram_backends +import json + + +class ResourceAccessManagerResponse(BaseResponse): + SERVICE_NAME = "ram" + + @property + def ram_backend(self): + return ram_backends[self.region] + + @property + def request_params(self): + try: + if self.method == "DELETE": + return None + + return json.loads(self.body) + except ValueError: + return {} + + def create_resource_share(self): + return json.dumps(self.ram_backend.create_resource_share(**self.request_params)) + + def get_resource_shares(self): + return json.dumps(self.ram_backend.get_resource_shares(**self.request_params)) + + def update_resource_share(self): + return json.dumps(self.ram_backend.update_resource_share(**self.request_params)) + + def delete_resource_share(self): + return json.dumps( + self.ram_backend.delete_resource_share(self._get_param("resourceShareArn")) + ) + + def enable_sharing_with_aws_organization(self): + return json.dumps(self.ram_backend.enable_sharing_with_aws_organization()) diff --git a/moto/ram/urls.py b/moto/ram/urls.py new file mode 100644 index 000000000000..1414b89b06f5 --- /dev/null +++ b/moto/ram/urls.py @@ -0,0 +1,12 @@ +from __future__ import unicode_literals +from .responses import ResourceAccessManagerResponse + +url_bases = ["https?://ram.(.+).amazonaws.com"] + +url_paths = { + "{0}/createresourceshare$": ResourceAccessManagerResponse.dispatch, + "{0}/deleteresourceshare/?$": ResourceAccessManagerResponse.dispatch, + "{0}/enablesharingwithawsorganization$": ResourceAccessManagerResponse.dispatch, + "{0}/getresourceshares$": ResourceAccessManagerResponse.dispatch, + "{0}/updateresourceshare$": ResourceAccessManagerResponse.dispatch, +} diff --git a/tests/test_ram/test_ram.py b/tests/test_ram/test_ram.py new file mode 100644 index 000000000000..62422192958e --- /dev/null +++ 
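Before the test file, a distillation of the validation rules encoded in `ResourceShare.add_principals` and `add_resources` above, since the regexes are easy to misread. This standalone sketch restates which inputs pass each gate and is not part of the patch:

```python
import re

# The three principal shapes add_principals accepts, in the order checked:
# a whole organization, an organizational unit, or a bare 12-digit account ID.
# Anything else raises InvalidParameterException.
ORG_RE = re.compile(r"^arn:aws:organizations::\d{12}:organization/(o-\w+)$")
OU_RE = re.compile(r"^arn:aws:organizations::\d{12}:ou/(o-\w+)/(ou-[\w-]+)$")
ACCOUNT_RE = re.compile(r"^\d{12}$")

assert ORG_RE.match("arn:aws:organizations::123456789012:organization/o-abc123")
assert OU_RE.match("arn:aws:organizations::123456789012:ou/o-abc123/ou-ab12-cdef")
assert ACCOUNT_RE.match("123456789012")

# add_resources additionally requires the resource type extracted from the ARN
# to appear in ResourceShare.SHAREABLE_RESOURCES, e.g. "transit-gateway".
RESOURCE_RE = re.compile(r"^arn:aws:[a-z0-9-]+:[a-z0-9-]*:[0-9]{12}:([a-z-]+)[/:].*$")
m = RESOURCE_RE.match("arn:aws:ec2:us-east-1:123456789012:transit-gateway/tgw-1234")
assert m and m.group(1) == "transit-gateway"
```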
b/tests/test_ram/test_ram.py @@ -0,0 +1,381 @@ +import time +from datetime import datetime + +import boto3 +import sure # noqa +from botocore.exceptions import ClientError +from nose.tools import assert_raises + +from moto import mock_ram, mock_organizations +from moto.core import ACCOUNT_ID + + +@mock_ram +def test_create_resource_share(): + # given + client = boto3.client("ram", region_name="us-east-1") + + # when + response = client.create_resource_share(name="test") + + # then + resource = response["resourceShare"] + resource["allowExternalPrincipals"].should.be.ok + resource["creationTime"].should.be.a(datetime) + resource["lastUpdatedTime"].should.be.a(datetime) + resource["name"].should.equal("test") + resource["owningAccountId"].should.equal(ACCOUNT_ID) + resource["resourceShareArn"].should.match( + r"arn:aws:ram:us-east-1:\d{12}:resource-share/[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}" + ) + resource["status"].should.equal("ACTIVE") + resource.should_not.have.key("featureSet") + + # creating a resource share with the name should result in a second one + # not overwrite/update the old one + # when + response = client.create_resource_share( + name="test", + allowExternalPrincipals=False, + resourceArns=[ + "arn:aws:ec2:us-east-1:{}:transit-gateway/tgw-123456789".format(ACCOUNT_ID) + ], + ) + + # then + resource = response["resourceShare"] + resource["allowExternalPrincipals"].should_not.be.ok + resource["creationTime"].should.be.a(datetime) + resource["lastUpdatedTime"].should.be.a(datetime) + resource["name"].should.equal("test") + resource["owningAccountId"].should.equal(ACCOUNT_ID) + resource["resourceShareArn"].should.match( + r"arn:aws:ram:us-east-1:\d{12}:resource-share/[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}" + ) + resource["status"].should.equal("ACTIVE") + + response = client.get_resource_shares(resourceOwner="SELF") + response["resourceShares"].should.have.length_of(2) + + +@mock_ram +def test_create_resource_share_errors(): + # given + client = boto3.client("ram", region_name="us-east-1") + + # invalid ARN + # when + with assert_raises(ClientError) as e: + client.create_resource_share(name="test", resourceArns=["inalid-arn"]) + ex = e.exception + ex.operation_name.should.equal("CreateResourceShare") + ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) + ex.response["Error"]["Code"].should.contain("MalformedArnException") + ex.response["Error"]["Message"].should.equal( + "The specified resource ARN inalid-arn is not valid. " + "Verify the ARN and try again." + ) + + # valid ARN, but not shareable resource type + # when + with assert_raises(ClientError) as e: + client.create_resource_share( + name="test", resourceArns=["arn:aws:iam::{}:role/test".format(ACCOUNT_ID)] + ) + ex = e.exception + ex.operation_name.should.equal("CreateResourceShare") + ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) + ex.response["Error"]["Code"].should.contain("MalformedArnException") + ex.response["Error"]["Message"].should.equal( + "You cannot share the selected resource type." 
+ ) + + # invalid principal ID + # when + with assert_raises(ClientError) as e: + client.create_resource_share( + name="test", + principals=["invalid"], + resourceArns=[ + "arn:aws:ec2:us-east-1:{}:transit-gateway/tgw-123456789".format( + ACCOUNT_ID + ) + ], + ) + ex = e.exception + ex.operation_name.should.equal("CreateResourceShare") + ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) + ex.response["Error"]["Code"].should.contain("InvalidParameterException") + ex.response["Error"]["Message"].should.equal( + "Principal ID invalid is malformed. Verify the ID and try again." + ) + + +@mock_ram +@mock_organizations +def test_create_resource_share_with_organization(): + # given + client = boto3.client("organizations", region_name="us-east-1") + org_arn = client.create_organization(FeatureSet="ALL")["Organization"]["Arn"] + root_id = client.list_roots()["Roots"][0]["Id"] + ou_arn = client.create_organizational_unit(ParentId=root_id, Name="test")[ + "OrganizationalUnit" + ]["Arn"] + client = boto3.client("ram", region_name="us-east-1") + + # share in whole Organization + # when + response = client.create_resource_share( + name="test", + principals=[org_arn], + resourceArns=[ + "arn:aws:ec2:us-east-1:{}:transit-gateway/tgw-123456789".format(ACCOUNT_ID) + ], + ) + + # then + response["resourceShare"]["name"].should.equal("test") + + # share in an OU + # when + response = client.create_resource_share( + name="test", + principals=[ou_arn], + resourceArns=[ + "arn:aws:ec2:us-east-1:{}:transit-gateway/tgw-123456789".format(ACCOUNT_ID) + ], + ) + + # then + response["resourceShare"]["name"].should.equal("test") + + +@mock_ram +@mock_organizations +def test_create_resource_share_with_organization_errors(): + # given + client = boto3.client("organizations", region_name="us-east-1") + client.create_organization(FeatureSet="ALL") + root_id = client.list_roots()["Roots"][0]["Id"] + client.create_organizational_unit(ParentId=root_id, Name="test") + client = boto3.client("ram", region_name="us-east-1") + + # unknown Organization + # when + with assert_raises(ClientError) as e: + client.create_resource_share( + name="test", + principals=[ + "arn:aws:organizations::{}:organization/o-unknown".format(ACCOUNT_ID) + ], + resourceArns=[ + "arn:aws:ec2:us-east-1:{}:transit-gateway/tgw-123456789".format( + ACCOUNT_ID + ) + ], + ) + ex = e.exception + ex.operation_name.should.equal("CreateResourceShare") + ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) + ex.response["Error"]["Code"].should.contain("UnknownResourceException") + ex.response["Error"]["Message"].should.equal( + "Organization o-unknown could not be found." + ) + + # unknown OU + # when + with assert_raises(ClientError) as e: + client.create_resource_share( + name="test", + principals=[ + "arn:aws:organizations::{}:ou/o-unknown/ou-unknown".format(ACCOUNT_ID) + ], + resourceArns=[ + "arn:aws:ec2:us-east-1:{}:transit-gateway/tgw-123456789".format( + ACCOUNT_ID + ) + ], + ) + ex = e.exception + ex.operation_name.should.equal("CreateResourceShare") + ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) + ex.response["Error"]["Code"].should.contain("UnknownResourceException") + ex.response["Error"]["Message"].should.equal( + "OrganizationalUnit ou-unknown in unknown organization could not be found." 
+    )
+
+
+@mock_ram
+def test_get_resource_shares():
+    # given
+    client = boto3.client("ram", region_name="us-east-1")
+    client.create_resource_share(name="test")
+
+    # when
+    response = client.get_resource_shares(resourceOwner="SELF")
+
+    # then
+    response["resourceShares"].should.have.length_of(1)
+    resource = response["resourceShares"][0]
+    resource["allowExternalPrincipals"].should.be.ok
+    resource["creationTime"].should.be.a(datetime)
+    resource["featureSet"].should.equal("STANDARD")
+    resource["lastUpdatedTime"].should.be.a(datetime)
+    resource["name"].should.equal("test")
+    resource["owningAccountId"].should.equal(ACCOUNT_ID)
+    resource["resourceShareArn"].should.match(
+        r"arn:aws:ram:us-east-1:\d{12}:resource-share/[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}"
+    )
+    resource["status"].should.equal("ACTIVE")
+
+
+@mock_ram
+def test_get_resource_shares_errors():
+    # given
+    client = boto3.client("ram", region_name="us-east-1")
+
+    # invalid resource owner
+    # when
+    with assert_raises(ClientError) as e:
+        client.get_resource_shares(resourceOwner="invalid")
+    ex = e.exception
+    ex.operation_name.should.equal("GetResourceShares")
+    ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400)
+    ex.response["Error"]["Code"].should.contain("InvalidParameterException")
+    ex.response["Error"]["Message"].should.equal(
+        "invalid is not a valid resource owner. "
+        "Specify either SELF or OTHER-ACCOUNTS and try again."
+    )
+
+
+@mock_ram
+def test_update_resource_share():
+    # given
+    client = boto3.client("ram", region_name="us-east-1")
+    arn = client.create_resource_share(name="test")["resourceShare"]["resourceShareArn"]
+
+    # when
+    time.sleep(0.1)
+    response = client.update_resource_share(resourceShareArn=arn, name="test-update")
+
+    # then
+    resource = response["resourceShare"]
+    resource["allowExternalPrincipals"].should.be.ok
+    resource["name"].should.equal("test-update")
+    resource["owningAccountId"].should.equal(ACCOUNT_ID)
+    resource["resourceShareArn"].should.match(
+        r"arn:aws:ram:us-east-1:\d{12}:resource-share/[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}"
+    )
+    resource["status"].should.equal("ACTIVE")
+    resource.should_not.have.key("featureSet")
+    creation_time = resource["creationTime"]
+    resource["lastUpdatedTime"].should.be.greater_than(creation_time)
+
+    response = client.get_resource_shares(resourceOwner="SELF")
+    response["resourceShares"].should.have.length_of(1)
+
+
+@mock_ram
+def test_update_resource_share_errors():
+    # given
+    client = boto3.client("ram", region_name="us-east-1")
+
+    # non-existent resource share
+    # when
+    with assert_raises(ClientError) as e:
+        client.update_resource_share(
+            resourceShareArn="arn:aws:ram:us-east-1:{}:resource-share/not-existing".format(
+                ACCOUNT_ID
+            ),
+            name="test-update",
+        )
+    ex = e.exception
+    ex.operation_name.should.equal("UpdateResourceShare")
+    ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400)
+    ex.response["Error"]["Code"].should.contain("UnknownResourceException")
+    ex.response["Error"]["Message"].should.equal(
+        "ResourceShare arn:aws:ram:us-east-1:{}:resource-share/not-existing could not be found.".format(
+            ACCOUNT_ID
+        )
+    )
+
+
+@mock_ram
+def test_delete_resource_share():
+    # given
+    client = boto3.client("ram", region_name="us-east-1")
+    arn = client.create_resource_share(name="test")["resourceShare"]["resourceShareArn"]
+
+    # when
+    time.sleep(0.1)
+    response = client.delete_resource_share(resourceShareArn=arn)
+
+    # then
+    response["returnValue"].should.be.ok
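+    # Note: moto models this as a soft delete - the share below is still
+    # returned by get_resource_shares, just with status DELETED.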
+
+    response = client.get_resource_shares(resourceOwner="SELF")
+    response["resourceShares"].should.have.length_of(1)
+    resource = response["resourceShares"][0]
+    resource["status"].should.equal("DELETED")
+    creation_time = resource["creationTime"]
+    resource["lastUpdatedTime"].should.be.greater_than(creation_time)
+
+
+@mock_ram
+def test_delete_resource_share_errors():
+    # given
+    client = boto3.client("ram", region_name="us-east-1")
+
+    # non-existent resource share
+    # when
+    with assert_raises(ClientError) as e:
+        client.delete_resource_share(
+            resourceShareArn="arn:aws:ram:us-east-1:{}:resource-share/not-existing".format(
+                ACCOUNT_ID
+            )
+        )
+    ex = e.exception
+    ex.operation_name.should.equal("DeleteResourceShare")
+    ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400)
+    ex.response["Error"]["Code"].should.contain("UnknownResourceException")
+    ex.response["Error"]["Message"].should.equal(
+        "ResourceShare arn:aws:ram:us-east-1:{}:resource-share/not-existing could not be found.".format(
+            ACCOUNT_ID
+        )
+    )
+
+
+@mock_ram
+@mock_organizations
+def test_enable_sharing_with_aws_organization():
+    # given
+    client = boto3.client("organizations", region_name="us-east-1")
+    client.create_organization(FeatureSet="ALL")
+    client = boto3.client("ram", region_name="us-east-1")
+
+    # when
+    response = client.enable_sharing_with_aws_organization()
+
+    # then
+    response["returnValue"].should.be.ok
+
+
+@mock_ram
+@mock_organizations
+def test_enable_sharing_with_aws_organization_errors():
+    # given
+    client = boto3.client("ram", region_name="us-east-1")
+
+    # no Organization defined
+    # when
+    with assert_raises(ClientError) as e:
+        client.enable_sharing_with_aws_organization()
+    ex = e.exception
+    ex.operation_name.should.equal("EnableSharingWithAwsOrganization")
+    ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400)
+    ex.response["Error"]["Code"].should.contain("OperationNotPermittedException")
+    ex.response["Error"]["Message"].should.equal(
+        "Unable to enable sharing with AWS Organizations. "
+        "Received AccessDeniedException from AWSOrganizations with the following error message: "
+        "You don't have permissions to access this resource."
+    )
From b09c8034e603a59906cf2fa33e8dc25336f1e179 Mon Sep 17 00:00:00 2001
From: Alan Baldwin
Date: Tue, 21 Jul 2020 09:08:49 -0600
Subject: [PATCH 455/658] Adding VPN Gateway filters (#3155)

* Adding attachment.vpc-id, attachment.state, type, and vpn-gateway-id
  filters for VPN Gateways.
fixes #3154

* Run formatting on tests

Co-authored-by: Alan Baldwin
---
 moto/ec2/models.py                            |   8 ++
 .../test_ec2/test_virtual_private_gateways.py | 135 +++++++++++++++++-
 2 files changed, 142 insertions(+), 1 deletion(-)

diff --git a/moto/ec2/models.py b/moto/ec2/models.py
index 89dd753f949f..6e320c0aef20 100644
--- a/moto/ec2/models.py
+++ b/moto/ec2/models.py
@@ -4973,6 +4973,14 @@ def __init__(self, ec2_backend, id, type):
         super(VpnGateway, self).__init__()
 
     def get_filter_value(self, filter_name):
+        if filter_name == "attachment.vpc-id":
+            return self.attachments.keys()
+        elif filter_name == "attachment.state":
+            return [attachment.state for attachment in self.attachments.values()]
+        elif filter_name == "vpn-gateway-id":
+            return self.id
+        elif filter_name == "type":
+            return self.type
         return super(VpnGateway, self).get_filter_value(
             filter_name, "DescribeVpnGateways"
         )
diff --git a/tests/test_ec2/test_virtual_private_gateways.py b/tests/test_ec2/test_virtual_private_gateways.py
index bb944df0bb83..23139c08e15f 100644
--- a/tests/test_ec2/test_virtual_private_gateways.py
+++ b/tests/test_ec2/test_virtual_private_gateways.py
@@ -1,8 +1,9 @@
 from __future__ import unicode_literals
 import boto
+import boto3
 import sure  # noqa
 
-from moto import mock_ec2_deprecated
+from moto import mock_ec2_deprecated, mock_ec2
 
 
 @mock_ec2_deprecated
@@ -33,6 +34,138 @@ def test_describe_vpn_gateway():
     vpn_gateway.availability_zone.should.equal("us-east-1a")
 
 
+@mock_ec2
+def test_describe_vpn_connections_attachment_vpc_id_filter():
+    """ describe_vpn_gateways attachment.vpc-id filter """
+
+    ec2 = boto3.client("ec2", region_name="us-east-1")
+
+    vpc = ec2.create_vpc(CidrBlock="10.0.0.0/16")
+    vpc_id = vpc["Vpc"]["VpcId"]
+    gateway = ec2.create_vpn_gateway(AvailabilityZone="us-east-1a", Type="ipsec.1")
+    gateway_id = gateway["VpnGateway"]["VpnGatewayId"]
+
+    ec2.attach_vpn_gateway(VpcId=vpc_id, VpnGatewayId=gateway_id)
+
+    gateways = ec2.describe_vpn_gateways(
+        Filters=[{"Name": "attachment.vpc-id", "Values": [vpc_id]}]
+    )
+
+    gateways["VpnGateways"].should.have.length_of(1)
+    gateways["VpnGateways"][0]["VpnGatewayId"].should.equal(gateway_id)
+    gateways["VpnGateways"][0]["VpcAttachments"].should.contain(
+        {"State": "attached", "VpcId": vpc_id}
+    )
+
+
+@mock_ec2
+def test_describe_vpn_connections_state_filter_attached():
+    """ describe_vpn_gateways attachment.state filter - match attached """
+
+    ec2 = boto3.client("ec2", region_name="us-east-1")
+
+    vpc = ec2.create_vpc(CidrBlock="10.0.0.0/16")
+    vpc_id = vpc["Vpc"]["VpcId"]
+    gateway = ec2.create_vpn_gateway(AvailabilityZone="us-east-1a", Type="ipsec.1")
+    gateway_id = gateway["VpnGateway"]["VpnGatewayId"]
+
+    ec2.attach_vpn_gateway(VpcId=vpc_id, VpnGatewayId=gateway_id)
+
+    gateways = ec2.describe_vpn_gateways(
+        Filters=[{"Name": "attachment.state", "Values": ["attached"]}]
+    )
+
+    gateways["VpnGateways"].should.have.length_of(1)
+    gateways["VpnGateways"][0]["VpnGatewayId"].should.equal(gateway_id)
+    gateways["VpnGateways"][0]["VpcAttachments"].should.contain(
+        {"State": "attached", "VpcId": vpc_id}
+    )
+
+
+@mock_ec2
+def test_describe_vpn_connections_state_filter_detached():
+    """ describe_vpn_gateways attachment.state filter - don't match detached """
+
+    ec2 = boto3.client("ec2", region_name="us-east-1")
+
+    vpc = ec2.create_vpc(CidrBlock="10.0.0.0/16")
+    vpc_id = vpc["Vpc"]["VpcId"]
+    gateway = ec2.create_vpn_gateway(AvailabilityZone="us-east-1a", Type="ipsec.1")
+    gateway_id = gateway["VpnGateway"]["VpnGatewayId"]
+
+    # The gateway is attached on purpose; the "detached" filter below must
+    # therefore match nothing.
+    
ec2.attach_vpn_gateway(VpcId=vpc_id, VpnGatewayId=gateway_id) + + gateways = ec2.describe_vpn_gateways( + Filters=[{"Name": "attachment.state", "Values": ["detached"]}] + ) + + gateways["VpnGateways"].should.have.length_of(0) + + +@mock_ec2 +def test_describe_vpn_connections_id_filter_match(): + """ describe_vpn_gateways vpn-gateway-id filter - match correct id """ + + ec2 = boto3.client("ec2", region_name="us-east-1") + + gateway = ec2.create_vpn_gateway(AvailabilityZone="us-east-1a", Type="ipsec.1") + gateway_id = gateway["VpnGateway"]["VpnGatewayId"] + + gateways = ec2.describe_vpn_gateways( + Filters=[{"Name": "vpn-gateway-id", "Values": [gateway_id]}] + ) + + gateways["VpnGateways"].should.have.length_of(1) + gateways["VpnGateways"][0]["VpnGatewayId"].should.equal(gateway_id) + + +@mock_ec2 +def test_describe_vpn_connections_id_filter_miss(): + """ describe_vpn_gateways vpn-gateway-id filter - don't match """ + + ec2 = boto3.client("ec2", region_name="us-east-1") + + ec2.create_vpn_gateway(AvailabilityZone="us-east-1a", Type="ipsec.1") + + gateways = ec2.describe_vpn_gateways( + Filters=[{"Name": "vpn-gateway-id", "Values": ["unknown_gateway_id"]}] + ) + + gateways["VpnGateways"].should.have.length_of(0) + + +@mock_ec2 +def test_describe_vpn_connections_type_filter_match(): + """ describe_vpn_gateways type filter - match """ + + ec2 = boto3.client("ec2", region_name="us-east-1") + + gateway = ec2.create_vpn_gateway(AvailabilityZone="us-east-1a", Type="ipsec.1") + gateway_id = gateway["VpnGateway"]["VpnGatewayId"] + + gateways = ec2.describe_vpn_gateways( + Filters=[{"Name": "type", "Values": ["ipsec.1"]}] + ) + + gateways["VpnGateways"].should.have.length_of(1) + gateways["VpnGateways"][0]["VpnGatewayId"].should.equal(gateway_id) + + +@mock_ec2 +def test_describe_vpn_connections_type_filter_miss(): + """ describe_vpn_gateways type filter - don't match """ + + ec2 = boto3.client("ec2", region_name="us-east-1") + + ec2.create_vpn_gateway(AvailabilityZone="us-east-1a", Type="ipsec.1") + + gateways = ec2.describe_vpn_gateways( + Filters=[{"Name": "type", "Values": ["unknown_type"]}] + ) + + gateways["VpnGateways"].should.have.length_of(0) + + @mock_ec2_deprecated def test_vpn_gateway_vpc_attachment(): conn = boto.connect_vpc("the_key", "the_secret") From bf8eb11dc31d4114c0ddcd0f76e5777ce9f9eac3 Mon Sep 17 00:00:00 2001 From: ryanlchandler Date: Wed, 22 Jul 2020 08:09:12 -0400 Subject: [PATCH 456/658] Adding MessageGroupId and MessageDeduplicationId (#3163) Adding MessageGroupId and MessageDeduplicationId when sent from batch (#3101) --- moto/sqs/models.py | 2 ++ moto/sqs/responses.py | 10 ++++++++++ tests/test_sqs/test_sqs.py | 16 ++++++++++++++++ 3 files changed, 28 insertions(+) diff --git a/moto/sqs/models.py b/moto/sqs/models.py index ea3b89f049b6..5b6e6410ab6a 100644 --- a/moto/sqs/models.py +++ b/moto/sqs/models.py @@ -685,6 +685,8 @@ def send_message_batch(self, queue_name, entries): entry["MessageBody"], message_attributes=entry["MessageAttributes"], delay_seconds=entry["DelaySeconds"], + group_id=entry.get("MessageGroupId"), + deduplication_id=entry.get("MessageDeduplicationId"), ) message.user_id = entry["Id"] diff --git a/moto/sqs/responses.py b/moto/sqs/responses.py index eed50a527a2d..54a8bc2672cb 100644 --- a/moto/sqs/responses.py +++ b/moto/sqs/responses.py @@ -291,6 +291,16 @@ def send_message_batch(self): [None], )[0], "MessageAttributes": message_attributes, + "MessageGroupId": self.querystring.get( + "SendMessageBatchRequestEntry.{}.MessageGroupId".format(index), + 
[None], + )[0], + "MessageDeduplicationId": self.querystring.get( + "SendMessageBatchRequestEntry.{}.MessageDeduplicationId".format( + index + ), + [None], + )[0], } if entries == {}: diff --git a/tests/test_sqs/test_sqs.py b/tests/test_sqs/test_sqs.py index 31bbafffb1b8..2ed757f6acfa 100644 --- a/tests/test_sqs/test_sqs.py +++ b/tests/test_sqs/test_sqs.py @@ -1044,6 +1044,8 @@ def test_send_message_batch(): "DataType": "String", } }, + "MessageGroupId": "message_group_id_1", + "MessageDeduplicationId": "message_deduplication_id_1", }, { "Id": "id_2", @@ -1052,6 +1054,8 @@ def test_send_message_batch(): "MessageAttributes": { "attribute_name_2": {"StringValue": "123", "DataType": "Number"} }, + "MessageGroupId": "message_group_id_2", + "MessageDeduplicationId": "message_deduplication_id_2", }, ], ) @@ -1066,10 +1070,22 @@ def test_send_message_batch(): response["Messages"][0]["MessageAttributes"].should.equal( {"attribute_name_1": {"StringValue": "attribute_value_1", "DataType": "String"}} ) + response["Messages"][0]["Attributes"]["MessageGroupId"].should.equal( + "message_group_id_1" + ) + response["Messages"][0]["Attributes"]["MessageDeduplicationId"].should.equal( + "message_deduplication_id_1" + ) response["Messages"][1]["Body"].should.equal("body_2") response["Messages"][1]["MessageAttributes"].should.equal( {"attribute_name_2": {"StringValue": "123", "DataType": "Number"}} ) + response["Messages"][1]["Attributes"]["MessageGroupId"].should.equal( + "message_group_id_2" + ) + response["Messages"][1]["Attributes"]["MessageDeduplicationId"].should.equal( + "message_deduplication_id_2" + ) @mock_sqs From 448ff45174c03d0a80a0227138ed213aba136765 Mon Sep 17 00:00:00 2001 From: Koichi Ogura <12413803+number09@users.noreply.github.com> Date: Wed, 22 Jul 2020 22:08:17 +0900 Subject: [PATCH 457/658] fix cognito-idp UserPool ClientId (#3165) * fix cognito-idp UserPool ClientId * add test * replace uuid4 to create_id --- moto/cognitoidp/models.py | 3 ++- moto/cognitoidp/utils.py | 10 ++++++++++ tests/test_cognitoidp/test_cognitoidp.py | 11 ++++++----- 3 files changed, 18 insertions(+), 6 deletions(-) create mode 100644 moto/cognitoidp/utils.py diff --git a/moto/cognitoidp/models.py b/moto/cognitoidp/models.py index 4d3280272839..a3cb69084725 100644 --- a/moto/cognitoidp/models.py +++ b/moto/cognitoidp/models.py @@ -23,6 +23,7 @@ UsernameExistsException, InvalidParameterException, ) +from .utils import create_id UserStatus = { "FORCE_CHANGE_PASSWORD": "FORCE_CHANGE_PASSWORD", @@ -214,7 +215,7 @@ def to_json(self, extended=True): class CognitoIdpUserPoolClient(BaseModel): def __init__(self, user_pool_id, generate_secret, extended_config): self.user_pool_id = user_pool_id - self.id = str(uuid.uuid4()) + self.id = create_id() self.secret = str(uuid.uuid4()) self.generate_secret = generate_secret or False self.extended_config = extended_config or {} diff --git a/moto/cognitoidp/utils.py b/moto/cognitoidp/utils.py new file mode 100644 index 000000000000..5f5fe4f8f85e --- /dev/null +++ b/moto/cognitoidp/utils.py @@ -0,0 +1,10 @@ +from __future__ import unicode_literals +import six +import random +import string + + +def create_id(): + size = 26 + chars = list(range(10)) + list(string.ascii_lowercase) + return "".join(six.text_type(random.choice(chars)) for x in range(size)) diff --git a/tests/test_cognitoidp/test_cognitoidp.py b/tests/test_cognitoidp/test_cognitoidp.py index e05f4b45735c..39875aeb4406 100644 --- a/tests/test_cognitoidp/test_cognitoidp.py +++ b/tests/test_cognitoidp/test_cognitoidp.py @@ 
-3,6 +3,8 @@ import json import os import random +import re + import requests import uuid @@ -15,6 +17,7 @@ from nose.tools import assert_raises from moto import mock_cognitoidp, settings +from moto.cognitoidp.utils import create_id from moto.core import ACCOUNT_ID @@ -211,7 +214,7 @@ def test_create_user_pool_client(): ) result["UserPoolClient"]["UserPoolId"].should.equal(user_pool_id) - result["UserPoolClient"]["ClientId"].should_not.be.none + bool(re.match(r"^[0-9a-z]{26}$", result["UserPoolClient"]["ClientId"])).should.be.ok result["UserPoolClient"]["ClientName"].should.equal(client_name) result["UserPoolClient"].should_not.have.key("ClientSecret") result["UserPoolClient"]["CallbackURLs"].should.have.length_of(1) @@ -233,7 +236,7 @@ def test_create_user_pool_client_returns_secret(): ) result["UserPoolClient"]["UserPoolId"].should.equal(user_pool_id) - result["UserPoolClient"]["ClientId"].should_not.be.none + bool(re.match(r"^[0-9a-z]{26}$", result["UserPoolClient"]["ClientId"])).should.be.ok result["UserPoolClient"]["ClientName"].should.equal(client_name) result["UserPoolClient"]["ClientSecret"].should_not.be.none result["UserPoolClient"]["CallbackURLs"].should.have.length_of(1) @@ -1334,9 +1337,7 @@ def test_change_password__using_custom_user_agent_header(): def test_forgot_password(): conn = boto3.client("cognito-idp", "us-west-2") - result = conn.forgot_password( - ClientId=str(uuid.uuid4()), Username=str(uuid.uuid4()) - ) + result = conn.forgot_password(ClientId=create_id(), Username=str(uuid.uuid4())) result["CodeDeliveryDetails"].should_not.be.none From 936c7c80f193ca5c7662a2b25742ac142bac5419 Mon Sep 17 00:00:00 2001 From: Dean Kleissas Date: Wed, 22 Jul 2020 12:01:31 -0400 Subject: [PATCH 458/658] When generating multi-part upload IDs remove `/` characters (#3164) * / are not allowed in upload ids * ran black --- moto/s3/models.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/moto/s3/models.py b/moto/s3/models.py index b809c0fc278b..233040435aca 100644 --- a/moto/s3/models.py +++ b/moto/s3/models.py @@ -280,7 +280,9 @@ def __init__(self, key_name, metadata): self.parts = {} self.partlist = [] # ordered list of part ID's rand_b64 = base64.b64encode(os.urandom(UPLOAD_ID_BYTES)) - self.id = rand_b64.decode("utf-8").replace("=", "").replace("+", "") + self.id = ( + rand_b64.decode("utf-8").replace("=", "").replace("+", "").replace("/", "") + ) def complete(self, body): decode_hex = codecs.getdecoder("hex_codec") From 2504a398f9ac9659986abaeb0ba19d6550eeb309 Mon Sep 17 00:00:00 2001 From: Larry Aiello Date: Sun, 26 Jul 2020 08:51:26 -0400 Subject: [PATCH 459/658] Implement ec2.register_image() (#3177) * implement register_image * format code --- moto/ec2/models.py | 13 +++++++++++++ moto/ec2/responses/amis.py | 11 ++++++++++- tests/test_ec2/test_amis.py | 10 ++++++++++ 3 files changed, 33 insertions(+), 1 deletion(-) diff --git a/moto/ec2/models.py b/moto/ec2/models.py index 6e320c0aef20..ad9ae3b1b12e 100644 --- a/moto/ec2/models.py +++ b/moto/ec2/models.py @@ -1516,6 +1516,19 @@ def add_launch_permission(self, ami_id, user_ids=None, group=None): return True + def register_image(self, name=None, description=None): + ami_id = random_ami_id() + ami = Ami( + self, + ami_id, + instance=None, + source_ami=None, + name=name, + description=description, + ) + self.amis[ami_id] = ami + return ami + def remove_launch_permission(self, ami_id, user_ids=None, group=None): ami = self.describe_images(ami_ids=[ami_id])[0] self.validate_permission_targets(user_ids=user_ids, 
group=group) diff --git a/moto/ec2/responses/amis.py b/moto/ec2/responses/amis.py index 0e70182bb155..178d583e0c78 100755 --- a/moto/ec2/responses/amis.py +++ b/moto/ec2/responses/amis.py @@ -73,8 +73,12 @@ def modify_image_attribute(self): return MODIFY_IMAGE_ATTRIBUTE_RESPONSE def register_image(self): + name = self.querystring.get("Name")[0] + description = self._get_param("Description", if_none="") if self.is_not_dryrun("RegisterImage"): - raise NotImplementedError("AMIs.register_image is not yet implemented") + image = self.ec2_backend.register_image(name, description) + template = self.response_template(REGISTER_IMAGE_RESPONSE) + return template.render(image=image) def reset_image_attribute(self): if self.is_not_dryrun("ResetImageAttribute"): @@ -190,3 +194,8 @@ def reset_image_attribute(self): true """ + +REGISTER_IMAGE_RESPONSE = """ + 59dbff89-35bd-4eac-99ed-be587EXAMPLE + {{ image.id }} +""" diff --git a/tests/test_ec2/test_amis.py b/tests/test_ec2/test_amis.py index 275b12905b32..220dd143c176 100644 --- a/tests/test_ec2/test_amis.py +++ b/tests/test_ec2/test_amis.py @@ -773,6 +773,16 @@ def test_ami_describe_non_existent(): img.load() +@mock_ec2 +def test_ami_registration(): + ec2 = boto3.client("ec2", region_name="us-east-1") + image_id = ec2.register_image(Name="test-register-image").get("ImageId", "") + images = ec2.describe_images(ImageIds=[image_id]).get("Images", []) + assert images[0]["Name"] == "test-register-image", "No image was registered." + assert images[0]["RootDeviceName"] == "/dev/sda1", "Wrong root device name." + assert images[0]["State"] == "available", "State should be available." + + @mock_ec2 def test_ami_filter_wildcard(): ec2_resource = boto3.resource("ec2", region_name="us-west-1") From c166d97a977661771f40a773892fa1f28d85f0f5 Mon Sep 17 00:00:00 2001 From: Macwan Nevil Date: Sun, 26 Jul 2020 15:00:15 +0000 Subject: [PATCH 460/658] Bugfix: S3 time precision issue fixed (#3182) * Bugfix: S3 time precision issue fixed * Bugfix: S3 time precision issue fixed * s3 timeformat fix * Quickfix S3 timefix --- moto/core/utils.py | 6 ++++++ moto/s3/models.py | 6 +++--- 2 files changed, 9 insertions(+), 3 deletions(-) diff --git a/moto/core/utils.py b/moto/core/utils.py index 921f64be2f0c..c9bf9347365a 100644 --- a/moto/core/utils.py +++ b/moto/core/utils.py @@ -190,6 +190,12 @@ def iso_8601_datetime_without_milliseconds(datetime): return None if datetime is None else datetime.strftime("%Y-%m-%dT%H:%M:%S") + "Z" +def iso_8601_datetime_without_milliseconds_s3(datetime): + return ( + None if datetime is None else datetime.strftime("%Y-%m-%dT%H:%M:%S.000") + "Z" + ) + + RFC1123 = "%a, %d %b %Y %H:%M:%S GMT" diff --git a/moto/s3/models.py b/moto/s3/models.py index 233040435aca..e5237168e630 100644 --- a/moto/s3/models.py +++ b/moto/s3/models.py @@ -22,7 +22,7 @@ from bisect import insort from moto.core import ACCOUNT_ID, BaseBackend, BaseModel -from moto.core.utils import iso_8601_datetime_with_milliseconds, rfc_1123_datetime +from moto.core.utils import iso_8601_datetime_without_milliseconds_s3, rfc_1123_datetime from moto.cloudwatch.models import MetricDatum from moto.utilities.tagging_service import TaggingService from .exceptions import ( @@ -79,7 +79,7 @@ def __init__(self, key): @property def last_modified_ISO8601(self): - return iso_8601_datetime_with_milliseconds(self.last_modified) + return iso_8601_datetime_without_milliseconds_s3(self.last_modified) @property def version_id(self): @@ -206,7 +206,7 @@ def etag(self): @property def last_modified_ISO8601(self): 
- return iso_8601_datetime_with_milliseconds(self.last_modified) + return iso_8601_datetime_without_milliseconds_s3(self.last_modified) @property def last_modified_RFC1123(self): From cdc4385e2a0e86e3c24c1fccc4f93d474f3ebb7e Mon Sep 17 00:00:00 2001 From: Hector Acosta Date: Mon, 27 Jul 2020 06:32:11 -0500 Subject: [PATCH 461/658] Various changes to organizations endpoint (#3175) * Raise DuplicatePolicyException when a policy with the same name exists * Implement update_policy * Implement delete_policy --- IMPLEMENTATION_COVERAGE.md | 4 +- moto/organizations/exceptions.py | 9 ++ moto/organizations/models.py | 39 ++++++++- moto/organizations/responses.py | 9 ++ .../test_organizations_boto3.py | 86 +++++++++++++++++-- 5 files changed, 139 insertions(+), 8 deletions(-) diff --git a/IMPLEMENTATION_COVERAGE.md b/IMPLEMENTATION_COVERAGE.md index 8744f47596ca..d2696e6af3ef 100644 --- a/IMPLEMENTATION_COVERAGE.md +++ b/IMPLEMENTATION_COVERAGE.md @@ -6113,7 +6113,7 @@ - [ ] decline_handshake - [ ] delete_organization - [ ] delete_organizational_unit -- [ ] delete_policy +- [X] delete_policy - [ ] deregister_delegated_administrator - [X] describe_account - [X] describe_create_account_status @@ -6152,7 +6152,7 @@ - [X] tag_resource - [X] untag_resource - [X] update_organizational_unit -- [ ] update_policy +- [X] update_policy
## outposts diff --git a/moto/organizations/exceptions.py b/moto/organizations/exceptions.py index 3649e3a13022..036eeccbcb93 100644 --- a/moto/organizations/exceptions.py +++ b/moto/organizations/exceptions.py @@ -17,3 +17,12 @@ def __init__(self): "DuplicateOrganizationalUnitException", "An OU with the same name already exists.", ) + + +class DuplicatePolicyException(JsonRESTError): + code = 400 + + def __init__(self): + super(DuplicatePolicyException, self).__init__( + "DuplicatePolicyException", "A policy with the same name already exists." + ) diff --git a/moto/organizations/models.py b/moto/organizations/models.py index d538ec1b8b95..6c1dab15d0af 100644 --- a/moto/organizations/models.py +++ b/moto/organizations/models.py @@ -11,6 +11,7 @@ from moto.organizations.exceptions import ( InvalidInputException, DuplicateOrganizationalUnitException, + DuplicatePolicyException, ) @@ -409,6 +410,9 @@ def list_children(self, **kwargs): def create_policy(self, **kwargs): new_policy = FakeServiceControlPolicy(self.org, **kwargs) + for policy in self.policies: + if kwargs["Name"] == policy.name: + raise DuplicatePolicyException self.policies.append(new_policy) return new_policy.describe() @@ -426,8 +430,26 @@ def describe_policy(self, **kwargs): raise RESTError("InvalidInputException", "You specified an invalid value.") return policy.describe() + def get_policy_by_id(self, policy_id): + policy = next( + (policy for policy in self.policies if policy.id == policy_id), None + ) + if policy is None: + raise RESTError( + "PolicyNotFoundException", + "We can't find a policy with the PolicyId that you specified.", + ) + return policy + + def update_policy(self, **kwargs): + policy = self.get_policy_by_id(kwargs["PolicyId"]) + policy.name = kwargs.get("Name", policy.name) + policy.description = kwargs.get("Description", policy.description) + policy.content = kwargs.get("Content", policy.content) + return policy.describe() + def attach_policy(self, **kwargs): - policy = next((p for p in self.policies if p.id == kwargs["PolicyId"]), None) + policy = self.get_policy_by_id(kwargs["PolicyId"]) if re.compile(utils.ROOT_ID_REGEX).match(kwargs["TargetId"]) or re.compile( utils.OU_ID_REGEX ).match(kwargs["TargetId"]): @@ -462,6 +484,21 @@ def list_policies(self, **kwargs): Policies=[p.describe()["Policy"]["PolicySummary"] for p in self.policies] ) + def delete_policy(self, **kwargs): + for idx, policy in enumerate(self.policies): + if policy.id == kwargs["PolicyId"]: + if self.list_targets_for_policy(PolicyId=policy.id)["Targets"]: + raise RESTError( + "PolicyInUseException", + "The policy is attached to one or more entities. 
You must detach it from all roots, OUs, and accounts before performing this operation.", + ) + del self.policies[idx] + return + raise RESTError( + "PolicyNotFoundException", + "We can't find a policy with the PolicyId that you specified.", + ) + def list_policies_for_target(self, **kwargs): if re.compile(utils.OU_ID_REGEX).match(kwargs["TargetId"]): obj = next((ou for ou in self.ou if ou.id == kwargs["TargetId"]), None) diff --git a/moto/organizations/responses.py b/moto/organizations/responses.py index 616deacbc84b..a2bd028d9961 100644 --- a/moto/organizations/responses.py +++ b/moto/organizations/responses.py @@ -105,6 +105,11 @@ def describe_policy(self): self.organizations_backend.describe_policy(**self.request_params) ) + def update_policy(self): + return json.dumps( + self.organizations_backend.update_policy(**self.request_params) + ) + def attach_policy(self): return json.dumps( self.organizations_backend.attach_policy(**self.request_params) @@ -115,6 +120,10 @@ def list_policies(self): self.organizations_backend.list_policies(**self.request_params) ) + def delete_policy(self): + self.organizations_backend.delete_policy(**self.request_params) + return json.dumps({}) + def list_policies_for_target(self): return json.dumps( self.organizations_backend.list_policies_for_target(**self.request_params) diff --git a/tests/test_organizations/test_organizations_boto3.py b/tests/test_organizations/test_organizations_boto3.py index c2327dc408e7..5f14d83a588c 100644 --- a/tests/test_organizations/test_organizations_boto3.py +++ b/tests/test_organizations/test_organizations_boto3.py @@ -420,18 +420,56 @@ def test_attach_policy(): account_id = client.create_account(AccountName=mockname, Email=mockemail)[ "CreateAccountStatus" ]["AccountId"] + + +@mock_organizations +def test_delete_policy(): + client = boto3.client("organizations", region_name="us-east-1") + org = client.create_organization(FeatureSet="ALL")["Organization"] + base_policies = client.list_policies(Filter="SERVICE_CONTROL_POLICY")["Policies"] + base_policies.should.have.length_of(1) policy_id = client.create_policy( Content=json.dumps(policy_doc01), Description="A dummy service control policy", Name="MockServiceControlPolicy", Type="SERVICE_CONTROL_POLICY", )["Policy"]["PolicySummary"]["Id"] - response = client.attach_policy(PolicyId=policy_id, TargetId=root_id) - response["ResponseMetadata"]["HTTPStatusCode"].should.equal(200) - response = client.attach_policy(PolicyId=policy_id, TargetId=ou_id) - response["ResponseMetadata"]["HTTPStatusCode"].should.equal(200) - response = client.attach_policy(PolicyId=policy_id, TargetId=account_id) + new_policies = client.list_policies(Filter="SERVICE_CONTROL_POLICY")["Policies"] + new_policies.should.have.length_of(2) + response = client.delete_policy(PolicyId=policy_id) response["ResponseMetadata"]["HTTPStatusCode"].should.equal(200) + new_policies = client.list_policies(Filter="SERVICE_CONTROL_POLICY")["Policies"] + new_policies.should.equal(base_policies) + new_policies.should.have.length_of(1) + + +@mock_organizations +def test_delete_policy_exception(): + client = boto3.client("organizations", region_name="us-east-1") + org = client.create_organization(FeatureSet="ALL")["Organization"] + non_existent_policy_id = utils.make_random_service_control_policy_id() + with assert_raises(ClientError) as e: + response = client.delete_policy(PolicyId=non_existent_policy_id) + ex = e.exception + ex.operation_name.should.equal("DeletePolicy") + ex.response["Error"]["Code"].should.equal("400") + 
ex.response["Error"]["Message"].should.contain("PolicyNotFoundException") + + # Attempt to delete an attached policy + policy_id = client.create_policy( + Content=json.dumps(policy_doc01), + Description="A dummy service control policy", + Name="MockServiceControlPolicy", + Type="SERVICE_CONTROL_POLICY", + )["Policy"]["PolicySummary"]["Id"] + root_id = client.list_roots()["Roots"][0]["Id"] + client.attach_policy(PolicyId=policy_id, TargetId=root_id) + with assert_raises(ClientError) as e: + response = client.delete_policy(PolicyId=policy_id) + ex = e.exception + ex.operation_name.should.equal("DeletePolicy") + ex.response["Error"]["Code"].should.equal("400") + ex.response["Error"]["Message"].should.contain("PolicyInUseException") @mock_organizations @@ -479,6 +517,44 @@ def test_attach_policy_exception(): ex.response["Error"]["Message"].should.contain("InvalidInputException") +@mock_organizations +def test_update_policy(): + client = boto3.client("organizations", region_name="us-east-1") + org = client.create_organization(FeatureSet="ALL")["Organization"] + + policy_dict = dict( + Content=json.dumps(policy_doc01), + Description="A dummy service control policy", + Name="MockServiceControlPolicy", + Type="SERVICE_CONTROL_POLICY", + ) + policy_id = client.create_policy(**policy_dict)["Policy"]["PolicySummary"]["Id"] + + for key in ("Description", "Name"): + response = client.update_policy(**{"PolicyId": policy_id, key: "foobar"}) + policy = client.describe_policy(PolicyId=policy_id) + policy["Policy"]["PolicySummary"][key].should.equal("foobar") + validate_service_control_policy(org, response["Policy"]) + + response = client.update_policy(PolicyId=policy_id, Content="foobar") + policy = client.describe_policy(PolicyId=policy_id) + policy["Policy"]["Content"].should.equal("foobar") + validate_service_control_policy(org, response["Policy"]) + + +@mock_organizations +def test_update_policy_exception(): + client = boto3.client("organizations", region_name="us-east-1") + org = client.create_organization(FeatureSet="ALL")["Organization"] + non_existent_policy_id = utils.make_random_service_control_policy_id() + with assert_raises(ClientError) as e: + response = client.update_policy(PolicyId=non_existent_policy_id) + ex = e.exception + ex.operation_name.should.equal("UpdatePolicy") + ex.response["Error"]["Code"].should.equal("400") + ex.response["Error"]["Message"].should.contain("PolicyNotFoundException") + + @mock_organizations def test_list_polices(): client = boto3.client("organizations", region_name="us-east-1") From 03dd55d39d5b9abcb9031be1e5b772053daa9102 Mon Sep 17 00:00:00 2001 From: Bert Blommers Date: Mon, 27 Jul 2020 12:36:31 +0100 Subject: [PATCH 462/658] Organisations - Put back assertions that got lost in a merge (#3184) --- tests/test_organizations/test_organizations_boto3.py | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/tests/test_organizations/test_organizations_boto3.py b/tests/test_organizations/test_organizations_boto3.py index 5f14d83a588c..decc0a17810d 100644 --- a/tests/test_organizations/test_organizations_boto3.py +++ b/tests/test_organizations/test_organizations_boto3.py @@ -420,6 +420,18 @@ def test_attach_policy(): account_id = client.create_account(AccountName=mockname, Email=mockemail)[ "CreateAccountStatus" ]["AccountId"] + policy_id = client.create_policy( + Content=json.dumps(policy_doc01), + Description="A dummy service control policy", + Name="MockServiceControlPolicy", + Type="SERVICE_CONTROL_POLICY", + )["Policy"]["PolicySummary"]["Id"] + 
response = client.attach_policy(PolicyId=policy_id, TargetId=root_id) + response["ResponseMetadata"]["HTTPStatusCode"].should.equal(200) + response = client.attach_policy(PolicyId=policy_id, TargetId=ou_id) + response["ResponseMetadata"]["HTTPStatusCode"].should.equal(200) + response = client.attach_policy(PolicyId=policy_id, TargetId=account_id) + response["ResponseMetadata"]["HTTPStatusCode"].should.equal(200) @mock_organizations From bc1674cb197af5866a43ef11d3d60ea5f3fe02ab Mon Sep 17 00:00:00 2001 From: usmangani1 Date: Mon, 27 Jul 2020 18:38:01 +0530 Subject: [PATCH 463/658] CF : Added support for get template summary (#3179) * CF : Added support for get template summary * Linting Co-authored-by: Bert Blommers --- moto/cloudformation/responses.py | 64 ++++++++++++++++--- .../test_cloudformation_stack_crud_boto3.py | 50 +++++++++++++++ 2 files changed, 105 insertions(+), 9 deletions(-) diff --git a/moto/cloudformation/responses.py b/moto/cloudformation/responses.py index 8672c706df4d..c7ced01869a7 100644 --- a/moto/cloudformation/responses.py +++ b/moto/cloudformation/responses.py @@ -10,6 +10,31 @@ from moto.core import ACCOUNT_ID from .models import cloudformation_backends from .exceptions import ValidationError +from .utils import yaml_tag_constructor + + +def get_template_summary_response_from_template(template_body): + def get_resource_types(template_dict): + resources = {} + for key, value in template_dict.items(): + if key == "Resources": + resources = value + + resource_types = [] + for key, value in resources.items(): + resource_types.append(value["Type"]) + return resource_types + + yaml.add_multi_constructor("", yaml_tag_constructor) + + try: + template_dict = yaml.load(template_body, Loader=yaml.Loader) + except (yaml.parser.ParserError, yaml.scanner.ScannerError): + template_dict = json.loads(template_body) + + resources_types = get_resource_types(template_dict) + template_dict["resourceTypes"] = resources_types + return template_dict class CloudFormationResponse(BaseResponse): @@ -269,6 +294,20 @@ def get_template(self): template = self.response_template(GET_TEMPLATE_RESPONSE_TEMPLATE) return template.render(stack=stack) + def get_template_summary(self): + stack_name = self._get_param("StackName") + template_url = self._get_param("TemplateURL") + stack_body = self._get_param("TemplateBody") + + if stack_name: + stack_body = self.cloudformation_backend.get_stack(stack_name).template + elif template_url: + stack_body = self._get_stack_from_s3_url(template_url) + + template_summary = get_template_summary_response_from_template(stack_body) + template = self.response_template(GET_TEMPLATE_SUMMARY_TEMPLATE) + return template.render(template_summary=template_summary) + def update_stack(self): stack_name = self._get_param("StackName") role_arn = self._get_param("RoleARN") @@ -743,7 +782,6 @@ def update_stack_instances(self): """ - DESCRIBE_STACK_RESOURCE_RESPONSE_TEMPLATE = """ @@ -758,7 +796,6 @@ def update_stack_instances(self): """ - DESCRIBE_STACK_RESOURCES_RESPONSE = """ @@ -777,7 +814,6 @@ def update_stack_instances(self): """ - DESCRIBE_STACK_EVENTS_RESPONSE = """ @@ -802,7 +838,6 @@ def update_stack_instances(self):
""" - LIST_CHANGE_SETS_RESPONSE = """ @@ -823,7 +858,6 @@ def update_stack_instances(self): """ - LIST_STACKS_RESPONSE = """ @@ -840,7 +874,6 @@ def update_stack_instances(self): """ - LIST_STACKS_RESOURCES_RESPONSE = """ @@ -860,7 +893,6 @@ def update_stack_instances(self): """ - GET_TEMPLATE_RESPONSE_TEMPLATE = """ {{ stack.template }} @@ -870,7 +902,6 @@ def update_stack_instances(self): """ - DELETE_STACK_RESPONSE_TEMPLATE = """ 5ccc7dcd-744c-11e5-be70-example @@ -878,7 +909,6 @@ def update_stack_instances(self): """ - LIST_EXPORTS_RESPONSE = """ @@ -1139,3 +1169,19 @@ def update_stack_instances(self): """ ) + +GET_TEMPLATE_SUMMARY_TEMPLATE = """ + + {{ template_summary.Description }} + {% for resource in template_summary.resourceTypes %} + + {{ resource }} + + {% endfor %} + {{ template_summary.AWSTemplateFormatVersion }} + + + b9b4b068-3a41-11e5-94eb-example + + +""" diff --git a/tests/test_cloudformation/test_cloudformation_stack_crud_boto3.py b/tests/test_cloudformation/test_cloudformation_stack_crud_boto3.py index 1ebce46d70cc..0bfaf9f09abe 100644 --- a/tests/test_cloudformation/test_cloudformation_stack_crud_boto3.py +++ b/tests/test_cloudformation/test_cloudformation_stack_crud_boto3.py @@ -35,6 +35,14 @@ }, } +dummy_template3 = { + "AWSTemplateFormatVersion": "2010-09-09", + "Description": "Stack 3", + "Resources": { + "VPC": {"Properties": {"CidrBlock": "192.168.0.0/16"}, "Type": "AWS::EC2::VPC"} + }, +} + dummy_template_yaml = """--- AWSTemplateFormatVersion: 2010-09-09 Description: Stack1 with yaml template @@ -668,6 +676,48 @@ def test_boto3_create_stack_with_short_form_func_yaml(): ) +@mock_s3 +@mock_cloudformation +def test_get_template_summary(): + s3 = boto3.client("s3") + s3_conn = boto3.resource("s3", region_name="us-east-1") + + conn = boto3.client("cloudformation", region_name="us-east-1") + result = conn.get_template_summary(TemplateBody=json.dumps(dummy_template3)) + + result["ResourceTypes"].should.equal(["AWS::EC2::VPC"]) + result["Version"].should.equal("2010-09-09") + result["Description"].should.equal("Stack 3") + + conn.create_stack(StackName="test_stack", TemplateBody=json.dumps(dummy_template3)) + + result = conn.get_template_summary(StackName="test_stack") + + result["ResourceTypes"].should.equal(["AWS::EC2::VPC"]) + result["Version"].should.equal("2010-09-09") + result["Description"].should.equal("Stack 3") + + s3_conn.create_bucket(Bucket="foobar") + s3_conn.Object("foobar", "template-key").put(Body=json.dumps(dummy_template3)) + + key_url = s3.generate_presigned_url( + ClientMethod="get_object", Params={"Bucket": "foobar", "Key": "template-key"} + ) + + conn.create_stack(StackName="stack_from_url", TemplateURL=key_url) + result = conn.get_template_summary(TemplateURL=key_url) + result["ResourceTypes"].should.equal(["AWS::EC2::VPC"]) + result["Version"].should.equal("2010-09-09") + result["Description"].should.equal("Stack 3") + + conn = boto3.client("cloudformation", region_name="us-east-1") + result = conn.get_template_summary(TemplateBody=dummy_template_yaml) + + result["ResourceTypes"].should.equal(["AWS::EC2::Instance"]) + result["Version"].should.equal("2010-09-09") + result["Description"].should.equal("Stack1 with yaml template") + + @mock_cloudformation def test_boto3_create_stack_with_ref_yaml(): cf_conn = boto3.client("cloudformation", region_name="us-east-1") From 1db42fb865aabffdd9b6c60e286231a64e87f77f Mon Sep 17 00:00:00 2001 From: usmangani1 Date: Mon, 27 Jul 2020 20:02:41 +0530 Subject: [PATCH 464/658] FIX : IAM - Added support for 
pathPrefix in list_users_function (#3180) * FIX:IAM-Added support for pathPrefix in list_users_function * removed changes for roles * Added test for non decorator * changed filter function Co-authored-by: usmankb --- moto/iam/models.py | 8 ++++++++ moto/iam/responses.py | 1 - tests/test_iam/test_iam.py | 6 ++++++ 3 files changed, 14 insertions(+), 1 deletion(-) diff --git a/moto/iam/models.py b/moto/iam/models.py index 82dc84be5afb..49755e57ab43 100755 --- a/moto/iam/models.py +++ b/moto/iam/models.py @@ -946,6 +946,10 @@ def _users(self): return len(self._iam_backend.users) +def filter_items_with_path_prefix(path_prefix, items): + return [role for role in items if role.path.startswith(path_prefix)] + + class IAMBackend(BaseBackend): def __init__(self): self.instance_profiles = {} @@ -1490,7 +1494,11 @@ def get_user(self, user_name): def list_users(self, path_prefix, marker, max_items): users = None try: + users = self.users.values() + if path_prefix: + users = filter_items_with_path_prefix(path_prefix, users) + except KeyError: raise IAMNotFoundException( "Users {0}, {1}, {2} not found".format(path_prefix, marker, max_items) diff --git a/moto/iam/responses.py b/moto/iam/responses.py index 8eb1730ea0b2..6f785f8acc62 100644 --- a/moto/iam/responses.py +++ b/moto/iam/responses.py @@ -337,7 +337,6 @@ def remove_role_from_instance_profile(self): def list_roles(self): roles = iam_backend.get_roles() - template = self.response_template(LIST_ROLES_TEMPLATE) return template.render(roles=roles) diff --git a/tests/test_iam/test_iam.py b/tests/test_iam/test_iam.py index 4ae5ad49e7ef..610333303135 100644 --- a/tests/test_iam/test_iam.py +++ b/tests/test_iam/test_iam.py @@ -762,6 +762,12 @@ def test_list_users(): user["Path"].should.equal("/") user["Arn"].should.equal("arn:aws:iam::{}:user/my-user".format(ACCOUNT_ID)) + conn.create_user(UserName="my-user-1", Path="myUser") + response = conn.list_users(PathPrefix="my") + user = response["Users"][0] + user["UserName"].should.equal("my-user-1") + user["Path"].should.equal("myUser") + @mock_iam() def test_user_policies(): From 6adee0cbaf7a1971fff8809cc07dc4b23a08cc2e Mon Sep 17 00:00:00 2001 From: Macwan Nevil Date: Mon, 27 Jul 2020 23:23:15 +0530 Subject: [PATCH 465/658] Bugfix: RedrivePolicy Issue SNS (#3186) * Bugfix: S3 time precision issue fixed * Bugfix: S3 time precision issue fixed * s3 timeformat fix * Quickfix S3 timefix * Bugfix: Redrive Policy Allow * Linting Fixed --- moto/sns/models.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/moto/sns/models.py b/moto/sns/models.py index 85196cd8f8bb..76376e58fce8 100644 --- a/moto/sns/models.py +++ b/moto/sns/models.py @@ -580,7 +580,12 @@ def get_subscription_attributes(self, arn): return subscription.attributes def set_subscription_attributes(self, arn, name, value): - if name not in ["RawMessageDelivery", "DeliveryPolicy", "FilterPolicy"]: + if name not in [ + "RawMessageDelivery", + "DeliveryPolicy", + "FilterPolicy", + "RedrivePolicy", + ]: raise SNSInvalidParameter("AttributeName") # TODO: should do validation From 126f5a5155af519673faf8aef4dcc3232c969447 Mon Sep 17 00:00:00 2001 From: Jordan Reiter Date: Tue, 28 Jul 2020 05:17:35 -0400 Subject: [PATCH 466/658] Implement Filter: Contains functionality for describe_params (#3189) * Implement Filter: Contains functionality for describe_params This commit adds the Contains functionality. 
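(As an illustration, not part of the patch: with this change a lookup using
the new option can be sketched roughly as follows, reusing the parameter
names from the tests below.)

    import boto3

    client = boto3.client("ssm", region_name="us-east-1")
    # With Option "Contains", "/tan" matches /tangent-3 and tangram-4 but not
    # standby-5, since names are compared with a leading "/" prepended.
    response = client.describe_parameters(
        ParameterFilters=[{"Key": "Name", "Option": "Contains", "Values": ["/tan"]}]
    )
    print(sorted(p["Name"] for p in response["Parameters"]))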
Tests were created to mimic behavior in AWS/boto3, including that filters with values in the form of `/name` will match parameters named `/name/match` but not parameters named `match/with/other-name`. In the test example, a Contains filter with the value `/tan` would match: `/tangent-3` and `tangram-4` but not `standby-5`. * Enforce parameter filter restrictions on get_parameters_by_path According to the boto3 documentation [1], `Name`, `Path`, and `Tier` are not allowed values for `Key` in a parameter filter for `get_parameters_by_path`. This commit enforces this by calling `_validate_parameter_filters` from the `get_parameters_by_path` method, and adding a check to `_validate_parameter_filters`. I added 3 test cases to `test_get_parameters_by_path` which check for the correct exception when calling with a parameter filter using any of these keys. * Code formatted to match style * Refactored logic --- moto/ssm/models.py | 20 ++++++++++++-- tests/test_ssm/test_ssm_boto3.py | 46 ++++++++++++++++++++++++++++++++ 2 files changed, 64 insertions(+), 2 deletions(-) diff --git a/moto/ssm/models.py b/moto/ssm/models.py index 28175bb06e66..3c29097e870b 100644 --- a/moto/ssm/models.py +++ b/moto/ssm/models.py @@ -965,6 +965,13 @@ def _validate_parameter_filters(self, parameter_filters, by_path): "The following filter key is not valid: Label. Valid filter keys include: [Path, Name, Type, KeyId, Tier]." ) + if by_path and key in ["Name", "Path", "Tier"]: + raise InvalidFilterKey( + "The following filter key is not valid: {key}. Valid filter keys include: [Type, KeyId].".format( + key=key + ) + ) + if not values: raise InvalidFilterValue( "The following filter values are missing : null for filter key Name." @@ -1024,7 +1031,10 @@ def _validate_parameter_filters(self, parameter_filters, by_path): ) ) - if key != "Path" and option not in ["Equals", "BeginsWith"]: + allowed_options = ["Equals", "BeginsWith"] + if key == "Name": + allowed_options += ["Contains"] + if key != "Path" and option not in allowed_options: raise InvalidFilterOption( "The following filter option is not valid: {option}. Valid options include: [BeginsWith, Equals].".format( option=option @@ -1084,6 +1094,9 @@ def get_parameters_by_path( max_results=10, ): """Implement the get-parameters-by-path-API in the backend.""" + + self._validate_parameter_filters(filters, by_path=True) + result = [] # path could be with or without a trailing /. we handle this # difference here. 
@@ -1134,7 +1147,8 @@ def _match_filters(self, parameter, filters=None): what = parameter.keyid elif key == "Name": what = "/" + parameter.name.lstrip("/") - values = ["/" + value.lstrip("/") for value in values] + if option != "Contains": + values = ["/" + value.lstrip("/") for value in values] elif key == "Path": what = "/" + parameter.name.lstrip("/") values = ["/" + value.strip("/") for value in values] @@ -1147,6 +1161,8 @@ def _match_filters(self, parameter, filters=None): what.startswith(value) for value in values ): return False + elif option == "Contains" and not any(value in what for value in values): + return False elif option == "Equals" and not any(what == value for value in values): return False elif option == "OneLevel": diff --git a/tests/test_ssm/test_ssm_boto3.py b/tests/test_ssm/test_ssm_boto3.py index 837f81bf5529..e899613e0e41 100644 --- a/tests/test_ssm/test_ssm_boto3.py +++ b/tests/test_ssm/test_ssm_boto3.py @@ -198,6 +198,33 @@ def test_get_parameters_by_path(): len(response["Parameters"]).should.equal(1) response.should_not.have.key("NextToken") + filters = [{"Key": "Name", "Values": ["error"]}] + client.get_parameters_by_path.when.called_with( + Path="/baz", ParameterFilters=filters + ).should.throw( + ClientError, + "The following filter key is not valid: Name. " + "Valid filter keys include: [Type, KeyId].", + ) + + filters = [{"Key": "Path", "Values": ["/error"]}] + client.get_parameters_by_path.when.called_with( + Path="/baz", ParameterFilters=filters + ).should.throw( + ClientError, + "The following filter key is not valid: Path. " + "Valid filter keys include: [Type, KeyId].", + ) + + filters = [{"Key": "Tier", "Values": ["Standard"]}] + client.get_parameters_by_path.when.called_with( + Path="/baz", ParameterFilters=filters + ).should.throw( + ClientError, + "The following filter key is not valid: Tier. 
" + "Valid filter keys include: [Type, KeyId].", + ) + @mock_ssm def test_put_parameter(): @@ -504,6 +531,9 @@ def test_describe_parameters_with_parameter_filters_name(): client = boto3.client("ssm", region_name="us-east-1") client.put_parameter(Name="param", Value="value", Type="String") client.put_parameter(Name="/param-2", Value="value-2", Type="String") + client.put_parameter(Name="/tangent-3", Value="value-3", Type="String") + client.put_parameter(Name="tangram-4", Value="value-4", Type="String") + client.put_parameter(Name="standby-5", Value="value-5", Type="String") response = client.describe_parameters( ParameterFilters=[{"Key": "Name", "Values": ["param"]}] @@ -543,6 +573,22 @@ def test_describe_parameters_with_parameter_filters_name(): parameters.should.have.length_of(2) response.should_not.have.key("NextToken") + response = client.describe_parameters( + ParameterFilters=[{"Key": "Name", "Option": "Contains", "Values": ["ram"]}] + ) + + parameters = response["Parameters"] + parameters.should.have.length_of(3) + response.should_not.have.key("NextToken") + + response = client.describe_parameters( + ParameterFilters=[{"Key": "Name", "Option": "Contains", "Values": ["/tan"]}] + ) + + parameters = response["Parameters"] + parameters.should.have.length_of(2) + response.should_not.have.key("NextToken") + @mock_ssm def test_describe_parameters_with_parameter_filters_path(): From 97139d4253db409b28265c87dcbadc529a6549de Mon Sep 17 00:00:00 2001 From: Ninh Khong Date: Tue, 28 Jul 2020 20:34:26 +0700 Subject: [PATCH 467/658] Fix : SQS - Added support for attribute labels for send_message function (#3181) * Fix : SQS - Added support for attribute labels for send_message function * Add integration test on receive message function * Add send message invalid datetype integration test and fix SQS MessageAttributesInvalid exceptions --- moto/sqs/exceptions.py | 8 ++-- moto/sqs/models.py | 18 ++++++-- moto/sqs/responses.py | 13 +----- moto/sqs/utils.py | 2 +- tests/test_sqs/test_sqs.py | 92 ++++++++++++++++++++++++++++++++++++++ 5 files changed, 114 insertions(+), 19 deletions(-) diff --git a/moto/sqs/exceptions.py b/moto/sqs/exceptions.py index 77d7b9fb2ebc..46d2af400fc5 100644 --- a/moto/sqs/exceptions.py +++ b/moto/sqs/exceptions.py @@ -16,11 +16,13 @@ def __init__(self): ) -class MessageAttributesInvalid(Exception): - status_code = 400 +class MessageAttributesInvalid(RESTError): + code = 400 def __init__(self, description): - self.description = description + super(MessageAttributesInvalid, self).__init__( + "MessageAttributesInvalid", description + ) class QueueDoesNotExist(RESTError): diff --git a/moto/sqs/models.py b/moto/sqs/models.py index 5b6e6410ab6a..0854164576e3 100644 --- a/moto/sqs/models.py +++ b/moto/sqs/models.py @@ -87,7 +87,19 @@ def utf8(str): struct_format = "!I".encode("ascii") # ensure it's a bytestring for name in sorted(self.message_attributes.keys()): attr = self.message_attributes[name] - data_type = attr["data_type"] + data_type_parts = attr["data_type"].split(".") + data_type = data_type_parts[0] + + if data_type not in [ + "String", + "Binary", + "Number", + ]: + raise MessageAttributesInvalid( + "The message attribute '{0}' has an invalid message attribute type, the set of supported type prefixes is Binary, Number, and String.".format( + name[0] + ) + ) encoded = utf8("") # Each part of each attribute is encoded right after it's @@ -243,9 +255,7 @@ def __init__(self, name, region, **kwargs): # Check some conditions if self.fifo_queue and not 
self.name.endswith(".fifo"): - raise MessageAttributesInvalid( - "Queue name must end in .fifo for FIFO queues" - ) + raise InvalidParameterValue("Queue name must end in .fifo for FIFO queues") @property def pending_messages(self): diff --git a/moto/sqs/responses.py b/moto/sqs/responses.py index 54a8bc2672cb..29804256c2b3 100644 --- a/moto/sqs/responses.py +++ b/moto/sqs/responses.py @@ -9,7 +9,6 @@ from .exceptions import ( EmptyBatchRequest, InvalidAttributeName, - MessageAttributesInvalid, MessageNotInflight, ReceiptHandleIsInvalid, ) @@ -82,12 +81,7 @@ def create_queue(self): request_url = urlparse(self.uri) queue_name = self._get_param("QueueName") - try: - queue = self.sqs_backend.create_queue( - queue_name, self.tags, **self.attribute - ) - except MessageAttributesInvalid as e: - return self._error("InvalidParameterValue", e.description) + queue = self.sqs_backend.create_queue(queue_name, self.tags, **self.attribute) template = self.response_template(CREATE_QUEUE_RESPONSE) return template.render(queue_url=queue.url(request_url)) @@ -225,10 +219,7 @@ def send_message(self): if len(message) > MAXIMUM_MESSAGE_LENGTH: return ERROR_TOO_LONG_RESPONSE, dict(status=400) - try: - message_attributes = parse_message_attributes(self.querystring) - except MessageAttributesInvalid as e: - return e.description, dict(status=e.status_code) + message_attributes = parse_message_attributes(self.querystring) queue_name = self._get_queue_name() diff --git a/moto/sqs/utils.py b/moto/sqs/utils.py index f3b8bbfe825b..315fce56b0d5 100644 --- a/moto/sqs/utils.py +++ b/moto/sqs/utils.py @@ -34,7 +34,7 @@ def parse_message_attributes(querystring, base="", value_namespace="Value."): ) data_type_parts = data_type[0].split(".") - if len(data_type_parts) > 2 or data_type_parts[0] not in [ + if data_type_parts[0] not in [ "String", "Binary", "Number", diff --git a/tests/test_sqs/test_sqs.py b/tests/test_sqs/test_sqs.py index 2ed757f6acfa..9e3896154c82 100644 --- a/tests/test_sqs/test_sqs.py +++ b/tests/test_sqs/test_sqs.py @@ -248,6 +248,50 @@ def test_message_with_complex_attributes(): messages.should.have.length_of(1) +@mock_sqs +def test_message_with_attributes_have_labels(): + sqs = boto3.resource("sqs", region_name="us-east-1") + queue = sqs.create_queue(QueueName="blah") + msg = queue.send_message( + MessageBody="derp", + MessageAttributes={ + "timestamp": { + "DataType": "Number.java.lang.Long", + "StringValue": "1493147359900", + } + }, + ) + msg.get("MD5OfMessageBody").should.equal("58fd9edd83341c29f1aebba81c31e257") + msg.get("MD5OfMessageAttributes").should.equal("235c5c510d26fb653d073faed50ae77c") + msg.get("MessageId").should_not.contain(" \n") + + messages = queue.receive_messages() + messages.should.have.length_of(1) + + +@mock_sqs +def test_message_with_attributes_invalid_datatype(): + sqs = boto3.resource("sqs", region_name="us-east-1") + queue = sqs.create_queue(QueueName="blah") + + with assert_raises(ClientError) as e: + queue.send_message( + MessageBody="derp", + MessageAttributes={ + "timestamp": { + "DataType": "InvalidNumber", + "StringValue": "149314735990a", + } + }, + ) + ex = e.exception + ex.response["Error"]["Code"].should.equal("MessageAttributesInvalid") + ex.response["Error"]["Message"].should.equal( + "The message attribute 'timestamp' has an invalid message attribute type, the set of supported type " + "prefixes is Binary, Number, and String." 
+ ) + + @mock_sqs def test_send_message_with_message_group_id(): sqs = boto3.resource("sqs", region_name="us-east-1") @@ -532,6 +576,54 @@ def test_send_receive_message_with_attributes(): ) +@mock_sqs +def test_send_receive_message_with_attributes_with_labels(): + sqs = boto3.resource("sqs", region_name="us-east-1") + conn = boto3.client("sqs", region_name="us-east-1") + conn.create_queue(QueueName="test-queue") + queue = sqs.Queue("test-queue") + + body_one = "this is a test message" + body_two = "this is another test message" + + queue.send_message( + MessageBody=body_one, + MessageAttributes={ + "timestamp": { + "StringValue": "1493147359900", + "DataType": "Number.java.lang.Long", + } + }, + ) + + queue.send_message( + MessageBody=body_two, + MessageAttributes={ + "timestamp": { + "StringValue": "1493147359901", + "DataType": "Number.java.lang.Long", + } + }, + ) + + messages = conn.receive_message(QueueUrl=queue.url, MaxNumberOfMessages=2)[ + "Messages" + ] + + message1 = messages[0] + message2 = messages[1] + + message1.get("Body").should.equal(body_one) + message2.get("Body").should.equal(body_two) + + message1.get("MD5OfMessageAttributes").should.equal( + "235c5c510d26fb653d073faed50ae77c" + ) + message2.get("MD5OfMessageAttributes").should.equal( + "994258b45346a2cc3f9cbb611aa7af30" + ) + + @mock_sqs def test_send_receive_message_timestamps(): sqs = boto3.resource("sqs", region_name="us-east-1") From 28d1d762af57393e34e222496b574c253597f149 Mon Sep 17 00:00:00 2001 From: Jordan Reiter Date: Tue, 28 Jul 2020 10:26:59 -0400 Subject: [PATCH 468/658] Enforce parameter naming (#3190) * Enforce parameter naming Parameters are not allowed to start with `ssm` or `aws`. This commit adds error messages which correspond exactly to the error messages returned by boto3. * Fix for Python 2 compatibility f-strings not supported in Python 2.7 --- moto/ssm/exceptions.py | 7 ++++ moto/ssm/models.py | 18 +++++++++ tests/test_ssm/test_ssm_boto3.py | 67 ++++++++++++++++++++++++++++++++ 3 files changed, 92 insertions(+) diff --git a/moto/ssm/exceptions.py b/moto/ssm/exceptions.py index 2e715f16a24c..f68e47029860 100644 --- a/moto/ssm/exceptions.py +++ b/moto/ssm/exceptions.py @@ -78,6 +78,13 @@ def __init__(self, message): ) +class AccessDeniedException(JsonRESTError): + code = 400 + + def __init__(self, message): + super(AccessDeniedException, self).__init__("AccessDeniedException", message) + + class InvalidDocumentContent(JsonRESTError): code = 400 diff --git a/moto/ssm/models.py b/moto/ssm/models.py index 3c29097e870b..37d56c2ddd5f 100644 --- a/moto/ssm/models.py +++ b/moto/ssm/models.py @@ -27,6 +27,7 @@ ParameterNotFound, DocumentAlreadyExists, InvalidDocumentOperation, + AccessDeniedException, InvalidDocument, InvalidDocumentContent, InvalidDocumentVersion, @@ -1254,6 +1255,23 @@ def label_parameter_version(self, name, version, labels): def put_parameter( self, name, description, value, type, allowed_pattern, keyid, overwrite ): + if name.lower().lstrip("/").startswith("aws") or name.lower().lstrip( + "/" + ).startswith("ssm"): + is_path = name.count("/") > 1 + if name.lower().startswith("/aws") and is_path: + raise AccessDeniedException( + "No access to reserved parameter name: {name}.".format(name=name) + ) + if not is_path: + invalid_prefix_error = 'Parameter name: can\'t be prefixed with "aws" or "ssm" (case-insensitive).' + else: + invalid_prefix_error = ( + 'Parameter name: can\'t be prefixed with "ssm" (case-insensitive). 
' + "If formed as a path, it can consist of sub-paths divided by slash symbol; each sub-path can be " + "formed as a mix of letters, numbers and the following 3 symbols .-_" + ) + raise ValidationException(invalid_prefix_error) previous_parameter_versions = self._parameters[name] if len(previous_parameter_versions) == 0: previous_parameter = None diff --git a/tests/test_ssm/test_ssm_boto3.py b/tests/test_ssm/test_ssm_boto3.py index e899613e0e41..9715866e9f45 100644 --- a/tests/test_ssm/test_ssm_boto3.py +++ b/tests/test_ssm/test_ssm_boto3.py @@ -299,6 +299,73 @@ def test_put_parameter(): ) +@mock_ssm +def test_put_parameter_invalid_names(): + client = boto3.client("ssm", region_name="us-east-1") + + invalid_prefix_err = ( + 'Parameter name: can\'t be prefixed with "aws" or "ssm" (case-insensitive).' + ) + + client.put_parameter.when.called_with( + Name="ssm_test", Value="value", Type="String" + ).should.throw( + ClientError, invalid_prefix_err, + ) + + client.put_parameter.when.called_with( + Name="SSM_TEST", Value="value", Type="String" + ).should.throw( + ClientError, invalid_prefix_err, + ) + + client.put_parameter.when.called_with( + Name="aws_test", Value="value", Type="String" + ).should.throw( + ClientError, invalid_prefix_err, + ) + + client.put_parameter.when.called_with( + Name="AWS_TEST", Value="value", Type="String" + ).should.throw( + ClientError, invalid_prefix_err, + ) + + ssm_path = "/ssm_test/path/to/var" + client.put_parameter.when.called_with( + Name=ssm_path, Value="value", Type="String" + ).should.throw( + ClientError, + 'Parameter name: can\'t be prefixed with "ssm" (case-insensitive). If formed as a path, it can consist of ' + "sub-paths divided by slash symbol; each sub-path can be formed as a mix of letters, numbers and the following " + "3 symbols .-_", + ) + + ssm_path = "/SSM/PATH/TO/VAR" + client.put_parameter.when.called_with( + Name=ssm_path, Value="value", Type="String" + ).should.throw( + ClientError, + 'Parameter name: can\'t be prefixed with "ssm" (case-insensitive). 
If formed as a path, it can consist of ' + "sub-paths divided by slash symbol; each sub-path can be formed as a mix of letters, numbers and the following " + "3 symbols .-_", + ) + + aws_path = "/aws_test/path/to/var" + client.put_parameter.when.called_with( + Name=aws_path, Value="value", Type="String" + ).should.throw( + ClientError, "No access to reserved parameter name: {}.".format(aws_path), + ) + + aws_path = "/AWS/PATH/TO/VAR" + client.put_parameter.when.called_with( + Name=aws_path, Value="value", Type="String" + ).should.throw( + ClientError, "No access to reserved parameter name: {}.".format(aws_path), + ) + + @mock_ssm def test_put_parameter_china(): client = boto3.client("ssm", region_name="cn-north-1") From ff1f5651429d69fbb390caf6ffc7b430dca8ded9 Mon Sep 17 00:00:00 2001 From: Ninh Khong Date: Tue, 28 Jul 2020 22:59:22 +0700 Subject: [PATCH 469/658] Enhance function get_parameter by parameter name, version or labels (#3191) Co-authored-by: Ninh Khong --- moto/ssm/models.py | 29 ++++++++++++++-- tests/test_ssm/test_ssm_boto3.py | 58 ++++++++++++++++++++++++++++++++ 2 files changed, 85 insertions(+), 2 deletions(-) diff --git a/moto/ssm/models.py b/moto/ssm/models.py index 37d56c2ddd5f..07812c316592 100644 --- a/moto/ssm/models.py +++ b/moto/ssm/models.py @@ -1189,8 +1189,33 @@ def _match_filters(self, parameter, filters=None): return True def get_parameter(self, name, with_decryption): - if name in self._parameters: - return self._parameters[name][-1] + name_parts = name.split(":") + name_prefix = name_parts[0] + + if len(name_parts) > 2: + return None + + if name_prefix in self._parameters: + if len(name_parts) == 1: + return self._parameters[name][-1] + + if len(name_parts) == 2: + version_or_label = name_parts[1] + parameters = self._parameters[name_prefix] + + if version_or_label.isdigit(): + result = list( + filter(lambda x: str(x.version) == version_or_label, parameters) + ) + if len(result) > 0: + return result[-1] + + result = list( + filter(lambda x: version_or_label in x.labels, parameters) + ) + if len(result) > 0: + return result[-1] + return None def label_parameter_version(self, name, version, labels): diff --git a/tests/test_ssm/test_ssm_boto3.py b/tests/test_ssm/test_ssm_boto3.py index 9715866e9f45..cc79ce93ddf3 100644 --- a/tests/test_ssm/test_ssm_boto3.py +++ b/tests/test_ssm/test_ssm_boto3.py @@ -396,6 +396,64 @@ def test_get_parameter(): ) +@mock_ssm +def test_get_parameter_with_version_and_labels(): + client = boto3.client("ssm", region_name="us-east-1") + + client.put_parameter( + Name="test-1", Description="A test parameter", Value="value", Type="String" + ) + client.put_parameter( + Name="test-2", Description="A test parameter", Value="value", Type="String" + ) + + client.label_parameter_version( + Name="test-2", ParameterVersion=1, Labels=["test-label"] + ) + + response = client.get_parameter(Name="test-1:1", WithDecryption=False) + + response["Parameter"]["Name"].should.equal("test-1") + response["Parameter"]["Value"].should.equal("value") + response["Parameter"]["Type"].should.equal("String") + response["Parameter"]["LastModifiedDate"].should.be.a(datetime.datetime) + response["Parameter"]["ARN"].should.equal( + "arn:aws:ssm:us-east-1:1234567890:parameter/test-1" + ) + + response = client.get_parameter(Name="test-2:1", WithDecryption=False) + response["Parameter"]["Name"].should.equal("test-2") + response["Parameter"]["Value"].should.equal("value") + response["Parameter"]["Type"].should.equal("String") + 
response["Parameter"]["LastModifiedDate"].should.be.a(datetime.datetime) + response["Parameter"]["ARN"].should.equal( + "arn:aws:ssm:us-east-1:1234567890:parameter/test-2" + ) + + response = client.get_parameter(Name="test-2:test-label", WithDecryption=False) + response["Parameter"]["Name"].should.equal("test-2") + response["Parameter"]["Value"].should.equal("value") + response["Parameter"]["Type"].should.equal("String") + response["Parameter"]["LastModifiedDate"].should.be.a(datetime.datetime) + response["Parameter"]["ARN"].should.equal( + "arn:aws:ssm:us-east-1:1234567890:parameter/test-2" + ) + + with assert_raises(ClientError) as ex: + client.get_parameter(Name="test-2:2:3", WithDecryption=False) + ex.exception.response["Error"]["Code"].should.equal("ParameterNotFound") + ex.exception.response["Error"]["Message"].should.equal( + "Parameter test-2:2:3 not found." + ) + + with assert_raises(ClientError) as ex: + client.get_parameter(Name="test-2:2", WithDecryption=False) + ex.exception.response["Error"]["Code"].should.equal("ParameterNotFound") + ex.exception.response["Error"]["Message"].should.equal( + "Parameter test-2:2 not found." + ) + + @mock_ssm def test_get_parameters_errors(): client = boto3.client("ssm", region_name="us-east-1") From 736c8b77ce8620ecb45d5babaa37715976543bd3 Mon Sep 17 00:00:00 2001 From: jweite Date: Wed, 29 Jul 2020 02:47:18 -0400 Subject: [PATCH 470/658] Fixed Failures in CloudFormation Provisioning of S3 Buckets When Stack has Long Name... (#3169) * Fixed defect with CloudFormation provisioning of S3 buckets occuring when stack has a long name, resulting in the default S3 bucket name's length exceeding its 63 char limit. * PR 3169 July 23, 2020 2:57a ET comment: added additional asserts to assure provisioned bucket's name complies. Fixed bug in my earlier change that could produce default bucket names with illegal upper-case characters in it. 
Co-authored-by: Joseph Weitekamp
---
 moto/cloudformation/parsing.py                       |  6 +++++
 .../test_cloudformation_stack_crud_boto3.py          | 25 +++++++++++++++++++
 2 files changed, 31 insertions(+)

diff --git a/moto/cloudformation/parsing.py b/moto/cloudformation/parsing.py
index 0a3e0a0c296a..58409901d4a4 100644
--- a/moto/cloudformation/parsing.py
+++ b/moto/cloudformation/parsing.py
@@ -317,6 +317,12 @@ def generate_resource_name(resource_type, stack_name, logical_id):
         if truncated_name_prefix.endswith("-"):
             truncated_name_prefix = truncated_name_prefix[:-1]
         return "{0}-{1}".format(truncated_name_prefix, my_random_suffix)
+    elif resource_type == "AWS::S3::Bucket":
+        right_hand_part_of_name = "-{0}-{1}".format(logical_id, random_suffix())
+        max_stack_name_portion_len = 63 - len(right_hand_part_of_name)
+        return "{0}{1}".format(
+            stack_name[:max_stack_name_portion_len], right_hand_part_of_name
+        ).lower()
     else:
         return "{0}-{1}-{2}".format(stack_name, logical_id, random_suffix())

diff --git a/tests/test_cloudformation/test_cloudformation_stack_crud_boto3.py b/tests/test_cloudformation/test_cloudformation_stack_crud_boto3.py
index 0bfaf9f09abe..41d3fad3ec8f 100644
--- a/tests/test_cloudformation/test_cloudformation_stack_crud_boto3.py
+++ b/tests/test_cloudformation/test_cloudformation_stack_crud_boto3.py
@@ -654,6 +654,31 @@ def test_boto3_create_stack():
     )


+@mock_cloudformation
+def test_boto3_create_stack_s3_long_name():
+    cf_conn = boto3.client("cloudformation", region_name="us-east-1")
+
+    stack_name = "MyLongStackName01234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012"
+
+    template = '{"Resources":{"HelloBucket":{"Type":"AWS::S3::Bucket"}}}'
+
+    cf_conn.create_stack(StackName=stack_name, TemplateBody=template)
+
+    cf_conn.get_template(StackName=stack_name)["TemplateBody"].should.equal(
+        json.loads(template, object_pairs_hook=OrderedDict)
+    )
+    provisioned_resource = cf_conn.list_stack_resources(StackName=stack_name)[
+        "StackResourceSummaries"
+    ][0]
+    provisioned_bucket_name = provisioned_resource["PhysicalResourceId"]
+    len(provisioned_bucket_name).should.be.lower_than(64)
+    logical_name_lower_case = provisioned_resource["LogicalResourceId"].lower()
+    bucket_name_stack_name_prefix = provisioned_bucket_name[
+        : provisioned_bucket_name.index("-" + logical_name_lower_case)
+    ]
+    stack_name.lower().should.contain(bucket_name_stack_name_prefix)
+
+
 @mock_cloudformation
 def test_boto3_create_stack_with_yaml():
     cf_conn = boto3.client("cloudformation", region_name="us-east-1")

From 08a08b6af8572f71870344b24c9de46e05315cf3 Mon Sep 17 00:00:00 2001
From: Waldemar Hummer
Date: Wed, 29 Jul 2020 12:44:02 +0200
Subject: [PATCH 471/658] Fix SQS tag list from CloudFormation resource
 creation (#3197)

* fix sqs tag list from cloudformation resource creation

The `create_from_cloudformation_json` method of the Sqs resource does not reconcile the format of the Tags field in the resource template with the format the Sqs resource expects: in a CloudFormation resource template, Tags is specified as a list of dicts, while the Sqs resource expects a single dict. As a result, creating a queue with tags via `create_from_cloudformation_json` and later calling `list_queue_tags` crashes, because `items` is called on `queue.tags`, which is actually a list of dicts. A minimal sketch of the conversion follows below.
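  As illustration, a minimal, self-contained sketch of the shape change the fix performs (the tag keys and values come from the new test template below; the real conversion lives in the tags_from_cloudformation_tags_list helper added by this patch):

      # CloudFormation template form: a list of {"Key": ..., "Value": ...} dicts.
      cfn_tags = [
          {"Key": "keyname1", "Value": "value1"},
          {"Key": "keyname2", "Value": "value2"},
      ]

      # Queue form: a single flat dict, as list_queue_tags expects.
      tags = {entry["Key"]: entry["Value"] for entry in cfn_tags}

      assert tags == {"keyname1": "value1", "keyname2": "value2"}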
* fix comment * fix linter * minor Co-authored-by: Hudo Assenco --- moto/core/utils.py | 11 +++++++++++ moto/sqs/models.py | 11 +++++++++-- tests/test_sqs/test_sqs.py | 38 +++++++++++++++++++++++++++++++++++++- 3 files changed, 57 insertions(+), 3 deletions(-) diff --git a/moto/core/utils.py b/moto/core/utils.py index c9bf9347365a..235b895eca3c 100644 --- a/moto/core/utils.py +++ b/moto/core/utils.py @@ -356,3 +356,14 @@ def tags_from_query_string( else: response_values[tag_key] = None return response_values + + +def tags_from_cloudformation_tags_list(tags_list): + """Return tags in dict form from cloudformation resource tags form (list of dicts)""" + tags = {} + for entry in tags_list: + key = entry["Key"] + value = entry["Value"] + tags[key] = value + + return tags diff --git a/moto/sqs/models.py b/moto/sqs/models.py index 0854164576e3..4befbb50ac08 100644 --- a/moto/sqs/models.py +++ b/moto/sqs/models.py @@ -18,6 +18,7 @@ get_random_message_id, unix_time, unix_time_millis, + tags_from_cloudformation_tags_list, ) from .utils import generate_receipt_handle from .exceptions import ( @@ -357,11 +358,17 @@ def _setup_dlq(self, policy): def create_from_cloudformation_json( cls, resource_name, cloudformation_json, region_name ): - properties = cloudformation_json["Properties"] + properties = deepcopy(cloudformation_json["Properties"]) + # remove Tags from properties and convert tags list to dict + tags = properties.pop("Tags", []) + tags_dict = tags_from_cloudformation_tags_list(tags) sqs_backend = sqs_backends[region_name] return sqs_backend.create_queue( - name=properties["QueueName"], region=region_name, **properties + name=properties["QueueName"], + tags=tags_dict, + region=region_name, + **properties ) @classmethod diff --git a/tests/test_sqs/test_sqs.py b/tests/test_sqs/test_sqs.py index 9e3896154c82..61edcaa9b2a9 100644 --- a/tests/test_sqs/test_sqs.py +++ b/tests/test_sqs/test_sqs.py @@ -17,12 +17,34 @@ from boto.sqs.message import Message, RawMessage from botocore.exceptions import ClientError from freezegun import freeze_time -from moto import mock_sqs, mock_sqs_deprecated, settings +from moto import mock_sqs, mock_sqs_deprecated, mock_cloudformation, settings from nose import SkipTest from nose.tools import assert_raises from tests.helpers import requires_boto_gte from moto.core import ACCOUNT_ID +sqs_template_with_tags = """ +{ + "AWSTemplateFormatVersion": "2010-09-09", + "Resources": { + "SQSQueue": { + "Type": "AWS::SQS::Queue", + "Properties": { + "Tags" : [ + { + "Key" : "keyname1", + "Value" : "value1" + }, + { + "Key" : "keyname2", + "Value" : "value2" + } + ] + } + } + } +}""" + @mock_sqs def test_create_fifo_queue_fail(): @@ -1933,3 +1955,17 @@ def test_send_messages_to_fifo_without_message_group_id(): ex.response["Error"]["Message"].should.equal( "The request must contain the parameter MessageGroupId." 
)
+
+
+@mock_sqs
+@mock_cloudformation
+def test_create_from_cloudformation_json_with_tags():
+    cf = boto3.client("cloudformation", region_name="us-east-1")
+    client = boto3.client("sqs", region_name="us-east-1")
+
+    cf.create_stack(StackName="test-sqs", TemplateBody=sqs_template_with_tags)
+
+    queue_url = client.list_queues()["QueueUrls"][0]
+
+    queue_tags = client.list_queue_tags(QueueUrl=queue_url)["Tags"]
+    queue_tags.should.equal({"keyname1": "value1", "keyname2": "value2"})

From 50d71eccbee12ce5cf2c1ecf61bfcbd60317976d Mon Sep 17 00:00:00 2001
From: Matt Williams
Date: Wed, 29 Jul 2020 18:36:37 +0100
Subject: [PATCH 472/658] Fix XML schema for ec2.describe_instance_types
 (#3194)

* Add test for describe_instance_types

It currently fails due to an invalid XML schema

* Add more detail to test

* Fix the XML schema for describe_instance_types
---
 moto/ec2/responses/instances.py       | 26 +++++++++++++++++++-------
 tests/test_ec2/test_instance_types.py | 18 ++++++++++++++++++
 2 files changed, 37 insertions(+), 7 deletions(-)
 create mode 100644 tests/test_ec2/test_instance_types.py

diff --git a/moto/ec2/responses/instances.py b/moto/ec2/responses/instances.py
index 9090847be45b..e9843399f7ee 100644
--- a/moto/ec2/responses/instances.py
+++ b/moto/ec2/responses/instances.py
@@ -818,13 +818,25 @@ def _convert_to_bool(bool_str):
     <instanceTypeSet>
     {% for instance_type in instance_types %}
         <item>
-            <name>{{ instance_type.name }}</name>
-            <cores>{{ instance_type.cores }}</cores>
-            <memory>{{ instance_type.memory }}</memory>
-            <disk>{{ instance_type.disk }}</disk>
-            <storageCount>{{ instance_type.storageCount }}</storageCount>
-            <maxIpAddresses>{{ instance_type.maxIpAddresses }}</maxIpAddresses>
-            <ebsOptimizedAvailable>{{ instance_type.ebsOptimizedAvailable }}</ebsOptimizedAvailable>
+            <instanceType>{{ instance_type.name }}</instanceType>
+            <vCpuInfo>
+                <defaultVCpus>{{ instance_type.cores }}</defaultVCpus>
+                <defaultCores>{{ instance_type.cores }}</defaultCores>
+                <defaultThreadsPerCore>1</defaultThreadsPerCore>
+            </vCpuInfo>
+            <memoryInfo>
+                <sizeInMiB>{{ instance_type.memory }}</sizeInMiB>
+            </memoryInfo>
+            <instanceStorageInfo>
+                <totalSizeInGB>{{ instance_type.disk }}</totalSizeInGB>
+            </instanceStorageInfo>
+            <processorInfo>
+                <supportedArchitectures>
+                    <item>
+                        x86_64
+                    </item>
+                </supportedArchitectures>
+            </processorInfo>
         </item>
     {% endfor %}
     </instanceTypeSet>
diff --git a/tests/test_ec2/test_instance_types.py b/tests/test_ec2/test_instance_types.py
new file mode 100644
index 000000000000..1385d6113a5e
--- /dev/null
+++ b/tests/test_ec2/test_instance_types.py
@@ -0,0 +1,18 @@
+from __future__ import unicode_literals
+
+import boto3
+import sure  # noqa
+
+from moto import mock_ec2
+
+
+@mock_ec2
+def test_describe_instance_types():
+    client = boto3.client("ec2", "us-east-1")
+    instance_types = client.describe_instance_types()
+
+    instance_types.should.have.key("InstanceTypes")
+    instance_types["InstanceTypes"].should_not.be.empty
+    instance_types["InstanceTypes"][0].should.have.key("InstanceType")
+    instance_types["InstanceTypes"][0].should.have.key("MemoryInfo")
+    instance_types["InstanceTypes"][0]["MemoryInfo"].should.have.key("SizeInMiB")

From a9ac09952b31ddd9fb2ab5bf92bf603ca72e10d3 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Anton=20Gr=C3=BCbel?=
Date: Fri, 31 Jul 2020 08:18:52 +0200
Subject: [PATCH 473/658] Fix resource groups tests (#3204)

---
 moto/resourcegroups/urls.py | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/moto/resourcegroups/urls.py b/moto/resourcegroups/urls.py
index b40179145fc2..3e5f7b7f50a6 100644
--- a/moto/resourcegroups/urls.py
+++ b/moto/resourcegroups/urls.py
@@ -4,9 +4,14 @@
 url_bases = ["https?://resource-groups(-fips)?.(.+).amazonaws.com"]

 url_paths = {
+    "{0}/delete-group$": ResourceGroupsResponse.dispatch,
+    "{0}/get-group$": ResourceGroupsResponse.dispatch,
+    "{0}/get-group-query$": ResourceGroupsResponse.dispatch,
     "{0}/groups$": ResourceGroupsResponse.dispatch,
     "{0}/groups/(?P<resource_group_name>[^/]+)$": ResourceGroupsResponse.dispatch,
     "{0}/groups/(?P<resource_group_name>[^/]+)/query$": ResourceGroupsResponse.dispatch,
     "{0}/groups-list$": ResourceGroupsResponse.dispatch,
"{0}/resources/(?P[^/]+)/tags$": ResourceGroupsResponse.dispatch, + "{0}/update-group$": ResourceGroupsResponse.dispatch, + "{0}/update-group-query$": ResourceGroupsResponse.dispatch, } From 943ecb7ea798e6832a72663b66dfaa44d1f3fe3e Mon Sep 17 00:00:00 2001 From: Chris Kilding <590569+chriskilding@users.noreply.github.com> Date: Fri, 31 Jul 2020 15:31:18 +0100 Subject: [PATCH 474/658] Support --filters option in secretsmanager:ListSecrets (#3173) * Feature: Support --filters opton in secretsmanager:ListSecrets * Implement some of the secret filters * Check listSecrets filters combine with an implicit AND operator * Test all filter and multi-value filter and multi-word filter * Fix matcher behavior, restructure code * Implement remaining listSecrets filter cases * Linter fixes * Use contains-in-any-order assertions for test_list_secrets * Linter fix again * Attempt Python 2 fix for assert_items_equal * Remove docstrings from test_list_secrets tests as they make the test reports weird * Test and handle listSecrets filter with no values --- moto/secretsmanager/exceptions.py | 5 + moto/secretsmanager/list_secrets/__init__.py | 0 moto/secretsmanager/list_secrets/filters.py | 44 +++ moto/secretsmanager/models.py | 75 ++++-- moto/secretsmanager/responses.py | 31 ++- .../test_secretsmanager/test_list_secrets.py | 251 ++++++++++++++++++ .../test_secretsmanager.py | 30 --- 7 files changed, 378 insertions(+), 58 deletions(-) create mode 100644 moto/secretsmanager/list_secrets/__init__.py create mode 100644 moto/secretsmanager/list_secrets/filters.py create mode 100644 tests/test_secretsmanager/test_list_secrets.py diff --git a/moto/secretsmanager/exceptions.py b/moto/secretsmanager/exceptions.py index bf717e20c4b3..6618cd3aca70 100644 --- a/moto/secretsmanager/exceptions.py +++ b/moto/secretsmanager/exceptions.py @@ -57,3 +57,8 @@ def __init__(self, message): super(InvalidRequestException, self).__init__( "InvalidRequestException", message ) + + +class ValidationException(SecretsManagerClientError): + def __init__(self, message): + super(ValidationException, self).__init__("ValidationException", message) diff --git a/moto/secretsmanager/list_secrets/__init__.py b/moto/secretsmanager/list_secrets/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/moto/secretsmanager/list_secrets/filters.py b/moto/secretsmanager/list_secrets/filters.py new file mode 100644 index 000000000000..813b1f544cf0 --- /dev/null +++ b/moto/secretsmanager/list_secrets/filters.py @@ -0,0 +1,44 @@ +def _matcher(pattern, str): + for word in pattern.split(" "): + if word not in str: + return False + return True + + +def name(secret, names): + for n in names: + if _matcher(n, secret["name"]): + return True + return False + + +def description(secret, descriptions): + for d in descriptions: + if _matcher(d, secret["description"]): + return True + return False + + +def tag_key(secret, tag_keys): + for k in tag_keys: + for tag in secret["tags"]: + if _matcher(k, tag["Key"]): + return True + return False + + +def tag_value(secret, tag_values): + for v in tag_values: + for tag in secret["tags"]: + if _matcher(v, tag["Value"]): + return True + return False + + +def all(secret, values): + return ( + name(secret, values) + or description(secret, values) + or tag_key(secret, values) + or tag_value(secret, values) + ) diff --git a/moto/secretsmanager/models.py b/moto/secretsmanager/models.py index 8641916a7d3b..0339dc575b48 100644 --- a/moto/secretsmanager/models.py +++ b/moto/secretsmanager/models.py @@ -18,6 +18,31 
@@ ClientError, ) from .utils import random_password, secret_arn, get_secret_name_from_arn +from .list_secrets.filters import all, tag_key, tag_value, description, name + + +_filter_functions = { + "all": all, + "name": name, + "description": description, + "tag-key": tag_key, + "tag-value": tag_value, +} + + +def filter_keys(): + return list(_filter_functions.keys()) + + +def _matches(secret, filters): + is_match = True + + for f in filters: + # Filter names are pre-validated in the resource layer + filter_function = _filter_functions.get(f["Key"]) + is_match = is_match and filter_function(secret, f["Values"]) + + return is_match class SecretsManager(BaseModel): @@ -442,35 +467,35 @@ def list_secret_version_ids(self, secret_id): return response - def list_secrets(self, max_results, next_token): + def list_secrets(self, filters, max_results, next_token): # TODO implement pagination and limits secret_list = [] for secret in self.secrets.values(): - - versions_to_stages = {} - for version_id, version in secret["versions"].items(): - versions_to_stages[version_id] = version["version_stages"] - - secret_list.append( - { - "ARN": secret_arn(self.region, secret["secret_id"]), - "DeletedDate": secret.get("deleted_date", None), - "Description": secret.get("description", ""), - "KmsKeyId": "", - "LastAccessedDate": None, - "LastChangedDate": None, - "LastRotatedDate": None, - "Name": secret["name"], - "RotationEnabled": secret["rotation_enabled"], - "RotationLambdaARN": secret["rotation_lambda_arn"], - "RotationRules": { - "AutomaticallyAfterDays": secret["auto_rotate_after_days"] - }, - "SecretVersionsToStages": versions_to_stages, - "Tags": secret["tags"], - } - ) + if _matches(secret, filters): + versions_to_stages = {} + for version_id, version in secret["versions"].items(): + versions_to_stages[version_id] = version["version_stages"] + + secret_list.append( + { + "ARN": secret_arn(self.region, secret["secret_id"]), + "DeletedDate": secret.get("deleted_date", None), + "Description": secret.get("description", ""), + "KmsKeyId": "", + "LastAccessedDate": None, + "LastChangedDate": None, + "LastRotatedDate": None, + "Name": secret["name"], + "RotationEnabled": secret["rotation_enabled"], + "RotationLambdaARN": secret["rotation_lambda_arn"], + "RotationRules": { + "AutomaticallyAfterDays": secret["auto_rotate_after_days"] + }, + "SecretVersionsToStages": versions_to_stages, + "Tags": secret["tags"], + } + ) return secret_list, None diff --git a/moto/secretsmanager/responses.py b/moto/secretsmanager/responses.py index 9a899c90dac8..fcf991ea2557 100644 --- a/moto/secretsmanager/responses.py +++ b/moto/secretsmanager/responses.py @@ -1,13 +1,36 @@ from __future__ import unicode_literals from moto.core.responses import BaseResponse -from moto.secretsmanager.exceptions import InvalidRequestException +from moto.secretsmanager.exceptions import ( + InvalidRequestException, + InvalidParameterException, + ValidationException, +) -from .models import secretsmanager_backends +from .models import secretsmanager_backends, filter_keys import json +def _validate_filters(filters): + for idx, f in enumerate(filters): + filter_key = f.get("Key", None) + filter_values = f.get("Values", None) + if filter_key is None: + raise InvalidParameterException("Invalid filter key") + if filter_key not in filter_keys(): + raise ValidationException( + "1 validation error detected: Value '{}' at 'filters.{}.member.key' failed to satisfy constraint: " + "Member must satisfy enum value set: [all, name, tag-key, description, 
tag-value]".format( + filter_key, idx + 1 + ) + ) + if filter_values is None: + raise InvalidParameterException( + "Invalid filter values for key: {}".format(filter_key) + ) + + class SecretsManagerResponse(BaseResponse): def get_secret_value(self): secret_id = self._get_param("SecretId") @@ -102,10 +125,12 @@ def list_secret_version_ids(self): ) def list_secrets(self): + filters = self._get_param("Filters", if_none=[]) + _validate_filters(filters) max_results = self._get_int_param("MaxResults") next_token = self._get_param("NextToken") secret_list, next_token = secretsmanager_backends[self.region].list_secrets( - max_results=max_results, next_token=next_token + filters=filters, max_results=max_results, next_token=next_token ) return json.dumps(dict(SecretList=secret_list, NextToken=next_token)) diff --git a/tests/test_secretsmanager/test_list_secrets.py b/tests/test_secretsmanager/test_list_secrets.py new file mode 100644 index 000000000000..da3c4eb7efa1 --- /dev/null +++ b/tests/test_secretsmanager/test_list_secrets.py @@ -0,0 +1,251 @@ +# -*- coding: utf-8 -*- +from __future__ import unicode_literals + +import boto3 + +from moto import mock_secretsmanager +from botocore.exceptions import ClientError +import sure # noqa +from nose.tools import assert_raises + +try: + from nose.tools import assert_items_equal +except ImportError: + from nose.tools import assert_count_equal as assert_items_equal + + +def boto_client(): + return boto3.client("secretsmanager", region_name="us-west-2") + + +@mock_secretsmanager +def test_empty(): + conn = boto_client() + + secrets = conn.list_secrets() + + assert_items_equal(secrets["SecretList"], []) + + +@mock_secretsmanager +def test_list_secrets(): + conn = boto_client() + + conn.create_secret(Name="test-secret", SecretString="foosecret") + + conn.create_secret( + Name="test-secret-2", + SecretString="barsecret", + Tags=[{"Key": "a", "Value": "1"}], + ) + + secrets = conn.list_secrets() + + assert secrets["SecretList"][0]["ARN"] is not None + assert secrets["SecretList"][0]["Name"] == "test-secret" + assert secrets["SecretList"][1]["ARN"] is not None + assert secrets["SecretList"][1]["Name"] == "test-secret-2" + assert secrets["SecretList"][1]["Tags"] == [{"Key": "a", "Value": "1"}] + + +@mock_secretsmanager +def test_with_name_filter(): + conn = boto_client() + + conn.create_secret(Name="foo", SecretString="secret") + conn.create_secret(Name="bar", SecretString="secret") + + secrets = conn.list_secrets(Filters=[{"Key": "name", "Values": ["foo"]}]) + + secret_names = list(map(lambda s: s["Name"], secrets["SecretList"])) + assert_items_equal(secret_names, ["foo"]) + + +@mock_secretsmanager +def test_with_tag_key_filter(): + conn = boto_client() + + conn.create_secret( + Name="foo", SecretString="secret", Tags=[{"Key": "baz", "Value": "1"}] + ) + conn.create_secret(Name="bar", SecretString="secret") + + secrets = conn.list_secrets(Filters=[{"Key": "tag-key", "Values": ["baz"]}]) + + secret_names = list(map(lambda s: s["Name"], secrets["SecretList"])) + assert_items_equal(secret_names, ["foo"]) + + +@mock_secretsmanager +def test_with_tag_value_filter(): + conn = boto_client() + + conn.create_secret( + Name="foo", SecretString="secret", Tags=[{"Key": "1", "Value": "baz"}] + ) + conn.create_secret(Name="bar", SecretString="secret") + + secrets = conn.list_secrets(Filters=[{"Key": "tag-value", "Values": ["baz"]}]) + + secret_names = list(map(lambda s: s["Name"], secrets["SecretList"])) + assert_items_equal(secret_names, ["foo"]) + + +@mock_secretsmanager +def 
test_with_description_filter(): + conn = boto_client() + + conn.create_secret(Name="foo", SecretString="secret", Description="baz qux") + conn.create_secret(Name="bar", SecretString="secret") + + secrets = conn.list_secrets(Filters=[{"Key": "description", "Values": ["baz"]}]) + + secret_names = list(map(lambda s: s["Name"], secrets["SecretList"])) + assert_items_equal(secret_names, ["foo"]) + + +@mock_secretsmanager +def test_with_all_filter(): + # The 'all' filter will match a secret that contains ANY field with the criteria. In other words an implicit OR. + + conn = boto_client() + + conn.create_secret(Name="foo", SecretString="secret") + conn.create_secret(Name="bar", SecretString="secret", Description="foo") + conn.create_secret( + Name="baz", SecretString="secret", Tags=[{"Key": "foo", "Value": "1"}] + ) + conn.create_secret( + Name="qux", SecretString="secret", Tags=[{"Key": "1", "Value": "foo"}] + ) + conn.create_secret( + Name="multi", SecretString="secret", Tags=[{"Key": "foo", "Value": "foo"}] + ) + conn.create_secret(Name="none", SecretString="secret") + + secrets = conn.list_secrets(Filters=[{"Key": "all", "Values": ["foo"]}]) + + secret_names = list(map(lambda s: s["Name"], secrets["SecretList"])) + assert_items_equal(secret_names, ["foo", "bar", "baz", "qux", "multi"]) + + +@mock_secretsmanager +def test_with_no_filter_key(): + conn = boto_client() + + with assert_raises(ClientError) as ire: + conn.list_secrets(Filters=[{"Values": ["foo"]}]) + + ire.exception.response["Error"]["Code"].should.equal("InvalidParameterException") + ire.exception.response["Error"]["Message"].should.equal("Invalid filter key") + + +@mock_secretsmanager +def test_with_no_filter_values(): + conn = boto_client() + + conn.create_secret(Name="foo", SecretString="secret", Description="hello") + + with assert_raises(ClientError) as ire: + conn.list_secrets(Filters=[{"Key": "description"}]) + + ire.exception.response["Error"]["Code"].should.equal("InvalidParameterException") + ire.exception.response["Error"]["Message"].should.equal( + "Invalid filter values for key: description" + ) + + +@mock_secretsmanager +def test_with_invalid_filter_key(): + conn = boto_client() + + with assert_raises(ClientError) as ire: + conn.list_secrets(Filters=[{"Key": "invalid", "Values": ["foo"]}]) + + ire.exception.response["Error"]["Code"].should.equal("ValidationException") + ire.exception.response["Error"]["Message"].should.equal( + "1 validation error detected: Value 'invalid' at 'filters.1.member.key' failed to satisfy constraint: Member " + "must satisfy enum value set: [all, name, tag-key, description, tag-value]" + ) + + +@mock_secretsmanager +def test_with_duplicate_filter_keys(): + # Multiple filters with the same key combine with an implicit AND operator + + conn = boto_client() + + conn.create_secret(Name="foo", SecretString="secret", Description="one two") + conn.create_secret(Name="bar", SecretString="secret", Description="one") + conn.create_secret(Name="baz", SecretString="secret", Description="two") + conn.create_secret(Name="qux", SecretString="secret", Description="unrelated") + + secrets = conn.list_secrets( + Filters=[ + {"Key": "description", "Values": ["one"]}, + {"Key": "description", "Values": ["two"]}, + ] + ) + + secret_names = list(map(lambda s: s["Name"], secrets["SecretList"])) + assert_items_equal(secret_names, ["foo"]) + + +@mock_secretsmanager +def test_with_multiple_filters(): + # Multiple filters combine with an implicit AND operator + + conn = boto_client() + + conn.create_secret( + 
Name="foo", SecretString="secret", Tags=[{"Key": "right", "Value": "right"}] + ) + conn.create_secret( + Name="bar", SecretString="secret", Tags=[{"Key": "right", "Value": "wrong"}] + ) + conn.create_secret( + Name="baz", SecretString="secret", Tags=[{"Key": "wrong", "Value": "right"}] + ) + conn.create_secret( + Name="qux", SecretString="secret", Tags=[{"Key": "wrong", "Value": "wrong"}] + ) + + secrets = conn.list_secrets( + Filters=[ + {"Key": "tag-key", "Values": ["right"]}, + {"Key": "tag-value", "Values": ["right"]}, + ] + ) + + secret_names = list(map(lambda s: s["Name"], secrets["SecretList"])) + assert_items_equal(secret_names, ["foo"]) + + +@mock_secretsmanager +def test_with_filter_with_multiple_values(): + conn = boto_client() + + conn.create_secret(Name="foo", SecretString="secret") + conn.create_secret(Name="bar", SecretString="secret") + conn.create_secret(Name="baz", SecretString="secret") + + secrets = conn.list_secrets(Filters=[{"Key": "name", "Values": ["foo", "bar"]}]) + + secret_names = list(map(lambda s: s["Name"], secrets["SecretList"])) + assert_items_equal(secret_names, ["foo", "bar"]) + + +@mock_secretsmanager +def test_with_filter_with_value_with_multiple_words(): + conn = boto_client() + + conn.create_secret(Name="foo", SecretString="secret", Description="one two") + conn.create_secret(Name="bar", SecretString="secret", Description="one and two") + conn.create_secret(Name="baz", SecretString="secret", Description="one") + conn.create_secret(Name="qux", SecretString="secret", Description="two") + conn.create_secret(Name="none", SecretString="secret", Description="unrelated") + + secrets = conn.list_secrets(Filters=[{"Key": "description", "Values": ["one two"]}]) + + secret_names = list(map(lambda s: s["Name"], secrets["SecretList"])) + assert_items_equal(secret_names, ["foo", "bar"]) diff --git a/tests/test_secretsmanager/test_secretsmanager.py b/tests/test_secretsmanager/test_secretsmanager.py index 59992e094b20..0bd66b12885b 100644 --- a/tests/test_secretsmanager/test_secretsmanager.py +++ b/tests/test_secretsmanager/test_secretsmanager.py @@ -459,36 +459,6 @@ def test_describe_secret_that_does_not_match(): result = conn.get_secret_value(SecretId="i-dont-match") -@mock_secretsmanager -def test_list_secrets_empty(): - conn = boto3.client("secretsmanager", region_name="us-west-2") - - secrets = conn.list_secrets() - - assert secrets["SecretList"] == [] - - -@mock_secretsmanager -def test_list_secrets(): - conn = boto3.client("secretsmanager", region_name="us-west-2") - - conn.create_secret(Name="test-secret", SecretString="foosecret") - - conn.create_secret( - Name="test-secret-2", - SecretString="barsecret", - Tags=[{"Key": "a", "Value": "1"}], - ) - - secrets = conn.list_secrets() - - assert secrets["SecretList"][0]["ARN"] is not None - assert secrets["SecretList"][0]["Name"] == "test-secret" - assert secrets["SecretList"][1]["ARN"] is not None - assert secrets["SecretList"][1]["Name"] == "test-secret-2" - assert secrets["SecretList"][1]["Tags"] == [{"Key": "a", "Value": "1"}] - - @mock_secretsmanager def test_restore_secret(): conn = boto3.client("secretsmanager", region_name="us-west-2") From 8162947ebb320c769b951ead443e31acc33c967f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Anton=20Gr=C3=BCbel?= Date: Fri, 31 Jul 2020 17:32:57 +0200 Subject: [PATCH 475/658] Organizations - implement Delegated Administrator functionality (#3200) * Add organizations.register_delegated_administrator * Add organizations.list_delegated_administrators * Add 
organizations.list_delegated_services_for_account

* Add organizations.deregister_delegated_administrator

* Fix Python2 incompatibility
---
 moto/organizations/exceptions.py              |  48 +++
 moto/organizations/models.py                  | 207 +++++++--
 moto/organizations/responses.py               |  28 ++
 .../test_organizations_boto3.py               | 394 +++++++++++++++++-
 4 files changed, 619 insertions(+), 58 deletions(-)

diff --git a/moto/organizations/exceptions.py b/moto/organizations/exceptions.py
index 036eeccbcb93..2d1ee7328351 100644
--- a/moto/organizations/exceptions.py
+++ b/moto/organizations/exceptions.py
@@ -2,6 +2,54 @@
 from moto.core.exceptions import JsonRESTError


+class AccountAlreadyRegisteredException(JsonRESTError):
+    code = 400
+
+    def __init__(self):
+        super(AccountAlreadyRegisteredException, self).__init__(
+            "AccountAlreadyRegisteredException",
+            "The provided account is already a delegated administrator for your organization.",
+        )
+
+
+class AccountNotRegisteredException(JsonRESTError):
+    code = 400
+
+    def __init__(self):
+        super(AccountNotRegisteredException, self).__init__(
+            "AccountNotRegisteredException",
+            "The provided account is not a registered delegated administrator for your organization.",
+        )
+
+
+class AccountNotFoundException(JsonRESTError):
+    code = 400
+
+    def __init__(self):
+        super(AccountNotFoundException, self).__init__(
+            "AccountNotFoundException", "You specified an account that doesn't exist."
+        )
+
+
+class AWSOrganizationsNotInUseException(JsonRESTError):
+    code = 400
+
+    def __init__(self):
+        super(AWSOrganizationsNotInUseException, self).__init__(
+            "AWSOrganizationsNotInUseException",
+            "Your account is not a member of an organization.",
+        )
+
+
+class ConstraintViolationException(JsonRESTError):
+    code = 400
+
+    def __init__(self, message):
+        super(ConstraintViolationException, self).__init__(
+            "ConstraintViolationException", message
+        )
+
+
 class InvalidInputException(JsonRESTError):
     code = 400

diff --git a/moto/organizations/models.py b/moto/organizations/models.py
index 6c1dab15d0af..6c8029e3d26b 100644
--- a/moto/organizations/models.py
+++ b/moto/organizations/models.py
@@ -4,7 +4,7 @@ import re
 import json

-from moto.core import BaseBackend, BaseModel
+from moto.core import BaseBackend, BaseModel, ACCOUNT_ID
 from moto.core.exceptions import RESTError
 from moto.core.utils import unix_time
 from moto.organizations import utils
@@ -12,6 +12,11 @@
     InvalidInputException,
     DuplicateOrganizationalUnitException,
     DuplicatePolicyException,
+    AccountNotFoundException,
+    ConstraintViolationException,
+    AccountAlreadyRegisteredException,
+    AWSOrganizationsNotInUseException,
+    AccountNotRegisteredException,
 )


@@ -85,15 +90,13 @@ def create_account_status(self):

     def describe(self):
         return {
-            "Account": {
-                "Id": self.id,
-                "Arn": self.arn,
-                "Email": self.email,
-                "Name": self.name,
-                "Status": self.status,
-                "JoinedMethod": self.joined_method,
-                "JoinedTimestamp": unix_time(self.create_time),
-            }
+            "Id": self.id,
+            "Arn": self.arn,
+            "Email": self.email,
+            "Name": self.name,
+            "Status": self.status,
+            "JoinedMethod": self.joined_method,
+            "JoinedTimestamp": unix_time(self.create_time),
         }


@@ -221,6 +224,56 @@ def trusted_service(service_principal):
         return service_principal in FakeServiceAccess.TRUSTED_SERVICES


+class FakeDelegatedAdministrator(BaseModel):
+    # List of services that support a different account being a delegated administrator
+    # https://docs.aws.amazon.com/organizations/latest/userguide/orgs_integrated-services-list.html
+    SUPPORTED_SERVICES = [
"config-multiaccountsetup.amazonaws.com", + "guardduty.amazonaws.com", + "access-analyzer.amazonaws.com", + "macie.amazonaws.com", + "servicecatalog.amazonaws.com", + "ssm.amazonaws.com", + ] + + def __init__(self, account): + self.account = account + self.enabled_date = datetime.datetime.utcnow() + self.services = {} + + def add_service_principal(self, service_principal): + if service_principal in self.services: + raise AccountAlreadyRegisteredException + + if not self.supported_service(service_principal): + raise InvalidInputException( + "You specified an unrecognized service principal." + ) + + self.services[service_principal] = { + "ServicePrincipal": service_principal, + "DelegationEnabledDate": unix_time(datetime.datetime.utcnow()), + } + + def remove_service_principal(self, service_principal): + if service_principal not in self.services: + raise InvalidInputException( + "You specified an unrecognized service principal." + ) + + self.services.pop(service_principal) + + def describe(self): + admin = self.account.describe() + admin["DelegationEnabledDate"] = unix_time(self.enabled_date) + + return admin + + @staticmethod + def supported_service(service_principal): + return service_principal in FakeDelegatedAdministrator.SUPPORTED_SERVICES + + class OrganizationsBackend(BaseBackend): def __init__(self): self.org = None @@ -228,6 +281,7 @@ def __init__(self): self.ou = [] self.policies = [] self.services = [] + self.admins = [] def create_organization(self, **kwargs): self.org = FakeOrganization(kwargs["FeatureSet"]) @@ -259,10 +313,7 @@ def create_organization(self, **kwargs): def describe_organization(self): if not self.org: - raise RESTError( - "AWSOrganizationsNotInUseException", - "Your account is not a member of an organization.", - ) + raise AWSOrganizationsNotInUseException return self.org.describe() def list_roots(self): @@ -325,10 +376,7 @@ def get_account_by_id(self, account_id): (account for account in self.accounts if account.id == account_id), None ) if account is None: - raise RESTError( - "AccountNotFoundException", - "You specified an account that doesn't exist.", - ) + raise AccountNotFoundException return account def get_account_by_attr(self, attr, value): @@ -341,15 +389,12 @@ def get_account_by_attr(self, attr, value): None, ) if account is None: - raise RESTError( - "AccountNotFoundException", - "You specified an account that doesn't exist.", - ) + raise AccountNotFoundException return account def describe_account(self, **kwargs): account = self.get_account_by_id(kwargs["AccountId"]) - return account.describe() + return dict(Account=account.describe()) def describe_create_account_status(self, **kwargs): account = self.get_account_by_attr( @@ -358,15 +403,13 @@ def describe_create_account_status(self, **kwargs): return account.create_account_status def list_accounts(self): - return dict( - Accounts=[account.describe()["Account"] for account in self.accounts] - ) + return dict(Accounts=[account.describe() for account in self.accounts]) def list_accounts_for_parent(self, **kwargs): parent_id = self.validate_parent_id(kwargs["ParentId"]) return dict( Accounts=[ - account.describe()["Account"] + account.describe() for account in self.accounts if account.parent_id == parent_id ] @@ -399,7 +442,7 @@ def list_children(self, **kwargs): elif kwargs["ChildType"] == "ORGANIZATIONAL_UNIT": obj_list = self.ou else: - raise RESTError("InvalidInputException", "You specified an invalid value.") + raise InvalidInputException("You specified an invalid value.") return dict( Children=[ 
{"Id": obj.id, "Type": kwargs["ChildType"]} @@ -427,7 +470,7 @@ def describe_policy(self, **kwargs): "You specified a policy that doesn't exist.", ) else: - raise RESTError("InvalidInputException", "You specified an invalid value.") + raise InvalidInputException("You specified an invalid value.") return policy.describe() def get_policy_by_id(self, policy_id): @@ -472,12 +515,9 @@ def attach_policy(self, **kwargs): account.attached_policies.append(policy) policy.attachments.append(account) else: - raise RESTError( - "AccountNotFoundException", - "You specified an account that doesn't exist.", - ) + raise AccountNotFoundException else: - raise RESTError("InvalidInputException", "You specified an invalid value.") + raise InvalidInputException("You specified an invalid value.") def list_policies(self, **kwargs): return dict( @@ -510,12 +550,9 @@ def list_policies_for_target(self, **kwargs): elif re.compile(utils.ACCOUNT_ID_REGEX).match(kwargs["TargetId"]): obj = next((a for a in self.accounts if a.id == kwargs["TargetId"]), None) if obj is None: - raise RESTError( - "AccountNotFoundException", - "You specified an account that doesn't exist.", - ) + raise AccountNotFoundException else: - raise RESTError("InvalidInputException", "You specified an invalid value.") + raise InvalidInputException("You specified an invalid value.") return dict( Policies=[ p.describe()["Policy"]["PolicySummary"] for p in obj.attached_policies @@ -533,7 +570,7 @@ def list_targets_for_policy(self, **kwargs): "You specified a policy that doesn't exist.", ) else: - raise RESTError("InvalidInputException", "You specified an invalid value.") + raise InvalidInputException("You specified an invalid value.") objects = [ {"TargetId": obj.id, "Arn": obj.arn, "Name": obj.name, "Type": obj.type} for obj in policy.attachments @@ -606,5 +643,95 @@ def disable_aws_service_access(self, **kwargs): if service_principal: self.services.remove(service_principal) + def register_delegated_administrator(self, **kwargs): + account_id = kwargs["AccountId"] + + if account_id == ACCOUNT_ID: + raise ConstraintViolationException( + "You cannot register master account/yourself as delegated administrator for your organization." + ) + + account = self.get_account_by_id(account_id) + + admin = next( + (admin for admin in self.admins if admin.account.id == account_id), None + ) + if admin is None: + admin = FakeDelegatedAdministrator(account) + self.admins.append(admin) + + admin.add_service_principal(kwargs["ServicePrincipal"]) + + def list_delegated_administrators(self, **kwargs): + admins = self.admins + service = kwargs.get("ServicePrincipal") + + if service: + if not FakeDelegatedAdministrator.supported_service(service): + raise InvalidInputException( + "You specified an unrecognized service principal." 
+ ) + + admins = [admin for admin in admins if service in admin.services] + + delegated_admins = [admin.describe() for admin in admins] + + return dict(DelegatedAdministrators=delegated_admins) + + def list_delegated_services_for_account(self, **kwargs): + admin = next( + (admin for admin in self.admins if admin.account.id == kwargs["AccountId"]), + None, + ) + if admin is None: + account = next( + ( + account + for account in self.accounts + if account.id == kwargs["AccountId"] + ), + None, + ) + if account: + raise AccountNotRegisteredException + + raise AWSOrganizationsNotInUseException + + services = [service for service in admin.services.values()] + + return dict(DelegatedServices=services) + + def deregister_delegated_administrator(self, **kwargs): + account_id = kwargs["AccountId"] + service = kwargs["ServicePrincipal"] + + if account_id == ACCOUNT_ID: + raise ConstraintViolationException( + "You cannot register master account/yourself as delegated administrator for your organization." + ) + + admin = next( + (admin for admin in self.admins if admin.account.id == account_id), None, + ) + if admin is None: + account = next( + ( + account + for account in self.accounts + if account.id == kwargs["AccountId"] + ), + None, + ) + if account: + raise AccountNotRegisteredException + + raise AccountNotFoundException + + admin.remove_service_principal(service) + + # remove account, when no services attached + if not admin.services: + self.admins.remove(admin) + organizations_backend = OrganizationsBackend() diff --git a/moto/organizations/responses.py b/moto/organizations/responses.py index a2bd028d9961..4689db5d7cee 100644 --- a/moto/organizations/responses.py +++ b/moto/organizations/responses.py @@ -163,3 +163,31 @@ def disable_aws_service_access(self): return json.dumps( self.organizations_backend.disable_aws_service_access(**self.request_params) ) + + def register_delegated_administrator(self): + return json.dumps( + self.organizations_backend.register_delegated_administrator( + **self.request_params + ) + ) + + def list_delegated_administrators(self): + return json.dumps( + self.organizations_backend.list_delegated_administrators( + **self.request_params + ) + ) + + def list_delegated_services_for_account(self): + return json.dumps( + self.organizations_backend.list_delegated_services_for_account( + **self.request_params + ) + ) + + def deregister_delegated_administrator(self): + return json.dumps( + self.organizations_backend.deregister_delegated_administrator( + **self.request_params + ) + ) diff --git a/tests/test_organizations/test_organizations_boto3.py b/tests/test_organizations/test_organizations_boto3.py index decc0a17810d..90bee1edbd83 100644 --- a/tests/test_organizations/test_organizations_boto3.py +++ b/tests/test_organizations/test_organizations_boto3.py @@ -10,6 +10,7 @@ from nose.tools import assert_raises from moto import mock_organizations +from moto.core import ACCOUNT_ID from moto.organizations import utils from .organizations_test_utils import ( validate_organization, @@ -64,8 +65,11 @@ def test_describe_organization_exception(): response = client.describe_organization() ex = e.exception ex.operation_name.should.equal("DescribeOrganization") - ex.response["Error"]["Code"].should.equal("400") - ex.response["Error"]["Message"].should.contain("AWSOrganizationsNotInUseException") + ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) + ex.response["Error"]["Code"].should.contain("AWSOrganizationsNotInUseException") + 
ex.response["Error"]["Message"].should.equal( + "Your account is not a member of an organization." + ) # Organizational Units @@ -193,8 +197,11 @@ def test_describe_account_exception(): response = client.describe_account(AccountId=utils.make_random_account_id()) ex = e.exception ex.operation_name.should.equal("DescribeAccount") - ex.response["Error"]["Code"].should.equal("400") - ex.response["Error"]["Message"].should.contain("AccountNotFoundException") + ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) + ex.response["Error"]["Code"].should.contain("AccountNotFoundException") + ex.response["Error"]["Message"].should.equal( + "You specified an account that doesn't exist." + ) @mock_organizations @@ -340,8 +347,9 @@ def test_list_children_exception(): response = client.list_children(ParentId=root_id, ChildType="BLEE") ex = e.exception ex.operation_name.should.equal("ListChildren") - ex.response["Error"]["Code"].should.equal("400") - ex.response["Error"]["Message"].should.contain("InvalidInputException") + ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) + ex.response["Error"]["Code"].should.contain("InvalidInputException") + ex.response["Error"]["Message"].should.equal("You specified an invalid value.") # Service Control Policies @@ -405,8 +413,9 @@ def test_describe_policy_exception(): response = client.describe_policy(PolicyId="meaninglessstring") ex = e.exception ex.operation_name.should.equal("DescribePolicy") - ex.response["Error"]["Code"].should.equal("400") - ex.response["Error"]["Message"].should.contain("InvalidInputException") + ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) + ex.response["Error"]["Code"].should.contain("InvalidInputException") + ex.response["Error"]["Message"].should.equal("You specified an invalid value.") @mock_organizations @@ -517,16 +526,20 @@ def test_attach_policy_exception(): response = client.attach_policy(PolicyId=policy_id, TargetId=account_id) ex = e.exception ex.operation_name.should.equal("AttachPolicy") - ex.response["Error"]["Code"].should.equal("400") - ex.response["Error"]["Message"].should.contain("AccountNotFoundException") + ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) + ex.response["Error"]["Code"].should.contain("AccountNotFoundException") + ex.response["Error"]["Message"].should.equal( + "You specified an account that doesn't exist." + ) with assert_raises(ClientError) as e: response = client.attach_policy( PolicyId=policy_id, TargetId="meaninglessstring" ) ex = e.exception ex.operation_name.should.equal("AttachPolicy") - ex.response["Error"]["Code"].should.equal("400") - ex.response["Error"]["Message"].should.contain("InvalidInputException") + ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) + ex.response["Error"]["Code"].should.contain("InvalidInputException") + ex.response["Error"]["Message"].should.equal("You specified an invalid value.") @mock_organizations @@ -636,16 +649,20 @@ def test_list_policies_for_target_exception(): ) ex = e.exception ex.operation_name.should.equal("ListPoliciesForTarget") - ex.response["Error"]["Code"].should.equal("400") - ex.response["Error"]["Message"].should.contain("AccountNotFoundException") + ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) + ex.response["Error"]["Code"].should.contain("AccountNotFoundException") + ex.response["Error"]["Message"].should.equal( + "You specified an account that doesn't exist." 
+ ) with assert_raises(ClientError) as e: response = client.list_policies_for_target( TargetId="meaninglessstring", Filter="SERVICE_CONTROL_POLICY" ) ex = e.exception ex.operation_name.should.equal("ListPoliciesForTarget") - ex.response["Error"]["Code"].should.equal("400") - ex.response["Error"]["Message"].should.contain("InvalidInputException") + ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) + ex.response["Error"]["Code"].should.contain("InvalidInputException") + ex.response["Error"]["Message"].should.equal("You specified an invalid value.") @mock_organizations @@ -694,8 +711,9 @@ def test_list_targets_for_policy_exception(): response = client.list_targets_for_policy(PolicyId="meaninglessstring") ex = e.exception ex.operation_name.should.equal("ListTargetsForPolicy") - ex.response["Error"]["Code"].should.equal("400") - ex.response["Error"]["Message"].should.contain("InvalidInputException") + ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) + ex.response["Error"]["Code"].should.contain("InvalidInputException") + ex.response["Error"]["Message"].should.equal("You specified an invalid value.") @mock_organizations @@ -947,3 +965,343 @@ def test_disable_aws_service_access_errors(): ex.response["Error"]["Message"].should.equal( "You specified an unrecognized service principal." ) + + +@mock_organizations +def test_register_delegated_administrator(): + # given + client = boto3.client("organizations", region_name="us-east-1") + org_id = client.create_organization(FeatureSet="ALL")["Organization"]["Id"] + account_id = client.create_account(AccountName=mockname, Email=mockemail)[ + "CreateAccountStatus" + ]["AccountId"] + + # when + client.register_delegated_administrator( + AccountId=account_id, ServicePrincipal="ssm.amazonaws.com" + ) + + # then + response = client.list_delegated_administrators() + response["DelegatedAdministrators"].should.have.length_of(1) + admin = response["DelegatedAdministrators"][0] + admin["Id"].should.equal(account_id) + admin["Arn"].should.equal( + "arn:aws:organizations::{0}:account/{1}/{2}".format( + ACCOUNT_ID, org_id, account_id + ) + ) + admin["Email"].should.equal(mockemail) + admin["Name"].should.equal(mockname) + admin["Status"].should.equal("ACTIVE") + admin["JoinedMethod"].should.equal("CREATED") + admin["JoinedTimestamp"].should.be.a(datetime) + admin["DelegationEnabledDate"].should.be.a(datetime) + + +@mock_organizations +def test_register_delegated_administrator_errors(): + # given + client = boto3.client("organizations", region_name="us-east-1") + client.create_organization(FeatureSet="ALL") + account_id = client.create_account(AccountName=mockname, Email=mockemail)[ + "CreateAccountStatus" + ]["AccountId"] + client.register_delegated_administrator( + AccountId=account_id, ServicePrincipal="ssm.amazonaws.com" + ) + + # register master Account + # when + with assert_raises(ClientError) as e: + client.register_delegated_administrator( + AccountId=ACCOUNT_ID, ServicePrincipal="ssm.amazonaws.com" + ) + + # then + ex = e.exception + ex.operation_name.should.equal("RegisterDelegatedAdministrator") + ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) + ex.response["Error"]["Code"].should.contain("ConstraintViolationException") + ex.response["Error"]["Message"].should.equal( + "You cannot register master account/yourself as delegated administrator for your organization." 
+    )
+
+    # register not existing Account
+    # when
+    with assert_raises(ClientError) as e:
+        client.register_delegated_administrator(
+            AccountId="000000000000", ServicePrincipal="ssm.amazonaws.com"
+        )
+
+    # then
+    ex = e.exception
+    ex.operation_name.should.equal("RegisterDelegatedAdministrator")
+    ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400)
+    ex.response["Error"]["Code"].should.contain("AccountNotFoundException")
+    ex.response["Error"]["Message"].should.equal(
+        "You specified an account that doesn't exist."
+    )
+
+    # register not supported service
+    # when
+    with assert_raises(ClientError) as e:
+        client.register_delegated_administrator(
+            AccountId=account_id, ServicePrincipal="moto.amazonaws.com"
+        )
+
+    # then
+    ex = e.exception
+    ex.operation_name.should.equal("RegisterDelegatedAdministrator")
+    ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400)
+    ex.response["Error"]["Code"].should.contain("InvalidInputException")
+    ex.response["Error"]["Message"].should.equal(
+        "You specified an unrecognized service principal."
+    )
+
+    # register service again
+    # when
+    with assert_raises(ClientError) as e:
+        client.register_delegated_administrator(
+            AccountId=account_id, ServicePrincipal="ssm.amazonaws.com"
+        )
+
+    # then
+    ex = e.exception
+    ex.operation_name.should.equal("RegisterDelegatedAdministrator")
+    ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400)
+    ex.response["Error"]["Code"].should.contain("AccountAlreadyRegisteredException")
+    ex.response["Error"]["Message"].should.equal(
+        "The provided account is already a delegated administrator for your organization."
+    )
+
+
+@mock_organizations
+def test_list_delegated_administrators():
+    # given
+    client = boto3.client("organizations", region_name="us-east-1")
+    org_id = client.create_organization(FeatureSet="ALL")["Organization"]["Id"]
+    account_id_1 = client.create_account(AccountName=mockname, Email=mockemail)[
+        "CreateAccountStatus"
+    ]["AccountId"]
+    account_id_2 = client.create_account(AccountName=mockname, Email=mockemail)[
+        "CreateAccountStatus"
+    ]["AccountId"]
+    client.register_delegated_administrator(
+        AccountId=account_id_1, ServicePrincipal="ssm.amazonaws.com"
+    )
+    client.register_delegated_administrator(
+        AccountId=account_id_2, ServicePrincipal="guardduty.amazonaws.com"
+    )
+
+    # when
+    response = client.list_delegated_administrators()
+
+    # then
+    response["DelegatedAdministrators"].should.have.length_of(2)
+    sorted([admin["Id"] for admin in response["DelegatedAdministrators"]]).should.equal(
+        sorted([account_id_1, account_id_2])
+    )
+
+    # when
+    response = client.list_delegated_administrators(
+        ServicePrincipal="ssm.amazonaws.com"
+    )
+
+    # then
+    response["DelegatedAdministrators"].should.have.length_of(1)
+    admin = response["DelegatedAdministrators"][0]
+    admin["Id"].should.equal(account_id_1)
+    admin["Arn"].should.equal(
+        "arn:aws:organizations::{0}:account/{1}/{2}".format(
+            ACCOUNT_ID, org_id, account_id_1
+        )
+    )
+    admin["Email"].should.equal(mockemail)
+    admin["Name"].should.equal(mockname)
+    admin["Status"].should.equal("ACTIVE")
+    admin["JoinedMethod"].should.equal("CREATED")
+    admin["JoinedTimestamp"].should.be.a(datetime)
+    admin["DelegationEnabledDate"].should.be.a(datetime)
+
+
+@mock_organizations
+def test_list_delegated_administrators_errors():
+    # given
+    client = boto3.client("organizations", region_name="us-east-1")
+    client.create_organization(FeatureSet="ALL")
+
+    # list not supported service
+    # when
+    with assert_raises(ClientError) as
+        client.list_delegated_administrators(ServicePrincipal="moto.amazonaws.com")
+
+    # then
+    ex = e.exception
+    ex.operation_name.should.equal("ListDelegatedAdministrators")
+    ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400)
+    ex.response["Error"]["Code"].should.contain("InvalidInputException")
+    ex.response["Error"]["Message"].should.equal(
+        "You specified an unrecognized service principal."
+    )
+
+
+@mock_organizations
+def test_list_delegated_services_for_account():
+    # given
+    client = boto3.client("organizations", region_name="us-east-1")
+    client.create_organization(FeatureSet="ALL")
+    account_id = client.create_account(AccountName=mockname, Email=mockemail)[
+        "CreateAccountStatus"
+    ]["AccountId"]
+    client.register_delegated_administrator(
+        AccountId=account_id, ServicePrincipal="ssm.amazonaws.com"
+    )
+    client.register_delegated_administrator(
+        AccountId=account_id, ServicePrincipal="guardduty.amazonaws.com"
+    )
+
+    # when
+    response = client.list_delegated_services_for_account(AccountId=account_id)
+
+    # then
+    response["DelegatedServices"].should.have.length_of(2)
+    sorted(
+        [service["ServicePrincipal"] for service in response["DelegatedServices"]]
+    ).should.equal(["guardduty.amazonaws.com", "ssm.amazonaws.com"])
+
+
+@mock_organizations
+def test_list_delegated_services_for_account_errors():
+    # given
+    client = boto3.client("organizations", region_name="us-east-1")
+    client.create_organization(FeatureSet="ALL")
+
+    # list services for a non-existent account
+    # when
+    with assert_raises(ClientError) as e:
+        client.list_delegated_services_for_account(AccountId="000000000000")
+
+    # then
+    ex = e.exception
+    ex.operation_name.should.equal("ListDelegatedServicesForAccount")
+    ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400)
+    ex.response["Error"]["Code"].should.contain("AWSOrganizationsNotInUseException")
+    ex.response["Error"]["Message"].should.equal(
+        "Your account is not a member of an organization."
+    )
+
+    # list services for an unregistered account
+    # when
+    with assert_raises(ClientError) as e:
+        client.list_delegated_services_for_account(AccountId=ACCOUNT_ID)
+
+    # then
+    ex = e.exception
+    ex.operation_name.should.equal("ListDelegatedServicesForAccount")
+    ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400)
+    ex.response["Error"]["Code"].should.contain("AccountNotRegisteredException")
+    ex.response["Error"]["Message"].should.equal(
+        "The provided account is not a registered delegated administrator for your organization."
+    )
+
+
+@mock_organizations
+def test_deregister_delegated_administrator():
+    # given
+    client = boto3.client("organizations", region_name="us-east-1")
+    client.create_organization(FeatureSet="ALL")
+    account_id = client.create_account(AccountName=mockname, Email=mockemail)[
+        "CreateAccountStatus"
+    ]["AccountId"]
+    client.register_delegated_administrator(
+        AccountId=account_id, ServicePrincipal="ssm.amazonaws.com"
+    )
+
+    # when
+    client.deregister_delegated_administrator(
+        AccountId=account_id, ServicePrincipal="ssm.amazonaws.com"
+    )
+
+    # then
+    response = client.list_delegated_administrators()
+    response["DelegatedAdministrators"].should.have.length_of(0)
+
+
+@mock_organizations
+def test_deregister_delegated_administrator_errors():
+    # given
+    client = boto3.client("organizations", region_name="us-east-1")
+    client.create_organization(FeatureSet="ALL")
+    account_id = client.create_account(AccountName=mockname, Email=mockemail)[
+        "CreateAccountStatus"
+    ]["AccountId"]
+
+    # deregister the master account
+    # when
+    with assert_raises(ClientError) as e:
+        client.deregister_delegated_administrator(
+            AccountId=ACCOUNT_ID, ServicePrincipal="ssm.amazonaws.com"
+        )
+
+    # then
+    ex = e.exception
+    ex.operation_name.should.equal("DeregisterDelegatedAdministrator")
+    ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400)
+    ex.response["Error"]["Code"].should.contain("ConstraintViolationException")
+    ex.response["Error"]["Message"].should.equal(
+        "You cannot register master account/yourself as delegated administrator for your organization."
+    )
+
+    # deregister a non-existent account
+    # when
+    with assert_raises(ClientError) as e:
+        client.deregister_delegated_administrator(
+            AccountId="000000000000", ServicePrincipal="ssm.amazonaws.com"
+        )
+
+    # then
+    ex = e.exception
+    ex.operation_name.should.equal("DeregisterDelegatedAdministrator")
+    ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400)
+    ex.response["Error"]["Code"].should.contain("AccountNotFoundException")
+    ex.response["Error"]["Message"].should.equal(
+        "You specified an account that doesn't exist."
+    )
+
+    # deregister an unregistered account
+    # when
+    with assert_raises(ClientError) as e:
+        client.deregister_delegated_administrator(
+            AccountId=account_id, ServicePrincipal="ssm.amazonaws.com"
+        )
+
+    # then
+    ex = e.exception
+    ex.operation_name.should.equal("DeregisterDelegatedAdministrator")
+    ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400)
+    ex.response["Error"]["Code"].should.contain("AccountNotRegisteredException")
+    ex.response["Error"]["Message"].should.equal(
+        "The provided account is not a registered delegated administrator for your organization."
+    )
+
+    # given
+    client.register_delegated_administrator(
+        AccountId=account_id, ServicePrincipal="ssm.amazonaws.com"
+    )
+
+    # deregister an unregistered service
+    # when
+    with assert_raises(ClientError) as e:
+        client.deregister_delegated_administrator(
+            AccountId=account_id, ServicePrincipal="guardduty.amazonaws.com"
+        )
+
+    # then
+    ex = e.exception
+    ex.operation_name.should.equal("DeregisterDelegatedAdministrator")
+    ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400)
+    ex.response["Error"]["Code"].should.contain("InvalidInputException")
+    ex.response["Error"]["Message"].should.equal(
+        "You specified an unrecognized service principal."
+    )

From 88a11346576d740983fe648e62d88e5ba780e38d Mon Sep 17 00:00:00 2001
From: Iain Bullard
Date: Fri, 31 Jul 2020 17:46:48 +0100
Subject: [PATCH 476/658] Fix DynamoDb2 ExpressionAttributeNames can start with a number (#3206)

pynamodb's support for transactions makes use of ExpressionAttributeNames
that look like #0, #1, etc., which are valid according to
https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.ExpressionAttributeNames.html
These names work without issue when testing against dynamodb-local, but
fail when testing with moto.
---
 moto/dynamodb2/parsing/tokens.py | 2 +-
 .../test_dynamodb_expression_tokenizer.py | 12 ++++++++++++
 2 files changed, 13 insertions(+), 1 deletion(-)

diff --git a/moto/dynamodb2/parsing/tokens.py b/moto/dynamodb2/parsing/tokens.py
index 4fbb7883afa3..34c3151efbfb 100644
--- a/moto/dynamodb2/parsing/tokens.py
+++ b/moto/dynamodb2/parsing/tokens.py
@@ -109,7 +109,7 @@ def is_possible_token_boundary(cls, character):

     @classmethod
     def is_expression_attribute(cls, input_string):
-        return re.compile("^[a-zA-Z][a-zA-Z0-9_]*$").match(input_string) is not None
+        return re.compile("^[a-zA-Z0-9][a-zA-Z0-9_]*$").match(input_string) is not None

     @classmethod
     def is_expression_attribute_name(cls, input_string):
diff --git a/tests/test_dynamodb2/test_dynamodb_expression_tokenizer.py b/tests/test_dynamodb2/test_dynamodb_expression_tokenizer.py
index 3330d431ecd8..ddfb81d1a258 100644
--- a/tests/test_dynamodb2/test_dynamodb_expression_tokenizer.py
+++ b/tests/test_dynamodb2/test_dynamodb_expression_tokenizer.py
@@ -219,6 +219,18 @@ def test_expression_tokenizer_single_set_action_attribute_name_valid_key():
     ]


+def test_expression_tokenizer_single_set_action_attribute_name_leading_number():
+    set_action = "SET attr=#0"
+    token_list = ExpressionTokenizer.make_list(set_action)
+    assert token_list == [
+        Token(Token.ATTRIBUTE, "SET"),
+        Token(Token.WHITESPACE, " "),
+        Token(Token.ATTRIBUTE, "attr"),
+        Token(Token.EQUAL_SIGN, "="),
+        Token(Token.ATTRIBUTE_NAME, "#0"),
+    ]
+
+
 def test_expression_tokenizer_just_a_pipe():
     set_action = "|"
     try:

From 9a9a1d8413e1c93800b6ae695842a91e9487c6d8 Mon Sep 17 00:00:00 2001
From: Adam Richie-Halford
Date: Sat, 1 Aug 2020 07:23:36 -0700
Subject: [PATCH 477/658] Decentralize cloudformation naming responsibilities (#3201)

* #3127 - Decentralize CF naming responsibilities
* Decentralize CloudFormation naming responsibilities
* Update URLs in cloudformation_resource_type functions
* Fix flake8 errors
* Black formatting
* Add a bunch of imports to populate CloudFormationModel.__subclasses__
* Add noqa to s3 models import statement in cloudformation/parsing.py
* Black formatting
* Remove debugging print statement

Co-authored-by: Bert Blommers
---
 moto/autoscaling/models.py | 24 +++-
 moto/awslambda/models.py | 35 +++++-
 moto/batch/models.py | 35 +++++-
 moto/cloudformation/parsing.py | 160 +++++++-------------
 moto/cloudwatch/models.py | 13 +-
 moto/core/__init__.py | 1 +
 moto/core/models.py | 42 +++++++
 moto/datapipeline/models.py | 13 +-
 moto/dynamodb/models.py | 13 +-
 moto/dynamodb2/models/__init__.py | 13 +-
 moto/ec2/models.py | 189 +++++++++++++++++++++++++++---
 moto/ecr/models.py | 13 +-
 moto/ecs/models.py | 35 +++++-
 moto/elb/models.py | 13 +-
 moto/elbv2/models.py | 35 +++++-
 moto/events/models.py | 24 +++-
 moto/iam/models.py | 24 +++-
 moto/kinesis/models.py | 13 +-
 moto/kms/models.py | 13 +-
 moto/rds/models.py | 42 +++++--
 moto/rds2/models.py | 52 ++++++--
 moto/redshift/models.py | 36 +++++-
moto/route53/models.py | 46 +++++++- moto/s3/models.py | 13 +- moto/sns/models.py | 15 ++- moto/sqs/models.py | 13 +- 26 files changed, 717 insertions(+), 208 deletions(-) diff --git a/moto/autoscaling/models.py b/moto/autoscaling/models.py index f4185da6ce88..d82f15095f31 100644 --- a/moto/autoscaling/models.py +++ b/moto/autoscaling/models.py @@ -6,7 +6,7 @@ from moto.ec2.exceptions import InvalidInstanceIdError from moto.compat import OrderedDict -from moto.core import BaseBackend, BaseModel +from moto.core import BaseBackend, BaseModel, CloudFormationModel from moto.ec2 import ec2_backends from moto.elb import elb_backends from moto.elbv2 import elbv2_backends @@ -74,7 +74,7 @@ def execute(self): ) -class FakeLaunchConfiguration(BaseModel): +class FakeLaunchConfiguration(CloudFormationModel): def __init__( self, name, @@ -127,6 +127,15 @@ def create_from_instance(cls, name, instance, backend): ) return config + @staticmethod + def cloudformation_name_type(): + return "LaunchConfigurationName" + + @staticmethod + def cloudformation_type(): + # https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-autoscaling-launchconfiguration.html + return "AWS::AutoScaling::LaunchConfiguration" + @classmethod def create_from_cloudformation_json( cls, resource_name, cloudformation_json, region_name @@ -215,7 +224,7 @@ def _parse_block_device_mappings(self): return block_device_map -class FakeAutoScalingGroup(BaseModel): +class FakeAutoScalingGroup(CloudFormationModel): def __init__( self, name, @@ -309,6 +318,15 @@ def __set_string_propagate_at_launch_booleans_on_tags(tags): tag["PropagateAtLaunch"] = bool_to_string[tag["PropagateAtLaunch"]] return tags + @staticmethod + def cloudformation_name_type(): + return "AutoScalingGroupName" + + @staticmethod + def cloudformation_type(): + # https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-autoscaling-autoscalinggroup.html + return "AWS::AutoScaling::AutoScalingGroup" + @classmethod def create_from_cloudformation_json( cls, resource_name, cloudformation_json, region_name diff --git a/moto/awslambda/models.py b/moto/awslambda/models.py index afbe9775a094..a234fbe01d76 100644 --- a/moto/awslambda/models.py +++ b/moto/awslambda/models.py @@ -28,7 +28,7 @@ from boto3 import Session from moto.awslambda.policy import Policy -from moto.core import BaseBackend, BaseModel +from moto.core import BaseBackend, CloudFormationModel from moto.core.exceptions import RESTError from moto.iam.models import iam_backend from moto.iam.exceptions import IAMNotFoundException @@ -151,7 +151,7 @@ def __exit__(self, exc_type, exc_val, exc_tb): raise # multiple processes trying to use same volume? 
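# A minimal sketch of the conversion pattern that this hunk and the rest of
# the patch apply to each model; FakeWidget, "WidgetName" and
# "AWS::Example::Widget" are illustrative placeholders, not real moto
# resources:
from moto.core import CloudFormationModel

class FakeWidget(CloudFormationModel):
    @staticmethod
    def cloudformation_name_type():
        # The CloudFormation property that names the resource,
        # or None for resources without a name property.
        return "WidgetName"

    @staticmethod
    def cloudformation_type():
        # The resource type string that keys the parser's model registry.
        return "AWS::Example::Widget"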
-class LambdaFunction(BaseModel): +class LambdaFunction(CloudFormationModel): def __init__(self, spec, region, validate_s3=True, version=1): # required self.region = region @@ -492,6 +492,15 @@ def invoke(self, body, request_headers, response_headers): return result + @staticmethod + def cloudformation_name_type(): + return "FunctionName" + + @staticmethod + def cloudformation_type(): + # https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-lambda-function.html + return "AWS::Lambda::Function" + @classmethod def create_from_cloudformation_json( cls, resource_name, cloudformation_json, region_name @@ -556,7 +565,7 @@ def delete(self, region): lambda_backends[region].delete_function(self.function_name) -class EventSourceMapping(BaseModel): +class EventSourceMapping(CloudFormationModel): def __init__(self, spec): # required self.function_name = spec["FunctionName"] @@ -633,6 +642,15 @@ def delete(self, region_name): lambda_backend = lambda_backends[region_name] lambda_backend.delete_event_source_mapping(self.uuid) + @staticmethod + def cloudformation_name_type(): + return None + + @staticmethod + def cloudformation_type(): + # https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-lambda-eventsourcemapping.html + return "AWS::Lambda::EventSourceMapping" + @classmethod def create_from_cloudformation_json( cls, resource_name, cloudformation_json, region_name @@ -667,13 +685,22 @@ def delete_from_cloudformation_json( esm.delete(region_name) -class LambdaVersion(BaseModel): +class LambdaVersion(CloudFormationModel): def __init__(self, spec): self.version = spec["Version"] def __repr__(self): return str(self.logical_resource_id) + @staticmethod + def cloudformation_name_type(): + return None + + @staticmethod + def cloudformation_type(): + # https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-lambda-version.html + return "AWS::Lambda::Version" + @classmethod def create_from_cloudformation_json( cls, resource_name, cloudformation_json, region_name diff --git a/moto/batch/models.py b/moto/batch/models.py index fde744911a81..c4bc81a73788 100644 --- a/moto/batch/models.py +++ b/moto/batch/models.py @@ -13,7 +13,7 @@ import dateutil.parser from boto3 import Session -from moto.core import BaseBackend, BaseModel +from moto.core import BaseBackend, BaseModel, CloudFormationModel from moto.iam import iam_backends from moto.ec2 import ec2_backends from moto.ecs import ecs_backends @@ -42,7 +42,7 @@ def datetime2int(date): return int(time.mktime(date.timetuple())) -class ComputeEnvironment(BaseModel): +class ComputeEnvironment(CloudFormationModel): def __init__( self, compute_environment_name, @@ -76,6 +76,15 @@ def set_ecs(self, arn, name): def physical_resource_id(self): return self.arn + @staticmethod + def cloudformation_name_type(): + return "ComputeEnvironmentName" + + @staticmethod + def cloudformation_type(): + # https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-batch-computeenvironment.html + return "AWS::Batch::ComputeEnvironment" + @classmethod def create_from_cloudformation_json( cls, resource_name, cloudformation_json, region_name @@ -95,7 +104,7 @@ def create_from_cloudformation_json( return backend.get_compute_environment_by_arn(arn) -class JobQueue(BaseModel): +class JobQueue(CloudFormationModel): def __init__( self, name, priority, state, environments, env_order_json, region_name ): @@ -139,6 +148,15 @@ def describe(self): def physical_resource_id(self): return self.arn + @staticmethod + def 
cloudformation_name_type(): + return "JobQueueName" + + @staticmethod + def cloudformation_type(): + # https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-batch-jobqueue.html + return "AWS::Batch::JobQueue" + @classmethod def create_from_cloudformation_json( cls, resource_name, cloudformation_json, region_name @@ -164,7 +182,7 @@ def create_from_cloudformation_json( return backend.get_job_queue_by_arn(arn) -class JobDefinition(BaseModel): +class JobDefinition(CloudFormationModel): def __init__( self, name, @@ -264,6 +282,15 @@ def describe(self): def physical_resource_id(self): return self.arn + @staticmethod + def cloudformation_name_type(): + return "JobDefinitionName" + + @staticmethod + def cloudformation_type(): + # https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-batch-jobdefinition.html + return "AWS::Batch::JobDefinition" + @classmethod def create_from_cloudformation_json( cls, resource_name, cloudformation_json, region_name diff --git a/moto/cloudformation/parsing.py b/moto/cloudformation/parsing.py index 58409901d4a4..2c212a148c98 100644 --- a/moto/cloudformation/parsing.py +++ b/moto/cloudformation/parsing.py @@ -6,31 +6,43 @@ import warnings import re -from moto.autoscaling import models as autoscaling_models -from moto.awslambda import models as lambda_models -from moto.batch import models as batch_models -from moto.cloudwatch import models as cloudwatch_models -from moto.cognitoidentity import models as cognitoidentity_models from moto.compat import collections_abc -from moto.datapipeline import models as datapipeline_models -from moto.dynamodb2 import models as dynamodb2_models + +# This ugly section of imports is necessary because we +# build the list of CloudFormationModel subclasses using +# CloudFormationModel.__subclasses__(). However, if the class +# definition of a subclass hasn't been executed yet - for example, if +# the subclass's module hasn't been imported yet - then that subclass +# doesn't exist yet, and __subclasses__ won't find it. +# So we import here to populate the list of subclasses. 
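# A short, runnable illustration of the behaviour described above
# (_SketchBase and _SketchChild are throwaway names used only for this
# sketch): __subclasses__() reports just the classes whose definitions have
# already been executed, which is why each model module must be imported
# before the registry is built.
class _SketchBase(object):
    pass

assert _SketchBase.__subclasses__() == []  # no subclass defined yet

class _SketchChild(_SketchBase):  # importing a model module has this effect
    pass

assert _SketchBase.__subclasses__() == [_SketchChild]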
+from moto.autoscaling import models as autoscaling_models # noqa +from moto.awslambda import models as awslambda_models # noqa +from moto.batch import models as batch_models # noqa +from moto.cloudwatch import models as cloudwatch_models # noqa +from moto.datapipeline import models as datapipeline_models # noqa +from moto.dynamodb2 import models as dynamodb2_models # noqa +from moto.ecr import models as ecr_models # noqa +from moto.ecs import models as ecs_models # noqa +from moto.elb import models as elb_models # noqa +from moto.elbv2 import models as elbv2_models # noqa +from moto.events import models as events_models # noqa +from moto.iam import models as iam_models # noqa +from moto.kinesis import models as kinesis_models # noqa +from moto.kms import models as kms_models # noqa +from moto.rds import models as rds_models # noqa +from moto.rds2 import models as rds2_models # noqa +from moto.redshift import models as redshift_models # noqa +from moto.route53 import models as route53_models # noqa +from moto.s3 import models as s3_models # noqa +from moto.sns import models as sns_models # noqa +from moto.sqs import models as sqs_models # noqa + +# End ugly list of imports + from moto.ec2 import models as ec2_models -from moto.ecs import models as ecs_models -from moto.elb import models as elb_models -from moto.elbv2 import models as elbv2_models -from moto.events import models as events_models -from moto.iam import models as iam_models -from moto.kinesis import models as kinesis_models -from moto.kms import models as kms_models -from moto.rds import models as rds_models -from moto.rds2 import models as rds2_models -from moto.redshift import models as redshift_models -from moto.route53 import models as route53_models -from moto.s3 import models as s3_models, s3_backend +from moto.s3 import models as _, s3_backend # noqa from moto.s3.utils import bucket_and_name_from_url -from moto.sns import models as sns_models -from moto.sqs import models as sqs_models -from moto.core import ACCOUNT_ID +from moto.core import ACCOUNT_ID, CloudFormationModel from .utils import random_suffix from .exceptions import ( ExportNotFound, @@ -40,105 +52,13 @@ ) from boto.cloudformation.stack import Output -MODEL_MAP = { - "AWS::AutoScaling::AutoScalingGroup": autoscaling_models.FakeAutoScalingGroup, - "AWS::AutoScaling::LaunchConfiguration": autoscaling_models.FakeLaunchConfiguration, - "AWS::Batch::JobDefinition": batch_models.JobDefinition, - "AWS::Batch::JobQueue": batch_models.JobQueue, - "AWS::Batch::ComputeEnvironment": batch_models.ComputeEnvironment, - "AWS::DynamoDB::Table": dynamodb2_models.Table, - "AWS::Kinesis::Stream": kinesis_models.Stream, - "AWS::Lambda::EventSourceMapping": lambda_models.EventSourceMapping, - "AWS::Lambda::Function": lambda_models.LambdaFunction, - "AWS::Lambda::Version": lambda_models.LambdaVersion, - "AWS::EC2::EIP": ec2_models.ElasticAddress, - "AWS::EC2::Instance": ec2_models.Instance, - "AWS::EC2::InternetGateway": ec2_models.InternetGateway, - "AWS::EC2::NatGateway": ec2_models.NatGateway, - "AWS::EC2::NetworkInterface": ec2_models.NetworkInterface, - "AWS::EC2::Route": ec2_models.Route, - "AWS::EC2::RouteTable": ec2_models.RouteTable, - "AWS::EC2::SecurityGroup": ec2_models.SecurityGroup, - "AWS::EC2::SecurityGroupIngress": ec2_models.SecurityGroupIngress, - "AWS::EC2::SpotFleet": ec2_models.SpotFleetRequest, - "AWS::EC2::Subnet": ec2_models.Subnet, - "AWS::EC2::SubnetRouteTableAssociation": ec2_models.SubnetRouteTableAssociation, - "AWS::EC2::Volume": ec2_models.Volume, 
- "AWS::EC2::VolumeAttachment": ec2_models.VolumeAttachment, - "AWS::EC2::VPC": ec2_models.VPC, - "AWS::EC2::VPCGatewayAttachment": ec2_models.VPCGatewayAttachment, - "AWS::EC2::VPCPeeringConnection": ec2_models.VPCPeeringConnection, - "AWS::ECS::Cluster": ecs_models.Cluster, - "AWS::ECS::TaskDefinition": ecs_models.TaskDefinition, - "AWS::ECS::Service": ecs_models.Service, - "AWS::ElasticLoadBalancing::LoadBalancer": elb_models.FakeLoadBalancer, - "AWS::ElasticLoadBalancingV2::LoadBalancer": elbv2_models.FakeLoadBalancer, - "AWS::ElasticLoadBalancingV2::TargetGroup": elbv2_models.FakeTargetGroup, - "AWS::ElasticLoadBalancingV2::Listener": elbv2_models.FakeListener, - "AWS::Cognito::IdentityPool": cognitoidentity_models.CognitoIdentity, - "AWS::DataPipeline::Pipeline": datapipeline_models.Pipeline, - "AWS::IAM::InstanceProfile": iam_models.InstanceProfile, - "AWS::IAM::Role": iam_models.Role, - "AWS::KMS::Key": kms_models.Key, - "AWS::Logs::LogGroup": cloudwatch_models.LogGroup, - "AWS::RDS::DBInstance": rds_models.Database, - "AWS::RDS::DBSecurityGroup": rds_models.SecurityGroup, - "AWS::RDS::DBSubnetGroup": rds_models.SubnetGroup, - "AWS::RDS::DBParameterGroup": rds2_models.DBParameterGroup, - "AWS::Redshift::Cluster": redshift_models.Cluster, - "AWS::Redshift::ClusterParameterGroup": redshift_models.ParameterGroup, - "AWS::Redshift::ClusterSubnetGroup": redshift_models.SubnetGroup, - "AWS::Route53::HealthCheck": route53_models.HealthCheck, - "AWS::Route53::HostedZone": route53_models.FakeZone, - "AWS::Route53::RecordSet": route53_models.RecordSet, - "AWS::Route53::RecordSetGroup": route53_models.RecordSetGroup, - "AWS::SNS::Topic": sns_models.Topic, - "AWS::S3::Bucket": s3_models.FakeBucket, - "AWS::SQS::Queue": sqs_models.Queue, - "AWS::Events::Rule": events_models.Rule, - "AWS::Events::EventBus": events_models.EventBus, -} - -UNDOCUMENTED_NAME_TYPE_MAP = { - "AWS::AutoScaling::AutoScalingGroup": "AutoScalingGroupName", - "AWS::AutoScaling::LaunchConfiguration": "LaunchConfigurationName", - "AWS::IAM::InstanceProfile": "InstanceProfileName", -} - -# http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-name.html +# List of supported CloudFormation models +MODEL_LIST = CloudFormationModel.__subclasses__() +MODEL_MAP = {model.cloudformation_type(): model for model in MODEL_LIST} NAME_TYPE_MAP = { - "AWS::ApiGateway::ApiKey": "Name", - "AWS::ApiGateway::Model": "Name", - "AWS::CloudWatch::Alarm": "AlarmName", - "AWS::DynamoDB::Table": "TableName", - "AWS::ElasticBeanstalk::Application": "ApplicationName", - "AWS::ElasticBeanstalk::Environment": "EnvironmentName", - "AWS::CodeDeploy::Application": "ApplicationName", - "AWS::CodeDeploy::DeploymentConfig": "DeploymentConfigName", - "AWS::CodeDeploy::DeploymentGroup": "DeploymentGroupName", - "AWS::Config::ConfigRule": "ConfigRuleName", - "AWS::Config::DeliveryChannel": "Name", - "AWS::Config::ConfigurationRecorder": "Name", - "AWS::ElasticLoadBalancing::LoadBalancer": "LoadBalancerName", - "AWS::ElasticLoadBalancingV2::LoadBalancer": "Name", - "AWS::ElasticLoadBalancingV2::TargetGroup": "Name", - "AWS::EC2::SecurityGroup": "GroupName", - "AWS::ElastiCache::CacheCluster": "ClusterName", - "AWS::ECR::Repository": "RepositoryName", - "AWS::ECS::Cluster": "ClusterName", - "AWS::Elasticsearch::Domain": "DomainName", - "AWS::Events::Rule": "Name", - "AWS::IAM::Group": "GroupName", - "AWS::IAM::ManagedPolicy": "ManagedPolicyName", - "AWS::IAM::Role": "RoleName", - "AWS::IAM::User": "UserName", - "AWS::Lambda::Function": 
"FunctionName", - "AWS::RDS::DBInstance": "DBInstanceIdentifier", - "AWS::S3::Bucket": "BucketName", - "AWS::SNS::Topic": "TopicName", - "AWS::SQS::Queue": "QueueName", + model.cloudformation_type(): model.cloudformation_name_type() + for model in MODEL_LIST } -NAME_TYPE_MAP.update(UNDOCUMENTED_NAME_TYPE_MAP) # Just ignore these models types for now NULL_MODELS = [ @@ -292,9 +212,11 @@ def clean_json(resource_json, resources_map): def resource_class_from_type(resource_type): if resource_type in NULL_MODELS: return None + if resource_type not in MODEL_MAP: logger.warning("No Moto CloudFormation support for %s", resource_type) return None + return MODEL_MAP.get(resource_type) diff --git a/moto/cloudwatch/models.py b/moto/cloudwatch/models.py index f089acb1414c..d8b28bc9709c 100644 --- a/moto/cloudwatch/models.py +++ b/moto/cloudwatch/models.py @@ -3,7 +3,7 @@ from boto3 import Session from moto.core.utils import iso_8601_datetime_without_milliseconds -from moto.core import BaseBackend, BaseModel +from moto.core import BaseBackend, BaseModel, CloudFormationModel from moto.core.exceptions import RESTError from moto.logs import logs_backends from datetime import datetime, timedelta @@ -490,13 +490,22 @@ def _get_paginated(self, metrics): return None, metrics -class LogGroup(BaseModel): +class LogGroup(CloudFormationModel): def __init__(self, spec): # required self.name = spec["LogGroupName"] # optional self.tags = spec.get("Tags", []) + @staticmethod + def cloudformation_name_type(): + return "LogGroupName" + + @staticmethod + def cloudformation_type(): + # https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-logs-loggroup.html + return "AWS::Logs::LogGroup" + @classmethod def create_from_cloudformation_json( cls, resource_name, cloudformation_json, region_name diff --git a/moto/core/__init__.py b/moto/core/__init__.py index 045124fabb24..09f5b1e16da9 100644 --- a/moto/core/__init__.py +++ b/moto/core/__init__.py @@ -1,6 +1,7 @@ from __future__ import unicode_literals from .models import BaseModel, BaseBackend, moto_api_backend, ACCOUNT_ID # noqa +from .models import CloudFormationModel # noqa from .responses import ActionAuthenticatorMixin moto_api_backends = {"global": moto_api_backend} diff --git a/moto/core/models.py b/moto/core/models.py index 26ee1a1f5a29..ded6a4fc1254 100644 --- a/moto/core/models.py +++ b/moto/core/models.py @@ -8,6 +8,7 @@ import re import six import types +from abc import abstractmethod from io import BytesIO from collections import defaultdict from botocore.config import Config @@ -534,6 +535,47 @@ def __new__(cls, *args, **kwargs): return instance +# Parent class for every Model that can be instantiated by CloudFormation +# On subclasses, implement the two methods as @staticmethod to ensure correct behaviour of the CF parser +class CloudFormationModel(BaseModel): + @abstractmethod + def cloudformation_name_type(self): + # This must be implemented as a staticmethod with no parameters + # Return None for resources that do not have a name property + pass + + @abstractmethod + def cloudformation_type(self): + # This must be implemented as a staticmethod with no parameters + # See for example https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-dynamodb-table.html + return "AWS::SERVICE::RESOURCE" + + @abstractmethod + def create_from_cloudformation_json(self): + # This must be implemented as a classmethod with parameters: + # cls, resource_name, cloudformation_json, region_name + # Extract the resource parameters from the 
cloudformation json + # and return an instance of the resource class + pass + + @abstractmethod + def update_from_cloudformation_json(self): + # This must be implemented as a classmethod with parameters: + # cls, original_resource, new_resource_name, cloudformation_json, region_name + # Extract the resource parameters from the cloudformation json, + # delete the old resource and return the new one. Optionally inspect + # the change in parameters and no-op when nothing has changed. + pass + + @abstractmethod + def delete_from_cloudformation_json(self): + # This must be implemented as a classmethod with parameters: + # cls, resource_name, cloudformation_json, region_name + # Extract the resource parameters from the cloudformation json + # and delete the resource. Do not include a return statement. + pass + + class BaseBackend(object): def _reset_model_refs(self): # Remove all references to the models stored diff --git a/moto/datapipeline/models.py b/moto/datapipeline/models.py index d93deea61e6f..b17da1f098bd 100644 --- a/moto/datapipeline/models.py +++ b/moto/datapipeline/models.py @@ -4,7 +4,7 @@ from boto3 import Session from moto.compat import OrderedDict -from moto.core import BaseBackend, BaseModel +from moto.core import BaseBackend, BaseModel, CloudFormationModel from .utils import get_random_pipeline_id, remove_capitalization_of_dict_keys @@ -18,7 +18,7 @@ def to_json(self): return {"fields": self.fields, "id": self.object_id, "name": self.name} -class Pipeline(BaseModel): +class Pipeline(CloudFormationModel): def __init__(self, name, unique_id, **kwargs): self.name = name self.unique_id = unique_id @@ -74,6 +74,15 @@ def set_pipeline_objects(self, pipeline_objects): def activate(self): self.status = "SCHEDULED" + @staticmethod + def cloudformation_name_type(): + return "Name" + + @staticmethod + def cloudformation_type(): + # https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-datapipeline-pipeline.html + return "AWS::DataPipeline::Pipeline" + @classmethod def create_from_cloudformation_json( cls, resource_name, cloudformation_json, region_name diff --git a/moto/dynamodb/models.py b/moto/dynamodb/models.py index f5771ec6e966..1a3b4afce136 100644 --- a/moto/dynamodb/models.py +++ b/moto/dynamodb/models.py @@ -4,7 +4,7 @@ import json from moto.compat import OrderedDict -from moto.core import BaseBackend, BaseModel +from moto.core import BaseBackend, BaseModel, CloudFormationModel from moto.core.utils import unix_time from moto.core import ACCOUNT_ID from .comparisons import get_comparison_func @@ -82,7 +82,7 @@ def describe_attrs(self, attributes): return {"Item": included} -class Table(BaseModel): +class Table(CloudFormationModel): def __init__( self, name, @@ -135,6 +135,15 @@ def describe(self): } return results + @staticmethod + def cloudformation_name_type(): + return "TableName" + + @staticmethod + def cloudformation_type(): + # https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-dynamodb-table.html + return "AWS::DynamoDB::Table" + @classmethod def create_from_cloudformation_json( cls, resource_name, cloudformation_json, region_name diff --git a/moto/dynamodb2/models/__init__.py b/moto/dynamodb2/models/__init__.py index 70fcc5d097bb..175ed64f8057 100644 --- a/moto/dynamodb2/models/__init__.py +++ b/moto/dynamodb2/models/__init__.py @@ -9,7 +9,7 @@ from boto3 import Session from moto.compat import OrderedDict -from moto.core import BaseBackend, BaseModel +from moto.core import BaseBackend, BaseModel, CloudFormationModel from 
moto.core.utils import unix_time from moto.core.exceptions import JsonRESTError from moto.dynamodb2.comparisons import get_filter_expression @@ -359,7 +359,7 @@ def update(self, u): self.throughput = u.get("ProvisionedThroughput", self.throughput) -class Table(BaseModel): +class Table(CloudFormationModel): def __init__( self, table_name, @@ -431,6 +431,15 @@ def get_cfn_attribute(self, attribute_name): def physical_resource_id(self): return self.name + @staticmethod + def cloudformation_name_type(): + return "TableName" + + @staticmethod + def cloudformation_type(): + # https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-dynamodb-table.html + return "AWS::DynamoDB::Table" + @classmethod def create_from_cloudformation_json( cls, resource_name, cloudformation_json, region_name diff --git a/moto/ec2/models.py b/moto/ec2/models.py index ad9ae3b1b12e..3d60654a9cfb 100644 --- a/moto/ec2/models.py +++ b/moto/ec2/models.py @@ -22,7 +22,7 @@ from moto.compat import OrderedDict from moto.core import BaseBackend -from moto.core.models import Model, BaseModel +from moto.core.models import Model, BaseModel, CloudFormationModel from moto.core.utils import ( iso_8601_datetime_with_milliseconds, camelcase_to_underscores, @@ -219,7 +219,7 @@ def get_filter_value(self, filter_name, method_name=None): raise FilterNotImplementedError(filter_name, method_name) -class NetworkInterface(TaggedEC2Resource): +class NetworkInterface(TaggedEC2Resource, CloudFormationModel): def __init__( self, ec2_backend, @@ -268,6 +268,15 @@ def __init__( if group: self._group_set.append(group) + @staticmethod + def cloudformation_name_type(): + return None + + @staticmethod + def cloudformation_type(): + # https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-ec2-networkinterface.html + return "AWS::EC2::NetworkInterface" + @classmethod def create_from_cloudformation_json( cls, resource_name, cloudformation_json, region_name @@ -454,7 +463,7 @@ def get_all_network_interfaces(self, eni_ids=None, filters=None): return generic_filter(filters, enis) -class Instance(TaggedEC2Resource, BotoInstance): +class Instance(TaggedEC2Resource, BotoInstance, CloudFormationModel): VALID_ATTRIBUTES = { "instanceType", "kernel", @@ -621,6 +630,15 @@ def public_dns(self): formatted_ip, self.region_name ) + @staticmethod + def cloudformation_name_type(): + return None + + @staticmethod + def cloudformation_type(): + # https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-ec2-instance.html + return "AWS::EC2::Instance" + @classmethod def create_from_cloudformation_json( cls, resource_name, cloudformation_json, region_name @@ -1843,7 +1861,7 @@ def __eq__(self, other): return True -class SecurityGroup(TaggedEC2Resource): +class SecurityGroup(TaggedEC2Resource, CloudFormationModel): def __init__(self, ec2_backend, group_id, name, description, vpc_id=None): self.ec2_backend = ec2_backend self.id = group_id @@ -1861,6 +1879,15 @@ def __init__(self, ec2_backend, group_id, name, description, vpc_id=None): if vpc and len(vpc.get_cidr_block_association_set(ipv6=True)) > 0: self.egress_rules.append(SecurityRule("-1", None, None, [], [])) + @staticmethod + def cloudformation_name_type(): + return "GroupName" + + @staticmethod + def cloudformation_type(): + # https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-ec2-securitygroup.html + return "AWS::EC2::SecurityGroup" + @classmethod def create_from_cloudformation_json( cls, resource_name, cloudformation_json, region_name @@ 
-2260,11 +2287,20 @@ def _verify_group_will_respect_rule_count_limit( raise RulesPerSecurityGroupLimitExceededError -class SecurityGroupIngress(object): +class SecurityGroupIngress(CloudFormationModel): def __init__(self, security_group, properties): self.security_group = security_group self.properties = properties + @staticmethod + def cloudformation_name_type(): + return None + + @staticmethod + def cloudformation_type(): + # https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-ec2-securitygroupingress.html + return "AWS::EC2::SecurityGroupIngress" + @classmethod def create_from_cloudformation_json( cls, resource_name, cloudformation_json, region_name @@ -2328,7 +2364,7 @@ def create_from_cloudformation_json( return cls(security_group, properties) -class VolumeAttachment(object): +class VolumeAttachment(CloudFormationModel): def __init__(self, volume, instance, device, status): self.volume = volume self.attach_time = utc_date_and_time() @@ -2336,6 +2372,15 @@ def __init__(self, volume, instance, device, status): self.device = device self.status = status + @staticmethod + def cloudformation_name_type(): + return None + + @staticmethod + def cloudformation_type(): + # https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-ec2-volumeattachment.html + return "AWS::EC2::VolumeAttachment" + @classmethod def create_from_cloudformation_json( cls, resource_name, cloudformation_json, region_name @@ -2354,7 +2399,7 @@ def create_from_cloudformation_json( return attachment -class Volume(TaggedEC2Resource): +class Volume(TaggedEC2Resource, CloudFormationModel): def __init__( self, ec2_backend, volume_id, size, zone, snapshot_id=None, encrypted=False ): @@ -2367,6 +2412,15 @@ def __init__( self.ec2_backend = ec2_backend self.encrypted = encrypted + @staticmethod + def cloudformation_name_type(): + return None + + @staticmethod + def cloudformation_type(): + # https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-ec2-volume.html + return "AWS::EC2::Volume" + @classmethod def create_from_cloudformation_json( cls, resource_name, cloudformation_json, region_name @@ -2623,7 +2677,7 @@ def remove_create_volume_permission(self, snapshot_id, user_id=None, group=None) return True -class VPC(TaggedEC2Resource): +class VPC(TaggedEC2Resource, CloudFormationModel): def __init__( self, ec2_backend, @@ -2656,6 +2710,15 @@ def __init__( amazon_provided_ipv6_cidr_block=amazon_provided_ipv6_cidr_block, ) + @staticmethod + def cloudformation_name_type(): + return None + + @staticmethod + def cloudformation_type(): + # https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-ec2-vpc.html + return "AWS::EC2::VPC" + @classmethod def create_from_cloudformation_json( cls, resource_name, cloudformation_json, region_name @@ -3022,13 +3085,22 @@ def reject(self): self.message = "Inactive" -class VPCPeeringConnection(TaggedEC2Resource): +class VPCPeeringConnection(TaggedEC2Resource, CloudFormationModel): def __init__(self, vpc_pcx_id, vpc, peer_vpc): self.id = vpc_pcx_id self.vpc = vpc self.peer_vpc = peer_vpc self._status = VPCPeeringConnectionStatus() + @staticmethod + def cloudformation_name_type(): + return None + + @staticmethod + def cloudformation_type(): + # https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-ec2-vpcpeeringconnection.html + return "AWS::EC2::VPCPeeringConnection" + @classmethod def create_from_cloudformation_json( cls, resource_name, cloudformation_json, region_name @@ -3114,7 +3186,7 @@ def 
reject_vpc_peering_connection(self, vpc_pcx_id): return vpc_pcx -class Subnet(TaggedEC2Resource): +class Subnet(TaggedEC2Resource, CloudFormationModel): def __init__( self, ec2_backend, @@ -3150,6 +3222,15 @@ def __init__( self._unused_ips = set() # if instance is destroyed hold IP here for reuse self._subnet_ips = {} # has IP: instance + @staticmethod + def cloudformation_name_type(): + return None + + @staticmethod + def cloudformation_type(): + # https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-ec2-subnet.html + return "AWS::EC2::Subnet" + @classmethod def create_from_cloudformation_json( cls, resource_name, cloudformation_json, region_name @@ -3377,11 +3458,20 @@ def modify_subnet_attribute(self, subnet_id, attr_name, attr_value): raise InvalidParameterValueError(attr_name) -class SubnetRouteTableAssociation(object): +class SubnetRouteTableAssociation(CloudFormationModel): def __init__(self, route_table_id, subnet_id): self.route_table_id = route_table_id self.subnet_id = subnet_id + @staticmethod + def cloudformation_name_type(): + return None + + @staticmethod + def cloudformation_type(): + # https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-ec2-subnetroutetableassociation.html + return "AWS::EC2::SubnetRouteTableAssociation" + @classmethod def create_from_cloudformation_json( cls, resource_name, cloudformation_json, region_name @@ -3411,7 +3501,7 @@ def create_subnet_association(self, route_table_id, subnet_id): return subnet_association -class RouteTable(TaggedEC2Resource): +class RouteTable(TaggedEC2Resource, CloudFormationModel): def __init__(self, ec2_backend, route_table_id, vpc_id, main=False): self.ec2_backend = ec2_backend self.id = route_table_id @@ -3420,6 +3510,15 @@ def __init__(self, ec2_backend, route_table_id, vpc_id, main=False): self.associations = {} self.routes = {} + @staticmethod + def cloudformation_name_type(): + return None + + @staticmethod + def cloudformation_type(): + # https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-ec2-routetable.html + return "AWS::EC2::RouteTable" + @classmethod def create_from_cloudformation_json( cls, resource_name, cloudformation_json, region_name @@ -3555,7 +3654,7 @@ def replace_route_table_association(self, association_id, route_table_id): return self.associate_route_table(route_table_id, subnet_id) -class Route(object): +class Route(CloudFormationModel): def __init__( self, route_table, @@ -3581,6 +3680,15 @@ def __init__( self.interface = interface self.vpc_pcx = vpc_pcx + @staticmethod + def cloudformation_name_type(): + return None + + @staticmethod + def cloudformation_type(): + # https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-ec2-route.html + return "AWS::EC2::Route" + @classmethod def create_from_cloudformation_json( cls, resource_name, cloudformation_json, region_name @@ -3748,12 +3856,21 @@ def delete_route(self, route_table_id, destination_cidr_block): return deleted -class InternetGateway(TaggedEC2Resource): +class InternetGateway(TaggedEC2Resource, CloudFormationModel): def __init__(self, ec2_backend): self.ec2_backend = ec2_backend self.id = random_internet_gateway_id() self.vpc = None + @staticmethod + def cloudformation_name_type(): + return None + + @staticmethod + def cloudformation_type(): + # https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-ec2-internetgateway.html + return "AWS::EC2::InternetGateway" + @classmethod def create_from_cloudformation_json( cls, resource_name, 
cloudformation_json, region_name @@ -3826,11 +3943,20 @@ def get_internet_gateway(self, internet_gateway_id): return self.describe_internet_gateways(internet_gateway_ids=igw_ids)[0] -class VPCGatewayAttachment(BaseModel): +class VPCGatewayAttachment(CloudFormationModel): def __init__(self, gateway_id, vpc_id): self.gateway_id = gateway_id self.vpc_id = vpc_id + @staticmethod + def cloudformation_name_type(): + return None + + @staticmethod + def cloudformation_type(): + # https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-ec2-vpcgatewayattachment.html + return "AWS::EC2::VPCGatewayAttachment" + @classmethod def create_from_cloudformation_json( cls, resource_name, cloudformation_json, region_name @@ -4051,7 +4177,7 @@ def __init__( self.weighted_capacity = float(weighted_capacity) -class SpotFleetRequest(TaggedEC2Resource): +class SpotFleetRequest(TaggedEC2Resource, CloudFormationModel): def __init__( self, ec2_backend, @@ -4100,6 +4226,15 @@ def __init__( def physical_resource_id(self): return self.id + @staticmethod + def cloudformation_name_type(): + return None + + @staticmethod + def cloudformation_type(): + # https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-ec2-spotfleet.html + return "AWS::EC2::SpotFleet" + @classmethod def create_from_cloudformation_json( cls, resource_name, cloudformation_json, region_name @@ -4323,7 +4458,7 @@ def modify_spot_fleet_request( return True -class ElasticAddress(object): +class ElasticAddress(CloudFormationModel): def __init__(self, domain, address=None): if address: self.public_ip = address @@ -4335,6 +4470,15 @@ def __init__(self, domain, address=None): self.eni = None self.association_id = None + @staticmethod + def cloudformation_name_type(): + return None + + @staticmethod + def cloudformation_type(): + # https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-ec2-eip.html + return "AWS::EC2::EIP" + @classmethod def create_from_cloudformation_json( cls, resource_name, cloudformation_json, region_name @@ -5095,7 +5239,7 @@ def delete_customer_gateway(self, customer_gateway_id): return deleted -class NatGateway(object): +class NatGateway(CloudFormationModel): def __init__(self, backend, subnet_id, allocation_id): # public properties self.id = random_nat_gateway_id() @@ -5133,6 +5277,15 @@ def public_ip(self): eips = self._backend.address_by_allocation([self.allocation_id]) return eips[0].public_ip + @staticmethod + def cloudformation_name_type(): + return None + + @staticmethod + def cloudformation_type(): + # https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-ec2-natgateway.html + return "AWS::EC2::NatGateway" + @classmethod def create_from_cloudformation_json( cls, resource_name, cloudformation_json, region_name diff --git a/moto/ecr/models.py b/moto/ecr/models.py index 88b058e1e515..a1d5aa6e5911 100644 --- a/moto/ecr/models.py +++ b/moto/ecr/models.py @@ -7,7 +7,7 @@ from botocore.exceptions import ParamValidationError -from moto.core import BaseBackend, BaseModel +from moto.core import BaseBackend, BaseModel, CloudFormationModel from moto.ec2 import ec2_backends from moto.ecr.exceptions import ImageNotFoundException, RepositoryNotFoundException @@ -38,7 +38,7 @@ def response_object(self): return self.gen_response_object() -class Repository(BaseObject): +class Repository(BaseObject, CloudFormationModel): def __init__(self, repository_name): self.registry_id = DEFAULT_REGISTRY_ID self.arn = "arn:aws:ecr:us-east-1:{0}:repository/{1}".format( @@ -67,6 
+67,15 @@ def response_object(self): del response_object["arn"], response_object["name"], response_object["images"] return response_object + @staticmethod + def cloudformation_name_type(): + return "RepositoryName" + + @staticmethod + def cloudformation_type(): + # https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-ecr-repository.html + return "AWS::ECR::Repository" + @classmethod def create_from_cloudformation_json( cls, resource_name, cloudformation_json, region_name diff --git a/moto/ecs/models.py b/moto/ecs/models.py index a78614cc569a..bf20c2245c49 100644 --- a/moto/ecs/models.py +++ b/moto/ecs/models.py @@ -8,7 +8,7 @@ from boto3 import Session from moto.core.exceptions import JsonRESTError -from moto.core import BaseBackend, BaseModel +from moto.core import BaseBackend, BaseModel, CloudFormationModel from moto.core.utils import unix_time from moto.ec2 import ec2_backends from copy import copy @@ -44,7 +44,7 @@ def response_object(self): return self.gen_response_object() -class Cluster(BaseObject): +class Cluster(BaseObject, CloudFormationModel): def __init__(self, cluster_name, region_name): self.active_services_count = 0 self.arn = "arn:aws:ecs:{0}:012345678910:cluster/{1}".format( @@ -69,6 +69,15 @@ def response_object(self): del response_object["arn"], response_object["name"] return response_object + @staticmethod + def cloudformation_name_type(): + return "ClusterName" + + @staticmethod + def cloudformation_type(): + # https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-ecs-cluster.html + return "AWS::ECS::Cluster" + @classmethod def create_from_cloudformation_json( cls, resource_name, cloudformation_json, region_name @@ -116,7 +125,7 @@ def get_cfn_attribute(self, attribute_name): raise UnformattedGetAttTemplateException() -class TaskDefinition(BaseObject): +class TaskDefinition(BaseObject, CloudFormationModel): def __init__( self, family, @@ -159,6 +168,15 @@ def response_object(self): def physical_resource_id(self): return self.arn + @staticmethod + def cloudformation_name_type(): + return None + + @staticmethod + def cloudformation_type(): + # https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-ecs-taskdefinition.html + return "AWS::ECS::TaskDefinition" + @classmethod def create_from_cloudformation_json( cls, resource_name, cloudformation_json, region_name @@ -235,7 +253,7 @@ def response_object(self): return response_object -class Service(BaseObject): +class Service(BaseObject, CloudFormationModel): def __init__( self, cluster, @@ -315,6 +333,15 @@ def response_object(self): return response_object + @staticmethod + def cloudformation_name_type(): + return "ServiceName" + + @staticmethod + def cloudformation_type(): + # https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-ecs-service.html + return "AWS::ECS::Service" + @classmethod def create_from_cloudformation_json( cls, resource_name, cloudformation_json, region_name diff --git a/moto/elb/models.py b/moto/elb/models.py index 4991b0754421..715758090cde 100644 --- a/moto/elb/models.py +++ b/moto/elb/models.py @@ -13,7 +13,7 @@ ) from boto.ec2.elb.policies import Policies, OtherPolicy from moto.compat import OrderedDict -from moto.core import BaseBackend, BaseModel +from moto.core import BaseBackend, BaseModel, CloudFormationModel from moto.ec2.models import ec2_backends from .exceptions import ( BadHealthCheckDefinition, @@ -69,7 +69,7 @@ def __repr__(self): ) -class FakeLoadBalancer(BaseModel): +class 
FakeLoadBalancer(CloudFormationModel): def __init__( self, name, @@ -119,6 +119,15 @@ def __init__( ) self.backends.append(backend) + @staticmethod + def cloudformation_name_type(): + return "LoadBalancerName" + + @staticmethod + def cloudformation_type(): + # https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-elasticloadbalancing-loadbalancer.html + return "AWS::ElasticLoadBalancing::LoadBalancer" + @classmethod def create_from_cloudformation_json( cls, resource_name, cloudformation_json, region_name diff --git a/moto/elbv2/models.py b/moto/elbv2/models.py index a6da0d01c419..1deaac9c4d5d 100644 --- a/moto/elbv2/models.py +++ b/moto/elbv2/models.py @@ -6,7 +6,7 @@ from botocore.exceptions import ParamValidationError from moto.compat import OrderedDict from moto.core.exceptions import RESTError -from moto.core import BaseBackend, BaseModel +from moto.core import BaseBackend, BaseModel, CloudFormationModel from moto.core.utils import camelcase_to_underscores, underscores_to_camelcase from moto.ec2.models import ec2_backends from moto.acm.models import acm_backends @@ -50,7 +50,7 @@ def __init__( self.description = description -class FakeTargetGroup(BaseModel): +class FakeTargetGroup(CloudFormationModel): HTTP_CODE_REGEX = re.compile(r"(?:(?:\d+-\d+|\d+),?)+") def __init__( @@ -143,6 +143,15 @@ def health_for(self, target, ec2_backend): ) return FakeHealthStatus(t["id"], t["port"], self.healthcheck_port, "healthy") + @staticmethod + def cloudformation_name_type(): + return "Name" + + @staticmethod + def cloudformation_type(): + # https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-elasticloadbalancingv2-targetgroup.html + return "AWS::ElasticLoadBalancingV2::TargetGroup" + @classmethod def create_from_cloudformation_json( cls, resource_name, cloudformation_json, region_name @@ -183,7 +192,7 @@ def create_from_cloudformation_json( return target_group -class FakeListener(BaseModel): +class FakeListener(CloudFormationModel): def __init__( self, load_balancer_arn, @@ -228,6 +237,15 @@ def register(self, rule): self._non_default_rules, key=lambda x: x.priority ) + @staticmethod + def cloudformation_name_type(): + return None + + @staticmethod + def cloudformation_type(): + # https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-elasticloadbalancingv2-listener.html + return "AWS::ElasticLoadBalancingV2::Listener" + @classmethod def create_from_cloudformation_json( cls, resource_name, cloudformation_json, region_name @@ -343,7 +361,7 @@ def __repr__(self): ) -class FakeLoadBalancer(BaseModel): +class FakeLoadBalancer(CloudFormationModel): VALID_ATTRS = { "access_logs.s3.enabled", "access_logs.s3.bucket", @@ -402,6 +420,15 @@ def delete(self, region): """ Not exposed as part of the ELB API - used for CloudFormation. 
""" elbv2_backends[region].delete_load_balancer(self.arn) + @staticmethod + def cloudformation_name_type(): + return "Name" + + @staticmethod + def cloudformation_type(): + # https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-elasticloadbalancingv2-loadbalancer.html + return "AWS::ElasticLoadBalancingV2::LoadBalancer" + @classmethod def create_from_cloudformation_json( cls, resource_name, cloudformation_json, region_name diff --git a/moto/events/models.py b/moto/events/models.py index d70898198e17..7fa7d225f561 100644 --- a/moto/events/models.py +++ b/moto/events/models.py @@ -4,14 +4,14 @@ from boto3 import Session from moto.core.exceptions import JsonRESTError -from moto.core import BaseBackend, BaseModel +from moto.core import BaseBackend, CloudFormationModel from moto.sts.models import ACCOUNT_ID from moto.utilities.tagging_service import TaggingService from uuid import uuid4 -class Rule(BaseModel): +class Rule(CloudFormationModel): def _generate_arn(self, name): return "arn:aws:events:{region_name}:111111111111:rule/{name}".format( region_name=self.region_name, name=name @@ -73,6 +73,15 @@ def get_cfn_attribute(self, attribute_name): raise UnformattedGetAttTemplateException() + @staticmethod + def cloudformation_name_type(): + return "Name" + + @staticmethod + def cloudformation_type(): + # https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-events-rule.html + return "AWS::Events::Rule" + @classmethod def create_from_cloudformation_json( cls, resource_name, cloudformation_json, region_name @@ -101,7 +110,7 @@ def delete_from_cloudformation_json( event_backend.delete_rule(name=event_name) -class EventBus(BaseModel): +class EventBus(CloudFormationModel): def __init__(self, region_name, name): self.region = region_name self.name = name @@ -152,6 +161,15 @@ def get_cfn_attribute(self, attribute_name): raise UnformattedGetAttTemplateException() + @staticmethod + def cloudformation_name_type(): + return "Name" + + @staticmethod + def cloudformation_type(): + # https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-events-eventbus.html + return "AWS::Events::EventBus" + @classmethod def create_from_cloudformation_json( cls, resource_name, cloudformation_json, region_name diff --git a/moto/iam/models.py b/moto/iam/models.py index 49755e57ab43..16b3ac0abe2f 100755 --- a/moto/iam/models.py +++ b/moto/iam/models.py @@ -15,7 +15,7 @@ from uuid import uuid4 from moto.core.exceptions import RESTError -from moto.core import BaseBackend, BaseModel, ACCOUNT_ID +from moto.core import BaseBackend, BaseModel, ACCOUNT_ID, CloudFormationModel from moto.core.utils import ( iso_8601_datetime_without_milliseconds, iso_8601_datetime_with_milliseconds, @@ -299,7 +299,7 @@ class InlinePolicy(Policy): """TODO: is this needed?""" -class Role(BaseModel): +class Role(CloudFormationModel): def __init__( self, role_id, @@ -327,6 +327,15 @@ def __init__( def created_iso_8601(self): return iso_8601_datetime_with_milliseconds(self.create_date) + @staticmethod + def cloudformation_name_type(): + return "RoleName" + + @staticmethod + def cloudformation_type(): + # https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-iam-role.html + return "AWS::IAM::Role" + @classmethod def create_from_cloudformation_json( cls, resource_name, cloudformation_json, region_name @@ -384,7 +393,7 @@ def get_tags(self): return [self.tags[tag] for tag in self.tags] -class InstanceProfile(BaseModel): +class InstanceProfile(CloudFormationModel): def 
__init__(self, instance_profile_id, name, path, roles): self.id = instance_profile_id self.name = name @@ -396,6 +405,15 @@ def __init__(self, instance_profile_id, name, path, roles): def created_iso_8601(self): return iso_8601_datetime_with_milliseconds(self.create_date) + @staticmethod + def cloudformation_name_type(): + return "InstanceProfileName" + + @staticmethod + def cloudformation_type(): + # https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-iam-instanceprofile.html + return "AWS::IAM::InstanceProfile" + @classmethod def create_from_cloudformation_json( cls, resource_name, cloudformation_json, region_name diff --git a/moto/kinesis/models.py b/moto/kinesis/models.py index ec9655bfaeaa..c4b04d924cd7 100644 --- a/moto/kinesis/models.py +++ b/moto/kinesis/models.py @@ -12,7 +12,7 @@ from boto3 import Session from moto.compat import OrderedDict -from moto.core import BaseBackend, BaseModel +from moto.core import BaseBackend, BaseModel, CloudFormationModel from moto.core.utils import unix_time from moto.core import ACCOUNT_ID from .exceptions import ( @@ -129,7 +129,7 @@ def to_json(self): } -class Stream(BaseModel): +class Stream(CloudFormationModel): def __init__(self, stream_name, shard_count, region): self.stream_name = stream_name self.shard_count = shard_count @@ -216,6 +216,15 @@ def to_json_summary(self): } } + @staticmethod + def cloudformation_name_type(): + return "Name" + + @staticmethod + def cloudformation_type(): + # https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-kinesis-stream.html + return "AWS::Kinesis::Stream" + @classmethod def create_from_cloudformation_json( cls, resource_name, cloudformation_json, region_name diff --git a/moto/kms/models.py b/moto/kms/models.py index 36f72e6de8ba..2eb7cb771cc4 100644 --- a/moto/kms/models.py +++ b/moto/kms/models.py @@ -6,7 +6,7 @@ from boto3 import Session -from moto.core import BaseBackend, BaseModel +from moto.core import BaseBackend, CloudFormationModel from moto.core.utils import unix_time from moto.utilities.tagging_service import TaggingService from moto.core.exceptions import JsonRESTError @@ -15,7 +15,7 @@ from .utils import decrypt, encrypt, generate_key_id, generate_master_key -class Key(BaseModel): +class Key(CloudFormationModel): def __init__( self, policy, key_usage, customer_master_key_spec, description, region ): @@ -99,6 +99,15 @@ def to_dict(self): def delete(self, region_name): kms_backends[region_name].delete_key(self.id) + @staticmethod + def cloudformation_name_type(): + return None + + @staticmethod + def cloudformation_type(): + # https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-kms-key.html + return "AWS::KMS::Key" + @classmethod def create_from_cloudformation_json( self, resource_name, cloudformation_json, region_name diff --git a/moto/rds/models.py b/moto/rds/models.py index 40b1197b6326..440da34d221d 100644 --- a/moto/rds/models.py +++ b/moto/rds/models.py @@ -3,14 +3,14 @@ import boto.rds from jinja2 import Template -from moto.core import BaseBackend, BaseModel +from moto.core import BaseBackend, CloudFormationModel from moto.core.utils import get_random_hex from moto.ec2.models import ec2_backends from moto.rds.exceptions import UnformattedGetAttTemplateException from moto.rds2.models import rds2_backends -class Database(BaseModel): +class Database(CloudFormationModel): def get_cfn_attribute(self, attribute_name): if attribute_name == "Endpoint.Address": return self.address @@ -18,13 +18,22 @@ def get_cfn_attribute(self, 
attribute_name): return self.port raise UnformattedGetAttTemplateException() + @staticmethod + def cloudformation_name_type(): + return "DBInstanceIdentifier" + + @staticmethod + def cloudformation_type(): + # https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-rds-dbinstance.html + return "AWS::RDS::DBInstance" + @classmethod def create_from_cloudformation_json( cls, resource_name, cloudformation_json, region_name ): properties = cloudformation_json["Properties"] - db_instance_identifier = properties.get("DBInstanceIdentifier") + db_instance_identifier = properties.get(cls.cloudformation_name_type()) if not db_instance_identifier: db_instance_identifier = resource_name.lower() + get_random_hex(12) db_security_groups = properties.get("DBSecurityGroups") @@ -163,7 +172,7 @@ def delete(self, region_name): backend.delete_database(self.db_instance_identifier) -class SecurityGroup(BaseModel): +class SecurityGroup(CloudFormationModel): def __init__(self, group_name, description): self.group_name = group_name self.description = description @@ -206,6 +215,15 @@ def authorize_cidr(self, cidr_ip): def authorize_security_group(self, security_group): self.ec2_security_groups.append(security_group) + @staticmethod + def cloudformation_name_type(): + return None + + @staticmethod + def cloudformation_type(): + # https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-rds-dbsecuritygroup.html + return "AWS::RDS::DBSecurityGroup" + @classmethod def create_from_cloudformation_json( cls, resource_name, cloudformation_json, region_name @@ -239,7 +257,7 @@ def delete(self, region_name): backend.delete_security_group(self.group_name) -class SubnetGroup(BaseModel): +class SubnetGroup(CloudFormationModel): def __init__(self, subnet_name, description, subnets): self.subnet_name = subnet_name self.description = description @@ -271,13 +289,23 @@ def to_xml(self): ) return template.render(subnet_group=self) + @staticmethod + def cloudformation_name_type(): + return "DBSubnetGroupName" + + @staticmethod + def cloudformation_type(): + # https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-rds-dbsubnetgroup.html + return "AWS::RDS::DBSubnetGroup" + @classmethod def create_from_cloudformation_json( cls, resource_name, cloudformation_json, region_name ): properties = cloudformation_json["Properties"] - - subnet_name = resource_name.lower() + get_random_hex(12) + subnet_name = properties.get(cls.cloudformation_name_type()) + if not subnet_name: + subnet_name = resource_name.lower() + get_random_hex(12) description = properties["DBSubnetGroupDescription"] subnet_ids = properties["SubnetIds"] tags = properties.get("Tags") diff --git a/moto/rds2/models.py b/moto/rds2/models.py index 7fa4f3316f06..5f46311ece90 100644 --- a/moto/rds2/models.py +++ b/moto/rds2/models.py @@ -9,7 +9,7 @@ from jinja2 import Template from re import compile as re_compile from moto.compat import OrderedDict -from moto.core import BaseBackend, BaseModel +from moto.core import BaseBackend, BaseModel, CloudFormationModel from moto.core.utils import get_random_hex from moto.core.utils import iso_8601_datetime_with_milliseconds from moto.ec2.models import ec2_backends @@ -28,7 +28,7 @@ ) -class Database(BaseModel): +class Database(CloudFormationModel): def __init__(self, **kwargs): self.status = "available" self.is_replica = False @@ -356,13 +356,22 @@ def default_allocated_storage(engine, storage_type): "sqlserver-web": {"gp2": 20, "io1": 100, "standard": 20}, }[engine][storage_type] + 
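+    # The two staticmethods that follow implement the CloudFormationModel
+    # contract: cloudformation_name_type() names the template property that
+    # carries the resource name ("DBInstanceIdentifier" here), while
+    # cloudformation_type() returns the resource type string that lets the
+    # CloudFormation parser map a template resource back to this model class.
+    # A minimal sketch of that lookup (mirroring the loop added to
+    # moto/cloudformation/parsing.py later in this series):
+    #
+    #     for model in MODEL_LIST:
+    #         if model.cloudformation_type() == resource_type:
+    #             return model.cloudformation_name_type()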
@staticmethod + def cloudformation_name_type(): + return "DBInstanceIdentifier" + + @staticmethod + def cloudformation_type(): + # https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-rds-dbinstance.html + return "AWS::RDS::DBInstance" + @classmethod def create_from_cloudformation_json( cls, resource_name, cloudformation_json, region_name ): properties = cloudformation_json["Properties"] - db_instance_identifier = properties.get("DBInstanceIdentifier") + db_instance_identifier = properties.get(cls.cloudformation_name_type()) if not db_instance_identifier: db_instance_identifier = resource_name.lower() + get_random_hex(12) db_security_groups = properties.get("DBSecurityGroups") @@ -564,7 +573,7 @@ def remove_tags(self, tag_keys): self.tags = [tag_set for tag_set in self.tags if tag_set["Key"] not in tag_keys] -class SecurityGroup(BaseModel): +class SecurityGroup(CloudFormationModel): def __init__(self, group_name, description, tags): self.group_name = group_name self.description = description @@ -627,6 +636,15 @@ def authorize_cidr(self, cidr_ip): def authorize_security_group(self, security_group): self.ec2_security_groups.append(security_group) + @staticmethod + def cloudformation_name_type(): + return None + + @staticmethod + def cloudformation_type(): + # https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-rds-dbsecuritygroup.html + return "AWS::RDS::DBSecurityGroup" + @classmethod def create_from_cloudformation_json( cls, resource_name, cloudformation_json, region_name @@ -671,7 +689,7 @@ def delete(self, region_name): backend.delete_security_group(self.group_name) -class SubnetGroup(BaseModel): +class SubnetGroup(CloudFormationModel): def __init__(self, subnet_name, description, subnets, tags): self.subnet_name = subnet_name self.description = description @@ -726,13 +744,24 @@ def to_json(self): ) return template.render(subnet_group=self) + @staticmethod + def cloudformation_name_type(): + return "DBSubnetGroupName" + + @staticmethod + def cloudformation_type(): + # https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-rds-dbsubnetgroup.html + return "AWS::RDS::DBSubnetGroup" + @classmethod def create_from_cloudformation_json( cls, resource_name, cloudformation_json, region_name ): properties = cloudformation_json["Properties"] - subnet_name = resource_name.lower() + get_random_hex(12) + subnet_name = properties.get(cls.cloudformation_name_type()) + if not subnet_name: + subnet_name = resource_name.lower() + get_random_hex(12) description = properties["DBSubnetGroupDescription"] subnet_ids = properties["SubnetIds"] tags = properties.get("Tags") @@ -1441,7 +1470,7 @@ def to_xml(self): return template.render(option_group_option_setting=self) -class DBParameterGroup(object): +class DBParameterGroup(CloudFormationModel): def __init__(self, name, description, family, tags): self.name = name self.description = description @@ -1480,6 +1509,15 @@ def delete(self, region_name): backend = rds2_backends[region_name] backend.delete_db_parameter_group(self.name) + @staticmethod + def cloudformation_name_type(): + return None + + @staticmethod + def cloudformation_type(): + # https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-rds-dbparametergroup.html + return "AWS::RDS::DBParameterGroup" + @classmethod def create_from_cloudformation_json( cls, resource_name, cloudformation_json, region_name diff --git a/moto/redshift/models.py b/moto/redshift/models.py index 07baf18c0c3c..0bdb14edc138 100644 --- 
a/moto/redshift/models.py +++ b/moto/redshift/models.py @@ -6,7 +6,7 @@ from boto3 import Session from botocore.exceptions import ClientError from moto.compat import OrderedDict -from moto.core import BaseBackend, BaseModel +from moto.core import BaseBackend, BaseModel, CloudFormationModel from moto.core.utils import iso_8601_datetime_with_milliseconds from moto.ec2 import ec2_backends from .exceptions import ( @@ -63,7 +63,7 @@ def delete_tags(self, tag_keys): return self.tags -class Cluster(TaggableResourceMixin, BaseModel): +class Cluster(TaggableResourceMixin, CloudFormationModel): resource_type = "cluster" @@ -157,6 +157,15 @@ def __init__( self.iam_roles_arn = iam_roles_arn or [] self.restored_from_snapshot = restored_from_snapshot + @staticmethod + def cloudformation_name_type(): + return None + + @staticmethod + def cloudformation_type(): + # https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-redshift-cluster.html + return "AWS::Redshift::Cluster" + @classmethod def create_from_cloudformation_json( cls, resource_name, cloudformation_json, region_name @@ -170,6 +179,7 @@ def create_from_cloudformation_json( ].cluster_subnet_group_name else: subnet_group_name = None + cluster = redshift_backend.create_cluster( cluster_identifier=resource_name, node_type=properties.get("NodeType"), @@ -321,7 +331,7 @@ def to_json(self): } -class SubnetGroup(TaggableResourceMixin, BaseModel): +class SubnetGroup(TaggableResourceMixin, CloudFormationModel): resource_type = "subnetgroup" @@ -342,6 +352,15 @@ def __init__( if not self.subnets: raise InvalidSubnetError(subnet_ids) + @staticmethod + def cloudformation_name_type(): + return None + + @staticmethod + def cloudformation_type(): + # https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-redshift-clustersubnetgroup.html + return "AWS::Redshift::ClusterSubnetGroup" + @classmethod def create_from_cloudformation_json( cls, resource_name, cloudformation_json, region_name @@ -412,7 +431,7 @@ def to_json(self): } -class ParameterGroup(TaggableResourceMixin, BaseModel): +class ParameterGroup(TaggableResourceMixin, CloudFormationModel): resource_type = "parametergroup" @@ -429,6 +448,15 @@ def __init__( self.group_family = group_family self.description = description + @staticmethod + def cloudformation_name_type(): + return None + + @staticmethod + def cloudformation_type(): + # https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-redshift-clusterparametergroup.html + return "AWS::Redshift::ClusterParameterGroup" + @classmethod def create_from_cloudformation_json( cls, resource_name, cloudformation_json, region_name diff --git a/moto/route53/models.py b/moto/route53/models.py index 0bdefd25b0fd..52f60d971c2e 100644 --- a/moto/route53/models.py +++ b/moto/route53/models.py @@ -7,7 +7,7 @@ import uuid from jinja2 import Template -from moto.core import BaseBackend, BaseModel +from moto.core import BaseBackend, CloudFormationModel ROUTE53_ID_CHOICE = string.ascii_uppercase + string.digits @@ -18,7 +18,7 @@ def create_route53_zone_id(): return "".join([random.choice(ROUTE53_ID_CHOICE) for _ in range(0, 15)]) -class HealthCheck(BaseModel): +class HealthCheck(CloudFormationModel): def __init__(self, health_check_id, health_check_args): self.id = health_check_id self.ip_address = health_check_args.get("ip_address") @@ -34,6 +34,15 @@ def __init__(self, health_check_id, health_check_args): def physical_resource_id(self): return self.id + @staticmethod + def cloudformation_name_type(): + return 
None + + @staticmethod + def cloudformation_type(): + # https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-route53-healthcheck.html + return "AWS::Route53::HealthCheck" + @classmethod def create_from_cloudformation_json( cls, resource_name, cloudformation_json, region_name @@ -75,7 +84,7 @@ def to_xml(self): return template.render(health_check=self) -class RecordSet(BaseModel): +class RecordSet(CloudFormationModel): def __init__(self, kwargs): self.name = kwargs.get("Name") self.type_ = kwargs.get("Type") @@ -91,6 +100,15 @@ def __init__(self, kwargs): self.failover = kwargs.get("Failover") self.geo_location = kwargs.get("GeoLocation") + @staticmethod + def cloudformation_name_type(): + return "Name" + + @staticmethod + def cloudformation_type(): + # https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-route53-recordset.html + return "AWS::Route53::RecordSet" + @classmethod def create_from_cloudformation_json( cls, resource_name, cloudformation_json, region_name @@ -202,7 +220,7 @@ def reverse_domain_name(domain_name): return ".".join(reversed(domain_name.split("."))) -class FakeZone(BaseModel): +class FakeZone(CloudFormationModel): def __init__(self, name, id_, private_zone, comment=None): self.name = name self.id = id_ @@ -267,6 +285,15 @@ def get_record_sets(self, start_type, start_name): def physical_resource_id(self): return self.id + @staticmethod + def cloudformation_name_type(): + return "Name" + + @staticmethod + def cloudformation_type(): + # https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-route53-hostedzone.html + return "AWS::Route53::HostedZone" + @classmethod def create_from_cloudformation_json( cls, resource_name, cloudformation_json, region_name @@ -278,7 +305,7 @@ def create_from_cloudformation_json( return hosted_zone -class RecordSetGroup(BaseModel): +class RecordSetGroup(CloudFormationModel): def __init__(self, hosted_zone_id, record_sets): self.hosted_zone_id = hosted_zone_id self.record_sets = record_sets @@ -287,6 +314,15 @@ def __init__(self, hosted_zone_id, record_sets): def physical_resource_id(self): return "arn:aws:route53:::hostedzone/{0}".format(self.hosted_zone_id) + @staticmethod + def cloudformation_name_type(): + return None + + @staticmethod + def cloudformation_type(): + # https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-route53-recordsetgroup.html + return "AWS::Route53::RecordSetGroup" + @classmethod def create_from_cloudformation_json( cls, resource_name, cloudformation_json, region_name diff --git a/moto/s3/models.py b/moto/s3/models.py index e5237168e630..800601690383 100644 --- a/moto/s3/models.py +++ b/moto/s3/models.py @@ -21,7 +21,7 @@ import six from bisect import insort -from moto.core import ACCOUNT_ID, BaseBackend, BaseModel +from moto.core import ACCOUNT_ID, BaseBackend, BaseModel, CloudFormationModel from moto.core.utils import iso_8601_datetime_without_milliseconds_s3, rfc_1123_datetime from moto.cloudwatch.models import MetricDatum from moto.utilities.tagging_service import TaggingService @@ -763,7 +763,7 @@ def to_config_dict(self): } -class FakeBucket(BaseModel): +class FakeBucket(CloudFormationModel): def __init__(self, name, region_name): self.name = name self.region_name = region_name @@ -1070,6 +1070,15 @@ def arn(self): def physical_resource_id(self): return self.name + @staticmethod + def cloudformation_name_type(): + return "BucketName" + + @staticmethod + def cloudformation_type(): + # 
https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-s3-bucket.html
+        return "AWS::S3::Bucket"
+
     @classmethod
     def create_from_cloudformation_json(
         cls, resource_name, cloudformation_json, region_name
diff --git a/moto/sns/models.py b/moto/sns/models.py
index 76376e58fce8..8a4771a3754f 100644
--- a/moto/sns/models.py
+++ b/moto/sns/models.py
@@ -11,7 +11,7 @@
 from boto3 import Session

 from moto.compat import OrderedDict
-from moto.core import BaseBackend, BaseModel
+from moto.core import BaseBackend, BaseModel, CloudFormationModel
 from moto.core.utils import (
     iso_8601_datetime_with_milliseconds,
     camelcase_to_underscores,
@@ -37,7 +37,7 @@
 MAXIMUM_MESSAGE_LENGTH = 262144  # 256 KiB


-class Topic(BaseModel):
+class Topic(CloudFormationModel):
     def __init__(self, name, sns_backend):
         self.name = name
         self.sns_backend = sns_backend
@@ -87,6 +87,15 @@ def policy(self):
     def policy(self, policy):
         self._policy_json = json.loads(policy)

+    @staticmethod
+    def cloudformation_name_type():
+        return "TopicName"
+
+    @staticmethod
+    def cloudformation_type():
+        # https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-sns-topic.html
+        return "AWS::SNS::Topic"
+
     @classmethod
     def create_from_cloudformation_json(
         cls, resource_name, cloudformation_json, region_name
@@ -94,7 +103,7 @@ def create_from_cloudformation_json(
         sns_backend = sns_backends[region_name]
         properties = cloudformation_json["Properties"]

-        topic = sns_backend.create_topic(properties.get("TopicName"))
+        topic = sns_backend.create_topic(properties.get(cls.cloudformation_name_type()))
         for subscription in properties.get("Subscription", []):
             sns_backend.subscribe(
                 topic.arn, subscription["Endpoint"], subscription["Protocol"]
diff --git a/moto/sqs/models.py b/moto/sqs/models.py
index 4befbb50ac08..a3642c17e075 100644
--- a/moto/sqs/models.py
+++ b/moto/sqs/models.py
@@ -12,7 +12,7 @@
 from boto3 import Session

 from moto.core.exceptions import RESTError
-from moto.core import BaseBackend, BaseModel
+from moto.core import BaseBackend, BaseModel, CloudFormationModel
 from moto.core.utils import (
     camelcase_to_underscores,
     get_random_message_id,
@@ -188,7 +188,7 @@ def delayed(self):
         return False


-class Queue(BaseModel):
+class Queue(CloudFormationModel):
     BASE_ATTRIBUTES = [
         "ApproximateNumberOfMessages",
         "ApproximateNumberOfMessagesDelayed",
@@ -354,6 +354,15 @@ def _setup_dlq(self, policy):
                 ),
             )

+    @staticmethod
+    def cloudformation_name_type():
+        return "QueueName"
+
+    @staticmethod
+    def cloudformation_type():
+        # https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-sqs-queue.html
+        return "AWS::SQS::Queue"
+
     @classmethod
     def create_from_cloudformation_json(
         cls, resource_name, cloudformation_json, region_name

From 06ed67a8e5c6ffddb7b588798141d9c5a70f183f Mon Sep 17 00:00:00 2001
From: Larry Aiello
Date: Sat, 1 Aug 2020 12:03:54 -0400
Subject: [PATCH 478/658] Implement UserIds for Snapshot Attributes (#3192)

* implement register_image
* format code
* add user_ids to snapshot model
* trying to un-deprecate tests
* Write tests and finalize implementation
* Add region parameter to boto3 resource call
* fixed test error
---
 moto/ec2/models.py                         |  39 +++--
 moto/ec2/responses/elastic_block_store.py  |  35 ++--
 tests/test_ec2/test_amis.py                |  10 ++
 tests/test_ec2/test_elastic_block_store.py | 183 +++++++++++++++++++--
 4 files changed, 220 insertions(+), 47 deletions(-)

diff --git a/moto/ec2/models.py b/moto/ec2/models.py
index 3d60654a9cfb..e6c57dcdde21 100644
--- a/moto/ec2/models.py
+++ b/moto/ec2/models.py
@@ -2488,6 +2488,7 @@ def __init__(
         self.description = description
         self.start_time = utc_date_and_time()
         self.create_volume_permission_groups = set()
+        self.create_volume_permission_userids = set()
         self.ec2_backend = ec2_backend
         self.status = "completed"
         self.encrypted = encrypted
@@ -2652,28 +2653,32 @@ def get_create_volume_permission_groups(self, snapshot_id):
         snapshot = self.get_snapshot(snapshot_id)
         return snapshot.create_volume_permission_groups

-    def add_create_volume_permission(self, snapshot_id, user_id=None, group=None):
-        if user_id:
-            self.raise_not_implemented_error(
-                "The UserId parameter for ModifySnapshotAttribute"
-            )
+    def get_create_volume_permission_userids(self, snapshot_id):
+        snapshot = self.get_snapshot(snapshot_id)
+        return snapshot.create_volume_permission_userids

-        if group != "all":
-            raise InvalidAMIAttributeItemValueError("UserGroup", group)
+    def add_create_volume_permission(self, snapshot_id, user_ids=None, groups=None):
         snapshot = self.get_snapshot(snapshot_id)
-        snapshot.create_volume_permission_groups.add(group)
-        return True
+        if user_ids:
+            snapshot.create_volume_permission_userids.update(user_ids)

-    def remove_create_volume_permission(self, snapshot_id, user_id=None, group=None):
-        if user_id:
-            self.raise_not_implemented_error(
-                "The UserId parameter for ModifySnapshotAttribute"
-            )
+        if groups and groups != ["all"]:
+            raise InvalidAMIAttributeItemValueError("UserGroup", groups)
+        else:
+            snapshot.create_volume_permission_groups.update(groups)

-        if group != "all":
-            raise InvalidAMIAttributeItemValueError("UserGroup", group)
+        return True
+
+    def remove_create_volume_permission(self, snapshot_id, user_ids=None, groups=None):
         snapshot = self.get_snapshot(snapshot_id)
-        snapshot.create_volume_permission_groups.discard(group)
+        if user_ids:
+            snapshot.create_volume_permission_userids.difference_update(user_ids)
+
+        if groups and groups != ["all"]:
+            raise InvalidAMIAttributeItemValueError("UserGroup", groups)
+        else:
+            snapshot.create_volume_permission_groups.difference_update(groups)
+
         return True
diff --git a/moto/ec2/responses/elastic_block_store.py b/moto/ec2/responses/elastic_block_store.py
index d11470242a44..f7f4df9dc8d8 100644
--- a/moto/ec2/responses/elastic_block_store.py
+++ b/moto/ec2/responses/elastic_block_store.py
@@ -116,22 +116,23 @@ def import_volume(self):
     def describe_snapshot_attribute(self):
         snapshot_id = self._get_param("SnapshotId")
         groups = self.ec2_backend.get_create_volume_permission_groups(snapshot_id)
+        user_ids = self.ec2_backend.get_create_volume_permission_userids(snapshot_id)
         template = self.response_template(DESCRIBE_SNAPSHOT_ATTRIBUTES_RESPONSE)
-        return template.render(snapshot_id=snapshot_id, groups=groups)
+        return template.render(snapshot_id=snapshot_id, groups=groups, userIds=user_ids)

     def modify_snapshot_attribute(self):
         snapshot_id = self._get_param("SnapshotId")
         operation_type = self._get_param("OperationType")
-        group = self._get_param("UserGroup.1")
-        user_id = self._get_param("UserId.1")
+        groups = self._get_multi_param("UserGroup")
+        user_ids = self._get_multi_param("UserId")
         if self.is_not_dryrun("ModifySnapshotAttribute"):
             if operation_type == "add":
                 self.ec2_backend.add_create_volume_permission(
                     snapshot_id, user_ids=user_ids, groups=groups
                 )
             elif operation_type == "remove":
                 self.ec2_backend.remove_create_volume_permission(
snapshot_id, user_ids=user_ids, groups=groups ) return MODIFY_SNAPSHOT_ATTRIBUTE_RESPONSE @@ -311,18 +312,18 @@ def reset_snapshot_attribute(self): a9540c9f-161a-45d8-9cc1-1182b89ad69f snap-a0332ee0 - {% if not groups %} - - {% endif %} - {% if groups %} - - {% for group in groups %} - - {{ group }} - - {% endfor %} - - {% endif %} + + {% for group in groups %} + + {{ group }} + + {% endfor %} + {% for userId in userIds %} + + {{ userId }} + + {% endfor %} + """ diff --git a/tests/test_ec2/test_amis.py b/tests/test_ec2/test_amis.py index 220dd143c176..e32ef97800cb 100644 --- a/tests/test_ec2/test_amis.py +++ b/tests/test_ec2/test_amis.py @@ -783,6 +783,16 @@ def test_ami_registration(): assert images[0]["State"] == "available", "State should be available." +@mock_ec2 +def test_ami_registration(): + ec2 = boto3.client("ec2", region_name="us-east-1") + image_id = ec2.register_image(Name="test-register-image").get("ImageId", "") + images = ec2.describe_images(ImageIds=[image_id]).get("Images", []) + assert images[0]["Name"] == "test-register-image", "No image was registered." + assert images[0]["RootDeviceName"] == "/dev/sda1", "Wrong root device name." + assert images[0]["State"] == "available", "State should be available." + + @mock_ec2 def test_ami_filter_wildcard(): ec2_resource = boto3.resource("ec2", region_name="us-west-1") diff --git a/tests/test_ec2/test_elastic_block_store.py b/tests/test_ec2/test_elastic_block_store.py index 4bd2a8dfa0bf..7f8313da4af8 100644 --- a/tests/test_ec2/test_elastic_block_store.py +++ b/tests/test_ec2/test_elastic_block_store.py @@ -562,19 +562,176 @@ def test_snapshot_attribute(): cm.exception.status.should.equal(400) cm.exception.request_id.should_not.be.none - # Error: Add or remove with user ID instead of group - conn.modify_snapshot_attribute.when.called_with( - snapshot.id, - attribute="createVolumePermission", - operation="add", - user_ids=["user"], - ).should.throw(NotImplementedError) - conn.modify_snapshot_attribute.when.called_with( - snapshot.id, - attribute="createVolumePermission", - operation="remove", - user_ids=["user"], - ).should.throw(NotImplementedError) + +@mock_ec2 +def test_modify_snapshot_attribute(): + import copy + + ec2_client = boto3.client("ec2", region_name="us-east-1") + response = ec2_client.create_volume(Size=80, AvailabilityZone="us-east-1a") + volume = boto3.resource("ec2", region_name="us-east-1").Volume(response["VolumeId"]) + snapshot = volume.create_snapshot() + + # Baseline + attributes = ec2_client.describe_snapshot_attribute( + SnapshotId=snapshot.id, Attribute="createVolumePermission" + ) + assert not attributes[ + "CreateVolumePermissions" + ], "Snapshot should have no permissions." 
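+    # The two kwargs dicts below drive every group-permission call in this
+    # test; they are identical except for the "OperationType" value.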
+ + ADD_GROUP_ARGS = { + "SnapshotId": snapshot.id, + "Attribute": "createVolumePermission", + "OperationType": "add", + "GroupNames": ["all"], + } + + REMOVE_GROUP_ARGS = { + "SnapshotId": snapshot.id, + "Attribute": "createVolumePermission", + "OperationType": "remove", + "GroupNames": ["all"], + } + + # Add 'all' group and confirm + with assert_raises(ClientError) as cm: + ec2_client.modify_snapshot_attribute(**dict(ADD_GROUP_ARGS, **{"DryRun": True})) + + cm.exception.response["Error"]["Code"].should.equal("DryRunOperation") + cm.exception.response["ResponseMetadata"]["RequestId"].should_not.be.none + cm.exception.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) + + ec2_client.modify_snapshot_attribute(**ADD_GROUP_ARGS) + + attributes = ec2_client.describe_snapshot_attribute( + SnapshotId=snapshot.id, Attribute="createVolumePermission" + ) + assert attributes["CreateVolumePermissions"] == [ + {"Group": "all"} + ], "This snapshot should have public group permissions." + + # Add is idempotent + ec2_client.modify_snapshot_attribute.when.called_with( + **ADD_GROUP_ARGS + ).should_not.throw(ClientError) + assert attributes["CreateVolumePermissions"] == [ + {"Group": "all"} + ], "This snapshot should have public group permissions." + + # Remove 'all' group and confirm + with assert_raises(ClientError) as ex: + ec2_client.modify_snapshot_attribute( + **dict(REMOVE_GROUP_ARGS, **{"DryRun": True}) + ) + cm.exception.response["Error"]["Code"].should.equal("DryRunOperation") + cm.exception.response["ResponseMetadata"]["RequestId"].should_not.be.none + cm.exception.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) + + ec2_client.modify_snapshot_attribute(**REMOVE_GROUP_ARGS) + + attributes = ec2_client.describe_snapshot_attribute( + SnapshotId=snapshot.id, Attribute="createVolumePermission" + ) + assert not attributes[ + "CreateVolumePermissions" + ], "This snapshot should have no permissions." + + # Remove is idempotent + ec2_client.modify_snapshot_attribute.when.called_with( + **REMOVE_GROUP_ARGS + ).should_not.throw(ClientError) + assert not attributes[ + "CreateVolumePermissions" + ], "This snapshot should have no permissions." 
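+    # The remaining cases exercise the error paths: a group other than
+    # "all" and a nonexistent snapshot ID must each surface as a
+    # ClientError with HTTP status 400.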
+ + # Error: Add with group != 'all' + with assert_raises(ClientError) as cm: + ec2_client.modify_snapshot_attribute( + SnapshotId=snapshot.id, + Attribute="createVolumePermission", + OperationType="add", + GroupNames=["everyone"], + ) + cm.exception.response["Error"]["Code"].should.equal("InvalidAMIAttributeItemValue") + cm.exception.response["ResponseMetadata"]["RequestId"].should_not.be.none + cm.exception.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) + + # Error: Add with invalid snapshot ID + with assert_raises(ClientError) as cm: + ec2_client.modify_snapshot_attribute( + SnapshotId="snapshot-abcd1234", + Attribute="createVolumePermission", + OperationType="add", + GroupNames=["all"], + ) + cm.exception.response["Error"]["Code"].should.equal("InvalidSnapshot.NotFound") + cm.exception.response["ResponseMetadata"]["RequestId"].should_not.be.none + cm.exception.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) + + # Error: Remove with invalid snapshot ID + with assert_raises(ClientError) as cm: + ec2_client.modify_snapshot_attribute( + SnapshotId="snapshot-abcd1234", + Attribute="createVolumePermission", + OperationType="remove", + GroupNames=["all"], + ) + cm.exception.response["Error"]["Code"].should.equal("InvalidSnapshot.NotFound") + cm.exception.response["ResponseMetadata"]["RequestId"].should_not.be.none + cm.exception.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) + + # Test adding user id + ec2_client.modify_snapshot_attribute( + SnapshotId=snapshot.id, + Attribute="createVolumePermission", + OperationType="add", + UserIds=["1234567891"], + ) + + attributes = ec2_client.describe_snapshot_attribute( + SnapshotId=snapshot.id, Attribute="createVolumePermission" + ) + assert len(attributes["CreateVolumePermissions"]) == 1 + + # Test adding user id again along with additional. + ec2_client.modify_snapshot_attribute( + SnapshotId=snapshot.id, + Attribute="createVolumePermission", + OperationType="add", + UserIds=["1234567891", "2345678912"], + ) + + attributes = ec2_client.describe_snapshot_attribute( + SnapshotId=snapshot.id, Attribute="createVolumePermission" + ) + assert len(attributes["CreateVolumePermissions"]) == 2 + + # Test removing both user IDs. + ec2_client.modify_snapshot_attribute( + SnapshotId=snapshot.id, + Attribute="createVolumePermission", + OperationType="remove", + UserIds=["1234567891", "2345678912"], + ) + + attributes = ec2_client.describe_snapshot_attribute( + SnapshotId=snapshot.id, Attribute="createVolumePermission" + ) + assert len(attributes["CreateVolumePermissions"]) == 0 + + # Idempotency when removing users. + ec2_client.modify_snapshot_attribute( + SnapshotId=snapshot.id, + Attribute="createVolumePermission", + OperationType="remove", + UserIds=["1234567891"], + ) + + attributes = ec2_client.describe_snapshot_attribute( + SnapshotId=snapshot.id, Attribute="createVolumePermission" + ) + assert len(attributes["CreateVolumePermissions"]) == 0 @mock_ec2_deprecated From 3342d49a43c260ad8016968b7d36cb54c382270e Mon Sep 17 00:00:00 2001 From: jweite Date: Sat, 1 Aug 2020 17:43:03 -0400 Subject: [PATCH 479/658] S3 cloudformation update (#3199) * First cut of S3 Cloudformation Update support: encryption property. * Update type support for S3. Abstract base class for CloudFormation-aware models, as designed by @bblommers, introduced to decentralize CloudFormation resource and name property values to model objects. * Blackened... 
* Un-renamed param in s3.models.update_from_cloudformation_json() and its call to stay compatible with other modules. Co-authored-by: Bert Blommers Co-authored-by: Joseph Weitekamp Co-authored-by: Bert Blommers --- moto/cloudformation/parsing.py | 38 ++++----- moto/core/models.py | 1 + moto/s3/cloud_formation.py | 33 ++++++++ moto/s3/models.py | 47 +++++++++++ tests/test_s3/test_s3.py | 141 ++++++++++++++++++++++++++++++++- 5 files changed, 241 insertions(+), 19 deletions(-) create mode 100644 moto/s3/cloud_formation.py diff --git a/moto/cloudformation/parsing.py b/moto/cloudformation/parsing.py index 2c212a148c98..a1e1bb18b419 100644 --- a/moto/cloudformation/parsing.py +++ b/moto/cloudformation/parsing.py @@ -21,6 +21,7 @@ from moto.cloudwatch import models as cloudwatch_models # noqa from moto.datapipeline import models as datapipeline_models # noqa from moto.dynamodb2 import models as dynamodb2_models # noqa +from moto.ec2 import models as ec2_models from moto.ecr import models as ecr_models # noqa from moto.ecs import models as ecs_models # noqa from moto.elb import models as elb_models # noqa @@ -33,15 +34,13 @@ from moto.rds2 import models as rds2_models # noqa from moto.redshift import models as redshift_models # noqa from moto.route53 import models as route53_models # noqa -from moto.s3 import models as s3_models # noqa +from moto.s3 import models as s3_models, s3_backend # noqa +from moto.s3.utils import bucket_and_name_from_url from moto.sns import models as sns_models # noqa from moto.sqs import models as sqs_models # noqa # End ugly list of imports -from moto.ec2 import models as ec2_models -from moto.s3 import models as _, s3_backend # noqa -from moto.s3.utils import bucket_and_name_from_url from moto.core import ACCOUNT_ID, CloudFormationModel from .utils import random_suffix from .exceptions import ( @@ -212,7 +211,6 @@ def clean_json(resource_json, resources_map): def resource_class_from_type(resource_type): if resource_type in NULL_MODELS: return None - if resource_type not in MODEL_MAP: logger.warning("No Moto CloudFormation support for %s", resource_type) return None @@ -221,6 +219,9 @@ def resource_class_from_type(resource_type): def resource_name_property_from_type(resource_type): + for model in MODEL_LIST: + if model.cloudformation_type() == resource_type: + return model.cloudformation_name_type() return NAME_TYPE_MAP.get(resource_type) @@ -249,7 +250,9 @@ def generate_resource_name(resource_type, stack_name, logical_id): return "{0}-{1}-{2}".format(stack_name, logical_id, random_suffix()) -def parse_resource(logical_id, resource_json, resources_map): +def parse_resource( + logical_id, resource_json, resources_map, add_name_to_resource_json=True +): resource_type = resource_json["Type"] resource_class = resource_class_from_type(resource_type) if not resource_class: @@ -261,21 +264,20 @@ def parse_resource(logical_id, resource_json, resources_map): return None resource_json = clean_json(resource_json, resources_map) + resource_name = generate_resource_name( + resource_type, resources_map.get("AWS::StackName"), logical_id + ) resource_name_property = resource_name_property_from_type(resource_type) if resource_name_property: if "Properties" not in resource_json: resource_json["Properties"] = dict() - if resource_name_property not in resource_json["Properties"]: - resource_json["Properties"][ - resource_name_property - ] = generate_resource_name( - resource_type, resources_map.get("AWS::StackName"), logical_id - ) - resource_name = 
resource_json["Properties"][resource_name_property] - else: - resource_name = generate_resource_name( - resource_type, resources_map.get("AWS::StackName"), logical_id - ) + if ( + add_name_to_resource_json + and resource_name_property not in resource_json["Properties"] + ): + resource_json["Properties"][resource_name_property] = resource_name + if resource_name_property in resource_json["Properties"]: + resource_name = resource_json["Properties"][resource_name_property] return resource_class, resource_json, resource_name @@ -301,7 +303,7 @@ def parse_and_create_resource(logical_id, resource_json, resources_map, region_n def parse_and_update_resource(logical_id, resource_json, resources_map, region_name): resource_class, new_resource_json, new_resource_name = parse_resource( - logical_id, resource_json, resources_map + logical_id, resource_json, resources_map, False ) original_resource = resources_map[logical_id] new_resource = resource_class.update_from_cloudformation_json( diff --git a/moto/core/models.py b/moto/core/models.py index ded6a4fc1254..cf78be3f8b8e 100644 --- a/moto/core/models.py +++ b/moto/core/models.py @@ -540,6 +540,7 @@ def __new__(cls, *args, **kwargs): class CloudFormationModel(BaseModel): @abstractmethod def cloudformation_name_type(self): + # https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-name.html # This must be implemented as a staticmethod with no parameters # Return None for resources that do not have a name property pass diff --git a/moto/s3/cloud_formation.py b/moto/s3/cloud_formation.py new file mode 100644 index 000000000000..0bf6022ef490 --- /dev/null +++ b/moto/s3/cloud_formation.py @@ -0,0 +1,33 @@ +from collections import OrderedDict + + +def cfn_to_api_encryption(bucket_encryption_properties): + + sse_algorithm = bucket_encryption_properties["ServerSideEncryptionConfiguration"][ + 0 + ]["ServerSideEncryptionByDefault"]["SSEAlgorithm"] + kms_master_key_id = bucket_encryption_properties[ + "ServerSideEncryptionConfiguration" + ][0]["ServerSideEncryptionByDefault"].get("KMSMasterKeyID") + apply_server_side_encryption_by_default = OrderedDict() + apply_server_side_encryption_by_default["SSEAlgorithm"] = sse_algorithm + if kms_master_key_id: + apply_server_side_encryption_by_default["KMSMasterKeyID"] = kms_master_key_id + rule = OrderedDict( + {"ApplyServerSideEncryptionByDefault": apply_server_side_encryption_by_default} + ) + bucket_encryption = OrderedDict( + {"@xmlns": "http://s3.amazonaws.com/doc/2006-03-01/"} + ) + bucket_encryption["Rule"] = rule + return bucket_encryption + + +def is_replacement_update(properties): + properties_requiring_replacement_update = ["BucketName", "ObjectLockEnabled"] + return any( + [ + property_requiring_replacement in properties + for property_requiring_replacement in properties_requiring_replacement_update + ] + ) diff --git a/moto/s3/models.py b/moto/s3/models.py index 800601690383..70e33fdfb066 100644 --- a/moto/s3/models.py +++ b/moto/s3/models.py @@ -43,6 +43,7 @@ WrongPublicAccessBlockAccountIdError, NoSuchUpload, ) +from .cloud_formation import cfn_to_api_encryption, is_replacement_update from .utils import clean_key_name, _VersionedKeyStore MAX_BUCKET_NAME_LENGTH = 63 @@ -1084,8 +1085,54 @@ def create_from_cloudformation_json( cls, resource_name, cloudformation_json, region_name ): bucket = s3_backend.create_bucket(resource_name, region_name) + + properties = cloudformation_json["Properties"] + + if "BucketEncryption" in properties: + bucket_encryption = 
cfn_to_api_encryption(properties["BucketEncryption"]) + s3_backend.put_bucket_encryption( + bucket_name=resource_name, encryption=[bucket_encryption] + ) + return bucket + @classmethod + def update_from_cloudformation_json( + cls, original_resource, new_resource_name, cloudformation_json, region_name, + ): + properties = cloudformation_json["Properties"] + + if is_replacement_update(properties): + resource_name_property = cls.cloudformation_name_type() + if resource_name_property not in properties: + properties[resource_name_property] = new_resource_name + new_resource = cls.create_from_cloudformation_json( + properties[resource_name_property], cloudformation_json, region_name + ) + properties[resource_name_property] = original_resource.name + cls.delete_from_cloudformation_json( + original_resource.name, cloudformation_json, region_name + ) + return new_resource + + else: # No Interruption + if "BucketEncryption" in properties: + bucket_encryption = cfn_to_api_encryption( + properties["BucketEncryption"] + ) + s3_backend.put_bucket_encryption( + bucket_name=original_resource.name, encryption=[bucket_encryption] + ) + return original_resource + + @classmethod + def delete_from_cloudformation_json( + cls, resource_name, cloudformation_json, region_name + ): + properties = cloudformation_json["Properties"] + bucket_name = properties[cls.cloudformation_name_type()] + s3_backend.delete_bucket(bucket_name) + def to_config_dict(self): """Return the AWS Config JSON format of this S3 bucket. diff --git a/tests/test_s3/test_s3.py b/tests/test_s3/test_s3.py index 57f7454371ac..c8e3ed4de346 100644 --- a/tests/test_s3/test_s3.py +++ b/tests/test_s3/test_s3.py @@ -36,7 +36,7 @@ import sure # noqa -from moto import settings, mock_s3, mock_s3_deprecated, mock_config +from moto import settings, mock_s3, mock_s3_deprecated, mock_config, mock_cloudformation import moto.s3.models as s3model from moto.core.exceptions import InvalidNextTokenException from moto.core.utils import py2_strip_unicode_keys @@ -4686,3 +4686,142 @@ def test_presigned_put_url_with_custom_headers(): s3.delete_object(Bucket=bucket, Key=key) s3.delete_bucket(Bucket=bucket) + + +@mock_s3 +@mock_cloudformation +def test_s3_bucket_cloudformation_basic(): + s3 = boto3.client("s3", region_name="us-east-1") + cf = boto3.client("cloudformation", region_name="us-east-1") + + template = { + "AWSTemplateFormatVersion": "2010-09-09", + "Resources": {"testInstance": {"Type": "AWS::S3::Bucket", "Properties": {},}}, + "Outputs": {"Bucket": {"Value": {"Ref": "testInstance"}}}, + } + template_json = json.dumps(template) + stack_id = cf.create_stack(StackName="test_stack", TemplateBody=template_json)[ + "StackId" + ] + stack_description = cf.describe_stacks(StackName="test_stack")["Stacks"][0] + + s3.head_bucket(Bucket=stack_description["Outputs"][0]["OutputValue"]) + + +@mock_s3 +@mock_cloudformation +def test_s3_bucket_cloudformation_with_properties(): + s3 = boto3.client("s3", region_name="us-east-1") + cf = boto3.client("cloudformation", region_name="us-east-1") + + bucket_name = "MyBucket" + template = { + "AWSTemplateFormatVersion": "2010-09-09", + "Resources": { + "testInstance": { + "Type": "AWS::S3::Bucket", + "Properties": { + "BucketName": bucket_name, + "BucketEncryption": { + "ServerSideEncryptionConfiguration": [ + { + "ServerSideEncryptionByDefault": { + "SSEAlgorithm": "AES256" + } + } + ] + }, + }, + } + }, + "Outputs": {"Bucket": {"Value": {"Ref": "testInstance"}}}, + } + template_json = json.dumps(template) + stack_id = 
cf.create_stack(StackName="test_stack", TemplateBody=template_json)[
+        "StackId"
+    ]
+    stack_description = cf.describe_stacks(StackName="test_stack")["Stacks"][0]
+    s3.head_bucket(Bucket=bucket_name)
+
+    encryption = s3.get_bucket_encryption(Bucket=bucket_name)
+    encryption["ServerSideEncryptionConfiguration"]["Rules"][0][
+        "ApplyServerSideEncryptionByDefault"
+    ]["SSEAlgorithm"].should.equal("AES256")
+
+
+@mock_s3
+@mock_cloudformation
+def test_s3_bucket_cloudformation_update_no_interruption():
+    s3 = boto3.client("s3", region_name="us-east-1")
+    cf = boto3.client("cloudformation", region_name="us-east-1")
+
+    template = {
+        "AWSTemplateFormatVersion": "2010-09-09",
+        "Resources": {"testInstance": {"Type": "AWS::S3::Bucket"}},
+        "Outputs": {"Bucket": {"Value": {"Ref": "testInstance"}}},
+    }
+    template_json = json.dumps(template)
+    cf.create_stack(StackName="test_stack", TemplateBody=template_json)
+    stack_description = cf.describe_stacks(StackName="test_stack")["Stacks"][0]
+    s3.head_bucket(Bucket=stack_description["Outputs"][0]["OutputValue"])
+
+    template = {
+        "AWSTemplateFormatVersion": "2010-09-09",
+        "Resources": {
+            "testInstance": {
+                "Type": "AWS::S3::Bucket",
+                "Properties": {
+                    "BucketEncryption": {
+                        "ServerSideEncryptionConfiguration": [
+                            {
+                                "ServerSideEncryptionByDefault": {
+                                    "SSEAlgorithm": "AES256"
+                                }
+                            }
+                        ]
+                    }
+                },
+            }
+        },
+        "Outputs": {"Bucket": {"Value": {"Ref": "testInstance"}}},
+    }
+    template_json = json.dumps(template)
+    cf.update_stack(StackName="test_stack", TemplateBody=template_json)
+    encryption = s3.get_bucket_encryption(
+        Bucket=stack_description["Outputs"][0]["OutputValue"]
+    )
+    encryption["ServerSideEncryptionConfiguration"]["Rules"][0][
+        "ApplyServerSideEncryptionByDefault"
+    ]["SSEAlgorithm"].should.equal("AES256")
+
+
+@mock_s3
+@mock_cloudformation
+def test_s3_bucket_cloudformation_update_replacement():
+    s3 = boto3.client("s3", region_name="us-east-1")
+    cf = boto3.client("cloudformation", region_name="us-east-1")
+
+    template = {
+        "AWSTemplateFormatVersion": "2010-09-09",
+        "Resources": {"testInstance": {"Type": "AWS::S3::Bucket"}},
+        "Outputs": {"Bucket": {"Value": {"Ref": "testInstance"}}},
+    }
+    template_json = json.dumps(template)
+    cf.create_stack(StackName="test_stack", TemplateBody=template_json)
+    stack_description = cf.describe_stacks(StackName="test_stack")["Stacks"][0]
+    s3.head_bucket(Bucket=stack_description["Outputs"][0]["OutputValue"])
+
+    template = {
+        "AWSTemplateFormatVersion": "2010-09-09",
+        "Resources": {
+            "testInstance": {
+                "Type": "AWS::S3::Bucket",
+                "Properties": {"BucketName": "MyNewBucketName"},
+            }
+        },
+        "Outputs": {"Bucket": {"Value": {"Ref": "testInstance"}}},
+    }
+    template_json = json.dumps(template)
+    cf.update_stack(StackName="test_stack", TemplateBody=template_json)
+    stack_description = cf.describe_stacks(StackName="test_stack")["Stacks"][0]
+    s3.head_bucket(Bucket=stack_description["Outputs"][0]["OutputValue"])

From 252d679b275d840311dec3b337563764c970a966 Mon Sep 17 00:00:00 2001
From: Anton Grübel
Date: Sun, 2 Aug 2020 11:56:19 +0200
Subject: [PATCH 480/658] Organizations - implement Policy Type functionality (#3207)

* Add organizations.enable_policy_type
* Add organizations.disable_policy_type
* Add support for AISERVICES_OPT_OUT_POLICY
---
 moto/organizations/exceptions.py |  38 +++
 moto/organizations/models.py     | 106 ++++++-
 moto/organizations/responses.py  |  10 +
 moto/organizations/utils.py      |  11 +-
 .../organizations_test_utils.py  |   8 +-
 .../test_organizations_boto3.py  | 265 
+++++++++++++++++- 6 files changed, 419 insertions(+), 19 deletions(-) diff --git a/moto/organizations/exceptions.py b/moto/organizations/exceptions.py index 2d1ee7328351..ca64b9931482 100644 --- a/moto/organizations/exceptions.py +++ b/moto/organizations/exceptions.py @@ -74,3 +74,41 @@ def __init__(self): super(DuplicatePolicyException, self).__init__( "DuplicatePolicyException", "A policy with the same name already exists." ) + + +class PolicyTypeAlreadyEnabledException(JsonRESTError): + code = 400 + + def __init__(self): + super(PolicyTypeAlreadyEnabledException, self).__init__( + "PolicyTypeAlreadyEnabledException", + "The specified policy type is already enabled.", + ) + + +class PolicyTypeNotEnabledException(JsonRESTError): + code = 400 + + def __init__(self): + super(PolicyTypeNotEnabledException, self).__init__( + "PolicyTypeNotEnabledException", + "This operation can be performed only for enabled policy types.", + ) + + +class RootNotFoundException(JsonRESTError): + code = 400 + + def __init__(self): + super(RootNotFoundException, self).__init__( + "RootNotFoundException", "You specified a root that doesn't exist." + ) + + +class TargetNotFoundException(JsonRESTError): + code = 400 + + def __init__(self): + super(TargetNotFoundException, self).__init__( + "TargetNotFoundException", "You specified a target that doesn't exist." + ) diff --git a/moto/organizations/models.py b/moto/organizations/models.py index 6c8029e3d26b..09bd62b79010 100644 --- a/moto/organizations/models.py +++ b/moto/organizations/models.py @@ -17,6 +17,10 @@ AccountAlreadyRegisteredException, AWSOrganizationsNotInUseException, AccountNotRegisteredException, + RootNotFoundException, + PolicyTypeAlreadyEnabledException, + PolicyTypeNotEnabledException, + TargetNotFoundException, ) @@ -124,6 +128,13 @@ def describe(self): class FakeRoot(FakeOrganizationalUnit): + SUPPORTED_POLICY_TYPES = [ + "AISERVICES_OPT_OUT_POLICY", + "BACKUP_POLICY", + "SERVICE_CONTROL_POLICY", + "TAG_POLICY", + ] + def __init__(self, organization, **kwargs): super(FakeRoot, self).__init__(organization, **kwargs) self.type = "ROOT" @@ -141,20 +152,55 @@ def describe(self): "PolicyTypes": self.policy_types, } + def add_policy_type(self, policy_type): + if policy_type not in self.SUPPORTED_POLICY_TYPES: + raise InvalidInputException("You specified an invalid value.") + + if any(type["Type"] == policy_type for type in self.policy_types): + raise PolicyTypeAlreadyEnabledException + + self.policy_types.append({"Type": policy_type, "Status": "ENABLED"}) + + def remove_policy_type(self, policy_type): + if not FakePolicy.supported_policy_type(policy_type): + raise InvalidInputException("You specified an invalid value.") + + if all(type["Type"] != policy_type for type in self.policy_types): + raise PolicyTypeNotEnabledException + + self.policy_types.remove({"Type": policy_type, "Status": "ENABLED"}) + + +class FakePolicy(BaseModel): + SUPPORTED_POLICY_TYPES = [ + "AISERVICES_OPT_OUT_POLICY", + "BACKUP_POLICY", + "SERVICE_CONTROL_POLICY", + "TAG_POLICY", + ] -class FakeServiceControlPolicy(BaseModel): def __init__(self, organization, **kwargs): self.content = kwargs.get("Content") self.description = kwargs.get("Description") self.name = kwargs.get("Name") self.type = kwargs.get("Type") - self.id = utils.make_random_service_control_policy_id() + self.id = utils.make_random_policy_id() self.aws_managed = False self.organization_id = organization.id self.master_account_id = organization.master_account_id - self._arn_format = utils.SCP_ARN_FORMAT 
self.attachments = [] + if not FakePolicy.supported_policy_type(self.type): + raise InvalidInputException("You specified an invalid value.") + elif self.type == "AISERVICES_OPT_OUT_POLICY": + self._arn_format = utils.AI_POLICY_ARN_FORMAT + elif self.type == "SERVICE_CONTROL_POLICY": + self._arn_format = utils.SCP_ARN_FORMAT + else: + raise NotImplementedError( + "The {0} policy type has not been implemented".format(self.type) + ) + @property def arn(self): return self._arn_format.format( @@ -176,6 +222,10 @@ def describe(self): } } + @staticmethod + def supported_policy_type(policy_type): + return policy_type in FakePolicy.SUPPORTED_POLICY_TYPES + class FakeServiceAccess(BaseModel): # List of trusted services, which support trusted access with Organizations @@ -283,6 +333,13 @@ def __init__(self): self.services = [] self.admins = [] + def _get_root_by_id(self, root_id): + root = next((ou for ou in self.ou if ou.id == root_id), None) + if not root: + raise RootNotFoundException + + return root + def create_organization(self, **kwargs): self.org = FakeOrganization(kwargs["FeatureSet"]) root_ou = FakeRoot(self.org) @@ -292,7 +349,7 @@ def create_organization(self, **kwargs): ) master_account.id = self.org.master_account_id self.accounts.append(master_account) - default_policy = FakeServiceControlPolicy( + default_policy = FakePolicy( self.org, Name="FullAWSAccess", Description="Allows access to every operation", @@ -452,7 +509,7 @@ def list_children(self, **kwargs): ) def create_policy(self, **kwargs): - new_policy = FakeServiceControlPolicy(self.org, **kwargs) + new_policy = FakePolicy(self.org, **kwargs) for policy in self.policies: if kwargs["Name"] == policy.name: raise DuplicatePolicyException @@ -460,7 +517,7 @@ def create_policy(self, **kwargs): return new_policy.describe() def describe_policy(self, **kwargs): - if re.compile(utils.SCP_ID_REGEX).match(kwargs["PolicyId"]): + if re.compile(utils.POLICY_ID_REGEX).match(kwargs["PolicyId"]): policy = next( (p for p in self.policies if p.id == kwargs["PolicyId"]), None ) @@ -540,7 +597,13 @@ def delete_policy(self, **kwargs): ) def list_policies_for_target(self, **kwargs): - if re.compile(utils.OU_ID_REGEX).match(kwargs["TargetId"]): + filter = kwargs["Filter"] + + if re.match(utils.ROOT_ID_REGEX, kwargs["TargetId"]): + obj = next((ou for ou in self.ou if ou.id == kwargs["TargetId"]), None) + if obj is None: + raise TargetNotFoundException + elif re.compile(utils.OU_ID_REGEX).match(kwargs["TargetId"]): obj = next((ou for ou in self.ou if ou.id == kwargs["TargetId"]), None) if obj is None: raise RESTError( @@ -553,14 +616,25 @@ def list_policies_for_target(self, **kwargs): raise AccountNotFoundException else: raise InvalidInputException("You specified an invalid value.") + + if not FakePolicy.supported_policy_type(filter): + raise InvalidInputException("You specified an invalid value.") + + if filter not in ["AISERVICES_OPT_OUT_POLICY", "SERVICE_CONTROL_POLICY"]: + raise NotImplementedError( + "The {0} policy type has not been implemented".format(filter) + ) + return dict( Policies=[ - p.describe()["Policy"]["PolicySummary"] for p in obj.attached_policies + p.describe()["Policy"]["PolicySummary"] + for p in obj.attached_policies + if p.type == filter ] ) def list_targets_for_policy(self, **kwargs): - if re.compile(utils.SCP_ID_REGEX).match(kwargs["PolicyId"]): + if re.compile(utils.POLICY_ID_REGEX).match(kwargs["PolicyId"]): policy = next( (p for p in self.policies if p.id == kwargs["PolicyId"]), None ) @@ -733,5 +807,19 @@ def 
deregister_delegated_administrator(self, **kwargs): if not admin.services: self.admins.remove(admin) + def enable_policy_type(self, **kwargs): + root = self._get_root_by_id(kwargs["RootId"]) + + root.add_policy_type(kwargs["PolicyType"]) + + return dict(Root=root.describe()) + + def disable_policy_type(self, **kwargs): + root = self._get_root_by_id(kwargs["RootId"]) + + root.remove_policy_type(kwargs["PolicyType"]) + + return dict(Root=root.describe()) + organizations_backend = OrganizationsBackend() diff --git a/moto/organizations/responses.py b/moto/organizations/responses.py index 4689db5d7cee..ae0bb731b1c4 100644 --- a/moto/organizations/responses.py +++ b/moto/organizations/responses.py @@ -191,3 +191,13 @@ def deregister_delegated_administrator(self): **self.request_params ) ) + + def enable_policy_type(self): + return json.dumps( + self.organizations_backend.enable_policy_type(**self.request_params) + ) + + def disable_policy_type(self): + return json.dumps( + self.organizations_backend.disable_policy_type(**self.request_params) + ) diff --git a/moto/organizations/utils.py b/moto/organizations/utils.py index e71357ce6994..cec34834cb52 100644 --- a/moto/organizations/utils.py +++ b/moto/organizations/utils.py @@ -14,6 +14,9 @@ ROOT_ARN_FORMAT = "arn:aws:organizations::{0}:root/{1}/{2}" OU_ARN_FORMAT = "arn:aws:organizations::{0}:ou/{1}/{2}" SCP_ARN_FORMAT = "arn:aws:organizations::{0}:policy/{1}/service_control_policy/{2}" +AI_POLICY_ARN_FORMAT = ( + "arn:aws:organizations::{0}:policy/{1}/aiservices_opt_out_policy/{2}" +) CHARSET = string.ascii_lowercase + string.digits ORG_ID_SIZE = 10 @@ -21,7 +24,7 @@ ACCOUNT_ID_SIZE = 12 OU_ID_SUFFIX_SIZE = 8 CREATE_ACCOUNT_STATUS_ID_SIZE = 8 -SCP_ID_SIZE = 8 +POLICY_ID_SIZE = 8 EMAIL_REGEX = "^.+@[a-zA-Z0-9-.]+.[a-zA-Z]{2,3}|[0-9]{1,3}$" ORG_ID_REGEX = r"o-[a-z0-9]{%s}" % ORG_ID_SIZE @@ -29,7 +32,7 @@ OU_ID_REGEX = r"ou-[a-z0-9]{%s}-[a-z0-9]{%s}" % (ROOT_ID_SIZE, OU_ID_SUFFIX_SIZE) ACCOUNT_ID_REGEX = r"[0-9]{%s}" % ACCOUNT_ID_SIZE CREATE_ACCOUNT_STATUS_ID_REGEX = r"car-[a-z0-9]{%s}" % CREATE_ACCOUNT_STATUS_ID_SIZE -SCP_ID_REGEX = r"%s|p-[a-z0-9]{%s}" % (DEFAULT_POLICY_ID, SCP_ID_SIZE) +POLICY_ID_REGEX = r"%s|p-[a-z0-9]{%s}" % (DEFAULT_POLICY_ID, POLICY_ID_SIZE) def make_random_org_id(): @@ -76,8 +79,8 @@ def make_random_create_account_status_id(): ) -def make_random_service_control_policy_id(): +def make_random_policy_id(): # The regex pattern for a policy ID string requires "p-" followed by # from 8 to 128 lower-case letters or digits. # e.g. 
'p-k2av4a8a' - return "p-" + "".join(random.choice(CHARSET) for x in range(SCP_ID_SIZE)) + return "p-" + "".join(random.choice(CHARSET) for x in range(POLICY_ID_SIZE)) diff --git a/tests/test_organizations/organizations_test_utils.py b/tests/test_organizations/organizations_test_utils.py index 12189c5301e7..4c26d788d4bd 100644 --- a/tests/test_organizations/organizations_test_utils.py +++ b/tests/test_organizations/organizations_test_utils.py @@ -31,9 +31,9 @@ def test_make_random_create_account_status_id(): create_account_status_id.should.match(utils.CREATE_ACCOUNT_STATUS_ID_REGEX) -def test_make_random_service_control_policy_id(): - service_control_policy_id = utils.make_random_service_control_policy_id() - service_control_policy_id.should.match(utils.SCP_ID_REGEX) +def test_make_random_policy_id(): + policy_id = utils.make_random_policy_id() + policy_id.should.match(utils.POLICY_ID_REGEX) def validate_organization(response): @@ -128,7 +128,7 @@ def validate_create_account_status(create_status): def validate_policy_summary(org, summary): summary.should.be.a(dict) - summary.should.have.key("Id").should.match(utils.SCP_ID_REGEX) + summary.should.have.key("Id").should.match(utils.POLICY_ID_REGEX) summary.should.have.key("Arn").should.equal( utils.SCP_ARN_FORMAT.format(org["MasterAccountId"], org["Id"], summary["Id"]) ) diff --git a/tests/test_organizations/test_organizations_boto3.py b/tests/test_organizations/test_organizations_boto3.py index 90bee1edbd83..647236118014 100644 --- a/tests/test_organizations/test_organizations_boto3.py +++ b/tests/test_organizations/test_organizations_boto3.py @@ -379,6 +379,30 @@ def test_create_policy(): policy["Content"].should.equal(json.dumps(policy_doc01)) +@mock_organizations +def test_create_policy_errors(): + # given + client = boto3.client("organizations", region_name="us-east-1") + client.create_organization(FeatureSet="ALL") + + # invalid policy type + # when + with assert_raises(ClientError) as e: + client.create_policy( + Content=json.dumps(policy_doc01), + Description="moto", + Name="moto", + Type="MOTO", + ) + + # then + ex = e.exception + ex.operation_name.should.equal("CreatePolicy") + ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) + ex.response["Error"]["Code"].should.contain("InvalidInputException") + ex.response["Error"]["Message"].should.equal("You specified an invalid value.") + + @mock_organizations def test_describe_policy(): client = boto3.client("organizations", region_name="us-east-1") @@ -468,7 +492,7 @@ def test_delete_policy(): def test_delete_policy_exception(): client = boto3.client("organizations", region_name="us-east-1") org = client.create_organization(FeatureSet="ALL")["Organization"] - non_existent_policy_id = utils.make_random_service_control_policy_id() + non_existent_policy_id = utils.make_random_policy_id() with assert_raises(ClientError) as e: response = client.delete_policy(PolicyId=non_existent_policy_id) ex = e.exception @@ -571,7 +595,7 @@ def test_update_policy(): def test_update_policy_exception(): client = boto3.client("organizations", region_name="us-east-1") org = client.create_organization(FeatureSet="ALL")["Organization"] - non_existent_policy_id = utils.make_random_service_control_policy_id() + non_existent_policy_id = utils.make_random_policy_id() with assert_raises(ClientError) as e: response = client.update_policy(PolicyId=non_existent_policy_id) ex = e.exception @@ -631,6 +655,7 @@ def test_list_policies_for_target(): def test_list_policies_for_target_exception(): client = 
boto3.client("organizations", region_name="us-east-1") client.create_organization(FeatureSet="ALL")["Organization"] + root_id = client.list_roots()["Roots"][0]["Id"] ou_id = "ou-gi99-i7r8eh2i2" account_id = "126644886543" with assert_raises(ClientError) as e: @@ -664,6 +689,34 @@ def test_list_policies_for_target_exception(): ex.response["Error"]["Code"].should.contain("InvalidInputException") ex.response["Error"]["Message"].should.equal("You specified an invalid value.") + # not existing root + # when + with assert_raises(ClientError) as e: + client.list_policies_for_target( + TargetId="r-0000", Filter="SERVICE_CONTROL_POLICY" + ) + + # then + ex = e.exception + ex.operation_name.should.equal("ListPoliciesForTarget") + ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) + ex.response["Error"]["Code"].should.contain("TargetNotFoundException") + ex.response["Error"]["Message"].should.equal( + "You specified a target that doesn't exist." + ) + + # invalid policy type + # when + with assert_raises(ClientError) as e: + client.list_policies_for_target(TargetId=root_id, Filter="MOTO") + + # then + ex = e.exception + ex.operation_name.should.equal("ListPoliciesForTarget") + ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) + ex.response["Error"]["Code"].should.contain("InvalidInputException") + ex.response["Error"]["Message"].should.equal("You specified an invalid value.") + @mock_organizations def test_list_targets_for_policy(): @@ -1305,3 +1358,211 @@ def test_deregister_delegated_administrator_erros(): ex.response["Error"]["Message"].should.equal( "You specified an unrecognized service principal." ) + + +@mock_organizations +def test_enable_policy_type(): + # given + client = boto3.client("organizations", region_name="us-east-1") + org = client.create_organization(FeatureSet="ALL")["Organization"] + root_id = client.list_roots()["Roots"][0]["Id"] + + # when + response = client.enable_policy_type( + RootId=root_id, PolicyType="AISERVICES_OPT_OUT_POLICY" + ) + + # then + root = response["Root"] + root["Id"].should.equal(root_id) + root["Arn"].should.equal( + utils.ROOT_ARN_FORMAT.format(org["MasterAccountId"], org["Id"], root_id) + ) + root["Name"].should.equal("Root") + sorted(root["PolicyTypes"], key=lambda x: x["Type"]).should.equal( + [ + {"Type": "AISERVICES_OPT_OUT_POLICY", "Status": "ENABLED"}, + {"Type": "SERVICE_CONTROL_POLICY", "Status": "ENABLED"}, + ] + ) + + +@mock_organizations +def test_enable_policy_type_errors(): + # given + client = boto3.client("organizations", region_name="us-east-1") + client.create_organization(FeatureSet="ALL") + root_id = client.list_roots()["Roots"][0]["Id"] + + # not existing root + # when + with assert_raises(ClientError) as e: + client.enable_policy_type( + RootId="r-0000", PolicyType="AISERVICES_OPT_OUT_POLICY" + ) + + # then + ex = e.exception + ex.operation_name.should.equal("EnablePolicyType") + ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) + ex.response["Error"]["Code"].should.contain("RootNotFoundException") + ex.response["Error"]["Message"].should.equal( + "You specified a root that doesn't exist." 
+ ) + + # enable policy again ('SERVICE_CONTROL_POLICY' is enabled by default) + # when + with assert_raises(ClientError) as e: + client.enable_policy_type(RootId=root_id, PolicyType="SERVICE_CONTROL_POLICY") + + # then + ex = e.exception + ex.operation_name.should.equal("EnablePolicyType") + ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) + ex.response["Error"]["Code"].should.contain("PolicyTypeAlreadyEnabledException") + ex.response["Error"]["Message"].should.equal( + "The specified policy type is already enabled." + ) + + # invalid policy type + # when + with assert_raises(ClientError) as e: + client.enable_policy_type(RootId=root_id, PolicyType="MOTO") + + # then + ex = e.exception + ex.operation_name.should.equal("EnablePolicyType") + ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) + ex.response["Error"]["Code"].should.contain("InvalidInputException") + ex.response["Error"]["Message"].should.equal("You specified an invalid value.") + + +@mock_organizations +def test_disable_policy_type(): + # given + client = boto3.client("organizations", region_name="us-east-1") + org = client.create_organization(FeatureSet="ALL")["Organization"] + root_id = client.list_roots()["Roots"][0]["Id"] + client.enable_policy_type(RootId=root_id, PolicyType="AISERVICES_OPT_OUT_POLICY") + + # when + response = client.disable_policy_type( + RootId=root_id, PolicyType="AISERVICES_OPT_OUT_POLICY" + ) + + # then + root = response["Root"] + root["Id"].should.equal(root_id) + root["Arn"].should.equal( + utils.ROOT_ARN_FORMAT.format(org["MasterAccountId"], org["Id"], root_id) + ) + root["Name"].should.equal("Root") + root["PolicyTypes"].should.equal( + [{"Type": "SERVICE_CONTROL_POLICY", "Status": "ENABLED"}] + ) + + +@mock_organizations +def test_disable_policy_type_errors(): + # given + client = boto3.client("organizations", region_name="us-east-1") + client.create_organization(FeatureSet="ALL") + root_id = client.list_roots()["Roots"][0]["Id"] + + # not existing root + # when + with assert_raises(ClientError) as e: + client.disable_policy_type( + RootId="r-0000", PolicyType="AISERVICES_OPT_OUT_POLICY" + ) + + # then + ex = e.exception + ex.operation_name.should.equal("DisablePolicyType") + ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) + ex.response["Error"]["Code"].should.contain("RootNotFoundException") + ex.response["Error"]["Message"].should.equal( + "You specified a root that doesn't exist." + ) + + # disable not enabled policy + # when + with assert_raises(ClientError) as e: + client.disable_policy_type( + RootId=root_id, PolicyType="AISERVICES_OPT_OUT_POLICY" + ) + + # then + ex = e.exception + ex.operation_name.should.equal("DisablePolicyType") + ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) + ex.response["Error"]["Code"].should.contain("PolicyTypeNotEnabledException") + ex.response["Error"]["Message"].should.equal( + "This operation can be performed only for enabled policy types." 
+ ) + + # invalid policy type + # when + with assert_raises(ClientError) as e: + client.disable_policy_type(RootId=root_id, PolicyType="MOTO") + + # then + ex = e.exception + ex.operation_name.should.equal("DisablePolicyType") + ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) + ex.response["Error"]["Code"].should.contain("InvalidInputException") + ex.response["Error"]["Message"].should.equal("You specified an invalid value.") + + +@mock_organizations +def test_aiservices_opt_out_policy(): + # given + client = boto3.client("organizations", region_name="us-east-1") + org = client.create_organization(FeatureSet="ALL")["Organization"] + root_id = client.list_roots()["Roots"][0]["Id"] + client.enable_policy_type(RootId=root_id, PolicyType="AISERVICES_OPT_OUT_POLICY") + ai_policy = { + "services": { + "@@operators_allowed_for_child_policies": ["@@none"], + "default": { + "@@operators_allowed_for_child_policies": ["@@none"], + "opt_out_policy": { + "@@operators_allowed_for_child_policies": ["@@none"], + "@@assign": "optOut", + }, + }, + } + } + + # when + response = client.create_policy( + Content=json.dumps(ai_policy), + Description="Opt out of all AI services", + Name="ai-opt-out", + Type="AISERVICES_OPT_OUT_POLICY", + ) + + # then + summary = response["Policy"]["PolicySummary"] + policy_id = summary["Id"] + summary["Id"].should.match(utils.POLICY_ID_REGEX) + summary["Arn"].should.equal( + utils.AI_POLICY_ARN_FORMAT.format( + org["MasterAccountId"], org["Id"], summary["Id"] + ) + ) + summary["Name"].should.equal("ai-opt-out") + summary["Description"].should.equal("Opt out of all AI services") + summary["Type"].should.equal("AISERVICES_OPT_OUT_POLICY") + summary["AwsManaged"].should_not.be.ok + json.loads(response["Policy"]["Content"]).should.equal(ai_policy) + + # when + client.attach_policy(PolicyId=policy_id, TargetId=root_id) + + # then + response = client.list_policies_for_target( + TargetId=root_id, Filter="AISERVICES_OPT_OUT_POLICY" + ) + response["Policies"].should.have.length_of(1) + response["Policies"][0]["Id"].should.equal(policy_id) From 061c609a8f24bd84d0d0892a78074aad5c35b42a Mon Sep 17 00:00:00 2001 From: Ninh Khong Date: Mon, 3 Aug 2020 19:42:42 +0700 Subject: [PATCH 481/658] Fix secretsmanager random password wrong length (#3213) * Enhance function get_parameter by parameter name, version or labels * Fix random password with exclude characters return wrong length --- moto/secretsmanager/utils.py | 3 ++- tests/test_secretsmanager/test_secretsmanager.py | 1 + 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/moto/secretsmanager/utils.py b/moto/secretsmanager/utils.py index 6033db6132da..ab0f584f0eb6 100644 --- a/moto/secretsmanager/utils.py +++ b/moto/secretsmanager/utils.py @@ -51,6 +51,8 @@ def random_password( if include_space: password += " " required_characters += " " + if exclude_characters: + password = _exclude_characters(password, exclude_characters) password = "".join( six.text_type(random.choice(password)) for x in range(password_length) @@ -61,7 +63,6 @@ def random_password( password, required_characters ) - password = _exclude_characters(password, exclude_characters) return password diff --git a/tests/test_secretsmanager/test_secretsmanager.py b/tests/test_secretsmanager/test_secretsmanager.py index 0bd66b12885b..69e055bb2107 100644 --- a/tests/test_secretsmanager/test_secretsmanager.py +++ b/tests/test_secretsmanager/test_secretsmanager.py @@ -338,6 +338,7 @@ def test_get_random_exclude_characters_and_symbols(): PasswordLength=20, 
        ExcludeCharacters="xyzDje@?!."
    )
    assert any(c in "xyzDje@?!." for c in random_password["RandomPassword"]) == False
+    assert len(random_password["RandomPassword"]) == 20


 @mock_secretsmanager

From 99736c3101a4cdc896053a119b0bb45ea921023b Mon Sep 17 00:00:00 2001
From: Yuuki Takahashi <20282867+yktakaha4@users.noreply.github.com>
Date: Mon, 3 Aug 2020 23:09:25 +0900
Subject: [PATCH 482/658] fix: clear pending messages when calling purge_queue (#3208)

---
 moto/sqs/models.py         |  1 +
 tests/test_sqs/test_sqs.py | 32 ++++++++++++++++++++++++++++++++
 2 files changed, 33 insertions(+)

diff --git a/moto/sqs/models.py b/moto/sqs/models.py
index a3642c17e075..a34e95c4f0ff 100644
--- a/moto/sqs/models.py
+++ b/moto/sqs/models.py
@@ -844,6 +844,7 @@ def change_message_visibility(self, queue_name, receipt_handle, visibility_timeo
     def purge_queue(self, queue_name):
         queue = self.get_queue(queue_name)
         queue._messages = []
+        queue._pending_messages = set()

     def list_dead_letter_source_queues(self, queue_name):
         dlq = self.get_queue(queue_name)

diff --git a/tests/test_sqs/test_sqs.py b/tests/test_sqs/test_sqs.py
index 61edcaa9b2a9..4de5b90180ec 100644
--- a/tests/test_sqs/test_sqs.py
+++ b/tests/test_sqs/test_sqs.py
@@ -1098,6 +1098,38 @@ def test_purge_action():
     queue.count().should.equal(0)


+@mock_sqs
+def test_purge_queue_before_delete_message():
+    client = boto3.client("sqs", region_name="us-east-1")
+
+    create_resp = client.create_queue(
+        QueueName="test-dlr-queue.fifo", Attributes={"FifoQueue": "true"}
+    )
+    queue_url = create_resp["QueueUrl"]
+
+    client.send_message(
+        QueueUrl=queue_url,
+        MessageGroupId="test",
+        MessageDeduplicationId="first_message",
+        MessageBody="first_message",
+    )
+    receive_resp1 = client.receive_message(QueueUrl=queue_url)
+
+    # purge before calling delete_message
+    client.purge_queue(QueueUrl=queue_url)
+
+    client.send_message(
+        QueueUrl=queue_url,
+        MessageGroupId="test",
+        MessageDeduplicationId="second_message",
+        MessageBody="second_message",
+    )
+    receive_resp2 = client.receive_message(QueueUrl=queue_url)
+
+    len(receive_resp2.get("Messages", [])).should.equal(1)
+    receive_resp2["Messages"][0]["Body"].should.equal("second_message")
+
+
 @mock_sqs_deprecated
 def test_delete_message_after_visibility_timeout():
     VISIBILITY_TIMEOUT = 1

From da07adae525a51849b5b18b6f840e979e2d364fc Mon Sep 17 00:00:00 2001
From: jweite
Date: Mon, 3 Aug 2020 11:04:05 -0400
Subject: [PATCH 483/658] * Support for CloudFormation update and delete of Kinesis Streams (#3212)

* Support for CloudFormation stack resource deletion via the backend resource
  method delete_from_cloudformation_json(), invoked through
  parse_and_delete_resource().

* Correction to the inappropriate inclusion of EndingSequenceNumber in open
  shards. This attribute should only appear in closed shards. This regretfully
  prevents confirmation of consistent record counts after split/merge in unit
  tests.

* Added parameters/decorator to CloudFormationModel method declarations to
  calm down PyCharm.
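As an illustration of the shard-visibility change described above (the client
setup and stream name here are illustrative, not part of the patch): only
closed shards now report EndingSequenceNumber, so the open shards of a stream
can be selected like this:

    import boto3

    kinesis = boto3.client("kinesis", region_name="us-east-1")
    shards = kinesis.describe_stream(StreamName="my-stream")["StreamDescription"][
        "Shards"
    ]
    # Open shards no longer carry EndingSequenceNumber, so filtering on that key
    # keeps only the shards that still accept writes.
    open_shards = [
        s for s in shards if "EndingSequenceNumber" not in s["SequenceNumberRange"]
    ]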
Co-authored-by: Joseph Weitekamp --- moto/cloudformation/parsing.py | 17 +++ moto/core/models.py | 18 ++- moto/kinesis/models.py | 89 +++++++++-- tests/test_kinesis/test_kinesis.py | 36 ++--- .../test_kinesis_cloudformation.py | 144 ++++++++++++++++++ 5 files changed, 268 insertions(+), 36 deletions(-) create mode 100644 tests/test_kinesis/test_kinesis_cloudformation.py diff --git a/moto/cloudformation/parsing.py b/moto/cloudformation/parsing.py index a1e1bb18b419..272856367881 100644 --- a/moto/cloudformation/parsing.py +++ b/moto/cloudformation/parsing.py @@ -649,6 +649,23 @@ def delete(self): try: if parsed_resource and hasattr(parsed_resource, "delete"): parsed_resource.delete(self._region_name) + else: + resource_name_attribute = ( + parsed_resource.cloudformation_name_type() + if hasattr(parsed_resource, "cloudformation_name_type") + else resource_name_property_from_type(parsed_resource.type) + ) + if resource_name_attribute: + resource_json = self._resource_json_map[ + parsed_resource.logical_resource_id + ] + resource_name = resource_json["Properties"][ + resource_name_attribute + ] + parse_and_delete_resource( + resource_name, resource_json, self, self._region_name + ) + self._parsed_resources.pop(parsed_resource.logical_resource_id) except Exception as e: # skip over dependency violations, and try again in a # second pass diff --git a/moto/core/models.py b/moto/core/models.py index cf78be3f8b8e..ae241322c211 100644 --- a/moto/core/models.py +++ b/moto/core/models.py @@ -538,21 +538,25 @@ def __new__(cls, *args, **kwargs): # Parent class for every Model that can be instantiated by CloudFormation # On subclasses, implement the two methods as @staticmethod to ensure correct behaviour of the CF parser class CloudFormationModel(BaseModel): + @staticmethod @abstractmethod - def cloudformation_name_type(self): + def cloudformation_name_type(): # https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-name.html # This must be implemented as a staticmethod with no parameters # Return None for resources that do not have a name property pass + @staticmethod @abstractmethod - def cloudformation_type(self): + def cloudformation_type(): # This must be implemented as a staticmethod with no parameters # See for example https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-dynamodb-table.html return "AWS::SERVICE::RESOURCE" @abstractmethod - def create_from_cloudformation_json(self): + def create_from_cloudformation_json( + cls, resource_name, cloudformation_json, region_name + ): # This must be implemented as a classmethod with parameters: # cls, resource_name, cloudformation_json, region_name # Extract the resource parameters from the cloudformation json @@ -560,7 +564,9 @@ def create_from_cloudformation_json(self): pass @abstractmethod - def update_from_cloudformation_json(self): + def update_from_cloudformation_json( + cls, original_resource, new_resource_name, cloudformation_json, region_name + ): # This must be implemented as a classmethod with parameters: # cls, original_resource, new_resource_name, cloudformation_json, region_name # Extract the resource parameters from the cloudformation json, @@ -569,7 +575,9 @@ def update_from_cloudformation_json(self): pass @abstractmethod - def delete_from_cloudformation_json(self): + def delete_from_cloudformation_json( + cls, resource_name, cloudformation_json, region_name + ): # This must be implemented as a classmethod with parameters: # cls, resource_name, cloudformation_json, region_name # Extract the 
resource parameters from the cloudformation json diff --git a/moto/kinesis/models.py b/moto/kinesis/models.py index c4b04d924cd7..a9c4f547656a 100644 --- a/moto/kinesis/models.py +++ b/moto/kinesis/models.py @@ -53,6 +53,7 @@ def __init__(self, shard_id, starting_hash, ending_hash): self.starting_hash = starting_hash self.ending_hash = ending_hash self.records = OrderedDict() + self.is_open = True @property def shard_id(self): @@ -116,29 +117,41 @@ def get_sequence_number_at(self, at_timestamp): return r.sequence_number def to_json(self): - return { + response = { "HashKeyRange": { "EndingHashKey": str(self.ending_hash), "StartingHashKey": str(self.starting_hash), }, "SequenceNumberRange": { - "EndingSequenceNumber": self.get_max_sequence_number(), "StartingSequenceNumber": self.get_min_sequence_number(), }, "ShardId": self.shard_id, } + if not self.is_open: + response["SequenceNumberRange"][ + "EndingSequenceNumber" + ] = self.get_max_sequence_number() + return response class Stream(CloudFormationModel): - def __init__(self, stream_name, shard_count, region): + def __init__(self, stream_name, shard_count, region_name): self.stream_name = stream_name - self.shard_count = shard_count self.creation_datetime = datetime.datetime.now() - self.region = region + self.region = region_name self.account_number = ACCOUNT_ID self.shards = {} self.tags = {} self.status = "ACTIVE" + self.shard_count = None + self.update_shard_count(shard_count) + + def update_shard_count(self, shard_count): + # ToDo: This was extracted from init. It's only accurate for new streams. + # It doesn't (yet) try to accurately mimic the more complex re-sharding behavior. + # It makes the stream as if it had been created with this number of shards. + # Logically consistent, but not what AWS does. 
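+        # For example, shard_count=4 splits the 2**128 hash key space into
+        # four contiguous ranges of width 2**128 // 4 (one per shard), with the
+        # last range capped at 2**128 so the whole key space stays covered.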
+ self.shard_count = shard_count step = 2 ** 128 // shard_count hash_ranges = itertools.chain( @@ -146,7 +159,6 @@ def __init__(self, stream_name, shard_count, region): [(shard_count - 1, (shard_count - 1) * step, 2 ** 128)], ) for index, start, end in hash_ranges: - shard = Shard(index, start, end) self.shards[shard.shard_id] = shard @@ -229,10 +241,65 @@ def cloudformation_type(): def create_from_cloudformation_json( cls, resource_name, cloudformation_json, region_name ): - properties = cloudformation_json["Properties"] - region = properties.get("Region", "us-east-1") + properties = cloudformation_json.get("Properties", {}) shard_count = properties.get("ShardCount", 1) - return Stream(properties["Name"], shard_count, region) + name = properties.get("Name", resource_name) + backend = kinesis_backends[region_name] + return backend.create_stream(name, shard_count, region_name) + + @classmethod + def update_from_cloudformation_json( + cls, original_resource, new_resource_name, cloudformation_json, region_name, + ): + properties = cloudformation_json["Properties"] + + if Stream.is_replacement_update(properties): + resource_name_property = cls.cloudformation_name_type() + if resource_name_property not in properties: + properties[resource_name_property] = new_resource_name + new_resource = cls.create_from_cloudformation_json( + properties[resource_name_property], cloudformation_json, region_name + ) + properties[resource_name_property] = original_resource.name + cls.delete_from_cloudformation_json( + original_resource.name, cloudformation_json, region_name + ) + return new_resource + + else: # No Interruption + if "ShardCount" in properties: + original_resource.update_shard_count(properties["ShardCount"]) + return original_resource + + @classmethod + def delete_from_cloudformation_json( + cls, resource_name, cloudformation_json, region_name + ): + backend = kinesis_backends[region_name] + properties = cloudformation_json.get("Properties", {}) + stream_name = properties.get(cls.cloudformation_name_type(), resource_name) + backend.delete_stream(stream_name) + + @staticmethod + def is_replacement_update(properties): + properties_requiring_replacement_update = ["BucketName", "ObjectLockEnabled"] + return any( + [ + property_requiring_replacement in properties + for property_requiring_replacement in properties_requiring_replacement_update + ] + ) + + def get_cfn_attribute(self, attribute_name): + from moto.cloudformation.exceptions import UnformattedGetAttTemplateException + + if attribute_name == "Arn": + return self.arn + raise UnformattedGetAttTemplateException() + + @property + def physical_resource_id(self): + return self.stream_name class FirehoseRecord(BaseModel): @@ -331,10 +398,10 @@ def __init__(self): self.streams = OrderedDict() self.delivery_streams = {} - def create_stream(self, stream_name, shard_count, region): + def create_stream(self, stream_name, shard_count, region_name): if stream_name in self.streams: raise ResourceInUseError(stream_name) - stream = Stream(stream_name, shard_count, region) + stream = Stream(stream_name, shard_count, region_name) self.streams[stream_name] = stream return stream diff --git a/tests/test_kinesis/test_kinesis.py b/tests/test_kinesis/test_kinesis.py index b3251bb0faf5..85f248572a89 100644 --- a/tests/test_kinesis/test_kinesis.py +++ b/tests/test_kinesis/test_kinesis.py @@ -10,6 +10,8 @@ from moto import mock_kinesis, mock_kinesis_deprecated from moto.core import ACCOUNT_ID +import sure # noqa + @mock_kinesis_deprecated def test_create_cluster(): @@ 
-601,9 +603,6 @@ def test_split_shard(): stream = stream_response["StreamDescription"] shards = stream["Shards"] shards.should.have.length_of(2) - sum( - [shard["SequenceNumberRange"]["EndingSequenceNumber"] for shard in shards] - ).should.equal(99) shard_range = shards[0]["HashKeyRange"] new_starting_hash = ( @@ -616,9 +615,6 @@ def test_split_shard(): stream = stream_response["StreamDescription"] shards = stream["Shards"] shards.should.have.length_of(3) - sum( - [shard["SequenceNumberRange"]["EndingSequenceNumber"] for shard in shards] - ).should.equal(99) shard_range = shards[2]["HashKeyRange"] new_starting_hash = ( @@ -631,9 +627,6 @@ def test_split_shard(): stream = stream_response["StreamDescription"] shards = stream["Shards"] shards.should.have.length_of(4) - sum( - [shard["SequenceNumberRange"]["EndingSequenceNumber"] for shard in shards] - ).should.equal(99) @mock_kinesis_deprecated @@ -662,9 +655,6 @@ def test_merge_shards(): stream = stream_response["StreamDescription"] shards = stream["Shards"] shards.should.have.length_of(4) - sum( - [shard["SequenceNumberRange"]["EndingSequenceNumber"] for shard in shards] - ).should.equal(99) conn.merge_shards(stream_name, "shardId-000000000000", "shardId-000000000001") @@ -672,17 +662,23 @@ def test_merge_shards(): stream = stream_response["StreamDescription"] shards = stream["Shards"] - shards.should.have.length_of(3) - sum( - [shard["SequenceNumberRange"]["EndingSequenceNumber"] for shard in shards] - ).should.equal(99) + active_shards = [ + shard + for shard in shards + if "EndingSequenceNumber" not in shard["SequenceNumberRange"] + ] + active_shards.should.have.length_of(3) + conn.merge_shards(stream_name, "shardId-000000000002", "shardId-000000000000") stream_response = conn.describe_stream(stream_name) stream = stream_response["StreamDescription"] shards = stream["Shards"] - shards.should.have.length_of(2) - sum( - [shard["SequenceNumberRange"]["EndingSequenceNumber"] for shard in shards] - ).should.equal(99) + active_shards = [ + shard + for shard in shards + if "EndingSequenceNumber" not in shard["SequenceNumberRange"] + ] + + active_shards.should.have.length_of(2) diff --git a/tests/test_kinesis/test_kinesis_cloudformation.py b/tests/test_kinesis/test_kinesis_cloudformation.py new file mode 100644 index 000000000000..7f3aef0ded08 --- /dev/null +++ b/tests/test_kinesis/test_kinesis_cloudformation.py @@ -0,0 +1,144 @@ +import boto3 +import sure # noqa + +from moto import mock_kinesis, mock_cloudformation + + +@mock_cloudformation +def test_kinesis_cloudformation_create_stream(): + cf_conn = boto3.client("cloudformation", region_name="us-east-1") + + stack_name = "MyStack" + + template = '{"Resources":{"MyStream":{"Type":"AWS::Kinesis::Stream"}}}' + + cf_conn.create_stack(StackName=stack_name, TemplateBody=template) + + provisioned_resource = cf_conn.list_stack_resources(StackName=stack_name)[ + "StackResourceSummaries" + ][0] + provisioned_resource["LogicalResourceId"].should.equal("MyStream") + len(provisioned_resource["PhysicalResourceId"]).should.be.greater_than(0) + + +@mock_cloudformation +@mock_kinesis +def test_kinesis_cloudformation_get_attr(): + cf_conn = boto3.client("cloudformation", region_name="us-east-1") + + stack_name = "MyStack" + + template = """ +Resources: + TheStream: + Type: AWS::Kinesis::Stream +Outputs: + StreamName: + Value: !Ref TheStream + StreamArn: + Value: !GetAtt TheStream.Arn +""".strip() + + cf_conn.create_stack(StackName=stack_name, TemplateBody=template) + stack_description = 
cf_conn.describe_stacks(StackName=stack_name)["Stacks"][0] + output_stream_name = [ + output["OutputValue"] + for output in stack_description["Outputs"] + if output["OutputKey"] == "StreamName" + ][0] + output_stream_arn = [ + output["OutputValue"] + for output in stack_description["Outputs"] + if output["OutputKey"] == "StreamArn" + ][0] + + kinesis_conn = boto3.client("kinesis", region_name="us-east-1") + stream_description = kinesis_conn.describe_stream(StreamName=output_stream_name)[ + "StreamDescription" + ] + output_stream_arn.should.equal(stream_description["StreamARN"]) + + +@mock_cloudformation +@mock_kinesis +def test_kinesis_cloudformation_update(): + cf_conn = boto3.client("cloudformation", region_name="us-east-1") + + stack_name = "MyStack" + + template = """ +Resources: + TheStream: + Type: AWS::Kinesis::Stream + Properties: + Name: MyStream + ShardCount: 4 +""".strip() + + cf_conn.create_stack(StackName=stack_name, TemplateBody=template) + stack_description = cf_conn.describe_stacks(StackName=stack_name)["Stacks"][0] + stack_description["StackName"].should.equal(stack_name) + + kinesis_conn = boto3.client("kinesis", region_name="us-east-1") + stream_description = kinesis_conn.describe_stream(StreamName="MyStream")[ + "StreamDescription" + ] + shards_provisioned = len( + [ + shard + for shard in stream_description["Shards"] + if "EndingSequenceNumber" not in shard["SequenceNumberRange"] + ] + ) + shards_provisioned.should.equal(4) + + template = """ + Resources: + TheStream: + Type: AWS::Kinesis::Stream + Properties: + ShardCount: 6 + """.strip() + cf_conn.update_stack(StackName=stack_name, TemplateBody=template) + + stream_description = kinesis_conn.describe_stream(StreamName="MyStream")[ + "StreamDescription" + ] + shards_provisioned = len( + [ + shard + for shard in stream_description["Shards"] + if "EndingSequenceNumber" not in shard["SequenceNumberRange"] + ] + ) + shards_provisioned.should.equal(6) + + +@mock_cloudformation +@mock_kinesis +def test_kinesis_cloudformation_delete(): + cf_conn = boto3.client("cloudformation", region_name="us-east-1") + + stack_name = "MyStack" + + template = """ +Resources: + TheStream: + Type: AWS::Kinesis::Stream + Properties: + Name: MyStream +""".strip() + + cf_conn.create_stack(StackName=stack_name, TemplateBody=template) + stack_description = cf_conn.describe_stacks(StackName=stack_name)["Stacks"][0] + stack_description["StackName"].should.equal(stack_name) + + kinesis_conn = boto3.client("kinesis", region_name="us-east-1") + stream_description = kinesis_conn.describe_stream(StreamName="MyStream")[ + "StreamDescription" + ] + stream_description["StreamName"].should.equal("MyStream") + + cf_conn.delete_stack(StackName=stack_name) + streams = kinesis_conn.list_streams()["StreamNames"] + len(streams).should.equal(0) From a7ddcd7da314507975246a256a5ebc4aaca1f4be Mon Sep 17 00:00:00 2001 From: usmangani1 Date: Tue, 4 Aug 2020 11:20:57 +0530 Subject: [PATCH 484/658] Fix:EC2-authorize_security_group_ingress- add description to IP-Ranges (#3196) * Fix:EC2-authorize_security_group_ingress- add description to IP-Ranges * Fix:EC2-authorize_security_group_ingress- add test when description is not present. 
* part commit * Fix:fixed build errors * Linting * Allow for Python2 string/unicodes Co-authored-by: usmankb Co-authored-by: Bert Blommers --- moto/ec2/models.py | 26 +++++++--- moto/ec2/responses/security_groups.py | 12 ++++- tests/test_ec2/test_security_groups.py | 72 ++++++++++++++++++++++++-- 3 files changed, 96 insertions(+), 14 deletions(-) diff --git a/moto/ec2/models.py b/moto/ec2/models.py index e6c57dcdde21..2498726b8736 100644 --- a/moto/ec2/models.py +++ b/moto/ec2/models.py @@ -160,7 +160,6 @@ def _load_resource(filename): or resource_filename(__name__, "resources/amis.json"), ) - OWNER_ID = ACCOUNT_ID @@ -1405,7 +1404,6 @@ def get_filter_value(self, filter_name): class AmiBackend(object): - AMI_REGEX = re.compile("ami-[a-z0-9]+") def __init__(self): @@ -2118,11 +2116,16 @@ def authorize_security_group_ingress( vpc_id=None, ): group = self.get_security_group_by_name_or_id(group_name_or_id, vpc_id) - if ip_ranges and not isinstance(ip_ranges, list): - ip_ranges = [ip_ranges] + if ip_ranges: + if isinstance(ip_ranges, str) or ( + six.PY2 and isinstance(ip_ranges, unicode) # noqa + ): + ip_ranges = [{"CidrIp": str(ip_ranges)}] + elif not isinstance(ip_ranges, list): + ip_ranges = [json.loads(ip_ranges)] if ip_ranges: for cidr in ip_ranges: - if not is_valid_cidr(cidr): + if not is_valid_cidr(cidr["CidrIp"]): raise InvalidCIDRSubnetError(cidr=cidr) self._verify_group_will_respect_rule_count_limit( @@ -2200,10 +2203,14 @@ def authorize_security_group_egress( group = self.get_security_group_by_name_or_id(group_name_or_id, vpc_id) if ip_ranges and not isinstance(ip_ranges, list): - ip_ranges = [ip_ranges] + + if isinstance(ip_ranges, str) and "CidrIp" not in ip_ranges: + ip_ranges = [{"CidrIp": ip_ranges}] + else: + ip_ranges = [json.loads(ip_ranges)] if ip_ranges: for cidr in ip_ranges: - if not is_valid_cidr(cidr): + if not is_valid_cidr(cidr["CidrIp"]): raise InvalidCIDRSubnetError(cidr=cidr) self._verify_group_will_respect_rule_count_limit( @@ -2259,9 +2266,13 @@ def revoke_security_group_egress( if source_group: source_groups.append(source_group) + for ip in ip_ranges: + ip_ranges = [ip.get("CidrIp") if ip.get("CidrIp") == "0.0.0.0/0" else ip] + security_rule = SecurityRule( ip_protocol, from_port, to_port, ip_ranges, source_groups ) + if security_rule in group.egress_rules: group.egress_rules.remove(security_rule) return security_rule @@ -3737,7 +3748,6 @@ def __init__( tag_specifications=None, private_dns_enabled=None, ): - self.id = id self.vpc_id = vpc_id self.service_name = service_name diff --git a/moto/ec2/responses/security_groups.py b/moto/ec2/responses/security_groups.py index f0002d5bdfac..af84b7738e1d 100644 --- a/moto/ec2/responses/security_groups.py +++ b/moto/ec2/responses/security_groups.py @@ -20,7 +20,11 @@ def parse_sg_attributes_from_dict(sg_attributes): ip_ranges = [] ip_ranges_tree = sg_attributes.get("IpRanges") or {} for ip_range_idx in sorted(ip_ranges_tree.keys()): - ip_ranges.append(ip_ranges_tree[ip_range_idx]["CidrIp"][0]) + ip_range = {"CidrIp": ip_ranges_tree[ip_range_idx]["CidrIp"][0]} + if ip_ranges_tree[ip_range_idx].get("Description"): + ip_range["Description"] = ip_ranges_tree[ip_range_idx].get("Description")[0] + + ip_ranges.append(ip_range) source_groups = [] source_group_ids = [] @@ -61,6 +65,7 @@ def _process_rules_from_querystring(self): source_groups, source_group_ids, ) = parse_sg_attributes_from_dict(querytree) + yield ( group_name_or_id, ip_protocol, @@ -211,7 +216,10 @@ def revoke_security_group_ingress(self): {% for ip_range in 
rule.ip_ranges %} - {{ ip_range }} + {{ ip_range['CidrIp'] }} + {% if ip_range['Description'] %} + {{ ip_range['Description'] }} + {% endif %} {% endfor %} diff --git a/tests/test_ec2/test_security_groups.py b/tests/test_ec2/test_security_groups.py index 7e936b7a5872..90f395507104 100644 --- a/tests/test_ec2/test_security_groups.py +++ b/tests/test_ec2/test_security_groups.py @@ -1,6 +1,7 @@ from __future__ import unicode_literals import copy +import json # Ensure 'assert_raises' context manager support for Python 2.6 import tests.backport_assert_raises # noqa @@ -272,9 +273,10 @@ def test_authorize_ip_range_and_revoke(): # There are two egress rules associated with the security group: # the default outbound rule and the new one int(egress_security_group.rules_egress[1].to_port).should.equal(2222) - egress_security_group.rules_egress[1].grants[0].cidr_ip.should.equal( - "123.123.123.123/32" - ) + actual_cidr = egress_security_group.rules_egress[1].grants[0].cidr_ip + # Deal with Python2 dict->unicode, instead of dict->string + actual_cidr = json.loads(actual_cidr.replace("u'", "'").replace("'", '"')) + actual_cidr.should.equal({"CidrIp": "123.123.123.123/32"}) # Wrong Cidr should throw error egress_security_group.revoke.when.called_with( @@ -690,6 +692,68 @@ def test_add_same_rule_twice_throws_error(): sg.authorize_ingress(IpPermissions=ip_permissions) +@mock_ec2 +def test_description_in_ip_permissions(): + ec2 = boto3.resource("ec2", region_name="us-west-1") + conn = boto3.client("ec2", region_name="us-east-1") + vpc = ec2.create_vpc(CidrBlock="10.0.0.0/16") + sg = conn.create_security_group( + GroupName="sg1", Description="Test security group sg1", VpcId=vpc.id + ) + + ip_permissions = [ + { + "IpProtocol": "tcp", + "FromPort": 27017, + "ToPort": 27017, + "IpRanges": [{"CidrIp": "1.2.3.4/32", "Description": "testDescription"}], + } + ] + conn.authorize_security_group_ingress( + GroupId=sg["GroupId"], IpPermissions=ip_permissions + ) + + result = conn.describe_security_groups(GroupIds=[sg["GroupId"]]) + + assert ( + result["SecurityGroups"][0]["IpPermissions"][0]["IpRanges"][0]["Description"] + == "testDescription" + ) + assert ( + result["SecurityGroups"][0]["IpPermissions"][0]["IpRanges"][0]["CidrIp"] + == "1.2.3.4/32" + ) + + sg = conn.create_security_group( + GroupName="sg2", Description="Test security group sg1", VpcId=vpc.id + ) + + ip_permissions = [ + { + "IpProtocol": "tcp", + "FromPort": 27017, + "ToPort": 27017, + "IpRanges": [{"CidrIp": "1.2.3.4/32"}], + } + ] + conn.authorize_security_group_ingress( + GroupId=sg["GroupId"], IpPermissions=ip_permissions + ) + + result = conn.describe_security_groups(GroupIds=[sg["GroupId"]]) + + assert ( + result["SecurityGroups"][0]["IpPermissions"][0]["IpRanges"][0].get( + "Description" + ) + is None + ) + assert ( + result["SecurityGroups"][0]["IpPermissions"][0]["IpRanges"][0]["CidrIp"] + == "1.2.3.4/32" + ) + + @mock_ec2 def test_security_group_tagging_boto3(): conn = boto3.client("ec2", region_name="us-east-1") @@ -868,7 +932,7 @@ def test_revoke_security_group_egress(): { "FromPort": 0, "IpProtocol": "-1", - "IpRanges": [{"CidrIp": "0.0.0.0/0"},], + "IpRanges": [{"CidrIp": "0.0.0.0/0"}], "ToPort": 123, }, ] From 9894e1785a610f1dd8c9bfce1fa416692b3e0c2f Mon Sep 17 00:00:00 2001 From: usmangani1 Date: Thu, 6 Aug 2020 10:56:44 +0530 Subject: [PATCH 485/658] Enhancement : Ec2 - Add describe-vpc-endpoint-services method support. (#3108) * Enhancement : Ec2 - Add describe-vpc-endpoint-services method support. 
* Fix:EC2-describe_vpc_endPoint_services changed the template * Fixed comments * Linting Co-authored-by: usmankb Co-authored-by: Bert Blommers --- moto/ec2/models.py | 15 +++++++++++++++ moto/ec2/responses/vpcs.py | 37 +++++++++++++++++++++++++++++++++++++ tests/test_ec2/test_vpcs.py | 31 +++++++++++++++++++++++++++++++ 3 files changed, 83 insertions(+) diff --git a/moto/ec2/models.py b/moto/ec2/models.py index 2498726b8736..63ebd1738642 100644 --- a/moto/ec2/models.py +++ b/moto/ec2/models.py @@ -3074,6 +3074,21 @@ def create_vpc_endpoint( return vpc_end_point + def get_vpc_end_point_services(self): + vpc_end_point_services = self.vpc_end_points.values() + + services = [] + for value in vpc_end_point_services: + services.append(value.service_name) + + availability_zones = EC2Backend.describe_availability_zones(self) + + return { + "servicesDetails": vpc_end_point_services, + "services": services, + "availability_zones": availability_zones, + } + class VPCPeeringConnectionStatus(object): def __init__(self, code="initiating-request", message=""): diff --git a/moto/ec2/responses/vpcs.py b/moto/ec2/responses/vpcs.py index 59222207dd56..fc752fa7de45 100644 --- a/moto/ec2/responses/vpcs.py +++ b/moto/ec2/responses/vpcs.py @@ -191,6 +191,11 @@ def create_vpc_endpoint(self): template = self.response_template(CREATE_VPC_END_POINT) return template.render(vpc_end_point=vpc_end_point) + def describe_vpc_endpoint_services(self): + vpc_end_point_services = self.ec2_backend.get_vpc_end_point_services() + template = self.response_template(DESCRIBE_VPC_ENDPOINT_RESPONSE) + return template.render(vpc_end_points=vpc_end_point_services) + CREATE_VPC_RESPONSE = """ @@ -449,3 +454,35 @@ def create_vpc_endpoint(self): {{ vpc_end_point.created_at }} """ + +DESCRIBE_VPC_ENDPOINT_RESPONSE = """ + 19a9ff46-7df6-49b8-9726-3df27527089d + + {% for serviceName in vpc_end_points.services %} + {{ serviceName }} + {% endfor %} + + + + {% for service in vpc_end_points.servicesDetails %} + amazon + + + {{ service.type }} + + + + {{ ".".join((service.service_name.split(".")[::-1])) }} + + false + + {% for zone in vpc_end_points.availability_zones %} + {{ zone.name }} + {% endfor %} + + {{ service.service_name }} + true + {% endfor %} + + +""" diff --git a/tests/test_ec2/test_vpcs.py b/tests/test_ec2/test_vpcs.py index 1bc3ddd98492..35705e482025 100644 --- a/tests/test_ec2/test_vpcs.py +++ b/tests/test_ec2/test_vpcs.py @@ -825,3 +825,34 @@ def test_describe_classic_link_dns_support_multiple(): assert response.get("Vpcs").sort(key=lambda x: x["VpcId"]) == expected.sort( key=lambda x: x["VpcId"] ) + + +@mock_ec2 +def test_describe_vpc_end_point_services(): + ec2 = boto3.client("ec2", region_name="us-west-1") + vpc = ec2.create_vpc(CidrBlock="10.0.0.0/16") + + route_table = ec2.create_route_table(VpcId=vpc["Vpc"]["VpcId"]) + + ec2.create_vpc_endpoint( + VpcId=vpc["Vpc"]["VpcId"], + ServiceName="com.amazonaws.us-east-1.s3", + RouteTableIds=[route_table["RouteTable"]["RouteTableId"]], + VpcEndpointType="gateway", + ) + + vpc_end_point_services = ec2.describe_vpc_endpoint_services() + + assert vpc_end_point_services.get("ServiceDetails").should.be.true + assert vpc_end_point_services.get("ServiceNames").should.be.true + assert vpc_end_point_services.get("ServiceNames") == ["com.amazonaws.us-east-1.s3"] + assert ( + vpc_end_point_services.get("ServiceDetails")[0] + .get("ServiceType", [])[0] + .get("ServiceType") + == "gateway" + ) + assert vpc_end_point_services.get("ServiceDetails")[0].get("AvailabilityZones") == [ + "us-west-1a", 
+ "us-west-1b", + ] From c917ac6ecb76fc319869a4e1c575877d25c54344 Mon Sep 17 00:00:00 2001 From: "Kamil Mankowski (kam193)" Date: Wed, 19 Aug 2020 23:18:32 +0200 Subject: [PATCH 486/658] Set more lambda runtime variables --- moto/awslambda/models.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/moto/awslambda/models.py b/moto/awslambda/models.py index a234fbe01d76..2c0d4d9e29f5 100644 --- a/moto/awslambda/models.py +++ b/moto/awslambda/models.py @@ -388,11 +388,16 @@ def _invoke_lambda(self, code, event=None, context=None): # also need to hook it up to the other services so it can make kws/s3 etc calls # Should get invoke_id /RequestId from invocation env_vars = { + "_HANDLER": self.handler, + "AWS_EXECUTION_ENV": "AWS_Lambda_{}".format(self.run_time), "AWS_LAMBDA_FUNCTION_TIMEOUT": self.timeout, "AWS_LAMBDA_FUNCTION_NAME": self.function_name, "AWS_LAMBDA_FUNCTION_MEMORY_SIZE": self.memory_size, "AWS_LAMBDA_FUNCTION_VERSION": self.version, "AWS_REGION": self.region, + "AWS_ACCESS_KEY_ID": "role-account-id", + "AWS_SECRET_ACCESS_KEY": "role-secret-key", + "AWS_SESSION_TOKEN": "session-token", } env_vars.update(self.environment_vars) From 6ec7c4faa44bfaa1694751c52cc1ee15b45913e0 Mon Sep 17 00:00:00 2001 From: "Kamil Mankowski (kam193)" Date: Wed, 19 Aug 2020 23:48:07 +0200 Subject: [PATCH 487/658] Lower docker package in Travis --- .travis.yml | 1 + travis_moto_server.sh | 4 ++++ 2 files changed, 5 insertions(+) diff --git a/.travis.yml b/.travis.yml index 8f218134b84d..ed9084f19029 100644 --- a/.travis.yml +++ b/.travis.yml @@ -27,6 +27,7 @@ install: docker run --rm -t --name motoserver -e TEST_SERVER_MODE=true -e AWS_SECRET_ACCESS_KEY=server_secret -e AWS_ACCESS_KEY_ID=server_key -v `pwd`:/moto -p 5000:5000 -v /var/run/docker.sock:/var/run/docker.sock python:${PYTHON_DOCKER_TAG} /moto/travis_moto_server.sh & fi travis_retry pip install -r requirements-dev.txt + travis_retry pip install "docker>=2.5.1,<=4.2.2" # Limit version due to old Docker Engine in Travis https://github.com/docker/docker-py/issues/2639 travis_retry pip install boto==2.45.0 travis_retry pip install boto3 travis_retry pip install dist/moto*.gz diff --git a/travis_moto_server.sh b/travis_moto_server.sh index 902644b20556..4be26073e717 100755 --- a/travis_moto_server.sh +++ b/travis_moto_server.sh @@ -1,5 +1,9 @@ #!/usr/bin/env bash set -e pip install flask +# TravisCI on bionic dist uses old version of Docker Engine +# which is incompatibile with newer docker-py +# See https://github.com/docker/docker-py/issues/2639 +pip install "docker>=2.5.1,<=4.2.2" pip install /moto/dist/moto*.gz moto_server -H 0.0.0.0 -p 5000 \ No newline at end of file From 8a551a975451c38e9002cd445d8a0b44f5f955ca Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Kamil=20Ma=C5=84kowski?= Date: Tue, 25 Aug 2020 14:05:49 +0200 Subject: [PATCH 488/658] [SNS] Mock sending directly SMS (#3253) * [SNS] Mock sending directly SMS Proper behaviour when publishing to PhoneNumber is sending message directly to this number, without any topic or previous confirmation. 
https://docs.aws.amazon.com/sns/latest/dg/sns-mobile-phone-number-as-subscriber.html * Fix arguments order * Omit checking local backend when tests in server mode --- moto/ses/models.py | 2 +- moto/sns/models.py | 26 ++++++++++++----- moto/sns/responses.py | 21 +++++--------- tests/test_sns/test_publishing_boto3.py | 38 +++++++++++-------------- 4 files changed, 44 insertions(+), 43 deletions(-) diff --git a/moto/ses/models.py b/moto/ses/models.py index e90f66fa8627..a817444bdc20 100644 --- a/moto/ses/models.py +++ b/moto/ses/models.py @@ -202,7 +202,7 @@ def __process_sns_feedback__(self, source, destinations, region): if sns_topic is not None: message = self.__generate_feedback__(msg_type) if message: - sns_backends[region].publish(sns_topic, message) + sns_backends[region].publish(message, arn=sns_topic) def send_raw_email(self, source, destinations, raw_data, region): if source is not None: diff --git a/moto/sns/models.py b/moto/sns/models.py index 8a4771a3754f..779a0fb06db9 100644 --- a/moto/sns/models.py +++ b/moto/sns/models.py @@ -35,6 +35,7 @@ DEFAULT_PAGE_SIZE = 100 MAXIMUM_MESSAGE_LENGTH = 262144 # 256 KiB +MAXIMUM_SMS_MESSAGE_BYTES = 1600 # Amazon limit for a single publish SMS action class Topic(CloudFormationModel): @@ -365,6 +366,7 @@ def __init__(self, region_name): self.platform_endpoints = {} self.region_name = region_name self.sms_attributes = {} + self.sms_messages = OrderedDict() self.opt_out_numbers = [ "+447420500600", "+447420505401", @@ -432,12 +434,6 @@ def get_topic(self, arn): except KeyError: raise SNSNotFoundError("Topic with arn {0} not found".format(arn)) - def get_topic_from_phone_number(self, number): - for subscription in self.subscriptions.values(): - if subscription.protocol == "sms" and subscription.endpoint == number: - return subscription.topic.arn - raise SNSNotFoundError("Could not find valid subscription") - def set_topic_attribute(self, topic_arn, attribute_name, attribute_value): topic = self.get_topic(topic_arn) setattr(topic, attribute_name, attribute_value) @@ -501,11 +497,27 @@ def list_subscriptions(self, topic_arn=None, next_token=None): else: return self._get_values_nexttoken(self.subscriptions, next_token) - def publish(self, arn, message, subject=None, message_attributes=None): + def publish( + self, + message, + arn=None, + phone_number=None, + subject=None, + message_attributes=None, + ): if subject is not None and len(subject) > 100: # Note that the AWS docs around length are wrong: https://github.com/spulec/moto/issues/1503 raise ValueError("Subject must be less than 100 characters") + if phone_number: + # This is only an approximation. 
In fact, we should try to use GSM-7 or UCS-2 encoding to count used bytes + if len(message) > MAXIMUM_SMS_MESSAGE_BYTES: + raise ValueError("SMS message must be less than 1600 bytes") + + message_id = six.text_type(uuid.uuid4()) + self.sms_messages[message_id] = (phone_number, message) + return message_id + if len(message) > MAXIMUM_MESSAGE_LENGTH: raise InvalidParameterValue( "An error occurred (InvalidParameter) when calling the Publish operation: Invalid parameter: Message too long" diff --git a/moto/sns/responses.py b/moto/sns/responses.py index c2eb3e7c3d10..7fdc37ab6362 100644 --- a/moto/sns/responses.py +++ b/moto/sns/responses.py @@ -6,7 +6,7 @@ from moto.core.responses import BaseResponse from moto.core.utils import camelcase_to_underscores from .models import sns_backends -from .exceptions import SNSNotFoundError, InvalidParameterValue +from .exceptions import InvalidParameterValue from .utils import is_e164 @@ -327,6 +327,7 @@ def publish(self): message_attributes = self._parse_message_attributes() + arn = None if phone_number is not None: # Check phone is correct syntax (e164) if not is_e164(phone_number): @@ -336,18 +337,6 @@ def publish(self): ), dict(status=400), ) - - # Look up topic arn by phone number - try: - arn = self.backend.get_topic_from_phone_number(phone_number) - except SNSNotFoundError: - return ( - self._error( - "ParameterValueInvalid", - "Could not find topic associated with phone number", - ), - dict(status=400), - ) elif target_arn is not None: arn = target_arn else: @@ -357,7 +346,11 @@ def publish(self): try: message_id = self.backend.publish( - arn, message, subject=subject, message_attributes=message_attributes + message, + arn=arn, + phone_number=phone_number, + subject=subject, + message_attributes=message_attributes, ) except ValueError as err: error_response = self._error("InvalidParameter", str(err)) diff --git a/tests/test_sns/test_publishing_boto3.py b/tests/test_sns/test_publishing_boto3.py index fddd9125c692..99e7ae7a45b2 100644 --- a/tests/test_sns/test_publishing_boto3.py +++ b/tests/test_sns/test_publishing_boto3.py @@ -11,8 +11,9 @@ import responses from botocore.exceptions import ClientError from nose.tools import assert_raises -from moto import mock_sns, mock_sqs +from moto import mock_sns, mock_sqs, settings from moto.core import ACCOUNT_ID +from moto.sns import sns_backend MESSAGE_FROM_SQS_TEMPLATE = ( '{\n "Message": "%s",\n "MessageId": "%s",\n "Signature": "EXAMPLElDMXvB8r9R83tGoNn0ecwd5UjllzsvSvbItzfaMpN2nk5HVSw7XnOn/49IkxDKz8YrlH2qJXj2iZB0Zo2O71c4qQk1fMUDi3LGpij7RCW7AW9vYYsSqIKRnFS94ilu7NFhUzLiieYr4BKHpdTmdD6c0esKEYBpabxDSc=",\n "SignatureVersion": "1",\n "SigningCertURL": "https://sns.us-east-1.amazonaws.com/SimpleNotificationService-f3ecfb7224c7233fe7bb5f59f96de52f.pem",\n "Subject": "my subject",\n "Timestamp": "2015-01-01T12:00:00.000Z",\n "TopicArn": "arn:aws:sns:%s:' @@ -223,36 +224,31 @@ def test_publish_to_sqs_msg_attr_number_type(): @mock_sns def test_publish_sms(): client = boto3.client("sns", region_name="us-east-1") - client.create_topic(Name="some-topic") - resp = client.create_topic(Name="some-topic") - arn = resp["TopicArn"] - - client.subscribe(TopicArn=arn, Protocol="sms", Endpoint="+15551234567") result = client.publish(PhoneNumber="+15551234567", Message="my message") + result.should.contain("MessageId") + if not settings.TEST_SERVER_MODE: + sns_backend.sms_messages.should.have.key(result["MessageId"]).being.equal( + ("+15551234567", "my message") + ) @mock_sns def test_publish_bad_sms(): client = 
boto3.client("sns", region_name="us-east-1") - client.create_topic(Name="some-topic") - resp = client.create_topic(Name="some-topic") - arn = resp["TopicArn"] - client.subscribe(TopicArn=arn, Protocol="sms", Endpoint="+15551234567") - - try: - # Test invalid number + # Test invalid number + with assert_raises(ClientError) as cm: client.publish(PhoneNumber="NAA+15551234567", Message="my message") - except ClientError as err: - err.response["Error"]["Code"].should.equal("InvalidParameter") - - try: - # Test not found number - client.publish(PhoneNumber="+44001234567", Message="my message") - except ClientError as err: - err.response["Error"]["Code"].should.equal("ParameterValueInvalid") + cm.exception.response["Error"]["Code"].should.equal("InvalidParameter") + cm.exception.response["Error"]["Message"].should.contain("not meet the E164") + + # Test to long ASCII message + with assert_raises(ClientError) as cm: + client.publish(PhoneNumber="+15551234567", Message="a" * 1601) + cm.exception.response["Error"]["Code"].should.equal("InvalidParameter") + cm.exception.response["Error"]["Message"].should.contain("must be less than 1600") @mock_sqs From 47a227921d26845f57766cb73e610ddbc30737d5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Kamil=20Ma=C5=84kowski?= Date: Tue, 25 Aug 2020 14:51:58 +0200 Subject: [PATCH 489/658] SES: Fix sending email when use verify_email_address (#3242) --- moto/ses/models.py | 2 ++ tests/test_ses/test_ses_boto3.py | 29 +++++++++++++++++++++++++++++ 2 files changed, 31 insertions(+) diff --git a/moto/ses/models.py b/moto/ses/models.py index a817444bdc20..d9a44a3703fa 100644 --- a/moto/ses/models.py +++ b/moto/ses/models.py @@ -103,6 +103,8 @@ def _is_verified_address(self, source): _, address = parseaddr(source) if address in self.addresses: return True + if address in self.email_addresses: + return True user, host = address.split("@", 1) return host in self.domains diff --git a/tests/test_ses/test_ses_boto3.py b/tests/test_ses/test_ses_boto3.py index de8ec7261e20..efd4b980c986 100644 --- a/tests/test_ses/test_ses_boto3.py +++ b/tests/test_ses/test_ses_boto3.py @@ -84,6 +84,35 @@ def test_send_email(): sent_count.should.equal(3) +@mock_ses +def test_send_email_when_verify_source(): + conn = boto3.client("ses", region_name="us-east-1") + + kwargs = dict( + Destination={"ToAddresses": ["test_to@example.com"],}, + Message={ + "Subject": {"Data": "test subject"}, + "Body": {"Text": {"Data": "test body"}}, + }, + ) + + conn.send_email.when.called_with( + Source="verify_email_address@example.com", **kwargs + ).should.throw(ClientError) + conn.verify_email_address(EmailAddress="verify_email_address@example.com") + conn.send_email(Source="verify_email_address@example.com", **kwargs) + + conn.send_email.when.called_with( + Source="verify_email_identity@example.com", **kwargs + ).should.throw(ClientError) + conn.verify_email_identity(EmailAddress="verify_email_identity@example.com") + conn.send_email(Source="verify_email_identity@example.com", **kwargs) + + send_quota = conn.get_send_quota() + sent_count = int(send_quota["SentLast24Hours"]) + sent_count.should.equal(2) + + @mock_ses def test_send_templated_email(): conn = boto3.client("ses", region_name="us-east-1") From f744356da72a3c8be7e0cfeffb607b0d54a88bd9 Mon Sep 17 00:00:00 2001 From: Guilherme Martins Crocetti Date: Wed, 26 Aug 2020 07:06:53 -0300 Subject: [PATCH 490/658] Lambda reserved concurrency (#3215) * lambda-responses: add method to dispatch concurrency calls * lambda-resources: add route to handle concurrency 
requests * lambda-model: implement put_function_concurrency and concurrency attribute * put-concurrency-tests: add one simple test * get_function: add concurrency entry - with test * lambda-reserved-concurrency: cloudformation support * lambda-concurrency: implement delete_reserved with tests * lambda-concurrency: implement get_reserved with tests * lint * implementation-cov: mark delete_function_concurrency, put_function_concurrency and get_function_concurrency * botocore doesn't display concurrency entry for lambdas without it * lambda(refactor): improvements on response's handler --- IMPLEMENTATION_COVERAGE.md | 6 +- moto/awslambda/models.py | 39 +++++++++- moto/awslambda/responses.py | 48 ++++++++++++ moto/awslambda/urls.py | 1 + tests/test_awslambda/test_lambda.py | 78 ++++++++++++++++++- .../test_cloudformation_stack_integration.py | 6 ++ 6 files changed, 170 insertions(+), 8 deletions(-) diff --git a/IMPLEMENTATION_COVERAGE.md b/IMPLEMENTATION_COVERAGE.md index d2696e6af3ef..7b35c34ef9bf 100644 --- a/IMPLEMENTATION_COVERAGE.md +++ b/IMPLEMENTATION_COVERAGE.md @@ -5110,7 +5110,7 @@ - [ ] delete_alias - [X] delete_event_source_mapping - [X] delete_function -- [ ] delete_function_concurrency +- [X] delete_function_concurrency - [ ] delete_function_event_invoke_config - [ ] delete_layer_version - [ ] delete_provisioned_concurrency_config @@ -5118,7 +5118,7 @@ - [ ] get_alias - [X] get_event_source_mapping - [X] get_function -- [ ] get_function_concurrency +- [X] get_function_concurrency - [ ] get_function_configuration - [ ] get_function_event_invoke_config - [ ] get_layer_version @@ -5139,7 +5139,7 @@ - [X] list_versions_by_function - [ ] publish_layer_version - [ ] publish_version -- [ ] put_function_concurrency +- [X] put_function_concurrency - [ ] put_function_event_invoke_config - [ ] put_provisioned_concurrency_config - [ ] remove_layer_version_permission diff --git a/moto/awslambda/models.py b/moto/awslambda/models.py index 2c0d4d9e29f5..2aa207da9417 100644 --- a/moto/awslambda/models.py +++ b/moto/awslambda/models.py @@ -165,6 +165,7 @@ def __init__(self, spec, region, validate_s3=True, version=1): self.docker_client = docker.from_env() self.policy = None self.state = "Active" + self.reserved_concurrency = spec.get("ReservedConcurrentExecutions", None) # Unfortunately mocking replaces this method w/o fallback enabled, so we # need to replace it if we detect it's been mocked @@ -285,7 +286,7 @@ def get_configuration(self): return config def get_code(self): - return { + code = { "Code": { "Location": "s3://awslambda-{0}-tasks.s3-{0}.amazonaws.com/{1}".format( self.region, self.code["S3Key"] @@ -294,6 +295,15 @@ def get_code(self): }, "Configuration": self.get_configuration(), } + if self.reserved_concurrency: + code.update( + { + "Concurrency": { + "ReservedConcurrentExecutions": self.reserved_concurrency + } + } + ) + return code def update_configuration(self, config_updates): for key, value in config_updates.items(): @@ -511,6 +521,15 @@ def create_from_cloudformation_json( cls, resource_name, cloudformation_json, region_name ): properties = cloudformation_json["Properties"] + optional_properties = ( + "Description", + "MemorySize", + "Publish", + "Timeout", + "VpcConfig", + "Environment", + "ReservedConcurrentExecutions", + ) # required spec = { @@ -520,9 +539,7 @@ def create_from_cloudformation_json( "Role": properties["Role"], "Runtime": properties["Runtime"], } - optional_properties = ( - "Description MemorySize Publish Timeout VpcConfig Environment".split() - ) + # NOTE: 
Not doing `properties.get(k, DEFAULT)` to avoid duplicating the # default logic for prop in optional_properties: @@ -1157,6 +1174,20 @@ def invoke(self, function_name, qualifier, body, headers, response_headers): else: return None + def put_function_concurrency(self, function_name, reserved_concurrency): + fn = self.get_function(function_name) + fn.reserved_concurrency = reserved_concurrency + return fn.reserved_concurrency + + def delete_function_concurrency(self, function_name): + fn = self.get_function(function_name) + fn.reserved_concurrency = None + return fn.reserved_concurrency + + def get_function_concurrency(self, function_name): + fn = self.get_function(function_name) + return fn.reserved_concurrency + def do_validate_s3(): return os.environ.get("VALIDATE_LAMBDA_S3", "") in ["", "1", "true"] diff --git a/moto/awslambda/responses.py b/moto/awslambda/responses.py index a4f559fc2076..6447cde139ff 100644 --- a/moto/awslambda/responses.py +++ b/moto/awslambda/responses.py @@ -141,6 +141,19 @@ def code(self, request, full_url, headers): else: raise ValueError("Cannot handle request") + def function_concurrency(self, request, full_url, headers): + http_method = request.method + self.setup_class(request, full_url, headers) + + if http_method == "GET": + return self._get_function_concurrency(request) + elif http_method == "DELETE": + return self._delete_function_concurrency(request) + elif http_method == "PUT": + return self._put_function_concurrency(request) + else: + raise ValueError("Cannot handle request") + def _add_policy(self, request, full_url, headers): path = request.path if hasattr(request, "path") else path_url(request.url) function_name = path.split("/")[-2] @@ -359,3 +372,38 @@ def _put_code(self): return 200, {}, json.dumps(resp) else: return 404, {}, "{}" + + def _get_function_concurrency(self, request): + path_function_name = self.path.rsplit("/", 2)[-2] + function_name = self.lambda_backend.get_function(path_function_name) + + if function_name is None: + return 404, {}, "{}" + + resp = self.lambda_backend.get_function_concurrency(path_function_name) + return 200, {}, json.dumps({"ReservedConcurrentExecutions": resp}) + + def _delete_function_concurrency(self, request): + path_function_name = self.path.rsplit("/", 2)[-2] + function_name = self.lambda_backend.get_function(path_function_name) + + if function_name is None: + return 404, {}, "{}" + + self.lambda_backend.delete_function_concurrency(path_function_name) + + return 204, {}, "{}" + + def _put_function_concurrency(self, request): + path_function_name = self.path.rsplit("/", 2)[-2] + function = self.lambda_backend.get_function(path_function_name) + + if function is None: + return 404, {}, "{}" + + concurrency = self._get_param("ReservedConcurrentExecutions", None) + resp = self.lambda_backend.put_function_concurrency( + path_function_name, concurrency + ) + + return 200, {}, json.dumps({"ReservedConcurrentExecutions": resp}) diff --git a/moto/awslambda/urls.py b/moto/awslambda/urls.py index c25e58dbaf26..03cedc5e4878 100644 --- a/moto/awslambda/urls.py +++ b/moto/awslambda/urls.py @@ -19,4 +19,5 @@ r"{0}/(?P[^/]+)/functions/(?P[\w_-]+)/policy/?$": response.policy, r"{0}/(?P[^/]+)/functions/(?P[\w_-]+)/configuration/?$": response.configuration, r"{0}/(?P[^/]+)/functions/(?P[\w_-]+)/code/?$": response.code, + r"{0}/(?P[^/]+)/functions/(?P[\w_-]+)/concurrency/?$": response.function_concurrency, } diff --git a/tests/test_awslambda/test_lambda.py b/tests/test_awslambda/test_lambda.py index 1cd943f04192..ca05d4aa4f15 
100644 --- a/tests/test_awslambda/test_lambda.py +++ b/tests/test_awslambda/test_lambda.py @@ -489,7 +489,7 @@ def test_get_function(): {"test_variable": "test_value"} ) - # Test get function with + # Test get function with qualifier result = conn.get_function(FunctionName="testFunction", Qualifier="$LATEST") result["Configuration"]["Version"].should.equal("$LATEST") result["Configuration"]["FunctionArn"].should.equal( @@ -1721,6 +1721,82 @@ def test_remove_function_permission(): policy["Statement"].should.equal([]) +@mock_lambda +def test_put_function_concurrency(): + expected_concurrency = 15 + function_name = "test" + + conn = boto3.client("lambda", _lambda_region) + conn.create_function( + FunctionName=function_name, + Runtime="python3.8", + Role=(get_role_name()), + Handler="lambda_function.handler", + Code={"ZipFile": get_test_zip_file1()}, + Description="test lambda function", + Timeout=3, + MemorySize=128, + Publish=True, + ) + result = conn.put_function_concurrency( + FunctionName=function_name, ReservedConcurrentExecutions=expected_concurrency + ) + + result["ReservedConcurrentExecutions"].should.equal(expected_concurrency) + + +@mock_lambda +def test_delete_function_concurrency(): + function_name = "test" + + conn = boto3.client("lambda", _lambda_region) + conn.create_function( + FunctionName=function_name, + Runtime="python3.8", + Role=(get_role_name()), + Handler="lambda_function.handler", + Code={"ZipFile": get_test_zip_file1()}, + Description="test lambda function", + Timeout=3, + MemorySize=128, + Publish=True, + ) + conn.put_function_concurrency( + FunctionName=function_name, ReservedConcurrentExecutions=15 + ) + + conn.delete_function_concurrency(FunctionName=function_name) + result = conn.get_function(FunctionName=function_name) + + result.doesnt.have.key("Concurrency") + + +@mock_lambda +def test_get_function_concurrency(): + expected_concurrency = 15 + function_name = "test" + + conn = boto3.client("lambda", _lambda_region) + conn.create_function( + FunctionName=function_name, + Runtime="python3.8", + Role=(get_role_name()), + Handler="lambda_function.handler", + Code={"ZipFile": get_test_zip_file1()}, + Description="test lambda function", + Timeout=3, + MemorySize=128, + Publish=True, + ) + conn.put_function_concurrency( + FunctionName=function_name, ReservedConcurrentExecutions=expected_concurrency + ) + + result = conn.get_function_concurrency(FunctionName=function_name) + + result["ReservedConcurrentExecutions"].should.equal(expected_concurrency) + + def create_invalid_lambda(role): conn = boto3.client("lambda", _lambda_region) zip_content = get_test_zip_file1() diff --git a/tests/test_cloudformation/test_cloudformation_stack_integration.py b/tests/test_cloudformation/test_cloudformation_stack_integration.py index 5a8e9cd683ba..ee2fbc94cfbc 100644 --- a/tests/test_cloudformation/test_cloudformation_stack_integration.py +++ b/tests/test_cloudformation/test_cloudformation_stack_integration.py @@ -1777,6 +1777,7 @@ def lambda_handler(event, context): "Role": {"Fn::GetAtt": ["MyRole", "Arn"]}, "Runtime": "python2.7", "Environment": {"Variables": {"TEST_ENV_KEY": "test-env-val"}}, + "ReservedConcurrentExecutions": 10, }, }, "MyRole": { @@ -1811,6 +1812,11 @@ def lambda_handler(event, context): {"Variables": {"TEST_ENV_KEY": "test-env-val"}} ) + function_name = result["Functions"][0]["FunctionName"] + result = conn.get_function(FunctionName=function_name) + + result["Concurrency"]["ReservedConcurrentExecutions"].should.equal(10) + @mock_cloudformation @mock_ec2 From 
2a27e457bfbe743494b4dc7be48bf72ec9429d40 Mon Sep 17 00:00:00 2001 From: jmbollard <69733522+jmbollard@users.noreply.github.com> Date: Wed, 26 Aug 2020 08:27:45 -0500 Subject: [PATCH 491/658] Security group egress ip permissions fix (#3250) * Add support for Description in egress rule response * Update SecurityGroup default egress rule ip range * Remove extra commas * Remove extra commas * Lower docker package in Travis * Add more lambda vars per PR 3247 * Remove code added in 3247 * Add tests for egress rules with Descriptions * Reformat based on black Co-authored-by: spillin --- moto/ec2/models.py | 13 +++++++--- moto/ec2/responses/security_groups.py | 5 +++- tests/test_ec2/test_security_groups.py | 36 ++++++++++++++++++++------ 3 files changed, 41 insertions(+), 13 deletions(-) diff --git a/moto/ec2/models.py b/moto/ec2/models.py index 63ebd1738642..95ed0cb89205 100644 --- a/moto/ec2/models.py +++ b/moto/ec2/models.py @@ -1866,7 +1866,9 @@ def __init__(self, ec2_backend, group_id, name, description, vpc_id=None): self.name = name self.description = description self.ingress_rules = [] - self.egress_rules = [SecurityRule("-1", None, None, ["0.0.0.0/0"], [])] + self.egress_rules = [ + SecurityRule("-1", None, None, [{"CidrIp": "0.0.0.0/0"}], []) + ] self.enis = {} self.vpc_id = vpc_id self.owner_id = OWNER_ID @@ -2266,13 +2268,16 @@ def revoke_security_group_egress( if source_group: source_groups.append(source_group) - for ip in ip_ranges: - ip_ranges = [ip.get("CidrIp") if ip.get("CidrIp") == "0.0.0.0/0" else ip] + # I don't believe this is required after changing the default egress rule + # to be {'CidrIp': '0.0.0.0/0'} instead of just '0.0.0.0/0' + # Not sure why this would return only the IP if it was 0.0.0.0/0 instead of + # the ip_range? + # for ip in ip_ranges: + # ip_ranges = [ip.get("CidrIp") if ip.get("CidrIp") == "0.0.0.0/0" else ip] security_rule = SecurityRule( ip_protocol, from_port, to_port, ip_ranges, source_groups ) - if security_rule in group.egress_rules: group.egress_rules.remove(security_rule) return security_rule diff --git a/moto/ec2/responses/security_groups.py b/moto/ec2/responses/security_groups.py index af84b7738e1d..5c0d1c852ea1 100644 --- a/moto/ec2/responses/security_groups.py +++ b/moto/ec2/responses/security_groups.py @@ -250,7 +250,10 @@ def revoke_security_group_ingress(self): {% for ip_range in rule.ip_ranges %} - {{ ip_range }} + {{ ip_range['CidrIp'] }} + {% if ip_range['Description'] %} + {{ ip_range['Description'] }} + {% endif %} {% endfor %} diff --git a/tests/test_ec2/test_security_groups.py b/tests/test_ec2/test_security_groups.py index 90f395507104..10885df189d4 100644 --- a/tests/test_ec2/test_security_groups.py +++ b/tests/test_ec2/test_security_groups.py @@ -275,8 +275,9 @@ def test_authorize_ip_range_and_revoke(): int(egress_security_group.rules_egress[1].to_port).should.equal(2222) actual_cidr = egress_security_group.rules_egress[1].grants[0].cidr_ip # Deal with Python2 dict->unicode, instead of dict->string - actual_cidr = json.loads(actual_cidr.replace("u'", "'").replace("'", '"')) - actual_cidr.should.equal({"CidrIp": "123.123.123.123/32"}) + if type(actual_cidr) == "unicode": + actual_cidr = json.loads(actual_cidr.replace("u'", "'").replace("'", '"')) + actual_cidr.should.equal("123.123.123.123/32") # Wrong Cidr should throw error egress_security_group.revoke.when.called_with( @@ -810,7 +811,9 @@ def test_authorize_and_revoke_in_bulk(): sg03 = ec2.create_security_group( GroupName="sg03", Description="Test security group sg03" ) - + sg04 = 
ec2.create_security_group( + GroupName="sg04", Description="Test security group sg04" + ) ip_permissions = [ { "IpProtocol": "tcp", @@ -835,13 +838,31 @@ def test_authorize_and_revoke_in_bulk(): "UserIdGroupPairs": [{"GroupName": "sg03", "UserId": sg03.owner_id}], "IpRanges": [], }, + { + "IpProtocol": "tcp", + "FromPort": 27015, + "ToPort": 27015, + "UserIdGroupPairs": [{"GroupName": "sg04", "UserId": sg04.owner_id}], + "IpRanges": [ + {"CidrIp": "10.10.10.0/24", "Description": "Some Description"} + ], + }, + { + "IpProtocol": "tcp", + "FromPort": 27016, + "ToPort": 27016, + "UserIdGroupPairs": [{"GroupId": sg04.id, "UserId": sg04.owner_id}], + "IpRanges": [{"CidrIp": "10.10.10.0/24"}], + }, ] expected_ip_permissions = copy.deepcopy(ip_permissions) expected_ip_permissions[1]["UserIdGroupPairs"][0]["GroupName"] = "sg02" expected_ip_permissions[2]["UserIdGroupPairs"][0]["GroupId"] = sg03.id + expected_ip_permissions[3]["UserIdGroupPairs"][0]["GroupId"] = sg04.id + expected_ip_permissions[4]["UserIdGroupPairs"][0]["GroupName"] = "sg04" sg01.authorize_ingress(IpPermissions=ip_permissions) - sg01.ip_permissions.should.have.length_of(3) + sg01.ip_permissions.should.have.length_of(5) for ip_permission in expected_ip_permissions: sg01.ip_permissions.should.contain(ip_permission) @@ -851,7 +872,7 @@ def test_authorize_and_revoke_in_bulk(): sg01.ip_permissions.shouldnt.contain(ip_permission) sg01.authorize_egress(IpPermissions=ip_permissions) - sg01.ip_permissions_egress.should.have.length_of(4) + sg01.ip_permissions_egress.should.have.length_of(6) for ip_permission in expected_ip_permissions: sg01.ip_permissions_egress.should.contain(ip_permission) @@ -930,11 +951,10 @@ def test_revoke_security_group_egress(): sg.revoke_egress( IpPermissions=[ { - "FromPort": 0, "IpProtocol": "-1", "IpRanges": [{"CidrIp": "0.0.0.0/0"}], - "ToPort": 123, - }, + "UserIdGroupPairs": [], + } ] ) From 55b02c6ee92ff6b0bd8dd381b7fc8b1520230603 Mon Sep 17 00:00:00 2001 From: Kevin Frommelt Date: Wed, 26 Aug 2020 09:15:07 -0500 Subject: [PATCH 492/658] Add support for Launch Templates in Auto Scaling Groups (#3236) * Add support for Launch Templates in Auto Scaling Groups * Use named parameters, simplify parameter validation --- IMPLEMENTATION_COVERAGE.md | 2 +- moto/autoscaling/exceptions.py | 5 + moto/autoscaling/models.py | 144 ++++++-- moto/autoscaling/responses.py | 28 ++ moto/core/responses.py | 4 +- moto/ec2/models.py | 16 + tests/test_autoscaling/test_autoscaling.py | 341 +++++++++++++++++- tests/test_autoscaling/test_cloudformation.py | 276 ++++++++++++++ 8 files changed, 774 insertions(+), 42 deletions(-) create mode 100644 tests/test_autoscaling/test_cloudformation.py diff --git a/IMPLEMENTATION_COVERAGE.md b/IMPLEMENTATION_COVERAGE.md index 7b35c34ef9bf..721c9c977678 100644 --- a/IMPLEMENTATION_COVERAGE.md +++ b/IMPLEMENTATION_COVERAGE.md @@ -2639,7 +2639,7 @@ - [X] create_internet_gateway - [X] create_key_pair - [X] create_launch_template -- [ ] create_launch_template_version +- [x] create_launch_template_version - [ ] create_local_gateway_route - [ ] create_local_gateway_route_table_vpc_association - [X] create_nat_gateway diff --git a/moto/autoscaling/exceptions.py b/moto/autoscaling/exceptions.py index 6f73eff8f6bc..2fddd18ec061 100644 --- a/moto/autoscaling/exceptions.py +++ b/moto/autoscaling/exceptions.py @@ -21,3 +21,8 @@ def __init__(self, instance_id): super(InvalidInstanceError, self).__init__( "ValidationError", "Instance [{0}] is invalid.".format(instance_id) ) + + +class 
ValidationError(AutoscalingClientError): + def __init__(self, message): + super(ValidationError, self).__init__("ValidationError", message) diff --git a/moto/autoscaling/models.py b/moto/autoscaling/models.py index d82f15095f31..1a25a656d0e6 100644 --- a/moto/autoscaling/models.py +++ b/moto/autoscaling/models.py @@ -7,6 +7,7 @@ from moto.compat import OrderedDict from moto.core import BaseBackend, BaseModel, CloudFormationModel +from moto.core.utils import camelcase_to_underscores from moto.ec2 import ec2_backends from moto.elb import elb_backends from moto.elbv2 import elbv2_backends @@ -15,6 +16,7 @@ AutoscalingClientError, ResourceContentionError, InvalidInstanceError, + ValidationError, ) # http://docs.aws.amazon.com/AutoScaling/latest/DeveloperGuide/AS_Concepts.html#Cooldown @@ -233,6 +235,7 @@ def __init__( max_size, min_size, launch_config_name, + launch_template, vpc_zone_identifier, default_cooldown, health_check_period, @@ -242,10 +245,12 @@ def __init__( placement_group, termination_policies, autoscaling_backend, + ec2_backend, tags, new_instances_protected_from_scale_in=False, ): self.autoscaling_backend = autoscaling_backend + self.ec2_backend = ec2_backend self.name = name self._set_azs_and_vpcs(availability_zones, vpc_zone_identifier) @@ -253,10 +258,10 @@ def __init__( self.max_size = max_size self.min_size = min_size - self.launch_config = self.autoscaling_backend.launch_configurations[ - launch_config_name - ] - self.launch_config_name = launch_config_name + self.launch_template = None + self.launch_config = None + + self._set_launch_configuration(launch_config_name, launch_template) self.default_cooldown = ( default_cooldown if default_cooldown else DEFAULT_COOLDOWN @@ -310,6 +315,34 @@ def _set_azs_and_vpcs(self, availability_zones, vpc_zone_identifier, update=Fals self.availability_zones = availability_zones self.vpc_zone_identifier = vpc_zone_identifier + def _set_launch_configuration(self, launch_config_name, launch_template): + if launch_config_name: + self.launch_config = self.autoscaling_backend.launch_configurations[ + launch_config_name + ] + self.launch_config_name = launch_config_name + + if launch_template: + launch_template_id = launch_template.get("launch_template_id") + launch_template_name = launch_template.get("launch_template_name") + + if not (launch_template_id or launch_template_name) or ( + launch_template_id and launch_template_name + ): + raise ValidationError( + "Valid requests must contain either launchTemplateId or LaunchTemplateName" + ) + + if launch_template_id: + self.launch_template = self.ec2_backend.get_launch_template( + launch_template_id + ) + elif launch_template_name: + self.launch_template = self.ec2_backend.get_launch_template_by_name( + launch_template_name + ) + self.launch_template_version = int(launch_template["version"]) + @staticmethod def __set_string_propagate_at_launch_booleans_on_tags(tags): bool_to_string = {True: "true", False: "false"} @@ -334,6 +367,10 @@ def create_from_cloudformation_json( properties = cloudformation_json["Properties"] launch_config_name = properties.get("LaunchConfigurationName") + launch_template = { + camelcase_to_underscores(k): v + for k, v in properties.get("LaunchTemplate", {}).items() + } load_balancer_names = properties.get("LoadBalancerNames", []) target_group_arns = properties.get("TargetGroupARNs", []) @@ -345,6 +382,7 @@ def create_from_cloudformation_json( max_size=properties.get("MaxSize"), min_size=properties.get("MinSize"), launch_config_name=launch_config_name, + 
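Before the spec reaches _set_launch_configuration from CloudFormation, the dict comprehension above converts the template's CamelCase property names into the snake_case keys the model reads. An illustrative input/output pair, using moto's camelcase_to_underscores helper imported at the top of this diff (the property values are hypothetical):

    properties = {"LaunchTemplateId": "lt-123", "Version": "1"}
    launch_template = {camelcase_to_underscores(k): v for k, v in properties.items()}
    # {"launch_template_id": "lt-123", "version": "1"}
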
launch_template=launch_template, vpc_zone_identifier=( ",".join(properties.get("VPCZoneIdentifier", [])) or None ), @@ -393,6 +431,38 @@ def delete(self, region_name): def physical_resource_id(self): return self.name + @property + def image_id(self): + if self.launch_template: + version = self.launch_template.get_version(self.launch_template_version) + return version.image_id + + return self.launch_config.image_id + + @property + def instance_type(self): + if self.launch_template: + version = self.launch_template.get_version(self.launch_template_version) + return version.instance_type + + return self.launch_config.instance_type + + @property + def user_data(self): + if self.launch_template: + version = self.launch_template.get_version(self.launch_template_version) + return version.user_data + + return self.launch_config.user_data + + @property + def security_groups(self): + if self.launch_template: + version = self.launch_template.get_version(self.launch_template_version) + return version.security_groups + + return self.launch_config.security_groups + def update( self, availability_zones, @@ -400,6 +470,7 @@ def update( max_size, min_size, launch_config_name, + launch_template, vpc_zone_identifier, default_cooldown, health_check_period, @@ -421,11 +492,8 @@ def update( if max_size is not None and max_size < len(self.instance_states): desired_capacity = max_size - if launch_config_name: - self.launch_config = self.autoscaling_backend.launch_configurations[ - launch_config_name - ] - self.launch_config_name = launch_config_name + self._set_launch_configuration(launch_config_name, launch_template) + if health_check_period is not None: self.health_check_period = health_check_period if health_check_type is not None: @@ -489,12 +557,13 @@ def get_propagated_tags(self): def replace_autoscaling_group_instances(self, count_needed, propagated_tags): propagated_tags[ASG_NAME_TAG] = self.name + reservation = self.autoscaling_backend.ec2_backend.add_instances( - self.launch_config.image_id, + self.image_id, count_needed, - self.launch_config.user_data, - self.launch_config.security_groups, - instance_type=self.launch_config.instance_type, + self.user_data, + self.security_groups, + instance_type=self.instance_type, tags={"instance": propagated_tags}, placement=random.choice(self.availability_zones), ) @@ -586,6 +655,7 @@ def create_auto_scaling_group( max_size, min_size, launch_config_name, + launch_template, vpc_zone_identifier, default_cooldown, health_check_period, @@ -609,7 +679,19 @@ def make_int(value): health_check_period = 300 else: health_check_period = make_int(health_check_period) - if launch_config_name is None and instance_id is not None: + + # TODO: Add MixedInstancesPolicy once implemented. + # Verify only a single launch config-like parameter is provided. + params = [launch_config_name, launch_template, instance_id] + num_params = sum([1 for param in params if param]) + + if num_params != 1: + raise ValidationError( + "Valid requests must contain either LaunchTemplate, LaunchConfigurationName, " + "InstanceId or MixedInstancesPolicy parameter." 
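The check above enforces that exactly one launch source is supplied. A standalone sketch of the same rule (the helper name validate_launch_source is illustrative, not part of moto):

    def validate_launch_source(launch_config_name, launch_template, instance_id):
        # exactly one of the three launch sources may be supplied
        provided = [p for p in (launch_config_name, launch_template, instance_id) if p]
        if len(provided) != 1:
            raise ValueError(
                "Valid requests must contain either LaunchTemplate, LaunchConfigurationName, "
                "InstanceId or MixedInstancesPolicy parameter."
            )

    validate_launch_source("my-lc", None, None)  # passes
    validate_launch_source("my-lc", {"launch_template_id": "lt-123"}, None)  # raises
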
+ ) + + if instance_id: try: instance = self.ec2_backend.get_instance(instance_id) launch_config_name = name @@ -626,6 +708,7 @@ def make_int(value): max_size=max_size, min_size=min_size, launch_config_name=launch_config_name, + launch_template=launch_template, vpc_zone_identifier=vpc_zone_identifier, default_cooldown=default_cooldown, health_check_period=health_check_period, @@ -635,6 +718,7 @@ def make_int(value): placement_group=placement_group, termination_policies=termination_policies, autoscaling_backend=self, + ec2_backend=self.ec2_backend, tags=tags, new_instances_protected_from_scale_in=new_instances_protected_from_scale_in, ) @@ -652,6 +736,7 @@ def update_auto_scaling_group( max_size, min_size, launch_config_name, + launch_template, vpc_zone_identifier, default_cooldown, health_check_period, @@ -660,19 +745,28 @@ def update_auto_scaling_group( termination_policies, new_instances_protected_from_scale_in=None, ): + # TODO: Add MixedInstancesPolicy once implemented. + # Verify only a single launch config-like parameter is provided. + if launch_config_name and launch_template: + raise ValidationError( + "Valid requests must contain either LaunchTemplate, LaunchConfigurationName " + "or MixedInstancesPolicy parameter." + ) + group = self.autoscaling_groups[name] group.update( - availability_zones, - desired_capacity, - max_size, - min_size, - launch_config_name, - vpc_zone_identifier, - default_cooldown, - health_check_period, - health_check_type, - placement_group, - termination_policies, + availability_zones=availability_zones, + desired_capacity=desired_capacity, + max_size=max_size, + min_size=min_size, + launch_config_name=launch_config_name, + launch_template=launch_template, + vpc_zone_identifier=vpc_zone_identifier, + default_cooldown=default_cooldown, + health_check_period=health_check_period, + health_check_type=health_check_type, + placement_group=placement_group, + termination_policies=termination_policies, new_instances_protected_from_scale_in=new_instances_protected_from_scale_in, ) return group diff --git a/moto/autoscaling/responses.py b/moto/autoscaling/responses.py index 06b68aa4b976..a9651a7743b7 100644 --- a/moto/autoscaling/responses.py +++ b/moto/autoscaling/responses.py @@ -81,6 +81,7 @@ def create_auto_scaling_group(self): min_size=self._get_int_param("MinSize"), instance_id=self._get_param("InstanceId"), launch_config_name=self._get_param("LaunchConfigurationName"), + launch_template=self._get_dict_param("LaunchTemplate."), vpc_zone_identifier=self._get_param("VPCZoneIdentifier"), default_cooldown=self._get_int_param("DefaultCooldown"), health_check_period=self._get_int_param("HealthCheckGracePeriod"), @@ -197,6 +198,7 @@ def update_auto_scaling_group(self): max_size=self._get_int_param("MaxSize"), min_size=self._get_int_param("MinSize"), launch_config_name=self._get_param("LaunchConfigurationName"), + launch_template=self._get_dict_param("LaunchTemplate."), vpc_zone_identifier=self._get_param("VPCZoneIdentifier"), default_cooldown=self._get_int_param("DefaultCooldown"), health_check_period=self._get_int_param("HealthCheckGracePeriod"), @@ -573,14 +575,31 @@ def terminate_instance_in_auto_scaling_group(self): {{ group.health_check_type }} 2013-05-06T17:47:15.107Z + {% if group.launch_config_name %} {{ group.launch_config_name }} + {% elif group.launch_template %} + + {{ group.launch_template.id }} + {{ group.launch_template_version }} + {{ group.launch_template.name }} + + {% endif %} {% for instance_state in group.instance_states %} {{ 
instance_state.health_status }} {{ instance_state.instance.placement }} {{ instance_state.instance.id }} + {{ instance_state.instance.instance_type }} + {% if group.launch_config_name %} {{ group.launch_config_name }} + {% elif group.launch_template %} + + {{ group.launch_template.id }} + {{ group.launch_template_version }} + {{ group.launch_template.name }} + + {% endif %} {{ instance_state.lifecycle_state }} {{ instance_state.protected_from_scale_in|string|lower }} @@ -666,7 +685,16 @@ def terminate_instance_in_auto_scaling_group(self): {{ instance_state.instance.autoscaling_group.name }} {{ instance_state.instance.placement }} {{ instance_state.instance.id }} + {{ instance_state.instance.instance_type }} + {% if instance_state.instance.autoscaling_group.launch_config_name %} {{ instance_state.instance.autoscaling_group.launch_config_name }} + {% elif instance_state.instance.autoscaling_group.launch_template %} + + {{ instance_state.instance.autoscaling_group.launch_template.id }} + {{ instance_state.instance.autoscaling_group.launch_template_version }} + {{ instance_state.instance.autoscaling_group.launch_template.name }} + + {% endif %} {{ instance_state.lifecycle_state }} {{ instance_state.protected_from_scale_in|string|lower }} diff --git a/moto/core/responses.py b/moto/core/responses.py index 676d7549d1de..fdac22c18485 100644 --- a/moto/core/responses.py +++ b/moto/core/responses.py @@ -538,8 +538,8 @@ def _get_dict_param(self, param_prefix): returns { - "SlaveInstanceType": "m1.small", - "InstanceCount": "1", + "slave_instance_type": "m1.small", + "instance_count": "1", } """ params = {} diff --git a/moto/ec2/models.py b/moto/ec2/models.py index 95ed0cb89205..f0ce89d8a42b 100644 --- a/moto/ec2/models.py +++ b/moto/ec2/models.py @@ -5386,6 +5386,22 @@ def __init__(self, template, number, data, description): self.description = description self.create_time = utc_date_and_time() + @property + def image_id(self): + return self.data.get("ImageId", "") + + @property + def instance_type(self): + return self.data.get("InstanceType", "") + + @property + def security_groups(self): + return self.data.get("SecurityGroups", []) + + @property + def user_data(self): + return self.data.get("UserData", "") + class LaunchTemplate(TaggedEC2Resource): def __init__(self, backend, name, template_data, version_description): diff --git a/tests/test_autoscaling/test_autoscaling.py b/tests/test_autoscaling/test_autoscaling.py index 93a8c5a4894c..1e7121381943 100644 --- a/tests/test_autoscaling/test_autoscaling.py +++ b/tests/test_autoscaling/test_autoscaling.py @@ -17,6 +17,7 @@ mock_elb, mock_autoscaling_deprecated, mock_ec2, + mock_cloudformation, ) from tests.helpers import requires_boto_gte @@ -164,7 +165,7 @@ def test_list_many_autoscaling_groups(): @mock_autoscaling @mock_ec2 -def test_list_many_autoscaling_groups(): +def test_propogate_tags(): mocked_networking = setup_networking() conn = boto3.client("autoscaling", region_name="us-east-1") conn.create_launch_configuration(LaunchConfigurationName="TestLC") @@ -692,7 +693,7 @@ def test_detach_load_balancer(): def test_create_autoscaling_group_boto3(): mocked_networking = setup_networking() client = boto3.client("autoscaling", region_name="us-east-1") - _ = client.create_launch_configuration( + client.create_launch_configuration( LaunchConfigurationName="test_launch_configuration" ) response = client.create_auto_scaling_group( @@ -798,13 +799,171 @@ def test_create_autoscaling_group_from_invalid_instance_id(): @mock_autoscaling -def 
test_describe_autoscaling_groups_boto3(): +@mock_ec2 +def test_create_autoscaling_group_from_template(): mocked_networking = setup_networking() + + ec2_client = boto3.client("ec2", region_name="us-east-1") + template = ec2_client.create_launch_template( + LaunchTemplateName="test_launch_template", + LaunchTemplateData={ + "ImageId": "ami-0cc293023f983ed53", + "InstanceType": "t2.micro", + }, + )["LaunchTemplate"] client = boto3.client("autoscaling", region_name="us-east-1") - _ = client.create_launch_configuration( + response = client.create_auto_scaling_group( + AutoScalingGroupName="test_asg", + LaunchTemplate={ + "LaunchTemplateId": template["LaunchTemplateId"], + "Version": str(template["LatestVersionNumber"]), + }, + MinSize=1, + MaxSize=3, + DesiredCapacity=2, + VPCZoneIdentifier=mocked_networking["subnet1"], + NewInstancesProtectedFromScaleIn=False, + ) + response["ResponseMetadata"]["HTTPStatusCode"].should.equal(200) + + +@mock_autoscaling +@mock_ec2 +def test_create_autoscaling_group_no_template_ref(): + mocked_networking = setup_networking() + + ec2_client = boto3.client("ec2", region_name="us-east-1") + template = ec2_client.create_launch_template( + LaunchTemplateName="test_launch_template", + LaunchTemplateData={ + "ImageId": "ami-0cc293023f983ed53", + "InstanceType": "t2.micro", + }, + )["LaunchTemplate"] + client = boto3.client("autoscaling", region_name="us-east-1") + + with assert_raises(ClientError) as ex: + client.create_auto_scaling_group( + AutoScalingGroupName="test_asg", + LaunchTemplate={"Version": str(template["LatestVersionNumber"])}, + MinSize=0, + MaxSize=20, + DesiredCapacity=5, + VPCZoneIdentifier=mocked_networking["subnet1"], + NewInstancesProtectedFromScaleIn=False, + ) + ex.exception.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) + ex.exception.response["Error"]["Code"].should.equal("ValidationError") + ex.exception.response["Error"]["Message"].should.equal( + "Valid requests must contain either launchTemplateId or LaunchTemplateName" + ) + + +@mock_autoscaling +@mock_ec2 +def test_create_autoscaling_group_multiple_template_ref(): + mocked_networking = setup_networking() + + ec2_client = boto3.client("ec2", region_name="us-east-1") + template = ec2_client.create_launch_template( + LaunchTemplateName="test_launch_template", + LaunchTemplateData={ + "ImageId": "ami-0cc293023f983ed53", + "InstanceType": "t2.micro", + }, + )["LaunchTemplate"] + client = boto3.client("autoscaling", region_name="us-east-1") + + with assert_raises(ClientError) as ex: + client.create_auto_scaling_group( + AutoScalingGroupName="test_asg", + LaunchTemplate={ + "LaunchTemplateId": template["LaunchTemplateId"], + "LaunchTemplateName": template["LaunchTemplateName"], + "Version": str(template["LatestVersionNumber"]), + }, + MinSize=0, + MaxSize=20, + DesiredCapacity=5, + VPCZoneIdentifier=mocked_networking["subnet1"], + NewInstancesProtectedFromScaleIn=False, + ) + ex.exception.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) + ex.exception.response["Error"]["Code"].should.equal("ValidationError") + ex.exception.response["Error"]["Message"].should.equal( + "Valid requests must contain either launchTemplateId or LaunchTemplateName" + ) + + +@mock_autoscaling +def test_create_autoscaling_group_boto3_no_launch_configuration(): + mocked_networking = setup_networking() + client = boto3.client("autoscaling", region_name="us-east-1") + with assert_raises(ClientError) as ex: + client.create_auto_scaling_group( + AutoScalingGroupName="test_asg", + MinSize=0, + 
MaxSize=20, + DesiredCapacity=5, + VPCZoneIdentifier=mocked_networking["subnet1"], + NewInstancesProtectedFromScaleIn=False, + ) + ex.exception.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) + ex.exception.response["Error"]["Code"].should.equal("ValidationError") + ex.exception.response["Error"]["Message"].should.equal( + "Valid requests must contain either LaunchTemplate, LaunchConfigurationName, " + "InstanceId or MixedInstancesPolicy parameter." + ) + + +@mock_autoscaling +@mock_ec2 +def test_create_autoscaling_group_boto3_multiple_launch_configurations(): + mocked_networking = setup_networking() + + ec2_client = boto3.client("ec2", region_name="us-east-1") + template = ec2_client.create_launch_template( + LaunchTemplateName="test_launch_template", + LaunchTemplateData={ + "ImageId": "ami-0cc293023f983ed53", + "InstanceType": "t2.micro", + }, + )["LaunchTemplate"] + client = boto3.client("autoscaling", region_name="us-east-1") + client.create_launch_configuration( LaunchConfigurationName="test_launch_configuration" ) - _ = client.create_auto_scaling_group( + + with assert_raises(ClientError) as ex: + client.create_auto_scaling_group( + AutoScalingGroupName="test_asg", + LaunchConfigurationName="test_launch_configuration", + LaunchTemplate={ + "LaunchTemplateId": template["LaunchTemplateId"], + "Version": str(template["LatestVersionNumber"]), + }, + MinSize=0, + MaxSize=20, + DesiredCapacity=5, + VPCZoneIdentifier=mocked_networking["subnet1"], + NewInstancesProtectedFromScaleIn=False, + ) + ex.exception.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) + ex.exception.response["Error"]["Code"].should.equal("ValidationError") + ex.exception.response["Error"]["Message"].should.equal( + "Valid requests must contain either LaunchTemplate, LaunchConfigurationName, " + "InstanceId or MixedInstancesPolicy parameter." 
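For contrast with the failure cases exercised above, the accepted LaunchTemplate argument references exactly one identifier; the template id below is illustrative:

    by_id = {"LaunchTemplateId": "lt-0123456789abcdef0", "Version": "1"}
    by_name = {"LaunchTemplateName": "test_launch_template", "Version": "1"}
    # rejected: both identifiers at once, or neither of them
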
+ ) + + +@mock_autoscaling +def test_describe_autoscaling_groups_boto3_launch_config(): + mocked_networking = setup_networking() + client = boto3.client("autoscaling", region_name="us-east-1") + client.create_launch_configuration( + LaunchConfigurationName="test_launch_configuration", InstanceType="t2.micro", + ) + client.create_auto_scaling_group( AutoScalingGroupName="test_asg", LaunchConfigurationName="test_launch_configuration", MinSize=0, @@ -818,22 +977,72 @@ def test_describe_autoscaling_groups_boto3(): response["ResponseMetadata"]["HTTPStatusCode"].should.equal(200) group = response["AutoScalingGroups"][0] group["AutoScalingGroupName"].should.equal("test_asg") + group["LaunchConfigurationName"].should.equal("test_launch_configuration") + group.should_not.have.key("LaunchTemplate") group["AvailabilityZones"].should.equal(["us-east-1a"]) group["VPCZoneIdentifier"].should.equal(mocked_networking["subnet1"]) group["NewInstancesProtectedFromScaleIn"].should.equal(True) for instance in group["Instances"]: + instance["LaunchConfigurationName"].should.equal("test_launch_configuration") + instance.should_not.have.key("LaunchTemplate") instance["AvailabilityZone"].should.equal("us-east-1a") instance["ProtectedFromScaleIn"].should.equal(True) + instance["InstanceType"].should.equal("t2.micro") @mock_autoscaling -def test_describe_autoscaling_instances_boto3(): +@mock_ec2 +def test_describe_autoscaling_groups_boto3_launch_template(): mocked_networking = setup_networking() + ec2_client = boto3.client("ec2", region_name="us-east-1") + template = ec2_client.create_launch_template( + LaunchTemplateName="test_launch_template", + LaunchTemplateData={ + "ImageId": "ami-0cc293023f983ed53", + "InstanceType": "t2.micro", + }, + )["LaunchTemplate"] client = boto3.client("autoscaling", region_name="us-east-1") - _ = client.create_launch_configuration( - LaunchConfigurationName="test_launch_configuration" + client.create_auto_scaling_group( + AutoScalingGroupName="test_asg", + LaunchTemplate={"LaunchTemplateName": "test_launch_template", "Version": "1"}, + MinSize=0, + MaxSize=20, + DesiredCapacity=5, + VPCZoneIdentifier=mocked_networking["subnet1"], + NewInstancesProtectedFromScaleIn=True, ) - _ = client.create_auto_scaling_group( + expected_launch_template = { + "LaunchTemplateId": template["LaunchTemplateId"], + "LaunchTemplateName": "test_launch_template", + "Version": "1", + } + + response = client.describe_auto_scaling_groups(AutoScalingGroupNames=["test_asg"]) + response["ResponseMetadata"]["HTTPStatusCode"].should.equal(200) + group = response["AutoScalingGroups"][0] + group["AutoScalingGroupName"].should.equal("test_asg") + group["LaunchTemplate"].should.equal(expected_launch_template) + group.should_not.have.key("LaunchConfigurationName") + group["AvailabilityZones"].should.equal(["us-east-1a"]) + group["VPCZoneIdentifier"].should.equal(mocked_networking["subnet1"]) + group["NewInstancesProtectedFromScaleIn"].should.equal(True) + for instance in group["Instances"]: + instance["LaunchTemplate"].should.equal(expected_launch_template) + instance.should_not.have.key("LaunchConfigurationName") + instance["AvailabilityZone"].should.equal("us-east-1a") + instance["ProtectedFromScaleIn"].should.equal(True) + instance["InstanceType"].should.equal("t2.micro") + + +@mock_autoscaling +def test_describe_autoscaling_instances_boto3_launch_config(): + mocked_networking = setup_networking() + client = boto3.client("autoscaling", region_name="us-east-1") + client.create_launch_configuration( + 
LaunchConfigurationName="test_launch_configuration", InstanceType="t2.micro", + ) + client.create_auto_scaling_group( AutoScalingGroupName="test_asg", LaunchConfigurationName="test_launch_configuration", MinSize=0, @@ -846,9 +1055,51 @@ def test_describe_autoscaling_instances_boto3(): response = client.describe_auto_scaling_instances() len(response["AutoScalingInstances"]).should.equal(5) for instance in response["AutoScalingInstances"]: + instance["LaunchConfigurationName"].should.equal("test_launch_configuration") + instance.should_not.have.key("LaunchTemplate") + instance["AutoScalingGroupName"].should.equal("test_asg") + instance["AvailabilityZone"].should.equal("us-east-1a") + instance["ProtectedFromScaleIn"].should.equal(True) + instance["InstanceType"].should.equal("t2.micro") + + +@mock_autoscaling +@mock_ec2 +def test_describe_autoscaling_instances_boto3_launch_template(): + mocked_networking = setup_networking() + ec2_client = boto3.client("ec2", region_name="us-east-1") + template = ec2_client.create_launch_template( + LaunchTemplateName="test_launch_template", + LaunchTemplateData={ + "ImageId": "ami-0cc293023f983ed53", + "InstanceType": "t2.micro", + }, + )["LaunchTemplate"] + client = boto3.client("autoscaling", region_name="us-east-1") + client.create_auto_scaling_group( + AutoScalingGroupName="test_asg", + LaunchTemplate={"LaunchTemplateName": "test_launch_template", "Version": "1"}, + MinSize=0, + MaxSize=20, + DesiredCapacity=5, + VPCZoneIdentifier=mocked_networking["subnet1"], + NewInstancesProtectedFromScaleIn=True, + ) + expected_launch_template = { + "LaunchTemplateId": template["LaunchTemplateId"], + "LaunchTemplateName": "test_launch_template", + "Version": "1", + } + + response = client.describe_auto_scaling_instances() + len(response["AutoScalingInstances"]).should.equal(5) + for instance in response["AutoScalingInstances"]: + instance["LaunchTemplate"].should.equal(expected_launch_template) + instance.should_not.have.key("LaunchConfigurationName") instance["AutoScalingGroupName"].should.equal("test_asg") instance["AvailabilityZone"].should.equal("us-east-1a") instance["ProtectedFromScaleIn"].should.equal(True) + instance["InstanceType"].should.equal("t2.micro") @mock_autoscaling @@ -885,13 +1136,16 @@ def test_describe_autoscaling_instances_instanceid_filter(): @mock_autoscaling -def test_update_autoscaling_group_boto3(): +def test_update_autoscaling_group_boto3_launch_config(): mocked_networking = setup_networking() client = boto3.client("autoscaling", region_name="us-east-1") - _ = client.create_launch_configuration( + client.create_launch_configuration( LaunchConfigurationName="test_launch_configuration" ) - _ = client.create_auto_scaling_group( + client.create_launch_configuration( + LaunchConfigurationName="test_launch_configuration_new" + ) + client.create_auto_scaling_group( AutoScalingGroupName="test_asg", LaunchConfigurationName="test_launch_configuration", MinSize=0, @@ -901,8 +1155,9 @@ def test_update_autoscaling_group_boto3(): NewInstancesProtectedFromScaleIn=True, ) - _ = client.update_auto_scaling_group( + client.update_auto_scaling_group( AutoScalingGroupName="test_asg", + LaunchConfigurationName="test_launch_configuration_new", MinSize=1, VPCZoneIdentifier="{subnet1},{subnet2}".format( subnet1=mocked_networking["subnet1"], subnet2=mocked_networking["subnet2"] @@ -912,6 +1167,64 @@ def test_update_autoscaling_group_boto3(): response = client.describe_auto_scaling_groups(AutoScalingGroupNames=["test_asg"]) group = response["AutoScalingGroups"][0] + 
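The describe calls above can report InstanceType and LaunchTemplate per instance because the group resolves those attributes lazily from whichever launch source it holds. A condensed sketch of that delegation pattern, with names simplified from the models.py changes earlier in this patch:

    class GroupSketch:
        def __init__(self, launch_template=None, launch_config=None, version=1):
            self.launch_template = launch_template
            self.launch_config = launch_config
            self.launch_template_version = version

        @property
        def instance_type(self):
            if self.launch_template:
                # template-backed group: read from the pinned template version
                version = self.launch_template.get_version(self.launch_template_version)
                return version.instance_type
            return self.launch_config.instance_type
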
group["LaunchConfigurationName"].should.equal("test_launch_configuration_new") + group["MinSize"].should.equal(1) + set(group["AvailabilityZones"]).should.equal({"us-east-1a", "us-east-1b"}) + group["NewInstancesProtectedFromScaleIn"].should.equal(False) + + +@mock_autoscaling +@mock_ec2 +def test_update_autoscaling_group_boto3_launch_template(): + mocked_networking = setup_networking() + ec2_client = boto3.client("ec2", region_name="us-east-1") + ec2_client.create_launch_template( + LaunchTemplateName="test_launch_template", + LaunchTemplateData={ + "ImageId": "ami-0cc293023f983ed53", + "InstanceType": "t2.micro", + }, + ) + template = ec2_client.create_launch_template( + LaunchTemplateName="test_launch_template_new", + LaunchTemplateData={ + "ImageId": "ami-1ea5b10a3d8867db4", + "InstanceType": "t2.micro", + }, + )["LaunchTemplate"] + client = boto3.client("autoscaling", region_name="us-east-1") + client.create_auto_scaling_group( + AutoScalingGroupName="test_asg", + LaunchTemplate={"LaunchTemplateName": "test_launch_template", "Version": "1"}, + MinSize=0, + MaxSize=20, + DesiredCapacity=5, + VPCZoneIdentifier=mocked_networking["subnet1"], + NewInstancesProtectedFromScaleIn=True, + ) + + client.update_auto_scaling_group( + AutoScalingGroupName="test_asg", + LaunchTemplate={ + "LaunchTemplateName": "test_launch_template_new", + "Version": "1", + }, + MinSize=1, + VPCZoneIdentifier="{subnet1},{subnet2}".format( + subnet1=mocked_networking["subnet1"], subnet2=mocked_networking["subnet2"] + ), + NewInstancesProtectedFromScaleIn=False, + ) + + expected_launch_template = { + "LaunchTemplateId": template["LaunchTemplateId"], + "LaunchTemplateName": "test_launch_template_new", + "Version": "1", + } + + response = client.describe_auto_scaling_groups(AutoScalingGroupNames=["test_asg"]) + group = response["AutoScalingGroups"][0] + group["LaunchTemplate"].should.equal(expected_launch_template) group["MinSize"].should.equal(1) set(group["AvailabilityZones"]).should.equal({"us-east-1a", "us-east-1b"}) group["NewInstancesProtectedFromScaleIn"].should.equal(False) @@ -966,7 +1279,7 @@ def test_update_autoscaling_group_max_size_desired_capacity_change(): @mock_autoscaling -def test_autoscaling_taqs_update_boto3(): +def test_autoscaling_tags_update_boto3(): mocked_networking = setup_networking() client = boto3.client("autoscaling", region_name="us-east-1") _ = client.create_launch_configuration( diff --git a/tests/test_autoscaling/test_cloudformation.py b/tests/test_autoscaling/test_cloudformation.py new file mode 100644 index 000000000000..240ba66e0a59 --- /dev/null +++ b/tests/test_autoscaling/test_cloudformation.py @@ -0,0 +1,276 @@ +import boto3 +import sure # noqa + +from moto import ( + mock_autoscaling, + mock_cloudformation, + mock_ec2, +) + +from utils import setup_networking + + +@mock_autoscaling +@mock_cloudformation +def test_launch_configuration(): + cf_client = boto3.client("cloudformation", region_name="us-east-1") + client = boto3.client("autoscaling", region_name="us-east-1") + + stack_name = "test-launch-configuration" + + cf_template = """ +Resources: + LaunchConfiguration: + Type: AWS::AutoScaling::LaunchConfiguration + Properties: + ImageId: ami-0cc293023f983ed53 + InstanceType: t2.micro + LaunchConfigurationName: test_launch_configuration +Outputs: + LaunchConfigurationName: + Value: !Ref LaunchConfiguration +""".strip() + + cf_client.create_stack( + StackName=stack_name, TemplateBody=cf_template, + ) + stack = cf_client.describe_stacks(StackName=stack_name)["Stacks"][0] + 
stack["Outputs"][0]["OutputValue"].should.be.equal("test_launch_configuration") + + lc = client.describe_launch_configurations()["LaunchConfigurations"][0] + lc["LaunchConfigurationName"].should.be.equal("test_launch_configuration") + lc["ImageId"].should.be.equal("ami-0cc293023f983ed53") + lc["InstanceType"].should.be.equal("t2.micro") + + cf_template = """ +Resources: + LaunchConfiguration: + Type: AWS::AutoScaling::LaunchConfiguration + Properties: + ImageId: ami-1ea5b10a3d8867db4 + InstanceType: m5.large + LaunchConfigurationName: test_launch_configuration +Outputs: + LaunchConfigurationName: + Value: !Ref LaunchConfiguration +""".strip() + + cf_client.update_stack( + StackName=stack_name, TemplateBody=cf_template, + ) + stack = cf_client.describe_stacks(StackName=stack_name)["Stacks"][0] + stack["Outputs"][0]["OutputValue"].should.be.equal("test_launch_configuration") + + lc = client.describe_launch_configurations()["LaunchConfigurations"][0] + lc["LaunchConfigurationName"].should.be.equal("test_launch_configuration") + lc["ImageId"].should.be.equal("ami-1ea5b10a3d8867db4") + lc["InstanceType"].should.be.equal("m5.large") + + +@mock_autoscaling +@mock_cloudformation +def test_autoscaling_group_from_launch_config(): + subnet_id = setup_networking()["subnet1"] + + cf_client = boto3.client("cloudformation", region_name="us-east-1") + client = boto3.client("autoscaling", region_name="us-east-1") + + client.create_launch_configuration( + LaunchConfigurationName="test_launch_configuration", InstanceType="t2.micro", + ) + stack_name = "test-auto-scaling-group" + + cf_template = """ +Parameters: + SubnetId: + Type: AWS::EC2::Subnet::Id +Resources: + AutoScalingGroup: + Type: AWS::AutoScaling::AutoScalingGroup + Properties: + AutoScalingGroupName: test_auto_scaling_group + AvailabilityZones: + - us-east-1a + LaunchConfigurationName: test_launch_configuration + MaxSize: "5" + MinSize: "1" + VPCZoneIdentifier: + - !Ref SubnetId +Outputs: + AutoScalingGroupName: + Value: !Ref AutoScalingGroup +""".strip() + + cf_client.create_stack( + StackName=stack_name, + TemplateBody=cf_template, + Parameters=[{"ParameterKey": "SubnetId", "ParameterValue": subnet_id}], + ) + stack = cf_client.describe_stacks(StackName=stack_name)["Stacks"][0] + stack["Outputs"][0]["OutputValue"].should.be.equal("test_auto_scaling_group") + + asg = client.describe_auto_scaling_groups()["AutoScalingGroups"][0] + asg["AutoScalingGroupName"].should.be.equal("test_auto_scaling_group") + asg["MinSize"].should.be.equal(1) + asg["MaxSize"].should.be.equal(5) + asg["LaunchConfigurationName"].should.be.equal("test_launch_configuration") + + client.create_launch_configuration( + LaunchConfigurationName="test_launch_configuration_new", + InstanceType="t2.micro", + ) + + cf_template = """ +Parameters: + SubnetId: + Type: AWS::EC2::Subnet::Id +Resources: + AutoScalingGroup: + Type: AWS::AutoScaling::AutoScalingGroup + Properties: + AutoScalingGroupName: test_auto_scaling_group + AvailabilityZones: + - us-east-1a + LaunchConfigurationName: test_launch_configuration_new + MaxSize: "6" + MinSize: "2" + VPCZoneIdentifier: + - !Ref SubnetId +Outputs: + AutoScalingGroupName: + Value: !Ref AutoScalingGroup +""".strip() + + cf_client.update_stack( + StackName=stack_name, + TemplateBody=cf_template, + Parameters=[{"ParameterKey": "SubnetId", "ParameterValue": subnet_id}], + ) + stack = cf_client.describe_stacks(StackName=stack_name)["Stacks"][0] + stack["Outputs"][0]["OutputValue"].should.be.equal("test_auto_scaling_group") + + asg = 
client.describe_auto_scaling_groups()["AutoScalingGroups"][0] + asg["AutoScalingGroupName"].should.be.equal("test_auto_scaling_group") + asg["MinSize"].should.be.equal(2) + asg["MaxSize"].should.be.equal(6) + asg["LaunchConfigurationName"].should.be.equal("test_launch_configuration_new") + + +@mock_autoscaling +@mock_cloudformation +@mock_ec2 +def test_autoscaling_group_from_launch_template(): + subnet_id = setup_networking()["subnet1"] + + cf_client = boto3.client("cloudformation", region_name="us-east-1") + ec2_client = boto3.client("ec2", region_name="us-east-1") + client = boto3.client("autoscaling", region_name="us-east-1") + + template_response = ec2_client.create_launch_template( + LaunchTemplateName="test_launch_template", + LaunchTemplateData={ + "ImageId": "ami-0cc293023f983ed53", + "InstanceType": "t2.micro", + }, + ) + launch_template_id = template_response["LaunchTemplate"]["LaunchTemplateId"] + stack_name = "test-auto-scaling-group" + + cf_template = """ +Parameters: + SubnetId: + Type: AWS::EC2::Subnet::Id + LaunchTemplateId: + Type: String +Resources: + AutoScalingGroup: + Type: AWS::AutoScaling::AutoScalingGroup + Properties: + AutoScalingGroupName: test_auto_scaling_group + AvailabilityZones: + - us-east-1a + LaunchTemplate: + LaunchTemplateId: !Ref LaunchTemplateId + Version: "1" + MaxSize: "5" + MinSize: "1" + VPCZoneIdentifier: + - !Ref SubnetId +Outputs: + AutoScalingGroupName: + Value: !Ref AutoScalingGroup +""".strip() + + cf_client.create_stack( + StackName=stack_name, + TemplateBody=cf_template, + Parameters=[ + {"ParameterKey": "SubnetId", "ParameterValue": subnet_id}, + {"ParameterKey": "LaunchTemplateId", "ParameterValue": launch_template_id}, + ], + ) + stack = cf_client.describe_stacks(StackName=stack_name)["Stacks"][0] + stack["Outputs"][0]["OutputValue"].should.be.equal("test_auto_scaling_group") + + asg = client.describe_auto_scaling_groups()["AutoScalingGroups"][0] + asg["AutoScalingGroupName"].should.be.equal("test_auto_scaling_group") + asg["MinSize"].should.be.equal(1) + asg["MaxSize"].should.be.equal(5) + lt = asg["LaunchTemplate"] + lt["LaunchTemplateId"].should.be.equal(launch_template_id) + lt["LaunchTemplateName"].should.be.equal("test_launch_template") + lt["Version"].should.be.equal("1") + + template_response = ec2_client.create_launch_template( + LaunchTemplateName="test_launch_template_new", + LaunchTemplateData={ + "ImageId": "ami-1ea5b10a3d8867db4", + "InstanceType": "m5.large", + }, + ) + launch_template_id = template_response["LaunchTemplate"]["LaunchTemplateId"] + + cf_template = """ +Parameters: + SubnetId: + Type: AWS::EC2::Subnet::Id + LaunchTemplateId: + Type: String +Resources: + AutoScalingGroup: + Type: AWS::AutoScaling::AutoScalingGroup + Properties: + AutoScalingGroupName: test_auto_scaling_group + AvailabilityZones: + - us-east-1a + LaunchTemplate: + LaunchTemplateId: !Ref LaunchTemplateId + Version: "1" + MaxSize: "6" + MinSize: "2" + VPCZoneIdentifier: + - !Ref SubnetId +Outputs: + AutoScalingGroupName: + Value: !Ref AutoScalingGroup +""".strip() + + cf_client.update_stack( + StackName=stack_name, + TemplateBody=cf_template, + Parameters=[ + {"ParameterKey": "SubnetId", "ParameterValue": subnet_id}, + {"ParameterKey": "LaunchTemplateId", "ParameterValue": launch_template_id}, + ], + ) + stack = cf_client.describe_stacks(StackName=stack_name)["Stacks"][0] + stack["Outputs"][0]["OutputValue"].should.be.equal("test_auto_scaling_group") + + asg = client.describe_auto_scaling_groups()["AutoScalingGroups"][0] + 
asg["AutoScalingGroupName"].should.be.equal("test_auto_scaling_group") + asg["MinSize"].should.be.equal(2) + asg["MaxSize"].should.be.equal(6) + lt = asg["LaunchTemplate"] + lt["LaunchTemplateId"].should.be.equal(launch_template_id) + lt["LaunchTemplateName"].should.be.equal("test_launch_template_new") + lt["Version"].should.be.equal("1") From 3b06ce689e533f24831cb225306716c123942b52 Mon Sep 17 00:00:00 2001 From: Ciaran Evans <9111975+ciaranevans@users.noreply.github.com> Date: Thu, 27 Aug 2020 08:22:44 +0100 Subject: [PATCH 493/658] Address SFN.Client.exceptions.ExecutionAlreadyExists Not implemented (#3263) * Add check for existing execution, fix issue with make init * Remove f-string usage * Remove fstring usage in test * Pin black and run formatting on test_stepfunction * Reverse changes made by black 20.8b1 --- moto/stepfunctions/exceptions.py | 5 +++++ moto/stepfunctions/models.py | 9 ++++++++ requirements-dev.txt | 2 +- setup.py | 1 + .../test_stepfunctions/test_stepfunctions.py | 21 +++++++++++++++++++ 5 files changed, 37 insertions(+), 1 deletion(-) diff --git a/moto/stepfunctions/exceptions.py b/moto/stepfunctions/exceptions.py index 704e4ea83c72..6000bab4ea6a 100644 --- a/moto/stepfunctions/exceptions.py +++ b/moto/stepfunctions/exceptions.py @@ -18,6 +18,11 @@ def response(self): ) +class ExecutionAlreadyExists(AWSError): + TYPE = "ExecutionAlreadyExists" + STATUS = 400 + + class ExecutionDoesNotExist(AWSError): TYPE = "ExecutionDoesNotExist" STATUS = 400 diff --git a/moto/stepfunctions/models.py b/moto/stepfunctions/models.py index e36598f2340e..58b6bb434894 100644 --- a/moto/stepfunctions/models.py +++ b/moto/stepfunctions/models.py @@ -8,6 +8,7 @@ from moto.sts.models import ACCOUNT_ID from uuid import uuid4 from .exceptions import ( + ExecutionAlreadyExists, ExecutionDoesNotExist, InvalidArn, InvalidName, @@ -205,6 +206,7 @@ def delete_state_machine(self, arn): def start_execution(self, state_machine_arn, name=None): state_machine_name = self.describe_state_machine(state_machine_arn).name + self._ensure_execution_name_doesnt_exist(name) execution = Execution( region_name=self.region_name, account_id=self._get_account_id(), @@ -278,6 +280,13 @@ def _validate_arn(self, arn, regex, invalid_msg): if not arn or not match: raise InvalidArn(invalid_msg) + def _ensure_execution_name_doesnt_exist(self, name): + for execution in self.executions: + if execution.name == name: + raise ExecutionAlreadyExists( + "Execution Already Exists: '" + execution.execution_arn + "'" + ) + def _get_account_id(self): return ACCOUNT_ID diff --git a/requirements-dev.txt b/requirements-dev.txt index 313f2dfb629d..e40a568a5766 100644 --- a/requirements-dev.txt +++ b/requirements-dev.txt @@ -1,6 +1,6 @@ -r requirements.txt nose -black; python_version >= '3.6' +black==19.10b0; python_version >= '3.6' regex==2019.11.1; python_version >= '3.6' # Needed for black sure==1.4.11 coverage==4.5.4 diff --git a/setup.py b/setup.py index 707a56212925..ffaa8b273ce2 100755 --- a/setup.py +++ b/setup.py @@ -40,6 +40,7 @@ def get_version(): "werkzeug", "PyYAML>=5.1", "pytz", + "ecdsa<0.15", "python-dateutil<3.0.0,>=2.1", "python-jose[cryptography]>=3.1.0,<4.0.0", "docker>=2.5.1", diff --git a/tests/test_stepfunctions/test_stepfunctions.py b/tests/test_stepfunctions/test_stepfunctions.py index 4324964d8b36..043fd9bfb309 100644 --- a/tests/test_stepfunctions/test_stepfunctions.py +++ b/tests/test_stepfunctions/test_stepfunctions.py @@ -404,6 +404,27 @@ def test_state_machine_start_execution_with_custom_name(): 
execution["startDate"].should.be.a(datetime) +@mock_stepfunctions +@mock_sts +def test_state_machine_start_execution_fails_on_duplicate_execution_name(): + client = boto3.client("stepfunctions", region_name=region) + # + sm = client.create_state_machine( + name="name", definition=str(simple_definition), roleArn=_get_default_role() + ) + execution_one = client.start_execution( + stateMachineArn=sm["stateMachineArn"], name="execution_name" + ) + # + with assert_raises(ClientError) as exc: + _ = client.start_execution( + stateMachineArn=sm["stateMachineArn"], name="execution_name" + ) + exc.exception.response["Error"]["Message"].should.equal( + "Execution Already Exists: '" + execution_one["executionArn"] + "'" + ) + + @mock_stepfunctions @mock_sts def test_state_machine_list_executions(): From 49d92861c0acaa006052a7a94355a5870cdc92d5 Mon Sep 17 00:00:00 2001 From: jweite Date: Thu, 27 Aug 2020 05:11:47 -0400 Subject: [PATCH 494/658] Iam cloudformation update, singificant cloudformation refactoring (#3218) * IAM User Cloudformation Enhancements: update, delete, getatt. * AWS::IAM::Policy Support * Added unit tests for AWS:IAM:Policy for roles and groups. Fixed bug related to groups. * AWS:IAM:AccessKey CloudFormation support. * Refactor of CloudFormation parsing.py methods to simplify and standardize how they call to the models. Adjusted some models accordingly. * Further model CloudFormation support changes to align with revised CloudFormation logic. Mostly avoidance of getting resoure name from properties. * Support for Kinesis Stream RetentionPeriodHours param. * Kinesis Stream Cloudformation Tag Support. * Added omitted 'region' param to boto3.client() calls in new tests. Co-authored-by: Joseph Weitekamp --- moto/awslambda/models.py | 7 +- moto/cloudformation/parsing.py | 136 +- moto/cloudwatch/models.py | 3 +- moto/datapipeline/models.py | 4 +- moto/dynamodb2/models/__init__.py | 8 +- moto/ecr/models.py | 6 +- moto/ecs/models.py | 26 +- moto/elbv2/models.py | 6 +- moto/events/models.py | 10 +- moto/iam/models.py | 403 +++++- moto/kinesis/models.py | 37 +- moto/kinesis/responses.py | 5 +- moto/rds/models.py | 12 +- moto/rds2/models.py | 13 +- moto/route53/models.py | 7 +- moto/s3/models.py | 6 +- moto/sns/models.py | 2 +- moto/sqs/models.py | 11 +- .../test_cloudformation_stack_crud_boto3.py | 10 +- tests/test_cloudformation/test_validate.py | 2 +- tests/test_iam/test_iam.py | 3 - tests/test_iam/test_iam_cloudformation.py | 1196 +++++++++++++++++ .../test_kinesis_cloudformation.py | 29 + tests/test_s3/test_s3.py | 143 +- tests/test_s3/test_s3_cloudformation.py | 145 ++ 25 files changed, 1912 insertions(+), 318 deletions(-) create mode 100644 tests/test_iam/test_iam_cloudformation.py create mode 100644 tests/test_s3/test_s3_cloudformation.py diff --git a/moto/awslambda/models.py b/moto/awslambda/models.py index 2aa207da9417..ce9c78fc6560 100644 --- a/moto/awslambda/models.py +++ b/moto/awslambda/models.py @@ -702,10 +702,13 @@ def delete_from_cloudformation_json( ) for esm in esms: - if esm.logical_resource_id in resource_name: - lambda_backend.delete_event_source_mapping + if esm.uuid == resource_name: esm.delete(region_name) + @property + def physical_resource_id(self): + return self.uuid + class LambdaVersion(CloudFormationModel): def __init__(self, spec): diff --git a/moto/cloudformation/parsing.py b/moto/cloudformation/parsing.py index 272856367881..760142033029 100644 --- a/moto/cloudformation/parsing.py +++ b/moto/cloudformation/parsing.py @@ -246,12 +246,14 @@ def 
generate_resource_name(resource_type, stack_name, logical_id): return "{0}{1}".format( stack_name[:max_stack_name_portion_len], right_hand_part_of_name ).lower() + elif resource_type == "AWS::IAM::Policy": + return "{0}-{1}-{2}".format(stack_name[:5], logical_id[:4], random_suffix()) else: return "{0}-{1}-{2}".format(stack_name, logical_id, random_suffix()) def parse_resource( - logical_id, resource_json, resources_map, add_name_to_resource_json=True + resource_json, resources_map, ): resource_type = resource_json["Type"] resource_class = resource_class_from_type(resource_type) @@ -263,21 +265,37 @@ def parse_resource( ) return None + if "Properties" not in resource_json: + resource_json["Properties"] = {} + resource_json = clean_json(resource_json, resources_map) - resource_name = generate_resource_name( + + return resource_class, resource_json, resource_type + + +def parse_resource_and_generate_name( + logical_id, resource_json, resources_map, +): + resource_tuple = parse_resource(resource_json, resources_map) + if not resource_tuple: + return None + resource_class, resource_json, resource_type = resource_tuple + + generated_resource_name = generate_resource_name( resource_type, resources_map.get("AWS::StackName"), logical_id ) + resource_name_property = resource_name_property_from_type(resource_type) if resource_name_property: - if "Properties" not in resource_json: - resource_json["Properties"] = dict() if ( - add_name_to_resource_json - and resource_name_property not in resource_json["Properties"] + "Properties" in resource_json + and resource_name_property in resource_json["Properties"] ): - resource_json["Properties"][resource_name_property] = resource_name - if resource_name_property in resource_json["Properties"]: resource_name = resource_json["Properties"][resource_name_property] + else: + resource_name = generated_resource_name + else: + resource_name = generated_resource_name return resource_class, resource_json, resource_name @@ -289,12 +307,14 @@ def parse_and_create_resource(logical_id, resource_json, resources_map, region_n return None resource_type = resource_json["Type"] - resource_tuple = parse_resource(logical_id, resource_json, resources_map) + resource_tuple = parse_resource_and_generate_name( + logical_id, resource_json, resources_map + ) if not resource_tuple: return None - resource_class, resource_json, resource_name = resource_tuple + resource_class, resource_json, resource_physical_name = resource_tuple resource = resource_class.create_from_cloudformation_json( - resource_name, resource_json, region_name + resource_physical_name, resource_json, region_name ) resource.type = resource_type resource.logical_resource_id = logical_id @@ -302,28 +322,34 @@ def parse_and_create_resource(logical_id, resource_json, resources_map, region_n def parse_and_update_resource(logical_id, resource_json, resources_map, region_name): - resource_class, new_resource_json, new_resource_name = parse_resource( - logical_id, resource_json, resources_map, False + resource_class, resource_json, new_resource_name = parse_resource_and_generate_name( + logical_id, resource_json, resources_map ) original_resource = resources_map[logical_id] - new_resource = resource_class.update_from_cloudformation_json( - original_resource=original_resource, - new_resource_name=new_resource_name, - cloudformation_json=new_resource_json, - region_name=region_name, - ) - new_resource.type = resource_json["Type"] - new_resource.logical_resource_id = logical_id - return new_resource + if not hasattr( + 
resource_class.update_from_cloudformation_json, "__isabstractmethod__" + ): + new_resource = resource_class.update_from_cloudformation_json( + original_resource=original_resource, + new_resource_name=new_resource_name, + cloudformation_json=resource_json, + region_name=region_name, + ) + new_resource.type = resource_json["Type"] + new_resource.logical_resource_id = logical_id + return new_resource + else: + return None -def parse_and_delete_resource(logical_id, resource_json, resources_map, region_name): - resource_class, resource_json, resource_name = parse_resource( - logical_id, resource_json, resources_map - ) - resource_class.delete_from_cloudformation_json( - resource_name, resource_json, region_name - ) +def parse_and_delete_resource(resource_name, resource_json, resources_map, region_name): + resource_class, resource_json, _ = parse_resource(resource_json, resources_map) + if not hasattr( + resource_class.delete_from_cloudformation_json, "__isabstractmethod__" + ): + resource_class.delete_from_cloudformation_json( + resource_name, resource_json, region_name + ) def parse_condition(condition, resources_map, condition_map): @@ -614,28 +640,36 @@ def update(self, template, parameters=None): ) self._parsed_resources[resource_name] = new_resource - for resource_name, resource in resources_by_action["Remove"].items(): - resource_json = old_template[resource_name] + for logical_name, _ in resources_by_action["Remove"].items(): + resource_json = old_template[logical_name] + resource = self._parsed_resources[logical_name] + # ToDo: Standardize this. + if hasattr(resource, "physical_resource_id"): + resource_name = self._parsed_resources[ + logical_name + ].physical_resource_id + else: + resource_name = None parse_and_delete_resource( resource_name, resource_json, self, self._region_name ) - self._parsed_resources.pop(resource_name) + self._parsed_resources.pop(logical_name) tries = 1 while resources_by_action["Modify"] and tries < 5: - for resource_name, resource in resources_by_action["Modify"].copy().items(): - resource_json = new_template[resource_name] + for logical_name, _ in resources_by_action["Modify"].copy().items(): + resource_json = new_template[logical_name] try: changed_resource = parse_and_update_resource( - resource_name, resource_json, self, self._region_name + logical_name, resource_json, self, self._region_name ) except Exception as e: # skip over dependency violations, and try again in a # second pass last_exception = e else: - self._parsed_resources[resource_name] = changed_resource - del resources_by_action["Modify"][resource_name] + self._parsed_resources[logical_name] = changed_resource + del resources_by_action["Modify"][logical_name] tries += 1 if tries == 5: raise last_exception @@ -650,22 +684,20 @@ def delete(self): if parsed_resource and hasattr(parsed_resource, "delete"): parsed_resource.delete(self._region_name) else: - resource_name_attribute = ( - parsed_resource.cloudformation_name_type() - if hasattr(parsed_resource, "cloudformation_name_type") - else resource_name_property_from_type(parsed_resource.type) + if hasattr(parsed_resource, "physical_resource_id"): + resource_name = parsed_resource.physical_resource_id + else: + resource_name = None + + resource_json = self._resource_json_map[ + parsed_resource.logical_resource_id + ] + + parse_and_delete_resource( + resource_name, resource_json, self, self._region_name, ) - if resource_name_attribute: - resource_json = self._resource_json_map[ - parsed_resource.logical_resource_id - ] - resource_name = 
resource_json["Properties"][ - resource_name_attribute - ] - parse_and_delete_resource( - resource_name, resource_json, self, self._region_name - ) - self._parsed_resources.pop(parsed_resource.logical_resource_id) + + self._parsed_resources.pop(parsed_resource.logical_resource_id) except Exception as e: # skip over dependency violations, and try again in a # second pass diff --git a/moto/cloudwatch/models.py b/moto/cloudwatch/models.py index d8b28bc9709c..5d956215c778 100644 --- a/moto/cloudwatch/models.py +++ b/moto/cloudwatch/models.py @@ -511,10 +511,9 @@ def create_from_cloudformation_json( cls, resource_name, cloudformation_json, region_name ): properties = cloudformation_json["Properties"] - log_group_name = properties["LogGroupName"] tags = properties.get("Tags", {}) return logs_backends[region_name].create_log_group( - log_group_name, tags, **properties + resource_name, tags, **properties ) diff --git a/moto/datapipeline/models.py b/moto/datapipeline/models.py index b17da1f098bd..e517b8f3ea56 100644 --- a/moto/datapipeline/models.py +++ b/moto/datapipeline/models.py @@ -90,9 +90,9 @@ def create_from_cloudformation_json( datapipeline_backend = datapipeline_backends[region_name] properties = cloudformation_json["Properties"] - cloudformation_unique_id = "cf-" + properties["Name"] + cloudformation_unique_id = "cf-" + resource_name pipeline = datapipeline_backend.create_pipeline( - properties["Name"], cloudformation_unique_id + resource_name, cloudformation_unique_id ) datapipeline_backend.put_pipeline_definition( pipeline.pipeline_id, properties["PipelineObjects"] diff --git a/moto/dynamodb2/models/__init__.py b/moto/dynamodb2/models/__init__.py index 175ed64f8057..6757a6859967 100644 --- a/moto/dynamodb2/models/__init__.py +++ b/moto/dynamodb2/models/__init__.py @@ -461,7 +461,7 @@ def create_from_cloudformation_json( params["streams"] = properties["StreamSpecification"] table = dynamodb_backends[region_name].create_table( - name=properties["TableName"], **params + name=resource_name, **params ) return table @@ -469,11 +469,7 @@ def create_from_cloudformation_json( def delete_from_cloudformation_json( cls, resource_name, cloudformation_json, region_name ): - properties = cloudformation_json["Properties"] - - table = dynamodb_backends[region_name].delete_table( - name=properties["TableName"] - ) + table = dynamodb_backends[region_name].delete_table(name=resource_name) return table def _generate_arn(self, name): diff --git a/moto/ecr/models.py b/moto/ecr/models.py index a1d5aa6e5911..33a0201fde00 100644 --- a/moto/ecr/models.py +++ b/moto/ecr/models.py @@ -80,15 +80,11 @@ def cloudformation_type(): def create_from_cloudformation_json( cls, resource_name, cloudformation_json, region_name ): - properties = cloudformation_json["Properties"] - ecr_backend = ecr_backends[region_name] return ecr_backend.create_repository( # RepositoryName is optional in CloudFormation, thus create a random # name if necessary - repository_name=properties.get( - "RepositoryName", "ecrrepository{0}".format(int(random() * 10 ** 6)) - ) + repository_name=resource_name ) @classmethod diff --git a/moto/ecs/models.py b/moto/ecs/models.py index bf20c2245c49..7041a322b1d9 100644 --- a/moto/ecs/models.py +++ b/moto/ecs/models.py @@ -82,36 +82,24 @@ def cloudformation_type(): def create_from_cloudformation_json( cls, resource_name, cloudformation_json, region_name ): - # if properties is not provided, cloudformation will use the default values for all properties - if "Properties" in cloudformation_json: - properties = 
cloudformation_json["Properties"] - else: - properties = {} - ecs_backend = ecs_backends[region_name] return ecs_backend.create_cluster( # ClusterName is optional in CloudFormation, thus create a random # name if necessary - cluster_name=properties.get( - "ClusterName", "ecscluster{0}".format(int(random() * 10 ** 6)) - ) + cluster_name=resource_name ) @classmethod def update_from_cloudformation_json( cls, original_resource, new_resource_name, cloudformation_json, region_name ): - properties = cloudformation_json["Properties"] - - if original_resource.name != properties["ClusterName"]: + if original_resource.name != new_resource_name: ecs_backend = ecs_backends[region_name] ecs_backend.delete_cluster(original_resource.arn) return ecs_backend.create_cluster( # ClusterName is optional in CloudFormation, thus create a # random name if necessary - cluster_name=properties.get( - "ClusterName", "ecscluster{0}".format(int(random() * 10 ** 6)) - ) + cluster_name=new_resource_name ) else: # no-op when nothing changed between old and new resources @@ -355,14 +343,13 @@ def create_from_cloudformation_json( task_definition = properties["TaskDefinition"].family else: task_definition = properties["TaskDefinition"] - service_name = "{0}Service{1}".format(cluster, int(random() * 10 ** 6)) desired_count = properties["DesiredCount"] # TODO: LoadBalancers # TODO: Role ecs_backend = ecs_backends[region_name] return ecs_backend.create_service( - cluster, service_name, desired_count, task_definition_str=task_definition + cluster, resource_name, desired_count, task_definition_str=task_definition ) @classmethod @@ -386,12 +373,9 @@ def update_from_cloudformation_json( # TODO: LoadBalancers # TODO: Role ecs_backend.delete_service(cluster_name, service_name) - new_service_name = "{0}Service{1}".format( - cluster_name, int(random() * 10 ** 6) - ) return ecs_backend.create_service( cluster_name, - new_service_name, + new_resource_name, desired_count, task_definition_str=task_definition, ) diff --git a/moto/elbv2/models.py b/moto/elbv2/models.py index 1deaac9c4d5d..cafdc28e442e 100644 --- a/moto/elbv2/models.py +++ b/moto/elbv2/models.py @@ -160,7 +160,6 @@ def create_from_cloudformation_json( elbv2_backend = elbv2_backends[region_name] - name = properties.get("Name") vpc_id = properties.get("VpcId") protocol = properties.get("Protocol") port = properties.get("Port") @@ -175,7 +174,7 @@ def create_from_cloudformation_json( target_type = properties.get("TargetType") target_group = elbv2_backend.create_target_group( - name=name, + name=resource_name, vpc_id=vpc_id, protocol=protocol, port=port, @@ -437,13 +436,12 @@ def create_from_cloudformation_json( elbv2_backend = elbv2_backends[region_name] - name = properties.get("Name", resource_name) security_groups = properties.get("SecurityGroups") subnet_ids = properties.get("Subnets") scheme = properties.get("Scheme", "internet-facing") load_balancer = elbv2_backend.create_load_balancer( - name, security_groups, subnet_ids, scheme=scheme + resource_name, security_groups, subnet_ids, scheme=scheme ) return load_balancer diff --git a/moto/events/models.py b/moto/events/models.py index 7fa7d225f561..9c27fbb337d6 100644 --- a/moto/events/models.py +++ b/moto/events/models.py @@ -88,7 +88,7 @@ def create_from_cloudformation_json( ): properties = cloudformation_json["Properties"] event_backend = events_backends[region_name] - event_name = properties.get("Name") or resource_name + event_name = resource_name return event_backend.put_rule(name=event_name, **properties) @classmethod @@ 
-104,9 +104,8 @@ def update_from_cloudformation_json( def delete_from_cloudformation_json( cls, resource_name, cloudformation_json, region_name ): - properties = cloudformation_json["Properties"] event_backend = events_backends[region_name] - event_name = properties.get("Name") or resource_name + event_name = resource_name event_backend.delete_rule(name=event_name) @@ -176,7 +175,7 @@ def create_from_cloudformation_json( ): properties = cloudformation_json["Properties"] event_backend = events_backends[region_name] - event_name = properties["Name"] + event_name = resource_name event_source_name = properties.get("EventSourceName") return event_backend.create_event_bus( name=event_name, event_source_name=event_source_name @@ -195,9 +194,8 @@ def update_from_cloudformation_json( def delete_from_cloudformation_json( cls, resource_name, cloudformation_json, region_name ): - properties = cloudformation_json["Properties"] event_backend = events_backends[region_name] - event_bus_name = properties["Name"] + event_bus_name = resource_name event_backend.delete_event_bus(event_bus_name) diff --git a/moto/iam/models.py b/moto/iam/models.py index 16b3ac0abe2f..3a174e17bb5f 100755 --- a/moto/iam/models.py +++ b/moto/iam/models.py @@ -12,7 +12,6 @@ from cryptography import x509 from cryptography.hazmat.backends import default_backend from six.moves.urllib.parse import urlparse -from uuid import uuid4 from moto.core.exceptions import RESTError from moto.core import BaseBackend, BaseModel, ACCOUNT_ID, CloudFormationModel @@ -84,7 +83,11 @@ def enabled_iso_8601(self): return iso_8601_datetime_without_milliseconds(self.enable_date) -class Policy(BaseModel): +class Policy(CloudFormationModel): + + # Note: This class does not implement the CloudFormation support for AWS::IAM::Policy, as that CF resource + # is for creating *inline* policies. That is done in class InlinePolicy. + is_attachable = False def __init__( @@ -295,8 +298,149 @@ def arn(self): ] -class InlinePolicy(Policy): - """TODO: is this needed?""" +class InlinePolicy(CloudFormationModel): + # Represents an Inline Policy created by CloudFormation + def __init__( + self, + resource_name, + policy_name, + policy_document, + group_names, + role_names, + user_names, + ): + self.name = resource_name + self.policy_name = None + self.policy_document = None + self.group_names = None + self.role_names = None + self.user_names = None + self.update(policy_name, policy_document, group_names, role_names, user_names) + + def update( + self, policy_name, policy_document, group_names, role_names, user_names, + ): + self.policy_name = policy_name + self.policy_document = ( + json.dumps(policy_document) + if isinstance(policy_document, dict) + else policy_document + ) + self.group_names = group_names + self.role_names = role_names + self.user_names = user_names + + @staticmethod + def cloudformation_name_type(): + return None # Resource never gets named after by template PolicyName! 
+ + @staticmethod + def cloudformation_type(): + return "AWS::IAM::Policy" + + @classmethod + def create_from_cloudformation_json( + cls, resource_physical_name, cloudformation_json, region_name + ): + properties = cloudformation_json.get("Properties", {}) + policy_document = properties.get("PolicyDocument") + policy_name = properties.get("PolicyName") + user_names = properties.get("Users") + role_names = properties.get("Roles") + group_names = properties.get("Groups") + + return iam_backend.create_inline_policy( + resource_physical_name, + policy_name, + policy_document, + group_names, + role_names, + user_names, + ) + + @classmethod + def update_from_cloudformation_json( + cls, original_resource, new_resource_name, cloudformation_json, region_name, + ): + properties = cloudformation_json["Properties"] + + if cls.is_replacement_update(properties): + resource_name_property = cls.cloudformation_name_type() + if resource_name_property not in properties: + properties[resource_name_property] = new_resource_name + new_resource = cls.create_from_cloudformation_json( + properties[resource_name_property], cloudformation_json, region_name + ) + properties[resource_name_property] = original_resource.name + cls.delete_from_cloudformation_json( + original_resource.name, cloudformation_json, region_name + ) + return new_resource + + else: # No Interruption + properties = cloudformation_json.get("Properties", {}) + policy_document = properties.get("PolicyDocument") + policy_name = properties.get("PolicyName", original_resource.name) + user_names = properties.get("Users") + role_names = properties.get("Roles") + group_names = properties.get("Groups") + + return iam_backend.update_inline_policy( + original_resource.name, + policy_name, + policy_document, + group_names, + role_names, + user_names, + ) + + @classmethod + def delete_from_cloudformation_json( + cls, resource_name, cloudformation_json, region_name + ): + iam_backend.delete_inline_policy(resource_name) + + @staticmethod + def is_replacement_update(properties): + properties_requiring_replacement_update = [] + return any( + [ + property_requiring_replacement in properties + for property_requiring_replacement in properties_requiring_replacement_update + ] + ) + + @property + def physical_resource_id(self): + return self.name + + def apply_policy(self, backend): + if self.user_names: + for user_name in self.user_names: + backend.put_user_policy( + user_name, self.policy_name, self.policy_document + ) + if self.role_names: + for role_name in self.role_names: + backend.put_role_policy( + role_name, self.policy_name, self.policy_document + ) + if self.group_names: + for group_name in self.group_names: + backend.put_group_policy( + group_name, self.policy_name, self.policy_document + ) + + def unapply_policy(self, backend): + if self.user_names: + for user_name in self.user_names: + backend.delete_user_policy(user_name, self.policy_name) + if self.role_names: + for role_name in self.role_names: + backend.delete_role_policy(role_name, self.policy_name) + if self.group_names: + for group_name in self.group_names: + backend.delete_group_policy(group_name, self.policy_name) class Role(CloudFormationModel): @@ -338,11 +482,13 @@ def cloudformation_type(): @classmethod def create_from_cloudformation_json( - cls, resource_name, cloudformation_json, region_name + cls, resource_physical_name, cloudformation_json, region_name ): properties = cloudformation_json["Properties"] role_name = ( - properties["RoleName"] if "RoleName" in properties else 
str(uuid4())[0:5] + properties["RoleName"] + if "RoleName" in properties + else resource_physical_name ) role = iam_backend.create_role( @@ -416,13 +562,15 @@ def cloudformation_type(): @classmethod def create_from_cloudformation_json( - cls, resource_name, cloudformation_json, region_name + cls, resource_physical_name, cloudformation_json, region_name ): properties = cloudformation_json["Properties"] role_ids = properties["Roles"] return iam_backend.create_instance_profile( - name=resource_name, path=properties.get("Path", "/"), role_ids=role_ids + name=resource_physical_name, + path=properties.get("Path", "/"), + role_ids=role_ids, ) @property @@ -475,12 +623,12 @@ def uploaded_iso_8601(self): return iso_8601_datetime_without_milliseconds(self.upload_date) -class AccessKey(BaseModel): - def __init__(self, user_name): +class AccessKey(CloudFormationModel): + def __init__(self, user_name, status="Active"): self.user_name = user_name self.access_key_id = "AKIA" + random_access_key() self.secret_access_key = random_alphanumeric(40) - self.status = "Active" + self.status = status self.create_date = datetime.utcnow() self.last_used = None @@ -499,6 +647,66 @@ def get_cfn_attribute(self, attribute_name): return self.secret_access_key raise UnformattedGetAttTemplateException() + @staticmethod + def cloudformation_name_type(): + return None # Resource never gets named after by template PolicyName! + + @staticmethod + def cloudformation_type(): + return "AWS::IAM::AccessKey" + + @classmethod + def create_from_cloudformation_json( + cls, resource_physical_name, cloudformation_json, region_name + ): + properties = cloudformation_json.get("Properties", {}) + user_name = properties.get("UserName") + status = properties.get("Status", "Active") + + return iam_backend.create_access_key(user_name, status=status,) + + @classmethod + def update_from_cloudformation_json( + cls, original_resource, new_resource_name, cloudformation_json, region_name, + ): + properties = cloudformation_json["Properties"] + + if cls.is_replacement_update(properties): + new_resource = cls.create_from_cloudformation_json( + new_resource_name, cloudformation_json, region_name + ) + cls.delete_from_cloudformation_json( + original_resource.physical_resource_id, cloudformation_json, region_name + ) + return new_resource + + else: # No Interruption + properties = cloudformation_json.get("Properties", {}) + status = properties.get("Status") + return iam_backend.update_access_key( + original_resource.user_name, original_resource.access_key_id, status + ) + + @classmethod + def delete_from_cloudformation_json( + cls, resource_name, cloudformation_json, region_name + ): + iam_backend.delete_access_key_by_name(resource_name) + + @staticmethod + def is_replacement_update(properties): + properties_requiring_replacement_update = ["Serial", "UserName"] + return any( + [ + property_requiring_replacement in properties + for property_requiring_replacement in properties_requiring_replacement_update + ] + ) + + @property + def physical_resource_id(self): + return self.access_key_id + class SshPublicKey(BaseModel): def __init__(self, user_name, ssh_public_key_body): @@ -564,8 +772,14 @@ def put_policy(self, policy_name, policy_json): def list_policies(self): return self.policies.keys() + def delete_policy(self, policy_name): + if policy_name not in self.policies: + raise IAMNotFoundException("Policy {0} not found".format(policy_name)) + + del self.policies[policy_name] + -class User(BaseModel): +class User(CloudFormationModel): def __init__(self, 
name, path=None, tags=None): self.name = name self.id = random_resource_id() @@ -614,8 +828,8 @@ def delete_policy(self, policy_name): del self.policies[policy_name] - def create_access_key(self): - access_key = AccessKey(self.name) + def create_access_key(self, status="Active"): + access_key = AccessKey(self.name, status) self.access_keys.append(access_key) return access_key @@ -633,9 +847,11 @@ def delete_access_key(self, access_key_id): key = self.get_access_key_by_id(access_key_id) self.access_keys.remove(key) - def update_access_key(self, access_key_id, status): + def update_access_key(self, access_key_id, status=None): key = self.get_access_key_by_id(access_key_id) - key.status = status + if status is not None: + key.status = status + return key def get_access_key_by_id(self, access_key_id): for key in self.access_keys: @@ -646,6 +862,15 @@ def get_access_key_by_id(self, access_key_id): "The Access Key with id {0} cannot be found".format(access_key_id) ) + def has_access_key(self, access_key_id): + return any( + [ + access_key + for access_key in self.access_keys + if access_key.access_key_id == access_key_id + ] + ) + def upload_ssh_public_key(self, ssh_public_key_body): pubkey = SshPublicKey(self.name, ssh_public_key_body) self.ssh_public_keys.append(pubkey) @@ -677,7 +902,7 @@ def get_cfn_attribute(self, attribute_name): from moto.cloudformation.exceptions import UnformattedGetAttTemplateException if attribute_name == "Arn": - raise NotImplementedError('"Fn::GetAtt" : [ "{0}" , "Arn" ]"') + return self.arn raise UnformattedGetAttTemplateException() def to_csv(self): @@ -752,6 +977,66 @@ def to_csv(self): access_key_2_last_used, ) + @staticmethod + def cloudformation_name_type(): + return "UserName" + + @staticmethod + def cloudformation_type(): + return "AWS::IAM::User" + + @classmethod + def create_from_cloudformation_json( + cls, resource_physical_name, cloudformation_json, region_name + ): + properties = cloudformation_json.get("Properties", {}) + path = properties.get("Path") + return iam_backend.create_user(resource_physical_name, path) + + @classmethod + def update_from_cloudformation_json( + cls, original_resource, new_resource_name, cloudformation_json, region_name, + ): + properties = cloudformation_json["Properties"] + + if cls.is_replacement_update(properties): + resource_name_property = cls.cloudformation_name_type() + if resource_name_property not in properties: + properties[resource_name_property] = new_resource_name + new_resource = cls.create_from_cloudformation_json( + properties[resource_name_property], cloudformation_json, region_name + ) + properties[resource_name_property] = original_resource.name + cls.delete_from_cloudformation_json( + original_resource.name, cloudformation_json, region_name + ) + return new_resource + + else: # No Interruption + if "Path" in properties: + original_resource.path = properties["Path"] + return original_resource + + @classmethod + def delete_from_cloudformation_json( + cls, resource_name, cloudformation_json, region_name + ): + iam_backend.delete_user(resource_name) + + @staticmethod + def is_replacement_update(properties): + properties_requiring_replacement_update = ["UserName"] + return any( + [ + property_requiring_replacement in properties + for property_requiring_replacement in properties_requiring_replacement_update + ] + ) + + @property + def physical_resource_id(self): + return self.name + class AccountPasswordPolicy(BaseModel): def __init__( @@ -984,6 +1269,8 @@ def __init__(self): self.virtual_mfa_devices = {} 
self.account_password_policy = None self.account_summary = AccountSummary(self) + self.inline_policies = {} + self.access_keys = {} super(IAMBackend, self).__init__() def _init_managed_policies(self): @@ -1478,6 +1765,10 @@ def list_group_policies(self, group_name, marker=None, max_items=None): group = self.get_group(group_name) return group.list_policies() + def delete_group_policy(self, group_name, policy_name): + group = self.get_group(group_name) + group.delete_policy(policy_name) + def get_group_policy(self, group_name, policy_name): group = self.get_group(group_name) return group.get_policy(policy_name) @@ -1674,14 +1965,15 @@ def delete_user_policy(self, user_name, policy_name): def delete_policy(self, policy_arn): del self.managed_policies[policy_arn] - def create_access_key(self, user_name=None): + def create_access_key(self, user_name=None, status="Active"): user = self.get_user(user_name) - key = user.create_access_key() + key = user.create_access_key(status) + self.access_keys[key.physical_resource_id] = key return key - def update_access_key(self, user_name, access_key_id, status): + def update_access_key(self, user_name, access_key_id, status=None): user = self.get_user(user_name) - user.update_access_key(access_key_id, status) + return user.update_access_key(access_key_id, status) def get_access_key_last_used(self, access_key_id): access_keys_list = self.get_all_access_keys_for_all_users() @@ -1706,7 +1998,17 @@ def get_all_access_keys(self, user_name, marker=None, max_items=None): def delete_access_key(self, access_key_id, user_name): user = self.get_user(user_name) - user.delete_access_key(access_key_id) + access_key = user.get_access_key_by_id(access_key_id) + self.delete_access_key_by_name(access_key.access_key_id) + + def delete_access_key_by_name(self, name): + key = self.access_keys[name] + try: # User may have been deleted before their access key... 
+ user = self.get_user(key.user_name) + user.delete_access_key(key.access_key_id) + except IAMNotFoundException: + pass + del self.access_keys[name] def upload_ssh_public_key(self, user_name, ssh_public_key_body): user = self.get_user(user_name) @@ -2017,5 +2319,62 @@ def delete_account_password_policy(self): def get_account_summary(self): return self.account_summary + def create_inline_policy( + self, + resource_name, + policy_name, + policy_document, + group_names, + role_names, + user_names, + ): + if resource_name in self.inline_policies: + raise IAMConflictException( + "EntityAlreadyExists", + "Inline Policy {0} already exists".format(resource_name), + ) + + inline_policy = InlinePolicy( + resource_name, + policy_name, + policy_document, + group_names, + role_names, + user_names, + ) + self.inline_policies[resource_name] = inline_policy + inline_policy.apply_policy(self) + return inline_policy + + def get_inline_policy(self, policy_id): + inline_policy = None + try: + inline_policy = self.inline_policies[policy_id] + except KeyError: + raise IAMNotFoundException("Inline policy {0} not found".format(policy_id)) + return inline_policy + + def update_inline_policy( + self, + resource_name, + policy_name, + policy_document, + group_names, + role_names, + user_names, + ): + inline_policy = self.get_inline_policy(resource_name) + inline_policy.unapply_policy(self) + inline_policy.update( + policy_name, policy_document, group_names, role_names, user_names, + ) + inline_policy.apply_policy(self) + return inline_policy + + def delete_inline_policy(self, policy_id): + inline_policy = self.get_inline_policy(policy_id) + inline_policy.unapply_policy(self) + del self.inline_policies[policy_id] + iam_backend = IAMBackend() diff --git a/moto/kinesis/models.py b/moto/kinesis/models.py index a9c4f547656a..280402d5f025 100644 --- a/moto/kinesis/models.py +++ b/moto/kinesis/models.py @@ -135,7 +135,7 @@ def to_json(self): class Stream(CloudFormationModel): - def __init__(self, stream_name, shard_count, region_name): + def __init__(self, stream_name, shard_count, retention_period_hours, region_name): self.stream_name = stream_name self.creation_datetime = datetime.datetime.now() self.region = region_name @@ -145,6 +145,7 @@ def __init__(self, stream_name, shard_count, region_name): self.status = "ACTIVE" self.shard_count = None self.update_shard_count(shard_count) + self.retention_period_hours = retention_period_hours def update_shard_count(self, shard_count): # ToDo: This was extracted from init. It's only accurate for new streams. 
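The hunks on either side of this point thread the new RetentionPeriodHours and Tags properties through the Kinesis Stream model and its CloudFormation hooks. A minimal sketch of what that enables, written in the style of the tests this patch adds under tests/test_kinesis/; the stack name, resource name, and function name below are invented for illustration, not taken from the patch:

import boto3
from moto import mock_cloudformation, mock_kinesis

stream_template = """
Resources:
  TheStream:
    Type: AWS::Kinesis::Stream
    Properties:
      ShardCount: 1
      RetentionPeriodHours: 48
      Tags:
        - Key: TagKey
          Value: TagValue
"""

@mock_cloudformation
@mock_kinesis
def exercise_kinesis_stream_properties():
    # create_stack routes through Stream.create_from_cloudformation_json
    cf = boto3.client("cloudformation", region_name="us-east-1")
    cf.create_stack(StackName="MyStack", TemplateBody=stream_template)
    # the retention period is now surfaced by describe_stream (see the to_json hunk)
    kinesis = boto3.client("kinesis", region_name="us-east-1")
    stream_name = kinesis.list_streams()["StreamNames"][0]
    description = kinesis.describe_stream(StreamName=stream_name)["StreamDescription"]
    assert description["RetentionPeriodHours"] == 48
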
@@ -213,6 +214,7 @@ def to_json(self):
                 "StreamName": self.stream_name,
                 "StreamStatus": self.status,
                 "HasMoreShards": False,
+                "RetentionPeriodHours": self.retention_period_hours,
                 "Shards": [shard.to_json() for shard in self.shards.values()],
             }
         }
@@ -243,9 +245,19 @@ def create_from_cloudformation_json(
     ):
         properties = cloudformation_json.get("Properties", {})
         shard_count = properties.get("ShardCount", 1)
-        name = properties.get("Name", resource_name)
+        retention_period_hours = properties.get("RetentionPeriodHours", 24)
+        tags = {
+            tag_item["Key"]: tag_item["Value"]
+            for tag_item in properties.get("Tags", [])
+        }
+
         backend = kinesis_backends[region_name]
-        return backend.create_stream(name, shard_count, region_name)
+        stream = backend.create_stream(
+            resource_name, shard_count, retention_period_hours, region_name
+        )
+        if any(tags):
+            backend.add_tags_to_stream(stream.stream_name, tags)
+        return stream
 
     @classmethod
     def update_from_cloudformation_json(
@@ -269,6 +281,15 @@ def update_from_cloudformation_json(
         else:  # No Interruption
             if "ShardCount" in properties:
                 original_resource.update_shard_count(properties["ShardCount"])
+            if "RetentionPeriodHours" in properties:
+                original_resource.retention_period_hours = properties[
+                    "RetentionPeriodHours"
+                ]
+            if "Tags" in properties:
+                original_resource.tags = {
+                    tag_item["Key"]: tag_item["Value"]
+                    for tag_item in properties.get("Tags", [])
+                }
             return original_resource
 
     @classmethod
@@ -276,9 +297,7 @@ def delete_from_cloudformation_json(
         cls, resource_name, cloudformation_json, region_name
     ):
         backend = kinesis_backends[region_name]
-        properties = cloudformation_json.get("Properties", {})
-        stream_name = properties.get(cls.cloudformation_name_type(), resource_name)
-        backend.delete_stream(stream_name)
+        backend.delete_stream(resource_name)
 
     @staticmethod
     def is_replacement_update(properties):
@@ -398,10 +417,12 @@ def __init__(self):
         self.streams = OrderedDict()
         self.delivery_streams = {}
 
-    def create_stream(self, stream_name, shard_count, region_name):
+    def create_stream(
+        self, stream_name, shard_count, retention_period_hours, region_name
+    ):
         if stream_name in self.streams:
             raise ResourceInUseError(stream_name)
-        stream = Stream(stream_name, shard_count, region_name)
+        stream = Stream(stream_name, shard_count, retention_period_hours, region_name)
         self.streams[stream_name] = stream
         return stream
 
diff --git a/moto/kinesis/responses.py b/moto/kinesis/responses.py
index 500f7855d0c8..8e7fc39411b4 100644
--- a/moto/kinesis/responses.py
+++ b/moto/kinesis/responses.py
@@ -25,7 +25,10 @@ def is_firehose(self):
     def create_stream(self):
         stream_name = self.parameters.get("StreamName")
         shard_count = self.parameters.get("ShardCount")
-        self.kinesis_backend.create_stream(stream_name, shard_count, self.region)
+        retention_period_hours = self.parameters.get("RetentionPeriodHours")
+        self.kinesis_backend.create_stream(
+            stream_name, shard_count, retention_period_hours, self.region
+        )
         return ""
 
     def describe_stream(self):
diff --git a/moto/rds/models.py b/moto/rds/models.py
index 440da34d221d..33be04e8cb10 100644
--- a/moto/rds/models.py
+++ b/moto/rds/models.py
@@ -4,7 +4,6 @@
 from jinja2 import Template
 
 from moto.core import BaseBackend, CloudFormationModel
-from moto.core.utils import get_random_hex
 from moto.ec2.models import ec2_backends
 from moto.rds.exceptions import UnformattedGetAttTemplateException
 from moto.rds2.models import rds2_backends
@@ -33,9 +32,6 @@ def create_from_cloudformation_json(
     ):
         properties = 
cloudformation_json["Properties"] - db_instance_identifier = properties.get(cls.cloudformation_name_type()) - if not db_instance_identifier: - db_instance_identifier = resource_name.lower() + get_random_hex(12) db_security_groups = properties.get("DBSecurityGroups") if not db_security_groups: db_security_groups = [] @@ -48,7 +44,7 @@ def create_from_cloudformation_json( "availability_zone": properties.get("AvailabilityZone"), "backup_retention_period": properties.get("BackupRetentionPeriod"), "db_instance_class": properties.get("DBInstanceClass"), - "db_instance_identifier": db_instance_identifier, + "db_instance_identifier": resource_name, "db_name": properties.get("DBName"), "db_subnet_group_name": db_subnet_group_name, "engine": properties.get("Engine"), @@ -229,7 +225,7 @@ def create_from_cloudformation_json( cls, resource_name, cloudformation_json, region_name ): properties = cloudformation_json["Properties"] - group_name = resource_name.lower() + get_random_hex(12) + group_name = resource_name.lower() description = properties["GroupDescription"] security_group_ingress_rules = properties.get("DBSecurityGroupIngress", []) tags = properties.get("Tags") @@ -303,9 +299,7 @@ def create_from_cloudformation_json( cls, resource_name, cloudformation_json, region_name ): properties = cloudformation_json["Properties"] - subnet_name = properties.get(cls.cloudformation_name_type()) - if not subnet_name: - subnet_name = resource_name.lower() + get_random_hex(12) + subnet_name = resource_name.lower() description = properties["DBSubnetGroupDescription"] subnet_ids = properties["SubnetIds"] tags = properties.get("Tags") diff --git a/moto/rds2/models.py b/moto/rds2/models.py index 5f46311ece90..6efbf8492417 100644 --- a/moto/rds2/models.py +++ b/moto/rds2/models.py @@ -10,7 +10,6 @@ from re import compile as re_compile from moto.compat import OrderedDict from moto.core import BaseBackend, BaseModel, CloudFormationModel -from moto.core.utils import get_random_hex from moto.core.utils import iso_8601_datetime_with_milliseconds from moto.ec2.models import ec2_backends from .exceptions import ( @@ -371,9 +370,6 @@ def create_from_cloudformation_json( ): properties = cloudformation_json["Properties"] - db_instance_identifier = properties.get(cls.cloudformation_name_type()) - if not db_instance_identifier: - db_instance_identifier = resource_name.lower() + get_random_hex(12) db_security_groups = properties.get("DBSecurityGroups") if not db_security_groups: db_security_groups = [] @@ -386,7 +382,7 @@ def create_from_cloudformation_json( "availability_zone": properties.get("AvailabilityZone"), "backup_retention_period": properties.get("BackupRetentionPeriod"), "db_instance_class": properties.get("DBInstanceClass"), - "db_instance_identifier": db_instance_identifier, + "db_instance_identifier": resource_name, "db_name": properties.get("DBName"), "db_subnet_group_name": db_subnet_group_name, "engine": properties.get("Engine"), @@ -650,7 +646,7 @@ def create_from_cloudformation_json( cls, resource_name, cloudformation_json, region_name ): properties = cloudformation_json["Properties"] - group_name = resource_name.lower() + get_random_hex(12) + group_name = resource_name.lower() description = properties["GroupDescription"] security_group_ingress_rules = properties.get("DBSecurityGroupIngress", []) tags = properties.get("Tags") @@ -759,9 +755,6 @@ def create_from_cloudformation_json( ): properties = cloudformation_json["Properties"] - subnet_name = properties.get(cls.cloudformation_name_type()) - if not 
subnet_name: - subnet_name = resource_name.lower() + get_random_hex(12) description = properties["DBSubnetGroupDescription"] subnet_ids = properties["SubnetIds"] tags = properties.get("Tags") @@ -770,7 +763,7 @@ def create_from_cloudformation_json( subnets = [ec2_backend.get_subnet(subnet_id) for subnet_id in subnet_ids] rds2_backend = rds2_backends[region_name] subnet_group = rds2_backend.create_subnet_group( - subnet_name, description, subnets, tags + resource_name, description, subnets, tags ) return subnet_group diff --git a/moto/route53/models.py b/moto/route53/models.py index 52f60d971c2e..eb73f2bfb65b 100644 --- a/moto/route53/models.py +++ b/moto/route53/models.py @@ -298,10 +298,9 @@ def cloudformation_type(): def create_from_cloudformation_json( cls, resource_name, cloudformation_json, region_name ): - properties = cloudformation_json["Properties"] - name = properties["Name"] - - hosted_zone = route53_backend.create_hosted_zone(name, private_zone=False) + hosted_zone = route53_backend.create_hosted_zone( + resource_name, private_zone=False + ) return hosted_zone diff --git a/moto/s3/models.py b/moto/s3/models.py index 70e33fdfb066..4230479af6c2 100644 --- a/moto/s3/models.py +++ b/moto/s3/models.py @@ -1086,7 +1086,7 @@ def create_from_cloudformation_json( ): bucket = s3_backend.create_bucket(resource_name, region_name) - properties = cloudformation_json["Properties"] + properties = cloudformation_json.get("Properties", {}) if "BucketEncryption" in properties: bucket_encryption = cfn_to_api_encryption(properties["BucketEncryption"]) @@ -1129,9 +1129,7 @@ def update_from_cloudformation_json( def delete_from_cloudformation_json( cls, resource_name, cloudformation_json, region_name ): - properties = cloudformation_json["Properties"] - bucket_name = properties[cls.cloudformation_name_type()] - s3_backend.delete_bucket(bucket_name) + s3_backend.delete_bucket(resource_name) def to_config_dict(self): """Return the AWS Config JSON format of this S3 bucket. 
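The model diffs above (route53, s3) and below (sns, sqs) repeat one simplification: create and delete hooks now trust the resource_name that parsing.py hands them instead of re-deriving a name from the template's Properties, and Properties is read defensively with .get. A condensed sketch of the shape most models take after this patch; ExampleBackend, Thing, and AWS::Example::Thing are invented names for illustration, not moto classes:

from moto.core import CloudFormationModel


class ExampleBackend:
    # stand-in backend keyed by physical resource name
    def __init__(self):
        self.things = {}

    def create_thing(self, name, **properties):
        self.things[name] = properties
        return name

    def delete_thing(self, name):
        self.things.pop(name, None)


example_backend = ExampleBackend()


class Thing(CloudFormationModel):
    @staticmethod
    def cloudformation_name_type():
        return "ThingName"

    @staticmethod
    def cloudformation_type():
        return "AWS::Example::Thing"

    @classmethod
    def create_from_cloudformation_json(
        cls, resource_name, cloudformation_json, region_name
    ):
        # parsing.py has already resolved resource_name (template-supplied or
        # generated), so the model never digs it out of Properties itself
        properties = cloudformation_json.get("Properties", {})
        return example_backend.create_thing(name=resource_name, **properties)

    @classmethod
    def delete_from_cloudformation_json(
        cls, resource_name, cloudformation_json, region_name
    ):
        # deletion keys off the physical name; no Properties lookup required
        example_backend.delete_thing(resource_name)

With this convention, generated names, template-supplied names, and deletions issued after a template has changed all flow through the single naming code path in parsing.py shown earlier in this patch.
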
diff --git a/moto/sns/models.py b/moto/sns/models.py index 779a0fb06db9..1d956ffde3cc 100644 --- a/moto/sns/models.py +++ b/moto/sns/models.py @@ -104,7 +104,7 @@ def create_from_cloudformation_json( sns_backend = sns_backends[region_name] properties = cloudformation_json["Properties"] - topic = sns_backend.create_topic(properties.get(cls.cloudformation_name_type())) + topic = sns_backend.create_topic(resource_name) for subscription in properties.get("Subscription", []): sns_backend.subscribe( topic.arn, subscription["Endpoint"], subscription["Protocol"] diff --git a/moto/sqs/models.py b/moto/sqs/models.py index a34e95c4f0ff..039224f5bcf3 100644 --- a/moto/sqs/models.py +++ b/moto/sqs/models.py @@ -374,10 +374,7 @@ def create_from_cloudformation_json( sqs_backend = sqs_backends[region_name] return sqs_backend.create_queue( - name=properties["QueueName"], - tags=tags_dict, - region=region_name, - **properties + name=resource_name, tags=tags_dict, region=region_name, **properties ) @classmethod @@ -385,7 +382,7 @@ def update_from_cloudformation_json( cls, original_resource, new_resource_name, cloudformation_json, region_name ): properties = cloudformation_json["Properties"] - queue_name = properties["QueueName"] + queue_name = original_resource.name sqs_backend = sqs_backends[region_name] queue = sqs_backend.get_queue(queue_name) @@ -402,10 +399,8 @@ def update_from_cloudformation_json( def delete_from_cloudformation_json( cls, resource_name, cloudformation_json, region_name ): - properties = cloudformation_json["Properties"] - queue_name = properties["QueueName"] sqs_backend = sqs_backends[region_name] - sqs_backend.delete_queue(queue_name) + sqs_backend.delete_queue(resource_name) @property def approximate_number_of_messages_delayed(self): diff --git a/tests/test_cloudformation/test_cloudformation_stack_crud_boto3.py b/tests/test_cloudformation/test_cloudformation_stack_crud_boto3.py index 41d3fad3ec8f..65469f1b34ea 100644 --- a/tests/test_cloudformation/test_cloudformation_stack_crud_boto3.py +++ b/tests/test_cloudformation/test_cloudformation_stack_crud_boto3.py @@ -592,7 +592,7 @@ def test_boto3_create_stack_set_with_yaml(): @mock_cloudformation @mock_s3 def test_create_stack_set_from_s3_url(): - s3 = boto3.client("s3") + s3 = boto3.client("s3", region_name="us-east-1") s3_conn = boto3.resource("s3", region_name="us-east-1") s3_conn.create_bucket(Bucket="foobar") @@ -704,7 +704,7 @@ def test_boto3_create_stack_with_short_form_func_yaml(): @mock_s3 @mock_cloudformation def test_get_template_summary(): - s3 = boto3.client("s3") + s3 = boto3.client("s3", region_name="us-east-1") s3_conn = boto3.resource("s3", region_name="us-east-1") conn = boto3.client("cloudformation", region_name="us-east-1") @@ -802,7 +802,7 @@ def test_create_stack_with_role_arn(): @mock_cloudformation @mock_s3 def test_create_stack_from_s3_url(): - s3 = boto3.client("s3") + s3 = boto3.client("s3", region_name="us-east-1") s3_conn = boto3.resource("s3", region_name="us-east-1") s3_conn.create_bucket(Bucket="foobar") @@ -857,7 +857,7 @@ def test_update_stack_with_previous_value(): @mock_s3 @mock_ec2 def test_update_stack_from_s3_url(): - s3 = boto3.client("s3") + s3 = boto3.client("s3", region_name="us-east-1") s3_conn = boto3.resource("s3", region_name="us-east-1") cf_conn = boto3.client("cloudformation", region_name="us-east-1") @@ -886,7 +886,7 @@ def test_update_stack_from_s3_url(): @mock_cloudformation @mock_s3 def test_create_change_set_from_s3_url(): - s3 = boto3.client("s3") + s3 = boto3.client("s3", 
region_name="us-east-1") s3_conn = boto3.resource("s3", region_name="us-east-1") s3_conn.create_bucket(Bucket="foobar") diff --git a/tests/test_cloudformation/test_validate.py b/tests/test_cloudformation/test_validate.py index 081ceee5415c..ea14fceeaf46 100644 --- a/tests/test_cloudformation/test_validate.py +++ b/tests/test_cloudformation/test_validate.py @@ -118,7 +118,7 @@ def test_boto3_yaml_validate_successful(): @mock_cloudformation @mock_s3 def test_boto3_yaml_validate_template_url_successful(): - s3 = boto3.client("s3") + s3 = boto3.client("s3", region_name="us-east-1") s3_conn = boto3.resource("s3", region_name="us-east-1") s3_conn.create_bucket(Bucket="foobar") diff --git a/tests/test_iam/test_iam.py b/tests/test_iam/test_iam.py index 610333303135..288825d6e389 100644 --- a/tests/test_iam/test_iam.py +++ b/tests/test_iam/test_iam.py @@ -5,12 +5,9 @@ import boto import boto3 import csv -import os import sure # noqa -import sys from boto.exception import BotoServerError from botocore.exceptions import ClientError -from dateutil.tz import tzutc from moto import mock_iam, mock_iam_deprecated, settings from moto.core import ACCOUNT_ID diff --git a/tests/test_iam/test_iam_cloudformation.py b/tests/test_iam/test_iam_cloudformation.py new file mode 100644 index 000000000000..aa063273f94b --- /dev/null +++ b/tests/test_iam/test_iam_cloudformation.py @@ -0,0 +1,1196 @@ +import boto3 +import yaml +import sure # noqa + +from nose.tools import assert_raises +from botocore.exceptions import ClientError + +from moto import mock_iam, mock_cloudformation, mock_s3, mock_sts + +# AWS::IAM::User Tests +@mock_iam +@mock_cloudformation +def test_iam_cloudformation_create_user(): + cf_client = boto3.client("cloudformation", region_name="us-east-1") + + stack_name = "MyStack" + user_name = "MyUser" + + template = """ +Resources: + TheUser: + Type: AWS::IAM::User + Properties: + UserName: {0} +""".strip().format( + user_name + ) + + cf_client.create_stack(StackName=stack_name, TemplateBody=template) + + provisioned_resource = cf_client.list_stack_resources(StackName=stack_name)[ + "StackResourceSummaries" + ][0] + provisioned_resource["LogicalResourceId"].should.equal("TheUser") + provisioned_resource["PhysicalResourceId"].should.equal(user_name) + + +@mock_iam +@mock_cloudformation +def test_iam_cloudformation_update_user_no_interruption(): + cf_client = boto3.client("cloudformation", region_name="us-east-1") + + stack_name = "MyStack" + + template = """ +Resources: + TheUser: + Type: AWS::IAM::User +""".strip() + + cf_client.create_stack(StackName=stack_name, TemplateBody=template) + provisioned_resource = cf_client.list_stack_resources(StackName=stack_name)[ + "StackResourceSummaries" + ][0] + user_name = provisioned_resource["PhysicalResourceId"] + + iam_client = boto3.client("iam", region_name="us-east-1") + user = iam_client.get_user(UserName=user_name)["User"] + user["Path"].should.equal("/") + + path = "/MyPath/" + template = """ +Resources: + TheUser: + Type: AWS::IAM::User + Properties: + Path: {0} +""".strip().format( + path + ) + + cf_client.update_stack(StackName=stack_name, TemplateBody=template) + + user = iam_client.get_user(UserName=user_name)["User"] + user["Path"].should.equal(path) + + +@mock_iam +@mock_cloudformation +def test_iam_cloudformation_update_user_replacement(): + cf_client = boto3.client("cloudformation", region_name="us-east-1") + + stack_name = "MyStack" + + template = """ +Resources: + TheUser: + Type: AWS::IAM::User +""".strip() + + 
cf_client.create_stack(StackName=stack_name, TemplateBody=template) + provisioned_resource = cf_client.list_stack_resources(StackName=stack_name)[ + "StackResourceSummaries" + ][0] + original_user_name = provisioned_resource["PhysicalResourceId"] + + iam_client = boto3.client("iam", region_name="us-east-1") + user = iam_client.get_user(UserName=original_user_name)["User"] + user["Path"].should.equal("/") + + new_user_name = "MyUser" + template = """ +Resources: + TheUser: + Type: AWS::IAM::User + Properties: + UserName: {0} +""".strip().format( + new_user_name + ) + + cf_client.update_stack(StackName=stack_name, TemplateBody=template) + + with assert_raises(ClientError) as e: + iam_client.get_user(UserName=original_user_name) + e.exception.response["Error"]["Code"].should.equal("NoSuchEntity") + + iam_client.get_user(UserName=new_user_name) + + +@mock_iam +@mock_cloudformation +def test_iam_cloudformation_update_drop_user(): + cf_client = boto3.client("cloudformation", region_name="us-east-1") + + stack_name = "MyStack" + + template = """ +Resources: + TheFirstUser: + Type: AWS::IAM::User + TheSecondUser: + Type: AWS::IAM::User +""".strip() + + cf_client.create_stack(StackName=stack_name, TemplateBody=template) + + provisioned_resources = cf_client.list_stack_resources(StackName=stack_name)[ + "StackResourceSummaries" + ] + first_provisioned_user = [ + resource + for resource in provisioned_resources + if resource["LogicalResourceId"] == "TheFirstUser" + ][0] + second_provisioned_user = [ + resource + for resource in provisioned_resources + if resource["LogicalResourceId"] == "TheSecondUser" + ][0] + first_user_name = first_provisioned_user["PhysicalResourceId"] + second_user_name = second_provisioned_user["PhysicalResourceId"] + + iam_client = boto3.client("iam", region_name="us-east-1") + iam_client.get_user(UserName=first_user_name) + iam_client.get_user(UserName=second_user_name) + + template = """ +Resources: + TheSecondUser: + Type: AWS::IAM::User +""".strip() + + cf_client.update_stack(StackName=stack_name, TemplateBody=template) + + provisioned_resources = cf_client.list_stack_resources(StackName=stack_name)[ + "StackResourceSummaries" + ] + len(provisioned_resources).should.equal(1) + second_provisioned_user = [ + resource + for resource in provisioned_resources + if resource["LogicalResourceId"] == "TheSecondUser" + ][0] + second_user_name.should.equal(second_provisioned_user["PhysicalResourceId"]) + + iam_client.get_user(UserName=second_user_name) + with assert_raises(ClientError) as e: + iam_client.get_user(UserName=first_user_name) + e.exception.response["Error"]["Code"].should.equal("NoSuchEntity") + + +@mock_iam +@mock_cloudformation +def test_iam_cloudformation_delete_user(): + cf_client = boto3.client("cloudformation", region_name="us-east-1") + + stack_name = "MyStack" + user_name = "MyUser" + + template = """ +Resources: + TheUser: + Type: AWS::IAM::User + Properties: + UserName: {} +""".strip().format( + user_name + ) + + cf_client.create_stack(StackName=stack_name, TemplateBody=template) + + iam_client = boto3.client("iam", region_name="us-east-1") + user = iam_client.get_user(UserName=user_name) + + cf_client.delete_stack(StackName=stack_name) + + with assert_raises(ClientError) as e: + user = iam_client.get_user(UserName=user_name) + e.exception.response["Error"]["Code"].should.equal("NoSuchEntity") + + +@mock_iam +@mock_cloudformation +def test_iam_cloudformation_delete_user_having_generated_name(): + cf_client = boto3.client("cloudformation", 
region_name="us-east-1") + + stack_name = "MyStack" + + template = """ +Resources: + TheUser: + Type: AWS::IAM::User +""".strip() + + cf_client.create_stack(StackName=stack_name, TemplateBody=template) + provisioned_resource = cf_client.list_stack_resources(StackName=stack_name)[ + "StackResourceSummaries" + ][0] + provisioned_resource["LogicalResourceId"].should.equal("TheUser") + user_name = provisioned_resource["PhysicalResourceId"] + + iam_client = boto3.client("iam", region_name="us-east-1") + user = iam_client.get_user(UserName=user_name) + + cf_client.delete_stack(StackName=stack_name) + + with assert_raises(ClientError) as e: + user = iam_client.get_user(UserName=user_name) + e.exception.response["Error"]["Code"].should.equal("NoSuchEntity") + + +@mock_iam +@mock_cloudformation +def test_iam_cloudformation_user_get_attr(): + cf_client = boto3.client("cloudformation", region_name="us-east-1") + + stack_name = "MyStack" + user_name = "MyUser" + + template = """ +Resources: + TheUser: + Type: AWS::IAM::User + Properties: + UserName: {0} +Outputs: + UserName: + Value: !Ref TheUser + UserArn: + Value: !GetAtt TheUser.Arn +""".strip().format( + user_name + ) + + cf_client.create_stack(StackName=stack_name, TemplateBody=template) + stack_description = cf_client.describe_stacks(StackName=stack_name)["Stacks"][0] + output_user_name = [ + output["OutputValue"] + for output in stack_description["Outputs"] + if output["OutputKey"] == "UserName" + ][0] + output_user_arn = [ + output["OutputValue"] + for output in stack_description["Outputs"] + if output["OutputKey"] == "UserArn" + ][0] + + iam_client = boto3.client("iam", region_name="us-east-1") + user_description = iam_client.get_user(UserName=output_user_name)["User"] + output_user_arn.should.equal(user_description["Arn"]) + + +# AWS::IAM::Policy Tests +@mock_s3 +@mock_iam +@mock_cloudformation +def test_iam_cloudformation_create_user_policy(): + iam_client = boto3.client("iam", region_name="us-east-1") + user_name = "MyUser" + iam_client.create_user(UserName=user_name) + + s3_client = boto3.client("s3", region_name="us-east-1") + bucket_name = "my-bucket" + bucket = s3_client.create_bucket(Bucket=bucket_name) + bucket_arn = "arn:aws:s3:::{0}".format(bucket_name) + + cf_client = boto3.client("cloudformation", region_name="us-east-1") + stack_name = "MyStack" + policy_name = "MyPolicy" + + template = """ +Resources: + ThePolicy: + Type: AWS::IAM::Policy + Properties: + PolicyName: {0} + PolicyDocument: + Version: '2012-10-17' + Statement: + - Effect: Allow + Action: s3:* + Resource: {1} + Users: + - {2} +""".strip().format( + policy_name, bucket_arn, user_name + ) + + cf_client.create_stack(StackName=stack_name, TemplateBody=template) + + provisioned_resource = cf_client.list_stack_resources(StackName=stack_name)[ + "StackResourceSummaries" + ][0] + logical_resource_id = provisioned_resource["LogicalResourceId"] + logical_resource_id.should.equal("ThePolicy") + + original_policy_document = yaml.load(template, Loader=yaml.FullLoader)["Resources"][ + logical_resource_id + ]["Properties"]["PolicyDocument"] + policy = iam_client.get_user_policy(UserName=user_name, PolicyName=policy_name) + policy["PolicyDocument"].should.equal(original_policy_document) + + +@mock_s3 +@mock_iam +@mock_cloudformation +def test_iam_cloudformation_update_user_policy(): + iam_client = boto3.client("iam", region_name="us-east-1") + user_name_1 = "MyUser1" + iam_client.create_user(UserName=user_name_1) + user_name_2 = "MyUser2" + 
iam_client.create_user(UserName=user_name_2) + + s3_client = boto3.client("s3", region_name="us-east-1") + bucket_name = "my-bucket" + s3_client.create_bucket(Bucket=bucket_name) + bucket_arn = "arn:aws:s3:::{0}".format(bucket_name) + + cf_client = boto3.client("cloudformation", region_name="us-east-1") + stack_name = "MyStack" + policy_name = "MyPolicy" + + template = """ +Resources: + ThePolicy: + Type: AWS::IAM::Policy + Properties: + PolicyName: {0} + PolicyDocument: + Version: '2012-10-17' + Statement: + - Effect: Allow + Action: s3:* + Resource: {1} + Users: + - {2} +""".strip().format( + policy_name, bucket_arn, user_name_1 + ) + + cf_client.create_stack(StackName=stack_name, TemplateBody=template) + + provisioned_resource = cf_client.list_stack_resources(StackName=stack_name)[ + "StackResourceSummaries" + ][0] + logical_resource_id = provisioned_resource["LogicalResourceId"] + logical_resource_id.should.equal("ThePolicy") + + original_policy_document = yaml.load(template, Loader=yaml.FullLoader)["Resources"][ + logical_resource_id + ]["Properties"]["PolicyDocument"] + policy = iam_client.get_user_policy(UserName=user_name_1, PolicyName=policy_name) + policy["PolicyDocument"].should.equal(original_policy_document) + + # Change template and user + template = """ +Resources: + ThePolicy: + Type: AWS::IAM::Policy + Properties: + PolicyName: {0} + PolicyDocument: + Version: '2012-10-17' + Statement: + - Effect: Allow + Action: s3:ListBuckets + Resource: {1} + Users: + - {2} +""".strip().format( + policy_name, bucket_arn, user_name_2 + ) + + cf_client.update_stack(StackName=stack_name, TemplateBody=template) + + provisioned_resource = cf_client.list_stack_resources(StackName=stack_name)[ + "StackResourceSummaries" + ][0] + logical_resource_id = provisioned_resource["LogicalResourceId"] + logical_resource_id.should.equal("ThePolicy") + + original_policy_document = yaml.load(template, Loader=yaml.FullLoader)["Resources"][ + logical_resource_id + ]["Properties"]["PolicyDocument"] + policy = iam_client.get_user_policy(UserName=user_name_2, PolicyName=policy_name) + policy["PolicyDocument"].should.equal(original_policy_document) + + iam_client.get_user_policy.when.called_with( + UserName=user_name_1, PolicyName=policy_name + ).should.throw(iam_client.exceptions.NoSuchEntityException) + + +@mock_s3 +@mock_iam +@mock_cloudformation +def test_iam_cloudformation_delete_user_policy_having_generated_name(): + iam_client = boto3.client("iam", region_name="us-east-1") + user_name = "MyUser" + iam_client.create_user(UserName=user_name) + + s3_client = boto3.client("s3", region_name="us-east-1") + bucket_name = "my-bucket" + bucket = s3_client.create_bucket(Bucket=bucket_name) + bucket_arn = "arn:aws:s3:::{0}".format(bucket_name) + + cf_client = boto3.client("cloudformation", region_name="us-east-1") + stack_name = "MyStack" + policy_name = "MyPolicy" + + template = """ +Resources: + ThePolicy: + Type: AWS::IAM::Policy + Properties: + PolicyName: MyPolicy + PolicyDocument: + Version: '2012-10-17' + Statement: + - Effect: Allow + Action: s3:* + Resource: {0} + Users: + - {1} +""".strip().format( + bucket_arn, user_name + ) + + cf_client.create_stack(StackName=stack_name, TemplateBody=template) + + provisioned_resource = cf_client.list_stack_resources(StackName=stack_name)[ + "StackResourceSummaries" + ][0] + logical_resource_id = provisioned_resource["LogicalResourceId"] + logical_resource_id.should.equal("ThePolicy") + + original_policy_document = yaml.load(template, 
Loader=yaml.FullLoader)["Resources"][ + logical_resource_id + ]["Properties"]["PolicyDocument"] + policy = iam_client.get_user_policy(UserName=user_name, PolicyName=policy_name) + policy["PolicyDocument"].should.equal(original_policy_document) + + cf_client.delete_stack(StackName=stack_name) + iam_client.get_user_policy.when.called_with( + UserName=user_name, PolicyName=policy_name + ).should.throw(iam_client.exceptions.NoSuchEntityException) + + +@mock_s3 +@mock_iam +@mock_cloudformation +def test_iam_cloudformation_create_role_policy(): + iam_client = boto3.client("iam", region_name="us-east-1") + role_name = "MyRole" + iam_client.create_role(RoleName=role_name, AssumeRolePolicyDocument="{}") + + s3_client = boto3.client("s3", region_name="us-east-1") + bucket_name = "my-bucket" + s3_client.create_bucket(Bucket=bucket_name) + bucket_arn = "arn:aws:s3:::{0}".format(bucket_name) + + cf_client = boto3.client("cloudformation", region_name="us-east-1") + stack_name = "MyStack" + policy_name = "MyPolicy" + + template = """ +Resources: + ThePolicy: + Type: AWS::IAM::Policy + Properties: + PolicyName: {0} + PolicyDocument: + Version: '2012-10-17' + Statement: + - Effect: Allow + Action: s3:* + Resource: {1} + Roles: + - {2} +""".strip().format( + policy_name, bucket_arn, role_name + ) + + cf_client.create_stack(StackName=stack_name, TemplateBody=template) + + provisioned_resource = cf_client.list_stack_resources(StackName=stack_name)[ + "StackResourceSummaries" + ][0] + logical_resource_id = provisioned_resource["LogicalResourceId"] + logical_resource_id.should.equal("ThePolicy") + + original_policy_document = yaml.load(template, Loader=yaml.FullLoader)["Resources"][ + logical_resource_id + ]["Properties"]["PolicyDocument"] + policy = iam_client.get_role_policy(RoleName=role_name, PolicyName=policy_name) + policy["PolicyDocument"].should.equal(original_policy_document) + + +@mock_s3 +@mock_iam +@mock_cloudformation +def test_iam_cloudformation_update_role_policy(): + iam_client = boto3.client("iam", region_name="us-east-1") + role_name_1 = "MyRole1" + iam_client.create_role(RoleName=role_name_1, AssumeRolePolicyDocument="{}") + role_name_2 = "MyRole2" + iam_client.create_role(RoleName=role_name_2, AssumeRolePolicyDocument="{}") + + s3_client = boto3.client("s3", region_name="us-east-1") + bucket_name = "my-bucket" + s3_client.create_bucket(Bucket=bucket_name) + bucket_arn = "arn:aws:s3:::{0}".format(bucket_name) + + cf_client = boto3.client("cloudformation", region_name="us-east-1") + stack_name = "MyStack" + policy_name = "MyPolicy" + + template = """ +Resources: + ThePolicy: + Type: AWS::IAM::Policy + Properties: + PolicyName: {0} + PolicyDocument: + Version: '2012-10-17' + Statement: + - Effect: Allow + Action: s3:* + Resource: {1} + Roles: + - {2} +""".strip().format( + policy_name, bucket_arn, role_name_1 + ) + + cf_client.create_stack(StackName=stack_name, TemplateBody=template) + + provisioned_resource = cf_client.list_stack_resources(StackName=stack_name)[ + "StackResourceSummaries" + ][0] + logical_resource_id = provisioned_resource["LogicalResourceId"] + logical_resource_id.should.equal("ThePolicy") + + original_policy_document = yaml.load(template, Loader=yaml.FullLoader)["Resources"][ + logical_resource_id + ]["Properties"]["PolicyDocument"] + policy = iam_client.get_role_policy(RoleName=role_name_1, PolicyName=policy_name) + policy["PolicyDocument"].should.equal(original_policy_document) + + # Change template and user + template = """ +Resources: + ThePolicy: + Type: 
AWS::IAM::Policy + Properties: + PolicyName: {0} + PolicyDocument: + Version: '2012-10-17' + Statement: + - Effect: Allow + Action: s3:ListBuckets + Resource: {1} + Roles: + - {2} +""".strip().format( + policy_name, bucket_arn, role_name_2 + ) + + cf_client.update_stack(StackName=stack_name, TemplateBody=template) + + provisioned_resource = cf_client.list_stack_resources(StackName=stack_name)[ + "StackResourceSummaries" + ][0] + logical_resource_id = provisioned_resource["LogicalResourceId"] + logical_resource_id.should.equal("ThePolicy") + + original_policy_document = yaml.load(template, Loader=yaml.FullLoader)["Resources"][ + logical_resource_id + ]["Properties"]["PolicyDocument"] + policy = iam_client.get_role_policy(RoleName=role_name_2, PolicyName=policy_name) + policy["PolicyDocument"].should.equal(original_policy_document) + + iam_client.get_role_policy.when.called_with( + RoleName=role_name_1, PolicyName=policy_name + ).should.throw(iam_client.exceptions.NoSuchEntityException) + + +@mock_s3 +@mock_iam +@mock_cloudformation +def test_iam_cloudformation_delete_role_policy_having_generated_name(): + iam_client = boto3.client("iam", region_name="us-east-1") + role_name = "MyRole" + iam_client.create_role(RoleName=role_name, AssumeRolePolicyDocument="{}") + + s3_client = boto3.client("s3", region_name="us-east-1") + bucket_name = "my-bucket" + s3_client.create_bucket(Bucket=bucket_name) + bucket_arn = "arn:aws:s3:::{0}".format(bucket_name) + + cf_client = boto3.client("cloudformation", region_name="us-east-1") + stack_name = "MyStack" + policy_name = "MyPolicy" + + template = """ +Resources: + ThePolicy: + Type: AWS::IAM::Policy + Properties: + PolicyName: MyPolicy + PolicyDocument: + Version: '2012-10-17' + Statement: + - Effect: Allow + Action: s3:* + Resource: {0} + Roles: + - {1} +""".strip().format( + bucket_arn, role_name + ) + + cf_client.create_stack(StackName=stack_name, TemplateBody=template) + + provisioned_resource = cf_client.list_stack_resources(StackName=stack_name)[ + "StackResourceSummaries" + ][0] + logical_resource_id = provisioned_resource["LogicalResourceId"] + logical_resource_id.should.equal("ThePolicy") + + original_policy_document = yaml.load(template, Loader=yaml.FullLoader)["Resources"][ + logical_resource_id + ]["Properties"]["PolicyDocument"] + policy = iam_client.get_role_policy(RoleName=role_name, PolicyName=policy_name) + policy["PolicyDocument"].should.equal(original_policy_document) + + cf_client.delete_stack(StackName=stack_name) + iam_client.get_role_policy.when.called_with( + RoleName=role_name, PolicyName=policy_name + ).should.throw(iam_client.exceptions.NoSuchEntityException) + + +@mock_s3 +@mock_iam +@mock_cloudformation +def test_iam_cloudformation_create_group_policy(): + iam_client = boto3.client("iam", region_name="us-east-1") + group_name = "MyGroup" + iam_client.create_group(GroupName=group_name) + + s3_client = boto3.client("s3", region_name="us-east-1") + bucket_name = "my-bucket" + s3_client.create_bucket(Bucket=bucket_name) + bucket_arn = "arn:aws:s3:::{0}".format(bucket_name) + + cf_client = boto3.client("cloudformation", region_name="us-east-1") + stack_name = "MyStack" + policy_name = "MyPolicy" + + template = """ +Resources: + ThePolicy: + Type: AWS::IAM::Policy + Properties: + PolicyName: {0} + PolicyDocument: + Version: '2012-10-17' + Statement: + - Effect: Allow + Action: s3:* + Resource: {1} + Groups: + - {2} +""".strip().format( + policy_name, bucket_arn, group_name + ) + + cf_client.create_stack(StackName=stack_name, 
TemplateBody=template) + + provisioned_resource = cf_client.list_stack_resources(StackName=stack_name)[ + "StackResourceSummaries" + ][0] + logical_resource_id = provisioned_resource["LogicalResourceId"] + logical_resource_id.should.equal("ThePolicy") + + original_policy_document = yaml.load(template, Loader=yaml.FullLoader)["Resources"][ + logical_resource_id + ]["Properties"]["PolicyDocument"] + policy = iam_client.get_group_policy(GroupName=group_name, PolicyName=policy_name) + policy["PolicyDocument"].should.equal(original_policy_document) + + +@mock_s3 +@mock_iam +@mock_cloudformation +def test_iam_cloudformation_update_group_policy(): + iam_client = boto3.client("iam", region_name="us-east-1") + group_name_1 = "MyGroup1" + iam_client.create_group(GroupName=group_name_1) + group_name_2 = "MyGroup2" + iam_client.create_group(GroupName=group_name_2) + + s3_client = boto3.client("s3", region_name="us-east-1") + bucket_name = "my-bucket" + s3_client.create_bucket(Bucket=bucket_name) + bucket_arn = "arn:aws:s3:::{0}".format(bucket_name) + + cf_client = boto3.client("cloudformation", region_name="us-east-1") + stack_name = "MyStack" + policy_name = "MyPolicy" + + template = """ +Resources: + ThePolicy: + Type: AWS::IAM::Policy + Properties: + PolicyName: {0} + PolicyDocument: + Version: '2012-10-17' + Statement: + - Effect: Allow + Action: s3:* + Resource: {1} + Groups: + - {2} +""".strip().format( + policy_name, bucket_arn, group_name_1 + ) + + cf_client.create_stack(StackName=stack_name, TemplateBody=template) + + provisioned_resource = cf_client.list_stack_resources(StackName=stack_name)[ + "StackResourceSummaries" + ][0] + logical_resource_id = provisioned_resource["LogicalResourceId"] + logical_resource_id.should.equal("ThePolicy") + + original_policy_document = yaml.load(template, Loader=yaml.FullLoader)["Resources"][ + logical_resource_id + ]["Properties"]["PolicyDocument"] + policy = iam_client.get_group_policy(GroupName=group_name_1, PolicyName=policy_name) + policy["PolicyDocument"].should.equal(original_policy_document) + + # Change template and group + template = """ +Resources: + ThePolicy: + Type: AWS::IAM::Policy + Properties: + PolicyName: {0} + PolicyDocument: + Version: '2012-10-17' + Statement: + - Effect: Allow + Action: s3:ListBuckets + Resource: {1} + Groups: + - {2} +""".strip().format( + policy_name, bucket_arn, group_name_2 + ) + + cf_client.update_stack(StackName=stack_name, TemplateBody=template) + + provisioned_resource = cf_client.list_stack_resources(StackName=stack_name)[ + "StackResourceSummaries" + ][0] + logical_resource_id = provisioned_resource["LogicalResourceId"] + logical_resource_id.should.equal("ThePolicy") + + original_policy_document = yaml.load(template, Loader=yaml.FullLoader)["Resources"][ + logical_resource_id + ]["Properties"]["PolicyDocument"] + policy = iam_client.get_group_policy(GroupName=group_name_2, PolicyName=policy_name) + policy["PolicyDocument"].should.equal(original_policy_document) + + iam_client.get_group_policy.when.called_with( + GroupName=group_name_1, PolicyName=policy_name + ).should.throw(iam_client.exceptions.NoSuchEntityException) + + +@mock_s3 +@mock_iam +@mock_cloudformation +def test_iam_cloudformation_delete_group_policy_having_generated_name(): + iam_client = boto3.client("iam", region_name="us-east-1") + group_name = "MyGroup" + iam_client.create_group(GroupName=group_name) + + s3_client = boto3.client("s3", region_name="us-east-1") + bucket_name = "my-bucket" + s3_client.create_bucket(Bucket=bucket_name) + 
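+    # the stack-managed inline policy should disappear from the group when
+    # the stack is deleted at the end of this test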
bucket_arn = "arn:aws:s3:::{0}".format(bucket_name) + + cf_client = boto3.client("cloudformation", region_name="us-east-1") + stack_name = "MyStack" + policy_name = "MyPolicy" + + template = """ +Resources: + ThePolicy: + Type: AWS::IAM::Policy + Properties: + PolicyName: MyPolicy + PolicyDocument: + Version: '2012-10-17' + Statement: + - Effect: Allow + Action: s3:* + Resource: {0} + Groups: + - {1} +""".strip().format( + bucket_arn, group_name + ) + + cf_client.create_stack(StackName=stack_name, TemplateBody=template) + + provisioned_resource = cf_client.list_stack_resources(StackName=stack_name)[ + "StackResourceSummaries" + ][0] + logical_resource_id = provisioned_resource["LogicalResourceId"] + logical_resource_id.should.equal("ThePolicy") + + original_policy_document = yaml.load(template, Loader=yaml.FullLoader)["Resources"][ + logical_resource_id + ]["Properties"]["PolicyDocument"] + policy = iam_client.get_group_policy(GroupName=group_name, PolicyName=policy_name) + policy["PolicyDocument"].should.equal(original_policy_document) + + cf_client.delete_stack(StackName=stack_name) + iam_client.get_group_policy.when.called_with( + GroupName=group_name, PolicyName=policy_name + ).should.throw(iam_client.exceptions.NoSuchEntityException) + + +# AWS::IAM::User AccessKeys +@mock_iam +@mock_cloudformation +def test_iam_cloudformation_create_user_with_access_key(): + cf_client = boto3.client("cloudformation", region_name="us-east-1") + + stack_name = "MyStack" + + template = """ +Resources: + TheUser: + Type: AWS::IAM::User + TheAccessKey: + Type: AWS::IAM::AccessKey + Properties: + UserName: !Ref TheUser +""".strip() + + cf_client.create_stack(StackName=stack_name, TemplateBody=template) + + provisioned_resources = cf_client.list_stack_resources(StackName=stack_name)[ + "StackResourceSummaries" + ] + + provisioned_user = [ + resource + for resource in provisioned_resources + if resource["LogicalResourceId"] == "TheUser" + ][0] + user_name = provisioned_user["PhysicalResourceId"] + + provisioned_access_keys = [ + resource + for resource in provisioned_resources + if resource["LogicalResourceId"] == "TheAccessKey" + ] + len(provisioned_access_keys).should.equal(1) + + iam_client = boto3.client("iam", region_name="us-east-1") + user = iam_client.get_user(UserName=user_name)["User"] + user["UserName"].should.equal(user_name) + access_keys = iam_client.list_access_keys(UserName=user_name) + access_keys["AccessKeyMetadata"][0]["UserName"].should.equal(user_name) + + +@mock_sts +@mock_iam +@mock_cloudformation +def test_iam_cloudformation_access_key_get_attr(): + cf_client = boto3.client("cloudformation", region_name="us-east-1") + + stack_name = "MyStack" + + template = """ +Resources: + TheUser: + Type: AWS::IAM::User + TheAccessKey: + Type: AWS::IAM::AccessKey + Properties: + UserName: !Ref TheUser +Outputs: + AccessKeyId: + Value: !Ref TheAccessKey + SecretKey: + Value: !GetAtt TheAccessKey.SecretAccessKey +""".strip() + + cf_client.create_stack(StackName=stack_name, TemplateBody=template) + + provisioned_resources = cf_client.list_stack_resources(StackName=stack_name)[ + "StackResourceSummaries" + ] + provisioned_user = [ + resource + for resource in provisioned_resources + if resource["LogicalResourceId"] == "TheUser" + ][0] + user_name = provisioned_user["PhysicalResourceId"] + + stack_description = cf_client.describe_stacks(StackName=stack_name)["Stacks"][0] + output_access_key_id = [ + output["OutputValue"] + for output in stack_description["Outputs"] + if output["OutputKey"] == 
"AccessKeyId" + ][0] + output_secret_key = [ + output["OutputValue"] + for output in stack_description["Outputs"] + if output["OutputKey"] == "SecretKey" + ][0] + + sts_client = boto3.client( + "sts", + aws_access_key_id=output_access_key_id, + aws_secret_access_key=output_secret_key, + region_name="us-east-1", + ) + caller_identity = sts_client.get_caller_identity() + caller_identity["Arn"].split("/")[1].should.equal(user_name) + pass + + +@mock_iam +@mock_cloudformation +def test_iam_cloudformation_delete_users_access_key(): + cf_client = boto3.client("cloudformation", region_name="us-east-1") + + stack_name = "MyStack" + + template = """ + Resources: + TheUser: + Type: AWS::IAM::User + TheAccessKey: + Type: AWS::IAM::AccessKey + Properties: + UserName: !Ref TheUser + """.strip() + + cf_client.create_stack(StackName=stack_name, TemplateBody=template) + + provisioned_resources = cf_client.list_stack_resources(StackName=stack_name)[ + "StackResourceSummaries" + ] + + provisioned_user = [ + resource + for resource in provisioned_resources + if resource["LogicalResourceId"] == "TheUser" + ][0] + user_name = provisioned_user["PhysicalResourceId"] + + provisioned_access_key = [ + resource + for resource in provisioned_resources + if resource["LogicalResourceId"] == "TheAccessKey" + ][0] + access_key_id = provisioned_access_key["PhysicalResourceId"] + + iam_client = boto3.client("iam", region_name="us-east-1") + user = iam_client.get_user(UserName=user_name) + access_keys = iam_client.list_access_keys(UserName=user_name) + + access_key_id.should.equal(access_keys["AccessKeyMetadata"][0]["AccessKeyId"]) + + cf_client.delete_stack(StackName=stack_name) + + iam_client.get_user.when.called_with(UserName=user_name).should.throw( + iam_client.exceptions.NoSuchEntityException + ) + iam_client.list_access_keys.when.called_with(UserName=user_name).should.throw( + iam_client.exceptions.NoSuchEntityException + ) + + +@mock_iam +@mock_cloudformation +def test_iam_cloudformation_delete_users_access_key(): + cf_client = boto3.client("cloudformation", region_name="us-east-1") + + stack_name = "MyStack" + + template = """ + Resources: + TheUser: + Type: AWS::IAM::User + TheAccessKey: + Type: AWS::IAM::AccessKey + Properties: + UserName: !Ref TheUser + """.strip() + + cf_client.create_stack(StackName=stack_name, TemplateBody=template) + + provisioned_resources = cf_client.list_stack_resources(StackName=stack_name)[ + "StackResourceSummaries" + ] + + provisioned_user = [ + resource + for resource in provisioned_resources + if resource["LogicalResourceId"] == "TheUser" + ][0] + user_name = provisioned_user["PhysicalResourceId"] + + provisioned_access_keys = [ + resource + for resource in provisioned_resources + if resource["LogicalResourceId"] == "TheAccessKey" + ] + len(provisioned_access_keys).should.equal(1) + + iam_client = boto3.client("iam", region_name="us-east-1") + user = iam_client.get_user(UserName=user_name)["User"] + user["UserName"].should.equal(user_name) + access_keys = iam_client.list_access_keys(UserName=user_name) + access_keys["AccessKeyMetadata"][0]["UserName"].should.equal(user_name) + + cf_client.delete_stack(StackName=stack_name) + + iam_client.get_user.when.called_with(UserName=user_name).should.throw( + iam_client.exceptions.NoSuchEntityException + ) + iam_client.list_access_keys.when.called_with(UserName=user_name).should.throw( + iam_client.exceptions.NoSuchEntityException + ) + + +@mock_iam +@mock_cloudformation +def test_iam_cloudformation_update_users_access_key_no_interruption(): + 
cf_client = boto3.client("cloudformation", region_name="us-east-1") + + stack_name = "MyStack" + + template = """ +Resources: + TheUser: + Type: AWS::IAM::User + TheAccessKey: + Type: AWS::IAM::AccessKey + Properties: + UserName: !Ref TheUser +""".strip() + + cf_client.create_stack(StackName=stack_name, TemplateBody=template) + + provisioned_resources = cf_client.list_stack_resources(StackName=stack_name)[ + "StackResourceSummaries" + ] + + provisioned_user = [ + resource + for resource in provisioned_resources + if resource["LogicalResourceId"] == "TheUser" + ][0] + user_name = provisioned_user["PhysicalResourceId"] + + provisioned_access_key = [ + resource + for resource in provisioned_resources + if resource["LogicalResourceId"] == "TheAccessKey" + ][0] + access_key_id = provisioned_access_key["PhysicalResourceId"] + + iam_client = boto3.client("iam", region_name="us-east-1") + user = iam_client.get_user(UserName=user_name) + access_keys = iam_client.list_access_keys(UserName=user_name) + access_key_id.should.equal(access_keys["AccessKeyMetadata"][0]["AccessKeyId"]) + + template = """ +Resources: + TheUser: + Type: AWS::IAM::User + TheAccessKey: + Type: AWS::IAM::AccessKey + Properties: + Status: Inactive +""".strip() + + cf_client.update_stack(StackName=stack_name, TemplateBody=template) + access_keys = iam_client.list_access_keys(UserName=user_name) + access_keys["AccessKeyMetadata"][0]["Status"].should.equal("Inactive") + + +@mock_iam +@mock_cloudformation +def test_iam_cloudformation_update_users_access_key_replacement(): + cf_client = boto3.client("cloudformation", region_name="us-east-1") + + stack_name = "MyStack" + + template = """ +Resources: + TheUser: + Type: AWS::IAM::User + TheAccessKey: + Type: AWS::IAM::AccessKey + Properties: + UserName: !Ref TheUser +""".strip() + + cf_client.create_stack(StackName=stack_name, TemplateBody=template) + + provisioned_resources = cf_client.list_stack_resources(StackName=stack_name)[ + "StackResourceSummaries" + ] + + provisioned_user = [ + resource + for resource in provisioned_resources + if resource["LogicalResourceId"] == "TheUser" + ][0] + user_name = provisioned_user["PhysicalResourceId"] + + provisioned_access_key = [ + resource + for resource in provisioned_resources + if resource["LogicalResourceId"] == "TheAccessKey" + ][0] + access_key_id = provisioned_access_key["PhysicalResourceId"] + + iam_client = boto3.client("iam", region_name="us-east-1") + user = iam_client.get_user(UserName=user_name) + access_keys = iam_client.list_access_keys(UserName=user_name) + access_key_id.should.equal(access_keys["AccessKeyMetadata"][0]["AccessKeyId"]) + + other_user_name = "MyUser" + iam_client.create_user(UserName=other_user_name) + + template = """ +Resources: + TheUser: + Type: AWS::IAM::User + TheAccessKey: + Type: AWS::IAM::AccessKey + Properties: + UserName: {0} +""".strip().format( + other_user_name + ) + + cf_client.update_stack(StackName=stack_name, TemplateBody=template) + + access_keys = iam_client.list_access_keys(UserName=user_name) + len(access_keys["AccessKeyMetadata"]).should.equal(0) + + access_keys = iam_client.list_access_keys(UserName=other_user_name) + access_key_id.should_not.equal(access_keys["AccessKeyMetadata"][0]["AccessKeyId"]) diff --git a/tests/test_kinesis/test_kinesis_cloudformation.py b/tests/test_kinesis/test_kinesis_cloudformation.py index 7f3aef0ded08..59f73b888e5f 100644 --- a/tests/test_kinesis/test_kinesis_cloudformation.py +++ b/tests/test_kinesis/test_kinesis_cloudformation.py @@ -73,6 +73,12 @@ def 
test_kinesis_cloudformation_update(): Properties: Name: MyStream ShardCount: 4 + RetentionPeriodHours: 48 + Tags: + - Key: TagKey1 + Value: TagValue1 + - Key: TagKey2 + Value: TagValue2 """.strip() cf_conn.create_stack(StackName=stack_name, TemplateBody=template) @@ -83,6 +89,14 @@ def test_kinesis_cloudformation_update(): stream_description = kinesis_conn.describe_stream(StreamName="MyStream")[ "StreamDescription" ] + stream_description["RetentionPeriodHours"].should.equal(48) + + tags = kinesis_conn.list_tags_for_stream(StreamName="MyStream")["Tags"] + tag1_value = [tag for tag in tags if tag["Key"] == "TagKey1"][0]["Value"] + tag2_value = [tag for tag in tags if tag["Key"] == "TagKey2"][0]["Value"] + tag1_value.should.equal("TagValue1") + tag2_value.should.equal("TagValue2") + shards_provisioned = len( [ shard @@ -98,12 +112,27 @@ def test_kinesis_cloudformation_update(): Type: AWS::Kinesis::Stream Properties: ShardCount: 6 + RetentionPeriodHours: 24 + Tags: + - Key: TagKey1 + Value: TagValue1a + - Key: TagKey2 + Value: TagValue2a + """.strip() cf_conn.update_stack(StackName=stack_name, TemplateBody=template) stream_description = kinesis_conn.describe_stream(StreamName="MyStream")[ "StreamDescription" ] + stream_description["RetentionPeriodHours"].should.equal(24) + + tags = kinesis_conn.list_tags_for_stream(StreamName="MyStream")["Tags"] + tag1_value = [tag for tag in tags if tag["Key"] == "TagKey1"][0]["Value"] + tag2_value = [tag for tag in tags if tag["Key"] == "TagKey2"][0]["Value"] + tag1_value.should.equal("TagValue1a") + tag2_value.should.equal("TagValue2a") + shards_provisioned = len( [ shard diff --git a/tests/test_s3/test_s3.py b/tests/test_s3/test_s3.py index c8e3ed4de346..6622b2f41c19 100644 --- a/tests/test_s3/test_s3.py +++ b/tests/test_s3/test_s3.py @@ -2,7 +2,6 @@ from __future__ import unicode_literals import datetime -import os import sys from boto3 import Session @@ -11,7 +10,6 @@ from functools import wraps from gzip import GzipFile from io import BytesIO -import mimetypes import zlib import pickle import uuid @@ -36,7 +34,7 @@ import sure # noqa -from moto import settings, mock_s3, mock_s3_deprecated, mock_config, mock_cloudformation +from moto import settings, mock_s3, mock_s3_deprecated, mock_config import moto.s3.models as s3model from moto.core.exceptions import InvalidNextTokenException from moto.core.utils import py2_strip_unicode_keys @@ -4686,142 +4684,3 @@ def test_presigned_put_url_with_custom_headers(): s3.delete_object(Bucket=bucket, Key=key) s3.delete_bucket(Bucket=bucket) - - -@mock_s3 -@mock_cloudformation -def test_s3_bucket_cloudformation_basic(): - s3 = boto3.client("s3", region_name="us-east-1") - cf = boto3.client("cloudformation", region_name="us-east-1") - - template = { - "AWSTemplateFormatVersion": "2010-09-09", - "Resources": {"testInstance": {"Type": "AWS::S3::Bucket", "Properties": {},}}, - "Outputs": {"Bucket": {"Value": {"Ref": "testInstance"}}}, - } - template_json = json.dumps(template) - stack_id = cf.create_stack(StackName="test_stack", TemplateBody=template_json)[ - "StackId" - ] - stack_description = cf.describe_stacks(StackName="test_stack")["Stacks"][0] - - s3.head_bucket(Bucket=stack_description["Outputs"][0]["OutputValue"]) - - -@mock_s3 -@mock_cloudformation -def test_s3_bucket_cloudformation_with_properties(): - s3 = boto3.client("s3", region_name="us-east-1") - cf = boto3.client("cloudformation", region_name="us-east-1") - - bucket_name = "MyBucket" - template = { - "AWSTemplateFormatVersion": "2010-09-09", - "Resources": { 
- "testInstance": { - "Type": "AWS::S3::Bucket", - "Properties": { - "BucketName": bucket_name, - "BucketEncryption": { - "ServerSideEncryptionConfiguration": [ - { - "ServerSideEncryptionByDefault": { - "SSEAlgorithm": "AES256" - } - } - ] - }, - }, - } - }, - "Outputs": {"Bucket": {"Value": {"Ref": "testInstance"}}}, - } - template_json = json.dumps(template) - stack_id = cf.create_stack(StackName="test_stack", TemplateBody=template_json)[ - "StackId" - ] - stack_description = cf.describe_stacks(StackName="test_stack")["Stacks"][0] - s3.head_bucket(Bucket=bucket_name) - - encryption = s3.get_bucket_encryption(Bucket=bucket_name) - encryption["ServerSideEncryptionConfiguration"]["Rules"][0][ - "ApplyServerSideEncryptionByDefault" - ]["SSEAlgorithm"].should.equal("AES256") - - -@mock_s3 -@mock_cloudformation -def test_s3_bucket_cloudformation_update_no_interruption(): - s3 = boto3.client("s3", region_name="us-east-1") - cf = boto3.client("cloudformation", region_name="us-east-1") - - template = { - "AWSTemplateFormatVersion": "2010-09-09", - "Resources": {"testInstance": {"Type": "AWS::S3::Bucket"}}, - "Outputs": {"Bucket": {"Value": {"Ref": "testInstance"}}}, - } - template_json = json.dumps(template) - cf.create_stack(StackName="test_stack", TemplateBody=template_json) - stack_description = cf.describe_stacks(StackName="test_stack")["Stacks"][0] - s3.head_bucket(Bucket=stack_description["Outputs"][0]["OutputValue"]) - - template = { - "AWSTemplateFormatVersion": "2010-09-09", - "Resources": { - "testInstance": { - "Type": "AWS::S3::Bucket", - "Properties": { - "BucketEncryption": { - "ServerSideEncryptionConfiguration": [ - { - "ServerSideEncryptionByDefault": { - "SSEAlgorithm": "AES256" - } - } - ] - } - }, - } - }, - "Outputs": {"Bucket": {"Value": {"Ref": "testInstance"}}}, - } - template_json = json.dumps(template) - cf.update_stack(StackName="test_stack", TemplateBody=template_json) - encryption = s3.get_bucket_encryption( - Bucket=stack_description["Outputs"][0]["OutputValue"] - ) - encryption["ServerSideEncryptionConfiguration"]["Rules"][0][ - "ApplyServerSideEncryptionByDefault" - ]["SSEAlgorithm"].should.equal("AES256") - - -@mock_s3 -@mock_cloudformation -def test_s3_bucket_cloudformation_update_replacement(): - s3 = boto3.client("s3", region_name="us-east-1") - cf = boto3.client("cloudformation", region_name="us-east-1") - - template = { - "AWSTemplateFormatVersion": "2010-09-09", - "Resources": {"testInstance": {"Type": "AWS::S3::Bucket"}}, - "Outputs": {"Bucket": {"Value": {"Ref": "testInstance"}}}, - } - template_json = json.dumps(template) - cf.create_stack(StackName="test_stack", TemplateBody=template_json) - stack_description = cf.describe_stacks(StackName="test_stack")["Stacks"][0] - s3.head_bucket(Bucket=stack_description["Outputs"][0]["OutputValue"]) - - template = { - "AWSTemplateFormatVersion": "2010-09-09", - "Resources": { - "testInstance": { - "Type": "AWS::S3::Bucket", - "Properties": {"BucketName": "MyNewBucketName"}, - } - }, - "Outputs": {"Bucket": {"Value": {"Ref": "testInstance"}}}, - } - template_json = json.dumps(template) - cf.update_stack(StackName="test_stack", TemplateBody=template_json) - stack_description = cf.describe_stacks(StackName="test_stack")["Stacks"][0] - s3.head_bucket(Bucket=stack_description["Outputs"][0]["OutputValue"]) diff --git a/tests/test_s3/test_s3_cloudformation.py b/tests/test_s3/test_s3_cloudformation.py new file mode 100644 index 000000000000..69d0c9f984c0 --- /dev/null +++ b/tests/test_s3/test_s3_cloudformation.py @@ -0,0 
+1,145 @@ +import json +import boto3 + +import sure # noqa + +from moto import mock_s3, mock_cloudformation + + +@mock_s3 +@mock_cloudformation +def test_s3_bucket_cloudformation_basic(): + s3 = boto3.client("s3", region_name="us-east-1") + cf = boto3.client("cloudformation", region_name="us-east-1") + + template = { + "AWSTemplateFormatVersion": "2010-09-09", + "Resources": {"testInstance": {"Type": "AWS::S3::Bucket", "Properties": {},}}, + "Outputs": {"Bucket": {"Value": {"Ref": "testInstance"}}}, + } + template_json = json.dumps(template) + stack_id = cf.create_stack(StackName="test_stack", TemplateBody=template_json)[ + "StackId" + ] + stack_description = cf.describe_stacks(StackName="test_stack")["Stacks"][0] + + s3.head_bucket(Bucket=stack_description["Outputs"][0]["OutputValue"]) + + +@mock_s3 +@mock_cloudformation +def test_s3_bucket_cloudformation_with_properties(): + s3 = boto3.client("s3", region_name="us-east-1") + cf = boto3.client("cloudformation", region_name="us-east-1") + + bucket_name = "MyBucket" + template = { + "AWSTemplateFormatVersion": "2010-09-09", + "Resources": { + "testInstance": { + "Type": "AWS::S3::Bucket", + "Properties": { + "BucketName": bucket_name, + "BucketEncryption": { + "ServerSideEncryptionConfiguration": [ + { + "ServerSideEncryptionByDefault": { + "SSEAlgorithm": "AES256" + } + } + ] + }, + }, + } + }, + "Outputs": {"Bucket": {"Value": {"Ref": "testInstance"}}}, + } + template_json = json.dumps(template) + stack_id = cf.create_stack(StackName="test_stack", TemplateBody=template_json)[ + "StackId" + ] + stack_description = cf.describe_stacks(StackName="test_stack")["Stacks"][0] + s3.head_bucket(Bucket=bucket_name) + + encryption = s3.get_bucket_encryption(Bucket=bucket_name) + encryption["ServerSideEncryptionConfiguration"]["Rules"][0][ + "ApplyServerSideEncryptionByDefault" + ]["SSEAlgorithm"].should.equal("AES256") + + +@mock_s3 +@mock_cloudformation +def test_s3_bucket_cloudformation_update_no_interruption(): + s3 = boto3.client("s3", region_name="us-east-1") + cf = boto3.client("cloudformation", region_name="us-east-1") + + template = { + "AWSTemplateFormatVersion": "2010-09-09", + "Resources": {"testInstance": {"Type": "AWS::S3::Bucket"}}, + "Outputs": {"Bucket": {"Value": {"Ref": "testInstance"}}}, + } + template_json = json.dumps(template) + cf.create_stack(StackName="test_stack", TemplateBody=template_json) + stack_description = cf.describe_stacks(StackName="test_stack")["Stacks"][0] + s3.head_bucket(Bucket=stack_description["Outputs"][0]["OutputValue"]) + + template = { + "AWSTemplateFormatVersion": "2010-09-09", + "Resources": { + "testInstance": { + "Type": "AWS::S3::Bucket", + "Properties": { + "BucketEncryption": { + "ServerSideEncryptionConfiguration": [ + { + "ServerSideEncryptionByDefault": { + "SSEAlgorithm": "AES256" + } + } + ] + } + }, + } + }, + "Outputs": {"Bucket": {"Value": {"Ref": "testInstance"}}}, + } + template_json = json.dumps(template) + cf.update_stack(StackName="test_stack", TemplateBody=template_json) + encryption = s3.get_bucket_encryption( + Bucket=stack_description["Outputs"][0]["OutputValue"] + ) + encryption["ServerSideEncryptionConfiguration"]["Rules"][0][ + "ApplyServerSideEncryptionByDefault" + ]["SSEAlgorithm"].should.equal("AES256") + + +@mock_s3 +@mock_cloudformation +def test_s3_bucket_cloudformation_update_replacement(): + s3 = boto3.client("s3", region_name="us-east-1") + cf = boto3.client("cloudformation", region_name="us-east-1") + + template = { + "AWSTemplateFormatVersion": "2010-09-09", + 
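+        # the first template relies on a CloudFormation-generated bucket name;
+        # the updated template below pins BucketName, which forces a replacement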
"Resources": {"testInstance": {"Type": "AWS::S3::Bucket"}}, + "Outputs": {"Bucket": {"Value": {"Ref": "testInstance"}}}, + } + template_json = json.dumps(template) + cf.create_stack(StackName="test_stack", TemplateBody=template_json) + stack_description = cf.describe_stacks(StackName="test_stack")["Stacks"][0] + s3.head_bucket(Bucket=stack_description["Outputs"][0]["OutputValue"]) + + template = { + "AWSTemplateFormatVersion": "2010-09-09", + "Resources": { + "testInstance": { + "Type": "AWS::S3::Bucket", + "Properties": {"BucketName": "MyNewBucketName"}, + } + }, + "Outputs": {"Bucket": {"Value": {"Ref": "testInstance"}}}, + } + template_json = json.dumps(template) + cf.update_stack(StackName="test_stack", TemplateBody=template_json) + stack_description = cf.describe_stacks(StackName="test_stack")["Stacks"][0] + s3.head_bucket(Bucket=stack_description["Outputs"][0]["OutputValue"]) From 1c939a5f069d9a082f9d0fb8cbb83557b0ed8dbe Mon Sep 17 00:00:00 2001 From: usmangani1 Date: Thu, 27 Aug 2020 21:01:39 +0530 Subject: [PATCH 495/658] Fix:EC2-Create-Subnet availability Zone Id support (#3198) * Fix:EC2-Create-Subnet availability Zone Id support * Linting * Fix:fixed build errors * linting Co-authored-by: Bert Blommers Co-authored-by: Bert Blommers Co-authored-by: usmankb --- moto/ec2/models.py | 33 +++++++++++++++++++++++++-------- moto/ec2/responses/subnets.py | 12 +++++++----- tests/test_ec2/test_subnets.py | 26 +++++++++++++++++++++++--- 3 files changed, 55 insertions(+), 16 deletions(-) diff --git a/moto/ec2/models.py b/moto/ec2/models.py index f0ce89d8a42b..07a05bbda48a 100644 --- a/moto/ec2/models.py +++ b/moto/ec2/models.py @@ -3402,7 +3402,14 @@ def get_subnet(self, subnet_id): return subnets[subnet_id] raise InvalidSubnetIdError(subnet_id) - def create_subnet(self, vpc_id, cidr_block, availability_zone, context=None): + def create_subnet( + self, + vpc_id, + cidr_block, + availability_zone=None, + availability_zone_id=None, + context=None, + ): subnet_id = random_subnet_id() vpc = self.get_vpc( vpc_id @@ -3430,15 +3437,25 @@ def create_subnet(self, vpc_id, cidr_block, availability_zone, context=None): # consider it the default default_for_az = str(availability_zone not in self.subnets).lower() map_public_ip_on_launch = default_for_az - if availability_zone is None: + + if availability_zone is None and not availability_zone_id: availability_zone = "us-east-1a" try: - availability_zone_data = next( - zone - for zones in RegionsAndZonesBackend.zones.values() - for zone in zones - if zone.name == availability_zone - ) + if availability_zone: + availability_zone_data = next( + zone + for zones in RegionsAndZonesBackend.zones.values() + for zone in zones + if zone.name == availability_zone + ) + elif availability_zone_id: + availability_zone_data = next( + zone + for zones in RegionsAndZonesBackend.zones.values() + for zone in zones + if zone.zone_id == availability_zone_id + ) + except StopIteration: raise InvalidAvailabilityZoneError( availability_zone, diff --git a/moto/ec2/responses/subnets.py b/moto/ec2/responses/subnets.py index e11984e5265a..3bad8e12ff9f 100644 --- a/moto/ec2/responses/subnets.py +++ b/moto/ec2/responses/subnets.py @@ -9,12 +9,14 @@ class Subnets(BaseResponse): def create_subnet(self): vpc_id = self._get_param("VpcId") cidr_block = self._get_param("CidrBlock") - availability_zone = self._get_param( - "AvailabilityZone", - if_none=random.choice(self.ec2_backend.describe_availability_zones()).name, - ) + availability_zone = self._get_param("AvailabilityZone") + 
availability_zone_id = self._get_param("AvailabilityZoneId") + if not availability_zone and not availability_zone_id: + availability_zone = random.choice( + self.ec2_backend.describe_availability_zones() + ).name subnet = self.ec2_backend.create_subnet( - vpc_id, cidr_block, availability_zone, context=self + vpc_id, cidr_block, availability_zone, availability_zone_id, context=self ) template = self.response_template(CREATE_SUBNET_RESPONSE) return template.render(subnet=subnet) diff --git a/tests/test_ec2/test_subnets.py b/tests/test_ec2/test_subnets.py index eae0bc468d09..08d404b97f93 100644 --- a/tests/test_ec2/test_subnets.py +++ b/tests/test_ec2/test_subnets.py @@ -75,6 +75,18 @@ def test_subnet_should_have_proper_availability_zone_set(): subnetA.availability_zone.should.equal("us-west-1b") +@mock_ec2 +def test_availability_zone_in_create_subnet(): + ec2 = boto3.resource("ec2", region_name="us-west-1") + + vpc = ec2.create_vpc(CidrBlock="172.31.0.0/16") + + subnet = ec2.create_subnet( + VpcId=vpc.id, CidrBlock="172.31.48.0/20", AvailabilityZoneId="use1-az6" + ) + subnet.availability_zone_id.should.equal("use1-az6") + + @mock_ec2 def test_default_subnet(): ec2 = boto3.resource("ec2", region_name="us-west-1") @@ -612,7 +624,15 @@ def test_run_instances_should_attach_to_default_subnet(): # Assert subnet is created appropriately subnets = client.describe_subnets()["Subnets"] default_subnet_id = subnets[0]["SubnetId"] - instances["Instances"][0]["NetworkInterfaces"][0]["SubnetId"].should.equal( - default_subnet_id + if len(subnets) > 1: + default_subnet_id1 = subnets[1]["SubnetId"] + assert ( + instances["Instances"][0]["NetworkInterfaces"][0]["SubnetId"] + == default_subnet_id + or instances["Instances"][0]["NetworkInterfaces"][0]["SubnetId"] + == default_subnet_id1 + ) + assert ( + subnets[0]["AvailableIpAddressCount"] == 4090 + or subnets[1]["AvailableIpAddressCount"] == 4090 ) - subnets[0]["AvailableIpAddressCount"].should.equal(4090) From 0a89f9d1dfc3dacea3791533fbc27b7d08a276e9 Mon Sep 17 00:00:00 2001 From: usmangani1 Date: Thu, 27 Aug 2020 22:01:20 +0530 Subject: [PATCH 496/658] Fix:SQS:Added Non existent queue name in ERROR RESPONSE (#3261) * Fix:SQS:Added Non existent queue name in ERROR RESPONSE * Linting Co-authored-by: Bert Blommers --- moto/sqs/responses.py | 11 +++++++++-- tests/test_sqs/test_sqs.py | 8 ++++++-- 2 files changed, 15 insertions(+), 4 deletions(-) diff --git a/moto/sqs/responses.py b/moto/sqs/responses.py index 29804256c2b3..5cc77e9fb8a4 100644 --- a/moto/sqs/responses.py +++ b/moto/sqs/responses.py @@ -70,7 +70,10 @@ def _get_validated_visibility_timeout(self, timeout=None): def call_action(self): status_code, headers, body = super(SQSResponse, self).call_action() if status_code == 404: - return 404, headers, ERROR_INEXISTENT_QUEUE + queue_name = self.querystring.get("QueueName", [""])[0] + template = self.response_template(ERROR_INEXISTENT_QUEUE) + response = template.render(queue_name=queue_name) + return 404, headers, response return status_code, headers, body def _error(self, code, message, status=400): @@ -718,7 +721,11 @@ def list_queue_tags(self): Sender AWS.SimpleQueueService.NonExistentQueue - The specified queue does not exist for this wsdl version. + {% if queue_name %} + The specified queue {{queue_name}} does not exist for this wsdl version. + {% else %} + The specified queue does not exist for this wsdl version. 
+ {% endif %} b8bc806b-fa6b-53b5-8be8-cfa2f9836bc3 diff --git a/tests/test_sqs/test_sqs.py b/tests/test_sqs/test_sqs.py index 4de5b90180ec..945fe86ae0a3 100644 --- a/tests/test_sqs/test_sqs.py +++ b/tests/test_sqs/test_sqs.py @@ -198,7 +198,8 @@ def test_get_queue_url_errors(): client = boto3.client("sqs", region_name="us-east-1") client.get_queue_url.when.called_with(QueueName="non-existing-queue").should.throw( - ClientError, "The specified queue does not exist for this wsdl version." + ClientError, + "The specified queue non-existing-queue does not exist for this wsdl version.", ) @@ -206,10 +207,13 @@ def test_get_queue_url_errors(): def test_get_nonexistent_queue(): sqs = boto3.resource("sqs", region_name="us-east-1") with assert_raises(ClientError) as err: - sqs.get_queue_by_name(QueueName="nonexisting-queue") + sqs.get_queue_by_name(QueueName="non-existing-queue") ex = err.exception ex.operation_name.should.equal("GetQueueUrl") ex.response["Error"]["Code"].should.equal("AWS.SimpleQueueService.NonExistentQueue") + ex.response["Error"]["Message"].should.equal( + "The specified queue non-existing-queue does not exist for this wsdl version." + ) with assert_raises(ClientError) as err: sqs.Queue("http://whatever-incorrect-queue-address").load() From bcf61d0b09d9c082efed1fbfe2863d96b922c950 Mon Sep 17 00:00:00 2001 From: usmangani1 Date: Tue, 1 Sep 2020 12:55:59 +0530 Subject: [PATCH 497/658] Fix: Api-Gateway ApiKeyAlreadyExists headers change. (#3162) * Fix: Api-Gateway ApiKeyAlreadyExists headers change. * Added test for non decorator * Fixed cli errors * Fix:fixed build errors * Fix: assert only in case of non server mode Co-authored-by: usmankb --- moto/apigateway/responses.py | 2 +- tests/test_apigateway/test_apigateway.py | 17 +++++++++++++++++ 2 files changed, 18 insertions(+), 1 deletion(-) diff --git a/moto/apigateway/responses.py b/moto/apigateway/responses.py index d8f3ed5051c2..0454ae58e72e 100644 --- a/moto/apigateway/responses.py +++ b/moto/apigateway/responses.py @@ -449,7 +449,7 @@ def apikeys(self, request, full_url, headers): except ApiKeyAlreadyExists as error: return ( error.code, - self.headers, + {}, '{{"message":"{0}","code":"{1}"}}'.format( error.message, error.error_type ), diff --git a/tests/test_apigateway/test_apigateway.py b/tests/test_apigateway/test_apigateway.py index d79851ab0e2f..c58d644fad76 100644 --- a/tests/test_apigateway/test_apigateway.py +++ b/tests/test_apigateway/test_apigateway.py @@ -1858,6 +1858,23 @@ def test_create_api_key(): client.create_api_key.when.called_with(**payload).should.throw(ClientError) +@mock_apigateway +def test_create_api_headers(): + region_name = "us-west-2" + client = boto3.client("apigateway", region_name=region_name) + + apikey_value = "12345" + apikey_name = "TESTKEY1" + payload = {"value": apikey_value, "name": apikey_name} + + client.create_api_key(**payload) + with assert_raises(ClientError) as ex: + client.create_api_key(**payload) + ex.exception.response["Error"]["Code"].should.equal("ConflictException") + if not settings.TEST_SERVER_MODE: + ex.exception.response["ResponseMetadata"]["HTTPHeaders"].should.equal({}) + + @mock_apigateway def test_api_keys(): region_name = "us-west-2" From 236ab59afeb94a5bffa1447cab3404ae9c32aee7 Mon Sep 17 00:00:00 2001 From: xsphrx <34844540+xsphrx@users.noreply.github.com> Date: Tue, 1 Sep 2020 10:20:31 +0200 Subject: [PATCH 498/658] added cognito-idp initiate_auth and PASSWORD_VERIFIER challenge to respond_to_auth_challenge (#3260) * added cognito-idp initiate_auth and 
PASSWORD_VERIFIER challenge to respond_to_auth_challenge * fixed for python2 * added mfa, REFRESH_TOKEN to initiate_auth, SOFTWARE_TOKEN_MFA to respond_to_auth_challenge * added negative tests * test --- moto/cognitoidp/exceptions.py | 8 + moto/cognitoidp/models.py | 231 +++++++++++++- moto/cognitoidp/responses.py | 61 +++- moto/cognitoidp/utils.py | 11 + tests/test_cognitoidp/test_cognitoidp.py | 372 +++++++++++++++++++++++ 5 files changed, 681 insertions(+), 2 deletions(-) diff --git a/moto/cognitoidp/exceptions.py b/moto/cognitoidp/exceptions.py index c9b6368caa5e..baf5f6526e9b 100644 --- a/moto/cognitoidp/exceptions.py +++ b/moto/cognitoidp/exceptions.py @@ -45,6 +45,14 @@ def __init__(self, message): ) +class UserNotConfirmedException(BadRequest): + def __init__(self, message): + super(UserNotConfirmedException, self).__init__() + self.description = json.dumps( + {"message": message, "__type": "UserNotConfirmedException"} + ) + + class InvalidParameterException(JsonRESTError): def __init__(self, msg=None): self.code = 400 diff --git a/moto/cognitoidp/models.py b/moto/cognitoidp/models.py index a3cb69084725..bfa7177f16c8 100644 --- a/moto/cognitoidp/models.py +++ b/moto/cognitoidp/models.py @@ -21,13 +21,15 @@ ResourceNotFoundError, UserNotFoundError, UsernameExistsException, + UserNotConfirmedException, InvalidParameterException, ) -from .utils import create_id +from .utils import create_id, check_secret_hash UserStatus = { "FORCE_CHANGE_PASSWORD": "FORCE_CHANGE_PASSWORD", "CONFIRMED": "CONFIRMED", + "UNCONFIRMED": "UNCONFIRMED", } @@ -300,6 +302,9 @@ def __init__(self, user_pool_id, username, password, status, attributes): self.attributes = attributes self.create_date = datetime.datetime.utcnow() self.last_modified_date = datetime.datetime.utcnow() + self.sms_mfa_enabled = False + self.software_token_mfa_enabled = False + self.token_verified = False # Groups this user is a member of. # Note that these links are bidirectional. @@ -316,6 +321,11 @@ def _base_json(self): # list_users brings back "Attributes" while admin_get_user brings back "UserAttributes". 
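+    # When extended, to_json also reports the user's MFA state via UserMFASettingList.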
def to_json(self, extended=False, attributes_key="Attributes"): + user_mfa_setting_list = [] + if self.software_token_mfa_enabled: + user_mfa_setting_list.append("SOFTWARE_TOKEN_MFA") + elif self.sms_mfa_enabled: + user_mfa_setting_list.append("SMS_MFA") user_json = self._base_json() if extended: user_json.update( @@ -323,6 +333,7 @@ def to_json(self, extended=False, attributes_key="Attributes"): "Enabled": self.enabled, attributes_key: self.attributes, "MFAOptions": [], + "UserMFASettingList": user_mfa_setting_list, } ) @@ -731,6 +742,9 @@ def admin_initiate_auth(self, user_pool_id, client_id, auth_flow, auth_parameter def respond_to_auth_challenge( self, session, client_id, challenge_name, challenge_responses ): + if challenge_name == "PASSWORD_VERIFIER": + session = challenge_responses.get("PASSWORD_CLAIM_SECRET_BLOCK") + user_pool = self.sessions.get(session) if not user_pool: raise ResourceNotFoundError(session) @@ -751,6 +765,62 @@ def respond_to_auth_challenge( del self.sessions[session] return self._log_user_in(user_pool, client, username) + elif challenge_name == "PASSWORD_VERIFIER": + username = challenge_responses.get("USERNAME") + user = user_pool.users.get(username) + if not user: + raise UserNotFoundError(username) + + password_claim_signature = challenge_responses.get( + "PASSWORD_CLAIM_SIGNATURE" + ) + if not password_claim_signature: + raise ResourceNotFoundError(password_claim_signature) + password_claim_secret_block = challenge_responses.get( + "PASSWORD_CLAIM_SECRET_BLOCK" + ) + if not password_claim_secret_block: + raise ResourceNotFoundError(password_claim_secret_block) + timestamp = challenge_responses.get("TIMESTAMP") + if not timestamp: + raise ResourceNotFoundError(timestamp) + + if user.software_token_mfa_enabled: + return { + "ChallengeName": "SOFTWARE_TOKEN_MFA", + "Session": session, + "ChallengeParameters": {}, + } + + if user.sms_mfa_enabled: + return { + "ChallengeName": "SMS_MFA", + "Session": session, + "ChallengeParameters": {}, + } + + del self.sessions[session] + return self._log_user_in(user_pool, client, username) + elif challenge_name == "SOFTWARE_TOKEN_MFA": + username = challenge_responses.get("USERNAME") + user = user_pool.users.get(username) + if not user: + raise UserNotFoundError(username) + + software_token_mfa_code = challenge_responses.get("SOFTWARE_TOKEN_MFA_CODE") + if not software_token_mfa_code: + raise ResourceNotFoundError(software_token_mfa_code) + + if client.generate_secret: + secret_hash = challenge_responses.get("SECRET_HASH") + if not check_secret_hash( + client.secret, client.id, username, secret_hash + ): + raise NotAuthorizedError(secret_hash) + + del self.sessions[session] + return self._log_user_in(user_pool, client, username) + else: return {} @@ -806,6 +876,165 @@ def create_resource_server(self, user_pool_id, identifier, name, scopes): user_pool.resource_servers[identifier] = resource_server return resource_server + def sign_up(self, client_id, username, password, attributes): + user_pool = None + for p in self.user_pools.values(): + if client_id in p.clients: + user_pool = p + if user_pool is None: + raise ResourceNotFoundError(client_id) + + user = CognitoIdpUser( + user_pool_id=user_pool.id, + username=username, + password=password, + attributes=attributes, + status=UserStatus["UNCONFIRMED"], + ) + user_pool.users[user.username] = user + return user + + def confirm_sign_up(self, client_id, username, confirmation_code): + user_pool = None + for p in self.user_pools.values(): + if client_id in p.clients: + user_pool = p 
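+        # the owning pool is resolved by scanning clients, since the request
+        # carries only a client id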
+ if user_pool is None: + raise ResourceNotFoundError(client_id) + + if username not in user_pool.users: + raise UserNotFoundError(username) + + user = user_pool.users[username] + user.status = UserStatus["CONFIRMED"] + return "" + + def initiate_auth(self, client_id, auth_flow, auth_parameters): + user_pool = None + for p in self.user_pools.values(): + if client_id in p.clients: + user_pool = p + if user_pool is None: + raise ResourceNotFoundError(client_id) + + client = user_pool.clients.get(client_id) + + if auth_flow == "USER_SRP_AUTH": + username = auth_parameters.get("USERNAME") + srp_a = auth_parameters.get("SRP_A") + if not srp_a: + raise ResourceNotFoundError(srp_a) + if client.generate_secret: + secret_hash = auth_parameters.get("SECRET_HASH") + if not check_secret_hash( + client.secret, client.id, username, secret_hash + ): + raise NotAuthorizedError(secret_hash) + + user = user_pool.users.get(username) + if not user: + raise UserNotFoundError(username) + + if user.status == UserStatus["UNCONFIRMED"]: + raise UserNotConfirmedException("User is not confirmed.") + + session = str(uuid.uuid4()) + self.sessions[session] = user_pool + + return { + "ChallengeName": "PASSWORD_VERIFIER", + "Session": session, + "ChallengeParameters": { + "SALT": str(uuid.uuid4()), + "SRP_B": str(uuid.uuid4()), + "USERNAME": user.id, + "USER_ID_FOR_SRP": user.id, + "SECRET_BLOCK": session, + }, + } + elif auth_flow == "REFRESH_TOKEN": + refresh_token = auth_parameters.get("REFRESH_TOKEN") + if not refresh_token: + raise ResourceNotFoundError(refresh_token) + + client_id, username = user_pool.refresh_tokens[refresh_token] + if not username: + raise ResourceNotFoundError(username) + + if client.generate_secret: + secret_hash = auth_parameters.get("SECRET_HASH") + if not check_secret_hash( + client.secret, client.id, username, secret_hash + ): + raise NotAuthorizedError(secret_hash) + + ( + id_token, + access_token, + expires_in, + ) = user_pool.create_tokens_from_refresh_token(refresh_token) + + return { + "AuthenticationResult": { + "IdToken": id_token, + "AccessToken": access_token, + "ExpiresIn": expires_in, + } + } + else: + return None + + def associate_software_token(self, access_token): + for user_pool in self.user_pools.values(): + if access_token in user_pool.access_tokens: + _, username = user_pool.access_tokens[access_token] + user = user_pool.users.get(username) + if not user: + raise UserNotFoundError(username) + + return {"SecretCode": str(uuid.uuid4())} + else: + raise NotAuthorizedError(access_token) + + def verify_software_token(self, access_token, user_code): + for user_pool in self.user_pools.values(): + if access_token in user_pool.access_tokens: + _, username = user_pool.access_tokens[access_token] + user = user_pool.users.get(username) + if not user: + raise UserNotFoundError(username) + + user.token_verified = True + + return {"Status": "SUCCESS"} + else: + raise NotAuthorizedError(access_token) + + def set_user_mfa_preference( + self, access_token, software_token_mfa_settings, sms_mfa_settings + ): + for user_pool in self.user_pools.values(): + if access_token in user_pool.access_tokens: + _, username = user_pool.access_tokens[access_token] + user = user_pool.users.get(username) + if not user: + raise UserNotFoundError(username) + + if software_token_mfa_settings["Enabled"]: + if user.token_verified: + user.software_token_mfa_enabled = True + else: + raise InvalidParameterException( + "User has not verified software token mfa" + ) + + elif sms_mfa_settings["Enabled"]: + user.sms_mfa_enabled 
= True + + return None + else: + raise NotAuthorizedError(access_token) + cognitoidp_backends = {} for region in Session().get_available_regions("cognito-idp"): diff --git a/moto/cognitoidp/responses.py b/moto/cognitoidp/responses.py index 972ba883acd8..f3c005ff5732 100644 --- a/moto/cognitoidp/responses.py +++ b/moto/cognitoidp/responses.py @@ -4,7 +4,7 @@ import os from moto.core.responses import BaseResponse -from .models import cognitoidp_backends, find_region_by_value +from .models import cognitoidp_backends, find_region_by_value, UserStatus class CognitoIdpResponse(BaseResponse): @@ -390,6 +390,65 @@ def create_resource_server(self): ) return json.dumps({"ResourceServer": resource_server.to_json()}) + def sign_up(self): + client_id = self._get_param("ClientId") + username = self._get_param("Username") + password = self._get_param("Password") + user = cognitoidp_backends[self.region].sign_up( + client_id=client_id, + username=username, + password=password, + attributes=self._get_param("UserAttributes", []), + ) + return json.dumps( + { + "UserConfirmed": user.status == UserStatus["CONFIRMED"], + "UserSub": user.id, + } + ) + + def confirm_sign_up(self): + client_id = self._get_param("ClientId") + username = self._get_param("Username") + confirmation_code = self._get_param("ConfirmationCode") + cognitoidp_backends[self.region].confirm_sign_up( + client_id=client_id, username=username, confirmation_code=confirmation_code, + ) + return "" + + def initiate_auth(self): + client_id = self._get_param("ClientId") + auth_flow = self._get_param("AuthFlow") + auth_parameters = self._get_param("AuthParameters") + + auth_result = cognitoidp_backends[self.region].initiate_auth( + client_id, auth_flow, auth_parameters + ) + + return json.dumps(auth_result) + + def associate_software_token(self): + access_token = self._get_param("AccessToken") + result = cognitoidp_backends[self.region].associate_software_token(access_token) + return json.dumps(result) + + def verify_software_token(self): + access_token = self._get_param("AccessToken") + user_code = self._get_param("UserCode") + result = cognitoidp_backends[self.region].verify_software_token( + access_token, user_code + ) + return json.dumps(result) + + def set_user_mfa_preference(self): + access_token = self._get_param("AccessToken") + software_token_mfa_settings = self._get_param("SoftwareTokenMfaSettings") + sms_mfa_settings = self._get_param("SMSMfaSettings") + cognitoidp_backends[self.region].set_user_mfa_preference( + access_token, software_token_mfa_settings, sms_mfa_settings + ) + return "" + class CognitoIdpJsonWebKeyResponse(BaseResponse): def __init__(self): diff --git a/moto/cognitoidp/utils.py b/moto/cognitoidp/utils.py index 5f5fe4f8f85e..11f34bcaef52 100644 --- a/moto/cognitoidp/utils.py +++ b/moto/cognitoidp/utils.py @@ -2,9 +2,20 @@ import six import random import string +import hashlib +import hmac +import base64 def create_id(): size = 26 chars = list(range(10)) + list(string.ascii_lowercase) return "".join(six.text_type(random.choice(chars)) for x in range(size)) + + +def check_secret_hash(app_client_secret, app_client_id, username, secret_hash): + key = bytes(str(app_client_secret).encode("latin-1")) + msg = bytes(str(username + app_client_id).encode("latin-1")) + new_digest = hmac.new(key, msg, hashlib.sha256).digest() + SECRET_HASH = base64.b64encode(new_digest).decode() + return SECRET_HASH == secret_hash diff --git a/tests/test_cognitoidp/test_cognitoidp.py b/tests/test_cognitoidp/test_cognitoidp.py index 
39875aeb4406..65c5151e3e79 100644 --- a/tests/test_cognitoidp/test_cognitoidp.py +++ b/tests/test_cognitoidp/test_cognitoidp.py @@ -4,6 +4,9 @@ import os import random import re +import hmac +import hashlib +import base64 import requests import uuid @@ -1248,6 +1251,137 @@ def test_authentication_flow(): authentication_flow(conn) +def user_authentication_flow(conn): + username = str(uuid.uuid4()) + password = str(uuid.uuid4()) + user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] + user_attribute_name = str(uuid.uuid4()) + user_attribute_value = str(uuid.uuid4()) + client_id = conn.create_user_pool_client( + UserPoolId=user_pool_id, + ClientName=str(uuid.uuid4()), + ReadAttributes=[user_attribute_name], + GenerateSecret=True, + )["UserPoolClient"]["ClientId"] + + conn.sign_up( + ClientId=client_id, Username=username, Password=password, + ) + + client_secret = conn.describe_user_pool_client( + UserPoolId=user_pool_id, ClientId=client_id, + )["UserPoolClient"]["ClientSecret"] + + conn.confirm_sign_up( + ClientId=client_id, Username=username, ConfirmationCode="123456", + ) + + # generating secret hash + key = bytes(str(client_secret).encode("latin-1")) + msg = bytes(str(username + client_id).encode("latin-1")) + new_digest = hmac.new(key, msg, hashlib.sha256).digest() + secret_hash = base64.b64encode(new_digest).decode() + + result = conn.initiate_auth( + ClientId=client_id, + AuthFlow="USER_SRP_AUTH", + AuthParameters={ + "USERNAME": username, + "SRP_A": str(uuid.uuid4()), + "SECRET_HASH": secret_hash, + }, + ) + + result = conn.respond_to_auth_challenge( + ClientId=client_id, + ChallengeName=result["ChallengeName"], + ChallengeResponses={ + "PASSWORD_CLAIM_SIGNATURE": str(uuid.uuid4()), + "PASSWORD_CLAIM_SECRET_BLOCK": result["Session"], + "TIMESTAMP": str(uuid.uuid4()), + "USERNAME": username, + }, + ) + + refresh_token = result["AuthenticationResult"]["RefreshToken"] + + # add mfa token + conn.associate_software_token( + AccessToken=result["AuthenticationResult"]["AccessToken"], + ) + + conn.verify_software_token( + AccessToken=result["AuthenticationResult"]["AccessToken"], UserCode="123456", + ) + + conn.set_user_mfa_preference( + AccessToken=result["AuthenticationResult"]["AccessToken"], + SoftwareTokenMfaSettings={"Enabled": True, "PreferredMfa": True,}, + ) + + result = conn.initiate_auth( + ClientId=client_id, + AuthFlow="REFRESH_TOKEN", + AuthParameters={"SECRET_HASH": secret_hash, "REFRESH_TOKEN": refresh_token,}, + ) + + result["AuthenticationResult"]["IdToken"].should_not.be.none + result["AuthenticationResult"]["AccessToken"].should_not.be.none + + # authenticate user once again this time with mfa token + result = conn.initiate_auth( + ClientId=client_id, + AuthFlow="USER_SRP_AUTH", + AuthParameters={ + "USERNAME": username, + "SRP_A": str(uuid.uuid4()), + "SECRET_HASH": secret_hash, + }, + ) + + result = conn.respond_to_auth_challenge( + ClientId=client_id, + ChallengeName=result["ChallengeName"], + ChallengeResponses={ + "PASSWORD_CLAIM_SIGNATURE": str(uuid.uuid4()), + "PASSWORD_CLAIM_SECRET_BLOCK": result["Session"], + "TIMESTAMP": str(uuid.uuid4()), + "USERNAME": username, + }, + ) + + result = conn.respond_to_auth_challenge( + ClientId=client_id, + Session=result["Session"], + ChallengeName=result["ChallengeName"], + ChallengeResponses={ + "SOFTWARE_TOKEN_MFA_CODE": "123456", + "USERNAME": username, + "SECRET_HASH": secret_hash, + }, + ) + + return { + "user_pool_id": user_pool_id, + "client_id": client_id, + "client_secret": client_secret, + 
"secret_hash": secret_hash, + "id_token": result["AuthenticationResult"]["IdToken"], + "access_token": result["AuthenticationResult"]["AccessToken"], + "refresh_token": refresh_token, + "username": username, + "password": password, + "additional_fields": {user_attribute_name: user_attribute_value}, + } + + +@mock_cognitoidp +def test_user_authentication_flow(): + conn = boto3.client("cognito-idp", "us-west-2") + + user_authentication_flow(conn) + + @mock_cognitoidp def test_token_legitimacy(): conn = boto3.client("cognito-idp", "us-west-2") @@ -1437,6 +1571,244 @@ def test_resource_server(): ex.exception.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) +@mock_cognitoidp +def test_sign_up(): + conn = boto3.client("cognito-idp", "us-west-2") + user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] + client_id = conn.create_user_pool_client( + UserPoolId=user_pool_id, ClientName=str(uuid.uuid4()), + )["UserPoolClient"]["ClientId"] + username = str(uuid.uuid4()) + password = str(uuid.uuid4()) + result = conn.sign_up(ClientId=client_id, Username=username, Password=password) + result["UserConfirmed"].should.be.false + result["UserSub"].should_not.be.none + + +@mock_cognitoidp +def test_confirm_sign_up(): + conn = boto3.client("cognito-idp", "us-west-2") + username = str(uuid.uuid4()) + password = str(uuid.uuid4()) + user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] + client_id = conn.create_user_pool_client( + UserPoolId=user_pool_id, ClientName=str(uuid.uuid4()), GenerateSecret=True, + )["UserPoolClient"]["ClientId"] + conn.sign_up(ClientId=client_id, Username=username, Password=password) + + conn.confirm_sign_up( + ClientId=client_id, Username=username, ConfirmationCode="123456", + ) + + result = conn.admin_get_user(UserPoolId=user_pool_id, Username=username) + result["UserStatus"].should.equal("CONFIRMED") + + +@mock_cognitoidp +def test_initiate_auth_USER_SRP_AUTH(): + conn = boto3.client("cognito-idp", "us-west-2") + username = str(uuid.uuid4()) + password = str(uuid.uuid4()) + user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] + client_id = conn.create_user_pool_client( + UserPoolId=user_pool_id, ClientName=str(uuid.uuid4()), GenerateSecret=True, + )["UserPoolClient"]["ClientId"] + conn.sign_up(ClientId=client_id, Username=username, Password=password) + client_secret = conn.describe_user_pool_client( + UserPoolId=user_pool_id, ClientId=client_id, + )["UserPoolClient"]["ClientSecret"] + conn.confirm_sign_up( + ClientId=client_id, Username=username, ConfirmationCode="123456", + ) + + key = bytes(str(client_secret).encode("latin-1")) + msg = bytes(str(username + client_id).encode("latin-1")) + new_digest = hmac.new(key, msg, hashlib.sha256).digest() + secret_hash = base64.b64encode(new_digest).decode() + + result = conn.initiate_auth( + ClientId=client_id, + AuthFlow="USER_SRP_AUTH", + AuthParameters={ + "USERNAME": username, + "SRP_A": str(uuid.uuid4()), + "SECRET_HASH": secret_hash, + }, + ) + + result["ChallengeName"].should.equal("PASSWORD_VERIFIER") + + +@mock_cognitoidp +def test_initiate_auth_REFRESH_TOKEN(): + conn = boto3.client("cognito-idp", "us-west-2") + result = user_authentication_flow(conn) + result = conn.initiate_auth( + ClientId=result["client_id"], + AuthFlow="REFRESH_TOKEN", + AuthParameters={ + "REFRESH_TOKEN": result["refresh_token"], + "SECRET_HASH": result["secret_hash"], + }, + ) + + result["AuthenticationResult"]["AccessToken"].should_not.be.none + + 
+@mock_cognitoidp +def test_initiate_auth_for_unconfirmed_user(): + conn = boto3.client("cognito-idp", "us-west-2") + username = str(uuid.uuid4()) + password = str(uuid.uuid4()) + user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] + client_id = conn.create_user_pool_client( + UserPoolId=user_pool_id, ClientName=str(uuid.uuid4()), GenerateSecret=True, + )["UserPoolClient"]["ClientId"] + conn.sign_up(ClientId=client_id, Username=username, Password=password) + client_secret = conn.describe_user_pool_client( + UserPoolId=user_pool_id, ClientId=client_id, + )["UserPoolClient"]["ClientSecret"] + + key = bytes(str(client_secret).encode("latin-1")) + msg = bytes(str(username + client_id).encode("latin-1")) + new_digest = hmac.new(key, msg, hashlib.sha256).digest() + secret_hash = base64.b64encode(new_digest).decode() + + caught = False + try: + result = conn.initiate_auth( + ClientId=client_id, + AuthFlow="USER_SRP_AUTH", + AuthParameters={ + "USERNAME": username, + "SRP_A": str(uuid.uuid4()), + "SECRET_HASH": secret_hash, + }, + ) + except conn.exceptions.UserNotConfirmedException: + caught = True + + caught.should.be.true + + +@mock_cognitoidp +def test_initiate_auth_with_invalid_secret_hash(): + conn = boto3.client("cognito-idp", "us-west-2") + username = str(uuid.uuid4()) + password = str(uuid.uuid4()) + user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] + client_id = conn.create_user_pool_client( + UserPoolId=user_pool_id, ClientName=str(uuid.uuid4()), GenerateSecret=True, + )["UserPoolClient"]["ClientId"] + conn.sign_up(ClientId=client_id, Username=username, Password=password) + client_secret = conn.describe_user_pool_client( + UserPoolId=user_pool_id, ClientId=client_id, + )["UserPoolClient"]["ClientSecret"] + conn.confirm_sign_up( + ClientId=client_id, Username=username, ConfirmationCode="123456", + ) + + invalid_secret_hash = str(uuid.uuid4()) + + caught = False + try: + result = conn.initiate_auth( + ClientId=client_id, + AuthFlow="USER_SRP_AUTH", + AuthParameters={ + "USERNAME": username, + "SRP_A": str(uuid.uuid4()), + "SECRET_HASH": invalid_secret_hash, + }, + ) + except conn.exceptions.NotAuthorizedException: + caught = True + + caught.should.be.true + + +@mock_cognitoidp +def test_setting_mfa(): + conn = boto3.client("cognito-idp", "us-west-2") + result = authentication_flow(conn) + conn.associate_software_token(AccessToken=result["access_token"]) + conn.verify_software_token(AccessToken=result["access_token"], UserCode="123456") + conn.set_user_mfa_preference( + AccessToken=result["access_token"], + SoftwareTokenMfaSettings={"Enabled": True, "PreferredMfa": True}, + ) + result = conn.admin_get_user( + UserPoolId=result["user_pool_id"], Username=result["username"] + ) + + result["UserMFASettingList"].should.have.length_of(1) + + +@mock_cognitoidp +def test_setting_mfa_when_token_not_verified(): + conn = boto3.client("cognito-idp", "us-west-2") + result = authentication_flow(conn) + conn.associate_software_token(AccessToken=result["access_token"]) + + caught = False + try: + conn.set_user_mfa_preference( + AccessToken=result["access_token"], + SoftwareTokenMfaSettings={"Enabled": True, "PreferredMfa": True}, + ) + except conn.exceptions.InvalidParameterException: + caught = True + + caught.should.be.true + + +@mock_cognitoidp +def test_respond_to_auth_challenge_with_invalid_secret_hash(): + conn = boto3.client("cognito-idp", "us-west-2") + result = user_authentication_flow(conn) + + valid_secret_hash = 
result["secret_hash"] + invalid_secret_hash = str(uuid.uuid4()) + + challenge = conn.initiate_auth( + ClientId=result["client_id"], + AuthFlow="USER_SRP_AUTH", + AuthParameters={ + "USERNAME": result["username"], + "SRP_A": str(uuid.uuid4()), + "SECRET_HASH": valid_secret_hash, + }, + ) + + challenge = conn.respond_to_auth_challenge( + ClientId=result["client_id"], + ChallengeName=challenge["ChallengeName"], + ChallengeResponses={ + "PASSWORD_CLAIM_SIGNATURE": str(uuid.uuid4()), + "PASSWORD_CLAIM_SECRET_BLOCK": challenge["Session"], + "TIMESTAMP": str(uuid.uuid4()), + "USERNAME": result["username"], + }, + ) + + caught = False + try: + conn.respond_to_auth_challenge( + ClientId=result["client_id"], + Session=challenge["Session"], + ChallengeName=challenge["ChallengeName"], + ChallengeResponses={ + "SOFTWARE_TOKEN_MFA_CODE": "123456", + "USERNAME": result["username"], + "SECRET_HASH": invalid_secret_hash, + }, + ) + except conn.exceptions.NotAuthorizedException: + caught = True + + caught.should.be.true + + # Test will retrieve public key from cognito.amazonaws.com/.well-known/jwks.json, # which isnt mocked in ServerMode if not settings.TEST_SERVER_MODE: From 94c676b9cf19dba4a67fefbce0bc3bc885b8b5fa Mon Sep 17 00:00:00 2001 From: Peter Baumgartner Date: Tue, 1 Sep 2020 03:24:08 -0600 Subject: [PATCH 499/658] include=["TAGS"] for describe_task_definition (#3265) * include=["TAGS"] for describe_task_definition * Different approach * describe_services tags and tests --- moto/ecs/responses.py | 22 ++++++++++++++-------- tests/test_ecs/test_ecs_boto3.py | 19 +++++++++++++++++++ 2 files changed, 33 insertions(+), 8 deletions(-) diff --git a/moto/ecs/responses.py b/moto/ecs/responses.py index e911bb9432ca..15d2f0c4bfea 100644 --- a/moto/ecs/responses.py +++ b/moto/ecs/responses.py @@ -87,7 +87,10 @@ def list_task_definitions(self): def describe_task_definition(self): task_definition_str = self._get_param("taskDefinition") data = self.ecs_backend.describe_task_definition(task_definition_str) - return json.dumps({"taskDefinition": data.response_object, "failures": []}) + resp = {"taskDefinition": data.response_object, "failures": []} + if "TAGS" in self._get_param("include", []): + resp["tags"] = self.ecs_backend.list_tags_for_resource(data.arn) + return json.dumps(resp) def deregister_task_definition(self): task_definition_str = self._get_param("taskDefinition") @@ -191,13 +194,16 @@ def describe_services(self): cluster_str = self._get_param("cluster") service_names = self._get_param("services") services = self.ecs_backend.describe_services(cluster_str, service_names) - - return json.dumps( - { - "services": [service.response_object for service in services], - "failures": [], - } - ) + resp = { + "services": [service.response_object for service in services], + "failures": [], + } + if "TAGS" in self._get_param("include", []): + for i, service in enumerate(services): + resp["services"][i]["tags"] = self.ecs_backend.list_tags_for_resource( + service.arn + ) + return json.dumps(resp) def update_service(self): cluster_str = self._get_param("cluster") diff --git a/tests/test_ecs/test_ecs_boto3.py b/tests/test_ecs/test_ecs_boto3.py index d9360df9283e..d46c8b983013 100644 --- a/tests/test_ecs/test_ecs_boto3.py +++ b/tests/test_ecs/test_ecs_boto3.py @@ -254,6 +254,7 @@ def test_describe_task_definition(): "logConfiguration": {"logDriver": "json-file"}, } ], + tags=[{"key": "Name", "value": "test_ecs_task"}], ) _ = client.register_task_definition( family="test_ecs_task", @@ -297,6 +298,11 @@ def 
test_describe_task_definition(): "arn:aws:ecs:us-east-1:012345678910:task-definition/test_ecs_task:2" ) + response = client.describe_task_definition( + taskDefinition="test_ecs_task:1", include=["TAGS"] + ) + response["tags"].should.equal([{"key": "Name", "value": "test_ecs_task"}]) + @mock_ecs def test_deregister_task_definition(): @@ -512,6 +518,7 @@ def test_describe_services(): serviceName="test_ecs_service1", taskDefinition="test_ecs_task", desiredCount=2, + tags=[{"key": "Name", "value": "test_ecs_service1"}], ) _ = client.create_service( cluster="test_ecs_cluster", @@ -554,6 +561,18 @@ def test_describe_services(): datetime.now() - response["services"][0]["deployments"][0]["updatedAt"].replace(tzinfo=None) ).seconds.should.be.within(0, 10) + response = client.describe_services( + cluster="test_ecs_cluster", + services=[ + "test_ecs_service1", + "arn:aws:ecs:us-east-1:012345678910:service/test_ecs_service2", + ], + include=["TAGS"], + ) + response["services"][0]["tags"].should.equal( + [{"key": "Name", "value": "test_ecs_service1"}] + ) + response["services"][1]["tags"].should.equal([]) @mock_ecs From 127b3e73e91bebf470a53efc52888c341b95f6d8 Mon Sep 17 00:00:00 2001 From: Toshiya Kawasaki Date: Tue, 1 Sep 2020 19:44:13 +0900 Subject: [PATCH 500/658] Fix scaffold.py (#3270) * upgrade prompt-toolkit to make scaffold.py work * update append_mock_to_init_py * enable to run when method name is not upper camel case * support new moto/backend.py format * use prompt-toolkit 2.x.x to support python2 * fix invalid initialization of moto/backends.py --- requirements-dev.txt | 2 +- scripts/scaffold.py | 52 +++++++++++++++++++++----------------------- 2 files changed, 26 insertions(+), 28 deletions(-) diff --git a/requirements-dev.txt b/requirements-dev.txt index e40a568a5766..8a91eb14f9ad 100644 --- a/requirements-dev.txt +++ b/requirements-dev.txt @@ -12,7 +12,7 @@ boto3>=1.4.4 botocore>=1.15.13 six>=1.9 parameterized>=0.7.0 -prompt-toolkit==1.0.14 +prompt-toolkit==2.0.10 # 3.x is not available with python2 click==6.7 inflection==0.3.1 lxml==4.2.3 diff --git a/scripts/scaffold.py b/scripts/scaffold.py index 43a648b488b9..de6781b3f756 100755 --- a/scripts/scaffold.py +++ b/scripts/scaffold.py @@ -114,12 +114,12 @@ def append_mock_to_init_py(service): with open(path) as f: lines = [_.replace('\n', '') for _ in f.readlines()] - if any(_ for _ in lines if re.match('^from.*mock_{}.*$'.format(service), _)): + if any(_ for _ in lines if re.match('^mock_{}.*lazy_load(.*)$'.format(service), _)): return - filtered_lines = [_ for _ in lines if re.match('^from.*mock.*$', _)] + filtered_lines = [_ for _ in lines if re.match('^mock_.*lazy_load(.*)$', _)] last_import_line_index = lines.index(filtered_lines[-1]) - new_line = 'from .{} import mock_{} # noqa'.format(get_escaped_service(service), get_escaped_service(service)) + new_line = 'mock_{} = lazy_load(".{}", "mock_{}")'.format(get_escaped_service(service), get_escaped_service(service), get_escaped_service(service)) lines.insert(last_import_line_index + 1, new_line) body = '\n'.join(lines) + '\n' @@ -127,23 +127,6 @@ def append_mock_to_init_py(service): f.write(body) -def append_mock_import_to_backends_py(service): - path = os.path.join(os.path.dirname(__file__), '..', 'moto', 'backends.py') - with open(path) as f: - lines = [_.replace('\n', '') for _ in f.readlines()] - - if any(_ for _ in lines if re.match('^from moto\.{}.*{}_backends.*$'.format(service, service), _)): - return - filtered_lines = [_ for _ in lines if re.match('^from.*backends.*$', _)] - 
last_import_line_index = lines.index(filtered_lines[-1]) - - new_line = 'from moto.{} import {}_backends'.format(get_escaped_service(service), get_escaped_service(service)) - lines.insert(last_import_line_index + 1, new_line) - - body = '\n'.join(lines) + '\n' - with open(path, 'w') as f: - f.write(body) - def append_mock_dict_to_backends_py(service): path = os.path.join(os.path.dirname(__file__), '..', 'moto', 'backends.py') with open(path) as f: @@ -154,7 +137,7 @@ def append_mock_dict_to_backends_py(service): filtered_lines = [_ for _ in lines if re.match(".*\".*\":.*_backends.*", _)] last_elem_line_index = lines.index(filtered_lines[-1]) - new_line = " \"{}\": {}_backends,".format(service, get_escaped_service(service)) + new_line = " \"{}\": (\"{}\", \"{}_backends\"),".format(service, get_escaped_service(service), get_escaped_service(service)) prev_line = lines[last_elem_line_index] if not prev_line.endswith('{') and not prev_line.endswith(','): lines[last_elem_line_index] += ',' @@ -212,7 +195,6 @@ def initialize_service(service, operation, api_protocol): # append mock to init files append_mock_to_init_py(service) - append_mock_import_to_backends_py(service) append_mock_dict_to_backends_py(service) @@ -229,6 +211,9 @@ def to_snake_case(s): s1 = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', s) return re.sub('([a-z0-9])([A-Z])', r'\1_\2', s1).lower() +def get_operation_name_in_keys(operation_name, operation_keys): + index = [_.lower() for _ in operation_keys].index(operation_name.lower()) + return operation_keys[index] def get_function_in_responses(service, operation, protocol): """refers to definition of API in botocore, and autogenerates function @@ -237,7 +222,11 @@ def get_function_in_responses(service, operation, protocol): """ client = boto3.client(service) - aws_operation_name = to_upper_camel_case(operation) + aws_operation_name = get_operation_name_in_keys( + to_upper_camel_case(operation), + list(client._service_model._service_description['operations'].keys()) + ) + op_model = client._service_model.operation_model(aws_operation_name) if not hasattr(op_model.output_shape, 'members'): outputs = {} @@ -282,7 +271,10 @@ def get_function_in_models(service, operation): https://github.com/boto/botocore/blob/develop/botocore/data/elbv2/2015-12-01/service-2.json """ client = boto3.client(service) - aws_operation_name = to_upper_camel_case(operation) + aws_operation_name = get_operation_name_in_keys( + to_upper_camel_case(operation), + list(client._service_model._service_description['operations'].keys()) + ) op_model = client._service_model.operation_model(aws_operation_name) inputs = op_model.input_shape.members if not hasattr(op_model.output_shape, 'members'): @@ -329,7 +321,11 @@ def get_response_query_template(service, operation): https://github.com/boto/botocore/blob/develop/botocore/data/elbv2/2015-12-01/service-2.json """ client = boto3.client(service) - aws_operation_name = to_upper_camel_case(operation) + aws_operation_name = get_operation_name_in_keys( + to_upper_camel_case(operation), + list(client._service_model._service_description['operations'].keys()) + ) + op_model = client._service_model.operation_model(aws_operation_name) result_wrapper = op_model.output_shape.serialization['resultWrapper'] response_wrapper = result_wrapper.replace('Result', 'Response') @@ -403,11 +399,13 @@ def insert_code_to_class(path, base_class, new_code): with open(path, 'w') as f: f.write(body) - def insert_url(service, operation, api_protocol): client = boto3.client(service) service_class = 
client.__class__.__name__ - aws_operation_name = to_upper_camel_case(operation) + aws_operation_name = get_operation_name_in_keys( + to_upper_camel_case(operation), + list(client._service_model._service_description['operations'].keys()) + ) uri = client._service_model.operation_model(aws_operation_name).http['requestUri'] path = os.path.join(os.path.dirname(__file__), '..', 'moto', get_escaped_service(service), 'urls.py') From 3ea46617d93ff38f7c5adf76a3af6cf733fad564 Mon Sep 17 00:00:00 2001 From: usmangani1 Date: Tue, 1 Sep 2020 22:35:25 +0530 Subject: [PATCH 501/658] Fix:sqs get-queue-attributes response template (#3255) * Fix:sqs get-queue-attributes response template * Fix:removed debug statements * Modified the template * "fixed build issues" * Linting Co-authored-by: usmankb Co-authored-by: Bert Blommers --- moto/sqs/models.py | 3 ++- moto/sqs/responses.py | 10 ++++---- tests/test_sqs/test_sqs.py | 49 ++++++++++++++++++++++++++++++++++++++ 3 files changed, 57 insertions(+), 5 deletions(-) diff --git a/moto/sqs/models.py b/moto/sqs/models.py index 039224f5bcf3..71ca62941507 100644 --- a/moto/sqs/models.py +++ b/moto/sqs/models.py @@ -626,7 +626,8 @@ def get_queue_attributes(self, queue_name, attribute_names): attributes = queue.attributes else: for name in (name for name in attribute_names if name in queue.attributes): - attributes[name] = queue.attributes.get(name) + if queue.attributes.get(name) is not None: + attributes[name] = queue.attributes.get(name) return attributes diff --git a/moto/sqs/responses.py b/moto/sqs/responses.py index 5cc77e9fb8a4..e28fbca8a328 100644 --- a/moto/sqs/responses.py +++ b/moto/sqs/responses.py @@ -490,10 +490,12 @@ def list_queue_tags(self): GET_QUEUE_ATTRIBUTES_RESPONSE = """ {% for key, value in attributes.items() %} - - {{ key }} - {{ value }} - + {% if value is not none %} + + {{ key }} + {{ value }} + + {% endif %} {% endfor %} diff --git a/tests/test_sqs/test_sqs.py b/tests/test_sqs/test_sqs.py index 945fe86ae0a3..b072e8b94033 100644 --- a/tests/test_sqs/test_sqs.py +++ b/tests/test_sqs/test_sqs.py @@ -45,6 +45,25 @@ } }""" +TEST_POLICY = """ +{ + "Version":"2012-10-17", + "Statement":[ + { + "Effect": "Allow", + "Principal": { "AWS": "*" }, + "Action": "sqs:SendMessage", + "Resource": "'$sqs_queue_arn'", + "Condition":{ + "ArnEquals":{ + "aws:SourceArn":"'$sns_topic_arn'" + } + } + } + ] +} +""" + @mock_sqs def test_create_fifo_queue_fail(): @@ -1451,6 +1470,36 @@ def test_permissions(): ) +@mock_sqs +def test_get_queue_attributes_template_response_validation(): + client = boto3.client("sqs", region_name="us-east-1") + + resp = client.create_queue( + QueueName="test-dlr-queue.fifo", Attributes={"FifoQueue": "true"} + ) + queue_url = resp["QueueUrl"] + + attrs = client.get_queue_attributes(QueueUrl=queue_url, AttributeNames=["All"]) + assert attrs.get("Attributes").get("Policy") is None + + attributes = {"Policy": TEST_POLICY} + + client.set_queue_attributes(QueueUrl=queue_url, Attributes=attributes) + attrs = client.get_queue_attributes(QueueUrl=queue_url, AttributeNames=["Policy"]) + assert attrs.get("Attributes").get("Policy") is not None + + assert ( + json.loads(attrs.get("Attributes").get("Policy")).get("Version") == "2012-10-17" + ) + assert len(json.loads(attrs.get("Attributes").get("Policy")).get("Statement")) == 1 + assert ( + json.loads(attrs.get("Attributes").get("Policy")) + .get("Statement")[0] + .get("Action") + == "sqs:SendMessage" + ) + + @mock_sqs def test_add_permission_errors(): client = boto3.client("sqs", 
region_name="us-east-1") From 00a5641cb9e6205ca13064ff7184720299fdab64 Mon Sep 17 00:00:00 2001 From: usmangani1 Date: Wed, 2 Sep 2020 11:40:56 +0530 Subject: [PATCH 502/658] Fix:s3 Presign Put Request with File upload (#3235) * Fix:s3 Presign Put Request with File upload * Added imports in test Co-authored-by: usmankb --- moto/s3/responses.py | 5 +++++ tests/test_s3/test_s3.py | 25 ++++++++++++++++++++++++- 2 files changed, 29 insertions(+), 1 deletion(-) diff --git a/moto/s3/responses.py b/moto/s3/responses.py index 603571c0d852..364ae4623a0b 100644 --- a/moto/s3/responses.py +++ b/moto/s3/responses.py @@ -1092,6 +1092,11 @@ def _key_response(self, request, full_url, headers): else: # Flask server body = request.data + # when the data is being passed as a file + if request.files and not body: + for _, value in request.files.items(): + body = value.stream.read() + if body is None: body = b"" diff --git a/tests/test_s3/test_s3.py b/tests/test_s3/test_s3.py index 6622b2f41c19..078abfa3b32c 100644 --- a/tests/test_s3/test_s3.py +++ b/tests/test_s3/test_s3.py @@ -3,7 +3,7 @@ import datetime import sys - +import os from boto3 import Session from six.moves.urllib.request import urlopen from six.moves.urllib.error import HTTPError @@ -1054,6 +1054,29 @@ def test_streaming_upload_from_file_to_presigned_url(): assert response.status_code == 200 +@mock_s3 +def test_multipart_upload_from_file_to_presigned_url(): + s3 = boto3.client("s3", region_name=DEFAULT_REGION_NAME) + s3.create_bucket(Bucket="mybucket") + + params = {"Bucket": "mybucket", "Key": "file_upload"} + presigned_url = boto3.client("s3").generate_presigned_url( + "put_object", params, ExpiresIn=900 + ) + + file = open("text.txt", "w") + file.write("test") + file.close() + files = {"upload_file": open("text.txt", "rb")} + + requests.put(presigned_url, files=files) + resp = s3.get_object(Bucket="mybucket", Key="file_upload") + data = resp["Body"].read() + assert data == b"test" + # cleanup + os.remove("text.txt") + + @mock_s3 def test_s3_object_in_private_bucket(): s3 = boto3.resource("s3") From 25161c0c18252b4ab53af9453fa878253dadfc1f Mon Sep 17 00:00:00 2001 From: Toshiya Kawasaki Date: Wed, 2 Sep 2020 16:51:51 +0900 Subject: [PATCH 503/658] Add kinesisvideo (#3271) * kinesisvideo create_stream * add kinesis video stream description * add kinesisvideo describe_stream * add kinesisvideo list_streams * add kinesisvideo delete_stream * remove unused comment * remove duplicated definition * add kinesis video exceptions * pass region_name to kinesisvideo client in test * fix kinesisvideo url path * resolve conflict of kinesisvideo url and kinesis url * specify region name to kinesisvideobackend * Add get-dataendpoint to kinesisvideo * include stream name in ResourceInUseException of kinesisvideo * use ACCOUNT_ID from moto.core in kinesisvideo * add server test for kinesisvideo * split up kinesisvideo test --- moto/__init__.py | 1 + moto/backends.py | 1 + moto/kinesis/urls.py | 3 +- moto/kinesisvideo/__init__.py | 6 + moto/kinesisvideo/exceptions.py | 24 +++ moto/kinesisvideo/models.py | 147 +++++++++++++++++++ moto/kinesisvideo/responses.py | 70 +++++++++ moto/kinesisvideo/urls.py | 18 +++ tests/test_kinesisvideo/test_kinesisvideo.py | 140 ++++++++++++++++++ tests/test_kinesisvideo/test_server.py | 18 +++ 10 files changed, 427 insertions(+), 1 deletion(-) create mode 100644 moto/kinesisvideo/__init__.py create mode 100644 moto/kinesisvideo/exceptions.py create mode 100644 moto/kinesisvideo/models.py create mode 100644 
moto/kinesisvideo/responses.py create mode 100644 moto/kinesisvideo/urls.py create mode 100644 tests/test_kinesisvideo/test_kinesisvideo.py create mode 100644 tests/test_kinesisvideo/test_server.py diff --git a/moto/__init__.py b/moto/__init__.py index 7d841fbbc42d..da66d9c6164c 100644 --- a/moto/__init__.py +++ b/moto/__init__.py @@ -113,6 +113,7 @@ def f(*args, **kwargs): XRaySegment = lazy_load(".xray", "XRaySegment") mock_xray = lazy_load(".xray", "mock_xray") mock_xray_client = lazy_load(".xray", "mock_xray_client") +mock_kinesisvideo = lazy_load(".kinesisvideo", "mock_kinesisvideo") # import logging # logging.getLogger('boto').setLevel(logging.CRITICAL) diff --git a/moto/backends.py b/moto/backends.py index 4252bfd9582b..9216d4615f37 100644 --- a/moto/backends.py +++ b/moto/backends.py @@ -69,6 +69,7 @@ "sts": ("sts", "sts_backends"), "swf": ("swf", "swf_backends"), "xray": ("xray", "xray_backends"), + "kinesisvideo": ("kinesisvideo", "kinesisvideo_backends"), } diff --git a/moto/kinesis/urls.py b/moto/kinesis/urls.py index c95f03190187..a33225d60054 100644 --- a/moto/kinesis/urls.py +++ b/moto/kinesis/urls.py @@ -2,7 +2,8 @@ from .responses import KinesisResponse url_bases = [ - "https?://kinesis.(.+).amazonaws.com", + # Need to avoid conflicting with kinesisvideo + r"https?://kinesis\.(.+).amazonaws.com", "https?://firehose.(.+).amazonaws.com", ] diff --git a/moto/kinesisvideo/__init__.py b/moto/kinesisvideo/__init__.py new file mode 100644 index 000000000000..ee79d957be27 --- /dev/null +++ b/moto/kinesisvideo/__init__.py @@ -0,0 +1,6 @@ +from __future__ import unicode_literals +from .models import kinesisvideo_backends +from ..core.models import base_decorator + +kinesisvideo_backend = kinesisvideo_backends["us-east-1"] +mock_kinesisvideo = base_decorator(kinesisvideo_backends) diff --git a/moto/kinesisvideo/exceptions.py b/moto/kinesisvideo/exceptions.py new file mode 100644 index 000000000000..e2e119b3799d --- /dev/null +++ b/moto/kinesisvideo/exceptions.py @@ -0,0 +1,24 @@ +from __future__ import unicode_literals + +from moto.core.exceptions import RESTError + + +class KinesisvideoClientError(RESTError): + code = 400 + + +class ResourceNotFoundException(KinesisvideoClientError): + def __init__(self): + self.code = 404 + super(ResourceNotFoundException, self).__init__( + "ResourceNotFoundException", + "The requested stream is not found or not active.", + ) + + +class ResourceInUseException(KinesisvideoClientError): + def __init__(self, message): + self.code = 400 + super(ResourceInUseException, self).__init__( + "ResourceInUseException", message, + ) diff --git a/moto/kinesisvideo/models.py b/moto/kinesisvideo/models.py new file mode 100644 index 000000000000..90d84ac0228b --- /dev/null +++ b/moto/kinesisvideo/models.py @@ -0,0 +1,147 @@ +from __future__ import unicode_literals +from boto3 import Session +from moto.core import BaseBackend, BaseModel +from datetime import datetime +from .exceptions import ( + ResourceNotFoundException, + ResourceInUseException, +) +import random +import string +from moto.core.utils import get_random_hex +from moto.core import ACCOUNT_ID + + +class Stream(BaseModel): + def __init__( + self, + region_name, + device_name, + stream_name, + media_type, + kms_key_id, + data_retention_in_hours, + tags, + ): + self.region_name = region_name + self.stream_name = stream_name + self.device_name = device_name + self.media_type = media_type + self.kms_key_id = kms_key_id + self.data_retention_in_hours = data_retention_in_hours + self.tags = tags + self.status 
= "ACTIVE" + self.version = self._get_random_string() + self.creation_time = datetime.utcnow() + stream_arn = "arn:aws:kinesisvideo:{}:{}:stream/{}/1598784211076".format( + self.region_name, ACCOUNT_ID, self.stream_name + ) + self.data_endpoint_number = get_random_hex() + self.arn = stream_arn + + def _get_random_string(self, length=20): + letters = string.ascii_lowercase + result_str = "".join([random.choice(letters) for _ in range(length)]) + return result_str + + def get_data_endpoint(self, api_name): + data_endpoint_prefix = "s-" if api_name in ("PUT_MEDIA", "GET_MEDIA") else "b-" + return "https://{}{}.kinesisvideo.{}.amazonaws.com".format( + data_endpoint_prefix, self.data_endpoint_number, self.region_name + ) + + def to_dict(self): + return { + "DeviceName": self.device_name, + "StreamName": self.stream_name, + "StreamARN": self.arn, + "MediaType": self.media_type, + "KmsKeyId": self.kms_key_id, + "Version": self.version, + "Status": self.status, + "CreationTime": self.creation_time.isoformat(), + "DataRetentionInHours": self.data_retention_in_hours, + } + + +class KinesisVideoBackend(BaseBackend): + def __init__(self, region_name=None): + super(KinesisVideoBackend, self).__init__() + self.region_name = region_name + self.streams = {} + + def reset(self): + region_name = self.region_name + self.__dict__ = {} + self.__init__(region_name) + + def create_stream( + self, + device_name, + stream_name, + media_type, + kms_key_id, + data_retention_in_hours, + tags, + ): + streams = [_ for _ in self.streams.values() if _.stream_name == stream_name] + if len(streams) > 0: + raise ResourceInUseException( + "The stream {} already exists.".format(stream_name) + ) + stream = Stream( + self.region_name, + device_name, + stream_name, + media_type, + kms_key_id, + data_retention_in_hours, + tags, + ) + self.streams[stream.arn] = stream + return stream.arn + + def _get_stream(self, stream_name, stream_arn): + if stream_name: + streams = [_ for _ in self.streams.values() if _.stream_name == stream_name] + if len(streams) == 0: + raise ResourceNotFoundException() + stream = streams[0] + elif stream_arn: + stream = self.streams.get(stream_arn) + if stream is None: + raise ResourceNotFoundException() + return stream + + def describe_stream(self, stream_name, stream_arn): + stream = self._get_stream(stream_name, stream_arn) + stream_info = stream.to_dict() + return stream_info + + def list_streams(self, max_results, next_token, stream_name_condition): + stream_info_list = [_.to_dict() for _ in self.streams.values()] + next_token = None + return stream_info_list, next_token + + def delete_stream(self, stream_arn, current_version): + stream = self.streams.get(stream_arn) + if stream is None: + raise ResourceNotFoundException() + del self.streams[stream_arn] + + def get_data_endpoint(self, stream_name, stream_arn, api_name): + stream = self._get_stream(stream_name, stream_arn) + return stream.get_data_endpoint(api_name) + + # add methods from here + + +kinesisvideo_backends = {} +for region in Session().get_available_regions("kinesisvideo"): + kinesisvideo_backends[region] = KinesisVideoBackend(region) +for region in Session().get_available_regions( + "kinesisvideo", partition_name="aws-us-gov" +): + kinesisvideo_backends[region] = KinesisVideoBackend(region) +for region in Session().get_available_regions("kinesisvideo", partition_name="aws-cn"): + kinesisvideo_backends[region] = KinesisVideoBackend(region) diff --git a/moto/kinesisvideo/responses.py b/moto/kinesisvideo/responses.py new file mode 100644 
index 000000000000..376e5b5fe897 --- /dev/null +++ b/moto/kinesisvideo/responses.py @@ -0,0 +1,70 @@ +from __future__ import unicode_literals +from moto.core.responses import BaseResponse +from .models import kinesisvideo_backends +import json + + +class KinesisVideoResponse(BaseResponse): + SERVICE_NAME = "kinesisvideo" + + @property + def kinesisvideo_backend(self): + return kinesisvideo_backends[self.region] + + def create_stream(self): + device_name = self._get_param("DeviceName") + stream_name = self._get_param("StreamName") + media_type = self._get_param("MediaType") + kms_key_id = self._get_param("KmsKeyId") + data_retention_in_hours = self._get_int_param("DataRetentionInHours") + tags = self._get_param("Tags") + stream_arn = self.kinesisvideo_backend.create_stream( + device_name=device_name, + stream_name=stream_name, + media_type=media_type, + kms_key_id=kms_key_id, + data_retention_in_hours=data_retention_in_hours, + tags=tags, + ) + return json.dumps(dict(StreamARN=stream_arn)) + + def describe_stream(self): + stream_name = self._get_param("StreamName") + stream_arn = self._get_param("StreamARN") + stream_info = self.kinesisvideo_backend.describe_stream( + stream_name=stream_name, stream_arn=stream_arn, + ) + return json.dumps(dict(StreamInfo=stream_info)) + + def list_streams(self): + max_results = self._get_int_param("MaxResults") + next_token = self._get_param("NextToken") + stream_name_condition = self._get_param("StreamNameCondition") + stream_info_list, next_token = self.kinesisvideo_backend.list_streams( + max_results=max_results, + next_token=next_token, + stream_name_condition=stream_name_condition, + ) + return json.dumps(dict(StreamInfoList=stream_info_list, NextToken=next_token)) + + def delete_stream(self): + stream_arn = self._get_param("StreamARN") + current_version = self._get_param("CurrentVersion") + self.kinesisvideo_backend.delete_stream( + stream_arn=stream_arn, current_version=current_version, + ) + return json.dumps(dict()) + + def get_data_endpoint(self): + stream_name = self._get_param("StreamName") + stream_arn = self._get_param("StreamARN") + api_name = self._get_param("APIName") + data_endpoint = self.kinesisvideo_backend.get_data_endpoint( + stream_name=stream_name, stream_arn=stream_arn, api_name=api_name, + ) + return json.dumps(dict(DataEndpoint=data_endpoint)) + + # add methods from here + + +# add templates from here diff --git a/moto/kinesisvideo/urls.py b/moto/kinesisvideo/urls.py new file mode 100644 index 000000000000..9aab7f8e2001 --- /dev/null +++ b/moto/kinesisvideo/urls.py @@ -0,0 +1,18 @@ +from __future__ import unicode_literals +from .responses import KinesisVideoResponse + +url_bases = [ + "https?://kinesisvideo.(.+).amazonaws.com", +] + + +response = KinesisVideoResponse() + + +url_paths = { + "{0}/createStream$": response.dispatch, + "{0}/describeStream$": response.dispatch, + "{0}/deleteStream$": response.dispatch, + "{0}/listStreams$": response.dispatch, + "{0}/getDataEndpoint$": response.dispatch, +} diff --git a/tests/test_kinesisvideo/test_kinesisvideo.py b/tests/test_kinesisvideo/test_kinesisvideo.py new file mode 100644 index 000000000000..de3d9ebbbec7 --- /dev/null +++ b/tests/test_kinesisvideo/test_kinesisvideo.py @@ -0,0 +1,140 @@ +from __future__ import unicode_literals + +import boto3 +import sure # noqa +from nose.tools import assert_raises +from moto import mock_kinesisvideo +from botocore.exceptions import ClientError +import json + + +@mock_kinesisvideo +def test_create_stream(): + client = 
boto3.client("kinesisvideo", region_name="ap-northeast-1") + stream_name = "my-stream" + device_name = "random-device" + + # stream can be created + res = client.create_stream(StreamName=stream_name, DeviceName=device_name) + res.should.have.key("StreamARN").which.should.contain(stream_name) + + +@mock_kinesisvideo +def test_create_stream_with_same_name(): + client = boto3.client("kinesisvideo", region_name="ap-northeast-1") + stream_name = "my-stream" + device_name = "random-device" + + client.create_stream(StreamName=stream_name, DeviceName=device_name) + + # cannot create with same stream name + with assert_raises(ClientError): + client.create_stream(StreamName=stream_name, DeviceName=device_name) + + +@mock_kinesisvideo +def test_describe_stream(): + client = boto3.client("kinesisvideo", region_name="ap-northeast-1") + stream_name = "my-stream" + device_name = "random-device" + + res = client.create_stream(StreamName=stream_name, DeviceName=device_name) + res.should.have.key("StreamARN").which.should.contain(stream_name) + stream_arn = res["StreamARN"] + + # cannot create with existing stream name + with assert_raises(ClientError): + client.create_stream(StreamName=stream_name, DeviceName=device_name) + + # stream can be described with name + res = client.describe_stream(StreamName=stream_name) + res.should.have.key("StreamInfo") + stream_info = res["StreamInfo"] + stream_info.should.have.key("StreamARN").which.should.contain(stream_name) + stream_info.should.have.key("StreamName").which.should.equal(stream_name) + stream_info.should.have.key("DeviceName").which.should.equal(device_name) + + # stream can be described with arn + res = client.describe_stream(StreamARN=stream_arn) + res.should.have.key("StreamInfo") + stream_info = res["StreamInfo"] + stream_info.should.have.key("StreamARN").which.should.contain(stream_name) + stream_info.should.have.key("StreamName").which.should.equal(stream_name) + stream_info.should.have.key("DeviceName").which.should.equal(device_name) + + +@mock_kinesisvideo +def test_describe_stream_with_name_not_exist(): + client = boto3.client("kinesisvideo", region_name="ap-northeast-1") + stream_name_not_exist = "not-exist-stream" + + # cannot describe with not exist stream name + with assert_raises(ClientError): + client.describe_stream(StreamName=stream_name_not_exist) + + +@mock_kinesisvideo +def test_list_streams(): + client = boto3.client("kinesisvideo", region_name="ap-northeast-1") + stream_name = "my-stream" + stream_name_2 = "my-stream-2" + device_name = "random-device" + + client.create_stream(StreamName=stream_name, DeviceName=device_name) + client.create_stream(StreamName=stream_name_2, DeviceName=device_name) + + # streams can be listed + res = client.list_streams() + res.should.have.key("StreamInfoList") + streams = res["StreamInfoList"] + streams.should.have.length_of(2) + + +@mock_kinesisvideo +def test_delete_stream(): + client = boto3.client("kinesisvideo", region_name="ap-northeast-1") + stream_name = "my-stream" + stream_name_2 = "my-stream-2" + device_name = "random-device" + + client.create_stream(StreamName=stream_name, DeviceName=device_name) + res = client.create_stream(StreamName=stream_name_2, DeviceName=device_name) + stream_2_arn = res["StreamARN"] + + # stream can be deleted + client.delete_stream(StreamARN=stream_2_arn) + res = client.list_streams() + streams = res["StreamInfoList"] + streams.should.have.length_of(1) + + +@mock_kinesisvideo +def test_delete_stream_with_arn_not_exist(): + client = boto3.client("kinesisvideo", 
region_name="ap-northeast-1")
+    stream_name = "my-stream"
+    stream_name_2 = "my-stream-2"
+    device_name = "random-device"
+
+    client.create_stream(StreamName=stream_name, DeviceName=device_name)
+    res = client.create_stream(StreamName=stream_name_2, DeviceName=device_name)
+    stream_2_arn = res["StreamARN"]
+
+    client.delete_stream(StreamARN=stream_2_arn)
+
+    # cannot delete a non-existent stream
+    stream_arn_not_exist = stream_2_arn
+    with assert_raises(ClientError):
+        client.delete_stream(StreamARN=stream_arn_not_exist)
+
+
+@mock_kinesisvideo
+def test_data_endpoint():
+    client = boto3.client("kinesisvideo", region_name="ap-northeast-1")
+    stream_name = "my-stream"
+    device_name = "random-device"
+
+    # data endpoint can be retrieved
+    api_name = "GET_MEDIA"
+    client.create_stream(StreamName=stream_name, DeviceName=device_name)
+    res = client.get_data_endpoint(StreamName=stream_name, APIName=api_name)
+    res.should.have.key("DataEndpoint")
diff --git a/tests/test_kinesisvideo/test_server.py b/tests/test_kinesisvideo/test_server.py
new file mode 100644
index 000000000000..20301353ffbc
--- /dev/null
+++ b/tests/test_kinesisvideo/test_server.py
@@ -0,0 +1,18 @@
+from __future__ import unicode_literals
+
+import sure  # noqa
+
+import moto.server as server
+from moto import mock_kinesisvideo
+
+"""
+Test the different server responses
+"""
+
+
+@mock_kinesisvideo
+def test_kinesisvideo_server_is_up():
+    backend = server.create_backend_app("kinesisvideo")
+    test_client = backend.test_client()
+    res = test_client.post("/listStreams")
+    res.status_code.should.equal(200)

From 6c4a60d0376d11fa7f5128f6b586ae65ed4ab3fa Mon Sep 17 00:00:00 2001
From: Toshiya Kawasaki
Date: Wed, 2 Sep 2020 17:59:26 +0900
Subject: [PATCH 504/658] Add introduction of using scaffold.py (#3274)

---
 CONTRIBUTING.md | 35 +++++++++++++++++++++++++++++++++++
 1 file changed, 35 insertions(+)

diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index edcc4656176f..e4a189e5e5da 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -50,6 +50,41 @@ Note the `urls.py` that redirects all incoming URL requests to a generic `dispat
 If you want more control over incoming requests or their bodies, it is possible to redirect specific requests to a custom method. See this PR for an example: https://github.com/spulec/moto/pull/2957/files
 
+### Generating template code for services
+
+Using `scripts/scaffold.py`, you can automatically generate template code for new services and for new methods of existing services. The script looks up the API specification of the given boto3 method and adds the necessary code, including request and response parameters. In some cases, it fails to generate code.
+Please try it out by running `python scripts/scaffold.py`:
+
+```bash
+$ python scripts/scaffold.py
+Select service: codedeploy
+
+==Current Implementation Status==
+[ ] add_tags_to_on_premises_instances
+...
+[ ] create_deployment
+...
+[ ] update_deployment_group
+=================================
+Select Operation: create_deployment
+
+
+    Initializing service codedeploy
+    creating moto/codedeploy
+    creating moto/codedeploy/models.py
+    creating moto/codedeploy/exceptions.py
+    creating moto/codedeploy/__init__.py
+    creating moto/codedeploy/responses.py
+    creating moto/codedeploy/urls.py
+    creating tests/test_codedeploy
+    creating tests/test_codedeploy/test_server.py
+    creating tests/test_codedeploy/test_codedeploy.py
+    inserting code moto/codedeploy/responses.py
+    inserting code moto/codedeploy/models.py
+You will still need to add the mock into "__init__.py"
+```
+
+
 ## Maintainers
 
 ### Releasing a new version of Moto

From 3fb7cf75d43562ba0dc494be6021a86fb0bc3436 Mon Sep 17 00:00:00 2001
From: Karthikeyan Singaravelan
Date: Wed, 2 Sep 2020 15:40:29 +0530
Subject: [PATCH 505/658] Fix deprecation warning due to base64.decodestring in Python 3. (#3272)

---
 tests/test_ec2/test_instances.py | 7 ++++++-
 1 file changed, 6 insertions(+), 1 deletion(-)

diff --git a/tests/test_ec2/test_instances.py b/tests/test_ec2/test_instances.py
index 1310b3a1d181..7ec385973cee 100644
--- a/tests/test_ec2/test_instances.py
+++ b/tests/test_ec2/test_instances.py
@@ -23,6 +23,11 @@
 from tests.helpers import requires_boto_gte
 
+if six.PY2:
+    decode_method = base64.decodestring
+else:
+    decode_method = base64.decodebytes
+
+
 ################ Test Readme ###############
 def add_servers(ami_id, count):
     conn = boto.connect_ec2()
@@ -908,7 +913,7 @@ def test_user_data_with_run_instance():
     instance_attribute = instance.get_attribute("userData")
     instance_attribute.should.be.a(InstanceAttribute)
     retrieved_user_data = instance_attribute.get("userData").encode("utf-8")
-    decoded_user_data = base64.decodestring(retrieved_user_data)
+    decoded_user_data = decode_method(retrieved_user_data)
     decoded_user_data.should.equal(b"some user data")
 

From d2e16ecc2eadc5d1d751a4e8daf27ba96d827d65 Mon Sep 17 00:00:00 2001
From: usmangani1
Date: Wed, 2 Sep 2020 23:05:53 +0530
Subject: [PATCH 506/658] Fix:s3 Presign Post with object acl (#3246)

* Fix:s3 Presign Post with object acl

* Added import in tests

Co-authored-by: usmankb
---
 moto/s3/responses.py     |  4 ++++
 tests/test_s3/test_s3.py | 33 +++++++++++++++++++++++++++++++++
 2 files changed, 37 insertions(+)

diff --git a/moto/s3/responses.py b/moto/s3/responses.py
index 364ae4623a0b..395cb573676a 100644
--- a/moto/s3/responses.py
+++ b/moto/s3/responses.py
@@ -860,6 +860,10 @@ def _bucket_response_post(self, request, body, bucket_name):
 
         new_key = self.backend.set_object(bucket_name, key, f)
 
+        if form.get("acl"):
+            acl = get_canned_acl(form.get("acl"))
+            new_key.set_acl(acl)
+
         # Metadata
         metadata = metadata_from_headers(form)
         new_key.set_metadata(metadata)
diff --git a/tests/test_s3/test_s3.py b/tests/test_s3/test_s3.py
index 078abfa3b32c..960594801cbe 100644
--- a/tests/test_s3/test_s3.py
+++ b/tests/test_s3/test_s3.py
@@ -2800,6 +2800,39 @@ def test_put_bucket_acl_body():
     assert not result.get("Grants")
 
 
+@mock_s3
+def test_object_acl_with_presigned_post():
+    s3 = boto3.client("s3", region_name=DEFAULT_REGION_NAME)
+
+    bucket_name = "imageS3Bucket"
+    object_name = "text.txt"
+    fields = {"acl": "public-read"}
+    file = open("text.txt", "w")
+    file.write("test")
+    file.close()
+
+    s3.create_bucket(Bucket=bucket_name)
+    response = s3.generate_presigned_post(
+        bucket_name, object_name, Fields=fields, ExpiresIn=60000
+    )
+
+    with open(object_name, "rb") as f:
+        files = {"file": (object_name, f)}
+
requests.post(response["url"], data=response["fields"], files=files) + + response = s3.get_object_acl(Bucket=bucket_name, Key=object_name) + + assert "Grants" in response + assert len(response["Grants"]) == 2 + assert response["Grants"][1]["Permission"] == "READ" + + response = s3.get_object(Bucket=bucket_name, Key=object_name) + + assert "ETag" in response + assert "Body" in response + os.remove("text.txt") + + @mock_s3 def test_put_bucket_notification(): s3 = boto3.client("s3", region_name=DEFAULT_REGION_NAME) From 49b12ab7f567f82573b9c3d006f0be2bc40f44b9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?S=C5=82awek=20Ehlert?= Date: Sat, 27 Jul 2019 12:52:23 +0200 Subject: [PATCH 507/658] First stab at extracting deps in setup.py to extras --- requirements.txt | 2 +- setup.py | 55 ++++++++++++++++++++++++++++++++++++++---------- 2 files changed, 45 insertions(+), 12 deletions(-) diff --git a/requirements.txt b/requirements.txt index 4de489f8c3f7..f5a476248fa4 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,2 +1,2 @@ # Please add requirements to setup.py --e . +-e .[all] diff --git a/setup.py b/setup.py index ffaa8b273ce2..92ab7a5de0ea 100755 --- a/setup.py +++ b/setup.py @@ -33,22 +33,13 @@ def get_version(): "boto>=2.36.0", "boto3>=1.9.201", "botocore>=1.12.201", - "cryptography>=2.3.0", "requests>=2.5", "xmltodict", "six>1.9", "werkzeug", - "PyYAML>=5.1", "pytz", - "ecdsa<0.15", "python-dateutil<3.0.0,>=2.1", - "python-jose[cryptography]>=3.1.0,<4.0.0", - "docker>=2.5.1", - "jsondiff>=1.1.2", - "aws-xray-sdk!=0.96,>=0.93", "responses>=0.9.0", - "idna<3,>=2.5", - "cfn-lint>=0.4.0", "MarkupSafe<2.0", # This is a Jinja2 dependency, 2.0.0a1 currently seems broken ] @@ -72,7 +63,6 @@ def get_version(): "mock<=3.0.5", "more-itertools==5.0.0", "setuptools==44.0.0", - "sshpubkeys>=3.1.0,<4.0", "zipp==0.6.0", ] else: @@ -81,14 +71,57 @@ def get_version(): "mock", "more-itertools", "setuptools", - "sshpubkeys>=3.1.0", "zipp", ] +_dep_cryptography = "cryptography>=2.3.0" +_dep_PyYAML = "PyYAML>=5.1" +_dep_python_jose = "python-jose[cryptography]>=3.1.0,<4.0.0" +_dep_python_jose_ecdsa_pin = "ecdsa<0.15" # https://github.com/spulec/moto/pull/3263#discussion_r477404984 +_dep_docker = "docker>=2.5.1" +_dep_jsondiff = "jsondiff>=1.1.2" +_dep_aws_xray_sdk = "aws-xray-sdk!=0.96,>=0.93" +_dep_idna = "idna<3,>=2.5" +_dep_cfn_lint = "cfn-lint>=0.4.0" +_dep_sshpubkeys_py2 = "sshpubkeys>=3.1.0,<4.0; python_version<'3'" +_dep_sshpubkeys_py3 = "sshpubkeys>=3.1.0; python_version>'3'" + +all_extra_deps = [ + _dep_cryptography, + _dep_PyYAML, + _dep_python_jose, + _dep_python_jose_ecdsa_pin, + _dep_docker, + _dep_jsondiff, + _dep_aws_xray_sdk, + _dep_idna, + _dep_cfn_lint, + _dep_sshpubkeys_py2, + _dep_sshpubkeys_py3, +] + +# TODO: do we want to add ALL services here? +# i.e. even those without extra dependencies. +# Would be good for future-compatibility, I guess. 
+extras_per_service = { + "ec2": [_dep_cryptography, _dep_sshpubkeys_py2, _dep_sshpubkeys_py3], + 'acm': [_dep_cryptography], + 'iam': [_dep_cryptography], + 'cloudformation': [_dep_PyYAML, _dep_cfn_lint], + 'cognitoidp': [_dep_python_jose, _dep_python_jose_ecdsa_pin], + 'awslambda': [_dep_docker], + 'batch': [_dep_docker], + 'iotdata': [_dep_jsondiff], + 'xray': [_dep_aws_xray_sdk], +} + extras_require = { + 'all': all_extra_deps, 'server': ['flask'], } +extras_require.update(extras_per_service) + # https://hynek.me/articles/conditional-python-dependencies/ if int(setuptools.__version__.split(".", 1)[0]) < 18: if sys.version_info[0:2] < (3, 3): From 6c73def64a3f9d27a018412194cffde33a7e55be Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?S=C5=82awek=20Ehlert?= Date: Tue, 7 Jan 2020 13:25:20 +0100 Subject: [PATCH 508/658] Use extras when running the test server Make sure that `travis_moto_server.sh` script actually installs `all` and `server` extras. --- travis_moto_server.sh | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/travis_moto_server.sh b/travis_moto_server.sh index 4be26073e717..c764d1cd1c46 100755 --- a/travis_moto_server.sh +++ b/travis_moto_server.sh @@ -1,9 +1,8 @@ #!/usr/bin/env bash set -e -pip install flask # TravisCI on bionic dist uses old version of Docker Engine # which is incompatibile with newer docker-py # See https://github.com/docker/docker-py/issues/2639 pip install "docker>=2.5.1,<=4.2.2" -pip install /moto/dist/moto*.gz -moto_server -H 0.0.0.0 -p 5000 \ No newline at end of file +pip install $(ls /moto/dist/moto*.gz)[server,all] +moto_server -H 0.0.0.0 -p 5000 From 8854fd06e855f71256b7c6a63b38a2d200666ed9 Mon Sep 17 00:00:00 2001 From: zhil3 <41350057+zhil3@users.noreply.github.com> Date: Fri, 4 Sep 2020 04:11:17 -0400 Subject: [PATCH 509/658] Add describe_endpoint and register_certificate_without_ca in iot_mock module with unittest (#3279) Co-authored-by: Zhi Li --- IMPLEMENTATION_COVERAGE.md | 4 +-- moto/iot/models.py | 60 ++++++++++++++++++++++++++++++++++++++ moto/iot/responses.py | 16 ++++++++++ moto/utilities/utils.py | 10 +++++++ tests/test_iot/test_iot.py | 60 ++++++++++++++++++++++++++++++++++++++ 5 files changed, 148 insertions(+), 2 deletions(-) create mode 100644 moto/utilities/utils.py diff --git a/IMPLEMENTATION_COVERAGE.md b/IMPLEMENTATION_COVERAGE.md index 721c9c977678..3246c2615ef2 100644 --- a/IMPLEMENTATION_COVERAGE.md +++ b/IMPLEMENTATION_COVERAGE.md @@ -4447,7 +4447,7 @@ - [ ] describe_default_authorizer - [ ] describe_dimension - [ ] describe_domain_configuration -- [ ] describe_endpoint +- [X] describe_endpoint - [ ] describe_event_configurations - [ ] describe_index - [X] describe_job @@ -4533,7 +4533,7 @@ - [ ] list_violation_events - [ ] register_ca_certificate - [X] register_certificate -- [ ] register_certificate_without_ca +- [X] register_certificate_without_ca - [ ] register_thing - [ ] reject_certificate_transfer - [ ] remove_thing_from_billing_group diff --git a/moto/iot/models.py b/moto/iot/models.py index 5b74b353cb21..ebd15d10a721 100644 --- a/moto/iot/models.py +++ b/moto/iot/models.py @@ -20,6 +20,7 @@ InvalidStateTransitionException, VersionConflictException, ) +from moto.utilities.utils import random_string class FakeThing(BaseModel): @@ -374,6 +375,55 @@ def to_dict(self): return obj +class FakeEndpoint(BaseModel): + def __init__(self, endpoint_type, region_name): + if endpoint_type not in [ + "iot:Data", + "iot:Data-ATS", + "iot:CredentialProvider", + "iot:Jobs", + ]: + raise InvalidRequestException( + " 
An error occurred (InvalidRequestException) when calling the DescribeEndpoint " + "operation: Endpoint type %s not recognized." % endpoint_type + ) + self.region_name = region_name + data_identifier = random_string(14) + if endpoint_type == "iot:Data": + self.endpoint = "{i}.iot.{r}.amazonaws.com".format( + i=data_identifier, r=self.region_name + ) + elif "iot:Data-ATS" in endpoint_type: + self.endpoint = "{i}-ats.iot.{r}.amazonaws.com".format( + i=data_identifier, r=self.region_name + ) + elif "iot:CredentialProvider" in endpoint_type: + identifier = random_string(14) + self.endpoint = "{i}.credentials.iot.{r}.amazonaws.com".format( + i=identifier, r=self.region_name + ) + elif "iot:Jobs" in endpoint_type: + identifier = random_string(14) + self.endpoint = "{i}.jobs.iot.{r}.amazonaws.com".format( + i=identifier, r=self.region_name + ) + self.endpoint_type = endpoint_type + + def to_get_dict(self): + obj = { + "endpointAddress": self.endpoint, + } + + return obj + + def to_dict(self): + obj = { + "endpointAddress": self.endpoint, + } + + return obj + + class IoTBackend(BaseBackend): def __init__(self, region_name=None): super(IoTBackend, self).__init__() @@ -387,6 +437,7 @@ def __init__(self, region_name=None): self.policies = OrderedDict() self.principal_policies = OrderedDict() self.principal_things = OrderedDict() + self.endpoint = None def reset(self): region_name = self.region_name @@ -495,6 +546,10 @@ def describe_thing_type(self, thing_type_name): raise ResourceNotFoundException() return thing_types[0] + def describe_endpoint(self, endpoint_type): + self.endpoint = FakeEndpoint(endpoint_type, self.region_name) + return self.endpoint + def delete_thing(self, thing_name, expected_version): # TODO: handle expected_version @@ -625,6 +680,11 @@ def register_certificate( self.certificates[certificate.certificate_id] = certificate return certificate + def register_certificate_without_ca(self, certificate_pem, status): + certificate = FakeCertificate(certificate_pem, status, self.region_name) + self.certificates[certificate.certificate_id] = certificate + return certificate + def update_certificate(self, certificate_id, new_status): cert = self.describe_certificate(certificate_id) # TODO: validate new_status diff --git a/moto/iot/responses.py b/moto/iot/responses.py index 07a8c10c2299..15c62d91ea9f 100644 --- a/moto/iot/responses.py +++ b/moto/iot/responses.py @@ -88,6 +88,11 @@ def describe_thing_type(self): ) return json.dumps(thing_type.to_dict()) + def describe_endpoint(self): + endpoint_type = self._get_param("endpointType") + endpoint = self.iot_backend.describe_endpoint(endpoint_type=endpoint_type) + return json.dumps(endpoint.to_dict()) + def delete_thing(self): thing_name = self._get_param("thingName") expected_version = self._get_param("expectedVersion") @@ -330,6 +335,17 @@ def register_certificate(self): dict(certificateId=cert.certificate_id, certificateArn=cert.arn) ) + def register_certificate_without_ca(self): + certificate_pem = self._get_param("certificatePem") + status = self._get_param("status") + + cert = self.iot_backend.register_certificate_without_ca( + certificate_pem=certificate_pem, status=status, + ) + return json.dumps( + dict(certificateId=cert.certificate_id, certificateArn=cert.arn) + ) + def update_certificate(self): certificate_id = self._get_param("certificateId") new_status = self._get_param("newStatus") diff --git a/moto/utilities/utils.py b/moto/utilities/utils.py new file mode 100644 index 000000000000..6bd5e8b86429 --- /dev/null +++ 
b/moto/utilities/utils.py @@ -0,0 +1,10 @@ +import random +import string + + +def random_string(length=None): + n = length or 20 + random_str = "".join( + [random.choice(string.ascii_letters + string.digits) for i in range(n)] + ) + return random_str diff --git a/tests/test_iot/test_iot.py b/tests/test_iot/test_iot.py index c3ee4c96d87c..12e1ff7b0e2f 100644 --- a/tests/test_iot/test_iot.py +++ b/tests/test_iot/test_iot.py @@ -463,6 +463,46 @@ def test_list_things_with_attribute_and_thing_type_filter_and_next_token(): ) +@mock_iot +def test_endpoints(): + region_name = "ap-northeast-1" + client = boto3.client("iot", region_name=region_name) + + # iot:Data + endpoint = client.describe_endpoint(endpointType="iot:Data") + endpoint.should.have.key("endpointAddress").which.should_not.contain("ats") + endpoint.should.have.key("endpointAddress").which.should.contain( + "iot.{}.amazonaws.com".format(region_name) + ) + + # iot:Data-ATS + endpoint = client.describe_endpoint(endpointType="iot:Data-ATS") + endpoint.should.have.key("endpointAddress").which.should.contain( + "ats.iot.{}.amazonaws.com".format(region_name) + ) + + # iot:Data-ATS + endpoint = client.describe_endpoint(endpointType="iot:CredentialProvider") + endpoint.should.have.key("endpointAddress").which.should.contain( + "credentials.iot.{}.amazonaws.com".format(region_name) + ) + + # iot:Data-ATS + endpoint = client.describe_endpoint(endpointType="iot:Jobs") + endpoint.should.have.key("endpointAddress").which.should.contain( + "jobs.iot.{}.amazonaws.com".format(region_name) + ) + + # raise InvalidRequestException + try: + client.describe_endpoint(endpointType="iot:Abc") + except client.exceptions.InvalidRequestException as exc: + error_code = exc.response["Error"]["Code"] + error_code.should.equal("InvalidRequestException") + else: + raise Exception("Should have raised error") + + @mock_iot def test_certs(): client = boto3.client("iot", region_name="us-east-1") @@ -523,6 +563,26 @@ def test_certs(): res = client.list_certificates() res.should.have.key("certificates") + # Test register_certificate without CA flow + cert = client.register_certificate_without_ca( + certificatePem=cert_pem, status="INACTIVE" + ) + cert.should.have.key("certificateId").which.should_not.be.none + cert.should.have.key("certificateArn").which.should_not.be.none + cert_id = cert["certificateId"] + + res = client.list_certificates() + res.should.have.key("certificates").which.should.have.length_of(1) + for cert in res["certificates"]: + cert.should.have.key("certificateArn").which.should_not.be.none + cert.should.have.key("certificateId").which.should_not.be.none + cert.should.have.key("status").which.should_not.be.none + cert.should.have.key("creationDate").which.should_not.be.none + + client.delete_certificate(certificateId=cert_id) + res = client.list_certificates() + res.should.have.key("certificates") + @mock_iot def test_delete_policy_validation(): From ca64d8fc7a0719e66a03d106ad312fef5bd477ee Mon Sep 17 00:00:00 2001 From: Ciaran Evans <9111975+ciaranevans@users.noreply.github.com> Date: Fri, 4 Sep 2020 09:58:16 +0100 Subject: [PATCH 510/658] Implement Execution inputs for Step Functions (#3284) * Add input attribute to Execution and test with describe_execution * Switch back method name --- moto/stepfunctions/models.py | 5 ++- moto/stepfunctions/responses.py | 7 ++- .../test_stepfunctions/test_stepfunctions.py | 44 ++++++++++++++----- 3 files changed, 43 insertions(+), 13 deletions(-) diff --git a/moto/stepfunctions/models.py 
b/moto/stepfunctions/models.py index 58b6bb434894..19fb4561d093 100644 --- a/moto/stepfunctions/models.py +++ b/moto/stepfunctions/models.py @@ -34,6 +34,7 @@ def __init__( state_machine_name, execution_name, state_machine_arn, + execution_input, ): execution_arn = "arn:aws:states:{}:{}:execution:{}:{}" execution_arn = execution_arn.format( @@ -43,6 +44,7 @@ def __init__( self.name = execution_name self.start_date = iso_8601_datetime_without_milliseconds(datetime.now()) self.state_machine_arn = state_machine_arn + self.execution_input = execution_input self.status = "RUNNING" self.stop_date = None @@ -204,7 +206,7 @@ def delete_state_machine(self, arn): if sm: self.state_machines.remove(sm) - def start_execution(self, state_machine_arn, name=None): + def start_execution(self, state_machine_arn, name=None, execution_input=None): state_machine_name = self.describe_state_machine(state_machine_arn).name self._ensure_execution_name_doesnt_exist(name) execution = Execution( @@ -213,6 +215,7 @@ def start_execution(self, state_machine_arn, name=None): state_machine_name=state_machine_name, execution_name=name or str(uuid4()), state_machine_arn=state_machine_arn, + execution_input=execution_input, ) self.executions.append(execution) return execution diff --git a/moto/stepfunctions/responses.py b/moto/stepfunctions/responses.py index 7083167b6ecf..d9e438892be7 100644 --- a/moto/stepfunctions/responses.py +++ b/moto/stepfunctions/responses.py @@ -95,8 +95,11 @@ def list_tags_for_resource(self): def start_execution(self): arn = self._get_param("stateMachineArn") name = self._get_param("name") + execution_input = self._get_param("input", if_none="{}") try: - execution = self.stepfunction_backend.start_execution(arn, name) + execution = self.stepfunction_backend.start_execution( + arn, name, execution_input + ) except AWSError as err: return err.response() response = { @@ -129,7 +132,7 @@ def describe_execution(self): execution = self.stepfunction_backend.describe_execution(arn) response = { "executionArn": arn, - "input": "{}", + "input": execution.execution_input, "name": execution.name, "startDate": execution.start_date, "stateMachineArn": execution.state_machine_arn, diff --git a/tests/test_stepfunctions/test_stepfunctions.py b/tests/test_stepfunctions/test_stepfunctions.py index 043fd9bfb309..d94867719c5c 100644 --- a/tests/test_stepfunctions/test_stepfunctions.py +++ b/tests/test_stepfunctions/test_stepfunctions.py @@ -1,8 +1,8 @@ from __future__ import unicode_literals import boto3 +import json import sure # noqa -import datetime from datetime import datetime from botocore.exceptions import ClientError @@ -134,7 +134,7 @@ def test_state_machine_creation_fails_with_invalid_names(): # for invalid_name in invalid_names: - with assert_raises(ClientError) as exc: + with assert_raises(ClientError): client.create_state_machine( name=invalid_name, definition=str(simple_definition), @@ -147,7 +147,7 @@ def test_state_machine_creation_requires_valid_role_arn(): client = boto3.client("stepfunctions", region_name=region) name = "example_step_function" # - with assert_raises(ClientError) as exc: + with assert_raises(ClientError): client.create_state_machine( name=name, definition=str(simple_definition), @@ -242,7 +242,7 @@ def test_state_machine_creation_can_be_described(): def test_state_machine_throws_error_when_describing_unknown_machine(): client = boto3.client("stepfunctions", region_name=region) # - with assert_raises(ClientError) as exc: + with assert_raises(ClientError): unknown_state_machine = ( 
"arn:aws:states:" + region @@ -258,7 +258,7 @@ def test_state_machine_throws_error_when_describing_unknown_machine(): def test_state_machine_throws_error_when_describing_bad_arn(): client = boto3.client("stepfunctions", region_name=region) # - with assert_raises(ClientError) as exc: + with assert_raises(ClientError): client.describe_state_machine(stateMachineArn="bad") @@ -267,7 +267,7 @@ def test_state_machine_throws_error_when_describing_bad_arn(): def test_state_machine_throws_error_when_describing_machine_in_different_account(): client = boto3.client("stepfunctions", region_name=region) # - with assert_raises(ClientError) as exc: + with assert_raises(ClientError): unknown_state_machine = ( "arn:aws:states:" + region + ":000000000000:stateMachine:unknown" ) @@ -376,7 +376,7 @@ def test_state_machine_start_execution(): def test_state_machine_start_execution_bad_arn_raises_exception(): client = boto3.client("stepfunctions", region_name=region) # - with assert_raises(ClientError) as exc: + with assert_raises(ClientError): client.start_execution(stateMachineArn="bad") @@ -464,7 +464,7 @@ def test_state_machine_list_executions_when_none_exist(): @mock_stepfunctions @mock_sts -def test_state_machine_describe_execution(): +def test_state_machine_describe_execution_with_no_input(): client = boto3.client("stepfunctions", region_name=region) # sm = client.create_state_machine( @@ -483,12 +483,36 @@ def test_state_machine_describe_execution(): description.shouldnt.have("stopDate") +@mock_stepfunctions +@mock_sts +def test_state_machine_describe_execution_with_custom_input(): + client = boto3.client("stepfunctions", region_name=region) + # + execution_input = json.dumps({"input_key": "input_val"}) + sm = client.create_state_machine( + name="name", definition=str(simple_definition), roleArn=_get_default_role() + ) + execution = client.start_execution( + stateMachineArn=sm["stateMachineArn"], input=execution_input + ) + description = client.describe_execution(executionArn=execution["executionArn"]) + # + description["ResponseMetadata"]["HTTPStatusCode"].should.equal(200) + description["executionArn"].should.equal(execution["executionArn"]) + description["input"].should.equal(execution_input) + description["name"].shouldnt.be.empty + description["startDate"].should.equal(execution["startDate"]) + description["stateMachineArn"].should.equal(sm["stateMachineArn"]) + description["status"].should.equal("RUNNING") + description.shouldnt.have("stopDate") + + @mock_stepfunctions @mock_sts def test_execution_throws_error_when_describing_unknown_execution(): client = boto3.client("stepfunctions", region_name=region) # - with assert_raises(ClientError) as exc: + with assert_raises(ClientError): unknown_execution = ( "arn:aws:states:" + region + ":" + _get_account_id() + ":execution:unknown" ) @@ -519,7 +543,7 @@ def test_state_machine_can_be_described_by_execution(): def test_state_machine_throws_error_when_describing_unknown_execution(): client = boto3.client("stepfunctions", region_name=region) # - with assert_raises(ClientError) as exc: + with assert_raises(ClientError): unknown_execution = ( "arn:aws:states:" + region + ":" + _get_account_id() + ":execution:unknown" ) From 6a467ec50f50a14ad67845b3d46eadab919c8719 Mon Sep 17 00:00:00 2001 From: Ciaran Evans <9111975+ciaranevans@users.noreply.github.com> Date: Fri, 4 Sep 2020 10:22:21 +0100 Subject: [PATCH 511/658] Add validation on execution input --- moto/stepfunctions/exceptions.py | 5 +++ moto/stepfunctions/models.py | 11 +++++ 
.../test_stepfunctions/test_stepfunctions.py | 41 +++++++++++++++++++ 3 files changed, 57 insertions(+) diff --git a/moto/stepfunctions/exceptions.py b/moto/stepfunctions/exceptions.py index 6000bab4ea6a..4abb6a8afd35 100644 --- a/moto/stepfunctions/exceptions.py +++ b/moto/stepfunctions/exceptions.py @@ -38,6 +38,11 @@ class InvalidName(AWSError): STATUS = 400 +class InvalidExecutionInput(AWSError): + TYPE = "InvalidExecutionInput" + STATUS = 400 + + class StateMachineDoesNotExist(AWSError): TYPE = "StateMachineDoesNotExist" STATUS = 400 diff --git a/moto/stepfunctions/models.py b/moto/stepfunctions/models.py index 19fb4561d093..3184d6456b4b 100644 --- a/moto/stepfunctions/models.py +++ b/moto/stepfunctions/models.py @@ -1,3 +1,4 @@ +import json import re from datetime import datetime @@ -11,6 +12,7 @@ ExecutionAlreadyExists, ExecutionDoesNotExist, InvalidArn, + InvalidExecutionInput, InvalidName, StateMachineDoesNotExist, ) @@ -209,6 +211,7 @@ def delete_state_machine(self, arn): def start_execution(self, state_machine_arn, name=None, execution_input=None): state_machine_name = self.describe_state_machine(state_machine_arn).name self._ensure_execution_name_doesnt_exist(name) + self._validate_execution_input(execution_input) execution = Execution( region_name=self.region_name, account_id=self._get_account_id(), @@ -290,6 +293,14 @@ def _ensure_execution_name_doesnt_exist(self, name): "Execution Already Exists: '" + execution.execution_arn + "'" ) + def _validate_execution_input(self, execution_input): + try: + json.loads(execution_input) + except Exception as ex: + raise InvalidExecutionInput( + "Invalid State Machine Execution Input: '" + str(ex) + "'" + ) + def _get_account_id(self): return ACCOUNT_ID diff --git a/tests/test_stepfunctions/test_stepfunctions.py b/tests/test_stepfunctions/test_stepfunctions.py index d94867719c5c..36b08487c121 100644 --- a/tests/test_stepfunctions/test_stepfunctions.py +++ b/tests/test_stepfunctions/test_stepfunctions.py @@ -425,6 +425,47 @@ def test_state_machine_start_execution_fails_on_duplicate_execution_name(): ) +@mock_stepfunctions +@mock_sts +def test_state_machine_start_execution_with_custom_input(): + client = boto3.client("stepfunctions", region_name=region) + # + sm = client.create_state_machine( + name="name", definition=str(simple_definition), roleArn=_get_default_role() + ) + execution_input = json.dumps({"input_key": "input_value"}) + execution = client.start_execution( + stateMachineArn=sm["stateMachineArn"], input=execution_input + ) + # + execution["ResponseMetadata"]["HTTPStatusCode"].should.equal(200) + uuid_regex = "[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}" + expected_exec_name = ( + "arn:aws:states:" + + region + + ":" + + _get_account_id() + + ":execution:name:" + + uuid_regex + ) + execution["executionArn"].should.match(expected_exec_name) + execution["startDate"].should.be.a(datetime) + + +@mock_stepfunctions +@mock_sts +def test_state_machine_start_execution_with_invalid_input(): + client = boto3.client("stepfunctions", region_name=region) + # + sm = client.create_state_machine( + name="name", definition=str(simple_definition), roleArn=_get_default_role() + ) + with assert_raises(ClientError): + _ = client.start_execution(stateMachineArn=sm["stateMachineArn"], input="") + with assert_raises(ClientError): + _ = client.start_execution(stateMachineArn=sm["stateMachineArn"], input="{") + + @mock_stepfunctions @mock_sts def test_state_machine_list_executions(): From c66812edbaca4b2a9535552ee022262d02513996 Mon Sep 
17 00:00:00 2001 From: Toshiya Kawasaki Date: Fri, 4 Sep 2020 20:14:48 +0900 Subject: [PATCH 512/658] Add kinesisvideo archived media (#3280) * add get_hls_streaming_session_url * add get_dash_streaming_session_url * add get_clip * add test_server for kinesisvideo archived media * fix for lint * fix for lint * avoid testing kinesisvideoarchivedmedia with TEST_SERVER_MODE=true --- Makefile | 6 +- moto/__init__.py | 3 + moto/backends.py | 4 + moto/kinesisvideo/responses.py | 5 -- moto/kinesisvideoarchivedmedia/__init__.py | 6 ++ moto/kinesisvideoarchivedmedia/exceptions.py | 3 + moto/kinesisvideoarchivedmedia/models.py | 88 +++++++++++++++++++ moto/kinesisvideoarchivedmedia/responses.py | 70 +++++++++++++++ moto/kinesisvideoarchivedmedia/urls.py | 14 +++ .../test_kinesisvideoarchivedmedia.py | 86 ++++++++++++++++++ .../test_server.py | 19 ++++ 11 files changed, 298 insertions(+), 6 deletions(-) create mode 100644 moto/kinesisvideoarchivedmedia/__init__.py create mode 100644 moto/kinesisvideoarchivedmedia/exceptions.py create mode 100644 moto/kinesisvideoarchivedmedia/models.py create mode 100644 moto/kinesisvideoarchivedmedia/responses.py create mode 100644 moto/kinesisvideoarchivedmedia/urls.py create mode 100644 tests/test_kinesisvideoarchivedmedia/test_kinesisvideoarchivedmedia.py create mode 100644 tests/test_kinesisvideoarchivedmedia/test_server.py diff --git a/Makefile b/Makefile index e84d036b7fc8..acc5b20376a1 100644 --- a/Makefile +++ b/Makefile @@ -3,7 +3,11 @@ SHELL := /bin/bash ifeq ($(TEST_SERVER_MODE), true) # exclude test_iot and test_iotdata for now # because authentication of iot is very complicated - TEST_EXCLUDE := --exclude='test_iot.*' + + # exclude test_kinesisvideoarchivedmedia + # because testing with moto_server is difficult with data-endpoint + + TEST_EXCLUDE := --exclude='test_iot.*' --exclude="test_kinesisvideoarchivedmedia.*" else TEST_EXCLUDE := endif diff --git a/moto/__init__.py b/moto/__init__.py index da66d9c6164c..e21d3f894a90 100644 --- a/moto/__init__.py +++ b/moto/__init__.py @@ -114,6 +114,9 @@ def f(*args, **kwargs): mock_xray = lazy_load(".xray", "mock_xray") mock_xray_client = lazy_load(".xray", "mock_xray_client") mock_kinesisvideo = lazy_load(".kinesisvideo", "mock_kinesisvideo") +mock_kinesisvideoarchivedmedia = lazy_load( + ".kinesisvideoarchivedmedia", "mock_kinesisvideoarchivedmedia" +) # import logging # logging.getLogger('boto').setLevel(logging.CRITICAL) diff --git a/moto/backends.py b/moto/backends.py index 9216d4615f37..7b1c1d08db7b 100644 --- a/moto/backends.py +++ b/moto/backends.py @@ -70,6 +70,10 @@ "swf": ("swf", "swf_backends"), "xray": ("xray", "xray_backends"), "kinesisvideo": ("kinesisvideo", "kinesisvideo_backends"), + "kinesis-video-archived-media": ( + "kinesisvideoarchivedmedia", + "kinesisvideoarchivedmedia_backends", + ), } diff --git a/moto/kinesisvideo/responses.py b/moto/kinesisvideo/responses.py index 376e5b5fe897..d1e386f2eeea 100644 --- a/moto/kinesisvideo/responses.py +++ b/moto/kinesisvideo/responses.py @@ -63,8 +63,3 @@ def get_data_endpoint(self): stream_name=stream_name, stream_arn=stream_arn, api_name=api_name, ) return json.dumps(dict(DataEndpoint=data_endpoint)) - - # add methods from here - - -# add templates from here diff --git a/moto/kinesisvideoarchivedmedia/__init__.py b/moto/kinesisvideoarchivedmedia/__init__.py new file mode 100644 index 000000000000..c1676c87143a --- /dev/null +++ b/moto/kinesisvideoarchivedmedia/__init__.py @@ -0,0 +1,6 @@ +from __future__ import unicode_literals +from .models import 
kinesisvideoarchivedmedia_backends
+from ..core.models import base_decorator
+
+kinesisvideoarchivedmedia_backend = kinesisvideoarchivedmedia_backends["us-east-1"]
+mock_kinesisvideoarchivedmedia = base_decorator(kinesisvideoarchivedmedia_backends)
diff --git a/moto/kinesisvideoarchivedmedia/exceptions.py b/moto/kinesisvideoarchivedmedia/exceptions.py
new file mode 100644
index 000000000000..38c60cea22fb
--- /dev/null
+++ b/moto/kinesisvideoarchivedmedia/exceptions.py
@@ -0,0 +1,3 @@
+from __future__ import unicode_literals
+
+# No exceptions implemented for now
diff --git a/moto/kinesisvideoarchivedmedia/models.py b/moto/kinesisvideoarchivedmedia/models.py
new file mode 100644
index 000000000000..46fddf567c43
--- /dev/null
+++ b/moto/kinesisvideoarchivedmedia/models.py
@@ -0,0 +1,88 @@
+from __future__ import unicode_literals
+from boto3 import Session
+from moto.core import BaseBackend
+from moto.kinesisvideo import kinesisvideo_backends
+from moto.sts.utils import random_session_token
+
+
+class KinesisVideoArchivedMediaBackend(BaseBackend):
+    def __init__(self, region_name=None):
+        super(KinesisVideoArchivedMediaBackend, self).__init__()
+        self.region_name = region_name
+
+    def reset(self):
+        region_name = self.region_name
+        self.__dict__ = {}
+        self.__init__(region_name)
+
+    def _get_streaming_url(self, stream_name, stream_arn, api_name):
+        stream = kinesisvideo_backends[self.region_name]._get_stream(
+            stream_name, stream_arn
+        )
+        data_endpoint = stream.get_data_endpoint(api_name)
+        session_token = random_session_token()
+        api_to_relative_path = {
+            "GET_HLS_STREAMING_SESSION_URL": "/hls/v1/getHLSMasterPlaylist.m3u8",
+            "GET_DASH_STREAMING_SESSION_URL": "/dash/v1/getDASHManifest.mpd",
+        }
+        relative_path = api_to_relative_path[api_name]
+        url = "{}{}?SessionToken={}".format(data_endpoint, relative_path, session_token)
+        return url
+
+    def get_hls_streaming_session_url(
+        self,
+        stream_name,
+        stream_arn,
+        playback_mode,
+        hls_fragment_selector,
+        container_format,
+        discontinuity_mode,
+        display_fragment_timestamp,
+        expires,
+        max_media_playlist_fragment_results,
+    ):
+        # Ignore the optional parameters; the HLS URL format doesn't depend on them
+        api_name = "GET_HLS_STREAMING_SESSION_URL"
+        url = self._get_streaming_url(stream_name, stream_arn, api_name)
+        return url
+
+    def get_dash_streaming_session_url(
+        self,
+        stream_name,
+        stream_arn,
+        playback_mode,
+        display_fragment_timestamp,
+        display_fragment_number,
+        dash_fragment_selector,
+        expires,
+        max_manifest_fragment_results,
+    ):
+        # Ignore the optional parameters; the DASH URL format doesn't depend on them
+        api_name = "GET_DASH_STREAMING_SESSION_URL"
+        url = self._get_streaming_url(stream_name, stream_arn, api_name)
+        return url
+
+    def get_clip(self, stream_name, stream_arn, clip_fragment_selector):
+        kinesisvideo_backends[self.region_name]._get_stream(stream_name, stream_arn)
+        content_type = "video/mp4"  # Fixed here; in real AWS it depends on the input stream
+        payload = b"sample-mp4-video"
+        return content_type, payload
+
+
+kinesisvideoarchivedmedia_backends = {}
+for region in Session().get_available_regions("kinesis-video-archived-media"):
+    kinesisvideoarchivedmedia_backends[region] = KinesisVideoArchivedMediaBackend(
+        region
+    )
+for region in Session().get_available_regions(
+    "kinesis-video-archived-media", partition_name="aws-us-gov"
+):
+    kinesisvideoarchivedmedia_backends[region] = KinesisVideoArchivedMediaBackend(
+        region
+    )
+for region in Session().get_available_regions(
+    "kinesis-video-archived-media",
partition_name="aws-cn" +): + kinesisvideoarchivedmedia_backends[region] = KinesisVideoArchivedMediaBackend( + region + ) diff --git a/moto/kinesisvideoarchivedmedia/responses.py b/moto/kinesisvideoarchivedmedia/responses.py new file mode 100644 index 000000000000..d021ced0e72b --- /dev/null +++ b/moto/kinesisvideoarchivedmedia/responses.py @@ -0,0 +1,70 @@ +from __future__ import unicode_literals +from moto.core.responses import BaseResponse +from .models import kinesisvideoarchivedmedia_backends +import json + + +class KinesisVideoArchivedMediaResponse(BaseResponse): + SERVICE_NAME = "kinesis-video-archived-media" + + @property + def kinesisvideoarchivedmedia_backend(self): + return kinesisvideoarchivedmedia_backends[self.region] + + def get_hls_streaming_session_url(self): + stream_name = self._get_param("StreamName") + stream_arn = self._get_param("StreamARN") + playback_mode = self._get_param("PlaybackMode") + hls_fragment_selector = self._get_param("HLSFragmentSelector") + container_format = self._get_param("ContainerFormat") + discontinuity_mode = self._get_param("DiscontinuityMode") + display_fragment_timestamp = self._get_param("DisplayFragmentTimestamp") + expires = self._get_int_param("Expires") + max_media_playlist_fragment_results = self._get_param( + "MaxMediaPlaylistFragmentResults" + ) + hls_streaming_session_url = self.kinesisvideoarchivedmedia_backend.get_hls_streaming_session_url( + stream_name=stream_name, + stream_arn=stream_arn, + playback_mode=playback_mode, + hls_fragment_selector=hls_fragment_selector, + container_format=container_format, + discontinuity_mode=discontinuity_mode, + display_fragment_timestamp=display_fragment_timestamp, + expires=expires, + max_media_playlist_fragment_results=max_media_playlist_fragment_results, + ) + return json.dumps(dict(HLSStreamingSessionURL=hls_streaming_session_url)) + + def get_dash_streaming_session_url(self): + stream_name = self._get_param("StreamName") + stream_arn = self._get_param("StreamARN") + playback_mode = self._get_param("PlaybackMode") + display_fragment_timestamp = self._get_param("DisplayFragmentTimestamp") + display_fragment_number = self._get_param("DisplayFragmentNumber") + dash_fragment_selector = self._get_param("DASHFragmentSelector") + expires = self._get_int_param("Expires") + max_manifest_fragment_results = self._get_param("MaxManifestFragmentResults") + dash_streaming_session_url = self.kinesisvideoarchivedmedia_backend.get_dash_streaming_session_url( + stream_name=stream_name, + stream_arn=stream_arn, + playback_mode=playback_mode, + display_fragment_timestamp=display_fragment_timestamp, + display_fragment_number=display_fragment_number, + dash_fragment_selector=dash_fragment_selector, + expires=expires, + max_manifest_fragment_results=max_manifest_fragment_results, + ) + return json.dumps(dict(DASHStreamingSessionURL=dash_streaming_session_url)) + + def get_clip(self): + stream_name = self._get_param("StreamName") + stream_arn = self._get_param("StreamARN") + clip_fragment_selector = self._get_param("ClipFragmentSelector") + content_type, payload = self.kinesisvideoarchivedmedia_backend.get_clip( + stream_name=stream_name, + stream_arn=stream_arn, + clip_fragment_selector=clip_fragment_selector, + ) + new_headers = {"Content-Type": content_type} + return payload, new_headers diff --git a/moto/kinesisvideoarchivedmedia/urls.py b/moto/kinesisvideoarchivedmedia/urls.py new file mode 100644 index 000000000000..88c2d59f09a6 --- /dev/null +++ b/moto/kinesisvideoarchivedmedia/urls.py @@ -0,0 +1,14 @@ +from 
__future__ import unicode_literals +from .responses import KinesisVideoArchivedMediaResponse + +url_bases = [ + r"https?://.*\.kinesisvideo.(.+).amazonaws.com", +] + + +response = KinesisVideoArchivedMediaResponse() + + +url_paths = { + "{0}/.*$": response.dispatch, +} diff --git a/tests/test_kinesisvideoarchivedmedia/test_kinesisvideoarchivedmedia.py b/tests/test_kinesisvideoarchivedmedia/test_kinesisvideoarchivedmedia.py new file mode 100644 index 000000000000..ee44391977c9 --- /dev/null +++ b/tests/test_kinesisvideoarchivedmedia/test_kinesisvideoarchivedmedia.py @@ -0,0 +1,86 @@ +from __future__ import unicode_literals + +import boto3 +import sure # noqa +from moto import mock_kinesisvideoarchivedmedia +from moto import mock_kinesisvideo +from datetime import datetime, timedelta + + +@mock_kinesisvideo +@mock_kinesisvideoarchivedmedia +def test_get_hls_streaming_session_url(): + region_name = "ap-northeast-1" + kvs_client = boto3.client("kinesisvideo", region_name=region_name) + stream_name = "my-stream" + kvs_client.create_stream(StreamName=stream_name) + + api_name = "GET_HLS_STREAMING_SESSION_URL" + res = kvs_client.get_data_endpoint(StreamName=stream_name, APIName=api_name) + data_endpoint = res["DataEndpoint"] + + client = boto3.client( + "kinesis-video-archived-media", + region_name=region_name, + endpoint_url=data_endpoint, + ) + res = client.get_hls_streaming_session_url(StreamName=stream_name,) + reg_exp = "^{}/hls/v1/getHLSMasterPlaylist.m3u8\?SessionToken\=.+$".format( + data_endpoint + ) + res.should.have.key("HLSStreamingSessionURL").which.should.match(reg_exp) + + +@mock_kinesisvideo +@mock_kinesisvideoarchivedmedia +def test_get_dash_streaming_session_url(): + region_name = "ap-northeast-1" + kvs_client = boto3.client("kinesisvideo", region_name=region_name) + stream_name = "my-stream" + kvs_client.create_stream(StreamName=stream_name) + + api_name = "GET_DASH_STREAMING_SESSION_URL" + res = kvs_client.get_data_endpoint(StreamName=stream_name, APIName=api_name) + data_endpoint = res["DataEndpoint"] + + client = boto3.client( + "kinesis-video-archived-media", + region_name=region_name, + endpoint_url=data_endpoint, + ) + res = client.get_dash_streaming_session_url(StreamName=stream_name,) + reg_exp = "^{}/dash/v1/getDASHManifest.mpd\?SessionToken\=.+$".format(data_endpoint) + res.should.have.key("DASHStreamingSessionURL").which.should.match(reg_exp) + + +@mock_kinesisvideo +@mock_kinesisvideoarchivedmedia +def test_get_clip(): + region_name = "ap-northeast-1" + kvs_client = boto3.client("kinesisvideo", region_name=region_name) + stream_name = "my-stream" + kvs_client.create_stream(StreamName=stream_name) + + api_name = "GET_DASH_STREAMING_SESSION_URL" + res = kvs_client.get_data_endpoint(StreamName=stream_name, APIName=api_name) + data_endpoint = res["DataEndpoint"] + + client = boto3.client( + "kinesis-video-archived-media", + region_name=region_name, + endpoint_url=data_endpoint, + ) + end_timestamp = datetime.utcnow() - timedelta(hours=1) + start_timestamp = end_timestamp - timedelta(minutes=5) + res = client.get_clip( + StreamName=stream_name, + ClipFragmentSelector={ + "FragmentSelectorType": "PRODUCER_TIMESTAMP", + "TimestampRange": { + "StartTimestamp": start_timestamp, + "EndTimestamp": end_timestamp, + }, + }, + ) + res.should.have.key("ContentType").which.should.match("video/mp4") + res.should.have.key("Payload") diff --git a/tests/test_kinesisvideoarchivedmedia/test_server.py b/tests/test_kinesisvideoarchivedmedia/test_server.py new file mode 100644 index 
000000000000..482c7bb1b89f --- /dev/null +++ b/tests/test_kinesisvideoarchivedmedia/test_server.py @@ -0,0 +1,19 @@ +from __future__ import unicode_literals + +import sure # noqa + +import moto.server as server +from moto import mock_kinesisvideoarchivedmedia + +""" +Test the different server responses +""" + + +@mock_kinesisvideoarchivedmedia +def test_kinesisvideoarchivedmedia_server_is_up(): + backend = server.create_backend_app("kinesis-video-archived-media") + test_client = backend.test_client() + res = test_client.post("/getHLSStreamingSessionURL") + # Just checking server is up + res.status_code.should.equal(404) From 9b7ee6687151fb56c26c42851739b2aefa7248ef Mon Sep 17 00:00:00 2001 From: Steve Pulec Date: Sat, 5 Sep 2020 11:43:58 -0500 Subject: [PATCH 513/658] update implementation coverage. --- IMPLEMENTATION_COVERAGE.md | 169 ++++++++++++++++++------------------- 1 file changed, 82 insertions(+), 87 deletions(-) diff --git a/IMPLEMENTATION_COVERAGE.md b/IMPLEMENTATION_COVERAGE.md index 3246c2615ef2..90ebf9a57339 100644 --- a/IMPLEMENTATION_COVERAGE.md +++ b/IMPLEMENTATION_COVERAGE.md @@ -464,13 +464,13 @@ - [ ] delete_scaling_policy - [ ] delete_scheduled_action - [ ] deregister_scalable_target -- [x] describe_scalable_targets +- [X] describe_scalable_targets - [ ] describe_scaling_activities - [ ] describe_scaling_policies - [ ] describe_scheduled_actions - [ ] put_scaling_policy - [ ] put_scheduled_action -- [x] register_scalable_target - includes enhanced validation support for ECS targets +- [X] register_scalable_target
## application-insights @@ -642,15 +642,15 @@ ## athena
-26% implemented +36% implemented - [ ] batch_get_named_query - [ ] batch_get_query_execution -- [ ] create_named_query +- [X] create_named_query - [X] create_work_group - [ ] delete_named_query - [ ] delete_work_group -- [ ] get_named_query +- [X] get_named_query - [ ] get_query_execution - [ ] get_query_results - [X] get_work_group @@ -1664,7 +1664,7 @@ ## cognito-idp
-38% implemented +44% implemented - [ ] add_custom_attributes - [X] admin_add_user_to_group @@ -1693,11 +1693,11 @@ - [ ] admin_update_device_status - [X] admin_update_user_attributes - [ ] admin_user_global_sign_out -- [ ] associate_software_token +- [X] associate_software_token - [X] change_password - [ ] confirm_device - [X] confirm_forgot_password -- [ ] confirm_sign_up +- [X] confirm_sign_up - [X] create_group - [X] create_identity_provider - [X] create_resource_server @@ -1732,7 +1732,7 @@ - [ ] get_user_attribute_verification_code - [ ] get_user_pool_mfa_config - [ ] global_sign_out -- [ ] initiate_auth +- [X] initiate_auth - [ ] list_devices - [X] list_groups - [X] list_identity_providers @@ -1747,10 +1747,10 @@ - [X] respond_to_auth_challenge - [ ] set_risk_configuration - [ ] set_ui_customization -- [ ] set_user_mfa_preference +- [X] set_user_mfa_preference - [ ] set_user_pool_mfa_config - [ ] set_user_settings -- [ ] sign_up +- [X] sign_up - [ ] start_user_import_job - [ ] stop_user_import_job - [ ] tag_resource @@ -1764,7 +1764,7 @@ - [ ] update_user_pool - [X] update_user_pool_client - [X] update_user_pool_domain -- [ ] verify_software_token +- [X] verify_software_token - [ ] verify_user_attribute
@@ -1889,7 +1889,7 @@ ## config
-26% implemented +32% implemented - [X] batch_get_aggregate_resource_config - [X] batch_get_resource_config @@ -1901,7 +1901,7 @@ - [X] delete_delivery_channel - [ ] delete_evaluation_results - [ ] delete_organization_config_rule -- [ ] delete_organization_conformance_pack +- [X] delete_organization_conformance_pack - [ ] delete_pending_aggregation_request - [ ] delete_remediation_configuration - [ ] delete_remediation_exceptions @@ -1925,8 +1925,8 @@ - [X] describe_delivery_channels - [ ] describe_organization_config_rule_statuses - [ ] describe_organization_config_rules -- [ ] describe_organization_conformance_pack_statuses -- [ ] describe_organization_conformance_packs +- [X] describe_organization_conformance_pack_statuses +- [X] describe_organization_conformance_packs - [ ] describe_pending_aggregation_requests - [ ] describe_remediation_configurations - [ ] describe_remediation_exceptions @@ -1944,7 +1944,7 @@ - [ ] get_conformance_pack_compliance_summary - [ ] get_discovered_resource_counts - [ ] get_organization_config_rule_detailed_status -- [ ] get_organization_conformance_pack_detailed_status +- [X] get_organization_conformance_pack_detailed_status - [X] get_resource_config_history - [X] list_aggregate_discovered_resources - [X] list_discovered_resources @@ -1957,7 +1957,7 @@ - [X] put_delivery_channel - [X] put_evaluations - [ ] put_organization_config_rule -- [ ] put_organization_conformance_pack +- [X] put_organization_conformance_pack - [ ] put_remediation_configurations - [ ] put_remediation_exceptions - [ ] put_resource_config @@ -2580,7 +2580,7 @@ ## ec2
-26% implemented +27% implemented - [ ] accept_reserved_instances_exchange_quote - [ ] accept_transit_gateway_peering_attachment @@ -2639,7 +2639,7 @@ - [X] create_internet_gateway - [X] create_key_pair - [X] create_launch_template -- [x] create_launch_template_version +- [ ] create_launch_template_version - [ ] create_local_gateway_route - [ ] create_local_gateway_route_table_vpc_association - [X] create_nat_gateway @@ -2939,7 +2939,7 @@ - [ ] purchase_reserved_instances_offering - [ ] purchase_scheduled_instances - [X] reboot_instances -- [ ] register_image +- [X] register_image - [ ] register_instance_event_notification_attributes - [ ] register_transit_gateway_multicast_group_members - [ ] register_transit_gateway_multicast_group_sources @@ -3031,7 +3031,7 @@ ## ecs
-73% implemented +72% implemented - [ ] create_capacity_provider - [X] create_cluster @@ -4118,7 +4118,7 @@ ## iam
-69% implemented +70% implemented - [ ] add_client_id_to_open_id_connect_provider - [X] add_role_to_instance_profile @@ -4146,7 +4146,7 @@ - [X] delete_account_alias - [X] delete_account_password_policy - [X] delete_group -- [ ] delete_group_policy +- [X] delete_group_policy - [X] delete_instance_profile - [X] delete_login_profile - [X] delete_open_id_connect_provider @@ -4367,7 +4367,7 @@ ## iot
-27% implemented +28% implemented - [ ] accept_certificate_transfer - [ ] add_thing_to_billing_group @@ -4837,7 +4837,6 @@ - [ ] describe_configuration - [ ] describe_configuration_revision - [ ] get_bootstrap_brokers -- [ ] get_compatible_kafka_versions - [ ] list_cluster_operations - [ ] list_clusters - [ ] list_configuration_revisions @@ -4850,7 +4849,6 @@ - [ ] update_broker_count - [ ] update_broker_storage - [ ] update_cluster_configuration -- [ ] update_cluster_kafka_version - [ ] update_monitoring
@@ -4920,11 +4918,11 @@ ## kinesis-video-archived-media
-0% implemented +60% implemented -- [ ] get_clip -- [ ] get_dash_streaming_session_url -- [ ] get_hls_streaming_session_url +- [X] get_clip +- [X] get_dash_streaming_session_url +- [X] get_hls_streaming_session_url - [ ] get_media_for_fragment_list - [ ] list_fragments
@@ -5004,18 +5002,18 @@ ## kinesisvideo
-0% implemented +26% implemented - [ ] create_signaling_channel -- [ ] create_stream +- [X] create_stream - [ ] delete_signaling_channel -- [ ] delete_stream +- [X] delete_stream - [ ] describe_signaling_channel -- [ ] describe_stream -- [ ] get_data_endpoint +- [X] describe_stream +- [X] get_data_endpoint - [ ] get_signaling_channel_endpoint - [ ] list_signaling_channels -- [ ] list_streams +- [X] list_streams - [ ] list_tags_for_resource - [ ] list_tags_for_stream - [ ] tag_resource @@ -5100,7 +5098,7 @@ ## lambda
-38% implemented +44% implemented - [ ] add_layer_version_permission - [X] add_permission @@ -6100,7 +6098,7 @@ ## organizations
-47% implemented +68% implemented - [ ] accept_handshake - [X] attach_policy @@ -6114,7 +6112,7 @@ - [ ] delete_organization - [ ] delete_organizational_unit - [X] delete_policy -- [ ] deregister_delegated_administrator +- [X] deregister_delegated_administrator - [X] describe_account - [X] describe_create_account_status - [ ] describe_effective_policy @@ -6123,20 +6121,20 @@ - [X] describe_organizational_unit - [X] describe_policy - [ ] detach_policy -- [ ] disable_aws_service_access -- [ ] disable_policy_type +- [X] disable_aws_service_access +- [X] disable_policy_type - [ ] enable_all_features -- [ ] enable_aws_service_access -- [ ] enable_policy_type +- [X] enable_aws_service_access +- [X] enable_policy_type - [ ] invite_account_to_organization - [ ] leave_organization - [X] list_accounts - [X] list_accounts_for_parent -- [ ] list_aws_service_access_for_organization +- [X] list_aws_service_access_for_organization - [X] list_children - [ ] list_create_account_status -- [ ] list_delegated_administrators -- [ ] list_delegated_services_for_account +- [X] list_delegated_administrators +- [X] list_delegated_services_for_account - [ ] list_handshakes_for_account - [ ] list_handshakes_for_organization - [X] list_organizational_units_for_parent @@ -6147,7 +6145,7 @@ - [X] list_tags_for_resource - [X] list_targets_for_policy - [X] move_account -- [ ] register_delegated_administrator +- [X] register_delegated_administrator - [ ] remove_account_from_organization - [X] tag_resource - [X] untag_resource @@ -6545,21 +6543,21 @@ ## ram
-0% implemented +20% implemented - [ ] accept_resource_share_invitation - [ ] associate_resource_share - [ ] associate_resource_share_permission -- [ ] create_resource_share -- [ ] delete_resource_share +- [X] create_resource_share +- [X] delete_resource_share - [ ] disassociate_resource_share - [ ] disassociate_resource_share_permission -- [ ] enable_sharing_with_aws_organization +- [X] enable_sharing_with_aws_organization - [ ] get_permission - [ ] get_resource_policies - [ ] get_resource_share_associations - [ ] get_resource_share_invitations -- [ ] get_resource_shares +- [X] get_resource_shares - [ ] list_pending_invitation_resources - [ ] list_permissions - [ ] list_principals @@ -6570,7 +6568,7 @@ - [ ] reject_resource_share_invitation - [ ] tag_resource - [ ] untag_resource -- [ ] update_resource_share +- [X] update_resource_share
## rds @@ -7074,7 +7072,7 @@ ## s3
-25% implemented +26% implemented - [ ] abort_multipart_upload - [ ] complete_multipart_upload @@ -7093,7 +7091,7 @@ - [X] delete_bucket_tagging - [ ] delete_bucket_website - [X] delete_object -- [x] delete_object_tagging +- [X] delete_object_tagging - [ ] delete_objects - [ ] delete_public_access_block - [ ] get_bucket_accelerate_configuration @@ -7193,7 +7191,7 @@ ## sagemaker
-0% implemented +12% implemented - [ ] add_tags - [ ] associate_trial_component @@ -7203,22 +7201,22 @@ - [ ] create_code_repository - [ ] create_compilation_job - [ ] create_domain -- [ ] create_endpoint -- [ ] create_endpoint_config +- [X] create_endpoint +- [X] create_endpoint_config - [ ] create_experiment - [ ] create_flow_definition - [ ] create_human_task_ui - [ ] create_hyper_parameter_tuning_job - [ ] create_labeling_job -- [ ] create_model +- [X] create_model - [ ] create_model_package - [ ] create_monitoring_schedule -- [ ] create_notebook_instance +- [X] create_notebook_instance - [ ] create_notebook_instance_lifecycle_config - [ ] create_presigned_domain_url - [ ] create_presigned_notebook_instance_url - [ ] create_processing_job -- [ ] create_training_job +- [X] create_training_job - [ ] create_transform_job - [ ] create_trial - [ ] create_trial_component @@ -7228,14 +7226,14 @@ - [ ] delete_app - [ ] delete_code_repository - [ ] delete_domain -- [ ] delete_endpoint -- [ ] delete_endpoint_config +- [X] delete_endpoint +- [X] delete_endpoint_config - [ ] delete_experiment - [ ] delete_flow_definition -- [ ] delete_model +- [X] delete_model - [ ] delete_model_package - [ ] delete_monitoring_schedule -- [ ] delete_notebook_instance +- [X] delete_notebook_instance - [ ] delete_notebook_instance_lifecycle_config - [ ] delete_tags - [ ] delete_trial @@ -7248,21 +7246,21 @@ - [ ] describe_code_repository - [ ] describe_compilation_job - [ ] describe_domain -- [ ] describe_endpoint -- [ ] describe_endpoint_config +- [X] describe_endpoint +- [X] describe_endpoint_config - [ ] describe_experiment - [ ] describe_flow_definition - [ ] describe_human_task_ui - [ ] describe_hyper_parameter_tuning_job - [ ] describe_labeling_job -- [ ] describe_model +- [X] describe_model - [ ] describe_model_package - [ ] describe_monitoring_schedule - [ ] describe_notebook_instance - [ ] describe_notebook_instance_lifecycle_config - [ ] describe_processing_job - [ ] describe_subscribed_workteam -- [ ] describe_training_job +- [X] describe_training_job - [ ] describe_transform_job - [ ] describe_trial - [ ] describe_trial_component @@ -7287,7 +7285,7 @@ - [ ] list_labeling_jobs - [ ] list_labeling_jobs_for_workteam - [ ] list_model_packages -- [ ] list_models +- [X] list_models - [ ] list_monitoring_executions - [ ] list_monitoring_schedules - [ ] list_notebook_instance_lifecycle_configs @@ -7305,13 +7303,13 @@ - [ ] render_ui_template - [ ] search - [ ] start_monitoring_schedule -- [ ] start_notebook_instance +- [X] start_notebook_instance - [ ] stop_auto_ml_job - [ ] stop_compilation_job - [ ] stop_hyper_parameter_tuning_job - [ ] stop_labeling_job - [ ] stop_monitoring_schedule -- [ ] stop_notebook_instance +- [X] stop_notebook_instance - [ ] stop_processing_job - [ ] stop_training_job - [ ] stop_transform_job @@ -7645,7 +7643,7 @@ ## ses
-21% implemented +23% implemented - [ ] clone_receipt_rule_set - [X] create_configuration_set @@ -7653,8 +7651,8 @@ - [ ] create_configuration_set_tracking_options - [ ] create_custom_verification_email_template - [ ] create_receipt_filter -- [ ] create_receipt_rule -- [ ] create_receipt_rule_set +- [X] create_receipt_rule +- [X] create_receipt_rule_set - [ ] create_template - [ ] delete_configuration_set - [ ] delete_configuration_set_event_destination @@ -7959,7 +7957,7 @@ ## ssm
-12% implemented +18% implemented - [X] add_tags_to_resource - [ ] cancel_command @@ -7967,14 +7965,14 @@ - [ ] create_activation - [ ] create_association - [ ] create_association_batch -- [ ] create_document +- [X] create_document - [ ] create_maintenance_window - [ ] create_ops_item - [ ] create_patch_baseline - [ ] create_resource_data_sync - [ ] delete_activation - [ ] delete_association -- [ ] delete_document +- [X] delete_document - [ ] delete_inventory - [ ] delete_maintenance_window - [X] delete_parameter @@ -7992,7 +7990,7 @@ - [ ] describe_automation_executions - [ ] describe_automation_step_executions - [ ] describe_available_patches -- [ ] describe_document +- [X] describe_document - [ ] describe_document_permission - [ ] describe_effective_instance_associations - [ ] describe_effective_patches_for_patch_baseline @@ -8023,7 +8021,7 @@ - [ ] get_connection_status - [ ] get_default_patch_baseline - [ ] get_deployable_patch_snapshot_for_instance -- [ ] get_document +- [X] get_document - [ ] get_inventory - [ ] get_inventory_schema - [ ] get_maintenance_window @@ -8048,7 +8046,7 @@ - [ ] list_compliance_items - [ ] list_compliance_summaries - [ ] list_document_versions -- [ ] list_documents +- [X] list_documents - [ ] list_inventory_entries - [ ] list_resource_compliance_summaries - [ ] list_resource_data_sync @@ -8073,8 +8071,8 @@ - [ ] terminate_session - [ ] update_association - [ ] update_association_status -- [ ] update_document -- [ ] update_document_default_version +- [X] update_document +- [X] update_document_default_version - [ ] update_maintenance_window - [ ] update_maintenance_window_target - [ ] update_maintenance_window_task @@ -8706,7 +8704,6 @@ - [ ] delete_group - [ ] delete_mailbox_permissions - [ ] delete_resource -- [ ] delete_retention_policy - [ ] delete_user - [ ] deregister_from_work_mail - [ ] describe_group @@ -8716,7 +8713,6 @@ - [ ] disassociate_delegate_from_resource - [ ] disassociate_member_from_group - [ ] get_access_control_effect -- [ ] get_default_retention_policy - [ ] get_mailbox_details - [ ] list_access_control_rules - [ ] list_aliases @@ -8730,7 +8726,6 @@ - [ ] list_users - [ ] put_access_control_rule - [ ] put_mailbox_permissions -- [ ] put_retention_policy - [ ] register_to_work_mail - [ ] reset_password - [ ] tag_resource From 99c339c0628bd32150e2daf304bb28c3a06fc91c Mon Sep 17 00:00:00 2001 From: Bert Blommers Date: Mon, 7 Sep 2020 12:56:19 +0100 Subject: [PATCH 514/658] CHANGELOG updates for release 1.3.15 --- CHANGELOG.md | 209 +++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 209 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 732dad23af96..16cb7ece8d84 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,6 +1,215 @@ Moto Changelog =================== +1.3.15 +----- + +Full list of PRs merged in this release: +https://github.com/spulec/moto/pulls?q=is%3Apr+is%3Aclosed+merged%3A2019-11-14..2020-09-07 + + + General Changes: + * The scaffold.py-script has been fixed to make it easier to scaffold new services. + See the README for an introduction. + * Reduced dependency overhead. + It is now possible to install dependencies for only a specific service using pip install moto[service]. 
+ Available services: all, acm, awslambda, batch, cloudformation, cognitoidp, ec2, iotdata, iam, xray + + New Services: + * Application Autoscaling + * Code Commit + * Code Pipeline + * Elastic Beanstalk + * Kinesis Video + * Kinesis Video Archived Media + * Managed BlockChain + * Resource Access Manager (ram) + * Sagemaker + + New Methods: + * Athena: + * create_named_query + * get_named_query + * get_work_group + * start_query_execution + * stop_query_execution + * API Gateway: + * create_authorizer + * create_domain_name + * create_model + * delete_authorizer + * get_authorizer + * get_authorizers + * get_domain_name + * get_domain_names + * get_model + * get_models + * update_authorizer + * Autoscaling: + * enter_standby + * exit_standby + * terminate_instance_in_auto_scaling_group + * CloudFormation: + * get_template_summary + * CloudWatch: + * describe_alarms_for_metric + * get_metric_data + * CloudWatch Logs: + * delete_subscription_filter + * describe_subscription_filters + * put_subscription_filter + * Cognito IDP: + * associate_software_token + * create_resource_server + * confirm_sign_up + * initiate_auth + * set_user_mfa_preference + * sign_up + * verify_software_token + * DynamoDB: + * describe_continuous_backups + * transact_get_items + * transact_write_items + * update_continuous_backups + * EC2: + * create_vpc_endpoint + * describe_vpc_classic_link + * describe_vpc_classic_link_dns_support + * describe_vpc_endpoint_services + * disable_vpc_classic_link + * disable_vpc_classic_link_dns_support + * enable_vpc_classic_link + * enable_vpc_classic_link_dns_support + * register_image + * ECS: + * create_task_set + * delete_task_set + * describe_task_set + * update_service_primary_task_set + * update_task_set + * Events: + * delete_event_bus + * create_event_bus + * list_event_buses + * list_tags_for_resource + * tag_resource + * untag_resource + * Glue: + * get_databases + * IAM: + * delete_group + * delete_instance_profile + * delete_ssh_public_key + * get_account_summary + * get_ssh_public_key + * list_user_tags + * list_ssh_public_keys + * update_ssh_public_key + * upload_ssh_public_key + * IOT: + * cancel_job + * cancel_job_execution + * create_policy_version + * delete_job + * delete_job_execution + * describe_endpoint + * describe_job_execution + * delete_policy_version + * get_policy_version + * get_job_document + * list_attached_policies + * list_job_executions_for_job + * list_job_executions_for_thing + * list_jobs + * list_policy_versions + * set_default_policy_version + * register_certificate_without_ca + * KMS: + * untag_resource + * Lambda: + * delete_function_concurrency + * get_function_concurrency + * put_function_concurrency + * Organisations: + * describe_create_account_status + * deregister_delegated_administrator + * disable_policy_type + * enable_policy_type + * list_delegated_administrators + * list_delegated_services_for_account + * list_tags_for_resource + * register_delegated_administrator + * tag_resource + * untag_resource + * update_organizational_unit + * S3: + * delete_bucket_encryption + * delete_public_access_block + * get_bucket_encryption + * get_public_access_block + * put_bucket_encryption + * put_public_access_block + * S3 Control: + * delete_public_access_block + * get_public_access_block + * put_public_access_block + * SecretsManager: + * get_resource_policy + * update_secret + * SES: + * create_configuration_set + * create_configuration_set_event_destination + * create_receipt_rule_set + * create_receipt_rule + * create_template + * 
get_template
+      * get_send_statistics
+      * list_templates
+    * STS:
+      * assume_role_with_saml
+    * SSM:
+      * create_document
+      * delete_document
+      * describe_document
+      * get_document
+      * list_documents
+      * update_document
+      * update_document_default_version
+    * SWF:
+      * undeprecate_activity_type
+      * undeprecate_domain
+      * undeprecate_workflow_type
+
+    General Updates:
+    * API Gateway - create_rest_api now supports policy-parameter
+    * Autoscaling - describe_auto_scaling_instances now supports InstanceIds-parameter
+    * AutoScalingGroups - now support launch templates
+    * CF - Now supports DependsOn-configuration
+    * CF - Now supports FN::Transform AWS::Include mapping
+    * CF - Now supports update and deletion of Lambdas
+    * CF - Now supports creation, update and deletion of EventBus (Events)
+    * CF - Now supports update of Rules (Events)
+    * CF - Now supports creation, update and deletion of EventSourceMappings (AWS Lambda)
+    * CF - Now supports update and deletion of Kinesis Streams
+    * CF - Now supports creation of DynamoDB streams
+    * CF - Now supports deletion of DynamoDB tables
+    * CF - list_stacks now supports the status_filter-parameter
+    * Cognito IDP - list_users now supports filter-parameter
+    * DynamoDB - GSI/LSI's now support ProjectionType=KEYS_ONLY
+    * EC2 - create_route now supports the NetworkInterfaceId-parameter
+    * EC2 - describe_instances now supports additional filters (owner-id)
+    * EC2 - describe_instance_status now supports additional filters (instance-state-name, instance-state-code)
+    * EC2 - describe_nat_gateways now supports additional filters (nat-gateway-id, vpc-id, subnet-id, state)
+    * EC2 - describe_vpn_gateways now supports additional filters (attachment.vpc_id, attachment.state, vpn-gateway-id, type)
+    * IAM - list_users now supports path_prefix-parameter
+    * IOT - list_thing_groups now supports parent_group, name_prefix_filter, recursive-parameters
+    * S3 - delete_objects now supports deletion of specific VersionIds
+    * SecretsManager - list_secrets now supports filters-parameter
+    * SFN - start_execution now receives and validates input
+    * SNS - Now supports sending a message directly to a phone number
+    * SQS - MessageAttributes now support labeled DataTypes
+
+
 1.3.14
 -----

From 06d403d31309fe7174ea98b7d81d4b2db1fc6aaf Mon Sep 17 00:00:00 2001
From: Steve Pulec
Date: Mon, 7 Sep 2020 10:36:56 -0500
Subject: [PATCH 515/658] 1.3.15
---
 moto/__init__.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/moto/__init__.py b/moto/__init__.py
index e21d3f894a90..45ae971734a3 100644
--- a/moto/__init__.py
+++ b/moto/__init__.py
@@ -122,7 +122,7 @@ def f(*args, **kwargs):
 # logging.getLogger('boto').setLevel(logging.CRITICAL)
 
 __title__ = "moto"
-__version__ = "1.3.15.dev"
+__version__ = "1.3.15"
 
 
 try:

From 9232cc7ba0be75ff923b2717a4b6d74baa69db27 Mon Sep 17 00:00:00 2001
From: Bert Blommers
Date: Tue, 8 Sep 2020 02:47:44 -0700
Subject: [PATCH 516/658] #3290 - Add additional dependencies in extras_require
---
 setup.py | 12 +++++++-----
 1 file changed, 7 insertions(+), 5 deletions(-)

diff --git a/setup.py b/setup.py
index 92ab7a5de0ea..fe28961918d0 100755
--- a/setup.py
+++ b/setup.py
@@ -99,25 +99,27 @@ def get_version():
     _dep_sshpubkeys_py2,
     _dep_sshpubkeys_py3,
 ]
+all_server_deps = all_extra_deps + ['flask']
 
 # TODO: do we want to add ALL services here?
 # i.e. even those without extra dependencies.
 # Would be good for future-compatibility, I guess.
extras_per_service = {
-    "ec2": [_dep_cryptography, _dep_sshpubkeys_py2, _dep_sshpubkeys_py3],
     'acm': [_dep_cryptography],
-    'iam': [_dep_cryptography],
-    'cloudformation': [_dep_PyYAML, _dep_cfn_lint],
-    'cognitoidp': [_dep_python_jose, _dep_python_jose_ecdsa_pin],
     'awslambda': [_dep_docker],
     'batch': [_dep_docker],
+    'cloudformation': [_dep_PyYAML, _dep_cfn_lint],
+    'cognitoidp': [_dep_python_jose, _dep_python_jose_ecdsa_pin],
+    "ec2": [_dep_cryptography, _dep_sshpubkeys_py2, _dep_sshpubkeys_py3],
+    'iam': [_dep_cryptography],
     'iotdata': [_dep_jsondiff],
+    's3': [_dep_cryptography],
     'xray': [_dep_aws_xray_sdk],
 }
 
 extras_require = {
     'all': all_extra_deps,
-    'server': ['flask'],
+    'server': all_server_deps,
 }
 
 extras_require.update(extras_per_service)

From ed9668107847233ec6f695a494269825e01ba932 Mon Sep 17 00:00:00 2001
From: Steve Pulec
Date: Tue, 8 Sep 2020 20:33:03 -0500
Subject: [PATCH 517/658] Revert dependency cleanup for now. See #3290.
---
 setup.py | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/setup.py b/setup.py
index fe28961918d0..5f6840251e30 100755
--- a/setup.py
+++ b/setup.py
@@ -148,7 +148,8 @@ def get_version():
         ],
     },
     packages=find_packages(exclude=("tests", "tests.*")),
-    install_requires=install_requires,
+    # Adding all requirements for now until we cut a larger release
+    install_requires=install_requires + all_extra_deps,
     extras_require=extras_require,
     include_package_data=True,
     license="Apache",

From 63ce647123755e4c4693a89f52c254596004c098 Mon Sep 17 00:00:00 2001
From: Steve Pulec
Date: Tue, 8 Sep 2020 20:33:29 -0500
Subject: [PATCH 518/658] 1.3.16
---
 CHANGELOG.md     | 4 ++++
 moto/__init__.py | 2 +-
 2 files changed, 5 insertions(+), 1 deletion(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 16cb7ece8d84..9df85cf37a6f 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,6 +1,10 @@
 Moto Changelog
 ===================
 
+1.3.16
+-----
+    * Undoing dependency cleanup until we cut a larger release
+
 1.3.15
 -----
 
diff --git a/moto/__init__.py b/moto/__init__.py
index 45ae971734a3..d34cd29b19c5 100644
--- a/moto/__init__.py
+++ b/moto/__init__.py
@@ -122,7 +122,7 @@ def f(*args, **kwargs):
 # logging.getLogger('boto').setLevel(logging.CRITICAL)
 
 __title__ = "moto"
-__version__ = "1.3.15"
+__version__ = "1.3.16"
 
 
 try:

From 16d5d2df28dd0c2da474d7ee90ba873889fe48ac Mon Sep 17 00:00:00 2001
From: Steve Pulec
Date: Tue, 8 Sep 2020 20:37:08 -0500
Subject: [PATCH 519/658] Dev releases.
--- moto/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/moto/__init__.py b/moto/__init__.py index d34cd29b19c5..d9e57189390e 100644 --- a/moto/__init__.py +++ b/moto/__init__.py @@ -122,7 +122,7 @@ def f(*args, **kwargs): # logging.getLogger('boto').setLevel(logging.CRITICAL) __title__ = "moto" -__version__ = "1.3.16" +__version__ = "1.3.16.dev" try: From c321ad46b0cdce6b4c7c860aa5c9f760fb94a437 Mon Sep 17 00:00:00 2001 From: Daniel Okey-Okoro <46732983+okeyokoro@users.noreply.github.com> Date: Thu, 10 Sep 2020 00:32:41 -0700 Subject: [PATCH 520/658] fix: deleting non-existent topic shouldn't raise KeyError (#3285) --- moto/sns/models.py | 5 ++++- tests/test_sns/test_topics.py | 6 ++++++ tests/test_sns/test_topics_boto3.py | 9 +++++++++ 3 files changed, 19 insertions(+), 1 deletion(-) diff --git a/moto/sns/models.py b/moto/sns/models.py index 1d956ffde3cc..6ac709098172 100644 --- a/moto/sns/models.py +++ b/moto/sns/models.py @@ -426,7 +426,10 @@ def list_topics(self, next_token=None): return self._get_values_nexttoken(self.topics, next_token) def delete_topic(self, arn): - self.topics.pop(arn) + try: + self.topics.pop(arn) + except KeyError: + raise SNSNotFoundError("Topic with arn {0} not found".format(arn)) def get_topic(self, arn): try: diff --git a/tests/test_sns/test_topics.py b/tests/test_sns/test_topics.py index e91ab6e2d9a4..b561b94a18a2 100644 --- a/tests/test_sns/test_topics.py +++ b/tests/test_sns/test_topics.py @@ -32,6 +32,12 @@ def test_create_and_delete_topic(): topics.should.have.length_of(0) +@mock_sns_deprecated +def test_delete_non_existent_topic(): + conn = boto.connect_sns() + conn.delete_topic.when.called_with("a-fake-arn").should.throw(BotoServerError) + + @mock_sns_deprecated def test_get_missing_topic(): conn = boto.connect_sns() diff --git a/tests/test_sns/test_topics_boto3.py b/tests/test_sns/test_topics_boto3.py index 87800bd84f73..a2d12f56f4ac 100644 --- a/tests/test_sns/test_topics_boto3.py +++ b/tests/test_sns/test_topics_boto3.py @@ -35,6 +35,15 @@ def test_create_and_delete_topic(): topics.should.have.length_of(0) +@mock_sns +def test_delete_non_existent_topic(): + conn = boto3.client("sns", region_name="us-east-1") + + conn.delete_topic.when.called_with( + TopicArn="arn:aws:sns:us-east-1:123456789012:fake-topic" + ).should.throw(conn.exceptions.NotFoundException) + + @mock_sns def test_create_topic_with_attributes(): conn = boto3.client("sns", region_name="us-east-1") From 7054143701a9f30294f316b81502b545c805fc32 Mon Sep 17 00:00:00 2001 From: Karthikeyan Singaravelan Date: Thu, 10 Sep 2020 13:50:26 +0530 Subject: [PATCH 521/658] Fix deprecation warnings due to invalid escape sequences. (#3273) * Fix deprecation warnings due to invalid escape sequences. * Fix linter error. 
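
For context, here is a minimal, self-contained sketch (not part of this patch; the sample pattern simply mirrors the `\/m-[A-Z0-9]{26}` regex touched below) of the warning these changes silence, and of how a raw-string literal avoids it:

```python
import re
import warnings

# Compiling source that contains "\/" (not a recognised escape in a plain
# string literal) emits DeprecationWarning on CPython 3.6+; later versions
# upgrade it to SyntaxWarning and, eventually, a SyntaxError.
with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    compile(r'pattern = "\/m-[A-Z0-9]{26}"', "<example>", "exec")
assert any(issubclass(w.category, (DeprecationWarning, SyntaxWarning)) for w in caught)

# The raw-string form used throughout this patch produces no warning and
# passes the backslash through to the regex engine, which treats \/ as a
# literal forward slash.
match = re.search(r"\/m-[A-Z0-9]{26}", "/m-ABCDEFGHIJKLMNOPQRSTUVWXYZ", re.IGNORECASE)
assert match.group(0) == "/m-ABCDEFGHIJKLMNOPQRSTUVWXYZ"
```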
--- moto/apigateway/urls.py | 4 ++-- moto/ec2instanceconnect/urls.py | 2 +- moto/events/responses.py | 4 ++-- moto/managedblockchain/utils.py | 10 +++++----- moto/rds/urls.py | 2 +- moto/route53/urls.py | 16 ++++++++-------- moto/sqs/urls.py | 2 +- tests/test_awslambda/test_lambda.py | 2 +- tests/test_core/test_url_mapping.py | 10 +++++----- tests/test_ec2/test_key_pairs.py | 2 +- tests/test_ec2/test_tags.py | 6 +++--- tests/test_sns/test_publishing.py | 4 ++-- tests/test_sns/test_publishing_boto3.py | 6 +++--- 13 files changed, 35 insertions(+), 35 deletions(-) diff --git a/moto/apigateway/urls.py b/moto/apigateway/urls.py index cb48e225f6f1..7e8de139870e 100644 --- a/moto/apigateway/urls.py +++ b/moto/apigateway/urls.py @@ -15,9 +15,9 @@ "{0}/restapis/(?P[^/]+)/deployments/(?P[^/]+)/?$": APIGatewayResponse().individual_deployment, "{0}/restapis/(?P[^/]+)/resources/(?P[^/]+)/?$": APIGatewayResponse().resource_individual, "{0}/restapis/(?P[^/]+)/resources/(?P[^/]+)/methods/(?P[^/]+)/?$": APIGatewayResponse().resource_methods, - "{0}/restapis/(?P[^/]+)/resources/(?P[^/]+)/methods/(?P[^/]+)/responses/(?P\d+)$": APIGatewayResponse().resource_method_responses, + r"{0}/restapis/(?P[^/]+)/resources/(?P[^/]+)/methods/(?P[^/]+)/responses/(?P\d+)$": APIGatewayResponse().resource_method_responses, "{0}/restapis/(?P[^/]+)/resources/(?P[^/]+)/methods/(?P[^/]+)/integration/?$": APIGatewayResponse().integrations, - "{0}/restapis/(?P[^/]+)/resources/(?P[^/]+)/methods/(?P[^/]+)/integration/responses/(?P\d+)/?$": APIGatewayResponse().integration_responses, + r"{0}/restapis/(?P[^/]+)/resources/(?P[^/]+)/methods/(?P[^/]+)/integration/responses/(?P\d+)/?$": APIGatewayResponse().integration_responses, "{0}/apikeys$": APIGatewayResponse().apikeys, "{0}/apikeys/(?P[^/]+)": APIGatewayResponse().apikey_individual, "{0}/usageplans$": APIGatewayResponse().usage_plans, diff --git a/moto/ec2instanceconnect/urls.py b/moto/ec2instanceconnect/urls.py index e7078264fb10..d7b6b7ce4c05 100644 --- a/moto/ec2instanceconnect/urls.py +++ b/moto/ec2instanceconnect/urls.py @@ -1,6 +1,6 @@ from __future__ import unicode_literals from .responses import Ec2InstanceConnectResponse -url_bases = ["https?://ec2-instance-connect\.(.+)\.amazonaws\.com"] +url_bases = [r"https?://ec2-instance-connect\.(.+)\.amazonaws\.com"] url_paths = {"{0}/$": Ec2InstanceConnectResponse.dispatch} diff --git a/moto/events/responses.py b/moto/events/responses.py index 76c590e16ba7..c4e49fc80c81 100644 --- a/moto/events/responses.py +++ b/moto/events/responses.py @@ -183,9 +183,9 @@ def put_rule(self): if sched_exp: if not ( - re.match("^cron\(.*\)", sched_exp) + re.match(r"^cron\(.*\)", sched_exp) or re.match( - "^rate\(\d*\s(minute|minutes|hour|hours|day|days)\)", sched_exp + r"^rate\(\d*\s(minute|minutes|hour|hours|day|days)\)", sched_exp ) ): return self.error( diff --git a/moto/managedblockchain/utils.py b/moto/managedblockchain/utils.py index c8118619eadb..d0485829bf11 100644 --- a/moto/managedblockchain/utils.py +++ b/moto/managedblockchain/utils.py @@ -14,7 +14,7 @@ def region_from_managedblckchain_url(url): def networkid_from_managedblockchain_url(full_url): - id_search = re.search("\/n-[A-Z0-9]{26}", full_url, re.IGNORECASE) + id_search = re.search(r"\/n-[A-Z0-9]{26}", full_url, re.IGNORECASE) return_id = None if id_search: return_id = id_search.group(0).replace("/", "") @@ -28,7 +28,7 @@ def get_network_id(): def memberid_from_managedblockchain_url(full_url): - id_search = re.search("\/m-[A-Z0-9]{26}", full_url, re.IGNORECASE) + id_search = 
re.search(r"\/m-[A-Z0-9]{26}", full_url, re.IGNORECASE) return_id = None if id_search: return_id = id_search.group(0).replace("/", "") @@ -42,7 +42,7 @@ def get_member_id(): def proposalid_from_managedblockchain_url(full_url): - id_search = re.search("\/p-[A-Z0-9]{26}", full_url, re.IGNORECASE) + id_search = re.search(r"\/p-[A-Z0-9]{26}", full_url, re.IGNORECASE) return_id = None if id_search: return_id = id_search.group(0).replace("/", "") @@ -56,7 +56,7 @@ def get_proposal_id(): def invitationid_from_managedblockchain_url(full_url): - id_search = re.search("\/in-[A-Z0-9]{26}", full_url, re.IGNORECASE) + id_search = re.search(r"\/in-[A-Z0-9]{26}", full_url, re.IGNORECASE) return_id = None if id_search: return_id = id_search.group(0).replace("/", "") @@ -107,7 +107,7 @@ def admin_password_ok(password): def nodeid_from_managedblockchain_url(full_url): - id_search = re.search("\/nd-[A-Z0-9]{26}", full_url, re.IGNORECASE) + id_search = re.search(r"\/nd-[A-Z0-9]{26}", full_url, re.IGNORECASE) return_id = None if id_search: return_id = id_search.group(0).replace("/", "") diff --git a/moto/rds/urls.py b/moto/rds/urls.py index 9c7570167c64..86e6ec00bc43 100644 --- a/moto/rds/urls.py +++ b/moto/rds/urls.py @@ -1,6 +1,6 @@ from __future__ import unicode_literals from .responses import RDSResponse -url_bases = ["https?://rds(\..+)?.amazonaws.com"] +url_bases = [r"https?://rds(\..+)?.amazonaws.com"] url_paths = {"{0}/$": RDSResponse.dispatch} diff --git a/moto/route53/urls.py b/moto/route53/urls.py index a697d258a4cf..c0fc9373460c 100644 --- a/moto/route53/urls.py +++ b/moto/route53/urls.py @@ -13,12 +13,12 @@ def tag_response2(*args, **kwargs): url_paths = { - "{0}/(?P[\d_-]+)/hostedzone$": Route53().list_or_create_hostzone_response, - "{0}/(?P[\d_-]+)/hostedzone/(?P[^/]+)$": Route53().get_or_delete_hostzone_response, - "{0}/(?P[\d_-]+)/hostedzone/(?P[^/]+)/rrset/?$": Route53().rrset_response, - "{0}/(?P[\d_-]+)/hostedzonesbyname": Route53().list_hosted_zones_by_name_response, - "{0}/(?P[\d_-]+)/healthcheck": Route53().health_check_response, - "{0}/(?P[\d_-]+)/tags/healthcheck/(?P[^/]+)$": tag_response1, - "{0}/(?P[\d_-]+)/tags/hostedzone/(?P[^/]+)$": tag_response2, - "{0}/(?P[\d_-]+)/trafficpolicyinstances/*": Route53().not_implemented_response, + r"{0}/(?P[\d_-]+)/hostedzone$": Route53().list_or_create_hostzone_response, + r"{0}/(?P[\d_-]+)/hostedzone/(?P[^/]+)$": Route53().get_or_delete_hostzone_response, + r"{0}/(?P[\d_-]+)/hostedzone/(?P[^/]+)/rrset/?$": Route53().rrset_response, + r"{0}/(?P[\d_-]+)/hostedzonesbyname": Route53().list_hosted_zones_by_name_response, + r"{0}/(?P[\d_-]+)/healthcheck": Route53().health_check_response, + r"{0}/(?P[\d_-]+)/tags/healthcheck/(?P[^/]+)$": tag_response1, + r"{0}/(?P[\d_-]+)/tags/hostedzone/(?P[^/]+)$": tag_response2, + r"{0}/(?P[\d_-]+)/trafficpolicyinstances/*": Route53().not_implemented_response, } diff --git a/moto/sqs/urls.py b/moto/sqs/urls.py index 3acf8591a4eb..54fd44650a48 100644 --- a/moto/sqs/urls.py +++ b/moto/sqs/urls.py @@ -7,5 +7,5 @@ url_paths = { "{0}/$": dispatch, - "{0}/(?P\d+)/(?P[a-zA-Z0-9\-_\.]+)": dispatch, + r"{0}/(?P\d+)/(?P[a-zA-Z0-9\-_\.]+)": dispatch, } diff --git a/tests/test_awslambda/test_lambda.py b/tests/test_awslambda/test_lambda.py index ca05d4aa4f15..f7e7b3c7e1bf 100644 --- a/tests/test_awslambda/test_lambda.py +++ b/tests/test_awslambda/test_lambda.py @@ -1663,7 +1663,7 @@ def test_update_function_s3(): def test_create_function_with_invalid_arn(): err = create_invalid_lambda("test-iam-role") 
     err.exception.response["Error"]["Message"].should.equal(
-        "1 validation error detected: Value 'test-iam-role' at 'role' failed to satisfy constraint: Member must satisfy regular expression pattern: arn:(aws[a-zA-Z-]*)?:iam::(\d{12}):role/?[a-zA-Z_0-9+=,.@\-_/]+"
+        r"1 validation error detected: Value 'test-iam-role' at 'role' failed to satisfy constraint: Member must satisfy regular expression pattern: arn:(aws[a-zA-Z-]*)?:iam::(\d{12}):role/?[a-zA-Z_0-9+=,.@\-_/]+"
     )
diff --git a/tests/test_core/test_url_mapping.py b/tests/test_core/test_url_mapping.py
index 4dccc4f218c2..9d0632e05c20 100644
--- a/tests/test_core/test_url_mapping.py
+++ b/tests/test_core/test_url_mapping.py
@@ -14,10 +14,10 @@ def test_flask_path_converting_simple():


 def test_flask_path_converting_regex():
-    convert_regex_to_flask_path("/(?P<key_name>[a-zA-Z0-9\-_]+)").should.equal(
-        '/<regex("[a-zA-Z0-9\-_]+"):key_name>'
+    convert_regex_to_flask_path(r"/(?P<key_name>[a-zA-Z0-9\-_]+)").should.equal(
+        r'/<regex("[a-zA-Z0-9\-_]+"):key_name>'
     )

-    convert_regex_to_flask_path("(?P<account_id>\d+)/(?P<queue_name>.*)$").should.equal(
-        '/<regex("\d+"):account_id>/<regex(".*"):queue_name>'
-    )
+    convert_regex_to_flask_path(
+        r"(?P<account_id>\d+)/(?P<queue_name>.*)$"
+    ).should.equal(r'/<regex("\d+"):account_id>/<regex(".*"):queue_name>')
diff --git a/tests/test_ec2/test_key_pairs.py b/tests/test_ec2/test_key_pairs.py
index d632c2478a79..09982ac7a06d 100644
--- a/tests/test_ec2/test_key_pairs.py
+++ b/tests/test_ec2/test_key_pairs.py
@@ -16,7 +16,7 @@
 RSA_PUBLIC_KEY_OPENSSH = b"""\
 ssh-rsa \
 AAAAB3NzaC1yc2EAAAADAQABAAABAQDusXfgTE4eBP50NglSzCSEGnIL6+cr6m3H\
-6cZANOQ+P1o/W4BdtcAL3sor4iGi7SOeJgo\8kweyMQrhrt6HaKGgromRiz37LQx\
+6cZANOQ+P1o/W4BdtcAL3sor4iGi7SOeJgo\\8kweyMQrhrt6HaKGgromRiz37LQx\
 4YIAcBi4Zd023mO/V7Rc2Chh18mWgLSmA6ng+j37ip6452zxtv0jHAz9pJolbKBp\
 JzbZlPN45ZCTk9ck0fSVHRl6VRSSPQcpqi65XpRf+35zNOCGCc1mAOOTmw59Q2a6\
 A3t8mL7r91aM5q6QOQm219lctFM8O7HRJnDgmhGpnjRwE1LyKktWTbgFZ4SNWU2X\
diff --git a/tests/test_ec2/test_tags.py b/tests/test_ec2/test_tags.py
index 92ed18dd46f9..8480f8bc0a50 100644
--- a/tests/test_ec2/test_tags.py
+++ b/tests/test_ec2/test_tags.py
@@ -287,13 +287,13 @@ def test_get_all_tags_value_filter():
     tags = conn.get_all_tags(filters={"value": "*some*value*"})
     tags.should.have.length_of(3)

-    tags = conn.get_all_tags(filters={"value": "*value\*"})
+    tags = conn.get_all_tags(filters={"value": r"*value\*"})
     tags.should.have.length_of(1)

-    tags = conn.get_all_tags(filters={"value": "*value\*\*"})
+    tags = conn.get_all_tags(filters={"value": r"*value\*\*"})
     tags.should.have.length_of(1)

-    tags = conn.get_all_tags(filters={"value": "*value\*\?"})
+    tags = conn.get_all_tags(filters={"value": r"*value\*\?"})
     tags.should.have.length_of(1)

diff --git a/tests/test_sns/test_publishing.py b/tests/test_sns/test_publishing.py
index 30fa80f15a01..cc7dbb8d69f3 100644
--- a/tests/test_sns/test_publishing.py
+++ b/tests/test_sns/test_publishing.py
@@ -54,7 +54,7 @@ def test_publish_to_sqs():
         "us-east-1",
     )
     acquired_message = re.sub(
-        "\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}\.\d{3}Z",
+        r"\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}\.\d{3}Z",
         "2015-01-01T12:00:00.000Z",
         message.get_body(),
     )
@@ -98,7 +98,7 @@ def test_publish_to_sqs_in_different_region():
     )

     acquired_message = re.sub(
-        "\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}\.\d{3}Z",
+        r"\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}\.\d{3}Z",
         "2015-01-01T12:00:00.000Z",
         message.get_body(),
     )
diff --git a/tests/test_sns/test_publishing_boto3.py b/tests/test_sns/test_publishing_boto3.py
index 99e7ae7a45b2..c84f19694964 100644
--- a/tests/test_sns/test_publishing_boto3.py
+++ b/tests/test_sns/test_publishing_boto3.py
@@ -49,7 +49,7 @@ def test_publish_to_sqs():
     messages = queue.receive_messages(MaxNumberOfMessages=1)
     expected = MESSAGE_FROM_SQS_TEMPLATE % (message, published_message_id, "us-east-1")
     acquired_message = re.sub(
-        "\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}\.\d{3}Z",
+        r"\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}\.\d{3}Z",
         "2015-01-01T12:00:00.000Z",
         messages[0].body,
     )
@@ -290,7 +290,7 @@ def test_publish_to_sqs_dump_json():
     escaped = message.replace('"', '\\"')
     expected = MESSAGE_FROM_SQS_TEMPLATE % (escaped, published_message_id, "us-east-1")
     acquired_message = re.sub(
-        "\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}\.\d{3}Z",
+        r"\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}\.\d{3}Z",
         "2015-01-01T12:00:00.000Z",
         messages[0].body,
     )
@@ -323,7 +323,7 @@ def test_publish_to_sqs_in_different_region():
     messages = queue.receive_messages(MaxNumberOfMessages=1)
     expected = MESSAGE_FROM_SQS_TEMPLATE % (message, published_message_id, "us-west-1")
     acquired_message = re.sub(
-        "\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}\.\d{3}Z",
+        r"\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}\.\d{3}Z",
         "2015-01-01T12:00:00.000Z",
         messages[0].body,
     )

From c2d1ce2c1451a3785b3842f4c0f031443999ce9e Mon Sep 17 00:00:00 2001
From: Arcadiy Ivanov
Date: Fri, 11 Sep 2020 05:17:39 -0400
Subject: [PATCH 522/658] Add If-Match, If-None-Match and If-Unmodified-Since
 to S3 GET/HEAD (#3021)

fixes #2705
---
 moto/s3/exceptions.py    |  20 ++++++-
 moto/s3/responses.py     |  40 +++++++++++---
 tests/test_s3/test_s3.py | 114 +++++++++++++++++++++++++++++++++++++++
 3 files changed, 167 insertions(+), 7 deletions(-)

diff --git a/moto/s3/exceptions.py b/moto/s3/exceptions.py
index 3ed385f1cea9..7ea21b096190 100644
--- a/moto/s3/exceptions.py
+++ b/moto/s3/exceptions.py
@@ -1,6 +1,6 @@
 from __future__ import unicode_literals
-from moto.core.exceptions import RESTError
+from moto.core.exceptions import RESTError

 ERROR_WITH_BUCKET_NAME = """{% extends 'single_error' %}
 {% block extra %}<BucketName>{{ bucket }}</BucketName>{% endblock %}
 """
@@ -10,6 +10,10 @@
 {% block extra %}<KeyName>{{ key_name }}</KeyName>{% endblock %}
 """

+ERROR_WITH_CONDITION_NAME = """{% extends 'single_error' %}
+{% block extra %}<Condition>{{ condition }}</Condition>{% endblock %}
+"""
+

 class S3ClientError(RESTError):
     def __init__(self, *args, **kwargs):
@@ -386,3 +390,17 @@ def __init__(self):
         super(NoSuchUpload, self).__init__(
             "NoSuchUpload", "The specified multipart upload does not exist."
) + + +class PreconditionFailed(S3ClientError): + code = 412 + + def __init__(self, failed_condition, **kwargs): + kwargs.setdefault("template", "condition_error") + self.templates["condition_error"] = ERROR_WITH_CONDITION_NAME + super(PreconditionFailed, self).__init__( + "PreconditionFailed", + "At least one of the pre-conditions you specified did not hold", + condition=failed_condition, + **kwargs + ) diff --git a/moto/s3/responses.py b/moto/s3/responses.py index 395cb573676a..fa3e536a7476 100644 --- a/moto/s3/responses.py +++ b/moto/s3/responses.py @@ -36,6 +36,7 @@ InvalidNotificationEvent, ObjectNotInActiveTierError, NoSystemTags, + PreconditionFailed, ) from .models import ( s3_backend, @@ -1149,13 +1150,28 @@ def _key_response_get(self, bucket_name, query, key_name, headers): ) version_id = query.get("versionId", [None])[0] if_modified_since = headers.get("If-Modified-Since", None) + if_match = headers.get("If-Match", None) + if_none_match = headers.get("If-None-Match", None) + if_unmodified_since = headers.get("If-Unmodified-Since", None) + key = self.backend.get_object(bucket_name, key_name, version_id=version_id) if key is None: raise MissingKey(key_name) + + if if_unmodified_since: + if_unmodified_since = str_to_rfc_1123_datetime(if_unmodified_since) + if key.last_modified > if_unmodified_since: + raise PreconditionFailed("If-Unmodified-Since") + if if_match and key.etag != if_match: + raise PreconditionFailed("If-Match") + if if_modified_since: if_modified_since = str_to_rfc_1123_datetime(if_modified_since) - if if_modified_since and key.last_modified < if_modified_since: + if key.last_modified < if_modified_since: + return 304, response_headers, "Not Modified" + if if_none_match and key.etag == if_none_match: return 304, response_headers, "Not Modified" + if "acl" in query: template = self.response_template(S3_OBJECT_ACL_RESPONSE) return 200, response_headers, template.render(obj=key) @@ -1319,8 +1335,9 @@ def _key_response_head(self, bucket_name, query, key_name, headers): part_number = int(part_number) if_modified_since = headers.get("If-Modified-Since", None) - if if_modified_since: - if_modified_since = str_to_rfc_1123_datetime(if_modified_since) + if_match = headers.get("If-Match", None) + if_none_match = headers.get("If-None-Match", None) + if_unmodified_since = headers.get("If-Unmodified-Since", None) key = self.backend.get_object( bucket_name, key_name, version_id=version_id, part_number=part_number @@ -1329,10 +1346,21 @@ def _key_response_head(self, bucket_name, query, key_name, headers): response_headers.update(key.metadata) response_headers.update(key.response_dict) - if if_modified_since and key.last_modified < if_modified_since: + if if_unmodified_since: + if_unmodified_since = str_to_rfc_1123_datetime(if_unmodified_since) + if key.last_modified > if_unmodified_since: + return 412, response_headers, "" + if if_match and key.etag != if_match: + return 412, response_headers, "" + + if if_modified_since: + if_modified_since = str_to_rfc_1123_datetime(if_modified_since) + if key.last_modified < if_modified_since: + return 304, response_headers, "Not Modified" + if if_none_match and key.etag == if_none_match: return 304, response_headers, "Not Modified" - else: - return 200, response_headers, "" + + return 200, response_headers, "" else: return 404, response_headers, "" diff --git a/tests/test_s3/test_s3.py b/tests/test_s3/test_s3.py index 960594801cbe..4139cf0550dc 100644 --- a/tests/test_s3/test_s3.py +++ b/tests/test_s3/test_s3.py @@ -2335,6 +2335,64 @@ def 
test_boto3_get_object_if_modified_since(): e.response["Error"].should.equal({"Code": "304", "Message": "Not Modified"}) +@mock_s3 +def test_boto3_get_object_if_unmodified_since(): + s3 = boto3.client("s3", region_name=DEFAULT_REGION_NAME) + bucket_name = "blah" + s3.create_bucket(Bucket=bucket_name) + + key = "hello.txt" + + s3.put_object(Bucket=bucket_name, Key=key, Body="test") + + with assert_raises(botocore.exceptions.ClientError) as err: + s3.get_object( + Bucket=bucket_name, + Key=key, + IfUnmodifiedSince=datetime.datetime.utcnow() - datetime.timedelta(hours=1), + ) + e = err.exception + e.response["Error"]["Code"].should.equal("PreconditionFailed") + e.response["Error"]["Condition"].should.equal("If-Unmodified-Since") + + +@mock_s3 +def test_boto3_get_object_if_match(): + s3 = boto3.client("s3", region_name=DEFAULT_REGION_NAME) + bucket_name = "blah" + s3.create_bucket(Bucket=bucket_name) + + key = "hello.txt" + + s3.put_object(Bucket=bucket_name, Key=key, Body="test") + + with assert_raises(botocore.exceptions.ClientError) as err: + s3.get_object( + Bucket=bucket_name, Key=key, IfMatch='"hello"', + ) + e = err.exception + e.response["Error"]["Code"].should.equal("PreconditionFailed") + e.response["Error"]["Condition"].should.equal("If-Match") + + +@mock_s3 +def test_boto3_get_object_if_none_match(): + s3 = boto3.client("s3", region_name=DEFAULT_REGION_NAME) + bucket_name = "blah" + s3.create_bucket(Bucket=bucket_name) + + key = "hello.txt" + + etag = s3.put_object(Bucket=bucket_name, Key=key, Body="test")["ETag"] + + with assert_raises(botocore.exceptions.ClientError) as err: + s3.get_object( + Bucket=bucket_name, Key=key, IfNoneMatch=etag, + ) + e = err.exception + e.response["Error"].should.equal({"Code": "304", "Message": "Not Modified"}) + + @mock_s3 def test_boto3_head_object_if_modified_since(): s3 = boto3.client("s3", region_name=DEFAULT_REGION_NAME) @@ -2355,6 +2413,62 @@ def test_boto3_head_object_if_modified_since(): e.response["Error"].should.equal({"Code": "304", "Message": "Not Modified"}) +@mock_s3 +def test_boto3_head_object_if_unmodified_since(): + s3 = boto3.client("s3", region_name=DEFAULT_REGION_NAME) + bucket_name = "blah" + s3.create_bucket(Bucket=bucket_name) + + key = "hello.txt" + + s3.put_object(Bucket=bucket_name, Key=key, Body="test") + + with assert_raises(botocore.exceptions.ClientError) as err: + s3.head_object( + Bucket=bucket_name, + Key=key, + IfUnmodifiedSince=datetime.datetime.utcnow() - datetime.timedelta(hours=1), + ) + e = err.exception + e.response["Error"].should.equal({"Code": "412", "Message": "Precondition Failed"}) + + +@mock_s3 +def test_boto3_head_object_if_match(): + s3 = boto3.client("s3", region_name=DEFAULT_REGION_NAME) + bucket_name = "blah" + s3.create_bucket(Bucket=bucket_name) + + key = "hello.txt" + + s3.put_object(Bucket=bucket_name, Key=key, Body="test") + + with assert_raises(botocore.exceptions.ClientError) as err: + s3.head_object( + Bucket=bucket_name, Key=key, IfMatch='"hello"', + ) + e = err.exception + e.response["Error"].should.equal({"Code": "412", "Message": "Precondition Failed"}) + + +@mock_s3 +def test_boto3_head_object_if_none_match(): + s3 = boto3.client("s3", region_name=DEFAULT_REGION_NAME) + bucket_name = "blah" + s3.create_bucket(Bucket=bucket_name) + + key = "hello.txt" + + etag = s3.put_object(Bucket=bucket_name, Key=key, Body="test")["ETag"] + + with assert_raises(botocore.exceptions.ClientError) as err: + s3.head_object( + Bucket=bucket_name, Key=key, IfNoneMatch=etag, + ) + e = err.exception + 
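# As an illustrative aside (hypothetical bucket/key names, not part of the
# patch's test suite), the conditional-request semantics added above look like
# this from a boto3 client under moto: GET surfaces a failed precondition as a
# ClientError with code "PreconditionFailed" (HTTP 412) or "304", while HEAD
# returns the bare 412/304 status codes, which botocore also raises as
# ClientError.
#
#     import boto3
#     from moto import mock_s3
#
#     @mock_s3
#     def demo():
#         s3 = boto3.client("s3", region_name="us-east-1")
#         s3.create_bucket(Bucket="demo-bucket")
#         etag = s3.put_object(Bucket="demo-bucket", Key="k", Body=b"v")["ETag"]
#         s3.get_object(Bucket="demo-bucket", Key="k", IfMatch=etag)       # 200 OK
#         s3.head_object(Bucket="demo-bucket", Key="k", IfNoneMatch=etag)  # ClientError, code "304"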
e.response["Error"].should.equal({"Code": "304", "Message": "Not Modified"}) + + @mock_s3 @reduced_min_part_size def test_boto3_multipart_etag(): From 9f0f230d130a839fb2de6bfc97af4182360fbcdb Mon Sep 17 00:00:00 2001 From: jweite Date: Fri, 11 Sep 2020 06:17:36 -0400 Subject: [PATCH 523/658] Change to test_s3 method test_presigned_url_restrict_parameters to tolerate change in exception messages, spurred by boto3 1.14.59 release. (#3308) Co-authored-by: Joseph Weitekamp --- tests/test_s3/test_s3.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/test_s3/test_s3.py b/tests/test_s3/test_s3.py index 4139cf0550dc..d338269e9c8c 100644 --- a/tests/test_s3/test_s3.py +++ b/tests/test_s3/test_s3.py @@ -4769,8 +4769,8 @@ def test_presigned_url_restrict_parameters(): ClientMethod="put_object", Params={"Bucket": bucket, "Key": key, "Unknown": "metadata"}, ) - assert str(err.exception).should.equal( - 'Parameter validation failed:\nUnknown parameter in input: "Unknown", must be one of: ACL, Body, Bucket, CacheControl, ContentDisposition, ContentEncoding, ContentLanguage, ContentLength, ContentMD5, ContentType, Expires, GrantFullControl, GrantRead, GrantReadACP, GrantWriteACP, Key, Metadata, ServerSideEncryption, StorageClass, WebsiteRedirectLocation, SSECustomerAlgorithm, SSECustomerKey, SSECustomerKeyMD5, SSEKMSKeyId, SSEKMSEncryptionContext, RequestPayer, Tagging, ObjectLockMode, ObjectLockRetainUntilDate, ObjectLockLegalHoldStatus' + assert str(err.exception).should.match( + r'Parameter validation failed:\nUnknown parameter in input: "Unknown", must be one of:.*' ) s3.delete_bucket(Bucket=bucket) From db1d7123f666faaf99ad3a254057e728ddaabedf Mon Sep 17 00:00:00 2001 From: Bert Blommers Date: Sun, 13 Sep 2020 16:08:23 +0100 Subject: [PATCH 524/658] List dependencies for services - add integration test to verify --- CHANGELOG.md | 19 +- Makefile | 3 + README.md | 29 +- moto/apigateway/models.py | 3 +- moto/codecommit/models.py | 2 +- moto/codepipeline/models.py | 4 +- moto/ec2/models.py | 2 +- moto/ec2/utils.py | 2 +- moto/events/models.py | 3 +- moto/kms/models.py | 3 +- moto/sagemaker/models.py | 11 +- moto/stepfunctions/models.py | 3 +- moto/xray/models.py | 8 +- requirements-dev.txt | 6 +- requirements-tests.txt | 4 + scripts/int_test.sh | 65 +++ setup.py | 22 +- ....py => test_autoscaling_cloudformation.py} | 0 ...on.py => test_awslambda_cloudformation.py} | 0 ...mation.py => test_batch_cloudformation.py} | 0 tests/test_codecommit/test_codecommit.py | 2 +- .../test_cognitoidentity.py | 1 + tests/test_ec2/test_amis.py | 2 +- tests/test_ec2/test_ec2_cloudformation.py | 100 +++++ .../test_elastic_network_interfaces.py | 29 +- tests/test_ec2/test_instances.py | 43 +- tests/test_ec2/test_spot_instances.py | 28 +- tests/test_ec2/test_subnets.py | 35 +- tests/test_ecs/test_ecs_boto3.py | 253 ------------ tests/test_ecs/test_ecs_cloudformation.py | 253 ++++++++++++ .../test_eb.py | 0 tests/test_elbv2/test_elbv2.py | 346 +--------------- tests/test_elbv2/test_elbv2_cloudformation.py | 348 ++++++++++++++++ ...{test_glacier_server.py => test_server.py} | 0 tests/test_logs/test_integration.py | 383 ++++++++++++++++++ tests/test_logs/test_logs.py | 381 +---------------- ...t_bucket_path_server.py => test_server.py} | 0 tests/test_sqs/test_sqs.py | 81 ++-- tests/test_sqs/test_sqs_cloudformation.py | 38 ++ tests/test_ssm/test_ssm_boto3.py | 68 +--- tests/test_ssm/test_ssm_cloudformation.py | 70 ++++ 41 files changed, 1405 insertions(+), 1245 deletions(-) create mode 100644 
 requirements-tests.txt
 create mode 100755 scripts/int_test.sh
 rename tests/test_autoscaling/{test_cloudformation.py => test_autoscaling_cloudformation.py} (100%)
 rename tests/test_awslambda/{test_lambda_cloudformation.py => test_awslambda_cloudformation.py} (100%)
 rename tests/test_batch/{test_cloudformation.py => test_batch_cloudformation.py} (100%)
 create mode 100644 tests/test_ec2/test_ec2_cloudformation.py
 create mode 100644 tests/test_ecs/test_ecs_cloudformation.py
 rename tests/{test_eb => test_elasticbeanstalk}/test_eb.py (100%)
 create mode 100644 tests/test_elbv2/test_elbv2_cloudformation.py
 rename tests/test_glacier/{test_glacier_server.py => test_server.py} (100%)
 create mode 100644 tests/test_logs/test_integration.py
 rename tests/test_s3bucket_path/{test_bucket_path_server.py => test_server.py} (100%)
 create mode 100644 tests/test_sqs/test_sqs_cloudformation.py
 create mode 100644 tests/test_ssm/test_ssm_cloudformation.py

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 9df85cf37a6f..8d31409f0768 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,13 +1,17 @@
 Moto Changelog
 ===================

-1.3.16
+Unreleased
 -----
-    * Undoing dependency cleanup until we cut a larger release
+    * Reduced dependency overhead.
+      It is now possible to install dependencies for only specific services using:
+      pip install moto[service1,service2].
+      See the README for more information.

-1.3.15
------
+
+1.3.16
+-----

 Full list of PRs merged in this release:
 https://github.com/spulec/moto/pulls?q=is%3Apr+is%3Aclosed+merged%3A2019-11-14..2020-09-07

     General Changes:
     * The scaffold.py-script has been fixed to make it easier to scaffold new services.
       See the README for an introduction.
-    * Reduced dependency overhead.
-      It is now possible to install dependencies for only a specific service using pip install moto[service].
-      Available services: all, acm, awslambda, batch, cloudformation, cognitoidp, ec2, iotdata, iam, xray

     New Services:
     * Application Autoscaling
@@ -213,6 +214,10 @@ https://github.com/spulec/moto/pulls?q=is%3Apr+is%3Aclosed+merged%3A2019-11-14..
     * SNS - Now supports sending a message directly to a phone number
     * SQS - MessageAttributes now support labeled DataTypes

+1.3.15
+-----
+
+This release broke dependency management for a lot of services - please upgrade to 1.3.16.

 1.3.14
 -----
diff --git a/Makefile b/Makefile
index acc5b20376a1..2fc6aea96588 100644
--- a/Makefile
+++ b/Makefile
@@ -57,3 +57,6 @@ implementation_coverage:
 scaffold:
 	@pip install -r requirements-dev.txt > /dev/null
 	exec python scripts/scaffold.py
+
+int_test:
+	@./scripts/int_test.sh
diff --git a/README.md b/README.md
index 956be5da15ea..58ab04f962c8 100644
--- a/README.md
+++ b/README.md
@@ -9,6 +9,25 @@
 ![PyPI - Python Version](https://img.shields.io/pypi/pyversions/moto.svg)
 ![PyPI - Downloads](https://img.shields.io/pypi/dw/moto.svg)
 [![Code style: black](https://img.shields.io/badge/code%20style-black-000000.svg)](https://github.com/psf/black)
+
+## Install
+
+To install moto for a specific service:
+```console
+$ pip install moto[ec2,s3]
+```
+This will install Moto, and the dependencies required for that specific service.
+If you don't care about the number of dependencies, or if you want to mock many AWS services:
+```console
+$ pip install moto[all]
+```
+Not all services might be covered, in which case you might see a warning:
+`moto 1.3.16 does not provide the extra 'service'`.
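As an illustrative aside (hypothetical invocations; the dependency mapping comes from the `extras_per_service` table added to setup.py further down in this patch), a service-specific install pulls in only that service's extra dependencies:

```console
$ pip install moto[s3]    # moto plus PyYAML
$ pip install moto[ec2]   # moto plus docker and sshpubkeys
```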
+You can ignore the warning, or simply install moto as is: +```console +$ pip install moto +``` + ## In a nutshell Moto is a library that allows your tests to easily mock out AWS Services. @@ -459,15 +478,7 @@ require that you update your hosts file for your code to work properly: 1. `s3-control` For the above services, this is required because the hostname is in the form of `AWS_ACCOUNT_ID.localhost`. -As a result, you need to add that entry to your host file for your tests to function properly. - - -## Install - - -```console -$ pip install moto -``` +As a result, you need to add that entry to your host file for your tests to function properly. ## Releases diff --git a/moto/apigateway/models.py b/moto/apigateway/models.py index e4cbac36281c..4a44404a2421 100644 --- a/moto/apigateway/models.py +++ b/moto/apigateway/models.py @@ -14,10 +14,9 @@ except ImportError: from urllib.parse import urlparse import responses -from moto.core import BaseBackend, BaseModel +from moto.core import ACCOUNT_ID, BaseBackend, BaseModel from .utils import create_id from moto.core.utils import path_url -from moto.sts.models import ACCOUNT_ID from .exceptions import ( ApiKeyNotFoundException, UsagePlanNotFoundException, diff --git a/moto/codecommit/models.py b/moto/codecommit/models.py index 6a4e82ad21e4..ad99e8f3dc28 100644 --- a/moto/codecommit/models.py +++ b/moto/codecommit/models.py @@ -2,7 +2,7 @@ from moto.core import BaseBackend, BaseModel from moto.core.utils import iso_8601_datetime_with_milliseconds from datetime import datetime -from moto.iam.models import ACCOUNT_ID +from moto.core import ACCOUNT_ID from .exceptions import RepositoryDoesNotExistException, RepositoryNameExistsException import uuid diff --git a/moto/codepipeline/models.py b/moto/codepipeline/models.py index 50f07deb0d7e..4d2b9c0f9497 100644 --- a/moto/codepipeline/models.py +++ b/moto/codepipeline/models.py @@ -15,9 +15,7 @@ InvalidTagsException, TooManyTagsException, ) -from moto.core import BaseBackend, BaseModel - -from moto.iam.models import ACCOUNT_ID +from moto.core import ACCOUNT_ID, BaseBackend, BaseModel class CodePipeline(BaseModel): diff --git a/moto/ec2/models.py b/moto/ec2/models.py index 07a05bbda48a..60f179128ea4 100644 --- a/moto/ec2/models.py +++ b/moto/ec2/models.py @@ -27,7 +27,7 @@ iso_8601_datetime_with_milliseconds, camelcase_to_underscores, ) -from moto.iam.models import ACCOUNT_ID +from moto.core import ACCOUNT_ID from .exceptions import ( CidrLimitExceeded, DependencyViolationError, diff --git a/moto/ec2/utils.py b/moto/ec2/utils.py index bc124bddf456..653cd055d45e 100644 --- a/moto/ec2/utils.py +++ b/moto/ec2/utils.py @@ -11,7 +11,7 @@ from cryptography.hazmat.backends import default_backend from cryptography.hazmat.primitives.asymmetric import rsa -from moto.iam.models import ACCOUNT_ID +from moto.core import ACCOUNT_ID EC2_RESOURCE_TO_PREFIX = { "customer-gateway": "cgw", diff --git a/moto/events/models.py b/moto/events/models.py index 9c27fbb337d6..4d5047891589 100644 --- a/moto/events/models.py +++ b/moto/events/models.py @@ -4,8 +4,7 @@ from boto3 import Session from moto.core.exceptions import JsonRESTError -from moto.core import BaseBackend, CloudFormationModel -from moto.sts.models import ACCOUNT_ID +from moto.core import ACCOUNT_ID, BaseBackend, CloudFormationModel from moto.utilities.tagging_service import TaggingService from uuid import uuid4 diff --git a/moto/kms/models.py b/moto/kms/models.py index 2eb7cb771cc4..7a9918f2bbb5 100644 --- a/moto/kms/models.py +++ b/moto/kms/models.py @@ -6,11 +6,10 
@@ from boto3 import Session -from moto.core import BaseBackend, CloudFormationModel +from moto.core import ACCOUNT_ID, BaseBackend, CloudFormationModel from moto.core.utils import unix_time from moto.utilities.tagging_service import TaggingService from moto.core.exceptions import JsonRESTError -from moto.iam.models import ACCOUNT_ID from .utils import decrypt, encrypt, generate_key_id, generate_master_key diff --git a/moto/sagemaker/models.py b/moto/sagemaker/models.py index 6ff36249f1d8..9c394cc23f9e 100644 --- a/moto/sagemaker/models.py +++ b/moto/sagemaker/models.py @@ -1,14 +1,13 @@ from __future__ import unicode_literals import os +from boto3 import Session from copy import deepcopy from datetime import datetime -from moto.core import BaseBackend, BaseModel +from moto.core import ACCOUNT_ID, BaseBackend, BaseModel from moto.core.exceptions import RESTError -from moto.ec2 import ec2_backends from moto.sagemaker import validators -from moto.sts.models import ACCOUNT_ID from .exceptions import MissingModel @@ -909,5 +908,9 @@ def get_training_job_tags(self, arn): sagemaker_backends = {} -for region, ec2_backend in ec2_backends.items(): +for region in Session().get_available_regions("sagemaker"): + sagemaker_backends[region] = SageMakerModelBackend(region) +for region in Session().get_available_regions("sagemaker", partition_name="aws-us-gov"): + sagemaker_backends[region] = SageMakerModelBackend(region) +for region in Session().get_available_regions("sagemaker", partition_name="aws-cn"): sagemaker_backends[region] = SageMakerModelBackend(region) diff --git a/moto/stepfunctions/models.py b/moto/stepfunctions/models.py index 3184d6456b4b..03cbcf32088d 100644 --- a/moto/stepfunctions/models.py +++ b/moto/stepfunctions/models.py @@ -4,9 +4,8 @@ from boto3 import Session -from moto.core import BaseBackend +from moto.core import ACCOUNT_ID, BaseBackend from moto.core.utils import iso_8601_datetime_without_milliseconds -from moto.sts.models import ACCOUNT_ID from uuid import uuid4 from .exceptions import ( ExecutionAlreadyExists, diff --git a/moto/xray/models.py b/moto/xray/models.py index 33a271f9b2c7..39d8ae2d4b93 100644 --- a/moto/xray/models.py +++ b/moto/xray/models.py @@ -1,11 +1,11 @@ from __future__ import unicode_literals import bisect +from boto3 import Session import datetime from collections import defaultdict import json from moto.core import BaseBackend, BaseModel -from moto.ec2 import ec2_backends from .exceptions import BadSegmentException, AWSError @@ -287,5 +287,9 @@ def get_trace_ids(self, trace_ids, next_token): xray_backends = {} -for region, ec2_backend in ec2_backends.items(): +for region in Session().get_available_regions("xray"): + xray_backends[region] = XRayBackend() +for region in Session().get_available_regions("xray", partition_name="aws-us-gov"): + xray_backends[region] = XRayBackend() +for region in Session().get_available_regions("xray", partition_name="aws-cn"): xray_backends[region] = XRayBackend() diff --git a/requirements-dev.txt b/requirements-dev.txt index 8a91eb14f9ad..459d98855c31 100644 --- a/requirements-dev.txt +++ b/requirements-dev.txt @@ -1,17 +1,15 @@ -r requirements.txt -nose +-r requirements-tests.txt + black==19.10b0; python_version >= '3.6' regex==2019.11.1; python_version >= '3.6' # Needed for black -sure==1.4.11 coverage==4.5.4 flake8==3.7.8 -freezegun flask boto>=2.45.0 boto3>=1.4.4 botocore>=1.15.13 six>=1.9 -parameterized>=0.7.0 prompt-toolkit==2.0.10 # 3.x is not available with python2 click==6.7 inflection==0.3.1 diff --git 
a/requirements-tests.txt b/requirements-tests.txt new file mode 100644 index 000000000000..eaa8454c77de --- /dev/null +++ b/requirements-tests.txt @@ -0,0 +1,4 @@ +nose +sure==1.4.11 +freezegun +parameterized>=0.7.0 \ No newline at end of file diff --git a/scripts/int_test.sh b/scripts/int_test.sh new file mode 100755 index 000000000000..bc029eca4854 --- /dev/null +++ b/scripts/int_test.sh @@ -0,0 +1,65 @@ +overwrite() { echo -e "\r\033[1A\033[0K$@"; } + +contains() { + [[ $1 =~ (^|[[:space:]])$2($|[[:space:]]) ]] && return 0 || return 1 +} + +valid_service() { + # Verify whether this is a valid service + # We'll ignore metadata folders, and folders that test generic Moto behaviour + # We'll also ignore CloudFormation, as it will always depend on other services + local ignore_moto_folders="core instance_metadata __pycache__ templates cloudformation" + if echo $ignore_moto_folders | grep -q "$1"; then + return 1 + else + return 0 + fi +} + +test_service() { + service=$1 + path_to_test_file=$2 + venv_path="test_venv_${service}" + overwrite "Running tests for ${service}.." + virtualenv ${venv_path} -p `which python3` > /dev/null + source ${venv_path}/bin/activate > /dev/null + # Can't just install requirements-file, as it points to all dependencies + pip install -r requirements-tests.txt > /dev/null + pip install .[$service] > /dev/null 2>&1 + # Restart venv - ensure these deps are loaded + deactivate + source ${venv_path}/bin/activate > /dev/null + # Run tests for this service + test_result_filename="test_results_${service}.log" + touch $test_result_filename + nosetests -qxs --ignore-files="test_server\.py" --ignore-files="test_${service}_cloudformation\.py" --ignore-files="test_integration\.py" $path_to_test_file >$test_result_filename 2>&1 + RESULT=$? + if [[ $RESULT != 0 ]]; then + echo -e "Tests for ${service} have failed!\n" + else + rm $test_result_filename + fi + deactivate + rm -rf ${venv_path} +} + +echo "Running Dependency tests..." +ITER=0 +for file in moto/* +do + if [[ -d $file ]]; then + service=${file:5} + path_to_test_file="tests/test_${service}" + if valid_service $service && [[ -d $path_to_test_file ]]; then + test_service $service $path_to_test_file & + elif valid_service $service; then + echo -e "No tests for ${service} can be found on ${path_to_test_file}!\n" + fi + if (( $ITER % 4 == 0 )); then + # Ensure we're only processing 4 services at the time + wait + fi + fi + ITER=$(expr $ITER + 1) +done +wait diff --git a/setup.py b/setup.py index 5f6840251e30..40abb8666259 100755 --- a/setup.py +++ b/setup.py @@ -33,6 +33,7 @@ def get_version(): "boto>=2.36.0", "boto3>=1.9.201", "botocore>=1.12.201", + "cryptography>=2.3.0", "requests>=2.5", "xmltodict", "six>1.9", @@ -74,7 +75,6 @@ def get_version(): "zipp", ] -_dep_cryptography = "cryptography>=2.3.0" _dep_PyYAML = "PyYAML>=5.1" _dep_python_jose = "python-jose[cryptography]>=3.1.0,<4.0.0" _dep_python_jose_ecdsa_pin = "ecdsa<0.15" # https://github.com/spulec/moto/pull/3263#discussion_r477404984 @@ -87,7 +87,6 @@ def get_version(): _dep_sshpubkeys_py3 = "sshpubkeys>=3.1.0; python_version>'3'" all_extra_deps = [ - _dep_cryptography, _dep_PyYAML, _dep_python_jose, _dep_python_jose_ecdsa_pin, @@ -105,18 +104,22 @@ def get_version(): # i.e. even those without extra dependencies. # Would be good for future-compatibility, I guess. 
extras_per_service = { - 'acm': [_dep_cryptography], + 'apigateway': [_dep_python_jose, _dep_python_jose_ecdsa_pin], 'awslambda': [_dep_docker], 'batch': [_dep_docker], - 'cloudformation': [_dep_PyYAML, _dep_cfn_lint], + 'cloudformation': [_dep_docker, _dep_PyYAML, _dep_cfn_lint], 'cognitoidp': [_dep_python_jose, _dep_python_jose_ecdsa_pin], - "ec2": [_dep_cryptography, _dep_sshpubkeys_py2, _dep_sshpubkeys_py3], - 'iam': [_dep_cryptography], + 'dynamodb2': [_dep_docker], + 'dynamodbstreams': [_dep_docker], + "ec2": [_dep_docker, _dep_sshpubkeys_py2, _dep_sshpubkeys_py3], 'iotdata': [_dep_jsondiff], - 's3': [_dep_cryptography], + 's3': [_dep_PyYAML], + 'ses': [_dep_docker], + 'sns': [_dep_docker], + 'sqs': [_dep_docker], + 'ssm': [_dep_docker, _dep_PyYAML, _dep_cfn_lint], 'xray': [_dep_aws_xray_sdk], } - extras_require = { 'all': all_extra_deps, 'server': all_server_deps, @@ -148,8 +151,7 @@ def get_version(): ], }, packages=find_packages(exclude=("tests", "tests.*")), - # Addding all requirements for now until we cut a larger release - install_requires=install_requires + all_extra_deps, + install_requires=install_requires, extras_require=extras_require, include_package_data=True, license="Apache", diff --git a/tests/test_autoscaling/test_cloudformation.py b/tests/test_autoscaling/test_autoscaling_cloudformation.py similarity index 100% rename from tests/test_autoscaling/test_cloudformation.py rename to tests/test_autoscaling/test_autoscaling_cloudformation.py diff --git a/tests/test_awslambda/test_lambda_cloudformation.py b/tests/test_awslambda/test_awslambda_cloudformation.py similarity index 100% rename from tests/test_awslambda/test_lambda_cloudformation.py rename to tests/test_awslambda/test_awslambda_cloudformation.py diff --git a/tests/test_batch/test_cloudformation.py b/tests/test_batch/test_batch_cloudformation.py similarity index 100% rename from tests/test_batch/test_cloudformation.py rename to tests/test_batch/test_batch_cloudformation.py diff --git a/tests/test_codecommit/test_codecommit.py b/tests/test_codecommit/test_codecommit.py index 6e916f20aaba..69021372a06f 100644 --- a/tests/test_codecommit/test_codecommit.py +++ b/tests/test_codecommit/test_codecommit.py @@ -2,7 +2,7 @@ import sure # noqa from moto import mock_codecommit -from moto.iam.models import ACCOUNT_ID +from moto.core import ACCOUNT_ID from botocore.exceptions import ClientError from nose.tools import assert_raises diff --git a/tests/test_cognitoidentity/test_cognitoidentity.py b/tests/test_cognitoidentity/test_cognitoidentity.py index 0ec7acfb0b93..164cb023c60d 100644 --- a/tests/test_cognitoidentity/test_cognitoidentity.py +++ b/tests/test_cognitoidentity/test_cognitoidentity.py @@ -1,6 +1,7 @@ from __future__ import unicode_literals import boto3 +import sure # noqa from botocore.exceptions import ClientError from nose.tools import assert_raises diff --git a/tests/test_ec2/test_amis.py b/tests/test_ec2/test_amis.py index e32ef97800cb..5b26acf6f836 100644 --- a/tests/test_ec2/test_amis.py +++ b/tests/test_ec2/test_amis.py @@ -12,7 +12,7 @@ from moto import mock_ec2_deprecated, mock_ec2 from moto.ec2.models import AMIS, OWNER_ID -from moto.iam.models import ACCOUNT_ID +from moto.core import ACCOUNT_ID from tests.helpers import requires_boto_gte diff --git a/tests/test_ec2/test_ec2_cloudformation.py b/tests/test_ec2/test_ec2_cloudformation.py new file mode 100644 index 000000000000..b5aa8dd24fac --- /dev/null +++ b/tests/test_ec2/test_ec2_cloudformation.py @@ -0,0 +1,100 @@ +from moto import 
mock_cloudformation_deprecated, mock_ec2_deprecated +from moto import mock_cloudformation, mock_ec2 +from tests.test_cloudformation.fixtures import vpc_eni +import boto +import boto3 +import json +import sure # noqa + + +@mock_ec2_deprecated +@mock_cloudformation_deprecated +def test_elastic_network_interfaces_cloudformation(): + template = vpc_eni.template + template_json = json.dumps(template) + conn = boto.cloudformation.connect_to_region("us-west-1") + conn.create_stack("test_stack", template_body=template_json) + ec2_conn = boto.ec2.connect_to_region("us-west-1") + eni = ec2_conn.get_all_network_interfaces()[0] + eni.private_ip_addresses.should.have.length_of(1) + + stack = conn.describe_stacks()[0] + resources = stack.describe_resources() + cfn_eni = [ + resource + for resource in resources + if resource.resource_type == "AWS::EC2::NetworkInterface" + ][0] + cfn_eni.physical_resource_id.should.equal(eni.id) + + outputs = {output.key: output.value for output in stack.outputs} + outputs["ENIIpAddress"].should.equal(eni.private_ip_addresses[0].private_ip_address) + + +@mock_ec2 +@mock_cloudformation +def test_volume_size_through_cloudformation(): + ec2 = boto3.client("ec2", region_name="us-east-1") + cf = boto3.client("cloudformation", region_name="us-east-1") + + volume_template = { + "AWSTemplateFormatVersion": "2010-09-09", + "Resources": { + "testInstance": { + "Type": "AWS::EC2::Instance", + "Properties": { + "ImageId": "ami-d3adb33f", + "KeyName": "dummy", + "InstanceType": "t2.micro", + "BlockDeviceMappings": [ + {"DeviceName": "/dev/sda2", "Ebs": {"VolumeSize": "50"}} + ], + "Tags": [ + {"Key": "foo", "Value": "bar"}, + {"Key": "blah", "Value": "baz"}, + ], + }, + } + }, + } + template_json = json.dumps(volume_template) + cf.create_stack(StackName="test_stack", TemplateBody=template_json) + instances = ec2.describe_instances() + volume = instances["Reservations"][0]["Instances"][0]["BlockDeviceMappings"][0][ + "Ebs" + ] + + volumes = ec2.describe_volumes(VolumeIds=[volume["VolumeId"]]) + volumes["Volumes"][0]["Size"].should.equal(50) + + +@mock_ec2_deprecated +@mock_cloudformation_deprecated +def test_subnet_tags_through_cloudformation(): + vpc_conn = boto.vpc.connect_to_region("us-west-1") + vpc = vpc_conn.create_vpc("10.0.0.0/16") + + subnet_template = { + "AWSTemplateFormatVersion": "2010-09-09", + "Resources": { + "testSubnet": { + "Type": "AWS::EC2::Subnet", + "Properties": { + "VpcId": vpc.id, + "CidrBlock": "10.0.0.0/24", + "AvailabilityZone": "us-west-1b", + "Tags": [ + {"Key": "foo", "Value": "bar"}, + {"Key": "blah", "Value": "baz"}, + ], + }, + } + }, + } + cf_conn = boto.cloudformation.connect_to_region("us-west-1") + template_json = json.dumps(subnet_template) + cf_conn.create_stack("test_stack", template_body=template_json) + + subnet = vpc_conn.get_all_subnets(filters={"cidrBlock": "10.0.0.0/24"})[0] + subnet.tags["foo"].should.equal("bar") + subnet.tags["blah"].should.equal("baz") diff --git a/tests/test_ec2/test_elastic_network_interfaces.py b/tests/test_ec2/test_elastic_network_interfaces.py index 4e502586e8aa..e7fd878a6f7c 100644 --- a/tests/test_ec2/test_elastic_network_interfaces.py +++ b/tests/test_ec2/test_elastic_network_interfaces.py @@ -7,15 +7,12 @@ import boto3 from botocore.exceptions import ClientError import boto -import boto.cloudformation import boto.ec2 from boto.exception import EC2ResponseError import sure # noqa -from moto import mock_ec2, mock_cloudformation_deprecated, mock_ec2_deprecated +from moto import mock_ec2, mock_ec2_deprecated from 
tests.helpers import requires_boto_gte -from tests.test_cloudformation.fixtures import vpc_eni -import json @mock_ec2_deprecated @@ -501,27 +498,3 @@ def test_elastic_network_interfaces_describe_network_interfaces_with_filter(): eni1.private_ip_address ) response["NetworkInterfaces"][0]["Description"].should.equal(eni1.description) - - -@mock_ec2_deprecated -@mock_cloudformation_deprecated -def test_elastic_network_interfaces_cloudformation(): - template = vpc_eni.template - template_json = json.dumps(template) - conn = boto.cloudformation.connect_to_region("us-west-1") - conn.create_stack("test_stack", template_body=template_json) - ec2_conn = boto.ec2.connect_to_region("us-west-1") - eni = ec2_conn.get_all_network_interfaces()[0] - eni.private_ip_addresses.should.have.length_of(1) - - stack = conn.describe_stacks()[0] - resources = stack.describe_resources() - cfn_eni = [ - resource - for resource in resources - if resource.resource_type == "AWS::EC2::NetworkInterface" - ][0] - cfn_eni.physical_resource_id.should.equal(eni.id) - - outputs = {output.key: output.value for output in stack.outputs} - outputs["ENIIpAddress"].should.equal(eni.private_ip_addresses[0].private_ip_address) diff --git a/tests/test_ec2/test_instances.py b/tests/test_ec2/test_instances.py index 7ec385973cee..d7a2ff3f3c47 100644 --- a/tests/test_ec2/test_instances.py +++ b/tests/test_ec2/test_instances.py @@ -7,19 +7,17 @@ from nose.tools import assert_raises import base64 -import datetime import ipaddress -import json import six import boto import boto3 from boto.ec2.instance import Reservation, InstanceAttribute -from boto.exception import EC2ResponseError, EC2ResponseError +from boto.exception import EC2ResponseError from freezegun import freeze_time import sure # noqa -from moto import mock_ec2_deprecated, mock_ec2, mock_cloudformation +from moto import mock_ec2_deprecated, mock_ec2 from tests.helpers import requires_boto_gte @@ -1673,40 +1671,3 @@ def test_describe_instance_attribute(): invalid_instance_attribute=invalid_instance_attribute ) ex.exception.response["Error"]["Message"].should.equal(message) - - -@mock_ec2 -@mock_cloudformation -def test_volume_size_through_cloudformation(): - ec2 = boto3.client("ec2", region_name="us-east-1") - cf = boto3.client("cloudformation", region_name="us-east-1") - - volume_template = { - "AWSTemplateFormatVersion": "2010-09-09", - "Resources": { - "testInstance": { - "Type": "AWS::EC2::Instance", - "Properties": { - "ImageId": "ami-d3adb33f", - "KeyName": "dummy", - "InstanceType": "t2.micro", - "BlockDeviceMappings": [ - {"DeviceName": "/dev/sda2", "Ebs": {"VolumeSize": "50"}} - ], - "Tags": [ - {"Key": "foo", "Value": "bar"}, - {"Key": "blah", "Value": "baz"}, - ], - }, - } - }, - } - template_json = json.dumps(volume_template) - cf.create_stack(StackName="test_stack", TemplateBody=template_json) - instances = ec2.describe_instances() - volume = instances["Reservations"][0]["Instances"][0]["BlockDeviceMappings"][0][ - "Ebs" - ] - - volumes = ec2.describe_volumes(VolumeIds=[volume["VolumeId"]]) - volumes["Volumes"][0]["Size"].should.equal(50) diff --git a/tests/test_ec2/test_spot_instances.py b/tests/test_ec2/test_spot_instances.py index cfc95bb82ff4..5eb5a6e480b3 100644 --- a/tests/test_ec2/test_spot_instances.py +++ b/tests/test_ec2/test_spot_instances.py @@ -9,8 +9,8 @@ import pytz import sure # noqa -from moto import mock_ec2, mock_ec2_deprecated -from moto.backends import get_model +from moto import mock_ec2, mock_ec2_deprecated, settings +from moto.ec2.models import 
ec2_backends from moto.core.utils import iso_8601_datetime_with_milliseconds @@ -184,13 +184,14 @@ def test_request_spot_instances_fulfilled(): request.state.should.equal("open") - get_model("SpotInstanceRequest", "us-east-1")[0].state = "active" + if not settings.TEST_SERVER_MODE: + ec2_backends["us-east-1"].spot_instance_requests[request.id].state = "active" - requests = conn.get_all_spot_instance_requests() - requests.should.have.length_of(1) - request = requests[0] + requests = conn.get_all_spot_instance_requests() + requests.should.have.length_of(1) + request = requests[0] - request.state.should.equal("active") + request.state.should.equal("active") @mock_ec2_deprecated @@ -247,10 +248,11 @@ def test_request_spot_instances_setting_instance_id(): conn = boto.ec2.connect_to_region("us-east-1") request = conn.request_spot_instances(price=0.5, image_id="ami-abcd1234") - req = get_model("SpotInstanceRequest", "us-east-1")[0] - req.state = "active" - req.instance_id = "i-12345678" + if not settings.TEST_SERVER_MODE: + req = ec2_backends["us-east-1"].spot_instance_requests[request[0].id] + req.state = "active" + req.instance_id = "i-12345678" - request = conn.get_all_spot_instance_requests()[0] - assert request.state == "active" - assert request.instance_id == "i-12345678" + request = conn.get_all_spot_instance_requests()[0] + assert request.state == "active" + assert request.instance_id == "i-12345678" diff --git a/tests/test_ec2/test_subnets.py b/tests/test_ec2/test_subnets.py index 08d404b97f93..45c9040fcdbc 100644 --- a/tests/test_ec2/test_subnets.py +++ b/tests/test_ec2/test_subnets.py @@ -9,11 +9,10 @@ import boto.vpc from boto.exception import EC2ResponseError from botocore.exceptions import ParamValidationError, ClientError -import json import sure # noqa import random -from moto import mock_cloudformation_deprecated, mock_ec2, mock_ec2_deprecated +from moto import mock_ec2, mock_ec2_deprecated @mock_ec2_deprecated @@ -311,38 +310,6 @@ def test_get_subnets_filtering(): ).should.throw(NotImplementedError) -@mock_ec2_deprecated -@mock_cloudformation_deprecated -def test_subnet_tags_through_cloudformation(): - vpc_conn = boto.vpc.connect_to_region("us-west-1") - vpc = vpc_conn.create_vpc("10.0.0.0/16") - - subnet_template = { - "AWSTemplateFormatVersion": "2010-09-09", - "Resources": { - "testSubnet": { - "Type": "AWS::EC2::Subnet", - "Properties": { - "VpcId": vpc.id, - "CidrBlock": "10.0.0.0/24", - "AvailabilityZone": "us-west-1b", - "Tags": [ - {"Key": "foo", "Value": "bar"}, - {"Key": "blah", "Value": "baz"}, - ], - }, - } - }, - } - cf_conn = boto.cloudformation.connect_to_region("us-west-1") - template_json = json.dumps(subnet_template) - cf_conn.create_stack("test_stack", template_body=template_json) - - subnet = vpc_conn.get_all_subnets(filters={"cidrBlock": "10.0.0.0/24"})[0] - subnet.tags["foo"].should.equal("bar") - subnet.tags["blah"].should.equal("baz") - - @mock_ec2 def test_create_subnet_response_fields(): ec2 = boto3.resource("ec2", region_name="us-west-1") diff --git a/tests/test_ecs/test_ecs_boto3.py b/tests/test_ecs/test_ecs_boto3.py index d46c8b983013..c528349f545a 100644 --- a/tests/test_ecs/test_ecs_boto3.py +++ b/tests/test_ecs/test_ecs_boto3.py @@ -1,8 +1,6 @@ from __future__ import unicode_literals from datetime import datetime -from copy import deepcopy - from botocore.exceptions import ClientError import boto3 import sure # noqa @@ -10,7 +8,6 @@ from moto.ec2 import utils as ec2_utils from uuid import UUID -from moto import mock_cloudformation, mock_elbv2 from 
moto import mock_ecs from moto import mock_ec2 from nose.tools import assert_raises @@ -1649,120 +1646,6 @@ def test_resource_reservation_and_release_memory_reservation(): container_instance_description["runningTasksCount"].should.equal(0) -@mock_ecs -@mock_cloudformation -def test_create_cluster_through_cloudformation(): - template = { - "AWSTemplateFormatVersion": "2010-09-09", - "Description": "ECS Cluster Test CloudFormation", - "Resources": { - "testCluster": { - "Type": "AWS::ECS::Cluster", - "Properties": {"ClusterName": "testcluster"}, - } - }, - } - template_json = json.dumps(template) - - ecs_conn = boto3.client("ecs", region_name="us-west-1") - resp = ecs_conn.list_clusters() - len(resp["clusterArns"]).should.equal(0) - - cfn_conn = boto3.client("cloudformation", region_name="us-west-1") - cfn_conn.create_stack(StackName="test_stack", TemplateBody=template_json) - - resp = ecs_conn.list_clusters() - len(resp["clusterArns"]).should.equal(1) - - -@mock_ecs -@mock_cloudformation -def test_create_cluster_through_cloudformation_no_name(): - # cloudformation should create a cluster name for you if you do not provide it - # https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-ecs-cluster.html#cfn-ecs-cluster-clustername - template = { - "AWSTemplateFormatVersion": "2010-09-09", - "Description": "ECS Cluster Test CloudFormation", - "Resources": {"testCluster": {"Type": "AWS::ECS::Cluster"}}, - } - template_json = json.dumps(template) - cfn_conn = boto3.client("cloudformation", region_name="us-west-1") - cfn_conn.create_stack(StackName="test_stack", TemplateBody=template_json) - - ecs_conn = boto3.client("ecs", region_name="us-west-1") - resp = ecs_conn.list_clusters() - len(resp["clusterArns"]).should.equal(1) - - -@mock_ecs -@mock_cloudformation -def test_update_cluster_name_through_cloudformation_should_trigger_a_replacement(): - template1 = { - "AWSTemplateFormatVersion": "2010-09-09", - "Description": "ECS Cluster Test CloudFormation", - "Resources": { - "testCluster": { - "Type": "AWS::ECS::Cluster", - "Properties": {"ClusterName": "testcluster1"}, - } - }, - } - template2 = deepcopy(template1) - template2["Resources"]["testCluster"]["Properties"]["ClusterName"] = "testcluster2" - template1_json = json.dumps(template1) - cfn_conn = boto3.client("cloudformation", region_name="us-west-1") - stack_resp = cfn_conn.create_stack( - StackName="test_stack", TemplateBody=template1_json - ) - - template2_json = json.dumps(template2) - cfn_conn.update_stack(StackName=stack_resp["StackId"], TemplateBody=template2_json) - ecs_conn = boto3.client("ecs", region_name="us-west-1") - resp = ecs_conn.list_clusters() - len(resp["clusterArns"]).should.equal(1) - resp["clusterArns"][0].endswith("testcluster2").should.be.true - - -@mock_ecs -@mock_cloudformation -def test_create_task_definition_through_cloudformation(): - template = { - "AWSTemplateFormatVersion": "2010-09-09", - "Description": "ECS Cluster Test CloudFormation", - "Resources": { - "testTaskDefinition": { - "Type": "AWS::ECS::TaskDefinition", - "Properties": { - "ContainerDefinitions": [ - { - "Name": "ecs-sample", - "Image": "amazon/amazon-ecs-sample", - "Cpu": "200", - "Memory": "500", - "Essential": "true", - } - ], - "Volumes": [], - }, - } - }, - } - template_json = json.dumps(template) - cfn_conn = boto3.client("cloudformation", region_name="us-west-1") - stack_name = "test_stack" - cfn_conn.create_stack(StackName=stack_name, TemplateBody=template_json) - - ecs_conn = boto3.client("ecs", 
region_name="us-west-1") - resp = ecs_conn.list_task_definitions() - len(resp["taskDefinitionArns"]).should.equal(1) - task_definition_arn = resp["taskDefinitionArns"][0] - - task_definition_details = cfn_conn.describe_stack_resource( - StackName=stack_name, LogicalResourceId="testTaskDefinition" - )["StackResourceDetail"] - task_definition_details["PhysicalResourceId"].should.equal(task_definition_arn) - - @mock_ec2 @mock_ecs def test_task_definitions_unable_to_be_placed(): @@ -1877,142 +1760,6 @@ def test_task_definitions_with_port_clash(): response["tasks"][0]["stoppedReason"].should.equal("") -@mock_ecs -@mock_cloudformation -def test_update_task_definition_family_through_cloudformation_should_trigger_a_replacement(): - template1 = { - "AWSTemplateFormatVersion": "2010-09-09", - "Description": "ECS Cluster Test CloudFormation", - "Resources": { - "testTaskDefinition": { - "Type": "AWS::ECS::TaskDefinition", - "Properties": { - "Family": "testTaskDefinition1", - "ContainerDefinitions": [ - { - "Name": "ecs-sample", - "Image": "amazon/amazon-ecs-sample", - "Cpu": "200", - "Memory": "500", - "Essential": "true", - } - ], - "Volumes": [], - }, - } - }, - } - template1_json = json.dumps(template1) - cfn_conn = boto3.client("cloudformation", region_name="us-west-1") - cfn_conn.create_stack(StackName="test_stack", TemplateBody=template1_json) - - template2 = deepcopy(template1) - template2["Resources"]["testTaskDefinition"]["Properties"][ - "Family" - ] = "testTaskDefinition2" - template2_json = json.dumps(template2) - cfn_conn.update_stack(StackName="test_stack", TemplateBody=template2_json) - - ecs_conn = boto3.client("ecs", region_name="us-west-1") - resp = ecs_conn.list_task_definitions(familyPrefix="testTaskDefinition2") - len(resp["taskDefinitionArns"]).should.equal(1) - resp["taskDefinitionArns"][0].endswith("testTaskDefinition2:1").should.be.true - - -@mock_ecs -@mock_cloudformation -def test_create_service_through_cloudformation(): - template = { - "AWSTemplateFormatVersion": "2010-09-09", - "Description": "ECS Cluster Test CloudFormation", - "Resources": { - "testCluster": { - "Type": "AWS::ECS::Cluster", - "Properties": {"ClusterName": "testcluster"}, - }, - "testTaskDefinition": { - "Type": "AWS::ECS::TaskDefinition", - "Properties": { - "ContainerDefinitions": [ - { - "Name": "ecs-sample", - "Image": "amazon/amazon-ecs-sample", - "Cpu": "200", - "Memory": "500", - "Essential": "true", - } - ], - "Volumes": [], - }, - }, - "testService": { - "Type": "AWS::ECS::Service", - "Properties": { - "Cluster": {"Ref": "testCluster"}, - "DesiredCount": 10, - "TaskDefinition": {"Ref": "testTaskDefinition"}, - }, - }, - }, - } - template_json = json.dumps(template) - cfn_conn = boto3.client("cloudformation", region_name="us-west-1") - cfn_conn.create_stack(StackName="test_stack", TemplateBody=template_json) - - ecs_conn = boto3.client("ecs", region_name="us-west-1") - resp = ecs_conn.list_services(cluster="testcluster") - len(resp["serviceArns"]).should.equal(1) - - -@mock_ecs -@mock_cloudformation -def test_update_service_through_cloudformation_should_trigger_replacement(): - template1 = { - "AWSTemplateFormatVersion": "2010-09-09", - "Description": "ECS Cluster Test CloudFormation", - "Resources": { - "testCluster": { - "Type": "AWS::ECS::Cluster", - "Properties": {"ClusterName": "testcluster"}, - }, - "testTaskDefinition": { - "Type": "AWS::ECS::TaskDefinition", - "Properties": { - "ContainerDefinitions": [ - { - "Name": "ecs-sample", - "Image": "amazon/amazon-ecs-sample", - "Cpu": "200", - 
"Memory": "500", - "Essential": "true", - } - ], - "Volumes": [], - }, - }, - "testService": { - "Type": "AWS::ECS::Service", - "Properties": { - "Cluster": {"Ref": "testCluster"}, - "TaskDefinition": {"Ref": "testTaskDefinition"}, - "DesiredCount": 10, - }, - }, - }, - } - template_json1 = json.dumps(template1) - cfn_conn = boto3.client("cloudformation", region_name="us-west-1") - cfn_conn.create_stack(StackName="test_stack", TemplateBody=template_json1) - template2 = deepcopy(template1) - template2["Resources"]["testService"]["Properties"]["DesiredCount"] = 5 - template2_json = json.dumps(template2) - cfn_conn.update_stack(StackName="test_stack", TemplateBody=template2_json) - - ecs_conn = boto3.client("ecs", region_name="us-west-1") - resp = ecs_conn.list_services(cluster="testcluster") - len(resp["serviceArns"]).should.equal(1) - - @mock_ec2 @mock_ecs def test_attributes(): diff --git a/tests/test_ecs/test_ecs_cloudformation.py b/tests/test_ecs/test_ecs_cloudformation.py new file mode 100644 index 000000000000..6988a08e8bf6 --- /dev/null +++ b/tests/test_ecs/test_ecs_cloudformation.py @@ -0,0 +1,253 @@ +import boto3 +import json +from copy import deepcopy +from moto import mock_cloudformation, mock_ecs + +@mock_ecs +@mock_cloudformation +def test_update_task_definition_family_through_cloudformation_should_trigger_a_replacement(): + template1 = { + "AWSTemplateFormatVersion": "2010-09-09", + "Description": "ECS Cluster Test CloudFormation", + "Resources": { + "testTaskDefinition": { + "Type": "AWS::ECS::TaskDefinition", + "Properties": { + "Family": "testTaskDefinition1", + "ContainerDefinitions": [ + { + "Name": "ecs-sample", + "Image": "amazon/amazon-ecs-sample", + "Cpu": "200", + "Memory": "500", + "Essential": "true", + } + ], + "Volumes": [], + }, + } + }, + } + template1_json = json.dumps(template1) + cfn_conn = boto3.client("cloudformation", region_name="us-west-1") + cfn_conn.create_stack(StackName="test_stack", TemplateBody=template1_json) + + template2 = deepcopy(template1) + template2["Resources"]["testTaskDefinition"]["Properties"][ + "Family" + ] = "testTaskDefinition2" + template2_json = json.dumps(template2) + cfn_conn.update_stack(StackName="test_stack", TemplateBody=template2_json) + + ecs_conn = boto3.client("ecs", region_name="us-west-1") + resp = ecs_conn.list_task_definitions(familyPrefix="testTaskDefinition2") + len(resp["taskDefinitionArns"]).should.equal(1) + resp["taskDefinitionArns"][0].endswith("testTaskDefinition2:1").should.be.true + + +@mock_ecs +@mock_cloudformation +def test_create_service_through_cloudformation(): + template = { + "AWSTemplateFormatVersion": "2010-09-09", + "Description": "ECS Cluster Test CloudFormation", + "Resources": { + "testCluster": { + "Type": "AWS::ECS::Cluster", + "Properties": {"ClusterName": "testcluster"}, + }, + "testTaskDefinition": { + "Type": "AWS::ECS::TaskDefinition", + "Properties": { + "ContainerDefinitions": [ + { + "Name": "ecs-sample", + "Image": "amazon/amazon-ecs-sample", + "Cpu": "200", + "Memory": "500", + "Essential": "true", + } + ], + "Volumes": [], + }, + }, + "testService": { + "Type": "AWS::ECS::Service", + "Properties": { + "Cluster": {"Ref": "testCluster"}, + "DesiredCount": 10, + "TaskDefinition": {"Ref": "testTaskDefinition"}, + }, + }, + }, + } + template_json = json.dumps(template) + cfn_conn = boto3.client("cloudformation", region_name="us-west-1") + cfn_conn.create_stack(StackName="test_stack", TemplateBody=template_json) + + ecs_conn = boto3.client("ecs", region_name="us-west-1") + resp = 
ecs_conn.list_services(cluster="testcluster") + len(resp["serviceArns"]).should.equal(1) + + +@mock_ecs +@mock_cloudformation +def test_update_service_through_cloudformation_should_trigger_replacement(): + template1 = { + "AWSTemplateFormatVersion": "2010-09-09", + "Description": "ECS Cluster Test CloudFormation", + "Resources": { + "testCluster": { + "Type": "AWS::ECS::Cluster", + "Properties": {"ClusterName": "testcluster"}, + }, + "testTaskDefinition": { + "Type": "AWS::ECS::TaskDefinition", + "Properties": { + "ContainerDefinitions": [ + { + "Name": "ecs-sample", + "Image": "amazon/amazon-ecs-sample", + "Cpu": "200", + "Memory": "500", + "Essential": "true", + } + ], + "Volumes": [], + }, + }, + "testService": { + "Type": "AWS::ECS::Service", + "Properties": { + "Cluster": {"Ref": "testCluster"}, + "TaskDefinition": {"Ref": "testTaskDefinition"}, + "DesiredCount": 10, + }, + }, + }, + } + template_json1 = json.dumps(template1) + cfn_conn = boto3.client("cloudformation", region_name="us-west-1") + cfn_conn.create_stack(StackName="test_stack", TemplateBody=template_json1) + template2 = deepcopy(template1) + template2["Resources"]["testService"]["Properties"]["DesiredCount"] = 5 + template2_json = json.dumps(template2) + cfn_conn.update_stack(StackName="test_stack", TemplateBody=template2_json) + + ecs_conn = boto3.client("ecs", region_name="us-west-1") + resp = ecs_conn.list_services(cluster="testcluster") + len(resp["serviceArns"]).should.equal(1) + + +@mock_ecs +@mock_cloudformation +def test_create_cluster_through_cloudformation(): + template = { + "AWSTemplateFormatVersion": "2010-09-09", + "Description": "ECS Cluster Test CloudFormation", + "Resources": { + "testCluster": { + "Type": "AWS::ECS::Cluster", + "Properties": {"ClusterName": "testcluster"}, + } + }, + } + template_json = json.dumps(template) + + ecs_conn = boto3.client("ecs", region_name="us-west-1") + resp = ecs_conn.list_clusters() + len(resp["clusterArns"]).should.equal(0) + + cfn_conn = boto3.client("cloudformation", region_name="us-west-1") + cfn_conn.create_stack(StackName="test_stack", TemplateBody=template_json) + + resp = ecs_conn.list_clusters() + len(resp["clusterArns"]).should.equal(1) + + +@mock_ecs +@mock_cloudformation +def test_create_cluster_through_cloudformation_no_name(): + # cloudformation should create a cluster name for you if you do not provide it + # https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-ecs-cluster.html#cfn-ecs-cluster-clustername + template = { + "AWSTemplateFormatVersion": "2010-09-09", + "Description": "ECS Cluster Test CloudFormation", + "Resources": {"testCluster": {"Type": "AWS::ECS::Cluster"}}, + } + template_json = json.dumps(template) + cfn_conn = boto3.client("cloudformation", region_name="us-west-1") + cfn_conn.create_stack(StackName="test_stack", TemplateBody=template_json) + + ecs_conn = boto3.client("ecs", region_name="us-west-1") + resp = ecs_conn.list_clusters() + len(resp["clusterArns"]).should.equal(1) + + +@mock_ecs +@mock_cloudformation +def test_update_cluster_name_through_cloudformation_should_trigger_a_replacement(): + template1 = { + "AWSTemplateFormatVersion": "2010-09-09", + "Description": "ECS Cluster Test CloudFormation", + "Resources": { + "testCluster": { + "Type": "AWS::ECS::Cluster", + "Properties": {"ClusterName": "testcluster1"}, + } + }, + } + template2 = deepcopy(template1) + template2["Resources"]["testCluster"]["Properties"]["ClusterName"] = "testcluster2" + template1_json = json.dumps(template1) + cfn_conn = 
boto3.client("cloudformation", region_name="us-west-1") + stack_resp = cfn_conn.create_stack( + StackName="test_stack", TemplateBody=template1_json + ) + + template2_json = json.dumps(template2) + cfn_conn.update_stack(StackName=stack_resp["StackId"], TemplateBody=template2_json) + ecs_conn = boto3.client("ecs", region_name="us-west-1") + resp = ecs_conn.list_clusters() + len(resp["clusterArns"]).should.equal(1) + resp["clusterArns"][0].endswith("testcluster2").should.be.true + + +@mock_ecs +@mock_cloudformation +def test_create_task_definition_through_cloudformation(): + template = { + "AWSTemplateFormatVersion": "2010-09-09", + "Description": "ECS Cluster Test CloudFormation", + "Resources": { + "testTaskDefinition": { + "Type": "AWS::ECS::TaskDefinition", + "Properties": { + "ContainerDefinitions": [ + { + "Name": "ecs-sample", + "Image": "amazon/amazon-ecs-sample", + "Cpu": "200", + "Memory": "500", + "Essential": "true", + } + ], + "Volumes": [], + }, + } + }, + } + template_json = json.dumps(template) + cfn_conn = boto3.client("cloudformation", region_name="us-west-1") + stack_name = "test_stack" + cfn_conn.create_stack(StackName=stack_name, TemplateBody=template_json) + + ecs_conn = boto3.client("ecs", region_name="us-west-1") + resp = ecs_conn.list_task_definitions() + len(resp["taskDefinitionArns"]).should.equal(1) + task_definition_arn = resp["taskDefinitionArns"][0] + + task_definition_details = cfn_conn.describe_stack_resource( + StackName=stack_name, LogicalResourceId="testTaskDefinition" + )["StackResourceDetail"] + task_definition_details["PhysicalResourceId"].should.equal(task_definition_arn) \ No newline at end of file diff --git a/tests/test_eb/test_eb.py b/tests/test_elasticbeanstalk/test_eb.py similarity index 100% rename from tests/test_eb/test_eb.py rename to tests/test_elasticbeanstalk/test_eb.py diff --git a/tests/test_elbv2/test_elbv2.py b/tests/test_elbv2/test_elbv2.py index c155cba20107..5ab85284dda0 100644 --- a/tests/test_elbv2/test_elbv2.py +++ b/tests/test_elbv2/test_elbv2.py @@ -1,6 +1,5 @@ from __future__ import unicode_literals -import json import os import boto3 import botocore @@ -8,7 +7,7 @@ from nose.tools import assert_raises import sure # noqa -from moto import mock_elbv2, mock_ec2, mock_acm, mock_cloudformation +from moto import mock_elbv2, mock_ec2, mock_acm from moto.elbv2 import elbv2_backends from moto.core import ACCOUNT_ID @@ -1667,82 +1666,6 @@ def test_modify_listener_http_to_https(): ) -@mock_ec2 -@mock_elbv2 -@mock_cloudformation -def test_create_target_groups_through_cloudformation(): - cfn_conn = boto3.client("cloudformation", region_name="us-east-1") - elbv2_client = boto3.client("elbv2", region_name="us-east-1") - - # test that setting a name manually as well as letting cloudformation create a name both work - # this is a special case because test groups have a name length limit of 22 characters, and must be unique - # https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-elasticloadbalancingv2-targetgroup.html#cfn-elasticloadbalancingv2-targetgroup-name - template = { - "AWSTemplateFormatVersion": "2010-09-09", - "Description": "ECS Cluster Test CloudFormation", - "Resources": { - "testVPC": { - "Type": "AWS::EC2::VPC", - "Properties": {"CidrBlock": "10.0.0.0/16"}, - }, - "testGroup1": { - "Type": "AWS::ElasticLoadBalancingV2::TargetGroup", - "Properties": { - "Port": 80, - "Protocol": "HTTP", - "VpcId": {"Ref": "testVPC"}, - }, - }, - "testGroup2": { - "Type": "AWS::ElasticLoadBalancingV2::TargetGroup", - 
"Properties": { - "Port": 90, - "Protocol": "HTTP", - "VpcId": {"Ref": "testVPC"}, - }, - }, - "testGroup3": { - "Type": "AWS::ElasticLoadBalancingV2::TargetGroup", - "Properties": { - "Name": "MyTargetGroup", - "Port": 70, - "Protocol": "HTTPS", - "VpcId": {"Ref": "testVPC"}, - }, - }, - }, - } - template_json = json.dumps(template) - cfn_conn.create_stack(StackName="test-stack", TemplateBody=template_json) - - describe_target_groups_response = elbv2_client.describe_target_groups() - target_group_dicts = describe_target_groups_response["TargetGroups"] - assert len(target_group_dicts) == 3 - - # there should be 2 target groups with the same prefix of 10 characters (since the random suffix is 12) - # and one named MyTargetGroup - assert ( - len( - [ - tg - for tg in target_group_dicts - if tg["TargetGroupName"] == "MyTargetGroup" - ] - ) - == 1 - ) - assert ( - len( - [ - tg - for tg in target_group_dicts - if tg["TargetGroupName"].startswith("test-stack") - ] - ) - == 2 - ) - - @mock_elbv2 @mock_ec2 def test_redirect_action_listener_rule(): @@ -1816,95 +1739,6 @@ def test_redirect_action_listener_rule(): modify_listener_actions.should.equal(expected_default_actions) -@mock_elbv2 -@mock_cloudformation -def test_redirect_action_listener_rule_cloudformation(): - cnf_conn = boto3.client("cloudformation", region_name="us-east-1") - elbv2_client = boto3.client("elbv2", region_name="us-east-1") - - template = { - "AWSTemplateFormatVersion": "2010-09-09", - "Description": "ECS Cluster Test CloudFormation", - "Resources": { - "testVPC": { - "Type": "AWS::EC2::VPC", - "Properties": {"CidrBlock": "10.0.0.0/16"}, - }, - "subnet1": { - "Type": "AWS::EC2::Subnet", - "Properties": { - "CidrBlock": "10.0.0.0/24", - "VpcId": {"Ref": "testVPC"}, - "AvalabilityZone": "us-east-1b", - }, - }, - "subnet2": { - "Type": "AWS::EC2::Subnet", - "Properties": { - "CidrBlock": "10.0.1.0/24", - "VpcId": {"Ref": "testVPC"}, - "AvalabilityZone": "us-east-1b", - }, - }, - "testLb": { - "Type": "AWS::ElasticLoadBalancingV2::LoadBalancer", - "Properties": { - "Name": "my-lb", - "Subnets": [{"Ref": "subnet1"}, {"Ref": "subnet2"}], - "Type": "application", - "SecurityGroups": [], - }, - }, - "testListener": { - "Type": "AWS::ElasticLoadBalancingV2::Listener", - "Properties": { - "LoadBalancerArn": {"Ref": "testLb"}, - "Port": 80, - "Protocol": "HTTP", - "DefaultActions": [ - { - "Type": "redirect", - "RedirectConfig": { - "Port": "443", - "Protocol": "HTTPS", - "StatusCode": "HTTP_301", - }, - } - ], - }, - }, - }, - } - template_json = json.dumps(template) - cnf_conn.create_stack(StackName="test-stack", TemplateBody=template_json) - - describe_load_balancers_response = elbv2_client.describe_load_balancers( - Names=["my-lb"] - ) - describe_load_balancers_response["LoadBalancers"].should.have.length_of(1) - load_balancer_arn = describe_load_balancers_response["LoadBalancers"][0][ - "LoadBalancerArn" - ] - - describe_listeners_response = elbv2_client.describe_listeners( - LoadBalancerArn=load_balancer_arn - ) - - describe_listeners_response["Listeners"].should.have.length_of(1) - describe_listeners_response["Listeners"][0]["DefaultActions"].should.equal( - [ - { - "Type": "redirect", - "RedirectConfig": { - "Port": "443", - "Protocol": "HTTPS", - "StatusCode": "HTTP_301", - }, - } - ] - ) - - @mock_elbv2 @mock_ec2 def test_cognito_action_listener_rule(): @@ -1962,97 +1796,6 @@ def test_cognito_action_listener_rule(): describe_listener_actions.should.equal(action) -@mock_elbv2 -@mock_cloudformation -def 
test_cognito_action_listener_rule_cloudformation(): - cnf_conn = boto3.client("cloudformation", region_name="us-east-1") - elbv2_client = boto3.client("elbv2", region_name="us-east-1") - - template = { - "AWSTemplateFormatVersion": "2010-09-09", - "Description": "ECS Cluster Test CloudFormation", - "Resources": { - "testVPC": { - "Type": "AWS::EC2::VPC", - "Properties": {"CidrBlock": "10.0.0.0/16"}, - }, - "subnet1": { - "Type": "AWS::EC2::Subnet", - "Properties": { - "CidrBlock": "10.0.0.0/24", - "VpcId": {"Ref": "testVPC"}, - "AvalabilityZone": "us-east-1b", - }, - }, - "subnet2": { - "Type": "AWS::EC2::Subnet", - "Properties": { - "CidrBlock": "10.0.1.0/24", - "VpcId": {"Ref": "testVPC"}, - "AvalabilityZone": "us-east-1b", - }, - }, - "testLb": { - "Type": "AWS::ElasticLoadBalancingV2::LoadBalancer", - "Properties": { - "Name": "my-lb", - "Subnets": [{"Ref": "subnet1"}, {"Ref": "subnet2"}], - "Type": "application", - "SecurityGroups": [], - }, - }, - "testListener": { - "Type": "AWS::ElasticLoadBalancingV2::Listener", - "Properties": { - "LoadBalancerArn": {"Ref": "testLb"}, - "Port": 80, - "Protocol": "HTTP", - "DefaultActions": [ - { - "Type": "authenticate-cognito", - "AuthenticateCognitoConfig": { - "UserPoolArn": "arn:aws:cognito-idp:us-east-1:{}:userpool/us-east-1_ABCD1234".format( - ACCOUNT_ID - ), - "UserPoolClientId": "abcd1234abcd", - "UserPoolDomain": "testpool", - }, - } - ], - }, - }, - }, - } - template_json = json.dumps(template) - cnf_conn.create_stack(StackName="test-stack", TemplateBody=template_json) - - describe_load_balancers_response = elbv2_client.describe_load_balancers( - Names=["my-lb"] - ) - load_balancer_arn = describe_load_balancers_response["LoadBalancers"][0][ - "LoadBalancerArn" - ] - describe_listeners_response = elbv2_client.describe_listeners( - LoadBalancerArn=load_balancer_arn - ) - - describe_listeners_response["Listeners"].should.have.length_of(1) - describe_listeners_response["Listeners"][0]["DefaultActions"].should.equal( - [ - { - "Type": "authenticate-cognito", - "AuthenticateCognitoConfig": { - "UserPoolArn": "arn:aws:cognito-idp:us-east-1:{}:userpool/us-east-1_ABCD1234".format( - ACCOUNT_ID - ), - "UserPoolClientId": "abcd1234abcd", - "UserPoolDomain": "testpool", - }, - } - ] - ) - - @mock_elbv2 @mock_ec2 def test_fixed_response_action_listener_rule(): @@ -2108,93 +1851,6 @@ def test_fixed_response_action_listener_rule(): describe_listener_actions.should.equal(action) -@mock_elbv2 -@mock_cloudformation -def test_fixed_response_action_listener_rule_cloudformation(): - cnf_conn = boto3.client("cloudformation", region_name="us-east-1") - elbv2_client = boto3.client("elbv2", region_name="us-east-1") - - template = { - "AWSTemplateFormatVersion": "2010-09-09", - "Description": "ECS Cluster Test CloudFormation", - "Resources": { - "testVPC": { - "Type": "AWS::EC2::VPC", - "Properties": {"CidrBlock": "10.0.0.0/16"}, - }, - "subnet1": { - "Type": "AWS::EC2::Subnet", - "Properties": { - "CidrBlock": "10.0.0.0/24", - "VpcId": {"Ref": "testVPC"}, - "AvalabilityZone": "us-east-1b", - }, - }, - "subnet2": { - "Type": "AWS::EC2::Subnet", - "Properties": { - "CidrBlock": "10.0.1.0/24", - "VpcId": {"Ref": "testVPC"}, - "AvalabilityZone": "us-east-1b", - }, - }, - "testLb": { - "Type": "AWS::ElasticLoadBalancingV2::LoadBalancer", - "Properties": { - "Name": "my-lb", - "Subnets": [{"Ref": "subnet1"}, {"Ref": "subnet2"}], - "Type": "application", - "SecurityGroups": [], - }, - }, - "testListener": { - "Type": "AWS::ElasticLoadBalancingV2::Listener", - 
"Properties": { - "LoadBalancerArn": {"Ref": "testLb"}, - "Port": 80, - "Protocol": "HTTP", - "DefaultActions": [ - { - "Type": "fixed-response", - "FixedResponseConfig": { - "ContentType": "text/plain", - "MessageBody": "This page does not exist", - "StatusCode": "404", - }, - } - ], - }, - }, - }, - } - template_json = json.dumps(template) - cnf_conn.create_stack(StackName="test-stack", TemplateBody=template_json) - - describe_load_balancers_response = elbv2_client.describe_load_balancers( - Names=["my-lb"] - ) - load_balancer_arn = describe_load_balancers_response["LoadBalancers"][0][ - "LoadBalancerArn" - ] - describe_listeners_response = elbv2_client.describe_listeners( - LoadBalancerArn=load_balancer_arn - ) - - describe_listeners_response["Listeners"].should.have.length_of(1) - describe_listeners_response["Listeners"][0]["DefaultActions"].should.equal( - [ - { - "Type": "fixed-response", - "FixedResponseConfig": { - "ContentType": "text/plain", - "MessageBody": "This page does not exist", - "StatusCode": "404", - }, - } - ] - ) - - @mock_elbv2 @mock_ec2 def test_fixed_response_action_listener_rule_validates_status_code(): diff --git a/tests/test_elbv2/test_elbv2_cloudformation.py b/tests/test_elbv2/test_elbv2_cloudformation.py new file mode 100644 index 000000000000..9196fc853981 --- /dev/null +++ b/tests/test_elbv2/test_elbv2_cloudformation.py @@ -0,0 +1,348 @@ +import boto3 +import json + +from moto import mock_elbv2, mock_ec2, mock_cloudformation +from moto.core import ACCOUNT_ID + + +@mock_elbv2 +@mock_cloudformation +def test_redirect_action_listener_rule_cloudformation(): + cnf_conn = boto3.client("cloudformation", region_name="us-east-1") + elbv2_client = boto3.client("elbv2", region_name="us-east-1") + + template = { + "AWSTemplateFormatVersion": "2010-09-09", + "Description": "ECS Cluster Test CloudFormation", + "Resources": { + "testVPC": { + "Type": "AWS::EC2::VPC", + "Properties": {"CidrBlock": "10.0.0.0/16"}, + }, + "subnet1": { + "Type": "AWS::EC2::Subnet", + "Properties": { + "CidrBlock": "10.0.0.0/24", + "VpcId": {"Ref": "testVPC"}, + "AvalabilityZone": "us-east-1b", + }, + }, + "subnet2": { + "Type": "AWS::EC2::Subnet", + "Properties": { + "CidrBlock": "10.0.1.0/24", + "VpcId": {"Ref": "testVPC"}, + "AvalabilityZone": "us-east-1b", + }, + }, + "testLb": { + "Type": "AWS::ElasticLoadBalancingV2::LoadBalancer", + "Properties": { + "Name": "my-lb", + "Subnets": [{"Ref": "subnet1"}, {"Ref": "subnet2"}], + "Type": "application", + "SecurityGroups": [], + }, + }, + "testListener": { + "Type": "AWS::ElasticLoadBalancingV2::Listener", + "Properties": { + "LoadBalancerArn": {"Ref": "testLb"}, + "Port": 80, + "Protocol": "HTTP", + "DefaultActions": [ + { + "Type": "redirect", + "RedirectConfig": { + "Port": "443", + "Protocol": "HTTPS", + "StatusCode": "HTTP_301", + }, + } + ], + }, + }, + }, + } + template_json = json.dumps(template) + cnf_conn.create_stack(StackName="test-stack", TemplateBody=template_json) + + describe_load_balancers_response = elbv2_client.describe_load_balancers( + Names=["my-lb"] + ) + describe_load_balancers_response["LoadBalancers"].should.have.length_of(1) + load_balancer_arn = describe_load_balancers_response["LoadBalancers"][0][ + "LoadBalancerArn" + ] + + describe_listeners_response = elbv2_client.describe_listeners( + LoadBalancerArn=load_balancer_arn + ) + + describe_listeners_response["Listeners"].should.have.length_of(1) + describe_listeners_response["Listeners"][0]["DefaultActions"].should.equal( + [ + { + "Type": "redirect", + 
"RedirectConfig": { + "Port": "443", + "Protocol": "HTTPS", + "StatusCode": "HTTP_301", + }, + } + ] + ) + + +@mock_elbv2 +@mock_cloudformation +def test_cognito_action_listener_rule_cloudformation(): + cnf_conn = boto3.client("cloudformation", region_name="us-east-1") + elbv2_client = boto3.client("elbv2", region_name="us-east-1") + + template = { + "AWSTemplateFormatVersion": "2010-09-09", + "Description": "ECS Cluster Test CloudFormation", + "Resources": { + "testVPC": { + "Type": "AWS::EC2::VPC", + "Properties": {"CidrBlock": "10.0.0.0/16"}, + }, + "subnet1": { + "Type": "AWS::EC2::Subnet", + "Properties": { + "CidrBlock": "10.0.0.0/24", + "VpcId": {"Ref": "testVPC"}, + "AvalabilityZone": "us-east-1b", + }, + }, + "subnet2": { + "Type": "AWS::EC2::Subnet", + "Properties": { + "CidrBlock": "10.0.1.0/24", + "VpcId": {"Ref": "testVPC"}, + "AvalabilityZone": "us-east-1b", + }, + }, + "testLb": { + "Type": "AWS::ElasticLoadBalancingV2::LoadBalancer", + "Properties": { + "Name": "my-lb", + "Subnets": [{"Ref": "subnet1"}, {"Ref": "subnet2"}], + "Type": "application", + "SecurityGroups": [], + }, + }, + "testListener": { + "Type": "AWS::ElasticLoadBalancingV2::Listener", + "Properties": { + "LoadBalancerArn": {"Ref": "testLb"}, + "Port": 80, + "Protocol": "HTTP", + "DefaultActions": [ + { + "Type": "authenticate-cognito", + "AuthenticateCognitoConfig": { + "UserPoolArn": "arn:aws:cognito-idp:us-east-1:{}:userpool/us-east-1_ABCD1234".format( + ACCOUNT_ID + ), + "UserPoolClientId": "abcd1234abcd", + "UserPoolDomain": "testpool", + }, + } + ], + }, + }, + }, + } + template_json = json.dumps(template) + cnf_conn.create_stack(StackName="test-stack", TemplateBody=template_json) + + describe_load_balancers_response = elbv2_client.describe_load_balancers( + Names=["my-lb"] + ) + load_balancer_arn = describe_load_balancers_response["LoadBalancers"][0][ + "LoadBalancerArn" + ] + describe_listeners_response = elbv2_client.describe_listeners( + LoadBalancerArn=load_balancer_arn + ) + + describe_listeners_response["Listeners"].should.have.length_of(1) + describe_listeners_response["Listeners"][0]["DefaultActions"].should.equal( + [ + { + "Type": "authenticate-cognito", + "AuthenticateCognitoConfig": { + "UserPoolArn": "arn:aws:cognito-idp:us-east-1:{}:userpool/us-east-1_ABCD1234".format( + ACCOUNT_ID + ), + "UserPoolClientId": "abcd1234abcd", + "UserPoolDomain": "testpool", + }, + } + ] + ) + + +@mock_ec2 +@mock_elbv2 +@mock_cloudformation +def test_create_target_groups_through_cloudformation(): + cfn_conn = boto3.client("cloudformation", region_name="us-east-1") + elbv2_client = boto3.client("elbv2", region_name="us-east-1") + + # test that setting a name manually as well as letting cloudformation create a name both work + # this is a special case because test groups have a name length limit of 22 characters, and must be unique + # https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-elasticloadbalancingv2-targetgroup.html#cfn-elasticloadbalancingv2-targetgroup-name + template = { + "AWSTemplateFormatVersion": "2010-09-09", + "Description": "ECS Cluster Test CloudFormation", + "Resources": { + "testVPC": { + "Type": "AWS::EC2::VPC", + "Properties": {"CidrBlock": "10.0.0.0/16"}, + }, + "testGroup1": { + "Type": "AWS::ElasticLoadBalancingV2::TargetGroup", + "Properties": { + "Port": 80, + "Protocol": "HTTP", + "VpcId": {"Ref": "testVPC"}, + }, + }, + "testGroup2": { + "Type": "AWS::ElasticLoadBalancingV2::TargetGroup", + "Properties": { + "Port": 90, + "Protocol": "HTTP", + "VpcId": 
{"Ref": "testVPC"}, + }, + }, + "testGroup3": { + "Type": "AWS::ElasticLoadBalancingV2::TargetGroup", + "Properties": { + "Name": "MyTargetGroup", + "Port": 70, + "Protocol": "HTTPS", + "VpcId": {"Ref": "testVPC"}, + }, + }, + }, + } + template_json = json.dumps(template) + cfn_conn.create_stack(StackName="test-stack", TemplateBody=template_json) + + describe_target_groups_response = elbv2_client.describe_target_groups() + target_group_dicts = describe_target_groups_response["TargetGroups"] + assert len(target_group_dicts) == 3 + + # there should be 2 target groups with the same prefix of 10 characters (since the random suffix is 12) + # and one named MyTargetGroup + assert ( + len( + [ + tg + for tg in target_group_dicts + if tg["TargetGroupName"] == "MyTargetGroup" + ] + ) + == 1 + ) + assert ( + len( + [ + tg + for tg in target_group_dicts + if tg["TargetGroupName"].startswith("test-stack") + ] + ) + == 2 + ) + + +@mock_elbv2 +@mock_cloudformation +def test_fixed_response_action_listener_rule_cloudformation(): + cnf_conn = boto3.client("cloudformation", region_name="us-east-1") + elbv2_client = boto3.client("elbv2", region_name="us-east-1") + + template = { + "AWSTemplateFormatVersion": "2010-09-09", + "Description": "ECS Cluster Test CloudFormation", + "Resources": { + "testVPC": { + "Type": "AWS::EC2::VPC", + "Properties": {"CidrBlock": "10.0.0.0/16"}, + }, + "subnet1": { + "Type": "AWS::EC2::Subnet", + "Properties": { + "CidrBlock": "10.0.0.0/24", + "VpcId": {"Ref": "testVPC"}, + "AvalabilityZone": "us-east-1b", + }, + }, + "subnet2": { + "Type": "AWS::EC2::Subnet", + "Properties": { + "CidrBlock": "10.0.1.0/24", + "VpcId": {"Ref": "testVPC"}, + "AvalabilityZone": "us-east-1b", + }, + }, + "testLb": { + "Type": "AWS::ElasticLoadBalancingV2::LoadBalancer", + "Properties": { + "Name": "my-lb", + "Subnets": [{"Ref": "subnet1"}, {"Ref": "subnet2"}], + "Type": "application", + "SecurityGroups": [], + }, + }, + "testListener": { + "Type": "AWS::ElasticLoadBalancingV2::Listener", + "Properties": { + "LoadBalancerArn": {"Ref": "testLb"}, + "Port": 80, + "Protocol": "HTTP", + "DefaultActions": [ + { + "Type": "fixed-response", + "FixedResponseConfig": { + "ContentType": "text/plain", + "MessageBody": "This page does not exist", + "StatusCode": "404", + }, + } + ], + }, + }, + }, + } + template_json = json.dumps(template) + cnf_conn.create_stack(StackName="test-stack", TemplateBody=template_json) + + describe_load_balancers_response = elbv2_client.describe_load_balancers( + Names=["my-lb"] + ) + load_balancer_arn = describe_load_balancers_response["LoadBalancers"][0][ + "LoadBalancerArn" + ] + describe_listeners_response = elbv2_client.describe_listeners( + LoadBalancerArn=load_balancer_arn + ) + + describe_listeners_response["Listeners"].should.have.length_of(1) + describe_listeners_response["Listeners"][0]["DefaultActions"].should.equal( + [ + { + "Type": "fixed-response", + "FixedResponseConfig": { + "ContentType": "text/plain", + "MessageBody": "This page does not exist", + "StatusCode": "404", + }, + } + ] + ) \ No newline at end of file diff --git a/tests/test_glacier/test_glacier_server.py b/tests/test_glacier/test_server.py similarity index 100% rename from tests/test_glacier/test_glacier_server.py rename to tests/test_glacier/test_server.py diff --git a/tests/test_logs/test_integration.py b/tests/test_logs/test_integration.py new file mode 100644 index 000000000000..3fe6a68afdab --- /dev/null +++ b/tests/test_logs/test_integration.py @@ -0,0 +1,383 @@ +import base64 +import boto3 
+import json
+import time
+import zlib
+
+from botocore.exceptions import ClientError
+from io import BytesIO
+from moto import mock_logs, mock_lambda, mock_iam
+from nose.tools import assert_raises
+from zipfile import ZipFile, ZIP_DEFLATED
+
+
+@mock_lambda
+@mock_logs
+def test_put_subscription_filter_update():
+    # given
+    region_name = "us-east-1"
+    client_lambda = boto3.client("lambda", region_name)
+    client_logs = boto3.client("logs", region_name)
+    log_group_name = "/test"
+    log_stream_name = "stream"
+    client_logs.create_log_group(logGroupName=log_group_name)
+    client_logs.create_log_stream(
+        logGroupName=log_group_name, logStreamName=log_stream_name
+    )
+    function_arn = client_lambda.create_function(
+        FunctionName="test",
+        Runtime="python3.8",
+        Role=_get_role_name(region_name),
+        Handler="lambda_function.lambda_handler",
+        Code={"ZipFile": _get_test_zip_file()},
+        Description="test lambda function",
+        Timeout=3,
+        MemorySize=128,
+        Publish=True,
+    )["FunctionArn"]
+
+    # when
+    client_logs.put_subscription_filter(
+        logGroupName=log_group_name,
+        filterName="test",
+        filterPattern="",
+        destinationArn=function_arn,
+    )
+
+    # then
+    response = client_logs.describe_subscription_filters(logGroupName=log_group_name)
+    response["subscriptionFilters"].should.have.length_of(1)
+    filter = response["subscriptionFilters"][0]
+    creation_time = filter["creationTime"]
+    creation_time.should.be.a(int)
+    filter["destinationArn"] = "arn:aws:lambda:us-east-1:123456789012:function:test"
+    filter["distribution"] = "ByLogStream"
+    filter["logGroupName"] = "/test"
+    filter["filterName"] = "test"
+    filter["filterPattern"] = ""
+
+    # when
+    # to update an existing subscription filter the 'filterName' must be identical
+    client_logs.put_subscription_filter(
+        logGroupName=log_group_name,
+        filterName="test",
+        filterPattern="[]",
+        destinationArn=function_arn,
+    )
+
+    # then
+    response = client_logs.describe_subscription_filters(logGroupName=log_group_name)
+    response["subscriptionFilters"].should.have.length_of(1)
+    filter = response["subscriptionFilters"][0]
+    filter["creationTime"].should.equal(creation_time)
+    filter["destinationArn"] = "arn:aws:lambda:us-east-1:123456789012:function:test"
+    filter["distribution"] = "ByLogStream"
+    filter["logGroupName"] = "/test"
+    filter["filterName"] = "test"
+    filter["filterPattern"] = "[]"
+
+    # when
+    # only one subscription filter can be associated with a log group
+    with assert_raises(ClientError) as e:
+        client_logs.put_subscription_filter(
+            logGroupName=log_group_name,
+            filterName="test-2",
+            filterPattern="",
+            destinationArn=function_arn,
+        )
+
+    # then
+    ex = e.exception
+    ex.operation_name.should.equal("PutSubscriptionFilter")
+    ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400)
+    ex.response["Error"]["Code"].should.contain("LimitExceededException")
+    ex.response["Error"]["Message"].should.equal("Resource limit exceeded.")
+
+
+@mock_lambda
+@mock_logs
+def test_put_subscription_filter_with_lambda():
+    # given
+    region_name = "us-east-1"
+    client_lambda = boto3.client("lambda", region_name)
+    client_logs = boto3.client("logs", region_name)
+    log_group_name = "/test"
+    log_stream_name = "stream"
+    client_logs.create_log_group(logGroupName=log_group_name)
+    client_logs.create_log_stream(
+        logGroupName=log_group_name, logStreamName=log_stream_name
+    )
+    function_arn = client_lambda.create_function(
+        FunctionName="test",
+        Runtime="python3.8",
+        Role=_get_role_name(region_name),
+        Handler="lambda_function.lambda_handler",
+        
Code={"ZipFile": _get_test_zip_file()}, + Description="test lambda function", + Timeout=3, + MemorySize=128, + Publish=True, + )["FunctionArn"] + + # when + client_logs.put_subscription_filter( + logGroupName=log_group_name, + filterName="test", + filterPattern="", + destinationArn=function_arn, + ) + + # then + response = client_logs.describe_subscription_filters(logGroupName=log_group_name) + response["subscriptionFilters"].should.have.length_of(1) + filter = response["subscriptionFilters"][0] + filter["creationTime"].should.be.a(int) + filter["destinationArn"] = "arn:aws:lambda:us-east-1:123456789012:function:test" + filter["distribution"] = "ByLogStream" + filter["logGroupName"] = "/test" + filter["filterName"] = "test" + filter["filterPattern"] = "" + + # when + client_logs.put_log_events( + logGroupName=log_group_name, + logStreamName=log_stream_name, + logEvents=[ + {"timestamp": 0, "message": "test"}, + {"timestamp": 0, "message": "test 2"}, + ], + ) + + # then + msg_showed_up, received_message = _wait_for_log_msg( + client_logs, "/aws/lambda/test", "awslogs" + ) + assert msg_showed_up, "CloudWatch log event was not found. All logs: {}".format( + received_message + ) + + data = json.loads(received_message)["awslogs"]["data"] + response = json.loads( + zlib.decompress(base64.b64decode(data), 16 + zlib.MAX_WBITS).decode("utf-8") + ) + response["messageType"].should.equal("DATA_MESSAGE") + response["owner"].should.equal("123456789012") + response["logGroup"].should.equal("/test") + response["logStream"].should.equal("stream") + response["subscriptionFilters"].should.equal(["test"]) + log_events = sorted(response["logEvents"], key=lambda log_event: log_event["id"]) + log_events.should.have.length_of(2) + log_events[0]["id"].should.be.a(int) + log_events[0]["message"].should.equal("test") + log_events[0]["timestamp"].should.equal(0) + log_events[1]["id"].should.be.a(int) + log_events[1]["message"].should.equal("test 2") + log_events[1]["timestamp"].should.equal(0) + + +@mock_lambda +@mock_logs +def test_delete_subscription_filter_errors(): + # given + region_name = "us-east-1" + client_lambda = boto3.client("lambda", region_name) + client_logs = boto3.client("logs", region_name) + log_group_name = "/test" + client_logs.create_log_group(logGroupName=log_group_name) + function_arn = client_lambda.create_function( + FunctionName="test", + Runtime="python3.8", + Role=_get_role_name(region_name), + Handler="lambda_function.lambda_handler", + Code={"ZipFile": _get_test_zip_file()}, + Description="test lambda function", + Timeout=3, + MemorySize=128, + Publish=True, + )["FunctionArn"] + client_logs.put_subscription_filter( + logGroupName=log_group_name, + filterName="test", + filterPattern="", + destinationArn=function_arn, + ) + + # when + client_logs.delete_subscription_filter( + logGroupName="/test", filterName="test", + ) + + # then + response = client_logs.describe_subscription_filters(logGroupName=log_group_name) + response["subscriptionFilters"].should.have.length_of(0) + + +@mock_lambda +@mock_logs +def test_delete_subscription_filter_errors(): + # given + region_name = "us-east-1" + client_lambda = boto3.client("lambda", region_name) + client_logs = boto3.client("logs", region_name) + log_group_name = "/test" + client_logs.create_log_group(logGroupName=log_group_name) + function_arn = client_lambda.create_function( + FunctionName="test", + Runtime="python3.8", + Role=_get_role_name(region_name), + Handler="lambda_function.lambda_handler", + Code={"ZipFile": _get_test_zip_file()}, + 
Description="test lambda function", + Timeout=3, + MemorySize=128, + Publish=True, + )["FunctionArn"] + client_logs.put_subscription_filter( + logGroupName=log_group_name, + filterName="test", + filterPattern="", + destinationArn=function_arn, + ) + + # when + with assert_raises(ClientError) as e: + client_logs.delete_subscription_filter( + logGroupName="not-existing-log-group", filterName="test", + ) + + # then + ex = e.exception + ex.operation_name.should.equal("DeleteSubscriptionFilter") + ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) + ex.response["Error"]["Code"].should.contain("ResourceNotFoundException") + ex.response["Error"]["Message"].should.equal( + "The specified log group does not exist" + ) + + # when + with assert_raises(ClientError) as e: + client_logs.delete_subscription_filter( + logGroupName="/test", filterName="wrong-filter-name", + ) + + # then + ex = e.exception + ex.operation_name.should.equal("DeleteSubscriptionFilter") + ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) + ex.response["Error"]["Code"].should.contain("ResourceNotFoundException") + ex.response["Error"]["Message"].should.equal( + "The specified subscription filter does not exist." + ) + + +@mock_logs +def test_put_subscription_filter_errors(): + # given + client = boto3.client("logs", "us-east-1") + log_group_name = "/test" + client.create_log_group(logGroupName=log_group_name) + + # when + with assert_raises(ClientError) as e: + client.put_subscription_filter( + logGroupName="not-existing-log-group", + filterName="test", + filterPattern="", + destinationArn="arn:aws:lambda:us-east-1:123456789012:function:test", + ) + + # then + ex = e.exception + ex.operation_name.should.equal("PutSubscriptionFilter") + ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) + ex.response["Error"]["Code"].should.contain("ResourceNotFoundException") + ex.response["Error"]["Message"].should.equal( + "The specified log group does not exist" + ) + + # when + with assert_raises(ClientError) as e: + client.put_subscription_filter( + logGroupName="/test", + filterName="test", + filterPattern="", + destinationArn="arn:aws:lambda:us-east-1:123456789012:function:not-existing", + ) + + # then + ex = e.exception + ex.operation_name.should.equal("PutSubscriptionFilter") + ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) + ex.response["Error"]["Code"].should.contain("InvalidParameterException") + ex.response["Error"]["Message"].should.equal( + "Could not execute the lambda function. " + "Make sure you have given CloudWatch Logs permission to execute your function." + ) + + # when + with assert_raises(ClientError) as e: + client.put_subscription_filter( + logGroupName="/test", + filterName="test", + filterPattern="", + destinationArn="arn:aws:lambda:us-east-1:123456789012:function:not-existing", + ) + + # then + ex = e.exception + ex.operation_name.should.equal("PutSubscriptionFilter") + ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) + ex.response["Error"]["Code"].should.contain("InvalidParameterException") + ex.response["Error"]["Message"].should.equal( + "Could not execute the lambda function. " + "Make sure you have given CloudWatch Logs permission to execute your function." 
+ ) + + +def _get_role_name(region_name): + with mock_iam(): + iam = boto3.client("iam", region_name=region_name) + try: + return iam.get_role(RoleName="test-role")["Role"]["Arn"] + except ClientError: + return iam.create_role( + RoleName="test-role", AssumeRolePolicyDocument="test policy", Path="/", + )["Role"]["Arn"] + + +def _get_test_zip_file(): + func_str = """ +def lambda_handler(event, context): + return event +""" + + zip_output = BytesIO() + zip_file = ZipFile(zip_output, "w", ZIP_DEFLATED) + zip_file.writestr("lambda_function.py", func_str) + zip_file.close() + zip_output.seek(0) + return zip_output.read() + + +def _wait_for_log_msg(client, log_group_name, expected_msg_part): + received_messages = [] + start = time.time() + while (time.time() - start) < 10: + result = client.describe_log_streams(logGroupName=log_group_name) + log_streams = result.get("logStreams") + if not log_streams: + time.sleep(1) + continue + + for log_stream in log_streams: + result = client.get_log_events( + logGroupName=log_group_name, logStreamName=log_stream["logStreamName"], + ) + received_messages.extend( + [event["message"] for event in result.get("events")] + ) + for message in received_messages: + if expected_msg_part in message: + return True, message + time.sleep(1) + return False, received_messages \ No newline at end of file diff --git a/tests/test_logs/test_logs.py b/tests/test_logs/test_logs.py index 675948150410..e234cc561a93 100644 --- a/tests/test_logs/test_logs.py +++ b/tests/test_logs/test_logs.py @@ -1,17 +1,10 @@ -import base64 -import json -import time -import zlib -from io import BytesIO -from zipfile import ZipFile, ZIP_DEFLATED - import boto3 import os import sure # noqa import six from botocore.exceptions import ClientError -from moto import mock_logs, settings, mock_lambda, mock_iam +from moto import mock_logs, settings from nose.tools import assert_raises from nose import SkipTest @@ -465,375 +458,3 @@ def test_describe_subscription_filters_errors(): ex.response["Error"]["Message"].should.equal( "The specified log group does not exist" ) - - -@mock_lambda -@mock_logs -def test_put_subscription_filter_update(): - # given - region_name = "us-east-1" - client_lambda = boto3.client("lambda", region_name) - client_logs = boto3.client("logs", region_name) - log_group_name = "/test" - log_stream_name = "stream" - client_logs.create_log_group(logGroupName=log_group_name) - client_logs.create_log_stream( - logGroupName=log_group_name, logStreamName=log_stream_name - ) - function_arn = client_lambda.create_function( - FunctionName="test", - Runtime="python3.8", - Role=_get_role_name(region_name), - Handler="lambda_function.lambda_handler", - Code={"ZipFile": _get_test_zip_file()}, - Description="test lambda function", - Timeout=3, - MemorySize=128, - Publish=True, - )["FunctionArn"] - - # when - client_logs.put_subscription_filter( - logGroupName=log_group_name, - filterName="test", - filterPattern="", - destinationArn=function_arn, - ) - - # then - response = client_logs.describe_subscription_filters(logGroupName=log_group_name) - response["subscriptionFilters"].should.have.length_of(1) - filter = response["subscriptionFilters"][0] - creation_time = filter["creationTime"] - creation_time.should.be.a(int) - filter["destinationArn"] = "arn:aws:lambda:us-east-1:123456789012:function:test" - filter["distribution"] = "ByLogStream" - filter["logGroupName"] = "/test" - filter["filterName"] = "test" - filter["filterPattern"] = "" - - # when - # to update an existing subscription filter the 
'filerName' must be identical - client_logs.put_subscription_filter( - logGroupName=log_group_name, - filterName="test", - filterPattern="[]", - destinationArn=function_arn, - ) - - # then - response = client_logs.describe_subscription_filters(logGroupName=log_group_name) - response["subscriptionFilters"].should.have.length_of(1) - filter = response["subscriptionFilters"][0] - filter["creationTime"].should.equal(creation_time) - filter["destinationArn"] = "arn:aws:lambda:us-east-1:123456789012:function:test" - filter["distribution"] = "ByLogStream" - filter["logGroupName"] = "/test" - filter["filterName"] = "test" - filter["filterPattern"] = "[]" - - # when - # only one subscription filter can be associated with a log group - with assert_raises(ClientError) as e: - client_logs.put_subscription_filter( - logGroupName=log_group_name, - filterName="test-2", - filterPattern="", - destinationArn=function_arn, - ) - - # then - ex = e.exception - ex.operation_name.should.equal("PutSubscriptionFilter") - ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) - ex.response["Error"]["Code"].should.contain("LimitExceededException") - ex.response["Error"]["Message"].should.equal("Resource limit exceeded.") - - -@mock_lambda -@mock_logs -def test_put_subscription_filter_with_lambda(): - # given - region_name = "us-east-1" - client_lambda = boto3.client("lambda", region_name) - client_logs = boto3.client("logs", region_name) - log_group_name = "/test" - log_stream_name = "stream" - client_logs.create_log_group(logGroupName=log_group_name) - client_logs.create_log_stream( - logGroupName=log_group_name, logStreamName=log_stream_name - ) - function_arn = client_lambda.create_function( - FunctionName="test", - Runtime="python3.8", - Role=_get_role_name(region_name), - Handler="lambda_function.lambda_handler", - Code={"ZipFile": _get_test_zip_file()}, - Description="test lambda function", - Timeout=3, - MemorySize=128, - Publish=True, - )["FunctionArn"] - - # when - client_logs.put_subscription_filter( - logGroupName=log_group_name, - filterName="test", - filterPattern="", - destinationArn=function_arn, - ) - - # then - response = client_logs.describe_subscription_filters(logGroupName=log_group_name) - response["subscriptionFilters"].should.have.length_of(1) - filter = response["subscriptionFilters"][0] - filter["creationTime"].should.be.a(int) - filter["destinationArn"] = "arn:aws:lambda:us-east-1:123456789012:function:test" - filter["distribution"] = "ByLogStream" - filter["logGroupName"] = "/test" - filter["filterName"] = "test" - filter["filterPattern"] = "" - - # when - client_logs.put_log_events( - logGroupName=log_group_name, - logStreamName=log_stream_name, - logEvents=[ - {"timestamp": 0, "message": "test"}, - {"timestamp": 0, "message": "test 2"}, - ], - ) - - # then - msg_showed_up, received_message = _wait_for_log_msg( - client_logs, "/aws/lambda/test", "awslogs" - ) - assert msg_showed_up, "CloudWatch log event was not found. 
All logs: {}".format( - received_message - ) - - data = json.loads(received_message)["awslogs"]["data"] - response = json.loads( - zlib.decompress(base64.b64decode(data), 16 + zlib.MAX_WBITS).decode("utf-8") - ) - response["messageType"].should.equal("DATA_MESSAGE") - response["owner"].should.equal("123456789012") - response["logGroup"].should.equal("/test") - response["logStream"].should.equal("stream") - response["subscriptionFilters"].should.equal(["test"]) - log_events = sorted(response["logEvents"], key=lambda log_event: log_event["id"]) - log_events.should.have.length_of(2) - log_events[0]["id"].should.be.a(int) - log_events[0]["message"].should.equal("test") - log_events[0]["timestamp"].should.equal(0) - log_events[1]["id"].should.be.a(int) - log_events[1]["message"].should.equal("test 2") - log_events[1]["timestamp"].should.equal(0) - - -@mock_logs -def test_put_subscription_filter_errors(): - # given - client = boto3.client("logs", "us-east-1") - log_group_name = "/test" - client.create_log_group(logGroupName=log_group_name) - - # when - with assert_raises(ClientError) as e: - client.put_subscription_filter( - logGroupName="not-existing-log-group", - filterName="test", - filterPattern="", - destinationArn="arn:aws:lambda:us-east-1:123456789012:function:test", - ) - - # then - ex = e.exception - ex.operation_name.should.equal("PutSubscriptionFilter") - ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) - ex.response["Error"]["Code"].should.contain("ResourceNotFoundException") - ex.response["Error"]["Message"].should.equal( - "The specified log group does not exist" - ) - - # when - with assert_raises(ClientError) as e: - client.put_subscription_filter( - logGroupName="/test", - filterName="test", - filterPattern="", - destinationArn="arn:aws:lambda:us-east-1:123456789012:function:not-existing", - ) - - # then - ex = e.exception - ex.operation_name.should.equal("PutSubscriptionFilter") - ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) - ex.response["Error"]["Code"].should.contain("InvalidParameterException") - ex.response["Error"]["Message"].should.equal( - "Could not execute the lambda function. " - "Make sure you have given CloudWatch Logs permission to execute your function." - ) - - # when - with assert_raises(ClientError) as e: - client.put_subscription_filter( - logGroupName="/test", - filterName="test", - filterPattern="", - destinationArn="arn:aws:lambda:us-east-1:123456789012:function:not-existing", - ) - - # then - ex = e.exception - ex.operation_name.should.equal("PutSubscriptionFilter") - ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) - ex.response["Error"]["Code"].should.contain("InvalidParameterException") - ex.response["Error"]["Message"].should.equal( - "Could not execute the lambda function. " - "Make sure you have given CloudWatch Logs permission to execute your function." 
- ) - - -@mock_lambda -@mock_logs -def test_delete_subscription_filter_errors(): - # given - region_name = "us-east-1" - client_lambda = boto3.client("lambda", region_name) - client_logs = boto3.client("logs", region_name) - log_group_name = "/test" - client_logs.create_log_group(logGroupName=log_group_name) - function_arn = client_lambda.create_function( - FunctionName="test", - Runtime="python3.8", - Role=_get_role_name(region_name), - Handler="lambda_function.lambda_handler", - Code={"ZipFile": _get_test_zip_file()}, - Description="test lambda function", - Timeout=3, - MemorySize=128, - Publish=True, - )["FunctionArn"] - client_logs.put_subscription_filter( - logGroupName=log_group_name, - filterName="test", - filterPattern="", - destinationArn=function_arn, - ) - - # when - client_logs.delete_subscription_filter( - logGroupName="/test", filterName="test", - ) - - # then - response = client_logs.describe_subscription_filters(logGroupName=log_group_name) - response["subscriptionFilters"].should.have.length_of(0) - - -@mock_lambda -@mock_logs -def test_delete_subscription_filter_errors(): - # given - region_name = "us-east-1" - client_lambda = boto3.client("lambda", region_name) - client_logs = boto3.client("logs", region_name) - log_group_name = "/test" - client_logs.create_log_group(logGroupName=log_group_name) - function_arn = client_lambda.create_function( - FunctionName="test", - Runtime="python3.8", - Role=_get_role_name(region_name), - Handler="lambda_function.lambda_handler", - Code={"ZipFile": _get_test_zip_file()}, - Description="test lambda function", - Timeout=3, - MemorySize=128, - Publish=True, - )["FunctionArn"] - client_logs.put_subscription_filter( - logGroupName=log_group_name, - filterName="test", - filterPattern="", - destinationArn=function_arn, - ) - - # when - with assert_raises(ClientError) as e: - client_logs.delete_subscription_filter( - logGroupName="not-existing-log-group", filterName="test", - ) - - # then - ex = e.exception - ex.operation_name.should.equal("DeleteSubscriptionFilter") - ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) - ex.response["Error"]["Code"].should.contain("ResourceNotFoundException") - ex.response["Error"]["Message"].should.equal( - "The specified log group does not exist" - ) - - # when - with assert_raises(ClientError) as e: - client_logs.delete_subscription_filter( - logGroupName="/test", filterName="wrong-filter-name", - ) - - # then - ex = e.exception - ex.operation_name.should.equal("DeleteSubscriptionFilter") - ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) - ex.response["Error"]["Code"].should.contain("ResourceNotFoundException") - ex.response["Error"]["Message"].should.equal( - "The specified subscription filter does not exist." 
- ) - - -def _get_role_name(region_name): - with mock_iam(): - iam = boto3.client("iam", region_name=region_name) - try: - return iam.get_role(RoleName="test-role")["Role"]["Arn"] - except ClientError: - return iam.create_role( - RoleName="test-role", AssumeRolePolicyDocument="test policy", Path="/", - )["Role"]["Arn"] - - -def _get_test_zip_file(): - func_str = """ -def lambda_handler(event, context): - return event -""" - - zip_output = BytesIO() - zip_file = ZipFile(zip_output, "w", ZIP_DEFLATED) - zip_file.writestr("lambda_function.py", func_str) - zip_file.close() - zip_output.seek(0) - return zip_output.read() - - -def _wait_for_log_msg(client, log_group_name, expected_msg_part): - received_messages = [] - start = time.time() - while (time.time() - start) < 10: - result = client.describe_log_streams(logGroupName=log_group_name) - log_streams = result.get("logStreams") - if not log_streams: - time.sleep(1) - continue - - for log_stream in log_streams: - result = client.get_log_events( - logGroupName=log_group_name, logStreamName=log_stream["logStreamName"], - ) - received_messages.extend( - [event["message"] for event in result.get("events")] - ) - for message in received_messages: - if expected_msg_part in message: - return True, message - time.sleep(1) - return False, received_messages diff --git a/tests/test_s3bucket_path/test_bucket_path_server.py b/tests/test_s3bucket_path/test_server.py similarity index 100% rename from tests/test_s3bucket_path/test_bucket_path_server.py rename to tests/test_s3bucket_path/test_server.py diff --git a/tests/test_sqs/test_sqs.py b/tests/test_sqs/test_sqs.py index b072e8b94033..3f325d8077ac 100644 --- a/tests/test_sqs/test_sqs.py +++ b/tests/test_sqs/test_sqs.py @@ -3,7 +3,6 @@ import base64 import json -import os import time import uuid @@ -17,34 +16,13 @@ from boto.sqs.message import Message, RawMessage from botocore.exceptions import ClientError from freezegun import freeze_time -from moto import mock_sqs, mock_sqs_deprecated, mock_cloudformation, settings +from moto import mock_sqs, mock_sqs_deprecated, mock_lambda, mock_logs, settings from nose import SkipTest from nose.tools import assert_raises from tests.helpers import requires_boto_gte +from tests.test_awslambda.test_lambda import get_test_zip_file1, get_role_name from moto.core import ACCOUNT_ID -sqs_template_with_tags = """ -{ - "AWSTemplateFormatVersion": "2010-09-09", - "Resources": { - "SQSQueue": { - "Type": "AWS::SQS::Queue", - "Properties": { - "Tags" : [ - { - "Key" : "keyname1", - "Value" : "value1" - }, - { - "Key" : "keyname2", - "Value" : "value2" - } - ] - } - } - } -}""" - TEST_POLICY = """ { "Version":"2012-10-17", @@ -2042,15 +2020,54 @@ def test_send_messages_to_fifo_without_message_group_id(): ) +@mock_logs +@mock_lambda @mock_sqs -@mock_cloudformation -def test_create_from_cloudformation_json_with_tags(): - cf = boto3.client("cloudformation", region_name="us-east-1") - client = boto3.client("sqs", region_name="us-east-1") +def test_invoke_function_from_sqs_exception(): + logs_conn = boto3.client("logs", region_name="us-east-1") + sqs = boto3.resource("sqs", region_name="us-east-1") + queue = sqs.create_queue(QueueName="test-sqs-queue1") - cf.create_stack(StackName="test-sqs", TemplateBody=sqs_template_with_tags) + conn = boto3.client("lambda", region_name="us-east-1") + func = conn.create_function( + FunctionName="testFunction", + Runtime="python2.7", + Role=get_role_name(), + Handler="lambda_function.lambda_handler", + Code={"ZipFile": get_test_zip_file1()}, + 
Description="test lambda function", + Timeout=3, + MemorySize=128, + Publish=True, + ) + + response = conn.create_event_source_mapping( + EventSourceArn=queue.attributes["QueueArn"], FunctionName=func["FunctionArn"] + ) + + assert response["EventSourceArn"] == queue.attributes["QueueArn"] + assert response["State"] == "Enabled" - queue_url = client.list_queues()["QueueUrls"][0] + entries = [{"Id": "1", "MessageBody": json.dumps({"uuid": str(uuid.uuid4()), "test": "test"})}] + + queue.send_messages(Entries=entries) + + start = time.time() + while (time.time() - start) < 30: + result = logs_conn.describe_log_streams(logGroupName="/aws/lambda/testFunction") + log_streams = result.get("logStreams") + if not log_streams: + time.sleep(1) + continue + assert len(log_streams) >= 1 + + result = logs_conn.get_log_events( + logGroupName="/aws/lambda/testFunction", + logStreamName=log_streams[0]["logStreamName"], + ) + for event in result.get("events"): + if "custom log event" in event["message"]: + return + time.sleep(1) - queue_tags = client.list_queue_tags(QueueUrl=queue_url)["Tags"] - queue_tags.should.equal({"keyname1": "value1", "keyname2": "value2"}) + assert False, "Test Failed" diff --git a/tests/test_sqs/test_sqs_cloudformation.py b/tests/test_sqs/test_sqs_cloudformation.py new file mode 100644 index 000000000000..73f76c8f6290 --- /dev/null +++ b/tests/test_sqs/test_sqs_cloudformation.py @@ -0,0 +1,38 @@ +import boto3 +from moto import mock_sqs, mock_cloudformation + +sqs_template_with_tags = """ +{ + "AWSTemplateFormatVersion": "2010-09-09", + "Resources": { + "SQSQueue": { + "Type": "AWS::SQS::Queue", + "Properties": { + "Tags" : [ + { + "Key" : "keyname1", + "Value" : "value1" + }, + { + "Key" : "keyname2", + "Value" : "value2" + } + ] + } + } + } +}""" + + +@mock_sqs +@mock_cloudformation +def test_create_from_cloudformation_json_with_tags(): + cf = boto3.client("cloudformation", region_name="us-east-1") + client = boto3.client("sqs", region_name="us-east-1") + + cf.create_stack(StackName="test-sqs", TemplateBody=sqs_template_with_tags) + + queue_url = client.list_queues()["QueueUrls"][0] + + queue_tags = client.list_queue_tags(QueueUrl=queue_url)["Tags"] + queue_tags.should.equal({"keyname1": "value1", "keyname2": "value2"}) diff --git a/tests/test_ssm/test_ssm_boto3.py b/tests/test_ssm/test_ssm_boto3.py index cc79ce93ddf3..2f74759e9003 100644 --- a/tests/test_ssm/test_ssm_boto3.py +++ b/tests/test_ssm/test_ssm_boto3.py @@ -7,12 +7,11 @@ import sure # noqa import datetime import uuid -import json from botocore.exceptions import ClientError, ParamValidationError from nose.tools import assert_raises -from moto import mock_ssm, mock_cloudformation +from moto import mock_ssm @mock_ssm @@ -1714,68 +1713,3 @@ def test_get_command_invocation(): invocation_response = client.get_command_invocation( CommandId=cmd_id, InstanceId=instance_id, PluginName="FAKE" ) - - -@mock_ssm -@mock_cloudformation -def test_get_command_invocations_from_stack(): - stack_template = { - "AWSTemplateFormatVersion": "2010-09-09", - "Description": "Test Stack", - "Resources": { - "EC2Instance1": { - "Type": "AWS::EC2::Instance", - "Properties": { - "ImageId": "ami-test-image-id", - "KeyName": "test", - "InstanceType": "t2.micro", - "Tags": [ - {"Key": "Test Description", "Value": "Test tag"}, - {"Key": "Test Name", "Value": "Name tag for tests"}, - ], - }, - } - }, - "Outputs": { - "test": { - "Description": "Test Output", - "Value": "Test output value", - "Export": {"Name": "Test value to export"}, - }, - "PublicIP": 
{"Value": "Test public ip"}, - }, - } - - cloudformation_client = boto3.client("cloudformation", region_name="us-east-1") - - stack_template_str = json.dumps(stack_template) - - response = cloudformation_client.create_stack( - StackName="test_stack", - TemplateBody=stack_template_str, - Capabilities=("CAPABILITY_IAM",), - ) - - client = boto3.client("ssm", region_name="us-east-1") - - ssm_document = "AWS-RunShellScript" - params = {"commands": ["#!/bin/bash\necho 'hello world'"]} - - response = client.send_command( - Targets=[ - {"Key": "tag:aws:cloudformation:stack-name", "Values": ("test_stack",)} - ], - DocumentName=ssm_document, - Parameters=params, - OutputS3Region="us-east-2", - OutputS3BucketName="the-bucket", - OutputS3KeyPrefix="pref", - ) - - cmd = response["Command"] - cmd_id = cmd["CommandId"] - instance_ids = cmd["InstanceIds"] - - invocation_response = client.get_command_invocation( - CommandId=cmd_id, InstanceId=instance_ids[0], PluginName="aws:runShellScript" - ) diff --git a/tests/test_ssm/test_ssm_cloudformation.py b/tests/test_ssm/test_ssm_cloudformation.py new file mode 100644 index 000000000000..a2205ceba3e1 --- /dev/null +++ b/tests/test_ssm/test_ssm_cloudformation.py @@ -0,0 +1,70 @@ +import boto3 +import json + + +from moto import mock_ssm, mock_cloudformation + + +@mock_ssm +@mock_cloudformation +def test_get_command_invocations_from_stack(): + stack_template = { + "AWSTemplateFormatVersion": "2010-09-09", + "Description": "Test Stack", + "Resources": { + "EC2Instance1": { + "Type": "AWS::EC2::Instance", + "Properties": { + "ImageId": "ami-test-image-id", + "KeyName": "test", + "InstanceType": "t2.micro", + "Tags": [ + {"Key": "Test Description", "Value": "Test tag"}, + {"Key": "Test Name", "Value": "Name tag for tests"}, + ], + }, + } + }, + "Outputs": { + "test": { + "Description": "Test Output", + "Value": "Test output value", + "Export": {"Name": "Test value to export"}, + }, + "PublicIP": {"Value": "Test public ip"}, + }, + } + + cloudformation_client = boto3.client("cloudformation", region_name="us-east-1") + + stack_template_str = json.dumps(stack_template) + + response = cloudformation_client.create_stack( + StackName="test_stack", + TemplateBody=stack_template_str, + Capabilities=("CAPABILITY_IAM",), + ) + + client = boto3.client("ssm", region_name="us-east-1") + + ssm_document = "AWS-RunShellScript" + params = {"commands": ["#!/bin/bash\necho 'hello world'"]} + + response = client.send_command( + Targets=[ + {"Key": "tag:aws:cloudformation:stack-name", "Values": ("test_stack",)} + ], + DocumentName=ssm_document, + Parameters=params, + OutputS3Region="us-east-2", + OutputS3BucketName="the-bucket", + OutputS3KeyPrefix="pref", + ) + + cmd = response["Command"] + cmd_id = cmd["CommandId"] + instance_ids = cmd["InstanceIds"] + + invocation_response = client.get_command_invocation( + CommandId=cmd_id, InstanceId=instance_ids[0], PluginName="aws:runShellScript" + ) From 76aa7ce0abb49d0cfac2517c20f07cf5550ae456 Mon Sep 17 00:00:00 2001 From: Bert Blommers Date: Sun, 13 Sep 2020 16:28:38 +0100 Subject: [PATCH 525/658] Dependency-integration test - add documentation to the top of the script --- scripts/int_test.sh | 22 ++++++++++++++++++++++ 1 file changed, 22 insertions(+) diff --git a/scripts/int_test.sh b/scripts/int_test.sh index bc029eca4854..f57bb157fbb7 100755 --- a/scripts/int_test.sh +++ b/scripts/int_test.sh @@ -1,3 +1,25 @@ +# +# Dependency Integration Test script +# + +# Runs a test to verify whether each service has the correct dependencies listed in 
setup.py
+#
+# ::Algorithm::
+# For each valid service:
+#   - Create a virtual environment
+#   - Install only the necessary dependencies
+#   - Run the tests for that service
+#   - If the tests fail:
+#     - This service is probably missing a dependency
+#     - A log file with the test results will be created (test_results_service.log)
+#   - Delete the virtual environment
+#
+# Note:
+# Only tested on Linux
+# Parallelized to test 4 services at a time.
+# Could take some time to run - around 20 minutes on the author's machine
+
+
 overwrite() { echo -e "\r\033[1A\033[0K$@"; }
 
 contains() {

From 0ab21f62a8cc5eaaaf3aed2cd87d812a90d93866 Mon Sep 17 00:00:00 2001
From: Bert Blommers
Date: Sun, 13 Sep 2020 19:42:38 +0100
Subject: [PATCH 526/658] Linting

---
 tests/test_awslambda/__init__.py              | 0
 tests/test_ecs/test_ecs_cloudformation.py     | 3 ++-
 tests/test_elbv2/test_elbv2_cloudformation.py | 2 +-
 tests/test_logs/test_integration.py           | 2 +-
 tests/test_sqs/test_sqs.py                    | 7 ++++++-
 5 files changed, 10 insertions(+), 4 deletions(-)
 create mode 100644 tests/test_awslambda/__init__.py

diff --git a/tests/test_awslambda/__init__.py b/tests/test_awslambda/__init__.py
new file mode 100644
index 000000000000..e69de29bb2d1
diff --git a/tests/test_ecs/test_ecs_cloudformation.py b/tests/test_ecs/test_ecs_cloudformation.py
index 6988a08e8bf6..a34c89aa7a92 100644
--- a/tests/test_ecs/test_ecs_cloudformation.py
+++ b/tests/test_ecs/test_ecs_cloudformation.py
@@ -3,6 +3,7 @@
 from copy import deepcopy
 from moto import mock_cloudformation, mock_ecs
 
+
 @mock_ecs
 @mock_cloudformation
 def test_update_task_definition_family_through_cloudformation_should_trigger_a_replacement():
@@ -250,4 +251,4 @@ def test_create_task_definition_through_cloudformation():
     task_definition_details = cfn_conn.describe_stack_resource(
         StackName=stack_name, LogicalResourceId="testTaskDefinition"
     )["StackResourceDetail"]
-    task_definition_details["PhysicalResourceId"].should.equal(task_definition_arn)
\ No newline at end of file
+    task_definition_details["PhysicalResourceId"].should.equal(task_definition_arn)
diff --git a/tests/test_elbv2/test_elbv2_cloudformation.py b/tests/test_elbv2/test_elbv2_cloudformation.py
index 9196fc853981..cc7ba8246a2e 100644
--- a/tests/test_elbv2/test_elbv2_cloudformation.py
+++ b/tests/test_elbv2/test_elbv2_cloudformation.py
@@ -345,4 +345,4 @@ def test_fixed_response_action_listener_rule_cloudformation():
                 },
             }
         ]
-    )
\ No newline at end of file
+    )
diff --git a/tests/test_logs/test_integration.py b/tests/test_logs/test_integration.py
index 3fe6a68afdab..bda233485901 100644
--- a/tests/test_logs/test_integration.py
+++ b/tests/test_logs/test_integration.py
@@ -380,4 +380,4 @@ def _wait_for_log_msg(client, log_group_name, expected_msg_part):
             if expected_msg_part in message:
                 return True, message
         time.sleep(1)
-    return False, received_messages
\ No newline at end of file
+    return False, received_messages
diff --git a/tests/test_sqs/test_sqs.py b/tests/test_sqs/test_sqs.py
index 3f325d8077ac..8c05e0f35268 100644
--- a/tests/test_sqs/test_sqs.py
+++ b/tests/test_sqs/test_sqs.py
@@ -2048,7 +2048,12 @@ def test_invoke_function_from_sqs_exception():
     assert response["EventSourceArn"] == queue.attributes["QueueArn"]
     assert response["State"] == "Enabled"
 
-    entries = [{"Id": "1", "MessageBody": json.dumps({"uuid": str(uuid.uuid4()), "test": "test"})}]
+    entries = [
+        {
+            "Id": "1",
+            "MessageBody": json.dumps({"uuid": str(uuid.uuid4()), "test": "test"}),
+        }
+    ]
 
     queue.send_messages(Entries=entries)
 
From 
94543f6e4821ce49f5a8aa73bc3910a34f61cfe7 Mon Sep 17 00:00:00 2001 From: Leo Sutic Date: Tue, 15 Sep 2020 14:29:09 +0200 Subject: [PATCH 527/658] Include response headers when deleting objects. (#3313) * Return delete meta. * Add tests. * Lint fixes. Co-authored-by: Leo Sutic --- moto/s3/models.py | 21 +++++++++++++++++---- moto/s3/responses.py | 12 +++++++++--- tests/test_s3/test_s3.py | 24 ++++++++++++++++++++++++ 3 files changed, 50 insertions(+), 7 deletions(-) diff --git a/moto/s3/models.py b/moto/s3/models.py index 4230479af6c2..98229539e689 100644 --- a/moto/s3/models.py +++ b/moto/s3/models.py @@ -1620,7 +1620,9 @@ def prefix_query(self, bucket, prefix, delimiter): def _set_delete_marker(self, bucket_name, key_name): bucket = self.get_bucket(bucket_name) - bucket.keys[key_name] = FakeDeleteMarker(key=bucket.keys[key_name]) + delete_marker = FakeDeleteMarker(key=bucket.keys[key_name]) + bucket.keys[key_name] = delete_marker + return delete_marker def delete_object_tagging(self, bucket_name, key_name, version_id=None): key = self.get_object(bucket_name, key_name, version_id=version_id) @@ -1630,15 +1632,26 @@ def delete_object(self, bucket_name, key_name, version_id=None): key_name = clean_key_name(key_name) bucket = self.get_bucket(bucket_name) + response_meta = {} + try: if not bucket.is_versioned: bucket.keys.pop(key_name) else: if version_id is None: - self._set_delete_marker(bucket_name, key_name) + delete_marker = self._set_delete_marker(bucket_name, key_name) + response_meta["version-id"] = delete_marker.version_id else: if key_name not in bucket.keys: raise KeyError + + response_meta["delete-marker"] = "false" + for key in bucket.keys.getlist(key_name): + if str(key.version_id) == str(version_id): + if type(key) is FakeDeleteMarker: + response_meta["delete-marker"] = "true" + break + bucket.keys.setlist( key_name, [ @@ -1650,9 +1663,9 @@ def delete_object(self, bucket_name, key_name, version_id=None): if not bucket.keys.getlist(key_name): bucket.keys.pop(key_name) - return True + return True, response_meta except KeyError: - return False + return False, None def copy_key( self, diff --git a/moto/s3/responses.py b/moto/s3/responses.py index fa3e536a7476..530365a6eba4 100644 --- a/moto/s3/responses.py +++ b/moto/s3/responses.py @@ -902,7 +902,7 @@ def _bucket_response_delete_keys(self, request, body, bucket_name): key_name = object_["Key"] version_id = object_.get("VersionId", None) - success = self.backend.delete_object( + success, _ = self.backend.delete_object( bucket_name, undo_clean_key_name(key_name), version_id=version_id ) if success: @@ -1666,8 +1666,14 @@ def _key_response_delete(self, bucket_name, query, key_name): ) template = self.response_template(S3_DELETE_KEY_TAGGING_RESPONSE) return 204, {}, template.render(version_id=version_id) - self.backend.delete_object(bucket_name, key_name, version_id=version_id) - return 204, {}, "" + success, response_meta = self.backend.delete_object( + bucket_name, key_name, version_id=version_id + ) + response_headers = {} + if response_meta is not None: + for k in response_meta: + response_headers["x-amz-{}".format(k)] = response_meta[k] + return 204, response_headers, "" def _complete_multipart_body(self, body): ps = minidom.parseString(body).getElementsByTagName("Part") diff --git a/tests/test_s3/test_s3.py b/tests/test_s3/test_s3.py index d338269e9c8c..8cc9a740cdf2 100644 --- a/tests/test_s3/test_s3.py +++ b/tests/test_s3/test_s3.py @@ -2315,6 +2315,30 @@ def test_boto3_delete_versioned_bucket(): 
client.delete_bucket(Bucket="blah") +@mock_s3 +def test_boto3_delete_versioned_bucket_returns_meta(): + client = boto3.client("s3", region_name=DEFAULT_REGION_NAME) + + client.create_bucket(Bucket="blah") + client.put_bucket_versioning( + Bucket="blah", VersioningConfiguration={"Status": "Enabled"} + ) + + put_resp = client.put_object(Bucket="blah", Key="test1", Body=b"test1") + + # Delete the object + del_resp = client.delete_object(Bucket="blah", Key="test1") + assert "DeleteMarker" not in del_resp + assert del_resp["VersionId"] is not None + + # Delete the delete marker + del_resp2 = client.delete_object( + Bucket="blah", Key="test1", VersionId=del_resp["VersionId"] + ) + assert del_resp2["DeleteMarker"] == True + assert "VersionId" not in del_resp2 + + @mock_s3 def test_boto3_get_object_if_modified_since(): s3 = boto3.client("s3", region_name=DEFAULT_REGION_NAME) From cc0bd5213f005dc705cd31212cc0cb015863a4dd Mon Sep 17 00:00:00 2001 From: Leo Sutic Date: Sat, 19 Sep 2020 11:07:17 +0200 Subject: [PATCH 528/658] Enable CORS from everywhere using flask-cors. (#3316) Co-authored-by: Leo Sutic --- moto/server.py | 2 ++ requirements-dev.txt | 1 + setup.py | 2 +- tests/test_s3/test_server.py | 28 ++++++++++++++++++++++++++++ 4 files changed, 32 insertions(+), 1 deletion(-) diff --git a/moto/server.py b/moto/server.py index bf76095a67fe..a10dc4e3e93f 100644 --- a/moto/server.py +++ b/moto/server.py @@ -9,6 +9,7 @@ import six from flask import Flask +from flask_cors import CORS from flask.testing import FlaskClient from six.moves.urllib.parse import urlencode @@ -205,6 +206,7 @@ def create_backend_app(service): backend_app = Flask(__name__) backend_app.debug = True backend_app.service = service + CORS(backend_app) # Reset view functions to reset the app backend_app.view_functions = {} diff --git a/requirements-dev.txt b/requirements-dev.txt index 8a91eb14f9ad..ad1e30508a0a 100644 --- a/requirements-dev.txt +++ b/requirements-dev.txt @@ -7,6 +7,7 @@ coverage==4.5.4 flake8==3.7.8 freezegun flask +flask-cors boto>=2.45.0 boto3>=1.4.4 botocore>=1.15.13 diff --git a/setup.py b/setup.py index 5f6840251e30..a6adbea6dd33 100755 --- a/setup.py +++ b/setup.py @@ -99,7 +99,7 @@ def get_version(): _dep_sshpubkeys_py2, _dep_sshpubkeys_py3, ] -all_server_deps = all_extra_deps + ['flask'] +all_server_deps = all_extra_deps + ['flask', 'flask-cors'] # TODO: do we want to add ALL services here? # i.e. even those without extra dependencies. 
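The change above hinges on a single call, `CORS(backend_app)`, from the flask-cors package. As a minimal standalone sketch of what that call buys (this is not moto's actual `create_backend_app` wiring, and the route below is a hypothetical stand-in), a browser-style preflight can be exercised against any Flask app like so:

```python
from flask import Flask
from flask_cors import CORS

app = Flask(__name__)
CORS(app)  # answers OPTIONS preflights and adds Access-Control-* response headers

@app.route("/", methods=["GET", "POST"])
def index():  # hypothetical route, purely for demonstration
    return "ok"

# Simulate the preflight a browser sends before a cross-origin POST:
res = app.test_client().options(
    "/",
    headers={
        "Origin": "https://localhost:9000",
        "Access-Control-Request-Method": "POST",
        "Access-Control-Request-Headers": "origin, x-requested-with",
    },
)
print(res.status_code)                                  # 200
print(res.headers.get("Access-Control-Allow-Methods"))  # includes POST
print(res.headers.get("Access-Control-Allow-Headers"))  # echoes the requested headers
```

The test added in tests/test_s3/test_server.py below asserts this same behaviour against moto's own app factory.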
diff --git a/tests/test_s3/test_server.py b/tests/test_s3/test_server.py index 56d46de09384..9ef1acb11937 100644 --- a/tests/test_s3/test_server.py +++ b/tests/test_s3/test_server.py @@ -108,3 +108,31 @@ def test_s3_server_post_unicode_bucket_key(): } ) assert backend_app + + +def test_s3_server_post_cors(): + test_client = authenticated_client() + + preflight_headers = { + "Access-Control-Request-Method": "POST", + "Access-Control-Request-Headers": "origin, x-requested-with", + "Origin": "https://localhost:9000", + } + + res = test_client.options( + "/", "http://tester.localhost:5000/", headers=preflight_headers + ) + assert res.status_code in [200, 204] + + expected_methods = set(["DELETE", "PATCH", "PUT", "GET", "HEAD", "POST", "OPTIONS"]) + assert set(res.headers["Allow"].split(", ")) == expected_methods + assert ( + set(res.headers["Access-Control-Allow-Methods"].split(", ")) == expected_methods + ) + + res.headers.should.have.key("Access-Control-Allow-Origin").which.should.equal( + "https://localhost:9000" + ) + res.headers.should.have.key("Access-Control-Allow-Headers").which.should.equal( + "origin, x-requested-with" + ) From 7ce1e87477cdec51d4a893857cd202b1c4ec2f82 Mon Sep 17 00:00:00 2001 From: Guy Moses Date: Sat, 19 Sep 2020 13:26:01 +0300 Subject: [PATCH 529/658] [fix] cognito-idp list_users Filter arg now support spaces (#3317) --- moto/cognitoidp/responses.py | 2 +- tests/test_cognitoidp/test_cognitoidp.py | 7 +++++++ 2 files changed, 8 insertions(+), 1 deletion(-) diff --git a/moto/cognitoidp/responses.py b/moto/cognitoidp/responses.py index f3c005ff5732..78725bcf14e6 100644 --- a/moto/cognitoidp/responses.py +++ b/moto/cognitoidp/responses.py @@ -287,7 +287,7 @@ def list_users(self): user_pool_id, limit=limit, pagination_token=token ) if filt: - name, value = filt.replace('"', "").split("=") + name, value = filt.replace('"', "").replace(" ", "").split("=") users = [ user for user in users diff --git a/tests/test_cognitoidp/test_cognitoidp.py b/tests/test_cognitoidp/test_cognitoidp.py index 65c5151e3e79..06dae9951d44 100644 --- a/tests/test_cognitoidp/test_cognitoidp.py +++ b/tests/test_cognitoidp/test_cognitoidp.py @@ -1085,6 +1085,13 @@ def test_list_users(): result["Users"].should.have.length_of(1) result["Users"][0]["Username"].should.equal(username_bis) + # checking Filter with space + result = conn.list_users( + UserPoolId=user_pool_id, Filter='phone_number = "+33666666666' + ) + result["Users"].should.have.length_of(1) + result["Users"][0]["Username"].should.equal(username_bis) + @mock_cognitoidp def test_list_users_returns_limit_items(): From 8c94893869971de369be7c98492e2548c34fe0f6 Mon Sep 17 00:00:00 2001 From: Bert Blommers Date: Sat, 19 Sep 2020 20:12:11 +0100 Subject: [PATCH 530/658] #3306 - Fix S3 creation date for AWS Java SDK --- moto/s3/models.py | 4 ++++ moto/s3/responses.py | 2 +- 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/moto/s3/models.py b/moto/s3/models.py index 98229539e689..706d8ad4205d 100644 --- a/moto/s3/models.py +++ b/moto/s3/models.py @@ -788,6 +788,10 @@ def __init__(self, name, region_name): def location(self): return self.region_name + @property + def creation_date_ISO8601(self): + return iso_8601_datetime_without_milliseconds_s3(self.creation_date) + @property def is_versioned(self): return self.versioning_status == "Enabled" diff --git a/moto/s3/responses.py b/moto/s3/responses.py index 530365a6eba4..396a53aae206 100644 --- a/moto/s3/responses.py +++ b/moto/s3/responses.py @@ -1755,7 +1755,7 @@ def _invalid_headers(self, 
url, headers): {% for bucket in buckets %} {{ bucket.name }} - {{ bucket.creation_date.isoformat() }} + {{ bucket.creation_date_ISO8601 }} {% endfor %} From da4de072a9a12818a2a9d869a98d4fd7caf10744 Mon Sep 17 00:00:00 2001 From: jweite Date: Sat, 19 Sep 2020 15:13:44 -0400 Subject: [PATCH 531/658] ApplicationAutoscaling: support for all the current various forms of resource_id (#3305) * Change to test_s3 method test_presigned_url_restrict_parameters to tolerate change in exception messages, spurred by boto3 1.14.59 release. * ApplicationAutoscaling: support for all the current various forms of resource_id. * Factored logic for extracting application autoscaling resource_type from resource_id to separate function, per PR3304 comment. Co-authored-by: Joseph Weitekamp --- moto/applicationautoscaling/models.py | 29 +++++++- .../test_applicationautoscaling.py | 73 +++++++++++++++++++ 2 files changed, 101 insertions(+), 1 deletion(-) diff --git a/moto/applicationautoscaling/models.py b/moto/applicationautoscaling/models.py index 39bb497aa524..a6303c75c1da 100644 --- a/moto/applicationautoscaling/models.py +++ b/moto/applicationautoscaling/models.py @@ -135,7 +135,7 @@ def _target_params_are_valid(namespace, r_id, dimension): try: valid_dimensions = [d.value for d in ScalableDimensionValueSet] d_namespace, d_resource_type, scaling_property = dimension.split(":") - resource_type, cluster, service = r_id.split("/") + resource_type = _get_resource_type_from_resource_id(r_id) if ( dimension not in valid_dimensions or d_namespace != namespace @@ -151,6 +151,33 @@ def _target_params_are_valid(namespace, r_id, dimension): return is_valid +def _get_resource_type_from_resource_id(resource_id): + # AWS Application Autoscaling resource_ids are multi-component (path-like) identifiers that vary in format, + # depending on the type of resource it identifies. resource_type is one of its components. + # resource_id format variations are described in + # https://docs.aws.amazon.com/autoscaling/application/APIReference/API_RegisterScalableTarget.html + # In a nutshell: + # - Most use slash separators, but some use colon separators. + # - The resource type is usually the first component of the resource_id... + # - ...except for sagemaker endpoints, dynamodb GSIs and keyspaces tables, where it's the third. + # - Comprehend uses an arn, with the resource type being the last element. 
+ + if resource_id.startswith("arn:aws:comprehend"): + resource_id = resource_id.split(":")[-1] + resource_split = ( + resource_id.split("/") if "/" in resource_id else resource_id.split(":") + ) + if ( + resource_split[0] == "endpoint" + or (resource_split[0] == "table" and len(resource_split) > 2) + or (resource_split[0] == "keyspace") + ): + resource_type = resource_split[2] + else: + resource_type = resource_split[0] + return resource_type + + class FakeScalableTarget(BaseModel): def __init__( self, backend, service_namespace, resource_id, scalable_dimension, **kwargs diff --git a/tests/test_applicationautoscaling/test_applicationautoscaling.py b/tests/test_applicationautoscaling/test_applicationautoscaling.py index 632804992f53..8e5e136e5969 100644 --- a/tests/test_applicationautoscaling/test_applicationautoscaling.py +++ b/tests/test_applicationautoscaling/test_applicationautoscaling.py @@ -187,3 +187,76 @@ def register_scalable_target(client, **kwargs): RoleARN=kwargs.get("RoleARN", DEFAULT_ROLE_ARN), SuspendedState=kwargs.get("SuspendedState", DEFAULT_SUSPENDED_STATE), ) + + +@mock_ecs +@mock_applicationautoscaling +def test_register_scalable_target_resource_id_variations(): + + # Required to register an ECS target in moto + ecs = boto3.client("ecs", region_name=DEFAULT_REGION) + _create_ecs_defaults(ecs) + + # See https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-applicationautoscaling-scalabletarget.html + resource_id_variations = [ + ( + DEFAULT_SERVICE_NAMESPACE, + DEFAULT_RESOURCE_ID, + DEFAULT_SCALABLE_DIMENSION, + ), # ECS + ( + "ec2", + "spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE", + "ec2:spot-fleet-request:TargetCapacity", + ), + ( + "elasticmapreduce", + "instancegroup/j-2EEZNYKUA1NTV/ig-1791Y4E1L8YI0", + "elasticmapreduce:instancegroup:InstanceCount", + ), + ("appstream", "fleet/sample-fleet", "appstream:fleet:DesiredCapacity"), + ("dynamodb", "table/my-table", "dynamodb:table:ReadCapacityUnits"), + ( + "dynamodb", + "table/my-table/index/my-table-index", + "dynamodb:index:ReadCapacityUnits", + ), + ("rds", "cluster:my-db-cluster", "rds:cluster:ReadReplicaCount"), + ( + "sagemaker", + "endpoint/MyEndPoint/variant/MyVariant", + "sagemaker:variant:DesiredInstanceCount", + ), + ( + "comprehend", + "arn:aws:comprehend:us-west-2:123456789012:document-classifier-endpoint/EXAMPLE", + "comprehend:document-classifier-endpoint:DesiredInferenceUnits", + ), + ( + "lambda", + "function:my-function:prod", + "lambda:function:ProvisionedConcurrency", + ), + ( + "cassandra", + "keyspace/mykeyspace/table/mytable", + "cassandra:table:ReadCapacityUnits", + ), + ] + + client = boto3.client("application-autoscaling", region_name=DEFAULT_REGION) + for namespace, resource_id, scalable_dimension in resource_id_variations: + client.register_scalable_target( + ServiceNamespace=namespace, + ResourceId=resource_id, + ScalableDimension=scalable_dimension, + ) + response = client.describe_scalable_targets(ServiceNamespace=namespace) + response["ResponseMetadata"]["HTTPStatusCode"].should.equal(200) + num_targets = 2 if namespace == "dynamodb" and "index" in resource_id else 1 + len(response["ScalableTargets"]).should.equal(num_targets) + t = response["ScalableTargets"][-1] + t.should.have.key("ServiceNamespace").which.should.equal(namespace) + t.should.have.key("ResourceId").which.should.equal(resource_id) + t.should.have.key("ScalableDimension").which.should.equal(scalable_dimension) + t.should.have.key("CreationTime").which.should.be.a("datetime.datetime") 
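The resource_id parsing that this patch adds is easiest to see with concrete inputs. The sketch below restates the committed `_get_resource_type_from_resource_id` logic as a standalone function and runs a few of the test's resource IDs through it; the ECS-style ID in the first assert is an assumed example, since the test's `DEFAULT_RESOURCE_ID` constant is defined elsewhere in the suite:

```python
def resource_type_of(resource_id):
    # Standalone restatement of _get_resource_type_from_resource_id
    # (moto/applicationautoscaling/models.py) from the patch above.
    if resource_id.startswith("arn:aws:comprehend"):
        # Comprehend IDs are ARNs; the resource type is the last colon-separated element
        resource_id = resource_id.split(":")[-1]
    parts = resource_id.split("/") if "/" in resource_id else resource_id.split(":")
    if (
        parts[0] == "endpoint"                       # sagemaker endpoint variants
        or (parts[0] == "table" and len(parts) > 2)  # dynamodb GSIs
        or parts[0] == "keyspace"                    # keyspaces tables
    ):
        return parts[2]
    return parts[0]


assert resource_type_of("service/default/sample-webapp") == "service"  # assumed ECS id
assert resource_type_of("table/my-table") == "table"
assert resource_type_of("table/my-table/index/my-table-index") == "index"
assert resource_type_of("endpoint/MyEndPoint/variant/MyVariant") == "variant"
assert resource_type_of("cluster:my-db-cluster") == "cluster"
assert resource_type_of("keyspace/mykeyspace/table/mytable") == "table"
assert (
    resource_type_of(
        "arn:aws:comprehend:us-west-2:123456789012:document-classifier-endpoint/EXAMPLE"
    )
    == "document-classifier-endpoint"
)
```

The extracted type is then checked against the middle component of the scalable dimension (e.g. `dynamodb:index:ReadCapacityUnits`) in `_target_params_are_valid`.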
From 1022aa0968f1975487ef24d4fcac3f23a9266a5e Mon Sep 17 00:00:00 2001 From: ruthbovell <63656505+ruthbovell@users.noreply.github.com> Date: Mon, 21 Sep 2020 07:37:50 +0100 Subject: [PATCH 532/658] Issue 3224 s3 copy glacier object (#3318) * 3224 Enhancement - S3 Copy restored glacier objects - adds setter for expiry date - copy sets expiry date to none when source is glacier object - throws error for copying glacier object only if not restored/still restoring * 3224 Enhancement - S3 Copy restored glacier objects - throws error for copying deep archive object only if not restored/still restoring --- moto/s3/models.py | 6 +++ moto/s3/responses.py | 8 +++- tests/test_s3/test_s3_storageclass.py | 58 ++++++++++++++++++++++++++- 3 files changed, 69 insertions(+), 3 deletions(-) diff --git a/moto/s3/models.py b/moto/s3/models.py index 98229539e689..5fa115d69eb8 100644 --- a/moto/s3/models.py +++ b/moto/s3/models.py @@ -169,6 +169,9 @@ def set_storage_class(self, storage): raise InvalidStorageClass(storage=storage) self._storage_class = storage + def set_expiry(self, expiry): + self._expiry = expiry + def set_acl(self, acl): self.acl = acl @@ -1689,6 +1692,9 @@ def copy_key( new_key.set_storage_class(storage) if acl is not None: new_key.set_acl(acl) + if key.storage_class in "GLACIER": + # Object copied from Glacier object should not have expiry + new_key.set_expiry(None) dest_bucket.keys[dest_key_name] = new_key diff --git a/moto/s3/responses.py b/moto/s3/responses.py index 530365a6eba4..a7fb822109c3 100644 --- a/moto/s3/responses.py +++ b/moto/s3/responses.py @@ -1276,7 +1276,13 @@ def _key_response_put(self, request, body, bucket_name, query, key_name, headers if key is not None: if key.storage_class in ["GLACIER", "DEEP_ARCHIVE"]: - raise ObjectNotInActiveTierError(key) + if key.response_dict.get( + "x-amz-restore" + ) is None or 'ongoing-request="true"' in key.response_dict.get( + "x-amz-restore" + ): + raise ObjectNotInActiveTierError(key) + self.backend.copy_key( src_bucket, src_key, diff --git a/tests/test_s3/test_s3_storageclass.py b/tests/test_s3/test_s3_storageclass.py index f1a0479b2ad3..a89b4a896dbf 100644 --- a/tests/test_s3/test_s3_storageclass.py +++ b/tests/test_s3/test_s3_storageclass.py @@ -156,7 +156,7 @@ def test_s3_default_storage_class(): @mock_s3 -def test_s3_copy_object_error_for_glacier_storage_class(): +def test_s3_copy_object_error_for_glacier_storage_class_not_restored(): s3 = boto3.client("s3") s3.create_bucket( Bucket="Bucket", CreateBucketConfiguration={"LocationConstraint": "us-west-1"} @@ -177,7 +177,7 @@ def test_s3_copy_object_error_for_glacier_storage_class(): @mock_s3 -def test_s3_copy_object_error_for_deep_archive_storage_class(): +def test_s3_copy_object_error_for_deep_archive_storage_class_not_restored(): s3 = boto3.client("s3") s3.create_bucket( Bucket="Bucket", CreateBucketConfiguration={"LocationConstraint": "us-west-1"} @@ -195,3 +195,57 @@ def test_s3_copy_object_error_for_deep_archive_storage_class(): ) exc.exception.response["Error"]["Code"].should.equal("ObjectNotInActiveTierError") + + +@mock_s3 +def test_s3_copy_object_for_glacier_storage_class_restored(): + s3 = boto3.client("s3", region_name="us-east-1") + s3.create_bucket(Bucket="Bucket") + + s3.put_object( + Bucket="Bucket", Key="First_Object", Body="Body", StorageClass="GLACIER" + ) + + s3.create_bucket(Bucket="Bucket2") + s3.restore_object(Bucket="Bucket", Key="First_Object", RestoreRequest={"Days": 123}) + + s3.copy_object( + CopySource={"Bucket": "Bucket", "Key": "First_Object"}, + 
Bucket="Bucket2", + Key="Second_Object", + ) + + list_of_copied_objects = s3.list_objects(Bucket="Bucket2") + # checks that copy of restored Glacier object has STANDARD storage class + list_of_copied_objects["Contents"][0]["StorageClass"].should.equal("STANDARD") + # checks that metadata of copy has no Restore property + s3.head_object(Bucket="Bucket2", Key="Second_Object").should.not_have.property( + "Restore" + ) + + +@mock_s3 +def test_s3_copy_object_for_deep_archive_storage_class_restored(): + s3 = boto3.client("s3", region_name="us-east-1") + s3.create_bucket(Bucket="Bucket") + + s3.put_object( + Bucket="Bucket", Key="First_Object", Body="Body", StorageClass="DEEP_ARCHIVE" + ) + + s3.create_bucket(Bucket="Bucket2") + s3.restore_object(Bucket="Bucket", Key="First_Object", RestoreRequest={"Days": 123}) + + s3.copy_object( + CopySource={"Bucket": "Bucket", "Key": "First_Object"}, + Bucket="Bucket2", + Key="Second_Object", + ) + + list_of_copied_objects = s3.list_objects(Bucket="Bucket2") + # checks that copy of restored Glacier object has STANDARD storage class + list_of_copied_objects["Contents"][0]["StorageClass"].should.equal("STANDARD") + # checks that metadata of copy has no Restore property + s3.head_object(Bucket="Bucket2", Key="Second_Object").should.not_have.property( + "Restore" + ) From 0b9903a3df10bc337a6ed0564214649c5227a461 Mon Sep 17 00:00:00 2001 From: usmangani1 Date: Mon, 21 Sep 2020 20:51:18 +0530 Subject: [PATCH 533/658] Fix:s3 List Object response:delimiter (#3254) * Fix:s3 List Object delimiter in response * fixed tests * fixed failed tests Co-authored-by: usmankb --- moto/s3/responses.py | 4 +++- tests/test_s3/test_s3.py | 17 ++++++++++++++--- 2 files changed, 17 insertions(+), 4 deletions(-) diff --git a/moto/s3/responses.py b/moto/s3/responses.py index a7fb822109c3..e4259b57f58f 100644 --- a/moto/s3/responses.py +++ b/moto/s3/responses.py @@ -1774,7 +1774,9 @@ def _invalid_headers(self, url, headers): {{ prefix }} {% endif %} {{ max_keys }} - {{ delimiter }} + {% if delimiter %} + {{ delimiter }} + {% endif %} {{ is_truncated }} {% if next_marker %} {{ next_marker }} diff --git a/tests/test_s3/test_s3.py b/tests/test_s3/test_s3.py index 8cc9a740cdf2..b213a9a72624 100644 --- a/tests/test_s3/test_s3.py +++ b/tests/test_s3/test_s3.py @@ -1709,6 +1709,17 @@ def test_website_redirect_location(): resp["WebsiteRedirectLocation"].should.equal(url) +@mock_s3 +def test_delimiter_optional_in_response(): + s3 = boto3.client("s3", region_name=DEFAULT_REGION_NAME) + s3.create_bucket(Bucket="mybucket") + s3.put_object(Bucket="mybucket", Key="one", Body=b"1") + resp = s3.list_objects(Bucket="mybucket", MaxKeys=1) + assert resp.get("Delimiter") is None + resp = s3.list_objects(Bucket="mybucket", MaxKeys=1, Delimiter="/") + assert resp.get("Delimiter") == "/" + + @mock_s3 def test_boto3_list_objects_truncated_response(): s3 = boto3.client("s3", region_name=DEFAULT_REGION_NAME) @@ -1725,7 +1736,7 @@ def test_boto3_list_objects_truncated_response(): assert resp["MaxKeys"] == 1 assert resp["IsTruncated"] == True assert resp.get("Prefix") is None - assert resp["Delimiter"] == "None" + assert resp.get("Delimiter") is None assert "NextMarker" in resp next_marker = resp["NextMarker"] @@ -1738,7 +1749,7 @@ def test_boto3_list_objects_truncated_response(): assert resp["MaxKeys"] == 1 assert resp["IsTruncated"] == True assert resp.get("Prefix") is None - assert resp["Delimiter"] == "None" + assert resp.get("Delimiter") is None assert "NextMarker" in resp next_marker = resp["NextMarker"] @@ 
-1751,7 +1762,7 @@ def test_boto3_list_objects_truncated_response(): assert resp["MaxKeys"] == 1 assert resp["IsTruncated"] == False assert resp.get("Prefix") is None - assert resp["Delimiter"] == "None" + assert resp.get("Delimiter") is None assert "NextMarker" not in resp From 2e0e542efe0e24c4f51e6c8ecddba86fac8edc56 Mon Sep 17 00:00:00 2001 From: ayushbhawsar <41651229+ayushbhawsar@users.noreply.github.com> Date: Mon, 21 Sep 2020 23:10:07 +0530 Subject: [PATCH 534/658] added cognito idp function admin_set_user_password to the code (#3328) * added cognito idp function to the code * fixed linting issues --- IMPLEMENTATION_COVERAGE.md | 2 +- moto/cognitoidp/models.py | 9 ++++++++- moto/cognitoidp/responses.py | 10 ++++++++++ tests/test_cognitoidp/test_cognitoidp.py | 24 ++++++++++++++++++++++++ 4 files changed, 43 insertions(+), 2 deletions(-) diff --git a/IMPLEMENTATION_COVERAGE.md b/IMPLEMENTATION_COVERAGE.md index 90ebf9a57339..81611ace04e2 100644 --- a/IMPLEMENTATION_COVERAGE.md +++ b/IMPLEMENTATION_COVERAGE.md @@ -1687,7 +1687,7 @@ - [ ] admin_reset_user_password - [ ] admin_respond_to_auth_challenge - [ ] admin_set_user_mfa_preference -- [ ] admin_set_user_password +- [X] admin_set_user_password - [ ] admin_set_user_settings - [ ] admin_update_auth_event_feedback - [ ] admin_update_device_status diff --git a/moto/cognitoidp/models.py b/moto/cognitoidp/models.py index bfa7177f16c8..6ee71cbc04b4 100644 --- a/moto/cognitoidp/models.py +++ b/moto/cognitoidp/models.py @@ -353,7 +353,6 @@ def expand_attrs(attrs): class CognitoResourceServer(BaseModel): def __init__(self, user_pool_id, identifier, name, scopes): - self.user_pool_id = user_pool_id self.identifier = identifier self.name = name @@ -1035,6 +1034,14 @@ def set_user_mfa_preference( else: raise NotAuthorizedError(access_token) + def admin_set_user_password(self, user_pool_id, username, password, permanent): + user = self.admin_get_user(user_pool_id, username) + user.password = password + if permanent: + user.status = UserStatus["CONFIRMED"] + else: + user.status = UserStatus["FORCE_CHANGE_PASSWORD"] + cognitoidp_backends = {} for region in Session().get_available_regions("cognito-idp"): diff --git a/moto/cognitoidp/responses.py b/moto/cognitoidp/responses.py index 78725bcf14e6..e10a122823a7 100644 --- a/moto/cognitoidp/responses.py +++ b/moto/cognitoidp/responses.py @@ -449,6 +449,16 @@ def set_user_mfa_preference(self): ) return "" + def admin_set_user_password(self): + user_pool_id = self._get_param("UserPoolId") + username = self._get_param("Username") + password = self._get_param("Password") + permanent = self._get_param("Permanent") + cognitoidp_backends[self.region].admin_set_user_password( + user_pool_id, username, password, permanent + ) + return "" + class CognitoIdpJsonWebKeyResponse(BaseResponse): def __init__(self): diff --git a/tests/test_cognitoidp/test_cognitoidp.py b/tests/test_cognitoidp/test_cognitoidp.py index 06dae9951d44..a5212b82e1a7 100644 --- a/tests/test_cognitoidp/test_cognitoidp.py +++ b/tests/test_cognitoidp/test_cognitoidp.py @@ -1816,6 +1816,30 @@ def test_respond_to_auth_challenge_with_invalid_secret_hash(): caught.should.be.true +@mock_cognitoidp +def test_admin_set_user_password(): + conn = boto3.client("cognito-idp", "us-west-2") + + username = str(uuid.uuid4()) + value = str(uuid.uuid4()) + password = str(uuid.uuid4()) + user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] + conn.admin_create_user( + UserPoolId=user_pool_id, + Username=username, + 
UserAttributes=[{"Name": "thing", "Value": value}], + ) + conn.admin_set_user_password( + UserPoolId=user_pool_id, Username=username, Password=password, Permanent=True + ) + result = conn.admin_get_user(UserPoolId=user_pool_id, Username=username) + result["Username"].should.equal(username) + result["UserAttributes"].should.have.length_of(1) + result["UserAttributes"][0]["Name"].should.equal("thing") + result["UserAttributes"][0]["Value"].should.equal(value) + result["UserStatus"].should.equal("CONFIRMED") + + # Test will retrieve public key from cognito.amazonaws.com/.well-known/jwks.json, # which isnt mocked in ServerMode if not settings.TEST_SERVER_MODE: From 7bc5b5c08f8f5f7f92312983c2dbccd43c59f41f Mon Sep 17 00:00:00 2001 From: Nick Stocchero Date: Thu, 30 Jul 2020 08:17:35 -0600 Subject: [PATCH 535/658] Add IAM Role and Policy to Config --- CONFIG_README.md | 4 +- moto/config/models.py | 4 +- moto/core/models.py | 21 ++ moto/iam/config.py | 173 ++++++++++++++ moto/iam/models.py | 148 +++++++++++- tests/test_iam/test_iam.py | 448 +++++++++++++++++++++++++++++++++++++ 6 files changed, 793 insertions(+), 5 deletions(-) create mode 100644 moto/iam/config.py diff --git a/CONFIG_README.md b/CONFIG_README.md index 356bb87a0f23..e223c84571b5 100644 --- a/CONFIG_README.md +++ b/CONFIG_README.md @@ -23,8 +23,8 @@ However, this will only work on resource types that have this enabled. ### Current enabled resource types: -1. S3 - +1. S3 (all) +1. IAM (Role, Policy) ## Developer Guide diff --git a/moto/config/models.py b/moto/config/models.py index b6dc4672d8ce..77f46e644c12 100644 --- a/moto/config/models.py +++ b/moto/config/models.py @@ -47,8 +47,8 @@ from moto.core import BaseBackend, BaseModel from moto.s3.config import s3_account_public_access_block_query, s3_config_query - from moto.core import ACCOUNT_ID as DEFAULT_ACCOUNT_ID +from moto.iam.config import role_config_query, policy_config_query POP_STRINGS = [ "capitalizeStart", @@ -64,6 +64,8 @@ RESOURCE_MAP = { "AWS::S3::Bucket": s3_config_query, "AWS::S3::AccountPublicAccessBlock": s3_account_public_access_block_query, + "AWS::IAM::Role": role_config_query, + "AWS::IAM::Policy": policy_config_query, } diff --git a/moto/core/models.py b/moto/core/models.py index ae241322c211..bc7d282fdf08 100644 --- a/moto/core/models.py +++ b/moto/core/models.py @@ -766,6 +766,27 @@ def get_config_resource( """ raise NotImplementedError() + def aggregate_regions(self, path, backend_region, resource_region): + """ + Returns a list of "region\1eresourcename" strings + """ + + filter_region = backend_region or resource_region + if filter_region: + filter_resources = list(self.backends[filter_region].__dict__[path].keys()) + return map( + lambda resource: "{}\1e{}".format(filter_region, resource), + filter_resources, + ) + + # If we don't have a filter region + ret = [] + for region in self.backends: + this_region_resources = list(self.backends[region].__dict__[path].keys()) + for resource in this_region_resources: + ret.append("{}\1e{}".format(region, resource)) + return ret + class base_decorator(object): mock_backend = MockAWS diff --git a/moto/iam/config.py b/moto/iam/config.py new file mode 100644 index 000000000000..4bb38124865c --- /dev/null +++ b/moto/iam/config.py @@ -0,0 +1,173 @@ +import json + +from moto.core.exceptions import InvalidNextTokenException +from moto.core.models import ConfigQueryModel +from moto.iam import iam_backends + + +class RoleConfigQuery(ConfigQueryModel): + def list_config_service_resources( + self, + resource_ids, + 
resource_name, + limit, + next_token, + backend_region=None, + resource_region=None, + ): + # For aggregation -- did we get both a resource ID and a resource name? + if resource_ids and resource_name: + # If the values are different, then return an empty list: + if resource_name not in resource_ids: + return [], None + + role_list = self.aggregate_regions("roles", backend_region, resource_region) + + if not role_list: + return [], None + + # Pagination logic: + sorted_roles = sorted(role_list) + new_token = None + + # Get the start: + if not next_token: + start = 0 + else: + # "Tokens" are region + \00 + resource ID. + if next_token not in sorted_roles: + raise InvalidNextTokenException() + + start = sorted_roles.index(next_token) + + # Get the list of items to collect: + role_list = sorted_roles[start : (start + limit)] + + if len(sorted_roles) > (start + limit): + new_token = sorted_roles[start + limit] + + return ( + [ + { + "type": "AWS::IAM::Role", + "id": role.split("\1e")[1], + "name": role.split("\1e")[1], + "region": role.split("\1e")[0], + } + for role in role_list + ], + new_token, + ) + + def get_config_resource( + self, resource_id, resource_name=None, backend_region=None, resource_region=None + ): + + role = self.backends["global"].roles.get(resource_id, {}) + + if not role: + return + + if resource_name and role.name != resource_name: + return + + # Format the bucket to the AWS Config format: + config_data = role.to_config_dict() + + # The 'configuration' field is also a JSON string: + config_data["configuration"] = json.dumps(config_data["configuration"]) + + # Supplementary config need all values converted to JSON strings if they are not strings already: + for field, value in config_data["supplementaryConfiguration"].items(): + if not isinstance(value, str): + config_data["supplementaryConfiguration"][field] = json.dumps(value) + + return config_data + + +class PolicyConfigQuery(ConfigQueryModel): + def list_config_service_resources( + self, + resource_ids, + resource_name, + limit, + next_token, + backend_region=None, + resource_region=None, + ): + # For aggregation -- did we get both a resource ID and a resource name? + if resource_ids and resource_name: + # If the values are different, then return an empty list: + if resource_name not in resource_ids: + return [], None + + # We don't want to include AWS Managed Policies + policy_list = filter( + lambda policy: not policy.split("\1e")[1].startswith("arn:aws:iam::aws"), + self.aggregate_regions("managed_policies", backend_region, resource_region), + ) + + if not policy_list: + return [], None + + # Pagination logic: + sorted_policies = sorted(policy_list) + new_token = None + + # Get the start: + if not next_token: + start = 0 + else: + # "Tokens" are region + \00 + resource ID. 
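+            # A caller requesting the next page passes the previous call's
+            # new_token back verbatim, e.g. a hypothetical
+            # "global\x1earn:aws:iam::123456789012:policy/my-policy"; any value
+            # that is not an exact element of the sorted list is rejected.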
+ if next_token not in sorted_policies: + raise InvalidNextTokenException() + + start = sorted_policies.index(next_token) + + # Get the list of items to collect: + policy_list = sorted_policies[start : (start + limit)] + + if len(sorted_policies) > (start + limit): + new_token = sorted_policies[start + limit] + + return ( + [ + { + "type": "AWS::IAM::Policy", + "id": policy.split("\1e")[1], + "name": policy.split("\1e")[1], + "region": policy.split("\1e")[0], + } + for policy in policy_list + ], + new_token, + ) + + def get_config_resource( + self, resource_id, resource_name=None, backend_region=None, resource_region=None + ): + + policy = self.backends["global"].managed_policies.get(resource_id, {}) + + if not policy: + return + + if resource_name and policy.name != resource_name: + return + + # Format the bucket to the AWS Config format: + config_data = policy.to_config_dict() + + # The 'configuration' field is also a JSON string: + config_data["configuration"] = json.dumps(config_data["configuration"]) + + # Supplementary config need all values converted to JSON strings if they are not strings already: + for field, value in config_data["supplementaryConfiguration"].items(): + if not isinstance(value, str): + config_data["supplementaryConfiguration"][field] = json.dumps(value) + + return config_data + + +role_config_query = RoleConfigQuery(iam_backends) +policy_config_query = PolicyConfigQuery(iam_backends) diff --git a/moto/iam/models.py b/moto/iam/models.py index 3a174e17bb5f..9ae1ddcdf7f3 100755 --- a/moto/iam/models.py +++ b/moto/iam/models.py @@ -8,11 +8,13 @@ from datetime import datetime import json import re +import time from cryptography import x509 from cryptography.hazmat.backends import default_backend from six.moves.urllib.parse import urlparse +from six.moves.urllib import parse from moto.core.exceptions import RESTError from moto.core import BaseBackend, BaseModel, ACCOUNT_ID, CloudFormationModel from moto.core.utils import ( @@ -153,7 +155,7 @@ def __init__(self, url, thumbprint_list, client_id_list=None): self._errors = [] self._validate(url, thumbprint_list, client_id_list) - parsed_url = urlparse(url) + parsed_url = parse.urlparse(url) self.url = parsed_url.netloc + parsed_url.path self.thumbprint_list = thumbprint_list self.client_id_list = client_id_list @@ -201,7 +203,7 @@ def _validate(self, url, thumbprint_list, client_id_list): self._raise_errors() - parsed_url = urlparse(url) + parsed_url = parse.urlparse(url) if not parsed_url.scheme or not parsed_url.netloc: raise ValidationError("Invalid Open ID Connect Provider URL") @@ -265,6 +267,48 @@ def detach_from(self, obj): def arn(self): return "arn:aws:iam::{0}:policy{1}{2}".format(ACCOUNT_ID, self.path, self.name) + def to_config_dict(self): + return { + "version": "1.3", + "configurationItemCaptureTime": str(self.create_date), + "configurationItemStatus": "OK", + "configurationStateId": str( + int(time.mktime(self.create_date.timetuple())) + ), # PY2 and 3 compatible + "arn": "arn:aws:iam::{}:policy/{}".format(ACCOUNT_ID, self.name), + "resourceType": "AWS::IAM::Policy", + "resourceId": self.id, + "resourceName": self.name, + "awsRegion": "global", + "availabilityZone": "Not Applicable", + "resourceCreationTime": str(self.create_date), + "configuration": { + "policyName": self.name, + "policyId": self.id, + "arn": "arn:aws:iam::{}:policy/{}".format(ACCOUNT_ID, self.name), + "path": self.path, + "defaultVersionId": self.default_version_id, + "attachmentCount": self.attachment_count, + 
"permissionsBoundaryUsageCount": 0, + "isAttachable": ManagedPolicy.is_attachable, + "description": self.description, + "createDate": str(self.create_date.isoformat()), + "updateDate": str(self.create_date.isoformat()), + "policyVersionList": list( + map( + lambda version: { + "document": parse.quote(version.document), + "versionId": version.version_id, + "isDefaultVersion": version.is_default, + "createDate": str(version.create_date), + }, + self.versions, + ) + ), + }, + "supplementaryConfiguration": {}, + } + class AWSManagedPolicy(ManagedPolicy): """AWS-managed policy.""" @@ -513,6 +557,69 @@ def create_from_cloudformation_json( def arn(self): return "arn:aws:iam::{0}:role{1}{2}".format(ACCOUNT_ID, self.path, self.name) + def to_config_dict(self): + _managed_policies = [] + for key in self.managed_policies.keys(): + _managed_policies.append( + {"policyArn": key, "policyName": iam_backend.managed_policies[key].name} + ) + + _role_policy_list = [] + for key, value in self.policies.items(): + _role_policy_list.append( + {"policyName": key, "policyDocument": parse.quote(value)} + ) + + _instance_profiles = [] + for key, instance_profile in iam_backend.instance_profiles.items(): + for role in instance_profile.roles: + _instance_profiles.append(instance_profile.to_embedded_config_dict()) + break + + config_dict = { + "version": "1.3", + "configurationItemCaptureTime": str(self.create_date), + "configurationItemStatus": "ResourceDiscovered", + "configurationStateId": str( + int(time.mktime(self.create_date.timetuple())) + ), # PY2 and 3 compatible + "arn": "arn:aws:iam::{}:role/{}".format(ACCOUNT_ID, self.name), + "resourceType": "AWS::IAM::Role", + "resourceId": self.name, + "resourceName": self.name, + "awsRegion": "global", + "availabilityZone": "Not Applicable", + "resourceCreationTime": str(self.create_date), + "relatedEvents": [], + "relationships": [], + "tags": self.tags, + "configuration": { + "path": self.path, + "roleName": self.name, + "roleId": self.id, + "arn": "arn:aws:iam::{}:role/{}".format(ACCOUNT_ID, self.name), + "assumeRolePolicyDocument": parse.quote( + self.assume_role_policy_document + ) + if self.assume_role_policy_document + else None, + "instanceProfileList": _instance_profiles, + "rolePolicyList": _role_policy_list, + "createDate": self.create_date.isoformat(), + "attachedManagedPolicies": _managed_policies, + "permissionsBoundary": self.permissions_boundary, + "tags": list( + map( + lambda key: {"key": key, "value": self.tags[key]["Value"]}, + self.tags, + ) + ), + "roleLastUsed": None, + }, + "supplementaryConfiguration": {}, + } + return config_dict + def put_policy(self, policy_name, policy_json): self.policies[policy_name] = policy_json @@ -590,6 +697,43 @@ def get_cfn_attribute(self, attribute_name): return self.arn raise UnformattedGetAttTemplateException() + def to_embedded_config_dict(self): + # Instance Profiles aren't a config item itself, but they are returned in IAM roles with + # a "config like" json structure It's also different than Role.to_config_dict() + roles = [] + for role in self.roles: + roles.append( + { + "path": role.path, + "roleName": role.name, + "roleId": role.id, + "arn": "arn:aws:iam::{}:role/{}".format(ACCOUNT_ID, role.name), + "createDate": str(role.create_date), + "assumeRolePolicyDocument": parse.quote( + role.assume_role_policy_document + ), + "description": role.description, + "maxSessionDuration": None, + "permissionsBoundary": role.permissions_boundary, + "tags": list( + map( + lambda key: {"key": key, "value": 
role.tags[key]["Value"]}, + role.tags, + ) + ), + "roleLastUsed": None, + } + ) + + return { + "path": self.path, + "instanceProfileName": self.name, + "instanceProfileId": self.id, + "arn": "arn:aws:iam::{}:instance-profile/{}".format(ACCOUNT_ID, self.name), + "createDate": str(self.create_date), + "roles": roles, + } + class Certificate(BaseModel): def __init__(self, cert_name, cert_body, private_key, cert_chain=None, path=None): diff --git a/tests/test_iam/test_iam.py b/tests/test_iam/test_iam.py index 288825d6e389..b42f3d76f0d7 100644 --- a/tests/test_iam/test_iam.py +++ b/tests/test_iam/test_iam.py @@ -19,6 +19,7 @@ from datetime import datetime from tests.helpers import requires_boto_gte from uuid import uuid4 +from six.moves.urllib import parse MOCK_CERT = """-----BEGIN CERTIFICATE----- @@ -2882,3 +2883,450 @@ def test_delete_role_with_instance_profiles_present(): role_names = [role["RoleName"] for role in iam.list_roles()["Roles"]] assert "Role1" in role_names assert "Role2" not in role_names + + +@mock_iam +def test_delete_account_password_policy_errors(): + client = boto3.client("iam", region_name="us-east-1") + + client.delete_account_password_policy.when.called_with().should.throw( + ClientError, "The account policy with name PasswordPolicy cannot be found." + ) + + +@mock_iam +def test_role_list_config_discovered_resources(): + from moto.iam.config import role_config_query + from moto.iam.utils import random_resource_id + + # Without any roles + assert role_config_query.list_config_service_resources(None, None, 100, None) == ( + [], + None, + ) + + # Create a role + role_config_query.backends["global"].create_role( + role_name="something", + assume_role_policy_document=None, + path="/", + permissions_boundary=None, + description="something", + tags=[], + max_session_duration=3600, + ) + + result = role_config_query.list_config_service_resources(None, None, 100, None)[0] + assert len(result) == 1 + + # The role gets a random ID, so we have to grab it + role = result[0] + assert role["type"] == "AWS::IAM::Role" + assert len(role["id"]) == len(random_resource_id()) + assert role["id"] == role["name"] + assert role["region"] == "global" + + +@mock_iam +def test_policy_list_config_discovered_resources(): + from moto.iam.config import policy_config_query + + # Without any policies + assert policy_config_query.list_config_service_resources(None, None, 100, None) == ( + [], + None, + ) + + basic_policy = { + "Version": "2012-10-17", + "Statement": [ + {"Action": ["ec2:DeleteKeyPair"], "Effect": "Deny", "Resource": "*"} + ], + } + + # Create a role + policy_config_query.backends["global"].create_policy( + description="mypolicy", + path="", + policy_document=json.dumps(basic_policy), + policy_name="mypolicy", + ) + + result = policy_config_query.list_config_service_resources(None, None, 100, None)[0] + assert len(result) == 1 + + policy = result[0] + assert policy["type"] == "AWS::IAM::Policy" + assert policy["id"] == policy["name"] == "arn:aws:iam::123456789012:policy/mypolicy" + assert policy["region"] == "global" + + +@mock_iam +def test_role_config_dict(): + from moto.iam.config import role_config_query, policy_config_query + from moto.iam.utils import random_resource_id + + # Without any roles + assert not role_config_query.get_config_resource("something") + assert role_config_query.list_config_service_resources(None, None, 100, None) == ( + [], + None, + ) + + basic_assume_role = { + "Version": "2012-10-17", + "Statement": [ + {"Effect": "Allow", "Principal": {"AWS": "*"}, 
"Action": "sts:AssumeRole"} + ], + } + + basic_policy = { + "Version": "2012-10-17", + "Statement": [{"Action": ["ec2:*"], "Effect": "Allow", "Resource": "*"}], + } + + # Create a policy for use in role permissions boundary + policy_config_query.backends["global"].create_policy( + description="basic_policy", + path="/", + policy_document=json.dumps(basic_policy), + policy_name="basic_policy", + ) + + policy_arn = policy_config_query.list_config_service_resources( + None, None, 100, None + )[0][0]["id"] + assert policy_arn is not None + + # Create some roles (and grab them repeatedly since they create with random names) + role_config_query.backends["global"].create_role( + role_name="plain_role", + assume_role_policy_document=None, + path="/", + permissions_boundary=None, + description="plain_role", + tags=[{"Key": "foo", "Value": "bar"}], + max_session_duration=3600, + ) + + plain_role = role_config_query.list_config_service_resources(None, None, 100, None)[ + 0 + ][0] + assert plain_role is not None + assert len(plain_role["id"]) == len(random_resource_id()) + + role_config_query.backends["global"].create_role( + role_name="assume_role", + assume_role_policy_document=json.dumps(basic_assume_role), + path="/", + permissions_boundary=None, + description="assume_role", + tags=[], + max_session_duration=3600, + ) + + assume_role = next( + role + for role in role_config_query.list_config_service_resources( + None, None, 100, None + )[0] + if role["id"] not in [plain_role["id"]] + ) + assert assume_role is not None + assert len(assume_role["id"]) == len(random_resource_id()) + assert assume_role["id"] is not plain_role["id"] + + role_config_query.backends["global"].create_role( + role_name="assume_and_permission_boundary_role", + assume_role_policy_document=json.dumps(basic_assume_role), + path="/", + permissions_boundary=policy_arn, + description="assume_and_permission_boundary_role", + tags=[], + max_session_duration=3600, + ) + + assume_and_permission_boundary_role = next( + role + for role in role_config_query.list_config_service_resources( + None, None, 100, None + )[0] + if role["id"] not in [plain_role["id"], assume_role["id"]] + ) + assert assume_and_permission_boundary_role is not None + assert len(assume_and_permission_boundary_role["id"]) == len(random_resource_id()) + assert assume_and_permission_boundary_role["id"] is not plain_role["id"] + assert assume_and_permission_boundary_role["id"] is not assume_role["id"] + + role_config_query.backends["global"].create_role( + role_name="role_with_attached_policy", + assume_role_policy_document=json.dumps(basic_assume_role), + path="/", + permissions_boundary=None, + description="role_with_attached_policy", + tags=[], + max_session_duration=3600, + ) + role_config_query.backends["global"].attach_role_policy( + policy_arn, "role_with_attached_policy" + ) + role_with_attached_policy = next( + role + for role in role_config_query.list_config_service_resources( + None, None, 100, None + )[0] + if role["id"] + not in [ + plain_role["id"], + assume_role["id"], + assume_and_permission_boundary_role["id"], + ] + ) + assert role_with_attached_policy is not None + assert len(role_with_attached_policy["id"]) == len(random_resource_id()) + assert role_with_attached_policy["id"] is not plain_role["id"] + assert role_with_attached_policy["id"] is not assume_role["id"] + assert ( + role_with_attached_policy["id"] is not assume_and_permission_boundary_role["id"] + ) + + role_config_query.backends["global"].create_role( + 
role_name="role_with_inline_policy", + assume_role_policy_document=json.dumps(basic_assume_role), + path="/", + permissions_boundary=None, + description="role_with_inline_policy", + tags=[], + max_session_duration=3600, + ) + role_config_query.backends["global"].put_role_policy( + "role_with_inline_policy", "inline_policy", json.dumps(basic_policy) + ) + + role_with_inline_policy = next( + role + for role in role_config_query.list_config_service_resources( + None, None, 100, None + )[0] + if role["id"] + not in [ + plain_role["id"], + assume_role["id"], + assume_and_permission_boundary_role["id"], + role_with_attached_policy["id"], + ] + ) + assert role_with_inline_policy is not None + assert len(role_with_inline_policy["id"]) == len(random_resource_id()) + assert role_with_inline_policy["id"] is not plain_role["id"] + assert role_with_inline_policy["id"] is not assume_role["id"] + assert ( + role_with_inline_policy["id"] is not assume_and_permission_boundary_role["id"] + ) + assert role_with_inline_policy["id"] is not role_with_attached_policy["id"] + + # plain role + plain_role_config = ( + role_config_query.backends["global"].roles[plain_role["id"]].to_config_dict() + ) + assert plain_role_config["version"] == "1.3" + assert plain_role_config["configurationItemStatus"] == "ResourceDiscovered" + assert plain_role_config["configurationStateId"] is not None + assert plain_role_config["arn"] == "arn:aws:iam::123456789012:role/plain_role" + assert plain_role_config["resourceType"] == "AWS::IAM::Role" + assert plain_role_config["resourceId"] == "plain_role" + assert plain_role_config["resourceName"] == "plain_role" + assert plain_role_config["awsRegion"] == "global" + assert plain_role_config["availabilityZone"] == "Not Applicable" + assert plain_role_config["resourceCreationTime"] is not None + assert plain_role_config["tags"] == {"foo": {"Key": "foo", "Value": "bar"}} + assert plain_role_config["configuration"]["path"] == "/" + assert plain_role_config["configuration"]["roleName"] == "plain_role" + assert plain_role_config["configuration"]["roleId"] == plain_role["id"] + assert plain_role_config["configuration"]["arn"] == plain_role_config["arn"] + assert plain_role_config["configuration"]["assumeRolePolicyDocument"] is None + assert plain_role_config["configuration"]["instanceProfileList"] == [] + assert plain_role_config["configuration"]["rolePolicyList"] == [] + assert plain_role_config["configuration"]["attachedManagedPolicies"] == [] + assert plain_role_config["configuration"]["permissionsBoundary"] is None + assert plain_role_config["configuration"]["tags"] == [ + {"key": "foo", "value": "bar"} + ] + assert plain_role_config["supplementaryConfiguration"] == {} + + # assume_role + assume_role_config = ( + role_config_query.backends["global"].roles[assume_role["id"]].to_config_dict() + ) + assert assume_role_config["arn"] == "arn:aws:iam::123456789012:role/assume_role" + assert assume_role_config["resourceId"] == "assume_role" + assert assume_role_config["resourceName"] == "assume_role" + assert assume_role_config["configuration"][ + "assumeRolePolicyDocument" + ] == parse.quote(json.dumps(basic_assume_role)) + + # assume_and_permission_boundary_role + assume_and_permission_boundary_role_config = ( + role_config_query.backends["global"] + .roles[assume_and_permission_boundary_role["id"]] + .to_config_dict() + ) + assert ( + assume_and_permission_boundary_role_config["arn"] + == "arn:aws:iam::123456789012:role/assume_and_permission_boundary_role" + ) + assert ( + 
assume_and_permission_boundary_role_config["resourceId"] + == "assume_and_permission_boundary_role" + ) + assert ( + assume_and_permission_boundary_role_config["resourceName"] + == "assume_and_permission_boundary_role" + ) + assert assume_and_permission_boundary_role_config["configuration"][ + "assumeRolePolicyDocument" + ] == parse.quote(json.dumps(basic_assume_role)) + assert ( + assume_and_permission_boundary_role_config["configuration"][ + "permissionsBoundary" + ] + == policy_arn + ) + + # role_with_attached_policy + role_with_attached_policy_config = ( + role_config_query.backends["global"] + .roles[role_with_attached_policy["id"]] + .to_config_dict() + ) + assert ( + role_with_attached_policy_config["arn"] + == "arn:aws:iam::123456789012:role/role_with_attached_policy" + ) + assert role_with_attached_policy_config["configuration"][ + "attachedManagedPolicies" + ] == [{"policyArn": policy_arn, "policyName": "basic_policy"}] + + # role_with_inline_policy + role_with_inline_policy_config = ( + role_config_query.backends["global"] + .roles[role_with_inline_policy["id"]] + .to_config_dict() + ) + assert ( + role_with_inline_policy_config["arn"] + == "arn:aws:iam::123456789012:role/role_with_inline_policy" + ) + assert role_with_inline_policy_config["configuration"]["rolePolicyList"] == [ + { + "policyName": "inline_policy", + "policyDocument": parse.quote(json.dumps(basic_policy)), + } + ] + + +@mock_iam +def test_policy_config_dict(): + from moto.iam.config import role_config_query, policy_config_query + from moto.iam.utils import random_policy_id + + # Without any roles + assert not policy_config_query.get_config_resource( + "arn:aws:iam::123456789012:policy/basic_policy" + ) + assert policy_config_query.list_config_service_resources(None, None, 100, None) == ( + [], + None, + ) + + basic_policy = { + "Version": "2012-10-17", + "Statement": [{"Action": ["ec2:*"], "Effect": "Allow", "Resource": "*"}], + } + + basic_policy_v2 = { + "Version": "2012-10-17", + "Statement": [ + {"Action": ["ec2:*", "s3:*"], "Effect": "Allow", "Resource": "*"} + ], + } + + policy_config_query.backends["global"].create_policy( + description="basic_policy", + path="/", + policy_document=json.dumps(basic_policy), + policy_name="basic_policy", + ) + + policy_arn = policy_config_query.list_config_service_resources( + None, None, 100, None + )[0][0]["id"] + assert policy_arn == "arn:aws:iam::123456789012:policy/basic_policy" + assert ( + policy_config_query.get_config_resource( + "arn:aws:iam::123456789012:policy/basic_policy" + ) + is not None + ) + + # Create a new version + policy_config_query.backends["global"].create_policy_version( + policy_arn, json.dumps(basic_policy_v2), "true" + ) + + # Create role to trigger attachment + role_config_query.backends["global"].create_role( + role_name="role_with_attached_policy", + assume_role_policy_document=None, + path="/", + permissions_boundary=None, + description="role_with_attached_policy", + tags=[], + max_session_duration=3600, + ) + role_config_query.backends["global"].attach_role_policy( + policy_arn, "role_with_attached_policy" + ) + + policy = ( + role_config_query.backends["global"] + .managed_policies["arn:aws:iam::123456789012:policy/basic_policy"] + .to_config_dict() + ) + assert policy["version"] == "1.3" + assert policy["configurationItemCaptureTime"] is not None + assert policy["configurationItemStatus"] == "OK" + assert policy["configurationStateId"] is not None + assert policy["arn"] == "arn:aws:iam::123456789012:policy/basic_policy" + assert 
policy["resourceType"] == "AWS::IAM::Policy" + assert len(policy["resourceId"]) == len(random_policy_id()) + assert policy["resourceName"] == "basic_policy" + assert policy["awsRegion"] == "global" + assert policy["availabilityZone"] == "Not Applicable" + assert policy["resourceCreationTime"] is not None + assert policy["configuration"]["policyName"] == policy["resourceName"] + assert policy["configuration"]["policyId"] == policy["resourceId"] + assert policy["configuration"]["arn"] == policy["arn"] + assert policy["configuration"]["path"] == "/" + assert policy["configuration"]["defaultVersionId"] == "v2" + assert policy["configuration"]["attachmentCount"] == 1 + assert policy["configuration"]["permissionsBoundaryUsageCount"] == 0 + assert policy["configuration"]["isAttachable"] == True + assert policy["configuration"]["description"] == "basic_policy" + assert policy["configuration"]["createDate"] is not None + assert policy["configuration"]["updateDate"] is not None + assert policy["configuration"]["policyVersionList"] == [ + { + "document": str(parse.quote(json.dumps(basic_policy))), + "versionId": "v1", + "isDefaultVersion": False, + "createDate": policy["configuration"]["policyVersionList"][0]["createDate"], + }, + { + "document": str(parse.quote(json.dumps(basic_policy_v2))), + "versionId": "v2", + "isDefaultVersion": True, + "createDate": policy["configuration"]["policyVersionList"][1]["createDate"], + }, + ] + assert policy["supplementaryConfiguration"] == {} From ff84b634845d67ed504d30db0906fea80248501b Mon Sep 17 00:00:00 2001 From: Nick Stocchero Date: Sun, 2 Aug 2020 21:16:44 -0600 Subject: [PATCH 536/658] address PR comments --- CONFIG_README.md | 15 ++- moto/core/models.py | 26 ++++- moto/iam/config.py | 63 ++++++----- tests/test_iam/test_iam.py | 218 ++++++++++++++++++++++++++++--------- 4 files changed, 232 insertions(+), 90 deletions(-) diff --git a/CONFIG_README.md b/CONFIG_README.md index e223c84571b5..b0ae42181f86 100644 --- a/CONFIG_README.md +++ b/CONFIG_README.md @@ -53,15 +53,14 @@ An example of the above is implemented for S3. You can see that by looking at: 1. `moto/s3/config.py` 1. `moto/config/models.py` -As well as the corresponding unit tests in: +### Testing +For each resource type, you will need to test write tests for a few separate areas: -1. `tests/s3/test_s3.py` -1. `tests/config/test_config.py` +- Test the backend queries to ensure discovered resources come back (ie for `IAM::Policy`, write `tests.tests_iam.test_policy_list_config_discovered_resources`). For writing these tests, you must not make use of `boto` to create resources. You will need to use the backend model methods to provision the resources. This is to make tests compatible with the moto server. You must make tests for the resource type to test listing and object fetching. -Note for unit testing, you will want to add a test to ensure that you can query all the resources effectively. For testing this feature, -the unit tests for the `ConfigQueryModel` will not make use of `boto` to create resources, such as S3 buckets. You will need to use the -backend model methods to provision the resources. This is to make tests compatible with the moto server. You should absolutely make tests -in the resource type to test listing and object fetching. +- Test the config dict for all scenarios (ie for `IAM::Policy`, write `tests.tests_iam.test_policy_config_dict`). 
### Listing S3 is currently the model implementation, but it is also odd in that S3 is a global resource type with regional resource residency. @@ -117,4 +116,4 @@ return for it. When implementing resource config fetching, you will need to return at a minimum `None` if the resource is not found, or a `dict` that looks like what AWS Config would return. -It's recommended to read the comment for the `ConfigQueryModel` 's `get_config_resource` function in [base class here](moto/core/models.py). +It's recommended to read the comment for the `ConfigQueryModel` 's `get_config_resource` function in [base class here](moto/core/models.py). \ No newline at end of file diff --git a/moto/core/models.py b/moto/core/models.py index bc7d282fdf08..422a9dd3d7bf 100644 --- a/moto/core/models.py +++ b/moto/core/models.py @@ -27,8 +27,8 @@ convert_flask_to_responses_response, ) - ACCOUNT_ID = os.environ.get("MOTO_ACCOUNT_ID", "123456789012") +CONFIG_BACKEND_DELIM = "\x1e" # Record Seperator "RS" ASCII Character class BaseMockAWS(object): @@ -768,15 +768,29 @@ def get_config_resource( def aggregate_regions(self, path, backend_region, resource_region): """ - Returns a list of "region\1eresourcename" strings + This method will is called for both aggregated and non-aggregated calls for config resources. + It will figure out how to return the full list of resources for a given regional backend and append them to a final list. + It produces a list of both the region and the resource name with a delimiter character (CONFIG_BACKEND_DELIM, ASCII Record separator, \x1e). + IE: "us-east-1\x1ei-1234567800" + + Each config-enabled resource has a method named `list_config_service_resources` which has to parse the delimiter + ... + :param path: - A dict accessor string applied to the backend that locates the resource. 
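+        (For example, the IAM backend calls aggregate_regions("roles", "global", None), which reads self.backends["global"].roles and emits one delimited entry per key.)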
+ :param backend_region: + :param resource_region: + :return: - Returns a list of "region\x1eresourcename" strings """ filter_region = backend_region or resource_region if filter_region: filter_resources = list(self.backends[filter_region].__dict__[path].keys()) - return map( - lambda resource: "{}\1e{}".format(filter_region, resource), - filter_resources, + return list( + map( + lambda resource: "{}{}{}".format( + filter_region, CONFIG_BACKEND_DELIM, resource + ), + filter_resources, + ) ) # If we don't have a filter region @@ -784,7 +798,7 @@ def aggregate_regions(self, path, backend_region, resource_region): for region in self.backends: this_region_resources = list(self.backends[region].__dict__[path].keys()) for resource in this_region_resources: - ret.append("{}\1e{}".format(region, resource)) + ret.append("{}{}{}".format(region, CONFIG_BACKEND_DELIM, resource)) return ret diff --git a/moto/iam/config.py b/moto/iam/config.py index 4bb38124865c..7074569ec69e 100644 --- a/moto/iam/config.py +++ b/moto/iam/config.py @@ -4,6 +4,8 @@ from moto.core.models import ConfigQueryModel from moto.iam import iam_backends +CONFIG_BACKEND_DELIM = "\x1e" # Record Seperator "RS" ASCII Character + class RoleConfigQuery(ConfigQueryModel): def list_config_service_resources( @@ -15,18 +17,17 @@ def list_config_service_resources( backend_region=None, resource_region=None, ): - # For aggregation -- did we get both a resource ID and a resource name? - if resource_ids and resource_name: - # If the values are different, then return an empty list: - if resource_name not in resource_ids: - return [], None + # IAM roles are "global" and aren't assigned into any availability zone + # The resource ID is a AWS-assigned random string like "AROA0BSVNSZKXVHS00SBJ" + # The resource name is a user-assigned string like "MyDevelopmentAdminRole" - role_list = self.aggregate_regions("roles", backend_region, resource_region) + # Grab roles from backend + role_list = self.aggregate_regions("roles", "global", None) if not role_list: return [], None - # Pagination logic: + # Pagination logic sorted_roles = sorted(role_list) new_token = None @@ -34,7 +35,7 @@ def list_config_service_resources( if not next_token: start = 0 else: - # "Tokens" are region + \00 + resource ID. + # "Tokens" are region + \x1e + resource ID. if next_token not in sorted_roles: raise InvalidNextTokenException() @@ -46,13 +47,16 @@ def list_config_service_resources( if len(sorted_roles) > (start + limit): new_token = sorted_roles[start + limit] + # Each element is a string of "region\x1eresource_id" return ( [ { "type": "AWS::IAM::Role", - "id": role.split("\1e")[1], - "name": role.split("\1e")[1], - "region": role.split("\1e")[0], + "id": role.split(CONFIG_BACKEND_DELIM)[1], + "name": self.backends["global"] + .roles[role.split(CONFIG_BACKEND_DELIM)[1]] + .name, + "region": role.split(CONFIG_BACKEND_DELIM)[0], } for role in role_list ], @@ -71,7 +75,7 @@ def get_config_resource( if resource_name and role.name != resource_name: return - # Format the bucket to the AWS Config format: + # Format the role to the AWS Config format: config_data = role.to_config_dict() # The 'configuration' field is also a JSON string: @@ -95,16 +99,19 @@ def list_config_service_resources( backend_region=None, resource_region=None, ): - # For aggregation -- did we get both a resource ID and a resource name? 
- if resource_ids and resource_name: - # If the values are different, then return an empty list: - if resource_name not in resource_ids: - return [], None - - # We don't want to include AWS Managed Policies + # IAM policies are "global" and aren't assigned into any availability zone + # The resource ID is a AWS-assigned random string like "ANPA0BSVNSZK00SJSPVUJ" + # The resource name is a user-assigned string like "my-development-policy" + + # We don't want to include AWS Managed Policies. This technically needs to + # respect the configuration recorder's 'includeGlobalResourceTypes' setting, + # but it's default set be default, and moto's config doesn't yet support + # custom configuration recorders, we'll just behave as default. policy_list = filter( - lambda policy: not policy.split("\1e")[1].startswith("arn:aws:iam::aws"), - self.aggregate_regions("managed_policies", backend_region, resource_region), + lambda policy: not policy.split(CONFIG_BACKEND_DELIM)[1].startswith( + "arn:aws:iam::aws" + ), + self.aggregate_regions("managed_policies", "global", None), ) if not policy_list: @@ -118,7 +125,7 @@ def list_config_service_resources( if not next_token: start = 0 else: - # "Tokens" are region + \00 + resource ID. + # "Tokens" are region + \x1e + resource ID. if next_token not in sorted_policies: raise InvalidNextTokenException() @@ -134,9 +141,13 @@ def list_config_service_resources( [ { "type": "AWS::IAM::Policy", - "id": policy.split("\1e")[1], - "name": policy.split("\1e")[1], - "region": policy.split("\1e")[0], + "id": self.backends["global"] + .managed_policies[policy.split(CONFIG_BACKEND_DELIM)[1]] + .id, + "name": self.backends["global"] + .managed_policies[policy.split(CONFIG_BACKEND_DELIM)[1]] + .name, + "region": policy.split(CONFIG_BACKEND_DELIM)[0], } for policy in policy_list ], @@ -155,7 +166,7 @@ def get_config_resource( if resource_name and policy.name != resource_name: return - # Format the bucket to the AWS Config format: + # Format the policy to the AWS Config format: config_data = policy.to_config_dict() # The 'configuration' field is also a JSON string: diff --git a/tests/test_iam/test_iam.py b/tests/test_iam/test_iam.py index b42f3d76f0d7..c56a9260ff8b 100644 --- a/tests/test_iam/test_iam.py +++ b/tests/test_iam/test_iam.py @@ -9,7 +9,7 @@ from boto.exception import BotoServerError from botocore.exceptions import ClientError -from moto import mock_iam, mock_iam_deprecated, settings +from moto import mock_config, mock_iam, mock_iam_deprecated, settings from moto.core import ACCOUNT_ID from moto.iam.models import aws_managed_policies from moto.backends import get_backend @@ -2923,48 +2923,14 @@ def test_role_list_config_discovered_resources(): role = result[0] assert role["type"] == "AWS::IAM::Role" assert len(role["id"]) == len(random_resource_id()) - assert role["id"] == role["name"] + assert role["name"] == "something" assert role["region"] == "global" -@mock_iam -def test_policy_list_config_discovered_resources(): - from moto.iam.config import policy_config_query - - # Without any policies - assert policy_config_query.list_config_service_resources(None, None, 100, None) == ( - [], - None, - ) - - basic_policy = { - "Version": "2012-10-17", - "Statement": [ - {"Action": ["ec2:DeleteKeyPair"], "Effect": "Deny", "Resource": "*"} - ], - } - - # Create a role - policy_config_query.backends["global"].create_policy( - description="mypolicy", - path="", - policy_document=json.dumps(basic_policy), - policy_name="mypolicy", - ) - - result = 
policy_config_query.list_config_service_resources(None, None, 100, None)[0] - assert len(result) == 1 - - policy = result[0] - assert policy["type"] == "AWS::IAM::Policy" - assert policy["id"] == policy["name"] == "arn:aws:iam::123456789012:policy/mypolicy" - assert policy["region"] == "global" - - @mock_iam def test_role_config_dict(): from moto.iam.config import role_config_query, policy_config_query - from moto.iam.utils import random_resource_id + from moto.iam.utils import random_resource_id, random_policy_id # Without any roles assert not role_config_query.get_config_resource("something") @@ -2986,17 +2952,21 @@ def test_role_config_dict(): } # Create a policy for use in role permissions boundary - policy_config_query.backends["global"].create_policy( - description="basic_policy", - path="/", - policy_document=json.dumps(basic_policy), - policy_name="basic_policy", + policy_arn = ( + policy_config_query.backends["global"] + .create_policy( + description="basic_policy", + path="/", + policy_document=json.dumps(basic_policy), + policy_name="basic_policy", + ) + .arn ) - policy_arn = policy_config_query.list_config_service_resources( + policy_id = policy_config_query.list_config_service_resources( None, None, 100, None )[0][0]["id"] - assert policy_arn is not None + assert len(policy_id) == len(random_policy_id()) # Create some roles (and grab them repeatedly since they create with random names) role_config_query.backends["global"].create_role( @@ -3225,6 +3195,141 @@ def test_role_config_dict(): ] +@mock_iam +@mock_config +def test_role_config_client(): + from moto.iam.models import ACCOUNT_ID + from moto.iam.utils import random_resource_id + + iam_client = boto3.client("iam", region_name="us-west-2") + config_client = boto3.client("config", region_name="us-west-2") + + account_aggregation_source = { + "AccountIds": [ACCOUNT_ID], + "AllAwsRegions": True, + } + + config_client.put_configuration_aggregator( + ConfigurationAggregatorName="test_aggregator", + AccountAggregationSources=[account_aggregation_source], + ) + + result = config_client.list_discovered_resources(resourceType="AWS::IAM::Role") + assert not result["resourceIdentifiers"] + + role_id = iam_client.create_role( + Path="/", + RoleName="mytestrole", + Description="mytestrole", + AssumeRolePolicyDocument=json.dumps("{ }"), + )["Role"]["RoleId"] + + iam_client.create_role( + Path="/", + RoleName="mytestrole2", + Description="zmytestrole", + AssumeRolePolicyDocument=json.dumps("{ }"), + ) + + # Test non-aggregated query: (everything is getting a random id, so we can't test names by ordering) + result = config_client.list_discovered_resources( + resourceType="AWS::IAM::Role", limit=1 + ) + first_result = result["resourceIdentifiers"][0]["resourceId"] + assert result["resourceIdentifiers"][0]["resourceType"] == "AWS::IAM::Role" + assert len(first_result) == len(random_resource_id()) + + # Test non-aggregated pagination + assert ( + config_client.list_discovered_resources( + resourceType="AWS::IAM::Role", limit=1, nextToken=result["nextToken"] + )["resourceIdentifiers"][0]["resourceId"] + ) != first_result + + # Test aggregated query: (everything is getting a random id, so we can't test names by ordering) + agg_result = config_client.list_aggregate_discovered_resources( + ResourceType="AWS::IAM::Role", + ConfigurationAggregatorName="test_aggregator", + Limit=1, + ) + first_agg_result = agg_result["ResourceIdentifiers"][0]["ResourceId"] + assert agg_result["ResourceIdentifiers"][0]["ResourceType"] == "AWS::IAM::Role" + assert 
len(first_agg_result) == len(random_resource_id()) + assert agg_result["ResourceIdentifiers"][0]["SourceAccountId"] == ACCOUNT_ID + assert agg_result["ResourceIdentifiers"][0]["SourceRegion"] == "global" + + # Test aggregated pagination + assert ( + config_client.list_aggregate_discovered_resources( + ConfigurationAggregatorName="test_aggregator", + ResourceType="AWS::IAM::Role", + Limit=1, + NextToken=agg_result["NextToken"], + )["ResourceIdentifiers"][0]["ResourceId"] + != first_agg_result + ) + + # Test non-aggregated batch get + assert ( + config_client.batch_get_resource_config( + resourceKeys=[{"resourceType": "AWS::IAM::Role", "resourceId": role_id}] + )["baseConfigurationItems"][0]["resourceName"] + == "mytestrole" + ) + + # Test aggregated batch get + assert ( + config_client.batch_get_aggregate_resource_config( + ConfigurationAggregatorName="test_aggregator", + ResourceIdentifiers=[ + { + "SourceAccountId": ACCOUNT_ID, + "SourceRegion": "global", + "ResourceId": role_id, + "ResourceType": "AWS::IAM::Role", + } + ], + )["BaseConfigurationItems"][0]["resourceName"] + == "mytestrole" + ) + + +@mock_iam +def test_policy_list_config_discovered_resources(): + from moto.iam.config import policy_config_query + from moto.iam.utils import random_policy_id + + # Without any policies + assert policy_config_query.list_config_service_resources(None, None, 100, None) == ( + [], + None, + ) + + basic_policy = { + "Version": "2012-10-17", + "Statement": [ + {"Action": ["ec2:DeleteKeyPair"], "Effect": "Deny", "Resource": "*"} + ], + } + + # Create a role + policy_config_query.backends["global"].create_policy( + description="mypolicy", + path="", + policy_document=json.dumps(basic_policy), + policy_name="mypolicy", + ) + + result = policy_config_query.list_config_service_resources(None, None, 100, None)[0] + assert len(result) == 1 + + policy = result[0] + assert policy["type"] == "AWS::IAM::Policy" + assert len(policy["id"]) == len(random_policy_id()) + assert policy["name"] == "mypolicy" + assert policy["region"] == "global" + + @mock_iam def test_policy_config_dict(): from moto.iam.config import role_config_query, policy_config_query @@ -3251,17 +3356,24 @@ def test_policy_config_dict(): ], } - policy_config_query.backends["global"].create_policy( - description="basic_policy", - path="/", - policy_document=json.dumps(basic_policy), - policy_name="basic_policy", + policy_arn = ( + policy_config_query.backends["global"] + .create_policy( + description="basic_policy", + path="/", + policy_document=json.dumps(basic_policy), + policy_name="basic_policy", + ) + .arn ) - policy_arn = policy_config_query.list_config_service_resources( + policy_id = policy_config_query.list_config_service_resources( None, None, 100, None )[0][0]["id"] + assert len(policy_id) == len(random_policy_id()) + assert policy_arn == "arn:aws:iam::123456789012:policy/basic_policy" + assert ( policy_config_query.get_config_resource( "arn:aws:iam::123456789012:policy/basic_policy" @@ -3330,3 +3442,9 @@ def test_policy_config_dict(): }, ] assert policy["supplementaryConfiguration"] == {} + + +@mock_iam +@mock_config +def test_policy_config_client(): + assert 1 == 1 From ceefe970bc5de4e7c35af96978abfcfb13a0405f Mon Sep 17 00:00:00 2001 From: Nick Stocchero Date: Sun, 2 Aug 2020 21:21:59 -0600 Subject: [PATCH 537/658] small flake issue --- moto/core/models.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/moto/core/models.py b/moto/core/models.py index 422a9dd3d7bf..bd5ae66342e0 100644 --- 
a/moto/core/models.py +++ b/moto/core/models.py @@ -768,12 +768,12 @@ def get_config_resource( def aggregate_regions(self, path, backend_region, resource_region): """ - This method will is called for both aggregated and non-aggregated calls for config resources. + This method will is called for both aggregated and non-aggregated calls for config resources. It will figure out how to return the full list of resources for a given regional backend and append them to a final list. It produces a list of both the region and the resource name with a delimiter character (CONFIG_BACKEND_DELIM, ASCII Record separator, \x1e). - IE: "us-east-1\x1ei-1234567800" - - Each config-enabled resource has a method named `list_config_service_resources` which has to parse the delimiter + IE: "us-east-1\x1ei-1234567800" + + Each config-enabled resource has a method named `list_config_service_resources` which has to parse the delimiter ... :param path: - A dict accessor string applied to the backend that locates the resource. :param backend_region: From 8dd90db83cc73ec68e842ccd78842a9d3f2a20a1 Mon Sep 17 00:00:00 2001 From: Nick Stocchero Date: Tue, 4 Aug 2020 09:11:26 -0600 Subject: [PATCH 538/658] add missing test for policies --- moto/iam/config.py | 10 +++- tests/test_iam/test_iam.py | 109 ++++++++++++++++++++++++++++++++++--- 2 files changed, 109 insertions(+), 10 deletions(-) diff --git a/moto/iam/config.py b/moto/iam/config.py index 7074569ec69e..4cd18bedc9f6 100644 --- a/moto/iam/config.py +++ b/moto/iam/config.py @@ -157,8 +157,14 @@ def list_config_service_resources( def get_config_resource( self, resource_id, resource_name=None, backend_region=None, resource_region=None ): - - policy = self.backends["global"].managed_policies.get(resource_id, {}) + # policies are listed in the backend as arns, but we have to accept the PolicyID as the resource_id + # we'll make a really crude search for it + policy = None + for arn in self.backends["global"].managed_policies.keys(): + policy_candidate = self.backends["global"].managed_policies[arn] + if policy_candidate.id == resource_id: + policy = policy_candidate + break if not policy: return diff --git a/tests/test_iam/test_iam.py b/tests/test_iam/test_iam.py index c56a9260ff8b..944b14acd441 100644 --- a/tests/test_iam/test_iam.py +++ b/tests/test_iam/test_iam.py @@ -3373,13 +3373,7 @@ def test_policy_config_dict(): assert len(policy_id) == len(random_policy_id()) assert policy_arn == "arn:aws:iam::123456789012:policy/basic_policy" - - assert ( - policy_config_query.get_config_resource( - "arn:aws:iam::123456789012:policy/basic_policy" - ) - is not None - ) + assert policy_config_query.get_config_resource(policy_id) is not None # Create a new version policy_config_query.backends["global"].create_policy_version( @@ -3447,4 +3441,103 @@ def test_policy_config_dict(): @mock_iam @mock_config def test_policy_config_client(): - assert 1 == 1 + from moto.iam.models import ACCOUNT_ID + from moto.iam.utils import random_policy_id + + basic_policy = { + "Version": "2012-10-17", + "Statement": [{"Action": ["ec2:*"], "Effect": "Allow", "Resource": "*"}], + } + + iam_client = boto3.client("iam", region_name="us-west-2") + config_client = boto3.client("config", region_name="us-west-2") + + account_aggregation_source = { + "AccountIds": [ACCOUNT_ID], + "AllAwsRegions": True, + } + + config_client.put_configuration_aggregator( + ConfigurationAggregatorName="test_aggregator", + AccountAggregationSources=[account_aggregation_source], + ) + + result = 
config_client.list_discovered_resources(resourceType="AWS::IAM::Policy") + assert not result["resourceIdentifiers"] + + policy_id = iam_client.create_policy( + PolicyName="mypolicy", + Path="/", + PolicyDocument=json.dumps(basic_policy), + Description="mypolicy", + )["Policy"]["PolicyId"] + + # second policy + iam_client.create_policy( + PolicyName="zmypolicy", + Path="/", + PolicyDocument=json.dumps(basic_policy), + Description="zmypolicy", + ) + + # Test non-aggregated query: (everything is getting a random id, so we can't test names by ordering) + result = config_client.list_discovered_resources( + resourceType="AWS::IAM::Policy", limit=1 + ) + first_result = result["resourceIdentifiers"][0]["resourceId"] + assert result["resourceIdentifiers"][0]["resourceType"] == "AWS::IAM::Policy" + assert len(first_result) == len(random_policy_id()) + + # Test non-aggregated pagination + assert ( + config_client.list_discovered_resources( + resourceType="AWS::IAM::Policy", limit=1, nextToken=result["nextToken"] + )["resourceIdentifiers"][0]["resourceId"] + ) != first_result + + # Test aggregated query: (everything is getting a random id, so we can't test names by ordering) + agg_result = config_client.list_aggregate_discovered_resources( + ResourceType="AWS::IAM::Policy", + ConfigurationAggregatorName="test_aggregator", + Limit=1, + ) + first_agg_result = agg_result["ResourceIdentifiers"][0]["ResourceId"] + assert agg_result["ResourceIdentifiers"][0]["ResourceType"] == "AWS::IAM::Policy" + assert len(first_agg_result) == len(random_policy_id()) + assert agg_result["ResourceIdentifiers"][0]["SourceAccountId"] == ACCOUNT_ID + assert agg_result["ResourceIdentifiers"][0]["SourceRegion"] == "global" + + # Test aggregated pagination + assert ( + config_client.list_aggregate_discovered_resources( + ConfigurationAggregatorName="test_aggregator", + ResourceType="AWS::IAM::Policy", + Limit=1, + NextToken=agg_result["NextToken"], + )["ResourceIdentifiers"][0]["ResourceId"] + != first_agg_result + ) + + # Test non-aggregated batch get + assert ( + config_client.batch_get_resource_config( + resourceKeys=[{"resourceType": "AWS::IAM::Policy", "resourceId": policy_id}] + )["baseConfigurationItems"][0]["resourceName"] + == "mypolicy" + ) + + # Test aggregated batch get + assert ( + config_client.batch_get_aggregate_resource_config( + ConfigurationAggregatorName="test_aggregator", + ResourceIdentifiers=[ + { + "SourceAccountId": ACCOUNT_ID, + "SourceRegion": "global", + "ResourceId": policy_id, + "ResourceType": "AWS::IAM::Policy", + } + ], + )["BaseConfigurationItems"][0]["resourceName"] + == "mypolicy" + ) From d8cea0213d6f98068b80d97d34c35c9fe46f2230 Mon Sep 17 00:00:00 2001 From: Nick Stocchero Date: Thu, 6 Aug 2020 17:18:57 -0600 Subject: [PATCH 539/658] straighten out filter logic --- moto/core/models.py | 16 ++- moto/iam/config.py | 93 ++++++++++------ tests/test_iam/test_iam.py | 213 +++++++++++++++++++++++++++++++------ 3 files changed, 252 insertions(+), 70 deletions(-) diff --git a/moto/core/models.py b/moto/core/models.py index bd5ae66342e0..b8b4322be6c7 100644 --- a/moto/core/models.py +++ b/moto/core/models.py @@ -768,16 +768,22 @@ def get_config_resource( def aggregate_regions(self, path, backend_region, resource_region): """ - This method will is called for both aggregated and non-aggregated calls for config resources. + This method is called for both aggregated and non-aggregated calls for config resources. 
+ It will figure out how to return the full list of resources for a given regional backend and append them to a final list. It produces a list of both the region and the resource name with a delimiter character (CONFIG_BACKEND_DELIM, ASCII Record separator, \x1e). IE: "us-east-1\x1ei-1234567800" - Each config-enabled resource has a method named `list_config_service_resources` which has to parse the delimiter + You should only use this method if you need to aggregate resources over more than one region. + If your region is global, just query the global backend directly in the `list_config_service_resources` method + + If you use this method, your config-enabled resource must parse the delimited string in it's `list_config_service_resources` method. ... - :param path: - A dict accessor string applied to the backend that locates the resource. - :param backend_region: - :param resource_region: + :param path: - A dict accessor string applied to the backend that locates resources inside that backend. +            For example, if you passed path="keypairs", and you were working with an ec2 moto backend, it would yield the contents from + ec2_moto_backend[region].keypairs + :param backend_region: - Only used for filtering; A string representing the region IE: us-east-1 + :param resource_region: - Only used for filtering; A string representing the region IE: us-east-1 :return: - Returns a list of "region\x1eresourcename" strings """ diff --git a/moto/iam/config.py b/moto/iam/config.py index 4cd18bedc9f6..fdf31b576c4e 100644 --- a/moto/iam/config.py +++ b/moto/iam/config.py @@ -20,43 +20,59 @@ def list_config_service_resources( # IAM roles are "global" and aren't assigned into any availability zone # The resource ID is a AWS-assigned random string like "AROA0BSVNSZKXVHS00SBJ" # The resource name is a user-assigned string like "MyDevelopmentAdminRole" + # Stored in moto backend with the AWS-assigned random string like "AROA0BSVNSZKXVHS00SBJ" - # Grab roles from backend - role_list = self.aggregate_regions("roles", "global", None) + # Grab roles from backend; need the full values since names and id's are different + role_list = list(self.backends["global"].roles.values()) if not role_list: return [], None - # Pagination logic - sorted_roles = sorted(role_list) + # Filter by resource name or ids + if resource_name or resource_ids: + filtered_roles = [] + # resource_name takes precendence over resource_ids + if resource_name: + for role in role_list: + if role.name == resource_name: + filtered_roles = [role] + break + else: + for role in role_list: + if role.id in resource_ids: + filtered_roles.append(role) + + # Filtered roles are now the subject for the listing + role_list = filtered_roles + + # Pagination logic, sort by role id + sorted_roles = sorted(role_list, key=lambda role: role.id) + # sorted_role_ids matches indicies of sorted_roles + sorted_role_ids = list(map(lambda role: role.id, sorted_roles)) new_token = None # Get the start: if not next_token: start = 0 else: - # "Tokens" are region + \x1e + resource ID. 
- if next_token not in sorted_roles: + if next_token not in sorted_role_ids: raise InvalidNextTokenException() - start = sorted_roles.index(next_token) + start = sorted_role_ids.index(next_token) # Get the list of items to collect: role_list = sorted_roles[start : (start + limit)] if len(sorted_roles) > (start + limit): - new_token = sorted_roles[start + limit] + new_token = sorted_role_ids[start + limit] - # Each element is a string of "region\x1eresource_id" return ( [ { "type": "AWS::IAM::Role", - "id": role.split(CONFIG_BACKEND_DELIM)[1], - "name": self.backends["global"] - .roles[role.split(CONFIG_BACKEND_DELIM)[1]] - .name, - "region": role.split(CONFIG_BACKEND_DELIM)[0], + "id": role.id, + "name": role.name, + "region": "global", } for role in role_list ], @@ -102,52 +118,67 @@ def list_config_service_resources( # IAM policies are "global" and aren't assigned into any availability zone # The resource ID is a AWS-assigned random string like "ANPA0BSVNSZK00SJSPVUJ" # The resource name is a user-assigned string like "my-development-policy" + # Stored in moto backend with the arn like "arn:aws:iam::123456789012:policy/my-development-policy" + + policy_list = list(self.backends["global"].managed_policies.values()) # We don't want to include AWS Managed Policies. This technically needs to # respect the configuration recorder's 'includeGlobalResourceTypes' setting, # but it's default set be default, and moto's config doesn't yet support # custom configuration recorders, we'll just behave as default. policy_list = filter( - lambda policy: not policy.split(CONFIG_BACKEND_DELIM)[1].startswith( - "arn:aws:iam::aws" - ), - self.aggregate_regions("managed_policies", "global", None), + lambda policy: not policy.arn.startswith("arn:aws:iam::aws"), policy_list, ) if not policy_list: return [], None - # Pagination logic: - sorted_policies = sorted(policy_list) + # Filter by resource name or ids + if resource_name or resource_ids: + filtered_policies = [] + # resource_name takes precendence over resource_ids + if resource_name: + for policy in policy_list: + if policy.name == resource_name: + filtered_policies = [policy] + break + else: + for policy in policy_list: + if policy.id in resource_ids: + filtered_policies.append(policy) + + # Filtered roles are now the subject for the listing + policy_list = filtered_policies + + # Pagination logic, sort by role id + sorted_policies = sorted(policy_list, key=lambda role: role.id) + # sorted_policy_ids matches indicies of sorted_policies + sorted_policy_ids = list(map(lambda policy: policy.id, sorted_policies)) + new_token = None # Get the start: if not next_token: start = 0 else: - # "Tokens" are region + \x1e + resource ID. 
- if next_token not in sorted_policies: + if next_token not in sorted_policy_ids: raise InvalidNextTokenException() - start = sorted_policies.index(next_token) + start = sorted_policy_ids.index(next_token) # Get the list of items to collect: policy_list = sorted_policies[start : (start + limit)] if len(sorted_policies) > (start + limit): - new_token = sorted_policies[start + limit] + new_token = sorted_policy_ids[start + limit] return ( [ { "type": "AWS::IAM::Policy", - "id": self.backends["global"] - .managed_policies[policy.split(CONFIG_BACKEND_DELIM)[1]] - .id, - "name": self.backends["global"] - .managed_policies[policy.split(CONFIG_BACKEND_DELIM)[1]] - .name, - "region": policy.split(CONFIG_BACKEND_DELIM)[0], + "id": policy.id, + "name": policy.name, + "region": "global", } for policy in policy_list ], diff --git a/tests/test_iam/test_iam.py b/tests/test_iam/test_iam.py index 944b14acd441..f71b96925d2a 100644 --- a/tests/test_iam/test_iam.py +++ b/tests/test_iam/test_iam.py @@ -3217,19 +3217,24 @@ def test_role_config_client(): result = config_client.list_discovered_resources(resourceType="AWS::IAM::Role") assert not result["resourceIdentifiers"] - role_id = iam_client.create_role( - Path="/", - RoleName="mytestrole", - Description="mytestrole", - AssumeRolePolicyDocument=json.dumps("{ }"), - )["Role"]["RoleId"] + # Make 10 policies + roles = [] + num_roles = 10 + for ix in range(1, num_roles + 1): + this_policy = iam_client.create_role( + RoleName="role{}".format(ix), + Path="/", + Description="role{}".format(ix), + AssumeRolePolicyDocument=json.dumps("{ }"), + ) + roles.append( + { + "id": this_policy["Role"]["RoleId"], + "name": this_policy["Role"]["RoleName"], + } + ) - iam_client.create_role( - Path="/", - RoleName="mytestrole2", - Description="zmytestrole", - AssumeRolePolicyDocument=json.dumps("{ }"), - ) + assert len(roles) == num_roles # Test non-aggregated query: (everything is getting a random id, so we can't test names by ordering) result = config_client.list_discovered_resources( @@ -3269,12 +3274,77 @@ def test_role_config_client(): != first_agg_result ) + # Test non-aggregated resource name/id filter + assert ( + config_client.list_discovered_resources( + resourceType="AWS::IAM::Role", resourceName=roles[1]["name"], limit=1, + )["resourceIdentifiers"][0]["resourceName"] + == roles[1]["name"] + ) + assert ( + config_client.list_discovered_resources( + resourceType="AWS::IAM::Role", resourceIds=[roles[0]["id"]], limit=1, + )["resourceIdentifiers"][0]["resourceName"] + == roles[0]["name"] + ) + + # Test aggregated resource name/id filter + assert ( + config_client.list_aggregate_discovered_resources( + ConfigurationAggregatorName="test_aggregator", + ResourceType="AWS::IAM::Role", + Filters={"ResourceName": roles[5]["name"]}, + Limit=1, + )["ResourceIdentifiers"][0]["ResourceName"] + == roles[5]["name"] + ) + + assert ( + config_client.list_aggregate_discovered_resources( + ConfigurationAggregatorName="test_aggregator", + ResourceType="AWS::IAM::Role", + Filters={"ResourceId": roles[4]["id"]}, + Limit=1, + )["ResourceIdentifiers"][0]["ResourceName"] + == roles[4]["name"] + ) + + # Test name/id filter with pagination + first_call = config_client.list_discovered_resources( + resourceType="AWS::IAM::Role", + resourceIds=[roles[1]["id"], roles[2]["id"]], + limit=1, + ) + + assert first_call["nextToken"] in [roles[1]["id"], roles[2]["id"]] + assert first_call["resourceIdentifiers"][0]["resourceName"] in [ + roles[1]["name"], + roles[2]["name"], + ] + second_call = 
config_client.list_discovered_resources( + resourceType="AWS::IAM::Role", + resourceIds=[roles[1]["id"], roles[2]["id"]], + limit=1, + nextToken=first_call["nextToken"], + ) + assert "nextToken" not in second_call + assert first_call["resourceIdentifiers"][0]["resourceName"] in [ + roles[1]["name"], + roles[2]["name"], + ] + assert ( + first_call["resourceIdentifiers"][0]["resourceName"] + != second_call["resourceIdentifiers"][0]["resourceName"] + ) + # Test non-aggregated batch get assert ( config_client.batch_get_resource_config( - resourceKeys=[{"resourceType": "AWS::IAM::Role", "resourceId": role_id}] + resourceKeys=[ + {"resourceType": "AWS::IAM::Role", "resourceId": roles[0]["id"]} + ] )["baseConfigurationItems"][0]["resourceName"] - == "mytestrole" + == roles[0]["name"] ) # Test aggregated batch get @@ -3285,12 +3355,12 @@ def test_role_config_client(): { "SourceAccountId": ACCOUNT_ID, "SourceRegion": "global", - "ResourceId": role_id, + "ResourceId": roles[1]["id"], "ResourceType": "AWS::IAM::Role", } ], )["BaseConfigurationItems"][0]["resourceName"] - == "mytestrole" + == roles[1]["name"] ) @@ -3312,7 +3382,7 @@ def test_policy_list_config_discovered_resources(): ], } - # Create a role + # Create a policy policy_config_query.backends["global"].create_policy( description="mypolicy", path="", @@ -3320,6 +3390,12 @@ def test_policy_list_config_discovered_resources(): policy_name="mypolicy", ) + # We expect the backend to have arns as their keys + for backend_key in list( + policy_config_query.backends["global"].managed_policies.keys() + ): + assert backend_key.startswith("arn:aws:iam::") + result = policy_config_query.list_config_service_resources(None, None, 100, None)[0] assert len(result) == 1 @@ -3465,20 +3541,24 @@ def test_policy_config_client(): result = config_client.list_discovered_resources(resourceType="AWS::IAM::Policy") assert not result["resourceIdentifiers"] - policy_id = iam_client.create_policy( - PolicyName="mypolicy", - Path="/", - PolicyDocument=json.dumps(basic_policy), - Description="mypolicy", - )["Policy"]["PolicyId"] + # Make 10 policies + policies = [] + num_policies = 10 + for ix in range(1, num_policies + 1): + this_policy = iam_client.create_policy( + PolicyName="policy{}".format(ix), + Path="/", + PolicyDocument=json.dumps(basic_policy), + Description="policy{}".format(ix), + ) + policies.append( + { + "id": this_policy["Policy"]["PolicyId"], + "name": this_policy["Policy"]["PolicyName"], + } + ) - # second policy - iam_client.create_policy( - PolicyName="zmypolicy", - Path="/", - PolicyDocument=json.dumps(basic_policy), - Description="zmypolicy", - ) + assert len(policies) == num_policies # Test non-aggregated query: (everything is getting a random id, so we can't test names by ordering) result = config_client.list_discovered_resources( @@ -3518,12 +3598,77 @@ def test_policy_config_client(): != first_agg_result ) + # Test non-aggregated resource name/id filter + assert ( + config_client.list_discovered_resources( + resourceType="AWS::IAM::Policy", resourceName=policies[1]["name"], limit=1, + )["resourceIdentifiers"][0]["resourceName"] + == policies[1]["name"] + ) + assert ( + config_client.list_discovered_resources( + resourceType="AWS::IAM::Policy", resourceIds=[policies[0]["id"]], limit=1, + )["resourceIdentifiers"][0]["resourceName"] + == policies[0]["name"] + ) + + # Test aggregated resource name/id filter + assert ( + config_client.list_aggregate_discovered_resources( + ConfigurationAggregatorName="test_aggregator", + 
ResourceType="AWS::IAM::Policy", + Filters={"ResourceName": policies[5]["name"]}, + Limit=1, + )["ResourceIdentifiers"][0]["ResourceName"] + == policies[5]["name"] + ) + + assert ( + config_client.list_aggregate_discovered_resources( + ConfigurationAggregatorName="test_aggregator", + ResourceType="AWS::IAM::Policy", + Filters={"ResourceId": policies[4]["id"]}, + Limit=1, + )["ResourceIdentifiers"][0]["ResourceName"] + == policies[4]["name"] + ) + + # Test name/id filter with pagination + first_call = config_client.list_discovered_resources( + resourceType="AWS::IAM::Policy", + resourceIds=[policies[1]["id"], policies[2]["id"]], + limit=1, + ) + + assert first_call["nextToken"] in [policies[1]["id"], policies[2]["id"]] + assert first_call["resourceIdentifiers"][0]["resourceName"] in [ + policies[1]["name"], + policies[2]["name"], + ] + second_call = config_client.list_discovered_resources( + resourceType="AWS::IAM::Policy", + resourceIds=[policies[1]["id"], policies[2]["id"]], + limit=1, + nextToken=first_call["nextToken"], + ) + assert "nextToken" not in second_call + assert first_call["resourceIdentifiers"][0]["resourceName"] in [ + policies[1]["name"], + policies[2]["name"], + ] + assert ( + first_call["resourceIdentifiers"][0]["resourceName"] + != second_call["resourceIdentifiers"][0]["resourceName"] + ) + # Test non-aggregated batch get assert ( config_client.batch_get_resource_config( - resourceKeys=[{"resourceType": "AWS::IAM::Policy", "resourceId": policy_id}] + resourceKeys=[ + {"resourceType": "AWS::IAM::Policy", "resourceId": policies[7]["id"]} + ] )["baseConfigurationItems"][0]["resourceName"] - == "mypolicy" + == policies[7]["name"] ) # Test aggregated batch get @@ -3534,10 +3679,10 @@ def test_policy_config_client(): { "SourceAccountId": ACCOUNT_ID, "SourceRegion": "global", - "ResourceId": policy_id, + "ResourceId": policies[8]["id"], "ResourceType": "AWS::IAM::Policy", } ], )["BaseConfigurationItems"][0]["resourceName"] - == "mypolicy" + == policies[8]["name"] ) From 8d5c70a9246e8eb24e8e7d2229b369ef048fea29 Mon Sep 17 00:00:00 2001 From: Nick Stocchero Date: Fri, 7 Aug 2020 22:34:59 -0600 Subject: [PATCH 540/658] different aggregation strategy --- moto/config/models.py | 26 ++- moto/core/models.py | 44 +---- moto/iam/config.py | 141 +++++++++++++--- moto/s3/config.py | 2 + tests/test_iam/test_iam.py | 334 ++++++++++++++++++++++++++++--------- 5 files changed, 408 insertions(+), 139 deletions(-) diff --git a/moto/config/models.py b/moto/config/models.py index 77f46e644c12..b8f31aa8de49 100644 --- a/moto/config/models.py +++ b/moto/config/models.py @@ -48,6 +48,7 @@ from moto.core import BaseBackend, BaseModel from moto.s3.config import s3_account_public_access_block_query, s3_config_query from moto.core import ACCOUNT_ID as DEFAULT_ACCOUNT_ID + from moto.iam.config import role_config_query, policy_config_query POP_STRINGS = [ @@ -68,6 +69,29 @@ "AWS::IAM::Policy": policy_config_query, } +CONFIG_REGIONS = [ + "af-south-1", + "ap-east-1", + "ap-northeast-1", + "ap-northeast-2", + "ap-south-1", + "ap-southeast-1", + "ap-southeast-2", + "ca-central-1", + "eu-central-1", + "eu-north-1", + "eu-south-1", + "eu-west-1", + "eu-west-2", + "eu-west-3", + "me-south-1", + "sa-east-1", + "us-east-1", + "us-east-2", + "us-west-1", + "us-west-2", +] + def datetime2int(date): return int(time.mktime(date.timetuple())) @@ -979,6 +1003,7 @@ def list_aggregate_discovered_resources( limit, next_token, resource_region=resource_region, + 
aggregator=self.config_aggregators.get(aggregator_name).__dict__, ) resource_identifiers = [] @@ -989,7 +1014,6 @@ def list_aggregate_discovered_resources( "ResourceType": identifier["type"], "ResourceId": identifier["id"], } - if identifier.get("name"): item["ResourceName"] = identifier["name"] diff --git a/moto/core/models.py b/moto/core/models.py index b8b4322be6c7..a3f720658b65 100644 --- a/moto/core/models.py +++ b/moto/core/models.py @@ -28,7 +28,6 @@ ) ACCOUNT_ID = os.environ.get("MOTO_ACCOUNT_ID", "123456789012") -CONFIG_BACKEND_DELIM = "\x1e" # Record Seperator "RS" ASCII Character class BaseMockAWS(object): @@ -723,6 +722,8 @@ def list_config_service_resources( :param backend_region: The region for the backend to pull results from. Set to `None` if this is an aggregated query. :param resource_region: The region for where the resources reside to pull results from. Set to `None` if this is a non-aggregated query. + :param aggregator: If an aggregated query, this will be the `ConfigAggregator instance from the backend. Set to `None` + if a non-aggregated query. Useful if you need special logic based off the aggregator (ie IAM) :return: This should return a list of Dicts that have the following fields: [ { @@ -766,47 +767,6 @@ def get_config_resource( """ raise NotImplementedError() - def aggregate_regions(self, path, backend_region, resource_region): - """ - This method is called for both aggregated and non-aggregated calls for config resources. - - It will figure out how to return the full list of resources for a given regional backend and append them to a final list. - It produces a list of both the region and the resource name with a delimiter character (CONFIG_BACKEND_DELIM, ASCII Record separator, \x1e). - IE: "us-east-1\x1ei-1234567800" - - You should only use this method if you need to aggregate resources over more than one region. - If your region is global, just query the global backend directly in the `list_config_service_resources` method - - If you use this method, your config-enabled resource must parse the delimited string in it's `list_config_service_resources` method. - ... - :param path: - A dict accessor string applied to the backend that locates resources inside that backend. 
-            For example, if you passed path="keypairs", and you were working with an ec2 moto backend, it would yield the contents from - ec2_moto_backend[region].keypairs - :param backend_region: - Only used for filtering; A string representing the region IE: us-east-1 - :param resource_region: - Only used for filtering; A string representing the region IE: us-east-1 - :return: - Returns a list of "region\x1eresourcename" strings - """ - - filter_region = backend_region or resource_region - if filter_region: - filter_resources = list(self.backends[filter_region].__dict__[path].keys()) - return list( - map( - lambda resource: "{}{}{}".format( - filter_region, CONFIG_BACKEND_DELIM, resource - ), - filter_resources, - ) - ) - - # If we don't have a filter region - ret = [] - for region in self.backends: - this_region_resources = list(self.backends[region].__dict__[path].keys()) - for resource in this_region_resources: - ret.append("{}{}{}".format(region, CONFIG_BACKEND_DELIM, resource)) - return ret - class base_decorator(object): mock_backend = MockAWS diff --git a/moto/iam/config.py b/moto/iam/config.py index fdf31b576c4e..484153217b2c 100644 --- a/moto/iam/config.py +++ b/moto/iam/config.py @@ -4,8 +4,6 @@ from moto.core.models import ConfigQueryModel from moto.iam import iam_backends -CONFIG_BACKEND_DELIM = "\x1e" # Record Seperator "RS" ASCII Character - class RoleConfigQuery(ConfigQueryModel): def list_config_service_resources( @@ -16,6 +14,7 @@ def list_config_service_resources( next_token, backend_region=None, resource_region=None, + aggregator=None, ): # IAM roles are "global" and aren't assigned into any availability zone # The resource ID is a AWS-assigned random string like "AROA0BSVNSZKXVHS00SBJ" @@ -31,12 +30,16 @@ def list_config_service_resources( # Filter by resource name or ids if resource_name or resource_ids: filtered_roles = [] - # resource_name takes precendence over resource_ids + # resource_name takes precedence over resource_ids if resource_name: for role in role_list: if role.name == resource_name: filtered_roles = [role] break + # but if both are passed, it must be a subset + if filtered_roles and resource_ids: + if filtered_roles[0].id not in resource_ids: + return [], None else: for role in role_list: if role.id in resource_ids: @@ -45,10 +48,54 @@ def list_config_service_resources( # Filtered roles are now the subject for the listing role_list = filtered_roles - # Pagination logic, sort by role id - sorted_roles = sorted(role_list, key=lambda role: role.id) - # sorted_role_ids matches indicies of sorted_roles - sorted_role_ids = list(map(lambda role: role.id, sorted_roles)) + if aggregator: + # IAM is a little special; Roles are created in us-east-1 (which AWS calls the "global" region) + # However, the resource will return in the aggregator (in duplicate) for each region in the aggregator + # Therefore, we'll need to find out the regions where the aggregators are running, and then duplicate the resource there + + # In practice, it looks like AWS will only duplicate these resources if you've "used" any roles in the region, but since + # we can't really tell if this has happened in moto, we'll just bind this to the regions in your aggregator + from moto.config.models import CONFIG_REGIONS + + aggregated_regions = [] + aggregator_sources = aggregator.get( + "account_aggregation_sources" + ) or aggregator.get("organization_aggregation_source") + for source in aggregator_sources: + source_dict = source.__dict__ + if source_dict["all_aws_regions"]: + 
aggregated_regions = CONFIG_REGIONS + break + for region in source_dict["aws_regions"]: + aggregated_regions.append(region) + + duplicate_role_list = [] + for region in list(set(aggregated_regions)): + for role in role_list: + duplicate_role_list.append( + { + "_id": "{}{}".format( + role.id, region + ), # this is only for sorting, isn't returned outside of this function + "type": "AWS::IAM::Role", + "id": role.id, + "name": role.name, + "region": region, + } + ) + + # Pagination logic, sort by role id + sorted_roles = sorted(duplicate_role_list, key=lambda role: role["_id"]) + + # sorted_role_ids matches indices of sorted_roles + sorted_role_ids = list(map(lambda role: role["_id"], sorted_roles)) + else: + # Non-aggregated queries are in the else block, and we can treat these like a normal config resource + # Pagination logic, sort by role id + sorted_roles = sorted(role_list, key=lambda role: role.id) + # sorted_role_ids matches indices of sorted_roles + sorted_role_ids = list(map(lambda role: role.id, sorted_roles)) + new_token = None # Get the start: @@ -70,9 +117,9 @@ def list_config_service_resources( [ { "type": "AWS::IAM::Role", - "id": role.id, - "name": role.name, - "region": "global", + "id": role["id"] if aggregator else role.id, + "name": role["name"] if aggregator else role.name, + "region": role["region"] if aggregator else "global", } for role in role_list ], @@ -114,6 +161,7 @@ def list_config_service_resources( next_token, backend_region=None, resource_region=None, + aggregator=None, ): # IAM policies are "global" and aren't assigned into any availability zone # The resource ID is a AWS-assigned random string like "ANPA0BSVNSZK00SJSPVUJ" # The resource name is a user-assigned string like "my-development-policy" + # Stored in moto backend with the arn like "arn:aws:iam::123456789012:policy/my-development-policy" + + policy_list = list(self.backends["global"].managed_policies.values()) # We don't want to include AWS Managed Policies. This technically needs to # respect the configuration recorder's 'includeGlobalResourceTypes' setting, # but it's default set be default, and moto's config doesn't yet support # custom configuration recorders, we'll just behave as default. 
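+        # (Note: AWS-managed policies all live under the "aws" pseudo-account, e.g. "arn:aws:iam::aws:policy/ReadOnlyAccess", which is what the ARN prefix check below matches.)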
- policy_list = filter( - lambda policy: not policy.arn.startswith("arn:aws:iam::aws"), policy_list, + policy_list = list( + filter( + lambda policy: not policy.arn.startswith("arn:aws:iam::aws"), + policy_list, + ) ) if not policy_list: @@ -136,12 +187,17 @@ def list_config_service_resources( # Filter by resource name or ids if resource_name or resource_ids: filtered_policies = [] - # resource_name takes precendence over resource_ids + # resource_name takes precedence over resource_ids if resource_name: for policy in policy_list: if policy.name == resource_name: filtered_policies = [policy] break + # but if both are passed, it must be a subset + if filtered_policies and resource_ids: + if filtered_policies[0].id not in resource_ids: + return [], None + else: for policy in policy_list: if policy.id in resource_ids: @@ -150,10 +206,55 @@ def list_config_service_resources( # Filtered roles are now the subject for the listing policy_list = filtered_policies - # Pagination logic, sort by role id - sorted_policies = sorted(policy_list, key=lambda role: role.id) - # sorted_policy_ids matches indicies of sorted_policies - sorted_policy_ids = list(map(lambda policy: policy.id, sorted_policies)) + if aggregator: + # IAM is a little special; Policies are created in us-east-1 (which AWS calls the "global" region) + # However, the resource will return in the aggregator (in duplicate) for each region in the aggregator + # Therefore, we'll need to find out the regions where the aggregators are running, and then duplicate the resource there + + # In practice, it looks like AWS will only duplicate these resources if you've "used" any policies in the region, but since + # we can't really tell if this has happened in moto, we'll just bind this to the regions in your aggregator + from moto.config.models import CONFIG_REGIONS + + aggregated_regions = [] + aggregator_sources = aggregator.get( + "account_aggregation_sources" + ) or aggregator.get("organization_aggregation_source") + for source in aggregator_sources: + source_dict = source.__dict__ + if source_dict["all_aws_regions"]: + aggregated_regions = CONFIG_REGIONS + break + for region in source_dict["aws_regions"]: + aggregated_regions.append(region) + + duplicate_policy_list = [] + for region in list(set(aggregated_regions)): + for policy in policy_list: + duplicate_policy_list.append( + { + "_id": "{}{}".format( + policy.id, region + ), # this is only for sorting, isn't returned outside of this function + "type": "AWS::IAM::Policy", + "id": policy.id, + "name": policy.name, + "region": region, + } + ) + + # Pagination logic, sort by policy id + sorted_policies = sorted( + duplicate_policy_list, key=lambda policy: policy["_id"] + ) + + # sorted_policy_ids matches indices of sorted_policies + sorted_policy_ids = list(map(lambda policy: policy["_id"], sorted_policies)) + else: + # Non-aggregated queries are in the else block, and we can treat these like a normal config resource + # Pagination logic, sort by policy id + sorted_policies = sorted(policy_list, key=lambda policy: policy.id) + # sorted_policy_ids matches indices of sorted_policies + sorted_policy_ids = list(map(lambda policy: policy.id, sorted_policies)) new_token = None @@ -176,9 +277,9 @@ def list_config_service_resources( [ { "type": "AWS::IAM::Policy", - "id": policy.id, - "name": policy.name, - "region": "global", + "id": policy["id"] if aggregator else policy.id, + "name": policy["name"] if aggregator else policy.name, + "region": policy["region"] if aggregator else "global", } for policy in 
policy_list ], diff --git a/moto/s3/config.py b/moto/s3/config.py index 04b4315f359f..932ebc3be7d8 100644 --- a/moto/s3/config.py +++ b/moto/s3/config.py @@ -19,6 +19,7 @@ def list_config_service_resources( next_token, backend_region=None, resource_region=None, + aggregator=None, ): # The resource_region only matters for aggregated queries as you can filter on bucket regions for them. # For other resource types, you would need to iterate appropriately for the backend_region. @@ -132,6 +133,7 @@ def list_config_service_resources( next_token, backend_region=None, resource_region=None, + aggregator=None, ): # For the Account Public Access Block, they are the same for all regions. The resource ID is the AWS account ID # There is no resource name -- it should be a blank string "" if provided. diff --git a/tests/test_iam/test_iam.py b/tests/test_iam/test_iam.py index f71b96925d2a..b662cc527c1c 100644 --- a/tests/test_iam/test_iam.py +++ b/tests/test_iam/test_iam.py @@ -2905,27 +2905,62 @@ def test_role_list_config_discovered_resources(): None, ) - # Create a role - role_config_query.backends["global"].create_role( - role_name="something", - assume_role_policy_document=None, - path="/", - permissions_boundary=None, - description="something", - tags=[], - max_session_duration=3600, - ) + # Make 3 roles + roles = [] + num_roles = 3 + for ix in range(1, num_roles + 1): + this_role = role_config_query.backends["global"].create_role( + role_name="role{}".format(ix), + assume_role_policy_document=None, + path="/", + permissions_boundary=None, + description="role{}".format(ix), + tags=[{"Key": "foo", "Value": "bar"}], + max_session_duration=3600, + ) + roles.append( + {"id": this_role.id, "name": this_role.name,} + ) + + assert len(roles) == num_roles result = role_config_query.list_config_service_resources(None, None, 100, None)[0] - assert len(result) == 1 + assert len(result) == num_roles - # The role gets a random ID, so we have to grab it + # The roles gets a random ID, so we can't directly test it role = result[0] assert role["type"] == "AWS::IAM::Role" - assert len(role["id"]) == len(random_resource_id()) - assert role["name"] == "something" + assert role["id"] in list(map(lambda p: p["id"], roles)) + assert role["name"] in list(map(lambda p: p["name"], roles)) assert role["region"] == "global" + # test passing list of resource ids + resource_ids = role_config_query.list_config_service_resources( + [roles[0]["id"], roles[1]["id"]], None, 100, None + )[0] + assert len(resource_ids) == 2 + + # test passing a single resource name + resource_name = role_config_query.list_config_service_resources( + None, roles[0]["name"], 100, None + )[0] + assert len(resource_name) == 1 + assert resource_name[0]["id"] == roles[0]["id"] + assert resource_name[0]["name"] == roles[0]["name"] + + # test passing a single resource name AND some resource id's + both_filter_good = role_config_query.list_config_service_resources( + [roles[0]["id"], roles[1]["id"]], roles[0]["name"], 100, None + )[0] + assert len(both_filter_good) == 1 + assert both_filter_good[0]["id"] == roles[0]["id"] + assert both_filter_good[0]["name"] == roles[0]["name"] + + both_filter_bad = role_config_query.list_config_service_resources( + [roles[0]["id"], roles[1]["id"]], roles[2]["name"], 100, None + )[0] + assert len(both_filter_bad) == 0 + @mock_iam def test_role_config_dict(): @@ -3200,18 +3235,29 @@ def test_role_config_dict(): def test_role_config_client(): from moto.iam.models import ACCOUNT_ID from moto.iam.utils import random_resource_id + 
from moto.config.models import CONFIG_REGIONS iam_client = boto3.client("iam", region_name="us-west-2") config_client = boto3.client("config", region_name="us-west-2") - account_aggregation_source = { + all_account_aggregation_source = { "AccountIds": [ACCOUNT_ID], "AllAwsRegions": True, } + two_region_account_aggregation_source = { + "AccountIds": [ACCOUNT_ID], + "AwsRegions": ["us-east-1", "us-west-2"], + } + config_client.put_configuration_aggregator( ConfigurationAggregatorName="test_aggregator", - AccountAggregationSources=[account_aggregation_source], + AccountAggregationSources=[all_account_aggregation_source], + ) + + config_client.put_configuration_aggregator( + ConfigurationAggregatorName="test_aggregator_two_regions", + AccountAggregationSources=[two_region_account_aggregation_source], ) result = config_client.list_discovered_resources(resourceType="AWS::IAM::Role") @@ -3251,29 +3297,88 @@ def test_role_config_client(): )["resourceIdentifiers"][0]["resourceId"] ) != first_result - # Test aggregated query: (everything is getting a random id, so we can't test names by ordering) + # Test aggregated query - by `Limit=len(CONFIG_REGIONS)`, we should get a single policy duplicated across all regions agg_result = config_client.list_aggregate_discovered_resources( ResourceType="AWS::IAM::Role", ConfigurationAggregatorName="test_aggregator", - Limit=1, - ) - first_agg_result = agg_result["ResourceIdentifiers"][0]["ResourceId"] - assert agg_result["ResourceIdentifiers"][0]["ResourceType"] == "AWS::IAM::Role" - assert len(first_agg_result) == len(random_resource_id()) - assert agg_result["ResourceIdentifiers"][0]["SourceAccountId"] == ACCOUNT_ID - assert agg_result["ResourceIdentifiers"][0]["SourceRegion"] == "global" + Limit=len(CONFIG_REGIONS), + ) + assert len(agg_result["ResourceIdentifiers"]) == len(CONFIG_REGIONS) + + agg_name = None + agg_id = None + for resource in agg_result["ResourceIdentifiers"]: + assert resource["ResourceType"] == "AWS::IAM::Role" + assert resource["SourceRegion"] in CONFIG_REGIONS + assert resource["SourceAccountId"] == ACCOUNT_ID + if agg_id: + assert resource["ResourceId"] == agg_id + if agg_name: + assert resource["ResourceName"] == agg_name + agg_name = resource["ResourceName"] + agg_id = resource["ResourceId"] # Test aggregated pagination + for resource in config_client.list_aggregate_discovered_resources( + ConfigurationAggregatorName="test_aggregator", + ResourceType="AWS::IAM::Role", + NextToken=agg_result["NextToken"], + )["ResourceIdentifiers"]: + assert resource["ResourceId"] != agg_id + + # Test non-aggregated resource name/id filter assert ( - config_client.list_aggregate_discovered_resources( - ConfigurationAggregatorName="test_aggregator", - ResourceType="AWS::IAM::Role", - Limit=1, - NextToken=agg_result["NextToken"], - )["ResourceIdentifiers"][0]["ResourceId"] - != first_agg_result + config_client.list_discovered_resources( + resourceType="AWS::IAM::Role", resourceName=roles[1]["name"], limit=1, + )["resourceIdentifiers"][0]["resourceName"] + == roles[1]["name"] + ) + + assert ( + config_client.list_discovered_resources( + resourceType="AWS::IAM::Role", resourceIds=[roles[0]["id"]], limit=1, + )["resourceIdentifiers"][0]["resourceName"] + == roles[0]["name"] ) + # Test aggregated resource name/id filter + agg_name_filter = config_client.list_aggregate_discovered_resources( + ConfigurationAggregatorName="test_aggregator", + ResourceType="AWS::IAM::Role", + Filters={"ResourceName": roles[5]["name"]}, + ) + assert 
len(agg_name_filter["ResourceIdentifiers"]) == len(CONFIG_REGIONS) + assert agg_name_filter["ResourceIdentifiers"][0]["ResourceId"] == roles[5]["id"] + + agg_name_filter = config_client.list_aggregate_discovered_resources( + ConfigurationAggregatorName="test_aggregator_two_regions", + ResourceType="AWS::IAM::Role", + Filters={"ResourceName": roles[5]["name"]}, + ) + assert len(agg_name_filter["ResourceIdentifiers"]) == len( + two_region_account_aggregation_source["AwsRegions"] + ) + assert agg_name_filter["ResourceIdentifiers"][0]["ResourceId"] == roles[5]["id"] + + agg_id_filter = config_client.list_aggregate_discovered_resources( + ConfigurationAggregatorName="test_aggregator", + ResourceType="AWS::IAM::Role", + Filters={"ResourceId": roles[4]["id"]}, + ) + + assert len(agg_id_filter["ResourceIdentifiers"]) == len(CONFIG_REGIONS) + assert agg_id_filter["ResourceIdentifiers"][0]["ResourceName"] == roles[4]["name"] + + agg_name_filter = config_client.list_aggregate_discovered_resources( + ConfigurationAggregatorName="test_aggregator_two_regions", + ResourceType="AWS::IAM::Role", + Filters={"ResourceId": roles[5]["id"]}, + ) + assert len(agg_name_filter["ResourceIdentifiers"]) == len( + two_region_account_aggregation_source["AwsRegions"] + ) + assert agg_name_filter["ResourceIdentifiers"][0]["ResourceName"] == roles[5]["name"] + # Test non-aggregated resource name/id filter assert ( config_client.list_discovered_resources( @@ -3354,7 +3459,7 @@ def test_role_config_client(): ResourceIdentifiers=[ { "SourceAccountId": ACCOUNT_ID, - "SourceRegion": "global", + "SourceRegion": "us-east-1", "ResourceId": roles[1]["id"], "ResourceType": "AWS::IAM::Role", } @@ -3382,13 +3487,21 @@ def test_policy_list_config_discovered_resources(): ], } - # Create a policy - policy_config_query.backends["global"].create_policy( - description="mypolicy", - path="", - policy_document=json.dumps(basic_policy), - policy_name="mypolicy", - ) + # Make 3 policies + policies = [] + num_policies = 3 + for ix in range(1, num_policies + 1): + this_policy = policy_config_query.backends["global"].create_policy( + description="policy{}".format(ix), + path="", + policy_document=json.dumps(basic_policy), + policy_name="policy{}".format(ix), + ) + policies.append( + {"id": this_policy.id, "name": this_policy.name,} + ) + + assert len(policies) == num_policies # We expect the backend to have arns as their keys for backend_key in list( @@ -3397,14 +3510,41 @@ def test_policy_list_config_discovered_resources(): assert backend_key.startswith("arn:aws:iam::") result = policy_config_query.list_config_service_resources(None, None, 100, None)[0] - assert len(result) == 1 + assert len(result) == num_policies policy = result[0] assert policy["type"] == "AWS::IAM::Policy" - assert len(policy["id"]) == len(random_policy_id()) - assert policy["name"] == "mypolicy" + assert policy["id"] in list(map(lambda p: p["id"], policies)) + assert policy["name"] in list(map(lambda p: p["name"], policies)) assert policy["region"] == "global" + # test passing list of resource ids + resource_ids = policy_config_query.list_config_service_resources( + [policies[0]["id"], policies[1]["id"]], None, 100, None + )[0] + assert len(resource_ids) == 2 + + # test passing a single resource name + resource_name = policy_config_query.list_config_service_resources( + None, policies[0]["name"], 100, None + )[0] + assert len(resource_name) == 1 + assert resource_name[0]["id"] == policies[0]["id"] + assert resource_name[0]["name"] == policies[0]["name"] + + # test passing a 
single resource name AND some resource id's + both_filter_good = policy_config_query.list_config_service_resources( + [policies[0]["id"], policies[1]["id"]], policies[0]["name"], 100, None + )[0] + assert len(both_filter_good) == 1 + assert both_filter_good[0]["id"] == policies[0]["id"] + assert both_filter_good[0]["name"] == policies[0]["name"] + + both_filter_bad = policy_config_query.list_config_service_resources( + [policies[0]["id"], policies[1]["id"]], policies[2]["name"], 100, None + )[0] + assert len(both_filter_bad) == 0 + @mock_iam def test_policy_config_dict(): @@ -3519,6 +3659,7 @@ def test_policy_config_dict(): def test_policy_config_client(): from moto.iam.models import ACCOUNT_ID from moto.iam.utils import random_policy_id + from moto.config.models import CONFIG_REGIONS basic_policy = { "Version": "2012-10-17", @@ -3528,14 +3669,24 @@ def test_policy_config_client(): iam_client = boto3.client("iam", region_name="us-west-2") config_client = boto3.client("config", region_name="us-west-2") - account_aggregation_source = { + all_account_aggregation_source = { "AccountIds": [ACCOUNT_ID], "AllAwsRegions": True, } + two_region_account_aggregation_source = { + "AccountIds": [ACCOUNT_ID], + "AwsRegions": ["us-east-1", "us-west-2"], + } + config_client.put_configuration_aggregator( ConfigurationAggregatorName="test_aggregator", - AccountAggregationSources=[account_aggregation_source], + AccountAggregationSources=[all_account_aggregation_source], + ) + + config_client.put_configuration_aggregator( + ConfigurationAggregatorName="test_aggregator_two_regions", + AccountAggregationSources=[two_region_account_aggregation_source], ) result = config_client.list_discovered_resources(resourceType="AWS::IAM::Policy") @@ -3575,28 +3726,35 @@ def test_policy_config_client(): )["resourceIdentifiers"][0]["resourceId"] ) != first_result - # Test aggregated query: (everything is getting a random id, so we can't test names by ordering) + # Test aggregated query - by `Limit=len(CONFIG_REGIONS)`, we should get a single policy duplicated across all regions agg_result = config_client.list_aggregate_discovered_resources( ResourceType="AWS::IAM::Policy", ConfigurationAggregatorName="test_aggregator", - Limit=1, - ) - first_agg_result = agg_result["ResourceIdentifiers"][0]["ResourceId"] - assert agg_result["ResourceIdentifiers"][0]["ResourceType"] == "AWS::IAM::Policy" - assert len(first_agg_result) == len(random_policy_id()) - assert agg_result["ResourceIdentifiers"][0]["SourceAccountId"] == ACCOUNT_ID - assert agg_result["ResourceIdentifiers"][0]["SourceRegion"] == "global" + Limit=len(CONFIG_REGIONS), + ) + assert len(agg_result["ResourceIdentifiers"]) == len(CONFIG_REGIONS) + + agg_name = None + agg_id = None + for resource in agg_result["ResourceIdentifiers"]: + assert resource["ResourceType"] == "AWS::IAM::Policy" + assert resource["SourceRegion"] in CONFIG_REGIONS + assert resource["SourceAccountId"] == ACCOUNT_ID + if agg_id: + assert resource["ResourceId"] == agg_id + if agg_name: + assert resource["ResourceName"] == agg_name + agg_name = resource["ResourceName"] + agg_id = resource["ResourceId"] # Test aggregated pagination - assert ( - config_client.list_aggregate_discovered_resources( - ConfigurationAggregatorName="test_aggregator", - ResourceType="AWS::IAM::Policy", - Limit=1, - NextToken=agg_result["NextToken"], - )["ResourceIdentifiers"][0]["ResourceId"] - != first_agg_result - ) + for resource in config_client.list_aggregate_discovered_resources( + 
ConfigurationAggregatorName="test_aggregator", + ResourceType="AWS::IAM::Policy", + Limit=1, + NextToken=agg_result["NextToken"], + )["ResourceIdentifiers"]: + assert resource["ResourceId"] != agg_id # Test non-aggregated resource name/id filter assert ( @@ -3605,6 +3763,7 @@ def test_policy_config_client(): )["resourceIdentifiers"][0]["resourceName"] == policies[1]["name"] ) + assert ( config_client.list_discovered_resources( resourceType="AWS::IAM::Policy", resourceIds=[policies[0]["id"]], limit=1, @@ -3613,24 +3772,47 @@ def test_policy_config_client(): ) # Test aggregated resource name/id filter + agg_name_filter = config_client.list_aggregate_discovered_resources( + ConfigurationAggregatorName="test_aggregator", + ResourceType="AWS::IAM::Policy", + Filters={"ResourceName": policies[5]["name"]}, + ) + assert len(agg_name_filter["ResourceIdentifiers"]) == len(CONFIG_REGIONS) assert ( - config_client.list_aggregate_discovered_resources( - ConfigurationAggregatorName="test_aggregator", - ResourceType="AWS::IAM::Policy", - Filters={"ResourceName": policies[5]["name"]}, - Limit=1, - )["ResourceIdentifiers"][0]["ResourceName"] - == policies[5]["name"] + agg_name_filter["ResourceIdentifiers"][0]["ResourceName"] == policies[5]["name"] + ) + + agg_name_filter = config_client.list_aggregate_discovered_resources( + ConfigurationAggregatorName="test_aggregator_two_regions", + ResourceType="AWS::IAM::Policy", + Filters={"ResourceName": policies[5]["name"]}, + ) + assert len(agg_name_filter["ResourceIdentifiers"]) == len( + two_region_account_aggregation_source["AwsRegions"] ) + assert agg_name_filter["ResourceIdentifiers"][0]["ResourceId"] == policies[5]["id"] + agg_id_filter = config_client.list_aggregate_discovered_resources( + ConfigurationAggregatorName="test_aggregator", + ResourceType="AWS::IAM::Policy", + Filters={"ResourceId": policies[4]["id"]}, + ) + + assert len(agg_id_filter["ResourceIdentifiers"]) == len(CONFIG_REGIONS) assert ( - config_client.list_aggregate_discovered_resources( - ConfigurationAggregatorName="test_aggregator", - ResourceType="AWS::IAM::Policy", - Filters={"ResourceId": policies[4]["id"]}, - Limit=1, - )["ResourceIdentifiers"][0]["ResourceName"] - == policies[4]["name"] + agg_id_filter["ResourceIdentifiers"][0]["ResourceName"] == policies[4]["name"] + ) + + agg_name_filter = config_client.list_aggregate_discovered_resources( + ConfigurationAggregatorName="test_aggregator_two_regions", + ResourceType="AWS::IAM::Policy", + Filters={"ResourceId": policies[5]["id"]}, + ) + assert len(agg_name_filter["ResourceIdentifiers"]) == len( + two_region_account_aggregation_source["AwsRegions"] + ) + assert ( + agg_name_filter["ResourceIdentifiers"][0]["ResourceName"] == policies[5]["name"] ) # Test name/id filter with pagination @@ -3678,7 +3860,7 @@ def test_policy_config_client(): ResourceIdentifiers=[ { "SourceAccountId": ACCOUNT_ID, - "SourceRegion": "global", + "SourceRegion": "us-east-2", "ResourceId": policies[8]["id"], "ResourceType": "AWS::IAM::Policy", } From fd69c93a09edd2ee3675c6e39063b524e62a16d2 Mon Sep 17 00:00:00 2001 From: Nick Stocchero Date: Wed, 12 Aug 2020 17:16:47 -0600 Subject: [PATCH 541/658] use botocore regions and refactor sorting --- moto/config/models.py | 23 ------------------- moto/iam/config.py | 47 +++++++++++++++++++------------------- tests/test_iam/test_iam.py | 6 +++-- 3 files changed, 27 insertions(+), 49 deletions(-) diff --git a/moto/config/models.py b/moto/config/models.py index b8f31aa8de49..db25563432e5 100644 --- a/moto/config/models.py 
+++ b/moto/config/models.py @@ -69,29 +69,6 @@ "AWS::IAM::Policy": policy_config_query, } -CONFIG_REGIONS = [ - "af-south-1", - "ap-east-1", - "ap-northeast-1", - "ap-northeast-2", - "ap-south-1", - "ap-southeast-1", - "ap-southeast-2", - "ca-central-1", - "eu-central-1", - "eu-north-1", - "eu-south-1", - "eu-west-1", - "eu-west-2", - "eu-west-3", - "me-south-1", - "sa-east-1", - "us-east-1", - "us-east-2", - "us-west-1", - "us-west-2", -] - def datetime2int(date): return int(time.mktime(date.timetuple())) diff --git a/moto/iam/config.py b/moto/iam/config.py index 484153217b2c..2f2cafa5f65d 100644 --- a/moto/iam/config.py +++ b/moto/iam/config.py @@ -1,5 +1,5 @@ import json - +import boto3 from moto.core.exceptions import InvalidNextTokenException from moto.core.models import ConfigQueryModel from moto.iam import iam_backends @@ -55,8 +55,6 @@ def list_config_service_resources( # In practice, it looks like AWS will only duplicate these resources if you've "used" any roles in the region, but since # we can't really tell if this has happened in moto, we'll just bind this to the regions in your aggregator - from moto.config.models import CONFIG_REGIONS - aggregated_regions = [] aggregator_sources = aggregator.get( "account_aggregation_sources" ) for source in aggregator_sources: source_dict = source.__dict__ if source_dict["all_aws_regions"]: - aggregated_regions = CONFIG_REGIONS + aggregated_regions = boto3.Session().get_available_regions("config") break for region in source_dict["aws_regions"]: aggregated_regions.append(region) @@ -86,15 +84,10 @@ def list_config_service_resources( # Pagination logic, sort by role id sorted_roles = sorted(duplicate_role_list, key=lambda role: role["_id"]) - - # sorted_role_ids matches indicies of sorted_roles - sorted_role_ids = list(map(lambda role: role["_id"], sorted_roles)) else: # Non-aggregated queries are in the else block, and we can treat these like a normal config resource # Pagination logic, sort by role id sorted_roles = sorted(role_list, key=lambda role: role.id) - # sorted_role_ids matches indicies of sorted_roles - sorted_role_ids = list(map(lambda role: role.id, sorted_roles)) new_token = None @@ -102,16 +95,22 @@ def list_config_service_resources( if not next_token: start = 0 else: - if next_token not in sorted_role_ids: + try: + # Find the index of the next token + start = next( + index + for (index, r) in enumerate(sorted_roles) + if next_token == (r["_id"] if aggregator else r.id) + ) + except StopIteration: raise InvalidNextTokenException() - start = sorted_role_ids.index(next_token) - # Get the list of items to collect: role_list = sorted_roles[start : (start + limit)] if len(sorted_roles) > (start + limit): - new_token = sorted_role_ids[start + limit] + record = sorted_roles[start + limit] + new_token = record["_id"] if aggregator else record.id return ( [ @@ -213,8 +212,6 @@ def list_config_service_resources( # In practice, it looks like AWS will only duplicate these resources if you've "used" any policies in the region, but since # we can't really tell if this has happened in moto, we'll just bind this to the regions in your aggregator - from moto.config.models import CONFIG_REGIONS - aggregated_regions = [] aggregator_sources = aggregator.get( "account_aggregation_sources" ) or aggregator.get("organization_aggregation_source") for source in aggregator_sources: source_dict = source.__dict__ if source_dict["all_aws_regions"]: - aggregated_regions = CONFIG_REGIONS + aggregated_regions = boto3.Session().get_available_regions("config") break for region in source_dict["aws_regions"]: aggregated_regions.append(region) @@ -247,14 +244,10 @@ def list_config_service_resources( duplicate_policy_list, key=lambda policy: policy["_id"] ) - # sorted_policy_ids matches indicies of sorted_policies - sorted_policy_ids = list(map(lambda policy: policy["_id"], sorted_policies)) else: # Non-aggregated queries are in the else block, and we can treat these like a normal config resource # Pagination logic, sort by role id sorted_policies = sorted(policy_list, key=lambda role: role.id) - # sorted_policy_ids matches indicies of sorted_policies - sorted_policy_ids = list(map(lambda policy: policy.id, sorted_policies)) new_token = None @@ -262,16 +255,22 @@ def list_config_service_resources( if not next_token: start = 0 else: - if next_token not in sorted_policy_ids: + try: + # Find the index of the next token + start = next( + index + for (index, p) in enumerate(sorted_policies) + if next_token == (p["_id"] if aggregator else p.id) + ) + except StopIteration: raise InvalidNextTokenException() - start = sorted_policy_ids.index(next_token) - # Get the list of items to collect: policy_list = sorted_policies[start : (start + limit)] if len(sorted_policies) > (start + limit): - new_token = sorted_policy_ids[start + limit] + record = sorted_policies[start + limit] + new_token = record["_id"] if aggregator else record.id return ( [ diff --git a/tests/test_iam/test_iam.py b/tests/test_iam/test_iam.py index b662cc527c1c..e1bc93d57459 100644 --- a/tests/test_iam/test_iam.py +++ b/tests/test_iam/test_iam.py @@ -3235,7 +3235,8 @@ def test_role_config_dict(): def test_role_config_client(): from moto.iam.models import ACCOUNT_ID from moto.iam.utils import random_resource_id - from moto.config.models import CONFIG_REGIONS + + CONFIG_REGIONS = boto3.Session().get_available_regions("config") iam_client = boto3.client("iam", region_name="us-west-2") config_client = boto3.client("config", region_name="us-west-2") @@ -3659,7 +3660,8 @@ def test_policy_config_dict(): def test_policy_config_client(): from moto.iam.models import ACCOUNT_ID from moto.iam.utils import random_policy_id - from moto.config.models import CONFIG_REGIONS + + CONFIG_REGIONS = boto3.Session().get_available_regions("config") basic_policy = { "Version": "2012-10-17", From 4354cb06d1252cc08733cc6c0b6b615d02bd1dcf Mon Sep 17 00:00:00 2001 From: Nick Stocchero Date: Wed, 26 Aug 2020 19:02:14 -0600 Subject: [PATCH 542/658] remove comment --- moto/core/models.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/moto/core/models.py b/moto/core/models.py index a3f720658b65..96535f500950 100644 --- a/moto/core/models.py +++ b/moto/core/models.py @@ -722,8 +722,6 @@ def list_config_service_resources( :param backend_region: The region for the backend to pull results from. Set to `None` if this is an aggregated query. :param resource_region: The region for where the resources reside to pull results from. Set to `None` if this is a non-aggregated query. - :param aggregator: If an aggregated query, this will be the `ConfigAggregator instance from the backend. Set to `None` - if a non-aggregated query. 
Useful if you need special logic based off the aggregator (ie IAM) :return: This should return a list of Dicts that have the following fields: [ { From fc7f3fecb69c2c39f223aa6366ef056dfccff858 Mon Sep 17 00:00:00 2001 From: Nick Stocchero Date: Thu, 17 Sep 2020 17:43:19 -0600 Subject: [PATCH 543/658] clean up and bring up to master --- moto/core/models.py | 5 +++++ moto/iam/models.py | 1 - 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/moto/core/models.py b/moto/core/models.py index 96535f500950..d8de6b29f2a4 100644 --- a/moto/core/models.py +++ b/moto/core/models.py @@ -691,6 +691,7 @@ def list_config_service_resources( next_token, backend_region=None, resource_region=None, + aggregator=None, ): """For AWS Config. This will list all of the resources of the given type and optional resource name and region. @@ -722,6 +723,10 @@ def list_config_service_resources( :param backend_region: The region for the backend to pull results from. Set to `None` if this is an aggregated query. :param resource_region: The region for where the resources reside to pull results from. Set to `None` if this is a non-aggregated query. + :param aggregator: If the query is an aggregated query, *AND* the resource has "non-standard" aggregation logic (mainly, IAM), + you'll need to pass the aggregator used. In most cases, this should be omitted/set to `None`. See the + conditional logic under `if aggregator` in moto/iam/config.py for the IAM example. + :return: This should return a list of Dicts that have the following fields: [ { diff --git a/moto/iam/models.py b/moto/iam/models.py index 9ae1ddcdf7f3..617da69b0e78 100755 --- a/moto/iam/models.py +++ b/moto/iam/models.py @@ -12,7 +12,6 @@ from cryptography import x509 from cryptography.hazmat.backends import default_backend -from six.moves.urllib.parse import urlparse from six.moves.urllib import parse from moto.core.exceptions import RESTError From 56c78ee39f8dc82f60ce2dd76c0a0637e930ce9b Mon Sep 17 00:00:00 2001 From: Nick Stocchero Date: Mon, 21 Sep 2020 17:42:22 -0600 Subject: [PATCH 544/658] use get instead of direct dict access --- moto/iam/config.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/moto/iam/config.py b/moto/iam/config.py index 2f2cafa5f65d..018709346e2f 100644 --- a/moto/iam/config.py +++ b/moto/iam/config.py @@ -61,10 +61,10 @@ def list_config_service_resources( ) or aggregator.get("organization_aggregation_source") for source in aggregator_sources: source_dict = source.__dict__ - if source_dict["all_aws_regions"]: + if source_dict.get("all_aws_regions", False): aggregated_regions = boto3.Session().get_available_regions("config") break - for region in source_dict["aws_regions"]: + for region in source_dict.get("aws_regions", []): aggregated_regions.append(region) duplicate_role_list = [] From e2fe33bf07786d2119024ed200c6c8ed23b543ae Mon Sep 17 00:00:00 2001 From: Nick Stocchero Date: Mon, 21 Sep 2020 17:56:04 -0600 Subject: [PATCH 545/658] duplicate dict.get logic --- moto/iam/config.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/moto/iam/config.py b/moto/iam/config.py index 018709346e2f..cf116f945ae7 100644 --- a/moto/iam/config.py +++ b/moto/iam/config.py @@ -218,10 +218,10 @@ def list_config_service_resources( ) or aggregator.get("organization_aggregation_source") for source in aggregator_sources: source_dict = source.__dict__ - if source_dict["all_aws_regions"]: + if source_dict.get("all_aws_regions", False): aggregated_regions = boto3.Session().get_available_regions("config") 
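# (The region list above comes from botocore's bundled endpoints.json via get_available_regions("config"), so it tracks the installed botocore release rather than a hand-maintained list.)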
break - for region in source_dict["aws_regions"]: + for region in source_dict.get("aws_regions", []): aggregated_regions.append(region) duplicate_policy_list = [] From 958e95cf5c872e69ac4bade441f808b6f5d9dfff Mon Sep 17 00:00:00 2001 From: Ben Dennerley Date: Tue, 22 Sep 2020 05:28:12 -0400 Subject: [PATCH 546/658] Make IoT certificate ID generation deterministic and prevent duplicate certificates from being created (#3331) * Make IoT certificate id generation deterministic Fixes #3321 As per https://stackoverflow.com/questions/55847788/how-does-aws-iot-generate-a-certificate-id, the IoT certificate ID is the SHA256 fingerprint of the certificate. Since moto doesn't generate full certificates we will instead use the SHA256 hash of the passed certificate pem. * Don't allow duplicate IoT certificates to be created Fixes #3320 When using boto3, trying to register a certificate that already exists will throw a ResourceAlreadyExistsException. Moto should follow the same pattern to allow testing error handling code in this area. --- moto/iot/exceptions.py | 8 ++++++++ moto/iot/models.py | 15 +++++++++++++-- tests/test_iot/test_iot.py | 37 +++++++++++++++++++++++++++++++++++++ 3 files changed, 58 insertions(+), 2 deletions(-) diff --git a/moto/iot/exceptions.py b/moto/iot/exceptions.py index 7a578c22173d..e3acf9690c17 100644 --- a/moto/iot/exceptions.py +++ b/moto/iot/exceptions.py @@ -52,3 +52,11 @@ class DeleteConflictException(IoTClientError): def __init__(self, msg): self.code = 409 super(DeleteConflictException, self).__init__("DeleteConflictException", msg) + + +class ResourceAlreadyExistsException(IoTClientError): + def __init__(self, msg): + self.code = 409 + super(ResourceAlreadyExistsException, self).__init__( + "ResourceAlreadyExistsException", msg or "The resource already exists." 
+ ) diff --git a/moto/iot/models.py b/moto/iot/models.py index ebd15d10a721..258a387fe02e 100644 --- a/moto/iot/models.py +++ b/moto/iot/models.py @@ -19,6 +19,7 @@ InvalidRequestException, InvalidStateTransitionException, VersionConflictException, + ResourceAlreadyExistsException, ) from moto.utilities.utils import random_string @@ -130,7 +131,7 @@ def to_dict(self): class FakeCertificate(BaseModel): def __init__(self, certificate_pem, status, region_name, ca_certificate_pem=None): m = hashlib.sha256() - m.update(str(uuid.uuid4()).encode("utf-8")) + m.update(certificate_pem.encode("utf-8")) self.certificate_id = m.hexdigest() self.arn = "arn:aws:iot:%s:1:cert/%s" % (region_name, self.certificate_id) self.certificate_pem = certificate_pem @@ -145,7 +146,7 @@ def __init__(self, certificate_pem, status, region_name, ca_certificate_pem=None self.ca_certificate_id = None self.ca_certificate_pem = ca_certificate_pem if ca_certificate_pem: - m.update(str(uuid.uuid4()).encode("utf-8")) + m.update(ca_certificate_pem.encode("utf-8")) self.ca_certificate_id = m.hexdigest() def to_dict(self): @@ -668,6 +669,12 @@ def describe_certificate(self, certificate_id): def list_certificates(self): return self.certificates.values() + def __raise_if_certificate_already_exists(self, certificate_id): + if certificate_id in self.certificates: + raise ResourceAlreadyExistsException( + "The certificate is already provisioned or registered" + ) + def register_certificate( self, certificate_pem, ca_certificate_pem, set_as_active, status ): @@ -677,11 +684,15 @@ def register_certificate( self.region_name, ca_certificate_pem, ) + self.__raise_if_certificate_already_exists(certificate.certificate_id) + self.certificates[certificate.certificate_id] = certificate return certificate def register_certificate_without_ca(self, certificate_pem, status): certificate = FakeCertificate(certificate_pem, status, self.region_name) + self.__raise_if_certificate_already_exists(certificate.certificate_id) + self.certificates[certificate.certificate_id] = certificate return certificate diff --git a/tests/test_iot/test_iot.py b/tests/test_iot/test_iot.py index 12e1ff7b0e2f..7a04cdc16008 100644 --- a/tests/test_iot/test_iot.py +++ b/tests/test_iot/test_iot.py @@ -503,6 +503,20 @@ def test_endpoints(): raise Exception("Should have raised error") +@mock_iot +def test_certificate_id_generation_deterministic(): + # Creating the same certificate twice should result in the same certificate ID + client = boto3.client("iot", region_name="us-east-1") + cert1 = client.create_keys_and_certificate(setAsActive=False) + client.delete_certificate(certificateId=cert1["certificateId"]) + + cert2 = client.register_certificate( + certificatePem=cert1["certificatePem"], setAsActive=False + ) + cert2.should.have.key("certificateId").which.should.equal(cert1["certificateId"]) + client.delete_certificate(certificateId=cert2["certificateId"]) + + @mock_iot def test_certs(): client = boto3.client("iot", region_name="us-east-1") @@ -584,6 +598,29 @@ def test_certs(): res.should.have.key("certificates") +@mock_iot +def test_create_certificate_validation(): + # Test we can't create a cert that already exists + client = boto3.client("iot", region_name="us-east-1") + cert = client.create_keys_and_certificate(setAsActive=False) + + with assert_raises(ClientError) as e: + client.register_certificate( + certificatePem=cert["certificatePem"], setAsActive=False + ) + e.exception.response["Error"]["Message"].should.contain( + "The certificate is already provisioned or 
registered" + ) + + with assert_raises(ClientError) as e: + client.register_certificate_without_ca( + certificatePem=cert["certificatePem"], status="ACTIVE" + ) + e.exception.response["Error"]["Message"].should.contain( + "The certificate is already provisioned or registered" + ) + + @mock_iot def test_delete_policy_validation(): doc = """{ From 427a222aa02eca40421dffa859d2f445c277a590 Mon Sep 17 00:00:00 2001 From: Macwan Nevil Date: Tue, 22 Sep 2020 17:13:59 +0530 Subject: [PATCH 547/658] feature added: support for api RolePermissionsBoundary (#3329) * feature added: support for api PutUserPermissionsBoundary; DeleteRolePermissionsBoundary * minor test fix * lint fixed * refractored test case * Issue 3224 s3 copy glacier object (#3318) * 3224 Enhancement - S3 Copy restored glacier objects - adds setter for expiry date - copy sets expiry date to none when source is glacier object - throws error for copying glacier object only if not restored/still restoring * 3224 Enhancement - S3 Copy restored glacier objects - throws error for copying deep archive object only if not restored/still restoring * Fix:s3 List Object response:delimiter (#3254) * Fix:s3 List Object delimiter in response * fixed tests * fixed failed tests Co-authored-by: usmankb * feature added: support for api PutUserPermissionsBoundary; DeleteRolePermissionsBoundary * minor test fix * lint fixed * refractored test case * added test case for put role exception Co-authored-by: ruthbovell <63656505+ruthbovell@users.noreply.github.com> Co-authored-by: usmangani1 Co-authored-by: usmankb --- moto/iam/models.py | 17 +++++++++++++++++ moto/iam/responses.py | 19 +++++++++++++++++++ tests/test_iam/test_iam.py | 16 +++++++++++++--- 3 files changed, 49 insertions(+), 3 deletions(-) diff --git a/moto/iam/models.py b/moto/iam/models.py index 617da69b0e78..3e7b638b24d1 100755 --- a/moto/iam/models.py +++ b/moto/iam/models.py @@ -1435,6 +1435,23 @@ def update_role(self, role_name, role_description, max_session_duration): role.max_session_duration = max_session_duration return role + def put_role_permissions_boundary(self, role_name, permissions_boundary): + if permissions_boundary and not self.policy_arn_regex.match( + permissions_boundary + ): + raise RESTError( + "InvalidParameterValue", + "Value ({}) for parameter PermissionsBoundary is invalid.".format( + permissions_boundary + ), + ) + role = self.get_role(role_name) + role.permissions_boundary = permissions_boundary + + def delete_role_permissions_boundary(self, role_name): + role = self.get_role(role_name) + role.permissions_boundary = None + def detach_role_policy(self, policy_arn, role_name): arns = dict((p.arn, p) for p in self.managed_policies.values()) try: diff --git a/moto/iam/responses.py b/moto/iam/responses.py index 6f785f8acc62..88ab9aef1d3c 100644 --- a/moto/iam/responses.py +++ b/moto/iam/responses.py @@ -265,6 +265,19 @@ def update_role(self): template = self.response_template(UPDATE_ROLE_TEMPLATE) return template.render(role=role) + def put_role_permissions_boundary(self): + permissions_boundary = self._get_param("PermissionsBoundary") + role_name = self._get_param("RoleName") + iam_backend.put_role_permissions_boundary(role_name, permissions_boundary) + template = self.response_template(GENERIC_EMPTY_TEMPLATE) + return template.render(name="PutRolePermissionsBoundary") + + def delete_role_permissions_boundary(self): + role_name = self._get_param("RoleName") + iam_backend.delete_role_permissions_boundary(role_name) + template = 
self.response_template(GENERIC_EMPTY_TEMPLATE) + return template.render(name="DeleteRolePermissionsBoundary") + def create_policy_version(self): policy_arn = self._get_param("PolicyArn") policy_document = self._get_param("PolicyDocument") @@ -1315,6 +1328,12 @@ def get_account_summary(self): {{ role.created_iso_8601 }} {{ role.id }} {{ role.max_session_duration }} + {% if role.permissions_boundary %} + + PermissionsBoundaryPolicy + {{ role.permissions_boundary }} + + {% endif %} {% if role.tags %} {% for tag in role.get_tags() %} diff --git a/tests/test_iam/test_iam.py b/tests/test_iam/test_iam.py index e1bc93d57459..e9d5e8a4d8f6 100644 --- a/tests/test_iam/test_iam.py +++ b/tests/test_iam/test_iam.py @@ -869,9 +869,7 @@ def test_list_access_keys(): conn = boto3.client("iam", region_name="us-east-1") conn.create_user(UserName="my-user") response = conn.list_access_keys(UserName="my-user") - assert_equals( - response["AccessKeyMetadata"], [], - ) + assert_equals(response["AccessKeyMetadata"], []) access_key = conn.create_access_key(UserName="my-user")["AccessKey"] response = conn.list_access_keys(UserName="my-user") assert_equals( @@ -2377,7 +2375,19 @@ def test_create_role_with_permissions_boundary(): resp.get("Role").get("PermissionsBoundary").should.equal(expected) resp.get("Role").get("Description").should.equal("test") + conn.delete_role_permissions_boundary(RoleName="my-role") + conn.list_roles().get("Roles")[0].should_not.have.key("PermissionsBoundary") + + conn.put_role_permissions_boundary(RoleName="my-role", PermissionsBoundary=boundary) + resp.get("Role").get("PermissionsBoundary").should.equal(expected) + invalid_boundary_arn = "arn:aws:iam::123456789:not_a_boundary" + + with assert_raises(ClientError): + conn.put_role_permissions_boundary( + RoleName="my-role", PermissionsBoundary=invalid_boundary_arn + ) + with assert_raises(ClientError): conn.create_role( RoleName="bad-boundary", From cd20668e9c118dd6f430c9e66eef073db42ea410 Mon Sep 17 00:00:00 2001 From: jweite Date: Wed, 23 Sep 2020 06:21:45 -0400 Subject: [PATCH 548/658] Support for autoscaling policies in run_jobflow, add_instance_group and list_instance_groups. (#3288) Support for cluster_id parameter substitution in autoscaling policy cloudwatch alarm dimensions. 
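For example (illustrative values), a rule dimension supplied as {"Key": "JobFlowId", "Value": "${emr.clusterId}"} comes back with the placeholder replaced by the generated cluster id, e.g. {"Key": "JobFlowId", "Value": "j-ABC123XYZ0"}.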
New operations put_autoscaling_policy and remove_autoscaling_policy support Co-authored-by: Joseph Weitekamp --- moto/emr/models.py | 61 +++++++- moto/emr/responses.py | 243 ++++++++++++++++++++++++++++++- moto/emr/utils.py | 107 ++++++++++++++ tests/test_emr/test_emr_boto3.py | 136 ++++++++++++++++- 4 files changed, 537 insertions(+), 10 deletions(-) diff --git a/moto/emr/models.py b/moto/emr/models.py index 72c588166c59..63aadf105b08 100644 --- a/moto/emr/models.py +++ b/moto/emr/models.py @@ -7,7 +7,12 @@ from dateutil.parser import parse as dtparse from moto.core import BaseBackend, BaseModel from moto.emr.exceptions import EmrError -from .utils import random_instance_group_id, random_cluster_id, random_step_id +from .utils import ( + random_instance_group_id, + random_cluster_id, + random_step_id, + CamelToUnderscoresWalker, +) class FakeApplication(BaseModel): @@ -28,6 +33,7 @@ def __init__(self, args, name, script_path): class FakeInstanceGroup(BaseModel): def __init__( self, + cluster_id, instance_count, instance_role, instance_type, @@ -36,8 +42,10 @@ def __init__( id=None, bid_price=None, ebs_configuration=None, + auto_scaling_policy=None, ): self.id = id or random_instance_group_id() + self.cluster_id = cluster_id self.bid_price = bid_price self.market = market @@ -53,7 +61,7 @@ def __init__( self.role = instance_role self.type = instance_type self.ebs_configuration = ebs_configuration - + self.auto_scaling_policy = auto_scaling_policy self.creation_datetime = datetime.now(pytz.utc) self.start_datetime = datetime.now(pytz.utc) self.ready_datetime = datetime.now(pytz.utc) @@ -63,6 +71,34 @@ def __init__( def set_instance_count(self, instance_count): self.num_instances = instance_count + @property + def auto_scaling_policy(self): + return self._auto_scaling_policy + + @auto_scaling_policy.setter + def auto_scaling_policy(self, value): + if value is None: + self._auto_scaling_policy = value + return + self._auto_scaling_policy = CamelToUnderscoresWalker.parse(value) + self._auto_scaling_policy["status"] = {"state": "ATTACHED"} + # Transform common ${emr.clusterId} placeholder in any dimensions it occurs in. 
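+ # EMR's automatic scaling rules can reference the owning cluster with the
+ # ${emr.clusterId} placeholder; the real service substitutes the actual cluster id,
+ # so the mock does the same below with the id generated for this cluster.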
+ if "rules" in self._auto_scaling_policy: + for rule in self._auto_scaling_policy["rules"]: + if ( + "trigger" in rule + and "cloud_watch_alarm_definition" in rule["trigger"] + and "dimensions" in rule["trigger"]["cloud_watch_alarm_definition"] + ): + for dimension in rule["trigger"]["cloud_watch_alarm_definition"][ + "dimensions" + ]: + if ( + "value" in dimension + and dimension["value"] == "${emr.clusterId}" + ): + dimension["value"] = self.cluster_id + class FakeStep(BaseModel): def __init__( @@ -319,7 +355,7 @@ def add_instance_groups(self, cluster_id, instance_groups): cluster = self.clusters[cluster_id] result_groups = [] for instance_group in instance_groups: - group = FakeInstanceGroup(**instance_group) + group = FakeInstanceGroup(cluster_id=cluster_id, **instance_group) self.instance_groups[group.id] = group cluster.add_instance_group(group) result_groups.append(group) @@ -465,6 +501,25 @@ def terminate_job_flows(self, job_flow_ids): clusters.append(cluster) return clusters + def put_auto_scaling_policy(self, instance_group_id, auto_scaling_policy): + instance_groups = self.get_instance_groups( + instance_group_ids=[instance_group_id] + ) + if len(instance_groups) == 0: + return None + instance_group = instance_groups[0] + instance_group.auto_scaling_policy = auto_scaling_policy + return instance_group + + def remove_auto_scaling_policy(self, cluster_id, instance_group_id): + instance_groups = self.get_instance_groups( + instance_group_ids=[instance_group_id] + ) + if len(instance_groups) == 0: + return None + instance_group = instance_groups[0] + instance_group.auto_scaling_policy = None + emr_backends = {} for region in Session().get_available_regions("emr"): diff --git a/moto/emr/responses.py b/moto/emr/responses.py index d2b234ced0be..38a33519c90b 100644 --- a/moto/emr/responses.py +++ b/moto/emr/responses.py @@ -13,7 +13,7 @@ from moto.core.utils import tags_from_query_string from .exceptions import EmrError from .models import emr_backends -from .utils import steps_from_query_string +from .utils import steps_from_query_string, Unflattener def generate_boto3_response(operation): @@ -76,6 +76,8 @@ def add_instance_groups(self): item["instance_count"] = int(item["instance_count"]) # Adding support to EbsConfiguration self._parse_ebs_configuration(item) + # Adding support for auto_scaling_policy + Unflattener.unflatten_complex_params(item, "auto_scaling_policy") instance_groups = self.backend.add_instance_groups(jobflow_id, instance_groups) template = self.response_template(ADD_INSTANCE_GROUPS_TEMPLATE) return template.render(instance_groups=instance_groups) @@ -329,6 +331,8 @@ def run_job_flow(self): ig["instance_count"] = int(ig["instance_count"]) # Adding support to EbsConfiguration self._parse_ebs_configuration(ig) + # Adding support for auto_scaling_policy + Unflattener.unflatten_complex_params(ig, "auto_scaling_policy") self.backend.add_instance_groups(cluster.id, instance_groups) tags = self._get_list_prefix("Tags.member") @@ -442,6 +446,25 @@ def terminate_job_flows(self): template = self.response_template(TERMINATE_JOB_FLOWS_TEMPLATE) return template.render() + @generate_boto3_response("PutAutoScalingPolicy") + def put_auto_scaling_policy(self): + cluster_id = self._get_param("ClusterId") + instance_group_id = self._get_param("InstanceGroupId") + auto_scaling_policy = self._get_param("AutoScalingPolicy") + instance_group = self.backend.put_auto_scaling_policy( + instance_group_id, auto_scaling_policy + ) + template = self.response_template(PUT_AUTO_SCALING_POLICY) + 
return template.render(cluster_id=cluster_id, instance_group=instance_group) + + @generate_boto3_response("RemoveAutoScalingPolicy") + def remove_auto_scaling_policy(self): + cluster_id = self._get_param("ClusterId") + instance_group_id = self._get_param("InstanceGroupId") + instance_group = self.backend.put_auto_scaling_policy(instance_group_id, None) + template = self.response_template(REMOVE_AUTO_SCALING_POLICY) + return template.render(cluster_id=cluster_id, instance_group=instance_group) + ADD_INSTANCE_GROUPS_TEMPLATE = """ @@ -854,6 +877,107 @@ def terminate_job_flows(self): {% endfor %} {% endif %} + {% if instance_group.auto_scaling_policy is not none %} + + {% if instance_group.auto_scaling_policy.constraints is not none %} + + {% if instance_group.auto_scaling_policy.constraints.min_capacity is not none %} + {{instance_group.auto_scaling_policy.constraints.min_capacity}} + {% endif %} + {% if instance_group.auto_scaling_policy.constraints.max_capacity is not none %} + {{instance_group.auto_scaling_policy.constraints.max_capacity}} + {% endif %} + + {% endif %} + {% if instance_group.auto_scaling_policy.rules is not none %} + + {% for rule in instance_group.auto_scaling_policy.rules %} + + {% if 'name' in rule %} + {{rule['name']}} + {% endif %} + {% if 'description' in rule %} + {{rule['description']}} + {% endif %} + {% if 'action' in rule %} + + {% if 'market' in rule['action'] %} + {{rule['action']['market']}} + {% endif %} + {% if 'simple_scaling_policy_configuration' in rule['action'] %} + + {% if 'adjustment_type' in rule['action']['simple_scaling_policy_configuration'] %} + {{rule['action']['simple_scaling_policy_configuration']['adjustment_type']}} + {% endif %} + {% if 'scaling_adjustment' in rule['action']['simple_scaling_policy_configuration'] %} + {{rule['action']['simple_scaling_policy_configuration']['scaling_adjustment']}} + {% endif %} + {% if 'cool_down' in rule['action']['simple_scaling_policy_configuration'] %} + {{rule['action']['simple_scaling_policy_configuration']['cool_down']}} + {% endif %} + + {% endif %} + + {% endif %} + {% if 'trigger' in rule %} + + {% if 'cloud_watch_alarm_definition' in rule['trigger'] %} + + {% if 'comparison_operator' in rule['trigger']['cloud_watch_alarm_definition'] %} + {{rule['trigger']['cloud_watch_alarm_definition']['comparison_operator']}} + {% endif %} + {% if 'evaluation_periods' in rule['trigger']['cloud_watch_alarm_definition'] %} + {{rule['trigger']['cloud_watch_alarm_definition']['evaluation_periods']}} + {% endif %} + {% if 'metric_name' in rule['trigger']['cloud_watch_alarm_definition'] %} + {{rule['trigger']['cloud_watch_alarm_definition']['metric_name']}} + {% endif %} + {% if 'namespace' in rule['trigger']['cloud_watch_alarm_definition'] %} + {{rule['trigger']['cloud_watch_alarm_definition']['namespace']}} + {% endif %} + {% if 'period' in rule['trigger']['cloud_watch_alarm_definition'] %} + {{rule['trigger']['cloud_watch_alarm_definition']['period']}} + {% endif %} + {% if 'statistic' in rule['trigger']['cloud_watch_alarm_definition'] %} + {{rule['trigger']['cloud_watch_alarm_definition']['statistic']}} + {% endif %} + {% if 'threshold' in rule['trigger']['cloud_watch_alarm_definition'] %} + {{rule['trigger']['cloud_watch_alarm_definition']['threshold']}} + {% endif %} + {% if 'unit' in rule['trigger']['cloud_watch_alarm_definition'] %} + {{rule['trigger']['cloud_watch_alarm_definition']['unit']}} + {% endif %} + {% if 'dimensions' in rule['trigger']['cloud_watch_alarm_definition'] %} + + {% for dimension in 
rule['trigger']['cloud_watch_alarm_definition']['dimensions'] %} + + {% if 'key' in dimension %} + {{dimension['key']}} + {% endif %} + {% if 'value' in dimension %} + {{dimension['value']}} + {% endif %} + + {% endfor %} + + {% endif %} + + {% endif %} + + {% endif %} + + {% endfor %} + + {% endif %} + {% if instance_group.auto_scaling_policy.status is not none %} + + {% if 'state' in instance_group.auto_scaling_policy.status %} + {{instance_group.auto_scaling_policy.status['state']}} + {% endif %} + + {% endif %} + + {% endif %} {% if instance_group.ebs_optimized is not none %} {{ instance_group.ebs_optimized }} {% endif %} @@ -989,3 +1113,120 @@ def terminate_job_flows(self): 2690d7eb-ed86-11dd-9877-6fad448a8419 """ + +PUT_AUTO_SCALING_POLICY = """ + + {{cluster_id}} + {{instance_group.id}} + {% if instance_group.auto_scaling_policy is not none %} + + {% if instance_group.auto_scaling_policy.constraints is not none %} + + {% if instance_group.auto_scaling_policy.constraints.min_capacity is not none %} + {{instance_group.auto_scaling_policy.constraints.min_capacity}} + {% endif %} + {% if instance_group.auto_scaling_policy.constraints.max_capacity is not none %} + {{instance_group.auto_scaling_policy.constraints.max_capacity}} + {% endif %} + + {% endif %} + {% if instance_group.auto_scaling_policy.rules is not none %} + + {% for rule in instance_group.auto_scaling_policy.rules %} + + {% if 'name' in rule %} + {{rule['name']}} + {% endif %} + {% if 'description' in rule %} + {{rule['description']}} + {% endif %} + {% if 'action' in rule %} + + {% if 'market' in rule['action'] %} + {{rule['action']['market']}} + {% endif %} + {% if 'simple_scaling_policy_configuration' in rule['action'] %} + + {% if 'adjustment_type' in rule['action']['simple_scaling_policy_configuration'] %} + {{rule['action']['simple_scaling_policy_configuration']['adjustment_type']}} + {% endif %} + {% if 'scaling_adjustment' in rule['action']['simple_scaling_policy_configuration'] %} + {{rule['action']['simple_scaling_policy_configuration']['scaling_adjustment']}} + {% endif %} + {% if 'cool_down' in rule['action']['simple_scaling_policy_configuration'] %} + {{rule['action']['simple_scaling_policy_configuration']['cool_down']}} + {% endif %} + + {% endif %} + + {% endif %} + {% if 'trigger' in rule %} + + {% if 'cloud_watch_alarm_definition' in rule['trigger'] %} + + {% if 'comparison_operator' in rule['trigger']['cloud_watch_alarm_definition'] %} + {{rule['trigger']['cloud_watch_alarm_definition']['comparison_operator']}} + {% endif %} + {% if 'evaluation_periods' in rule['trigger']['cloud_watch_alarm_definition'] %} + {{rule['trigger']['cloud_watch_alarm_definition']['evaluation_periods']}} + {% endif %} + {% if 'metric_name' in rule['trigger']['cloud_watch_alarm_definition'] %} + {{rule['trigger']['cloud_watch_alarm_definition']['metric_name']}} + {% endif %} + {% if 'namespace' in rule['trigger']['cloud_watch_alarm_definition'] %} + {{rule['trigger']['cloud_watch_alarm_definition']['namespace']}} + {% endif %} + {% if 'period' in rule['trigger']['cloud_watch_alarm_definition'] %} + {{rule['trigger']['cloud_watch_alarm_definition']['period']}} + {% endif %} + {% if 'statistic' in rule['trigger']['cloud_watch_alarm_definition'] %} + {{rule['trigger']['cloud_watch_alarm_definition']['statistic']}} + {% endif %} + {% if 'threshold' in rule['trigger']['cloud_watch_alarm_definition'] %} + {{rule['trigger']['cloud_watch_alarm_definition']['threshold']}} + {% endif %} + {% if 'unit' in 
rule['trigger']['cloud_watch_alarm_definition'] %} + {{rule['trigger']['cloud_watch_alarm_definition']['unit']}} + {% endif %} + {% if 'dimensions' in rule['trigger']['cloud_watch_alarm_definition'] %} + + {% for dimension in rule['trigger']['cloud_watch_alarm_definition']['dimensions'] %} + + {% if 'key' in dimension %} + {{dimension['key']}} + {% endif %} + {% if 'value' in dimension %} + {{dimension['value']}} + {% endif %} + + {% endfor %} + + {% endif %} + + {% endif %} + + {% endif %} + + {% endfor %} + + {% endif %} + {% if instance_group.auto_scaling_policy.status is not none %} + + {% if 'state' in instance_group.auto_scaling_policy.status %} + {{instance_group.auto_scaling_policy.status['state']}} + {% endif %} + + {% endif %} + + {% endif %} + + + d47379d9-b505-49af-9335-a68950d82535 + +""" + +REMOVE_AUTO_SCALING_POLICY = """ + + c04a1042-5340-4c0a-a7b5-7779725ce4f7 + +""" diff --git a/moto/emr/utils.py b/moto/emr/utils.py index fb33214c8592..4d9da84349c5 100644 --- a/moto/emr/utils.py +++ b/moto/emr/utils.py @@ -1,6 +1,7 @@ from __future__ import unicode_literals import random import string +from moto.core.utils import camelcase_to_underscores import six @@ -37,3 +38,109 @@ def steps_from_query_string(querystring_dict): idx += 1 steps.append(step) return steps + + +class Unflattener: + @staticmethod + def unflatten_complex_params(input_dict, param_name): + """ Function to unflatten (portions of) dicts with complex keys. The moto request parser flattens the incoming + request bodies, which is generally helpful, but for nested dicts/lists can result in a hard-to-manage + parameter explosion. This function allows one to selectively unflatten a set of dict keys, replacing them + with a deep dict/list structure named identically to the root component in the complex name. + + Complex keys are composed of multiple components + separated by periods. Components may be prefixed with _, which is stripped. List indexes are represented + with two components, 'member' and the index number. 
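+
+ For example (an illustrative key, not taken from a real request):
+ unflatten_complex_params({"AutoScalingPolicy.Rules.member.1.Name": "rule-1"}, "AutoScalingPolicy")
+ rewrites the dict in place to {"AutoScalingPolicy": {"Rules": [{"Name": "rule-1"}]}}.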
""" + items_to_process = {} + for k in input_dict.keys(): + if k.startswith(param_name): + items_to_process[k] = input_dict[k] + if len(items_to_process) == 0: + return + + for k in items_to_process.keys(): + del input_dict[k] + + for k in items_to_process.keys(): + Unflattener._set_deep(k, input_dict, items_to_process[k]) + + @staticmethod + def _set_deep(complex_key, container, value): + keys = complex_key.split(".") + keys.reverse() + + while len(keys) > 0: + if len(keys) == 1: + key = keys.pop().strip("_") + Unflattener._add_to_container(container, key, value) + else: + key = keys.pop().strip("_") + if keys[-1] == "member": + keys.pop() + if not Unflattener._key_in_container(container, key): + container = Unflattener._add_to_container(container, key, []) + else: + container = Unflattener._get_child(container, key) + else: + if not Unflattener._key_in_container(container, key): + container = Unflattener._add_to_container(container, key, {}) + else: + container = Unflattener._get_child(container, key) + + @staticmethod + def _add_to_container(container, key, value): + if type(container) is dict: + container[key] = value + elif type(container) is list: + i = int(key) + while len(container) < i: + container.append(None) + container[i - 1] = value + return value + + @staticmethod + def _get_child(container, key): + if type(container) is dict: + return container[key] + elif type(container) is list: + i = int(key) + return container[i - 1] + + @staticmethod + def _key_in_container(container, key): + if type(container) is dict: + return key in container + elif type(container) is list: + i = int(key) + return len(container) >= i + + +class CamelToUnderscoresWalker: + """A class to convert the keys in dict/list hierarchical data structures from CamelCase to snake_case (underscores)""" + + @staticmethod + def parse(x): + if isinstance(x, dict): + return CamelToUnderscoresWalker.parse_dict(x) + elif isinstance(x, list): + return CamelToUnderscoresWalker.parse_list(x) + else: + return CamelToUnderscoresWalker.parse_scalar(x) + + @staticmethod + def parse_dict(x): + temp = {} + for key in x.keys(): + temp[camelcase_to_underscores(key)] = CamelToUnderscoresWalker.parse(x[key]) + return temp + + @staticmethod + def parse_list(x): + temp = [] + for i in x: + temp.append(CamelToUnderscoresWalker.parse(i)) + return temp + + @staticmethod + def parse_scalar(x): + return x diff --git a/tests/test_emr/test_emr_boto3.py b/tests/test_emr/test_emr_boto3.py index adfc3fa9ca5b..3f577c69a4d2 100644 --- a/tests/test_emr/test_emr_boto3.py +++ b/tests/test_emr/test_emr_boto3.py @@ -476,6 +476,118 @@ def test_run_job_flow_with_instance_groups(): _do_assertion_ebs_configuration(x, y) +auto_scaling_policy = { + "Constraints": {"MinCapacity": 2, "MaxCapacity": 10}, + "Rules": [ + { + "Name": "Default-scale-out", + "Description": "Replicates the default scale-out rule in the console for YARN memory.", + "Action": { + "SimpleScalingPolicyConfiguration": { + "AdjustmentType": "CHANGE_IN_CAPACITY", + "ScalingAdjustment": 1, + "CoolDown": 300, + } + }, + "Trigger": { + "CloudWatchAlarmDefinition": { + "ComparisonOperator": "LESS_THAN", + "EvaluationPeriods": 1, + "MetricName": "YARNMemoryAvailablePercentage", + "Namespace": "AWS/ElasticMapReduce", + "Period": 300, + "Threshold": 15.0, + "Statistic": "AVERAGE", + "Unit": "PERCENT", + "Dimensions": [{"Key": "JobFlowId", "Value": "${emr.clusterId}"}], + } + }, + } + ], +} + + +@mock_emr +def test_run_job_flow_with_instance_groups_with_autoscaling(): + input_groups = 
dict((g["Name"], g) for g in input_instance_groups) + + input_groups["core"]["AutoScalingPolicy"] = auto_scaling_policy + input_groups["task-1"]["AutoScalingPolicy"] = auto_scaling_policy + + client = boto3.client("emr", region_name="us-east-1") + args = deepcopy(run_job_flow_args) + args["Instances"] = {"InstanceGroups": input_instance_groups} + cluster_id = client.run_job_flow(**args)["JobFlowId"] + groups = client.list_instance_groups(ClusterId=cluster_id)["InstanceGroups"] + for x in groups: + y = deepcopy(input_groups[x["Name"]]) + if "AutoScalingPolicy" in y: + x["AutoScalingPolicy"]["Status"]["State"].should.equal("ATTACHED") + returned_policy = deepcopy(x["AutoScalingPolicy"]) + auto_scaling_policy_with_cluster_id = _patch_cluster_id_placeholder_in_autoscaling_policy( + y["AutoScalingPolicy"], cluster_id + ) + del returned_policy["Status"] + returned_policy.should.equal(auto_scaling_policy_with_cluster_id) + + +@mock_emr +def test_put_remove_auto_scaling_policy(): + input_groups = dict((g["Name"], g) for g in input_instance_groups) + client = boto3.client("emr", region_name="us-east-1") + args = deepcopy(run_job_flow_args) + args["Instances"] = {"InstanceGroups": input_instance_groups} + cluster_id = client.run_job_flow(**args)["JobFlowId"] + + core_instance_group = [ + ig + for ig in client.list_instance_groups(ClusterId=cluster_id)["InstanceGroups"] + if ig["InstanceGroupType"] == "CORE" + ][0] + + resp = client.put_auto_scaling_policy( + ClusterId=cluster_id, + InstanceGroupId=core_instance_group["Id"], + AutoScalingPolicy=auto_scaling_policy, + ) + + auto_scaling_policy_with_cluster_id = _patch_cluster_id_placeholder_in_autoscaling_policy( + auto_scaling_policy, cluster_id + ) + del resp["AutoScalingPolicy"]["Status"] + resp["AutoScalingPolicy"].should.equal(auto_scaling_policy_with_cluster_id) + + core_instance_group = [ + ig + for ig in client.list_instance_groups(ClusterId=cluster_id)["InstanceGroups"] + if ig["InstanceGroupType"] == "CORE" + ][0] + + ("AutoScalingPolicy" in core_instance_group).should.equal(True) + + client.remove_auto_scaling_policy( + ClusterId=cluster_id, InstanceGroupId=core_instance_group["Id"] + ) + + core_instance_group = [ + ig + for ig in client.list_instance_groups(ClusterId=cluster_id)["InstanceGroups"] + if ig["InstanceGroupType"] == "CORE" + ][0] + + ("AutoScalingPolicy" not in core_instance_group).should.equal(True) + + +def _patch_cluster_id_placeholder_in_autoscaling_policy( + auto_scaling_policy, cluster_id +): + policy_copy = deepcopy(auto_scaling_policy) + for rule in policy_copy["Rules"]: + for dimension in rule["Trigger"]["CloudWatchAlarmDefinition"]["Dimensions"]: + dimension["Value"] = cluster_id + return policy_copy + + @mock_emr def test_run_job_flow_with_custom_ami(): client = boto3.client("emr", region_name="us-east-1") @@ -619,8 +731,11 @@ def test_instance_groups(): jf = client.describe_job_flows(JobFlowIds=[cluster_id])["JobFlows"][0] base_instance_count = jf["Instances"]["InstanceCount"] + instance_groups_to_add = deepcopy(input_instance_groups[2:]) + instance_groups_to_add[0]["AutoScalingPolicy"] = auto_scaling_policy + instance_groups_to_add[1]["AutoScalingPolicy"] = auto_scaling_policy client.add_instance_groups( - JobFlowId=cluster_id, InstanceGroups=input_instance_groups[2:] + JobFlowId=cluster_id, InstanceGroups=instance_groups_to_add ) jf = client.describe_job_flows(JobFlowIds=[cluster_id])["JobFlows"][0] @@ -629,8 +744,8 @@ def test_instance_groups(): ) for x in jf["Instances"]["InstanceGroups"]: y = 
input_groups[x["Name"]] - if hasattr(y, "BidPrice"): - x["BidPrice"].should.equal("BidPrice") + if "BidPrice" in y: + x["BidPrice"].should.equal(y["BidPrice"]) x["CreationDateTime"].should.be.a("datetime.datetime") # x['EndDateTime'].should.be.a('datetime.datetime') x.should.have.key("InstanceGroupId") @@ -647,9 +762,18 @@ def test_instance_groups(): groups = client.list_instance_groups(ClusterId=cluster_id)["InstanceGroups"] for x in groups: - y = input_groups[x["Name"]] - if hasattr(y, "BidPrice"): - x["BidPrice"].should.equal("BidPrice") + y = deepcopy(input_groups[x["Name"]]) + if "BidPrice" in y: + x["BidPrice"].should.equal(y["BidPrice"]) + if "AutoScalingPolicy" in y: + x["AutoScalingPolicy"]["Status"]["State"].should.equal("ATTACHED") + returned_policy = dict(x["AutoScalingPolicy"]) + del returned_policy["Status"] + for dimension in y["AutoScalingPolicy"]["Rules"]["Trigger"][ + "CloudWatchAlarmDefinition" + ]["Dimensions"]: + dimension["Value"] = cluster_id + returned_policy.should.equal(y["AutoScalingPolicy"]) if "EbsConfiguration" in y: _do_assertion_ebs_configuration(x, y) # Configurations From a4701dbbe692826ab1cd75a1d54fc4fe7f19f9ae Mon Sep 17 00:00:00 2001 From: Wolfgang Bauer Date: Fri, 25 Sep 2020 16:25:30 +0200 Subject: [PATCH 549/658] Add tags to Elastic IP Addresses (#3310) * Make ElasticAddress a tagged resource To be able to filter on tags on ElasticAddresses, I need to have tags. * remove unneeded commented lines Was beginning of how to to it before further checking how it is done with other resources. * do not ignore network-interface-owner-id filter * add TODO about currently hardcoded region * remove hardcoding region * add testing for tags creating and allocation, adding tags and querying for it * separate test for tags into own method * Linting Co-authored-by: Bert Blommers --- moto/ec2/models.py | 16 +++++--- tests/test_ec2/test_elastic_ip_addresses.py | 45 +++++++++++++++++++++ 2 files changed, 56 insertions(+), 5 deletions(-) diff --git a/moto/ec2/models.py b/moto/ec2/models.py index 60f179128ea4..5e55b627644e 100644 --- a/moto/ec2/models.py +++ b/moto/ec2/models.py @@ -4510,13 +4510,15 @@ def modify_spot_fleet_request( return True -class ElasticAddress(CloudFormationModel): - def __init__(self, domain, address=None): +class ElasticAddress(TaggedEC2Resource, CloudFormationModel): + def __init__(self, ec2_backend, domain, address=None): + self.ec2_backend = ec2_backend if address: self.public_ip = address else: self.public_ip = random_ip() self.allocation_id = random_eip_allocation_id() if domain == "vpc" else None + self.id = self.allocation_id self.domain = domain self.instance = None self.eni = None @@ -4578,9 +4580,13 @@ def get_filter_value(self, filter_name): return self.eni.private_ip_address elif filter_name == "public-ip": return self.public_ip - else: + elif filter_name == "network-interface-owner-id": # TODO: implement network-interface-owner-id raise FilterNotImplementedError(filter_name, "DescribeAddresses") + else: + return super(ElasticAddress, self).get_filter_value( + filter_name, "DescribeAddresses" + ) class ElasticAddressBackend(object): @@ -4592,9 +4598,9 @@ def allocate_address(self, domain, address=None): if domain not in ["standard", "vpc"]: raise InvalidDomainError(domain) if address: - address = ElasticAddress(domain, address) + address = ElasticAddress(self, domain=domain, address=address) else: - address = ElasticAddress(domain) + address = ElasticAddress(self, domain=domain) self.addresses.append(address) return address diff --git 
a/tests/test_ec2/test_elastic_ip_addresses.py b/tests/test_ec2/test_elastic_ip_addresses.py index 886cdff563b4..baecb94d6d15 100644 --- a/tests/test_ec2/test_elastic_ip_addresses.py +++ b/tests/test_ec2/test_elastic_ip_addresses.py @@ -537,3 +537,48 @@ def check_vpc_filter(filter_name, filter_values): service.vpc_addresses.filter(Filters=[{"Name": "domain", "Values": ["vpc"]}]) ) len(addresses).should.equal(3) + + +@mock_ec2 +def test_eip_tags(): + service = boto3.resource("ec2", region_name="us-west-1") + client = boto3.client("ec2", region_name="us-west-1") + + # Allocate one address without tags + client.allocate_address(Domain="vpc") + # Allocate one address and add tags + alloc_tags = client.allocate_address(Domain="vpc") + with_tags = client.create_tags( + Resources=[alloc_tags["AllocationId"]], + Tags=[{"Key": "ManagedBy", "Value": "MyCode"}], + ) + addresses_with_tags = client.describe_addresses( + Filters=[ + {"Name": "domain", "Values": ["vpc"]}, + {"Name": "tag:ManagedBy", "Values": ["MyCode"]}, + ] + ) + len(addresses_with_tags["Addresses"]).should.equal(1) + addresses_with_tags = list( + service.vpc_addresses.filter( + Filters=[ + {"Name": "domain", "Values": ["vpc"]}, + {"Name": "tag:ManagedBy", "Values": ["MyCode"]}, + ] + ) + ) + len(addresses_with_tags).should.equal(1) + addresses_with_tags = list( + service.vpc_addresses.filter( + Filters=[ + {"Name": "domain", "Values": ["vpc"]}, + {"Name": "tag:ManagedBy", "Values": ["SomethingOther"]}, + ] + ) + ) + len(addresses_with_tags).should.equal(0) + addresses = list( + service.vpc_addresses.filter(Filters=[{"Name": "domain", "Values": ["vpc"]}]) + ) + # Expected total is 2, one with and one without tags + len(addresses).should.equal(2) From 82dbaadfc4b9e040b5caf13ce920d8eaa852aef4 Mon Sep 17 00:00:00 2001 From: Benjamin Date: Fri, 25 Sep 2020 11:55:29 -0400 Subject: [PATCH 550/658] =?UTF-8?q?added=20organizations=20detach=5Fpolicy?= =?UTF-8?q?=20response,=20model,=20and=20tests,=20issue=20#=E2=80=A6=20(#3?= =?UTF-8?q?278)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * added organizations detach_policy response, model, and tests, issue #3239 Signed-off-by: Ben * Created individual tests for detach_policy exceptions, updated regex statements for Root, OU, and Account Id --- moto/organizations/models.py | 31 ++++ moto/organizations/responses.py | 5 + .../test_organizations_boto3.py | 135 +++++++++++++++++- 3 files changed, 170 insertions(+), 1 deletion(-) diff --git a/moto/organizations/models.py b/moto/organizations/models.py index 09bd62b79010..5655326c02ff 100644 --- a/moto/organizations/models.py +++ b/moto/organizations/models.py @@ -821,5 +821,36 @@ def disable_policy_type(self, **kwargs): return dict(Root=root.describe()) + def detach_policy(self, **kwargs): + policy = self.get_policy_by_id(kwargs["PolicyId"]) + root_id_regex = utils.ROOT_ID_REGEX + ou_id_regex = utils.OU_ID_REGEX + account_id_regex = utils.ACCOUNT_ID_REGEX + target_id = kwargs["TargetId"] + + if re.match(root_id_regex, target_id) or re.match(ou_id_regex, target_id): + ou = next((ou for ou in self.ou if ou.id == target_id), None) + if ou is not None: + if ou in ou.attached_policies: + ou.attached_policies.remove(policy) + policy.attachments.remove(ou) + else: + raise RESTError( + "OrganizationalUnitNotFoundException", + "You specified an organizational unit that doesn't exist.", + ) + elif re.match(account_id_regex, target_id): + account = next( + (account for account in self.accounts if account.id == 
target_id), None, + ) + if account is not None: + if account in account.attached_policies: + account.attached_policies.remove(policy) + policy.attachments.remove(account) + else: + raise AccountNotFoundException + else: + raise InvalidInputException("You specified an invalid value.") + organizations_backend = OrganizationsBackend() diff --git a/moto/organizations/responses.py b/moto/organizations/responses.py index ae0bb731b1c4..73e25178ad7d 100644 --- a/moto/organizations/responses.py +++ b/moto/organizations/responses.py @@ -201,3 +201,8 @@ def disable_policy_type(self): return json.dumps( self.organizations_backend.disable_policy_type(**self.request_params) ) + + def detach_policy(self): + return json.dumps( + self.organizations_backend.detach_policy(**self.request_params) + ) diff --git a/tests/test_organizations/test_organizations_boto3.py b/tests/test_organizations/test_organizations_boto3.py index 647236118014..65f9640820ef 100644 --- a/tests/test_organizations/test_organizations_boto3.py +++ b/tests/test_organizations/test_organizations_boto3.py @@ -467,6 +467,139 @@ def test_attach_policy(): response["ResponseMetadata"]["HTTPStatusCode"].should.equal(200) +@mock_organizations +def test_detach_policy(): + client = boto3.client("organizations", region_name="us-east-1") + org = client.create_organization(FeatureSet="ALL")["Organization"] + root_id = client.list_roots()["Roots"][0]["Id"] + ou_id = client.create_organizational_unit(ParentId=root_id, Name="ou01")[ + "OrganizationalUnit" + ]["Id"] + account_id = client.create_account(AccountName=mockname, Email=mockemail)[ + "CreateAccountStatus" + ]["AccountId"] + policy_id = client.create_policy( + Content=json.dumps(policy_doc01), + Description="A dummy service control policy", + Name="MockServiceControlPolicy", + Type="SERVICE_CONTROL_POLICY", + )["Policy"]["PolicySummary"]["Id"] + client.attach_policy(PolicyId=policy_id, TargetId=ou_id) + client.attach_policy(PolicyId=policy_id, TargetId=root_id) + client.attach_policy(PolicyId=policy_id, TargetId=account_id) + response = client.detach_policy(PolicyId=policy_id, TargetId=ou_id) + response["ResponseMetadata"]["HTTPStatusCode"].should.equal(200) + response = client.detach_policy(PolicyId=policy_id, TargetId=root_id) + response["ResponseMetadata"]["HTTPStatusCode"].should.equal(200) + response = client.detach_policy(PolicyId=policy_id, TargetId=account_id) + response["ResponseMetadata"]["HTTPStatusCode"].should.equal(200) + + +@mock_organizations +def test_detach_policy_root_ou_not_found_exception(): + client = boto3.client("organizations", region_name="us-east-1") + org = client.create_organization(FeatureSet="ALL")["Organization"] + root_id = client.list_roots()["Roots"][0]["Id"] + ou_id = client.create_organizational_unit(ParentId=root_id, Name="ou01")[ + "OrganizationalUnit" + ]["Id"] + account_id = client.create_account(AccountName=mockname, Email=mockemail)[ + "CreateAccountStatus" + ]["AccountId"] + policy_id = client.create_policy( + Content=json.dumps(policy_doc01), + Description="A dummy service control policy", + Name="MockServiceControlPolicy", + Type="SERVICE_CONTROL_POLICY", + )["Policy"]["PolicySummary"]["Id"] + client.attach_policy(PolicyId=policy_id, TargetId=root_id) + client.attach_policy(PolicyId=policy_id, TargetId=account_id) + with assert_raises(ClientError) as e: + response = client.detach_policy(PolicyId=policy_id, TargetId="r-xy85") + ex = e.exception + ex.operation_name.should.equal("DetachPolicy") + ex.response["Error"]["Code"].should.equal("400") + 
ex.response["Error"]["Message"].should.contain( + "OrganizationalUnitNotFoundException" + ) + + +@mock_organizations +def test_detach_policy_ou_not_found_exception(): + client = boto3.client("organizations", region_name="us-east-1") + org = client.create_organization(FeatureSet="ALL")["Organization"] + root_id = client.list_roots()["Roots"][0]["Id"] + ou_id = client.create_organizational_unit(ParentId=root_id, Name="ou01")[ + "OrganizationalUnit" + ]["Id"] + policy_id = client.create_policy( + Content=json.dumps(policy_doc01), + Description="A dummy service control policy", + Name="MockServiceControlPolicy", + Type="SERVICE_CONTROL_POLICY", + )["Policy"]["PolicySummary"]["Id"] + client.attach_policy(PolicyId=policy_id, TargetId=ou_id) + with assert_raises(ClientError) as e: + response = client.detach_policy( + PolicyId=policy_id, TargetId="ou-zx86-z3x4yr2t7" + ) + ex = e.exception + ex.operation_name.should.equal("DetachPolicy") + ex.response["Error"]["Code"].should.equal("400") + ex.response["Error"]["Message"].should.contain( + "OrganizationalUnitNotFoundException" + ) + + +@mock_organizations +def test_detach_policy_account_id_not_found_exception(): + client = boto3.client("organizations", region_name="us-east-1") + org = client.create_organization(FeatureSet="ALL")["Organization"] + account_id = client.create_account(AccountName=mockname, Email=mockemail)[ + "CreateAccountStatus" + ]["AccountId"] + policy_id = client.create_policy( + Content=json.dumps(policy_doc01), + Description="A dummy service control policy", + Name="MockServiceControlPolicy", + Type="SERVICE_CONTROL_POLICY", + )["Policy"]["PolicySummary"]["Id"] + client.attach_policy(PolicyId=policy_id, TargetId=account_id) + with assert_raises(ClientError) as e: + response = client.detach_policy(PolicyId=policy_id, TargetId="111619863336") + ex = e.exception + ex.operation_name.should.equal("DetachPolicy") + ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) + ex.response["Error"]["Code"].should.contain("AccountNotFoundException") + ex.response["Error"]["Message"].should.equal( + "You specified an account that doesn't exist." 
+ ) + + +@mock_organizations +def test_detach_policy_invalid_target_exception(): + client = boto3.client("organizations", region_name="us-east-1") + org = client.create_organization(FeatureSet="ALL")["Organization"] + root_id = client.list_roots()["Roots"][0]["Id"] + ou_id = client.create_organizational_unit(ParentId=root_id, Name="ou01")[ + "OrganizationalUnit" + ]["Id"] + policy_id = client.create_policy( + Content=json.dumps(policy_doc01), + Description="A dummy service control policy", + Name="MockServiceControlPolicy", + Type="SERVICE_CONTROL_POLICY", + )["Policy"]["PolicySummary"]["Id"] + client.attach_policy(PolicyId=policy_id, TargetId=ou_id) + with assert_raises(ClientError) as e: + response = client.detach_policy(PolicyId=policy_id, TargetId="invalidtargetid") + ex = e.exception + ex.operation_name.should.equal("DetachPolicy") + ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) + ex.response["Error"]["Code"].should.contain("InvalidInputException") + ex.response["Error"]["Message"].should.equal("You specified an invalid value.") + + @mock_organizations def test_delete_policy(): client = boto3.client("organizations", region_name="us-east-1") @@ -798,7 +931,7 @@ def test_tag_resource_errors(): with assert_raises(ClientError) as e: client.tag_resource( - ResourceId="000000000000", Tags=[{"Key": "key", "Value": "value"},] + ResourceId="000000000000", Tags=[{"Key": "key", "Value": "value"},], ) ex = e.exception ex.operation_name.should.equal("TagResource") From 55e7caccfe2482c90e4fb52214ef9e031ccdda1e Mon Sep 17 00:00:00 2001 From: usmangani1 Date: Sun, 27 Sep 2020 13:54:17 +0530 Subject: [PATCH 551/658] Fix:EC2 Tags in create vpc and create subnet (#3338) Co-authored-by: usmankb --- moto/ec2/models.py | 13 +++++++++++++ moto/ec2/responses/subnets.py | 21 ++++++++++++++++++++- moto/ec2/responses/vpcs.py | 5 +++++ tests/test_ec2/test_subnets.py | 19 ++++++++++++++++++- tests/test_ec2/test_vpcs.py | 13 +++++++++++++ 5 files changed, 69 insertions(+), 2 deletions(-) diff --git a/moto/ec2/models.py b/moto/ec2/models.py index 5e55b627644e..d6d92da4306e 100644 --- a/moto/ec2/models.py +++ b/moto/ec2/models.py @@ -2892,6 +2892,7 @@ def create_vpc( cidr_block, instance_tenancy="default", amazon_provided_ipv6_cidr_block=False, + tags=[], ): vpc_id = random_vpc_id() try: @@ -2910,6 +2911,12 @@ def create_vpc( instance_tenancy, amazon_provided_ipv6_cidr_block, ) + + for tag in tags: + tag_key = tag.get("Key") + tag_value = tag.get("Value") + vpc.add_tag(tag_key, tag_value) + self.vpcs[vpc_id] = vpc # AWS creates a default main route table and security group. 
@@ -3409,6 +3416,7 @@ def create_subnet(
         availability_zone=None,
         availability_zone_id=None,
         context=None,
+        tags=[],
     ):
         subnet_id = random_subnet_id()
         vpc = self.get_vpc(
@@ -3479,6 +3487,11 @@ def create_subnet(
             assign_ipv6_address_on_creation=False,
         )
 
+        for tag in tags:
+            tag_key = tag.get("Key")
+            tag_value = tag.get("Value")
+            subnet.add_tag(tag_key, tag_value)
+
         # AWS associates a new subnet with the default Network ACL
         self.associate_default_network_acl_with_subnet(subnet_id, vpc_id)
         self.subnets[availability_zone][subnet_id] = subnet
diff --git a/moto/ec2/responses/subnets.py b/moto/ec2/responses/subnets.py
index 3bad8e12ff9f..ef1b6249c47b 100644
--- a/moto/ec2/responses/subnets.py
+++ b/moto/ec2/responses/subnets.py
@@ -11,12 +11,21 @@ def create_subnet(self):
         cidr_block = self._get_param("CidrBlock")
         availability_zone = self._get_param("AvailabilityZone")
         availability_zone_id = self._get_param("AvailabilityZoneId")
+        tags = self._get_multi_param("TagSpecification")
+        if tags:
+            tags = tags[0].get("Tag")
+
         if not availability_zone and not availability_zone_id:
             availability_zone = random.choice(
                 self.ec2_backend.describe_availability_zones()
             ).name
         subnet = self.ec2_backend.create_subnet(
-            vpc_id, cidr_block, availability_zone, availability_zone_id, context=self
+            vpc_id,
+            cidr_block,
+            availability_zone,
+            availability_zone_id,
+            context=self,
+            tags=tags,
         )
         template = self.response_template(CREATE_SUBNET_RESPONSE)
         return template.render(subnet=subnet)
@@ -64,6 +73,16 @@ def modify_subnet_attribute(self):
     <assignIpv6AddressOnCreation>{{ subnet.assign_ipv6_address_on_creation }}</assignIpv6AddressOnCreation>
     <ipv6CidrBlockAssociationSet>{{ subnet.ipv6_cidr_block_associations }}</ipv6CidrBlockAssociationSet>
     <subnetArn>arn:aws:ec2:{{ subnet._availability_zone.name[0:-1] }}:{{ subnet.owner_id }}:subnet/{{ subnet.id }}</subnetArn>
+    <tagSet>
+      {% for tag in subnet.get_tags() %}
+      <item>
+        <resourceId>{{ tag.resource_id }}</resourceId>
+        <resourceType>{{ tag.resource_type }}</resourceType>
+        <key>{{ tag.key }}</key>
+        <value>{{ tag.value }}</value>
+      </item>
+      {% endfor %}
+    </tagSet>
   </subnet>
 </CreateSubnetResponse>"""
diff --git a/moto/ec2/responses/vpcs.py b/moto/ec2/responses/vpcs.py
index fc752fa7de45..de4bb3febdaa 100644
--- a/moto/ec2/responses/vpcs.py
+++ b/moto/ec2/responses/vpcs.py
@@ -14,14 +14,19 @@ def _get_doc_date(self):
 
     def create_vpc(self):
         cidr_block = self._get_param("CidrBlock")
+        tags = self._get_multi_param("TagSpecification")
         instance_tenancy = self._get_param("InstanceTenancy", if_none="default")
         amazon_provided_ipv6_cidr_blocks = self._get_param(
             "AmazonProvidedIpv6CidrBlock"
         )
+        if tags:
+            tags = tags[0].get("Tag")
+
         vpc = self.ec2_backend.create_vpc(
             cidr_block,
             instance_tenancy,
             amazon_provided_ipv6_cidr_block=amazon_provided_ipv6_cidr_blocks,
+            tags=tags,
         )
         doc_date = self._get_doc_date()
         template = self.response_template(CREATE_VPC_RESPONSE)
diff --git a/tests/test_ec2/test_subnets.py b/tests/test_ec2/test_subnets.py
index 45c9040fcdbc..2d30171f0d1d 100644
--- a/tests/test_ec2/test_subnets.py
+++ b/tests/test_ec2/test_subnets.py
@@ -327,7 +327,7 @@ def test_create_subnet_response_fields():
     subnet.should.have.key("State")
     subnet.should.have.key("SubnetId")
     subnet.should.have.key("VpcId")
-    subnet.shouldnt.have.key("Tags")
+    subnet.should.have.key("Tags")
     subnet.should.have.key("DefaultForAz").which.should.equal(False)
     subnet.should.have.key("MapPublicIpOnLaunch").which.should.equal(False)
     subnet.should.have.key("OwnerId")
@@ -456,6 +456,23 @@ def test_create_subnets_with_overlapping_cidr_blocks():
     )
 
 
+@mock_ec2
+def test_create_subnet_with_tags():
+    ec2 = boto3.resource("ec2", region_name="us-west-1")
+    vpc = ec2.create_vpc(CidrBlock="172.31.0.0/16")
+
+    subnet = ec2.create_subnet(
+        VpcId=vpc.id,
+        CidrBlock="172.31.48.0/20",
AvailabilityZoneId="use1-az6", + TagSpecifications=[ + {"ResourceType": "subnet", "Tags": [{"Key": "name", "Value": "some-vpc"}]} + ], + ) + + assert subnet.tags == [{"Key": "name", "Value": "some-vpc"}] + + @mock_ec2 def test_available_ip_addresses_in_subnet(): ec2 = boto3.resource("ec2", region_name="us-west-1") diff --git a/tests/test_ec2/test_vpcs.py b/tests/test_ec2/test_vpcs.py index 35705e482025..8ad85072c8f3 100644 --- a/tests/test_ec2/test_vpcs.py +++ b/tests/test_ec2/test_vpcs.py @@ -680,6 +680,19 @@ def test_create_vpc_with_invalid_cidr_range(): ) +@mock_ec2 +def test_create_vpc_with_tags(): + ec2 = boto3.resource("ec2", region_name="us-west-1") + # Create VPC + vpc = ec2.create_vpc( + CidrBlock="10.0.0.0/16", + TagSpecifications=[ + {"ResourceType": "vpc", "Tags": [{"Key": "name", "Value": "some-vpc"}]} + ], + ) + assert vpc.tags == [{"Key": "name", "Value": "some-vpc"}] + + @mock_ec2 def test_enable_vpc_classic_link(): ec2 = boto3.resource("ec2", region_name="us-west-1") From 3bc18455a2dad70148dc095435a55b2a43eaeac1 Mon Sep 17 00:00:00 2001 From: ljakimczuk <39192420+ljakimczuk@users.noreply.github.com> Date: Mon, 28 Sep 2020 08:16:06 +0200 Subject: [PATCH 552/658] WIP: Introducing VPC Flow Logs (#3337) * Start working on flow logs * Change test * Constructing tests * Changing exceptions and adding more tests * Adding more tests * Changing model and adding more tests * Adding support for tags * Mocking Access error with non-existing Log Group Name * Adding FlowLogAlreadyExists support * Changing style * Reformatted code * Reformatted tests * Removing needless test * Adding support for CloudFormation * Reformatting slightly * Removing arnparse and using split * Rearranging tests * Fixing FilterNotImplementedError test * Moving imports to 'if' clauses and adding explicit test for 'cloud-watch-logs' type * Setting names matching boto3 API and restoring 'not-implementd-filter' test * Reformatting tests with black --- moto/ec2/exceptions.py | 53 +++ moto/ec2/models.py | 304 ++++++++++++++ moto/ec2/responses/__init__.py | 2 + moto/ec2/responses/flow_logs.py | 122 ++++++ moto/ec2/utils.py | 5 + tests/test_ec2/test_flow_logs.py | 678 +++++++++++++++++++++++++++++++ 6 files changed, 1164 insertions(+) create mode 100644 moto/ec2/responses/flow_logs.py create mode 100644 tests/test_ec2/test_flow_logs.py diff --git a/moto/ec2/exceptions.py b/moto/ec2/exceptions.py index 4c47adbb9129..b2d7e8aab9a9 100644 --- a/moto/ec2/exceptions.py +++ b/moto/ec2/exceptions.py @@ -71,6 +71,24 @@ def __init__(self, subnet_id): ) +class InvalidFlowLogIdError(EC2ClientError): + def __init__(self, count, flow_log_ids): + super(InvalidFlowLogIdError, self).__init__( + "InvalidFlowLogId.NotFound", + "These flow log ids in the input list are not found: [TotalCount: {0}] {1}".format( + count, flow_log_ids + ), + ) + + +class FlowLogAlreadyExists(EC2ClientError): + def __init__(self): + super(FlowLogAlreadyExists, self).__init__( + "FlowLogAlreadyExists", + "Error. 
There is an existing Flow Log with the same configuration and log destination.", + ) + + class InvalidNetworkAclIdError(EC2ClientError): def __init__(self, network_acl_id): super(InvalidNetworkAclIdError, self).__init__( @@ -263,6 +281,14 @@ def __init__(self, ip): ) +class LogDestinationNotFoundError(EC2ClientError): + def __init__(self, bucket_name): + super(LogDestinationNotFoundError, self).__init__( + "LogDestinationNotFoundException", + "LogDestination: '{0}' does not exist.".format(bucket_name), + ) + + class InvalidAllocationIdError(EC2ClientError): def __init__(self, allocation_id): super(InvalidAllocationIdError, self).__init__( @@ -309,6 +335,33 @@ def __init__(self, vpc_peering_connection_id): ) +class InvalidDependantParameterError(EC2ClientError): + def __init__(self, dependant_parameter, parameter, parameter_value): + super(InvalidDependantParameterError, self).__init__( + "InvalidParameter", + "{0} can't be empty if {1} is {2}.".format( + dependant_parameter, parameter, parameter_value, + ), + ) + + +class InvalidDependantParameterTypeError(EC2ClientError): + def __init__(self, dependant_parameter, parameter_value, parameter): + super(InvalidDependantParameterTypeError, self).__init__( + "InvalidParameter", + "{0} type must be {1} if {2} is provided.".format( + dependant_parameter, parameter_value, parameter, + ), + ) + + +class InvalidAggregationIntervalParameterError(EC2ClientError): + def __init__(self, parameter): + super(InvalidAggregationIntervalParameterError, self).__init__( + "InvalidParameter", "Invalid {0}".format(parameter), + ) + + class InvalidParameterValueError(EC2ClientError): def __init__(self, parameter_value): super(InvalidParameterValueError, self).__init__( diff --git a/moto/ec2/models.py b/moto/ec2/models.py index d6d92da4306e..e85dab800859 100644 --- a/moto/ec2/models.py +++ b/moto/ec2/models.py @@ -28,11 +28,13 @@ camelcase_to_underscores, ) from moto.core import ACCOUNT_ID + from .exceptions import ( CidrLimitExceeded, DependencyViolationError, EC2ClientError, FilterNotImplementedError, + FlowLogAlreadyExists, GatewayNotAttachedError, InvalidAddressError, InvalidAllocationIdError, @@ -52,6 +54,10 @@ InvalidKeyPairDuplicateError, InvalidKeyPairFormatError, InvalidKeyPairNameError, + InvalidAggregationIntervalParameterError, + InvalidDependantParameterError, + InvalidDependantParameterTypeError, + InvalidFlowLogIdError, InvalidLaunchTemplateNameError, InvalidNetworkAclIdError, InvalidNetworkAttachmentIdError, @@ -123,6 +129,7 @@ random_spot_request_id, random_subnet_id, random_subnet_association_id, + random_flow_log_id, random_volume_id, random_vpc_id, random_vpc_cidr_association_id, @@ -1176,6 +1183,7 @@ class TagBackend(object): "subnet", "volume", "vpc", + "vpc-flow-log", "vpc-peering-connection" "vpn-connection", "vpn-gateway", ] @@ -3524,6 +3532,301 @@ def modify_subnet_attribute(self, subnet_id, attr_name, attr_value): raise InvalidParameterValueError(attr_name) +class Unsuccessful(object): + def __init__( + self, resource_id, error_code, error_message, + ): + self.resource_id = resource_id + self.error_code = error_code + self.error_message = error_message + + +class FlowLogs(TaggedEC2Resource, CloudFormationModel): + def __init__( + self, + ec2_backend, + flow_log_id, + resource_id, + traffic_type, + log_destination, + log_group_name, + deliver_logs_permission_arn, + max_aggregation_interval, + log_destination_type, + log_format, + deliver_logs_status="SUCCESS", + deliver_logs_error_message=None, + ): + self.ec2_backend = ec2_backend + 
self.id = flow_log_id + self.resource_id = resource_id + self.traffic_type = traffic_type + self.log_destination = log_destination + self.log_group_name = log_group_name + self.deliver_logs_permission_arn = deliver_logs_permission_arn + self.deliver_logs_status = deliver_logs_status + self.deliver_logs_error_message = deliver_logs_error_message + self.max_aggregation_interval = max_aggregation_interval + self.log_destination_type = log_destination_type + self.log_format = log_format + + self.created_at = utc_date_and_time() + + @staticmethod + def cloudformation_name_type(): + return None + + @staticmethod + def cloudformation_type(): + # https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-ec2-flowlog.html + return "AWS::EC2::FlowLog" + + @classmethod + def create_from_cloudformation_json( + cls, resource_name, cloudformation_json, region_name + ): + properties = cloudformation_json["Properties"] + + resource_type = properties.get("ResourceType") + resource_id = [properties.get("ResourceId")] + traffic_type = properties.get("TrafficType") + deliver_logs_permission_arn = properties.get("DeliverLogsPermissionArn") + log_destination_type = properties.get("LogDestinationType") + log_destination = properties.get("LogDestination") + log_group_name = properties.get("LogGroupName") + log_format = properties.get("LogFormat") + max_aggregation_interval = properties.get("MaxAggregationInterval") + + ec2_backend = ec2_backends[region_name] + flow_log, _ = ec2_backend.create_flow_logs( + resource_type, + resource_id, + traffic_type, + deliver_logs_permission_arn, + log_destination_type, + log_destination, + log_group_name, + log_format, + max_aggregation_interval, + ) + for tag in properties.get("Tags", []): + tag_key = tag["Key"] + tag_value = tag["Value"] + flow_log[0].add_tag(tag_key, tag_value) + + return flow_log[0] + + @property + def physical_resource_id(self): + return self.id + + def get_filter_value(self, filter_name): + """ + API Version 2016-11-15 defines the following filters for DescribeFlowLogs: + + * deliver-log-status + * log-destination-type + * flow-log-id + * log-group-name + * resource-id + * traffic-type + * tag:key=value + * tag-key + + Taken from: https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeFlowLogs.html + """ + if filter_name == "resource-id": + return self.resource_id + elif filter_name == "traffic-type": + return self.traffic_type + elif filter_name == "log-destination-type": + return self.log_destination_type + elif filter_name == "flow-log-id": + return self.id + elif filter_name == "log-group-name": + return self.log_group_name + elif filter_name == "deliver-log-status": + return "SUCCESS" + else: + return super(FlowLogs, self).get_filter_value( + filter_name, "DescribeFlowLogs" + ) + + +class FlowLogsBackend(object): + def __init__(self): + self.flow_logs = defaultdict(dict) + super(FlowLogsBackend, self).__init__() + + def _validate_request( + self, + log_group_name, + log_destination, + log_destination_type, + max_aggregation_interval, + deliver_logs_permission_arn, + ): + if log_group_name is None and log_destination is None: + raise InvalidDependantParameterError( + "LogDestination", "LogGroupName", "not provided", + ) + + if log_destination_type == "s3": + if log_group_name is not None: + raise InvalidDependantParameterTypeError( + "LogDestination", "cloud-watch-logs", "LogGroupName", + ) + elif log_destination_type == "cloud-watch-logs": + if deliver_logs_permission_arn is None: + raise InvalidDependantParameterError( + 
"DeliverLogsPermissionArn", + "LogDestinationType", + "cloud-watch-logs", + ) + + if max_aggregation_interval not in ["60", "600"]: + raise InvalidAggregationIntervalParameterError( + "Flow Log Max Aggregation Interval" + ) + + def create_flow_logs( + self, + resource_type, + resource_ids, + traffic_type, + deliver_logs_permission_arn, + log_destination_type, + log_destination, + log_group_name, + log_format, + max_aggregation_interval, + ): + # Guess it's best to put it here due to possible + # lack of them in the CloudFormation template + max_aggregation_interval = ( + "600" if max_aggregation_interval is None else max_aggregation_interval + ) + log_destination_type = ( + "cloud-watch-logs" if log_destination_type is None else log_destination_type + ) + log_format = ( + "${version} ${account-id} ${interface-id} ${srcaddr} ${dstaddr} ${srcport} ${dstport} ${protocol} ${packets} ${bytes} ${start} ${end} ${action} ${log-status}" + if log_format is None + else log_format + ) + + # Validate the requests paremeters + self._validate_request( + log_group_name, + log_destination, + log_destination_type, + max_aggregation_interval, + deliver_logs_permission_arn, + ) + + flow_logs_set = [] + unsuccessful = [] + + for resource_id in resource_ids: + deliver_logs_status = "SUCCESS" + deliver_logs_error_message = None + flow_log_id = random_flow_log_id() + if resource_type == "VPC": + # Validate VPCs exist + self.get_vpc(resource_id) + elif resource_type == "Subnet": + # Validate Subnets exist + self.get_subnet(resource_id) + elif resource_type == "NetworkInterface": + # Validate NetworkInterfaces exist + self.get_network_interface(resource_id) + + if log_destination_type == "s3": + from moto.s3.models import s3_backend + from moto.s3.exceptions import MissingBucket + + arn = log_destination.split(":", 5)[5] + try: + s3_backend.get_bucket(arn) + except MissingBucket: + unsuccessful.append( + # Instead of creating FlowLog report + # the unsuccessful status for the + # given resource_id + Unsuccessful( + resource_id, + "400", + "LogDestination: {0} does not exist.".format(arn), + ) + ) + continue + elif log_destination_type == "cloud-watch-logs": + from moto.logs.models import logs_backends + from moto.logs.exceptions import ResourceNotFoundException + + # API allows to create a FlowLog with a + # non-existing LogGroup. It however later + # on reports the FAILED delivery status. + try: + # Need something easy to check the group exists. + # The list_tags_log_group seems to do the trick. 
+ logs_backends[self.region_name].list_tags_log_group(log_group_name) + except ResourceNotFoundException: + deliver_logs_status = "FAILED" + deliver_logs_error_message = "Access error" + + all_flow_logs = self.describe_flow_logs() + if any( + fl.resource_id == resource_id + and ( + fl.log_group_name == log_group_name + or fl.log_destination == log_destination + ) + for fl in all_flow_logs + ): + raise FlowLogAlreadyExists() + flow_logs = FlowLogs( + self, + flow_log_id, + resource_id, + traffic_type, + log_destination, + log_group_name, + deliver_logs_permission_arn, + max_aggregation_interval, + log_destination_type, + log_format, + deliver_logs_status, + deliver_logs_error_message, + ) + self.flow_logs[flow_log_id] = flow_logs + flow_logs_set.append(flow_logs) + + return flow_logs_set, unsuccessful + + def describe_flow_logs(self, flow_log_ids=None, filters=None): + matches = itertools.chain([i for i in self.flow_logs.values()]) + if flow_log_ids: + matches = [flow_log for flow_log in matches if flow_log.id in flow_log_ids] + if filters: + matches = generic_filter(filters, matches) + return matches + + def delete_flow_logs(self, flow_log_ids): + non_existing = [] + for flow_log in flow_log_ids: + if flow_log in self.flow_logs: + self.flow_logs.pop(flow_log, None) + else: + non_existing.append(flow_log) + + if non_existing: + raise InvalidFlowLogIdError( + len(flow_log_ids), " ".join(x for x in flow_log_ids), + ) + return True + + class SubnetRouteTableAssociation(CloudFormationModel): def __init__(self, route_table_id, subnet_id): self.route_table_id = route_table_id @@ -5530,6 +5833,7 @@ class EC2Backend( VPCBackend, SubnetBackend, SubnetRouteTableAssociationBackend, + FlowLogsBackend, NetworkInterfaceBackend, VPNConnectionBackend, VPCPeeringConnectionBackend, diff --git a/moto/ec2/responses/__init__.py b/moto/ec2/responses/__init__.py index 21cbf8249af4..893a25e89014 100644 --- a/moto/ec2/responses/__init__.py +++ b/moto/ec2/responses/__init__.py @@ -24,6 +24,7 @@ from .spot_fleets import SpotFleets from .spot_instances import SpotInstances from .subnets import Subnets +from .flow_logs import FlowLogs from .tags import TagResponse from .virtual_private_gateways import VirtualPrivateGateways from .vm_export import VMExport @@ -60,6 +61,7 @@ class EC2Response( SpotFleets, SpotInstances, Subnets, + FlowLogs, TagResponse, VirtualPrivateGateways, VMExport, diff --git a/moto/ec2/responses/flow_logs.py b/moto/ec2/responses/flow_logs.py new file mode 100644 index 000000000000..9978f89c2b26 --- /dev/null +++ b/moto/ec2/responses/flow_logs.py @@ -0,0 +1,122 @@ +from __future__ import unicode_literals +from moto.core.responses import BaseResponse +from moto.ec2.models import validate_resource_ids +from moto.ec2.utils import filters_from_querystring + + +class FlowLogs(BaseResponse): + def create_flow_logs(self): + resource_type = self._get_param("ResourceType") + resource_ids = self._get_multi_param("ResourceId") + traffic_type = self._get_param("TrafficType") + deliver_logs_permission_arn = self._get_param("DeliverLogsPermissionArn") + log_destination_type = self._get_param("LogDestinationType") + log_destination = self._get_param("LogDestination") + log_group_name = self._get_param("LogGroupName") + log_format = self._get_param("LogFormat") + max_aggregation_interval = self._get_param("MaxAggregationInterval") + validate_resource_ids(resource_ids) + + tags = self._parse_tag_specification("TagSpecification") + tags = tags.get("vpc-flow-log", {}) + if self.is_not_dryrun("CreateFlowLogs"): + 
+            flow_logs, errors = self.ec2_backend.create_flow_logs(
+                resource_type=resource_type,
+                resource_ids=resource_ids,
+                traffic_type=traffic_type,
+                deliver_logs_permission_arn=deliver_logs_permission_arn,
+                log_destination_type=log_destination_type,
+                log_destination=log_destination,
+                log_group_name=log_group_name,
+                log_format=log_format,
+                max_aggregation_interval=max_aggregation_interval,
+            )
+            for fl in flow_logs:
+                fl.add_tags(tags)
+            template = self.response_template(CREATE_FLOW_LOGS_RESPONSE)
+            return template.render(flow_logs=flow_logs, errors=errors)
+
+    def describe_flow_logs(self):
+        flow_log_ids = self._get_multi_param("FlowLogId")
+        filters = filters_from_querystring(self.querystring)
+        flow_logs = self.ec2_backend.describe_flow_logs(flow_log_ids, filters)
+        if self.is_not_dryrun("DescribeFlowLogs"):
+            template = self.response_template(DESCRIBE_FLOW_LOGS_RESPONSE)
+            return template.render(flow_logs=flow_logs)
+
+    def delete_flow_logs(self):
+        flow_log_ids = self._get_multi_param("FlowLogId")
+        self.ec2_backend.delete_flow_logs(flow_log_ids)
+        if self.is_not_dryrun("DeleteFlowLogs"):
+            template = self.response_template(DELETE_FLOW_LOGS_RESPONSE)
+            return template.render()
+
+
+CREATE_FLOW_LOGS_RESPONSE = """<CreateFlowLogsResponse xmlns="http://ec2.amazonaws.com/doc/2016-11-15/">
+  <requestId>2d96dae3-504b-4fc4-bf50-266EXAMPLE</requestId>
+  <unsuccessful>
+    {% for error in errors %}
+    <item>
+      <error>
+        <code>{{ error.error_code }}</code>
+        <message>{{ error.error_message }}</message>
+      </error>
+      <resourceId>{{ error.resource_id }}</resourceId>
+    </item>
+    {% endfor %}
+  </unsuccessful>
+  <flowLogIdSet>
+    {% for flow_log in flow_logs %}
+    <item>{{ flow_log.id }}</item>
+    {% endfor %}
+  </flowLogIdSet>
+</CreateFlowLogsResponse>"""
+
+DELETE_FLOW_LOGS_RESPONSE = """<DeleteFlowLogsResponse xmlns="http://ec2.amazonaws.com/doc/2016-11-15/">
+  <requestId>c5c4f51f-f4e9-42bc-8700-EXAMPLE</requestId>
+</DeleteFlowLogsResponse>"""
+
+DESCRIBE_FLOW_LOGS_RESPONSE = """<DescribeFlowLogsResponse xmlns="http://ec2.amazonaws.com/doc/2016-11-15/">
+  <requestId>3cb46f23-099e-4bf0-891c-EXAMPLE</requestId>
+  <flowLogSet>
+    {% for flow_log in flow_logs %}
+    <item>
+      {% if flow_log.log_destination is not none %}
+      <logDestination>{{ flow_log.log_destination }}</logDestination>
+      {% endif %}
+      <resourceId>{{ flow_log.resource_id }}</resourceId>
+      <logDestinationType>{{ flow_log.log_destination_type }}</logDestinationType>
+      <creationTime>{{ flow_log.created_at }}</creationTime>
+      <trafficType>{{ flow_log.traffic_type }}</trafficType>
+      <deliverLogsStatus>{{ flow_log.deliver_logs_status }}</deliverLogsStatus>
+      {% if flow_log.deliver_logs_error_message is not none %}
+      <deliverLogsErrorMessage>{{ flow_log.deliver_logs_error_message }}</deliverLogsErrorMessage>
+      {% endif %}
+      <logFormat>{{ flow_log.log_format }}</logFormat>
+      <flowLogStatus>ACTIVE</flowLogStatus>
+      <flowLogId>{{ flow_log.id }}</flowLogId>
+      <maxAggregationInterval>{{ flow_log.max_aggregation_interval }}</maxAggregationInterval>
+      {% if flow_log.deliver_logs_permission_arn is not none %}
+      <deliverLogsPermissionArn>{{ flow_log.deliver_logs_permission_arn }}</deliverLogsPermissionArn>
+      {% endif %}
+      {% if flow_log.log_group_name is not none %}
+      <logGroupName>{{ flow_log.log_group_name }}</logGroupName>
+      {% endif %}
+      {% if flow_log.get_tags() %}
+      <tagSet>
+        {% for tag in flow_log.get_tags() %}
+        <item>
+          <key>{{ tag.key }}</key>
+          <value>{{ tag.value }}</value>
+        </item>
+        {% endfor %}
+      </tagSet>
+      {% endif %}
+    </item>
+    {% endfor %}
+  </flowLogSet>
+</DescribeFlowLogsResponse>"""
diff --git a/moto/ec2/utils.py b/moto/ec2/utils.py
index 653cd055d45e..e6763fec1dbc 100644
--- a/moto/ec2/utils.py
+++ b/moto/ec2/utils.py
@@ -16,6 +16,7 @@ EC2_RESOURCE_TO_PREFIX = {
     "customer-gateway": "cgw",
     "dhcp-options": "dopt",
+    "flow-logs": "fl",
     "image": "ami",
     "instance": "i",
     "internet-gateway": "igw",
@@ -74,6 +75,10 @@ def random_security_group_id():
     return random_id(prefix=EC2_RESOURCE_TO_PREFIX["security-group"])
 
 
+def random_flow_log_id():
+    return random_id(prefix=EC2_RESOURCE_TO_PREFIX["flow-logs"])
+
+
 def random_snapshot_id():
     return random_id(prefix=EC2_RESOURCE_TO_PREFIX["snapshot"])
 
diff --git a/tests/test_ec2/test_flow_logs.py b/tests/test_ec2/test_flow_logs.py
new file mode 100644
index 000000000000..044e6c31d202
--- /dev/null
+++ b/tests/test_ec2/test_flow_logs.py
@@ -0,0 +1,678 @@
+from __future__ import unicode_literals
+
+import tests.backport_assert_raises  # noqa
+from nose.tools import assert_raises
+
+import boto3
+
+from botocore.exceptions import ParamValidationError,
ClientError +from botocore.parsers import ResponseParserError +import json +import sure # noqa +import random +import sys + +from moto import ( + settings, + mock_cloudformation, + mock_ec2, + mock_s3, + mock_logs, +) +from moto.core import ACCOUNT_ID +from moto.ec2.exceptions import FilterNotImplementedError + + +@mock_s3 +@mock_ec2 +def test_create_flow_logs_s3(): + s3 = boto3.resource("s3", region_name="us-west-1") + client = boto3.client("ec2", region_name="us-west-1") + + vpc = client.create_vpc(CidrBlock="10.0.0.0/16")["Vpc"] + + bucket = s3.create_bucket( + Bucket="test-flow-logs", + CreateBucketConfiguration={"LocationConstraint": "us-west-1"}, + ) + + with assert_raises(ClientError) as ex: + client.create_flow_logs( + ResourceType="VPC", + ResourceIds=[vpc["VpcId"]], + TrafficType="ALL", + LogDestinationType="s3", + LogDestination="arn:aws:s3:::" + bucket.name, + DryRun=True, + ) + ex.exception.response["Error"]["Code"].should.equal("DryRunOperation") + ex.exception.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) + ex.exception.response["Error"]["Message"].should.equal( + "An error occurred (DryRunOperation) when calling the CreateFlowLogs operation: Request would have succeeded, but DryRun flag is set" + ) + + response = client.create_flow_logs( + ResourceType="VPC", + ResourceIds=[vpc["VpcId"]], + TrafficType="ALL", + LogDestinationType="s3", + LogDestination="arn:aws:s3:::" + bucket.name, + )["FlowLogIds"] + response.should.have.length_of(1) + + flow_logs = client.describe_flow_logs()["FlowLogs"] + flow_logs.should.have.length_of(1) + + flow_log = flow_logs[0] + + flow_log["FlowLogId"].should.equal(response[0]) + flow_log["DeliverLogsStatus"].should.equal("SUCCESS") + flow_log["FlowLogStatus"].should.equal("ACTIVE") + flow_log["ResourceId"].should.equal(vpc["VpcId"]) + flow_log["TrafficType"].should.equal("ALL") + flow_log["LogDestinationType"].should.equal("s3") + flow_log["LogDestination"].should.equal("arn:aws:s3:::" + bucket.name) + flow_log["LogFormat"].should.equal( + "${version} ${account-id} ${interface-id} ${srcaddr} ${dstaddr} ${srcport} ${dstport} ${protocol} ${packets} ${bytes} ${start} ${end} ${action} ${log-status}" + ) + flow_log["MaxAggregationInterval"].should.equal(600) + + +@mock_logs +@mock_ec2 +def test_create_flow_logs_cloud_watch(): + client = boto3.client("ec2", region_name="us-west-1") + logs_client = boto3.client("logs", region_name="us-west-1") + + vpc = client.create_vpc(CidrBlock="10.0.0.0/16")["Vpc"] + logs_client.create_log_group(logGroupName="test-group") + + with assert_raises(ClientError) as ex: + client.create_flow_logs( + ResourceType="VPC", + ResourceIds=[vpc["VpcId"]], + TrafficType="ALL", + LogDestinationType="cloud-watch-logs", + LogGroupName="test-group", + DeliverLogsPermissionArn="arn:aws:iam::" + ACCOUNT_ID + ":role/test-role", + DryRun=True, + ) + ex.exception.response["Error"]["Code"].should.equal("DryRunOperation") + ex.exception.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) + ex.exception.response["Error"]["Message"].should.equal( + "An error occurred (DryRunOperation) when calling the CreateFlowLogs operation: Request would have succeeded, but DryRun flag is set" + ) + + response = client.create_flow_logs( + ResourceType="VPC", + ResourceIds=[vpc["VpcId"]], + TrafficType="ALL", + LogDestinationType="cloud-watch-logs", + LogGroupName="test-group", + DeliverLogsPermissionArn="arn:aws:iam::" + ACCOUNT_ID + ":role/test-role", + )["FlowLogIds"] + response.should.have.length_of(1) + + flow_logs = 
client.describe_flow_logs()["FlowLogs"] + flow_logs.should.have.length_of(1) + + flow_log = flow_logs[0] + + flow_log["FlowLogId"].should.equal(response[0]) + flow_log["DeliverLogsStatus"].should.equal("SUCCESS") + flow_log["FlowLogStatus"].should.equal("ACTIVE") + flow_log["ResourceId"].should.equal(vpc["VpcId"]) + flow_log["TrafficType"].should.equal("ALL") + flow_log["LogDestinationType"].should.equal("cloud-watch-logs") + flow_log["LogGroupName"].should.equal("test-group") + flow_log["DeliverLogsPermissionArn"].should.equal( + "arn:aws:iam::" + ACCOUNT_ID + ":role/test-role" + ) + flow_log["LogFormat"].should.equal( + "${version} ${account-id} ${interface-id} ${srcaddr} ${dstaddr} ${srcport} ${dstport} ${protocol} ${packets} ${bytes} ${start} ${end} ${action} ${log-status}" + ) + flow_log["MaxAggregationInterval"].should.equal(600) + + +@mock_s3 +@mock_ec2 +def test_create_flow_log_create(): + s3 = boto3.resource("s3", region_name="us-west-1") + client = boto3.client("ec2", region_name="us-west-1") + + vpc1 = client.create_vpc(CidrBlock="10.0.0.0/16")["Vpc"] + vpc2 = client.create_vpc(CidrBlock="10.1.0.0/16")["Vpc"] + + bucket = s3.create_bucket( + Bucket="test-flow-logs", + CreateBucketConfiguration={"LocationConstraint": "us-west-1",}, + ) + + response = client.create_flow_logs( + ResourceType="VPC", + ResourceIds=[vpc1["VpcId"], vpc2["VpcId"]], + TrafficType="ALL", + LogDestinationType="s3", + LogDestination="arn:aws:s3:::" + bucket.name, + LogFormat="${version} ${vpc-id} ${subnet-id} ${instance-id} ${interface-id} ${account-id} ${type} ${srcaddr} ${dstaddr} ${srcport} ${dstport} ${pkt-srcaddr} ${pkt-dstaddr} ${protocol} ${bytes} ${packets} ${start} ${end} ${action} ${tcp-flags} ${log-status}", + )["FlowLogIds"] + response.should.have.length_of(2) + + flow_logs = client.describe_flow_logs()["FlowLogs"] + flow_logs.should.have.length_of(2) + + flow_logs[0]["LogFormat"].should.equal( + "${version} ${vpc-id} ${subnet-id} ${instance-id} ${interface-id} ${account-id} ${type} ${srcaddr} ${dstaddr} ${srcport} ${dstport} ${pkt-srcaddr} ${pkt-dstaddr} ${protocol} ${bytes} ${packets} ${start} ${end} ${action} ${tcp-flags} ${log-status}" + ) + flow_logs[1]["LogFormat"].should.equal( + "${version} ${vpc-id} ${subnet-id} ${instance-id} ${interface-id} ${account-id} ${type} ${srcaddr} ${dstaddr} ${srcport} ${dstport} ${pkt-srcaddr} ${pkt-dstaddr} ${protocol} ${bytes} ${packets} ${start} ${end} ${action} ${tcp-flags} ${log-status}" + ) + + +@mock_s3 +@mock_ec2 +def test_delete_flow_logs(): + s3 = boto3.resource("s3", region_name="us-west-1") + client = boto3.client("ec2", region_name="us-west-1") + + vpc1 = client.create_vpc(CidrBlock="10.0.0.0/16")["Vpc"] + vpc2 = client.create_vpc(CidrBlock="10.1.0.0/16")["Vpc"] + + bucket = s3.create_bucket( + Bucket="test-flow-logs", + CreateBucketConfiguration={"LocationConstraint": "us-west-1"}, + ) + + response = client.create_flow_logs( + ResourceType="VPC", + ResourceIds=[vpc1["VpcId"], vpc2["VpcId"]], + TrafficType="ALL", + LogDestinationType="s3", + LogDestination="arn:aws:s3:::" + bucket.name, + )["FlowLogIds"] + response.should.have.length_of(2) + + flow_logs = client.describe_flow_logs()["FlowLogs"] + flow_logs.should.have.length_of(2) + + client.delete_flow_logs(FlowLogIds=[response[0]]) + + flow_logs = client.describe_flow_logs()["FlowLogs"] + flow_logs.should.have.length_of(1) + flow_logs[0]["FlowLogId"].should.equal(response[1]) + + client.delete_flow_logs(FlowLogIds=[response[1]]) + + flow_logs = client.describe_flow_logs()["FlowLogs"] + 
flow_logs.should.have.length_of(0) + + +@mock_s3 +@mock_ec2 +def test_delete_flow_logs_delete_many(): + s3 = boto3.resource("s3", region_name="us-west-1") + client = boto3.client("ec2", region_name="us-west-1") + + vpc1 = client.create_vpc(CidrBlock="10.0.0.0/16")["Vpc"] + vpc2 = client.create_vpc(CidrBlock="10.1.0.0/16")["Vpc"] + + bucket = s3.create_bucket( + Bucket="test-flow-logs", + CreateBucketConfiguration={"LocationConstraint": "us-west-1"}, + ) + + response = client.create_flow_logs( + ResourceType="VPC", + ResourceIds=[vpc1["VpcId"], vpc2["VpcId"]], + TrafficType="ALL", + LogDestinationType="s3", + LogDestination="arn:aws:s3:::" + bucket.name, + )["FlowLogIds"] + response.should.have.length_of(2) + + flow_logs = client.describe_flow_logs()["FlowLogs"] + flow_logs.should.have.length_of(2) + + client.delete_flow_logs(FlowLogIds=response) + + flow_logs = client.describe_flow_logs()["FlowLogs"] + flow_logs.should.have.length_of(0) + + +@mock_ec2 +def test_delete_flow_logs_non_existing(): + client = boto3.client("ec2", region_name="us-west-1") + + with assert_raises(ClientError) as ex: + client.delete_flow_logs(FlowLogIds=["fl-1a2b3c4d"]) + ex.exception.response["Error"]["Code"].should.equal("InvalidFlowLogId.NotFound") + ex.exception.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) + ex.exception.response["Error"]["Message"].should.equal( + "These flow log ids in the input list are not found: [TotalCount: 1] fl-1a2b3c4d" + ) + + with assert_raises(ClientError) as ex: + client.delete_flow_logs(FlowLogIds=["fl-1a2b3c4d", "fl-2b3c4d5e"]) + ex.exception.response["Error"]["Code"].should.equal("InvalidFlowLogId.NotFound") + ex.exception.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) + ex.exception.response["Error"]["Message"].should.equal( + "These flow log ids in the input list are not found: [TotalCount: 2] fl-1a2b3c4d fl-2b3c4d5e" + ) + + +@mock_ec2 +def test_create_flow_logs_unsuccessful(): + s3 = boto3.resource("s3", region_name="us-west-1") + client = boto3.client("ec2", region_name="us-west-1") + + vpc1 = client.create_vpc(CidrBlock="10.0.0.0/16")["Vpc"] + vpc2 = client.create_vpc(CidrBlock="10.1.0.0/16")["Vpc"] + + response = client.create_flow_logs( + ResourceType="VPC", + ResourceIds=[vpc1["VpcId"], vpc2["VpcId"]], + TrafficType="ALL", + LogDestinationType="s3", + LogDestination="arn:aws:s3:::non-existing-bucket", + ) + response["FlowLogIds"].should.have.length_of(0) + response["Unsuccessful"].should.have.length_of(2) + + error1 = response["Unsuccessful"][0]["Error"] + error2 = response["Unsuccessful"][1]["Error"] + + error1["Code"].should.equal("400") + error1["Message"].should.equal( + "LogDestination: non-existing-bucket does not exist." + ) + error2["Code"].should.equal("400") + error2["Message"].should.equal( + "LogDestination: non-existing-bucket does not exist." 
+ ) + + +@mock_s3 +@mock_ec2 +def test_create_flow_logs_invalid_parameters(): + s3 = boto3.resource("s3", region_name="us-west-1") + client = boto3.client("ec2", region_name="us-west-1") + + vpc = client.create_vpc(CidrBlock="10.0.0.0/16")["Vpc"] + + bucket = s3.create_bucket( + Bucket="test-flow-logs", + CreateBucketConfiguration={"LocationConstraint": "us-west-1"}, + ) + + with assert_raises(ClientError) as ex: + client.create_flow_logs( + ResourceType="VPC", + ResourceIds=[vpc["VpcId"]], + TrafficType="ALL", + LogDestinationType="s3", + LogDestination="arn:aws:s3:::" + bucket.name, + MaxAggregationInterval=10, + ) + ex.exception.response["Error"]["Code"].should.equal("InvalidParameter") + ex.exception.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) + ex.exception.response["Error"]["Message"].should.equal( + "Invalid Flow Log Max Aggregation Interval" + ) + + with assert_raises(ClientError) as ex: + client.create_flow_logs( + ResourceType="VPC", + ResourceIds=[vpc["VpcId"]], + TrafficType="ALL", + LogDestinationType="s3", + ) + ex.exception.response["Error"]["Code"].should.equal("InvalidParameter") + ex.exception.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) + ex.exception.response["Error"]["Message"].should.equal( + "LogDestination can't be empty if LogGroupName is not provided." + ) + + with assert_raises(ClientError) as ex: + client.create_flow_logs( + ResourceType="VPC", + ResourceIds=[vpc["VpcId"]], + TrafficType="ALL", + LogDestinationType="s3", + LogGroupName="test", + ) + ex.exception.response["Error"]["Code"].should.equal("InvalidParameter") + ex.exception.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) + ex.exception.response["Error"]["Message"].should.equal( + "LogDestination type must be cloud-watch-logs if LogGroupName is provided." + ) + + with assert_raises(ClientError) as ex: + client.create_flow_logs( + ResourceType="VPC", + ResourceIds=[vpc["VpcId"]], + TrafficType="ALL", + LogGroupName="test", + ) + ex.exception.response["Error"]["Code"].should.equal("InvalidParameter") + ex.exception.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) + ex.exception.response["Error"]["Message"].should.equal( + "DeliverLogsPermissionArn can't be empty if LogDestinationType is cloud-watch-logs." + ) + + response = client.create_flow_logs( + ResourceType="VPC", + ResourceIds=[vpc["VpcId"]], + TrafficType="ALL", + LogDestinationType="s3", + LogDestination="arn:aws:s3:::" + bucket.name, + )["FlowLogIds"] + response.should.have.length_of(1) + + with assert_raises(ClientError) as ex: + client.create_flow_logs( + ResourceType="VPC", + ResourceIds=[vpc["VpcId"]], + TrafficType="ALL", + LogDestinationType="s3", + LogDestination="arn:aws:s3:::" + bucket.name, + ) + ex.exception.response["Error"]["Code"].should.equal("FlowLogAlreadyExists") + ex.exception.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) + ex.exception.response["Error"]["Message"].should.equal( + "Error. There is an existing Flow Log with the same configuration and log destination." 
+ ) + + response = client.create_flow_logs( + ResourceType="VPC", + ResourceIds=[vpc["VpcId"]], + TrafficType="ALL", + LogGroupName="test-group", + DeliverLogsPermissionArn="arn:aws:iam::" + ACCOUNT_ID + ":role/test-role", + )["FlowLogIds"] + response.should.have.length_of(1) + + with assert_raises(ClientError) as ex: + client.create_flow_logs( + ResourceType="VPC", + ResourceIds=[vpc["VpcId"]], + TrafficType="ALL", + LogGroupName="test-group", + DeliverLogsPermissionArn="arn:aws:iam::" + ACCOUNT_ID + ":role/test-role", + ) + ex.exception.response["Error"]["Code"].should.equal("FlowLogAlreadyExists") + ex.exception.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) + ex.exception.response["Error"]["Message"].should.equal( + "Error. There is an existing Flow Log with the same configuration and log destination." + ) + + flow_logs = client.describe_flow_logs()["FlowLogs"] + flow_logs.should.have.length_of(2) + + +@mock_s3 +@mock_ec2 +@mock_logs +def test_describe_flow_logs_filtering(): + s3 = boto3.resource("s3", region_name="us-west-1") + client = boto3.client("ec2", region_name="us-west-1") + logs_client = boto3.client("logs", region_name="us-west-1") + + vpc1 = client.create_vpc(CidrBlock="10.0.0.0/16")["Vpc"] + vpc2 = client.create_vpc(CidrBlock="10.1.0.0/16")["Vpc"] + vpc3 = client.create_vpc(CidrBlock="10.2.0.0/16")["Vpc"] + + subnet1 = client.create_subnet(VpcId=vpc1["VpcId"], CidrBlock="10.0.0.0/18")[ + "Subnet" + ] + + bucket1 = s3.create_bucket( + Bucket="test-flow-logs-1", + CreateBucketConfiguration={"LocationConstraint": "us-west-1"}, + ) + + logs_client.create_log_group(logGroupName="test-group") + + fl1 = client.create_flow_logs( + ResourceType="Subnet", + ResourceIds=[subnet1["SubnetId"]], + TrafficType="ALL", + LogGroupName="test-group", + DeliverLogsPermissionArn="arn:aws:iam::" + ACCOUNT_ID + ":role/test-role", + )["FlowLogIds"][0] + + fl2 = client.create_flow_logs( + ResourceType="VPC", + ResourceIds=[vpc2["VpcId"]], + TrafficType="Accept", + LogDestinationType="s3", + LogDestination="arn:aws:s3:::" + bucket1.name, + TagSpecifications=[ + {"ResourceType": "vpc-flow-log", "Tags": [{"Key": "foo", "Value": "bar"}]} + ], + )["FlowLogIds"][0] + + fl3 = client.create_flow_logs( + ResourceType="VPC", + ResourceIds=[vpc3["VpcId"]], + TrafficType="Reject", + LogGroupName="non-existing-group", + DeliverLogsPermissionArn="arn:aws:iam::" + ACCOUNT_ID + ":role/test-role", + )["FlowLogIds"][0] + + all_flow_logs = client.describe_flow_logs()["FlowLogs"] + all_flow_logs.should.have.length_of(3) + + fl_by_deliver_status = client.describe_flow_logs( + Filters=[{"Name": "deliver-log-status", "Values": ["SUCCESS"]}], + )["FlowLogs"] + fl_by_deliver_status.should.have.length_of(3) + + fl_by_s3_bucket = client.describe_flow_logs( + Filters=[{"Name": "log-destination-type", "Values": ["s3"]}], + )["FlowLogs"] + fl_by_s3_bucket.should.have.length_of(1) + fl_by_s3_bucket[0]["FlowLogId"].should.equal(fl2) + fl_by_s3_bucket[0]["ResourceId"].should.equal(vpc2["VpcId"]) + + fl_by_cloud_watch = client.describe_flow_logs( + Filters=[{"Name": "log-destination-type", "Values": ["cloud-watch-logs"]}], + )["FlowLogs"] + fl_by_cloud_watch.should.have.length_of(2) + + flow_logs_ids = tuple(map(lambda fl: fl["FlowLogId"], fl_by_cloud_watch)) + fl1.should.be.within(flow_logs_ids) + fl3.should.be.within(flow_logs_ids) + + flow_logs_resource_ids = tuple(map(lambda fl: fl["ResourceId"], fl_by_cloud_watch)) + subnet1["SubnetId"].should.be.within(flow_logs_resource_ids) + 
vpc3["VpcId"].should.be.within(flow_logs_resource_ids) + + test_fl3 = next(fl for fl in fl_by_cloud_watch if fl["FlowLogId"] == fl3) + test_fl3["DeliverLogsStatus"].should.equal("FAILED") + test_fl3["DeliverLogsErrorMessage"].should.equal("Access error") + + fl_by_both = client.describe_flow_logs( + Filters=[ + {"Name": "log-destination-type", "Values": ["cloud-watch-logs", "s3"]} + ], + )["FlowLogs"] + fl_by_both.should.have.length_of(3) + + fl_by_flow_log_ids = client.describe_flow_logs( + Filters=[{"Name": "flow-log-id", "Values": [fl1, fl3]}], + )["FlowLogs"] + fl_by_flow_log_ids.should.have.length_of(2) + flow_logs_ids = tuple(map(lambda fl: fl["FlowLogId"], fl_by_flow_log_ids)) + fl1.should.be.within(flow_logs_ids) + fl3.should.be.within(flow_logs_ids) + + flow_logs_resource_ids = tuple(map(lambda fl: fl["ResourceId"], fl_by_flow_log_ids)) + subnet1["SubnetId"].should.be.within(flow_logs_resource_ids) + vpc3["VpcId"].should.be.within(flow_logs_resource_ids) + + fl_by_group_name = client.describe_flow_logs( + Filters=[{"Name": "log-group-name", "Values": ["test-group"]}], + )["FlowLogs"] + fl_by_group_name.should.have.length_of(1) + fl_by_group_name[0]["FlowLogId"].should.equal(fl1) + fl_by_group_name[0]["ResourceId"].should.equal(subnet1["SubnetId"]) + + fl_by_group_name = client.describe_flow_logs( + Filters=[{"Name": "log-group-name", "Values": ["non-existing-group"]}], + )["FlowLogs"] + fl_by_group_name.should.have.length_of(1) + fl_by_group_name[0]["FlowLogId"].should.equal(fl3) + fl_by_group_name[0]["ResourceId"].should.equal(vpc3["VpcId"]) + + fl_by_resource_id = client.describe_flow_logs( + Filters=[{"Name": "resource-id", "Values": [vpc2["VpcId"]]}], + )["FlowLogs"] + fl_by_resource_id.should.have.length_of(1) + fl_by_resource_id[0]["FlowLogId"].should.equal(fl2) + fl_by_resource_id[0]["ResourceId"].should.equal(vpc2["VpcId"]) + + fl_by_traffic_type = client.describe_flow_logs( + Filters=[{"Name": "traffic-type", "Values": ["ALL"]}], + )["FlowLogs"] + fl_by_traffic_type.should.have.length_of(1) + fl_by_traffic_type[0]["FlowLogId"].should.equal(fl1) + fl_by_traffic_type[0]["ResourceId"].should.equal(subnet1["SubnetId"]) + + fl_by_traffic_type = client.describe_flow_logs( + Filters=[{"Name": "traffic-type", "Values": ["Reject"]}], + )["FlowLogs"] + fl_by_traffic_type.should.have.length_of(1) + fl_by_traffic_type[0]["FlowLogId"].should.equal(fl3) + fl_by_traffic_type[0]["ResourceId"].should.equal(vpc3["VpcId"]) + + fl_by_traffic_type = client.describe_flow_logs( + Filters=[{"Name": "traffic-type", "Values": ["Accept"]}], + )["FlowLogs"] + fl_by_traffic_type.should.have.length_of(1) + fl_by_traffic_type[0]["FlowLogId"].should.equal(fl2) + fl_by_traffic_type[0]["ResourceId"].should.equal(vpc2["VpcId"]) + + fl_by_tag_key = client.describe_flow_logs( + Filters=[{"Name": "tag-key", "Values": ["foo"]}], + )["FlowLogs"] + fl_by_tag_key.should.have.length_of(1) + fl_by_tag_key[0]["FlowLogId"].should.equal(fl2) + fl_by_tag_key[0]["ResourceId"].should.equal(vpc2["VpcId"]) + + fl_by_tag_key = client.describe_flow_logs( + Filters=[{"Name": "tag-key", "Values": ["non-existing"]}], + )["FlowLogs"] + fl_by_tag_key.should.have.length_of(0) + + if not settings.TEST_SERVER_MODE: + client.describe_flow_logs.when.called_with( + Filters=[{"Name": "not-implemented-filter", "Values": ["foobar"]}], + ).should.throw(FilterNotImplementedError) + else: + client.describe_flow_logs.when.called_with( + Filters=[{"Name": "not-implemented-filter", "Values": ["foobar"]}], + ).should.throw(ResponseParserError) + 
+ +@mock_s3 +@mock_ec2 +def test_flow_logs_by_ids(): + s3 = boto3.resource("s3", region_name="us-west-1") + client = boto3.client("ec2", region_name="us-west-1") + + vpc1 = client.create_vpc(CidrBlock="10.0.0.0/16")["Vpc"] + vpc2 = client.create_vpc(CidrBlock="10.1.0.0/16")["Vpc"] + vpc3 = client.create_vpc(CidrBlock="10.2.0.0/16")["Vpc"] + + fl1 = client.create_flow_logs( + ResourceType="VPC", + ResourceIds=[vpc1["VpcId"]], + TrafficType="Reject", + LogGroupName="test-group-1", + DeliverLogsPermissionArn="arn:aws:iam::" + ACCOUNT_ID + ":role/test-role-1", + )["FlowLogIds"][0] + + fl2 = client.create_flow_logs( + ResourceType="VPC", + ResourceIds=[vpc2["VpcId"]], + TrafficType="Reject", + LogGroupName="test-group-3", + DeliverLogsPermissionArn="arn:aws:iam::" + ACCOUNT_ID + ":role/test-role-3", + )["FlowLogIds"][0] + + fl3 = client.create_flow_logs( + ResourceType="VPC", + ResourceIds=[vpc3["VpcId"]], + TrafficType="Reject", + LogGroupName="test-group-3", + DeliverLogsPermissionArn="arn:aws:iam::" + ACCOUNT_ID + ":role/test-role-3", + )["FlowLogIds"][0] + + flow_logs = client.describe_flow_logs(FlowLogIds=[fl1, fl3])["FlowLogs"] + flow_logs.should.have.length_of(2) + flow_logs_ids = tuple(map(lambda fl: fl["FlowLogId"], flow_logs)) + fl1.should.be.within(flow_logs_ids) + fl3.should.be.within(flow_logs_ids) + + flow_logs_resource_ids = tuple(map(lambda fl: fl["ResourceId"], flow_logs)) + vpc1["VpcId"].should.be.within(flow_logs_resource_ids) + vpc3["VpcId"].should.be.within(flow_logs_resource_ids) + + client.delete_flow_logs(FlowLogIds=[fl1, fl3]) + + flow_logs = client.describe_flow_logs(FlowLogIds=[fl1, fl3])["FlowLogs"] + flow_logs.should.have.length_of(0) + + flow_logs = client.describe_flow_logs()["FlowLogs"] + flow_logs.should.have.length_of(1) + flow_logs[0]["FlowLogId"].should.equal(fl2) + flow_logs[0]["ResourceId"].should.equal(vpc2["VpcId"]) + + flow_logs = client.delete_flow_logs(FlowLogIds=[fl2]) + flow_logs = client.describe_flow_logs()["FlowLogs"] + flow_logs.should.have.length_of(0) + + +@mock_cloudformation +@mock_ec2 +@mock_s3 +def test_flow_logs_by_cloudformation(): + s3 = boto3.resource("s3", region_name="us-west-1") + client = boto3.client("ec2", region_name="us-west-1") + cf_client = boto3.client("cloudformation", "us-west-1") + + vpc = client.create_vpc(CidrBlock="10.0.0.0/16")["Vpc"] + + bucket = s3.create_bucket( + Bucket="test-flow-logs", + CreateBucketConfiguration={"LocationConstraint": "us-west-1"}, + ) + + flow_log_template = { + "AWSTemplateFormatVersion": "2010-09-09", + "Description": "Template for VPC Flow Logs creation.", + "Resources": { + "TestFlowLogs": { + "Type": "AWS::EC2::FlowLog", + "Properties": { + "ResourceType": "VPC", + "ResourceId": vpc["VpcId"], + "TrafficType": "ALL", + "LogDestinationType": "s3", + "LogDestination": "arn:aws:s3:::" + bucket.name, + "MaxAggregationInterval": "60", + "Tags": [{"Key": "foo", "Value": "bar"}], + }, + } + }, + } + flow_log_template_json = json.dumps(flow_log_template) + stack_id = cf_client.create_stack( + StackName="test_stack", TemplateBody=flow_log_template_json + )["StackId"] + + flow_logs = client.describe_flow_logs()["FlowLogs"] + flow_logs.should.have.length_of(1) + flow_logs[0]["ResourceId"].should.equal(vpc["VpcId"]) + flow_logs[0]["LogDestination"].should.equal("arn:aws:s3:::" + bucket.name) + flow_logs[0]["MaxAggregationInterval"].should.equal(60) From 369f6bbfc94782e656a418241e981bd5390ce14c Mon Sep 17 00:00:00 2001 From: Bert Blommers Date: Mon, 28 Sep 2020 14:49:14 +0100 Subject: [PATCH 553/658] 
3302 - Make Secret ARN persistent --- moto/secretsmanager/list_secrets/filters.py | 8 +- moto/secretsmanager/models.py | 304 +++++++++--------- .../test_secretsmanager.py | 5 +- 3 files changed, 154 insertions(+), 163 deletions(-) diff --git a/moto/secretsmanager/list_secrets/filters.py b/moto/secretsmanager/list_secrets/filters.py index 813b1f544cf0..c888ebe6430b 100644 --- a/moto/secretsmanager/list_secrets/filters.py +++ b/moto/secretsmanager/list_secrets/filters.py @@ -7,21 +7,21 @@ def _matcher(pattern, str): def name(secret, names): for n in names: - if _matcher(n, secret["name"]): + if _matcher(n, secret.name): return True return False def description(secret, descriptions): for d in descriptions: - if _matcher(d, secret["description"]): + if _matcher(d, secret.description): return True return False def tag_key(secret, tag_keys): for k in tag_keys: - for tag in secret["tags"]: + for tag in secret.tags: if _matcher(k, tag["Key"]): return True return False @@ -29,7 +29,7 @@ def tag_key(secret, tag_keys): def tag_value(secret, tag_values): for v in tag_values: - for tag in secret["tags"]: + for tag in secret.tags: if _matcher(v, tag["Value"]): return True return False diff --git a/moto/secretsmanager/models.py b/moto/secretsmanager/models.py index 0339dc575b48..41b70bc1f726 100644 --- a/moto/secretsmanager/models.py +++ b/moto/secretsmanager/models.py @@ -50,6 +50,101 @@ def __init__(self, region_name, **kwargs): self.region = region_name +class FakeSecret: + def __init__( + self, + region_name, + secret_id, + secret_string=None, + secret_binary=None, + description=None, + tags=[], + version_id=None, + version_stages=None, + ): + self.secret_id = secret_id + self.name = secret_id + self.arn = secret_arn(region_name, secret_id) + self.secret_string = secret_string + self.secret_binary = secret_binary + self.description = description + self.tags = tags + self.version_id = version_id + self.version_stages = version_stages + self.rotation_enabled = False + self.rotation_lambda_arn = "" + self.auto_rotate_after_days = 0 + self.deleted_date = None + + def update(self, description=None, tags=[]): + self.description = description + self.tags = tags + + def set_versions(self, versions): + self.versions = versions + + def set_default_version_id(self, version_id): + self.default_version_id = version_id + + def reset_default_version(self, secret_version, version_id): + # remove all old AWSPREVIOUS stages + for old_version in self.versions.values(): + if "AWSPREVIOUS" in old_version["version_stages"]: + old_version["version_stages"].remove("AWSPREVIOUS") + + # set old AWSCURRENT secret to AWSPREVIOUS + previous_current_version_id = self.default_version_id + self.versions[previous_current_version_id]["version_stages"] = ["AWSPREVIOUS"] + + self.versions[version_id] = secret_version + self.default_version_id = version_id + + def delete(self, deleted_date): + self.deleted_date = deleted_date + + def restore(self): + self.deleted_date = None + + def is_deleted(self): + return self.deleted_date is not None + + def to_short_dict(self, include_version_stages=False): + dct = { + "ARN": self.arn, + "Name": self.name, + "VersionId": self.default_version_id, + } + if include_version_stages: + dct["VersionStages"] = self.version_stages + return json.dumps(dct) + + def to_dict(self): + version_id_to_stages = self._form_version_ids_to_stages() + + return { + "ARN": self.arn, + "Name": self.name, + "Description": self.description or "", + "KmsKeyId": "", + "RotationEnabled": self.rotation_enabled, + 
"RotationLambdaARN": self.rotation_lambda_arn, + "RotationRules": {"AutomaticallyAfterDays": self.auto_rotate_after_days}, + "LastRotatedDate": None, + "LastChangedDate": None, + "LastAccessedDate": None, + "DeletedDate": self.deleted_date, + "Tags": self.tags, + "VersionIdsToStages": version_id_to_stages, + } + + def _form_version_ids_to_stages(self): + version_id_to_stages = {} + for key, value in self.versions.items(): + version_id_to_stages[key] = value["version_stages"] + + return version_id_to_stages + + class SecretsStore(dict): def __setitem__(self, key, value): new_key = get_secret_name_from_arn(key) @@ -92,7 +187,7 @@ def get_secret_value(self, secret_id, version_id, version_stage): if not version_id and version_stage: # set version_id to match version_stage - versions_dict = self.secrets[secret_id]["versions"] + versions_dict = self.secrets[secret_id].versions for ver_id, ver_val in versions_dict.items(): if version_stage in ver_val["version_stages"]: version_id = ver_id @@ -101,20 +196,20 @@ def get_secret_value(self, secret_id, version_id, version_stage): raise SecretNotFoundException() # TODO check this part - if "deleted_date" in self.secrets[secret_id]: + if self.secrets[secret_id].is_deleted(): raise InvalidRequestException( "An error occurred (InvalidRequestException) when calling the GetSecretValue operation: You tried to \ perform the operation on a secret that's currently marked deleted." ) secret = self.secrets[secret_id] - version_id = version_id or secret["default_version_id"] + version_id = version_id or secret.default_version_id - secret_version = secret["versions"][version_id] + secret_version = secret.versions[version_id] response_data = { - "ARN": secret_arn(self.region, secret["secret_id"]), - "Name": secret["name"], + "ARN": secret.arn, + "Name": secret.name, "VersionId": secret_version["version_id"], "VersionStages": secret_version["version_stages"], "CreatedDate": secret_version["createdate"], @@ -144,17 +239,17 @@ def update_secret( if secret_id not in self.secrets.keys(): raise SecretNotFoundException() - if "deleted_date" in self.secrets[secret_id]: + if self.secrets[secret_id].is_deleted(): raise InvalidRequestException( "An error occurred (InvalidRequestException) when calling the UpdateSecret operation: " "You can't perform this operation on the secret because it was marked for deletion." ) secret = self.secrets[secret_id] - tags = secret["tags"] - description = secret["description"] + tags = secret.tags + description = secret.description - version_id = self._add_secret( + secret = self._add_secret( secret_id, secret_string=secret_string, secret_binary=secret_binary, @@ -162,15 +257,7 @@ def update_secret( tags=tags, ) - response = json.dumps( - { - "ARN": secret_arn(self.region, secret_id), - "Name": secret_id, - "VersionId": version_id, - } - ) - - return response + return secret.to_short_dict() def create_secret( self, @@ -188,7 +275,7 @@ def create_secret( "A resource with the ID you requested already exists." 
) - version_id = self._add_secret( + secret = self._add_secret( name, secret_string=secret_string, secret_binary=secret_binary, @@ -196,15 +283,7 @@ def create_secret( tags=tags, ) - response = json.dumps( - { - "ARN": secret_arn(self.region, name), - "Name": name, - "VersionId": version_id, - } - ) - - return response + return secret.to_short_dict() def _add_secret( self, @@ -228,7 +307,6 @@ def _add_secret( "version_id": version_id, "version_stages": version_stages, } - if secret_string is not None: secret_version["secret_string"] = secret_string @@ -236,49 +314,35 @@ def _add_secret( secret_version["secret_binary"] = secret_binary if secret_id in self.secrets: - # remove all old AWSPREVIOUS stages - for secret_verion_to_look_at in self.secrets[secret_id][ - "versions" - ].values(): - if "AWSPREVIOUS" in secret_verion_to_look_at["version_stages"]: - secret_verion_to_look_at["version_stages"].remove("AWSPREVIOUS") - - # set old AWSCURRENT secret to AWSPREVIOUS - previous_current_version_id = self.secrets[secret_id]["default_version_id"] - self.secrets[secret_id]["versions"][previous_current_version_id][ - "version_stages" - ] = ["AWSPREVIOUS"] - - self.secrets[secret_id]["versions"][version_id] = secret_version - self.secrets[secret_id]["default_version_id"] = version_id + secret = self.secrets[secret_id] + secret.update(description, tags) + secret.reset_default_version(secret_version, version_id) else: - self.secrets[secret_id] = { - "versions": {version_id: secret_version}, - "default_version_id": version_id, - } - - secret = self.secrets[secret_id] - secret["secret_id"] = secret_id - secret["name"] = secret_id - secret["rotation_enabled"] = False - secret["rotation_lambda_arn"] = "" - secret["auto_rotate_after_days"] = 0 - secret["tags"] = tags - secret["description"] = description + secret = FakeSecret( + region_name=self.region, + secret_id=secret_id, + secret_string=secret_string, + secret_binary=secret_binary, + description=description, + tags=tags, + ) + secret.set_versions({version_id: secret_version}) + secret.set_default_version_id(version_id) + self.secrets[secret_id] = secret - return version_id + return secret def put_secret_value(self, secret_id, secret_string, secret_binary, version_stages): if secret_id in self.secrets.keys(): secret = self.secrets[secret_id] - tags = secret["tags"] - description = secret["description"] + tags = secret.tags + description = secret.description else: tags = [] description = "" - version_id = self._add_secret( + secret = self._add_secret( secret_id, secret_string, secret_binary, @@ -287,45 +351,15 @@ def put_secret_value(self, secret_id, secret_string, secret_binary, version_stag version_stages=version_stages, ) - response = json.dumps( - { - "ARN": secret_arn(self.region, secret_id), - "Name": secret_id, - "VersionId": version_id, - "VersionStages": version_stages, - } - ) - - return response + return secret.to_short_dict(include_version_stages=True) def describe_secret(self, secret_id): if not self._is_valid_identifier(secret_id): raise SecretNotFoundException() secret = self.secrets[secret_id] - version_id_to_stages = self.form_version_ids_to_stages(secret["versions"]) - - response = json.dumps( - { - "ARN": secret_arn(self.region, secret["secret_id"]), - "Name": secret["name"], - "Description": secret.get("description", ""), - "KmsKeyId": "", - "RotationEnabled": secret["rotation_enabled"], - "RotationLambdaARN": secret["rotation_lambda_arn"], - "RotationRules": { - "AutomaticallyAfterDays": secret["auto_rotate_after_days"] - }, - 
"LastRotatedDate": None, - "LastChangedDate": None, - "LastAccessedDate": None, - "DeletedDate": secret.get("deleted_date", None), - "Tags": secret["tags"], - "VersionIdsToStages": version_id_to_stages, - } - ) - return response + return json.dumps(secret.to_dict()) def rotate_secret( self, @@ -340,7 +374,7 @@ def rotate_secret( if not self._is_valid_identifier(secret_id): raise SecretNotFoundException() - if "deleted_date" in self.secrets[secret_id]: + if self.secrets[secret_id].is_deleted(): raise InvalidRequestException( "An error occurred (InvalidRequestException) when calling the RotateSecret operation: You tried to \ perform the operation on a secret that's currently marked deleted." @@ -368,36 +402,28 @@ def rotate_secret( secret = self.secrets[secret_id] - old_secret_version = secret["versions"][secret["default_version_id"]] + old_secret_version = secret.versions[secret.default_version_id] new_version_id = client_request_token or str(uuid.uuid4()) self._add_secret( secret_id, old_secret_version["secret_string"], - secret["description"], - secret["tags"], + secret.description, + secret.tags, version_id=new_version_id, version_stages=["AWSCURRENT"], ) - secret["rotation_lambda_arn"] = rotation_lambda_arn or "" + secret.rotation_lambda_arn = rotation_lambda_arn or "" if rotation_rules: - secret["auto_rotate_after_days"] = rotation_rules.get(rotation_days, 0) - if secret["auto_rotate_after_days"] > 0: - secret["rotation_enabled"] = True + secret.auto_rotate_after_days = rotation_rules.get(rotation_days, 0) + if secret.auto_rotate_after_days > 0: + secret.rotation_enabled = True if "AWSCURRENT" in old_secret_version["version_stages"]: old_secret_version["version_stages"].remove("AWSCURRENT") - response = json.dumps( - { - "ARN": secret_arn(self.region, secret["secret_id"]), - "Name": secret["name"], - "VersionId": new_version_id, - } - ) - - return response + return secret.to_short_dict() def get_random_password( self, @@ -446,7 +472,7 @@ def list_secret_version_ids(self, secret_id): secret = self.secrets[secret_id] version_list = [] - for version_id, version in secret["versions"].items(): + for version_id, version in secret.versions.items(): version_list.append( { "CreatedDate": int(time.time()), @@ -458,8 +484,8 @@ def list_secret_version_ids(self, secret_id): response = json.dumps( { - "ARN": secret["secret_id"], - "Name": secret["name"], + "ARN": secret.secret_id, + "Name": secret.name, "NextToken": "", "Versions": version_list, } @@ -473,29 +499,7 @@ def list_secrets(self, filters, max_results, next_token): secret_list = [] for secret in self.secrets.values(): if _matches(secret, filters): - versions_to_stages = {} - for version_id, version in secret["versions"].items(): - versions_to_stages[version_id] = version["version_stages"] - - secret_list.append( - { - "ARN": secret_arn(self.region, secret["secret_id"]), - "DeletedDate": secret.get("deleted_date", None), - "Description": secret.get("description", ""), - "KmsKeyId": "", - "LastAccessedDate": None, - "LastChangedDate": None, - "LastRotatedDate": None, - "Name": secret["name"], - "RotationEnabled": secret["rotation_enabled"], - "RotationLambdaARN": secret["rotation_lambda_arn"], - "RotationRules": { - "AutomaticallyAfterDays": secret["auto_rotate_after_days"] - }, - "SecretVersionsToStages": versions_to_stages, - "Tags": secret["tags"], - } - ) + secret_list.append(secret.to_dict()) return secret_list, None @@ -506,7 +510,7 @@ def delete_secret( if not self._is_valid_identifier(secret_id): raise SecretNotFoundException() - if 
"deleted_date" in self.secrets[secret_id]: + if self.secrets[secret_id].is_deleted(): raise InvalidRequestException( "An error occurred (InvalidRequestException) when calling the DeleteSecret operation: You tried to \ perform the operation on a secret that's currently marked deleted." @@ -532,16 +536,14 @@ def delete_secret( secret = self.secrets.pop(secret_id, None) else: deletion_date += datetime.timedelta(days=recovery_window_in_days or 30) - self.secrets[secret_id]["deleted_date"] = self._unix_time_secs( - deletion_date - ) + self.secrets[secret_id].delete(self._unix_time_secs(deletion_date)) secret = self.secrets.get(secret_id, None) if not secret: raise SecretNotFoundException() - arn = secret_arn(self.region, secret["secret_id"]) - name = secret["name"] + arn = secret.arn + name = secret.name return arn, name, self._unix_time_secs(deletion_date) @@ -550,14 +552,10 @@ def restore_secret(self, secret_id): if not self._is_valid_identifier(secret_id): raise SecretNotFoundException() - self.secrets[secret_id].pop("deleted_date", None) - secret = self.secrets[secret_id] + secret.restore() - arn = secret_arn(self.region, secret["secret_id"]) - name = secret["name"] - - return arn, name + return secret.arn, secret.name @staticmethod def get_resource_policy(secret_id): @@ -583,14 +581,6 @@ def get_resource_policy(secret_id): } ) - @staticmethod - def form_version_ids_to_stages(secret): - version_id_to_stages = {} - for key, value in secret.items(): - version_id_to_stages[key] = value["version_stages"] - - return version_id_to_stages - secretsmanager_backends = {} for region in Session().get_available_regions("secretsmanager"): diff --git a/tests/test_secretsmanager/test_secretsmanager.py b/tests/test_secretsmanager/test_secretsmanager.py index 69e055bb2107..94e745659881 100644 --- a/tests/test_secretsmanager/test_secretsmanager.py +++ b/tests/test_secretsmanager/test_secretsmanager.py @@ -439,8 +439,9 @@ def test_describe_secret_with_arn(): secret_description = conn.describe_secret(SecretId=results["ARN"]) assert secret_description # Returned dict is not empty - assert secret_description["Name"] == ("test-secret") - assert secret_description["ARN"] != results["ARN"] + secret_description["Name"].should.equal("test-secret") + secret_description["ARN"].should.equal(results["ARN"]) + conn.list_secrets()["SecretList"][0]["ARN"].should.equal(results["ARN"]) @mock_secretsmanager From 195ba81c56458580e33003814075500302d934a2 Mon Sep 17 00:00:00 2001 From: Giovanni Torres Date: Tue, 29 Sep 2020 06:51:17 -0400 Subject: [PATCH 554/658] feat: update events remove_targets API response (#3340) * feat: update events remove_targets API response * test: add missing test for expected exception --- moto/events/models.py | 9 ++++++--- moto/events/responses.py | 5 ++++- tests/test_events/test_events.py | 16 +++++++++++++++- 3 files changed, 25 insertions(+), 5 deletions(-) diff --git a/moto/events/models.py b/moto/events/models.py index 4d5047891589..8b7a084f796c 100644 --- a/moto/events/models.py +++ b/moto/events/models.py @@ -368,9 +368,12 @@ def remove_targets(self, name, ids): if rule: rule.remove_targets(ids) - return True - - return False + return {"FailedEntries": [], "FailedEntryCount": 0} + else: + raise JsonRESTError( + "ResourceNotFoundException", + "An entity that you specified does not exist", + ) def test_event_pattern(self): raise NotImplementedError() diff --git a/moto/events/responses.py b/moto/events/responses.py index c4e49fc80c81..a72a869759e4 100644 --- a/moto/events/responses.py +++ 
b/moto/events/responses.py @@ -238,7 +238,10 @@ def remove_targets(self): "ResourceNotFoundException", "Rule " + rule_name + " does not exist." ) - return "", self.response_headers + return ( + json.dumps({"FailedEntryCount": 0, "FailedEntries": []}), + self.response_headers, + ) def test_event_pattern(self): pass diff --git a/tests/test_events/test_events.py b/tests/test_events/test_events.py index 678e0a6221c2..b65171603f81 100644 --- a/tests/test_events/test_events.py +++ b/tests/test_events/test_events.py @@ -195,13 +195,27 @@ def test_remove_targets(): targets_before = len(targets) assert targets_before > 0 - client.remove_targets(Rule=rule_name, Ids=[targets[0]["Id"]]) + response = client.remove_targets(Rule=rule_name, Ids=[targets[0]["Id"]]) + response["FailedEntryCount"].should.equal(0) + response["FailedEntries"].should.have.length_of(0) targets = client.list_targets_by_rule(Rule=rule_name)["Targets"] targets_after = len(targets) assert targets_before - 1 == targets_after +@mock_events +def test_remove_targets_errors(): + client = boto3.client("events", "us-east-1") + + client.remove_targets.when.called_with( + Rule="non-existent", Ids=["Id12345678"] + ).should.throw( + client.exceptions.ResourceNotFoundException, + "An entity that you specified does not exist", + ) + + @mock_events def test_put_targets(): client = boto3.client("events", "us-west-2") From ebb1c6bd6881a9d3e69c39755ba16bbb8c439439 Mon Sep 17 00:00:00 2001 From: Ellynas <64953449+Ellynas@users.noreply.github.com> Date: Tue, 29 Sep 2020 14:58:13 +0200 Subject: [PATCH 555/658] Update IMPLEMENTATION_COVERAGE.md (#3342) ec2 `run_instances()` seems to be implemented [here](https://github.com/spulec/moto/blob/3bc18455a2dad70148dc095435a55b2a43eaeac1/moto/ec2/responses/instances.py#L48) and should be marked as such --- IMPLEMENTATION_COVERAGE.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/IMPLEMENTATION_COVERAGE.md b/IMPLEMENTATION_COVERAGE.md index 81611ace04e2..101f9c0dc932 100644 --- a/IMPLEMENTATION_COVERAGE.md +++ b/IMPLEMENTATION_COVERAGE.md @@ -2968,7 +2968,7 @@ - [ ] revoke_client_vpn_ingress - [X] revoke_security_group_egress - [X] revoke_security_group_ingress -- [ ] run_instances +- [X] run_instances - [ ] run_scheduled_instances - [ ] search_local_gateway_routes - [ ] search_transit_gateway_multicast_groups From 1dd5cf08a89ffa75e4be9d7cd76d865bf4c89796 Mon Sep 17 00:00:00 2001 From: jweite Date: Wed, 30 Sep 2020 08:18:26 -0400 Subject: [PATCH 556/658] Transcribe Medical Support (#3299) * Transcribe first cut: Medical: start, get and delete jobs. * Added list_medical_transcription_job() support to Transcribe. * Support for medical vocabularies. * Added transcribe to list of backends to fix server mode error. * PR3299 requested changes: don't offer deprecated decorator, regionalize download_uri, create/use service-specific exceptions. 
Co-authored-by: Joseph Weitekamp --- moto/__init__.py | 1 + moto/backends.py | 1 + moto/transcribe/__init__.py | 6 + moto/transcribe/exceptions.py | 13 + moto/transcribe/models.py | 387 +++++++++++++++++ moto/transcribe/responses.py | 111 +++++ moto/transcribe/urls.py | 7 + tests/test_transcribe/__init__.py | 0 .../test_transcribe/test_transcribe_boto3.py | 391 ++++++++++++++++++ 9 files changed, 917 insertions(+) create mode 100644 moto/transcribe/__init__.py create mode 100644 moto/transcribe/exceptions.py create mode 100644 moto/transcribe/models.py create mode 100644 moto/transcribe/responses.py create mode 100644 moto/transcribe/urls.py create mode 100644 tests/test_transcribe/__init__.py create mode 100644 tests/test_transcribe/test_transcribe_boto3.py diff --git a/moto/__init__.py b/moto/__init__.py index d9e57189390e..c73e111a0b76 100644 --- a/moto/__init__.py +++ b/moto/__init__.py @@ -110,6 +110,7 @@ def f(*args, **kwargs): mock_sts_deprecated = lazy_load(".sts", "mock_sts_deprecated") mock_swf = lazy_load(".swf", "mock_swf") mock_swf_deprecated = lazy_load(".swf", "mock_swf_deprecated") +mock_transcribe = lazy_load(".transcribe", "mock_transcribe") XRaySegment = lazy_load(".xray", "XRaySegment") mock_xray = lazy_load(".xray", "mock_xray") mock_xray_client = lazy_load(".xray", "mock_xray_client") diff --git a/moto/backends.py b/moto/backends.py index 7b1c1d08db7b..e76a89ccb3bc 100644 --- a/moto/backends.py +++ b/moto/backends.py @@ -68,6 +68,7 @@ "stepfunctions": ("stepfunctions", "stepfunction_backends"), "sts": ("sts", "sts_backends"), "swf": ("swf", "swf_backends"), + "transcribe": ("transcribe", "transcribe_backends"), "xray": ("xray", "xray_backends"), "kinesisvideo": ("kinesisvideo", "kinesisvideo_backends"), "kinesis-video-archived-media": ( diff --git a/moto/transcribe/__init__.py b/moto/transcribe/__init__.py new file mode 100644 index 000000000000..9c4a7ba2ef76 --- /dev/null +++ b/moto/transcribe/__init__.py @@ -0,0 +1,6 @@ +from __future__ import unicode_literals + +from .models import transcribe_backends + +transcribe_backend = transcribe_backends["us-east-1"] +mock_transcribe = transcribe_backend.decorator diff --git a/moto/transcribe/exceptions.py b/moto/transcribe/exceptions.py new file mode 100644 index 000000000000..d80f1e3e2ab3 --- /dev/null +++ b/moto/transcribe/exceptions.py @@ -0,0 +1,13 @@ +from moto.core.exceptions import JsonRESTError + + +class ConflictException(JsonRESTError): + def __init__(self, message, **kwargs): + super(ConflictException, self).__init__("ConflictException", message, **kwargs) + + +class BadRequestException(JsonRESTError): + def __init__(self, message, **kwargs): + super(BadRequestException, self).__init__( + "BadRequestException", message, **kwargs + ) diff --git a/moto/transcribe/models.py b/moto/transcribe/models.py new file mode 100644 index 000000000000..bf8e602e63dc --- /dev/null +++ b/moto/transcribe/models.py @@ -0,0 +1,387 @@ +import uuid +from datetime import datetime, timedelta + +from moto.core import BaseBackend, BaseModel +from moto.ec2 import ec2_backends +from moto.sts.models import ACCOUNT_ID +from .exceptions import ConflictException, BadRequestException + + +class BaseObject(BaseModel): + def camelCase(self, key): + words = [] + for i, word in enumerate(key.split("_")): + words.append(word.title()) + return "".join(words) + + def gen_response_object(self): + response_object = dict() + for key, value in self.__dict__.items(): + if "_" in key: + response_object[self.camelCase(key)] = value + else: + 
response_object[key[0].upper() + key[1:]] = value + return response_object + + @property + def response_object(self): + return self.gen_response_object() + + +class FakeMedicalTranscriptionJob(BaseObject): + def __init__( + self, + region_name, + medical_transcription_job_name, + language_code, + media_sample_rate_hertz, + media_format, + media, + output_bucket_name, + output_encryption_kms_key_id, + settings, + specialty, + type, + ): + self._region_name = region_name + self.medical_transcription_job_name = medical_transcription_job_name + self.transcription_job_status = None + self.language_code = language_code + self.media_sample_rate_hertz = media_sample_rate_hertz + self.media_format = media_format + self.media = media + self.transcript = None + self.start_time = self.completion_time = None + self.creation_time = datetime.now().strftime("%Y-%m-%d %H:%M:%S") + self.failure_reason = None + self.settings = settings or { + "ChannelIdentification": False, + "ShowAlternatives": False, + } + self.specialty = specialty + self.type = type + self._output_bucket_name = output_bucket_name + self._output_encryption_kms_key_id = output_encryption_kms_key_id + self.output_location_type = "CUSTOMER_BUCKET" + + def response_object(self, response_type): + response_field_dict = { + "CREATE": [ + "MedicalTranscriptionJobName", + "TranscriptionJobStatus", + "LanguageCode", + "MediaFormat", + "Media", + "StartTime", + "CreationTime", + "Specialty", + "Type", + ], + "GET": [ + "MedicalTranscriptionJobName", + "TranscriptionJobStatus", + "LanguageCode", + "MediaSampleRateHertz", + "MediaFormat", + "Media", + "Transcript", + "StartTime", + "CreationTime", + "CompletionTime", + "Settings", + "Specialty", + "Type", + ], + "LIST": [ + "MedicalTranscriptionJobName", + "CreationTime", + "StartTime", + "CompletionTime", + "LanguageCode", + "TranscriptionJobStatus", + "FailureReason", + "OutputLocationType", + "Specialty", + "Type", + ], + } + response_fields = response_field_dict[response_type] + response_object = self.gen_response_object() + if response_type != "LIST": + return { + "MedicalTranscriptionJob": { + k: v + for k, v in response_object.items() + if k in response_fields and v is not None and v != [None] + } + } + else: + return { + k: v + for k, v in response_object.items() + if k in response_fields and v is not None and v != [None] + } + + def advance_job_status(self): + # On each call advances the fake job status + + if not self.transcription_job_status: + self.transcription_job_status = "QUEUED" + elif self.transcription_job_status == "QUEUED": + self.transcription_job_status = "IN_PROGRESS" + self.start_time = datetime.now().strftime("%Y-%m-%d %H:%M:%S") + if not self.media_sample_rate_hertz: + self.media_sample_rate_hertz = 44100 + if not self.media_format: + file_ext = self.media["MediaFileUri"].split(".")[-1].lower() + self.media_format = ( + file_ext if file_ext in ["mp3", "mp4", "wav", "flac"] else "mp3" + ) + elif self.transcription_job_status == "IN_PROGRESS": + self.transcription_job_status = "COMPLETED" + self.completion_time = (datetime.now() + timedelta(seconds=10)).strftime( + "%Y-%m-%d %H:%M:%S" + ) + self.transcript = { + "TranscriptFileUri": "https://s3.{}.amazonaws.com/{}/medical/{}.json".format( + self._region_name, + self._output_bucket_name, + self.medical_transcription_job_name, + ) + } + + +class FakeMedicalVocabulary(BaseObject): + def __init__( + self, region_name, vocabulary_name, language_code, vocabulary_file_uri, + ): + self._region_name = region_name + self.vocabulary_name 
= vocabulary_name + self.language_code = language_code + self.vocabulary_file_uri = vocabulary_file_uri + self.vocabulary_state = None + self.last_modified_time = None + self.failure_reason = None + self.download_uri = "https://s3.us-east-1.amazonaws.com/aws-transcribe-dictionary-model-{}-prod/{}/medical/{}/{}/input.txt".format( + region_name, ACCOUNT_ID, self.vocabulary_name, uuid.uuid4() + ) + + def response_object(self, response_type): + response_field_dict = { + "CREATE": [ + "VocabularyName", + "LanguageCode", + "VocabularyState", + "LastModifiedTime", + "FailureReason", + ], + "GET": [ + "VocabularyName", + "LanguageCode", + "VocabularyState", + "LastModifiedTime", + "FailureReason", + "DownloadUri", + ], + "LIST": [ + "VocabularyName", + "LanguageCode", + "LastModifiedTime", + "VocabularyState", + ], + } + response_fields = response_field_dict[response_type] + response_object = self.gen_response_object() + return { + k: v + for k, v in response_object.items() + if k in response_fields and v is not None and v != [None] + } + + def advance_job_status(self): + # On each call advances the fake job status + + if not self.vocabulary_state: + self.vocabulary_state = "PENDING" + self.last_modified_time = datetime.now().strftime("%Y-%m-%d %H:%M:%S") + elif self.vocabulary_state == "PENDING": + self.vocabulary_state = "READY" + self.last_modified_time = datetime.now().strftime("%Y-%m-%d %H:%M:%S") + + +class TranscribeBackend(BaseBackend): + def __init__(self, region_name=None): + self.medical_transcriptions = {} + self.medical_vocabularies = {} + self.region_name = region_name + + def reset(self): + region_name = self.region_name + self.__dict__ = {} + self.__init__(region_name) + + def start_medical_transcription_job(self, **kwargs): + + name = kwargs.get("medical_transcription_job_name") + + if name in self.medical_transcriptions: + raise ConflictException( + message="The requested job name already exists. Use a different job name." + ) + + settings = kwargs.get("settings") + vocabulary_name = settings.get("VocabularyName") if settings else None + if vocabulary_name and vocabulary_name not in self.medical_vocabularies: + raise BadRequestException( + message="The requested vocabulary couldn't be found. Check the vocabulary name and try your request again." + ) + + transcription_job_object = FakeMedicalTranscriptionJob( + region_name=self.region_name, + medical_transcription_job_name=name, + language_code=kwargs.get("language_code"), + media_sample_rate_hertz=kwargs.get("media_sample_rate_hertz"), + media_format=kwargs.get("media_format"), + media=kwargs.get("media"), + output_bucket_name=kwargs.get("output_bucket_name"), + output_encryption_kms_key_id=kwargs.get("output_encryption_kms_key_id"), + settings=settings, + specialty=kwargs.get("specialty"), + type=kwargs.get("type"), + ) + + self.medical_transcriptions[name] = transcription_job_object + + return transcription_job_object.response_object("CREATE") + + def get_medical_transcription_job(self, medical_transcription_job_name): + try: + job = self.medical_transcriptions[medical_transcription_job_name] + job.advance_job_status() # Fakes advancement through statuses. + return job.response_object("GET") + except KeyError: + raise BadRequestException( + message="The requested job couldn't be found. Check the job name and try your request again." 
+ ) + + def delete_medical_transcription_job(self, medical_transcription_job_name): + try: + del self.medical_transcriptions[medical_transcription_job_name] + except KeyError: + raise BadRequestException( + message="The requested job couldn't be found. Check the job name and try your request again.", + ) + + def list_medical_transcription_jobs( + self, status, job_name_contains, next_token, max_results + ): + jobs = list(self.medical_transcriptions.values()) + + if status: + jobs = [job for job in jobs if job.transcription_job_status == status] + + if job_name_contains: + jobs = [ + job + for job in jobs + if job_name_contains in job.medical_transcription_job_name + ] + + start_offset = int(next_token) if next_token else 0 + end_offset = start_offset + ( + max_results if max_results else 100 + ) # Arbitrarily selected... + jobs_paginated = jobs[start_offset:end_offset] + + response = { + "MedicalTranscriptionJobSummaries": [ + job.response_object("LIST") for job in jobs_paginated + ] + } + if end_offset < len(jobs): + response["NextToken"] = str(end_offset) + if status: + response["Status"] = status + return response + + def create_medical_vocabulary(self, **kwargs): + + vocabulary_name = kwargs.get("vocabulary_name") + language_code = kwargs.get("language_code") + vocabulary_file_uri = kwargs.get("vocabulary_file_uri") + + if vocabulary_name in self.medical_vocabularies: + raise ConflictException( + message="The requested vocabulary name already exists. Use a different vocabulary name." + ) + + medical_vocabulary_object = FakeMedicalVocabulary( + region_name=self.region_name, + vocabulary_name=vocabulary_name, + language_code=language_code, + vocabulary_file_uri=vocabulary_file_uri, + ) + + self.medical_vocabularies[vocabulary_name] = medical_vocabulary_object + + return medical_vocabulary_object.response_object("CREATE") + + def get_medical_vocabulary(self, vocabulary_name): + try: + job = self.medical_vocabularies[vocabulary_name] + job.advance_job_status() # Fakes advancement through statuses. + return job.response_object("GET") + except KeyError: + raise BadRequestException( + message="The requested vocabulary couldn't be found. Check the vocabulary name and try your request again." + ) + + def delete_medical_vocabulary(self, vocabulary_name): + try: + del self.medical_vocabularies[vocabulary_name] + except KeyError: + raise BadRequestException( + message="The requested vocabulary couldn't be found. Check the vocabulary name and try your request again." + ) + + def list_medical_vocabularies( + self, state_equals, name_contains, next_token, max_results + ): + vocabularies = list(self.medical_vocabularies.values()) + + if state_equals: + vocabularies = [ + vocabulary + for vocabulary in vocabularies + if vocabulary.vocabulary_state == state_equals + ] + + if name_contains: + vocabularies = [ + vocabulary + for vocabulary in vocabularies + if name_contains in vocabulary.vocabulary_name + ] + + start_offset = int(next_token) if next_token else 0 + end_offset = start_offset + ( + max_results if max_results else 100 + ) # Arbitrarily selected... 
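+ # NextToken is modelled as a plain stringified offset: each page returns
+ # the end offset of its slice, and the next request passes that value back
+ # as its starting point.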
+ vocabularies_paginated = vocabularies[start_offset:end_offset] + + response = { + "Vocabularies": [ + vocabulary.response_object("LIST") + for vocabulary in vocabularies_paginated + ] + } + if end_offset < len(vocabularies): + response["NextToken"] = str(end_offset) + if state_equals: + response["Status"] = state_equals + return response + + +transcribe_backends = {} +for region, ec2_backend in ec2_backends.items(): + transcribe_backends[region] = TranscribeBackend(region_name=region) diff --git a/moto/transcribe/responses.py b/moto/transcribe/responses.py new file mode 100644 index 000000000000..54d718b3ccf5 --- /dev/null +++ b/moto/transcribe/responses.py @@ -0,0 +1,111 @@ +from __future__ import unicode_literals + +import json + +from moto.core.responses import BaseResponse +from moto.core.utils import amzn_request_id +from .models import transcribe_backends + + +class TranscribeResponse(BaseResponse): + @property + def transcribe_backend(self): + return transcribe_backends[self.region] + + @property + def request_params(self): + try: + return json.loads(self.body) + except ValueError: + return {} + + @amzn_request_id + def start_medical_transcription_job(self): + name = self._get_param("MedicalTranscriptionJobName") + response = self.transcribe_backend.start_medical_transcription_job( + medical_transcription_job_name=name, + language_code=self._get_param("LanguageCode"), + media_sample_rate_hertz=self._get_param("MediaSampleRateHertz"), + media_format=self._get_param("MediaFormat"), + media=self._get_param("Media"), + output_bucket_name=self._get_param("OutputBucketName"), + output_encryption_kms_key_id=self._get_param("OutputEncryptionKMSKeyId"), + settings=self._get_param("Settings"), + specialty=self._get_param("Specialty"), + type=self._get_param("Type"), + ) + return json.dumps(response) + + @amzn_request_id + def list_medical_transcription_jobs(self): + status = self._get_param("Status") + job_name_contains = self._get_param("JobNameContains") + next_token = self._get_param("NextToken") + max_results = self._get_param("MaxResults") + + response = self.transcribe_backend.list_medical_transcription_jobs( + status=status, + job_name_contains=job_name_contains, + next_token=next_token, + max_results=max_results, + ) + return json.dumps(response) + + @amzn_request_id + def get_medical_transcription_job(self): + medical_transcription_job_name = self._get_param("MedicalTranscriptionJobName") + response = self.transcribe_backend.get_medical_transcription_job( + medical_transcription_job_name=medical_transcription_job_name + ) + return json.dumps(response) + + @amzn_request_id + def delete_medical_transcription_job(self): + medical_transcription_job_name = self._get_param("MedicalTranscriptionJobName") + response = self.transcribe_backend.delete_medical_transcription_job( + medical_transcription_job_name=medical_transcription_job_name + ) + return json.dumps(response) + + @amzn_request_id + def create_medical_vocabulary(self): + vocabulary_name = self._get_param("VocabularyName") + language_code = self._get_param("LanguageCode") + vocabulary_file_uri = self._get_param("VocabularyFileUri") + response = self.transcribe_backend.create_medical_vocabulary( + vocabulary_name=vocabulary_name, + language_code=language_code, + vocabulary_file_uri=vocabulary_file_uri, + ) + return json.dumps(response) + + @amzn_request_id + def get_medical_vocabulary(self): + vocabulary_name = self._get_param("VocabularyName") + response = self.transcribe_backend.get_medical_vocabulary( + 
vocabulary_name=vocabulary_name + ) + return json.dumps(response) + + @amzn_request_id + def list_medical_vocabularies(self): + state_equals = self._get_param("StateEquals") + name_contains = self._get_param("NameContains") + next_token = self._get_param("NextToken") + max_results = self._get_param("MaxResults") + + response = self.transcribe_backend.list_medical_vocabularies( + state_equals=state_equals, + name_contains=name_contains, + next_token=next_token, + max_results=max_results, + ) + return json.dumps(response) + + @amzn_request_id + def delete_medical_vocabulary(self): + vocabulary_name = self._get_param("VocabularyName") + response = self.transcribe_backend.delete_medical_vocabulary( + vocabulary_name=vocabulary_name + ) + return json.dumps(response) diff --git a/moto/transcribe/urls.py b/moto/transcribe/urls.py new file mode 100644 index 000000000000..175f6fea998c --- /dev/null +++ b/moto/transcribe/urls.py @@ -0,0 +1,7 @@ +from __future__ import unicode_literals + +from .responses import TranscribeResponse + +url_bases = ["https?://transcribe.(.+).amazonaws.com"] + +url_paths = {"{0}/$": TranscribeResponse.dispatch} diff --git a/tests/test_transcribe/__init__.py b/tests/test_transcribe/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/tests/test_transcribe/test_transcribe_boto3.py b/tests/test_transcribe/test_transcribe_boto3.py new file mode 100644 index 000000000000..3de958bc1140 --- /dev/null +++ b/tests/test_transcribe/test_transcribe_boto3.py @@ -0,0 +1,391 @@ +# -*- coding: utf-8 -*- +from __future__ import unicode_literals + +import boto3 +import sure # noqa + +from moto import mock_transcribe + + +@mock_transcribe +def test_run_medical_transcription_job_minimal_params(): + + region_name = "us-east-1" + client = boto3.client("transcribe", region_name=region_name) + + job_name = "MyJob" + args = { + "MedicalTranscriptionJobName": job_name, + "LanguageCode": "en-US", + "Media": {"MediaFileUri": "s3://my-bucket/my-media-file.wav",}, + "OutputBucketName": "my-output-bucket", + "Specialty": "PRIMARYCARE", + "Type": "CONVERSATION", + } + resp = client.start_medical_transcription_job(**args) + resp["ResponseMetadata"]["HTTPStatusCode"].should.equal(200) + + # CREATED + resp = client.get_medical_transcription_job(MedicalTranscriptionJobName=job_name) + resp["ResponseMetadata"]["HTTPStatusCode"].should.equal(200) + transcription_job = resp["MedicalTranscriptionJob"] + transcription_job["MedicalTranscriptionJobName"].should.equal( + args["MedicalTranscriptionJobName"] + ) + transcription_job["TranscriptionJobStatus"].should.equal("QUEUED") + transcription_job["LanguageCode"].should.equal(args["LanguageCode"]) + transcription_job["Media"].should.equal(args["Media"]) + transcription_job.should.contain("CreationTime") + transcription_job.doesnt.contain("StartTime") + transcription_job.doesnt.contain("CompletionTime") + transcription_job.doesnt.contain("Transcript") + transcription_job["Settings"]["ChannelIdentification"].should.equal(False) + transcription_job["Settings"]["ShowAlternatives"].should.equal(False) + transcription_job["Specialty"].should.equal(args["Specialty"]) + transcription_job["Type"].should.equal(args["Type"]) + + # IN_PROGRESS + resp = client.get_medical_transcription_job(MedicalTranscriptionJobName=job_name) + resp["ResponseMetadata"]["HTTPStatusCode"].should.equal(200) + transcription_job = resp["MedicalTranscriptionJob"] + transcription_job["TranscriptionJobStatus"].should.equal("IN_PROGRESS") + 
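+ # Each get_medical_transcription_job call advances the mocked job one
+ # status (QUEUED -> IN_PROGRESS -> COMPLETED), so the next GET below
+ # observes the completed job.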
transcription_job["MediaFormat"].should.equal("wav") + transcription_job.should.contain("StartTime") + transcription_job.doesnt.contain("CompletionTime") + transcription_job.doesnt.contain("Transcript") + transcription_job["MediaSampleRateHertz"].should.equal(44100) + + # COMPLETED + resp = client.get_medical_transcription_job(MedicalTranscriptionJobName=job_name) + resp["ResponseMetadata"]["HTTPStatusCode"].should.equal(200) + transcription_job = resp["MedicalTranscriptionJob"] + transcription_job["TranscriptionJobStatus"].should.equal("COMPLETED") + transcription_job.should.contain("CompletionTime") + transcription_job["Transcript"].should.equal( + { + "TranscriptFileUri": "https://s3.{}.amazonaws.com/{}/medical/{}.json".format( + region_name, + args["OutputBucketName"], + args["MedicalTranscriptionJobName"], + ) + } + ) + + # Delete + client.delete_medical_transcription_job(MedicalTranscriptionJobName=job_name) + client.get_medical_transcription_job.when.called_with( + MedicalTranscriptionJobName=job_name + ).should.throw(client.exceptions.BadRequestException) + + +@mock_transcribe +def test_run_medical_transcription_job_all_params(): + + region_name = "us-east-1" + client = boto3.client("transcribe", region_name=region_name) + + vocabulary_name = "MyMedicalVocabulary" + resp = client.create_medical_vocabulary( + VocabularyName=vocabulary_name, + LanguageCode="en-US", + VocabularyFileUri="https://s3.us-east-1.amazonaws.com/AWSDOC-EXAMPLE-BUCKET/vocab.txt", + ) + resp["ResponseMetadata"]["HTTPStatusCode"].should.equal(200) + + job_name = "MyJob2" + args = { + "MedicalTranscriptionJobName": job_name, + "LanguageCode": "en-US", + "MediaSampleRateHertz": 48000, + "MediaFormat": "flac", + "Media": {"MediaFileUri": "s3://my-bucket/my-media-file.dat",}, + "OutputBucketName": "my-output-bucket", + "OutputEncryptionKMSKeyId": "arn:aws:kms:us-east-1:012345678901:key/37111b5e-8eff-4706-ae3a-d4f9d1d559fc", + "Settings": { + "ShowSpeakerLabels": True, + "MaxSpeakerLabels": 5, + "ChannelIdentification": True, + "ShowAlternatives": True, + "MaxAlternatives": 6, + "VocabularyName": vocabulary_name, + }, + "Specialty": "PRIMARYCARE", + "Type": "CONVERSATION", + } + resp = client.start_medical_transcription_job(**args) + resp["ResponseMetadata"]["HTTPStatusCode"].should.equal(200) + + # CREATED + resp = client.get_medical_transcription_job(MedicalTranscriptionJobName=job_name) + resp["ResponseMetadata"]["HTTPStatusCode"].should.equal(200) + transcription_job = resp["MedicalTranscriptionJob"] + transcription_job["MedicalTranscriptionJobName"].should.equal( + args["MedicalTranscriptionJobName"] + ) + transcription_job["TranscriptionJobStatus"].should.equal("QUEUED") + transcription_job["LanguageCode"].should.equal(args["LanguageCode"]) + transcription_job["Media"].should.equal(args["Media"]) + transcription_job.should.contain("CreationTime") + transcription_job.doesnt.contain("StartTime") + transcription_job.doesnt.contain("CompletionTime") + transcription_job.doesnt.contain("Transcript") + transcription_job["Settings"]["ShowSpeakerLabels"].should.equal( + args["Settings"]["ShowSpeakerLabels"] + ) + transcription_job["Settings"]["MaxSpeakerLabels"].should.equal( + args["Settings"]["MaxSpeakerLabels"] + ) + transcription_job["Settings"]["ChannelIdentification"].should.equal( + args["Settings"]["ChannelIdentification"] + ) + transcription_job["Settings"]["ShowAlternatives"].should.equal( + args["Settings"]["ShowAlternatives"] + ) + transcription_job["Settings"]["MaxAlternatives"].should.equal( + 
args["Settings"]["MaxAlternatives"] + ) + transcription_job["Settings"]["VocabularyName"].should.equal( + args["Settings"]["VocabularyName"] + ) + + transcription_job["Specialty"].should.equal(args["Specialty"]) + transcription_job["Type"].should.equal(args["Type"]) + + # IN_PROGRESS + resp = client.get_medical_transcription_job(MedicalTranscriptionJobName=job_name) + resp["ResponseMetadata"]["HTTPStatusCode"].should.equal(200) + transcription_job = resp["MedicalTranscriptionJob"] + transcription_job["TranscriptionJobStatus"].should.equal("IN_PROGRESS") + transcription_job["MediaFormat"].should.equal("flac") + transcription_job.should.contain("StartTime") + transcription_job.doesnt.contain("CompletionTime") + transcription_job.doesnt.contain("Transcript") + transcription_job["MediaSampleRateHertz"].should.equal(48000) + + # COMPLETED + resp = client.get_medical_transcription_job(MedicalTranscriptionJobName=job_name) + resp["ResponseMetadata"]["HTTPStatusCode"].should.equal(200) + transcription_job = resp["MedicalTranscriptionJob"] + transcription_job["TranscriptionJobStatus"].should.equal("COMPLETED") + transcription_job.should.contain("CompletionTime") + transcription_job["Transcript"].should.equal( + { + "TranscriptFileUri": "https://s3.{}.amazonaws.com/{}/medical/{}.json".format( + region_name, + args["OutputBucketName"], + args["MedicalTranscriptionJobName"], + ) + } + ) + + +@mock_transcribe +def test_get_nonexistent_medical_transcription_job(): + region_name = "us-east-1" + client = boto3.client("transcribe", region_name=region_name) + + client.get_medical_transcription_job.when.called_with( + MedicalTranscriptionJobName="NonexistentJobName" + ).should.throw(client.exceptions.BadRequestException) + + +@mock_transcribe +def test_run_medical_transcription_job_with_existing_job_name(): + + region_name = "us-east-1" + client = boto3.client("transcribe", region_name=region_name) + + job_name = "MyJob" + args = { + "MedicalTranscriptionJobName": job_name, + "LanguageCode": "en-US", + "Media": {"MediaFileUri": "s3://my-bucket/my-media-file.wav",}, + "OutputBucketName": "my-output-bucket", + "Specialty": "PRIMARYCARE", + "Type": "CONVERSATION", + } + resp = client.start_medical_transcription_job(**args) + resp["ResponseMetadata"]["HTTPStatusCode"].should.equal(200) + + client.start_medical_transcription_job.when.called_with(**args).should.throw( + client.exceptions.ConflictException + ) + + +@mock_transcribe +def test_run_medical_transcription_job_nonexistent_vocabulary(): + + region_name = "us-east-1" + client = boto3.client("transcribe", region_name=region_name) + + job_name = "MyJob3" + args = { + "MedicalTranscriptionJobName": job_name, + "LanguageCode": "en-US", + "Media": {"MediaFileUri": "s3://my-bucket/my-media-file.dat",}, + "OutputBucketName": "my-output-bucket", + "Settings": {"VocabularyName": "NonexistentVocabulary"}, + "Specialty": "PRIMARYCARE", + "Type": "CONVERSATION", + } + client.start_medical_transcription_job.when.called_with(**args).should.throw( + client.exceptions.BadRequestException + ) + + +@mock_transcribe +def test_list_medical_transcription_jobs(): + + region_name = "us-east-1" + client = boto3.client("transcribe", region_name=region_name) + + def run_job(index, target_status): + job_name = "Job_{}".format(index) + args = { + "MedicalTranscriptionJobName": job_name, + "LanguageCode": "en-US", + "Media": {"MediaFileUri": "s3://my-bucket/my-media-file.wav",}, + "OutputBucketName": "my-output-bucket", + "Specialty": "PRIMARYCARE", + "Type": "CONVERSATION", + } + 
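+ # The first submission succeeds; repeating the identical job name below is
+ # expected to surface the backend's ConflictException.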
resp = client.start_medical_transcription_job(**args) + resp["ResponseMetadata"]["HTTPStatusCode"].should.equal(200) + + # IMPLICITLY PROMOTE JOB STATUS TO QUEUED + resp = client.get_medical_transcription_job( + MedicalTranscriptionJobName=job_name + ) + + # IN_PROGRESS + if target_status in ["IN_PROGRESS", "COMPLETED"]: + resp = client.get_medical_transcription_job( + MedicalTranscriptionJobName=job_name + ) + + # COMPLETED + if target_status == "COMPLETED": + resp = client.get_medical_transcription_job( + MedicalTranscriptionJobName=job_name + ) + + # Run 5 pending jobs + for i in range(5): + run_job(i, "PENDING") + + # Run 10 job to IN_PROGRESS + for i in range(5, 15): + run_job(i, "IN_PROGRESS") + + # Run 15 job to COMPLETED + for i in range(15, 30): + run_job(i, "COMPLETED") + + # List all + response = client.list_medical_transcription_jobs() + response.should.contain("MedicalTranscriptionJobSummaries") + len(response["MedicalTranscriptionJobSummaries"]).should.equal(30) + response.shouldnt.contain("NextToken") + response.shouldnt.contain("Status") + + # List IN_PROGRESS + response = client.list_medical_transcription_jobs(Status="IN_PROGRESS") + response.should.contain("MedicalTranscriptionJobSummaries") + len(response["MedicalTranscriptionJobSummaries"]).should.equal(10) + response.shouldnt.contain("NextToken") + response.should.contain("Status") + response["Status"].should.equal("IN_PROGRESS") + + # List JobName contains "8" + response = client.list_medical_transcription_jobs(JobNameContains="8") + response.should.contain("MedicalTranscriptionJobSummaries") + len(response["MedicalTranscriptionJobSummaries"]).should.equal(3) + response.shouldnt.contain("NextToken") + response.shouldnt.contain("Status") + + # Pagination by 11 + response = client.list_medical_transcription_jobs(MaxResults=11) + response.should.contain("MedicalTranscriptionJobSummaries") + len(response["MedicalTranscriptionJobSummaries"]).should.equal(11) + response.should.contain("NextToken") + response.shouldnt.contain("Status") + + response = client.list_medical_transcription_jobs( + NextToken=response["NextToken"], MaxResults=11 + ) + response.should.contain("MedicalTranscriptionJobSummaries") + len(response["MedicalTranscriptionJobSummaries"]).should.equal(11) + response.should.contain("NextToken") + + response = client.list_medical_transcription_jobs( + NextToken=response["NextToken"], MaxResults=11 + ) + response.should.contain("MedicalTranscriptionJobSummaries") + len(response["MedicalTranscriptionJobSummaries"]).should.equal(8) + response.shouldnt.contain("NextToken") + + +@mock_transcribe +def test_create_medical_vocabulary(): + + region_name = "us-east-1" + client = boto3.client("transcribe", region_name=region_name) + + vocabulary_name = "MyVocabulary" + resp = client.create_medical_vocabulary( + VocabularyName=vocabulary_name, + LanguageCode="en-US", + VocabularyFileUri="https://s3.us-east-1.amazonaws.com/AWSDOC-EXAMPLE-BUCKET/vocab.txt", + ) + resp["ResponseMetadata"]["HTTPStatusCode"].should.equal(200) + + # PENDING + resp = client.get_medical_vocabulary(VocabularyName=vocabulary_name) + resp["ResponseMetadata"]["HTTPStatusCode"].should.equal(200) + resp["VocabularyName"].should.equal(vocabulary_name) + resp["LanguageCode"].should.equal("en-US") + resp["VocabularyState"].should.equal("PENDING") + resp.should.contain("LastModifiedTime") + resp.shouldnt.contain("FailureReason") + resp["DownloadUri"].should.contain(vocabulary_name) + + # IN_PROGRESS + resp = 
client.get_medical_vocabulary(VocabularyName=vocabulary_name) + resp["ResponseMetadata"]["HTTPStatusCode"].should.equal(200) + resp["VocabularyState"].should.equal("READY") + + # Delete + client.delete_medical_vocabulary(VocabularyName=vocabulary_name) + client.get_medical_vocabulary.when.called_with( + VocabularyName=vocabulary_name + ).should.throw(client.exceptions.BadRequestException) + + +@mock_transcribe +def test_get_nonexistent_medical_vocabulary(): + region_name = "us-east-1" + client = boto3.client("transcribe", region_name=region_name) + + client.get_medical_vocabulary.when.called_with( + VocabularyName="NonexistentVocabularyName" + ).should.throw(client.exceptions.BadRequestException) + + +@mock_transcribe +def test_create_medical_vocabulary_with_existing_vocabulary_name(): + + region_name = "us-east-1" + client = boto3.client("transcribe", region_name=region_name) + + vocabulary_name = "MyVocabulary" + args = { + "VocabularyName": vocabulary_name, + "LanguageCode": "en-US", + "VocabularyFileUri": "https://s3.us-east-1.amazonaws.com/AWSDOC-EXAMPLE-BUCKET/vocab.txt", + } + resp = client.create_medical_vocabulary(**args) + resp["ResponseMetadata"]["HTTPStatusCode"].should.equal(200) + + client.create_medical_vocabulary.when.called_with(**args).should.throw( + client.exceptions.ConflictException + ) From 1c3ba83fc2df785eea6db36ca058df2ff1727e40 Mon Sep 17 00:00:00 2001 From: usmangani1 Date: Wed, 30 Sep 2020 21:04:15 +0530 Subject: [PATCH 557/658] Fix : SQS Create Queue with attributes (#3345) * Considering only new attributes while queue creation * Linting Co-authored-by: usmankb Co-authored-by: Bert Blommers --- moto/sqs/models.py | 18 +++--------------- tests/test_sqs/test_sqs.py | 12 ++++++++++++ 2 files changed, 15 insertions(+), 15 deletions(-) diff --git a/moto/sqs/models.py b/moto/sqs/models.py index 71ca62941507..720ab6e75f05 100644 --- a/moto/sqs/models.py +++ b/moto/sqs/models.py @@ -545,22 +545,10 @@ def create_queue(self, name, tags=None, **kwargs): queue_attributes = queue.attributes new_queue_attributes = new_queue.attributes - static_attributes = ( - "DelaySeconds", - "MaximumMessageSize", - "MessageRetentionPeriod", - "Policy", - "QueueArn", - "ReceiveMessageWaitTimeSeconds", - "RedrivePolicy", - "VisibilityTimeout", - "KmsMasterKeyId", - "KmsDataKeyReusePeriodSeconds", - "FifoQueue", - "ContentBasedDeduplication", - ) - for key in static_attributes: + # only the attributes which are being sent for the queue + # creation have to be compared if the queue is existing. 
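+ # (Before this change a fixed static_attributes tuple was compared in full,
+ # so an otherwise-identical create call could be rejected once any of those
+ # attributes had been changed via set_queue_attributes.)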
+ for key in kwargs: if queue_attributes.get(key) != new_queue_attributes.get(key): raise QueueAlreadyExists("The specified queue already exists.") else: diff --git a/tests/test_sqs/test_sqs.py b/tests/test_sqs/test_sqs.py index 8c05e0f35268..9ce0f21cdae2 100644 --- a/tests/test_sqs/test_sqs.py +++ b/tests/test_sqs/test_sqs.py @@ -89,6 +89,18 @@ def test_create_queue_with_different_attributes_fail(): else: raise RuntimeError("Should have raised QueueAlreadyExists Exception") + response = sqs.create_queue( + QueueName="test-queue1", Attributes={"FifoQueue": "True"} + ) + + attributes = {"VisibilityTimeout": "60"} + sqs.set_queue_attributes(QueueUrl=response.get("QueueUrl"), Attributes=attributes) + + new_response = sqs.create_queue( + QueueName="test-queue1", Attributes={"FifoQueue": "True"} + ) + new_response["QueueUrl"].should.equal(response.get("QueueUrl")) + @mock_sqs def test_create_fifo_queue(): From f9ce99f0d1ae79c27872d6e28a1ddb058a1eb113 Mon Sep 17 00:00:00 2001 From: usmangani1 Date: Wed, 30 Sep 2020 23:10:00 +0530 Subject: [PATCH 558/658] Fix: SQS md5 calculation for custom string data type. (#3346) * Fix: SQS md5 calculation for custom string data type. * Linting Co-authored-by: Bert Blommers --- moto/sqs/models.py | 25 +++++++++++++++---------- tests/test_sqs/test_sqs.py | 11 +++++++++++ 2 files changed, 26 insertions(+), 10 deletions(-) diff --git a/moto/sqs/models.py b/moto/sqs/models.py index 720ab6e75f05..72218826b55a 100644 --- a/moto/sqs/models.py +++ b/moto/sqs/models.py @@ -43,7 +43,12 @@ MAXIMUM_MESSAGE_LENGTH = 262144 # 256 KiB -TRANSPORT_TYPE_ENCODINGS = {"String": b"\x01", "Binary": b"\x02", "Number": b"\x01"} +TRANSPORT_TYPE_ENCODINGS = { + "String": b"\x01", + "Binary": b"\x02", + "Number": b"\x01", + "String.custom": b"\x01", +} class Message(BaseModel): @@ -88,14 +93,14 @@ def utf8(str): struct_format = "!I".encode("ascii") # ensure it's a bytestring for name in sorted(self.message_attributes.keys()): attr = self.message_attributes[name] - data_type_parts = attr["data_type"].split(".") - data_type = data_type_parts[0] - - if data_type not in [ - "String", - "Binary", - "Number", - ]: + whole_data_type = attr.get("data_type") + if TRANSPORT_TYPE_ENCODINGS.get(whole_data_type): + data_type = whole_data_type + else: + data_type_parts = attr["data_type"].split(".") + data_type = data_type_parts[0] + + if data_type not in ["String", "Binary", "Number", "String.custom"]: raise MessageAttributesInvalid( "The message attribute '{0}' has an invalid message attribute type, the set of supported type prefixes is Binary, Number, and String.".format( name[0] ) @@ -112,7 +117,7 @@ def utf8(str): encoded += struct.pack(struct_format, len(data_type)) + utf8(data_type) encoded += TRANSPORT_TYPE_ENCODINGS[data_type] - if data_type == "String" or data_type == "Number": + if data_type in ["String", "Number", "String.custom"]: value = attr["string_value"] elif data_type == "Binary": value = base64.b64decode(attr["binary_value"]) diff --git a/tests/test_sqs/test_sqs.py b/tests/test_sqs/test_sqs.py index 9ce0f21cdae2..48fa202912b2 100644 --- a/tests/test_sqs/test_sqs.py +++ b/tests/test_sqs/test_sqs.py @@ -658,6 +658,17 @@ def test_send_receive_message_with_attributes_with_labels(): "994258b45346a2cc3f9cbb611aa7af30" ) + response = queue.send_message( + MessageBody="test message", + MessageAttributes={ + "somevalue": {"StringValue": "somevalue", "DataType": "String.custom",} + }, + ) + + response.get("MD5OfMessageAttributes").should.equal( + "9e05cca738e70ff6c6041e82d5e77ef1" + ) + @mock_sqs
def test_send_receive_message_timestamps(): From a668349a70ffce571c74c77074e6270f3cb59dd7 Mon Sep 17 00:00:00 2001 From: ljakimczuk <39192420+ljakimczuk@users.noreply.github.com> Date: Thu, 1 Oct 2020 11:24:03 +0200 Subject: [PATCH 559/658] Add `set_default_policy_version` to the IAM backend (#3347) * Adding set_default_policy_version * Adding tests and reformatting * Reformatting tests --- moto/iam/models.py | 26 ++++++++++++++++++- moto/iam/responses.py | 14 ++++++++++ tests/test_iam/test_iam.py | 53 ++++++++++++++++++++++++++++++++++++++ 3 files changed, 92 insertions(+), 1 deletion(-) diff --git a/moto/iam/models.py b/moto/iam/models.py index 3e7b638b24d1..6397fd099668 100755 --- a/moto/iam/models.py +++ b/moto/iam/models.py @@ -125,9 +125,10 @@ def __init__( def update_default_version(self, new_default_version_id): for version in self.versions: + if version.version_id == new_default_version_id: + version.is_default = True if version.version_id == self.default_version_id: version.is_default = False - break self.default_version_id = new_default_version_id @property @@ -1544,6 +1545,29 @@ def list_policies(self, marker, max_items, only_attached, path_prefix, scope): return self._filter_attached_policies(policies, marker, max_items, path_prefix) + def set_default_policy_version(self, policy_arn, version_id): + import re + + if re.match("v[1-9][0-9]*(\.[A-Za-z0-9-]*)?", version_id) is None: + raise ValidationError( + "Value '{0}' at 'versionId' failed to satisfy constraint: Member must satisfy regular expression pattern: v[1-9][0-9]*(\.[A-Za-z0-9-]*)?".format( + version_id + ) + ) + + policy = self.get_policy(policy_arn) + + for version in policy.versions: + if version.version_id == version_id: + policy.update_default_version(version_id) + return True + + raise NoSuchEntity( + "Policy {0} version {1} does not exist or is not attachable.".format( + policy_arn, version_id + ) + ) + def _filter_attached_policies(self, policies, marker, max_items, path_prefix): if path_prefix: policies = [p for p in policies if p.path.startswith(path_prefix)] diff --git a/moto/iam/responses.py b/moto/iam/responses.py index 88ab9aef1d3c..eed610f13846 100644 --- a/moto/iam/responses.py +++ b/moto/iam/responses.py @@ -175,6 +175,13 @@ def list_entities_for_policy(self): roles=entity_roles, users=entity_users, groups=entity_groups ) + def set_default_policy_version(self): + policy_arn = self._get_param("PolicyArn") + version_id = self._get_param("VersionId") + iam_backend.set_default_policy_version(policy_arn, version_id) + template = self.response_template(SET_DEFAULT_POLICY_VERSION_TEMPLATE) + return template.render() + def create_role(self): role_name = self._get_param("RoleName") path = self._get_param("Path") @@ -1010,6 +1017,13 @@ def get_account_summary(self): """ +SET_DEFAULT_POLICY_VERSION_TEMPLATE = """ + + 35f241af-3ebc-11e4-9d0d-6f969EXAMPLE + +""" + + ATTACH_ROLE_POLICY_TEMPLATE = """ 7a62c49f-347e-4fc4-9331-6e8eEXAMPLE diff --git a/tests/test_iam/test_iam.py b/tests/test_iam/test_iam.py index e9d5e8a4d8f6..9cf7decb6c89 100644 --- a/tests/test_iam/test_iam.py +++ b/tests/test_iam/test_iam.py @@ -549,6 +549,59 @@ def test_set_default_policy_version(): versions.get("Versions")[2].get("Document").should.equal(json.loads(MOCK_POLICY_3)) versions.get("Versions")[2].get("IsDefaultVersion").should.be.ok + conn.set_default_policy_version( + PolicyArn="arn:aws:iam::{}:policy/TestSetDefaultPolicyVersion".format( + ACCOUNT_ID + ), + VersionId="v1", + ) + versions = conn.list_policy_versions( + 
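        # after the rollback to v1 above, the listing below should flag v1 as
        # the default again, while v2 and v3 keep their documents but lose it: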
PolicyArn="arn:aws:iam::{}:policy/TestSetDefaultPolicyVersion".format( + ACCOUNT_ID + ) + ) + versions.get("Versions")[0].get("Document").should.equal(json.loads(MOCK_POLICY)) + versions.get("Versions")[0].get("IsDefaultVersion").should.be.ok + versions.get("Versions")[1].get("Document").should.equal(json.loads(MOCK_POLICY_2)) + versions.get("Versions")[1].get("IsDefaultVersion").shouldnt.be.ok + versions.get("Versions")[2].get("Document").should.equal(json.loads(MOCK_POLICY_3)) + versions.get("Versions")[2].get("IsDefaultVersion").shouldnt.be.ok + + # Set default version for non-existing policy + conn.set_default_policy_version.when.called_with( + PolicyArn="arn:aws:iam::{}:policy/TestNonExistingPolicy".format(ACCOUNT_ID), + VersionId="v1", + ).should.throw( + ClientError, + "Policy arn:aws:iam::{}:policy/TestNonExistingPolicy not found".format( + ACCOUNT_ID + ), + ) + + # Set default version for incorrect version + conn.set_default_policy_version.when.called_with( + PolicyArn="arn:aws:iam::{}:policy/TestSetDefaultPolicyVersion".format( + ACCOUNT_ID + ), + VersionId="wrong_version_id", + ).should.throw( + ClientError, + "Value 'wrong_version_id' at 'versionId' failed to satisfy constraint: Member must satisfy regular expression pattern: v[1-9][0-9]*(\.[A-Za-z0-9-]*)?", + ) + + # Set default version for non-existing version + conn.set_default_policy_version.when.called_with( + PolicyArn="arn:aws:iam::{}:policy/TestSetDefaultPolicyVersion".format( + ACCOUNT_ID + ), + VersionId="v4", + ).should.throw( + ClientError, + "Policy arn:aws:iam::{}:policy/TestSetDefaultPolicyVersion version v4 does not exist or is not attachable.".format( + ACCOUNT_ID + ), + ) + @mock_iam def test_get_policy(): From 9bc6bded6e6472f04a45f0bc429acc926c2cdc08 Mon Sep 17 00:00:00 2001 From: jweite Date: Fri, 2 Oct 2020 09:07:13 -0400 Subject: [PATCH 560/658] EMR: Support for StepConcurrencyLevel. 
(#3351) Co-authored-by: Joseph Weitekamp --- moto/emr/models.py | 7 +++++++ moto/emr/responses.py | 29 ++++++++++++++++++++++++++--- tests/test_emr/test_emr_boto3.py | 30 ++++++++++++++++++++++++++++++ 3 files changed, 63 insertions(+), 3 deletions(-) diff --git a/moto/emr/models.py b/moto/emr/models.py index 63aadf105b08..5a34c4d104db 100644 --- a/moto/emr/models.py +++ b/moto/emr/models.py @@ -146,6 +146,7 @@ def __init__( requested_ami_version=None, running_ami_version=None, custom_ami_id=None, + step_concurrency_level=1, ): self.id = cluster_id or random_cluster_id() emr_backend.clusters[self.id] = self @@ -236,6 +237,7 @@ def __init__( self.role = job_flow_role or "EMRJobflowDefault" self.service_role = service_role + self.step_concurrency_level = step_concurrency_level self.creation_datetime = datetime.now(pytz.utc) self.start_datetime = None @@ -469,6 +471,11 @@ def list_steps(self, cluster_id, marker=None, step_ids=None, step_states=None): ) return steps[start_idx : start_idx + max_items], marker + def modify_cluster(self, cluster_id, step_concurrency_level): + cluster = self.clusters[cluster_id] + cluster.step_concurrency_level = step_concurrency_level + return cluster + def modify_instance_groups(self, instance_groups): result_groups = [] for instance_group in instance_groups: diff --git a/moto/emr/responses.py b/moto/emr/responses.py index 38a33519c90b..9ced4569bedb 100644 --- a/moto/emr/responses.py +++ b/moto/emr/responses.py @@ -127,9 +127,6 @@ def describe_job_flows(self): template = self.response_template(DESCRIBE_JOB_FLOWS_TEMPLATE) return template.render(clusters=clusters) - def describe_security_configuration(self): - raise NotImplementedError - @generate_boto3_response("DescribeStep") def describe_step(self): cluster_id = self._get_param("ClusterId") @@ -185,6 +182,17 @@ def list_steps(self): template = self.response_template(LIST_STEPS_TEMPLATE) return template.render(steps=steps, marker=marker) + @generate_boto3_response("ModifyCluster") + def modify_cluster(self): + cluster_id = self._get_param("ClusterId") + step_concurrency_level = self._get_param("StepConcurrencyLevel") + cluster = self.backend.modify_cluster(cluster_id, step_concurrency_level) + template = self.response_template(MODIFY_CLUSTER_TEMPLATE) + return template.render(cluster=cluster) + + def describe_security_configuration(self): + raise NotImplementedError + @generate_boto3_response("ModifyInstanceGroups") def modify_instance_groups(self): instance_groups = self._get_list_prefix("InstanceGroups.member") @@ -315,6 +323,10 @@ def run_job_flow(self): template="error_json", ) + step_concurrency_level = self._get_param("StepConcurrencyLevel") + if step_concurrency_level: + kwargs["step_concurrency_level"] = step_concurrency_level + cluster = self.backend.run_job_flow(**kwargs) applications = self._get_list_prefix("Applications.member") @@ -591,6 +603,7 @@ def remove_auto_scaling_policy(self): {{ cluster.termination_protected|lower }} {{ cluster.visible_to_all_users|lower }} + {{ cluster.step_concurrency_level }} @@ -1075,6 +1088,16 @@ def remove_auto_scaling_policy(self): """ +MODIFY_CLUSTER_TEMPLATE = """ + + {{ cluster.step_concurrency_level }} + + + 0751c837-e78d-4aef-95c9-9c4d29a092ff + + +""" + MODIFY_INSTANCE_GROUPS_TEMPLATE = """ 2690d7eb-ed86-11dd-9877-6fad448a8419 diff --git a/tests/test_emr/test_emr_boto3.py b/tests/test_emr/test_emr_boto3.py index 3f577c69a4d2..af6939f80b5f 100644 --- a/tests/test_emr/test_emr_boto3.py +++ b/tests/test_emr/test_emr_boto3.py @@ -631,6 +631,36 @@ def 
test_run_job_flow_with_custom_ami(): resp["Cluster"]["CustomAmiId"].should.equal("MyEmrCustomAmi") +@mock_emr +def test_run_job_flow_with_step_concurrency(): + client = boto3.client("emr", region_name="us-east-1") + args = deepcopy(run_job_flow_args) + args["StepConcurrencyLevel"] = 2 + cluster_id = client.run_job_flow(**args)["JobFlowId"] + resp = client.describe_cluster(ClusterId=cluster_id)["Cluster"] + resp["Name"].should.equal(args["Name"]) + resp["Status"]["State"].should.equal("WAITING") + resp["StepConcurrencyLevel"].should.equal(2) + + +@mock_emr +def test_modify_cluster(): + client = boto3.client("emr", region_name="us-east-1") + args = deepcopy(run_job_flow_args) + args["StepConcurrencyLevel"] = 2 + cluster_id = client.run_job_flow(**args)["JobFlowId"] + resp = client.describe_cluster(ClusterId=cluster_id)["Cluster"] + resp["Name"].should.equal(args["Name"]) + resp["Status"]["State"].should.equal("WAITING") + resp["StepConcurrencyLevel"].should.equal(2) + + resp = client.modify_cluster(ClusterId=cluster_id, StepConcurrencyLevel=4) + resp["StepConcurrencyLevel"].should.equal(4) + + resp = client.describe_cluster(ClusterId=cluster_id)["Cluster"] + resp["StepConcurrencyLevel"].should.equal(4) + + @mock_emr def test_set_termination_protection(): client = boto3.client("emr", region_name="us-east-1") From a65c0f004c0a5ff795cc9a020a4e703939b7215b Mon Sep 17 00:00:00 2001 From: usmangani1 Date: Mon, 5 Oct 2020 14:40:24 +0530 Subject: [PATCH 561/658] Fix:SQS Receive Message (MessageAttributes) in response (#3303) * Fix:SQS Receive Message (MessageAttributes) in response * Fixed tests Co-authored-by: usmankb --- moto/sqs/models.py | 18 +++++- moto/sqs/responses.py | 7 ++- moto/sqs/utils.py | 15 +++++ tests/test_sns/test_publishing_boto3.py | 6 -- tests/test_sqs/test_sqs.py | 76 ++++++++++++++++++++++--- 5 files changed, 104 insertions(+), 18 deletions(-) diff --git a/moto/sqs/models.py b/moto/sqs/models.py index 72218826b55a..2784ee625108 100644 --- a/moto/sqs/models.py +++ b/moto/sqs/models.py @@ -526,6 +526,14 @@ def policy(self, policy): } +def _filter_message_attributes(message, input_message_attributes): + filtered_message_attributes = {} + for key, value in message.message_attributes.items(): + if key in input_message_attributes: + filtered_message_attributes[key] = value + message.message_attributes = filtered_message_attributes + + class SQSBackend(BaseBackend): def __init__(self, region_name): self.region_name = region_name @@ -718,7 +726,12 @@ def _get_first_duplicate_id(self, ids): return None def receive_messages( - self, queue_name, count, wait_seconds_timeout, visibility_timeout + self, + queue_name, + count, + wait_seconds_timeout, + visibility_timeout, + message_attribute_names=None, ): """ Attempt to retrieve visible messages from a queue. @@ -734,6 +747,8 @@ def receive_messages( :param int wait_seconds_timeout: The duration (in seconds) for which the call waits for a message to arrive in the queue before returning. 
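        A wait of 0 seconds amounts to a single immediate (short-polling)
        attempt. An illustrative long-polling call, with ``backend`` standing
        in for an ``SQSBackend`` instance::

            backend.receive_messages(
                "my-queue", 1, wait_seconds_timeout=10, visibility_timeout=30
            )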
If a message is available, the call returns sooner than WaitTimeSeconds """ + if message_attribute_names is None: + message_attribute_names = [] queue = self.get_queue(queue_name) result = [] previous_result_count = len(result) @@ -775,6 +790,7 @@ def receive_messages( continue message.mark_received(visibility_timeout=visibility_timeout) + _filter_message_attributes(message, message_attribute_names) result.append(message) if len(result) >= count: break diff --git a/moto/sqs/responses.py b/moto/sqs/responses.py index e28fbca8a328..016637b4c8a3 100644 --- a/moto/sqs/responses.py +++ b/moto/sqs/responses.py @@ -13,7 +13,7 @@ ReceiptHandleIsInvalid, ) from .models import sqs_backends -from .utils import parse_message_attributes +from .utils import parse_message_attributes, extract_input_message_attributes MAXIMUM_VISIBILTY_TIMEOUT = 43200 MAXIMUM_MESSAGE_LENGTH = 262144 # 256 KiB @@ -352,6 +352,9 @@ def purge_queue(self): def receive_message(self): queue_name = self._get_queue_name() + message_attributes = self._get_multi_param("message_attributes") + if not message_attributes: + message_attributes = extract_input_message_attributes(self.querystring,) queue = self.sqs_backend.get_queue(queue_name) @@ -391,7 +394,7 @@ def receive_message(self): return ERROR_MAX_VISIBILITY_TIMEOUT_RESPONSE, dict(status=400) messages = self.sqs_backend.receive_messages( - queue_name, message_count, wait_time, visibility_timeout + queue_name, message_count, wait_time, visibility_timeout, message_attributes ) template = self.response_template(RECEIVE_MESSAGE_RESPONSE) return template.render(messages=messages) diff --git a/moto/sqs/utils.py b/moto/sqs/utils.py index 315fce56b0d5..876d6b40ef35 100644 --- a/moto/sqs/utils.py +++ b/moto/sqs/utils.py @@ -11,6 +11,21 @@ def generate_receipt_handle(): return "".join(random.choice(string.ascii_lowercase) for x in range(length)) +def extract_input_message_attributes(querystring): + message_attributes = [] + index = 1 + while True: + # Loop through looking for message attributes + name_key = "MessageAttributeName.{0}".format(index) + name = querystring.get(name_key) + if not name: + # Found all attributes + break + message_attributes.append(name[0]) + index = index + 1 + return message_attributes + + def parse_message_attributes(querystring, base="", value_namespace="Value."): message_attributes = {} index = 1 diff --git a/tests/test_sns/test_publishing_boto3.py b/tests/test_sns/test_publishing_boto3.py index c84f19694964..63c409302f8f 100644 --- a/tests/test_sns/test_publishing_boto3.py +++ b/tests/test_sns/test_publishing_boto3.py @@ -183,9 +183,6 @@ def test_publish_to_sqs_msg_attr_byte_value(): message = queue_raw.receive_messages()[0] message.body.should.equal("my message") - message.message_attributes.should.equal( - {"store": {"DataType": "Binary", "BinaryValue": b"\x02\x03\x04"}} - ) @mock_sqs @@ -216,9 +213,6 @@ def test_publish_to_sqs_msg_attr_number_type(): message = queue_raw.receive_messages()[0] message.body.should.equal("test message") - message.message_attributes.should.equal( - {"retries": {"DataType": "Number", "StringValue": "0"}} - ) @mock_sns diff --git a/tests/test_sqs/test_sqs.py b/tests/test_sqs/test_sqs.py index 48fa202912b2..f98131db4e15 100644 --- a/tests/test_sqs/test_sqs.py +++ b/tests/test_sqs/test_sqs.py @@ -593,9 +593,9 @@ def test_send_receive_message_with_attributes(): }, ) - messages = conn.receive_message(QueueUrl=queue.url, MaxNumberOfMessages=2)[ - "Messages" - ] + messages = conn.receive_message( + QueueUrl=queue.url, 
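        # per the new backend-side filtering, only the attributes named
        # below are kept on the returned messages: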
MaxNumberOfMessages=2, MessageAttributeNames=["timestamp"]
+ )["Messages"]
 message1 = messages[0]
 message2 = messages[1]
@@ -641,9 +641,9 @@ def test_send_receive_message_with_attributes_with_labels():
 },
 )
- messages = conn.receive_message(QueueUrl=queue.url, MaxNumberOfMessages=2)[
- "Messages"
- ]
+ messages = conn.receive_message(
+ QueueUrl=queue.url, MaxNumberOfMessages=2, MessageAttributeNames=["timestamp"]
+ )["Messages"]
 message1 = messages[0]
 message2 = messages[1]
@@ -779,7 +779,14 @@ def test_send_message_with_attributes():
 queue.write(message)
- messages = conn.receive_message(queue)
+ messages = conn.receive_message(
+ queue,
+ message_attributes=[
+ "test.attribute_name",
+ "test.binary_attribute",
+ "test.number_attribute",
+ ],
+ )
 messages[0].get_body().should.equal(body)
@@ -999,7 +1006,7 @@ def test_send_batch_operation_with_message_attributes():
 )
 queue.write_batch([message_tuple])
- messages = queue.get_messages()
+ messages = queue.get_messages(message_attributes=["name1"])
 messages[0].get_body().should.equal("test message 1")
 for name, value in message_tuple[3].items():
@@ -1234,7 +1241,11 @@ def test_send_message_batch():
 ["id_1", "id_2"]
 )
- response = client.receive_message(QueueUrl=queue_url, MaxNumberOfMessages=10)
+ response = client.receive_message(
+ QueueUrl=queue_url,
+ MaxNumberOfMessages=10,
+ MessageAttributeNames=["attribute_name_1", "attribute_name_2"],
+ )
 response["Messages"][0]["Body"].should.equal("body_1")
 response["Messages"][0]["MessageAttributes"].should.equal(
@@ -1258,6 +1269,53 @@ def test_send_message_batch():
 )

+@mock_sqs
+def test_message_attributes_in_receive_message():
+ sqs = boto3.resource("sqs", region_name="us-east-1")
+ conn = boto3.client("sqs", region_name="us-east-1")
+ conn.create_queue(QueueName="test-queue")
+ queue = sqs.Queue("test-queue")
+
+ body_one = "this is a test message"
+
+ queue.send_message(
+ MessageBody=body_one,
+ MessageAttributes={
+ "timestamp": {
+ "StringValue": "1493147359900",
+ "DataType": "Number.java.lang.Long",
+ }
+ },
+ )
+ messages = conn.receive_message(
+ QueueUrl=queue.url, MaxNumberOfMessages=2, MessageAttributeNames=["timestamp"]
+ )["Messages"]
+
+ messages[0]["MessageAttributes"].should.equal(
+ {
+ "timestamp": {
+ "StringValue": "1493147359900",
+ "DataType": "Number.java.lang.Long",
+ }
+ }
+ )
+
+ queue.send_message(
+ MessageBody=body_one,
+ MessageAttributes={
+ "timestamp": {
+ "StringValue": "1493147359900",
+ "DataType": "Number.java.lang.Long",
+ }
+ },
+ )
+ messages = conn.receive_message(QueueUrl=queue.url, MaxNumberOfMessages=2)[
+ "Messages"
+ ]
+
+ messages[0].get("MessageAttributes").should.equal(None)
+

 @mock_sqs
 def test_send_message_batch_errors():
 client = boto3.client("sqs", region_name="us-east-1")

From c1a7f29c62a09086ccdea79a7cda4cd4ea89f91c Mon Sep 17 00:00:00 2001
From: Roman Dmytrenko
Date: Mon, 5 Oct 2020 13:10:32 +0300
Subject: [PATCH 562/658] Fix issue with wrong parameter assigned to secret_binary during secret rotation in Secrets Manager (#3348)

* fix issue with wrong parameter assigned to secret_binary
* reformat test
---
 moto/secretsmanager/models.py | 4 ++--
 tests/test_secretsmanager/test_secretsmanager.py | 8 +++++++-
 2 files changed, 9 insertions(+), 3 deletions(-)

diff --git a/moto/secretsmanager/models.py b/moto/secretsmanager/models.py
index 41b70bc1f726..419b765c16c1 100644
--- a/moto/secretsmanager/models.py
+++ b/moto/secretsmanager/models.py
@@ -408,8 +408,8 @@ def rotate_secret(
 self._add_secret(
 secret_id,
 old_secret_version["secret_string"],
-
secret.description, - secret.tags, + description=secret.description, + tags=secret.tags, version_id=new_version_id, version_stages=["AWSCURRENT"], ) diff --git a/tests/test_secretsmanager/test_secretsmanager.py b/tests/test_secretsmanager/test_secretsmanager.py index 94e745659881..1ae53603e764 100644 --- a/tests/test_secretsmanager/test_secretsmanager.py +++ b/tests/test_secretsmanager/test_secretsmanager.py @@ -502,7 +502,9 @@ def test_restore_secret_that_does_not_exist(): @mock_secretsmanager def test_rotate_secret(): conn = boto3.client("secretsmanager", region_name="us-west-2") - conn.create_secret(Name=DEFAULT_SECRET_NAME, SecretString="foosecret") + conn.create_secret( + Name=DEFAULT_SECRET_NAME, SecretString="foosecret", Description="foodescription" + ) rotated_secret = conn.rotate_secret(SecretId=DEFAULT_SECRET_NAME) @@ -511,6 +513,10 @@ def test_rotate_secret(): assert rotated_secret["Name"] == DEFAULT_SECRET_NAME assert rotated_secret["VersionId"] != "" + describe_secret = conn.describe_secret(SecretId=DEFAULT_SECRET_NAME) + + assert describe_secret["Description"] == "foodescription" + @mock_secretsmanager def test_rotate_secret_enable_rotation(): From 2391a4ab97eac31842977acc19b70d2dc3cab754 Mon Sep 17 00:00:00 2001 From: Jon Michaelchuck Date: Mon, 5 Oct 2020 04:22:54 -0700 Subject: [PATCH 563/658] [SecretsManager] Handle missing secrets versions (#3349) * SecretsManager - handle missing secrets versions The get_secret_value method should raise ResourceNotFoundException if a secret exists but the provided VersionId does not. * Run black * 2.x support * black fix? * secret is not a dict. Fix error msg output. --- moto/secretsmanager/models.py | 10 +++++++++- .../test_secretsmanager.py | 20 +++++++++++++++++++ 2 files changed, 29 insertions(+), 1 deletion(-) diff --git a/moto/secretsmanager/models.py b/moto/secretsmanager/models.py index 419b765c16c1..f39b91eaab87 100644 --- a/moto/secretsmanager/models.py +++ b/moto/secretsmanager/models.py @@ -14,6 +14,7 @@ SecretHasNoValueException, InvalidParameterException, ResourceExistsException, + ResourceNotFoundException, InvalidRequestException, ClientError, ) @@ -205,7 +206,14 @@ def get_secret_value(self, secret_id, version_id, version_stage): secret = self.secrets[secret_id] version_id = version_id or secret.default_version_id - secret_version = secret.versions[version_id] + secret_version = secret.versions.get(version_id) + if not secret_version: + raise ResourceNotFoundException( + "An error occurred (ResourceNotFoundException) when calling the GetSecretValue operation: Secrets " + "Manager can't find the specified secret value for VersionId: {}".format( + version_id + ) + ) response_data = { "ARN": secret.arn, diff --git a/tests/test_secretsmanager/test_secretsmanager.py b/tests/test_secretsmanager/test_secretsmanager.py index 1ae53603e764..dcb3b9b0c063 100644 --- a/tests/test_secretsmanager/test_secretsmanager.py +++ b/tests/test_secretsmanager/test_secretsmanager.py @@ -105,6 +105,26 @@ def test_get_secret_that_has_no_value(): ) +@mock_secretsmanager +def test_get_secret_version_that_does_not_exist(): + conn = boto3.client("secretsmanager", region_name="us-west-2") + + result = conn.create_secret(Name="java-util-test-password") + secret_arn = result["ARN"] + missing_version_id = "00000000-0000-0000-0000-000000000000" + + with assert_raises(ClientError) as cm: + conn.get_secret_value(SecretId=secret_arn, VersionId=missing_version_id) + + assert_equal( + ( + "An error occurred (ResourceNotFoundException) when calling the 
GetSecretValue operation: Secrets " + "Manager can't find the specified secret value for VersionId: 00000000-0000-0000-0000-000000000000" + ), + cm.exception.response["Error"]["Message"], + ) + + @mock_secretsmanager def test_create_secret(): conn = boto3.client("secretsmanager", region_name="us-east-1") From 08da2c92d92defbcc5390abb3e831a09a17b2e82 Mon Sep 17 00:00:00 2001 From: Sarang Joshi Date: Mon, 5 Oct 2020 08:40:33 -0400 Subject: [PATCH 564/658] fix(dynamodb2): Fix update_item nested insert (#3355) When comparing old and new values when doing a nested item update, the `!=` implementation fails when the value being compared is `None`. This results in an exception when trying to insert a new item into a nested map. So just do a quick check that the original value is exists before doing the comparison, as the `None` default is what is tripping this. --- moto/dynamodb2/responses.py | 2 +- tests/test_dynamodb2/test_dynamodb.py | 31 +++++++++++++++++++++++++++ 2 files changed, 32 insertions(+), 1 deletion(-) diff --git a/moto/dynamodb2/responses.py b/moto/dynamodb2/responses.py index 2b252af15053..25ec292382c5 100644 --- a/moto/dynamodb2/responses.py +++ b/moto/dynamodb2/responses.py @@ -826,7 +826,7 @@ def _build_updated_new_attributes(self, original, changed): original.get(key, None), changed[key] ) for key in changed.keys() - if changed[key] != original.get(key, None) + if key not in original or changed[key] != original[key] } elif type(changed) in (set, list): if len(changed) != len(original): diff --git a/tests/test_dynamodb2/test_dynamodb.py b/tests/test_dynamodb2/test_dynamodb.py index d56fd3f11eb3..e2dd744e383c 100644 --- a/tests/test_dynamodb2/test_dynamodb.py +++ b/tests/test_dynamodb2/test_dynamodb.py @@ -3936,6 +3936,37 @@ def test_update_supports_list_append_maps(): ) +@requires_boto_gte("2.9") +@mock_dynamodb2 +def test_update_supports_nested_update_if_nested_value_not_exists(): + dynamodb = boto3.resource("dynamodb", region_name="us-east-1") + name = "TestTable" + + dynamodb.create_table( + TableName=name, + KeySchema=[{"AttributeName": "user_id", "KeyType": "HASH"}], + AttributeDefinitions=[{"AttributeName": "user_id", "AttributeType": "S"}], + ProvisionedThroughput={"ReadCapacityUnits": 5, "WriteCapacityUnits": 5}, + ) + + table = dynamodb.Table(name) + table.put_item( + Item={"user_id": "1234", "friends": {"5678": {"name": "friend_5678"}},}, + ) + table.update_item( + Key={"user_id": "1234"}, + ExpressionAttributeNames={"#friends": "friends", "#friendid": "0000",}, + ExpressionAttributeValues={":friend": {"name": "friend_0000"},}, + UpdateExpression="SET #friends.#friendid = :friend", + ReturnValues="UPDATED_NEW", + ) + item = table.get_item(Key={"user_id": "1234"})["Item"] + assert item == { + "user_id": "1234", + "friends": {"5678": {"name": "friend_5678"}, "0000": {"name": "friend_0000"},}, + } + + @mock_dynamodb2 def test_update_supports_list_append_with_nested_if_not_exists_operation(): dynamo = boto3.resource("dynamodb", region_name="us-west-1") From 349b9a990daf17cb1ff5cf74c54c9521ad2f0c0d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Anton=20Gr=C3=BCbel?= Date: Mon, 5 Oct 2020 16:39:59 +0200 Subject: [PATCH 565/658] Add registeredAt to ecs container instance (#3358) --- moto/ecs/models.py | 11 ++++++++--- tests/test_ecs/test_ecs_boto3.py | 1 + 2 files changed, 9 insertions(+), 3 deletions(-) diff --git a/moto/ecs/models.py b/moto/ecs/models.py index 7041a322b1d9..210344f1038b 100644 --- a/moto/ecs/models.py +++ b/moto/ecs/models.py @@ -1,18 +1,18 @@ from __future__ 
import unicode_literals + import re import uuid +from copy import copy from datetime import datetime from random import random, randint import pytz from boto3 import Session -from moto.core.exceptions import JsonRESTError from moto.core import BaseBackend, BaseModel, CloudFormationModel +from moto.core.exceptions import JsonRESTError from moto.core.utils import unix_time from moto.ec2 import ec2_backends -from copy import copy - from .exceptions import ( ServiceNotFoundException, TaskDefinitionNotFoundException, @@ -481,6 +481,7 @@ def __init__(self, ec2_instance_id, region_name): if ec2_instance.platform == "windows" else "linux", # options are windows and linux, linux is default } + self.registered_at = datetime.now(pytz.utc) @property def response_object(self): @@ -489,6 +490,10 @@ def response_object(self): self._format_attribute(name, value) for name, value in response_object["attributes"].items() ] + if isinstance(response_object["registeredAt"], datetime): + response_object["registeredAt"] = unix_time( + response_object["registeredAt"].replace(tzinfo=None) + ) return response_object def _format_attribute(self, name, value): diff --git a/tests/test_ecs/test_ecs_boto3.py b/tests/test_ecs/test_ecs_boto3.py index c528349f545a..919124c6e15e 100644 --- a/tests/test_ecs/test_ecs_boto3.py +++ b/tests/test_ecs/test_ecs_boto3.py @@ -950,6 +950,7 @@ def test_describe_container_instances(): for instance in response["containerInstances"]: instance.keys().should.contain("runningTasksCount") instance.keys().should.contain("pendingTasksCount") + instance["registeredAt"].should.be.a("datetime.datetime") with assert_raises(ClientError) as e: ecs_client.describe_container_instances( From c26bef6f797c3f894ff929e0e36901a3c7a97156 Mon Sep 17 00:00:00 2001 From: ljakimczuk <39192420+ljakimczuk@users.noreply.github.com> Date: Tue, 6 Oct 2020 07:33:16 +0200 Subject: [PATCH 566/658] Updates to FlowLogs and IMPLEMENTATION_COVERAGE (#3356) * Replacing Unsuccessful class with tuple * Updating coverage --- IMPLEMENTATION_COVERAGE.md | 8 ++++---- moto/ec2/models.py | 17 ++++------------- moto/ec2/responses/flow_logs.py | 6 +++--- 3 files changed, 11 insertions(+), 20 deletions(-) diff --git a/IMPLEMENTATION_COVERAGE.md b/IMPLEMENTATION_COVERAGE.md index 101f9c0dc932..a108361d36f2 100644 --- a/IMPLEMENTATION_COVERAGE.md +++ b/IMPLEMENTATION_COVERAGE.md @@ -2632,7 +2632,7 @@ - [X] create_dhcp_options - [ ] create_egress_only_internet_gateway - [ ] create_fleet -- [ ] create_flow_logs +- [X] create_flow_logs - [ ] create_fpga_image - [X] create_image - [ ] create_instance_export_task @@ -2682,7 +2682,7 @@ - [ ] delete_dhcp_options - [ ] delete_egress_only_internet_gateway - [ ] delete_fleets -- [ ] delete_flow_logs +- [X] delete_flow_logs - [ ] delete_fpga_image - [X] delete_internet_gateway - [X] delete_key_pair @@ -2753,7 +2753,7 @@ - [ ] describe_fleet_history - [ ] describe_fleet_instances - [ ] describe_fleets -- [ ] describe_flow_logs +- [X] describe_flow_logs - [ ] describe_fpga_image_attribute - [ ] describe_fpga_images - [ ] describe_host_reservation_offerings @@ -4235,7 +4235,7 @@ - [X] remove_user_from_group - [ ] reset_service_specific_credential - [ ] resync_mfa_device -- [ ] set_default_policy_version +- [X] set_default_policy_version - [ ] set_security_token_service_preferences - [ ] simulate_custom_policy - [ ] simulate_principal_policy diff --git a/moto/ec2/models.py b/moto/ec2/models.py index e85dab800859..d1187ac9d23a 100644 --- a/moto/ec2/models.py +++ b/moto/ec2/models.py @@ -3532,15 +3532,6 @@ 
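# NOTE: the standalone Unsuccessful wrapper removed below is superseded by
# plain (resource_id, error_code, error_message) tuples, which the response
# template now unpacks positionally as error.0 / error.1 / error.2.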
def modify_subnet_attribute(self, subnet_id, attr_name, attr_value): raise InvalidParameterValueError(attr_name) -class Unsuccessful(object): - def __init__( - self, resource_id, error_code, error_message, - ): - self.resource_id = resource_id - self.error_code = error_code - self.error_message = error_message - - class FlowLogs(TaggedEC2Resource, CloudFormationModel): def __init__( self, @@ -3749,11 +3740,11 @@ def create_flow_logs( try: s3_backend.get_bucket(arn) except MissingBucket: + # Instead of creating FlowLog report + # the unsuccessful status for the + # given resource_id unsuccessful.append( - # Instead of creating FlowLog report - # the unsuccessful status for the - # given resource_id - Unsuccessful( + ( resource_id, "400", "LogDestination: {0} does not exist.".format(arn), diff --git a/moto/ec2/responses/flow_logs.py b/moto/ec2/responses/flow_logs.py index 9978f89c2b26..74930f2911ce 100644 --- a/moto/ec2/responses/flow_logs.py +++ b/moto/ec2/responses/flow_logs.py @@ -59,10 +59,10 @@ def delete_flow_logs(self): {% for error in errors %} - {{ error.error_code }} - {{ error.error_message }} + {{ error.1 }} + {{ error.2 }} - {{ error.resource_id }} + {{ error.0 }} {% endfor %} From d00cefa25cfd338caaabcefc7a79e6e803f577cf Mon Sep 17 00:00:00 2001 From: Brian Pandola Date: Fri, 9 Oct 2020 04:33:07 -0700 Subject: [PATCH 567/658] Add tagging to ec2.CopySnapshot (#3365) The `@freeze_time` decorator was removed because it is not necessary (and was causing the test to be skipped). Closes #2940 --- moto/ec2/responses/elastic_block_store.py | 13 +++++++++++++ tests/test_ec2/test_elastic_block_store.py | 9 ++++++--- 2 files changed, 19 insertions(+), 3 deletions(-) diff --git a/moto/ec2/responses/elastic_block_store.py b/moto/ec2/responses/elastic_block_store.py index f7f4df9dc8d8..853af936d0ba 100644 --- a/moto/ec2/responses/elastic_block_store.py +++ b/moto/ec2/responses/elastic_block_store.py @@ -19,10 +19,13 @@ def copy_snapshot(self): source_snapshot_id = self._get_param("SourceSnapshotId") source_region = self._get_param("SourceRegion") description = self._get_param("Description") + tags = self._parse_tag_specification("TagSpecification") + snapshot_tags = tags.get("snapshot", {}) if self.is_not_dryrun("CopySnapshot"): snapshot = self.ec2_backend.copy_snapshot( source_snapshot_id, source_region, description ) + snapshot.add_tags(snapshot_tags) template = self.response_template(COPY_SNAPSHOT_RESPONSE) return template.render(snapshot=snapshot) @@ -272,6 +275,16 @@ def reset_snapshot_attribute(self): COPY_SNAPSHOT_RESPONSE = """ 59dbff89-35bd-4eac-99ed-be587EXAMPLE {{ snapshot.id }} + + {% for tag in snapshot.get_tags() %} + + {{ tag.resource_id }} + {{ tag.resource_type }} + {{ tag.key }} + {{ tag.value }} + + {% endfor %} + """ DESCRIBE_SNAPSHOTS_RESPONSE = """ diff --git a/tests/test_ec2/test_elastic_block_store.py b/tests/test_ec2/test_elastic_block_store.py index 7f8313da4af8..ef140b06e10e 100644 --- a/tests/test_ec2/test_elastic_block_store.py +++ b/tests/test_ec2/test_elastic_block_store.py @@ -9,7 +9,6 @@ import boto3 from botocore.exceptions import ClientError from boto.exception import EC2ResponseError -from freezegun import freeze_time import sure # noqa from moto import mock_ec2_deprecated, mock_ec2 @@ -834,22 +833,26 @@ def test_volume_property_hidden_when_no_tags_exist(): volume_response.get("Tags").should.equal(None) -@freeze_time @mock_ec2 def test_copy_snapshot(): ec2_client = boto3.client("ec2", region_name="eu-west-1") dest_ec2_client = boto3.client("ec2", 
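        # the copy is requested through the destination region's client; the
        # source region is named via SourceRegion in the CopySnapshot call below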
region_name="eu-west-2") volume_response = ec2_client.create_volume(AvailabilityZone="eu-west-1a", Size=10) + tag_spec = [ + {"ResourceType": "snapshot", "Tags": [{"Key": "key", "Value": "value"}]} + ] create_snapshot_response = ec2_client.create_snapshot( - VolumeId=volume_response["VolumeId"] + VolumeId=volume_response["VolumeId"], TagSpecifications=tag_spec ) copy_snapshot_response = dest_ec2_client.copy_snapshot( SourceSnapshotId=create_snapshot_response["SnapshotId"], SourceRegion="eu-west-1", + TagSpecifications=tag_spec, ) + copy_snapshot_response["Tags"].should.equal(tag_spec[0]["Tags"]) ec2 = boto3.resource("ec2", region_name="eu-west-1") dest_ec2 = boto3.resource("ec2", region_name="eu-west-2") From c1b2c78db2c4156e44d501b33f36330034d37b00 Mon Sep 17 00:00:00 2001 From: Brian Pandola Date: Fri, 9 Oct 2020 05:55:48 -0700 Subject: [PATCH 568/658] Fix `TagFilter` implementation in `tag:GetResources` (#3366) The `tag_filter` method has been re-arranged to mimic the actual AWS behavior: Return `True` if *any* tag matches a filter and *all* filters are matched. Python's closures are late-binding, so we have to modify the lambdas accordingly! Closes #2814 --- moto/resourcegroupstaggingapi/models.py | 21 ++++--- .../test_resourcegroupstaggingapi.py | 59 +++++++++++++++++++ 2 files changed, 71 insertions(+), 9 deletions(-) diff --git a/moto/resourcegroupstaggingapi/models.py b/moto/resourcegroupstaggingapi/models.py index 4cdf73cc7b92..bd63847a085c 100644 --- a/moto/resourcegroupstaggingapi/models.py +++ b/moto/resourcegroupstaggingapi/models.py @@ -113,32 +113,35 @@ def _get_resources_generator(self, tag_filters=None, resource_type_filters=None) # https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html # TODO move these to their respective backends - filters = [lambda t, v: True] + filters = [] for tag_filter_dict in tag_filters: values = tag_filter_dict.get("Values", []) if len(values) == 0: # Check key matches - filters.append(lambda t, v: t == tag_filter_dict["Key"]) + filters.append(lambda t, v, key=tag_filter_dict["Key"]: t == key) elif len(values) == 1: # Check its exactly the same as key, value filters.append( - lambda t, v: t == tag_filter_dict["Key"] and v == values[0] + lambda t, v, key=tag_filter_dict["Key"], value=values[0]: t == key + and v == value ) else: # Check key matches and value is one of the provided values - filters.append(lambda t, v: t == tag_filter_dict["Key"] and v in values) + filters.append( + lambda t, v, key=tag_filter_dict["Key"], vl=values: t == key + and v in vl + ) def tag_filter(tag_list): result = [] if tag_filters: - for tag in tag_list: + for f in filters: temp_result = [] - for f in filters: + for tag in tag_list: f_result = f(tag["Key"], tag["Value"]) temp_result.append(f_result) - result.append(all(temp_result)) - - return any(result) + result.append(any(temp_result)) + return all(result) else: return True diff --git a/tests/test_resourcegroupstaggingapi/test_resourcegroupstaggingapi.py b/tests/test_resourcegroupstaggingapi/test_resourcegroupstaggingapi.py index c14636fff258..154744a14b15 100644 --- a/tests/test_resourcegroupstaggingapi/test_resourcegroupstaggingapi.py +++ b/tests/test_resourcegroupstaggingapi/test_resourcegroupstaggingapi.py @@ -293,3 +293,62 @@ def test_get_resources_s3(): response_keys.remove(resource["Tags"][0]["Key"]) response_keys.should.have.length_of(0) + + +@mock_ec2 +@mock_resourcegroupstaggingapi +def test_multiple_tag_filters(): + client = boto3.client("ec2", region_name="eu-central-1") + + resp = 
client.run_instances( + ImageId="ami-123", + MinCount=1, + MaxCount=1, + InstanceType="t2.micro", + TagSpecifications=[ + { + "ResourceType": "instance", + "Tags": [ + {"Key": "MY_TAG1", "Value": "MY_UNIQUE_VALUE"}, + {"Key": "MY_TAG2", "Value": "MY_SHARED_VALUE"}, + ], + }, + { + "ResourceType": "instance", + "Tags": [{"Key": "MY_TAG3", "Value": "MY_VALUE3"}], + }, + ], + ) + instance_1_id = resp["Instances"][0]["InstanceId"] + + resp = client.run_instances( + ImageId="ami-456", + MinCount=1, + MaxCount=1, + InstanceType="t2.micro", + TagSpecifications=[ + { + "ResourceType": "instance", + "Tags": [ + {"Key": "MY_TAG1", "Value": "MY_ALT_UNIQUE_VALUE"}, + {"Key": "MY_TAG2", "Value": "MY_SHARED_VALUE"}, + ], + }, + { + "ResourceType": "instance", + "Tags": [{"Key": "MY_ALT_TAG3", "Value": "MY_VALUE3"}], + }, + ], + ) + instance_2_id = resp["Instances"][0]["InstanceId"] + + rtapi = boto3.client("resourcegroupstaggingapi", region_name="eu-central-1") + results = rtapi.get_resources( + TagFilters=[ + {"Key": "MY_TAG1", "Values": ["MY_UNIQUE_VALUE"]}, + {"Key": "MY_TAG2", "Values": ["MY_SHARED_VALUE"]}, + ] + ).get("ResourceTagMappingList", []) + results.should.have.length_of(1) + instance_1_id.should.be.within(results[0]["ResourceARN"]) + instance_2_id.shouldnt.be.within(results[0]["ResourceARN"]) From db7842653fbc30735e84037a768748c01b203023 Mon Sep 17 00:00:00 2001 From: Brian Pandola Date: Fri, 9 Oct 2020 07:57:00 -0700 Subject: [PATCH 569/658] `iot:DeleteThingGroup` should return success even for non-existent groups (#3367) Closes #3026 --- moto/iot/models.py | 8 ++++++-- tests/test_iot/test_iot.py | 6 +++++- 2 files changed, 11 insertions(+), 3 deletions(-) diff --git a/moto/iot/models.py b/moto/iot/models.py index 258a387fe02e..4a7d432395a3 100644 --- a/moto/iot/models.py +++ b/moto/iot/models.py @@ -924,8 +924,12 @@ def delete_thing_group(self, thing_group_name, expected_version): + thing_group_name + " when there are still child groups attached to it" ) - thing_group = self.describe_thing_group(thing_group_name) - del self.thing_groups[thing_group.arn] + try: + thing_group = self.describe_thing_group(thing_group_name) + del self.thing_groups[thing_group.arn] + except ResourceNotFoundException: + # AWS returns success even if the thing group does not exist. + pass def list_thing_groups(self, parent_group, name_prefix_filter, recursive): if recursive is None: diff --git a/tests/test_iot/test_iot.py b/tests/test_iot/test_iot.py index 7a04cdc16008..e80a12a0fab8 100644 --- a/tests/test_iot/test_iot.py +++ b/tests/test_iot/test_iot.py @@ -1038,11 +1038,15 @@ def test_delete_thing_group(): res.should.have.key("thingGroups").which.should.have.length_of(1) res["thingGroups"].should_not.have.key(group_name_2a) - # now that there is no child group, we can delete the previus group safely + # now that there is no child group, we can delete the previous group safely client.delete_thing_group(thingGroupName=group_name_1a) res = client.list_thing_groups() res.should.have.key("thingGroups").which.should.have.length_of(0) + # Deleting an invalid thing group does not raise an error. 
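    # (covered by the new except ResourceNotFoundException branch in
    # delete_thing_group above, which mirrors the HTTP 200 AWS itself returns)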
+ res = client.delete_thing_group(thingGroupName="non-existent-group-name") + res["ResponseMetadata"]["HTTPStatusCode"].should.equal(200) + @mock_iot def test_describe_thing_group_metadata_hierarchy(): From 1e8f87a55d9d69ab07bfd17f7d8d7f9c5b5eedcd Mon Sep 17 00:00:00 2001 From: ezramorris Date: Sat, 10 Oct 2020 16:57:46 +0100 Subject: [PATCH 570/658] Update table in README.md to render nicely in GitHub (#3369) Co-authored-by: Ezra Morris --- README.md | 53 ++++------------------------------------------------- 1 file changed, 4 insertions(+), 49 deletions(-) diff --git a/README.md b/README.md index 58ab04f962c8..3915a85cdbb5 100644 --- a/README.md +++ b/README.md @@ -76,100 +76,55 @@ With the decorator wrapping the test, all the calls to s3 are automatically mock It gets even better! Moto isn't just for Python code and it isn't just for S3. Look at the [standalone server mode](https://github.com/spulec/moto#stand-alone-server-mode) for more information about running Moto with other languages. Here's the status of the other AWS services implemented: -```gherkin -|-------------------------------------------------------------------------------------|-----------------------------| | Service Name | Decorator | Development Status | Comment | -|-------------------------------------------------------------------------------------| | +|---------------------------|-----------------------|---------------------------------|-----------------------------| | ACM | @mock_acm | all endpoints done | | -|-------------------------------------------------------------------------------------| | | API Gateway | @mock_apigateway | core endpoints done | | -|-------------------------------------------------------------------------------------| | | Application Autoscaling | @mock_applicationautoscaling | basic endpoints done | | -|-------------------------------------------------------------------------------------| | | Autoscaling | @mock_autoscaling | core endpoints done | | -|-------------------------------------------------------------------------------------| | | Cloudformation | @mock_cloudformation | core endpoints done | | -|-------------------------------------------------------------------------------------| | | Cloudwatch | @mock_cloudwatch | basic endpoints done | | -|-------------------------------------------------------------------------------------| | | CloudwatchEvents | @mock_events | all endpoints done | | -|-------------------------------------------------------------------------------------| | | Cognito Identity | @mock_cognitoidentity | basic endpoints done | | -|-------------------------------------------------------------------------------------| | | Cognito Identity Provider | @mock_cognitoidp | basic endpoints done | | -|-------------------------------------------------------------------------------------| | -| Config | @mock_config | basic endpoints done | | -| | | core endpoints done | | -|-------------------------------------------------------------------------------------| | +| Config | @mock_config | basic + core endpoints done | | | Data Pipeline | @mock_datapipeline | basic endpoints done | | -|-------------------------------------------------------------------------------------| | | DynamoDB | @mock_dynamodb | core endpoints done | API 20111205. Deprecated. 
| | DynamoDB2 | @mock_dynamodb2 | all endpoints + partial indexes | API 20120810 (Latest) | -|-------------------------------------------------------------------------------------| | | EC2 | @mock_ec2 | core endpoints done | | | - AMI | | core endpoints done | | | - EBS | | core endpoints done | | | - Instances | | all endpoints done | | | - Security Groups | | core endpoints done | | | - Tags | | all endpoints done | | -|-------------------------------------------------------------------------------------| | | ECR | @mock_ecr | basic endpoints done | | -|-------------------------------------------------------------------------------------| | | ECS | @mock_ecs | basic endpoints done | | -|-------------------------------------------------------------------------------------| | | ELB | @mock_elb | core endpoints done | | -|-------------------------------------------------------------------------------------| | | ELBv2 | @mock_elbv2 | all endpoints done | | -|-------------------------------------------------------------------------------------| | | EMR | @mock_emr | core endpoints done | | -|-------------------------------------------------------------------------------------| | | Glacier | @mock_glacier | core endpoints done | | -|-------------------------------------------------------------------------------------| | | IAM | @mock_iam | core endpoints done | | -|-------------------------------------------------------------------------------------| | | IoT | @mock_iot | core endpoints done | | -| | @mock_iotdata | core endpoints done | | -|-------------------------------------------------------------------------------------| | +| IoT data | @mock_iotdata | core endpoints done | | | Kinesis | @mock_kinesis | core endpoints done | | -|-------------------------------------------------------------------------------------| | | KMS | @mock_kms | basic endpoints done | | -|-------------------------------------------------------------------------------------| | -| Lambda | @mock_lambda | basic endpoints done, requires | | -| | | docker | | -|-------------------------------------------------------------------------------------| | +| Lambda | @mock_lambda | basic endpoints done, requires docker | | | Logs | @mock_logs | basic endpoints done | | -|-------------------------------------------------------------------------------------| | | Organizations | @mock_organizations | some core endpoints done | | -|-------------------------------------------------------------------------------------| | | Polly | @mock_polly | all endpoints done | | -|-------------------------------------------------------------------------------------| | | RDS | @mock_rds | core endpoints done | | -|-------------------------------------------------------------------------------------| | | RDS2 | @mock_rds2 | core endpoints done | | -|-------------------------------------------------------------------------------------| | | Redshift | @mock_redshift | core endpoints done | | -|-------------------------------------------------------------------------------------| | | Route53 | @mock_route53 | core endpoints done | | -|-------------------------------------------------------------------------------------| | | S3 | @mock_s3 | core endpoints done | | -|-------------------------------------------------------------------------------------| | | SecretsManager | @mock_secretsmanager | basic endpoints done | | -|-------------------------------------------------------------------------------------| | | SES | @mock_ses | all endpoints done | 
| -|-------------------------------------------------------------------------------------| | | SNS | @mock_sns | all endpoints done | | -|-------------------------------------------------------------------------------------| | | SQS | @mock_sqs | core endpoints done | | -|-------------------------------------------------------------------------------------| | | SSM | @mock_ssm | core endpoints done | | -|-------------------------------------------------------------------------------------| | | STS | @mock_sts | core endpoints done | | -|-------------------------------------------------------------------------------------| | | SWF | @mock_swf | basic endpoints done | | -|-------------------------------------------------------------------------------------| | | X-Ray | @mock_xray | all endpoints done | | -|-------------------------------------------------------------------------------------| -``` For a full list of endpoint [implementation coverage](https://github.com/spulec/moto/blob/master/IMPLEMENTATION_COVERAGE.md) From a2bd4515eb75e670935e46d796584b6fe682b917 Mon Sep 17 00:00:00 2001 From: Brian Pandola Date: Sat, 10 Oct 2020 09:54:36 -0700 Subject: [PATCH 571/658] Add better support for SQS `MaximumMessageSize` attribute (#3374) Closes #3205 --- moto/sqs/exceptions.py | 10 ++++++++ moto/sqs/models.py | 17 ++++++++++++- tests/test_sqs/test_sqs.py | 51 ++++++++++++++++++++++++++++++++++++++ 3 files changed, 77 insertions(+), 1 deletion(-) diff --git a/moto/sqs/exceptions.py b/moto/sqs/exceptions.py index 46d2af400fc5..872c25412a11 100644 --- a/moto/sqs/exceptions.py +++ b/moto/sqs/exceptions.py @@ -103,6 +103,16 @@ def __init__(self, attribute_name): ) +class InvalidAttributeValue(RESTError): + code = 400 + + def __init__(self, attribute_name): + super(InvalidAttributeValue, self).__init__( + "InvalidAttributeValue", + "Invalid value for the parameter {}.".format(attribute_name), + ) + + class InvalidParameterValue(RESTError): code = 400 diff --git a/moto/sqs/models.py b/moto/sqs/models.py index 2784ee625108..a837aacdc3a1 100644 --- a/moto/sqs/models.py +++ b/moto/sqs/models.py @@ -35,6 +35,7 @@ InvalidParameterValue, MissingParameter, OverLimit, + InvalidAttributeValue, ) from moto.core import ACCOUNT_ID as DEFAULT_ACCOUNT_ID @@ -43,6 +44,9 @@ MAXIMUM_MESSAGE_LENGTH = 262144 # 256 KiB +MAXIMUM_MESSAGE_SIZE_ATTR_LOWER_BOUND = 1024 +MAXIMUM_MESSAGE_SIZE_ATTR_UPPER_BOUND = MAXIMUM_MESSAGE_LENGTH + TRANSPORT_TYPE_ENCODINGS = { "String": b"\x01", "Binary": b"\x02", @@ -248,7 +252,7 @@ def __init__(self, name, region, **kwargs): "FifoQueue": "false", "KmsDataKeyReusePeriodSeconds": 300, # five minutes "KmsMasterKeyId": None, - "MaximumMessageSize": int(64 << 12), + "MaximumMessageSize": MAXIMUM_MESSAGE_LENGTH, "MessageRetentionPeriod": 86400 * 4, # four days "Policy": None, "ReceiveMessageWaitTimeSeconds": 0, @@ -262,6 +266,11 @@ def __init__(self, name, region, **kwargs): # Check some conditions if self.fifo_queue and not self.name.endswith(".fifo"): raise InvalidParameterValue("Queue name must end in .fifo for FIFO queues") + if ( + self.maximum_message_size < MAXIMUM_MESSAGE_SIZE_ATTR_LOWER_BOUND + or self.maximum_message_size > MAXIMUM_MESSAGE_SIZE_ATTR_UPPER_BOUND + ): + raise InvalidAttributeValue("MaximumMessageSize") @property def pending_messages(self): @@ -649,6 +658,12 @@ def send_message( queue = self.get_queue(queue_name) + if len(message_body) > queue.maximum_message_size: + msg = "One or more parameters are invalid. 
Reason: Message must be shorter than {} bytes.".format( + queue.maximum_message_size + ) + raise InvalidParameterValue(msg) + if delay_seconds: delay_seconds = int(delay_seconds) else: diff --git a/tests/test_sqs/test_sqs.py b/tests/test_sqs/test_sqs.py index f98131db4e15..05f4bffabbc1 100644 --- a/tests/test_sqs/test_sqs.py +++ b/tests/test_sqs/test_sqs.py @@ -22,6 +22,11 @@ from tests.helpers import requires_boto_gte from tests.test_awslambda.test_lambda import get_test_zip_file1, get_role_name from moto.core import ACCOUNT_ID +from moto.sqs.models import ( + MAXIMUM_MESSAGE_SIZE_ATTR_LOWER_BOUND, + MAXIMUM_MESSAGE_SIZE_ATTR_UPPER_BOUND, + MAXIMUM_MESSAGE_LENGTH, +) TEST_POLICY = """ { @@ -2157,3 +2162,49 @@ def test_invoke_function_from_sqs_exception(): time.sleep(1) assert False, "Test Failed" + + +@mock_sqs +def test_maximum_message_size_attribute_default(): + sqs = boto3.resource("sqs", region_name="eu-west-3") + queue = sqs.create_queue(QueueName="test-queue",) + int(queue.attributes["MaximumMessageSize"]).should.equal(MAXIMUM_MESSAGE_LENGTH) + with assert_raises(Exception) as e: + queue.send_message(MessageBody="a" * (MAXIMUM_MESSAGE_LENGTH + 1)) + ex = e.exception + ex.response["Error"]["Code"].should.equal("InvalidParameterValue") + + +@mock_sqs +def test_maximum_message_size_attribute_fails_for_invalid_values(): + sqs = boto3.resource("sqs", region_name="eu-west-3") + invalid_values = [ + MAXIMUM_MESSAGE_SIZE_ATTR_LOWER_BOUND - 1, + MAXIMUM_MESSAGE_SIZE_ATTR_UPPER_BOUND + 1, + ] + for message_size in invalid_values: + with assert_raises(ClientError) as e: + sqs.create_queue( + QueueName="test-queue", + Attributes={"MaximumMessageSize": str(message_size)}, + ) + ex = e.exception + ex.response["Error"]["Code"].should.equal("InvalidAttributeValue") + + +@mock_sqs +def test_send_message_fails_when_message_size_greater_than_max_message_size(): + sqs = boto3.resource("sqs", region_name="eu-west-3") + message_size_limit = 12345 + queue = sqs.create_queue( + QueueName="test-queue", + Attributes={"MaximumMessageSize": str(message_size_limit)}, + ) + int(queue.attributes["MaximumMessageSize"]).should.equal(message_size_limit) + with assert_raises(ClientError) as e: + queue.send_message(MessageBody="a" * (message_size_limit + 1)) + ex = e.exception + ex.response["Error"]["Code"].should.equal("InvalidParameterValue") + ex.response["Error"]["Message"].should.contain( + "{} bytes".format(message_size_limit) + ) From 4a336b8b0471497165c3397674f27d3e62a4321a Mon Sep 17 00:00:00 2001 From: Brian Pandola Date: Sat, 10 Oct 2020 11:05:21 -0700 Subject: [PATCH 572/658] Add `ec2.vpc` resource support to Tagging API (#3375) Closes #1849 --- moto/resourcegroupstaggingapi/models.py | 18 ++++++++++++++++ .../test_resourcegroupstaggingapi.py | 21 +++++++++++++++++++ 2 files changed, 39 insertions(+) diff --git a/moto/resourcegroupstaggingapi/models.py b/moto/resourcegroupstaggingapi/models.py index bd63847a085c..1cf38e8d1ab0 100644 --- a/moto/resourcegroupstaggingapi/models.py +++ b/moto/resourcegroupstaggingapi/models.py @@ -3,6 +3,7 @@ import six from boto3 import Session +from moto.core import ACCOUNT_ID from moto.core import BaseBackend from moto.core.exceptions import RESTError @@ -367,6 +368,23 @@ def get_kms_tags(kms_key_id): # RedShift Subnet group # VPC + if ( + not resource_type_filters + or "ec2" in resource_type_filters + or "ec2:vpc" in resource_type_filters + ): + for vpc in self.ec2_backend.vpcs.values(): + tags = get_ec2_tags(vpc.id) + if not tags or not tag_filter( + tags + ): # Skip if 
no tags, or invalid filter + continue + yield { + "ResourceARN": "arn:aws:ec2:{0}:{1}:vpc/{2}".format( + self.region_name, ACCOUNT_ID, vpc.id + ), + "Tags": tags, + } # VPC Customer Gateway # VPC DHCP Option Set # VPC Internet Gateway diff --git a/tests/test_resourcegroupstaggingapi/test_resourcegroupstaggingapi.py b/tests/test_resourcegroupstaggingapi/test_resourcegroupstaggingapi.py index 154744a14b15..f5a934b9b323 100644 --- a/tests/test_resourcegroupstaggingapi/test_resourcegroupstaggingapi.py +++ b/tests/test_resourcegroupstaggingapi/test_resourcegroupstaggingapi.py @@ -61,6 +61,27 @@ def test_get_resources_ec2(): resp["ResourceTagMappingList"][0]["ResourceARN"].should.contain("instance/") +@mock_ec2 +@mock_resourcegroupstaggingapi +def test_get_resources_ec2_vpc(): + ec2 = boto3.resource("ec2", region_name="us-west-1") + vpc = ec2.create_vpc(CidrBlock="10.0.0.0/16") + ec2.create_tags(Resources=[vpc.id], Tags=[{"Key": "test", "Value": "test"}]) + + def assert_response(resp): + results = resp.get("ResourceTagMappingList", []) + results.should.have.length_of(1) + vpc.id.should.be.within(results[0]["ResourceARN"]) + + rtapi = boto3.client("resourcegroupstaggingapi", region_name="us-west-1") + resp = rtapi.get_resources(ResourceTypeFilters=["ec2"]) + assert_response(resp) + resp = rtapi.get_resources(ResourceTypeFilters=["ec2:vpc"]) + assert_response(resp) + resp = rtapi.get_resources(TagFilters=[{"Key": "test", "Values": ["test"]}]) + assert_response(resp) + + @mock_ec2 @mock_resourcegroupstaggingapi def test_get_tag_keys_ec2(): From c54f182ca1cdd3d4673a8fc3d61d5fbf6b0d5974 Mon Sep 17 00:00:00 2001 From: Brian Pandola Date: Sat, 10 Oct 2020 12:02:08 -0700 Subject: [PATCH 573/658] Implement additional filters for `ecs.ListTasks` (#3376) Closes #1785 --- moto/ecs/models.py | 20 ++++++ tests/test_ecs/test_ecs_boto3.py | 101 +++++++++++++++++++++++++++++++ 2 files changed, 121 insertions(+) diff --git a/moto/ecs/models.py b/moto/ecs/models.py index 210344f1038b..69ed51cb2380 100644 --- a/moto/ecs/models.py +++ b/moto/ecs/models.py @@ -940,10 +940,30 @@ def list_tasks( ) ) + if family: + task_definition_arns = self.list_task_definitions(family) + filtered_tasks = list( + filter( + lambda t: t.task_definition_arn in task_definition_arns, + filtered_tasks, + ) + ) + if started_by: filtered_tasks = list( filter(lambda t: started_by == t.started_by, filtered_tasks) ) + + if service_name: + # TODO: We can't filter on `service_name` until the backend actually + # launches tasks as part of the service creation process. 
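            # until then a service_name argument is accepted but acts as a
            # no-op: the task list passes through unfiltered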
+ pass + + if desiredStatus: + filtered_tasks = list( + filter(lambda t: t.desired_status == desiredStatus, filtered_tasks) + ) + return [t.task_arn for t in filtered_tasks] def stop_task(self, cluster_str, task_str, reason): diff --git a/tests/test_ecs/test_ecs_boto3.py b/tests/test_ecs/test_ecs_boto3.py index 919124c6e15e..2ef801807be0 100644 --- a/tests/test_ecs/test_ecs_boto3.py +++ b/tests/test_ecs/test_ecs_boto3.py @@ -2742,3 +2742,104 @@ def test_update_task_set(): )["taskSets"][0] assert updated_task_set["scale"]["value"] == 25.0 assert updated_task_set["scale"]["unit"] == "PERCENT" + + +@mock_ec2 +@mock_ecs +def test_list_tasks_with_filters(): + ecs = boto3.client("ecs", region_name="us-east-1") + ec2 = boto3.resource("ec2", region_name="us-east-1") + + _ = ecs.create_cluster(clusterName="test_cluster_1") + _ = ecs.create_cluster(clusterName="test_cluster_2") + + test_instance = ec2.create_instances( + ImageId="ami-1234abcd", MinCount=1, MaxCount=1 + )[0] + + instance_id_document = json.dumps( + ec2_utils.generate_instance_identity_document(test_instance) + ) + + _ = ecs.register_container_instance( + cluster="test_cluster_1", instanceIdentityDocument=instance_id_document + ) + _ = ecs.register_container_instance( + cluster="test_cluster_2", instanceIdentityDocument=instance_id_document + ) + + container_instances = ecs.list_container_instances(cluster="test_cluster_1") + container_id_1 = container_instances["containerInstanceArns"][0].split("/")[-1] + container_instances = ecs.list_container_instances(cluster="test_cluster_2") + container_id_2 = container_instances["containerInstanceArns"][0].split("/")[-1] + + test_container_def = { + "name": "hello_world", + "image": "docker/hello-world:latest", + "cpu": 1024, + "memory": 400, + "essential": True, + "environment": [{"name": "AWS_ACCESS_KEY_ID", "value": "SOME_ACCESS_KEY"}], + "logConfiguration": {"logDriver": "json-file"}, + } + + _ = ecs.register_task_definition( + family="test_task_def_1", containerDefinitions=[test_container_def], + ) + + _ = ecs.register_task_definition( + family="test_task_def_2", containerDefinitions=[test_container_def], + ) + + _ = ecs.start_task( + cluster="test_cluster_1", + taskDefinition="test_task_def_1", + overrides={}, + containerInstances=[container_id_1], + startedBy="foo", + ) + + resp = ecs.start_task( + cluster="test_cluster_2", + taskDefinition="test_task_def_2", + overrides={}, + containerInstances=[container_id_2], + startedBy="foo", + ) + task_to_stop = resp["tasks"][0]["taskArn"] + + _ = ecs.start_task( + cluster="test_cluster_1", + taskDefinition="test_task_def_1", + overrides={}, + containerInstances=[container_id_1], + startedBy="bar", + ) + + len(ecs.list_tasks()["taskArns"]).should.equal(3) + + len(ecs.list_tasks(cluster="test_cluster_1")["taskArns"]).should.equal(2) + len(ecs.list_tasks(cluster="test_cluster_2")["taskArns"]).should.equal(1) + + len(ecs.list_tasks(containerInstance="bad-id")["taskArns"]).should.equal(0) + len(ecs.list_tasks(containerInstance=container_id_1)["taskArns"]).should.equal(2) + len(ecs.list_tasks(containerInstance=container_id_2)["taskArns"]).should.equal(1) + + len(ecs.list_tasks(family="non-existent-family")["taskArns"]).should.equal(0) + len(ecs.list_tasks(family="test_task_def_1")["taskArns"]).should.equal(2) + len(ecs.list_tasks(family="test_task_def_2")["taskArns"]).should.equal(1) + + len(ecs.list_tasks(startedBy="non-existent-entity")["taskArns"]).should.equal(0) + len(ecs.list_tasks(startedBy="foo")["taskArns"]).should.equal(2) + 
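    # the backend applies each filter in turn, so combined arguments AND
    # together; the cluster/startedBy and containerInstance/startedBy pairs
    # at the end of this test exercise exactly that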
len(ecs.list_tasks(startedBy="bar")["taskArns"]).should.equal(1)
+
+    len(ecs.list_tasks(desiredStatus="RUNNING")["taskArns"]).should.equal(3)
+    _ = ecs.stop_task(cluster="test_cluster_2", task=task_to_stop, reason="for testing")
+    len(ecs.list_tasks(desiredStatus="RUNNING")["taskArns"]).should.equal(2)
+    len(ecs.list_tasks(desiredStatus="STOPPED")["taskArns"]).should.equal(1)
+
+    resp = ecs.list_tasks(cluster="test_cluster_1", startedBy="foo")
+    len(resp["taskArns"]).should.equal(1)
+
+    resp = ecs.list_tasks(containerInstance=container_id_1, startedBy="bar")
+    len(resp["taskArns"]).should.equal(1)

From 502818be4ce1255da64fd8a7201b474c26f51e16 Mon Sep 17 00:00:00 2001
From: nom3ad <19239479+nom3ad@users.noreply.github.com>
Date: Sun, 11 Oct 2020 03:02:42 +0530
Subject: [PATCH 574/658] Fixes acm:describe_certificate for imported certificates having no SAN extension (#3370)

* Fix(test_acm): describe_certificate testcase doesn't execute

* handle acm certificates lacking extensions

Co-authored-by: nom3ad
---
 moto/acm/models.py         | 9 ++++++---
 tests/test_acm/test_acm.py | 2 +-
 2 files changed, 7 insertions(+), 4 deletions(-)

diff --git a/moto/acm/models.py b/moto/acm/models.py
index 3df541982557..0608400f78ec 100644
--- a/moto/acm/models.py
+++ b/moto/acm/models.py
@@ -293,9 +293,12 @@ def describe(self):
             key_algo = "EC_prime256v1"
 
         # Look for SANs
-        san_obj = self._cert.extensions.get_extension_for_oid(
-            cryptography.x509.OID_SUBJECT_ALTERNATIVE_NAME
-        )
+        try:
+            san_obj = self._cert.extensions.get_extension_for_oid(
+                cryptography.x509.OID_SUBJECT_ALTERNATIVE_NAME
+            )
+        except cryptography.x509.ExtensionNotFound:
+            san_obj = None
         sans = []
         if san_obj is not None:
             sans = [item.value for item in san_obj.value]
diff --git a/tests/test_acm/test_acm.py b/tests/test_acm/test_acm.py
index b38cd1843404..017d166ddb4f 100644
--- a/tests/test_acm/test_acm.py
+++ b/tests/test_acm/test_acm.py
@@ -140,7 +140,7 @@ def test_describe_certificate():
 
 
 @mock_acm
-def test_describe_certificate():
+def test_describe_certificate_with_bad_arn():
     client = boto3.client("acm", region_name="eu-central-1")
 
     try:

From ea0ba91f639bb16c36f1cda6f3f555e0801eca9d Mon Sep 17 00:00:00 2001
From: nom3ad <19239479+nom3ad@users.noreply.github.com>
Date: Mon, 12 Oct 2020 12:25:14 +0530
Subject: [PATCH 575/658] support Tags parameter in ACM import_certificate() and request_certificate() methods (#3373)

* ACM: support `tags` parameter in import_certificate()

* ACM: support tags parameter in request_certificate()

* ACM: better tag operations with more unit tests

Co-authored-by: nom3ad
---
 moto/acm/models.py         |  96 ++++++++++++++-----
 moto/acm/responses.py      |   5 +-
 tests/test_acm/test_acm.py | 169 +++++++++++++++++++++++++++++++++++++
 3 files changed, 247 insertions(+), 23 deletions(-)

diff --git a/moto/acm/models.py b/moto/acm/models.py
index 0608400f78ec..6e4ac150892d 100644
--- a/moto/acm/models.py
+++ b/moto/acm/models.py
@@ -70,6 +70,68 @@ class AWSResourceNotFoundException(AWSError):
     TYPE = "ResourceNotFoundException"
 
 
+class AWSTooManyTagsException(AWSError):
+    TYPE = "TooManyTagsException"
+
+
+class TagHolder(dict):
+    MAX_TAG_COUNT = 50
+    MAX_KEY_LENGTH = 128
+    MAX_VALUE_LENGTH = 256
+
+    def _validate_kv(self, key, value, index):
+        if len(key) > self.MAX_KEY_LENGTH:
+            raise AWSValidationException(
+                "Value '%s' at 'tags.%d.member.key' failed to satisfy constraint: Member must have length less than or equal to %s"
+                % (key, index, self.MAX_KEY_LENGTH)
+            )
+        if value and len(value) > self.MAX_VALUE_LENGTH:
+            raise AWSValidationException(
+ "Value '%s' at 'tags.%d.member.value' failed to satisfy constraint: Member must have length less than or equal to %s" + % (value, index, self.MAX_VALUE_LENGTH) + ) + if key.startswith("aws:"): + raise AWSValidationException( + 'Invalid Tag Key: "%s". AWS internal tags cannot be changed with this API' + % key + ) + + def add(self, tags): + tags_copy = self.copy() + for i, tag in enumerate(tags): + key = tag["Key"] + value = tag.get("Value", None) + self._validate_kv(key, value, i + 1) + + tags_copy[key] = value + if len(tags_copy) > self.MAX_TAG_COUNT: + raise AWSTooManyTagsException( + "the TagSet: '{%s}' contains too many Tags" + % ", ".join(k + "=" + str(v or "") for k, v in tags_copy.items()) + ) + + self.update(tags_copy) + + def remove(self, tags): + for i, tag in enumerate(tags): + key = tag["Key"] + value = tag.get("Value", None) + self._validate_kv(key, value, i + 1) + try: + # If value isnt provided, just delete key + if value is None: + del self[key] + # If value is provided, only delete if it matches what already exists + elif self[key] == value: + del self[key] + except KeyError: + pass + + def equals(self, tags): + tags = {t["Key"]: t.get("Value", None) for t in tags} if tags else {} + return self == tags + + class CertBundle(BaseModel): def __init__( self, @@ -88,7 +150,7 @@ def __init__( self.key = private_key self._key = None self.chain = chain - self.tags = {} + self.tags = TagHolder() self._chain = None self.type = cert_type # Should really be an enum self.status = cert_status # Should really be an enum @@ -388,7 +450,7 @@ def _set_idempotency_token_arn(self, token, arn): "expires": datetime.datetime.now() + datetime.timedelta(hours=1), } - def import_cert(self, certificate, private_key, chain=None, arn=None): + def import_cert(self, certificate, private_key, chain=None, arn=None, tags=None): if arn is not None: if arn not in self._certificates: raise self._arn_not_found(arn) @@ -403,6 +465,9 @@ def import_cert(self, certificate, private_key, chain=None, arn=None): self._certificates[bundle.arn] = bundle + if tags: + self.add_tags_to_certificate(bundle.arn, tags) + return bundle.arn def get_certificates_list(self, statuses): @@ -437,10 +502,11 @@ def request_certificate( domain_validation_options, idempotency_token, subject_alt_names, + tags=None, ): if idempotency_token is not None: arn = self._get_arn_from_idempotency_token(idempotency_token) - if arn is not None: + if arn and self._certificates[arn].tags.equals(tags): return arn cert = CertBundle.generate_cert( @@ -450,34 +516,20 @@ def request_certificate( self._set_idempotency_token_arn(idempotency_token, cert.arn) self._certificates[cert.arn] = cert + if tags: + cert.tags.add(tags) + return cert.arn def add_tags_to_certificate(self, arn, tags): # get_cert does arn check cert_bundle = self.get_certificate(arn) - - for tag in tags: - key = tag["Key"] - value = tag.get("Value", None) - cert_bundle.tags[key] = value + cert_bundle.tags.add(tags) def remove_tags_from_certificate(self, arn, tags): # get_cert does arn check cert_bundle = self.get_certificate(arn) - - for tag in tags: - key = tag["Key"] - value = tag.get("Value", None) - - try: - # If value isnt provided, just delete key - if value is None: - del cert_bundle.tags[key] - # If value is provided, only delete if it matches what already exists - elif cert_bundle.tags[key] == value: - del cert_bundle.tags[key] - except KeyError: - pass + cert_bundle.tags.remove(tags) acm_backends = {} diff --git a/moto/acm/responses.py b/moto/acm/responses.py index 
13b22fa958b8..0908c6ff7069 100644 --- a/moto/acm/responses.py +++ b/moto/acm/responses.py @@ -117,6 +117,7 @@ def import_certificate(self): private_key = self._get_param("PrivateKey") chain = self._get_param("CertificateChain") # Optional current_arn = self._get_param("CertificateArn") # Optional + tags = self._get_param("Tags") # Optional # Simple parameter decoding. Rather do it here as its a data transport decision not part of the # actual data @@ -142,7 +143,7 @@ def import_certificate(self): try: arn = self.acm_backend.import_cert( - certificate, private_key, chain=chain, arn=current_arn + certificate, private_key, chain=chain, arn=current_arn, tags=tags ) except AWSError as err: return err.response() @@ -210,6 +211,7 @@ def request_certificate(self): ) # is ignored atm idempotency_token = self._get_param("IdempotencyToken") subject_alt_names = self._get_param("SubjectAlternativeNames") + tags = self._get_param("Tags") # Optional if subject_alt_names is not None and len(subject_alt_names) > 10: # There is initial AWS limit of 10 @@ -227,6 +229,7 @@ def request_certificate(self): domain_validation_options, idempotency_token, subject_alt_names, + tags, ) except AWSError as err: return err.response() diff --git a/tests/test_acm/test_acm.py b/tests/test_acm/test_acm.py index 017d166ddb4f..790ae443091c 100644 --- a/tests/test_acm/test_acm.py +++ b/tests/test_acm/test_acm.py @@ -11,6 +11,7 @@ from moto import mock_acm from moto.core import ACCOUNT_ID +from nose.tools import assert_raises RESOURCE_FOLDER = os.path.join(os.path.dirname(__file__), "resources") _GET_RESOURCE = lambda x: open(os.path.join(RESOURCE_FOLDER, x), "rb").read() @@ -46,6 +47,30 @@ def test_import_certificate(): resp.should.contain("CertificateChain") +@mock_acm +def test_import_certificate_with_tags(): + client = boto3.client("acm", region_name="eu-central-1") + + resp = client.import_certificate( + Certificate=SERVER_CRT, + PrivateKey=SERVER_KEY, + CertificateChain=CA_CRT, + Tags=[{"Key": "Environment", "Value": "QA"}, {"Key": "KeyOnly"},], + ) + arn = resp["CertificateArn"] + + resp = client.get_certificate(CertificateArn=arn) + resp["Certificate"].should.equal(SERVER_CRT.decode()) + resp.should.contain("CertificateChain") + + resp = client.list_tags_for_certificate(CertificateArn=arn) + tags = {item["Key"]: item.get("Value", "__NONE__") for item in resp["Tags"]} + tags.should.contain("Environment") + tags.should.contain("KeyOnly") + tags["Environment"].should.equal("QA") + tags["KeyOnly"].should.equal("__NONE__") + + @mock_acm def test_import_bad_certificate(): client = boto3.client("acm", region_name="eu-central-1") @@ -313,6 +338,150 @@ def test_request_certificate(): resp["CertificateArn"].should.equal(arn) +@mock_acm +def test_request_certificate_with_tags(): + client = boto3.client("acm", region_name="eu-central-1") + + token = str(uuid.uuid4()) + + resp = client.request_certificate( + DomainName="google.com", + IdempotencyToken=token, + SubjectAlternativeNames=["google.com", "www.google.com", "mail.google.com"], + Tags=[ + {"Key": "Environment", "Value": "QA"}, + {"Key": "WithEmptyStr", "Value": ""}, + ], + ) + resp.should.contain("CertificateArn") + arn_1 = resp["CertificateArn"] + + resp = client.list_tags_for_certificate(CertificateArn=arn_1) + tags = {item["Key"]: item.get("Value", "__NONE__") for item in resp["Tags"]} + tags.should.have.length_of(2) + tags["Environment"].should.equal("QA") + tags["WithEmptyStr"].should.equal("") + + # Request certificate for "google.com" with same IdempotencyToken but 
with different Tags + resp = client.request_certificate( + DomainName="google.com", + IdempotencyToken=token, + SubjectAlternativeNames=["google.com", "www.google.com", "mail.google.com"], + Tags=[{"Key": "Environment", "Value": "Prod"}, {"Key": "KeyOnly"},], + ) + arn_2 = resp["CertificateArn"] + + assert arn_1 != arn_2 # if tags are matched, ACM would have returned same arn + + resp = client.list_tags_for_certificate(CertificateArn=arn_2) + tags = {item["Key"]: item.get("Value", "__NONE__") for item in resp["Tags"]} + tags.should.have.length_of(2) + tags["Environment"].should.equal("Prod") + tags["KeyOnly"].should.equal("__NONE__") + + resp = client.request_certificate( + DomainName="google.com", + IdempotencyToken=token, + SubjectAlternativeNames=["google.com", "www.google.com", "mail.google.com"], + Tags=[ + {"Key": "Environment", "Value": "QA"}, + {"Key": "WithEmptyStr", "Value": ""}, + ], + ) + + +@mock_acm +def test_operations_with_invalid_tags(): + client = boto3.client("acm", region_name="eu-central-1") + + # request certificate with invalid tags + with assert_raises(ClientError) as ex: + client.request_certificate( + DomainName="example.com", Tags=[{"Key": "X" * 200, "Value": "Valid"}], + ) + ex.exception.response["Error"]["Code"].should.equal("ValidationException") + ex.exception.response["Error"]["Message"].should.contain( + "Member must have length less than or equal to 128" + ) + + # import certificate with invalid tags + with assert_raises(ClientError) as ex: + client.import_certificate( + Certificate=SERVER_CRT, + PrivateKey=SERVER_KEY, + CertificateChain=CA_CRT, + Tags=[ + {"Key": "Valid", "Value": "X" * 300}, + {"Key": "aws:xx", "Value": "Valid"}, + ], + ) + + ex.exception.response["Error"]["Code"].should.equal("ValidationException") + ex.exception.response["Error"]["Message"].should.contain( + "Member must have length less than or equal to 256" + ) + + arn = _import_cert(client) + + # add invalid tags to existing certificate + with assert_raises(ClientError) as ex: + client.add_tags_to_certificate( + CertificateArn=arn, + Tags=[{"Key": "aws:xxx", "Value": "Valid"}, {"Key": "key2"}], + ) + ex.exception.response["Error"]["Code"].should.equal("ValidationException") + ex.exception.response["Error"]["Message"].should.contain( + "AWS internal tags cannot be changed with this API" + ) + + # try removing invalid tags from existing certificate + with assert_raises(ClientError) as ex: + client.remove_tags_from_certificate( + CertificateArn=arn, Tags=[{"Key": "aws:xxx", "Value": "Valid"}] + ) + ex.exception.response["Error"]["Code"].should.equal("ValidationException") + ex.exception.response["Error"]["Message"].should.contain( + "AWS internal tags cannot be changed with this API" + ) + + +@mock_acm +def test_add_too_many_tags(): + client = boto3.client("acm", region_name="eu-central-1") + arn = _import_cert(client) + + # Add 51 tags + with assert_raises(ClientError) as ex: + client.add_tags_to_certificate( + CertificateArn=arn, + Tags=[{"Key": "a-%d" % i, "Value": "abcd"} for i in range(1, 52)], + ) + ex.exception.response["Error"]["Code"].should.equal("TooManyTagsException") + ex.exception.response["Error"]["Message"].should.contain("contains too many Tags") + client.list_tags_for_certificate(CertificateArn=arn)["Tags"].should.have.empty + + # Add 49 tags first, then try to add 2 more. 
+ client.add_tags_to_certificate( + CertificateArn=arn, + Tags=[{"Key": "p-%d" % i, "Value": "pqrs"} for i in range(1, 50)], + ) + client.list_tags_for_certificate(CertificateArn=arn)["Tags"].should.have.length_of( + 49 + ) + with assert_raises(ClientError) as ex: + client.add_tags_to_certificate( + CertificateArn=arn, + Tags=[{"Key": "x-1", "Value": "xyz"}, {"Key": "x-2", "Value": "xyz"}], + ) + ex.exception.response["Error"]["Code"].should.equal("TooManyTagsException") + ex.exception.response["Error"]["Message"].should.contain("contains too many Tags") + ex.exception.response["Error"]["Message"].count("pqrs").should.equal(49) + ex.exception.response["Error"]["Message"].count("xyz").should.equal(2) + client.list_tags_for_certificate(CertificateArn=arn)["Tags"].should.have.length_of( + 49 + ) + + @mock_acm def test_request_certificate_no_san(): client = boto3.client("acm", region_name="eu-central-1") From 0a938f7bb4864b39464115559c120c57fb3add8e Mon Sep 17 00:00:00 2001 From: waynemetcalfe Date: Mon, 12 Oct 2020 12:13:20 +0100 Subject: [PATCH 576/658] issue-3379 iam list_roles: implement PathPrefix, MaxItems and Marker (#3380) * issue-3379 iam list_roles: implement PathPrefix, MaxItems and Marker * issue-3379 fix cloudformation test --- moto/iam/models.py | 25 +++--- moto/iam/responses.py | 17 ++-- .../test_cloudformation_stack_integration.py | 4 +- tests/test_iam/test_iam.py | 78 ++++++++++++++++++- 4 files changed, 106 insertions(+), 18 deletions(-) diff --git a/moto/iam/models.py b/moto/iam/models.py index 6397fd099668..76b824d609bb 100755 --- a/moto/iam/models.py +++ b/moto/iam/models.py @@ -2011,16 +2011,23 @@ def update_user(self, user_name, new_path=None, new_user_name=None): user.name = new_user_name self.users[new_user_name] = self.users.pop(user_name) - def list_roles(self, path_prefix, marker, max_items): - roles = None - try: - roles = self.roles.values() - except KeyError: - raise IAMNotFoundException( - "Users {0}, {1}, {2} not found".format(path_prefix, marker, max_items) - ) + def list_roles(self, path_prefix=None, marker=None, max_items=None): + path_prefix = path_prefix if path_prefix else "/" + max_items = int(max_items) if max_items else 100 + start_index = int(marker) if marker else 0 + + roles = self.roles.values() + roles = filter_items_with_path_prefix(path_prefix, roles) + sorted_roles = sorted(roles, key=lambda role: role.id) + + roles_to_return = sorted_roles[start_index : start_index + max_items] + + if len(sorted_roles) <= (start_index + max_items): + marker = None + else: + marker = str(start_index + max_items) - return roles + return roles_to_return, marker def upload_signing_certificate(self, user_name, body): user = self.get_user(user_name) diff --git a/moto/iam/responses.py b/moto/iam/responses.py index eed610f13846..55a7c207626b 100644 --- a/moto/iam/responses.py +++ b/moto/iam/responses.py @@ -133,7 +133,7 @@ def list_entities_for_policy(self): entity_users.append(user.name) elif entity == "Role": - roles = iam_backend.list_roles(path_prefix, marker, max_items) + roles, _ = iam_backend.list_roles(path_prefix, marker, max_items) if roles: for role in roles: for p in role.managed_policies: @@ -156,7 +156,7 @@ def list_entities_for_policy(self): if p == policy_arn: entity_users.append(user.name) - roles = iam_backend.list_roles(path_prefix, marker, max_items) + roles, _ = iam_backend.list_roles(path_prefix, marker, max_items) if roles: for role in roles: for p in role.managed_policies: @@ -356,9 +356,13 @@ def remove_role_from_instance_profile(self): return 
template.render()
 
     def list_roles(self):
-        roles = iam_backend.get_roles()
+        path_prefix = self._get_param("PathPrefix", "/")
+        marker = self._get_param("Marker", "0")
+        max_items = self._get_param("MaxItems", 100)
+
+        roles, marker = iam_backend.list_roles(path_prefix, marker, max_items)
         template = self.response_template(LIST_ROLES_TEMPLATE)
-        return template.render(roles=roles)
+        return template.render(roles=roles, marker=marker)
 
     def list_instance_profiles(self):
         profiles = iam_backend.get_instance_profiles()
@@ -1379,7 +1383,10 @@ def get_account_summary(self):
 
 LIST_ROLES_TEMPLATE = """<ListRolesResponse xmlns="https://iam.amazonaws.com/doc/2010-05-08/">
   <ListRolesResult>
-    <IsTruncated>false</IsTruncated>
+    <IsTruncated>{{ 'true' if marker else 'false' }}</IsTruncated>
+    {% if marker %}
+    <Marker>{{ marker }}</Marker>
+    {% endif %}
     <Roles>
       {% for role in roles %}
       <member>
diff --git a/tests/test_cloudformation/test_cloudformation_stack_integration.py b/tests/test_cloudformation/test_cloudformation_stack_integration.py
index ee2fbc94cfbc..9949bb4a5035 100644
--- a/tests/test_cloudformation/test_cloudformation_stack_integration.py
+++ b/tests/test_cloudformation/test_cloudformation_stack_integration.py
@@ -870,7 +870,7 @@ def test_iam_roles():
                 }
             ]
         },
-        "Path": "my-path",
+        "Path": "/my-path/",
        "Policies": [
            {
                "PolicyDocument": {
@@ -939,7 +939,7 @@ def test_iam_roles():
         # Role name is not specified, so randomly generated - can't check exact name
         if "with-path" in role.role_name:
             role_name_to_id["with-path"] = role.role_id
-            role.path.should.equal("my-path")
+            role.path.should.equal("/my-path/")
         else:
             role_name_to_id["no-path"] = role.role_id
             role.role_name.should.equal("my-role-no-path-name")
diff --git a/tests/test_iam/test_iam.py b/tests/test_iam/test_iam.py
index 9cf7decb6c89..7db2f0162775 100644
--- a/tests/test_iam/test_iam.py
+++ b/tests/test_iam/test_iam.py
@@ -155,13 +155,13 @@ def test_create_role_and_instance_profile():
     conn = boto.connect_iam()
     conn.create_instance_profile("my-profile", path="my-path")
     conn.create_role(
-        "my-role", assume_role_policy_document="some policy", path="my-path"
+        "my-role", assume_role_policy_document="some policy", path="/my-path/"
     )
     conn.add_role_to_instance_profile("my-profile", "my-role")
 
     role = conn.get_role("my-role")
-    role.path.should.equal("my-path")
+    role.path.should.equal("/my-path/")
     role.assume_role_policy_document.should.equal("some policy")
 
     profile = conn.get_instance_profile("my-profile")
@@ -3933,3 +3933,77 @@ def test_policy_config_client():
         )["BaseConfigurationItems"][0]["resourceName"]
         == policies[8]["name"]
     )
+
+
+@mock_iam()
+def test_list_roles_with_more_than_100_roles_no_max_items_defaults_to_100():
+    iam = boto3.client("iam", region_name="us-east-1")
+    for i in range(150):
+        iam.create_role(
+            RoleName="test_role_{}".format(i), AssumeRolePolicyDocument="some policy"
+        )
+    response = iam.list_roles()
+    roles = response["Roles"]
+
+    assert response["IsTruncated"] is True
+    assert len(roles) == 100
+
+
+@mock_iam()
+def test_list_roles_max_item_and_marker_values_adhered():
+    iam = boto3.client("iam", region_name="us-east-1")
+    for i in range(10):
+        iam.create_role(
+            RoleName="test_role_{}".format(i), AssumeRolePolicyDocument="some policy"
+        )
+    response = iam.list_roles(MaxItems=2)
+    roles = response["Roles"]
+
+    assert response["IsTruncated"] is True
+    assert len(roles) == 2
+
+    response = iam.list_roles(Marker=response["Marker"])
+    roles = response["Roles"]
+
+    assert response["IsTruncated"] is False
+    assert len(roles) == 8
+
+
+@mock_iam()
+def test_list_roles_path_prefix_value_adhered():
+    iam = boto3.client("iam", region_name="us-east-1")
+    iam.create_role(
+
RoleName="test_role_without_path", AssumeRolePolicyDocument="some policy" + ) + iam.create_role( + RoleName="test_role_with_path", + AssumeRolePolicyDocument="some policy", + Path="/TestPath/", + ) + + response = iam.list_roles(PathPrefix="/TestPath/") + roles = response["Roles"] + + assert len(roles) == 1 + assert roles[0]["RoleName"] == "test_role_with_path" + + +@mock_iam() +def test_list_roles_none_found_returns_empty_list(): + iam = boto3.client("iam", region_name="us-east-1") + + response = iam.list_roles() + roles = response["Roles"] + assert len(roles) == 0 + + response = iam.list_roles(PathPrefix="/TestPath") + roles = response["Roles"] + assert len(roles) == 0 + + response = iam.list_roles(Marker="10") + roles = response["Roles"] + assert len(roles) == 0 + + response = iam.list_roles(MaxItems=10) + roles = response["Roles"] + assert len(roles) == 0 From fe361f861da28e2896e28d4c0b32fc52d324868d Mon Sep 17 00:00:00 2001 From: nom3ad <19239479+nom3ad@users.noreply.github.com> Date: Mon, 12 Oct 2020 19:13:36 +0530 Subject: [PATCH 577/658] Enable more unit tests for ACM (#3372) * Enable more unit tests for ACM * put a smile on travis-ci face Co-authored-by: nom3ad Co-authored-by: Bert Blommers --- tests/test_acm/test_acm.py | 148 ++++++++++++++++++++----------------- 1 file changed, 80 insertions(+), 68 deletions(-) diff --git a/tests/test_acm/test_acm.py b/tests/test_acm/test_acm.py index 790ae443091c..5a1596a4d05f 100644 --- a/tests/test_acm/test_acm.py +++ b/tests/test_acm/test_acm.py @@ -8,9 +8,10 @@ from botocore.exceptions import ClientError -from moto import mock_acm +from moto import mock_acm, settings from moto.core import ACCOUNT_ID +from nose import SkipTest from nose.tools import assert_raises RESOURCE_FOLDER = os.path.join(os.path.dirname(__file__), "resources") @@ -493,70 +494,81 @@ def test_request_certificate_no_san(): resp2.should.contain("Certificate") -# # Also tests the SAN code -# # requires Pull: https://github.com/spulec/freezegun/pull/210 -# @freeze_time("2012-01-01 12:00:00", as_arg=True) -# @mock_acm -# def test_request_certificate(frozen_time): -# # After requesting a certificate, it should then auto-validate after 1 minute -# # Some sneaky programming for that ;-) -# client = boto3.client('acm', region_name='eu-central-1') -# -# resp = client.request_certificate( -# DomainName='google.com', -# SubjectAlternativeNames=['google.com', 'www.google.com', 'mail.google.com'], -# ) -# arn = resp['CertificateArn'] -# -# resp = client.describe_certificate(CertificateArn=arn) -# resp['Certificate']['CertificateArn'].should.equal(arn) -# resp['Certificate']['DomainName'].should.equal('google.com') -# resp['Certificate']['Issuer'].should.equal('Amazon') -# resp['Certificate']['KeyAlgorithm'].should.equal('RSA_2048') -# resp['Certificate']['Status'].should.equal('PENDING_VALIDATION') -# resp['Certificate']['Type'].should.equal('AMAZON_ISSUED') -# len(resp['Certificate']['SubjectAlternativeNames']).should.equal(3) -# -# # Move time -# frozen_time.move_to('2012-01-01 12:02:00') -# resp = client.describe_certificate(CertificateArn=arn) -# resp['Certificate']['CertificateArn'].should.equal(arn) -# resp['Certificate']['Status'].should.equal('ISSUED') -# -# -# # requires Pull: https://github.com/spulec/freezegun/pull/210 -# @freeze_time("2012-01-01 12:00:00", as_arg=True) -# @mock_acm -# def test_request_certificate(frozen_time): -# # After requesting a certificate, it should then auto-validate after 1 minute -# # Some sneaky programming for that ;-) -# client = 
boto3.client('acm', region_name='eu-central-1') -# -# resp = client.request_certificate( -# IdempotencyToken='test_token', -# DomainName='google.com', -# SubjectAlternativeNames=['google.com', 'www.google.com', 'mail.google.com'], -# ) -# original_arn = resp['CertificateArn'] -# -# # Should be able to request a certificate multiple times in an hour -# # after that it makes a new one -# for time_intervals in ('2012-01-01 12:15:00', '2012-01-01 12:30:00', '2012-01-01 12:45:00'): -# frozen_time.move_to(time_intervals) -# resp = client.request_certificate( -# IdempotencyToken='test_token', -# DomainName='google.com', -# SubjectAlternativeNames=['google.com', 'www.google.com', 'mail.google.com'], -# ) -# arn = resp['CertificateArn'] -# arn.should.equal(original_arn) -# -# # Move time -# frozen_time.move_to('2012-01-01 13:01:00') -# resp = client.request_certificate( -# IdempotencyToken='test_token', -# DomainName='google.com', -# SubjectAlternativeNames=['google.com', 'www.google.com', 'mail.google.com'], -# ) -# arn = resp['CertificateArn'] -# arn.should_not.equal(original_arn) +# Also tests the SAN code +@freeze_time("2012-01-01 12:00:00", as_arg=True) +@mock_acm +def test_request_certificate_issued_status(frozen_time): + # After requesting a certificate, it should then auto-validate after 1 minute + # Some sneaky programming for that ;-) + client = boto3.client("acm", region_name="eu-central-1") + + resp = client.request_certificate( + DomainName="google.com", + SubjectAlternativeNames=["google.com", "www.google.com", "mail.google.com"], + ) + arn = resp["CertificateArn"] + + resp = client.describe_certificate(CertificateArn=arn) + resp["Certificate"]["CertificateArn"].should.equal(arn) + resp["Certificate"]["DomainName"].should.equal("google.com") + resp["Certificate"]["Issuer"].should.equal("Amazon") + resp["Certificate"]["KeyAlgorithm"].should.equal("RSA_2048") + resp["Certificate"]["Status"].should.equal("PENDING_VALIDATION") + resp["Certificate"]["Type"].should.equal("AMAZON_ISSUED") + len(resp["Certificate"]["SubjectAlternativeNames"]).should.equal(3) + + # validation will be pending for 1 minute. + resp = client.describe_certificate(CertificateArn=arn) + resp["Certificate"]["CertificateArn"].should.equal(arn) + resp["Certificate"]["Status"].should.equal("PENDING_VALIDATION") + + if not settings.TEST_SERVER_MODE: + # Move time to get it issued. 
+        frozen_time.move_to("2012-01-01 12:02:00")
+        resp = client.describe_certificate(CertificateArn=arn)
+        resp["Certificate"]["CertificateArn"].should.equal(arn)
+        resp["Certificate"]["Status"].should.equal("ISSUED")
+
+
+@freeze_time("2012-01-01 12:00:00", as_arg=True)
+@mock_acm
+def test_request_certificate_with_multiple_times(frozen_time):
+    if settings.TEST_SERVER_MODE:
+        raise SkipTest("Can't manipulate time in server mode")
+
+    # After requesting a certificate, it should then auto-validate after 1 minute
+    # Some sneaky programming for that ;-)
+    client = boto3.client("acm", region_name="eu-central-1")
+
+    resp = client.request_certificate(
+        IdempotencyToken="test_token",
+        DomainName="google.com",
+        SubjectAlternativeNames=["google.com", "www.google.com", "mail.google.com"],
+    )
+    original_arn = resp["CertificateArn"]
+
+    # Should be able to request a certificate multiple times in an hour
+    # after that it makes a new one
+    for time_intervals in (
+        "2012-01-01 12:15:00",
+        "2012-01-01 12:30:00",
+        "2012-01-01 12:45:00",
+    ):
+        frozen_time.move_to(time_intervals)
+        resp = client.request_certificate(
+            IdempotencyToken="test_token",
+            DomainName="google.com",
+            SubjectAlternativeNames=["google.com", "www.google.com", "mail.google.com"],
+        )
+        arn = resp["CertificateArn"]
+        arn.should.equal(original_arn)
+
+    # Move time
+    frozen_time.move_to("2012-01-01 13:01:00")
+    resp = client.request_certificate(
+        IdempotencyToken="test_token",
+        DomainName="google.com",
+        SubjectAlternativeNames=["google.com", "www.google.com", "mail.google.com"],
+    )
+    arn = resp["CertificateArn"]
+    arn.should_not.equal(original_arn)

From ea19466c381184a2bbfab946deaf982e33fc850c Mon Sep 17 00:00:00 2001
From: Brian Pandola
Date: Mon, 12 Oct 2020 12:53:30 -0700
Subject: [PATCH 578/658] Fix missing properties when ecs:TaskDefinition created via CloudFormation (#3378)

There's a larger problem here that needs a more generalized solution,
but this solves the immediate issue with a minimum amount of code.

Closes #3171
---
 moto/core/utils.py                        | 32 +++++++++++++++++++++
 moto/ecs/models.py                        |  8 +++---
 tests/test_ecs/test_ecs_cloudformation.py | 22 +++++++++++++++-
 3 files changed, 58 insertions(+), 4 deletions(-)

diff --git a/moto/core/utils.py b/moto/core/utils.py
index 235b895eca3c..5f35538de36e 100644
--- a/moto/core/utils.py
+++ b/moto/core/utils.py
@@ -57,6 +57,11 @@ def underscores_to_camelcase(argument):
     return result
 
 
+def pascal_to_camelcase(argument):
+    """Converts a PascalCase param to the camelCase equivalent"""
+    return argument[0].lower() + argument[1:]
+
+
 def method_names_from_class(clazz):
     # On Python 2, methods are different from functions, and the `inspect`
     # predicates distinguish between them. On Python 3, methods are just
@@ -367,3 +372,30 @@ def tags_from_cloudformation_tags_list(tags_list):
         tags[key] = value
 
     return tags
+
+
+def remap_nested_keys(root, key_transform):
+    """This remap ("recursive map") function is used to traverse and
+    transform the dictionary keys of arbitrarily nested structures.
+    List comprehensions do not recurse, making it tedious to apply
+    transforms to all keys in a tree-like structure.
+
+    A common issue for `moto` is changing the casing of dict keys:
+
+    >>> remap_nested_keys({'KeyName': 'Value'}, camelcase_to_underscores)
+    {'key_name': 'Value'}
+
+    Args:
+        root: The target data to traverse. Supports iterables like
+            :class:`list`, :class:`tuple`, and :class:`dict`.
+        key_transform (callable): This function is called on every
+            dictionary key found in *root*.
+ """ + if isinstance(root, (list, tuple)): + return [remap_nested_keys(item, key_transform) for item in root] + if isinstance(root, dict): + return { + key_transform(k): remap_nested_keys(v, key_transform) + for k, v in six.iteritems(root) + } + return root diff --git a/moto/ecs/models.py b/moto/ecs/models.py index 69ed51cb2380..a4522660e0ee 100644 --- a/moto/ecs/models.py +++ b/moto/ecs/models.py @@ -11,7 +11,7 @@ from moto.core import BaseBackend, BaseModel, CloudFormationModel from moto.core.exceptions import JsonRESTError -from moto.core.utils import unix_time +from moto.core.utils import unix_time, pascal_to_camelcase, remap_nested_keys from moto.ec2 import ec2_backends from .exceptions import ( ServiceNotFoundException, @@ -174,8 +174,10 @@ def create_from_cloudformation_json( family = properties.get( "Family", "task-definition-{0}".format(int(random() * 10 ** 6)) ) - container_definitions = properties["ContainerDefinitions"] - volumes = properties.get("Volumes") + container_definitions = remap_nested_keys( + properties.get("ContainerDefinitions", []), pascal_to_camelcase + ) + volumes = remap_nested_keys(properties.get("Volumes", []), pascal_to_camelcase) ecs_backend = ecs_backends[region_name] return ecs_backend.register_task_definition( diff --git a/tests/test_ecs/test_ecs_cloudformation.py b/tests/test_ecs/test_ecs_cloudformation.py index a34c89aa7a92..fcb1beec7832 100644 --- a/tests/test_ecs/test_ecs_cloudformation.py +++ b/tests/test_ecs/test_ecs_cloudformation.py @@ -2,6 +2,8 @@ import json from copy import deepcopy from moto import mock_cloudformation, mock_ecs +from moto.core.utils import pascal_to_camelcase, remap_nested_keys +import sure # noqa @mock_ecs @@ -231,9 +233,16 @@ def test_create_task_definition_through_cloudformation(): "Cpu": "200", "Memory": "500", "Essential": "true", + "PortMappings": [ + { + "ContainerPort": 123, + "HostPort": 123, + "Protocol": "tcp", + }, + ], } ], - "Volumes": [], + "Volumes": [{"Name": "ecs-vol"}], }, } }, @@ -252,3 +261,14 @@ def test_create_task_definition_through_cloudformation(): StackName=stack_name, LogicalResourceId="testTaskDefinition" )["StackResourceDetail"] task_definition_details["PhysicalResourceId"].should.equal(task_definition_arn) + + task_definition = ecs_conn.describe_task_definition( + taskDefinition=task_definition_arn + ).get("taskDefinition") + expected_properties = remap_nested_keys( + template["Resources"]["testTaskDefinition"]["Properties"], pascal_to_camelcase + ) + task_definition["volumes"].should.equal(expected_properties["volumes"]) + task_definition["containerDefinitions"].should.equal( + expected_properties["containerDefinitions"] + ) From ccda76898a024de0862bc20dd544fe40eba2b30d Mon Sep 17 00:00:00 2001 From: Brian Pandola Date: Wed, 14 Oct 2020 07:18:50 -0700 Subject: [PATCH 579/658] Add KMS Support to EBS Encrypted Volumes (#3383) * Properly coerce `Encrypted` attribute to bool on request/response. * Create and use a default AWS managed CMK for EBS when clients request an encrypted volume without specifying a KmsKeyId. NOTE: A client-provided KmsKeyId is simply stored as-is, and is not validated against the KMS backend. This is in keeping with other moto backends (RDS, Redshift) that currently also accept unvalidated customer master key (CMK) parameters, but could be an area for future improvement. 
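For illustration, a minimal sketch of the emulated flow, mirroring the new
tests below (the helper name, region, and size are illustrative):

    import boto3
    from moto import mock_ec2, mock_kms

    @mock_kms
    @mock_ec2
    def demo_default_ebs_key():
        ec2 = boto3.resource("ec2", region_name="us-east-1")
        kms = boto3.client("kms", region_name="us-east-1")
        # An encrypted volume without an explicit KmsKeyId falls back to the
        # AWS-managed default key, which is created lazily on first use.
        volume = ec2.create_volume(AvailabilityZone="us-east-1a", Encrypted=True, Size=10)
        default_arn = kms.describe_key(KeyId="alias/aws/ebs")["KeyMetadata"]["Arn"]
        assert volume.encrypted
        assert volume.kms_key_id == default_arn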
Closes #3248 --- moto/ec2/exceptions.py | 10 ++++ moto/ec2/models.py | 41 ++++++++++++-- moto/ec2/responses/elastic_block_store.py | 17 ++++-- tests/test_ec2/test_elastic_block_store.py | 63 ++++++++++++++++++++++ 4 files changed, 124 insertions(+), 7 deletions(-) diff --git a/moto/ec2/exceptions.py b/moto/ec2/exceptions.py index b2d7e8aab9a9..e14a60bf1c2c 100644 --- a/moto/ec2/exceptions.py +++ b/moto/ec2/exceptions.py @@ -573,3 +573,13 @@ def __init__(self): "InvalidLaunchTemplateName.AlreadyExistsException", "Launch template name already in use.", ) + + +class InvalidParameterDependency(EC2ClientError): + def __init__(self, param, param_needed): + super(InvalidParameterDependency, self).__init__( + "InvalidParameterDependency", + "The parameter [{0}] requires the parameter {1} to be set.".format( + param, param_needed + ), + ) diff --git a/moto/ec2/models.py b/moto/ec2/models.py index d1187ac9d23a..a7a34cbf9935 100644 --- a/moto/ec2/models.py +++ b/moto/ec2/models.py @@ -28,6 +28,7 @@ camelcase_to_underscores, ) from moto.core import ACCOUNT_ID +from moto.kms import kms_backends from .exceptions import ( CidrLimitExceeded, @@ -97,6 +98,7 @@ ResourceAlreadyAssociatedError, RulesPerSecurityGroupLimitExceededError, TagLimitExceeded, + InvalidParameterDependency, ) from .utils import ( EC2_RESOURCE_TO_PREFIX, @@ -2425,7 +2427,14 @@ def create_from_cloudformation_json( class Volume(TaggedEC2Resource, CloudFormationModel): def __init__( - self, ec2_backend, volume_id, size, zone, snapshot_id=None, encrypted=False + self, + ec2_backend, + volume_id, + size, + zone, + snapshot_id=None, + encrypted=False, + kms_key_id=None, ): self.id = volume_id self.size = size @@ -2435,6 +2444,7 @@ def __init__( self.snapshot_id = snapshot_id self.ec2_backend = ec2_backend self.encrypted = encrypted + self.kms_key_id = kms_key_id @staticmethod def cloudformation_name_type(): @@ -2548,7 +2558,13 @@ def __init__(self): self.snapshots = {} super(EBSBackend, self).__init__() - def create_volume(self, size, zone_name, snapshot_id=None, encrypted=False): + def create_volume( + self, size, zone_name, snapshot_id=None, encrypted=False, kms_key_id=None + ): + if kms_key_id and not encrypted: + raise InvalidParameterDependency("KmsKeyId", "Encrypted") + if encrypted and not kms_key_id: + kms_key_id = self._get_default_encryption_key() volume_id = random_volume_id() zone = self.get_zone_by_name(zone_name) if snapshot_id: @@ -2557,7 +2573,7 @@ def create_volume(self, size, zone_name, snapshot_id=None, encrypted=False): size = snapshot.volume.size if snapshot.encrypted: encrypted = snapshot.encrypted - volume = Volume(self, volume_id, size, zone, snapshot_id, encrypted) + volume = Volume(self, volume_id, size, zone, snapshot_id, encrypted, kms_key_id) self.volumes[volume_id] = volume return volume @@ -2705,6 +2721,25 @@ def remove_create_volume_permission(self, snapshot_id, user_ids=None, groups=Non return True + def _get_default_encryption_key(self): + # https://aws.amazon.com/kms/features/#AWS_Service_Integration + # An AWS managed CMK is created automatically when you first create + # an encrypted resource using an AWS service integrated with KMS. 
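+        # moto mimics that lazy creation: the default key and its
+        # "alias/aws/ebs" alias are created on first use below and
+        # then reused for subsequent encrypted volumes.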
+        kms = kms_backends[self.region_name]
+        ebs_alias = "alias/aws/ebs"
+        if not kms.alias_exists(ebs_alias):
+            key = kms.create_key(
+                policy="",
+                key_usage="ENCRYPT_DECRYPT",
+                customer_master_key_spec="SYMMETRIC_DEFAULT",
+                description="Default master key that protects my EBS volumes when no other key is defined",
+                tags=None,
+                region=self.region_name,
+            )
+            kms.add_alias(key.id, ebs_alias)
+        ebs_key = kms.describe_key(ebs_alias)
+        return ebs_key.arn
+
 
 class VPC(TaggedEC2Resource, CloudFormationModel):
     def __init__(
diff --git a/moto/ec2/responses/elastic_block_store.py b/moto/ec2/responses/elastic_block_store.py
index 853af936d0ba..fd237e2e480a 100644
--- a/moto/ec2/responses/elastic_block_store.py
+++ b/moto/ec2/responses/elastic_block_store.py
@@ -46,9 +46,12 @@ def create_volume(self):
         snapshot_id = self._get_param("SnapshotId")
         tags = self._parse_tag_specification("TagSpecification")
         volume_tags = tags.get("volume", {})
-        encrypted = self._get_param("Encrypted", if_none=False)
+        encrypted = self._get_bool_param("Encrypted", if_none=False)
+        kms_key_id = self._get_param("KmsKeyId")
         if self.is_not_dryrun("CreateVolume"):
-            volume = self.ec2_backend.create_volume(size, zone, snapshot_id, encrypted)
+            volume = self.ec2_backend.create_volume(
+                size, zone, snapshot_id, encrypted, kms_key_id
+            )
             volume.add_tags(volume_tags)
             template = self.response_template(CREATE_VOLUME_RESPONSE)
             return template.render(volume=volume)
@@ -161,7 +164,10 @@ def reset_snapshot_attribute(self):
   {% else %}
   <snapshotId/>
   {% endif %}
-  <encrypted>{{ volume.encrypted }}</encrypted>
+  <encrypted>{{ 'true' if volume.encrypted else 'false' }}</encrypted>
+  {% if volume.encrypted %}
+  <kmsKeyId>{{ volume.kms_key_id }}</kmsKeyId>
+  {% endif %}
   <availabilityZone>{{ volume.zone.name }}</availabilityZone>
   <status>creating</status>
   <createTime>{{ volume.create_time}}</createTime>
@@ -192,7 +198,10 @@
   {% else %}
   <snapshotId/>
   {% endif %}
-  <encrypted>{{ volume.encrypted }}</encrypted>
+  <encrypted>{{ 'true' if volume.encrypted else 'false' }}</encrypted>
+  {% if volume.encrypted %}
+  <kmsKeyId>{{ volume.kms_key_id }}</kmsKeyId>
+  {% endif %}
   <availabilityZone>{{ volume.zone.name }}</availabilityZone>
   <status>{{ volume.status }}</status>
   <createTime>{{ volume.create_time}}</createTime>
diff --git a/tests/test_ec2/test_elastic_block_store.py b/tests/test_ec2/test_elastic_block_store.py
index ef140b06e10e..2a5dfbf2ad15 100644
--- a/tests/test_ec2/test_elastic_block_store.py
+++ b/tests/test_ec2/test_elastic_block_store.py
@@ -13,6 +13,7 @@
 
 from moto import mock_ec2_deprecated, mock_ec2
 from moto.ec2.models import OWNER_ID
+from moto.kms import mock_kms
 
 
 @mock_ec2_deprecated
@@ -915,3 +916,65 @@ def test_search_for_many_snapshots():
 
     snapshots_response = ec2_client.describe_snapshots(SnapshotIds=snapshot_ids)
     assert len(snapshots_response["Snapshots"]) == len(snapshot_ids)
+
+
+@mock_ec2
+def test_create_unencrypted_volume_with_kms_key_fails():
+    resource = boto3.resource("ec2", region_name="us-east-1")
+    with assert_raises(ClientError) as ex:
+        resource.create_volume(
+            AvailabilityZone="us-east-1a", Encrypted=False, KmsKeyId="key", Size=10
+        )
+    ex.exception.response["Error"]["Code"].should.equal("InvalidParameterDependency")
+    ex.exception.response["Error"]["Message"].should.contain("KmsKeyId")
+
+
+@mock_kms
+@mock_ec2
+def test_create_encrypted_volume_without_kms_key_should_use_default_key():
+    kms = boto3.client("kms", region_name="us-east-1")
+    # Default master key for EBS does not exist until needed.
+    with assert_raises(ClientError) as ex:
+        kms.describe_key(KeyId="alias/aws/ebs")
+    ex.exception.response["Error"]["Code"].should.equal("NotFoundException")
+    # Creating an encrypted volume should create (and use) the default key.
+ resource = boto3.resource("ec2", region_name="us-east-1") + volume = resource.create_volume( + AvailabilityZone="us-east-1a", Encrypted=True, Size=10 + ) + default_ebs_key_arn = kms.describe_key(KeyId="alias/aws/ebs")["KeyMetadata"]["Arn"] + volume.kms_key_id.should.equal(default_ebs_key_arn) + volume.encrypted.should.be.true + # Subsequent encrypted volumes should use the now-created default key. + volume = resource.create_volume( + AvailabilityZone="us-east-1a", Encrypted=True, Size=10 + ) + volume.kms_key_id.should.equal(default_ebs_key_arn) + volume.encrypted.should.be.true + + +@mock_ec2 +def test_create_volume_with_kms_key(): + resource = boto3.resource("ec2", region_name="us-east-1") + volume = resource.create_volume( + AvailabilityZone="us-east-1a", Encrypted=True, KmsKeyId="key", Size=10 + ) + volume.kms_key_id.should.equal("key") + volume.encrypted.should.be.true + + +@mock_ec2 +def test_kms_key_id_property_hidden_when_volume_not_encrypted(): + client = boto3.client("ec2", region_name="us-east-1") + resp = client.create_volume(AvailabilityZone="us-east-1a", Encrypted=False, Size=10) + resp["Encrypted"].should.be.false + resp.should_not.have.key("KmsKeyId") + resp = client.describe_volumes(VolumeIds=[resp["VolumeId"]]) + resp["Volumes"][0]["Encrypted"].should.be.false + resp["Volumes"][0].should_not.have.key("KmsKeyId") + resource = boto3.resource("ec2", region_name="us-east-1") + volume = resource.create_volume( + AvailabilityZone="us-east-1a", Encrypted=False, Size=10 + ) + volume.encrypted.should.be.false + volume.kms_key_id.should.be.none From 99556620a95da8fbfc7bca71e1a904f5f54f08be Mon Sep 17 00:00:00 2001 From: Brian Pandola Date: Wed, 14 Oct 2020 08:32:42 -0700 Subject: [PATCH 580/658] Fix: Empty sets not removed from item after UpdateExpression (#3386) DynamoDB does not support empty sets. If the last item in a set is deleted as part of an UpdateExpression, the attribute must be removed. Ref: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/HowItWorks.NamingRulesDataTypes.html Fixes #3296 --- moto/dynamodb2/parsing/executors.py | 7 ++++++ tests/test_dynamodb2/test_dynamodb.py | 34 +++++++++++++++++++++++++++ 2 files changed, 41 insertions(+) diff --git a/moto/dynamodb2/parsing/executors.py b/moto/dynamodb2/parsing/executors.py index 2f2f2bb8219c..76642542d247 100644 --- a/moto/dynamodb2/parsing/executors.py +++ b/moto/dynamodb2/parsing/executors.py @@ -161,6 +161,13 @@ def execute(self, item): # DynamoDB does not mind if value is not present pass + # DynamoDB does not support empty sets. If we've deleted + # the last item in the set, we have to remove the attribute. 
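+        # Ref: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/HowItWorks.NamingRulesDataTypes.html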
+ if not string_set_list: + element = self.get_element_to_action() + container = self.get_item_before_end_of_path(item) + container.pop(element.get_attribute_name()) + class RemoveExecutor(NodeExecutor): def execute(self, item): diff --git a/tests/test_dynamodb2/test_dynamodb.py b/tests/test_dynamodb2/test_dynamodb.py index e2dd744e383c..06dfec01e8a9 100644 --- a/tests/test_dynamodb2/test_dynamodb.py +++ b/tests/test_dynamodb2/test_dynamodb.py @@ -5445,3 +5445,37 @@ def test_lsi_projection_type_keys_only(): items[0].should.equal( {"partitionKey": "pk-1", "sortKey": "sk-1", "lsiK1SortKey": "lsi-sk"} ) + + +@mock_dynamodb2 +def test_set_attribute_is_dropped_if_empty_after_update_expression(): + table_name, item_key, set_item = "test-table", "test-id", "test-data" + client = boto3.client("dynamodb", region_name="us-east-1") + client.create_table( + TableName=table_name, + KeySchema=[{"AttributeName": "customer", "KeyType": "HASH"}], + AttributeDefinitions=[{"AttributeName": "customer", "AttributeType": "S"}], + ProvisionedThroughput={"ReadCapacityUnits": 5, "WriteCapacityUnits": 5}, + ) + + client.update_item( + TableName=table_name, + Key={"customer": {"S": item_key}}, + UpdateExpression="ADD orders :order", + ExpressionAttributeValues={":order": {"SS": [set_item]}}, + ) + resp = client.scan(TableName=table_name, ProjectionExpression="customer, orders") + item = resp["Items"][0] + item.should.have.key("customer") + item.should.have.key("orders") + + client.update_item( + TableName=table_name, + Key={"customer": {"S": item_key}}, + UpdateExpression="DELETE orders :order", + ExpressionAttributeValues={":order": {"SS": [set_item]}}, + ) + resp = client.scan(TableName=table_name, ProjectionExpression="customer, orders") + item = resp["Items"][0] + item.should.have.key("customer") + item.should_not.have.key("orders") From 6505c893b88ad703f1d5f8ede541e888f66da5f5 Mon Sep 17 00:00:00 2001 From: Brian Pandola Date: Fri, 16 Oct 2020 03:29:26 -0700 Subject: [PATCH 581/658] Fix: S3 Bucket does not support attribute type Arn in Fn::GetAtt (#3388) Fixes #3387 --- moto/s3/models.py | 2 ++ tests/test_s3/test_s3_cloudformation.py | 34 +++++++++++++++++++++++++ 2 files changed, 36 insertions(+) diff --git a/moto/s3/models.py b/moto/s3/models.py index 41857e0a8a4b..c0c5512dde7b 100644 --- a/moto/s3/models.py +++ b/moto/s3/models.py @@ -1065,6 +1065,8 @@ def get_cfn_attribute(self, attribute_name): raise NotImplementedError('"Fn::GetAtt" : [ "{0}" , "DomainName" ]"') elif attribute_name == "WebsiteURL": raise NotImplementedError('"Fn::GetAtt" : [ "{0}" , "WebsiteURL" ]"') + elif attribute_name == "Arn": + return self.arn raise UnformattedGetAttTemplateException() def set_acl(self, acl): diff --git a/tests/test_s3/test_s3_cloudformation.py b/tests/test_s3/test_s3_cloudformation.py index 69d0c9f984c0..e3803aa2c8eb 100644 --- a/tests/test_s3/test_s3_cloudformation.py +++ b/tests/test_s3/test_s3_cloudformation.py @@ -143,3 +143,37 @@ def test_s3_bucket_cloudformation_update_replacement(): cf.update_stack(StackName="test_stack", TemplateBody=template_json) stack_description = cf.describe_stacks(StackName="test_stack")["Stacks"][0] s3.head_bucket(Bucket=stack_description["Outputs"][0]["OutputValue"]) + + +@mock_s3 +@mock_cloudformation +def test_s3_bucket_cloudformation_outputs(): + s3 = boto3.client("s3", region_name="us-east-1") + cf = boto3.resource("cloudformation", region_name="us-east-1") + stack_name = "test-stack" + bucket_name = "test-bucket" + template = { + "AWSTemplateFormatVersion": "2010-09-09", + 
"Resources": { + "TestBucket": { + "Type": "AWS::S3::Bucket", + "Properties": {"BucketName": bucket_name}, + } + }, + "Outputs": { + "BucketARN": { + "Value": {"Fn::GetAtt": ["TestBucket", "Arn"]}, + "Export": {"Name": {"Fn::Sub": "${AWS::StackName}:BucketARN"}}, + }, + "BucketName": { + "Value": {"Ref": "TestBucket"}, + "Export": {"Name": {"Fn::Sub": "${AWS::StackName}:BucketName"}}, + }, + }, + } + cf.create_stack(StackName=stack_name, TemplateBody=json.dumps(template)) + outputs_list = cf.Stack(stack_name).outputs + output = {item["OutputKey"]: item["OutputValue"] for item in outputs_list} + s3.head_bucket(Bucket=output["BucketName"]) + output["BucketARN"].should.match("arn:aws:s3.+{bucket}".format(bucket=bucket_name)) + output["BucketName"].should.equal(bucket_name) From 28c1690fc2a9c5b785853fa169b59ff2a2a459ad Mon Sep 17 00:00:00 2001 From: Brian Pandola Date: Fri, 16 Oct 2020 04:30:07 -0700 Subject: [PATCH 582/658] Add Support for SNS Topic `KmsMasterKeyId` Attribute (#3389) We do not do any validation of the `KmsMasterKeyId` attribute, and simply store it as-as. This mimics the behavior in AWS, where the key is not validated until it is actually used (when publishing[1]). [1]: https://docs.aws.amazon.com/sns/latest/dg/sns-server-side-encryption.html Closes #3216 --- moto/sns/models.py | 1 + moto/sns/responses.py | 48 ++++++++++++++++------------- tests/test_sns/test_topics.py | 22 +++++++++++++ tests/test_sns/test_topics_boto3.py | 24 +++++++++++++++ 4 files changed, 74 insertions(+), 21 deletions(-) diff --git a/moto/sns/models.py b/moto/sns/models.py index 6ac709098172..ea0790c6a5fe 100644 --- a/moto/sns/models.py +++ b/moto/sns/models.py @@ -45,6 +45,7 @@ def __init__(self, name, sns_backend): self.account_id = DEFAULT_ACCOUNT_ID self.display_name = "" self.delivery_policy = "" + self.kms_master_key_id = "" self.effective_delivery_policy = json.dumps(DEFAULT_EFFECTIVE_DELIVERY_POLICY) self.arn = make_arn_for_topic(self.account_id, name, sns_backend.region_name) diff --git a/moto/sns/responses.py b/moto/sns/responses.py index 7fdc37ab6362..dd30d65175c4 100644 --- a/moto/sns/responses.py +++ b/moto/sns/responses.py @@ -158,28 +158,28 @@ def get_topic_attributes(self): topic = self.backend.get_topic(topic_arn) if self.request_json: - return json.dumps( - { - "GetTopicAttributesResponse": { - "GetTopicAttributesResult": { - "Attributes": { - "Owner": topic.account_id, - "Policy": topic.policy, - "TopicArn": topic.arn, - "DisplayName": topic.display_name, - "SubscriptionsPending": topic.subscriptions_pending, - "SubscriptionsConfirmed": topic.subscriptions_confimed, - "SubscriptionsDeleted": topic.subscriptions_deleted, - "DeliveryPolicy": topic.delivery_policy, - "EffectiveDeliveryPolicy": topic.effective_delivery_policy, - } - }, - "ResponseMetadata": { - "RequestId": "057f074c-33a7-11df-9540-99d0768312d3" - }, - } + attributes = { + "Owner": topic.account_id, + "Policy": topic.policy, + "TopicArn": topic.arn, + "DisplayName": topic.display_name, + "SubscriptionsPending": topic.subscriptions_pending, + "SubscriptionsConfirmed": topic.subscriptions_confimed, + "SubscriptionsDeleted": topic.subscriptions_deleted, + "DeliveryPolicy": topic.delivery_policy, + "EffectiveDeliveryPolicy": topic.effective_delivery_policy, + } + if topic.kms_master_key_id: + attributes["KmsMasterKeyId"] = topic.kms_master_key_id + response = { + "GetTopicAttributesResponse": { + "GetTopicAttributesResult": {"Attributes": attributes}, + "ResponseMetadata": { + "RequestId": 
"057f074c-33a7-11df-9540-99d0768312d3" + }, } - ) + } + return json.dumps(response) template = self.response_template(GET_TOPIC_ATTRIBUTES_TEMPLATE) return template.render(topic=topic) @@ -827,6 +827,12 @@ def untag_resource(self): EffectiveDeliveryPolicy {{ topic.effective_delivery_policy }} + {% if topic.kms_master_key_id %} + + KmsMasterKeyId + {{ topic.kms_master_key_id }} + + {% endif %} diff --git a/tests/test_sns/test_topics.py b/tests/test_sns/test_topics.py index b561b94a18a2..e46c44cc745b 100644 --- a/tests/test_sns/test_topics.py +++ b/tests/test_sns/test_topics.py @@ -168,3 +168,25 @@ def test_topic_paging(): topics_list.should.have.length_of(int(DEFAULT_PAGE_SIZE / 2)) next_token.should.equal(None) + + +@mock_sns_deprecated +def test_topic_kms_master_key_id_attribute(): + conn = boto.connect_sns() + + conn.create_topic("test-sns-no-key-attr") + topics_json = conn.get_all_topics() + topic_arn = topics_json["ListTopicsResponse"]["ListTopicsResult"]["Topics"][0][ + "TopicArn" + ] + attributes = conn.get_topic_attributes(topic_arn)["GetTopicAttributesResponse"][ + "GetTopicAttributesResult" + ]["Attributes"] + attributes.should_not.have.key("KmsMasterKeyId") + + conn.set_topic_attributes(topic_arn, "KmsMasterKeyId", "test-key") + attributes = conn.get_topic_attributes(topic_arn)["GetTopicAttributesResponse"][ + "GetTopicAttributesResult" + ]["Attributes"] + attributes.should.have.key("KmsMasterKeyId") + attributes["KmsMasterKeyId"].should.equal("test-key") diff --git a/tests/test_sns/test_topics_boto3.py b/tests/test_sns/test_topics_boto3.py index a2d12f56f4ac..49aa656aaa3c 100644 --- a/tests/test_sns/test_topics_boto3.py +++ b/tests/test_sns/test_topics_boto3.py @@ -520,3 +520,27 @@ def test_untag_resource_error(): conn.untag_resource.when.called_with( ResourceArn="not-existing-topic", TagKeys=["tag_key_1"] ).should.throw(ClientError, "Resource does not exist") + + +@mock_sns +def test_topic_kms_master_key_id_attribute(): + client = boto3.client("sns", region_name="us-west-2") + resp = client.create_topic(Name="test-sns-no-key-attr",) + topic_arn = resp["TopicArn"] + resp = client.get_topic_attributes(TopicArn=topic_arn) + resp["Attributes"].should_not.have.key("KmsMasterKeyId") + + client.set_topic_attributes( + TopicArn=topic_arn, AttributeName="KmsMasterKeyId", AttributeValue="test-key" + ) + resp = client.get_topic_attributes(TopicArn=topic_arn) + resp["Attributes"].should.have.key("KmsMasterKeyId") + resp["Attributes"]["KmsMasterKeyId"].should.equal("test-key") + + resp = client.create_topic( + Name="test-sns-with-key-attr", Attributes={"KmsMasterKeyId": "key-id",} + ) + topic_arn = resp["TopicArn"] + resp = client.get_topic_attributes(TopicArn=topic_arn) + resp["Attributes"].should.have.key("KmsMasterKeyId") + resp["Attributes"]["KmsMasterKeyId"].should.equal("key-id") From 2fe3aee359170a372a75b7d436935112d6e8df27 Mon Sep 17 00:00:00 2001 From: davidaah Date: Fri, 16 Oct 2020 11:02:01 -0400 Subject: [PATCH 583/658] Allow creation of subnets from secondary VPC IPv4 CIDR blocks (#3391) * allow subnets to be created from secondary vpc cidr block * add additional test case for invalid cidr --- moto/ec2/models.py | 25 +++++++++++---- tests/test_ec2/test_subnets.py | 58 ++++++++++++++++++++++++++++++++++ 2 files changed, 76 insertions(+), 7 deletions(-) diff --git a/moto/ec2/models.py b/moto/ec2/models.py index a7a34cbf9935..6666a964ba57 100644 --- a/moto/ec2/models.py +++ b/moto/ec2/models.py @@ -3465,19 +3465,30 @@ def create_subnet( vpc = self.get_vpc( vpc_id ) # Validate VPC 
exists and the supplied CIDR block is a subnet of the VPC's - vpc_cidr_block = ipaddress.IPv4Network( - six.text_type(vpc.cidr_block), strict=False - ) + vpc_cidr_blocks = [ + ipaddress.IPv4Network( + six.text_type(cidr_block_association["cidr_block"]), strict=False + ) + for cidr_block_association in vpc.get_cidr_block_association_set() + ] try: subnet_cidr_block = ipaddress.IPv4Network( six.text_type(cidr_block), strict=False ) except ValueError: raise InvalidCIDRBlockParameterError(cidr_block) - if not ( - vpc_cidr_block.network_address <= subnet_cidr_block.network_address - and vpc_cidr_block.broadcast_address >= subnet_cidr_block.broadcast_address - ): + + subnet_in_vpc_cidr_range = False + for vpc_cidr_block in vpc_cidr_blocks: + if ( + vpc_cidr_block.network_address <= subnet_cidr_block.network_address + and vpc_cidr_block.broadcast_address + >= subnet_cidr_block.broadcast_address + ): + subnet_in_vpc_cidr_range = True + break + + if not subnet_in_vpc_cidr_range: raise InvalidSubnetRangeError(cidr_block) for subnet in self.get_all_subnets(filters={"vpc-id": vpc_id}): diff --git a/tests/test_ec2/test_subnets.py b/tests/test_ec2/test_subnets.py index 2d30171f0d1d..416235f434a7 100644 --- a/tests/test_ec2/test_subnets.py +++ b/tests/test_ec2/test_subnets.py @@ -417,6 +417,24 @@ def test_create_subnet_with_invalid_cidr_range(): ) +@mock_ec2 +def test_create_subnet_with_invalid_cidr_range_multiple_vpc_cidr_blocks(): + ec2 = boto3.resource("ec2", region_name="us-west-1") + + vpc = ec2.create_vpc(CidrBlock="10.0.0.0/16") + ec2.meta.client.associate_vpc_cidr_block(CidrBlock="10.1.0.0/16", VpcId=vpc.id) + vpc.reload() + vpc.is_default.shouldnt.be.ok + + subnet_cidr_block = "10.2.0.0/20" + with assert_raises(ClientError) as ex: + subnet = ec2.create_subnet(VpcId=vpc.id, CidrBlock=subnet_cidr_block) + str(ex.exception).should.equal( + "An error occurred (InvalidSubnet.Range) when calling the CreateSubnet " + "operation: The CIDR '{}' is invalid.".format(subnet_cidr_block) + ) + + @mock_ec2 def test_create_subnet_with_invalid_cidr_block_parameter(): ec2 = boto3.resource("ec2", region_name="us-west-1") @@ -436,6 +454,46 @@ def test_create_subnet_with_invalid_cidr_block_parameter(): ) +@mock_ec2 +def test_create_subnets_with_multiple_vpc_cidr_blocks(): + ec2 = boto3.resource("ec2", region_name="us-west-1") + client = boto3.client("ec2", region_name="us-west-1") + + vpc = ec2.create_vpc(CidrBlock="10.0.0.0/16") + ec2.meta.client.associate_vpc_cidr_block(CidrBlock="10.1.0.0/16", VpcId=vpc.id) + vpc.reload() + vpc.is_default.shouldnt.be.ok + + subnet_cidr_block_primary = "10.0.0.0/24" + subnet_primary = ec2.create_subnet( + VpcId=vpc.id, CidrBlock=subnet_cidr_block_primary + ) + + subnet_cidr_block_secondary = "10.1.0.0/24" + subnet_secondary = ec2.create_subnet( + VpcId=vpc.id, CidrBlock=subnet_cidr_block_secondary + ) + + subnets = client.describe_subnets( + SubnetIds=[subnet_primary.id, subnet_secondary.id] + )["Subnets"] + subnets.should.have.length_of(2) + + for subnet in subnets: + subnet.should.have.key("AvailabilityZone") + subnet.should.have.key("AvailabilityZoneId") + subnet.should.have.key("AvailableIpAddressCount") + subnet.should.have.key("CidrBlock") + subnet.should.have.key("State") + subnet.should.have.key("SubnetId") + subnet.should.have.key("VpcId") + subnet.shouldnt.have.key("Tags") + subnet.should.have.key("DefaultForAz").which.should.equal(False) + subnet.should.have.key("MapPublicIpOnLaunch").which.should.equal(False) + subnet.should.have.key("OwnerId") + 
subnet.should.have.key("AssignIpv6AddressOnCreation").which.should.equal(False) + + @mock_ec2 def test_create_subnets_with_overlapping_cidr_blocks(): ec2 = boto3.resource("ec2", region_name="us-west-1") From fcc85a96453d7aa346ddd43e8fa4cbdf9dd120e9 Mon Sep 17 00:00:00 2001 From: usmangani1 Date: Mon, 19 Oct 2020 13:34:38 +0530 Subject: [PATCH 584/658] Including Message attributes when ALL is passed (#3393) * Including Message attributes when ALL is passes * Added tests Co-authored-by: usmankb --- moto/sqs/models.py | 3 ++- tests/test_sqs/test_sqs.py | 22 ++++++++++++++++++++++ 2 files changed, 24 insertions(+), 1 deletion(-) diff --git a/moto/sqs/models.py b/moto/sqs/models.py index a837aacdc3a1..34e81be8a8d1 100644 --- a/moto/sqs/models.py +++ b/moto/sqs/models.py @@ -537,8 +537,9 @@ def policy(self, policy): def _filter_message_attributes(message, input_message_attributes): filtered_message_attributes = {} + return_all = "All" in input_message_attributes for key, value in message.message_attributes.items(): - if key in input_message_attributes: + if return_all or key in input_message_attributes: filtered_message_attributes[key] = value message.message_attributes = filtered_message_attributes diff --git a/tests/test_sqs/test_sqs.py b/tests/test_sqs/test_sqs.py index 05f4bffabbc1..6305a163abe3 100644 --- a/tests/test_sqs/test_sqs.py +++ b/tests/test_sqs/test_sqs.py @@ -1320,6 +1320,28 @@ def test_message_attributes_in_receive_message(): messages[0].get("MessageAttributes").should.equal(None) + queue.send_message( + MessageBody=body_one, + MessageAttributes={ + "timestamp": { + "StringValue": "1493147359900", + "DataType": "Number.java.lang.Long", + } + }, + ) + messages = conn.receive_message( + QueueUrl=queue.url, MaxNumberOfMessages=2, MessageAttributeNames=["All"] + )["Messages"] + + messages[0]["MessageAttributes"].should.equal( + { + "timestamp": { + "StringValue": "1493147359900", + "DataType": "Number.java.lang.Long", + } + } + ) + @mock_sqs def test_send_message_batch_errors(): From 9eb58eea411214ded617ea45ce9a4407a8f95194 Mon Sep 17 00:00:00 2001 From: Brian Pandola Date: Wed, 21 Oct 2020 01:47:09 -0700 Subject: [PATCH 585/658] Fix: `nextToken` value in logs:DescribeLogGroups response (#3398) The pagination for this endpoint has been modified to more closely model the real AWS behavior: * Log Groups are now sorted alphabetically by `logGroupName`. * `nextToken` is now a string containing the last `logGroupName` in the current response. * Specifying an invalid `nextToken` does not generate an error, but does return an empty group list. * `nextToken` is not included in the response if there are no additional items to return. 
Fixes #3395 --- moto/logs/models.py | 35 +++++++++++++++++++++++++++-------- moto/logs/responses.py | 5 ++++- tests/test_logs/test_logs.py | 36 ++++++++++++++++++++++++++++++++++++ 3 files changed, 67 insertions(+), 9 deletions(-) diff --git a/moto/logs/models.py b/moto/logs/models.py index dcc0e85e1bea..8425f87f20e9 100644 --- a/moto/logs/models.py +++ b/moto/logs/models.py @@ -485,20 +485,39 @@ def delete_log_group(self, log_group_name): def describe_log_groups(self, limit, log_group_name_prefix, next_token): if log_group_name_prefix is None: log_group_name_prefix = "" - if next_token is None: - next_token = 0 groups = [ group.to_describe_dict() for name, group in self.groups.items() if name.startswith(log_group_name_prefix) ] - groups = sorted(groups, key=lambda x: x["creationTime"], reverse=True) - groups_page = groups[next_token : next_token + limit] - - next_token += limit - if next_token >= len(groups): - next_token = None + groups = sorted(groups, key=lambda x: x["logGroupName"]) + + index_start = 0 + if next_token: + try: + index_start = ( + next( + index + for (index, d) in enumerate(groups) + if d["logGroupName"] == next_token + ) + + 1 + ) + except StopIteration: + index_start = 0 + # AWS returns an empty list if it receives an invalid token. + groups = [] + + index_end = index_start + limit + if index_end > len(groups): + index_end = len(groups) + + groups_page = groups[index_start:index_end] + + next_token = None + if groups_page and index_end < len(groups): + next_token = groups_page[-1]["logGroupName"] return groups_page, next_token diff --git a/moto/logs/responses.py b/moto/logs/responses.py index 9e6886a42647..715c4b5c1095 100644 --- a/moto/logs/responses.py +++ b/moto/logs/responses.py @@ -42,7 +42,10 @@ def describe_log_groups(self): groups, next_token = self.logs_backend.describe_log_groups( limit, log_group_name_prefix, next_token ) - return json.dumps({"logGroups": groups, "nextToken": next_token}) + result = {"logGroups": groups} + if next_token: + result["nextToken"] = next_token + return json.dumps(result) def create_log_stream(self): log_group_name = self._get_param("logGroupName") diff --git a/tests/test_logs/test_logs.py b/tests/test_logs/test_logs.py index e234cc561a93..648d561aa90a 100644 --- a/tests/test_logs/test_logs.py +++ b/tests/test_logs/test_logs.py @@ -458,3 +458,39 @@ def test_describe_subscription_filters_errors(): ex.response["Error"]["Message"].should.equal( "The specified log group does not exist" ) + + +@mock_logs +def test_describe_log_groups_paging(): + client = boto3.client("logs", "us-east-1") + + group_names = [ + "/aws/lambda/lowercase-dev", + "/aws/lambda/FileMonitoring", + "/aws/events/GetMetricData", + "/aws/lambda/fileAvailable", + ] + + for name in group_names: + client.create_log_group(logGroupName=name) + + resp = client.describe_log_groups() + resp["logGroups"].should.have.length_of(4) + resp.should_not.have.key("nextToken") + + resp = client.describe_log_groups(limit=2) + resp["logGroups"].should.have.length_of(2) + resp["nextToken"].should.equal("/aws/lambda/FileMonitoring") + + resp = client.describe_log_groups(nextToken=resp["nextToken"], limit=1) + resp["logGroups"].should.have.length_of(1) + resp["nextToken"].should.equal("/aws/lambda/fileAvailable") + + resp = client.describe_log_groups(nextToken=resp["nextToken"]) + resp["logGroups"].should.have.length_of(1) + resp["logGroups"][0]["logGroupName"].should.equal("/aws/lambda/lowercase-dev") + resp.should_not.have.key("nextToken") + + resp = 
client.describe_log_groups(nextToken="invalid-token")
+    resp["logGroups"].should.have.length_of(0)
+    resp.should_not.have.key("nextToken")

From 14980371d796ef849b1ea401ce2c028911b537d1 Mon Sep 17 00:00:00 2001
From: usmangani1 
Date: Thu, 22 Oct 2020 15:44:32 +0530
Subject: [PATCH 586/658] FIX: Add Secrets Manager tag_resource functionality
 (#3392)

* FIX: Add Secrets Manager tag_resource functionality

* Fixed review comments

Co-authored-by: usmankb 
---
 moto/secretsmanager/models.py                 | 13 ++++++++
 moto/secretsmanager/responses.py              |  5 ++++
 .../test_secretsmanager.py                    | 30 +++++++++++++++++++
 3 files changed, 48 insertions(+)

diff --git a/moto/secretsmanager/models.py b/moto/secretsmanager/models.py
index f39b91eaab87..46c1d1f05374 100644
--- a/moto/secretsmanager/models.py
+++ b/moto/secretsmanager/models.py
@@ -565,6 +565,19 @@ def restore_secret(self, secret_id):

         return secret.arn, secret.name

+    def tag_resource(self, secret_id, tags):
+
+        if secret_id not in self.secrets.keys():
+            raise SecretNotFoundException()
+
+        secret = self.secrets[secret_id]
+        old_tags = secret.tags
+
+        for tag in tags:
+            old_tags.append(tag)
+
+        return secret_id
+
     @staticmethod
     def get_resource_policy(secret_id):
         resource_policy = {
diff --git a/moto/secretsmanager/responses.py b/moto/secretsmanager/responses.py
index fcf991ea2557..e1c0517db51f 100644
--- a/moto/secretsmanager/responses.py
+++ b/moto/secretsmanager/responses.py
@@ -157,3 +157,8 @@ def get_resource_policy(self):
         return secretsmanager_backends[self.region].get_resource_policy(
             secret_id=secret_id
         )
+
+    def tag_resource(self):
+        secret_id = self._get_param("SecretId")
+        tags = self._get_param("Tags", if_none=[])
+        return secretsmanager_backends[self.region].tag_resource(secret_id, tags)
diff --git a/tests/test_secretsmanager/test_secretsmanager.py b/tests/test_secretsmanager/test_secretsmanager.py
index dcb3b9b0c063..cbcee74994b2 100644
--- a/tests/test_secretsmanager/test_secretsmanager.py
+++ b/tests/test_secretsmanager/test_secretsmanager.py
@@ -912,3 +912,33 @@ def test_update_secret_marked_as_deleted_after_restoring():
     assert updated_secret["ARN"]
     assert updated_secret["Name"] == "test-secret"
     assert updated_secret["VersionId"] != ""
+
+
+@mock_secretsmanager
+def test_tag_resource():
+    conn = boto3.client("secretsmanager", region_name="us-west-2")
+    conn.create_secret(Name="test-secret", SecretString="foosecret")
+    conn.tag_resource(
+        SecretId="test-secret", Tags=[{"Key": "FirstTag", "Value": "SomeValue"},],
+    )
+
+    conn.tag_resource(
+        SecretId="test-secret", Tags=[{"Key": "SecondTag", "Value": "AnotherValue"},],
+    )
+
+    secrets = conn.list_secrets()
+    assert secrets["SecretList"][0].get("Tags") == [
+        {"Key": "FirstTag", "Value": "SomeValue"},
+        {"Key": "SecondTag", "Value": "AnotherValue"},
+    ]
+
+    with assert_raises(ClientError) as cm:
+        conn.tag_resource(
+            SecretId="dummy-test-secret",
+            Tags=[{"Key": "FirstTag", "Value": "SomeValue"},],
+        )
+
+    assert_equal(
+        "Secrets Manager can't find the specified secret.",
+        cm.exception.response["Error"]["Message"],
+    )

From b3ae6a0f12ba86fe40d5d5dae0d75d01e126db39 Mon Sep 17 00:00:00 2001
From: Bert Blommers 
Date: Sat, 24 Oct 2020 11:16:30 +0100
Subject: [PATCH 587/658] #3411 - CF - Allow multiple cfn-lint API versions

---
 moto/cloudformation/utils.py | 7 ++++++-
 1 file changed, 6 insertions(+), 1 deletion(-)

diff --git a/moto/cloudformation/utils.py b/moto/cloudformation/utils.py
index 54c338b9b157..d025af5fd092 100644
--- a/moto/cloudformation/utils.py
+++ b/moto/cloudformation/utils.py
@@ -71,7
+71,12 @@ def validate_template_cfn_lint(template): abs_filename = os.path.abspath(filename) # decode handles both yaml and json - template, matches = decode.decode(abs_filename, False) + try: + template, matches = decode.decode(abs_filename, False) + except TypeError: + # As of cfn-lint 0.39.0, the second argument (ignore_bad_template) was dropped + # https://github.com/aws-cloudformation/cfn-python-lint/pull/1580 + template, matches = decode.decode(abs_filename) # Set cfn-lint to info core.configure_logging(None) From c0a25bbd9abbae6d10cf8443e523d6779a8e1648 Mon Sep 17 00:00:00 2001 From: Brian Pandola Date: Tue, 27 Oct 2020 00:41:01 -0700 Subject: [PATCH 588/658] Fix: `VpnGatewayId` parameter casing in ec2:CreateVpnConnection request (#3401) Fixes #3397 --- moto/ec2/responses/vpn_connections.py | 2 +- tests/test_ec2/test_vpn_connections.py | 24 +++++++++++++++++++++++- 2 files changed, 24 insertions(+), 2 deletions(-) diff --git a/moto/ec2/responses/vpn_connections.py b/moto/ec2/responses/vpn_connections.py index 9ddd4d7d95bb..d0e2eead25ae 100644 --- a/moto/ec2/responses/vpn_connections.py +++ b/moto/ec2/responses/vpn_connections.py @@ -7,7 +7,7 @@ class VPNConnections(BaseResponse): def create_vpn_connection(self): type = self._get_param("Type") cgw_id = self._get_param("CustomerGatewayId") - vgw_id = self._get_param("VPNGatewayId") + vgw_id = self._get_param("VpnGatewayId") static_routes = self._get_param("StaticRoutesOnly") vpn_connection = self.ec2_backend.create_vpn_connection( type, cgw_id, vgw_id, static_routes_only=static_routes diff --git a/tests/test_ec2/test_vpn_connections.py b/tests/test_ec2/test_vpn_connections.py index 24396d3d1b47..4360c8b2eaa6 100644 --- a/tests/test_ec2/test_vpn_connections.py +++ b/tests/test_ec2/test_vpn_connections.py @@ -1,10 +1,11 @@ from __future__ import unicode_literals import boto +import boto3 from nose.tools import assert_raises import sure # noqa from boto.exception import EC2ResponseError -from moto import mock_ec2_deprecated +from moto import mock_ec2_deprecated, mock_ec2 @mock_ec2_deprecated @@ -51,3 +52,24 @@ def test_describe_vpn_connections(): list_of_vpn_connections.should.have.length_of(2) list_of_vpn_connections = conn.get_all_vpn_connections(vpn.id) list_of_vpn_connections.should.have.length_of(1) + + +@mock_ec2 +def test_create_vpn_connection_with_vpn_gateway(): + client = boto3.client("ec2", region_name="us-east-1") + + vpn_gateway = client.create_vpn_gateway(Type="ipsec.1").get("VpnGateway", {}) + customer_gateway = client.create_customer_gateway( + Type="ipsec.1", PublicIp="205.251.242.54", BgpAsn=65534, + ).get("CustomerGateway", {}) + vpn_connection = client.create_vpn_connection( + Type="ipsec.1", + VpnGatewayId=vpn_gateway["VpnGatewayId"], + CustomerGatewayId=customer_gateway["CustomerGatewayId"], + ).get("VpnConnection", {}) + + vpn_connection["Type"].should.equal("ipsec.1") + vpn_connection["VpnGatewayId"].should.equal(vpn_gateway["VpnGatewayId"]) + vpn_connection["CustomerGatewayId"].should.equal( + customer_gateway["CustomerGatewayId"] + ) From 53cc3dd67a9733d734bb1f654dd4653f7bac789f Mon Sep 17 00:00:00 2001 From: Artem Date: Tue, 27 Oct 2020 14:13:47 +0200 Subject: [PATCH 589/658] Fix SQS md5 attribute hashing. (#3403) * Fix sqs md5 attribute hashing. * Fix test name. * Fix format. 
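For reference, a hedged sketch of the checksum scheme the diff below
implements, following the documented SQS algorithm: attributes are hashed in
sorted-name order, and each contributes its name, its data type, a one-byte
transport marker (1 for string values, 2 for binary values), and its value,
with every variable-length part preceded by a big-endian 4-byte length. The
helper name and input shape here are assumptions for illustration only:

    import hashlib
    import struct

    def sketch_attribute_md5(attributes):
        # attributes: {name: {"DataType": str, "StringValue" or "BinaryValue": ...}}
        md5 = hashlib.md5()

        def put(buf):
            # Length-prefix every variable-sized field with 4 big-endian bytes.
            md5.update(struct.pack("!I", len(buf)))
            md5.update(buf)

        for name in sorted(attributes):
            attr = attributes[name]
            put(name.encode("utf-8"))
            put(attr["DataType"].encode("utf-8"))
            if "StringValue" in attr:
                md5.update(b"\x01")  # string transport type
                put(attr["StringValue"].encode("utf-8"))
            else:
                md5.update(b"\x02")  # binary transport type
                put(attr["BinaryValue"])  # raw bytes (already base64-decoded)
        return md5.hexdigest()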
--- moto/sqs/models.py | 114 +++++++++++++++++++------------------ tests/test_sqs/test_sqs.py | 76 +++++++++++++++++++++---- 2 files changed, 123 insertions(+), 67 deletions(-) diff --git a/moto/sqs/models.py b/moto/sqs/models.py index 34e81be8a8d1..1ab98e94c2d0 100644 --- a/moto/sqs/models.py +++ b/moto/sqs/models.py @@ -54,6 +54,15 @@ "String.custom": b"\x01", } +STRING_TYPE_FIELD_INDEX = 1 +BINARY_TYPE_FIELD_INDEX = 2 +STRING_LIST_TYPE_FIELD_INDEX = 3 +BINARY_LIST_TYPE_FIELD_INDEX = 4 + +# Valid attribute name rules can found at +# https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-message-metadata.html +ATTRIBUTE_NAME_PATTERN = re.compile("^([a-z]|[A-Z]|[0-9]|[_.\\-])+$") + class Message(BaseModel): def __init__(self, message_id, body): @@ -78,69 +87,62 @@ def body_md5(self): @property def attribute_md5(self): - """ - The MD5 of all attributes is calculated by first generating a - utf-8 string from each attribute and MD5-ing the concatenation - of them all. Each attribute is encoded with some bytes that - describe the length of each part and the type of attribute. - - Not yet implemented: - List types (https://github.com/aws/aws-sdk-java/blob/7844c64cf248aed889811bf2e871ad6b276a89ca/aws-java-sdk-sqs/src/main/java/com/amazonaws/services/sqs/MessageMD5ChecksumHandler.java#L58k) - """ - - def utf8(str): - if isinstance(str, six.string_types): - return str.encode("utf-8") - return str md5 = hashlib.md5() - struct_format = "!I".encode("ascii") # ensure it's a bytestring - for name in sorted(self.message_attributes.keys()): - attr = self.message_attributes[name] - whole_data_type = attr.get("data_type") - if TRANSPORT_TYPE_ENCODINGS.get(whole_data_type): - data_type = whole_data_type - else: - data_type_parts = attr["data_type"].split(".") - data_type = data_type_parts[0] - if data_type not in ["String", "Binary", "Number", "String.custom"]: - raise MessageAttributesInvalid( - "The message attribute '{0}' has an invalid message attribute type, the set of supported type prefixes is Binary, Number, and String.".format( - name[0] - ) + for attrName in sorted(self.message_attributes.keys()): + self.validate_attribute_name(attrName) + attrValue = self.message_attributes[attrName] + # Encode name + self.update_binary_length_and_value(md5, self.utf8(attrName)) + # Encode type + self.update_binary_length_and_value(md5, self.utf8(attrValue["data_type"])) + + if attrValue.get("string_value"): + md5.update(bytearray([STRING_TYPE_FIELD_INDEX])) + self.update_binary_length_and_value( + md5, self.utf8(attrValue.get("string_value")) ) + elif attrValue.get("binary_value"): + md5.update(bytearray([BINARY_TYPE_FIELD_INDEX])) + decoded_binary_value = base64.b64decode(attrValue.get("binary_value")) + self.update_binary_length_and_value(md5, decoded_binary_value) + # string_list_value type is not implemented, reserved for the future use. + # See https://docs.aws.amazon.com/AWSSimpleQueueService/latest/APIReference/API_MessageAttributeValue.html + elif len(attrValue["string_list_value"]) > 0: + md5.update(bytearray([STRING_LIST_TYPE_FIELD_INDEX])) + for strListMember in attrValue["string_list_value"]: + self.update_binary_length_and_value(md5, self.utf8(strListMember)) + # binary_list_value type is not implemented, reserved for the future use. 
+ # See https://docs.aws.amazon.com/AWSSimpleQueueService/latest/APIReference/API_MessageAttributeValue.html + elif len(attrValue["binary_list_value"]) > 0: + md5.update(bytearray([BINARY_LIST_TYPE_FIELD_INDEX])) + for strListMember in attrValue["binary_list_value"]: + decoded_binary_value = base64.b64decode(strListMember) + self.update_binary_length_and_value(md5, decoded_binary_value) - encoded = utf8("") - # Each part of each attribute is encoded right after it's - # own length is packed into a 4-byte integer - # 'timestamp' -> b'\x00\x00\x00\t' - encoded += struct.pack(struct_format, len(utf8(name))) + utf8(name) - # The datatype is additionally given a final byte - # representing which type it is - encoded += struct.pack(struct_format, len(data_type)) + utf8(data_type) - encoded += TRANSPORT_TYPE_ENCODINGS[data_type] - - if data_type in ["String", "Number", "String.custom"]: - value = attr["string_value"] - elif data_type == "Binary": - value = base64.b64decode(attr["binary_value"]) - else: - print( - "Moto hasn't implemented MD5 hashing for {} attributes".format( - data_type - ) - ) - # The following should be enough of a clue to users that - # they are not, in fact, looking at a correct MD5 while - # also following the character and length constraints of - # MD5 so as not to break client softwre - return "deadbeefdeadbeefdeadbeefdeadbeef" + return md5.hexdigest() - encoded += struct.pack(struct_format, len(utf8(value))) + utf8(value) + @staticmethod + def update_binary_length_and_value(md5, value): + length_bytes = struct.pack("!I".encode("ascii"), len(value)) + md5.update(length_bytes) + md5.update(value) - md5.update(encoded) - return md5.hexdigest() + @staticmethod + def validate_attribute_name(name): + if not ATTRIBUTE_NAME_PATTERN.match(name): + raise MessageAttributesInvalid( + "The message attribute name '{0}' is invalid. " + "Attribute name can contain A-Z, a-z, 0-9, " + "underscore (_), hyphen (-), and period (.) 
characters.".format(name) + ) + + @staticmethod + def utf8(string): + if isinstance(string, six.string_types): + return string.encode("utf-8") + return string @property def body(self): diff --git a/tests/test_sqs/test_sqs.py b/tests/test_sqs/test_sqs.py index 6305a163abe3..b974e04f6da0 100644 --- a/tests/test_sqs/test_sqs.py +++ b/tests/test_sqs/test_sqs.py @@ -256,11 +256,14 @@ def test_message_send_with_attributes(): msg = queue.send_message( MessageBody="derp", MessageAttributes={ - "timestamp": {"StringValue": "1493147359900", "DataType": "Number"} + "SOME_Valid.attribute-Name": { + "StringValue": "1493147359900", + "DataType": "Number", + } }, ) msg.get("MD5OfMessageBody").should.equal("58fd9edd83341c29f1aebba81c31e257") - msg.get("MD5OfMessageAttributes").should.equal("235c5c510d26fb653d073faed50ae77c") + msg.get("MD5OfMessageAttributes").should.equal("36655e7e9d7c0e8479fa3f3f42247ae7") msg.get("MessageId").should_not.contain(" \n") messages = queue.receive_messages() @@ -268,20 +271,71 @@ def test_message_send_with_attributes(): @mock_sqs -def test_message_with_complex_attributes(): +def test_message_with_invalid_attributes(): + sqs = boto3.resource("sqs", region_name="us-east-1") + queue = sqs.create_queue(QueueName="blah") + with assert_raises(ClientError) as e: + queue.send_message( + MessageBody="derp", + MessageAttributes={ + "öther_encodings": {"DataType": "String", "StringValue": "str"}, + }, + ) + ex = e.exception + ex.response["Error"]["Code"].should.equal("MessageAttributesInvalid") + ex.response["Error"]["Message"].should.equal( + "The message attribute name 'öther_encodings' is invalid. " + "Attribute name can contain A-Z, a-z, 0-9, underscore (_), hyphen (-), and period (.) characters." + ) + + +@mock_sqs +def test_message_with_string_attributes(): sqs = boto3.resource("sqs", region_name="us-east-1") queue = sqs.create_queue(QueueName="blah") msg = queue.send_message( MessageBody="derp", MessageAttributes={ - "ccc": {"StringValue": "testjunk", "DataType": "String"}, - "aaa": {"BinaryValue": b"\x02\x03\x04", "DataType": "Binary"}, - "zzz": {"DataType": "Number", "StringValue": "0230.01"}, - "öther_encodings": {"DataType": "String", "StringValue": "T\xFCst"}, + "id": { + "StringValue": "2018fc74-4f77-1a5a-1be0-c2d037d5052b", + "DataType": "String", + }, + "contentType": {"StringValue": "application/json", "DataType": "String"}, + "timestamp": { + "StringValue": "1602845432024", + "DataType": "Number.java.lang.Long", + }, }, ) msg.get("MD5OfMessageBody").should.equal("58fd9edd83341c29f1aebba81c31e257") - msg.get("MD5OfMessageAttributes").should.equal("8ae21a7957029ef04146b42aeaa18a22") + msg.get("MD5OfMessageAttributes").should.equal("b12289320bb6e494b18b645ef562b4a9") + msg.get("MessageId").should_not.contain(" \n") + + messages = queue.receive_messages() + messages.should.have.length_of(1) + + +@mock_sqs +def test_message_with_binary_attribute(): + sqs = boto3.resource("sqs", region_name="us-east-1") + queue = sqs.create_queue(QueueName="blah") + msg = queue.send_message( + MessageBody="derp", + MessageAttributes={ + "id": { + "StringValue": "453ae55e-f03b-21a6-a4b1-70c2e2e8fe71", + "DataType": "String", + }, + "mybin": {"BinaryValue": "kekchebukek", "DataType": "Binary"}, + "timestamp": { + "StringValue": "1603134247654", + "DataType": "Number.java.lang.Long", + }, + "contentType": {"StringValue": "application/json", "DataType": "String"}, + }, + ) + msg.get("MD5OfMessageBody").should.equal("58fd9edd83341c29f1aebba81c31e257") + 
msg.get("MD5OfMessageAttributes").should.equal("049075255ebc53fb95f7f9f3cedf3c50") msg.get("MessageId").should_not.contain(" \n") messages = queue.receive_messages() @@ -302,7 +356,7 @@ def test_message_with_attributes_have_labels(): }, ) msg.get("MD5OfMessageBody").should.equal("58fd9edd83341c29f1aebba81c31e257") - msg.get("MD5OfMessageAttributes").should.equal("235c5c510d26fb653d073faed50ae77c") + msg.get("MD5OfMessageAttributes").should.equal("2e2e4876d8e0bd6b8c2c8f556831c349") msg.get("MessageId").should_not.contain(" \n") messages = queue.receive_messages() @@ -657,10 +711,10 @@ def test_send_receive_message_with_attributes_with_labels(): message2.get("Body").should.equal(body_two) message1.get("MD5OfMessageAttributes").should.equal( - "235c5c510d26fb653d073faed50ae77c" + "2e2e4876d8e0bd6b8c2c8f556831c349" ) message2.get("MD5OfMessageAttributes").should.equal( - "994258b45346a2cc3f9cbb611aa7af30" + "cfa7c73063c6e2dbf9be34232a1978cf" ) response = queue.send_message( From a5fc14b5bc99f71d027b7b3ec30acfa20ec4d5dc Mon Sep 17 00:00:00 2001 From: Neal Granger Date: Tue, 27 Oct 2020 09:04:32 -0700 Subject: [PATCH 590/658] Add missing `Fn::GetAtt` attributes to S3 bucket mock (#3396) * Add missing `Fn::GetAtt` attributes to S3 bucket mock Addresses an issue reported here https://github.com/localstack/aws-cdk-local/issues/1 * Reformat touched files with `black` * Reformat touched files with `black` on Python 3.7 --- moto/s3/models.py | 34 +++++++++++++++---- tests/test_s3/test_s3_cloudformation.py | 43 +++++++++++++++++++++++-- 2 files changed, 69 insertions(+), 8 deletions(-) diff --git a/moto/s3/models.py b/moto/s3/models.py index c0c5512dde7b..17282739a321 100644 --- a/moto/s3/models.py +++ b/moto/s3/models.py @@ -66,7 +66,7 @@ def get_moto_s3_account_id(): """This makes it easy for mocking AWS Account IDs when using AWS Config - -- Simply mock.patch the ACCOUNT_ID here, and Config gets it for free. + -- Simply mock.patch the ACCOUNT_ID here, and Config gets it for free. 
""" return ACCOUNT_ID @@ -1061,12 +1061,16 @@ def set_website_configuration(self, website_configuration): def get_cfn_attribute(self, attribute_name): from moto.cloudformation.exceptions import UnformattedGetAttTemplateException - if attribute_name == "DomainName": - raise NotImplementedError('"Fn::GetAtt" : [ "{0}" , "DomainName" ]"') - elif attribute_name == "WebsiteURL": - raise NotImplementedError('"Fn::GetAtt" : [ "{0}" , "WebsiteURL" ]"') - elif attribute_name == "Arn": + if attribute_name == "Arn": return self.arn + elif attribute_name == "DomainName": + return self.domain_name + elif attribute_name == "DualStackDomainName": + return self.dual_stack_domain_name + elif attribute_name == "RegionalDomainName": + return self.regional_domain_name + elif attribute_name == "WebsiteURL": + return self.website_url raise UnformattedGetAttTemplateException() def set_acl(self, acl): @@ -1076,6 +1080,24 @@ def set_acl(self, acl): def arn(self): return "arn:aws:s3:::{}".format(self.name) + @property + def domain_name(self): + return "{}.s3.amazonaws.com".format(self.name) + + @property + def dual_stack_domain_name(self): + return "{}.s3.dualstack.{}.amazonaws.com".format(self.name, self.region_name) + + @property + def regional_domain_name(self): + return "{}.s3.{}.amazonaws.com".format(self.name, self.region_name) + + @property + def website_url(self): + return "http://{}.s3-website.{}.amazonaws.com".format( + self.name, self.region_name + ) + @property def physical_resource_id(self): return self.name diff --git a/tests/test_s3/test_s3_cloudformation.py b/tests/test_s3/test_s3_cloudformation.py index e3803aa2c8eb..ebaa03b7879c 100644 --- a/tests/test_s3/test_s3_cloudformation.py +++ b/tests/test_s3/test_s3_cloudformation.py @@ -148,8 +148,9 @@ def test_s3_bucket_cloudformation_update_replacement(): @mock_s3 @mock_cloudformation def test_s3_bucket_cloudformation_outputs(): - s3 = boto3.client("s3", region_name="us-east-1") - cf = boto3.resource("cloudformation", region_name="us-east-1") + region_name = "us-east-1" + s3 = boto3.client("s3", region_name=region_name) + cf = boto3.resource("cloudformation", region_name=region_name) stack_name = "test-stack" bucket_name = "test-bucket" template = { @@ -165,6 +166,26 @@ def test_s3_bucket_cloudformation_outputs(): "Value": {"Fn::GetAtt": ["TestBucket", "Arn"]}, "Export": {"Name": {"Fn::Sub": "${AWS::StackName}:BucketARN"}}, }, + "BucketDomainName": { + "Value": {"Fn::GetAtt": ["TestBucket", "DomainName"]}, + "Export": {"Name": {"Fn::Sub": "${AWS::StackName}:BucketDomainName"}}, + }, + "BucketDualStackDomainName": { + "Value": {"Fn::GetAtt": ["TestBucket", "DualStackDomainName"]}, + "Export": { + "Name": {"Fn::Sub": "${AWS::StackName}:BucketDualStackDomainName"} + }, + }, + "BucketRegionalDomainName": { + "Value": {"Fn::GetAtt": ["TestBucket", "RegionalDomainName"]}, + "Export": { + "Name": {"Fn::Sub": "${AWS::StackName}:BucketRegionalDomainName"} + }, + }, + "BucketWebsiteURL": { + "Value": {"Fn::GetAtt": ["TestBucket", "WebsiteURL"]}, + "Export": {"Name": {"Fn::Sub": "${AWS::StackName}:BucketWebsiteURL"}}, + }, "BucketName": { "Value": {"Ref": "TestBucket"}, "Export": {"Name": {"Fn::Sub": "${AWS::StackName}:BucketName"}}, @@ -176,4 +197,22 @@ def test_s3_bucket_cloudformation_outputs(): output = {item["OutputKey"]: item["OutputValue"] for item in outputs_list} s3.head_bucket(Bucket=output["BucketName"]) output["BucketARN"].should.match("arn:aws:s3.+{bucket}".format(bucket=bucket_name)) + output["BucketDomainName"].should.equal( + 
"{bucket}.s3.amazonaws.com".format(bucket=bucket_name) + ) + output["BucketDualStackDomainName"].should.equal( + "{bucket}.s3.dualstack.{region}.amazonaws.com".format( + bucket=bucket_name, region=region_name + ) + ) + output["BucketRegionalDomainName"].should.equal( + "{bucket}.s3.{region}.amazonaws.com".format( + bucket=bucket_name, region=region_name + ) + ) + output["BucketWebsiteURL"].should.equal( + "http://{bucket}.s3-website.{region}.amazonaws.com".format( + bucket=bucket_name, region=region_name + ) + ) output["BucketName"].should.equal(bucket_name) From cc27f1ef0c2c3c1070f211bb207736e2af29cb0b Mon Sep 17 00:00:00 2001 From: Bert Blommers Date: Wed, 28 Oct 2020 14:22:18 +0000 Subject: [PATCH 591/658] S3 - Add more detail to error responses --- moto/s3/exceptions.py | 20 ++++++++++++++++++++ moto/s3/responses.py | 18 +++++++++++++----- tests/test_s3/red.jpg | Bin 0 -> 633 bytes tests/test_s3/test_s3.py | 34 ++++++++++++++++++++++++++++++++++ 4 files changed, 67 insertions(+), 5 deletions(-) create mode 100644 tests/test_s3/red.jpg diff --git a/moto/s3/exceptions.py b/moto/s3/exceptions.py index 7ea21b096190..3b33791c5a37 100644 --- a/moto/s3/exceptions.py +++ b/moto/s3/exceptions.py @@ -14,6 +14,11 @@ {% block extra %}{{ condition }}{% endblock %} """ +ERROR_WITH_RANGE = """{% extends 'single_error' %} +{% block extra %}{{ actual_size }} +{{ range_requested }}{% endblock %} +""" + class S3ClientError(RESTError): def __init__(self, *args, **kwargs): @@ -404,3 +409,18 @@ def __init__(self, failed_condition, **kwargs): condition=failed_condition, **kwargs ) + + +class InvalidRange(S3ClientError): + code = 416 + + def __init__(self, range_requested, actual_size, **kwargs): + kwargs.setdefault("template", "range_error") + self.templates["range_error"] = ERROR_WITH_RANGE + super(InvalidRange, self).__init__( + "InvalidRange", + "The requested range is not satisfiable", + range_requested=range_requested, + actual_size=actual_size, + **kwargs + ) diff --git a/moto/s3/responses.py b/moto/s3/responses.py index 4cb366195505..b01bed1fbd85 100644 --- a/moto/s3/responses.py +++ b/moto/s3/responses.py @@ -37,6 +37,7 @@ ObjectNotInActiveTierError, NoSystemTags, PreconditionFailed, + InvalidRange, ) from .models import ( s3_backend, @@ -936,11 +937,15 @@ def toint(i): else: return 400, response_headers, "" if begin < 0 or end > last or begin > min(end, last): - return 416, response_headers, "" + raise InvalidRange( + actual_size=str(length), range_requested=request.headers.get("range") + ) response_headers["content-range"] = "bytes {0}-{1}/{2}".format( begin, end, length ) - return 206, response_headers, response_content[begin : end + 1] + content = response_content[begin : end + 1] + response_headers["content-length"] = len(content) + return 206, response_headers, content def key_or_control_response(self, request, full_url, headers): # Key and Control are lumped in because splitting out the regex is too much of a pain :/ @@ -967,9 +972,12 @@ def key_or_control_response(self, request, full_url, headers): status_code, response_headers, response_content = response if status_code == 200 and "range" in request.headers: - return self._handle_range_header( - request, response_headers, response_content - ) + try: + return self._handle_range_header( + request, response_headers, response_content + ) + except S3ClientError as s3error: + return s3error.code, {}, s3error.description return status_code, response_headers, response_content def _control_response(self, request, full_url, headers): diff --git 
a/tests/test_s3/red.jpg b/tests/test_s3/red.jpg new file mode 100644 index 0000000000000000000000000000000000000000..6fb9aed7c88160a18eb8cb0b412f80a7a47ffced GIT binary patch literal 633 zcmex=^(PF6}rMnOeST|r4lSw=>~TvNxu(8R<c1}I=;VrF4wW9Q)H;sz?% zD!{d!pzFb!U9xX3zTPI5o8roG<0MW4oqZMDikqloVbuf*=gfJ(V&YTRE(2~ znmD<{#3dx9RMpfqG__1j&CD$#!=o3Ax_(anF8u!{0FnsMm;e9( literal 0 HcmV?d00001 diff --git a/tests/test_s3/test_s3.py b/tests/test_s3/test_s3.py index b213a9a72624..d8f08e9ef4c7 100644 --- a/tests/test_s3/test_s3.py +++ b/tests/test_s3/test_s3.py @@ -4889,3 +4889,37 @@ def test_presigned_put_url_with_custom_headers(): s3.delete_object(Bucket=bucket, Key=key) s3.delete_bucket(Bucket=bucket) + + +@mock_s3 +def test_request_partial_content_should_contain_content_length(): + bucket = "bucket" + object_key = "key" + s3 = boto3.resource("s3") + s3.create_bucket(Bucket=bucket) + s3.Object(bucket, object_key).put(Body="some text") + + file = s3.Object(bucket, object_key) + response = file.get(Range="bytes=0-1024") + response["ContentLength"].should.equal(9) + + +@mock_s3 +def test_request_partial_content_should_contain_actual_content_length(): + bucket = "bucket" + object_key = "key" + s3 = boto3.resource("s3") + s3.create_bucket(Bucket=bucket) + s3.Object(bucket, object_key).put(Body="some text") + + file = s3.Object(bucket, object_key) + requested_range = "bytes=1024-" + try: + file.get(Range=requested_range) + except botocore.client.ClientError as e: + e.response["Error"]["Code"].should.equal("InvalidRange") + e.response["Error"]["Message"].should.equal( + "The requested range is not satisfiable" + ) + e.response["Error"]["ActualObjectSize"].should.equal("9") + e.response["Error"]["RangeRequested"].should.equal(requested_range) From 19fc76f4669d545b56cfd79c55377f46379d546d Mon Sep 17 00:00:00 2001 From: usmangani1 Date: Thu, 29 Oct 2020 14:22:02 +0530 Subject: [PATCH 592/658] Fix: SNS Delete subscriptions on topic deletion (#3410) * Fix:Delete subscriptions on delete topic * Changed tests Co-authored-by: usmankb --- moto/sns/models.py | 7 +++++ tests/test_sns/test_subscriptions.py | 4 +-- tests/test_sns/test_subscriptions_boto3.py | 30 ++++++++++++++++++---- 3 files changed, 33 insertions(+), 8 deletions(-) diff --git a/moto/sns/models.py b/moto/sns/models.py index ea0790c6a5fe..7d297fbdc097 100644 --- a/moto/sns/models.py +++ b/moto/sns/models.py @@ -426,8 +426,15 @@ def _get_topic_subscriptions(self, topic): def list_topics(self, next_token=None): return self._get_values_nexttoken(self.topics, next_token) + def delete_topic_subscriptions(self, topic): + for key, value in self.subscriptions.items(): + if value.topic == topic: + self.subscriptions.pop(key) + def delete_topic(self, arn): try: + topic = self.get_topic(arn) + self.delete_topic_subscriptions(topic) self.topics.pop(arn) except KeyError: raise SNSNotFoundError("Topic with arn {0} not found".format(arn)) diff --git a/tests/test_sns/test_subscriptions.py b/tests/test_sns/test_subscriptions.py index f773438d7adf..d11830dc6d2b 100644 --- a/tests/test_sns/test_subscriptions.py +++ b/tests/test_sns/test_subscriptions.py @@ -72,9 +72,7 @@ def test_deleting_subscriptions_by_deleting_topic(): subscriptions = conn.get_all_subscriptions()["ListSubscriptionsResponse"][ "ListSubscriptionsResult" ]["Subscriptions"] - subscriptions.should.have.length_of(1) - subscription = subscriptions[0] - subscription["SubscriptionArn"].should.equal(subscription_arn) + subscriptions.should.have.length_of(0) # Now delete hanging subscription 
conn.unsubscribe(subscription_arn) diff --git a/tests/test_sns/test_subscriptions_boto3.py b/tests/test_sns/test_subscriptions_boto3.py index d91b3566b2e2..c15658dcaba2 100644 --- a/tests/test_sns/test_subscriptions_boto3.py +++ b/tests/test_sns/test_subscriptions_boto3.py @@ -7,7 +7,7 @@ from botocore.exceptions import ClientError from nose.tools import assert_raises -from moto import mock_sns +from moto import mock_sns, mock_sqs from moto.sns.models import ( DEFAULT_PAGE_SIZE, DEFAULT_EFFECTIVE_DELIVERY_POLICY, @@ -124,11 +124,9 @@ def test_unsubscribe_from_deleted_topic(): topics = topics_json["Topics"] topics.should.have.length_of(0) - # And the subscription should still be left + # as per the documentation deleting a topic deletes all the subscriptions subscriptions = client.list_subscriptions()["Subscriptions"] - subscriptions.should.have.length_of(1) - subscription = subscriptions[0] - subscription["SubscriptionArn"].should.equal(subscription_arn) + subscriptions.should.have.length_of(0) # Now delete hanging subscription client.unsubscribe(SubscriptionArn=subscription_arn) @@ -304,6 +302,28 @@ def test_creating_subscription_with_attributes(): ) +@mock_sns +@mock_sqs +def test_delete_subscriptions_on_delete_topic(): + sqs = boto3.client("sqs", region_name="us-east-1") + conn = boto3.client("sns", region_name="us-east-1") + + queue = sqs.create_queue(QueueName="test-queue") + topic = conn.create_topic(Name="some-topic") + + conn.subscribe( + TopicArn=topic.get("TopicArn"), Protocol="sqs", Endpoint=queue.get("QueueUrl") + ) + subscriptions = conn.list_subscriptions()["Subscriptions"] + + subscriptions.should.have.length_of(1) + + conn.delete_topic(TopicArn=topic.get("TopicArn")) + + subscriptions = conn.list_subscriptions()["Subscriptions"] + subscriptions.should.have.length_of(0) + + @mock_sns def test_set_subscription_attributes(): conn = boto3.client("sns", region_name="us-east-1") From 2f23f6b26be00efae0cada07d8e5a6986011c4b7 Mon Sep 17 00:00:00 2001 From: Brian Pandola Date: Thu, 29 Oct 2020 04:50:45 -0700 Subject: [PATCH 593/658] Fix `dynamodb:TransactGetItems` response for items that do not exist (#3420) If the requested item has no projected attributes, the corresponding ItemResponse object is an empty Map.[1] Verified against real AWS. Fix existing general test case and add an explicit test case to cover this scenario. 
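As an illustration, the corrected response shape (client, table name, and keys
here are hypothetical; the new test below asserts the same behavior):

    resp = client.transact_get_items(
        TransactItems=[
            {"Get": {"Key": {"id": {"S": "exists"}}, "TableName": "test-table"}},
            {"Get": {"Key": {"id": {"S": "missing"}}, "TableName": "test-table"}},
        ]
    )
    # resp["Responses"] keeps one entry per requested item, in order, with an
    # empty map standing in for the item that was not found:
    # [{"Item": {"id": {"S": "exists"}}}, {}]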
[1]: https://docs.aws.amazon.com/amazondynamodb/latest/APIReference/API_TransactGetItems.html#API_TransactGetItems_ResponseElements Fixes #3404 --- moto/dynamodb2/responses.py | 1 + tests/test_dynamodb2/test_dynamodb.py | 28 ++++++++++++++++++++++++++- 2 files changed, 28 insertions(+), 1 deletion(-) diff --git a/moto/dynamodb2/responses.py b/moto/dynamodb2/responses.py index 25ec292382c5..8eb1023b662f 100644 --- a/moto/dynamodb2/responses.py +++ b/moto/dynamodb2/responses.py @@ -906,6 +906,7 @@ def transact_get_items(self): return self.error(er, "Requested resource not found") if not item: + responses.append({}) continue item_describe = item.describe_attrs(False) diff --git a/tests/test_dynamodb2/test_dynamodb.py b/tests/test_dynamodb2/test_dynamodb.py index 06dfec01e8a9..41baddc79e1c 100644 --- a/tests/test_dynamodb2/test_dynamodb.py +++ b/tests/test_dynamodb2/test_dynamodb.py @@ -4285,7 +4285,8 @@ def test_valid_transact_get_items(): ] ) res["Responses"][0]["Item"].should.equal({"id": {"S": "1"}, "sort_key": {"S": "1"}}) - len(res["Responses"]).should.equal(1) + len(res["Responses"]).should.equal(2) + res["Responses"][1].should.equal({}) res = client.transact_get_items( TransactItems=[ @@ -5479,3 +5480,28 @@ def test_set_attribute_is_dropped_if_empty_after_update_expression(): item = resp["Items"][0] item.should.have.key("customer") item.should_not.have.key("orders") + + +@mock_dynamodb2 +def test_transact_get_items_should_return_empty_map_for_non_existent_item(): + client = boto3.client("dynamodb", region_name="us-west-2") + table_name = "test-table" + key_schema = [{"AttributeName": "id", "KeyType": "HASH"}] + attribute_definitions = [{"AttributeName": "id", "AttributeType": "S"}] + client.create_table( + TableName=table_name, + KeySchema=key_schema, + AttributeDefinitions=attribute_definitions, + ProvisionedThroughput={"ReadCapacityUnits": 5, "WriteCapacityUnits": 5}, + ) + item = {"id": {"S": "1"}} + client.put_item(TableName=table_name, Item=item) + items = client.transact_get_items( + TransactItems=[ + {"Get": {"Key": {"id": {"S": "1"}}, "TableName": table_name}}, + {"Get": {"Key": {"id": {"S": "2"}}, "TableName": table_name}}, + ] + ).get("Responses", []) + items.should.have.length_of(2) + items[0].should.equal({"Item": item}) + items[1].should.equal({}) From d499d4d1795961d9b55a2e51446edb73e0a6653c Mon Sep 17 00:00:00 2001 From: Erinna Chen Date: Thu, 29 Oct 2020 09:18:38 -0500 Subject: [PATCH 594/658] Fix update for application autoscaling register target (#3423) --- moto/applicationautoscaling/models.py | 4 +- .../test_applicationautoscaling.py | 44 +++++++++++++++++++ 2 files changed, 47 insertions(+), 1 deletion(-) diff --git a/moto/applicationautoscaling/models.py b/moto/applicationautoscaling/models.py index a6303c75c1da..ebf6594d70c5 100644 --- a/moto/applicationautoscaling/models.py +++ b/moto/applicationautoscaling/models.py @@ -98,7 +98,7 @@ def register_scalable_target(self, namespace, r_id, dimension, **kwargs): _ = self._ecs_service_exists_for_target(r_id) if self._scalable_target_exists(r_id, dimension): target = self.targets[dimension][r_id] - target.update(kwargs) + target.update(**kwargs) else: target = FakeScalableTarget(self, namespace, r_id, dimension, **kwargs) self._add_scalable_target(target) @@ -197,6 +197,8 @@ def update(self, **kwargs): self.min_capacity = kwargs["min_capacity"] if kwargs["max_capacity"] is not None: self.max_capacity = kwargs["max_capacity"] + if kwargs["suspended_state"] is not None: + self.suspended_state = kwargs["suspended_state"] 
applicationautoscaling_backends = {} diff --git a/tests/test_applicationautoscaling/test_applicationautoscaling.py b/tests/test_applicationautoscaling/test_applicationautoscaling.py index 8e5e136e5969..ce8351990555 100644 --- a/tests/test_applicationautoscaling/test_applicationautoscaling.py +++ b/tests/test_applicationautoscaling/test_applicationautoscaling.py @@ -260,3 +260,47 @@ def test_register_scalable_target_resource_id_variations(): t.should.have.key("ResourceId").which.should.equal(resource_id) t.should.have.key("ScalableDimension").which.should.equal(scalable_dimension) t.should.have.key("CreationTime").which.should.be.a("datetime.datetime") + + +@mock_ecs +@mock_applicationautoscaling +def test_register_scalable_target_updates_existing_target(): + ecs = boto3.client("ecs", region_name=DEFAULT_REGION) + _create_ecs_defaults(ecs) + client = boto3.client("application-autoscaling", region_name=DEFAULT_REGION) + register_scalable_target(client) + + updated_min_capacity = 3 + updated_max_capacity = 10 + updated_suspended_state = { + "DynamicScalingInSuspended": False, + "DynamicScalingOutSuspended": False, + "ScheduledScalingSuspended": False, + } + + client.register_scalable_target( + ServiceNamespace=DEFAULT_SERVICE_NAMESPACE, + ResourceId=DEFAULT_RESOURCE_ID, + ScalableDimension=DEFAULT_SCALABLE_DIMENSION, + MinCapacity=updated_min_capacity, + MaxCapacity=updated_max_capacity, + SuspendedState=updated_suspended_state, + ) + response = client.describe_scalable_targets( + ServiceNamespace=DEFAULT_SERVICE_NAMESPACE + ) + + len(response["ScalableTargets"]).should.equal(1) + t = response["ScalableTargets"][0] + t.should.have.key("MinCapacity").which.should.equal(updated_min_capacity) + t.should.have.key("MaxCapacity").which.should.equal(updated_max_capacity) + t.should.have.key("SuspendedState") + t["SuspendedState"]["DynamicScalingInSuspended"].should.equal( + updated_suspended_state["DynamicScalingInSuspended"] + ) + t["SuspendedState"]["DynamicScalingOutSuspended"].should.equal( + updated_suspended_state["DynamicScalingOutSuspended"] + ) + t["SuspendedState"]["ScheduledScalingSuspended"].should.equal( + updated_suspended_state["ScheduledScalingSuspended"] + ) From cbd4efb42da855d61ad73cf1c52eaea9b4048a50 Mon Sep 17 00:00:00 2001 From: jweite Date: Fri, 30 Oct 2020 11:21:34 -0400 Subject: [PATCH 595/658] ApplicationAutoscaling: support autoscaling policies, deregister_scalable_target (#3350) * ApplicationAutoscaling: support autoscaling policies, deregister_scalable_target. * PR3350 comment changes: drop unnecessary pass statements, unit test three exception cases. 
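A short usage sketch of the API surface this patch adds, using the same
SageMaker fixtures as the tests below (names and capacities are test data,
not required values):

    import boto3

    client = boto3.client("application-autoscaling", region_name="us-east-1")
    target = dict(
        ServiceNamespace="sagemaker",
        ResourceId="endpoint/MyEndPoint/variant/MyVariant",
        ScalableDimension="sagemaker:variant:DesiredInstanceCount",
    )

    client.register_scalable_target(MinCapacity=1, MaxCapacity=8, **target)
    client.put_scaling_policy(
        PolicyName="MyPolicy",
        PolicyType="TargetTrackingScaling",
        TargetTrackingScalingPolicyConfiguration={
            "TargetValue": 70.0,
            "PredefinedMetricSpecification": {
                "PredefinedMetricType": "SageMakerVariantInvocationsPerInstance"
            },
        },
        **target
    )
    client.describe_scaling_policies(ServiceNamespace="sagemaker")
    client.delete_scaling_policy(PolicyName="MyPolicy", **target)
    client.deregister_scalable_target(**target)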
Co-authored-by: Joseph Weitekamp --- moto/applicationautoscaling/exceptions.py | 8 +- moto/applicationautoscaling/models.py | 140 ++++++++++++ moto/applicationautoscaling/responses.py | 96 ++++++-- .../test_applicationautoscaling.py | 216 +++++++++++++++++- 4 files changed, 439 insertions(+), 21 deletions(-) diff --git a/moto/applicationautoscaling/exceptions.py b/moto/applicationautoscaling/exceptions.py index 2e2e0ef9f86d..8d5fb3c0c11b 100644 --- a/moto/applicationautoscaling/exceptions.py +++ b/moto/applicationautoscaling/exceptions.py @@ -1,5 +1,6 @@ from __future__ import unicode_literals import json +from moto.core.exceptions import JsonRESTError class AWSError(Exception): @@ -18,5 +19,8 @@ def response(self): return json.dumps(resp), dict(status=self.STATUS) -class AWSValidationException(AWSError): - TYPE = "ValidationException" +class AWSValidationException(JsonRESTError): + def __init__(self, message, **kwargs): + super(AWSValidationException, self).__init__( + "ValidationException", message, **kwargs + ) diff --git a/moto/applicationautoscaling/models.py b/moto/applicationautoscaling/models.py index ebf6594d70c5..47a1adad834a 100644 --- a/moto/applicationautoscaling/models.py +++ b/moto/applicationautoscaling/models.py @@ -5,6 +5,7 @@ from collections import OrderedDict from enum import Enum, unique import time +import uuid @unique @@ -58,6 +59,7 @@ def __init__(self, region, ecs): self.region = region self.ecs_backend = ecs self.targets = OrderedDict() + self.policies = {} def reset(self): region = self.region @@ -124,6 +126,100 @@ def _add_scalable_target(self, target): self.targets[target.scalable_dimension][target.resource_id] = target return target + def deregister_scalable_target(self, namespace, r_id, dimension): + """ Registers or updates a scalable target. 
""" + if self._scalable_target_exists(r_id, dimension): + del self.targets[dimension][r_id] + else: + raise AWSValidationException( + "No scalable target found for service namespace: {}, resource ID: {}, scalable dimension: {}".format( + namespace, r_id, dimension + ) + ) + + def put_scaling_policy( + self, + policy_name, + service_namespace, + resource_id, + scalable_dimension, + policy_body, + policy_type=None, + ): + policy_key = FakeApplicationAutoscalingPolicy.formulate_key( + service_namespace, resource_id, scalable_dimension, policy_name + ) + if policy_key in self.policies: + old_policy = self.policies[policy_name] + policy = FakeApplicationAutoscalingPolicy( + region_name=self.region, + policy_name=policy_name, + service_namespace=service_namespace, + resource_id=resource_id, + scalable_dimension=scalable_dimension, + policy_type=policy_type if policy_type else old_policy.policy_type, + policy_body=policy_body if policy_body else old_policy._policy_body, + ) + else: + policy = FakeApplicationAutoscalingPolicy( + region_name=self.region, + policy_name=policy_name, + service_namespace=service_namespace, + resource_id=resource_id, + scalable_dimension=scalable_dimension, + policy_type=policy_type, + policy_body=policy_body, + ) + self.policies[policy_key] = policy + return policy + + def describe_scaling_policies(self, service_namespace, **kwargs): + policy_names = kwargs.get("policy_names") + resource_id = kwargs.get("resource_id") + scalable_dimension = kwargs.get("scalable_dimension") + max_results = kwargs.get("max_results") or 100 + next_token = kwargs.get("next_token") + policies = [ + policy + for policy in self.policies.values() + if policy.service_namespace == service_namespace + ] + if policy_names: + policies = [ + policy for policy in policies if policy.policy_name in policy_names + ] + if resource_id: + policies = [ + policy for policy in policies if policy.resource_id in resource_id + ] + if scalable_dimension: + policies = [ + policy + for policy in policies + if policy.scalable_dimension in scalable_dimension + ] + starting_point = int(next_token) if next_token else 0 + ending_point = starting_point + max_results + policies_page = policies[starting_point:ending_point] + new_next_token = str(ending_point) if ending_point < len(policies) else None + return new_next_token, policies_page + + def delete_scaling_policy( + self, policy_name, service_namespace, resource_id, scalable_dimension + ): + policy_key = FakeApplicationAutoscalingPolicy.formulate_key( + service_namespace, resource_id, scalable_dimension, policy_name + ) + if policy_key in self.policies: + del self.policies[policy_key] + return {} + else: + raise AWSValidationException( + "No scaling policy found for service namespace: {}, resource ID: {}, scalable dimension: {}, policy name: {}".format( + service_namespace, resource_id, scalable_dimension, policy_name + ) + ) + def _target_params_are_valid(namespace, r_id, dimension): """ Check whether namespace, resource_id and dimension are valid and consistent with each other. 
""" @@ -201,6 +297,50 @@ def update(self, **kwargs): self.suspended_state = kwargs["suspended_state"] +class FakeApplicationAutoscalingPolicy(BaseModel): + def __init__( + self, + region_name, + policy_name, + service_namespace, + resource_id, + scalable_dimension, + policy_type, + policy_body, + ): + self.step_scaling_policy_configuration = None + self.target_tracking_scaling_policy_configuration = None + + if "policy_type" == "StepScaling": + self.step_scaling_policy_configuration = policy_body + self.target_tracking_scaling_policy_configuration = None + elif policy_type == "TargetTrackingScaling": + self.step_scaling_policy_configuration = None + self.target_tracking_scaling_policy_configuration = policy_body + else: + raise AWSValidationException( + "Unknown policy type {} specified.".format(policy_type) + ) + + self._policy_body = policy_body + self.service_namespace = service_namespace + self.resource_id = resource_id + self.scalable_dimension = scalable_dimension + self.policy_name = policy_name + self.policy_type = policy_type + self._guid = uuid.uuid4() + self.policy_arn = "arn:aws:autoscaling:{}:scalingPolicy:{}:resource/sagemaker/{}:policyName/{}".format( + region_name, self._guid, self.resource_id, self.policy_name + ) + self.creation_time = time.time() + + @staticmethod + def formulate_key(service_namespace, resource_id, scalable_dimension, policy_name): + return "{}\t{}\t{}\t{}".format( + service_namespace, resource_id, scalable_dimension, policy_name + ) + + applicationautoscaling_backends = {} for region_name, ecs_backend in ecs_backends.items(): applicationautoscaling_backends[region_name] = ApplicationAutoscalingBackend( diff --git a/moto/applicationautoscaling/responses.py b/moto/applicationautoscaling/responses.py index 9a2905d79190..5bb0a4144e45 100644 --- a/moto/applicationautoscaling/responses.py +++ b/moto/applicationautoscaling/responses.py @@ -15,10 +15,7 @@ def applicationautoscaling_backend(self): return applicationautoscaling_backends[self.region] def describe_scalable_targets(self): - try: - self._validate_params() - except AWSValidationException as e: - return e.response() + self._validate_params() service_namespace = self._get_param("ServiceNamespace") resource_ids = self._get_param("ResourceIds") scalable_dimension = self._get_param("ScalableDimension") @@ -37,19 +34,65 @@ def describe_scalable_targets(self): def register_scalable_target(self): """ Registers or updates a scalable target. """ - try: - self._validate_params() - self.applicationautoscaling_backend.register_scalable_target( - self._get_param("ServiceNamespace"), - self._get_param("ResourceId"), - self._get_param("ScalableDimension"), - min_capacity=self._get_int_param("MinCapacity"), - max_capacity=self._get_int_param("MaxCapacity"), - role_arn=self._get_param("RoleARN"), - suspended_state=self._get_param("SuspendedState"), - ) - except AWSValidationException as e: - return e.response() + self._validate_params() + self.applicationautoscaling_backend.register_scalable_target( + self._get_param("ServiceNamespace"), + self._get_param("ResourceId"), + self._get_param("ScalableDimension"), + min_capacity=self._get_int_param("MinCapacity"), + max_capacity=self._get_int_param("MaxCapacity"), + role_arn=self._get_param("RoleARN"), + suspended_state=self._get_param("SuspendedState"), + ) + return json.dumps({}) + + def deregister_scalable_target(self): + """ Deregisters a scalable target. 
""" + self._validate_params() + self.applicationautoscaling_backend.deregister_scalable_target( + self._get_param("ServiceNamespace"), + self._get_param("ResourceId"), + self._get_param("ScalableDimension"), + ) + return json.dumps({}) + + def put_scaling_policy(self): + policy = self.applicationautoscaling_backend.put_scaling_policy( + policy_name=self._get_param("PolicyName"), + service_namespace=self._get_param("ServiceNamespace"), + resource_id=self._get_param("ResourceId"), + scalable_dimension=self._get_param("ScalableDimension"), + policy_type=self._get_param("PolicyType"), + policy_body=self._get_param( + "StepScalingPolicyConfiguration", + self._get_param("TargetTrackingScalingPolicyConfiguration"), + ), + ) + return json.dumps({"PolicyARN": policy.policy_arn, "Alarms": []}) # ToDo + + def describe_scaling_policies(self): + ( + next_token, + policy_page, + ) = self.applicationautoscaling_backend.describe_scaling_policies( + service_namespace=self._get_param("ServiceNamespace"), + resource_id=self._get_param("ResourceId"), + scalable_dimension=self._get_param("ScalableDimension"), + max_results=self._get_param("MaxResults"), + next_token=self._get_param("NextToken"), + ) + response_obj = {"ScalingPolicies": [_build_policy(p) for p in policy_page]} + if next_token: + response_obj["NextToken"] = next_token + return json.dumps(response_obj) + + def delete_scaling_policy(self): + self.applicationautoscaling_backend.delete_scaling_policy( + policy_name=self._get_param("PolicyName"), + service_namespace=self._get_param("ServiceNamespace"), + resource_id=self._get_param("ResourceId"), + scalable_dimension=self._get_param("ScalableDimension"), + ) return json.dumps({}) def _validate_params(self): @@ -95,3 +138,22 @@ def _build_target(t): "MinCapacity": t.min_capacity, "SuspendedState": t.suspended_state, } + + +def _build_policy(p): + response = { + "PolicyARN": p.policy_arn, + "PolicyName": p.policy_name, + "ServiceNamespace": p.service_namespace, + "ResourceId": p.resource_id, + "ScalableDimension": p.scalable_dimension, + "PolicyType": p.policy_type, + "CreationTime": p.creation_time, + } + if p.policy_type == "StepScaling": + response["StepScalingPolicyConfiguration"] = p.step_scaling_policy_configuration + elif p.policy_type == "TargetTrackingScaling": + response[ + "TargetTrackingScalingPolicyConfiguration" + ] = p.target_tracking_scaling_policy_configuration + return response diff --git a/tests/test_applicationautoscaling/test_applicationautoscaling.py b/tests/test_applicationautoscaling/test_applicationautoscaling.py index ce8351990555..9b1c0b678f10 100644 --- a/tests/test_applicationautoscaling/test_applicationautoscaling.py +++ b/tests/test_applicationautoscaling/test_applicationautoscaling.py @@ -1,8 +1,10 @@ from __future__ import unicode_literals +import botocore import boto3 -from moto import mock_applicationautoscaling, mock_ecs import sure # noqa -from nose.tools import with_setup +from nose.tools import assert_raises +from moto import mock_applicationautoscaling, mock_ecs +from moto.applicationautoscaling.exceptions import AWSValidationException DEFAULT_REGION = "us-east-1" DEFAULT_ECS_CLUSTER = "default" @@ -250,6 +252,8 @@ def test_register_scalable_target_resource_id_variations(): ServiceNamespace=namespace, ResourceId=resource_id, ScalableDimension=scalable_dimension, + MinCapacity=1, + MaxCapacity=8, ) response = client.describe_scalable_targets(ServiceNamespace=namespace) response["ResponseMetadata"]["HTTPStatusCode"].should.equal(200) @@ -304,3 +308,211 @@ def 
test_register_scalable_target_updates_existing_target(): t["SuspendedState"]["ScheduledScalingSuspended"].should.equal( updated_suspended_state["ScheduledScalingSuspended"] ) + + +@mock_applicationautoscaling +def test_put_scaling_policy(): + client = boto3.client("application-autoscaling", region_name=DEFAULT_REGION) + namespace = "sagemaker" + resource_id = "endpoint/MyEndPoint/variant/MyVariant" + scalable_dimension = "sagemaker:variant:DesiredInstanceCount" + + client.register_scalable_target( + ServiceNamespace=namespace, + ResourceId=resource_id, + ScalableDimension=scalable_dimension, + MinCapacity=1, + MaxCapacity=8, + ) + + policy_name = "MyPolicy" + policy_type = "TargetTrackingScaling" + policy_body = { + "TargetValue": 70.0, + "PredefinedMetricSpecification": { + "PredefinedMetricType": "SageMakerVariantInvocationsPerInstance" + }, + } + + with assert_raises(client.exceptions.ValidationException) as e: + client.put_scaling_policy( + PolicyName=policy_name, + ServiceNamespace=namespace, + ResourceId=resource_id, + ScalableDimension=scalable_dimension, + PolicyType="ABCDEFG", + TargetTrackingScalingPolicyConfiguration=policy_body, + ) + e.exception.response["Error"]["Message"].should.match( + r"Unknown policy type .* specified." + ) + + response = client.put_scaling_policy( + PolicyName=policy_name, + ServiceNamespace=namespace, + ResourceId=resource_id, + ScalableDimension=scalable_dimension, + PolicyType=policy_type, + TargetTrackingScalingPolicyConfiguration=policy_body, + ) + response["ResponseMetadata"]["HTTPStatusCode"].should.equal(200) + response["PolicyARN"].should.match( + r"arn:aws:autoscaling:.*1:scalingPolicy:.*:resource/{}/{}:policyName/{}".format( + namespace, resource_id, policy_name + ) + ) + + +@mock_applicationautoscaling +def test_describe_scaling_policies(): + client = boto3.client("application-autoscaling", region_name=DEFAULT_REGION) + namespace = "sagemaker" + resource_id = "endpoint/MyEndPoint/variant/MyVariant" + scalable_dimension = "sagemaker:variant:DesiredInstanceCount" + + client.register_scalable_target( + ServiceNamespace=namespace, + ResourceId=resource_id, + ScalableDimension=scalable_dimension, + MinCapacity=1, + MaxCapacity=8, + ) + + policy_name = "MyPolicy" + policy_type = "TargetTrackingScaling" + policy_body = { + "TargetValue": 70.0, + "PredefinedMetricSpecification": { + "PredefinedMetricType": "SageMakerVariantInvocationsPerInstance" + }, + } + + response = client.put_scaling_policy( + PolicyName=policy_name, + ServiceNamespace=namespace, + ResourceId=resource_id, + ScalableDimension=scalable_dimension, + PolicyType=policy_type, + TargetTrackingScalingPolicyConfiguration=policy_body, + ) + response["ResponseMetadata"]["HTTPStatusCode"].should.equal(200) + + response = client.describe_scaling_policies( + PolicyNames=[policy_name], + ServiceNamespace=namespace, + ResourceId=resource_id, + ScalableDimension=scalable_dimension, + ) + response["ResponseMetadata"]["HTTPStatusCode"].should.equal(200) + policy = response["ScalingPolicies"][0] + policy["PolicyName"].should.equal(policy_name) + policy["ServiceNamespace"].should.equal(namespace) + policy["ResourceId"].should.equal(resource_id) + policy["ScalableDimension"].should.equal(scalable_dimension) + policy["PolicyType"].should.equal(policy_type) + policy["TargetTrackingScalingPolicyConfiguration"].should.equal(policy_body) + policy["PolicyARN"].should.match( + r"arn:aws:autoscaling:.*1:scalingPolicy:.*:resource/{}/{}:policyName/{}".format( + namespace, resource_id, policy_name + ) + ) + 
policy.should.have.key("CreationTime").which.should.be.a("datetime.datetime") + + +@mock_applicationautoscaling +def test_delete_scaling_policies(): + client = boto3.client("application-autoscaling", region_name=DEFAULT_REGION) + namespace = "sagemaker" + resource_id = "endpoint/MyEndPoint/variant/MyVariant" + scalable_dimension = "sagemaker:variant:DesiredInstanceCount" + + client.register_scalable_target( + ServiceNamespace=namespace, + ResourceId=resource_id, + ScalableDimension=scalable_dimension, + MinCapacity=1, + MaxCapacity=8, + ) + + policy_name = "MyPolicy" + policy_type = "TargetTrackingScaling" + policy_body = { + "TargetValue": 70.0, + "PredefinedMetricSpecification": { + "PredefinedMetricType": "SageMakerVariantInvocationsPerInstance" + }, + } + + with assert_raises(client.exceptions.ValidationException) as e: + client.delete_scaling_policy( + PolicyName=policy_name, + ServiceNamespace=namespace, + ResourceId=resource_id, + ScalableDimension=scalable_dimension, + ) + e.exception.response["Error"]["Message"].should.match(r"No scaling policy found .*") + + response = client.put_scaling_policy( + PolicyName=policy_name, + ServiceNamespace=namespace, + ResourceId=resource_id, + ScalableDimension=scalable_dimension, + PolicyType=policy_type, + TargetTrackingScalingPolicyConfiguration=policy_body, + ) + response["ResponseMetadata"]["HTTPStatusCode"].should.equal(200) + + response = client.delete_scaling_policy( + PolicyName=policy_name, + ServiceNamespace=namespace, + ResourceId=resource_id, + ScalableDimension=scalable_dimension, + ) + response["ResponseMetadata"]["HTTPStatusCode"].should.equal(200) + + response = client.describe_scaling_policies( + PolicyNames=[policy_name], + ServiceNamespace=namespace, + ResourceId=resource_id, + ScalableDimension=scalable_dimension, + ) + response["ResponseMetadata"]["HTTPStatusCode"].should.equal(200) + len(response["ScalingPolicies"]).should.equal(0) + + +@mock_applicationautoscaling +def test_deregister_scalable_target(): + client = boto3.client("application-autoscaling", region_name=DEFAULT_REGION) + namespace = "sagemaker" + resource_id = "endpoint/MyEndPoint/variant/MyVariant" + scalable_dimension = "sagemaker:variant:DesiredInstanceCount" + + client.register_scalable_target( + ServiceNamespace=namespace, + ResourceId=resource_id, + ScalableDimension=scalable_dimension, + MinCapacity=1, + MaxCapacity=8, + ) + + response = client.describe_scalable_targets(ServiceNamespace=namespace) + len(response["ScalableTargets"]).should.equal(1) + + client.deregister_scalable_target( + ServiceNamespace=namespace, + ResourceId=resource_id, + ScalableDimension=scalable_dimension, + ) + + response = client.describe_scalable_targets(ServiceNamespace=namespace) + len(response["ScalableTargets"]).should.equal(0) + + with assert_raises(client.exceptions.ValidationException) as e: + client.deregister_scalable_target( + ServiceNamespace=namespace, + ResourceId=resource_id, + ScalableDimension=scalable_dimension, + ) + e.exception.response["Error"]["Message"].should.match( + r"No scalable target found .*" + ) From f8d2ce2e6a68215195c2e06f94456c78892b6c5d Mon Sep 17 00:00:00 2001 From: jweite Date: Fri, 30 Oct 2020 17:05:06 -0400 Subject: [PATCH 596/658] Notebook Lifecycle Config create, describe and delete (#3417) * Notebook Lifecycle Config create, describe and delete * PR3417 comment changes: raise on create with duplicate name, derive a ValidationException class and use it instead of RESTException, unit test for delete non-existing. 
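A minimal sketch of the call pattern these endpoints serve (the config name and script contents below are illustrative, not taken from this change):

    import boto3
    from moto import mock_sagemaker

    @mock_sagemaker
    def lifecycle_config_roundtrip():
        client = boto3.client("sagemaker", region_name="us-east-1")
        client.create_notebook_instance_lifecycle_config(
            NotebookInstanceLifecycleConfigName="MyConfig",
            OnCreate=[{"Content": "echo on-create"}],
            OnStart=[{"Content": "echo on-start"}],
        )
        client.describe_notebook_instance_lifecycle_config(
            NotebookInstanceLifecycleConfigName="MyConfig"
        )
        # A second create with the same name raises ValidationException.
        client.delete_notebook_instance_lifecycle_config(
            NotebookInstanceLifecycleConfigName="MyConfig"
        )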
Co-authored-by: Joseph Weitekamp --- moto/sagemaker/exceptions.py | 8 +- moto/sagemaker/models.py | 206 ++++++++++-------- moto/sagemaker/responses.py | 35 +++ .../test_sagemaker_notebooks.py | 65 ++++++ 4 files changed, 220 insertions(+), 94 deletions(-) diff --git a/moto/sagemaker/exceptions.py b/moto/sagemaker/exceptions.py index dc2ce915aca1..e2d01e82e62d 100644 --- a/moto/sagemaker/exceptions.py +++ b/moto/sagemaker/exceptions.py @@ -1,7 +1,6 @@ from __future__ import unicode_literals import json -from moto.core.exceptions import RESTError - +from moto.core.exceptions import RESTError, JsonRESTError ERROR_WITH_MODEL_NAME = """{% extends 'single_error' %} {% block extra %}{{ model }}{% endblock %} @@ -45,3 +44,8 @@ def response(self): json.dumps({"__type": self.type, "message": self.message}), dict(status=self.status), ) + + +class ValidationError(JsonRESTError): + def __init__(self, message, **kwargs): + super(ValidationError, self).__init__("ValidationException", message, **kwargs) diff --git a/moto/sagemaker/models.py b/moto/sagemaker/models.py index 9c394cc23f9e..8fef306b8d3a 100644 --- a/moto/sagemaker/models.py +++ b/moto/sagemaker/models.py @@ -8,7 +8,7 @@ from moto.core import ACCOUNT_ID, BaseBackend, BaseModel from moto.core.exceptions import RESTError from moto.sagemaker import validators -from .exceptions import MissingModel +from .exceptions import MissingModel, ValidationError class BaseObject(BaseModel): @@ -285,11 +285,7 @@ def validate_instance_type(self, instance_type): message = "Value '{}' at 'instanceType' failed to satisfy constraint: Member must satisfy enum value set: {}".format( instance_type, VALID_INSTANCE_TYPES ) - raise RESTError( - error_type="ValidationException", - message=message, - template="error_json", - ) + raise ValidationError(message=message) @property def response_object(self): @@ -431,11 +427,7 @@ def __init__( def validate_volume_size_in_gb(self, volume_size_in_gb): if not validators.is_integer_between(volume_size_in_gb, mn=5, optional=True): message = "Invalid range for parameter VolumeSizeInGB, value: {}, valid range: 5-inf" - raise RESTError( - error_type="ValidationException", - message=message, - template="error_json", - ) + raise ValidationError(message=message) def validate_instance_type(self, instance_type): VALID_INSTANCE_TYPES = [ @@ -482,11 +474,7 @@ def validate_instance_type(self, instance_type): message = "Value '{}' at 'instanceType' failed to satisfy constraint: Member must satisfy enum value set: {}".format( instance_type, VALID_INSTANCE_TYPES ) - raise RESTError( - error_type="ValidationException", - message=message, - template="error_json", - ) + raise ValidationError(message=message) @property def arn(self): @@ -516,6 +504,46 @@ def stop(self): self.status = "Stopped" +class FakeSageMakerNotebookInstanceLifecycleConfig(BaseObject): + def __init__( + self, region_name, notebook_instance_lifecycle_config_name, on_create, on_start + ): + self.region_name = region_name + self.notebook_instance_lifecycle_config_name = ( + notebook_instance_lifecycle_config_name + ) + self.on_create = on_create + self.on_start = on_start + self.creation_time = self.last_modified_time = datetime.now().strftime( + "%Y-%m-%d %H:%M:%S" + ) + self.notebook_instance_lifecycle_config_arn = FakeSageMakerNotebookInstanceLifecycleConfig.arn_formatter( + self.notebook_instance_lifecycle_config_name, self.region_name + ) + + @staticmethod + def arn_formatter(notebook_instance_lifecycle_config_name, region_name): + return ( + "arn:aws:sagemaker:" + + 
region_name + + ":" + + str(ACCOUNT_ID) + + ":notebook-instance-lifecycle-configuration/" + + notebook_instance_lifecycle_config_name + ) + + @property + def response_object(self): + response_object = self.gen_response_object() + return { + k: v for k, v in response_object.items() if v is not None and v != [None] + } + + @property + def response_create(self): + return {"TrainingJobArn": self.training_job_arn} + + class SageMakerModelBackend(BaseBackend): def __init__(self, region_name=None): self._models = {} @@ -523,6 +551,7 @@ def __init__(self, region_name=None): self.endpoint_configs = {} self.endpoints = {} self.training_jobs = {} + self.notebook_instance_lifecycle_configurations = {} self.region_name = region_name def reset(self): @@ -551,9 +580,7 @@ def describe_model(self, model_name=None): message = "Could not find model '{}'.".format( Model.arn_for_model_name(model_name, self.region_name) ) - raise RESTError( - error_type="ValidationException", message=message, template="error_json", - ) + raise ValidationError(message=message) def list_models(self): models = [] @@ -617,22 +644,13 @@ def _validate_unique_notebook_instance_name(self, notebook_instance_name): message = "Cannot create a duplicate Notebook Instance ({})".format( duplicate_arn ) - raise RESTError( - error_type="ValidationException", - message=message, - template="error_json", - ) + raise ValidationError(message=message) def get_notebook_instance(self, notebook_instance_name): try: return self.notebook_instances[notebook_instance_name] except KeyError: - message = "RecordNotFound" - raise RESTError( - error_type="ValidationException", - message=message, - template="error_json", - ) + raise ValidationError(message="RecordNotFound") def get_notebook_instance_by_arn(self, arn): instances = [ @@ -641,12 +659,7 @@ def get_notebook_instance_by_arn(self, arn): if notebook_instance.arn == arn ] if len(instances) == 0: - message = "RecordNotFound" - raise RESTError( - error_type="ValidationException", - message=message, - template="error_json", - ) + raise ValidationError(message="RecordNotFound") return instances[0] def start_notebook_instance(self, notebook_instance_name): @@ -663,11 +676,7 @@ def delete_notebook_instance(self, notebook_instance_name): message = "Status ({}) not in ([Stopped, Failed]). Unable to transition to (Deleting) for Notebook Instance ({})".format( notebook_instance.status, notebook_instance.arn ) - raise RESTError( - error_type="ValidationException", - message=message, - template="error_json", - ) + raise ValidationError(message=message) del self.notebook_instances[notebook_instance_name] def get_notebook_instance_tags(self, arn): @@ -677,6 +686,60 @@ def get_notebook_instance_tags(self, arn): except RESTError: return [] + def create_notebook_instance_lifecycle_config( + self, notebook_instance_lifecycle_config_name, on_create, on_start + ): + if ( + notebook_instance_lifecycle_config_name + in self.notebook_instance_lifecycle_configurations + ): + message = "Unable to create Notebook Instance Lifecycle Config {}. 
(Details: Notebook Instance Lifecycle Config already exists.)".format( + FakeSageMakerNotebookInstanceLifecycleConfig.arn_formatter( + notebook_instance_lifecycle_config_name, self.region_name + ) + ) + raise ValidationError(message=message) + lifecycle_config = FakeSageMakerNotebookInstanceLifecycleConfig( + region_name=self.region_name, + notebook_instance_lifecycle_config_name=notebook_instance_lifecycle_config_name, + on_create=on_create, + on_start=on_start, + ) + self.notebook_instance_lifecycle_configurations[ + notebook_instance_lifecycle_config_name + ] = lifecycle_config + return lifecycle_config + + def describe_notebook_instance_lifecycle_config( + self, notebook_instance_lifecycle_config_name + ): + try: + return self.notebook_instance_lifecycle_configurations[ + notebook_instance_lifecycle_config_name + ].response_object + except KeyError: + message = "Unable to describe Notebook Instance Lifecycle Config '{}'. (Details: Notebook Instance Lifecycle Config does not exist.)".format( + FakeSageMakerNotebookInstanceLifecycleConfig.arn_formatter( + notebook_instance_lifecycle_config_name, self.region_name + ) + ) + raise ValidationError(message=message) + + def delete_notebook_instance_lifecycle_config( + self, notebook_instance_lifecycle_config_name + ): + try: + del self.notebook_instance_lifecycle_configurations[ + notebook_instance_lifecycle_config_name + ] + except KeyError: + message = "Unable to delete Notebook Instance Lifecycle Config '{}'. (Details: Notebook Instance Lifecycle Config does not exist.)".format( + FakeSageMakerNotebookInstanceLifecycleConfig.arn_formatter( + notebook_instance_lifecycle_config_name, self.region_name + ) + ) + raise ValidationError(message=message) + def create_endpoint_config( self, endpoint_config_name, @@ -706,11 +769,7 @@ def validate_production_variants(self, production_variants): production_variant["ModelName"], self.region_name ) ) - raise RESTError( - error_type="ValidationException", - message=message, - template="error_json", - ) + raise ValidationError(message=message) def describe_endpoint_config(self, endpoint_config_name): try: @@ -719,11 +778,7 @@ def describe_endpoint_config(self, endpoint_config_name): message = "Could not find endpoint configuration '{}'.".format( FakeEndpointConfig.arn_formatter(endpoint_config_name, self.region_name) ) - raise RESTError( - error_type="ValidationException", - message=message, - template="error_json", - ) + raise ValidationError(message=message) def delete_endpoint_config(self, endpoint_config_name): try: @@ -732,11 +787,7 @@ def delete_endpoint_config(self, endpoint_config_name): message = "Could not find endpoint configuration '{}'.".format( FakeEndpointConfig.arn_formatter(endpoint_config_name, self.region_name) ) - raise RESTError( - error_type="ValidationException", - message=message, - template="error_json", - ) + raise ValidationError(message=message) def create_endpoint( self, endpoint_name, endpoint_config_name, tags, @@ -747,11 +798,7 @@ def create_endpoint( message = "Could not find endpoint_config '{}'.".format( FakeEndpointConfig.arn_formatter(endpoint_config_name, self.region_name) ) - raise RESTError( - error_type="ValidationException", - message=message, - template="error_json", - ) + raise ValidationError(message=message) endpoint = FakeEndpoint( region_name=self.region_name, @@ -772,11 +819,7 @@ def describe_endpoint(self, endpoint_name): message = "Could not find endpoint configuration '{}'.".format( FakeEndpoint.arn_formatter(endpoint_name, self.region_name) ) - raise 
RESTError( - error_type="ValidationException", - message=message, - template="error_json", - ) + raise ValidationError(message=message) def delete_endpoint(self, endpoint_name): try: @@ -785,11 +828,7 @@ def delete_endpoint(self, endpoint_name): message = "Could not find endpoint configuration '{}'.".format( FakeEndpoint.arn_formatter(endpoint_name, self.region_name) ) - raise RESTError( - error_type="ValidationException", - message=message, - template="error_json", - ) + raise ValidationError(message=message) def get_endpoint_by_arn(self, arn): endpoints = [ @@ -799,11 +838,7 @@ def get_endpoint_by_arn(self, arn): ] if len(endpoints) == 0: message = "RecordNotFound" - raise RESTError( - error_type="ValidationException", - message=message, - template="error_json", - ) + raise ValidationError(message=message) return endpoints[0] def get_endpoint_tags(self, arn): @@ -865,11 +900,7 @@ def describe_training_job(self, training_job_name): message = "Could not find training job '{}'.".format( FakeTrainingJob.arn_formatter(training_job_name, self.region_name) ) - raise RESTError( - error_type="ValidationException", - message=message, - template="error_json", - ) + raise ValidationError(message=message) def delete_training_job(self, training_job_name): try: @@ -878,11 +909,7 @@ def delete_training_job(self, training_job_name): message = "Could not find endpoint configuration '{}'.".format( FakeTrainingJob.arn_formatter(training_job_name, self.region_name) ) - raise RESTError( - error_type="ValidationException", - message=message, - template="error_json", - ) + raise ValidationError(message=message) def get_training_job_by_arn(self, arn): training_jobs = [ @@ -891,12 +918,7 @@ def get_training_job_by_arn(self, arn): if training_job.training_job_arn == arn ] if len(training_jobs) == 0: - message = "RecordNotFound" - raise RESTError( - error_type="ValidationException", - message=message, - template="error_json", - ) + raise ValidationError(message="RecordNotFound") return training_jobs[0] def get_training_job_tags(self, arn): diff --git a/moto/sagemaker/responses.py b/moto/sagemaker/responses.py index 48a3a643207e..749ac787f46d 100644 --- a/moto/sagemaker/responses.py +++ b/moto/sagemaker/responses.py @@ -239,3 +239,38 @@ def delete_training_job(self): training_job_name = self._get_param("TrainingJobName") self.sagemaker_backend.delete_training_job(training_job_name) return 200, {}, json.dumps("{}") + + @amzn_request_id + def create_notebook_instance_lifecycle_config(self): + try: + lifecycle_configuration = self.sagemaker_backend.create_notebook_instance_lifecycle_config( + notebook_instance_lifecycle_config_name=self._get_param( + "NotebookInstanceLifecycleConfigName" + ), + on_create=self._get_param("OnCreate"), + on_start=self._get_param("OnStart"), + ) + response = { + "NotebookInstanceLifecycleConfigArn": lifecycle_configuration.notebook_instance_lifecycle_config_arn, + } + return 200, {}, json.dumps(response) + except AWSError as err: + return err.response() + + @amzn_request_id + def describe_notebook_instance_lifecycle_config(self): + response = self.sagemaker_backend.describe_notebook_instance_lifecycle_config( + notebook_instance_lifecycle_config_name=self._get_param( + "NotebookInstanceLifecycleConfigName" + ) + ) + return json.dumps(response) + + @amzn_request_id + def delete_notebook_instance_lifecycle_config(self): + self.sagemaker_backend.delete_notebook_instance_lifecycle_config( + notebook_instance_lifecycle_config_name=self._get_param( + "NotebookInstanceLifecycleConfigName" + ) + ) 
+ return 200, {}, json.dumps("{}") diff --git a/tests/test_sagemaker/test_sagemaker_notebooks.py b/tests/test_sagemaker/test_sagemaker_notebooks.py index 70cdc94234f9..c04618c7747e 100644 --- a/tests/test_sagemaker/test_sagemaker_notebooks.py +++ b/tests/test_sagemaker/test_sagemaker_notebooks.py @@ -225,3 +225,68 @@ def test_describe_nonexistent_model(): assert_true( e.exception.response["Error"]["Message"].startswith("Could not find model") ) + + +@mock_sagemaker +def test_notebook_instance_lifecycle_config(): + sagemaker = boto3.client("sagemaker", region_name=TEST_REGION_NAME) + + name = "MyLifeCycleConfig" + on_create = [{"Content": "Create Script Line 1"}] + on_start = [{"Content": "Start Script Line 1"}] + resp = sagemaker.create_notebook_instance_lifecycle_config( + NotebookInstanceLifecycleConfigName=name, OnCreate=on_create, OnStart=on_start + ) + assert_true( + resp["NotebookInstanceLifecycleConfigArn"].startswith("arn:aws:sagemaker") + ) + assert_true(resp["NotebookInstanceLifecycleConfigArn"].endswith(name)) + + with assert_raises(ClientError) as e: + resp = sagemaker.create_notebook_instance_lifecycle_config( + NotebookInstanceLifecycleConfigName=name, + OnCreate=on_create, + OnStart=on_start, + ) + assert_true( + e.exception.response["Error"]["Message"].endswith( + "Notebook Instance Lifecycle Config already exists.)" + ) + ) + + resp = sagemaker.describe_notebook_instance_lifecycle_config( + NotebookInstanceLifecycleConfigName=name, + ) + assert_equal(resp["NotebookInstanceLifecycleConfigName"], name) + assert_true( + resp["NotebookInstanceLifecycleConfigArn"].startswith("arn:aws:sagemaker") + ) + assert_true(resp["NotebookInstanceLifecycleConfigArn"].endswith(name)) + assert_equal(resp["OnStart"], on_start) + assert_equal(resp["OnCreate"], on_create) + assert_true(isinstance(resp["LastModifiedTime"], datetime.datetime)) + assert_true(isinstance(resp["CreationTime"], datetime.datetime)) + + sagemaker.delete_notebook_instance_lifecycle_config( + NotebookInstanceLifecycleConfigName=name, + ) + + with assert_raises(ClientError) as e: + sagemaker.describe_notebook_instance_lifecycle_config( + NotebookInstanceLifecycleConfigName=name, + ) + assert_true( + e.exception.response["Error"]["Message"].endswith( + "Notebook Instance Lifecycle Config does not exist.)" + ) + ) + + with assert_raises(ClientError) as e: + sagemaker.delete_notebook_instance_lifecycle_config( + NotebookInstanceLifecycleConfigName=name, + ) + assert_true( + e.exception.response["Error"]["Message"].endswith( + "Notebook Instance Lifecycle Config does not exist.)" + ) + ) From a3880c4c3552d0e5ee96875c8fa3749d452f8fa9 Mon Sep 17 00:00:00 2001 From: Eoin Shanaghy Date: Sat, 31 Oct 2020 15:56:24 +0000 Subject: [PATCH 597/658] Metric data query alarms (#3419) * Add support for metric data query alarms (Metrics=[..]) * Fix trailing whitespace * Allow for unordered metrics in Python 2.7 * Add describe_alarm assertions and support DatapointsToAlarm --- moto/cloudwatch/models.py | 35 ++++++ moto/cloudwatch/responses.py | 106 ++++++++++++++++-- .../test_cloudwatch/test_cloudwatch_boto3.py | 84 ++++++++++++++ 3 files changed, 216 insertions(+), 9 deletions(-) diff --git a/moto/cloudwatch/models.py b/moto/cloudwatch/models.py index 5d956215c778..b8134731393e 100644 --- a/moto/cloudwatch/models.py +++ b/moto/cloudwatch/models.py @@ -31,6 +31,33 @@ def __ne__(self, item): # Only needed on Py2; Py3 defines it implicitly return self != item +class Metric(object): + def __init__(self, metric_name, namespace, dimensions): + 
self.metric_name = metric_name + self.namespace = namespace + self.dimensions = dimensions + + +class MetricStat(object): + def __init__(self, metric, period, stat, unit): + self.metric = metric + self.period = period + self.stat = stat + self.unit = unit + + +class MetricDataQuery(object): + def __init__( + self, id, label, period, return_data, expression=None, metric_stat=None + ): + self.id = id + self.label = label + self.period = period + self.return_data = return_data + self.expression = expression + self.metric_stat = metric_stat + + def daterange(start, stop, step=timedelta(days=1), inclusive=False): """ This method will iterate from `start` to `stop` datetimes with a timedelta step of `step` @@ -65,8 +92,10 @@ def __init__( name, namespace, metric_name, + metric_data_queries, comparison_operator, evaluation_periods, + datapoints_to_alarm, period, threshold, statistic, @@ -81,8 +110,10 @@ def __init__( self.name = name self.namespace = namespace self.metric_name = metric_name + self.metric_data_queries = metric_data_queries self.comparison_operator = comparison_operator self.evaluation_periods = evaluation_periods + self.datapoints_to_alarm = datapoints_to_alarm self.period = period self.threshold = threshold self.statistic = statistic @@ -235,8 +266,10 @@ def put_metric_alarm( name, namespace, metric_name, + metric_data_queries, comparison_operator, evaluation_periods, + datapoints_to_alarm, period, threshold, statistic, @@ -252,8 +285,10 @@ def put_metric_alarm( name, namespace, metric_name, + metric_data_queries, comparison_operator, evaluation_periods, + datapoints_to_alarm, period, threshold, statistic, diff --git a/moto/cloudwatch/responses.py b/moto/cloudwatch/responses.py index f6e003ee24c0..c4b427dc6bb9 100644 --- a/moto/cloudwatch/responses.py +++ b/moto/cloudwatch/responses.py @@ -1,7 +1,7 @@ import json from moto.core.utils import amzn_request_id from moto.core.responses import BaseResponse -from .models import cloudwatch_backends +from .models import cloudwatch_backends, MetricDataQuery, MetricStat, Metric, Dimension from dateutil.parser import parse as dtparse @@ -19,8 +19,37 @@ def put_metric_alarm(self): name = self._get_param("AlarmName") namespace = self._get_param("Namespace") metric_name = self._get_param("MetricName") + metrics = self._get_multi_param("Metrics.member") + metric_data_queries = None + if metrics: + metric_data_queries = [ + MetricDataQuery( + id=metric.get("Id"), + label=metric.get("Label"), + period=metric.get("Period"), + return_data=metric.get("ReturnData"), + expression=metric.get("Expression"), + metric_stat=MetricStat( + metric=Metric( + metric_name=metric.get("MetricStat.Metric.MetricName"), + namespace=metric.get("MetricStat.Metric.Namespace"), + dimensions=[ + Dimension(name=dim["Name"], value=dim["Value"]) + for dim in metric["MetricStat.Metric.Dimensions.member"] + ], + ), + period=metric.get("MetricStat.Period"), + stat=metric.get("MetricStat.Stat"), + unit=metric.get("MetricStat.Unit"), + ) + if "MetricStat.Metric.MetricName" in metric + else None, + ) + for metric in metrics + ] comparison_operator = self._get_param("ComparisonOperator") evaluation_periods = self._get_param("EvaluationPeriods") + datapoints_to_alarm = self._get_param("DatapointsToAlarm") period = self._get_param("Period") threshold = self._get_param("Threshold") statistic = self._get_param("Statistic") @@ -37,8 +66,10 @@ def put_metric_alarm(self): name, namespace, metric_name, + metric_data_queries, comparison_operator, evaluation_periods, + datapoints_to_alarm, 
period, threshold, statistic, @@ -261,35 +292,92 @@ def set_alarm_state(self): <AlarmDescription>{{ alarm.description }}</AlarmDescription> <AlarmName>{{ alarm.name }}</AlarmName> <ComparisonOperator>{{ alarm.comparison_operator }}</ComparisonOperator> - <Dimensions> - {% for dimension in alarm.dimensions %} - <member> - <Name>{{ dimension.name }}</Name> - <Value>{{ dimension.value }}</Value> - </member> - {% endfor %} - </Dimensions> + {% if alarm.dimensions is not none %} + <Dimensions> + {% for dimension in alarm.dimensions %} + <member> + <Name>{{ dimension.name }}</Name> + <Value>{{ dimension.value }}</Value> + </member> + {% endfor %} + </Dimensions> + {% endif %} <EvaluationPeriods>{{ alarm.evaluation_periods }}</EvaluationPeriods> + {% if alarm.datapoints_to_alarm is not none %} + <DatapointsToAlarm>{{ alarm.datapoints_to_alarm }}</DatapointsToAlarm> + {% endif %} <InsufficientDataActions> {% for action in alarm.insufficient_data_actions %} <member>{{ action }}</member> {% endfor %} </InsufficientDataActions> + {% if alarm.metric_name is not none %} <MetricName>{{ alarm.metric_name }}</MetricName> + {% endif %} + {% if alarm.metric_data_queries is not none %} + <Metrics> + {% for metric in alarm.metric_data_queries %} + <member> + <Id>{{ metric.id }}</Id> + {% if metric.label is not none %} + <Label>{{ metric.label }}</Label> + {% endif %} + {% if metric.expression is not none %} + <Expression>{{ metric.expression }}</Expression> + {% endif %} + {% if metric.metric_stat is not none %} + <MetricStat> + <Metric> + <Namespace>{{ metric.metric_stat.metric.namespace }}</Namespace> + <MetricName>{{ metric.metric_stat.metric.metric_name }}</MetricName> + <Dimensions> + {% for dim in metric.metric_stat.metric.dimensions %} + <member> + <Name>{{ dim.name }}</Name> + <Value>{{ dim.value }}</Value> + </member> + {% endfor %} + </Dimensions> + </Metric> + {% if metric.metric_stat.period is not none %} + <Period>{{ metric.metric_stat.period }}</Period> + {% endif %} + <Stat>{{ metric.metric_stat.stat }}</Stat> + {% if metric.metric_stat.unit is not none %} + <Unit>{{ metric.metric_stat.unit }}</Unit> + {% endif %} + </MetricStat> + {% endif %} + {% if metric.period is not none %} + <Period>{{ metric.period }}</Period> + {% endif %} + <ReturnData>{{ metric.return_data }}</ReturnData> + </member> + {% endfor %} + </Metrics> + {% endif %} + {% if alarm.namespace is not none %} <Namespace>{{ alarm.namespace }}</Namespace> + {% endif %} <OKActions> {% for action in alarm.ok_actions %} <member>{{ action }}</member> {% endfor %} </OKActions> + {% if alarm.period is not none %} <Period>{{ alarm.period }}</Period> + {% endif %} <StateReason>{{ alarm.state_reason }}</StateReason> <StateReasonData>{{ alarm.state_reason_data }}</StateReasonData> <StateUpdatedTimestamp>{{ alarm.state_updated_timestamp }}</StateUpdatedTimestamp> <StateValue>{{ alarm.state_value }}</StateValue> + {% if alarm.statistic is not none %} <Statistic>{{ alarm.statistic }}</Statistic> + {% endif %} <Threshold>{{ alarm.threshold }}</Threshold> + {% if alarm.unit is not none %} <Unit>{{ alarm.unit }}</Unit> + {% endif %} </member> {% endfor %} </MetricAlarms>
diff --git a/tests/test_cloudwatch/test_cloudwatch_boto3.py b/tests/test_cloudwatch/test_cloudwatch_boto3.py index c38e2c77e3cb..a7a72ca4c62c 100644 --- a/tests/test_cloudwatch/test_cloudwatch_boto3.py +++ b/tests/test_cloudwatch/test_cloudwatch_boto3.py @@ -141,6 +141,90 @@ def test_describe_alarms_for_metric(): alarms.get("MetricAlarms").should.have.length_of(1) + +@mock_cloudwatch +def test_describe_alarms(): + conn = boto3.client("cloudwatch", region_name="eu-central-1") + conn.put_metric_alarm( + AlarmName="testalarm1", + MetricName="cpu", + Namespace="blah", + Period=10, + EvaluationPeriods=5, + Statistic="Average", + Threshold=2, + ComparisonOperator="GreaterThanThreshold", + ActionsEnabled=True, + ) + metric_data_queries = [ + { + "Id": "metricA", + "Expression": "metricB + metricC", + "Label": "metricA", + "ReturnData": True, + }, + { + "Id": "metricB", + "MetricStat": { + "Metric": { + "Namespace": "ns", + "MetricName": "metricB", + "Dimensions": [{"Name": "Name", "Value": "B"}], + }, + "Period": 60, + "Stat": "Sum", + }, + "ReturnData": False, + }, + { + "Id": "metricC", + "MetricStat": { + "Metric": { + "Namespace": "AWS/Lambda", + "MetricName": "metricC", + "Dimensions": [{"Name": "Name", "Value": "C"}], + }, + "Period": 60, + "Stat": "Sum", + "Unit": "Seconds", + }, + "ReturnData": False, + }, + ] + conn.put_metric_alarm( + AlarmName="testalarm2", + EvaluationPeriods=1, + DatapointsToAlarm=1, + Metrics=metric_data_queries, + 
ComparisonOperator="GreaterThanThreshold", + Threshold=1.0, + ) + alarms = conn.describe_alarms() + metric_alarms = alarms.get("MetricAlarms") + metric_alarms.should.have.length_of(2) + single_metric_alarm = [ + alarm for alarm in metric_alarms if alarm["AlarmName"] == "testalarm1" + ][0] + multiple_metric_alarm = [ + alarm for alarm in metric_alarms if alarm["AlarmName"] == "testalarm2" + ][0] + + single_metric_alarm["MetricName"].should.equal("cpu") + single_metric_alarm.shouldnt.have.property("Metrics") + single_metric_alarm["Namespace"].should.equal("blah") + single_metric_alarm["Period"].should.equal(10) + single_metric_alarm["EvaluationPeriods"].should.equal(5) + single_metric_alarm["Statistic"].should.equal("Average") + single_metric_alarm["ComparisonOperator"].should.equal("GreaterThanThreshold") + single_metric_alarm["Threshold"].should.equal(2) + + multiple_metric_alarm.shouldnt.have.property("MetricName") + multiple_metric_alarm["EvaluationPeriods"].should.equal(1) + multiple_metric_alarm["DatapointsToAlarm"].should.equal(1) + multiple_metric_alarm["Metrics"].should.equal(metric_data_queries) + multiple_metric_alarm["ComparisonOperator"].should.equal("GreaterThanThreshold") + multiple_metric_alarm["Threshold"].should.equal(1.0) + + @mock_cloudwatch def test_alarm_state(): client = boto3.client("cloudwatch", region_name="eu-central-1") From 68e3d394abeaafa0b2220d4fae6ee121c3077f38 Mon Sep 17 00:00:00 2001 From: Brian Pandola Date: Sun, 1 Nov 2020 02:16:41 -0800 Subject: [PATCH 598/658] Stepfunctions improvements (#3427) * Implement filtering for stepfunctions:ListExecutions * Add pagination to Step Functions endpoints Implements a generalized approach to pagination via a decorator method for the Step Functions endpoints. Modeled on the real AWS backend behavior, `nextToken` is a dictionary of pagination information encoded in an opaque string. With just a bit of metadata hard-coded (`utils.PAGINATION_MODEL`), backend `list` methods need only be decorated with `@paginate` and ensure that their returned entities are sorted to get full pagination support without any duplicated code polluting the model. 
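As a rough sketch (the service and entity names below are illustrative, not part of this change), opting a backend method into pagination looks like this:

    # moto/<service>/utils.py -- hypothetical entry, keyed by the method name
    PAGINATION_MODEL = {
        "list_widgets": {
            "input_token": "next_token",
            "limit_key": "max_results",
            "limit_default": 100,
            "page_ending_range_keys": ["name"],
        },
    }

    # moto/<service>/models.py -- the backend returns a sorted list; the
    # decorator pops next_token/max_results from the kwargs, slices out one
    # page, and returns (page, next_token) for the response layer to emit.
    @paginate
    def list_widgets(self):
        return sorted(self.widgets.values(), key=lambda w: w.name)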
Closes #3137 --- moto/stepfunctions/exceptions.py | 8 + moto/stepfunctions/models.py | 24 ++- moto/stepfunctions/responses.py | 49 +++++-- moto/stepfunctions/utils.py | 138 ++++++++++++++++++ .../test_stepfunctions/test_stepfunctions.py | 91 +++++++++++- 5 files changed, 284 insertions(+), 26 deletions(-) create mode 100644 moto/stepfunctions/utils.py diff --git a/moto/stepfunctions/exceptions.py b/moto/stepfunctions/exceptions.py index 4abb6a8afd35..b5fd2ddb9225 100644 --- a/moto/stepfunctions/exceptions.py +++ b/moto/stepfunctions/exceptions.py @@ -46,3 +46,11 @@ class InvalidExecutionInput(AWSError): class StateMachineDoesNotExist(AWSError): TYPE = "StateMachineDoesNotExist" STATUS = 400 + + +class InvalidToken(AWSError): + TYPE = "InvalidToken" + STATUS = 400 + + def __init__(self, message="Invalid token"): + super(InvalidToken, self).__init__("Invalid Token: {}".format(message)) diff --git a/moto/stepfunctions/models.py b/moto/stepfunctions/models.py index 03cbcf32088d..3731539f82af 100644 --- a/moto/stepfunctions/models.py +++ b/moto/stepfunctions/models.py @@ -5,7 +5,7 @@ from boto3 import Session from moto.core import ACCOUNT_ID, BaseBackend -from moto.core.utils import iso_8601_datetime_without_milliseconds +from moto.core.utils import iso_8601_datetime_with_milliseconds from uuid import uuid4 from .exceptions import ( ExecutionAlreadyExists, @@ -15,11 +15,12 @@ InvalidName, StateMachineDoesNotExist, ) +from .utils import paginate class StateMachine: def __init__(self, arn, name, definition, roleArn, tags=None): - self.creation_date = iso_8601_datetime_without_milliseconds(datetime.now()) + self.creation_date = iso_8601_datetime_with_milliseconds(datetime.now()) self.arn = arn self.name = name self.definition = definition @@ -43,7 +44,7 @@ def __init__( ) self.execution_arn = execution_arn self.name = execution_name - self.start_date = iso_8601_datetime_without_milliseconds(datetime.now()) + self.start_date = iso_8601_datetime_with_milliseconds(datetime.now()) self.state_machine_arn = state_machine_arn self.execution_input = execution_input self.status = "RUNNING" @@ -51,7 +52,7 @@ def __init__( def stop(self): self.status = "ABORTED" - self.stop_date = iso_8601_datetime_without_milliseconds(datetime.now()) + self.stop_date = iso_8601_datetime_with_milliseconds(datetime.now()) class StepFunctionBackend(BaseBackend): @@ -189,8 +190,10 @@ def create_state_machine(self, name, definition, roleArn, tags=None): self.state_machines.append(state_machine) return state_machine + @paginate def list_state_machines(self): - return self.state_machines + state_machines = sorted(self.state_machines, key=lambda x: x.creation_date) + return state_machines def describe_state_machine(self, arn): self._validate_machine_arn(arn) @@ -233,13 +236,20 @@ def stop_execution(self, execution_arn): execution.stop() return execution - def list_executions(self, state_machine_arn): - return [ + @paginate + def list_executions(self, state_machine_arn, status_filter=None): + executions = [ execution for execution in self.executions if execution.state_machine_arn == state_machine_arn ] + if status_filter: + executions = list(filter(lambda e: e.status == status_filter, executions)) + + executions = sorted(executions, key=lambda x: x.start_date, reverse=True) + return executions + def describe_execution(self, arn): self._validate_execution_arn(arn) exctn = next((x for x in self.executions if x.execution_arn == arn), None) diff --git a/moto/stepfunctions/responses.py b/moto/stepfunctions/responses.py index 
d9e438892be7..7106d81d09ba 100644 --- a/moto/stepfunctions/responses.py +++ b/moto/stepfunctions/responses.py @@ -33,19 +33,22 @@ def create_state_machine(self): @amzn_request_id def list_state_machines(self): - list_all = self.stepfunction_backend.list_state_machines() - list_all = sorted( - [ - { - "creationDate": sm.creation_date, - "name": sm.name, - "stateMachineArn": sm.arn, - } - for sm in list_all - ], - key=lambda x: x["name"], + max_results = self._get_int_param("maxResults") + next_token = self._get_param("nextToken") + results, next_token = self.stepfunction_backend.list_state_machines( + max_results=max_results, next_token=next_token ) - response = {"stateMachines": list_all} + state_machines = [ + { + "creationDate": sm.creation_date, + "name": sm.name, + "stateMachineArn": sm.arn, + } + for sm in results + ] + response = {"stateMachines": state_machines} + if next_token: + response["nextToken"] = next_token return 200, {}, json.dumps(response) @amzn_request_id @@ -110,9 +113,20 @@ def start_execution(self): @amzn_request_id def list_executions(self): + max_results = self._get_int_param("maxResults") + next_token = self._get_param("nextToken") arn = self._get_param("stateMachineArn") - state_machine = self.stepfunction_backend.describe_state_machine(arn) - executions = self.stepfunction_backend.list_executions(arn) + status_filter = self._get_param("statusFilter") + try: + state_machine = self.stepfunction_backend.describe_state_machine(arn) + results, next_token = self.stepfunction_backend.list_executions( + arn, + status_filter=status_filter, + max_results=max_results, + next_token=next_token, + ) + except AWSError as err: + return err.response() executions = [ { "executionArn": execution.execution_arn, @@ -121,9 +135,12 @@ def list_executions(self): "stateMachineArn": state_machine.arn, "status": execution.status, } - for execution in executions + for execution in results ] - return 200, {}, json.dumps({"executions": executions}) + response = {"executions": executions} + if next_token: + response["nextToken"] = next_token + return 200, {}, json.dumps(response) @amzn_request_id def describe_execution(self): diff --git a/moto/stepfunctions/utils.py b/moto/stepfunctions/utils.py new file mode 100644 index 000000000000..cf6b58c8aed8 --- /dev/null +++ b/moto/stepfunctions/utils.py @@ -0,0 +1,138 @@ +from functools import wraps + +from botocore.paginate import TokenDecoder, TokenEncoder +from six.moves import reduce + +from .exceptions import InvalidToken + +PAGINATION_MODEL = { + "list_executions": { + "input_token": "next_token", + "limit_key": "max_results", + "limit_default": 100, + "page_ending_range_keys": ["start_date", "execution_arn"], + }, + "list_state_machines": { + "input_token": "next_token", + "limit_key": "max_results", + "limit_default": 100, + "page_ending_range_keys": ["creation_date", "arn"], + }, +} + + +def paginate(original_function=None, pagination_model=None): + def pagination_decorator(func): + @wraps(func) + def pagination_wrapper(*args, **kwargs): + method = func.__name__ + model = pagination_model or PAGINATION_MODEL + pagination_config = model.get(method) + if not pagination_config: + raise ValueError( + "No pagination config for backend method: {}".format(method) + ) + # We pop the pagination arguments, so the remaining kwargs (if any) + # can be used to compute the optional parameters checksum. 
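+            # (The checksum ties a token to the non-pagination arguments it was
+            # issued with: replaying a token with, e.g., a different statusFilter
+            # changes the checksum and is rejected as an InvalidToken.)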
+ input_token = kwargs.pop(pagination_config.get("input_token"), None) + limit = kwargs.pop(pagination_config.get("limit_key"), None) + paginator = Paginator( + max_results=limit, + max_results_default=pagination_config.get("limit_default"), + starting_token=input_token, + page_ending_range_keys=pagination_config.get("page_ending_range_keys"), + param_values_to_check=kwargs, + ) + results = func(*args, **kwargs) + return paginator.paginate(results) + + return pagination_wrapper + + if original_function: + return pagination_decorator(original_function) + + return pagination_decorator + + +class Paginator(object): + def __init__( + self, + max_results=None, + max_results_default=None, + starting_token=None, + page_ending_range_keys=None, + param_values_to_check=None, + ): + self._max_results = max_results if max_results else max_results_default + self._starting_token = starting_token + self._page_ending_range_keys = page_ending_range_keys + self._param_values_to_check = param_values_to_check + self._token_encoder = TokenEncoder() + self._token_decoder = TokenDecoder() + self._param_checksum = self._calculate_parameter_checksum() + self._parsed_token = self._parse_starting_token() + + def _parse_starting_token(self): + if self._starting_token is None: + return None + # The starting token is a dict passed as a base64 encoded string. + next_token = self._starting_token + try: + next_token = self._token_decoder.decode(next_token) + except (ValueError, TypeError): + raise InvalidToken("Invalid token") + if next_token.get("parameterChecksum") != self._param_checksum: + raise InvalidToken( + "Input inconsistent with page token: {}".format(str(next_token)) + ) + return next_token + + def _calculate_parameter_checksum(self): + if not self._param_values_to_check: + return None + return reduce( + lambda x, y: x ^ y, + [hash(item) for item in self._param_values_to_check.items()], + ) + + def _check_predicate(self, item): + page_ending_range_key = self._parsed_token["pageEndingRangeKey"] + predicate_values = page_ending_range_key.split("|") + for (index, attr) in enumerate(self._page_ending_range_keys): + if not getattr(item, attr, None) == predicate_values[index]: + return False + return True + + def _build_next_token(self, next_item): + token_dict = {} + if self._param_checksum: + token_dict["parameterChecksum"] = self._param_checksum + range_keys = [] + for (index, attr) in enumerate(self._page_ending_range_keys): + range_keys.append(getattr(next_item, attr)) + token_dict["pageEndingRangeKey"] = "|".join(range_keys) + return TokenEncoder().encode(token_dict) + + def paginate(self, results): + index_start = 0 + if self._starting_token: + try: + index_start = next( + index + for (index, result) in enumerate(results) + if self._check_predicate(result) + ) + except StopIteration: + raise InvalidToken("Resource not found!") + + index_end = index_start + self._max_results + if index_end > len(results): + index_end = len(results) + + results_page = results[index_start:index_end] + + next_token = None + if results_page and index_end < len(results): + page_ending_result = results[index_end] + next_token = self._build_next_token(page_ending_result) + return results_page, next_token diff --git a/tests/test_stepfunctions/test_stepfunctions.py b/tests/test_stepfunctions/test_stepfunctions.py index 36b08487c121..e6592c2ffc58 100644 --- a/tests/test_stepfunctions/test_stepfunctions.py +++ b/tests/test_stepfunctions/test_stepfunctions.py @@ -168,15 +168,15 @@ def 
test_state_machine_list_returns_empty_list_by_default(): def test_state_machine_list_returns_created_state_machines(): client = boto3.client("stepfunctions", region_name=region) # - machine2 = client.create_state_machine( - name="name2", definition=str(simple_definition), roleArn=_get_default_role() - ) machine1 = client.create_state_machine( name="name1", definition=str(simple_definition), roleArn=_get_default_role(), tags=[{"key": "tag_key", "value": "tag_value"}], ) + machine2 = client.create_state_machine( + name="name2", definition=str(simple_definition), roleArn=_get_default_role() + ) list = client.list_state_machines() # list["ResponseMetadata"]["HTTPStatusCode"].should.equal(200) @@ -195,6 +195,28 @@ def test_state_machine_list_returns_created_state_machines(): ) +@mock_stepfunctions +def test_state_machine_list_pagination(): + client = boto3.client("stepfunctions", region_name=region) + for i in range(25): + machine_name = "StateMachine-{}".format(i) + client.create_state_machine( + name=machine_name, + definition=str(simple_definition), + roleArn=_get_default_role(), + ) + + resp = client.list_state_machines() + resp.should_not.have.key("nextToken") + resp["stateMachines"].should.have.length_of(25) + + paginator = client.get_paginator("list_state_machines") + page_iterator = paginator.paginate(maxResults=5) + for page in page_iterator: + page["stateMachines"].should.have.length_of(5) + page["stateMachines"][-1]["name"].should.contain("24") + + @mock_stepfunctions @mock_sts def test_state_machine_creation_is_idempotent_by_name(): @@ -489,6 +511,69 @@ def test_state_machine_list_executions(): executions["executions"][0].shouldnt.have("stopDate") +@mock_stepfunctions +def test_state_machine_list_executions_with_filter(): + client = boto3.client("stepfunctions", region_name=region) + sm = client.create_state_machine( + name="name", definition=str(simple_definition), roleArn=_get_default_role() + ) + for i in range(20): + execution = client.start_execution(stateMachineArn=sm["stateMachineArn"]) + if not i % 4: + client.stop_execution(executionArn=execution["executionArn"]) + + resp = client.list_executions(stateMachineArn=sm["stateMachineArn"]) + resp["executions"].should.have.length_of(20) + + resp = client.list_executions( + stateMachineArn=sm["stateMachineArn"], statusFilter="ABORTED" + ) + resp["executions"].should.have.length_of(5) + all([e["status"] == "ABORTED" for e in resp["executions"]]).should.be.true + + +@mock_stepfunctions +def test_state_machine_list_executions_with_pagination(): + client = boto3.client("stepfunctions", region_name=region) + sm = client.create_state_machine( + name="name", definition=str(simple_definition), roleArn=_get_default_role() + ) + for _ in range(100): + client.start_execution(stateMachineArn=sm["stateMachineArn"]) + + resp = client.list_executions(stateMachineArn=sm["stateMachineArn"]) + resp.should_not.have.key("nextToken") + resp["executions"].should.have.length_of(100) + + paginator = client.get_paginator("list_executions") + page_iterator = paginator.paginate( + stateMachineArn=sm["stateMachineArn"], maxResults=25 + ) + for page in page_iterator: + page["executions"].should.have.length_of(25) + + with assert_raises(ClientError) as ex: + resp = client.list_executions( + stateMachineArn=sm["stateMachineArn"], maxResults=10 + ) + client.list_executions( + stateMachineArn=sm["stateMachineArn"], + maxResults=10, + statusFilter="ABORTED", + nextToken=resp["nextToken"], + ) + ex.exception.response["Error"]["Code"].should.equal("InvalidToken") + 
ex.exception.response["Error"]["Message"].should.contain( + "Input inconsistent with page token" + ) + + with assert_raises(ClientError) as ex: + client.list_executions( + stateMachineArn=sm["stateMachineArn"], nextToken="invalid" + ) + ex.exception.response["Error"]["Code"].should.equal("InvalidToken") + + @mock_stepfunctions @mock_sts def test_state_machine_list_executions_when_none_exist(): From 9970be2309da2456e5f80cb558c62aa277dc99e0 Mon Sep 17 00:00:00 2001 From: usmangani1 Date: Mon, 2 Nov 2020 14:26:18 +0530 Subject: [PATCH 599/658] Fix: Adding alarm arn to describe alarms response (#3409) * Fix: adding alarm arn to describe alarms response * Fix:Delete subscriptions on delete topic * modified tests Co-authored-by: usmankb --- moto/cloudwatch/models.py | 7 ++++++- moto/cloudwatch/responses.py | 5 +++-- moto/cloudwatch/utils.py | 4 ++++ tests/test_cloudwatch/test_cloudwatch.py | 3 +++ tests/test_cloudwatch/test_cloudwatch_boto3.py | 4 ++++ 5 files changed, 20 insertions(+), 3 deletions(-) diff --git a/moto/cloudwatch/models.py b/moto/cloudwatch/models.py index b8134731393e..772672e0eb33 100644 --- a/moto/cloudwatch/models.py +++ b/moto/cloudwatch/models.py @@ -9,7 +9,7 @@ from datetime import datetime, timedelta from dateutil.tz import tzutc from uuid import uuid4 -from .utils import make_arn_for_dashboard +from .utils import make_arn_for_dashboard, make_arn_for_alarm from dateutil import parser from moto.core import ACCOUNT_ID as DEFAULT_ACCOUNT_ID @@ -106,8 +106,10 @@ def __init__( insufficient_data_actions, unit, actions_enabled, + region="us-east-1", ): self.name = name + self.alarm_arn = make_arn_for_alarm(region, DEFAULT_ACCOUNT_ID, name) self.namespace = namespace self.metric_name = metric_name self.metric_data_queries = metric_data_queries @@ -280,6 +282,7 @@ def put_metric_alarm( insufficient_data_actions, unit, actions_enabled, + region="us-east-1", ): alarm = FakeAlarm( name, @@ -299,7 +302,9 @@ def put_metric_alarm( insufficient_data_actions, unit, actions_enabled, + region, ) + self.alarms[name] = alarm return alarm diff --git a/moto/cloudwatch/responses.py b/moto/cloudwatch/responses.py index c4b427dc6bb9..159e2442556a 100644 --- a/moto/cloudwatch/responses.py +++ b/moto/cloudwatch/responses.py @@ -80,6 +80,7 @@ def put_metric_alarm(self): insufficient_data_actions, unit, actions_enabled, + self.region, ) template = self.response_template(PUT_METRIC_ALARM_TEMPLATE) return template.render(alarm=alarm) @@ -287,7 +288,7 @@ def set_alarm_state(self): {{ action }} {% endfor %} - {{ alarm.arn }} + {{ alarm.alarm_arn }} {{ alarm.configuration_updated_timestamp }} {{ alarm.description }} {{ alarm.name }} @@ -395,7 +396,7 @@ def set_alarm_state(self): {{ action }} {% endfor %} - {{ alarm.arn }} + {{ alarm.alarm_arn }} {{ alarm.configuration_updated_timestamp }} {{ alarm.description }} {{ alarm.name }} diff --git a/moto/cloudwatch/utils.py b/moto/cloudwatch/utils.py index ee33a44021dd..896133d043f7 100644 --- a/moto/cloudwatch/utils.py +++ b/moto/cloudwatch/utils.py @@ -3,3 +3,7 @@ def make_arn_for_dashboard(account_id, name): return "arn:aws:cloudwatch::{0}dashboard/{1}".format(account_id, name) + + +def make_arn_for_alarm(region, account_id, alarm_name): + return "arn:aws:cloudwatch:{0}:{1}:alarm:{2}".format(region, account_id, alarm_name) diff --git a/tests/test_cloudwatch/test_cloudwatch.py b/tests/test_cloudwatch/test_cloudwatch.py index b1f84ff4bf37..92e1cd498cb5 100644 --- a/tests/test_cloudwatch/test_cloudwatch.py +++ b/tests/test_cloudwatch/test_cloudwatch.py @@ -3,6 
+3,8 @@ from boto.s3.key import Key from datetime import datetime import sure # noqa +from moto.cloudwatch.utils import make_arn_for_alarm +from moto.core import ACCOUNT_ID from moto import mock_cloudwatch_deprecated, mock_s3_deprecated @@ -51,6 +53,7 @@ def test_create_alarm(): list(alarm.ok_actions).should.equal(["arn:ok"]) list(alarm.insufficient_data_actions).should.equal(["arn:insufficient"]) alarm.unit.should.equal("Seconds") + assert "tester" in alarm.alarm_arn @mock_cloudwatch_deprecated diff --git a/tests/test_cloudwatch/test_cloudwatch_boto3.py b/tests/test_cloudwatch/test_cloudwatch_boto3.py index a7a72ca4c62c..9c4757d6071b 100644 --- a/tests/test_cloudwatch/test_cloudwatch_boto3.py +++ b/tests/test_cloudwatch/test_cloudwatch_boto3.py @@ -10,6 +10,8 @@ import sure # noqa from moto import mock_cloudwatch +from moto.cloudwatch.utils import make_arn_for_alarm +from moto.core import ACCOUNT_ID @mock_cloudwatch @@ -140,6 +142,8 @@ def test_describe_alarms_for_metric(): alarms = conn.describe_alarms_for_metric(MetricName="cpu", Namespace="blah") alarms.get("MetricAlarms").should.have.length_of(1) + assert "testalarm1" in alarms.get("MetricAlarms")[0].get("AlarmArn") + @mock_cloudwatch def test_describe_alarms(): From 53c3eb62404e10eb0457abc7e6332da33c8c69df Mon Sep 17 00:00:00 2001 From: usmangani1 Date: Mon, 2 Nov 2020 15:45:40 +0530 Subject: [PATCH 600/658] Fix:SecretsManager :Error on Invalid secretID (#3413) * Fix:SecretsManager :Error on Invalid secretID * Fixed tests Co-authored-by: usmankb --- moto/secretsmanager/models.py | 7 +++--- .../test_secretsmanager.py | 21 ++++++++++++++++++ tests/test_secretsmanager/test_server.py | 22 ++++++++++++++++++- 3 files changed, 45 insertions(+), 5 deletions(-) diff --git a/moto/secretsmanager/models.py b/moto/secretsmanager/models.py index 46c1d1f05374..0782b6bd97be 100644 --- a/moto/secretsmanager/models.py +++ b/moto/secretsmanager/models.py @@ -342,13 +342,12 @@ def _add_secret( def put_secret_value(self, secret_id, secret_string, secret_binary, version_stages): - if secret_id in self.secrets.keys(): + if not self._is_valid_identifier(secret_id): + raise SecretNotFoundException() + else: secret = self.secrets[secret_id] tags = secret.tags description = secret.description - else: - tags = [] - description = "" secret = self._add_secret( secret_id, diff --git a/tests/test_secretsmanager/test_secretsmanager.py b/tests/test_secretsmanager/test_secretsmanager.py index cbcee74994b2..92f1231e9c57 100644 --- a/tests/test_secretsmanager/test_secretsmanager.py +++ b/tests/test_secretsmanager/test_secretsmanager.py @@ -641,9 +641,26 @@ def test_rotate_secret_rotation_period_too_long(): ) +@mock_secretsmanager +def test_put_secret_value_on_non_existing_secret(): + conn = boto3.client("secretsmanager", region_name="us-west-2") + with assert_raises(ClientError) as cm: + conn.put_secret_value( + SecretId=DEFAULT_SECRET_NAME, + SecretString="foosecret", + VersionStages=["AWSCURRENT"], + ) + + assert_equal( + "Secrets Manager can't find the specified secret.", + cm.exception.response["Error"]["Message"], + ) + + @mock_secretsmanager def test_put_secret_value_puts_new_secret(): conn = boto3.client("secretsmanager", region_name="us-west-2") + conn.create_secret(Name=DEFAULT_SECRET_NAME, SecretBinary=b("foosecret")) put_secret_value_dict = conn.put_secret_value( SecretId=DEFAULT_SECRET_NAME, SecretString="foosecret", @@ -662,6 +679,7 @@ def test_put_secret_value_puts_new_secret(): @mock_secretsmanager def test_put_secret_binary_value_puts_new_secret(): conn 
= boto3.client("secretsmanager", region_name="us-west-2") + conn.create_secret(Name=DEFAULT_SECRET_NAME, SecretBinary=b("foosecret")) put_secret_value_dict = conn.put_secret_value( SecretId=DEFAULT_SECRET_NAME, SecretBinary=b("foosecret"), @@ -706,6 +724,7 @@ def test_put_secret_binary_requires_either_string_or_binary(): @mock_secretsmanager def test_put_secret_value_can_get_first_version_if_put_twice(): conn = boto3.client("secretsmanager", region_name="us-west-2") + conn.create_secret(Name=DEFAULT_SECRET_NAME, SecretBinary=b("foosecret")) put_secret_value_dict = conn.put_secret_value( SecretId=DEFAULT_SECRET_NAME, SecretString="first_secret", @@ -729,6 +748,7 @@ def test_put_secret_value_can_get_first_version_if_put_twice(): @mock_secretsmanager def test_put_secret_value_versions_differ_if_same_secret_put_twice(): conn = boto3.client("secretsmanager", region_name="us-west-2") + conn.create_secret(Name=DEFAULT_SECRET_NAME, SecretBinary="foosecret") put_secret_value_dict = conn.put_secret_value( SecretId=DEFAULT_SECRET_NAME, SecretString="dupe_secret", @@ -781,6 +801,7 @@ def test_put_secret_value_maintains_description_and_tags(): @mock_secretsmanager def test_can_list_secret_version_ids(): conn = boto3.client("secretsmanager", region_name="us-west-2") + conn.create_secret(Name=DEFAULT_SECRET_NAME, SecretBinary="foosecret") put_secret_value_dict = conn.put_secret_value( SecretId=DEFAULT_SECRET_NAME, SecretString="dupe_secret", diff --git a/tests/test_secretsmanager/test_server.py b/tests/test_secretsmanager/test_server.py index 81cb641bdf4e..da41eb5fba80 100644 --- a/tests/test_secretsmanager/test_server.py +++ b/tests/test_secretsmanager/test_server.py @@ -408,7 +408,11 @@ def test_rotate_secret_rotation_lambda_arn_too_long(): def test_put_secret_value_puts_new_secret(): backend = server.create_backend_app("secretsmanager") test_client = backend.test_client() - + test_client.post( + "/", + data={"Name": DEFAULT_SECRET_NAME, "SecretString": "foosecret"}, + headers={"X-Amz-Target": "secretsmanager.CreateSecret"}, + ) test_client.post( "/", data={ @@ -458,6 +462,12 @@ def test_put_secret_value_can_get_first_version_if_put_twice(): first_secret_string = "first_secret" second_secret_string = "second_secret" + test_client.post( + "/", + data={"Name": DEFAULT_SECRET_NAME, "SecretString": "foosecret"}, + headers={"X-Amz-Target": "secretsmanager.CreateSecret"}, + ) + put_first_secret_value_json = test_client.post( "/", data={ @@ -507,6 +517,11 @@ def test_put_secret_value_versions_differ_if_same_secret_put_twice(): backend = server.create_backend_app("secretsmanager") test_client = backend.test_client() + test_client.post( + "/", + data={"Name": DEFAULT_SECRET_NAME, "SecretString": "foosecret"}, + headers={"X-Amz-Target": "secretsmanager.CreateSecret"}, + ) put_first_secret_value_json = test_client.post( "/", data={ @@ -543,6 +558,11 @@ def test_can_list_secret_version_ids(): backend = server.create_backend_app("secretsmanager") test_client = backend.test_client() + test_client.post( + "/", + data={"Name": DEFAULT_SECRET_NAME, "SecretString": "foosecret"}, + headers={"X-Amz-Target": "secretsmanager.CreateSecret"}, + ) put_first_secret_value_json = test_client.post( "/", data={ From 171130fe7bc975904bade3ca259fc89818bcfe5a Mon Sep 17 00:00:00 2001 From: Brian Pandola Date: Mon, 2 Nov 2020 03:53:03 -0800 Subject: [PATCH 601/658] Add support for CloudFormation resource `AWS::StepFunctions::StateMachine` (#3429) Closes #3402 Co-authored-by: Bert Blommers --- moto/cloudformation/parsing.py | 1 + 
moto/stepfunctions/models.py | 43 ++++++++++++++- .../test_stepfunctions/test_stepfunctions.py | 54 ++++++++++++++++++- 3 files changed, 95 insertions(+), 3 deletions(-) diff --git a/moto/cloudformation/parsing.py b/moto/cloudformation/parsing.py index 760142033029..168536f79e23 100644 --- a/moto/cloudformation/parsing.py +++ b/moto/cloudformation/parsing.py @@ -38,6 +38,7 @@ from moto.s3.utils import bucket_and_name_from_url from moto.sns import models as sns_models # noqa from moto.sqs import models as sqs_models # noqa +from moto.stepfunctions import models as stepfunctions_models # noqa # End ugly list of imports diff --git a/moto/stepfunctions/models.py b/moto/stepfunctions/models.py index 3731539f82af..9dfa33ba85fc 100644 --- a/moto/stepfunctions/models.py +++ b/moto/stepfunctions/models.py @@ -4,7 +4,7 @@ from boto3 import Session -from moto.core import ACCOUNT_ID, BaseBackend +from moto.core import ACCOUNT_ID, BaseBackend, CloudFormationModel from moto.core.utils import iso_8601_datetime_with_milliseconds from uuid import uuid4 from .exceptions import ( @@ -18,7 +18,7 @@ from .utils import paginate -class StateMachine: +class StateMachine(CloudFormationModel): def __init__(self, arn, name, definition, roleArn, tags=None): self.creation_date = iso_8601_datetime_with_milliseconds(datetime.now()) self.arn = arn @@ -27,6 +27,45 @@ def __init__(self, arn, name, definition, roleArn, tags=None): self.roleArn = roleArn self.tags = tags + @property + def physical_resource_id(self): + return self.arn + + def get_cfn_attribute(self, attribute_name): + from moto.cloudformation.exceptions import UnformattedGetAttTemplateException + + if attribute_name == "Name": + return self.name + raise UnformattedGetAttTemplateException() + + @staticmethod + def cloudformation_name_type(): + return "StateMachine" + + @staticmethod + def cloudformation_type(): + return "AWS::StepFunctions::StateMachine" + + @classmethod + def create_from_cloudformation_json( + cls, resource_name, cloudformation_json, region_name + ): + properties = cloudformation_json["Properties"] + name = properties.get("StateMachineName", resource_name) + definition = properties.get("DefinitionString", "") + role_arn = properties.get("RoleArn", "") + tags = properties.get("Tags", []) + tags_xform = [{k.lower(): v for k, v in d.items()} for d in tags] + sf_backend = stepfunction_backends[region_name] + return sf_backend.create_state_machine( + name, definition, role_arn, tags=tags_xform + ) + + @classmethod + def delete_from_cloudformation_json(cls, resource_name, _, region_name): + sf_backend = stepfunction_backends[region_name] + sf_backend.delete_state_machine(resource_name) + class Execution: def __init__( diff --git a/tests/test_stepfunctions/test_stepfunctions.py b/tests/test_stepfunctions/test_stepfunctions.py index e6592c2ffc58..1c961b882f50 100644 --- a/tests/test_stepfunctions/test_stepfunctions.py +++ b/tests/test_stepfunctions/test_stepfunctions.py @@ -8,7 +8,7 @@ from botocore.exceptions import ClientError from nose.tools import assert_raises -from moto import mock_sts, mock_stepfunctions +from moto import mock_cloudformation, mock_sts, mock_stepfunctions from moto.core import ACCOUNT_ID region = "us-east-1" @@ -709,6 +709,58 @@ def test_state_machine_describe_execution_after_stoppage(): description["stopDate"].should.be.a(datetime) +@mock_stepfunctions +@mock_cloudformation +def test_state_machine_cloudformation(): + sf = boto3.client("stepfunctions", region_name="us-east-1") + cf = boto3.resource("cloudformation", 
region_name="us-east-1") + definition = '{"StartAt": "HelloWorld", "States": {"HelloWorld": {"Type": "Task", "Resource": "arn:aws:lambda:us-east-1:111122223333;:function:HelloFunction", "End": true}}}' + role_arn = ( + "arn:aws:iam::111122223333:role/service-role/StatesExecutionRole-us-east-1;" + ) + template = { + "AWSTemplateFormatVersion": "2010-09-09", + "Description": "An example template for a Step Functions state machine.", + "Resources": { + "MyStateMachine": { + "Type": "AWS::StepFunctions::StateMachine", + "Properties": { + "StateMachineName": "HelloWorld-StateMachine", + "StateMachineType": "STANDARD", + "DefinitionString": definition, + "RoleArn": role_arn, + "Tags": [ + {"Key": "key1", "Value": "value1"}, + {"Key": "key2", "Value": "value2"}, + ], + }, + } + }, + "Outputs": { + "StateMachineArn": {"Value": {"Ref": "MyStateMachine"}}, + "StateMachineName": {"Value": {"Fn::GetAtt": ["MyStateMachine", "Name"]}}, + }, + } + cf.create_stack(StackName="test_stack", TemplateBody=json.dumps(template)) + outputs_list = cf.Stack("test_stack").outputs + output = {item["OutputKey"]: item["OutputValue"] for item in outputs_list} + state_machine = sf.describe_state_machine(stateMachineArn=output["StateMachineArn"]) + state_machine["stateMachineArn"].should.equal(output["StateMachineArn"]) + state_machine["name"].should.equal(output["StateMachineName"]) + state_machine["roleArn"].should.equal(role_arn) + state_machine["definition"].should.equal(definition) + tags = sf.list_tags_for_resource(resourceArn=output["StateMachineArn"]).get("tags") + for i, tag in enumerate(tags, 1): + tag["key"].should.equal("key{}".format(i)) + tag["value"].should.equal("value{}".format(i)) + + cf.Stack("test_stack").delete() + with assert_raises(ClientError) as ex: + sf.describe_state_machine(stateMachineArn=output["StateMachineArn"]) + ex.exception.response["Error"]["Code"].should.equal("StateMachineDoesNotExist") + ex.exception.response["Error"]["Message"].should.contain("Does Not Exist") + + def _get_account_id(): global account_id if account_id: From f57a77451c7962a9ff542aeaf84be8e915958672 Mon Sep 17 00:00:00 2001 From: usmangani1 Date: Mon, 2 Nov 2020 19:00:02 +0530 Subject: [PATCH 602/658] Fix:Added Tags for Network-ACL,RouteTable,InternetGateway (#3430) * Fix:Added Tags for Network-ACL,RouteTable,InternetGateway * Modified internet-gateway tags * Lint Co-authored-by: usmankb --- moto/ec2/models.py | 12 +++++++++--- moto/ec2/responses/internet_gateways.py | 5 ++++- moto/ec2/responses/network_acls.py | 7 +++++-- moto/ec2/responses/route_tables.py | 5 ++++- tests/test_ec2/test_internet_gateways.py | 20 +++++++++++++++++++- tests/test_ec2/test_network_acls.py | 23 +++++++++++++++++++++++ tests/test_ec2/test_route_tables.py | 19 +++++++++++++++++++ 7 files changed, 83 insertions(+), 8 deletions(-) diff --git a/moto/ec2/models.py b/moto/ec2/models.py index 6666a964ba57..7a0cef7a2173 100644 --- a/moto/ec2/models.py +++ b/moto/ec2/models.py @@ -3969,10 +3969,12 @@ def __init__(self): self.route_tables = {} super(RouteTableBackend, self).__init__() - def create_route_table(self, vpc_id, main=False): + def create_route_table(self, vpc_id, tags=[], main=False): route_table_id = random_route_table_id() vpc = self.get_vpc(vpc_id) # Validate VPC exists route_table = RouteTable(self, route_table_id, vpc_id, main=main) + for tag in tags: + route_table.add_tag(tag.get("Key"), tag.get("Value")) self.route_tables[route_table_id] = route_table # AWS creates a default local route. 
@@ -4300,8 +4302,10 @@ def __init__(self): self.internet_gateways = {} super(InternetGatewayBackend, self).__init__() - def create_internet_gateway(self): + def create_internet_gateway(self, tags=[]): igw = InternetGateway(self) + for tag in tags: + igw.add_tag(tag.get("Key"), tag.get("Value")) self.internet_gateways[igw.id] = igw return igw @@ -5299,10 +5303,12 @@ def get_network_acl(self, network_acl_id): raise InvalidNetworkAclIdError(network_acl_id) return network_acl - def create_network_acl(self, vpc_id, default=False): + def create_network_acl(self, vpc_id, tags=[], default=False): network_acl_id = random_network_acl_id() self.get_vpc(vpc_id) network_acl = NetworkAcl(self, network_acl_id, vpc_id, default) + for tag in tags: + network_acl.add_tag(tag.get("Key"), tag.get("Value")) self.network_acls[network_acl_id] = network_acl if default: self.add_default_entries(network_acl_id) diff --git a/moto/ec2/responses/internet_gateways.py b/moto/ec2/responses/internet_gateways.py index d232b3b05a18..cec29849d647 100644 --- a/moto/ec2/responses/internet_gateways.py +++ b/moto/ec2/responses/internet_gateways.py @@ -14,7 +14,10 @@ def attach_internet_gateway(self): def create_internet_gateway(self): if self.is_not_dryrun("CreateInternetGateway"): - igw = self.ec2_backend.create_internet_gateway() + tags = self._get_multi_param("TagSpecification") + if tags: + tags = tags[0].get("Tag") + igw = self.ec2_backend.create_internet_gateway(tags=tags) template = self.response_template(CREATE_INTERNET_GATEWAY_RESPONSE) return template.render(internet_gateway=igw) diff --git a/moto/ec2/responses/network_acls.py b/moto/ec2/responses/network_acls.py index c0a9c7c9006e..4b1c4c2c5e13 100644 --- a/moto/ec2/responses/network_acls.py +++ b/moto/ec2/responses/network_acls.py @@ -6,7 +6,10 @@ class NetworkACLs(BaseResponse): def create_network_acl(self): vpc_id = self._get_param("VpcId") - network_acl = self.ec2_backend.create_network_acl(vpc_id) + tags = self._get_multi_param("TagSpecification") + if tags: + tags = tags[0].get("Tag") + network_acl = self.ec2_backend.create_network_acl(vpc_id, tags=tags) template = self.response_template(CREATE_NETWORK_ACL_RESPONSE) return template.render(network_acl=network_acl) @@ -161,7 +164,7 @@ def replace_network_acl_association(self): {{ tag.resource_id }} {{ tag.resource_type }} - {{ tag.key }} + {{ tag.key}} {{ tag.value }} {% endfor %} diff --git a/moto/ec2/responses/route_tables.py b/moto/ec2/responses/route_tables.py index a91d02317d00..c929ffb9ebcc 100644 --- a/moto/ec2/responses/route_tables.py +++ b/moto/ec2/responses/route_tables.py @@ -39,7 +39,10 @@ def create_route(self): def create_route_table(self): vpc_id = self._get_param("VpcId") - route_table = self.ec2_backend.create_route_table(vpc_id) + tags = self._get_multi_param("TagSpecification") + if tags: + tags = tags[0].get("Tag") + route_table = self.ec2_backend.create_route_table(vpc_id, tags) template = self.response_template(CREATE_ROUTE_TABLE_RESPONSE) return template.render(route_table=route_table) diff --git a/tests/test_ec2/test_internet_gateways.py b/tests/test_ec2/test_internet_gateways.py index 5941643cfd36..2319bf0626a8 100644 --- a/tests/test_ec2/test_internet_gateways.py +++ b/tests/test_ec2/test_internet_gateways.py @@ -7,11 +7,13 @@ import re import boto +import boto3 + from boto.exception import EC2ResponseError import sure # noqa -from moto import mock_ec2_deprecated +from moto import mock_ec2_deprecated, mock_ec2 VPC_CIDR = "10.0.0.0/16" @@ -269,3 +271,19 @@ def 
test_igw_filter_by_attachment_state(): result = conn.get_all_internet_gateways(filters={"attachment.state": "available"}) result.should.have.length_of(1) result[0].id.should.equal(igw1.id) + + +@mock_ec2 +def test_create_internet_gateway_with_tags(): + ec2 = boto3.resource("ec2", region_name="eu-central-1") + + igw = ec2.create_internet_gateway( + TagSpecifications=[ + { + "ResourceType": "internet-gateway", + "Tags": [{"Key": "test", "Value": "TestRouteTable"}], + } + ], + ) + igw.tags.should.have.length_of(1) + igw.tags.should.equal([{"Key": "test", "Value": "TestRouteTable"}]) diff --git a/tests/test_ec2/test_network_acls.py b/tests/test_ec2/test_network_acls.py index f255fa67fe4a..c20bf75c60c2 100644 --- a/tests/test_ec2/test_network_acls.py +++ b/tests/test_ec2/test_network_acls.py @@ -304,3 +304,26 @@ def test_describe_network_acls(): "An error occurred (InvalidRouteTableID.NotFound) when calling the " "DescribeNetworkAcls operation: The routeTable ID '1' does not exist" ) + + +@mock_ec2 +def test_create_network_acl_with_tags(): + conn = boto3.client("ec2", region_name="us-west-2") + + vpc = conn.create_vpc(CidrBlock="10.0.0.0/16") + vpc_id = vpc["Vpc"]["VpcId"] + + network_acl = conn.create_network_acl( + VpcId=vpc_id, + TagSpecifications=[ + { + "ResourceType": "network-acl", + "Tags": [{"Key": "test", "Value": "TestTags"}], + } + ], + ) + + (len(network_acl.get("NetworkAcl").get("Tags"))).should.equal(1) + network_acl.get("NetworkAcl").get("Tags").should.equal( + [{"Key": "test", "Value": "TestTags"}] + ) diff --git a/tests/test_ec2/test_route_tables.py b/tests/test_ec2/test_route_tables.py index 7bb4db6959bf..a652bd1cf3a0 100644 --- a/tests/test_ec2/test_route_tables.py +++ b/tests/test_ec2/test_route_tables.py @@ -715,3 +715,22 @@ def test_create_vpc_end_point(): ) vpc_end_point["VpcEndpoint"]["VpcId"].should.equal(vpc["Vpc"]["VpcId"]) len(vpc_end_point["VpcEndpoint"]["DnsEntries"]).should.be.greater_than(0) + + +@mock_ec2 +def test_create_route_tables_with_tags(): + ec2 = boto3.resource("ec2", region_name="eu-central-1") + + vpc = ec2.create_vpc(CidrBlock="10.0.0.0/16") + + route_table = ec2.create_route_table( + VpcId=vpc.id, + TagSpecifications=[ + { + "ResourceType": "route-table", + "Tags": [{"Key": "test", "Value": "TestRouteTable"}], + } + ], + ) + + route_table.tags.should.have.length_of(1) From f584e16ab9603398e1d08db9c81eb2b71910caed Mon Sep 17 00:00:00 2001 From: Brian Pandola Date: Mon, 2 Nov 2020 09:21:09 -0800 Subject: [PATCH 603/658] Fix: eventName for a deleted record should be REMOVE instead of DELETE (#3431) Verified API documentation[1] against the real AWS backend. 
[1]: https://docs.aws.amazon.com/amazondynamodb/latest/APIReference/API_streams_Record.html#DDB-Type-streams_Record-eventName Fixes #3400 --- moto/dynamodb2/models/__init__.py | 2 +- tests/test_dynamodbstreams/test_dynamodbstreams.py | 6 +++--- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/moto/dynamodb2/models/__init__.py b/moto/dynamodb2/models/__init__.py index 6757a6859967..782ddcee9ce1 100644 --- a/moto/dynamodb2/models/__init__.py +++ b/moto/dynamodb2/models/__init__.py @@ -245,7 +245,7 @@ def add(self, old, new): if old is None: event_name = "INSERT" elif new is None: - event_name = "DELETE" + event_name = "REMOVE" else: event_name = "MODIFY" seq = len(self.items) + self.starting_sequence_number diff --git a/tests/test_dynamodbstreams/test_dynamodbstreams.py b/tests/test_dynamodbstreams/test_dynamodbstreams.py index 065d7280e763..6f66e304d3ea 100644 --- a/tests/test_dynamodbstreams/test_dynamodbstreams.py +++ b/tests/test_dynamodbstreams/test_dynamodbstreams.py @@ -155,7 +155,7 @@ def test_get_records_seq(self): assert len(resp["Records"]) == 3 assert resp["Records"][0]["eventName"] == "INSERT" assert resp["Records"][1]["eventName"] == "MODIFY" - assert resp["Records"][2]["eventName"] == "DELETE" + assert resp["Records"][2]["eventName"] == "REMOVE" sequence_number_modify = resp["Records"][1]["dynamodb"]["SequenceNumber"] @@ -175,7 +175,7 @@ def test_get_records_seq(self): resp = conn.get_records(ShardIterator=iterator_id) assert len(resp["Records"]) == 2 assert resp["Records"][0]["eventName"] == "MODIFY" - assert resp["Records"][1]["eventName"] == "DELETE" + assert resp["Records"][1]["eventName"] == "REMOVE" # check that if we get the shard iterator AFTER_SEQUENCE_NUMBER will get the DELETE event resp = conn.get_shard_iterator( @@ -187,7 +187,7 @@ def test_get_records_seq(self): iterator_id = resp["ShardIterator"] resp = conn.get_records(ShardIterator=iterator_id) assert len(resp["Records"]) == 1 - assert resp["Records"][0]["eventName"] == "DELETE" + assert resp["Records"][0]["eventName"] == "REMOVE" class TestEdges: From 76265576aca54dadf1cee7d39a67dec2e3d86370 Mon Sep 17 00:00:00 2001 From: Brian Pandola Date: Tue, 3 Nov 2020 06:18:56 -0800 Subject: [PATCH 604/658] Fix: describe/list attribute discrepancy in Secrets Manager (#3432) `secretsmanager:DescribeSecret` returns `VersionIdsToStages` `secretsmanager:ListSecrets` returns the same information in `SecretVersionsToStages` * Verified fix against real AWS backend. 
Fixes #3406 --- moto/secretsmanager/models.py | 1 + .../test_secretsmanager/test_list_secrets.py | 2 ++ .../test_secretsmanager.py | 29 +++++++++++++++++++ 3 files changed, 32 insertions(+) diff --git a/moto/secretsmanager/models.py b/moto/secretsmanager/models.py index 0782b6bd97be..0aaa2027a3c0 100644 --- a/moto/secretsmanager/models.py +++ b/moto/secretsmanager/models.py @@ -136,6 +136,7 @@ def to_dict(self): "DeletedDate": self.deleted_date, "Tags": self.tags, "VersionIdsToStages": version_id_to_stages, + "SecretVersionsToStages": version_id_to_stages, } def _form_version_ids_to_stages(self): diff --git a/tests/test_secretsmanager/test_list_secrets.py b/tests/test_secretsmanager/test_list_secrets.py index da3c4eb7efa1..5470e3e12e16 100644 --- a/tests/test_secretsmanager/test_list_secrets.py +++ b/tests/test_secretsmanager/test_list_secrets.py @@ -43,9 +43,11 @@ def test_list_secrets(): assert secrets["SecretList"][0]["ARN"] is not None assert secrets["SecretList"][0]["Name"] == "test-secret" + assert secrets["SecretList"][0]["SecretVersionsToStages"] is not None assert secrets["SecretList"][1]["ARN"] is not None assert secrets["SecretList"][1]["Name"] == "test-secret-2" assert secrets["SecretList"][1]["Tags"] == [{"Key": "a", "Value": "1"}] + assert secrets["SecretList"][1]["SecretVersionsToStages"] is not None @mock_secretsmanager diff --git a/tests/test_secretsmanager/test_secretsmanager.py b/tests/test_secretsmanager/test_secretsmanager.py index 92f1231e9c57..68a7e6742ae2 100644 --- a/tests/test_secretsmanager/test_secretsmanager.py +++ b/tests/test_secretsmanager/test_secretsmanager.py @@ -963,3 +963,32 @@ def test_tag_resource(): "Secrets Manager can't find the specified secret.", cm.exception.response["Error"]["Message"], ) + + +@mock_secretsmanager +def test_secret_versions_to_stages_attribute_discrepancy(): + client = boto3.client("secretsmanager", region_name="us-west-2") + + resp = client.create_secret(Name=DEFAULT_SECRET_NAME, SecretString="foosecret") + previous_version_id = resp["VersionId"] + + resp = client.put_secret_value( + SecretId=DEFAULT_SECRET_NAME, + SecretString="dupe_secret", + VersionStages=["AWSCURRENT"], + ) + current_version_id = resp["VersionId"] + + secret = client.describe_secret(SecretId=DEFAULT_SECRET_NAME) + describe_vtos = secret["VersionIdsToStages"] + assert describe_vtos[current_version_id] == ["AWSCURRENT"] + assert describe_vtos[previous_version_id] == ["AWSPREVIOUS"] + + secret = client.list_secrets( + Filters=[{"Key": "name", "Values": [DEFAULT_SECRET_NAME]}] + ).get("SecretList")[0] + list_vtos = secret["SecretVersionsToStages"] + assert list_vtos[current_version_id] == ["AWSCURRENT"] + assert list_vtos[previous_version_id] == ["AWSPREVIOUS"] + + assert describe_vtos == list_vtos From 574f46e2120f4b0fc2a675036787b7932ae7d4e3 Mon Sep 17 00:00:00 2001 From: Brian Pandola Date: Thu, 5 Nov 2020 02:16:48 -0800 Subject: [PATCH 605/658] Implement additional Step Functions endpoints (#3437) * Implement tagging/untagging for State Machine resources * Implement `stepfunctions:UpdateStateMachine` endpoint --- IMPLEMENTATION_COVERAGE.md | 6 +- moto/stepfunctions/exceptions.py | 8 ++ moto/stepfunctions/models.py | 56 ++++++++- moto/stepfunctions/responses.py | 36 ++++++ .../test_stepfunctions/test_stepfunctions.py | 106 ++++++++++++++++++ 5 files changed, 208 insertions(+), 4 deletions(-) diff --git a/IMPLEMENTATION_COVERAGE.md b/IMPLEMENTATION_COVERAGE.md index a108361d36f2..110cd7b6b8a4 100644 --- a/IMPLEMENTATION_COVERAGE.md +++ 
b/IMPLEMENTATION_COVERAGE.md @@ -8125,9 +8125,9 @@ - [ ] send_task_success - [X] start_execution - [X] stop_execution -- [ ] tag_resource -- [ ] untag_resource -- [ ] update_state_machine +- [X] tag_resource +- [X] untag_resource +- [X] update_state_machine
## storagegateway diff --git a/moto/stepfunctions/exceptions.py b/moto/stepfunctions/exceptions.py index b5fd2ddb9225..c184e2cc7e57 100644 --- a/moto/stepfunctions/exceptions.py +++ b/moto/stepfunctions/exceptions.py @@ -54,3 +54,11 @@ class InvalidToken(AWSError): def __init__(self, message="Invalid token"): super(InvalidToken, self).__init__("Invalid Token: {}".format(message)) + + +class ResourceNotFound(AWSError): + TYPE = "ResourceNotFound" + STATUS = 400 + + def __init__(self, arn): + super(ResourceNotFound, self).__init__("Resource not found: '{}'".format(arn)) diff --git a/moto/stepfunctions/models.py b/moto/stepfunctions/models.py index 9dfa33ba85fc..86c76c98a5da 100644 --- a/moto/stepfunctions/models.py +++ b/moto/stepfunctions/models.py @@ -13,6 +13,7 @@ InvalidArn, InvalidExecutionInput, InvalidName, + ResourceNotFound, StateMachineDoesNotExist, ) from .utils import paginate @@ -21,11 +22,41 @@ class StateMachine(CloudFormationModel): def __init__(self, arn, name, definition, roleArn, tags=None): self.creation_date = iso_8601_datetime_with_milliseconds(datetime.now()) + self.update_date = self.creation_date self.arn = arn self.name = name self.definition = definition self.roleArn = roleArn - self.tags = tags + self.tags = [] + if tags: + self.add_tags(tags) + + def update(self, **kwargs): + for key, value in kwargs.items(): + if value is not None: + setattr(self, key, value) + self.update_date = iso_8601_datetime_with_milliseconds(datetime.now()) + + def add_tags(self, tags): + merged_tags = [] + for tag in self.tags: + replacement_index = next( + (index for (index, d) in enumerate(tags) if d["key"] == tag["key"]), + None, + ) + if replacement_index is not None: + replacement = tags.pop(replacement_index) + merged_tags.append(replacement) + else: + merged_tags.append(tag) + for tag in tags: + merged_tags.append(tag) + self.tags = merged_tags + return self.tags + + def remove_tags(self, tag_keys): + self.tags = [tag_set for tag_set in self.tags if tag_set["key"] not in tag_keys] + return self.tags @property def physical_resource_id(self): @@ -249,6 +280,15 @@ def delete_state_machine(self, arn): if sm: self.state_machines.remove(sm) + def update_state_machine(self, arn, definition=None, role_arn=None): + sm = self.describe_state_machine(arn) + updates = { + "definition": definition, + "roleArn": role_arn, + } + sm.update(**updates) + return sm + def start_execution(self, state_machine_arn, name=None, execution_input=None): state_machine_name = self.describe_state_machine(state_machine_arn).name self._ensure_execution_name_doesnt_exist(name) @@ -296,6 +336,20 @@ def describe_execution(self, arn): raise ExecutionDoesNotExist("Execution Does Not Exist: '" + arn + "'") return exctn + def tag_resource(self, resource_arn, tags): + try: + state_machine = self.describe_state_machine(resource_arn) + state_machine.add_tags(tags) + except StateMachineDoesNotExist: + raise ResourceNotFound(resource_arn) + + def untag_resource(self, resource_arn, tag_keys): + try: + state_machine = self.describe_state_machine(resource_arn) + state_machine.remove_tags(tag_keys) + except StateMachineDoesNotExist: + raise ResourceNotFound(resource_arn) + def reset(self): region_name = self.region_name self.__dict__ = {} diff --git a/moto/stepfunctions/responses.py b/moto/stepfunctions/responses.py index 7106d81d09ba..7eae8091b982 100644 --- a/moto/stepfunctions/responses.py +++ b/moto/stepfunctions/responses.py @@ -83,6 +83,22 @@ def delete_state_machine(self): except AWSError as err: return err.response() + 
@amzn_request_id + def update_state_machine(self): + arn = self._get_param("stateMachineArn") + definition = self._get_param("definition") + role_arn = self._get_param("roleArn") + try: + state_machine = self.stepfunction_backend.update_state_machine( + arn=arn, definition=definition, role_arn=role_arn + ) + response = { + "updateDate": state_machine.update_date, + } + return 200, {}, json.dumps(response) + except AWSError as err: + return err.response() + @amzn_request_id def list_tags_for_resource(self): arn = self._get_param("resourceArn") @@ -94,6 +110,26 @@ def list_tags_for_resource(self): response = {"tags": tags} return 200, {}, json.dumps(response) + @amzn_request_id + def tag_resource(self): + arn = self._get_param("resourceArn") + tags = self._get_param("tags", []) + try: + self.stepfunction_backend.tag_resource(arn, tags) + except AWSError as err: + return err.response() + return 200, {}, json.dumps({}) + + @amzn_request_id + def untag_resource(self): + arn = self._get_param("resourceArn") + tag_keys = self._get_param("tagKeys", []) + try: + self.stepfunction_backend.untag_resource(arn, tag_keys) + except AWSError as err: + return err.response() + return 200, {}, json.dumps({}) + @amzn_request_id def start_execution(self): arn = self._get_param("stateMachineArn") diff --git a/tests/test_stepfunctions/test_stepfunctions.py b/tests/test_stepfunctions/test_stepfunctions.py index 1c961b882f50..0bea43084f46 100644 --- a/tests/test_stepfunctions/test_stepfunctions.py +++ b/tests/test_stepfunctions/test_stepfunctions.py @@ -155,6 +155,33 @@ def test_state_machine_creation_requires_valid_role_arn(): ) +@mock_stepfunctions +@mock_sts +def test_update_state_machine(): + client = boto3.client("stepfunctions", region_name=region) + + resp = client.create_state_machine( + name="test", definition=str(simple_definition), roleArn=_get_default_role() + ) + state_machine_arn = resp["stateMachineArn"] + + updated_role = _get_default_role() + "-updated" + updated_definition = str(simple_definition).replace( + "DefaultState", "DefaultStateUpdated" + ) + resp = client.update_state_machine( + stateMachineArn=state_machine_arn, + definition=updated_definition, + roleArn=updated_role, + ) + resp["ResponseMetadata"]["HTTPStatusCode"].should.equal(200) + resp["updateDate"].should.be.a(datetime) + + desc = client.describe_state_machine(stateMachineArn=state_machine_arn) + desc["definition"].should.equal(updated_definition) + desc["roleArn"].should.equal(updated_role) + + @mock_stepfunctions def test_state_machine_list_returns_empty_list_by_default(): client = boto3.client("stepfunctions", region_name=region) @@ -326,6 +353,85 @@ def test_state_machine_can_deleted_nonexisting_machine(): sm_list["stateMachines"].should.have.length_of(0) +@mock_stepfunctions +def test_state_machine_tagging_non_existent_resource_fails(): + client = boto3.client("stepfunctions", region_name=region) + non_existent_arn = "arn:aws:states:{region}:{account}:stateMachine:non-existent".format( + region=region, account=ACCOUNT_ID + ) + with assert_raises(ClientError) as ex: + client.tag_resource(resourceArn=non_existent_arn, tags=[]) + ex.exception.response["Error"]["Code"].should.equal("ResourceNotFound") + ex.exception.response["Error"]["Message"].should.contain(non_existent_arn) + + +@mock_stepfunctions +def test_state_machine_untagging_non_existent_resource_fails(): + client = boto3.client("stepfunctions", region_name=region) + non_existent_arn = "arn:aws:states:{region}:{account}:stateMachine:non-existent".format( + 
region=region, account=ACCOUNT_ID + ) + with assert_raises(ClientError) as ex: + client.untag_resource(resourceArn=non_existent_arn, tagKeys=[]) + ex.exception.response["Error"]["Code"].should.equal("ResourceNotFound") + ex.exception.response["Error"]["Message"].should.contain(non_existent_arn) + + +@mock_stepfunctions +@mock_sts +def test_state_machine_tagging(): + client = boto3.client("stepfunctions", region_name=region) + tags = [ + {"key": "tag_key1", "value": "tag_value1"}, + {"key": "tag_key2", "value": "tag_value2"}, + ] + machine = client.create_state_machine( + name="test", definition=str(simple_definition), roleArn=_get_default_role(), + ) + client.tag_resource(resourceArn=machine["stateMachineArn"], tags=tags) + resp = client.list_tags_for_resource(resourceArn=machine["stateMachineArn"]) + resp["tags"].should.equal(tags) + + tags_update = [ + {"key": "tag_key1", "value": "tag_value1_new"}, + {"key": "tag_key3", "value": "tag_value3"}, + ] + client.tag_resource(resourceArn=machine["stateMachineArn"], tags=tags_update) + resp = client.list_tags_for_resource(resourceArn=machine["stateMachineArn"]) + tags_expected = [ + tags_update[0], + tags[1], + tags_update[1], + ] + resp["tags"].should.equal(tags_expected) + + +@mock_stepfunctions +@mock_sts +def test_state_machine_untagging(): + client = boto3.client("stepfunctions", region_name=region) + tags = [ + {"key": "tag_key1", "value": "tag_value1"}, + {"key": "tag_key2", "value": "tag_value2"}, + {"key": "tag_key3", "value": "tag_value3"}, + ] + machine = client.create_state_machine( + name="test", + definition=str(simple_definition), + roleArn=_get_default_role(), + tags=tags, + ) + resp = client.list_tags_for_resource(resourceArn=machine["stateMachineArn"]) + resp["tags"].should.equal(tags) + tags_to_delete = ["tag_key1", "tag_key2"] + client.untag_resource( + resourceArn=machine["stateMachineArn"], tagKeys=tags_to_delete + ) + resp = client.list_tags_for_resource(resourceArn=machine["stateMachineArn"]) + expected_tags = [tag for tag in tags if tag["key"] not in tags_to_delete] + resp["tags"].should.equal(expected_tags) + + @mock_stepfunctions @mock_sts def test_state_machine_list_tags_for_created_machine(): From 032b9c40088e406b74b6a9aed56f791fd45b5557 Mon Sep 17 00:00:00 2001 From: Bert Blommers Date: Thu, 5 Nov 2020 11:20:18 +0000 Subject: [PATCH 606/658] Tech Debt - Remove duplicate AWSError classes --- moto/acm/models.py | 14 +----------- moto/applicationautoscaling/exceptions.py | 17 -------------- moto/batch/exceptions.py | 28 +++++------------------ moto/core/exceptions.py | 17 ++++++++++++++ moto/sagemaker/exceptions.py | 17 -------------- moto/sagemaker/responses.py | 2 +- moto/stepfunctions/exceptions.py | 18 +-------------- moto/xray/exceptions.py | 23 ------------------- moto/xray/models.py | 3 ++- moto/xray/responses.py | 3 ++- 10 files changed, 30 insertions(+), 112 deletions(-) diff --git a/moto/acm/models.py b/moto/acm/models.py index 6e4ac150892d..3963b88c220d 100644 --- a/moto/acm/models.py +++ b/moto/acm/models.py @@ -1,9 +1,9 @@ from __future__ import unicode_literals import re -import json import datetime from moto.core import BaseBackend, BaseModel +from moto.core.exceptions import AWSError from moto.ec2 import ec2_backends from .utils import make_arn_for_certificate @@ -50,18 +50,6 @@ def datetime_to_epoch(date): return int((date - datetime.datetime(1970, 1, 1)).total_seconds()) -class AWSError(Exception): - TYPE = None - STATUS = 400 - - def __init__(self, message): - self.message = message - - def 
response(self): - resp = {"__type": self.TYPE, "message": self.message} - return json.dumps(resp), dict(status=self.STATUS) - - class AWSValidationException(AWSError): TYPE = "ValidationException" diff --git a/moto/applicationautoscaling/exceptions.py b/moto/applicationautoscaling/exceptions.py index 8d5fb3c0c11b..e409da4e7600 100644 --- a/moto/applicationautoscaling/exceptions.py +++ b/moto/applicationautoscaling/exceptions.py @@ -1,24 +1,7 @@ from __future__ import unicode_literals -import json from moto.core.exceptions import JsonRESTError -class AWSError(Exception): - """ Copied from acm/models.py; this class now exists in >5 locations, - maybe this should be centralised for use by any module? - """ - - TYPE = None - STATUS = 400 - - def __init__(self, message): - self.message = message - - def response(self): - resp = {"__type": self.TYPE, "message": self.message} - return json.dumps(resp), dict(status=self.STATUS) - - class AWSValidationException(JsonRESTError): def __init__(self, message, **kwargs): super(AWSValidationException, self).__init__( diff --git a/moto/batch/exceptions.py b/moto/batch/exceptions.py index c411f3fceba8..5d3ea3fd0cd0 100644 --- a/moto/batch/exceptions.py +++ b/moto/batch/exceptions.py @@ -1,40 +1,24 @@ from __future__ import unicode_literals -import json - - -class AWSError(Exception): - CODE = None - STATUS = 400 - - def __init__(self, message, code=None, status=None): - self.message = message - self.code = code if code is not None else self.CODE - self.status = status if status is not None else self.STATUS - - def response(self): - return ( - json.dumps({"__type": self.code, "message": self.message}), - dict(status=self.status), - ) +from moto.core.exceptions import AWSError class InvalidRequestException(AWSError): - CODE = "InvalidRequestException" + TYPE = "InvalidRequestException" class InvalidParameterValueException(AWSError): - CODE = "InvalidParameterValue" + TYPE = "InvalidParameterValue" class ValidationError(AWSError): - CODE = "ValidationError" + TYPE = "ValidationError" class InternalFailure(AWSError): - CODE = "InternalFailure" + TYPE = "InternalFailure" STATUS = 500 class ClientException(AWSError): - CODE = "ClientException" + TYPE = "ClientException" STATUS = 400 diff --git a/moto/core/exceptions.py b/moto/core/exceptions.py index ea91eda63661..6938f9bf12ec 100644 --- a/moto/core/exceptions.py +++ b/moto/core/exceptions.py @@ -2,6 +2,7 @@ from werkzeug.exceptions import HTTPException from jinja2 import DictLoader, Environment +import json SINGLE_ERROR_RESPONSE = """ @@ -109,6 +110,22 @@ def __init__(self): ) +class AWSError(Exception): + TYPE = None + STATUS = 400 + + def __init__(self, message, type=None, status=None): + self.message = message + self.type = type if type is not None else self.TYPE + self.status = status if status is not None else self.STATUS + + def response(self): + return ( + json.dumps({"__type": self.type, "message": self.message}), + dict(status=self.status), + ) + + class InvalidNextTokenException(JsonRESTError): """For AWS Config resource listing. 
This will be used by many different resource types, and so it is in moto.core.""" diff --git a/moto/sagemaker/exceptions.py b/moto/sagemaker/exceptions.py index e2d01e82e62d..0331fee89982 100644 --- a/moto/sagemaker/exceptions.py +++ b/moto/sagemaker/exceptions.py @@ -1,5 +1,4 @@ from __future__ import unicode_literals -import json from moto.core.exceptions import RESTError, JsonRESTError ERROR_WITH_MODEL_NAME = """{% extends 'single_error' %} @@ -30,22 +29,6 @@ def __init__(self, *args, **kwargs): ) -class AWSError(Exception): - TYPE = None - STATUS = 400 - - def __init__(self, message, type=None, status=None): - self.message = message - self.type = type if type is not None else self.TYPE - self.status = status if status is not None else self.STATUS - - def response(self): - return ( - json.dumps({"__type": self.type, "message": self.message}), - dict(status=self.status), - ) - - class ValidationError(JsonRESTError): def __init__(self, message, **kwargs): super(ValidationError, self).__init__("ValidationException", message, **kwargs) diff --git a/moto/sagemaker/responses.py b/moto/sagemaker/responses.py index 749ac787f46d..d5d2cab435d5 100644 --- a/moto/sagemaker/responses.py +++ b/moto/sagemaker/responses.py @@ -2,9 +2,9 @@ import json +from moto.core.exceptions import AWSError from moto.core.responses import BaseResponse from moto.core.utils import amzn_request_id -from .exceptions import AWSError from .models import sagemaker_backends diff --git a/moto/stepfunctions/exceptions.py b/moto/stepfunctions/exceptions.py index b5fd2ddb9225..a24c150083d2 100644 --- a/moto/stepfunctions/exceptions.py +++ b/moto/stepfunctions/exceptions.py @@ -1,21 +1,5 @@ from __future__ import unicode_literals -import json - - -class AWSError(Exception): - TYPE = None - STATUS = 400 - - def __init__(self, message, type=None, status=None): - self.message = message - self.type = type if type is not None else self.TYPE - self.status = status if status is not None else self.STATUS - - def response(self): - return ( - json.dumps({"__type": self.type, "message": self.message}), - dict(status=self.status), - ) +from moto.core.exceptions import AWSError class ExecutionAlreadyExists(AWSError): diff --git a/moto/xray/exceptions.py b/moto/xray/exceptions.py index 8b5c87e36785..2449cb45da17 100644 --- a/moto/xray/exceptions.py +++ b/moto/xray/exceptions.py @@ -1,26 +1,3 @@ -import json - - -class AWSError(Exception): - CODE = None - STATUS = 400 - - def __init__(self, message, code=None, status=None): - self.message = message - self.code = code if code is not None else self.CODE - self.status = status if status is not None else self.STATUS - - def response(self): - return ( - json.dumps({"__type": self.code, "message": self.message}), - dict(status=self.status), - ) - - -class InvalidRequestException(AWSError): - CODE = "InvalidRequestException" - - class BadSegmentException(Exception): def __init__(self, seg_id=None, code=None, message=None): self.id = seg_id diff --git a/moto/xray/models.py b/moto/xray/models.py index 39d8ae2d4b93..6352fa37c78c 100644 --- a/moto/xray/models.py +++ b/moto/xray/models.py @@ -6,7 +6,8 @@ from collections import defaultdict import json from moto.core import BaseBackend, BaseModel -from .exceptions import BadSegmentException, AWSError +from moto.core.exceptions import AWSError +from .exceptions import BadSegmentException class TelemetryRecords(BaseModel): diff --git a/moto/xray/responses.py b/moto/xray/responses.py index 118f2de2f1b7..aaf56c80a3cd 100644 --- a/moto/xray/responses.py +++ 
b/moto/xray/responses.py @@ -3,10 +3,11 @@ import datetime from moto.core.responses import BaseResponse +from moto.core.exceptions import AWSError from six.moves.urllib.parse import urlsplit from .models import xray_backends -from .exceptions import AWSError, BadSegmentException +from .exceptions import BadSegmentException class XRayResponse(BaseResponse): From b7cf2d4478d8b6c2f77189dc792ad08e6774dc54 Mon Sep 17 00:00:00 2001 From: Peter Lithammer Date: Thu, 5 Nov 2020 15:10:23 +0100 Subject: [PATCH 607/658] ecr: Fix "imageDigest" value in ecr.list_images() response (#3436) --- moto/ecr/models.py | 2 +- tests/test_ecr/test_ecr_boto3.py | 4 ++++ 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/moto/ecr/models.py b/moto/ecr/models.py index 33a0201fde00..299ed48a797a 100644 --- a/moto/ecr/models.py +++ b/moto/ecr/models.py @@ -164,7 +164,7 @@ def response_object(self): def response_list_object(self): response_object = self.gen_response_object() response_object["imageTag"] = self.image_tag - response_object["imageDigest"] = "i don't know" + response_object["imageDigest"] = self.get_image_digest() return { k: v for k, v in response_object.items() if v is not None and v != [None] } diff --git a/tests/test_ecr/test_ecr_boto3.py b/tests/test_ecr/test_ecr_boto3.py index 6c6840a7ed74..fd678f661c8f 100644 --- a/tests/test_ecr/test_ecr_boto3.py +++ b/tests/test_ecr/test_ecr_boto3.py @@ -318,6 +318,9 @@ def test_list_images(): type(response["imageIds"]).should.be(list) len(response["imageIds"]).should.be(3) + for image in response["imageIds"]: + image["imageDigest"].should.contain("sha") + image_tags = ["latest", "v1", "v2"] set( [ @@ -331,6 +334,7 @@ def test_list_images(): type(response["imageIds"]).should.be(list) len(response["imageIds"]).should.be(1) response["imageIds"][0]["imageTag"].should.equal("oldest") + response["imageIds"][0]["imageDigest"].should.contain("sha") @mock_ecr From 725ad7571d6446d6b989008df8c885d6e48cce86 Mon Sep 17 00:00:00 2001 From: pwrmiller Date: Fri, 6 Nov 2020 03:23:47 -0500 Subject: [PATCH 608/658] Adds some basic endpoints for Amazon Forecast (#3434) * Adding some basic endpoints for Amazon Forecast, including all dataset group related endpoints * Adds better testing around exception handling in forecast endpoint, removes some unused code, and cleans up validation code * Fix unused imports, optimize imports, code style fixes Co-authored-by: Paul Miller --- IMPLEMENTATION_COVERAGE.md | 12 +- README.md | 1 + docs/index.rst | 2 + moto/__init__.py | 1 + moto/backends.py | 1 + moto/forecast/__init__.py | 7 + moto/forecast/exceptions.py | 43 ++++++ moto/forecast/models.py | 173 +++++++++++++++++++++ moto/forecast/responses.py | 92 +++++++++++ moto/forecast/urls.py | 7 + tests/test_forecast/__init__.py | 0 tests/test_forecast/test_forecast.py | 222 +++++++++++++++++++++++++++ 12 files changed, 555 insertions(+), 6 deletions(-) create mode 100644 moto/forecast/__init__.py create mode 100644 moto/forecast/exceptions.py create mode 100644 moto/forecast/models.py create mode 100644 moto/forecast/responses.py create mode 100644 moto/forecast/urls.py create mode 100644 tests/test_forecast/__init__.py create mode 100644 tests/test_forecast/test_forecast.py diff --git a/IMPLEMENTATION_COVERAGE.md b/IMPLEMENTATION_COVERAGE.md index 110cd7b6b8a4..9ea4330fa2fb 100644 --- a/IMPLEMENTATION_COVERAGE.md +++ b/IMPLEMENTATION_COVERAGE.md @@ -3518,34 +3518,34 @@ ## forecast
-0% implemented +19% implemented - [ ] create_dataset -- [ ] create_dataset_group +- [X] create_dataset_group - [ ] create_dataset_import_job - [ ] create_forecast - [ ] create_forecast_export_job - [ ] create_predictor - [ ] delete_dataset -- [ ] delete_dataset_group +- [X] delete_dataset_group - [ ] delete_dataset_import_job - [ ] delete_forecast - [ ] delete_forecast_export_job - [ ] delete_predictor - [ ] describe_dataset -- [ ] describe_dataset_group +- [X] describe_dataset_group - [ ] describe_dataset_import_job - [ ] describe_forecast - [ ] describe_forecast_export_job - [ ] describe_predictor - [ ] get_accuracy_metrics -- [ ] list_dataset_groups +- [X] list_dataset_groups - [ ] list_dataset_import_jobs - [ ] list_datasets - [ ] list_forecast_export_jobs - [ ] list_forecasts - [ ] list_predictors -- [ ] update_dataset_group +- [X] update_dataset_group
## forecastquery diff --git a/README.md b/README.md index 3915a85cdbb5..784976a4aa52 100644 --- a/README.md +++ b/README.md @@ -102,6 +102,7 @@ It gets even better! Moto isn't just for Python code and it isn't just for S3. L | ELB | @mock_elb | core endpoints done | | | ELBv2 | @mock_elbv2 | all endpoints done | | | EMR | @mock_emr | core endpoints done | | +| Forecast | @mock_forecast | some core endpoints done | | | Glacier | @mock_glacier | core endpoints done | | | IAM | @mock_iam | core endpoints done | | | IoT | @mock_iot | core endpoints done | | diff --git a/docs/index.rst b/docs/index.rst index 22ac97228ab5..4f2d7e090812 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -60,6 +60,8 @@ Currently implemented Services: +---------------------------+-----------------------+------------------------------------+ | EMR | @mock_emr | core endpoints done | +---------------------------+-----------------------+------------------------------------+ +| Forecast | @mock_forecast | basic endpoints done | ++---------------------------+-----------------------+------------------------------------+ | Glacier | @mock_glacier | core endpoints done | +---------------------------+-----------------------+------------------------------------+ | IAM | @mock_iam | core endpoints done | diff --git a/moto/__init__.py b/moto/__init__.py index c73e111a0b76..fd467cbf8296 100644 --- a/moto/__init__.py +++ b/moto/__init__.py @@ -63,6 +63,7 @@ def f(*args, **kwargs): mock_emr = lazy_load(".emr", "mock_emr") mock_emr_deprecated = lazy_load(".emr", "mock_emr_deprecated") mock_events = lazy_load(".events", "mock_events") +mock_forecast = lazy_load(".forecast", "mock_forecast") mock_glacier = lazy_load(".glacier", "mock_glacier") mock_glacier_deprecated = lazy_load(".glacier", "mock_glacier_deprecated") mock_glue = lazy_load(".glue", "mock_glue") diff --git a/moto/backends.py b/moto/backends.py index e76a89ccb3bc..c8bac72fc570 100644 --- a/moto/backends.py +++ b/moto/backends.py @@ -75,6 +75,7 @@ "kinesisvideoarchivedmedia", "kinesisvideoarchivedmedia_backends", ), + "forecast": ("forecast", "forecast_backends"), } diff --git a/moto/forecast/__init__.py b/moto/forecast/__init__.py new file mode 100644 index 000000000000..75b23b94a01c --- /dev/null +++ b/moto/forecast/__init__.py @@ -0,0 +1,7 @@ +from __future__ import unicode_literals + +from .models import forecast_backends +from ..core.models import base_decorator + +forecast_backend = forecast_backends["us-east-1"] +mock_forecast = base_decorator(forecast_backends) diff --git a/moto/forecast/exceptions.py b/moto/forecast/exceptions.py new file mode 100644 index 000000000000..ad86e90fcf22 --- /dev/null +++ b/moto/forecast/exceptions.py @@ -0,0 +1,43 @@ +from __future__ import unicode_literals + +import json + + +class AWSError(Exception): + TYPE = None + STATUS = 400 + + def __init__(self, message, type=None, status=None): + self.message = message + self.type = type if type is not None else self.TYPE + self.status = status if status is not None else self.STATUS + + def response(self): + return ( + json.dumps({"__type": self.type, "message": self.message}), + dict(status=self.status), + ) + + +class InvalidInputException(AWSError): + TYPE = "InvalidInputException" + + +class ResourceAlreadyExistsException(AWSError): + TYPE = "ResourceAlreadyExistsException" + + +class ResourceNotFoundException(AWSError): + TYPE = "ResourceNotFoundException" + + +class ResourceInUseException(AWSError): + TYPE = "ResourceInUseException" + + +class LimitExceededException(AWSError): + 
TYPE = "LimitExceededException" + + +class ValidationException(AWSError): + TYPE = "ValidationException" diff --git a/moto/forecast/models.py b/moto/forecast/models.py new file mode 100644 index 000000000000..c7b18618c6e6 --- /dev/null +++ b/moto/forecast/models.py @@ -0,0 +1,173 @@ +import re +from datetime import datetime + +from boto3 import Session +from future.utils import iteritems + +from moto.core import ACCOUNT_ID, BaseBackend +from moto.core.utils import iso_8601_datetime_without_milliseconds +from .exceptions import ( + InvalidInputException, + ResourceAlreadyExistsException, + ResourceNotFoundException, + ValidationException, +) + + +class DatasetGroup: + accepted_dataset_group_name_format = re.compile(r"^[a-zA-Z][a-z-A-Z0-9_]*") + accepted_dataset_group_arn_format = re.compile(r"^[a-zA-Z0-9\-\_\.\/\:]+$") + accepted_dataset_types = [ + "INVENTORY_PLANNING", + "METRICS", + "RETAIL", + "EC2_CAPACITY", + "CUSTOM", + "WEB_TRAFFIC", + "WORK_FORCE", + ] + + def __init__( + self, region_name, dataset_arns, dataset_group_name, domain, tags=None + ): + self.creation_date = iso_8601_datetime_without_milliseconds(datetime.now()) + self.modified_date = self.creation_date + + self.arn = ( + "arn:aws:forecast:" + + region_name + + ":" + + str(ACCOUNT_ID) + + ":dataset-group/" + + dataset_group_name + ) + self.dataset_arns = dataset_arns if dataset_arns else [] + self.dataset_group_name = dataset_group_name + self.domain = domain + self.tags = tags + self._validate() + + def update(self, dataset_arns): + self.dataset_arns = dataset_arns + self.last_modified_date = iso_8601_datetime_without_milliseconds(datetime.now()) + + def _validate(self): + errors = [] + + errors.extend(self._validate_dataset_group_name()) + errors.extend(self._validate_dataset_group_name_len()) + errors.extend(self._validate_dataset_group_domain()) + + if errors: + err_count = len(errors) + message = str(err_count) + " validation error" + message += "s" if err_count > 1 else "" + message += " detected: " + message += "; ".join(errors) + raise ValidationException(message) + + def _validate_dataset_group_name(self): + errors = [] + if not re.match( + self.accepted_dataset_group_name_format, self.dataset_group_name + ): + errors.append( + "Value '" + + self.dataset_group_name + + "' at 'datasetGroupName' failed to satisfy constraint: Member must satisfy regular expression pattern " + + self.accepted_dataset_group_name_format.pattern + ) + return errors + + def _validate_dataset_group_name_len(self): + errors = [] + if len(self.dataset_group_name) >= 64: + errors.append( + "Value '" + + self.dataset_group_name + + "' at 'datasetGroupName' failed to satisfy constraint: Member must have length less than or equal to 63" + ) + return errors + + def _validate_dataset_group_domain(self): + errors = [] + if self.domain not in self.accepted_dataset_types: + errors.append( + "Value '" + + self.domain + + "' at 'domain' failed to satisfy constraint: Member must satisfy enum value set " + + str(self.accepted_dataset_types) + ) + return errors + + +class ForecastBackend(BaseBackend): + def __init__(self, region_name): + super(ForecastBackend, self).__init__() + self.dataset_groups = {} + self.datasets = {} + self.region_name = region_name + + def create_dataset_group(self, dataset_group_name, domain, dataset_arns, tags): + dataset_group = DatasetGroup( + region_name=self.region_name, + dataset_group_name=dataset_group_name, + domain=domain, + dataset_arns=dataset_arns, + tags=tags, + ) + + if dataset_arns: + for dataset_arn in 
dataset_arns: + if dataset_arn not in self.datasets: + raise InvalidInputException( + "Dataset arns: [" + dataset_arn + "] are not found" + ) + + if self.dataset_groups.get(dataset_group.arn): + raise ResourceAlreadyExistsException( + "A dataset group already exists with the arn: " + dataset_group.arn + ) + + self.dataset_groups[dataset_group.arn] = dataset_group + return dataset_group + + def describe_dataset_group(self, dataset_group_arn): + try: + dataset_group = self.dataset_groups[dataset_group_arn] + except KeyError: + raise ResourceNotFoundException("No resource found " + dataset_group_arn) + return dataset_group + + def delete_dataset_group(self, dataset_group_arn): + try: + del self.dataset_groups[dataset_group_arn] + except KeyError: + raise ResourceNotFoundException("No resource found " + dataset_group_arn) + + def update_dataset_group(self, dataset_group_arn, dataset_arns): + try: + dsg = self.dataset_groups[dataset_group_arn] + except KeyError: + raise ResourceNotFoundException("No resource found " + dataset_group_arn) + + for dataset_arn in dataset_arns: + if dataset_arn not in dsg.dataset_arns: + raise InvalidInputException( + "Dataset arns: [" + dataset_arn + "] are not found" + ) + + dsg.update(dataset_arns) + + def list_dataset_groups(self): + return [v for (_, v) in iteritems(self.dataset_groups)] + + def reset(self): + region_name = self.region_name + self.__dict__ = {} + self.__init__(region_name) + + +forecast_backends = {} +for region in Session().get_available_regions("forecast"): + forecast_backends[region] = ForecastBackend(region) diff --git a/moto/forecast/responses.py b/moto/forecast/responses.py new file mode 100644 index 000000000000..09d55b0d8a1a --- /dev/null +++ b/moto/forecast/responses.py @@ -0,0 +1,92 @@ +from __future__ import unicode_literals + +import json + +from moto.core.responses import BaseResponse +from moto.core.utils import amzn_request_id +from .exceptions import AWSError +from .models import forecast_backends + + +class ForecastResponse(BaseResponse): + @property + def forecast_backend(self): + return forecast_backends[self.region] + + @amzn_request_id + def create_dataset_group(self): + dataset_group_name = self._get_param("DatasetGroupName") + domain = self._get_param("Domain") + dataset_arns = self._get_param("DatasetArns") + tags = self._get_param("Tags") + + try: + dataset_group = self.forecast_backend.create_dataset_group( + dataset_group_name=dataset_group_name, + domain=domain, + dataset_arns=dataset_arns, + tags=tags, + ) + response = {"DatasetGroupArn": dataset_group.arn} + return 200, {}, json.dumps(response) + except AWSError as err: + return err.response() + + @amzn_request_id + def describe_dataset_group(self): + dataset_group_arn = self._get_param("DatasetGroupArn") + + try: + dataset_group = self.forecast_backend.describe_dataset_group( + dataset_group_arn=dataset_group_arn + ) + response = { + "CreationTime": dataset_group.creation_date, + "DatasetArns": dataset_group.dataset_arns, + "DatasetGroupArn": dataset_group.arn, + "DatasetGroupName": dataset_group.dataset_group_name, + "Domain": dataset_group.domain, + "LastModificationTime": dataset_group.modified_date, + "Status": "ACTIVE", + } + return 200, {}, json.dumps(response) + except AWSError as err: + return err.response() + + @amzn_request_id + def delete_dataset_group(self): + dataset_group_arn = self._get_param("DatasetGroupArn") + try: + self.forecast_backend.delete_dataset_group(dataset_group_arn) + return 200, {}, None + except AWSError as err: + return 
err.response() + + @amzn_request_id + def update_dataset_group(self): + dataset_group_arn = self._get_param("DatasetGroupArn") + dataset_arns = self._get_param("DatasetArns") + try: + self.forecast_backend.update_dataset_group(dataset_group_arn, dataset_arns) + return 200, {}, None + except AWSError as err: + return err.response() + + @amzn_request_id + def list_dataset_groups(self): + list_all = self.forecast_backend.list_dataset_groups() + list_all = sorted( + [ + { + "DatasetGroupArn": dsg.arn, + "DatasetGroupName": dsg.dataset_group_name, + "CreationTime": dsg.creation_date, + "LastModificationTime": dsg.creation_date, + } + for dsg in list_all + ], + key=lambda x: x["LastModificationTime"], + reverse=True, + ) + response = {"DatasetGroups": list_all} + return 200, {}, json.dumps(response) diff --git a/moto/forecast/urls.py b/moto/forecast/urls.py new file mode 100644 index 000000000000..221659e6f1f7 --- /dev/null +++ b/moto/forecast/urls.py @@ -0,0 +1,7 @@ +from __future__ import unicode_literals + +from .responses import ForecastResponse + +url_bases = ["https?://forecast.(.+).amazonaws.com"] + +url_paths = {"{0}/$": ForecastResponse.dispatch} diff --git a/tests/test_forecast/__init__.py b/tests/test_forecast/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/tests/test_forecast/test_forecast.py b/tests/test_forecast/test_forecast.py new file mode 100644 index 000000000000..32af519c7438 --- /dev/null +++ b/tests/test_forecast/test_forecast.py @@ -0,0 +1,222 @@ +from __future__ import unicode_literals + +import boto3 +import sure # noqa +from botocore.exceptions import ClientError +from nose.tools import assert_raises +from parameterized import parameterized + +from moto import mock_forecast +from moto.core import ACCOUNT_ID + +region = "us-east-1" +account_id = None +valid_domains = [ + "RETAIL", + "CUSTOM", + "INVENTORY_PLANNING", + "EC2_CAPACITY", + "WORK_FORCE", + "WEB_TRAFFIC", + "METRICS", +] + + +@parameterized(valid_domains) +@mock_forecast +def test_forecast_dataset_group_create(domain): + name = "example_dataset_group" + client = boto3.client("forecast", region_name=region) + response = client.create_dataset_group(DatasetGroupName=name, Domain=domain) + response["ResponseMetadata"]["HTTPStatusCode"].should.equal(200) + response["DatasetGroupArn"].should.equal( + "arn:aws:forecast:" + region + ":" + ACCOUNT_ID + ":dataset-group/" + name + ) + + +@mock_forecast +def test_forecast_dataset_group_create_invalid_domain(): + name = "example_dataset_group" + client = boto3.client("forecast", region_name=region) + invalid_domain = "INVALID" + + with assert_raises(ClientError) as exc: + client.create_dataset_group(DatasetGroupName=name, Domain=invalid_domain) + exc.exception.response["Error"]["Code"].should.equal("ValidationException") + exc.exception.response["Error"]["Message"].should.equal( + "1 validation error detected: Value '" + + invalid_domain + + "' at 'domain' failed to satisfy constraint: Member must satisfy enum value set ['INVENTORY_PLANNING', 'METRICS', 'RETAIL', 'EC2_CAPACITY', 'CUSTOM', 'WEB_TRAFFIC', 'WORK_FORCE']" + ) + + +@parameterized([" ", "a" * 64]) +@mock_forecast +def test_forecast_dataset_group_create_invalid_name(name): + client = boto3.client("forecast", region_name=region) + + with assert_raises(ClientError) as exc: + client.create_dataset_group(DatasetGroupName=name, Domain="CUSTOM") + exc.exception.response["Error"]["Code"].should.equal("ValidationException") + exc.exception.response["Error"]["Message"].should.contain( + "1 
validation error detected: Value '" + + name + + "' at 'datasetGroupName' failed to satisfy constraint: Member must" + ) + + +@mock_forecast +def test_forecast_dataset_group_create_duplicate_fails(): + client = boto3.client("forecast", region_name=region) + client.create_dataset_group(DatasetGroupName="name", Domain="RETAIL") + + with assert_raises(ClientError) as exc: + client.create_dataset_group(DatasetGroupName="name", Domain="RETAIL") + + exc.exception.response["Error"]["Code"].should.equal( + "ResourceAlreadyExistsException" + ) + + +@mock_forecast +def test_forecast_dataset_group_list_default_empty(): + client = boto3.client("forecast", region_name=region) + + list = client.list_dataset_groups() + list["DatasetGroups"].should.be.empty + + +@mock_forecast +def test_forecast_dataset_group_list_some(): + client = boto3.client("forecast", region_name=region) + + client.create_dataset_group(DatasetGroupName="hello", Domain="CUSTOM") + result = client.list_dataset_groups() + + assert len(result["DatasetGroups"]) == 1 + result["DatasetGroups"][0]["DatasetGroupArn"].should.equal( + "arn:aws:forecast:" + region + ":" + ACCOUNT_ID + ":dataset-group/hello" + ) + + +@mock_forecast +def test_forecast_delete_dataset_group(): + dataset_group_name = "name" + dataset_group_arn = ( + "arn:aws:forecast:" + + region + + ":" + + ACCOUNT_ID + + ":dataset-group/" + + dataset_group_name + ) + client = boto3.client("forecast", region_name=region) + client.create_dataset_group(DatasetGroupName=dataset_group_name, Domain="CUSTOM") + client.delete_dataset_group(DatasetGroupArn=dataset_group_arn) + + +@mock_forecast +def test_forecast_delete_dataset_group_missing(): + client = boto3.client("forecast", region_name=region) + missing_dsg_arn = ( + "arn:aws:forecast:" + region + ":" + ACCOUNT_ID + ":dataset-group/missing" + ) + + with assert_raises(ClientError) as exc: + client.delete_dataset_group(DatasetGroupArn=missing_dsg_arn) + exc.exception.response["Error"]["Code"].should.equal("ResourceNotFoundException") + exc.exception.response["Error"]["Message"].should.equal( + "No resource found " + missing_dsg_arn + ) + + +@mock_forecast +def test_forecast_update_dataset_arns_empty(): + dataset_group_name = "name" + dataset_group_arn = ( + "arn:aws:forecast:" + + region + + ":" + + ACCOUNT_ID + + ":dataset-group/" + + dataset_group_name + ) + client = boto3.client("forecast", region_name=region) + client.create_dataset_group(DatasetGroupName=dataset_group_name, Domain="CUSTOM") + client.update_dataset_group(DatasetGroupArn=dataset_group_arn, DatasetArns=[]) + + +@mock_forecast +def test_forecast_update_dataset_group_not_found(): + client = boto3.client("forecast", region_name=region) + dataset_group_arn = ( + "arn:aws:forecast:" + region + ":" + ACCOUNT_ID + ":dataset-group/" + "test" + ) + with assert_raises(ClientError) as exc: + client.update_dataset_group(DatasetGroupArn=dataset_group_arn, DatasetArns=[]) + exc.exception.response["Error"]["Code"].should.equal("ResourceNotFoundException") + exc.exception.response["Error"]["Message"].should.equal( + "No resource found " + dataset_group_arn + ) + + +@mock_forecast +def test_describe_dataset_group(): + name = "test" + client = boto3.client("forecast", region_name=region) + dataset_group_arn = ( + "arn:aws:forecast:" + region + ":" + ACCOUNT_ID + ":dataset-group/" + name + ) + client.create_dataset_group(DatasetGroupName=name, Domain="CUSTOM") + result = client.describe_dataset_group(DatasetGroupArn=dataset_group_arn) + assert result.get("DatasetGroupArn") == 
dataset_group_arn + assert result.get("Domain") == "CUSTOM" + assert result.get("DatasetArns") == [] + + +@mock_forecast +def test_describe_dataset_group_missing(): + client = boto3.client("forecast", region_name=region) + dataset_group_arn = ( + "arn:aws:forecast:" + region + ":" + ACCOUNT_ID + ":dataset-group/name" + ) + with assert_raises(ClientError) as exc: + client.describe_dataset_group(DatasetGroupArn=dataset_group_arn) + exc.exception.response["Error"]["Code"].should.equal("ResourceNotFoundException") + exc.exception.response["Error"]["Message"].should.equal( + "No resource found " + dataset_group_arn + ) + + +@mock_forecast +def test_create_dataset_group_missing_datasets(): + client = boto3.client("forecast", region_name=region) + dataset_arn = "arn:aws:forecast:" + region + ":" + ACCOUNT_ID + ":dataset/name" + with assert_raises(ClientError) as exc: + client.create_dataset_group( + DatasetGroupName="name", Domain="CUSTOM", DatasetArns=[dataset_arn] + ) + exc.exception.response["Error"]["Code"].should.equal("InvalidInputException") + exc.exception.response["Error"]["Message"].should.equal( + "Dataset arns: [" + dataset_arn + "] are not found" + ) + + +@mock_forecast +def test_update_dataset_group_missing_datasets(): + name = "test" + client = boto3.client("forecast", region_name=region) + dataset_group_arn = ( + "arn:aws:forecast:" + region + ":" + ACCOUNT_ID + ":dataset-group/" + name + ) + client.create_dataset_group(DatasetGroupName=name, Domain="CUSTOM") + dataset_arn = "arn:aws:forecast:" + region + ":" + ACCOUNT_ID + ":dataset/name" + + with assert_raises(ClientError) as exc: + client.update_dataset_group( + DatasetGroupArn=dataset_group_arn, DatasetArns=[dataset_arn] + ) + exc.exception.response["Error"]["Code"].should.equal("InvalidInputException") + exc.exception.response["Error"]["Message"].should.equal( + "Dataset arns: [" + dataset_arn + "] are not found" + ) From 3b6162de670d47856e6d377912c2fdf4d5f430a9 Mon Sep 17 00:00:00 2001 From: Bert Blommers Date: Fri, 6 Nov 2020 16:34:09 +0000 Subject: [PATCH 609/658] Refactor Forecast to also use shared AWSError class --- moto/forecast/exceptions.py | 18 +----------------- 1 file changed, 1 insertion(+), 17 deletions(-) diff --git a/moto/forecast/exceptions.py b/moto/forecast/exceptions.py index ad86e90fcf22..dbc6f6414bb1 100644 --- a/moto/forecast/exceptions.py +++ b/moto/forecast/exceptions.py @@ -1,22 +1,6 @@ from __future__ import unicode_literals -import json - - -class AWSError(Exception): - TYPE = None - STATUS = 400 - - def __init__(self, message, type=None, status=None): - self.message = message - self.type = type if type is not None else self.TYPE - self.status = status if status is not None else self.STATUS - - def response(self): - return ( - json.dumps({"__type": self.type, "message": self.message}), - dict(status=self.status), - ) +from moto.core.exceptions import AWSError class InvalidInputException(AWSError): From 3b9635b3c72acc1770ef9666dadec6c3c650e712 Mon Sep 17 00:00:00 2001 From: Brian Pandola Date: Sun, 8 Nov 2020 00:06:35 -0800 Subject: [PATCH 610/658] Add ssm:SendCommand support for instance tag Targets Replace the special-case code to handle Cloud Formation tags with a more generic implementation that covers all instance tags. 
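# (Aside, illustrative rather than part of the patch: with this change a
# target such as {"Key": "tag:Name", "Values": ["test-tag"]} is turned by
# _get_instance_ids_from_targets below into the EC2 filter
# {"tag:Name": ["test-tag"]} and resolved through
# ec2_backend.all_reservations(filters=...), so any instance tag can be
# targeted, not just CloudFormation stack names.)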
Supersedes #2863 Closes #2862 --- moto/ssm/models.py | 50 +++++++++++++++----------------- tests/test_ssm/test_ssm_boto3.py | 35 +++++++++++++++++++++- 2 files changed, 57 insertions(+), 28 deletions(-) diff --git a/moto/ssm/models.py b/moto/ssm/models.py index 07812c316592..538e700f884b 100644 --- a/moto/ssm/models.py +++ b/moto/ssm/models.py @@ -6,12 +6,11 @@ from moto.core import ACCOUNT_ID, BaseBackend, BaseModel from moto.core.exceptions import RESTError -from moto.cloudformation import cloudformation_backends +from moto.ec2 import ec2_backends import datetime import time import uuid -import itertools import json import yaml import hashlib @@ -246,9 +245,6 @@ def __init__( if targets is None: targets = [] - self.error_count = 0 - self.completed_count = len(instance_ids) - self.target_count = len(instance_ids) self.command_id = str(uuid.uuid4()) self.status = "Success" self.status_details = "Details placeholder" @@ -262,7 +258,6 @@ def __init__( self.comment = comment self.document_name = document_name - self.instance_ids = instance_ids self.max_concurrency = max_concurrency self.max_errors = max_errors self.notification_config = notification_config @@ -274,14 +269,19 @@ def __init__( self.targets = targets self.backend_region = backend_region - # Get instance ids from a cloud formation stack target. - stack_instance_ids = [ - self.get_instance_ids_by_stack_ids(target["Values"]) - for target in self.targets - if target["Key"] == "tag:aws:cloudformation:stack-name" - ] - - self.instance_ids += list(itertools.chain.from_iterable(stack_instance_ids)) + self.instance_ids = instance_ids + self.instance_ids += self._get_instance_ids_from_targets() + # Ensure no duplicate instance_ids + self.instance_ids = list(set(self.instance_ids)) + + # NOTE: All of these counts are 0 in the ssm:SendCommand response + # received from a real AWS backend. The counts are correct when + # making subsequent calls to ssm:DescribeCommand or ssm:ListCommands. + # Not likely to cause any problems, but perhaps an area for future + # improvement. + self.error_count = 0 + self.completed_count = len(instance_ids) + self.target_count = len(instance_ids) # Create invocations with a single run command plugin. 
self.invocations = [] @@ -290,19 +290,15 @@ def __init__( self.invocation_response(instance_id, "aws:runShellScript") ) - def get_instance_ids_by_stack_ids(self, stack_ids): - instance_ids = [] - cloudformation_backend = cloudformation_backends[self.backend_region] - for stack_id in stack_ids: - stack_resources = cloudformation_backend.list_stack_resources(stack_id) - instance_resources = [ - instance.id - for instance in stack_resources - if instance.type == "AWS::EC2::Instance" - ] - instance_ids.extend(instance_resources) - - return instance_ids + def _get_instance_ids_from_targets(self): + target_instance_ids = [] + ec2_backend = ec2_backends[self.backend_region] + ec2_filters = {target["Key"]: target["Values"] for target in self.targets} + reservations = ec2_backend.all_reservations(filters=ec2_filters) + for reservation in reservations: + for instance in reservation.instances: + target_instance_ids.append(instance.id) + return target_instance_ids def response_object(self): r = { diff --git a/tests/test_ssm/test_ssm_boto3.py b/tests/test_ssm/test_ssm_boto3.py index 2f74759e9003..c590e75b7b34 100644 --- a/tests/test_ssm/test_ssm_boto3.py +++ b/tests/test_ssm/test_ssm_boto3.py @@ -11,7 +11,7 @@ from botocore.exceptions import ClientError, ParamValidationError from nose.tools import assert_raises -from moto import mock_ssm +from moto import mock_ec2, mock_ssm @mock_ssm @@ -1713,3 +1713,36 @@ def test_get_command_invocation(): invocation_response = client.get_command_invocation( CommandId=cmd_id, InstanceId=instance_id, PluginName="FAKE" ) + + +@mock_ec2 +@mock_ssm +def test_get_command_invocations_by_instance_tag(): + ec2 = boto3.client("ec2", region_name="us-east-1") + ssm = boto3.client("ssm", region_name="us-east-1") + tag_specifications = [ + {"ResourceType": "instance", "Tags": [{"Key": "Name", "Value": "test-tag"}]} + ] + num_instances = 3 + resp = ec2.run_instances( + ImageId="ami-1234abcd", + MaxCount=num_instances, + MinCount=num_instances, + TagSpecifications=tag_specifications, + ) + instance_ids = [] + for instance in resp["Instances"]: + instance_ids.append(instance["InstanceId"]) + instance_ids.should.have.length_of(num_instances) + + command_id = ssm.send_command( + DocumentName="AWS-RunShellScript", + Targets=[{"Key": "tag:Name", "Values": ["test-tag"]}], + )["Command"]["CommandId"] + + resp = ssm.list_commands(CommandId=command_id) + resp["Commands"][0]["TargetCount"].should.equal(num_instances) + + for instance_id in instance_ids: + resp = ssm.get_command_invocation(CommandId=command_id, InstanceId=instance_id) + resp["Status"].should.equal("Success") From 2f70373c2e9d91405082ddbd12eb61fb43914b20 Mon Sep 17 00:00:00 2001 From: Brian Pandola Date: Sun, 8 Nov 2020 04:44:23 -0800 Subject: [PATCH 611/658] Add CloudFormation Update support for `AWS::StepFunctions::StateMachine` (#3440) Closes #3402 --- moto/stepfunctions/models.py | 68 ++++++++- moto/stepfunctions/utils.py | 10 ++ .../test_stepfunctions/test_stepfunctions.py | 142 ++++++++++++++++++ 3 files changed, 214 insertions(+), 6 deletions(-) diff --git a/moto/stepfunctions/models.py b/moto/stepfunctions/models.py index 86c76c98a5da..125e5d807e64 100644 --- a/moto/stepfunctions/models.py +++ b/moto/stepfunctions/models.py @@ -16,7 +16,7 @@ ResourceNotFound, StateMachineDoesNotExist, ) -from .utils import paginate +from .utils import paginate, api_to_cfn_tags, cfn_to_api_tags class StateMachine(CloudFormationModel): @@ -62,11 +62,39 @@ def remove_tags(self, tag_keys): def physical_resource_id(self): return self.arn + 
def get_cfn_properties(self, prop_overrides): + property_names = [ + "DefinitionString", + "RoleArn", + "StateMachineName", + ] + properties = {} + for prop in property_names: + properties[prop] = prop_overrides.get(prop, self.get_cfn_attribute(prop)) + # Special handling for Tags + overridden_keys = [tag["Key"] for tag in prop_overrides.get("Tags", [])] + original_tags_to_include = [ + tag + for tag in self.get_cfn_attribute("Tags") + if tag["Key"] not in overridden_keys + ] + properties["Tags"] = original_tags_to_include + prop_overrides.get("Tags", []) + return properties + def get_cfn_attribute(self, attribute_name): from moto.cloudformation.exceptions import UnformattedGetAttTemplateException if attribute_name == "Name": return self.name + elif attribute_name == "DefinitionString": + return self.definition + elif attribute_name == "RoleArn": + return self.roleArn + elif attribute_name == "StateMachineName": + return self.name + elif attribute_name == "Tags": + return api_to_cfn_tags(self.tags) + raise UnformattedGetAttTemplateException() @staticmethod @@ -85,18 +113,46 @@ def create_from_cloudformation_json( name = properties.get("StateMachineName", resource_name) definition = properties.get("DefinitionString", "") role_arn = properties.get("RoleArn", "") - tags = properties.get("Tags", []) - tags_xform = [{k.lower(): v for k, v in d.items()} for d in tags] + tags = cfn_to_api_tags(properties.get("Tags", [])) sf_backend = stepfunction_backends[region_name] - return sf_backend.create_state_machine( - name, definition, role_arn, tags=tags_xform - ) + return sf_backend.create_state_machine(name, definition, role_arn, tags=tags) @classmethod def delete_from_cloudformation_json(cls, resource_name, _, region_name): sf_backend = stepfunction_backends[region_name] sf_backend.delete_state_machine(resource_name) + @classmethod + def update_from_cloudformation_json( + cls, original_resource, new_resource_name, cloudformation_json, region_name + ): + properties = cloudformation_json.get("Properties", {}) + name = properties.get("StateMachineName", original_resource.name) + + if name != original_resource.name: + # Replacement + new_properties = original_resource.get_cfn_properties(properties) + cloudformation_json["Properties"] = new_properties + new_resource = cls.create_from_cloudformation_json( + name, cloudformation_json, region_name + ) + cls.delete_from_cloudformation_json( + original_resource.arn, cloudformation_json, region_name + ) + return new_resource + + else: + # No Interruption + definition = properties.get("DefinitionString") + role_arn = properties.get("RoleArn") + tags = cfn_to_api_tags(properties.get("Tags", [])) + sf_backend = stepfunction_backends[region_name] + state_machine = sf_backend.update_state_machine( + original_resource.arn, definition=definition, role_arn=role_arn, + ) + state_machine.add_tags(tags) + return state_machine + class Execution: def __init__( diff --git a/moto/stepfunctions/utils.py b/moto/stepfunctions/utils.py index cf6b58c8aed8..130ffe792bb8 100644 --- a/moto/stepfunctions/utils.py +++ b/moto/stepfunctions/utils.py @@ -136,3 +136,13 @@ def paginate(self, results): page_ending_result = results[index_end] next_token = self._build_next_token(page_ending_result) return results_page, next_token + + +def cfn_to_api_tags(cfn_tags_entry): + api_tags = [{k.lower(): v for k, v in d.items()} for d in cfn_tags_entry] + return api_tags + + +def api_to_cfn_tags(api_tags): + cfn_tags_entry = [{k.capitalize(): v for k, v in d.items()} for d in api_tags] + return 
cfn_tags_entry
diff --git a/tests/test_stepfunctions/test_stepfunctions.py b/tests/test_stepfunctions/test_stepfunctions.py
index 0bea43084f46..dd11e7961a02 100644
--- a/tests/test_stepfunctions/test_stepfunctions.py
+++ b/tests/test_stepfunctions/test_stepfunctions.py
@@ -867,6 +867,148 @@ def test_state_machine_cloudformation():
 ex.exception.response["Error"]["Message"].should.contain("Does Not Exist")
 
 
+@mock_stepfunctions
+@mock_cloudformation
+def test_state_machine_cloudformation_update_with_replacement():
+ sf = boto3.client("stepfunctions", region_name="us-east-1")
+ cf = boto3.resource("cloudformation", region_name="us-east-1")
+ definition = '{"StartAt": "HelloWorld", "States": {"HelloWorld": {"Type": "Task", "Resource": "arn:aws:lambda:us-east-1:111122223333:function:HelloFunction", "End": true}}}'
+ role_arn = (
+ "arn:aws:iam::111122223333:role/service-role/StatesExecutionRole-us-east-1"
+ )
+ properties = {
+ "StateMachineName": "HelloWorld-StateMachine",
+ "DefinitionString": definition,
+ "RoleArn": role_arn,
+ "Tags": [
+ {"Key": "key1", "Value": "value1"},
+ {"Key": "key2", "Value": "value2"},
+ ],
+ }
+ template = {
+ "AWSTemplateFormatVersion": "2010-09-09",
+ "Description": "An example template for a Step Functions state machine.",
+ "Resources": {
+ "MyStateMachine": {
+ "Type": "AWS::StepFunctions::StateMachine",
+ "Properties": {},
+ }
+ },
+ "Outputs": {
+ "StateMachineArn": {"Value": {"Ref": "MyStateMachine"}},
+ "StateMachineName": {"Value": {"Fn::GetAtt": ["MyStateMachine", "Name"]}},
+ },
+ }
+ template["Resources"]["MyStateMachine"]["Properties"] = properties
+ cf.create_stack(StackName="test_stack", TemplateBody=json.dumps(template))
+ outputs_list = cf.Stack("test_stack").outputs
+ output = {item["OutputKey"]: item["OutputValue"] for item in outputs_list}
+ state_machine = sf.describe_state_machine(stateMachineArn=output["StateMachineArn"])
+ original_machine_arn = state_machine["stateMachineArn"]
+ original_creation_date = state_machine["creationDate"]
+
+ # Update State Machine, with replacement.
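+ # Changing StateMachineName is a replacement update: CloudFormation creates a
+ # new state machine (with a new ARN) and deletes the original one.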
+ updated_role = role_arn + "-updated"
+ updated_definition = definition.replace("HelloWorld", "HelloWorld2")
+ updated_properties = {
+ "StateMachineName": "New-StateMachine-Name",
+ "DefinitionString": updated_definition,
+ "RoleArn": updated_role,
+ "Tags": [
+ {"Key": "key3", "Value": "value3"},
+ {"Key": "key1", "Value": "updated_value"},
+ ],
+ }
+ template["Resources"]["MyStateMachine"]["Properties"] = updated_properties
+ cf.Stack("test_stack").update(TemplateBody=json.dumps(template))
+ outputs_list = cf.Stack("test_stack").outputs
+ output = {item["OutputKey"]: item["OutputValue"] for item in outputs_list}
+ state_machine = sf.describe_state_machine(stateMachineArn=output["StateMachineArn"])
+ state_machine["stateMachineArn"].should_not.equal(original_machine_arn)
+ state_machine["name"].should.equal("New-StateMachine-Name")
+ state_machine["creationDate"].should.be.greater_than(original_creation_date)
+ state_machine["roleArn"].should.equal(updated_role)
+ state_machine["definition"].should.equal(updated_definition)
+ tags = sf.list_tags_for_resource(resourceArn=output["StateMachineArn"]).get("tags")
+ tags.should.have.length_of(3)
+ for tag in tags:
+ if tag["key"] == "key1":
+ tag["value"].should.equal("updated_value")
+
+ with assert_raises(ClientError) as ex:
+ sf.describe_state_machine(stateMachineArn=original_machine_arn)
+ ex.exception.response["Error"]["Code"].should.equal("StateMachineDoesNotExist")
+ ex.exception.response["Error"]["Message"].should.contain(
+ "State Machine Does Not Exist"
+ )
+
+
+@mock_stepfunctions
+@mock_cloudformation
+def test_state_machine_cloudformation_update_with_no_interruption():
+ sf = boto3.client("stepfunctions", region_name="us-east-1")
+ cf = boto3.resource("cloudformation", region_name="us-east-1")
+ definition = '{"StartAt": "HelloWorld", "States": {"HelloWorld": {"Type": "Task", "Resource": "arn:aws:lambda:us-east-1:111122223333:function:HelloFunction", "End": true}}}'
+ role_arn = (
+ "arn:aws:iam::111122223333:role/service-role/StatesExecutionRole-us-east-1"
+ )
+ properties = {
+ "StateMachineName": "HelloWorld-StateMachine",
+ "DefinitionString": definition,
+ "RoleArn": role_arn,
+ "Tags": [
+ {"Key": "key1", "Value": "value1"},
+ {"Key": "key2", "Value": "value2"},
+ ],
+ }
+ template = {
+ "AWSTemplateFormatVersion": "2010-09-09",
+ "Description": "An example template for a Step Functions state machine.",
+ "Resources": {
+ "MyStateMachine": {
+ "Type": "AWS::StepFunctions::StateMachine",
+ "Properties": {},
+ }
+ },
+ "Outputs": {
+ "StateMachineArn": {"Value": {"Ref": "MyStateMachine"}},
+ "StateMachineName": {"Value": {"Fn::GetAtt": ["MyStateMachine", "Name"]}},
+ },
+ }
+ template["Resources"]["MyStateMachine"]["Properties"] = properties
+ cf.create_stack(StackName="test_stack", TemplateBody=json.dumps(template))
+ outputs_list = cf.Stack("test_stack").outputs
+ output = {item["OutputKey"]: item["OutputValue"] for item in outputs_list}
+ state_machine = sf.describe_state_machine(stateMachineArn=output["StateMachineArn"])
+ machine_arn = state_machine["stateMachineArn"]
+ creation_date = state_machine["creationDate"]
+
+ # Update State Machine in-place, no replacement.
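+ # The name is unchanged here, so the machine is updated in place and keeps
+ # its ARN and creation date.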
+ updated_role = role_arn + "-updated" + updated_definition = definition.replace("HelloWorld", "HelloWorldUpdated") + updated_properties = { + "DefinitionString": updated_definition, + "RoleArn": updated_role, + "Tags": [ + {"Key": "key3", "Value": "value3"}, + {"Key": "key1", "Value": "updated_value"}, + ], + } + template["Resources"]["MyStateMachine"]["Properties"] = updated_properties + cf.Stack("test_stack").update(TemplateBody=json.dumps(template)) + + state_machine = sf.describe_state_machine(stateMachineArn=machine_arn) + state_machine["name"].should.equal("HelloWorld-StateMachine") + state_machine["creationDate"].should.equal(creation_date) + state_machine["roleArn"].should.equal(updated_role) + state_machine["definition"].should.equal(updated_definition) + tags = sf.list_tags_for_resource(resourceArn=machine_arn).get("tags") + tags.should.have.length_of(3) + for tag in tags: + if tag["key"] == "key1": + tag["value"].should.equal("updated_value") + + def _get_account_id(): global account_id if account_id: From 204fdabcc9d84b5efdaecae3fcc86c5bc893c38a Mon Sep 17 00:00:00 2001 From: Bert Blommers Date: Sun, 8 Nov 2020 13:49:27 +0000 Subject: [PATCH 612/658] #3359 - Downgrade Docker-version to not break mock_lambda without Docker running --- setup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup.py b/setup.py index bcbc88a20f3c..1eb781dbc9b6 100755 --- a/setup.py +++ b/setup.py @@ -78,7 +78,7 @@ def get_version(): _dep_PyYAML = "PyYAML>=5.1" _dep_python_jose = "python-jose[cryptography]>=3.1.0,<4.0.0" _dep_python_jose_ecdsa_pin = "ecdsa<0.15" # https://github.com/spulec/moto/pull/3263#discussion_r477404984 -_dep_docker = "docker>=2.5.1" +_dep_docker = "docker>=2.5.1,<=4.3.0" # https://github.com/spulec/moto/issues/3359 _dep_jsondiff = "jsondiff>=1.1.2" _dep_aws_xray_sdk = "aws-xray-sdk!=0.96,>=0.93" _dep_idna = "idna<3,>=2.5" From c0700aa704356e314ac06d08542fcedf24af34ab Mon Sep 17 00:00:00 2001 From: Bert Blommers Date: Sun, 8 Nov 2020 14:16:02 +0000 Subject: [PATCH 613/658] Revert "#3359 - Downgrade Docker-version to not break mock_lambda without Docker running" This reverts commit 204fdabcc9d84b5efdaecae3fcc86c5bc893c38a. 
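The upper bound on the docker dependency is no longer needed: the follow-up
commits initiate the Docker client only when a Lambda is actually invoked, and
move Travis CI to a distribution that ships a newer Docker Engine.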
--- setup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup.py b/setup.py index 1eb781dbc9b6..bcbc88a20f3c 100755 --- a/setup.py +++ b/setup.py @@ -78,7 +78,7 @@ def get_version(): _dep_PyYAML = "PyYAML>=5.1" _dep_python_jose = "python-jose[cryptography]>=3.1.0,<4.0.0" _dep_python_jose_ecdsa_pin = "ecdsa<0.15" # https://github.com/spulec/moto/pull/3263#discussion_r477404984 -_dep_docker = "docker>=2.5.1,<=4.3.0" # https://github.com/spulec/moto/issues/3359 +_dep_docker = "docker>=2.5.1" _dep_jsondiff = "jsondiff>=1.1.2" _dep_aws_xray_sdk = "aws-xray-sdk!=0.96,>=0.93" _dep_idna = "idna<3,>=2.5" From 390a4d55102f49cade830bf422ebf36e3be7dacb Mon Sep 17 00:00:00 2001 From: Bert Blommers Date: Sun, 8 Nov 2020 14:18:49 +0000 Subject: [PATCH 614/658] #3359 - Only initiate Docker when invoking Lambdas --- moto/awslambda/models.py | 38 +++++++++++++++++++++++--------------- 1 file changed, 23 insertions(+), 15 deletions(-) diff --git a/moto/awslambda/models.py b/moto/awslambda/models.py index ce9c78fc6560..12f98ec0cabf 100644 --- a/moto/awslambda/models.py +++ b/moto/awslambda/models.py @@ -162,25 +162,11 @@ def __init__(self, spec, region, validate_s3=True, version=1): self.run_time = spec["Runtime"] self.logs_backend = logs_backends[self.region] self.environment_vars = spec.get("Environment", {}).get("Variables", {}) - self.docker_client = docker.from_env() + self.docker_client = None self.policy = None self.state = "Active" self.reserved_concurrency = spec.get("ReservedConcurrentExecutions", None) - # Unfortunately mocking replaces this method w/o fallback enabled, so we - # need to replace it if we detect it's been mocked - if requests.adapters.HTTPAdapter.send != _orig_adapter_send: - _orig_get_adapter = self.docker_client.api.get_adapter - - def replace_adapter_send(*args, **kwargs): - adapter = _orig_get_adapter(*args, **kwargs) - - if isinstance(adapter, requests.adapters.HTTPAdapter): - adapter.send = functools.partial(_orig_adapter_send, adapter) - return adapter - - self.docker_client.api.get_adapter = replace_adapter_send - # optional self.description = spec.get("Description", "") self.memory_size = spec.get("MemorySize", 128) @@ -242,6 +228,26 @@ def replace_adapter_send(*args, **kwargs): self.tags = dict() + def initiate_docker_client(self): + # We should only initiate the Docker Client at runtime. 
+ # The docker.from_env() call will fail if Docker is not running
+ if self.docker_client is None:
+ self.docker_client = docker.from_env()
+
+ # Unfortunately mocking replaces this method w/o fallback enabled, so we
+ # need to replace it if we detect it's been mocked
+ if requests.adapters.HTTPAdapter.send != _orig_adapter_send:
+ _orig_get_adapter = self.docker_client.api.get_adapter
+
+ def replace_adapter_send(*args, **kwargs):
+ adapter = _orig_get_adapter(*args, **kwargs)
+
+ if isinstance(adapter, requests.adapters.HTTPAdapter):
+ adapter.send = functools.partial(_orig_adapter_send, adapter)
+ return adapter
+
+ self.docker_client.api.get_adapter = replace_adapter_send
+
 def set_version(self, version):
 self.function_arn = make_function_ver_arn(
 self.region, ACCOUNT_ID, self.function_name, version
@@ -412,6 +418,8 @@ def _invoke_lambda(self, code, event=None, context=None):
 env_vars.update(self.environment_vars)
+ self.initiate_docker_client()
+
 container = exit_code = None
 log_config = docker.types.LogConfig(type=docker.types.LogConfig.types.JSON)
 with _DockerDataVolumeContext(self) as data_vol:
From f53a8f723c260f4ecf0cbe7988dc0f2afbb1402d Mon Sep 17 00:00:00 2001
From: Bert Blommers
Date: Sun, 8 Nov 2020 14:25:28 +0000
Subject: [PATCH 615/658] Travis: Use Focal-distribution, so we no longer have to downgrade Docker
---
 .travis.yml | 4 ++--
 travis_moto_server.sh | 4 ----
 2 files changed, 2 insertions(+), 6 deletions(-)

diff --git a/.travis.yml b/.travis.yml
index ed9084f19029..824eb0edcf66 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -1,4 +1,4 @@
-dist: bionic
+dist: focal
 language: python
 services:
 - docker
@@ -27,7 +27,7 @@ install:
 docker run --rm -t --name motoserver -e TEST_SERVER_MODE=true -e AWS_SECRET_ACCESS_KEY=server_secret -e AWS_ACCESS_KEY_ID=server_key -v `pwd`:/moto -p 5000:5000 -v /var/run/docker.sock:/var/run/docker.sock python:${PYTHON_DOCKER_TAG} /moto/travis_moto_server.sh & fi
 travis_retry pip install -r requirements-dev.txt
- travis_retry pip install "docker>=2.5.1,<=4.2.2" # Limit version due to old Docker Engine in Travis https://github.com/docker/docker-py/issues/2639
+ travis_retry pip install "docker>=2.5.1"
 travis_retry pip install boto==2.45.0
 travis_retry pip install boto3
 travis_retry pip install dist/moto*.gz
diff --git a/travis_moto_server.sh b/travis_moto_server.sh
index c764d1cd1c46..a9ca79eb5845 100755
--- a/travis_moto_server.sh
+++ b/travis_moto_server.sh
@@ -1,8 +1,4 @@
 #!/usr/bin/env bash
 set -e
-# TravisCI on bionic dist uses old version of Docker Engine
-# which is incompatibile with newer docker-py
-# See https://github.com/docker/docker-py/issues/2639
 pip install "docker>=2.5.1,<=4.2.2"
 pip install $(ls /moto/dist/moto*.gz)[server,all]
 moto_server -H 0.0.0.0 -p 5000
From 8d3cc3ef326ac4d82b79a5e20276bc9215c85fd7 Mon Sep 17 00:00:00 2001
From: Bert Blommers
Date: Sun, 8 Nov 2020 15:16:53 +0000
Subject: [PATCH 616/658] #3359 - Reuse Docker-on-request for AWSLambda and Batch
---
 moto/awslambda/models.py | 31 ++++---------------------------
 moto/batch/models.py | 22 +++-------------------
 moto/utilities/docker.py | 33 +++++++++++++++++++++++++++++++++
 3 files changed, 40 insertions(+), 46 deletions(-)
 create mode 100644 moto/utilities/docker.py

diff --git a/moto/awslambda/models.py b/moto/awslambda/models.py
index 12f98ec0cabf..c5f29fd68cd0 100644
--- a/moto/awslambda/models.py
+++ b/moto/awslambda/models.py
@@ -17,13 +17,12 @@ import re
 import zipfile
 import uuid
-import functools
 import tarfile
 import calendar
 import threading
 import
traceback
 import weakref
-import requests.adapters
+import requests.exceptions
 from boto3 import Session
@@ -47,6 +46,7 @@
 from moto.dynamodb2 import dynamodb_backends2
 from moto.dynamodbstreams import dynamodbstreams_backends
 from moto.core import ACCOUNT_ID
+from moto.utilities.docker import DockerModel
 logger = logging.getLogger(__name__)
@@ -55,7 +55,6 @@
 except ImportError:
 from backports.tempfile import TemporaryDirectory
-_orig_adapter_send = requests.adapters.HTTPAdapter.send
 docker_3 = docker.__version__[0] >= "3"
@@ -151,8 +150,9 @@ def __exit__(self, exc_type, exc_val, exc_tb):
 raise # multiple processes trying to use same volume?
-class LambdaFunction(CloudFormationModel):
+class LambdaFunction(CloudFormationModel, DockerModel):
 def __init__(self, spec, region, validate_s3=True, version=1):
+ DockerModel.__init__(self)
 # required
 self.region = region
 self.code = spec["Code"]
@@ -162,7 +162,6 @@ def __init__(self, spec, region, validate_s3=True, version=1):
 self.run_time = spec["Runtime"]
 self.logs_backend = logs_backends[self.region]
 self.environment_vars = spec.get("Environment", {}).get("Variables", {})
- self.docker_client = None
 self.policy = None
 self.state = "Active"
 self.reserved_concurrency = spec.get("ReservedConcurrentExecutions", None)
@@ -228,26 +227,6 @@ def __init__(self, spec, region, validate_s3=True, version=1):
 self.tags = dict()
- def initiate_docker_client(self):
- # We should only initiate the Docker Client at runtime.
- # The docker.from_env() call will fail if Docker is not running
- if self.docker_client is None:
- self.docker_client = docker.from_env()
-
- # Unfortunately mocking replaces this method w/o fallback enabled, so we
- # need to replace it if we detect it's been mocked
- if requests.adapters.HTTPAdapter.send != _orig_adapter_send:
- _orig_get_adapter = self.docker_client.api.get_adapter
-
- def replace_adapter_send(*args, **kwargs):
- adapter = _orig_get_adapter(*args, **kwargs)
-
- if isinstance(adapter, requests.adapters.HTTPAdapter):
- adapter.send = functools.partial(_orig_adapter_send, adapter)
- return adapter
-
- self.docker_client.api.get_adapter = replace_adapter_send
-
 def set_version(self, version):
 self.function_arn = make_function_ver_arn(
 self.region, ACCOUNT_ID, self.function_name, version
@@ -418,8 +397,6 @@ def _invoke_lambda(self, code, event=None, context=None):
 env_vars.update(self.environment_vars)
- self.initiate_docker_client()
-
 container = exit_code = None
 log_config = docker.types.LogConfig(type=docker.types.LogConfig.types.JSON)
 with _DockerDataVolumeContext(self) as data_vol:
diff --git a/moto/batch/models.py b/moto/batch/models.py
index c4bc81a73788..6a2d889bcd97 100644
--- a/moto/batch/models.py
+++ b/moto/batch/models.py
@@ -1,6 +1,5 @@ from __future__ import unicode_literals
 import re
-import requests.adapters
 from itertools import cycle
 import six
 import datetime
@@ -8,7 +7,6 @@
 import uuid
 import logging
 import docker
-import functools
 import threading
 import dateutil.parser
 from boto3 import Session
@@ -30,8 +28,8 @@
 from moto.ec2.models import INSTANCE_TYPES as EC2_INSTANCE_TYPES
 from moto.iam.exceptions import IAMNotFoundException
 from moto.core import ACCOUNT_ID as DEFAULT_ACCOUNT_ID
+from moto.utilities.docker import DockerModel
-_orig_adapter_send = requests.adapters.HTTPAdapter.send
 logger = logging.getLogger(__name__)
 COMPUTE_ENVIRONMENT_NAME_REGEX = re.compile(
 r"^[A-Za-z0-9][A-Za-z0-9_-]{1,126}[A-Za-z0-9]$"
@@ -311,7 +309,7 @@ def create_from_cloudformation_json(
 return
backend.get_job_definition_by_arn(arn)
-class Job(threading.Thread, BaseModel):
+class Job(threading.Thread, BaseModel, DockerModel):
 def __init__(self, name, job_def, job_queue, log_backend, container_overrides):
 """
 Docker Job
@@ -324,6 +322,7 @@ def __init__(self, name, job_def, job_queue, log_backend, container_overrides):
 :type log_backend: moto.logs.models.LogsBackend
 """
 threading.Thread.__init__(self)
+ DockerModel.__init__(self)
 self.job_name = name
 self.job_id = str(uuid.uuid4())
@@ -342,24 +341,9 @@ def __init__(self, name, job_def, job_queue, log_backend, container_overrides):
 self.daemon = True
 self.name = "MOTO-BATCH-" + self.job_id
- self.docker_client = docker.from_env()
 self._log_backend = log_backend
 self.log_stream_name = None
- # Unfortunately mocking replaces this method w/o fallback enabled, so we
- # need to replace it if we detect it's been mocked
- if requests.adapters.HTTPAdapter.send != _orig_adapter_send:
- _orig_get_adapter = self.docker_client.api.get_adapter
-
- def replace_adapter_send(*args, **kwargs):
- adapter = _orig_get_adapter(*args, **kwargs)
-
- if isinstance(adapter, requests.adapters.HTTPAdapter):
- adapter.send = functools.partial(_orig_adapter_send, adapter)
- return adapter
-
- self.docker_client.api.get_adapter = replace_adapter_send
-
 def describe(self):
 result = {
 "jobDefinition": self.job_definition.arn,
diff --git a/moto/utilities/docker.py b/moto/utilities/docker.py
new file mode 100644
index 000000000000..576a9df1d8e8
--- /dev/null
+++ b/moto/utilities/docker.py
@@ -0,0 +1,33 @@
+import docker
+import functools
+import requests.adapters
+
+
+_orig_adapter_send = requests.adapters.HTTPAdapter.send
+
+
+class DockerModel:
+ def __init__(self):
+ self.__docker_client = None
+
+ @property
+ def docker_client(self):
+ if self.__docker_client is None:
+ # We should only initiate the Docker Client at runtime.
+ # The docker.from_env() call will fail if Docker is not running
+ self.__docker_client = docker.from_env()
+
+ # Unfortunately mocking replaces this method w/o fallback enabled, so we
+ # need to replace it if we detect it's been mocked
+ if requests.adapters.HTTPAdapter.send != _orig_adapter_send:
+ _orig_get_adapter = self.docker_client.api.get_adapter
+
+ def replace_adapter_send(*args, **kwargs):
+ adapter = _orig_get_adapter(*args, **kwargs)
+
+ if isinstance(adapter, requests.adapters.HTTPAdapter):
+ adapter.send = functools.partial(_orig_adapter_send, adapter)
+ return adapter
+
+ self.docker_client.api.get_adapter = replace_adapter_send
+ return self.__docker_client
From 8adb1165f922aef28064b7886e9e05e433bb38d5 Mon Sep 17 00:00:00 2001
From: Isis Ohana
Date: Mon, 9 Nov 2020 11:47:13 -0300
Subject: [PATCH 617/658] adding docs for athena and glue
---
 README.md | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/README.md b/README.md
index 3915a85cdbb5..70bc26a118e0 100644
--- a/README.md
+++ b/README.md
@@ -81,6 +81,7 @@ It gets even better! Moto isn't just for Python code and it isn't just for S3. L
 | ACM | @mock_acm | all endpoints done | |
 | API Gateway | @mock_apigateway | core endpoints done | |
 | Application Autoscaling | @mock_applicationautoscaling | basic endpoints done | |
+| Athena | @mock_athena | core endpoints done | |
 | Autoscaling | @mock_autoscaling | core endpoints done | |
 | Cloudformation | @mock_cloudformation | core endpoints done | |
 | Cloudwatch | @mock_cloudwatch | basic endpoints done | |
@@ -103,6 +104,7 @@ It gets even better! Moto isn't just for Python code and it isn't just for S3. 
L | ELBv2 | @mock_elbv2 | all endpoints done | | | EMR | @mock_emr | core endpoints done | | | Glacier | @mock_glacier | core endpoints done | | +| Glue | @mock_glue | core endpoints done | | | IAM | @mock_iam | core endpoints done | | | IoT | @mock_iot | core endpoints done | | | IoT data | @mock_iotdata | core endpoints done | | From b5b1c45d68446d242be58b02089e1120bb9f6a51 Mon Sep 17 00:00:00 2001 From: Bert Blommers Date: Mon, 9 Nov 2020 16:31:18 +0000 Subject: [PATCH 618/658] Rename DockerUtilities to differentiate from docker-dependency --- moto/awslambda/models.py | 2 +- moto/batch/models.py | 2 +- moto/utilities/{docker.py => docker_utilities.py} | 0 3 files changed, 2 insertions(+), 2 deletions(-) rename moto/utilities/{docker.py => docker_utilities.py} (100%) diff --git a/moto/awslambda/models.py b/moto/awslambda/models.py index c5f29fd68cd0..a26fcba40211 100644 --- a/moto/awslambda/models.py +++ b/moto/awslambda/models.py @@ -46,7 +46,7 @@ from moto.dynamodb2 import dynamodb_backends2 from moto.dynamodbstreams import dynamodbstreams_backends from moto.core import ACCOUNT_ID -from moto.utilities.docker import DockerModel +from moto.utilities.docker_utilities import DockerModel logger = logging.getLogger(__name__) diff --git a/moto/batch/models.py b/moto/batch/models.py index 6a2d889bcd97..f729144d89ee 100644 --- a/moto/batch/models.py +++ b/moto/batch/models.py @@ -28,7 +28,7 @@ from moto.ec2.models import INSTANCE_TYPES as EC2_INSTANCE_TYPES from moto.iam.exceptions import IAMNotFoundException from moto.core import ACCOUNT_ID as DEFAULT_ACCOUNT_ID -from moto.utilities.docker import DockerModel +from moto.utilities.docker_utilities import DockerModel logger = logging.getLogger(__name__) COMPUTE_ENVIRONMENT_NAME_REGEX = re.compile( diff --git a/moto/utilities/docker.py b/moto/utilities/docker_utilities.py similarity index 100% rename from moto/utilities/docker.py rename to moto/utilities/docker_utilities.py From b8e08539e33eb0999ee25d616815890daf7a02d3 Mon Sep 17 00:00:00 2001 From: Brian Pandola Date: Mon, 9 Nov 2020 14:59:06 -0800 Subject: [PATCH 619/658] Fix: Return `Tags` in iam:CreateUserResponse Fixes #3450 --- moto/iam/responses.py | 10 ++++++++++ tests/test_iam/test_iam.py | 17 +++++++++++++++++ 2 files changed, 27 insertions(+) diff --git a/moto/iam/responses.py b/moto/iam/responses.py index 55a7c207626b..d6f8ae020e71 100644 --- a/moto/iam/responses.py +++ b/moto/iam/responses.py @@ -1684,6 +1684,16 @@ def get_account_summary(self): {{ user.id }} {{ user.created_iso_8601 }} {{ user.arn }} + {% if user.tags %} + + {% for tag in user.tags %} + + {{ tag['Key'] }} + {{ tag['Value'] }} + + {% endfor %} + + {% endif %} diff --git a/tests/test_iam/test_iam.py b/tests/test_iam/test_iam.py index 7db2f0162775..a7f4aea23308 100644 --- a/tests/test_iam/test_iam.py +++ b/tests/test_iam/test_iam.py @@ -4007,3 +4007,20 @@ def test_list_roles_none_found_returns_empty_list(): response = iam.list_roles(MaxItems=10) roles = response["Roles"] assert len(roles) == 0 + + +@mock_iam() +def test_create_user_with_tags(): + conn = boto3.client("iam", region_name="us-east-1") + user_name = "test-user" + tags = [ + {"Key": "somekey", "Value": "somevalue"}, + {"Key": "someotherkey", "Value": "someothervalue"}, + ] + resp = conn.create_user(UserName=user_name, Tags=tags) + assert resp["User"]["Tags"] == tags + resp = conn.list_user_tags(UserName=user_name) + assert resp["Tags"] == tags + + resp = conn.create_user(UserName="test-create-user-no-tags") + assert "Tags" not in resp["User"] From 
77dc60ea97ff9e7aef4a364a124782c27209385c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Mat=C4=9Bj=20Cepl?= Date: Tue, 6 Oct 2020 07:54:49 +0200 Subject: [PATCH 620/658] Port test suite from nose to pytest. This just eliminates all errors on the tests collection. Elimination of failures is left to the next commit. --- requirements-tests.txt | 4 +- setup.cfg | 6 - tests/__init__.py | 1 - tests/backport_assert_raises.py | 41 ---- tests/helpers.py | 2 +- tests/test_acm/__init__.py | 1 + tests/test_acm/resources/__init__.py | 1 + tests/test_apigateway/__init__.py | 1 + tests/test_apigateway/test_apigateway.py | 52 ++--- .../test_applicationautoscaling.py | 12 +- .../test_validation.py | 18 +- tests/test_athena/__init__.py | 1 + tests/test_athena/test_athena.py | 4 +- tests/test_autoscaling/__init__.py | 1 + tests/test_autoscaling/test_autoscaling.py | 6 +- .../test_autoscaling_cloudformation.py | 2 +- tests/test_autoscaling/test_elbv2.py | 2 +- tests/test_autoscaling/test_policies.py | 2 +- tests/test_awslambda/__init__.py | 1 + .../test_awslambda_cloudformation.py | 4 +- tests/test_awslambda/test_lambda.py | 6 +- tests/test_batch/__init__.py | 1 + tests/test_batch/test_batch.py | 4 - tests/test_batch/test_batch_cloudformation.py | 1 - .../test_cloudformation_stack_crud.py | 10 +- .../test_cloudformation_stack_crud_boto3.py | 11 +- tests/test_cloudformation/test_validate.py | 1 - tests/test_cloudwatch/__init__.py | 1 + .../test_cloudwatch/test_cloudwatch_boto3.py | 10 +- tests/test_codecommit/test_codecommit.py | 12 +- tests/test_codepipeline/test_codepipeline.py | 24 +- tests/test_cognitoidentity/__init__.py | 1 + .../test_cognitoidentity.py | 4 +- tests/test_cognitoidp/__init__.py | 1 + tests/test_cognitoidp/test_cognitoidp.py | 12 +- tests/test_config/__init__.py | 1 + tests/test_config/test_config.py | 140 ++++++------ tests/test_core/__init__.py | 1 + tests/test_core/test_auth.py | 34 ++- tests/test_core/test_decorator_calls.py | 11 +- tests/test_core/test_instance_metadata.py | 2 +- tests/test_core/test_moto_api.py | 2 +- tests/test_datapipeline/__init__.py | 1 + tests/test_datasync/test_datasync.py | 16 +- tests/test_dynamodb/__init__.py | 1 + tests/test_dynamodb/test_dynamodb.py | 5 +- tests/test_dynamodb2/__init__.py | 1 + tests/test_dynamodb2/test_dynamodb.py | 80 +++---- .../test_dynamodb_table_with_range_key.py | 4 +- tests/test_dynamodbstreams/__init__.py | 1 + .../test_dynamodbstreams.py | 4 +- tests/test_ec2/test_amis.py | 42 ++-- tests/test_ec2/test_customer_gateways.py | 5 +- tests/test_ec2/test_dhcp_options.py | 23 +- tests/test_ec2/test_elastic_block_store.py | 69 +++--- tests/test_ec2/test_elastic_ip_addresses.py | 39 ++-- .../test_elastic_network_interfaces.py | 15 +- tests/test_ec2/test_flow_logs.py | 23 +- tests/test_ec2/test_general.py | 5 +- tests/test_ec2/test_instances.py | 37 ++-- tests/test_ec2/test_internet_gateways.py | 25 +-- tests/test_ec2/test_key_pairs.py | 21 +- tests/test_ec2/test_launch_templates.py | 4 +- tests/test_ec2/test_network_acls.py | 4 +- tests/test_ec2/test_route_tables.py | 25 +-- tests/test_ec2/test_security_groups.py | 59 +++-- tests/test_ec2/test_spot_instances.py | 6 +- tests/test_ec2/test_subnets.py | 19 +- tests/test_ec2/test_tags.py | 24 +- tests/test_ec2/test_vpc_peering.py | 15 +- tests/test_ec2/test_vpcs.py | 19 +- tests/test_ec2/test_vpn_connections.py | 8 +- tests/test_ecr/__init__.py | 1 + tests/test_ecr/test_ecr_boto3.py | 2 +- tests/test_ecs/__init__.py | 1 + tests/test_ecs/test_ecs_boto3.py | 6 +- tests/test_elb/__init__.py | 1 + 
tests/test_elb/test_elb.py | 8 +- tests/test_elbv2/__init__.py | 1 + tests/test_elbv2/test_elbv2.py | 60 ++--- tests/test_emr/__init__.py | 1 + tests/test_emr/test_emr_boto3.py | 10 +- tests/test_events/__init__.py | 1 + tests/test_events/test_events.py | 4 +- tests/test_glacier/__init__.py | 1 + tests/test_glue/test_datacatalog.py | 34 +-- tests/test_iam/__init__.py | 1 + tests/test_iam/test_iam.py | 206 +++++++++--------- tests/test_iam/test_iam_cloudformation.py | 10 +- tests/test_iam/test_iam_groups.py | 16 +- tests/test_iam/test_iam_policies.py | 4 +- tests/test_iot/__init__.py | 1 + tests/test_iot/test_iot.py | 12 +- tests/test_iotdata/__init__.py | 1 + tests/test_iotdata/test_iotdata.py | 8 +- tests/test_kinesis/__init__.py | 1 + tests/test_kinesisvideo/__init__.py | 1 + tests/test_kinesisvideo/test_kinesisvideo.py | 10 +- .../__init__.py | 1 + tests/test_kms/__init__.py | 1 + tests/test_kms/test_kms.py | 22 +- tests/test_kms/test_kms_boto3.py | 46 ++-- tests/test_kms/test_utils.py | 12 +- tests/test_logs/__init__.py | 1 + tests/test_logs/test_logs.py | 18 +- .../test_managedblockchain_proposalvotes.py | 2 +- tests/test_opsworks/__init__.py | 1 + .../test_organizations_boto3.py | 94 ++++---- tests/test_packages/__init__.py | 1 - tests/test_polly/__init__.py | 1 + tests/test_polly/test_polly.py | 10 +- tests/test_ram/test_ram.py | 20 +- tests/test_rds/__init__.py | 1 + tests/test_rds2/__init__.py | 1 + tests/test_redshift/__init__.py | 1 + tests/test_route53/__init__.py | 1 + tests/test_route53/test_route53.py | 6 +- tests/test_s3/test_s3.py | 127 ++++++----- tests/test_s3/test_s3_lifecycle.py | 36 +-- tests/test_s3/test_s3_storageclass.py | 10 +- tests/test_s3bucket_path/__init__.py | 1 + .../test_sagemaker/test_sagemaker_endpoint.py | 61 ++---- tests/test_sagemaker/test_sagemaker_models.py | 5 +- .../test_sagemaker_notebooks.py | 168 +++++++------- .../test_sagemaker/test_sagemaker_training.py | 72 +++--- tests/test_secretsmanager/__init__.py | 1 + .../test_secretsmanager/test_list_secrets.py | 33 ++- .../test_secretsmanager.py | 112 +++++----- tests/test_ses/__init__.py | 1 + tests/test_ses/test_ses_boto3.py | 16 +- tests/test_ses/test_ses_sns_boto3.py | 1 - tests/test_sns/__init__.py | 1 + tests/test_sns/test_publishing_boto3.py | 8 +- tests/test_sns/test_subscriptions_boto3.py | 12 +- tests/test_sqs/__init__.py | 1 + tests/test_sqs/test_sqs.py | 41 ++-- tests/test_ssm/__init__.py | 1 + tests/test_ssm/test_ssm_boto3.py | 8 +- tests/test_stepfunctions/__init__.py | 1 + .../test_stepfunctions/test_stepfunctions.py | 24 +- tests/test_sts/__init__.py | 1 + tests/test_sts/test_sts.py | 4 +- tests/test_swf/models/test_domain.py | 3 - .../responses/test_workflow_executions.py | 3 - tests/test_xray/__init__.py | 1 + tox.ini | 2 +- 146 files changed, 1172 insertions(+), 1277 deletions(-) delete mode 100644 tests/backport_assert_raises.py create mode 100644 tests/test_acm/__init__.py create mode 100644 tests/test_acm/resources/__init__.py create mode 100644 tests/test_apigateway/__init__.py create mode 100644 tests/test_athena/__init__.py create mode 100644 tests/test_autoscaling/__init__.py create mode 100644 tests/test_batch/__init__.py create mode 100644 tests/test_cloudwatch/__init__.py create mode 100644 tests/test_cognitoidentity/__init__.py create mode 100644 tests/test_cognitoidp/__init__.py create mode 100644 tests/test_config/__init__.py create mode 100644 tests/test_core/__init__.py create mode 100644 tests/test_datapipeline/__init__.py create mode 100644 
tests/test_dynamodb/__init__.py create mode 100644 tests/test_dynamodb2/__init__.py create mode 100644 tests/test_dynamodbstreams/__init__.py create mode 100644 tests/test_ecr/__init__.py create mode 100644 tests/test_ecs/__init__.py create mode 100644 tests/test_elb/__init__.py create mode 100644 tests/test_elbv2/__init__.py create mode 100644 tests/test_emr/__init__.py create mode 100644 tests/test_events/__init__.py create mode 100644 tests/test_glacier/__init__.py create mode 100644 tests/test_iam/__init__.py create mode 100644 tests/test_iot/__init__.py create mode 100644 tests/test_iotdata/__init__.py create mode 100644 tests/test_kinesis/__init__.py create mode 100644 tests/test_kinesisvideo/__init__.py create mode 100644 tests/test_kinesisvideoarchivedmedia/__init__.py create mode 100644 tests/test_kms/__init__.py create mode 100644 tests/test_logs/__init__.py create mode 100644 tests/test_opsworks/__init__.py create mode 100644 tests/test_polly/__init__.py create mode 100644 tests/test_rds/__init__.py create mode 100644 tests/test_rds2/__init__.py create mode 100644 tests/test_redshift/__init__.py create mode 100644 tests/test_route53/__init__.py create mode 100644 tests/test_s3bucket_path/__init__.py create mode 100644 tests/test_secretsmanager/__init__.py create mode 100644 tests/test_ses/__init__.py create mode 100644 tests/test_sns/__init__.py create mode 100644 tests/test_sqs/__init__.py create mode 100644 tests/test_stepfunctions/__init__.py create mode 100644 tests/test_sts/__init__.py create mode 100644 tests/test_xray/__init__.py diff --git a/requirements-tests.txt b/requirements-tests.txt index eaa8454c77de..c19f35c7d4d2 100644 --- a/requirements-tests.txt +++ b/requirements-tests.txt @@ -1,4 +1,4 @@ -nose +pytest sure==1.4.11 freezegun -parameterized>=0.7.0 \ No newline at end of file +parameterized>=0.7.0 diff --git a/setup.cfg b/setup.cfg index fb04c16a82a0..3c6e79cf31da 100644 --- a/setup.cfg +++ b/setup.cfg @@ -1,8 +1,2 @@ -[nosetests] -verbosity=1 -detailed-errors=1 -with-coverage=1 -cover-package=moto - [bdist_wheel] universal=1 diff --git a/tests/__init__.py b/tests/__init__.py index 05b1d476b3d6..01fe5ab1fb55 100644 --- a/tests/__init__.py +++ b/tests/__init__.py @@ -6,4 +6,3 @@ logging.getLogger("boto").setLevel(logging.CRITICAL) logging.getLogger("boto3").setLevel(logging.CRITICAL) logging.getLogger("botocore").setLevel(logging.CRITICAL) -logging.getLogger("nose").setLevel(logging.CRITICAL) diff --git a/tests/backport_assert_raises.py b/tests/backport_assert_raises.py deleted file mode 100644 index bfed51308eb2..000000000000 --- a/tests/backport_assert_raises.py +++ /dev/null @@ -1,41 +0,0 @@ -from __future__ import unicode_literals - -""" -Patch courtesy of: -https://marmida.com/blog/index.php/2012/08/08/monkey-patching-assert_raises/ -""" - -# code for monkey-patching -import nose.tools - -# let's fix nose.tools.assert_raises (which is really unittest.assertRaises) -# so that it always supports context management - -# in order for these changes to be available to other modules, you'll need -# to guarantee this module is imported by your fixture before either nose or -# unittest are imported - -try: - nose.tools.assert_raises(Exception) -except TypeError: - # this version of assert_raises doesn't support the 1-arg version - class AssertRaisesContext(object): - def __init__(self, expected): - self.expected = expected - - def __enter__(self): - return self - - def __exit__(self, exc_type, exc_val, tb): - self.exception = exc_val - if issubclass(exc_type, 
self.expected): - return True - nose.tools.assert_equal(exc_type, self.expected) - # if you get to this line, the last assertion must have passed - # suppress the propagation of this exception - return True - - def assert_raises_context(exc_type): - return AssertRaisesContext(exc_type) - - nose.tools.assert_raises = assert_raises_context diff --git a/tests/helpers.py b/tests/helpers.py index ffe27103d166..9293bcad9e67 100644 --- a/tests/helpers.py +++ b/tests/helpers.py @@ -1,6 +1,6 @@ from __future__ import unicode_literals import boto -from nose.plugins.skip import SkipTest +from unittest import SkipTest import six diff --git a/tests/test_acm/__init__.py b/tests/test_acm/__init__.py new file mode 100644 index 000000000000..08a1c1568c9c --- /dev/null +++ b/tests/test_acm/__init__.py @@ -0,0 +1 @@ +# This file is intentionally left blank. diff --git a/tests/test_acm/resources/__init__.py b/tests/test_acm/resources/__init__.py new file mode 100644 index 000000000000..08a1c1568c9c --- /dev/null +++ b/tests/test_acm/resources/__init__.py @@ -0,0 +1 @@ +# This file is intentionally left blank. diff --git a/tests/test_apigateway/__init__.py b/tests/test_apigateway/__init__.py new file mode 100644 index 000000000000..08a1c1568c9c --- /dev/null +++ b/tests/test_apigateway/__init__.py @@ -0,0 +1 @@ +# This file is intentionally left blank. diff --git a/tests/test_apigateway/test_apigateway.py b/tests/test_apigateway/test_apigateway.py index c58d644fad76..4a6c3eea396b 100644 --- a/tests/test_apigateway/test_apigateway.py +++ b/tests/test_apigateway/test_apigateway.py @@ -11,7 +11,7 @@ import responses from moto import mock_apigateway, mock_cognitoidp, settings from moto.core import ACCOUNT_ID -from nose.tools import assert_raises +import pytest @freeze_time("2015-01-01") @@ -90,7 +90,7 @@ def test_create_rest_api_with_policy(): def test_create_rest_api_invalid_apikeysource(): client = boto3.client("apigateway", region_name="us-west-2") - with assert_raises(ClientError) as ex: + with pytest.raises(ClientError) as ex: client.create_rest_api( name="my_api", description="this is my api", @@ -126,7 +126,7 @@ def test_create_rest_api_valid_apikeysources(): def test_create_rest_api_invalid_endpointconfiguration(): client = boto3.client("apigateway", region_name="us-west-2") - with assert_raises(ClientError) as ex: + with pytest.raises(ClientError) as ex: client.create_rest_api( name="my_api", description="this is my api", @@ -194,7 +194,7 @@ def test_create_resource__validate_name(): valid_names = ["users", "{user_id}", "{proxy+}", "user_09", "good-dog"] # All invalid names should throw an exception for name in invalid_names: - with assert_raises(ClientError) as ex: + with pytest.raises(ClientError) as ex: client.create_resource(restApiId=api_id, parentId=root_id, pathPart=name) ex.exception.response["Error"]["Code"].should.equal("BadRequestException") ex.exception.response["Error"]["Message"].should.equal( @@ -1194,7 +1194,7 @@ def test_create_deployment_requires_REST_methods(): response = client.create_rest_api(name="my_api", description="this is my api") api_id = response["id"] - with assert_raises(ClientError) as ex: + with pytest.raises(ClientError) as ex: client.create_deployment(restApiId=api_id, stageName=stage_name)["id"] ex.exception.response["Error"]["Code"].should.equal("BadRequestException") ex.exception.response["Error"]["Message"].should.equal( @@ -1217,7 +1217,7 @@ def test_create_deployment_requires_REST_method_integrations(): restApiId=api_id, resourceId=root_id, httpMethod="GET", 
authorizationType="NONE" ) - with assert_raises(ClientError) as ex: + with pytest.raises(ClientError) as ex: client.create_deployment(restApiId=api_id, stageName=stage_name)["id"] ex.exception.response["Error"]["Code"].should.equal("BadRequestException") ex.exception.response["Error"]["Message"].should.equal( @@ -1273,7 +1273,7 @@ def test_put_integration_response_requires_responseTemplate(): integrationHttpMethod="POST", ) - with assert_raises(ClientError) as ex: + with pytest.raises(ClientError) as ex: client.put_integration_response( restApiId=api_id, resourceId=root_id, httpMethod="GET", statusCode="200" ) @@ -1314,7 +1314,7 @@ def test_put_integration_response_with_response_template(): integrationHttpMethod="POST", ) - with assert_raises(ClientError) as ex: + with pytest.raises(ClientError) as ex: client.put_integration_response( restApiId=api_id, resourceId=root_id, httpMethod="GET", statusCode="200" ) @@ -1372,7 +1372,7 @@ def test_put_integration_validation(): for type in types_requiring_integration_method: # Ensure that integrations of these types fail if no integrationHttpMethod is provided - with assert_raises(ClientError) as ex: + with pytest.raises(ClientError) as ex: client.put_integration( restApiId=api_id, resourceId=root_id, @@ -1428,7 +1428,7 @@ def test_put_integration_validation(): ) for type in ["AWS_PROXY"]: # Ensure that aws_proxy does not support S3 - with assert_raises(ClientError) as ex: + with pytest.raises(ClientError) as ex: client.put_integration( restApiId=api_id, resourceId=root_id, @@ -1446,7 +1446,7 @@ def test_put_integration_validation(): ) for type in aws_types: # Ensure that the Role ARN is for the current account - with assert_raises(ClientError) as ex: + with pytest.raises(ClientError) as ex: client.put_integration( restApiId=api_id, resourceId=root_id, @@ -1462,7 +1462,7 @@ def test_put_integration_validation(): ) for type in ["AWS"]: # Ensure that the Role ARN is specified for aws integrations - with assert_raises(ClientError) as ex: + with pytest.raises(ClientError) as ex: client.put_integration( restApiId=api_id, resourceId=root_id, @@ -1477,7 +1477,7 @@ def test_put_integration_validation(): ) for type in http_types: # Ensure that the URI is valid HTTP - with assert_raises(ClientError) as ex: + with pytest.raises(ClientError) as ex: client.put_integration( restApiId=api_id, resourceId=root_id, @@ -1492,7 +1492,7 @@ def test_put_integration_validation(): ) for type in aws_types: # Ensure that the URI is an ARN - with assert_raises(ClientError) as ex: + with pytest.raises(ClientError) as ex: client.put_integration( restApiId=api_id, resourceId=root_id, @@ -1507,7 +1507,7 @@ def test_put_integration_validation(): ) for type in aws_types: # Ensure that the URI is a valid ARN - with assert_raises(ClientError) as ex: + with pytest.raises(ClientError) as ex: client.put_integration( restApiId=api_id, resourceId=root_id, @@ -1632,7 +1632,7 @@ def test_create_domain_names(): response["domainName"].should.equal(domain_name) response["certificateName"].should.equal(test_certificate_name) # without domain name it should throw BadRequestException - with assert_raises(ClientError) as ex: + with pytest.raises(ClientError) as ex: client.create_domain_name(domainName="") ex.exception.response["Error"]["Message"].should.equal("No Domain Name specified") @@ -1666,7 +1666,7 @@ def test_get_domain_name(): client = boto3.client("apigateway", region_name="us-west-2") domain_name = "testDomain" # quering an invalid domain name which is not present - with 
assert_raises(ClientError) as ex: + with pytest.raises(ClientError) as ex: client.get_domain_name(domainName=domain_name) ex.exception.response["Error"]["Message"].should.equal( @@ -1701,7 +1701,7 @@ def test_create_model(): response["description"].should.equal(description) # with an invalid rest_api_id it should throw NotFoundException - with assert_raises(ClientError) as ex: + with pytest.raises(ClientError) as ex: client.create_model( restApiId=dummy_rest_api_id, name=model_name, @@ -1713,7 +1713,7 @@ def test_create_model(): ) ex.exception.response["Error"]["Code"].should.equal("NotFoundException") - with assert_raises(ClientError) as ex: + with pytest.raises(ClientError) as ex: client.create_model( restApiId=rest_api_id, name="", @@ -1770,7 +1770,7 @@ def test_get_model_by_name(): result["name"] = model_name result["description"] = description - with assert_raises(ClientError) as ex: + with pytest.raises(ClientError) as ex: client.get_model(restApiId=dummy_rest_api_id, modelName=model_name) ex.exception.response["Error"]["Message"].should.equal( "Invalid Rest API Id specified" @@ -1784,7 +1784,7 @@ def test_get_model_with_invalid_name(): response = client.create_rest_api(name="my_api", description="this is my api") rest_api_id = response["id"] # test with an invalid model name - with assert_raises(ClientError) as ex: + with pytest.raises(ClientError) as ex: client.get_model(restApiId=rest_api_id, modelName="fake") ex.exception.response["Error"]["Message"].should.equal( "Invalid Model Name specified" @@ -1868,7 +1868,7 @@ def test_create_api_headers(): payload = {"value": apikey_value, "name": apikey_name} client.create_api_key(**payload) - with assert_raises(ClientError) as ex: + with pytest.raises(ClientError) as ex: client.create_api_key(**payload) ex.exception.response["Error"]["Code"].should.equal("ConflictException") if not settings.TEST_SERVER_MODE: @@ -1939,7 +1939,7 @@ def test_usage_plans(): len(response["items"]).should.equal(0) # # Try to get info about a non existing usage - with assert_raises(ClientError) as ex: + with pytest.raises(ClientError) as ex: client.get_usage_plan(usagePlanId="not_existing") ex.exception.response["Error"]["Code"].should.equal("NotFoundException") ex.exception.response["Error"]["Message"].should.equal( @@ -2030,7 +2030,7 @@ def test_usage_plan_keys(): len(response["items"]).should.equal(0) # Try to get info about a non existing api key - with assert_raises(ClientError) as ex: + with pytest.raises(ClientError) as ex: client.get_usage_plan_key(usagePlanId=usage_plan_id, keyId="not_existing_key") ex.exception.response["Error"]["Code"].should.equal("NotFoundException") ex.exception.response["Error"]["Message"].should.equal( @@ -2038,7 +2038,7 @@ def test_usage_plan_keys(): ) # Try to get info about an existing api key that has not jet added to a valid usage plan - with assert_raises(ClientError) as ex: + with pytest.raises(ClientError) as ex: client.get_usage_plan_key(usagePlanId=usage_plan_id, keyId=key_id) ex.exception.response["Error"]["Code"].should.equal("NotFoundException") ex.exception.response["Error"]["Message"].should.equal( @@ -2046,7 +2046,7 @@ def test_usage_plan_keys(): ) # Try to get info about an existing api key that has not jet added to a valid usage plan - with assert_raises(ClientError) as ex: + with pytest.raises(ClientError) as ex: client.get_usage_plan_key(usagePlanId="not_existing_plan_id", keyId=key_id) ex.exception.response["Error"]["Code"].should.equal("NotFoundException") 
ex.exception.response["Error"]["Message"].should.equal( diff --git a/tests/test_applicationautoscaling/test_applicationautoscaling.py b/tests/test_applicationautoscaling/test_applicationautoscaling.py index 9b1c0b678f10..f362cc2c1573 100644 --- a/tests/test_applicationautoscaling/test_applicationautoscaling.py +++ b/tests/test_applicationautoscaling/test_applicationautoscaling.py @@ -1,10 +1,10 @@ from __future__ import unicode_literals -import botocore + import boto3 +import botocore +import pytest import sure # noqa -from nose.tools import assert_raises from moto import mock_applicationautoscaling, mock_ecs -from moto.applicationautoscaling.exceptions import AWSValidationException DEFAULT_REGION = "us-east-1" DEFAULT_ECS_CLUSTER = "default" @@ -334,7 +334,7 @@ def test_put_scaling_policy(): }, } - with assert_raises(client.exceptions.ValidationException) as e: + with pytest.raises(client.exceptions.ValidationException) as e: client.put_scaling_policy( PolicyName=policy_name, ServiceNamespace=namespace, @@ -443,7 +443,7 @@ def test_delete_scaling_policies(): }, } - with assert_raises(client.exceptions.ValidationException) as e: + with pytest.raises(client.exceptions.ValidationException) as e: client.delete_scaling_policy( PolicyName=policy_name, ServiceNamespace=namespace, @@ -507,7 +507,7 @@ def test_deregister_scalable_target(): response = client.describe_scalable_targets(ServiceNamespace=namespace) len(response["ScalableTargets"]).should.equal(0) - with assert_raises(client.exceptions.ValidationException) as e: + with pytest.raises(client.exceptions.ValidationException) as e: client.deregister_scalable_target( ServiceNamespace=namespace, ResourceId=resource_id, diff --git a/tests/test_applicationautoscaling/test_validation.py b/tests/test_applicationautoscaling/test_validation.py index 02281ab05ad3..c77b64fc80be 100644 --- a/tests/test_applicationautoscaling/test_validation.py +++ b/tests/test_applicationautoscaling/test_validation.py @@ -4,7 +4,7 @@ from moto.applicationautoscaling import models from moto.applicationautoscaling.exceptions import AWSValidationException from botocore.exceptions import ParamValidationError -from nose.tools import assert_raises +import pytest import sure # noqa from botocore.exceptions import ClientError from parameterized import parameterized @@ -25,21 +25,21 @@ @mock_applicationautoscaling def test_describe_scalable_targets_no_params_should_raise_param_validation_errors(): client = boto3.client("application-autoscaling", region_name=DEFAULT_REGION) - with assert_raises(ParamValidationError): + with pytest.raises(ParamValidationError): client.describe_scalable_targets() @mock_applicationautoscaling def test_register_scalable_target_no_params_should_raise_param_validation_errors(): client = boto3.client("application-autoscaling", region_name=DEFAULT_REGION) - with assert_raises(ParamValidationError): + with pytest.raises(ParamValidationError): client.register_scalable_target() @mock_applicationautoscaling def test_register_scalable_target_with_none_service_namespace_should_raise_param_validation_errors(): client = boto3.client("application-autoscaling", region_name=DEFAULT_REGION) - with assert_raises(ParamValidationError): + with pytest.raises(ParamValidationError): register_scalable_target(client, ServiceNamespace=None) @@ -47,7 +47,7 @@ def test_register_scalable_target_with_none_service_namespace_should_raise_param def test_describe_scalable_targets_with_invalid_scalable_dimension_should_return_validation_exception(): client = 
boto3.client("application-autoscaling", region_name=DEFAULT_REGION) - with assert_raises(ClientError) as err: + with pytest.raises(ClientError) as err: response = client.describe_scalable_targets( ServiceNamespace=DEFAULT_SERVICE_NAMESPACE, ScalableDimension="foo", ) @@ -62,7 +62,7 @@ def test_describe_scalable_targets_with_invalid_scalable_dimension_should_return def test_describe_scalable_targets_with_invalid_service_namespace_should_return_validation_exception(): client = boto3.client("application-autoscaling", region_name=DEFAULT_REGION) - with assert_raises(ClientError) as err: + with pytest.raises(ClientError) as err: response = client.describe_scalable_targets( ServiceNamespace="foo", ScalableDimension=DEFAULT_SCALABLE_DIMENSION, ) @@ -77,7 +77,7 @@ def test_describe_scalable_targets_with_invalid_service_namespace_should_return_ def test_describe_scalable_targets_with_multiple_invalid_parameters_should_return_validation_exception(): client = boto3.client("application-autoscaling", region_name=DEFAULT_REGION) - with assert_raises(ClientError) as err: + with pytest.raises(ClientError) as err: response = client.describe_scalable_targets( ServiceNamespace="foo", ScalableDimension="bar", ) @@ -94,7 +94,7 @@ def test_register_scalable_target_ecs_with_non_existent_service_should_return_va client = boto3.client("application-autoscaling", region_name=DEFAULT_REGION) resource_id = "service/{}/foo".format(DEFAULT_ECS_CLUSTER) - with assert_raises(ClientError) as err: + with pytest.raises(ClientError) as err: register_scalable_target(client, ServiceNamespace="ecs", ResourceId=resource_id) err.response["Error"]["Code"].should.equal("ValidationException") err.response["Error"]["Message"].should.equal( @@ -116,7 +116,7 @@ def test_target_params_are_valid_success(namespace, r_id, dimension, expected): expected ) else: - with assert_raises(AWSValidationException): + with pytest.raises(AWSValidationException): models._target_params_are_valid(namespace, r_id, dimension) diff --git a/tests/test_athena/__init__.py b/tests/test_athena/__init__.py new file mode 100644 index 000000000000..08a1c1568c9c --- /dev/null +++ b/tests/test_athena/__init__.py @@ -0,0 +1 @@ +# This file is intentionally left blank. diff --git a/tests/test_athena/test_athena.py b/tests/test_athena/test_athena.py index 805a653e3e58..98e1dc4b9f13 100644 --- a/tests/test_athena/test_athena.py +++ b/tests/test_athena/test_athena.py @@ -1,7 +1,7 @@ from __future__ import unicode_literals from botocore.exceptions import ClientError -from nose.tools import assert_raises +import pytest import boto3 import sure # noqa @@ -104,7 +104,7 @@ def test_start_query_execution(): def test_start_query_validate_workgroup(): client = boto3.client("athena", region_name="us-east-1") - with assert_raises(ClientError) as err: + with pytest.raises(ClientError) as err: client.start_query_execution( QueryString="query1", QueryExecutionContext={"Database": "string"}, diff --git a/tests/test_autoscaling/__init__.py b/tests/test_autoscaling/__init__.py new file mode 100644 index 000000000000..08a1c1568c9c --- /dev/null +++ b/tests/test_autoscaling/__init__.py @@ -0,0 +1 @@ +# This file is intentionally left blank. 
diff --git a/tests/test_autoscaling/test_autoscaling.py b/tests/test_autoscaling/test_autoscaling.py index 1e7121381943..9e51c4b125f1 100644 --- a/tests/test_autoscaling/test_autoscaling.py +++ b/tests/test_autoscaling/test_autoscaling.py @@ -8,7 +8,7 @@ import boto.ec2.elb import sure # noqa from botocore.exceptions import ClientError -from nose.tools import assert_raises +import pytest from moto import ( mock_autoscaling, @@ -21,7 +21,7 @@ ) from tests.helpers import requires_boto_gte -from utils import ( +from .utils import ( setup_networking, setup_networking_deprecated, setup_instance_with_networking, @@ -781,7 +781,7 @@ def test_create_autoscaling_group_from_invalid_instance_id(): mocked_networking = setup_networking() client = boto3.client("autoscaling", region_name="us-east-1") - with assert_raises(ClientError) as ex: + with pytest.raises(ClientError) as ex: client.create_auto_scaling_group( AutoScalingGroupName="test_asg", InstanceId=invalid_instance_id, diff --git a/tests/test_autoscaling/test_autoscaling_cloudformation.py b/tests/test_autoscaling/test_autoscaling_cloudformation.py index 240ba66e0a59..24a5b5628a7f 100644 --- a/tests/test_autoscaling/test_autoscaling_cloudformation.py +++ b/tests/test_autoscaling/test_autoscaling_cloudformation.py @@ -7,7 +7,7 @@ mock_ec2, ) -from utils import setup_networking +from .utils import setup_networking @mock_autoscaling diff --git a/tests/test_autoscaling/test_elbv2.py b/tests/test_autoscaling/test_elbv2.py index a3d3dba9f670..d3b1cc5f8b4b 100644 --- a/tests/test_autoscaling/test_elbv2.py +++ b/tests/test_autoscaling/test_elbv2.py @@ -4,7 +4,7 @@ import sure # noqa from moto import mock_autoscaling, mock_ec2, mock_elbv2 -from utils import setup_networking +from .utils import setup_networking @mock_elbv2 diff --git a/tests/test_autoscaling/test_policies.py b/tests/test_autoscaling/test_policies.py index f44938eea27a..284fe267a8f5 100644 --- a/tests/test_autoscaling/test_policies.py +++ b/tests/test_autoscaling/test_policies.py @@ -7,7 +7,7 @@ from moto import mock_autoscaling_deprecated -from utils import setup_networking_deprecated +from .utils import setup_networking_deprecated def setup_autoscale_group(): diff --git a/tests/test_awslambda/__init__.py b/tests/test_awslambda/__init__.py index e69de29bb2d1..08a1c1568c9c 100644 --- a/tests/test_awslambda/__init__.py +++ b/tests/test_awslambda/__init__.py @@ -0,0 +1 @@ +# This file is intentionally left blank. 
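Alongside the assertion changes, the autoscaling hunks above add blank `__init__.py` files and rewrite bare helper imports (`from utils import ...`) as package-relative ones (`from .utils import ...`). Under pytest's default prepend import mode, plain `utils.py` modules in two different test directories would collide on the module name `utils`; turning each test directory into a package gives every module a unique dotted name. A layout sketch, shown as commented excerpts rather than a single runnable file:

    # tests/test_autoscaling/__init__.py
    # This file is intentionally left blank; its presence makes the test
    # directory an importable package (tests.test_autoscaling).

    # tests/test_autoscaling/test_policies.py (illustrative excerpt)
    from .utils import setup_networking_deprecated  # resolves unambiguously to
                                                    # the sibling utils.py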
diff --git a/tests/test_awslambda/test_awslambda_cloudformation.py b/tests/test_awslambda/test_awslambda_cloudformation.py index c3061ff3a0e5..f87918328897 100644 --- a/tests/test_awslambda/test_awslambda_cloudformation.py +++ b/tests/test_awslambda/test_awslambda_cloudformation.py @@ -4,7 +4,7 @@ import zipfile from botocore.exceptions import ClientError from moto import mock_cloudformation, mock_iam, mock_lambda, mock_s3, mock_sqs -from nose.tools import assert_raises +import pytest from string import Template from uuid import uuid4 @@ -109,7 +109,7 @@ def test_lambda_can_be_deleted_by_cloudformation(): # Delete Stack cf.delete_stack(StackName=stack["StackId"]) # Verify function was deleted - with assert_raises(ClientError) as e: + with pytest.raises(ClientError) as e: lmbda.get_function(FunctionName=created_fn_name) e.exception.response["Error"]["Code"].should.equal("ResourceNotFoundException") diff --git a/tests/test_awslambda/test_lambda.py b/tests/test_awslambda/test_lambda.py index f7e7b3c7e1bf..2de95cb3c091 100644 --- a/tests/test_awslambda/test_lambda.py +++ b/tests/test_awslambda/test_lambda.py @@ -24,7 +24,7 @@ mock_sqs, ) from moto.sts.models import ACCOUNT_ID -from nose.tools import assert_raises +import pytest from botocore.exceptions import ClientError _lambda_region = "us-west-2" @@ -497,7 +497,7 @@ def test_get_function(): ) # Test get function when can't find function name - with assert_raises(conn.exceptions.ResourceNotFoundException): + with pytest.raises(conn.exceptions.ResourceNotFoundException): conn.get_function(FunctionName="junk", Qualifier="$LATEST") @@ -1800,7 +1800,7 @@ def test_get_function_concurrency(): def create_invalid_lambda(role): conn = boto3.client("lambda", _lambda_region) zip_content = get_test_zip_file1() - with assert_raises(ClientError) as err: + with pytest.raises(ClientError) as err: conn.create_function( FunctionName="testFunction", Runtime="python2.7", diff --git a/tests/test_batch/__init__.py b/tests/test_batch/__init__.py new file mode 100644 index 000000000000..08a1c1568c9c --- /dev/null +++ b/tests/test_batch/__init__.py @@ -0,0 +1 @@ +# This file is intentionally left blank. 
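Several of the converted lambda tests rely on boto3's modeled, per-client exception classes rather than the generic `ClientError`; `pytest.raises` accepts any exception type, so the classes generated under `client.exceptions` work directly. A runnable sketch mirroring the `get_function` test above (it assumes moto and boto3 are installed; the region matches the `_lambda_region` used in the test module):

    import boto3
    import pytest
    from moto import mock_lambda

    @mock_lambda
    def test_get_missing_function_raises_modeled_error():
        conn = boto3.client("lambda", region_name="us-west-2")
        # boto3 builds a concrete exception class per modeled error shape,
        # reachable via the client's .exceptions factory.
        with pytest.raises(conn.exceptions.ResourceNotFoundException):
            conn.get_function(FunctionName="junk", Qualifier="$LATEST")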
diff --git a/tests/test_batch/test_batch.py b/tests/test_batch/test_batch.py index 566be6aca47b..511042d1f191 100644 --- a/tests/test_batch/test_batch.py +++ b/tests/test_batch/test_batch.py @@ -7,10 +7,6 @@ import sure # noqa from moto import mock_batch, mock_iam, mock_ec2, mock_ecs, mock_logs -import functools -import nose - - DEFAULT_REGION = "eu-central-1" diff --git a/tests/test_batch/test_batch_cloudformation.py b/tests/test_batch/test_batch_cloudformation.py index cc51b79f3ac1..7935f3fe9c01 100644 --- a/tests/test_batch/test_batch_cloudformation.py +++ b/tests/test_batch/test_batch_cloudformation.py @@ -14,7 +14,6 @@ mock_cloudformation, ) import functools -import nose import json DEFAULT_REGION = "eu-central-1" diff --git a/tests/test_cloudformation/test_cloudformation_stack_crud.py b/tests/test_cloudformation/test_cloudformation_stack_crud.py index d7e26e85d737..6baae83bcc42 100644 --- a/tests/test_cloudformation/test_cloudformation_stack_crud.py +++ b/tests/test_cloudformation/test_cloudformation_stack_crud.py @@ -12,9 +12,7 @@ from boto.exception import BotoServerError import sure # noqa -# Ensure 'assert_raises' context manager support for Python 2.6 -import tests.backport_assert_raises # noqa -from nose.tools import assert_raises +import pytest from moto.core import ACCOUNT_ID from moto import ( @@ -319,7 +317,7 @@ def test_delete_stack_by_id(): conn.describe_stacks().should.have.length_of(1) conn.delete_stack(stack_id) conn.describe_stacks().should.have.length_of(0) - with assert_raises(BotoServerError): + with pytest.raises(BotoServerError): conn.describe_stacks("test_stack") conn.describe_stacks(stack_id).should.have.length_of(1) @@ -338,7 +336,7 @@ def test_delete_stack_with_resource_missing_delete_attr(): @mock_cloudformation_deprecated def test_bad_describe_stack(): conn = boto.connect_cloudformation() - with assert_raises(BotoServerError): + with pytest.raises(BotoServerError): conn.describe_stacks("bad_stack") @@ -519,7 +517,7 @@ def test_update_stack_when_rolled_back(): stack_id ].status = "ROLLBACK_COMPLETE" - with assert_raises(BotoServerError) as err: + with pytest.raises(BotoServerError) as err: conn.update_stack("test_stack", dummy_template_json) ex = err.exception diff --git a/tests/test_cloudformation/test_cloudformation_stack_crud_boto3.py b/tests/test_cloudformation/test_cloudformation_stack_crud_boto3.py index 65469f1b34ea..86b6f1a94e82 100644 --- a/tests/test_cloudformation/test_cloudformation_stack_crud_boto3.py +++ b/tests/test_cloudformation/test_cloudformation_stack_crud_boto3.py @@ -9,8 +9,7 @@ from botocore.exceptions import ClientError import sure # noqa -# Ensure 'assert_raises' context manager support for Python 2.6 -from nose.tools import assert_raises +import pytest from moto import mock_cloudformation, mock_s3, mock_sqs, mock_ec2 from moto.core import ACCOUNT_ID @@ -548,7 +547,7 @@ def test_boto3_list_stack_set_operations(): @mock_cloudformation def test_boto3_bad_list_stack_resources(): cf_conn = boto3.client("cloudformation", region_name="us-east-1") - with assert_raises(ClientError): + with pytest.raises(ClientError): cf_conn.list_stack_resources(StackName="test_stack_set") @@ -1180,7 +1179,7 @@ def test_describe_updated_stack(): @mock_cloudformation def test_bad_describe_stack(): cf_conn = boto3.client("cloudformation", region_name="us-east-1") - with assert_raises(ClientError): + with pytest.raises(ClientError): cf_conn.describe_stacks(StackName="non_existent_stack") @@ -1332,7 +1331,7 @@ def test_delete_stack_with_export(): def 
test_export_names_must_be_unique(): cf = boto3.resource("cloudformation", region_name="us-east-1") cf.create_stack(StackName="test_stack", TemplateBody=dummy_output_template_json) - with assert_raises(ClientError): + with pytest.raises(ClientError): cf.create_stack(StackName="test_stack", TemplateBody=dummy_output_template_json) @@ -1373,7 +1372,7 @@ def test_boto3_create_duplicate_stack(): StackName="test_stack", TemplateBody=dummy_template_json, ) - with assert_raises(ClientError): + with pytest.raises(ClientError): cf_conn.create_stack( StackName="test_stack", TemplateBody=dummy_template_json, ) diff --git a/tests/test_cloudformation/test_validate.py b/tests/test_cloudformation/test_validate.py index ea14fceeaf46..a4c65a4c73dd 100644 --- a/tests/test_cloudformation/test_validate.py +++ b/tests/test_cloudformation/test_validate.py @@ -3,7 +3,6 @@ import yaml import os import boto3 -from nose.tools import raises import botocore import sure # noqa diff --git a/tests/test_cloudwatch/__init__.py b/tests/test_cloudwatch/__init__.py new file mode 100644 index 000000000000..08a1c1568c9c --- /dev/null +++ b/tests/test_cloudwatch/__init__.py @@ -0,0 +1 @@ +# This file is intentionally left blank. diff --git a/tests/test_cloudwatch/test_cloudwatch_boto3.py b/tests/test_cloudwatch/test_cloudwatch_boto3.py index 9c4757d6071b..c62f3145962d 100644 --- a/tests/test_cloudwatch/test_cloudwatch_boto3.py +++ b/tests/test_cloudwatch/test_cloudwatch_boto3.py @@ -4,7 +4,7 @@ from botocore.exceptions import ClientError from datetime import datetime, timedelta from freezegun import freeze_time -from nose.tools import assert_raises +import pytest from uuid import uuid4 import pytz import sure # noqa @@ -111,7 +111,7 @@ def test_delete_invalid_alarm(): ) # trying to delete an alarm which is not created along with valid alarm. - with assert_raises(ClientError) as e: + with pytest.raises(ClientError) as e: cloudwatch.delete_alarms(AlarmNames=["InvalidAlarmName", "testalarm1"]) e.exception.response["Error"]["Code"].should.equal("ResourceNotFound") @@ -120,7 +120,7 @@ def test_delete_invalid_alarm(): len(resp["MetricAlarms"]).should.equal(1) # test to check if the error raises if only one invalid alarm is tried to delete. 
- with assert_raises(ClientError) as e: + with pytest.raises(ClientError) as e: cloudwatch.delete_alarms(AlarmNames=["InvalidAlarmName"]) e.exception.response["Error"]["Code"].should.equal("ResourceNotFound") @@ -423,7 +423,7 @@ def test_list_metrics_paginated(): # Verify that only a single page of metrics is returned cloudwatch.list_metrics()["Metrics"].should.be.empty # Verify we can't pass a random NextToken - with assert_raises(ClientError) as e: + with pytest.raises(ClientError) as e: cloudwatch.list_metrics(NextToken=str(uuid4())) e.exception.response["Error"]["Message"].should.equal( "Request parameter NextToken is invalid" @@ -452,7 +452,7 @@ def test_list_metrics_paginated(): len(third_page["Metrics"]).should.equal(100) third_page.shouldnt.contain("NextToken") # Verify that we can't reuse an existing token - with assert_raises(ClientError) as e: + with pytest.raises(ClientError) as e: cloudwatch.list_metrics(NextToken=first_page["NextToken"]) e.exception.response["Error"]["Message"].should.equal( "Request parameter NextToken is invalid" diff --git a/tests/test_codecommit/test_codecommit.py b/tests/test_codecommit/test_codecommit.py index 69021372a06f..7a5867d44a40 100644 --- a/tests/test_codecommit/test_codecommit.py +++ b/tests/test_codecommit/test_codecommit.py @@ -4,7 +4,7 @@ from moto import mock_codecommit from moto.core import ACCOUNT_ID from botocore.exceptions import ClientError -from nose.tools import assert_raises +import pytest @mock_codecommit @@ -81,7 +81,7 @@ def test_create_repository_repository_name_exists(): client.create_repository(repositoryName="repository_two") - with assert_raises(ClientError) as e: + with pytest.raises(ClientError) as e: client.create_repository( repositoryName="repository_two", repositoryDescription="description repo two", @@ -99,7 +99,7 @@ def test_create_repository_repository_name_exists(): def test_create_repository_invalid_repository_name(): client = boto3.client("codecommit", region_name="eu-central-1") - with assert_raises(ClientError) as e: + with pytest.raises(ClientError) as e: client.create_repository(repositoryName="in_123_valid_@#$_characters") ex = e.exception ex.operation_name.should.equal("CreateRepository") @@ -156,7 +156,7 @@ def test_get_repository(): client = boto3.client("codecommit", region_name="us-east-1") - with assert_raises(ClientError) as e: + with pytest.raises(ClientError) as e: client.get_repository(repositoryName=repository_name) ex = e.exception ex.operation_name.should.equal("GetRepository") @@ -171,7 +171,7 @@ def test_get_repository(): def test_get_repository_invalid_repository_name(): client = boto3.client("codecommit", region_name="eu-central-1") - with assert_raises(ClientError) as e: + with pytest.raises(ClientError) as e: client.get_repository(repositoryName="repository_one-@#@") ex = e.exception ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) @@ -207,7 +207,7 @@ def test_delete_repository(): def test_delete_repository_invalid_repository_name(): client = boto3.client("codecommit", region_name="us-east-1") - with assert_raises(ClientError) as e: + with pytest.raises(ClientError) as e: client.delete_repository(repositoryName="_rep@ository_one") ex = e.exception ex.operation_name.should.equal("DeleteRepository") diff --git a/tests/test_codepipeline/test_codepipeline.py b/tests/test_codepipeline/test_codepipeline.py index a40efa05c2f3..ac72f99818c3 100644 --- a/tests/test_codepipeline/test_codepipeline.py +++ b/tests/test_codepipeline/test_codepipeline.py @@ -4,7 +4,7 @@ import boto3 
import sure # noqa from botocore.exceptions import ClientError -from nose.tools import assert_raises +import pytest from moto import mock_codepipeline, mock_iam @@ -77,7 +77,7 @@ def test_create_pipeline_errors(): client_iam = boto3.client("iam", region_name="us-east-1") create_basic_codepipeline(client, "test-pipeline") - with assert_raises(ClientError) as e: + with pytest.raises(ClientError) as e: create_basic_codepipeline(client, "test-pipeline") ex = e.exception ex.operation_name.should.equal("CreatePipeline") @@ -87,7 +87,7 @@ def test_create_pipeline_errors(): "A pipeline with the name 'test-pipeline' already exists in account '123456789012'" ) - with assert_raises(ClientError) as e: + with pytest.raises(ClientError) as e: client.create_pipeline( pipeline={ "name": "invalid-pipeline", @@ -139,7 +139,7 @@ def test_create_pipeline_errors(): ), )["Role"]["Arn"] - with assert_raises(ClientError) as e: + with pytest.raises(ClientError) as e: client.create_pipeline( pipeline={ "name": "invalid-pipeline", @@ -175,7 +175,7 @@ def test_create_pipeline_errors(): "CodePipeline is not authorized to perform AssumeRole on role arn:aws:iam::123456789012:role/wrong-role" ) - with assert_raises(ClientError) as e: + with pytest.raises(ClientError) as e: client.create_pipeline( pipeline={ "name": "invalid-pipeline", @@ -282,7 +282,7 @@ def test_get_pipeline(): def test_get_pipeline_errors(): client = boto3.client("codepipeline", region_name="us-east-1") - with assert_raises(ClientError) as e: + with pytest.raises(ClientError) as e: client.get_pipeline(name="not-existing") ex = e.exception ex.operation_name.should.equal("GetPipeline") @@ -410,7 +410,7 @@ def test_update_pipeline(): def test_update_pipeline_errors(): client = boto3.client("codepipeline", region_name="us-east-1") - with assert_raises(ClientError) as e: + with pytest.raises(ClientError) as e: client.update_pipeline( pipeline={ "name": "not-existing", @@ -517,7 +517,7 @@ def test_list_tags_for_resource(): def test_list_tags_for_resource_errors(): client = boto3.client("codepipeline", region_name="us-east-1") - with assert_raises(ClientError) as e: + with pytest.raises(ClientError) as e: client.list_tags_for_resource( resourceArn="arn:aws:codepipeline:us-east-1:123456789012:not-existing" ) @@ -555,7 +555,7 @@ def test_tag_resource_errors(): name = "test-pipeline" create_basic_codepipeline(client, name) - with assert_raises(ClientError) as e: + with pytest.raises(ClientError) as e: client.tag_resource( resourceArn="arn:aws:codepipeline:us-east-1:123456789012:not-existing", tags=[{"key": "key-2", "value": "value-2"}], @@ -568,7 +568,7 @@ def test_tag_resource_errors(): "The account with id '123456789012' does not include a pipeline with the name 'not-existing'" ) - with assert_raises(ClientError) as e: + with pytest.raises(ClientError) as e: client.tag_resource( resourceArn="arn:aws:codepipeline:us-east-1:123456789012:{}".format(name), tags=[{"key": "aws:key", "value": "value"}], @@ -583,7 +583,7 @@ def test_tag_resource_errors(): "msg=[Caller is an end user and not allowed to mutate system tags]" ) - with assert_raises(ClientError) as e: + with pytest.raises(ClientError) as e: client.tag_resource( resourceArn="arn:aws:codepipeline:us-east-1:123456789012:{}".format(name), tags=[ @@ -634,7 +634,7 @@ def test_untag_resource(): def test_untag_resource_errors(): client = boto3.client("codepipeline", region_name="us-east-1") - with assert_raises(ClientError) as e: + with pytest.raises(ClientError) as e: client.untag_resource( 
resourceArn="arn:aws:codepipeline:us-east-1:123456789012:not-existing", tagKeys=["key"], diff --git a/tests/test_cognitoidentity/__init__.py b/tests/test_cognitoidentity/__init__.py new file mode 100644 index 000000000000..08a1c1568c9c --- /dev/null +++ b/tests/test_cognitoidentity/__init__.py @@ -0,0 +1 @@ +# This file is intentionally left blank. diff --git a/tests/test_cognitoidentity/test_cognitoidentity.py b/tests/test_cognitoidentity/test_cognitoidentity.py index 164cb023c60d..a159033297ba 100644 --- a/tests/test_cognitoidentity/test_cognitoidentity.py +++ b/tests/test_cognitoidentity/test_cognitoidentity.py @@ -3,7 +3,7 @@ import boto3 import sure # noqa from botocore.exceptions import ClientError -from nose.tools import assert_raises +import pytest from moto import mock_cognitoidentity from moto.cognitoidentity.utils import get_random_identity_id @@ -75,7 +75,7 @@ def test_describe_identity_pool(): def test_describe_identity_pool_with_invalid_id_raises_error(): conn = boto3.client("cognito-identity", "us-west-2") - with assert_raises(ClientError) as cm: + with pytest.raises(ClientError) as cm: conn.describe_identity_pool(IdentityPoolId="us-west-2_non-existent") cm.exception.operation_name.should.equal("DescribeIdentityPool") diff --git a/tests/test_cognitoidp/__init__.py b/tests/test_cognitoidp/__init__.py new file mode 100644 index 000000000000..08a1c1568c9c --- /dev/null +++ b/tests/test_cognitoidp/__init__.py @@ -0,0 +1 @@ +# This file is intentionally left blank. diff --git a/tests/test_cognitoidp/test_cognitoidp.py b/tests/test_cognitoidp/test_cognitoidp.py index a5212b82e1a7..bbd8d5a39172 100644 --- a/tests/test_cognitoidp/test_cognitoidp.py +++ b/tests/test_cognitoidp/test_cognitoidp.py @@ -17,7 +17,7 @@ import sure # noqa from botocore.exceptions import ClientError from jose import jws, jwk, jwt -from nose.tools import assert_raises +import pytest from moto import mock_cognitoidp, settings from moto.cognitoidp.utils import create_id @@ -603,7 +603,7 @@ def test_update_identity_provider_no_user_pool(): new_value = str(uuid.uuid4()) - with assert_raises(conn.exceptions.ResourceNotFoundException) as cm: + with pytest.raises(conn.exceptions.ResourceNotFoundException) as cm: conn.update_identity_provider( UserPoolId="foo", ProviderName="bar", ProviderDetails={"thing": new_value} ) @@ -623,7 +623,7 @@ def test_update_identity_provider_no_identity_provider(): new_value = str(uuid.uuid4()) user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] - with assert_raises(conn.exceptions.ResourceNotFoundException) as cm: + with pytest.raises(conn.exceptions.ResourceNotFoundException) as cm: conn.update_identity_provider( UserPoolId=user_pool_id, ProviderName="foo", @@ -699,7 +699,7 @@ def test_create_group_with_duplicate_name_raises_error(): conn.create_group(GroupName=group_name, UserPoolId=user_pool_id) - with assert_raises(ClientError) as cm: + with pytest.raises(ClientError) as cm: conn.create_group(GroupName=group_name, UserPoolId=user_pool_id) cm.exception.operation_name.should.equal("CreateGroup") cm.exception.response["Error"]["Code"].should.equal("GroupExistsException") @@ -747,7 +747,7 @@ def test_delete_group(): result = conn.delete_group(GroupName=group_name, UserPoolId=user_pool_id) list(result.keys()).should.equal(["ResponseMetadata"]) # No response expected - with assert_raises(ClientError) as cm: + with pytest.raises(ClientError) as cm: conn.get_group(GroupName=group_name, UserPoolId=user_pool_id) 
cm.exception.response["Error"]["Code"].should.equal("ResourceNotFoundException") @@ -1565,7 +1565,7 @@ def test_resource_server(): res["ResourceServer"]["Name"].should.equal(name) res["ResourceServer"]["Scopes"].should.equal(scopes) - with assert_raises(ClientError) as ex: + with pytest.raises(ClientError) as ex: client.create_resource_server( UserPoolId=user_pool_id, Identifier=identifier, Name=name, Scopes=scopes ) diff --git a/tests/test_config/__init__.py b/tests/test_config/__init__.py new file mode 100644 index 000000000000..08a1c1568c9c --- /dev/null +++ b/tests/test_config/__init__.py @@ -0,0 +1 @@ +# This file is intentionally left blank. diff --git a/tests/test_config/test_config.py b/tests/test_config/test_config.py index 34462222150e..716792863fba 100644 --- a/tests/test_config/test_config.py +++ b/tests/test_config/test_config.py @@ -5,8 +5,8 @@ import boto3 from botocore.exceptions import ClientError -from nose import SkipTest -from nose.tools import assert_raises +from unittest import SkipTest +import pytest from moto import mock_s3 from moto.config import mock_config @@ -20,7 +20,7 @@ def test_put_configuration_recorder(): client = boto3.client("config", region_name="us-west-2") # Try without a name supplied: - with assert_raises(ClientError) as ce: + with pytest.raises(ClientError) as ce: client.put_configuration_recorder(ConfigurationRecorder={"roleARN": "somearn"}) assert ( ce.exception.response["Error"]["Code"] @@ -29,7 +29,7 @@ def test_put_configuration_recorder(): assert "is not valid, blank string." in ce.exception.response["Error"]["Message"] # Try with a really long name: - with assert_raises(ClientError) as ce: + with pytest.raises(ClientError) as ce: client.put_configuration_recorder( ConfigurationRecorder={"name": "a" * 257, "roleARN": "somearn"} ) @@ -68,7 +68,7 @@ def test_put_configuration_recorder(): ] for bg in bad_groups: - with assert_raises(ClientError) as ce: + with pytest.raises(ClientError) as ce: client.put_configuration_recorder( ConfigurationRecorder={ "name": "default", @@ -85,7 +85,7 @@ def test_put_configuration_recorder(): ) # With an invalid Resource Type: - with assert_raises(ClientError) as ce: + with pytest.raises(ClientError) as ce: client.put_configuration_recorder( ConfigurationRecorder={ "name": "default", @@ -166,7 +166,7 @@ def test_put_configuration_recorder(): assert not result[0]["recordingGroup"].get("resourceTypes") # Can currently only have exactly 1 Config Recorder in an account/region: - with assert_raises(ClientError) as ce: + with pytest.raises(ClientError) as ce: client.put_configuration_recorder( ConfigurationRecorder={ "name": "someotherrecorder", @@ -192,7 +192,7 @@ def test_put_configuration_aggregator(): client = boto3.client("config", region_name="us-west-2") # With too many aggregation sources: - with assert_raises(ClientError) as ce: + with pytest.raises(ClientError) as ce: client.put_configuration_aggregator( ConfigurationAggregatorName="testing", AccountAggregationSources=[ @@ -213,7 +213,7 @@ def test_put_configuration_aggregator(): assert ce.exception.response["Error"]["Code"] == "ValidationException" # With an invalid region config (no regions defined): - with assert_raises(ClientError) as ce: + with pytest.raises(ClientError) as ce: client.put_configuration_aggregator( ConfigurationAggregatorName="testing", AccountAggregationSources=[ @@ -229,7 +229,7 @@ def test_put_configuration_aggregator(): ) assert ce.exception.response["Error"]["Code"] == "InvalidParameterValueException" - with 
assert_raises(ClientError) as ce: + with pytest.raises(ClientError) as ce: client.put_configuration_aggregator( ConfigurationAggregatorName="testing", OrganizationAggregationSource={ @@ -243,7 +243,7 @@ def test_put_configuration_aggregator(): assert ce.exception.response["Error"]["Code"] == "InvalidParameterValueException" # With both region flags defined: - with assert_raises(ClientError) as ce: + with pytest.raises(ClientError) as ce: client.put_configuration_aggregator( ConfigurationAggregatorName="testing", AccountAggregationSources=[ @@ -260,7 +260,7 @@ def test_put_configuration_aggregator(): ) assert ce.exception.response["Error"]["Code"] == "InvalidParameterValueException" - with assert_raises(ClientError) as ce: + with pytest.raises(ClientError) as ce: client.put_configuration_aggregator( ConfigurationAggregatorName="testing", OrganizationAggregationSource={ @@ -276,7 +276,7 @@ def test_put_configuration_aggregator(): assert ce.exception.response["Error"]["Code"] == "InvalidParameterValueException" # Name too long: - with assert_raises(ClientError) as ce: + with pytest.raises(ClientError) as ce: client.put_configuration_aggregator( ConfigurationAggregatorName="a" * 257, AccountAggregationSources=[ @@ -287,7 +287,7 @@ def test_put_configuration_aggregator(): assert ce.exception.response["Error"]["Code"] == "ValidationException" # Too many tags (>50): - with assert_raises(ClientError) as ce: + with pytest.raises(ClientError) as ce: client.put_configuration_aggregator( ConfigurationAggregatorName="testing", AccountAggregationSources=[ @@ -304,7 +304,7 @@ def test_put_configuration_aggregator(): assert ce.exception.response["Error"]["Code"] == "ValidationException" # Tag key is too big (>128 chars): - with assert_raises(ClientError) as ce: + with pytest.raises(ClientError) as ce: client.put_configuration_aggregator( ConfigurationAggregatorName="testing", AccountAggregationSources=[ @@ -319,7 +319,7 @@ def test_put_configuration_aggregator(): assert ce.exception.response["Error"]["Code"] == "ValidationException" # Tag value is too big (>256 chars): - with assert_raises(ClientError) as ce: + with pytest.raises(ClientError) as ce: client.put_configuration_aggregator( ConfigurationAggregatorName="testing", AccountAggregationSources=[ @@ -334,7 +334,7 @@ def test_put_configuration_aggregator(): assert ce.exception.response["Error"]["Code"] == "ValidationException" # Duplicate Tags: - with assert_raises(ClientError) as ce: + with pytest.raises(ClientError) as ce: client.put_configuration_aggregator( ConfigurationAggregatorName="testing", AccountAggregationSources=[ @@ -346,7 +346,7 @@ def test_put_configuration_aggregator(): assert ce.exception.response["Error"]["Code"] == "InvalidInput" # Invalid characters in the tag key: - with assert_raises(ClientError) as ce: + with pytest.raises(ClientError) as ce: client.put_configuration_aggregator( ConfigurationAggregatorName="testing", AccountAggregationSources=[ @@ -361,7 +361,7 @@ def test_put_configuration_aggregator(): assert ce.exception.response["Error"]["Code"] == "ValidationException" # If it contains both the AccountAggregationSources and the OrganizationAggregationSource - with assert_raises(ClientError) as ce: + with pytest.raises(ClientError) as ce: client.put_configuration_aggregator( ConfigurationAggregatorName="testing", AccountAggregationSources=[ @@ -379,7 +379,7 @@ def test_put_configuration_aggregator(): assert ce.exception.response["Error"]["Code"] == "InvalidParameterValueException" # If it contains neither: - with 
assert_raises(ClientError) as ce: + with pytest.raises(ClientError) as ce: client.put_configuration_aggregator(ConfigurationAggregatorName="testing") assert ( "AccountAggregationSource or the OrganizationAggregationSource" @@ -466,7 +466,7 @@ def test_describe_configuration_aggregators(): ) # Describe with an incorrect name: - with assert_raises(ClientError) as ce: + with pytest.raises(ClientError) as ce: client.describe_configuration_aggregators( ConfigurationAggregatorNames=["DoesNotExist"] ) @@ -480,7 +480,7 @@ def test_describe_configuration_aggregators(): ) # Error describe with more than 1 item in the list: - with assert_raises(ClientError) as ce: + with pytest.raises(ClientError) as ce: client.describe_configuration_aggregators( ConfigurationAggregatorNames=["testing0", "DoesNotExist"] ) @@ -551,7 +551,7 @@ def test_describe_configuration_aggregators(): ) # Test with an invalid filter: - with assert_raises(ClientError) as ce: + with pytest.raises(ClientError) as ce: client.describe_configuration_aggregators(NextToken="WRONG") assert ( "The nextToken provided is invalid" == ce.exception.response["Error"]["Message"] @@ -564,7 +564,7 @@ def test_put_aggregation_authorization(): client = boto3.client("config", region_name="us-west-2") # Too many tags (>50): - with assert_raises(ClientError) as ce: + with pytest.raises(ClientError) as ce: client.put_aggregation_authorization( AuthorizedAccountId="012345678910", AuthorizedAwsRegion="us-west-2", @@ -579,7 +579,7 @@ def test_put_aggregation_authorization(): assert ce.exception.response["Error"]["Code"] == "ValidationException" # Tag key is too big (>128 chars): - with assert_raises(ClientError) as ce: + with pytest.raises(ClientError) as ce: client.put_aggregation_authorization( AuthorizedAccountId="012345678910", AuthorizedAwsRegion="us-west-2", @@ -592,7 +592,7 @@ def test_put_aggregation_authorization(): assert ce.exception.response["Error"]["Code"] == "ValidationException" # Tag value is too big (>256 chars): - with assert_raises(ClientError) as ce: + with pytest.raises(ClientError) as ce: client.put_aggregation_authorization( AuthorizedAccountId="012345678910", AuthorizedAwsRegion="us-west-2", @@ -605,7 +605,7 @@ def test_put_aggregation_authorization(): assert ce.exception.response["Error"]["Code"] == "ValidationException" # Duplicate Tags: - with assert_raises(ClientError) as ce: + with pytest.raises(ClientError) as ce: client.put_aggregation_authorization( AuthorizedAccountId="012345678910", AuthorizedAwsRegion="us-west-2", @@ -615,7 +615,7 @@ def test_put_aggregation_authorization(): assert ce.exception.response["Error"]["Code"] == "InvalidInput" # Invalid characters in the tag key: - with assert_raises(ClientError) as ce: + with pytest.raises(ClientError) as ce: client.put_aggregation_authorization( AuthorizedAccountId="012345678910", AuthorizedAwsRegion="us-west-2", @@ -708,7 +708,7 @@ def test_describe_aggregation_authorizations(): ] == ["{}".format(str(x) * 12) for x in range(8, 10)] # Test with an invalid filter: - with assert_raises(ClientError) as ce: + with pytest.raises(ClientError) as ce: client.describe_aggregation_authorizations(NextToken="WRONG") assert ( "The nextToken provided is invalid" == ce.exception.response["Error"]["Message"] @@ -751,7 +751,7 @@ def test_delete_configuration_aggregator(): client.delete_configuration_aggregator(ConfigurationAggregatorName="testing") # And again to confirm that it's deleted: - with assert_raises(ClientError) as ce: + with pytest.raises(ClientError) as ce: 
client.delete_configuration_aggregator(ConfigurationAggregatorName="testing") assert ( "The configuration aggregator does not exist." @@ -796,7 +796,7 @@ def test_describe_configurations(): ) # Specify an incorrect name: - with assert_raises(ClientError) as ce: + with pytest.raises(ClientError) as ce: client.describe_configuration_recorders(ConfigurationRecorderNames=["wrong"]) assert ( ce.exception.response["Error"]["Code"] == "NoSuchConfigurationRecorderException" @@ -804,7 +804,7 @@ def test_describe_configurations(): assert "wrong" in ce.exception.response["Error"]["Message"] # And with both a good and wrong name: - with assert_raises(ClientError) as ce: + with pytest.raises(ClientError) as ce: client.describe_configuration_recorders( ConfigurationRecorderNames=["testrecorder", "wrong"] ) @@ -819,7 +819,7 @@ def test_delivery_channels(): client = boto3.client("config", region_name="us-west-2") # Try without a config recorder: - with assert_raises(ClientError) as ce: + with pytest.raises(ClientError) as ce: client.put_delivery_channel(DeliveryChannel={}) assert ( ce.exception.response["Error"]["Code"] @@ -845,7 +845,7 @@ def test_delivery_channels(): ) # Try without a name supplied: - with assert_raises(ClientError) as ce: + with pytest.raises(ClientError) as ce: client.put_delivery_channel(DeliveryChannel={}) assert ( ce.exception.response["Error"]["Code"] == "InvalidDeliveryChannelNameException" @@ -853,7 +853,7 @@ def test_delivery_channels(): assert "is not valid, blank string." in ce.exception.response["Error"]["Message"] # Try with a really long name: - with assert_raises(ClientError) as ce: + with pytest.raises(ClientError) as ce: client.put_delivery_channel(DeliveryChannel={"name": "a" * 257}) assert ce.exception.response["Error"]["Code"] == "ValidationException" assert ( @@ -862,7 +862,7 @@ def test_delivery_channels(): ) # Without specifying a bucket name: - with assert_raises(ClientError) as ce: + with pytest.raises(ClientError) as ce: client.put_delivery_channel(DeliveryChannel={"name": "testchannel"}) assert ce.exception.response["Error"]["Code"] == "NoSuchBucketException" assert ( @@ -870,7 +870,7 @@ def test_delivery_channels(): == "Cannot find a S3 bucket with an empty bucket name." ) - with assert_raises(ClientError) as ce: + with pytest.raises(ClientError) as ce: client.put_delivery_channel( DeliveryChannel={"name": "testchannel", "s3BucketName": ""} ) @@ -881,7 +881,7 @@ def test_delivery_channels(): ) # With an empty string for the S3 key prefix: - with assert_raises(ClientError) as ce: + with pytest.raises(ClientError) as ce: client.put_delivery_channel( DeliveryChannel={ "name": "testchannel", @@ -893,7 +893,7 @@ def test_delivery_channels(): assert "empty s3 key prefix." 
in ce.exception.response["Error"]["Message"] # With an empty string for the SNS ARN: - with assert_raises(ClientError) as ce: + with pytest.raises(ClientError) as ce: client.put_delivery_channel( DeliveryChannel={ "name": "testchannel", @@ -905,7 +905,7 @@ def test_delivery_channels(): assert "The sns topic arn" in ce.exception.response["Error"]["Message"] # With an invalid delivery frequency: - with assert_raises(ClientError) as ce: + with pytest.raises(ClientError) as ce: client.put_delivery_channel( DeliveryChannel={ "name": "testchannel", @@ -950,7 +950,7 @@ def test_delivery_channels(): ) # Can only have 1: - with assert_raises(ClientError) as ce: + with pytest.raises(ClientError) as ce: client.put_delivery_channel( DeliveryChannel={"name": "testchannel2", "s3BucketName": "somebucket"} ) @@ -1015,13 +1015,13 @@ def test_describe_delivery_channels(): ) # Specify an incorrect name: - with assert_raises(ClientError) as ce: + with pytest.raises(ClientError) as ce: client.describe_delivery_channels(DeliveryChannelNames=["wrong"]) assert ce.exception.response["Error"]["Code"] == "NoSuchDeliveryChannelException" assert "wrong" in ce.exception.response["Error"]["Message"] # And with both a good and wrong name: - with assert_raises(ClientError) as ce: + with pytest.raises(ClientError) as ce: client.describe_delivery_channels(DeliveryChannelNames=["testchannel", "wrong"]) assert ce.exception.response["Error"]["Code"] == "NoSuchDeliveryChannelException" assert "wrong" in ce.exception.response["Error"]["Message"] @@ -1032,7 +1032,7 @@ def test_start_configuration_recorder(): client = boto3.client("config", region_name="us-west-2") # Without a config recorder: - with assert_raises(ClientError) as ce: + with pytest.raises(ClientError) as ce: client.start_configuration_recorder(ConfigurationRecorderName="testrecorder") assert ( ce.exception.response["Error"]["Code"] == "NoSuchConfigurationRecorderException" @@ -1052,7 +1052,7 @@ def test_start_configuration_recorder(): ) # Without a delivery channel: - with assert_raises(ClientError) as ce: + with pytest.raises(ClientError) as ce: client.start_configuration_recorder(ConfigurationRecorderName="testrecorder") assert ( ce.exception.response["Error"]["Code"] == "NoAvailableDeliveryChannelException" @@ -1090,7 +1090,7 @@ def test_stop_configuration_recorder(): client = boto3.client("config", region_name="us-west-2") # Without a config recorder: - with assert_raises(ClientError) as ce: + with pytest.raises(ClientError) as ce: client.stop_configuration_recorder(ConfigurationRecorderName="testrecorder") assert ( ce.exception.response["Error"]["Code"] == "NoSuchConfigurationRecorderException" @@ -1180,7 +1180,7 @@ def test_describe_configuration_recorder_status(): assert not result[0]["recording"] # Invalid name: - with assert_raises(ClientError) as ce: + with pytest.raises(ClientError) as ce: client.describe_configuration_recorder_status( ConfigurationRecorderNames=["testrecorder", "wrong"] ) @@ -1211,7 +1211,7 @@ def test_delete_configuration_recorder(): client.delete_configuration_recorder(ConfigurationRecorderName="testrecorder") # Try again -- it should be deleted: - with assert_raises(ClientError) as ce: + with pytest.raises(ClientError) as ce: client.delete_configuration_recorder(ConfigurationRecorderName="testrecorder") assert ( ce.exception.response["Error"]["Code"] == "NoSuchConfigurationRecorderException" @@ -1240,7 +1240,7 @@ def test_delete_delivery_channel(): client.start_configuration_recorder(ConfigurationRecorderName="testrecorder") # With 
the recorder enabled: - with assert_raises(ClientError) as ce: + with pytest.raises(ClientError) as ce: client.delete_delivery_channel(DeliveryChannelName="testchannel") assert ( ce.exception.response["Error"]["Code"] @@ -1258,7 +1258,7 @@ def test_delete_delivery_channel(): client.delete_delivery_channel(DeliveryChannelName="testchannel") # Verify: - with assert_raises(ClientError) as ce: + with pytest.raises(ClientError) as ce: client.delete_delivery_channel(DeliveryChannelName="testchannel") assert ce.exception.response["Error"]["Code"] == "NoSuchDeliveryChannelException" @@ -1341,12 +1341,12 @@ def test_list_discovered_resource(): )["resourceIdentifiers"] # Test with an invalid page num > 100: - with assert_raises(ClientError) as ce: + with pytest.raises(ClientError) as ce: client.list_discovered_resources(resourceType="AWS::S3::Bucket", limit=101) assert "101" in ce.exception.response["Error"]["Message"] # Test by supplying both resourceName and also resourceIds: - with assert_raises(ClientError) as ce: + with pytest.raises(ClientError) as ce: client.list_discovered_resources( resourceType="AWS::S3::Bucket", resourceName="whats", @@ -1359,7 +1359,7 @@ def test_list_discovered_resource(): # More than 20 resourceIds: resource_ids = ["{}".format(x) for x in range(0, 21)] - with assert_raises(ClientError) as ce: + with pytest.raises(ClientError) as ce: client.list_discovered_resources( resourceType="AWS::S3::Bucket", resourceIds=resource_ids ) @@ -1378,7 +1378,7 @@ def test_list_aggregate_discovered_resource(): client = boto3.client("config", region_name="us-west-2") # Without an aggregator: - with assert_raises(ClientError) as ce: + with pytest.raises(ClientError) as ce: client.list_aggregate_discovered_resources( ConfigurationAggregatorName="lolno", ResourceType="AWS::S3::Bucket" ) @@ -1504,7 +1504,7 @@ def test_list_aggregate_discovered_resource(): )["ResourceIdentifiers"] # Test with an invalid page num > 100: - with assert_raises(ClientError) as ce: + with pytest.raises(ClientError) as ce: client.list_aggregate_discovered_resources( ConfigurationAggregatorName="testing", ResourceType="AWS::S3::Bucket", @@ -1522,7 +1522,7 @@ def test_get_resource_config_history(): client = boto3.client("config", region_name="us-west-2") # With an invalid resource type: - with assert_raises(ClientError) as ce: + with pytest.raises(ClientError) as ce: client.get_resource_config_history( resourceType="NOT::A::RESOURCE", resourceId="notcreatedyet" ) @@ -1533,7 +1533,7 @@ def test_get_resource_config_history(): } # With nothing created yet: - with assert_raises(ClientError) as ce: + with pytest.raises(ClientError) as ce: client.get_resource_config_history( resourceType="AWS::S3::Bucket", resourceId="notcreatedyet" ) @@ -1565,7 +1565,7 @@ def test_get_resource_config_history(): Bucket="eu-bucket", CreateBucketConfiguration={"LocationConstraint": "eu-west-1"}, ) - with assert_raises(ClientError) as ce: + with pytest.raises(ClientError) as ce: client.get_resource_config_history( resourceType="AWS::S3::Bucket", resourceId="eu-bucket" ) @@ -1581,7 +1581,7 @@ def test_batch_get_resource_config(): client = boto3.client("config", region_name="us-west-2") # With more than 100 resourceKeys: - with assert_raises(ClientError) as ce: + with pytest.raises(ClientError) as ce: client.batch_get_resource_config( resourceKeys=[ {"resourceType": "AWS::S3::Bucket", "resourceId": "someBucket"} @@ -1653,7 +1653,7 @@ def test_batch_get_aggregate_resource_config(): "ResourceType": "NOT::A::RESOURCE", "ResourceId": "nope", } - with 
assert_raises(ClientError) as ce: + with pytest.raises(ClientError) as ce: client.batch_get_aggregate_resource_config( ConfigurationAggregatorName="lolno", ResourceIdentifiers=[bad_ri] ) @@ -1673,7 +1673,7 @@ def test_batch_get_aggregate_resource_config(): ) # With more than 100 items: - with assert_raises(ClientError) as ce: + with pytest.raises(ClientError) as ce: client.batch_get_aggregate_resource_config( ConfigurationAggregatorName="testing", ResourceIdentifiers=[bad_ri] * 101 ) @@ -1814,7 +1814,7 @@ def test_put_evaluations(): client = boto3.client("config", region_name="us-west-2") # Try without Evaluations supplied: - with assert_raises(ClientError) as ce: + with pytest.raises(ClientError) as ce: client.put_evaluations(Evaluations=[], ResultToken="test", TestMode=True) assert ce.exception.response["Error"]["Code"] == "InvalidParameterValueException" assert ( @@ -1823,7 +1823,7 @@ def test_put_evaluations(): ) # Try without a ResultToken supplied: - with assert_raises(ClientError) as ce: + with pytest.raises(ClientError) as ce: client.put_evaluations( Evaluations=[ { @@ -1842,7 +1842,7 @@ def test_put_evaluations(): raise SkipTest("Does not work in server mode due to error in Workzeug") else: # Try without TestMode supplied: - with assert_raises(NotImplementedError): + with pytest.raises(NotImplementedError): client.put_evaluations( Evaluations=[ { @@ -1913,7 +1913,7 @@ def test_put_organization_conformance_pack_errors(): client = boto3.client("config", region_name="us-east-1") # when - with assert_raises(ClientError) as e: + with pytest.raises(ClientError) as e: client.put_organization_conformance_pack( DeliveryS3Bucket="awsconfigconforms-test-bucket", OrganizationConformancePackName="test-pack", @@ -1927,7 +1927,7 @@ def test_put_organization_conformance_pack_errors(): ex.response["Error"]["Message"].should.equal("Template body is invalid") # when - with assert_raises(ClientError) as e: + with pytest.raises(ClientError) as e: client.put_organization_conformance_pack( DeliveryS3Bucket="awsconfigconforms-test-bucket", OrganizationConformancePackName="test-pack", @@ -1979,7 +1979,7 @@ def test_describe_organization_conformance_packs_errors(): client = boto3.client("config", region_name="us-east-1") # when - with assert_raises(ClientError) as e: + with pytest.raises(ClientError) as e: client.describe_organization_conformance_packs( OrganizationConformancePackNames=["not-existing"] ) @@ -2055,7 +2055,7 @@ def test_describe_organization_conformance_pack_statuses_errors(): client = boto3.client("config", region_name="us-east-1") # when - with assert_raises(ClientError) as e: + with pytest.raises(ClientError) as e: client.describe_organization_conformance_pack_statuses( OrganizationConformancePackNames=["not-existing"] ) @@ -2127,7 +2127,7 @@ def test_get_organization_conformance_pack_detailed_status_errors(): client = boto3.client("config", region_name="us-east-1") # when - with assert_raises(ClientError) as e: + with pytest.raises(ClientError) as e: client.get_organization_conformance_pack_detailed_status( OrganizationConformancePackName="not-existing" ) @@ -2171,7 +2171,7 @@ def test_delete_organization_conformance_pack_errors(): client = boto3.client("config", region_name="us-east-1") # when - with assert_raises(ClientError) as e: + with pytest.raises(ClientError) as e: client.delete_organization_conformance_pack( OrganizationConformancePackName="not-existing" ) diff --git a/tests/test_core/__init__.py b/tests/test_core/__init__.py new file mode 100644 index 000000000000..08a1c1568c9c 
--- /dev/null +++ b/tests/test_core/__init__.py @@ -0,0 +1 @@ +# This file is intentionally left blank. diff --git a/tests/test_core/test_auth.py b/tests/test_core/test_auth.py index b391d82c8082..67c3b67a2c8f 100644 --- a/tests/test_core/test_auth.py +++ b/tests/test_core/test_auth.py @@ -4,9 +4,7 @@ import sure # noqa from botocore.exceptions import ClientError -# Ensure 'assert_raises' context manager support for Python 2.6 -import tests.backport_assert_raises -from nose.tools import assert_raises +import pytest from moto import mock_iam, mock_ec2, mock_s3, mock_sts, mock_elbv2, mock_rds2 from moto.core import set_initial_no_auth_action_count @@ -179,7 +177,7 @@ def test_invalid_client_token_id(): aws_access_key_id="invalid", aws_secret_access_key="invalid", ) - with assert_raises(ClientError) as ex: + with pytest.raises(ClientError) as ex: client.get_user() ex.exception.response["Error"]["Code"].should.equal("InvalidClientTokenId") ex.exception.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(403) @@ -197,7 +195,7 @@ def test_auth_failure(): aws_access_key_id="invalid", aws_secret_access_key="invalid", ) - with assert_raises(ClientError) as ex: + with pytest.raises(ClientError) as ex: client.describe_instances() ex.exception.response["Error"]["Code"].should.equal("AuthFailure") ex.exception.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(401) @@ -216,7 +214,7 @@ def test_signature_does_not_match(): aws_access_key_id=access_key["AccessKeyId"], aws_secret_access_key="invalid", ) - with assert_raises(ClientError) as ex: + with pytest.raises(ClientError) as ex: client.get_user() ex.exception.response["Error"]["Code"].should.equal("SignatureDoesNotMatch") ex.exception.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(403) @@ -235,7 +233,7 @@ def test_auth_failure_with_valid_access_key_id(): aws_access_key_id=access_key["AccessKeyId"], aws_secret_access_key="invalid", ) - with assert_raises(ClientError) as ex: + with pytest.raises(ClientError) as ex: client.describe_instances() ex.exception.response["Error"]["Code"].should.equal("AuthFailure") ex.exception.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(401) @@ -255,7 +253,7 @@ def test_access_denied_with_no_policy(): aws_access_key_id=access_key["AccessKeyId"], aws_secret_access_key=access_key["SecretAccessKey"], ) - with assert_raises(ClientError) as ex: + with pytest.raises(ClientError) as ex: client.describe_instances() ex.exception.response["Error"]["Code"].should.equal("AccessDenied") ex.exception.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(403) @@ -321,7 +319,7 @@ def test_access_denied_for_run_instances(): aws_access_key_id=access_key["AccessKeyId"], aws_secret_access_key=access_key["SecretAccessKey"], ) - with assert_raises(ClientError) as ex: + with pytest.raises(ClientError) as ex: client.run_instances(MaxCount=1, MinCount=1) ex.exception.response["Error"]["Code"].should.equal("AccessDenied") ex.exception.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(403) @@ -352,7 +350,7 @@ def test_access_denied_with_denying_policy(): aws_access_key_id=access_key["AccessKeyId"], aws_secret_access_key=access_key["SecretAccessKey"], ) - with assert_raises(ClientError) as ex: + with pytest.raises(ClientError) as ex: client.create_vpc(CidrBlock="10.0.0.0/16") ex.exception.response["Error"]["Code"].should.equal("AccessDenied") ex.exception.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(403) @@ -452,7 +450,7 @@ def 
test_s3_access_denied_with_denying_attached_group_policy(): aws_access_key_id=access_key["AccessKeyId"], aws_secret_access_key=access_key["SecretAccessKey"], ) - with assert_raises(ClientError) as ex: + with pytest.raises(ClientError) as ex: client.list_buckets() ex.exception.response["Error"]["Code"].should.equal("AccessDenied") ex.exception.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(403) @@ -486,7 +484,7 @@ def test_s3_access_denied_with_denying_inline_group_policy(): aws_secret_access_key=access_key["SecretAccessKey"], ) client.create_bucket(Bucket=bucket_name) - with assert_raises(ClientError) as ex: + with pytest.raises(ClientError) as ex: client.get_object(Bucket=bucket_name, Key="sdfsdf") ex.exception.response["Error"]["Code"].should.equal("AccessDenied") ex.exception.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(403) @@ -532,7 +530,7 @@ def test_access_denied_with_many_irrelevant_policies(): aws_access_key_id=access_key["AccessKeyId"], aws_secret_access_key=access_key["SecretAccessKey"], ) - with assert_raises(ClientError) as ex: + with pytest.raises(ClientError) as ex: client.create_key_pair(KeyName="TestKey") ex.exception.response["Error"]["Code"].should.equal("AccessDenied") ex.exception.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(403) @@ -631,7 +629,7 @@ def test_access_denied_with_temporary_credentials(): aws_secret_access_key=credentials["SecretAccessKey"], aws_session_token=credentials["SessionToken"], ) - with assert_raises(ClientError) as ex: + with pytest.raises(ClientError) as ex: client.create_db_instance( DBInstanceIdentifier="test-db-instance", DBInstanceClass="db.t3", @@ -678,7 +676,7 @@ def test_s3_invalid_access_key_id(): aws_access_key_id="invalid", aws_secret_access_key="invalid", ) - with assert_raises(ClientError) as ex: + with pytest.raises(ClientError) as ex: client.list_buckets() ex.exception.response["Error"]["Code"].should.equal("InvalidAccessKeyId") ex.exception.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(403) @@ -700,7 +698,7 @@ def test_s3_signature_does_not_match(): aws_secret_access_key="invalid", ) client.create_bucket(Bucket=bucket_name) - with assert_raises(ClientError) as ex: + with pytest.raises(ClientError) as ex: client.put_object(Bucket=bucket_name, Key="abc") ex.exception.response["Error"]["Code"].should.equal("SignatureDoesNotMatch") ex.exception.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(403) @@ -736,7 +734,7 @@ def test_s3_access_denied_not_action(): aws_secret_access_key=access_key["SecretAccessKey"], ) client.create_bucket(Bucket=bucket_name) - with assert_raises(ClientError) as ex: + with pytest.raises(ClientError) as ex: client.delete_object(Bucket=bucket_name, Key="sdfsdf") ex.exception.response["Error"]["Code"].should.equal("AccessDenied") ex.exception.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(403) @@ -776,7 +774,7 @@ def test_s3_invalid_token_with_temporary_credentials(): aws_session_token="invalid", ) client.create_bucket(Bucket=bucket_name) - with assert_raises(ClientError) as ex: + with pytest.raises(ClientError) as ex: client.list_bucket_metrics_configurations(Bucket=bucket_name) ex.exception.response["Error"]["Code"].should.equal("InvalidToken") ex.exception.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) diff --git a/tests/test_core/test_decorator_calls.py b/tests/test_core/test_decorator_calls.py index 408ca6819463..5e04f075cc6e 100644 --- a/tests/test_core/test_decorator_calls.py +++ 
b/tests/test_core/test_decorator_calls.py @@ -4,8 +4,7 @@ import sure # noqa import unittest -import tests.backport_assert_raises # noqa -from nose.tools import assert_raises +import pytest from moto import mock_ec2_deprecated, mock_s3_deprecated @@ -27,21 +26,21 @@ def test_basic_decorator(): def test_context_manager(): conn = boto.connect_ec2("the_key", "the_secret") - with assert_raises(EC2ResponseError): + with pytest.raises(EC2ResponseError): conn.get_all_instances() with mock_ec2_deprecated(): conn = boto.connect_ec2("the_key", "the_secret") list(conn.get_all_instances()).should.equal([]) - with assert_raises(EC2ResponseError): + with pytest.raises(EC2ResponseError): conn = boto.connect_ec2("the_key", "the_secret") conn.get_all_instances() def test_decorator_start_and_stop(): conn = boto.connect_ec2("the_key", "the_secret") - with assert_raises(EC2ResponseError): + with pytest.raises(EC2ResponseError): conn.get_all_instances() mock = mock_ec2_deprecated() @@ -50,7 +49,7 @@ def test_decorator_start_and_stop(): list(conn.get_all_instances()).should.equal([]) mock.stop() - with assert_raises(EC2ResponseError): + with pytest.raises(EC2ResponseError): conn.get_all_instances() diff --git a/tests/test_core/test_instance_metadata.py b/tests/test_core/test_instance_metadata.py index d30138d5d4d0..9870f0df5ad7 100644 --- a/tests/test_core/test_instance_metadata.py +++ b/tests/test_core/test_instance_metadata.py @@ -1,6 +1,6 @@ from __future__ import unicode_literals import sure # noqa -from nose.tools import assert_raises +import pytest import requests from moto import mock_ec2, settings diff --git a/tests/test_core/test_moto_api.py b/tests/test_core/test_moto_api.py index 6482d903ea62..648510475603 100644 --- a/tests/test_core/test_moto_api.py +++ b/tests/test_core/test_moto_api.py @@ -1,6 +1,6 @@ from __future__ import unicode_literals import sure # noqa -from nose.tools import assert_raises +import pytest import requests import boto3 diff --git a/tests/test_datapipeline/__init__.py b/tests/test_datapipeline/__init__.py new file mode 100644 index 000000000000..08a1c1568c9c --- /dev/null +++ b/tests/test_datapipeline/__init__.py @@ -0,0 +1 @@ +# This file is intentionally left blank. diff --git a/tests/test_datasync/test_datasync.py b/tests/test_datasync/test_datasync.py index e3ea87675fa2..d8d919f1392f 100644 --- a/tests/test_datasync/test_datasync.py +++ b/tests/test_datasync/test_datasync.py @@ -4,11 +4,11 @@ import boto3 from botocore.exceptions import ClientError from moto import mock_datasync -from nose.tools import assert_raises +import pytest def create_locations(client, create_smb=False, create_s3=False): - """ + """ Convenience function for creating locations. Locations must exist before tasks can be created. 
""" @@ -101,7 +101,7 @@ def test_describe_location_wrong(): Password="", AgentArns=agent_arns, ) - with assert_raises(ClientError) as e: + with pytest.raises(ClientError) as e: response = client.describe_location_s3(LocationArn=response["LocationArn"]) @@ -159,11 +159,11 @@ def test_create_task_fail(): """ Test that Locations must exist before a Task can be created """ client = boto3.client("datasync", region_name="us-east-1") locations = create_locations(client, create_smb=True, create_s3=True) - with assert_raises(ClientError) as e: + with pytest.raises(ClientError) as e: response = client.create_task( SourceLocationArn="1", DestinationLocationArn=locations["s3_arn"] ) - with assert_raises(ClientError) as e: + with pytest.raises(ClientError) as e: response = client.create_task( SourceLocationArn=locations["smb_arn"], DestinationLocationArn="2" ) @@ -220,7 +220,7 @@ def test_describe_task(): def test_describe_task_not_exist(): client = boto3.client("datasync", region_name="us-east-1") - with assert_raises(ClientError) as e: + with pytest.raises(ClientError) as e: client.describe_task(TaskArn="abc") @@ -328,7 +328,7 @@ def test_start_task_execution_twice(): assert "TaskExecutionArn" in response task_execution_arn = response["TaskExecutionArn"] - with assert_raises(ClientError) as e: + with pytest.raises(ClientError) as e: response = client.start_task_execution(TaskArn=task_arn) @@ -392,7 +392,7 @@ def test_describe_task_execution(): def test_describe_task_execution_not_exist(): client = boto3.client("datasync", region_name="us-east-1") - with assert_raises(ClientError) as e: + with pytest.raises(ClientError) as e: client.describe_task_execution(TaskExecutionArn="abc") diff --git a/tests/test_dynamodb/__init__.py b/tests/test_dynamodb/__init__.py new file mode 100644 index 000000000000..08a1c1568c9c --- /dev/null +++ b/tests/test_dynamodb/__init__.py @@ -0,0 +1 @@ +# This file is intentionally left blank. diff --git a/tests/test_dynamodb/test_dynamodb.py b/tests/test_dynamodb/test_dynamodb.py index 931e57e0630e..3e10920259e5 100644 --- a/tests/test_dynamodb/test_dynamodb.py +++ b/tests/test_dynamodb/test_dynamodb.py @@ -4,8 +4,7 @@ import boto.dynamodb import sure # noqa import requests -import tests.backport_assert_raises -from nose.tools import assert_raises +import pytest from moto import mock_dynamodb, mock_dynamodb_deprecated from moto.dynamodb import dynamodb_backend @@ -38,7 +37,7 @@ def test_list_tables_layer_1(): @mock_dynamodb_deprecated def test_describe_missing_table(): conn = boto.connect_dynamodb("the_key", "the_secret") - with assert_raises(DynamoDBResponseError): + with pytest.raises(DynamoDBResponseError): conn.describe_table("messages") diff --git a/tests/test_dynamodb2/__init__.py b/tests/test_dynamodb2/__init__.py new file mode 100644 index 000000000000..08a1c1568c9c --- /dev/null +++ b/tests/test_dynamodb2/__init__.py @@ -0,0 +1 @@ +# This file is intentionally left blank. 
diff --git a/tests/test_dynamodb2/test_dynamodb.py b/tests/test_dynamodb2/test_dynamodb.py index 41baddc79e1c..6704bbcc78ba 100644 --- a/tests/test_dynamodb2/test_dynamodb.py +++ b/tests/test_dynamodb2/test_dynamodb.py @@ -17,7 +17,7 @@ import moto.dynamodb2.comparisons import moto.dynamodb2.models -from nose.tools import assert_raises +import pytest try: import boto.dynamodb2 @@ -72,7 +72,7 @@ def test_describe_missing_table(): conn = boto.dynamodb2.connect_to_region( "us-west-2", aws_access_key_id="ak", aws_secret_access_key="sk" ) - with assert_raises(JSONResponseError): + with pytest.raises(JSONResponseError): conn.describe_table("messages") @@ -201,7 +201,7 @@ def test_item_add_empty_string_exception(): ProvisionedThroughput={"ReadCapacityUnits": 5, "WriteCapacityUnits": 5}, ) - with assert_raises(ClientError) as ex: + with pytest.raises(ClientError) as ex: conn.put_item( TableName=name, Item={ @@ -248,7 +248,7 @@ def test_update_item_with_empty_string_exception(): }, ) - with assert_raises(ClientError) as ex: + with pytest.raises(ClientError) as ex: conn.update_item( TableName=name, Key={"forum_name": {"S": "LOLCat Forum"}}, @@ -1354,7 +1354,7 @@ def test_put_empty_item(): ) table = dynamodb.Table("test") - with assert_raises(ClientError) as ex: + with pytest.raises(ClientError) as ex: table.put_item(Item={}) ex.exception.response["Error"]["Message"].should.equal( "One or more parameter values were invalid: Missing the key structure_id in the item" @@ -1373,7 +1373,7 @@ def test_put_item_nonexisting_hash_key(): ) table = dynamodb.Table("test") - with assert_raises(ClientError) as ex: + with pytest.raises(ClientError) as ex: table.put_item(Item={"a_terribly_misguided_id_attribute": "abcdef"}) ex.exception.response["Error"]["Message"].should.equal( "One or more parameter values were invalid: Missing the key structure_id in the item" @@ -1398,7 +1398,7 @@ def test_put_item_nonexisting_range_key(): ) table = dynamodb.Table("test") - with assert_raises(ClientError) as ex: + with pytest.raises(ClientError) as ex: table.put_item(Item={"structure_id": "abcdef"}) ex.exception.response["Error"]["Message"].should.equal( "One or more parameter values were invalid: Missing the key added_at in the item" @@ -1980,7 +1980,7 @@ def test_delete_item(): assert response["Count"] == 2 # Test ReturnValues validation - with assert_raises(ClientError) as ex: + with pytest.raises(ClientError) as ex: table.delete_item( Key={"client": "client1", "app": "app1"}, ReturnValues="ALL_NEW" ) @@ -2085,7 +2085,7 @@ def test_describe_continuous_backups_errors(): client = boto3.client("dynamodb", region_name="us-east-1") # when - with assert_raises(Exception) as e: + with pytest.raises(Exception) as e: client.describe_continuous_backups(TableName="not-existing-table") # then @@ -2171,7 +2171,7 @@ def test_update_continuous_backups_errors(): client = boto3.client("dynamodb", region_name="us-east-1") # when - with assert_raises(Exception) as e: + with pytest.raises(Exception) as e: client.update_continuous_backups( TableName="not-existing-table", PointInTimeRecoverySpecification={"PointInTimeRecoveryEnabled": True}, @@ -2291,7 +2291,7 @@ def test_update_item_on_map(): ExpressionAttributeValues={":tb": "new_value"}, ) # Running this against AWS DDB gives an exception so make sure it also fails.: - with assert_raises(client.exceptions.ClientError): + with pytest.raises(client.exceptions.ClientError): # botocore.exceptions.ClientError: An error occurred (ValidationException) when calling the UpdateItem # operation: The 
document path provided in the update expression is invalid for update table.update_item( @@ -2321,7 +2321,7 @@ def test_update_item_on_map(): ) # Test nested value for a nonexistent attribute throws a ClientError. - with assert_raises(client.exceptions.ClientError): + with pytest.raises(client.exceptions.ClientError): table.update_item( Key={"forum_name": "the-key", "subject": "123"}, UpdateExpression="SET nonexistent.#nested = :tb", @@ -2409,7 +2409,7 @@ def update(col, to, rv): r = update("col1", "val5", "NONE") assert r["Attributes"] == {} - with assert_raises(ClientError) as ex: + with pytest.raises(ClientError) as ex: r = update("col1", "val6", "WRONG") @@ -2438,7 +2438,7 @@ def test_put_return_attributes(): ) assert r["Attributes"] == {"id": {"S": "foo"}, "col1": {"S": "val1"}} - with assert_raises(ClientError) as ex: + with pytest.raises(ClientError) as ex: dynamodb.put_item( TableName="moto-test", Item={"id": {"S": "foo"}, "col1": {"S": "val3"}}, @@ -2675,7 +2675,7 @@ def test_condition_expressions(): }, ) - with assert_raises(client.exceptions.ConditionalCheckFailedException): + with pytest.raises(client.exceptions.ConditionalCheckFailedException): client.put_item( TableName="test1", Item={ @@ -2691,7 +2691,7 @@ def test_condition_expressions(): }, ) - with assert_raises(client.exceptions.ConditionalCheckFailedException): + with pytest.raises(client.exceptions.ConditionalCheckFailedException): client.put_item( TableName="test1", Item={ @@ -2707,7 +2707,7 @@ def test_condition_expressions(): }, ) - with assert_raises(client.exceptions.ConditionalCheckFailedException): + with pytest.raises(client.exceptions.ConditionalCheckFailedException): client.put_item( TableName="test1", Item={ @@ -2735,7 +2735,7 @@ def test_condition_expressions(): ExpressionAttributeValues={":match": {"S": "match"}}, ) - with assert_raises(client.exceptions.ConditionalCheckFailedException): + with pytest.raises(client.exceptions.ConditionalCheckFailedException): client.update_item( TableName="test1", Key={"client": {"S": "client1"}, "app": {"S": "app1"}}, @@ -2745,7 +2745,7 @@ def test_condition_expressions(): ExpressionAttributeNames={"#existing": "existing", "#match": "match"}, ) - with assert_raises(client.exceptions.ConditionalCheckFailedException): + with pytest.raises(client.exceptions.ConditionalCheckFailedException): client.delete_item( TableName="test1", Key={"client": {"S": "client1"}, "app": {"S": "app1"}}, @@ -2830,7 +2830,7 @@ def update_if_attr_doesnt_exist(): update_if_attr_doesnt_exist() # Second time should fail - with assert_raises(client.exceptions.ConditionalCheckFailedException): + with pytest.raises(client.exceptions.ConditionalCheckFailedException): update_if_attr_doesnt_exist() @@ -2870,7 +2870,7 @@ def test_condition_expression__and_order(): # ensure that the RHS of the AND expression is not evaluated if the LHS # returns true (as it would result an error) - with assert_raises(client.exceptions.ConditionalCheckFailedException): + with pytest.raises(client.exceptions.ConditionalCheckFailedException): client.update_item( TableName="test", Key={"forum_name": {"S": "the-key"}}, @@ -2966,7 +2966,7 @@ def test_scan_by_non_exists_index(): ], ) - with assert_raises(ClientError) as ex: + with pytest.raises(ClientError) as ex: dynamodb.scan(TableName="test", IndexName="non_exists_index") ex.exception.response["Error"]["Code"].should.equal("ValidationException") @@ -3001,7 +3001,7 @@ def test_query_by_non_exists_index(): ], ) - with assert_raises(ClientError) as ex: + with 
pytest.raises(ClientError) as ex: dynamodb.query( TableName="test", IndexName="non_exists_index", @@ -3041,7 +3041,7 @@ def test_batch_items_returns_all(): @mock_dynamodb2 def test_batch_items_throws_exception_when_requesting_100_items_for_single_table(): dynamodb = _create_user_table() - with assert_raises(ClientError) as ex: + with pytest.raises(ClientError) as ex: dynamodb.batch_get_item( RequestItems={ "users": { @@ -3063,7 +3063,7 @@ def test_batch_items_throws_exception_when_requesting_100_items_for_single_table @mock_dynamodb2 def test_batch_items_throws_exception_when_requesting_100_items_across_all_tables(): dynamodb = _create_user_table() - with assert_raises(ClientError) as ex: + with pytest.raises(ClientError) as ex: dynamodb.batch_get_item( RequestItems={ "users": { @@ -3160,7 +3160,7 @@ def test_batch_items_with_basic_projection_expression_and_attr_expression_names( @mock_dynamodb2 def test_batch_items_should_throw_exception_for_duplicate_request(): client = _create_user_table() - with assert_raises(ClientError) as ex: + with pytest.raises(ClientError) as ex: client.batch_get_item( RequestItems={ "users": { @@ -3186,7 +3186,7 @@ def test_index_with_unknown_attributes_should_fail(): "Some index key attributes are not defined in AttributeDefinitions." ) - with assert_raises(ClientError) as ex: + with pytest.raises(ClientError) as ex: dynamodb.create_table( AttributeDefinitions=[ {"AttributeName": "customer_nr", "AttributeType": "S"}, @@ -3366,7 +3366,7 @@ def test_update_list_index__set_index_of_a_string(): client.put_item( TableName=table_name, Item={"id": {"S": "foo2"}, "itemstr": {"S": "somestring"}} ) - with assert_raises(ClientError) as ex: + with pytest.raises(ClientError) as ex: client.update_item( TableName=table_name, Key={"id": {"S": "foo2"}}, @@ -3615,7 +3615,7 @@ def test_item_size_is_under_400KB(): def assert_failure_due_to_item_size(func, **kwargs): - with assert_raises(ClientError) as ex: + with pytest.raises(ClientError) as ex: func(**kwargs) ex.exception.response["Error"]["Code"].should.equal("ValidationException") ex.exception.response["Error"]["Message"].should.equal( @@ -3624,7 +3624,7 @@ def assert_failure_due_to_item_size(func, **kwargs): def assert_failure_due_to_item_size_to_update(func, **kwargs): - with assert_raises(ClientError) as ex: + with pytest.raises(ClientError) as ex: func(**kwargs) ex.exception.response["Error"]["Code"].should.equal("ValidationException") ex.exception.response["Error"]["Message"].should.equal( @@ -3654,7 +3654,7 @@ def test_hash_key_cannot_use_begins_with_operations(): batch.put_item(Item=item) table = dynamodb.Table("test-table") - with assert_raises(ClientError) as ex: + with pytest.raises(ClientError) as ex: table.query(KeyConditionExpression=Key("key").begins_with("prefix-")) ex.exception.response["Error"]["Code"].should.equal("ValidationException") ex.exception.response["Error"]["Message"].should.equal( @@ -4047,7 +4047,7 @@ def test_update_catches_invalid_list_append_operation(): ) # Update item using invalid list_append expression - with assert_raises(ParamValidationError) as ex: + with pytest.raises(ParamValidationError) as ex: client.update_item( TableName="TestTable", Key={"SHA256": {"S": "sha-of-file"}}, @@ -4166,7 +4166,7 @@ def test_query_catches_when_no_filters(): ) table = dynamo.Table("origin-rbu-dev") - with assert_raises(ClientError) as ex: + with pytest.raises(ClientError) as ex: table.query(TableName="original-rbu-dev") ex.exception.response["Error"]["Code"].should.equal("ValidationException") @@ -4197,7 
+4197,7 @@ def test_invalid_transact_get_items(): client = boto3.client("dynamodb", region_name="us-east-1") - with assert_raises(ClientError) as ex: + with pytest.raises(ClientError) as ex: client.transact_get_items( TransactItems=[ {"Get": {"Key": {"id": {"S": "1"}}, "TableName": "test1"}} @@ -4211,7 +4211,7 @@ def test_invalid_transact_get_items(): re.I, ) - with assert_raises(ClientError) as ex: + with pytest.raises(ClientError) as ex: client.transact_get_items( TransactItems=[ {"Get": {"Key": {"id": {"S": "1"},}, "TableName": "test1"}}, @@ -4491,7 +4491,7 @@ def test_transact_write_items_put_conditional_expressions(): TableName="test-table", Item={"id": {"S": "foo2"},}, ) # Put multiple items - with assert_raises(ClientError) as ex: + with pytest.raises(ClientError) as ex: dynamodb.transact_write_items( TransactItems=[ { @@ -4581,7 +4581,7 @@ def test_transact_write_items_conditioncheck_fails(): ) # Try to put an email address, but verify whether it exists # ConditionCheck should fail - with assert_raises(ClientError) as ex: + with pytest.raises(ClientError) as ex: dynamodb.transact_write_items( TransactItems=[ { @@ -4687,7 +4687,7 @@ def test_transact_write_items_delete_with_failed_condition_expression(): ) # Try to delete an item that does not have an email address # ConditionCheck should fail - with assert_raises(ClientError) as ex: + with pytest.raises(ClientError) as ex: dynamodb.transact_write_items( TransactItems=[ { @@ -4758,7 +4758,7 @@ def test_transact_write_items_update_with_failed_condition_expression(): ) # Try to update an item that does not have an email address # ConditionCheck should fail - with assert_raises(ClientError) as ex: + with pytest.raises(ClientError) as ex: dynamodb.transact_write_items( TransactItems=[ { @@ -5318,7 +5318,7 @@ def test_transact_write_items_fails_with_transaction_canceled_exception(): # Insert one item dynamodb.put_item(TableName="test-table", Item={"id": {"S": "foo"}}) # Update two items, the one that exists and another that doesn't - with assert_raises(ClientError) as ex: + with pytest.raises(ClientError) as ex: dynamodb.transact_write_items( TransactItems=[ { diff --git a/tests/test_dynamodb2/test_dynamodb_table_with_range_key.py b/tests/test_dynamodb2/test_dynamodb_table_with_range_key.py index 12e75a73e0b3..e50cd45c1b5b 100644 --- a/tests/test_dynamodb2/test_dynamodb_table_with_range_key.py +++ b/tests/test_dynamodb2/test_dynamodb_table_with_range_key.py @@ -8,7 +8,7 @@ from botocore.exceptions import ClientError import sure # noqa from freezegun import freeze_time -from nose.tools import assert_raises +import pytest from moto import mock_dynamodb2, mock_dynamodb2_deprecated from boto.exception import JSONResponseError @@ -1353,7 +1353,7 @@ def test_update_item_with_expression(): def assert_failure_due_to_key_not_in_schema(func, **kwargs): - with assert_raises(ClientError) as ex: + with pytest.raises(ClientError) as ex: func(**kwargs) ex.exception.response["Error"]["Code"].should.equal("ValidationException") ex.exception.response["Error"]["Message"].should.equal( diff --git a/tests/test_dynamodbstreams/__init__.py b/tests/test_dynamodbstreams/__init__.py new file mode 100644 index 000000000000..08a1c1568c9c --- /dev/null +++ b/tests/test_dynamodbstreams/__init__.py @@ -0,0 +1 @@ +# This file is intentionally left blank. 
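
The hunks through this stretch of the series apply one mechanical substitution: nose's `assert_raises` context manager becomes `pytest.raises`. One API difference is worth keeping in mind while reading them: nose (via unittest) exposes the caught exception on the context manager as `.exception`, whereas `pytest.raises` yields an `ExceptionInfo` whose exception lives on `.value`. A minimal sketch of the target pattern, using a hypothetical test against moto's DynamoDB mock:

    import boto3
    import pytest
    from botocore.exceptions import ClientError
    from moto import mock_dynamodb2

    @mock_dynamodb2
    def test_describe_missing_table_raises():
        client = boto3.client("dynamodb", region_name="us-east-1")
        with pytest.raises(ClientError) as ex:
            client.describe_table(TableName="does-not-exist")
        # pytest's ExceptionInfo carries the raised exception on .value,
        # not on .exception as nose/unittest did:
        assert ex.value.response["Error"]["Code"] == "ResourceNotFoundException"

Note that the surrounding `ex.exception...` and `cm.exception...` assertion lines are left as unchanged context in these hunks; under pytest they would need to read `.value` instead, so presumably a later patch in the series performs that rename.
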
diff --git a/tests/test_dynamodbstreams/test_dynamodbstreams.py b/tests/test_dynamodbstreams/test_dynamodbstreams.py index 6f66e304d3ea..70efc5289674 100644 --- a/tests/test_dynamodbstreams/test_dynamodbstreams.py +++ b/tests/test_dynamodbstreams/test_dynamodbstreams.py @@ -1,6 +1,6 @@ from __future__ import unicode_literals, print_function -from nose.tools import assert_raises +import pytest import boto3 from moto import mock_dynamodb2, mock_dynamodbstreams @@ -224,7 +224,7 @@ def test_enable_stream_on_table(self): assert "LatestStreamLabel" in resp["TableDescription"] # now try to enable it again - with assert_raises(conn.exceptions.ResourceInUseException): + with pytest.raises(conn.exceptions.ResourceInUseException): resp = conn.update_table( TableName="test-streams", StreamSpecification={ diff --git a/tests/test_ec2/test_amis.py b/tests/test_ec2/test_amis.py index 5b26acf6f836..b23eae4ab0f4 100644 --- a/tests/test_ec2/test_amis.py +++ b/tests/test_ec2/test_amis.py @@ -7,7 +7,7 @@ from botocore.exceptions import ClientError # Ensure 'assert_raises' context manager support for Python 2.6 -from nose.tools import assert_raises +import pytest import sure # noqa from moto import mock_ec2_deprecated, mock_ec2 @@ -27,7 +27,7 @@ def test_ami_create_and_delete(): reservation = conn.run_instances("ami-1234abcd") instance = reservation.instances[0] - with assert_raises(EC2ResponseError) as ex: + with pytest.raises(EC2ResponseError) as ex: image_id = conn.create_image( instance.id, "test-ami", "this is a test ami", dry_run=True ) @@ -76,7 +76,7 @@ def test_ami_create_and_delete(): root_mapping.should_not.be.none # Deregister - with assert_raises(EC2ResponseError) as ex: + with pytest.raises(EC2ResponseError) as ex: success = conn.deregister_image(image_id, dry_run=True) ex.exception.error_code.should.equal("DryRunOperation") ex.exception.status.should.equal(400) @@ -87,7 +87,7 @@ def test_ami_create_and_delete(): success = conn.deregister_image(image_id) success.should.be.true - with assert_raises(EC2ResponseError) as cm: + with pytest.raises(EC2ResponseError) as cm: conn.deregister_image(image_id) cm.exception.code.should.equal("InvalidAMIID.NotFound") cm.exception.status.should.equal(400) @@ -112,7 +112,7 @@ def test_ami_copy(): # Boto returns a 'CopyImage' object with an image_id attribute here. Use # the image_id to fetch the full info. - with assert_raises(EC2ResponseError) as ex: + with pytest.raises(EC2ResponseError) as ex: copy_image_ref = conn.copy_image( source_image.region.name, source_image.id, @@ -152,7 +152,7 @@ def test_ami_copy(): ) # Copy from non-existent source ID. - with assert_raises(EC2ResponseError) as cm: + with pytest.raises(EC2ResponseError) as cm: conn.copy_image( source_image.region.name, "ami-abcd1234", @@ -164,7 +164,7 @@ def test_ami_copy(): cm.exception.request_id.should_not.be.none # Copy from non-existent source region. 
- with assert_raises(EC2ResponseError) as cm: + with pytest.raises(EC2ResponseError) as cm: invalid_region = ( "us-east-1" if (source_image.region.name != "us-east-1") else "us-west-1" ) @@ -208,7 +208,7 @@ def test_ami_tagging(): conn.create_image(instance.id, "test-ami", "this is a test ami") image = conn.get_all_images()[0] - with assert_raises(EC2ResponseError) as ex: + with pytest.raises(EC2ResponseError) as ex: image.add_tag("a key", "some value", dry_run=True) ex.exception.error_code.should.equal("DryRunOperation") ex.exception.status.should.equal(400) @@ -233,7 +233,7 @@ def test_ami_create_from_missing_instance(): conn = boto.connect_ec2("the_key", "the_secret") args = ["i-abcdefg", "test-ami", "this is a test ami"] - with assert_raises(EC2ResponseError) as cm: + with pytest.raises(EC2ResponseError) as cm: conn.create_image(*args) cm.exception.code.should.equal("InvalidInstanceID.NotFound") cm.exception.status.should.equal(400) @@ -353,7 +353,7 @@ def test_ami_filtering_via_tag(): def test_getting_missing_ami(): conn = boto.connect_ec2("the_key", "the_secret") - with assert_raises(EC2ResponseError) as cm: + with pytest.raises(EC2ResponseError) as cm: conn.get_image("ami-missing") cm.exception.code.should.equal("InvalidAMIID.NotFound") cm.exception.status.should.equal(400) @@ -364,7 +364,7 @@ def test_getting_missing_ami(): def test_getting_malformed_ami(): conn = boto.connect_ec2("the_key", "the_secret") - with assert_raises(EC2ResponseError) as cm: + with pytest.raises(EC2ResponseError) as cm: conn.get_image("foo-missing") cm.exception.code.should.equal("InvalidAMIID.Malformed") cm.exception.status.should.equal(400) @@ -399,7 +399,7 @@ def test_ami_attribute_group_permissions(): } # Add 'all' group and confirm - with assert_raises(EC2ResponseError) as ex: + with pytest.raises(EC2ResponseError) as ex: conn.modify_image_attribute(**dict(ADD_GROUP_ARGS, **{"dry_run": True})) ex.exception.error_code.should.equal("DryRunOperation") ex.exception.status.should.equal(400) @@ -678,7 +678,7 @@ def test_ami_attribute_error_cases(): image = conn.get_image(image_id) # Error: Add with group != 'all' - with assert_raises(EC2ResponseError) as cm: + with pytest.raises(EC2ResponseError) as cm: conn.modify_image_attribute( image.id, attribute="launchPermission", operation="add", groups="everyone" ) @@ -687,7 +687,7 @@ def test_ami_attribute_error_cases(): cm.exception.request_id.should_not.be.none # Error: Add with user ID that isn't an integer. - with assert_raises(EC2ResponseError) as cm: + with pytest.raises(EC2ResponseError) as cm: conn.modify_image_attribute( image.id, attribute="launchPermission", @@ -699,7 +699,7 @@ def test_ami_attribute_error_cases(): cm.exception.request_id.should_not.be.none # Error: Add with user ID that is > length 12. - with assert_raises(EC2ResponseError) as cm: + with pytest.raises(EC2ResponseError) as cm: conn.modify_image_attribute( image.id, attribute="launchPermission", @@ -711,7 +711,7 @@ def test_ami_attribute_error_cases(): cm.exception.request_id.should_not.be.none # Error: Add with user ID that is < length 12. - with assert_raises(EC2ResponseError) as cm: + with pytest.raises(EC2ResponseError) as cm: conn.modify_image_attribute( image.id, attribute="launchPermission", @@ -724,7 +724,7 @@ def test_ami_attribute_error_cases(): # Error: Add with one invalid user ID among other valid IDs, ensure no # partial changes. 
- with assert_raises(EC2ResponseError) as cm: + with pytest.raises(EC2ResponseError) as cm: conn.modify_image_attribute( image.id, attribute="launchPermission", @@ -739,7 +739,7 @@ def test_ami_attribute_error_cases(): attributes.attrs.should.have.length_of(0) # Error: Add with invalid image ID - with assert_raises(EC2ResponseError) as cm: + with pytest.raises(EC2ResponseError) as cm: conn.modify_image_attribute( "ami-abcd1234", attribute="launchPermission", operation="add", groups="all" ) @@ -748,7 +748,7 @@ def test_ami_attribute_error_cases(): cm.exception.request_id.should_not.be.none # Error: Remove with invalid image ID - with assert_raises(EC2ResponseError) as cm: + with pytest.raises(EC2ResponseError) as cm: conn.modify_image_attribute( "ami-abcd1234", attribute="launchPermission", @@ -765,11 +765,11 @@ def test_ami_describe_non_existent(): ec2 = boto3.resource("ec2", region_name="us-west-1") # Valid pattern but non-existent id img = ec2.Image("ami-abcd1234") - with assert_raises(ClientError): + with pytest.raises(ClientError): img.load() # Invalid ami pattern img = ec2.Image("not_an_ami_id") - with assert_raises(ClientError): + with pytest.raises(ClientError): img.load() diff --git a/tests/test_ec2/test_customer_gateways.py b/tests/test_ec2/test_customer_gateways.py index a676a2b5d596..8d94a9a949b0 100644 --- a/tests/test_ec2/test_customer_gateways.py +++ b/tests/test_ec2/test_customer_gateways.py @@ -1,8 +1,7 @@ from __future__ import unicode_literals import boto import sure # noqa -from nose.tools import assert_raises -from nose.tools import assert_false +import pytest from boto.exception import EC2ResponseError from moto import mock_ec2_deprecated @@ -45,5 +44,5 @@ def test_delete_customer_gateways(): @mock_ec2_deprecated def test_delete_customer_gateways_bad_id(): conn = boto.connect_vpc("the_key", "the_secret") - with assert_raises(EC2ResponseError) as cm: + with pytest.raises(EC2ResponseError) as cm: conn.delete_customer_gateway("cgw-0123abcd") diff --git a/tests/test_ec2/test_dhcp_options.py b/tests/test_ec2/test_dhcp_options.py index 4aaceaa07e51..c04faa85d42e 100644 --- a/tests/test_ec2/test_dhcp_options.py +++ b/tests/test_ec2/test_dhcp_options.py @@ -1,8 +1,7 @@ from __future__ import unicode_literals # Ensure 'assert_raises' context manager support for Python 2.6 -import tests.backport_assert_raises -from nose.tools import assert_raises +import pytest import boto3 import boto @@ -33,7 +32,7 @@ def test_dhcp_options_associate_invalid_dhcp_id(): conn = boto.connect_vpc("the_key", "the_secret") vpc = conn.create_vpc("10.0.0.0/16") - with assert_raises(EC2ResponseError) as cm: + with pytest.raises(EC2ResponseError) as cm: conn.associate_dhcp_options("foo", vpc.id) cm.exception.code.should.equal("InvalidDhcpOptionID.NotFound") cm.exception.status.should.equal(400) @@ -46,7 +45,7 @@ def test_dhcp_options_associate_invalid_vpc_id(): conn = boto.connect_vpc("the_key", "the_secret") dhcp_options = conn.create_dhcp_options(SAMPLE_DOMAIN_NAME, SAMPLE_NAME_SERVERS) - with assert_raises(EC2ResponseError) as cm: + with pytest.raises(EC2ResponseError) as cm: conn.associate_dhcp_options(dhcp_options.id, "foo") cm.exception.code.should.equal("InvalidVpcID.NotFound") cm.exception.status.should.equal(400) @@ -64,7 +63,7 @@ def test_dhcp_options_delete_with_vpc(): rval = conn.associate_dhcp_options(dhcp_options_id, vpc.id) rval.should.be.equal(True) - with assert_raises(EC2ResponseError) as cm: + with pytest.raises(EC2ResponseError) as cm: conn.delete_dhcp_options(dhcp_options_id) 
cm.exception.code.should.equal("DependencyViolation") cm.exception.status.should.equal(400) @@ -72,7 +71,7 @@ def test_dhcp_options_delete_with_vpc(): vpc.delete() - with assert_raises(EC2ResponseError) as cm: + with pytest.raises(EC2ResponseError) as cm: conn.get_all_dhcp_options([dhcp_options_id]) cm.exception.code.should.equal("InvalidDhcpOptionID.NotFound") cm.exception.status.should.equal(400) @@ -100,13 +99,13 @@ def test_create_dhcp_options_invalid_options(): conn = boto.connect_vpc("the_key", "the_secret") servers = ["f", "f", "f", "f", "f"] - with assert_raises(EC2ResponseError) as cm: + with pytest.raises(EC2ResponseError) as cm: conn.create_dhcp_options(ntp_servers=servers) cm.exception.code.should.equal("InvalidParameterValue") cm.exception.status.should.equal(400) cm.exception.request_id.should_not.be.none - with assert_raises(EC2ResponseError) as cm: + with pytest.raises(EC2ResponseError) as cm: conn.create_dhcp_options(netbios_node_type="0") cm.exception.code.should.equal("InvalidParameterValue") cm.exception.status.should.equal(400) @@ -131,7 +130,7 @@ def test_describe_dhcp_options_invalid_id(): """get error on invalid dhcp_option_id lookup""" conn = boto.connect_vpc("the_key", "the_secret") - with assert_raises(EC2ResponseError) as cm: + with pytest.raises(EC2ResponseError) as cm: conn.get_all_dhcp_options(["1"]) cm.exception.code.should.equal("InvalidDhcpOptionID.NotFound") cm.exception.status.should.equal(400) @@ -149,7 +148,7 @@ def test_delete_dhcp_options(): conn.delete_dhcp_options(dhcp_option.id) # .should.be.equal(True) - with assert_raises(EC2ResponseError) as cm: + with pytest.raises(EC2ResponseError) as cm: conn.get_all_dhcp_options([dhcp_option.id]) cm.exception.code.should.equal("InvalidDhcpOptionID.NotFound") cm.exception.status.should.equal(400) @@ -162,7 +161,7 @@ def test_delete_dhcp_options_invalid_id(): conn.create_dhcp_options() - with assert_raises(EC2ResponseError) as cm: + with pytest.raises(EC2ResponseError) as cm: conn.delete_dhcp_options("dopt-abcd1234") cm.exception.code.should.equal("InvalidDhcpOptionID.NotFound") cm.exception.status.should.equal(400) @@ -175,7 +174,7 @@ def test_delete_dhcp_options_malformed_id(): conn.create_dhcp_options() - with assert_raises(EC2ResponseError) as cm: + with pytest.raises(EC2ResponseError) as cm: conn.delete_dhcp_options("foo-abcd1234") cm.exception.code.should.equal("InvalidDhcpOptionsId.Malformed") cm.exception.status.should.equal(400) diff --git a/tests/test_ec2/test_elastic_block_store.py b/tests/test_ec2/test_elastic_block_store.py index 2a5dfbf2ad15..846d1bacc218 100644 --- a/tests/test_ec2/test_elastic_block_store.py +++ b/tests/test_ec2/test_elastic_block_store.py @@ -1,8 +1,7 @@ from __future__ import unicode_literals # Ensure 'assert_raises' context manager support for Python 2.6 -import tests.backport_assert_raises -from nose.tools import assert_raises +import pytest from moto.ec2 import ec2_backends import boto @@ -31,7 +30,7 @@ def test_create_and_delete_volume(): volume = current_volume[0] - with assert_raises(EC2ResponseError) as ex: + with pytest.raises(EC2ResponseError) as ex: volume.delete(dry_run=True) ex.exception.error_code.should.equal("DryRunOperation") ex.exception.status.should.equal(400) @@ -46,7 +45,7 @@ def test_create_and_delete_volume(): my_volume.should.have.length_of(0) # Deleting something that was already deleted should throw an error - with assert_raises(EC2ResponseError) as cm: + with pytest.raises(EC2ResponseError) as cm: volume.delete() 
cm.exception.code.should.equal("InvalidVolume.NotFound") cm.exception.status.should.equal(400) @@ -95,7 +94,7 @@ def test_delete_attached_volume(): @mock_ec2_deprecated def test_create_encrypted_volume_dryrun(): conn = boto.ec2.connect_to_region("us-east-1") - with assert_raises(EC2ResponseError) as ex: + with pytest.raises(EC2ResponseError) as ex: conn.create_volume(80, "us-east-1a", encrypted=True, dry_run=True) ex.exception.error_code.should.equal("DryRunOperation") ex.exception.status.should.equal(400) @@ -109,7 +108,7 @@ def test_create_encrypted_volume(): conn = boto.ec2.connect_to_region("us-east-1") volume = conn.create_volume(80, "us-east-1a", encrypted=True) - with assert_raises(EC2ResponseError) as ex: + with pytest.raises(EC2ResponseError) as ex: conn.create_volume(80, "us-east-1a", encrypted=True, dry_run=True) ex.exception.error_code.should.equal("DryRunOperation") ex.exception.status.should.equal(400) @@ -134,7 +133,7 @@ def test_filter_volume_by_id(): vol2 = conn.get_all_volumes(volume_ids=[volume1.id, volume2.id]) vol2.should.have.length_of(2) - with assert_raises(EC2ResponseError) as cm: + with pytest.raises(EC2ResponseError) as cm: conn.get_all_volumes(volume_ids=["vol-does_not_exist"]) cm.exception.code.should.equal("InvalidVolume.NotFound") cm.exception.status.should.equal(400) @@ -259,7 +258,7 @@ def test_volume_attach_and_detach(): volume.update() volume.volume_state().should.equal("available") - with assert_raises(EC2ResponseError) as ex: + with pytest.raises(EC2ResponseError) as ex: volume.attach(instance.id, "/dev/sdh", dry_run=True) ex.exception.error_code.should.equal("DryRunOperation") ex.exception.status.should.equal(400) @@ -275,7 +274,7 @@ def test_volume_attach_and_detach(): volume.attach_data.instance_id.should.equal(instance.id) - with assert_raises(EC2ResponseError) as ex: + with pytest.raises(EC2ResponseError) as ex: volume.detach(dry_run=True) ex.exception.error_code.should.equal("DryRunOperation") ex.exception.status.should.equal(400) @@ -288,19 +287,19 @@ def test_volume_attach_and_detach(): volume.update() volume.volume_state().should.equal("available") - with assert_raises(EC2ResponseError) as cm1: + with pytest.raises(EC2ResponseError) as cm1: volume.attach("i-1234abcd", "/dev/sdh") cm1.exception.code.should.equal("InvalidInstanceID.NotFound") cm1.exception.status.should.equal(400) cm1.exception.request_id.should_not.be.none - with assert_raises(EC2ResponseError) as cm2: + with pytest.raises(EC2ResponseError) as cm2: conn.detach_volume(volume.id, instance.id, "/dev/sdh") cm2.exception.code.should.equal("InvalidAttachment.NotFound") cm2.exception.status.should.equal(400) cm2.exception.request_id.should_not.be.none - with assert_raises(EC2ResponseError) as cm3: + with pytest.raises(EC2ResponseError) as cm3: conn.detach_volume(volume.id, "i-1234abcd", "/dev/sdh") cm3.exception.code.should.equal("InvalidInstanceID.NotFound") cm3.exception.status.should.equal(400) @@ -312,7 +311,7 @@ def test_create_snapshot(): conn = boto.ec2.connect_to_region("us-east-1") volume = conn.create_volume(80, "us-east-1a") - with assert_raises(EC2ResponseError) as ex: + with pytest.raises(EC2ResponseError) as ex: snapshot = volume.create_snapshot("a dryrun snapshot", dry_run=True) ex.exception.error_code.should.equal("DryRunOperation") ex.exception.status.should.equal(400) @@ -340,7 +339,7 @@ def test_create_snapshot(): conn.get_all_snapshots().should.have.length_of(num_snapshots) # Deleting something that was already deleted should throw an error - with 
assert_raises(EC2ResponseError) as cm: + with pytest.raises(EC2ResponseError) as cm: snapshot.delete() cm.exception.code.should.equal("InvalidSnapshot.NotFound") cm.exception.status.should.equal(400) @@ -382,7 +381,7 @@ def test_filter_snapshot_by_id(): s.volume_id.should.be.within([volume2.id, volume3.id]) s.region.name.should.equal(conn.region.name) - with assert_raises(EC2ResponseError) as cm: + with pytest.raises(EC2ResponseError) as cm: conn.get_all_snapshots(snapshot_ids=["snap-does_not_exist"]) cm.exception.code.should.equal("InvalidSnapshot.NotFound") cm.exception.status.should.equal(400) @@ -484,7 +483,7 @@ def test_snapshot_attribute(): # Add 'all' group and confirm - with assert_raises(EC2ResponseError) as ex: + with pytest.raises(EC2ResponseError) as ex: conn.modify_snapshot_attribute(**dict(ADD_GROUP_ARGS, **{"dry_run": True})) ex.exception.error_code.should.equal("DryRunOperation") ex.exception.status.should.equal(400) @@ -506,7 +505,7 @@ def test_snapshot_attribute(): ) # Remove 'all' group and confirm - with assert_raises(EC2ResponseError) as ex: + with pytest.raises(EC2ResponseError) as ex: conn.modify_snapshot_attribute(**dict(REMOVE_GROUP_ARGS, **{"dry_run": True})) ex.exception.error_code.should.equal("DryRunOperation") ex.exception.status.should.equal(400) @@ -527,7 +526,7 @@ def test_snapshot_attribute(): ).should_not.throw(EC2ResponseError) # Error: Add with group != 'all' - with assert_raises(EC2ResponseError) as cm: + with pytest.raises(EC2ResponseError) as cm: conn.modify_snapshot_attribute( snapshot.id, attribute="createVolumePermission", @@ -539,7 +538,7 @@ def test_snapshot_attribute(): cm.exception.request_id.should_not.be.none # Error: Add with invalid snapshot ID - with assert_raises(EC2ResponseError) as cm: + with pytest.raises(EC2ResponseError) as cm: conn.modify_snapshot_attribute( "snapshot-abcd1234", attribute="createVolumePermission", @@ -551,7 +550,7 @@ def test_snapshot_attribute(): cm.exception.request_id.should_not.be.none # Error: Remove with invalid snapshot ID - with assert_raises(EC2ResponseError) as cm: + with pytest.raises(EC2ResponseError) as cm: conn.modify_snapshot_attribute( "snapshot-abcd1234", attribute="createVolumePermission", @@ -740,7 +739,7 @@ def test_create_volume_from_snapshot(): volume = conn.create_volume(80, "us-east-1a") snapshot = volume.create_snapshot("a test snapshot") - with assert_raises(EC2ResponseError) as ex: + with pytest.raises(EC2ResponseError) as ex: snapshot = volume.create_snapshot("a test snapshot", dry_run=True) ex.exception.error_code.should.equal("DryRunOperation") ex.exception.status.should.equal(400) @@ -786,7 +785,7 @@ def test_modify_attribute_blockDeviceMapping(): instance = reservation.instances[0] - with assert_raises(EC2ResponseError) as ex: + with pytest.raises(EC2ResponseError) as ex: instance.modify_attribute( "blockDeviceMapping", {"/dev/sda1": True}, dry_run=True ) @@ -809,7 +808,7 @@ def test_volume_tag_escaping(): vol = conn.create_volume(10, "us-east-1a") snapshot = conn.create_snapshot(vol.id, "Desc") - with assert_raises(EC2ResponseError) as ex: + with pytest.raises(EC2ResponseError) as ex: snapshot.add_tags({"key": ""}, dry_run=True) ex.exception.error_code.should.equal("DryRunOperation") ex.exception.status.should.equal(400) @@ -879,25 +878,25 @@ def test_copy_snapshot(): getattr(source, attrib).should.equal(getattr(dest, attrib)) # Copy from non-existent source ID. 
- with assert_raises(ClientError) as cm: + with pytest.raises(ClientError) as cm: create_snapshot_error = ec2_client.create_snapshot(VolumeId="vol-abcd1234") - cm.exception.response["Error"]["Code"].should.equal("InvalidVolume.NotFound") - cm.exception.response["Error"]["Message"].should.equal( - "The volume 'vol-abcd1234' does not exist." - ) - cm.exception.response["ResponseMetadata"]["RequestId"].should_not.be.none - cm.exception.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) + cm.exception.response["Error"]["Code"].should.equal("InvalidVolume.NotFound") + cm.exception.response["Error"]["Message"].should.equal( + "The volume 'vol-abcd1234' does not exist." + ) + cm.exception.response["ResponseMetadata"]["RequestId"].should_not.be.none + cm.exception.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) # Copy from non-existent source region. - with assert_raises(ClientError) as cm: + with pytest.raises(ClientError) as cm: copy_snapshot_response = dest_ec2_client.copy_snapshot( SourceSnapshotId=create_snapshot_response["SnapshotId"], SourceRegion="eu-west-2", ) - cm.exception.response["Error"]["Code"].should.equal("InvalidSnapshot.NotFound") - cm.exception.response["Error"]["Message"].should.be.none - cm.exception.response["ResponseMetadata"]["RequestId"].should_not.be.none - cm.exception.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) + cm.exception.response["Error"]["Code"].should.equal("InvalidSnapshot.NotFound") + cm.exception.response["Error"]["Message"].should.be.none + cm.exception.response["ResponseMetadata"]["RequestId"].should_not.be.none + cm.exception.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) @mock_ec2 diff --git a/tests/test_ec2/test_elastic_ip_addresses.py b/tests/test_ec2/test_elastic_ip_addresses.py index baecb94d6d15..e9a247ea7cbc 100644 --- a/tests/test_ec2/test_elastic_ip_addresses.py +++ b/tests/test_ec2/test_elastic_ip_addresses.py @@ -1,8 +1,7 @@ from __future__ import unicode_literals # Ensure 'assert_raises' context manager support for Python 2.6 -import tests.backport_assert_raises -from nose.tools import assert_raises +import pytest import boto import boto3 @@ -21,7 +20,7 @@ def test_eip_allocate_classic(): """Allocate/release Classic EIP""" conn = boto.connect_ec2("the_key", "the_secret") - with assert_raises(EC2ResponseError) as ex: + with pytest.raises(EC2ResponseError) as ex: standard = conn.allocate_address(dry_run=True) ex.exception.error_code.should.equal("DryRunOperation") ex.exception.status.should.equal(400) @@ -35,7 +34,7 @@ def test_eip_allocate_classic(): standard.instance_id.should.be.none standard.domain.should.be.equal("standard") - with assert_raises(EC2ResponseError) as ex: + with pytest.raises(EC2ResponseError) as ex: standard.release(dry_run=True) ex.exception.error_code.should.equal("DryRunOperation") ex.exception.status.should.equal(400) @@ -52,7 +51,7 @@ def test_eip_allocate_vpc(): """Allocate/release VPC EIP""" conn = boto.connect_ec2("the_key", "the_secret") - with assert_raises(EC2ResponseError) as ex: + with pytest.raises(EC2ResponseError) as ex: vpc = conn.allocate_address(domain="vpc", dry_run=True) ex.exception.error_code.should.equal("DryRunOperation") ex.exception.status.should.equal(400) @@ -84,7 +83,7 @@ def test_eip_allocate_invalid_domain(): """Allocate EIP invalid domain""" conn = boto.connect_ec2("the_key", "the_secret") - with assert_raises(EC2ResponseError) as cm: + with pytest.raises(EC2ResponseError) as cm: conn.allocate_address(domain="bogus") 
cm.exception.code.should.equal("InvalidParameterValue") cm.exception.status.should.equal(400) @@ -102,13 +101,13 @@ def test_eip_associate_classic(): eip = conn.allocate_address() eip.instance_id.should.be.none - with assert_raises(EC2ResponseError) as cm: + with pytest.raises(EC2ResponseError) as cm: conn.associate_address(public_ip=eip.public_ip) cm.exception.code.should.equal("MissingParameter") cm.exception.status.should.equal(400) cm.exception.request_id.should_not.be.none - with assert_raises(EC2ResponseError) as ex: + with pytest.raises(EC2ResponseError) as ex: conn.associate_address( instance_id=instance.id, public_ip=eip.public_ip, dry_run=True ) @@ -123,7 +122,7 @@ def test_eip_associate_classic(): eip = conn.get_all_addresses(addresses=[eip.public_ip])[0] eip.instance_id.should.be.equal(instance.id) - with assert_raises(EC2ResponseError) as ex: + with pytest.raises(EC2ResponseError) as ex: conn.disassociate_address(public_ip=eip.public_ip, dry_run=True) ex.exception.error_code.should.equal("DryRunOperation") ex.exception.status.should.equal(400) @@ -153,7 +152,7 @@ def test_eip_associate_vpc(): eip = conn.allocate_address(domain="vpc") eip.instance_id.should.be.none - with assert_raises(EC2ResponseError) as cm: + with pytest.raises(EC2ResponseError) as cm: conn.associate_address(allocation_id=eip.allocation_id) cm.exception.code.should.equal("MissingParameter") cm.exception.status.should.equal(400) @@ -169,7 +168,7 @@ def test_eip_associate_vpc(): eip.instance_id.should.be.equal("") eip.association_id.should.be.none - with assert_raises(EC2ResponseError) as ex: + with pytest.raises(EC2ResponseError) as ex: eip.release(dry_run=True) ex.exception.error_code.should.equal("DryRunOperation") ex.exception.status.should.equal(400) @@ -241,7 +240,7 @@ def test_eip_associate_network_interface(): eip = conn.allocate_address(domain="vpc") eip.network_interface_id.should.be.none - with assert_raises(EC2ResponseError) as cm: + with pytest.raises(EC2ResponseError) as cm: conn.associate_address(network_interface_id=eni.id) cm.exception.code.should.equal("MissingParameter") cm.exception.status.should.equal(400) @@ -276,7 +275,7 @@ def test_eip_reassociate(): conn.associate_address(instance_id=instance1.id, public_ip=eip.public_ip) # Different ID detects resource association - with assert_raises(EC2ResponseError) as cm: + with pytest.raises(EC2ResponseError) as cm: conn.associate_address( instance_id=instance2.id, public_ip=eip.public_ip, allow_reassociation=False ) @@ -312,7 +311,7 @@ def test_eip_reassociate_nic(): conn.associate_address(network_interface_id=eni1.id, public_ip=eip.public_ip) # Different ID detects resource association - with assert_raises(EC2ResponseError) as cm: + with pytest.raises(EC2ResponseError) as cm: conn.associate_address(network_interface_id=eni2.id, public_ip=eip.public_ip) cm.exception.code.should.equal("Resource.AlreadyAssociated") cm.exception.status.should.equal(400) @@ -336,7 +335,7 @@ def test_eip_associate_invalid_args(): eip = conn.allocate_address() - with assert_raises(EC2ResponseError) as cm: + with pytest.raises(EC2ResponseError) as cm: conn.associate_address(instance_id=instance.id) cm.exception.code.should.equal("MissingParameter") cm.exception.status.should.equal(400) @@ -350,7 +349,7 @@ def test_eip_disassociate_bogus_association(): """Disassociate bogus EIP""" conn = boto.connect_ec2("the_key", "the_secret") - with assert_raises(EC2ResponseError) as cm: + with pytest.raises(EC2ResponseError) as cm: conn.disassociate_address(association_id="bogus") 
cm.exception.code.should.equal("InvalidAssociationID.NotFound") cm.exception.status.should.equal(400) @@ -362,7 +361,7 @@ def test_eip_release_bogus_eip(): """Release bogus EIP""" conn = boto.connect_ec2("the_key", "the_secret") - with assert_raises(EC2ResponseError) as cm: + with pytest.raises(EC2ResponseError) as cm: conn.release_address(allocation_id="bogus") cm.exception.code.should.equal("InvalidAllocationID.NotFound") cm.exception.status.should.equal(400) @@ -374,7 +373,7 @@ def test_eip_disassociate_arg_error(): """Invalid arguments disassociate address""" conn = boto.connect_ec2("the_key", "the_secret") - with assert_raises(EC2ResponseError) as cm: + with pytest.raises(EC2ResponseError) as cm: conn.disassociate_address() cm.exception.code.should.equal("MissingParameter") cm.exception.status.should.equal(400) @@ -386,7 +385,7 @@ def test_eip_release_arg_error(): """Invalid arguments release address""" conn = boto.connect_ec2("the_key", "the_secret") - with assert_raises(EC2ResponseError) as cm: + with pytest.raises(EC2ResponseError) as cm: conn.release_address() cm.exception.code.should.equal("MissingParameter") cm.exception.status.should.equal(400) @@ -438,7 +437,7 @@ def test_eip_describe_none(): """Error when search for bogus IP""" conn = boto.connect_ec2("the_key", "the_secret") - with assert_raises(EC2ResponseError) as cm: + with pytest.raises(EC2ResponseError) as cm: conn.get_all_addresses(addresses=["256.256.256.256"]) cm.exception.code.should.equal("InvalidAddress.NotFound") cm.exception.status.should.equal(400) diff --git a/tests/test_ec2/test_elastic_network_interfaces.py b/tests/test_ec2/test_elastic_network_interfaces.py index e7fd878a6f7c..259885ee075d 100644 --- a/tests/test_ec2/test_elastic_network_interfaces.py +++ b/tests/test_ec2/test_elastic_network_interfaces.py @@ -1,8 +1,7 @@ from __future__ import unicode_literals # Ensure 'assert_raises' context manager support for Python 2.6 -import tests.backport_assert_raises -from nose.tools import assert_raises +import pytest import boto3 from botocore.exceptions import ClientError @@ -21,7 +20,7 @@ def test_elastic_network_interfaces(): vpc = conn.create_vpc("10.0.0.0/16") subnet = conn.create_subnet(vpc.id, "10.0.0.0/18") - with assert_raises(EC2ResponseError) as ex: + with pytest.raises(EC2ResponseError) as ex: eni = conn.create_network_interface(subnet.id, dry_run=True) ex.exception.error_code.should.equal("DryRunOperation") ex.exception.status.should.equal(400) @@ -38,7 +37,7 @@ def test_elastic_network_interfaces(): eni.private_ip_addresses.should.have.length_of(1) eni.private_ip_addresses[0].private_ip_address.startswith("10.").should.be.true - with assert_raises(EC2ResponseError) as ex: + with pytest.raises(EC2ResponseError) as ex: conn.delete_network_interface(eni.id, dry_run=True) ex.exception.error_code.should.equal("DryRunOperation") ex.exception.status.should.equal(400) @@ -51,7 +50,7 @@ def test_elastic_network_interfaces(): all_enis = conn.get_all_network_interfaces() all_enis.should.have.length_of(0) - with assert_raises(EC2ResponseError) as cm: + with pytest.raises(EC2ResponseError) as cm: conn.delete_network_interface(eni.id) cm.exception.error_code.should.equal("InvalidNetworkInterfaceID.NotFound") cm.exception.status.should.equal(400) @@ -62,7 +61,7 @@ def test_elastic_network_interfaces(): def test_elastic_network_interfaces_subnet_validation(): conn = boto.connect_vpc("the_key", "the_secret") - with assert_raises(EC2ResponseError) as cm: + with pytest.raises(EC2ResponseError) as cm: 
conn.create_network_interface("subnet-abcd1234") cm.exception.error_code.should.equal("InvalidSubnetID.NotFound") cm.exception.status.should.equal(400) @@ -133,7 +132,7 @@ def test_elastic_network_interfaces_modify_attribute(): eni.groups.should.have.length_of(1) eni.groups[0].id.should.equal(security_group1.id) - with assert_raises(EC2ResponseError) as ex: + with pytest.raises(EC2ResponseError) as ex: conn.modify_network_interface_attribute( eni.id, "groupset", [security_group2.id], dry_run=True ) @@ -228,7 +227,7 @@ def test_elastic_network_interfaces_get_by_tag_name(): SubnetId=subnet.id, PrivateIpAddress="10.0.10.5" ) - with assert_raises(ClientError) as ex: + with pytest.raises(ClientError) as ex: eni1.create_tags(Tags=[{"Key": "Name", "Value": "eni1"}], DryRun=True) ex.exception.response["Error"]["Code"].should.equal("DryRunOperation") ex.exception.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) diff --git a/tests/test_ec2/test_flow_logs.py b/tests/test_ec2/test_flow_logs.py index 044e6c31d202..1dba572d3e8a 100644 --- a/tests/test_ec2/test_flow_logs.py +++ b/tests/test_ec2/test_flow_logs.py @@ -1,7 +1,6 @@ from __future__ import unicode_literals -import tests.backport_assert_raises # noqa -from nose.tools import assert_raises +import pytest import boto3 @@ -36,7 +35,7 @@ def test_create_flow_logs_s3(): CreateBucketConfiguration={"LocationConstraint": "us-west-1"}, ) - with assert_raises(ClientError) as ex: + with pytest.raises(ClientError) as ex: client.create_flow_logs( ResourceType="VPC", ResourceIds=[vpc["VpcId"]], @@ -87,7 +86,7 @@ def test_create_flow_logs_cloud_watch(): vpc = client.create_vpc(CidrBlock="10.0.0.0/16")["Vpc"] logs_client.create_log_group(logGroupName="test-group") - with assert_raises(ClientError) as ex: + with pytest.raises(ClientError) as ex: client.create_flow_logs( ResourceType="VPC", ResourceIds=[vpc["VpcId"]], @@ -243,7 +242,7 @@ def test_delete_flow_logs_delete_many(): def test_delete_flow_logs_non_existing(): client = boto3.client("ec2", region_name="us-west-1") - with assert_raises(ClientError) as ex: + with pytest.raises(ClientError) as ex: client.delete_flow_logs(FlowLogIds=["fl-1a2b3c4d"]) ex.exception.response["Error"]["Code"].should.equal("InvalidFlowLogId.NotFound") ex.exception.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) @@ -251,7 +250,7 @@ def test_delete_flow_logs_non_existing(): "These flow log ids in the input list are not found: [TotalCount: 1] fl-1a2b3c4d" ) - with assert_raises(ClientError) as ex: + with pytest.raises(ClientError) as ex: client.delete_flow_logs(FlowLogIds=["fl-1a2b3c4d", "fl-2b3c4d5e"]) ex.exception.response["Error"]["Code"].should.equal("InvalidFlowLogId.NotFound") ex.exception.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) @@ -304,7 +303,7 @@ def test_create_flow_logs_invalid_parameters(): CreateBucketConfiguration={"LocationConstraint": "us-west-1"}, ) - with assert_raises(ClientError) as ex: + with pytest.raises(ClientError) as ex: client.create_flow_logs( ResourceType="VPC", ResourceIds=[vpc["VpcId"]], @@ -319,7 +318,7 @@ def test_create_flow_logs_invalid_parameters(): "Invalid Flow Log Max Aggregation Interval" ) - with assert_raises(ClientError) as ex: + with pytest.raises(ClientError) as ex: client.create_flow_logs( ResourceType="VPC", ResourceIds=[vpc["VpcId"]], @@ -332,7 +331,7 @@ def test_create_flow_logs_invalid_parameters(): "LogDestination can't be empty if LogGroupName is not provided." 
) - with assert_raises(ClientError) as ex: + with pytest.raises(ClientError) as ex: client.create_flow_logs( ResourceType="VPC", ResourceIds=[vpc["VpcId"]], @@ -346,7 +345,7 @@ def test_create_flow_logs_invalid_parameters(): "LogDestination type must be cloud-watch-logs if LogGroupName is provided." ) - with assert_raises(ClientError) as ex: + with pytest.raises(ClientError) as ex: client.create_flow_logs( ResourceType="VPC", ResourceIds=[vpc["VpcId"]], @@ -368,7 +367,7 @@ def test_create_flow_logs_invalid_parameters(): )["FlowLogIds"] response.should.have.length_of(1) - with assert_raises(ClientError) as ex: + with pytest.raises(ClientError) as ex: client.create_flow_logs( ResourceType="VPC", ResourceIds=[vpc["VpcId"]], @@ -391,7 +390,7 @@ def test_create_flow_logs_invalid_parameters(): )["FlowLogIds"] response.should.have.length_of(1) - with assert_raises(ClientError) as ex: + with pytest.raises(ClientError) as ex: client.create_flow_logs( ResourceType="VPC", ResourceIds=[vpc["VpcId"]], diff --git a/tests/test_ec2/test_general.py b/tests/test_ec2/test_general.py index 7b8f3bd53b54..b6e75ea6a6a8 100644 --- a/tests/test_ec2/test_general.py +++ b/tests/test_ec2/test_general.py @@ -1,8 +1,7 @@ from __future__ import unicode_literals # Ensure 'assert_raises' context manager support for Python 2.6 -import tests.backport_assert_raises -from nose.tools import assert_raises +import pytest import boto import boto3 @@ -25,7 +24,7 @@ def test_console_output(): def test_console_output_without_instance(): conn = boto.connect_ec2("the_key", "the_secret") - with assert_raises(EC2ResponseError) as cm: + with pytest.raises(EC2ResponseError) as cm: conn.get_console_output("i-1234abcd") cm.exception.code.should.equal("InvalidInstanceID.NotFound") cm.exception.status.should.equal(400) diff --git a/tests/test_ec2/test_instances.py b/tests/test_ec2/test_instances.py index d7a2ff3f3c47..b770862e2737 100644 --- a/tests/test_ec2/test_instances.py +++ b/tests/test_ec2/test_instances.py @@ -3,8 +3,7 @@ # Ensure 'assert_raises' context manager support for Python 2.6 from botocore.exceptions import ClientError -import tests.backport_assert_raises -from nose.tools import assert_raises +import pytest import base64 import ipaddress @@ -52,7 +51,7 @@ def test_add_servers(): def test_instance_launch_and_terminate(): conn = boto.ec2.connect_to_region("us-east-1") - with assert_raises(EC2ResponseError) as ex: + with pytest.raises(EC2ResponseError) as ex: reservation = conn.run_instances("ami-1234abcd", dry_run=True) ex.exception.error_code.should.equal("DryRunOperation") ex.exception.status.should.equal(400) @@ -87,7 +86,7 @@ def test_instance_launch_and_terminate(): volume.attach_data.instance_id.should.equal(instance.id) volume.status.should.equal("in-use") - with assert_raises(EC2ResponseError) as ex: + with pytest.raises(EC2ResponseError) as ex: conn.terminate_instances([instance.id], dry_run=True) ex.exception.error_code.should.equal("DryRunOperation") ex.exception.status.should.equal(400) @@ -290,7 +289,7 @@ def test_get_instances_by_id(): instance_ids.should.equal([instance1.id, instance2.id]) # Call get_all_instances with a bad id should raise an error - with assert_raises(EC2ResponseError) as cm: + with pytest.raises(EC2ResponseError) as cm: conn.get_all_instances(instance_ids=[instance1.id, "i-1234abcd"]) cm.exception.code.should.equal("InvalidInstanceID.NotFound") cm.exception.status.should.equal(400) @@ -743,7 +742,7 @@ def test_instance_start_and_stop(): instance_ids = [instance.id for instance in instances] - 
with assert_raises(EC2ResponseError) as ex: + with pytest.raises(EC2ResponseError) as ex: stopped_instances = conn.stop_instances(instance_ids, dry_run=True) ex.exception.error_code.should.equal("DryRunOperation") ex.exception.status.should.equal(400) @@ -756,7 +755,7 @@ def test_instance_start_and_stop(): for instance in stopped_instances: instance.state.should.equal("stopping") - with assert_raises(EC2ResponseError) as ex: + with pytest.raises(EC2ResponseError) as ex: started_instances = conn.start_instances([instances[0].id], dry_run=True) ex.exception.error_code.should.equal("DryRunOperation") ex.exception.status.should.equal(400) @@ -774,7 +773,7 @@ def test_instance_reboot(): reservation = conn.run_instances("ami-1234abcd") instance = reservation.instances[0] - with assert_raises(EC2ResponseError) as ex: + with pytest.raises(EC2ResponseError) as ex: instance.reboot(dry_run=True) ex.exception.error_code.should.equal("DryRunOperation") ex.exception.status.should.equal(400) @@ -792,7 +791,7 @@ def test_instance_attribute_instance_type(): reservation = conn.run_instances("ami-1234abcd") instance = reservation.instances[0] - with assert_raises(EC2ResponseError) as ex: + with pytest.raises(EC2ResponseError) as ex: instance.modify_attribute("instanceType", "m1.small", dry_run=True) ex.exception.error_code.should.equal("DryRunOperation") ex.exception.status.should.equal(400) @@ -820,7 +819,7 @@ def test_modify_instance_attribute_security_groups(): "test security group 2", "this is a test security group 2" ).id - with assert_raises(EC2ResponseError) as ex: + with pytest.raises(EC2ResponseError) as ex: instance.modify_attribute("groupSet", [sg_id, sg_id2], dry_run=True) ex.exception.error_code.should.equal("DryRunOperation") ex.exception.status.should.equal(400) @@ -843,7 +842,7 @@ def test_instance_attribute_user_data(): reservation = conn.run_instances("ami-1234abcd") instance = reservation.instances[0] - with assert_raises(EC2ResponseError) as ex: + with pytest.raises(EC2ResponseError) as ex: instance.modify_attribute("userData", "this is my user data", dry_run=True) ex.exception.error_code.should.equal("DryRunOperation") ex.exception.status.should.equal(400) @@ -873,7 +872,7 @@ def test_instance_attribute_source_dest_check(): # Set to false (note: Boto converts bool to string, eg 'false') - with assert_raises(EC2ResponseError) as ex: + with pytest.raises(EC2ResponseError) as ex: instance.modify_attribute("sourceDestCheck", False, dry_run=True) ex.exception.error_code.should.equal("DryRunOperation") ex.exception.status.should.equal(400) @@ -919,7 +918,7 @@ def test_user_data_with_run_instance(): def test_run_instance_with_security_group_name(): conn = boto.connect_ec2("the_key", "the_secret") - with assert_raises(EC2ResponseError) as ex: + with pytest.raises(EC2ResponseError) as ex: group = conn.create_security_group("group1", "some description", dry_run=True) ex.exception.error_code.should.equal("DryRunOperation") ex.exception.status.should.equal(400) @@ -1196,7 +1195,7 @@ def test_instance_with_nic_attach_detach(): set([group.id for group in eni.groups]).should.equal(set([security_group2.id])) # Attach - with assert_raises(EC2ResponseError) as ex: + with pytest.raises(EC2ResponseError) as ex: conn.attach_network_interface(eni.id, instance.id, device_index=1, dry_run=True) ex.exception.error_code.should.equal("DryRunOperation") ex.exception.status.should.equal(400) @@ -1223,7 +1222,7 @@ def test_instance_with_nic_attach_detach(): ) # Detach - with assert_raises(EC2ResponseError) as ex: + 
with pytest.raises(EC2ResponseError) as ex: conn.detach_network_interface(instance_eni.attachment.id, dry_run=True) ex.exception.error_code.should.equal("DryRunOperation") ex.exception.status.should.equal(400) @@ -1242,7 +1241,7 @@ def test_instance_with_nic_attach_detach(): set([group.id for group in eni.groups]).should.equal(set([security_group2.id])) # Detach with invalid attachment ID - with assert_raises(EC2ResponseError) as cm: + with pytest.raises(EC2ResponseError) as cm: conn.detach_network_interface("eni-attach-1234abcd") cm.exception.code.should.equal("InvalidAttachmentID.NotFound") cm.exception.status.should.equal(400) @@ -1410,7 +1409,7 @@ def test_describe_instance_status_with_instance_filter_deprecated(): all_status[0].id.should.equal(instance.id) # Call get_all_instance_status with a bad id should raise an error - with assert_raises(EC2ResponseError) as cm: + with pytest.raises(EC2ResponseError) as cm: conn.get_all_instance_status(instance_ids=[instance.id, "i-1234abcd"]) cm.exception.code.should.equal("InvalidInstanceID.NotFound") cm.exception.status.should.equal(400) @@ -1537,7 +1536,7 @@ def test_get_instance_by_security_group(): security_group = conn.create_security_group("test", "test") - with assert_raises(EC2ResponseError) as ex: + with pytest.raises(EC2ResponseError) as ex: conn.modify_instance_attribute( instance.id, "groupSet", [security_group.id], dry_run=True ) @@ -1661,7 +1660,7 @@ def test_describe_instance_attribute(): ] for invalid_instance_attribute in invalid_instance_attributes: - with assert_raises(ClientError) as ex: + with pytest.raises(ClientError) as ex: client.describe_instance_attribute( InstanceId=instance_id, Attribute=invalid_instance_attribute ) diff --git a/tests/test_ec2/test_internet_gateways.py b/tests/test_ec2/test_internet_gateways.py index 2319bf0626a8..cfa8bafe908c 100644 --- a/tests/test_ec2/test_internet_gateways.py +++ b/tests/test_ec2/test_internet_gateways.py @@ -1,8 +1,7 @@ from __future__ import unicode_literals # Ensure 'assert_raises' context manager support for Python 2.6 -import tests.backport_assert_raises -from nose.tools import assert_raises +import pytest import re @@ -28,7 +27,7 @@ def test_igw_create(): conn.get_all_internet_gateways().should.have.length_of(0) - with assert_raises(EC2ResponseError) as ex: + with pytest.raises(EC2ResponseError) as ex: igw = conn.create_internet_gateway(dry_run=True) ex.exception.error_code.should.equal("DryRunOperation") ex.exception.status.should.equal(400) @@ -51,7 +50,7 @@ def test_igw_attach(): igw = conn.create_internet_gateway() vpc = conn.create_vpc(VPC_CIDR) - with assert_raises(EC2ResponseError) as ex: + with pytest.raises(EC2ResponseError) as ex: conn.attach_internet_gateway(igw.id, vpc.id, dry_run=True) ex.exception.error_code.should.equal("DryRunOperation") ex.exception.status.should.equal(400) @@ -71,7 +70,7 @@ def test_igw_attach_bad_vpc(): conn = boto.connect_vpc("the_key", "the_secret") igw = conn.create_internet_gateway() - with assert_raises(EC2ResponseError) as cm: + with pytest.raises(EC2ResponseError) as cm: conn.attach_internet_gateway(igw.id, BAD_VPC) cm.exception.code.should.equal("InvalidVpcID.NotFound") cm.exception.status.should.equal(400) @@ -87,7 +86,7 @@ def test_igw_attach_twice(): vpc2 = conn.create_vpc(VPC_CIDR) conn.attach_internet_gateway(igw.id, vpc1.id) - with assert_raises(EC2ResponseError) as cm: + with pytest.raises(EC2ResponseError) as cm: conn.attach_internet_gateway(igw.id, vpc2.id) cm.exception.code.should.equal("Resource.AlreadyAssociated") 
cm.exception.status.should.equal(400) @@ -102,7 +101,7 @@ def test_igw_detach(): vpc = conn.create_vpc(VPC_CIDR) conn.attach_internet_gateway(igw.id, vpc.id) - with assert_raises(EC2ResponseError) as ex: + with pytest.raises(EC2ResponseError) as ex: conn.detach_internet_gateway(igw.id, vpc.id, dry_run=True) ex.exception.error_code.should.equal("DryRunOperation") ex.exception.status.should.equal(400) @@ -124,7 +123,7 @@ def test_igw_detach_wrong_vpc(): vpc2 = conn.create_vpc(VPC_CIDR) conn.attach_internet_gateway(igw.id, vpc1.id) - with assert_raises(EC2ResponseError) as cm: + with pytest.raises(EC2ResponseError) as cm: conn.detach_internet_gateway(igw.id, vpc2.id) cm.exception.code.should.equal("Gateway.NotAttached") cm.exception.status.should.equal(400) @@ -139,7 +138,7 @@ def test_igw_detach_invalid_vpc(): vpc = conn.create_vpc(VPC_CIDR) conn.attach_internet_gateway(igw.id, vpc.id) - with assert_raises(EC2ResponseError) as cm: + with pytest.raises(EC2ResponseError) as cm: conn.detach_internet_gateway(igw.id, BAD_VPC) cm.exception.code.should.equal("Gateway.NotAttached") cm.exception.status.should.equal(400) @@ -153,7 +152,7 @@ def test_igw_detach_unattached(): igw = conn.create_internet_gateway() vpc = conn.create_vpc(VPC_CIDR) - with assert_raises(EC2ResponseError) as cm: + with pytest.raises(EC2ResponseError) as cm: conn.detach_internet_gateway(igw.id, vpc.id) cm.exception.code.should.equal("Gateway.NotAttached") cm.exception.status.should.equal(400) @@ -169,7 +168,7 @@ def test_igw_delete(): igw = conn.create_internet_gateway() conn.get_all_internet_gateways().should.have.length_of(1) - with assert_raises(EC2ResponseError) as ex: + with pytest.raises(EC2ResponseError) as ex: conn.delete_internet_gateway(igw.id, dry_run=True) ex.exception.error_code.should.equal("DryRunOperation") ex.exception.status.should.equal(400) @@ -189,7 +188,7 @@ def test_igw_delete_attached(): vpc = conn.create_vpc(VPC_CIDR) conn.attach_internet_gateway(igw.id, vpc.id) - with assert_raises(EC2ResponseError) as cm: + with pytest.raises(EC2ResponseError) as cm: conn.delete_internet_gateway(igw.id) cm.exception.code.should.equal("DependencyViolation") cm.exception.status.should.equal(400) @@ -209,7 +208,7 @@ def test_igw_desribe(): def test_igw_describe_bad_id(): """ internet gateway fail to fetch by bad id """ conn = boto.connect_vpc("the_key", "the_secret") - with assert_raises(EC2ResponseError) as cm: + with pytest.raises(EC2ResponseError) as cm: conn.get_all_internet_gateways([BAD_IGW]) cm.exception.code.should.equal("InvalidInternetGatewayID.NotFound") cm.exception.status.should.equal(400) diff --git a/tests/test_ec2/test_key_pairs.py b/tests/test_ec2/test_key_pairs.py index 09982ac7a06d..022b4ceeb1ae 100644 --- a/tests/test_ec2/test_key_pairs.py +++ b/tests/test_ec2/test_key_pairs.py @@ -1,8 +1,7 @@ from __future__ import unicode_literals # Ensure 'assert_raises' context manager support for Python 2.6 -import tests.backport_assert_raises -from nose.tools import assert_raises +import pytest import boto import sure # noqa @@ -56,7 +55,7 @@ def test_key_pairs_empty(): def test_key_pairs_invalid_id(): conn = boto.connect_ec2("the_key", "the_secret") - with assert_raises(EC2ResponseError) as cm: + with pytest.raises(EC2ResponseError) as cm: conn.get_all_key_pairs("foo") cm.exception.code.should.equal("InvalidKeyPair.NotFound") cm.exception.status.should.equal(400) @@ -67,7 +66,7 @@ def test_key_pairs_invalid_id(): def test_key_pairs_create(): conn = boto.connect_ec2("the_key", "the_secret") - with 
assert_raises(EC2ResponseError) as ex: + with pytest.raises(EC2ResponseError) as ex: conn.create_key_pair("foo", dry_run=True) ex.exception.error_code.should.equal("DryRunOperation") ex.exception.status.should.equal(400) @@ -110,7 +109,7 @@ def test_key_pairs_create_exist(): conn.create_key_pair("foo") assert len(conn.get_all_key_pairs()) == 1 - with assert_raises(EC2ResponseError) as cm: + with pytest.raises(EC2ResponseError) as cm: conn.create_key_pair("foo") cm.exception.code.should.equal("InvalidKeyPair.Duplicate") cm.exception.status.should.equal(400) @@ -130,7 +129,7 @@ def test_key_pairs_delete_exist(): conn = boto.connect_ec2("the_key", "the_secret") conn.create_key_pair("foo") - with assert_raises(EC2ResponseError) as ex: + with pytest.raises(EC2ResponseError) as ex: r = conn.delete_key_pair("foo", dry_run=True) ex.exception.error_code.should.equal("DryRunOperation") ex.exception.status.should.equal(400) @@ -147,7 +146,7 @@ def test_key_pairs_delete_exist(): def test_key_pairs_import(): conn = boto.connect_ec2("the_key", "the_secret") - with assert_raises(EC2ResponseError) as ex: + with pytest.raises(EC2ResponseError) as ex: conn.import_key_pair("foo", RSA_PUBLIC_KEY_OPENSSH, dry_run=True) ex.exception.error_code.should.equal("DryRunOperation") ex.exception.status.should.equal(400) @@ -176,7 +175,7 @@ def test_key_pairs_import_exist(): assert kp.name == "foo" assert len(conn.get_all_key_pairs()) == 1 - with assert_raises(EC2ResponseError) as cm: + with pytest.raises(EC2ResponseError) as cm: conn.create_key_pair("foo") cm.exception.code.should.equal("InvalidKeyPair.Duplicate") cm.exception.status.should.equal(400) @@ -187,19 +186,19 @@ def test_key_pairs_import_exist(): def test_key_pairs_invalid(): conn = boto.connect_ec2("the_key", "the_secret") - with assert_raises(EC2ResponseError) as ex: + with pytest.raises(EC2ResponseError) as ex: conn.import_key_pair("foo", b"") ex.exception.error_code.should.equal("InvalidKeyPair.Format") ex.exception.status.should.equal(400) ex.exception.message.should.equal("Key is not in valid OpenSSH public key format") - with assert_raises(EC2ResponseError) as ex: + with pytest.raises(EC2ResponseError) as ex: conn.import_key_pair("foo", b"garbage") ex.exception.error_code.should.equal("InvalidKeyPair.Format") ex.exception.status.should.equal(400) ex.exception.message.should.equal("Key is not in valid OpenSSH public key format") - with assert_raises(EC2ResponseError) as ex: + with pytest.raises(EC2ResponseError) as ex: conn.import_key_pair("foo", DSA_PUBLIC_KEY_OPENSSH) ex.exception.error_code.should.equal("InvalidKeyPair.Format") ex.exception.status.should.equal(400) diff --git a/tests/test_ec2/test_launch_templates.py b/tests/test_ec2/test_launch_templates.py index 4c37818d1070..0bcf188ce548 100644 --- a/tests/test_ec2/test_launch_templates.py +++ b/tests/test_ec2/test_launch_templates.py @@ -1,7 +1,7 @@ import boto3 import sure # noqa -from nose.tools import assert_raises +import pytest from botocore.client import ClientError from moto import mock_ec2 @@ -30,7 +30,7 @@ def test_launch_template_create(): lt["DefaultVersionNumber"].should.equal(1) lt["LatestVersionNumber"].should.equal(1) - with assert_raises(ClientError) as ex: + with pytest.raises(ClientError) as ex: cli.create_launch_template( LaunchTemplateName="test-template", LaunchTemplateData={ diff --git a/tests/test_ec2/test_network_acls.py b/tests/test_ec2/test_network_acls.py index c20bf75c60c2..1bb0587335fa 100644 --- a/tests/test_ec2/test_network_acls.py +++ 
b/tests/test_ec2/test_network_acls.py @@ -2,7 +2,7 @@ import boto import boto3 import sure # noqa -from nose.tools import assert_raises +import pytest from botocore.exceptions import ClientError from moto import mock_ec2_deprecated, mock_ec2 @@ -261,7 +261,7 @@ def test_duplicate_network_acl_entry(): RuleNumber=rule_number, ) - with assert_raises(ClientError) as ex: + with pytest.raises(ClientError) as ex: default_network_acl.create_entry( CidrBlock="10.0.0.0/0", Egress=egress, diff --git a/tests/test_ec2/test_route_tables.py b/tests/test_ec2/test_route_tables.py index a652bd1cf3a0..4ebfeb7376d0 100644 --- a/tests/test_ec2/test_route_tables.py +++ b/tests/test_ec2/test_route_tables.py @@ -1,8 +1,7 @@ from __future__ import unicode_literals # Ensure 'assert_raises' context manager support for Python 2.6 -import tests.backport_assert_raises -from nose.tools import assert_raises +import pytest import boto import boto3 @@ -61,7 +60,7 @@ def test_route_tables_additional(): local_route.state.should.equal("active") local_route.destination_cidr_block.should.equal(vpc.cidr_block) - with assert_raises(EC2ResponseError) as cm: + with pytest.raises(EC2ResponseError) as cm: conn.delete_vpc(vpc.id) cm.exception.code.should.equal("DependencyViolation") cm.exception.status.should.equal(400) @@ -72,7 +71,7 @@ def test_route_tables_additional(): all_route_tables = conn.get_all_route_tables(filters={"vpc-id": vpc.id}) all_route_tables.should.have.length_of(1) - with assert_raises(EC2ResponseError) as cm: + with pytest.raises(EC2ResponseError) as cm: conn.delete_route_table("rtb-1234abcd") cm.exception.code.should.equal("InvalidRouteTableID.NotFound") cm.exception.status.should.equal(400) @@ -197,7 +196,7 @@ def test_route_table_associations(): association_id_idempotent.should.equal(association_id) # Error: Attempt delete associated route table. 
- with assert_raises(EC2ResponseError) as cm: + with pytest.raises(EC2ResponseError) as cm: conn.delete_route_table(route_table.id) cm.exception.code.should.equal("DependencyViolation") cm.exception.status.should.equal(400) @@ -211,21 +210,21 @@ def test_route_table_associations(): route_table.associations.should.have.length_of(0) # Error: Disassociate with invalid association ID - with assert_raises(EC2ResponseError) as cm: + with pytest.raises(EC2ResponseError) as cm: conn.disassociate_route_table(association_id) cm.exception.code.should.equal("InvalidAssociationID.NotFound") cm.exception.status.should.equal(400) cm.exception.request_id.should_not.be.none # Error: Associate with invalid subnet ID - with assert_raises(EC2ResponseError) as cm: + with pytest.raises(EC2ResponseError) as cm: conn.associate_route_table(route_table.id, "subnet-1234abcd") cm.exception.code.should.equal("InvalidSubnetID.NotFound") cm.exception.status.should.equal(400) cm.exception.request_id.should_not.be.none # Error: Associate with invalid route table ID - with assert_raises(EC2ResponseError) as cm: + with pytest.raises(EC2ResponseError) as cm: conn.associate_route_table("rtb-1234abcd", subnet.id) cm.exception.code.should.equal("InvalidRouteTableID.NotFound") cm.exception.status.should.equal(400) @@ -293,7 +292,7 @@ def test_route_table_replace_route_table_association(): association_id_idempotent.should.equal(association_id2) # Error: Replace association with invalid association ID - with assert_raises(EC2ResponseError) as cm: + with pytest.raises(EC2ResponseError) as cm: conn.replace_route_table_association_with_assoc( "rtbassoc-1234abcd", route_table1.id ) @@ -302,7 +301,7 @@ def test_route_table_replace_route_table_association(): cm.exception.request_id.should_not.be.none # Error: Replace association with invalid route table ID - with assert_raises(EC2ResponseError) as cm: + with pytest.raises(EC2ResponseError) as cm: conn.replace_route_table_association_with_assoc(association_id2, "rtb-1234abcd") cm.exception.code.should.equal("InvalidRouteTableID.NotFound") cm.exception.status.should.equal(400) @@ -389,7 +388,7 @@ def test_routes_additional(): ] new_routes.should.have.length_of(0) - with assert_raises(EC2ResponseError) as cm: + with pytest.raises(EC2ResponseError) as cm: conn.delete_route(main_route_table.id, ROUTE_CIDR) cm.exception.code.should.equal("InvalidRoute.NotFound") cm.exception.status.should.equal(400) @@ -442,7 +441,7 @@ def get_target_route(): target_route.state.should.equal("active") target_route.destination_cidr_block.should.equal(ROUTE_CIDR) - with assert_raises(EC2ResponseError) as cm: + with pytest.raises(EC2ResponseError) as cm: conn.replace_route("rtb-1234abcd", ROUTE_CIDR, gateway_id=igw.id) cm.exception.code.should.equal("InvalidRouteTableID.NotFound") cm.exception.status.should.equal(400) @@ -571,7 +570,7 @@ def test_create_route_with_invalid_destination_cidr_block_parameter(): internet_gateway.reload() destination_cidr_block = "1000.1.0.0/20" - with assert_raises(ClientError) as ex: + with pytest.raises(ClientError) as ex: route = route_table.create_route( DestinationCidrBlock=destination_cidr_block, GatewayId=internet_gateway.id ) diff --git a/tests/test_ec2/test_security_groups.py b/tests/test_ec2/test_security_groups.py index 10885df189d4..9f8c1aecd5bd 100644 --- a/tests/test_ec2/test_security_groups.py +++ b/tests/test_ec2/test_security_groups.py @@ -4,8 +4,7 @@ import json # Ensure 'assert_raises' context manager support for Python 2.6 -import tests.backport_assert_raises # 
noqa -from nose.tools import assert_raises +import pytest import boto3 import boto @@ -20,7 +19,7 @@ def test_create_and_describe_security_group(): conn = boto.connect_ec2("the_key", "the_secret") - with assert_raises(EC2ResponseError) as ex: + with pytest.raises(EC2ResponseError) as ex: security_group = conn.create_security_group( "test security group", "this is a test security group", dry_run=True ) @@ -38,7 +37,7 @@ def test_create_and_describe_security_group(): security_group.description.should.equal("this is a test security group") # Trying to create another group with the same name should throw an error - with assert_raises(EC2ResponseError) as cm: + with pytest.raises(EC2ResponseError) as cm: conn.create_security_group( "test security group", "this is a test security group" ) @@ -57,7 +56,7 @@ def test_create_and_describe_security_group(): def test_create_security_group_without_description_raises_error(): conn = boto.connect_ec2("the_key", "the_secret") - with assert_raises(EC2ResponseError) as cm: + with pytest.raises(EC2ResponseError) as cm: conn.create_security_group("test security group", "") cm.exception.code.should.equal("MissingParameter") cm.exception.status.should.equal(400) @@ -87,7 +86,7 @@ def test_create_and_describe_vpc_security_group(): # Trying to create another group with the same name in the same VPC should # throw an error - with assert_raises(EC2ResponseError) as cm: + with pytest.raises(EC2ResponseError) as cm: conn.create_security_group( "test security group", "this is a test security group", vpc_id ) @@ -146,14 +145,14 @@ def test_deleting_security_groups(): conn.get_all_security_groups().should.have.length_of(4) # Deleting a group that doesn't exist should throw an error - with assert_raises(EC2ResponseError) as cm: + with pytest.raises(EC2ResponseError) as cm: conn.delete_security_group("foobar") cm.exception.code.should.equal("InvalidGroup.NotFound") cm.exception.status.should.equal(400) cm.exception.request_id.should_not.be.none # Delete by name - with assert_raises(EC2ResponseError) as ex: + with pytest.raises(EC2ResponseError) as ex: conn.delete_security_group("test2", dry_run=True) ex.exception.error_code.should.equal("DryRunOperation") ex.exception.status.should.equal(400) @@ -184,7 +183,7 @@ def test_authorize_ip_range_and_revoke(): conn = boto.connect_ec2("the_key", "the_secret") security_group = conn.create_security_group("test", "test") - with assert_raises(EC2ResponseError) as ex: + with pytest.raises(EC2ResponseError) as ex: success = security_group.authorize( ip_protocol="tcp", from_port="22", @@ -208,7 +207,7 @@ def test_authorize_ip_range_and_revoke(): security_group.rules[0].grants[0].cidr_ip.should.equal("123.123.123.123/32") # Wrong Cidr should throw error - with assert_raises(EC2ResponseError) as cm: + with pytest.raises(EC2ResponseError) as cm: security_group.revoke( ip_protocol="tcp", from_port="22", @@ -220,7 +219,7 @@ def test_authorize_ip_range_and_revoke(): cm.exception.request_id.should_not.be.none # Actually revoke - with assert_raises(EC2ResponseError) as ex: + with pytest.raises(EC2ResponseError) as ex: security_group.revoke( ip_protocol="tcp", from_port="22", @@ -246,7 +245,7 @@ def test_authorize_ip_range_and_revoke(): "testegress", "testegress", vpc_id="vpc-3432589" ) - with assert_raises(EC2ResponseError) as ex: + with pytest.raises(EC2ResponseError) as ex: success = conn.authorize_security_group_egress( egress_security_group.id, "tcp", @@ -285,7 +284,7 @@ def test_authorize_ip_range_and_revoke(): 
).should.throw(EC2ResponseError) # Actually revoke - with assert_raises(EC2ResponseError) as ex: + with pytest.raises(EC2ResponseError) as ex: conn.revoke_security_group_egress( egress_security_group.id, "tcp", @@ -335,7 +334,7 @@ def test_authorize_other_group_and_revoke(): security_group.rules[0].grants[0].group_id.should.equal(other_security_group.id) # Wrong source group should throw error - with assert_raises(EC2ResponseError) as cm: + with pytest.raises(EC2ResponseError) as cm: security_group.revoke( ip_protocol="tcp", from_port="22", to_port="2222", src_group=wrong_group ) @@ -440,7 +439,7 @@ def test_get_all_security_groups(): resp.should.have.length_of(1) resp[0].id.should.equal(sg1.id) - with assert_raises(EC2ResponseError) as cm: + with pytest.raises(EC2ResponseError) as cm: conn.get_all_security_groups(groupnames=["does_not_exist"]) cm.exception.code.should.equal("InvalidGroup.NotFound") cm.exception.status.should.equal(400) @@ -469,7 +468,7 @@ def test_get_all_security_groups(): def test_authorize_bad_cidr_throws_invalid_parameter_value(): conn = boto.connect_ec2("the_key", "the_secret") security_group = conn.create_security_group("test", "test") - with assert_raises(EC2ResponseError) as cm: + with pytest.raises(EC2ResponseError) as cm: security_group.authorize( ip_protocol="tcp", from_port="22", to_port="2222", cidr_ip="123.123.123.123" ) @@ -485,7 +484,7 @@ def test_security_group_tagging(): sg = conn.create_security_group("test-sg", "Test SG", vpc.id) - with assert_raises(EC2ResponseError) as ex: + with pytest.raises(EC2ResponseError) as ex: sg.add_tag("Test", "Tag", dry_run=True) ex.exception.error_code.should.equal("DryRunOperation") ex.exception.status.should.equal(400) @@ -534,7 +533,7 @@ def test_sec_group_rule_limit(): other_sg = ec2_conn.create_security_group("test_2", "test_other") # INGRESS - with assert_raises(EC2ResponseError) as cm: + with pytest.raises(EC2ResponseError) as cm: ec2_conn.authorize_security_group( group_id=sg.id, ip_protocol="-1", @@ -556,13 +555,13 @@ def test_sec_group_rule_limit(): ) success.should.be.true # verify that we cannot authorize past the limit for a CIDR IP - with assert_raises(EC2ResponseError) as cm: + with pytest.raises(EC2ResponseError) as cm: ec2_conn.authorize_security_group( group_id=sg.id, ip_protocol="-1", cidr_ip=["100.0.0.0/0"] ) cm.exception.error_code.should.equal("RulesPerSecurityGroupLimitExceeded") # verify that we cannot authorize past the limit for a different sec group - with assert_raises(EC2ResponseError) as cm: + with pytest.raises(EC2ResponseError) as cm: ec2_conn.authorize_security_group( group_id=sg.id, ip_protocol="-1", src_security_group_group_id=other_sg.id ) @@ -581,13 +580,13 @@ def test_sec_group_rule_limit(): group_id=sg.id, ip_protocol="-1", cidr_ip="{0}.0.0.0/0".format(i) ) # verify that we cannot authorize past the limit for a CIDR IP - with assert_raises(EC2ResponseError) as cm: + with pytest.raises(EC2ResponseError) as cm: ec2_conn.authorize_security_group_egress( group_id=sg.id, ip_protocol="-1", cidr_ip="101.0.0.0/0" ) cm.exception.error_code.should.equal("RulesPerSecurityGroupLimitExceeded") # verify that we cannot authorize past the limit for a different sec group - with assert_raises(EC2ResponseError) as cm: + with pytest.raises(EC2ResponseError) as cm: ec2_conn.authorize_security_group_egress( group_id=sg.id, ip_protocol="-1", src_group_id=other_sg.id ) @@ -605,7 +604,7 @@ def test_sec_group_rule_limit_vpc(): other_sg = ec2_conn.create_security_group("test_2", "test", vpc_id=vpc.id) # 
INGRESS - with assert_raises(EC2ResponseError) as cm: + with pytest.raises(EC2ResponseError) as cm: ec2_conn.authorize_security_group( group_id=sg.id, ip_protocol="-1", @@ -627,13 +626,13 @@ def test_sec_group_rule_limit_vpc(): ) # verify that we cannot authorize past the limit for a CIDR IP success.should.be.true - with assert_raises(EC2ResponseError) as cm: + with pytest.raises(EC2ResponseError) as cm: ec2_conn.authorize_security_group( group_id=sg.id, ip_protocol="-1", cidr_ip=["100.0.0.0/0"] ) cm.exception.error_code.should.equal("RulesPerSecurityGroupLimitExceeded") # verify that we cannot authorize past the limit for a different sec group - with assert_raises(EC2ResponseError) as cm: + with pytest.raises(EC2ResponseError) as cm: ec2_conn.authorize_security_group( group_id=sg.id, ip_protocol="-1", src_security_group_group_id=other_sg.id ) @@ -652,13 +651,13 @@ def test_sec_group_rule_limit_vpc(): group_id=sg.id, ip_protocol="-1", cidr_ip="{0}.0.0.0/0".format(i) ) # verify that we cannot authorize past the limit for a CIDR IP - with assert_raises(EC2ResponseError) as cm: + with pytest.raises(EC2ResponseError) as cm: ec2_conn.authorize_security_group_egress( group_id=sg.id, ip_protocol="-1", cidr_ip="50.0.0.0/0" ) cm.exception.error_code.should.equal("RulesPerSecurityGroupLimitExceeded") # verify that we cannot authorize past the limit for a different sec group - with assert_raises(EC2ResponseError) as cm: + with pytest.raises(EC2ResponseError) as cm: ec2_conn.authorize_security_group_egress( group_id=sg.id, ip_protocol="-1", src_group_id=other_sg.id ) @@ -689,7 +688,7 @@ def test_add_same_rule_twice_throws_error(): ] sg.authorize_ingress(IpPermissions=ip_permissions) - with assert_raises(ClientError) as ex: + with pytest.raises(ClientError) as ex: sg.authorize_ingress(IpPermissions=ip_permissions) @@ -761,7 +760,7 @@ def test_security_group_tagging_boto3(): sg = conn.create_security_group(GroupName="test-sg", Description="Test SG") - with assert_raises(ClientError) as ex: + with pytest.raises(ClientError) as ex: conn.create_tags( Resources=[sg["GroupId"]], Tags=[{"Key": "Test", "Value": "Tag"}], @@ -926,7 +925,7 @@ def test_get_all_security_groups_filter_with_same_vpc_id(): ) security_groups.should.have.length_of(1) - with assert_raises(EC2ResponseError) as cm: + with pytest.raises(EC2ResponseError) as cm: conn.get_all_security_groups(group_ids=["does_not_exist"]) cm.exception.code.should.equal("InvalidGroup.NotFound") cm.exception.status.should.equal(400) diff --git a/tests/test_ec2/test_spot_instances.py b/tests/test_ec2/test_spot_instances.py index 5eb5a6e480b3..c7b965918a7f 100644 --- a/tests/test_ec2/test_spot_instances.py +++ b/tests/test_ec2/test_spot_instances.py @@ -1,5 +1,5 @@ from __future__ import unicode_literals -from nose.tools import assert_raises +import pytest import datetime import boto @@ -31,7 +31,7 @@ def test_request_spot_instances(): start = iso_8601_datetime_with_milliseconds(start_dt) end = iso_8601_datetime_with_milliseconds(end_dt) - with assert_raises(ClientError) as ex: + with pytest.raises(ClientError) as ex: request = conn.request_spot_instances( SpotPrice="0.5", InstanceCount=1, @@ -155,7 +155,7 @@ def test_cancel_spot_instance_request(): requests = conn.get_all_spot_instance_requests() requests.should.have.length_of(1) - with assert_raises(EC2ResponseError) as ex: + with pytest.raises(EC2ResponseError) as ex: conn.cancel_spot_instance_requests([requests[0].id], dry_run=True) ex.exception.error_code.should.equal("DryRunOperation") 
ex.exception.status.should.equal(400) diff --git a/tests/test_ec2/test_subnets.py b/tests/test_ec2/test_subnets.py index 416235f434a7..1d44999aeb46 100644 --- a/tests/test_ec2/test_subnets.py +++ b/tests/test_ec2/test_subnets.py @@ -1,8 +1,7 @@ from __future__ import unicode_literals # Ensure 'assert_raises' context manager support for Python 2.6 -import tests.backport_assert_raises # noqa -from nose.tools import assert_raises +import pytest import boto3 import boto @@ -30,7 +29,7 @@ def test_subnets(): all_subnets = conn.get_all_subnets() all_subnets.should.have.length_of(0 + len(ec2.get_all_zones())) - with assert_raises(EC2ResponseError) as cm: + with pytest.raises(EC2ResponseError) as cm: conn.delete_subnet(subnet.id) cm.exception.code.should.equal("InvalidSubnetID.NotFound") cm.exception.status.should.equal(400) @@ -41,7 +40,7 @@ def test_subnets(): def test_subnet_create_vpc_validation(): conn = boto.connect_vpc("the_key", "the_secret") - with assert_raises(EC2ResponseError) as cm: + with pytest.raises(EC2ResponseError) as cm: conn.create_subnet("vpc-abcd1234", "10.0.0.0/18") cm.exception.code.should.equal("InvalidVpcID.NotFound") cm.exception.status.should.equal(400) @@ -202,7 +201,7 @@ def test_modify_subnet_attribute_validation(): VpcId=vpc.id, CidrBlock="10.0.0.0/24", AvailabilityZone="us-west-1a" ) - with assert_raises(ParamValidationError): + with pytest.raises(ParamValidationError): client.modify_subnet_attribute( SubnetId=subnet.id, MapPublicIpOnLaunch={"Value": "invalid"} ) @@ -228,7 +227,7 @@ def test_subnet_get_by_id(): subnetA.id.should.be.within(subnets_by_id) subnetB1.id.should.be.within(subnets_by_id) - with assert_raises(EC2ResponseError) as cm: + with pytest.raises(EC2ResponseError) as cm: conn.get_all_subnets(subnet_ids=["subnet-does_not_exist"]) cm.exception.code.should.equal("InvalidSubnetID.NotFound") cm.exception.status.should.equal(400) @@ -386,7 +385,7 @@ def test_create_subnet_with_invalid_availability_zone(): vpc = ec2.create_vpc(CidrBlock="10.0.0.0/16") subnet_availability_zone = "asfasfas" - with assert_raises(ClientError) as ex: + with pytest.raises(ClientError) as ex: subnet = client.create_subnet( VpcId=vpc.id, CidrBlock="10.0.0.0/24", @@ -409,7 +408,7 @@ def test_create_subnet_with_invalid_cidr_range(): vpc.is_default.shouldnt.be.ok subnet_cidr_block = "10.1.0.0/20" - with assert_raises(ClientError) as ex: + with pytest.raises(ClientError) as ex: subnet = ec2.create_subnet(VpcId=vpc.id, CidrBlock=subnet_cidr_block) str(ex.exception).should.equal( "An error occurred (InvalidSubnet.Range) when calling the CreateSubnet " @@ -444,7 +443,7 @@ def test_create_subnet_with_invalid_cidr_block_parameter(): vpc.is_default.shouldnt.be.ok subnet_cidr_block = "1000.1.0.0/20" - with assert_raises(ClientError) as ex: + with pytest.raises(ClientError) as ex: subnet = ec2.create_subnet(VpcId=vpc.id, CidrBlock=subnet_cidr_block) str(ex.exception).should.equal( "An error occurred (InvalidParameterValue) when calling the CreateSubnet " @@ -503,7 +502,7 @@ def test_create_subnets_with_overlapping_cidr_blocks(): vpc.is_default.shouldnt.be.ok subnet_cidr_block = "10.0.0.0/24" - with assert_raises(ClientError) as ex: + with pytest.raises(ClientError) as ex: subnet1 = ec2.create_subnet(VpcId=vpc.id, CidrBlock=subnet_cidr_block) subnet2 = ec2.create_subnet(VpcId=vpc.id, CidrBlock=subnet_cidr_block) str(ex.exception).should.equal( diff --git a/tests/test_ec2/test_tags.py b/tests/test_ec2/test_tags.py index 8480f8bc0a50..918b02623d5e 100644 --- a/tests/test_ec2/test_tags.py +++ 
b/tests/test_ec2/test_tags.py @@ -1,5 +1,5 @@ from __future__ import unicode_literals -from nose.tools import assert_raises +import pytest import itertools @@ -11,7 +11,7 @@ import sure # noqa from moto import mock_ec2_deprecated, mock_ec2 -from nose.tools import assert_raises +import pytest @mock_ec2_deprecated @@ -20,7 +20,7 @@ def test_add_tag(): reservation = conn.run_instances("ami-1234abcd") instance = reservation.instances[0] - with assert_raises(EC2ResponseError) as ex: + with pytest.raises(EC2ResponseError) as ex: instance.add_tag("a key", "some value", dry_run=True) ex.exception.error_code.should.equal("DryRunOperation") ex.exception.status.should.equal(400) @@ -51,7 +51,7 @@ def test_remove_tag(): tag.name.should.equal("a key") tag.value.should.equal("some value") - with assert_raises(EC2ResponseError) as ex: + with pytest.raises(EC2ResponseError) as ex: instance.remove_tag("a key", dry_run=True) ex.exception.error_code.should.equal("DryRunOperation") ex.exception.status.should.equal(400) @@ -106,7 +106,7 @@ def test_create_tags(): "blank key": "", } - with assert_raises(EC2ResponseError) as ex: + with pytest.raises(EC2ResponseError) as ex: conn.create_tags(instance.id, tag_dict, dry_run=True) ex.exception.error_code.should.equal("DryRunOperation") ex.exception.status.should.equal(400) @@ -131,14 +131,14 @@ def test_tag_limit_exceeded(): for i in range(51): tag_dict["{0:02d}".format(i + 1)] = "" - with assert_raises(EC2ResponseError) as cm: + with pytest.raises(EC2ResponseError) as cm: conn.create_tags(instance.id, tag_dict) cm.exception.code.should.equal("TagLimitExceeded") cm.exception.status.should.equal(400) cm.exception.request_id.should_not.be.none instance.add_tag("a key", "a value") - with assert_raises(EC2ResponseError) as cm: + with pytest.raises(EC2ResponseError) as cm: conn.create_tags(instance.id, tag_dict) cm.exception.code.should.equal("TagLimitExceeded") cm.exception.status.should.equal(400) @@ -157,7 +157,7 @@ def test_invalid_parameter_tag_null(): reservation = conn.run_instances("ami-1234abcd") instance = reservation.instances[0] - with assert_raises(EC2ResponseError) as cm: + with pytest.raises(EC2ResponseError) as cm: instance.add_tag("a key", None) cm.exception.code.should.equal("InvalidParameterValue") cm.exception.status.should.equal(400) @@ -167,13 +167,13 @@ def test_invalid_parameter_tag_null(): @mock_ec2_deprecated def test_invalid_id(): conn = boto.connect_ec2("the_key", "the_secret") - with assert_raises(EC2ResponseError) as cm: + with pytest.raises(EC2ResponseError) as cm: conn.create_tags("ami-blah", {"key": "tag"}) cm.exception.code.should.equal("InvalidID") cm.exception.status.should.equal(400) cm.exception.request_id.should_not.be.none - with assert_raises(EC2ResponseError) as cm: + with pytest.raises(EC2ResponseError) as cm: conn.create_tags("blah-blah", {"key": "tag"}) cm.exception.code.should.equal("InvalidID") cm.exception.status.should.equal(400) @@ -449,7 +449,7 @@ def test_create_tag_empty_resource(): # create ec2 client in us-west-1 client = boto3.client("ec2", region_name="us-west-1") # create tag with empty resource - with assert_raises(ClientError) as ex: + with pytest.raises(ClientError) as ex: client.create_tags(Resources=[], Tags=[{"Key": "Value"}]) ex.exception.response["Error"]["Code"].should.equal("MissingParameter") ex.exception.response["Error"]["Message"].should.equal( @@ -462,7 +462,7 @@ def test_delete_tag_empty_resource(): # create ec2 client in us-west-1 client = boto3.client("ec2", region_name="us-west-1") # delete tag 
with empty resource - with assert_raises(ClientError) as ex: + with pytest.raises(ClientError) as ex: client.delete_tags(Resources=[], Tags=[{"Key": "Value"}]) ex.exception.response["Error"]["Code"].should.equal("MissingParameter") ex.exception.response["Error"]["Message"].should.equal( diff --git a/tests/test_ec2/test_vpc_peering.py b/tests/test_ec2/test_vpc_peering.py index b535518dedd6..f852ab3cab33 100644 --- a/tests/test_ec2/test_vpc_peering.py +++ b/tests/test_ec2/test_vpc_peering.py @@ -1,8 +1,7 @@ from __future__ import unicode_literals # Ensure 'assert_raises' context manager support for Python 2.6 -import tests.backport_assert_raises -from nose.tools import assert_raises +import pytest from moto.ec2.exceptions import EC2ClientError from botocore.exceptions import ClientError @@ -49,7 +48,7 @@ def test_vpc_peering_connections_accept(): vpc_pcx = conn.accept_vpc_peering_connection(vpc_pcx.id) vpc_pcx._status.code.should.equal("active") - with assert_raises(EC2ResponseError) as cm: + with pytest.raises(EC2ResponseError) as cm: conn.reject_vpc_peering_connection(vpc_pcx.id) cm.exception.code.should.equal("InvalidStateTransition") cm.exception.status.should.equal(400) @@ -69,7 +68,7 @@ def test_vpc_peering_connections_reject(): verdict = conn.reject_vpc_peering_connection(vpc_pcx.id) verdict.should.equal(True) - with assert_raises(EC2ResponseError) as cm: + with pytest.raises(EC2ResponseError) as cm: conn.accept_vpc_peering_connection(vpc_pcx.id) cm.exception.code.should.equal("InvalidStateTransition") cm.exception.status.should.equal(400) @@ -93,7 +92,7 @@ def test_vpc_peering_connections_delete(): all_vpc_pcxs.should.have.length_of(1) all_vpc_pcxs[0]._status.code.should.equal("deleted") - with assert_raises(EC2ResponseError) as cm: + with pytest.raises(EC2ResponseError) as cm: conn.delete_vpc_peering_connection("pcx-1234abcd") cm.exception.code.should.equal("InvalidVpcPeeringConnectionId.NotFound") cm.exception.status.should.equal(400) @@ -129,7 +128,7 @@ def test_vpc_peering_connections_cross_region_fail(): ec2_apn1 = boto3.resource("ec2", region_name="ap-northeast-1") vpc_apn1 = ec2_apn1.create_vpc(CidrBlock="10.20.0.0/16") # create peering wrong region with no vpc - with assert_raises(ClientError) as cm: + with pytest.raises(ClientError) as cm: ec2_usw1.create_vpc_peering_connection( VpcId=vpc_usw1.id, PeerVpcId=vpc_apn1.id, PeerRegion="ap-northeast-2" ) @@ -253,7 +252,7 @@ def test_vpc_peering_connections_cross_region_accept_wrong_region(): # accept wrong peering from us-west-1 which will raise error ec2_apn1 = boto3.client("ec2", region_name="ap-northeast-1") ec2_usw1 = boto3.client("ec2", region_name="us-west-1") - with assert_raises(ClientError) as cm: + with pytest.raises(ClientError) as cm: ec2_usw1.accept_vpc_peering_connection(VpcPeeringConnectionId=vpc_pcx_usw1.id) cm.exception.response["Error"]["Code"].should.equal("OperationNotPermitted") exp_msg = ( @@ -278,7 +277,7 @@ def test_vpc_peering_connections_cross_region_reject_wrong_region(): # reject wrong peering from us-west-1 which will raise error ec2_apn1 = boto3.client("ec2", region_name="ap-northeast-1") ec2_usw1 = boto3.client("ec2", region_name="us-west-1") - with assert_raises(ClientError) as cm: + with pytest.raises(ClientError) as cm: ec2_usw1.reject_vpc_peering_connection(VpcPeeringConnectionId=vpc_pcx_usw1.id) cm.exception.response["Error"]["Code"].should.equal("OperationNotPermitted") exp_msg = ( diff --git a/tests/test_ec2/test_vpcs.py b/tests/test_ec2/test_vpcs.py index 8ad85072c8f3..32e59a91bec9 100644 
--- a/tests/test_ec2/test_vpcs.py +++ b/tests/test_ec2/test_vpcs.py @@ -1,8 +1,7 @@ from __future__ import unicode_literals # Ensure 'assert_raises' context manager support for Python 2.6 -import tests.backport_assert_raises # noqa -from nose.tools import assert_raises +import pytest from moto.ec2.exceptions import EC2ClientError from botocore.exceptions import ClientError @@ -31,7 +30,7 @@ def test_vpcs(): all_vpcs = conn.get_all_vpcs() all_vpcs.should.have.length_of(1) - with assert_raises(EC2ResponseError) as cm: + with pytest.raises(EC2ResponseError) as cm: conn.delete_vpc("vpc-1234abcd") cm.exception.code.should.equal("InvalidVpcID.NotFound") cm.exception.status.should.equal(400) @@ -114,7 +113,7 @@ def test_vpc_get_by_id(): vpc1.id.should.be.within(vpc_ids) vpc2.id.should.be.within(vpc_ids) - with assert_raises(EC2ResponseError) as cm: + with pytest.raises(EC2ResponseError) as cm: conn.get_all_vpcs(vpc_ids=["vpc-does_not_exist"]) cm.exception.code.should.equal("InvalidVpcID.NotFound") cm.exception.status.should.equal(400) @@ -402,7 +401,7 @@ def test_associate_vpc_ipv4_cidr_block(): ) # Check error on adding 6th association. - with assert_raises(ClientError) as ex: + with pytest.raises(ClientError) as ex: response = ec2.meta.client.associate_vpc_cidr_block( VpcId=vpc.id, CidrBlock="10.10.50.0/22" ) @@ -447,7 +446,7 @@ def test_disassociate_vpc_ipv4_cidr_block(): ) # Error attempting to delete a non-existent CIDR_BLOCK association - with assert_raises(ClientError) as ex: + with pytest.raises(ClientError) as ex: response = ec2.meta.client.disassociate_vpc_cidr_block( AssociationId="vpc-cidr-assoc-BORING123" ) @@ -469,7 +468,7 @@ def test_disassociate_vpc_ipv4_cidr_block(): {}, )["AssociationId"] - with assert_raises(ClientError) as ex: + with pytest.raises(ClientError) as ex: response = ec2.meta.client.disassociate_vpc_cidr_block( AssociationId=vpc_base_cidr_assoc_id ) @@ -549,7 +548,7 @@ def test_vpc_associate_ipv6_cidr_block(): ipv6_cidr_block_association_set["AssociationId"].should.contain("vpc-cidr-assoc") # Test Fail on adding 2nd IPV6 association - AWS only allows 1 at this time! 
- with assert_raises(ClientError) as ex: + with pytest.raises(ClientError) as ex: response = ec2.meta.client.associate_vpc_cidr_block( VpcId=vpc.id, AmazonProvidedIpv6CidrBlock=True ) @@ -657,7 +656,7 @@ def test_create_vpc_with_invalid_cidr_block_parameter(): ec2 = boto3.resource("ec2", region_name="us-west-1") vpc_cidr_block = "1000.1.0.0/20" - with assert_raises(ClientError) as ex: + with pytest.raises(ClientError) as ex: vpc = ec2.create_vpc(CidrBlock=vpc_cidr_block) str(ex.exception).should.equal( "An error occurred (InvalidParameterValue) when calling the CreateVpc " @@ -672,7 +671,7 @@ def test_create_vpc_with_invalid_cidr_range(): ec2 = boto3.resource("ec2", region_name="us-west-1") vpc_cidr_block = "10.1.0.0/29" - with assert_raises(ClientError) as ex: + with pytest.raises(ClientError) as ex: vpc = ec2.create_vpc(CidrBlock=vpc_cidr_block) str(ex.exception).should.equal( "An error occurred (InvalidVpc.Range) when calling the CreateVpc " diff --git a/tests/test_ec2/test_vpn_connections.py b/tests/test_ec2/test_vpn_connections.py index 4360c8b2eaa6..ca8897417345 100644 --- a/tests/test_ec2/test_vpn_connections.py +++ b/tests/test_ec2/test_vpn_connections.py @@ -1,11 +1,11 @@ from __future__ import unicode_literals + import boto import boto3 -from nose.tools import assert_raises +import pytest import sure # noqa from boto.exception import EC2ResponseError - -from moto import mock_ec2_deprecated, mock_ec2 +from moto import mock_ec2, mock_ec2_deprecated @mock_ec2_deprecated @@ -35,7 +35,7 @@ def test_delete_vpn_connections(): @mock_ec2_deprecated def test_delete_vpn_connections_bad_id(): conn = boto.connect_vpc("the_key", "the_secret") - with assert_raises(EC2ResponseError): + with pytest.raises(EC2ResponseError): conn.delete_vpn_connection("vpn-0123abcd") diff --git a/tests/test_ecr/__init__.py b/tests/test_ecr/__init__.py new file mode 100644 index 000000000000..08a1c1568c9c --- /dev/null +++ b/tests/test_ecr/__init__.py @@ -0,0 +1 @@ +# This file is intentionally left blank. diff --git a/tests/test_ecr/test_ecr_boto3.py b/tests/test_ecr/test_ecr_boto3.py index fd678f661c8f..e44307bee44a 100644 --- a/tests/test_ecr/test_ecr_boto3.py +++ b/tests/test_ecr/test_ecr_boto3.py @@ -15,7 +15,7 @@ from dateutil.tz import tzlocal from moto import mock_ecr -from nose import SkipTest +from unittest import SkipTest def _create_image_digest(contents=None): diff --git a/tests/test_ecs/__init__.py b/tests/test_ecs/__init__.py new file mode 100644 index 000000000000..08a1c1568c9c --- /dev/null +++ b/tests/test_ecs/__init__.py @@ -0,0 +1 @@ +# This file is intentionally left blank. 
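A note on the empty __init__.py files this patch introduces: marking each tests/ subdirectory as a package gives every test module a fully qualified dotted name. A plausible motivation (inferred, not stated in the patch) is that pytest's default prepend import mode cannot collect two same-named test modules from different directories unless those directories are packages. Sketch of the resulting layout; the import line is added here for illustration and does not appear in the diff:

    tests/
        test_ecs/
            __init__.py          # added by this patch; marks the directory as a package
            test_ecs_boto3.py

    # With the package marker in place the module imports under a stable dotted path:
    #     from tests.test_ecs import test_ecs_boto3
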
diff --git a/tests/test_ecs/test_ecs_boto3.py b/tests/test_ecs/test_ecs_boto3.py index 2ef801807be0..afec17da26c3 100644 --- a/tests/test_ecs/test_ecs_boto3.py +++ b/tests/test_ecs/test_ecs_boto3.py @@ -10,7 +10,7 @@ from moto import mock_ecs from moto import mock_ec2 -from nose.tools import assert_raises +import pytest @mock_ecs @@ -860,7 +860,7 @@ def test_deregister_container_instance(): containerInstances=[container_instance_id], startedBy="moto", ) - with assert_raises(Exception) as e: + with pytest.raises(Exception) as e: ecs_client.deregister_container_instance( cluster=test_cluster_name, containerInstance=container_instance_id ).should.have.raised(Exception) @@ -952,7 +952,7 @@ def test_describe_container_instances(): instance.keys().should.contain("pendingTasksCount") instance["registeredAt"].should.be.a("datetime.datetime") - with assert_raises(ClientError) as e: + with pytest.raises(ClientError) as e: ecs_client.describe_container_instances( cluster=test_cluster_name, containerInstances=[] ) diff --git a/tests/test_elb/__init__.py b/tests/test_elb/__init__.py new file mode 100644 index 000000000000..08a1c1568c9c --- /dev/null +++ b/tests/test_elb/__init__.py @@ -0,0 +1 @@ +# This file is intentionally left blank. diff --git a/tests/test_elb/test_elb.py b/tests/test_elb/test_elb.py index 1583ea544625..dd51e8f6064e 100644 --- a/tests/test_elb/test_elb.py +++ b/tests/test_elb/test_elb.py @@ -11,7 +11,7 @@ ) from botocore.exceptions import ClientError from boto.exception import BotoServerError -from nose.tools import assert_raises +import pytest import sure # noqa from moto import mock_elb, mock_ec2, mock_elb_deprecated, mock_ec2_deprecated @@ -123,7 +123,7 @@ def test_create_and_delete_boto3_support(): def test_create_load_balancer_with_no_listeners_defined(): client = boto3.client("elb", region_name="us-east-1") - with assert_raises(ClientError): + with pytest.raises(ClientError): client.create_load_balancer( LoadBalancerName="my-lb", Listeners=[], @@ -180,7 +180,7 @@ def test_apply_security_groups_to_load_balancer(): assert balancer["SecurityGroups"] == [security_group.id] # Using a not-real security group raises an error - with assert_raises(ClientError) as error: + with pytest.raises(ClientError) as error: response = client.apply_security_groups_to_load_balancer( LoadBalancerName="my-lb", SecurityGroups=["not-really-a-security-group"] ) @@ -255,7 +255,7 @@ def test_create_and_delete_listener_boto3_support(): balancer["ListenerDescriptions"][1]["Listener"]["InstancePort"].should.equal(8443) # Creating this listener with an conflicting definition throws error - with assert_raises(ClientError): + with pytest.raises(ClientError): client.create_load_balancer_listeners( LoadBalancerName="my-lb", Listeners=[ diff --git a/tests/test_elbv2/__init__.py b/tests/test_elbv2/__init__.py new file mode 100644 index 000000000000..08a1c1568c9c --- /dev/null +++ b/tests/test_elbv2/__init__.py @@ -0,0 +1 @@ +# This file is intentionally left blank. 
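The recurring edit across these test modules is mechanical: nose's assert_raises context manager becomes pytest.raises. The two managers are close but not identical; pytest's ExceptionInfo result exposes the raised exception instance as .value (alongside .type and .traceback), whereas the unittest-backed nose helper exposed it as .exception. A self-contained sketch of the pytest idiom, using a stand-in exception class rather than anything from moto or boto:

    import pytest

    class FakeResponseError(Exception):
        # Stand-in for boto.exception.EC2ResponseError; illustrative only.
        def __init__(self, code):
            super(FakeResponseError, self).__init__(code)
            self.code = code

    def create_duplicate_key_pair():
        raise FakeResponseError("InvalidKeyPair.Duplicate")

    def test_duplicate_key_pair_is_rejected():
        with pytest.raises(FakeResponseError) as cm:
            create_duplicate_key_pair()
        # The caught exception itself lives on cm.value, not on cm directly.
        assert cm.value.code == "InvalidKeyPair.Duplicate"
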
diff --git a/tests/test_elbv2/test_elbv2.py b/tests/test_elbv2/test_elbv2.py index 5ab85284dda0..6ff48095daba 100644 --- a/tests/test_elbv2/test_elbv2.py +++ b/tests/test_elbv2/test_elbv2.py @@ -4,7 +4,7 @@ import boto3 import botocore from botocore.exceptions import ClientError, ParamValidationError -from nose.tools import assert_raises +import pytest import sure # noqa from moto import mock_elbv2, mock_ec2, mock_acm @@ -96,9 +96,9 @@ def test_describe_load_balancers(): response = conn.describe_load_balancers(Names=["my-lb"]) response.get("LoadBalancers")[0].get("LoadBalancerName").should.equal("my-lb") - with assert_raises(ClientError): + with pytest.raises(ClientError): conn.describe_load_balancers(LoadBalancerArns=["not-a/real/arn"]) - with assert_raises(ClientError): + with pytest.raises(ClientError): conn.describe_load_balancers(Names=["nope"]) @@ -132,7 +132,7 @@ def test_add_remove_tags(): lbs.should.have.length_of(1) lb = lbs[0] - with assert_raises(ClientError): + with pytest.raises(ClientError): conn.add_tags(ResourceArns=["missing-arn"], Tags=[{"Key": "a", "Value": "b"}]) conn.add_tags( @@ -274,7 +274,7 @@ def test_create_target_group_and_listeners(): load_balancer_arn = response.get("LoadBalancers")[0].get("LoadBalancerArn") # Can't create a target group with an invalid protocol - with assert_raises(ClientError): + with pytest.raises(ClientError): conn.create_target_group( Name="a-target", Protocol="HTTP", @@ -389,7 +389,7 @@ def test_create_target_group_and_listeners(): # Try to delete the target group and it fails because there's a # listener referencing it - with assert_raises(ClientError) as e: + with pytest.raises(ClientError) as e: conn.delete_target_group(TargetGroupArn=target_group.get("TargetGroupArn")) e.exception.operation_name.should.equal("DeleteTargetGroup") e.exception.args.should.equal( @@ -477,7 +477,7 @@ def test_create_invalid_target_group(): # Fail to create target group with name which length is 33 long_name = "A" * 33 - with assert_raises(ClientError): + with pytest.raises(ClientError): conn.create_target_group( Name=long_name, Protocol="HTTP", @@ -495,7 +495,7 @@ def test_create_invalid_target_group(): invalid_names = ["-name", "name-", "-name-", "example.com", "test@test", "Na--me"] for name in invalid_names: - with assert_raises(ClientError): + with pytest.raises(ClientError): conn.create_target_group( Name=name, Protocol="HTTP", @@ -941,7 +941,7 @@ def test_handle_listener_rules(): load_balancer_arn = response.get("LoadBalancers")[0].get("LoadBalancerArn") # Can't create a target group with an invalid protocol - with assert_raises(ClientError): + with pytest.raises(ClientError): conn.create_target_group( Name="a-target", Protocol="HTTP", @@ -1032,7 +1032,7 @@ def test_handle_listener_rules(): ) # test for PriorityInUse - with assert_raises(ClientError): + with pytest.raises(ClientError): conn.create_rule( ListenerArn=http_listener_arn, Priority=priority, @@ -1079,11 +1079,11 @@ def test_handle_listener_rules(): ) # test for invalid describe rule request - with assert_raises(ClientError): + with pytest.raises(ClientError): conn.describe_rules() - with assert_raises(ClientError): + with pytest.raises(ClientError): conn.describe_rules(RuleArns=[]) - with assert_raises(ClientError): + with pytest.raises(ClientError): conn.describe_rules( ListenerArn=http_listener_arn, RuleArns=[first_rule["RuleArn"]] ) @@ -1125,7 +1125,7 @@ def test_handle_listener_rules(): } ] ) - with assert_raises(ClientError): + with pytest.raises(ClientError): 
conn.set_rule_priorities( RulePriorities=[ {"RuleArn": first_rule["RuleArn"], "Priority": 999}, @@ -1141,7 +1141,7 @@ def test_handle_listener_rules(): # test for invalid action type safe_priority = 2 - with assert_raises(ClientError): + with pytest.raises(ClientError): conn.create_rule( ListenerArn=http_listener_arn, Priority=safe_priority, @@ -1160,7 +1160,7 @@ def test_handle_listener_rules(): # test for invalid action type safe_priority = 2 invalid_target_group_arn = target_group.get("TargetGroupArn") + "x" - with assert_raises(ClientError): + with pytest.raises(ClientError): conn.create_rule( ListenerArn=http_listener_arn, Priority=safe_priority, @@ -1173,7 +1173,7 @@ def test_handle_listener_rules(): # test for invalid condition field_name safe_priority = 2 - with assert_raises(ClientError): + with pytest.raises(ClientError): conn.create_rule( ListenerArn=http_listener_arn, Priority=safe_priority, @@ -1188,7 +1188,7 @@ def test_handle_listener_rules(): # test for emptry condition value safe_priority = 2 - with assert_raises(ClientError): + with pytest.raises(ClientError): conn.create_rule( ListenerArn=http_listener_arn, Priority=safe_priority, @@ -1203,7 +1203,7 @@ def test_handle_listener_rules(): # test for multiple condition value safe_priority = 2 - with assert_raises(ClientError): + with pytest.raises(ClientError): conn.create_rule( ListenerArn=http_listener_arn, Priority=safe_priority, @@ -1260,7 +1260,7 @@ def test_describe_invalid_target_group(): ) # Check error raises correctly - with assert_raises(ClientError): + with pytest.raises(ClientError): conn.describe_target_groups(Names=["invalid"]) @@ -1358,7 +1358,7 @@ def test_set_ip_address_type(): arn = response["LoadBalancers"][0]["LoadBalancerArn"] # Internal LBs cant be dualstack yet - with assert_raises(ClientError): + with pytest.raises(ClientError): client.set_ip_address_type(LoadBalancerArn=arn, IpAddressType="dualstack") # Create internet facing one @@ -1410,7 +1410,7 @@ def test_set_security_groups(): resp = client.describe_load_balancers(LoadBalancerArns=[arn]) len(resp["LoadBalancers"][0]["SecurityGroups"]).should.equal(2) - with assert_raises(ClientError): + with pytest.raises(ClientError): client.set_security_groups(LoadBalancerArn=arn, SecurityGroups=["non_existent"]) @@ -1451,11 +1451,11 @@ def test_set_subnets(): len(resp["LoadBalancers"][0]["AvailabilityZones"]).should.equal(3) # Only 1 AZ - with assert_raises(ClientError): + with pytest.raises(ClientError): client.set_subnets(LoadBalancerArn=arn, Subnets=[subnet1.id]) # Multiple subnets in same AZ - with assert_raises(ClientError): + with pytest.raises(ClientError): client.set_subnets( LoadBalancerArn=arn, Subnets=[subnet1.id, subnet2.id, subnet2.id] ) @@ -1644,7 +1644,7 @@ def test_modify_listener_http_to_https(): listener.certificate.should.equal(yahoo_arn) # No default cert - with assert_raises(ClientError): + with pytest.raises(ClientError): client.modify_listener( ListenerArn=listener_arn, Port=443, @@ -1655,7 +1655,7 @@ def test_modify_listener_http_to_https(): ) # Bad cert - with assert_raises(ClientError): + with pytest.raises(ClientError): client.modify_listener( ListenerArn=listener_arn, Port=443, @@ -1884,7 +1884,7 @@ def test_fixed_response_action_listener_rule_validates_status_code(): "MessageBody": "This page does not exist", }, } - with assert_raises(ParamValidationError): + with pytest.raises(ParamValidationError): conn.create_listener( LoadBalancerArn=load_balancer_arn, Protocol="HTTP", @@ -1934,7 +1934,7 @@ def 
test_fixed_response_action_listener_rule_validates_status_code(): "MessageBody": "This page does not exist", }, } - with assert_raises(ParamValidationError): + with pytest.raises(ParamValidationError): conn.create_listener( LoadBalancerArn=load_balancer_arn, Protocol="HTTP", @@ -1951,7 +1951,7 @@ def test_fixed_response_action_listener_rule_validates_status_code(): }, } - with assert_raises(ClientError) as invalid_status_code_exception: + with pytest.raises(ClientError) as invalid_status_code_exception: conn.create_listener( LoadBalancerArn=load_balancer_arn, Protocol="HTTP", @@ -1998,7 +1998,7 @@ def test_fixed_response_action_listener_rule_validates_content_type(): "StatusCode": "200", }, } - with assert_raises(ClientError) as invalid_content_type_exception: + with pytest.raises(ClientError) as invalid_content_type_exception: conn.create_listener( LoadBalancerArn=load_balancer_arn, Protocol="HTTP", diff --git a/tests/test_emr/__init__.py b/tests/test_emr/__init__.py new file mode 100644 index 000000000000..08a1c1568c9c --- /dev/null +++ b/tests/test_emr/__init__.py @@ -0,0 +1 @@ +# This file is intentionally left blank. diff --git a/tests/test_emr/test_emr_boto3.py b/tests/test_emr/test_emr_boto3.py index af6939f80b5f..a3308e3fe0f1 100644 --- a/tests/test_emr/test_emr_boto3.py +++ b/tests/test_emr/test_emr_boto3.py @@ -9,7 +9,7 @@ import six import sure # noqa from botocore.exceptions import ClientError -from nose.tools import assert_raises +import pytest from moto import mock_emr @@ -395,7 +395,7 @@ def test_run_job_flow(): @mock_emr def test_run_job_flow_with_invalid_params(): client = boto3.client("emr", region_name="us-east-1") - with assert_raises(ClientError) as ex: + with pytest.raises(ClientError) as ex: # cannot set both AmiVersion and ReleaseLabel args = deepcopy(run_job_flow_args) args["AmiVersion"] = "2.4" @@ -592,7 +592,7 @@ def _patch_cluster_id_placeholder_in_autoscaling_policy( def test_run_job_flow_with_custom_ami(): client = boto3.client("emr", region_name="us-east-1") - with assert_raises(ClientError) as ex: + with pytest.raises(ClientError) as ex: # CustomAmiId available in Amazon EMR 5.7.0 and later args = deepcopy(run_job_flow_args) args["CustomAmiId"] = "MyEmrCustomId" @@ -601,7 +601,7 @@ def test_run_job_flow_with_custom_ami(): ex.exception.response["Error"]["Code"].should.equal("ValidationException") ex.exception.response["Error"]["Message"].should.equal("Custom AMI is not allowed") - with assert_raises(ClientError) as ex: + with pytest.raises(ClientError) as ex: args = deepcopy(run_job_flow_args) args["CustomAmiId"] = "MyEmrCustomId" args["AmiVersion"] = "3.8.1" @@ -611,7 +611,7 @@ def test_run_job_flow_with_custom_ami(): "Custom AMI is not supported in this version of EMR" ) - with assert_raises(ClientError) as ex: + with pytest.raises(ClientError) as ex: # AMI version and release label exception raises before CustomAmi exception args = deepcopy(run_job_flow_args) args["CustomAmiId"] = "MyEmrCustomId" diff --git a/tests/test_events/__init__.py b/tests/test_events/__init__.py new file mode 100644 index 000000000000..08a1c1568c9c --- /dev/null +++ b/tests/test_events/__init__.py @@ -0,0 +1 @@ +# This file is intentionally left blank. 
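Many of the boto3-based tests above assert on the structured error payload that botocore raises. For reference, a self-contained sketch of inspecting a ClientError under pytest.raises; the error dict and operation name below are fabricated for illustration and do not come from any moto response:

    import pytest
    from botocore.exceptions import ClientError

    def put_events_over_limit():
        # Hypothetical failing call; the real tests exercise a moto-mocked client.
        error = {"Error": {"Code": "ValidationException",
                           "Message": "1 validation error detected"}}
        raise ClientError(error, "PutEvents")

    def test_client_error_fields():
        with pytest.raises(ClientError) as ex:
            put_events_over_limit()
        assert ex.value.response["Error"]["Code"] == "ValidationException"
        assert "validation error" in ex.value.response["Error"]["Message"]
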
diff --git a/tests/test_events/test_events.py b/tests/test_events/test_events.py index b65171603f81..4b5bbd4cb138 100644 --- a/tests/test_events/test_events.py +++ b/tests/test_events/test_events.py @@ -6,7 +6,7 @@ import sure # noqa from botocore.exceptions import ClientError -from nose.tools import assert_raises +import pytest from moto.core import ACCOUNT_ID from moto.core.exceptions import JsonRESTError @@ -331,7 +331,7 @@ def test_put_events(): response["FailedEntryCount"].should.equal(0) response["Entries"].should.have.length_of(1) - with assert_raises(ClientError): + with pytest.raises(ClientError): client.put_events(Entries=[event] * 20) diff --git a/tests/test_glacier/__init__.py b/tests/test_glacier/__init__.py new file mode 100644 index 000000000000..08a1c1568c9c --- /dev/null +++ b/tests/test_glacier/__init__.py @@ -0,0 +1 @@ +# This file is intentionally left blank. diff --git a/tests/test_glue/test_datacatalog.py b/tests/test_glue/test_datacatalog.py index bc68b48f6a15..ac63932efb54 100644 --- a/tests/test_glue/test_datacatalog.py +++ b/tests/test_glue/test_datacatalog.py @@ -2,7 +2,7 @@ import sure # noqa import re -from nose.tools import assert_raises +import pytest import boto3 from botocore.client import ClientError @@ -32,7 +32,7 @@ def test_create_database_already_exists(): database_name = "cantcreatethisdatabasetwice" helpers.create_database(client, database_name) - with assert_raises(ClientError) as exc: + with pytest.raises(ClientError) as exc: helpers.create_database(client, database_name) exc.exception.response["Error"]["Code"].should.equal("AlreadyExistsException") @@ -43,7 +43,7 @@ def test_get_database_not_exits(): client = boto3.client("glue", region_name="us-east-1") database_name = "nosuchdatabase" - with assert_raises(ClientError) as exc: + with pytest.raises(ClientError) as exc: helpers.get_database(client, database_name) exc.exception.response["Error"]["Code"].should.equal("EntityNotFoundException") @@ -102,7 +102,7 @@ def test_create_table_already_exists(): table_name = "cantcreatethistabletwice" helpers.create_table(client, database_name, table_name) - with assert_raises(ClientError) as exc: + with pytest.raises(ClientError) as exc: helpers.create_table(client, database_name, table_name) exc.exception.response["Error"]["Code"].should.equal("AlreadyExistsException") @@ -192,7 +192,7 @@ def test_get_table_version_not_found(): helpers.create_database(client, database_name) helpers.create_table(client, database_name, table_name) - with assert_raises(ClientError) as exc: + with pytest.raises(ClientError) as exc: helpers.get_table_version(client, database_name, "myfirsttable", "20") exc.exception.response["Error"]["Code"].should.equal("EntityNotFoundException") @@ -207,7 +207,7 @@ def test_get_table_version_invalid_input(): helpers.create_database(client, database_name) helpers.create_table(client, database_name, table_name) - with assert_raises(ClientError) as exc: + with pytest.raises(ClientError) as exc: helpers.get_table_version(client, database_name, "myfirsttable", "10not-an-int") exc.exception.response["Error"]["Code"].should.equal("InvalidInputException") @@ -219,7 +219,7 @@ def test_get_table_not_exits(): database_name = "myspecialdatabase" helpers.create_database(client, database_name) - with assert_raises(ClientError) as exc: + with pytest.raises(ClientError) as exc: helpers.get_table(client, database_name, "myfirsttable") exc.exception.response["Error"]["Code"].should.equal("EntityNotFoundException") @@ -233,7 +233,7 @@ def 
test_get_table_when_database_not_exits(): client = boto3.client("glue", region_name="us-east-1") database_name = "nosuchdatabase" - with assert_raises(ClientError) as exc: + with pytest.raises(ClientError) as exc: helpers.get_table(client, database_name, "myfirsttable") exc.exception.response["Error"]["Code"].should.equal("EntityNotFoundException") @@ -256,7 +256,7 @@ def test_delete_table(): result["ResponseMetadata"]["HTTPStatusCode"].should.equal(200) # confirm table is deleted - with assert_raises(ClientError) as exc: + with pytest.raises(ClientError) as exc: helpers.get_table(client, database_name, table_name) exc.exception.response["Error"]["Code"].should.equal("EntityNotFoundException") @@ -281,7 +281,7 @@ def test_batch_delete_table(): result["ResponseMetadata"]["HTTPStatusCode"].should.equal(200) # confirm table is deleted - with assert_raises(ClientError) as exc: + with pytest.raises(ClientError) as exc: helpers.get_table(client, database_name, table_name) exc.exception.response["Error"]["Code"].should.equal("EntityNotFoundException") @@ -350,7 +350,7 @@ def test_create_partition_already_exist(): helpers.create_partition(client, database_name, table_name, values=values) - with assert_raises(ClientError) as exc: + with pytest.raises(ClientError) as exc: helpers.create_partition(client, database_name, table_name, values=values) exc.exception.response["Error"]["Code"].should.equal("AlreadyExistsException") @@ -366,7 +366,7 @@ def test_get_partition_not_found(): helpers.create_table(client, database_name, table_name) - with assert_raises(ClientError) as exc: + with pytest.raises(ClientError) as exc: helpers.get_partition(client, database_name, table_name, values) exc.exception.response["Error"]["Code"].should.equal("EntityNotFoundException") @@ -542,7 +542,7 @@ def test_update_partition_not_found_moving(): helpers.create_database(client, database_name) helpers.create_table(client, database_name, table_name) - with assert_raises(ClientError) as exc: + with pytest.raises(ClientError) as exc: helpers.update_partition( client, database_name, @@ -565,7 +565,7 @@ def test_update_partition_not_found_change_in_place(): helpers.create_database(client, database_name) helpers.create_table(client, database_name, table_name) - with assert_raises(ClientError) as exc: + with pytest.raises(ClientError) as exc: helpers.update_partition( client, database_name, table_name, old_values=values, values=values ) @@ -588,7 +588,7 @@ def test_update_partition_cannot_overwrite(): helpers.create_partition(client, database_name, table_name, values=values[0]) helpers.create_partition(client, database_name, table_name, values=values[1]) - with assert_raises(ClientError) as exc: + with pytest.raises(ClientError) as exc: helpers.update_partition( client, database_name, table_name, old_values=values[0], values=values[1] ) @@ -648,7 +648,7 @@ def test_update_partition_move(): columns=[{"Name": "country", "Type": "string"}], ) - with assert_raises(ClientError) as exc: + with pytest.raises(ClientError) as exc: helpers.get_partition(client, database_name, table_name, values) # Old partition shouldn't exist anymore @@ -697,7 +697,7 @@ def test_delete_partition_bad_partition(): helpers.create_database(client, database_name) helpers.create_table(client, database_name, table_name) - with assert_raises(ClientError) as exc: + with pytest.raises(ClientError) as exc: client.delete_partition( DatabaseName=database_name, TableName=table_name, PartitionValues=values ) diff --git a/tests/test_iam/__init__.py 
b/tests/test_iam/__init__.py new file mode 100644 index 000000000000..08a1c1568c9c --- /dev/null +++ b/tests/test_iam/__init__.py @@ -0,0 +1 @@ +# This file is intentionally left blank. diff --git a/tests/test_iam/test_iam.py b/tests/test_iam/test_iam.py index 7db2f0162775..d63e1777f588 100644 --- a/tests/test_iam/test_iam.py +++ b/tests/test_iam/test_iam.py @@ -13,8 +13,7 @@ from moto.core import ACCOUNT_ID from moto.iam.models import aws_managed_policies from moto.backends import get_backend -from nose.tools import assert_raises, assert_equals -from nose.tools import raises +import pytest from datetime import datetime from tests.helpers import requires_boto_gte @@ -93,7 +92,7 @@ def test_get_all_server_certs(): def test_get_server_cert_doesnt_exist(): conn = boto.connect_iam() - with assert_raises(BotoServerError): + with pytest.raises(BotoServerError): conn.get_server_certificate("NonExistant") @@ -128,14 +127,14 @@ def test_delete_server_cert(): conn.upload_server_cert("certname", "certbody", "privatekey") conn.get_server_certificate("certname") conn.delete_server_cert("certname") - with assert_raises(BotoServerError): + with pytest.raises(BotoServerError): conn.get_server_certificate("certname") - with assert_raises(BotoServerError): + with pytest.raises(BotoServerError): conn.delete_server_cert("certname") @mock_iam_deprecated() -@raises(BotoServerError) +@pytest.mark.xfail(raises=BotoServerError) def test_get_role__should_throw__when_role_does_not_exist(): conn = boto.connect_iam() @@ -143,7 +142,7 @@ def test_get_role__should_throw__when_role_does_not_exist(): @mock_iam_deprecated() -@raises(BotoServerError) +@pytest.mark.xfail(raises=BotoServerError) def test_get_instance_profile__should_throw__when_instance_profile_does_not_exist(): conn = boto.connect_iam() @@ -181,7 +180,7 @@ def test_create_role_and_instance_profile(): def test_create_instance_profile_should_throw_when_name_is_not_unique(): conn = boto3.client("iam", region_name="us-east-1") conn.create_instance_profile(InstanceProfileName="unique-instance-profile") - with assert_raises(ClientError): + with pytest.raises(ClientError): conn.create_instance_profile(InstanceProfileName="unique-instance-profile") @@ -214,13 +213,13 @@ def test_delete_instance_profile(): conn.add_role_to_instance_profile( InstanceProfileName="my-profile", RoleName="my-role" ) - with assert_raises(conn.exceptions.DeleteConflictException): + with pytest.raises(conn.exceptions.DeleteConflictException): conn.delete_instance_profile(InstanceProfileName="my-profile") conn.remove_role_from_instance_profile( InstanceProfileName="my-profile", RoleName="my-role" ) conn.delete_instance_profile(InstanceProfileName="my-profile") - with assert_raises(conn.exceptions.NoSuchEntityException): + with pytest.raises(conn.exceptions.NoSuchEntityException): profile = conn.get_instance_profile(InstanceProfileName="my-profile") @@ -253,7 +252,7 @@ def test_update_login_profile(): def test_delete_role(): conn = boto3.client("iam", region_name="us-east-1") - with assert_raises(conn.exceptions.NoSuchEntityException): + with pytest.raises(conn.exceptions.NoSuchEntityException): conn.delete_role(RoleName="my-role") # Test deletion failure with a managed policy @@ -264,12 +263,12 @@ def test_delete_role(): PolicyName="my-managed-policy", PolicyDocument=MOCK_POLICY ) conn.attach_role_policy(PolicyArn=response["Policy"]["Arn"], RoleName="my-role") - with assert_raises(conn.exceptions.DeleteConflictException): + with pytest.raises(conn.exceptions.DeleteConflictException): 
conn.delete_role(RoleName="my-role") conn.detach_role_policy(PolicyArn=response["Policy"]["Arn"], RoleName="my-role") conn.delete_policy(PolicyArn=response["Policy"]["Arn"]) conn.delete_role(RoleName="my-role") - with assert_raises(conn.exceptions.NoSuchEntityException): + with pytest.raises(conn.exceptions.NoSuchEntityException): conn.get_role(RoleName="my-role") # Test deletion failure with an inline policy @@ -279,11 +278,11 @@ def test_delete_role(): conn.put_role_policy( RoleName="my-role", PolicyName="my-role-policy", PolicyDocument=MOCK_POLICY ) - with assert_raises(conn.exceptions.DeleteConflictException): + with pytest.raises(conn.exceptions.DeleteConflictException): conn.delete_role(RoleName="my-role") conn.delete_role_policy(RoleName="my-role", PolicyName="my-role-policy") conn.delete_role(RoleName="my-role") - with assert_raises(conn.exceptions.NoSuchEntityException): + with pytest.raises(conn.exceptions.NoSuchEntityException): conn.get_role(RoleName="my-role") # Test deletion failure with attachment to an instance profile @@ -294,13 +293,13 @@ def test_delete_role(): conn.add_role_to_instance_profile( InstanceProfileName="my-profile", RoleName="my-role" ) - with assert_raises(conn.exceptions.DeleteConflictException): + with pytest.raises(conn.exceptions.DeleteConflictException): conn.delete_role(RoleName="my-role") conn.remove_role_from_instance_profile( InstanceProfileName="my-profile", RoleName="my-role" ) conn.delete_role(RoleName="my-role") - with assert_raises(conn.exceptions.NoSuchEntityException): + with pytest.raises(conn.exceptions.NoSuchEntityException): conn.get_role(RoleName="my-role") # Test deletion with no conflicts @@ -308,7 +307,7 @@ def test_delete_role(): RoleName="my-role", AssumeRolePolicyDocument="some policy", Path="/my-path/" ) conn.delete_role(RoleName="my-role") - with assert_raises(conn.exceptions.NoSuchEntityException): + with pytest.raises(conn.exceptions.NoSuchEntityException): conn.get_role(RoleName="my-role") @@ -389,7 +388,7 @@ def test_list_role_policies(): role.policy_names.should.have.length_of(1) role.policy_names[0].should.equal("test policy 2") - with assert_raises(BotoServerError): + with pytest.raises(BotoServerError): conn.delete_role_policy("my-role", "test policy") @@ -412,7 +411,7 @@ def test_get_role_policy(): conn.create_role( RoleName="my-role", AssumeRolePolicyDocument="some policy", Path="my-path" ) - with assert_raises(conn.exceptions.NoSuchEntityException): + with pytest.raises(conn.exceptions.NoSuchEntityException): conn.get_role_policy(RoleName="my-role", PolicyName="does-not-exist") @@ -442,7 +441,7 @@ def test_create_policy_already_exists(): response = conn.create_policy( PolicyName="TestCreatePolicy", PolicyDocument=MOCK_POLICY ) - with assert_raises(conn.exceptions.EntityAlreadyExistsException) as ex: + with pytest.raises(conn.exceptions.EntityAlreadyExistsException) as ex: response = conn.create_policy( PolicyName="TestCreatePolicy", PolicyDocument=MOCK_POLICY ) @@ -467,7 +466,7 @@ def test_delete_policy(): @mock_iam def test_create_policy_versions(): conn = boto3.client("iam", region_name="us-east-1") - with assert_raises(ClientError): + with pytest.raises(ClientError): conn.create_policy_version( PolicyArn="arn:aws:iam::{}:policy/TestCreatePolicyVersion".format( ACCOUNT_ID @@ -508,7 +507,7 @@ def test_create_many_policy_versions(): ), PolicyDocument=MOCK_POLICY, ) - with assert_raises(ClientError): + with pytest.raises(ClientError): conn.create_policy_version( 
PolicyArn="arn:aws:iam::{}:policy/TestCreateManyPolicyVersions".format( ACCOUNT_ID @@ -639,7 +638,7 @@ def test_get_policy_version(): PolicyArn="arn:aws:iam::{}:policy/TestGetPolicyVersion".format(ACCOUNT_ID), PolicyDocument=MOCK_POLICY, ) - with assert_raises(ClientError): + with pytest.raises(ClientError): conn.get_policy_version( PolicyArn="arn:aws:iam::{}:policy/TestGetPolicyVersion".format(ACCOUNT_ID), VersionId="v2-does-not-exist", @@ -661,7 +660,7 @@ def test_get_aws_managed_policy_version(): managed_policy_version_create_date = datetime.strptime( "2015-04-09T15:03:43+00:00", "%Y-%m-%dT%H:%M:%S+00:00" ) - with assert_raises(ClientError): + with pytest.raises(ClientError): conn.get_policy_version( PolicyArn=managed_policy_arn, VersionId="v2-does-not-exist" ) @@ -679,7 +678,7 @@ def test_get_aws_managed_policy_v4_version(): managed_policy_version_create_date = datetime.strptime( "2018-10-08T21:33:45+00:00", "%Y-%m-%dT%H:%M:%S+00:00" ) - with assert_raises(ClientError): + with pytest.raises(ClientError): conn.get_policy_version( PolicyArn=managed_policy_arn, VersionId="v2-does-not-exist" ) @@ -693,7 +692,7 @@ def test_get_aws_managed_policy_v4_version(): @mock_iam def test_list_policy_versions(): conn = boto3.client("iam", region_name="us-east-1") - with assert_raises(ClientError): + with pytest.raises(ClientError): versions = conn.list_policy_versions( PolicyArn="arn:aws:iam::{}:policy/TestListPolicyVersions".format(ACCOUNT_ID) ) @@ -729,7 +728,7 @@ def test_delete_policy_version(): PolicyArn="arn:aws:iam::{}:policy/TestDeletePolicyVersion".format(ACCOUNT_ID), PolicyDocument=MOCK_POLICY, ) - with assert_raises(ClientError): + with pytest.raises(ClientError): conn.delete_policy_version( PolicyArn="arn:aws:iam::{}:policy/TestDeletePolicyVersion".format( ACCOUNT_ID @@ -754,7 +753,7 @@ def test_delete_default_policy_version(): PolicyArn="arn:aws:iam::{}:policy/TestDeletePolicyVersion".format(ACCOUNT_ID), PolicyDocument=MOCK_POLICY_2, ) - with assert_raises(ClientError): + with pytest.raises(ClientError): conn.delete_policy_version( PolicyArn="arn:aws:iam::{}:policy/TestDeletePolicyVersion".format( ACCOUNT_ID @@ -767,14 +766,14 @@ def test_delete_default_policy_version(): def test_create_user(): conn = boto.connect_iam() conn.create_user("my-user") - with assert_raises(BotoServerError): + with pytest.raises(BotoServerError): conn.create_user("my-user") @mock_iam_deprecated() def test_get_user(): conn = boto.connect_iam() - with assert_raises(BotoServerError): + with pytest.raises(BotoServerError): conn.get_user("my-user") conn.create_user("my-user") conn.get_user("my-user") @@ -783,13 +782,13 @@ def test_get_user(): @mock_iam() def test_update_user(): conn = boto3.client("iam", region_name="us-east-1") - with assert_raises(conn.exceptions.NoSuchEntityException): + with pytest.raises(conn.exceptions.NoSuchEntityException): conn.update_user(UserName="my-user") conn.create_user(UserName="my-user") conn.update_user(UserName="my-user", NewPath="/new-path/", NewUserName="new-user") response = conn.get_user(UserName="new-user") response["User"].get("Path").should.equal("/new-path/") - with assert_raises(conn.exceptions.NoSuchEntityException): + with pytest.raises(conn.exceptions.NoSuchEntityException): conn.get_user(UserName="my-user") @@ -846,11 +845,11 @@ def test_user_policies(): @mock_iam_deprecated() def test_create_login_profile(): conn = boto.connect_iam() - with assert_raises(BotoServerError): + with pytest.raises(BotoServerError): conn.create_login_profile("my-user", "my-pass") 
conn.create_user("my-user") conn.create_login_profile("my-user", "my-pass") - with assert_raises(BotoServerError): + with pytest.raises(BotoServerError): conn.create_login_profile("my-user", "my-pass") @@ -858,7 +857,7 @@ def test_create_login_profile(): def test_delete_login_profile(): conn = boto.connect_iam() conn.create_user("my-user") - with assert_raises(BotoServerError): + with pytest.raises(BotoServerError): conn.delete_login_profile("my-user") conn.create_login_profile("my-user", "my-pass") conn.delete_login_profile("my-user") @@ -867,7 +866,7 @@ def test_delete_login_profile(): @mock_iam def test_create_access_key(): conn = boto3.client("iam", region_name="us-east-1") - with assert_raises(ClientError): + with pytest.raises(ClientError): conn.create_access_key(UserName="my-user") conn.create_user(UserName="my-user") access_key = conn.create_access_key(UserName="my-user")["AccessKey"] @@ -899,22 +898,19 @@ def test_get_all_access_keys(): conn = boto.connect_iam() conn.create_user("my-user") response = conn.get_all_access_keys("my-user") - assert_equals( + assert \ response["list_access_keys_response"]["list_access_keys_result"][ "access_key_metadata" - ], - [], - ) + ] == [] conn.create_access_key("my-user") response = conn.get_all_access_keys("my-user") - assert_equals( + assert \ sorted( response["list_access_keys_response"]["list_access_keys_result"][ "access_key_metadata" ][0].keys() - ), - sorted(["status", "create_date", "user_name", "access_key_id"]), - ) + ) == \ + sorted(["status", "create_date", "user_name", "access_key_id"]) @mock_iam @@ -922,13 +918,12 @@ def test_list_access_keys(): conn = boto3.client("iam", region_name="us-east-1") conn.create_user(UserName="my-user") response = conn.list_access_keys(UserName="my-user") - assert_equals(response["AccessKeyMetadata"], []) + assert response["AccessKeyMetadata"] == [] access_key = conn.create_access_key(UserName="my-user")["AccessKey"] response = conn.list_access_keys(UserName="my-user") - assert_equals( - sorted(response["AccessKeyMetadata"][0].keys()), - sorted(["Status", "CreateDate", "UserName", "AccessKeyId"]), - ) + assert \ + sorted(response["AccessKeyMetadata"][0].keys()) == \ + sorted(["Status", "CreateDate", "UserName", "AccessKeyId"] conn = boto3.client( "iam", region_name="us-east-1", @@ -936,10 +931,9 @@ def test_list_access_keys(): aws_secret_access_key=access_key["SecretAccessKey"], ) response = conn.list_access_keys() - assert_equals( - sorted(response["AccessKeyMetadata"][0].keys()), - sorted(["Status", "CreateDate", "UserName", "AccessKeyId"]), - ) + assert \ + sorted(response["AccessKeyMetadata"][0].keys()) == \ + sorted(["Status", "CreateDate", "UserName", "AccessKeyId"]) @mock_iam_deprecated() @@ -1188,7 +1182,7 @@ def test_enable_virtual_mfa_device(): @mock_iam_deprecated() def test_delete_user_deprecated(): conn = boto.connect_iam() - with assert_raises(BotoServerError): + with pytest.raises(BotoServerError): conn.delete_user("my-user") conn.create_user("my-user") conn.delete_user("my-user") @@ -1197,7 +1191,7 @@ def test_delete_user_deprecated(): @mock_iam() def test_delete_user(): conn = boto3.client("iam", region_name="us-east-1") - with assert_raises(conn.exceptions.NoSuchEntityException): + with pytest.raises(conn.exceptions.NoSuchEntityException): conn.delete_user(UserName="my-user") # Test deletion failure with a managed policy @@ -1206,12 +1200,12 @@ def test_delete_user(): PolicyName="my-managed-policy", PolicyDocument=MOCK_POLICY ) 
conn.attach_user_policy(PolicyArn=response["Policy"]["Arn"], UserName="my-user") - with assert_raises(conn.exceptions.DeleteConflictException): + with pytest.raises(conn.exceptions.DeleteConflictException): conn.delete_user(UserName="my-user") conn.detach_user_policy(PolicyArn=response["Policy"]["Arn"], UserName="my-user") conn.delete_policy(PolicyArn=response["Policy"]["Arn"]) conn.delete_user(UserName="my-user") - with assert_raises(conn.exceptions.NoSuchEntityException): + with pytest.raises(conn.exceptions.NoSuchEntityException): conn.get_user(UserName="my-user") # Test deletion failure with an inline policy @@ -1219,17 +1213,17 @@ def test_delete_user(): conn.put_user_policy( UserName="my-user", PolicyName="my-user-policy", PolicyDocument=MOCK_POLICY ) - with assert_raises(conn.exceptions.DeleteConflictException): + with pytest.raises(conn.exceptions.DeleteConflictException): conn.delete_user(UserName="my-user") conn.delete_user_policy(UserName="my-user", PolicyName="my-user-policy") conn.delete_user(UserName="my-user") - with assert_raises(conn.exceptions.NoSuchEntityException): + with pytest.raises(conn.exceptions.NoSuchEntityException): conn.get_user(UserName="my-user") # Test deletion with no conflicts conn.create_user(UserName="my-user") conn.delete_user(UserName="my-user") - with assert_raises(conn.exceptions.NoSuchEntityException): + with pytest.raises(conn.exceptions.NoSuchEntityException): conn.get_user(UserName="my-user") @@ -1259,7 +1253,7 @@ def test_boto3_generate_credential_report(): def test_get_credential_report(): conn = boto.connect_iam() conn.create_user("my-user") - with assert_raises(BotoServerError): + with pytest.raises(BotoServerError): conn.get_credential_report() result = conn.generate_credential_report() while ( @@ -1282,7 +1276,7 @@ def test_get_credential_report(): def test_boto3_get_credential_report(): conn = boto3.client("iam", region_name="us-east-1") conn.create_user(UserName="my-user") - with assert_raises(ClientError): + with pytest.raises(ClientError): conn.get_credential_report() result = conn.generate_credential_report() while result["State"] != "COMPLETE": @@ -1306,7 +1300,7 @@ def test_boto3_get_credential_report_content(): if not settings.TEST_SERVER_MODE: iam_backend = get_backend("iam")["global"] iam_backend.users[username].access_keys[1].last_used = timestamp - with assert_raises(ClientError): + with pytest.raises(ClientError): conn.get_credential_report() result = conn.generate_credential_report() while result["State"] != "COMPLETE": @@ -1336,7 +1330,7 @@ def test_get_access_key_last_used_when_used(): client = iam.meta.client username = "test-user" iam.create_user(UserName=username) - with assert_raises(ClientError): + with pytest.raises(ClientError): client.get_access_key_last_used(AccessKeyId="non-existent-key-id") create_key_response = client.create_access_key(UserName=username)["AccessKey"] # Set last used date using the IAM backend. 
Moto currently does not have a mechanism for tracking usage of access keys @@ -1448,12 +1442,12 @@ def test_managed_policy(): "attached_policies" ].should.have.length_of(1) - with assert_raises(BotoServerError): + with pytest.raises(BotoServerError): conn.detach_role_policy( "arn:aws:iam::aws:policy/service-role/AmazonElasticMapReduceRole", role_name ) - with assert_raises(BotoServerError): + with pytest.raises(BotoServerError): conn.detach_role_policy("arn:aws:iam::aws:policy/Nonexistent", role_name) @@ -1461,13 +1455,13 @@ def test_managed_policy(): def test_boto3_create_login_profile(): conn = boto3.client("iam", region_name="us-east-1") - with assert_raises(ClientError): + with pytest.raises(ClientError): conn.create_login_profile(UserName="my-user", Password="Password") conn.create_user(UserName="my-user") conn.create_login_profile(UserName="my-user", Password="Password") - with assert_raises(ClientError): + with pytest.raises(ClientError): conn.create_login_profile(UserName="my-user", Password="Password") @@ -1506,7 +1500,7 @@ def test_update_access_key(): client = iam.meta.client username = "test-user" iam.create_user(UserName=username) - with assert_raises(ClientError): + with pytest.raises(ClientError): client.update_access_key( UserName=username, AccessKeyId="non-existent-key", Status="Inactive" ) @@ -1527,7 +1521,7 @@ def test_get_access_key_last_used_when_unused(): client = iam.meta.client username = "test-user" iam.create_user(UserName=username) - with assert_raises(ClientError): + with pytest.raises(ClientError): client.get_access_key_last_used(AccessKeyId="non-existent-key-id") create_key_response = client.create_access_key(UserName=username)["AccessKey"] resp = client.get_access_key_last_used( @@ -1566,7 +1560,7 @@ def test_get_ssh_public_key(): iam.create_user(UserName=username) public_key = MOCK_CERT - with assert_raises(ClientError): + with pytest.raises(ClientError): client.get_ssh_public_key( UserName=username, SSHPublicKeyId="xxnon-existent-keyxx", Encoding="SSH" ) @@ -1607,7 +1601,7 @@ def test_update_ssh_public_key(): iam.create_user(UserName=username) public_key = MOCK_CERT - with assert_raises(ClientError): + with pytest.raises(ClientError): client.update_ssh_public_key( UserName=username, SSHPublicKeyId="xxnon-existent-keyxx", Status="Inactive" ) @@ -1634,7 +1628,7 @@ def test_delete_ssh_public_key(): iam.create_user(UserName=username) public_key = MOCK_CERT - with assert_raises(ClientError): + with pytest.raises(ClientError): client.delete_ssh_public_key( UserName=username, SSHPublicKeyId="xxnon-existent-keyxx" ) @@ -1827,14 +1821,14 @@ def test_signing_certs(): assert resp["CertificateId"] # Upload a the cert with an invalid body: - with assert_raises(ClientError) as ce: + with pytest.raises(ClientError) as ce: client.upload_signing_certificate( UserName="testing", CertificateBody="notacert" ) assert ce.exception.response["Error"]["Code"] == "MalformedCertificate" # Upload with an invalid user: - with assert_raises(ClientError): + with pytest.raises(ClientError): client.upload_signing_certificate( UserName="notauser", CertificateBody=MOCK_CERT ) @@ -1844,12 +1838,12 @@ def test_signing_certs(): UserName="testing", CertificateId=cert_id, Status="Inactive" ) - with assert_raises(ClientError): + with pytest.raises(ClientError): client.update_signing_certificate( UserName="notauser", CertificateId=cert_id, Status="Inactive" ) - with assert_raises(ClientError) as ce: + with pytest.raises(ClientError) as ce: client.update_signing_certificate( UserName="testing", 
CertificateId="x" * 32, Status="Inactive" ) @@ -1864,13 +1858,13 @@ def test_signing_certs(): assert resp[0]["CertificateBody"] == MOCK_CERT assert resp[0]["Status"] == "Inactive" # Changed with the update call above. - with assert_raises(ClientError): + with pytest.raises(ClientError): client.list_signing_certificates(UserName="notauser") # Delete: client.delete_signing_certificate(UserName="testing", CertificateId=cert_id) - with assert_raises(ClientError): + with pytest.raises(ClientError): client.delete_signing_certificate(UserName="notauser", CertificateId=cert_id) @@ -1921,7 +1915,7 @@ def test_delete_saml_provider(): conn.create_user(UserName="testing") cert_id = "123456789012345678901234" - with assert_raises(ClientError) as ce: + with pytest.raises(ClientError) as ce: conn.delete_signing_certificate(UserName="testing", CertificateId=cert_id) assert ce.exception.response["Error"][ @@ -1982,7 +1976,7 @@ def test_create_role_with_tags(): # Test creating tags with invalid values: # With more than 50 tags: - with assert_raises(ClientError) as ce: + with pytest.raises(ClientError) as ce: too_many_tags = list( map(lambda x: {"Key": str(x), "Value": str(x)}, range(0, 51)) ) @@ -1995,7 +1989,7 @@ def test_create_role_with_tags(): ) # With a duplicate tag: - with assert_raises(ClientError) as ce: + with pytest.raises(ClientError) as ce: conn.create_role( RoleName="my-role3", AssumeRolePolicyDocument="{}", @@ -2007,7 +2001,7 @@ def test_create_role_with_tags(): ) # Duplicate tag with different casing: - with assert_raises(ClientError) as ce: + with pytest.raises(ClientError) as ce: conn.create_role( RoleName="my-role3", AssumeRolePolicyDocument="{}", @@ -2019,7 +2013,7 @@ def test_create_role_with_tags(): ) # With a really big key: - with assert_raises(ClientError) as ce: + with pytest.raises(ClientError) as ce: conn.create_role( RoleName="my-role3", AssumeRolePolicyDocument="{}", @@ -2031,7 +2025,7 @@ def test_create_role_with_tags(): ) # With a really big value: - with assert_raises(ClientError) as ce: + with pytest.raises(ClientError) as ce: conn.create_role( RoleName="my-role3", AssumeRolePolicyDocument="{}", @@ -2043,7 +2037,7 @@ def test_create_role_with_tags(): ) # With an invalid character: - with assert_raises(ClientError) as ce: + with pytest.raises(ClientError) as ce: conn.create_role( RoleName="my-role3", AssumeRolePolicyDocument="{}", @@ -2125,7 +2119,7 @@ def test_tag_role(): # Test creating tags with invalid values: # With more than 50 tags: - with assert_raises(ClientError) as ce: + with pytest.raises(ClientError) as ce: too_many_tags = list( map(lambda x: {"Key": str(x), "Value": str(x)}, range(0, 51)) ) @@ -2136,7 +2130,7 @@ def test_tag_role(): ) # With a duplicate tag: - with assert_raises(ClientError) as ce: + with pytest.raises(ClientError) as ce: conn.tag_role( RoleName="my-role", Tags=[{"Key": "0", "Value": ""}, {"Key": "0", "Value": ""}], @@ -2147,7 +2141,7 @@ def test_tag_role(): ) # Duplicate tag with different casing: - with assert_raises(ClientError) as ce: + with pytest.raises(ClientError) as ce: conn.tag_role( RoleName="my-role", Tags=[{"Key": "a", "Value": ""}, {"Key": "A", "Value": ""}], @@ -2158,7 +2152,7 @@ def test_tag_role(): ) # With a really big key: - with assert_raises(ClientError) as ce: + with pytest.raises(ClientError) as ce: conn.tag_role(RoleName="my-role", Tags=[{"Key": "0" * 129, "Value": ""}]) assert ( "Member must have length less than or equal to 128." 
@@ -2166,7 +2160,7 @@ def test_tag_role(): ) # With a really big value: - with assert_raises(ClientError) as ce: + with pytest.raises(ClientError) as ce: conn.tag_role(RoleName="my-role", Tags=[{"Key": "0", "Value": "0" * 257}]) assert ( "Member must have length less than or equal to 256." @@ -2174,7 +2168,7 @@ def test_tag_role(): ) # With an invalid character: - with assert_raises(ClientError) as ce: + with pytest.raises(ClientError) as ce: conn.tag_role(RoleName="my-role", Tags=[{"Key": "NOWAY!", "Value": ""}]) assert ( "Member must satisfy regular expression pattern: [\\p{L}\\p{Z}\\p{N}_.:/=+\\-@]+" @@ -2182,7 +2176,7 @@ def test_tag_role(): ) # With a role that doesn't exist: - with assert_raises(ClientError): + with pytest.raises(ClientError): conn.tag_role(RoleName="notarole", Tags=[{"Key": "some", "Value": "value"}]) @@ -2214,7 +2208,7 @@ def test_untag_role(): # Test removing tags with invalid values: # With more than 50 tags: - with assert_raises(ClientError) as ce: + with pytest.raises(ClientError) as ce: conn.untag_role(RoleName="my-role", TagKeys=[str(x) for x in range(0, 51)]) assert ( "failed to satisfy constraint: Member must have length less than or equal to 50." @@ -2223,7 +2217,7 @@ def test_untag_role(): assert "tagKeys" in ce.exception.response["Error"]["Message"] # With a really big key: - with assert_raises(ClientError) as ce: + with pytest.raises(ClientError) as ce: conn.untag_role(RoleName="my-role", TagKeys=["0" * 129]) assert ( "Member must have length less than or equal to 128." @@ -2232,7 +2226,7 @@ def test_untag_role(): assert "tagKeys" in ce.exception.response["Error"]["Message"] # With an invalid character: - with assert_raises(ClientError) as ce: + with pytest.raises(ClientError) as ce: conn.untag_role(RoleName="my-role", TagKeys=["NOWAY!"]) assert ( "Member must satisfy regular expression pattern: [\\p{L}\\p{Z}\\p{N}_.:/=+\\-@]+" @@ -2241,7 +2235,7 @@ def test_untag_role(): assert "tagKeys" in ce.exception.response["Error"]["Message"] # With a role that doesn't exist: - with assert_raises(ClientError): + with pytest.raises(ClientError): conn.untag_role(RoleName="notarole", TagKeys=["somevalue"]) @@ -2249,7 +2243,7 @@ def test_untag_role(): def test_update_role_description(): conn = boto3.client("iam", region_name="us-east-1") - with assert_raises(ClientError): + with pytest.raises(ClientError): conn.delete_role(RoleName="my-role") conn.create_role( @@ -2264,7 +2258,7 @@ def test_update_role_description(): def test_update_role(): conn = boto3.client("iam", region_name="us-east-1") - with assert_raises(ClientError): + with pytest.raises(ClientError): conn.delete_role(RoleName="my-role") conn.create_role( @@ -2278,7 +2272,7 @@ def test_update_role(): def test_update_role(): conn = boto3.client("iam", region_name="us-east-1") - with assert_raises(ClientError): + with pytest.raises(ClientError): conn.delete_role(RoleName="my-role") conn.create_role( @@ -2292,7 +2286,7 @@ def test_update_role(): def test_update_role_defaults(): conn = boto3.client("iam", region_name="us-east-1") - with assert_raises(ClientError): + with pytest.raises(ClientError): conn.delete_role(RoleName="my-role") conn.create_role( @@ -2436,12 +2430,12 @@ def test_create_role_with_permissions_boundary(): invalid_boundary_arn = "arn:aws:iam::123456789:not_a_boundary" - with assert_raises(ClientError): + with pytest.raises(ClientError): conn.put_role_permissions_boundary( RoleName="my-role", PermissionsBoundary=invalid_boundary_arn ) - with assert_raises(ClientError): + with 
pytest.raises(ClientError): conn.create_role( RoleName="bad-boundary", AssumeRolePolicyDocument="some policy", @@ -2461,7 +2455,7 @@ def test_create_role_with_same_name_should_fail(): RoleName=test_role_name, AssumeRolePolicyDocument="policy", Description="test" ) # Create the role again, and verify that it fails - with assert_raises(ClientError) as err: + with pytest.raises(ClientError) as err: iam.create_role( RoleName=test_role_name, AssumeRolePolicyDocument="policy", @@ -2479,7 +2473,7 @@ def test_create_policy_with_same_name_should_fail(): test_policy_name = str(uuid4()) policy = iam.create_policy(PolicyName=test_policy_name, PolicyDocument=MOCK_POLICY) # Create the role again, and verify that it fails - with assert_raises(ClientError) as err: + with pytest.raises(ClientError) as err: iam.create_policy(PolicyName=test_policy_name, PolicyDocument=MOCK_POLICY) err.exception.response["Error"]["Code"].should.equal("EntityAlreadyExists") err.exception.response["Error"]["Message"].should.equal( diff --git a/tests/test_iam/test_iam_cloudformation.py b/tests/test_iam/test_iam_cloudformation.py index aa063273f94b..737e76323b12 100644 --- a/tests/test_iam/test_iam_cloudformation.py +++ b/tests/test_iam/test_iam_cloudformation.py @@ -2,7 +2,7 @@ import yaml import sure # noqa -from nose.tools import assert_raises +import pytest from botocore.exceptions import ClientError from moto import mock_iam, mock_cloudformation, mock_s3, mock_sts @@ -111,7 +111,7 @@ def test_iam_cloudformation_update_user_replacement(): cf_client.update_stack(StackName=stack_name, TemplateBody=template) - with assert_raises(ClientError) as e: + with pytest.raises(ClientError) as e: iam_client.get_user(UserName=original_user_name) e.exception.response["Error"]["Code"].should.equal("NoSuchEntity") @@ -175,7 +175,7 @@ def test_iam_cloudformation_update_drop_user(): second_user_name.should.equal(second_provisioned_user["PhysicalResourceId"]) iam_client.get_user(UserName=second_user_name) - with assert_raises(ClientError) as e: + with pytest.raises(ClientError) as e: iam_client.get_user(UserName=first_user_name) e.exception.response["Error"]["Code"].should.equal("NoSuchEntity") @@ -205,7 +205,7 @@ def test_iam_cloudformation_delete_user(): cf_client.delete_stack(StackName=stack_name) - with assert_raises(ClientError) as e: + with pytest.raises(ClientError) as e: user = iam_client.get_user(UserName=user_name) e.exception.response["Error"]["Code"].should.equal("NoSuchEntity") @@ -235,7 +235,7 @@ def test_iam_cloudformation_delete_user_having_generated_name(): cf_client.delete_stack(StackName=stack_name) - with assert_raises(ClientError) as e: + with pytest.raises(ClientError) as e: user = iam_client.get_user(UserName=user_name) e.exception.response["Error"]["Code"].should.equal("NoSuchEntity") diff --git a/tests/test_iam/test_iam_groups.py b/tests/test_iam/test_iam_groups.py index 64d838e2b68d..a6bb5f4c0637 100644 --- a/tests/test_iam/test_iam_groups.py +++ b/tests/test_iam/test_iam_groups.py @@ -6,7 +6,7 @@ import boto3 import sure # noqa -from nose.tools import assert_raises +import pytest from boto.exception import BotoServerError from botocore.exceptions import ClientError from moto import mock_iam, mock_iam_deprecated @@ -29,7 +29,7 @@ def test_create_group(): conn = boto.connect_iam() conn.create_group("my-group") - with assert_raises(BotoServerError): + with pytest.raises(BotoServerError): conn.create_group("my-group") @@ -38,7 +38,7 @@ def test_get_group(): conn = boto.connect_iam() conn.create_group("my-group") 
conn.get_group("my-group") - with assert_raises(BotoServerError): + with pytest.raises(BotoServerError): conn.get_group("not-group") @@ -77,10 +77,10 @@ def test_get_all_groups(): @mock_iam_deprecated() def test_add_user_to_group(): conn = boto.connect_iam() - with assert_raises(BotoServerError): + with pytest.raises(BotoServerError): conn.add_user_to_group("my-group", "my-user") conn.create_group("my-group") - with assert_raises(BotoServerError): + with pytest.raises(BotoServerError): conn.add_user_to_group("my-group", "my-user") conn.create_user("my-user") conn.add_user_to_group("my-group", "my-user") @@ -89,11 +89,11 @@ def test_add_user_to_group(): @mock_iam_deprecated() def test_remove_user_from_group(): conn = boto.connect_iam() - with assert_raises(BotoServerError): + with pytest.raises(BotoServerError): conn.remove_user_from_group("my-group", "my-user") conn.create_group("my-group") conn.create_user("my-user") - with assert_raises(BotoServerError): + with pytest.raises(BotoServerError): conn.remove_user_from_group("my-group", "my-user") conn.add_user_to_group("my-group", "my-user") conn.remove_user_from_group("my-group", "my-user") @@ -150,7 +150,7 @@ def test_attach_group_policies(): def test_get_group_policy(): conn = boto.connect_iam() conn.create_group("my-group") - with assert_raises(BotoServerError): + with pytest.raises(BotoServerError): conn.get_group_policy("my-group", "my-policy") conn.put_group_policy("my-group", "my-policy", MOCK_POLICY) diff --git a/tests/test_iam/test_iam_policies.py b/tests/test_iam/test_iam_policies.py index 6348b0cbad9d..dae533827597 100644 --- a/tests/test_iam/test_iam_policies.py +++ b/tests/test_iam/test_iam_policies.py @@ -2,7 +2,7 @@ import boto3 from botocore.exceptions import ClientError -from nose.tools import assert_raises +import pytest from moto import mock_iam @@ -1624,7 +1624,7 @@ def test_create_policy_with_valid_policy_documents(): @mock_iam def check_create_policy_with_invalid_policy_document(test_case): conn = boto3.client("iam", region_name="us-east-1") - with assert_raises(ClientError) as ex: + with pytest.raises(ClientError) as ex: conn.create_policy( PolicyName="TestCreatePolicy", PolicyDocument=json.dumps(test_case["document"]), diff --git a/tests/test_iot/__init__.py b/tests/test_iot/__init__.py new file mode 100644 index 000000000000..08a1c1568c9c --- /dev/null +++ b/tests/test_iot/__init__.py @@ -0,0 +1 @@ +# This file is intentionally left blank. 
diff --git a/tests/test_iot/test_iot.py b/tests/test_iot/test_iot.py index e80a12a0fab8..44b365182d40 100644 --- a/tests/test_iot/test_iot.py +++ b/tests/test_iot/test_iot.py @@ -6,7 +6,7 @@ from moto import mock_iot from botocore.exceptions import ClientError -from nose.tools import assert_raises +import pytest def generate_thing_group_tree(iot_client, tree_dict, _parent=None): @@ -643,7 +643,7 @@ def test_delete_policy_validation(): client.create_policy(policyName=policy_name, policyDocument=doc) client.attach_principal_policy(policyName=policy_name, principal=cert_arn) - with assert_raises(ClientError) as e: + with pytest.raises(ClientError) as e: client.delete_policy(policyName=policy_name) e.exception.response["Error"]["Message"].should.contain( "The policy cannot be deleted as the policy is attached to one or more principals (name=%s)" @@ -684,7 +684,7 @@ def test_delete_certificate_validation(): client.create_thing(thingName=thing_name) client.attach_thing_principal(thingName=thing_name, principal=cert_arn) - with assert_raises(ClientError) as e: + with pytest.raises(ClientError) as e: client.delete_certificate(certificateId=cert_id) e.exception.response["Error"]["Message"].should.contain( "Certificate must be deactivated (not ACTIVE) before deletion." @@ -693,7 +693,7 @@ def test_delete_certificate_validation(): res.should.have.key("certificates").which.should.have.length_of(1) client.update_certificate(certificateId=cert_id, newStatus="REVOKED") - with assert_raises(ClientError) as e: + with pytest.raises(ClientError) as e: client.delete_certificate(certificateId=cert_id) e.exception.response["Error"]["Message"].should.contain( "Things must be detached before deletion (arn: %s)" % cert_arn @@ -702,7 +702,7 @@ def test_delete_certificate_validation(): res.should.have.key("certificates").which.should.have.length_of(1) client.detach_thing_principal(thingName=thing_name, principal=cert_arn) - with assert_raises(ClientError) as e: + with pytest.raises(ClientError) as e: client.delete_certificate(certificateId=cert_id) e.exception.response["Error"]["Message"].should.contain( "Certificate policies must be detached before deletion (arn: %s)" % cert_arn @@ -798,7 +798,7 @@ def test_principal_policy(): res.should.have.key("policies").which.should.have.length_of(0) res = client.list_policy_principals(policyName=policy_name) res.should.have.key("principals").which.should.have.length_of(0) - with assert_raises(ClientError) as e: + with pytest.raises(ClientError) as e: client.detach_policy(policyName=policy_name, target=cert_arn) e.exception.response["Error"]["Code"].should.equal("ResourceNotFoundException") diff --git a/tests/test_iotdata/__init__.py b/tests/test_iotdata/__init__.py new file mode 100644 index 000000000000..08a1c1568c9c --- /dev/null +++ b/tests/test_iotdata/__init__.py @@ -0,0 +1 @@ +# This file is intentionally left blank. 
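Many of the converted IoT assertions above inspect the error payload of a caught ClientError. A minimal sketch of that pattern under pytest, using a hand-built ClientError rather than a moto-backed call; the function and payload names are illustrative:

    import pytest
    from botocore.exceptions import ClientError

    def fail_delete_policy():
        # Stand-in for a moto-backed call; builds the error payload a real
        # client call would surface (names are illustrative).
        raise ClientError(
            {"Error": {"Code": "DeleteConflict", "Message": "policy is attached"}},
            "DeletePolicy",
        )

    def test_error_payload():
        with pytest.raises(ClientError) as e:
            fail_delete_policy()
        # pytest's ExceptionInfo exposes the raised exception as e.value
        assert e.value.response["Error"]["Code"] == "DeleteConflict"
        assert "attached" in e.value.response["Error"]["Message"]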
diff --git a/tests/test_iotdata/test_iotdata.py b/tests/test_iotdata/test_iotdata.py index ac0a04244811..caebdbde8090 100644 --- a/tests/test_iotdata/test_iotdata.py +++ b/tests/test_iotdata/test_iotdata.py @@ -3,7 +3,7 @@ import json import boto3 import sure # noqa -from nose.tools import assert_raises +import pytest from botocore.exceptions import ClientError from moto import mock_iotdata, mock_iot @@ -17,7 +17,7 @@ def test_basic(): raw_payload = b'{"state": {"desired": {"led": "on"}}}' iot_client.create_thing(thingName=name) - with assert_raises(ClientError): + with pytest.raises(ClientError): client.get_thing_shadow(thingName=name) res = client.update_thing_shadow(thingName=name, payload=raw_payload) @@ -42,7 +42,7 @@ def test_basic(): payload.should.have.key("timestamp") client.delete_thing_shadow(thingName=name) - with assert_raises(ClientError): + with pytest.raises(ClientError): client.get_thing_shadow(thingName=name) @@ -99,7 +99,7 @@ def test_update(): payload.should.have.key("timestamp") raw_payload = b'{"state": {"desired": {"led": "on"}}, "version": 1}' - with assert_raises(ClientError) as ex: + with pytest.raises(ClientError) as ex: client.update_thing_shadow(thingName=name, payload=raw_payload) ex.exception.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(409) ex.exception.response["Error"]["Message"].should.equal("Version conflict") diff --git a/tests/test_kinesis/__init__.py b/tests/test_kinesis/__init__.py new file mode 100644 index 000000000000..08a1c1568c9c --- /dev/null +++ b/tests/test_kinesis/__init__.py @@ -0,0 +1 @@ +# This file is intentionally left blank. diff --git a/tests/test_kinesisvideo/__init__.py b/tests/test_kinesisvideo/__init__.py new file mode 100644 index 000000000000..08a1c1568c9c --- /dev/null +++ b/tests/test_kinesisvideo/__init__.py @@ -0,0 +1 @@ +# This file is intentionally left blank. 
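The iotdata hunk above asserts on a "Version conflict" message after the fact; pytest.raises can also take a match= regular expression to check the message in one step. A sketch with an illustrative stand-in:

    import pytest

    def update_shadow(version):
        # Illustrative stand-in for update_thing_shadow with a stale version.
        if version < 2:
            raise RuntimeError("Version conflict")

    def test_version_conflict_message():
        # `match` asserts on the exception message as a regex directly,
        # instead of inspecting the exception object afterwards.
        with pytest.raises(RuntimeError, match="Version conflict"):
            update_shadow(version=1)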
diff --git a/tests/test_kinesisvideo/test_kinesisvideo.py b/tests/test_kinesisvideo/test_kinesisvideo.py index de3d9ebbbec7..abd63bbdad08 100644 --- a/tests/test_kinesisvideo/test_kinesisvideo.py +++ b/tests/test_kinesisvideo/test_kinesisvideo.py @@ -2,7 +2,7 @@ import boto3 import sure # noqa -from nose.tools import assert_raises +import pytest from moto import mock_kinesisvideo from botocore.exceptions import ClientError import json @@ -28,7 +28,7 @@ def test_create_stream_with_same_name(): client.create_stream(StreamName=stream_name, DeviceName=device_name) # cannot create with same stream name - with assert_raises(ClientError): + with pytest.raises(ClientError): client.create_stream(StreamName=stream_name, DeviceName=device_name) @@ -43,7 +43,7 @@ def test_describe_stream(): stream_arn = res["StreamARN"] # cannot create with existing stream name - with assert_raises(ClientError): + with pytest.raises(ClientError): client.create_stream(StreamName=stream_name, DeviceName=device_name) # stream can be described with name @@ -69,7 +69,7 @@ def test_describe_stream_with_name_not_exist(): stream_name_not_exist = "not-exist-stream" # cannot describe with not exist stream name - with assert_raises(ClientError): + with pytest.raises(ClientError): client.describe_stream(StreamName=stream_name_not_exist) @@ -123,7 +123,7 @@ def test_delete_stream_with_arn_not_exist(): # cannot delete with not exist stream stream_arn_not_exist = stream_2_arn - with assert_raises(ClientError): + with pytest.raises(ClientError): client.delete_stream(StreamARN=stream_arn_not_exist) diff --git a/tests/test_kinesisvideoarchivedmedia/__init__.py b/tests/test_kinesisvideoarchivedmedia/__init__.py new file mode 100644 index 000000000000..08a1c1568c9c --- /dev/null +++ b/tests/test_kinesisvideoarchivedmedia/__init__.py @@ -0,0 +1 @@ +# This file is intentionally left blank. diff --git a/tests/test_kms/__init__.py b/tests/test_kms/__init__.py new file mode 100644 index 000000000000..08a1c1568c9c --- /dev/null +++ b/tests/test_kms/__init__.py @@ -0,0 +1 @@ +# This file is intentionally left blank. 
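One of the KMS hunks further down passes a tuple of exception classes to pytest.raises, so that any of several failure types satisfies the assertion. A minimal sketch with illustrative exceptions:

    import pytest

    def parse_size(value):
        # Illustrative validator that can fail with either of two types.
        if not isinstance(value, int):
            raise TypeError("size must be an int")
        if value <= 0:
            raise ValueError("size must be positive")
        return value

    def test_invalid_sizes():
        # A tuple of exception classes accepts any of them, mirroring the
        # generate_data_key size-validation hunk below.
        with pytest.raises((TypeError, ValueError)):
            parse_size("big")
        with pytest.raises((TypeError, ValueError)):
            parse_size(0)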
diff --git a/tests/test_kms/test_kms.py b/tests/test_kms/test_kms.py index a04a24a8272e..64c70078e4a7 100644 --- a/tests/test_kms/test_kms.py +++ b/tests/test_kms/test_kms.py @@ -9,7 +9,7 @@ import sure # noqa from boto.exception import JSONResponseError from boto.kms.exceptions import AlreadyExistsException, NotFoundException -from nose.tools import assert_raises +import pytest from parameterized import parameterized from moto.core.exceptions import JsonRESTError from moto.kms.models import KmsBackend @@ -192,10 +192,10 @@ def test_generate_data_key(): response = conn.generate_data_key(key_id=key_id, number_of_bytes=32) # CiphertextBlob must NOT be base64-encoded - with assert_raises(Exception): + with pytest.raises(Exception): base64.b64decode(response["CiphertextBlob"], validate=True) # Plaintext must NOT be base64-encoded - with assert_raises(Exception): + with pytest.raises(Exception): base64.b64decode(response["Plaintext"], validate=True) response["KeyId"].should.equal(key_arn) @@ -364,7 +364,7 @@ def test__create_alias__raises_if_reserved_alias(): ] for alias_name in reserved_aliases: - with assert_raises(JSONResponseError) as err: + with pytest.raises(JSONResponseError) as err: kms.create_alias(alias_name, key_id) ex = err.exception @@ -392,7 +392,7 @@ def test__create_alias__raises_if_wrong_prefix(): create_resp = kms.create_key() key_id = create_resp["KeyMetadata"]["KeyId"] - with assert_raises(JSONResponseError) as err: + with pytest.raises(JSONResponseError) as err: kms.create_alias("wrongprefix/my-alias", key_id) ex = err.exception @@ -415,7 +415,7 @@ def test__create_alias__raises_if_duplicate(): kms.create_alias(alias, key_id) - with assert_raises(AlreadyExistsException) as err: + with pytest.raises(AlreadyExistsException) as err: kms.create_alias(alias, key_id) ex = err.exception @@ -450,7 +450,7 @@ def test__create_alias__raises_if_alias_has_restricted_characters(): ] for alias_name in alias_names_with_restricted_characters: - with assert_raises(JSONResponseError) as err: + with pytest.raises(JSONResponseError) as err: kms.create_alias(alias_name, key_id) ex = err.exception ex.body["__type"].should.equal("ValidationException") @@ -480,7 +480,7 @@ def test__create_alias__raises_if_alias_has_colon_character(): alias_names_with_restricted_characters = ["alias/my:alias"] for alias_name in alias_names_with_restricted_characters: - with assert_raises(JSONResponseError) as err: + with pytest.raises(JSONResponseError) as err: kms.create_alias(alias_name, key_id) ex = err.exception ex.body["__type"].should.equal("ValidationException") @@ -514,7 +514,7 @@ def test__create_alias__raises_if_target_key_id_is_existing_alias(): kms.create_alias(alias, key_id) - with assert_raises(JSONResponseError) as err: + with pytest.raises(JSONResponseError) as err: kms.create_alias(alias, alias) ex = err.exception @@ -554,7 +554,7 @@ def test__delete_alias(): def test__delete_alias__raises_if_wrong_prefix(): kms = boto.connect_kms() - with assert_raises(JSONResponseError) as err: + with pytest.raises(JSONResponseError) as err: kms.delete_alias("wrongprefix/my-alias") ex = err.exception @@ -572,7 +572,7 @@ def test__delete_alias__raises_if_alias_is_not_found(): kms = boto.kms.connect_to_region(region) alias_name = "alias/unexisting-alias" - with assert_raises(NotFoundException) as err: + with pytest.raises(NotFoundException) as err: kms.delete_alias(alias_name) expected_message_match = r"Alias arn:aws:kms:{region}:[0-9]{{12}}:{alias_name} is not found.".format( diff --git 
a/tests/test_kms/test_kms_boto3.py b/tests/test_kms/test_kms_boto3.py index c125c0557080..53f2845759c5 100644 --- a/tests/test_kms/test_kms_boto3.py +++ b/tests/test_kms/test_kms_boto3.py @@ -10,7 +10,7 @@ import six import sure # noqa from freezegun import freeze_time -from nose.tools import assert_raises +import pytest from parameterized import parameterized from moto import mock_kms @@ -132,7 +132,7 @@ def test_describe_key_via_alias_invalid_alias(key_id): client = boto3.client("kms", region_name="us-east-1") client.create_key(Description="key") - with assert_raises(client.exceptions.NotFoundException): + with pytest.raises(client.exceptions.NotFoundException): client.describe_key(KeyId=key_id) @@ -147,10 +147,10 @@ def test_generate_data_key(): response = kms.generate_data_key(KeyId=key_id, NumberOfBytes=32) # CiphertextBlob must NOT be base64-encoded - with assert_raises(Exception): + with pytest.raises(Exception): base64.b64decode(response["CiphertextBlob"], validate=True) # Plaintext must NOT be base64-encoded - with assert_raises(Exception): + with pytest.raises(Exception): base64.b64decode(response["Plaintext"], validate=True) response["KeyId"].should.equal(key_arn) @@ -169,7 +169,7 @@ def test_encrypt(plaintext): response["CiphertextBlob"].should_not.equal(plaintext) # CiphertextBlob must NOT be base64-encoded - with assert_raises(Exception): + with pytest.raises(Exception): base64.b64decode(response["CiphertextBlob"], validate=True) response["KeyId"].should.equal(key_arn) @@ -188,13 +188,13 @@ def test_decrypt(plaintext): client.create_key(Description="key") # CiphertextBlob must NOT be base64-encoded - with assert_raises(Exception): + with pytest.raises(Exception): base64.b64decode(encrypt_response["CiphertextBlob"], validate=True) decrypt_response = client.decrypt(CiphertextBlob=encrypt_response["CiphertextBlob"]) # Plaintext must NOT be base64-encoded - with assert_raises(Exception): + with pytest.raises(Exception): base64.b64decode(decrypt_response["Plaintext"], validate=True) decrypt_response["Plaintext"].should.equal(_get_encoded_value(plaintext)) @@ -216,7 +216,7 @@ def test_decrypt(plaintext): def test_invalid_key_ids(key_id): client = boto3.client("kms", region_name="us-east-1") - with assert_raises(client.exceptions.NotFoundException): + with pytest.raises(client.exceptions.NotFoundException): client.generate_data_key(KeyId=key_id, NumberOfBytes=5) @@ -403,7 +403,7 @@ def test_generate_data_key_invalid_size_params(kwargs): client = boto3.client("kms", region_name="us-east-1") key = client.create_key(Description="generate-data-key-size") - with assert_raises( + with pytest.raises( (botocore.exceptions.ClientError, botocore.exceptions.ParamValidationError) ) as err: client.generate_data_key(KeyId=key["KeyMetadata"]["KeyId"], **kwargs) @@ -423,7 +423,7 @@ def test_generate_data_key_invalid_size_params(kwargs): def test_generate_data_key_invalid_key(key_id): client = boto3.client("kms", region_name="us-east-1") - with assert_raises(client.exceptions.NotFoundException): + with pytest.raises(client.exceptions.NotFoundException): client.generate_data_key(KeyId=key_id, KeySpec="AES_256") @@ -485,7 +485,7 @@ def test_re_encrypt_decrypt(plaintext): ) # CiphertextBlob must NOT be base64-encoded - with assert_raises(Exception): + with pytest.raises(Exception): base64.b64decode(re_encrypt_response["CiphertextBlob"], validate=True) re_encrypt_response["SourceKeyId"].should.equal(key_1_arn) @@ -517,7 +517,7 @@ def test_re_encrypt_to_invalid_destination(): encrypt_response = 
client.encrypt(KeyId=key_id, Plaintext=b"some plaintext") - with assert_raises(client.exceptions.NotFoundException): + with pytest.raises(client.exceptions.NotFoundException): client.re_encrypt( CiphertextBlob=encrypt_response["CiphertextBlob"], DestinationKeyId="alias/DoesNotExist", @@ -548,7 +548,7 @@ def test_generate_random(number_of_bytes): def test_generate_random_invalid_number_of_bytes(number_of_bytes, error_type): client = boto3.client("kms", region_name="us-west-2") - with assert_raises(error_type): + with pytest.raises(error_type): client.generate_random(NumberOfBytes=number_of_bytes) @@ -556,7 +556,7 @@ def test_generate_random_invalid_number_of_bytes(number_of_bytes, error_type): def test_enable_key_rotation_key_not_found(): client = boto3.client("kms", region_name="us-east-1") - with assert_raises(client.exceptions.NotFoundException): + with pytest.raises(client.exceptions.NotFoundException): client.enable_key_rotation(KeyId="12366f9b-1230-123d-123e-123e6ae60c02") @@ -564,7 +564,7 @@ def test_enable_key_rotation_key_not_found(): def test_disable_key_rotation_key_not_found(): client = boto3.client("kms", region_name="us-east-1") - with assert_raises(client.exceptions.NotFoundException): + with pytest.raises(client.exceptions.NotFoundException): client.disable_key_rotation(KeyId="12366f9b-1230-123d-123e-123e6ae60c02") @@ -572,7 +572,7 @@ def test_disable_key_rotation_key_not_found(): def test_enable_key_key_not_found(): client = boto3.client("kms", region_name="us-east-1") - with assert_raises(client.exceptions.NotFoundException): + with pytest.raises(client.exceptions.NotFoundException): client.enable_key(KeyId="12366f9b-1230-123d-123e-123e6ae60c02") @@ -580,7 +580,7 @@ def test_enable_key_key_not_found(): def test_disable_key_key_not_found(): client = boto3.client("kms", region_name="us-east-1") - with assert_raises(client.exceptions.NotFoundException): + with pytest.raises(client.exceptions.NotFoundException): client.disable_key(KeyId="12366f9b-1230-123d-123e-123e6ae60c02") @@ -588,7 +588,7 @@ def test_disable_key_key_not_found(): def test_cancel_key_deletion_key_not_found(): client = boto3.client("kms", region_name="us-east-1") - with assert_raises(client.exceptions.NotFoundException): + with pytest.raises(client.exceptions.NotFoundException): client.cancel_key_deletion(KeyId="12366f9b-1230-123d-123e-123e6ae60c02") @@ -596,7 +596,7 @@ def test_cancel_key_deletion_key_not_found(): def test_schedule_key_deletion_key_not_found(): client = boto3.client("kms", region_name="us-east-1") - with assert_raises(client.exceptions.NotFoundException): + with pytest.raises(client.exceptions.NotFoundException): client.schedule_key_deletion(KeyId="12366f9b-1230-123d-123e-123e6ae60c02") @@ -604,7 +604,7 @@ def test_schedule_key_deletion_key_not_found(): def test_get_key_rotation_status_key_not_found(): client = boto3.client("kms", region_name="us-east-1") - with assert_raises(client.exceptions.NotFoundException): + with pytest.raises(client.exceptions.NotFoundException): client.get_key_rotation_status(KeyId="12366f9b-1230-123d-123e-123e6ae60c02") @@ -612,7 +612,7 @@ def test_get_key_rotation_status_key_not_found(): def test_get_key_policy_key_not_found(): client = boto3.client("kms", region_name="us-east-1") - with assert_raises(client.exceptions.NotFoundException): + with pytest.raises(client.exceptions.NotFoundException): client.get_key_policy( KeyId="12366f9b-1230-123d-123e-123e6ae60c02", PolicyName="default" ) @@ -622,7 +622,7 @@ def test_get_key_policy_key_not_found(): def 
test_list_key_policies_key_not_found(): client = boto3.client("kms", region_name="us-east-1") - with assert_raises(client.exceptions.NotFoundException): + with pytest.raises(client.exceptions.NotFoundException): client.list_key_policies(KeyId="12366f9b-1230-123d-123e-123e6ae60c02") @@ -630,7 +630,7 @@ def test_list_key_policies_key_not_found(): def test_put_key_policy_key_not_found(): client = boto3.client("kms", region_name="us-east-1") - with assert_raises(client.exceptions.NotFoundException): + with pytest.raises(client.exceptions.NotFoundException): client.put_key_policy( KeyId="00000000-0000-0000-0000-000000000000", PolicyName="default", diff --git a/tests/test_kms/test_utils.py b/tests/test_kms/test_utils.py index 4446635f318f..fa402b6b96ba 100644 --- a/tests/test_kms/test_utils.py +++ b/tests/test_kms/test_utils.py @@ -1,7 +1,7 @@ from __future__ import unicode_literals import sure # noqa -from nose.tools import assert_raises +import pytest from parameterized import parameterized from moto.kms.exceptions import ( @@ -123,7 +123,7 @@ def test_encrypt_decrypt_cycle(encryption_context): def test_encrypt_unknown_key_id(): - with assert_raises(NotFoundException): + with pytest.raises(NotFoundException): encrypt( master_keys={}, key_id="anything", @@ -136,7 +136,7 @@ def test_decrypt_invalid_ciphertext_format(): master_key = Key("nop", "nop", "nop", "nop", "nop") master_key_map = {master_key.id: master_key} - with assert_raises(InvalidCiphertextException): + with pytest.raises(InvalidCiphertextException): decrypt(master_keys=master_key_map, ciphertext_blob=b"", encryption_context={}) @@ -148,7 +148,7 @@ def test_decrypt_unknwown_key_id(): b"some ciphertext" ) - with assert_raises(AccessDeniedException): + with pytest.raises(AccessDeniedException): decrypt(master_keys={}, ciphertext_blob=ciphertext_blob, encryption_context={}) @@ -161,7 +161,7 @@ def test_decrypt_invalid_ciphertext(): b"some ciphertext" ) - with assert_raises(InvalidCiphertextException): + with pytest.raises(InvalidCiphertextException): decrypt( master_keys=master_key_map, ciphertext_blob=ciphertext_blob, @@ -181,7 +181,7 @@ def test_decrypt_invalid_encryption_context(): encryption_context={"some": "encryption", "context": "here"}, ) - with assert_raises(InvalidCiphertextException): + with pytest.raises(InvalidCiphertextException): decrypt( master_keys=master_key_map, ciphertext_blob=ciphertext_blob, diff --git a/tests/test_logs/__init__.py b/tests/test_logs/__init__.py new file mode 100644 index 000000000000..08a1c1568c9c --- /dev/null +++ b/tests/test_logs/__init__.py @@ -0,0 +1 @@ +# This file is intentionally left blank. 
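The KMS test modules above keep the third-party parameterized decorator; pytest ships an equivalent in pytest.mark.parametrize. A small sketch of the built-in form, using an illustrative base64 round-trip:

    import base64
    import pytest

    @pytest.mark.parametrize(
        "plaintext", [b"hello", b"some longer plaintext", b""]
    )
    def test_b64_round_trip(plaintext):
        # Each parameter value becomes its own collected test case.
        encoded = base64.b64encode(plaintext)
        assert base64.b64decode(encoded) == plaintext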
diff --git a/tests/test_logs/test_logs.py b/tests/test_logs/test_logs.py index 648d561aa90a..f693aeb1e294 100644 --- a/tests/test_logs/test_logs.py +++ b/tests/test_logs/test_logs.py @@ -5,8 +5,8 @@ from botocore.exceptions import ClientError from moto import mock_logs, settings -from nose.tools import assert_raises -from nose import SkipTest +import pytest +from unittest import SkipTest _logs_region = "us-east-1" if settings.TEST_SERVER_MODE else "us-west-2" @@ -28,13 +28,13 @@ def test_exceptions(): log_group_name = "dummy" log_stream_name = "dummp-stream" conn.create_log_group(logGroupName=log_group_name) - with assert_raises(ClientError): + with pytest.raises(ClientError): conn.create_log_group(logGroupName=log_group_name) # descrine_log_groups is not implemented yet conn.create_log_stream(logGroupName=log_group_name, logStreamName=log_stream_name) - with assert_raises(ClientError): + with pytest.raises(ClientError): conn.create_log_stream( logGroupName=log_group_name, logStreamName=log_stream_name ) @@ -45,7 +45,7 @@ def test_exceptions(): logEvents=[{"timestamp": 0, "message": "line"}], ) - with assert_raises(ClientError): + with pytest.raises(ClientError): conn.put_log_events( logGroupName=log_group_name, logStreamName="invalid-stream", @@ -117,7 +117,7 @@ def test_filter_logs_raises_if_filter_pattern(): conn.put_log_events( logGroupName=log_group_name, logStreamName=log_stream_name, logEvents=messages ) - with assert_raises(NotImplementedError): + with pytest.raises(NotImplementedError): conn.filter_log_events( logGroupName=log_group_name, logStreamNames=[log_stream_name], @@ -332,7 +332,7 @@ def test_get_log_events_errors(): client.create_log_group(logGroupName=log_group_name) client.create_log_stream(logGroupName=log_group_name, logStreamName=log_stream_name) - with assert_raises(ClientError) as e: + with pytest.raises(ClientError) as e: client.get_log_events( logGroupName=log_group_name, logStreamName=log_stream_name, @@ -346,7 +346,7 @@ def test_get_log_events_errors(): "The specified nextToken is invalid." ) - with assert_raises(ClientError) as e: + with pytest.raises(ClientError) as e: client.get_log_events( logGroupName=log_group_name, logStreamName=log_stream_name, @@ -447,7 +447,7 @@ def test_describe_subscription_filters_errors(): client = boto3.client("logs", "us-east-1") # when - with assert_raises(ClientError) as e: + with pytest.raises(ClientError) as e: client.describe_subscription_filters(logGroupName="not-existing-log-group",) # then diff --git a/tests/test_managedblockchain/test_managedblockchain_proposalvotes.py b/tests/test_managedblockchain/test_managedblockchain_proposalvotes.py index eda72839843b..e8f4043d5536 100644 --- a/tests/test_managedblockchain/test_managedblockchain_proposalvotes.py +++ b/tests/test_managedblockchain/test_managedblockchain_proposalvotes.py @@ -5,7 +5,7 @@ import boto3 import sure # noqa from freezegun import freeze_time -from nose import SkipTest +from unittest import SkipTest from moto import mock_managedblockchain, settings from . import helpers diff --git a/tests/test_opsworks/__init__.py b/tests/test_opsworks/__init__.py new file mode 100644 index 000000000000..08a1c1568c9c --- /dev/null +++ b/tests/test_opsworks/__init__.py @@ -0,0 +1 @@ +# This file is intentionally left blank. 
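Alongside the assertion rewrites, the patch drops empty __init__.py files into test directories (tests/test_logs and tests/test_opsworks above, more below). A plausible reading, assuming pytest's default import mode: with an __init__.py present, each test module is imported under a package-qualified name (tests.test_logs.test_logs rather than a bare test_logs), so same-named test files in different directories no longer collide during collection. A small sketch, assuming pytest is installed and the command runs from the repository root:

import pytest

if __name__ == "__main__":
    # Collect-only: prints each test's fully qualified id without running it,
    # which makes the package-qualified module names visible.
    pytest.main(["--collect-only", "-q", "tests/test_logs", "tests/test_opsworks"])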
diff --git a/tests/test_organizations/test_organizations_boto3.py b/tests/test_organizations/test_organizations_boto3.py index 65f9640820ef..1d2ef371546c 100644 --- a/tests/test_organizations/test_organizations_boto3.py +++ b/tests/test_organizations/test_organizations_boto3.py @@ -7,7 +7,7 @@ import six import sure # noqa from botocore.exceptions import ClientError -from nose.tools import assert_raises +import pytest from moto import mock_organizations from moto.core import ACCOUNT_ID @@ -61,7 +61,7 @@ def test_describe_organization(): @mock_organizations def test_describe_organization_exception(): client = boto3.client("organizations", region_name="us-east-1") - with assert_raises(ClientError) as e: + with pytest.raises(ClientError) as e: response = client.describe_organization() ex = e.exception ex.operation_name.should.equal("DescribeOrganization") @@ -110,7 +110,7 @@ def test_describe_organizational_unit(): def test_describe_organizational_unit_exception(): client = boto3.client("organizations", region_name="us-east-1") org = client.create_organization(FeatureSet="ALL")["Organization"] - with assert_raises(ClientError) as e: + with pytest.raises(ClientError) as e: response = client.describe_organizational_unit( OrganizationalUnitId=utils.make_random_root_id() ) @@ -139,7 +139,7 @@ def test_list_organizational_units_for_parent(): @mock_organizations def test_list_organizational_units_for_parent_exception(): client = boto3.client("organizations", region_name="us-east-1") - with assert_raises(ClientError) as e: + with pytest.raises(ClientError) as e: response = client.list_organizational_units_for_parent( ParentId=utils.make_random_root_id() ) @@ -193,7 +193,7 @@ def test_describe_account(): @mock_organizations def test_describe_account_exception(): client = boto3.client("organizations", region_name="us-east-1") - with assert_raises(ClientError) as e: + with pytest.raises(ClientError) as e: response = client.describe_account(AccountId=utils.make_random_account_id()) ex = e.exception ex.operation_name.should.equal("DescribeAccount") @@ -335,7 +335,7 @@ def test_list_children_exception(): client = boto3.client("organizations", region_name="us-east-1") org = client.create_organization(FeatureSet="ALL")["Organization"] root_id = client.list_roots()["Roots"][0]["Id"] - with assert_raises(ClientError) as e: + with pytest.raises(ClientError) as e: response = client.list_children( ParentId=utils.make_random_root_id(), ChildType="ACCOUNT" ) @@ -343,7 +343,7 @@ def test_list_children_exception(): ex.operation_name.should.equal("ListChildren") ex.response["Error"]["Code"].should.equal("400") ex.response["Error"]["Message"].should.contain("ParentNotFoundException") - with assert_raises(ClientError) as e: + with pytest.raises(ClientError) as e: response = client.list_children(ParentId=root_id, ChildType="BLEE") ex = e.exception ex.operation_name.should.equal("ListChildren") @@ -387,7 +387,7 @@ def test_create_policy_errors(): # invalid policy type # when - with assert_raises(ClientError) as e: + with pytest.raises(ClientError) as e: client.create_policy( Content=json.dumps(policy_doc01), Description="moto", @@ -427,13 +427,13 @@ def test_describe_policy_exception(): client = boto3.client("organizations", region_name="us-east-1") client.create_organization(FeatureSet="ALL")["Organization"] policy_id = "p-47fhe9s3" - with assert_raises(ClientError) as e: + with pytest.raises(ClientError) as e: response = client.describe_policy(PolicyId=policy_id) ex = e.exception 
ex.operation_name.should.equal("DescribePolicy") ex.response["Error"]["Code"].should.equal("400") ex.response["Error"]["Message"].should.contain("PolicyNotFoundException") - with assert_raises(ClientError) as e: + with pytest.raises(ClientError) as e: response = client.describe_policy(PolicyId="meaninglessstring") ex = e.exception ex.operation_name.should.equal("DescribePolicy") @@ -626,7 +626,7 @@ def test_delete_policy_exception(): client = boto3.client("organizations", region_name="us-east-1") org = client.create_organization(FeatureSet="ALL")["Organization"] non_existent_policy_id = utils.make_random_policy_id() - with assert_raises(ClientError) as e: + with pytest.raises(ClientError) as e: response = client.delete_policy(PolicyId=non_existent_policy_id) ex = e.exception ex.operation_name.should.equal("DeletePolicy") @@ -642,7 +642,7 @@ def test_delete_policy_exception(): )["Policy"]["PolicySummary"]["Id"] root_id = client.list_roots()["Roots"][0]["Id"] client.attach_policy(PolicyId=policy_id, TargetId=root_id) - with assert_raises(ClientError) as e: + with pytest.raises(ClientError) as e: response = client.delete_policy(PolicyId=policy_id) ex = e.exception ex.operation_name.should.equal("DeletePolicy") @@ -663,7 +663,7 @@ def test_attach_policy_exception(): Name="MockServiceControlPolicy", Type="SERVICE_CONTROL_POLICY", )["Policy"]["PolicySummary"]["Id"] - with assert_raises(ClientError) as e: + with pytest.raises(ClientError) as e: response = client.attach_policy(PolicyId=policy_id, TargetId=root_id) ex = e.exception ex.operation_name.should.equal("AttachPolicy") @@ -671,7 +671,7 @@ def test_attach_policy_exception(): ex.response["Error"]["Message"].should.contain( "OrganizationalUnitNotFoundException" ) - with assert_raises(ClientError) as e: + with pytest.raises(ClientError) as e: response = client.attach_policy(PolicyId=policy_id, TargetId=ou_id) ex = e.exception ex.operation_name.should.equal("AttachPolicy") @@ -679,7 +679,7 @@ def test_attach_policy_exception(): ex.response["Error"]["Message"].should.contain( "OrganizationalUnitNotFoundException" ) - with assert_raises(ClientError) as e: + with pytest.raises(ClientError) as e: response = client.attach_policy(PolicyId=policy_id, TargetId=account_id) ex = e.exception ex.operation_name.should.equal("AttachPolicy") @@ -688,7 +688,7 @@ def test_attach_policy_exception(): ex.response["Error"]["Message"].should.equal( "You specified an account that doesn't exist." 
) - with assert_raises(ClientError) as e: + with pytest.raises(ClientError) as e: response = client.attach_policy( PolicyId=policy_id, TargetId="meaninglessstring" ) @@ -729,7 +729,7 @@ def test_update_policy_exception(): client = boto3.client("organizations", region_name="us-east-1") org = client.create_organization(FeatureSet="ALL")["Organization"] non_existent_policy_id = utils.make_random_policy_id() - with assert_raises(ClientError) as e: + with pytest.raises(ClientError) as e: response = client.update_policy(PolicyId=non_existent_policy_id) ex = e.exception ex.operation_name.should.equal("UpdatePolicy") @@ -791,7 +791,7 @@ def test_list_policies_for_target_exception(): root_id = client.list_roots()["Roots"][0]["Id"] ou_id = "ou-gi99-i7r8eh2i2" account_id = "126644886543" - with assert_raises(ClientError) as e: + with pytest.raises(ClientError) as e: response = client.list_policies_for_target( TargetId=ou_id, Filter="SERVICE_CONTROL_POLICY" ) @@ -801,7 +801,7 @@ def test_list_policies_for_target_exception(): ex.response["Error"]["Message"].should.contain( "OrganizationalUnitNotFoundException" ) - with assert_raises(ClientError) as e: + with pytest.raises(ClientError) as e: response = client.list_policies_for_target( TargetId=account_id, Filter="SERVICE_CONTROL_POLICY" ) @@ -812,7 +812,7 @@ def test_list_policies_for_target_exception(): ex.response["Error"]["Message"].should.equal( "You specified an account that doesn't exist." ) - with assert_raises(ClientError) as e: + with pytest.raises(ClientError) as e: response = client.list_policies_for_target( TargetId="meaninglessstring", Filter="SERVICE_CONTROL_POLICY" ) @@ -824,7 +824,7 @@ def test_list_policies_for_target_exception(): # not existing root # when - with assert_raises(ClientError) as e: + with pytest.raises(ClientError) as e: client.list_policies_for_target( TargetId="r-0000", Filter="SERVICE_CONTROL_POLICY" ) @@ -840,7 +840,7 @@ def test_list_policies_for_target_exception(): # invalid policy type # when - with assert_raises(ClientError) as e: + with pytest.raises(ClientError) as e: client.list_policies_for_target(TargetId=root_id, Filter="MOTO") # then @@ -887,13 +887,13 @@ def test_list_targets_for_policy_exception(): client = boto3.client("organizations", region_name="us-east-1") client.create_organization(FeatureSet="ALL")["Organization"] policy_id = "p-47fhe9s3" - with assert_raises(ClientError) as e: + with pytest.raises(ClientError) as e: response = client.list_targets_for_policy(PolicyId=policy_id) ex = e.exception ex.operation_name.should.equal("ListTargetsForPolicy") ex.response["Error"]["Code"].should.equal("400") ex.response["Error"]["Message"].should.contain("PolicyNotFoundException") - with assert_raises(ClientError) as e: + with pytest.raises(ClientError) as e: response = client.list_targets_for_policy(PolicyId="meaninglessstring") ex = e.exception ex.operation_name.should.equal("ListTargetsForPolicy") @@ -929,7 +929,7 @@ def test_tag_resource_errors(): client = boto3.client("organizations", region_name="us-east-1") client.create_organization(FeatureSet="ALL") - with assert_raises(ClientError) as e: + with pytest.raises(ClientError) as e: client.tag_resource( ResourceId="000000000000", Tags=[{"Key": "key", "Value": "value"},], ) @@ -961,7 +961,7 @@ def test_list_tags_for_resource_errors(): client = boto3.client("organizations", region_name="us-east-1") client.create_organization(FeatureSet="ALL") - with assert_raises(ClientError) as e: + with pytest.raises(ClientError) as e: 
client.list_tags_for_resource(ResourceId="000000000000") ex = e.exception ex.operation_name.should.equal("ListTagsForResource") @@ -998,7 +998,7 @@ def test_untag_resource_errors(): client = boto3.client("organizations", region_name="us-east-1") client.create_organization(FeatureSet="ALL") - with assert_raises(ClientError) as e: + with pytest.raises(ClientError) as e: client.untag_resource(ResourceId="000000000000", TagKeys=["key"]) ex = e.exception ex.operation_name.should.equal("UntagResource") @@ -1035,7 +1035,7 @@ def test_update_organizational_unit_duplicate_error(): response = client.create_organizational_unit(ParentId=root_id, Name=ou_name) validate_organizational_unit(org, response) response["OrganizationalUnit"]["Name"].should.equal(ou_name) - with assert_raises(ClientError) as e: + with pytest.raises(ClientError) as e: client.update_organizational_unit( OrganizationalUnitId=response["OrganizationalUnit"]["Id"], Name=ou_name ) @@ -1081,7 +1081,7 @@ def test_enable_aws_service_access(): client = boto3.client("organizations", region_name="us-east-1") client.create_organization(FeatureSet="ALL") - with assert_raises(ClientError) as e: + with pytest.raises(ClientError) as e: client.enable_aws_service_access(ServicePrincipal="moto.amazonaws.com") ex = e.exception ex.operation_name.should.equal("EnableAWSServiceAccess") @@ -1142,7 +1142,7 @@ def test_disable_aws_service_access_errors(): client = boto3.client("organizations", region_name="us-east-1") client.create_organization(FeatureSet="ALL") - with assert_raises(ClientError) as e: + with pytest.raises(ClientError) as e: client.disable_aws_service_access(ServicePrincipal="moto.amazonaws.com") ex = e.exception ex.operation_name.should.equal("DisableAWSServiceAccess") @@ -1199,7 +1199,7 @@ def test_register_delegated_administrator_errors(): # register master Account # when - with assert_raises(ClientError) as e: + with pytest.raises(ClientError) as e: client.register_delegated_administrator( AccountId=ACCOUNT_ID, ServicePrincipal="ssm.amazonaws.com" ) @@ -1215,7 +1215,7 @@ def test_register_delegated_administrator_errors(): # register not existing Account # when - with assert_raises(ClientError) as e: + with pytest.raises(ClientError) as e: client.register_delegated_administrator( AccountId="000000000000", ServicePrincipal="ssm.amazonaws.com" ) @@ -1231,7 +1231,7 @@ def test_register_delegated_administrator_errors(): # register not supported service # when - with assert_raises(ClientError) as e: + with pytest.raises(ClientError) as e: client.register_delegated_administrator( AccountId=account_id, ServicePrincipal="moto.amazonaws.com" ) @@ -1247,7 +1247,7 @@ def test_register_delegated_administrator_errors(): # register service again # when - with assert_raises(ClientError) as e: + with pytest.raises(ClientError) as e: client.register_delegated_administrator( AccountId=account_id, ServicePrincipal="ssm.amazonaws.com" ) @@ -1319,7 +1319,7 @@ def test_list_delegated_administrators_erros(): # list not supported service # when - with assert_raises(ClientError) as e: + with pytest.raises(ClientError) as e: client.list_delegated_administrators(ServicePrincipal="moto.amazonaws.com") # then @@ -1365,7 +1365,7 @@ def test_list_delegated_services_for_account_erros(): # list services for not existing Account # when - with assert_raises(ClientError) as e: + with pytest.raises(ClientError) as e: client.list_delegated_services_for_account(AccountId="000000000000") # then @@ -1379,7 +1379,7 @@ def test_list_delegated_services_for_account_erros(): # list 
services for not registered Account # when - with assert_raises(ClientError) as e: + with pytest.raises(ClientError) as e: client.list_delegated_services_for_account(AccountId=ACCOUNT_ID) # then @@ -1425,7 +1425,7 @@ def test_deregister_delegated_administrator_erros(): # deregister master Account # when - with assert_raises(ClientError) as e: + with pytest.raises(ClientError) as e: client.deregister_delegated_administrator( AccountId=ACCOUNT_ID, ServicePrincipal="ssm.amazonaws.com" ) @@ -1441,7 +1441,7 @@ def test_deregister_delegated_administrator_erros(): # deregister not existing Account # when - with assert_raises(ClientError) as e: + with pytest.raises(ClientError) as e: client.deregister_delegated_administrator( AccountId="000000000000", ServicePrincipal="ssm.amazonaws.com" ) @@ -1457,7 +1457,7 @@ def test_deregister_delegated_administrator_erros(): # deregister not registered Account # when - with assert_raises(ClientError) as e: + with pytest.raises(ClientError) as e: client.deregister_delegated_administrator( AccountId=account_id, ServicePrincipal="ssm.amazonaws.com" ) @@ -1478,7 +1478,7 @@ def test_deregister_delegated_administrator_erros(): # deregister not registered service # when - with assert_raises(ClientError) as e: + with pytest.raises(ClientError) as e: client.deregister_delegated_administrator( AccountId=account_id, ServicePrincipal="guardduty.amazonaws.com" ) @@ -1529,7 +1529,7 @@ def test_enable_policy_type_errors(): # not existing root # when - with assert_raises(ClientError) as e: + with pytest.raises(ClientError) as e: client.enable_policy_type( RootId="r-0000", PolicyType="AISERVICES_OPT_OUT_POLICY" ) @@ -1545,7 +1545,7 @@ def test_enable_policy_type_errors(): # enable policy again ('SERVICE_CONTROL_POLICY' is enabled by default) # when - with assert_raises(ClientError) as e: + with pytest.raises(ClientError) as e: client.enable_policy_type(RootId=root_id, PolicyType="SERVICE_CONTROL_POLICY") # then @@ -1559,7 +1559,7 @@ def test_enable_policy_type_errors(): # invalid policy type # when - with assert_raises(ClientError) as e: + with pytest.raises(ClientError) as e: client.enable_policy_type(RootId=root_id, PolicyType="MOTO") # then @@ -1604,7 +1604,7 @@ def test_disable_policy_type_errors(): # not existing root # when - with assert_raises(ClientError) as e: + with pytest.raises(ClientError) as e: client.disable_policy_type( RootId="r-0000", PolicyType="AISERVICES_OPT_OUT_POLICY" ) @@ -1620,7 +1620,7 @@ def test_disable_policy_type_errors(): # disable not enabled policy # when - with assert_raises(ClientError) as e: + with pytest.raises(ClientError) as e: client.disable_policy_type( RootId=root_id, PolicyType="AISERVICES_OPT_OUT_POLICY" ) @@ -1636,7 +1636,7 @@ def test_disable_policy_type_errors(): # invalid policy type # when - with assert_raises(ClientError) as e: + with pytest.raises(ClientError) as e: client.disable_policy_type(RootId=root_id, PolicyType="MOTO") # then diff --git a/tests/test_packages/__init__.py b/tests/test_packages/__init__.py index 05b1d476b3d6..01fe5ab1fb55 100644 --- a/tests/test_packages/__init__.py +++ b/tests/test_packages/__init__.py @@ -6,4 +6,3 @@ logging.getLogger("boto").setLevel(logging.CRITICAL) logging.getLogger("boto3").setLevel(logging.CRITICAL) logging.getLogger("botocore").setLevel(logging.CRITICAL) -logging.getLogger("nose").setLevel(logging.CRITICAL) diff --git a/tests/test_polly/__init__.py b/tests/test_polly/__init__.py new file mode 100644 index 000000000000..08a1c1568c9c --- /dev/null +++ 
b/tests/test_polly/__init__.py @@ -0,0 +1 @@ +# This file is intentionally left blank. diff --git a/tests/test_polly/test_polly.py b/tests/test_polly/test_polly.py index 5428cdeb7b0b..6c99d0538ba9 100644 --- a/tests/test_polly/test_polly.py +++ b/tests/test_polly/test_polly.py @@ -3,19 +3,19 @@ from botocore.exceptions import ClientError import boto3 import sure # noqa -from nose.tools import assert_raises +import pytest from moto import mock_polly # Polly only available in a few regions DEFAULT_REGION = "eu-west-1" LEXICON_XML = """ - W3C diff --git a/tests/test_ram/test_ram.py b/tests/test_ram/test_ram.py index 62422192958e..dbc57a2c06e7 100644 --- a/tests/test_ram/test_ram.py +++ b/tests/test_ram/test_ram.py @@ -4,7 +4,7 @@ import boto3 import sure # noqa from botocore.exceptions import ClientError -from nose.tools import assert_raises +import pytest from moto import mock_ram, mock_organizations from moto.core import ACCOUNT_ID @@ -65,7 +65,7 @@ def test_create_resource_share_errors(): # invalid ARN # when - with assert_raises(ClientError) as e: + with pytest.raises(ClientError) as e: client.create_resource_share(name="test", resourceArns=["inalid-arn"]) ex = e.exception ex.operation_name.should.equal("CreateResourceShare") @@ -78,7 +78,7 @@ def test_create_resource_share_errors(): # valid ARN, but not shareable resource type # when - with assert_raises(ClientError) as e: + with pytest.raises(ClientError) as e: client.create_resource_share( name="test", resourceArns=["arn:aws:iam::{}:role/test".format(ACCOUNT_ID)] ) @@ -92,7 +92,7 @@ def test_create_resource_share_errors(): # invalid principal ID # when - with assert_raises(ClientError) as e: + with pytest.raises(ClientError) as e: client.create_resource_share( name="test", principals=["invalid"], @@ -162,7 +162,7 @@ def test_create_resource_share_with_organization_errors(): # unknown Organization # when - with assert_raises(ClientError) as e: + with pytest.raises(ClientError) as e: client.create_resource_share( name="test", principals=[ @@ -184,7 +184,7 @@ def test_create_resource_share_with_organization_errors(): # unknown OU # when - with assert_raises(ClientError) as e: + with pytest.raises(ClientError) as e: client.create_resource_share( name="test", principals=[ @@ -236,7 +236,7 @@ def test_get_resource_shares_errors(): # invalid resource owner # when - with assert_raises(ClientError) as e: + with pytest.raises(ClientError) as e: client.get_resource_shares(resourceOwner="invalid") ex = e.exception ex.operation_name.should.equal("GetResourceShares") @@ -282,7 +282,7 @@ def test_update_resource_share_errors(): # invalid resource owner # when - with assert_raises(ClientError) as e: + with pytest.raises(ClientError) as e: client.update_resource_share( resourceShareArn="arn:aws:ram:us-east-1:{}:resource-share/not-existing".format( ACCOUNT_ID @@ -328,7 +328,7 @@ def test_delete_resource_share_errors(): # invalid resource owner # when - with assert_raises(ClientError) as e: + with pytest.raises(ClientError) as e: client.delete_resource_share( resourceShareArn="arn:aws:ram:us-east-1:{}:resource-share/not-existing".format( ACCOUNT_ID @@ -368,7 +368,7 @@ def test_enable_sharing_with_aws_organization_errors(): # no Organization defined # when - with assert_raises(ClientError) as e: + with pytest.raises(ClientError) as e: client.enable_sharing_with_aws_organization() ex = e.exception ex.operation_name.should.equal("EnableSharingWithAwsOrganization") diff --git a/tests/test_rds/__init__.py b/tests/test_rds/__init__.py new file mode 100644 
index 000000000000..08a1c1568c9c --- /dev/null +++ b/tests/test_rds/__init__.py @@ -0,0 +1 @@ +# This file is intentionally left blank. diff --git a/tests/test_rds2/__init__.py b/tests/test_rds2/__init__.py new file mode 100644 index 000000000000..08a1c1568c9c --- /dev/null +++ b/tests/test_rds2/__init__.py @@ -0,0 +1 @@ +# This file is intentionally left blank. diff --git a/tests/test_redshift/__init__.py b/tests/test_redshift/__init__.py new file mode 100644 index 000000000000..08a1c1568c9c --- /dev/null +++ b/tests/test_redshift/__init__.py @@ -0,0 +1 @@ +# This file is intentionally left blank. diff --git a/tests/test_route53/__init__.py b/tests/test_route53/__init__.py new file mode 100644 index 000000000000..08a1c1568c9c --- /dev/null +++ b/tests/test_route53/__init__.py @@ -0,0 +1 @@ +# This file is intentionally left blank. diff --git a/tests/test_route53/test_route53.py b/tests/test_route53/test_route53.py index 8c036441c4f0..68436a40e643 100644 --- a/tests/test_route53/test_route53.py +++ b/tests/test_route53/test_route53.py @@ -10,7 +10,7 @@ import uuid import botocore -from nose.tools import assert_raises +import pytest from moto import mock_route53, mock_route53_deprecated @@ -855,7 +855,7 @@ def test_change_resource_record_invalid(): ], } - with assert_raises(botocore.exceptions.ClientError): + with pytest.raises(botocore.exceptions.ClientError): conn.change_resource_record_sets( HostedZoneId=hosted_zone_id, ChangeBatch=invalid_a_record_payload ) @@ -878,7 +878,7 @@ def test_change_resource_record_invalid(): ], } - with assert_raises(botocore.exceptions.ClientError): + with pytest.raises(botocore.exceptions.ClientError): conn.change_resource_record_sets( HostedZoneId=hosted_zone_id, ChangeBatch=invalid_cname_record_payload ) diff --git a/tests/test_s3/test_s3.py b/tests/test_s3/test_s3.py index d8f08e9ef4c7..933d02c6d0da 100644 --- a/tests/test_s3/test_s3.py +++ b/tests/test_s3/test_s3.py @@ -27,10 +27,9 @@ from parameterized import parameterized import six import requests -import tests.backport_assert_raises # noqa from moto.s3.responses import DEFAULT_REGION_NAME -from nose import SkipTest -from nose.tools import assert_raises +from unittest import SkipTest +import pytest import sure # noqa @@ -523,7 +522,7 @@ def test_create_existing_bucket(): "Trying to create a bucket that already exists should raise an Error" conn = boto.s3.connect_to_region("us-west-2") conn.create_bucket("foobar", location="us-west-2") - with assert_raises(S3CreateError): + with pytest.raises(S3CreateError): conn.create_bucket("foobar", location="us-west-2") @@ -665,7 +664,7 @@ def test_delete_keys_invalid(): @mock_s3 def test_boto3_delete_empty_keys_list(): - with assert_raises(ClientError) as err: + with pytest.raises(ClientError) as err: boto3.client("s3").delete_objects(Bucket="foobar", Delete={"Objects": []}) assert err.exception.response["Error"]["Code"] == "MalformedXML" @@ -1015,7 +1014,7 @@ def test_s3_object_in_public_bucket(): bucket.put_object(ACL="private", Body=b"ABCD", Key="file.txt") - with assert_raises(ClientError) as exc: + with pytest.raises(ClientError) as exc: s3_anonymous.Object(key="file.txt", bucket_name="test-bucket").get() exc.exception.response["Error"]["Code"].should.equal("403") @@ -1089,7 +1088,7 @@ def test_s3_object_in_private_bucket(): s3_anonymous = boto3.resource("s3") s3_anonymous.meta.client.meta.events.register("choose-signer.s3.*", disable_signing) - with assert_raises(ClientError) as exc: + with pytest.raises(ClientError) as exc: 
s3_anonymous.Object(key="file.txt", bucket_name="test-bucket").get() exc.exception.response["Error"]["Code"].should.equal("403") @@ -1181,7 +1180,7 @@ def test_s3_location_should_error_outside_useast1(): bucket_name = "asdfasdfsdfdsfasda" - with assert_raises(ClientError) as e: + with pytest.raises(ClientError) as e: s3.create_bucket(Bucket=bucket_name) e.exception.response["Error"]["Message"].should.equal( "The unspecified location constraint is incompatible for the region specific endpoint this request was sent to." @@ -1200,12 +1199,12 @@ def test_get_public_access_block_for_account(): client = boto3.client("s3control", region_name="us-west-2") # With an invalid account ID: - with assert_raises(ClientError) as ce: + with pytest.raises(ClientError) as ce: client.get_public_access_block(AccountId="111111111111") assert ce.exception.response["Error"]["Code"] == "AccessDenied" # Without one defined: - with assert_raises(ClientError) as ce: + with pytest.raises(ClientError) as ce: client.get_public_access_block(AccountId=ACCOUNT_ID) assert ( ce.exception.response["Error"]["Code"] @@ -1213,7 +1212,7 @@ def test_get_public_access_block_for_account(): ) # Put a with an invalid account ID: - with assert_raises(ClientError) as ce: + with pytest.raises(ClientError) as ce: client.put_public_access_block( AccountId="111111111111", PublicAccessBlockConfiguration={"BlockPublicAcls": True}, @@ -1221,7 +1220,7 @@ def test_get_public_access_block_for_account(): assert ce.exception.response["Error"]["Code"] == "AccessDenied" # Put with an invalid PAB: - with assert_raises(ClientError) as ce: + with pytest.raises(ClientError) as ce: client.put_public_access_block( AccountId=ACCOUNT_ID, PublicAccessBlockConfiguration={} ) @@ -1255,7 +1254,7 @@ def test_get_public_access_block_for_account(): } # Delete with an invalid account ID: - with assert_raises(ClientError) as ce: + with pytest.raises(ClientError) as ce: client.delete_public_access_block(AccountId="111111111111") assert ce.exception.response["Error"]["Code"] == "AccessDenied" @@ -1263,7 +1262,7 @@ def test_get_public_access_block_for_account(): client.delete_public_access_block(AccountId=ACCOUNT_ID) # Confirm that it's deleted: - with assert_raises(ClientError) as ce: + with pytest.raises(ClientError) as ce: client.get_public_access_block(AccountId=ACCOUNT_ID) assert ( ce.exception.response["Error"]["Code"] @@ -1462,7 +1461,7 @@ def test_config_get_account_pab(): ) # Without a PAB in place: - with assert_raises(ClientError) as ce: + with pytest.raises(ClientError) as ce: config_client.get_resource_config_history( resourceType="AWS::S3::AccountPublicAccessBlock", resourceId=ACCOUNT_ID ) @@ -1633,7 +1632,7 @@ def test_policy(): } ) - with assert_raises(S3ResponseError) as err: + with pytest.raises(S3ResponseError) as err: bucket.get_policy() ex = err.exception @@ -1654,7 +1653,7 @@ def test_policy(): bucket.delete_policy() - with assert_raises(S3ResponseError) as err: + with pytest.raises(S3ResponseError) as err: bucket.get_policy() @@ -1976,7 +1975,7 @@ def test_bucket_create_duplicate(): s3.create_bucket( Bucket="blah", CreateBucketConfiguration={"LocationConstraint": "us-west-2"} ) - with assert_raises(ClientError) as exc: + with pytest.raises(ClientError) as exc: s3.create_bucket( Bucket="blah", CreateBucketConfiguration={"LocationConstraint": "us-west-2"} ) @@ -1986,7 +1985,7 @@ def test_bucket_create_duplicate(): @mock_s3 def test_bucket_create_force_us_east_1(): s3 = boto3.resource("s3", region_name=DEFAULT_REGION_NAME) - with 
assert_raises(ClientError) as exc: + with pytest.raises(ClientError) as exc: s3.create_bucket( Bucket="blah", CreateBucketConfiguration={"LocationConstraint": DEFAULT_REGION_NAME}, @@ -2011,7 +2010,7 @@ def test_boto3_bucket_create_eu_central(): @mock_s3 def test_bucket_create_empty_bucket_configuration_should_return_malformed_xml_error(): s3 = boto3.resource("s3", region_name="us-east-1") - with assert_raises(ClientError) as e: + with pytest.raises(ClientError) as e: s3.create_bucket(Bucket="whatever", CreateBucketConfiguration={}) e.exception.response["Error"]["Code"].should.equal("MalformedXML") e.exception.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) @@ -2028,7 +2027,7 @@ def test_boto3_head_object(): Bucket="blah", Key="hello.txt" ) - with assert_raises(ClientError) as e: + with pytest.raises(ClientError) as e: s3.Object("blah", "hello2.txt").meta.client.head_object( Bucket="blah", Key="hello_bad.txt" ) @@ -2077,7 +2076,7 @@ def test_boto3_get_object(): Bucket="blah", Key="hello.txt" ) - with assert_raises(ClientError) as e: + with pytest.raises(ClientError) as e: s3.Object("blah", "hello2.txt").get() e.exception.response["Error"]["Code"].should.equal("NoSuchKey") @@ -2104,7 +2103,7 @@ def test_boto3_get_missing_object_with_part_number(): s3 = boto3.resource("s3", region_name=DEFAULT_REGION_NAME) s3.create_bucket(Bucket="blah") - with assert_raises(ClientError) as e: + with pytest.raises(ClientError) as e: s3.Object("blah", "hello.txt").meta.client.head_object( Bucket="blah", Key="hello.txt", PartNumber=123 ) @@ -2176,7 +2175,7 @@ def test_boto3_copy_object_with_versioning(): obj3_version_new.should_not.equal(obj2_version_new) # Copy file that doesn't exist - with assert_raises(ClientError) as e: + with pytest.raises(ClientError) as e: client.copy_object( CopySource={"Bucket": "blah", "Key": "test4", "VersionId": obj2_version}, Bucket="blah", @@ -2212,7 +2211,7 @@ def test_s3_abort_multipart_data_with_invalid_upload_and_key(): client.create_bucket(Bucket="blah") - with assert_raises(Exception) as err: + with pytest.raises(Exception) as err: client.abort_multipart_upload( Bucket="blah", Key="foobar", UploadId="dummy_upload_id" ) @@ -2360,7 +2359,7 @@ def test_boto3_get_object_if_modified_since(): s3.put_object(Bucket=bucket_name, Key=key, Body="test") - with assert_raises(botocore.exceptions.ClientError) as err: + with pytest.raises(botocore.exceptions.ClientError) as err: s3.get_object( Bucket=bucket_name, Key=key, @@ -2438,7 +2437,7 @@ def test_boto3_head_object_if_modified_since(): s3.put_object(Bucket=bucket_name, Key=key, Body="test") - with assert_raises(botocore.exceptions.ClientError) as err: + with pytest.raises(botocore.exceptions.ClientError) as err: s3.head_object( Bucket=bucket_name, Key=key, @@ -2633,7 +2632,7 @@ def test_boto3_put_bucket_tagging(): resp["ResponseMetadata"]["HTTPStatusCode"].should.equal(200) # With duplicate tag keys: - with assert_raises(ClientError) as err: + with pytest.raises(ClientError) as err: resp = s3.put_bucket_tagging( Bucket=bucket_name, Tagging={ @@ -2650,7 +2649,7 @@ def test_boto3_put_bucket_tagging(): ) # Cannot put tags that are "system" tags - i.e. 
tags that start with "aws:" - with assert_raises(ClientError) as ce: + with pytest.raises(ClientError) as ce: s3.put_bucket_tagging( Bucket=bucket_name, Tagging={"TagSet": [{"Key": "aws:sometag", "Value": "nope"}]}, @@ -2691,7 +2690,7 @@ def test_boto3_get_bucket_tagging(): # With no tags: s3.put_bucket_tagging(Bucket=bucket_name, Tagging={"TagSet": []}) - with assert_raises(ClientError) as err: + with pytest.raises(ClientError) as err: s3.get_bucket_tagging(Bucket=bucket_name) e = err.exception @@ -2718,7 +2717,7 @@ def test_boto3_delete_bucket_tagging(): resp = s3.delete_bucket_tagging(Bucket=bucket_name) resp["ResponseMetadata"]["HTTPStatusCode"].should.equal(204) - with assert_raises(ClientError) as err: + with pytest.raises(ClientError) as err: s3.get_bucket_tagging(Bucket=bucket_name) e = err.exception @@ -2756,7 +2755,7 @@ def test_boto3_put_bucket_cors(): resp["ResponseMetadata"]["HTTPStatusCode"].should.equal(200) - with assert_raises(ClientError) as err: + with pytest.raises(ClientError) as err: s3.put_bucket_cors( Bucket=bucket_name, CORSConfiguration={ @@ -2771,14 +2770,14 @@ def test_boto3_put_bucket_cors(): "Found unsupported HTTP method in CORS config. " "Unsupported method is NOTREAL" ) - with assert_raises(ClientError) as err: + with pytest.raises(ClientError) as err: s3.put_bucket_cors(Bucket=bucket_name, CORSConfiguration={"CORSRules": []}) e = err.exception e.response["Error"]["Code"].should.equal("MalformedXML") # And 101: many_rules = [{"AllowedOrigins": ["*"], "AllowedMethods": ["GET"]}] * 101 - with assert_raises(ClientError) as err: + with pytest.raises(ClientError) as err: s3.put_bucket_cors( Bucket=bucket_name, CORSConfiguration={"CORSRules": many_rules} ) @@ -2793,7 +2792,7 @@ def test_boto3_get_bucket_cors(): s3.create_bucket(Bucket=bucket_name) # Without CORS: - with assert_raises(ClientError) as err: + with pytest.raises(ClientError) as err: s3.get_bucket_cors(Bucket=bucket_name) e = err.exception @@ -2843,7 +2842,7 @@ def test_boto3_delete_bucket_cors(): resp["ResponseMetadata"]["HTTPStatusCode"].should.equal(204) # Verify deletion: - with assert_raises(ClientError) as err: + with pytest.raises(ClientError) as err: s3.get_bucket_cors(Bucket=bucket_name) e = err.exception @@ -2906,7 +2905,7 @@ def test_put_bucket_acl_body(): assert len(result["Grants"]) == 1 # With no owner: - with assert_raises(ClientError) as err: + with pytest.raises(ClientError) as err: s3.put_bucket_acl( Bucket="bucket", AccessControlPolicy={ @@ -2924,7 +2923,7 @@ def test_put_bucket_acl_body(): assert err.exception.response["Error"]["Code"] == "MalformedACLError" # With incorrect permission: - with assert_raises(ClientError) as err: + with pytest.raises(ClientError) as err: s3.put_bucket_acl( Bucket="bucket", AccessControlPolicy={ @@ -3185,7 +3184,7 @@ def test_put_bucket_notification_errors(): # With incorrect ARNs: for tech, arn in [("Queue", "sqs"), ("Topic", "sns"), ("LambdaFunction", "lambda")]: - with assert_raises(ClientError) as err: + with pytest.raises(ClientError) as err: s3.put_bucket_notification_configuration( Bucket="bucket", NotificationConfiguration={ @@ -3206,7 +3205,7 @@ def test_put_bucket_notification_errors(): ) # Region not the same as the bucket: - with assert_raises(ClientError) as err: + with pytest.raises(ClientError) as err: s3.put_bucket_notification_configuration( Bucket="bucket", NotificationConfiguration={ @@ -3226,7 +3225,7 @@ def test_put_bucket_notification_errors(): ) # Invalid event name: - with assert_raises(ClientError) as err: + with 
pytest.raises(ClientError) as err: s3.put_bucket_notification_configuration( Bucket="bucket", NotificationConfiguration={ @@ -3263,7 +3262,7 @@ def test_boto3_put_bucket_logging(): assert not result.get("LoggingEnabled") # A log-bucket that doesn't exist: - with assert_raises(ClientError) as err: + with pytest.raises(ClientError) as err: s3.put_bucket_logging( Bucket=bucket_name, BucketLoggingStatus={ @@ -3273,7 +3272,7 @@ def test_boto3_put_bucket_logging(): assert err.exception.response["Error"]["Code"] == "InvalidTargetBucketForLogging" # A log-bucket that's missing the proper ACLs for LogDelivery: - with assert_raises(ClientError) as err: + with pytest.raises(ClientError) as err: s3.put_bucket_logging( Bucket=bucket_name, BucketLoggingStatus={ @@ -3314,7 +3313,7 @@ def test_boto3_put_bucket_logging(): ) # A log-bucket that's in the wrong region: - with assert_raises(ClientError) as err: + with pytest.raises(ClientError) as err: s3.put_bucket_logging( Bucket=bucket_name, BucketLoggingStatus={ @@ -3402,7 +3401,7 @@ def test_boto3_put_bucket_logging(): assert len(result["LoggingEnabled"]["TargetGrants"]) == 1 # With an invalid grant: - with assert_raises(ClientError) as err: + with pytest.raises(ClientError) as err: s3.put_bucket_logging( Bucket=bucket_name, BucketLoggingStatus={ @@ -3431,7 +3430,7 @@ def test_boto3_put_object_tagging(): key = "key-with-tags" s3.create_bucket(Bucket=bucket_name) - with assert_raises(ClientError) as err: + with pytest.raises(ClientError) as err: s3.put_object_tagging( Bucket=bucket_name, Key=key, @@ -3479,7 +3478,7 @@ def test_boto3_put_object_tagging_on_earliest_version(): bucket_versioning.enable() bucket_versioning.status.should.equal("Enabled") - with assert_raises(ClientError) as err: + with pytest.raises(ClientError) as err: s3.put_object_tagging( Bucket=bucket_name, Key=key, @@ -3547,7 +3546,7 @@ def test_boto3_put_object_tagging_on_both_version(): bucket_versioning.enable() bucket_versioning.status.should.equal("Enabled") - with assert_raises(ClientError) as err: + with pytest.raises(ClientError) as err: s3.put_object_tagging( Bucket=bucket_name, Key=key, @@ -3772,7 +3771,7 @@ def test_boto3_delete_markers(): s3.delete_objects(Bucket=bucket_name, Delete={"Objects": [{"Key": key}]}) - with assert_raises(ClientError) as e: + with pytest.raises(ClientError) as e: s3.get_object(Bucket=bucket_name, Key=key) e.exception.response["Error"]["Code"].should.equal("NoSuchKey") @@ -3820,7 +3819,7 @@ def test_boto3_multiple_delete_markers(): response = s3.list_object_versions(Bucket=bucket_name) response["DeleteMarkers"].should.have.length_of(2) - with assert_raises(ClientError) as e: + with pytest.raises(ClientError) as e: s3.get_object(Bucket=bucket_name, Key=key) e.response["Error"]["Code"].should.equal("404") @@ -3892,7 +3891,7 @@ def test_get_stream_gzipped(): @mock_s3 def test_boto3_bucket_name_too_long(): s3 = boto3.client("s3", region_name=DEFAULT_REGION_NAME) - with assert_raises(ClientError) as exc: + with pytest.raises(ClientError) as exc: s3.create_bucket(Bucket="x" * 64) exc.exception.response["Error"]["Code"].should.equal("InvalidBucketName") @@ -3900,7 +3899,7 @@ def test_boto3_bucket_name_too_long(): @mock_s3 def test_boto3_bucket_name_too_short(): s3 = boto3.client("s3", region_name=DEFAULT_REGION_NAME) - with assert_raises(ClientError) as exc: + with pytest.raises(ClientError) as exc: s3.create_bucket(Bucket="x" * 2) exc.exception.response["Error"]["Code"].should.equal("InvalidBucketName") @@ -3972,7 +3971,7 @@ def 
test_accelerate_configuration_status_validation(): bucket_name = "some_bucket" s3 = boto3.client("s3", region_name=DEFAULT_REGION_NAME) s3.create_bucket(Bucket=bucket_name) - with assert_raises(ClientError) as exc: + with pytest.raises(ClientError) as exc: s3.put_bucket_accelerate_configuration( Bucket=bucket_name, AccelerateConfiguration={"Status": "bad_status"} ) @@ -3984,7 +3983,7 @@ def test_accelerate_configuration_is_not_supported_when_bucket_name_has_dots(): bucket_name = "some.bucket.with.dots" s3 = boto3.client("s3", region_name=DEFAULT_REGION_NAME) s3.create_bucket(Bucket=bucket_name) - with assert_raises(ClientError) as exc: + with pytest.raises(ClientError) as exc: s3.put_bucket_accelerate_configuration( Bucket=bucket_name, AccelerateConfiguration={"Status": "Enabled"} ) @@ -4028,11 +4027,11 @@ def test_leading_slashes_not_removed(bucket_name): s3.put_object(Bucket=bucket_name, Key=uploaded_key, Body=b"Some body") - with assert_raises(ClientError) as e: + with pytest.raises(ClientError) as e: s3.get_object(Bucket=bucket_name, Key=invalid_key_1) e.exception.response["Error"]["Code"].should.equal("NoSuchKey") - with assert_raises(ClientError) as e: + with pytest.raises(ClientError) as e: s3.get_object(Bucket=bucket_name, Key=invalid_key_2) e.exception.response["Error"]["Code"].should.equal("NoSuchKey") @@ -4052,7 +4051,7 @@ def put_object(): s3.put_object(Bucket=bucket_name, Key=key, Body=body) def assert_deleted(): - with assert_raises(ClientError) as e: + with pytest.raises(ClientError) as e: s3.get_object(Bucket=bucket_name, Key=key) e.exception.response["Error"]["Code"].should.equal("NoSuchKey") @@ -4073,7 +4072,7 @@ def test_public_access_block(): client.create_bucket(Bucket="mybucket") # Try to get the public access block (should not exist by default) - with assert_raises(ClientError) as ce: + with pytest.raises(ClientError) as ce: client.get_public_access_block(Bucket="mybucket") assert ( @@ -4123,7 +4122,7 @@ def test_public_access_block(): } # Test with a blank PublicAccessBlockConfiguration: - with assert_raises(ClientError) as ce: + with pytest.raises(ClientError) as ce: client.put_public_access_block( Bucket="mybucket", PublicAccessBlockConfiguration={} ) @@ -4156,7 +4155,7 @@ def test_public_access_block(): # Delete: client.delete_public_access_block(Bucket="mybucket") - with assert_raises(ClientError) as ce: + with pytest.raises(ClientError) as ce: client.get_public_access_block(Bucket="mybucket") assert ( ce.exception.response["Error"]["Code"] == "NoSuchPublicAccessBlockConfiguration" @@ -4301,7 +4300,7 @@ def test_list_config_discovered_resources(): ) # With an invalid page: - with assert_raises(InvalidNextTokenException) as inte: + with pytest.raises(InvalidNextTokenException) as inte: s3_config_query.list_config_service_resources(None, None, 1, "notabucket") assert "The nextToken provided is invalid" in inte.exception.message @@ -4761,7 +4760,7 @@ def test_encryption(): conn = boto3.client("s3", region_name="us-east-1") conn.create_bucket(Bucket="mybucket") - with assert_raises(ClientError) as exc: + with pytest.raises(ClientError) as exc: conn.get_bucket_encryption(Bucket="mybucket") sse_config = { @@ -4784,7 +4783,7 @@ def test_encryption(): assert resp["ServerSideEncryptionConfiguration"] == sse_config conn.delete_bucket_encryption(Bucket="mybucket") - with assert_raises(ClientError) as exc: + with pytest.raises(ClientError) as exc: conn.get_bucket_encryption(Bucket="mybucket") @@ -4799,7 +4798,7 @@ def test_presigned_url_restrict_parameters(): s3 = 
boto3.client("s3", region_name="us-east-1") # Create a pre-signed url with some metadata. - with assert_raises(botocore.exceptions.ParamValidationError) as err: + with pytest.raises(botocore.exceptions.ParamValidationError) as err: s3.generate_presigned_url( ClientMethod="put_object", Params={"Bucket": bucket, "Key": key, "Unknown": "metadata"}, diff --git a/tests/test_s3/test_s3_lifecycle.py b/tests/test_s3/test_s3_lifecycle.py index 0a2e66b5c598..da9ffbca4c3d 100644 --- a/tests/test_s3/test_s3_lifecycle.py +++ b/tests/test_s3/test_s3_lifecycle.py @@ -8,7 +8,7 @@ import sure # noqa from botocore.exceptions import ClientError from datetime import datetime -from nose.tools import assert_raises +import pytest from moto import mock_s3_deprecated, mock_s3 @@ -56,7 +56,7 @@ def test_lifecycle_with_filters(): assert result["Rules"][0]["Filter"]["Prefix"] == "" assert not result["Rules"][0]["Filter"].get("And") assert not result["Rules"][0]["Filter"].get("Tag") - with assert_raises(KeyError): + with pytest.raises(KeyError): assert result["Rules"][0]["Prefix"] # Without any prefixes and an empty filter (this is by default a prefix for the whole bucket): @@ -75,12 +75,12 @@ def test_lifecycle_with_filters(): ) result = client.get_bucket_lifecycle_configuration(Bucket="bucket") assert len(result["Rules"]) == 1 - with assert_raises(KeyError): + with pytest.raises(KeyError): assert result["Rules"][0]["Prefix"] # If we remove the filter -- and don't specify a Prefix, then this is bad: lfc["Rules"][0].pop("Filter") - with assert_raises(ClientError) as err: + with pytest.raises(ClientError) as err: client.put_bucket_lifecycle_configuration( Bucket="bucket", LifecycleConfiguration=lfc ) @@ -93,12 +93,12 @@ def test_lifecycle_with_filters(): ) result = client.get_bucket_lifecycle_configuration(Bucket="bucket") assert len(result["Rules"]) == 1 - with assert_raises(KeyError): + with pytest.raises(KeyError): assert result["Rules"][0]["Filter"]["Prefix"] assert not result["Rules"][0]["Filter"].get("And") assert result["Rules"][0]["Filter"]["Tag"]["Key"] == "mytag" assert result["Rules"][0]["Filter"]["Tag"]["Value"] == "mytagvalue" - with assert_raises(KeyError): + with pytest.raises(KeyError): assert result["Rules"][0]["Prefix"] # With And (single tag): @@ -118,7 +118,7 @@ def test_lifecycle_with_filters(): assert len(result["Rules"][0]["Filter"]["And"]["Tags"]) == 1 assert result["Rules"][0]["Filter"]["And"]["Tags"][0]["Key"] == "mytag" assert result["Rules"][0]["Filter"]["And"]["Tags"][0]["Value"] == "mytagvalue" - with assert_raises(KeyError): + with pytest.raises(KeyError): assert result["Rules"][0]["Prefix"] # With multiple And tags: @@ -141,7 +141,7 @@ def test_lifecycle_with_filters(): assert result["Rules"][0]["Filter"]["And"]["Tags"][0]["Value"] == "mytagvalue" assert result["Rules"][0]["Filter"]["And"]["Tags"][1]["Key"] == "mytag2" assert result["Rules"][0]["Filter"]["And"]["Tags"][1]["Value"] == "mytagvalue2" - with assert_raises(KeyError): + with pytest.raises(KeyError): assert result["Rules"][0]["Prefix"] # And filter without Prefix but multiple Tags: @@ -156,26 +156,26 @@ def test_lifecycle_with_filters(): ) result = client.get_bucket_lifecycle_configuration(Bucket="bucket") assert len(result["Rules"]) == 1 - with assert_raises(KeyError): + with pytest.raises(KeyError): assert result["Rules"][0]["Filter"]["And"]["Prefix"] assert len(result["Rules"][0]["Filter"]["And"]["Tags"]) == 2 assert result["Rules"][0]["Filter"]["And"]["Tags"][0]["Key"] == "mytag" assert 
result["Rules"][0]["Filter"]["And"]["Tags"][0]["Value"] == "mytagvalue" assert result["Rules"][0]["Filter"]["And"]["Tags"][1]["Key"] == "mytag2" assert result["Rules"][0]["Filter"]["And"]["Tags"][1]["Value"] == "mytagvalue2" - with assert_raises(KeyError): + with pytest.raises(KeyError): assert result["Rules"][0]["Prefix"] # Can't have both filter and prefix: lfc["Rules"][0]["Prefix"] = "" - with assert_raises(ClientError) as err: + with pytest.raises(ClientError) as err: client.put_bucket_lifecycle_configuration( Bucket="bucket", LifecycleConfiguration=lfc ) assert err.exception.response["Error"]["Code"] == "MalformedXML" lfc["Rules"][0]["Prefix"] = "some/path" - with assert_raises(ClientError) as err: + with pytest.raises(ClientError) as err: client.put_bucket_lifecycle_configuration( Bucket="bucket", LifecycleConfiguration=lfc ) @@ -196,7 +196,7 @@ def test_lifecycle_with_filters(): "Prefix": "some/prefix", "Tag": {"Key": "mytag", "Value": "mytagvalue"}, } - with assert_raises(ClientError) as err: + with pytest.raises(ClientError) as err: client.put_bucket_lifecycle_configuration( Bucket="bucket", LifecycleConfiguration=lfc ) @@ -212,7 +212,7 @@ def test_lifecycle_with_filters(): ], }, } - with assert_raises(ClientError) as err: + with pytest.raises(ClientError) as err: client.put_bucket_lifecycle_configuration( Bucket="bucket", LifecycleConfiguration=lfc ) @@ -279,7 +279,7 @@ def test_lifecycle_with_eodm(): # With failure: lfc["Rules"][0]["Expiration"]["Days"] = 7 - with assert_raises(ClientError) as err: + with pytest.raises(ClientError) as err: client.put_bucket_lifecycle_configuration( Bucket="bucket", LifecycleConfiguration=lfc ) @@ -287,7 +287,7 @@ def test_lifecycle_with_eodm(): del lfc["Rules"][0]["Expiration"]["Days"] lfc["Rules"][0]["Expiration"]["Date"] = datetime(2015, 1, 1) - with assert_raises(ClientError) as err: + with pytest.raises(ClientError) as err: client.put_bucket_lifecycle_configuration( Bucket="bucket", LifecycleConfiguration=lfc ) @@ -383,7 +383,7 @@ def test_lifecycle_with_nvt(): # With failures for missing children: del lfc["Rules"][0]["NoncurrentVersionTransitions"][0]["NoncurrentDays"] - with assert_raises(ClientError) as err: + with pytest.raises(ClientError) as err: client.put_bucket_lifecycle_configuration( Bucket="bucket", LifecycleConfiguration=lfc ) @@ -391,7 +391,7 @@ def test_lifecycle_with_nvt(): lfc["Rules"][0]["NoncurrentVersionTransitions"][0]["NoncurrentDays"] = 30 del lfc["Rules"][0]["NoncurrentVersionTransitions"][0]["StorageClass"] - with assert_raises(ClientError) as err: + with pytest.raises(ClientError) as err: client.put_bucket_lifecycle_configuration( Bucket="bucket", LifecycleConfiguration=lfc ) diff --git a/tests/test_s3/test_s3_storageclass.py b/tests/test_s3/test_s3_storageclass.py index a89b4a896dbf..0e8152b03d8f 100644 --- a/tests/test_s3/test_s3_storageclass.py +++ b/tests/test_s3/test_s3_storageclass.py @@ -4,7 +4,7 @@ import sure # noqa from botocore.exceptions import ClientError -from nose.tools import assert_raises +import pytest from moto import mock_s3 @@ -105,7 +105,7 @@ def test_s3_invalid_copied_storage_class(): ) # Try to copy an object with an invalid storage class - with assert_raises(ClientError) as err: + with pytest.raises(ClientError) as err: s3.copy_object( CopySource={"Bucket": "Bucket", "Key": "First_Object"}, Bucket="Bucket2", @@ -128,7 +128,7 @@ def test_s3_invalid_storage_class(): ) # Try to add an object with an invalid storage class - with assert_raises(ClientError) as err: + with 
pytest.raises(ClientError) as err: s3.put_object( Bucket="Bucket", Key="First_Object", Body="Body", StorageClass="STANDARDD" ) @@ -166,7 +166,7 @@ def test_s3_copy_object_error_for_glacier_storage_class_not_restored(): Bucket="Bucket", Key="First_Object", Body="Body", StorageClass="GLACIER" ) - with assert_raises(ClientError) as exc: + with pytest.raises(ClientError) as exc: s3.copy_object( CopySource={"Bucket": "Bucket", "Key": "First_Object"}, Bucket="Bucket", @@ -187,7 +187,7 @@ def test_s3_copy_object_error_for_deep_archive_storage_class_not_restored(): Bucket="Bucket", Key="First_Object", Body="Body", StorageClass="DEEP_ARCHIVE" ) - with assert_raises(ClientError) as exc: + with pytest.raises(ClientError) as exc: s3.copy_object( CopySource={"Bucket": "Bucket", "Key": "First_Object"}, Bucket="Bucket", diff --git a/tests/test_s3bucket_path/__init__.py b/tests/test_s3bucket_path/__init__.py new file mode 100644 index 000000000000..08a1c1568c9c --- /dev/null +++ b/tests/test_s3bucket_path/__init__.py @@ -0,0 +1 @@ +# This file is intentionally left blank. diff --git a/tests/test_sagemaker/test_sagemaker_endpoint.py b/tests/test_sagemaker/test_sagemaker_endpoint.py index b048439ff5db..0d21ad1ef1bc 100644 --- a/tests/test_sagemaker/test_sagemaker_endpoint.py +++ b/tests/test_sagemaker/test_sagemaker_endpoint.py @@ -3,12 +3,12 @@ import datetime import boto3 -from botocore.exceptions import ClientError, ParamValidationError +from botocore.exceptions import ClientError import sure # noqa from moto import mock_sagemaker from moto.sts.models import ACCOUNT_ID -from nose.tools import assert_true, assert_equal, assert_raises +import pytest TEST_REGION_NAME = "us-east-1" FAKE_ROLE_ARN = "arn:aws:iam::{}:role/FakeRole".format(ACCOUNT_ID) @@ -33,14 +33,12 @@ def test_create_endpoint_config(): ] endpoint_config_name = "MyEndpointConfig" - with assert_raises(ClientError) as e: + with pytest.raises(ClientError) as e: sagemaker.create_endpoint_config( EndpointConfigName=endpoint_config_name, ProductionVariants=production_variants, ) - assert_true( - e.exception.response["Error"]["Message"].startswith("Could not find model") - ) + assert e.exception.response["Error"]["Message"].startswith("Could not find model") _create_model(sagemaker, model_name) resp = sagemaker.create_endpoint_config( @@ -88,22 +86,13 @@ def test_delete_endpoint_config(): ) resp = sagemaker.delete_endpoint_config(EndpointConfigName=endpoint_config_name) - with assert_raises(ClientError) as e: + with pytest.raises(ClientError) as e: sagemaker.describe_endpoint_config(EndpointConfigName=endpoint_config_name) - assert_true( - e.exception.response["Error"]["Message"].startswith( - "Could not find endpoint configuration" - ) - ) + assert e.exception.response["Error"]["Message"].startswith("Could not find endpoint configuration") - with assert_raises(ClientError) as e: + with pytest.raises(ClientError) as e: sagemaker.delete_endpoint_config(EndpointConfigName=endpoint_config_name) - assert_true( - e.exception.response["Error"]["Message"].startswith( - "Could not find endpoint configuration" - ) - ) - pass + assert e.exception.response["Error"]["Message"].startswith("Could not find endpoint configuration")
assert_equal(e.exception.response["Error"]["Code"], "ValidationException") + assert e.exception.response["Error"]["Code"] == "ValidationException" expected_message = "Value '{}' at 'instanceType' failed to satisfy constraint: Member must satisfy enum value set: [".format( instance_type ) - assert_true(expected_message in e.exception.response["Error"]["Message"]) + assert expected_message in e.exception.response["Error"]["Message"] @mock_sagemaker @@ -141,15 +130,11 @@ def test_create_endpoint(): sagemaker = boto3.client("sagemaker", region_name=TEST_REGION_NAME) endpoint_name = "MyEndpoint" - with assert_raises(ClientError) as e: + with pytest.raises(ClientError) as e: sagemaker.create_endpoint( EndpointName=endpoint_name, EndpointConfigName="NonexistentEndpointConfig" ) - assert_true( - e.exception.response["Error"]["Message"].startswith( - "Could not find endpoint configuration" - ) - ) + assert e.exception.response["Error"]["Message"].startswith("Could not find endpoint configuration") model_name = "MyModel" _create_model(sagemaker, model_name) @@ -173,12 +158,12 @@ def test_create_endpoint(): resp["EndpointName"].should.equal(endpoint_name) resp["EndpointConfigName"].should.equal(endpoint_config_name) resp["EndpointStatus"].should.equal("InService") - assert_true(isinstance(resp["CreationTime"], datetime.datetime)) - assert_true(isinstance(resp["LastModifiedTime"], datetime.datetime)) + assert isinstance(resp["CreationTime"], datetime.datetime) + assert isinstance(resp["LastModifiedTime"], datetime.datetime) resp["ProductionVariants"][0]["VariantName"].should.equal("MyProductionVariant") resp = sagemaker.list_tags(ResourceArn=resp["EndpointArn"]) - assert_equal(resp["Tags"], GENERIC_TAGS_PARAM) + assert resp["Tags"] == GENERIC_TAGS_PARAM @mock_sagemaker @@ -195,17 +180,13 @@ def test_delete_endpoint(): _create_endpoint(sagemaker, endpoint_name, endpoint_config_name) sagemaker.delete_endpoint(EndpointName=endpoint_name) - with assert_raises(ClientError) as e: + with pytest.raises(ClientError) as e: sagemaker.describe_endpoint(EndpointName=endpoint_name) - assert_true( - e.exception.response["Error"]["Message"].startswith("Could not find endpoint") - ) + assert e.exception.response["Error"]["Message"].startswith("Could not find endpoint") - with assert_raises(ClientError) as e: + with pytest.raises(ClientError) as e: sagemaker.delete_endpoint(EndpointName=endpoint_name) - assert_true( - e.exception.response["Error"]["Message"].startswith("Could not find endpoint") - ) + assert e.exception.response["Error"]["Message"].startswith("Could not find endpoint") def _create_model(boto_client, model_name): @@ -217,7 +198,7 @@ def _create_model(boto_client, model_name): }, ExecutionRoleArn=FAKE_ROLE_ARN, ) - assert_equal(resp["ResponseMetadata"]["HTTPStatusCode"], 200) + assert resp["ResponseMetadata"]["HTTPStatusCode"] == 200 def _create_endpoint_config(boto_client, endpoint_config_name, model_name): diff --git a/tests/test_sagemaker/test_sagemaker_models.py b/tests/test_sagemaker/test_sagemaker_models.py index 4139ca575480..1f2f4440dce4 100644 --- a/tests/test_sagemaker/test_sagemaker_models.py +++ b/tests/test_sagemaker/test_sagemaker_models.py @@ -2,9 +2,8 @@ from __future__ import unicode_literals import boto3 -import tests.backport_assert_raises # noqa from botocore.exceptions import ClientError -from nose.tools import assert_raises +import pytest from moto import mock_sagemaker import sure # noqa @@ -76,7 +75,7 @@ def test_delete_model(): @mock_sagemaker def test_delete_model_not_found(): - 
with assert_raises(ClientError) as err: + with pytest.raises(ClientError) as err: boto3.client("sagemaker", region_name="us-east-1").delete_model( ModelName="blah" ) diff --git a/tests/test_sagemaker/test_sagemaker_notebooks.py b/tests/test_sagemaker/test_sagemaker_notebooks.py index c04618c7747e..9f6a2be39d2d 100644 --- a/tests/test_sagemaker/test_sagemaker_notebooks.py +++ b/tests/test_sagemaker/test_sagemaker_notebooks.py @@ -8,7 +8,7 @@ from moto import mock_sagemaker from moto.sts.models import ACCOUNT_ID -from nose.tools import assert_true, assert_equal, assert_raises +import pytest TEST_REGION_NAME = "us-east-1" FAKE_SUBNET_ID = "subnet-012345678" @@ -41,26 +41,25 @@ def test_create_notebook_instance_minimal_params(): "RoleArn": FAKE_ROLE_ARN, } resp = sagemaker.create_notebook_instance(**args) - assert_true(resp["NotebookInstanceArn"].startswith("arn:aws:sagemaker")) - assert_true(resp["NotebookInstanceArn"].endswith(args["NotebookInstanceName"])) + assert resp["NotebookInstanceArn"].startswith("arn:aws:sagemaker") + assert resp["NotebookInstanceArn"].endswith(args["NotebookInstanceName"]) resp = sagemaker.describe_notebook_instance(NotebookInstanceName=NAME_PARAM) - assert_true(resp["NotebookInstanceArn"].startswith("arn:aws:sagemaker")) - assert_true(resp["NotebookInstanceArn"].endswith(args["NotebookInstanceName"])) - assert_equal(resp["NotebookInstanceName"], NAME_PARAM) - assert_equal(resp["NotebookInstanceStatus"], "InService") - assert_equal( - resp["Url"], "{}.notebook.{}.sagemaker.aws".format(NAME_PARAM, TEST_REGION_NAME) - ) - assert_equal(resp["InstanceType"], INSTANCE_TYPE_PARAM) - assert_equal(resp["RoleArn"], FAKE_ROLE_ARN) - assert_true(isinstance(resp["LastModifiedTime"], datetime.datetime)) - assert_true(isinstance(resp["CreationTime"], datetime.datetime)) - assert_equal(resp["DirectInternetAccess"], "Enabled") - assert_equal(resp["VolumeSizeInGB"], 5) + assert resp["NotebookInstanceArn"].startswith("arn:aws:sagemaker") + assert resp["NotebookInstanceArn"].endswith(args["NotebookInstanceName"]) + assert resp["NotebookInstanceName"] == NAME_PARAM + assert resp["NotebookInstanceStatus"] == "InService" + assert resp["Url"] == \ + "{}.notebook.{}.sagemaker.aws".format(NAME_PARAM, TEST_REGION_NAME) + assert resp["InstanceType"] == INSTANCE_TYPE_PARAM + assert resp["RoleArn"] == FAKE_ROLE_ARN + assert isinstance(resp["LastModifiedTime"], datetime.datetime) + assert isinstance(resp["CreationTime"], datetime.datetime) + assert resp["DirectInternetAccess"] == "Enabled" + assert resp["VolumeSizeInGB"] == 5 -# assert_equal(resp["RootAccess"], True) # ToDo: Not sure if this defaults... +# assert resp["RootAccess"] == True # ToDo: Not sure if this defaults... 
@mock_sagemaker @@ -92,36 +91,34 @@ def test_create_notebook_instance_params(): "RootAccess": ROOT_ACCESS_PARAM, } resp = sagemaker.create_notebook_instance(**args) - assert_true(resp["NotebookInstanceArn"].startswith("arn:aws:sagemaker")) - assert_true(resp["NotebookInstanceArn"].endswith(args["NotebookInstanceName"])) + assert resp["NotebookInstanceArn"].startswith("arn:aws:sagemaker") + assert resp["NotebookInstanceArn"].endswith(args["NotebookInstanceName"]) resp = sagemaker.describe_notebook_instance(NotebookInstanceName=NAME_PARAM) - assert_true(resp["NotebookInstanceArn"].startswith("arn:aws:sagemaker")) - assert_true(resp["NotebookInstanceArn"].endswith(args["NotebookInstanceName"])) - assert_equal(resp["NotebookInstanceName"], NAME_PARAM) - assert_equal(resp["NotebookInstanceStatus"], "InService") - assert_equal( - resp["Url"], "{}.notebook.{}.sagemaker.aws".format(NAME_PARAM, TEST_REGION_NAME) - ) - assert_equal(resp["InstanceType"], INSTANCE_TYPE_PARAM) - assert_equal(resp["RoleArn"], FAKE_ROLE_ARN) - assert_true(isinstance(resp["LastModifiedTime"], datetime.datetime)) - assert_true(isinstance(resp["CreationTime"], datetime.datetime)) - assert_equal(resp["DirectInternetAccess"], "Enabled") - assert_equal(resp["VolumeSizeInGB"], VOLUME_SIZE_IN_GB_PARAM) - # assert_equal(resp["RootAccess"], True) # ToDo: Not sure if this defaults... - assert_equal(resp["SubnetId"], FAKE_SUBNET_ID) - assert_equal(resp["SecurityGroups"], FAKE_SECURITY_GROUP_IDS) - assert_equal(resp["KmsKeyId"], FAKE_KMS_KEY_ID) - assert_equal( - resp["NotebookInstanceLifecycleConfigName"], FAKE_LIFECYCLE_CONFIG_NAME - ) - assert_equal(resp["AcceleratorTypes"], ACCELERATOR_TYPES_PARAM) - assert_equal(resp["DefaultCodeRepository"], FAKE_DEFAULT_CODE_REPO) - assert_equal(resp["AdditionalCodeRepositories"], FAKE_ADDL_CODE_REPOS) + assert resp["NotebookInstanceArn"].startswith("arn:aws:sagemaker") + assert resp["NotebookInstanceArn"].endswith(args["NotebookInstanceName"]) + assert resp["NotebookInstanceName"] == NAME_PARAM + assert resp["NotebookInstanceStatus"] == "InService" + assert resp["Url"] == \ + "{}.notebook.{}.sagemaker.aws".format(NAME_PARAM, TEST_REGION_NAME) + assert resp["InstanceType"] == INSTANCE_TYPE_PARAM + assert resp["RoleArn"] == FAKE_ROLE_ARN + assert isinstance(resp["LastModifiedTime"], datetime.datetime) + assert isinstance(resp["CreationTime"], datetime.datetime) + assert resp["DirectInternetAccess"] == "Enabled" + assert resp["VolumeSizeInGB"] == VOLUME_SIZE_IN_GB_PARAM + # assert resp["RootAccess"] == True # ToDo: Not sure if this defaults... 
+ assert resp["SubnetId"] == FAKE_SUBNET_ID + assert resp["SecurityGroups"] == FAKE_SECURITY_GROUP_IDS + assert resp["KmsKeyId"] == FAKE_KMS_KEY_ID + assert resp["NotebookInstanceLifecycleConfigName"] == \ + FAKE_LIFECYCLE_CONFIG_NAME + assert resp["AcceleratorTypes"] == ACCELERATOR_TYPES_PARAM + assert resp["DefaultCodeRepository"] == FAKE_DEFAULT_CODE_REPO + assert resp["AdditionalCodeRepositories"] == FAKE_ADDL_CODE_REPOS resp = sagemaker.list_tags(ResourceArn=resp["NotebookInstanceArn"]) - assert_equal(resp["Tags"], GENERIC_TAGS_PARAM) + assert resp["Tags"] == GENERIC_TAGS_PARAM @mock_sagemaker @@ -136,14 +133,11 @@ def test_create_notebook_instance_bad_volume_size(): "RoleArn": FAKE_ROLE_ARN, "VolumeSizeInGB": vol_size, } - with assert_raises(ParamValidationError) as ex: - resp = sagemaker.create_notebook_instance(**args) - assert_equal( - ex.exception.args[0], - "Parameter validation failed:\nInvalid range for parameter VolumeSizeInGB, value: {}, valid range: 5-inf".format( - vol_size - ), - ) + with pytest.raises(ParamValidationError) as ex: + sagemaker.create_notebook_instance(**args) + assert \ + ex.exception.args[0] == \ + "Parameter validation failed:\nInvalid range for parameter VolumeSizeInGB, value: {}, valid range: 5-inf".format(vol_size) @mock_sagemaker @@ -157,14 +151,14 @@ def test_create_notebook_instance_invalid_instance_type(): "InstanceType": instance_type, "RoleArn": FAKE_ROLE_ARN, } - with assert_raises(ClientError) as ex: - resp = sagemaker.create_notebook_instance(**args) - assert_equal(ex.exception.response["Error"]["Code"], "ValidationException") + with pytest.raises(ClientError) as ex: + sagemaker.create_notebook_instance(**args) + assert ex.exception.response["Error"]["Code"] == "ValidationException" expected_message = "Value '{}' at 'instanceType' failed to satisfy constraint: Member must satisfy enum value set: [".format( instance_type ) - assert_true(expected_message in ex.exception.response["Error"]["Message"]) + assert expected_message in ex.exception.response["Error"]["Message"] @mock_sagemaker @@ -180,51 +174,49 @@ def test_notebook_instance_lifecycle(): "RoleArn": FAKE_ROLE_ARN, } resp = sagemaker.create_notebook_instance(**args) - assert_true(resp["NotebookInstanceArn"].startswith("arn:aws:sagemaker")) - assert_true(resp["NotebookInstanceArn"].endswith(args["NotebookInstanceName"])) + assert resp["NotebookInstanceArn"].startswith("arn:aws:sagemaker") + assert resp["NotebookInstanceArn"].endswith(args["NotebookInstanceName"]) resp = sagemaker.describe_notebook_instance(NotebookInstanceName=NAME_PARAM) notebook_instance_arn = resp["NotebookInstanceArn"] - with assert_raises(ClientError) as ex: + with pytest.raises(ClientError) as ex: sagemaker.delete_notebook_instance(NotebookInstanceName=NAME_PARAM) - assert_equal(ex.exception.response["Error"]["Code"], "ValidationException") + assert ex.exception.response["Error"]["Code"] == "ValidationException" expected_message = "Status (InService) not in ([Stopped, Failed]). 
Unable to transition to (Deleting) for Notebook Instance ({})".format( notebook_instance_arn ) - assert_true(expected_message in ex.exception.response["Error"]["Message"]) + assert expected_message in ex.exception.response["Error"]["Message"] sagemaker.stop_notebook_instance(NotebookInstanceName=NAME_PARAM) resp = sagemaker.describe_notebook_instance(NotebookInstanceName=NAME_PARAM) - assert_equal(resp["NotebookInstanceStatus"], "Stopped") + assert resp["NotebookInstanceStatus"] == "Stopped" sagemaker.start_notebook_instance(NotebookInstanceName=NAME_PARAM) resp = sagemaker.describe_notebook_instance(NotebookInstanceName=NAME_PARAM) - assert_equal(resp["NotebookInstanceStatus"], "InService") + assert resp["NotebookInstanceStatus"] == "InService" sagemaker.stop_notebook_instance(NotebookInstanceName=NAME_PARAM) resp = sagemaker.describe_notebook_instance(NotebookInstanceName=NAME_PARAM) - assert_equal(resp["NotebookInstanceStatus"], "Stopped") + assert resp["NotebookInstanceStatus"] == "Stopped" sagemaker.delete_notebook_instance(NotebookInstanceName=NAME_PARAM) - with assert_raises(ClientError) as ex: + with pytest.raises(ClientError) as ex: sagemaker.describe_notebook_instance(NotebookInstanceName=NAME_PARAM) - assert_equal(ex.exception.response["Error"]["Message"], "RecordNotFound") + assert ex.exception.response["Error"]["Message"] == "RecordNotFound" @mock_sagemaker def test_describe_nonexistent_model(): sagemaker = boto3.client("sagemaker", region_name=TEST_REGION_NAME) - with assert_raises(ClientError) as e: - resp = sagemaker.describe_model(ModelName="Nonexistent") - assert_true( - e.exception.response["Error"]["Message"].startswith("Could not find model") - ) + with pytest.raises(ClientError) as e: + sagemaker.describe_model(ModelName="Nonexistent") + assert e.exception.response["Error"]["Message"].startswith("Could not find model") @mock_sagemaker @@ -237,56 +229,50 @@ def test_notebook_instance_lifecycle_config(): resp = sagemaker.create_notebook_instance_lifecycle_config( NotebookInstanceLifecycleConfigName=name, OnCreate=on_create, OnStart=on_start ) - assert_true( - resp["NotebookInstanceLifecycleConfigArn"].startswith("arn:aws:sagemaker") - ) - assert_true(resp["NotebookInstanceLifecycleConfigArn"].endswith(name)) + assert resp["NotebookInstanceLifecycleConfigArn"].startswith("arn:aws:sagemaker") + assert resp["NotebookInstanceLifecycleConfigArn"].endswith(name) - with assert_raises(ClientError) as e: + with pytest.raises(ClientError) as e: resp = sagemaker.create_notebook_instance_lifecycle_config( NotebookInstanceLifecycleConfigName=name, OnCreate=on_create, OnStart=on_start, ) - assert_true( + assert \ e.exception.response["Error"]["Message"].endswith( "Notebook Instance Lifecycle Config already exists.)" ) - ) resp = sagemaker.describe_notebook_instance_lifecycle_config( NotebookInstanceLifecycleConfigName=name, ) - assert_equal(resp["NotebookInstanceLifecycleConfigName"], name) - assert_true( + assert resp["NotebookInstanceLifecycleConfigName"] == name + assert \ resp["NotebookInstanceLifecycleConfigArn"].startswith("arn:aws:sagemaker") - ) - assert_true(resp["NotebookInstanceLifecycleConfigArn"].endswith(name)) - assert_equal(resp["OnStart"], on_start) - assert_equal(resp["OnCreate"], on_create) - assert_true(isinstance(resp["LastModifiedTime"], datetime.datetime)) - assert_true(isinstance(resp["CreationTime"], datetime.datetime)) + assert resp["NotebookInstanceLifecycleConfigArn"].endswith(name) + assert resp["OnStart"] == on_start + assert resp["OnCreate"] == 
on_create + assert isinstance(resp["LastModifiedTime"], datetime.datetime) + assert isinstance(resp["CreationTime"], datetime.datetime) sagemaker.delete_notebook_instance_lifecycle_config( NotebookInstanceLifecycleConfigName=name, ) - with assert_raises(ClientError) as e: + with pytest.raises(ClientError) as e: sagemaker.describe_notebook_instance_lifecycle_config( NotebookInstanceLifecycleConfigName=name, ) - assert_true( + assert \ e.exception.response["Error"]["Message"].endswith( "Notebook Instance Lifecycle Config does not exist.)" ) - ) - with assert_raises(ClientError) as e: + with pytest.raises(ClientError) as e: sagemaker.delete_notebook_instance_lifecycle_config( NotebookInstanceLifecycleConfigName=name, ) - assert_true( + assert \ e.exception.response["Error"]["Message"].endswith( "Notebook Instance Lifecycle Config does not exist.)" ) - ) diff --git a/tests/test_sagemaker/test_sagemaker_training.py b/tests/test_sagemaker/test_sagemaker_training.py index feaf9f7136f4..8f1dda9fea0d 100644 --- a/tests/test_sagemaker/test_sagemaker_training.py +++ b/tests/test_sagemaker/test_sagemaker_training.py @@ -7,7 +7,6 @@ from moto import mock_sagemaker from moto.sts.models import ACCOUNT_ID -from nose.tools import assert_true, assert_equal, assert_raises, assert_regexp_matches FAKE_ROLE_ARN = "arn:aws:iam::{}:role/FakeRole".format(ACCOUNT_ID) TEST_REGION_NAME = "us-east-1" @@ -82,46 +81,41 @@ def test_create_training_job(): resp["TrainingJobArn"].should.match( r"^arn:aws:sagemaker:.*:.*:training-job/{}$".format(training_job_name) ) - assert_true( - resp["ModelArtifacts"]["S3ModelArtifacts"].startswith( + assert resp["ModelArtifacts"]["S3ModelArtifacts"].startswith( params["OutputDataConfig"]["S3OutputPath"] ) - ) - assert_true(training_job_name in (resp["ModelArtifacts"]["S3ModelArtifacts"])) - assert_true( + assert training_job_name in (resp["ModelArtifacts"]["S3ModelArtifacts"]) + assert \ resp["ModelArtifacts"]["S3ModelArtifacts"].endswith("output/model.tar.gz") - ) - assert_equal(resp["TrainingJobStatus"], "Completed") - assert_equal(resp["SecondaryStatus"], "Completed") - assert_equal(resp["HyperParameters"], params["HyperParameters"]) - assert_equal( - resp["AlgorithmSpecification"]["TrainingImage"], - params["AlgorithmSpecification"]["TrainingImage"], - ) - assert_equal( - resp["AlgorithmSpecification"]["TrainingInputMode"], - params["AlgorithmSpecification"]["TrainingInputMode"], - ) - assert_true("MetricDefinitions" in resp["AlgorithmSpecification"]) - assert_true("Name" in resp["AlgorithmSpecification"]["MetricDefinitions"][0]) - assert_true("Regex" in resp["AlgorithmSpecification"]["MetricDefinitions"][0]) - assert_equal(resp["RoleArn"], FAKE_ROLE_ARN) - assert_equal(resp["InputDataConfig"], params["InputDataConfig"]) - assert_equal(resp["OutputDataConfig"], params["OutputDataConfig"]) - assert_equal(resp["ResourceConfig"], params["ResourceConfig"]) - assert_equal(resp["StoppingCondition"], params["StoppingCondition"]) - assert_true(isinstance(resp["CreationTime"], datetime.datetime)) - assert_true(isinstance(resp["TrainingStartTime"], datetime.datetime)) - assert_true(isinstance(resp["TrainingEndTime"], datetime.datetime)) - assert_true(isinstance(resp["LastModifiedTime"], datetime.datetime)) - assert_true("SecondaryStatusTransitions" in resp) - assert_true("Status" in resp["SecondaryStatusTransitions"][0]) - assert_true("StartTime" in resp["SecondaryStatusTransitions"][0]) - assert_true("EndTime" in resp["SecondaryStatusTransitions"][0]) - assert_true("StatusMessage" in 
resp["SecondaryStatusTransitions"][0]) - assert_true("FinalMetricDataList" in resp) - assert_true("MetricName" in resp["FinalMetricDataList"][0]) - assert_true("Value" in resp["FinalMetricDataList"][0]) - assert_true("Timestamp" in resp["FinalMetricDataList"][0]) + assert resp["TrainingJobStatus"] == "Completed" + assert resp["SecondaryStatus"] == "Completed" + assert resp["HyperParameters"] == params["HyperParameters"] + assert \ + resp["AlgorithmSpecification"]["TrainingImage"] == \ + params["AlgorithmSpecification"]["TrainingImage"] + assert \ + resp["AlgorithmSpecification"]["TrainingInputMode"] == \ + params["AlgorithmSpecification"]["TrainingInputMode"] + assert "MetricDefinitions" in resp["AlgorithmSpecification"] + assert "Name" in resp["AlgorithmSpecification"]["MetricDefinitions"][0] + assert "Regex" in resp["AlgorithmSpecification"]["MetricDefinitions"][0] + assert resp["RoleArn"] == FAKE_ROLE_ARN + assert resp["InputDataConfig"] == params["InputDataConfig"] + assert resp["OutputDataConfig"] == params["OutputDataConfig"] + assert resp["ResourceConfig"] == params["ResourceConfig"] + assert resp["StoppingCondition"] == params["StoppingCondition"] + assert isinstance(resp["CreationTime"], datetime.datetime) + assert isinstance(resp["TrainingStartTime"], datetime.datetime) + assert isinstance(resp["TrainingEndTime"], datetime.datetime) + assert isinstance(resp["LastModifiedTime"], datetime.datetime) + assert "SecondaryStatusTransitions" in resp + assert "Status" in resp["SecondaryStatusTransitions"][0] + assert "StartTime" in resp["SecondaryStatusTransitions"][0] + assert "EndTime" in resp["SecondaryStatusTransitions"][0] + assert "StatusMessage" in resp["SecondaryStatusTransitions"][0] + assert "FinalMetricDataList" in resp + assert "MetricName" in resp["FinalMetricDataList"][0] + assert "Value" in resp["FinalMetricDataList"][0] + assert "Timestamp" in resp["FinalMetricDataList"][0] pass diff --git a/tests/test_secretsmanager/__init__.py b/tests/test_secretsmanager/__init__.py new file mode 100644 index 000000000000..08a1c1568c9c --- /dev/null +++ b/tests/test_secretsmanager/__init__.py @@ -0,0 +1 @@ +# This file is intentionally left blank. 
diff --git a/tests/test_secretsmanager/test_list_secrets.py b/tests/test_secretsmanager/test_list_secrets.py index 5470e3e12e16..826e09de7219 100644 --- a/tests/test_secretsmanager/test_list_secrets.py +++ b/tests/test_secretsmanager/test_list_secrets.py @@ -6,12 +6,7 @@ from moto import mock_secretsmanager from botocore.exceptions import ClientError import sure # noqa -from nose.tools import assert_raises - -try: - from nose.tools import assert_items_equal -except ImportError: - from nose.tools import assert_count_equal as assert_items_equal +import pytest def boto_client(): @@ -24,7 +19,7 @@ def test_empty(): secrets = conn.list_secrets() - assert_items_equal(secrets["SecretList"], []) + assert secrets["SecretList"] == [] @mock_secretsmanager @@ -60,7 +55,7 @@ def test_with_name_filter(): secrets = conn.list_secrets(Filters=[{"Key": "name", "Values": ["foo"]}]) secret_names = list(map(lambda s: s["Name"], secrets["SecretList"])) - assert_items_equal(secret_names, ["foo"]) + assert secret_names == ["foo"] @mock_secretsmanager @@ -75,7 +70,7 @@ def test_with_tag_key_filter(): secrets = conn.list_secrets(Filters=[{"Key": "tag-key", "Values": ["baz"]}]) secret_names = list(map(lambda s: s["Name"], secrets["SecretList"])) - assert_items_equal(secret_names, ["foo"]) + assert secret_names == ["foo"] @mock_secretsmanager @@ -90,7 +85,7 @@ def test_with_tag_value_filter(): secrets = conn.list_secrets(Filters=[{"Key": "tag-value", "Values": ["baz"]}]) secret_names = list(map(lambda s: s["Name"], secrets["SecretList"])) - assert_items_equal(secret_names, ["foo"]) + assert secret_names == ["foo"] @mock_secretsmanager @@ -103,7 +98,7 @@ def test_with_description_filter(): secrets = conn.list_secrets(Filters=[{"Key": "description", "Values": ["baz"]}]) secret_names = list(map(lambda s: s["Name"], secrets["SecretList"])) - assert_items_equal(secret_names, ["foo"]) + assert secret_names == ["foo"] @mock_secretsmanager @@ -128,14 +123,14 @@ def test_with_all_filter(): secrets = conn.list_secrets(Filters=[{"Key": "all", "Values": ["foo"]}]) secret_names = list(map(lambda s: s["Name"], secrets["SecretList"])) - assert_items_equal(secret_names, ["foo", "bar", "baz", "qux", "multi"]) + assert secret_names == ["foo", "bar", "baz", "qux", "multi"] @mock_secretsmanager def test_with_no_filter_key(): conn = boto_client() - with assert_raises(ClientError) as ire: + with pytest.raises(ClientError) as ire: conn.list_secrets(Filters=[{"Values": ["foo"]}]) ire.exception.response["Error"]["Code"].should.equal("InvalidParameterException") @@ -148,7 +143,7 @@ def test_with_no_filter_values(): conn.create_secret(Name="foo", SecretString="secret", Description="hello") - with assert_raises(ClientError) as ire: + with pytest.raises(ClientError) as ire: conn.list_secrets(Filters=[{"Key": "description"}]) ire.exception.response["Error"]["Code"].should.equal("InvalidParameterException") @@ -161,7 +156,7 @@ def test_with_no_filter_values(): def test_with_invalid_filter_key(): conn = boto_client() - with assert_raises(ClientError) as ire: + with pytest.raises(ClientError) as ire: conn.list_secrets(Filters=[{"Key": "invalid", "Values": ["foo"]}]) ire.exception.response["Error"]["Code"].should.equal("ValidationException") @@ -190,7 +185,7 @@ def test_with_duplicate_filter_keys(): ) secret_names = list(map(lambda s: s["Name"], secrets["SecretList"])) - assert_items_equal(secret_names, ["foo"]) + assert secret_names == ["foo"] @mock_secretsmanager @@ -220,7 +215,7 @@ def test_with_multiple_filters(): ) secret_names = list(map(lambda 
s: s["Name"], secrets["SecretList"])) - assert_items_equal(secret_names, ["foo"]) + assert secret_names == ["foo"] @mock_secretsmanager @@ -234,7 +229,7 @@ def test_with_filter_with_multiple_values(): secrets = conn.list_secrets(Filters=[{"Key": "name", "Values": ["foo", "bar"]}]) secret_names = list(map(lambda s: s["Name"], secrets["SecretList"])) - assert_items_equal(secret_names, ["foo", "bar"]) + assert secret_names == ["foo", "bar"] @mock_secretsmanager @@ -250,4 +245,4 @@ def test_with_filter_with_value_with_multiple_words(): secrets = conn.list_secrets(Filters=[{"Key": "description", "Values": ["one two"]}]) secret_names = list(map(lambda s: s["Name"], secrets["SecretList"])) - assert_items_equal(secret_names, ["foo", "bar"]) + assert secret_names == ["foo", "bar"] diff --git a/tests/test_secretsmanager/test_secretsmanager.py b/tests/test_secretsmanager/test_secretsmanager.py index 68a7e6742ae2..301ceb081d29 100644 --- a/tests/test_secretsmanager/test_secretsmanager.py +++ b/tests/test_secretsmanager/test_secretsmanager.py @@ -9,7 +9,7 @@ import pytz from datetime import datetime import sure # noqa -from nose.tools import assert_raises, assert_equal +import pytest from six import b DEFAULT_SECRET_NAME = "test-secret" @@ -53,13 +53,12 @@ def test_get_secret_value_binary(): def test_get_secret_that_does_not_exist(): conn = boto3.client("secretsmanager", region_name="us-west-2") - with assert_raises(ClientError) as cm: + with pytest.raises(ClientError) as cm: result = conn.get_secret_value(SecretId="i-dont-exist") - assert_equal( - "Secrets Manager can't find the specified secret.", - cm.exception.response["Error"]["Message"], - ) + assert \ + "Secrets Manager can't find the specified secret." == \ + cm.exception.response["Error"]["Message"] @mock_secretsmanager @@ -69,13 +68,12 @@ def test_get_secret_that_does_not_match(): Name="java-util-test-password", SecretString="foosecret" ) - with assert_raises(ClientError) as cm: + with pytest.raises(ClientError) as cm: result = conn.get_secret_value(SecretId="i-dont-match") - assert_equal( - "Secrets Manager can't find the specified secret.", - cm.exception.response["Error"]["Message"], - ) + assert \ + "Secrets Manager can't find the specified secret." 
== \
+ cm.exception.response["Error"]["Message"]


@mock_secretsmanager
@@ -86,7 +84,7 @@ def test_get_secret_value_that_is_marked_deleted():

conn.delete_secret(SecretId="test-secret")

- with assert_raises(ClientError):
+ with pytest.raises(ClientError):
result = conn.get_secret_value(SecretId="test-secret")


@@ -96,13 +94,12 @@ def test_get_secret_that_has_no_value():

create_secret = conn.create_secret(Name="java-util-test-password")

- with assert_raises(ClientError) as cm:
+ with pytest.raises(ClientError) as cm:
result = conn.get_secret_value(SecretId="java-util-test-password")

- assert_equal(
- "Secrets Manager can't find the specified secret value for staging label: AWSCURRENT",
- cm.exception.response["Error"]["Message"],
- )
+ assert \
+ "Secrets Manager can't find the specified secret value for staging label: AWSCURRENT" == \
+ cm.exception.response["Error"]["Message"]


@mock_secretsmanager
@@ -227,7 +224,7 @@ def test_delete_secret_force():
assert result["DeletionDate"] > datetime.fromtimestamp(1, pytz.utc)
assert result["Name"] == "test-secret"

- with assert_raises(ClientError):
+ with pytest.raises(ClientError):
result = conn.get_secret_value(SecretId="test-secret")


@@ -245,7 +242,7 @@ def test_delete_secret_force_with_arn():
assert result["DeletionDate"] > datetime.fromtimestamp(1, pytz.utc)
assert result["Name"] == "test-secret"

- with assert_raises(ClientError):
+ with pytest.raises(ClientError):
result = conn.get_secret_value(SecretId="test-secret")


@@ -253,7 +250,7 @@ def test_delete_secret_force_with_arn():
def test_delete_secret_that_does_not_exist():
conn = boto3.client("secretsmanager", region_name="us-west-2")

- with assert_raises(ClientError):
+ with pytest.raises(ClientError):
result = conn.delete_secret(
SecretId="i-dont-exist", ForceDeleteWithoutRecovery=True
)
@@ -265,7 +262,7 @@ def test_delete_secret_fails_with_both_force_delete_flag_and_recovery_window_fla

conn.create_secret(Name="test-secret", SecretString="foosecret")

- with assert_raises(ClientError):
+ with pytest.raises(ClientError):
result = conn.delete_secret(
SecretId="test-secret",
RecoveryWindowInDays=1,
@@ -279,7 +276,7 @@ def test_delete_secret_recovery_window_too_short():

conn.create_secret(Name="test-secret", SecretString="foosecret")

- with assert_raises(ClientError):
+ with pytest.raises(ClientError):
result = conn.delete_secret(SecretId="test-secret", RecoveryWindowInDays=6)


@@ -289,7 +286,7 @@ def test_delete_secret_recovery_window_too_long():

conn.create_secret(Name="test-secret", SecretString="foosecret")

- with assert_raises(ClientError):
+ with pytest.raises(ClientError):
result = conn.delete_secret(SecretId="test-secret", RecoveryWindowInDays=31)


@@ -301,7 +298,7 @@ def test_delete_secret_that_is_marked_deleted():

deleted_secret = conn.delete_secret(SecretId="test-secret")

- with assert_raises(ClientError):
+ with pytest.raises(ClientError):
result = conn.delete_secret(SecretId="test-secret")


@@ -339,7 +336,7 @@ def test_get_random_exclude_lowercase():
conn = boto3.client("secretsmanager", region_name="us-west-2")

random_password = conn.get_random_password(PasswordLength=55, ExcludeLowercase=True)
- assert any(c.islower() for c in random_password["RandomPassword"]) == False
+ assert not any(c.islower() for c in random_password["RandomPassword"])


@mock_secretsmanager
@@ -347,7 +344,7 @@ def test_get_random_exclude_uppercase():
conn = boto3.client("secretsmanager", region_name="us-west-2")

random_password = conn.get_random_password(PasswordLength=55, ExcludeUppercase=True)
- assert any(c.isupper() for c
in random_password["RandomPassword"]) == False + assert not any(c.isupper() for c in random_password["RandomPassword"]) @mock_secretsmanager @@ -357,7 +354,7 @@ def test_get_random_exclude_characters_and_symbols(): random_password = conn.get_random_password( PasswordLength=20, ExcludeCharacters="xyzDje@?!." ) - assert any(c in "xyzDje@?!." for c in random_password["RandomPassword"]) == False + assert not any(c in "xyzDje@?!." for c in random_password["RandomPassword"]) assert len(random_password["RandomPassword"]) == 20 @@ -366,7 +363,7 @@ def test_get_random_exclude_numbers(): conn = boto3.client("secretsmanager", region_name="us-west-2") random_password = conn.get_random_password(PasswordLength=100, ExcludeNumbers=True) - assert any(c.isdigit() for c in random_password["RandomPassword"]) == False + assert not any(c.isdigit() for c in random_password["RandomPassword"]) @mock_secretsmanager @@ -376,9 +373,7 @@ def test_get_random_exclude_punctuation(): random_password = conn.get_random_password( PasswordLength=100, ExcludePunctuation=True ) - assert ( - any(c in string.punctuation for c in random_password["RandomPassword"]) == False - ) + assert not any(c in string.punctuation for c in random_password["RandomPassword"]) @mock_secretsmanager @@ -386,7 +381,7 @@ def test_get_random_include_space_false(): conn = boto3.client("secretsmanager", region_name="us-west-2") random_password = conn.get_random_password(PasswordLength=300) - assert any(c.isspace() for c in random_password["RandomPassword"]) == False + assert not any(c.isspace() for c in random_password["RandomPassword"]) @mock_secretsmanager @@ -394,7 +389,7 @@ def test_get_random_include_space_true(): conn = boto3.client("secretsmanager", region_name="us-west-2") random_password = conn.get_random_password(PasswordLength=4, IncludeSpace=True) - assert any(c.isspace() for c in random_password["RandomPassword"]) == True + assert any(c.isspace() for c in random_password["RandomPassword"]) @mock_secretsmanager @@ -404,25 +399,17 @@ def test_get_random_require_each_included_type(): random_password = conn.get_random_password( PasswordLength=4, RequireEachIncludedType=True ) - assert ( - any(c in string.punctuation for c in random_password["RandomPassword"]) == True - ) - assert ( - any(c in string.ascii_lowercase for c in random_password["RandomPassword"]) - == True - ) - assert ( - any(c in string.ascii_uppercase for c in random_password["RandomPassword"]) - == True - ) - assert any(c in string.digits for c in random_password["RandomPassword"]) == True + assert any(c in string.punctuation for c in random_password["RandomPassword"]) + assert any(c in string.ascii_lowercase for c in random_password["RandomPassword"]) + assert any(c in string.ascii_uppercase for c in random_password["RandomPassword"]) + assert any(c in string.digits for c in random_password["RandomPassword"]) @mock_secretsmanager def test_get_random_too_short_password(): conn = boto3.client("secretsmanager", region_name="us-west-2") - with assert_raises(ClientError): + with pytest.raises(ClientError): random_password = conn.get_random_password(PasswordLength=3) @@ -430,7 +417,7 @@ def test_get_random_too_short_password(): def test_get_random_too_long_password(): conn = boto3.client("secretsmanager", region_name="us-west-2") - with assert_raises(Exception): + with pytest.raises(Exception): random_password = conn.get_random_password(PasswordLength=5555) @@ -468,7 +455,7 @@ def test_describe_secret_with_arn(): def test_describe_secret_that_does_not_exist(): conn = 
boto3.client("secretsmanager", region_name="us-west-2") - with assert_raises(ClientError): + with pytest.raises(ClientError): result = conn.get_secret_value(SecretId="i-dont-exist") @@ -477,7 +464,7 @@ def test_describe_secret_that_does_not_match(): conn = boto3.client("secretsmanager", region_name="us-west-2") conn.create_secret(Name="test-secret", SecretString="foosecret") - with assert_raises(ClientError): + with pytest.raises(ClientError): result = conn.get_secret_value(SecretId="i-dont-match") @@ -515,7 +502,7 @@ def test_restore_secret_that_is_not_deleted(): def test_restore_secret_that_does_not_exist(): conn = boto3.client("secretsmanager", region_name="us-west-2") - with assert_raises(ClientError): + with pytest.raises(ClientError): result = conn.restore_secret(SecretId="i-dont-exist") @@ -566,7 +553,7 @@ def test_rotate_secret_that_is_marked_deleted(): conn.delete_secret(SecretId="test-secret") - with assert_raises(ClientError): + with pytest.raises(ClientError): result = conn.rotate_secret(SecretId="test-secret") @@ -574,7 +561,7 @@ def test_rotate_secret_that_is_marked_deleted(): def test_rotate_secret_that_does_not_exist(): conn = boto3.client("secretsmanager", "us-west-2") - with assert_raises(ClientError): + with pytest.raises(ClientError): result = conn.rotate_secret(SecretId="i-dont-exist") @@ -583,7 +570,7 @@ def test_rotate_secret_that_does_not_match(): conn = boto3.client("secretsmanager", region_name="us-west-2") conn.create_secret(Name="test-secret", SecretString="foosecret") - with assert_raises(ClientError): + with pytest.raises(ClientError): result = conn.rotate_secret(SecretId="i-dont-match") @@ -603,7 +590,7 @@ def test_rotate_secret_client_request_token_too_long(): client_request_token = ( "ED9F8B6C-85B7-446A-B7E4-38F2A3BEB13C-" "ED9F8B6C-85B7-446A-B7E4-38F2A3BEB13C" ) - with assert_raises(ClientError): + with pytest.raises(ClientError): result = conn.rotate_secret( SecretId=DEFAULT_SECRET_NAME, ClientRequestToken=client_request_token ) @@ -615,7 +602,7 @@ def test_rotate_secret_rotation_lambda_arn_too_long(): conn.create_secret(Name=DEFAULT_SECRET_NAME, SecretString="foosecret") rotation_lambda_arn = "85B7-446A-B7E4" * 147 # == 2058 characters - with assert_raises(ClientError): + with pytest.raises(ClientError): result = conn.rotate_secret( SecretId=DEFAULT_SECRET_NAME, RotationLambdaARN=rotation_lambda_arn ) @@ -635,7 +622,7 @@ def test_rotate_secret_rotation_period_too_long(): conn.create_secret(Name=DEFAULT_SECRET_NAME, SecretString="foosecret") rotation_rules = {"AutomaticallyAfterDays": 1001} - with assert_raises(ClientError): + with pytest.raises(ClientError): result = conn.rotate_secret( SecretId=DEFAULT_SECRET_NAME, RotationRules=rotation_rules ) @@ -712,7 +699,7 @@ def test_create_and_put_secret_binary_value_puts_new_secret(): @mock_secretsmanager def test_put_secret_binary_requires_either_string_or_binary(): conn = boto3.client("secretsmanager", region_name="us-west-2") - with assert_raises(ClientError) as ire: + with pytest.raises(ClientError) as ire: conn.put_secret_value(SecretId=DEFAULT_SECRET_NAME) ire.exception.response["Error"]["Code"].should.equal("InvalidRequestException") @@ -889,15 +876,14 @@ def test_update_secret_with_tags_and_description(): def test_update_secret_which_does_not_exit(): conn = boto3.client("secretsmanager", region_name="us-west-2") - with assert_raises(ClientError) as cm: + with pytest.raises(ClientError) as cm: updated_secret = conn.update_secret( SecretId="test-secret", SecretString="barsecret" ) - assert_equal( - 
"Secrets Manager can't find the specified secret.", - cm.exception.response["Error"]["Message"], - ) + assert \ + "Secrets Manager can't find the specified secret." == \ + cm.exception.response["Error"]["Message"] @mock_secretsmanager @@ -907,7 +893,7 @@ def test_update_secret_marked_as_deleted(): created_secret = conn.create_secret(Name="test-secret", SecretString="foosecret") deleted_secret = conn.delete_secret(SecretId="test-secret") - with assert_raises(ClientError) as cm: + with pytest.raises(ClientError) as cm: updated_secret = conn.update_secret( SecretId="test-secret", SecretString="barsecret" ) diff --git a/tests/test_ses/__init__.py b/tests/test_ses/__init__.py new file mode 100644 index 000000000000..08a1c1568c9c --- /dev/null +++ b/tests/test_ses/__init__.py @@ -0,0 +1 @@ +# This file is intentionally left blank. diff --git a/tests/test_ses/test_ses_boto3.py b/tests/test_ses/test_ses_boto3.py index efd4b980c986..f0af73fd339f 100644 --- a/tests/test_ses/test_ses_boto3.py +++ b/tests/test_ses/test_ses_boto3.py @@ -4,7 +4,7 @@ from botocore.exceptions import ClientError from six.moves.email_mime_multipart import MIMEMultipart from six.moves.email_mime_text import MIMEText -from nose.tools import assert_raises +import pytest import sure # noqa @@ -298,7 +298,7 @@ def test_create_configuration_set(): }, ) - with assert_raises(ClientError) as ex: + with pytest.raises(ClientError) as ex: conn.create_configuration_set_event_destination( ConfigurationSetName="failtest", EventDestination={ @@ -313,7 +313,7 @@ def test_create_configuration_set(): ex.exception.response["Error"]["Code"].should.equal("ConfigurationSetDoesNotExist") - with assert_raises(ClientError) as ex: + with pytest.raises(ClientError) as ex: conn.create_configuration_set_event_destination( ConfigurationSetName="test", EventDestination={ @@ -336,7 +336,7 @@ def test_create_receipt_rule_set(): result["ResponseMetadata"]["HTTPStatusCode"].should.equal(200) - with assert_raises(ClientError) as ex: + with pytest.raises(ClientError) as ex: conn.create_receipt_rule_set(RuleSetName="testRuleSet") ex.exception.response["Error"]["Code"].should.equal("RuleSetNameAlreadyExists") @@ -378,7 +378,7 @@ def test_create_receipt_rule(): result["ResponseMetadata"]["HTTPStatusCode"].should.equal(200) - with assert_raises(ClientError) as ex: + with pytest.raises(ClientError) as ex: conn.create_receipt_rule( RuleSetName=rule_set_name, Rule={ @@ -409,7 +409,7 @@ def test_create_receipt_rule(): ex.exception.response["Error"]["Code"].should.equal("RuleAlreadyExists") - with assert_raises(ClientError) as ex: + with pytest.raises(ClientError) as ex: conn.create_receipt_rule( RuleSetName="InvalidRuleSetaName", Rule={ @@ -455,7 +455,7 @@ def test_create_ses_template(): "
</h1><p>Your favorite animal is {{favoriteanimal}}.</p>
", } ) - with assert_raises(ClientError) as ex: + with pytest.raises(ClientError) as ex: conn.create_template( Template={ "TemplateName": "MyTemplate", @@ -475,7 +475,7 @@ def test_create_ses_template(): result["Template"]["SubjectPart"].should.equal("Greetings, {{name}}!") # get a template which is not present - with assert_raises(ClientError) as ex: + with pytest.raises(ClientError) as ex: conn.get_template(TemplateName="MyFakeTemplate") ex.exception.response["Error"]["Code"].should.equal("TemplateDoesNotExist") diff --git a/tests/test_ses/test_ses_sns_boto3.py b/tests/test_ses/test_ses_sns_boto3.py index 43d4000bf575..2a165080edfc 100644 --- a/tests/test_ses/test_ses_sns_boto3.py +++ b/tests/test_ses/test_ses_sns_boto3.py @@ -7,7 +7,6 @@ from six.moves.email_mime_text import MIMEText import sure # noqa -from nose import tools from moto import mock_ses, mock_sns, mock_sqs from moto.ses.models import SESFeedback from moto.core import ACCOUNT_ID diff --git a/tests/test_sns/__init__.py b/tests/test_sns/__init__.py new file mode 100644 index 000000000000..08a1c1568c9c --- /dev/null +++ b/tests/test_sns/__init__.py @@ -0,0 +1 @@ +# This file is intentionally left blank. diff --git a/tests/test_sns/test_publishing_boto3.py b/tests/test_sns/test_publishing_boto3.py index 63c409302f8f..07bf04b11d82 100644 --- a/tests/test_sns/test_publishing_boto3.py +++ b/tests/test_sns/test_publishing_boto3.py @@ -10,7 +10,7 @@ import responses from botocore.exceptions import ClientError -from nose.tools import assert_raises +import pytest from moto import mock_sns, mock_sqs, settings from moto.core import ACCOUNT_ID from moto.sns import sns_backend @@ -233,13 +233,13 @@ def test_publish_bad_sms(): client = boto3.client("sns", region_name="us-east-1") # Test invalid number - with assert_raises(ClientError) as cm: + with pytest.raises(ClientError) as cm: client.publish(PhoneNumber="NAA+15551234567", Message="my message") cm.exception.response["Error"]["Code"].should.equal("InvalidParameter") cm.exception.response["Error"]["Message"].should.contain("not meet the E164") # Test to long ASCII message - with assert_raises(ClientError) as cm: + with pytest.raises(ClientError) as cm: client.publish(PhoneNumber="+15551234567", Message="a" * 1601) cm.exception.response["Error"]["Code"].should.equal("InvalidParameter") cm.exception.response["Error"]["Message"].should.contain("must be less than 1600") @@ -387,7 +387,7 @@ def test_publish_message_too_long(): sns = boto3.resource("sns", region_name="us-east-1") topic = sns.create_topic(Name="some-topic") - with assert_raises(ClientError): + with pytest.raises(ClientError): topic.publish(Message="".join(["." 
for i in range(0, 262145)])) # message short enough - does not raise an error diff --git a/tests/test_sns/test_subscriptions_boto3.py b/tests/test_sns/test_subscriptions_boto3.py index c15658dcaba2..b476cd86d228 100644 --- a/tests/test_sns/test_subscriptions_boto3.py +++ b/tests/test_sns/test_subscriptions_boto3.py @@ -5,7 +5,7 @@ import sure # noqa from botocore.exceptions import ClientError -from nose.tools import assert_raises +import pytest from moto import mock_sns, mock_sqs from moto.sns.models import ( @@ -293,7 +293,7 @@ def test_creating_subscription_with_attributes(): subscriptions.should.have.length_of(0) # invalid attr name - with assert_raises(ClientError): + with pytest.raises(ClientError): conn.subscribe( TopicArn=topic_arn, Protocol="http", @@ -387,17 +387,17 @@ def test_set_subscription_attributes(): attrs["Attributes"]["FilterPolicy"].should.equal(filter_policy) # not existing subscription - with assert_raises(ClientError): + with pytest.raises(ClientError): conn.set_subscription_attributes( SubscriptionArn="invalid", AttributeName="RawMessageDelivery", AttributeValue="true", ) - with assert_raises(ClientError): + with pytest.raises(ClientError): attrs = conn.get_subscription_attributes(SubscriptionArn="invalid") # invalid attr name - with assert_raises(ClientError): + with pytest.raises(ClientError): conn.set_subscription_attributes( SubscriptionArn=subscription_arn, AttributeName="InvalidName", @@ -502,7 +502,7 @@ def test_check_opted_out_invalid(): conn = boto3.client("sns", region_name="us-east-1") # Invalid phone number - with assert_raises(ClientError): + with pytest.raises(ClientError): conn.check_if_phone_number_is_opted_out(phoneNumber="+44742LALALA") diff --git a/tests/test_sqs/__init__.py b/tests/test_sqs/__init__.py new file mode 100644 index 000000000000..08a1c1568c9c --- /dev/null +++ b/tests/test_sqs/__init__.py @@ -0,0 +1 @@ +# This file is intentionally left blank. diff --git a/tests/test_sqs/test_sqs.py b/tests/test_sqs/test_sqs.py index b974e04f6da0..b0a91bbdeafa 100644 --- a/tests/test_sqs/test_sqs.py +++ b/tests/test_sqs/test_sqs.py @@ -11,14 +11,13 @@ import botocore.exceptions import six import sure # noqa -import tests.backport_assert_raises # noqa from boto.exception import SQSError from boto.sqs.message import Message, RawMessage from botocore.exceptions import ClientError from freezegun import freeze_time from moto import mock_sqs, mock_sqs_deprecated, mock_lambda, mock_logs, settings -from nose import SkipTest -from nose.tools import assert_raises +from unittest import SkipTest +import pytest from tests.helpers import requires_boto_gte from tests.test_awslambda.test_lambda import get_test_zip_file1, get_role_name from moto.core import ACCOUNT_ID @@ -220,7 +219,7 @@ def test_get_queue_url_errors(): @mock_sqs def test_get_nonexistent_queue(): sqs = boto3.resource("sqs", region_name="us-east-1") - with assert_raises(ClientError) as err: + with pytest.raises(ClientError) as err: sqs.get_queue_by_name(QueueName="non-existing-queue") ex = err.exception ex.operation_name.should.equal("GetQueueUrl") @@ -229,7 +228,7 @@ def test_get_nonexistent_queue(): "The specified queue non-existing-queue does not exist for this wsdl version." 
) - with assert_raises(ClientError) as err: + with pytest.raises(ClientError) as err: sqs.Queue("http://whatever-incorrect-queue-address").load() ex = err.exception ex.operation_name.should.equal("GetQueueAttributes") @@ -368,7 +367,7 @@ def test_message_with_attributes_invalid_datatype(): sqs = boto3.resource("sqs", region_name="us-east-1") queue = sqs.create_queue(QueueName="blah") - with assert_raises(ClientError) as e: + with pytest.raises(ClientError) as e: queue.send_message( MessageBody="derp", MessageAttributes={ @@ -491,7 +490,7 @@ def test_delete_queue(): queue.delete() conn.list_queues().get("QueueUrls").should.equal(None) - with assert_raises(botocore.exceptions.ClientError): + with pytest.raises(botocore.exceptions.ClientError): queue.delete() @@ -758,10 +757,10 @@ def test_max_number_of_messages_invalid_param(): sqs = boto3.resource("sqs", region_name="us-east-1") queue = sqs.create_queue(QueueName="test-queue") - with assert_raises(ClientError): + with pytest.raises(ClientError): queue.receive_messages(MaxNumberOfMessages=11) - with assert_raises(ClientError): + with pytest.raises(ClientError): queue.receive_messages(MaxNumberOfMessages=0) # no error but also no messages returned @@ -773,10 +772,10 @@ def test_wait_time_seconds_invalid_param(): sqs = boto3.resource("sqs", region_name="us-east-1") queue = sqs.create_queue(QueueName="test-queue") - with assert_raises(ClientError): + with pytest.raises(ClientError): queue.receive_messages(WaitTimeSeconds=-1) - with assert_raises(ClientError): + with pytest.raises(ClientError): queue.receive_messages(WaitTimeSeconds=21) # no error but also no messages returned @@ -1652,7 +1651,7 @@ def test_add_permission_errors(): Actions=["ReceiveMessage"], ) - with assert_raises(ClientError) as e: + with pytest.raises(ClientError) as e: client.add_permission( QueueUrl=queue_url, Label="test", @@ -1667,7 +1666,7 @@ def test_add_permission_errors(): "Value test for parameter Label is invalid. " "Reason: Already exists." ) - with assert_raises(ClientError) as e: + with pytest.raises(ClientError) as e: client.add_permission( QueueUrl=queue_url, Label="test-2", @@ -1683,7 +1682,7 @@ def test_add_permission_errors(): "Reason: Only the queue owner is allowed to invoke this action." ) - with assert_raises(ClientError) as e: + with pytest.raises(ClientError) as e: client.add_permission( QueueUrl=queue_url, Label="test-2", @@ -1698,7 +1697,7 @@ def test_add_permission_errors(): "The request must contain the parameter Actions." ) - with assert_raises(ClientError) as e: + with pytest.raises(ClientError) as e: client.add_permission( QueueUrl=queue_url, Label="test-2", @@ -1713,7 +1712,7 @@ def test_add_permission_errors(): "Value [] for parameter PrincipalId is invalid. Reason: Unable to verify." 
) - with assert_raises(ClientError) as e: + with pytest.raises(ClientError) as e: client.add_permission( QueueUrl=queue_url, Label="test-2", @@ -1744,7 +1743,7 @@ def test_remove_permission_errors(): response = client.create_queue(QueueName="test-queue") queue_url = response["QueueUrl"] - with assert_raises(ClientError) as e: + with pytest.raises(ClientError) as e: client.remove_permission(QueueUrl=queue_url, Label="test") ex = e.exception ex.operation_name.should.equal("RemovePermission") @@ -1876,7 +1875,7 @@ def test_create_fifo_queue_with_dlq(): ) # Cant have fifo queue with non fifo DLQ - with assert_raises(ClientError): + with pytest.raises(ClientError): sqs.create_queue( QueueName="test-queue2.fifo", Attributes={ @@ -1970,7 +1969,7 @@ def test_redrive_policy_available(): assert json.loads(attributes["RedrivePolicy"]) == redrive_policy # Cant have redrive policy without maxReceiveCount - with assert_raises(ClientError): + with pytest.raises(ClientError): sqs.create_queue( QueueName="test-queue2", Attributes={ @@ -1988,7 +1987,7 @@ def test_redrive_policy_non_existent_queue(): "maxReceiveCount": 1, } - with assert_raises(ClientError): + with pytest.raises(ClientError): sqs.create_queue( QueueName="test-queue", Attributes={"RedrivePolicy": json.dumps(redrive_policy)}, @@ -2173,7 +2172,7 @@ def test_send_messages_to_fifo_without_message_group_id(): Attributes={"FifoQueue": "true", "ContentBasedDeduplication": "true"}, ) - with assert_raises(Exception) as e: + with pytest.raises(Exception) as e: queue.send_message(MessageBody="message-1") ex = e.exception ex.response["Error"]["Code"].should.equal("MissingParameter") diff --git a/tests/test_ssm/__init__.py b/tests/test_ssm/__init__.py index e69de29bb2d1..08a1c1568c9c 100644 --- a/tests/test_ssm/__init__.py +++ b/tests/test_ssm/__init__.py @@ -0,0 +1 @@ +# This file is intentionally left blank. diff --git a/tests/test_ssm/test_ssm_boto3.py b/tests/test_ssm/test_ssm_boto3.py index c590e75b7b34..e3c03203f6cb 100644 --- a/tests/test_ssm/test_ssm_boto3.py +++ b/tests/test_ssm/test_ssm_boto3.py @@ -9,7 +9,7 @@ import uuid from botocore.exceptions import ClientError, ParamValidationError -from nose.tools import assert_raises +import pytest from moto import mock_ec2, mock_ssm @@ -1671,7 +1671,7 @@ def test_list_commands(): cmd["InstanceIds"].should.contain("i-123456") # test the error case for an invalid command id - with assert_raises(ClientError): + with pytest.raises(ClientError): response = client.list_commands(CommandId=str(uuid.uuid4())) @@ -1703,13 +1703,13 @@ def test_get_command_invocation(): invocation_response["InstanceId"].should.equal(instance_id) # test the error case for an invalid instance id - with assert_raises(ClientError): + with pytest.raises(ClientError): invocation_response = client.get_command_invocation( CommandId=cmd_id, InstanceId="i-FAKE" ) # test the error case for an invalid plugin name - with assert_raises(ClientError): + with pytest.raises(ClientError): invocation_response = client.get_command_invocation( CommandId=cmd_id, InstanceId=instance_id, PluginName="FAKE" ) diff --git a/tests/test_stepfunctions/__init__.py b/tests/test_stepfunctions/__init__.py new file mode 100644 index 000000000000..08a1c1568c9c --- /dev/null +++ b/tests/test_stepfunctions/__init__.py @@ -0,0 +1 @@ +# This file is intentionally left blank. 
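One pytest idiom worth noting alongside the exception-message checks in these conversions: pytest.raises accepts a match= argument, a regular expression searched against the stringified exception, which can fold the separate message assertion into the context manager itself. The ports in this series keep the two-step style; for comparison, a one-step sketch (illustrative only, with a made-up message):

    import re

    import pytest

    def test_match_folds_the_message_check():
        # match= is applied with re.search() against str(exc), so literal
        # punctuation such as parentheses must be escaped.
        with pytest.raises(ValueError, match=re.escape("unknown queue (foo)")):
            raise ValueError("unknown queue (foo)")
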
diff --git a/tests/test_stepfunctions/test_stepfunctions.py b/tests/test_stepfunctions/test_stepfunctions.py index dd11e7961a02..1a946e8e3a11 100644 --- a/tests/test_stepfunctions/test_stepfunctions.py +++ b/tests/test_stepfunctions/test_stepfunctions.py @@ -6,7 +6,7 @@ from datetime import datetime from botocore.exceptions import ClientError -from nose.tools import assert_raises +import pytest from moto import mock_cloudformation, mock_sts, mock_stepfunctions from moto.core import ACCOUNT_ID @@ -134,7 +134,7 @@ def test_state_machine_creation_fails_with_invalid_names(): # for invalid_name in invalid_names: - with assert_raises(ClientError): + with pytest.raises(ClientError): client.create_state_machine( name=invalid_name, definition=str(simple_definition), @@ -147,7 +147,7 @@ def test_state_machine_creation_requires_valid_role_arn(): client = boto3.client("stepfunctions", region_name=region) name = "example_step_function" # - with assert_raises(ClientError): + with pytest.raises(ClientError): client.create_state_machine( name=name, definition=str(simple_definition), @@ -291,7 +291,7 @@ def test_state_machine_creation_can_be_described(): def test_state_machine_throws_error_when_describing_unknown_machine(): client = boto3.client("stepfunctions", region_name=region) # - with assert_raises(ClientError): + with pytest.raises(ClientError): unknown_state_machine = ( "arn:aws:states:" + region @@ -307,7 +307,7 @@ def test_state_machine_throws_error_when_describing_unknown_machine(): def test_state_machine_throws_error_when_describing_bad_arn(): client = boto3.client("stepfunctions", region_name=region) # - with assert_raises(ClientError): + with pytest.raises(ClientError): client.describe_state_machine(stateMachineArn="bad") @@ -316,7 +316,7 @@ def test_state_machine_throws_error_when_describing_bad_arn(): def test_state_machine_throws_error_when_describing_machine_in_different_account(): client = boto3.client("stepfunctions", region_name=region) # - with assert_raises(ClientError): + with pytest.raises(ClientError): unknown_state_machine = ( "arn:aws:states:" + region + ":000000000000:stateMachine:unknown" ) @@ -504,7 +504,7 @@ def test_state_machine_start_execution(): def test_state_machine_start_execution_bad_arn_raises_exception(): client = boto3.client("stepfunctions", region_name=region) # - with assert_raises(ClientError): + with pytest.raises(ClientError): client.start_execution(stateMachineArn="bad") @@ -544,7 +544,7 @@ def test_state_machine_start_execution_fails_on_duplicate_execution_name(): stateMachineArn=sm["stateMachineArn"], name="execution_name" ) # - with assert_raises(ClientError) as exc: + with pytest.raises(ClientError) as exc: _ = client.start_execution( stateMachineArn=sm["stateMachineArn"], name="execution_name" ) @@ -588,9 +588,9 @@ def test_state_machine_start_execution_with_invalid_input(): sm = client.create_state_machine( name="name", definition=str(simple_definition), roleArn=_get_default_role() ) - with assert_raises(ClientError): + with pytest.raises(ClientError): _ = client.start_execution(stateMachineArn=sm["stateMachineArn"], input="") - with assert_raises(ClientError): + with pytest.raises(ClientError): _ = client.start_execution(stateMachineArn=sm["stateMachineArn"], input="{") @@ -744,7 +744,7 @@ def test_state_machine_describe_execution_with_custom_input(): def test_execution_throws_error_when_describing_unknown_execution(): client = boto3.client("stepfunctions", region_name=region) # - with assert_raises(ClientError): + with pytest.raises(ClientError): 
unknown_execution = ( "arn:aws:states:" + region + ":" + _get_account_id() + ":execution:unknown" ) @@ -775,7 +775,7 @@ def test_state_machine_can_be_described_by_execution(): def test_state_machine_throws_error_when_describing_unknown_execution(): client = boto3.client("stepfunctions", region_name=region) # - with assert_raises(ClientError): + with pytest.raises(ClientError): unknown_execution = ( "arn:aws:states:" + region + ":" + _get_account_id() + ":execution:unknown" ) diff --git a/tests/test_sts/__init__.py b/tests/test_sts/__init__.py new file mode 100644 index 000000000000..08a1c1568c9c --- /dev/null +++ b/tests/test_sts/__init__.py @@ -0,0 +1 @@ +# This file is intentionally left blank. diff --git a/tests/test_sts/test_sts.py b/tests/test_sts/test_sts.py index efc04beb4e62..34b71c358c2f 100644 --- a/tests/test_sts/test_sts.py +++ b/tests/test_sts/test_sts.py @@ -6,7 +6,7 @@ import boto3 from botocore.client import ClientError from freezegun import freeze_time -from nose.tools import assert_raises +import pytest import sure # noqa @@ -357,7 +357,7 @@ def test_federation_token_with_too_long_policy(): json_policy = json.dumps(policy) assert len(json_policy) > MAX_FEDERATION_TOKEN_POLICY_LENGTH - with assert_raises(ClientError) as exc: + with pytest.raises(ClientError) as exc: cli.get_federation_token(Name="foo", DurationSeconds=3600, Policy=json_policy) exc.exception.response["Error"]["Code"].should.equal("ValidationError") exc.exception.response["Error"]["Message"].should.contain( diff --git a/tests/test_swf/models/test_domain.py b/tests/test_swf/models/test_domain.py index 32940753f391..9e7579ddd5bb 100644 --- a/tests/test_swf/models/test_domain.py +++ b/tests/test_swf/models/test_domain.py @@ -4,9 +4,6 @@ from moto.swf.exceptions import SWFUnknownResourceFault from moto.swf.models import Domain -# Ensure 'assert_raises' context manager support for Python 2.6 -import tests.backport_assert_raises # noqa - # Fake WorkflowExecution for tests purposes WorkflowExecution = namedtuple( "WorkflowExecution", ["workflow_id", "run_id", "execution_status", "open"] diff --git a/tests/test_swf/responses/test_workflow_executions.py b/tests/test_swf/responses/test_workflow_executions.py index bec352ce84e2..2832abf753db 100644 --- a/tests/test_swf/responses/test_workflow_executions.py +++ b/tests/test_swf/responses/test_workflow_executions.py @@ -4,9 +4,6 @@ import sure # noqa -# Ensure 'assert_raises' context manager support for Python 2.6 -import tests.backport_assert_raises # noqa - from moto import mock_swf_deprecated from moto.core.utils import unix_time diff --git a/tests/test_xray/__init__.py b/tests/test_xray/__init__.py new file mode 100644 index 000000000000..08a1c1568c9c --- /dev/null +++ b/tests/test_xray/__init__.py @@ -0,0 +1 @@ +# This file is intentionally left blank. diff --git a/tox.ini b/tox.ini index 9dacca18ca96..f77df29b35a9 100644 --- a/tox.ini +++ b/tox.ini @@ -12,7 +12,7 @@ deps = -r{toxinidir}/requirements-dev.txt commands = {envpython} setup.py test - nosetests {posargs} + pytest -v {posargs} [flake8] ignore = W503,W605,E128,E501,E203,E266,E501,E231 From ea489bce6c8f0d62b8836fa18d92024d1877afbf Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Mat=C4=9Bj=20Cepl?= Date: Tue, 6 Oct 2020 08:04:09 +0200 Subject: [PATCH 621/658] Finish porting from nose to pytest. 
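
Most of the churn in this patch is a single rename: nose's assert_raises
context manager exposed the caught exception as .exception, while the
ExceptionInfo object yielded by pytest.raises exposes it as .value
(alongside .type and .tb); there is no .exception attribute. The
post-port idiom, as a minimal standalone sketch:

    import pytest

    def test_exceptioninfo_value():
        with pytest.raises(ZeroDivisionError) as excinfo:
            1 / 0
        # ExceptionInfo carries .type, .value and .tb; the exception
        # object itself is .value, not .exception.
        assert excinfo.type is ZeroDivisionError
        assert "division" in str(excinfo.value)

The setup.cfg hunk registers the custom network marker so pytest does
not warn about an unknown mark (and would not error under
--strict-markers); marked tests can then be selected or deselected with
pytest -m network or pytest -m "not network". A hypothetical marked
test, for illustration:

    import pytest

    @pytest.mark.network  # registered under [tool:pytest] markers in setup.cfg
    def test_fetches_a_remote_resource():
        # Placeholder body; real tests under this mark would hit the network.
        assert True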
--- Makefile | 6 +- requirements-tests.txt | 1 + setup.cfg | 4 + tests/test_apigateway/test_apigateway.py | 96 ++++---- tests/test_athena/test_athena.py | 4 +- tests/test_autoscaling/test_autoscaling.py | 38 +-- .../test_awslambda_cloudformation.py | 2 +- tests/test_awslambda/test_lambda.py | 6 +- tests/test_batch/test_batch.py | 3 + .../test_cloudformation_stack_crud.py | 2 +- .../test_cloudwatch/test_cloudwatch_boto3.py | 8 +- tests/test_codecommit/test_codecommit.py | 10 +- tests/test_codepipeline/test_codepipeline.py | 22 +- .../test_cognitoidentity.py | 6 +- tests/test_cognitoidp/test_cognitoidp.py | 28 +-- tests/test_config/test_config.py | 226 +++++++++--------- tests/test_core/test_auth.py | 98 ++++---- tests/test_core/test_decorator_calls.py | 2 + tests/test_core/test_request_mocking.py | 2 + tests/test_datasync/test_datasync.py | 6 +- tests/test_dynamodb2/test_dynamodb.py | 118 ++++----- .../test_dynamodb_table_with_range_key.py | 4 +- tests/test_ec2/test_amis.py | 110 ++++----- tests/test_ec2/test_dhcp_options.py | 62 ++--- tests/test_ec2/test_elastic_block_store.py | 192 +++++++-------- tests/test_ec2/test_elastic_ip_addresses.py | 110 ++++----- .../test_elastic_network_interfaces.py | 38 +-- tests/test_ec2/test_flow_logs.py | 60 ++--- tests/test_ec2/test_general.py | 8 +- tests/test_ec2/test_instances.py | 128 +++++----- tests/test_ec2/test_internet_gateways.py | 68 +++--- tests/test_ec2/test_key_pairs.py | 56 ++--- tests/test_ec2/test_launch_templates.py | 2 +- tests/test_ec2/test_network_acls.py | 6 +- tests/test_ec2/test_route_tables.py | 64 ++--- tests/test_ec2/test_security_groups.py | 124 +++++----- tests/test_ec2/test_spot_instances.py | 12 +- tests/test_ec2/test_subnets.py | 28 +-- tests/test_ec2/test_tags.py | 56 ++--- tests/test_ec2/test_vpc_peering.py | 30 +-- tests/test_ec2/test_vpcs.py | 26 +- tests/test_ecs/test_ecs_boto3.py | 2 +- tests/test_elb/test_elb.py | 5 +- tests/test_elbv2/test_elbv2.py | 8 +- tests/test_emr/test_emr_boto3.py | 14 +- tests/test_glue/test_datacatalog.py | 50 ++-- tests/test_iam/test_iam.py | 56 ++--- tests/test_iam/test_iam_cloudformation.py | 8 +- tests/test_iam/test_iam_groups.py | 6 +- tests/test_iam/test_iam_policies.py | 6 +- tests/test_iot/test_iot.py | 24 +- tests/test_iotdata/test_iotdata.py | 4 +- tests/test_kms/test_kms.py | 16 +- tests/test_logs/test_integration.py | 27 ++- tests/test_logs/test_logs.py | 6 +- .../test_organizations_boto3.py | 108 ++++----- tests/test_ram/test_ram.py | 18 +- tests/test_s3/test_s3.py | 156 ++++++------ tests/test_s3/test_s3_lifecycle.py | 18 +- tests/test_s3/test_s3_storageclass.py | 10 +- .../test_sagemaker/test_sagemaker_endpoint.py | 16 +- tests/test_sagemaker/test_sagemaker_models.py | 2 +- .../test_sagemaker_notebooks.py | 20 +- .../test_secretsmanager/test_list_secrets.py | 14 +- .../test_secretsmanager.py | 22 +- tests/test_secretsmanager/test_server.py | 7 +- tests/test_ses/test_ses_boto3.py | 14 +- tests/test_sns/test_publishing_boto3.py | 8 +- tests/test_sqs/test_sqs.py | 20 +- tests/test_ssm/test_ssm_boto3.py | 22 +- .../test_stepfunctions/test_stepfunctions.py | 4 +- tests/test_sts/test_sts.py | 6 +- 72 files changed, 1289 insertions(+), 1280 deletions(-) diff --git a/Makefile b/Makefile index 2fc6aea96588..0df12ac17628 100644 --- a/Makefile +++ b/Makefile @@ -7,7 +7,7 @@ ifeq ($(TEST_SERVER_MODE), true) # exclude test_kinesisvideoarchivedmedia # because testing with moto_server is difficult with data-endpoint - TEST_EXCLUDE := --exclude='test_iot.*' 
--exclude="test_kinesisvideoarchivedmedia.*" + TEST_EXCLUDE := -k 'not (test_iot or test_kinesisvideoarchivedmedia)' else TEST_EXCLUDE := endif @@ -23,13 +23,13 @@ lint: test-only: rm -f .coverage rm -rf cover - @nosetests -sv --with-coverage --cover-html ./tests/ $(TEST_EXCLUDE) + @pytest -sv --cov=moto --cov-report html ./tests/ $(TEST_EXCLUDE) test: lint test-only test_server: - @TEST_SERVER_MODE=true nosetests -sv --with-coverage --cover-html ./tests/ + @TEST_SERVER_MODE=true pytest -sv --cov=moto --cov-report html ./tests/ aws_managed_policies: scripts/update_managed_policies.py diff --git a/requirements-tests.txt b/requirements-tests.txt index c19f35c7d4d2..817c38640639 100644 --- a/requirements-tests.txt +++ b/requirements-tests.txt @@ -1,4 +1,5 @@ pytest +pytest-cov sure==1.4.11 freezegun parameterized>=0.7.0 diff --git a/setup.cfg b/setup.cfg index 3c6e79cf31da..1c247ef3d21f 100644 --- a/setup.cfg +++ b/setup.cfg @@ -1,2 +1,6 @@ [bdist_wheel] universal=1 + +[tool:pytest] +markers = + network: marks tests which require network connection diff --git a/tests/test_apigateway/test_apigateway.py b/tests/test_apigateway/test_apigateway.py index 4a6c3eea396b..c34ddfa723cb 100644 --- a/tests/test_apigateway/test_apigateway.py +++ b/tests/test_apigateway/test_apigateway.py @@ -96,7 +96,7 @@ def test_create_rest_api_invalid_apikeysource(): description="this is my api", apiKeySource="not a valid api key source", ) - ex.exception.response["Error"]["Code"].should.equal("ValidationException") + ex.value.response["Error"]["Code"].should.equal("ValidationException") @mock_apigateway @@ -132,7 +132,7 @@ def test_create_rest_api_invalid_endpointconfiguration(): description="this is my api", endpointConfiguration={"types": ["INVALID"]}, ) - ex.exception.response["Error"]["Code"].should.equal("ValidationException") + ex.value.response["Error"]["Code"].should.equal("ValidationException") @mock_apigateway @@ -196,8 +196,8 @@ def test_create_resource__validate_name(): for name in invalid_names: with pytest.raises(ClientError) as ex: client.create_resource(restApiId=api_id, parentId=root_id, pathPart=name) - ex.exception.response["Error"]["Code"].should.equal("BadRequestException") - ex.exception.response["Error"]["Message"].should.equal( + ex.value.response["Error"]["Code"].should.equal("BadRequestException") + ex.value.response["Error"]["Message"].should.equal( "Resource's path part only allow a-zA-Z0-9._- and curly braces at the beginning and the end and an optional plus sign before the closing brace." 
) # All valid names should go through @@ -1196,8 +1196,8 @@ def test_create_deployment_requires_REST_methods(): with pytest.raises(ClientError) as ex: client.create_deployment(restApiId=api_id, stageName=stage_name)["id"] - ex.exception.response["Error"]["Code"].should.equal("BadRequestException") - ex.exception.response["Error"]["Message"].should.equal( + ex.value.response["Error"]["Code"].should.equal("BadRequestException") + ex.value.response["Error"]["Message"].should.equal( "The REST API doesn't contain any methods" ) @@ -1219,8 +1219,8 @@ def test_create_deployment_requires_REST_method_integrations(): with pytest.raises(ClientError) as ex: client.create_deployment(restApiId=api_id, stageName=stage_name)["id"] - ex.exception.response["Error"]["Code"].should.equal("BadRequestException") - ex.exception.response["Error"]["Message"].should.equal( + ex.value.response["Error"]["Code"].should.equal("BadRequestException") + ex.value.response["Error"]["Message"].should.equal( "No integration defined for method" ) @@ -1277,8 +1277,8 @@ def test_put_integration_response_requires_responseTemplate(): client.put_integration_response( restApiId=api_id, resourceId=root_id, httpMethod="GET", statusCode="200" ) - ex.exception.response["Error"]["Code"].should.equal("BadRequestException") - ex.exception.response["Error"]["Message"].should.equal("Invalid request input") + ex.value.response["Error"]["Code"].should.equal("BadRequestException") + ex.value.response["Error"]["Message"].should.equal("Invalid request input") # Works fine if responseTemplate is defined client.put_integration_response( restApiId=api_id, @@ -1319,8 +1319,8 @@ def test_put_integration_response_with_response_template(): restApiId=api_id, resourceId=root_id, httpMethod="GET", statusCode="200" ) - ex.exception.response["Error"]["Code"].should.equal("BadRequestException") - ex.exception.response["Error"]["Message"].should.equal("Invalid request input") + ex.value.response["Error"]["Code"].should.equal("BadRequestException") + ex.value.response["Error"]["Message"].should.equal("Invalid request input") client.put_integration_response( restApiId=api_id, @@ -1380,8 +1380,8 @@ def test_put_integration_validation(): type=type, uri="http://httpbin.org/robots.txt", ) - ex.exception.response["Error"]["Code"].should.equal("BadRequestException") - ex.exception.response["Error"]["Message"].should.equal( + ex.value.response["Error"]["Code"].should.equal("BadRequestException") + ex.value.response["Error"]["Message"].should.equal( "Enumeration value for HttpMethod must be non-empty" ) for type in types_not_requiring_integration_method: @@ -1440,8 +1440,8 @@ def test_put_integration_validation(): uri="arn:aws:apigateway:us-west-2:s3:path/b/k", integrationHttpMethod="POST", ) - ex.exception.response["Error"]["Code"].should.equal("BadRequestException") - ex.exception.response["Error"]["Message"].should.equal( + ex.value.response["Error"]["Code"].should.equal("BadRequestException") + ex.value.response["Error"]["Message"].should.equal( "Integrations of type 'AWS_PROXY' currently only supports Lambda function and Firehose stream invocations." 
) for type in aws_types: @@ -1456,8 +1456,8 @@ def test_put_integration_validation(): uri="arn:aws:apigateway:us-west-2:s3:path/b/k", integrationHttpMethod="POST", ) - ex.exception.response["Error"]["Code"].should.equal("AccessDeniedException") - ex.exception.response["Error"]["Message"].should.equal( + ex.value.response["Error"]["Code"].should.equal("AccessDeniedException") + ex.value.response["Error"]["Message"].should.equal( "Cross-account pass role is not allowed." ) for type in ["AWS"]: @@ -1471,8 +1471,8 @@ def test_put_integration_validation(): uri="arn:aws:apigateway:us-west-2:s3:path/b/k", integrationHttpMethod="POST", ) - ex.exception.response["Error"]["Code"].should.equal("BadRequestException") - ex.exception.response["Error"]["Message"].should.equal( + ex.value.response["Error"]["Code"].should.equal("BadRequestException") + ex.value.response["Error"]["Message"].should.equal( "Role ARN must be specified for AWS integrations" ) for type in http_types: @@ -1486,8 +1486,8 @@ def test_put_integration_validation(): uri="non-valid-http", integrationHttpMethod="POST", ) - ex.exception.response["Error"]["Code"].should.equal("BadRequestException") - ex.exception.response["Error"]["Message"].should.equal( + ex.value.response["Error"]["Code"].should.equal("BadRequestException") + ex.value.response["Error"]["Message"].should.equal( "Invalid HTTP endpoint specified for URI" ) for type in aws_types: @@ -1501,8 +1501,8 @@ def test_put_integration_validation(): uri="non-valid-arn", integrationHttpMethod="POST", ) - ex.exception.response["Error"]["Code"].should.equal("BadRequestException") - ex.exception.response["Error"]["Message"].should.equal( + ex.value.response["Error"]["Code"].should.equal("BadRequestException") + ex.value.response["Error"]["Message"].should.equal( "Invalid ARN specified in the request" ) for type in aws_types: @@ -1516,8 +1516,8 @@ def test_put_integration_validation(): uri="arn:aws:iam::0000000000:role/service-role/asdf", integrationHttpMethod="POST", ) - ex.exception.response["Error"]["Code"].should.equal("BadRequestException") - ex.exception.response["Error"]["Message"].should.equal( + ex.value.response["Error"]["Code"].should.equal("BadRequestException") + ex.value.response["Error"]["Message"].should.equal( "AWS ARN for integration must contain path or action" ) @@ -1635,8 +1635,8 @@ def test_create_domain_names(): with pytest.raises(ClientError) as ex: client.create_domain_name(domainName="") - ex.exception.response["Error"]["Message"].should.equal("No Domain Name specified") - ex.exception.response["Error"]["Code"].should.equal("BadRequestException") + ex.value.response["Error"]["Message"].should.equal("No Domain Name specified") + ex.value.response["Error"]["Code"].should.equal("BadRequestException") @mock_apigateway @@ -1669,10 +1669,10 @@ def test_get_domain_name(): with pytest.raises(ClientError) as ex: client.get_domain_name(domainName=domain_name) - ex.exception.response["Error"]["Message"].should.equal( + ex.value.response["Error"]["Message"].should.equal( "Invalid Domain Name specified" ) - ex.exception.response["Error"]["Code"].should.equal("NotFoundException") + ex.value.response["Error"]["Code"].should.equal("NotFoundException") # adding a domain name client.create_domain_name(domainName=domain_name) # retrieving the data of added domain name. 
@@ -1708,10 +1708,10 @@ def test_create_model(): description=description, contentType=content_type, ) - ex.exception.response["Error"]["Message"].should.equal( + ex.value.response["Error"]["Message"].should.equal( "Invalid Rest API Id specified" ) - ex.exception.response["Error"]["Code"].should.equal("NotFoundException") + ex.value.response["Error"]["Code"].should.equal("NotFoundException") with pytest.raises(ClientError) as ex: client.create_model( @@ -1721,8 +1721,8 @@ def test_create_model(): contentType=content_type, ) - ex.exception.response["Error"]["Message"].should.equal("No Model Name specified") - ex.exception.response["Error"]["Code"].should.equal("BadRequestException") + ex.value.response["Error"]["Message"].should.equal("No Model Name specified") + ex.value.response["Error"]["Code"].should.equal("BadRequestException") @mock_apigateway @@ -1772,10 +1772,10 @@ def test_get_model_by_name(): with pytest.raises(ClientError) as ex: client.get_model(restApiId=dummy_rest_api_id, modelName=model_name) - ex.exception.response["Error"]["Message"].should.equal( + ex.value.response["Error"]["Message"].should.equal( "Invalid Rest API Id specified" ) - ex.exception.response["Error"]["Code"].should.equal("NotFoundException") + ex.value.response["Error"]["Code"].should.equal("NotFoundException") @mock_apigateway @@ -1786,10 +1786,10 @@ def test_get_model_with_invalid_name(): # test with an invalid model name with pytest.raises(ClientError) as ex: client.get_model(restApiId=rest_api_id, modelName="fake") - ex.exception.response["Error"]["Message"].should.equal( + ex.value.response["Error"]["Message"].should.equal( "Invalid Model Name specified" ) - ex.exception.response["Error"]["Code"].should.equal("NotFoundException") + ex.value.response["Error"]["Code"].should.equal("NotFoundException") @mock_apigateway @@ -1870,9 +1870,9 @@ def test_create_api_headers(): client.create_api_key(**payload) with pytest.raises(ClientError) as ex: client.create_api_key(**payload) - ex.exception.response["Error"]["Code"].should.equal("ConflictException") + ex.value.response["Error"]["Code"].should.equal("ConflictException") if not settings.TEST_SERVER_MODE: - ex.exception.response["ResponseMetadata"]["HTTPHeaders"].should.equal({}) + ex.value.response["ResponseMetadata"]["HTTPHeaders"].should.equal({}) @mock_apigateway @@ -1941,8 +1941,8 @@ def test_usage_plans(): # # Try to get info about a non existing usage with pytest.raises(ClientError) as ex: client.get_usage_plan(usagePlanId="not_existing") - ex.exception.response["Error"]["Code"].should.equal("NotFoundException") - ex.exception.response["Error"]["Message"].should.equal( + ex.value.response["Error"]["Code"].should.equal("NotFoundException") + ex.value.response["Error"]["Message"].should.equal( "Invalid Usage Plan ID specified" ) @@ -2032,24 +2032,24 @@ def test_usage_plan_keys(): # Try to get info about a non existing api key with pytest.raises(ClientError) as ex: client.get_usage_plan_key(usagePlanId=usage_plan_id, keyId="not_existing_key") - ex.exception.response["Error"]["Code"].should.equal("NotFoundException") - ex.exception.response["Error"]["Message"].should.equal( + ex.value.response["Error"]["Code"].should.equal("NotFoundException") + ex.value.response["Error"]["Message"].should.equal( "Invalid API Key identifier specified" ) # Try to get info about an existing api key that has not jet added to a valid usage plan with pytest.raises(ClientError) as ex: client.get_usage_plan_key(usagePlanId=usage_plan_id, keyId=key_id) - 
ex.exception.response["Error"]["Code"].should.equal("NotFoundException") - ex.exception.response["Error"]["Message"].should.equal( + ex.value.response["Error"]["Code"].should.equal("NotFoundException") + ex.value.response["Error"]["Message"].should.equal( "Invalid Usage Plan ID specified" ) # Try to get info about an existing api key that has not jet added to a valid usage plan with pytest.raises(ClientError) as ex: client.get_usage_plan_key(usagePlanId="not_existing_plan_id", keyId=key_id) - ex.exception.response["Error"]["Code"].should.equal("NotFoundException") - ex.exception.response["Error"]["Message"].should.equal( + ex.value.response["Error"]["Code"].should.equal("NotFoundException") + ex.value.response["Error"]["Message"].should.equal( "Invalid Usage Plan ID specified" ) diff --git a/tests/test_athena/test_athena.py b/tests/test_athena/test_athena.py index 98e1dc4b9f13..f667f231697b 100644 --- a/tests/test_athena/test_athena.py +++ b/tests/test_athena/test_athena.py @@ -111,8 +111,8 @@ def test_start_query_validate_workgroup(): ResultConfiguration={"OutputLocation": "string"}, WorkGroup="unknown_workgroup", ) - err.exception.response["Error"]["Code"].should.equal("InvalidRequestException") - err.exception.response["Error"]["Message"].should.equal("WorkGroup does not exist") + err.value.response["Error"]["Code"].should.equal("InvalidRequestException") + err.value.response["Error"]["Message"].should.equal("WorkGroup does not exist") @mock_athena diff --git a/tests/test_autoscaling/test_autoscaling.py b/tests/test_autoscaling/test_autoscaling.py index 9e51c4b125f1..25b9cc063b61 100644 --- a/tests/test_autoscaling/test_autoscaling.py +++ b/tests/test_autoscaling/test_autoscaling.py @@ -791,9 +791,9 @@ def test_create_autoscaling_group_from_invalid_instance_id(): VPCZoneIdentifier=mocked_networking["subnet1"], NewInstancesProtectedFromScaleIn=False, ) - ex.exception.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) - ex.exception.response["Error"]["Code"].should.equal("ValidationError") - ex.exception.response["Error"]["Message"].should.equal( + ex.value.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) + ex.value.response["Error"]["Code"].should.equal("ValidationError") + ex.value.response["Error"]["Message"].should.equal( "Instance [{0}] is invalid.".format(invalid_instance_id) ) @@ -842,7 +842,7 @@ def test_create_autoscaling_group_no_template_ref(): )["LaunchTemplate"] client = boto3.client("autoscaling", region_name="us-east-1") - with assert_raises(ClientError) as ex: + with pytest.raises(ClientError) as ex: client.create_auto_scaling_group( AutoScalingGroupName="test_asg", LaunchTemplate={"Version": str(template["LatestVersionNumber"])}, @@ -852,9 +852,9 @@ def test_create_autoscaling_group_no_template_ref(): VPCZoneIdentifier=mocked_networking["subnet1"], NewInstancesProtectedFromScaleIn=False, ) - ex.exception.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) - ex.exception.response["Error"]["Code"].should.equal("ValidationError") - ex.exception.response["Error"]["Message"].should.equal( + ex.value.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) + ex.value.response["Error"]["Code"].should.equal("ValidationError") + ex.value.response["Error"]["Message"].should.equal( "Valid requests must contain either launchTemplateId or LaunchTemplateName" ) @@ -874,7 +874,7 @@ def test_create_autoscaling_group_multiple_template_ref(): )["LaunchTemplate"] client = boto3.client("autoscaling", region_name="us-east-1") - with 
assert_raises(ClientError) as ex: + with pytest.raises(ClientError) as ex: client.create_auto_scaling_group( AutoScalingGroupName="test_asg", LaunchTemplate={ @@ -888,9 +888,9 @@ def test_create_autoscaling_group_multiple_template_ref(): VPCZoneIdentifier=mocked_networking["subnet1"], NewInstancesProtectedFromScaleIn=False, ) - ex.exception.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) - ex.exception.response["Error"]["Code"].should.equal("ValidationError") - ex.exception.response["Error"]["Message"].should.equal( + ex.value.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) + ex.value.response["Error"]["Code"].should.equal("ValidationError") + ex.value.response["Error"]["Message"].should.equal( "Valid requests must contain either launchTemplateId or LaunchTemplateName" ) @@ -899,7 +899,7 @@ def test_create_autoscaling_group_multiple_template_ref(): def test_create_autoscaling_group_boto3_no_launch_configuration(): mocked_networking = setup_networking() client = boto3.client("autoscaling", region_name="us-east-1") - with assert_raises(ClientError) as ex: + with pytest.raises(ClientError) as ex: client.create_auto_scaling_group( AutoScalingGroupName="test_asg", MinSize=0, @@ -908,9 +908,9 @@ def test_create_autoscaling_group_boto3_no_launch_configuration(): VPCZoneIdentifier=mocked_networking["subnet1"], NewInstancesProtectedFromScaleIn=False, ) - ex.exception.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) - ex.exception.response["Error"]["Code"].should.equal("ValidationError") - ex.exception.response["Error"]["Message"].should.equal( + ex.value.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) + ex.value.response["Error"]["Code"].should.equal("ValidationError") + ex.value.response["Error"]["Message"].should.equal( "Valid requests must contain either LaunchTemplate, LaunchConfigurationName, " "InstanceId or MixedInstancesPolicy parameter." ) @@ -934,7 +934,7 @@ def test_create_autoscaling_group_boto3_multiple_launch_configurations(): LaunchConfigurationName="test_launch_configuration" ) - with assert_raises(ClientError) as ex: + with pytest.raises(ClientError) as ex: client.create_auto_scaling_group( AutoScalingGroupName="test_asg", LaunchConfigurationName="test_launch_configuration", @@ -948,9 +948,9 @@ def test_create_autoscaling_group_boto3_multiple_launch_configurations(): VPCZoneIdentifier=mocked_networking["subnet1"], NewInstancesProtectedFromScaleIn=False, ) - ex.exception.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) - ex.exception.response["Error"]["Code"].should.equal("ValidationError") - ex.exception.response["Error"]["Message"].should.equal( + ex.value.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) + ex.value.response["Error"]["Code"].should.equal("ValidationError") + ex.value.response["Error"]["Message"].should.equal( "Valid requests must contain either LaunchTemplate, LaunchConfigurationName, " "InstanceId or MixedInstancesPolicy parameter." 
) diff --git a/tests/test_awslambda/test_awslambda_cloudformation.py b/tests/test_awslambda/test_awslambda_cloudformation.py index f87918328897..6d998bfd45f7 100644 --- a/tests/test_awslambda/test_awslambda_cloudformation.py +++ b/tests/test_awslambda/test_awslambda_cloudformation.py @@ -111,7 +111,7 @@ def test_lambda_can_be_deleted_by_cloudformation(): # Verify function was deleted with pytest.raises(ClientError) as e: lmbda.get_function(FunctionName=created_fn_name) - e.exception.response["Error"]["Code"].should.equal("ResourceNotFoundException") + e.value.response["Error"]["Code"].should.equal("ResourceNotFoundException") @mock_cloudformation diff --git a/tests/test_awslambda/test_lambda.py b/tests/test_awslambda/test_lambda.py index 2de95cb3c091..071c6fed6197 100644 --- a/tests/test_awslambda/test_lambda.py +++ b/tests/test_awslambda/test_lambda.py @@ -1662,7 +1662,7 @@ def test_update_function_s3(): @mock_lambda def test_create_function_with_invalid_arn(): err = create_invalid_lambda("test-iam-role") - err.exception.response["Error"]["Message"].should.equal( + err.value.response["Error"]["Message"].should.equal( r"1 validation error detected: Value 'test-iam-role' at 'role' failed to satisfy constraint: Member must satisfy regular expression pattern: arn:(aws[a-zA-Z-]*)?:iam::(\d{12}):role/?[a-zA-Z_0-9+=,.@\-_/]+" ) @@ -1670,7 +1670,7 @@ def test_create_function_with_invalid_arn(): @mock_lambda def test_create_function_with_arn_from_different_account(): err = create_invalid_lambda("arn:aws:iam::000000000000:role/example_role") - err.exception.response["Error"]["Message"].should.equal( + err.value.response["Error"]["Message"].should.equal( "Cross-account pass role is not allowed." ) @@ -1680,7 +1680,7 @@ def test_create_function_with_unknown_arn(): err = create_invalid_lambda( "arn:aws:iam::" + str(ACCOUNT_ID) + ":role/service-role/unknown_role" ) - err.exception.response["Error"]["Message"].should.equal( + err.value.response["Error"]["Message"].should.equal( "The role defined for the function cannot be assumed by Lambda." 
) diff --git a/tests/test_batch/test_batch.py b/tests/test_batch/test_batch.py index 511042d1f191..1d4aa1cf27b3 100644 --- a/tests/test_batch/test_batch.py +++ b/tests/test_batch/test_batch.py @@ -6,6 +6,7 @@ from botocore.exceptions import ClientError import sure # noqa from moto import mock_batch, mock_iam, mock_ec2, mock_ecs, mock_logs +import pytest DEFAULT_REGION = "eu-central-1" @@ -685,6 +686,7 @@ def test_submit_job_by_name(): @mock_ecs @mock_iam @mock_batch +@pytest.mark.network def test_submit_job(): ec2_client, iam_client, ecs_client, logs_client, batch_client = _get_clients() vpc_id, subnet_id, sg_id, iam_arn = _setup(ec2_client, iam_client) @@ -751,6 +753,7 @@ def test_submit_job(): @mock_ecs @mock_iam @mock_batch +@pytest.mark.network def test_list_jobs(): ec2_client, iam_client, ecs_client, logs_client, batch_client = _get_clients() vpc_id, subnet_id, sg_id, iam_arn = _setup(ec2_client, iam_client) diff --git a/tests/test_cloudformation/test_cloudformation_stack_crud.py b/tests/test_cloudformation/test_cloudformation_stack_crud.py index 6baae83bcc42..40004f805543 100644 --- a/tests/test_cloudformation/test_cloudformation_stack_crud.py +++ b/tests/test_cloudformation/test_cloudformation_stack_crud.py @@ -520,7 +520,7 @@ def test_update_stack_when_rolled_back(): with pytest.raises(BotoServerError) as err: conn.update_stack("test_stack", dummy_template_json) - ex = err.exception + ex = err.value ex.body.should.match(r"is in ROLLBACK_COMPLETE state and can not be updated") ex.error_code.should.equal("ValidationError") ex.reason.should.equal("Bad Request") diff --git a/tests/test_cloudwatch/test_cloudwatch_boto3.py b/tests/test_cloudwatch/test_cloudwatch_boto3.py index c62f3145962d..d448b0c58b45 100644 --- a/tests/test_cloudwatch/test_cloudwatch_boto3.py +++ b/tests/test_cloudwatch/test_cloudwatch_boto3.py @@ -113,7 +113,7 @@ def test_delete_invalid_alarm(): # trying to delete an alarm which is not created along with valid alarm. with pytest.raises(ClientError) as e: cloudwatch.delete_alarms(AlarmNames=["InvalidAlarmName", "testalarm1"]) - e.exception.response["Error"]["Code"].should.equal("ResourceNotFound") + e.value.response["Error"]["Code"].should.equal("ResourceNotFound") resp = cloudwatch.describe_alarms(AlarmNames=["testalarm1"]) # making sure other alarms are not deleted in case of an error. @@ -122,7 +122,7 @@ def test_delete_invalid_alarm(): # test to check if the error raises if only one invalid alarm is tried to delete. 
with pytest.raises(ClientError) as e: cloudwatch.delete_alarms(AlarmNames=["InvalidAlarmName"]) - e.exception.response["Error"]["Code"].should.equal("ResourceNotFound") + e.value.response["Error"]["Code"].should.equal("ResourceNotFound") @mock_cloudwatch @@ -425,7 +425,7 @@ def test_list_metrics_paginated(): # Verify we can't pass a random NextToken with pytest.raises(ClientError) as e: cloudwatch.list_metrics(NextToken=str(uuid4())) - e.exception.response["Error"]["Message"].should.equal( + e.value.response["Error"]["Message"].should.equal( "Request parameter NextToken is invalid" ) # Add a boatload of metrics @@ -454,7 +454,7 @@ def test_list_metrics_paginated(): # Verify that we can't reuse an existing token with pytest.raises(ClientError) as e: cloudwatch.list_metrics(NextToken=first_page["NextToken"]) - e.exception.response["Error"]["Message"].should.equal( + e.value.response["Error"]["Message"].should.equal( "Request parameter NextToken is invalid" ) diff --git a/tests/test_codecommit/test_codecommit.py b/tests/test_codecommit/test_codecommit.py index 7a5867d44a40..4c38252ff4d2 100644 --- a/tests/test_codecommit/test_codecommit.py +++ b/tests/test_codecommit/test_codecommit.py @@ -86,7 +86,7 @@ def test_create_repository_repository_name_exists(): repositoryName="repository_two", repositoryDescription="description repo two", ) - ex = e.exception + ex = e.value ex.operation_name.should.equal("CreateRepository") ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) ex.response["Error"]["Code"].should.contain("RepositoryNameExistsException") @@ -101,7 +101,7 @@ def test_create_repository_invalid_repository_name(): with pytest.raises(ClientError) as e: client.create_repository(repositoryName="in_123_valid_@#$_characters") - ex = e.exception + ex = e.value ex.operation_name.should.equal("CreateRepository") ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) ex.response["Error"]["Code"].should.contain("InvalidRepositoryNameException") @@ -158,7 +158,7 @@ def test_get_repository(): with pytest.raises(ClientError) as e: client.get_repository(repositoryName=repository_name) - ex = e.exception + ex = e.value ex.operation_name.should.equal("GetRepository") ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) ex.response["Error"]["Code"].should.contain("RepositoryDoesNotExistException") @@ -173,7 +173,7 @@ def test_get_repository_invalid_repository_name(): with pytest.raises(ClientError) as e: client.get_repository(repositoryName="repository_one-@#@") - ex = e.exception + ex = e.value ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) ex.response["Error"]["Code"].should.contain("InvalidRepositoryNameException") ex.response["Error"]["Message"].should.equal( @@ -209,7 +209,7 @@ def test_delete_repository_invalid_repository_name(): with pytest.raises(ClientError) as e: client.delete_repository(repositoryName="_rep@ository_one") - ex = e.exception + ex = e.value ex.operation_name.should.equal("DeleteRepository") ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) ex.response["Error"]["Code"].should.contain("InvalidRepositoryNameException") diff --git a/tests/test_codepipeline/test_codepipeline.py b/tests/test_codepipeline/test_codepipeline.py index ac72f99818c3..ca1094582ad0 100644 --- a/tests/test_codepipeline/test_codepipeline.py +++ b/tests/test_codepipeline/test_codepipeline.py @@ -79,7 +79,7 @@ def test_create_pipeline_errors(): with pytest.raises(ClientError) as e: create_basic_codepipeline(client, "test-pipeline") - 
ex = e.exception + ex = e.value ex.operation_name.should.equal("CreatePipeline") ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) ex.response["Error"]["Code"].should.contain("InvalidStructureException") @@ -115,7 +115,7 @@ def test_create_pipeline_errors(): ], } ) - ex = e.exception + ex = e.value ex.operation_name.should.equal("CreatePipeline") ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) ex.response["Error"]["Code"].should.contain("InvalidStructureException") @@ -167,7 +167,7 @@ def test_create_pipeline_errors(): ], } ) - ex = e.exception + ex = e.value ex.operation_name.should.equal("CreatePipeline") ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) ex.response["Error"]["Code"].should.contain("InvalidStructureException") @@ -203,7 +203,7 @@ def test_create_pipeline_errors(): ], } ) - ex = e.exception + ex = e.value ex.operation_name.should.equal("CreatePipeline") ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) ex.response["Error"]["Code"].should.contain("InvalidStructureException") @@ -284,7 +284,7 @@ def test_get_pipeline_errors(): with pytest.raises(ClientError) as e: client.get_pipeline(name="not-existing") - ex = e.exception + ex = e.value ex.operation_name.should.equal("GetPipeline") ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) ex.response["Error"]["Code"].should.contain("PipelineNotFoundException") @@ -456,7 +456,7 @@ def test_update_pipeline_errors(): ], } ) - ex = e.exception + ex = e.value ex.operation_name.should.equal("UpdatePipeline") ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) ex.response["Error"]["Code"].should.contain("ResourceNotFoundException") @@ -521,7 +521,7 @@ def test_list_tags_for_resource_errors(): client.list_tags_for_resource( resourceArn="arn:aws:codepipeline:us-east-1:123456789012:not-existing" ) - ex = e.exception + ex = e.value ex.operation_name.should.equal("ListTagsForResource") ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) ex.response["Error"]["Code"].should.contain("ResourceNotFoundException") @@ -560,7 +560,7 @@ def test_tag_resource_errors(): resourceArn="arn:aws:codepipeline:us-east-1:123456789012:not-existing", tags=[{"key": "key-2", "value": "value-2"}], ) - ex = e.exception + ex = e.value ex.operation_name.should.equal("TagResource") ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) ex.response["Error"]["Code"].should.contain("ResourceNotFoundException") @@ -573,7 +573,7 @@ def test_tag_resource_errors(): resourceArn="arn:aws:codepipeline:us-east-1:123456789012:{}".format(name), tags=[{"key": "aws:key", "value": "value"}], ) - ex = e.exception + ex = e.value ex.operation_name.should.equal("TagResource") ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) ex.response["Error"]["Code"].should.contain("InvalidTagsException") @@ -591,7 +591,7 @@ def test_tag_resource_errors(): for i in range(50) ], ) - ex = e.exception + ex = e.value ex.operation_name.should.equal("TagResource") ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) ex.response["Error"]["Code"].should.contain("TooManyTagsException") @@ -639,7 +639,7 @@ def test_untag_resource_errors(): resourceArn="arn:aws:codepipeline:us-east-1:123456789012:not-existing", tagKeys=["key"], ) - ex = e.exception + ex = e.value ex.operation_name.should.equal("UntagResource") ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) ex.response["Error"]["Code"].should.contain("ResourceNotFoundException") 
diff --git a/tests/test_cognitoidentity/test_cognitoidentity.py b/tests/test_cognitoidentity/test_cognitoidentity.py index a159033297ba..cfe673cdffdd 100644 --- a/tests/test_cognitoidentity/test_cognitoidentity.py +++ b/tests/test_cognitoidentity/test_cognitoidentity.py @@ -78,9 +78,9 @@ def test_describe_identity_pool_with_invalid_id_raises_error(): with pytest.raises(ClientError) as cm: conn.describe_identity_pool(IdentityPoolId="us-west-2_non-existent") - cm.exception.operation_name.should.equal("DescribeIdentityPool") - cm.exception.response["Error"]["Code"].should.equal("ResourceNotFoundException") - cm.exception.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) + cm.value.operation_name.should.equal("DescribeIdentityPool") + cm.value.response["Error"]["Code"].should.equal("ResourceNotFoundException") + cm.value.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) # testing a helper function diff --git a/tests/test_cognitoidp/test_cognitoidp.py b/tests/test_cognitoidp/test_cognitoidp.py index bbd8d5a39172..54ee9528f71f 100644 --- a/tests/test_cognitoidp/test_cognitoidp.py +++ b/tests/test_cognitoidp/test_cognitoidp.py @@ -608,9 +608,9 @@ def test_update_identity_provider_no_user_pool(): UserPoolId="foo", ProviderName="bar", ProviderDetails={"thing": new_value} ) - cm.exception.operation_name.should.equal("UpdateIdentityProvider") - cm.exception.response["Error"]["Code"].should.equal("ResourceNotFoundException") - cm.exception.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) + cm.value.operation_name.should.equal("UpdateIdentityProvider") + cm.value.response["Error"]["Code"].should.equal("ResourceNotFoundException") + cm.value.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) @mock_cognitoidp @@ -630,9 +630,9 @@ def test_update_identity_provider_no_identity_provider(): ProviderDetails={"thing": new_value}, ) - cm.exception.operation_name.should.equal("UpdateIdentityProvider") - cm.exception.response["Error"]["Code"].should.equal("ResourceNotFoundException") - cm.exception.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) + cm.value.operation_name.should.equal("UpdateIdentityProvider") + cm.value.response["Error"]["Code"].should.equal("ResourceNotFoundException") + cm.value.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) @mock_cognitoidp @@ -701,9 +701,9 @@ def test_create_group_with_duplicate_name_raises_error(): with pytest.raises(ClientError) as cm: conn.create_group(GroupName=group_name, UserPoolId=user_pool_id) - cm.exception.operation_name.should.equal("CreateGroup") - cm.exception.response["Error"]["Code"].should.equal("GroupExistsException") - cm.exception.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) + cm.value.operation_name.should.equal("CreateGroup") + cm.value.response["Error"]["Code"].should.equal("GroupExistsException") + cm.value.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) @mock_cognitoidp @@ -749,7 +749,7 @@ def test_delete_group(): with pytest.raises(ClientError) as cm: conn.get_group(GroupName=group_name, UserPoolId=user_pool_id) - cm.exception.response["Error"]["Code"].should.equal("ResourceNotFoundException") + cm.value.response["Error"]["Code"].should.equal("ResourceNotFoundException") @mock_cognitoidp @@ -1570,12 +1570,12 @@ def test_resource_server(): UserPoolId=user_pool_id, Identifier=identifier, Name=name, Scopes=scopes ) - ex.exception.operation_name.should.equal("CreateResourceServer") - 
ex.exception.response["Error"]["Code"].should.equal("InvalidParameterException") - ex.exception.response["Error"]["Message"].should.equal( + ex.value.operation_name.should.equal("CreateResourceServer") + ex.value.response["Error"]["Code"].should.equal("InvalidParameterException") + ex.value.response["Error"]["Message"].should.equal( "%s already exists in user pool %s." % (identifier, user_pool_id) ) - ex.exception.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) + ex.value.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) @mock_cognitoidp diff --git a/tests/test_config/test_config.py b/tests/test_config/test_config.py index 716792863fba..a99efceaea58 100644 --- a/tests/test_config/test_config.py +++ b/tests/test_config/test_config.py @@ -23,20 +23,20 @@ def test_put_configuration_recorder(): with pytest.raises(ClientError) as ce: client.put_configuration_recorder(ConfigurationRecorder={"roleARN": "somearn"}) assert ( - ce.exception.response["Error"]["Code"] + ce.value.response["Error"]["Code"] == "InvalidConfigurationRecorderNameException" ) - assert "is not valid, blank string." in ce.exception.response["Error"]["Message"] + assert "is not valid, blank string." in ce.value.response["Error"]["Message"] # Try with a really long name: with pytest.raises(ClientError) as ce: client.put_configuration_recorder( ConfigurationRecorder={"name": "a" * 257, "roleARN": "somearn"} ) - assert ce.exception.response["Error"]["Code"] == "ValidationException" + assert ce.value.response["Error"]["Code"] == "ValidationException" assert ( "Member must have length less than or equal to 256" - in ce.exception.response["Error"]["Message"] + in ce.value.response["Error"]["Message"] ) # With resource types and flags set to True: @@ -77,10 +77,10 @@ def test_put_configuration_recorder(): } ) assert ( - ce.exception.response["Error"]["Code"] == "InvalidRecordingGroupException" + ce.value.response["Error"]["Code"] == "InvalidRecordingGroupException" ) assert ( - ce.exception.response["Error"]["Message"] + ce.value.response["Error"]["Message"] == "The recording group provided is not valid" ) @@ -103,11 +103,11 @@ def test_put_configuration_recorder(): }, } ) - assert ce.exception.response["Error"]["Code"] == "ValidationException" + assert ce.value.response["Error"]["Code"] == "ValidationException" assert "2 validation error detected: Value '['LOLNO', 'LOLSTILLNO']" in str( - ce.exception.response["Error"]["Message"] + ce.value.response["Error"]["Message"] ) - assert "AWS::EC2::Instance" in ce.exception.response["Error"]["Message"] + assert "AWS::EC2::Instance" in ce.value.response["Error"]["Message"] # Create a proper one: client.put_configuration_recorder( @@ -178,12 +178,12 @@ def test_put_configuration_recorder(): } ) assert ( - ce.exception.response["Error"]["Code"] + ce.value.response["Error"]["Code"] == "MaxNumberOfConfigurationRecordersExceededException" ) assert ( "maximum number of configuration recorders: 1 is reached." 
- in ce.exception.response["Error"]["Message"] + in ce.value.response["Error"]["Message"] ) @@ -208,9 +208,9 @@ def test_put_configuration_aggregator(): ) assert ( "Member must have length less than or equal to 1" - in ce.exception.response["Error"]["Message"] + in ce.value.response["Error"]["Message"] ) - assert ce.exception.response["Error"]["Code"] == "ValidationException" + assert ce.value.response["Error"]["Code"] == "ValidationException" # With an invalid region config (no regions defined): with pytest.raises(ClientError) as ce: @@ -225,9 +225,9 @@ def test_put_configuration_aggregator(): ) assert ( "Your request does not specify any regions" - in ce.exception.response["Error"]["Message"] + in ce.value.response["Error"]["Message"] ) - assert ce.exception.response["Error"]["Code"] == "InvalidParameterValueException" + assert ce.value.response["Error"]["Code"] == "InvalidParameterValueException" with pytest.raises(ClientError) as ce: client.put_configuration_aggregator( @@ -238,9 +238,9 @@ def test_put_configuration_aggregator(): ) assert ( "Your request does not specify any regions" - in ce.exception.response["Error"]["Message"] + in ce.value.response["Error"]["Message"] ) - assert ce.exception.response["Error"]["Code"] == "InvalidParameterValueException" + assert ce.value.response["Error"]["Code"] == "InvalidParameterValueException" # With both region flags defined: with pytest.raises(ClientError) as ce: @@ -256,9 +256,9 @@ def test_put_configuration_aggregator(): ) assert ( "You must choose one of these options" - in ce.exception.response["Error"]["Message"] + in ce.value.response["Error"]["Message"] ) - assert ce.exception.response["Error"]["Code"] == "InvalidParameterValueException" + assert ce.value.response["Error"]["Code"] == "InvalidParameterValueException" with pytest.raises(ClientError) as ce: client.put_configuration_aggregator( @@ -271,9 +271,9 @@ def test_put_configuration_aggregator(): ) assert ( "You must choose one of these options" - in ce.exception.response["Error"]["Message"] + in ce.value.response["Error"]["Message"] ) - assert ce.exception.response["Error"]["Code"] == "InvalidParameterValueException" + assert ce.value.response["Error"]["Code"] == "InvalidParameterValueException" # Name too long: with pytest.raises(ClientError) as ce: @@ -283,8 +283,8 @@ def test_put_configuration_aggregator(): {"AccountIds": ["012345678910"], "AllAwsRegions": True} ], ) - assert "configurationAggregatorName" in ce.exception.response["Error"]["Message"] - assert ce.exception.response["Error"]["Code"] == "ValidationException" + assert "configurationAggregatorName" in ce.value.response["Error"]["Message"] + assert ce.value.response["Error"]["Code"] == "ValidationException" # Too many tags (>50): with pytest.raises(ClientError) as ce: @@ -299,9 +299,9 @@ def test_put_configuration_aggregator(): ) assert ( "Member must have length less than or equal to 50" - in ce.exception.response["Error"]["Message"] + in ce.value.response["Error"]["Message"] ) - assert ce.exception.response["Error"]["Code"] == "ValidationException" + assert ce.value.response["Error"]["Code"] == "ValidationException" # Tag key is too big (>128 chars): with pytest.raises(ClientError) as ce: @@ -314,9 +314,9 @@ def test_put_configuration_aggregator(): ) assert ( "Member must have length less than or equal to 128" - in ce.exception.response["Error"]["Message"] + in ce.value.response["Error"]["Message"] ) - assert ce.exception.response["Error"]["Code"] == "ValidationException" + assert ce.value.response["Error"]["Code"] 
== "ValidationException" # Tag value is too big (>256 chars): with pytest.raises(ClientError) as ce: @@ -329,9 +329,9 @@ def test_put_configuration_aggregator(): ) assert ( "Member must have length less than or equal to 256" - in ce.exception.response["Error"]["Message"] + in ce.value.response["Error"]["Message"] ) - assert ce.exception.response["Error"]["Code"] == "ValidationException" + assert ce.value.response["Error"]["Code"] == "ValidationException" # Duplicate Tags: with pytest.raises(ClientError) as ce: @@ -342,8 +342,8 @@ def test_put_configuration_aggregator(): ], Tags=[{"Key": "a", "Value": "a"}, {"Key": "a", "Value": "a"}], ) - assert "Duplicate tag keys found." in ce.exception.response["Error"]["Message"] - assert ce.exception.response["Error"]["Code"] == "InvalidInput" + assert "Duplicate tag keys found." in ce.value.response["Error"]["Message"] + assert ce.value.response["Error"]["Code"] == "InvalidInput" # Invalid characters in the tag key: with pytest.raises(ClientError) as ce: @@ -356,9 +356,9 @@ def test_put_configuration_aggregator(): ) assert ( "Member must satisfy regular expression pattern:" - in ce.exception.response["Error"]["Message"] + in ce.value.response["Error"]["Message"] ) - assert ce.exception.response["Error"]["Code"] == "ValidationException" + assert ce.value.response["Error"]["Code"] == "ValidationException" # If it contains both the AccountAggregationSources and the OrganizationAggregationSource with pytest.raises(ClientError) as ce: @@ -374,18 +374,18 @@ def test_put_configuration_aggregator(): ) assert ( "AccountAggregationSource and the OrganizationAggregationSource" - in ce.exception.response["Error"]["Message"] + in ce.value.response["Error"]["Message"] ) - assert ce.exception.response["Error"]["Code"] == "InvalidParameterValueException" + assert ce.value.response["Error"]["Code"] == "InvalidParameterValueException" # If it contains neither: with pytest.raises(ClientError) as ce: client.put_configuration_aggregator(ConfigurationAggregatorName="testing") assert ( "AccountAggregationSource or the OrganizationAggregationSource" - in ce.exception.response["Error"]["Message"] + in ce.value.response["Error"]["Message"] ) - assert ce.exception.response["Error"]["Code"] == "InvalidParameterValueException" + assert ce.value.response["Error"]["Code"] == "InvalidParameterValueException" # Just make one: account_aggregation_source = { @@ -472,10 +472,10 @@ def test_describe_configuration_aggregators(): ) assert ( "The configuration aggregator does not exist." - in ce.exception.response["Error"]["Message"] + in ce.value.response["Error"]["Message"] ) assert ( - ce.exception.response["Error"]["Code"] + ce.value.response["Error"]["Code"] == "NoSuchConfigurationAggregatorException" ) @@ -486,10 +486,10 @@ def test_describe_configuration_aggregators(): ) assert ( "At least one of the configuration aggregators does not exist." 
- in ce.exception.response["Error"]["Message"] + in ce.value.response["Error"]["Message"] ) assert ( - ce.exception.response["Error"]["Code"] + ce.value.response["Error"]["Code"] == "NoSuchConfigurationAggregatorException" ) @@ -554,9 +554,9 @@ def test_describe_configuration_aggregators(): with pytest.raises(ClientError) as ce: client.describe_configuration_aggregators(NextToken="WRONG") assert ( - "The nextToken provided is invalid" == ce.exception.response["Error"]["Message"] + "The nextToken provided is invalid" == ce.value.response["Error"]["Message"] ) - assert ce.exception.response["Error"]["Code"] == "InvalidNextTokenException" + assert ce.value.response["Error"]["Code"] == "InvalidNextTokenException" @mock_config @@ -574,9 +574,9 @@ def test_put_aggregation_authorization(): ) assert ( "Member must have length less than or equal to 50" - in ce.exception.response["Error"]["Message"] + in ce.value.response["Error"]["Message"] ) - assert ce.exception.response["Error"]["Code"] == "ValidationException" + assert ce.value.response["Error"]["Code"] == "ValidationException" # Tag key is too big (>128 chars): with pytest.raises(ClientError) as ce: @@ -587,9 +587,9 @@ def test_put_aggregation_authorization(): ) assert ( "Member must have length less than or equal to 128" - in ce.exception.response["Error"]["Message"] + in ce.value.response["Error"]["Message"] ) - assert ce.exception.response["Error"]["Code"] == "ValidationException" + assert ce.value.response["Error"]["Code"] == "ValidationException" # Tag value is too big (>256 chars): with pytest.raises(ClientError) as ce: @@ -600,9 +600,9 @@ def test_put_aggregation_authorization(): ) assert ( "Member must have length less than or equal to 256" - in ce.exception.response["Error"]["Message"] + in ce.value.response["Error"]["Message"] ) - assert ce.exception.response["Error"]["Code"] == "ValidationException" + assert ce.value.response["Error"]["Code"] == "ValidationException" # Duplicate Tags: with pytest.raises(ClientError) as ce: @@ -611,8 +611,8 @@ def test_put_aggregation_authorization(): AuthorizedAwsRegion="us-west-2", Tags=[{"Key": "a", "Value": "a"}, {"Key": "a", "Value": "a"}], ) - assert "Duplicate tag keys found." in ce.exception.response["Error"]["Message"] - assert ce.exception.response["Error"]["Code"] == "InvalidInput" + assert "Duplicate tag keys found." 
in ce.value.response["Error"]["Message"] + assert ce.value.response["Error"]["Code"] == "InvalidInput" # Invalid characters in the tag key: with pytest.raises(ClientError) as ce: @@ -623,9 +623,9 @@ def test_put_aggregation_authorization(): ) assert ( "Member must satisfy regular expression pattern:" - in ce.exception.response["Error"]["Message"] + in ce.value.response["Error"]["Message"] ) - assert ce.exception.response["Error"]["Code"] == "ValidationException" + assert ce.value.response["Error"]["Code"] == "ValidationException" # Put a normal one there: result = client.put_aggregation_authorization( @@ -711,9 +711,9 @@ def test_describe_aggregation_authorizations(): with pytest.raises(ClientError) as ce: client.describe_aggregation_authorizations(NextToken="WRONG") assert ( - "The nextToken provided is invalid" == ce.exception.response["Error"]["Message"] + "The nextToken provided is invalid" == ce.value.response["Error"]["Message"] ) - assert ce.exception.response["Error"]["Code"] == "InvalidNextTokenException" + assert ce.value.response["Error"]["Code"] == "InvalidNextTokenException" @mock_config @@ -755,10 +755,10 @@ def test_delete_configuration_aggregator(): client.delete_configuration_aggregator(ConfigurationAggregatorName="testing") assert ( "The configuration aggregator does not exist." - in ce.exception.response["Error"]["Message"] + in ce.value.response["Error"]["Message"] ) assert ( - ce.exception.response["Error"]["Code"] + ce.value.response["Error"]["Code"] == "NoSuchConfigurationAggregatorException" ) @@ -799,9 +799,9 @@ def test_describe_configurations(): with pytest.raises(ClientError) as ce: client.describe_configuration_recorders(ConfigurationRecorderNames=["wrong"]) assert ( - ce.exception.response["Error"]["Code"] == "NoSuchConfigurationRecorderException" + ce.value.response["Error"]["Code"] == "NoSuchConfigurationRecorderException" ) - assert "wrong" in ce.exception.response["Error"]["Message"] + assert "wrong" in ce.value.response["Error"]["Message"] # And with both a good and wrong name: with pytest.raises(ClientError) as ce: @@ -809,9 +809,9 @@ def test_describe_configurations(): ConfigurationRecorderNames=["testrecorder", "wrong"] ) assert ( - ce.exception.response["Error"]["Code"] == "NoSuchConfigurationRecorderException" + ce.value.response["Error"]["Code"] == "NoSuchConfigurationRecorderException" ) - assert "wrong" in ce.exception.response["Error"]["Message"] + assert "wrong" in ce.value.response["Error"]["Message"] @mock_config @@ -822,11 +822,11 @@ def test_delivery_channels(): with pytest.raises(ClientError) as ce: client.put_delivery_channel(DeliveryChannel={}) assert ( - ce.exception.response["Error"]["Code"] + ce.value.response["Error"]["Code"] == "NoAvailableConfigurationRecorderException" ) assert ( - ce.exception.response["Error"]["Message"] + ce.value.response["Error"]["Message"] == "Configuration recorder is not available to " "put delivery channel." ) @@ -848,25 +848,25 @@ def test_delivery_channels(): with pytest.raises(ClientError) as ce: client.put_delivery_channel(DeliveryChannel={}) assert ( - ce.exception.response["Error"]["Code"] == "InvalidDeliveryChannelNameException" + ce.value.response["Error"]["Code"] == "InvalidDeliveryChannelNameException" ) - assert "is not valid, blank string." in ce.exception.response["Error"]["Message"] + assert "is not valid, blank string." 
in ce.value.response["Error"]["Message"] # Try with a really long name: with pytest.raises(ClientError) as ce: client.put_delivery_channel(DeliveryChannel={"name": "a" * 257}) - assert ce.exception.response["Error"]["Code"] == "ValidationException" + assert ce.value.response["Error"]["Code"] == "ValidationException" assert ( "Member must have length less than or equal to 256" - in ce.exception.response["Error"]["Message"] + in ce.value.response["Error"]["Message"] ) # Without specifying a bucket name: with pytest.raises(ClientError) as ce: client.put_delivery_channel(DeliveryChannel={"name": "testchannel"}) - assert ce.exception.response["Error"]["Code"] == "NoSuchBucketException" + assert ce.value.response["Error"]["Code"] == "NoSuchBucketException" assert ( - ce.exception.response["Error"]["Message"] + ce.value.response["Error"]["Message"] == "Cannot find a S3 bucket with an empty bucket name." ) @@ -874,9 +874,9 @@ def test_delivery_channels(): client.put_delivery_channel( DeliveryChannel={"name": "testchannel", "s3BucketName": ""} ) - assert ce.exception.response["Error"]["Code"] == "NoSuchBucketException" + assert ce.value.response["Error"]["Code"] == "NoSuchBucketException" assert ( - ce.exception.response["Error"]["Message"] + ce.value.response["Error"]["Message"] == "Cannot find a S3 bucket with an empty bucket name." ) @@ -889,8 +889,8 @@ def test_delivery_channels(): "s3KeyPrefix": "", } ) - assert ce.exception.response["Error"]["Code"] == "InvalidS3KeyPrefixException" - assert "empty s3 key prefix." in ce.exception.response["Error"]["Message"] + assert ce.value.response["Error"]["Code"] == "InvalidS3KeyPrefixException" + assert "empty s3 key prefix." in ce.value.response["Error"]["Message"] # With an empty string for the SNS ARN: with pytest.raises(ClientError) as ce: @@ -901,8 +901,8 @@ def test_delivery_channels(): "snsTopicARN": "", } ) - assert ce.exception.response["Error"]["Code"] == "InvalidSNSTopicARNException" - assert "The sns topic arn" in ce.exception.response["Error"]["Message"] + assert ce.value.response["Error"]["Code"] == "InvalidSNSTopicARNException" + assert "The sns topic arn" in ce.value.response["Error"]["Message"] # With an invalid delivery frequency: with pytest.raises(ClientError) as ce: @@ -913,9 +913,9 @@ def test_delivery_channels(): "configSnapshotDeliveryProperties": {"deliveryFrequency": "WRONG"}, } ) - assert ce.exception.response["Error"]["Code"] == "InvalidDeliveryFrequency" - assert "WRONG" in ce.exception.response["Error"]["Message"] - assert "TwentyFour_Hours" in ce.exception.response["Error"]["Message"] + assert ce.value.response["Error"]["Code"] == "InvalidDeliveryFrequency" + assert "WRONG" in ce.value.response["Error"]["Message"] + assert "TwentyFour_Hours" in ce.value.response["Error"]["Message"] # Create a proper one: client.put_delivery_channel( @@ -955,12 +955,12 @@ def test_delivery_channels(): DeliveryChannel={"name": "testchannel2", "s3BucketName": "somebucket"} ) assert ( - ce.exception.response["Error"]["Code"] + ce.value.response["Error"]["Code"] == "MaxNumberOfDeliveryChannelsExceededException" ) assert ( "because the maximum number of delivery channels: 1 is reached." 
- in ce.exception.response["Error"]["Message"] + in ce.value.response["Error"]["Message"] ) @@ -1017,14 +1017,14 @@ def test_describe_delivery_channels(): # Specify an incorrect name: with pytest.raises(ClientError) as ce: client.describe_delivery_channels(DeliveryChannelNames=["wrong"]) - assert ce.exception.response["Error"]["Code"] == "NoSuchDeliveryChannelException" - assert "wrong" in ce.exception.response["Error"]["Message"] + assert ce.value.response["Error"]["Code"] == "NoSuchDeliveryChannelException" + assert "wrong" in ce.value.response["Error"]["Message"] # And with both a good and wrong name: with pytest.raises(ClientError) as ce: client.describe_delivery_channels(DeliveryChannelNames=["testchannel", "wrong"]) - assert ce.exception.response["Error"]["Code"] == "NoSuchDeliveryChannelException" - assert "wrong" in ce.exception.response["Error"]["Message"] + assert ce.value.response["Error"]["Code"] == "NoSuchDeliveryChannelException" + assert "wrong" in ce.value.response["Error"]["Message"] @mock_config @@ -1035,7 +1035,7 @@ def test_start_configuration_recorder(): with pytest.raises(ClientError) as ce: client.start_configuration_recorder(ConfigurationRecorderName="testrecorder") assert ( - ce.exception.response["Error"]["Code"] == "NoSuchConfigurationRecorderException" + ce.value.response["Error"]["Code"] == "NoSuchConfigurationRecorderException" ) # Make the config recorder; @@ -1055,7 +1055,7 @@ def test_start_configuration_recorder(): with pytest.raises(ClientError) as ce: client.start_configuration_recorder(ConfigurationRecorderName="testrecorder") assert ( - ce.exception.response["Error"]["Code"] == "NoAvailableDeliveryChannelException" + ce.value.response["Error"]["Code"] == "NoAvailableDeliveryChannelException" ) # Make the delivery channel: @@ -1093,7 +1093,7 @@ def test_stop_configuration_recorder(): with pytest.raises(ClientError) as ce: client.stop_configuration_recorder(ConfigurationRecorderName="testrecorder") assert ( - ce.exception.response["Error"]["Code"] == "NoSuchConfigurationRecorderException" + ce.value.response["Error"]["Code"] == "NoSuchConfigurationRecorderException" ) # Make the config recorder; @@ -1185,9 +1185,9 @@ def test_describe_configuration_recorder_status(): ConfigurationRecorderNames=["testrecorder", "wrong"] ) assert ( - ce.exception.response["Error"]["Code"] == "NoSuchConfigurationRecorderException" + ce.value.response["Error"]["Code"] == "NoSuchConfigurationRecorderException" ) - assert "wrong" in ce.exception.response["Error"]["Message"] + assert "wrong" in ce.value.response["Error"]["Message"] @mock_config @@ -1214,7 +1214,7 @@ def test_delete_configuration_recorder(): with pytest.raises(ClientError) as ce: client.delete_configuration_recorder(ConfigurationRecorderName="testrecorder") assert ( - ce.exception.response["Error"]["Code"] == "NoSuchConfigurationRecorderException" + ce.value.response["Error"]["Code"] == "NoSuchConfigurationRecorderException" ) @@ -1243,12 +1243,12 @@ def test_delete_delivery_channel(): with pytest.raises(ClientError) as ce: client.delete_delivery_channel(DeliveryChannelName="testchannel") assert ( - ce.exception.response["Error"]["Code"] + ce.value.response["Error"]["Code"] == "LastDeliveryChannelDeleteFailedException" ) assert ( "because there is a running configuration recorder." 
- in ce.exception.response["Error"]["Message"] + in ce.value.response["Error"]["Message"] ) # Stop recording: @@ -1260,7 +1260,7 @@ def test_delete_delivery_channel(): # Verify: with pytest.raises(ClientError) as ce: client.delete_delivery_channel(DeliveryChannelName="testchannel") - assert ce.exception.response["Error"]["Code"] == "NoSuchDeliveryChannelException" + assert ce.value.response["Error"]["Code"] == "NoSuchDeliveryChannelException" @mock_config @@ -1343,7 +1343,7 @@ def test_list_discovered_resource(): # Test with an invalid page num > 100: with pytest.raises(ClientError) as ce: client.list_discovered_resources(resourceType="AWS::S3::Bucket", limit=101) - assert "101" in ce.exception.response["Error"]["Message"] + assert "101" in ce.value.response["Error"]["Message"] # Test by supplying both resourceName and also resourceIds: with pytest.raises(ClientError) as ce: @@ -1354,7 +1354,7 @@ def test_list_discovered_resource(): ) assert ( "Both Resource ID and Resource Name cannot be specified in the request" - in ce.exception.response["Error"]["Message"] + in ce.value.response["Error"]["Message"] ) # More than 20 resourceIds: @@ -1365,7 +1365,7 @@ def test_list_discovered_resource(): ) assert ( "The specified list had more than 20 resource ID's." - in ce.exception.response["Error"]["Message"] + in ce.value.response["Error"]["Message"] ) @@ -1384,7 +1384,7 @@ def test_list_aggregate_discovered_resource(): ) assert ( "The configuration aggregator does not exist" - in ce.exception.response["Error"]["Message"] + in ce.value.response["Error"]["Message"] ) # Create the aggregator: @@ -1510,7 +1510,7 @@ def test_list_aggregate_discovered_resource(): ResourceType="AWS::S3::Bucket", Limit=101, ) - assert "101" in ce.exception.response["Error"]["Message"] + assert "101" in ce.value.response["Error"]["Message"] @mock_config @@ -1526,7 +1526,7 @@ def test_get_resource_config_history(): client.get_resource_config_history( resourceType="NOT::A::RESOURCE", resourceId="notcreatedyet" ) - assert ce.exception.response["Error"] == { + assert ce.value.response["Error"] == { "Message": "Resource notcreatedyet of resourceType:NOT::A::RESOURCE is unknown or has " "not been discovered", "Code": "ResourceNotDiscoveredException", @@ -1537,7 +1537,7 @@ def test_get_resource_config_history(): client.get_resource_config_history( resourceType="AWS::S3::Bucket", resourceId="notcreatedyet" ) - assert ce.exception.response["Error"] == { + assert ce.value.response["Error"] == { "Message": "Resource notcreatedyet of resourceType:AWS::S3::Bucket is unknown or has " "not been discovered", "Code": "ResourceNotDiscoveredException", @@ -1569,7 +1569,7 @@ def test_get_resource_config_history(): client.get_resource_config_history( resourceType="AWS::S3::Bucket", resourceId="eu-bucket" ) - assert ce.exception.response["Error"]["Code"] == "ResourceNotDiscoveredException" + assert ce.value.response["Error"]["Code"] == "ResourceNotDiscoveredException" @mock_config @@ -1590,7 +1590,7 @@ def test_batch_get_resource_config(): ) assert ( "Member must have length less than or equal to 100" - in ce.exception.response["Error"]["Message"] + in ce.value.response["Error"]["Message"] ) # With invalid resource types and resources that don't exist: @@ -1659,7 +1659,7 @@ def test_batch_get_aggregate_resource_config(): ) assert ( "The configuration aggregator does not exist" - in ce.exception.response["Error"]["Message"] + in ce.value.response["Error"]["Message"] ) # Create the aggregator: @@ -1679,7 +1679,7 @@ def 
test_batch_get_aggregate_resource_config(): ) assert ( "Member must have length less than or equal to 100" - in ce.exception.response["Error"]["Message"] + in ce.value.response["Error"]["Message"] ) # Create some S3 buckets: @@ -1816,10 +1816,10 @@ def test_put_evaluations(): # Try without Evaluations supplied: with pytest.raises(ClientError) as ce: client.put_evaluations(Evaluations=[], ResultToken="test", TestMode=True) - assert ce.exception.response["Error"]["Code"] == "InvalidParameterValueException" + assert ce.value.response["Error"]["Code"] == "InvalidParameterValueException" assert ( "The Evaluations object in your request cannot be null" - in ce.exception.response["Error"]["Message"] + in ce.value.response["Error"]["Message"] ) # Try without a ResultToken supplied: @@ -1836,7 +1836,7 @@ def test_put_evaluations(): ResultToken="", TestMode=True, ) - assert ce.exception.response["Error"]["Code"] == "InvalidResultTokenException" + assert ce.value.response["Error"]["Code"] == "InvalidResultTokenException" if os.environ.get("TEST_SERVER_MODE", "false").lower() == "true": raise SkipTest("Does not work in server mode due to error in Workzeug") @@ -1920,7 +1920,7 @@ def test_put_organization_conformance_pack_errors(): ) # then - ex = e.exception + ex = e.value ex.operation_name.should.equal("PutOrganizationConformancePack") ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) ex.response["Error"]["Code"].should.contain("ValidationException") @@ -1935,7 +1935,7 @@ def test_put_organization_conformance_pack_errors(): ) # then - ex = e.exception + ex = e.value ex.operation_name.should.equal("PutOrganizationConformancePack") ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) ex.response["Error"]["Code"].should.contain("ValidationException") @@ -1985,7 +1985,7 @@ def test_describe_organization_conformance_packs_errors(): ) # then - ex = e.exception + ex = e.value ex.operation_name.should.equal("DescribeOrganizationConformancePacks") ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) ex.response["Error"]["Code"].should.contain( @@ -2061,7 +2061,7 @@ def test_describe_organization_conformance_pack_statuses_errors(): ) # then - ex = e.exception + ex = e.value ex.operation_name.should.equal("DescribeOrganizationConformancePackStatuses") ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) ex.response["Error"]["Code"].should.contain( @@ -2133,7 +2133,7 @@ def test_get_organization_conformance_pack_detailed_status_errors(): ) # then - ex = e.exception + ex = e.value ex.operation_name.should.equal("GetOrganizationConformancePackDetailedStatus") ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) ex.response["Error"]["Code"].should.contain( @@ -2177,7 +2177,7 @@ def test_delete_organization_conformance_pack_errors(): ) # then - ex = e.exception + ex = e.value ex.operation_name.should.equal("DeleteOrganizationConformancePack") ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) ex.response["Error"]["Code"].should.contain( diff --git a/tests/test_core/test_auth.py b/tests/test_core/test_auth.py index 67c3b67a2c8f..b6fc8a1356dc 100644 --- a/tests/test_core/test_auth.py +++ b/tests/test_core/test_auth.py @@ -179,9 +179,9 @@ def test_invalid_client_token_id(): ) with pytest.raises(ClientError) as ex: client.get_user() - ex.exception.response["Error"]["Code"].should.equal("InvalidClientTokenId") - ex.exception.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(403) - 
ex.exception.response["Error"]["Message"].should.equal( + ex.value.response["Error"]["Code"].should.equal("InvalidClientTokenId") + ex.value.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(403) + ex.value.response["Error"]["Message"].should.equal( "The security token included in the request is invalid." ) @@ -197,9 +197,9 @@ def test_auth_failure(): ) with pytest.raises(ClientError) as ex: client.describe_instances() - ex.exception.response["Error"]["Code"].should.equal("AuthFailure") - ex.exception.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(401) - ex.exception.response["Error"]["Message"].should.equal( + ex.value.response["Error"]["Code"].should.equal("AuthFailure") + ex.value.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(401) + ex.value.response["Error"]["Message"].should.equal( "AWS was not able to validate the provided access credentials" ) @@ -216,9 +216,9 @@ def test_signature_does_not_match(): ) with pytest.raises(ClientError) as ex: client.get_user() - ex.exception.response["Error"]["Code"].should.equal("SignatureDoesNotMatch") - ex.exception.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(403) - ex.exception.response["Error"]["Message"].should.equal( + ex.value.response["Error"]["Code"].should.equal("SignatureDoesNotMatch") + ex.value.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(403) + ex.value.response["Error"]["Message"].should.equal( "The request signature we calculated does not match the signature you provided. Check your AWS Secret Access Key and signing method. Consult the service documentation for details." ) @@ -235,9 +235,9 @@ def test_auth_failure_with_valid_access_key_id(): ) with pytest.raises(ClientError) as ex: client.describe_instances() - ex.exception.response["Error"]["Code"].should.equal("AuthFailure") - ex.exception.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(401) - ex.exception.response["Error"]["Message"].should.equal( + ex.value.response["Error"]["Code"].should.equal("AuthFailure") + ex.value.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(401) + ex.value.response["Error"]["Message"].should.equal( "AWS was not able to validate the provided access credentials" ) @@ -255,9 +255,9 @@ def test_access_denied_with_no_policy(): ) with pytest.raises(ClientError) as ex: client.describe_instances() - ex.exception.response["Error"]["Code"].should.equal("AccessDenied") - ex.exception.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(403) - ex.exception.response["Error"]["Message"].should.equal( + ex.value.response["Error"]["Code"].should.equal("AccessDenied") + ex.value.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(403) + ex.value.response["Error"]["Message"].should.equal( "User: arn:aws:iam::{account_id}:user/{user_name} is not authorized to perform: {operation}".format( account_id=ACCOUNT_ID, user_name=user_name, @@ -283,11 +283,11 @@ def test_access_denied_with_not_allowing_policy(): aws_access_key_id=access_key["AccessKeyId"], aws_secret_access_key=access_key["SecretAccessKey"], ) - with assert_raises(ClientError) as ex: + with pytest.raises(ClientError) as ex: client.describe_instances() - ex.exception.response["Error"]["Code"].should.equal("AccessDenied") - ex.exception.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(403) - ex.exception.response["Error"]["Message"].should.equal( + ex.value.response["Error"]["Code"].should.equal("AccessDenied") + ex.value.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(403) + 
ex.value.response["Error"]["Message"].should.equal( "User: arn:aws:iam::{account_id}:user/{user_name} is not authorized to perform: {operation}".format( account_id=ACCOUNT_ID, user_name=user_name, @@ -321,9 +321,9 @@ def test_access_denied_for_run_instances(): ) with pytest.raises(ClientError) as ex: client.run_instances(MaxCount=1, MinCount=1) - ex.exception.response["Error"]["Code"].should.equal("AccessDenied") - ex.exception.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(403) - ex.exception.response["Error"]["Message"].should.equal( + ex.value.response["Error"]["Code"].should.equal("AccessDenied") + ex.value.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(403) + ex.value.response["Error"]["Message"].should.equal( "User: arn:aws:iam::{account_id}:user/{user_name} is not authorized to perform: {operation}".format( account_id=ACCOUNT_ID, user_name=user_name, operation="ec2:RunInstances", ) @@ -352,9 +352,9 @@ def test_access_denied_with_denying_policy(): ) with pytest.raises(ClientError) as ex: client.create_vpc(CidrBlock="10.0.0.0/16") - ex.exception.response["Error"]["Code"].should.equal("AccessDenied") - ex.exception.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(403) - ex.exception.response["Error"]["Message"].should.equal( + ex.value.response["Error"]["Code"].should.equal("AccessDenied") + ex.value.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(403) + ex.value.response["Error"]["Message"].should.equal( "User: arn:aws:iam::{account_id}:user/{user_name} is not authorized to perform: {operation}".format( account_id=ACCOUNT_ID, user_name=user_name, operation="ec2:CreateVpc" ) @@ -452,9 +452,9 @@ def test_s3_access_denied_with_denying_attached_group_policy(): ) with pytest.raises(ClientError) as ex: client.list_buckets() - ex.exception.response["Error"]["Code"].should.equal("AccessDenied") - ex.exception.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(403) - ex.exception.response["Error"]["Message"].should.equal("Access Denied") + ex.value.response["Error"]["Code"].should.equal("AccessDenied") + ex.value.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(403) + ex.value.response["Error"]["Message"].should.equal("Access Denied") @set_initial_no_auth_action_count(6) @@ -486,9 +486,9 @@ def test_s3_access_denied_with_denying_inline_group_policy(): client.create_bucket(Bucket=bucket_name) with pytest.raises(ClientError) as ex: client.get_object(Bucket=bucket_name, Key="sdfsdf") - ex.exception.response["Error"]["Code"].should.equal("AccessDenied") - ex.exception.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(403) - ex.exception.response["Error"]["Message"].should.equal("Access Denied") + ex.value.response["Error"]["Code"].should.equal("AccessDenied") + ex.value.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(403) + ex.value.response["Error"]["Message"].should.equal("Access Denied") @set_initial_no_auth_action_count(10) @@ -532,9 +532,9 @@ def test_access_denied_with_many_irrelevant_policies(): ) with pytest.raises(ClientError) as ex: client.create_key_pair(KeyName="TestKey") - ex.exception.response["Error"]["Code"].should.equal("AccessDenied") - ex.exception.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(403) - ex.exception.response["Error"]["Message"].should.equal( + ex.value.response["Error"]["Code"].should.equal("AccessDenied") + ex.value.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(403) + ex.value.response["Error"]["Message"].should.equal( "User: 
arn:aws:iam::{account_id}:user/{user_name} is not authorized to perform: {operation}".format( account_id=ACCOUNT_ID, user_name=user_name, operation="ec2:CreateKeyPair" ) @@ -635,9 +635,9 @@ def test_access_denied_with_temporary_credentials(): DBInstanceClass="db.t3", Engine="aurora-postgresql", ) - ex.exception.response["Error"]["Code"].should.equal("AccessDenied") - ex.exception.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(403) - ex.exception.response["Error"]["Message"].should.equal( + ex.value.response["Error"]["Code"].should.equal("AccessDenied") + ex.value.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(403) + ex.value.response["Error"]["Message"].should.equal( "User: arn:aws:sts::{account_id}:assumed-role/{role_name}/{session_name} is not authorized to perform: {operation}".format( account_id=ACCOUNT_ID, role_name=role_name, @@ -678,9 +678,9 @@ def test_s3_invalid_access_key_id(): ) with pytest.raises(ClientError) as ex: client.list_buckets() - ex.exception.response["Error"]["Code"].should.equal("InvalidAccessKeyId") - ex.exception.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(403) - ex.exception.response["Error"]["Message"].should.equal( + ex.value.response["Error"]["Code"].should.equal("InvalidAccessKeyId") + ex.value.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(403) + ex.value.response["Error"]["Message"].should.equal( "The AWS Access Key Id you provided does not exist in our records." ) @@ -700,9 +700,9 @@ def test_s3_signature_does_not_match(): client.create_bucket(Bucket=bucket_name) with pytest.raises(ClientError) as ex: client.put_object(Bucket=bucket_name, Key="abc") - ex.exception.response["Error"]["Code"].should.equal("SignatureDoesNotMatch") - ex.exception.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(403) - ex.exception.response["Error"]["Message"].should.equal( + ex.value.response["Error"]["Code"].should.equal("SignatureDoesNotMatch") + ex.value.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(403) + ex.value.response["Error"]["Message"].should.equal( "The request signature we calculated does not match the signature you provided. Check your key and signing method." ) @@ -736,9 +736,9 @@ def test_s3_access_denied_not_action(): client.create_bucket(Bucket=bucket_name) with pytest.raises(ClientError) as ex: client.delete_object(Bucket=bucket_name, Key="sdfsdf") - ex.exception.response["Error"]["Code"].should.equal("AccessDenied") - ex.exception.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(403) - ex.exception.response["Error"]["Message"].should.equal("Access Denied") + ex.value.response["Error"]["Code"].should.equal("AccessDenied") + ex.value.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(403) + ex.value.response["Error"]["Message"].should.equal("Access Denied") @set_initial_no_auth_action_count(4) @@ -776,8 +776,8 @@ def test_s3_invalid_token_with_temporary_credentials(): client.create_bucket(Bucket=bucket_name) with pytest.raises(ClientError) as ex: client.list_bucket_metrics_configurations(Bucket=bucket_name) - ex.exception.response["Error"]["Code"].should.equal("InvalidToken") - ex.exception.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) - ex.exception.response["Error"]["Message"].should.equal( + ex.value.response["Error"]["Code"].should.equal("InvalidToken") + ex.value.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) + ex.value.response["Error"]["Message"].should.equal( "The provided token is malformed or otherwise invalid." 
) diff --git a/tests/test_core/test_decorator_calls.py b/tests/test_core/test_decorator_calls.py index 5e04f075cc6e..c57d62485e61 100644 --- a/tests/test_core/test_decorator_calls.py +++ b/tests/test_core/test_decorator_calls.py @@ -24,6 +24,7 @@ def test_basic_decorator(): list(conn.get_all_instances()).should.equal([]) +@pytest.mark.network def test_context_manager(): conn = boto.connect_ec2("the_key", "the_secret") with pytest.raises(EC2ResponseError): @@ -38,6 +39,7 @@ def test_context_manager(): conn.get_all_instances() +@pytest.mark.network def test_decorator_start_and_stop(): conn = boto.connect_ec2("the_key", "the_secret") with pytest.raises(EC2ResponseError): diff --git a/tests/test_core/test_request_mocking.py b/tests/test_core/test_request_mocking.py index 2c44d52ce388..3c56c7242c53 100644 --- a/tests/test_core/test_request_mocking.py +++ b/tests/test_core/test_request_mocking.py @@ -1,4 +1,5 @@ import requests +import pytest import sure # noqa import boto3 @@ -6,6 +7,7 @@ @mock_sqs +@pytest.mark.network def test_passthrough_requests(): conn = boto3.client("sqs", region_name="us-west-1") conn.create_queue(QueueName="queue1") diff --git a/tests/test_datasync/test_datasync.py b/tests/test_datasync/test_datasync.py index d8d919f1392f..2214032c9438 100644 --- a/tests/test_datasync/test_datasync.py +++ b/tests/test_datasync/test_datasync.py @@ -139,7 +139,7 @@ def test_delete_location(): response = client.list_locations() assert len(response["Locations"]) == 0 - with assert_raises(ClientError) as e: + with pytest.raises(ClientError): response = client.delete_location(LocationArn=location_arn) @@ -262,7 +262,7 @@ def test_update_task(): assert response["Name"] == updated_name assert response["Options"] == updated_options - with assert_raises(ClientError) as e: + with pytest.raises(ClientError): client.update_task(TaskArn="doesnt_exist") @@ -286,7 +286,7 @@ def test_delete_task(): response = client.list_tasks() assert len(response["Tasks"]) == 0 - with assert_raises(ClientError) as e: + with pytest.raises(ClientError): response = client.delete_task(TaskArn=task_arn) diff --git a/tests/test_dynamodb2/test_dynamodb.py b/tests/test_dynamodb2/test_dynamodb.py index 6704bbcc78ba..04b23177346e 100644 --- a/tests/test_dynamodb2/test_dynamodb.py +++ b/tests/test_dynamodb2/test_dynamodb.py @@ -213,9 +213,9 @@ def test_item_add_empty_string_exception(): }, ) - ex.exception.response["Error"]["Code"].should.equal("ValidationException") - ex.exception.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) - ex.exception.response["Error"]["Message"].should.equal( + ex.value.response["Error"]["Code"].should.equal("ValidationException") + ex.value.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) + ex.value.response["Error"]["Message"].should.equal( "One or more parameter values were invalid: An AttributeValue may not contain an empty string" ) @@ -256,9 +256,9 @@ def test_update_item_with_empty_string_exception(): ExpressionAttributeValues={":Body": {"S": ""}}, ) - ex.exception.response["Error"]["Code"].should.equal("ValidationException") - ex.exception.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) - ex.exception.response["Error"]["Message"].should.equal( + ex.value.response["Error"]["Code"].should.equal("ValidationException") + ex.value.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) + ex.value.response["Error"]["Message"].should.equal( "One or more parameter values were invalid: An AttributeValue may not contain an empty string" ) @@ 
-1356,10 +1356,10 @@ def test_put_empty_item(): with pytest.raises(ClientError) as ex: table.put_item(Item={}) - ex.exception.response["Error"]["Message"].should.equal( + ex.value.response["Error"]["Message"].should.equal( "One or more parameter values were invalid: Missing the key structure_id in the item" ) - ex.exception.response["Error"]["Code"].should.equal("ValidationException") + ex.value.response["Error"]["Code"].should.equal("ValidationException") @mock_dynamodb2 @@ -1375,10 +1375,10 @@ def test_put_item_nonexisting_hash_key(): with pytest.raises(ClientError) as ex: table.put_item(Item={"a_terribly_misguided_id_attribute": "abcdef"}) - ex.exception.response["Error"]["Message"].should.equal( + ex.value.response["Error"]["Message"].should.equal( "One or more parameter values were invalid: Missing the key structure_id in the item" ) - ex.exception.response["Error"]["Code"].should.equal("ValidationException") + ex.value.response["Error"]["Code"].should.equal("ValidationException") @mock_dynamodb2 @@ -1400,10 +1400,10 @@ def test_put_item_nonexisting_range_key(): with pytest.raises(ClientError) as ex: table.put_item(Item={"structure_id": "abcdef"}) - ex.exception.response["Error"]["Message"].should.equal( + ex.value.response["Error"]["Message"].should.equal( "One or more parameter values were invalid: Missing the key added_at in the item" ) - ex.exception.response["Error"]["Code"].should.equal("ValidationException") + ex.value.response["Error"]["Code"].should.equal("ValidationException") def test_filter_expression(): @@ -2089,7 +2089,7 @@ def test_describe_continuous_backups_errors(): client.describe_continuous_backups(TableName="not-existing-table") # then - ex = e.exception + ex = e.value ex.operation_name.should.equal("DescribeContinuousBackups") ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) ex.response["Error"]["Code"].should.contain("TableNotFoundException") @@ -2178,7 +2178,7 @@ def test_update_continuous_backups_errors(): ) # then - ex = e.exception + ex = e.value ex.operation_name.should.equal("UpdateContinuousBackups") ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) ex.response["Error"]["Code"].should.contain("TableNotFoundException") @@ -2444,9 +2444,9 @@ def test_put_return_attributes(): Item={"id": {"S": "foo"}, "col1": {"S": "val3"}}, ReturnValues="ALL_NEW", ) - ex.exception.response["Error"]["Code"].should.equal("ValidationException") - ex.exception.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) - ex.exception.response["Error"]["Message"].should.equal( + ex.value.response["Error"]["Code"].should.equal("ValidationException") + ex.value.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) + ex.value.response["Error"]["Message"].should.equal( "Return values set to invalid value" ) @@ -2969,9 +2969,9 @@ def test_scan_by_non_exists_index(): with pytest.raises(ClientError) as ex: dynamodb.scan(TableName="test", IndexName="non_exists_index") - ex.exception.response["Error"]["Code"].should.equal("ValidationException") - ex.exception.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) - ex.exception.response["Error"]["Message"].should.equal( + ex.value.response["Error"]["Code"].should.equal("ValidationException") + ex.value.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) + ex.value.response["Error"]["Message"].should.equal( "The table does not have the specified index: non_exists_index" ) @@ -3008,8 +3008,8 @@ def test_query_by_non_exists_index(): 
KeyConditionExpression="CarModel=M", ) - ex.exception.response["Error"]["Code"].should.equal("ResourceNotFoundException") - ex.exception.response["Error"]["Message"].should.equal( + ex.value.response["Error"]["Code"].should.equal("ResourceNotFoundException") + ex.value.response["Error"]["Message"].should.equal( "Invalid index: non_exists_index for table: test. Available indexes are: test_gsi" ) @@ -3052,8 +3052,8 @@ def test_batch_items_throws_exception_when_requesting_100_items_for_single_table } } ) - ex.exception.response["Error"]["Code"].should.equal("ValidationException") - msg = ex.exception.response["Error"]["Message"] + ex.value.response["Error"]["Code"].should.equal("ValidationException") + msg = ex.value.response["Error"]["Message"] msg.should.contain("1 validation error detected: Value") msg.should.contain( "at 'requestItems.users.member.keys' failed to satisfy constraint: Member must have length less than or equal to 100" @@ -3080,8 +3080,8 @@ def test_batch_items_throws_exception_when_requesting_100_items_across_all_table }, } ) - ex.exception.response["Error"]["Code"].should.equal("ValidationException") - ex.exception.response["Error"]["Message"].should.equal( + ex.value.response["Error"]["Code"].should.equal("ValidationException") + ex.value.response["Error"]["Message"].should.equal( "Too many items requested for the BatchGetItem call" ) @@ -3172,8 +3172,8 @@ def test_batch_items_should_throw_exception_for_duplicate_request(): } } ) - ex.exception.response["Error"]["Code"].should.equal("ValidationException") - ex.exception.response["Error"]["Message"].should.equal( + ex.value.response["Error"]["Code"].should.equal("ValidationException") + ex.value.response["Error"]["Message"].should.equal( "Provided list of item keys contains duplicates" ) @@ -3210,8 +3210,8 @@ def test_index_with_unknown_attributes_should_fail(): BillingMode="PAY_PER_REQUEST", ) - ex.exception.response["Error"]["Code"].should.equal("ValidationException") - ex.exception.response["Error"]["Message"].should.contain(expected_exception) + ex.value.response["Error"]["Code"].should.equal("ValidationException") + ex.value.response["Error"]["Message"].should.contain(expected_exception) @mock_dynamodb2 @@ -3377,8 +3377,8 @@ def test_update_list_index__set_index_of_a_string(): "Item" ] - ex.exception.response["Error"]["Code"].should.equal("ValidationException") - ex.exception.response["Error"]["Message"].should.equal( + ex.value.response["Error"]["Code"].should.equal("ValidationException") + ex.value.response["Error"]["Message"].should.equal( "The document path provided in the update expression is invalid for update" ) @@ -3617,8 +3617,8 @@ def test_item_size_is_under_400KB(): def assert_failure_due_to_item_size(func, **kwargs): with pytest.raises(ClientError) as ex: func(**kwargs) - ex.exception.response["Error"]["Code"].should.equal("ValidationException") - ex.exception.response["Error"]["Message"].should.equal( + ex.value.response["Error"]["Code"].should.equal("ValidationException") + ex.value.response["Error"]["Message"].should.equal( "Item size has exceeded the maximum allowed size" ) @@ -3626,8 +3626,8 @@ def assert_failure_due_to_item_size(func, **kwargs): def assert_failure_due_to_item_size_to_update(func, **kwargs): with pytest.raises(ClientError) as ex: func(**kwargs) - ex.exception.response["Error"]["Code"].should.equal("ValidationException") - ex.exception.response["Error"]["Message"].should.equal( + ex.value.response["Error"]["Code"].should.equal("ValidationException") + 
ex.value.response["Error"]["Message"].should.equal( "Item size to update has exceeded the maximum allowed size" ) @@ -3656,8 +3656,8 @@ def test_hash_key_cannot_use_begins_with_operations(): table = dynamodb.Table("test-table") with pytest.raises(ClientError) as ex: table.query(KeyConditionExpression=Key("key").begins_with("prefix-")) - ex.exception.response["Error"]["Code"].should.equal("ValidationException") - ex.exception.response["Error"]["Message"].should.equal( + ex.value.response["Error"]["Code"].should.equal("ValidationException") + ex.value.response["Error"]["Message"].should.equal( "Query key condition not supported" ) @@ -4056,8 +4056,8 @@ def test_update_catches_invalid_list_append_operation(): ) # Verify correct error is returned - str(ex.exception).should.match("Parameter validation failed:") - str(ex.exception).should.match( + str(ex.value).should.match("Parameter validation failed:") + str(ex.value).should.match( "Invalid type for parameter ExpressionAttributeValues." ) @@ -4169,9 +4169,9 @@ def test_query_catches_when_no_filters(): with pytest.raises(ClientError) as ex: table.query(TableName="original-rbu-dev") - ex.exception.response["Error"]["Code"].should.equal("ValidationException") - ex.exception.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) - ex.exception.response["Error"]["Message"].should.equal( + ex.value.response["Error"]["Code"].should.equal("ValidationException") + ex.value.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) + ex.value.response["Error"]["Message"].should.equal( "Either KeyConditions or QueryFilter should be present" ) @@ -4205,8 +4205,8 @@ def test_invalid_transact_get_items(): ] ) - ex.exception.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) - ex.exception.response["Error"]["Message"].should.match( + ex.value.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) + ex.value.response["Error"]["Message"].should.match( r"failed to satisfy constraint: Member must have length less than or equal to 25", re.I, ) @@ -4219,9 +4219,9 @@ def test_invalid_transact_get_items(): ] ) - ex.exception.response["Error"]["Code"].should.equal("ResourceNotFoundException") - ex.exception.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) - ex.exception.response["Error"]["Message"].should.equal( + ex.value.response["Error"]["Code"].should.equal("ResourceNotFoundException") + ex.value.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) + ex.value.response["Error"]["Message"].should.equal( "Requested resource not found" ) @@ -4514,8 +4514,8 @@ def test_transact_write_items_put_conditional_expressions(): ] ) # Assert the exception is correct - ex.exception.response["Error"]["Code"].should.equal("TransactionCanceledException") - ex.exception.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) + ex.value.response["Error"]["Code"].should.equal("TransactionCanceledException") + ex.value.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) # Assert all are present items = dynamodb.scan(TableName="test-table")["Items"] items.should.have.length_of(1) @@ -4604,8 +4604,8 @@ def test_transact_write_items_conditioncheck_fails(): ] ) # Assert the exception is correct - ex.exception.response["Error"]["Code"].should.equal("TransactionCanceledException") - ex.exception.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) + ex.value.response["Error"]["Code"].should.equal("TransactionCanceledException") + 
ex.value.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) # Assert the original email address is still present items = dynamodb.scan(TableName="test-table")["Items"] @@ -4701,8 +4701,8 @@ def test_transact_write_items_delete_with_failed_condition_expression(): ] ) # Assert the exception is correct - ex.exception.response["Error"]["Code"].should.equal("TransactionCanceledException") - ex.exception.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) + ex.value.response["Error"]["Code"].should.equal("TransactionCanceledException") + ex.value.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) # Assert the original item is still present items = dynamodb.scan(TableName="test-table")["Items"] items.should.have.length_of(1) @@ -4774,8 +4774,8 @@ def test_transact_write_items_update_with_failed_condition_expression(): ] ) # Assert the exception is correct - ex.exception.response["Error"]["Code"].should.equal("TransactionCanceledException") - ex.exception.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) + ex.value.response["Error"]["Code"].should.equal("TransactionCanceledException") + ex.value.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) # Assert the original item is still present items = dynamodb.scan(TableName="test-table")["Items"] items.should.have.length_of(1) @@ -5343,9 +5343,9 @@ def test_transact_write_items_fails_with_transaction_canceled_exception(): }, ] ) - ex.exception.response["Error"]["Code"].should.equal("TransactionCanceledException") - ex.exception.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) - ex.exception.response["Error"]["Message"].should.equal( + ex.value.response["Error"]["Code"].should.equal("TransactionCanceledException") + ex.value.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) + ex.value.response["Error"]["Message"].should.equal( "Transaction cancelled, please refer cancellation reasons for specific reasons [None, ConditionalCheckFailed]" ) diff --git a/tests/test_dynamodb2/test_dynamodb_table_with_range_key.py b/tests/test_dynamodb2/test_dynamodb_table_with_range_key.py index e50cd45c1b5b..1c8c12110c87 100644 --- a/tests/test_dynamodb2/test_dynamodb_table_with_range_key.py +++ b/tests/test_dynamodb2/test_dynamodb_table_with_range_key.py @@ -1355,8 +1355,8 @@ def test_update_item_with_expression(): def assert_failure_due_to_key_not_in_schema(func, **kwargs): with pytest.raises(ClientError) as ex: func(**kwargs) - ex.exception.response["Error"]["Code"].should.equal("ValidationException") - ex.exception.response["Error"]["Message"].should.equal( + ex.value.response["Error"]["Code"].should.equal("ValidationException") + ex.value.response["Error"]["Message"].should.equal( "The provided key element does not match the schema" ) diff --git a/tests/test_ec2/test_amis.py b/tests/test_ec2/test_amis.py index b23eae4ab0f4..eb3fe6549a5f 100644 --- a/tests/test_ec2/test_amis.py +++ b/tests/test_ec2/test_amis.py @@ -6,7 +6,7 @@ from boto.exception import EC2ResponseError from botocore.exceptions import ClientError -# Ensure 'assert_raises' context manager support for Python 2.6 +# Ensure 'pytest.raises' context manager support for Python 2.6 import pytest import sure # noqa @@ -31,9 +31,9 @@ def test_ami_create_and_delete(): image_id = conn.create_image( instance.id, "test-ami", "this is a test ami", dry_run=True ) - ex.exception.error_code.should.equal("DryRunOperation") - ex.exception.status.should.equal(400) - ex.exception.message.should.equal( + 
ex.value.error_code.should.equal("DryRunOperation") + ex.value.status.should.equal(400) + ex.value.message.should.equal( "An error occurred (DryRunOperation) when calling the CreateImage operation: Request would have succeeded, but DryRun flag is set" ) @@ -78,9 +78,9 @@ def test_ami_create_and_delete(): # Deregister with pytest.raises(EC2ResponseError) as ex: success = conn.deregister_image(image_id, dry_run=True) - ex.exception.error_code.should.equal("DryRunOperation") - ex.exception.status.should.equal(400) - ex.exception.message.should.equal( + ex.value.error_code.should.equal("DryRunOperation") + ex.value.status.should.equal(400) + ex.value.message.should.equal( "An error occurred (DryRunOperation) when calling the DeregisterImage operation: Request would have succeeded, but DryRun flag is set" ) @@ -89,9 +89,9 @@ def test_ami_create_and_delete(): with pytest.raises(EC2ResponseError) as cm: conn.deregister_image(image_id) - cm.exception.code.should.equal("InvalidAMIID.NotFound") - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none + cm.value.code.should.equal("InvalidAMIID.NotFound") + cm.value.status.should.equal(400) + cm.value.request_id.should_not.be.none @requires_boto_gte("2.14.0") @@ -120,9 +120,9 @@ def test_ami_copy(): "this is a test copy ami", dry_run=True, ) - ex.exception.error_code.should.equal("DryRunOperation") - ex.exception.status.should.equal(400) - ex.exception.message.should.equal( + ex.value.error_code.should.equal("DryRunOperation") + ex.value.status.should.equal(400) + ex.value.message.should.equal( "An error occurred (DryRunOperation) when calling the CopyImage operation: Request would have succeeded, but DryRun flag is set" ) @@ -159,9 +159,9 @@ def test_ami_copy(): "test-copy-ami", "this is a test copy ami", ) - cm.exception.code.should.equal("InvalidAMIID.NotFound") - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none + cm.value.code.should.equal("InvalidAMIID.NotFound") + cm.value.status.should.equal(400) + cm.value.request_id.should_not.be.none # Copy from non-existent source region. 
with pytest.raises(EC2ResponseError) as cm: @@ -171,9 +171,9 @@ def test_ami_copy(): conn.copy_image( invalid_region, source_image.id, "test-copy-ami", "this is a test copy ami" ) - cm.exception.code.should.equal("InvalidAMIID.NotFound") - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none + cm.value.code.should.equal("InvalidAMIID.NotFound") + cm.value.status.should.equal(400) + cm.value.request_id.should_not.be.none @mock_ec2 @@ -210,9 +210,9 @@ def test_ami_tagging(): with pytest.raises(EC2ResponseError) as ex: image.add_tag("a key", "some value", dry_run=True) - ex.exception.error_code.should.equal("DryRunOperation") - ex.exception.status.should.equal(400) - ex.exception.message.should.equal( + ex.value.error_code.should.equal("DryRunOperation") + ex.value.status.should.equal(400) + ex.value.message.should.equal( "An error occurred (DryRunOperation) when calling the CreateTags operation: Request would have succeeded, but DryRun flag is set" ) @@ -235,9 +235,9 @@ def test_ami_create_from_missing_instance(): with pytest.raises(EC2ResponseError) as cm: conn.create_image(*args) - cm.exception.code.should.equal("InvalidInstanceID.NotFound") - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none + cm.value.code.should.equal("InvalidInstanceID.NotFound") + cm.value.status.should.equal(400) + cm.value.request_id.should_not.be.none @mock_ec2_deprecated @@ -355,9 +355,9 @@ def test_getting_missing_ami(): with pytest.raises(EC2ResponseError) as cm: conn.get_image("ami-missing") - cm.exception.code.should.equal("InvalidAMIID.NotFound") - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none + cm.value.code.should.equal("InvalidAMIID.NotFound") + cm.value.status.should.equal(400) + cm.value.request_id.should_not.be.none @mock_ec2_deprecated @@ -366,9 +366,9 @@ def test_getting_malformed_ami(): with pytest.raises(EC2ResponseError) as cm: conn.get_image("foo-missing") - cm.exception.code.should.equal("InvalidAMIID.Malformed") - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none + cm.value.code.should.equal("InvalidAMIID.Malformed") + cm.value.status.should.equal(400) + cm.value.request_id.should_not.be.none @mock_ec2_deprecated @@ -401,9 +401,9 @@ def test_ami_attribute_group_permissions(): # Add 'all' group and confirm with pytest.raises(EC2ResponseError) as ex: conn.modify_image_attribute(**dict(ADD_GROUP_ARGS, **{"dry_run": True})) - ex.exception.error_code.should.equal("DryRunOperation") - ex.exception.status.should.equal(400) - ex.exception.message.should.equal( + ex.value.error_code.should.equal("DryRunOperation") + ex.value.status.should.equal(400) + ex.value.message.should.equal( "An error occurred (DryRunOperation) when calling the ModifyImageAttribute operation: Request would have succeeded, but DryRun flag is set" ) @@ -682,9 +682,9 @@ def test_ami_attribute_error_cases(): conn.modify_image_attribute( image.id, attribute="launchPermission", operation="add", groups="everyone" ) - cm.exception.code.should.equal("InvalidAMIAttributeItemValue") - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none + cm.value.code.should.equal("InvalidAMIAttributeItemValue") + cm.value.status.should.equal(400) + cm.value.request_id.should_not.be.none # Error: Add with user ID that isn't an integer. 
with pytest.raises(EC2ResponseError) as cm: @@ -694,9 +694,9 @@ def test_ami_attribute_error_cases(): operation="add", user_ids="12345678901A", ) - cm.exception.code.should.equal("InvalidAMIAttributeItemValue") - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none + cm.value.code.should.equal("InvalidAMIAttributeItemValue") + cm.value.status.should.equal(400) + cm.value.request_id.should_not.be.none # Error: Add with user ID that is > length 12. with pytest.raises(EC2ResponseError) as cm: @@ -706,9 +706,9 @@ def test_ami_attribute_error_cases(): operation="add", user_ids="1234567890123", ) - cm.exception.code.should.equal("InvalidAMIAttributeItemValue") - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none + cm.value.code.should.equal("InvalidAMIAttributeItemValue") + cm.value.status.should.equal(400) + cm.value.request_id.should_not.be.none # Error: Add with user ID that is < length 12. with pytest.raises(EC2ResponseError) as cm: @@ -718,9 +718,9 @@ def test_ami_attribute_error_cases(): operation="add", user_ids="12345678901", ) - cm.exception.code.should.equal("InvalidAMIAttributeItemValue") - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none + cm.value.code.should.equal("InvalidAMIAttributeItemValue") + cm.value.status.should.equal(400) + cm.value.request_id.should_not.be.none # Error: Add with one invalid user ID among other valid IDs, ensure no # partial changes. @@ -731,9 +731,9 @@ def test_ami_attribute_error_cases(): operation="add", user_ids=["123456789011", "foo", "123456789022"], ) - cm.exception.code.should.equal("InvalidAMIAttributeItemValue") - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none + cm.value.code.should.equal("InvalidAMIAttributeItemValue") + cm.value.status.should.equal(400) + cm.value.request_id.should_not.be.none attributes = conn.get_image_attribute(image.id, attribute="launchPermission") attributes.attrs.should.have.length_of(0) @@ -743,9 +743,9 @@ def test_ami_attribute_error_cases(): conn.modify_image_attribute( "ami-abcd1234", attribute="launchPermission", operation="add", groups="all" ) - cm.exception.code.should.equal("InvalidAMIID.NotFound") - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none + cm.value.code.should.equal("InvalidAMIID.NotFound") + cm.value.status.should.equal(400) + cm.value.request_id.should_not.be.none # Error: Remove with invalid image ID with pytest.raises(EC2ResponseError) as cm: @@ -755,9 +755,9 @@ def test_ami_attribute_error_cases(): operation="remove", groups="all", ) - cm.exception.code.should.equal("InvalidAMIID.NotFound") - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none + cm.value.code.should.equal("InvalidAMIID.NotFound") + cm.value.status.should.equal(400) + cm.value.request_id.should_not.be.none @mock_ec2 diff --git a/tests/test_ec2/test_dhcp_options.py b/tests/test_ec2/test_dhcp_options.py index c04faa85d42e..85bc7f2447f1 100644 --- a/tests/test_ec2/test_dhcp_options.py +++ b/tests/test_ec2/test_dhcp_options.py @@ -1,6 +1,6 @@ from __future__ import unicode_literals -# Ensure 'assert_raises' context manager support for Python 2.6 +# Ensure 'pytest.raises' context manager support for Python 2.6 import pytest import boto3 @@ -34,9 +34,9 @@ def test_dhcp_options_associate_invalid_dhcp_id(): with pytest.raises(EC2ResponseError) as cm: conn.associate_dhcp_options("foo", vpc.id) - 
cm.exception.code.should.equal("InvalidDhcpOptionID.NotFound") - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none + cm.value.code.should.equal("InvalidDhcpOptionID.NotFound") + cm.value.status.should.equal(400) + cm.value.request_id.should_not.be.none @mock_ec2_deprecated @@ -47,9 +47,9 @@ def test_dhcp_options_associate_invalid_vpc_id(): with pytest.raises(EC2ResponseError) as cm: conn.associate_dhcp_options(dhcp_options.id, "foo") - cm.exception.code.should.equal("InvalidVpcID.NotFound") - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none + cm.value.code.should.equal("InvalidVpcID.NotFound") + cm.value.status.should.equal(400) + cm.value.request_id.should_not.be.none @mock_ec2_deprecated @@ -65,17 +65,17 @@ def test_dhcp_options_delete_with_vpc(): with pytest.raises(EC2ResponseError) as cm: conn.delete_dhcp_options(dhcp_options_id) - cm.exception.code.should.equal("DependencyViolation") - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none + cm.value.code.should.equal("DependencyViolation") + cm.value.status.should.equal(400) + cm.value.request_id.should_not.be.none vpc.delete() with pytest.raises(EC2ResponseError) as cm: conn.get_all_dhcp_options([dhcp_options_id]) - cm.exception.code.should.equal("InvalidDhcpOptionID.NotFound") - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none + cm.value.code.should.equal("InvalidDhcpOptionID.NotFound") + cm.value.status.should.equal(400) + cm.value.request_id.should_not.be.none @mock_ec2_deprecated @@ -101,15 +101,15 @@ def test_create_dhcp_options_invalid_options(): with pytest.raises(EC2ResponseError) as cm: conn.create_dhcp_options(ntp_servers=servers) - cm.exception.code.should.equal("InvalidParameterValue") - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none + cm.value.code.should.equal("InvalidParameterValue") + cm.value.status.should.equal(400) + cm.value.request_id.should_not.be.none with pytest.raises(EC2ResponseError) as cm: conn.create_dhcp_options(netbios_node_type="0") - cm.exception.code.should.equal("InvalidParameterValue") - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none + cm.value.code.should.equal("InvalidParameterValue") + cm.value.status.should.equal(400) + cm.value.request_id.should_not.be.none @mock_ec2_deprecated @@ -132,9 +132,9 @@ def test_describe_dhcp_options_invalid_id(): with pytest.raises(EC2ResponseError) as cm: conn.get_all_dhcp_options(["1"]) - cm.exception.code.should.equal("InvalidDhcpOptionID.NotFound") - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none + cm.value.code.should.equal("InvalidDhcpOptionID.NotFound") + cm.value.status.should.equal(400) + cm.value.request_id.should_not.be.none @mock_ec2_deprecated @@ -150,9 +150,9 @@ def test_delete_dhcp_options(): with pytest.raises(EC2ResponseError) as cm: conn.get_all_dhcp_options([dhcp_option.id]) - cm.exception.code.should.equal("InvalidDhcpOptionID.NotFound") - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none + cm.value.code.should.equal("InvalidDhcpOptionID.NotFound") + cm.value.status.should.equal(400) + cm.value.request_id.should_not.be.none @mock_ec2_deprecated @@ -163,9 +163,9 @@ def test_delete_dhcp_options_invalid_id(): with pytest.raises(EC2ResponseError) as cm: conn.delete_dhcp_options("dopt-abcd1234") - cm.exception.code.should.equal("InvalidDhcpOptionID.NotFound") - 
cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none + cm.value.code.should.equal("InvalidDhcpOptionID.NotFound") + cm.value.status.should.equal(400) + cm.value.request_id.should_not.be.none @mock_ec2_deprecated @@ -176,9 +176,9 @@ def test_delete_dhcp_options_malformed_id(): with pytest.raises(EC2ResponseError) as cm: conn.delete_dhcp_options("foo-abcd1234") - cm.exception.code.should.equal("InvalidDhcpOptionsId.Malformed") - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none + cm.value.code.should.equal("InvalidDhcpOptionsId.Malformed") + cm.value.status.should.equal(400) + cm.value.request_id.should_not.be.none @mock_ec2_deprecated diff --git a/tests/test_ec2/test_elastic_block_store.py b/tests/test_ec2/test_elastic_block_store.py index 846d1bacc218..9e4a3b7bbc43 100644 --- a/tests/test_ec2/test_elastic_block_store.py +++ b/tests/test_ec2/test_elastic_block_store.py @@ -1,6 +1,6 @@ from __future__ import unicode_literals -# Ensure 'assert_raises' context manager support for Python 2.6 +# Ensure 'pytest.raises' context manager support for Python 2.6 import pytest from moto.ec2 import ec2_backends @@ -32,9 +32,9 @@ def test_create_and_delete_volume(): with pytest.raises(EC2ResponseError) as ex: volume.delete(dry_run=True) - ex.exception.error_code.should.equal("DryRunOperation") - ex.exception.status.should.equal(400) - ex.exception.message.should.equal( + ex.value.error_code.should.equal("DryRunOperation") + ex.value.status.should.equal(400) + ex.value.message.should.equal( "An error occurred (DryRunOperation) when calling the DeleteVolume operation: Request would have succeeded, but DryRun flag is set" ) @@ -47,9 +47,9 @@ def test_create_and_delete_volume(): # Deleting something that was already deleted should throw an error with pytest.raises(EC2ResponseError) as cm: volume.delete() - cm.exception.code.should.equal("InvalidVolume.NotFound") - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none + cm.value.code.should.equal("InvalidVolume.NotFound") + cm.value.status.should.equal(400) + cm.value.request_id.should_not.be.none @mock_ec2_deprecated @@ -71,11 +71,11 @@ def test_delete_attached_volume(): # attempt to delete volume # assert raises VolumeInUseError - with assert_raises(EC2ResponseError) as ex: + with pytest.raises(EC2ResponseError) as ex: volume.delete() - ex.exception.error_code.should.equal("VolumeInUse") - ex.exception.status.should.equal(400) - ex.exception.message.should.equal( + ex.value.error_code.should.equal("VolumeInUse") + ex.value.status.should.equal(400) + ex.value.message.should.equal( "Volume {0} is currently attached to {1}".format(volume.id, instance.id) ) @@ -96,9 +96,9 @@ def test_create_encrypted_volume_dryrun(): conn = boto.ec2.connect_to_region("us-east-1") with pytest.raises(EC2ResponseError) as ex: conn.create_volume(80, "us-east-1a", encrypted=True, dry_run=True) - ex.exception.error_code.should.equal("DryRunOperation") - ex.exception.status.should.equal(400) - ex.exception.message.should.equal( + ex.value.error_code.should.equal("DryRunOperation") + ex.value.status.should.equal(400) + ex.value.message.should.equal( "An error occurred (DryRunOperation) when calling the CreateVolume operation: Request would have succeeded, but DryRun flag is set" ) @@ -110,9 +110,9 @@ def test_create_encrypted_volume(): with pytest.raises(EC2ResponseError) as ex: conn.create_volume(80, "us-east-1a", encrypted=True, dry_run=True) - 
ex.exception.error_code.should.equal("DryRunOperation") - ex.exception.status.should.equal(400) - ex.exception.message.should.equal( + ex.value.error_code.should.equal("DryRunOperation") + ex.value.status.should.equal(400) + ex.value.message.should.equal( "An error occurred (DryRunOperation) when calling the CreateVolume operation: Request would have succeeded, but DryRun flag is set" ) @@ -135,9 +135,9 @@ def test_filter_volume_by_id(): with pytest.raises(EC2ResponseError) as cm: conn.get_all_volumes(volume_ids=["vol-does_not_exist"]) - cm.exception.code.should.equal("InvalidVolume.NotFound") - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none + cm.value.code.should.equal("InvalidVolume.NotFound") + cm.value.status.should.equal(400) + cm.value.request_id.should_not.be.none @mock_ec2_deprecated @@ -260,9 +260,9 @@ def test_volume_attach_and_detach(): with pytest.raises(EC2ResponseError) as ex: volume.attach(instance.id, "/dev/sdh", dry_run=True) - ex.exception.error_code.should.equal("DryRunOperation") - ex.exception.status.should.equal(400) - ex.exception.message.should.equal( + ex.value.error_code.should.equal("DryRunOperation") + ex.value.status.should.equal(400) + ex.value.message.should.equal( "An error occurred (DryRunOperation) when calling the AttachVolume operation: Request would have succeeded, but DryRun flag is set" ) @@ -276,9 +276,9 @@ def test_volume_attach_and_detach(): with pytest.raises(EC2ResponseError) as ex: volume.detach(dry_run=True) - ex.exception.error_code.should.equal("DryRunOperation") - ex.exception.status.should.equal(400) - ex.exception.message.should.equal( + ex.value.error_code.should.equal("DryRunOperation") + ex.value.status.should.equal(400) + ex.value.message.should.equal( "An error occurred (DryRunOperation) when calling the DetachVolume operation: Request would have succeeded, but DryRun flag is set" ) @@ -289,21 +289,21 @@ def test_volume_attach_and_detach(): with pytest.raises(EC2ResponseError) as cm1: volume.attach("i-1234abcd", "/dev/sdh") - cm1.exception.code.should.equal("InvalidInstanceID.NotFound") - cm1.exception.status.should.equal(400) - cm1.exception.request_id.should_not.be.none + cm1.value.code.should.equal("InvalidInstanceID.NotFound") + cm1.value.status.should.equal(400) + cm1.value.request_id.should_not.be.none with pytest.raises(EC2ResponseError) as cm2: conn.detach_volume(volume.id, instance.id, "/dev/sdh") - cm2.exception.code.should.equal("InvalidAttachment.NotFound") - cm2.exception.status.should.equal(400) - cm2.exception.request_id.should_not.be.none + cm2.value.code.should.equal("InvalidAttachment.NotFound") + cm2.value.status.should.equal(400) + cm2.value.request_id.should_not.be.none with pytest.raises(EC2ResponseError) as cm3: conn.detach_volume(volume.id, "i-1234abcd", "/dev/sdh") - cm3.exception.code.should.equal("InvalidInstanceID.NotFound") - cm3.exception.status.should.equal(400) - cm3.exception.request_id.should_not.be.none + cm3.value.code.should.equal("InvalidInstanceID.NotFound") + cm3.value.status.should.equal(400) + cm3.value.request_id.should_not.be.none @mock_ec2_deprecated @@ -313,9 +313,9 @@ def test_create_snapshot(): with pytest.raises(EC2ResponseError) as ex: snapshot = volume.create_snapshot("a dryrun snapshot", dry_run=True) - ex.exception.error_code.should.equal("DryRunOperation") - ex.exception.status.should.equal(400) - ex.exception.message.should.equal( + ex.value.error_code.should.equal("DryRunOperation") + ex.value.status.should.equal(400) + 
ex.value.message.should.equal( "An error occurred (DryRunOperation) when calling the CreateSnapshot operation: Request would have succeeded, but DryRun flag is set" ) @@ -341,9 +341,9 @@ def test_create_snapshot(): # Deleting something that was already deleted should throw an error with pytest.raises(EC2ResponseError) as cm: snapshot.delete() - cm.exception.code.should.equal("InvalidSnapshot.NotFound") - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none + cm.value.code.should.equal("InvalidSnapshot.NotFound") + cm.value.status.should.equal(400) + cm.value.request_id.should_not.be.none @mock_ec2_deprecated @@ -383,9 +383,9 @@ def test_filter_snapshot_by_id(): with pytest.raises(EC2ResponseError) as cm: conn.get_all_snapshots(snapshot_ids=["snap-does_not_exist"]) - cm.exception.code.should.equal("InvalidSnapshot.NotFound") - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none + cm.value.code.should.equal("InvalidSnapshot.NotFound") + cm.value.status.should.equal(400) + cm.value.request_id.should_not.be.none @mock_ec2_deprecated @@ -485,9 +485,9 @@ def test_snapshot_attribute(): with pytest.raises(EC2ResponseError) as ex: conn.modify_snapshot_attribute(**dict(ADD_GROUP_ARGS, **{"dry_run": True})) - ex.exception.error_code.should.equal("DryRunOperation") - ex.exception.status.should.equal(400) - ex.exception.message.should.equal( + ex.value.error_code.should.equal("DryRunOperation") + ex.value.status.should.equal(400) + ex.value.message.should.equal( "An error occurred (DryRunOperation) when calling the ModifySnapshotAttribute operation: Request would have succeeded, but DryRun flag is set" ) @@ -507,9 +507,9 @@ def test_snapshot_attribute(): # Remove 'all' group and confirm with pytest.raises(EC2ResponseError) as ex: conn.modify_snapshot_attribute(**dict(REMOVE_GROUP_ARGS, **{"dry_run": True})) - ex.exception.error_code.should.equal("DryRunOperation") - ex.exception.status.should.equal(400) - ex.exception.message.should.equal( + ex.value.error_code.should.equal("DryRunOperation") + ex.value.status.should.equal(400) + ex.value.message.should.equal( "An error occurred (DryRunOperation) when calling the ModifySnapshotAttribute operation: Request would have succeeded, but DryRun flag is set" ) @@ -533,9 +533,9 @@ def test_snapshot_attribute(): operation="add", groups="everyone", ) - cm.exception.code.should.equal("InvalidAMIAttributeItemValue") - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none + cm.value.code.should.equal("InvalidAMIAttributeItemValue") + cm.value.status.should.equal(400) + cm.value.request_id.should_not.be.none # Error: Add with invalid snapshot ID with pytest.raises(EC2ResponseError) as cm: @@ -545,9 +545,9 @@ def test_snapshot_attribute(): operation="add", groups="all", ) - cm.exception.code.should.equal("InvalidSnapshot.NotFound") - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none + cm.value.code.should.equal("InvalidSnapshot.NotFound") + cm.value.status.should.equal(400) + cm.value.request_id.should_not.be.none # Error: Remove with invalid snapshot ID with pytest.raises(EC2ResponseError) as cm: @@ -557,9 +557,9 @@ def test_snapshot_attribute(): operation="remove", groups="all", ) - cm.exception.code.should.equal("InvalidSnapshot.NotFound") - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none + cm.value.code.should.equal("InvalidSnapshot.NotFound") + cm.value.status.should.equal(400) + 
cm.value.request_id.should_not.be.none
 
 
 @mock_ec2
@@ -594,12 +594,12 @@ def test_modify_snapshot_attribute():
     }
 
     # Add 'all' group and confirm
-    with assert_raises(ClientError) as cm:
+    with pytest.raises(ClientError) as cm:
         ec2_client.modify_snapshot_attribute(**dict(ADD_GROUP_ARGS, **{"DryRun": True}))
 
-    cm.exception.response["Error"]["Code"].should.equal("DryRunOperation")
-    cm.exception.response["ResponseMetadata"]["RequestId"].should_not.be.none
-    cm.exception.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400)
+    cm.value.response["Error"]["Code"].should.equal("DryRunOperation")
+    cm.value.response["ResponseMetadata"]["RequestId"].should_not.be.none
+    cm.value.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400)
 
     ec2_client.modify_snapshot_attribute(**ADD_GROUP_ARGS)
 
@@ -619,13 +619,13 @@ def test_modify_snapshot_attribute():
     ], "This snapshot should have public group permissions."
 
     # Remove 'all' group and confirm
-    with assert_raises(ClientError) as ex:
+    with pytest.raises(ClientError) as cm:
         ec2_client.modify_snapshot_attribute(
             **dict(REMOVE_GROUP_ARGS, **{"DryRun": True})
         )
-    cm.exception.response["Error"]["Code"].should.equal("DryRunOperation")
-    cm.exception.response["ResponseMetadata"]["RequestId"].should_not.be.none
-    cm.exception.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400)
+    cm.value.response["Error"]["Code"].should.equal("DryRunOperation")
+    cm.value.response["ResponseMetadata"]["RequestId"].should_not.be.none
+    cm.value.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400)
 
     ec2_client.modify_snapshot_attribute(**REMOVE_GROUP_ARGS)
 
@@ -645,40 +645,40 @@ def test_modify_snapshot_attribute():
     ], "This snapshot should have no permissions."
 
     # Error: Add with group != 'all'
-    with assert_raises(ClientError) as cm:
+    with pytest.raises(ClientError) as cm:
         ec2_client.modify_snapshot_attribute(
             SnapshotId=snapshot.id,
             Attribute="createVolumePermission",
             OperationType="add",
             GroupNames=["everyone"],
         )
-    cm.exception.response["Error"]["Code"].should.equal("InvalidAMIAttributeItemValue")
-    cm.exception.response["ResponseMetadata"]["RequestId"].should_not.be.none
-    cm.exception.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400)
+    cm.value.response["Error"]["Code"].should.equal("InvalidAMIAttributeItemValue")
+    cm.value.response["ResponseMetadata"]["RequestId"].should_not.be.none
+    cm.value.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400)
 
     # Error: Add with invalid snapshot ID
-    with assert_raises(ClientError) as cm:
+    with pytest.raises(ClientError) as cm:
         ec2_client.modify_snapshot_attribute(
             SnapshotId="snapshot-abcd1234",
             Attribute="createVolumePermission",
             OperationType="add",
             GroupNames=["all"],
         )
-    cm.exception.response["Error"]["Code"].should.equal("InvalidSnapshot.NotFound")
-    cm.exception.response["ResponseMetadata"]["RequestId"].should_not.be.none
-    cm.exception.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400)
+    cm.value.response["Error"]["Code"].should.equal("InvalidSnapshot.NotFound")
+    cm.value.response["ResponseMetadata"]["RequestId"].should_not.be.none
+    cm.value.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400)
 
     # Error: Remove with invalid snapshot ID
-    with assert_raises(ClientError) as cm:
+    with pytest.raises(ClientError) as cm:
         ec2_client.modify_snapshot_attribute(
             SnapshotId="snapshot-abcd1234",
             Attribute="createVolumePermission",
             OperationType="remove",
             GroupNames=["all"],
         )
-    
cm.exception.response["Error"]["Code"].should.equal("InvalidSnapshot.NotFound") - cm.exception.response["ResponseMetadata"]["RequestId"].should_not.be.none - cm.exception.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) + cm.value.response["Error"]["Code"].should.equal("InvalidSnapshot.NotFound") + cm.value.response["ResponseMetadata"]["RequestId"].should_not.be.none + cm.value.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) # Test adding user id ec2_client.modify_snapshot_attribute( @@ -741,9 +741,9 @@ def test_create_volume_from_snapshot(): with pytest.raises(EC2ResponseError) as ex: snapshot = volume.create_snapshot("a test snapshot", dry_run=True) - ex.exception.error_code.should.equal("DryRunOperation") - ex.exception.status.should.equal(400) - ex.exception.message.should.equal( + ex.value.error_code.should.equal("DryRunOperation") + ex.value.status.should.equal(400) + ex.value.message.should.equal( "An error occurred (DryRunOperation) when calling the CreateSnapshot operation: Request would have succeeded, but DryRun flag is set" ) @@ -789,9 +789,9 @@ def test_modify_attribute_blockDeviceMapping(): instance.modify_attribute( "blockDeviceMapping", {"/dev/sda1": True}, dry_run=True ) - ex.exception.error_code.should.equal("DryRunOperation") - ex.exception.status.should.equal(400) - ex.exception.message.should.equal( + ex.value.error_code.should.equal("DryRunOperation") + ex.value.status.should.equal(400) + ex.value.message.should.equal( "An error occurred (DryRunOperation) when calling the ModifyInstanceAttribute operation: Request would have succeeded, but DryRun flag is set" ) @@ -810,9 +810,9 @@ def test_volume_tag_escaping(): with pytest.raises(EC2ResponseError) as ex: snapshot.add_tags({"key": ""}, dry_run=True) - ex.exception.error_code.should.equal("DryRunOperation") - ex.exception.status.should.equal(400) - ex.exception.message.should.equal( + ex.value.error_code.should.equal("DryRunOperation") + ex.value.status.should.equal(400) + ex.value.message.should.equal( "An error occurred (DryRunOperation) when calling the CreateTags operation: Request would have succeeded, but DryRun flag is set" ) snaps = [snap for snap in conn.get_all_snapshots() if snap.id == snapshot.id] @@ -880,12 +880,12 @@ def test_copy_snapshot(): # Copy from non-existent source ID. with pytest.raises(ClientError) as cm: create_snapshot_error = ec2_client.create_snapshot(VolumeId="vol-abcd1234") - cm.exception.response["Error"]["Code"].should.equal("InvalidVolume.NotFound") - cm.exception.response["Error"]["Message"].should.equal( + cm.value.response["Error"]["Code"].should.equal("InvalidVolume.NotFound") + cm.value.response["Error"]["Message"].should.equal( "The volume 'vol-abcd1234' does not exist." ) - cm.exception.response["ResponseMetadata"]["RequestId"].should_not.be.none - cm.exception.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) + cm.value.response["ResponseMetadata"]["RequestId"].should_not.be.none + cm.value.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) # Copy from non-existent source region. 
with pytest.raises(ClientError) as cm: @@ -893,10 +893,10 @@ def test_copy_snapshot(): SourceSnapshotId=create_snapshot_response["SnapshotId"], SourceRegion="eu-west-2", ) - cm.exception.response["Error"]["Code"].should.equal("InvalidSnapshot.NotFound") - cm.exception.response["Error"]["Message"].should.be.none - cm.exception.response["ResponseMetadata"]["RequestId"].should_not.be.none - cm.exception.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) + cm.value.response["Error"]["Code"].should.equal("InvalidSnapshot.NotFound") + cm.value.response["Error"]["Message"].should.be.none + cm.value.response["ResponseMetadata"]["RequestId"].should_not.be.none + cm.value.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) @mock_ec2 diff --git a/tests/test_ec2/test_elastic_ip_addresses.py b/tests/test_ec2/test_elastic_ip_addresses.py index e9a247ea7cbc..8edd92e65966 100644 --- a/tests/test_ec2/test_elastic_ip_addresses.py +++ b/tests/test_ec2/test_elastic_ip_addresses.py @@ -1,6 +1,6 @@ from __future__ import unicode_literals -# Ensure 'assert_raises' context manager support for Python 2.6 +# Ensure 'pytest.raises' context manager support for Python 2.6 import pytest import boto @@ -22,9 +22,9 @@ def test_eip_allocate_classic(): with pytest.raises(EC2ResponseError) as ex: standard = conn.allocate_address(dry_run=True) - ex.exception.error_code.should.equal("DryRunOperation") - ex.exception.status.should.equal(400) - ex.exception.message.should.equal( + ex.value.error_code.should.equal("DryRunOperation") + ex.value.status.should.equal(400) + ex.value.message.should.equal( "An error occurred (DryRunOperation) when calling the AllocateAddress operation: Request would have succeeded, but DryRun flag is set" ) @@ -36,9 +36,9 @@ def test_eip_allocate_classic(): with pytest.raises(EC2ResponseError) as ex: standard.release(dry_run=True) - ex.exception.error_code.should.equal("DryRunOperation") - ex.exception.status.should.equal(400) - ex.exception.message.should.equal( + ex.value.error_code.should.equal("DryRunOperation") + ex.value.status.should.equal(400) + ex.value.message.should.equal( "An error occurred (DryRunOperation) when calling the ReleaseAddress operation: Request would have succeeded, but DryRun flag is set" ) @@ -53,9 +53,9 @@ def test_eip_allocate_vpc(): with pytest.raises(EC2ResponseError) as ex: vpc = conn.allocate_address(domain="vpc", dry_run=True) - ex.exception.error_code.should.equal("DryRunOperation") - ex.exception.status.should.equal(400) - ex.exception.message.should.equal( + ex.value.error_code.should.equal("DryRunOperation") + ex.value.status.should.equal(400) + ex.value.message.should.equal( "An error occurred (DryRunOperation) when calling the AllocateAddress operation: Request would have succeeded, but DryRun flag is set" ) @@ -85,9 +85,9 @@ def test_eip_allocate_invalid_domain(): with pytest.raises(EC2ResponseError) as cm: conn.allocate_address(domain="bogus") - cm.exception.code.should.equal("InvalidParameterValue") - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none + cm.value.code.should.equal("InvalidParameterValue") + cm.value.status.should.equal(400) + cm.value.request_id.should_not.be.none @mock_ec2_deprecated @@ -103,17 +103,17 @@ def test_eip_associate_classic(): with pytest.raises(EC2ResponseError) as cm: conn.associate_address(public_ip=eip.public_ip) - cm.exception.code.should.equal("MissingParameter") - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none + 
cm.value.code.should.equal("MissingParameter") + cm.value.status.should.equal(400) + cm.value.request_id.should_not.be.none with pytest.raises(EC2ResponseError) as ex: conn.associate_address( instance_id=instance.id, public_ip=eip.public_ip, dry_run=True ) - ex.exception.error_code.should.equal("DryRunOperation") - ex.exception.status.should.equal(400) - ex.exception.message.should.equal( + ex.value.error_code.should.equal("DryRunOperation") + ex.value.status.should.equal(400) + ex.value.message.should.equal( "An error occurred (DryRunOperation) when calling the AssociateAddress operation: Request would have succeeded, but DryRun flag is set" ) @@ -124,9 +124,9 @@ def test_eip_associate_classic(): with pytest.raises(EC2ResponseError) as ex: conn.disassociate_address(public_ip=eip.public_ip, dry_run=True) - ex.exception.error_code.should.equal("DryRunOperation") - ex.exception.status.should.equal(400) - ex.exception.message.should.equal( + ex.value.error_code.should.equal("DryRunOperation") + ex.value.status.should.equal(400) + ex.value.message.should.equal( "An error occurred (DryRunOperation) when calling the DisAssociateAddress operation: Request would have succeeded, but DryRun flag is set" ) @@ -154,9 +154,9 @@ def test_eip_associate_vpc(): with pytest.raises(EC2ResponseError) as cm: conn.associate_address(allocation_id=eip.allocation_id) - cm.exception.code.should.equal("MissingParameter") - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none + cm.value.code.should.equal("MissingParameter") + cm.value.status.should.equal(400) + cm.value.request_id.should_not.be.none conn.associate_address(instance_id=instance.id, allocation_id=eip.allocation_id) # no .update() on address ): @@ -170,9 +170,9 @@ def test_eip_associate_vpc(): with pytest.raises(EC2ResponseError) as ex: eip.release(dry_run=True) - ex.exception.error_code.should.equal("DryRunOperation") - ex.exception.status.should.equal(400) - ex.exception.message.should.equal( + ex.value.error_code.should.equal("DryRunOperation") + ex.value.status.should.equal(400) + ex.value.message.should.equal( "An error occurred (DryRunOperation) when calling the ReleaseAddress operation: Request would have succeeded, but DryRun flag is set" ) @@ -242,9 +242,9 @@ def test_eip_associate_network_interface(): with pytest.raises(EC2ResponseError) as cm: conn.associate_address(network_interface_id=eni.id) - cm.exception.code.should.equal("MissingParameter") - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none + cm.value.code.should.equal("MissingParameter") + cm.value.status.should.equal(400) + cm.value.request_id.should_not.be.none conn.associate_address(network_interface_id=eni.id, allocation_id=eip.allocation_id) # no .update() on address ): @@ -279,9 +279,9 @@ def test_eip_reassociate(): conn.associate_address( instance_id=instance2.id, public_ip=eip.public_ip, allow_reassociation=False ) - cm.exception.code.should.equal("Resource.AlreadyAssociated") - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none + cm.value.code.should.equal("Resource.AlreadyAssociated") + cm.value.status.should.equal(400) + cm.value.request_id.should_not.be.none conn.associate_address.when.called_with( instance_id=instance2.id, public_ip=eip.public_ip, allow_reassociation=True @@ -313,9 +313,9 @@ def test_eip_reassociate_nic(): # Different ID detects resource association with pytest.raises(EC2ResponseError) as cm: conn.associate_address(network_interface_id=eni2.id, 
public_ip=eip.public_ip) - cm.exception.code.should.equal("Resource.AlreadyAssociated") - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none + cm.value.code.should.equal("Resource.AlreadyAssociated") + cm.value.status.should.equal(400) + cm.value.request_id.should_not.be.none conn.associate_address.when.called_with( network_interface_id=eni2.id, public_ip=eip.public_ip, allow_reassociation=True @@ -337,9 +337,9 @@ def test_eip_associate_invalid_args(): with pytest.raises(EC2ResponseError) as cm: conn.associate_address(instance_id=instance.id) - cm.exception.code.should.equal("MissingParameter") - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none + cm.value.code.should.equal("MissingParameter") + cm.value.status.should.equal(400) + cm.value.request_id.should_not.be.none instance.terminate() @@ -351,9 +351,9 @@ def test_eip_disassociate_bogus_association(): with pytest.raises(EC2ResponseError) as cm: conn.disassociate_address(association_id="bogus") - cm.exception.code.should.equal("InvalidAssociationID.NotFound") - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none + cm.value.code.should.equal("InvalidAssociationID.NotFound") + cm.value.status.should.equal(400) + cm.value.request_id.should_not.be.none @mock_ec2_deprecated @@ -363,9 +363,9 @@ def test_eip_release_bogus_eip(): with pytest.raises(EC2ResponseError) as cm: conn.release_address(allocation_id="bogus") - cm.exception.code.should.equal("InvalidAllocationID.NotFound") - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none + cm.value.code.should.equal("InvalidAllocationID.NotFound") + cm.value.status.should.equal(400) + cm.value.request_id.should_not.be.none @mock_ec2_deprecated @@ -375,9 +375,9 @@ def test_eip_disassociate_arg_error(): with pytest.raises(EC2ResponseError) as cm: conn.disassociate_address() - cm.exception.code.should.equal("MissingParameter") - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none + cm.value.code.should.equal("MissingParameter") + cm.value.status.should.equal(400) + cm.value.request_id.should_not.be.none @mock_ec2_deprecated @@ -387,9 +387,9 @@ def test_eip_release_arg_error(): with pytest.raises(EC2ResponseError) as cm: conn.release_address() - cm.exception.code.should.equal("MissingParameter") - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none + cm.value.code.should.equal("MissingParameter") + cm.value.status.should.equal(400) + cm.value.request_id.should_not.be.none @mock_ec2_deprecated @@ -439,9 +439,9 @@ def test_eip_describe_none(): with pytest.raises(EC2ResponseError) as cm: conn.get_all_addresses(addresses=["256.256.256.256"]) - cm.exception.code.should.equal("InvalidAddress.NotFound") - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none + cm.value.code.should.equal("InvalidAddress.NotFound") + cm.value.status.should.equal(400) + cm.value.request_id.should_not.be.none @mock_ec2 diff --git a/tests/test_ec2/test_elastic_network_interfaces.py b/tests/test_ec2/test_elastic_network_interfaces.py index 259885ee075d..a5bb019b085b 100644 --- a/tests/test_ec2/test_elastic_network_interfaces.py +++ b/tests/test_ec2/test_elastic_network_interfaces.py @@ -1,6 +1,6 @@ from __future__ import unicode_literals -# Ensure 'assert_raises' context manager support for Python 2.6 +# Ensure 'pytest.raises' context manager support for Python 2.6 import pytest import boto3 @@ -22,9 +22,9 @@ def 
test_elastic_network_interfaces(): with pytest.raises(EC2ResponseError) as ex: eni = conn.create_network_interface(subnet.id, dry_run=True) - ex.exception.error_code.should.equal("DryRunOperation") - ex.exception.status.should.equal(400) - ex.exception.message.should.equal( + ex.value.error_code.should.equal("DryRunOperation") + ex.value.status.should.equal(400) + ex.value.message.should.equal( "An error occurred (DryRunOperation) when calling the CreateNetworkInterface operation: Request would have succeeded, but DryRun flag is set" ) @@ -39,9 +39,9 @@ def test_elastic_network_interfaces(): with pytest.raises(EC2ResponseError) as ex: conn.delete_network_interface(eni.id, dry_run=True) - ex.exception.error_code.should.equal("DryRunOperation") - ex.exception.status.should.equal(400) - ex.exception.message.should.equal( + ex.value.error_code.should.equal("DryRunOperation") + ex.value.status.should.equal(400) + ex.value.message.should.equal( "An error occurred (DryRunOperation) when calling the DeleteNetworkInterface operation: Request would have succeeded, but DryRun flag is set" ) @@ -52,9 +52,9 @@ def test_elastic_network_interfaces(): with pytest.raises(EC2ResponseError) as cm: conn.delete_network_interface(eni.id) - cm.exception.error_code.should.equal("InvalidNetworkInterfaceID.NotFound") - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none + cm.value.error_code.should.equal("InvalidNetworkInterfaceID.NotFound") + cm.value.status.should.equal(400) + cm.value.request_id.should_not.be.none @mock_ec2_deprecated @@ -63,9 +63,9 @@ def test_elastic_network_interfaces_subnet_validation(): with pytest.raises(EC2ResponseError) as cm: conn.create_network_interface("subnet-abcd1234") - cm.exception.error_code.should.equal("InvalidSubnetID.NotFound") - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none + cm.value.error_code.should.equal("InvalidSubnetID.NotFound") + cm.value.status.should.equal(400) + cm.value.request_id.should_not.be.none @mock_ec2_deprecated @@ -136,9 +136,9 @@ def test_elastic_network_interfaces_modify_attribute(): conn.modify_network_interface_attribute( eni.id, "groupset", [security_group2.id], dry_run=True ) - ex.exception.error_code.should.equal("DryRunOperation") - ex.exception.status.should.equal(400) - ex.exception.message.should.equal( + ex.value.error_code.should.equal("DryRunOperation") + ex.value.status.should.equal(400) + ex.value.message.should.equal( "An error occurred (DryRunOperation) when calling the ModifyNetworkInterface operation: Request would have succeeded, but DryRun flag is set" ) @@ -229,9 +229,9 @@ def test_elastic_network_interfaces_get_by_tag_name(): with pytest.raises(ClientError) as ex: eni1.create_tags(Tags=[{"Key": "Name", "Value": "eni1"}], DryRun=True) - ex.exception.response["Error"]["Code"].should.equal("DryRunOperation") - ex.exception.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) - ex.exception.response["Error"]["Message"].should.equal( + ex.value.response["Error"]["Code"].should.equal("DryRunOperation") + ex.value.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) + ex.value.response["Error"]["Message"].should.equal( "An error occurred (DryRunOperation) when calling the CreateTags operation: Request would have succeeded, but DryRun flag is set" ) diff --git a/tests/test_ec2/test_flow_logs.py b/tests/test_ec2/test_flow_logs.py index 1dba572d3e8a..743466eaa472 100644 --- a/tests/test_ec2/test_flow_logs.py +++ b/tests/test_ec2/test_flow_logs.py @@ 
-44,9 +44,9 @@ def test_create_flow_logs_s3(): LogDestination="arn:aws:s3:::" + bucket.name, DryRun=True, ) - ex.exception.response["Error"]["Code"].should.equal("DryRunOperation") - ex.exception.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) - ex.exception.response["Error"]["Message"].should.equal( + ex.value.response["Error"]["Code"].should.equal("DryRunOperation") + ex.value.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) + ex.value.response["Error"]["Message"].should.equal( "An error occurred (DryRunOperation) when calling the CreateFlowLogs operation: Request would have succeeded, but DryRun flag is set" ) @@ -96,9 +96,9 @@ def test_create_flow_logs_cloud_watch(): DeliverLogsPermissionArn="arn:aws:iam::" + ACCOUNT_ID + ":role/test-role", DryRun=True, ) - ex.exception.response["Error"]["Code"].should.equal("DryRunOperation") - ex.exception.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) - ex.exception.response["Error"]["Message"].should.equal( + ex.value.response["Error"]["Code"].should.equal("DryRunOperation") + ex.value.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) + ex.value.response["Error"]["Message"].should.equal( "An error occurred (DryRunOperation) when calling the CreateFlowLogs operation: Request would have succeeded, but DryRun flag is set" ) @@ -244,17 +244,17 @@ def test_delete_flow_logs_non_existing(): with pytest.raises(ClientError) as ex: client.delete_flow_logs(FlowLogIds=["fl-1a2b3c4d"]) - ex.exception.response["Error"]["Code"].should.equal("InvalidFlowLogId.NotFound") - ex.exception.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) - ex.exception.response["Error"]["Message"].should.equal( + ex.value.response["Error"]["Code"].should.equal("InvalidFlowLogId.NotFound") + ex.value.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) + ex.value.response["Error"]["Message"].should.equal( "These flow log ids in the input list are not found: [TotalCount: 1] fl-1a2b3c4d" ) with pytest.raises(ClientError) as ex: client.delete_flow_logs(FlowLogIds=["fl-1a2b3c4d", "fl-2b3c4d5e"]) - ex.exception.response["Error"]["Code"].should.equal("InvalidFlowLogId.NotFound") - ex.exception.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) - ex.exception.response["Error"]["Message"].should.equal( + ex.value.response["Error"]["Code"].should.equal("InvalidFlowLogId.NotFound") + ex.value.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) + ex.value.response["Error"]["Message"].should.equal( "These flow log ids in the input list are not found: [TotalCount: 2] fl-1a2b3c4d fl-2b3c4d5e" ) @@ -312,9 +312,9 @@ def test_create_flow_logs_invalid_parameters(): LogDestination="arn:aws:s3:::" + bucket.name, MaxAggregationInterval=10, ) - ex.exception.response["Error"]["Code"].should.equal("InvalidParameter") - ex.exception.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) - ex.exception.response["Error"]["Message"].should.equal( + ex.value.response["Error"]["Code"].should.equal("InvalidParameter") + ex.value.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) + ex.value.response["Error"]["Message"].should.equal( "Invalid Flow Log Max Aggregation Interval" ) @@ -325,9 +325,9 @@ def test_create_flow_logs_invalid_parameters(): TrafficType="ALL", LogDestinationType="s3", ) - ex.exception.response["Error"]["Code"].should.equal("InvalidParameter") - ex.exception.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) - 
ex.exception.response["Error"]["Message"].should.equal( + ex.value.response["Error"]["Code"].should.equal("InvalidParameter") + ex.value.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) + ex.value.response["Error"]["Message"].should.equal( "LogDestination can't be empty if LogGroupName is not provided." ) @@ -339,9 +339,9 @@ def test_create_flow_logs_invalid_parameters(): LogDestinationType="s3", LogGroupName="test", ) - ex.exception.response["Error"]["Code"].should.equal("InvalidParameter") - ex.exception.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) - ex.exception.response["Error"]["Message"].should.equal( + ex.value.response["Error"]["Code"].should.equal("InvalidParameter") + ex.value.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) + ex.value.response["Error"]["Message"].should.equal( "LogDestination type must be cloud-watch-logs if LogGroupName is provided." ) @@ -352,9 +352,9 @@ def test_create_flow_logs_invalid_parameters(): TrafficType="ALL", LogGroupName="test", ) - ex.exception.response["Error"]["Code"].should.equal("InvalidParameter") - ex.exception.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) - ex.exception.response["Error"]["Message"].should.equal( + ex.value.response["Error"]["Code"].should.equal("InvalidParameter") + ex.value.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) + ex.value.response["Error"]["Message"].should.equal( "DeliverLogsPermissionArn can't be empty if LogDestinationType is cloud-watch-logs." ) @@ -375,9 +375,9 @@ def test_create_flow_logs_invalid_parameters(): LogDestinationType="s3", LogDestination="arn:aws:s3:::" + bucket.name, ) - ex.exception.response["Error"]["Code"].should.equal("FlowLogAlreadyExists") - ex.exception.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) - ex.exception.response["Error"]["Message"].should.equal( + ex.value.response["Error"]["Code"].should.equal("FlowLogAlreadyExists") + ex.value.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) + ex.value.response["Error"]["Message"].should.equal( "Error. There is an existing Flow Log with the same configuration and log destination." ) @@ -398,9 +398,9 @@ def test_create_flow_logs_invalid_parameters(): LogGroupName="test-group", DeliverLogsPermissionArn="arn:aws:iam::" + ACCOUNT_ID + ":role/test-role", ) - ex.exception.response["Error"]["Code"].should.equal("FlowLogAlreadyExists") - ex.exception.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) - ex.exception.response["Error"]["Message"].should.equal( + ex.value.response["Error"]["Code"].should.equal("FlowLogAlreadyExists") + ex.value.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) + ex.value.response["Error"]["Message"].should.equal( "Error. There is an existing Flow Log with the same configuration and log destination." 
) diff --git a/tests/test_ec2/test_general.py b/tests/test_ec2/test_general.py index b6e75ea6a6a8..c2b578929b79 100644 --- a/tests/test_ec2/test_general.py +++ b/tests/test_ec2/test_general.py @@ -1,6 +1,6 @@ from __future__ import unicode_literals -# Ensure 'assert_raises' context manager support for Python 2.6 +# Ensure 'pytest.raises' context manager support for Python 2.6 import pytest import boto @@ -26,9 +26,9 @@ def test_console_output_without_instance(): with pytest.raises(EC2ResponseError) as cm: conn.get_console_output("i-1234abcd") - cm.exception.code.should.equal("InvalidInstanceID.NotFound") - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none + cm.value.code.should.equal("InvalidInstanceID.NotFound") + cm.value.status.should.equal(400) + cm.value.request_id.should_not.be.none @mock_ec2 diff --git a/tests/test_ec2/test_instances.py b/tests/test_ec2/test_instances.py index b770862e2737..fefeee522c2c 100644 --- a/tests/test_ec2/test_instances.py +++ b/tests/test_ec2/test_instances.py @@ -1,6 +1,6 @@ from __future__ import unicode_literals -# Ensure 'assert_raises' context manager support for Python 2.6 +# Ensure 'pytest.raises' context manager support for Python 2.6 from botocore.exceptions import ClientError import pytest @@ -53,9 +53,9 @@ def test_instance_launch_and_terminate(): with pytest.raises(EC2ResponseError) as ex: reservation = conn.run_instances("ami-1234abcd", dry_run=True) - ex.exception.error_code.should.equal("DryRunOperation") - ex.exception.status.should.equal(400) - ex.exception.message.should.equal( + ex.value.error_code.should.equal("DryRunOperation") + ex.value.status.should.equal(400) + ex.value.message.should.equal( "An error occurred (DryRunOperation) when calling the RunInstance operation: Request would have succeeded, but DryRun flag is set" ) @@ -88,9 +88,9 @@ def test_instance_launch_and_terminate(): with pytest.raises(EC2ResponseError) as ex: conn.terminate_instances([instance.id], dry_run=True) - ex.exception.error_code.should.equal("DryRunOperation") - ex.exception.status.should.equal(400) - ex.exception.message.should.equal( + ex.value.error_code.should.equal("DryRunOperation") + ex.value.status.should.equal(400) + ex.value.message.should.equal( "An error occurred (DryRunOperation) when calling the TerminateInstance operation: Request would have succeeded, but DryRun flag is set" ) @@ -215,14 +215,14 @@ def test_instance_detach_volume_wrong_path(): ) instance = result[0] for volume in instance.volumes.all(): - with assert_raises(ClientError) as ex: + with pytest.raises(ClientError) as ex: instance.detach_volume(VolumeId=volume.volume_id, Device="/dev/sdf") - ex.exception.response["Error"]["Code"].should.equal( + ex.value.response["Error"]["Code"].should.equal( "InvalidAttachment.NotFound" ) - ex.exception.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) - ex.exception.response["Error"]["Message"].should.equal( + ex.value.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) + ex.value.response["Error"]["Message"].should.equal( "The volume {0} is not attached to instance {1} as device {2}".format( volume.volume_id, instance.instance_id, "/dev/sdf" ) @@ -291,9 +291,9 @@ def test_get_instances_by_id(): # Call get_all_instances with a bad id should raise an error with pytest.raises(EC2ResponseError) as cm: conn.get_all_instances(instance_ids=[instance1.id, "i-1234abcd"]) - cm.exception.code.should.equal("InvalidInstanceID.NotFound") - cm.exception.status.should.equal(400) - 
cm.exception.request_id.should_not.be.none + cm.value.code.should.equal("InvalidInstanceID.NotFound") + cm.value.status.should.equal(400) + cm.value.request_id.should_not.be.none @mock_ec2 @@ -744,9 +744,9 @@ def test_instance_start_and_stop(): with pytest.raises(EC2ResponseError) as ex: stopped_instances = conn.stop_instances(instance_ids, dry_run=True) - ex.exception.error_code.should.equal("DryRunOperation") - ex.exception.status.should.equal(400) - ex.exception.message.should.equal( + ex.value.error_code.should.equal("DryRunOperation") + ex.value.status.should.equal(400) + ex.value.message.should.equal( "An error occurred (DryRunOperation) when calling the StopInstance operation: Request would have succeeded, but DryRun flag is set" ) @@ -757,9 +757,9 @@ def test_instance_start_and_stop(): with pytest.raises(EC2ResponseError) as ex: started_instances = conn.start_instances([instances[0].id], dry_run=True) - ex.exception.error_code.should.equal("DryRunOperation") - ex.exception.status.should.equal(400) - ex.exception.message.should.equal( + ex.value.error_code.should.equal("DryRunOperation") + ex.value.status.should.equal(400) + ex.value.message.should.equal( "An error occurred (DryRunOperation) when calling the StartInstance operation: Request would have succeeded, but DryRun flag is set" ) @@ -775,9 +775,9 @@ def test_instance_reboot(): with pytest.raises(EC2ResponseError) as ex: instance.reboot(dry_run=True) - ex.exception.error_code.should.equal("DryRunOperation") - ex.exception.status.should.equal(400) - ex.exception.message.should.equal( + ex.value.error_code.should.equal("DryRunOperation") + ex.value.status.should.equal(400) + ex.value.message.should.equal( "An error occurred (DryRunOperation) when calling the RebootInstance operation: Request would have succeeded, but DryRun flag is set" ) @@ -793,9 +793,9 @@ def test_instance_attribute_instance_type(): with pytest.raises(EC2ResponseError) as ex: instance.modify_attribute("instanceType", "m1.small", dry_run=True) - ex.exception.error_code.should.equal("DryRunOperation") - ex.exception.status.should.equal(400) - ex.exception.message.should.equal( + ex.value.error_code.should.equal("DryRunOperation") + ex.value.status.should.equal(400) + ex.value.message.should.equal( "An error occurred (DryRunOperation) when calling the ModifyInstanceType operation: Request would have succeeded, but DryRun flag is set" ) @@ -821,9 +821,9 @@ def test_modify_instance_attribute_security_groups(): with pytest.raises(EC2ResponseError) as ex: instance.modify_attribute("groupSet", [sg_id, sg_id2], dry_run=True) - ex.exception.error_code.should.equal("DryRunOperation") - ex.exception.status.should.equal(400) - ex.exception.message.should.equal( + ex.value.error_code.should.equal("DryRunOperation") + ex.value.status.should.equal(400) + ex.value.message.should.equal( "An error occurred (DryRunOperation) when calling the ModifyInstanceSecurityGroups operation: Request would have succeeded, but DryRun flag is set" ) @@ -844,9 +844,9 @@ def test_instance_attribute_user_data(): with pytest.raises(EC2ResponseError) as ex: instance.modify_attribute("userData", "this is my user data", dry_run=True) - ex.exception.error_code.should.equal("DryRunOperation") - ex.exception.status.should.equal(400) - ex.exception.message.should.equal( + ex.value.error_code.should.equal("DryRunOperation") + ex.value.status.should.equal(400) + ex.value.message.should.equal( "An error occurred (DryRunOperation) when calling the ModifyUserData operation: Request would have succeeded, but 
DryRun flag is set" ) @@ -874,9 +874,9 @@ def test_instance_attribute_source_dest_check(): with pytest.raises(EC2ResponseError) as ex: instance.modify_attribute("sourceDestCheck", False, dry_run=True) - ex.exception.error_code.should.equal("DryRunOperation") - ex.exception.status.should.equal(400) - ex.exception.message.should.equal( + ex.value.error_code.should.equal("DryRunOperation") + ex.value.status.should.equal(400) + ex.value.message.should.equal( "An error occurred (DryRunOperation) when calling the ModifySourceDestCheck operation: Request would have succeeded, but DryRun flag is set" ) @@ -920,9 +920,9 @@ def test_run_instance_with_security_group_name(): with pytest.raises(EC2ResponseError) as ex: group = conn.create_security_group("group1", "some description", dry_run=True) - ex.exception.error_code.should.equal("DryRunOperation") - ex.exception.status.should.equal(400) - ex.exception.message.should.equal( + ex.value.error_code.should.equal("DryRunOperation") + ex.value.status.should.equal(400) + ex.value.message.should.equal( "An error occurred (DryRunOperation) when calling the CreateSecurityGroup operation: Request would have succeeded, but DryRun flag is set" ) @@ -1197,9 +1197,9 @@ def test_instance_with_nic_attach_detach(): # Attach with pytest.raises(EC2ResponseError) as ex: conn.attach_network_interface(eni.id, instance.id, device_index=1, dry_run=True) - ex.exception.error_code.should.equal("DryRunOperation") - ex.exception.status.should.equal(400) - ex.exception.message.should.equal( + ex.value.error_code.should.equal("DryRunOperation") + ex.value.status.should.equal(400) + ex.value.message.should.equal( "An error occurred (DryRunOperation) when calling the AttachNetworkInterface operation: Request would have succeeded, but DryRun flag is set" ) @@ -1224,9 +1224,9 @@ def test_instance_with_nic_attach_detach(): # Detach with pytest.raises(EC2ResponseError) as ex: conn.detach_network_interface(instance_eni.attachment.id, dry_run=True) - ex.exception.error_code.should.equal("DryRunOperation") - ex.exception.status.should.equal(400) - ex.exception.message.should.equal( + ex.value.error_code.should.equal("DryRunOperation") + ex.value.status.should.equal(400) + ex.value.message.should.equal( "An error occurred (DryRunOperation) when calling the DetachNetworkInterface operation: Request would have succeeded, but DryRun flag is set" ) @@ -1243,9 +1243,9 @@ def test_instance_with_nic_attach_detach(): # Detach with invalid attachment ID with pytest.raises(EC2ResponseError) as cm: conn.detach_network_interface("eni-attach-1234abcd") - cm.exception.code.should.equal("InvalidAttachmentID.NotFound") - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none + cm.value.code.should.equal("InvalidAttachmentID.NotFound") + cm.value.status.should.equal(400) + cm.value.request_id.should_not.be.none @mock_ec2_deprecated @@ -1306,12 +1306,12 @@ def test_run_instance_with_block_device_mappings_missing_ebs(): "InstanceType": "t1.micro", "BlockDeviceMappings": [{"DeviceName": "/dev/sda2"}], } - with assert_raises(ClientError) as ex: + with pytest.raises(ClientError) as ex: ec2_client.run_instances(**kwargs) - ex.exception.response["Error"]["Code"].should.equal("MissingParameter") - ex.exception.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) - ex.exception.response["Error"]["Message"].should.equal( + ex.value.response["Error"]["Code"].should.equal("MissingParameter") + ex.value.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) + 
ex.value.response["Error"]["Message"].should.equal( "The request must contain the parameter ebs" ) @@ -1330,12 +1330,12 @@ def test_run_instance_with_block_device_mappings_missing_size(): {"DeviceName": "/dev/sda2", "Ebs": {"VolumeType": "standard"}} ], } - with assert_raises(ClientError) as ex: + with pytest.raises(ClientError) as ex: ec2_client.run_instances(**kwargs) - ex.exception.response["Error"]["Code"].should.equal("MissingParameter") - ex.exception.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) - ex.exception.response["Error"]["Message"].should.equal( + ex.value.response["Error"]["Code"].should.equal("MissingParameter") + ex.value.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) + ex.value.response["Error"]["Message"].should.equal( "The request must contain the parameter size or snapshotId" ) @@ -1411,9 +1411,9 @@ def test_describe_instance_status_with_instance_filter_deprecated(): # Call get_all_instance_status with a bad id should raise an error with pytest.raises(EC2ResponseError) as cm: conn.get_all_instance_status(instance_ids=[instance.id, "i-1234abcd"]) - cm.exception.code.should.equal("InvalidInstanceID.NotFound") - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none + cm.value.code.should.equal("InvalidInstanceID.NotFound") + cm.value.status.should.equal(400) + cm.value.request_id.should_not.be.none @mock_ec2 @@ -1540,9 +1540,9 @@ def test_get_instance_by_security_group(): conn.modify_instance_attribute( instance.id, "groupSet", [security_group.id], dry_run=True ) - ex.exception.error_code.should.equal("DryRunOperation") - ex.exception.status.should.equal(400) - ex.exception.message.should.equal( + ex.value.error_code.should.equal("DryRunOperation") + ex.value.status.should.equal(400) + ex.value.message.should.equal( "An error occurred (DryRunOperation) when calling the ModifyInstanceSecurityGroups operation: Request would have succeeded, but DryRun flag is set" ) @@ -1664,9 +1664,9 @@ def test_describe_instance_attribute(): client.describe_instance_attribute( InstanceId=instance_id, Attribute=invalid_instance_attribute ) - ex.exception.response["Error"]["Code"].should.equal("InvalidParameterValue") - ex.exception.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) + ex.value.response["Error"]["Code"].should.equal("InvalidParameterValue") + ex.value.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) message = "Value ({invalid_instance_attribute}) for parameter attribute is invalid. 
Unknown attribute.".format( invalid_instance_attribute=invalid_instance_attribute ) - ex.exception.response["Error"]["Message"].should.equal(message) + ex.value.response["Error"]["Message"].should.equal(message) diff --git a/tests/test_ec2/test_internet_gateways.py b/tests/test_ec2/test_internet_gateways.py index cfa8bafe908c..49cc6e38cdab 100644 --- a/tests/test_ec2/test_internet_gateways.py +++ b/tests/test_ec2/test_internet_gateways.py @@ -1,6 +1,6 @@ from __future__ import unicode_literals -# Ensure 'assert_raises' context manager support for Python 2.6 +# Ensure 'pytest.raises' context manager support for Python 2.6 import pytest import re @@ -29,9 +29,9 @@ def test_igw_create(): with pytest.raises(EC2ResponseError) as ex: igw = conn.create_internet_gateway(dry_run=True) - ex.exception.error_code.should.equal("DryRunOperation") - ex.exception.status.should.equal(400) - ex.exception.message.should.equal( + ex.value.error_code.should.equal("DryRunOperation") + ex.value.status.should.equal(400) + ex.value.message.should.equal( "An error occurred (DryRunOperation) when calling the CreateInternetGateway operation: Request would have succeeded, but DryRun flag is set" ) @@ -52,9 +52,9 @@ def test_igw_attach(): with pytest.raises(EC2ResponseError) as ex: conn.attach_internet_gateway(igw.id, vpc.id, dry_run=True) - ex.exception.error_code.should.equal("DryRunOperation") - ex.exception.status.should.equal(400) - ex.exception.message.should.equal( + ex.value.error_code.should.equal("DryRunOperation") + ex.value.status.should.equal(400) + ex.value.message.should.equal( "An error occurred (DryRunOperation) when calling the AttachInternetGateway operation: Request would have succeeded, but DryRun flag is set" ) @@ -72,9 +72,9 @@ def test_igw_attach_bad_vpc(): with pytest.raises(EC2ResponseError) as cm: conn.attach_internet_gateway(igw.id, BAD_VPC) - cm.exception.code.should.equal("InvalidVpcID.NotFound") - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none + cm.value.code.should.equal("InvalidVpcID.NotFound") + cm.value.status.should.equal(400) + cm.value.request_id.should_not.be.none @mock_ec2_deprecated @@ -88,9 +88,9 @@ def test_igw_attach_twice(): with pytest.raises(EC2ResponseError) as cm: conn.attach_internet_gateway(igw.id, vpc2.id) - cm.exception.code.should.equal("Resource.AlreadyAssociated") - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none + cm.value.code.should.equal("Resource.AlreadyAssociated") + cm.value.status.should.equal(400) + cm.value.request_id.should_not.be.none @mock_ec2_deprecated @@ -103,9 +103,9 @@ def test_igw_detach(): with pytest.raises(EC2ResponseError) as ex: conn.detach_internet_gateway(igw.id, vpc.id, dry_run=True) - ex.exception.error_code.should.equal("DryRunOperation") - ex.exception.status.should.equal(400) - ex.exception.message.should.equal( + ex.value.error_code.should.equal("DryRunOperation") + ex.value.status.should.equal(400) + ex.value.message.should.equal( "An error occurred (DryRunOperation) when calling the DetachInternetGateway operation: Request would have succeeded, but DryRun flag is set" ) @@ -125,9 +125,9 @@ def test_igw_detach_wrong_vpc(): with pytest.raises(EC2ResponseError) as cm: conn.detach_internet_gateway(igw.id, vpc2.id) - cm.exception.code.should.equal("Gateway.NotAttached") - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none + cm.value.code.should.equal("Gateway.NotAttached") + cm.value.status.should.equal(400) + 
cm.value.request_id.should_not.be.none @mock_ec2_deprecated @@ -140,9 +140,9 @@ def test_igw_detach_invalid_vpc(): with pytest.raises(EC2ResponseError) as cm: conn.detach_internet_gateway(igw.id, BAD_VPC) - cm.exception.code.should.equal("Gateway.NotAttached") - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none + cm.value.code.should.equal("Gateway.NotAttached") + cm.value.status.should.equal(400) + cm.value.request_id.should_not.be.none @mock_ec2_deprecated @@ -154,9 +154,9 @@ def test_igw_detach_unattached(): with pytest.raises(EC2ResponseError) as cm: conn.detach_internet_gateway(igw.id, vpc.id) - cm.exception.code.should.equal("Gateway.NotAttached") - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none + cm.value.code.should.equal("Gateway.NotAttached") + cm.value.status.should.equal(400) + cm.value.request_id.should_not.be.none @mock_ec2_deprecated @@ -170,9 +170,9 @@ def test_igw_delete(): with pytest.raises(EC2ResponseError) as ex: conn.delete_internet_gateway(igw.id, dry_run=True) - ex.exception.error_code.should.equal("DryRunOperation") - ex.exception.status.should.equal(400) - ex.exception.message.should.equal( + ex.value.error_code.should.equal("DryRunOperation") + ex.value.status.should.equal(400) + ex.value.message.should.equal( "An error occurred (DryRunOperation) when calling the DeleteInternetGateway operation: Request would have succeeded, but DryRun flag is set" ) @@ -190,9 +190,9 @@ def test_igw_delete_attached(): with pytest.raises(EC2ResponseError) as cm: conn.delete_internet_gateway(igw.id) - cm.exception.code.should.equal("DependencyViolation") - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none + cm.value.code.should.equal("DependencyViolation") + cm.value.status.should.equal(400) + cm.value.request_id.should_not.be.none @mock_ec2_deprecated @@ -210,9 +210,9 @@ def test_igw_describe_bad_id(): conn = boto.connect_vpc("the_key", "the_secret") with pytest.raises(EC2ResponseError) as cm: conn.get_all_internet_gateways([BAD_IGW]) - cm.exception.code.should.equal("InvalidInternetGatewayID.NotFound") - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none + cm.value.code.should.equal("InvalidInternetGatewayID.NotFound") + cm.value.status.should.equal(400) + cm.value.request_id.should_not.be.none @mock_ec2_deprecated diff --git a/tests/test_ec2/test_key_pairs.py b/tests/test_ec2/test_key_pairs.py index 022b4ceeb1ae..dcca8b116350 100644 --- a/tests/test_ec2/test_key_pairs.py +++ b/tests/test_ec2/test_key_pairs.py @@ -1,6 +1,6 @@ from __future__ import unicode_literals -# Ensure 'assert_raises' context manager support for Python 2.6 +# Ensure 'pytest.raises' context manager support for Python 2.6 import pytest import boto @@ -57,9 +57,9 @@ def test_key_pairs_invalid_id(): with pytest.raises(EC2ResponseError) as cm: conn.get_all_key_pairs("foo") - cm.exception.code.should.equal("InvalidKeyPair.NotFound") - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none + cm.value.code.should.equal("InvalidKeyPair.NotFound") + cm.value.status.should.equal(400) + cm.value.request_id.should_not.be.none @mock_ec2_deprecated @@ -68,9 +68,9 @@ def test_key_pairs_create(): with pytest.raises(EC2ResponseError) as ex: conn.create_key_pair("foo", dry_run=True) - ex.exception.error_code.should.equal("DryRunOperation") - ex.exception.status.should.equal(400) - ex.exception.message.should.equal( + 
ex.value.error_code.should.equal("DryRunOperation") + ex.value.status.should.equal(400) + ex.value.message.should.equal( "An error occurred (DryRunOperation) when calling the CreateKeyPair operation: Request would have succeeded, but DryRun flag is set" ) @@ -111,9 +111,9 @@ def test_key_pairs_create_exist(): with pytest.raises(EC2ResponseError) as cm: conn.create_key_pair("foo") - cm.exception.code.should.equal("InvalidKeyPair.Duplicate") - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none + cm.value.code.should.equal("InvalidKeyPair.Duplicate") + cm.value.status.should.equal(400) + cm.value.request_id.should_not.be.none @mock_ec2_deprecated @@ -131,9 +131,9 @@ def test_key_pairs_delete_exist(): with pytest.raises(EC2ResponseError) as ex: r = conn.delete_key_pair("foo", dry_run=True) - ex.exception.error_code.should.equal("DryRunOperation") - ex.exception.status.should.equal(400) - ex.exception.message.should.equal( + ex.value.error_code.should.equal("DryRunOperation") + ex.value.status.should.equal(400) + ex.value.message.should.equal( "An error occurred (DryRunOperation) when calling the DeleteKeyPair operation: Request would have succeeded, but DryRun flag is set" ) @@ -148,9 +148,9 @@ def test_key_pairs_import(): with pytest.raises(EC2ResponseError) as ex: conn.import_key_pair("foo", RSA_PUBLIC_KEY_OPENSSH, dry_run=True) - ex.exception.error_code.should.equal("DryRunOperation") - ex.exception.status.should.equal(400) - ex.exception.message.should.equal( + ex.value.error_code.should.equal("DryRunOperation") + ex.value.status.should.equal(400) + ex.value.message.should.equal( "An error occurred (DryRunOperation) when calling the ImportKeyPair operation: Request would have succeeded, but DryRun flag is set" ) @@ -177,9 +177,9 @@ def test_key_pairs_import_exist(): with pytest.raises(EC2ResponseError) as cm: conn.create_key_pair("foo") - cm.exception.code.should.equal("InvalidKeyPair.Duplicate") - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none + cm.value.code.should.equal("InvalidKeyPair.Duplicate") + cm.value.status.should.equal(400) + cm.value.request_id.should_not.be.none @mock_ec2_deprecated @@ -188,21 +188,21 @@ def test_key_pairs_invalid(): with pytest.raises(EC2ResponseError) as ex: conn.import_key_pair("foo", b"") - ex.exception.error_code.should.equal("InvalidKeyPair.Format") - ex.exception.status.should.equal(400) - ex.exception.message.should.equal("Key is not in valid OpenSSH public key format") + ex.value.error_code.should.equal("InvalidKeyPair.Format") + ex.value.status.should.equal(400) + ex.value.message.should.equal("Key is not in valid OpenSSH public key format") with pytest.raises(EC2ResponseError) as ex: conn.import_key_pair("foo", b"garbage") - ex.exception.error_code.should.equal("InvalidKeyPair.Format") - ex.exception.status.should.equal(400) - ex.exception.message.should.equal("Key is not in valid OpenSSH public key format") + ex.value.error_code.should.equal("InvalidKeyPair.Format") + ex.value.status.should.equal(400) + ex.value.message.should.equal("Key is not in valid OpenSSH public key format") with pytest.raises(EC2ResponseError) as ex: conn.import_key_pair("foo", DSA_PUBLIC_KEY_OPENSSH) - ex.exception.error_code.should.equal("InvalidKeyPair.Format") - ex.exception.status.should.equal(400) - ex.exception.message.should.equal("Key is not in valid OpenSSH public key format") + ex.value.error_code.should.equal("InvalidKeyPair.Format") + ex.value.status.should.equal(400) + 
ex.value.message.should.equal("Key is not in valid OpenSSH public key format") @mock_ec2_deprecated diff --git a/tests/test_ec2/test_launch_templates.py b/tests/test_ec2/test_launch_templates.py index 0bcf188ce548..41896be96d62 100644 --- a/tests/test_ec2/test_launch_templates.py +++ b/tests/test_ec2/test_launch_templates.py @@ -43,7 +43,7 @@ def test_launch_template_create(): }, ) - str(ex.exception).should.equal( + str(ex.value).should.equal( "An error occurred (InvalidLaunchTemplateName.AlreadyExistsException) when calling the CreateLaunchTemplate operation: Launch template name already in use." ) diff --git a/tests/test_ec2/test_network_acls.py b/tests/test_ec2/test_network_acls.py index 1bb0587335fa..c2a790ed7e9a 100644 --- a/tests/test_ec2/test_network_acls.py +++ b/tests/test_ec2/test_network_acls.py @@ -269,7 +269,7 @@ def test_duplicate_network_acl_entry(): RuleAction="deny", RuleNumber=rule_number, ) - str(ex.exception).should.equal( + str(ex.value).should.equal( "An error occurred (NetworkAclEntryAlreadyExists) when calling the CreateNetworkAclEntry " "operation: The network acl entry identified by {} already exists.".format( rule_number @@ -297,10 +297,10 @@ def test_describe_network_acls(): resp2 = conn.describe_network_acls()["NetworkAcls"] resp2.should.have.length_of(3) - with assert_raises(ClientError) as ex: + with pytest.raises(ClientError) as ex: conn.describe_network_acls(NetworkAclIds=["1"]) - str(ex.exception).should.equal( + str(ex.value).should.equal( "An error occurred (InvalidRouteTableID.NotFound) when calling the " "DescribeNetworkAcls operation: The routeTable ID '1' does not exist" ) diff --git a/tests/test_ec2/test_route_tables.py b/tests/test_ec2/test_route_tables.py index 4ebfeb7376d0..1eff22315e68 100644 --- a/tests/test_ec2/test_route_tables.py +++ b/tests/test_ec2/test_route_tables.py @@ -1,6 +1,6 @@ from __future__ import unicode_literals -# Ensure 'assert_raises' context manager support for Python 2.6 +# Ensure 'pytest.raises' context manager support for Python 2.6 import pytest import boto @@ -62,9 +62,9 @@ def test_route_tables_additional(): with pytest.raises(EC2ResponseError) as cm: conn.delete_vpc(vpc.id) - cm.exception.code.should.equal("DependencyViolation") - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none + cm.value.code.should.equal("DependencyViolation") + cm.value.status.should.equal(400) + cm.value.request_id.should_not.be.none conn.delete_route_table(route_table.id) @@ -73,9 +73,9 @@ def test_route_tables_additional(): with pytest.raises(EC2ResponseError) as cm: conn.delete_route_table("rtb-1234abcd") - cm.exception.code.should.equal("InvalidRouteTableID.NotFound") - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none + cm.value.code.should.equal("InvalidRouteTableID.NotFound") + cm.value.status.should.equal(400) + cm.value.request_id.should_not.be.none @mock_ec2_deprecated @@ -198,9 +198,9 @@ def test_route_table_associations(): # Error: Attempt delete associated route table. 
with pytest.raises(EC2ResponseError) as cm: conn.delete_route_table(route_table.id) - cm.exception.code.should.equal("DependencyViolation") - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none + cm.value.code.should.equal("DependencyViolation") + cm.value.status.should.equal(400) + cm.value.request_id.should_not.be.none # Disassociate conn.disassociate_route_table(association_id) @@ -212,23 +212,23 @@ def test_route_table_associations(): # Error: Disassociate with invalid association ID with pytest.raises(EC2ResponseError) as cm: conn.disassociate_route_table(association_id) - cm.exception.code.should.equal("InvalidAssociationID.NotFound") - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none + cm.value.code.should.equal("InvalidAssociationID.NotFound") + cm.value.status.should.equal(400) + cm.value.request_id.should_not.be.none # Error: Associate with invalid subnet ID with pytest.raises(EC2ResponseError) as cm: conn.associate_route_table(route_table.id, "subnet-1234abcd") - cm.exception.code.should.equal("InvalidSubnetID.NotFound") - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none + cm.value.code.should.equal("InvalidSubnetID.NotFound") + cm.value.status.should.equal(400) + cm.value.request_id.should_not.be.none # Error: Associate with invalid route table ID with pytest.raises(EC2ResponseError) as cm: conn.associate_route_table("rtb-1234abcd", subnet.id) - cm.exception.code.should.equal("InvalidRouteTableID.NotFound") - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none + cm.value.code.should.equal("InvalidRouteTableID.NotFound") + cm.value.status.should.equal(400) + cm.value.request_id.should_not.be.none @requires_boto_gte("2.16.0") @@ -296,16 +296,16 @@ def test_route_table_replace_route_table_association(): conn.replace_route_table_association_with_assoc( "rtbassoc-1234abcd", route_table1.id ) - cm.exception.code.should.equal("InvalidAssociationID.NotFound") - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none + cm.value.code.should.equal("InvalidAssociationID.NotFound") + cm.value.status.should.equal(400) + cm.value.request_id.should_not.be.none # Error: Replace association with invalid route table ID with pytest.raises(EC2ResponseError) as cm: conn.replace_route_table_association_with_assoc(association_id2, "rtb-1234abcd") - cm.exception.code.should.equal("InvalidRouteTableID.NotFound") - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none + cm.value.code.should.equal("InvalidRouteTableID.NotFound") + cm.value.status.should.equal(400) + cm.value.request_id.should_not.be.none @mock_ec2_deprecated @@ -390,9 +390,9 @@ def test_routes_additional(): with pytest.raises(EC2ResponseError) as cm: conn.delete_route(main_route_table.id, ROUTE_CIDR) - cm.exception.code.should.equal("InvalidRoute.NotFound") - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none + cm.value.code.should.equal("InvalidRoute.NotFound") + cm.value.status.should.equal(400) + cm.value.request_id.should_not.be.none @mock_ec2_deprecated @@ -443,9 +443,9 @@ def get_target_route(): with pytest.raises(EC2ResponseError) as cm: conn.replace_route("rtb-1234abcd", ROUTE_CIDR, gateway_id=igw.id) - cm.exception.code.should.equal("InvalidRouteTableID.NotFound") - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none + cm.value.code.should.equal("InvalidRouteTableID.NotFound") + 
cm.value.status.should.equal(400) + cm.value.request_id.should_not.be.none @requires_boto_gte("2.19.0") @@ -574,7 +574,7 @@ def test_create_route_with_invalid_destination_cidr_block_parameter(): route = route_table.create_route( DestinationCidrBlock=destination_cidr_block, GatewayId=internet_gateway.id ) - str(ex.exception).should.equal( + str(ex.value).should.equal( "An error occurred (InvalidParameterValue) when calling the CreateRoute " "operation: Value ({}) for parameter destinationCidrBlock is invalid. This is not a valid CIDR block.".format( destination_cidr_block diff --git a/tests/test_ec2/test_security_groups.py b/tests/test_ec2/test_security_groups.py index 9f8c1aecd5bd..7ce9f3c5cebe 100644 --- a/tests/test_ec2/test_security_groups.py +++ b/tests/test_ec2/test_security_groups.py @@ -3,7 +3,7 @@ import copy import json -# Ensure 'assert_raises' context manager support for Python 2.6 +# Ensure 'pytest.raises' context manager support for Python 2.6 import pytest import boto3 @@ -23,9 +23,9 @@ def test_create_and_describe_security_group(): security_group = conn.create_security_group( "test security group", "this is a test security group", dry_run=True ) - ex.exception.error_code.should.equal("DryRunOperation") - ex.exception.status.should.equal(400) - ex.exception.message.should.equal( + ex.value.error_code.should.equal("DryRunOperation") + ex.value.status.should.equal(400) + ex.value.message.should.equal( "An error occurred (DryRunOperation) when calling the CreateSecurityGroup operation: Request would have succeeded, but DryRun flag is set" ) @@ -41,9 +41,9 @@ def test_create_and_describe_security_group(): conn.create_security_group( "test security group", "this is a test security group" ) - cm.exception.code.should.equal("InvalidGroup.Duplicate") - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none + cm.value.code.should.equal("InvalidGroup.Duplicate") + cm.value.status.should.equal(400) + cm.value.request_id.should_not.be.none all_groups = conn.get_all_security_groups() # The default group gets created automatically @@ -58,9 +58,9 @@ def test_create_security_group_without_description_raises_error(): with pytest.raises(EC2ResponseError) as cm: conn.create_security_group("test security group", "") - cm.exception.code.should.equal("MissingParameter") - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none + cm.value.code.should.equal("MissingParameter") + cm.value.status.should.equal(400) + cm.value.request_id.should_not.be.none @mock_ec2_deprecated @@ -90,9 +90,9 @@ def test_create_and_describe_vpc_security_group(): conn.create_security_group( "test security group", "this is a test security group", vpc_id ) - cm.exception.code.should.equal("InvalidGroup.Duplicate") - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none + cm.value.code.should.equal("InvalidGroup.Duplicate") + cm.value.status.should.equal(400) + cm.value.request_id.should_not.be.none all_groups = conn.get_all_security_groups(filters={"vpc_id": [vpc_id]}) @@ -147,16 +147,16 @@ def test_deleting_security_groups(): # Deleting a group that doesn't exist should throw an error with pytest.raises(EC2ResponseError) as cm: conn.delete_security_group("foobar") - cm.exception.code.should.equal("InvalidGroup.NotFound") - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none + cm.value.code.should.equal("InvalidGroup.NotFound") + cm.value.status.should.equal(400) + cm.value.request_id.should_not.be.none # 
     with pytest.raises(EC2ResponseError) as ex:
         conn.delete_security_group("test2", dry_run=True)
-    ex.exception.error_code.should.equal("DryRunOperation")
-    ex.exception.status.should.equal(400)
-    ex.exception.message.should.equal(
+    ex.value.error_code.should.equal("DryRunOperation")
+    ex.value.status.should.equal(400)
+    ex.value.message.should.equal(
         "An error occurred (DryRunOperation) when calling the DeleteSecurityGroup operation: Request would have succeeded, but DryRun flag is set"
     )
@@ -191,9 +191,9 @@ def test_authorize_ip_range_and_revoke():
             cidr_ip="123.123.123.123/32",
             dry_run=True,
         )
-    ex.exception.error_code.should.equal("DryRunOperation")
-    ex.exception.status.should.equal(400)
-    ex.exception.message.should.equal(
+    ex.value.error_code.should.equal("DryRunOperation")
+    ex.value.status.should.equal(400)
+    ex.value.message.should.equal(
         "An error occurred (DryRunOperation) when calling the GrantSecurityGroupIngress operation: Request would have succeeded, but DryRun flag is set"
     )
@@ -214,9 +214,9 @@ def test_authorize_ip_range_and_revoke():
             to_port="2222",
             cidr_ip="123.123.123.122/32",
         )
-    cm.exception.code.should.equal("InvalidPermission.NotFound")
-    cm.exception.status.should.equal(400)
-    cm.exception.request_id.should_not.be.none
+    cm.value.code.should.equal("InvalidPermission.NotFound")
+    cm.value.status.should.equal(400)
+    cm.value.request_id.should_not.be.none
 
     # Actually revoke
     with pytest.raises(EC2ResponseError) as ex:
@@ -227,9 +227,9 @@ def test_authorize_ip_range_and_revoke():
             cidr_ip="123.123.123.123/32",
             dry_run=True,
         )
-    ex.exception.error_code.should.equal("DryRunOperation")
-    ex.exception.status.should.equal(400)
-    ex.exception.message.should.equal(
+    ex.value.error_code.should.equal("DryRunOperation")
+    ex.value.status.should.equal(400)
+    ex.value.message.should.equal(
         "An error occurred (DryRunOperation) when calling the RevokeSecurityGroupIngress operation: Request would have succeeded, but DryRun flag is set"
     )
@@ -254,9 +254,9 @@ def test_authorize_ip_range_and_revoke():
             cidr_ip="123.123.123.123/32",
             dry_run=True,
         )
-    ex.exception.error_code.should.equal("DryRunOperation")
-    ex.exception.status.should.equal(400)
-    ex.exception.message.should.equal(
+    ex.value.error_code.should.equal("DryRunOperation")
+    ex.value.status.should.equal(400)
+    ex.value.message.should.equal(
         "An error occurred (DryRunOperation) when calling the GrantSecurityGroupEgress operation: Request would have succeeded, but DryRun flag is set"
     )
@@ -293,9 +293,9 @@ def test_authorize_ip_range_and_revoke():
             cidr_ip="123.123.123.123/32",
             dry_run=True,
         )
-    ex.exception.error_code.should.equal("DryRunOperation")
-    ex.exception.status.should.equal(400)
-    ex.exception.message.should.equal(
+    ex.value.error_code.should.equal("DryRunOperation")
+    ex.value.status.should.equal(400)
+    ex.value.message.should.equal(
         "An error occurred (DryRunOperation) when calling the RevokeSecurityGroupEgress operation: Request would have succeeded, but DryRun flag is set"
     )
@@ -338,9 +338,9 @@ def test_authorize_other_group_and_revoke():
         security_group.revoke(
             ip_protocol="tcp", from_port="22", to_port="2222", src_group=wrong_group
         )
-    cm.exception.code.should.equal("InvalidPermission.NotFound")
-    cm.exception.status.should.equal(400)
-    cm.exception.request_id.should_not.be.none
+    cm.value.code.should.equal("InvalidPermission.NotFound")
+    cm.value.status.should.equal(400)
+    cm.value.request_id.should_not.be.none
 
     # Actually revoke
     security_group.revoke(
@@ -441,9 +441,9 @@ def test_get_all_security_groups():
 
     with pytest.raises(EC2ResponseError) as cm:
         conn.get_all_security_groups(groupnames=["does_not_exist"])
-    cm.exception.code.should.equal("InvalidGroup.NotFound")
-    cm.exception.status.should.equal(400)
-    cm.exception.request_id.should_not.be.none
+    cm.value.code.should.equal("InvalidGroup.NotFound")
+    cm.value.status.should.equal(400)
+    cm.value.request_id.should_not.be.none
 
     resp.should.have.length_of(1)
     resp[0].id.should.equal(sg1.id)
@@ -472,9 +472,9 @@ def test_authorize_bad_cidr_throws_invalid_parameter_value():
         security_group.authorize(
             ip_protocol="tcp", from_port="22", to_port="2222", cidr_ip="123.123.123.123"
         )
-    cm.exception.code.should.equal("InvalidParameterValue")
-    cm.exception.status.should.equal(400)
-    cm.exception.request_id.should_not.be.none
+    cm.value.code.should.equal("InvalidParameterValue")
+    cm.value.status.should.equal(400)
+    cm.value.request_id.should_not.be.none
 
 
 @mock_ec2_deprecated
@@ -486,9 +486,9 @@ def test_security_group_tagging():
 
     with pytest.raises(EC2ResponseError) as ex:
         sg.add_tag("Test", "Tag", dry_run=True)
-    ex.exception.error_code.should.equal("DryRunOperation")
-    ex.exception.status.should.equal(400)
-    ex.exception.message.should.equal(
+    ex.value.error_code.should.equal("DryRunOperation")
+    ex.value.status.should.equal(400)
+    ex.value.message.should.equal(
         "An error occurred (DryRunOperation) when calling the CreateTags operation: Request would have succeeded, but DryRun flag is set"
     )
@@ -539,7 +539,7 @@ def test_sec_group_rule_limit():
             ip_protocol="-1",
             cidr_ip=["{0}.0.0.0/0".format(i) for i in range(110)],
         )
-    cm.exception.error_code.should.equal("RulesPerSecurityGroupLimitExceeded")
+    cm.value.error_code.should.equal("RulesPerSecurityGroupLimitExceeded")
     sg.rules.should.be.empty
 
     # authorize a rule targeting a different sec group (because this count too)
@@ -559,13 +559,13 @@ def test_sec_group_rule_limit():
         ec2_conn.authorize_security_group(
             group_id=sg.id, ip_protocol="-1", cidr_ip=["100.0.0.0/0"]
         )
-    cm.exception.error_code.should.equal("RulesPerSecurityGroupLimitExceeded")
+    cm.value.error_code.should.equal("RulesPerSecurityGroupLimitExceeded")
     # verify that we cannot authorize past the limit for a different sec group
     with pytest.raises(EC2ResponseError) as cm:
         ec2_conn.authorize_security_group(
             group_id=sg.id, ip_protocol="-1", src_security_group_group_id=other_sg.id
         )
-    cm.exception.error_code.should.equal("RulesPerSecurityGroupLimitExceeded")
+    cm.value.error_code.should.equal("RulesPerSecurityGroupLimitExceeded")
 
     # EGRESS
     # authorize a rule targeting a different sec group (because this count too)
@@ -584,13 +584,13 @@ def test_sec_group_rule_limit():
         ec2_conn.authorize_security_group_egress(
             group_id=sg.id, ip_protocol="-1", cidr_ip="101.0.0.0/0"
         )
-    cm.exception.error_code.should.equal("RulesPerSecurityGroupLimitExceeded")
+    cm.value.error_code.should.equal("RulesPerSecurityGroupLimitExceeded")
     # verify that we cannot authorize past the limit for a different sec group
     with pytest.raises(EC2ResponseError) as cm:
         ec2_conn.authorize_security_group_egress(
             group_id=sg.id, ip_protocol="-1", src_group_id=other_sg.id
         )
-    cm.exception.error_code.should.equal("RulesPerSecurityGroupLimitExceeded")
+    cm.value.error_code.should.equal("RulesPerSecurityGroupLimitExceeded")
 
 
 @mock_ec2_deprecated
@@ -610,7 +610,7 @@ def test_sec_group_rule_limit_vpc():
             ip_protocol="-1",
             cidr_ip=["{0}.0.0.0/0".format(i) for i in range(110)],
         )
-    cm.exception.error_code.should.equal("RulesPerSecurityGroupLimitExceeded")
+    cm.value.error_code.should.equal("RulesPerSecurityGroupLimitExceeded")
     sg.rules.should.be.empty
 
     # authorize a rule targeting a different sec group (because this count too)
@@ -630,13 +630,13 @@ def test_sec_group_rule_limit_vpc():
         ec2_conn.authorize_security_group(
             group_id=sg.id, ip_protocol="-1", cidr_ip=["100.0.0.0/0"]
         )
-    cm.exception.error_code.should.equal("RulesPerSecurityGroupLimitExceeded")
+    cm.value.error_code.should.equal("RulesPerSecurityGroupLimitExceeded")
     # verify that we cannot authorize past the limit for a different sec group
     with pytest.raises(EC2ResponseError) as cm:
         ec2_conn.authorize_security_group(
             group_id=sg.id, ip_protocol="-1", src_security_group_group_id=other_sg.id
         )
-    cm.exception.error_code.should.equal("RulesPerSecurityGroupLimitExceeded")
+    cm.value.error_code.should.equal("RulesPerSecurityGroupLimitExceeded")
 
     # EGRESS
     # authorize a rule targeting a different sec group (because this count too)
@@ -655,13 +655,13 @@ def test_sec_group_rule_limit_vpc():
         ec2_conn.authorize_security_group_egress(
             group_id=sg.id, ip_protocol="-1", cidr_ip="50.0.0.0/0"
         )
-    cm.exception.error_code.should.equal("RulesPerSecurityGroupLimitExceeded")
+    cm.value.error_code.should.equal("RulesPerSecurityGroupLimitExceeded")
     # verify that we cannot authorize past the limit for a different sec group
    with pytest.raises(EC2ResponseError) as cm:
         ec2_conn.authorize_security_group_egress(
             group_id=sg.id, ip_protocol="-1", src_group_id=other_sg.id
         )
-    cm.exception.error_code.should.equal("RulesPerSecurityGroupLimitExceeded")
+    cm.value.error_code.should.equal("RulesPerSecurityGroupLimitExceeded")
 
 
 """
@@ -766,9 +766,9 @@ def test_security_group_tagging_boto3():
             Tags=[{"Key": "Test", "Value": "Tag"}],
             DryRun=True,
         )
-    ex.exception.response["Error"]["Code"].should.equal("DryRunOperation")
-    ex.exception.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400)
-    ex.exception.response["Error"]["Message"].should.equal(
+    ex.value.response["Error"]["Code"].should.equal("DryRunOperation")
+    ex.value.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400)
+    ex.value.response["Error"]["Message"].should.equal(
         "An error occurred (DryRunOperation) when calling the CreateTags operation: Request would have succeeded, but DryRun flag is set"
     )
@@ -927,9 +927,9 @@ def test_get_all_security_groups_filter_with_same_vpc_id():
 
     with pytest.raises(EC2ResponseError) as cm:
         conn.get_all_security_groups(group_ids=["does_not_exist"])
-    cm.exception.code.should.equal("InvalidGroup.NotFound")
-    cm.exception.status.should.equal(400)
-    cm.exception.request_id.should_not.be.none
+    cm.value.code.should.equal("InvalidGroup.NotFound")
+    cm.value.status.should.equal(400)
+    cm.value.request_id.should_not.be.none
 
 
 @mock_ec2
diff --git a/tests/test_ec2/test_spot_instances.py b/tests/test_ec2/test_spot_instances.py
index c7b965918a7f..bb4ccac3bded 100644
--- a/tests/test_ec2/test_spot_instances.py
+++ b/tests/test_ec2/test_spot_instances.py
@@ -54,9 +54,9 @@ def test_request_spot_instances():
             },
             DryRun=True,
         )
-    ex.exception.response["Error"]["Code"].should.equal("DryRunOperation")
-    ex.exception.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400)
-    ex.exception.response["Error"]["Message"].should.equal(
+    ex.value.response["Error"]["Code"].should.equal("DryRunOperation")
+    ex.value.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400)
+    ex.value.response["Error"]["Message"].should.equal(
         "An error occurred (DryRunOperation) when calling the RequestSpotInstance operation: Request would have succeeded, but DryRun flag is set"
     )
 
@@ -157,9 +157,9 @@ def test_cancel_spot_instance_request():
 
     with pytest.raises(EC2ResponseError) as ex:
         conn.cancel_spot_instance_requests([requests[0].id], dry_run=True)
-    ex.exception.error_code.should.equal("DryRunOperation")
-    ex.exception.status.should.equal(400)
-    ex.exception.message.should.equal(
+    ex.value.error_code.should.equal("DryRunOperation")
+    ex.value.status.should.equal(400)
+    ex.value.message.should.equal(
         "An error occurred (DryRunOperation) when calling the CancelSpotInstance operation: Request would have succeeded, but DryRun flag is set"
     )
diff --git a/tests/test_ec2/test_subnets.py b/tests/test_ec2/test_subnets.py
index 1d44999aeb46..9c14f798febf 100644
--- a/tests/test_ec2/test_subnets.py
+++ b/tests/test_ec2/test_subnets.py
@@ -1,6 +1,6 @@
 from __future__ import unicode_literals
 
-# Ensure 'assert_raises' context manager support for Python 2.6
+# Ensure 'pytest.raises' context manager support for Python 2.6
 import pytest
 import boto3
@@ -31,9 +31,9 @@ def test_subnets():
 
     with pytest.raises(EC2ResponseError) as cm:
         conn.delete_subnet(subnet.id)
-    cm.exception.code.should.equal("InvalidSubnetID.NotFound")
-    cm.exception.status.should.equal(400)
-    cm.exception.request_id.should_not.be.none
+    cm.value.code.should.equal("InvalidSubnetID.NotFound")
+    cm.value.status.should.equal(400)
+    cm.value.request_id.should_not.be.none
 
 
 @mock_ec2_deprecated
@@ -42,9 +42,9 @@ def test_subnet_create_vpc_validation():
 
     with pytest.raises(EC2ResponseError) as cm:
         conn.create_subnet("vpc-abcd1234", "10.0.0.0/18")
-    cm.exception.code.should.equal("InvalidVpcID.NotFound")
-    cm.exception.status.should.equal(400)
-    cm.exception.request_id.should_not.be.none
+    cm.value.code.should.equal("InvalidVpcID.NotFound")
+    cm.value.status.should.equal(400)
+    cm.value.request_id.should_not.be.none
 
 
 @mock_ec2_deprecated
@@ -229,9 +229,9 @@ def test_subnet_get_by_id():
 
     with pytest.raises(EC2ResponseError) as cm:
         conn.get_all_subnets(subnet_ids=["subnet-does_not_exist"])
-    cm.exception.code.should.equal("InvalidSubnetID.NotFound")
-    cm.exception.status.should.equal(400)
-    cm.exception.request_id.should_not.be.none
+    cm.value.code.should.equal("InvalidSubnetID.NotFound")
+    cm.value.status.should.equal(400)
+    cm.value.request_id.should_not.be.none
 
 
 @mock_ec2_deprecated
@@ -391,7 +391,7 @@ def test_create_subnet_with_invalid_availability_zone():
             CidrBlock="10.0.0.0/24",
             AvailabilityZone=subnet_availability_zone,
         )
-    assert str(ex.exception).startswith(
+    assert str(ex.value).startswith(
         "An error occurred (InvalidParameterValue) when calling the CreateSubnet "
         "operation: Value ({}) for parameter availabilityZone is invalid. Subnets can currently only be created in the following availability zones: ".format(
             subnet_availability_zone
@@ -410,7 +410,7 @@ def test_create_subnet_with_invalid_cidr_range():
     subnet_cidr_block = "10.1.0.0/20"
     with pytest.raises(ClientError) as ex:
         subnet = ec2.create_subnet(VpcId=vpc.id, CidrBlock=subnet_cidr_block)
-    str(ex.exception).should.equal(
+    str(ex.value).should.equal(
         "An error occurred (InvalidSubnet.Range) when calling the CreateSubnet "
         "operation: The CIDR '{}' is invalid.".format(subnet_cidr_block)
     )
@@ -445,7 +445,7 @@ def test_create_subnet_with_invalid_cidr_block_parameter():
     subnet_cidr_block = "1000.1.0.0/20"
     with pytest.raises(ClientError) as ex:
         subnet = ec2.create_subnet(VpcId=vpc.id, CidrBlock=subnet_cidr_block)
-    str(ex.exception).should.equal(
+    str(ex.value).should.equal(
         "An error occurred (InvalidParameterValue) when calling the CreateSubnet "
         "operation: Value ({}) for parameter cidrBlock is invalid. This is not a valid CIDR block.".format(
             subnet_cidr_block
@@ -505,7 +505,7 @@ def test_create_subnets_with_overlapping_cidr_blocks():
     with pytest.raises(ClientError) as ex:
         subnet1 = ec2.create_subnet(VpcId=vpc.id, CidrBlock=subnet_cidr_block)
         subnet2 = ec2.create_subnet(VpcId=vpc.id, CidrBlock=subnet_cidr_block)
-    str(ex.exception).should.equal(
+    str(ex.value).should.equal(
         "An error occurred (InvalidSubnet.Conflict) when calling the CreateSubnet "
         "operation: The CIDR '{}' conflicts with another subnet".format(
             subnet_cidr_block
diff --git a/tests/test_ec2/test_tags.py b/tests/test_ec2/test_tags.py
index 918b02623d5e..82a23c91c79e 100644
--- a/tests/test_ec2/test_tags.py
+++ b/tests/test_ec2/test_tags.py
@@ -22,9 +22,9 @@ def test_add_tag():
 
     with pytest.raises(EC2ResponseError) as ex:
         instance.add_tag("a key", "some value", dry_run=True)
-    ex.exception.error_code.should.equal("DryRunOperation")
-    ex.exception.status.should.equal(400)
-    ex.exception.message.should.equal(
+    ex.value.error_code.should.equal("DryRunOperation")
+    ex.value.status.should.equal(400)
+    ex.value.message.should.equal(
         "An error occurred (DryRunOperation) when calling the CreateTags operation: Request would have succeeded, but DryRun flag is set"
     )
 
@@ -53,9 +53,9 @@ def test_remove_tag():
 
     with pytest.raises(EC2ResponseError) as ex:
         instance.remove_tag("a key", dry_run=True)
-    ex.exception.error_code.should.equal("DryRunOperation")
-    ex.exception.status.should.equal(400)
-    ex.exception.message.should.equal(
+    ex.value.error_code.should.equal("DryRunOperation")
+    ex.value.status.should.equal(400)
+    ex.value.message.should.equal(
         "An error occurred (DryRunOperation) when calling the DeleteTags operation: Request would have succeeded, but DryRun flag is set"
     )
 
@@ -108,9 +108,9 @@ def test_create_tags():
 
     with pytest.raises(EC2ResponseError) as ex:
         conn.create_tags(instance.id, tag_dict, dry_run=True)
-    ex.exception.error_code.should.equal("DryRunOperation")
-    ex.exception.status.should.equal(400)
-    ex.exception.message.should.equal(
+    ex.value.error_code.should.equal("DryRunOperation")
+    ex.value.status.should.equal(400)
+    ex.value.message.should.equal(
         "An error occurred (DryRunOperation) when calling the CreateTags operation: Request would have succeeded, but DryRun flag is set"
     )
 
@@ -133,16 +133,16 @@ def test_tag_limit_exceeded():
 
     with pytest.raises(EC2ResponseError) as cm:
         conn.create_tags(instance.id, tag_dict)
-    cm.exception.code.should.equal("TagLimitExceeded")
-    cm.exception.status.should.equal(400)
-    cm.exception.request_id.should_not.be.none
+    cm.value.code.should.equal("TagLimitExceeded")
+    cm.value.status.should.equal(400)
+    cm.value.request_id.should_not.be.none
 
     instance.add_tag("a key", "a value")
     with pytest.raises(EC2ResponseError) as cm:
         conn.create_tags(instance.id, tag_dict)
-    cm.exception.code.should.equal("TagLimitExceeded")
-    cm.exception.status.should.equal(400)
-    cm.exception.request_id.should_not.be.none
+    cm.value.code.should.equal("TagLimitExceeded")
+    cm.value.status.should.equal(400)
+    cm.value.request_id.should_not.be.none
 
     tags = conn.get_all_tags()
     tag = tags[0]
@@ -159,9 +159,9 @@ def test_invalid_parameter_tag_null():
 
     with pytest.raises(EC2ResponseError) as cm:
         instance.add_tag("a key", None)
-    cm.exception.code.should.equal("InvalidParameterValue")
-    cm.exception.status.should.equal(400)
-    cm.exception.request_id.should_not.be.none
+    cm.value.code.should.equal("InvalidParameterValue")
+    cm.value.status.should.equal(400)
+    cm.value.request_id.should_not.be.none
 
 
 @mock_ec2_deprecated
@@ -169,15 +169,15 @@ def test_invalid_id():
     conn = boto.connect_ec2("the_key", "the_secret")
     with pytest.raises(EC2ResponseError) as cm:
         conn.create_tags("ami-blah", {"key": "tag"})
-    cm.exception.code.should.equal("InvalidID")
-    cm.exception.status.should.equal(400)
-    cm.exception.request_id.should_not.be.none
+    cm.value.code.should.equal("InvalidID")
+    cm.value.status.should.equal(400)
+    cm.value.request_id.should_not.be.none
 
     with pytest.raises(EC2ResponseError) as cm:
         conn.create_tags("blah-blah", {"key": "tag"})
-    cm.exception.code.should.equal("InvalidID")
-    cm.exception.status.should.equal(400)
-    cm.exception.request_id.should_not.be.none
+    cm.value.code.should.equal("InvalidID")
+    cm.value.status.should.equal(400)
+    cm.value.request_id.should_not.be.none
 
 
 @mock_ec2_deprecated
@@ -451,8 +451,8 @@ def test_create_tag_empty_resource():
     # create tag with empty resource
     with pytest.raises(ClientError) as ex:
         client.create_tags(Resources=[], Tags=[{"Key": "Value"}])
-    ex.exception.response["Error"]["Code"].should.equal("MissingParameter")
-    ex.exception.response["Error"]["Message"].should.equal(
+    ex.value.response["Error"]["Code"].should.equal("MissingParameter")
+    ex.value.response["Error"]["Message"].should.equal(
         "The request must contain the parameter resourceIdSet"
     )
 
@@ -464,8 +464,8 @@ def test_delete_tag_empty_resource():
     # delete tag with empty resource
     with pytest.raises(ClientError) as ex:
         client.delete_tags(Resources=[], Tags=[{"Key": "Value"}])
-    ex.exception.response["Error"]["Code"].should.equal("MissingParameter")
-    ex.exception.response["Error"]["Message"].should.equal(
+    ex.value.response["Error"]["Code"].should.equal("MissingParameter")
+    ex.value.response["Error"]["Message"].should.equal(
         "The request must contain the parameter resourceIdSet"
     )
diff --git a/tests/test_ec2/test_vpc_peering.py b/tests/test_ec2/test_vpc_peering.py
index f852ab3cab33..2ffe89fca684 100644
--- a/tests/test_ec2/test_vpc_peering.py
+++ b/tests/test_ec2/test_vpc_peering.py
@@ -1,6 +1,6 @@
 from __future__ import unicode_literals
 
-# Ensure 'assert_raises' context manager support for Python 2.6
+# Ensure 'pytest.raises' context manager support for Python 2.6
 import pytest
 from moto.ec2.exceptions import EC2ClientError
 from botocore.exceptions import ClientError
@@ -50,9 +50,9 @@ def test_vpc_peering_connections_accept():
 
     with pytest.raises(EC2ResponseError) as cm:
         conn.reject_vpc_peering_connection(vpc_pcx.id)
-    cm.exception.code.should.equal("InvalidStateTransition")
-    cm.exception.status.should.equal(400)
-    cm.exception.request_id.should_not.be.none
+    cm.value.code.should.equal("InvalidStateTransition")
+    cm.value.status.should.equal(400)
+    cm.value.request_id.should_not.be.none
 
     all_vpc_pcxs = conn.get_all_vpc_peering_connections()
     all_vpc_pcxs.should.have.length_of(1)
@@ -70,9 +70,9 @@ def test_vpc_peering_connections_reject():
 
     with pytest.raises(EC2ResponseError) as cm:
         conn.accept_vpc_peering_connection(vpc_pcx.id)
-    cm.exception.code.should.equal("InvalidStateTransition")
-    cm.exception.status.should.equal(400)
-    cm.exception.request_id.should_not.be.none
+    cm.value.code.should.equal("InvalidStateTransition")
+    cm.value.status.should.equal(400)
+    cm.value.request_id.should_not.be.none
 
     all_vpc_pcxs = conn.get_all_vpc_peering_connections()
     all_vpc_pcxs.should.have.length_of(1)
@@ -94,9 +94,9 @@ def test_vpc_peering_connections_delete():
 
     with pytest.raises(EC2ResponseError) as cm:
         conn.delete_vpc_peering_connection("pcx-1234abcd")
-    cm.exception.code.should.equal("InvalidVpcPeeringConnectionId.NotFound")
-    cm.exception.status.should.equal(400)
-    cm.exception.request_id.should_not.be.none
+    cm.value.code.should.equal("InvalidVpcPeeringConnectionId.NotFound")
+    cm.value.status.should.equal(400)
+    cm.value.request_id.should_not.be.none
 
 
 @mock_ec2
@@ -132,7 +132,7 @@ def test_vpc_peering_connections_cross_region_fail():
         ec2_usw1.create_vpc_peering_connection(
             VpcId=vpc_usw1.id, PeerVpcId=vpc_apn1.id, PeerRegion="ap-northeast-2"
         )
-    cm.exception.response["Error"]["Code"].should.equal("InvalidVpcID.NotFound")
+    cm.value.response["Error"]["Code"].should.equal("InvalidVpcID.NotFound")
 
 
 @mock_ec2
@@ -254,13 +254,13 @@ def test_vpc_peering_connections_cross_region_accept_wrong_region():
     ec2_usw1 = boto3.client("ec2", region_name="us-west-1")
     with pytest.raises(ClientError) as cm:
         ec2_usw1.accept_vpc_peering_connection(VpcPeeringConnectionId=vpc_pcx_usw1.id)
-    cm.exception.response["Error"]["Code"].should.equal("OperationNotPermitted")
+    cm.value.response["Error"]["Code"].should.equal("OperationNotPermitted")
     exp_msg = (
         "Incorrect region ({0}) specified for this request.VPC "
         "peering connection {1} must be "
         "accepted in region {2}".format("us-west-1", vpc_pcx_usw1.id, "ap-northeast-1")
     )
-    cm.exception.response["Error"]["Message"].should.equal(exp_msg)
+    cm.value.response["Error"]["Message"].should.equal(exp_msg)
 
 
 @mock_ec2
@@ -279,10 +279,10 @@ def test_vpc_peering_connections_cross_region_reject_wrong_region():
    ec2_usw1 = boto3.client("ec2", region_name="us-west-1")
     with pytest.raises(ClientError) as cm:
         ec2_usw1.reject_vpc_peering_connection(VpcPeeringConnectionId=vpc_pcx_usw1.id)
-    cm.exception.response["Error"]["Code"].should.equal("OperationNotPermitted")
+    cm.value.response["Error"]["Code"].should.equal("OperationNotPermitted")
     exp_msg = (
         "Incorrect region ({0}) specified for this request.VPC "
         "peering connection {1} must be accepted or "
         "rejected in region {2}".format("us-west-1", vpc_pcx_usw1.id, "ap-northeast-1")
     )
-    cm.exception.response["Error"]["Message"].should.equal(exp_msg)
+    cm.value.response["Error"]["Message"].should.equal(exp_msg)
diff --git a/tests/test_ec2/test_vpcs.py b/tests/test_ec2/test_vpcs.py
index 32e59a91bec9..5344098ba674 100644
--- a/tests/test_ec2/test_vpcs.py
+++ b/tests/test_ec2/test_vpcs.py
@@ -1,6 +1,6 @@
 from __future__ import unicode_literals
 
-# Ensure 'assert_raises' context manager support for Python 2.6
+# Ensure 'pytest.raises' context manager support for Python 2.6
 import pytest
 from moto.ec2.exceptions import EC2ClientError
 from botocore.exceptions import ClientError
@@ -32,9 +32,9 @@ def test_vpcs():
 
     with pytest.raises(EC2ResponseError) as cm:
         conn.delete_vpc("vpc-1234abcd")
-    cm.exception.code.should.equal("InvalidVpcID.NotFound")
-    cm.exception.status.should.equal(400)
-    cm.exception.request_id.should_not.be.none
+    cm.value.code.should.equal("InvalidVpcID.NotFound")
+    cm.value.status.should.equal(400)
+    cm.value.request_id.should_not.be.none
 
 
 @mock_ec2_deprecated
@@ -115,9 +115,9 @@ def test_vpc_get_by_id():
 
     with pytest.raises(EC2ResponseError) as cm:
         conn.get_all_vpcs(vpc_ids=["vpc-does_not_exist"])
-    cm.exception.code.should.equal("InvalidVpcID.NotFound")
-    cm.exception.status.should.equal(400)
-    cm.exception.request_id.should_not.be.none
+    cm.value.code.should.equal("InvalidVpcID.NotFound")
+    cm.value.status.should.equal(400)
+    cm.value.request_id.should_not.be.none
 
 
 @mock_ec2_deprecated
@@ -405,7 +405,7 @@ def test_associate_vpc_ipv4_cidr_block():
         response = ec2.meta.client.associate_vpc_cidr_block(
             VpcId=vpc.id, CidrBlock="10.10.50.0/22"
         )
-    str(ex.exception).should.equal(
+    str(ex.value).should.equal(
         "An error occurred (CidrLimitExceeded) when calling the AssociateVpcCidrBlock "
         "operation: This network '{}' has met its maximum number of allowed CIDRs: 5".format(
             vpc.id
@@ -450,7 +450,7 @@ def test_disassociate_vpc_ipv4_cidr_block():
         response = ec2.meta.client.disassociate_vpc_cidr_block(
             AssociationId="vpc-cidr-assoc-BORING123"
         )
-    str(ex.exception).should.equal(
+    str(ex.value).should.equal(
         "An error occurred (InvalidVpcCidrBlockAssociationIdError.NotFound) when calling the "
         "DisassociateVpcCidrBlock operation: The vpc CIDR block association ID "
         "'vpc-cidr-assoc-BORING123' does not exist"
@@ -472,7 +472,7 @@ def test_disassociate_vpc_ipv4_cidr_block():
         response = ec2.meta.client.disassociate_vpc_cidr_block(
             AssociationId=vpc_base_cidr_assoc_id
         )
-    str(ex.exception).should.equal(
+    str(ex.value).should.equal(
         "An error occurred (OperationNotPermitted) when calling the DisassociateVpcCidrBlock operation: "
         "The vpc CIDR block with association ID {} may not be disassociated. It is the primary "
         "IPv4 CIDR block of the VPC".format(vpc_base_cidr_assoc_id)
@@ -552,7 +552,7 @@ def test_vpc_associate_ipv6_cidr_block():
         response = ec2.meta.client.associate_vpc_cidr_block(
             VpcId=vpc.id, AmazonProvidedIpv6CidrBlock=True
         )
-    str(ex.exception).should.equal(
+    str(ex.value).should.equal(
         "An error occurred (CidrLimitExceeded) when calling the AssociateVpcCidrBlock "
         "operation: This network '{}' has met its maximum number of allowed CIDRs: 1".format(
             vpc.id
@@ -658,7 +658,7 @@ def test_create_vpc_with_invalid_cidr_block_parameter():
     vpc_cidr_block = "1000.1.0.0/20"
     with pytest.raises(ClientError) as ex:
         vpc = ec2.create_vpc(CidrBlock=vpc_cidr_block)
-    str(ex.exception).should.equal(
+    str(ex.value).should.equal(
         "An error occurred (InvalidParameterValue) when calling the CreateVpc "
         "operation: Value ({}) for parameter cidrBlock is invalid. This is not a valid CIDR block.".format(
             vpc_cidr_block
@@ -673,7 +673,7 @@ def test_create_vpc_with_invalid_cidr_range():
     vpc_cidr_block = "10.1.0.0/29"
     with pytest.raises(ClientError) as ex:
         vpc = ec2.create_vpc(CidrBlock=vpc_cidr_block)
-    str(ex.exception).should.equal(
+    str(ex.value).should.equal(
         "An error occurred (InvalidVpc.Range) when calling the CreateVpc "
         "operation: The CIDR '{}' is invalid.".format(vpc_cidr_block)
     )
diff --git a/tests/test_ecs/test_ecs_boto3.py b/tests/test_ecs/test_ecs_boto3.py
index afec17da26c3..8b6b2798724f 100644
--- a/tests/test_ecs/test_ecs_boto3.py
+++ b/tests/test_ecs/test_ecs_boto3.py
@@ -2611,7 +2611,7 @@ def test_delete_task_set():
 
     assert len(task_sets) == 0
 
-    with assert_raises(ClientError):
+    with pytest.raises(ClientError):
         _ = client.delete_task_set(
             cluster=cluster_name, service=service_name, taskSet=task_set["taskSetArn"],
         )
diff --git a/tests/test_elb/test_elb.py b/tests/test_elb/test_elb.py
index dd51e8f6064e..a2e8871d87ac 100644
--- a/tests/test_elb/test_elb.py
+++ b/tests/test_elb/test_elb.py
@@ -184,9 +184,8 @@ def test_apply_security_groups_to_load_balancer():
         response = client.apply_security_groups_to_load_balancer(
             LoadBalancerName="my-lb", SecurityGroups=["not-really-a-security-group"]
         )
-    assert "One or more of the specified security groups do not exist." in str(
-        error.exception
-    )
+    assert "One or more of the specified security groups do not exist." \
+        in str(error.value)
 
 
 @mock_elb_deprecated
diff --git a/tests/test_elbv2/test_elbv2.py b/tests/test_elbv2/test_elbv2.py
index 6ff48095daba..cb8e13e52de1 100644
--- a/tests/test_elbv2/test_elbv2.py
+++ b/tests/test_elbv2/test_elbv2.py
@@ -391,8 +391,8 @@ def test_create_target_group_and_listeners():
     # listener referencing it
     with pytest.raises(ClientError) as e:
         conn.delete_target_group(TargetGroupArn=target_group.get("TargetGroupArn"))
-    e.exception.operation_name.should.equal("DeleteTargetGroup")
-    e.exception.args.should.equal(
+    e.value.operation_name.should.equal("DeleteTargetGroup")
+    e.value.args.should.equal(
         (
             "An error occurred (ResourceInUse) when calling the DeleteTargetGroup operation: The target group 'arn:aws:elasticloadbalancing:us-east-1:1:targetgroup/a-target/50dc6c495c0c9188' is currently in use by a listener or a rule",
         )
     )
@@ -1959,7 +1959,7 @@ def test_fixed_response_action_listener_rule_validates_status_code():
         DefaultActions=[invalid_status_code_action],
     )
 
-    invalid_status_code_exception.exception.response["Error"]["Code"].should.equal(
+    invalid_status_code_exception.value.response["Error"]["Code"].should.equal(
         "ValidationError"
     )
 
@@ -2005,6 +2005,6 @@ def test_fixed_response_action_listener_rule_validates_content_type():
         Port=80,
         DefaultActions=[invalid_content_type_action],
     )
-    invalid_content_type_exception.exception.response["Error"]["Code"].should.equal(
+    invalid_content_type_exception.value.response["Error"]["Code"].should.equal(
         "InvalidLoadBalancerAction"
     )
diff --git a/tests/test_emr/test_emr_boto3.py b/tests/test_emr/test_emr_boto3.py
index a3308e3fe0f1..c6b93957ab43 100644
--- a/tests/test_emr/test_emr_boto3.py
+++ b/tests/test_emr/test_emr_boto3.py
@@ -401,7 +401,7 @@ def test_run_job_flow_with_invalid_params():
         args["AmiVersion"] = "2.4"
         args["ReleaseLabel"] = "emr-5.0.0"
         client.run_job_flow(**args)
-    ex.exception.response["Error"]["Code"].should.equal("ValidationException")
+    ex.value.response["Error"]["Code"].should.equal("ValidationException")
 
 
 @mock_emr
@@ -598,16 +598,16 @@ def test_run_job_flow_with_custom_ami():
         args["CustomAmiId"] = "MyEmrCustomId"
         args["ReleaseLabel"] = "emr-5.6.0"
         client.run_job_flow(**args)
-    ex.exception.response["Error"]["Code"].should.equal("ValidationException")
-    ex.exception.response["Error"]["Message"].should.equal("Custom AMI is not allowed")
+    ex.value.response["Error"]["Code"].should.equal("ValidationException")
+    ex.value.response["Error"]["Message"].should.equal("Custom AMI is not allowed")
 
     with pytest.raises(ClientError) as ex:
         args = deepcopy(run_job_flow_args)
         args["CustomAmiId"] = "MyEmrCustomId"
         args["AmiVersion"] = "3.8.1"
         client.run_job_flow(**args)
-    ex.exception.response["Error"]["Code"].should.equal("ValidationException")
-    ex.exception.response["Error"]["Message"].should.equal(
+    ex.value.response["Error"]["Code"].should.equal("ValidationException")
+    ex.value.response["Error"]["Message"].should.equal(
         "Custom AMI is not supported in this version of EMR"
     )
 
@@ -618,8 +618,8 @@ def test_run_job_flow_with_custom_ami():
         args["ReleaseLabel"] = "emr-5.6.0"
         args["AmiVersion"] = "3.8.1"
         client.run_job_flow(**args)
-    ex.exception.response["Error"]["Code"].should.equal("ValidationException")
-    ex.exception.response["Error"]["Message"].should.contain(
+    ex.value.response["Error"]["Code"].should.equal("ValidationException")
+    ex.value.response["Error"]["Message"].should.contain(
         "Only one AMI version and release label may be specified."
     )
diff --git a/tests/test_glue/test_datacatalog.py b/tests/test_glue/test_datacatalog.py
index ac63932efb54..5755282398ec 100644
--- a/tests/test_glue/test_datacatalog.py
+++ b/tests/test_glue/test_datacatalog.py
@@ -35,7 +35,7 @@ def test_create_database_already_exists():
 
     with pytest.raises(ClientError) as exc:
         helpers.create_database(client, database_name)
-    exc.exception.response["Error"]["Code"].should.equal("AlreadyExistsException")
+    exc.value.response["Error"]["Code"].should.equal("AlreadyExistsException")
 
 
 @mock_glue
@@ -46,8 +46,8 @@ def test_get_database_not_exits():
 
     with pytest.raises(ClientError) as exc:
         helpers.get_database(client, database_name)
-    exc.exception.response["Error"]["Code"].should.equal("EntityNotFoundException")
-    exc.exception.response["Error"]["Message"].should.match(
+    exc.value.response["Error"]["Code"].should.equal("EntityNotFoundException")
+    exc.value.response["Error"]["Message"].should.match(
         "Database nosuchdatabase not found"
     )
 
@@ -105,7 +105,7 @@ def test_create_table_already_exists():
 
     with pytest.raises(ClientError) as exc:
         helpers.create_table(client, database_name, table_name)
-    exc.exception.response["Error"]["Code"].should.equal("AlreadyExistsException")
+    exc.value.response["Error"]["Code"].should.equal("AlreadyExistsException")
 
 
 @mock_glue
@@ -195,8 +195,8 @@ def test_get_table_version_not_found():
 
     with pytest.raises(ClientError) as exc:
         helpers.get_table_version(client, database_name, "myfirsttable", "20")
-    exc.exception.response["Error"]["Code"].should.equal("EntityNotFoundException")
-    exc.exception.response["Error"]["Message"].should.match("version", re.I)
+    exc.value.response["Error"]["Code"].should.equal("EntityNotFoundException")
+    exc.value.response["Error"]["Message"].should.match("version", re.I)
 
 
 @mock_glue
@@ -210,7 +210,7 @@ def test_get_table_version_invalid_input():
 
     with pytest.raises(ClientError) as exc:
         helpers.get_table_version(client, database_name, "myfirsttable", "10not-an-int")
-    exc.exception.response["Error"]["Code"].should.equal("InvalidInputException")
+    exc.value.response["Error"]["Code"].should.equal("InvalidInputException")
 
 
 @mock_glue
@@ -222,8 +222,8 @@ def test_get_table_not_exits():
 
     with pytest.raises(ClientError) as exc:
         helpers.get_table(client, database_name, "myfirsttable")
-    exc.exception.response["Error"]["Code"].should.equal("EntityNotFoundException")
-    exc.exception.response["Error"]["Message"].should.match(
+    exc.value.response["Error"]["Code"].should.equal("EntityNotFoundException")
+    exc.value.response["Error"]["Message"].should.match(
         "Table myfirsttable not found"
     )
 
@@ -236,8 +236,8 @@ def test_get_table_when_database_not_exits():
 
     with pytest.raises(ClientError) as exc:
         helpers.get_table(client, database_name, "myfirsttable")
-    exc.exception.response["Error"]["Code"].should.equal("EntityNotFoundException")
-    exc.exception.response["Error"]["Message"].should.match(
+    exc.value.response["Error"]["Code"].should.equal("EntityNotFoundException")
+    exc.value.response["Error"]["Message"].should.match(
         "Database nosuchdatabase not found"
     )
 
@@ -259,8 +259,8 @@ def test_delete_table():
 
     with pytest.raises(ClientError) as exc:
         helpers.get_table(client, database_name, table_name)
-    exc.exception.response["Error"]["Code"].should.equal("EntityNotFoundException")
-    exc.exception.response["Error"]["Message"].should.match(
+    exc.value.response["Error"]["Code"].should.equal("EntityNotFoundException")
+    exc.value.response["Error"]["Message"].should.match(
         "Table myspecialtable not found"
     )
 
@@ -284,8 +284,8 @@ def test_batch_delete_table():
 
     with pytest.raises(ClientError) as exc:
         helpers.get_table(client, database_name, table_name)
-    exc.exception.response["Error"]["Code"].should.equal("EntityNotFoundException")
-    exc.exception.response["Error"]["Message"].should.match(
+    exc.value.response["Error"]["Code"].should.equal("EntityNotFoundException")
+    exc.value.response["Error"]["Message"].should.match(
         "Table myspecialtable not found"
    )
 
@@ -353,7 +353,7 @@ def test_create_partition_already_exist():
 
     with pytest.raises(ClientError) as exc:
         helpers.create_partition(client, database_name, table_name, values=values)
-    exc.exception.response["Error"]["Code"].should.equal("AlreadyExistsException")
+    exc.value.response["Error"]["Code"].should.equal("AlreadyExistsException")
 
 
 @mock_glue
@@ -369,8 +369,8 @@ def test_get_partition_not_found():
 
     with pytest.raises(ClientError) as exc:
         helpers.get_partition(client, database_name, table_name, values)
-    exc.exception.response["Error"]["Code"].should.equal("EntityNotFoundException")
-    exc.exception.response["Error"]["Message"].should.match("partition")
+    exc.value.response["Error"]["Code"].should.equal("EntityNotFoundException")
+    exc.value.response["Error"]["Message"].should.match("partition")
 
 
 @mock_glue
@@ -551,8 +551,8 @@ def test_update_partition_not_found_moving():
             values=["2018-10-02"],
         )
 
-    exc.exception.response["Error"]["Code"].should.equal("EntityNotFoundException")
-    exc.exception.response["Error"]["Message"].should.match("partition")
+    exc.value.response["Error"]["Code"].should.equal("EntityNotFoundException")
+    exc.value.response["Error"]["Message"].should.match("partition")
 
 
 @mock_glue
@@ -570,8 +570,8 @@ def test_update_partition_not_found_change_in_place():
        helpers.update_partition(
             client, database_name, table_name, old_values=values, values=values
         )
 
-    exc.exception.response["Error"]["Code"].should.equal("EntityNotFoundException")
-    exc.exception.response["Error"]["Message"].should.match("partition")
+    exc.value.response["Error"]["Code"].should.equal("EntityNotFoundException")
+    exc.value.response["Error"]["Message"].should.match("partition")
 
 
 @mock_glue
@@ -593,7 +593,7 @@ def test_update_partition_cannot_overwrite():
             client, database_name, table_name, old_values=values[0], values=values[1]
         )
 
-    exc.exception.response["Error"]["Code"].should.equal("AlreadyExistsException")
+    exc.value.response["Error"]["Code"].should.equal("AlreadyExistsException")
 
 
 @mock_glue
@@ -652,7 +652,7 @@ def test_update_partition_move():
         helpers.get_partition(client, database_name, table_name, values)
 
     # Old partition shouldn't exist anymore
-    exc.exception.response["Error"]["Code"].should.equal("EntityNotFoundException")
+    exc.value.response["Error"]["Code"].should.equal("EntityNotFoundException")
 
     response = client.get_partition(
         DatabaseName=database_name, TableName=table_name, PartitionValues=new_values
@@ -702,7 +702,7 @@ def test_delete_partition_bad_partition():
             DatabaseName=database_name, TableName=table_name, PartitionValues=values
         )
 
-    exc.exception.response["Error"]["Code"].should.equal("EntityNotFoundException")
+    exc.value.response["Error"]["Code"].should.equal("EntityNotFoundException")
 
 
 @mock_glue
diff --git a/tests/test_iam/test_iam.py b/tests/test_iam/test_iam.py
index d63e1777f588..b404a41060f1 100644
--- a/tests/test_iam/test_iam.py
+++ b/tests/test_iam/test_iam.py
@@ -445,9 +445,9 @@ def test_create_policy_already_exists():
         response = conn.create_policy(
             PolicyName="TestCreatePolicy", PolicyDocument=MOCK_POLICY
         )
-    ex.exception.response["Error"]["Code"].should.equal("EntityAlreadyExists")
-    ex.exception.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(409)
-    ex.exception.response["Error"]["Message"].should.contain("TestCreatePolicy")
+    ex.value.response["Error"]["Code"].should.equal("EntityAlreadyExists")
+    ex.value.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(409)
+    ex.value.response["Error"]["Message"].should.contain("TestCreatePolicy")
 
 
 @mock_iam
@@ -1825,7 +1825,7 @@ def test_signing_certs():
         client.upload_signing_certificate(
             UserName="testing", CertificateBody="notacert"
         )
-    assert ce.exception.response["Error"]["Code"] == "MalformedCertificate"
+    assert ce.value.response["Error"]["Code"] == "MalformedCertificate"
 
     # Upload with an invalid user:
     with pytest.raises(ClientError):
@@ -1848,7 +1848,7 @@ def test_signing_certs():
            UserName="testing", CertificateId="x" * 32, Status="Inactive"
         )
 
-    assert ce.exception.response["Error"][
+    assert ce.value.response["Error"][
         "Message"
     ] == "The Certificate with id {id} cannot be found.".format(id="x" * 32)
 
@@ -1918,7 +1918,7 @@ def test_delete_saml_provider():
     with pytest.raises(ClientError) as ce:
         conn.delete_signing_certificate(UserName="testing", CertificateId=cert_id)
 
-    assert ce.exception.response["Error"][
+    assert ce.value.response["Error"][
         "Message"
     ] == "The Certificate with id {id} cannot be found.".format(id=cert_id)
 
@@ -1985,7 +1985,7 @@ def test_create_role_with_tags():
     )
     assert (
         "failed to satisfy constraint: Member must have length less than or equal to 50."
-        in ce.exception.response["Error"]["Message"]
+        in ce.value.response["Error"]["Message"]
     )
 
     # With a duplicate tag:
@@ -1997,7 +1997,7 @@ def test_create_role_with_tags():
     )
     assert (
         "Duplicate tag keys found. Please note that Tag keys are case insensitive."
-        in ce.exception.response["Error"]["Message"]
+        in ce.value.response["Error"]["Message"]
     )
 
     # Duplicate tag with different casing:
@@ -2009,7 +2009,7 @@ def test_create_role_with_tags():
     )
     assert (
         "Duplicate tag keys found. Please note that Tag keys are case insensitive."
-        in ce.exception.response["Error"]["Message"]
+        in ce.value.response["Error"]["Message"]
     )
 
     # With a really big key:
@@ -2021,7 +2021,7 @@ def test_create_role_with_tags():
     )
     assert (
         "Member must have length less than or equal to 128."
-        in ce.exception.response["Error"]["Message"]
+        in ce.value.response["Error"]["Message"]
     )
 
     # With a really big value:
@@ -2033,7 +2033,7 @@ def test_create_role_with_tags():
     )
     assert (
         "Member must have length less than or equal to 256."
-        in ce.exception.response["Error"]["Message"]
+        in ce.value.response["Error"]["Message"]
     )
 
     # With an invalid character:
@@ -2045,7 +2045,7 @@ def test_create_role_with_tags():
     )
     assert (
         "Member must satisfy regular expression pattern: [\\p{L}\\p{Z}\\p{N}_.:/=+\\-@]+"
-        in ce.exception.response["Error"]["Message"]
+        in ce.value.response["Error"]["Message"]
     )
 
@@ -2126,7 +2126,7 @@ def test_tag_role():
         conn.tag_role(RoleName="my-role", Tags=too_many_tags)
     assert (
         "failed to satisfy constraint: Member must have length less than or equal to 50."
-        in ce.exception.response["Error"]["Message"]
+        in ce.value.response["Error"]["Message"]
     )
 
     # With a duplicate tag:
@@ -2137,7 +2137,7 @@ def test_tag_role():
     )
     assert (
         "Duplicate tag keys found. Please note that Tag keys are case insensitive."
-        in ce.exception.response["Error"]["Message"]
+        in ce.value.response["Error"]["Message"]
     )
 
     # Duplicate tag with different casing:
@@ -2148,7 +2148,7 @@ def test_tag_role():
     )
     assert (
         "Duplicate tag keys found. Please note that Tag keys are case insensitive."
-        in ce.exception.response["Error"]["Message"]
+        in ce.value.response["Error"]["Message"]
     )
 
     # With a really big key:
@@ -2156,7 +2156,7 @@ def test_tag_role():
         conn.tag_role(RoleName="my-role", Tags=[{"Key": "0" * 129, "Value": ""}])
     assert (
         "Member must have length less than or equal to 128."
-        in ce.exception.response["Error"]["Message"]
+        in ce.value.response["Error"]["Message"]
     )
 
     # With a really big value:
@@ -2164,7 +2164,7 @@ def test_tag_role():
         conn.tag_role(RoleName="my-role", Tags=[{"Key": "0", "Value": "0" * 257}])
     assert (
         "Member must have length less than or equal to 256."
-        in ce.exception.response["Error"]["Message"]
+        in ce.value.response["Error"]["Message"]
     )
 
     # With an invalid character:
@@ -2172,7 +2172,7 @@ def test_tag_role():
         conn.tag_role(RoleName="my-role", Tags=[{"Key": "NOWAY!", "Value": ""}])
     assert (
         "Member must satisfy regular expression pattern: [\\p{L}\\p{Z}\\p{N}_.:/=+\\-@]+"
-        in ce.exception.response["Error"]["Message"]
+        in ce.value.response["Error"]["Message"]
     )
 
     # With a role that doesn't exist:
@@ -2212,27 +2212,27 @@ def test_untag_role():
         conn.untag_role(RoleName="my-role", TagKeys=[str(x) for x in range(0, 51)])
     assert (
         "failed to satisfy constraint: Member must have length less than or equal to 50."
-        in ce.exception.response["Error"]["Message"]
+        in ce.value.response["Error"]["Message"]
     )
-    assert "tagKeys" in ce.exception.response["Error"]["Message"]
+    assert "tagKeys" in ce.value.response["Error"]["Message"]
 
     # With a really big key:
     with pytest.raises(ClientError) as ce:
         conn.untag_role(RoleName="my-role", TagKeys=["0" * 129])
     assert (
         "Member must have length less than or equal to 128."
-        in ce.exception.response["Error"]["Message"]
+        in ce.value.response["Error"]["Message"]
     )
-    assert "tagKeys" in ce.exception.response["Error"]["Message"]
+    assert "tagKeys" in ce.value.response["Error"]["Message"]
 
     # With an invalid character:
     with pytest.raises(ClientError) as ce:
         conn.untag_role(RoleName="my-role", TagKeys=["NOWAY!"])
     assert (
         "Member must satisfy regular expression pattern: [\\p{L}\\p{Z}\\p{N}_.:/=+\\-@]+"
-        in ce.exception.response["Error"]["Message"]
+        in ce.value.response["Error"]["Message"]
     )
-    assert "tagKeys" in ce.exception.response["Error"]["Message"]
+    assert "tagKeys" in ce.value.response["Error"]["Message"]
 
     # With a role that doesn't exist:
     with pytest.raises(ClientError):
@@ -2461,8 +2461,8 @@ def test_create_role_with_same_name_should_fail():
         AssumeRolePolicyDocument="policy",
         Description="test",
     )
-    err.exception.response["Error"]["Code"].should.equal("EntityAlreadyExists")
-    err.exception.response["Error"]["Message"].should.equal(
+    err.value.response["Error"]["Code"].should.equal("EntityAlreadyExists")
+    err.value.response["Error"]["Message"].should.equal(
         "Role with name {0} already exists.".format(test_role_name)
    )
 
@@ -2475,8 +2475,8 @@ def test_create_policy_with_same_name_should_fail():
     # Create the role again, and verify that it fails
     with pytest.raises(ClientError) as err:
         iam.create_policy(PolicyName=test_policy_name, PolicyDocument=MOCK_POLICY)
-    err.exception.response["Error"]["Code"].should.equal("EntityAlreadyExists")
-    err.exception.response["Error"]["Message"].should.equal(
+    err.value.response["Error"]["Code"].should.equal("EntityAlreadyExists")
+    err.value.response["Error"]["Message"].should.equal(
         "A policy called {0} already exists. Duplicate names are not allowed.".format(
             test_policy_name
         )
diff --git a/tests/test_iam/test_iam_cloudformation.py b/tests/test_iam/test_iam_cloudformation.py
index 737e76323b12..a50ed8234c96 100644
--- a/tests/test_iam/test_iam_cloudformation.py
+++ b/tests/test_iam/test_iam_cloudformation.py
@@ -113,7 +113,7 @@ def test_iam_cloudformation_update_user_replacement():
 
     with pytest.raises(ClientError) as e:
         iam_client.get_user(UserName=original_user_name)
-    e.exception.response["Error"]["Code"].should.equal("NoSuchEntity")
+    e.value.response["Error"]["Code"].should.equal("NoSuchEntity")
 
     iam_client.get_user(UserName=new_user_name)
 
@@ -177,7 +177,7 @@ def test_iam_cloudformation_update_drop_user():
     iam_client.get_user(UserName=second_user_name)
     with pytest.raises(ClientError) as e:
         iam_client.get_user(UserName=first_user_name)
-    e.exception.response["Error"]["Code"].should.equal("NoSuchEntity")
+    e.value.response["Error"]["Code"].should.equal("NoSuchEntity")
 
 
 @mock_iam
@@ -207,7 +207,7 @@ def test_iam_cloudformation_delete_user():
 
     with pytest.raises(ClientError) as e:
         user = iam_client.get_user(UserName=user_name)
-    e.exception.response["Error"]["Code"].should.equal("NoSuchEntity")
+    e.value.response["Error"]["Code"].should.equal("NoSuchEntity")
 
 
 @mock_iam
@@ -237,7 +237,7 @@ def test_iam_cloudformation_delete_user_having_generated_name():
 
     with pytest.raises(ClientError) as e:
         user = iam_client.get_user(UserName=user_name)
-    e.exception.response["Error"]["Code"].should.equal("NoSuchEntity")
+    e.value.response["Error"]["Code"].should.equal("NoSuchEntity")
 
 
 @mock_iam
diff --git a/tests/test_iam/test_iam_groups.py b/tests/test_iam/test_iam_groups.py
index a6bb5f4c0637..85464b44d963 100644
--- a/tests/test_iam/test_iam_groups.py
+++ b/tests/test_iam/test_iam_groups.py
@@ -199,9 +199,9 @@ def test_delete_group():
@mock_iam def test_delete_unknown_group(): conn = boto3.client("iam", region_name="us-east-1") - with assert_raises(ClientError) as err: + with pytest.raises(ClientError) as err: conn.delete_group(GroupName="unknown-group") - err.exception.response["Error"]["Code"].should.equal("NoSuchEntity") - err.exception.response["Error"]["Message"].should.equal( + err.value.response["Error"]["Code"].should.equal("NoSuchEntity") + err.value.response["Error"]["Message"].should.equal( "The group with name unknown-group cannot be found." ) diff --git a/tests/test_iam/test_iam_policies.py b/tests/test_iam/test_iam_policies.py index dae533827597..fec291c94cd2 100644 --- a/tests/test_iam/test_iam_policies.py +++ b/tests/test_iam/test_iam_policies.py @@ -1629,9 +1629,9 @@ def check_create_policy_with_invalid_policy_document(test_case): PolicyName="TestCreatePolicy", PolicyDocument=json.dumps(test_case["document"]), ) - ex.exception.response["Error"]["Code"].should.equal("MalformedPolicyDocument") - ex.exception.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) - ex.exception.response["Error"]["Message"].should.equal(test_case["error_message"]) + ex.value.response["Error"]["Code"].should.equal("MalformedPolicyDocument") + ex.value.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) + ex.value.response["Error"]["Message"].should.equal(test_case["error_message"]) @mock_iam diff --git a/tests/test_iot/test_iot.py b/tests/test_iot/test_iot.py index 44b365182d40..bea6958ac8d7 100644 --- a/tests/test_iot/test_iot.py +++ b/tests/test_iot/test_iot.py @@ -604,7 +604,7 @@ def test_create_certificate_validation(): client = boto3.client("iot", region_name="us-east-1") cert = client.create_keys_and_certificate(setAsActive=False) - with assert_raises(ClientError) as e: + with pytest.raises(ClientError) as e: client.register_certificate( certificatePem=cert["certificatePem"], setAsActive=False ) @@ -612,7 +612,7 @@ def test_create_certificate_validation(): "The certificate is already provisioned or registered" ) - with assert_raises(ClientError) as e: + with pytest.raises(ClientError) as e: client.register_certificate_without_ca( certificatePem=cert["certificatePem"], status="ACTIVE" ) @@ -645,7 +645,7 @@ def test_delete_policy_validation(): with pytest.raises(ClientError) as e: client.delete_policy(policyName=policy_name) - e.exception.response["Error"]["Message"].should.contain( + e.value.response["Error"]["Message"].should.contain( "The policy cannot be deleted as the policy is attached to one or more principals (name=%s)" % policy_name ) @@ -686,7 +686,7 @@ def test_delete_certificate_validation(): with pytest.raises(ClientError) as e: client.delete_certificate(certificateId=cert_id) - e.exception.response["Error"]["Message"].should.contain( + e.value.response["Error"]["Message"].should.contain( "Certificate must be deactivated (not ACTIVE) before deletion." 
) res = client.list_certificates() @@ -695,7 +695,7 @@ def test_delete_certificate_validation(): client.update_certificate(certificateId=cert_id, newStatus="REVOKED") with pytest.raises(ClientError) as e: client.delete_certificate(certificateId=cert_id) - e.exception.response["Error"]["Message"].should.contain( + e.value.response["Error"]["Message"].should.contain( "Things must be detached before deletion (arn: %s)" % cert_arn ) res = client.list_certificates() @@ -704,7 +704,7 @@ def test_delete_certificate_validation(): client.detach_thing_principal(thingName=thing_name, principal=cert_arn) with pytest.raises(ClientError) as e: client.delete_certificate(certificateId=cert_id) - e.exception.response["Error"]["Message"].should.contain( + e.value.response["Error"]["Message"].should.contain( "Certificate policies must be detached before deletion (arn: %s)" % cert_arn ) res = client.list_certificates() @@ -800,7 +800,7 @@ def test_principal_policy(): res.should.have.key("principals").which.should.have.length_of(0) with pytest.raises(ClientError) as e: client.detach_policy(policyName=policy_name, target=cert_arn) - e.exception.response["Error"]["Code"].should.equal("ResourceNotFoundException") + e.value.response["Error"]["Code"].should.equal("ResourceNotFoundException") @mock_iot @@ -857,11 +857,11 @@ def test_principal_thing(): res = client.list_thing_principals(thingName=thing_name) res.should.have.key("principals").which.should.have.length_of(0) - with assert_raises(ClientError) as e: + with pytest.raises(ClientError) as e: client.list_thing_principals(thingName="xxx") - e.exception.response["Error"]["Code"].should.equal("ResourceNotFoundException") - e.exception.response["Error"]["Message"].should.equal( + e.value.response["Error"]["Code"].should.equal("ResourceNotFoundException") + e.value.response["Error"]["Message"].should.equal( "Failed to list principals for thing xxx because the thing does not exist in your account" ) @@ -937,9 +937,9 @@ def test_should_list_all_groups_filtered_by_parent(self): resp = client.list_thing_groups(parentGroup=self.group_name_1b) resp.should.have.key("thingGroups") resp["thingGroups"].should.have.length_of(0) - with assert_raises(ClientError) as e: + with pytest.raises(ClientError) as e: client.list_thing_groups(parentGroup="inexistant-group-name") - e.exception.response["Error"]["Code"].should.equal( + e.value.response["Error"]["Code"].should.equal( "ResourceNotFoundException" ) diff --git a/tests/test_iotdata/test_iotdata.py b/tests/test_iotdata/test_iotdata.py index caebdbde8090..bbef49348bd4 100644 --- a/tests/test_iotdata/test_iotdata.py +++ b/tests/test_iotdata/test_iotdata.py @@ -101,8 +101,8 @@ def test_update(): raw_payload = b'{"state": {"desired": {"led": "on"}}, "version": 1}' with pytest.raises(ClientError) as ex: client.update_thing_shadow(thingName=name, payload=raw_payload) - ex.exception.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(409) - ex.exception.response["Error"]["Message"].should.equal("Version conflict") + ex.value.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(409) + ex.value.response["Error"]["Message"].should.equal("Version conflict") @mock_iotdata diff --git a/tests/test_kms/test_kms.py b/tests/test_kms/test_kms.py index 64c70078e4a7..e79bf8bbf5fe 100644 --- a/tests/test_kms/test_kms.py +++ b/tests/test_kms/test_kms.py @@ -367,7 +367,7 @@ def test__create_alias__raises_if_reserved_alias(): with pytest.raises(JSONResponseError) as err: kms.create_alias(alias_name, key_id) - ex = err.exception + ex = 
err.value ex.error_message.should.be.none ex.error_code.should.equal("NotAuthorizedException") ex.body.should.equal({"__type": "NotAuthorizedException"}) @@ -395,7 +395,7 @@ def test__create_alias__raises_if_wrong_prefix(): with pytest.raises(JSONResponseError) as err: kms.create_alias("wrongprefix/my-alias", key_id) - ex = err.exception + ex = err.value ex.error_message.should.equal("Invalid identifier") ex.error_code.should.equal("ValidationException") ex.body.should.equal( @@ -418,7 +418,7 @@ def test__create_alias__raises_if_duplicate(): with pytest.raises(AlreadyExistsException) as err: kms.create_alias(alias, key_id) - ex = err.exception + ex = err.value ex.error_message.should.match( r"An alias with the name arn:aws:kms:{region}:\d{{12}}:{alias} already exists".format( **locals() @@ -452,7 +452,7 @@ def test__create_alias__raises_if_alias_has_restricted_characters(): for alias_name in alias_names_with_restricted_characters: with pytest.raises(JSONResponseError) as err: kms.create_alias(alias_name, key_id) - ex = err.exception + ex = err.value ex.body["__type"].should.equal("ValidationException") ex.body["message"].should.equal( "1 validation error detected: Value '{alias_name}' at 'aliasName' failed to satisfy constraint: Member must satisfy regular expression pattern: ^[a-zA-Z0-9:/_-]+$".format( @@ -482,7 +482,7 @@ def test__create_alias__raises_if_alias_has_colon_character(): for alias_name in alias_names_with_restricted_characters: with pytest.raises(JSONResponseError) as err: kms.create_alias(alias_name, key_id) - ex = err.exception + ex = err.value ex.body["__type"].should.equal("ValidationException") ex.body["message"].should.equal( "{alias_name} contains invalid characters for an alias".format(**locals()) @@ -517,7 +517,7 @@ def test__create_alias__raises_if_target_key_id_is_existing_alias(): with pytest.raises(JSONResponseError) as err: kms.create_alias(alias, alias) - ex = err.exception + ex = err.value ex.body["__type"].should.equal("ValidationException") ex.body["message"].should.equal("Aliases must refer to keys. 
Not aliases") ex.error_code.should.equal("ValidationException") @@ -557,7 +557,7 @@ def test__delete_alias__raises_if_wrong_prefix(): with pytest.raises(JSONResponseError) as err: kms.delete_alias("wrongprefix/my-alias") - ex = err.exception + ex = err.value ex.body["__type"].should.equal("ValidationException") ex.body["message"].should.equal("Invalid identifier") ex.error_code.should.equal("ValidationException") @@ -578,7 +578,7 @@ def test__delete_alias__raises_if_alias_is_not_found(): expected_message_match = r"Alias arn:aws:kms:{region}:[0-9]{{12}}:{alias_name} is not found.".format( region=region, alias_name=alias_name ) - ex = err.exception + ex = err.value ex.body["__type"].should.equal("NotFoundException") ex.body["message"].should.match(expected_message_match) ex.box_usage.should.be.none diff --git a/tests/test_logs/test_integration.py b/tests/test_logs/test_integration.py index bda233485901..eab839970c10 100644 --- a/tests/test_logs/test_integration.py +++ b/tests/test_logs/test_integration.py @@ -7,7 +7,7 @@ from botocore.exceptions import ClientError from io import BytesIO from moto import mock_logs, mock_lambda, mock_iam -from nose.tools import assert_raises +import pytest from zipfile import ZipFile, ZIP_DEFLATED @@ -78,7 +78,7 @@ def test_put_subscription_filter_update(): # when # only one subscription filter can be associated with a log group - with assert_raises(ClientError) as e: + with pytest.raises(ClientError) as e: client_logs.put_subscription_filter( logGroupName=log_group_name, filterName="test-2", @@ -87,7 +87,7 @@ def test_put_subscription_filter_update(): ) # then - ex = e.exception + ex = e.value ex.operation_name.should.equal("PutSubscriptionFilter") ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) ex.response["Error"]["Code"].should.contain("LimitExceededException") @@ -96,6 +96,7 @@ def test_put_subscription_filter_update(): @mock_lambda @mock_logs +@pytest.mark.network def test_put_subscription_filter_with_lambda(): # given region_name = "us-east-1" @@ -240,13 +241,13 @@ def test_delete_subscription_filter_errors(): ) # when - with assert_raises(ClientError) as e: + with pytest.raises(ClientError) as e: client_logs.delete_subscription_filter( logGroupName="not-existing-log-group", filterName="test", ) # then - ex = e.exception + ex = e.value ex.operation_name.should.equal("DeleteSubscriptionFilter") ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) ex.response["Error"]["Code"].should.contain("ResourceNotFoundException") @@ -255,13 +256,13 @@ def test_delete_subscription_filter_errors(): ) # when - with assert_raises(ClientError) as e: + with pytest.raises(ClientError) as e: client_logs.delete_subscription_filter( logGroupName="/test", filterName="wrong-filter-name", ) # then - ex = e.exception + ex = e.value ex.operation_name.should.equal("DeleteSubscriptionFilter") ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) ex.response["Error"]["Code"].should.contain("ResourceNotFoundException") @@ -278,7 +279,7 @@ def test_put_subscription_filter_errors(): client.create_log_group(logGroupName=log_group_name) # when - with assert_raises(ClientError) as e: + with pytest.raises(ClientError) as e: client.put_subscription_filter( logGroupName="not-existing-log-group", filterName="test", @@ -287,7 +288,7 @@ def test_put_subscription_filter_errors(): ) # then - ex = e.exception + ex = e.value ex.operation_name.should.equal("PutSubscriptionFilter") ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) 
ex.response["Error"]["Code"].should.contain("ResourceNotFoundException") @@ -296,7 +297,7 @@ def test_put_subscription_filter_errors(): ) # when - with assert_raises(ClientError) as e: + with pytest.raises(ClientError) as e: client.put_subscription_filter( logGroupName="/test", filterName="test", @@ -305,7 +306,7 @@ def test_put_subscription_filter_errors(): ) # then - ex = e.exception + ex = e.value ex.operation_name.should.equal("PutSubscriptionFilter") ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) ex.response["Error"]["Code"].should.contain("InvalidParameterException") @@ -315,7 +316,7 @@ def test_put_subscription_filter_errors(): ) # when - with assert_raises(ClientError) as e: + with pytest.raises(ClientError) as e: client.put_subscription_filter( logGroupName="/test", filterName="test", @@ -324,7 +325,7 @@ def test_put_subscription_filter_errors(): ) # then - ex = e.exception + ex = e.value ex.operation_name.should.equal("PutSubscriptionFilter") ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) ex.response["Error"]["Code"].should.contain("InvalidParameterException") diff --git a/tests/test_logs/test_logs.py b/tests/test_logs/test_logs.py index f693aeb1e294..fc9868ffb85a 100644 --- a/tests/test_logs/test_logs.py +++ b/tests/test_logs/test_logs.py @@ -338,7 +338,7 @@ def test_get_log_events_errors(): logStreamName=log_stream_name, nextToken="n/00000000000000000000000000000000000000000000000000000000", ) - ex = e.exception + ex = e.value ex.operation_name.should.equal("GetLogEvents") ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) ex.response["Error"]["Code"].should.equal("InvalidParameterException") @@ -352,7 +352,7 @@ def test_get_log_events_errors(): logStreamName=log_stream_name, nextToken="not-existing-token", ) - ex = e.exception + ex = e.value ex.operation_name.should.equal("GetLogEvents") ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) ex.response["Error"]["Code"].should.equal("InvalidParameterException") @@ -451,7 +451,7 @@ def test_describe_subscription_filters_errors(): client.describe_subscription_filters(logGroupName="not-existing-log-group",) # then - ex = e.exception + ex = e.value ex.operation_name.should.equal("DescribeSubscriptionFilters") ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) ex.response["Error"]["Code"].should.contain("ResourceNotFoundException") diff --git a/tests/test_organizations/test_organizations_boto3.py b/tests/test_organizations/test_organizations_boto3.py index 1d2ef371546c..07cd3afa67f6 100644 --- a/tests/test_organizations/test_organizations_boto3.py +++ b/tests/test_organizations/test_organizations_boto3.py @@ -63,7 +63,7 @@ def test_describe_organization_exception(): client = boto3.client("organizations", region_name="us-east-1") with pytest.raises(ClientError) as e: response = client.describe_organization() - ex = e.exception + ex = e.value ex.operation_name.should.equal("DescribeOrganization") ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) ex.response["Error"]["Code"].should.contain("AWSOrganizationsNotInUseException") @@ -114,7 +114,7 @@ def test_describe_organizational_unit_exception(): response = client.describe_organizational_unit( OrganizationalUnitId=utils.make_random_root_id() ) - ex = e.exception + ex = e.value ex.operation_name.should.equal("DescribeOrganizationalUnit") ex.response["Error"]["Code"].should.equal("400") ex.response["Error"]["Message"].should.contain( @@ -143,7 +143,7 @@ def 
test_list_organizational_units_for_parent_exception(): response = client.list_organizational_units_for_parent( ParentId=utils.make_random_root_id() ) - ex = e.exception + ex = e.value ex.operation_name.should.equal("ListOrganizationalUnitsForParent") ex.response["Error"]["Code"].should.equal("400") ex.response["Error"]["Message"].should.contain("ParentNotFoundException") @@ -195,7 +195,7 @@ def test_describe_account_exception(): client = boto3.client("organizations", region_name="us-east-1") with pytest.raises(ClientError) as e: response = client.describe_account(AccountId=utils.make_random_account_id()) - ex = e.exception + ex = e.value ex.operation_name.should.equal("DescribeAccount") ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) ex.response["Error"]["Code"].should.contain("AccountNotFoundException") @@ -339,13 +339,13 @@ def test_list_children_exception(): response = client.list_children( ParentId=utils.make_random_root_id(), ChildType="ACCOUNT" ) - ex = e.exception + ex = e.value ex.operation_name.should.equal("ListChildren") ex.response["Error"]["Code"].should.equal("400") ex.response["Error"]["Message"].should.contain("ParentNotFoundException") with pytest.raises(ClientError) as e: response = client.list_children(ParentId=root_id, ChildType="BLEE") - ex = e.exception + ex = e.value ex.operation_name.should.equal("ListChildren") ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) ex.response["Error"]["Code"].should.contain("InvalidInputException") @@ -396,7 +396,7 @@ def test_create_policy_errors(): ) # then - ex = e.exception + ex = e.value ex.operation_name.should.equal("CreatePolicy") ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) ex.response["Error"]["Code"].should.contain("InvalidInputException") @@ -429,13 +429,13 @@ def test_describe_policy_exception(): policy_id = "p-47fhe9s3" with pytest.raises(ClientError) as e: response = client.describe_policy(PolicyId=policy_id) - ex = e.exception + ex = e.value ex.operation_name.should.equal("DescribePolicy") ex.response["Error"]["Code"].should.equal("400") ex.response["Error"]["Message"].should.contain("PolicyNotFoundException") with pytest.raises(ClientError) as e: response = client.describe_policy(PolicyId="meaninglessstring") - ex = e.exception + ex = e.value ex.operation_name.should.equal("DescribePolicy") ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) ex.response["Error"]["Code"].should.contain("InvalidInputException") @@ -514,9 +514,9 @@ def test_detach_policy_root_ou_not_found_exception(): )["Policy"]["PolicySummary"]["Id"] client.attach_policy(PolicyId=policy_id, TargetId=root_id) client.attach_policy(PolicyId=policy_id, TargetId=account_id) - with assert_raises(ClientError) as e: + with pytest.raises(ClientError) as e: response = client.detach_policy(PolicyId=policy_id, TargetId="r-xy85") - ex = e.exception + ex = e.value ex.operation_name.should.equal("DetachPolicy") ex.response["Error"]["Code"].should.equal("400") ex.response["Error"]["Message"].should.contain( @@ -539,11 +539,11 @@ def test_detach_policy_ou_not_found_exception(): Type="SERVICE_CONTROL_POLICY", )["Policy"]["PolicySummary"]["Id"] client.attach_policy(PolicyId=policy_id, TargetId=ou_id) - with assert_raises(ClientError) as e: + with pytest.raises(ClientError) as e: response = client.detach_policy( PolicyId=policy_id, TargetId="ou-zx86-z3x4yr2t7" ) - ex = e.exception + ex = e.value ex.operation_name.should.equal("DetachPolicy") ex.response["Error"]["Code"].should.equal("400") 
ex.response["Error"]["Message"].should.contain( @@ -565,9 +565,9 @@ def test_detach_policy_account_id_not_found_exception(): Type="SERVICE_CONTROL_POLICY", )["Policy"]["PolicySummary"]["Id"] client.attach_policy(PolicyId=policy_id, TargetId=account_id) - with assert_raises(ClientError) as e: + with pytest.raises(ClientError) as e: response = client.detach_policy(PolicyId=policy_id, TargetId="111619863336") - ex = e.exception + ex = e.value ex.operation_name.should.equal("DetachPolicy") ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) ex.response["Error"]["Code"].should.contain("AccountNotFoundException") @@ -591,9 +591,9 @@ def test_detach_policy_invalid_target_exception(): Type="SERVICE_CONTROL_POLICY", )["Policy"]["PolicySummary"]["Id"] client.attach_policy(PolicyId=policy_id, TargetId=ou_id) - with assert_raises(ClientError) as e: + with pytest.raises(ClientError) as e: response = client.detach_policy(PolicyId=policy_id, TargetId="invalidtargetid") - ex = e.exception + ex = e.value ex.operation_name.should.equal("DetachPolicy") ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) ex.response["Error"]["Code"].should.contain("InvalidInputException") @@ -628,7 +628,7 @@ def test_delete_policy_exception(): non_existent_policy_id = utils.make_random_policy_id() with pytest.raises(ClientError) as e: response = client.delete_policy(PolicyId=non_existent_policy_id) - ex = e.exception + ex = e.value ex.operation_name.should.equal("DeletePolicy") ex.response["Error"]["Code"].should.equal("400") ex.response["Error"]["Message"].should.contain("PolicyNotFoundException") @@ -644,7 +644,7 @@ def test_delete_policy_exception(): client.attach_policy(PolicyId=policy_id, TargetId=root_id) with pytest.raises(ClientError) as e: response = client.delete_policy(PolicyId=policy_id) - ex = e.exception + ex = e.value ex.operation_name.should.equal("DeletePolicy") ex.response["Error"]["Code"].should.equal("400") ex.response["Error"]["Message"].should.contain("PolicyInUseException") @@ -665,7 +665,7 @@ def test_attach_policy_exception(): )["Policy"]["PolicySummary"]["Id"] with pytest.raises(ClientError) as e: response = client.attach_policy(PolicyId=policy_id, TargetId=root_id) - ex = e.exception + ex = e.value ex.operation_name.should.equal("AttachPolicy") ex.response["Error"]["Code"].should.equal("400") ex.response["Error"]["Message"].should.contain( @@ -673,7 +673,7 @@ def test_attach_policy_exception(): ) with pytest.raises(ClientError) as e: response = client.attach_policy(PolicyId=policy_id, TargetId=ou_id) - ex = e.exception + ex = e.value ex.operation_name.should.equal("AttachPolicy") ex.response["Error"]["Code"].should.equal("400") ex.response["Error"]["Message"].should.contain( @@ -681,7 +681,7 @@ def test_attach_policy_exception(): ) with pytest.raises(ClientError) as e: response = client.attach_policy(PolicyId=policy_id, TargetId=account_id) - ex = e.exception + ex = e.value ex.operation_name.should.equal("AttachPolicy") ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) ex.response["Error"]["Code"].should.contain("AccountNotFoundException") @@ -692,7 +692,7 @@ def test_attach_policy_exception(): response = client.attach_policy( PolicyId=policy_id, TargetId="meaninglessstring" ) - ex = e.exception + ex = e.value ex.operation_name.should.equal("AttachPolicy") ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) ex.response["Error"]["Code"].should.contain("InvalidInputException") @@ -731,7 +731,7 @@ def test_update_policy_exception(): 
     non_existent_policy_id = utils.make_random_policy_id()
     with pytest.raises(ClientError) as e:
         response = client.update_policy(PolicyId=non_existent_policy_id)
-    ex = e.exception
+    ex = e.value
     ex.operation_name.should.equal("UpdatePolicy")
     ex.response["Error"]["Code"].should.equal("400")
     ex.response["Error"]["Message"].should.contain("PolicyNotFoundException")
@@ -795,7 +795,7 @@ def test_list_policies_for_target_exception():
         response = client.list_policies_for_target(
             TargetId=ou_id, Filter="SERVICE_CONTROL_POLICY"
         )
-    ex = e.exception
+    ex = e.value
     ex.operation_name.should.equal("ListPoliciesForTarget")
     ex.response["Error"]["Code"].should.equal("400")
     ex.response["Error"]["Message"].should.contain(
@@ -805,7 +805,7 @@ def test_list_policies_for_target_exception():
         response = client.list_policies_for_target(
             TargetId=account_id, Filter="SERVICE_CONTROL_POLICY"
         )
-    ex = e.exception
+    ex = e.value
     ex.operation_name.should.equal("ListPoliciesForTarget")
     ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400)
     ex.response["Error"]["Code"].should.contain("AccountNotFoundException")
@@ -816,7 +816,7 @@ def test_list_policies_for_target_exception():
         response = client.list_policies_for_target(
             TargetId="meaninglessstring", Filter="SERVICE_CONTROL_POLICY"
         )
-    ex = e.exception
+    ex = e.value
     ex.operation_name.should.equal("ListPoliciesForTarget")
     ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400)
     ex.response["Error"]["Code"].should.contain("InvalidInputException")
@@ -830,7 +830,7 @@ def test_list_policies_for_target_exception():
     )
 
     # then
-    ex = e.exception
+    ex = e.value
     ex.operation_name.should.equal("ListPoliciesForTarget")
     ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400)
     ex.response["Error"]["Code"].should.contain("TargetNotFoundException")
@@ -844,7 +844,7 @@ def test_list_policies_for_target_exception():
         client.list_policies_for_target(TargetId=root_id, Filter="MOTO")
 
     # then
-    ex = e.exception
+    ex = e.value
     ex.operation_name.should.equal("ListPoliciesForTarget")
     ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400)
     ex.response["Error"]["Code"].should.contain("InvalidInputException")
@@ -889,13 +889,13 @@ def test_list_targets_for_policy_exception():
     policy_id = "p-47fhe9s3"
     with pytest.raises(ClientError) as e:
         response = client.list_targets_for_policy(PolicyId=policy_id)
-    ex = e.exception
+    ex = e.value
     ex.operation_name.should.equal("ListTargetsForPolicy")
     ex.response["Error"]["Code"].should.equal("400")
     ex.response["Error"]["Message"].should.contain("PolicyNotFoundException")
     with pytest.raises(ClientError) as e:
         response = client.list_targets_for_policy(PolicyId="meaninglessstring")
-    ex = e.exception
+    ex = e.value
     ex.operation_name.should.equal("ListTargetsForPolicy")
     ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400)
     ex.response["Error"]["Code"].should.contain("InvalidInputException")
@@ -933,7 +933,7 @@ def test_tag_resource_errors():
         client.tag_resource(
             ResourceId="000000000000", Tags=[{"Key": "key", "Value": "value"},],
         )
-    ex = e.exception
+    ex = e.value
     ex.operation_name.should.equal("TagResource")
     ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400)
     ex.response["Error"]["Code"].should.contain("InvalidInputException")
@@ -963,7 +963,7 @@ def test_list_tags_for_resource_errors():
 
     with pytest.raises(ClientError) as e:
         client.list_tags_for_resource(ResourceId="000000000000")
-    ex = e.exception
+    ex = e.value
     ex.operation_name.should.equal("ListTagsForResource")
ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) ex.response["Error"]["Code"].should.contain("InvalidInputException") @@ -1000,7 +1000,7 @@ def test_untag_resource_errors(): with pytest.raises(ClientError) as e: client.untag_resource(ResourceId="000000000000", TagKeys=["key"]) - ex = e.exception + ex = e.value ex.operation_name.should.equal("UntagResource") ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) ex.response["Error"]["Code"].should.contain("InvalidInputException") @@ -1039,7 +1039,7 @@ def test_update_organizational_unit_duplicate_error(): client.update_organizational_unit( OrganizationalUnitId=response["OrganizationalUnit"]["Id"], Name=ou_name ) - exc = e.exception + exc = e.value exc.operation_name.should.equal("UpdateOrganizationalUnit") exc.response["Error"]["Code"].should.contain("DuplicateOrganizationalUnitException") exc.response["Error"]["Message"].should.equal( @@ -1083,7 +1083,7 @@ def test_enable_aws_service_access(): with pytest.raises(ClientError) as e: client.enable_aws_service_access(ServicePrincipal="moto.amazonaws.com") - ex = e.exception + ex = e.value ex.operation_name.should.equal("EnableAWSServiceAccess") ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) ex.response["Error"]["Code"].should.contain("InvalidInputException") @@ -1144,7 +1144,7 @@ def test_disable_aws_service_access_errors(): with pytest.raises(ClientError) as e: client.disable_aws_service_access(ServicePrincipal="moto.amazonaws.com") - ex = e.exception + ex = e.value ex.operation_name.should.equal("DisableAWSServiceAccess") ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) ex.response["Error"]["Code"].should.contain("InvalidInputException") @@ -1205,7 +1205,7 @@ def test_register_delegated_administrator_errors(): ) # then - ex = e.exception + ex = e.value ex.operation_name.should.equal("RegisterDelegatedAdministrator") ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) ex.response["Error"]["Code"].should.contain("ConstraintViolationException") @@ -1221,7 +1221,7 @@ def test_register_delegated_administrator_errors(): ) # then - ex = e.exception + ex = e.value ex.operation_name.should.equal("RegisterDelegatedAdministrator") ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) ex.response["Error"]["Code"].should.contain("AccountNotFoundException") @@ -1237,7 +1237,7 @@ def test_register_delegated_administrator_errors(): ) # then - ex = e.exception + ex = e.value ex.operation_name.should.equal("RegisterDelegatedAdministrator") ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) ex.response["Error"]["Code"].should.contain("InvalidInputException") @@ -1253,7 +1253,7 @@ def test_register_delegated_administrator_errors(): ) # then - ex = e.exception + ex = e.value ex.operation_name.should.equal("RegisterDelegatedAdministrator") ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) ex.response["Error"]["Code"].should.contain("AccountAlreadyRegisteredException") @@ -1323,7 +1323,7 @@ def test_list_delegated_administrators_erros(): client.list_delegated_administrators(ServicePrincipal="moto.amazonaws.com") # then - ex = e.exception + ex = e.value ex.operation_name.should.equal("ListDelegatedAdministrators") ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) ex.response["Error"]["Code"].should.contain("InvalidInputException") @@ -1369,7 +1369,7 @@ def test_list_delegated_services_for_account_erros(): 
         client.list_delegated_services_for_account(AccountId="000000000000")
 
     # then
-    ex = e.exception
+    ex = e.value
     ex.operation_name.should.equal("ListDelegatedServicesForAccount")
     ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400)
     ex.response["Error"]["Code"].should.contain("AWSOrganizationsNotInUseException")
@@ -1383,7 +1383,7 @@ def test_list_delegated_services_for_account_erros():
         client.list_delegated_services_for_account(AccountId=ACCOUNT_ID)
 
     # then
-    ex = e.exception
+    ex = e.value
     ex.operation_name.should.equal("ListDelegatedServicesForAccount")
     ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400)
     ex.response["Error"]["Code"].should.contain("AccountNotRegisteredException")
@@ -1431,7 +1431,7 @@ def test_deregister_delegated_administrator_erros():
     )
 
     # then
-    ex = e.exception
+    ex = e.value
     ex.operation_name.should.equal("DeregisterDelegatedAdministrator")
     ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400)
     ex.response["Error"]["Code"].should.contain("ConstraintViolationException")
@@ -1447,7 +1447,7 @@ def test_deregister_delegated_administrator_erros():
     )
 
     # then
-    ex = e.exception
+    ex = e.value
     ex.operation_name.should.equal("DeregisterDelegatedAdministrator")
     ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400)
     ex.response["Error"]["Code"].should.contain("AccountNotFoundException")
@@ -1463,7 +1463,7 @@ def test_deregister_delegated_administrator_erros():
     )
 
     # then
-    ex = e.exception
+    ex = e.value
     ex.operation_name.should.equal("DeregisterDelegatedAdministrator")
     ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400)
     ex.response["Error"]["Code"].should.contain("AccountNotRegisteredException")
@@ -1484,7 +1484,7 @@ def test_deregister_delegated_administrator_erros():
     )
 
     # then
-    ex = e.exception
+    ex = e.value
     ex.operation_name.should.equal("DeregisterDelegatedAdministrator")
     ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400)
     ex.response["Error"]["Code"].should.contain("InvalidInputException")
@@ -1535,7 +1535,7 @@ def test_enable_policy_type_errors():
     )
 
     # then
-    ex = e.exception
+    ex = e.value
     ex.operation_name.should.equal("EnablePolicyType")
     ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400)
     ex.response["Error"]["Code"].should.contain("RootNotFoundException")
@@ -1549,7 +1549,7 @@ def test_enable_policy_type_errors():
         client.enable_policy_type(RootId=root_id, PolicyType="SERVICE_CONTROL_POLICY")
 
     # then
-    ex = e.exception
+    ex = e.value
     ex.operation_name.should.equal("EnablePolicyType")
     ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400)
     ex.response["Error"]["Code"].should.contain("PolicyTypeAlreadyEnabledException")
@@ -1563,7 +1563,7 @@ def test_enable_policy_type_errors():
         client.enable_policy_type(RootId=root_id, PolicyType="MOTO")
 
     # then
-    ex = e.exception
+    ex = e.value
     ex.operation_name.should.equal("EnablePolicyType")
     ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400)
     ex.response["Error"]["Code"].should.contain("InvalidInputException")
@@ -1610,7 +1610,7 @@ def test_disable_policy_type_errors():
     )
 
     # then
-    ex = e.exception
+    ex = e.value
     ex.operation_name.should.equal("DisablePolicyType")
     ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400)
     ex.response["Error"]["Code"].should.contain("RootNotFoundException")
@@ -1626,7 +1626,7 @@ def test_disable_policy_type_errors():
     )
 
     # then
-    ex = e.exception
+    ex = e.value
     ex.operation_name.should.equal("DisablePolicyType")
ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) ex.response["Error"]["Code"].should.contain("PolicyTypeNotEnabledException") @@ -1640,7 +1640,7 @@ def test_disable_policy_type_errors(): client.disable_policy_type(RootId=root_id, PolicyType="MOTO") # then - ex = e.exception + ex = e.value ex.operation_name.should.equal("DisablePolicyType") ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) ex.response["Error"]["Code"].should.contain("InvalidInputException") diff --git a/tests/test_ram/test_ram.py b/tests/test_ram/test_ram.py index dbc57a2c06e7..73a23331bbad 100644 --- a/tests/test_ram/test_ram.py +++ b/tests/test_ram/test_ram.py @@ -67,7 +67,7 @@ def test_create_resource_share_errors(): # when with pytest.raises(ClientError) as e: client.create_resource_share(name="test", resourceArns=["inalid-arn"]) - ex = e.exception + ex = e.value ex.operation_name.should.equal("CreateResourceShare") ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) ex.response["Error"]["Code"].should.contain("MalformedArnException") @@ -82,7 +82,7 @@ def test_create_resource_share_errors(): client.create_resource_share( name="test", resourceArns=["arn:aws:iam::{}:role/test".format(ACCOUNT_ID)] ) - ex = e.exception + ex = e.value ex.operation_name.should.equal("CreateResourceShare") ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) ex.response["Error"]["Code"].should.contain("MalformedArnException") @@ -102,7 +102,7 @@ def test_create_resource_share_errors(): ) ], ) - ex = e.exception + ex = e.value ex.operation_name.should.equal("CreateResourceShare") ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) ex.response["Error"]["Code"].should.contain("InvalidParameterException") @@ -174,7 +174,7 @@ def test_create_resource_share_with_organization_errors(): ) ], ) - ex = e.exception + ex = e.value ex.operation_name.should.equal("CreateResourceShare") ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) ex.response["Error"]["Code"].should.contain("UnknownResourceException") @@ -196,7 +196,7 @@ def test_create_resource_share_with_organization_errors(): ) ], ) - ex = e.exception + ex = e.value ex.operation_name.should.equal("CreateResourceShare") ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) ex.response["Error"]["Code"].should.contain("UnknownResourceException") @@ -238,7 +238,7 @@ def test_get_resource_shares_errors(): # when with pytest.raises(ClientError) as e: client.get_resource_shares(resourceOwner="invalid") - ex = e.exception + ex = e.value ex.operation_name.should.equal("GetResourceShares") ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) ex.response["Error"]["Code"].should.contain("InvalidParameterException") @@ -289,7 +289,7 @@ def test_update_resource_share_errors(): ), name="test-update", ) - ex = e.exception + ex = e.value ex.operation_name.should.equal("UpdateResourceShare") ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) ex.response["Error"]["Code"].should.contain("UnknownResourceException") @@ -334,7 +334,7 @@ def test_delete_resource_share_errors(): ACCOUNT_ID ) ) - ex = e.exception + ex = e.value ex.operation_name.should.equal("DeleteResourceShare") ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) ex.response["Error"]["Code"].should.contain("UnknownResourceException") @@ -370,7 +370,7 @@ def test_enable_sharing_with_aws_organization_errors(): # when with pytest.raises(ClientError) as e: 
         client.enable_sharing_with_aws_organization()
-    ex = e.exception
+    ex = e.value
     ex.operation_name.should.equal("EnableSharingWithAwsOrganization")
     ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400)
     ex.response["Error"]["Code"].should.contain("OperationNotPermittedException")
diff --git a/tests/test_s3/test_s3.py b/tests/test_s3/test_s3.py
index 933d02c6d0da..67f247f1840f 100644
--- a/tests/test_s3/test_s3.py
+++ b/tests/test_s3/test_s3.py
@@ -666,7 +666,7 @@ def test_delete_keys_invalid():
 def test_boto3_delete_empty_keys_list():
     with pytest.raises(ClientError) as err:
         boto3.client("s3").delete_objects(Bucket="foobar", Delete={"Objects": []})
-    assert err.exception.response["Error"]["Code"] == "MalformedXML"
+    assert err.value.response["Error"]["Code"] == "MalformedXML"
 
 
 @mock_s3_deprecated
@@ -1016,7 +1016,7 @@ def test_s3_object_in_public_bucket():
 
     with pytest.raises(ClientError) as exc:
         s3_anonymous.Object(key="file.txt", bucket_name="test-bucket").get()
-    exc.exception.response["Error"]["Code"].should.equal("403")
+    exc.value.response["Error"]["Code"].should.equal("403")
 
 
 @mock_s3
@@ -1090,7 +1090,7 @@ def test_s3_object_in_private_bucket():
 
     with pytest.raises(ClientError) as exc:
         s3_anonymous.Object(key="file.txt", bucket_name="test-bucket").get()
-    exc.exception.response["Error"]["Code"].should.equal("403")
+    exc.value.response["Error"]["Code"].should.equal("403")
 
     bucket.put_object(ACL="public-read", Body=b"ABCD", Key="file.txt")
     contents = (
@@ -1182,7 +1182,7 @@ def test_s3_location_should_error_outside_useast1():
 
     with pytest.raises(ClientError) as e:
         s3.create_bucket(Bucket=bucket_name)
-    e.exception.response["Error"]["Message"].should.equal(
+    e.value.response["Error"]["Message"].should.equal(
         "The unspecified location constraint is incompatible for the region specific endpoint this request was sent to."
     )
@@ -1201,13 +1201,13 @@ def test_get_public_access_block_for_account():
 
     # With an invalid account ID:
     with pytest.raises(ClientError) as ce:
         client.get_public_access_block(AccountId="111111111111")
-    assert ce.exception.response["Error"]["Code"] == "AccessDenied"
+    assert ce.value.response["Error"]["Code"] == "AccessDenied"
 
     # Without one defined:
     with pytest.raises(ClientError) as ce:
         client.get_public_access_block(AccountId=ACCOUNT_ID)
     assert (
-        ce.exception.response["Error"]["Code"]
+        ce.value.response["Error"]["Code"]
         == "NoSuchPublicAccessBlockConfiguration"
     )
@@ -1217,17 +1217,17 @@ def test_get_public_access_block_for_account():
             AccountId="111111111111",
             PublicAccessBlockConfiguration={"BlockPublicAcls": True},
         )
-    assert ce.exception.response["Error"]["Code"] == "AccessDenied"
+    assert ce.value.response["Error"]["Code"] == "AccessDenied"
 
     # Put with an invalid PAB:
     with pytest.raises(ClientError) as ce:
         client.put_public_access_block(
             AccountId=ACCOUNT_ID, PublicAccessBlockConfiguration={}
         )
-    assert ce.exception.response["Error"]["Code"] == "InvalidRequest"
+    assert ce.value.response["Error"]["Code"] == "InvalidRequest"
     assert (
         "Must specify at least one configuration."
-        in ce.exception.response["Error"]["Message"]
+        in ce.value.response["Error"]["Message"]
     )
 
     # Correct PAB:
@@ -1256,7 +1256,7 @@ def test_get_public_access_block_for_account():
     # Delete with an invalid account ID:
     with pytest.raises(ClientError) as ce:
         client.delete_public_access_block(AccountId="111111111111")
-    assert ce.exception.response["Error"]["Code"] == "AccessDenied"
+    assert ce.value.response["Error"]["Code"] == "AccessDenied"
 
     # Delete successfully:
     client.delete_public_access_block(AccountId=ACCOUNT_ID)
@@ -1265,7 +1265,7 @@ def test_get_public_access_block_for_account():
     with pytest.raises(ClientError) as ce:
         client.get_public_access_block(AccountId=ACCOUNT_ID)
     assert (
-        ce.exception.response["Error"]["Code"]
+        ce.value.response["Error"]["Code"]
         == "NoSuchPublicAccessBlockConfiguration"
     )
@@ -1466,7 +1466,7 @@ def test_config_get_account_pab():
             resourceType="AWS::S3::AccountPublicAccessBlock", resourceId=ACCOUNT_ID
         )
     assert (
-        ce.exception.response["Error"]["Code"] == "ResourceNotDiscoveredException"
+        ce.value.response["Error"]["Code"] == "ResourceNotDiscoveredException"
     )
     # aggregate
     result = config_client.batch_get_resource_config(
@@ -1635,7 +1635,7 @@ def test_policy():
     with pytest.raises(S3ResponseError) as err:
         bucket.get_policy()
 
-    ex = err.exception
+    ex = err.value
     ex.box_usage.should.be.none
     ex.error_code.should.equal("NoSuchBucketPolicy")
     ex.message.should.equal("The bucket policy does not exist")
@@ -1979,7 +1979,7 @@ def test_bucket_create_duplicate():
         s3.create_bucket(
             Bucket="blah", CreateBucketConfiguration={"LocationConstraint": "us-west-2"}
         )
-    exc.exception.response["Error"]["Code"].should.equal("BucketAlreadyExists")
+    exc.value.response["Error"]["Code"].should.equal("BucketAlreadyExists")
@@ -1990,7 +1990,7 @@ def test_bucket_create_force_us_east_1():
             Bucket="blah",
             CreateBucketConfiguration={"LocationConstraint": DEFAULT_REGION_NAME},
         )
-    exc.exception.response["Error"]["Code"].should.equal("InvalidLocationConstraint")
+    exc.value.response["Error"]["Code"].should.equal("InvalidLocationConstraint")
@@ -2012,8 +2012,8 @@ def test_bucket_create_empty_bucket_configuration_should_return_malformed_xml_er
     s3 = boto3.resource("s3", region_name="us-east-1")
     with pytest.raises(ClientError) as e:
         s3.create_bucket(Bucket="whatever", CreateBucketConfiguration={})
-    e.exception.response["Error"]["Code"].should.equal("MalformedXML")
-    e.exception.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400)
+    e.value.response["Error"]["Code"].should.equal("MalformedXML")
+    e.value.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400)
@@ -2031,7 +2031,7 @@ def test_boto3_head_object():
         s3.Object("blah", "hello2.txt").meta.client.head_object(
             Bucket="blah", Key="hello_bad.txt"
         )
-    e.exception.response["Error"]["Code"].should.equal("404")
+    e.value.response["Error"]["Code"].should.equal("404")
@@ -2079,7 +2079,7 @@ def test_boto3_get_object():
     with pytest.raises(ClientError) as e:
         s3.Object("blah", "hello2.txt").get()
 
-    e.exception.response["Error"]["Code"].should.equal("NoSuchKey")
+    e.value.response["Error"]["Code"].should.equal("NoSuchKey")
@@ -2108,7 +2108,7 @@ def test_boto3_get_missing_object_with_part_number():
             Bucket="blah", Key="hello.txt", PartNumber=123
         )
 
-    e.exception.response["Error"]["Code"].should.equal("404")
+    e.value.response["Error"]["Code"].should.equal("404")
@@ -2181,7 +2181,7 @@ def test_boto3_copy_object_with_versioning():
             Bucket="blah",
             Key="test5",
         )
-    e.exception.response["Error"]["Code"].should.equal("404")
+    e.value.response["Error"]["Code"].should.equal("404")
 
     response = client.create_multipart_upload(Bucket="blah", Key="test4")
     upload_id = response["UploadId"]
@@ -2215,7 +2215,7 @@ def test_s3_abort_multipart_data_with_invalid_upload_and_key():
         client.abort_multipart_upload(
             Bucket="blah", Key="foobar", UploadId="dummy_upload_id"
         )
-    err.exception.response["Error"]["Code"].should.equal("NoSuchUpload")
+    err.value.response["Error"]["Code"].should.equal("NoSuchUpload")
@@ -2365,7 +2365,7 @@ def test_boto3_get_object_if_modified_since():
             Key=key,
             IfModifiedSince=datetime.datetime.utcnow() + datetime.timedelta(hours=1),
         )
-    e = err.exception
+    e = err.value
     e.response["Error"].should.equal({"Code": "304", "Message": "Not Modified"})
@@ -2379,13 +2379,13 @@ def test_boto3_get_object_if_unmodified_since():
 
     s3.put_object(Bucket=bucket_name, Key=key, Body="test")
 
-    with assert_raises(botocore.exceptions.ClientError) as err:
+    with pytest.raises(botocore.exceptions.ClientError) as err:
         s3.get_object(
             Bucket=bucket_name,
             Key=key,
             IfUnmodifiedSince=datetime.datetime.utcnow() - datetime.timedelta(hours=1),
         )
-    e = err.exception
+    e = err.value
     e.response["Error"]["Code"].should.equal("PreconditionFailed")
     e.response["Error"]["Condition"].should.equal("If-Unmodified-Since")
@@ -2400,11 +2400,11 @@ def test_boto3_get_object_if_match():
 
     s3.put_object(Bucket=bucket_name, Key=key, Body="test")
 
-    with assert_raises(botocore.exceptions.ClientError) as err:
+    with pytest.raises(botocore.exceptions.ClientError) as err:
         s3.get_object(
             Bucket=bucket_name, Key=key, IfMatch='"hello"',
         )
-    e = err.exception
+    e = err.value
     e.response["Error"]["Code"].should.equal("PreconditionFailed")
     e.response["Error"]["Condition"].should.equal("If-Match")
@@ -2419,11 +2419,11 @@ def test_boto3_get_object_if_none_match():
 
     etag = s3.put_object(Bucket=bucket_name, Key=key, Body="test")["ETag"]
 
-    with assert_raises(botocore.exceptions.ClientError) as err:
+    with pytest.raises(botocore.exceptions.ClientError) as err:
         s3.get_object(
             Bucket=bucket_name, Key=key, IfNoneMatch=etag,
         )
-    e = err.exception
+    e = err.value
     e.response["Error"].should.equal({"Code": "304", "Message": "Not Modified"})
@@ -2443,7 +2443,7 @@ def test_boto3_head_object_if_modified_since():
             Key=key,
             IfModifiedSince=datetime.datetime.utcnow() + datetime.timedelta(hours=1),
         )
-    e = err.exception
+    e = err.value
     e.response["Error"].should.equal({"Code": "304", "Message": "Not Modified"})
@@ -2457,13 +2457,13 @@ def test_boto3_head_object_if_unmodified_since():
 
     s3.put_object(Bucket=bucket_name, Key=key, Body="test")
 
-    with assert_raises(botocore.exceptions.ClientError) as err:
+    with pytest.raises(botocore.exceptions.ClientError) as err:
         s3.head_object(
             Bucket=bucket_name,
             Key=key,
             IfUnmodifiedSince=datetime.datetime.utcnow() - datetime.timedelta(hours=1),
         )
-    e = err.exception
+    e = err.value
     e.response["Error"].should.equal({"Code": "412", "Message": "Precondition Failed"})
@@ -2477,11 +2477,11 @@ def test_boto3_head_object_if_match():
 
     s3.put_object(Bucket=bucket_name, Key=key, Body="test")
 
-    with assert_raises(botocore.exceptions.ClientError) as err:
+    with pytest.raises(botocore.exceptions.ClientError) as err:
         s3.head_object(
             Bucket=bucket_name, Key=key, IfMatch='"hello"',
         )
-    e = err.exception
+    e = err.value
     e.response["Error"].should.equal({"Code": "412", "Message": "Precondition Failed"})
@@ -2495,11 +2495,11 @@ def test_boto3_head_object_if_none_match():
 
     etag = s3.put_object(Bucket=bucket_name, Key=key, Body="test")["ETag"]
 
-    with assert_raises(botocore.exceptions.ClientError) as err:
+    with pytest.raises(botocore.exceptions.ClientError) as err:
         s3.head_object(
             Bucket=bucket_name, Key=key, IfNoneMatch=etag,
         )
-    e = err.exception
+    e = err.value
     e.response["Error"].should.equal({"Code": "304", "Message": "Not Modified"})
@@ -2642,7 +2642,7 @@ def test_boto3_put_bucket_tagging():
             ]
         },
     )
-    e = err.exception
+    e = err.value
     e.response["Error"]["Code"].should.equal("InvalidTag")
     e.response["Error"]["Message"].should.equal(
         "Cannot provide multiple Tags with the same key"
@@ -2654,7 +2654,7 @@ def test_boto3_put_bucket_tagging():
             Bucket=bucket_name,
             Tagging={"TagSet": [{"Key": "aws:sometag", "Value": "nope"}]},
         )
-    e = ce.exception
+    e = ce.value
     e.response["Error"]["Code"].should.equal("InvalidTag")
     e.response["Error"]["Message"].should.equal(
         "System tags cannot be added/updated by requester"
@@ -2693,7 +2693,7 @@ def test_boto3_get_bucket_tagging():
     with pytest.raises(ClientError) as err:
         s3.get_bucket_tagging(Bucket=bucket_name)
 
-    e = err.exception
+    e = err.value
     e.response["Error"]["Code"].should.equal("NoSuchTagSet")
     e.response["Error"]["Message"].should.equal("The TagSet does not exist")
@@ -2720,7 +2720,7 @@ def test_boto3_delete_bucket_tagging():
     with pytest.raises(ClientError) as err:
         s3.get_bucket_tagging(Bucket=bucket_name)
 
-    e = err.exception
+    e = err.value
     e.response["Error"]["Code"].should.equal("NoSuchTagSet")
     e.response["Error"]["Message"].should.equal("The TagSet does not exist")
@@ -2764,7 +2764,7 @@ def test_boto3_put_bucket_cors():
             ]
         },
     )
-    e = err.exception
+    e = err.value
     e.response["Error"]["Code"].should.equal("InvalidRequest")
     e.response["Error"]["Message"].should.equal(
         "Found unsupported HTTP method in CORS config. "
         "Unsupported method is NOTREAL"
     )
@@ -2772,7 +2772,7 @@ def test_boto3_put_bucket_cors():
     with pytest.raises(ClientError) as err:
         s3.put_bucket_cors(Bucket=bucket_name, CORSConfiguration={"CORSRules": []})
-    e = err.exception
+    e = err.value
     e.response["Error"]["Code"].should.equal("MalformedXML")
 
     # And 101:
@@ -2781,7 +2781,7 @@ def test_boto3_put_bucket_cors():
         s3.put_bucket_cors(
             Bucket=bucket_name, CORSConfiguration={"CORSRules": many_rules}
         )
-    e = err.exception
+    e = err.value
     e.response["Error"]["Code"].should.equal("MalformedXML")
@@ -2795,7 +2795,7 @@ def test_boto3_get_bucket_cors():
     with pytest.raises(ClientError) as err:
         s3.get_bucket_cors(Bucket=bucket_name)
 
-    e = err.exception
+    e = err.value
     e.response["Error"]["Code"].should.equal("NoSuchCORSConfiguration")
     e.response["Error"]["Message"].should.equal("The CORS configuration does not exist")
@@ -2845,7 +2845,7 @@ def test_boto3_delete_bucket_cors():
     with pytest.raises(ClientError) as err:
         s3.get_bucket_cors(Bucket=bucket_name)
 
-    e = err.exception
+    e = err.value
     e.response["Error"]["Code"].should.equal("NoSuchCORSConfiguration")
     e.response["Error"]["Message"].should.equal("The CORS configuration does not exist")
@@ -2920,7 +2920,7 @@ def test_put_bucket_acl_body():
             ]
         },
     )
-    assert err.exception.response["Error"]["Code"] == "MalformedACLError"
+    assert err.value.response["Error"]["Code"] == "MalformedACLError"
 
     # With incorrect permission:
     with pytest.raises(ClientError) as err:
@@ -2939,7 +2939,7 @@ def test_put_bucket_acl_body():
             "Owner": bucket_owner,
         },
     )
-    assert err.exception.response["Error"]["Code"] == "MalformedACLError"
+    assert err.value.response["Error"]["Code"] == "MalformedACLError"
 
     # Clear the ACLs:
     result = s3.put_bucket_acl(
@@ -3199,9 +3199,9 @@ def test_put_bucket_notification_errors():
         },
     )
 
-    assert err.exception.response["Error"]["Code"] == "InvalidArgument"
+    assert err.value.response["Error"]["Code"] == "InvalidArgument"
     assert (
-        err.exception.response["Error"]["Message"] == "The ARN is not well formed"
+        err.value.response["Error"]["Message"] == "The ARN is not well formed"
     )
 
     # Region not the same as the bucket:
@@ -3218,9 +3218,9 @@ def test_put_bucket_notification_errors():
         },
     )
 
-    assert err.exception.response["Error"]["Code"] == "InvalidArgument"
+    assert err.value.response["Error"]["Code"] == "InvalidArgument"
     assert (
-        err.exception.response["Error"]["Message"]
+        err.value.response["Error"]["Message"]
         == "The notification destination service region is not valid for the bucket location constraint"
     )
@@ -3237,9 +3237,9 @@ def test_put_bucket_notification_errors():
             ]
         },
     )
-    assert err.exception.response["Error"]["Code"] == "InvalidArgument"
+    assert err.value.response["Error"]["Code"] == "InvalidArgument"
     assert (
-        err.exception.response["Error"]["Message"]
+        err.value.response["Error"]["Message"]
         == "The event is not supported for notifications"
     )
@@ -3269,7 +3269,7 @@ def test_boto3_put_bucket_logging():
             "LoggingEnabled": {"TargetBucket": "IAMNOTREAL", "TargetPrefix": ""}
         },
     )
-    assert err.exception.response["Error"]["Code"] == "InvalidTargetBucketForLogging"
+    assert err.value.response["Error"]["Code"] == "InvalidTargetBucketForLogging"
 
     # A log-bucket that's missing the proper ACLs for LogDelivery:
    with pytest.raises(ClientError) as err:
@@ -3279,8 +3279,8 @@ def test_boto3_put_bucket_logging():
             "LoggingEnabled": {"TargetBucket": log_bucket, "TargetPrefix": ""}
         },
     )
-    assert err.exception.response["Error"]["Code"] == "InvalidTargetBucketForLogging"
-    assert "log-delivery" in err.exception.response["Error"]["Message"]
+    assert err.value.response["Error"]["Code"] == "InvalidTargetBucketForLogging"
+    assert "log-delivery" in err.value.response["Error"]["Message"]
 
     # Add the proper "log-delivery" ACL to the log buckets:
     bucket_owner = s3.get_bucket_acl(Bucket=log_bucket)["Owner"]
@@ -3323,7 +3323,7 @@ def test_boto3_put_bucket_logging():
             }
         },
     )
-    assert err.exception.response["Error"]["Code"] == "CrossLocationLoggingProhibitted"
+    assert err.value.response["Error"]["Code"] == "CrossLocationLoggingProhibitted"
 
     # Correct logging:
     s3.put_bucket_logging(
@@ -3420,7 +3420,7 @@ def test_boto3_put_bucket_logging():
             }
         },
     )
-    assert err.exception.response["Error"]["Code"] == "MalformedXML"
+    assert err.value.response["Error"]["Code"] == "MalformedXML"
@@ -3442,7 +3442,7 @@ def test_boto3_put_object_tagging():
         },
     )
 
-    e = err.exception
+    e = err.value
     e.response["Error"].should.equal(
         {
             "Code": "NoSuchKey",
@@ -3490,7 +3490,7 @@ def test_boto3_put_object_tagging_on_earliest_version():
         },
     )
 
-    e = err.exception
+    e = err.value
     e.response["Error"].should.equal(
         {
             "Code": "NoSuchKey",
@@ -3558,7 +3558,7 @@ def test_boto3_put_object_tagging_on_both_version():
         },
     )
 
-    e = err.exception
+    e = err.value
     e.response["Error"].should.equal(
         {
             "Code": "NoSuchKey",
@@ -3773,7 +3773,7 @@ def test_boto3_delete_markers():
 
     with pytest.raises(ClientError) as e:
         s3.get_object(Bucket=bucket_name, Key=key)
-    e.exception.response["Error"]["Code"].should.equal("NoSuchKey")
+    e.value.response["Error"]["Code"].should.equal("NoSuchKey")
 
     response = s3.list_object_versions(Bucket=bucket_name)
     response["Versions"].should.have.length_of(2)
@@ -3893,7 +3893,7 @@ def test_boto3_bucket_name_too_long():
     s3 = boto3.client("s3", region_name=DEFAULT_REGION_NAME)
     with pytest.raises(ClientError) as exc:
         s3.create_bucket(Bucket="x" * 64)
-    exc.exception.response["Error"]["Code"].should.equal("InvalidBucketName")
+    exc.value.response["Error"]["Code"].should.equal("InvalidBucketName")
@@ -3901,7 +3901,7 @@ def test_boto3_bucket_name_too_short():
     s3 = boto3.client("s3", region_name=DEFAULT_REGION_NAME)
     with pytest.raises(ClientError) as exc:
         s3.create_bucket(Bucket="x" * 2)
-    exc.exception.response["Error"]["Code"].should.equal("InvalidBucketName")
+    exc.value.response["Error"]["Code"].should.equal("InvalidBucketName")
@@ -3975,7 +3975,7 @@ def test_accelerate_configuration_status_validation():
         s3.put_bucket_accelerate_configuration(
             Bucket=bucket_name, AccelerateConfiguration={"Status": "bad_status"}
         )
-    exc.exception.response["Error"]["Code"].should.equal("MalformedXML")
+    exc.value.response["Error"]["Code"].should.equal("MalformedXML")
@@ -3987,7 +3987,7 @@ def test_accelerate_configuration_is_not_supported_when_bucket_name_has_dots():
         s3.put_bucket_accelerate_configuration(
             Bucket=bucket_name, AccelerateConfiguration={"Status": "Enabled"}
         )
-    exc.exception.response["Error"]["Code"].should.equal("InvalidRequest")
+    exc.value.response["Error"]["Code"].should.equal("InvalidRequest")
 
 
 def store_and_read_back_a_key(key):
@@ -4029,11 +4029,11 @@ def test_leading_slashes_not_removed(bucket_name):
 
     with pytest.raises(ClientError) as e:
         s3.get_object(Bucket=bucket_name, Key=invalid_key_1)
-    e.exception.response["Error"]["Code"].should.equal("NoSuchKey")
+    e.value.response["Error"]["Code"].should.equal("NoSuchKey")
 
     with pytest.raises(ClientError) as e:
         s3.get_object(Bucket=bucket_name, Key=invalid_key_2)
-    e.exception.response["Error"]["Code"].should.equal("NoSuchKey")
+    e.value.response["Error"]["Code"].should.equal("NoSuchKey")
 
 
 @parameterized(
@@ -4054,7 +4054,7 @@ def assert_deleted():
         with pytest.raises(ClientError) as e:
             s3.get_object(Bucket=bucket_name, Key=key)
-        e.exception.response["Error"]["Code"].should.equal("NoSuchKey")
+        e.value.response["Error"]["Code"].should.equal("NoSuchKey")
 
     put_object()
     s3.delete_object(Bucket=bucket_name, Key=key)
@@ -4076,13 +4076,13 @@ def test_public_access_block():
         client.get_public_access_block(Bucket="mybucket")
 
     assert (
-        ce.exception.response["Error"]["Code"] == "NoSuchPublicAccessBlockConfiguration"
+        ce.value.response["Error"]["Code"] == "NoSuchPublicAccessBlockConfiguration"
     )
     assert (
-        ce.exception.response["Error"]["Message"]
+        ce.value.response["Error"]["Message"]
         == "The public access block configuration was not found"
     )
-    assert ce.exception.response["ResponseMetadata"]["HTTPStatusCode"] == 404
+    assert ce.value.response["ResponseMetadata"]["HTTPStatusCode"] == 404
 
     # Put a public block in place:
     test_map = {
@@ -4127,12 +4127,12 @@ def test_public_access_block():
             Bucket="mybucket", PublicAccessBlockConfiguration={}
         )
 
-    assert ce.exception.response["Error"]["Code"] == "InvalidRequest"
+    assert ce.value.response["Error"]["Code"] == "InvalidRequest"
     assert (
-        ce.exception.response["Error"]["Message"]
+        ce.value.response["Error"]["Message"]
         == "Must specify at least one configuration."
     )
-    assert ce.exception.response["ResponseMetadata"]["HTTPStatusCode"] == 400
+    assert ce.value.response["ResponseMetadata"]["HTTPStatusCode"] == 400
 
     # Test that things work with AWS Config:
     config_client = boto3.client("config", region_name=DEFAULT_REGION_NAME)
@@ -4158,7 +4158,7 @@ def test_public_access_block():
     with pytest.raises(ClientError) as ce:
         client.get_public_access_block(Bucket="mybucket")
     assert (
-        ce.exception.response["Error"]["Code"] == "NoSuchPublicAccessBlockConfiguration"
+        ce.value.response["Error"]["Code"] == "NoSuchPublicAccessBlockConfiguration"
     )
@@ -4303,7 +4303,7 @@ def test_list_config_discovered_resources():
     with pytest.raises(InvalidNextTokenException) as inte:
         s3_config_query.list_config_service_resources(None, None, 1, "notabucket")
 
-    assert "The nextToken provided is invalid" in inte.exception.message
+    assert "The nextToken provided is invalid" in inte.value.message
@@ -4803,7 +4803,7 @@ def test_presigned_url_restrict_parameters():
         ClientMethod="put_object",
         Params={"Bucket": bucket, "Key": key, "Unknown": "metadata"},
     )
-    assert str(err.exception).should.match(
+    assert str(err.value).should.match(
         r'Parameter validation failed:\nUnknown parameter in input: "Unknown", must be one of:.*'
     )
diff --git a/tests/test_s3/test_s3_lifecycle.py b/tests/test_s3/test_s3_lifecycle.py
index da9ffbca4c3d..d3d9344efbfa 100644
--- a/tests/test_s3/test_s3_lifecycle.py
+++ b/tests/test_s3/test_s3_lifecycle.py
@@ -84,7 +84,7 @@ def test_lifecycle_with_filters():
         client.put_bucket_lifecycle_configuration(
             Bucket="bucket", LifecycleConfiguration=lfc
         )
-    assert err.exception.response["Error"]["Code"] == "MalformedXML"
+    assert err.value.response["Error"]["Code"] == "MalformedXML"
 
     # With a tag:
     lfc["Rules"][0]["Filter"] = {"Tag": {"Key": "mytag", "Value": "mytagvalue"}}
@@ -172,14 +172,14 @@ def test_lifecycle_with_filters():
         client.put_bucket_lifecycle_configuration(
             Bucket="bucket", LifecycleConfiguration=lfc
         )
-    assert err.exception.response["Error"]["Code"] == "MalformedXML"
+    assert err.value.response["Error"]["Code"] == "MalformedXML"
 
     lfc["Rules"][0]["Prefix"] = "some/path"
     with pytest.raises(ClientError) as err:
         client.put_bucket_lifecycle_configuration(
             Bucket="bucket", LifecycleConfiguration=lfc
         )
-    assert err.exception.response["Error"]["Code"] == "MalformedXML"
+    assert err.value.response["Error"]["Code"] == "MalformedXML"
 
     # No filters -- just a prefix:
     del lfc["Rules"][0]["Filter"]
@@ -200,7 +200,7 @@ def test_lifecycle_with_filters():
         client.put_bucket_lifecycle_configuration(
             Bucket="bucket", LifecycleConfiguration=lfc
         )
-    assert err.exception.response["Error"]["Code"] == "MalformedXML"
+    assert err.value.response["Error"]["Code"] == "MalformedXML"
 
     lfc["Rules"][0]["Filter"] = {
         "Tag": {"Key": "mytag", "Value": "mytagvalue"},
@@ -216,7 +216,7 @@ def test_lifecycle_with_filters():
         client.put_bucket_lifecycle_configuration(
             Bucket="bucket", LifecycleConfiguration=lfc
         )
-    assert err.exception.response["Error"]["Code"] == "MalformedXML"
+    assert err.value.response["Error"]["Code"] == "MalformedXML"
 
     # Make sure multiple rules work:
     lfc = {
@@ -283,7 +283,7 @@ def test_lifecycle_with_eodm():
         client.put_bucket_lifecycle_configuration(
             Bucket="bucket", LifecycleConfiguration=lfc
         )
-    assert err.exception.response["Error"]["Code"] == "MalformedXML"
+    assert err.value.response["Error"]["Code"] == "MalformedXML"
 
     del lfc["Rules"][0]["Expiration"]["Days"]
     lfc["Rules"][0]["Expiration"]["Date"] = datetime(2015, 1, 1)
@@ -291,7 +291,7 @@ def test_lifecycle_with_eodm():
         client.put_bucket_lifecycle_configuration(
             Bucket="bucket", LifecycleConfiguration=lfc
         )
-    assert err.exception.response["Error"]["Code"] == "MalformedXML"
+    assert err.value.response["Error"]["Code"] == "MalformedXML"
@@ -387,7 +387,7 @@ def test_lifecycle_with_nvt():
         client.put_bucket_lifecycle_configuration(
             Bucket="bucket", LifecycleConfiguration=lfc
         )
-    assert err.exception.response["Error"]["Code"] == "MalformedXML"
+    assert err.value.response["Error"]["Code"] == "MalformedXML"
 
     lfc["Rules"][0]["NoncurrentVersionTransitions"][0]["NoncurrentDays"] = 30
     del lfc["Rules"][0]["NoncurrentVersionTransitions"][0]["StorageClass"]
@@ -395,7 +395,7 @@ def test_lifecycle_with_nvt():
         client.put_bucket_lifecycle_configuration(
             Bucket="bucket", LifecycleConfiguration=lfc
         )
-    assert err.exception.response["Error"]["Code"] == "MalformedXML"
+    assert err.value.response["Error"]["Code"] == "MalformedXML"
 
 
 @mock_s3
diff --git a/tests/test_s3/test_s3_storageclass.py b/tests/test_s3/test_s3_storageclass.py
index 0e8152b03d8f..ec7090369a3e 100644
--- a/tests/test_s3/test_s3_storageclass.py
+++ b/tests/test_s3/test_s3_storageclass.py
@@ -113,7 +113,7 @@ def test_s3_invalid_copied_storage_class():
             StorageClass="STANDARD2",
         )
 
-    e = err.exception
+    e = err.value
     e.response["Error"]["Code"].should.equal("InvalidStorageClass")
     e.response["Error"]["Message"].should.equal(
         "The storage class you specified is not valid"
@@ -133,7 +133,7 @@ def test_s3_invalid_storage_class():
         Bucket="Bucket", Key="First_Object", Body="Body", StorageClass="STANDARDD"
     )
 
-    e = err.exception
+    e = err.value
     e.response["Error"]["Code"].should.equal("InvalidStorageClass")
     e.response["Error"]["Message"].should.equal(
         "The storage class you specified is not valid"
@@ -166,14 +166,14 @@ def test_s3_copy_object_error_for_glacier_storage_class_not_restored():
         Bucket="Bucket", Key="First_Object", Body="Body", StorageClass="GLACIER"
     )
 
-    with pytest.raises(ClientError) as exc:
+    with pytest.raises(ClientError) as ex:
         s3.copy_object(
             CopySource={"Bucket": "Bucket", "Key": "First_Object"},
             Bucket="Bucket",
             Key="Second_Object",
         )
 
-    exc.exception.response["Error"]["Code"].should.equal("ObjectNotInActiveTierError")
+    ex.value.response["Error"]["Code"].should.equal("ObjectNotInActiveTierError")
@@ -194,7 +194,7 @@ def test_s3_copy_object_error_for_deep_archive_storage_class_not_restored():
             Key="Second_Object",
         )
 
-    exc.exception.response["Error"]["Code"].should.equal("ObjectNotInActiveTierError")
+    exc.value.response["Error"]["Code"].should.equal("ObjectNotInActiveTierError")
 
 
 @mock_s3
diff --git a/tests/test_sagemaker/test_sagemaker_endpoint.py b/tests/test_sagemaker/test_sagemaker_endpoint.py
index 0d21ad1ef1bc..1e1ecd494b8e 100644
--- a/tests/test_sagemaker/test_sagemaker_endpoint.py
+++ b/tests/test_sagemaker/test_sagemaker_endpoint.py
@@ -38,7 +38,7 @@ def test_create_endpoint_config():
             EndpointConfigName=endpoint_config_name,
             ProductionVariants=production_variants,
         )
-    assert e.exception.response["Error"]["Message"].startswith("Could not find model")
+    assert e.value.response["Error"]["Message"].startswith("Could not find model")
 
     _create_model(sagemaker, model_name)
     resp = sagemaker.create_endpoint_config(
@@ -88,11 +88,11 @@ def test_delete_endpoint_config():
     resp = sagemaker.delete_endpoint_config(EndpointConfigName=endpoint_config_name)
     with pytest.raises(ClientError) as e:
         sagemaker.describe_endpoint_config(EndpointConfigName=endpoint_config_name)
-    assert e.exception.response["Error"]["Message"].startswith("Could not find endpoint configuration")
+    assert e.value.response["Error"]["Message"].startswith("Could not find endpoint configuration")
 
     with pytest.raises(ClientError) as e:
         sagemaker.delete_endpoint_config(EndpointConfigName=endpoint_config_name)
-    assert e.exception.response["Error"]["Message"].startswith( "Could not find endpoint configuration")
+    assert e.value.response["Error"]["Message"].startswith( "Could not find endpoint configuration")
@@ -118,11 +118,11 @@ def test_create_endpoint_invalid_instance_type():
             EndpointConfigName=endpoint_config_name,
             ProductionVariants=production_variants,
         )
-    assert e.exception.response["Error"]["Code"] == "ValidationException"
+    assert e.value.response["Error"]["Code"] == "ValidationException"
     expected_message = "Value '{}' at 'instanceType' failed to satisfy constraint: Member must satisfy enum value set: [".format(
         instance_type
     )
-    assert expected_message in e.exception.response["Error"]["Message"]
+    assert expected_message in e.value.response["Error"]["Message"]
@@ -134,7 +134,7 @@ def test_create_endpoint():
         sagemaker.create_endpoint(
             EndpointName=endpoint_name, EndpointConfigName="NonexistentEndpointConfig"
         )
-    assert e.exception.response["Error"]["Message"].startswith("Could not find endpoint configuration")
+    assert e.value.response["Error"]["Message"].startswith("Could not find endpoint configuration")
 
     model_name = "MyModel"
     _create_model(sagemaker, model_name)
@@ -182,11 +182,11 @@ def test_delete_endpoint():
     sagemaker.delete_endpoint(EndpointName=endpoint_name)
     with pytest.raises(ClientError) as e:
         sagemaker.describe_endpoint(EndpointName=endpoint_name)
-    assert e.exception.response["Error"]["Message"].startswith("Could not find endpoint")
+    assert e.value.response["Error"]["Message"].startswith("Could not find endpoint")
 
     with pytest.raises(ClientError) as e:
         sagemaker.delete_endpoint(EndpointName=endpoint_name)
-    assert e.exception.response["Error"]["Message"].startswith("Could not find endpoint")
+    assert e.value.response["Error"]["Message"].startswith("Could not find endpoint")
 
 
 def _create_model(boto_client, model_name):
diff --git a/tests/test_sagemaker/test_sagemaker_models.py b/tests/test_sagemaker/test_sagemaker_models.py
index 1f2f4440dce4..91fc3bb5bed6 100644
--- a/tests/test_sagemaker/test_sagemaker_models.py
+++ b/tests/test_sagemaker/test_sagemaker_models.py
@@ -79,7 +79,7 @@ def test_delete_model_not_found():
         boto3.client("sagemaker", region_name="us-east-1").delete_model(
             ModelName="blah"
         )
-    assert err.exception.response["Error"]["Code"].should.equal("404")
+    assert err.value.response["Error"]["Code"].should.equal("404")
 
 
 @mock_sagemaker
diff --git a/tests/test_sagemaker/test_sagemaker_notebooks.py b/tests/test_sagemaker/test_sagemaker_notebooks.py
index 9f6a2be39d2d..9fd082689da0 100644
--- a/tests/test_sagemaker/test_sagemaker_notebooks.py
+++ b/tests/test_sagemaker/test_sagemaker_notebooks.py
@@ -136,7 +136,7 @@ def test_create_notebook_instance_bad_volume_size():
     with pytest.raises(ParamValidationError) as ex:
         sagemaker.create_notebook_instance(**args)
     assert \
-        ex.exception.args[0] == \
+        ex.value.args[0] == \
         "Parameter validation failed:\nInvalid range for parameter VolumeSizeInGB, value: {}, valid range: 5-inf".format(vol_size)
@@ -153,12 +153,12 @@ def test_create_notebook_instance_invalid_instance_type():
     }
     with pytest.raises(ClientError) as ex:
         sagemaker.create_notebook_instance(**args)
-    assert ex.exception.response["Error"]["Code"] == "ValidationException"
+    assert ex.value.response["Error"]["Code"] == "ValidationException"
 
     expected_message = "Value '{}' at 'instanceType' failed to satisfy constraint: Member must satisfy enum value set: [".format(
         instance_type
     )
-    assert expected_message in ex.exception.response["Error"]["Message"]
+    assert expected_message in ex.value.response["Error"]["Message"]
@@ -182,11 +182,11 @@ def test_notebook_instance_lifecycle():
     with pytest.raises(ClientError) as ex:
         sagemaker.delete_notebook_instance(NotebookInstanceName=NAME_PARAM)
-    assert ex.exception.response["Error"]["Code"] == "ValidationException"
+    assert ex.value.response["Error"]["Code"] == "ValidationException"
     expected_message = "Status (InService) not in ([Stopped, Failed]). Unable to transition to (Deleting) for Notebook Instance ({})".format(
         notebook_instance_arn
     )
-    assert expected_message in ex.exception.response["Error"]["Message"]
+    assert expected_message in ex.value.response["Error"]["Message"]
 
     sagemaker.stop_notebook_instance(NotebookInstanceName=NAME_PARAM)
@@ -207,7 +207,7 @@ def test_notebook_instance_lifecycle():
 
     with pytest.raises(ClientError) as ex:
         sagemaker.describe_notebook_instance(NotebookInstanceName=NAME_PARAM)
-    assert ex.exception.response["Error"]["Message"] == "RecordNotFound"
+    assert ex.value.response["Error"]["Message"] == "RecordNotFound"
@@ -216,7 +216,7 @@ def test_describe_nonexistent_model():
 
     with pytest.raises(ClientError) as e:
         sagemaker.describe_model(ModelName="Nonexistent")
-    assert e.exception.response["Error"]["Message"].startswith("Could not find model")
+    assert e.value.response["Error"]["Message"].startswith("Could not find model")
@@ -239,7 +239,7 @@ def test_notebook_instance_lifecycle_config():
             OnStart=on_start,
         )
     assert \
-        e.exception.response["Error"]["Message"].endswith(
+        e.value.response["Error"]["Message"].endswith(
             "Notebook Instance Lifecycle Config already exists.)"
         )
@@ -264,7 +264,7 @@ def test_notebook_instance_lifecycle_config():
             NotebookInstanceLifecycleConfigName=name,
         )
     assert \
-        e.exception.response["Error"]["Message"].endswith(
+        e.value.response["Error"]["Message"].endswith(
             "Notebook Instance Lifecycle Config does not exist.)"
         )
@@ -273,6 +273,6 @@ def test_notebook_instance_lifecycle_config():
             NotebookInstanceLifecycleConfigName=name,
        )
     assert \
-        e.exception.response["Error"]["Message"].endswith(
+        e.value.response["Error"]["Message"].endswith(
             "Notebook Instance Lifecycle Config does not exist.)"
        )
diff --git a/tests/test_secretsmanager/test_list_secrets.py b/tests/test_secretsmanager/test_list_secrets.py
index 826e09de7219..324ab1838e38 100644
--- a/tests/test_secretsmanager/test_list_secrets.py
+++ b/tests/test_secretsmanager/test_list_secrets.py
@@ -123,7 +123,7 @@ def test_with_all_filter():
     secrets = conn.list_secrets(Filters=[{"Key": "all", "Values": ["foo"]}])
 
     secret_names = list(map(lambda s: s["Name"], secrets["SecretList"]))
-    assert secret_names == ["foo", "bar", "baz", "qux", "multi"]
+    assert sorted(secret_names) == ['bar', 'baz', 'foo', 'multi', 'qux']
@@ -133,8 +133,8 @@ def test_with_no_filter_key():
     with pytest.raises(ClientError) as ire:
         conn.list_secrets(Filters=[{"Values": ["foo"]}])
 
-    ire.exception.response["Error"]["Code"].should.equal("InvalidParameterException")
-    ire.exception.response["Error"]["Message"].should.equal("Invalid filter key")
+    ire.value.response["Error"]["Code"].should.equal("InvalidParameterException")
+    ire.value.response["Error"]["Message"].should.equal("Invalid filter key")
@@ -146,8 +146,8 @@ def test_with_no_filter_values():
     with pytest.raises(ClientError) as ire:
         conn.list_secrets(Filters=[{"Key": "description"}])
 
-    ire.exception.response["Error"]["Code"].should.equal("InvalidParameterException")
-    ire.exception.response["Error"]["Message"].should.equal(
+    ire.value.response["Error"]["Code"].should.equal("InvalidParameterException")
+    ire.value.response["Error"]["Message"].should.equal(
         "Invalid filter values for key: description"
     )
@@ -159,8 +159,8 @@ def test_with_invalid_filter_key():
     with pytest.raises(ClientError) as ire:
         conn.list_secrets(Filters=[{"Key": "invalid", "Values": ["foo"]}])
 
-    ire.exception.response["Error"]["Code"].should.equal("ValidationException")
- ire.exception.response["Error"]["Message"].should.equal( + ire.value.response["Error"]["Code"].should.equal("ValidationException") + ire.value.response["Error"]["Message"].should.equal( "1 validation error detected: Value 'invalid' at 'filters.1.member.key' failed to satisfy constraint: Member " "must satisfy enum value set: [all, name, tag-key, description, tag-value]" ) diff --git a/tests/test_secretsmanager/test_secretsmanager.py b/tests/test_secretsmanager/test_secretsmanager.py index 301ceb081d29..14d30bf36fd5 100644 --- a/tests/test_secretsmanager/test_secretsmanager.py +++ b/tests/test_secretsmanager/test_secretsmanager.py @@ -58,7 +58,7 @@ def test_get_secret_that_does_not_exist(): assert \ "Secrets Manager can't find the specified secret." == \ - cm.exception.response["Error"]["Message"] + cm.value.response["Error"]["Message"] @mock_secretsmanager @@ -73,7 +73,7 @@ def test_get_secret_that_does_not_match(): assert \ "Secrets Manager can't find the specified secret." == \ - cm.exception.response["Error"]["Message"] + cm.value.response["Error"]["Message"] @mock_secretsmanager @@ -99,7 +99,7 @@ def test_get_secret_that_has_no_value(): assert \ "Secrets Manager can't find the specified secret value for staging label: AWSCURRENT" == \ - cm.exception.response["Error"] + cm.value.response["Error"]["Message"] @mock_secretsmanager @@ -110,16 +110,14 @@ def test_get_secret_version_that_does_not_exist(): secret_arn = result["ARN"] missing_version_id = "00000000-0000-0000-0000-000000000000" - with assert_raises(ClientError) as cm: + with pytest.raises(ClientError) as cm: conn.get_secret_value(SecretId=secret_arn, VersionId=missing_version_id) - assert_equal( + assert \ ( "An error occurred (ResourceNotFoundException) when calling the GetSecretValue operation: Secrets " "Manager can't find the specified secret value for VersionId: 00000000-0000-0000-0000-000000000000" - ), - cm.exception.response["Error"]["Message"], - ) + ) == cm.value.response["Error"]["Message"] @mock_secretsmanager @@ -702,8 +700,8 @@ def test_put_secret_binary_requires_either_string_or_binary(): with pytest.raises(ClientError) as ire: conn.put_secret_value(SecretId=DEFAULT_SECRET_NAME) - ire.exception.response["Error"]["Code"].should.equal("InvalidRequestException") - ire.exception.response["Error"]["Message"].should.equal( + ire.value.response["Error"]["Code"].should.equal("InvalidRequestException") + ire.value.response["Error"]["Message"].should.equal( "You must provide either SecretString or SecretBinary." ) @@ -883,7 +881,7 @@ def test_update_secret_which_does_not_exit(): assert \ "Secrets Manager can't find the specified secret." == \ - cm.exception.response["Error"]["Message"] + cm.value.response["Error"]["Message"] @mock_secretsmanager @@ -900,7 +898,7 @@ def test_update_secret_marked_as_deleted(): assert ( "because it was marked for deletion." 
- in cm.exception.response["Error"]["Message"] + in cm.value.response["Error"]["Message"] ) diff --git a/tests/test_secretsmanager/test_server.py b/tests/test_secretsmanager/test_server.py index da41eb5fba80..1d3c9d218107 100644 --- a/tests/test_secretsmanager/test_server.py +++ b/tests/test_secretsmanager/test_server.py @@ -89,10 +89,9 @@ def test_get_secret_that_has_no_value(): ) json_data = json.loads(get_secret.data.decode("utf-8")) - assert ( - json_data["message"] - == "Secrets Manager can't find the specified secret value for staging label: AWSCURRENT" - ) + assert \ + json_data["message"] == \ + "Secrets Manager can't find the specified secret value for staging label: AWSCURRENT" assert json_data["__type"] == "ResourceNotFoundException" diff --git a/tests/test_ses/test_ses_boto3.py b/tests/test_ses/test_ses_boto3.py index f0af73fd339f..dee28210ca5a 100644 --- a/tests/test_ses/test_ses_boto3.py +++ b/tests/test_ses/test_ses_boto3.py @@ -311,7 +311,7 @@ def test_create_configuration_set(): }, ) - ex.exception.response["Error"]["Code"].should.equal("ConfigurationSetDoesNotExist") + ex.value.response["Error"]["Code"].should.equal("ConfigurationSetDoesNotExist") with pytest.raises(ClientError) as ex: conn.create_configuration_set_event_destination( @@ -326,7 +326,7 @@ def test_create_configuration_set(): }, ) - ex.exception.response["Error"]["Code"].should.equal("EventDestinationAlreadyExists") + ex.value.response["Error"]["Code"].should.equal("EventDestinationAlreadyExists") @mock_ses @@ -339,7 +339,7 @@ def test_create_receipt_rule_set(): with pytest.raises(ClientError) as ex: conn.create_receipt_rule_set(RuleSetName="testRuleSet") - ex.exception.response["Error"]["Code"].should.equal("RuleSetNameAlreadyExists") + ex.value.response["Error"]["Code"].should.equal("RuleSetNameAlreadyExists") @mock_ses @@ -407,7 +407,7 @@ def test_create_receipt_rule(): }, ) - ex.exception.response["Error"]["Code"].should.equal("RuleAlreadyExists") + ex.value.response["Error"]["Code"].should.equal("RuleAlreadyExists") with pytest.raises(ClientError) as ex: conn.create_receipt_rule( @@ -438,7 +438,7 @@ def test_create_receipt_rule(): }, ) - ex.exception.response["Error"]["Code"].should.equal("RuleSetDoesNotExist") + ex.value.response["Error"]["Code"].should.equal("RuleSetDoesNotExist") @mock_ses @@ -467,7 +467,7 @@ def test_create_ses_template(): } ) - ex.exception.response["Error"]["Code"].should.equal("TemplateNameAlreadyExists") + ex.value.response["Error"]["Code"].should.equal("TemplateNameAlreadyExists") # get a template which is already added result = conn.get_template(TemplateName="MyTemplate") @@ -478,7 +478,7 @@ def test_create_ses_template(): with pytest.raises(ClientError) as ex: conn.get_template(TemplateName="MyFakeTemplate") - ex.exception.response["Error"]["Code"].should.equal("TemplateDoesNotExist") + ex.value.response["Error"]["Code"].should.equal("TemplateDoesNotExist") result = conn.list_templates() result["TemplatesMetadata"][0]["Name"].should.equal("MyTemplate") diff --git a/tests/test_sns/test_publishing_boto3.py b/tests/test_sns/test_publishing_boto3.py index 07bf04b11d82..797ccdaba5fa 100644 --- a/tests/test_sns/test_publishing_boto3.py +++ b/tests/test_sns/test_publishing_boto3.py @@ -235,14 +235,14 @@ def test_publish_bad_sms(): # Test invalid number with pytest.raises(ClientError) as cm: client.publish(PhoneNumber="NAA+15551234567", Message="my message") - cm.exception.response["Error"]["Code"].should.equal("InvalidParameter") - 
cm.exception.response["Error"]["Message"].should.contain("not meet the E164")
+    cm.value.response["Error"]["Code"].should.equal("InvalidParameter")
+    cm.value.response["Error"]["Message"].should.contain("not meet the E164")
 
     # Test too long ASCII message
     with pytest.raises(ClientError) as cm:
         client.publish(PhoneNumber="+15551234567", Message="a" * 1601)
-    cm.exception.response["Error"]["Code"].should.equal("InvalidParameter")
-    cm.exception.response["Error"]["Message"].should.contain("must be less than 1600")
+    cm.value.response["Error"]["Code"].should.equal("InvalidParameter")
+    cm.value.response["Error"]["Message"].should.contain("must be less than 1600")
 
 
 @mock_sqs
diff --git a/tests/test_sqs/test_sqs.py b/tests/test_sqs/test_sqs.py
index b0a91bbdeafa..57dd97ac3e6e 100644
--- a/tests/test_sqs/test_sqs.py
+++ b/tests/test_sqs/test_sqs.py
@@ -221,7 +221,7 @@ def test_get_nonexistent_queue():
     sqs = boto3.resource("sqs", region_name="us-east-1")
     with pytest.raises(ClientError) as err:
         sqs.get_queue_by_name(QueueName="non-existing-queue")
-    ex = err.exception
+    ex = err.value
     ex.operation_name.should.equal("GetQueueUrl")
     ex.response["Error"]["Code"].should.equal("AWS.SimpleQueueService.NonExistentQueue")
     ex.response["Error"]["Message"].should.equal(
@@ -230,7 +230,7 @@ def test_get_nonexistent_queue():
 
     with pytest.raises(ClientError) as err:
         sqs.Queue("http://whatever-incorrect-queue-address").load()
-    ex = err.exception
+    ex = err.value
     ex.operation_name.should.equal("GetQueueAttributes")
     ex.response["Error"]["Code"].should.equal("AWS.SimpleQueueService.NonExistentQueue")
@@ -377,7 +377,7 @@ def test_message_with_attributes_invalid_datatype():
             }
         },
     )
-    ex = e.exception
+    ex = e.value
     ex.response["Error"]["Code"].should.equal("MessageAttributesInvalid")
     ex.response["Error"]["Message"].should.equal(
         "The message attribute 'timestamp' has an invalid message attribute type, the set of supported type "
@@ -1658,7 +1658,7 @@ def test_add_permission_errors():
         AWSAccountIds=["111111111111"],
         Actions=["ReceiveMessage", "SendMessage"],
     )
-    ex = e.exception
+    ex = e.value
     ex.operation_name.should.equal("AddPermission")
     ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400)
     ex.response["Error"]["Code"].should.contain("InvalidParameterValue")
@@ -1673,7 +1673,7 @@ def test_add_permission_errors():
         AWSAccountIds=["111111111111"],
         Actions=["RemovePermission"],
     )
-    ex = e.exception
+    ex = e.value
     ex.operation_name.should.equal("AddPermission")
     ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400)
     ex.response["Error"]["Code"].should.contain("InvalidParameterValue")
@@ -1689,7 +1689,7 @@ def test_add_permission_errors():
         AWSAccountIds=["111111111111"],
         Actions=[],
     )
-    ex = e.exception
+    ex = e.value
     ex.operation_name.should.equal("AddPermission")
     ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400)
     ex.response["Error"]["Code"].should.contain("MissingParameter")
@@ -1704,7 +1704,7 @@ def test_add_permission_errors():
         AWSAccountIds=[],
         Actions=["ReceiveMessage"],
     )
-    ex = e.exception
+    ex = e.value
     ex.operation_name.should.equal("AddPermission")
     ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400)
     ex.response["Error"]["Code"].should.contain("InvalidParameterValue")
@@ -1728,7 +1728,7 @@ def test_add_permission_errors():
             "SendMessage",
         ],
     )
-    ex = e.exception
+    ex = e.value
     ex.operation_name.should.equal("AddPermission")
     ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(403)
     ex.response["Error"]["Code"].should.contain("OverLimit")
@@ -1745,7
+1745,7 @@ def test_remove_permission_errors(): with pytest.raises(ClientError) as e: client.remove_permission(QueueUrl=queue_url, Label="test") - ex = e.exception + ex = e.value ex.operation_name.should.equal("RemovePermission") ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) ex.response["Error"]["Code"].should.contain("InvalidParameterValue") @@ -2174,7 +2174,7 @@ def test_send_messages_to_fifo_without_message_group_id(): with pytest.raises(Exception) as e: queue.send_message(MessageBody="message-1") - ex = e.exception + ex = e.value ex.response["Error"]["Code"].should.equal("MissingParameter") ex.response["Error"]["Message"].should.equal( "The request must contain the parameter MessageGroupId." diff --git a/tests/test_ssm/test_ssm_boto3.py b/tests/test_ssm/test_ssm_boto3.py index e3c03203f6cb..1eeec09d0a6f 100644 --- a/tests/test_ssm/test_ssm_boto3.py +++ b/tests/test_ssm/test_ssm_boto3.py @@ -35,10 +35,10 @@ def test_delete_parameter(): def test_delete_nonexistent_parameter(): client = boto3.client("ssm", region_name="us-east-1") - with assert_raises(ClientError) as ex: + with pytest.raises(ClientError) as ex: client.delete_parameter(Name="test_noexist") - ex.exception.response["Error"]["Code"].should.equal("ParameterNotFound") - ex.exception.response["Error"]["Message"].should.equal( + ex.value.response["Error"]["Code"].should.equal("ParameterNotFound") + ex.value.response["Error"]["Message"].should.equal( "Parameter test_noexist not found." ) @@ -438,17 +438,17 @@ def test_get_parameter_with_version_and_labels(): "arn:aws:ssm:us-east-1:1234567890:parameter/test-2" ) - with assert_raises(ClientError) as ex: + with pytest.raises(ClientError) as ex: client.get_parameter(Name="test-2:2:3", WithDecryption=False) - ex.exception.response["Error"]["Code"].should.equal("ParameterNotFound") - ex.exception.response["Error"]["Message"].should.equal( + ex.value.response["Error"]["Code"].should.equal("ParameterNotFound") + ex.value.response["Error"]["Message"].should.equal( "Parameter test-2:2:3 not found." ) - with assert_raises(ClientError) as ex: + with pytest.raises(ClientError) as ex: client.get_parameter(Name="test-2:2", WithDecryption=False) - ex.exception.response["Error"]["Code"].should.equal("ParameterNotFound") - ex.exception.response["Error"]["Message"].should.equal( + ex.value.response["Error"]["Code"].should.equal("ParameterNotFound") + ex.value.response["Error"]["Message"].should.equal( "Parameter test-2:2 not found." 
) @@ -462,9 +462,9 @@ def test_get_parameters_errors(): for name, value in ssm_parameters.items(): client.put_parameter(Name=name, Value=value, Type="String") - with assert_raises(ClientError) as e: + with pytest.raises(ClientError) as e: client.get_parameters(Names=list(ssm_parameters.keys())) - ex = e.exception + ex = e.value ex.operation_name.should.equal("GetParameters") ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) ex.response["Error"]["Code"].should.contain("ValidationException") diff --git a/tests/test_stepfunctions/test_stepfunctions.py b/tests/test_stepfunctions/test_stepfunctions.py index 1a946e8e3a11..d5eb76ae72fa 100644 --- a/tests/test_stepfunctions/test_stepfunctions.py +++ b/tests/test_stepfunctions/test_stepfunctions.py @@ -544,11 +544,11 @@ def test_state_machine_start_execution_fails_on_duplicate_execution_name(): stateMachineArn=sm["stateMachineArn"], name="execution_name" ) # - with pytest.raises(ClientError) as exc: + with pytest.raises(ClientError) as ex: _ = client.start_execution( stateMachineArn=sm["stateMachineArn"], name="execution_name" ) - exc.exception.response["Error"]["Message"].should.equal( + ex.value.response["Error"]["Message"].should.equal( "Execution Already Exists: '" + execution_one["executionArn"] + "'" ) diff --git a/tests/test_sts/test_sts.py b/tests/test_sts/test_sts.py index 34b71c358c2f..098da5881c2a 100644 --- a/tests/test_sts/test_sts.py +++ b/tests/test_sts/test_sts.py @@ -357,9 +357,9 @@ def test_federation_token_with_too_long_policy(): json_policy = json.dumps(policy) assert len(json_policy) > MAX_FEDERATION_TOKEN_POLICY_LENGTH - with pytest.raises(ClientError) as exc: + with pytest.raises(ClientError) as ex: cli.get_federation_token(Name="foo", DurationSeconds=3600, Policy=json_policy) - exc.exception.response["Error"]["Code"].should.equal("ValidationError") - exc.exception.response["Error"]["Message"].should.contain( + ex.value.response["Error"]["Code"].should.equal("ValidationError") + ex.value.response["Error"]["Message"].should.contain( str(MAX_FEDERATION_TOKEN_POLICY_LENGTH) ) From 5697ff87a81a914298cc88554a4daa71dbf6ae3b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Mat=C4=9Bj=20Cepl?= Date: Tue, 6 Oct 2020 08:46:05 +0200 Subject: [PATCH 622/658] Back to Black --- docs/conf.py | 150 ++++---- moto/applicationautoscaling/models.py | 9 +- moto/applicationautoscaling/responses.py | 10 +- moto/athena/responses.py | 7 +- moto/autoscaling/models.py | 2 +- moto/autoscaling/responses.py | 8 +- moto/cloudformation/parsing.py | 12 +- moto/cloudformation/utils.py | 3 +- moto/cognitoidp/responses.py | 4 +- moto/config/exceptions.py | 12 +- moto/config/models.py | 18 +- moto/config/responses.py | 6 +- moto/core/responses.py | 6 +- moto/core/utils.py | 14 +- moto/dynamodb2/models/__init__.py | 3 +- moto/ec2/exceptions.py | 11 +- moto/ec2/models.py | 32 +- moto/ec2/responses/vpcs.py | 8 +- moto/ecs/exceptions.py | 3 +- moto/ecs/models.py | 6 +- moto/elasticbeanstalk/models.py | 16 +- moto/elasticbeanstalk/responses.py | 23 +- moto/emr/utils.py | 4 +- moto/iam/access_control.py | 10 +- moto/iam/exceptions.py | 6 +- moto/iam/models.py | 36 +- moto/iam/policy_validation.py | 25 +- moto/iot/responses.py | 3 +- moto/iotdata/models.py | 3 +- moto/kinesis/models.py | 6 +- moto/kinesisvideo/exceptions.py | 3 +- moto/kinesisvideo/responses.py | 10 +- moto/kinesisvideoarchivedmedia/responses.py | 42 ++- moto/managedblockchain/models.py | 20 +- moto/managedblockchain/responses.py | 23 +- moto/organizations/models.py | 6 +- 
moto/packages/httpretty/core.py | 12 +- moto/ram/models.py | 6 +- moto/s3/models.py | 17 +- moto/s3/responses.py | 4 +- moto/s3/utils.py | 2 +- moto/sagemaker/models.py | 17 +- moto/sagemaker/responses.py | 14 +- moto/sns/models.py | 14 +- moto/sqs/responses.py | 10 +- moto/stepfunctions/models.py | 4 +- moto/sts/models.py | 10 +- moto/transcribe/models.py | 6 +- scripts/get_amis.py | 71 ++-- scripts/implementation_coverage.py | 44 ++- scripts/scaffold.py | 355 ++++++++++-------- scripts/update_managed_policies.py | 51 ++- setup.py | 70 ++-- tests/test_acm/test_acm.py | 19 +- tests/test_apigateway/test_apigateway.py | 48 ++- .../test_validation.py | 9 +- tests/test_athena/test_athena.py | 8 +- tests/test_autoscaling/test_autoscaling.py | 13 +- .../test_autoscaling_cloudformation.py | 9 +- .../test_launch_configurations.py | 4 +- tests/test_autoscaling/test_policies.py | 2 +- tests/test_awslambda/test_lambda.py | 11 +- .../test_cloudformation_depends_on.py | 8 +- .../test_cloudformation_stack_crud_boto3.py | 6 +- .../test_cloudformation_stack_integration.py | 9 +- tests/test_codepipeline/test_codepipeline.py | 12 +- tests/test_cognitoidp/test_cognitoidp.py | 64 +++- tests/test_config/test_config.py | 79 ++-- tests/test_core/test_auth.py | 4 +- tests/test_dynamodb2/test_dynamodb.py | 233 +++++++++--- .../test_dynamodb2/test_dynamodb_executor.py | 80 +++- .../test_dynamodb_validation.py | 10 +- tests/test_ec2/test_amis.py | 6 +- tests/test_ec2/test_flow_logs.py | 4 +- tests/test_ec2/test_instances.py | 12 +- tests/test_ec2/test_route_tables.py | 4 +- tests/test_ec2/test_subnets.py | 6 +- tests/test_ec2/test_vpn_connections.py | 4 +- tests/test_ecs/test_ecs_boto3.py | 50 ++- tests/test_elasticbeanstalk/test_eb.py | 48 ++- tests/test_elb/test_elb.py | 5 +- tests/test_emr/test_emr_boto3.py | 12 +- tests/test_glue/test_datacatalog.py | 4 +- tests/test_iam/test_iam.py | 216 ++++++++--- tests/test_iot/test_iot.py | 8 +- .../test_kinesisvideoarchivedmedia.py | 8 +- tests/test_kms/test_kms.py | 6 +- tests/test_kms/test_kms_boto3.py | 24 +- tests/test_logs/test_integration.py | 16 +- tests/test_logs/test_logs.py | 4 +- .../test_managedblockchain_members.py | 18 +- .../test_managedblockchain_nodes.py | 33 +- .../test_managedblockchain_proposals.py | 11 +- .../test_managedblockchain_proposalvotes.py | 3 +- .../test_organizations_boto3.py | 5 +- tests/test_s3/test_s3.py | 42 +-- tests/test_s3/test_s3_cloudformation.py | 7 +- .../test_sagemaker/test_sagemaker_endpoint.py | 12 +- .../test_sagemaker_notebooks.py | 45 ++- .../test_sagemaker/test_sagemaker_training.py | 21 +- .../test_secretsmanager/test_list_secrets.py | 2 +- .../test_secretsmanager.py | 54 +-- tests/test_secretsmanager/test_server.py | 7 +- tests/test_ses/test_ses_boto3.py | 27 +- tests/test_sns/test_publishing_boto3.py | 4 +- tests/test_sns/test_topics_boto3.py | 9 +- tests/test_sqs/test_sqs.py | 9 +- tests/test_ssm/test_ssm_boto3.py | 22 +- .../test_stepfunctions/test_stepfunctions.py | 16 +- .../test_transcribe/test_transcribe_boto3.py | 20 +- update_version_from_git.py | 77 ++-- wait_for.py | 4 +- 112 files changed, 1803 insertions(+), 977 deletions(-) diff --git a/docs/conf.py b/docs/conf.py index a902d0ecf8f4..7bba967b2922 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -20,12 +20,12 @@ # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. 
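
Nearly all of the churn in this "Back to Black" commit, including the docs/conf.py hunks around this point, is mechanical: Black normalizes string quotes to double quotes, inserts a space after the "#" of comments, and uses the "magic trailing comma" to decide whether a call is exploded to one argument per line. A minimal sketch of reproducing that behavior programmatically (assuming a reasonably recent black release; the exact version moto pinned is not stated here, and older releases spell black.Mode as black.FileMode):

    import black

    # The trailing comma inside the call forces the exploded layout.
    src = "foo(a, b,)\n"
    print(black.format_str(src, mode=black.Mode()))
    # foo(
    #     a,
    #     b,
    # )
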
-#sys.path.insert(0, os.path.abspath('.')) +# sys.path.insert(0, os.path.abspath('.')) # -- General configuration ------------------------------------------------ # If your documentation needs a minimal Sphinx version, state it here. -#needs_sphinx = '1.0' +# needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom @@ -33,23 +33,23 @@ extensions = [] # Add any paths that contain templates here, relative to this directory. -templates_path = ['_templates'] +templates_path = ["_templates"] # The suffix(es) of source filenames. # You can specify multiple suffix as a list of string: # source_suffix = ['.rst', '.md'] -source_suffix = '.rst' +source_suffix = ".rst" # The encoding of source files. -#source_encoding = 'utf-8-sig' +# source_encoding = 'utf-8-sig' # The master toctree document. -master_doc = 'index' +master_doc = "index" # General information about the project. -project = 'Moto' -copyright = '2015, Steve Pulec' -author = 'Steve Pulec' +project = "Moto" +copyright = "2015, Steve Pulec" +author = "Steve Pulec" # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the @@ -57,6 +57,7 @@ # # The short X.Y version. import moto + version = moto.__version__ # The full version, including alpha/beta/rc tags. release = moto.__version__ @@ -70,37 +71,37 @@ # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: -#today = '' +# today = '' # Else, today_fmt is used as the format for a strftime call. -#today_fmt = '%B %d, %Y' +# today_fmt = '%B %d, %Y' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. -exclude_patterns = ['_build'] +exclude_patterns = ["_build"] # The reST default role (used for this markup: `text`) to use for all # documents. -#default_role = None +# default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. -#add_function_parentheses = True +# add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). -#add_module_names = True +# add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. -#show_authors = False +# show_authors = False # The name of the Pygments (syntax highlighting) style to use. -pygments_style = 'sphinx' +pygments_style = "sphinx" # A list of ignored prefixes for module index sorting. -#modindex_common_prefix = [] +# modindex_common_prefix = [] # If true, keep warnings as "system message" paragraphs in the built documents. -#keep_warnings = False +# keep_warnings = False # If true, `todo` and `todoList` produce output, else they produce nothing. todo_include_todos = False @@ -110,156 +111,149 @@ # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. -html_theme = 'sphinx_rtd_theme' +html_theme = "sphinx_rtd_theme" # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. -#html_theme_options = {} +# html_theme_options = {} # Add any paths that contain custom themes here, relative to this directory. -#html_theme_path = [] +# html_theme_path = [] # The name for this set of Sphinx documents. 
If None, it defaults to # " v documentation". -#html_title = None +# html_title = None # A shorter title for the navigation bar. Default is the same as html_title. -#html_short_title = None +# html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. -#html_logo = None +# html_logo = None # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. -#html_favicon = None +# html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". -html_static_path = ['_static'] +html_static_path = ["_static"] # Add any extra paths that contain custom files (such as robots.txt or # .htaccess) here, relative to this directory. These files are copied # directly to the root of the documentation. -#html_extra_path = [] +# html_extra_path = [] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. -#html_last_updated_fmt = '%b %d, %Y' +# html_last_updated_fmt = '%b %d, %Y' # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. -#html_use_smartypants = True +# html_use_smartypants = True # Custom sidebar templates, maps document names to template names. -#html_sidebars = {} +# html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. -#html_additional_pages = {} +# html_additional_pages = {} # If false, no module index is generated. -#html_domain_indices = True +# html_domain_indices = True # If false, no index is generated. -#html_use_index = True +# html_use_index = True # If true, the index is split into individual pages for each letter. -#html_split_index = False +# html_split_index = False # If true, links to the reST sources are added to the pages. -#html_show_sourcelink = True +# html_show_sourcelink = True # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. -#html_show_sphinx = True +# html_show_sphinx = True # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. -#html_show_copyright = True +# html_show_copyright = True # If true, an OpenSearch description file will be output, and all pages will # contain a tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. -#html_use_opensearch = '' +# html_use_opensearch = '' # This is the file name suffix for HTML files (e.g. ".xhtml"). -#html_file_suffix = None +# html_file_suffix = None # Language to be used for generating the HTML full-text search index. # Sphinx supports the following languages: # 'da', 'de', 'en', 'es', 'fi', 'fr', 'h', 'it', 'ja' # 'nl', 'no', 'pt', 'ro', 'r', 'sv', 'tr' -#html_search_language = 'en' +# html_search_language = 'en' # A dictionary with options for the search language support, empty by default. # Now only 'ja' uses this config value -#html_search_options = {'type': 'default'} +# html_search_options = {'type': 'default'} # The name of a javascript file (relative to the configuration directory) that # implements a search results scorer. If empty, the default will be used. -#html_search_scorer = 'scorer.js' +# html_search_scorer = 'scorer.js' # Output file base name for HTML help builder. 
-htmlhelp_basename = 'Motodoc' +htmlhelp_basename = "Motodoc" # -- Options for LaTeX output --------------------------------------------- latex_elements = { -# The paper size ('letterpaper' or 'a4paper'). -#'papersize': 'letterpaper', - -# The font size ('10pt', '11pt' or '12pt'). -#'pointsize': '10pt', - -# Additional stuff for the LaTeX preamble. -#'preamble': '', - -# Latex figure (float) alignment -#'figure_align': 'htbp', + # The paper size ('letterpaper' or 'a4paper'). + #'papersize': 'letterpaper', + # The font size ('10pt', '11pt' or '12pt'). + #'pointsize': '10pt', + # Additional stuff for the LaTeX preamble. + #'preamble': '', + # Latex figure (float) alignment + #'figure_align': 'htbp', } # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, # author, documentclass [howto, manual, or own class]). latex_documents = [ - (master_doc, 'Moto.tex', 'Moto Documentation', - 'Steve Pulec', 'manual'), + (master_doc, "Moto.tex", "Moto Documentation", "Steve Pulec", "manual"), ] # The name of an image file (relative to this directory) to place at the top of # the title page. -#latex_logo = None +# latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. -#latex_use_parts = False +# latex_use_parts = False # If true, show page references after internal links. -#latex_show_pagerefs = False +# latex_show_pagerefs = False # If true, show URL addresses after external links. -#latex_show_urls = False +# latex_show_urls = False # Documents to append as an appendix to all manuals. -#latex_appendices = [] +# latex_appendices = [] # If false, no module index is generated. -#latex_domain_indices = True +# latex_domain_indices = True # -- Options for manual page output --------------------------------------- # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). -man_pages = [ - (master_doc, 'moto', 'Moto Documentation', - [author], 1) -] +man_pages = [(master_doc, "moto", "Moto Documentation", [author], 1)] # If true, show URL addresses after external links. -#man_show_urls = False +# man_show_urls = False # -- Options for Texinfo output ------------------------------------------- @@ -268,19 +262,25 @@ # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ - (master_doc, 'Moto', 'Moto Documentation', - author, 'Moto', 'One line description of project.', - 'Miscellaneous'), + ( + master_doc, + "Moto", + "Moto Documentation", + author, + "Moto", + "One line description of project.", + "Miscellaneous", + ), ] # Documents to append as an appendix to all manuals. -#texinfo_appendices = [] +# texinfo_appendices = [] # If false, no module index is generated. -#texinfo_domain_indices = True +# texinfo_domain_indices = True # How to display URL addresses: 'footnote', 'no', or 'inline'. -#texinfo_show_urls = 'footnote' +# texinfo_show_urls = 'footnote' # If true, do not generate a @detailmenu in the "Top" node's menu. 
-#texinfo_no_detailmenu = False +# texinfo_no_detailmenu = False diff --git a/moto/applicationautoscaling/models.py b/moto/applicationautoscaling/models.py index 47a1adad834a..9e80cc02212d 100644 --- a/moto/applicationautoscaling/models.py +++ b/moto/applicationautoscaling/models.py @@ -72,7 +72,10 @@ def applicationautoscaling_backend(self): return applicationautoscaling_backends[self.region] def describe_scalable_targets( - self, namespace, r_ids=None, dimension=None, + self, + namespace, + r_ids=None, + dimension=None, ): """ Describe scalable targets. """ if r_ids is None: @@ -110,8 +113,8 @@ def _scalable_target_exists(self, r_id, dimension): return r_id in self.targets.get(dimension, []) def _ecs_service_exists_for_target(self, r_id): - """ Raises a ValidationException if an ECS service does not exist - for the specified resource ID. + """Raises a ValidationException if an ECS service does not exist + for the specified resource ID. """ resource_type, cluster, service = r_id.split("/") result = self.ecs_backend.describe_services(cluster, [service]) diff --git a/moto/applicationautoscaling/responses.py b/moto/applicationautoscaling/responses.py index 5bb0a4144e45..b70d7b528900 100644 --- a/moto/applicationautoscaling/responses.py +++ b/moto/applicationautoscaling/responses.py @@ -21,8 +21,10 @@ def describe_scalable_targets(self): scalable_dimension = self._get_param("ScalableDimension") max_results = self._get_int_param("MaxResults", 50) marker = self._get_param("NextToken") - all_scalable_targets = self.applicationautoscaling_backend.describe_scalable_targets( - service_namespace, resource_ids, scalable_dimension + all_scalable_targets = ( + self.applicationautoscaling_backend.describe_scalable_targets( + service_namespace, resource_ids, scalable_dimension + ) ) start = int(marker) + 1 if marker else 0 next_token = None @@ -96,8 +98,8 @@ def delete_scaling_policy(self): return json.dumps({}) def _validate_params(self): - """ Validate parameters. - TODO Integrate this validation with the validation in models.py + """Validate parameters. + TODO Integrate this validation with the validation in models.py """ namespace = self._get_param("ServiceNamespace") dimension = self._get_param("ScalableDimension") diff --git a/moto/athena/responses.py b/moto/athena/responses.py index b5e6d6a95777..bc14774e1c76 100644 --- a/moto/athena/responses.py +++ b/moto/athena/responses.py @@ -82,7 +82,12 @@ def stop_query_execution(self): def error(self, msg, status): return ( - json.dumps({"__type": "InvalidRequestException", "Message": msg,}), + json.dumps( + { + "__type": "InvalidRequestException", + "Message": msg, + } + ), dict(status=status), ) diff --git a/moto/autoscaling/models.py b/moto/autoscaling/models.py index 1a25a656d0e6..ee5cd9acdddf 100644 --- a/moto/autoscaling/models.py +++ b/moto/autoscaling/models.py @@ -863,7 +863,7 @@ def change_capacity(self, group_name, scaling_adjustment): self.set_desired_capacity(group_name, desired_capacity) def change_capacity_percent(self, group_name, scaling_adjustment): - """ http://docs.aws.amazon.com/AutoScaling/latest/DeveloperGuide/as-scale-based-on-demand.html + """http://docs.aws.amazon.com/AutoScaling/latest/DeveloperGuide/as-scale-based-on-demand.html If PercentChangeInCapacity returns a value between 0 and 1, Auto Scaling will round it off to 1. 
If the PercentChangeInCapacity returns a value greater than 1, Auto Scaling will round it off to the diff --git a/moto/autoscaling/responses.py b/moto/autoscaling/responses.py index a9651a7743b7..1b4bb9f44324 100644 --- a/moto/autoscaling/responses.py +++ b/moto/autoscaling/responses.py @@ -42,8 +42,8 @@ def create_launch_configuration(self): def describe_launch_configurations(self): names = self._get_multi_param("LaunchConfigurationNames.member") - all_launch_configurations = self.autoscaling_backend.describe_launch_configurations( - names + all_launch_configurations = ( + self.autoscaling_backend.describe_launch_configurations(names) ) marker = self._get_param("NextToken") all_names = [lc.name for lc in all_launch_configurations] @@ -153,8 +153,8 @@ def attach_load_balancer_target_groups(self): @amzn_request_id def describe_load_balancer_target_groups(self): group_name = self._get_param("AutoScalingGroupName") - target_group_arns = self.autoscaling_backend.describe_load_balancer_target_groups( - group_name + target_group_arns = ( + self.autoscaling_backend.describe_load_balancer_target_groups(group_name) ) template = self.response_template(DESCRIBE_LOAD_BALANCER_TARGET_GROUPS) return template.render(target_group_arns=target_group_arns) diff --git a/moto/cloudformation/parsing.py b/moto/cloudformation/parsing.py index 168536f79e23..c6049f175cb2 100644 --- a/moto/cloudformation/parsing.py +++ b/moto/cloudformation/parsing.py @@ -254,7 +254,8 @@ def generate_resource_name(resource_type, stack_name, logical_id): def parse_resource( - resource_json, resources_map, + resource_json, + resources_map, ): resource_type = resource_json["Type"] resource_class = resource_class_from_type(resource_type) @@ -275,7 +276,9 @@ def parse_resource( def parse_resource_and_generate_name( - logical_id, resource_json, resources_map, + logical_id, + resource_json, + resources_map, ): resource_tuple = parse_resource(resource_json, resources_map) if not resource_tuple: @@ -695,7 +698,10 @@ def delete(self): ] parse_and_delete_resource( - resource_name, resource_json, self, self._region_name, + resource_name, + resource_json, + self, + self._region_name, ) self._parsed_resources.pop(parsed_resource.logical_resource_id) diff --git a/moto/cloudformation/utils.py b/moto/cloudformation/utils.py index d025af5fd092..c9e522efb317 100644 --- a/moto/cloudformation/utils.py +++ b/moto/cloudformation/utils.py @@ -41,8 +41,7 @@ def random_suffix(): def yaml_tag_constructor(loader, tag, node): - """convert shorthand intrinsic function to full name - """ + """convert shorthand intrinsic function to full name""" def _f(loader, tag, node): if tag == "!GetAtt": diff --git a/moto/cognitoidp/responses.py b/moto/cognitoidp/responses.py index e10a122823a7..d119c9e2173e 100644 --- a/moto/cognitoidp/responses.py +++ b/moto/cognitoidp/responses.py @@ -412,7 +412,9 @@ def confirm_sign_up(self): username = self._get_param("Username") confirmation_code = self._get_param("ConfirmationCode") cognitoidp_backends[self.region].confirm_sign_up( - client_id=client_id, username=username, confirmation_code=confirmation_code, + client_id=client_id, + username=username, + confirmation_code=confirmation_code, ) return "" diff --git a/moto/config/exceptions.py b/moto/config/exceptions.py index 4030b87a3b45..52cfd245094c 100644 --- a/moto/config/exceptions.py +++ b/moto/config/exceptions.py @@ -101,8 +101,10 @@ class InvalidDeliveryChannelNameException(JsonRESTError): code = 400 def __init__(self, name): - message = "The delivery channel name '{name}' is 
not valid, blank string.".format(
-            name=name
+        message = (
+            "The delivery channel name '{name}' is not valid, blank string.".format(
+                name=name
+            )
         )
         super(InvalidDeliveryChannelNameException, self).__init__(
             "InvalidDeliveryChannelNameException", message
@@ -287,8 +289,10 @@ class InvalidTagCharacters(JsonRESTError):
     code = 400
 
     def __init__(self, tag, param="tags.X.member.key"):
-        message = "1 validation error detected: Value '{}' at '{}' failed to satisfy ".format(
-            tag, param
+        message = (
+            "1 validation error detected: Value '{}' at '{}' failed to satisfy ".format(
+                tag, param
+            )
         )
         message += "constraint: Member must satisfy regular expression pattern: [\\\\p{L}\\\\p{Z}\\\\p{N}_.:/=+\\\\-@]+"
 
diff --git a/moto/config/models.py b/moto/config/models.py
index db25563432e5..3646f6704e86 100644
--- a/moto/config/models.py
+++ b/moto/config/models.py
@@ -395,8 +395,10 @@ def __init__(
         self.delivery_s3_key_prefix = delivery_s3_key_prefix
         self.excluded_accounts = excluded_accounts or []
         self.last_update_time = datetime2int(datetime.utcnow())
-        self.organization_conformance_pack_arn = "arn:aws:config:{0}:{1}:organization-conformance-pack/{2}".format(
-            region, DEFAULT_ACCOUNT_ID, self._unique_pack_name
+        self.organization_conformance_pack_arn = (
+            "arn:aws:config:{0}:{1}:organization-conformance-pack/{2}".format(
+                region, DEFAULT_ACCOUNT_ID, self._unique_pack_name
+            )
         )
         self.organization_conformance_pack_name = name
@@ -1006,9 +1008,9 @@ def list_aggregate_discovered_resources(
     def get_resource_config_history(self, resource_type, id, backend_region):
         """Returns the configuration of an item in the AWS Config format of the resource for the current regional backend.
-        NOTE: This is --NOT-- returning history as it is not supported in moto at this time. (PR's welcome!)
-        As such, the later_time, earlier_time, limit, and next_token are ignored as this will only
-        return 1 item. (If no items, it raises an exception)
+        NOTE: This is --NOT-- returning history as it is not supported in moto at this time. (PRs welcome!)
+        As such, the later_time, earlier_time, limit, and next_token are ignored as this will only
+        return 1 item. (If no items, it raises an exception)
         """
         # If the type isn't implemented then we won't find the item:
         if resource_type not in RESOURCE_MAP:
@@ -1090,10 +1092,10 @@ def batch_get_aggregate_resource_config(
     ):
         """Returns the configuration of an item in the AWS Config format of the resource for the current regional backend.
-        As far a moto goes -- the only real difference between this function and the `batch_get_resource_config` function is that
-        this will require a Config Aggregator be set up a priori and can search based on resource regions.
+        As far as moto goes -- the only real difference between this function and the `batch_get_resource_config` function is that
+        this will require a Config Aggregator be set up a priori and can search based on resource regions.
 
-        Note: moto will IGNORE the resource account ID in the search query.
""" if not self.config_aggregators.get(aggregator_name): raise NoSuchConfigurationAggregatorException() diff --git a/moto/config/responses.py b/moto/config/responses.py index 7dcc9a01bc6d..489f2b5749ac 100644 --- a/moto/config/responses.py +++ b/moto/config/responses.py @@ -190,8 +190,10 @@ def describe_organization_conformance_pack_statuses(self): def get_organization_conformance_pack_detailed_status(self): # 'Filters' parameter is not implemented yet - statuses = self.config_backend.get_organization_conformance_pack_detailed_status( - self._get_param("OrganizationConformancePackName") + statuses = ( + self.config_backend.get_organization_conformance_pack_detailed_status( + self._get_param("OrganizationConformancePackName") + ) ) return json.dumps(statuses) diff --git a/moto/core/responses.py b/moto/core/responses.py index fdac22c18485..1149ab0be3a4 100644 --- a/moto/core/responses.py +++ b/moto/core/responses.py @@ -62,9 +62,9 @@ def _decode_dict(d): class DynamicDictLoader(DictLoader): """ - Note: There's a bug in jinja2 pre-2.7.3 DictLoader where caching does not work. - Including the fixed (current) method version here to ensure performance benefit - even for those using older jinja versions. + Note: There's a bug in jinja2 pre-2.7.3 DictLoader where caching does not work. + Including the fixed (current) method version here to ensure performance benefit + even for those using older jinja versions. """ def get_source(self, environment, template): diff --git a/moto/core/utils.py b/moto/core/utils.py index 5f35538de36e..7e86a7045d0f 100644 --- a/moto/core/utils.py +++ b/moto/core/utils.py @@ -16,7 +16,7 @@ def camelcase_to_underscores(argument): - """ Converts a camelcase param like theNewAttribute to the equivalent + """Converts a camelcase param like theNewAttribute to the equivalent python underscore variable like the_new_attribute""" result = "" prev_char_title = True @@ -42,9 +42,9 @@ def camelcase_to_underscores(argument): def underscores_to_camelcase(argument): - """ Converts a camelcase param like the_new_attribute to the equivalent + """Converts a camelcase param like the_new_attribute to the equivalent camelcase version like theNewAttribute. Note that the first letter is - NOT capitalized by this function """ + NOT capitalized by this function""" result = "" previous_was_underscore = False for char in argument: @@ -350,11 +350,15 @@ def tags_from_query_string( tag_index = key.replace(prefix + ".", "").replace("." 
+ key_suffix, "") tag_key = querystring_dict.get( "{prefix}.{index}.{key_suffix}".format( - prefix=prefix, index=tag_index, key_suffix=key_suffix, + prefix=prefix, + index=tag_index, + key_suffix=key_suffix, ) )[0] tag_value_key = "{prefix}.{index}.{value_suffix}".format( - prefix=prefix, index=tag_index, value_suffix=value_suffix, + prefix=prefix, + index=tag_index, + value_suffix=value_suffix, ) if tag_value_key in querystring_dict: response_values[tag_key] = querystring_dict.get(tag_value_key)[0] diff --git a/moto/dynamodb2/models/__init__.py b/moto/dynamodb2/models/__init__.py index 782ddcee9ce1..2a3d8b8733c1 100644 --- a/moto/dynamodb2/models/__init__.py +++ b/moto/dynamodb2/models/__init__.py @@ -1052,7 +1052,8 @@ def update_table_global_indexes(self, name, global_index_updates): ) gsis_by_name[gsi_to_create["IndexName"]] = GlobalSecondaryIndex.create( - gsi_to_create, table.table_key_attrs, + gsi_to_create, + table.table_key_attrs, ) # in python 3.6, dict.values() returns a dict_values object, but we expect it to be a list in other diff --git a/moto/ec2/exceptions.py b/moto/ec2/exceptions.py index e14a60bf1c2c..f43f83697672 100644 --- a/moto/ec2/exceptions.py +++ b/moto/ec2/exceptions.py @@ -340,7 +340,9 @@ def __init__(self, dependant_parameter, parameter, parameter_value): super(InvalidDependantParameterError, self).__init__( "InvalidParameter", "{0} can't be empty if {1} is {2}.".format( - dependant_parameter, parameter, parameter_value, + dependant_parameter, + parameter, + parameter_value, ), ) @@ -350,7 +352,9 @@ def __init__(self, dependant_parameter, parameter_value, parameter): super(InvalidDependantParameterTypeError, self).__init__( "InvalidParameter", "{0} type must be {1} if {2} is provided.".format( - dependant_parameter, parameter_value, parameter, + dependant_parameter, + parameter_value, + parameter, ), ) @@ -358,7 +362,8 @@ def __init__(self, dependant_parameter, parameter_value, parameter): class InvalidAggregationIntervalParameterError(EC2ClientError): def __init__(self, parameter): super(InvalidAggregationIntervalParameterError, self).__init__( - "InvalidParameter", "Invalid {0}".format(parameter), + "InvalidParameter", + "Invalid {0}".format(parameter), ) diff --git a/moto/ec2/models.py b/moto/ec2/models.py index 7a0cef7a2173..47e2b0ac0790 100644 --- a/moto/ec2/models.py +++ b/moto/ec2/models.py @@ -1059,7 +1059,7 @@ def get_instance_by_id(self, instance_id): return instance def get_reservations_by_instance_ids(self, instance_ids, filters=None): - """ Go through all of the reservations and filter to only return those + """Go through all of the reservations and filter to only return those associated with the given instance_ids. """ reservations = [] @@ -1358,9 +1358,9 @@ def __init__( elif source_ami: """ - http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/CopyingAMIs.html - "We don't copy launch permissions, user-defined tags, or Amazon S3 bucket permissions from the source AMI to the new AMI." - ~ 2014.09.29 + http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/CopyingAMIs.html + "We don't copy launch permissions, user-defined tags, or Amazon S3 bucket permissions from the source AMI to the new AMI." 
+ ~ 2014.09.29 """ self.virtualization_type = source_ami.virtualization_type self.architecture = source_ami.architecture @@ -1491,7 +1491,12 @@ def describe_images( # Limit by owner ids if owners: # support filtering by Owners=['self'] - owners = list(map(lambda o: OWNER_ID if o == "self" else o, owners,)) + owners = list( + map( + lambda o: OWNER_ID if o == "self" else o, + owners, + ) + ) images = [ami for ami in images if ami.owner_id in owners] # Generic filters @@ -1518,9 +1523,9 @@ def validate_permission_targets(self, user_ids=None, group=None): # If anything is invalid, nothing is added. (No partial success.) if user_ids: """ - AWS docs: - "The AWS account ID is a 12-digit number, such as 123456789012, that you use to construct Amazon Resource Names (ARNs)." - http://docs.aws.amazon.com/general/latest/gr/acct-identifiers.html + AWS docs: + "The AWS account ID is a 12-digit number, such as 123456789012, that you use to construct Amazon Resource Names (ARNs)." + http://docs.aws.amazon.com/general/latest/gr/acct-identifiers.html """ for user_id in user_ids: if len(user_id) != 12 or not user_id.isdigit(): @@ -3705,13 +3710,17 @@ def _validate_request( ): if log_group_name is None and log_destination is None: raise InvalidDependantParameterError( - "LogDestination", "LogGroupName", "not provided", + "LogDestination", + "LogGroupName", + "not provided", ) if log_destination_type == "s3": if log_group_name is not None: raise InvalidDependantParameterTypeError( - "LogDestination", "cloud-watch-logs", "LogGroupName", + "LogDestination", + "cloud-watch-logs", + "LogGroupName", ) elif log_destination_type == "cloud-watch-logs": if deliver_logs_permission_arn is None: @@ -3859,7 +3868,8 @@ def delete_flow_logs(self, flow_log_ids): if non_existing: raise InvalidFlowLogIdError( - len(flow_log_ids), " ".join(x for x in flow_log_ids), + len(flow_log_ids), + " ".join(x for x in flow_log_ids), ) return True diff --git a/moto/ec2/responses/vpcs.py b/moto/ec2/responses/vpcs.py index de4bb3febdaa..1e8add46ca85 100644 --- a/moto/ec2/responses/vpcs.py +++ b/moto/ec2/responses/vpcs.py @@ -70,8 +70,8 @@ def describe_vpc_classic_link_dns_support(self): def enable_vpc_classic_link_dns_support(self): vpc_id = self._get_param("VpcId") - classic_link_dns_supported = self.ec2_backend.enable_vpc_classic_link_dns_support( - vpc_id=vpc_id + classic_link_dns_supported = ( + self.ec2_backend.enable_vpc_classic_link_dns_support(vpc_id=vpc_id) ) doc_date = self._get_doc_date() template = self.response_template(ENABLE_VPC_CLASSIC_LINK_DNS_SUPPORT_RESPONSE) @@ -81,8 +81,8 @@ def enable_vpc_classic_link_dns_support(self): def disable_vpc_classic_link_dns_support(self): vpc_id = self._get_param("VpcId") - classic_link_dns_supported = self.ec2_backend.disable_vpc_classic_link_dns_support( - vpc_id=vpc_id + classic_link_dns_supported = ( + self.ec2_backend.disable_vpc_classic_link_dns_support(vpc_id=vpc_id) ) doc_date = self._get_doc_date() template = self.response_template(DISABLE_VPC_CLASSIC_LINK_DNS_SUPPORT_RESPONSE) diff --git a/moto/ecs/exceptions.py b/moto/ecs/exceptions.py index 72129224ea37..cbd5d5f53295 100644 --- a/moto/ecs/exceptions.py +++ b/moto/ecs/exceptions.py @@ -38,5 +38,6 @@ class ClusterNotFoundException(JsonRESTError): def __init__(self): super(ClusterNotFoundException, self).__init__( - error_type="ClientException", message="Cluster not found", + error_type="ClientException", + message="Cluster not found", ) diff --git a/moto/ecs/models.py b/moto/ecs/models.py index a4522660e0ee..d7f840d53f1a 100644 --- 
a/moto/ecs/models.py +++ b/moto/ecs/models.py @@ -431,8 +431,10 @@ def __init__(self, ec2_instance_id, region_name): "type": "STRINGSET", }, ] - self.container_instance_arn = "arn:aws:ecs:{0}:012345678910:container-instance/{1}".format( - region_name, str(uuid.uuid4()) + self.container_instance_arn = ( + "arn:aws:ecs:{0}:012345678910:container-instance/{1}".format( + region_name, str(uuid.uuid4()) + ) ) self.pending_tasks_count = 0 self.remaining_resources = [ diff --git a/moto/elasticbeanstalk/models.py b/moto/elasticbeanstalk/models.py index 3767846c1117..303d34b87e96 100644 --- a/moto/elasticbeanstalk/models.py +++ b/moto/elasticbeanstalk/models.py @@ -8,7 +8,11 @@ class FakeEnvironment(BaseModel): def __init__( - self, application, environment_name, solution_stack_name, tags, + self, + application, + environment_name, + solution_stack_name, + tags, ): self.application = weakref.proxy( application @@ -49,7 +53,10 @@ def __init__(self, backend, application_name): self.environments = dict() def create_environment( - self, environment_name, solution_stack_name, tags, + self, + environment_name, + solution_stack_name, + tags, ): if environment_name in self.environments: raise InvalidParameterValueError @@ -86,7 +93,10 @@ def create_application(self, application_name): raise InvalidParameterValueError( "Application {} already exists.".format(application_name) ) - new_app = FakeApplication(backend=self, application_name=application_name,) + new_app = FakeApplication( + backend=self, + application_name=application_name, + ) self.applications[application_name] = new_app return new_app diff --git a/moto/elasticbeanstalk/responses.py b/moto/elasticbeanstalk/responses.py index 387cbb3ea24a..f35e0f3ff99a 100644 --- a/moto/elasticbeanstalk/responses.py +++ b/moto/elasticbeanstalk/responses.py @@ -18,11 +18,16 @@ def create_application(self): ) template = self.response_template(EB_CREATE_APPLICATION) - return template.render(region_name=self.backend.region, application=app,) + return template.render( + region_name=self.backend.region, + application=app, + ) def describe_applications(self): template = self.response_template(EB_DESCRIBE_APPLICATIONS) - return template.render(applications=self.backend.applications.values(),) + return template.render( + applications=self.backend.applications.values(), + ) def create_environment(self): application_name = self._get_param("ApplicationName") @@ -42,13 +47,18 @@ def create_environment(self): ) template = self.response_template(EB_CREATE_ENVIRONMENT) - return template.render(environment=env, region=self.backend.region,) + return template.render( + environment=env, + region=self.backend.region, + ) def describe_environments(self): envs = self.backend.describe_environments() template = self.response_template(EB_DESCRIBE_ENVIRONMENTS) - return template.render(environments=envs,) + return template.render( + environments=envs, + ) def list_available_solution_stacks(self): return EB_LIST_AVAILABLE_SOLUTION_STACKS @@ -68,7 +78,10 @@ def list_tags_for_resource(self): tags = self.backend.list_tags_for_resource(resource_arn) template = self.response_template(EB_LIST_TAGS_FOR_RESOURCE) - return template.render(tags=tags, arn=resource_arn,) + return template.render( + tags=tags, + arn=resource_arn, + ) EB_CREATE_APPLICATION = """ diff --git a/moto/emr/utils.py b/moto/emr/utils.py index 4d9da84349c5..48f3232facee 100644 --- a/moto/emr/utils.py +++ b/moto/emr/utils.py @@ -43,14 +43,14 @@ def steps_from_query_string(querystring_dict): class Unflattener: @staticmethod def 
unflatten_complex_params(input_dict, param_name):
-        """ Function to unflatten (portions of) dicts with complex keys. The moto request parser flattens the incoming
+        """Function to unflatten (portions of) dicts with complex keys. The moto request parser flattens the incoming
         request bodies, which is generally helpful, but for nested dicts/lists can result in a hard-to-manage parameter
         explosion. This function allows one to selectively unflatten a set of dict keys, replacing them with a deep
         dict/list structure named identically to the root component in the complex name. Complex keys are composed of
         multiple components separated by periods. Components may be prefixed with _, which is stripped. List indexes are represented
-        with two components, 'member' and the index number. """
+        with two components, 'member' and the index number."""
         items_to_process = {}
         for k in input_dict.keys():
             if k.startswith(param_name):
diff --git a/moto/iam/access_control.py b/moto/iam/access_control.py
index bcde25d9ea0f..abf51928a40c 100644
--- a/moto/iam/access_control.py
+++ b/moto/iam/access_control.py
@@ -125,10 +125,12 @@ def __init__(self, access_key_id, headers):
 
     @property
     def arn(self):
-        return "arn:aws:sts::{account_id}:assumed-role/{role_name}/{session_name}".format(
-            account_id=ACCOUNT_ID,
-            role_name=self._owner_role_name,
-            session_name=self._session_name,
+        return (
+            "arn:aws:sts::{account_id}:assumed-role/{role_name}/{session_name}".format(
+                account_id=ACCOUNT_ID,
+                role_name=self._owner_role_name,
+                session_name=self._session_name,
+            )
         )
 
     def create_credentials(self):
diff --git a/moto/iam/exceptions.py b/moto/iam/exceptions.py
index 1d0f3ca01180..e1070c42ed0a 100644
--- a/moto/iam/exceptions.py
+++ b/moto/iam/exceptions.py
@@ -88,8 +88,10 @@ class InvalidTagCharacters(RESTError):
     code = 400
 
     def __init__(self, tag, param="tags.X.member.key"):
-        message = "1 validation error detected: Value '{}' at '{}' failed to satisfy ".format(
-            tag, param
+        message = (
+            "1 validation error detected: Value '{}' at '{}' failed to satisfy ".format(
+                tag, param
+            )
         )
         message += "constraint: Member must satisfy regular expression pattern: [\\p{L}\\p{Z}\\p{N}_.:/=+\\-@]+"
 
diff --git a/moto/iam/models.py b/moto/iam/models.py
index 76b824d609bb..a28e3b9fc58e 100755
--- a/moto/iam/models.py
+++ b/moto/iam/models.py
@@ -362,7 +362,12 @@ def __init__(
         self.update(policy_name, policy_document, group_names, role_names, user_names)
 
     def update(
-        self, policy_name, policy_document, group_names, role_names, user_names,
+        self,
+        policy_name,
+        policy_document,
+        group_names,
+        role_names,
+        user_names,
     ):
         self.policy_name = policy_name
         self.policy_document = (
@@ -404,7 +409,11 @@ def create_from_cloudformation_json(
 
     @classmethod
     def update_from_cloudformation_json(
-        cls, original_resource, new_resource_name, cloudformation_json, region_name,
+        cls,
+        original_resource,
+        new_resource_name,
+        cloudformation_json,
+        region_name,
     ):
         properties = cloudformation_json["Properties"]
 
@@ -807,11 +816,18 @@ def create_from_cloudformation_json(
         user_name = properties.get("UserName")
         status = properties.get("Status", "Active")
 
-        return iam_backend.create_access_key(user_name, status=status,)
+        return iam_backend.create_access_key(
+            user_name,
+            status=status,
+        )
 
     @classmethod
     def update_from_cloudformation_json(
-        cls, original_resource, new_resource_name, cloudformation_json, region_name,
+        cls,
+        original_resource,
+        new_resource_name,
+        cloudformation_json,
+        region_name,
     ):
         properties = cloudformation_json["Properties"]
 
@@
-1139,7 +1155,11 @@ def create_from_cloudformation_json( @classmethod def update_from_cloudformation_json( - cls, original_resource, new_resource_name, cloudformation_json, region_name, + cls, + original_resource, + new_resource_name, + cloudformation_json, + region_name, ): properties = cloudformation_json["Properties"] @@ -2557,7 +2577,11 @@ def update_inline_policy( inline_policy = self.get_inline_policy(resource_name) inline_policy.unapply_policy(self) inline_policy.update( - policy_name, policy_document, group_names, role_names, user_names, + policy_name, + policy_document, + group_names, + role_names, + user_names, ) inline_policy.apply_policy(self) return inline_policy diff --git a/moto/iam/policy_validation.py b/moto/iam/policy_validation.py index 95610ac4db1e..251af606770e 100644 --- a/moto/iam/policy_validation.py +++ b/moto/iam/policy_validation.py @@ -343,8 +343,10 @@ def _validate_resource_format(self, resource): resource_partitions = resource.partition(":") if resource_partitions[1] == "": - self._resource_error = 'Resource {resource} must be in ARN format or "*".'.format( - resource=resource + self._resource_error = ( + 'Resource {resource} must be in ARN format or "*".'.format( + resource=resource + ) ) return @@ -390,15 +392,14 @@ def _validate_resource_format(self, resource): service = resource_partitions[0] - if service in SERVICE_TYPE_REGION_INFORMATION_ERROR_ASSOCIATIONS.keys() and not resource_partitions[ - 2 - ].startswith( - ":" + if ( + service in SERVICE_TYPE_REGION_INFORMATION_ERROR_ASSOCIATIONS.keys() + and not resource_partitions[2].startswith(":") ): - self._resource_error = SERVICE_TYPE_REGION_INFORMATION_ERROR_ASSOCIATIONS[ - service - ].format( - resource=resource + self._resource_error = ( + SERVICE_TYPE_REGION_INFORMATION_ERROR_ASSOCIATIONS[service].format( + resource=resource + ) ) return @@ -520,8 +521,8 @@ def _validate_iso_8601_datetime(datetime): assert 0 <= int(time_zone_minutes) <= 59 else: seconds_with_decimal_fraction = time_parts[2] - seconds_with_decimal_fraction_partition = seconds_with_decimal_fraction.partition( - "." + seconds_with_decimal_fraction_partition = ( + seconds_with_decimal_fraction.partition(".") ) seconds = seconds_with_decimal_fraction_partition[0] assert 0 <= int(seconds) <= 59 diff --git a/moto/iot/responses.py b/moto/iot/responses.py index 15c62d91ea9f..7f2c602ea294 100644 --- a/moto/iot/responses.py +++ b/moto/iot/responses.py @@ -340,7 +340,8 @@ def register_certificate_without_ca(self): status = self._get_param("status") cert = self.iot_backend.register_certificate_without_ca( - certificate_pem=certificate_pem, status=status, + certificate_pem=certificate_pem, + status=status, ) return json.dumps( dict(certificateId=cert.certificate_id, certificateArn=cert.arn) diff --git a/moto/iotdata/models.py b/moto/iotdata/models.py index 41b69bc7f324..f695fb3fc421 100644 --- a/moto/iotdata/models.py +++ b/moto/iotdata/models.py @@ -114,8 +114,7 @@ def to_response_dict(self): } def to_dict(self, include_delta=True): - """returning nothing except for just top-level keys for now. 
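
The iotdata, kinesis, kinesisvideo, and managedblockchain hunks here all touch the same two-file pattern that every moto service in this series uses: models.py holds a per-region backend class with the in-memory state and operations, and responses.py parses the incoming request and delegates to that backend. A schematic sketch of the pattern (toy service; every name below is illustrative, not from moto):

    import json

    class ToyBackend:
        def __init__(self, region_name):
            self.region_name = region_name
            self.things = {}

        def create_thing(self, name):
            # Backends keep plain in-memory state keyed per region.
            thing = {"name": name, "region": self.region_name}
            self.things[name] = thing
            return thing

    class ToyResponse:
        def __init__(self, backend):
            self.backend = backend

        def create_thing(self, body):
            # Responses only parse parameters and serialize the result.
            params = json.loads(body)
            thing = self.backend.create_thing(params["Name"])
            return 200, {}, json.dumps(thing)
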
- """ + """returning nothing except for just top-level keys for now.""" if self.deleted: return {"timestamp": self.timestamp, "version": self.version} delta = self.parse_payload(self.desired, self.reported) diff --git a/moto/kinesis/models.py b/moto/kinesis/models.py index 280402d5f025..4548fb347e05 100644 --- a/moto/kinesis/models.py +++ b/moto/kinesis/models.py @@ -261,7 +261,11 @@ def create_from_cloudformation_json( @classmethod def update_from_cloudformation_json( - cls, original_resource, new_resource_name, cloudformation_json, region_name, + cls, + original_resource, + new_resource_name, + cloudformation_json, + region_name, ): properties = cloudformation_json["Properties"] diff --git a/moto/kinesisvideo/exceptions.py b/moto/kinesisvideo/exceptions.py index e2e119b3799d..33c7e603a745 100644 --- a/moto/kinesisvideo/exceptions.py +++ b/moto/kinesisvideo/exceptions.py @@ -20,5 +20,6 @@ class ResourceInUseException(KinesisvideoClientError): def __init__(self, message): self.code = 400 super(ResourceInUseException, self).__init__( - "ResourceInUseException", message, + "ResourceInUseException", + message, ) diff --git a/moto/kinesisvideo/responses.py b/moto/kinesisvideo/responses.py index d1e386f2eeea..383777ab745a 100644 --- a/moto/kinesisvideo/responses.py +++ b/moto/kinesisvideo/responses.py @@ -32,7 +32,8 @@ def describe_stream(self): stream_name = self._get_param("StreamName") stream_arn = self._get_param("StreamARN") stream_info = self.kinesisvideo_backend.describe_stream( - stream_name=stream_name, stream_arn=stream_arn, + stream_name=stream_name, + stream_arn=stream_arn, ) return json.dumps(dict(StreamInfo=stream_info)) @@ -51,7 +52,8 @@ def delete_stream(self): stream_arn = self._get_param("StreamARN") current_version = self._get_param("CurrentVersion") self.kinesisvideo_backend.delete_stream( - stream_arn=stream_arn, current_version=current_version, + stream_arn=stream_arn, + current_version=current_version, ) return json.dumps(dict()) @@ -60,6 +62,8 @@ def get_data_endpoint(self): stream_arn = self._get_param("StreamARN") api_name = self._get_param("APIName") data_endpoint = self.kinesisvideo_backend.get_data_endpoint( - stream_name=stream_name, stream_arn=stream_arn, api_name=api_name, + stream_name=stream_name, + stream_arn=stream_arn, + api_name=api_name, ) return json.dumps(dict(DataEndpoint=data_endpoint)) diff --git a/moto/kinesisvideoarchivedmedia/responses.py b/moto/kinesisvideoarchivedmedia/responses.py index d021ced0e72b..a566930971eb 100644 --- a/moto/kinesisvideoarchivedmedia/responses.py +++ b/moto/kinesisvideoarchivedmedia/responses.py @@ -23,16 +23,18 @@ def get_hls_streaming_session_url(self): max_media_playlist_fragment_results = self._get_param( "MaxMediaPlaylistFragmentResults" ) - hls_streaming_session_url = self.kinesisvideoarchivedmedia_backend.get_hls_streaming_session_url( - stream_name=stream_name, - stream_arn=stream_arn, - playback_mode=playback_mode, - hls_fragment_selector=hls_fragment_selector, - container_format=container_format, - discontinuity_mode=discontinuity_mode, - display_fragment_timestamp=display_fragment_timestamp, - expires=expires, - max_media_playlist_fragment_results=max_media_playlist_fragment_results, + hls_streaming_session_url = ( + self.kinesisvideoarchivedmedia_backend.get_hls_streaming_session_url( + stream_name=stream_name, + stream_arn=stream_arn, + playback_mode=playback_mode, + hls_fragment_selector=hls_fragment_selector, + container_format=container_format, + discontinuity_mode=discontinuity_mode, + 
display_fragment_timestamp=display_fragment_timestamp, + expires=expires, + max_media_playlist_fragment_results=max_media_playlist_fragment_results, + ) ) return json.dumps(dict(HLSStreamingSessionURL=hls_streaming_session_url)) @@ -45,15 +47,17 @@ def get_dash_streaming_session_url(self): dash_fragment_selector = self._get_param("DASHFragmentSelector") expires = self._get_int_param("Expires") max_manifest_fragment_results = self._get_param("MaxManifestFragmentResults") - dash_streaming_session_url = self.kinesisvideoarchivedmedia_backend.get_dash_streaming_session_url( - stream_name=stream_name, - stream_arn=stream_arn, - playback_mode=playback_mode, - display_fragment_timestamp=display_fragment_timestamp, - display_fragment_number=display_fragment_number, - dash_fragment_selector=dash_fragment_selector, - expires=expires, - max_manifest_fragment_results=max_manifest_fragment_results, + dash_streaming_session_url = ( + self.kinesisvideoarchivedmedia_backend.get_dash_streaming_session_url( + stream_name=stream_name, + stream_arn=stream_arn, + playback_mode=playback_mode, + display_fragment_timestamp=display_fragment_timestamp, + display_fragment_number=display_fragment_number, + dash_fragment_selector=dash_fragment_selector, + expires=expires, + max_manifest_fragment_results=max_manifest_fragment_results, + ) ) return json.dumps(dict(DASHStreamingSessionURL=dash_streaming_session_url)) diff --git a/moto/managedblockchain/models.py b/moto/managedblockchain/models.py index 233e875c3203..92e2456b2e4d 100644 --- a/moto/managedblockchain/models.py +++ b/moto/managedblockchain/models.py @@ -352,7 +352,11 @@ def set_network_status(self, network_status): class ManagedBlockchainMember(BaseModel): def __init__( - self, id, networkid, member_configuration, region, + self, + id, + networkid, + member_configuration, + region, ): self.creationdate = datetime.datetime.utcnow() self.id = id @@ -583,7 +587,11 @@ def get_network(self, network_id): return self.networks.get(network_id) def create_proposal( - self, networkid, memberid, actions, description=None, + self, + networkid, + memberid, + actions, + description=None, ): # Check if network exists if networkid not in self.networks: @@ -783,7 +791,10 @@ def reject_invitation(self, invitationid): self.invitations.get(invitationid).reject_invitation() def create_member( - self, invitationid, networkid, member_configuration, + self, + invitationid, + networkid, + member_configuration, ): # Check if network exists if networkid not in self.networks: @@ -988,7 +999,8 @@ def create_node( chkregionpreregex = self.region_name + "[a-z]" if re.match(chkregionpreregex, availabilityzone, re.IGNORECASE) is None: raise InvalidRequestException( - "CreateNode", "Availability Zone is not valid", + "CreateNode", + "Availability Zone is not valid", ) node_id = get_node_id() diff --git a/moto/managedblockchain/responses.py b/moto/managedblockchain/responses.py index 7dd628eba4ba..ccbc08d4482b 100644 --- a/moto/managedblockchain/responses.py +++ b/moto/managedblockchain/responses.py @@ -134,7 +134,10 @@ def _proposal_response_post(self, network_id, json_body, querystring, headers): description = json_body.get("Description", None) response = self.backend.create_proposal( - network_id, memberid, actions, description, + network_id, + memberid, + actions, + description, ) return 200, headers, json.dumps(response) @@ -198,7 +201,10 @@ def _proposal_votes_response_post( vote = json_body["Vote"] self.backend.vote_on_proposal( - network_id, proposal_id, votermemberid, vote, + 
network_id, + proposal_id, + votermemberid, + vote, ) return 200, headers, "" @@ -278,7 +284,9 @@ def _member_response_post(self, network_id, json_body, querystring, headers): member_configuration = json_body["MemberConfiguration"] response = self.backend.create_member( - invitationid, network_id, member_configuration, + invitationid, + network_id, + member_configuration, ) return 200, headers, json.dumps(response) @@ -317,7 +325,9 @@ def _memberid_response_get(self, network_id, member_id, headers): def _memberid_response_patch(self, network_id, member_id, json_body, headers): logpublishingconfiguration = json_body["LogPublishingConfiguration"] self.backend.update_member( - network_id, member_id, logpublishingconfiguration, + network_id, + member_id, + logpublishingconfiguration, ) return 200, headers, "" @@ -417,7 +427,10 @@ def _nodeid_response_patch( ): logpublishingconfiguration = json_body self.backend.update_node( - network_id, member_id, node_id, logpublishingconfiguration, + network_id, + member_id, + node_id, + logpublishingconfiguration, ) return 200, headers, "" diff --git a/moto/organizations/models.py b/moto/organizations/models.py index 5655326c02ff..6fc696c91ed3 100644 --- a/moto/organizations/models.py +++ b/moto/organizations/models.py @@ -785,7 +785,8 @@ def deregister_delegated_administrator(self, **kwargs): ) admin = next( - (admin for admin in self.admins if admin.account.id == account_id), None, + (admin for admin in self.admins if admin.account.id == account_id), + None, ) if admin is None: account = next( @@ -841,7 +842,8 @@ def detach_policy(self, **kwargs): ) elif re.match(account_id_regex, target_id): account = next( - (account for account in self.accounts if account.id == target_id), None, + (account for account in self.accounts if account.id == target_id), + None, ) if account is not None: if account in account.attached_policies: diff --git a/moto/packages/httpretty/core.py b/moto/packages/httpretty/core.py index 83bd19237017..2f48ad567a0e 100644 --- a/moto/packages/httpretty/core.py +++ b/moto/packages/httpretty/core.py @@ -269,13 +269,13 @@ def __init__( _sock=None, ): """ - Matches both the Python 2 API: - def __init__(self, family=AF_INET, type=SOCK_STREAM, proto=0, _sock=None): - https://github.com/python/cpython/blob/2.7/Lib/socket.py + Matches both the Python 2 API: + def __init__(self, family=AF_INET, type=SOCK_STREAM, proto=0, _sock=None): + https://github.com/python/cpython/blob/2.7/Lib/socket.py - and the Python 3 API: - def __init__(self, family=-1, type=-1, proto=-1, fileno=None): - https://github.com/python/cpython/blob/3.5/Lib/socket.py + and the Python 3 API: + def __init__(self, family=-1, type=-1, proto=-1, fileno=None): + https://github.com/python/cpython/blob/3.5/Lib/socket.py """ if httpretty.allow_net_connect: if PY3: diff --git a/moto/ram/models.py b/moto/ram/models.py index d38099374c62..0d2b8bfd0ad1 100644 --- a/moto/ram/models.py +++ b/moto/ram/models.py @@ -88,8 +88,10 @@ def add_principals(self, principals): ) if root_id: - ous = self.organizations_backend.list_organizational_units_for_parent( - ParentId=root_id + ous = ( + self.organizations_backend.list_organizational_units_for_parent( + ParentId=root_id + ) ) if any(principal == ou["Arn"] for ou in ous["OrganizationalUnits"]): continue diff --git a/moto/s3/models.py b/moto/s3/models.py index 17282739a321..9e85d8f43d31 100644 --- a/moto/s3/models.py +++ b/moto/s3/models.py @@ -523,7 +523,10 @@ def to_config_dict(self): for key, value in self.tags.items(): data.append( - 
{"type": "LifecycleTagPredicate", "tag": {"key": key, "value": value},} + { + "type": "LifecycleTagPredicate", + "tag": {"key": key, "value": value}, + } ) return data @@ -1129,7 +1132,11 @@ def create_from_cloudformation_json( @classmethod def update_from_cloudformation_json( - cls, original_resource, new_resource_name, cloudformation_json, region_name, + cls, + original_resource, + new_resource_name, + cloudformation_json, + region_name, ): properties = cloudformation_json["Properties"] @@ -1469,7 +1476,8 @@ def set_key_tags(self, key, tags, key_name=None): raise MissingKey(key_name) self.tagger.delete_all_tags_for_resource(key.arn) self.tagger.tag_resource( - key.arn, [{"Key": k, "Value": v} for (k, v) in tags.items()], + key.arn, + [{"Key": k, "Value": v} for (k, v) in tags.items()], ) return key @@ -1481,7 +1489,8 @@ def put_bucket_tagging(self, bucket_name, tags): bucket = self.get_bucket(bucket_name) self.tagger.delete_all_tags_for_resource(bucket.arn) self.tagger.tag_resource( - bucket.arn, [{"Key": key, "Value": value} for key, value in tags.items()], + bucket.arn, + [{"Key": key, "Value": value} for key, value in tags.items()], ) def delete_bucket_tagging(self, bucket_name): diff --git a/moto/s3/responses.py b/moto/s3/responses.py index b01bed1fbd85..c27b57cf666a 100644 --- a/moto/s3/responses.py +++ b/moto/s3/responses.py @@ -406,8 +406,8 @@ def _bucket_response_get(self, bucket_name, querystring): template = self.response_template(S3_BUCKET_CORS_RESPONSE) return template.render(cors=cors) elif "notification" in querystring: - notification_configuration = self.backend.get_bucket_notification_configuration( - bucket_name + notification_configuration = ( + self.backend.get_bucket_notification_configuration(bucket_name) ) if not notification_configuration: return 200, {}, "" diff --git a/moto/s3/utils.py b/moto/s3/utils.py index 2cdb7e8623f8..d02da3a60189 100644 --- a/moto/s3/utils.py +++ b/moto/s3/utils.py @@ -98,7 +98,7 @@ def undo_clean_key_name(key_name): class _VersionedKeyStore(dict): - """ A simplified/modified version of Django's `MultiValueDict` taken from: + """A simplified/modified version of Django's `MultiValueDict` taken from: https://github.com/django/django/blob/70576740b0bb5289873f5a9a9a4e1a26b2c330e5/django/utils/datastructures.py#L282 """ diff --git a/moto/sagemaker/models.py b/moto/sagemaker/models.py index 8fef306b8d3a..d13925ba9d92 100644 --- a/moto/sagemaker/models.py +++ b/moto/sagemaker/models.py @@ -517,8 +517,10 @@ def __init__( self.creation_time = self.last_modified_time = datetime.now().strftime( "%Y-%m-%d %H:%M:%S" ) - self.notebook_instance_lifecycle_config_arn = FakeSageMakerNotebookInstanceLifecycleConfig.arn_formatter( - self.notebook_instance_lifecycle_config_name, self.region_name + self.notebook_instance_lifecycle_config_arn = ( + FakeSageMakerNotebookInstanceLifecycleConfig.arn_formatter( + self.notebook_instance_lifecycle_config_name, self.region_name + ) ) @staticmethod @@ -580,7 +582,11 @@ def describe_model(self, model_name=None): message = "Could not find model '{}'.".format( Model.arn_for_model_name(model_name, self.region_name) ) - raise ValidationError(message=message) + raise RESTError( + error_type="ValidationException", + message=message, + template="error_json", + ) def list_models(self): models = [] @@ -790,7 +796,10 @@ def delete_endpoint_config(self, endpoint_config_name): raise ValidationError(message=message) def create_endpoint( - self, endpoint_name, endpoint_config_name, tags, + self, + endpoint_name, + 
endpoint_config_name, + tags, ): try: endpoint_config = self.describe_endpoint_config(endpoint_config_name) diff --git a/moto/sagemaker/responses.py b/moto/sagemaker/responses.py index d5d2cab435d5..9abb1369af9a 100644 --- a/moto/sagemaker/responses.py +++ b/moto/sagemaker/responses.py @@ -243,12 +243,14 @@ def delete_training_job(self): @amzn_request_id def create_notebook_instance_lifecycle_config(self): try: - lifecycle_configuration = self.sagemaker_backend.create_notebook_instance_lifecycle_config( - notebook_instance_lifecycle_config_name=self._get_param( - "NotebookInstanceLifecycleConfigName" - ), - on_create=self._get_param("OnCreate"), - on_start=self._get_param("OnStart"), + lifecycle_configuration = ( + self.sagemaker_backend.create_notebook_instance_lifecycle_config( + notebook_instance_lifecycle_config_name=self._get_param( + "NotebookInstanceLifecycleConfigName" + ), + on_create=self._get_param("OnCreate"), + on_start=self._get_param("OnStart"), + ) ) response = { "NotebookInstanceLifecycleConfigArn": lifecycle_configuration.notebook_instance_lifecycle_config_arn, diff --git a/moto/sns/models.py b/moto/sns/models.py index 7d297fbdc097..5da2c06b7e18 100644 --- a/moto/sns/models.py +++ b/moto/sns/models.py @@ -340,12 +340,14 @@ def enabled(self): @property def arn(self): - return "arn:aws:sns:{region}:{AccountId}:endpoint/{platform}/{name}/{id}".format( - region=self.region, - AccountId=DEFAULT_ACCOUNT_ID, - platform=self.application.platform, - name=self.application.name, - id=self.id, + return ( + "arn:aws:sns:{region}:{AccountId}:endpoint/{platform}/{name}/{id}".format( + region=self.region, + AccountId=DEFAULT_ACCOUNT_ID, + platform=self.application.platform, + name=self.application.name, + id=self.id, + ) ) def publish(self, message): diff --git a/moto/sqs/responses.py b/moto/sqs/responses.py index 016637b4c8a3..1168d8094053 100644 --- a/moto/sqs/responses.py +++ b/moto/sqs/responses.py @@ -354,7 +354,9 @@ def receive_message(self): queue_name = self._get_queue_name() message_attributes = self._get_multi_param("message_attributes") if not message_attributes: - message_attributes = extract_input_message_attributes(self.querystring,) + message_attributes = extract_input_message_attributes( + self.querystring, + ) queue = self.sqs_backend.get_queue(queue_name) @@ -718,8 +720,10 @@ def list_queue_tags(self): 6fde8d1e-52cd-4581-8cd9-c512f4c64223 """ -ERROR_MAX_VISIBILITY_TIMEOUT_RESPONSE = "Invalid request, maximum visibility timeout is {0}".format( - MAXIMUM_VISIBILTY_TIMEOUT +ERROR_MAX_VISIBILITY_TIMEOUT_RESPONSE = ( + "Invalid request, maximum visibility timeout is {0}".format( + MAXIMUM_VISIBILTY_TIMEOUT + ) ) ERROR_INEXISTENT_QUEUE = """ diff --git a/moto/stepfunctions/models.py b/moto/stepfunctions/models.py index 125e5d807e64..c3a266130fe1 100644 --- a/moto/stepfunctions/models.py +++ b/moto/stepfunctions/models.py @@ -148,7 +148,9 @@ def update_from_cloudformation_json( tags = cfn_to_api_tags(properties.get("Tags", [])) sf_backend = stepfunction_backends[region_name] state_machine = sf_backend.update_state_machine( - original_resource.arn, definition=definition, role_arn=role_arn, + original_resource.arn, + definition=definition, + role_arn=role_arn, ) state_machine.add_tags(tags) return state_machine diff --git a/moto/sts/models.py b/moto/sts/models.py index b274b1acdcfe..04c1233da751 100644 --- a/moto/sts/models.py +++ b/moto/sts/models.py @@ -48,10 +48,12 @@ def user_id(self): @property def arn(self): - return 
"arn:aws:sts::{account_id}:assumed-role/{role_name}/{session_name}".format( - account_id=ACCOUNT_ID, - role_name=self.role_arn.split("/")[-1], - session_name=self.session_name, + return ( + "arn:aws:sts::{account_id}:assumed-role/{role_name}/{session_name}".format( + account_id=ACCOUNT_ID, + role_name=self.role_arn.split("/")[-1], + session_name=self.session_name, + ) ) diff --git a/moto/transcribe/models.py b/moto/transcribe/models.py index bf8e602e63dc..f149a7989b5c 100644 --- a/moto/transcribe/models.py +++ b/moto/transcribe/models.py @@ -153,7 +153,11 @@ def advance_job_status(self): class FakeMedicalVocabulary(BaseObject): def __init__( - self, region_name, vocabulary_name, language_code, vocabulary_file_uri, + self, + region_name, + vocabulary_name, + language_code, + vocabulary_file_uri, ): self._region_name = region_name self.vocabulary_name = vocabulary_name diff --git a/scripts/get_amis.py b/scripts/get_amis.py index 687dab2d4f91..b694340bd14b 100644 --- a/scripts/get_amis.py +++ b/scripts/get_amis.py @@ -3,35 +3,64 @@ # Taken from free tier list when creating an instance instances = [ - 'ami-760aaa0f', 'ami-bb9a6bc2', 'ami-35e92e4c', 'ami-785db401', 'ami-b7e93bce', 'ami-dca37ea5', 'ami-999844e0', - 'ami-9b32e8e2', 'ami-f8e54081', 'ami-bceb39c5', 'ami-03cf127a', 'ami-1ecc1e67', 'ami-c2ff2dbb', 'ami-12c6146b', - 'ami-d1cb19a8', 'ami-61db0918', 'ami-56ec3e2f', 'ami-84ee3cfd', 'ami-86ee3cff', 'ami-f0e83a89', 'ami-1f12c066', - 'ami-afee3cd6', 'ami-1812c061', 'ami-77ed3f0e', 'ami-3bf32142', 'ami-6ef02217', 'ami-f4cf1d8d', 'ami-3df32144', - 'ami-c6f321bf', 'ami-24f3215d', 'ami-fa7cdd89', 'ami-1e749f67', 'ami-a9cc1ed0', 'ami-8104a4f8' + "ami-760aaa0f", + "ami-bb9a6bc2", + "ami-35e92e4c", + "ami-785db401", + "ami-b7e93bce", + "ami-dca37ea5", + "ami-999844e0", + "ami-9b32e8e2", + "ami-f8e54081", + "ami-bceb39c5", + "ami-03cf127a", + "ami-1ecc1e67", + "ami-c2ff2dbb", + "ami-12c6146b", + "ami-d1cb19a8", + "ami-61db0918", + "ami-56ec3e2f", + "ami-84ee3cfd", + "ami-86ee3cff", + "ami-f0e83a89", + "ami-1f12c066", + "ami-afee3cd6", + "ami-1812c061", + "ami-77ed3f0e", + "ami-3bf32142", + "ami-6ef02217", + "ami-f4cf1d8d", + "ami-3df32144", + "ami-c6f321bf", + "ami-24f3215d", + "ami-fa7cdd89", + "ami-1e749f67", + "ami-a9cc1ed0", + "ami-8104a4f8", ] -client = boto3.client('ec2', region_name='eu-west-1') +client = boto3.client("ec2", region_name="eu-west-1") test = client.describe_images(ImageIds=instances) result = [] -for image in test['Images']: +for image in test["Images"]: try: tmp = { - 'ami_id': image['ImageId'], - 'name': image['Name'], - 'description': image['Description'], - 'owner_id': image['OwnerId'], - 'public': image['Public'], - 'virtualization_type': image['VirtualizationType'], - 'architecture': image['Architecture'], - 'state': image['State'], - 'platform': image.get('Platform'), - 'image_type': image['ImageType'], - 'hypervisor': image['Hypervisor'], - 'root_device_name': image['RootDeviceName'], - 'root_device_type': image['RootDeviceType'], - 'sriov': image.get('SriovNetSupport', 'simple') + "ami_id": image["ImageId"], + "name": image["Name"], + "description": image["Description"], + "owner_id": image["OwnerId"], + "public": image["Public"], + "virtualization_type": image["VirtualizationType"], + "architecture": image["Architecture"], + "state": image["State"], + "platform": image.get("Platform"), + "image_type": image["ImageType"], + "hypervisor": image["Hypervisor"], + "root_device_name": image["RootDeviceName"], + "root_device_type": image["RootDeviceType"], + "sriov": 
image.get("SriovNetSupport", "simple"), } result.append(tmp) except Exception as err: diff --git a/scripts/implementation_coverage.py b/scripts/implementation_coverage.py index 57f978ff9b5a..23def77007dc 100755 --- a/scripts/implementation_coverage.py +++ b/scripts/implementation_coverage.py @@ -7,12 +7,18 @@ script_dir = os.path.dirname(os.path.abspath(__file__)) -alternative_service_names = {'lambda': 'awslambda', 'dynamodb': 'dynamodb2'} +alternative_service_names = {"lambda": "awslambda", "dynamodb": "dynamodb2"} def get_moto_implementation(service_name): - service_name = service_name.replace("-", "") if "-" in service_name else service_name - alt_service_name = alternative_service_names[service_name] if service_name in alternative_service_names else service_name + service_name = ( + service_name.replace("-", "") if "-" in service_name else service_name + ) + alt_service_name = ( + alternative_service_names[service_name] + if service_name in alternative_service_names + else service_name + ) if hasattr(moto, "mock_{}".format(alt_service_name)): mock = getattr(moto, "mock_{}".format(alt_service_name)) elif hasattr(moto, "mock_{}".format(service_name)): @@ -31,11 +37,13 @@ def calculate_implementation_coverage(): coverage = {} for service_name in service_names: moto_client = get_moto_implementation(service_name) - real_client = boto3.client(service_name, region_name='us-east-1') + real_client = boto3.client(service_name, region_name="us-east-1") implemented = [] not_implemented = [] - operation_names = [xform_name(op) for op in real_client.meta.service_model.operation_names] + operation_names = [ + xform_name(op) for op in real_client.meta.service_model.operation_names + ] for op in operation_names: if moto_client and op in dir(moto_client): implemented.append(op) @@ -43,20 +51,22 @@ def calculate_implementation_coverage(): not_implemented.append(op) coverage[service_name] = { - 'implemented': implemented, - 'not_implemented': not_implemented, + "implemented": implemented, + "not_implemented": not_implemented, } return coverage def print_implementation_coverage(coverage): for service_name in sorted(coverage): - implemented = coverage.get(service_name)['implemented'] - not_implemented = coverage.get(service_name)['not_implemented'] + implemented = coverage.get(service_name)["implemented"] + not_implemented = coverage.get(service_name)["not_implemented"] operations = sorted(implemented + not_implemented) if implemented and not_implemented: - percentage_implemented = int(100.0 * len(implemented) / (len(implemented) + len(not_implemented))) + percentage_implemented = int( + 100.0 * len(implemented) / (len(implemented) + len(not_implemented)) + ) elif implemented: percentage_implemented = 100 else: @@ -84,12 +94,14 @@ def write_implementation_coverage_to_file(coverage): print("Writing to {}".format(implementation_coverage_file)) with open(implementation_coverage_file, "w+") as file: for service_name in sorted(coverage): - implemented = coverage.get(service_name)['implemented'] - not_implemented = coverage.get(service_name)['not_implemented'] + implemented = coverage.get(service_name)["implemented"] + not_implemented = coverage.get(service_name)["not_implemented"] operations = sorted(implemented + not_implemented) if implemented and not_implemented: - percentage_implemented = int(100.0 * len(implemented) / (len(implemented) + len(not_implemented))) + percentage_implemented = int( + 100.0 * len(implemented) / (len(implemented) + len(not_implemented)) + ) elif implemented: percentage_implemented 
= 100
         else:
@@ -98,7 +110,9 @@ def write_implementation_coverage_to_file(coverage):
             file.write("\n")
             file.write("## {}\n".format(service_name))
             file.write("<details>\n")
-            file.write("<summary>{}% implemented</summary>\n\n".format(percentage_implemented))
+            file.write(
+                "<summary>{}% implemented</summary>\n\n".format(percentage_implemented)
+            )
             for op in operations:
                 if op in implemented:
                     file.write("- [X] {}\n".format(op))
@@ -107,7 +121,7 @@ def write_implementation_coverage_to_file(coverage):
             file.write("</details>
\n") -if __name__ == '__main__': +if __name__ == "__main__": cov = calculate_implementation_coverage() write_implementation_coverage_to_file(cov) print_implementation_coverage(cov) diff --git a/scripts/scaffold.py b/scripts/scaffold.py index de6781b3f756..9255ac008287 100755 --- a/scripts/scaffold.py +++ b/scripts/scaffold.py @@ -17,9 +17,7 @@ import click import jinja2 -from prompt_toolkit import ( - prompt -) +from prompt_toolkit import prompt from prompt_toolkit.completion import WordCompleter from prompt_toolkit.shortcuts import print_formatted_text @@ -29,35 +27,35 @@ from moto.core.responses import BaseResponse from moto.core import BaseBackend -from implementation_coverage import ( - get_moto_implementation -) +from implementation_coverage import get_moto_implementation from inflection import singularize -TEMPLATE_DIR = os.path.join(os.path.dirname(__file__), './template') +TEMPLATE_DIR = os.path.join(os.path.dirname(__file__), "./template") -INPUT_IGNORED_IN_BACKEND = ['Marker', 'PageSize'] -OUTPUT_IGNORED_IN_BACKEND = ['NextMarker'] +INPUT_IGNORED_IN_BACKEND = ["Marker", "PageSize"] +OUTPUT_IGNORED_IN_BACKEND = ["NextMarker"] def print_progress(title, body, color): - click.secho(u'\t{}\t'.format(title), fg=color, nl=False) + click.secho(u"\t{}\t".format(title), fg=color, nl=False) click.echo(body) def select_service_and_operation(): service_names = Session().get_available_services() service_completer = WordCompleter(service_names) - service_name = prompt(u'Select service: ', completer=service_completer) + service_name = prompt(u"Select service: ", completer=service_completer) if service_name not in service_names: - click.secho(u'{} is not valid service'.format(service_name), fg='red') + click.secho(u"{} is not valid service".format(service_name), fg="red") raise click.Abort() moto_client = get_moto_implementation(service_name) - real_client = boto3.client(service_name, region_name='us-east-1') + real_client = boto3.client(service_name, region_name="us-east-1") implemented = [] not_implemented = [] - operation_names = [xform_name(op) for op in real_client.meta.service_model.operation_names] + operation_names = [ + xform_name(op) for op in real_client.meta.service_model.operation_names + ] for op in operation_names: if moto_client and op in dir(moto_client): implemented.append(op) @@ -65,133 +63,148 @@ def select_service_and_operation(): not_implemented.append(op) operation_completer = WordCompleter(operation_names) - click.echo('==Current Implementation Status==') + click.echo("==Current Implementation Status==") for operation_name in operation_names: - check = 'X' if operation_name in implemented else ' ' - click.secho('[{}] {}'.format(check, operation_name)) - click.echo('=================================') - operation_name = prompt(u'Select Operation: ', completer=operation_completer) + check = "X" if operation_name in implemented else " " + click.secho("[{}] {}".format(check, operation_name)) + click.echo("=================================") + operation_name = prompt(u"Select Operation: ", completer=operation_completer) if operation_name not in operation_names: - click.secho('{} is not valid operation'.format(operation_name), fg='red') + click.secho("{} is not valid operation".format(operation_name), fg="red") raise click.Abort() if operation_name in implemented: - click.secho('{} is already implemented'.format(operation_name), fg='red') + click.secho("{} is already implemented".format(operation_name), fg="red") raise click.Abort() return service_name, operation_name + def 
get_escaped_service(service): - return service.replace('-', '') + return service.replace("-", "") + def get_lib_dir(service): - return os.path.join('moto', get_escaped_service(service)) + return os.path.join("moto", get_escaped_service(service)) + def get_test_dir(service): - return os.path.join('tests', 'test_{}'.format(get_escaped_service(service))) + return os.path.join("tests", "test_{}".format(get_escaped_service(service))) def render_template(tmpl_dir, tmpl_filename, context, service, alt_filename=None): - is_test = True if 'test' in tmpl_dir else False - rendered = jinja2.Environment( - loader=jinja2.FileSystemLoader(tmpl_dir) - ).get_template(tmpl_filename).render(context) + is_test = True if "test" in tmpl_dir else False + rendered = ( + jinja2.Environment(loader=jinja2.FileSystemLoader(tmpl_dir)) + .get_template(tmpl_filename) + .render(context) + ) dirname = get_test_dir(service) if is_test else get_lib_dir(service) filename = alt_filename or os.path.splitext(tmpl_filename)[0] filepath = os.path.join(dirname, filename) if os.path.exists(filepath): - print_progress('skip creating', filepath, 'yellow') + print_progress("skip creating", filepath, "yellow") else: - print_progress('creating', filepath, 'green') - with open(filepath, 'w') as f: + print_progress("creating", filepath, "green") + with open(filepath, "w") as f: f.write(rendered) def append_mock_to_init_py(service): - path = os.path.join(os.path.dirname(__file__), '..', 'moto', '__init__.py') + path = os.path.join(os.path.dirname(__file__), "..", "moto", "__init__.py") with open(path) as f: - lines = [_.replace('\n', '') for _ in f.readlines()] + lines = [_.replace("\n", "") for _ in f.readlines()] - if any(_ for _ in lines if re.match('^mock_{}.*lazy_load(.*)$'.format(service), _)): + if any(_ for _ in lines if re.match("^mock_{}.*lazy_load(.*)$".format(service), _)): return - filtered_lines = [_ for _ in lines if re.match('^mock_.*lazy_load(.*)$', _)] + filtered_lines = [_ for _ in lines if re.match("^mock_.*lazy_load(.*)$", _)] last_import_line_index = lines.index(filtered_lines[-1]) - new_line = 'mock_{} = lazy_load(".{}", "mock_{}")'.format(get_escaped_service(service), get_escaped_service(service), get_escaped_service(service)) + new_line = 'mock_{} = lazy_load(".{}", "mock_{}")'.format( + get_escaped_service(service), + get_escaped_service(service), + get_escaped_service(service), + ) lines.insert(last_import_line_index + 1, new_line) - body = '\n'.join(lines) + '\n' - with open(path, 'w') as f: + body = "\n".join(lines) + "\n" + with open(path, "w") as f: f.write(body) def append_mock_dict_to_backends_py(service): - path = os.path.join(os.path.dirname(__file__), '..', 'moto', 'backends.py') + path = os.path.join(os.path.dirname(__file__), "..", "moto", "backends.py") with open(path) as f: - lines = [_.replace('\n', '') for _ in f.readlines()] + lines = [_.replace("\n", "") for _ in f.readlines()] - if any(_ for _ in lines if re.match(".*\"{}\": {}_backends.*".format(service, service), _)): + if any( + _ + for _ in lines + if re.match('.*"{}": {}_backends.*'.format(service, service), _) + ): return - filtered_lines = [_ for _ in lines if re.match(".*\".*\":.*_backends.*", _)] + filtered_lines = [_ for _ in lines if re.match('.*".*":.*_backends.*', _)] last_elem_line_index = lines.index(filtered_lines[-1]) - new_line = " \"{}\": (\"{}\", \"{}_backends\"),".format(service, get_escaped_service(service), get_escaped_service(service)) + new_line = ' "{}": ("{}", "{}_backends"),'.format( + service, 
get_escaped_service(service), get_escaped_service(service) + ) prev_line = lines[last_elem_line_index] - if not prev_line.endswith('{') and not prev_line.endswith(','): - lines[last_elem_line_index] += ',' + if not prev_line.endswith("{") and not prev_line.endswith(","): + lines[last_elem_line_index] += "," lines.insert(last_elem_line_index + 1, new_line) - body = '\n'.join(lines) + '\n' - with open(path, 'w') as f: + body = "\n".join(lines) + "\n" + with open(path, "w") as f: f.write(body) + def initialize_service(service, operation, api_protocol): - """create lib and test dirs if not exist - """ + """create lib and test dirs if not exist""" lib_dir = get_lib_dir(service) test_dir = get_test_dir(service) - print_progress('Initializing service', service, 'green') + print_progress("Initializing service", service, "green") client = boto3.client(service) service_class = client.__class__.__name__ endpoint_prefix = client._service_model.endpoint_prefix tmpl_context = { - 'service': service, - 'service_class': service_class, - 'endpoint_prefix': endpoint_prefix, - 'api_protocol': api_protocol, - 'escaped_service': get_escaped_service(service) + "service": service, + "service_class": service_class, + "endpoint_prefix": endpoint_prefix, + "api_protocol": api_protocol, + "escaped_service": get_escaped_service(service), } # initialize service directory if os.path.exists(lib_dir): - print_progress('skip creating', lib_dir, 'yellow') + print_progress("skip creating", lib_dir, "yellow") else: - print_progress('creating', lib_dir, 'green') + print_progress("creating", lib_dir, "green") os.makedirs(lib_dir) - tmpl_dir = os.path.join(TEMPLATE_DIR, 'lib') + tmpl_dir = os.path.join(TEMPLATE_DIR, "lib") for tmpl_filename in os.listdir(tmpl_dir): - render_template( - tmpl_dir, tmpl_filename, tmpl_context, service - ) + render_template(tmpl_dir, tmpl_filename, tmpl_context, service) # initialize test directory if os.path.exists(test_dir): - print_progress('skip creating', test_dir, 'yellow') + print_progress("skip creating", test_dir, "yellow") else: - print_progress('creating', test_dir, 'green') + print_progress("creating", test_dir, "green") os.makedirs(test_dir) - tmpl_dir = os.path.join(TEMPLATE_DIR, 'test') + tmpl_dir = os.path.join(TEMPLATE_DIR, "test") for tmpl_filename in os.listdir(tmpl_dir): - alt_filename = 'test_{}.py'.format(get_escaped_service(service)) if tmpl_filename == 'test_service.py.j2' else None - render_template( - tmpl_dir, tmpl_filename, tmpl_context, service, alt_filename + alt_filename = ( + "test_{}.py".format(get_escaped_service(service)) + if tmpl_filename == "test_service.py.j2" + else None ) + render_template(tmpl_dir, tmpl_filename, tmpl_context, service, alt_filename) # append mock to init files append_mock_to_init_py(service) @@ -199,22 +212,24 @@ def initialize_service(service, operation, api_protocol): def to_upper_camel_case(s): - return ''.join([_.title() for _ in s.split('_')]) + return "".join([_.title() for _ in s.split("_")]) def to_lower_camel_case(s): - words = s.split('_') - return ''.join(words[:1] + [_.title() for _ in words[1:]]) + words = s.split("_") + return "".join(words[:1] + [_.title() for _ in words[1:]]) def to_snake_case(s): - s1 = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', s) - return re.sub('([a-z0-9])([A-Z])', r'\1_\2', s1).lower() + s1 = re.sub("(.)([A-Z][a-z]+)", r"\1_\2", s) + return re.sub("([a-z0-9])([A-Z])", r"\1_\2", s1).lower() + def get_operation_name_in_keys(operation_name, operation_keys): index = [_.lower() for _ in 
operation_keys].index(operation_name.lower()) return operation_keys[index] + def get_function_in_responses(service, operation, protocol): """refers to definition of API in botocore, and autogenerates function You can see example of elbv2 from link below. @@ -224,44 +239,56 @@ def get_function_in_responses(service, operation, protocol): aws_operation_name = get_operation_name_in_keys( to_upper_camel_case(operation), - list(client._service_model._service_description['operations'].keys()) + list(client._service_model._service_description["operations"].keys()), ) op_model = client._service_model.operation_model(aws_operation_name) - if not hasattr(op_model.output_shape, 'members'): + if not hasattr(op_model.output_shape, "members"): outputs = {} else: outputs = op_model.output_shape.members inputs = op_model.input_shape.members - input_names = [to_snake_case(_) for _ in inputs.keys() if _ not in INPUT_IGNORED_IN_BACKEND] - output_names = [to_snake_case(_) for _ in outputs.keys() if _ not in OUTPUT_IGNORED_IN_BACKEND] - body = '\ndef {}(self):\n'.format(operation) + input_names = [ + to_snake_case(_) for _ in inputs.keys() if _ not in INPUT_IGNORED_IN_BACKEND + ] + output_names = [ + to_snake_case(_) for _ in outputs.keys() if _ not in OUTPUT_IGNORED_IN_BACKEND + ] + body = "\ndef {}(self):\n".format(operation) for input_name, input_type in inputs.items(): type_name = input_type.type_name - if type_name == 'integer': + if type_name == "integer": arg_line_tmpl = ' {} = self._get_int_param("{}")\n' - elif type_name == 'list': + elif type_name == "list": arg_line_tmpl = ' {} = self._get_list_prefix("{}.member")\n' else: arg_line_tmpl = ' {} = self._get_param("{}")\n' body += arg_line_tmpl.format(to_snake_case(input_name), input_name) if output_names: - body += ' {} = self.{}_backend.{}(\n'.format(', '.join(output_names), get_escaped_service(service), operation) + body += " {} = self.{}_backend.{}(\n".format( + ", ".join(output_names), get_escaped_service(service), operation + ) else: - body += ' self.{}_backend.{}(\n'.format(get_escaped_service(service), operation) + body += " self.{}_backend.{}(\n".format( + get_escaped_service(service), operation + ) for input_name in input_names: - body += ' {}={},\n'.format(input_name, input_name) + body += " {}={},\n".format(input_name, input_name) - body += ' )\n' - if protocol == 'query': - body += ' template = self.response_template({}_TEMPLATE)\n'.format(operation.upper()) - body += ' return template.render({})\n'.format( - ', '.join(['{}={}'.format(_, _) for _ in output_names]) + body += " )\n" + if protocol == "query": + body += " template = self.response_template({}_TEMPLATE)\n".format( + operation.upper() + ) + body += " return template.render({})\n".format( + ", ".join(["{}={}".format(_, _) for _ in output_names]) + ) + elif protocol in ["json", "rest-json"]: + body += " # TODO: adjust response\n" + body += " return json.dumps(dict({}))\n".format( + ", ".join(["{}={}".format(to_lower_camel_case(_), _) for _ in output_names]) ) - elif protocol in ['json', 'rest-json']: - body += ' # TODO: adjust response\n' - body += ' return json.dumps(dict({}))\n'.format(', '.join(['{}={}'.format(to_lower_camel_case(_), _) for _ in output_names])) return body @@ -273,44 +300,55 @@ def get_function_in_models(service, operation): client = boto3.client(service) aws_operation_name = get_operation_name_in_keys( to_upper_camel_case(operation), - list(client._service_model._service_description['operations'].keys()) + 
list(client._service_model._service_description["operations"].keys()), ) op_model = client._service_model.operation_model(aws_operation_name) inputs = op_model.input_shape.members - if not hasattr(op_model.output_shape, 'members'): + if not hasattr(op_model.output_shape, "members"): outputs = {} else: outputs = op_model.output_shape.members - input_names = [to_snake_case(_) for _ in inputs.keys() if _ not in INPUT_IGNORED_IN_BACKEND] - output_names = [to_snake_case(_) for _ in outputs.keys() if _ not in OUTPUT_IGNORED_IN_BACKEND] + input_names = [ + to_snake_case(_) for _ in inputs.keys() if _ not in INPUT_IGNORED_IN_BACKEND + ] + output_names = [ + to_snake_case(_) for _ in outputs.keys() if _ not in OUTPUT_IGNORED_IN_BACKEND + ] if input_names: - body = 'def {}(self, {}):\n'.format(operation, ', '.join(input_names)) + body = "def {}(self, {}):\n".format(operation, ", ".join(input_names)) else: - body = 'def {}(self)\n' - body += ' # implement here\n' - body += ' return {}\n\n'.format(', '.join(output_names)) + body = "def {}(self)\n" + body += " # implement here\n" + body += " return {}\n\n".format(", ".join(output_names)) return body def _get_subtree(name, shape, replace_list, name_prefix=[]): class_name = shape.__class__.__name__ - if class_name in ('StringShape', 'Shape'): + if class_name in ("StringShape", "Shape"): t = etree.Element(name) if name_prefix: - t.text = '{{ %s.%s }}' % (name_prefix[-1], to_snake_case(name)) + t.text = "{{ %s.%s }}" % (name_prefix[-1], to_snake_case(name)) else: - t.text = '{{ %s }}' % to_snake_case(name) + t.text = "{{ %s }}" % to_snake_case(name) return t - elif class_name in ('ListShape', ): + elif class_name in ("ListShape",): replace_list.append((name, name_prefix)) t = etree.Element(name) - t_member = etree.Element('member') + t_member = etree.Element("member") t.append(t_member) for nested_name, nested_shape in shape.member.members.items(): - t_member.append(_get_subtree(nested_name, nested_shape, replace_list, name_prefix + [singularize(name.lower())])) + t_member.append( + _get_subtree( + nested_name, + nested_shape, + replace_list, + name_prefix + [singularize(name.lower())], + ) + ) return t - raise ValueError('Not supported Shape') + raise ValueError("Not supported Shape") def get_response_query_template(service, operation): @@ -323,22 +361,22 @@ def get_response_query_template(service, operation): client = boto3.client(service) aws_operation_name = get_operation_name_in_keys( to_upper_camel_case(operation), - list(client._service_model._service_description['operations'].keys()) + list(client._service_model._service_description["operations"].keys()), ) op_model = client._service_model.operation_model(aws_operation_name) - result_wrapper = op_model.output_shape.serialization['resultWrapper'] - response_wrapper = result_wrapper.replace('Result', 'Response') + result_wrapper = op_model.output_shape.serialization["resultWrapper"] + response_wrapper = result_wrapper.replace("Result", "Response") metadata = op_model.metadata - xml_namespace = metadata['xmlNamespace'] + xml_namespace = metadata["xmlNamespace"] # build xml tree - t_root = etree.Element(response_wrapper, xmlns=xml_namespace) + t_root = etree.Element(response_wrapper, xmlns=xml_namespace) # build metadata - t_metadata = etree.Element('ResponseMetadata') - t_request_id = etree.Element('RequestId') - t_request_id.text = '1549581b-12b7-11e3-895e-1334aEXAMPLE' + t_metadata = etree.Element("ResponseMetadata") + t_request_id = etree.Element("RequestId") + t_request_id.text = 
"1549581b-12b7-11e3-895e-1334aEXAMPLE" t_metadata.append(t_request_id) t_root.append(t_metadata) @@ -349,68 +387,73 @@ def get_response_query_template(service, operation): for output_name, output_shape in outputs.items(): t_result.append(_get_subtree(output_name, output_shape, replace_list)) t_root.append(t_result) - xml_body = etree.tostring(t_root, pretty_print=True).decode('utf-8') + xml_body = etree.tostring(t_root, pretty_print=True).decode("utf-8") xml_body_lines = xml_body.splitlines() for replace in replace_list: name = replace[0] prefix = replace[1] singular_name = singularize(name) - start_tag = '<%s>' % name - iter_name = '{}.{}'.format(prefix[-1], name.lower())if prefix else name.lower() - loop_start = '{%% for %s in %s %%}' % (singular_name.lower(), iter_name) - end_tag = '' % name - loop_end = '{{ endfor }}' + start_tag = "<%s>" % name + iter_name = "{}.{}".format(prefix[-1], name.lower()) if prefix else name.lower() + loop_start = "{%% for %s in %s %%}" % (singular_name.lower(), iter_name) + end_tag = "" % name + loop_end = "{{ endfor }}" start_tag_indexes = [i for i, l in enumerate(xml_body_lines) if start_tag in l] if len(start_tag_indexes) != 1: - raise Exception('tag %s not found in response body' % start_tag) + raise Exception("tag %s not found in response body" % start_tag) start_tag_index = start_tag_indexes[0] xml_body_lines.insert(start_tag_index + 1, loop_start) end_tag_indexes = [i for i, l in enumerate(xml_body_lines) if end_tag in l] if len(end_tag_indexes) != 1: - raise Exception('tag %s not found in response body' % end_tag) + raise Exception("tag %s not found in response body" % end_tag) end_tag_index = end_tag_indexes[0] xml_body_lines.insert(end_tag_index, loop_end) - xml_body = '\n'.join(xml_body_lines) + xml_body = "\n".join(xml_body_lines) body = '\n{}_TEMPLATE = """{}"""'.format(operation.upper(), xml_body) return body def insert_code_to_class(path, base_class, new_code): with open(path) as f: - lines = [_.replace('\n', '') for _ in f.readlines()] - mod_path = os.path.splitext(path)[0].replace('/', '.') + lines = [_.replace("\n", "") for _ in f.readlines()] + mod_path = os.path.splitext(path)[0].replace("/", ".") mod = importlib.import_module(mod_path) clsmembers = inspect.getmembers(mod, inspect.isclass) - _response_cls = [_[1] for _ in clsmembers if issubclass(_[1], base_class) and _[1] != base_class] + _response_cls = [ + _[1] for _ in clsmembers if issubclass(_[1], base_class) and _[1] != base_class + ] if len(_response_cls) != 1: - raise Exception('unknown error, number of clsmembers is not 1') + raise Exception("unknown error, number of clsmembers is not 1") response_cls = _response_cls[0] code_lines, line_no = inspect.getsourcelines(response_cls) end_line_no = line_no + len(code_lines) - func_lines = [' ' * 4 + _ for _ in new_code.splitlines()] + func_lines = [" " * 4 + _ for _ in new_code.splitlines()] lines = lines[:end_line_no] + func_lines + lines[end_line_no:] - body = '\n'.join(lines) + '\n' - with open(path, 'w') as f: + body = "\n".join(lines) + "\n" + with open(path, "w") as f: f.write(body) + def insert_url(service, operation, api_protocol): client = boto3.client(service) service_class = client.__class__.__name__ aws_operation_name = get_operation_name_in_keys( to_upper_camel_case(operation), - list(client._service_model._service_description['operations'].keys()) + list(client._service_model._service_description["operations"].keys()), ) - uri = client._service_model.operation_model(aws_operation_name).http['requestUri'] + uri = 
client._service_model.operation_model(aws_operation_name).http["requestUri"] - path = os.path.join(os.path.dirname(__file__), '..', 'moto', get_escaped_service(service), 'urls.py') + path = os.path.join( + os.path.dirname(__file__), "..", "moto", get_escaped_service(service), "urls.py" + ) with open(path) as f: - lines = [_.replace('\n', '') for _ in f.readlines()] + lines = [_.replace("\n", "") for _ in f.readlines()] if any(_ for _ in lines if re.match(uri, _)): return @@ -418,50 +461,49 @@ def insert_url(service, operation, api_protocol): url_paths_found = False last_elem_line_index = -1 for i, line in enumerate(lines): - if line.startswith('url_paths'): + if line.startswith("url_paths"): url_paths_found = True - if url_paths_found and line.startswith('}'): + if url_paths_found and line.startswith("}"): last_elem_line_index = i - 1 prev_line = lines[last_elem_line_index] - if not prev_line.endswith('{') and not prev_line.endswith(','): - lines[last_elem_line_index] += ',' + if not prev_line.endswith("{") and not prev_line.endswith(","): + lines[last_elem_line_index] += "," # generate url pattern - if api_protocol == 'rest-json': + if api_protocol == "rest-json": new_line = " '{0}/.*$': response.dispatch," else: - new_line = " '{0}%s$': %sResponse.dispatch," % ( - uri, service_class - ) + new_line = " '{0}%s$': %sResponse.dispatch," % (uri, service_class) if new_line in lines: return lines.insert(last_elem_line_index + 1, new_line) - body = '\n'.join(lines) + '\n' - with open(path, 'w') as f: + body = "\n".join(lines) + "\n" + with open(path, "w") as f: f.write(body) + def insert_codes(service, operation, api_protocol): func_in_responses = get_function_in_responses(service, operation, api_protocol) func_in_models = get_function_in_models(service, operation) # edit responses.py - responses_path = 'moto/{}/responses.py'.format(get_escaped_service(service)) - print_progress('inserting code', responses_path, 'green') + responses_path = "moto/{}/responses.py".format(get_escaped_service(service)) + print_progress("inserting code", responses_path, "green") insert_code_to_class(responses_path, BaseResponse, func_in_responses) # insert template - if api_protocol == 'query': + if api_protocol == "query": template = get_response_query_template(service, operation) with open(responses_path) as f: lines = [_[:-1] for _ in f.readlines()] lines += template.splitlines() - with open(responses_path, 'w') as f: - f.write('\n'.join(lines)) + with open(responses_path, "w") as f: + f.write("\n".join(lines)) # edit models.py - models_path = 'moto/{}/models.py'.format(get_escaped_service(service)) - print_progress('inserting code', models_path, 'green') + models_path = "moto/{}/models.py".format(get_escaped_service(service)) + print_progress("inserting code", models_path, "green") insert_code_to_class(models_path, BaseBackend, func_in_models) # edit urls.py @@ -471,15 +513,20 @@ def insert_codes(service, operation, api_protocol): @click.command() def main(): service, operation = select_service_and_operation() - api_protocol = boto3.client(service)._service_model.metadata['protocol'] + api_protocol = boto3.client(service)._service_model.metadata["protocol"] initialize_service(service, operation, api_protocol) - if api_protocol in ['query', 'json', 'rest-json']: + if api_protocol in ["query", "json", "rest-json"]: insert_codes(service, operation, api_protocol) else: - print_progress('skip inserting code', 'api protocol "{}" is not supported'.format(api_protocol), 'yellow') + print_progress( + "skip inserting 
code", + 'api protocol "{}" is not supported'.format(api_protocol), + "yellow", + ) click.echo('You will still need to add the mock into "__init__.py"'.format(service)) -if __name__ == '__main__': + +if __name__ == "__main__": main() diff --git a/scripts/update_managed_policies.py b/scripts/update_managed_policies.py index de7058fd7b8e..2e227b75238b 100755 --- a/scripts/update_managed_policies.py +++ b/scripts/update_managed_policies.py @@ -23,42 +23,53 @@ def json_serial(obj): raise TypeError("Type not serializable") -client = boto3.client('iam') +client = boto3.client("iam") policies = {} -paginator = client.get_paginator('list_policies') +paginator = client.get_paginator("list_policies") try: - response_iterator = paginator.paginate(Scope='AWS') + response_iterator = paginator.paginate(Scope="AWS") for response in response_iterator: - for policy in response['Policies']: - policies[policy['PolicyName']] = policy + for policy in response["Policies"]: + policies[policy["PolicyName"]] = policy except NoCredentialsError: print("USAGE:") print("Put your AWS credentials into ~/.aws/credentials and run:") print(__file__) print("") print("Or specify them on the command line:") - print("AWS_ACCESS_KEY_ID=your_personal_access_key AWS_SECRET_ACCESS_KEY=your_personal_secret {}".format(__file__)) + print( + "AWS_ACCESS_KEY_ID=your_personal_access_key AWS_SECRET_ACCESS_KEY=your_personal_secret {}".format( + __file__ + ) + ) print("") sys.exit(1) for policy_name in policies: response = client.get_policy_version( - PolicyArn=policies[policy_name]['Arn'], - VersionId=policies[policy_name]['DefaultVersionId']) - for key in response['PolicyVersion']: - if key != "CreateDate": # the policy's CreateDate should not be overwritten by its version's CreateDate - policies[policy_name][key] = response['PolicyVersion'][key] + PolicyArn=policies[policy_name]["Arn"], + VersionId=policies[policy_name]["DefaultVersionId"], + ) + for key in response["PolicyVersion"]: + if ( + key != "CreateDate" + ): # the policy's CreateDate should not be overwritten by its version's CreateDate + policies[policy_name][key] = response["PolicyVersion"][key] -with open(output_file, 'w') as f: - triple_quote = '\"\"\"' +with open(output_file, "w") as f: + triple_quote = '"""' f.write("# Imported via `make aws_managed_policies`\n") - f.write('aws_managed_policies_data = {}\n'.format(triple_quote)) - f.write(json.dumps(policies, - sort_keys=True, - indent=4, - separators=(',', ': '), - default=json_serial)) - f.write('{}\n'.format(triple_quote)) + f.write("aws_managed_policies_data = {}\n".format(triple_quote)) + f.write( + json.dumps( + policies, + sort_keys=True, + indent=4, + separators=(",", ": "), + default=json_serial, + ) + ) + f.write("{}\n".format(triple_quote)) diff --git a/setup.py b/setup.py index bcbc88a20f3c..a738feab6f6d 100755 --- a/setup.py +++ b/setup.py @@ -13,20 +13,22 @@ # Borrowed from pip at https://github.com/pypa/pip/blob/62c27dee45625e1b63d1e023b0656310f276e050/setup.py#L11-L15 here = os.path.abspath(os.path.dirname(__file__)) + def read(*parts): # intentionally *not* adding an encoding option to open, See: # https://github.com/pypa/virtualenv/issues/201#issuecomment-3145690 - with open(os.path.join(here, *parts), 'r') as fp: + with open(os.path.join(here, *parts), "r") as fp: return fp.read() def get_version(): - version_file = read('moto', '__init__.py') - version_match = re.search(r'^__version__ = [\'"]([^\'"]*)[\'"]', - version_file, re.MULTILINE) + version_file = read("moto", "__init__.py") + 
version_match = re.search( + r'^__version__ = [\'"]([^\'"]*)[\'"]', version_file, re.MULTILINE + ) if version_match: return version_match.group(1) - raise RuntimeError('Unable to find version string.') + raise RuntimeError("Unable to find version string.") install_requires = [ @@ -77,7 +79,9 @@ def get_version(): _dep_PyYAML = "PyYAML>=5.1" _dep_python_jose = "python-jose[cryptography]>=3.1.0,<4.0.0" -_dep_python_jose_ecdsa_pin = "ecdsa<0.15" # https://github.com/spulec/moto/pull/3263#discussion_r477404984 +_dep_python_jose_ecdsa_pin = ( + "ecdsa<0.15" # https://github.com/spulec/moto/pull/3263#discussion_r477404984 +) _dep_docker = "docker>=2.5.1" _dep_jsondiff = "jsondiff>=1.1.2" _dep_aws_xray_sdk = "aws-xray-sdk!=0.96,>=0.93" @@ -98,31 +102,31 @@ def get_version(): _dep_sshpubkeys_py2, _dep_sshpubkeys_py3, ] -all_server_deps = all_extra_deps + ['flask', 'flask-cors'] +all_server_deps = all_extra_deps + ["flask", "flask-cors"] # TODO: do we want to add ALL services here? # i.e. even those without extra dependencies. # Would be good for future-compatibility, I guess. extras_per_service = { - 'apigateway': [_dep_python_jose, _dep_python_jose_ecdsa_pin], - 'awslambda': [_dep_docker], - 'batch': [_dep_docker], - 'cloudformation': [_dep_docker, _dep_PyYAML, _dep_cfn_lint], - 'cognitoidp': [_dep_python_jose, _dep_python_jose_ecdsa_pin], - 'dynamodb2': [_dep_docker], - 'dynamodbstreams': [_dep_docker], + "apigateway": [_dep_python_jose, _dep_python_jose_ecdsa_pin], + "awslambda": [_dep_docker], + "batch": [_dep_docker], + "cloudformation": [_dep_docker, _dep_PyYAML, _dep_cfn_lint], + "cognitoidp": [_dep_python_jose, _dep_python_jose_ecdsa_pin], + "dynamodb2": [_dep_docker], + "dynamodbstreams": [_dep_docker], "ec2": [_dep_docker, _dep_sshpubkeys_py2, _dep_sshpubkeys_py3], - 'iotdata': [_dep_jsondiff], - 's3': [_dep_PyYAML], - 'ses': [_dep_docker], - 'sns': [_dep_docker], - 'sqs': [_dep_docker], - 'ssm': [_dep_docker, _dep_PyYAML, _dep_cfn_lint], - 'xray': [_dep_aws_xray_sdk], + "iotdata": [_dep_jsondiff], + "s3": [_dep_PyYAML], + "ses": [_dep_docker], + "sns": [_dep_docker], + "sqs": [_dep_docker], + "ssm": [_dep_docker, _dep_PyYAML, _dep_cfn_lint], + "xray": [_dep_aws_xray_sdk], } extras_require = { - 'all': all_extra_deps, - 'server': all_server_deps, + "all": all_extra_deps, + "server": all_server_deps, } extras_require.update(extras_per_service) @@ -136,18 +140,18 @@ def get_version(): setup( - name='moto', + name="moto", version=get_version(), - description='A library that allows your python tests to easily' - ' mock out the boto library', - long_description=read('README.md'), - long_description_content_type='text/markdown', - author='Steve Pulec', - author_email='spulec@gmail.com', - url='https://github.com/spulec/moto', + description="A library that allows your python tests to easily" + " mock out the boto library", + long_description=read("README.md"), + long_description_content_type="text/markdown", + author="Steve Pulec", + author_email="spulec@gmail.com", + url="https://github.com/spulec/moto", entry_points={ - 'console_scripts': [ - 'moto_server = moto.server:main', + "console_scripts": [ + "moto_server = moto.server:main", ], }, packages=find_packages(exclude=("tests", "tests.*")), diff --git a/tests/test_acm/test_acm.py b/tests/test_acm/test_acm.py index 5a1596a4d05f..0a3b32c1f46d 100644 --- a/tests/test_acm/test_acm.py +++ b/tests/test_acm/test_acm.py @@ -22,8 +22,10 @@ SERVER_COMMON_NAME = "*.moto.com" SERVER_CRT_BAD = _GET_RESOURCE("star_moto_com-bad.pem") 
SERVER_KEY = _GET_RESOURCE("star_moto_com.key") -BAD_ARN = "arn:aws:acm:us-east-2:{}:certificate/_0000000-0000-0000-0000-000000000000".format( - ACCOUNT_ID +BAD_ARN = ( + "arn:aws:acm:us-east-2:{}:certificate/_0000000-0000-0000-0000-000000000000".format( + ACCOUNT_ID + ) ) @@ -56,7 +58,10 @@ def test_import_certificate_with_tags(): Certificate=SERVER_CRT, PrivateKey=SERVER_KEY, CertificateChain=CA_CRT, - Tags=[{"Key": "Environment", "Value": "QA"}, {"Key": "KeyOnly"},], + Tags=[ + {"Key": "Environment", "Value": "QA"}, + {"Key": "KeyOnly"}, + ], ) arn = resp["CertificateArn"] @@ -368,7 +373,10 @@ def test_request_certificate_with_tags(): DomainName="google.com", IdempotencyToken=token, SubjectAlternativeNames=["google.com", "www.google.com", "mail.google.com"], - Tags=[{"Key": "Environment", "Value": "Prod"}, {"Key": "KeyOnly"},], + Tags=[ + {"Key": "Environment", "Value": "Prod"}, + {"Key": "KeyOnly"}, + ], ) arn_2 = resp["CertificateArn"] @@ -398,7 +406,8 @@ def test_operations_with_invalid_tags(): # request certificate with invalid tags with assert_raises(ClientError) as ex: client.request_certificate( - DomainName="example.com", Tags=[{"Key": "X" * 200, "Value": "Valid"}], + DomainName="example.com", + Tags=[{"Key": "X" * 200, "Value": "Valid"}], ) ex.exception.response["Error"]["Code"].should.equal("ValidationException") ex.exception.response["Error"]["Message"].should.contain( diff --git a/tests/test_apigateway/test_apigateway.py b/tests/test_apigateway/test_apigateway.py index c34ddfa723cb..01529fadc227 100644 --- a/tests/test_apigateway/test_apigateway.py +++ b/tests/test_apigateway/test_apigateway.py @@ -105,7 +105,9 @@ def test_create_rest_api_valid_apikeysources(): # 1. test creating rest api with HEADER apiKeySource response = client.create_rest_api( - name="my_api", description="this is my api", apiKeySource="HEADER", + name="my_api", + description="this is my api", + apiKeySource="HEADER", ) api_id = response["id"] @@ -114,7 +116,9 @@ def test_create_rest_api_valid_apikeysources(): # 2. test creating rest api with AUTHORIZER apiKeySource response = client.create_rest_api( - name="my_api2", description="this is my api", apiKeySource="AUTHORIZER", + name="my_api2", + description="this is my api", + apiKeySource="AUTHORIZER", ) api_id = response["id"] @@ -149,7 +153,9 @@ def test_create_rest_api_valid_endpointconfigurations(): response = client.get_rest_api(restApiId=api_id) response["endpointConfiguration"].should.equal( - {"types": ["PRIVATE"],} + { + "types": ["PRIVATE"], + } ) # 2. test creating rest api with REGIONAL endpointConfiguration @@ -162,7 +168,9 @@ def test_create_rest_api_valid_endpointconfigurations(): response = client.get_rest_api(restApiId=api_id) response["endpointConfiguration"].should.equal( - {"types": ["REGIONAL"],} + { + "types": ["REGIONAL"], + } ) # 3. 
test creating rest api with EDGE endpointConfiguration @@ -175,7 +183,9 @@ def test_create_rest_api_valid_endpointconfigurations(): response = client.get_rest_api(restApiId=api_id) response["endpointConfiguration"].should.equal( - {"types": ["EDGE"],} + { + "types": ["EDGE"], + } ) @@ -221,7 +231,11 @@ def test_create_resource(): root_resource["ResponseMetadata"].pop("HTTPHeaders", None) root_resource["ResponseMetadata"].pop("RetryAttempts", None) root_resource.should.equal( - {"path": "/", "id": root_id, "ResponseMetadata": {"HTTPStatusCode": 200},} + { + "path": "/", + "id": root_id, + "ResponseMetadata": {"HTTPStatusCode": 200}, + } ) client.create_resource(restApiId=api_id, parentId=root_id, pathPart="users") @@ -1669,9 +1683,7 @@ def test_get_domain_name(): with pytest.raises(ClientError) as ex: client.get_domain_name(domainName=domain_name) - ex.value.response["Error"]["Message"].should.equal( - "Invalid Domain Name specified" - ) + ex.value.response["Error"]["Message"].should.equal("Invalid Domain Name specified") ex.value.response["Error"]["Code"].should.equal("NotFoundException") # adding a domain name client.create_domain_name(domainName=domain_name) @@ -1708,9 +1720,7 @@ def test_create_model(): description=description, contentType=content_type, ) - ex.value.response["Error"]["Message"].should.equal( - "Invalid Rest API Id specified" - ) + ex.value.response["Error"]["Message"].should.equal("Invalid Rest API Id specified") ex.value.response["Error"]["Code"].should.equal("NotFoundException") with pytest.raises(ClientError) as ex: @@ -1772,9 +1782,7 @@ def test_get_model_by_name(): with pytest.raises(ClientError) as ex: client.get_model(restApiId=dummy_rest_api_id, modelName=model_name) - ex.value.response["Error"]["Message"].should.equal( - "Invalid Rest API Id specified" - ) + ex.value.response["Error"]["Message"].should.equal("Invalid Rest API Id specified") ex.value.response["Error"]["Code"].should.equal("NotFoundException") @@ -1786,9 +1794,7 @@ def test_get_model_with_invalid_name(): # test with an invalid model name with pytest.raises(ClientError) as ex: client.get_model(restApiId=rest_api_id, modelName="fake") - ex.value.response["Error"]["Message"].should.equal( - "Invalid Model Name specified" - ) + ex.value.response["Error"]["Message"].should.equal("Invalid Model Name specified") ex.value.response["Error"]["Code"].should.equal("NotFoundException") @@ -1828,8 +1834,10 @@ def test_http_proxying_integration(): stage_name = "staging" client.create_deployment(restApiId=api_id, stageName=stage_name) - deploy_url = "https://{api_id}.execute-api.{region_name}.amazonaws.com/{stage_name}".format( - api_id=api_id, region_name=region_name, stage_name=stage_name + deploy_url = ( + "https://{api_id}.execute-api.{region_name}.amazonaws.com/{stage_name}".format( + api_id=api_id, region_name=region_name, stage_name=stage_name + ) ) if not settings.TEST_SERVER_MODE: diff --git a/tests/test_applicationautoscaling/test_validation.py b/tests/test_applicationautoscaling/test_validation.py index c77b64fc80be..8b396d242bd5 100644 --- a/tests/test_applicationautoscaling/test_validation.py +++ b/tests/test_applicationautoscaling/test_validation.py @@ -49,7 +49,8 @@ def test_describe_scalable_targets_with_invalid_scalable_dimension_should_return with pytest.raises(ClientError) as err: response = client.describe_scalable_targets( - ServiceNamespace=DEFAULT_SERVICE_NAMESPACE, ScalableDimension="foo", + ServiceNamespace=DEFAULT_SERVICE_NAMESPACE, + ScalableDimension="foo", ) 
err.response["Error"]["Code"].should.equal("ValidationException")
     err.response["Error"]["Message"].split(":")[0].should.look_like(
@@ -64,7 +65,8 @@ def test_describe_scalable_targets_with_invalid_service_namespace_should_return_
 
     with pytest.raises(ClientError) as err:
         response = client.describe_scalable_targets(
-            ServiceNamespace="foo", ScalableDimension=DEFAULT_SCALABLE_DIMENSION,
+            ServiceNamespace="foo",
+            ScalableDimension=DEFAULT_SCALABLE_DIMENSION,
         )
     err.response["Error"]["Code"].should.equal("ValidationException")
     err.response["Error"]["Message"].split(":")[0].should.look_like(
@@ -79,7 +81,8 @@ def test_describe_scalable_targets_with_multiple_invalid_parameters_should_retur
 
     with pytest.raises(ClientError) as err:
         response = client.describe_scalable_targets(
-            ServiceNamespace="foo", ScalableDimension="bar",
+            ServiceNamespace="foo",
+            ScalableDimension="bar",
         )
     err.response["Error"]["Code"].should.equal("ValidationException")
     err.response["Error"]["Message"].split(":")[0].should.look_like(
diff --git a/tests/test_athena/test_athena.py b/tests/test_athena/test_athena.py
index f667f231697b..d3362cd7fc74 100644
--- a/tests/test_athena/test_athena.py
+++ b/tests/test_athena/test_athena.py
@@ -178,7 +178,9 @@ def test_create_named_query():
 
     # create named query
     res = client.create_named_query(
-        Name="query-name", Database="target_db", QueryString="SELECT * FROM table1",
+        Name="query-name",
+        Database="target_db",
+        QueryString="SELECT * FROM table1",
     )
 
     assert "NamedQueryId" in res
@@ -215,6 +217,8 @@ def create_basic_workgroup(client, name):
         Name=name,
         Description="Test work group",
         Configuration={
-            "ResultConfiguration": {"OutputLocation": "s3://bucket-name/prefix/",}
+            "ResultConfiguration": {
+                "OutputLocation": "s3://bucket-name/prefix/",
+            }
         },
     )
diff --git a/tests/test_autoscaling/test_autoscaling.py b/tests/test_autoscaling/test_autoscaling.py
index 25b9cc063b61..0b6ec88f7569 100644
--- a/tests/test_autoscaling/test_autoscaling.py
+++ b/tests/test_autoscaling/test_autoscaling.py
@@ -97,8 +97,8 @@ def test_create_autoscaling_group():
 
 @mock_autoscaling_deprecated
 def test_create_autoscaling_groups_defaults():
-    """ Test with the minimum inputs and check that all of the proper defaults
-    are assigned for the other attributes """
+    """Test with the minimum inputs and check that all of the proper defaults
+    are assigned for the other attributes"""
 
     mocked_networking = setup_networking_deprecated()
     conn = boto.connect_autoscale()
@@ -961,7 +961,8 @@ def test_describe_autoscaling_groups_boto3_launch_config():
     mocked_networking = setup_networking()
     client = boto3.client("autoscaling", region_name="us-east-1")
     client.create_launch_configuration(
-        LaunchConfigurationName="test_launch_configuration", InstanceType="t2.micro",
+        LaunchConfigurationName="test_launch_configuration",
+        InstanceType="t2.micro",
     )
     client.create_auto_scaling_group(
         AutoScalingGroupName="test_asg",
@@ -1040,7 +1041,8 @@ def test_describe_autoscaling_instances_boto3_launch_config():
     mocked_networking = setup_networking()
     client = boto3.client("autoscaling", region_name="us-east-1")
     client.create_launch_configuration(
-        LaunchConfigurationName="test_launch_configuration", InstanceType="t2.micro",
+        LaunchConfigurationName="test_launch_configuration",
+        InstanceType="t2.micro",
     )
     client.create_auto_scaling_group(
         AutoScalingGroupName="test_asg",
@@ -2154,7 +2156,8 @@ def test_standby_exit_standby():
     response["AutoScalingInstances"][0]["LifecycleState"].should.equal("Standby")
 
     response = client.exit_standby(
-        
AutoScalingGroupName="test_asg", InstanceIds=[instance_to_standby_exit_standby], + AutoScalingGroupName="test_asg", + InstanceIds=[instance_to_standby_exit_standby], ) response["ResponseMetadata"]["HTTPStatusCode"].should.equal(200) diff --git a/tests/test_autoscaling/test_autoscaling_cloudformation.py b/tests/test_autoscaling/test_autoscaling_cloudformation.py index 24a5b5628a7f..ac7884c5fcf2 100644 --- a/tests/test_autoscaling/test_autoscaling_cloudformation.py +++ b/tests/test_autoscaling/test_autoscaling_cloudformation.py @@ -32,7 +32,8 @@ def test_launch_configuration(): """.strip() cf_client.create_stack( - StackName=stack_name, TemplateBody=cf_template, + StackName=stack_name, + TemplateBody=cf_template, ) stack = cf_client.describe_stacks(StackName=stack_name)["Stacks"][0] stack["Outputs"][0]["OutputValue"].should.be.equal("test_launch_configuration") @@ -56,7 +57,8 @@ def test_launch_configuration(): """.strip() cf_client.update_stack( - StackName=stack_name, TemplateBody=cf_template, + StackName=stack_name, + TemplateBody=cf_template, ) stack = cf_client.describe_stacks(StackName=stack_name)["Stacks"][0] stack["Outputs"][0]["OutputValue"].should.be.equal("test_launch_configuration") @@ -76,7 +78,8 @@ def test_autoscaling_group_from_launch_config(): client = boto3.client("autoscaling", region_name="us-east-1") client.create_launch_configuration( - LaunchConfigurationName="test_launch_configuration", InstanceType="t2.micro", + LaunchConfigurationName="test_launch_configuration", + InstanceType="t2.micro", ) stack_name = "test-auto-scaling-group" diff --git a/tests/test_autoscaling/test_launch_configurations.py b/tests/test_autoscaling/test_launch_configurations.py index ab2743f54072..3ed296f64caf 100644 --- a/tests/test_autoscaling/test_launch_configurations.py +++ b/tests/test_autoscaling/test_launch_configurations.py @@ -152,8 +152,8 @@ def test_create_launch_configuration_using_ip_association_should_default_to_fals @mock_autoscaling_deprecated def test_create_launch_configuration_defaults(): - """ Test with the minimum inputs and check that all of the proper defaults - are assigned for the other attributes """ + """Test with the minimum inputs and check that all of the proper defaults + are assigned for the other attributes""" conn = boto.connect_autoscale() config = LaunchConfiguration( name="tester", image_id="ami-abcd1234", instance_type="m1.small" diff --git a/tests/test_autoscaling/test_policies.py b/tests/test_autoscaling/test_policies.py index 284fe267a8f5..ca1009dbe19b 100644 --- a/tests/test_autoscaling/test_policies.py +++ b/tests/test_autoscaling/test_policies.py @@ -170,7 +170,7 @@ def test_execute_policy_percent_change_in_capacity(): @mock_autoscaling_deprecated def test_execute_policy_small_percent_change_in_capacity(): - """ http://docs.aws.amazon.com/AutoScaling/latest/DeveloperGuide/as-scale-based-on-demand.html + """http://docs.aws.amazon.com/AutoScaling/latest/DeveloperGuide/as-scale-based-on-demand.html If PercentChangeInCapacity returns a value between 0 and 1, Auto Scaling will round it off to 1.""" setup_autoscale_group() diff --git a/tests/test_awslambda/test_lambda.py b/tests/test_awslambda/test_lambda.py index 071c6fed6197..8d1efc7a9a54 100644 --- a/tests/test_awslambda/test_lambda.py +++ b/tests/test_awslambda/test_lambda.py @@ -204,7 +204,9 @@ def test_invoke_dryrun_function(): Runtime="python2.7", Role=get_role_name(), Handler="lambda_function.lambda_handler", - Code={"ZipFile": get_test_zip_file1(),}, + Code={ + "ZipFile": get_test_zip_file1(), + }, 
Description="test lambda function", Timeout=3, MemorySize=128, @@ -1275,7 +1277,8 @@ def wait_for_log_msg(expected_msg, log_group): for log_stream in log_streams: result = logs_conn.get_log_events( - logGroupName=log_group, logStreamName=log_stream["logStreamName"], + logGroupName=log_group, + logStreamName=log_stream["logStreamName"], ) received_messages.extend( [event["message"] for event in result.get("events")] @@ -1713,7 +1716,9 @@ def test_remove_function_permission(): ) remove = conn.remove_permission( - FunctionName="testFunction", StatementId="1", Qualifier="2", + FunctionName="testFunction", + StatementId="1", + Qualifier="2", ) remove["ResponseMetadata"]["HTTPStatusCode"].should.equal(204) policy = conn.get_policy(FunctionName="testFunction", Qualifier="2")["Policy"] diff --git a/tests/test_cloudformation/test_cloudformation_depends_on.py b/tests/test_cloudformation/test_cloudformation_depends_on.py index 1b47b40648fb..6a8e17428da5 100644 --- a/tests/test_cloudformation/test_cloudformation_depends_on.py +++ b/tests/test_cloudformation/test_cloudformation_depends_on.py @@ -23,7 +23,9 @@ }, "LaunchConfig": { "Type": "AWS::AutoScaling::LaunchConfiguration", - "Properties": {"LaunchConfigurationName": "test-launch-config",}, + "Properties": { + "LaunchConfigurationName": "test-launch-config", + }, }, }, } @@ -45,7 +47,9 @@ }, "LaunchConfig": { "Type": "AWS::AutoScaling::LaunchConfiguration", - "Properties": {"LaunchConfigurationName": "test-launch-config",}, + "Properties": { + "LaunchConfigurationName": "test-launch-config", + }, }, }, } diff --git a/tests/test_cloudformation/test_cloudformation_stack_crud_boto3.py b/tests/test_cloudformation/test_cloudformation_stack_crud_boto3.py index 86b6f1a94e82..fdf1f2426ad5 100644 --- a/tests/test_cloudformation/test_cloudformation_stack_crud_boto3.py +++ b/tests/test_cloudformation/test_cloudformation_stack_crud_boto3.py @@ -1369,10 +1369,12 @@ def test_non_json_redrive_policy(): def test_boto3_create_duplicate_stack(): cf_conn = boto3.client("cloudformation", region_name="us-east-1") cf_conn.create_stack( - StackName="test_stack", TemplateBody=dummy_template_json, + StackName="test_stack", + TemplateBody=dummy_template_json, ) with pytest.raises(ClientError): cf_conn.create_stack( - StackName="test_stack", TemplateBody=dummy_template_json, + StackName="test_stack", + TemplateBody=dummy_template_json, ) diff --git a/tests/test_cloudformation/test_cloudformation_stack_integration.py b/tests/test_cloudformation/test_cloudformation_stack_integration.py index 9949bb4a5035..852bb805015b 100644 --- a/tests/test_cloudformation/test_cloudformation_stack_integration.py +++ b/tests/test_cloudformation/test_cloudformation_stack_integration.py @@ -2325,7 +2325,10 @@ def test_stack_dynamodb_resources_integration(): dynamodb_client = boto3.client("dynamodb", region_name="us-east-1") table_desc = dynamodb_client.describe_table(TableName="myTableName")["Table"] table_desc["StreamSpecification"].should.equal( - {"StreamEnabled": True, "StreamViewType": "KEYS_ONLY",} + { + "StreamEnabled": True, + "StreamViewType": "KEYS_ONLY", + } ) dynamodb_conn = boto3.resource("dynamodb", region_name="us-east-1") @@ -2779,7 +2782,9 @@ def test_stack_events_get_attribute_integration(): @mock_dynamodb2 def test_dynamodb_table_creation(): CFN_TEMPLATE = { - "Outputs": {"MyTableName": {"Value": {"Ref": "MyTable"}},}, + "Outputs": { + "MyTableName": {"Value": {"Ref": "MyTable"}}, + }, "Resources": { "MyTable": { "Type": "AWS::DynamoDB::Table", diff --git 
a/tests/test_codepipeline/test_codepipeline.py b/tests/test_codepipeline/test_codepipeline.py index ca1094582ad0..c80a732612a8 100644 --- a/tests/test_codepipeline/test_codepipeline.py +++ b/tests/test_codepipeline/test_codepipeline.py @@ -326,7 +326,9 @@ def test_update_pipeline(): "S3Bucket": "different-bucket", "S3ObjectKey": "test-object", }, - "outputArtifacts": [{"name": "artifact"},], + "outputArtifacts": [ + {"name": "artifact"}, + ], }, ], }, @@ -435,7 +437,9 @@ def test_update_pipeline_errors(): "S3Bucket": "test-bucket", "S3ObjectKey": "test-object", }, - "outputArtifacts": [{"name": "artifact"},], + "outputArtifacts": [ + {"name": "artifact"}, + ], }, ], }, @@ -696,7 +700,9 @@ def create_basic_codepipeline(client, name): "S3Bucket": "test-bucket", "S3ObjectKey": "test-object", }, - "outputArtifacts": [{"name": "artifact"},], + "outputArtifacts": [ + {"name": "artifact"}, + ], }, ], }, diff --git a/tests/test_cognitoidp/test_cognitoidp.py b/tests/test_cognitoidp/test_cognitoidp.py index 54ee9528f71f..b4893b4ecdad 100644 --- a/tests/test_cognitoidp/test_cognitoidp.py +++ b/tests/test_cognitoidp/test_cognitoidp.py @@ -1272,15 +1272,20 @@ def user_authentication_flow(conn): )["UserPoolClient"]["ClientId"] conn.sign_up( - ClientId=client_id, Username=username, Password=password, + ClientId=client_id, + Username=username, + Password=password, ) client_secret = conn.describe_user_pool_client( - UserPoolId=user_pool_id, ClientId=client_id, + UserPoolId=user_pool_id, + ClientId=client_id, )["UserPoolClient"]["ClientSecret"] conn.confirm_sign_up( - ClientId=client_id, Username=username, ConfirmationCode="123456", + ClientId=client_id, + Username=username, + ConfirmationCode="123456", ) # generating secret hash @@ -1318,18 +1323,25 @@ def user_authentication_flow(conn): ) conn.verify_software_token( - AccessToken=result["AuthenticationResult"]["AccessToken"], UserCode="123456", + AccessToken=result["AuthenticationResult"]["AccessToken"], + UserCode="123456", ) conn.set_user_mfa_preference( AccessToken=result["AuthenticationResult"]["AccessToken"], - SoftwareTokenMfaSettings={"Enabled": True, "PreferredMfa": True,}, + SoftwareTokenMfaSettings={ + "Enabled": True, + "PreferredMfa": True, + }, ) result = conn.initiate_auth( ClientId=client_id, AuthFlow="REFRESH_TOKEN", - AuthParameters={"SECRET_HASH": secret_hash, "REFRESH_TOKEN": refresh_token,}, + AuthParameters={ + "SECRET_HASH": secret_hash, + "REFRESH_TOKEN": refresh_token, + }, ) result["AuthenticationResult"]["IdToken"].should_not.be.none @@ -1583,7 +1595,8 @@ def test_sign_up(): conn = boto3.client("cognito-idp", "us-west-2") user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] client_id = conn.create_user_pool_client( - UserPoolId=user_pool_id, ClientName=str(uuid.uuid4()), + UserPoolId=user_pool_id, + ClientName=str(uuid.uuid4()), )["UserPoolClient"]["ClientId"] username = str(uuid.uuid4()) password = str(uuid.uuid4()) @@ -1599,12 +1612,16 @@ def test_confirm_sign_up(): password = str(uuid.uuid4()) user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] client_id = conn.create_user_pool_client( - UserPoolId=user_pool_id, ClientName=str(uuid.uuid4()), GenerateSecret=True, + UserPoolId=user_pool_id, + ClientName=str(uuid.uuid4()), + GenerateSecret=True, )["UserPoolClient"]["ClientId"] conn.sign_up(ClientId=client_id, Username=username, Password=password) conn.confirm_sign_up( - ClientId=client_id, Username=username, ConfirmationCode="123456", + ClientId=client_id, + 
Username=username, + ConfirmationCode="123456", ) result = conn.admin_get_user(UserPoolId=user_pool_id, Username=username) @@ -1618,14 +1635,19 @@ def test_initiate_auth_USER_SRP_AUTH(): password = str(uuid.uuid4()) user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] client_id = conn.create_user_pool_client( - UserPoolId=user_pool_id, ClientName=str(uuid.uuid4()), GenerateSecret=True, + UserPoolId=user_pool_id, + ClientName=str(uuid.uuid4()), + GenerateSecret=True, )["UserPoolClient"]["ClientId"] conn.sign_up(ClientId=client_id, Username=username, Password=password) client_secret = conn.describe_user_pool_client( - UserPoolId=user_pool_id, ClientId=client_id, + UserPoolId=user_pool_id, + ClientId=client_id, )["UserPoolClient"]["ClientSecret"] conn.confirm_sign_up( - ClientId=client_id, Username=username, ConfirmationCode="123456", + ClientId=client_id, + Username=username, + ConfirmationCode="123456", ) key = bytes(str(client_secret).encode("latin-1")) @@ -1669,11 +1691,14 @@ def test_initiate_auth_for_unconfirmed_user(): password = str(uuid.uuid4()) user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] client_id = conn.create_user_pool_client( - UserPoolId=user_pool_id, ClientName=str(uuid.uuid4()), GenerateSecret=True, + UserPoolId=user_pool_id, + ClientName=str(uuid.uuid4()), + GenerateSecret=True, )["UserPoolClient"]["ClientId"] conn.sign_up(ClientId=client_id, Username=username, Password=password) client_secret = conn.describe_user_pool_client( - UserPoolId=user_pool_id, ClientId=client_id, + UserPoolId=user_pool_id, + ClientId=client_id, )["UserPoolClient"]["ClientSecret"] key = bytes(str(client_secret).encode("latin-1")) @@ -1705,14 +1730,19 @@ def test_initiate_auth_with_invalid_secret_hash(): password = str(uuid.uuid4()) user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] client_id = conn.create_user_pool_client( - UserPoolId=user_pool_id, ClientName=str(uuid.uuid4()), GenerateSecret=True, + UserPoolId=user_pool_id, + ClientName=str(uuid.uuid4()), + GenerateSecret=True, )["UserPoolClient"]["ClientId"] conn.sign_up(ClientId=client_id, Username=username, Password=password) client_secret = conn.describe_user_pool_client( - UserPoolId=user_pool_id, ClientId=client_id, + UserPoolId=user_pool_id, + ClientId=client_id, )["UserPoolClient"]["ClientSecret"] conn.confirm_sign_up( - ClientId=client_id, Username=username, ConfirmationCode="123456", + ClientId=client_id, + Username=username, + ConfirmationCode="123456", ) invalid_secret_hash = str(uuid.uuid4()) diff --git a/tests/test_config/test_config.py b/tests/test_config/test_config.py index a99efceaea58..54678cf7f539 100644 --- a/tests/test_config/test_config.py +++ b/tests/test_config/test_config.py @@ -76,9 +76,7 @@ def test_put_configuration_recorder(): "recordingGroup": bg, } ) - assert ( - ce.value.response["Error"]["Code"] == "InvalidRecordingGroupException" - ) + assert ce.value.response["Error"]["Code"] == "InvalidRecordingGroupException" assert ( ce.value.response["Error"]["Message"] == "The recording group provided is not valid" @@ -255,8 +253,7 @@ def test_put_configuration_aggregator(): ], ) assert ( - "You must choose one of these options" - in ce.value.response["Error"]["Message"] + "You must choose one of these options" in ce.value.response["Error"]["Message"] ) assert ce.value.response["Error"]["Code"] == "InvalidParameterValueException" @@ -270,8 +267,7 @@ def test_put_configuration_aggregator(): }, ) assert ( - "You must choose one 
of these options" - in ce.value.response["Error"]["Message"] + "You must choose one of these options" in ce.value.response["Error"]["Message"] ) assert ce.value.response["Error"]["Code"] == "InvalidParameterValueException" @@ -475,8 +471,7 @@ def test_describe_configuration_aggregators(): in ce.value.response["Error"]["Message"] ) assert ( - ce.value.response["Error"]["Code"] - == "NoSuchConfigurationAggregatorException" + ce.value.response["Error"]["Code"] == "NoSuchConfigurationAggregatorException" ) # Error describe with more than 1 item in the list: @@ -489,8 +484,7 @@ def test_describe_configuration_aggregators(): in ce.value.response["Error"]["Message"] ) assert ( - ce.value.response["Error"]["Code"] - == "NoSuchConfigurationAggregatorException" + ce.value.response["Error"]["Code"] == "NoSuchConfigurationAggregatorException" ) # Get the normal list: @@ -553,9 +547,7 @@ def test_describe_configuration_aggregators(): # Test with an invalid filter: with pytest.raises(ClientError) as ce: client.describe_configuration_aggregators(NextToken="WRONG") - assert ( - "The nextToken provided is invalid" == ce.value.response["Error"]["Message"] - ) + assert "The nextToken provided is invalid" == ce.value.response["Error"]["Message"] assert ce.value.response["Error"]["Code"] == "InvalidNextTokenException" @@ -710,9 +702,7 @@ def test_describe_aggregation_authorizations(): # Test with an invalid filter: with pytest.raises(ClientError) as ce: client.describe_aggregation_authorizations(NextToken="WRONG") - assert ( - "The nextToken provided is invalid" == ce.value.response["Error"]["Message"] - ) + assert "The nextToken provided is invalid" == ce.value.response["Error"]["Message"] assert ce.value.response["Error"]["Code"] == "InvalidNextTokenException" @@ -758,8 +748,7 @@ def test_delete_configuration_aggregator(): in ce.value.response["Error"]["Message"] ) assert ( - ce.value.response["Error"]["Code"] - == "NoSuchConfigurationAggregatorException" + ce.value.response["Error"]["Code"] == "NoSuchConfigurationAggregatorException" ) @@ -798,9 +787,7 @@ def test_describe_configurations(): # Specify an incorrect name: with pytest.raises(ClientError) as ce: client.describe_configuration_recorders(ConfigurationRecorderNames=["wrong"]) - assert ( - ce.value.response["Error"]["Code"] == "NoSuchConfigurationRecorderException" - ) + assert ce.value.response["Error"]["Code"] == "NoSuchConfigurationRecorderException" assert "wrong" in ce.value.response["Error"]["Message"] # And with both a good and wrong name: @@ -808,9 +795,7 @@ def test_describe_configurations(): client.describe_configuration_recorders( ConfigurationRecorderNames=["testrecorder", "wrong"] ) - assert ( - ce.value.response["Error"]["Code"] == "NoSuchConfigurationRecorderException" - ) + assert ce.value.response["Error"]["Code"] == "NoSuchConfigurationRecorderException" assert "wrong" in ce.value.response["Error"]["Message"] @@ -847,9 +832,7 @@ def test_delivery_channels(): # Try without a name supplied: with pytest.raises(ClientError) as ce: client.put_delivery_channel(DeliveryChannel={}) - assert ( - ce.value.response["Error"]["Code"] == "InvalidDeliveryChannelNameException" - ) + assert ce.value.response["Error"]["Code"] == "InvalidDeliveryChannelNameException" assert "is not valid, blank string." 
in ce.value.response["Error"]["Message"] # Try with a really long name: @@ -1034,9 +1017,7 @@ def test_start_configuration_recorder(): # Without a config recorder: with pytest.raises(ClientError) as ce: client.start_configuration_recorder(ConfigurationRecorderName="testrecorder") - assert ( - ce.value.response["Error"]["Code"] == "NoSuchConfigurationRecorderException" - ) + assert ce.value.response["Error"]["Code"] == "NoSuchConfigurationRecorderException" # Make the config recorder; client.put_configuration_recorder( @@ -1054,9 +1035,7 @@ def test_start_configuration_recorder(): # Without a delivery channel: with pytest.raises(ClientError) as ce: client.start_configuration_recorder(ConfigurationRecorderName="testrecorder") - assert ( - ce.value.response["Error"]["Code"] == "NoAvailableDeliveryChannelException" - ) + assert ce.value.response["Error"]["Code"] == "NoAvailableDeliveryChannelException" # Make the delivery channel: client.put_delivery_channel( @@ -1092,9 +1071,7 @@ def test_stop_configuration_recorder(): # Without a config recorder: with pytest.raises(ClientError) as ce: client.stop_configuration_recorder(ConfigurationRecorderName="testrecorder") - assert ( - ce.value.response["Error"]["Code"] == "NoSuchConfigurationRecorderException" - ) + assert ce.value.response["Error"]["Code"] == "NoSuchConfigurationRecorderException" # Make the config recorder; client.put_configuration_recorder( @@ -1184,9 +1161,7 @@ def test_describe_configuration_recorder_status(): client.describe_configuration_recorder_status( ConfigurationRecorderNames=["testrecorder", "wrong"] ) - assert ( - ce.value.response["Error"]["Code"] == "NoSuchConfigurationRecorderException" - ) + assert ce.value.response["Error"]["Code"] == "NoSuchConfigurationRecorderException" assert "wrong" in ce.value.response["Error"]["Message"] @@ -1213,9 +1188,7 @@ def test_delete_configuration_recorder(): # Try again -- it should be deleted: with pytest.raises(ClientError) as ce: client.delete_configuration_recorder(ConfigurationRecorderName="testrecorder") - assert ( - ce.value.response["Error"]["Code"] == "NoSuchConfigurationRecorderException" - ) + assert ce.value.response["Error"]["Code"] == "NoSuchConfigurationRecorderException" @mock_config @@ -1243,8 +1216,7 @@ def test_delete_delivery_channel(): with pytest.raises(ClientError) as ce: client.delete_delivery_channel(DeliveryChannelName="testchannel") assert ( - ce.value.response["Error"]["Code"] - == "LastDeliveryChannelDeleteFailedException" + ce.value.response["Error"]["Code"] == "LastDeliveryChannelDeleteFailedException" ) assert ( "because there is a running configuration recorder." @@ -1267,7 +1239,7 @@ def test_delete_delivery_channel(): @mock_s3 def test_list_discovered_resource(): """NOTE: We are only really testing the Config part. For each individual service, please add tests - for that individual service's "list_config_service_resources" function. + for that individual service's "list_config_service_resources" function. """ client = boto3.client("config", region_name="us-west-2") @@ -1373,7 +1345,7 @@ def test_list_discovered_resource(): @mock_s3 def test_list_aggregate_discovered_resource(): """NOTE: We are only really testing the Config part. For each individual service, please add tests - for that individual service's "list_config_service_resources" function. + for that individual service's "list_config_service_resources" function. 
""" client = boto3.client("config", region_name="us-west-2") @@ -1517,7 +1489,7 @@ def test_list_aggregate_discovered_resource(): @mock_s3 def test_get_resource_config_history(): """NOTE: We are only really testing the Config part. For each individual service, please add tests - for that individual service's "get_config_resource" function. + for that individual service's "get_config_resource" function. """ client = boto3.client("config", region_name="us-west-2") @@ -1576,7 +1548,7 @@ def test_get_resource_config_history(): @mock_s3 def test_batch_get_resource_config(): """NOTE: We are only really testing the Config part. For each individual service, please add tests - for that individual service's "get_config_resource" function. + for that individual service's "get_config_resource" function. """ client = boto3.client("config", region_name="us-west-2") @@ -1640,7 +1612,7 @@ def test_batch_get_resource_config(): @mock_s3 def test_batch_get_aggregate_resource_config(): """NOTE: We are only really testing the Config part. For each individual service, please add tests - for that individual service's "get_config_resource" function. + for that individual service's "get_config_resource" function. """ from moto.config.models import DEFAULT_ACCOUNT_ID @@ -1873,7 +1845,12 @@ def test_put_evaluations(): response["ResponseMetadata"].pop("HTTPHeaders", None) response["ResponseMetadata"].pop("RetryAttempts", None) response.should.equal( - {"FailedEvaluations": [], "ResponseMetadata": {"HTTPStatusCode": 200,},} + { + "FailedEvaluations": [], + "ResponseMetadata": { + "HTTPStatusCode": 200, + }, + } ) diff --git a/tests/test_core/test_auth.py b/tests/test_core/test_auth.py index b6fc8a1356dc..f867b434ec1f 100644 --- a/tests/test_core/test_auth.py +++ b/tests/test_core/test_auth.py @@ -325,7 +325,9 @@ def test_access_denied_for_run_instances(): ex.value.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(403) ex.value.response["Error"]["Message"].should.equal( "User: arn:aws:iam::{account_id}:user/{user_name} is not authorized to perform: {operation}".format( - account_id=ACCOUNT_ID, user_name=user_name, operation="ec2:RunInstances", + account_id=ACCOUNT_ID, + user_name=user_name, + operation="ec2:RunInstances", ) ) diff --git a/tests/test_dynamodb2/test_dynamodb.py b/tests/test_dynamodb2/test_dynamodb.py index 04b23177346e..7dc739f700ea 100644 --- a/tests/test_dynamodb2/test_dynamodb.py +++ b/tests/test_dynamodb2/test_dynamodb.py @@ -1347,9 +1347,13 @@ def test_get_item_returns_consumed_capacity(): def test_put_empty_item(): dynamodb = boto3.resource("dynamodb", region_name="us-east-1") dynamodb.create_table( - AttributeDefinitions=[{"AttributeName": "structure_id", "AttributeType": "S"},], + AttributeDefinitions=[ + {"AttributeName": "structure_id", "AttributeType": "S"}, + ], TableName="test", - KeySchema=[{"AttributeName": "structure_id", "KeyType": "HASH"},], + KeySchema=[ + {"AttributeName": "structure_id", "KeyType": "HASH"}, + ], ProvisionedThroughput={"ReadCapacityUnits": 123, "WriteCapacityUnits": 123}, ) table = dynamodb.Table("test") @@ -1366,9 +1370,13 @@ def test_put_empty_item(): def test_put_item_nonexisting_hash_key(): dynamodb = boto3.resource("dynamodb", region_name="us-east-1") dynamodb.create_table( - AttributeDefinitions=[{"AttributeName": "structure_id", "AttributeType": "S"},], + AttributeDefinitions=[ + {"AttributeName": "structure_id", "AttributeType": "S"}, + ], TableName="test", - KeySchema=[{"AttributeName": "structure_id", "KeyType": "HASH"},], + KeySchema=[ + 
{"AttributeName": "structure_id", "KeyType": "HASH"}, + ], ProvisionedThroughput={"ReadCapacityUnits": 123, "WriteCapacityUnits": 123}, ) table = dynamodb.Table("test") @@ -2287,7 +2295,10 @@ def test_update_item_on_map(): table.update_item( Key={"forum_name": "the-key", "subject": "123"}, UpdateExpression="SET body.#nested.#data = :tb", - ExpressionAttributeNames={"#nested": "nested", "#data": "data",}, + ExpressionAttributeNames={ + "#nested": "nested", + "#data": "data", + }, ExpressionAttributeValues={":tb": "new_value"}, ) # Running this against AWS DDB gives an exception so make sure it also fails.: @@ -3951,19 +3962,30 @@ def test_update_supports_nested_update_if_nested_value_not_exists(): table = dynamodb.Table(name) table.put_item( - Item={"user_id": "1234", "friends": {"5678": {"name": "friend_5678"}},}, + Item={ + "user_id": "1234", + "friends": {"5678": {"name": "friend_5678"}}, + }, ) table.update_item( Key={"user_id": "1234"}, - ExpressionAttributeNames={"#friends": "friends", "#friendid": "0000",}, - ExpressionAttributeValues={":friend": {"name": "friend_0000"},}, + ExpressionAttributeNames={ + "#friends": "friends", + "#friendid": "0000", + }, + ExpressionAttributeValues={ + ":friend": {"name": "friend_0000"}, + }, UpdateExpression="SET #friends.#friendid = :friend", ReturnValues="UPDATED_NEW", ) item = table.get_item(Key={"user_id": "1234"})["Item"] assert item == { "user_id": "1234", - "friends": {"5678": {"name": "friend_5678"}, "0000": {"name": "friend_0000"},}, + "friends": { + "5678": {"name": "friend_5678"}, + "0000": {"name": "friend_0000"}, + }, } @@ -4057,9 +4079,7 @@ def test_update_catches_invalid_list_append_operation(): # Verify correct error is returned str(ex.value).should.match("Parameter validation failed:") - str(ex.value).should.match( - "Invalid type for parameter ExpressionAttributeValues." 
- ) + str(ex.value).should.match("Invalid type for parameter ExpressionAttributeValues.") def _create_user_table(): @@ -4188,11 +4208,17 @@ def test_invalid_transact_get_items(): ) table = dynamodb.Table("test1") table.put_item( - Item={"id": "1", "val": "1",} + Item={ + "id": "1", + "val": "1", + } ) table.put_item( - Item={"id": "1", "val": "2",} + Item={ + "id": "1", + "val": "2", + } ) client = boto3.client("dynamodb", region_name="us-east-1") @@ -4214,16 +4240,28 @@ def test_invalid_transact_get_items(): with pytest.raises(ClientError) as ex: client.transact_get_items( TransactItems=[ - {"Get": {"Key": {"id": {"S": "1"},}, "TableName": "test1"}}, - {"Get": {"Key": {"id": {"S": "1"},}, "TableName": "non_exists_table"}}, + { + "Get": { + "Key": { + "id": {"S": "1"}, + }, + "TableName": "test1", + } + }, + { + "Get": { + "Key": { + "id": {"S": "1"}, + }, + "TableName": "non_exists_table", + } + }, ] ) ex.value.response["Error"]["Code"].should.equal("ResourceNotFoundException") ex.value.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) - ex.value.response["Error"]["Message"].should.equal( - "Requested resource not found" - ) + ex.value.response["Error"]["Message"].should.equal("Requested resource not found") @mock_dynamodb2 @@ -4243,11 +4281,17 @@ def test_valid_transact_get_items(): ) table1 = dynamodb.Table("test1") table1.put_item( - Item={"id": "1", "sort_key": "1",} + Item={ + "id": "1", + "sort_key": "1", + } ) table1.put_item( - Item={"id": "1", "sort_key": "2",} + Item={ + "id": "1", + "sort_key": "2", + } ) dynamodb.create_table( @@ -4264,7 +4308,10 @@ def test_valid_transact_get_items(): ) table2 = dynamodb.Table("test2") table2.put_item( - Item={"id": "1", "sort_key": "1",} + Item={ + "id": "1", + "sort_key": "1", + } ) client = boto3.client("dynamodb", region_name="us-east-1") @@ -4378,7 +4425,10 @@ def test_valid_transact_get_items(): "TableName": "test1", "CapacityUnits": 4.0, "ReadCapacityUnits": 4.0, - "Table": {"CapacityUnits": 4.0, "ReadCapacityUnits": 4.0,}, + "Table": { + "CapacityUnits": 4.0, + "ReadCapacityUnits": 4.0, + }, } ) @@ -4387,7 +4437,10 @@ def test_valid_transact_get_items(): "TableName": "test2", "CapacityUnits": 2.0, "ReadCapacityUnits": 2.0, - "Table": {"CapacityUnits": 2.0, "ReadCapacityUnits": 2.0,}, + "Table": { + "CapacityUnits": 2.0, + "ReadCapacityUnits": 2.0, + }, } ) @@ -4403,7 +4456,9 @@ def test_gsi_verify_negative_number_order(): {"AttributeName": "gsiK1PartitionKey", "KeyType": "HASH"}, {"AttributeName": "gsiK1SortKey", "KeyType": "RANGE"}, ], - "Projection": {"ProjectionType": "KEYS_ONLY",}, + "Projection": { + "ProjectionType": "KEYS_ONLY", + }, } ], "AttributeDefinitions": [ @@ -4454,7 +4509,9 @@ def test_gsi_verify_negative_number_order(): def test_transact_write_items_put(): table_schema = { "KeySchema": [{"AttributeName": "id", "KeyType": "HASH"}], - "AttributeDefinitions": [{"AttributeName": "id", "AttributeType": "S"},], + "AttributeDefinitions": [ + {"AttributeName": "id", "AttributeType": "S"}, + ], } dynamodb = boto3.client("dynamodb", region_name="us-east-1") dynamodb.create_table( @@ -4465,7 +4522,10 @@ def test_transact_write_items_put(): TransactItems=[ { "Put": { - "Item": {"id": {"S": "foo{}".format(str(i))}, "foo": {"S": "bar"},}, + "Item": { + "id": {"S": "foo{}".format(str(i))}, + "foo": {"S": "bar"}, + }, "TableName": "test-table", } } @@ -4481,14 +4541,19 @@ def test_transact_write_items_put(): def test_transact_write_items_put_conditional_expressions(): table_schema = { "KeySchema": [{"AttributeName": 
"id", "KeyType": "HASH"}], - "AttributeDefinitions": [{"AttributeName": "id", "AttributeType": "S"},], + "AttributeDefinitions": [ + {"AttributeName": "id", "AttributeType": "S"}, + ], } dynamodb = boto3.client("dynamodb", region_name="us-east-1") dynamodb.create_table( TableName="test-table", BillingMode="PAY_PER_REQUEST", **table_schema ) dynamodb.put_item( - TableName="test-table", Item={"id": {"S": "foo2"},}, + TableName="test-table", + Item={ + "id": {"S": "foo2"}, + }, ) # Put multiple items with pytest.raises(ClientError) as ex: @@ -4526,7 +4591,9 @@ def test_transact_write_items_put_conditional_expressions(): def test_transact_write_items_conditioncheck_passes(): table_schema = { "KeySchema": [{"AttributeName": "id", "KeyType": "HASH"}], - "AttributeDefinitions": [{"AttributeName": "id", "AttributeType": "S"},], + "AttributeDefinitions": [ + {"AttributeName": "id", "AttributeType": "S"}, + ], } dynamodb = boto3.client("dynamodb", region_name="us-east-1") dynamodb.create_table( @@ -4534,7 +4601,10 @@ def test_transact_write_items_conditioncheck_passes(): ) # Insert an item without email address dynamodb.put_item( - TableName="test-table", Item={"id": {"S": "foo"},}, + TableName="test-table", + Item={ + "id": {"S": "foo"}, + }, ) # Put an email address, after verifying it doesn't exist yet dynamodb.transact_write_items( @@ -4568,7 +4638,9 @@ def test_transact_write_items_conditioncheck_passes(): def test_transact_write_items_conditioncheck_fails(): table_schema = { "KeySchema": [{"AttributeName": "id", "KeyType": "HASH"}], - "AttributeDefinitions": [{"AttributeName": "id", "AttributeType": "S"},], + "AttributeDefinitions": [ + {"AttributeName": "id", "AttributeType": "S"}, + ], } dynamodb = boto3.client("dynamodb", region_name="us-east-1") dynamodb.create_table( @@ -4617,7 +4689,9 @@ def test_transact_write_items_conditioncheck_fails(): def test_transact_write_items_delete(): table_schema = { "KeySchema": [{"AttributeName": "id", "KeyType": "HASH"}], - "AttributeDefinitions": [{"AttributeName": "id", "AttributeType": "S"},], + "AttributeDefinitions": [ + {"AttributeName": "id", "AttributeType": "S"}, + ], } dynamodb = boto3.client("dynamodb", region_name="us-east-1") dynamodb.create_table( @@ -4625,12 +4699,20 @@ def test_transact_write_items_delete(): ) # Insert an item dynamodb.put_item( - TableName="test-table", Item={"id": {"S": "foo"},}, + TableName="test-table", + Item={ + "id": {"S": "foo"}, + }, ) # Delete the item dynamodb.transact_write_items( TransactItems=[ - {"Delete": {"Key": {"id": {"S": "foo"}}, "TableName": "test-table",}} + { + "Delete": { + "Key": {"id": {"S": "foo"}}, + "TableName": "test-table", + } + } ] ) # Assert the item is deleted @@ -4642,7 +4724,9 @@ def test_transact_write_items_delete(): def test_transact_write_items_delete_with_successful_condition_expression(): table_schema = { "KeySchema": [{"AttributeName": "id", "KeyType": "HASH"}], - "AttributeDefinitions": [{"AttributeName": "id", "AttributeType": "S"},], + "AttributeDefinitions": [ + {"AttributeName": "id", "AttributeType": "S"}, + ], } dynamodb = boto3.client("dynamodb", region_name="us-east-1") dynamodb.create_table( @@ -4650,14 +4734,19 @@ def test_transact_write_items_delete_with_successful_condition_expression(): ) # Insert an item without email address dynamodb.put_item( - TableName="test-table", Item={"id": {"S": "foo"},}, + TableName="test-table", + Item={ + "id": {"S": "foo"}, + }, ) # ConditionExpression will pass - no email address has been specified yet dynamodb.transact_write_items( 
TransactItems=[ { "Delete": { - "Key": {"id": {"S": "foo"},}, + "Key": { + "id": {"S": "foo"}, + }, "TableName": "test-table", "ConditionExpression": "attribute_not_exists(#e)", "ExpressionAttributeNames": {"#e": "email_address"}, @@ -4674,7 +4763,9 @@ def test_transact_write_items_delete_with_successful_condition_expression(): def test_transact_write_items_delete_with_failed_condition_expression(): table_schema = { "KeySchema": [{"AttributeName": "id", "KeyType": "HASH"}], - "AttributeDefinitions": [{"AttributeName": "id", "AttributeType": "S"},], + "AttributeDefinitions": [ + {"AttributeName": "id", "AttributeType": "S"}, + ], } dynamodb = boto3.client("dynamodb", region_name="us-east-1") dynamodb.create_table( @@ -4692,7 +4783,9 @@ def test_transact_write_items_delete_with_failed_condition_expression(): TransactItems=[ { "Delete": { - "Key": {"id": {"S": "foo"},}, + "Key": { + "id": {"S": "foo"}, + }, "TableName": "test-table", "ConditionExpression": "attribute_not_exists(#e)", "ExpressionAttributeNames": {"#e": "email_address"}, @@ -4713,7 +4806,9 @@ def test_transact_write_items_delete_with_failed_condition_expression(): def test_transact_write_items_update(): table_schema = { "KeySchema": [{"AttributeName": "id", "KeyType": "HASH"}], - "AttributeDefinitions": [{"AttributeName": "id", "AttributeType": "S"},], + "AttributeDefinitions": [ + {"AttributeName": "id", "AttributeType": "S"}, + ], } dynamodb = boto3.client("dynamodb", region_name="us-east-1") dynamodb.create_table( @@ -4745,7 +4840,9 @@ def test_transact_write_items_update(): def test_transact_write_items_update_with_failed_condition_expression(): table_schema = { "KeySchema": [{"AttributeName": "id", "KeyType": "HASH"}], - "AttributeDefinitions": [{"AttributeName": "id", "AttributeType": "S"},], + "AttributeDefinitions": [ + {"AttributeName": "id", "AttributeType": "S"}, + ], } dynamodb = boto3.client("dynamodb", region_name="us-east-1") dynamodb.create_table( @@ -4935,12 +5032,18 @@ def create_simple_table_and_return_client(): dynamodb.create_table( TableName="moto-test", KeySchema=[{"AttributeName": "id", "KeyType": "HASH"}], - AttributeDefinitions=[{"AttributeName": "id", "AttributeType": "S"},], + AttributeDefinitions=[ + {"AttributeName": "id", "AttributeType": "S"}, + ], ProvisionedThroughput={"ReadCapacityUnits": 1, "WriteCapacityUnits": 1}, ) dynamodb.put_item( TableName="moto-test", - Item={"id": {"S": "1"}, "myNum": {"N": "1"}, "MyStr": {"S": "1"},}, + Item={ + "id": {"S": "1"}, + "myNum": {"N": "1"}, + "MyStr": {"S": "1"}, + }, ) return dynamodb @@ -5004,7 +5107,11 @@ def test_update_expression_with_plus_in_attribute_name(): dynamodb.put_item( TableName="moto-test", - Item={"id": {"S": "1"}, "my+Num": {"S": "1"}, "MyStr": {"S": "aaa"},}, + Item={ + "id": {"S": "1"}, + "my+Num": {"S": "1"}, + "MyStr": {"S": "aaa"}, + }, ) try: dynamodb.update_item( @@ -5031,7 +5138,11 @@ def test_update_expression_with_minus_in_attribute_name(): dynamodb.put_item( TableName="moto-test", - Item={"id": {"S": "1"}, "my-Num": {"S": "1"}, "MyStr": {"S": "aaa"},}, + Item={ + "id": {"S": "1"}, + "my-Num": {"S": "1"}, + "MyStr": {"S": "aaa"}, + }, ) try: dynamodb.update_item( @@ -5058,7 +5169,11 @@ def test_update_expression_with_space_in_attribute_name(): dynamodb.put_item( TableName="moto-test", - Item={"id": {"S": "1"}, "my Num": {"S": "1"}, "MyStr": {"S": "aaa"},}, + Item={ + "id": {"S": "1"}, + "my Num": {"S": "1"}, + "MyStr": {"S": "aaa"}, + }, ) try: @@ -5241,7 +5356,8 @@ def test_update_item_atomic_counter_from_zero(): key = 
{"t_id": {"S": "item1"}} ddb_mock.put_item( - TableName=table, Item=key, + TableName=table, + Item=key, ) ddb_mock.update_item( @@ -5267,7 +5383,8 @@ def test_update_item_add_to_non_existent_set(): ) key = {"t_id": {"S": "item1"}} ddb_mock.put_item( - TableName=table, Item=key, + TableName=table, + Item=key, ) ddb_mock.update_item( @@ -5292,7 +5409,8 @@ def test_update_item_add_to_non_existent_number_set(): ) key = {"t_id": {"S": "item1"}} ddb_mock.put_item( - TableName=table, Item=key, + TableName=table, + Item=key, ) ddb_mock.update_item( @@ -5309,7 +5427,9 @@ def test_update_item_add_to_non_existent_number_set(): def test_transact_write_items_fails_with_transaction_canceled_exception(): table_schema = { "KeySchema": [{"AttributeName": "id", "KeyType": "HASH"}], - "AttributeDefinitions": [{"AttributeName": "id", "AttributeType": "S"},], + "AttributeDefinitions": [ + {"AttributeName": "id", "AttributeType": "S"}, + ], } dynamodb = boto3.client("dynamodb", region_name="us-east-1") dynamodb.create_table( @@ -5361,7 +5481,9 @@ def test_gsi_projection_type_keys_only(): {"AttributeName": "gsiK1PartitionKey", "KeyType": "HASH"}, {"AttributeName": "gsiK1SortKey", "KeyType": "RANGE"}, ], - "Projection": {"ProjectionType": "KEYS_ONLY",}, + "Projection": { + "ProjectionType": "KEYS_ONLY", + }, } ], "AttributeDefinitions": [ @@ -5414,7 +5536,9 @@ def test_lsi_projection_type_keys_only(): {"AttributeName": "partitionKey", "KeyType": "HASH"}, {"AttributeName": "lsiK1SortKey", "KeyType": "RANGE"}, ], - "Projection": {"ProjectionType": "KEYS_ONLY",}, + "Projection": { + "ProjectionType": "KEYS_ONLY", + }, } ], "AttributeDefinitions": [ @@ -5439,7 +5563,8 @@ def test_lsi_projection_type_keys_only(): table.put_item(Item=item) items = table.query( - KeyConditionExpression=Key("partitionKey").eq("pk-1"), IndexName="LSI", + KeyConditionExpression=Key("partitionKey").eq("pk-1"), + IndexName="LSI", )["Items"] items.should.have.length_of(1) # Item should only include GSI Keys and Table Keys, as per the ProjectionType diff --git a/tests/test_dynamodb2/test_dynamodb_executor.py b/tests/test_dynamodb2/test_dynamodb_executor.py index 4ef0bb423285..538bf41574cf 100644 --- a/tests/test_dynamodb2/test_dynamodb_executor.py +++ b/tests/test_dynamodb2/test_dynamodb_executor.py @@ -211,7 +211,11 @@ def test_execution_of_remove_in_map(): "itemlist": { "L": [ {"M": {"foo00": {"S": "bar1"}, "foo01": {"S": "bar2"}}}, - {"M": {"foo10": {"S": "bar1"},}}, + { + "M": { + "foo10": {"S": "bar1"}, + } + }, ] } } @@ -260,7 +264,9 @@ def test_execution_of_remove_in_list(): "itemmap": { "M": { "itemlist": { - "L": [{"M": {"foo00": {"S": "bar1"}, "foo01": {"S": "bar2"}}},] + "L": [ + {"M": {"foo00": {"S": "bar1"}, "foo01": {"S": "bar2"}}}, + ] } } }, @@ -277,7 +283,10 @@ def test_execution_of_delete_element_from_set(): hash_key_type="TYPE", range_key=None, range_key_type=None, - attrs={"id": {"S": "foo2"}, "s": {"SS": ["value1", "value2", "value3"]},}, + attrs={ + "id": {"S": "foo2"}, + "s": {"SS": ["value1", "value2", "value3"]}, + }, ) validated_ast = UpdateExpressionValidator( update_expression_ast, @@ -291,7 +300,10 @@ def test_execution_of_delete_element_from_set(): hash_key_type="TYPE", range_key=None, range_key_type=None, - attrs={"id": {"S": "foo2"}, "s": {"SS": ["value1", "value3"]},}, + attrs={ + "id": {"S": "foo2"}, + "s": {"SS": ["value1", "value3"]}, + }, ) assert expected_item == item @@ -304,7 +316,10 @@ def test_execution_of_add_number(): hash_key_type="TYPE", range_key=None, range_key_type=None, - attrs={"id": {"S": 
"foo2"}, "s": {"N": "5"},}, + attrs={ + "id": {"S": "foo2"}, + "s": {"N": "5"}, + }, ) validated_ast = UpdateExpressionValidator( update_expression_ast, @@ -331,7 +346,10 @@ def test_execution_of_add_set_to_a_number(): hash_key_type="TYPE", range_key=None, range_key_type=None, - attrs={"id": {"S": "foo2"}, "s": {"N": "5"},}, + attrs={ + "id": {"S": "foo2"}, + "s": {"N": "5"}, + }, ) try: validated_ast = UpdateExpressionValidator( @@ -362,7 +380,10 @@ def test_execution_of_add_to_a_set(): hash_key_type="TYPE", range_key=None, range_key_type=None, - attrs={"id": {"S": "foo2"}, "s": {"SS": ["value1", "value2", "value3"]},}, + attrs={ + "id": {"S": "foo2"}, + "s": {"SS": ["value1", "value2", "value3"]}, + }, ) validated_ast = UpdateExpressionValidator( update_expression_ast, @@ -386,13 +407,34 @@ def test_execution_of_add_to_a_set(): @parameterized( [ - ({":value": {"S": "10"}}, "STRING",), - ({":value": {"N": "10"}}, "NUMBER",), - ({":value": {"B": "10"}}, "BINARY",), - ({":value": {"BOOL": True}}, "BOOLEAN",), - ({":value": {"NULL": True}}, "NULL",), - ({":value": {"M": {"el0": {"S": "10"}}}}, "MAP",), - ({":value": {"L": []}}, "LIST",), + ( + {":value": {"S": "10"}}, + "STRING", + ), + ( + {":value": {"N": "10"}}, + "NUMBER", + ), + ( + {":value": {"B": "10"}}, + "BINARY", + ), + ( + {":value": {"BOOL": True}}, + "BOOLEAN", + ), + ( + {":value": {"NULL": True}}, + "NULL", + ), + ( + {":value": {"M": {"el0": {"S": "10"}}}}, + "MAP", + ), + ( + {":value": {"L": []}}, + "LIST", + ), ] ) def test_execution_of__delete_element_from_set_invalid_value( @@ -406,7 +448,10 @@ def test_execution_of__delete_element_from_set_invalid_value( hash_key_type="TYPE", range_key=None, range_key_type=None, - attrs={"id": {"S": "foo2"}, "s": {"SS": ["value1", "value2", "value3"]},}, + attrs={ + "id": {"S": "foo2"}, + "s": {"SS": ["value1", "value2", "value3"]}, + }, ) try: validated_ast = UpdateExpressionValidator( @@ -431,7 +476,10 @@ def test_execution_of_delete_element_from_a_string_attribute(): hash_key_type="TYPE", range_key=None, range_key_type=None, - attrs={"id": {"S": "foo2"}, "s": {"S": "5"},}, + attrs={ + "id": {"S": "foo2"}, + "s": {"S": "5"}, + }, ) try: validated_ast = UpdateExpressionValidator( diff --git a/tests/test_dynamodb2/test_dynamodb_validation.py b/tests/test_dynamodb2/test_dynamodb_validation.py index d60dd48f6c15..93adf88b2659 100644 --- a/tests/test_dynamodb2/test_dynamodb_validation.py +++ b/tests/test_dynamodb2/test_dynamodb_validation.py @@ -42,7 +42,10 @@ def test_validation_of_update_expression_with_keyword(): @parameterized( - ["SET a = #b + :val2", "SET a = :val2 + #b",] + [ + "SET a = #b + :val2", + "SET a = :val2 + #b", + ] ) def test_validation_of_a_set_statement_with_incorrect_passed_value(update_expression): """ @@ -99,7 +102,10 @@ def test_validation_of_update_expression_with_attribute_that_does_not_exist_in_i @parameterized( - ["SET a = #c", "SET a = #c + #d",] + [ + "SET a = #c", + "SET a = #c + #d", + ] ) def test_validation_of_update_expression_with_attribute_name_that_is_not_defined( update_expression, diff --git a/tests/test_ec2/test_amis.py b/tests/test_ec2/test_amis.py index eb3fe6549a5f..db1e263b3cc1 100644 --- a/tests/test_ec2/test_amis.py +++ b/tests/test_ec2/test_amis.py @@ -616,9 +616,9 @@ def test_ami_describe_executable_users_and_filter(): @mock_ec2_deprecated def test_ami_attribute_user_and_group_permissions(): """ - Boto supports adding/removing both users and groups at the same time. 
- Just spot-check this -- input variations, idempotency, etc are validated - via user-specific and group-specific tests above. + Boto supports adding/removing both users and groups at the same time. + Just spot-check this -- input variations, idempotency, etc are validated + via user-specific and group-specific tests above. """ conn = boto.connect_ec2("the_key", "the_secret") reservation = conn.run_instances("ami-1234abcd") diff --git a/tests/test_ec2/test_flow_logs.py b/tests/test_ec2/test_flow_logs.py index 743466eaa472..0bf7a61d00e3 100644 --- a/tests/test_ec2/test_flow_logs.py +++ b/tests/test_ec2/test_flow_logs.py @@ -144,7 +144,9 @@ def test_create_flow_log_create(): bucket = s3.create_bucket( Bucket="test-flow-logs", - CreateBucketConfiguration={"LocationConstraint": "us-west-1",}, + CreateBucketConfiguration={ + "LocationConstraint": "us-west-1", + }, ) response = client.create_flow_logs( diff --git a/tests/test_ec2/test_instances.py b/tests/test_ec2/test_instances.py index fefeee522c2c..28aeb62a959e 100644 --- a/tests/test_ec2/test_instances.py +++ b/tests/test_ec2/test_instances.py @@ -211,16 +211,16 @@ def test_instance_detach_volume_wrong_path(): ImageId="ami-d3adb33f", MinCount=1, MaxCount=1, - BlockDeviceMappings=[{"DeviceName": "/dev/sda1", "Ebs": {"VolumeSize": 50}},], + BlockDeviceMappings=[ + {"DeviceName": "/dev/sda1", "Ebs": {"VolumeSize": 50}}, + ], ) instance = result[0] for volume in instance.volumes.all(): with pytest.raises(ClientError) as ex: instance.detach_volume(VolumeId=volume.volume_id, Device="/dev/sdf") - ex.value.response["Error"]["Code"].should.equal( - "InvalidAttachment.NotFound" - ) + ex.value.response["Error"]["Code"].should.equal("InvalidAttachment.NotFound") ex.value.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) ex.value.response["Error"]["Message"].should.equal( "The volume {0} is not attached to instance {1} as device {2}".format( @@ -1585,7 +1585,9 @@ def test_create_instance_ebs_optimized(): instance.ebs_optimized.should.be(False) instance = ec2_resource.create_instances( - ImageId="ami-12345678", MaxCount=1, MinCount=1, + ImageId="ami-12345678", + MaxCount=1, + MinCount=1, )[0] instance.load() instance.ebs_optimized.should.be(False) diff --git a/tests/test_ec2/test_route_tables.py b/tests/test_ec2/test_route_tables.py index 1eff22315e68..889515962a1c 100644 --- a/tests/test_ec2/test_route_tables.py +++ b/tests/test_ec2/test_route_tables.py @@ -235,8 +235,8 @@ def test_route_table_associations(): @mock_ec2_deprecated def test_route_table_replace_route_table_association(): """ - Note: Boto has deprecated replace_route_table_association (which returns status) - and now uses replace_route_table_association_with_assoc (which returns association ID). + Note: Boto has deprecated replace_route_table_association (which returns status) + and now uses replace_route_table_association_with_assoc (which returns association ID). 
""" conn = boto.connect_vpc("the_key", "the_secret") vpc = conn.create_vpc("10.0.0.0/16") diff --git a/tests/test_ec2/test_subnets.py b/tests/test_ec2/test_subnets.py index 9c14f798febf..85859ba40d65 100644 --- a/tests/test_ec2/test_subnets.py +++ b/tests/test_ec2/test_subnets.py @@ -661,7 +661,11 @@ def test_run_instances_should_attach_to_default_subnet(): client = boto3.client("ec2", region_name="us-west-1") ec2.create_security_group(GroupName="sg01", Description="Test security group sg01") # run_instances - instances = client.run_instances(MinCount=1, MaxCount=1, SecurityGroups=["sg01"],) + instances = client.run_instances( + MinCount=1, + MaxCount=1, + SecurityGroups=["sg01"], + ) # Assert subnet is created appropriately subnets = client.describe_subnets()["Subnets"] default_subnet_id = subnets[0]["SubnetId"] diff --git a/tests/test_ec2/test_vpn_connections.py b/tests/test_ec2/test_vpn_connections.py index ca8897417345..95bd7b66c05b 100644 --- a/tests/test_ec2/test_vpn_connections.py +++ b/tests/test_ec2/test_vpn_connections.py @@ -60,7 +60,9 @@ def test_create_vpn_connection_with_vpn_gateway(): vpn_gateway = client.create_vpn_gateway(Type="ipsec.1").get("VpnGateway", {}) customer_gateway = client.create_customer_gateway( - Type="ipsec.1", PublicIp="205.251.242.54", BgpAsn=65534, + Type="ipsec.1", + PublicIp="205.251.242.54", + BgpAsn=65534, ).get("CustomerGateway", {}) vpn_connection = client.create_vpn_connection( Type="ipsec.1", diff --git a/tests/test_ecs/test_ecs_boto3.py b/tests/test_ecs/test_ecs_boto3.py index 8b6b2798724f..b535f5713c04 100644 --- a/tests/test_ecs/test_ecs_boto3.py +++ b/tests/test_ecs/test_ecs_boto3.py @@ -2531,7 +2531,9 @@ def test_describe_task_sets(): assert "tags" not in task_sets[0] task_sets = client.describe_task_sets( - cluster=cluster_name, service=service_name, include=["TAGS"], + cluster=cluster_name, + service=service_name, + include=["TAGS"], )["taskSets"] cluster_arn = client.describe_clusters(clusters=[cluster_name])["clusters"][0][ @@ -2591,29 +2593,39 @@ def test_delete_task_set(): ) task_set = client.create_task_set( - cluster=cluster_name, service=service_name, taskDefinition=task_def_name, + cluster=cluster_name, + service=service_name, + taskDefinition=task_def_name, )["taskSet"] task_sets = client.describe_task_sets( - cluster=cluster_name, service=service_name, taskSets=[task_set["taskSetArn"]], + cluster=cluster_name, + service=service_name, + taskSets=[task_set["taskSetArn"]], )["taskSets"] assert len(task_sets) == 1 response = client.delete_task_set( - cluster=cluster_name, service=service_name, taskSet=task_set["taskSetArn"], + cluster=cluster_name, + service=service_name, + taskSet=task_set["taskSetArn"], ) assert response["taskSet"]["taskSetArn"] == task_set["taskSetArn"] task_sets = client.describe_task_sets( - cluster=cluster_name, service=service_name, taskSets=[task_set["taskSetArn"]], + cluster=cluster_name, + service=service_name, + taskSets=[task_set["taskSetArn"]], )["taskSets"] assert len(task_sets) == 0 with pytest.raises(ClientError): _ = client.delete_task_set( - cluster=cluster_name, service=service_name, taskSet=task_set["taskSetArn"], + cluster=cluster_name, + service=service_name, + taskSet=task_set["taskSetArn"], ) @@ -2649,7 +2661,9 @@ def test_update_service_primary_task_set(): ) task_set = client.create_task_set( - cluster=cluster_name, service=service_name, taskDefinition=task_def_name, + cluster=cluster_name, + service=service_name, + taskDefinition=task_def_name, )["taskSet"] service = 
client.describe_services(cluster=cluster_name, services=[service_name],)[ @@ -2669,7 +2683,9 @@ def test_update_service_primary_task_set(): assert service["taskDefinition"] == service["taskSets"][0]["taskDefinition"] another_task_set = client.create_task_set( - cluster=cluster_name, service=service_name, taskDefinition=task_def_name, + cluster=cluster_name, + service=service_name, + taskDefinition=task_def_name, )["taskSet"] service = client.describe_services(cluster=cluster_name, services=[service_name],)[ "services" @@ -2721,11 +2737,15 @@ def test_update_task_set(): ) task_set = client.create_task_set( - cluster=cluster_name, service=service_name, taskDefinition=task_def_name, + cluster=cluster_name, + service=service_name, + taskDefinition=task_def_name, )["taskSet"] another_task_set = client.create_task_set( - cluster=cluster_name, service=service_name, taskDefinition=task_def_name, + cluster=cluster_name, + service=service_name, + taskDefinition=task_def_name, )["taskSet"] assert another_task_set["scale"]["unit"] == "PERCENT" assert another_task_set["scale"]["value"] == 100.0 @@ -2738,7 +2758,9 @@ def test_update_task_set(): ) updated_task_set = client.describe_task_sets( - cluster=cluster_name, service=service_name, taskSets=[task_set["taskSetArn"]], + cluster=cluster_name, + service=service_name, + taskSets=[task_set["taskSetArn"]], )["taskSets"][0] assert updated_task_set["scale"]["value"] == 25.0 assert updated_task_set["scale"]["unit"] == "PERCENT" @@ -2784,11 +2806,13 @@ def test_list_tasks_with_filters(): } _ = ecs.register_task_definition( - family="test_task_def_1", containerDefinitions=[test_container_def], + family="test_task_def_1", + containerDefinitions=[test_container_def], ) _ = ecs.register_task_definition( - family="test_task_def_2", containerDefinitions=[test_container_def], + family="test_task_def_2", + containerDefinitions=[test_container_def], ) _ = ecs.start_task( diff --git a/tests/test_elasticbeanstalk/test_eb.py b/tests/test_elasticbeanstalk/test_eb.py index 42eb09be3eba..8eb32d24ed71 100644 --- a/tests/test_elasticbeanstalk/test_eb.py +++ b/tests/test_elasticbeanstalk/test_eb.py @@ -9,24 +9,30 @@ def test_create_application(): # Create Elastic Beanstalk Application conn = boto3.client("elasticbeanstalk", region_name="us-east-1") - app = conn.create_application(ApplicationName="myapp",) + app = conn.create_application( + ApplicationName="myapp", + ) app["Application"]["ApplicationName"].should.equal("myapp") @mock_elasticbeanstalk def test_create_application_dup(): conn = boto3.client("elasticbeanstalk", region_name="us-east-1") - conn.create_application(ApplicationName="myapp",) - conn.create_application.when.called_with(ApplicationName="myapp",).should.throw( - ClientError + conn.create_application( + ApplicationName="myapp", ) + conn.create_application.when.called_with( + ApplicationName="myapp", + ).should.throw(ClientError) @mock_elasticbeanstalk def test_describe_applications(): # Create Elastic Beanstalk Application conn = boto3.client("elasticbeanstalk", region_name="us-east-1") - conn.create_application(ApplicationName="myapp",) + conn.create_application( + ApplicationName="myapp", + ) apps = conn.describe_applications() len(apps["Applications"]).should.equal(1) @@ -37,8 +43,13 @@ def test_describe_applications(): def test_create_environment(): # Create Elastic Beanstalk Environment conn = boto3.client("elasticbeanstalk", region_name="us-east-1") - app = conn.create_application(ApplicationName="myapp",) - env = 
conn.create_environment(ApplicationName="myapp", EnvironmentName="myenv",) + app = conn.create_application( + ApplicationName="myapp", + ) + env = conn.create_environment( + ApplicationName="myapp", + EnvironmentName="myenv", + ) env["EnvironmentName"].should.equal("myenv") @@ -46,9 +57,12 @@ def test_create_environment(): def test_describe_environments(): # List Elastic Beanstalk Envs conn = boto3.client("elasticbeanstalk", region_name="us-east-1") - conn.create_application(ApplicationName="myapp",) + conn.create_application( + ApplicationName="myapp", + ) conn.create_environment( - ApplicationName="myapp", EnvironmentName="myenv", + ApplicationName="myapp", + EnvironmentName="myenv", ) envs = conn.describe_environments() @@ -75,7 +89,9 @@ def tags_list_to_dict(tag_list): @mock_elasticbeanstalk def test_create_environment_tags(): conn = boto3.client("elasticbeanstalk", region_name="us-east-1") - conn.create_application(ApplicationName="myapp",) + conn.create_application( + ApplicationName="myapp", + ) env_tags = {"initial key": "initial value"} env = conn.create_environment( ApplicationName="myapp", @@ -83,7 +99,9 @@ def test_create_environment_tags(): Tags=tags_dict_to_list(env_tags), ) - tags = conn.list_tags_for_resource(ResourceArn=env["EnvironmentArn"],) + tags = conn.list_tags_for_resource( + ResourceArn=env["EnvironmentArn"], + ) tags["ResourceArn"].should.equal(env["EnvironmentArn"]) tags_list_to_dict(tags["ResourceTags"]).should.equal(env_tags) @@ -91,7 +109,9 @@ def test_create_environment_tags(): @mock_elasticbeanstalk def test_update_tags(): conn = boto3.client("elasticbeanstalk", region_name="us-east-1") - conn.create_application(ApplicationName="myapp",) + conn.create_application( + ApplicationName="myapp", + ) env_tags = { "initial key": "initial value", "to remove": "delete me", @@ -117,7 +137,9 @@ def test_update_tags(): total_env_tags.update(extra_env_tags) del total_env_tags["to remove"] - tags = conn.list_tags_for_resource(ResourceArn=env["EnvironmentArn"],) + tags = conn.list_tags_for_resource( + ResourceArn=env["EnvironmentArn"], + ) tags["ResourceArn"].should.equal(env["EnvironmentArn"]) tags_list_to_dict(tags["ResourceTags"]).should.equal(total_env_tags) diff --git a/tests/test_elb/test_elb.py b/tests/test_elb/test_elb.py index a2e8871d87ac..427cb740c563 100644 --- a/tests/test_elb/test_elb.py +++ b/tests/test_elb/test_elb.py @@ -184,8 +184,9 @@ def test_apply_security_groups_to_load_balancer(): response = client.apply_security_groups_to_load_balancer( LoadBalancerName="my-lb", SecurityGroups=["not-really-a-security-group"] ) - assert "One or more of the specified security groups do not exist." \ - in str(error.value) + assert "One or more of the specified security groups do not exist." 
in str( + error.value + ) @mock_elb_deprecated diff --git a/tests/test_emr/test_emr_boto3.py b/tests/test_emr/test_emr_boto3.py index c6b93957ab43..5ea433fc90fa 100644 --- a/tests/test_emr/test_emr_boto3.py +++ b/tests/test_emr/test_emr_boto3.py @@ -524,8 +524,10 @@ def test_run_job_flow_with_instance_groups_with_autoscaling(): if "AutoScalingPolicy" in y: x["AutoScalingPolicy"]["Status"]["State"].should.equal("ATTACHED") returned_policy = deepcopy(x["AutoScalingPolicy"]) - auto_scaling_policy_with_cluster_id = _patch_cluster_id_placeholder_in_autoscaling_policy( - y["AutoScalingPolicy"], cluster_id + auto_scaling_policy_with_cluster_id = ( + _patch_cluster_id_placeholder_in_autoscaling_policy( + y["AutoScalingPolicy"], cluster_id + ) ) del returned_policy["Status"] returned_policy.should.equal(auto_scaling_policy_with_cluster_id) @@ -551,8 +553,10 @@ def test_put_remove_auto_scaling_policy(): AutoScalingPolicy=auto_scaling_policy, ) - auto_scaling_policy_with_cluster_id = _patch_cluster_id_placeholder_in_autoscaling_policy( - auto_scaling_policy, cluster_id + auto_scaling_policy_with_cluster_id = ( + _patch_cluster_id_placeholder_in_autoscaling_policy( + auto_scaling_policy, cluster_id + ) ) del resp["AutoScalingPolicy"]["Status"] resp["AutoScalingPolicy"].should.equal(auto_scaling_policy_with_cluster_id) diff --git a/tests/test_glue/test_datacatalog.py b/tests/test_glue/test_datacatalog.py index 5755282398ec..38a3831d522d 100644 --- a/tests/test_glue/test_datacatalog.py +++ b/tests/test_glue/test_datacatalog.py @@ -223,9 +223,7 @@ def test_get_table_not_exits(): helpers.get_table(client, database_name, "myfirsttable") exc.value.response["Error"]["Code"].should.equal("EntityNotFoundException") - exc.value.response["Error"]["Message"].should.match( - "Table myfirsttable not found" - ) + exc.value.response["Error"]["Message"].should.match("Table myfirsttable not found") @mock_glue diff --git a/tests/test_iam/test_iam.py b/tests/test_iam/test_iam.py index b404a41060f1..7e8d1560f501 100644 --- a/tests/test_iam/test_iam.py +++ b/tests/test_iam/test_iam.py @@ -207,7 +207,9 @@ def test_remove_role_from_instance_profile(): def test_delete_instance_profile(): conn = boto3.client("iam", region_name="us-east-1") conn.create_role( - RoleName="my-role", AssumeRolePolicyDocument="some policy", Path="/my-path/" + RoleName="my-role", + AssumeRolePolicyDocument="some policy", + Path="/my-path/", ) conn.create_instance_profile(InstanceProfileName="my-profile") conn.add_role_to_instance_profile( @@ -257,7 +259,9 @@ def test_delete_role(): # Test deletion failure with a managed policy conn.create_role( - RoleName="my-role", AssumeRolePolicyDocument="some policy", Path="/my-path/" + RoleName="my-role", + AssumeRolePolicyDocument="some policy", + Path="/my-path/", ) response = conn.create_policy( PolicyName="my-managed-policy", PolicyDocument=MOCK_POLICY @@ -273,10 +277,14 @@ def test_delete_role(): # Test deletion failure with an inline policy conn.create_role( - RoleName="my-role", AssumeRolePolicyDocument="some policy", Path="/my-path/" + RoleName="my-role", + AssumeRolePolicyDocument="some policy", + Path="/my-path/", ) conn.put_role_policy( - RoleName="my-role", PolicyName="my-role-policy", PolicyDocument=MOCK_POLICY + RoleName="my-role", + PolicyName="my-role-policy", + PolicyDocument=MOCK_POLICY, ) with pytest.raises(conn.exceptions.DeleteConflictException): conn.delete_role(RoleName="my-role") @@ -287,7 +295,9 @@ def test_delete_role(): # Test deletion failure with attachment to an instance profile 
conn.create_role( - RoleName="my-role", AssumeRolePolicyDocument="some policy", Path="/my-path/" + RoleName="my-role", + AssumeRolePolicyDocument="some policy", + Path="/my-path/", ) conn.create_instance_profile(InstanceProfileName="my-profile") conn.add_role_to_instance_profile( @@ -304,7 +314,9 @@ def test_delete_role(): # Test deletion with no conflicts conn.create_role( - RoleName="my-role", AssumeRolePolicyDocument="some policy", Path="/my-path/" + RoleName="my-role", + AssumeRolePolicyDocument="some policy", + Path="/my-path/", ) conn.delete_role(RoleName="my-role") with pytest.raises(conn.exceptions.NoSuchEntityException): @@ -331,7 +343,9 @@ def test_list_instance_profiles_for_role(): conn = boto.connect_iam() conn.create_role( - role_name="my-role", assume_role_policy_document="some policy", path="my-path" + role_name="my-role", + assume_role_policy_document="some policy", + path="my-path", ) conn.create_role( role_name="my-role2", @@ -343,7 +357,8 @@ def test_list_instance_profiles_for_role(): profile_path_list = ["my-path", "my-path2"] for profile_count in range(0, 2): conn.create_instance_profile( - profile_name_list[profile_count], path=profile_path_list[profile_count] + profile_name_list[profile_count], + path=profile_path_list[profile_count], ) for profile_count in range(0, 2): @@ -409,7 +424,9 @@ def test_put_role_policy(): def test_get_role_policy(): conn = boto3.client("iam", region_name="us-east-1") conn.create_role( - RoleName="my-role", AssumeRolePolicyDocument="some policy", Path="my-path" + RoleName="my-role", + AssumeRolePolicyDocument="some policy", + Path="my-path", ) with pytest.raises(conn.exceptions.NoSuchEntityException): conn.get_role_policy(RoleName="my-role", PolicyName="does-not-exist") @@ -898,19 +915,19 @@ def test_get_all_access_keys(): conn = boto.connect_iam() conn.create_user("my-user") response = conn.get_all_access_keys("my-user") - assert \ + assert ( response["list_access_keys_response"]["list_access_keys_result"][ "access_key_metadata" - ] == [] + ] + == [] + ) conn.create_access_key("my-user") response = conn.get_all_access_keys("my-user") - assert \ - sorted( - response["list_access_keys_response"]["list_access_keys_result"][ - "access_key_metadata" - ][0].keys() - ) == \ - sorted(["status", "create_date", "user_name", "access_key_id"]) + assert sorted( + response["list_access_keys_response"]["list_access_keys_result"][ + "access_key_metadata" + ][0].keys() + ) == sorted(["status", "create_date", "user_name", "access_key_id"]) @mock_iam @@ -921,9 +938,9 @@ def test_list_access_keys(): assert response["AccessKeyMetadata"] == [] access_key = conn.create_access_key(UserName="my-user")["AccessKey"] response = conn.list_access_keys(UserName="my-user") - assert \ - sorted(response["AccessKeyMetadata"][0].keys()) == \ - sorted(["Status", "CreateDate", "UserName", "AccessKeyId"] + assert sorted(response["AccessKeyMetadata"][0].keys()) == sorted( + ["Status", "CreateDate", "UserName", "AccessKeyId"] + ) conn = boto3.client( "iam", region_name="us-east-1", @@ -931,9 +948,9 @@ def test_list_access_keys(): aws_secret_access_key=access_key["SecretAccessKey"], ) response = conn.list_access_keys() - assert \ - sorted(response["AccessKeyMetadata"][0].keys()) == \ - sorted(["Status", "CreateDate", "UserName", "AccessKeyId"]) + assert sorted(response["AccessKeyMetadata"][0].keys()) == sorted( + ["Status", "CreateDate", "UserName", "AccessKeyId"] + ) @mock_iam_deprecated() @@ -1022,7 +1039,8 @@ def test_create_virtual_mfa_device_errors(): 
client.create_virtual_mfa_device.when.called_with( VirtualMFADeviceName="test-device" ).should.throw( - ClientError, "MFADevice entity at the same path and name already exists." + ClientError, + "MFADevice entity at the same path and name already exists.", ) client.create_virtual_mfa_device.when.called_with( @@ -1211,7 +1229,9 @@ def test_delete_user(): # Test deletion failure with an inline policy conn.create_user(UserName="my-user") conn.put_user_policy( - UserName="my-user", PolicyName="my-user-policy", PolicyDocument=MOCK_POLICY + UserName="my-user", + PolicyName="my-user-policy", + PolicyDocument=MOCK_POLICY, ) with pytest.raises(conn.exceptions.DeleteConflictException): conn.delete_user(UserName="my-user") @@ -1396,7 +1416,9 @@ def test_managed_policy(): role_name = "my-role" conn.create_role( - role_name, assume_role_policy_document={"policy": "test"}, path="my-path" + role_name, + assume_role_policy_document={"policy": "test"}, + path="my-path", ) for policy_name in [ "AmazonElasticMapReduceRole", @@ -1423,7 +1445,8 @@ def test_managed_policy(): ].should.have.length_of(2) conn.detach_role_policy( - "arn:aws:iam::aws:policy/service-role/AmazonElasticMapReduceRole", role_name + "arn:aws:iam::aws:policy/service-role/AmazonElasticMapReduceRole", + role_name, ) rows = conn.list_policies(only_attached=True)["list_policies_response"][ "list_policies_result" @@ -1444,7 +1467,8 @@ def test_managed_policy(): with pytest.raises(BotoServerError): conn.detach_role_policy( - "arn:aws:iam::aws:policy/service-role/AmazonElasticMapReduceRole", role_name + "arn:aws:iam::aws:policy/service-role/AmazonElasticMapReduceRole", + role_name, ) with pytest.raises(BotoServerError): @@ -1562,7 +1586,9 @@ def test_get_ssh_public_key(): with pytest.raises(ClientError): client.get_ssh_public_key( - UserName=username, SSHPublicKeyId="xxnon-existent-keyxx", Encoding="SSH" + UserName=username, + SSHPublicKeyId="xxnon-existent-keyxx", + Encoding="SSH", ) resp = client.upload_ssh_public_key(UserName=username, SSHPublicKeyBody=public_key) @@ -1603,7 +1629,9 @@ def test_update_ssh_public_key(): with pytest.raises(ClientError): client.update_ssh_public_key( - UserName=username, SSHPublicKeyId="xxnon-existent-keyxx", Status="Inactive" + UserName=username, + SSHPublicKeyId="xxnon-existent-keyxx", + Status="Inactive", ) resp = client.upload_ssh_public_key(UserName=username, SSHPublicKeyBody=public_key) @@ -1681,7 +1709,9 @@ def test_get_account_authorization_details(): UserName="testUser", PolicyName="testPolicy", PolicyDocument=test_policy ) conn.put_group_policy( - GroupName="testGroup", PolicyName="testPolicy", PolicyDocument=test_policy + GroupName="testGroup", + PolicyName="testPolicy", + PolicyDocument=test_policy, ) conn.attach_user_policy( @@ -1981,7 +2011,9 @@ def test_create_role_with_tags(): map(lambda x: {"Key": str(x), "Value": str(x)}, range(0, 51)) ) conn.create_role( - RoleName="my-role3", AssumeRolePolicyDocument="{}", Tags=too_many_tags + RoleName="my-role3", + AssumeRolePolicyDocument="{}", + Tags=too_many_tags, ) assert ( "failed to satisfy constraint: Member must have length less than or equal to 50." 
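The reformatting pattern in the hunks above recurs throughout this patch and is
consistent with black's "magic trailing comma" behaviour introduced in release
20.8b0: a call or literal that already ends in a trailing comma is exploded to
one element per line, while a comma-free call that fits on one line is left
collapsed. A minimal sketch with a hypothetical call, not taken from this diff:

    # no trailing comma: black keeps the call on one line if it fits
    conn.create_role(RoleName="r", AssumeRolePolicyDocument="{}")

    # magic trailing comma: the same call is forced to one argument per line
    conn.create_role(
        RoleName="r",
        AssumeRolePolicyDocument="{}",
    )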
@@ -2247,7 +2279,9 @@ def test_update_role_description(): conn.delete_role(RoleName="my-role") conn.create_role( - RoleName="my-role", AssumeRolePolicyDocument="some policy", Path="/my-path/" + RoleName="my-role", + AssumeRolePolicyDocument="some policy", + Path="/my-path/", ) response = conn.update_role_description(RoleName="my-role", Description="test") @@ -2262,7 +2296,9 @@ def test_update_role(): conn.delete_role(RoleName="my-role") conn.create_role( - RoleName="my-role", AssumeRolePolicyDocument="some policy", Path="/my-path/" + RoleName="my-role", + AssumeRolePolicyDocument="some policy", + Path="/my-path/", ) response = conn.update_role_description(RoleName="my-role", Description="test") assert response["Role"]["RoleName"] == "my-role" @@ -2276,7 +2312,9 @@ def test_update_role(): conn.delete_role(RoleName="my-role") conn.create_role( - RoleName="my-role", AssumeRolePolicyDocument="some policy", Path="/my-path/" + RoleName="my-role", + AssumeRolePolicyDocument="some policy", + Path="/my-path/", ) response = conn.update_role(RoleName="my-role", Description="test") assert len(response.keys()) == 1 @@ -2317,7 +2355,9 @@ def test_list_entities_for_policy(): conn = boto3.client("iam", region_name="us-east-1") conn.create_role( - RoleName="my-role", AssumeRolePolicyDocument="some policy", Path="/my-path/" + RoleName="my-role", + AssumeRolePolicyDocument="some policy", + Path="/my-path/", ) conn.create_user(Path="/", UserName="testUser") conn.create_group(Path="/", GroupName="testGroup") @@ -2333,7 +2373,9 @@ def test_list_entities_for_policy(): UserName="testUser", PolicyName="testPolicy", PolicyDocument=test_policy ) conn.put_group_policy( - GroupName="testGroup", PolicyName="testPolicy", PolicyDocument=test_policy + GroupName="testGroup", + PolicyName="testPolicy", + PolicyDocument=test_policy, ) conn.attach_user_policy( @@ -2396,7 +2438,9 @@ def test_list_entities_for_policy(): def test_create_role_no_path(): conn = boto3.client("iam", region_name="us-east-1") resp = conn.create_role( - RoleName="my-role", AssumeRolePolicyDocument="some policy", Description="test" + RoleName="my-role", + AssumeRolePolicyDocument="some policy", + Description="test", ) resp.get("Role").get("Arn").should.equal( "arn:aws:iam::{}:role/my-role".format(ACCOUNT_ID) @@ -2452,7 +2496,9 @@ def test_create_role_with_same_name_should_fail(): iam = boto3.client("iam", region_name="us-east-1") test_role_name = str(uuid4()) iam.create_role( - RoleName=test_role_name, AssumeRolePolicyDocument="policy", Description="test" + RoleName=test_role_name, + AssumeRolePolicyDocument="policy", + Description="test", ) # Create the role again, and verify that it fails with pytest.raises(ClientError) as err: @@ -2539,14 +2585,24 @@ def test_create_open_id_connect_provider_errors(): client.create_open_id_connect_provider.when.called_with( Url="http://example.org", - ThumbprintList=["a" * 40, "b" * 40, "c" * 40, "d" * 40, "e" * 40, "f" * 40], + ThumbprintList=[ + "a" * 40, + "b" * 40, + "c" * 40, + "d" * 40, + "e" * 40, + "f" * 40, + ], ).should.throw(ClientError, "Thumbprint list must contain fewer than 5 entries.") too_many_client_ids = ["{}".format(i) for i in range(101)] client.create_open_id_connect_provider.when.called_with( - Url="http://example.org", ThumbprintList=[], ClientIDList=too_many_client_ids + Url="http://example.org", + ThumbprintList=[], + ClientIDList=too_many_client_ids, ).should.throw( - ClientError, "Cannot exceed quota for ClientIdsPerOpenIdConnectProvider: 100" + ClientError, + "Cannot exceed quota for 
ClientIdsPerOpenIdConnectProvider: 100", ) too_long_url = "b" * 256 @@ -2587,7 +2643,8 @@ def test_delete_open_id_connect_provider(): client.get_open_id_connect_provider.when.called_with( OpenIDConnectProviderArn=open_id_arn ).should.throw( - ClientError, "OpenIDConnect Provider not found for arn {}".format(open_id_arn) + ClientError, + "OpenIDConnect Provider not found for arn {}".format(open_id_arn), ) # deleting a non existing provider should be successful @@ -2679,7 +2736,9 @@ def test_update_account_password_policy_errors(): client = boto3.client("iam", region_name="us-east-1") client.update_account_password_policy.when.called_with( - MaxPasswordAge=1096, MinimumPasswordLength=129, PasswordReusePrevention=25 + MaxPasswordAge=1096, + MinimumPasswordLength=129, + PasswordReusePrevention=25, ).should.throw( ClientError, "3 validation errors detected: " @@ -2757,7 +2816,8 @@ def test_delete_account_password_policy_errors(): client = boto3.client("iam", region_name="us-east-1") client.delete_account_password_policy.when.called_with().should.throw( - ClientError, "The account policy with name PasswordPolicy cannot be found." + ClientError, + "The account policy with name PasswordPolicy cannot be found.", ) @@ -2885,7 +2945,8 @@ def test_list_user_tags(): conn = boto3.client("iam", region_name="us-east-1") conn.create_user(UserName="kenny-bania") conn.create_user( - UserName="jackie-chiles", Tags=[{"Key": "Sue-Allen", "Value": "Oh-Henry"}] + UserName="jackie-chiles", + Tags=[{"Key": "Sue-Allen", "Value": "Oh-Henry"}], ) conn.create_user( UserName="cosmo", @@ -2904,7 +2965,10 @@ def test_list_user_tags(): response = conn.list_user_tags(UserName="cosmo") response["Tags"].should.equal( - [{"Key": "Stan", "Value": "The Caddy"}, {"Key": "like-a", "Value": "glove"}] + [ + {"Key": "Stan", "Value": "The Caddy"}, + {"Key": "like-a", "Value": "glove"}, + ] ) response["IsTruncated"].should_not.be.ok @@ -2947,7 +3011,8 @@ def test_delete_account_password_policy_errors(): client = boto3.client("iam", region_name="us-east-1") client.delete_account_password_policy.when.called_with().should.throw( - ClientError, "The account policy with name PasswordPolicy cannot be found." 
+ ClientError, + "The account policy with name PasswordPolicy cannot be found.", ) @@ -2976,7 +3041,10 @@ def test_role_list_config_discovered_resources(): max_session_duration=3600, ) roles.append( - {"id": this_role.id, "name": this_role.name,} + { + "id": this_role.id, + "name": this_role.name, + } ) assert len(roles) == num_roles @@ -3034,7 +3102,11 @@ def test_role_config_dict(): basic_assume_role = { "Version": "2012-10-17", "Statement": [ - {"Effect": "Allow", "Principal": {"AWS": "*"}, "Action": "sts:AssumeRole"} + { + "Effect": "Allow", + "Principal": {"AWS": "*"}, + "Action": "sts:AssumeRole", + } ], } @@ -3351,7 +3423,9 @@ def test_role_config_client(): # Test non-aggregated pagination assert ( config_client.list_discovered_resources( - resourceType="AWS::IAM::Role", limit=1, nextToken=result["nextToken"] + resourceType="AWS::IAM::Role", + limit=1, + nextToken=result["nextToken"], )["resourceIdentifiers"][0]["resourceId"] ) != first_result @@ -3387,14 +3461,18 @@ def test_role_config_client(): # Test non-aggregated resource name/id filter assert ( config_client.list_discovered_resources( - resourceType="AWS::IAM::Role", resourceName=roles[1]["name"], limit=1, + resourceType="AWS::IAM::Role", + resourceName=roles[1]["name"], + limit=1, )["resourceIdentifiers"][0]["resourceName"] == roles[1]["name"] ) assert ( config_client.list_discovered_resources( - resourceType="AWS::IAM::Role", resourceIds=[roles[0]["id"]], limit=1, + resourceType="AWS::IAM::Role", + resourceIds=[roles[0]["id"]], + limit=1, )["resourceIdentifiers"][0]["resourceName"] == roles[0]["name"] ) @@ -3440,13 +3518,17 @@ def test_role_config_client(): # Test non-aggregated resource name/id filter assert ( config_client.list_discovered_resources( - resourceType="AWS::IAM::Role", resourceName=roles[1]["name"], limit=1, + resourceType="AWS::IAM::Role", + resourceName=roles[1]["name"], + limit=1, )["resourceIdentifiers"][0]["resourceName"] == roles[1]["name"] ) assert ( config_client.list_discovered_resources( - resourceType="AWS::IAM::Role", resourceIds=[roles[0]["id"]], limit=1, + resourceType="AWS::IAM::Role", + resourceIds=[roles[0]["id"]], + limit=1, )["resourceIdentifiers"][0]["resourceName"] == roles[0]["name"] ) @@ -3556,7 +3638,10 @@ def test_policy_list_config_discovered_resources(): policy_name="policy{}".format(ix), ) policies.append( - {"id": this_policy.id, "name": this_policy.name,} + { + "id": this_policy.id, + "name": this_policy.name, + } ) assert len(policies) == num_policies @@ -3781,7 +3866,9 @@ def test_policy_config_client(): # Test non-aggregated pagination assert ( config_client.list_discovered_resources( - resourceType="AWS::IAM::Policy", limit=1, nextToken=result["nextToken"] + resourceType="AWS::IAM::Policy", + limit=1, + nextToken=result["nextToken"], )["resourceIdentifiers"][0]["resourceId"] ) != first_result @@ -3818,14 +3905,18 @@ def test_policy_config_client(): # Test non-aggregated resource name/id filter assert ( config_client.list_discovered_resources( - resourceType="AWS::IAM::Policy", resourceName=policies[1]["name"], limit=1, + resourceType="AWS::IAM::Policy", + resourceName=policies[1]["name"], + limit=1, )["resourceIdentifiers"][0]["resourceName"] == policies[1]["name"] ) assert ( config_client.list_discovered_resources( - resourceType="AWS::IAM::Policy", resourceIds=[policies[0]["id"]], limit=1, + resourceType="AWS::IAM::Policy", + resourceIds=[policies[0]["id"]], + limit=1, )["resourceIdentifiers"][0]["resourceName"] == policies[0]["name"] ) @@ -3906,7 +3997,10 @@ def 
test_policy_config_client(): assert ( config_client.batch_get_resource_config( resourceKeys=[ - {"resourceType": "AWS::IAM::Policy", "resourceId": policies[7]["id"]} + { + "resourceType": "AWS::IAM::Policy", + "resourceId": policies[7]["id"], + } ] )["baseConfigurationItems"][0]["resourceName"] == policies[7]["name"] diff --git a/tests/test_iot/test_iot.py b/tests/test_iot/test_iot.py index bea6958ac8d7..426b1fa69459 100644 --- a/tests/test_iot/test_iot.py +++ b/tests/test_iot/test_iot.py @@ -939,9 +939,7 @@ def test_should_list_all_groups_filtered_by_parent(self): resp["thingGroups"].should.have.length_of(0) with pytest.raises(ClientError) as e: client.list_thing_groups(parentGroup="inexistant-group-name") - e.value.response["Error"]["Code"].should.equal( - "ResourceNotFoundException" - ) + e.value.response["Error"]["Code"].should.equal("ResourceNotFoundException") @mock_iot def test_should_list_all_groups_filtered_by_parent_non_recursively(self): @@ -1019,7 +1017,9 @@ def test_delete_thing_group(): group_name_1a = "my-group-name-1a" group_name_2a = "my-group-name-2a" tree_dict = { - group_name_1a: {group_name_2a: {},}, + group_name_1a: { + group_name_2a: {}, + }, } group_catalog = generate_thing_group_tree(client, tree_dict) diff --git a/tests/test_kinesisvideoarchivedmedia/test_kinesisvideoarchivedmedia.py b/tests/test_kinesisvideoarchivedmedia/test_kinesisvideoarchivedmedia.py index ee44391977c9..65785f2e27e8 100644 --- a/tests/test_kinesisvideoarchivedmedia/test_kinesisvideoarchivedmedia.py +++ b/tests/test_kinesisvideoarchivedmedia/test_kinesisvideoarchivedmedia.py @@ -24,7 +24,9 @@ def test_get_hls_streaming_session_url(): region_name=region_name, endpoint_url=data_endpoint, ) - res = client.get_hls_streaming_session_url(StreamName=stream_name,) + res = client.get_hls_streaming_session_url( + StreamName=stream_name, + ) reg_exp = "^{}/hls/v1/getHLSMasterPlaylist.m3u8\?SessionToken\=.+$".format( data_endpoint ) @@ -48,7 +50,9 @@ def test_get_dash_streaming_session_url(): region_name=region_name, endpoint_url=data_endpoint, ) - res = client.get_dash_streaming_session_url(StreamName=stream_name,) + res = client.get_dash_streaming_session_url( + StreamName=stream_name, + ) reg_exp = "^{}/dash/v1/getDASHManifest.mpd\?SessionToken\=.+$".format(data_endpoint) res.should.have.key("DASHStreamingSessionURL").which.should.match(reg_exp) diff --git a/tests/test_kms/test_kms.py b/tests/test_kms/test_kms.py index e79bf8bbf5fe..fa5a353f85f1 100644 --- a/tests/test_kms/test_kms.py +++ b/tests/test_kms/test_kms.py @@ -575,8 +575,10 @@ def test__delete_alias__raises_if_alias_is_not_found(): with pytest.raises(NotFoundException) as err: kms.delete_alias(alias_name) - expected_message_match = r"Alias arn:aws:kms:{region}:[0-9]{{12}}:{alias_name} is not found.".format( - region=region, alias_name=alias_name + expected_message_match = ( + r"Alias arn:aws:kms:{region}:[0-9]{{12}}:{alias_name} is not found.".format( + region=region, alias_name=alias_name + ) ) ex = err.value ex.body["__type"].should.equal("NotFoundException") diff --git a/tests/test_kms/test_kms_boto3.py b/tests/test_kms/test_kms_boto3.py index 53f2845759c5..26e0eef9133e 100644 --- a/tests/test_kms/test_kms_boto3.py +++ b/tests/test_kms/test_kms_boto3.py @@ -55,14 +55,20 @@ def test_create_key(): key["KeyMetadata"]["Origin"].should.equal("AWS_KMS") key["KeyMetadata"].should_not.have.key("SigningAlgorithms") - key = conn.create_key(KeyUsage="ENCRYPT_DECRYPT", CustomerMasterKeySpec="RSA_2048",) + key = conn.create_key( + 
KeyUsage="ENCRYPT_DECRYPT", + CustomerMasterKeySpec="RSA_2048", + ) sorted(key["KeyMetadata"]["EncryptionAlgorithms"]).should.equal( ["RSAES_OAEP_SHA_1", "RSAES_OAEP_SHA_256"] ) key["KeyMetadata"].should_not.have.key("SigningAlgorithms") - key = conn.create_key(KeyUsage="SIGN_VERIFY", CustomerMasterKeySpec="RSA_2048",) + key = conn.create_key( + KeyUsage="SIGN_VERIFY", + CustomerMasterKeySpec="RSA_2048", + ) key["KeyMetadata"].should_not.have.key("EncryptionAlgorithms") sorted(key["KeyMetadata"]["SigningAlgorithms"]).should.equal( @@ -77,21 +83,24 @@ def test_create_key(): ) key = conn.create_key( - KeyUsage="SIGN_VERIFY", CustomerMasterKeySpec="ECC_SECG_P256K1", + KeyUsage="SIGN_VERIFY", + CustomerMasterKeySpec="ECC_SECG_P256K1", ) key["KeyMetadata"].should_not.have.key("EncryptionAlgorithms") key["KeyMetadata"]["SigningAlgorithms"].should.equal(["ECDSA_SHA_256"]) key = conn.create_key( - KeyUsage="SIGN_VERIFY", CustomerMasterKeySpec="ECC_NIST_P384", + KeyUsage="SIGN_VERIFY", + CustomerMasterKeySpec="ECC_NIST_P384", ) key["KeyMetadata"].should_not.have.key("EncryptionAlgorithms") key["KeyMetadata"]["SigningAlgorithms"].should.equal(["ECDSA_SHA_384"]) key = conn.create_key( - KeyUsage="SIGN_VERIFY", CustomerMasterKeySpec="ECC_NIST_P521", + KeyUsage="SIGN_VERIFY", + CustomerMasterKeySpec="ECC_NIST_P521", ) key["KeyMetadata"].should_not.have.key("EncryptionAlgorithms") @@ -101,7 +110,10 @@ def test_create_key(): @mock_kms def test_describe_key(): client = boto3.client("kms", region_name="us-east-1") - response = client.create_key(Description="my key", KeyUsage="ENCRYPT_DECRYPT",) + response = client.create_key( + Description="my key", + KeyUsage="ENCRYPT_DECRYPT", + ) key_id = response["KeyMetadata"]["KeyId"] response = client.describe_key(KeyId=key_id) diff --git a/tests/test_logs/test_integration.py b/tests/test_logs/test_integration.py index eab839970c10..f7b4fc399622 100644 --- a/tests/test_logs/test_integration.py +++ b/tests/test_logs/test_integration.py @@ -205,7 +205,8 @@ def test_delete_subscription_filter_errors(): # when client_logs.delete_subscription_filter( - logGroupName="/test", filterName="test", + logGroupName="/test", + filterName="test", ) # then @@ -243,7 +244,8 @@ def test_delete_subscription_filter_errors(): # when with pytest.raises(ClientError) as e: client_logs.delete_subscription_filter( - logGroupName="not-existing-log-group", filterName="test", + logGroupName="not-existing-log-group", + filterName="test", ) # then @@ -258,7 +260,8 @@ def test_delete_subscription_filter_errors(): # when with pytest.raises(ClientError) as e: client_logs.delete_subscription_filter( - logGroupName="/test", filterName="wrong-filter-name", + logGroupName="/test", + filterName="wrong-filter-name", ) # then @@ -342,7 +345,9 @@ def _get_role_name(region_name): return iam.get_role(RoleName="test-role")["Role"]["Arn"] except ClientError: return iam.create_role( - RoleName="test-role", AssumeRolePolicyDocument="test policy", Path="/", + RoleName="test-role", + AssumeRolePolicyDocument="test policy", + Path="/", )["Role"]["Arn"] @@ -372,7 +377,8 @@ def _wait_for_log_msg(client, log_group_name, expected_msg_part): for log_stream in log_streams: result = client.get_log_events( - logGroupName=log_group_name, logStreamName=log_stream["logStreamName"], + logGroupName=log_group_name, + logStreamName=log_stream["logStreamName"], ) received_messages.extend( [event["message"] for event in result.get("events")] diff --git a/tests/test_logs/test_logs.py b/tests/test_logs/test_logs.py index 
fc9868ffb85a..cbfed65f00a7 100644 --- a/tests/test_logs/test_logs.py +++ b/tests/test_logs/test_logs.py @@ -448,7 +448,9 @@ def test_describe_subscription_filters_errors(): # when with pytest.raises(ClientError) as e: - client.describe_subscription_filters(logGroupName="not-existing-log-group",) + client.describe_subscription_filters( + logGroupName="not-existing-log-group", + ) # then ex = e.value diff --git a/tests/test_managedblockchain/test_managedblockchain_members.py b/tests/test_managedblockchain/test_managedblockchain_members.py index 9120e4aee42c..74b40db26306 100644 --- a/tests/test_managedblockchain/test_managedblockchain_members.py +++ b/tests/test_managedblockchain/test_managedblockchain_members.py @@ -183,7 +183,8 @@ def test_create_another_member_withopts(): # But cannot get response = conn.get_member.when.called_with( - NetworkId=network_id, MemberId=member_id2, + NetworkId=network_id, + MemberId=member_id2, ).should.throw(Exception, "Member {0} not found".format(member_id2)) # Delete member 1 @@ -255,7 +256,9 @@ def test_invite_and_remove_member(): # Create proposal (invite and remove member) response = conn.create_proposal( - NetworkId=network_id, MemberId=member_id, Actions=both_policy_actions, + NetworkId=network_id, + MemberId=member_id, + Actions=both_policy_actions, ) proposal_id2 = response["ProposalId"] @@ -368,7 +371,10 @@ def test_create_too_many_members(): MemberConfiguration=helpers.create_member_configuration( "testmember6", "admin", "Admin12345", False, "Test Member 6" ), - ).should.throw(Exception, "is the maximum number of members allowed in a",) + ).should.throw( + Exception, + "is the maximum number of members allowed in a", + ) @mock_managedblockchain @@ -594,7 +600,8 @@ def test_get_member_badmember(): network_id = response["NetworkId"] response = conn.get_member.when.called_with( - NetworkId=network_id, MemberId="m-ABCDEFGHIJKLMNOP0123456789", + NetworkId=network_id, + MemberId="m-ABCDEFGHIJKLMNOP0123456789", ).should.throw(Exception, "Member m-ABCDEFGHIJKLMNOP0123456789 not found") @@ -624,7 +631,8 @@ def test_delete_member_badmember(): network_id = response["NetworkId"] response = conn.delete_member.when.called_with( - NetworkId=network_id, MemberId="m-ABCDEFGHIJKLMNOP0123456789", + NetworkId=network_id, + MemberId="m-ABCDEFGHIJKLMNOP0123456789", ).should.throw(Exception, "Member m-ABCDEFGHIJKLMNOP0123456789 not found") diff --git a/tests/test_managedblockchain/test_managedblockchain_nodes.py b/tests/test_managedblockchain/test_managedblockchain_nodes.py index 32a5bc62c5d2..ec657700804d 100644 --- a/tests/test_managedblockchain/test_managedblockchain_nodes.py +++ b/tests/test_managedblockchain/test_managedblockchain_nodes.py @@ -58,7 +58,9 @@ def test_create_node(): # Delete node conn.delete_node( - NetworkId=network_id, MemberId=member_id, NodeId=node_id, + NetworkId=network_id, + MemberId=member_id, + NodeId=node_id, ) # Find node in full list @@ -77,7 +79,9 @@ def test_create_node(): # But cannot get response = conn.get_node.when.called_with( - NetworkId=network_id, MemberId=member_id, NodeId=node_id, + NetworkId=network_id, + MemberId=member_id, + NodeId=node_id, ).should.throw(Exception, "Node {0} not found".format(node_id)) @@ -103,7 +107,9 @@ def test_create_node_standard_edition(): logconfigbad = dict(helpers.default_nodeconfiguration) logconfigbad["InstanceType"] = "bc.t3.large" response = conn.create_node( - NetworkId=network_id, MemberId=member_id, NodeConfiguration=logconfigbad, + NetworkId=network_id, + MemberId=member_id, + 
NodeConfiguration=logconfigbad, ) node_id = response["NodeId"] @@ -146,7 +152,8 @@ def test_create_node_standard_edition(): # Should now be an exception response = conn.list_nodes.when.called_with( - NetworkId=network_id, MemberId=member_id, + NetworkId=network_id, + MemberId=member_id, ).should.throw(Exception, "Member {0} not found".format(member_id)) @@ -192,7 +199,8 @@ def test_create_too_many_nodes(): MemberId=member_id, NodeConfiguration=helpers.default_nodeconfiguration, ).should.throw( - Exception, "Maximum number of nodes exceeded in member {0}".format(member_id), + Exception, + "Maximum number of nodes exceeded in member {0}".format(member_id), ) @@ -249,14 +257,18 @@ def test_create_node_badnodeconfig(): logconfigbad = dict(helpers.default_nodeconfiguration) logconfigbad["InstanceType"] = "foo" response = conn.create_node.when.called_with( - NetworkId=network_id, MemberId=member_id, NodeConfiguration=logconfigbad, + NetworkId=network_id, + MemberId=member_id, + NodeConfiguration=logconfigbad, ).should.throw(Exception, "Requested instance foo isn't supported.") # Incorrect instance type for edition logconfigbad = dict(helpers.default_nodeconfiguration) logconfigbad["InstanceType"] = "bc.t3.large" response = conn.create_node.when.called_with( - NetworkId=network_id, MemberId=member_id, NodeConfiguration=logconfigbad, + NetworkId=network_id, + MemberId=member_id, + NodeConfiguration=logconfigbad, ).should.throw( Exception, "Instance type bc.t3.large is not supported with STARTER Edition networks", @@ -266,7 +278,9 @@ def test_create_node_badnodeconfig(): logconfigbad = dict(helpers.default_nodeconfiguration) logconfigbad["AvailabilityZone"] = "us-east-11" response = conn.create_node.when.called_with( - NetworkId=network_id, MemberId=member_id, NodeConfiguration=logconfigbad, + NetworkId=network_id, + MemberId=member_id, + NodeConfiguration=logconfigbad, ).should.throw(Exception, "Availability Zone is not valid") @@ -296,7 +310,8 @@ def test_list_nodes_badmember(): network_id = response["NetworkId"] response = conn.list_nodes.when.called_with( - NetworkId=network_id, MemberId="m-ABCDEFGHIJKLMNOP0123456789", + NetworkId=network_id, + MemberId="m-ABCDEFGHIJKLMNOP0123456789", ).should.throw(Exception, "Member m-ABCDEFGHIJKLMNOP0123456789 not found") diff --git a/tests/test_managedblockchain/test_managedblockchain_proposals.py b/tests/test_managedblockchain/test_managedblockchain_proposals.py index aa899e3a1623..d0339535c81c 100644 --- a/tests/test_managedblockchain/test_managedblockchain_proposals.py +++ b/tests/test_managedblockchain/test_managedblockchain_proposals.py @@ -131,7 +131,9 @@ def test_create_proposal_badinvitationacctid(): member_id = response["MemberId"] response = conn.create_proposal.when.called_with( - NetworkId=network_id, MemberId=member_id, Actions=actions, + NetworkId=network_id, + MemberId=member_id, + Actions=actions, ).should.throw(Exception, "Account ID format specified in proposal is not valid") @@ -155,7 +157,9 @@ def test_create_proposal_badremovalmemid(): member_id = response["MemberId"] response = conn.create_proposal.when.called_with( - NetworkId=network_id, MemberId=member_id, Actions=actions, + NetworkId=network_id, + MemberId=member_id, + Actions=actions, ).should.throw(Exception, "Member ID format specified in proposal is not valid") @@ -194,5 +198,6 @@ def test_get_proposal_badproposal(): network_id = response["NetworkId"] response = conn.get_proposal.when.called_with( - NetworkId=network_id, ProposalId="p-ABCDEFGHIJKLMNOP0123456789", + 
NetworkId=network_id, + ProposalId="p-ABCDEFGHIJKLMNOP0123456789", ).should.throw(Exception, "Proposal p-ABCDEFGHIJKLMNOP0123456789 not found") diff --git a/tests/test_managedblockchain/test_managedblockchain_proposalvotes.py b/tests/test_managedblockchain/test_managedblockchain_proposalvotes.py index e8f4043d5536..d7739ebac28c 100644 --- a/tests/test_managedblockchain/test_managedblockchain_proposalvotes.py +++ b/tests/test_managedblockchain/test_managedblockchain_proposalvotes.py @@ -666,5 +666,6 @@ def test_list_proposal_votes_badproposal(): member_id = response["MemberId"] response = conn.list_proposal_votes.when.called_with( - NetworkId=network_id, ProposalId="p-ABCDEFGHIJKLMNOP0123456789", + NetworkId=network_id, + ProposalId="p-ABCDEFGHIJKLMNOP0123456789", ).should.throw(Exception, "Proposal p-ABCDEFGHIJKLMNOP0123456789 not found") diff --git a/tests/test_organizations/test_organizations_boto3.py b/tests/test_organizations/test_organizations_boto3.py index 07cd3afa67f6..2339116d3aee 100644 --- a/tests/test_organizations/test_organizations_boto3.py +++ b/tests/test_organizations/test_organizations_boto3.py @@ -931,7 +931,10 @@ def test_tag_resource_errors(): with pytest.raises(ClientError) as e: client.tag_resource( - ResourceId="000000000000", Tags=[{"Key": "key", "Value": "value"},], + ResourceId="000000000000", + Tags=[ + {"Key": "key", "Value": "value"}, + ], ) ex = e.value ex.operation_name.should.equal("TagResource") diff --git a/tests/test_s3/test_s3.py b/tests/test_s3/test_s3.py index 67f247f1840f..ee50485c1a2b 100644 --- a/tests/test_s3/test_s3.py +++ b/tests/test_s3/test_s3.py @@ -48,8 +48,8 @@ def reduced_min_part_size(f): - """ speed up tests by temporarily making the multipart minimum part size - small + """speed up tests by temporarily making the multipart minimum part size + small """ orig_size = s3model.UPLOAD_PART_MIN_SIZE @@ -1207,8 +1207,7 @@ def test_get_public_access_block_for_account(): with pytest.raises(ClientError) as ce: client.get_public_access_block(AccountId=ACCOUNT_ID) assert ( - ce.value.response["Error"]["Code"] - == "NoSuchPublicAccessBlockConfiguration" + ce.value.response["Error"]["Code"] == "NoSuchPublicAccessBlockConfiguration" ) # Put a with an invalid account ID: @@ -1265,8 +1264,7 @@ def test_get_public_access_block_for_account(): with pytest.raises(ClientError) as ce: client.get_public_access_block(AccountId=ACCOUNT_ID) assert ( - ce.value.response["Error"]["Code"] - == "NoSuchPublicAccessBlockConfiguration" + ce.value.response["Error"]["Code"] == "NoSuchPublicAccessBlockConfiguration" ) @mock_s3 @@ -1465,9 +1463,7 @@ def test_config_get_account_pab(): config_client.get_resource_config_history( resourceType="AWS::S3::AccountPublicAccessBlock", resourceId=ACCOUNT_ID ) - assert ( - ce.value.response["Error"]["Code"] == "ResourceNotDiscoveredException" - ) + assert ce.value.response["Error"]["Code"] == "ResourceNotDiscoveredException" # aggregate result = config_client.batch_get_resource_config( resourceKeys=[ @@ -2402,7 +2398,9 @@ def test_boto3_get_object_if_match(): with pytest.raises(botocore.exceptions.ClientError) as err: s3.get_object( - Bucket=bucket_name, Key=key, IfMatch='"hello"', + Bucket=bucket_name, + Key=key, + IfMatch='"hello"', ) e = err.value e.response["Error"]["Code"].should.equal("PreconditionFailed") @@ -2421,7 +2419,9 @@ def test_boto3_get_object_if_none_match(): with pytest.raises(botocore.exceptions.ClientError) as err: s3.get_object( - Bucket=bucket_name, Key=key, IfNoneMatch=etag, + Bucket=bucket_name, + Key=key, + 
IfNoneMatch=etag, ) e = err.value e.response["Error"].should.equal({"Code": "304", "Message": "Not Modified"}) @@ -2479,7 +2479,9 @@ def test_boto3_head_object_if_match(): with pytest.raises(botocore.exceptions.ClientError) as err: s3.head_object( - Bucket=bucket_name, Key=key, IfMatch='"hello"', + Bucket=bucket_name, + Key=key, + IfMatch='"hello"', ) e = err.value e.response["Error"].should.equal({"Code": "412", "Message": "Precondition Failed"}) @@ -2497,7 +2499,9 @@ def test_boto3_head_object_if_none_match(): with pytest.raises(botocore.exceptions.ClientError) as err: s3.head_object( - Bucket=bucket_name, Key=key, IfNoneMatch=etag, + Bucket=bucket_name, + Key=key, + IfNoneMatch=etag, ) e = err.value e.response["Error"].should.equal({"Code": "304", "Message": "Not Modified"}) @@ -3200,9 +3204,7 @@ def test_put_bucket_notification_errors(): ) assert err.value.response["Error"]["Code"] == "InvalidArgument" - assert ( - err.value.response["Error"]["Message"] == "The ARN is not well formed" - ) + assert err.value.response["Error"]["Message"] == "The ARN is not well formed" # Region not the same as the bucket: with pytest.raises(ClientError) as err: @@ -4075,9 +4077,7 @@ def test_public_access_block(): with pytest.raises(ClientError) as ce: client.get_public_access_block(Bucket="mybucket") - assert ( - ce.value.response["Error"]["Code"] == "NoSuchPublicAccessBlockConfiguration" - ) + assert ce.value.response["Error"]["Code"] == "NoSuchPublicAccessBlockConfiguration" assert ( ce.value.response["Error"]["Message"] == "The public access block configuration was not found" @@ -4157,9 +4157,7 @@ def test_public_access_block(): with pytest.raises(ClientError) as ce: client.get_public_access_block(Bucket="mybucket") - assert ( - ce.value.response["Error"]["Code"] == "NoSuchPublicAccessBlockConfiguration" - ) + assert ce.value.response["Error"]["Code"] == "NoSuchPublicAccessBlockConfiguration" @mock_s3 diff --git a/tests/test_s3/test_s3_cloudformation.py b/tests/test_s3/test_s3_cloudformation.py index ebaa03b7879c..68f191622880 100644 --- a/tests/test_s3/test_s3_cloudformation.py +++ b/tests/test_s3/test_s3_cloudformation.py @@ -14,7 +14,12 @@ def test_s3_bucket_cloudformation_basic(): template = { "AWSTemplateFormatVersion": "2010-09-09", - "Resources": {"testInstance": {"Type": "AWS::S3::Bucket", "Properties": {},}}, + "Resources": { + "testInstance": { + "Type": "AWS::S3::Bucket", + "Properties": {}, + } + }, "Outputs": {"Bucket": {"Value": {"Ref": "testInstance"}}}, } template_json = json.dumps(template) diff --git a/tests/test_sagemaker/test_sagemaker_endpoint.py b/tests/test_sagemaker/test_sagemaker_endpoint.py index 1e1ecd494b8e..45ae96b126fc 100644 --- a/tests/test_sagemaker/test_sagemaker_endpoint.py +++ b/tests/test_sagemaker/test_sagemaker_endpoint.py @@ -88,11 +88,15 @@ def test_delete_endpoint_config(): resp = sagemaker.delete_endpoint_config(EndpointConfigName=endpoint_config_name) with pytest.raises(ClientError) as e: sagemaker.describe_endpoint_config(EndpointConfigName=endpoint_config_name) - assert e.value.response["Error"]["Message"].startswith("Could not find endpoint configuration") + assert e.value.response["Error"]["Message"].startswith( + "Could not find endpoint configuration" + ) with pytest.raises(ClientError) as e: sagemaker.delete_endpoint_config(EndpointConfigName=endpoint_config_name) - assert e.value.response["Error"]["Message"].startswith( "Could not find endpoint configuration") + assert e.value.response["Error"]["Message"].startswith( + "Could not find endpoint 
configuration" + ) @mock_sagemaker @@ -134,7 +138,9 @@ def test_create_endpoint(): sagemaker.create_endpoint( EndpointName=endpoint_name, EndpointConfigName="NonexistentEndpointConfig" ) - assert e.value.response["Error"]["Message"].startswith("Could not find endpoint configuration") + assert e.value.response["Error"]["Message"].startswith( + "Could not find endpoint configuration" + ) model_name = "MyModel" _create_model(sagemaker, model_name) diff --git a/tests/test_sagemaker/test_sagemaker_notebooks.py b/tests/test_sagemaker/test_sagemaker_notebooks.py index 9fd082689da0..3a3137dec08f 100644 --- a/tests/test_sagemaker/test_sagemaker_notebooks.py +++ b/tests/test_sagemaker/test_sagemaker_notebooks.py @@ -49,8 +49,9 @@ def test_create_notebook_instance_minimal_params(): assert resp["NotebookInstanceArn"].endswith(args["NotebookInstanceName"]) assert resp["NotebookInstanceName"] == NAME_PARAM assert resp["NotebookInstanceStatus"] == "InService" - assert resp["Url"] == \ - "{}.notebook.{}.sagemaker.aws".format(NAME_PARAM, TEST_REGION_NAME) + assert resp["Url"] == "{}.notebook.{}.sagemaker.aws".format( + NAME_PARAM, TEST_REGION_NAME + ) assert resp["InstanceType"] == INSTANCE_TYPE_PARAM assert resp["RoleArn"] == FAKE_ROLE_ARN assert isinstance(resp["LastModifiedTime"], datetime.datetime) @@ -99,8 +100,9 @@ def test_create_notebook_instance_params(): assert resp["NotebookInstanceArn"].endswith(args["NotebookInstanceName"]) assert resp["NotebookInstanceName"] == NAME_PARAM assert resp["NotebookInstanceStatus"] == "InService" - assert resp["Url"] == \ - "{}.notebook.{}.sagemaker.aws".format(NAME_PARAM, TEST_REGION_NAME) + assert resp["Url"] == "{}.notebook.{}.sagemaker.aws".format( + NAME_PARAM, TEST_REGION_NAME + ) assert resp["InstanceType"] == INSTANCE_TYPE_PARAM assert resp["RoleArn"] == FAKE_ROLE_ARN assert isinstance(resp["LastModifiedTime"], datetime.datetime) @@ -111,8 +113,7 @@ def test_create_notebook_instance_params(): assert resp["SubnetId"] == FAKE_SUBNET_ID assert resp["SecurityGroups"] == FAKE_SECURITY_GROUP_IDS assert resp["KmsKeyId"] == FAKE_KMS_KEY_ID - assert resp["NotebookInstanceLifecycleConfigName"] == \ - FAKE_LIFECYCLE_CONFIG_NAME + assert resp["NotebookInstanceLifecycleConfigName"] == FAKE_LIFECYCLE_CONFIG_NAME assert resp["AcceleratorTypes"] == ACCELERATOR_TYPES_PARAM assert resp["DefaultCodeRepository"] == FAKE_DEFAULT_CODE_REPO assert resp["AdditionalCodeRepositories"] == FAKE_ADDL_CODE_REPOS @@ -135,9 +136,11 @@ def test_create_notebook_instance_bad_volume_size(): } with pytest.raises(ParamValidationError) as ex: sagemaker.create_notebook_instance(**args) - assert \ - ex.value.args[0] == \ - "Parameter validation failed:\nInvalid range for parameter VolumeSizeInGB, value: {}, valid range: 5-inf".format(vol_size) + assert ex.value.args[ + 0 + ] == "Parameter validation failed:\nInvalid range for parameter VolumeSizeInGB, value: {}, valid range: 5-inf".format( + vol_size + ) @mock_sagemaker @@ -238,17 +241,15 @@ def test_notebook_instance_lifecycle_config(): OnCreate=on_create, OnStart=on_start, ) - assert \ - e.value.response["Error"]["Message"].endswith( - "Notebook Instance Lifecycle Config already exists.)" - ) + assert e.value.response["Error"]["Message"].endswith( + "Notebook Instance Lifecycle Config already exists.)" + ) resp = sagemaker.describe_notebook_instance_lifecycle_config( NotebookInstanceLifecycleConfigName=name, ) assert resp["NotebookInstanceLifecycleConfigName"] == name - assert \ - 
resp["NotebookInstanceLifecycleConfigArn"].startswith("arn:aws:sagemaker") + assert resp["NotebookInstanceLifecycleConfigArn"].startswith("arn:aws:sagemaker") assert resp["NotebookInstanceLifecycleConfigArn"].endswith(name) assert resp["OnStart"] == on_start assert resp["OnCreate"] == on_create @@ -263,16 +264,14 @@ def test_notebook_instance_lifecycle_config(): sagemaker.describe_notebook_instance_lifecycle_config( NotebookInstanceLifecycleConfigName=name, ) - assert \ - e.value.response["Error"]["Message"].endswith( - "Notebook Instance Lifecycle Config does not exist.)" - ) + assert e.value.response["Error"]["Message"].endswith( + "Notebook Instance Lifecycle Config does not exist.)" + ) with pytest.raises(ClientError) as e: sagemaker.delete_notebook_instance_lifecycle_config( NotebookInstanceLifecycleConfigName=name, ) - assert \ - e.value.response["Error"]["Message"].endswith( - "Notebook Instance Lifecycle Config does not exist.)" - ) + assert e.value.response["Error"]["Message"].endswith( + "Notebook Instance Lifecycle Config does not exist.)" + ) diff --git a/tests/test_sagemaker/test_sagemaker_training.py b/tests/test_sagemaker/test_sagemaker_training.py index 8f1dda9fea0d..c7b631ae3a74 100644 --- a/tests/test_sagemaker/test_sagemaker_training.py +++ b/tests/test_sagemaker/test_sagemaker_training.py @@ -82,20 +82,21 @@ def test_create_training_job(): r"^arn:aws:sagemaker:.*:.*:training-job/{}$".format(training_job_name) ) assert resp["ModelArtifacts"]["S3ModelArtifacts"].startswith( - params["OutputDataConfig"]["S3OutputPath"] - ) + params["OutputDataConfig"]["S3OutputPath"] + ) assert training_job_name in (resp["ModelArtifacts"]["S3ModelArtifacts"]) - assert \ - resp["ModelArtifacts"]["S3ModelArtifacts"].endswith("output/model.tar.gz") + assert resp["ModelArtifacts"]["S3ModelArtifacts"].endswith("output/model.tar.gz") assert resp["TrainingJobStatus"] == "Completed" assert resp["SecondaryStatus"] == "Completed" assert resp["HyperParameters"] == params["HyperParameters"] - assert \ - resp["AlgorithmSpecification"]["TrainingImage"] == \ - params["AlgorithmSpecification"]["TrainingImage"] - assert \ - resp["AlgorithmSpecification"]["TrainingInputMode"] == \ - params["AlgorithmSpecification"]["TrainingInputMode"] + assert ( + resp["AlgorithmSpecification"]["TrainingImage"] + == params["AlgorithmSpecification"]["TrainingImage"] + ) + assert ( + resp["AlgorithmSpecification"]["TrainingInputMode"] + == params["AlgorithmSpecification"]["TrainingInputMode"] + ) assert "MetricDefinitions" in resp["AlgorithmSpecification"] assert "Name" in resp["AlgorithmSpecification"]["MetricDefinitions"][0] assert "Regex" in resp["AlgorithmSpecification"]["MetricDefinitions"][0] diff --git a/tests/test_secretsmanager/test_list_secrets.py b/tests/test_secretsmanager/test_list_secrets.py index 324ab1838e38..8d8ddbd6446e 100644 --- a/tests/test_secretsmanager/test_list_secrets.py +++ b/tests/test_secretsmanager/test_list_secrets.py @@ -123,7 +123,7 @@ def test_with_all_filter(): secrets = conn.list_secrets(Filters=[{"Key": "all", "Values": ["foo"]}]) secret_names = list(map(lambda s: s["Name"], secrets["SecretList"])) - assert sorted(secret_names) == ['bar', 'baz', 'foo', 'multi', 'qux'] + assert sorted(secret_names) == ["bar", "baz", "foo", "multi", "qux"] @mock_secretsmanager diff --git a/tests/test_secretsmanager/test_secretsmanager.py b/tests/test_secretsmanager/test_secretsmanager.py index 14d30bf36fd5..60a9130c61d4 100644 --- a/tests/test_secretsmanager/test_secretsmanager.py +++ 
b/tests/test_secretsmanager/test_secretsmanager.py @@ -56,9 +56,10 @@ def test_get_secret_that_does_not_exist(): with pytest.raises(ClientError) as cm: result = conn.get_secret_value(SecretId="i-dont-exist") - assert \ - "Secrets Manager can't find the specified secret." == \ - cm.value.response["Error"]["Message"] + assert ( + "Secrets Manager can't find the specified secret." + == cm.value.response["Error"]["Message"] + ) @mock_secretsmanager @@ -71,9 +72,10 @@ def test_get_secret_that_does_not_match(): with pytest.raises(ClientError) as cm: result = conn.get_secret_value(SecretId="i-dont-match") - assert \ - "Secrets Manager can't find the specified secret." == \ - cm.value.response["Error"]["Message"] + assert ( + "Secrets Manager can't find the specified secret." + == cm.value.response["Error"]["Message"] + ) @mock_secretsmanager @@ -97,9 +99,10 @@ def test_get_secret_that_has_no_value(): with pytest.raises(ClientError) as cm: result = conn.get_secret_value(SecretId="java-util-test-password") - assert \ - "Secrets Manager can't find the specified secret value for staging label: AWSCURRENT" == \ - cm.value.response["Error"]["Message"] + assert ( + "Secrets Manager can't find the specified secret value for staging label: AWSCURRENT" + == cm.value.response["Error"]["Message"] + ) @mock_secretsmanager @@ -113,11 +116,10 @@ def test_get_secret_version_that_does_not_exist(): with pytest.raises(ClientError) as cm: conn.get_secret_value(SecretId=secret_arn, VersionId=missing_version_id) - assert \ - ( - "An error occurred (ResourceNotFoundException) when calling the GetSecretValue operation: Secrets " - "Manager can't find the specified secret value for VersionId: 00000000-0000-0000-0000-000000000000" - ) == cm.value.response["Error"]["Message"] + assert ( + "An error occurred (ResourceNotFoundException) when calling the GetSecretValue operation: Secrets " + "Manager can't find the specified secret value for VersionId: 00000000-0000-0000-0000-000000000000" + ) == cm.value.response["Error"]["Message"] @mock_secretsmanager @@ -879,9 +881,10 @@ def test_update_secret_which_does_not_exit(): SecretId="test-secret", SecretString="barsecret" ) - assert \ - "Secrets Manager can't find the specified secret." == \ - cm.value.response["Error"]["Message"] + assert ( + "Secrets Manager can't find the specified secret." + == cm.value.response["Error"]["Message"] + ) @mock_secretsmanager @@ -897,8 +900,7 @@ def test_update_secret_marked_as_deleted(): ) assert ( - "because it was marked for deletion." - in cm.value.response["Error"]["Message"] + "because it was marked for deletion." 
in cm.value.response["Error"]["Message"] ) @@ -924,11 +926,17 @@ def test_tag_resource(): conn = boto3.client("secretsmanager", region_name="us-west-2") conn.create_secret(Name="test-secret", SecretString="foosecret") conn.tag_resource( - SecretId="test-secret", Tags=[{"Key": "FirstTag", "Value": "SomeValue"},], + SecretId="test-secret", + Tags=[ + {"Key": "FirstTag", "Value": "SomeValue"}, + ], ) conn.tag_resource( - SecretId="test-secret", Tags=[{"Key": "SecondTag", "Value": "AnotherValue"},], + SecretId="test-secret", + Tags=[ + {"Key": "SecondTag", "Value": "AnotherValue"}, + ], ) secrets = conn.list_secrets() @@ -940,7 +948,9 @@ def test_tag_resource(): with assert_raises(ClientError) as cm: conn.tag_resource( SecretId="dummy-test-secret", - Tags=[{"Key": "FirstTag", "Value": "SomeValue"},], + Tags=[ + {"Key": "FirstTag", "Value": "SomeValue"}, + ], ) assert_equal( diff --git a/tests/test_secretsmanager/test_server.py b/tests/test_secretsmanager/test_server.py index 1d3c9d218107..da41eb5fba80 100644 --- a/tests/test_secretsmanager/test_server.py +++ b/tests/test_secretsmanager/test_server.py @@ -89,9 +89,10 @@ def test_get_secret_that_has_no_value(): ) json_data = json.loads(get_secret.data.decode("utf-8")) - assert \ - json_data["message"] == \ - "Secrets Manager can't find the specified secret value for staging label: AWSCURRENT" + assert ( + json_data["message"] + == "Secrets Manager can't find the specified secret value for staging label: AWSCURRENT" + ) assert json_data["__type"] == "ResourceNotFoundException" diff --git a/tests/test_ses/test_ses_boto3.py b/tests/test_ses/test_ses_boto3.py index dee28210ca5a..2e58ef18d977 100644 --- a/tests/test_ses/test_ses_boto3.py +++ b/tests/test_ses/test_ses_boto3.py @@ -89,7 +89,9 @@ def test_send_email_when_verify_source(): conn = boto3.client("ses", region_name="us-east-1") kwargs = dict( - Destination={"ToAddresses": ["test_to@example.com"],}, + Destination={ + "ToAddresses": ["test_to@example.com"], + }, Message={ "Subject": {"Data": "test subject"}, "Body": {"Text": {"Data": "test body"}}, @@ -276,7 +278,16 @@ def test_send_email_notification_with_encoded_sender(): response = conn.send_email( Source=sender, Destination={"ToAddresses": ["your.friend@hotmail.com"]}, - Message={"Subject": {"Data": "hi",}, "Body": {"Text": {"Data": "there",}}}, + Message={ + "Subject": { + "Data": "hi", + }, + "Body": { + "Text": { + "Data": "there", + } + }, + }, ) response["ResponseMetadata"]["HTTPStatusCode"].should.equal(200) @@ -291,7 +302,9 @@ def test_create_configuration_set(): EventDestination={ "Name": "snsEvent", "Enabled": True, - "MatchingEventTypes": ["send",], + "MatchingEventTypes": [ + "send", + ], "SNSDestination": { "TopicARN": "arn:aws:sns:us-east-1:123456789012:myTopic" }, @@ -304,7 +317,9 @@ def test_create_configuration_set(): EventDestination={ "Name": "snsEvent", "Enabled": True, - "MatchingEventTypes": ["send",], + "MatchingEventTypes": [ + "send", + ], "SNSDestination": { "TopicARN": "arn:aws:sns:us-east-1:123456789012:myTopic" }, @@ -319,7 +334,9 @@ def test_create_configuration_set(): EventDestination={ "Name": "snsEvent", "Enabled": True, - "MatchingEventTypes": ["send",], + "MatchingEventTypes": [ + "send", + ], "SNSDestination": { "TopicARN": "arn:aws:sns:us-east-1:123456789012:myTopic" }, diff --git a/tests/test_sns/test_publishing_boto3.py b/tests/test_sns/test_publishing_boto3.py index 797ccdaba5fa..9dfe27656640 100644 --- a/tests/test_sns/test_publishing_boto3.py +++ b/tests/test_sns/test_publishing_boto3.py @@ -152,7 
+152,9 @@ def test_publish_to_sqs_msg_attr_byte_value(): sqs = boto3.resource("sqs", region_name="us-east-1") queue = sqs.create_queue(QueueName="test-queue") conn.subscribe( - TopicArn=topic_arn, Protocol="sqs", Endpoint=queue.attributes["QueueArn"], + TopicArn=topic_arn, + Protocol="sqs", + Endpoint=queue.attributes["QueueArn"], ) queue_raw = sqs.create_queue(QueueName="test-queue-raw") conn.subscribe( diff --git a/tests/test_sns/test_topics_boto3.py b/tests/test_sns/test_topics_boto3.py index 49aa656aaa3c..4414e9375832 100644 --- a/tests/test_sns/test_topics_boto3.py +++ b/tests/test_sns/test_topics_boto3.py @@ -525,7 +525,9 @@ def test_untag_resource_error(): @mock_sns def test_topic_kms_master_key_id_attribute(): client = boto3.client("sns", region_name="us-west-2") - resp = client.create_topic(Name="test-sns-no-key-attr",) + resp = client.create_topic( + Name="test-sns-no-key-attr", + ) topic_arn = resp["TopicArn"] resp = client.get_topic_attributes(TopicArn=topic_arn) resp["Attributes"].should_not.have.key("KmsMasterKeyId") @@ -538,7 +540,10 @@ def test_topic_kms_master_key_id_attribute(): resp["Attributes"]["KmsMasterKeyId"].should.equal("test-key") resp = client.create_topic( - Name="test-sns-with-key-attr", Attributes={"KmsMasterKeyId": "key-id",} + Name="test-sns-with-key-attr", + Attributes={ + "KmsMasterKeyId": "key-id", + }, ) topic_arn = resp["TopicArn"] resp = client.get_topic_attributes(TopicArn=topic_arn) diff --git a/tests/test_sqs/test_sqs.py b/tests/test_sqs/test_sqs.py index 57dd97ac3e6e..2b084f3755ef 100644 --- a/tests/test_sqs/test_sqs.py +++ b/tests/test_sqs/test_sqs.py @@ -719,7 +719,10 @@ def test_send_receive_message_with_attributes_with_labels(): response = queue.send_message( MessageBody="test message", MessageAttributes={ - "somevalue": {"StringValue": "somevalue", "DataType": "String.custom",} + "somevalue": { + "StringValue": "somevalue", + "DataType": "String.custom", + } }, ) @@ -2242,7 +2245,9 @@ def test_invoke_function_from_sqs_exception(): @mock_sqs def test_maximum_message_size_attribute_default(): sqs = boto3.resource("sqs", region_name="eu-west-3") - queue = sqs.create_queue(QueueName="test-queue",) + queue = sqs.create_queue( + QueueName="test-queue", + ) int(queue.attributes["MaximumMessageSize"]).should.equal(MAXIMUM_MESSAGE_LENGTH) with assert_raises(Exception) as e: queue.send_message(MessageBody="a" * (MAXIMUM_MESSAGE_LENGTH + 1)) diff --git a/tests/test_ssm/test_ssm_boto3.py b/tests/test_ssm/test_ssm_boto3.py index 1eeec09d0a6f..152a3c9c9c4b 100644 --- a/tests/test_ssm/test_ssm_boto3.py +++ b/tests/test_ssm/test_ssm_boto3.py @@ -309,25 +309,29 @@ def test_put_parameter_invalid_names(): client.put_parameter.when.called_with( Name="ssm_test", Value="value", Type="String" ).should.throw( - ClientError, invalid_prefix_err, + ClientError, + invalid_prefix_err, ) client.put_parameter.when.called_with( Name="SSM_TEST", Value="value", Type="String" ).should.throw( - ClientError, invalid_prefix_err, + ClientError, + invalid_prefix_err, ) client.put_parameter.when.called_with( Name="aws_test", Value="value", Type="String" ).should.throw( - ClientError, invalid_prefix_err, + ClientError, + invalid_prefix_err, ) client.put_parameter.when.called_with( Name="AWS_TEST", Value="value", Type="String" ).should.throw( - ClientError, invalid_prefix_err, + ClientError, + invalid_prefix_err, ) ssm_path = "/ssm_test/path/to/var" @@ -354,14 +358,16 @@ def test_put_parameter_invalid_names(): client.put_parameter.when.called_with( Name=aws_path, Value="value", 
Type="String" ).should.throw( - ClientError, "No access to reserved parameter name: {}.".format(aws_path), + ClientError, + "No access to reserved parameter name: {}.".format(aws_path), ) aws_path = "/AWS/PATH/TO/VAR" client.put_parameter.when.called_with( Name=aws_path, Value="value", Type="String" ).should.throw( - ClientError, "No access to reserved parameter name: {}.".format(aws_path), + ClientError, + "No access to reserved parameter name: {}.".format(aws_path), ) @@ -448,9 +454,7 @@ def test_get_parameter_with_version_and_labels(): with pytest.raises(ClientError) as ex: client.get_parameter(Name="test-2:2", WithDecryption=False) ex.value.response["Error"]["Code"].should.equal("ParameterNotFound") - ex.value.response["Error"]["Message"].should.equal( - "Parameter test-2:2 not found." - ) + ex.value.response["Error"]["Message"].should.equal("Parameter test-2:2 not found.") @mock_ssm diff --git a/tests/test_stepfunctions/test_stepfunctions.py b/tests/test_stepfunctions/test_stepfunctions.py index d5eb76ae72fa..42a2292d6b39 100644 --- a/tests/test_stepfunctions/test_stepfunctions.py +++ b/tests/test_stepfunctions/test_stepfunctions.py @@ -356,8 +356,10 @@ def test_state_machine_can_deleted_nonexisting_machine(): @mock_stepfunctions def test_state_machine_tagging_non_existent_resource_fails(): client = boto3.client("stepfunctions", region_name=region) - non_existent_arn = "arn:aws:states:{region}:{account}:stateMachine:non-existent".format( - region=region, account=ACCOUNT_ID + non_existent_arn = ( + "arn:aws:states:{region}:{account}:stateMachine:non-existent".format( + region=region, account=ACCOUNT_ID + ) ) with assert_raises(ClientError) as ex: client.tag_resource(resourceArn=non_existent_arn, tags=[]) @@ -368,8 +370,10 @@ def test_state_machine_tagging_non_existent_resource_fails(): @mock_stepfunctions def test_state_machine_untagging_non_existent_resource_fails(): client = boto3.client("stepfunctions", region_name=region) - non_existent_arn = "arn:aws:states:{region}:{account}:stateMachine:non-existent".format( - region=region, account=ACCOUNT_ID + non_existent_arn = ( + "arn:aws:states:{region}:{account}:stateMachine:non-existent".format( + region=region, account=ACCOUNT_ID + ) ) with assert_raises(ClientError) as ex: client.untag_resource(resourceArn=non_existent_arn, tagKeys=[]) @@ -386,7 +390,9 @@ def test_state_machine_tagging(): {"key": "tag_key2", "value": "tag_value2"}, ] machine = client.create_state_machine( - name="test", definition=str(simple_definition), roleArn=_get_default_role(), + name="test", + definition=str(simple_definition), + roleArn=_get_default_role(), ) client.tag_resource(resourceArn=machine["stateMachineArn"], tags=tags) resp = client.list_tags_for_resource(resourceArn=machine["stateMachineArn"]) diff --git a/tests/test_transcribe/test_transcribe_boto3.py b/tests/test_transcribe/test_transcribe_boto3.py index 3de958bc1140..8fed77979221 100644 --- a/tests/test_transcribe/test_transcribe_boto3.py +++ b/tests/test_transcribe/test_transcribe_boto3.py @@ -17,7 +17,9 @@ def test_run_medical_transcription_job_minimal_params(): args = { "MedicalTranscriptionJobName": job_name, "LanguageCode": "en-US", - "Media": {"MediaFileUri": "s3://my-bucket/my-media-file.wav",}, + "Media": { + "MediaFileUri": "s3://my-bucket/my-media-file.wav", + }, "OutputBucketName": "my-output-bucket", "Specialty": "PRIMARYCARE", "Type": "CONVERSATION", @@ -98,7 +100,9 @@ def test_run_medical_transcription_job_all_params(): "LanguageCode": "en-US", "MediaSampleRateHertz": 48000, 
"MediaFormat": "flac", - "Media": {"MediaFileUri": "s3://my-bucket/my-media-file.dat",}, + "Media": { + "MediaFileUri": "s3://my-bucket/my-media-file.dat", + }, "OutputBucketName": "my-output-bucket", "OutputEncryptionKMSKeyId": "arn:aws:kms:us-east-1:012345678901:key/37111b5e-8eff-4706-ae3a-d4f9d1d559fc", "Settings": { @@ -199,7 +203,9 @@ def test_run_medical_transcription_job_with_existing_job_name(): args = { "MedicalTranscriptionJobName": job_name, "LanguageCode": "en-US", - "Media": {"MediaFileUri": "s3://my-bucket/my-media-file.wav",}, + "Media": { + "MediaFileUri": "s3://my-bucket/my-media-file.wav", + }, "OutputBucketName": "my-output-bucket", "Specialty": "PRIMARYCARE", "Type": "CONVERSATION", @@ -222,7 +228,9 @@ def test_run_medical_transcription_job_nonexistent_vocabulary(): args = { "MedicalTranscriptionJobName": job_name, "LanguageCode": "en-US", - "Media": {"MediaFileUri": "s3://my-bucket/my-media-file.dat",}, + "Media": { + "MediaFileUri": "s3://my-bucket/my-media-file.dat", + }, "OutputBucketName": "my-output-bucket", "Settings": {"VocabularyName": "NonexistentVocabulary"}, "Specialty": "PRIMARYCARE", @@ -244,7 +252,9 @@ def run_job(index, target_status): args = { "MedicalTranscriptionJobName": job_name, "LanguageCode": "en-US", - "Media": {"MediaFileUri": "s3://my-bucket/my-media-file.wav",}, + "Media": { + "MediaFileUri": "s3://my-bucket/my-media-file.wav", + }, "OutputBucketName": "my-output-bucket", "Specialty": "PRIMARYCARE", "Type": "CONVERSATION", diff --git a/update_version_from_git.py b/update_version_from_git.py index d72dc4ae96c2..707f2f1e8bdc 100644 --- a/update_version_from_git.py +++ b/update_version_from_git.py @@ -30,7 +30,7 @@ def migrate_source_attribute(attr, to_this, target_file, regex): new_file = [] found = False - with open(target_file, 'r') as fp: + with open(target_file, "r") as fp: lines = fp.readlines() for line in lines: @@ -40,61 +40,78 @@ def migrate_source_attribute(attr, to_this, target_file, regex): new_file.append(line) if found: - with open(target_file, 'w') as fp: + with open(target_file, "w") as fp: fp.writelines(new_file) + def migrate_version(target_file, new_version): """Updates __version__ in the source file""" regex = r"['\"](.*)['\"]" - migrate_source_attribute('__version__', "'{new_version}'".format(new_version=new_version), target_file, regex) + migrate_source_attribute( + "__version__", + "'{new_version}'".format(new_version=new_version), + target_file, + regex, + ) def is_master_branch(): - cmd = ('git rev-parse --abbrev-ref HEAD') + cmd = "git rev-parse --abbrev-ref HEAD" tag_branch = subprocess.check_output(cmd, shell=True) - return tag_branch in [b'master\n'] + return tag_branch in [b"master\n"] + def git_tag_name(): - cmd = ('git describe --tags') + cmd = "git describe --tags" tag_branch = subprocess.check_output(cmd, shell=True) tag_branch = tag_branch.decode().strip() return tag_branch + def get_git_version_info(): - cmd = 'git describe --tags' + cmd = "git describe --tags" ver_str = subprocess.check_output(cmd, shell=True) - ver, commits_since, githash = ver_str.decode().strip().split('-') + ver, commits_since, githash = ver_str.decode().strip().split("-") return ver, commits_since, githash + def prerelease_version(): - """ return what the prerelease version should be. + """return what the prerelease version should be. 
     https://packaging.python.org/tutorials/distributing-packages/#pre-release-versioning
     0.0.2.dev22
     """
     ver, commits_since, githash = get_git_version_info()
     initpy_ver = get_version()
 
-    assert len(initpy_ver.split('.')) in [3, 4], 'moto/__init__.py version should be like 0.0.2.dev'
-    assert initpy_ver > ver, 'the moto/__init__.py version should be newer than the last tagged release.'
-    return '{initpy_ver}.{commits_since}'.format(initpy_ver=initpy_ver, commits_since=commits_since)
+    assert len(initpy_ver.split(".")) in [
+        3,
+        4,
+    ], "moto/__init__.py version should be like 0.0.2.dev"
+    assert (
+        initpy_ver > ver
+    ), "the moto/__init__.py version should be newer than the last tagged release."
+    return "{initpy_ver}.{commits_since}".format(
+        initpy_ver=initpy_ver, commits_since=commits_since
+    )
+
 
 def read(*parts):
-    """ Reads in file from *parts.
-    """
+    """Reads in file from *parts."""
     try:
-        return io.open(os.path.join(*parts), 'r', encoding='utf-8').read()
+        return io.open(os.path.join(*parts), "r", encoding="utf-8").read()
     except IOError:
-        return ''
+        return ""
+
 
 def get_version():
-    """ Returns version from moto/__init__.py
-    """
-    version_file = read('moto', '__init__.py')
-    version_match = re.search(r'^__version__ = [\'"]([^\'"]*)[\'"]',
-                              version_file, re.MULTILINE)
+    """Returns version from moto/__init__.py"""
+    version_file = read("moto", "__init__.py")
+    version_match = re.search(
+        r'^__version__ = [\'"]([^\'"]*)[\'"]', version_file, re.MULTILINE
+    )
     if version_match:
         return version_match.group(1)
-    raise RuntimeError('Unable to find version string.')
+    raise RuntimeError("Unable to find version string.")
 
 
 def release_version_correct():
@@ -107,14 +124,22 @@ def release_version_correct():
         initpy = os.path.abspath("moto/__init__.py")
 
         new_version = prerelease_version()
-        print('updating version in __init__.py to {new_version}'.format(new_version=new_version))
-        assert len(new_version.split('.')) >= 4, 'moto/__init__.py version should be like 0.0.2.dev'
+        print(
+            "updating version in __init__.py to {new_version}".format(
+                new_version=new_version
+            )
+        )
+        assert (
+            len(new_version.split(".")) >= 4
+        ), "moto/__init__.py version should be like 0.0.2.dev"
         migrate_version(initpy, new_version)
     else:
         assert False, "No non-master deployments yet"
         # check that we are a tag with the same version as in __init__.py
-        assert get_version() == git_tag_name(), 'git tag/branch name not the same as moto/__init__.py __verion__'
+        assert (
+            get_version() == git_tag_name()
+        ), "git tag/branch name not the same as moto/__init__.py __version__"
 
 
-if __name__ == '__main__':
+if __name__ == "__main__":
     release_version_correct()
diff --git a/wait_for.py b/wait_for.py
index 1f291c16b29a..be29b0140098 100755
--- a/wait_for.py
+++ b/wait_for.py
@@ -21,12 +21,12 @@
 print("Waiting for service to come up")
 while True:
     try:
-        urllib.urlopen('http://localhost:5000/', timeout=1)
+        urllib.urlopen("http://localhost:5000/", timeout=1)
         break
     except EXCEPTIONS:
         elapsed_s = time.time() - start_ts
         if elapsed_s > 60:
             raise
-        print('.')
+        print(".")
 
         time.sleep(1)

From 2705698d83c55630e17e195703a3a679d116f359 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Mat=C4=9Bj=20Cepl?=
Date: Tue, 6 Oct 2020 11:51:29 +0200
Subject: [PATCH 623/658] Mark functions as requiring network

---
 tests/test_awslambda/test_lambda.py | 11 +++++++++++
 1 file changed, 11 insertions(+)

diff --git a/tests/test_awslambda/test_lambda.py b/tests/test_awslambda/test_lambda.py
index 8d1efc7a9a54..1fe1de96f744 100644
---
a/tests/test_awslambda/test_lambda.py +++ b/tests/test_awslambda/test_lambda.py @@ -93,6 +93,7 @@ def test_list_functions(): result["Functions"].should.have.length_of(0) +@pytest.mark.network @mock_lambda def test_invoke_requestresponse_function(): conn = boto3.client("lambda", _lambda_region) @@ -137,6 +138,7 @@ def test_invoke_requestresponse_function(): assert "LogResult" not in success_result +@pytest.mark.network @mock_lambda def test_invoke_requestresponse_function_with_arn(): from moto.awslambda.models import ACCOUNT_ID @@ -169,6 +171,7 @@ def test_invoke_requestresponse_function_with_arn(): json.loads(payload).should.equal(in_data) +@pytest.mark.network @mock_lambda def test_invoke_event_function(): conn = boto3.client("lambda", _lambda_region) @@ -196,6 +199,7 @@ def test_invoke_event_function(): json.loads(success_result["Payload"].read().decode("utf-8")).should.equal(in_data) +@pytest.mark.network @mock_lambda def test_invoke_dryrun_function(): conn = boto3.client("lambda", _lambda_region) @@ -260,6 +264,7 @@ def test_invoke_function_get_ec2_volume(): actual_payload.should.equal(expected_payload) +@pytest.mark.network @mock_logs @mock_sns @mock_ec2 @@ -731,6 +736,7 @@ def test_list_create_list_get_delete_list(): conn.list_functions()["Functions"].should.have.length_of(0) +@pytest.mark.network @mock_lambda def test_invoke_lambda_error(): lambda_fx = """ @@ -846,6 +852,7 @@ def test_tags_not_found(): ).should.throw(botocore.client.ClientError) +@pytest.mark.network @mock_lambda def test_invoke_async_function(): conn = boto3.client("lambda", _lambda_region) @@ -1117,6 +1124,7 @@ def test_create_event_source_mapping(): assert response["State"] == "Enabled" +@pytest.mark.network @mock_logs @mock_lambda @mock_sqs @@ -1158,6 +1166,7 @@ def test_invoke_function_from_sqs(): ) +@pytest.mark.network @mock_logs @mock_lambda @mock_dynamodb2 @@ -1206,6 +1215,7 @@ def test_invoke_function_from_dynamodb_put(): ) +@pytest.mark.network @mock_logs @mock_lambda @mock_dynamodb2 @@ -1289,6 +1299,7 @@ def wait_for_log_msg(expected_msg, log_group): return False, received_messages +@pytest.mark.network @mock_logs @mock_lambda @mock_sqs From 6d364dc7aabf1ce7cce2a8e094e29bd56be5228b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Mat=C4=9Bj=20Cepl?= Date: Tue, 10 Nov 2020 18:14:50 +0100 Subject: [PATCH 624/658] Pytest model of exceptions uses 'value' attribute instead of 'exception'. 
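
Under nose, "with assert_raises(ClientError) as cm:" exposed the raised
exception as cm.exception; pytest's ExceptionInfo wrapper exposes the
exception instance as cm.value instead. A minimal sketch of the pattern
the tests move to (the client fixture and the error code shown are
illustrative, not taken from the suite):

    import pytest
    from botocore.exceptions import ClientError

    def test_missing_secret_raises(client):
        with pytest.raises(ClientError) as cm:
            client.get_secret_value(SecretId="i-dont-exist")
        # pytest exposes the raised exception instance on .value,
        # not on .exception as the nose context manager did
        assert cm.value.response["Error"]["Code"] == "ResourceNotFoundException"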
--- tests/test_acm/test_acm.py | 24 ++++++------- .../test_applicationautoscaling.py | 6 ++-- tests/test_ec2/test_elastic_block_store.py | 24 ++++++------- tests/test_ec2/test_subnets.py | 16 ++++----- tests/test_forecast/test_forecast.py | 21 ++++++------ tests/test_iot/test_iot.py | 4 +-- .../test_secretsmanager.py | 18 +++++----- tests/test_sqs/test_sqs.py | 8 ++--- .../test_stepfunctions/test_stepfunctions.py | 34 +++++++++---------- 9 files changed, 74 insertions(+), 81 deletions(-) diff --git a/tests/test_acm/test_acm.py b/tests/test_acm/test_acm.py index 0a3b32c1f46d..8a23123e77f0 100644 --- a/tests/test_acm/test_acm.py +++ b/tests/test_acm/test_acm.py @@ -1,18 +1,16 @@ from __future__ import unicode_literals import os -import boto3 -from freezegun import freeze_time -import sure # noqa import uuid +import boto3 +import pytest +import sure # noqa from botocore.exceptions import ClientError - +from freezegun import freeze_time from moto import mock_acm, settings from moto.core import ACCOUNT_ID - -from nose import SkipTest -from nose.tools import assert_raises +from unittest import SkipTest RESOURCE_FOLDER = os.path.join(os.path.dirname(__file__), "resources") _GET_RESOURCE = lambda x: open(os.path.join(RESOURCE_FOLDER, x), "rb").read() @@ -404,7 +402,7 @@ def test_operations_with_invalid_tags(): client = boto3.client("acm", region_name="eu-central-1") # request certificate with invalid tags - with assert_raises(ClientError) as ex: + with pytest.raises(ClientError) as ex: client.request_certificate( DomainName="example.com", Tags=[{"Key": "X" * 200, "Value": "Valid"}], @@ -415,7 +413,7 @@ def test_operations_with_invalid_tags(): ) # import certificate with invalid tags - with assert_raises(ClientError) as ex: + with pytest.raises(ClientError) as ex: client.import_certificate( Certificate=SERVER_CRT, PrivateKey=SERVER_KEY, @@ -434,7 +432,7 @@ def test_operations_with_invalid_tags(): arn = _import_cert(client) # add invalid tags to existing certificate - with assert_raises(ClientError) as ex: + with pytest.raises(ClientError) as ex: client.add_tags_to_certificate( CertificateArn=arn, Tags=[{"Key": "aws:xxx", "Value": "Valid"}, {"Key": "key2"}], @@ -445,7 +443,7 @@ def test_operations_with_invalid_tags(): ) # try removing invalid tags from existing certificate - with assert_raises(ClientError) as ex: + with pytest.raises(ClientError) as ex: client.remove_tags_from_certificate( CertificateArn=arn, Tags=[{"Key": "aws:xxx", "Value": "Valid"}] ) @@ -461,7 +459,7 @@ def test_add_too_many_tags(): arn = _import_cert(client) # Add 51 tags - with assert_raises(ClientError) as ex: + with pytest.raises(ClientError) as ex: client.add_tags_to_certificate( CertificateArn=arn, Tags=[{"Key": "a-%d" % i, "Value": "abcd"} for i in range(1, 52)], @@ -478,7 +476,7 @@ def test_add_too_many_tags(): client.list_tags_for_certificate(CertificateArn=arn)["Tags"].should.have.length_of( 49 ) - with assert_raises(ClientError) as ex: + with pytest.raises(ClientError) as ex: client.add_tags_to_certificate( CertificateArn=arn, Tags=[{"Key": "x-1", "Value": "xyz"}, {"Key": "x-2", "Value": "xyz"}], diff --git a/tests/test_applicationautoscaling/test_applicationautoscaling.py b/tests/test_applicationautoscaling/test_applicationautoscaling.py index f362cc2c1573..f8c629be2f35 100644 --- a/tests/test_applicationautoscaling/test_applicationautoscaling.py +++ b/tests/test_applicationautoscaling/test_applicationautoscaling.py @@ -343,7 +343,7 @@ def test_put_scaling_policy(): PolicyType="ABCDEFG", 
TargetTrackingScalingPolicyConfiguration=policy_body, ) - e.exception.response["Error"]["Message"].should.match( + e.value.response["Error"]["Message"].should.match( r"Unknown policy type .* specified." ) @@ -450,7 +450,7 @@ def test_delete_scaling_policies(): ResourceId=resource_id, ScalableDimension=scalable_dimension, ) - e.exception.response["Error"]["Message"].should.match(r"No scaling policy found .*") + e.value.response["Error"]["Message"].should.match(r"No scaling policy found .*") response = client.put_scaling_policy( PolicyName=policy_name, @@ -513,6 +513,6 @@ def test_deregister_scalable_target(): ResourceId=resource_id, ScalableDimension=scalable_dimension, ) - e.exception.response["Error"]["Message"].should.match( + e.value.response["Error"]["Message"].should.match( r"No scalable target found .*" ) diff --git a/tests/test_ec2/test_elastic_block_store.py b/tests/test_ec2/test_elastic_block_store.py index 9e4a3b7bbc43..b5d1d33f63ec 100644 --- a/tests/test_ec2/test_elastic_block_store.py +++ b/tests/test_ec2/test_elastic_block_store.py @@ -1,16 +1,14 @@ from __future__ import unicode_literals -# Ensure 'pytest.raises' context manager support for Python 2.6 -import pytest - -from moto.ec2 import ec2_backends import boto import boto3 -from botocore.exceptions import ClientError -from boto.exception import EC2ResponseError +# Ensure 'pytest.raises' context manager support for Python 2.6 +import pytest import sure # noqa - -from moto import mock_ec2_deprecated, mock_ec2 +from boto.exception import EC2ResponseError +from botocore.exceptions import ClientError +from moto import mock_ec2, mock_ec2_deprecated +from moto.ec2 import ec2_backends from moto.ec2.models import OWNER_ID from moto.kms import mock_kms @@ -920,12 +918,12 @@ def test_search_for_many_snapshots(): @mock_ec2 def test_create_unencrypted_volume_with_kms_key_fails(): resource = boto3.resource("ec2", region_name="us-east-1") - with assert_raises(ClientError) as ex: + with pytest.raises(ClientError) as ex: resource.create_volume( AvailabilityZone="us-east-1a", Encrypted=False, KmsKeyId="key", Size=10 ) - ex.exception.response["Error"]["Code"].should.equal("InvalidParameterDependency") - ex.exception.response["Error"]["Message"].should.contain("KmsKeyId") + ex.value.response["Error"]["Code"].should.equal("InvalidParameterDependency") + ex.value.response["Error"]["Message"].should.contain("KmsKeyId") @mock_kms @@ -933,9 +931,9 @@ def test_create_unencrypted_volume_with_kms_key_fails(): def test_create_encrypted_volume_without_kms_key_should_use_default_key(): kms = boto3.client("kms", region_name="us-east-1") # Default master key for EBS does not exist until needed. - with assert_raises(ClientError) as ex: + with pytest.raises(ClientError) as ex: kms.describe_key(KeyId="alias/aws/ebs") - ex.exception.response["Error"]["Code"].should.equal("NotFoundException") + ex.value.response["Error"]["Code"].should.equal("NotFoundException") # Creating an encrypted volume should create (and use) the default key. 
resource = boto3.resource("ec2", region_name="us-east-1") volume = resource.create_volume( diff --git a/tests/test_ec2/test_subnets.py b/tests/test_ec2/test_subnets.py index 85859ba40d65..f49dba586d59 100644 --- a/tests/test_ec2/test_subnets.py +++ b/tests/test_ec2/test_subnets.py @@ -1,16 +1,16 @@ from __future__ import unicode_literals -# Ensure 'pytest.raises' context manager support for Python 2.6 -import pytest +import random -import boto3 import boto +import boto3 import boto.vpc -from boto.exception import EC2ResponseError -from botocore.exceptions import ParamValidationError, ClientError -import sure # noqa -import random +# Ensure 'pytest.raises' context manager support for Python 2.6 +import pytest +import sure # noqa +from boto.exception import EC2ResponseError +from botocore.exceptions import ClientError, ParamValidationError from moto import mock_ec2, mock_ec2_deprecated @@ -426,7 +426,7 @@ def test_create_subnet_with_invalid_cidr_range_multiple_vpc_cidr_blocks(): vpc.is_default.shouldnt.be.ok subnet_cidr_block = "10.2.0.0/20" - with assert_raises(ClientError) as ex: + with pytest.raises(ClientError) as ex: subnet = ec2.create_subnet(VpcId=vpc.id, CidrBlock=subnet_cidr_block) str(ex.exception).should.equal( "An error occurred (InvalidSubnet.Range) when calling the CreateSubnet " diff --git a/tests/test_forecast/test_forecast.py b/tests/test_forecast/test_forecast.py index 32af519c7438..e2f5425a2541 100644 --- a/tests/test_forecast/test_forecast.py +++ b/tests/test_forecast/test_forecast.py @@ -1,13 +1,12 @@ from __future__ import unicode_literals import boto3 +import pytest import sure # noqa from botocore.exceptions import ClientError -from nose.tools import assert_raises -from parameterized import parameterized - from moto import mock_forecast from moto.core import ACCOUNT_ID +from parameterized import parameterized region = "us-east-1" account_id = None @@ -40,7 +39,7 @@ def test_forecast_dataset_group_create_invalid_domain(): client = boto3.client("forecast", region_name=region) invalid_domain = "INVALID" - with assert_raises(ClientError) as exc: + with pytest.raises(ClientError) as exc: client.create_dataset_group(DatasetGroupName=name, Domain=invalid_domain) exc.exception.response["Error"]["Code"].should.equal("ValidationException") exc.exception.response["Error"]["Message"].should.equal( @@ -55,7 +54,7 @@ def test_forecast_dataset_group_create_invalid_domain(): def test_forecast_dataset_group_create_invalid_name(name): client = boto3.client("forecast", region_name=region) - with assert_raises(ClientError) as exc: + with pytest.raises(ClientError) as exc: client.create_dataset_group(DatasetGroupName=name, Domain="CUSTOM") exc.exception.response["Error"]["Code"].should.equal("ValidationException") exc.exception.response["Error"]["Message"].should.contain( @@ -70,7 +69,7 @@ def test_forecast_dataset_group_create_duplicate_fails(): client = boto3.client("forecast", region_name=region) client.create_dataset_group(DatasetGroupName="name", Domain="RETAIL") - with assert_raises(ClientError) as exc: + with pytest.raises(ClientError) as exc: client.create_dataset_group(DatasetGroupName="name", Domain="RETAIL") exc.exception.response["Error"]["Code"].should.equal( @@ -122,7 +121,7 @@ def test_forecast_delete_dataset_group_missing(): "arn:aws:forecast:" + region + ":" + ACCOUNT_ID + ":dataset-group/missing" ) - with assert_raises(ClientError) as exc: + with pytest.raises(ClientError) as exc: client.delete_dataset_group(DatasetGroupArn=missing_dsg_arn) 
exc.exception.response["Error"]["Code"].should.equal("ResourceNotFoundException") exc.exception.response["Error"]["Message"].should.equal( @@ -152,7 +151,7 @@ def test_forecast_update_dataset_group_not_found(): dataset_group_arn = ( "arn:aws:forecast:" + region + ":" + ACCOUNT_ID + ":dataset-group/" + "test" ) - with assert_raises(ClientError) as exc: + with pytest.raises(ClientError) as exc: client.update_dataset_group(DatasetGroupArn=dataset_group_arn, DatasetArns=[]) exc.exception.response["Error"]["Code"].should.equal("ResourceNotFoundException") exc.exception.response["Error"]["Message"].should.equal( @@ -180,7 +179,7 @@ def test_describe_dataset_group_missing(): dataset_group_arn = ( "arn:aws:forecast:" + region + ":" + ACCOUNT_ID + ":dataset-group/name" ) - with assert_raises(ClientError) as exc: + with pytest.raises(ClientError) as exc: client.describe_dataset_group(DatasetGroupArn=dataset_group_arn) exc.exception.response["Error"]["Code"].should.equal("ResourceNotFoundException") exc.exception.response["Error"]["Message"].should.equal( @@ -192,7 +191,7 @@ def test_describe_dataset_group_missing(): def test_create_dataset_group_missing_datasets(): client = boto3.client("forecast", region_name=region) dataset_arn = "arn:aws:forecast:" + region + ":" + ACCOUNT_ID + ":dataset/name" - with assert_raises(ClientError) as exc: + with pytest.raises(ClientError) as exc: client.create_dataset_group( DatasetGroupName="name", Domain="CUSTOM", DatasetArns=[dataset_arn] ) @@ -212,7 +211,7 @@ def test_update_dataset_group_missing_datasets(): client.create_dataset_group(DatasetGroupName=name, Domain="CUSTOM") dataset_arn = "arn:aws:forecast:" + region + ":" + ACCOUNT_ID + ":dataset/name" - with assert_raises(ClientError) as exc: + with pytest.raises(ClientError) as exc: client.update_dataset_group( DatasetGroupArn=dataset_group_arn, DatasetArns=[dataset_arn] ) diff --git a/tests/test_iot/test_iot.py b/tests/test_iot/test_iot.py index 426b1fa69459..a6847adc24b6 100644 --- a/tests/test_iot/test_iot.py +++ b/tests/test_iot/test_iot.py @@ -608,7 +608,7 @@ def test_create_certificate_validation(): client.register_certificate( certificatePem=cert["certificatePem"], setAsActive=False ) - e.exception.response["Error"]["Message"].should.contain( + e.value.response["Error"]["Message"].should.contain( "The certificate is already provisioned or registered" ) @@ -616,7 +616,7 @@ def test_create_certificate_validation(): client.register_certificate_without_ca( certificatePem=cert["certificatePem"], status="ACTIVE" ) - e.exception.response["Error"]["Message"].should.contain( + e.value.response["Error"]["Message"].should.contain( "The certificate is already provisioned or registered" ) diff --git a/tests/test_secretsmanager/test_secretsmanager.py b/tests/test_secretsmanager/test_secretsmanager.py index 60a9130c61d4..834705fbaf87 100644 --- a/tests/test_secretsmanager/test_secretsmanager.py +++ b/tests/test_secretsmanager/test_secretsmanager.py @@ -631,17 +631,16 @@ def test_rotate_secret_rotation_period_too_long(): @mock_secretsmanager def test_put_secret_value_on_non_existing_secret(): conn = boto3.client("secretsmanager", region_name="us-west-2") - with assert_raises(ClientError) as cm: + with pytest.raises(ClientError) as cm: conn.put_secret_value( SecretId=DEFAULT_SECRET_NAME, SecretString="foosecret", VersionStages=["AWSCURRENT"], ) - assert_equal( - "Secrets Manager can't find the specified secret.", - cm.exception.response["Error"]["Message"], - ) + assert \ + "Secrets Manager can't find the specified 
secret." == \ + cm.exception.response["Error"]["Message"] @mock_secretsmanager @@ -945,7 +944,7 @@ def test_tag_resource(): {"Key": "SecondTag", "Value": "AnotherValue"}, ] - with assert_raises(ClientError) as cm: + with pytest.raises(ClientError) as cm: conn.tag_resource( SecretId="dummy-test-secret", Tags=[ @@ -953,10 +952,9 @@ def test_tag_resource(): ], ) - assert_equal( - "Secrets Manager can't find the specified secret.", - cm.exception.response["Error"]["Message"], - ) + assert \ + "Secrets Manager can't find the specified secret." == \ + cm.exception.response["Error"]["Message"] @mock_secretsmanager diff --git a/tests/test_sqs/test_sqs.py b/tests/test_sqs/test_sqs.py index 2b084f3755ef..a84c172af0a3 100644 --- a/tests/test_sqs/test_sqs.py +++ b/tests/test_sqs/test_sqs.py @@ -273,7 +273,7 @@ def test_message_send_with_attributes(): def test_message_with_invalid_attributes(): sqs = boto3.resource("sqs", region_name="us-east-1") queue = sqs.create_queue(QueueName="blah") - with assert_raises(ClientError) as e: + with pytest.raises(ClientError) as e: queue.send_message( MessageBody="derp", MessageAttributes={ @@ -2249,7 +2249,7 @@ def test_maximum_message_size_attribute_default(): QueueName="test-queue", ) int(queue.attributes["MaximumMessageSize"]).should.equal(MAXIMUM_MESSAGE_LENGTH) - with assert_raises(Exception) as e: + with pytest.raises(Exception) as e: queue.send_message(MessageBody="a" * (MAXIMUM_MESSAGE_LENGTH + 1)) ex = e.exception ex.response["Error"]["Code"].should.equal("InvalidParameterValue") @@ -2263,7 +2263,7 @@ def test_maximum_message_size_attribute_fails_for_invalid_values(): MAXIMUM_MESSAGE_SIZE_ATTR_UPPER_BOUND + 1, ] for message_size in invalid_values: - with assert_raises(ClientError) as e: + with pytest.raises(ClientError) as e: sqs.create_queue( QueueName="test-queue", Attributes={"MaximumMessageSize": str(message_size)}, @@ -2281,7 +2281,7 @@ def test_send_message_fails_when_message_size_greater_than_max_message_size(): Attributes={"MaximumMessageSize": str(message_size_limit)}, ) int(queue.attributes["MaximumMessageSize"]).should.equal(message_size_limit) - with assert_raises(ClientError) as e: + with pytest.raises(ClientError) as e: queue.send_message(MessageBody="a" * (message_size_limit + 1)) ex = e.exception ex.response["Error"]["Code"].should.equal("InvalidParameterValue") diff --git a/tests/test_stepfunctions/test_stepfunctions.py b/tests/test_stepfunctions/test_stepfunctions.py index 42a2292d6b39..81715a5f2977 100644 --- a/tests/test_stepfunctions/test_stepfunctions.py +++ b/tests/test_stepfunctions/test_stepfunctions.py @@ -361,10 +361,10 @@ def test_state_machine_tagging_non_existent_resource_fails(): region=region, account=ACCOUNT_ID ) ) - with assert_raises(ClientError) as ex: + with pytest.raises(ClientError) as ex: client.tag_resource(resourceArn=non_existent_arn, tags=[]) - ex.exception.response["Error"]["Code"].should.equal("ResourceNotFound") - ex.exception.response["Error"]["Message"].should.contain(non_existent_arn) + ex.value.response["Error"]["Code"].should.equal("ResourceNotFound") + ex.value.response["Error"]["Message"].should.contain(non_existent_arn) @mock_stepfunctions @@ -375,10 +375,10 @@ def test_state_machine_untagging_non_existent_resource_fails(): region=region, account=ACCOUNT_ID ) ) - with assert_raises(ClientError) as ex: + with pytest.raises(ClientError) as ex: client.untag_resource(resourceArn=non_existent_arn, tagKeys=[]) - ex.exception.response["Error"]["Code"].should.equal("ResourceNotFound") - 
ex.exception.response["Error"]["Message"].should.contain(non_existent_arn) + ex.value.response["Error"]["Code"].should.equal("ResourceNotFound") + ex.value.response["Error"]["Message"].should.contain(non_existent_arn) @mock_stepfunctions @@ -664,7 +664,7 @@ def test_state_machine_list_executions_with_pagination(): for page in page_iterator: page["executions"].should.have.length_of(25) - with assert_raises(ClientError) as ex: + with pytest.raises(ClientError) as ex: resp = client.list_executions( stateMachineArn=sm["stateMachineArn"], maxResults=10 ) @@ -674,16 +674,16 @@ def test_state_machine_list_executions_with_pagination(): statusFilter="ABORTED", nextToken=resp["nextToken"], ) - ex.exception.response["Error"]["Code"].should.equal("InvalidToken") - ex.exception.response["Error"]["Message"].should.contain( + ex.value.response["Error"]["Code"].should.equal("InvalidToken") + ex.value.response["Error"]["Message"].should.contain( "Input inconsistent with page token" ) - with assert_raises(ClientError) as ex: + with pytest.raises(ClientError) as ex: client.list_executions( stateMachineArn=sm["stateMachineArn"], nextToken="invalid" ) - ex.exception.response["Error"]["Code"].should.equal("InvalidToken") + ex.value.response["Error"]["Code"].should.equal("InvalidToken") @mock_stepfunctions @@ -867,10 +867,10 @@ def test_state_machine_cloudformation(): tag["value"].should.equal("value{}".format(i)) cf.Stack("test_stack").delete() - with assert_raises(ClientError) as ex: + with pytest.raises(ClientError) as ex: sf.describe_state_machine(stateMachineArn=output["StateMachineArn"]) - ex.exception.response["Error"]["Code"].should.equal("StateMachineDoesNotExist") - ex.exception.response["Error"]["Message"].should.contain("Does Not Exist") + ex.value.response["Error"]["Code"].should.equal("StateMachineDoesNotExist") + ex.value.response["Error"]["Message"].should.contain("Does Not Exist") @mock_stepfunctions @@ -941,10 +941,10 @@ def test_state_machine_cloudformation_update_with_replacement(): if tag["key"] == "key1": tag["value"].should.equal("updated_value") - with assert_raises(ClientError) as ex: + with pytest.raises(ClientError) as ex: sf.describe_state_machine(stateMachineArn=original_machine_arn) - ex.exception.response["Error"]["Code"].should.equal("StateMachineDoesNotExist") - ex.exception.response["Error"]["Message"].should.contain( + ex.value.response["Error"]["Code"].should.equal("StateMachineDoesNotExist") + ex.value.response["Error"]["Message"].should.contain( "State Machine Does Not Exist" ) From cb6731f3404b41eda68a78460ce75494e53c16ce Mon Sep 17 00:00:00 2001 From: Bert Blommers Date: Wed, 11 Nov 2020 15:54:01 +0000 Subject: [PATCH 625/658] Convert fixtures/exceptions to Pytest --- requirements-tests.txt | 1 - tests/test_acm/test_acm.py | 86 ++++++++++--------- .../test_validation.py | 3 +- tests/test_batch/test_batch.py | 3 +- .../test_dynamodb2/test_dynamodb_executor.py | 5 +- .../test_dynamodb_validation.py | 8 +- tests/test_ec2/test_subnets.py | 2 +- tests/test_emr/test_emr_boto3.py | 8 +- tests/test_forecast/test_forecast.py | 35 ++++---- tests/test_kms/test_kms.py | 9 +- tests/test_kms/test_kms_boto3.py | 81 +++++------------ tests/test_kms/test_utils.py | 19 ++-- tests/test_s3/test_s3.py | 9 +- tests/test_s3/test_s3_utils.py | 6 +- .../test_secretsmanager.py | 6 +- tests/test_sqs/test_sqs.py | 8 +- 16 files changed, 119 insertions(+), 170 deletions(-) diff --git a/requirements-tests.txt b/requirements-tests.txt index 817c38640639..847ce539e2d6 100644 --- a/requirements-tests.txt 
+++ b/requirements-tests.txt @@ -2,4 +2,3 @@ pytest pytest-cov sure==1.4.11 freezegun -parameterized>=0.7.0 diff --git a/tests/test_acm/test_acm.py b/tests/test_acm/test_acm.py index 8a23123e77f0..4e80ac8b4871 100644 --- a/tests/test_acm/test_acm.py +++ b/tests/test_acm/test_acm.py @@ -407,8 +407,8 @@ def test_operations_with_invalid_tags(): DomainName="example.com", Tags=[{"Key": "X" * 200, "Value": "Valid"}], ) - ex.exception.response["Error"]["Code"].should.equal("ValidationException") - ex.exception.response["Error"]["Message"].should.contain( + ex.value.response["Error"]["Code"].should.equal("ValidationException") + ex.value.response["Error"]["Message"].should.contain( "Member must have length less than or equal to 128" ) @@ -424,8 +424,8 @@ def test_operations_with_invalid_tags(): ], ) - ex.exception.response["Error"]["Code"].should.equal("ValidationException") - ex.exception.response["Error"]["Message"].should.contain( + ex.value.response["Error"]["Code"].should.equal("ValidationException") + ex.value.response["Error"]["Message"].should.contain( "Member must have length less than or equal to 256" ) @@ -437,8 +437,8 @@ def test_operations_with_invalid_tags(): CertificateArn=arn, Tags=[{"Key": "aws:xxx", "Value": "Valid"}, {"Key": "key2"}], ) - ex.exception.response["Error"]["Code"].should.equal("ValidationException") - ex.exception.response["Error"]["Message"].should.contain( + ex.value.response["Error"]["Code"].should.equal("ValidationException") + ex.value.response["Error"]["Message"].should.contain( "AWS internal tags cannot be changed with this API" ) @@ -447,8 +447,8 @@ def test_operations_with_invalid_tags(): client.remove_tags_from_certificate( CertificateArn=arn, Tags=[{"Key": "aws:xxx", "Value": "Valid"}] ) - ex.exception.response["Error"]["Code"].should.equal("ValidationException") - ex.exception.response["Error"]["Message"].should.contain( + ex.value.response["Error"]["Code"].should.equal("ValidationException") + ex.value.response["Error"]["Message"].should.contain( "AWS internal tags cannot be changed with this API" ) @@ -464,8 +464,8 @@ def test_add_too_many_tags(): CertificateArn=arn, Tags=[{"Key": "a-%d" % i, "Value": "abcd"} for i in range(1, 52)], ) - ex.exception.response["Error"]["Code"].should.equal("TooManyTagsException") - ex.exception.response["Error"]["Message"].should.contain("contains too many Tags") + ex.value.response["Error"]["Code"].should.equal("TooManyTagsException") + ex.value.response["Error"]["Message"].should.contain("contains too many Tags") client.list_tags_for_certificate(CertificateArn=arn)["Tags"].should.have.empty # Add 49 tags first, then try to add 2 more. 
@@ -481,10 +481,10 @@ def test_add_too_many_tags(): CertificateArn=arn, Tags=[{"Key": "x-1", "Value": "xyz"}, {"Key": "x-2", "Value": "xyz"}], ) - ex.exception.response["Error"]["Code"].should.equal("TooManyTagsException") - ex.exception.response["Error"]["Message"].should.contain("contains too many Tags") - ex.exception.response["Error"]["Message"].count("pqrs").should.equal(49) - ex.exception.response["Error"]["Message"].count("xyz").should.equal(2) + ex.value.response["Error"]["Code"].should.equal("TooManyTagsException") + ex.value.response["Error"]["Message"].should.contain("contains too many Tags") + ex.value.response["Error"]["Message"].count("pqrs").should.equal(49) + ex.value.response["Error"]["Message"].count("xyz").should.equal(2) client.list_tags_for_certificate(CertificateArn=arn)["Tags"].should.have.length_of( 49 ) @@ -502,20 +502,21 @@ def test_request_certificate_no_san(): # Also tests the SAN code -@freeze_time("2012-01-01 12:00:00", as_arg=True) @mock_acm -def test_request_certificate_issued_status(frozen_time): +def test_request_certificate_issued_status(): # After requesting a certificate, it should then auto-validate after 1 minute # Some sneaky programming for that ;-) client = boto3.client("acm", region_name="eu-central-1") - resp = client.request_certificate( - DomainName="google.com", - SubjectAlternativeNames=["google.com", "www.google.com", "mail.google.com"], - ) + with freeze_time("2012-01-01 12:00:00"): + resp = client.request_certificate( + DomainName="google.com", + SubjectAlternativeNames=["google.com", "www.google.com", "mail.google.com"], + ) arn = resp["CertificateArn"] - resp = client.describe_certificate(CertificateArn=arn) + with freeze_time("2012-01-01 12:00:00"): + resp = client.describe_certificate(CertificateArn=arn) resp["Certificate"]["CertificateArn"].should.equal(arn) resp["Certificate"]["DomainName"].should.equal("google.com") resp["Certificate"]["Issuer"].should.equal("Amazon") @@ -525,21 +526,21 @@ def test_request_certificate_issued_status(frozen_time): len(resp["Certificate"]["SubjectAlternativeNames"]).should.equal(3) # validation will be pending for 1 minute. - resp = client.describe_certificate(CertificateArn=arn) + with freeze_time("2012-01-01 12:00:00"): + resp = client.describe_certificate(CertificateArn=arn) resp["Certificate"]["CertificateArn"].should.equal(arn) resp["Certificate"]["Status"].should.equal("PENDING_VALIDATION") if not settings.TEST_SERVER_MODE: # Move time to get it issued. 
- frozen_time.move_to("2012-01-01 12:02:00") - resp = client.describe_certificate(CertificateArn=arn) + with freeze_time("2012-01-01 12:02:00"): + resp = client.describe_certificate(CertificateArn=arn) resp["Certificate"]["CertificateArn"].should.equal(arn) resp["Certificate"]["Status"].should.equal("ISSUED") -@freeze_time("2012-01-01 12:00:00", as_arg=True) @mock_acm -def test_request_certificate_with_mutiple_times(frozen_time): +def test_request_certificate_with_mutiple_times(): if settings.TEST_SERVER_MODE: raise SkipTest("Cant manipulate time in server mode") @@ -547,11 +548,12 @@ def test_request_certificate_with_mutiple_times(frozen_time): # Some sneaky programming for that ;-) client = boto3.client("acm", region_name="eu-central-1") - resp = client.request_certificate( - IdempotencyToken="test_token", - DomainName="google.com", - SubjectAlternativeNames=["google.com", "www.google.com", "mail.google.com"], - ) + with freeze_time("2012-01-01 12:00:00"): + resp = client.request_certificate( + IdempotencyToken="test_token", + DomainName="google.com", + SubjectAlternativeNames=["google.com", "www.google.com", "mail.google.com"], + ) original_arn = resp["CertificateArn"] # Should be able to request a certificate multiple times in an hour @@ -561,21 +563,21 @@ def test_request_certificate_with_mutiple_times(frozen_time): "2012-01-01 12:30:00", "2012-01-01 12:45:00", ): - frozen_time.move_to(time_intervals) + with freeze_time(time_intervals): + resp = client.request_certificate( + IdempotencyToken="test_token", + DomainName="google.com", + SubjectAlternativeNames=["google.com", "www.google.com", "mail.google.com"], + ) + arn = resp["CertificateArn"] + arn.should.equal(original_arn) + + # Move time + with freeze_time("2012-01-01 13:01:00"): resp = client.request_certificate( IdempotencyToken="test_token", DomainName="google.com", SubjectAlternativeNames=["google.com", "www.google.com", "mail.google.com"], ) - arn = resp["CertificateArn"] - arn.should.equal(original_arn) - - # Move time - frozen_time.move_to("2012-01-01 13:01:00") - resp = client.request_certificate( - IdempotencyToken="test_token", - DomainName="google.com", - SubjectAlternativeNames=["google.com", "www.google.com", "mail.google.com"], - ) arn = resp["CertificateArn"] arn.should_not.equal(original_arn) diff --git a/tests/test_applicationautoscaling/test_validation.py b/tests/test_applicationautoscaling/test_validation.py index 8b396d242bd5..056450ae1707 100644 --- a/tests/test_applicationautoscaling/test_validation.py +++ b/tests/test_applicationautoscaling/test_validation.py @@ -7,7 +7,6 @@ import pytest import sure # noqa from botocore.exceptions import ClientError -from parameterized import parameterized from .test_applicationautoscaling import register_scalable_target DEFAULT_REGION = "us-east-1" @@ -106,7 +105,7 @@ def test_register_scalable_target_ecs_with_non_existent_service_should_return_va err.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) -@parameterized( +@pytest.mark.parametrize("namespace,r_id,dimension,expected", [ ("ecs", "service/default/test-svc", "ecs:service:DesiredCount", True), ("ecs", "banana/default/test-svc", "ecs:service:DesiredCount", False), diff --git a/tests/test_batch/test_batch.py b/tests/test_batch/test_batch.py index 1d4aa1cf27b3..b8e50fd122b9 100644 --- a/tests/test_batch/test_batch.py +++ b/tests/test_batch/test_batch.py @@ -738,7 +738,8 @@ def test_submit_job(): else: raise RuntimeError("Batch job timed out") - resp = 
logs_client.describe_log_streams(logGroupName="/aws/batch/job") + resp = logs_client.describe_log_streams(logGroupName="/aws/batch/job", + logStreamNamePrefix="sayhellotomylittlefriend") len(resp["logStreams"]).should.equal(1) ls_name = resp["logStreams"][0]["logStreamName"] diff --git a/tests/test_dynamodb2/test_dynamodb_executor.py b/tests/test_dynamodb2/test_dynamodb_executor.py index 538bf41574cf..7270ba713ae8 100644 --- a/tests/test_dynamodb2/test_dynamodb_executor.py +++ b/tests/test_dynamodb2/test_dynamodb_executor.py @@ -1,9 +1,10 @@ +import pytest + from moto.dynamodb2.exceptions import IncorrectOperandType, IncorrectDataType from moto.dynamodb2.models import Item, DynamoType from moto.dynamodb2.parsing.executors import UpdateExpressionExecutor from moto.dynamodb2.parsing.expressions import UpdateExpressionParser from moto.dynamodb2.parsing.validators import UpdateExpressionValidator -from parameterized import parameterized def test_execution_of_if_not_exists_not_existing_value(): @@ -405,7 +406,7 @@ def test_execution_of_add_to_a_set(): assert expected_item == item -@parameterized( +@pytest.mark.parametrize("expression_attribute_values,unexpected_data_type", [ ( {":value": {"S": "10"}}, diff --git a/tests/test_dynamodb2/test_dynamodb_validation.py b/tests/test_dynamodb2/test_dynamodb_validation.py index 93adf88b2659..4d14beefe2d7 100644 --- a/tests/test_dynamodb2/test_dynamodb_validation.py +++ b/tests/test_dynamodb2/test_dynamodb_validation.py @@ -1,3 +1,5 @@ +import pytest + from moto.dynamodb2.exceptions import ( AttributeIsReservedKeyword, ExpressionAttributeValueNotDefined, @@ -10,12 +12,10 @@ from moto.dynamodb2.parsing.ast_nodes import ( NodeDepthLeftTypeFetcher, UpdateExpressionSetAction, - UpdateExpressionValue, DDBTypedValue, ) from moto.dynamodb2.parsing.expressions import UpdateExpressionParser from moto.dynamodb2.parsing.validators import UpdateExpressionValidator -from parameterized import parameterized def test_validation_of_update_expression_with_keyword(): @@ -41,7 +41,7 @@ def test_validation_of_update_expression_with_keyword(): assert e.keyword == "path" -@parameterized( +@pytest.mark.parametrize("update_expression", [ "SET a = #b + :val2", "SET a = :val2 + #b", @@ -101,7 +101,7 @@ def test_validation_of_update_expression_with_attribute_that_does_not_exist_in_i assert True -@parameterized( +@pytest.mark.parametrize("update_expression", [ "SET a = #c", "SET a = #c + #d", diff --git a/tests/test_ec2/test_subnets.py b/tests/test_ec2/test_subnets.py index f49dba586d59..66b23e790eec 100644 --- a/tests/test_ec2/test_subnets.py +++ b/tests/test_ec2/test_subnets.py @@ -428,7 +428,7 @@ def test_create_subnet_with_invalid_cidr_range_multiple_vpc_cidr_blocks(): subnet_cidr_block = "10.2.0.0/20" with pytest.raises(ClientError) as ex: subnet = ec2.create_subnet(VpcId=vpc.id, CidrBlock=subnet_cidr_block) - str(ex.exception).should.equal( + str(ex.value).should.equal( "An error occurred (InvalidSubnet.Range) when calling the CreateSubnet " "operation: The CIDR '{}' is invalid.".format(subnet_cidr_block) ) diff --git a/tests/test_emr/test_emr_boto3.py b/tests/test_emr/test_emr_boto3.py index 5ea433fc90fa..393121c4d296 100644 --- a/tests/test_emr/test_emr_boto3.py +++ b/tests/test_emr/test_emr_boto3.py @@ -5,6 +5,7 @@ from datetime import datetime import boto3 +import json import pytz import six import sure # noqa @@ -803,11 +804,8 @@ def test_instance_groups(): x["AutoScalingPolicy"]["Status"]["State"].should.equal("ATTACHED") returned_policy = dict(x["AutoScalingPolicy"]) del 
returned_policy["Status"] - for dimension in y["AutoScalingPolicy"]["Rules"]["Trigger"][ - "CloudWatchAlarmDefinition" - ]["Dimensions"]: - dimension["Value"] = cluster_id - returned_policy.should.equal(y["AutoScalingPolicy"]) + policy = json.loads(json.dumps(y["AutoScalingPolicy"]).replace("${emr.clusterId}", cluster_id)) + returned_policy.should.equal(policy) if "EbsConfiguration" in y: _do_assertion_ebs_configuration(x, y) # Configurations diff --git a/tests/test_forecast/test_forecast.py b/tests/test_forecast/test_forecast.py index e2f5425a2541..03503fec2826 100644 --- a/tests/test_forecast/test_forecast.py +++ b/tests/test_forecast/test_forecast.py @@ -6,7 +6,6 @@ from botocore.exceptions import ClientError from moto import mock_forecast from moto.core import ACCOUNT_ID -from parameterized import parameterized region = "us-east-1" account_id = None @@ -21,7 +20,7 @@ ] -@parameterized(valid_domains) +@pytest.mark.parametrize("domain", valid_domains) @mock_forecast def test_forecast_dataset_group_create(domain): name = "example_dataset_group" @@ -41,23 +40,23 @@ def test_forecast_dataset_group_create_invalid_domain(): with pytest.raises(ClientError) as exc: client.create_dataset_group(DatasetGroupName=name, Domain=invalid_domain) - exc.exception.response["Error"]["Code"].should.equal("ValidationException") - exc.exception.response["Error"]["Message"].should.equal( + exc.value.response["Error"]["Code"].should.equal("ValidationException") + exc.value.response["Error"]["Message"].should.equal( "1 validation error detected: Value '" + invalid_domain + "' at 'domain' failed to satisfy constraint: Member must satisfy enum value set ['INVENTORY_PLANNING', 'METRICS', 'RETAIL', 'EC2_CAPACITY', 'CUSTOM', 'WEB_TRAFFIC', 'WORK_FORCE']" ) -@parameterized([" ", "a" * 64]) +@pytest.mark.parametrize("name", [" ", "a" * 64]) @mock_forecast def test_forecast_dataset_group_create_invalid_name(name): client = boto3.client("forecast", region_name=region) with pytest.raises(ClientError) as exc: client.create_dataset_group(DatasetGroupName=name, Domain="CUSTOM") - exc.exception.response["Error"]["Code"].should.equal("ValidationException") - exc.exception.response["Error"]["Message"].should.contain( + exc.value.response["Error"]["Code"].should.equal("ValidationException") + exc.value.response["Error"]["Message"].should.contain( "1 validation error detected: Value '" + name + "' at 'datasetGroupName' failed to satisfy constraint: Member must" @@ -72,7 +71,7 @@ def test_forecast_dataset_group_create_duplicate_fails(): with pytest.raises(ClientError) as exc: client.create_dataset_group(DatasetGroupName="name", Domain="RETAIL") - exc.exception.response["Error"]["Code"].should.equal( + exc.value.response["Error"]["Code"].should.equal( "ResourceAlreadyExistsException" ) @@ -123,8 +122,8 @@ def test_forecast_delete_dataset_group_missing(): with pytest.raises(ClientError) as exc: client.delete_dataset_group(DatasetGroupArn=missing_dsg_arn) - exc.exception.response["Error"]["Code"].should.equal("ResourceNotFoundException") - exc.exception.response["Error"]["Message"].should.equal( + exc.value.response["Error"]["Code"].should.equal("ResourceNotFoundException") + exc.value.response["Error"]["Message"].should.equal( "No resource found " + missing_dsg_arn ) @@ -153,8 +152,8 @@ def test_forecast_update_dataset_group_not_found(): ) with pytest.raises(ClientError) as exc: client.update_dataset_group(DatasetGroupArn=dataset_group_arn, DatasetArns=[]) - 
exc.exception.response["Error"]["Code"].should.equal("ResourceNotFoundException") - exc.exception.response["Error"]["Message"].should.equal( + exc.value.response["Error"]["Code"].should.equal("ResourceNotFoundException") + exc.value.response["Error"]["Message"].should.equal( "No resource found " + dataset_group_arn ) @@ -181,8 +180,8 @@ def test_describe_dataset_group_missing(): ) with pytest.raises(ClientError) as exc: client.describe_dataset_group(DatasetGroupArn=dataset_group_arn) - exc.exception.response["Error"]["Code"].should.equal("ResourceNotFoundException") - exc.exception.response["Error"]["Message"].should.equal( + exc.value.response["Error"]["Code"].should.equal("ResourceNotFoundException") + exc.value.response["Error"]["Message"].should.equal( "No resource found " + dataset_group_arn ) @@ -195,8 +194,8 @@ def test_create_dataset_group_missing_datasets(): client.create_dataset_group( DatasetGroupName="name", Domain="CUSTOM", DatasetArns=[dataset_arn] ) - exc.exception.response["Error"]["Code"].should.equal("InvalidInputException") - exc.exception.response["Error"]["Message"].should.equal( + exc.value.response["Error"]["Code"].should.equal("InvalidInputException") + exc.value.response["Error"]["Message"].should.equal( "Dataset arns: [" + dataset_arn + "] are not found" ) @@ -215,7 +214,7 @@ def test_update_dataset_group_missing_datasets(): client.update_dataset_group( DatasetGroupArn=dataset_group_arn, DatasetArns=[dataset_arn] ) - exc.exception.response["Error"]["Code"].should.equal("InvalidInputException") - exc.exception.response["Error"]["Message"].should.equal( + exc.value.response["Error"]["Code"].should.equal("InvalidInputException") + exc.value.response["Error"]["Message"].should.equal( "Dataset arns: [" + dataset_arn + "] are not found" ) diff --git a/tests/test_kms/test_kms.py b/tests/test_kms/test_kms.py index fa5a353f85f1..6e4b332c4b9c 100644 --- a/tests/test_kms/test_kms.py +++ b/tests/test_kms/test_kms.py @@ -10,17 +10,12 @@ from boto.exception import JSONResponseError from boto.kms.exceptions import AlreadyExistsException, NotFoundException import pytest -from parameterized import parameterized from moto.core.exceptions import JsonRESTError from moto.kms.models import KmsBackend from moto.kms.exceptions import NotFoundException as MotoNotFoundException from moto import mock_kms_deprecated, mock_kms -PLAINTEXT_VECTORS = ( - (b"some encodeable plaintext",), - (b"some unencodeable plaintext \xec\x8a\xcf\xb6r\xe9\xb5\xeb\xff\xa23\x16",), - ("some unicode characters ø˚∆øˆˆ∆ßçøˆˆçßøˆ¨¥",), -) +PLAINTEXT_VECTORS = [b"some encodeable plaintext", b"some unencodeable plaintext \xec\x8a\xcf\xb6r\xe9\xb5\xeb\xff\xa23\x16", "some unicode characters ø˚∆øˆˆ∆ßçøˆˆçßøˆ¨¥"] def _get_encoded_value(plaintext): @@ -495,7 +490,7 @@ def test__create_alias__raises_if_alias_has_colon_character(): ex.status.should.equal(400) -@parameterized((("alias/my-alias_/",), ("alias/my_alias-/",))) +@pytest.mark.parametrize("alias_name", ["alias/my-alias_/", "alias/my_alias-/"]) @mock_kms_deprecated def test__create_alias__accepted_characters(alias_name): kms = boto.connect_kms() diff --git a/tests/test_kms/test_kms_boto3.py b/tests/test_kms/test_kms_boto3.py index 26e0eef9133e..ddf315812fb4 100644 --- a/tests/test_kms/test_kms_boto3.py +++ b/tests/test_kms/test_kms_boto3.py @@ -11,15 +11,12 @@ import sure # noqa from freezegun import freeze_time import pytest -from parameterized import parameterized from moto import mock_kms -PLAINTEXT_VECTORS = ( - (b"some encodeable plaintext",), - (b"some 
unencodeable plaintext \xec\x8a\xcf\xb6r\xe9\xb5\xeb\xff\xa23\x16",), - ("some unicode characters ø˚∆øˆˆ∆ßçøˆˆçßøˆ¨¥",), -) +PLAINTEXT_VECTORS = [b"some encodeable plaintext", + b"some unencodeable plaintext \xec\x8a\xcf\xb6r\xe9\xb5\xeb\xff\xa23\x16", + "some unicode characters ø˚∆øˆˆ∆ßçøˆˆçßøˆ¨¥"] def _get_encoded_value(plaintext): @@ -132,13 +129,7 @@ def test_describe_key(): response["KeyMetadata"].should_not.have.key("SigningAlgorithms") -@parameterized( - ( - ("alias/does-not-exist",), - ("arn:aws:kms:us-east-1:012345678912:alias/does-not-exist",), - ("invalid",), - ) -) +@pytest.mark.parametrize("key_id", ["alias/does-not-exist", "arn:aws:kms:us-east-1:012345678912:alias/does-not-exist", "invalid"]) @mock_kms def test_describe_key_via_alias_invalid_alias(key_id): client = boto3.client("kms", region_name="us-east-1") @@ -168,7 +159,7 @@ def test_generate_data_key(): response["KeyId"].should.equal(key_arn) -@parameterized(PLAINTEXT_VECTORS) +@pytest.mark.parametrize("plaintext", PLAINTEXT_VECTORS) @mock_kms def test_encrypt(plaintext): client = boto3.client("kms", region_name="us-west-2") @@ -187,7 +178,7 @@ def test_encrypt(plaintext): response["KeyId"].should.equal(key_arn) -@parameterized(PLAINTEXT_VECTORS) +@pytest.mark.parametrize("plaintext", PLAINTEXT_VECTORS) @mock_kms def test_decrypt(plaintext): client = boto3.client("kms", region_name="us-west-2") @@ -213,16 +204,8 @@ def test_decrypt(plaintext): decrypt_response["KeyId"].should.equal(key_arn) -@parameterized( - ( - ("not-a-uuid",), - ("alias/DoesNotExist",), - ("arn:aws:kms:us-east-1:012345678912:alias/DoesNotExist",), - ("d25652e4-d2d2-49f7-929a-671ccda580c6",), - ( - "arn:aws:kms:us-east-1:012345678912:key/d25652e4-d2d2-49f7-929a-671ccda580c6", - ), - ) +@pytest.mark.parametrize("key_id", + ["not-a-uuid", "alias/DoesNotExist", "arn:aws:kms:us-east-1:012345678912:alias/DoesNotExist", "d25652e4-d2d2-49f7-929a-671ccda580c6", "arn:aws:kms:us-east-1:012345678912:key/d25652e4-d2d2-49f7-929a-671ccda580c6"] ) @mock_kms def test_invalid_key_ids(key_id): @@ -232,7 +215,7 @@ def test_invalid_key_ids(key_id): client.generate_data_key(KeyId=key_id, NumberOfBytes=5) -@parameterized(PLAINTEXT_VECTORS) +@pytest.mark.parametrize("plaintext", PLAINTEXT_VECTORS) @mock_kms def test_kms_encrypt(plaintext): client = boto3.client("kms", region_name="us-east-1") @@ -369,7 +352,7 @@ def test_list_resource_tags(): assert response["Tags"][0]["TagValue"] == "string" -@parameterized( +@pytest.mark.parametrize("kwargs,expected_key_length", ( (dict(KeySpec="AES_256"), 32), (dict(KeySpec="AES_128"), 16), @@ -401,14 +384,8 @@ def test_generate_data_key_decrypt(): assert resp1["Plaintext"] == resp2["Plaintext"] -@parameterized( - ( - (dict(KeySpec="AES_257"),), - (dict(KeySpec="AES_128", NumberOfBytes=16),), - (dict(NumberOfBytes=2048),), - (dict(NumberOfBytes=0),), - (dict(),), - ) +@pytest.mark.parametrize("kwargs", + [dict(KeySpec="AES_257"), dict(KeySpec="AES_128", NumberOfBytes=16), dict(NumberOfBytes=2048), dict(NumberOfBytes=0), dict()] ) @mock_kms def test_generate_data_key_invalid_size_params(kwargs): @@ -421,15 +398,8 @@ def test_generate_data_key_invalid_size_params(kwargs): client.generate_data_key(KeyId=key["KeyMetadata"]["KeyId"], **kwargs) -@parameterized( - ( - ("alias/DoesNotExist",), - ("arn:aws:kms:us-east-1:012345678912:alias/DoesNotExist",), - ("d25652e4-d2d2-49f7-929a-671ccda580c6",), - ( - "arn:aws:kms:us-east-1:012345678912:key/d25652e4-d2d2-49f7-929a-671ccda580c6", - ), - ) +@pytest.mark.parametrize("key_id", + 
["alias/DoesNotExist", "arn:aws:kms:us-east-1:012345678912:alias/DoesNotExist", "d25652e4-d2d2-49f7-929a-671ccda580c6", "arn:aws:kms:us-east-1:012345678912:key/d25652e4-d2d2-49f7-929a-671ccda580c6"] ) @mock_kms def test_generate_data_key_invalid_key(key_id): @@ -439,13 +409,8 @@ def test_generate_data_key_invalid_key(key_id): client.generate_data_key(KeyId=key_id, KeySpec="AES_256") -@parameterized( - ( - ("alias/DoesExist", False), - ("arn:aws:kms:us-east-1:012345678912:alias/DoesExist", False), - ("", True), - ("arn:aws:kms:us-east-1:012345678912:key/", True), - ) +@pytest.mark.parametrize("prefix,append_key_id", + [("alias/DoesExist", False), ("arn:aws:kms:us-east-1:012345678912:alias/DoesExist", False), ("", True), ("arn:aws:kms:us-east-1:012345678912:key/", True)] ) @mock_kms def test_generate_data_key_all_valid_key_ids(prefix, append_key_id): @@ -473,7 +438,7 @@ def test_generate_data_key_without_plaintext_decrypt(): assert "Plaintext" not in resp1 -@parameterized(PLAINTEXT_VECTORS) +@pytest.mark.parametrize("plaintext", PLAINTEXT_VECTORS) @mock_kms def test_re_encrypt_decrypt(plaintext): client = boto3.client("kms", region_name="us-west-2") @@ -536,7 +501,7 @@ def test_re_encrypt_to_invalid_destination(): ) -@parameterized(((12,), (44,), (91,), (1,), (1024,))) +@pytest.mark.parametrize("number_of_bytes", [12, 44, 91, 1, 1024]) @mock_kms def test_generate_random(number_of_bytes): client = boto3.client("kms", region_name="us-west-2") @@ -547,14 +512,8 @@ def test_generate_random(number_of_bytes): len(response["Plaintext"]).should.equal(number_of_bytes) -@parameterized( - ( - (2048, botocore.exceptions.ClientError), - (1025, botocore.exceptions.ClientError), - (0, botocore.exceptions.ParamValidationError), - (-1, botocore.exceptions.ParamValidationError), - (-1024, botocore.exceptions.ParamValidationError), - ) +@pytest.mark.parametrize("number_of_bytes,error_type", + [(2048, botocore.exceptions.ClientError), (1025, botocore.exceptions.ClientError), (0, botocore.exceptions.ParamValidationError), (-1, botocore.exceptions.ParamValidationError), (-1024, botocore.exceptions.ParamValidationError)] ) @mock_kms def test_generate_random_invalid_number_of_bytes(number_of_bytes, error_type): diff --git a/tests/test_kms/test_utils.py b/tests/test_kms/test_utils.py index fa402b6b96ba..5a1046e3fc82 100644 --- a/tests/test_kms/test_utils.py +++ b/tests/test_kms/test_utils.py @@ -2,7 +2,6 @@ import sure # noqa import pytest -from parameterized import parameterized from moto.kms.exceptions import ( AccessDeniedException, @@ -22,7 +21,7 @@ Ciphertext, ) -ENCRYPTION_CONTEXT_VECTORS = ( +ENCRYPTION_CONTEXT_VECTORS = [ ( {"this": "is", "an": "encryption", "context": "example"}, b"an" b"encryption" b"context" b"example" b"this" b"is", @@ -31,8 +30,8 @@ {"a_this": "one", "b_is": "actually", "c_in": "order"}, b"a_this" b"one" b"b_is" b"actually" b"c_in" b"order", ), -) -CIPHERTEXT_BLOB_VECTORS = ( +] +CIPHERTEXT_BLOB_VECTORS = [ ( Ciphertext( key_id="d25652e4-d2d2-49f7-929a-671ccda580c6", @@ -57,7 +56,7 @@ b"1234567890123456" b"some ciphertext that is much longer now", ), -) +] def test_generate_data_key(): @@ -74,32 +73,32 @@ def test_generate_master_key(): len(test).should.equal(MASTER_KEY_LEN) -@parameterized(ENCRYPTION_CONTEXT_VECTORS) +@pytest.mark.parametrize("raw,serialized", ENCRYPTION_CONTEXT_VECTORS) def test_serialize_encryption_context(raw, serialized): test = _serialize_encryption_context(raw) test.should.equal(serialized) -@parameterized(CIPHERTEXT_BLOB_VECTORS) 
+@pytest.mark.parametrize("raw,_serialized", CIPHERTEXT_BLOB_VECTORS) def test_cycle_ciphertext_blob(raw, _serialized): test_serialized = _serialize_ciphertext_blob(raw) test_deserialized = _deserialize_ciphertext_blob(test_serialized) test_deserialized.should.equal(raw) -@parameterized(CIPHERTEXT_BLOB_VECTORS) +@pytest.mark.parametrize("raw,serialized", CIPHERTEXT_BLOB_VECTORS) def test_serialize_ciphertext_blob(raw, serialized): test = _serialize_ciphertext_blob(raw) test.should.equal(serialized) -@parameterized(CIPHERTEXT_BLOB_VECTORS) +@pytest.mark.parametrize("raw,serialized", CIPHERTEXT_BLOB_VECTORS) def test_deserialize_ciphertext_blob(raw, serialized): test = _deserialize_ciphertext_blob(serialized) test.should.equal(raw) -@parameterized(((ec[0],) for ec in ENCRYPTION_CONTEXT_VECTORS)) +@pytest.mark.parametrize("encryption_context", [ec[0] for ec in ENCRYPTION_CONTEXT_VECTORS]) def test_encrypt_decrypt_cycle(encryption_context): plaintext = b"some secret plaintext" master_key = Key("nop", "nop", "nop", "nop", "nop") diff --git a/tests/test_s3/test_s3.py b/tests/test_s3/test_s3.py index ee50485c1a2b..1f78e475b099 100644 --- a/tests/test_s3/test_s3.py +++ b/tests/test_s3/test_s3.py @@ -24,7 +24,6 @@ from boto.s3.connection import S3Connection from boto.s3.key import Key from freezegun import freeze_time -from parameterized import parameterized import six import requests from moto.s3.responses import DEFAULT_REGION_NAME @@ -426,7 +425,7 @@ def test_copy_key(): bucket.get_key("new-key").get_contents_as_string().should.equal(b"some value") -@parameterized([("the-unicode-💩-key",), ("key-with?question-mark",)]) +@pytest.mark.parametrize("key_name", ["the-unicode-💩-key", "key-with?question-mark"]) @mock_s3_deprecated def test_copy_key_with_special_chars(key_name): conn = boto.connect_s3("the_key", "the_secret") @@ -4016,7 +4015,7 @@ def test_root_dir_with_empty_name_works(): store_and_read_back_a_key("/") -@parameterized(["mybucket", "my.bucket"]) +@pytest.mark.parametrize("bucket_name", ["mybucket", "my.bucket"]) @mock_s3 def test_leading_slashes_not_removed(bucket_name): """Make sure that leading slashes are not removed internally.""" @@ -4038,8 +4037,8 @@ def test_leading_slashes_not_removed(bucket_name): e.value.response["Error"]["Code"].should.equal("NoSuchKey") -@parameterized( - [("foo/bar/baz",), ("foo",), ("foo/run_dt%3D2019-01-01%252012%253A30%253A00",)] +@pytest.mark.parametrize("key", + ["foo/bar/baz", "foo", "foo/run_dt%3D2019-01-01%252012%253A30%253A00"] ) @mock_s3 def test_delete_objects_with_url_encoded_key(key): diff --git a/tests/test_s3/test_s3_utils.py b/tests/test_s3/test_s3_utils.py index b90225597f2f..f6e653f86854 100644 --- a/tests/test_s3/test_s3_utils.py +++ b/tests/test_s3/test_s3_utils.py @@ -1,5 +1,6 @@ from __future__ import unicode_literals import os +import pytest from sure import expect from moto.s3.utils import ( bucket_name_from_url, @@ -8,7 +9,6 @@ clean_key_name, undo_clean_key_name, ) -from parameterized import parameterized def test_base_url(): @@ -93,7 +93,7 @@ def test_parse_region_from_url(): parse_region_from_url(url).should.equal(expected) -@parameterized( +@pytest.mark.parametrize("key,expected", [ ("foo/bar/baz", "foo/bar/baz"), ("foo", "foo"), @@ -107,7 +107,7 @@ def test_clean_key_name(key, expected): clean_key_name(key).should.equal(expected) -@parameterized( +@pytest.mark.parametrize("key,expected", [ ("foo/bar/baz", "foo/bar/baz"), ("foo", "foo"), diff --git a/tests/test_secretsmanager/test_secretsmanager.py 
b/tests/test_secretsmanager/test_secretsmanager.py index 834705fbaf87..9e2fbe0ae9a3 100644 --- a/tests/test_secretsmanager/test_secretsmanager.py +++ b/tests/test_secretsmanager/test_secretsmanager.py @@ -638,9 +638,7 @@ def test_put_secret_value_on_non_existing_secret(): VersionStages=["AWSCURRENT"], ) - assert \ - "Secrets Manager can't find the specified secret." == \ - cm.exception.response["Error"]["Message"] + cm.value.response["Error"]["Message"].should.equal("Secrets Manager can't find the specified secret.") @mock_secretsmanager @@ -954,7 +952,7 @@ def test_tag_resource(): assert \ "Secrets Manager can't find the specified secret." == \ - cm.exception.response["Error"]["Message"] + cm.value.response["Error"]["Message"] @mock_secretsmanager diff --git a/tests/test_sqs/test_sqs.py b/tests/test_sqs/test_sqs.py index a84c172af0a3..a828db3cb957 100644 --- a/tests/test_sqs/test_sqs.py +++ b/tests/test_sqs/test_sqs.py @@ -280,7 +280,7 @@ def test_message_with_invalid_attributes(): "öther_encodings": {"DataType": "String", "StringValue": "str"}, }, ) - ex = e.exception + ex = e.value ex.response["Error"]["Code"].should.equal("MessageAttributesInvalid") ex.response["Error"]["Message"].should.equal( "The message attribute name 'öther_encodings' is invalid. " @@ -2251,7 +2251,7 @@ def test_maximum_message_size_attribute_default(): int(queue.attributes["MaximumMessageSize"]).should.equal(MAXIMUM_MESSAGE_LENGTH) with pytest.raises(Exception) as e: queue.send_message(MessageBody="a" * (MAXIMUM_MESSAGE_LENGTH + 1)) - ex = e.exception + ex = e.value ex.response["Error"]["Code"].should.equal("InvalidParameterValue") @@ -2268,7 +2268,7 @@ def test_maximum_message_size_attribute_fails_for_invalid_values(): QueueName="test-queue", Attributes={"MaximumMessageSize": str(message_size)}, ) - ex = e.exception + ex = e.value ex.response["Error"]["Code"].should.equal("InvalidAttributeValue") @@ -2283,7 +2283,7 @@ def test_send_message_fails_when_message_size_greater_than_max_message_size(): int(queue.attributes["MaximumMessageSize"]).should.equal(message_size_limit) with pytest.raises(ClientError) as e: queue.send_message(MessageBody="a" * (message_size_limit + 1)) - ex = e.exception + ex = e.value ex.response["Error"]["Code"].should.equal("InvalidParameterValue") ex.response["Error"]["Message"].should.contain( "{} bytes".format(message_size_limit) From 273ca63d5990b541f50d5d7f6e3e932dfe87a849 Mon Sep 17 00:00:00 2001 From: Bert Blommers Date: Wed, 11 Nov 2020 15:55:37 +0000 Subject: [PATCH 626/658] Linting --- moto/applicationautoscaling/models.py | 5 +- moto/applicationautoscaling/responses.py | 6 +- moto/athena/responses.py | 7 +- moto/autoscaling/responses.py | 8 +- moto/cloudformation/parsing.py | 12 +- moto/cognitoidp/responses.py | 4 +- moto/config/exceptions.py | 12 +- moto/config/models.py | 6 +- moto/config/responses.py | 6 +- moto/core/utils.py | 8 +- moto/dynamodb2/models/__init__.py | 3 +- moto/ec2/exceptions.py | 11 +- moto/ec2/models.py | 18 +- moto/ec2/responses/vpcs.py | 8 +- moto/ecs/exceptions.py | 3 +- moto/ecs/models.py | 6 +- moto/elasticbeanstalk/models.py | 16 +- moto/elasticbeanstalk/responses.py | 23 +- moto/iam/access_control.py | 10 +- moto/iam/exceptions.py | 6 +- moto/iam/models.py | 36 +-- moto/iam/policy_validation.py | 25 +- moto/iot/responses.py | 3 +- moto/kinesis/models.py | 6 +- moto/kinesisvideo/exceptions.py | 3 +- moto/kinesisvideo/responses.py | 10 +- moto/kinesisvideoarchivedmedia/responses.py | 42 ++-- moto/managedblockchain/models.py | 20 +- 
moto/managedblockchain/responses.py | 23 +- moto/organizations/models.py | 6 +- moto/ram/models.py | 6 +- moto/s3/models.py | 17 +- moto/s3/responses.py | 4 +- moto/sagemaker/models.py | 15 +- moto/sagemaker/responses.py | 14 +- moto/sns/models.py | 14 +- moto/sqs/responses.py | 10 +- moto/stepfunctions/models.py | 4 +- moto/sts/models.py | 10 +- moto/transcribe/models.py | 6 +- tests/test_acm/test_acm.py | 25 +- tests/test_apigateway/test_apigateway.py | 32 +-- .../test_applicationautoscaling.py | 4 +- .../test_validation.py | 14 +- tests/test_athena/test_athena.py | 8 +- tests/test_autoscaling/test_autoscaling.py | 9 +- .../test_autoscaling_cloudformation.py | 9 +- tests/test_awslambda/test_lambda.py | 11 +- tests/test_batch/test_batch.py | 5 +- .../test_cloudformation_depends_on.py | 8 +- .../test_cloudformation_stack_crud_boto3.py | 6 +- .../test_cloudformation_stack_integration.py | 9 +- tests/test_codepipeline/test_codepipeline.py | 12 +- tests/test_cognitoidp/test_cognitoidp.py | 64 ++--- tests/test_config/test_config.py | 7 +- tests/test_core/test_auth.py | 4 +- tests/test_dynamodb2/test_dynamodb.py | 225 ++++-------------- .../test_dynamodb2/test_dynamodb_executor.py | 85 ++----- .../test_dynamodb_validation.py | 14 +- tests/test_ec2/test_elastic_block_store.py | 1 + tests/test_ec2/test_flow_logs.py | 4 +- tests/test_ec2/test_instances.py | 8 +- tests/test_ec2/test_subnets.py | 6 +- tests/test_ec2/test_vpn_connections.py | 4 +- tests/test_ecs/test_ecs_boto3.py | 50 +--- tests/test_elasticbeanstalk/test_eb.py | 48 +--- tests/test_emr/test_emr_boto3.py | 18 +- tests/test_forecast/test_forecast.py | 4 +- tests/test_iam/test_iam.py | 183 ++++---------- tests/test_iot/test_iot.py | 4 +- .../test_kinesisvideoarchivedmedia.py | 8 +- tests/test_kms/test_kms.py | 12 +- tests/test_kms/test_kms_boto3.py | 99 +++++--- tests/test_kms/test_utils.py | 4 +- tests/test_logs/test_integration.py | 16 +- tests/test_logs/test_logs.py | 4 +- .../test_managedblockchain_members.py | 18 +- .../test_managedblockchain_nodes.py | 33 +-- .../test_managedblockchain_proposals.py | 11 +- .../test_managedblockchain_proposalvotes.py | 3 +- .../test_organizations_boto3.py | 5 +- tests/test_s3/test_s3.py | 20 +- tests/test_s3/test_s3_cloudformation.py | 7 +- tests/test_s3/test_s3_utils.py | 10 +- .../test_secretsmanager.py | 25 +- tests/test_ses/test_ses_boto3.py | 27 +-- tests/test_sns/test_publishing_boto3.py | 4 +- tests/test_sns/test_topics_boto3.py | 9 +- tests/test_sqs/test_sqs.py | 9 +- tests/test_ssm/test_ssm_boto3.py | 18 +- .../test_stepfunctions/test_stepfunctions.py | 20 +- .../test_transcribe/test_transcribe_boto3.py | 20 +- 92 files changed, 515 insertions(+), 1200 deletions(-) diff --git a/moto/applicationautoscaling/models.py b/moto/applicationautoscaling/models.py index 9e80cc02212d..40d1094fca1d 100644 --- a/moto/applicationautoscaling/models.py +++ b/moto/applicationautoscaling/models.py @@ -72,10 +72,7 @@ def applicationautoscaling_backend(self): return applicationautoscaling_backends[self.region] def describe_scalable_targets( - self, - namespace, - r_ids=None, - dimension=None, + self, namespace, r_ids=None, dimension=None, ): """ Describe scalable targets. 
""" if r_ids is None: diff --git a/moto/applicationautoscaling/responses.py b/moto/applicationautoscaling/responses.py index b70d7b528900..ad63af9482f7 100644 --- a/moto/applicationautoscaling/responses.py +++ b/moto/applicationautoscaling/responses.py @@ -21,10 +21,8 @@ def describe_scalable_targets(self): scalable_dimension = self._get_param("ScalableDimension") max_results = self._get_int_param("MaxResults", 50) marker = self._get_param("NextToken") - all_scalable_targets = ( - self.applicationautoscaling_backend.describe_scalable_targets( - service_namespace, resource_ids, scalable_dimension - ) + all_scalable_targets = self.applicationautoscaling_backend.describe_scalable_targets( + service_namespace, resource_ids, scalable_dimension ) start = int(marker) + 1 if marker else 0 next_token = None diff --git a/moto/athena/responses.py b/moto/athena/responses.py index bc14774e1c76..b5e6d6a95777 100644 --- a/moto/athena/responses.py +++ b/moto/athena/responses.py @@ -82,12 +82,7 @@ def stop_query_execution(self): def error(self, msg, status): return ( - json.dumps( - { - "__type": "InvalidRequestException", - "Message": msg, - } - ), + json.dumps({"__type": "InvalidRequestException", "Message": msg,}), dict(status=status), ) diff --git a/moto/autoscaling/responses.py b/moto/autoscaling/responses.py index 1b4bb9f44324..a9651a7743b7 100644 --- a/moto/autoscaling/responses.py +++ b/moto/autoscaling/responses.py @@ -42,8 +42,8 @@ def create_launch_configuration(self): def describe_launch_configurations(self): names = self._get_multi_param("LaunchConfigurationNames.member") - all_launch_configurations = ( - self.autoscaling_backend.describe_launch_configurations(names) + all_launch_configurations = self.autoscaling_backend.describe_launch_configurations( + names ) marker = self._get_param("NextToken") all_names = [lc.name for lc in all_launch_configurations] @@ -153,8 +153,8 @@ def attach_load_balancer_target_groups(self): @amzn_request_id def describe_load_balancer_target_groups(self): group_name = self._get_param("AutoScalingGroupName") - target_group_arns = ( - self.autoscaling_backend.describe_load_balancer_target_groups(group_name) + target_group_arns = self.autoscaling_backend.describe_load_balancer_target_groups( + group_name ) template = self.response_template(DESCRIBE_LOAD_BALANCER_TARGET_GROUPS) return template.render(target_group_arns=target_group_arns) diff --git a/moto/cloudformation/parsing.py b/moto/cloudformation/parsing.py index c6049f175cb2..168536f79e23 100644 --- a/moto/cloudformation/parsing.py +++ b/moto/cloudformation/parsing.py @@ -254,8 +254,7 @@ def generate_resource_name(resource_type, stack_name, logical_id): def parse_resource( - resource_json, - resources_map, + resource_json, resources_map, ): resource_type = resource_json["Type"] resource_class = resource_class_from_type(resource_type) @@ -276,9 +275,7 @@ def parse_resource( def parse_resource_and_generate_name( - logical_id, - resource_json, - resources_map, + logical_id, resource_json, resources_map, ): resource_tuple = parse_resource(resource_json, resources_map) if not resource_tuple: @@ -698,10 +695,7 @@ def delete(self): ] parse_and_delete_resource( - resource_name, - resource_json, - self, - self._region_name, + resource_name, resource_json, self, self._region_name, ) self._parsed_resources.pop(parsed_resource.logical_resource_id) diff --git a/moto/cognitoidp/responses.py b/moto/cognitoidp/responses.py index d119c9e2173e..e10a122823a7 100644 --- a/moto/cognitoidp/responses.py +++ 
b/moto/cognitoidp/responses.py @@ -412,9 +412,7 @@ def confirm_sign_up(self): username = self._get_param("Username") confirmation_code = self._get_param("ConfirmationCode") cognitoidp_backends[self.region].confirm_sign_up( - client_id=client_id, - username=username, - confirmation_code=confirmation_code, + client_id=client_id, username=username, confirmation_code=confirmation_code, ) return "" diff --git a/moto/config/exceptions.py b/moto/config/exceptions.py index 52cfd245094c..4030b87a3b45 100644 --- a/moto/config/exceptions.py +++ b/moto/config/exceptions.py @@ -101,10 +101,8 @@ class InvalidDeliveryChannelNameException(JsonRESTError): code = 400 def __init__(self, name): - message = ( - "The delivery channel name '{name}' is not valid, blank string.".format( - name=name - ) + message = "The delivery channel name '{name}' is not valid, blank string.".format( + name=name ) super(InvalidDeliveryChannelNameException, self).__init__( "InvalidDeliveryChannelNameException", message @@ -289,10 +287,8 @@ class InvalidTagCharacters(JsonRESTError): code = 400 def __init__(self, tag, param="tags.X.member.key"): - message = ( - "1 validation error detected: Value '{}' at '{}' failed to satisfy ".format( - tag, param - ) + message = "1 validation error detected: Value '{}' at '{}' failed to satisfy ".format( + tag, param ) message += "constraint: Member must satisfy regular expression pattern: [\\\\p{L}\\\\p{Z}\\\\p{N}_.:/=+\\\\-@]+" diff --git a/moto/config/models.py b/moto/config/models.py index 3646f6704e86..99ae49e4467b 100644 --- a/moto/config/models.py +++ b/moto/config/models.py @@ -395,10 +395,8 @@ def __init__( self.delivery_s3_key_prefix = delivery_s3_key_prefix self.excluded_accounts = excluded_accounts or [] self.last_update_time = datetime2int(datetime.utcnow()) - self.organization_conformance_pack_arn = ( - "arn:aws:config:{0}:{1}:organization-conformance-pack/{2}".format( - region, DEFAULT_ACCOUNT_ID, self._unique_pack_name - ) + self.organization_conformance_pack_arn = "arn:aws:config:{0}:{1}:organization-conformance-pack/{2}".format( + region, DEFAULT_ACCOUNT_ID, self._unique_pack_name ) self.organization_conformance_pack_name = name diff --git a/moto/config/responses.py b/moto/config/responses.py index 489f2b5749ac..7dcc9a01bc6d 100644 --- a/moto/config/responses.py +++ b/moto/config/responses.py @@ -190,10 +190,8 @@ def describe_organization_conformance_pack_statuses(self): def get_organization_conformance_pack_detailed_status(self): # 'Filters' parameter is not implemented yet - statuses = ( - self.config_backend.get_organization_conformance_pack_detailed_status( - self._get_param("OrganizationConformancePackName") - ) + statuses = self.config_backend.get_organization_conformance_pack_detailed_status( + self._get_param("OrganizationConformancePackName") ) return json.dumps(statuses) diff --git a/moto/core/utils.py b/moto/core/utils.py index 7e86a7045d0f..97303a5080a9 100644 --- a/moto/core/utils.py +++ b/moto/core/utils.py @@ -350,15 +350,11 @@ def tags_from_query_string( tag_index = key.replace(prefix + ".", "").replace("." 
+ key_suffix, "") tag_key = querystring_dict.get( "{prefix}.{index}.{key_suffix}".format( - prefix=prefix, - index=tag_index, - key_suffix=key_suffix, + prefix=prefix, index=tag_index, key_suffix=key_suffix, ) )[0] tag_value_key = "{prefix}.{index}.{value_suffix}".format( - prefix=prefix, - index=tag_index, - value_suffix=value_suffix, + prefix=prefix, index=tag_index, value_suffix=value_suffix, ) if tag_value_key in querystring_dict: response_values[tag_key] = querystring_dict.get(tag_value_key)[0] diff --git a/moto/dynamodb2/models/__init__.py b/moto/dynamodb2/models/__init__.py index 2a3d8b8733c1..782ddcee9ce1 100644 --- a/moto/dynamodb2/models/__init__.py +++ b/moto/dynamodb2/models/__init__.py @@ -1052,8 +1052,7 @@ def update_table_global_indexes(self, name, global_index_updates): ) gsis_by_name[gsi_to_create["IndexName"]] = GlobalSecondaryIndex.create( - gsi_to_create, - table.table_key_attrs, + gsi_to_create, table.table_key_attrs, ) # in python 3.6, dict.values() returns a dict_values object, but we expect it to be a list in other diff --git a/moto/ec2/exceptions.py b/moto/ec2/exceptions.py index f43f83697672..e14a60bf1c2c 100644 --- a/moto/ec2/exceptions.py +++ b/moto/ec2/exceptions.py @@ -340,9 +340,7 @@ def __init__(self, dependant_parameter, parameter, parameter_value): super(InvalidDependantParameterError, self).__init__( "InvalidParameter", "{0} can't be empty if {1} is {2}.".format( - dependant_parameter, - parameter, - parameter_value, + dependant_parameter, parameter, parameter_value, ), ) @@ -352,9 +350,7 @@ def __init__(self, dependant_parameter, parameter_value, parameter): super(InvalidDependantParameterTypeError, self).__init__( "InvalidParameter", "{0} type must be {1} if {2} is provided.".format( - dependant_parameter, - parameter_value, - parameter, + dependant_parameter, parameter_value, parameter, ), ) @@ -362,8 +358,7 @@ def __init__(self, dependant_parameter, parameter_value, parameter): class InvalidAggregationIntervalParameterError(EC2ClientError): def __init__(self, parameter): super(InvalidAggregationIntervalParameterError, self).__init__( - "InvalidParameter", - "Invalid {0}".format(parameter), + "InvalidParameter", "Invalid {0}".format(parameter), ) diff --git a/moto/ec2/models.py b/moto/ec2/models.py index 47e2b0ac0790..bdb1cb03a367 100644 --- a/moto/ec2/models.py +++ b/moto/ec2/models.py @@ -1491,12 +1491,7 @@ def describe_images( # Limit by owner ids if owners: # support filtering by Owners=['self'] - owners = list( - map( - lambda o: OWNER_ID if o == "self" else o, - owners, - ) - ) + owners = list(map(lambda o: OWNER_ID if o == "self" else o, owners,)) images = [ami for ami in images if ami.owner_id in owners] # Generic filters @@ -3710,17 +3705,13 @@ def _validate_request( ): if log_group_name is None and log_destination is None: raise InvalidDependantParameterError( - "LogDestination", - "LogGroupName", - "not provided", + "LogDestination", "LogGroupName", "not provided", ) if log_destination_type == "s3": if log_group_name is not None: raise InvalidDependantParameterTypeError( - "LogDestination", - "cloud-watch-logs", - "LogGroupName", + "LogDestination", "cloud-watch-logs", "LogGroupName", ) elif log_destination_type == "cloud-watch-logs": if deliver_logs_permission_arn is None: @@ -3868,8 +3859,7 @@ def delete_flow_logs(self, flow_log_ids): if non_existing: raise InvalidFlowLogIdError( - len(flow_log_ids), - " ".join(x for x in flow_log_ids), + len(flow_log_ids), " ".join(x for x in flow_log_ids), ) return True diff --git 
a/moto/ec2/responses/vpcs.py b/moto/ec2/responses/vpcs.py index 1e8add46ca85..de4bb3febdaa 100644 --- a/moto/ec2/responses/vpcs.py +++ b/moto/ec2/responses/vpcs.py @@ -70,8 +70,8 @@ def describe_vpc_classic_link_dns_support(self): def enable_vpc_classic_link_dns_support(self): vpc_id = self._get_param("VpcId") - classic_link_dns_supported = ( - self.ec2_backend.enable_vpc_classic_link_dns_support(vpc_id=vpc_id) + classic_link_dns_supported = self.ec2_backend.enable_vpc_classic_link_dns_support( + vpc_id=vpc_id ) doc_date = self._get_doc_date() template = self.response_template(ENABLE_VPC_CLASSIC_LINK_DNS_SUPPORT_RESPONSE) @@ -81,8 +81,8 @@ def enable_vpc_classic_link_dns_support(self): def disable_vpc_classic_link_dns_support(self): vpc_id = self._get_param("VpcId") - classic_link_dns_supported = ( - self.ec2_backend.disable_vpc_classic_link_dns_support(vpc_id=vpc_id) + classic_link_dns_supported = self.ec2_backend.disable_vpc_classic_link_dns_support( + vpc_id=vpc_id ) doc_date = self._get_doc_date() template = self.response_template(DISABLE_VPC_CLASSIC_LINK_DNS_SUPPORT_RESPONSE) diff --git a/moto/ecs/exceptions.py b/moto/ecs/exceptions.py index cbd5d5f53295..72129224ea37 100644 --- a/moto/ecs/exceptions.py +++ b/moto/ecs/exceptions.py @@ -38,6 +38,5 @@ class ClusterNotFoundException(JsonRESTError): def __init__(self): super(ClusterNotFoundException, self).__init__( - error_type="ClientException", - message="Cluster not found", + error_type="ClientException", message="Cluster not found", ) diff --git a/moto/ecs/models.py b/moto/ecs/models.py index d7f840d53f1a..a4522660e0ee 100644 --- a/moto/ecs/models.py +++ b/moto/ecs/models.py @@ -431,10 +431,8 @@ def __init__(self, ec2_instance_id, region_name): "type": "STRINGSET", }, ] - self.container_instance_arn = ( - "arn:aws:ecs:{0}:012345678910:container-instance/{1}".format( - region_name, str(uuid.uuid4()) - ) + self.container_instance_arn = "arn:aws:ecs:{0}:012345678910:container-instance/{1}".format( + region_name, str(uuid.uuid4()) ) self.pending_tasks_count = 0 self.remaining_resources = [ diff --git a/moto/elasticbeanstalk/models.py b/moto/elasticbeanstalk/models.py index 303d34b87e96..3767846c1117 100644 --- a/moto/elasticbeanstalk/models.py +++ b/moto/elasticbeanstalk/models.py @@ -8,11 +8,7 @@ class FakeEnvironment(BaseModel): def __init__( - self, - application, - environment_name, - solution_stack_name, - tags, + self, application, environment_name, solution_stack_name, tags, ): self.application = weakref.proxy( application @@ -53,10 +49,7 @@ def __init__(self, backend, application_name): self.environments = dict() def create_environment( - self, - environment_name, - solution_stack_name, - tags, + self, environment_name, solution_stack_name, tags, ): if environment_name in self.environments: raise InvalidParameterValueError @@ -93,10 +86,7 @@ def create_application(self, application_name): raise InvalidParameterValueError( "Application {} already exists.".format(application_name) ) - new_app = FakeApplication( - backend=self, - application_name=application_name, - ) + new_app = FakeApplication(backend=self, application_name=application_name,) self.applications[application_name] = new_app return new_app diff --git a/moto/elasticbeanstalk/responses.py b/moto/elasticbeanstalk/responses.py index f35e0f3ff99a..387cbb3ea24a 100644 --- a/moto/elasticbeanstalk/responses.py +++ b/moto/elasticbeanstalk/responses.py @@ -18,16 +18,11 @@ def create_application(self): ) template = self.response_template(EB_CREATE_APPLICATION) - return 
template.render( - region_name=self.backend.region, - application=app, - ) + return template.render(region_name=self.backend.region, application=app,) def describe_applications(self): template = self.response_template(EB_DESCRIBE_APPLICATIONS) - return template.render( - applications=self.backend.applications.values(), - ) + return template.render(applications=self.backend.applications.values(),) def create_environment(self): application_name = self._get_param("ApplicationName") @@ -47,18 +42,13 @@ def create_environment(self): ) template = self.response_template(EB_CREATE_ENVIRONMENT) - return template.render( - environment=env, - region=self.backend.region, - ) + return template.render(environment=env, region=self.backend.region,) def describe_environments(self): envs = self.backend.describe_environments() template = self.response_template(EB_DESCRIBE_ENVIRONMENTS) - return template.render( - environments=envs, - ) + return template.render(environments=envs,) def list_available_solution_stacks(self): return EB_LIST_AVAILABLE_SOLUTION_STACKS @@ -78,10 +68,7 @@ def list_tags_for_resource(self): tags = self.backend.list_tags_for_resource(resource_arn) template = self.response_template(EB_LIST_TAGS_FOR_RESOURCE) - return template.render( - tags=tags, - arn=resource_arn, - ) + return template.render(tags=tags, arn=resource_arn,) EB_CREATE_APPLICATION = """ diff --git a/moto/iam/access_control.py b/moto/iam/access_control.py index abf51928a40c..bcde25d9ea0f 100644 --- a/moto/iam/access_control.py +++ b/moto/iam/access_control.py @@ -125,12 +125,10 @@ def __init__(self, access_key_id, headers): @property def arn(self): - return ( - "arn:aws:sts::{account_id}:assumed-role/{role_name}/{session_name}".format( - account_id=ACCOUNT_ID, - role_name=self._owner_role_name, - session_name=self._session_name, - ) + return "arn:aws:sts::{account_id}:assumed-role/{role_name}/{session_name}".format( + account_id=ACCOUNT_ID, + role_name=self._owner_role_name, + session_name=self._session_name, ) def create_credentials(self): diff --git a/moto/iam/exceptions.py b/moto/iam/exceptions.py index e1070c42ed0a..1d0f3ca01180 100644 --- a/moto/iam/exceptions.py +++ b/moto/iam/exceptions.py @@ -88,10 +88,8 @@ class InvalidTagCharacters(RESTError): code = 400 def __init__(self, tag, param="tags.X.member.key"): - message = ( - "1 validation error detected: Value '{}' at '{}' failed to satisfy ".format( - tag, param - ) + message = "1 validation error detected: Value '{}' at '{}' failed to satisfy ".format( + tag, param ) message += "constraint: Member must satisfy regular expression pattern: [\\p{L}\\p{Z}\\p{N}_.:/=+\\-@]+" diff --git a/moto/iam/models.py b/moto/iam/models.py index a28e3b9fc58e..76b824d609bb 100755 --- a/moto/iam/models.py +++ b/moto/iam/models.py @@ -362,12 +362,7 @@ def __init__( self.update(policy_name, policy_document, group_names, role_names, user_names) def update( - self, - policy_name, - policy_document, - group_names, - role_names, - user_names, + self, policy_name, policy_document, group_names, role_names, user_names, ): self.policy_name = policy_name self.policy_document = ( @@ -409,11 +404,7 @@ def create_from_cloudformation_json( @classmethod def update_from_cloudformation_json( - cls, - original_resource, - new_resource_name, - cloudformation_json, - region_name, + cls, original_resource, new_resource_name, cloudformation_json, region_name, ): properties = cloudformation_json["Properties"] @@ -816,18 +807,11 @@ def create_from_cloudformation_json( user_name = properties.get("UserName") 
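The `create_from_cloudformation_json` / `update_from_cloudformation_json` hooks being reflowed here all share one contract: read the resource's CloudFormation `Properties` dict, apply defaults, then delegate to the ordinary backend call. A rough, self-contained sketch of that shape (`FakeBackend` and `ExampleAccessKey` are invented stand-ins, not moto classes):

    class FakeBackend:
        # Stand-in for a moto backend; just echoes what the hook passed through.
        def create_access_key(self, user_name, status="Active"):
            return {"UserName": user_name, "Status": status}

    class ExampleAccessKey:
        backend = FakeBackend()

        @classmethod
        def create_from_cloudformation_json(
            cls, resource_name, cloudformation_json, region_name
        ):
            # Pull values from the CF Properties dict, then delegate.
            properties = cloudformation_json["Properties"]
            user_name = properties.get("UserName")
            status = properties.get("Status", "Active")
            return cls.backend.create_access_key(user_name, status=status)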
status = properties.get("Status", "Active") - return iam_backend.create_access_key( - user_name, - status=status, - ) + return iam_backend.create_access_key(user_name, status=status,) @classmethod def update_from_cloudformation_json( - cls, - original_resource, - new_resource_name, - cloudformation_json, - region_name, + cls, original_resource, new_resource_name, cloudformation_json, region_name, ): properties = cloudformation_json["Properties"] @@ -1155,11 +1139,7 @@ def create_from_cloudformation_json( @classmethod def update_from_cloudformation_json( - cls, - original_resource, - new_resource_name, - cloudformation_json, - region_name, + cls, original_resource, new_resource_name, cloudformation_json, region_name, ): properties = cloudformation_json["Properties"] @@ -2577,11 +2557,7 @@ def update_inline_policy( inline_policy = self.get_inline_policy(resource_name) inline_policy.unapply_policy(self) inline_policy.update( - policy_name, - policy_document, - group_names, - role_names, - user_names, + policy_name, policy_document, group_names, role_names, user_names, ) inline_policy.apply_policy(self) return inline_policy diff --git a/moto/iam/policy_validation.py b/moto/iam/policy_validation.py index 251af606770e..95610ac4db1e 100644 --- a/moto/iam/policy_validation.py +++ b/moto/iam/policy_validation.py @@ -343,10 +343,8 @@ def _validate_resource_format(self, resource): resource_partitions = resource.partition(":") if resource_partitions[1] == "": - self._resource_error = ( - 'Resource {resource} must be in ARN format or "*".'.format( - resource=resource - ) + self._resource_error = 'Resource {resource} must be in ARN format or "*".'.format( + resource=resource ) return @@ -392,14 +390,15 @@ def _validate_resource_format(self, resource): service = resource_partitions[0] - if ( - service in SERVICE_TYPE_REGION_INFORMATION_ERROR_ASSOCIATIONS.keys() - and not resource_partitions[2].startswith(":") + if service in SERVICE_TYPE_REGION_INFORMATION_ERROR_ASSOCIATIONS.keys() and not resource_partitions[ + 2 + ].startswith( + ":" ): - self._resource_error = ( - SERVICE_TYPE_REGION_INFORMATION_ERROR_ASSOCIATIONS[service].format( - resource=resource - ) + self._resource_error = SERVICE_TYPE_REGION_INFORMATION_ERROR_ASSOCIATIONS[ + service + ].format( + resource=resource ) return @@ -521,8 +520,8 @@ def _validate_iso_8601_datetime(datetime): assert 0 <= int(time_zone_minutes) <= 59 else: seconds_with_decimal_fraction = time_parts[2] - seconds_with_decimal_fraction_partition = ( - seconds_with_decimal_fraction.partition(".") + seconds_with_decimal_fraction_partition = seconds_with_decimal_fraction.partition( + "." 
) seconds = seconds_with_decimal_fraction_partition[0] assert 0 <= int(seconds) <= 59 diff --git a/moto/iot/responses.py b/moto/iot/responses.py index 7f2c602ea294..15c62d91ea9f 100644 --- a/moto/iot/responses.py +++ b/moto/iot/responses.py @@ -340,8 +340,7 @@ def register_certificate_without_ca(self): status = self._get_param("status") cert = self.iot_backend.register_certificate_without_ca( - certificate_pem=certificate_pem, - status=status, + certificate_pem=certificate_pem, status=status, ) return json.dumps( dict(certificateId=cert.certificate_id, certificateArn=cert.arn) diff --git a/moto/kinesis/models.py b/moto/kinesis/models.py index 4548fb347e05..280402d5f025 100644 --- a/moto/kinesis/models.py +++ b/moto/kinesis/models.py @@ -261,11 +261,7 @@ def create_from_cloudformation_json( @classmethod def update_from_cloudformation_json( - cls, - original_resource, - new_resource_name, - cloudformation_json, - region_name, + cls, original_resource, new_resource_name, cloudformation_json, region_name, ): properties = cloudformation_json["Properties"] diff --git a/moto/kinesisvideo/exceptions.py b/moto/kinesisvideo/exceptions.py index 33c7e603a745..e2e119b3799d 100644 --- a/moto/kinesisvideo/exceptions.py +++ b/moto/kinesisvideo/exceptions.py @@ -20,6 +20,5 @@ class ResourceInUseException(KinesisvideoClientError): def __init__(self, message): self.code = 400 super(ResourceInUseException, self).__init__( - "ResourceInUseException", - message, + "ResourceInUseException", message, ) diff --git a/moto/kinesisvideo/responses.py b/moto/kinesisvideo/responses.py index 383777ab745a..d1e386f2eeea 100644 --- a/moto/kinesisvideo/responses.py +++ b/moto/kinesisvideo/responses.py @@ -32,8 +32,7 @@ def describe_stream(self): stream_name = self._get_param("StreamName") stream_arn = self._get_param("StreamARN") stream_info = self.kinesisvideo_backend.describe_stream( - stream_name=stream_name, - stream_arn=stream_arn, + stream_name=stream_name, stream_arn=stream_arn, ) return json.dumps(dict(StreamInfo=stream_info)) @@ -52,8 +51,7 @@ def delete_stream(self): stream_arn = self._get_param("StreamARN") current_version = self._get_param("CurrentVersion") self.kinesisvideo_backend.delete_stream( - stream_arn=stream_arn, - current_version=current_version, + stream_arn=stream_arn, current_version=current_version, ) return json.dumps(dict()) @@ -62,8 +60,6 @@ def get_data_endpoint(self): stream_arn = self._get_param("StreamARN") api_name = self._get_param("APIName") data_endpoint = self.kinesisvideo_backend.get_data_endpoint( - stream_name=stream_name, - stream_arn=stream_arn, - api_name=api_name, + stream_name=stream_name, stream_arn=stream_arn, api_name=api_name, ) return json.dumps(dict(DataEndpoint=data_endpoint)) diff --git a/moto/kinesisvideoarchivedmedia/responses.py b/moto/kinesisvideoarchivedmedia/responses.py index a566930971eb..d021ced0e72b 100644 --- a/moto/kinesisvideoarchivedmedia/responses.py +++ b/moto/kinesisvideoarchivedmedia/responses.py @@ -23,18 +23,16 @@ def get_hls_streaming_session_url(self): max_media_playlist_fragment_results = self._get_param( "MaxMediaPlaylistFragmentResults" ) - hls_streaming_session_url = ( - self.kinesisvideoarchivedmedia_backend.get_hls_streaming_session_url( - stream_name=stream_name, - stream_arn=stream_arn, - playback_mode=playback_mode, - hls_fragment_selector=hls_fragment_selector, - container_format=container_format, - discontinuity_mode=discontinuity_mode, - display_fragment_timestamp=display_fragment_timestamp, - expires=expires, - 
max_media_playlist_fragment_results=max_media_playlist_fragment_results, - ) + hls_streaming_session_url = self.kinesisvideoarchivedmedia_backend.get_hls_streaming_session_url( + stream_name=stream_name, + stream_arn=stream_arn, + playback_mode=playback_mode, + hls_fragment_selector=hls_fragment_selector, + container_format=container_format, + discontinuity_mode=discontinuity_mode, + display_fragment_timestamp=display_fragment_timestamp, + expires=expires, + max_media_playlist_fragment_results=max_media_playlist_fragment_results, ) return json.dumps(dict(HLSStreamingSessionURL=hls_streaming_session_url)) @@ -47,17 +45,15 @@ def get_dash_streaming_session_url(self): dash_fragment_selector = self._get_param("DASHFragmentSelector") expires = self._get_int_param("Expires") max_manifest_fragment_results = self._get_param("MaxManifestFragmentResults") - dash_streaming_session_url = ( - self.kinesisvideoarchivedmedia_backend.get_dash_streaming_session_url( - stream_name=stream_name, - stream_arn=stream_arn, - playback_mode=playback_mode, - display_fragment_timestamp=display_fragment_timestamp, - display_fragment_number=display_fragment_number, - dash_fragment_selector=dash_fragment_selector, - expires=expires, - max_manifest_fragment_results=max_manifest_fragment_results, - ) + dash_streaming_session_url = self.kinesisvideoarchivedmedia_backend.get_dash_streaming_session_url( + stream_name=stream_name, + stream_arn=stream_arn, + playback_mode=playback_mode, + display_fragment_timestamp=display_fragment_timestamp, + display_fragment_number=display_fragment_number, + dash_fragment_selector=dash_fragment_selector, + expires=expires, + max_manifest_fragment_results=max_manifest_fragment_results, ) return json.dumps(dict(DASHStreamingSessionURL=dash_streaming_session_url)) diff --git a/moto/managedblockchain/models.py b/moto/managedblockchain/models.py index 92e2456b2e4d..233e875c3203 100644 --- a/moto/managedblockchain/models.py +++ b/moto/managedblockchain/models.py @@ -352,11 +352,7 @@ def set_network_status(self, network_status): class ManagedBlockchainMember(BaseModel): def __init__( - self, - id, - networkid, - member_configuration, - region, + self, id, networkid, member_configuration, region, ): self.creationdate = datetime.datetime.utcnow() self.id = id @@ -587,11 +583,7 @@ def get_network(self, network_id): return self.networks.get(network_id) def create_proposal( - self, - networkid, - memberid, - actions, - description=None, + self, networkid, memberid, actions, description=None, ): # Check if network exists if networkid not in self.networks: @@ -791,10 +783,7 @@ def reject_invitation(self, invitationid): self.invitations.get(invitationid).reject_invitation() def create_member( - self, - invitationid, - networkid, - member_configuration, + self, invitationid, networkid, member_configuration, ): # Check if network exists if networkid not in self.networks: @@ -999,8 +988,7 @@ def create_node( chkregionpreregex = self.region_name + "[a-z]" if re.match(chkregionpreregex, availabilityzone, re.IGNORECASE) is None: raise InvalidRequestException( - "CreateNode", - "Availability Zone is not valid", + "CreateNode", "Availability Zone is not valid", ) node_id = get_node_id() diff --git a/moto/managedblockchain/responses.py b/moto/managedblockchain/responses.py index ccbc08d4482b..7dd628eba4ba 100644 --- a/moto/managedblockchain/responses.py +++ b/moto/managedblockchain/responses.py @@ -134,10 +134,7 @@ def _proposal_response_post(self, network_id, json_body, querystring, headers): description = 
json_body.get("Description", None) response = self.backend.create_proposal( - network_id, - memberid, - actions, - description, + network_id, memberid, actions, description, ) return 200, headers, json.dumps(response) @@ -201,10 +198,7 @@ def _proposal_votes_response_post( vote = json_body["Vote"] self.backend.vote_on_proposal( - network_id, - proposal_id, - votermemberid, - vote, + network_id, proposal_id, votermemberid, vote, ) return 200, headers, "" @@ -284,9 +278,7 @@ def _member_response_post(self, network_id, json_body, querystring, headers): member_configuration = json_body["MemberConfiguration"] response = self.backend.create_member( - invitationid, - network_id, - member_configuration, + invitationid, network_id, member_configuration, ) return 200, headers, json.dumps(response) @@ -325,9 +317,7 @@ def _memberid_response_get(self, network_id, member_id, headers): def _memberid_response_patch(self, network_id, member_id, json_body, headers): logpublishingconfiguration = json_body["LogPublishingConfiguration"] self.backend.update_member( - network_id, - member_id, - logpublishingconfiguration, + network_id, member_id, logpublishingconfiguration, ) return 200, headers, "" @@ -427,10 +417,7 @@ def _nodeid_response_patch( ): logpublishingconfiguration = json_body self.backend.update_node( - network_id, - member_id, - node_id, - logpublishingconfiguration, + network_id, member_id, node_id, logpublishingconfiguration, ) return 200, headers, "" diff --git a/moto/organizations/models.py b/moto/organizations/models.py index 6fc696c91ed3..5655326c02ff 100644 --- a/moto/organizations/models.py +++ b/moto/organizations/models.py @@ -785,8 +785,7 @@ def deregister_delegated_administrator(self, **kwargs): ) admin = next( - (admin for admin in self.admins if admin.account.id == account_id), - None, + (admin for admin in self.admins if admin.account.id == account_id), None, ) if admin is None: account = next( @@ -842,8 +841,7 @@ def detach_policy(self, **kwargs): ) elif re.match(account_id_regex, target_id): account = next( - (account for account in self.accounts if account.id == target_id), - None, + (account for account in self.accounts if account.id == target_id), None, ) if account is not None: if account in account.attached_policies: diff --git a/moto/ram/models.py b/moto/ram/models.py index 0d2b8bfd0ad1..d38099374c62 100644 --- a/moto/ram/models.py +++ b/moto/ram/models.py @@ -88,10 +88,8 @@ def add_principals(self, principals): ) if root_id: - ous = ( - self.organizations_backend.list_organizational_units_for_parent( - ParentId=root_id - ) + ous = self.organizations_backend.list_organizational_units_for_parent( + ParentId=root_id ) if any(principal == ou["Arn"] for ou in ous["OrganizationalUnits"]): continue diff --git a/moto/s3/models.py b/moto/s3/models.py index 9e85d8f43d31..17282739a321 100644 --- a/moto/s3/models.py +++ b/moto/s3/models.py @@ -523,10 +523,7 @@ def to_config_dict(self): for key, value in self.tags.items(): data.append( - { - "type": "LifecycleTagPredicate", - "tag": {"key": key, "value": value}, - } + {"type": "LifecycleTagPredicate", "tag": {"key": key, "value": value},} ) return data @@ -1132,11 +1129,7 @@ def create_from_cloudformation_json( @classmethod def update_from_cloudformation_json( - cls, - original_resource, - new_resource_name, - cloudformation_json, - region_name, + cls, original_resource, new_resource_name, cloudformation_json, region_name, ): properties = cloudformation_json["Properties"] @@ -1476,8 +1469,7 @@ def set_key_tags(self, key, tags, 
key_name=None): raise MissingKey(key_name) self.tagger.delete_all_tags_for_resource(key.arn) self.tagger.tag_resource( - key.arn, - [{"Key": k, "Value": v} for (k, v) in tags.items()], + key.arn, [{"Key": k, "Value": v} for (k, v) in tags.items()], ) return key @@ -1489,8 +1481,7 @@ def put_bucket_tagging(self, bucket_name, tags): bucket = self.get_bucket(bucket_name) self.tagger.delete_all_tags_for_resource(bucket.arn) self.tagger.tag_resource( - bucket.arn, - [{"Key": key, "Value": value} for key, value in tags.items()], + bucket.arn, [{"Key": key, "Value": value} for key, value in tags.items()], ) def delete_bucket_tagging(self, bucket_name): diff --git a/moto/s3/responses.py b/moto/s3/responses.py index c27b57cf666a..b01bed1fbd85 100644 --- a/moto/s3/responses.py +++ b/moto/s3/responses.py @@ -406,8 +406,8 @@ def _bucket_response_get(self, bucket_name, querystring): template = self.response_template(S3_BUCKET_CORS_RESPONSE) return template.render(cors=cors) elif "notification" in querystring: - notification_configuration = ( - self.backend.get_bucket_notification_configuration(bucket_name) + notification_configuration = self.backend.get_bucket_notification_configuration( + bucket_name ) if not notification_configuration: return 200, {}, "" diff --git a/moto/sagemaker/models.py b/moto/sagemaker/models.py index d13925ba9d92..f53cc3eecbe5 100644 --- a/moto/sagemaker/models.py +++ b/moto/sagemaker/models.py @@ -517,10 +517,8 @@ def __init__( self.creation_time = self.last_modified_time = datetime.now().strftime( "%Y-%m-%d %H:%M:%S" ) - self.notebook_instance_lifecycle_config_arn = ( - FakeSageMakerNotebookInstanceLifecycleConfig.arn_formatter( - self.notebook_instance_lifecycle_config_name, self.region_name - ) + self.notebook_instance_lifecycle_config_arn = FakeSageMakerNotebookInstanceLifecycleConfig.arn_formatter( + self.notebook_instance_lifecycle_config_name, self.region_name ) @staticmethod @@ -583,9 +581,7 @@ def describe_model(self, model_name=None): Model.arn_for_model_name(model_name, self.region_name) ) raise RESTError( - error_type="ValidationException", - message=message, - template="error_json", + error_type="ValidationException", message=message, template="error_json", ) def list_models(self): @@ -796,10 +792,7 @@ def delete_endpoint_config(self, endpoint_config_name): raise ValidationError(message=message) def create_endpoint( - self, - endpoint_name, - endpoint_config_name, - tags, + self, endpoint_name, endpoint_config_name, tags, ): try: endpoint_config = self.describe_endpoint_config(endpoint_config_name) diff --git a/moto/sagemaker/responses.py b/moto/sagemaker/responses.py index 9abb1369af9a..d5d2cab435d5 100644 --- a/moto/sagemaker/responses.py +++ b/moto/sagemaker/responses.py @@ -243,14 +243,12 @@ def delete_training_job(self): @amzn_request_id def create_notebook_instance_lifecycle_config(self): try: - lifecycle_configuration = ( - self.sagemaker_backend.create_notebook_instance_lifecycle_config( - notebook_instance_lifecycle_config_name=self._get_param( - "NotebookInstanceLifecycleConfigName" - ), - on_create=self._get_param("OnCreate"), - on_start=self._get_param("OnStart"), - ) + lifecycle_configuration = self.sagemaker_backend.create_notebook_instance_lifecycle_config( + notebook_instance_lifecycle_config_name=self._get_param( + "NotebookInstanceLifecycleConfigName" + ), + on_create=self._get_param("OnCreate"), + on_start=self._get_param("OnStart"), ) response = { "NotebookInstanceLifecycleConfigArn": 
lifecycle_configuration.notebook_instance_lifecycle_config_arn, diff --git a/moto/sns/models.py b/moto/sns/models.py index 5da2c06b7e18..7d297fbdc097 100644 --- a/moto/sns/models.py +++ b/moto/sns/models.py @@ -340,14 +340,12 @@ def enabled(self): @property def arn(self): - return ( - "arn:aws:sns:{region}:{AccountId}:endpoint/{platform}/{name}/{id}".format( - region=self.region, - AccountId=DEFAULT_ACCOUNT_ID, - platform=self.application.platform, - name=self.application.name, - id=self.id, - ) + return "arn:aws:sns:{region}:{AccountId}:endpoint/{platform}/{name}/{id}".format( + region=self.region, + AccountId=DEFAULT_ACCOUNT_ID, + platform=self.application.platform, + name=self.application.name, + id=self.id, ) def publish(self, message): diff --git a/moto/sqs/responses.py b/moto/sqs/responses.py index 1168d8094053..016637b4c8a3 100644 --- a/moto/sqs/responses.py +++ b/moto/sqs/responses.py @@ -354,9 +354,7 @@ def receive_message(self): queue_name = self._get_queue_name() message_attributes = self._get_multi_param("message_attributes") if not message_attributes: - message_attributes = extract_input_message_attributes( - self.querystring, - ) + message_attributes = extract_input_message_attributes(self.querystring,) queue = self.sqs_backend.get_queue(queue_name) @@ -720,10 +718,8 @@ def list_queue_tags(self): 6fde8d1e-52cd-4581-8cd9-c512f4c64223
""" -ERROR_MAX_VISIBILITY_TIMEOUT_RESPONSE = ( - "Invalid request, maximum visibility timeout is {0}".format( - MAXIMUM_VISIBILTY_TIMEOUT - ) +ERROR_MAX_VISIBILITY_TIMEOUT_RESPONSE = "Invalid request, maximum visibility timeout is {0}".format( + MAXIMUM_VISIBILTY_TIMEOUT ) ERROR_INEXISTENT_QUEUE = """ diff --git a/moto/stepfunctions/models.py b/moto/stepfunctions/models.py index c3a266130fe1..125e5d807e64 100644 --- a/moto/stepfunctions/models.py +++ b/moto/stepfunctions/models.py @@ -148,9 +148,7 @@ def update_from_cloudformation_json( tags = cfn_to_api_tags(properties.get("Tags", [])) sf_backend = stepfunction_backends[region_name] state_machine = sf_backend.update_state_machine( - original_resource.arn, - definition=definition, - role_arn=role_arn, + original_resource.arn, definition=definition, role_arn=role_arn, ) state_machine.add_tags(tags) return state_machine diff --git a/moto/sts/models.py b/moto/sts/models.py index 04c1233da751..b274b1acdcfe 100644 --- a/moto/sts/models.py +++ b/moto/sts/models.py @@ -48,12 +48,10 @@ def user_id(self): @property def arn(self): - return ( - "arn:aws:sts::{account_id}:assumed-role/{role_name}/{session_name}".format( - account_id=ACCOUNT_ID, - role_name=self.role_arn.split("/")[-1], - session_name=self.session_name, - ) + return "arn:aws:sts::{account_id}:assumed-role/{role_name}/{session_name}".format( + account_id=ACCOUNT_ID, + role_name=self.role_arn.split("/")[-1], + session_name=self.session_name, ) diff --git a/moto/transcribe/models.py b/moto/transcribe/models.py index f149a7989b5c..bf8e602e63dc 100644 --- a/moto/transcribe/models.py +++ b/moto/transcribe/models.py @@ -153,11 +153,7 @@ def advance_job_status(self): class FakeMedicalVocabulary(BaseObject): def __init__( - self, - region_name, - vocabulary_name, - language_code, - vocabulary_file_uri, + self, region_name, vocabulary_name, language_code, vocabulary_file_uri, ): self._region_name = region_name self.vocabulary_name = vocabulary_name diff --git a/tests/test_acm/test_acm.py b/tests/test_acm/test_acm.py index 4e80ac8b4871..b32fabeed7b1 100644 --- a/tests/test_acm/test_acm.py +++ b/tests/test_acm/test_acm.py @@ -20,10 +20,8 @@ SERVER_COMMON_NAME = "*.moto.com" SERVER_CRT_BAD = _GET_RESOURCE("star_moto_com-bad.pem") SERVER_KEY = _GET_RESOURCE("star_moto_com.key") -BAD_ARN = ( - "arn:aws:acm:us-east-2:{}:certificate/_0000000-0000-0000-0000-000000000000".format( - ACCOUNT_ID - ) +BAD_ARN = "arn:aws:acm:us-east-2:{}:certificate/_0000000-0000-0000-0000-000000000000".format( + ACCOUNT_ID ) @@ -56,10 +54,7 @@ def test_import_certificate_with_tags(): Certificate=SERVER_CRT, PrivateKey=SERVER_KEY, CertificateChain=CA_CRT, - Tags=[ - {"Key": "Environment", "Value": "QA"}, - {"Key": "KeyOnly"}, - ], + Tags=[{"Key": "Environment", "Value": "QA"}, {"Key": "KeyOnly"},], ) arn = resp["CertificateArn"] @@ -371,10 +366,7 @@ def test_request_certificate_with_tags(): DomainName="google.com", IdempotencyToken=token, SubjectAlternativeNames=["google.com", "www.google.com", "mail.google.com"], - Tags=[ - {"Key": "Environment", "Value": "Prod"}, - {"Key": "KeyOnly"}, - ], + Tags=[{"Key": "Environment", "Value": "Prod"}, {"Key": "KeyOnly"},], ) arn_2 = resp["CertificateArn"] @@ -404,8 +396,7 @@ def test_operations_with_invalid_tags(): # request certificate with invalid tags with pytest.raises(ClientError) as ex: client.request_certificate( - DomainName="example.com", - Tags=[{"Key": "X" * 200, "Value": "Valid"}], + DomainName="example.com", Tags=[{"Key": "X" * 200, "Value": "Valid"}], ) 
ex.value.response["Error"]["Code"].should.equal("ValidationException") ex.value.response["Error"]["Message"].should.contain( @@ -567,7 +558,11 @@ def test_request_certificate_with_mutiple_times(): resp = client.request_certificate( IdempotencyToken="test_token", DomainName="google.com", - SubjectAlternativeNames=["google.com", "www.google.com", "mail.google.com"], + SubjectAlternativeNames=[ + "google.com", + "www.google.com", + "mail.google.com", + ], ) arn = resp["CertificateArn"] arn.should.equal(original_arn) diff --git a/tests/test_apigateway/test_apigateway.py b/tests/test_apigateway/test_apigateway.py index 01529fadc227..f85fd4a02543 100644 --- a/tests/test_apigateway/test_apigateway.py +++ b/tests/test_apigateway/test_apigateway.py @@ -105,9 +105,7 @@ def test_create_rest_api_valid_apikeysources(): # 1. test creating rest api with HEADER apiKeySource response = client.create_rest_api( - name="my_api", - description="this is my api", - apiKeySource="HEADER", + name="my_api", description="this is my api", apiKeySource="HEADER", ) api_id = response["id"] @@ -116,9 +114,7 @@ def test_create_rest_api_valid_apikeysources(): # 2. test creating rest api with AUTHORIZER apiKeySource response = client.create_rest_api( - name="my_api2", - description="this is my api", - apiKeySource="AUTHORIZER", + name="my_api2", description="this is my api", apiKeySource="AUTHORIZER", ) api_id = response["id"] @@ -153,9 +149,7 @@ def test_create_rest_api_valid_endpointconfigurations(): response = client.get_rest_api(restApiId=api_id) response["endpointConfiguration"].should.equal( - { - "types": ["PRIVATE"], - } + {"types": ["PRIVATE"],} ) # 2. test creating rest api with REGIONAL endpointConfiguration @@ -168,9 +162,7 @@ def test_create_rest_api_valid_endpointconfigurations(): response = client.get_rest_api(restApiId=api_id) response["endpointConfiguration"].should.equal( - { - "types": ["REGIONAL"], - } + {"types": ["REGIONAL"],} ) # 3. 
test creating rest api with EDGE endpointConfiguration @@ -183,9 +175,7 @@ def test_create_rest_api_valid_endpointconfigurations(): response = client.get_rest_api(restApiId=api_id) response["endpointConfiguration"].should.equal( - { - "types": ["EDGE"], - } + {"types": ["EDGE"],} ) @@ -231,11 +221,7 @@ def test_create_resource(): root_resource["ResponseMetadata"].pop("HTTPHeaders", None) root_resource["ResponseMetadata"].pop("RetryAttempts", None) root_resource.should.equal( - { - "path": "/", - "id": root_id, - "ResponseMetadata": {"HTTPStatusCode": 200}, - } + {"path": "/", "id": root_id, "ResponseMetadata": {"HTTPStatusCode": 200},} ) client.create_resource(restApiId=api_id, parentId=root_id, pathPart="users") @@ -1834,10 +1820,8 @@ def test_http_proxying_integration(): stage_name = "staging" client.create_deployment(restApiId=api_id, stageName=stage_name) - deploy_url = ( - "https://{api_id}.execute-api.{region_name}.amazonaws.com/{stage_name}".format( - api_id=api_id, region_name=region_name, stage_name=stage_name - ) + deploy_url = "https://{api_id}.execute-api.{region_name}.amazonaws.com/{stage_name}".format( + api_id=api_id, region_name=region_name, stage_name=stage_name ) if not settings.TEST_SERVER_MODE: diff --git a/tests/test_applicationautoscaling/test_applicationautoscaling.py b/tests/test_applicationautoscaling/test_applicationautoscaling.py index f8c629be2f35..aed728ab62e6 100644 --- a/tests/test_applicationautoscaling/test_applicationautoscaling.py +++ b/tests/test_applicationautoscaling/test_applicationautoscaling.py @@ -513,6 +513,4 @@ def test_deregister_scalable_target(): ResourceId=resource_id, ScalableDimension=scalable_dimension, ) - e.value.response["Error"]["Message"].should.match( - r"No scalable target found .*" - ) + e.value.response["Error"]["Message"].should.match(r"No scalable target found .*") diff --git a/tests/test_applicationautoscaling/test_validation.py b/tests/test_applicationautoscaling/test_validation.py index 056450ae1707..b074d339699f 100644 --- a/tests/test_applicationautoscaling/test_validation.py +++ b/tests/test_applicationautoscaling/test_validation.py @@ -48,8 +48,7 @@ def test_describe_scalable_targets_with_invalid_scalable_dimension_should_return with pytest.raises(ClientError) as err: response = client.describe_scalable_targets( - ServiceNamespace=DEFAULT_SERVICE_NAMESPACE, - ScalableDimension="foo", + ServiceNamespace=DEFAULT_SERVICE_NAMESPACE, ScalableDimension="foo", ) err.response["Error"]["Code"].should.equal("ValidationException") err.response["Error"]["Message"].split(":")[0].should.look_like( @@ -64,8 +63,7 @@ def test_describe_scalable_targets_with_invalid_service_namespace_should_return_ with pytest.raises(ClientError) as err: response = client.describe_scalable_targets( - ServiceNamespace="foo", - ScalableDimension=DEFAULT_SCALABLE_DIMENSION, + ServiceNamespace="foo", ScalableDimension=DEFAULT_SCALABLE_DIMENSION, ) err.response["Error"]["Code"].should.equal("ValidationException") err.response["Error"]["Message"].split(":")[0].should.look_like( @@ -80,8 +78,7 @@ def test_describe_scalable_targets_with_multiple_invalid_parameters_should_retur with pytest.raises(ClientError) as err: response = client.describe_scalable_targets( - ServiceNamespace="foo", - ScalableDimension="bar", + ServiceNamespace="foo", ScalableDimension="bar", ) err.response["Error"]["Code"].should.equal("ValidationException") err.response["Error"]["Message"].split(":")[0].should.look_like( @@ -105,12 +102,13 @@ def 
test_register_scalable_target_ecs_with_non_existent_service_should_return_va err.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) -@pytest.mark.parametrize("namespace,r_id,dimension,expected", +@pytest.mark.parametrize( + "namespace,r_id,dimension,expected", [ ("ecs", "service/default/test-svc", "ecs:service:DesiredCount", True), ("ecs", "banana/default/test-svc", "ecs:service:DesiredCount", False), ("rds", "service/default/test-svc", "ecs:service:DesiredCount", False), - ] + ], ) def test_target_params_are_valid_success(namespace, r_id, dimension, expected): if expected is True: diff --git a/tests/test_athena/test_athena.py b/tests/test_athena/test_athena.py index d3362cd7fc74..f667f231697b 100644 --- a/tests/test_athena/test_athena.py +++ b/tests/test_athena/test_athena.py @@ -178,9 +178,7 @@ def test_create_named_query(): # create named query res = client.create_named_query( - Name="query-name", - Database="target_db", - QueryString="SELECT * FROM table1", + Name="query-name", Database="target_db", QueryString="SELECT * FROM table1", ) assert "NamedQueryId" in res @@ -217,8 +215,6 @@ def create_basic_workgroup(client, name): Name=name, Description="Test work group", Configuration={ - "ResultConfiguration": { - "OutputLocation": "s3://bucket-name/prefix/", - } + "ResultConfiguration": {"OutputLocation": "s3://bucket-name/prefix/",} }, ) diff --git a/tests/test_autoscaling/test_autoscaling.py b/tests/test_autoscaling/test_autoscaling.py index 0b6ec88f7569..cbcd8eb205d6 100644 --- a/tests/test_autoscaling/test_autoscaling.py +++ b/tests/test_autoscaling/test_autoscaling.py @@ -961,8 +961,7 @@ def test_describe_autoscaling_groups_boto3_launch_config(): mocked_networking = setup_networking() client = boto3.client("autoscaling", region_name="us-east-1") client.create_launch_configuration( - LaunchConfigurationName="test_launch_configuration", - InstanceType="t2.micro", + LaunchConfigurationName="test_launch_configuration", InstanceType="t2.micro", ) client.create_auto_scaling_group( AutoScalingGroupName="test_asg", @@ -1041,8 +1040,7 @@ def test_describe_autoscaling_instances_boto3_launch_config(): mocked_networking = setup_networking() client = boto3.client("autoscaling", region_name="us-east-1") client.create_launch_configuration( - LaunchConfigurationName="test_launch_configuration", - InstanceType="t2.micro", + LaunchConfigurationName="test_launch_configuration", InstanceType="t2.micro", ) client.create_auto_scaling_group( AutoScalingGroupName="test_asg", @@ -2156,8 +2154,7 @@ def test_standby_exit_standby(): response["AutoScalingInstances"][0]["LifecycleState"].should.equal("Standby") response = client.exit_standby( - AutoScalingGroupName="test_asg", - InstanceIds=[instance_to_standby_exit_standby], + AutoScalingGroupName="test_asg", InstanceIds=[instance_to_standby_exit_standby], ) response["ResponseMetadata"]["HTTPStatusCode"].should.equal(200) diff --git a/tests/test_autoscaling/test_autoscaling_cloudformation.py b/tests/test_autoscaling/test_autoscaling_cloudformation.py index ac7884c5fcf2..24a5b5628a7f 100644 --- a/tests/test_autoscaling/test_autoscaling_cloudformation.py +++ b/tests/test_autoscaling/test_autoscaling_cloudformation.py @@ -32,8 +32,7 @@ def test_launch_configuration(): """.strip() cf_client.create_stack( - StackName=stack_name, - TemplateBody=cf_template, + StackName=stack_name, TemplateBody=cf_template, ) stack = cf_client.describe_stacks(StackName=stack_name)["Stacks"][0]
stack["Outputs"][0]["OutputValue"].should.be.equal("test_launch_configuration") @@ -57,8 +56,7 @@ def test_launch_configuration(): """.strip() cf_client.update_stack( - StackName=stack_name, - TemplateBody=cf_template, + StackName=stack_name, TemplateBody=cf_template, ) stack = cf_client.describe_stacks(StackName=stack_name)["Stacks"][0] stack["Outputs"][0]["OutputValue"].should.be.equal("test_launch_configuration") @@ -78,8 +76,7 @@ def test_autoscaling_group_from_launch_config(): client = boto3.client("autoscaling", region_name="us-east-1") client.create_launch_configuration( - LaunchConfigurationName="test_launch_configuration", - InstanceType="t2.micro", + LaunchConfigurationName="test_launch_configuration", InstanceType="t2.micro", ) stack_name = "test-auto-scaling-group" diff --git a/tests/test_awslambda/test_lambda.py b/tests/test_awslambda/test_lambda.py index 1fe1de96f744..7e4fc22f56f1 100644 --- a/tests/test_awslambda/test_lambda.py +++ b/tests/test_awslambda/test_lambda.py @@ -208,9 +208,7 @@ def test_invoke_dryrun_function(): Runtime="python2.7", Role=get_role_name(), Handler="lambda_function.lambda_handler", - Code={ - "ZipFile": get_test_zip_file1(), - }, + Code={"ZipFile": get_test_zip_file1(),}, Description="test lambda function", Timeout=3, MemorySize=128, @@ -1287,8 +1285,7 @@ def wait_for_log_msg(expected_msg, log_group): for log_stream in log_streams: result = logs_conn.get_log_events( - logGroupName=log_group, - logStreamName=log_stream["logStreamName"], + logGroupName=log_group, logStreamName=log_stream["logStreamName"], ) received_messages.extend( [event["message"] for event in result.get("events")] @@ -1727,9 +1724,7 @@ def test_remove_function_permission(): ) remove = conn.remove_permission( - FunctionName="testFunction", - StatementId="1", - Qualifier="2", + FunctionName="testFunction", StatementId="1", Qualifier="2", ) remove["ResponseMetadata"]["HTTPStatusCode"].should.equal(204) policy = conn.get_policy(FunctionName="testFunction", Qualifier="2")["Policy"] diff --git a/tests/test_batch/test_batch.py b/tests/test_batch/test_batch.py index b8e50fd122b9..5a7757777975 100644 --- a/tests/test_batch/test_batch.py +++ b/tests/test_batch/test_batch.py @@ -738,8 +738,9 @@ def test_submit_job(): else: raise RuntimeError("Batch job timed out") - resp = logs_client.describe_log_streams(logGroupName="/aws/batch/job", - logStreamNamePrefix="sayhellotomylittlefriend") + resp = logs_client.describe_log_streams( + logGroupName="/aws/batch/job", logStreamNamePrefix="sayhellotomylittlefriend" + ) len(resp["logStreams"]).should.equal(1) ls_name = resp["logStreams"][0]["logStreamName"] diff --git a/tests/test_cloudformation/test_cloudformation_depends_on.py b/tests/test_cloudformation/test_cloudformation_depends_on.py index 6a8e17428da5..1b47b40648fb 100644 --- a/tests/test_cloudformation/test_cloudformation_depends_on.py +++ b/tests/test_cloudformation/test_cloudformation_depends_on.py @@ -23,9 +23,7 @@ }, "LaunchConfig": { "Type": "AWS::AutoScaling::LaunchConfiguration", - "Properties": { - "LaunchConfigurationName": "test-launch-config", - }, + "Properties": {"LaunchConfigurationName": "test-launch-config",}, }, }, } @@ -47,9 +45,7 @@ }, "LaunchConfig": { "Type": "AWS::AutoScaling::LaunchConfiguration", - "Properties": { - "LaunchConfigurationName": "test-launch-config", - }, + "Properties": {"LaunchConfigurationName": "test-launch-config",}, }, }, } diff --git a/tests/test_cloudformation/test_cloudformation_stack_crud_boto3.py 
b/tests/test_cloudformation/test_cloudformation_stack_crud_boto3.py index fdf1f2426ad5..86b6f1a94e82 100644 --- a/tests/test_cloudformation/test_cloudformation_stack_crud_boto3.py +++ b/tests/test_cloudformation/test_cloudformation_stack_crud_boto3.py @@ -1369,12 +1369,10 @@ def test_non_json_redrive_policy(): def test_boto3_create_duplicate_stack(): cf_conn = boto3.client("cloudformation", region_name="us-east-1") cf_conn.create_stack( - StackName="test_stack", - TemplateBody=dummy_template_json, + StackName="test_stack", TemplateBody=dummy_template_json, ) with pytest.raises(ClientError): cf_conn.create_stack( - StackName="test_stack", - TemplateBody=dummy_template_json, + StackName="test_stack", TemplateBody=dummy_template_json, ) diff --git a/tests/test_cloudformation/test_cloudformation_stack_integration.py b/tests/test_cloudformation/test_cloudformation_stack_integration.py index 852bb805015b..9949bb4a5035 100644 --- a/tests/test_cloudformation/test_cloudformation_stack_integration.py +++ b/tests/test_cloudformation/test_cloudformation_stack_integration.py @@ -2325,10 +2325,7 @@ def test_stack_dynamodb_resources_integration(): dynamodb_client = boto3.client("dynamodb", region_name="us-east-1") table_desc = dynamodb_client.describe_table(TableName="myTableName")["Table"] table_desc["StreamSpecification"].should.equal( - { - "StreamEnabled": True, - "StreamViewType": "KEYS_ONLY", - } + {"StreamEnabled": True, "StreamViewType": "KEYS_ONLY",} ) dynamodb_conn = boto3.resource("dynamodb", region_name="us-east-1") @@ -2782,9 +2779,7 @@ def test_stack_events_get_attribute_integration(): @mock_dynamodb2 def test_dynamodb_table_creation(): CFN_TEMPLATE = { - "Outputs": { - "MyTableName": {"Value": {"Ref": "MyTable"}}, - }, + "Outputs": {"MyTableName": {"Value": {"Ref": "MyTable"}},}, "Resources": { "MyTable": { "Type": "AWS::DynamoDB::Table", diff --git a/tests/test_codepipeline/test_codepipeline.py b/tests/test_codepipeline/test_codepipeline.py index c80a732612a8..ca1094582ad0 100644 --- a/tests/test_codepipeline/test_codepipeline.py +++ b/tests/test_codepipeline/test_codepipeline.py @@ -326,9 +326,7 @@ def test_update_pipeline(): "S3Bucket": "different-bucket", "S3ObjectKey": "test-object", }, - "outputArtifacts": [ - {"name": "artifact"}, - ], + "outputArtifacts": [{"name": "artifact"},], }, ], }, @@ -437,9 +435,7 @@ def test_update_pipeline_errors(): "S3Bucket": "test-bucket", "S3ObjectKey": "test-object", }, - "outputArtifacts": [ - {"name": "artifact"}, - ], + "outputArtifacts": [{"name": "artifact"},], }, ], }, @@ -700,9 +696,7 @@ def create_basic_codepipeline(client, name): "S3Bucket": "test-bucket", "S3ObjectKey": "test-object", }, - "outputArtifacts": [ - {"name": "artifact"}, - ], + "outputArtifacts": [{"name": "artifact"},], }, ], }, diff --git a/tests/test_cognitoidp/test_cognitoidp.py b/tests/test_cognitoidp/test_cognitoidp.py index b4893b4ecdad..54ee9528f71f 100644 --- a/tests/test_cognitoidp/test_cognitoidp.py +++ b/tests/test_cognitoidp/test_cognitoidp.py @@ -1272,20 +1272,15 @@ def user_authentication_flow(conn): )["UserPoolClient"]["ClientId"] conn.sign_up( - ClientId=client_id, - Username=username, - Password=password, + ClientId=client_id, Username=username, Password=password, ) client_secret = conn.describe_user_pool_client( - UserPoolId=user_pool_id, - ClientId=client_id, + UserPoolId=user_pool_id, ClientId=client_id, )["UserPoolClient"]["ClientSecret"] conn.confirm_sign_up( - ClientId=client_id, - Username=username, - ConfirmationCode="123456", + ClientId=client_id, 
Username=username, ConfirmationCode="123456", ) # generating secret hash @@ -1323,25 +1318,18 @@ def user_authentication_flow(conn): ) conn.verify_software_token( - AccessToken=result["AuthenticationResult"]["AccessToken"], - UserCode="123456", + AccessToken=result["AuthenticationResult"]["AccessToken"], UserCode="123456", ) conn.set_user_mfa_preference( AccessToken=result["AuthenticationResult"]["AccessToken"], - SoftwareTokenMfaSettings={ - "Enabled": True, - "PreferredMfa": True, - }, + SoftwareTokenMfaSettings={"Enabled": True, "PreferredMfa": True,}, ) result = conn.initiate_auth( ClientId=client_id, AuthFlow="REFRESH_TOKEN", - AuthParameters={ - "SECRET_HASH": secret_hash, - "REFRESH_TOKEN": refresh_token, - }, + AuthParameters={"SECRET_HASH": secret_hash, "REFRESH_TOKEN": refresh_token,}, ) result["AuthenticationResult"]["IdToken"].should_not.be.none @@ -1595,8 +1583,7 @@ def test_sign_up(): conn = boto3.client("cognito-idp", "us-west-2") user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] client_id = conn.create_user_pool_client( - UserPoolId=user_pool_id, - ClientName=str(uuid.uuid4()), + UserPoolId=user_pool_id, ClientName=str(uuid.uuid4()), )["UserPoolClient"]["ClientId"] username = str(uuid.uuid4()) password = str(uuid.uuid4()) @@ -1612,16 +1599,12 @@ def test_confirm_sign_up(): password = str(uuid.uuid4()) user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] client_id = conn.create_user_pool_client( - UserPoolId=user_pool_id, - ClientName=str(uuid.uuid4()), - GenerateSecret=True, + UserPoolId=user_pool_id, ClientName=str(uuid.uuid4()), GenerateSecret=True, )["UserPoolClient"]["ClientId"] conn.sign_up(ClientId=client_id, Username=username, Password=password) conn.confirm_sign_up( - ClientId=client_id, - Username=username, - ConfirmationCode="123456", + ClientId=client_id, Username=username, ConfirmationCode="123456", ) result = conn.admin_get_user(UserPoolId=user_pool_id, Username=username) @@ -1635,19 +1618,14 @@ def test_initiate_auth_USER_SRP_AUTH(): password = str(uuid.uuid4()) user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] client_id = conn.create_user_pool_client( - UserPoolId=user_pool_id, - ClientName=str(uuid.uuid4()), - GenerateSecret=True, + UserPoolId=user_pool_id, ClientName=str(uuid.uuid4()), GenerateSecret=True, )["UserPoolClient"]["ClientId"] conn.sign_up(ClientId=client_id, Username=username, Password=password) client_secret = conn.describe_user_pool_client( - UserPoolId=user_pool_id, - ClientId=client_id, + UserPoolId=user_pool_id, ClientId=client_id, )["UserPoolClient"]["ClientSecret"] conn.confirm_sign_up( - ClientId=client_id, - Username=username, - ConfirmationCode="123456", + ClientId=client_id, Username=username, ConfirmationCode="123456", ) key = bytes(str(client_secret).encode("latin-1")) @@ -1691,14 +1669,11 @@ def test_initiate_auth_for_unconfirmed_user(): password = str(uuid.uuid4()) user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] client_id = conn.create_user_pool_client( - UserPoolId=user_pool_id, - ClientName=str(uuid.uuid4()), - GenerateSecret=True, + UserPoolId=user_pool_id, ClientName=str(uuid.uuid4()), GenerateSecret=True, )["UserPoolClient"]["ClientId"] conn.sign_up(ClientId=client_id, Username=username, Password=password) client_secret = conn.describe_user_pool_client( - UserPoolId=user_pool_id, - ClientId=client_id, + UserPoolId=user_pool_id, ClientId=client_id, )["UserPoolClient"]["ClientSecret"] key = 
bytes(str(client_secret).encode("latin-1")) @@ -1730,19 +1705,14 @@ def test_initiate_auth_with_invalid_secret_hash(): password = str(uuid.uuid4()) user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] client_id = conn.create_user_pool_client( - UserPoolId=user_pool_id, - ClientName=str(uuid.uuid4()), - GenerateSecret=True, + UserPoolId=user_pool_id, ClientName=str(uuid.uuid4()), GenerateSecret=True, )["UserPoolClient"]["ClientId"] conn.sign_up(ClientId=client_id, Username=username, Password=password) client_secret = conn.describe_user_pool_client( - UserPoolId=user_pool_id, - ClientId=client_id, + UserPoolId=user_pool_id, ClientId=client_id, )["UserPoolClient"]["ClientSecret"] conn.confirm_sign_up( - ClientId=client_id, - Username=username, - ConfirmationCode="123456", + ClientId=client_id, Username=username, ConfirmationCode="123456", ) invalid_secret_hash = str(uuid.uuid4()) diff --git a/tests/test_config/test_config.py b/tests/test_config/test_config.py index 54678cf7f539..41774c2fa53b 100644 --- a/tests/test_config/test_config.py +++ b/tests/test_config/test_config.py @@ -1845,12 +1845,7 @@ def test_put_evaluations(): response["ResponseMetadata"].pop("HTTPHeaders", None) response["ResponseMetadata"].pop("RetryAttempts", None) response.should.equal( - { - "FailedEvaluations": [], - "ResponseMetadata": { - "HTTPStatusCode": 200, - }, - } + {"FailedEvaluations": [], "ResponseMetadata": {"HTTPStatusCode": 200,},} ) diff --git a/tests/test_core/test_auth.py b/tests/test_core/test_auth.py index f867b434ec1f..b6fc8a1356dc 100644 --- a/tests/test_core/test_auth.py +++ b/tests/test_core/test_auth.py @@ -325,9 +325,7 @@ def test_access_denied_for_run_instances(): ex.value.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(403) ex.value.response["Error"]["Message"].should.equal( "User: arn:aws:iam::{account_id}:user/{user_name} is not authorized to perform: {operation}".format( - account_id=ACCOUNT_ID, - user_name=user_name, - operation="ec2:RunInstances", + account_id=ACCOUNT_ID, user_name=user_name, operation="ec2:RunInstances", ) ) diff --git a/tests/test_dynamodb2/test_dynamodb.py b/tests/test_dynamodb2/test_dynamodb.py index 7dc739f700ea..7a2ed32cb0ed 100644 --- a/tests/test_dynamodb2/test_dynamodb.py +++ b/tests/test_dynamodb2/test_dynamodb.py @@ -1347,13 +1347,9 @@ def test_get_item_returns_consumed_capacity(): def test_put_empty_item(): dynamodb = boto3.resource("dynamodb", region_name="us-east-1") dynamodb.create_table( - AttributeDefinitions=[ - {"AttributeName": "structure_id", "AttributeType": "S"}, - ], + AttributeDefinitions=[{"AttributeName": "structure_id", "AttributeType": "S"},], TableName="test", - KeySchema=[ - {"AttributeName": "structure_id", "KeyType": "HASH"}, - ], + KeySchema=[{"AttributeName": "structure_id", "KeyType": "HASH"},], ProvisionedThroughput={"ReadCapacityUnits": 123, "WriteCapacityUnits": 123}, ) table = dynamodb.Table("test") @@ -1370,13 +1366,9 @@ def test_put_empty_item(): def test_put_item_nonexisting_hash_key(): dynamodb = boto3.resource("dynamodb", region_name="us-east-1") dynamodb.create_table( - AttributeDefinitions=[ - {"AttributeName": "structure_id", "AttributeType": "S"}, - ], + AttributeDefinitions=[{"AttributeName": "structure_id", "AttributeType": "S"},], TableName="test", - KeySchema=[ - {"AttributeName": "structure_id", "KeyType": "HASH"}, - ], + KeySchema=[{"AttributeName": "structure_id", "KeyType": "HASH"},], ProvisionedThroughput={"ReadCapacityUnits": 123, "WriteCapacityUnits": 123}, ) table = 
dynamodb.Table("test") @@ -2295,10 +2287,7 @@ def test_update_item_on_map(): table.update_item( Key={"forum_name": "the-key", "subject": "123"}, UpdateExpression="SET body.#nested.#data = :tb", - ExpressionAttributeNames={ - "#nested": "nested", - "#data": "data", - }, + ExpressionAttributeNames={"#nested": "nested", "#data": "data",}, ExpressionAttributeValues={":tb": "new_value"}, ) # Running this against AWS DDB gives an exception so make sure it also fails.: @@ -3962,30 +3951,19 @@ def test_update_supports_nested_update_if_nested_value_not_exists(): table = dynamodb.Table(name) table.put_item( - Item={ - "user_id": "1234", - "friends": {"5678": {"name": "friend_5678"}}, - }, + Item={"user_id": "1234", "friends": {"5678": {"name": "friend_5678"}},}, ) table.update_item( Key={"user_id": "1234"}, - ExpressionAttributeNames={ - "#friends": "friends", - "#friendid": "0000", - }, - ExpressionAttributeValues={ - ":friend": {"name": "friend_0000"}, - }, + ExpressionAttributeNames={"#friends": "friends", "#friendid": "0000",}, + ExpressionAttributeValues={":friend": {"name": "friend_0000"},}, UpdateExpression="SET #friends.#friendid = :friend", ReturnValues="UPDATED_NEW", ) item = table.get_item(Key={"user_id": "1234"})["Item"] assert item == { "user_id": "1234", - "friends": { - "5678": {"name": "friend_5678"}, - "0000": {"name": "friend_0000"}, - }, + "friends": {"5678": {"name": "friend_5678"}, "0000": {"name": "friend_0000"},}, } @@ -4208,17 +4186,11 @@ def test_invalid_transact_get_items(): ) table = dynamodb.Table("test1") table.put_item( - Item={ - "id": "1", - "val": "1", - } + Item={"id": "1", "val": "1",} ) table.put_item( - Item={ - "id": "1", - "val": "2", - } + Item={"id": "1", "val": "2",} ) client = boto3.client("dynamodb", region_name="us-east-1") @@ -4240,22 +4212,8 @@ def test_invalid_transact_get_items(): with pytest.raises(ClientError) as ex: client.transact_get_items( TransactItems=[ - { - "Get": { - "Key": { - "id": {"S": "1"}, - }, - "TableName": "test1", - } - }, - { - "Get": { - "Key": { - "id": {"S": "1"}, - }, - "TableName": "non_exists_table", - } - }, + {"Get": {"Key": {"id": {"S": "1"},}, "TableName": "test1",}}, + {"Get": {"Key": {"id": {"S": "1"},}, "TableName": "non_exists_table",}}, ] ) @@ -4281,17 +4239,11 @@ def test_valid_transact_get_items(): ) table1 = dynamodb.Table("test1") table1.put_item( - Item={ - "id": "1", - "sort_key": "1", - } + Item={"id": "1", "sort_key": "1",} ) table1.put_item( - Item={ - "id": "1", - "sort_key": "2", - } + Item={"id": "1", "sort_key": "2",} ) dynamodb.create_table( @@ -4308,10 +4260,7 @@ def test_valid_transact_get_items(): ) table2 = dynamodb.Table("test2") table2.put_item( - Item={ - "id": "1", - "sort_key": "1", - } + Item={"id": "1", "sort_key": "1",} ) client = boto3.client("dynamodb", region_name="us-east-1") @@ -4425,10 +4374,7 @@ def test_valid_transact_get_items(): "TableName": "test1", "CapacityUnits": 4.0, "ReadCapacityUnits": 4.0, - "Table": { - "CapacityUnits": 4.0, - "ReadCapacityUnits": 4.0, - }, + "Table": {"CapacityUnits": 4.0, "ReadCapacityUnits": 4.0,}, } ) @@ -4437,10 +4383,7 @@ def test_valid_transact_get_items(): "TableName": "test2", "CapacityUnits": 2.0, "ReadCapacityUnits": 2.0, - "Table": { - "CapacityUnits": 2.0, - "ReadCapacityUnits": 2.0, - }, + "Table": {"CapacityUnits": 2.0, "ReadCapacityUnits": 2.0,}, } ) @@ -4456,9 +4399,7 @@ def test_gsi_verify_negative_number_order(): {"AttributeName": "gsiK1PartitionKey", "KeyType": "HASH"}, {"AttributeName": "gsiK1SortKey", "KeyType": "RANGE"}, ], - 
"Projection": { - "ProjectionType": "KEYS_ONLY", - }, + "Projection": {"ProjectionType": "KEYS_ONLY",}, } ], "AttributeDefinitions": [ @@ -4509,9 +4450,7 @@ def test_gsi_verify_negative_number_order(): def test_transact_write_items_put(): table_schema = { "KeySchema": [{"AttributeName": "id", "KeyType": "HASH"}], - "AttributeDefinitions": [ - {"AttributeName": "id", "AttributeType": "S"}, - ], + "AttributeDefinitions": [{"AttributeName": "id", "AttributeType": "S"},], } dynamodb = boto3.client("dynamodb", region_name="us-east-1") dynamodb.create_table( @@ -4522,10 +4461,7 @@ def test_transact_write_items_put(): TransactItems=[ { "Put": { - "Item": { - "id": {"S": "foo{}".format(str(i))}, - "foo": {"S": "bar"}, - }, + "Item": {"id": {"S": "foo{}".format(str(i))}, "foo": {"S": "bar"},}, "TableName": "test-table", } } @@ -4541,19 +4477,14 @@ def test_transact_write_items_put(): def test_transact_write_items_put_conditional_expressions(): table_schema = { "KeySchema": [{"AttributeName": "id", "KeyType": "HASH"}], - "AttributeDefinitions": [ - {"AttributeName": "id", "AttributeType": "S"}, - ], + "AttributeDefinitions": [{"AttributeName": "id", "AttributeType": "S"},], } dynamodb = boto3.client("dynamodb", region_name="us-east-1") dynamodb.create_table( TableName="test-table", BillingMode="PAY_PER_REQUEST", **table_schema ) dynamodb.put_item( - TableName="test-table", - Item={ - "id": {"S": "foo2"}, - }, + TableName="test-table", Item={"id": {"S": "foo2"},}, ) # Put multiple items with pytest.raises(ClientError) as ex: @@ -4591,9 +4522,7 @@ def test_transact_write_items_put_conditional_expressions(): def test_transact_write_items_conditioncheck_passes(): table_schema = { "KeySchema": [{"AttributeName": "id", "KeyType": "HASH"}], - "AttributeDefinitions": [ - {"AttributeName": "id", "AttributeType": "S"}, - ], + "AttributeDefinitions": [{"AttributeName": "id", "AttributeType": "S"},], } dynamodb = boto3.client("dynamodb", region_name="us-east-1") dynamodb.create_table( @@ -4601,10 +4530,7 @@ def test_transact_write_items_conditioncheck_passes(): ) # Insert an item without email address dynamodb.put_item( - TableName="test-table", - Item={ - "id": {"S": "foo"}, - }, + TableName="test-table", Item={"id": {"S": "foo"},}, ) # Put an email address, after verifying it doesn't exist yet dynamodb.transact_write_items( @@ -4638,9 +4564,7 @@ def test_transact_write_items_conditioncheck_passes(): def test_transact_write_items_conditioncheck_fails(): table_schema = { "KeySchema": [{"AttributeName": "id", "KeyType": "HASH"}], - "AttributeDefinitions": [ - {"AttributeName": "id", "AttributeType": "S"}, - ], + "AttributeDefinitions": [{"AttributeName": "id", "AttributeType": "S"},], } dynamodb = boto3.client("dynamodb", region_name="us-east-1") dynamodb.create_table( @@ -4689,9 +4613,7 @@ def test_transact_write_items_conditioncheck_fails(): def test_transact_write_items_delete(): table_schema = { "KeySchema": [{"AttributeName": "id", "KeyType": "HASH"}], - "AttributeDefinitions": [ - {"AttributeName": "id", "AttributeType": "S"}, - ], + "AttributeDefinitions": [{"AttributeName": "id", "AttributeType": "S"},], } dynamodb = boto3.client("dynamodb", region_name="us-east-1") dynamodb.create_table( @@ -4699,20 +4621,12 @@ def test_transact_write_items_delete(): ) # Insert an item dynamodb.put_item( - TableName="test-table", - Item={ - "id": {"S": "foo"}, - }, + TableName="test-table", Item={"id": {"S": "foo"},}, ) # Delete the item dynamodb.transact_write_items( TransactItems=[ - { - "Delete": { - "Key": {"id": 
{"S": "foo"}}, - "TableName": "test-table", - } - } + {"Delete": {"Key": {"id": {"S": "foo"}}, "TableName": "test-table",}} ] ) # Assert the item is deleted @@ -4724,9 +4638,7 @@ def test_transact_write_items_delete(): def test_transact_write_items_delete_with_successful_condition_expression(): table_schema = { "KeySchema": [{"AttributeName": "id", "KeyType": "HASH"}], - "AttributeDefinitions": [ - {"AttributeName": "id", "AttributeType": "S"}, - ], + "AttributeDefinitions": [{"AttributeName": "id", "AttributeType": "S"},], } dynamodb = boto3.client("dynamodb", region_name="us-east-1") dynamodb.create_table( @@ -4734,19 +4646,14 @@ def test_transact_write_items_delete_with_successful_condition_expression(): ) # Insert an item without email address dynamodb.put_item( - TableName="test-table", - Item={ - "id": {"S": "foo"}, - }, + TableName="test-table", Item={"id": {"S": "foo"},}, ) # ConditionExpression will pass - no email address has been specified yet dynamodb.transact_write_items( TransactItems=[ { "Delete": { - "Key": { - "id": {"S": "foo"}, - }, + "Key": {"id": {"S": "foo"},}, "TableName": "test-table", "ConditionExpression": "attribute_not_exists(#e)", "ExpressionAttributeNames": {"#e": "email_address"}, @@ -4763,9 +4670,7 @@ def test_transact_write_items_delete_with_successful_condition_expression(): def test_transact_write_items_delete_with_failed_condition_expression(): table_schema = { "KeySchema": [{"AttributeName": "id", "KeyType": "HASH"}], - "AttributeDefinitions": [ - {"AttributeName": "id", "AttributeType": "S"}, - ], + "AttributeDefinitions": [{"AttributeName": "id", "AttributeType": "S"},], } dynamodb = boto3.client("dynamodb", region_name="us-east-1") dynamodb.create_table( @@ -4783,9 +4688,7 @@ def test_transact_write_items_delete_with_failed_condition_expression(): TransactItems=[ { "Delete": { - "Key": { - "id": {"S": "foo"}, - }, + "Key": {"id": {"S": "foo"},}, "TableName": "test-table", "ConditionExpression": "attribute_not_exists(#e)", "ExpressionAttributeNames": {"#e": "email_address"}, @@ -4806,9 +4709,7 @@ def test_transact_write_items_delete_with_failed_condition_expression(): def test_transact_write_items_update(): table_schema = { "KeySchema": [{"AttributeName": "id", "KeyType": "HASH"}], - "AttributeDefinitions": [ - {"AttributeName": "id", "AttributeType": "S"}, - ], + "AttributeDefinitions": [{"AttributeName": "id", "AttributeType": "S"},], } dynamodb = boto3.client("dynamodb", region_name="us-east-1") dynamodb.create_table( @@ -4840,9 +4741,7 @@ def test_transact_write_items_update(): def test_transact_write_items_update_with_failed_condition_expression(): table_schema = { "KeySchema": [{"AttributeName": "id", "KeyType": "HASH"}], - "AttributeDefinitions": [ - {"AttributeName": "id", "AttributeType": "S"}, - ], + "AttributeDefinitions": [{"AttributeName": "id", "AttributeType": "S"},], } dynamodb = boto3.client("dynamodb", region_name="us-east-1") dynamodb.create_table( @@ -5032,18 +4931,12 @@ def create_simple_table_and_return_client(): dynamodb.create_table( TableName="moto-test", KeySchema=[{"AttributeName": "id", "KeyType": "HASH"}], - AttributeDefinitions=[ - {"AttributeName": "id", "AttributeType": "S"}, - ], + AttributeDefinitions=[{"AttributeName": "id", "AttributeType": "S"},], ProvisionedThroughput={"ReadCapacityUnits": 1, "WriteCapacityUnits": 1}, ) dynamodb.put_item( TableName="moto-test", - Item={ - "id": {"S": "1"}, - "myNum": {"N": "1"}, - "MyStr": {"S": "1"}, - }, + Item={"id": {"S": "1"}, "myNum": {"N": "1"}, "MyStr": {"S": "1"},}, ) 
return dynamodb @@ -5107,11 +5000,7 @@ def test_update_expression_with_plus_in_attribute_name(): dynamodb.put_item( TableName="moto-test", - Item={ - "id": {"S": "1"}, - "my+Num": {"S": "1"}, - "MyStr": {"S": "aaa"}, - }, + Item={"id": {"S": "1"}, "my+Num": {"S": "1"}, "MyStr": {"S": "aaa"},}, ) try: dynamodb.update_item( @@ -5138,11 +5027,7 @@ def test_update_expression_with_minus_in_attribute_name(): dynamodb.put_item( TableName="moto-test", - Item={ - "id": {"S": "1"}, - "my-Num": {"S": "1"}, - "MyStr": {"S": "aaa"}, - }, + Item={"id": {"S": "1"}, "my-Num": {"S": "1"}, "MyStr": {"S": "aaa"},}, ) try: dynamodb.update_item( @@ -5169,11 +5054,7 @@ def test_update_expression_with_space_in_attribute_name(): dynamodb.put_item( TableName="moto-test", - Item={ - "id": {"S": "1"}, - "my Num": {"S": "1"}, - "MyStr": {"S": "aaa"}, - }, + Item={"id": {"S": "1"}, "my Num": {"S": "1"}, "MyStr": {"S": "aaa"},}, ) try: @@ -5356,8 +5237,7 @@ def test_update_item_atomic_counter_from_zero(): key = {"t_id": {"S": "item1"}} ddb_mock.put_item( - TableName=table, - Item=key, + TableName=table, Item=key, ) ddb_mock.update_item( @@ -5383,8 +5263,7 @@ def test_update_item_add_to_non_existent_set(): ) key = {"t_id": {"S": "item1"}} ddb_mock.put_item( - TableName=table, - Item=key, + TableName=table, Item=key, ) ddb_mock.update_item( @@ -5409,8 +5288,7 @@ def test_update_item_add_to_non_existent_number_set(): ) key = {"t_id": {"S": "item1"}} ddb_mock.put_item( - TableName=table, - Item=key, + TableName=table, Item=key, ) ddb_mock.update_item( @@ -5427,9 +5305,7 @@ def test_update_item_add_to_non_existent_number_set(): def test_transact_write_items_fails_with_transaction_canceled_exception(): table_schema = { "KeySchema": [{"AttributeName": "id", "KeyType": "HASH"}], - "AttributeDefinitions": [ - {"AttributeName": "id", "AttributeType": "S"}, - ], + "AttributeDefinitions": [{"AttributeName": "id", "AttributeType": "S"},], } dynamodb = boto3.client("dynamodb", region_name="us-east-1") dynamodb.create_table( @@ -5481,9 +5357,7 @@ def test_gsi_projection_type_keys_only(): {"AttributeName": "gsiK1PartitionKey", "KeyType": "HASH"}, {"AttributeName": "gsiK1SortKey", "KeyType": "RANGE"}, ], - "Projection": { - "ProjectionType": "KEYS_ONLY", - }, + "Projection": {"ProjectionType": "KEYS_ONLY",}, } ], "AttributeDefinitions": [ @@ -5536,9 +5410,7 @@ def test_lsi_projection_type_keys_only(): {"AttributeName": "partitionKey", "KeyType": "HASH"}, {"AttributeName": "lsiK1SortKey", "KeyType": "RANGE"}, ], - "Projection": { - "ProjectionType": "KEYS_ONLY", - }, + "Projection": {"ProjectionType": "KEYS_ONLY",}, } ], "AttributeDefinitions": [ @@ -5563,8 +5435,7 @@ def test_lsi_projection_type_keys_only(): table.put_item(Item=item) items = table.query( - KeyConditionExpression=Key("partitionKey").eq("pk-1"), - IndexName="LSI", + KeyConditionExpression=Key("partitionKey").eq("pk-1"), IndexName="LSI", )["Items"] items.should.have.length_of(1) # Item should only include GSI Keys and Table Keys, as per the ProjectionType diff --git a/tests/test_dynamodb2/test_dynamodb_executor.py b/tests/test_dynamodb2/test_dynamodb_executor.py index 7270ba713ae8..892d2715cef9 100644 --- a/tests/test_dynamodb2/test_dynamodb_executor.py +++ b/tests/test_dynamodb2/test_dynamodb_executor.py @@ -212,11 +212,7 @@ def test_execution_of_remove_in_map(): "itemlist": { "L": [ {"M": {"foo00": {"S": "bar1"}, "foo01": {"S": "bar2"}}}, - { - "M": { - "foo10": {"S": "bar1"}, - } - }, + {"M": {"foo10": {"S": "bar1"},}}, ] } } @@ -265,9 +261,7 @@ def 
test_execution_of_remove_in_list(): "itemmap": { "M": { "itemlist": { - "L": [ - {"M": {"foo00": {"S": "bar1"}, "foo01": {"S": "bar2"}}}, - ] + "L": [{"M": {"foo00": {"S": "bar1"}, "foo01": {"S": "bar2"}}},] } } }, @@ -284,10 +278,7 @@ def test_execution_of_delete_element_from_set(): hash_key_type="TYPE", range_key=None, range_key_type=None, - attrs={ - "id": {"S": "foo2"}, - "s": {"SS": ["value1", "value2", "value3"]}, - }, + attrs={"id": {"S": "foo2"}, "s": {"SS": ["value1", "value2", "value3"]},}, ) validated_ast = UpdateExpressionValidator( update_expression_ast, @@ -301,10 +292,7 @@ def test_execution_of_delete_element_from_set(): hash_key_type="TYPE", range_key=None, range_key_type=None, - attrs={ - "id": {"S": "foo2"}, - "s": {"SS": ["value1", "value3"]}, - }, + attrs={"id": {"S": "foo2"}, "s": {"SS": ["value1", "value3"]},}, ) assert expected_item == item @@ -317,10 +305,7 @@ def test_execution_of_add_number(): hash_key_type="TYPE", range_key=None, range_key_type=None, - attrs={ - "id": {"S": "foo2"}, - "s": {"N": "5"}, - }, + attrs={"id": {"S": "foo2"}, "s": {"N": "5"},}, ) validated_ast = UpdateExpressionValidator( update_expression_ast, @@ -347,10 +332,7 @@ def test_execution_of_add_set_to_a_number(): hash_key_type="TYPE", range_key=None, range_key_type=None, - attrs={ - "id": {"S": "foo2"}, - "s": {"N": "5"}, - }, + attrs={"id": {"S": "foo2"}, "s": {"N": "5"},}, ) try: validated_ast = UpdateExpressionValidator( @@ -381,10 +363,7 @@ def test_execution_of_add_to_a_set(): hash_key_type="TYPE", range_key=None, range_key_type=None, - attrs={ - "id": {"S": "foo2"}, - "s": {"SS": ["value1", "value2", "value3"]}, - }, + attrs={"id": {"S": "foo2"}, "s": {"SS": ["value1", "value2", "value3"]},}, ) validated_ast = UpdateExpressionValidator( update_expression_ast, @@ -406,37 +385,17 @@ def test_execution_of_add_to_a_set(): assert expected_item == item -@pytest.mark.parametrize("expression_attribute_values,unexpected_data_type", +@pytest.mark.parametrize( + "expression_attribute_values,unexpected_data_type", [ - ( - {":value": {"S": "10"}}, - "STRING", - ), - ( - {":value": {"N": "10"}}, - "NUMBER", - ), - ( - {":value": {"B": "10"}}, - "BINARY", - ), - ( - {":value": {"BOOL": True}}, - "BOOLEAN", - ), - ( - {":value": {"NULL": True}}, - "NULL", - ), - ( - {":value": {"M": {"el0": {"S": "10"}}}}, - "MAP", - ), - ( - {":value": {"L": []}}, - "LIST", - ), - ] + ({":value": {"S": "10"}}, "STRING",), + ({":value": {"N": "10"}}, "NUMBER",), + ({":value": {"B": "10"}}, "BINARY",), + ({":value": {"BOOL": True}}, "BOOLEAN",), + ({":value": {"NULL": True}}, "NULL",), + ({":value": {"M": {"el0": {"S": "10"}}}}, "MAP",), + ({":value": {"L": []}}, "LIST",), + ], ) def test_execution_of__delete_element_from_set_invalid_value( expression_attribute_values, unexpected_data_type @@ -449,10 +408,7 @@ def test_execution_of__delete_element_from_set_invalid_value( hash_key_type="TYPE", range_key=None, range_key_type=None, - attrs={ - "id": {"S": "foo2"}, - "s": {"SS": ["value1", "value2", "value3"]}, - }, + attrs={"id": {"S": "foo2"}, "s": {"SS": ["value1", "value2", "value3"]},}, ) try: validated_ast = UpdateExpressionValidator( @@ -477,10 +433,7 @@ def test_execution_of_delete_element_from_a_string_attribute(): hash_key_type="TYPE", range_key=None, range_key_type=None, - attrs={ - "id": {"S": "foo2"}, - "s": {"S": "5"}, - }, + attrs={"id": {"S": "foo2"}, "s": {"S": "5"},}, ) try: validated_ast = UpdateExpressionValidator( diff --git a/tests/test_dynamodb2/test_dynamodb_validation.py 
b/tests/test_dynamodb2/test_dynamodb_validation.py index 4d14beefe2d7..8761d2cd270d 100644 --- a/tests/test_dynamodb2/test_dynamodb_validation.py +++ b/tests/test_dynamodb2/test_dynamodb_validation.py @@ -41,11 +41,8 @@ def test_validation_of_update_expression_with_keyword(): assert e.keyword == "path" -@pytest.mark.parametrize("update_expression", - [ - "SET a = #b + :val2", - "SET a = :val2 + #b", - ] +@pytest.mark.parametrize( + "update_expression", ["SET a = #b + :val2", "SET a = :val2 + #b",] ) def test_validation_of_a_set_statement_with_incorrect_passed_value(update_expression): """ @@ -101,12 +98,7 @@ def test_validation_of_update_expression_with_attribute_that_does_not_exist_in_i assert True -@pytest.mark.parametrize("update_expression", - [ - "SET a = #c", - "SET a = #c + #d", - ] -) +@pytest.mark.parametrize("update_expression", ["SET a = #c", "SET a = #c + #d",]) def test_validation_of_update_expression_with_attribute_name_that_is_not_defined( update_expression, ): diff --git a/tests/test_ec2/test_elastic_block_store.py b/tests/test_ec2/test_elastic_block_store.py index b5d1d33f63ec..d0b1dee2d6d1 100644 --- a/tests/test_ec2/test_elastic_block_store.py +++ b/tests/test_ec2/test_elastic_block_store.py @@ -2,6 +2,7 @@ import boto import boto3 + # Ensure 'pytest.raises' context manager support for Python 2.6 import pytest import sure # noqa diff --git a/tests/test_ec2/test_flow_logs.py b/tests/test_ec2/test_flow_logs.py index 0bf7a61d00e3..743466eaa472 100644 --- a/tests/test_ec2/test_flow_logs.py +++ b/tests/test_ec2/test_flow_logs.py @@ -144,9 +144,7 @@ def test_create_flow_log_create(): bucket = s3.create_bucket( Bucket="test-flow-logs", - CreateBucketConfiguration={ - "LocationConstraint": "us-west-1", - }, + CreateBucketConfiguration={"LocationConstraint": "us-west-1",}, ) response = client.create_flow_logs( diff --git a/tests/test_ec2/test_instances.py b/tests/test_ec2/test_instances.py index 28aeb62a959e..146e3c69697d 100644 --- a/tests/test_ec2/test_instances.py +++ b/tests/test_ec2/test_instances.py @@ -211,9 +211,7 @@ def test_instance_detach_volume_wrong_path(): ImageId="ami-d3adb33f", MinCount=1, MaxCount=1, - BlockDeviceMappings=[ - {"DeviceName": "/dev/sda1", "Ebs": {"VolumeSize": 50}}, - ], + BlockDeviceMappings=[{"DeviceName": "/dev/sda1", "Ebs": {"VolumeSize": 50}},], ) instance = result[0] for volume in instance.volumes.all(): @@ -1585,9 +1583,7 @@ def test_create_instance_ebs_optimized(): instance.ebs_optimized.should.be(False) instance = ec2_resource.create_instances( - ImageId="ami-12345678", - MaxCount=1, - MinCount=1, + ImageId="ami-12345678", MaxCount=1, MinCount=1, )[0] instance.load() instance.ebs_optimized.should.be(False) diff --git a/tests/test_ec2/test_subnets.py b/tests/test_ec2/test_subnets.py index 66b23e790eec..246cacf6b4e8 100644 --- a/tests/test_ec2/test_subnets.py +++ b/tests/test_ec2/test_subnets.py @@ -661,11 +661,7 @@ def test_run_instances_should_attach_to_default_subnet(): client = boto3.client("ec2", region_name="us-west-1") ec2.create_security_group(GroupName="sg01", Description="Test security group sg01") # run_instances - instances = client.run_instances( - MinCount=1, - MaxCount=1, - SecurityGroups=["sg01"], - ) + instances = client.run_instances(MinCount=1, MaxCount=1, SecurityGroups=["sg01"],) # Assert subnet is created appropriately subnets = client.describe_subnets()["Subnets"] default_subnet_id = subnets[0]["SubnetId"] diff --git a/tests/test_ec2/test_vpn_connections.py b/tests/test_ec2/test_vpn_connections.py index 
95bd7b66c05b..ca8897417345 100644 --- a/tests/test_ec2/test_vpn_connections.py +++ b/tests/test_ec2/test_vpn_connections.py @@ -60,9 +60,7 @@ def test_create_vpn_connection_with_vpn_gateway(): vpn_gateway = client.create_vpn_gateway(Type="ipsec.1").get("VpnGateway", {}) customer_gateway = client.create_customer_gateway( - Type="ipsec.1", - PublicIp="205.251.242.54", - BgpAsn=65534, + Type="ipsec.1", PublicIp="205.251.242.54", BgpAsn=65534, ).get("CustomerGateway", {}) vpn_connection = client.create_vpn_connection( Type="ipsec.1", diff --git a/tests/test_ecs/test_ecs_boto3.py b/tests/test_ecs/test_ecs_boto3.py index b535f5713c04..8b6b2798724f 100644 --- a/tests/test_ecs/test_ecs_boto3.py +++ b/tests/test_ecs/test_ecs_boto3.py @@ -2531,9 +2531,7 @@ def test_describe_task_sets(): assert "tags" not in task_sets[0] task_sets = client.describe_task_sets( - cluster=cluster_name, - service=service_name, - include=["TAGS"], + cluster=cluster_name, service=service_name, include=["TAGS"], )["taskSets"] cluster_arn = client.describe_clusters(clusters=[cluster_name])["clusters"][0][ @@ -2593,39 +2591,29 @@ def test_delete_task_set(): ) task_set = client.create_task_set( - cluster=cluster_name, - service=service_name, - taskDefinition=task_def_name, + cluster=cluster_name, service=service_name, taskDefinition=task_def_name, )["taskSet"] task_sets = client.describe_task_sets( - cluster=cluster_name, - service=service_name, - taskSets=[task_set["taskSetArn"]], + cluster=cluster_name, service=service_name, taskSets=[task_set["taskSetArn"]], )["taskSets"] assert len(task_sets) == 1 response = client.delete_task_set( - cluster=cluster_name, - service=service_name, - taskSet=task_set["taskSetArn"], + cluster=cluster_name, service=service_name, taskSet=task_set["taskSetArn"], ) assert response["taskSet"]["taskSetArn"] == task_set["taskSetArn"] task_sets = client.describe_task_sets( - cluster=cluster_name, - service=service_name, - taskSets=[task_set["taskSetArn"]], + cluster=cluster_name, service=service_name, taskSets=[task_set["taskSetArn"]], )["taskSets"] assert len(task_sets) == 0 with pytest.raises(ClientError): _ = client.delete_task_set( - cluster=cluster_name, - service=service_name, - taskSet=task_set["taskSetArn"], + cluster=cluster_name, service=service_name, taskSet=task_set["taskSetArn"], ) @@ -2661,9 +2649,7 @@ def test_update_service_primary_task_set(): ) task_set = client.create_task_set( - cluster=cluster_name, - service=service_name, - taskDefinition=task_def_name, + cluster=cluster_name, service=service_name, taskDefinition=task_def_name, )["taskSet"] service = client.describe_services(cluster=cluster_name, services=[service_name],)[ @@ -2683,9 +2669,7 @@ def test_update_service_primary_task_set(): assert service["taskDefinition"] == service["taskSets"][0]["taskDefinition"] another_task_set = client.create_task_set( - cluster=cluster_name, - service=service_name, - taskDefinition=task_def_name, + cluster=cluster_name, service=service_name, taskDefinition=task_def_name, )["taskSet"] service = client.describe_services(cluster=cluster_name, services=[service_name],)[ "services" @@ -2737,15 +2721,11 @@ def test_update_task_set(): ) task_set = client.create_task_set( - cluster=cluster_name, - service=service_name, - taskDefinition=task_def_name, + cluster=cluster_name, service=service_name, taskDefinition=task_def_name, )["taskSet"] another_task_set = client.create_task_set( - cluster=cluster_name, - service=service_name, - taskDefinition=task_def_name, + cluster=cluster_name, 
service=service_name, taskDefinition=task_def_name, )["taskSet"] assert another_task_set["scale"]["unit"] == "PERCENT" assert another_task_set["scale"]["value"] == 100.0 @@ -2758,9 +2738,7 @@ def test_update_task_set(): ) updated_task_set = client.describe_task_sets( - cluster=cluster_name, - service=service_name, - taskSets=[task_set["taskSetArn"]], + cluster=cluster_name, service=service_name, taskSets=[task_set["taskSetArn"]], )["taskSets"][0] assert updated_task_set["scale"]["value"] == 25.0 assert updated_task_set["scale"]["unit"] == "PERCENT" @@ -2806,13 +2784,11 @@ def test_list_tasks_with_filters(): } _ = ecs.register_task_definition( - family="test_task_def_1", - containerDefinitions=[test_container_def], + family="test_task_def_1", containerDefinitions=[test_container_def], ) _ = ecs.register_task_definition( - family="test_task_def_2", - containerDefinitions=[test_container_def], + family="test_task_def_2", containerDefinitions=[test_container_def], ) _ = ecs.start_task( diff --git a/tests/test_elasticbeanstalk/test_eb.py b/tests/test_elasticbeanstalk/test_eb.py index 8eb32d24ed71..42eb09be3eba 100644 --- a/tests/test_elasticbeanstalk/test_eb.py +++ b/tests/test_elasticbeanstalk/test_eb.py @@ -9,30 +9,24 @@ def test_create_application(): # Create Elastic Beanstalk Application conn = boto3.client("elasticbeanstalk", region_name="us-east-1") - app = conn.create_application( - ApplicationName="myapp", - ) + app = conn.create_application(ApplicationName="myapp",) app["Application"]["ApplicationName"].should.equal("myapp") @mock_elasticbeanstalk def test_create_application_dup(): conn = boto3.client("elasticbeanstalk", region_name="us-east-1") - conn.create_application( - ApplicationName="myapp", + conn.create_application(ApplicationName="myapp",) + conn.create_application.when.called_with(ApplicationName="myapp",).should.throw( + ClientError ) - conn.create_application.when.called_with( - ApplicationName="myapp", - ).should.throw(ClientError) @mock_elasticbeanstalk def test_describe_applications(): # Create Elastic Beanstalk Application conn = boto3.client("elasticbeanstalk", region_name="us-east-1") - conn.create_application( - ApplicationName="myapp", - ) + conn.create_application(ApplicationName="myapp",) apps = conn.describe_applications() len(apps["Applications"]).should.equal(1) @@ -43,13 +37,8 @@ def test_describe_applications(): def test_create_environment(): # Create Elastic Beanstalk Environment conn = boto3.client("elasticbeanstalk", region_name="us-east-1") - app = conn.create_application( - ApplicationName="myapp", - ) - env = conn.create_environment( - ApplicationName="myapp", - EnvironmentName="myenv", - ) + app = conn.create_application(ApplicationName="myapp",) + env = conn.create_environment(ApplicationName="myapp", EnvironmentName="myenv",) env["EnvironmentName"].should.equal("myenv") @@ -57,12 +46,9 @@ def test_create_environment(): def test_describe_environments(): # List Elastic Beanstalk Envs conn = boto3.client("elasticbeanstalk", region_name="us-east-1") - conn.create_application( - ApplicationName="myapp", - ) + conn.create_application(ApplicationName="myapp",) conn.create_environment( - ApplicationName="myapp", - EnvironmentName="myenv", + ApplicationName="myapp", EnvironmentName="myenv", ) envs = conn.describe_environments() @@ -89,9 +75,7 @@ def tags_list_to_dict(tag_list): @mock_elasticbeanstalk def test_create_environment_tags(): conn = boto3.client("elasticbeanstalk", region_name="us-east-1") - conn.create_application( - ApplicationName="myapp", - ) + 
conn.create_application(ApplicationName="myapp",) env_tags = {"initial key": "initial value"} env = conn.create_environment( ApplicationName="myapp", @@ -99,9 +83,7 @@ def test_create_environment_tags(): Tags=tags_dict_to_list(env_tags), ) - tags = conn.list_tags_for_resource( - ResourceArn=env["EnvironmentArn"], - ) + tags = conn.list_tags_for_resource(ResourceArn=env["EnvironmentArn"],) tags["ResourceArn"].should.equal(env["EnvironmentArn"]) tags_list_to_dict(tags["ResourceTags"]).should.equal(env_tags) @@ -109,9 +91,7 @@ def test_create_environment_tags(): @mock_elasticbeanstalk def test_update_tags(): conn = boto3.client("elasticbeanstalk", region_name="us-east-1") - conn.create_application( - ApplicationName="myapp", - ) + conn.create_application(ApplicationName="myapp",) env_tags = { "initial key": "initial value", "to remove": "delete me", @@ -137,9 +117,7 @@ def test_update_tags(): total_env_tags.update(extra_env_tags) del total_env_tags["to remove"] - tags = conn.list_tags_for_resource( - ResourceArn=env["EnvironmentArn"], - ) + tags = conn.list_tags_for_resource(ResourceArn=env["EnvironmentArn"],) tags["ResourceArn"].should.equal(env["EnvironmentArn"]) tags_list_to_dict(tags["ResourceTags"]).should.equal(total_env_tags) diff --git a/tests/test_emr/test_emr_boto3.py b/tests/test_emr/test_emr_boto3.py index 393121c4d296..de8f4edbb0ca 100644 --- a/tests/test_emr/test_emr_boto3.py +++ b/tests/test_emr/test_emr_boto3.py @@ -525,10 +525,8 @@ def test_run_job_flow_with_instance_groups_with_autoscaling(): if "AutoScalingPolicy" in y: x["AutoScalingPolicy"]["Status"]["State"].should.equal("ATTACHED") returned_policy = deepcopy(x["AutoScalingPolicy"]) - auto_scaling_policy_with_cluster_id = ( - _patch_cluster_id_placeholder_in_autoscaling_policy( - y["AutoScalingPolicy"], cluster_id - ) + auto_scaling_policy_with_cluster_id = _patch_cluster_id_placeholder_in_autoscaling_policy( + y["AutoScalingPolicy"], cluster_id ) del returned_policy["Status"] returned_policy.should.equal(auto_scaling_policy_with_cluster_id) @@ -554,10 +552,8 @@ def test_put_remove_auto_scaling_policy(): AutoScalingPolicy=auto_scaling_policy, ) - auto_scaling_policy_with_cluster_id = ( - _patch_cluster_id_placeholder_in_autoscaling_policy( - auto_scaling_policy, cluster_id - ) + auto_scaling_policy_with_cluster_id = _patch_cluster_id_placeholder_in_autoscaling_policy( + auto_scaling_policy, cluster_id ) del resp["AutoScalingPolicy"]["Status"] resp["AutoScalingPolicy"].should.equal(auto_scaling_policy_with_cluster_id) @@ -804,7 +800,11 @@ def test_instance_groups(): x["AutoScalingPolicy"]["Status"]["State"].should.equal("ATTACHED") returned_policy = dict(x["AutoScalingPolicy"]) del returned_policy["Status"] - policy = json.loads(json.dumps(y["AutoScalingPolicy"]).replace("${emr.clusterId}", cluster_id)) + policy = json.loads( + json.dumps(y["AutoScalingPolicy"]).replace( + "${emr.clusterId}", cluster_id + ) + ) returned_policy.should.equal(policy) if "EbsConfiguration" in y: _do_assertion_ebs_configuration(x, y) diff --git a/tests/test_forecast/test_forecast.py b/tests/test_forecast/test_forecast.py index 03503fec2826..7936a500d4fa 100644 --- a/tests/test_forecast/test_forecast.py +++ b/tests/test_forecast/test_forecast.py @@ -71,9 +71,7 @@ def test_forecast_dataset_group_create_duplicate_fails(): with pytest.raises(ClientError) as exc: client.create_dataset_group(DatasetGroupName="name", Domain="RETAIL") - exc.value.response["Error"]["Code"].should.equal( - "ResourceAlreadyExistsException" - ) + 
exc.value.response["Error"]["Code"].should.equal("ResourceAlreadyExistsException") @mock_forecast diff --git a/tests/test_iam/test_iam.py b/tests/test_iam/test_iam.py index 7e8d1560f501..bd9a8ab82aeb 100644 --- a/tests/test_iam/test_iam.py +++ b/tests/test_iam/test_iam.py @@ -207,9 +207,7 @@ def test_remove_role_from_instance_profile(): def test_delete_instance_profile(): conn = boto3.client("iam", region_name="us-east-1") conn.create_role( - RoleName="my-role", - AssumeRolePolicyDocument="some policy", - Path="/my-path/", + RoleName="my-role", AssumeRolePolicyDocument="some policy", Path="/my-path/", ) conn.create_instance_profile(InstanceProfileName="my-profile") conn.add_role_to_instance_profile( @@ -259,9 +257,7 @@ def test_delete_role(): # Test deletion failure with a managed policy conn.create_role( - RoleName="my-role", - AssumeRolePolicyDocument="some policy", - Path="/my-path/", + RoleName="my-role", AssumeRolePolicyDocument="some policy", Path="/my-path/", ) response = conn.create_policy( PolicyName="my-managed-policy", PolicyDocument=MOCK_POLICY @@ -277,14 +273,10 @@ def test_delete_role(): # Test deletion failure with an inline policy conn.create_role( - RoleName="my-role", - AssumeRolePolicyDocument="some policy", - Path="/my-path/", + RoleName="my-role", AssumeRolePolicyDocument="some policy", Path="/my-path/", ) conn.put_role_policy( - RoleName="my-role", - PolicyName="my-role-policy", - PolicyDocument=MOCK_POLICY, + RoleName="my-role", PolicyName="my-role-policy", PolicyDocument=MOCK_POLICY, ) with pytest.raises(conn.exceptions.DeleteConflictException): conn.delete_role(RoleName="my-role") @@ -295,9 +287,7 @@ def test_delete_role(): # Test deletion failure with attachment to an instance profile conn.create_role( - RoleName="my-role", - AssumeRolePolicyDocument="some policy", - Path="/my-path/", + RoleName="my-role", AssumeRolePolicyDocument="some policy", Path="/my-path/", ) conn.create_instance_profile(InstanceProfileName="my-profile") conn.add_role_to_instance_profile( @@ -314,9 +304,7 @@ def test_delete_role(): # Test deletion with no conflicts conn.create_role( - RoleName="my-role", - AssumeRolePolicyDocument="some policy", - Path="/my-path/", + RoleName="my-role", AssumeRolePolicyDocument="some policy", Path="/my-path/", ) conn.delete_role(RoleName="my-role") with pytest.raises(conn.exceptions.NoSuchEntityException): @@ -343,9 +331,7 @@ def test_list_instance_profiles_for_role(): conn = boto.connect_iam() conn.create_role( - role_name="my-role", - assume_role_policy_document="some policy", - path="my-path", + role_name="my-role", assume_role_policy_document="some policy", path="my-path", ) conn.create_role( role_name="my-role2", @@ -357,8 +343,7 @@ def test_list_instance_profiles_for_role(): profile_path_list = ["my-path", "my-path2"] for profile_count in range(0, 2): conn.create_instance_profile( - profile_name_list[profile_count], - path=profile_path_list[profile_count], + profile_name_list[profile_count], path=profile_path_list[profile_count], ) for profile_count in range(0, 2): @@ -424,9 +409,7 @@ def test_put_role_policy(): def test_get_role_policy(): conn = boto3.client("iam", region_name="us-east-1") conn.create_role( - RoleName="my-role", - AssumeRolePolicyDocument="some policy", - Path="my-path", + RoleName="my-role", AssumeRolePolicyDocument="some policy", Path="my-path", ) with pytest.raises(conn.exceptions.NoSuchEntityException): conn.get_role_policy(RoleName="my-role", PolicyName="does-not-exist") @@ -1039,8 +1022,7 @@ def 
test_create_virtual_mfa_device_errors(): client.create_virtual_mfa_device.when.called_with( VirtualMFADeviceName="test-device" ).should.throw( - ClientError, - "MFADevice entity at the same path and name already exists.", + ClientError, "MFADevice entity at the same path and name already exists.", ) client.create_virtual_mfa_device.when.called_with( @@ -1229,9 +1211,7 @@ def test_delete_user(): # Test deletion failure with an inline policy conn.create_user(UserName="my-user") conn.put_user_policy( - UserName="my-user", - PolicyName="my-user-policy", - PolicyDocument=MOCK_POLICY, + UserName="my-user", PolicyName="my-user-policy", PolicyDocument=MOCK_POLICY, ) with pytest.raises(conn.exceptions.DeleteConflictException): conn.delete_user(UserName="my-user") @@ -1416,9 +1396,7 @@ def test_managed_policy(): role_name = "my-role" conn.create_role( - role_name, - assume_role_policy_document={"policy": "test"}, - path="my-path", + role_name, assume_role_policy_document={"policy": "test"}, path="my-path", ) for policy_name in [ "AmazonElasticMapReduceRole", @@ -1445,8 +1423,7 @@ def test_managed_policy(): ].should.have.length_of(2) conn.detach_role_policy( - "arn:aws:iam::aws:policy/service-role/AmazonElasticMapReduceRole", - role_name, + "arn:aws:iam::aws:policy/service-role/AmazonElasticMapReduceRole", role_name, ) rows = conn.list_policies(only_attached=True)["list_policies_response"][ "list_policies_result" @@ -1586,9 +1563,7 @@ def test_get_ssh_public_key(): with pytest.raises(ClientError): client.get_ssh_public_key( - UserName=username, - SSHPublicKeyId="xxnon-existent-keyxx", - Encoding="SSH", + UserName=username, SSHPublicKeyId="xxnon-existent-keyxx", Encoding="SSH", ) resp = client.upload_ssh_public_key(UserName=username, SSHPublicKeyBody=public_key) @@ -1629,9 +1604,7 @@ def test_update_ssh_public_key(): with pytest.raises(ClientError): client.update_ssh_public_key( - UserName=username, - SSHPublicKeyId="xxnon-existent-keyxx", - Status="Inactive", + UserName=username, SSHPublicKeyId="xxnon-existent-keyxx", Status="Inactive", ) resp = client.upload_ssh_public_key(UserName=username, SSHPublicKeyBody=public_key) @@ -1709,9 +1682,7 @@ def test_get_account_authorization_details(): UserName="testUser", PolicyName="testPolicy", PolicyDocument=test_policy ) conn.put_group_policy( - GroupName="testGroup", - PolicyName="testPolicy", - PolicyDocument=test_policy, + GroupName="testGroup", PolicyName="testPolicy", PolicyDocument=test_policy, ) conn.attach_user_policy( @@ -2011,9 +1982,7 @@ def test_create_role_with_tags(): map(lambda x: {"Key": str(x), "Value": str(x)}, range(0, 51)) ) conn.create_role( - RoleName="my-role3", - AssumeRolePolicyDocument="{}", - Tags=too_many_tags, + RoleName="my-role3", AssumeRolePolicyDocument="{}", Tags=too_many_tags, ) assert ( "failed to satisfy constraint: Member must have length less than or equal to 50." 
@@ -2279,9 +2248,7 @@ def test_update_role_description(): conn.delete_role(RoleName="my-role") conn.create_role( - RoleName="my-role", - AssumeRolePolicyDocument="some policy", - Path="/my-path/", + RoleName="my-role", AssumeRolePolicyDocument="some policy", Path="/my-path/", ) response = conn.update_role_description(RoleName="my-role", Description="test") @@ -2296,9 +2263,7 @@ def test_update_role(): conn.delete_role(RoleName="my-role") conn.create_role( - RoleName="my-role", - AssumeRolePolicyDocument="some policy", - Path="/my-path/", + RoleName="my-role", AssumeRolePolicyDocument="some policy", Path="/my-path/", ) response = conn.update_role_description(RoleName="my-role", Description="test") assert response["Role"]["RoleName"] == "my-role" @@ -2312,9 +2277,7 @@ def test_update_role(): conn.delete_role(RoleName="my-role") conn.create_role( - RoleName="my-role", - AssumeRolePolicyDocument="some policy", - Path="/my-path/", + RoleName="my-role", AssumeRolePolicyDocument="some policy", Path="/my-path/", ) response = conn.update_role(RoleName="my-role", Description="test") assert len(response.keys()) == 1 @@ -2355,9 +2318,7 @@ def test_list_entities_for_policy(): conn = boto3.client("iam", region_name="us-east-1") conn.create_role( - RoleName="my-role", - AssumeRolePolicyDocument="some policy", - Path="/my-path/", + RoleName="my-role", AssumeRolePolicyDocument="some policy", Path="/my-path/", ) conn.create_user(Path="/", UserName="testUser") conn.create_group(Path="/", GroupName="testGroup") @@ -2373,9 +2334,7 @@ def test_list_entities_for_policy(): UserName="testUser", PolicyName="testPolicy", PolicyDocument=test_policy ) conn.put_group_policy( - GroupName="testGroup", - PolicyName="testPolicy", - PolicyDocument=test_policy, + GroupName="testGroup", PolicyName="testPolicy", PolicyDocument=test_policy, ) conn.attach_user_policy( @@ -2438,9 +2397,7 @@ def test_list_entities_for_policy(): def test_create_role_no_path(): conn = boto3.client("iam", region_name="us-east-1") resp = conn.create_role( - RoleName="my-role", - AssumeRolePolicyDocument="some policy", - Description="test", + RoleName="my-role", AssumeRolePolicyDocument="some policy", Description="test", ) resp.get("Role").get("Arn").should.equal( "arn:aws:iam::{}:role/my-role".format(ACCOUNT_ID) @@ -2496,9 +2453,7 @@ def test_create_role_with_same_name_should_fail(): iam = boto3.client("iam", region_name="us-east-1") test_role_name = str(uuid4()) iam.create_role( - RoleName=test_role_name, - AssumeRolePolicyDocument="policy", - Description="test", + RoleName=test_role_name, AssumeRolePolicyDocument="policy", Description="test", ) # Create the role again, and verify that it fails with pytest.raises(ClientError) as err: @@ -2585,24 +2540,14 @@ def test_create_open_id_connect_provider_errors(): client.create_open_id_connect_provider.when.called_with( Url="http://example.org", - ThumbprintList=[ - "a" * 40, - "b" * 40, - "c" * 40, - "d" * 40, - "e" * 40, - "f" * 40, - ], + ThumbprintList=["a" * 40, "b" * 40, "c" * 40, "d" * 40, "e" * 40, "f" * 40,], ).should.throw(ClientError, "Thumbprint list must contain fewer than 5 entries.") too_many_client_ids = ["{}".format(i) for i in range(101)] client.create_open_id_connect_provider.when.called_with( - Url="http://example.org", - ThumbprintList=[], - ClientIDList=too_many_client_ids, + Url="http://example.org", ThumbprintList=[], ClientIDList=too_many_client_ids, ).should.throw( - ClientError, - "Cannot exceed quota for ClientIdsPerOpenIdConnectProvider: 100", + ClientError, "Cannot exceed quota 
for ClientIdsPerOpenIdConnectProvider: 100", ) too_long_url = "b" * 256 @@ -2643,8 +2588,7 @@ def test_delete_open_id_connect_provider(): client.get_open_id_connect_provider.when.called_with( OpenIDConnectProviderArn=open_id_arn ).should.throw( - ClientError, - "OpenIDConnect Provider not found for arn {}".format(open_id_arn), + ClientError, "OpenIDConnect Provider not found for arn {}".format(open_id_arn), ) # deleting a non existing provider should be successful @@ -2736,9 +2680,7 @@ def test_update_account_password_policy_errors(): client = boto3.client("iam", region_name="us-east-1") client.update_account_password_policy.when.called_with( - MaxPasswordAge=1096, - MinimumPasswordLength=129, - PasswordReusePrevention=25, + MaxPasswordAge=1096, MinimumPasswordLength=129, PasswordReusePrevention=25, ).should.throw( ClientError, "3 validation errors detected: " @@ -2816,8 +2758,7 @@ def test_delete_account_password_policy_errors(): client = boto3.client("iam", region_name="us-east-1") client.delete_account_password_policy.when.called_with().should.throw( - ClientError, - "The account policy with name PasswordPolicy cannot be found.", + ClientError, "The account policy with name PasswordPolicy cannot be found.", ) @@ -2945,8 +2886,7 @@ def test_list_user_tags(): conn = boto3.client("iam", region_name="us-east-1") conn.create_user(UserName="kenny-bania") conn.create_user( - UserName="jackie-chiles", - Tags=[{"Key": "Sue-Allen", "Value": "Oh-Henry"}], + UserName="jackie-chiles", Tags=[{"Key": "Sue-Allen", "Value": "Oh-Henry"}], ) conn.create_user( UserName="cosmo", @@ -2965,10 +2905,7 @@ def test_list_user_tags(): response = conn.list_user_tags(UserName="cosmo") response["Tags"].should.equal( - [ - {"Key": "Stan", "Value": "The Caddy"}, - {"Key": "like-a", "Value": "glove"}, - ] + [{"Key": "Stan", "Value": "The Caddy"}, {"Key": "like-a", "Value": "glove"},] ) response["IsTruncated"].should_not.be.ok @@ -3011,8 +2948,7 @@ def test_delete_account_password_policy_errors(): client = boto3.client("iam", region_name="us-east-1") client.delete_account_password_policy.when.called_with().should.throw( - ClientError, - "The account policy with name PasswordPolicy cannot be found.", + ClientError, "The account policy with name PasswordPolicy cannot be found.", ) @@ -3041,10 +2977,7 @@ def test_role_list_config_discovered_resources(): max_session_duration=3600, ) roles.append( - { - "id": this_role.id, - "name": this_role.name, - } + {"id": this_role.id, "name": this_role.name,} ) assert len(roles) == num_roles @@ -3102,11 +3035,7 @@ def test_role_config_dict(): basic_assume_role = { "Version": "2012-10-17", "Statement": [ - { - "Effect": "Allow", - "Principal": {"AWS": "*"}, - "Action": "sts:AssumeRole", - } + {"Effect": "Allow", "Principal": {"AWS": "*"}, "Action": "sts:AssumeRole",} ], } @@ -3423,9 +3352,7 @@ def test_role_config_client(): # Test non-aggregated pagination assert ( config_client.list_discovered_resources( - resourceType="AWS::IAM::Role", - limit=1, - nextToken=result["nextToken"], + resourceType="AWS::IAM::Role", limit=1, nextToken=result["nextToken"], )["resourceIdentifiers"][0]["resourceId"] ) != first_result @@ -3461,18 +3388,14 @@ def test_role_config_client(): # Test non-aggregated resource name/id filter assert ( config_client.list_discovered_resources( - resourceType="AWS::IAM::Role", - resourceName=roles[1]["name"], - limit=1, + resourceType="AWS::IAM::Role", resourceName=roles[1]["name"], limit=1, )["resourceIdentifiers"][0]["resourceName"] == roles[1]["name"] ) assert ( 
config_client.list_discovered_resources( - resourceType="AWS::IAM::Role", - resourceIds=[roles[0]["id"]], - limit=1, + resourceType="AWS::IAM::Role", resourceIds=[roles[0]["id"]], limit=1, )["resourceIdentifiers"][0]["resourceName"] == roles[0]["name"] ) @@ -3518,17 +3441,13 @@ def test_role_config_client(): # Test non-aggregated resource name/id filter assert ( config_client.list_discovered_resources( - resourceType="AWS::IAM::Role", - resourceName=roles[1]["name"], - limit=1, + resourceType="AWS::IAM::Role", resourceName=roles[1]["name"], limit=1, )["resourceIdentifiers"][0]["resourceName"] == roles[1]["name"] ) assert ( config_client.list_discovered_resources( - resourceType="AWS::IAM::Role", - resourceIds=[roles[0]["id"]], - limit=1, + resourceType="AWS::IAM::Role", resourceIds=[roles[0]["id"]], limit=1, )["resourceIdentifiers"][0]["resourceName"] == roles[0]["name"] ) @@ -3638,10 +3557,7 @@ def test_policy_list_config_discovered_resources(): policy_name="policy{}".format(ix), ) policies.append( - { - "id": this_policy.id, - "name": this_policy.name, - } + {"id": this_policy.id, "name": this_policy.name,} ) assert len(policies) == num_policies @@ -3866,9 +3782,7 @@ def test_policy_config_client(): # Test non-aggregated pagination assert ( config_client.list_discovered_resources( - resourceType="AWS::IAM::Policy", - limit=1, - nextToken=result["nextToken"], + resourceType="AWS::IAM::Policy", limit=1, nextToken=result["nextToken"], )["resourceIdentifiers"][0]["resourceId"] ) != first_result @@ -3905,18 +3819,14 @@ def test_policy_config_client(): # Test non-aggregated resource name/id filter assert ( config_client.list_discovered_resources( - resourceType="AWS::IAM::Policy", - resourceName=policies[1]["name"], - limit=1, + resourceType="AWS::IAM::Policy", resourceName=policies[1]["name"], limit=1, )["resourceIdentifiers"][0]["resourceName"] == policies[1]["name"] ) assert ( config_client.list_discovered_resources( - resourceType="AWS::IAM::Policy", - resourceIds=[policies[0]["id"]], - limit=1, + resourceType="AWS::IAM::Policy", resourceIds=[policies[0]["id"]], limit=1, )["resourceIdentifiers"][0]["resourceName"] == policies[0]["name"] ) @@ -3997,10 +3907,7 @@ def test_policy_config_client(): assert ( config_client.batch_get_resource_config( resourceKeys=[ - { - "resourceType": "AWS::IAM::Policy", - "resourceId": policies[7]["id"], - } + {"resourceType": "AWS::IAM::Policy", "resourceId": policies[7]["id"],} ] )["baseConfigurationItems"][0]["resourceName"] == policies[7]["name"] diff --git a/tests/test_iot/test_iot.py b/tests/test_iot/test_iot.py index a6847adc24b6..7a39e0987cf0 100644 --- a/tests/test_iot/test_iot.py +++ b/tests/test_iot/test_iot.py @@ -1017,9 +1017,7 @@ def test_delete_thing_group(): group_name_1a = "my-group-name-1a" group_name_2a = "my-group-name-2a" tree_dict = { - group_name_1a: { - group_name_2a: {}, - }, + group_name_1a: {group_name_2a: {},}, } group_catalog = generate_thing_group_tree(client, tree_dict) diff --git a/tests/test_kinesisvideoarchivedmedia/test_kinesisvideoarchivedmedia.py b/tests/test_kinesisvideoarchivedmedia/test_kinesisvideoarchivedmedia.py index 65785f2e27e8..ee44391977c9 100644 --- a/tests/test_kinesisvideoarchivedmedia/test_kinesisvideoarchivedmedia.py +++ b/tests/test_kinesisvideoarchivedmedia/test_kinesisvideoarchivedmedia.py @@ -24,9 +24,7 @@ def test_get_hls_streaming_session_url(): region_name=region_name, endpoint_url=data_endpoint, ) - res = client.get_hls_streaming_session_url( - StreamName=stream_name, - ) + res = 
client.get_hls_streaming_session_url(StreamName=stream_name,) reg_exp = "^{}/hls/v1/getHLSMasterPlaylist.m3u8\?SessionToken\=.+$".format( data_endpoint ) @@ -50,9 +48,7 @@ def test_get_dash_streaming_session_url(): region_name=region_name, endpoint_url=data_endpoint, ) - res = client.get_dash_streaming_session_url( - StreamName=stream_name, - ) + res = client.get_dash_streaming_session_url(StreamName=stream_name,) reg_exp = "^{}/dash/v1/getDASHManifest.mpd\?SessionToken\=.+$".format(data_endpoint) res.should.have.key("DASHStreamingSessionURL").which.should.match(reg_exp) diff --git a/tests/test_kms/test_kms.py b/tests/test_kms/test_kms.py index 6e4b332c4b9c..bb1b013e021b 100644 --- a/tests/test_kms/test_kms.py +++ b/tests/test_kms/test_kms.py @@ -15,7 +15,11 @@ from moto.kms.exceptions import NotFoundException as MotoNotFoundException from moto import mock_kms_deprecated, mock_kms -PLAINTEXT_VECTORS = [b"some encodeable plaintext", b"some unencodeable plaintext \xec\x8a\xcf\xb6r\xe9\xb5\xeb\xff\xa23\x16", "some unicode characters ø˚∆øˆˆ∆ßçøˆˆçßøˆ¨¥"] +PLAINTEXT_VECTORS = [ + b"some encodeable plaintext", + b"some unencodeable plaintext \xec\x8a\xcf\xb6r\xe9\xb5\xeb\xff\xa23\x16", + "some unicode characters ø˚∆øˆˆ∆ßçøˆˆçßøˆ¨¥", +] def _get_encoded_value(plaintext): @@ -570,10 +574,8 @@ def test__delete_alias__raises_if_alias_is_not_found(): with pytest.raises(NotFoundException) as err: kms.delete_alias(alias_name) - expected_message_match = ( - r"Alias arn:aws:kms:{region}:[0-9]{{12}}:{alias_name} is not found.".format( - region=region, alias_name=alias_name - ) + expected_message_match = r"Alias arn:aws:kms:{region}:[0-9]{{12}}:{alias_name} is not found.".format( + region=region, alias_name=alias_name ) ex = err.value ex.body["__type"].should.equal("NotFoundException") diff --git a/tests/test_kms/test_kms_boto3.py b/tests/test_kms/test_kms_boto3.py index ddf315812fb4..25a8d59420f2 100644 --- a/tests/test_kms/test_kms_boto3.py +++ b/tests/test_kms/test_kms_boto3.py @@ -14,9 +14,11 @@ from moto import mock_kms -PLAINTEXT_VECTORS = [b"some encodeable plaintext", - b"some unencodeable plaintext \xec\x8a\xcf\xb6r\xe9\xb5\xeb\xff\xa23\x16", - "some unicode characters ø˚∆øˆˆ∆ßçøˆˆçßøˆ¨¥"] +PLAINTEXT_VECTORS = [ + b"some encodeable plaintext", + b"some unencodeable plaintext \xec\x8a\xcf\xb6r\xe9\xb5\xeb\xff\xa23\x16", + "some unicode characters ø˚∆øˆˆ∆ßçøˆˆçßøˆ¨¥", +] def _get_encoded_value(plaintext): @@ -52,20 +54,14 @@ def test_create_key(): key["KeyMetadata"]["Origin"].should.equal("AWS_KMS") key["KeyMetadata"].should_not.have.key("SigningAlgorithms") - key = conn.create_key( - KeyUsage="ENCRYPT_DECRYPT", - CustomerMasterKeySpec="RSA_2048", - ) + key = conn.create_key(KeyUsage="ENCRYPT_DECRYPT", CustomerMasterKeySpec="RSA_2048",) sorted(key["KeyMetadata"]["EncryptionAlgorithms"]).should.equal( ["RSAES_OAEP_SHA_1", "RSAES_OAEP_SHA_256"] ) key["KeyMetadata"].should_not.have.key("SigningAlgorithms") - key = conn.create_key( - KeyUsage="SIGN_VERIFY", - CustomerMasterKeySpec="RSA_2048", - ) + key = conn.create_key(KeyUsage="SIGN_VERIFY", CustomerMasterKeySpec="RSA_2048",) key["KeyMetadata"].should_not.have.key("EncryptionAlgorithms") sorted(key["KeyMetadata"]["SigningAlgorithms"]).should.equal( @@ -80,24 +76,21 @@ def test_create_key(): ) key = conn.create_key( - KeyUsage="SIGN_VERIFY", - CustomerMasterKeySpec="ECC_SECG_P256K1", + KeyUsage="SIGN_VERIFY", CustomerMasterKeySpec="ECC_SECG_P256K1", ) key["KeyMetadata"].should_not.have.key("EncryptionAlgorithms") 
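    # ECC key specs are signing-only: each curve exposes exactly one ECDSA signing algorithm.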
key["KeyMetadata"]["SigningAlgorithms"].should.equal(["ECDSA_SHA_256"]) key = conn.create_key( - KeyUsage="SIGN_VERIFY", - CustomerMasterKeySpec="ECC_NIST_P384", + KeyUsage="SIGN_VERIFY", CustomerMasterKeySpec="ECC_NIST_P384", ) key["KeyMetadata"].should_not.have.key("EncryptionAlgorithms") key["KeyMetadata"]["SigningAlgorithms"].should.equal(["ECDSA_SHA_384"]) key = conn.create_key( - KeyUsage="SIGN_VERIFY", - CustomerMasterKeySpec="ECC_NIST_P521", + KeyUsage="SIGN_VERIFY", CustomerMasterKeySpec="ECC_NIST_P521", ) key["KeyMetadata"].should_not.have.key("EncryptionAlgorithms") @@ -107,10 +100,7 @@ def test_create_key(): @mock_kms def test_describe_key(): client = boto3.client("kms", region_name="us-east-1") - response = client.create_key( - Description="my key", - KeyUsage="ENCRYPT_DECRYPT", - ) + response = client.create_key(Description="my key", KeyUsage="ENCRYPT_DECRYPT",) key_id = response["KeyMetadata"]["KeyId"] response = client.describe_key(KeyId=key_id) @@ -129,7 +119,14 @@ def test_describe_key(): response["KeyMetadata"].should_not.have.key("SigningAlgorithms") -@pytest.mark.parametrize("key_id", ["alias/does-not-exist", "arn:aws:kms:us-east-1:012345678912:alias/does-not-exist", "invalid"]) +@pytest.mark.parametrize( + "key_id", + [ + "alias/does-not-exist", + "arn:aws:kms:us-east-1:012345678912:alias/does-not-exist", + "invalid", + ], +) @mock_kms def test_describe_key_via_alias_invalid_alias(key_id): client = boto3.client("kms", region_name="us-east-1") @@ -204,8 +201,15 @@ def test_decrypt(plaintext): decrypt_response["KeyId"].should.equal(key_arn) -@pytest.mark.parametrize("key_id", - ["not-a-uuid", "alias/DoesNotExist", "arn:aws:kms:us-east-1:012345678912:alias/DoesNotExist", "d25652e4-d2d2-49f7-929a-671ccda580c6", "arn:aws:kms:us-east-1:012345678912:key/d25652e4-d2d2-49f7-929a-671ccda580c6"] +@pytest.mark.parametrize( + "key_id", + [ + "not-a-uuid", + "alias/DoesNotExist", + "arn:aws:kms:us-east-1:012345678912:alias/DoesNotExist", + "d25652e4-d2d2-49f7-929a-671ccda580c6", + "arn:aws:kms:us-east-1:012345678912:key/d25652e4-d2d2-49f7-929a-671ccda580c6", + ], ) @mock_kms def test_invalid_key_ids(key_id): @@ -352,14 +356,15 @@ def test_list_resource_tags(): assert response["Tags"][0]["TagValue"] == "string" -@pytest.mark.parametrize("kwargs,expected_key_length", +@pytest.mark.parametrize( + "kwargs,expected_key_length", ( (dict(KeySpec="AES_256"), 32), (dict(KeySpec="AES_128"), 16), (dict(NumberOfBytes=64), 64), (dict(NumberOfBytes=1), 1), (dict(NumberOfBytes=1024), 1024), - ) + ), ) @mock_kms def test_generate_data_key_sizes(kwargs, expected_key_length): @@ -384,8 +389,15 @@ def test_generate_data_key_decrypt(): assert resp1["Plaintext"] == resp2["Plaintext"] -@pytest.mark.parametrize("kwargs", - [dict(KeySpec="AES_257"), dict(KeySpec="AES_128", NumberOfBytes=16), dict(NumberOfBytes=2048), dict(NumberOfBytes=0), dict()] +@pytest.mark.parametrize( + "kwargs", + [ + dict(KeySpec="AES_257"), + dict(KeySpec="AES_128", NumberOfBytes=16), + dict(NumberOfBytes=2048), + dict(NumberOfBytes=0), + dict(), + ], ) @mock_kms def test_generate_data_key_invalid_size_params(kwargs): @@ -398,8 +410,14 @@ def test_generate_data_key_invalid_size_params(kwargs): client.generate_data_key(KeyId=key["KeyMetadata"]["KeyId"], **kwargs) -@pytest.mark.parametrize("key_id", - ["alias/DoesNotExist", "arn:aws:kms:us-east-1:012345678912:alias/DoesNotExist", "d25652e4-d2d2-49f7-929a-671ccda580c6", "arn:aws:kms:us-east-1:012345678912:key/d25652e4-d2d2-49f7-929a-671ccda580c6"] +@pytest.mark.parametrize( + 
"key_id", + [ + "alias/DoesNotExist", + "arn:aws:kms:us-east-1:012345678912:alias/DoesNotExist", + "d25652e4-d2d2-49f7-929a-671ccda580c6", + "arn:aws:kms:us-east-1:012345678912:key/d25652e4-d2d2-49f7-929a-671ccda580c6", + ], ) @mock_kms def test_generate_data_key_invalid_key(key_id): @@ -409,8 +427,14 @@ def test_generate_data_key_invalid_key(key_id): client.generate_data_key(KeyId=key_id, KeySpec="AES_256") -@pytest.mark.parametrize("prefix,append_key_id", - [("alias/DoesExist", False), ("arn:aws:kms:us-east-1:012345678912:alias/DoesExist", False), ("", True), ("arn:aws:kms:us-east-1:012345678912:key/", True)] +@pytest.mark.parametrize( + "prefix,append_key_id", + [ + ("alias/DoesExist", False), + ("arn:aws:kms:us-east-1:012345678912:alias/DoesExist", False), + ("", True), + ("arn:aws:kms:us-east-1:012345678912:key/", True), + ], ) @mock_kms def test_generate_data_key_all_valid_key_ids(prefix, append_key_id): @@ -512,8 +536,15 @@ def test_generate_random(number_of_bytes): len(response["Plaintext"]).should.equal(number_of_bytes) -@pytest.mark.parametrize("number_of_bytes,error_type", - [(2048, botocore.exceptions.ClientError), (1025, botocore.exceptions.ClientError), (0, botocore.exceptions.ParamValidationError), (-1, botocore.exceptions.ParamValidationError), (-1024, botocore.exceptions.ParamValidationError)] +@pytest.mark.parametrize( + "number_of_bytes,error_type", + [ + (2048, botocore.exceptions.ClientError), + (1025, botocore.exceptions.ClientError), + (0, botocore.exceptions.ParamValidationError), + (-1, botocore.exceptions.ParamValidationError), + (-1024, botocore.exceptions.ParamValidationError), + ], ) @mock_kms def test_generate_random_invalid_number_of_bytes(number_of_bytes, error_type): diff --git a/tests/test_kms/test_utils.py b/tests/test_kms/test_utils.py index 5a1046e3fc82..92d85610e69d 100644 --- a/tests/test_kms/test_utils.py +++ b/tests/test_kms/test_utils.py @@ -98,7 +98,9 @@ def test_deserialize_ciphertext_blob(raw, serialized): test.should.equal(raw) -@pytest.mark.parametrize("encryption_context", [ec[0] for ec in ENCRYPTION_CONTEXT_VECTORS]) +@pytest.mark.parametrize( + "encryption_context", [ec[0] for ec in ENCRYPTION_CONTEXT_VECTORS] +) def test_encrypt_decrypt_cycle(encryption_context): plaintext = b"some secret plaintext" master_key = Key("nop", "nop", "nop", "nop", "nop") diff --git a/tests/test_logs/test_integration.py b/tests/test_logs/test_integration.py index f7b4fc399622..eab839970c10 100644 --- a/tests/test_logs/test_integration.py +++ b/tests/test_logs/test_integration.py @@ -205,8 +205,7 @@ def test_delete_subscription_filter_errors(): # when client_logs.delete_subscription_filter( - logGroupName="/test", - filterName="test", + logGroupName="/test", filterName="test", ) # then @@ -244,8 +243,7 @@ def test_delete_subscription_filter_errors(): # when with pytest.raises(ClientError) as e: client_logs.delete_subscription_filter( - logGroupName="not-existing-log-group", - filterName="test", + logGroupName="not-existing-log-group", filterName="test", ) # then @@ -260,8 +258,7 @@ def test_delete_subscription_filter_errors(): # when with pytest.raises(ClientError) as e: client_logs.delete_subscription_filter( - logGroupName="/test", - filterName="wrong-filter-name", + logGroupName="/test", filterName="wrong-filter-name", ) # then @@ -345,9 +342,7 @@ def _get_role_name(region_name): return iam.get_role(RoleName="test-role")["Role"]["Arn"] except ClientError: return iam.create_role( - RoleName="test-role", - AssumeRolePolicyDocument="test policy", - Path="/", + 
RoleName="test-role", AssumeRolePolicyDocument="test policy", Path="/", )["Role"]["Arn"] @@ -377,8 +372,7 @@ def _wait_for_log_msg(client, log_group_name, expected_msg_part): for log_stream in log_streams: result = client.get_log_events( - logGroupName=log_group_name, - logStreamName=log_stream["logStreamName"], + logGroupName=log_group_name, logStreamName=log_stream["logStreamName"], ) received_messages.extend( [event["message"] for event in result.get("events")] diff --git a/tests/test_logs/test_logs.py b/tests/test_logs/test_logs.py index cbfed65f00a7..fc9868ffb85a 100644 --- a/tests/test_logs/test_logs.py +++ b/tests/test_logs/test_logs.py @@ -448,9 +448,7 @@ def test_describe_subscription_filters_errors(): # when with pytest.raises(ClientError) as e: - client.describe_subscription_filters( - logGroupName="not-existing-log-group", - ) + client.describe_subscription_filters(logGroupName="not-existing-log-group",) # then ex = e.value diff --git a/tests/test_managedblockchain/test_managedblockchain_members.py b/tests/test_managedblockchain/test_managedblockchain_members.py index 74b40db26306..9120e4aee42c 100644 --- a/tests/test_managedblockchain/test_managedblockchain_members.py +++ b/tests/test_managedblockchain/test_managedblockchain_members.py @@ -183,8 +183,7 @@ def test_create_another_member_withopts(): # But cannot get response = conn.get_member.when.called_with( - NetworkId=network_id, - MemberId=member_id2, + NetworkId=network_id, MemberId=member_id2, ).should.throw(Exception, "Member {0} not found".format(member_id2)) # Delete member 1 @@ -256,9 +255,7 @@ def test_invite_and_remove_member(): # Create proposal (invite and remove member) response = conn.create_proposal( - NetworkId=network_id, - MemberId=member_id, - Actions=both_policy_actions, + NetworkId=network_id, MemberId=member_id, Actions=both_policy_actions, ) proposal_id2 = response["ProposalId"] @@ -371,10 +368,7 @@ def test_create_too_many_members(): MemberConfiguration=helpers.create_member_configuration( "testmember6", "admin", "Admin12345", False, "Test Member 6" ), - ).should.throw( - Exception, - "is the maximum number of members allowed in a", - ) + ).should.throw(Exception, "is the maximum number of members allowed in a",) @mock_managedblockchain @@ -600,8 +594,7 @@ def test_get_member_badmember(): network_id = response["NetworkId"] response = conn.get_member.when.called_with( - NetworkId=network_id, - MemberId="m-ABCDEFGHIJKLMNOP0123456789", + NetworkId=network_id, MemberId="m-ABCDEFGHIJKLMNOP0123456789", ).should.throw(Exception, "Member m-ABCDEFGHIJKLMNOP0123456789 not found") @@ -631,8 +624,7 @@ def test_delete_member_badmember(): network_id = response["NetworkId"] response = conn.delete_member.when.called_with( - NetworkId=network_id, - MemberId="m-ABCDEFGHIJKLMNOP0123456789", + NetworkId=network_id, MemberId="m-ABCDEFGHIJKLMNOP0123456789", ).should.throw(Exception, "Member m-ABCDEFGHIJKLMNOP0123456789 not found") diff --git a/tests/test_managedblockchain/test_managedblockchain_nodes.py b/tests/test_managedblockchain/test_managedblockchain_nodes.py index ec657700804d..32a5bc62c5d2 100644 --- a/tests/test_managedblockchain/test_managedblockchain_nodes.py +++ b/tests/test_managedblockchain/test_managedblockchain_nodes.py @@ -58,9 +58,7 @@ def test_create_node(): # Delete node conn.delete_node( - NetworkId=network_id, - MemberId=member_id, - NodeId=node_id, + NetworkId=network_id, MemberId=member_id, NodeId=node_id, ) # Find node in full list @@ -79,9 +77,7 @@ def test_create_node(): # But cannot get response 
= conn.get_node.when.called_with( - NetworkId=network_id, - MemberId=member_id, - NodeId=node_id, + NetworkId=network_id, MemberId=member_id, NodeId=node_id, ).should.throw(Exception, "Node {0} not found".format(node_id)) @@ -107,9 +103,7 @@ def test_create_node_standard_edition(): logconfigbad = dict(helpers.default_nodeconfiguration) logconfigbad["InstanceType"] = "bc.t3.large" response = conn.create_node( - NetworkId=network_id, - MemberId=member_id, - NodeConfiguration=logconfigbad, + NetworkId=network_id, MemberId=member_id, NodeConfiguration=logconfigbad, ) node_id = response["NodeId"] @@ -152,8 +146,7 @@ def test_create_node_standard_edition(): # Should now be an exception response = conn.list_nodes.when.called_with( - NetworkId=network_id, - MemberId=member_id, + NetworkId=network_id, MemberId=member_id, ).should.throw(Exception, "Member {0} not found".format(member_id)) @@ -199,8 +192,7 @@ def test_create_too_many_nodes(): MemberId=member_id, NodeConfiguration=helpers.default_nodeconfiguration, ).should.throw( - Exception, - "Maximum number of nodes exceeded in member {0}".format(member_id), + Exception, "Maximum number of nodes exceeded in member {0}".format(member_id), ) @@ -257,18 +249,14 @@ def test_create_node_badnodeconfig(): logconfigbad = dict(helpers.default_nodeconfiguration) logconfigbad["InstanceType"] = "foo" response = conn.create_node.when.called_with( - NetworkId=network_id, - MemberId=member_id, - NodeConfiguration=logconfigbad, + NetworkId=network_id, MemberId=member_id, NodeConfiguration=logconfigbad, ).should.throw(Exception, "Requested instance foo isn't supported.") # Incorrect instance type for edition logconfigbad = dict(helpers.default_nodeconfiguration) logconfigbad["InstanceType"] = "bc.t3.large" response = conn.create_node.when.called_with( - NetworkId=network_id, - MemberId=member_id, - NodeConfiguration=logconfigbad, + NetworkId=network_id, MemberId=member_id, NodeConfiguration=logconfigbad, ).should.throw( Exception, "Instance type bc.t3.large is not supported with STARTER Edition networks", @@ -278,9 +266,7 @@ def test_create_node_badnodeconfig(): logconfigbad = dict(helpers.default_nodeconfiguration) logconfigbad["AvailabilityZone"] = "us-east-11" response = conn.create_node.when.called_with( - NetworkId=network_id, - MemberId=member_id, - NodeConfiguration=logconfigbad, + NetworkId=network_id, MemberId=member_id, NodeConfiguration=logconfigbad, ).should.throw(Exception, "Availability Zone is not valid") @@ -310,8 +296,7 @@ def test_list_nodes_badmember(): network_id = response["NetworkId"] response = conn.list_nodes.when.called_with( - NetworkId=network_id, - MemberId="m-ABCDEFGHIJKLMNOP0123456789", + NetworkId=network_id, MemberId="m-ABCDEFGHIJKLMNOP0123456789", ).should.throw(Exception, "Member m-ABCDEFGHIJKLMNOP0123456789 not found") diff --git a/tests/test_managedblockchain/test_managedblockchain_proposals.py b/tests/test_managedblockchain/test_managedblockchain_proposals.py index d0339535c81c..aa899e3a1623 100644 --- a/tests/test_managedblockchain/test_managedblockchain_proposals.py +++ b/tests/test_managedblockchain/test_managedblockchain_proposals.py @@ -131,9 +131,7 @@ def test_create_proposal_badinvitationacctid(): member_id = response["MemberId"] response = conn.create_proposal.when.called_with( - NetworkId=network_id, - MemberId=member_id, - Actions=actions, + NetworkId=network_id, MemberId=member_id, Actions=actions, ).should.throw(Exception, "Account ID format specified in proposal is not valid") @@ -157,9 +155,7 @@ def 
test_create_proposal_badremovalmemid(): member_id = response["MemberId"] response = conn.create_proposal.when.called_with( - NetworkId=network_id, - MemberId=member_id, - Actions=actions, + NetworkId=network_id, MemberId=member_id, Actions=actions, ).should.throw(Exception, "Member ID format specified in proposal is not valid") @@ -198,6 +194,5 @@ def test_get_proposal_badproposal(): network_id = response["NetworkId"] response = conn.get_proposal.when.called_with( - NetworkId=network_id, - ProposalId="p-ABCDEFGHIJKLMNOP0123456789", + NetworkId=network_id, ProposalId="p-ABCDEFGHIJKLMNOP0123456789", ).should.throw(Exception, "Proposal p-ABCDEFGHIJKLMNOP0123456789 not found") diff --git a/tests/test_managedblockchain/test_managedblockchain_proposalvotes.py b/tests/test_managedblockchain/test_managedblockchain_proposalvotes.py index d7739ebac28c..e8f4043d5536 100644 --- a/tests/test_managedblockchain/test_managedblockchain_proposalvotes.py +++ b/tests/test_managedblockchain/test_managedblockchain_proposalvotes.py @@ -666,6 +666,5 @@ def test_list_proposal_votes_badproposal(): member_id = response["MemberId"] response = conn.list_proposal_votes.when.called_with( - NetworkId=network_id, - ProposalId="p-ABCDEFGHIJKLMNOP0123456789", + NetworkId=network_id, ProposalId="p-ABCDEFGHIJKLMNOP0123456789", ).should.throw(Exception, "Proposal p-ABCDEFGHIJKLMNOP0123456789 not found") diff --git a/tests/test_organizations/test_organizations_boto3.py b/tests/test_organizations/test_organizations_boto3.py index 2339116d3aee..07cd3afa67f6 100644 --- a/tests/test_organizations/test_organizations_boto3.py +++ b/tests/test_organizations/test_organizations_boto3.py @@ -931,10 +931,7 @@ def test_tag_resource_errors(): with pytest.raises(ClientError) as e: client.tag_resource( - ResourceId="000000000000", - Tags=[ - {"Key": "key", "Value": "value"}, - ], + ResourceId="000000000000", Tags=[{"Key": "key", "Value": "value"},], ) ex = e.value ex.operation_name.should.equal("TagResource") diff --git a/tests/test_s3/test_s3.py b/tests/test_s3/test_s3.py index 1f78e475b099..bac03ed6a259 100644 --- a/tests/test_s3/test_s3.py +++ b/tests/test_s3/test_s3.py @@ -2397,9 +2397,7 @@ def test_boto3_get_object_if_match(): with pytest.raises(botocore.exceptions.ClientError) as err: s3.get_object( - Bucket=bucket_name, - Key=key, - IfMatch='"hello"', + Bucket=bucket_name, Key=key, IfMatch='"hello"', ) e = err.value e.response["Error"]["Code"].should.equal("PreconditionFailed") @@ -2418,9 +2416,7 @@ def test_boto3_get_object_if_none_match(): with pytest.raises(botocore.exceptions.ClientError) as err: s3.get_object( - Bucket=bucket_name, - Key=key, - IfNoneMatch=etag, + Bucket=bucket_name, Key=key, IfNoneMatch=etag, ) e = err.value e.response["Error"].should.equal({"Code": "304", "Message": "Not Modified"}) @@ -2478,9 +2474,7 @@ def test_boto3_head_object_if_match(): with pytest.raises(botocore.exceptions.ClientError) as err: s3.head_object( - Bucket=bucket_name, - Key=key, - IfMatch='"hello"', + Bucket=bucket_name, Key=key, IfMatch='"hello"', ) e = err.value e.response["Error"].should.equal({"Code": "412", "Message": "Precondition Failed"}) @@ -2498,9 +2492,7 @@ def test_boto3_head_object_if_none_match(): with pytest.raises(botocore.exceptions.ClientError) as err: s3.head_object( - Bucket=bucket_name, - Key=key, - IfNoneMatch=etag, + Bucket=bucket_name, Key=key, IfNoneMatch=etag, ) e = err.value e.response["Error"].should.equal({"Code": "304", "Message": "Not Modified"}) @@ -4037,8 +4029,8 @@ def 
test_leading_slashes_not_removed(bucket_name): e.value.response["Error"]["Code"].should.equal("NoSuchKey") -@pytest.mark.parametrize("key", - ["foo/bar/baz", "foo", "foo/run_dt%3D2019-01-01%252012%253A30%253A00"] +@pytest.mark.parametrize( + "key", ["foo/bar/baz", "foo", "foo/run_dt%3D2019-01-01%252012%253A30%253A00"] ) @mock_s3 def test_delete_objects_with_url_encoded_key(key): diff --git a/tests/test_s3/test_s3_cloudformation.py b/tests/test_s3/test_s3_cloudformation.py index 68f191622880..ebaa03b7879c 100644 --- a/tests/test_s3/test_s3_cloudformation.py +++ b/tests/test_s3/test_s3_cloudformation.py @@ -14,12 +14,7 @@ def test_s3_bucket_cloudformation_basic(): template = { "AWSTemplateFormatVersion": "2010-09-09", - "Resources": { - "testInstance": { - "Type": "AWS::S3::Bucket", - "Properties": {}, - } - }, + "Resources": {"testInstance": {"Type": "AWS::S3::Bucket", "Properties": {},}}, "Outputs": {"Bucket": {"Value": {"Ref": "testInstance"}}}, } template_json = json.dumps(template) diff --git a/tests/test_s3/test_s3_utils.py b/tests/test_s3/test_s3_utils.py index f6e653f86854..64d1c2ca844e 100644 --- a/tests/test_s3/test_s3_utils.py +++ b/tests/test_s3/test_s3_utils.py @@ -93,7 +93,8 @@ def test_parse_region_from_url(): parse_region_from_url(url).should.equal(expected) -@pytest.mark.parametrize("key,expected", +@pytest.mark.parametrize( + "key,expected", [ ("foo/bar/baz", "foo/bar/baz"), ("foo", "foo"), @@ -101,13 +102,14 @@ def test_parse_region_from_url(): "foo/run_dt%3D2019-01-01%252012%253A30%253A00", "foo/run_dt=2019-01-01%2012%3A30%3A00", ), - ] + ], ) def test_clean_key_name(key, expected): clean_key_name(key).should.equal(expected) -@pytest.mark.parametrize("key,expected", +@pytest.mark.parametrize( + "key,expected", [ ("foo/bar/baz", "foo/bar/baz"), ("foo", "foo"), @@ -115,7 +117,7 @@ def test_clean_key_name(key, expected): "foo/run_dt%3D2019-01-01%252012%253A30%253A00", "foo/run_dt%253D2019-01-01%25252012%25253A30%25253A00", ), - ] + ], ) def test_undo_clean_key_name(key, expected): undo_clean_key_name(key).should.equal(expected) diff --git a/tests/test_secretsmanager/test_secretsmanager.py b/tests/test_secretsmanager/test_secretsmanager.py index 9e2fbe0ae9a3..539b878f98ac 100644 --- a/tests/test_secretsmanager/test_secretsmanager.py +++ b/tests/test_secretsmanager/test_secretsmanager.py @@ -638,7 +638,9 @@ def test_put_secret_value_on_non_existing_secret(): VersionStages=["AWSCURRENT"], ) - cm.value.response["Error"]["Message"].should.equal("Secrets Manager can't find the specified secret.") + cm.value.response["Error"]["Message"].should.equal( + "Secrets Manager can't find the specified secret." 
+ ) @mock_secretsmanager @@ -923,17 +925,11 @@ def test_tag_resource(): conn = boto3.client("secretsmanager", region_name="us-west-2") conn.create_secret(Name="test-secret", SecretString="foosecret") conn.tag_resource( - SecretId="test-secret", - Tags=[ - {"Key": "FirstTag", "Value": "SomeValue"}, - ], + SecretId="test-secret", Tags=[{"Key": "FirstTag", "Value": "SomeValue"},], ) conn.tag_resource( - SecretId="test-secret", - Tags=[ - {"Key": "SecondTag", "Value": "AnotherValue"}, - ], + SecretId="test-secret", Tags=[{"Key": "SecondTag", "Value": "AnotherValue"},], ) secrets = conn.list_secrets() @@ -945,14 +941,13 @@ def test_tag_resource(): with pytest.raises(ClientError) as cm: conn.tag_resource( SecretId="dummy-test-secret", - Tags=[ - {"Key": "FirstTag", "Value": "SomeValue"}, - ], + Tags=[{"Key": "FirstTag", "Value": "SomeValue"},], ) - assert \ - "Secrets Manager can't find the specified secret." == \ - cm.value.response["Error"]["Message"] + assert ( + "Secrets Manager can't find the specified secret." + == cm.value.response["Error"]["Message"] + ) @mock_secretsmanager diff --git a/tests/test_ses/test_ses_boto3.py b/tests/test_ses/test_ses_boto3.py index 2e58ef18d977..5af4d9cbfc52 100644 --- a/tests/test_ses/test_ses_boto3.py +++ b/tests/test_ses/test_ses_boto3.py @@ -89,9 +89,7 @@ def test_send_email_when_verify_source(): conn = boto3.client("ses", region_name="us-east-1") kwargs = dict( - Destination={ - "ToAddresses": ["test_to@example.com"], - }, + Destination={"ToAddresses": ["test_to@example.com"],}, Message={ "Subject": {"Data": "test subject"}, "Body": {"Text": {"Data": "test body"}}, @@ -278,16 +276,7 @@ def test_send_email_notification_with_encoded_sender(): response = conn.send_email( Source=sender, Destination={"ToAddresses": ["your.friend@hotmail.com"]}, - Message={ - "Subject": { - "Data": "hi", - }, - "Body": { - "Text": { - "Data": "there", - } - }, - }, + Message={"Subject": {"Data": "hi",}, "Body": {"Text": {"Data": "there",}},}, ) response["ResponseMetadata"]["HTTPStatusCode"].should.equal(200) @@ -302,9 +291,7 @@ def test_create_configuration_set(): EventDestination={ "Name": "snsEvent", "Enabled": True, - "MatchingEventTypes": [ - "send", - ], + "MatchingEventTypes": ["send",], "SNSDestination": { "TopicARN": "arn:aws:sns:us-east-1:123456789012:myTopic" }, @@ -317,9 +304,7 @@ def test_create_configuration_set(): EventDestination={ "Name": "snsEvent", "Enabled": True, - "MatchingEventTypes": [ - "send", - ], + "MatchingEventTypes": ["send",], "SNSDestination": { "TopicARN": "arn:aws:sns:us-east-1:123456789012:myTopic" }, @@ -334,9 +319,7 @@ def test_create_configuration_set(): EventDestination={ "Name": "snsEvent", "Enabled": True, - "MatchingEventTypes": [ - "send", - ], + "MatchingEventTypes": ["send",], "SNSDestination": { "TopicARN": "arn:aws:sns:us-east-1:123456789012:myTopic" }, diff --git a/tests/test_sns/test_publishing_boto3.py b/tests/test_sns/test_publishing_boto3.py index 9dfe27656640..797ccdaba5fa 100644 --- a/tests/test_sns/test_publishing_boto3.py +++ b/tests/test_sns/test_publishing_boto3.py @@ -152,9 +152,7 @@ def test_publish_to_sqs_msg_attr_byte_value(): sqs = boto3.resource("sqs", region_name="us-east-1") queue = sqs.create_queue(QueueName="test-queue") conn.subscribe( - TopicArn=topic_arn, - Protocol="sqs", - Endpoint=queue.attributes["QueueArn"], + TopicArn=topic_arn, Protocol="sqs", Endpoint=queue.attributes["QueueArn"], ) queue_raw = sqs.create_queue(QueueName="test-queue-raw") conn.subscribe( diff --git 
a/tests/test_sns/test_topics_boto3.py b/tests/test_sns/test_topics_boto3.py index 4414e9375832..6b1e52df6a88 100644 --- a/tests/test_sns/test_topics_boto3.py +++ b/tests/test_sns/test_topics_boto3.py @@ -525,9 +525,7 @@ def test_untag_resource_error(): @mock_sns def test_topic_kms_master_key_id_attribute(): client = boto3.client("sns", region_name="us-west-2") - resp = client.create_topic( - Name="test-sns-no-key-attr", - ) + resp = client.create_topic(Name="test-sns-no-key-attr",) topic_arn = resp["TopicArn"] resp = client.get_topic_attributes(TopicArn=topic_arn) resp["Attributes"].should_not.have.key("KmsMasterKeyId") @@ -540,10 +538,7 @@ def test_topic_kms_master_key_id_attribute(): resp["Attributes"]["KmsMasterKeyId"].should.equal("test-key") resp = client.create_topic( - Name="test-sns-with-key-attr", - Attributes={ - "KmsMasterKeyId": "key-id", - }, + Name="test-sns-with-key-attr", Attributes={"KmsMasterKeyId": "key-id",}, ) topic_arn = resp["TopicArn"] resp = client.get_topic_attributes(TopicArn=topic_arn) diff --git a/tests/test_sqs/test_sqs.py b/tests/test_sqs/test_sqs.py index a828db3cb957..c234f5cdc8bf 100644 --- a/tests/test_sqs/test_sqs.py +++ b/tests/test_sqs/test_sqs.py @@ -719,10 +719,7 @@ def test_send_receive_message_with_attributes_with_labels(): response = queue.send_message( MessageBody="test message", MessageAttributes={ - "somevalue": { - "StringValue": "somevalue", - "DataType": "String.custom", - } + "somevalue": {"StringValue": "somevalue", "DataType": "String.custom",} }, ) @@ -2245,9 +2242,7 @@ def test_invoke_function_from_sqs_exception(): @mock_sqs def test_maximum_message_size_attribute_default(): sqs = boto3.resource("sqs", region_name="eu-west-3") - queue = sqs.create_queue( - QueueName="test-queue", - ) + queue = sqs.create_queue(QueueName="test-queue",) int(queue.attributes["MaximumMessageSize"]).should.equal(MAXIMUM_MESSAGE_LENGTH) with pytest.raises(Exception) as e: queue.send_message(MessageBody="a" * (MAXIMUM_MESSAGE_LENGTH + 1)) diff --git a/tests/test_ssm/test_ssm_boto3.py b/tests/test_ssm/test_ssm_boto3.py index 152a3c9c9c4b..5aad144298e8 100644 --- a/tests/test_ssm/test_ssm_boto3.py +++ b/tests/test_ssm/test_ssm_boto3.py @@ -309,29 +309,25 @@ def test_put_parameter_invalid_names(): client.put_parameter.when.called_with( Name="ssm_test", Value="value", Type="String" ).should.throw( - ClientError, - invalid_prefix_err, + ClientError, invalid_prefix_err, ) client.put_parameter.when.called_with( Name="SSM_TEST", Value="value", Type="String" ).should.throw( - ClientError, - invalid_prefix_err, + ClientError, invalid_prefix_err, ) client.put_parameter.when.called_with( Name="aws_test", Value="value", Type="String" ).should.throw( - ClientError, - invalid_prefix_err, + ClientError, invalid_prefix_err, ) client.put_parameter.when.called_with( Name="AWS_TEST", Value="value", Type="String" ).should.throw( - ClientError, - invalid_prefix_err, + ClientError, invalid_prefix_err, ) ssm_path = "/ssm_test/path/to/var" @@ -358,16 +354,14 @@ def test_put_parameter_invalid_names(): client.put_parameter.when.called_with( Name=aws_path, Value="value", Type="String" ).should.throw( - ClientError, - "No access to reserved parameter name: {}.".format(aws_path), + ClientError, "No access to reserved parameter name: {}.".format(aws_path), ) aws_path = "/AWS/PATH/TO/VAR" client.put_parameter.when.called_with( Name=aws_path, Value="value", Type="String" ).should.throw( - ClientError, - "No access to reserved parameter name: {}.".format(aws_path), + ClientError, "No access to 
reserved parameter name: {}.".format(aws_path), ) diff --git a/tests/test_stepfunctions/test_stepfunctions.py b/tests/test_stepfunctions/test_stepfunctions.py index 81715a5f2977..13a6809f53b7 100644 --- a/tests/test_stepfunctions/test_stepfunctions.py +++ b/tests/test_stepfunctions/test_stepfunctions.py @@ -356,10 +356,8 @@ def test_state_machine_can_deleted_nonexisting_machine(): @mock_stepfunctions def test_state_machine_tagging_non_existent_resource_fails(): client = boto3.client("stepfunctions", region_name=region) - non_existent_arn = ( - "arn:aws:states:{region}:{account}:stateMachine:non-existent".format( - region=region, account=ACCOUNT_ID - ) + non_existent_arn = "arn:aws:states:{region}:{account}:stateMachine:non-existent".format( + region=region, account=ACCOUNT_ID ) with pytest.raises(ClientError) as ex: client.tag_resource(resourceArn=non_existent_arn, tags=[]) @@ -370,10 +368,8 @@ def test_state_machine_tagging_non_existent_resource_fails(): @mock_stepfunctions def test_state_machine_untagging_non_existent_resource_fails(): client = boto3.client("stepfunctions", region_name=region) - non_existent_arn = ( - "arn:aws:states:{region}:{account}:stateMachine:non-existent".format( - region=region, account=ACCOUNT_ID - ) + non_existent_arn = "arn:aws:states:{region}:{account}:stateMachine:non-existent".format( + region=region, account=ACCOUNT_ID ) with pytest.raises(ClientError) as ex: client.untag_resource(resourceArn=non_existent_arn, tagKeys=[]) @@ -390,9 +386,7 @@ def test_state_machine_tagging(): {"key": "tag_key2", "value": "tag_value2"}, ] machine = client.create_state_machine( - name="test", - definition=str(simple_definition), - roleArn=_get_default_role(), + name="test", definition=str(simple_definition), roleArn=_get_default_role(), ) client.tag_resource(resourceArn=machine["stateMachineArn"], tags=tags) resp = client.list_tags_for_resource(resourceArn=machine["stateMachineArn"]) @@ -944,9 +938,7 @@ def test_state_machine_cloudformation_update_with_replacement(): with pytest.raises(ClientError) as ex: sf.describe_state_machine(stateMachineArn=original_machine_arn) ex.value.response["Error"]["Code"].should.equal("StateMachineDoesNotExist") - ex.value.response["Error"]["Message"].should.contain( - "State Machine Does Not Exist" - ) + ex.value.response["Error"]["Message"].should.contain("State Machine Does Not Exist") @mock_stepfunctions diff --git a/tests/test_transcribe/test_transcribe_boto3.py b/tests/test_transcribe/test_transcribe_boto3.py index 8fed77979221..3de958bc1140 100644 --- a/tests/test_transcribe/test_transcribe_boto3.py +++ b/tests/test_transcribe/test_transcribe_boto3.py @@ -17,9 +17,7 @@ def test_run_medical_transcription_job_minimal_params(): args = { "MedicalTranscriptionJobName": job_name, "LanguageCode": "en-US", - "Media": { - "MediaFileUri": "s3://my-bucket/my-media-file.wav", - }, + "Media": {"MediaFileUri": "s3://my-bucket/my-media-file.wav",}, "OutputBucketName": "my-output-bucket", "Specialty": "PRIMARYCARE", "Type": "CONVERSATION", @@ -100,9 +98,7 @@ def test_run_medical_transcription_job_all_params(): "LanguageCode": "en-US", "MediaSampleRateHertz": 48000, "MediaFormat": "flac", - "Media": { - "MediaFileUri": "s3://my-bucket/my-media-file.dat", - }, + "Media": {"MediaFileUri": "s3://my-bucket/my-media-file.dat",}, "OutputBucketName": "my-output-bucket", "OutputEncryptionKMSKeyId": "arn:aws:kms:us-east-1:012345678901:key/37111b5e-8eff-4706-ae3a-d4f9d1d559fc", "Settings": { @@ -203,9 +199,7 @@ def 
test_run_medical_transcription_job_with_existing_job_name(): args = { "MedicalTranscriptionJobName": job_name, "LanguageCode": "en-US", - "Media": { - "MediaFileUri": "s3://my-bucket/my-media-file.wav", - }, + "Media": {"MediaFileUri": "s3://my-bucket/my-media-file.wav",}, "OutputBucketName": "my-output-bucket", "Specialty": "PRIMARYCARE", "Type": "CONVERSATION", @@ -228,9 +222,7 @@ def test_run_medical_transcription_job_nonexistent_vocabulary(): args = { "MedicalTranscriptionJobName": job_name, "LanguageCode": "en-US", - "Media": { - "MediaFileUri": "s3://my-bucket/my-media-file.dat", - }, + "Media": {"MediaFileUri": "s3://my-bucket/my-media-file.dat",}, "OutputBucketName": "my-output-bucket", "Settings": {"VocabularyName": "NonexistentVocabulary"}, "Specialty": "PRIMARYCARE", @@ -252,9 +244,7 @@ def run_job(index, target_status): args = { "MedicalTranscriptionJobName": job_name, "LanguageCode": "en-US", - "Media": { - "MediaFileUri": "s3://my-bucket/my-media-file.wav", - }, + "Media": {"MediaFileUri": "s3://my-bucket/my-media-file.wav",}, "OutputBucketName": "my-output-bucket", "Specialty": "PRIMARYCARE", "Type": "CONVERSATION", From d068653dead700f913f90b72a6948e51aeb53544 Mon Sep 17 00:00:00 2001 From: gsamaras Date: Sat, 14 Nov 2020 11:10:38 +0000 Subject: [PATCH 627/658] dynamodb2 support for default Action ('Put') in update_item (#3454) Co-authored-by: Georgios Samaras --- moto/dynamodb2/models/__init__.py | 5 ++++- tests/test_dynamodb2/test_dynamodb.py | 24 ++++++++++++++++++++++++ 2 files changed, 28 insertions(+), 1 deletion(-) diff --git a/moto/dynamodb2/models/__init__.py b/moto/dynamodb2/models/__init__.py index 782ddcee9ce1..6b35830103da 100644 --- a/moto/dynamodb2/models/__init__.py +++ b/moto/dynamodb2/models/__init__.py @@ -109,7 +109,10 @@ def describe_attrs(self, attributes): def update_with_attribute_updates(self, attribute_updates): for attribute_name, update_action in attribute_updates.items(): - action = update_action["Action"] + # Use default Action value, if no explicit Action is passed. + # Default value is 'Put', according to + # Boto3 DynamoDB.Client.update_item documentation. + action = update_action.get("Action", "PUT") if action == "DELETE" and "Value" not in update_action: if attribute_name in self.attrs: del self.attrs[attribute_name] diff --git a/tests/test_dynamodb2/test_dynamodb.py b/tests/test_dynamodb2/test_dynamodb.py index 7a2ed32cb0ed..de9811df68f2 100644 --- a/tests/test_dynamodb2/test_dynamodb.py +++ b/tests/test_dynamodb2/test_dynamodb.py @@ -2251,6 +2251,30 @@ def test_update_item_with_list(): resp["Item"].should.equal({"key": "the-key", "list": [1, 2]}) +# https://github.com/spulec/moto/issues/2328 +@mock_dynamodb2 +def test_update_item_with_no_action_passed_with_list(): + dynamodb = boto3.resource("dynamodb", region_name="us-east-1") + + # Create the DynamoDB table. + dynamodb.create_table( + TableName="Table", + KeySchema=[{"AttributeName": "key", "KeyType": "HASH"}], + AttributeDefinitions=[{"AttributeName": "key", "AttributeType": "S"}], + ProvisionedThroughput={"ReadCapacityUnits": 1, "WriteCapacityUnits": 1}, + ) + table = dynamodb.Table("Table") + table.update_item( + Key={"key": "the-key"}, + # Do not pass 'Action' key, in order to check that the + # parameter's default value will be used. 
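+        # E.g. {"list": {"Value": [1, 2]}} should behave exactly like
+        # {"list": {"Value": [1, 2], "Action": "PUT"}}.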
+ AttributeUpdates={"list": {"Value": [1, 2]}}, + ) + + resp = table.get_item(Key={"key": "the-key"}) + resp["Item"].should.equal({"key": "the-key", "list": [1, 2]}) + + # https://github.com/spulec/moto/issues/1342 @mock_dynamodb2 def test_update_item_on_map(): From 7749c1f75781cd5fb2367b089c31a92e127b07b1 Mon Sep 17 00:00:00 2001 From: Guillermo Arribas Date: Mon, 16 Nov 2020 18:20:33 +1100 Subject: [PATCH 628/658] Fix failures with latest responses library (0.12.1) (#3466) * Fix failures with latest responses library (0.12.1) * Detect version of responses library and supply a compatible monkey patch depending on version * Seperate responses_mock._find_match monkey patchs depending on reponses lib version to improve readability Co-authored-by: Guillermo Arribas --- moto/core/models.py | 24 ++++++++++++++++++++++-- 1 file changed, 22 insertions(+), 2 deletions(-) diff --git a/moto/core/models.py b/moto/core/models.py index d8de6b29f2a4..2cd67188a424 100644 --- a/moto/core/models.py +++ b/moto/core/models.py @@ -5,6 +5,7 @@ import functools import inspect import os +import pkg_resources import re import six import types @@ -14,6 +15,7 @@ from botocore.config import Config from botocore.handlers import BUILTIN_HANDLERS from botocore.awsrequest import AWSResponse +from distutils.version import LooseVersion from six.moves.urllib.parse import urlparse from werkzeug.wrappers import Request @@ -28,6 +30,7 @@ ) ACCOUNT_ID = os.environ.get("MOTO_ACCOUNT_ID", "123456789012") +RESPONSES_VERSION = pkg_resources.get_distribution("responses").version class BaseMockAWS(object): @@ -251,7 +254,7 @@ def _url_matches(self, url, other, match_querystring=False): responses_mock.add_passthru("http") -def _find_first_match(self, request): +def _find_first_match_legacy(self, request): for i, match in enumerate(self._matches): if match.matches(request): return match @@ -259,12 +262,29 @@ def _find_first_match(self, request): return None +def _find_first_match(self, request): + match_failed_reasons = [] + for i, match in enumerate(self._matches): + match_result, reason = match.matches(request) + if match_result: + return match, match_failed_reasons + else: + match_failed_reasons.append(reason) + + return None, match_failed_reasons + + # Modify behaviour of the matcher to only/always return the first match # Default behaviour is to return subsequent matches for subsequent requests, which leads to https://github.com/spulec/moto/issues/2567 # - First request matches on the appropriate S3 URL # - Same request, executed again, will be matched on the subsequent match, which happens to be the catch-all, not-yet-implemented, callback # Fix: Always return the first match -responses_mock._find_match = types.MethodType(_find_first_match, responses_mock) +if LooseVersion(RESPONSES_VERSION) < LooseVersion("0.12.1"): + responses_mock._find_match = types.MethodType( + _find_first_match_legacy, responses_mock + ) +else: + responses_mock._find_match = types.MethodType(_find_first_match, responses_mock) BOTOCORE_HTTP_METHODS = ["GET", "DELETE", "HEAD", "OPTIONS", "PATCH", "POST", "PUT"] From 93453eba057f9cf63e477ba55c3427c664e0e6a7 Mon Sep 17 00:00:00 2001 From: Brian Pandola Date: Mon, 16 Nov 2020 00:17:36 -0800 Subject: [PATCH 629/658] Improve ec2:DescribeSubnets filtering (#3457) * Add response/model/test coverage for filtering by `state` * Add explicit test case for filtering by `vpc-id` Closes #801 --- moto/ec2/models.py | 3 ++ moto/ec2/responses/subnets.py | 2 +- tests/test_ec2/test_subnets.py | 56 
++++++++++++++++++++++++++++++++++
 3 files changed, 60 insertions(+), 1 deletion(-)

diff --git a/moto/ec2/models.py b/moto/ec2/models.py
index bdb1cb03a367..586f49dcf56a 100644
--- a/moto/ec2/models.py
+++ b/moto/ec2/models.py
@@ -3307,6 +3307,7 @@ def __init__(
         ]  # Reserved by AWS
         self._unused_ips = set()  # if instance is destroyed hold IP here for reuse
         self._subnet_ips = {}  # has IP: instance
+        self.state = "available"

     @staticmethod
     def cloudformation_name_type():
@@ -3387,6 +3388,8 @@ def get_filter_value(self, filter_name):
             return self.availability_zone
         elif filter_name in ("defaultForAz", "default-for-az"):
             return self.default_for_az
+        elif filter_name == "state":
+            return self.state
         else:
             return super(Subnet, self).get_filter_value(filter_name, "DescribeSubnets")

diff --git a/moto/ec2/responses/subnets.py b/moto/ec2/responses/subnets.py
index ef1b6249c47b..1cfd36993576 100644
--- a/moto/ec2/responses/subnets.py
+++ b/moto/ec2/responses/subnets.py
@@ -99,7 +99,7 @@ def modify_subnet_attribute(self):
     {% for subnet in subnets %}
         <item>
           <subnetId>{{ subnet.id }}</subnetId>
-          <state>available</state>
+          <state>{{ subnet.state }}</state>
           <vpcId>{{ subnet.vpc_id }}</vpcId>
           <cidrBlock>{{ subnet.cidr_block }}</cidrBlock>
           <availableIpAddressCount>{{ subnet.available_ip_addresses }}</availableIpAddressCount>

diff --git a/tests/test_ec2/test_subnets.py b/tests/test_ec2/test_subnets.py
index 246cacf6b4e8..76e525990dba 100644
--- a/tests/test_ec2/test_subnets.py
+++ b/tests/test_ec2/test_subnets.py
@@ -677,3 +677,59 @@ def test_run_instances_should_attach_to_default_subnet():
         subnets[0]["AvailableIpAddressCount"] == 4090
         or subnets[1]["AvailableIpAddressCount"] == 4090
     )
+
+
+@mock_ec2
+def test_describe_subnets_by_vpc_id():
+    ec2 = boto3.resource("ec2", region_name="us-west-1")
+    client = boto3.client("ec2", region_name="us-west-1")
+
+    vpc1 = ec2.create_vpc(CidrBlock="10.0.0.0/16")
+    subnet1 = ec2.create_subnet(
+        VpcId=vpc1.id, CidrBlock="10.0.0.0/24", AvailabilityZone="us-west-1a"
+    )
+    vpc2 = ec2.create_vpc(CidrBlock="172.31.0.0/16")
+    subnet2 = ec2.create_subnet(
+        VpcId=vpc2.id, CidrBlock="172.31.48.0/20", AvailabilityZone="us-west-1b"
+    )
+
+    subnets = client.describe_subnets(
+        Filters=[{"Name": "vpc-id", "Values": [vpc1.id]}]
+    ).get("Subnets", [])
+    subnets.should.have.length_of(1)
+    subnets[0]["SubnetId"].should.equal(subnet1.id)
+
+    subnets = client.describe_subnets(
+        Filters=[{"Name": "vpc-id", "Values": [vpc2.id]}]
+    ).get("Subnets", [])
+    subnets.should.have.length_of(1)
+    subnets[0]["SubnetId"].should.equal(subnet2.id)
+
+    # Specify multiple VPCs in Filter.
+    subnets = client.describe_subnets(
+        Filters=[{"Name": "vpc-id", "Values": [vpc1.id, vpc2.id]}]
+    ).get("Subnets", [])
+    subnets.should.have.length_of(2)
+
+    # Specify mismatched SubnetIds/Filters.
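+    # (A subnet must satisfy both the explicit SubnetIds list and the Filters,
+    # so pairing subnet1's id with vpc2's id should return an empty list.)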
+    subnets = client.describe_subnets(
+        SubnetIds=[subnet1.id], Filters=[{"Name": "vpc-id", "Values": [vpc2.id]}]
+    ).get("Subnets", [])
+    subnets.should.have.length_of(0)
+
+
+@mock_ec2
+def test_describe_subnets_by_state():
+    ec2 = boto3.resource("ec2", region_name="us-west-1")
+    client = boto3.client("ec2", region_name="us-west-1")
+
+    vpc = ec2.create_vpc(CidrBlock="10.0.0.0/16")
+    ec2.create_subnet(
+        VpcId=vpc.id, CidrBlock="10.0.0.0/24", AvailabilityZone="us-west-1a"
+    )
+
+    subnets = client.describe_subnets(
+        Filters=[{"Name": "state", "Values": ["available"]}]
+    ).get("Subnets", [])
+    for subnet in subnets:
+        subnet["State"].should.equal("available")

From d29475ed19dfbb80213c0b87507b9952a718920f Mon Sep 17 00:00:00 2001
From: Brian Pandola
Date: Mon, 16 Nov 2020 01:30:53 -0800
Subject: [PATCH 630/658] Fix: `TagList` missing in rds:DescribeDBInstance
 response (#3459)

Previously, tags were only available via rds:ListTagsForResource, but are now
included in the Create/DescribeDBInstance responses as of Botocore 1.18.17[1]

[1]: https://github.com/boto/botocore/commit/f29d23c53ec0477e58450744986b0f8f23c97da1#diff-d10722c0e11ded323c8d240066d7ed31e93a1e6423d54e091b7d54b86e6bd4e0

Fixes #3458

---
 moto/rds2/models.py          |  8 ++++++++
 tests/test_rds2/test_rds2.py | 18 ++++++++++++++++++
 2 files changed, 26 insertions(+)

diff --git a/moto/rds2/models.py b/moto/rds2/models.py
index 6efbf8492417..bc52bdcbf40f 100644
--- a/moto/rds2/models.py
+++ b/moto/rds2/models.py
@@ -280,6 +280,14 @@ def to_xml(self):
         <Port>{{ database.port }}</Port>
         <DBInstanceArn>{{ database.db_instance_arn }}</DBInstanceArn>
+        <TagList>
+        {%- for tag in database.tags -%}
+          <Tag>
+            <Key>{{ tag['Key'] }}</Key>
+            <Value>{{ tag['Value'] }}</Value>
+          </Tag>
+        {%- endfor -%}
+        </TagList>
         """
         )
         return template.render(database=self)

diff --git a/tests/test_rds2/test_rds2.py b/tests/test_rds2/test_rds2.py
index 13e35549a52d..fd2ffb9d0721 100644
--- a/tests/test_rds2/test_rds2.py
+++ b/tests/test_rds2/test_rds2.py
@@ -1749,3 +1749,21 @@ def test_create_db_snapshot_with_iam_authentication():
     ).get("DBSnapshot")

     snapshot.get("IAMDatabaseAuthenticationEnabled").should.equal(True)
+
+
+@mock_rds2
+def test_create_db_instance_with_tags():
+    client = boto3.client("rds", region_name="us-west-2")
+    tags = [{"Key": "foo", "Value": "bar"}, {"Key": "foo1", "Value": "bar1"}]
+    db_instance_identifier = "test-db-instance"
+    resp = client.create_db_instance(
+        DBInstanceIdentifier=db_instance_identifier,
+        Engine="postgres",
+        DBName="staging-postgres",
+        DBInstanceClass="db.m1.small",
+        Tags=tags,
+    )
+    resp["DBInstance"]["TagList"].should.equal(tags)
+
+    resp = client.describe_db_instances(DBInstanceIdentifier=db_instance_identifier)
+    resp["DBInstances"][0]["TagList"].should.equal(tags)

From 62d382ff706066251bae967ef28a495864941988 Mon Sep 17 00:00:00 2001
From: Oide Brett <32061073+oidebrett@users.noreply.github.com>
Date: Tue, 17 Nov 2020 07:41:54 +0000
Subject: [PATCH 631/658] Fixed issue 3448 for DynamoDB update_item (#3463)

* Fixed issue 3448 for DynamoDB update_item

* Tidied up fix for issue 3448 for DynamoDB update_item

* Reformatted fix for issue 3448 for DynamoDB update_item

* Removed use of f-strings in the test case, as the Travis CI build fails
  because Python 2.7 does not support f-strings

---
 moto/dynamodb2/responses.py           |  5 +---
 tests/test_dynamodb2/test_dynamodb.py | 38 +++++++++++++++++++++++++++++++++
 2 files changed, 39 insertions(+), 4 deletions(-)

diff --git a/moto/dynamodb2/responses.py b/moto/dynamodb2/responses.py
index 8eb1023b662f..d67994ced6a8 100644
--- a/moto/dynamodb2/responses.py
+++ b/moto/dynamodb2/responses.py
@@ -813,7 +813,6 @@
def update_item(self): item_dict["Attributes"] = self._build_updated_new_attributes( existing_attributes, item_dict["Attributes"] ) - return dynamo_json_dump(item_dict) def _build_updated_new_attributes(self, original, changed): @@ -838,10 +837,8 @@ def _build_updated_new_attributes(self, original, changed): ) for index in range(len(changed)) ] - elif changed != original: - return changed else: - return None + return changed def describe_limits(self): return json.dumps( diff --git a/tests/test_dynamodb2/test_dynamodb.py b/tests/test_dynamodb2/test_dynamodb.py index de9811df68f2..731f4466de7c 100644 --- a/tests/test_dynamodb2/test_dynamodb.py +++ b/tests/test_dynamodb2/test_dynamodb.py @@ -2437,6 +2437,44 @@ def update(col, to, rv): r = update("col1", "val6", "WRONG") +# https://github.com/spulec/moto/issues/3448 +@mock_dynamodb2 +def test_update_return_updated_new_attributes_when_same(): + dynamo_client = boto3.resource("dynamodb", region_name="us-east-1") + dynamo_client.create_table( + TableName="moto-test", + KeySchema=[{"AttributeName": "HashKey1", "KeyType": "HASH"}], + AttributeDefinitions=[{"AttributeName": "HashKey1", "AttributeType": "S"}], + ProvisionedThroughput={"ReadCapacityUnits": 1, "WriteCapacityUnits": 1}, + ) + + dynamodb_table = dynamo_client.Table("moto-test") + dynamodb_table.put_item( + Item={"HashKey1": "HashKeyValue1", "listValuedAttribute1": ["a", "b"]} + ) + + def update(col, to, rv): + return dynamodb_table.update_item( + TableName="moto-test", + Key={"HashKey1": "HashKeyValue1"}, + UpdateExpression="SET listValuedAttribute1=:" + col, + ExpressionAttributeValues={":" + col: to}, + ReturnValues=rv, + ) + + r = update("a", ["a", "c"], "UPDATED_NEW") + assert r["Attributes"] == {"listValuedAttribute1": ["a", "c"]} + + r = update("a", {"a", "c"}, "UPDATED_NEW") + assert r["Attributes"] == {"listValuedAttribute1": {"a", "c"}} + + r = update("a", {1, 2}, "UPDATED_NEW") + assert r["Attributes"] == {"listValuedAttribute1": {1, 2}} + + with pytest.raises(ClientError) as ex: + r = update("a", ["a", "c"], "WRONG") + + @mock_dynamodb2 def test_put_return_attributes(): dynamodb = boto3.client("dynamodb", region_name="us-east-1") From f045af7e0a3d8159d7111cf0cf2e77da8ca97282 Mon Sep 17 00:00:00 2001 From: Rich Unger Date: Tue, 17 Nov 2020 01:12:39 -0800 Subject: [PATCH 632/658] Add support for empty strings in non-key dynamo attributes (#3467) * Add support for empty strings in non-key attributes https://github.com/spulec/moto/issues/3339 * Nose, not pytest * Revert "Nose, not pytest" This reverts commit 5a3cf6c887dd9fafa49096c82cfa3a3b7f91d224. 
* PUT is default action --- moto/dynamodb2/exceptions.py | 7 ++ moto/dynamodb2/models/__init__.py | 24 +++++ moto/dynamodb2/parsing/validators.py | 28 +++++- moto/dynamodb2/responses.py | 26 +++--- tests/test_dynamodb2/conftest.py | 13 +++ tests/test_dynamodb2/test_dynamodb.py | 77 ++++++++++++++-- .../test_dynamodb2/test_dynamodb_executor.py | 46 +++++++--- .../test_dynamodb_validation.py | 88 +++++++++++++++---- 8 files changed, 257 insertions(+), 52 deletions(-) create mode 100644 tests/test_dynamodb2/conftest.py diff --git a/moto/dynamodb2/exceptions.py b/moto/dynamodb2/exceptions.py index 334cd913a22f..01b98b35df2e 100644 --- a/moto/dynamodb2/exceptions.py +++ b/moto/dynamodb2/exceptions.py @@ -164,3 +164,10 @@ class TransactionCanceledException(ValueError): def __init__(self, errors): msg = self.cancel_reason_msg.format(", ".join([str(err) for err in errors])) super(TransactionCanceledException, self).__init__(msg) + + +class EmptyKeyAttributeException(MockValidationException): + empty_str_msg = "One or more parameter values were invalid: An AttributeValue may not contain an empty string" + + def __init__(self): + super(EmptyKeyAttributeException, self).__init__(self.empty_str_msg) diff --git a/moto/dynamodb2/models/__init__.py b/moto/dynamodb2/models/__init__.py index 6b35830103da..18b0b918f9c2 100644 --- a/moto/dynamodb2/models/__init__.py +++ b/moto/dynamodb2/models/__init__.py @@ -20,6 +20,7 @@ ItemSizeToUpdateTooLarge, ConditionalCheckFailed, TransactionCanceledException, + EmptyKeyAttributeException, ) from moto.dynamodb2.models.utilities import bytesize from moto.dynamodb2.models.dynamo_type import DynamoType @@ -107,6 +108,13 @@ def describe_attrs(self, attributes): included = self.attrs return {"Item": included} + def validate_no_empty_key_values(self, attribute_updates, key_attributes): + for attribute_name, update_action in attribute_updates.items(): + action = update_action.get("Action") or "PUT" # PUT is default + new_value = next(iter(update_action["Value"].values())) + if action == "PUT" and new_value == "" and attribute_name in key_attributes: + raise EmptyKeyAttributeException + def update_with_attribute_updates(self, attribute_updates): for attribute_name, update_action in attribute_updates.items(): # Use default Action value, if no explicit Action is passed. 
@@ -434,6 +442,18 @@ def get_cfn_attribute(self, attribute_name): def physical_resource_id(self): return self.name + @property + def key_attributes(self): + # A set of all the hash or range attributes for all indexes + def keys_from_index(idx): + schema = idx.schema + return [attr["AttributeName"] for attr in schema] + + fieldnames = copy.copy(self.table_key_attrs) + for idx in self.indexes + self.global_indexes: + fieldnames += keys_from_index(idx) + return fieldnames + @staticmethod def cloudformation_name_type(): return "TableName" @@ -1273,12 +1293,16 @@ def update_item( table.put_item(data) item = table.get_item(hash_value, range_value) + if attribute_updates: + item.validate_no_empty_key_values(attribute_updates, table.key_attributes) + if update_expression: validated_ast = UpdateExpressionValidator( update_expression_ast, expression_attribute_names=expression_attribute_names, expression_attribute_values=expression_attribute_values, item=item, + table=table, ).validate() try: UpdateExpressionExecutor( diff --git a/moto/dynamodb2/parsing/validators.py b/moto/dynamodb2/parsing/validators.py index f924a713c309..79849e538c64 100644 --- a/moto/dynamodb2/parsing/validators.py +++ b/moto/dynamodb2/parsing/validators.py @@ -12,6 +12,7 @@ IncorrectOperandType, InvalidUpdateExpressionInvalidDocumentPath, ProvidedKeyDoesNotExist, + EmptyKeyAttributeException, ) from moto.dynamodb2.models import DynamoType from moto.dynamodb2.parsing.ast_nodes import ( @@ -318,13 +319,36 @@ def get_subtraction(cls, left_operand, right_operand): raise IncorrectOperandType("-", left_operand.type) +class EmptyStringKeyValueValidator(DepthFirstTraverser): + def __init__(self, key_attributes): + self.key_attributes = key_attributes + + def _processing_map(self): + return {UpdateExpressionSetAction: self.check_for_empty_string_key_value} + + def check_for_empty_string_key_value(self, node): + """A node representing a SET action. Check that keys are not being assigned empty strings""" + assert isinstance(node, UpdateExpressionSetAction) + assert len(node.children) == 2 + key = node.children[0].children[0].children[0] + val_node = node.children[1].children[0] + if val_node.type in ["S", "B"] and key in self.key_attributes: + raise EmptyKeyAttributeException + return node + + class Validator(object): """ A validator is used to validate expressions which are passed in as an AST. 
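    It is also given the table, so key attributes are known while validating
    (for example, to reject empty-string values assigned to key attributes).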
""" def __init__( - self, expression, expression_attribute_names, expression_attribute_values, item + self, + expression, + expression_attribute_names, + expression_attribute_values, + item, + table, ): """ Besides validation the Validator should also replace referenced parts of an item which is cheapest upon @@ -339,6 +363,7 @@ def __init__( self.expression_attribute_names = expression_attribute_names self.expression_attribute_values = expression_attribute_values self.item = item + self.table = table self.processors = self.get_ast_processors() self.node_to_validate = deepcopy(expression) @@ -364,5 +389,6 @@ def get_ast_processors(self): UpdateExpressionFunctionEvaluator(), NoneExistingPathChecker(), ExecuteOperations(), + EmptyStringKeyValueValidator(self.table.key_attributes), ] return processors diff --git a/moto/dynamodb2/responses.py b/moto/dynamodb2/responses.py index d67994ced6a8..85d265f6d569 100644 --- a/moto/dynamodb2/responses.py +++ b/moto/dynamodb2/responses.py @@ -21,15 +21,18 @@ TRANSACTION_MAX_ITEMS = 25 -def has_empty_keys_or_values(_dict): - if _dict == "": - return True - if not isinstance(_dict, dict): - return False - return any( - key == "" or value == "" or has_empty_keys_or_values(value) - for key, value in _dict.items() - ) +def put_has_empty_keys(field_updates, table): + if table: + key_names = table.key_attributes + + # string/binary fields with empty string as value + empty_str_fields = [ + key + for (key, val) in field_updates.items() + if next(iter(val.keys())) in ["S", "B"] and next(iter(val.values())) == "" + ] + return any([keyname in empty_str_fields for keyname in key_names]) + return False def get_empty_str_error(): @@ -257,7 +260,7 @@ def put_item(self): er = "com.amazonaws.dynamodb.v20111205#ValidationException" return self.error(er, "Return values set to invalid value") - if has_empty_keys_or_values(item): + if put_has_empty_keys(item, self.dynamodb_backend.get_table(name)): return get_empty_str_error() overwrite = "Expected" not in self.body @@ -751,9 +754,6 @@ def update_item(self): er = "com.amazonaws.dynamodb.v20111205#ValidationException" return self.error(er, "Return values set to invalid value") - if has_empty_keys_or_values(expression_attribute_values): - return get_empty_str_error() - if "Expected" in self.body: expected = self.body["Expected"] else: diff --git a/tests/test_dynamodb2/conftest.py b/tests/test_dynamodb2/conftest.py new file mode 100644 index 000000000000..5f523db9660a --- /dev/null +++ b/tests/test_dynamodb2/conftest.py @@ -0,0 +1,13 @@ +import pytest +from moto.dynamodb2.models import Table + + +@pytest.fixture +def table(): + return Table( + "Forums", + schema=[ + {"KeyType": "HASH", "AttributeName": "forum_name"}, + {"KeyType": "RANGE", "AttributeName": "subject"}, + ], + ) diff --git a/tests/test_dynamodb2/test_dynamodb.py b/tests/test_dynamodb2/test_dynamodb.py index 731f4466de7c..3571239e2249 100644 --- a/tests/test_dynamodb2/test_dynamodb.py +++ b/tests/test_dynamodb2/test_dynamodb.py @@ -186,7 +186,7 @@ def test_list_not_found_table_tags(): @requires_boto_gte("2.9") @mock_dynamodb2 -def test_item_add_empty_string_exception(): +def test_item_add_empty_string_in_key_exception(): name = "TestTable" conn = boto3.client( "dynamodb", @@ -205,10 +205,10 @@ def test_item_add_empty_string_exception(): conn.put_item( TableName=name, Item={ - "forum_name": {"S": "LOLCat Forum"}, + "forum_name": {"S": ""}, "subject": {"S": "Check this out!"}, "Body": {"S": "http://url_to_lolcat.gif"}, - "SentBy": {"S": ""}, + "SentBy": {"S": 
"someone@somewhere.edu"}, "ReceivedTime": {"S": "12/9/2011 11:36:03 PM"}, }, ) @@ -222,7 +222,36 @@ def test_item_add_empty_string_exception(): @requires_boto_gte("2.9") @mock_dynamodb2 -def test_update_item_with_empty_string_exception(): +def test_item_add_empty_string_no_exception(): + name = "TestTable" + conn = boto3.client( + "dynamodb", + region_name="us-west-2", + aws_access_key_id="ak", + aws_secret_access_key="sk", + ) + conn.create_table( + TableName=name, + KeySchema=[{"AttributeName": "forum_name", "KeyType": "HASH"}], + AttributeDefinitions=[{"AttributeName": "forum_name", "AttributeType": "S"}], + ProvisionedThroughput={"ReadCapacityUnits": 5, "WriteCapacityUnits": 5}, + ) + + conn.put_item( + TableName=name, + Item={ + "forum_name": {"S": "LOLCat Forum"}, + "subject": {"S": "Check this out!"}, + "Body": {"S": "http://url_to_lolcat.gif"}, + "SentBy": {"S": ""}, + "ReceivedTime": {"S": "12/9/2011 11:36:03 PM"}, + }, + ) + + +@requires_boto_gte("2.9") +@mock_dynamodb2 +def test_update_item_with_empty_string_in_key_exception(): name = "TestTable" conn = boto3.client( "dynamodb", @@ -252,8 +281,8 @@ def test_update_item_with_empty_string_exception(): conn.update_item( TableName=name, Key={"forum_name": {"S": "LOLCat Forum"}}, - UpdateExpression="set Body=:Body", - ExpressionAttributeValues={":Body": {"S": ""}}, + UpdateExpression="set forum_name=:NewName", + ExpressionAttributeValues={":NewName": {"S": ""}}, ) ex.value.response["Error"]["Code"].should.equal("ValidationException") @@ -263,6 +292,42 @@ def test_update_item_with_empty_string_exception(): ) +@requires_boto_gte("2.9") +@mock_dynamodb2 +def test_update_item_with_empty_string_no_exception(): + name = "TestTable" + conn = boto3.client( + "dynamodb", + region_name="us-west-2", + aws_access_key_id="ak", + aws_secret_access_key="sk", + ) + conn.create_table( + TableName=name, + KeySchema=[{"AttributeName": "forum_name", "KeyType": "HASH"}], + AttributeDefinitions=[{"AttributeName": "forum_name", "AttributeType": "S"}], + ProvisionedThroughput={"ReadCapacityUnits": 5, "WriteCapacityUnits": 5}, + ) + + conn.put_item( + TableName=name, + Item={ + "forum_name": {"S": "LOLCat Forum"}, + "subject": {"S": "Check this out!"}, + "Body": {"S": "http://url_to_lolcat.gif"}, + "SentBy": {"S": "test"}, + "ReceivedTime": {"S": "12/9/2011 11:36:03 PM"}, + }, + ) + + conn.update_item( + TableName=name, + Key={"forum_name": {"S": "LOLCat Forum"}}, + UpdateExpression="set Body=:Body", + ExpressionAttributeValues={":Body": {"S": ""}}, + ) + + @requires_boto_gte("2.9") @mock_dynamodb2 def test_query_invalid_table(): diff --git a/tests/test_dynamodb2/test_dynamodb_executor.py b/tests/test_dynamodb2/test_dynamodb_executor.py index 892d2715cef9..577a5bae0d52 100644 --- a/tests/test_dynamodb2/test_dynamodb_executor.py +++ b/tests/test_dynamodb2/test_dynamodb_executor.py @@ -7,7 +7,7 @@ from moto.dynamodb2.parsing.validators import UpdateExpressionValidator -def test_execution_of_if_not_exists_not_existing_value(): +def test_execution_of_if_not_exists_not_existing_value(table): update_expression = "SET a = if_not_exists(b, a)" update_expression_ast = UpdateExpressionParser.make(update_expression) item = Item( @@ -22,6 +22,7 @@ def test_execution_of_if_not_exists_not_existing_value(): expression_attribute_names=None, expression_attribute_values=None, item=item, + table=table, ).validate() UpdateExpressionExecutor(validated_ast, item, None).execute() expected_item = Item( @@ -34,7 +35,9 @@ def test_execution_of_if_not_exists_not_existing_value(): 
assert expected_item == item -def test_execution_of_if_not_exists_with_existing_attribute_should_return_attribute(): +def test_execution_of_if_not_exists_with_existing_attribute_should_return_attribute( + table, +): update_expression = "SET a = if_not_exists(b, a)" update_expression_ast = UpdateExpressionParser.make(update_expression) item = Item( @@ -49,6 +52,7 @@ def test_execution_of_if_not_exists_with_existing_attribute_should_return_attrib expression_attribute_names=None, expression_attribute_values=None, item=item, + table=table, ).validate() UpdateExpressionExecutor(validated_ast, item, None).execute() expected_item = Item( @@ -61,7 +65,7 @@ def test_execution_of_if_not_exists_with_existing_attribute_should_return_attrib assert expected_item == item -def test_execution_of_if_not_exists_with_existing_attribute_should_return_value(): +def test_execution_of_if_not_exists_with_existing_attribute_should_return_value(table): update_expression = "SET a = if_not_exists(b, :val)" update_expression_values = {":val": {"N": "4"}} update_expression_ast = UpdateExpressionParser.make(update_expression) @@ -77,6 +81,7 @@ def test_execution_of_if_not_exists_with_existing_attribute_should_return_value( expression_attribute_names=None, expression_attribute_values=update_expression_values, item=item, + table=table, ).validate() UpdateExpressionExecutor(validated_ast, item, None).execute() expected_item = Item( @@ -89,7 +94,9 @@ def test_execution_of_if_not_exists_with_existing_attribute_should_return_value( assert expected_item == item -def test_execution_of_if_not_exists_with_non_existing_attribute_should_return_value(): +def test_execution_of_if_not_exists_with_non_existing_attribute_should_return_value( + table, +): update_expression = "SET a = if_not_exists(b, :val)" update_expression_values = {":val": {"N": "4"}} update_expression_ast = UpdateExpressionParser.make(update_expression) @@ -105,6 +112,7 @@ def test_execution_of_if_not_exists_with_non_existing_attribute_should_return_va expression_attribute_names=None, expression_attribute_values=update_expression_values, item=item, + table=table, ).validate() UpdateExpressionExecutor(validated_ast, item, None).execute() expected_item = Item( @@ -117,7 +125,7 @@ def test_execution_of_if_not_exists_with_non_existing_attribute_should_return_va assert expected_item == item -def test_execution_of_sum_operation(): +def test_execution_of_sum_operation(table): update_expression = "SET a = a + b" update_expression_ast = UpdateExpressionParser.make(update_expression) item = Item( @@ -132,6 +140,7 @@ def test_execution_of_sum_operation(): expression_attribute_names=None, expression_attribute_values=None, item=item, + table=table, ).validate() UpdateExpressionExecutor(validated_ast, item, None).execute() expected_item = Item( @@ -144,7 +153,7 @@ def test_execution_of_sum_operation(): assert expected_item == item -def test_execution_of_remove(): +def test_execution_of_remove(table): update_expression = "Remove a" update_expression_ast = UpdateExpressionParser.make(update_expression) item = Item( @@ -159,6 +168,7 @@ def test_execution_of_remove(): expression_attribute_names=None, expression_attribute_values=None, item=item, + table=table, ).validate() UpdateExpressionExecutor(validated_ast, item, None).execute() expected_item = Item( @@ -171,7 +181,7 @@ def test_execution_of_remove(): assert expected_item == item -def test_execution_of_remove_in_map(): +def test_execution_of_remove_in_map(table): update_expression = "Remove itemmap.itemlist[1].foo11" 
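    # exercises removal of a nested map key addressed through a list index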
update_expression_ast = UpdateExpressionParser.make(update_expression) item = Item( @@ -198,6 +208,7 @@ def test_execution_of_remove_in_map(): expression_attribute_names=None, expression_attribute_values=None, item=item, + table=table, ).validate() UpdateExpressionExecutor(validated_ast, item, None).execute() expected_item = Item( @@ -222,7 +233,7 @@ def test_execution_of_remove_in_map(): assert expected_item == item -def test_execution_of_remove_in_list(): +def test_execution_of_remove_in_list(table): update_expression = "Remove itemmap.itemlist[1]" update_expression_ast = UpdateExpressionParser.make(update_expression) item = Item( @@ -249,6 +260,7 @@ def test_execution_of_remove_in_list(): expression_attribute_names=None, expression_attribute_values=None, item=item, + table=table, ).validate() UpdateExpressionExecutor(validated_ast, item, None).execute() expected_item = Item( @@ -270,7 +282,7 @@ def test_execution_of_remove_in_list(): assert expected_item == item -def test_execution_of_delete_element_from_set(): +def test_execution_of_delete_element_from_set(table): update_expression = "delete s :value" update_expression_ast = UpdateExpressionParser.make(update_expression) item = Item( @@ -285,6 +297,7 @@ def test_execution_of_delete_element_from_set(): expression_attribute_names=None, expression_attribute_values={":value": {"SS": ["value2", "value5"]}}, item=item, + table=table, ).validate() UpdateExpressionExecutor(validated_ast, item, None).execute() expected_item = Item( @@ -297,7 +310,7 @@ def test_execution_of_delete_element_from_set(): assert expected_item == item -def test_execution_of_add_number(): +def test_execution_of_add_number(table): update_expression = "add s :value" update_expression_ast = UpdateExpressionParser.make(update_expression) item = Item( @@ -312,6 +325,7 @@ def test_execution_of_add_number(): expression_attribute_names=None, expression_attribute_values={":value": {"N": "10"}}, item=item, + table=table, ).validate() UpdateExpressionExecutor(validated_ast, item, None).execute() expected_item = Item( @@ -324,7 +338,7 @@ def test_execution_of_add_number(): assert expected_item == item -def test_execution_of_add_set_to_a_number(): +def test_execution_of_add_set_to_a_number(table): update_expression = "add s :value" update_expression_ast = UpdateExpressionParser.make(update_expression) item = Item( @@ -340,6 +354,7 @@ def test_execution_of_add_set_to_a_number(): expression_attribute_names=None, expression_attribute_values={":value": {"SS": ["s1"]}}, item=item, + table=table, ).validate() UpdateExpressionExecutor(validated_ast, item, None).execute() expected_item = Item( @@ -355,7 +370,7 @@ def test_execution_of_add_set_to_a_number(): assert True -def test_execution_of_add_to_a_set(): +def test_execution_of_add_to_a_set(table): update_expression = "ADD s :value" update_expression_ast = UpdateExpressionParser.make(update_expression) item = Item( @@ -370,6 +385,7 @@ def test_execution_of_add_to_a_set(): expression_attribute_names=None, expression_attribute_values={":value": {"SS": ["value2", "value5"]}}, item=item, + table=table, ).validate() UpdateExpressionExecutor(validated_ast, item, None).execute() expected_item = Item( @@ -398,7 +414,7 @@ def test_execution_of_add_to_a_set(): ], ) def test_execution_of__delete_element_from_set_invalid_value( - expression_attribute_values, unexpected_data_type + expression_attribute_values, unexpected_data_type, table ): """A delete statement must use a value of type SS in order to delete elements from a set.""" update_expression 
= "delete s :value" @@ -416,6 +432,7 @@ def test_execution_of__delete_element_from_set_invalid_value( expression_attribute_names=None, expression_attribute_values=expression_attribute_values, item=item, + table=table, ).validate() UpdateExpressionExecutor(validated_ast, item, None).execute() assert False, "Must raise exception" @@ -424,7 +441,7 @@ def test_execution_of__delete_element_from_set_invalid_value( assert e.operand_type == unexpected_data_type -def test_execution_of_delete_element_from_a_string_attribute(): +def test_execution_of_delete_element_from_a_string_attribute(table): """A delete statement must use a value of type SS in order to delete elements from a set.""" update_expression = "delete s :value" update_expression_ast = UpdateExpressionParser.make(update_expression) @@ -441,6 +458,7 @@ def test_execution_of_delete_element_from_a_string_attribute(): expression_attribute_names=None, expression_attribute_values={":value": {"SS": ["value2"]}}, item=item, + table=table, ).validate() UpdateExpressionExecutor(validated_ast, item, None).execute() assert False, "Must raise exception" diff --git a/tests/test_dynamodb2/test_dynamodb_validation.py b/tests/test_dynamodb2/test_dynamodb_validation.py index 8761d2cd270d..c966efc14c7b 100644 --- a/tests/test_dynamodb2/test_dynamodb_validation.py +++ b/tests/test_dynamodb2/test_dynamodb_validation.py @@ -7,6 +7,7 @@ ExpressionAttributeNameNotDefined, IncorrectOperandType, InvalidUpdateExpressionInvalidDocumentPath, + EmptyKeyAttributeException, ) from moto.dynamodb2.models import Item, DynamoType from moto.dynamodb2.parsing.ast_nodes import ( @@ -18,7 +19,28 @@ from moto.dynamodb2.parsing.validators import UpdateExpressionValidator -def test_validation_of_update_expression_with_keyword(): +def test_validation_of_empty_string_key_val(table): + with pytest.raises(EmptyKeyAttributeException): + update_expression = "set forum_name=:NewName" + update_expression_values = {":NewName": {"S": ""}} + update_expression_ast = UpdateExpressionParser.make(update_expression) + item = Item( + hash_key=DynamoType({"S": "forum_name"}), + hash_key_type="TYPE", + range_key=None, + range_key_type=None, + attrs={"forum_name": {"S": "hello"}}, + ) + UpdateExpressionValidator( + update_expression_ast, + expression_attribute_names=None, + expression_attribute_values=update_expression_values, + item=item, + table=table, + ).validate() + + +def test_validation_of_update_expression_with_keyword(table): try: update_expression = "SET myNum = path + :val" update_expression_values = {":val": {"N": "3"}} @@ -35,6 +57,7 @@ def test_validation_of_update_expression_with_keyword(): expression_attribute_names=None, expression_attribute_values=update_expression_values, item=item, + table=table, ).validate() assert False, "No exception raised" except AttributeIsReservedKeyword as e: @@ -44,7 +67,9 @@ def test_validation_of_update_expression_with_keyword(): @pytest.mark.parametrize( "update_expression", ["SET a = #b + :val2", "SET a = :val2 + #b",] ) -def test_validation_of_a_set_statement_with_incorrect_passed_value(update_expression): +def test_validation_of_a_set_statement_with_incorrect_passed_value( + update_expression, table +): """ By running permutations it shows that values are replaced prior to resolving attributes. 
@@ -65,12 +90,15 @@ def test_validation_of_a_set_statement_with_incorrect_passed_value(update_expres expression_attribute_names={"#b": "ok"}, expression_attribute_values={":val": {"N": "3"}}, item=item, + table=table, ).validate() except ExpressionAttributeValueNotDefined as e: assert e.attribute_value == ":val2" -def test_validation_of_update_expression_with_attribute_that_does_not_exist_in_item(): +def test_validation_of_update_expression_with_attribute_that_does_not_exist_in_item( + table, +): """ When an update expression tries to get an attribute that does not exist it must throw the appropriate exception. @@ -92,6 +120,7 @@ def test_validation_of_update_expression_with_attribute_that_does_not_exist_in_i expression_attribute_names=None, expression_attribute_values=None, item=item, + table=table, ).validate() assert False, "No exception raised" except AttributeDoesNotExist: @@ -100,7 +129,7 @@ def test_validation_of_update_expression_with_attribute_that_does_not_exist_in_i @pytest.mark.parametrize("update_expression", ["SET a = #c", "SET a = #c + #d",]) def test_validation_of_update_expression_with_attribute_name_that_is_not_defined( - update_expression, + update_expression, table, ): """ When an update expression tries to get an attribute name that is not provided it must throw an exception. @@ -122,13 +151,14 @@ def test_validation_of_update_expression_with_attribute_name_that_is_not_defined expression_attribute_names={"#b": "ok"}, expression_attribute_values=None, item=item, + table=table, ).validate() assert False, "No exception raised" except ExpressionAttributeNameNotDefined as e: assert e.not_defined_attribute_name == "#c" -def test_validation_of_if_not_exists_not_existing_invalid_replace_value(): +def test_validation_of_if_not_exists_not_existing_invalid_replace_value(table): try: update_expression = "SET a = if_not_exists(b, a.c)" update_expression_ast = UpdateExpressionParser.make(update_expression) @@ -144,6 +174,7 @@ def test_validation_of_if_not_exists_not_existing_invalid_replace_value(): expression_attribute_names=None, expression_attribute_values=None, item=item, + table=table, ).validate() assert False, "No exception raised" except AttributeDoesNotExist: @@ -172,7 +203,7 @@ def get_set_action_value(ast): return dynamo_value -def test_validation_of_if_not_exists_not_existing_value(): +def test_validation_of_if_not_exists_not_existing_value(table): update_expression = "SET a = if_not_exists(b, a)" update_expression_ast = UpdateExpressionParser.make(update_expression) item = Item( @@ -187,12 +218,15 @@ def test_validation_of_if_not_exists_not_existing_value(): expression_attribute_names=None, expression_attribute_values=None, item=item, + table=table, ).validate() dynamo_value = get_set_action_value(validated_ast) assert dynamo_value == DynamoType({"S": "A"}) -def test_validation_of_if_not_exists_with_existing_attribute_should_return_attribute(): +def test_validation_of_if_not_exists_with_existing_attribute_should_return_attribute( + table, +): update_expression = "SET a = if_not_exists(b, a)" update_expression_ast = UpdateExpressionParser.make(update_expression) item = Item( @@ -207,12 +241,13 @@ def test_validation_of_if_not_exists_with_existing_attribute_should_return_attri expression_attribute_names=None, expression_attribute_values=None, item=item, + table=table, ).validate() dynamo_value = get_set_action_value(validated_ast) assert dynamo_value == DynamoType({"S": "B"}) -def test_validation_of_if_not_exists_with_existing_attribute_should_return_value(): +def 
test_validation_of_if_not_exists_with_existing_attribute_should_return_value(table): update_expression = "SET a = if_not_exists(b, :val)" update_expression_values = {":val": {"N": "4"}} update_expression_ast = UpdateExpressionParser.make(update_expression) @@ -228,12 +263,15 @@ def test_validation_of_if_not_exists_with_existing_attribute_should_return_value expression_attribute_names=None, expression_attribute_values=update_expression_values, item=item, + table=table, ).validate() dynamo_value = get_set_action_value(validated_ast) assert dynamo_value == DynamoType({"N": "3"}) -def test_validation_of_if_not_exists_with_non_existing_attribute_should_return_value(): +def test_validation_of_if_not_exists_with_non_existing_attribute_should_return_value( + table, +): update_expression = "SET a = if_not_exists(b, :val)" update_expression_values = {":val": {"N": "4"}} update_expression_ast = UpdateExpressionParser.make(update_expression) @@ -249,12 +287,13 @@ def test_validation_of_if_not_exists_with_non_existing_attribute_should_return_v expression_attribute_names=None, expression_attribute_values=update_expression_values, item=item, + table=table, ).validate() dynamo_value = get_set_action_value(validated_ast) assert dynamo_value == DynamoType({"N": "4"}) -def test_validation_of_sum_operation(): +def test_validation_of_sum_operation(table): update_expression = "SET a = a + b" update_expression_ast = UpdateExpressionParser.make(update_expression) item = Item( @@ -269,12 +308,13 @@ def test_validation_of_sum_operation(): expression_attribute_names=None, expression_attribute_values=None, item=item, + table=table, ).validate() dynamo_value = get_set_action_value(validated_ast) assert dynamo_value == DynamoType({"N": "7"}) -def test_validation_homogeneous_list_append_function(): +def test_validation_homogeneous_list_append_function(table): update_expression = "SET ri = list_append(ri, :vals)" update_expression_ast = UpdateExpressionParser.make(update_expression) item = Item( @@ -289,6 +329,7 @@ def test_validation_homogeneous_list_append_function(): expression_attribute_names=None, expression_attribute_values={":vals": {"L": [{"S": "i3"}, {"S": "i4"}]}}, item=item, + table=table, ).validate() dynamo_value = get_set_action_value(validated_ast) assert dynamo_value == DynamoType( @@ -296,7 +337,7 @@ def test_validation_homogeneous_list_append_function(): ) -def test_validation_hetereogenous_list_append_function(): +def test_validation_hetereogenous_list_append_function(table): update_expression = "SET ri = list_append(ri, :vals)" update_expression_ast = UpdateExpressionParser.make(update_expression) item = Item( @@ -311,12 +352,13 @@ def test_validation_hetereogenous_list_append_function(): expression_attribute_names=None, expression_attribute_values={":vals": {"L": [{"N": "3"}]}}, item=item, + table=table, ).validate() dynamo_value = get_set_action_value(validated_ast) assert dynamo_value == DynamoType({"L": [{"S": "i1"}, {"S": "i2"}, {"N": "3"}]}) -def test_validation_list_append_function_with_non_list_arg(): +def test_validation_list_append_function_with_non_list_arg(table): """ Must error out: Invalid UpdateExpression: Incorrect operand type for operator or function; @@ -339,13 +381,14 @@ def test_validation_list_append_function_with_non_list_arg(): expression_attribute_names=None, expression_attribute_values={":vals": {"S": "N"}}, item=item, + table=table, ).validate() except IncorrectOperandType as e: assert e.operand_type == "S" assert e.operator_or_function == "list_append" -def 
test_sum_with_incompatible_types(): +def test_sum_with_incompatible_types(table): """ Must error out: Invalid UpdateExpression: Incorrect operand type for operator or function; operator or function: +, operand type: S' @@ -367,13 +410,14 @@ def test_sum_with_incompatible_types(): expression_attribute_names=None, expression_attribute_values={":val": {"S": "N"}, ":val2": {"N": "3"}}, item=item, + table=table, ).validate() except IncorrectOperandType as e: assert e.operand_type == "S" assert e.operator_or_function == "+" -def test_validation_of_subraction_operation(): +def test_validation_of_subraction_operation(table): update_expression = "SET ri = :val - :val2" update_expression_ast = UpdateExpressionParser.make(update_expression) item = Item( @@ -388,12 +432,13 @@ def test_validation_of_subraction_operation(): expression_attribute_names=None, expression_attribute_values={":val": {"N": "1"}, ":val2": {"N": "3"}}, item=item, + table=table, ).validate() dynamo_value = get_set_action_value(validated_ast) assert dynamo_value == DynamoType({"N": "-2"}) -def test_cannot_index_into_a_string(): +def test_cannot_index_into_a_string(table): """ Must error out: The document path provided in the update expression is invalid for update' @@ -413,13 +458,16 @@ def test_cannot_index_into_a_string(): expression_attribute_names=None, expression_attribute_values={":Item": {"S": "string_update"}}, item=item, + table=table, ).validate() assert False, "Must raise exception" except InvalidUpdateExpressionInvalidDocumentPath: assert True -def test_validation_set_path_does_not_need_to_be_resolvable_when_setting_a_new_attribute(): +def test_validation_set_path_does_not_need_to_be_resolvable_when_setting_a_new_attribute( + table, +): """If this step just passes we are happy enough""" update_expression = "set d=a" update_expression_ast = UpdateExpressionParser.make(update_expression) @@ -435,12 +483,15 @@ def test_validation_set_path_does_not_need_to_be_resolvable_when_setting_a_new_a expression_attribute_names=None, expression_attribute_values=None, item=item, + table=table, ).validate() dynamo_value = get_set_action_value(validated_ast) assert dynamo_value == DynamoType({"N": "3"}) -def test_validation_set_path_does_not_need_to_be_resolvable_but_must_be_creatable_when_setting_a_new_attribute(): +def test_validation_set_path_does_not_need_to_be_resolvable_but_must_be_creatable_when_setting_a_new_attribute( + table, +): try: update_expression = "set d.e=a" update_expression_ast = UpdateExpressionParser.make(update_expression) @@ -456,6 +507,7 @@ def test_validation_set_path_does_not_need_to_be_resolvable_but_must_be_creatabl expression_attribute_names=None, expression_attribute_values=None, item=item, + table=table, ).validate() assert False, "Must raise exception" except InvalidUpdateExpressionInvalidDocumentPath: From 5fe921c2bc36f9fd32875681eada0c186f52c41b Mon Sep 17 00:00:00 2001 From: jweite Date: Tue, 17 Nov 2020 05:54:34 -0500 Subject: [PATCH 633/658] Added support for EMR Security Configurations and Kerberos Attributes. (#3456) * Added support for EMR Security Configurations and Kerberos Attributes. * Revised exception-raising test to work with pytest api. * Added htmlcov to .gitignore; upgrading botocore to 1.18.17, per commit d29475e. 
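For reference, a minimal usage sketch of the new security-configuration calls
(the region, names and JSON body below are illustrative, not taken from this
patch):

    import boto3
    from moto import mock_emr

    @mock_emr
    def demo_security_configuration():
        client = boto3.client("emr", region_name="us-east-1")
        # moto stores the configuration string verbatim; only duplicate
        # names are rejected on create
        client.create_security_configuration(
            Name="MySecurityConfiguration",
            SecurityConfiguration='{"EncryptionConfiguration": {"EnableAtRestEncryption": false}}',
        )
        resp = client.describe_security_configuration(Name="MySecurityConfiguration")
        assert resp["Name"] == "MySecurityConfiguration"
        # a second delete would raise InvalidRequestException, as the new tests assert
        client.delete_security_configuration(Name="MySecurityConfiguration")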
Co-authored-by: Joseph Weitekamp --- .gitignore | 1 + moto/emr/exceptions.py | 9 ++- moto/emr/models.py | 47 +++++++++++++- moto/emr/responses.py | 106 +++++++++++++++++++++++++++++-- requirements-dev.txt | 2 +- tests/test_emr/test_emr_boto3.py | 62 +++++++++++++++++- 6 files changed, 217 insertions(+), 10 deletions(-) diff --git a/.gitignore b/.gitignore index 02e812c5b2f6..04480a29049b 100644 --- a/.gitignore +++ b/.gitignore @@ -23,3 +23,4 @@ tests/file.tmp .mypy_cache/ *.tmp .venv/ +htmlcov/ \ No newline at end of file diff --git a/moto/emr/exceptions.py b/moto/emr/exceptions.py index 1a3398d4f42e..bb96346521eb 100644 --- a/moto/emr/exceptions.py +++ b/moto/emr/exceptions.py @@ -1,7 +1,14 @@ from __future__ import unicode_literals -from moto.core.exceptions import RESTError +from moto.core.exceptions import RESTError, JsonRESTError class EmrError(RESTError): code = 400 + + +class InvalidRequestException(JsonRESTError): + def __init__(self, message, **kwargs): + super(InvalidRequestException, self).__init__( + "InvalidRequestException", message, **kwargs + ) diff --git a/moto/emr/models.py b/moto/emr/models.py index 5a34c4d104db..b37ebf034322 100644 --- a/moto/emr/models.py +++ b/moto/emr/models.py @@ -6,7 +6,7 @@ from boto3 import Session from dateutil.parser import parse as dtparse from moto.core import BaseBackend, BaseModel -from moto.emr.exceptions import EmrError +from moto.emr.exceptions import EmrError, InvalidRequestException from .utils import ( random_instance_group_id, random_cluster_id, @@ -147,6 +147,8 @@ def __init__( running_ami_version=None, custom_ami_id=None, step_concurrency_level=1, + security_configuration=None, + kerberos_attributes=None, ): self.id = cluster_id or random_cluster_id() emr_backend.clusters[self.id] = self @@ -249,6 +251,10 @@ def __init__( self.run_bootstrap_actions() if self.steps: self.steps[0].start() + self.security_configuration = ( + security_configuration # ToDo: Raise if doesn't already exist. 
+ ) + self.kerberos_attributes = kerberos_attributes @property def instance_groups(self): @@ -337,12 +343,20 @@ def set_visibility(self, visibility): self.visible_to_all_users = visibility +class FakeSecurityConfiguration(BaseModel): + def __init__(self, name, security_configuration): + self.name = name + self.security_configuration = security_configuration + self.creation_date_time = datetime.now(pytz.utc) + + class ElasticMapReduceBackend(BaseBackend): def __init__(self, region_name): super(ElasticMapReduceBackend, self).__init__() self.region_name = region_name self.clusters = {} self.instance_groups = {} + self.security_configurations = {} def reset(self): region_name = self.region_name @@ -527,6 +541,37 @@ def remove_auto_scaling_policy(self, cluster_id, instance_group_id): instance_group = instance_groups[0] instance_group.auto_scaling_policy = None + def create_security_configuration(self, name, security_configuration): + if name in self.security_configurations: + raise InvalidRequestException( + message="SecurityConfiguration with name '{}' already exists.".format( + name + ) + ) + security_configuration = FakeSecurityConfiguration( + name=name, security_configuration=security_configuration + ) + self.security_configurations[name] = security_configuration + return security_configuration + + def get_security_configuration(self, name): + if name not in self.security_configurations: + raise InvalidRequestException( + message="Security configuration with name '{}' does not exist.".format( + name + ) + ) + return self.security_configurations[name] + + def delete_security_configuration(self, name): + if name not in self.security_configurations: + raise InvalidRequestException( + message="Security configuration with name '{}' does not exist.".format( + name + ) + ) + del self.security_configurations[name] + emr_backends = {} for region in Session().get_available_regions("emr"): diff --git a/moto/emr/responses.py b/moto/emr/responses.py index 9ced4569bedb..234fbc8e79c8 100644 --- a/moto/emr/responses.py +++ b/moto/emr/responses.py @@ -102,11 +102,29 @@ def add_tags(self): def cancel_steps(self): raise NotImplementedError + @generate_boto3_response("CreateSecurityConfiguration") def create_security_configuration(self): - raise NotImplementedError + name = self._get_param("Name") + security_configuration = self._get_param("SecurityConfiguration") + resp = self.backend.create_security_configuration( + name=name, security_configuration=security_configuration + ) + template = self.response_template(CREATE_SECURITY_CONFIGURATION_TEMPLATE) + return template.render(name=name, creation_date_time=resp.creation_date_time) + @generate_boto3_response("DescribeSecurityConfiguration") + def describe_security_configuration(self): + name = self._get_param("Name") + security_configuration = self.backend.get_security_configuration(name=name) + template = self.response_template(DESCRIBE_SECURITY_CONFIGURATION_TEMPLATE) + return template.render(security_configuration=security_configuration) + + @generate_boto3_response("DeleteSecurityConfiguration") def delete_security_configuration(self): - raise NotImplementedError + name = self._get_param("Name") + self.backend.delete_security_configuration(name=name) + template = self.response_template(DELETE_SECURITY_CONFIGURATION_TEMPLATE) + return template.render() @generate_boto3_response("DescribeCluster") def describe_cluster(self): @@ -190,9 +208,6 @@ def modify_cluster(self): template = self.response_template(MODIFY_CLUSTER_TEMPLATE) return 
template.render(cluster=cluster) - def describe_security_configuration(self): - raise NotImplementedError - @generate_boto3_response("ModifyInstanceGroups") def modify_instance_groups(self): instance_groups = self._get_list_prefix("InstanceGroups.member") @@ -327,6 +342,39 @@ def run_job_flow(self): if step_concurrency_level: kwargs["step_concurrency_level"] = step_concurrency_level + security_configuration = self._get_param("SecurityConfiguration") + if security_configuration: + kwargs["security_configuration"] = security_configuration + + kerberos_attributes = {} + kwargs["kerberos_attributes"] = kerberos_attributes + + realm = self._get_param("KerberosAttributes.Realm") + if realm: + kerberos_attributes["Realm"] = realm + + kdc_admin_password = self._get_param("KerberosAttributes.KdcAdminPassword") + if kdc_admin_password: + kerberos_attributes["KdcAdminPassword"] = kdc_admin_password + + cross_realm_principal_password = self._get_param( + "KerberosAttributes.CrossRealmTrustPrincipalPassword" + ) + if cross_realm_principal_password: + kerberos_attributes[ + "CrossRealmTrustPrincipalPassword" + ] = cross_realm_principal_password + + ad_domain_join_user = self._get_param("KerberosAttributes.ADDomainJoinUser") + if ad_domain_join_user: + kerberos_attributes["ADDomainJoinUser"] = ad_domain_join_user + + ad_domain_join_password = self._get_param( + "KerberosAttributes.ADDomainJoinPassword" + ) + if ad_domain_join_password: + kerberos_attributes["ADDomainJoinPassword"] = ad_domain_join_password + cluster = self.backend.run_job_flow(**kwargs) applications = self._get_list_prefix("Applications.member") @@ -560,6 +608,23 @@ def remove_auto_scaling_policy(self): {{ cluster.service_access_security_group }} {{ cluster.id }} + + {% if 'Realm' in cluster.kerberos_attributes%} + {{ cluster.kerberos_attributes['Realm'] }} + {% endif %} + {% if 'KdcAdminPassword' in cluster.kerberos_attributes%} + {{ cluster.kerberos_attributes['KdcAdminPassword'] }} + {% endif %} + {% if 'CrossRealmTrustPrincipalPassword' in cluster.kerberos_attributes%} + {{ cluster.kerberos_attributes['CrossRealmTrustPrincipalPassword'] }} + {% endif %} + {% if 'ADDomainJoinUser' in cluster.kerberos_attributes%} + {{ cluster.kerberos_attributes['ADDomainJoinUser'] }} + {% endif %} + {% if 'ADDomainJoinPassword' in cluster.kerberos_attributes%} + {{ cluster.kerberos_attributes['ADDomainJoinPassword'] }} + {% endif %} + {{ cluster.log_uri }} ec2-184-0-0-1.us-west-1.compute.amazonaws.com {{ cluster.name }} @@ -573,7 +638,9 @@ def remove_auto_scaling_policy(self): {% if cluster.running_ami_version is not none %} {{ cluster.running_ami_version }} {% endif %} - + {% if cluster.security_configuration is not none %} + {{ cluster.security_configuration }} + {% endif %} {{ cluster.service_role }} {{ cluster.state }} @@ -1253,3 +1320,30 @@ def remove_auto_scaling_policy(self): c04a1042-5340-4c0a-a7b5-7779725ce4f7
""" + +CREATE_SECURITY_CONFIGURATION_TEMPLATE = """ + + {{name}} + {{creation_date_time}} + + + 2690d7eb-ed86-11dd-9877-6fad448a8419 + +""" + +DESCRIBE_SECURITY_CONFIGURATION_TEMPLATE = """ + + {{security_configuration['name']}} + {{security_configuration['security_configuration']}} + {{security_configuration['creation_date_time']}} + + + 2690d7eb-ed86-11dd-9877-6fad448a8419 + +""" + +DELETE_SECURITY_CONFIGURATION_TEMPLATE = """ + + 2690d7eb-ed86-11dd-9877-6fad448a8419 + +""" diff --git a/requirements-dev.txt b/requirements-dev.txt index c25f8de2bde3..692a1cbf32df 100644 --- a/requirements-dev.txt +++ b/requirements-dev.txt @@ -9,7 +9,7 @@ flask flask-cors boto>=2.45.0 boto3>=1.4.4 -botocore>=1.15.13 +botocore>=1.18.17 six>=1.9 prompt-toolkit==2.0.10 # 3.x is not available with python2 click==6.7 diff --git a/tests/test_emr/test_emr_boto3.py b/tests/test_emr/test_emr_boto3.py index de8f4edbb0ca..8b815e0fa055 100644 --- a/tests/test_emr/test_emr_boto3.py +++ b/tests/test_emr/test_emr_boto3.py @@ -107,7 +107,15 @@ def test_describe_cluster(): args["Instances"]["EmrManagedSlaveSecurityGroup"] = "slave-security-group" args["Instances"]["KeepJobFlowAliveWhenNoSteps"] = False args["Instances"]["ServiceAccessSecurityGroup"] = "service-access-security-group" + args["KerberosAttributes"] = { + "Realm": "MY-REALM.COM", + "KdcAdminPassword": "SuperSecretPassword2", + "CrossRealmTrustPrincipalPassword": "SuperSecretPassword3", + "ADDomainJoinUser": "Bob", + "ADDomainJoinPassword": "SuperSecretPassword4", + } args["Tags"] = [{"Key": "tag1", "Value": "val1"}, {"Key": "tag2", "Value": "val2"}] + args["SecurityConfiguration"] = "my-security-configuration" cluster_id = client.run_job_flow(**args)["JobFlowId"] @@ -145,6 +153,7 @@ def test_describe_cluster(): args["Instances"]["ServiceAccessSecurityGroup"] ) cl["Id"].should.equal(cluster_id) + cl["KerberosAttributes"].should.equal(args["KerberosAttributes"]) cl["LogUri"].should.equal(args["LogUri"]) cl["MasterPublicDnsName"].should.be.a(six.string_types) cl["Name"].should.equal(args["Name"]) @@ -152,7 +161,8 @@ def test_describe_cluster(): # cl['ReleaseLabel'].should.equal('emr-5.0.0') cl.shouldnt.have.key("RequestedAmiVersion") cl["RunningAmiVersion"].should.equal("1.0.0") - # cl['SecurityConfiguration'].should.be.a(six.string_types) + cl["SecurityConfiguration"].should.be.a(six.string_types) + cl["SecurityConfiguration"].should.equal(args["SecurityConfiguration"]) cl["ServiceRole"].should.equal(args["ServiceRole"]) status = cl["Status"] @@ -985,3 +995,53 @@ def test_tags(): client.remove_tags(ResourceId=cluster_id, TagKeys=[t["Key"] for t in input_tags]) resp = client.describe_cluster(ClusterId=cluster_id)["Cluster"] resp["Tags"].should.equal([]) + + +@mock_emr +def test_security_configurations(): + + client = boto3.client("emr", region_name="us-east-1") + + security_configuration_name = "MySecurityConfiguration" + + security_configuration = """ +{ + "EncryptionConfiguration": { + "AtRestEncryptionConfiguration": { + "S3EncryptionConfiguration": { + "EncryptionMode": "SSE-S3" + } + }, + "EnableInTransitEncryption": false, + "EnableAtRestEncryption": true + } +} + """.strip() + + resp = client.create_security_configuration( + Name=security_configuration_name, SecurityConfiguration=security_configuration + ) + + resp["Name"].should.equal(security_configuration_name) + resp["CreationDateTime"].should.be.a("datetime.datetime") + + resp = client.describe_security_configuration(Name=security_configuration_name) + 
resp["Name"].should.equal(security_configuration_name) + resp["SecurityConfiguration"].should.equal(security_configuration) + resp["CreationDateTime"].should.be.a("datetime.datetime") + + client.delete_security_configuration(Name=security_configuration_name) + + with pytest.raises(ClientError) as ex: + client.describe_security_configuration(Name=security_configuration_name) + ex.value.response["Error"]["Code"].should.equal("InvalidRequestException") + ex.value.response["Error"]["Message"].should.match( + r"Security configuration with name .* does not exist." + ) + + with pytest.raises(ClientError) as ex: + client.delete_security_configuration(Name=security_configuration_name) + ex.value.response["Error"]["Code"].should.equal("InvalidRequestException") + ex.value.response["Error"]["Message"].should.match( + r"Security configuration with name .* does not exist." + ) From 62fd975da076562f7f2c916b3edf07e60348ff54 Mon Sep 17 00:00:00 2001 From: Guillermo Arribas Date: Wed, 18 Nov 2020 02:36:17 +1100 Subject: [PATCH 634/658] EventBridge: put_rule and list_rules should store and retrieve EventBusName property (#3472) Co-authored-by: Guillermo Arribas --- moto/events/models.py | 1 + moto/events/responses.py | 3 +++ tests/test_events/test_events.py | 2 ++ 3 files changed, 6 insertions(+) diff --git a/moto/events/models.py b/moto/events/models.py index 8b7a084f796c..a3675d8e1a14 100644 --- a/moto/events/models.py +++ b/moto/events/models.py @@ -25,6 +25,7 @@ def __init__(self, name, region_name, **kwargs): self.state = kwargs.get("State") or "ENABLED" self.description = kwargs.get("Description") self.role_arn = kwargs.get("RoleArn") + self.event_bus_name = kwargs.get("EventBusName", "default") self.targets = [] @property diff --git a/moto/events/responses.py b/moto/events/responses.py index a72a869759e4..99577bacbed6 100644 --- a/moto/events/responses.py +++ b/moto/events/responses.py @@ -25,6 +25,7 @@ def _generate_rule_dict(self, rule): "Description": rule.description, "ScheduleExpression": rule.schedule_exp, "RoleArn": rule.role_arn, + "EventBusName": rule.event_bus_name, } @property @@ -167,6 +168,7 @@ def put_rule(self): state = self._get_param("State") desc = self._get_param("Description") role_arn = self._get_param("RoleArn") + event_bus_name = self._get_param("EventBusName") if not name: return self.error("ValidationException", "Parameter Name is required.") @@ -199,6 +201,7 @@ def put_rule(self): State=state, Description=desc, RoleArn=role_arn, + EventBusName=event_bus_name, ) return json.dumps({"RuleArn": rule.arn}), self.response_headers diff --git a/tests/test_events/test_events.py b/tests/test_events/test_events.py index 4b5bbd4cb138..3719692f803e 100644 --- a/tests/test_events/test_events.py +++ b/tests/test_events/test_events.py @@ -86,6 +86,7 @@ def test_put_rule(): "Name": "my-event", "ScheduleExpression": "rate(5 minutes)", "EventPattern": '{"source": ["test-source"]}', + "EventBusName": "test-bus", } client.put_rule(**rule_data) @@ -96,6 +97,7 @@ def test_put_rule(): rules[0]["Name"].should.equal(rule_data["Name"]) rules[0]["ScheduleExpression"].should.equal(rule_data["ScheduleExpression"]) rules[0]["EventPattern"].should.equal(rule_data["EventPattern"]) + rules[0]["EventBusName"].should.equal(rule_data["EventBusName"]) rules[0]["State"].should.equal("ENABLED") From 7f73015f02af234901b6324a256a98830fc1cada Mon Sep 17 00:00:00 2001 From: Ayush Ghosh Date: Wed, 18 Nov 2020 02:23:49 -0500 Subject: [PATCH 635/658] Fix XML encoding in Route53 JInja2 Templates #3469 (#3473) * Use Jinja2 
escape functionality to escape html attributes in value response * Add tests * fix formatting --- moto/route53/models.py | 2 +- tests/test_route53/test_route53.py | 88 ++++++++++++++++++++++++++++++ 2 files changed, 89 insertions(+), 1 deletion(-) diff --git a/moto/route53/models.py b/moto/route53/models.py index eb73f2bfb65b..f4303c2ae512 100644 --- a/moto/route53/models.py +++ b/moto/route53/models.py @@ -194,7 +194,7 @@ def to_xml(self): {% for record in record_set.records %} - {{ record }} + {{ record|e }} {% endfor %} diff --git a/tests/test_route53/test_route53.py b/tests/test_route53/test_route53.py index 68436a40e643..dcc12904fac2 100644 --- a/tests/test_route53/test_route53.py +++ b/tests/test_route53/test_route53.py @@ -644,6 +644,94 @@ def test_change_resource_record_sets_crud_valid(): len(response["ResourceRecordSets"]).should.equal(0) +@mock_route53 +def test_change_resource_record_sets_crud_valid_with_special_xml_chars(): + conn = boto3.client("route53", region_name="us-east-1") + conn.create_hosted_zone( + Name="db.", + CallerReference=str(hash("foo")), + HostedZoneConfig=dict(PrivateZone=True, Comment="db"), + ) + + zones = conn.list_hosted_zones_by_name(DNSName="db.") + len(zones["HostedZones"]).should.equal(1) + zones["HostedZones"][0]["Name"].should.equal("db.") + hosted_zone_id = zones["HostedZones"][0]["Id"] + + # Create TXT Record. + txt_record_endpoint_payload = { + "Comment": "Create TXT record prod.redis.db", + "Changes": [ + { + "Action": "CREATE", + "ResourceRecordSet": { + "Name": "prod.redis.db.", + "Type": "TXT", + "TTL": 10, + "ResourceRecords": [{"Value": "SomeInitialValue"}], + }, + } + ], + } + conn.change_resource_record_sets( + HostedZoneId=hosted_zone_id, ChangeBatch=txt_record_endpoint_payload + ) + + response = conn.list_resource_record_sets(HostedZoneId=hosted_zone_id) + len(response["ResourceRecordSets"]).should.equal(1) + a_record_detail = response["ResourceRecordSets"][0] + a_record_detail["Name"].should.equal("prod.redis.db.") + a_record_detail["Type"].should.equal("TXT") + a_record_detail["TTL"].should.equal(10) + a_record_detail["ResourceRecords"].should.equal([{"Value": "SomeInitialValue"}]) + + # Update TXT Record with XML Special Character &. + txt_record_with_special_char_endpoint_payload = { + "Comment": "Update TXT record prod.redis.db", + "Changes": [ + { + "Action": "UPSERT", + "ResourceRecordSet": { + "Name": "prod.redis.db.", + "Type": "TXT", + "TTL": 60, + "ResourceRecords": [{"Value": "SomeInitialValue&NewValue"}], + }, + } + ], + } + conn.change_resource_record_sets( + HostedZoneId=hosted_zone_id, + ChangeBatch=txt_record_with_special_char_endpoint_payload, + ) + + response = conn.list_resource_record_sets(HostedZoneId=hosted_zone_id) + len(response["ResourceRecordSets"]).should.equal(1) + cname_record_detail = response["ResourceRecordSets"][0] + cname_record_detail["Name"].should.equal("prod.redis.db.") + cname_record_detail["Type"].should.equal("TXT") + cname_record_detail["TTL"].should.equal(60) + cname_record_detail["ResourceRecords"].should.equal( + [{"Value": "SomeInitialValue&NewValue"}] + ) + + # Delete record. 
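+    # (the mock matches the record to delete on Name and Type alone)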
+ delete_payload = { + "Comment": "delete prod.redis.db", + "Changes": [ + { + "Action": "DELETE", + "ResourceRecordSet": {"Name": "prod.redis.db", "Type": "TXT"}, + } + ], + } + conn.change_resource_record_sets( + HostedZoneId=hosted_zone_id, ChangeBatch=delete_payload + ) + response = conn.list_resource_record_sets(HostedZoneId=hosted_zone_id) + len(response["ResourceRecordSets"]).should.equal(0) + + @mock_route53 def test_change_weighted_resource_record_sets(): conn = boto3.client("route53", region_name="us-east-2") From 83507fbc371c1e999d3b4dd90af4b709e65bcdea Mon Sep 17 00:00:00 2001 From: Oide Brett <32061073+oidebrett@users.noreply.github.com> Date: Wed, 18 Nov 2020 08:45:31 +0000 Subject: [PATCH 636/658] fixed issue in update_configuration for lambda when setting VPC config property (#3479) --- moto/awslambda/models.py | 2 +- tests/test_awslambda/test_lambda.py | 6 ++++++ 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/moto/awslambda/models.py b/moto/awslambda/models.py index a26fcba40211..475ef3086a11 100644 --- a/moto/awslambda/models.py +++ b/moto/awslambda/models.py @@ -305,7 +305,7 @@ def update_configuration(self, config_updates): elif key == "Timeout": self.timeout = value elif key == "VpcConfig": - self.vpc_config = value + self._vpc_config = value elif key == "Environment": self.environment_vars = value["Variables"] diff --git a/tests/test_awslambda/test_lambda.py b/tests/test_awslambda/test_lambda.py index 7e4fc22f56f1..8308195fb063 100644 --- a/tests/test_awslambda/test_lambda.py +++ b/tests/test_awslambda/test_lambda.py @@ -1536,6 +1536,7 @@ def test_update_configuration(): Handler="lambda_function.new_lambda_handler", Runtime="python3.6", Timeout=7, + VpcConfig={"SecurityGroupIds": ["sg-123abc"], "SubnetIds": ["subnet-123abc"]}, Environment={"Variables": {"test_environment": "test_value"}}, ) @@ -1548,6 +1549,11 @@ def test_update_configuration(): assert updated_config["Environment"]["Variables"] == { "test_environment": "test_value" } + assert updated_config["VpcConfig"] == { + "SecurityGroupIds": ["sg-123abc"], + "SubnetIds": ["subnet-123abc"], + "VpcId": "vpc-123abc", + } @mock_lambda From f7467164e4a1a2874952a46d56d7673c2dd27208 Mon Sep 17 00:00:00 2001 From: Brian Pandola Date: Wed, 18 Nov 2020 02:49:25 -0800 Subject: [PATCH 637/658] Fix Race Condition in batch:SubmitJob (#3480) * Extract Duplicate Code into Helper Method DRY up the tests and replace the arbitrary `sleep()` calls with a more explicit check before progressing. * Improve Testing of batch:TerminateJob The test now confirms that the job was terminated by sandwiching a `sleep` command between two `echo` commands. In addition to the original checks of the terminated job status/reason, the test now asserts that only the first echo command succeeded, confirming that the job was indeed terminated while in progress. * Fix Race Condition in batch:SubmitJob The `test_submit_job` in `test_batch.py` kicks off a job, calls `describe_jobs` in a loop until the job status returned is SUCCEEDED, and then asserts against the logged events. The backend code that runs the submitted job does so in a separate thread. If the job was successful, the job status was being set to SUCCEEDED *before* the event logs had been written to the logging backend. As a result, it was possible for the primary thread running the test to detect that the job was successful immediately after the secondary thread had updated the job status but before the secondary thread had written the logs to the logging backend. 
Under the right conditions, this could cause the subsequent logging assertions in the primary thread to fail. Additionally, the code that collected the logs from the container was using a "dodgy hack" of time.sleep() and a modulo-based conditional that was ultimately non-deterministic and could result in log messages being dropped or duplicated in certain scenarios. In order to address these issues, this commit does the following: * Carefully re-orders any code that sets a job status or timestamp to avoid any obvious race conditions. * Removes the "dodgy hack" in favor of a much more straightforward (and less error-prone) method of collecting logs from the container. * Removes arbitrary and unnecessary calls to time.sleep() Before applying any changes, the flaky test was failing about 12% of the time. Putting a sleep() call between setting the `job_status` to SUCCEEDED and collecting the logs, resulted in a 100% failure rate. Simply moving the code that sets the job status to SUCCEEDED to the end of the code block, dropped the failure rate to ~2%. Finally, removing the log collection hack allowed the test suite to run ~1000 times without a single failure. Taken in aggregate, these changes make the batch backend more deterministic and should put the nail in the coffin of this flaky test. Closes #3475 --- moto/batch/models.py | 53 +++++--------------------- tests/test_batch/test_batch.py | 68 ++++++++++++++++++---------------- 2 files changed, 46 insertions(+), 75 deletions(-) diff --git a/moto/batch/models.py b/moto/batch/models.py index f729144d89ee..1338beb0c3db 100644 --- a/moto/batch/models.py +++ b/moto/batch/models.py @@ -392,7 +392,6 @@ def run(self): """ try: self.job_state = "PENDING" - time.sleep(1) image = self.job_definition.container_properties.get( "image", "alpine:latest" @@ -425,8 +424,8 @@ def run(self): self.job_state = "RUNNABLE" # TODO setup ecs container instance - time.sleep(1) + self.job_started_at = datetime.datetime.now() self.job_state = "STARTING" log_config = docker.types.LogConfig(type=docker.types.LogConfig.types.JSON) container = self.docker_client.containers.run( @@ -440,58 +439,24 @@ def run(self): privileged=privileged, ) self.job_state = "RUNNING" - self.job_started_at = datetime.datetime.now() try: - # Log collection - logs_stdout = [] - logs_stderr = [] container.reload() - - # Dodgy hack, we can only check docker logs once a second, but we want to loop more - # so we can stop if asked to in a quick manner, should all go away if we go async - # There also be some dodgyness when sending an integer to docker logs and some - # events seem to be duplicated. - now = datetime.datetime.now() - i = 1 while container.status == "running" and not self.stop: - time.sleep(0.2) - if i % 5 == 0: - logs_stderr.extend( - container.logs( - stdout=False, - stderr=True, - timestamps=True, - since=datetime2int(now), - ) - .decode() - .split("\n") - ) - logs_stdout.extend( - container.logs( - stdout=True, - stderr=False, - timestamps=True, - since=datetime2int(now), - ) - .decode() - .split("\n") - ) - now = datetime.datetime.now() - container.reload() - i += 1 + container.reload() # Container should be stopped by this point... 
unless asked to stop if container.status == "running": container.kill() - self.job_stopped_at = datetime.datetime.now() - # Get final logs + # Log collection + logs_stdout = [] + logs_stderr = [] logs_stderr.extend( container.logs( stdout=False, stderr=True, timestamps=True, - since=datetime2int(now), + since=datetime2int(self.job_started_at), ) .decode() .split("\n") @@ -501,14 +466,12 @@ def run(self): stdout=True, stderr=False, timestamps=True, - since=datetime2int(now), + since=datetime2int(self.job_started_at), ) .decode() .split("\n") ) - self.job_state = "SUCCEEDED" if not self.stop else "FAILED" - # Process logs logs_stdout = [x for x in logs_stdout if len(x) > 0] logs_stderr = [x for x in logs_stderr if len(x) > 0] @@ -532,6 +495,8 @@ def run(self): self._log_backend.create_log_stream(log_group, stream_name) self._log_backend.put_log_events(log_group, stream_name, logs, None) + self.job_state = "SUCCEEDED" if not self.stop else "FAILED" + except Exception as err: logger.error( "Failed to run AWS Batch container {0}. Error {1}".format( diff --git a/tests/test_batch/test_batch.py b/tests/test_batch/test_batch.py index 5a7757777975..67f24bebc4ea 100644 --- a/tests/test_batch/test_batch.py +++ b/tests/test_batch/test_batch.py @@ -725,18 +725,7 @@ def test_submit_job(): ) job_id = resp["jobId"] - future = datetime.datetime.now() + datetime.timedelta(seconds=30) - - while datetime.datetime.now() < future: - time.sleep(1) - resp = batch_client.describe_jobs(jobs=[job_id]) - - if resp["jobs"][0]["status"] == "FAILED": - raise RuntimeError("Batch job failed") - if resp["jobs"][0]["status"] == "SUCCEEDED": - break - else: - raise RuntimeError("Batch job timed out") + _wait_for_job_status(batch_client, job_id, "SUCCEEDED") resp = logs_client.describe_log_streams( logGroupName="/aws/batch/job", logStreamNamePrefix="sayhellotomylittlefriend" @@ -798,26 +787,13 @@ def test_list_jobs(): ) job_id2 = resp["jobId"] - future = datetime.datetime.now() + datetime.timedelta(seconds=30) - resp_finished_jobs = batch_client.list_jobs( jobQueue=queue_arn, jobStatus="SUCCEEDED" ) # Wait only as long as it takes to run the jobs - while datetime.datetime.now() < future: - resp = batch_client.describe_jobs(jobs=[job_id1, job_id2]) - - any_failed_jobs = any([job["status"] == "FAILED" for job in resp["jobs"]]) - succeeded_jobs = all([job["status"] == "SUCCEEDED" for job in resp["jobs"]]) - - if any_failed_jobs: - raise RuntimeError("A Batch job failed") - if succeeded_jobs: - break - time.sleep(0.5) - else: - raise RuntimeError("Batch jobs timed out") + for job_id in [job_id1, job_id2]: + _wait_for_job_status(batch_client, job_id, "SUCCEEDED") resp_finished_jobs2 = batch_client.list_jobs( jobQueue=queue_arn, jobStatus="SUCCEEDED" @@ -854,13 +830,13 @@ def test_terminate_job(): queue_arn = resp["jobQueueArn"] resp = batch_client.register_job_definition( - jobDefinitionName="sleep10", + jobDefinitionName="echo-sleep-echo", type="container", containerProperties={ "image": "busybox:latest", "vcpus": 1, "memory": 128, - "command": ["sleep", "10"], + "command": ["sh", "-c", "echo start && sleep 30 && echo stop"], }, ) job_def_arn = resp["jobDefinitionArn"] @@ -870,13 +846,43 @@ def test_terminate_job(): ) job_id = resp["jobId"] - time.sleep(2) + _wait_for_job_status(batch_client, job_id, "RUNNING") batch_client.terminate_job(jobId=job_id, reason="test_terminate") - time.sleep(2) + _wait_for_job_status(batch_client, job_id, "FAILED") resp = batch_client.describe_jobs(jobs=[job_id]) 
resp["jobs"][0]["jobName"].should.equal("test1") resp["jobs"][0]["status"].should.equal("FAILED") resp["jobs"][0]["statusReason"].should.equal("test_terminate") + + resp = logs_client.describe_log_streams( + logGroupName="/aws/batch/job", logStreamNamePrefix="echo-sleep-echo" + ) + len(resp["logStreams"]).should.equal(1) + ls_name = resp["logStreams"][0]["logStreamName"] + + resp = logs_client.get_log_events( + logGroupName="/aws/batch/job", logStreamName=ls_name + ) + # Events should only contain 'start' because we interrupted + # the job before 'stop' was written to the logs. + resp["events"].should.have.length_of(1) + resp["events"][0]["message"].should.equal("start") + + +def _wait_for_job_status(client, job_id, status, seconds_to_wait=30): + wait_time = datetime.datetime.now() + datetime.timedelta(seconds=seconds_to_wait) + last_job_status = None + while datetime.datetime.now() < wait_time: + resp = client.describe_jobs(jobs=[job_id]) + last_job_status = resp["jobs"][0]["status"] + if last_job_status == status: + break + else: + raise RuntimeError( + "Time out waiting for job status {status}!\n Last status: {last_status}".format( + status=status, last_status=last_job_status + ) + ) From c902de8a098d2537792ec5533feb01085febd5ea Mon Sep 17 00:00:00 2001 From: David Szotten Date: Thu, 19 Nov 2020 09:01:53 +0000 Subject: [PATCH 638/658] lift user_settable_fields from (old) boto (#3468) (old) `boto` is no longer being developed, and importing it raises deprecation warnings. copy Key.base_user_settable_fields in here instead of importing boto --- moto/s3/utils.py | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/moto/s3/utils.py b/moto/s3/utils.py index d02da3a60189..d89997dfd6a7 100644 --- a/moto/s3/utils.py +++ b/moto/s3/utils.py @@ -2,7 +2,6 @@ import logging import os -from boto.s3.key import Key import re import six from six.moves.urllib.parse import urlparse, unquote, quote @@ -14,6 +13,16 @@ bucket_name_regex = re.compile("(.+).s3(.*).amazonaws.com") +user_settable_fields = { + "content-md5", + "content-language", + "content-type", + "content-encoding", + "cache-control", + "expires", + "content-disposition", + "x-robots-tag", +} def bucket_name_from_url(url): @@ -72,7 +81,7 @@ def metadata_from_headers(headers): if result: # Check for extra metadata meta_key = result.group(0).lower() - elif header.lower() in Key.base_user_settable_fields: + elif header.lower() in user_settable_fields: # Check for special metadata that doesn't start with x-amz-meta meta_key = header if meta_key: From 8a95878a812cdc8fd61949b5e89bdadd91891aab Mon Sep 17 00:00:00 2001 From: Rafael de Elvira Date: Fri, 20 Nov 2020 07:21:05 +0000 Subject: [PATCH 639/658] Route53: Implement dummy GetChange endpoint (#3486) * Route53: Implement dummy GetChange endpoint * fix typo in GET_CHANGE_RESPONSE --- moto/route53/responses.py | 18 ++++++++++++++++++ moto/route53/urls.py | 1 + tests/test_route53/test_route53.py | 11 +++++++++++ 3 files changed, 30 insertions(+) diff --git a/moto/route53/responses.py b/moto/route53/responses.py index 077c89a2c7d4..e831820e1521 100644 --- a/moto/route53/responses.py +++ b/moto/route53/responses.py @@ -243,6 +243,15 @@ def list_or_change_tags_for_resource_request(self, request, full_url, headers): return 200, headers, template.render() + def get_change(self, request, full_url, headers): + self.setup_class(request, full_url, headers) + + if request.method == "GET": + parsed_url = urlparse(full_url) + change_id = parsed_url.path.rstrip("/").rsplit("/", 1)[1] + 
template = Template(GET_CHANGE_RESPONSE) + return 200, headers, template.render(change_id=change_id) + LIST_TAGS_FOR_RESOURCE_RESPONSE = """ @@ -382,3 +391,12 @@ def list_or_change_tags_for_resource_request(self, request, full_url, headers): DELETE_HEALTH_CHECK_RESPONSE = """ """ + +GET_CHANGE_RESPONSE = """ + + + INSYNC + 2010-09-10T01:36:41.958Z + {{ change_id }} + +""" diff --git a/moto/route53/urls.py b/moto/route53/urls.py index c0fc9373460c..3bca32715a47 100644 --- a/moto/route53/urls.py +++ b/moto/route53/urls.py @@ -21,4 +21,5 @@ def tag_response2(*args, **kwargs): r"{0}/(?P[\d_-]+)/tags/healthcheck/(?P[^/]+)$": tag_response1, r"{0}/(?P[\d_-]+)/tags/hostedzone/(?P[^/]+)$": tag_response2, r"{0}/(?P[\d_-]+)/trafficpolicyinstances/*": Route53().not_implemented_response, + r"{0}/(?P[\d_-]+)/change/(?P[^/]+)$": Route53().get_change, } diff --git a/tests/test_route53/test_route53.py b/tests/test_route53/test_route53.py index dcc12904fac2..8ce5272ef76c 100644 --- a/tests/test_route53/test_route53.py +++ b/tests/test_route53/test_route53.py @@ -1031,3 +1031,14 @@ def create_resource_record_set(rec_type, rec_name): len(returned_records).should.equal(len(all_records) - start_with) for desired_record in all_records[start_with:]: returned_records.should.contain(desired_record) + + +@mock_route53 +def test_get_change(): + conn = boto3.client("route53", region_name="us-east-2") + + change_id = "123456" + response = conn.get_change(Id=change_id) + + response["ChangeInfo"]["Id"].should.equal(change_id) + response["ChangeInfo"]["Status"].should.equal("INSYNC") From 54e296eb5305dfea423c53f46ca22be2ee9419eb Mon Sep 17 00:00:00 2001 From: usmangani1 Date: Fri, 20 Nov 2020 18:08:48 +0530 Subject: [PATCH 640/658] Fix:CloudWatch List metrics with dimensions (#3461) * Fix:CloudWatch List metrics with dimensions * Fix:CloudWatch List metrics with dimensions * Fixed new cases and added more tests Co-authored-by: usmankb --- moto/cloudwatch/models.py | 36 ++++-- .../test_cloudwatch/test_cloudwatch_boto3.py | 117 ++++++++++++++++++ 2 files changed, 146 insertions(+), 7 deletions(-) diff --git a/moto/cloudwatch/models.py b/moto/cloudwatch/models.py index 772672e0eb33..94668f32f642 100644 --- a/moto/cloudwatch/models.py +++ b/moto/cloudwatch/models.py @@ -155,6 +155,18 @@ def update_state(self, reason, reason_data, state_value): self.state_updated_timestamp = datetime.utcnow() +def are_dimensions_same(metric_dimensions, dimensions): + for dimension in metric_dimensions: + for new_dimension in dimensions: + if ( + dimension.name != new_dimension.name + or dimension.value != new_dimension.value + ): + return False + + return True + + class MetricDatum(BaseModel): def __init__(self, namespace, name, value, dimensions, timestamp): self.namespace = namespace @@ -165,11 +177,17 @@ def __init__(self, namespace, name, value, dimensions, timestamp): Dimension(dimension["Name"], dimension["Value"]) for dimension in dimensions ] - def filter(self, namespace, name, dimensions): + def filter(self, namespace, name, dimensions, already_present_metrics): if namespace and namespace != self.namespace: return False if name and name != self.name: return False + for metric in already_present_metrics: + if self.dimensions and are_dimensions_same( + metric.dimensions, self.dimensions + ): + return False + if dimensions and any( Dimension(d["Name"], d["Value"]) not in self.dimensions for d in dimensions ): @@ -514,12 +532,16 @@ def list_metrics(self, next_token, namespace, metric_name, dimensions): def get_filtered_metrics(self, 
metric_name, namespace, dimensions): metrics = self.get_all_metrics() - metrics = [ - md - for md in metrics - if md.filter(namespace=namespace, name=metric_name, dimensions=dimensions) - ] - return metrics + new_metrics = [] + for md in metrics: + if md.filter( + namespace=namespace, + name=metric_name, + dimensions=dimensions, + already_present_metrics=new_metrics, + ): + new_metrics.append(md) + return new_metrics def _get_paginated(self, metrics): if len(metrics) > 500: diff --git a/tests/test_cloudwatch/test_cloudwatch_boto3.py b/tests/test_cloudwatch/test_cloudwatch_boto3.py index d448b0c58b45..55f0878d4424 100644 --- a/tests/test_cloudwatch/test_cloudwatch_boto3.py +++ b/tests/test_cloudwatch/test_cloudwatch_boto3.py @@ -349,6 +349,123 @@ def test_get_metric_statistics(): datapoint["Sum"].should.equal(1.5) +@mock_cloudwatch +def test_duplicate_put_metric_data(): + conn = boto3.client("cloudwatch", region_name="us-east-1") + utc_now = datetime.now(tz=pytz.utc) + + conn.put_metric_data( + Namespace="tester", + MetricData=[ + dict( + MetricName="metric", + Dimensions=[{"Name": "Name", "Value": "B"}], + Value=1.5, + Timestamp=utc_now, + ) + ], + ) + + result = conn.list_metrics( + Namespace="tester", Dimensions=[{"Name": "Name", "Value": "B"}] + )["Metrics"] + len(result).should.equal(1) + + conn.put_metric_data( + Namespace="tester", + MetricData=[ + dict( + MetricName="metric", + Dimensions=[{"Name": "Name", "Value": "B"}], + Value=1.5, + Timestamp=utc_now, + ) + ], + ) + + result = conn.list_metrics( + Namespace="tester", Dimensions=[{"Name": "Name", "Value": "B"}] + )["Metrics"] + len(result).should.equal(1) + + conn.put_metric_data( + Namespace="tester", + MetricData=[ + dict( + MetricName="metric", + Dimensions=[{"Name": "Name", "Value": "B"}], + Value=1.5, + Timestamp=utc_now, + ) + ], + ) + + result = conn.list_metrics( + Namespace="tester", Dimensions=[{"Name": "Name", "Value": "B"}] + )["Metrics"] + result.should.equal( + [ + { + "Namespace": "tester", + "MetricName": "metric", + "Dimensions": [{"Name": "Name", "Value": "B"}], + } + ] + ) + + conn.put_metric_data( + Namespace="tester", + MetricData=[ + dict( + MetricName="metric", + Dimensions=[ + {"Name": "Name", "Value": "B"}, + {"Name": "Name", "Value": "C"}, + ], + Value=1.5, + Timestamp=utc_now, + ) + ], + ) + + result = conn.list_metrics( + Namespace="tester", Dimensions=[{"Name": "Name", "Value": "B"}] + )["Metrics"] + result.should.equal( + [ + { + "Namespace": "tester", + "MetricName": "metric", + "Dimensions": [{"Name": "Name", "Value": "B"}], + }, + { + "Namespace": "tester", + "MetricName": "metric", + "Dimensions": [ + {"Name": "Name", "Value": "B"}, + {"Name": "Name", "Value": "C"}, + ], + }, + ] + ) + + result = conn.list_metrics( + Namespace="tester", Dimensions=[{"Name": "Name", "Value": "C"}] + )["Metrics"] + result.should.equal( + [ + { + "Namespace": "tester", + "MetricName": "metric", + "Dimensions": [ + {"Name": "Name", "Value": "B"}, + {"Name": "Name", "Value": "C"}, + ], + } + ] + ) + + @mock_cloudwatch @freeze_time("2020-02-10 18:44:05") def test_custom_timestamp(): From 689cd8f28531c13b3987c4cb731d4f85dd17a099 Mon Sep 17 00:00:00 2001 From: Mykola Mykhalov <4667951+miho120@users.noreply.github.com> Date: Fri, 20 Nov 2020 23:00:53 +0100 Subject: [PATCH 641/658] Implement IAM instance profile associations (#3482) * Add associate_iam_instance_profile describe_iam_instance_profile_associations, disassociate_iam_instance_profile, replace_iam_instance_profile_association * More tests, removed type hints, 
filter fix * Ec2 fix --- moto/ec2/exceptions.py | 16 + moto/ec2/models.py | 147 ++++++++ moto/ec2/responses/__init__.py | 2 + moto/ec2/responses/iam_instance_profiles.py | 89 +++++ moto/ec2/utils.py | 50 +++ moto/iam/models.py | 7 + .../test_iam_instance_profile_associations.py | 345 ++++++++++++++++++ 7 files changed, 656 insertions(+) create mode 100644 moto/ec2/responses/iam_instance_profiles.py create mode 100644 tests/test_ec2/test_iam_instance_profile_associations.py diff --git a/moto/ec2/exceptions.py b/moto/ec2/exceptions.py index e14a60bf1c2c..348c3f723b48 100644 --- a/moto/ec2/exceptions.py +++ b/moto/ec2/exceptions.py @@ -583,3 +583,19 @@ def __init__(self, param, param_needed): param, param_needed ), ) + + +class IncorrectStateIamProfileAssociationError(EC2ClientError): + def __init__(self, instance_id): + super(IncorrectStateIamProfileAssociationError, self).__init__( + "IncorrectState", + "There is an existing association for instance {0}".format(instance_id), + ) + + +class InvalidAssociationIDIamProfileAssociationError(EC2ClientError): + def __init__(self, association_id): + super(InvalidAssociationIDIamProfileAssociationError, self).__init__( + "InvalidAssociationID.NotFound", + "An invalid association-id of '{0}' was given".format(association_id), + ) diff --git a/moto/ec2/models.py b/moto/ec2/models.py index 586f49dcf56a..9b5e692a7fce 100644 --- a/moto/ec2/models.py +++ b/moto/ec2/models.py @@ -99,6 +99,8 @@ RulesPerSecurityGroupLimitExceededError, TagLimitExceeded, InvalidParameterDependency, + IncorrectStateIamProfileAssociationError, + InvalidAssociationIDIamProfileAssociationError, ) from .utils import ( EC2_RESOURCE_TO_PREFIX, @@ -136,6 +138,7 @@ random_vpc_id, random_vpc_cidr_association_id, random_vpc_peering_connection_id, + random_iam_instance_profile_association_id, generic_filter, is_valid_resource_id, get_prefix, @@ -143,6 +146,8 @@ is_valid_cidr, filter_internet_gateways, filter_reservations, + filter_iam_instance_profile_associations, + filter_iam_instance_profiles, random_network_acl_id, random_network_acl_subnet_association_id, random_vpn_gateway_id, @@ -674,6 +679,16 @@ def create_from_cloudformation_json( instance = reservation.instances[0] for tag in properties.get("Tags", []): instance.add_tag(tag["Key"], tag["Value"]) + + # Associating iam instance profile. + # TODO: Don't forget to implement replace_iam_instance_profile_association once update_from_cloudformation_json + # for ec2 instance will be implemented. + if properties.get("IamInstanceProfile"): + ec2_backend.associate_iam_instance_profile( + instance_id=instance.id, + iam_instance_profile_name=properties.get("IamInstanceProfile"), + ) + return instance @classmethod @@ -759,6 +774,15 @@ def terminate(self, *args, **kwargs): "Client.UserInitiatedShutdown", ) + # Disassociate iam instance profile if associated, otherwise iam_instance_profile_associations will + # be pointing to None. 
+ if self.ec2_backend.iam_instance_profile_associations.get(self.id): + self.ec2_backend.disassociate_iam_instance_profile( + association_id=self.ec2_backend.iam_instance_profile_associations[ + self.id + ].id + ) + def reboot(self, *args, **kwargs): self._state.name = "running" self._state.code = 16 @@ -5868,6 +5892,121 @@ def get_launch_templates( return generic_filter(filters, templates) +class IamInstanceProfileAssociation(CloudFormationModel): + def __init__(self, ec2_backend, association_id, instance, iam_instance_profile): + self.ec2_backend = ec2_backend + self.id = association_id + self.instance = instance + self.iam_instance_profile = iam_instance_profile + self.state = "associated" + + +class IamInstanceProfileAssociationBackend(object): + def __init__(self): + self.iam_instance_profile_associations = {} + super(IamInstanceProfileAssociationBackend, self).__init__() + + def associate_iam_instance_profile( + self, + instance_id, + iam_instance_profile_name=None, + iam_instance_profile_arn=None, + ): + iam_association_id = random_iam_instance_profile_association_id() + + instance_profile = filter_iam_instance_profiles( + iam_instance_profile_arn, iam_instance_profile_name + ) + + if instance_id in self.iam_instance_profile_associations.keys(): + raise IncorrectStateIamProfileAssociationError(instance_id) + + iam_instance_profile_associations = IamInstanceProfileAssociation( + self, + iam_association_id, + self.get_instance(instance_id) if instance_id else None, + instance_profile, + ) + # Regarding to AWS there can be only one association with ec2. + self.iam_instance_profile_associations[ + instance_id + ] = iam_instance_profile_associations + return iam_instance_profile_associations + + def describe_iam_instance_profile_associations( + self, association_ids, filters=None, max_results=100, next_token=None + ): + associations_list = [] + if association_ids: + for association in self.iam_instance_profile_associations.values(): + if association.id in association_ids: + associations_list.append(association) + else: + # That's mean that no association id were given. Showing all. 
+ associations_list.extend(self.iam_instance_profile_associations.values()) + + associations_list = filter_iam_instance_profile_associations( + associations_list, filters + ) + + starting_point = int(next_token or 0) + ending_point = starting_point + int(max_results or 100) + associations_page = associations_list[starting_point:ending_point] + new_next_token = ( + str(ending_point) if ending_point < len(associations_list) else None + ) + + return associations_page, new_next_token + + def disassociate_iam_instance_profile(self, association_id): + iam_instance_profile_associations = None + for association_key in self.iam_instance_profile_associations.keys(): + if ( + self.iam_instance_profile_associations[association_key].id + == association_id + ): + iam_instance_profile_associations = self.iam_instance_profile_associations[ + association_key + ] + del self.iam_instance_profile_associations[association_key] + # Deleting once and avoiding `RuntimeError: dictionary changed size during iteration` + break + + if not iam_instance_profile_associations: + raise InvalidAssociationIDIamProfileAssociationError(association_id) + + return iam_instance_profile_associations + + def replace_iam_instance_profile_association( + self, + association_id, + iam_instance_profile_name=None, + iam_instance_profile_arn=None, + ): + instance_profile = filter_iam_instance_profiles( + iam_instance_profile_arn, iam_instance_profile_name + ) + + iam_instance_profile_association = None + for association_key in self.iam_instance_profile_associations.keys(): + if ( + self.iam_instance_profile_associations[association_key].id + == association_id + ): + self.iam_instance_profile_associations[ + association_key + ].iam_instance_profile = instance_profile + iam_instance_profile_association = self.iam_instance_profile_associations[ + association_key + ] + break + + if not iam_instance_profile_association: + raise InvalidAssociationIDIamProfileAssociationError(association_id) + + return iam_instance_profile_association + + class EC2Backend( BaseBackend, InstanceBackend, @@ -5897,6 +6036,7 @@ class EC2Backend( CustomerGatewayBackend, NatGatewayBackend, LaunchTemplateBackend, + IamInstanceProfileAssociationBackend, ): def __init__(self, region_name): self.region_name = region_name @@ -5983,6 +6123,13 @@ def do_resources_exist(self, resource_ids): self.describe_vpn_connections(vpn_connection_ids=[resource_id]) elif resource_prefix == EC2_RESOURCE_TO_PREFIX["vpn-gateway"]: self.get_vpn_gateway(vpn_gateway_id=resource_id) + elif ( + resource_prefix + == EC2_RESOURCE_TO_PREFIX["iam-instance-profile-association"] + ): + self.describe_iam_instance_profile_associations( + association_ids=[resource_id] + ) return True diff --git a/moto/ec2/responses/__init__.py b/moto/ec2/responses/__init__.py index 893a25e89014..515ae1f31bd6 100644 --- a/moto/ec2/responses/__init__.py +++ b/moto/ec2/responses/__init__.py @@ -34,6 +34,7 @@ from .vpn_connections import VPNConnections from .windows import Windows from .nat_gateways import NatGateways +from .iam_instance_profiles import IamInstanceProfiles class EC2Response( @@ -71,6 +72,7 @@ class EC2Response( VPNConnections, Windows, NatGateways, + IamInstanceProfiles, ): @property def ec2_backend(self): diff --git a/moto/ec2/responses/iam_instance_profiles.py b/moto/ec2/responses/iam_instance_profiles.py new file mode 100644 index 000000000000..3d2525ba78f8 --- /dev/null +++ b/moto/ec2/responses/iam_instance_profiles.py @@ -0,0 +1,89 @@ +from __future__ import unicode_literals +from moto.core.responses 
import BaseResponse + + +class IamInstanceProfiles(BaseResponse): + def associate_iam_instance_profile(self): + instance_id = self._get_param("InstanceId") + iam_instance_profile_name = self._get_param("IamInstanceProfile.Name") + iam_instance_profile_arn = self._get_param("IamInstanceProfile.Arn") + iam_association = self.ec2_backend.associate_iam_instance_profile( + instance_id, iam_instance_profile_name, iam_instance_profile_arn + ) + template = self.response_template(IAM_INSTANCE_PROFILE_RESPONSE) + return template.render(iam_association=iam_association, state="associating") + + def describe_iam_instance_profile_associations(self): + association_ids = self._get_multi_param("AssociationId") + filters = self._get_object_map("Filter") + max_items = self._get_param("MaxItems") + next_token = self._get_param("NextToken") + ( + iam_associations, + next_token, + ) = self.ec2_backend.describe_iam_instance_profile_associations( + association_ids, filters, max_items, next_token + ) + template = self.response_template(DESCRIBE_IAM_INSTANCE_PROFILE_RESPONSE) + return template.render(iam_associations=iam_associations, next_token=next_token) + + def disassociate_iam_instance_profile(self): + association_id = self._get_param("AssociationId") + iam_association = self.ec2_backend.disassociate_iam_instance_profile( + association_id + ) + template = self.response_template(IAM_INSTANCE_PROFILE_RESPONSE) + return template.render(iam_association=iam_association, state="disassociating") + + def replace_iam_instance_profile_association(self): + association_id = self._get_param("AssociationId") + iam_instance_profile_name = self._get_param("IamInstanceProfile.Name") + iam_instance_profile_arn = self._get_param("IamInstanceProfile.Arn") + iam_association = self.ec2_backend.replace_iam_instance_profile_association( + association_id, iam_instance_profile_name, iam_instance_profile_arn + ) + template = self.response_template(IAM_INSTANCE_PROFILE_RESPONSE) + return template.render(iam_association=iam_association, state="associating") + + +# https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_AssociateIamInstanceProfile.html +IAM_INSTANCE_PROFILE_RESPONSE = """ + + e10deeaf-7cda-48e7-950b-example + + {{ iam_association.id }} + {% if iam_association.iam_instance_profile %} + + {{ iam_association.iam_instance_profile.arn }} + {{ iam_association.iam_instance_profile.id }} + + {% endif %} + {{ iam_association.instance.id }} + {{ state }} + + +""" + + +# https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeIamInstanceProfileAssociations.html +# Note: this API description page contains an error! Provided `iamInstanceProfileAssociations` doesn't work, you +# should use `iamInstanceProfileAssociationSet` instead. 
+DESCRIBE_IAM_INSTANCE_PROFILE_RESPONSE = """ + + 84c2d2a6-12dc-491f-a9ee-example + {% if next_token %}{{ next_token }}{% endif %} + + {% for iam_association in iam_associations %} + + {{ iam_association.id }} + + {{ iam_association.iam_instance_profile.arn }} + {{ iam_association.iam_instance_profile.id }} + + {{ iam_association.instance.id }} + {{ iam_association.state }} + + {% endfor %} + + +""" diff --git a/moto/ec2/utils.py b/moto/ec2/utils.py index e6763fec1dbc..4a101f9234aa 100644 --- a/moto/ec2/utils.py +++ b/moto/ec2/utils.py @@ -12,6 +12,7 @@ from cryptography.hazmat.primitives.asymmetric import rsa from moto.core import ACCOUNT_ID +from moto.iam import iam_backends EC2_RESOURCE_TO_PREFIX = { "customer-gateway": "cgw", @@ -43,6 +44,7 @@ "vpc-peering-connection": "pcx", "vpn-connection": "vpn", "vpn-gateway": "vgw", + "iam-instance-profile-association": "iip-assoc", } @@ -171,6 +173,10 @@ def random_launch_template_id(): return random_id(prefix=EC2_RESOURCE_TO_PREFIX["launch-template"], size=17) +def random_iam_instance_profile_association_id(): + return random_id(prefix=EC2_RESOURCE_TO_PREFIX["iam-instance-profile-association"]) + + def random_public_ip(): return "54.214.{0}.{1}".format(random.choice(range(255)), random.choice(range(255))) @@ -597,3 +603,47 @@ def rsa_public_key_fingerprint(rsa_public_key): fingerprint_hex = hashlib.md5(key_data).hexdigest() fingerprint = re.sub(r"([a-f0-9]{2})(?!$)", r"\1:", fingerprint_hex) return fingerprint + + +def filter_iam_instance_profile_associations(iam_instance_associations, filter_dict): + if not filter_dict: + return iam_instance_associations + result = [] + for iam_instance_association in iam_instance_associations: + filter_passed = True + if filter_dict.get("instance-id"): + if ( + iam_instance_association.instance.id + not in filter_dict.get("instance-id").values() + ): + filter_passed = False + if filter_dict.get("state"): + if iam_instance_association.state not in filter_dict.get("state").values(): + filter_passed = False + if filter_passed: + result.append(iam_instance_association) + return result + + +def filter_iam_instance_profiles(iam_instance_profile_arn, iam_instance_profile_name): + instance_profile = None + instance_profile_by_name = None + instance_profile_by_arn = None + if iam_instance_profile_name: + instance_profile_by_name = iam_backends["global"].get_instance_profile( + iam_instance_profile_name + ) + instance_profile = instance_profile_by_name + if iam_instance_profile_arn: + instance_profile_by_arn = iam_backends["global"].get_instance_profile_by_arn( + iam_instance_profile_arn + ) + instance_profile = instance_profile_by_arn + # We would prefer instance profile that we found by arn + if iam_instance_profile_arn and iam_instance_profile_name: + if instance_profile_by_name == instance_profile_by_arn: + instance_profile = instance_profile_by_arn + else: + instance_profile = None + + return instance_profile diff --git a/moto/iam/models.py b/moto/iam/models.py index 76b824d609bb..ac8402e57270 100755 --- a/moto/iam/models.py +++ b/moto/iam/models.py @@ -1852,6 +1852,13 @@ def get_instance_profile(self, profile_name): "Instance profile {0} not found".format(profile_name) ) + def get_instance_profile_by_arn(self, profile_arn): + for profile in self.get_instance_profiles(): + if profile.arn == profile_arn: + return profile + + raise IAMNotFoundException("Instance profile {0} not found".format(profile_arn)) + def get_instance_profiles(self): return self.instance_profiles.values() diff --git 
a/tests/test_ec2/test_iam_instance_profile_associations.py b/tests/test_ec2/test_iam_instance_profile_associations.py new file mode 100644 index 000000000000..6a7dcad3073a --- /dev/null +++ b/tests/test_ec2/test_iam_instance_profile_associations.py @@ -0,0 +1,345 @@ +from __future__ import unicode_literals + +# Ensure 'pytest.raises' context manager support for Python 2.6 +import pytest + +import time +import json +import boto3 +from botocore.exceptions import ClientError +import sure # noqa + +from moto import mock_ec2, mock_iam, mock_cloudformation + + +def quick_instance_creation(): + image_id = "ami-1234abcd" + conn_ec2 = boto3.resource("ec2", "us-east-1") + test_instance = conn_ec2.create_instances(ImageId=image_id, MinCount=1, MaxCount=1) + # We only need instance id for this tests + return test_instance[0].id + + +def quick_instance_profile_creation(name): + conn_iam = boto3.resource("iam", "us-east-1") + test_instance_profile = conn_iam.create_instance_profile( + InstanceProfileName=name, Path="/" + ) + return test_instance_profile.arn, test_instance_profile.name + + +@mock_ec2 +@mock_iam +def test_associate(): + client = boto3.client("ec2", region_name="us-east-1") + instance_id = quick_instance_creation() + instance_profile_arn, instance_profile_name = quick_instance_profile_creation( + "test_profile" + ) + + association = client.associate_iam_instance_profile( + IamInstanceProfile={ + "Arn": instance_profile_arn, + "Name": instance_profile_name, + }, + InstanceId=instance_id, + ) + association["IamInstanceProfileAssociation"]["InstanceId"].should.equal(instance_id) + association["IamInstanceProfileAssociation"]["IamInstanceProfile"][ + "Arn" + ].should.equal(instance_profile_arn) + association["IamInstanceProfileAssociation"]["State"].should.equal("associating") + + +@mock_ec2 +@mock_iam +def test_invalid_associate(): + client = boto3.client("ec2", region_name="us-east-1") + instance_id = quick_instance_creation() + instance_profile_arn, instance_profile_name = quick_instance_profile_creation( + "test_profile" + ) + + client.associate_iam_instance_profile( + IamInstanceProfile={ + "Arn": instance_profile_arn, + "Name": instance_profile_name, + }, + InstanceId=instance_id, + ) + + # Duplicate + with pytest.raises(ClientError) as ex: + client.associate_iam_instance_profile( + IamInstanceProfile={ + "Arn": instance_profile_arn, + "Name": instance_profile_name, + }, + InstanceId=instance_id, + ) + ex.value.response["Error"]["Code"].should.equal("IncorrectState") + ex.value.response["Error"]["Message"].should.contain( + "There is an existing association for" + ) + + # Wrong instance profile + with pytest.raises(ClientError) as ex: + client.associate_iam_instance_profile( + IamInstanceProfile={"Arn": "fake", "Name": "fake"}, InstanceId=instance_id, + ) + ex.value.response["Error"]["Code"].should.equal("NoSuchEntity") + ex.value.response["Error"]["Message"].should.contain("not found") + + # Wrong instance id + with pytest.raises(ClientError) as ex: + client.associate_iam_instance_profile( + IamInstanceProfile={ + "Arn": instance_profile_arn, + "Name": instance_profile_name, + }, + InstanceId="fake", + ) + ex.value.response["Error"]["Code"].should.equal("InvalidInstanceID.NotFound") + ex.value.response["Error"]["Message"].should.contain("does not exist") + + +@mock_ec2 +@mock_iam +def test_describe(): + client = boto3.client("ec2", region_name="us-east-1") + + instance_id = quick_instance_creation() + instance_profile_arn, instance_profile_name = quick_instance_profile_creation( + 
"test_profile" + ) + client.associate_iam_instance_profile( + IamInstanceProfile={ + "Arn": instance_profile_arn, + "Name": instance_profile_name, + }, + InstanceId=instance_id, + ) + associations = client.describe_iam_instance_profile_associations() + associations["IamInstanceProfileAssociations"].should.have.length_of(1) + associations["IamInstanceProfileAssociations"][0]["InstanceId"].should.equal( + instance_id + ) + associations["IamInstanceProfileAssociations"][0]["IamInstanceProfile"][ + "Arn" + ].should.equal(instance_profile_arn) + associations["IamInstanceProfileAssociations"][0]["State"].should.equal( + "associated" + ) + + instance_id = quick_instance_creation() + instance_profile_arn, instance_profile_name = quick_instance_profile_creation( + "test_profile1" + ) + client.associate_iam_instance_profile( + IamInstanceProfile={ + "Arn": instance_profile_arn, + "Name": instance_profile_name, + }, + InstanceId=instance_id, + ) + + next_test_associations = client.describe_iam_instance_profile_associations() + next_test_associations["IamInstanceProfileAssociations"].should.have.length_of(2) + + associations = client.describe_iam_instance_profile_associations( + AssociationIds=[ + next_test_associations["IamInstanceProfileAssociations"][0][ + "AssociationId" + ], + ] + ) + associations["IamInstanceProfileAssociations"].should.have.length_of(1) + associations["IamInstanceProfileAssociations"][0]["IamInstanceProfile"][ + "Arn" + ].should.equal( + next_test_associations["IamInstanceProfileAssociations"][0][ + "IamInstanceProfile" + ]["Arn"] + ) + + associations = client.describe_iam_instance_profile_associations( + Filters=[ + { + "Name": "instance-id", + "Values": [ + next_test_associations["IamInstanceProfileAssociations"][0][ + "InstanceId" + ], + ], + }, + {"Name": "state", "Values": ["associated"]}, + ] + ) + associations["IamInstanceProfileAssociations"].should.have.length_of(1) + associations["IamInstanceProfileAssociations"][0]["IamInstanceProfile"][ + "Arn" + ].should.equal( + next_test_associations["IamInstanceProfileAssociations"][0][ + "IamInstanceProfile" + ]["Arn"] + ) + + +@mock_ec2 +@mock_iam +def test_replace(): + client = boto3.client("ec2", region_name="us-east-1") + instance_id1 = quick_instance_creation() + instance_profile_arn1, instance_profile_name1 = quick_instance_profile_creation( + "test_profile1" + ) + instance_profile_arn2, instance_profile_name2 = quick_instance_profile_creation( + "test_profile2" + ) + + association = client.associate_iam_instance_profile( + IamInstanceProfile={ + "Arn": instance_profile_arn1, + "Name": instance_profile_name1, + }, + InstanceId=instance_id1, + ) + + association = client.replace_iam_instance_profile_association( + IamInstanceProfile={ + "Arn": instance_profile_arn2, + "Name": instance_profile_name2, + }, + AssociationId=association["IamInstanceProfileAssociation"]["AssociationId"], + ) + + association["IamInstanceProfileAssociation"]["IamInstanceProfile"][ + "Arn" + ].should.equal(instance_profile_arn2) + association["IamInstanceProfileAssociation"]["State"].should.equal("associating") + + +@mock_ec2 +@mock_iam +def test_invalid_replace(): + client = boto3.client("ec2", region_name="us-east-1") + instance_id = quick_instance_creation() + instance_profile_arn, instance_profile_name = quick_instance_profile_creation( + "test_profile" + ) + instance_profile_arn2, instance_profile_name2 = quick_instance_profile_creation( + "test_profile2" + ) + + association = client.associate_iam_instance_profile( + IamInstanceProfile={ + 
"Arn": instance_profile_arn, + "Name": instance_profile_name, + }, + InstanceId=instance_id, + ) + + # Wrong id + with pytest.raises(ClientError) as ex: + client.replace_iam_instance_profile_association( + IamInstanceProfile={ + "Arn": instance_profile_arn2, + "Name": instance_profile_name2, + }, + AssociationId="fake", + ) + ex.value.response["Error"]["Code"].should.equal("InvalidAssociationID.NotFound") + ex.value.response["Error"]["Message"].should.contain("An invalid association-id of") + + # Wrong instance profile + with pytest.raises(ClientError) as ex: + client.replace_iam_instance_profile_association( + IamInstanceProfile={"Arn": "fake", "Name": "fake",}, + AssociationId=association["IamInstanceProfileAssociation"]["AssociationId"], + ) + ex.value.response["Error"]["Code"].should.equal("NoSuchEntity") + ex.value.response["Error"]["Message"].should.contain("not found") + + +@mock_ec2 +@mock_iam +def test_disassociate(): + client = boto3.client("ec2", region_name="us-east-1") + instance_id = quick_instance_creation() + instance_profile_arn, instance_profile_name = quick_instance_profile_creation( + "test_profile" + ) + + association = client.associate_iam_instance_profile( + IamInstanceProfile={ + "Arn": instance_profile_arn, + "Name": instance_profile_name, + }, + InstanceId=instance_id, + ) + + associations = client.describe_iam_instance_profile_associations() + associations["IamInstanceProfileAssociations"].should.have.length_of(1) + + disassociation = client.disassociate_iam_instance_profile( + AssociationId=association["IamInstanceProfileAssociation"]["AssociationId"], + ) + + disassociation["IamInstanceProfileAssociation"]["IamInstanceProfile"][ + "Arn" + ].should.equal(instance_profile_arn) + disassociation["IamInstanceProfileAssociation"]["State"].should.equal( + "disassociating" + ) + + associations = client.describe_iam_instance_profile_associations() + associations["IamInstanceProfileAssociations"].should.have.length_of(0) + + +@mock_ec2 +@mock_iam +def test_invalid_disassociate(): + client = boto3.client("ec2", region_name="us-east-1") + + # Wrong id + with pytest.raises(ClientError) as ex: + client.disassociate_iam_instance_profile(AssociationId="fake",) + ex.value.response["Error"]["Code"].should.equal("InvalidAssociationID.NotFound") + ex.value.response["Error"]["Message"].should.contain("An invalid association-id of") + + +@mock_ec2 +@mock_cloudformation +def test_cloudformation(): + dummy_template_json = { + "AWSTemplateFormatVersion": "2010-09-09", + "Resources": { + "InstanceProfile": { + "Type": "AWS::IAM::InstanceProfile", + "Properties": {"Path": "/", "Roles": []}, + }, + "Ec2Instance": { + "Type": "AWS::EC2::Instance", + "Properties": { + "IamInstanceProfile": {"Ref": "InstanceProfile"}, + "KeyName": "mykey1", + "ImageId": "ami-7a11e213", + }, + }, + }, + } + + client = boto3.client("ec2", region_name="us-east-1") + cf_conn = boto3.client("cloudformation", region_name="us-east-1") + cf_conn.create_stack( + StackName="test_stack", TemplateBody=json.dumps(dummy_template_json) + ) + associations = client.describe_iam_instance_profile_associations() + associations["IamInstanceProfileAssociations"].should.have.length_of(1) + associations["IamInstanceProfileAssociations"][0]["IamInstanceProfile"][ + "Arn" + ].should.contain("test_stack") + + cf_conn.delete_stack(StackName="test_stack") + associations = client.describe_iam_instance_profile_associations() + associations["IamInstanceProfileAssociations"].should.have.length_of(0) From 
4245497a9774b657376ea202fd8b072f9237a603 Mon Sep 17 00:00:00 2001 From: Brian Pandola Date: Sat, 21 Nov 2020 02:35:46 -0800 Subject: [PATCH 642/658] Fix: redshift:EnableSnapshotCopy raises incorrect Exception (#3485) The previous code was trying to raise a botocore ClientError directly, which was actually generating a secondary AttributeError because the arguments passed to ClientError() were incorrect. This replaces the ClientError() call with a proper moto exception class for Redshift and fixes the test assertions appropriately. Supersedes #1957 --- moto/redshift/models.py | 6 ++---- tests/test_redshift/test_redshift.py | 9 +++++++++ 2 files changed, 11 insertions(+), 4 deletions(-) diff --git a/moto/redshift/models.py b/moto/redshift/models.py index 0bdb14edc138..625796f8a3ab 100644 --- a/moto/redshift/models.py +++ b/moto/redshift/models.py @@ -573,10 +573,8 @@ def enable_snapshot_copy(self, **kwargs): cluster.encrypted == "true" and kwargs["snapshot_copy_grant_name"] is None ): - raise ClientError( - "InvalidParameterValue", - "SnapshotCopyGrantName is required for Snapshot Copy " - "on KMS encrypted clusters.", + raise InvalidParameterValueError( + "SnapshotCopyGrantName is required for Snapshot Copy on KMS encrypted clusters." ) status = { "DestinationRegion": kwargs["destination_region"], diff --git a/tests/test_redshift/test_redshift.py b/tests/test_redshift/test_redshift.py index cf96ee15ffaa..8272cea8274c 100644 --- a/tests/test_redshift/test_redshift.py +++ b/tests/test_redshift/test_redshift.py @@ -12,6 +12,7 @@ InvalidSubnet, ) from botocore.exceptions import ClientError +import pytest import sure # noqa from moto import mock_ec2 @@ -1260,6 +1261,14 @@ def test_enable_snapshot_copy(): MasterUserPassword="password", NodeType="ds2.xlarge", ) + with pytest.raises(ClientError) as ex: + client.enable_snapshot_copy( + ClusterIdentifier="test", DestinationRegion="us-west-2", RetentionPeriod=3, + ) + ex.value.response["Error"]["Code"].should.equal("InvalidParameterValue") + ex.value.response["Error"]["Message"].should.contain( + "SnapshotCopyGrantName is required for Snapshot Copy on KMS encrypted clusters." + ) client.enable_snapshot_copy( ClusterIdentifier="test", DestinationRegion="us-west-2", From 93b393c67979171264268882c43b97557c19aa1b Mon Sep 17 00:00:00 2001 From: Brian Pandola Date: Sat, 21 Nov 2020 05:36:33 -0800 Subject: [PATCH 643/658] Fix: Python 2/3 Incompatibility (#3488) Previous code would raise `TypeError: 'dict_keys' object is not subscriptable` when run under Python 3. * Re-write code in Python 2/3 compatible way. * Add clarifying comment. * Add test coverage. Supersedes #3227 --- moto/cognitoidp/models.py | 6 ++++-- tests/test_cognitoidp/test_cognitoidp.py | 25 ++++++++++++++++++++++++ 2 files changed, 29 insertions(+), 2 deletions(-) diff --git a/moto/cognitoidp/models.py b/moto/cognitoidp/models.py index 6ee71cbc04b4..7078583fa260 100644 --- a/moto/cognitoidp/models.py +++ b/moto/cognitoidp/models.py @@ -1066,5 +1066,7 @@ def find_region_by_value(key, value): if key == "access_token" and value in user_pool.access_tokens: return region - - return cognitoidp_backends.keys()[0] + # If we can't find the `client_id` or `access_token`, we just pass + # back a default backend region, which will raise the appropriate + # error message (e.g. NotAuthorized or NotFound). 
+ return list(cognitoidp_backends)[0] diff --git a/tests/test_cognitoidp/test_cognitoidp.py b/tests/test_cognitoidp/test_cognitoidp.py index 54ee9528f71f..c61be4aa4f33 100644 --- a/tests/test_cognitoidp/test_cognitoidp.py +++ b/tests/test_cognitoidp/test_cognitoidp.py @@ -1840,6 +1840,31 @@ def test_admin_set_user_password(): result["UserStatus"].should.equal("CONFIRMED") +@mock_cognitoidp +def test_change_password_with_invalid_token_raises_error(): + client = boto3.client("cognito-idp", "us-west-2") + with pytest.raises(ClientError) as ex: + client.change_password( + AccessToken=str(uuid.uuid4()), + PreviousPassword="previous_password", + ProposedPassword="newer_password", + ) + ex.value.response["Error"]["Code"].should.equal("NotAuthorizedException") + + +@mock_cognitoidp +def test_confirm_forgot_password_with_non_existent_client_id_raises_error(): + client = boto3.client("cognito-idp", "us-west-2") + with pytest.raises(ClientError) as ex: + client.confirm_forgot_password( + ClientId="non-existent-client-id", + Username="not-existent-username", + ConfirmationCode=str(uuid.uuid4()), + Password=str(uuid.uuid4()), + ) + ex.value.response["Error"]["Code"].should.equal("ResourceNotFoundException") + + # Test will retrieve public key from cognito.amazonaws.com/.well-known/jwks.json, # which isnt mocked in ServerMode if not settings.TEST_SERVER_MODE: From 53a3e52c67288c38fd7389c2199eb7012303d4cb Mon Sep 17 00:00:00 2001 From: Brian Pandola Date: Sun, 22 Nov 2020 10:54:59 -0800 Subject: [PATCH 644/658] Fix: EMR `ReleaseLabel` validation does not respect semantic versioning (#3489) Fixes #3474 --- .coveragerc | 1 + moto/emr/responses.py | 6 ++- moto/emr/utils.py | 74 ++++++++++++++++++++++++++++++++ tests/test_emr/test_emr_boto3.py | 2 +- tests/test_emr/test_utils.py | 49 +++++++++++++++++++++ 5 files changed, 129 insertions(+), 3 deletions(-) create mode 100644 tests/test_emr/test_utils.py diff --git a/.coveragerc b/.coveragerc index 25d85b805c10..2130ec2adedd 100644 --- a/.coveragerc +++ b/.coveragerc @@ -3,6 +3,7 @@ exclude_lines = if __name__ == .__main__.: raise NotImplemented. 
+ return NotImplemented def __repr__ [run] diff --git a/moto/emr/responses.py b/moto/emr/responses.py index 234fbc8e79c8..a5d98ced4823 100644 --- a/moto/emr/responses.py +++ b/moto/emr/responses.py @@ -13,7 +13,7 @@ from moto.core.utils import tags_from_query_string from .exceptions import EmrError from .models import emr_backends -from .utils import steps_from_query_string, Unflattener +from .utils import steps_from_query_string, Unflattener, ReleaseLabel def generate_boto3_response(operation): @@ -323,7 +323,9 @@ def run_job_flow(self): custom_ami_id = self._get_param("CustomAmiId") if custom_ami_id: kwargs["custom_ami_id"] = custom_ami_id - if release_label and release_label < "emr-5.7.0": + if release_label and ( + ReleaseLabel(release_label) < ReleaseLabel("emr-5.7.0") + ): message = "Custom AMI is not allowed" raise EmrError( error_type="ValidationException", diff --git a/moto/emr/utils.py b/moto/emr/utils.py index 48f3232facee..506201c1c019 100644 --- a/moto/emr/utils.py +++ b/moto/emr/utils.py @@ -1,5 +1,6 @@ from __future__ import unicode_literals import random +import re import string from moto.core.utils import camelcase_to_underscores @@ -144,3 +145,76 @@ def parse_list(x): @staticmethod def parse_scalar(x): return x + + +class ReleaseLabel(object): + + version_re = re.compile(r"^emr-(\d+)\.(\d+)\.(\d+)$") + + def __init__(self, release_label): + major, minor, patch = self.parse(release_label) + + self.major = major + self.minor = minor + self.patch = patch + + @classmethod + def parse(cls, release_label): + if not release_label: + raise ValueError("Invalid empty ReleaseLabel: %r" % release_label) + + match = cls.version_re.match(release_label) + if not match: + raise ValueError("Invalid ReleaseLabel: %r" % release_label) + + major, minor, patch = match.groups() + + major = int(major) + minor = int(minor) + patch = int(patch) + + return major, minor, patch + + def __str__(self): + version = "emr-%d.%d.%d" % (self.major, self.minor, self.patch) + return version + + def __repr__(self): + return "%s(%r)" % (self.__class__.__name__, str(self)) + + def __iter__(self): + return iter((self.major, self.minor, self.patch)) + + def __eq__(self, other): + if not isinstance(other, self.__class__): + return NotImplemented + return ( + self.major == other.major + and self.minor == other.minor + and self.patch == other.patch + ) + + def __ne__(self, other): + if not isinstance(other, self.__class__): + return NotImplemented + return tuple(self) != tuple(other) + + def __lt__(self, other): + if not isinstance(other, self.__class__): + return NotImplemented + return tuple(self) < tuple(other) + + def __le__(self, other): + if not isinstance(other, self.__class__): + return NotImplemented + return tuple(self) <= tuple(other) + + def __gt__(self, other): + if not isinstance(other, self.__class__): + return NotImplemented + return tuple(self) > tuple(other) + + def __ge__(self, other): + if not isinstance(other, self.__class__): + return NotImplemented + return tuple(self) >= tuple(other) diff --git a/tests/test_emr/test_emr_boto3.py b/tests/test_emr/test_emr_boto3.py index 8b815e0fa055..e2aa4944470c 100644 --- a/tests/test_emr/test_emr_boto3.py +++ b/tests/test_emr/test_emr_boto3.py @@ -636,7 +636,7 @@ def test_run_job_flow_with_custom_ami(): args = deepcopy(run_job_flow_args) args["CustomAmiId"] = "MyEmrCustomAmi" - args["ReleaseLabel"] = "emr-5.7.0" + args["ReleaseLabel"] = "emr-5.31.0" cluster_id = client.run_job_flow(**args)["JobFlowId"] resp = client.describe_cluster(ClusterId=cluster_id) 
resp["Cluster"]["CustomAmiId"].should.equal("MyEmrCustomAmi") diff --git a/tests/test_emr/test_utils.py b/tests/test_emr/test_utils.py new file mode 100644 index 000000000000..b836ebf48da8 --- /dev/null +++ b/tests/test_emr/test_utils.py @@ -0,0 +1,49 @@ +import pytest + +from moto.emr.utils import ReleaseLabel + + +def test_invalid_release_labels_raise_exception(): + invalid_releases = [ + "", + "0", + "1.0", + "emr-2.0", + ] + for invalid_release in invalid_releases: + with pytest.raises(ValueError): + ReleaseLabel(invalid_release) + + +def test_release_label_comparisons(): + assert str(ReleaseLabel("emr-5.1.2")) == "emr-5.1.2" + + assert ReleaseLabel("emr-5.0.0") != ReleaseLabel("emr-5.0.1") + assert ReleaseLabel("emr-5.0.0") == ReleaseLabel("emr-5.0.0") + + assert ReleaseLabel("emr-5.31.0") > ReleaseLabel("emr-5.7.0") + assert ReleaseLabel("emr-6.0.0") > ReleaseLabel("emr-5.7.0") + + assert ReleaseLabel("emr-5.7.0") < ReleaseLabel("emr-5.10.0") + assert ReleaseLabel("emr-5.10.0") < ReleaseLabel("emr-5.10.1") + + assert ReleaseLabel("emr-5.60.0") >= ReleaseLabel("emr-5.7.0") + assert ReleaseLabel("emr-6.0.0") >= ReleaseLabel("emr-6.0.0") + + assert ReleaseLabel("emr-5.7.0") <= ReleaseLabel("emr-5.17.0") + assert ReleaseLabel("emr-5.7.0") <= ReleaseLabel("emr-5.7.0") + + releases_unsorted = [ + ReleaseLabel("emr-5.60.2"), + ReleaseLabel("emr-4.0.1"), + ReleaseLabel("emr-4.0.0"), + ReleaseLabel("emr-5.7.3"), + ] + releases_sorted = [str(label) for label in sorted(releases_unsorted)] + expected = [ + "emr-4.0.0", + "emr-4.0.1", + "emr-5.7.3", + "emr-5.60.2", + ] + assert releases_sorted == expected From 161cb468869a45ff62d061a77028979f6555a48b Mon Sep 17 00:00:00 2001 From: Brian Pandola Date: Sat, 21 Nov 2020 23:02:52 -0800 Subject: [PATCH 645/658] Add coverage for `ContentType=JSON` server requests The `boto` library explicitly requests JSON responses from Redshift endpoints --- tests/test_redshift/test_server.py | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/tests/test_redshift/test_server.py b/tests/test_redshift/test_server.py index f4eee85e8ba5..e3ba6d9d4279 100644 --- a/tests/test_redshift/test_server.py +++ b/tests/test_redshift/test_server.py @@ -1,6 +1,5 @@ from __future__ import unicode_literals -import json import sure # noqa import moto.server as server @@ -20,3 +19,14 @@ def test_describe_clusters(): result = res.data.decode("utf-8") result.should.contain("") + + +@mock_redshift +def test_describe_clusters_with_json_content_type(): + backend = server.create_backend_app("redshift") + test_client = backend.test_client() + + res = test_client.get("/?Action=DescribeClusters&ContentType=JSON") + + result = res.data.decode("utf-8") + result.should.contain('{"Clusters": []}') From 555be78f6e1636b4f94e225c0e78fe8adcde95d0 Mon Sep 17 00:00:00 2001 From: Brian Pandola Date: Sat, 21 Nov 2020 23:13:04 -0800 Subject: [PATCH 646/658] Fix: redshift:DescribeClusterSnapshots should not raise ClusterNotFoundError Real AWS backend returns an empty array instead of raising an error. 
--- moto/redshift/models.py | 1 - tests/test_redshift/test_redshift.py | 9 ++++----- 2 files changed, 4 insertions(+), 6 deletions(-) diff --git a/moto/redshift/models.py b/moto/redshift/models.py index 625796f8a3ab..5bbe348bcfc1 100644 --- a/moto/redshift/models.py +++ b/moto/redshift/models.py @@ -777,7 +777,6 @@ def describe_cluster_snapshots( cluster_snapshots.append(snapshot) if cluster_snapshots: return cluster_snapshots - raise ClusterNotFoundError(cluster_identifier) if snapshot_identifier: if snapshot_identifier in self.snapshots: diff --git a/tests/test_redshift/test_redshift.py b/tests/test_redshift/test_redshift.py index 8272cea8274c..f7c8b872c306 100644 --- a/tests/test_redshift/test_redshift.py +++ b/tests/test_redshift/test_redshift.py @@ -826,12 +826,11 @@ def test_describe_cluster_snapshots(): @mock_redshift def test_describe_cluster_snapshots_not_found_error(): client = boto3.client("redshift", region_name="us-east-1") - cluster_identifier = "my_cluster" - snapshot_identifier = "my_snapshot" + cluster_identifier = "non-existent-cluster-id" + snapshot_identifier = "non-existent-snapshot-id" - client.describe_cluster_snapshots.when.called_with( - ClusterIdentifier=cluster_identifier - ).should.throw(ClientError, "Cluster {} not found.".format(cluster_identifier)) + resp = client.describe_cluster_snapshots(ClusterIdentifier=cluster_identifier) + resp["Snapshots"].should.have.length_of(0) client.describe_cluster_snapshots.when.called_with( SnapshotIdentifier=snapshot_identifier From 49c6e65603ed2ae6f651a529796e0b495811c4ed Mon Sep 17 00:00:00 2001 From: Brian Pandola Date: Sat, 21 Nov 2020 23:21:15 -0800 Subject: [PATCH 647/658] Fix: DeleteCluster behavior with SkipFinalClusterSnapshot Original code was trying to raise a ClientError directly. Change to appropriate Redshift exception class. * Fix test assertion for `boto`. * Add test coverage for `boto3`. 
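For illustration, a sketch of the client-facing behavior after this change (hypothetical cluster name; it mirrors the new tests below):

    import boto3
    from botocore.exceptions import ClientError
    from moto import mock_redshift

    @mock_redshift
    def check_delete_requires_final_snapshot():
        client = boto3.client("redshift", region_name="us-east-1")
        client.create_cluster(
            ClusterIdentifier="demo",
            ClusterType="single-node",
            DBName="test",
            MasterUsername="user",
            MasterUserPassword="password",
            NodeType="ds2.xlarge",
        )
        try:
            # SkipFinalClusterSnapshot=False without a
            # FinalClusterSnapshotIdentifier is an invalid combination
            # and should raise a well-formed ClientError, not crash.
            client.delete_cluster(
                ClusterIdentifier="demo", SkipFinalClusterSnapshot=False
            )
        except ClientError as err:
            assert err.response["Error"]["Code"] == "InvalidParameterCombination"

    check_delete_requires_final_snapshot()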
--- moto/redshift/exceptions.py | 7 +++ moto/redshift/models.py | 9 ++-- tests/test_redshift/test_redshift.py | 73 +++++++++++++++++++++++++++- 3 files changed, 83 insertions(+), 6 deletions(-) diff --git a/moto/redshift/exceptions.py b/moto/redshift/exceptions.py index b5f83d3bc25f..c071d19da3ad 100644 --- a/moto/redshift/exceptions.py +++ b/moto/redshift/exceptions.py @@ -143,3 +143,10 @@ def __init__(self): super(ClusterAlreadyExistsFaultError, self).__init__( "ClusterAlreadyExists", "Cluster already exists" ) + + +class InvalidParameterCombinationError(RedshiftClientError): + def __init__(self, message): + super(InvalidParameterCombinationError, self).__init__( + "InvalidParameterCombination", message + ) diff --git a/moto/redshift/models.py b/moto/redshift/models.py index 5bbe348bcfc1..2fc73b8f675d 100644 --- a/moto/redshift/models.py +++ b/moto/redshift/models.py @@ -4,7 +4,7 @@ import datetime from boto3 import Session -from botocore.exceptions import ClientError + from moto.compat import OrderedDict from moto.core import BaseBackend, BaseModel, CloudFormationModel from moto.core.utils import iso_8601_datetime_with_milliseconds @@ -17,6 +17,7 @@ ClusterSnapshotAlreadyExistsError, ClusterSnapshotNotFoundError, ClusterSubnetGroupNotFoundError, + InvalidParameterCombinationError, InvalidParameterValueError, InvalidSubnetError, ResourceNotFoundFaultError, @@ -655,10 +656,8 @@ def delete_cluster(self, **cluster_kwargs): cluster_skip_final_snapshot is False and cluster_snapshot_identifer is None ): - raise ClientError( - "InvalidParameterValue", - "FinalSnapshotIdentifier is required for Snapshot copy " - "when SkipFinalSnapshot is False", + raise InvalidParameterCombinationError( + "FinalClusterSnapshotIdentifier is required unless SkipFinalClusterSnapshot is specified." ) elif ( cluster_skip_final_snapshot is False diff --git a/tests/test_redshift/test_redshift.py b/tests/test_redshift/test_redshift.py index f7c8b872c306..4594092cf6d8 100644 --- a/tests/test_redshift/test_redshift.py +++ b/tests/test_redshift/test_redshift.py @@ -424,7 +424,7 @@ def test_delete_cluster(): ) conn.delete_cluster.when.called_with(cluster_identifier, False).should.throw( - AttributeError + boto.exception.JSONResponseError ) clusters = conn.describe_clusters()["DescribeClustersResponse"][ @@ -1363,3 +1363,74 @@ def test_create_duplicate_cluster_fails(): client.create_cluster.when.called_with(**kwargs).should.throw( ClientError, "ClusterAlreadyExists" ) + + +@mock_redshift +def test_delete_cluster_with_final_snapshot(): + client = boto3.client("redshift", region_name="us-east-1") + + with pytest.raises(ClientError) as ex: + client.delete_cluster(ClusterIdentifier="non-existent") + ex.value.response["Error"]["Code"].should.equal("ClusterNotFound") + ex.value.response["Error"]["Message"].should.match(r"Cluster .+ not found.") + + cluster_identifier = "my_cluster" + client.create_cluster( + ClusterIdentifier=cluster_identifier, + ClusterType="single-node", + DBName="test", + MasterUsername="user", + MasterUserPassword="password", + NodeType="ds2.xlarge", + ) + + with pytest.raises(ClientError) as ex: + client.delete_cluster( + ClusterIdentifier=cluster_identifier, SkipFinalClusterSnapshot=False + ) + ex.value.response["Error"]["Code"].should.equal("InvalidParameterCombination") + ex.value.response["Error"]["Message"].should.contain( + "FinalClusterSnapshotIdentifier is required unless SkipFinalClusterSnapshot is specified." 
+ ) + + snapshot_identifier = "my_snapshot" + client.delete_cluster( + ClusterIdentifier=cluster_identifier, + SkipFinalClusterSnapshot=False, + FinalClusterSnapshotIdentifier=snapshot_identifier, + ) + + resp = client.describe_cluster_snapshots(ClusterIdentifier=cluster_identifier) + resp["Snapshots"].should.have.length_of(1) + resp["Snapshots"][0]["SnapshotIdentifier"].should.equal(snapshot_identifier) + resp["Snapshots"][0]["SnapshotType"].should.equal("manual") + + with pytest.raises(ClientError) as ex: + client.describe_clusters(ClusterIdentifier=cluster_identifier) + ex.value.response["Error"]["Code"].should.equal("ClusterNotFound") + ex.value.response["Error"]["Message"].should.match(r"Cluster .+ not found.") + + +@mock_redshift +def test_delete_cluster_without_final_snapshot(): + client = boto3.client("redshift", region_name="us-east-1") + cluster_identifier = "my_cluster" + client.create_cluster( + ClusterIdentifier=cluster_identifier, + ClusterType="single-node", + DBName="test", + MasterUsername="user", + MasterUserPassword="password", + NodeType="ds2.xlarge", + ) + client.delete_cluster( + ClusterIdentifier=cluster_identifier, SkipFinalClusterSnapshot=True + ) + + resp = client.describe_cluster_snapshots(ClusterIdentifier=cluster_identifier) + resp["Snapshots"].should.have.length_of(0) + + with pytest.raises(ClientError) as ex: + client.describe_clusters(ClusterIdentifier=cluster_identifier) + ex.value.response["Error"]["Code"].should.equal("ClusterNotFound") + ex.value.response["Error"]["Message"].should.match(r"Cluster .+ not found.") From b4d7d183ab9963e8feb80e0ee92e329a7270f994 Mon Sep 17 00:00:00 2001 From: Brian Pandola Date: Sat, 21 Nov 2020 23:25:33 -0800 Subject: [PATCH 648/658] Add additional detail to ClientError assertions We check the message now to ensure we've raised the *correct* ClientError --- tests/test_redshift/test_redshift.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/tests/test_redshift/test_redshift.py b/tests/test_redshift/test_redshift.py index 4594092cf6d8..e2be4e75a838 100644 --- a/tests/test_redshift/test_redshift.py +++ b/tests/test_redshift/test_redshift.py @@ -76,7 +76,7 @@ def test_create_snapshot_copy_grant(): client.describe_snapshot_copy_grants.when.called_with( SnapshotCopyGrantName="test-us-east-1" - ).should.throw(Exception) + ).should.throw(ClientError) @mock_redshift @@ -866,8 +866,8 @@ def test_delete_cluster_snapshot(): # Delete invalid id client.delete_cluster_snapshot.when.called_with( - SnapshotIdentifier="not-a-snapshot" - ).should.throw(ClientError) + SnapshotIdentifier="non-existent" + ).should.throw(ClientError, "Snapshot non-existent not found.") @mock_redshift @@ -891,7 +891,7 @@ def test_cluster_snapshot_already_exists(): client.create_cluster_snapshot.when.called_with( SnapshotIdentifier=snapshot_identifier, ClusterIdentifier=cluster_identifier - ).should.throw(ClientError) + ).should.throw(ClientError, "{} already exists".format(snapshot_identifier)) @mock_redshift From cf7869d0e2a3e011ec168d561b9de21158a2be94 Mon Sep 17 00:00:00 2001 From: Brian Pandola Date: Sat, 21 Nov 2020 23:43:38 -0800 Subject: [PATCH 649/658] Add check for `UnknownSnapshotCopyRegionFault` error --- moto/redshift/exceptions.py | 7 +++++++ moto/redshift/models.py | 5 +++++ tests/test_redshift/test_redshift.py | 9 +++++++++ 3 files changed, 21 insertions(+) diff --git a/moto/redshift/exceptions.py b/moto/redshift/exceptions.py index c071d19da3ad..eb6cea99e14b 100644 --- a/moto/redshift/exceptions.py +++ 
b/moto/redshift/exceptions.py @@ -150,3 +150,10 @@ def __init__(self, message): super(InvalidParameterCombinationError, self).__init__( "InvalidParameterCombination", message ) + + +class UnknownSnapshotCopyRegionFaultError(RedshiftClientError): + def __init__(self, message): + super(UnknownSnapshotCopyRegionFaultError, self).__init__( + "UnknownSnapshotCopyRegionFault", message + ) diff --git a/moto/redshift/models.py b/moto/redshift/models.py index 2fc73b8f675d..bb28af0293b2 100644 --- a/moto/redshift/models.py +++ b/moto/redshift/models.py @@ -26,6 +26,7 @@ SnapshotCopyDisabledFaultError, SnapshotCopyGrantAlreadyExistsFaultError, SnapshotCopyGrantNotFoundFaultError, + UnknownSnapshotCopyRegionFaultError, ) @@ -577,6 +578,10 @@ def enable_snapshot_copy(self, **kwargs): raise InvalidParameterValueError( "SnapshotCopyGrantName is required for Snapshot Copy on KMS encrypted clusters." ) + if kwargs["destination_region"] == self.region: + raise UnknownSnapshotCopyRegionFaultError( + "Invalid region {}".format(self.region) + ) status = { "DestinationRegion": kwargs["destination_region"], "RetentionPeriod": kwargs["retention_period"], diff --git a/tests/test_redshift/test_redshift.py b/tests/test_redshift/test_redshift.py index e2be4e75a838..c9f0e3572dd3 100644 --- a/tests/test_redshift/test_redshift.py +++ b/tests/test_redshift/test_redshift.py @@ -1268,6 +1268,15 @@ def test_enable_snapshot_copy(): ex.value.response["Error"]["Message"].should.contain( "SnapshotCopyGrantName is required for Snapshot Copy on KMS encrypted clusters." ) + with pytest.raises(ClientError) as ex: + client.enable_snapshot_copy( + ClusterIdentifier="test", + DestinationRegion="us-east-1", + RetentionPeriod=3, + SnapshotCopyGrantName="invalid-us-east-1-to-us-east-1", + ) + ex.value.response["Error"]["Code"].should.equal("UnknownSnapshotCopyRegionFault") + ex.value.response["Error"]["Message"].should.contain("Invalid region us-east-1") client.enable_snapshot_copy( ClusterIdentifier="test", DestinationRegion="us-west-2", From 5a2cbf1ecad4f56e75adcd2f177abed79bd7698c Mon Sep 17 00:00:00 2001 From: Brian Pandola Date: Sat, 21 Nov 2020 23:51:33 -0800 Subject: [PATCH 650/658] Fix: Duplicate test name causing loss of coverage A test added in #2401 copied the name of an existing test, preventing it from being run. This commit renames the second test, allowing both to be picked up by the test runner. 
--- tests/test_redshift/test_redshift.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/test_redshift/test_redshift.py b/tests/test_redshift/test_redshift.py index c9f0e3572dd3..f2acf4d003ec 100644 --- a/tests/test_redshift/test_redshift.py +++ b/tests/test_redshift/test_redshift.py @@ -43,7 +43,7 @@ def test_create_cluster_boto3(): @mock_redshift -def test_create_cluster_boto3(): +def test_create_cluster_with_enhanced_vpc_routing_enabled(): client = boto3.client("redshift", region_name="us-east-1") response = client.create_cluster( DBName="test", From d58d3e2c2eab8d19ad3998dd03e4e536f9468ca7 Mon Sep 17 00:00:00 2001 From: Brian Pandola Date: Wed, 25 Nov 2020 02:48:05 -0800 Subject: [PATCH 651/658] Fix: yield tests ignored by pytest runner (#3500) Closes #3499 --- tests/test_iam/test_iam_policies.py | 23 +++++++++-------------- 1 file changed, 9 insertions(+), 14 deletions(-) diff --git a/tests/test_iam/test_iam_policies.py b/tests/test_iam/test_iam_policies.py index fec291c94cd2..96cd632c6e89 100644 --- a/tests/test_iam/test_iam_policies.py +++ b/tests/test_iam/test_iam_policies.py @@ -3,6 +3,7 @@ import boto3 from botocore.exceptions import ClientError import pytest +import sure # noqa from moto import mock_iam @@ -1611,31 +1612,25 @@ ] -def test_create_policy_with_invalid_policy_documents(): - for test_case in invalid_policy_document_test_cases: - yield check_create_policy_with_invalid_policy_document, test_case - - -def test_create_policy_with_valid_policy_documents(): - for valid_policy_document in valid_policy_documents: - yield check_create_policy_with_valid_policy_document, valid_policy_document - - +@pytest.mark.parametrize("invalid_policy_document", invalid_policy_document_test_cases) @mock_iam -def check_create_policy_with_invalid_policy_document(test_case): +def test_create_policy_with_invalid_policy_document(invalid_policy_document): conn = boto3.client("iam", region_name="us-east-1") with pytest.raises(ClientError) as ex: conn.create_policy( PolicyName="TestCreatePolicy", - PolicyDocument=json.dumps(test_case["document"]), + PolicyDocument=json.dumps(invalid_policy_document["document"]), ) ex.value.response["Error"]["Code"].should.equal("MalformedPolicyDocument") ex.value.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) - ex.value.response["Error"]["Message"].should.equal(test_case["error_message"]) + ex.value.response["Error"]["Message"].should.equal( + invalid_policy_document["error_message"] + ) +@pytest.mark.parametrize("valid_policy_document", valid_policy_documents) @mock_iam -def check_create_policy_with_valid_policy_document(valid_policy_document): +def test_create_policy_with_valid_policy_document(valid_policy_document): conn = boto3.client("iam", region_name="us-east-1") conn.create_policy( PolicyName="TestCreatePolicy", PolicyDocument=json.dumps(valid_policy_document) From 9e3b23758af52b315beaf8bf6277e4c07c3e5c77 Mon Sep 17 00:00:00 2001 From: Garrett Date: Wed, 25 Nov 2020 15:28:05 -0500 Subject: [PATCH 652/658] [dynamodb2] Support include projection on indexes (#3498) * [dynamodb2] Support include projection on indexes * linter --- moto/dynamodb2/models/__init__.py | 16 ++++++-- tests/test_dynamodb2/test_dynamodb.py | 55 +++++++++++++++++++++++++++ 2 files changed, 67 insertions(+), 4 deletions(-) diff --git a/moto/dynamodb2/models/__init__.py b/moto/dynamodb2/models/__init__.py index 18b0b918f9c2..7218fe0c9c61 100644 --- a/moto/dynamodb2/models/__init__.py +++ b/moto/dynamodb2/models/__init__.py @@ -292,11 +292,19 @@ def 
project(self, item): :return: """ if self.projection: - if self.projection.get("ProjectionType", None) == "KEYS_ONLY": - allowed_attributes = ",".join( - self.table_key_attrs + [key["AttributeName"] for key in self.schema] + projection_type = self.projection.get("ProjectionType", None) + key_attributes = self.table_key_attrs + [ + key["AttributeName"] for key in self.schema + ] + + if projection_type == "KEYS_ONLY": + item.filter(",".join(key_attributes)) + elif projection_type == "INCLUDE": + allowed_attributes = key_attributes + self.projection.get( + "NonKeyAttributes", [] ) - item.filter(allowed_attributes) + item.filter(",".join(allowed_attributes)) + # ALL is handled implicitly by not filtering return item diff --git a/tests/test_dynamodb2/test_dynamodb.py b/tests/test_dynamodb2/test_dynamodb.py index 3571239e2249..0e0fcb08239e 100644 --- a/tests/test_dynamodb2/test_dynamodb.py +++ b/tests/test_dynamodb2/test_dynamodb.py @@ -5523,6 +5523,61 @@ def test_gsi_projection_type_keys_only(): ) +@mock_dynamodb2 +def test_gsi_projection_type_include(): + table_schema = { + "KeySchema": [{"AttributeName": "partitionKey", "KeyType": "HASH"}], + "GlobalSecondaryIndexes": [ + { + "IndexName": "GSI-INC", + "KeySchema": [ + {"AttributeName": "gsiK1PartitionKey", "KeyType": "HASH"}, + {"AttributeName": "gsiK1SortKey", "KeyType": "RANGE"}, + ], + "Projection": { + "ProjectionType": "INCLUDE", + "NonKeyAttributes": ["projectedAttribute"], + }, + } + ], + "AttributeDefinitions": [ + {"AttributeName": "partitionKey", "AttributeType": "S"}, + {"AttributeName": "gsiK1PartitionKey", "AttributeType": "S"}, + {"AttributeName": "gsiK1SortKey", "AttributeType": "S"}, + ], + } + + item = { + "partitionKey": "pk-1", + "gsiK1PartitionKey": "gsi-pk", + "gsiK1SortKey": "gsi-sk", + "projectedAttribute": "lore ipsum", + "nonProjectedAttribute": "dolor sit amet", + } + + dynamodb = boto3.resource("dynamodb", region_name="us-east-1") + dynamodb.create_table( + TableName="test-table", BillingMode="PAY_PER_REQUEST", **table_schema + ) + table = dynamodb.Table("test-table") + table.put_item(Item=item) + + items = table.query( + KeyConditionExpression=Key("gsiK1PartitionKey").eq("gsi-pk"), + IndexName="GSI-INC", + )["Items"] + items.should.have.length_of(1) + # Item should only include keys and additionally projected attributes only + items[0].should.equal( + { + "gsiK1PartitionKey": "gsi-pk", + "gsiK1SortKey": "gsi-sk", + "partitionKey": "pk-1", + "projectedAttribute": "lore ipsum", + } + ) + + @mock_dynamodb2 def test_lsi_projection_type_keys_only(): table_schema = { From f58e6e1038baf2ce7845c0aadbc955769cb285ee Mon Sep 17 00:00:00 2001 From: Christian Bandowski Date: Thu, 26 Nov 2020 09:52:58 +0100 Subject: [PATCH 653/658] #3494 fix using EventBridge via Go SDK (#3495) --- moto/server.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/moto/server.py b/moto/server.py index a10dc4e3e93f..28e4ce5565a2 100644 --- a/moto/server.py +++ b/moto/server.py @@ -93,6 +93,11 @@ def infer_service_region_host(self, environ): # S3 is the last resort when the target is also unknown service, region = DEFAULT_SERVICE_REGION + if service == "EventBridge": + # Go SDK uses 'EventBridge' in the SigV4 request instead of 'events' + # see https://github.com/spulec/moto/issues/3494 + service = "events" + if service == "dynamodb": if environ["HTTP_X_AMZ_TARGET"].startswith("DynamoDBStreams"): host = "dynamodbstreams" From bd4aa65635be338090e9be59cc0b260023968090 Mon Sep 17 00:00:00 2001 From: Szymon Zmilczak Date: Thu, 26 Nov 2020 
12:12:09 +0100 Subject: [PATCH 654/658] Mark sts.get_caller_identity as implemented (#3501) --- IMPLEMENTATION_COVERAGE.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/IMPLEMENTATION_COVERAGE.md b/IMPLEMENTATION_COVERAGE.md index 9ea4330fa2fb..4ccc4e2dc866 100644 --- a/IMPLEMENTATION_COVERAGE.md +++ b/IMPLEMENTATION_COVERAGE.md @@ -8223,7 +8223,7 @@ - [X] assume_role_with_web_identity - [ ] decode_authorization_message - [ ] get_access_key_info -- [ ] get_caller_identity +- [x] get_caller_identity - [X] get_federation_token - [X] get_session_token
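The next patch removes moto's direct dependency on the long-deprecated `boto` package. Besides vendoring the handful of `boto` classes moto still uses, it switches region enumeration from `boto.ec2.regions()` to `boto3`. A minimal standalone sketch of that region-backend pattern, assuming only that `boto3` is installed (the `ExampleBackend` class is a hypothetical stand-in for moto's per-service backend classes, not code from the patch):

    from boto3 import Session


    class ExampleBackend(object):
        def __init__(self, region):
            self.region = region


    # One backend instance per region, covering the commercial, GovCloud,
    # and China partitions - the same three loops the patch adds for the
    # ec2instanceconnect and rds backends.
    example_backends = {}
    for partition in ("aws", "aws-us-gov", "aws-cn"):
        for region in Session().get_available_regions("ec2", partition_name=partition):
            example_backends[region] = ExampleBackend(region)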
From ae85c539fd57034c4d5cfd0f95af41ff19862dd1 Mon Sep 17 00:00:00 2001 From: Brian Pandola Date: Thu, 26 Nov 2020 23:59:15 -0800 Subject: [PATCH 655/658] Remove `boto` package dependency The `boto` library (long ago superseded by `boto3`) has not had an official release in over two years or even a commit in the last 18 months. Importing the package (or indirectly importing it via `moto`) generates a deprecation warning. Additionally, an ever-increasing number of `moto` users who have left `boto` behind for `boto3` are still being forced to install `boto`. This commit vendors a very small subset of the `boto` library--only the code required by `moto` to run--into the /packages subdirectory. A README file is included explaining the origin of the files and recommending how they can be removed entirely in a future release. NOTE: Users of `boto` will still be able to use `moto` after this is merged. closes #2978 closes #3013 closes #3170 closes #3418 relates to #2950 --- moto/autoscaling/models.py | 5 +- moto/cloudformation/parsing.py | 2 +- moto/ec2/models.py | 13 +- moto/ec2/responses/instances.py | 2 +- moto/ec2instanceconnect/models.py | 10 +- moto/elb/models.py | 4 +- moto/elb/responses.py | 4 +- moto/packages/boto/README.md | 18 ++ moto/packages/boto/__init__.py | 0 moto/packages/boto/cloudformation/__init__.py | 0 moto/packages/boto/cloudformation/stack.py | 9 + moto/packages/boto/ec2/__init__.py | 0 moto/packages/boto/ec2/blockdevicemapping.py | 83 +++++++ moto/packages/boto/ec2/ec2object.py | 48 ++++ moto/packages/boto/ec2/elb/__init__.py | 0 moto/packages/boto/ec2/elb/attributes.py | 100 ++++++++ moto/packages/boto/ec2/elb/policies.py | 55 +++++ moto/packages/boto/ec2/image.py | 25 ++ moto/packages/boto/ec2/instance.py | 217 ++++++++++++++++++ moto/packages/boto/ec2/instancetype.py | 50 ++++ moto/packages/boto/ec2/launchspecification.py | 48 ++++ moto/packages/boto/ec2/spotinstancerequest.py | 85 +++++++ moto/packages/boto/ec2/tag.py | 35 +++ moto/rds/models.py | 12 +- setup.py | 1 - .../test_cloudformation/test_stack_parsing.py | 2 +- tests/test_ec2/test_ec2_cloudformation.py | 3 + 27 files changed, 811 insertions(+), 20 deletions(-) create mode 100644 moto/packages/boto/README.md create mode 100644 moto/packages/boto/__init__.py create mode 100644 moto/packages/boto/cloudformation/__init__.py create mode 100644 moto/packages/boto/cloudformation/stack.py create mode 100644 moto/packages/boto/ec2/__init__.py create mode 100644 moto/packages/boto/ec2/blockdevicemapping.py create mode 100644 moto/packages/boto/ec2/ec2object.py create mode 100644 moto/packages/boto/ec2/elb/__init__.py create mode 100644 moto/packages/boto/ec2/elb/attributes.py create mode 100644 moto/packages/boto/ec2/elb/policies.py create mode 100644 moto/packages/boto/ec2/image.py create mode 100644 moto/packages/boto/ec2/instance.py create mode 100644 moto/packages/boto/ec2/instancetype.py create mode 100644 moto/packages/boto/ec2/launchspecification.py create mode 100644 moto/packages/boto/ec2/spotinstancerequest.py create mode 100644 moto/packages/boto/ec2/tag.py diff --git a/moto/autoscaling/models.py b/moto/autoscaling/models.py index ee5cd9acdddf..f4afd51bea37 100644 --- a/moto/autoscaling/models.py +++ b/moto/autoscaling/models.py @@ -2,7 +2,10 @@ import random -from boto.ec2.blockdevicemapping import BlockDeviceType, BlockDeviceMapping +from moto.packages.boto.ec2.blockdevicemapping import ( + BlockDeviceType, + BlockDeviceMapping, +) from moto.ec2.exceptions import InvalidInstanceIdError from
moto.compat import OrderedDict diff --git a/moto/cloudformation/parsing.py b/moto/cloudformation/parsing.py index 168536f79e23..50de876f3111 100644 --- a/moto/cloudformation/parsing.py +++ b/moto/cloudformation/parsing.py @@ -50,7 +50,7 @@ UnformattedGetAttTemplateException, ValidationError, ) -from boto.cloudformation.stack import Output +from moto.packages.boto.cloudformation.stack import Output # List of supported CloudFormation models MODEL_LIST = CloudFormationModel.__subclasses__() diff --git a/moto/ec2/models.py b/moto/ec2/models.py index 9b5e692a7fce..7676bffb44c6 100644 --- a/moto/ec2/models.py +++ b/moto/ec2/models.py @@ -15,10 +15,15 @@ from collections import defaultdict import weakref from datetime import datetime -from boto.ec2.instance import Instance as BotoInstance, Reservation -from boto.ec2.blockdevicemapping import BlockDeviceMapping, BlockDeviceType -from boto.ec2.spotinstancerequest import SpotInstanceRequest as BotoSpotRequest -from boto.ec2.launchspecification import LaunchSpecification +from moto.packages.boto.ec2.instance import Instance as BotoInstance, Reservation +from moto.packages.boto.ec2.blockdevicemapping import ( + BlockDeviceMapping, + BlockDeviceType, +) +from moto.packages.boto.ec2.spotinstancerequest import ( + SpotInstanceRequest as BotoSpotRequest, +) +from moto.packages.boto.ec2.launchspecification import LaunchSpecification from moto.compat import OrderedDict from moto.core import BaseBackend diff --git a/moto/ec2/responses/instances.py b/moto/ec2/responses/instances.py index e9843399f7ee..eb395aa8f698 100644 --- a/moto/ec2/responses/instances.py +++ b/moto/ec2/responses/instances.py @@ -1,5 +1,5 @@ from __future__ import unicode_literals -from boto.ec2.instancetype import InstanceType +from moto.packages.boto.ec2.instancetype import InstanceType from moto.autoscaling import autoscaling_backends from moto.core.responses import BaseResponse diff --git a/moto/ec2instanceconnect/models.py b/moto/ec2instanceconnect/models.py index 43c01e7f275b..19c4717ec379 100644 --- a/moto/ec2instanceconnect/models.py +++ b/moto/ec2instanceconnect/models.py @@ -1,4 +1,4 @@ -import boto.ec2 +from boto3 import Session import json from moto.core import BaseBackend @@ -11,5 +11,9 @@ def send_ssh_public_key(self): ec2instanceconnect_backends = {} -for region in boto.ec2.regions(): - ec2instanceconnect_backends[region.name] = Ec2InstanceConnectBackend() +for region in Session().get_available_regions("ec2"): + ec2instanceconnect_backends[region] = Ec2InstanceConnectBackend() +for region in Session().get_available_regions("ec2", partition_name="aws-us-gov"): + ec2instanceconnect_backends[region] = Ec2InstanceConnectBackend() +for region in Session().get_available_regions("ec2", partition_name="aws-cn"): + ec2instanceconnect_backends[region] = Ec2InstanceConnectBackend() diff --git a/moto/elb/models.py b/moto/elb/models.py index 715758090cde..47cdfd507360 100644 --- a/moto/elb/models.py +++ b/moto/elb/models.py @@ -4,14 +4,14 @@ import pytz -from boto.ec2.elb.attributes import ( +from moto.packages.boto.ec2.elb.attributes import ( LbAttributes, ConnectionSettingAttribute, ConnectionDrainingAttribute, AccessLogAttribute, CrossZoneLoadBalancingAttribute, ) -from boto.ec2.elb.policies import Policies, OtherPolicy +from moto.packages.boto.ec2.elb.policies import Policies, OtherPolicy from moto.compat import OrderedDict from moto.core import BaseBackend, BaseModel, CloudFormationModel from moto.ec2.models import ec2_backends diff --git a/moto/elb/responses.py 
b/moto/elb/responses.py index 79db5a788793..7bf627b66d62 100644 --- a/moto/elb/responses.py +++ b/moto/elb/responses.py @@ -1,11 +1,11 @@ from __future__ import unicode_literals -from boto.ec2.elb.attributes import ( +from moto.packages.boto.ec2.elb.attributes import ( ConnectionSettingAttribute, ConnectionDrainingAttribute, AccessLogAttribute, CrossZoneLoadBalancingAttribute, ) -from boto.ec2.elb.policies import AppCookieStickinessPolicy, OtherPolicy +from moto.packages.boto.ec2.elb.policies import AppCookieStickinessPolicy, OtherPolicy from moto.core.responses import BaseResponse from .models import elb_backends diff --git a/moto/packages/boto/README.md b/moto/packages/boto/README.md new file mode 100644 index 000000000000..f3a247a58f9e --- /dev/null +++ b/moto/packages/boto/README.md @@ -0,0 +1,18 @@ +## Removing the `boto` Dependency + +In order to rid `moto` of a direct dependency on the long-deprecated `boto` +package, a subset of the `boto` code has been vendored here. + +This directory contains only the `boto` files required for `moto` to run, +which is a very small subset of the original package's contents. Furthermore, +the `boto` models collected here have been stripped of all superfluous +methods/attributes not used by `moto`. (Any copyright headers on the +original files have been left intact.) + +## Next Steps + +Currently, a small number of `moto` models inherit from these `boto` classes. +With some additional work, the inheritance can be dropped in favor of simply +adding the required methods/properties from these `boto` models to their +respective `moto` subclasses, which would allow for these files/directories +to be removed entirely. \ No newline at end of file diff --git a/moto/packages/boto/__init__.py b/moto/packages/boto/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/moto/packages/boto/cloudformation/__init__.py b/moto/packages/boto/cloudformation/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/moto/packages/boto/cloudformation/stack.py b/moto/packages/boto/cloudformation/stack.py new file mode 100644 index 000000000000..26c4bfdf7686 --- /dev/null +++ b/moto/packages/boto/cloudformation/stack.py @@ -0,0 +1,9 @@ +class Output(object): + def __init__(self, connection=None): + self.connection = connection + self.description = None + self.key = None + self.value = None + + def __repr__(self): + return 'Output:"%s"="%s"' % (self.key, self.value) diff --git a/moto/packages/boto/ec2/__init__.py b/moto/packages/boto/ec2/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/moto/packages/boto/ec2/blockdevicemapping.py b/moto/packages/boto/ec2/blockdevicemapping.py new file mode 100644 index 000000000000..462060115d08 --- /dev/null +++ b/moto/packages/boto/ec2/blockdevicemapping.py @@ -0,0 +1,83 @@ +# Copyright (c) 2009-2012 Mitch Garnaat http://garnaat.org/ +# Copyright (c) 2012 Amazon.com, Inc. or its affiliates. 
All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# + + +class BlockDeviceType(object): + """ + Represents parameters for a block device. + """ + + def __init__( + self, + connection=None, + ephemeral_name=None, + no_device=False, + volume_id=None, + snapshot_id=None, + status=None, + attach_time=None, + delete_on_termination=False, + size=None, + volume_type=None, + iops=None, + encrypted=None, + ): + self.connection = connection + self.ephemeral_name = ephemeral_name + self.no_device = no_device + self.volume_id = volume_id + self.snapshot_id = snapshot_id + self.status = status + self.attach_time = attach_time + self.delete_on_termination = delete_on_termination + self.size = size + self.volume_type = volume_type + self.iops = iops + self.encrypted = encrypted + + +# for backwards compatibility +EBSBlockDeviceType = BlockDeviceType + + +class BlockDeviceMapping(dict): + """ + Represents a collection of BlockDeviceTypes when creating ec2 instances. + + Example: + dev_sda1 = BlockDeviceType() + dev_sda1.size = 100 # change root volume to 100GB instead of default + bdm = BlockDeviceMapping() + bdm['/dev/sda1'] = dev_sda1 + reservation = image.run(..., block_device_map=bdm, ...) + """ + + def __init__(self, connection=None): + """ + :type connection: :class:`boto.ec2.EC2Connection` + :param connection: Optional connection. + """ + dict.__init__(self) + self.connection = connection + self.current_name = None + self.current_value = None diff --git a/moto/packages/boto/ec2/ec2object.py b/moto/packages/boto/ec2/ec2object.py new file mode 100644 index 000000000000..0067f59ce8a5 --- /dev/null +++ b/moto/packages/boto/ec2/ec2object.py @@ -0,0 +1,48 @@ +# Copyright (c) 2006-2010 Mitch Garnaat http://garnaat.org/ +# Copyright (c) 2010, Eucalyptus Systems, Inc. +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. 
+# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + +""" +Represents an EC2 Object +""" +from moto.packages.boto.ec2.tag import TagSet + + +class EC2Object(object): + def __init__(self, connection=None): + self.connection = connection + self.region = None + + +class TaggedEC2Object(EC2Object): + """ + Any EC2 resource that can be tagged should be represented + by a Python object that subclasses this class. This class + has the mechanism in place to handle the tagSet element in + the Describe* responses. If tags are found, it will create + a TagSet object and allow it to parse and collect the tags + into a dict that is stored in the "tags" attribute of the + object. + """ + + def __init__(self, connection=None): + super(TaggedEC2Object, self).__init__(connection) + self.tags = TagSet() diff --git a/moto/packages/boto/ec2/elb/__init__.py b/moto/packages/boto/ec2/elb/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/moto/packages/boto/ec2/elb/attributes.py b/moto/packages/boto/ec2/elb/attributes.py new file mode 100644 index 000000000000..fbb387ec6f3b --- /dev/null +++ b/moto/packages/boto/ec2/elb/attributes.py @@ -0,0 +1,100 @@ +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# +# Created by Chris Huegle for TellApart, Inc. + + +class ConnectionSettingAttribute(object): + """ + Represents the ConnectionSetting segment of ELB Attributes. + """ + + def __init__(self, connection=None): + self.idle_timeout = None + + def __repr__(self): + return "ConnectionSettingAttribute(%s)" % (self.idle_timeout) + + +class CrossZoneLoadBalancingAttribute(object): + """ + Represents the CrossZoneLoadBalancing segement of ELB Attributes. + """ + + def __init__(self, connection=None): + self.enabled = None + + def __repr__(self): + return "CrossZoneLoadBalancingAttribute(%s)" % (self.enabled) + + +class AccessLogAttribute(object): + """ + Represents the AccessLog segment of ELB attributes. 
+ """ + + def __init__(self, connection=None): + self.enabled = None + self.s3_bucket_name = None + self.s3_bucket_prefix = None + self.emit_interval = None + + def __repr__(self): + return "AccessLog(%s, %s, %s, %s)" % ( + self.enabled, + self.s3_bucket_name, + self.s3_bucket_prefix, + self.emit_interval, + ) + + +class ConnectionDrainingAttribute(object): + """ + Represents the ConnectionDraining segment of ELB attributes. + """ + + def __init__(self, connection=None): + self.enabled = None + self.timeout = None + + def __repr__(self): + return "ConnectionDraining(%s, %s)" % (self.enabled, self.timeout) + + +class LbAttributes(object): + """ + Represents the Attributes of an Elastic Load Balancer. + """ + + def __init__(self, connection=None): + self.connection = connection + self.cross_zone_load_balancing = CrossZoneLoadBalancingAttribute( + self.connection + ) + self.access_log = AccessLogAttribute(self.connection) + self.connection_draining = ConnectionDrainingAttribute(self.connection) + self.connecting_settings = ConnectionSettingAttribute(self.connection) + + def __repr__(self): + return "LbAttributes(%s, %s, %s, %s)" % ( + repr(self.cross_zone_load_balancing), + repr(self.access_log), + repr(self.connection_draining), + repr(self.connecting_settings), + ) diff --git a/moto/packages/boto/ec2/elb/policies.py b/moto/packages/boto/ec2/elb/policies.py new file mode 100644 index 000000000000..a5c216f7eca5 --- /dev/null +++ b/moto/packages/boto/ec2/elb/policies.py @@ -0,0 +1,55 @@ +# Copyright (c) 2010 Reza Lotun http://reza.lotun.name +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. 
+ + +class AppCookieStickinessPolicy(object): + def __init__(self, connection=None): + self.cookie_name = None + self.policy_name = None + + def __repr__(self): + return "AppCookieStickiness(%s, %s)" % (self.policy_name, self.cookie_name) + + +class OtherPolicy(object): + def __init__(self, connection=None): + self.policy_name = None + + def __repr__(self): + return "OtherPolicy(%s)" % (self.policy_name) + + +class Policies(object): + """ + ELB Policies + """ + + def __init__(self, connection=None): + self.connection = connection + self.app_cookie_stickiness_policies = None + self.lb_cookie_stickiness_policies = None + self.other_policies = None + + def __repr__(self): + app = "AppCookieStickiness%s" % self.app_cookie_stickiness_policies + lb = "LBCookieStickiness%s" % self.lb_cookie_stickiness_policies + other = "Other%s" % self.other_policies + return "Policies(%s,%s,%s)" % (app, lb, other) diff --git a/moto/packages/boto/ec2/image.py b/moto/packages/boto/ec2/image.py new file mode 100644 index 000000000000..b1fba4197d94 --- /dev/null +++ b/moto/packages/boto/ec2/image.py @@ -0,0 +1,25 @@ +# Copyright (c) 2006-2010 Mitch Garnaat http://garnaat.org/ +# Copyright (c) 2010, Eucalyptus Systems, Inc. +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + + +class ProductCodes(list): + pass diff --git a/moto/packages/boto/ec2/instance.py b/moto/packages/boto/ec2/instance.py new file mode 100644 index 000000000000..3ba81ee95efe --- /dev/null +++ b/moto/packages/boto/ec2/instance.py @@ -0,0 +1,217 @@ +# Copyright (c) 2006-2012 Mitch Garnaat http://garnaat.org/ +# Copyright (c) 2010, Eucalyptus Systems, Inc. +# Copyright (c) 2012 Amazon.com, Inc. or its affiliates. All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + +""" +Represents an EC2 Instance +""" +from moto.packages.boto.ec2.ec2object import EC2Object, TaggedEC2Object +from moto.packages.boto.ec2.image import ProductCodes + + +class InstanceState(object): + """ + The state of the instance. + + :ivar code: The low byte represents the state. The high byte is an + opaque internal value and should be ignored. Valid values: + + * 0 (pending) + * 16 (running) + * 32 (shutting-down) + * 48 (terminated) + * 64 (stopping) + * 80 (stopped) + + :ivar name: The name of the state of the instance. Valid values: + + * "pending" + * "running" + * "shutting-down" + * "terminated" + * "stopping" + * "stopped" + """ + + def __init__(self, code=0, name=None): + self.code = code + self.name = name + + def __repr__(self): + return "%s(%d)" % (self.name, self.code) + + +class InstancePlacement(object): + """ + The location where the instance launched. + + :ivar zone: The Availability Zone of the instance. + :ivar group_name: The name of the placement group the instance is + in (for cluster compute instances). + :ivar tenancy: The tenancy of the instance (if the instance is + running within a VPC). An instance with a tenancy of dedicated + runs on single-tenant hardware. + """ + + def __init__(self, zone=None, group_name=None, tenancy=None): + self.zone = zone + self.group_name = group_name + self.tenancy = tenancy + + def __repr__(self): + return self.zone + + +class Reservation(EC2Object): + """ + Represents a Reservation response object. + + :ivar id: The unique ID of the Reservation. + :ivar owner_id: The unique ID of the owner of the Reservation. + :ivar groups: A list of Group objects representing the security + groups associated with launched instances. + :ivar instances: A list of Instance objects launched in this + Reservation. + """ + + def __init__(self, connection=None): + super(Reservation, self).__init__(connection) + self.id = None + self.owner_id = None + self.groups = [] + self.instances = [] + + def __repr__(self): + return "Reservation:%s" % self.id + + +class Instance(TaggedEC2Object): + """ + Represents an instance. + + :ivar id: The unique ID of the Instance. + :ivar groups: A list of Group objects representing the security + groups associated with the instance. + :ivar public_dns_name: The public dns name of the instance. + :ivar private_dns_name: The private dns name of the instance. + :ivar state: The string representation of the instance's current state. + :ivar state_code: An integer representation of the instance's + current state. + :ivar previous_state: The string representation of the instance's + previous state. + :ivar previous_state_code: An integer representation of the + instance's current state. + :ivar key_name: The name of the SSH key associated with the instance. + :ivar instance_type: The type of instance (e.g. m1.small). + :ivar launch_time: The time the instance was launched. + :ivar image_id: The ID of the AMI used to launch this instance. + :ivar placement: The availability zone in which the instance is running. + :ivar placement_group: The name of the placement group the instance + is in (for cluster compute instances). + :ivar placement_tenancy: The tenancy of the instance, if the instance + is running within a VPC. 
An instance with a tenancy of dedicated + runs on a single-tenant hardware. + :ivar kernel: The kernel associated with the instance. + :ivar ramdisk: The ramdisk associated with the instance. + :ivar architecture: The architecture of the image (i386|x86_64). + :ivar hypervisor: The hypervisor used. + :ivar virtualization_type: The type of virtualization used. + :ivar product_codes: A list of product codes associated with this instance. + :ivar ami_launch_index: This instances position within it's launch group. + :ivar monitored: A boolean indicating whether monitoring is enabled or not. + :ivar monitoring_state: A string value that contains the actual value + of the monitoring element returned by EC2. + :ivar spot_instance_request_id: The ID of the spot instance request + if this is a spot instance. + :ivar subnet_id: The VPC Subnet ID, if running in VPC. + :ivar vpc_id: The VPC ID, if running in VPC. + :ivar private_ip_address: The private IP address of the instance. + :ivar ip_address: The public IP address of the instance. + :ivar platform: Platform of the instance (e.g. Windows) + :ivar root_device_name: The name of the root device. + :ivar root_device_type: The root device type (ebs|instance-store). + :ivar block_device_mapping: The Block Device Mapping for the instance. + :ivar state_reason: The reason for the most recent state transition. + :ivar interfaces: List of Elastic Network Interfaces associated with + this instance. + :ivar ebs_optimized: Whether instance is using optimized EBS volumes + or not. + :ivar instance_profile: A Python dict containing the instance + profile id and arn associated with this instance. + """ + + def __init__(self, connection=None): + super(Instance, self).__init__(connection) + self.id = None + self.dns_name = None + self.public_dns_name = None + self.private_dns_name = None + self.key_name = None + self.instance_type = None + self.launch_time = None + self.image_id = None + self.kernel = None + self.ramdisk = None + self.product_codes = ProductCodes() + self.ami_launch_index = None + self.monitored = False + self.monitoring_state = None + self.spot_instance_request_id = None + self.subnet_id = None + self.vpc_id = None + self.private_ip_address = None + self.ip_address = None + self.requester_id = None + self._in_monitoring_element = False + self.persistent = False + self.root_device_name = None + self.root_device_type = None + self.block_device_mapping = None + self.state_reason = None + self.group_name = None + self.client_token = None + self.eventsSet = None + self.groups = [] + self.platform = None + self.interfaces = [] + self.hypervisor = None + self.virtualization_type = None + self.architecture = None + self.instance_profile = None + self._previous_state = None + self._state = InstanceState() + self._placement = InstancePlacement() + + def __repr__(self): + return "Instance:%s" % self.id + + @property + def state(self): + return self._state.name + + @property + def state_code(self): + return self._state.code + + @property + def placement(self): + return self._placement.zone diff --git a/moto/packages/boto/ec2/instancetype.py b/moto/packages/boto/ec2/instancetype.py new file mode 100644 index 000000000000..a84e4879e694 --- /dev/null +++ b/moto/packages/boto/ec2/instancetype.py @@ -0,0 +1,50 @@ +# Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without 
restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + + +from moto.packages.boto.ec2.ec2object import EC2Object + + +class InstanceType(EC2Object): + """ + Represents an EC2 VM Type + + :ivar name: The name of the vm type + :ivar cores: The number of cpu cores for this vm type + :ivar memory: The amount of memory in megabytes for this vm type + :ivar disk: The amount of disk space in gigabytes for this vm type + """ + + def __init__(self, connection=None, name=None, cores=None, memory=None, disk=None): + super(InstanceType, self).__init__(connection) + self.connection = connection + self.name = name + self.cores = cores + self.memory = memory + self.disk = disk + + def __repr__(self): + return "InstanceType:%s-%s,%s,%s" % ( + self.name, + self.cores, + self.memory, + self.disk, + ) diff --git a/moto/packages/boto/ec2/launchspecification.py b/moto/packages/boto/ec2/launchspecification.py new file mode 100644 index 000000000000..df6c99fc563e --- /dev/null +++ b/moto/packages/boto/ec2/launchspecification.py @@ -0,0 +1,48 @@ +# Copyright (c) 2006-2012 Mitch Garnaat http://garnaat.org/ +# Copyright (c) 2012 Amazon.com, Inc. or its affiliates. All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + +""" +Represents a launch specification for Spot instances. 
+""" + +from moto.packages.boto.ec2.ec2object import EC2Object + + +class LaunchSpecification(EC2Object): + def __init__(self, connection=None): + super(LaunchSpecification, self).__init__(connection) + self.key_name = None + self.instance_type = None + self.image_id = None + self.groups = [] + self.placement = None + self.kernel = None + self.ramdisk = None + self.monitored = False + self.subnet_id = None + self._in_monitoring_element = False + self.block_device_mapping = None + self.instance_profile = None + self.ebs_optimized = False + + def __repr__(self): + return "LaunchSpecification(%s)" % self.image_id diff --git a/moto/packages/boto/ec2/spotinstancerequest.py b/moto/packages/boto/ec2/spotinstancerequest.py new file mode 100644 index 000000000000..c8630e74abf9 --- /dev/null +++ b/moto/packages/boto/ec2/spotinstancerequest.py @@ -0,0 +1,85 @@ +# Copyright (c) 2006-2010 Mitch Garnaat http://garnaat.org/ +# Copyright (c) 2010, Eucalyptus Systems, Inc. +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + +""" +Represents an EC2 Spot Instance Request +""" + +from moto.packages.boto.ec2.ec2object import TaggedEC2Object + + +class SpotInstanceRequest(TaggedEC2Object): + """ + + :ivar id: The ID of the Spot Instance Request. + :ivar price: The maximum hourly price for any Spot Instance launched to + fulfill the request. + :ivar type: The Spot Instance request type. + :ivar state: The state of the Spot Instance request. + :ivar fault: The fault codes for the Spot Instance request, if any. + :ivar valid_from: The start date of the request. If this is a one-time + request, the request becomes active at this date and time and remains + active until all instances launch, the request expires, or the request is + canceled. If the request is persistent, the request becomes active at this + date and time and remains active until it expires or is canceled. + :ivar valid_until: The end date of the request. If this is a one-time + request, the request remains active until all instances launch, the request + is canceled, or this date is reached. If the request is persistent, it + remains active until it is canceled or this date is reached. + :ivar launch_group: The instance launch group. Launch groups are Spot + Instances that launch together and terminate together. + :ivar launched_availability_zone: foo + :ivar product_description: The Availability Zone in which the bid is + launched. + :ivar availability_zone_group: The Availability Zone group. 
If you specify + the same Availability Zone group for all Spot Instance requests, all Spot + Instances are launched in the same Availability Zone. + :ivar create_time: The time stamp when the Spot Instance request was + created. + :ivar launch_specification: Additional information for launching instances. + :ivar instance_id: The instance ID, if an instance has been launched to + fulfill the Spot Instance request. + :ivar status: The status code and status message describing the Spot + Instance request. + + """ + + def __init__(self, connection=None): + super(SpotInstanceRequest, self).__init__(connection) + self.id = None + self.price = None + self.type = None + self.state = None + self.fault = None + self.valid_from = None + self.valid_until = None + self.launch_group = None + self.launched_availability_zone = None + self.product_description = None + self.availability_zone_group = None + self.create_time = None + self.launch_specification = None + self.instance_id = None + self.status = None + + def __repr__(self): + return "SpotInstanceRequest:%s" % self.id diff --git a/moto/packages/boto/ec2/tag.py b/moto/packages/boto/ec2/tag.py new file mode 100644 index 000000000000..9f5c2ef88906 --- /dev/null +++ b/moto/packages/boto/ec2/tag.py @@ -0,0 +1,35 @@ +# Copyright (c) 2010 Mitch Garnaat http://garnaat.org/ +# Copyright (c) 2010, Eucalyptus Systems, Inc. +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + + +class TagSet(dict): + """ + A TagSet is used to collect the tags associated with a particular + EC2 resource. Not all resources can be tagged but for those that + can, this dict object will be used to collect those values. See + :class:`boto.ec2.ec2object.TaggedEC2Object` for more details. 
+ """ + + def __init__(self, connection=None): + self.connection = connection + self._current_key = None + self._current_value = None diff --git a/moto/rds/models.py b/moto/rds/models.py index 33be04e8cb10..5039d9a26aa9 100644 --- a/moto/rds/models.py +++ b/moto/rds/models.py @@ -1,6 +1,6 @@ from __future__ import unicode_literals -import boto.rds +from boto3 import Session from jinja2 import Template from moto.core import BaseBackend, CloudFormationModel @@ -335,6 +335,10 @@ def rds2_backend(self): return rds2_backends[self.region] -rds_backends = dict( - (region.name, RDSBackend(region.name)) for region in boto.rds.regions() -) +rds_backends = {} +for region in Session().get_available_regions("rds"): + rds_backends[region] = RDSBackend(region) +for region in Session().get_available_regions("rds", partition_name="aws-us-gov"): + rds_backends[region] = RDSBackend(region) +for region in Session().get_available_regions("rds", partition_name="aws-cn"): + rds_backends[region] = RDSBackend(region) diff --git a/setup.py b/setup.py index a738feab6f6d..913565eb4c15 100755 --- a/setup.py +++ b/setup.py @@ -32,7 +32,6 @@ def get_version(): install_requires = [ - "boto>=2.36.0", "boto3>=1.9.201", "botocore>=1.12.201", "cryptography>=2.3.0", diff --git a/tests/test_cloudformation/test_stack_parsing.py b/tests/test_cloudformation/test_stack_parsing.py index 4e51c5b1220b..9692e36cbed9 100644 --- a/tests/test_cloudformation/test_stack_parsing.py +++ b/tests/test_cloudformation/test_stack_parsing.py @@ -15,7 +15,7 @@ from moto.sqs.models import Queue from moto.s3.models import FakeBucket from moto.cloudformation.utils import yaml_tag_constructor -from boto.cloudformation.stack import Output +from moto.packages.boto.cloudformation.stack import Output dummy_template = { diff --git a/tests/test_ec2/test_ec2_cloudformation.py b/tests/test_ec2/test_ec2_cloudformation.py index b5aa8dd24fac..6fa27140be81 100644 --- a/tests/test_ec2/test_ec2_cloudformation.py +++ b/tests/test_ec2/test_ec2_cloudformation.py @@ -2,6 +2,9 @@ from moto import mock_cloudformation, mock_ec2 from tests.test_cloudformation.fixtures import vpc_eni import boto +import boto.ec2 +import boto.cloudformation +import boto.vpc import boto3 import json import sure # noqa From 72e616cb48e3a781bd68280c48bd6aa8dc099a1a Mon Sep 17 00:00:00 2001 From: Steve Pulec Date: Sat, 28 Nov 2020 17:10:38 -0600 Subject: [PATCH 656/658] Add tagging to docker image build. --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 0df12ac17628..391a8efa0d2b 100644 --- a/Makefile +++ b/Makefile @@ -39,7 +39,7 @@ upload_pypi_artifact: twine upload dist/* push_dockerhub_image: - docker build -t motoserver/moto . + docker build -t motoserver/moto . 
--tag moto:`python setup.py --version` docker push motoserver/moto tag_github_release: From b2adcdf518fb825bbcea7ff8589d3870cf72a82c Mon Sep 17 00:00:00 2001 From: usmangani1 Date: Wed, 2 Dec 2020 01:23:01 +0530 Subject: Fix:RDS:add DBParameterGroupArn in describe-db-parameter-groups & cre… (#3462) * Fix:RDS:add DBParameterGroupArn in describe-db-parameter-groups & create-db-parameter-group * Test change * Fixed tests * tests change accountID * linting Co-authored-by: usmankb --- moto/rds2/models.py | 14 +++++++++++--- tests/test_rds2/test_rds2.py | 17 ++++++++++++++--- 2 files changed, 25 insertions(+), 6 deletions(-) diff --git a/moto/rds2/models.py b/moto/rds2/models.py index bc52bdcbf40f..eb4159025966 100644 --- a/moto/rds2/models.py +++ b/moto/rds2/models.py @@ -9,7 +9,8 @@ from jinja2 import Template from re import compile as re_compile from moto.compat import OrderedDict -from moto.core import BaseBackend, BaseModel, CloudFormationModel +from moto.core import BaseBackend, BaseModel, CloudFormationModel, ACCOUNT_ID + from moto.core.utils import iso_8601_datetime_with_milliseconds from moto.ec2.models import ec2_backends from .exceptions import ( @@ -157,6 +158,7 @@ def db_parameter_groups(self): family=db_family, description=description, tags={}, + region=self.region, ) ] else: @@ -1172,7 +1174,7 @@ def create_db_parameter_group(self, db_parameter_group_kwargs): "InvalidParameterValue", "The parameter DBParameterGroupName must be provided and must not be blank.", ) - + db_parameter_group_kwargs["region"] = self.region db_parameter_group = DBParameterGroup(**db_parameter_group_kwargs) self.db_parameter_groups[db_parameter_group_id] = db_parameter_group return db_parameter_group @@ -1471,13 +1473,18 @@ def to_xml(self): return template.render(option_group_option_setting=self) +def make_rds_arn(region, name): + return "arn:aws:rds:{0}:{1}:pg:{2}".format(region, ACCOUNT_ID, name) + + class DBParameterGroup(CloudFormationModel): - def __init__(self, name, description, family, tags): + def __init__(self, name, description, family, tags, region): self.name = name self.description = description self.family = family self.tags = tags self.parameters = defaultdict(dict) + self.arn = make_rds_arn(region, name) def to_xml(self): template = Template( @@ -1485,6 +1492,7 @@ def to_xml(self): <DBParameterGroupName>{{ param_group.name }}</DBParameterGroupName> <DBParameterGroupFamily>{{ param_group.family }}</DBParameterGroupFamily> <Description>{{ param_group.description }}</Description> + <DBParameterGroupArn>{{ param_group.arn }}</DBParameterGroupArn> </DBParameterGroup>""" ) return template.render(param_group=self) diff --git a/tests/test_rds2/test_rds2.py b/tests/test_rds2/test_rds2.py index fd2ffb9d0721..96ec378db539 100644 --- a/tests/test_rds2/test_rds2.py +++ b/tests/test_rds2/test_rds2.py @@ -4,6 +4,7 @@ import boto3 import sure # noqa from moto import mock_ec2, mock_kms, mock_rds2 +from moto.core import ACCOUNT_ID @mock_rds2 @@ -1504,7 +1505,9 @@ def test_create_database_with_encrypted_storage(): @mock_rds2 def test_create_db_parameter_group(): - conn = boto3.client("rds", region_name="us-west-2") + region = "us-west-2" + pg_name = "test" + conn = boto3.client("rds", region_name=region) db_parameter_group = conn.create_db_parameter_group( DBParameterGroupName="test", DBParameterGroupFamily="mysql5.6", Description="test parameter group", ) @@ -1518,6 +1521,9 @@ def test_create_db_parameter_group(): db_parameter_group["DBParameterGroup"]["DBParameterGroupName"].should.equal( "test" ) db_parameter_group["DBParameterGroup"]["Description"].should.equal( "test parameter group" ) +
db_parameter_group["DBParameterGroup"]["DBParameterGroupArn"].should.equal( + "arn:aws:rds:{0}:{1}:pg:{2}".format(region, ACCOUNT_ID, pg_name) + ) @mock_rds2 @@ -1629,9 +1635,11 @@ def test_create_db_parameter_group_duplicate(): @mock_rds2 def test_describe_db_parameter_group(): - conn = boto3.client("rds", region_name="us-west-2") + region = "us-west-2" + pg_name = "test" + conn = boto3.client("rds", region_name=region) conn.create_db_parameter_group( - DBParameterGroupName="test", + DBParameterGroupName=pg_name, DBParameterGroupFamily="mysql5.6", Description="test parameter group", ) @@ -1639,6 +1647,9 @@ def test_describe_db_parameter_group(): db_parameter_groups["DBParameterGroups"][0]["DBParameterGroupName"].should.equal( "test" ) + db_parameter_groups["DBParameterGroups"][0]["DBParameterGroupArn"].should.equal( + "arn:aws:rds:{0}:{1}:pg:{2}".format(region, ACCOUNT_ID, pg_name) + ) @mock_rds2 From ffa7f2e41ad610e4f7b3ebbc06081a8df2459cc7 Mon Sep 17 00:00:00 2001 From: usmangani1 Date: Thu, 3 Dec 2020 13:12:19 +0530 Subject: [PATCH 658/658] Fix:SES:Get Template Html part (#3504) * SES:Get Template Html part * Linting * Linting Co-authored-by: Bert Blommers --- moto/ses/responses.py | 2 +- tests/test_ses/test_ses_boto3.py | 4 +++- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/moto/ses/responses.py b/moto/ses/responses.py index 9702c724d3d5..703cd2e7a313 100644 --- a/moto/ses/responses.py +++ b/moto/ses/responses.py @@ -374,7 +374,7 @@ def create_receipt_rule(self): diff --git a/tests/test_ses/test_ses_boto3.py b/tests/test_ses/test_ses_boto3.py index 5af4d9cbfc52..e3c2b6d3dd83 100644 --- a/tests/test_ses/test_ses_boto3.py +++ b/tests/test_ses/test_ses_boto3.py @@ -473,7 +473,9 @@ def test_create_ses_template(): result = conn.get_template(TemplateName="MyTemplate") result["Template"]["TemplateName"].should.equal("MyTemplate") result["Template"]["SubjectPart"].should.equal("Greetings, {{name}}!") - + result["Template"]["HtmlPart"].should.equal( + "

Hello {{name}}," "

Your favorite animal is {{favoriteanimal}}.

" + ) # get a template which is not present with pytest.raises(ClientError) as ex: conn.get_template(TemplateName="MyFakeTemplate")